repo
stringlengths
6
47
file_url
stringlengths
77
269
file_path
stringlengths
5
186
content
stringlengths
0
32.8k
language
stringclasses
1 value
license
stringclasses
7 values
commit_sha
stringlengths
40
40
retrieved_at
stringdate
2026-01-07 08:35:43
2026-01-07 08:55:24
truncated
bool
2 classes
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/bytedance/sonic/internal/decoder/errors/errors.go
vendor/github.com/bytedance/sonic/internal/decoder/errors/errors.go
/*
 * Copyright 2021 ByteDance Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Package errors defines the error values shared by the sonic decoder
// implementations, plus small helpers called from the JIT-generated code.
package errors

import (
    `encoding/json`
    `errors`
    `fmt`
    `reflect`
    `strconv`
    `strings`

    `github.com/bytedance/sonic/internal/native/types`
    `github.com/bytedance/sonic/internal/rt`
)

// SyntaxError describes a JSON syntax error at a byte position of the input.
type SyntaxError struct {
    Pos  int                // byte offset of the offending character in Src
    Src  string             // the JSON source being decoded
    Code types.ParsingError // machine-readable error code from the parser
    Msg  string             // optional message; overrides Code's text when non-empty
}

// Error implements the error interface. The description is %q-quoted so its
// embedded newlines and tabs stay on a single log line.
func (self SyntaxError) Error() string {
    return fmt.Sprintf("%q", self.Description())
}

// Description returns a multi-line human-readable description of the error,
// including a caret indicator pointing at the offending byte.
func (self SyntaxError) Description() string {
    return "Syntax error " + self.description()
}

// description renders "at index N: msg" followed by a source excerpt and a
// dotted caret line marking the error position.
func (self SyntaxError) description() string {
    /* check for empty source */
    if self.Src == "" {
        return fmt.Sprintf("no sources available, the input json is empty: %#v", self)
    }

    p, x, q, y := calcBounds(len(self.Src), self.Pos)

    /* compose the error description */
    return fmt.Sprintf(
        "at index %d: %s\n\n\t%s\n\t%s^%s\n",
        self.Pos,
        self.Message(),
        self.Src[p:q],
        strings.Repeat(".", x),
        strings.Repeat(".", y),
    )
}

// calcBounds computes the excerpt window [lbound, rbound) of up to 32 bytes
// around pos, plus the number of filler dots to print before (lwidth) and
// after (rwidth) the caret so it lines up under the offending byte.
func calcBounds(size int, pos int) (lbound int, lwidth int, rbound int, rwidth int) {
    // out-of-range positions fall back to echoing the whole source, no caret offset
    if pos >= size || pos < 0 {
        return 0, 0, size, 0
    }

    // i tracks how many bytes of context sit to the left of pos
    i := 16
    lbound = pos - i
    rbound = pos + i

    /* prevent slicing before the beginning */
    if lbound < 0 {
        lbound, rbound, i = 0, rbound - lbound, i + lbound
    }

    /* prevent slicing beyond the end */
    if n := size; rbound > n {
        n = rbound - n
        rbound = size

        /* move the left bound if possible */
        if lbound > n {
            i += n
            lbound -= n
        }
    }

    /* left and right length */
    lwidth = clamp_zero(i)
    rwidth = clamp_zero(rbound - lbound - i - 1)
    return
}

// Message returns Msg when set, otherwise the default text for Code.
func (self SyntaxError) Message() string {
    if self.Msg == "" {
        return self.Code.Message()
    }
    return self.Msg
}

// clamp_zero clips negative values to zero.
func clamp_zero(v int) int {
    if v < 0 {
        return 0
    } else {
        return v
    }
}

/** JIT Error Helpers **/

// StackOverflow is returned when JSON nesting exceeds the decoder's
// supported depth.
var StackOverflow = &json.UnsupportedValueError {
    Str   : "Value nesting too deep",
    Value : reflect.ValueOf("..."),
}

// ErrorWrap converts a native parsing error code into a SyntaxError value.
func ErrorWrap(src string, pos int, code types.ParsingError) error {
    return *error_wrap_heap(src, pos, code)
}

// error_wrap_heap performs the heap allocation behind ErrorWrap; it is kept
// noinline so JIT-compiled callers have a stable call target.
//go:noinline
func error_wrap_heap(src string, pos int, code types.ParsingError) *SyntaxError {
    return &SyntaxError {
        Pos  : pos,
        Src  : src,
        Code : code,
    }
}

// ErrorType reports that vt is not a type the decoder can unmarshal into.
func ErrorType(vt *rt.GoType) error {
    return &json.UnmarshalTypeError{Type: vt.Pack()}
}

// MismatchTypeError reports a mismatch between the JSON value at Pos in Src
// and the Go type the decoder was asked to fill.
type MismatchTypeError struct {
    Pos  int
    Src  string
    Type reflect.Type
}

// swithchJSONType names the JSON value type that starts at src[pos]; it
// returns "" for an unrecognized leading byte.
// NOTE(review): the name is misspelled ("swithch") — kept as-is because it is
// package-internal and may be referenced from other files of this package.
func swithchJSONType (src string, pos int) string {
    var val string
    switch src[pos] {
    case 'f':
        fallthrough
    case 't':
        val = "bool"
    case '"':
        val = "string"
    case '{':
        val = "object"
    case '[':
        val = "array"
    case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
        val = "number"
    }
    return val
}

// Error implements the error interface; the syntax detail is %q-quoted to
// keep the message on one line.
func (self MismatchTypeError) Error() string {
    se := SyntaxError {
        Pos  : self.Pos,
        Src  : self.Src,
        Code : types.ERR_MISMATCH,
    }
    return fmt.Sprintf("Mismatch type %s with value %s %q", self.Type.String(), swithchJSONType(self.Src, self.Pos), se.description())
}

// Description is the unquoted, multi-line form of Error.
func (self MismatchTypeError) Description() string {
    se := SyntaxError {
        Pos  : self.Pos,
        Src  : self.Src,
        Code : types.ERR_MISMATCH,
    }
    return fmt.Sprintf("Mismatch type %s with value %s %s", self.Type.String(), swithchJSONType(self.Src, self.Pos), se.description())
}

// ErrorMismatch builds a MismatchTypeError for the Go type vt.
func ErrorMismatch(src string, pos int, vt *rt.GoType) error {
    return &MismatchTypeError {
        Pos  : pos,
        Src  : src,
        Type : vt.Pack(),
    }
}

// ErrorField reports an unknown object key when unknown fields are disallowed.
func ErrorField(name string) error {
    return errors.New("json: unknown field " + strconv.Quote(name))
}

// ErrorValue reports that value cannot be unmarshaled into vtype.
func ErrorValue(value string, vtype reflect.Type) error {
    return &json.UnmarshalTypeError {
        Type  : vtype,
        Value : value,
    }
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/bytedance/sonic/internal/decoder/consts/option.go
vendor/github.com/bytedance/sonic/internal/decoder/consts/option.go
// Package consts centralizes the decoder feature-flag definitions shared by
// the JIT and optimized decoder implementations.
package consts

import (
    `github.com/bytedance/sonic/internal/native/types`
)

// Bit positions of the decoder feature flags. Several positions are aliased
// from package types because they must stay in sync with the native layer.
const (
    F_use_int64 = 0
    F_disable_urc = 2
    F_disable_unknown = 3
    F_copy_string = 4

    F_use_number = types.B_USE_NUMBER
    F_validate_string = types.B_VALIDATE_STRING
    F_allow_control = types.B_ALLOW_CONTROL
    F_no_validate_json = types.B_NO_VALIDATE_JSON
    F_case_sensitive = 7
)

// Options is a bit set of decoder features, one bit per F_* position above.
type Options uint64

const (
    OptionUseInt64         Options = 1 << F_use_int64         // decode integers as int64 instead of float64
    OptionUseNumber        Options = 1 << F_use_number        // decode numbers as json.Number
    OptionUseUnicodeErrors Options = 1 << F_disable_urc       // report invalid UTF-8 instead of replacing it
    OptionDisableUnknown   Options = 1 << F_disable_unknown   // error on unknown struct fields
    OptionCopyString       Options = 1 << F_copy_string       // copy string values instead of referencing the input
    OptionValidateString   Options = 1 << F_validate_string   // validate string values while decoding
    OptionNoValidateJSON   Options = 1 << F_no_validate_json  // skip JSON validity checking
    OptionCaseSensitive    Options = 1 << F_case_sensitive
)

const (
    // MaxStack is the maximum supported JSON nesting depth.
    MaxStack = 4096
)
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/bytedance/sonic/internal/decoder/api/stream.go
vendor/github.com/bytedance/sonic/internal/decoder/api/stream.go
/*
 * Copyright 2021 ByteDance Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package api

import (
    `bytes`
    `io`
    `sync`

    `github.com/bytedance/sonic/internal/native`
    `github.com/bytedance/sonic/internal/native/types`
    `github.com/bytedance/sonic/internal/rt`
    `github.com/bytedance/sonic/option`
)

var (
    // minLeftBufferShift tunes realloc: the buffer grows when less than
    // cap>>minLeftBufferShift (i.e. half the capacity) is left free.
    minLeftBufferShift uint = 1
)

// StreamDecoder is the decoder context object for streaming input.
type StreamDecoder struct {
    r       io.Reader // underlying input stream
    buf     []byte    // read buffer; buf[scanp:] holds not-yet-decoded bytes
    scanp   int       // offset of the next byte to scan within buf
    scanned int64     // total bytes already dropped from the head of buf
    err     error     // sticky error; once set, further decoding stops
    Decoder           // embedded one-shot decoder, reset for each value
}

// bufPool recycles read buffers across StreamDecoder instances.
var bufPool = sync.Pool{
    New: func () interface{} {
        return make([]byte, 0, option.DefaultDecoderBufferSize)
    },
}

// freeBytes returns buf to the pool if its capacity is within the reusable range.
func freeBytes(buf []byte) {
    if rt.CanSizeResue(cap(buf)) {
        bufPool.Put(buf[:0])
    }
}

// NewStreamDecoder adapts to encoding/json.NewDecoder API.
//
// NewStreamDecoder returns a new decoder that reads from r.
func NewStreamDecoder(r io.Reader) *StreamDecoder {
    return &StreamDecoder{r : r}
}

// Decode decodes input stream into val with corresponding data.
// Redundant bytes may be read and left in the buffer, to be used by the next call.
// Either an io error from the underlying io.Reader (except io.EOF)
// or a syntax error from the data will be recorded and stop subsequent decoding.
func (self *StreamDecoder) Decode(val interface{}) (err error) {
    // read more data into buf
    if self.More() {
        var s = self.scanp
    try_skip:
        var e = len(self.buf)
        var src = rt.Mem2Str(self.buf[s:e])
        // try skip
        var x = 0;
        if y := native.SkipOneFast(&src, &x); y < 0 {
            if self.readMore() {
                goto try_skip
            } else {
                // NOTE(review): -s negates the buffer offset, not the negative
                // return value y of SkipOneFast — suspect this should be
                // types.ParsingError(-y); confirm against upstream sonic.
                err = SyntaxError{e, self.s, types.ParsingError(-s), ""}
                self.setErr(err)
                return
            }
        } else {
            // NOTE(review): s is advanced by y first, then e = x + s; if x is
            // relative to the original slice start this counts y twice —
            // confirm SkipOneFast's cursor contract.
            s = y + s
            e = x + s
        }
        // must copy string here for safety
        self.Decoder.Reset(string(self.buf[s:e]))
        err = self.Decoder.Decode(val)
        if err != nil {
            self.setErr(err)
            return
        }
        self.scanp = e
        _, empty := self.scan()
        if empty {
            // no remain valid bytes, thus we just recycle buffer
            mem := self.buf
            self.buf = nil
            freeBytes(mem)
        } else {
            // remain undecoded bytes, move them onto head
            n := copy(self.buf, self.buf[self.scanp:])
            self.buf = self.buf[:n]
        }
        self.scanned += int64(self.scanp)
        self.scanp = 0
    }

    return self.err
}

// InputOffset returns the input stream byte offset of the current decoder position.
// The offset gives the location of the end of the most recently returned token and the beginning of the next token.
func (self *StreamDecoder) InputOffset() int64 {
    return self.scanned + int64(self.scanp)
}

// Buffered returns a reader of the data remaining in the Decoder's buffer.
// The reader is valid until the next call to Decode.
func (self *StreamDecoder) Buffered() io.Reader {
    return bytes.NewReader(self.buf[self.scanp:])
}

// More reports whether there is another element in the
// current array or object being parsed.
func (self *StreamDecoder) More() bool {
    if self.err != nil {
        return false
    }
    c, err := self.peek()
    return err == nil && c != ']' && c != '}'
}

// readMore pulls more bytes from the underlying reader into buf until either
// a non-space byte is available (true) or a read error occurs (false).
func (self *StreamDecoder) readMore() bool {
    if self.err != nil {
        return false
    }

    var err error
    var n int
    for {
        // Grow buffer if not large enough.
        l := len(self.buf)
        realloc(&self.buf)

        n, err = self.r.Read(self.buf[l:cap(self.buf)])
        self.buf = self.buf[: l+n]

        self.scanp = l
        _, empty := self.scan()
        if !empty {
            return true
        }

        // buffer has been scanned, now report any error
        if err != nil {
            self.setErr(err)
            return false
        }
    }
}

// setErr records the sticky error and releases the buffer back to the pool.
func (self *StreamDecoder) setErr(err error) {
    self.err = err
    mem := self.buf[:0]
    self.buf = nil
    freeBytes(mem)
}

// peek returns the next non-space byte without consuming it, refilling the
// buffer from the reader as needed.
func (self *StreamDecoder) peek() (byte, error) {
    var err error
    for {
        c, empty := self.scan()
        if !empty {
            return byte(c), nil
        }
        // buffer has been scanned, now report any error
        if err != nil {
            self.setErr(err)
            return 0, err
        }
        err = self.refill()
    }
}

// scan advances scanp past whitespace and returns the first non-space byte;
// the second result is true when the buffer held only whitespace.
func (self *StreamDecoder) scan() (byte, bool) {
    for i := self.scanp; i < len(self.buf); i++ {
        c := self.buf[i]
        if isSpace(c) {
            continue
        }
        self.scanp = i
        return c, false
    }
    return 0, true
}

// isSpace reports whether c is a JSON whitespace byte, via the native bitmask.
func isSpace(c byte) bool {
    return types.SPACE_MASK & (1 << c) != 0
}

// refill compacts consumed bytes to the front of buf, grows it if needed and
// reads once from the underlying reader. The read error is returned to the
// caller so already-buffered bytes can be scanned first.
func (self *StreamDecoder) refill() error {
    // Make room to read more into the buffer.
    // First slide down data already consumed.
    if self.scanp > 0 {
        self.scanned += int64(self.scanp)
        n := copy(self.buf, self.buf[self.scanp:])
        self.buf = self.buf[:n]
        self.scanp = 0
    }

    // Grow buffer if not large enough.
    realloc(&self.buf)

    // Read. Delay error for next iteration (after scan).
    n, err := self.r.Read(self.buf[len(self.buf):cap(self.buf)])
    self.buf = self.buf[0 : len(self.buf)+n]

    return err
}

// realloc ensures *buf has free capacity: an empty buffer is taken from the
// pool, and a nearly-full one is copied into a larger allocation. It reports
// whether a new backing array was installed.
func realloc(buf *[]byte) bool {
    l := uint(len(*buf))
    c := uint(cap(*buf))
    if c == 0 {
        *buf = bufPool.Get().([]byte)
        return true
    }
    if c - l <= c >> minLeftBufferShift {
        e := l+(l>>minLeftBufferShift)
        if e <= c {
            e = c*2
        }
        tmp := make([]byte, l, e)
        copy(tmp, *buf)
        *buf = tmp
        return true
    }
    return false
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/bytedance/sonic/internal/decoder/api/decoder_amd64.go
vendor/github.com/bytedance/sonic/internal/decoder/api/decoder_amd64.go
//go:build go1.17 && !go1.25
// +build go1.17,!go1.25

/*
 * Copyright 2021 ByteDance Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package api

import (
    "github.com/bytedance/sonic/internal/envs"
    "github.com/bytedance/sonic/internal/decoder/jitdec"
    "github.com/bytedance/sonic/internal/decoder/optdec"
)

// Backend selection for amd64: the JIT decoder is the default.
var (
    pretouchImpl = jitdec.Pretouch
    decodeImpl   = jitdec.Decode
)

// init switches to the non-JIT optimized decoder when the environment
// requests it (envs.UseOptDec).
func init() {
    if envs.UseOptDec {
        pretouchImpl = optdec.Pretouch
        decodeImpl = optdec.Decode
    }
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/bytedance/sonic/internal/decoder/api/decoder.go
vendor/github.com/bytedance/sonic/internal/decoder/api/decoder.go
/*
 * Copyright 2021 ByteDance Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package api

import (
    `reflect`

    `github.com/bytedance/sonic/internal/native`
    `github.com/bytedance/sonic/internal/native/types`
    `github.com/bytedance/sonic/internal/decoder/consts`
    `github.com/bytedance/sonic/internal/decoder/errors`
    `github.com/bytedance/sonic/internal/rt`
    `github.com/bytedance/sonic/option`
)

// Re-exported flag bit positions and option bits from package consts, so
// this package's callers need not import consts directly.
const (
    _F_allow_control = consts.F_allow_control
    _F_copy_string = consts.F_copy_string
    _F_disable_unknown = consts.F_disable_unknown
    _F_disable_urc = consts.F_disable_urc
    _F_use_int64 = consts.F_use_int64
    _F_use_number = consts.F_use_number
    _F_validate_string = consts.F_validate_string
    _F_case_sensitive = consts.F_case_sensitive

    _MaxStack = consts.MaxStack

    OptionUseInt64 = consts.OptionUseInt64
    OptionUseNumber = consts.OptionUseNumber
    OptionUseUnicodeErrors = consts.OptionUseUnicodeErrors
    OptionDisableUnknown = consts.OptionDisableUnknown
    OptionCopyString = consts.OptionCopyString
    OptionValidateString = consts.OptionValidateString
    OptionNoValidateJSON = consts.OptionNoValidateJSON
    OptionCaseSensitive = consts.OptionCaseSensitive
)

// Type aliases re-exported for this package's public surface.
type (
    Options = consts.Options
    MismatchTypeError = errors.MismatchTypeError
    SyntaxError = errors.SyntaxError
)

// SetOptions replaces the decoder's whole option bit set. It panics when the
// mutually exclusive OptionUseInt64 and OptionUseNumber are both set.
func (self *Decoder) SetOptions(opts Options) {
    if (opts & consts.OptionUseNumber != 0) && (opts & consts.OptionUseInt64 != 0) {
        panic("can't set OptionUseInt64 and OptionUseNumber both!")
    }
    self.f = uint64(opts)
}

// Decoder is the decoder context object
type Decoder struct {
    i int    // current read position within s
    f uint64 // option flag bits (see the _F_* constants)
    s string // the JSON source being decoded
}

// NewDecoder creates a new decoder instance.
func NewDecoder(s string) *Decoder {
    return &Decoder{s: s}
}

// Pos returns the current decoding position.
func (self *Decoder) Pos() int {
    return self.i
}

// Reset rebinds the decoder to a new source and rewinds the position.
// The option flags are deliberately preserved (note the commented-out clear).
func (self *Decoder) Reset(s string) {
    self.s = s
    self.i = 0
    // self.f = 0
}

// CheckTrailings verifies that only whitespace remains after the decoded
// value, returning a SyntaxError for any trailing junk.
func (self *Decoder) CheckTrailings() error {
    pos := self.i
    buf := self.s

    /* skip all the trailing spaces */
    if pos != len(buf) {
        for pos < len(buf) && (types.SPACE_MASK & (1 << buf[pos])) != 0 {
            pos++
        }
    }

    /* then it must be at EOF */
    if pos == len(buf) {
        return nil
    }

    /* junk after JSON value */
    return SyntaxError {
        Src  : buf,
        Pos  : pos,
        Code : types.ERR_INVALID_CHAR,
    }
}

// Decode parses the JSON-encoded data from current position and stores the result
// in the value pointed to by val.
func (self *Decoder) Decode(val interface{}) error {
    return decodeImpl(&self.s, &self.i, self.f, val)
}

// UseInt64 indicates the Decoder to unmarshal an integer into an interface{} as an
// int64 instead of as a float64.
func (self *Decoder) UseInt64() {
    self.f |= 1 << _F_use_int64
    self.f &^= 1 << _F_use_number
}

// UseNumber indicates the Decoder to unmarshal a number into an interface{} as a
// json.Number instead of as a float64.
func (self *Decoder) UseNumber() {
    self.f &^= 1 << _F_use_int64
    self.f |= 1 << _F_use_number
}

// UseUnicodeErrors indicates the Decoder to return an error when encounter invalid
// UTF-8 escape sequences.
func (self *Decoder) UseUnicodeErrors() {
    self.f |= 1 << _F_disable_urc
}

// DisallowUnknownFields indicates the Decoder to return an error when the destination
// is a struct and the input contains object keys which do not match any
// non-ignored, exported fields in the destination.
func (self *Decoder) DisallowUnknownFields() {
    self.f |= 1 << _F_disable_unknown
}

// CopyString indicates the Decoder to decode string values by copying instead of referring.
func (self *Decoder) CopyString() {
    self.f |= 1 << _F_copy_string
}

// ValidateString causes the Decoder to validate string values when decoding string value
// in JSON. Validation is that, returning error when unescaped control chars(0x00-0x1f) or
// invalid UTF-8 chars in the string value of JSON.
func (self *Decoder) ValidateString() {
    self.f |= 1 << _F_validate_string
}

// Pretouch compiles vt ahead-of-time to avoid JIT compilation on-the-fly, in
// order to reduce the first-hit latency.
//
// Opts are the compile options, for example, "option.WithCompileRecursiveDepth" is
// a compile option to set the depth of recursive compile for the nested struct type.
func Pretouch(vt reflect.Type, opts ...option.CompileOption) error {
    return pretouchImpl(vt, opts...)
}

// Skip skips only one json value, and returns first non-blank character position and its ending position if it is valid.
// Otherwise, returns negative error code using start and invalid character position using end
func Skip(data []byte) (start int, end int) {
    s := rt.Mem2Str(data)
    p := 0
    m := types.NewStateMachine()
    ret := native.SkipOne(&s, &p, m, uint64(0))
    types.FreeStateMachine(m)
    return ret, p
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/bytedance/sonic/internal/decoder/api/decoder_arm64.go
vendor/github.com/bytedance/sonic/internal/decoder/api/decoder_arm64.go
// +build go1.17,!go1.25

// NOTE(review): this file carries only the legacy "+build" constraint; gofmt
// would add the equivalent "//go:build go1.17 && !go1.25" line — confirm
// against upstream before changing build semantics.

/*
 * Copyright 2021 ByteDance Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package api

import (
    `github.com/bytedance/sonic/internal/decoder/optdec`
    `github.com/bytedance/sonic/internal/envs`
)

// Backend selection for arm64: only the optimized (non-JIT) decoder exists.
var (
    pretouchImpl = optdec.Pretouch
    decodeImpl   = optdec.Decode
)

func init() {
    // when in aarch64, we enable all optimization
    envs.EnableOptDec()
    envs.EnableFastMap()
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/bytedance/sonic/internal/decoder/optdec/native.go
vendor/github.com/bytedance/sonic/internal/decoder/optdec/native.go
// Bridge between the Go optimized decoder and the native (C) JSON parser:
// the Parser struct layout here is shared with native/parser.c.
package optdec

import (
    "fmt"
    "reflect"
    "unsafe"
    "sync"

    "github.com/bytedance/sonic/internal/native"
    "github.com/bytedance/sonic/internal/native/types"
    "github.com/bytedance/sonic/internal/rt"
    "github.com/bytedance/sonic/utf8"
)

// ErrorCode is the native parser's status code; it implements error.
type ErrorCode int

const (
    SONIC_OK = 0;
    SONIC_CONTROL_CHAR = 1;
    SONIC_INVALID_ESCAPED = 2;
    SONIC_INVALID_NUM = 3;
    SONIC_FLOAT_INF = 4;
    SONIC_EOF = 5;
    SONIC_INVALID_CHAR = 6;
    SONIC_EXPECT_KEY = 7;
    SONIC_EXPECT_COLON = 8;
    SONIC_EXPECT_OBJ_COMMA_OR_END = 9;
    SONIC_EXPECT_ARR_COMMA_OR_END = 10;
    SONIC_VISIT_FAILED = 11;
    SONIC_INVALID_ESCAPED_UTF = 12;
    SONIC_INVALID_LITERAL = 13;
    SONIC_STACK_OVERFLOW = 14;
)

// ParsingErrors maps each SONIC_* code to its message, indexed by code.
var ParsingErrors = []string{
    SONIC_OK                      : "ok",
    SONIC_CONTROL_CHAR            : "control chars in string",
    SONIC_INVALID_ESCAPED         : "invalid escaped chars in string",
    SONIC_INVALID_NUM             : "invalid number",
    SONIC_FLOAT_INF               : "float infinity",
    SONIC_EOF                     : "eof",
    SONIC_INVALID_CHAR            : "invalid chars",
    SONIC_EXPECT_KEY              : "expect a json key",
    SONIC_EXPECT_COLON            : "expect a `:`",
    SONIC_EXPECT_OBJ_COMMA_OR_END : "expect a `,` or `}`",
    SONIC_EXPECT_ARR_COMMA_OR_END : "expect a `,` or `]`",
    SONIC_VISIT_FAILED            : "failed in json visitor",
    SONIC_INVALID_ESCAPED_UTF     : "invalid escaped unicodes",
    SONIC_INVALID_LITERAL         : "invalid literal(true/false/null)",
    SONIC_STACK_OVERFLOW          : "json is exceeded max depth 4096, cause stack overflow",
}

// Error implements the error interface for ErrorCode.
func (code ErrorCode) Error() string {
    return ParsingErrors[code]
}

// node is one parsed-value record produced by the native parser.
type node struct {
    typ uint64
    val uint64
}

// should consistent with native/parser.c
type _nospaceBlock struct {
    _ [8]byte
    _ [8]byte
}

// should consistent with native/parser.c
type nodeBuf struct {
    ncur   uintptr  // cursor: address of the next free node slot
    parent int64
    depth  uint64
    nstart uintptr  // address of the first node slot
    nend   uintptr  // address one past the last node slot
    iskey  bool
    stat   jsonStat
}

// init points the node-buffer cursors at the backing nodes slice.
func (self *nodeBuf) init(nodes []node) {
    self.ncur = uintptr(unsafe.Pointer(&nodes[0]))
    self.nstart = self.ncur
    self.nend = self.ncur + uintptr(cap(nodes)) * unsafe.Sizeof(node{})
    self.parent = -1
}

// should consistent with native/parser.c
type Parser struct {
    Json    string
    padded  []byte // Json copied with trailing padding for the native scanner
    nodes   []node // node output buffer handed to the native parser
    dbuf    []byte // scratch buffer for number digits
    backup  []node // original pooled nodes kept while an enlarged buffer is in use
    options uint64

    // JSON cursor
    start uintptr
    cur   uintptr
    end   uintptr
    _nbk  _nospaceBlock

    // node buffer cursor
    nbuf nodeBuf

    Utf8Inv bool // input contained invalid UTF-8 and was corrected
    isEface bool // decoding into interface{} (affects number handling)
}

// only when parse non-empty object/array are needed.
type jsonStat struct {
    object uint32
    array uint32
    str uint32
    number uint32
    array_elems uint32
    object_keys uint32
    max_depth uint32
}

var (
    defaultJsonPaddedCap uintptr = 1 << 20 // 1 Mb
    defaultNodesCap uintptr = (1 << 20) / unsafe.Sizeof(node{}) // 1 Mb
)

// parsePool recycles Parser objects with their large backing buffers.
var parsePool sync.Pool = sync.Pool {
    New: func () interface{} {
        return &Parser{
            options: 0,
            padded: make([]byte, 0, defaultJsonPaddedCap),
            nodes: make([]node, defaultNodesCap, defaultNodesCap),
            dbuf: make([]byte, types.MaxDigitNums, types.MaxDigitNums),
        }
    },
}

// padding is appended after the JSON so the native scanner may read past the
// end safely; the byte values must match what native/parser.c expects.
var padding string = "x\"x\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"

// newParser takes a pooled Parser and binds it to data[pos:], correcting
// invalid UTF-8 first when OptionValidateString is set.
func newParser(data string, pos int, opt uint64) *Parser {
    p := parsePool.Get().(*Parser)

    /* validate json if needed */
    if (opt & (1 << _F_validate_string)) != 0 && !utf8.ValidateString(data){
        // replace invalid UTF-8 with \ufffd, then pad for the native scanner
        dbuf := utf8.CorrectWith(nil, rt.Str2Mem(data[pos:]), "\ufffd")
        dbuf = append(dbuf, padding...)
        p.Json = rt.Mem2Str(dbuf[:len(dbuf) - len(padding)])
        p.Utf8Inv = true
        p.start = uintptr((*rt.GoString)(unsafe.Pointer(&p.Json)).Ptr)
    } else {
        p.Json = data
        // TODO: prevent too large JSON
        p.padded = append(p.padded, data[pos:]...)
        p.padded = append(p.padded, padding...)
        p.start = uintptr((*rt.GoSlice)(unsafe.Pointer(&p.padded)).Ptr)
    }

    p.cur = p.start
    p.end = p.cur + uintptr(len(p.Json))
    p.options = opt
    p.nbuf.init(p.nodes)
    return p
}

// Pos returns the parser's byte offset into the JSON source.
func (p *Parser) Pos() int {
    return int(p.cur - p.start)
}

// JsonBytes returns the (padded or corrected) bytes the native parser scans.
func (p *Parser) JsonBytes() []byte {
    if p.Utf8Inv {
        return (rt.Str2Mem(p.Json))
    } else {
        return p.padded
    }
}

var nodeType = rt.UnpackType(reflect.TypeOf(node{}))

// calMaxNodeCap bounds the node count for a JSON of the given size: every
// node consumes at least two input bytes.
//go:inline
func calMaxNodeCap(jsonSize int) int {
    return jsonSize / 2 + 2
}

// parse runs the native parser; when the fixed node buffer fills up
// (SONIC_VISIT_FAILED), it allocates a buffer guaranteed large enough for
// the remaining input and resumes.
func (p *Parser) parse() ErrorCode {
    // when decode into struct, we should decode number as possible
    old := p.options
    if !p.isEface {
        p.options &^= 1 << _F_use_number
    }

    // fast path with limited node buffer
    err := ErrorCode(native.ParseWithPadding(unsafe.Pointer(p)))
    if err != SONIC_VISIT_FAILED {
        p.options = old
        return err
    }

    // check OoB here
    offset := p.nbuf.ncur - p.nbuf.nstart
    curLen := int(offset / unsafe.Sizeof(node{}))
    if curLen != len(p.nodes) {
        panic(fmt.Sprintf("current len: %d, real len: %d cap: %d", curLen, len(p.nodes), cap(p.nodes)))
    }

    // node buf is not enough, continue parse
    // the maxCap is always meet all valid JSON
    maxCap := curLen + calMaxNodeCap(len(p.Json) - int(p.cur - p.start))
    slice := rt.GoSlice{
        Ptr: rt.Mallocgc(uintptr(maxCap) * nodeType.Size, nodeType, false),
        Len: maxCap,
        Cap: maxCap,
    }
    rt.Memmove(unsafe.Pointer(slice.Ptr), unsafe.Pointer(&p.nodes[0]), offset)
    p.backup = p.nodes
    p.nodes = *(*[]node)(unsafe.Pointer(&slice))

    // update node cursor
    p.nbuf.nstart = uintptr(unsafe.Pointer(&p.nodes[0]))
    p.nbuf.nend = p.nbuf.nstart + uintptr(cap(p.nodes)) * unsafe.Sizeof(node{})
    p.nbuf.ncur = p.nbuf.nstart + offset

    // continue parse json
    err = ErrorCode(native.ParseWithPadding(unsafe.Pointer(p)))
    p.options = old
    return err
}

// reset returns the parser to its pooled state, restoring the original
// (small) node buffer if an enlarged one was installed during parse.
func (p *Parser) reset() {
    p.options = 0
    p.padded = p.padded[:0]
    // nodes is too large here, we will not reset it and use small backup nodes buffer
    if p.backup != nil {
        p.nodes = p.backup
        p.backup = nil
    }
    p.start = 0
    p.cur = 0
    p.end = 0
    p.Json = ""
    p.nbuf = nodeBuf{}
    p._nbk = _nospaceBlock{}
    p.Utf8Inv = false
    p.isEface = false
}

// free resets the parser and returns it to the pool.
func (p *Parser) free() {
    p.reset()
    parsePool.Put(p)
}

// fixError converts a native error code into a positioned syntax error;
// a failure at position 0 is reported as EOF.
//go:noinline
func (p *Parser) fixError(code ErrorCode) error {
    if code == SONIC_OK {
        return nil
    }

    if p.Pos() == 0 {
        code = SONIC_EOF;
    }

    pos := p.Pos() - 1
    return error_syntax(pos, p.Json, ParsingErrors[code])
}

// Parse runs the native parser over data and returns its status.
// NOTE(review): err is a plain ErrorCode, so the returned error interface is
// non-nil even for SONIC_OK — callers appear expected to compare the code
// rather than check against nil; confirm the intended contract.
func Parse(data string, opt uint64) error {
    p := newParser(data, 0, opt)
    err := p.parse()
    p.free()
    return err
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/bytedance/sonic/internal/decoder/optdec/helper.go
vendor/github.com/bytedance/sonic/internal/decoder/optdec/helper.go
// Scanning and literal-parsing helpers for the optimized decoder.
package optdec

import (
    "encoding/json"
    "strconv"

    "github.com/bytedance/sonic/internal/native"
    "github.com/bytedance/sonic/internal/utils"
    "github.com/bytedance/sonic/internal/native/types"
)

// SkipNumberFast advances from start over characters that may form a JSON
// number and returns the end position. It reports false when no number
// characters were consumed at all. The input has already been parsed by the
// native layer, so no full validation is performed here.
func SkipNumberFast(json string, start int) (int, bool) {
    pos := start
    for pos < len(json) && json[pos] != ']' && json[pos] != '}' && json[pos] != ',' {
        if json[pos] >= '0' && json[pos] <= '9' || json[pos] == '.' || json[pos] == '-' || json[pos] == '+' || json[pos] == 'e' || json[pos] == 'E' {
            pos += 1
        } else {
            break
        }
    }

    // no number characters found at start
    if pos == start {
        return pos, false
    }
    return pos, true
}

// isSpace reports whether c is a JSON whitespace character.
func isSpace(c byte) bool {
    return c == ' ' || c == '\t' || c == '\n' || c == '\r'
}

// ValidNumberFast reports whether raw consists of exactly one valid JSON
// number with no trailing characters.
func ValidNumberFast(raw string) bool {
    ret := utils.SkipNumber(raw, 0)
    if ret < 0 {
        return false
    }

    // reject any trailing characters after the number
    // (was a `for` loop that could only ever run once)
    if ret < len(raw) {
        return false
    }
    return true
}

// SkipOneFast2 skips one JSON value in json starting at *pos, advancing *pos
// to the value's end. It returns the value's start index, or -1 and a
// syntax error for malformed input.
func SkipOneFast2(json string, pos *int) (int, error) {
    start := native.SkipOneFast(&json, pos)
    if start < 0 {
        return -1, error_syntax(*pos, json, types.ParsingError(-start).Error())
    }
    return start, nil
}

// SkipOneFast skips one JSON value starting at pos and returns its raw text.
func SkipOneFast(json string, pos int) (string, error) {
    start := native.SkipOneFast(&json, &pos)
    if start < 0 {
        // TODO: return more detailed error codes
        return "", error_syntax(pos, json, types.ParsingError(-start).Error())
    }
    return json[start:pos], nil
}

// ParseI64 parses raw as a base-10 signed 64-bit integer.
func ParseI64(raw string) (int64, error) {
    i64, err := strconv.ParseInt(raw, 10, 64)
    if err != nil {
        return 0, err
    }
    return i64, nil
}

// ParseBool parses raw as a JSON boolean literal.
func ParseBool(raw string) (bool, error) {
    var b bool
    err := json.Unmarshal([]byte(raw), &b)
    if err != nil {
        return false, err
    }
    return b, nil
}

// ParseU64 parses raw as a base-10 unsigned 64-bit integer.
func ParseU64(raw string) (uint64, error) {
    u64, err := strconv.ParseUint(raw, 10, 64)
    if err != nil {
        return 0, err
    }
    return u64, nil
}

// ParseF64 parses raw as a 64-bit floating point number.
func ParseF64(raw string) (float64, error) {
    f64, err := strconv.ParseFloat(raw, 64)
    if err != nil {
        return 0, err
    }
    return f64, nil
}

// Unquote decodes a quoted JSON string literal into its Go string value.
func Unquote(raw string) (string, error) {
    var u string
    err := json.Unmarshal([]byte(raw), &u)
    if err != nil {
        return "", err
    }
    return u, nil
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/bytedance/sonic/internal/decoder/optdec/node.go
vendor/github.com/bytedance/sonic/internal/decoder/optdec/node.go
package optdec import ( "encoding/json" "math" "unsafe" "github.com/bytedance/sonic/internal/envs" "github.com/bytedance/sonic/internal/rt" ) type Context struct { Parser *Parser efacePool *efacePool Stack bounedStack Utf8Inv bool } func (ctx *Context) Options() uint64 { return ctx.Parser.options } /************************* Stack and Pool Helper *******************/ type parentStat struct { con unsafe.Pointer remain uint64 } type bounedStack struct { stack []parentStat index int } func newStack(size int) bounedStack { return bounedStack{ stack: make([]parentStat, size + 2), index: 0, } } //go:nosplit func (s *bounedStack) Pop() (unsafe.Pointer, int, bool){ s.index-- con := s.stack[s.index].con remain := s.stack[s.index].remain &^ (uint64(1) << 63) isObj := (s.stack[s.index].remain & (uint64(1) << 63)) != 0 s.stack[s.index].con = nil s.stack[s.index].remain = 0 return con, int(remain), isObj } //go:nosplit func (s *bounedStack) Push(p unsafe.Pointer, remain int, isObj bool) { s.stack[s.index].con = p s.stack[s.index].remain = uint64(remain) if isObj { s.stack[s.index].remain |= (uint64(1) << 63) } s.index++ } type efacePool struct{ t64 rt.T64Pool tslice rt.TslicePool tstring rt.TstringPool efaceSlice rt.SlicePool } func newEfacePool(stat *jsonStat, useNumber bool) *efacePool { strs := int(stat.str) nums := 0 if useNumber { strs += int(stat.number) } else { nums = int(stat.number) } return &efacePool{ t64: rt.NewT64Pool(nums), tslice: rt.NewTslicePool(int(stat.array)), tstring: rt.NewTstringPool(strs), efaceSlice: rt.NewPool(rt.AnyType, int(stat.array_elems)), } } func (self *efacePool) GetMap(hint int) unsafe.Pointer { m := make(map[string]interface{}, hint) return *(*unsafe.Pointer)(unsafe.Pointer(&m)) } func (self *efacePool) GetSlice(hint int) unsafe.Pointer { return unsafe.Pointer(self.efaceSlice.GetSlice(hint)) } func (self *efacePool) ConvTSlice(val rt.GoSlice, typ *rt.GoType, dst unsafe.Pointer) { self.tslice.Conv(val, typ, (*interface{})(dst)) } func (self 
*efacePool) ConvF64(val float64, dst unsafe.Pointer) { self.t64.Conv(castU64(val), rt.Float64Type, (*interface{})(dst)) } func (self *efacePool) ConvTstring(val string, dst unsafe.Pointer) { self.tstring.Conv(val, (*interface{})(dst)) } func (self *efacePool) ConvTnum(val json.Number, dst unsafe.Pointer) { self.tstring.ConvNum(val, (*interface{})(dst)) } /********************************************************/ func canUseFastMap( opts uint64, root *rt.GoType) bool { return envs.UseFastMap && (opts & (1 << _F_copy_string)) == 0 && (opts & (1 << _F_use_int64)) == 0 && (root == rt.AnyType || root == rt.MapEfaceType || root == rt.SliceEfaceType) } func NewContext(json string, pos int, opts uint64, root *rt.GoType) (Context, error) { ctx := Context{ Parser: newParser(json, pos, opts), } if root == rt.AnyType || root == rt.MapEfaceType || root == rt.SliceEfaceType { ctx.Parser.isEface = true } ecode := ctx.Parser.parse() if ecode != 0 { return ctx, ctx.Parser.fixError(ecode) } useNumber := (opts & (1 << _F_use_number )) != 0 if canUseFastMap(opts, root) { ctx.efacePool = newEfacePool(&ctx.Parser.nbuf.stat, useNumber) ctx.Stack = newStack(int(ctx.Parser.nbuf.stat.max_depth)) } return ctx, nil } func (ctx *Context) Delete() { ctx.Parser.free() ctx.Parser = nil } type Node struct { cptr uintptr } func NewNode(cptr uintptr) Node { return Node{cptr: cptr} } type Dom struct { cdom uintptr } func (ctx *Context) Root() Node { root := (uintptr)(((*rt.GoSlice)(unsafe.Pointer(&ctx.Parser.nodes))).Ptr) return Node{cptr: root} } type Array struct { cptr uintptr } type Object struct { cptr uintptr } func (obj Object) Len() int { cobj := ptrCast(obj.cptr) return int(uint64(cobj.val) & ConLenMask) } func (arr Array) Len() int { carr := ptrCast(arr.cptr) return int(uint64(carr.val) & ConLenMask) } // / Helper functions to eliminate CGO calls func (val Node) Type() uint8 { ctype := ptrCast(val.cptr) return uint8(ctype.typ & TypeMask) } func (val Node) Next() uintptr { if val.Type() != 
KObject && val.Type() != KArray { return PtrOffset(val.cptr, 1) } cobj := ptrCast(val.cptr) offset := int64(uint64(cobj.val) >> ConLenBits) return PtrOffset(val.cptr, offset) } func (val *Node) next() { *val = NewNode(val.Next()) } type NodeIter struct { next uintptr } func NewNodeIter(node Node) NodeIter { return NodeIter{next: node.cptr} } func (iter *NodeIter) Next() Node { ret := NewNode(iter.next) iter.next = PtrOffset(iter.next, 1) return ret } func (iter *NodeIter) Peek() Node { return NewNode(iter.next) } func (val Node) U64() uint64 { cnum := ptrCast(val.cptr) return *(*uint64)((unsafe.Pointer)(&(cnum.val))) } func (val Node) I64() int64 { cnum := ptrCast(val.cptr) return *(*int64)((unsafe.Pointer)(&(cnum.val))) } func (val Node) IsNull() bool { return val.Type() == KNull } func (val Node) IsNumber() bool { return val.Type() & KNumber != 0 } func (val Node) F64() float64 { cnum := ptrCast(val.cptr) return *(*float64)((unsafe.Pointer)(&(cnum.val))) } func (val Node) Bool() bool { return val.Type() == KTrue } func (self Node) AsU64(ctx *Context) (uint64, bool) { if self.Type() == KUint { return self.U64(), true } else if self.Type() == KRawNumber { num, err := ParseU64(self.Raw(ctx)) if err != nil { return 0, false } return num, true } else { return 0, false } } func (val *Node) AsObj() (Object, bool) { var ret Object if val.Type() != KObject { return ret, false } return Object{ cptr: val.cptr, }, true } func (val Node) Obj() Object { return Object{cptr: val.cptr} } func (val Node) Arr() Array { return Array{cptr: val.cptr} } func (val *Node) AsArr() (Array, bool) { var ret Array if val.Type() != KArray { return ret, false } return Array{ cptr: val.cptr, }, true } func (self Node) AsI64(ctx *Context) (int64, bool) { typ := self.Type() if typ == KUint && self.U64() <= math.MaxInt64 { return int64(self.U64()), true } else if typ == KSint { return self.I64(), true } else if typ == KRawNumber { val, err := self.Number(ctx).Int64() if err != nil { return 0, false 
} return val, true } else { return 0, false } } func (self Node) AsByte(ctx *Context) (uint8, bool) { typ := self.Type() if typ == KUint && self.U64() <= math.MaxUint8 { return uint8(self.U64()), true } else if typ == KSint && self.I64() == 0 { return 0, true } else { return 0, false } } /********* Parse Node String into Value ***************/ func (val Node) ParseI64(ctx *Context) (int64, bool) { s, ok := val.AsStrRef(ctx) if !ok { return 0, false } if s == "null" { return 0, true } i, err := ParseI64(s) if err != nil { return 0, false } return i, true } func (val Node) ParseBool(ctx *Context) (bool, bool) { s, ok := val.AsStrRef(ctx) if !ok { return false, false } if s == "null" { return false, true } b, err := ParseBool(s) if err != nil { return false, false } return b, true } func (val Node) ParseU64(ctx *Context) (uint64, bool) { s, ok := val.AsStrRef(ctx) if !ok { return 0, false } if s == "null" { return 0, true } i, err := ParseU64(s) if err != nil { return 0, false } return i, true } func (val Node) ParseF64(ctx *Context) (float64, bool) { s, ok := val.AsStrRef(ctx) if !ok { return 0, false } if s == "null" { return 0, true } i, err := ParseF64(s) if err != nil { return 0, false } return i, true } func (val Node) ParseString(ctx *Context) (string, bool) { // should not use AsStrRef s, ok := val.AsStr(ctx) if !ok { return "", false } if s == "null" { return "", true } s, err := Unquote(s) if err != nil { return "", false } return s, true } func (val Node) ParseNumber(ctx *Context) (json.Number, bool) { // should not use AsStrRef s, ok := val.AsStr(ctx) if !ok { return json.Number(""), false } if s == "null" { return json.Number(""), true } end, ok := SkipNumberFast(s, 0) // has error or trailing chars if !ok || end != len(s) { return json.Number(""), false } return json.Number(s), true } func (val Node) AsF64(ctx *Context) (float64, bool) { switch val.Type() { case KUint: return float64(val.U64()), true case KSint: return float64(val.I64()), true case 
KReal: return float64(val.F64()), true case KRawNumber: f, err := val.Number(ctx).Float64(); return f, err == nil default: return 0, false } } func (val Node) AsBool() (bool, bool) { switch val.Type() { case KTrue: return true, true case KFalse: return false, true default: return false, false } } func (val Node) AsStr(ctx *Context) (string, bool) { switch val.Type() { case KStringCommon: s := val.StringRef(ctx) if (ctx.Options() & (1 << _F_copy_string) == 0) { return s, true } return string(rt.Str2Mem(s)), true case KStringEscaped: return val.StringCopyEsc(ctx), true default: return "", false } } func (val Node) AsStrRef(ctx *Context) (string, bool) { switch val.Type() { case KStringEscaped: node := ptrCast(val.cptr) offset := val.Position() len := int(node.val) return rt.Mem2Str(ctx.Parser.JsonBytes()[offset : offset + len]), true case KStringCommon: return val.StringRef(ctx), true default: return "", false } } func (val Node) AsStringText(ctx *Context) ([]byte, bool) { if !val.IsStr() { return nil, false } // clone to new bytes s, b := val.AsStrRef(ctx) return []byte(s), b } func (val Node) IsStr() bool { return (val.Type() == KStringCommon) || (val.Type() == KStringEscaped) } func (val Node) IsRawNumber() bool { return val.Type() == KRawNumber } func (val Node) Number(ctx *Context) json.Number { return json.Number(val.Raw(ctx)) } func (val Node) Raw(ctx *Context) string { node := ptrCast(val.cptr) len := int(node.val) offset := val.Position() return ctx.Parser.Json[offset:int(offset+len)] } func (val Node) Position() int { node := ptrCast(val.cptr) return int(node.typ >> PosBits) } func (val Node) AsNumber(ctx *Context) (json.Number, bool) { // parse JSON string as number if val.IsStr() { s, _ := val.AsStr(ctx) if !ValidNumberFast(s) { return "", false } else { return json.Number(s), true } } return val.NonstrAsNumber(ctx) } func (val Node) NonstrAsNumber(ctx *Context) (json.Number, bool) { // deal with raw number if val.IsRawNumber() { return val.Number(ctx), 
true } // deal with parse number if !val.IsNumber() { return json.Number(""), false } start := val.Position() end, ok := SkipNumberFast(ctx.Parser.Json, start) if !ok { return "", false } return json.Number(ctx.Parser.Json[start:end]), true } func (val Node) AsRaw(ctx *Context) string { // fast path for unescaped strings switch val.Type() { case KNull: return "null" case KTrue: return "true" case KFalse: return "false" case KStringCommon: node := ptrCast(val.cptr) len := int(node.val) offset := val.Position() // add start and end quote ref := rt.Str2Mem(ctx.Parser.Json)[offset-1 : offset+len+1] return rt.Mem2Str(ref) case KRawNumber: fallthrough case KRaw: return val.Raw(ctx) case KStringEscaped: raw, _ := SkipOneFast(ctx.Parser.Json, val.Position() - 1) return raw default: raw, err := SkipOneFast(ctx.Parser.Json, val.Position()) if err != nil { break } return raw } panic("should always be valid json here") } // reference from the input JSON as possible func (val Node) StringRef(ctx *Context) string { return val.Raw(ctx) } //go:nocheckptr func ptrCast(p uintptr) *node { return (*node)(unsafe.Pointer(p)) } func (val Node) StringCopyEsc(ctx *Context) string { // check whether there are in padded node := ptrCast(val.cptr) len := int(node.val) offset := val.Position() return string(ctx.Parser.JsonBytes()[offset : offset + len]) } func (val Node) Object() Object { return Object{cptr: val.cptr} } func (val Node) Array() Array { return Array{cptr: val.cptr} } func (val *Array) Children() uintptr { return PtrOffset(val.cptr, 1) } func (val *Object) Children() uintptr { return PtrOffset(val.cptr, 1) } func (val *Node) Equal(ctx *Context, lhs string) bool { // check whether escaped cstr := ptrCast(val.cptr) offset := int(val.Position()) len := int(cstr.val) return lhs == ctx.Parser.Json[offset:offset+len] } func (node *Node) AsMapEface(ctx *Context, vp unsafe.Pointer) error { if node.IsNull() { return nil } obj, ok := node.AsObj() if !ok { return 
newUnmatched(node.Position(), rt.MapEfaceType) } var err, gerr error size := obj.Len() var m map[string]interface{} if *(*unsafe.Pointer)(vp) == nil { if ctx.efacePool != nil { p := ctx.efacePool.GetMap(size) m = *(*map[string]interface{})(unsafe.Pointer(&p)) } else { m = make(map[string]interface{}, size) } } else { m = *(*map[string]interface{})(vp) } next := obj.Children() for i := 0; i < size; i++ { knode := NewNode(next) key, _ := knode.AsStr(ctx) val := NewNode(PtrOffset(next, 1)) m[key], err = val.AsEface(ctx) next = val.cptr if gerr == nil && err != nil { gerr = err } } *(*map[string]interface{})(vp) = m return gerr } func (node *Node) AsMapString(ctx *Context, vp unsafe.Pointer) error { obj, ok := node.AsObj() if !ok { return newUnmatched(node.Position(), rt.MapStringType) } size := obj.Len() var m map[string]string if *(*unsafe.Pointer)(vp) == nil { m = make(map[string]string, size) } else { m = *(*map[string]string)(vp) } var gerr error next := obj.Children() for i := 0; i < size; i++ { knode := NewNode(next) key, _ := knode.AsStr(ctx) val := NewNode(PtrOffset(next, 1)) m[key], ok = val.AsStr(ctx) if !ok { if gerr == nil { gerr = newUnmatched(val.Position(), rt.StringType) } next = val.Next() } else { next = PtrOffset(val.cptr, 1) } } *(*map[string]string)(vp) = m return gerr } func (node *Node) AsSliceEface(ctx *Context, vp unsafe.Pointer) error { arr, ok := node.AsArr() if !ok { return newUnmatched(node.Position(), rt.SliceEfaceType) } size := arr.Len() var s []interface{} if size != 0 && ctx.efacePool != nil { slice := rt.GoSlice { Ptr: ctx.efacePool.GetSlice(size), Len: size, Cap: size, } *(*rt.GoSlice)(unsafe.Pointer(&s)) = slice } else { s = *(*[]interface{})((unsafe.Pointer)(rt.MakeSlice(vp, rt.AnyType, size))) } *node = NewNode(arr.Children()) var err, gerr error for i := 0; i < size; i++ { s[i], err = node.AsEface(ctx) if gerr == nil && err != nil { gerr = err } } *(*[]interface{})(vp) = s return nil } func (node *Node) AsSliceI32(ctx *Context, 
vp unsafe.Pointer) error { arr, ok := node.AsArr() if !ok { return newUnmatched(node.Position(), rt.SliceI32Type) } size := arr.Len() s := *(*[]int32)((unsafe.Pointer)(rt.MakeSlice(vp, rt.Int32Type, size))) next := arr.Children() var gerr error for i := 0; i < size; i++ { val := NewNode(next) ret, ok := val.AsI64(ctx) if !ok || ret > math.MaxInt32 || ret < math.MinInt32 { if gerr == nil { gerr = newUnmatched(val.Position(), rt.Int32Type) } next = val.Next() } else { s[i] = int32(ret) next = PtrOffset(val.cptr, 1) } } *(*[]int32)(vp) = s return gerr } func (node *Node) AsSliceI64(ctx *Context, vp unsafe.Pointer) error { arr, ok := node.AsArr() if !ok { return newUnmatched(node.Position(), rt.SliceI64Type) } size := arr.Len() s := *(*[]int64)((unsafe.Pointer)(rt.MakeSlice(vp, rt.Int64Type, size))) next := arr.Children() var gerr error for i := 0; i < size; i++ { val := NewNode(next) ret, ok := val.AsI64(ctx) if !ok { if gerr == nil { gerr = newUnmatched(val.Position(), rt.Int64Type) } next = val.Next() } else { s[i] = ret next = PtrOffset(val.cptr, 1) } } *(*[]int64)(vp) = s return gerr } func (node *Node) AsSliceU32(ctx *Context, vp unsafe.Pointer) error { arr, ok := node.AsArr() if !ok { return newUnmatched(node.Position(), rt.SliceU32Type) } size := arr.Len() next := arr.Children() s := *(*[]uint32)((unsafe.Pointer)(rt.MakeSlice(vp, rt.Uint32Type, size))) var gerr error for i := 0; i < size; i++ { val := NewNode(next) ret, ok := val.AsU64(ctx) if !ok || ret > math.MaxUint32 { if gerr == nil { gerr = newUnmatched(val.Position(), rt.Uint32Type) } next = val.Next() } else { s[i] = uint32(ret) next = PtrOffset(val.cptr, 1) } } *(*[]uint32)(vp) = s return gerr } func (node *Node) AsSliceU64(ctx *Context, vp unsafe.Pointer) error { arr, ok := node.AsArr() if !ok { return newUnmatched(node.Position(), rt.SliceU64Type) } size := arr.Len() next := arr.Children() s := *(*[]uint64)((unsafe.Pointer)(rt.MakeSlice(vp, rt.Uint64Type, size))) var gerr error for i := 0; i < size; 
i++ { val := NewNode(next) ret, ok := val.AsU64(ctx) if !ok { if gerr == nil { gerr = newUnmatched(val.Position(), rt.Uint64Type) } next = val.Next() } else { s[i] = ret next = PtrOffset(val.cptr, 1) } } *(*[]uint64)(vp) = s return gerr } func (node *Node) AsSliceString(ctx *Context, vp unsafe.Pointer) error { arr, ok := node.AsArr() if !ok { return newUnmatched(node.Position(), rt.SliceStringType) } size := arr.Len() next := arr.Children() s := *(*[]string)((unsafe.Pointer)(rt.MakeSlice(vp, rt.StringType, size))) var gerr error for i := 0; i < size; i++ { val := NewNode(next) ret, ok := val.AsStr(ctx) if !ok { if gerr == nil { gerr = newUnmatched(val.Position(), rt.StringType) } next = val.Next() } else { s[i] = ret next = PtrOffset(val.cptr, 1) } } *(*[]string)(vp) = s return gerr } func (val *Node) AsSliceBytes(ctx *Context) ([]byte, error) { var origin []byte switch val.Type() { case KStringEscaped: node := ptrCast(val.cptr) offset := val.Position() len := int(node.val) origin = ctx.Parser.JsonBytes()[offset : offset + len] case KStringCommon: origin = rt.Str2Mem(val.StringRef(ctx)) case KArray: arr := val.Array() size := arr.Len() a := make([]byte, size) elem := NewNode(arr.Children()) var gerr error var ok bool for i := 0; i < size; i++ { a[i], ok = elem.AsByte(ctx) if !ok && gerr == nil { gerr = newUnmatched(val.Position(), rt.BytesType) } elem = NewNode(PtrOffset(elem.cptr, 1)) } return a, gerr default: return nil, newUnmatched(val.Position(), rt.BytesType) } b64, err := rt.DecodeBase64(origin) if err != nil { return nil, newUnmatched(val.Position(), rt.BytesType) } return b64, nil } // AsEface will always ok, because we have parse in native. 
func (node *Node) AsEface(ctx *Context) (interface{}, error) { if ctx.efacePool != nil { iter := NewNodeIter(*node) v := AsEfaceFast(&iter, ctx) *node = iter.Peek() return v, nil } else { return node.AsEfaceFallback(ctx) } } func parseSingleNode(node Node, ctx *Context) interface{} { var v interface{} switch node.Type() { case KObject: v = map[string]interface{}{} case KArray: v = []interface{}{} case KStringCommon: v = node.StringRef(ctx) case KStringEscaped: v = node.StringCopyEsc(ctx) case KTrue: v = true case KFalse: v = false case KNull: v = nil case KUint: v = float64(node.U64()) case KSint: v = float64(node.I64()) case KReal: v = float64(node.F64()) case KRawNumber: v = node.Number(ctx) default: panic("unreachable for as eface") } return v } func castU64(val float64) uint64 { return *((*uint64)(unsafe.Pointer((&val)))) } func AsEfaceFast(iter *NodeIter, ctx *Context) interface{} { var mp, sp, parent unsafe.Pointer // current container pointer var node Node var size int var isObj bool var slice rt.GoSlice var val unsafe.Pointer var vt **rt.GoType var vp *unsafe.Pointer var rootM unsafe.Pointer var rootS rt.GoSlice var root interface{} var key string node = iter.Next() switch node.Type() { case KObject: size = node.Object().Len() if size != 0 { ctx.Stack.Push(nil, 0, true) mp = ctx.efacePool.GetMap(size) rootM = mp isObj = true goto _object_key } else { return rt.GoEface { Type: rt.MapEfaceType, Value: ctx.efacePool.GetMap(0), }.Pack() } case KArray: size = node.Array().Len() if size != 0 { ctx.Stack.Push(nil, 0, false) sp = ctx.efacePool.GetSlice(size) slice = rt.GoSlice { Ptr: sp, Len: size, Cap: size, } rootS = slice isObj = false val = sp goto _arr_val; } else { ctx.efacePool.ConvTSlice(rt.EmptySlice, rt.SliceEfaceType, unsafe.Pointer(&root)) } case KStringCommon: ctx.efacePool.ConvTstring(node.StringRef(ctx), unsafe.Pointer(&root)) case KStringEscaped: ctx.efacePool.ConvTstring(node.StringCopyEsc(ctx), unsafe.Pointer(&root)) case KTrue: root = true case 
KFalse: root = false case KNull: root = nil case KUint: ctx.efacePool.ConvF64(float64(node.U64()), unsafe.Pointer(&root)) case KSint: ctx.efacePool.ConvF64(float64(node.I64()), unsafe.Pointer(&root)) case KReal: ctx.efacePool.ConvF64(node.F64(), unsafe.Pointer(&root)) case KRawNumber: ctx.efacePool.ConvTnum(node.Number(ctx), unsafe.Pointer(&root)) default: panic("unreachable for as eface") } return root _object_key: node = iter.Next() if node.Type() == KStringCommon { key = node.StringRef(ctx) } else { key = node.StringCopyEsc(ctx) } // interface{} slot in map bucket val = rt.Mapassign_faststr(rt.MapEfaceMapType, mp, key) vt = &(*rt.GoEface)(val).Type vp = &(*rt.GoEface)(val).Value // parse value node node = iter.Next() switch node.Type() { case KObject: newSize := node.Object().Len() newMp := ctx.efacePool.GetMap(newSize) *vt = rt.MapEfaceType *vp = newMp remain := size - 1 isObj = true if newSize != 0 { if remain > 0 { ctx.Stack.Push(mp, remain, true) } mp = newMp size = newSize goto _object_key; } case KArray: newSize := node.Array().Len() if newSize == 0 { ctx.efacePool.ConvTSlice(rt.EmptySlice, rt.SliceEfaceType, val) break; } newSp := ctx.efacePool.GetSlice(newSize) // pack to []interface{} ctx.efacePool.ConvTSlice(rt.GoSlice{ Ptr: newSp, Len: newSize, Cap: newSize, }, rt.SliceEfaceType, val) remain := size - 1 if remain > 0 { ctx.Stack.Push(mp, remain, true) } val = newSp isObj = false size = newSize goto _arr_val; case KStringCommon: ctx.efacePool.ConvTstring(node.StringRef(ctx), val) case KStringEscaped: ctx.efacePool.ConvTstring(node.StringCopyEsc(ctx), val) case KTrue: rt.ConvTBool(true, (*interface{})(val)) case KFalse: rt.ConvTBool(false, (*interface{})(val)) case KNull: /* skip */ case KUint: ctx.efacePool.ConvF64(float64(node.U64()), val) case KSint: ctx.efacePool.ConvF64(float64(node.I64()), val) case KReal: ctx.efacePool.ConvF64(node.F64(), val) case KRawNumber: ctx.efacePool.ConvTnum(node.Number(ctx), val) default: panic("unreachable for as 
eface") } // check size size -= 1 if size != 0 { goto _object_key; } parent, size, isObj = ctx.Stack.Pop() // parent is empty if parent == nil { if isObj { return rt.GoEface { Type: rt.MapEfaceType, Value: rootM, }.Pack() } else { ctx.efacePool.ConvTSlice(rootS, rt.SliceEfaceType, (unsafe.Pointer)(&root)) return root } } // continue to parse parent if isObj { mp = parent goto _object_key; } else { val = rt.PtrAdd(parent, rt.AnyType.Size) goto _arr_val; } _arr_val: // interface{} slot in slice vt = &(*rt.GoEface)(val).Type vp = &(*rt.GoEface)(val).Value // parse value node node = iter.Next() switch node.Type() { case KObject: newSize := node.Object().Len() newMp := ctx.efacePool.GetMap(newSize) *vt = rt.MapEfaceType *vp = newMp remain := size - 1 if newSize != 0 { // push next array elem into stack if remain > 0 { ctx.Stack.Push(val, remain, false) } mp = newMp size = newSize isObj = true goto _object_key; } case KArray: newSize := node.Array().Len() if newSize == 0 { ctx.efacePool.ConvTSlice(rt.EmptySlice, rt.SliceEfaceType, val) break; } newSp := ctx.efacePool.GetSlice(newSize) // pack to []interface{} ctx.efacePool.ConvTSlice(rt.GoSlice { Ptr: newSp, Len: newSize, Cap: newSize, }, rt.SliceEfaceType, val) remain := size - 1 if remain > 0 { ctx.Stack.Push(val, remain, false) } val = newSp isObj = false size = newSize goto _arr_val; case KStringCommon: ctx.efacePool.ConvTstring(node.StringRef(ctx), val) case KStringEscaped: ctx.efacePool.ConvTstring(node.StringCopyEsc(ctx), val) case KTrue: rt.ConvTBool(true, (*interface{})(val)) case KFalse: rt.ConvTBool(false, (*interface{})(val)) case KNull: /* skip */ case KUint: ctx.efacePool.ConvF64(float64(node.U64()), val) case KSint: ctx.efacePool.ConvF64(float64(node.I64()), val) case KReal: ctx.efacePool.ConvF64(node.F64(), val) case KRawNumber: ctx.efacePool.ConvTnum(node.Number(ctx), val) default: panic("unreachable for as eface") } // check size size -= 1 if size != 0 { val = rt.PtrAdd(val, rt.AnyType.Size) goto 
_arr_val; } parent, size, isObj = ctx.Stack.Pop() // parent is empty if parent == nil { if isObj { return rt.GoEface { Type: rt.MapEfaceType, Value: rootM, }.Pack() } else { ctx.efacePool.ConvTSlice(rootS, rt.SliceEfaceType, unsafe.Pointer(&root)) return root } } // continue to parse parent if isObj { mp = parent goto _object_key; } else { val = rt.PtrAdd(parent, rt.AnyType.Size) goto _arr_val; } } func (node *Node) AsEfaceFallback(ctx *Context) (interface{}, error) { switch node.Type() { case KObject: obj := node.Object() size := obj.Len() m := make(map[string]interface{}, size) *node = NewNode(obj.Children()) var gerr, err error for i := 0; i < size; i++ { key, _ := node.AsStr(ctx) *node = NewNode(PtrOffset(node.cptr, 1)) m[key], err = node.AsEfaceFallback(ctx) if gerr == nil && err != nil { gerr = err } } return m, gerr case KArray: arr := node.Array() size := arr.Len() a := make([]interface{}, size) *node = NewNode(arr.Children()) var gerr, err error for i := 0; i < size; i++ { a[i], err = node.AsEfaceFallback(ctx) if gerr == nil && err != nil { gerr = err } } return a, gerr case KStringCommon: str, _ := node.AsStr(ctx) *node = NewNode(PtrOffset(node.cptr, 1)) return str, nil case KStringEscaped: str := node.StringCopyEsc(ctx) *node = NewNode(PtrOffset(node.cptr, 1)) return str, nil case KTrue: *node = NewNode(PtrOffset(node.cptr, 1)) return true, nil case KFalse: *node = NewNode(PtrOffset(node.cptr, 1)) return false, nil case KNull: *node = NewNode(PtrOffset(node.cptr, 1)) return nil, nil default: // use float64 if ctx.Parser.options & (1 << _F_use_number) != 0 { num, ok := node.AsNumber(ctx) if !ok { // skip the unmacthed type *node = NewNode(node.Next()) return nil, newUnmatched(node.Position(), rt.JsonNumberType) } else { *node = NewNode(PtrOffset(node.cptr, 1)) return num, nil } } else if ctx.Parser.options & (1 << _F_use_int64) != 0 { // first try int64 i, ok := node.AsI64(ctx) if ok { *node = NewNode(PtrOffset(node.cptr, 1)) return i, nil } // is not 
integer, then use float64 f, ok := node.AsF64(ctx) if ok { *node = NewNode(PtrOffset(node.cptr, 1)) return f, nil } // skip the unmacthed type *node = NewNode(node.Next()) return nil, newUnmatched(node.Position(), rt.Int64Type) } else { num, ok := node.AsF64(ctx) if !ok { // skip the unmacthed type *node = NewNode(node.Next()) return nil, newUnmatched(node.Position(), rt.Float64Type) } else { *node = NewNode(PtrOffset(node.cptr, 1)) return num, nil } } } } //go:nosplit func PtrOffset(ptr uintptr, off int64) uintptr { return uintptr(int64(ptr) + off * int64(unsafe.Sizeof(node{}))) }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/bytedance/sonic/internal/decoder/optdec/types.go
vendor/github.com/bytedance/sonic/internal/decoder/optdec/types.go
/* * Copyright 2021 ByteDance Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package optdec import ( "encoding" "encoding/base64" "encoding/json" "reflect" "unsafe" "github.com/bytedance/sonic/internal/rt" ) var ( boolType = reflect.TypeOf(bool(false)) byteType = reflect.TypeOf(byte(0)) intType = reflect.TypeOf(int(0)) int8Type = reflect.TypeOf(int8(0)) int16Type = reflect.TypeOf(int16(0)) int32Type = reflect.TypeOf(int32(0)) int64Type = reflect.TypeOf(int64(0)) uintType = reflect.TypeOf(uint(0)) uint8Type = reflect.TypeOf(uint8(0)) uint16Type = reflect.TypeOf(uint16(0)) uint32Type = reflect.TypeOf(uint32(0)) uint64Type = reflect.TypeOf(uint64(0)) float32Type = reflect.TypeOf(float32(0)) float64Type = reflect.TypeOf(float64(0)) stringType = reflect.TypeOf("") bytesType = reflect.TypeOf([]byte(nil)) jsonNumberType = reflect.TypeOf(json.Number("")) base64CorruptInputError = reflect.TypeOf(base64.CorruptInputError(0)) anyType = rt.UnpackType(reflect.TypeOf((*interface{})(nil)).Elem()) ) var ( errorType = reflect.TypeOf((*error)(nil)).Elem() jsonUnmarshalerType = reflect.TypeOf((*json.Unmarshaler)(nil)).Elem() encodingTextUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() ) func rtype(t reflect.Type) (*rt.GoItab, *rt.GoType) { p := (*rt.GoIface)(unsafe.Pointer(&t)) return p.Itab, (*rt.GoType)(p.Value) }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/bytedance/sonic/internal/decoder/optdec/errors.go
vendor/github.com/bytedance/sonic/internal/decoder/optdec/errors.go
/* * Copyright 2021 ByteDance Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package optdec import ( "encoding/json" "errors" "reflect" "strconv" "github.com/bytedance/sonic/internal/rt" ) /** JIT Error Helpers **/ var stackOverflow = &json.UnsupportedValueError{ Str: "Value nesting too deep", Value: reflect.ValueOf("..."), } func error_type(vt *rt.GoType) error { return &json.UnmarshalTypeError{Type: vt.Pack()} } func error_mismatch(node Node, ctx *context, typ reflect.Type) error { return MismatchTypeError{ Pos: node.Position(), Src: ctx.Parser.Json, Type: typ, } } func newUnmatched(pos int, vt *rt.GoType) error { return MismatchTypeError{ Pos: pos, Src: "", Type: vt.Pack(), } } func error_field(name string) error { return errors.New("json: unknown field " + strconv.Quote(name)) } func error_value(value string, vtype reflect.Type) error { return &json.UnmarshalTypeError{ Type: vtype, Value: value, } } func error_syntax(pos int, src string, msg string) error { return SyntaxError{ Pos: pos, Src: src, Msg: msg, } } func error_unsuppoted(typ *rt.GoType) error { return &json.UnsupportedTypeError{ Type: typ.Pack(), } }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/bytedance/sonic/internal/decoder/optdec/interface.go
vendor/github.com/bytedance/sonic/internal/decoder/optdec/interface.go
package optdec import ( "encoding" "encoding/json" "unsafe" "reflect" "github.com/bytedance/sonic/internal/rt" ) type efaceDecoder struct { } func (d *efaceDecoder) FromDom(vp unsafe.Pointer, node Node, ctx *context) error { /* check the defined pointer type for issue 379 */ eface := (*rt.GoEface)(vp) /* not pointer type, or nil pointer, or self-pointed interface{}, such as ```go var v interface{} v = &v return v ``` see `issue758_test.go`. */ if eface.Value == nil || eface.Type.Kind() != reflect.Ptr || eface.Value == vp { ret, err := node.AsEface(ctx) if err != nil { return err } *(*interface{})(vp) = ret return nil } if node.IsNull() { if eface.Type.Indirect() || (!eface.Type.Indirect() && eface.Type.Pack().Elem().Kind() != reflect.Ptr) { *(*interface{})(vp) = nil return nil } } etp := rt.PtrElem(eface.Type) vp = eface.Value if eface.Type.IsNamed() { // check named pointer type, avoid call its `Unmarshaler` newp := vp etp = eface.Type vp = unsafe.Pointer(&newp) } else if !eface.Type.Indirect() { // check direct value etp = rt.UnpackType(eface.Type.Pack().Elem()) } dec, err := findOrCompile(etp) if err != nil { return err } return dec.FromDom(vp, node, ctx) } type ifaceDecoder struct { typ *rt.GoType } func (d *ifaceDecoder) FromDom(vp unsafe.Pointer, node Node, ctx *context) error { if node.IsNull() { *(*unsafe.Pointer)(vp) = nil return nil } iface := *(*rt.GoIface)(vp) if iface.Itab == nil { return error_type(d.typ) } vt := iface.Itab.Vt if vt.Kind() != reflect.Ptr || iface.Value == nil { return error_type(d.typ) } etp := rt.PtrElem(vt) vp = iface.Value /* check the defined pointer type for issue 379 */ if vt.IsNamed() { newp := vp etp = vt vp = unsafe.Pointer(&newp) } dec, err := findOrCompile(etp) if err != nil { return err } return dec.FromDom(vp, node, ctx) } type unmarshalTextDecoder struct { typ *rt.GoType } func (d *unmarshalTextDecoder) FromDom(vp unsafe.Pointer, node Node, ctx *context) error { if node.IsNull() { *(*unsafe.Pointer)(vp) = nil return nil 
} txt, ok := node.AsStringText(ctx) if !ok { return error_mismatch(node, ctx, d.typ.Pack()) } v := *(*interface{})(unsafe.Pointer(&rt.GoEface{ Type: d.typ, Value: vp, })) // fast path if u, ok := v.(encoding.TextUnmarshaler); ok { return u.UnmarshalText(txt) } // slow path rv := reflect.ValueOf(v) if u, ok := rv.Interface().(encoding.TextUnmarshaler); ok { return u.UnmarshalText(txt) } return error_type(d.typ) } type unmarshalJSONDecoder struct { typ *rt.GoType strOpt bool } func (d *unmarshalJSONDecoder) FromDom(vp unsafe.Pointer, node Node, ctx *context) error { v := *(*interface{})(unsafe.Pointer(&rt.GoEface{ Type: d.typ, Value: vp, })) var input []byte if d.strOpt && node.IsNull() { input = []byte("null") } else if d.strOpt { s, ok := node.AsStringText(ctx) if !ok { return error_mismatch(node, ctx, d.typ.Pack()) } input = s } else { input = []byte(node.AsRaw(ctx)) } // fast path if u, ok := v.(json.Unmarshaler); ok { return u.UnmarshalJSON((input)) } // slow path rv := reflect.ValueOf(v) if u, ok := rv.Interface().(json.Unmarshaler); ok { return u.UnmarshalJSON(input) } return error_type(d.typ) }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/bytedance/sonic/internal/decoder/optdec/decoder.go
vendor/github.com/bytedance/sonic/internal/decoder/optdec/decoder.go
package optdec import ( "reflect" "unsafe" "encoding/json" "github.com/bytedance/sonic/internal/rt" "github.com/bytedance/sonic/option" "github.com/bytedance/sonic/internal/decoder/errors" "github.com/bytedance/sonic/internal/decoder/consts" ) type ( MismatchTypeError = errors.MismatchTypeError SyntaxError = errors.SyntaxError ) const ( _F_allow_control = consts.F_allow_control _F_copy_string = consts.F_copy_string _F_disable_unknown = consts.F_disable_unknown _F_disable_urc = consts.F_disable_urc _F_use_int64 = consts.F_use_int64 _F_use_number = consts.F_use_number _F_validate_string = consts.F_validate_string ) type Options = consts.Options const ( OptionUseInt64 = consts.OptionUseInt64 OptionUseNumber = consts.OptionUseNumber OptionUseUnicodeErrors = consts.OptionUseUnicodeErrors OptionDisableUnknown = consts.OptionDisableUnknown OptionCopyString = consts.OptionCopyString OptionValidateString = consts.OptionValidateString ) func Decode(s *string, i *int, f uint64, val interface{}) error { vv := rt.UnpackEface(val) vp := vv.Value /* check for nil type */ if vv.Type == nil { return &json.InvalidUnmarshalError{} } /* must be a non-nil pointer */ if vp == nil || vv.Type.Kind() != reflect.Ptr { return &json.InvalidUnmarshalError{Type: vv.Type.Pack()} } etp := rt.PtrElem(vv.Type) /* check the defined pointer type for issue 379 */ if vv.Type.IsNamed() { newp := vp etp = vv.Type vp = unsafe.Pointer(&newp) } dec, err := findOrCompile(etp) if err != nil { return err } /* parse into document */ ctx, err := NewContext(*s, *i, uint64(f), etp) defer ctx.Delete() if ctx.Parser.Utf8Inv { *s = ctx.Parser.Json } if err != nil { goto fix_error; } err = dec.FromDom(vp, ctx.Root(), &ctx) fix_error: err = fix_error(*s, *i, err) // update position at last *i += ctx.Parser.Pos() return err } func fix_error(json string, pos int, err error) error { if e, ok := err.(SyntaxError); ok { return SyntaxError{ Pos: int(e.Pos) + pos, Src: json, Msg: e.Msg, } } if e, ok := 
err.(MismatchTypeError); ok { return &MismatchTypeError { Pos: int(e.Pos) + pos, Src: json, Type: e.Type, } } return err } // Pretouch compiles vt ahead-of-time to avoid JIT compilation on-the-fly, in // order to reduce the first-hit latency. // // Opts are the compile options, for example, "option.WithCompileRecursiveDepth" is // a compile option to set the depth of recursive compile for the nested struct type. func Pretouch(vt reflect.Type, opts ...option.CompileOption) error { cfg := option.DefaultCompileOptions() for _, opt := range opts { opt(&cfg) } return pretouchRec(map[reflect.Type]bool{vt:true}, cfg) } func pretouchType(_vt reflect.Type, opts option.CompileOptions) (map[reflect.Type]bool, error) { /* compile function */ compiler := newCompiler().apply(opts) decoder := func(vt *rt.GoType, _ ...interface{}) (interface{}, error) { if f, err := compiler.compileType(_vt); err != nil { return nil, err } else { return f, nil } } /* find or compile */ vt := rt.UnpackType(_vt) if val := programCache.Get(vt); val != nil { return nil, nil } else if _, err := programCache.Compute(vt, decoder); err == nil { return compiler.visited, nil } else { return nil, err } } func pretouchRec(vtm map[reflect.Type]bool, opts option.CompileOptions) error { if opts.RecursiveDepth < 0 || len(vtm) == 0 { return nil } next := make(map[reflect.Type]bool) for vt := range(vtm) { sub, err := pretouchType(vt, opts) if err != nil { return err } for svt := range(sub) { next[svt] = true } } opts.RecursiveDepth -= 1 return pretouchRec(next, opts) }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/bytedance/sonic/internal/decoder/optdec/compiler.go
vendor/github.com/bytedance/sonic/internal/decoder/optdec/compiler.go
package optdec import ( "fmt" "reflect" "github.com/bytedance/sonic/option" "github.com/bytedance/sonic/internal/rt" "github.com/bytedance/sonic/internal/caching" ) var ( programCache = caching.CreateProgramCache() ) func findOrCompile(vt *rt.GoType) (decFunc, error) { makeDecoder := func(vt *rt.GoType, _ ...interface{}) (interface{}, error) { ret, err := newCompiler().compileType(vt.Pack()) return ret, err } if val := programCache.Get(vt); val != nil { return val.(decFunc), nil } else if ret, err := programCache.Compute(vt, makeDecoder); err == nil { return ret.(decFunc), nil } else { return nil, err } } type compiler struct { visited map[reflect.Type]bool depth int counts int opts option.CompileOptions namedPtr bool } func newCompiler() *compiler { return &compiler{ visited: make(map[reflect.Type]bool), opts: option.DefaultCompileOptions(), } } func (self *compiler) apply(opts option.CompileOptions) *compiler { self.opts = opts return self } const _CompileMaxDepth = 4096 func (c *compiler) enter(vt reflect.Type) { c.visited[vt] = true c.depth += 1 if c.depth > _CompileMaxDepth { panic(*stackOverflow) } } func (c *compiler) exit(vt reflect.Type) { c.visited[vt] = false c.depth -= 1 } func (c *compiler) compileInt(vt reflect.Type) decFunc { switch vt.Size() { case 4: switch vt.Kind() { case reflect.Uint: fallthrough case reflect.Uintptr: return &u32Decoder{} case reflect.Int: return &i32Decoder{} } case 8: switch vt.Kind() { case reflect.Uint: fallthrough case reflect.Uintptr: return &u64Decoder{} case reflect.Int: return &i64Decoder{} } default: panic("not supported pointer size: " + fmt.Sprint(vt.Size())) } panic("unreachable") } func (c *compiler) rescue(ep *error) { if val := recover(); val != nil { if err, ok := val.(error); ok { *ep = err } else { panic(val) } } } func (c *compiler) compileType(vt reflect.Type) (rt decFunc, err error) { defer c.rescue(&err) rt = c.compile(vt) return rt, err } func (c *compiler) compile(vt reflect.Type) decFunc { if 
c.visited[vt] { return &recuriveDecoder{ typ: rt.UnpackType(vt), } } dec := c.tryCompilePtrUnmarshaler(vt, false) if dec != nil { return dec } return c.compileBasic(vt) } func (c *compiler) compileBasic(vt reflect.Type) decFunc { defer func() { c.counts += 1 }() switch vt.Kind() { case reflect.Bool: return &boolDecoder{} case reflect.Int8: return &i8Decoder{} case reflect.Int16: return &i16Decoder{} case reflect.Int32: return &i32Decoder{} case reflect.Int64: return &i64Decoder{} case reflect.Uint8: return &u8Decoder{} case reflect.Uint16: return &u16Decoder{} case reflect.Uint32: return &u32Decoder{} case reflect.Uint64: return &u64Decoder{} case reflect.Float32: return &f32Decoder{} case reflect.Float64: return &f64Decoder{} case reflect.Uint: fallthrough case reflect.Uintptr: fallthrough case reflect.Int: return c.compileInt(vt) case reflect.String: return c.compileString(vt) case reflect.Array: return c.compileArray(vt) case reflect.Interface: return c.compileInterface(vt) case reflect.Map: return c.compileMap(vt) case reflect.Ptr: return c.compilePtr(vt) case reflect.Slice: return c.compileSlice(vt) case reflect.Struct: return c.compileStruct(vt) default: return &unsupportedTypeDecoder{ typ: rt.UnpackType(vt), } } } func (c *compiler) compilePtr(vt reflect.Type) decFunc { c.enter(vt) defer c.exit(vt) // special logic for Named Ptr, issue 379 if reflect.PtrTo(vt.Elem()) != vt { c.namedPtr = true return &ptrDecoder{ typ: rt.UnpackType(vt.Elem()), deref: c.compileBasic(vt.Elem()), } } return &ptrDecoder{ typ: rt.UnpackType(vt.Elem()), deref: c.compile(vt.Elem()), } } func (c *compiler) compileArray(vt reflect.Type) decFunc { c.enter(vt) defer c.exit(vt) return &arrayDecoder{ len: vt.Len(), elemType: rt.UnpackType(vt.Elem()), elemDec: c.compile(vt.Elem()), typ: vt, } } func (c *compiler) compileString(vt reflect.Type) decFunc { if vt == jsonNumberType { return &numberDecoder{} } return &stringDecoder{} } func (c *compiler) tryCompileSliceUnmarshaler(vt 
reflect.Type) decFunc { pt := reflect.PtrTo(vt.Elem()) if pt.Implements(jsonUnmarshalerType) { return &sliceDecoder{ elemType: rt.UnpackType(vt.Elem()), elemDec: c.compile(vt.Elem()), typ: vt, } } if pt.Implements(encodingTextUnmarshalerType) { return &sliceDecoder{ elemType: rt.UnpackType(vt.Elem()), elemDec: c.compile(vt.Elem()), typ: vt, } } return nil } func (c *compiler) compileSlice(vt reflect.Type) decFunc { c.enter(vt) defer c.exit(vt) // Some common slice, use a decoder, to avoid function calls et := rt.UnpackType(vt.Elem()) /* first checking `[]byte` */ if et.Kind() == reflect.Uint8 /* []byte */ { return c.compileSliceBytes(vt) } dec := c.tryCompileSliceUnmarshaler(vt) if dec != nil { return dec } if vt == reflect.TypeOf([]interface{}{}) { return &sliceEfaceDecoder{} } if et.IsInt32() { return &sliceI32Decoder{} } if et.IsInt64() { return &sliceI64Decoder{} } if et.IsUint32() { return &sliceU32Decoder{} } if et.IsUint64() { return &sliceU64Decoder{} } if et.Kind() == reflect.String && et != rt.JsonNumberType { return &sliceStringDecoder{} } return &sliceDecoder{ elemType: rt.UnpackType(vt.Elem()), elemDec: c.compile(vt.Elem()), typ: vt, } } func (c *compiler) compileSliceBytes(vt reflect.Type) decFunc { ep := reflect.PtrTo(vt.Elem()) if ep.Implements(jsonUnmarshalerType) { return &sliceBytesUnmarshalerDecoder{ elemType: rt.UnpackType(vt.Elem()), elemDec: c.compile(vt.Elem()), typ: vt, } } if ep.Implements(encodingTextUnmarshalerType) { return &sliceBytesUnmarshalerDecoder{ elemType: rt.UnpackType(vt.Elem()), elemDec: c.compile(vt.Elem()), typ: vt, } } return &sliceBytesDecoder{} } func (c *compiler) compileInterface(vt reflect.Type) decFunc { c.enter(vt) defer c.exit(vt) if vt.NumMethod() == 0 { return &efaceDecoder{} } if vt.Implements(jsonUnmarshalerType) { return &unmarshalJSONDecoder{ typ: rt.UnpackType(vt), } } if vt.Implements(encodingTextUnmarshalerType) { return &unmarshalTextDecoder{ typ: rt.UnpackType(vt), } } return &ifaceDecoder{ typ: 
rt.UnpackType(vt), } } func (c *compiler) compileMap(vt reflect.Type) decFunc { c.enter(vt) defer c.exit(vt) // check the key unmarshaler at first decKey := tryCompileKeyUnmarshaler(vt) if decKey != nil { return &mapDecoder{ mapType: rt.MapType(rt.UnpackType(vt)), keyDec: decKey, elemDec: c.compile(vt.Elem()), } } // Most common map, use a decoder, to avoid function calls if vt == reflect.TypeOf(map[string]interface{}{}) { return &mapEfaceDecoder{} } else if vt == reflect.TypeOf(map[string]string{}) { return &mapStringDecoder{} } // Some common integer map later mt := rt.MapType(rt.UnpackType(vt)) if mt.Key.Kind() == reflect.String && mt.Key != rt.JsonNumberType { return &mapStrKeyDecoder{ mapType: mt, assign: rt.GetMapStrAssign(vt), elemDec: c.compile(vt.Elem()), } } if mt.Key.IsInt64() { return &mapI64KeyDecoder{ mapType: mt, elemDec: c.compile(vt.Elem()), assign: rt.GetMap64Assign(vt), } } if mt.Key.IsInt32() { return &mapI32KeyDecoder{ mapType: mt, elemDec: c.compile(vt.Elem()), assign: rt.GetMap32Assign(vt), } } if mt.Key.IsUint64() { return &mapU64KeyDecoder{ mapType: mt, elemDec: c.compile(vt.Elem()), assign: rt.GetMap64Assign(vt), } } if mt.Key.IsUint32() { return &mapU32KeyDecoder{ mapType: mt, elemDec: c.compile(vt.Elem()), assign: rt.GetMap32Assign(vt), } } // Generic map return &mapDecoder{ mapType: mt, keyDec: c.compileMapKey(vt), elemDec: c.compile(vt.Elem()), } } func tryCompileKeyUnmarshaler(vt reflect.Type) decKey { pt := reflect.PtrTo(vt.Key()) /* check for `encoding.TextUnmarshaler` with pointer receiver */ if pt.Implements(encodingTextUnmarshalerType) { return decodeKeyTextUnmarshaler } /* NOTE: encoding/json not support map key with `json.Unmarshaler` */ return nil } func (c *compiler) compileMapKey(vt reflect.Type) decKey { switch vt.Key().Kind() { case reflect.Int8: return decodeKeyI8 case reflect.Int16: return decodeKeyI16 case reflect.Uint8: return decodeKeyU8 case reflect.Uint16: return decodeKeyU16 // NOTE: actually, encoding/json can't 
use float as map key case reflect.Float32: return decodeFloat32Key case reflect.Float64: return decodeFloat64Key case reflect.String: if rt.UnpackType(vt.Key()) == rt.JsonNumberType { return decodeJsonNumberKey } fallthrough default: return nil } } // maybe vt is a named type, and not a pointer receiver, see issue 379 func (c *compiler) tryCompilePtrUnmarshaler(vt reflect.Type, strOpt bool) decFunc { pt := reflect.PtrTo(vt) /* check for `json.Unmarshaler` with pointer receiver */ if pt.Implements(jsonUnmarshalerType) { return &unmarshalJSONDecoder{ typ: rt.UnpackType(pt), strOpt: strOpt, } } /* check for `encoding.TextMarshaler` with pointer receiver */ if pt.Implements(encodingTextUnmarshalerType) { /* TextUnmarshal not support, string tag */ if strOpt { panicForInvalidStrType(vt) } return &unmarshalTextDecoder{ typ: rt.UnpackType(pt), } } return nil } func panicForInvalidStrType(vt reflect.Type) { panic(error_type(rt.UnpackType(vt))) }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/bytedance/sonic/internal/decoder/optdec/map.go
vendor/github.com/bytedance/sonic/internal/decoder/optdec/map.go
package optdec import ( "encoding" "encoding/json" "math" "reflect" "unsafe" "github.com/bytedance/sonic/internal/rt" ) /** Decoder for most common map types: map[string]interface{}, map[string]string **/ type mapEfaceDecoder struct { } func (d *mapEfaceDecoder) FromDom(vp unsafe.Pointer, node Node, ctx *context) error { if node.IsNull() { *(*map[string]interface{})(vp) = nil return nil } return node.AsMapEface(ctx, vp) } type mapStringDecoder struct { } func (d *mapStringDecoder) FromDom(vp unsafe.Pointer, node Node, ctx *context) error { if node.IsNull() { *(*map[string]string)(vp) = nil return nil } return node.AsMapString(ctx, vp) } /** Decoder for map with string key **/ type mapStrKeyDecoder struct { mapType *rt.GoMapType elemDec decFunc assign rt.MapStrAssign typ reflect.Type } func (d *mapStrKeyDecoder) FromDom(vp unsafe.Pointer, node Node, ctx *context) error { if node.IsNull() { *(*unsafe.Pointer)(vp) = nil return nil } obj, ok := node.AsObj() if !ok { return error_mismatch(node, ctx, d.mapType.Pack()) } // allocate map m := *(*unsafe.Pointer)(vp) if m == nil { m = rt.Makemap(&d.mapType.GoType, obj.Len()) } var gerr error next := obj.Children() for i := 0; i < obj.Len(); i++ { keyn := NewNode(next) key, _ := keyn.AsStr(ctx) valn := NewNode(PtrOffset(next, 1)) valp := d.assign(d.mapType, m, key) err := d.elemDec.FromDom(valp, valn, ctx) if gerr == nil && err != nil { gerr = err } next = valn.Next() } *(*unsafe.Pointer)(vp) = m return gerr } /** Decoder for map with int32 or int64 key **/ type mapI32KeyDecoder struct { mapType *rt.GoMapType elemDec decFunc assign rt.Map32Assign } func (d *mapI32KeyDecoder) FromDom(vp unsafe.Pointer, node Node, ctx *context) error { if node.IsNull() { *(*unsafe.Pointer)(vp) = nil return nil } obj, ok := node.AsObj() if !ok { return error_mismatch(node, ctx, d.mapType.Pack()) } // allocate map m := *(*unsafe.Pointer)(vp) if m == nil { m = rt.Makemap(&d.mapType.GoType, obj.Len()) } next := obj.Children() var gerr error for i 
:= 0; i < obj.Len(); i++ { keyn := NewNode(next) k, ok := keyn.ParseI64(ctx) if !ok || k > math.MaxInt32 || k < math.MinInt32 { if gerr == nil { gerr = error_mismatch(keyn, ctx, d.mapType.Pack()) } valn := NewNode(PtrOffset(next, 1)) next = valn.Next() continue } key := int32(k) ku32 := *(*uint32)(unsafe.Pointer(&key)) valn := NewNode(PtrOffset(next, 1)) valp := d.assign(d.mapType, m, ku32) err := d.elemDec.FromDom(valp, valn, ctx) if gerr == nil && err != nil { gerr = err } next = valn.Next() } *(*unsafe.Pointer)(vp) = m return gerr } type mapI64KeyDecoder struct { mapType *rt.GoMapType elemDec decFunc assign rt.Map64Assign } func (d *mapI64KeyDecoder) FromDom(vp unsafe.Pointer, node Node, ctx *context) error { if node.IsNull() { *(*unsafe.Pointer)(vp) = nil return nil } obj, ok := node.AsObj() if !ok { return error_mismatch(node, ctx, d.mapType.Pack()) } // allocate map m := *(*unsafe.Pointer)(vp) if m == nil { m = rt.Makemap(&d.mapType.GoType, obj.Len()) } var gerr error next := obj.Children() for i := 0; i < obj.Len(); i++ { keyn := NewNode(next) key, ok := keyn.ParseI64(ctx) if !ok { if gerr == nil { gerr = error_mismatch(keyn, ctx, d.mapType.Pack()) } valn := NewNode(PtrOffset(next, 1)) next = valn.Next() continue } ku64 := *(*uint64)(unsafe.Pointer(&key)) valn := NewNode(PtrOffset(next, 1)) valp := d.assign(d.mapType, m, ku64) err := d.elemDec.FromDom(valp, valn, ctx) if gerr == nil && err != nil { gerr = err } next = valn.Next() } *(*unsafe.Pointer)(vp) = m return gerr } /** Decoder for map with unt32 or uint64 key **/ type mapU32KeyDecoder struct { mapType *rt.GoMapType elemDec decFunc assign rt.Map32Assign } func (d *mapU32KeyDecoder) FromDom(vp unsafe.Pointer, node Node, ctx *context) error { if node.IsNull() { *(*unsafe.Pointer)(vp) = nil return nil } obj, ok := node.AsObj() if !ok { return error_mismatch(node, ctx, d.mapType.Pack()) } // allocate map m := *(*unsafe.Pointer)(vp) if m == nil { m = rt.Makemap(&d.mapType.GoType, obj.Len()) } var gerr error 
next := obj.Children() for i := 0; i < obj.Len(); i++ { keyn := NewNode(next) k, ok := keyn.ParseU64(ctx) if !ok || k > math.MaxUint32 { if gerr == nil { gerr = error_mismatch(keyn, ctx, d.mapType.Pack()) } valn := NewNode(PtrOffset(next, 1)) next = valn.Next() continue } key := uint32(k) valn := NewNode(PtrOffset(next, 1)) valp := d.assign(d.mapType, m, key) err := d.elemDec.FromDom(valp, valn, ctx) if gerr == nil && err != nil { gerr = err } next = valn.Next() } *(*unsafe.Pointer)(vp) = m return gerr } type mapU64KeyDecoder struct { mapType *rt.GoMapType elemDec decFunc assign rt.Map64Assign } func (d *mapU64KeyDecoder) FromDom(vp unsafe.Pointer, node Node, ctx *context) error { if node.IsNull() { *(*unsafe.Pointer)(vp) = nil return nil } obj, ok := node.AsObj() if !ok { return error_mismatch(node, ctx, d.mapType.Pack()) } // allocate map m := *(*unsafe.Pointer)(vp) if m == nil { m = rt.Makemap(&d.mapType.GoType, obj.Len()) } var gerr error next := obj.Children() for i := 0; i < obj.Len(); i++ { keyn := NewNode(next) key, ok := keyn.ParseU64(ctx) if !ok { if gerr == nil { gerr = error_mismatch(keyn, ctx, d.mapType.Pack()) } valn := NewNode(PtrOffset(next, 1)) next = valn.Next() continue } valn := NewNode(PtrOffset(next, 1)) valp := d.assign(d.mapType, m, key) err := d.elemDec.FromDom(valp, valn, ctx) if gerr == nil && err != nil { gerr = err } next = valn.Next() } *(*unsafe.Pointer)(vp) = m return gerr } /** Decoder for generic cases */ type decKey func(dec *mapDecoder, raw string) (interface{}, error) func decodeKeyU8(dec *mapDecoder, raw string) (interface{}, error) { key, err := Unquote(raw) if err != nil { return nil, err } ret, err := ParseU64(key) if err != nil { return nil, err } if ret > math.MaxUint8 { return nil, error_value(key, dec.mapType.Key.Pack()) } return uint8(ret), nil } func decodeKeyU16(dec *mapDecoder, raw string) (interface{}, error) { key, err := Unquote(raw) if err != nil { return nil, err } ret, err := ParseU64(key) if err != nil { 
return nil, err } if ret > math.MaxUint16 { return nil, error_value(key, dec.mapType.Key.Pack()) } return uint16(ret), nil } func decodeKeyI8(dec *mapDecoder, raw string) (interface{}, error) { key, err := Unquote(raw) if err != nil { return nil, err } ret, err := ParseI64(key) if err != nil { return nil, err } if ret > math.MaxInt8 || ret < math.MinInt8 { return nil, error_value(key, dec.mapType.Key.Pack()) } return int8(ret), nil } func decodeKeyI16(dec *mapDecoder, raw string) (interface{}, error) { key, err := Unquote(raw) if err != nil { return nil, err } ret, err := ParseI64(key) if err != nil { return nil, err } if ret > math.MaxInt16 || ret < math.MinInt16 { return nil, error_value(key, dec.mapType.Key.Pack()) } return int16(ret), nil } func decodeKeyTextUnmarshaler(dec *mapDecoder, raw string) (interface{}, error) { key, err := Unquote(raw) if err != nil { return nil, err } ret := reflect.New(dec.mapType.Key.Pack()).Interface() err = ret.(encoding.TextUnmarshaler).UnmarshalText(rt.Str2Mem(key)) if err != nil { return nil, err } return ret, nil } func decodeFloat32Key(dec *mapDecoder, raw string) (interface{}, error) { key, err := Unquote(raw) if err != nil { return nil, err } ret, err := ParseF64(key) if err != nil { return nil, err } if ret > math.MaxFloat32 || ret < -math.MaxFloat32 { return nil, error_value(key, dec.mapType.Key.Pack()) } return float32(ret), nil } func decodeFloat64Key(dec *mapDecoder, raw string) (interface{}, error) { key, err := Unquote(raw) if err != nil { return nil, err } return ParseF64(key) } func decodeJsonNumberKey(dec *mapDecoder, raw string) (interface{}, error) { // skip the quote raw = raw[1:len(raw)-1] end, ok := SkipNumberFast(raw, 0) // check trailing chars if !ok || end != len(raw) { return nil, error_value(raw, rt.JsonNumberType.Pack()) } return json.Number(raw[0:end]), nil } type mapDecoder struct { mapType *rt.GoMapType keyDec decKey elemDec decFunc } func (d *mapDecoder) FromDom(vp unsafe.Pointer, node Node, ctx 
*context) error { if node.IsNull() { *(*unsafe.Pointer)(vp) = nil return nil } obj, ok := node.AsObj() if !ok || d.keyDec == nil { return error_mismatch(node, ctx, d.mapType.Pack()) } // allocate map m := *(*unsafe.Pointer)(vp) if m == nil { m = rt.Makemap(&d.mapType.GoType, obj.Len()) } next := obj.Children() var gerr error for i := 0; i < obj.Len(); i++ { keyn := NewNode(next) raw := keyn.AsRaw(ctx) key, err := d.keyDec(d, raw) if err != nil { if gerr == nil { gerr = error_mismatch(keyn, ctx, d.mapType.Pack()) } valn := NewNode(PtrOffset(next, 1)) next = valn.Next() continue } valn := NewNode(PtrOffset(next, 1)) keyp := rt.UnpackEface(key).Value valp := rt.Mapassign(d.mapType, m, keyp) err = d.elemDec.FromDom(valp, valn, ctx) if gerr == nil && err != nil { gerr = err } next = valn.Next() } *(*unsafe.Pointer)(vp) = m return gerr }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/bytedance/sonic/internal/decoder/optdec/slice.go
vendor/github.com/bytedance/sonic/internal/decoder/optdec/slice.go
package optdec import ( "reflect" "unsafe" "github.com/bytedance/sonic/internal/rt" ) type sliceDecoder struct { elemType *rt.GoType elemDec decFunc typ reflect.Type } var ( emptyPtr = &struct{}{} ) func (d *sliceDecoder) FromDom(vp unsafe.Pointer, node Node, ctx *context) error { if node.IsNull() { *(*rt.GoSlice)(vp) = rt.GoSlice{} return nil } arr, ok := node.AsArr() if !ok { return error_mismatch(node, ctx, d.typ) } slice := rt.MakeSlice(vp, d.elemType, arr.Len()) elems := slice.Ptr next := arr.Children() var gerr error for i := 0; i < arr.Len(); i++ { val := NewNode(next) elem := unsafe.Pointer(uintptr(elems) + uintptr(i)*d.elemType.Size) err := d.elemDec.FromDom(elem, val, ctx) if gerr == nil && err != nil { gerr = err } next = val.Next() } *(*rt.GoSlice)(vp) = *slice return gerr } type arrayDecoder struct { len int elemType *rt.GoType elemDec decFunc typ reflect.Type } //go:nocheckptr func (d *arrayDecoder) FromDom(vp unsafe.Pointer, node Node, ctx *context) error { if node.IsNull() { return nil } arr, ok := node.AsArr() if !ok { return error_mismatch(node, ctx, d.typ) } next := arr.Children() i := 0 var gerr error for ; i < d.len && i < arr.Len(); i++ { elem := unsafe.Pointer(uintptr(vp) + uintptr(i)*d.elemType.Size) val := NewNode(next) err := d.elemDec.FromDom(elem, val, ctx) if gerr == nil && err != nil { gerr = err } next = val.Next() } /* zero rest of array */ addr := uintptr(vp) + uintptr(i)*d.elemType.Size n := uintptr(d.len-i) * d.elemType.Size /* the boundary pointer may points to another unknown object, so we need to avoid using it */ if n != 0 { rt.ClearMemory(d.elemType, unsafe.Pointer(addr), n) } return gerr } type sliceEfaceDecoder struct { } func (d *sliceEfaceDecoder) FromDom(vp unsafe.Pointer, node Node, ctx *context) error { if node.IsNull() { *(*rt.GoSlice)(vp) = rt.GoSlice{} return nil } /* if slice is empty, just call `AsSliceEface` */ if ((*rt.GoSlice)(vp)).Len == 0 { return node.AsSliceEface(ctx, vp) } decoder := sliceDecoder{ 
elemType: rt.AnyType, elemDec: &efaceDecoder{}, typ: rt.SliceEfaceType.Pack(), } return decoder.FromDom(vp, node, ctx) } type sliceI32Decoder struct { } func (d *sliceI32Decoder) FromDom(vp unsafe.Pointer, node Node, ctx *context) error { if node.IsNull() { *(*rt.GoSlice)(vp) = rt.GoSlice{} return nil } return node.AsSliceI32(ctx, vp) } type sliceI64Decoder struct { } func (d *sliceI64Decoder) FromDom(vp unsafe.Pointer, node Node, ctx *context) error { if node.IsNull() { *(*rt.GoSlice)(vp) = rt.GoSlice{} return nil } return node.AsSliceI64(ctx, vp) } type sliceU32Decoder struct { } func (d *sliceU32Decoder) FromDom(vp unsafe.Pointer, node Node, ctx *context) error { if node.IsNull() { *(*rt.GoSlice)(vp) = rt.GoSlice{} return nil } return node.AsSliceU32(ctx, vp) } type sliceU64Decoder struct { } func (d *sliceU64Decoder) FromDom(vp unsafe.Pointer, node Node, ctx *context) error { if node.IsNull() { *(*rt.GoSlice)(vp) = rt.GoSlice{} return nil } return node.AsSliceU64(ctx, vp) } type sliceStringDecoder struct { } func (d *sliceStringDecoder) FromDom(vp unsafe.Pointer, node Node, ctx *context) error { if node.IsNull() { *(*rt.GoSlice)(vp) = rt.GoSlice{} return nil } return node.AsSliceString(ctx, vp) } type sliceBytesDecoder struct { } func (d *sliceBytesDecoder) FromDom(vp unsafe.Pointer, node Node, ctx *context) error { if node.IsNull() { *(*rt.GoSlice)(vp) = rt.GoSlice{} return nil } s, err := node.AsSliceBytes(ctx) *(*[]byte)(vp) = s return err } type sliceBytesUnmarshalerDecoder struct { elemType *rt.GoType elemDec decFunc typ reflect.Type } func (d *sliceBytesUnmarshalerDecoder) FromDom(vp unsafe.Pointer, node Node, ctx *context) error { if node.IsNull() { *(*rt.GoSlice)(vp) = rt.GoSlice{} return nil } /* parse JSON string into `[]byte` */ if node.IsStr() { slice, err := node.AsSliceBytes(ctx) if err != nil { return err } *(*[]byte)(vp) = slice return nil } /* parse JSON array into `[]byte` */ arr, ok := node.AsArr() if !ok { return error_mismatch(node, ctx, 
d.typ) } slice := rt.MakeSlice(vp, d.elemType, arr.Len()) elems := slice.Ptr var gerr error next := arr.Children() for i := 0; i < arr.Len(); i++ { child := NewNode(next) elem := unsafe.Pointer(uintptr(elems) + uintptr(i)*d.elemType.Size) err := d.elemDec.FromDom(elem, child, ctx) if gerr == nil && err != nil { gerr = err } next = child.Next() } *(*rt.GoSlice)(vp) = *slice return gerr }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/bytedance/sonic/internal/decoder/optdec/const.go
vendor/github.com/bytedance/sonic/internal/decoder/optdec/const.go
package optdec import "math" /* Copied from sonic-rs // JSON Value Type const NULL: u64 = 0; const BOOL: u64 = 2; const FALSE: u64 = BOOL; const TRUE: u64 = (1 << 3) | BOOL; const NUMBER: u64 = 3; const UINT: u64 = NUMBER; const SINT: u64 = (1 << 3) | NUMBER; const REAL: u64 = (2 << 3) | NUMBER; const RAWNUMBER: u64 = (3 << 3) | NUMBER; const STRING: u64 = 4; const STRING_COMMON: u64 = STRING; const STRING_HASESCAPED: u64 = (1 << 3) | STRING; const OBJECT: u64 = 6; const ARRAY: u64 = 7; /// JSON Type Mask const POS_MASK: u64 = (!0) << 32; const POS_BITS: u64 = 32; const TYPE_MASK: u64 = 0xFF; const TYPE_BITS: u64 = 8; */ const ( // BasicType: 3 bits KNull = 0 // xxxxx000 KBool = 2 // xxxxx010 KNumber = 3 // xxxxx011 KString = 4 // xxxxx100 KRaw = 5 // xxxxx101 KObject = 6 // xxxxx110 KArray = 7 // xxxxx111 // SubType: 2 bits KFalse = (0 << 3) | KBool // xxx00_010, 2 KTrue = (1 << 3) | KBool // xxx01_010, 10 KUint = (0 << 3) | KNumber // xxx00_011, 3 KSint = (1 << 3) | KNumber // xxx01_011, 11 KReal = (2 << 3) | KNumber // xxx10_011, 19 KRawNumber = (3 << 3) | KNumber // xxx11_011, 27 KStringCommon = KString // xxx00_100, 4 KStringEscaped = (1 << 3) | KString // xxx01_100, 12 ) const ( PosMask = math.MaxUint64 << 32 PosBits = 32 TypeMask = 0xFF TypeBits = 8 ConLenMask = uint64(math.MaxUint32) ConLenBits = 32 )
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/bytedance/sonic/internal/decoder/optdec/context.go
vendor/github.com/bytedance/sonic/internal/decoder/optdec/context.go
package optdec type context = Context
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/bytedance/sonic/internal/decoder/optdec/compile_struct.go
vendor/github.com/bytedance/sonic/internal/decoder/optdec/compile_struct.go
package optdec import ( "fmt" "reflect" caching "github.com/bytedance/sonic/internal/optcaching" "github.com/bytedance/sonic/internal/rt" "github.com/bytedance/sonic/internal/resolver" ) const ( _MAX_FIELDS = 50 // cutoff at 50 fields struct ) func (c *compiler) compileIntStringOption(vt reflect.Type) decFunc { switch vt.Size() { case 4: switch vt.Kind() { case reflect.Uint: fallthrough case reflect.Uintptr: return &u32StringDecoder{} case reflect.Int: return &i32StringDecoder{} } case 8: switch vt.Kind() { case reflect.Uint: fallthrough case reflect.Uintptr: return &u64StringDecoder{} case reflect.Int: return &i64StringDecoder{} } default: panic("not supported pointer size: " + fmt.Sprint(vt.Size())) } panic("unreachable") } func isInteger(vt reflect.Type) bool { switch vt.Kind() { case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint, reflect.Uintptr, reflect.Int: return true default: return false } } func (c *compiler) assertStringOptTypes(vt reflect.Type) { if c.depth > _CompileMaxDepth { panic(*stackOverflow) } c.depth += 1 defer func () { c.depth -= 1 }() if isInteger(vt) { return } switch vt.Kind() { case reflect.String, reflect.Bool, reflect.Float32, reflect.Float64: return case reflect.Ptr: c.assertStringOptTypes(vt.Elem()) default: panicForInvalidStrType(vt) } } func (c *compiler) compileFieldStringOption(vt reflect.Type) decFunc { c.assertStringOptTypes(vt) unmDec := c.tryCompilePtrUnmarshaler(vt, true) if unmDec != nil { return unmDec } switch vt.Kind() { case reflect.String: if vt == jsonNumberType { return &numberStringDecoder{} } return &strStringDecoder{} case reflect.Bool: return &boolStringDecoder{} case reflect.Int8: return &i8StringDecoder{} case reflect.Int16: return &i16StringDecoder{} case reflect.Int32: return &i32StringDecoder{} case reflect.Int64: return &i64StringDecoder{} case reflect.Uint8: return &u8StringDecoder{} case reflect.Uint16: return 
&u16StringDecoder{} case reflect.Uint32: return &u32StringDecoder{} case reflect.Uint64: return &u64StringDecoder{} case reflect.Float32: return &f32StringDecoder{} case reflect.Float64: return &f64StringDecoder{} case reflect.Uint: fallthrough case reflect.Uintptr: fallthrough case reflect.Int: return c.compileIntStringOption(vt) case reflect.Ptr: return &ptrStrDecoder{ typ: rt.UnpackType(vt.Elem()), deref: c.compileFieldStringOption(vt.Elem()), } default: panicForInvalidStrType(vt) return nil } } func (c *compiler) compileStruct(vt reflect.Type) decFunc { c.enter(vt) defer c.exit(vt) if c.namedPtr { c.namedPtr = false return c.compileStructBody(vt) } if c.depth >= c.opts.MaxInlineDepth + 1 || (c.counts > 0 && vt.NumField() >= _MAX_FIELDS) { return &recuriveDecoder{ typ: rt.UnpackType(vt), } } else { return c.compileStructBody(vt) } } func (c *compiler) compileStructBody(vt reflect.Type) decFunc { fv := resolver.ResolveStruct(vt) entries := make([]fieldEntry, 0, len(fv)) for _, f := range fv { var dec decFunc /* dealt with field tag options */ if f.Opts&resolver.F_stringize != 0 { dec = c.compileFieldStringOption(f.Type) } else { dec = c.compile(f.Type) } /* deal with embedded pointer fields */ if f.Path[0].Kind == resolver.F_deref { dec = &embeddedFieldPtrDecoder{ field: f, fieldDec: dec, fieldName: f.Name, } } entries = append(entries, fieldEntry{ FieldMeta: f, fieldDec: dec, }) } return &structDecoder{ fieldMap: caching.NewFieldLookup(fv), fields: entries, structName: vt.Name(), typ: vt, } }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/bytedance/sonic/internal/decoder/optdec/functor.go
vendor/github.com/bytedance/sonic/internal/decoder/optdec/functor.go
package optdec import ( "encoding/json" "math" "unsafe" "github.com/bytedance/sonic/internal/rt" "github.com/bytedance/sonic/internal/resolver" ) type decFunc interface { FromDom(vp unsafe.Pointer, node Node, ctx *context) error } type ptrDecoder struct { typ *rt.GoType deref decFunc } // Pointer Value is allocated in the Caller func (d *ptrDecoder) FromDom(vp unsafe.Pointer, node Node, ctx *context) error { if node.IsNull() { *(*unsafe.Pointer)(vp) = nil return nil } if *(*unsafe.Pointer)(vp) == nil { *(*unsafe.Pointer)(vp) = rt.Mallocgc(d.typ.Size, d.typ, true) } return d.deref.FromDom(*(*unsafe.Pointer)(vp), node, ctx) } type embeddedFieldPtrDecoder struct { field resolver.FieldMeta fieldDec decFunc fieldName string } // Pointer Value is allocated in the Caller func (d *embeddedFieldPtrDecoder) FromDom(vp unsafe.Pointer, node Node, ctx *context) error { if node.IsNull() { return nil } // seek into the pointer vp = unsafe.Pointer(uintptr(vp) - uintptr(d.field.Path[0].Size)) for _, f := range d.field.Path { deref := rt.UnpackType(f.Type) vp = unsafe.Pointer(uintptr(vp) + f.Size) if f.Kind == resolver.F_deref { if *(*unsafe.Pointer)(vp) == nil { *(*unsafe.Pointer)(vp) = rt.Mallocgc(deref.Size, deref, true) } vp = *(*unsafe.Pointer)(vp) } } return d.fieldDec.FromDom(vp, node, ctx) } type i8Decoder struct{} func (d *i8Decoder) FromDom(vp unsafe.Pointer, node Node, ctx *context) error { if node.IsNull() { return nil } ret, ok := node.AsI64(ctx) if !ok || ret > math.MaxInt8 || ret < math.MinInt8 { return error_mismatch(node, ctx, int8Type) } *(*int8)(vp) = int8(ret) return nil } type i16Decoder struct{} func (d *i16Decoder) FromDom(vp unsafe.Pointer, node Node, ctx *context) error { if node.IsNull() { return nil } ret, ok := node.AsI64(ctx) if !ok || ret > math.MaxInt16 || ret < math.MinInt16 { return error_mismatch(node, ctx, int16Type) } *(*int16)(vp) = int16(ret) return nil } type i32Decoder struct{} func (d *i32Decoder) FromDom(vp unsafe.Pointer, node Node, ctx 
*context) error { if node.IsNull() { return nil } ret, ok := node.AsI64(ctx) if !ok || ret > math.MaxInt32 || ret < math.MinInt32 { return error_mismatch(node, ctx, int32Type) } *(*int32)(vp) = int32(ret) return nil } type i64Decoder struct{} func (d *i64Decoder) FromDom(vp unsafe.Pointer, node Node, ctx *context) error { if node.IsNull() { return nil } ret, ok := node.AsI64(ctx) if !ok { return error_mismatch(node, ctx, int64Type) } *(*int64)(vp) = int64(ret) return nil } type u8Decoder struct{} func (d *u8Decoder) FromDom(vp unsafe.Pointer, node Node, ctx *context) error { if node.IsNull() { return nil } ret, ok := node.AsU64(ctx) if !ok || ret > math.MaxUint8 { err := error_mismatch(node, ctx, uint8Type) return err } *(*uint8)(vp) = uint8(ret) return nil } type u16Decoder struct{} func (d *u16Decoder) FromDom(vp unsafe.Pointer, node Node, ctx *context) error { if node.IsNull() { return nil } ret, ok := node.AsU64(ctx) if !ok || ret > math.MaxUint16 { return error_mismatch(node, ctx, uint16Type) } *(*uint16)(vp) = uint16(ret) return nil } type u32Decoder struct{} func (d *u32Decoder) FromDom(vp unsafe.Pointer, node Node, ctx *context) error { if node.IsNull() { return nil } ret, ok := node.AsU64(ctx) if !ok || ret > math.MaxUint32 { return error_mismatch(node, ctx, uint32Type) } *(*uint32)(vp) = uint32(ret) return nil } type u64Decoder struct{} func (d *u64Decoder) FromDom(vp unsafe.Pointer, node Node, ctx *context) error { if node.IsNull() { return nil } ret, ok := node.AsU64(ctx) if !ok { return error_mismatch(node, ctx, uint64Type) } *(*uint64)(vp) = uint64(ret) return nil } type f32Decoder struct{} func (d *f32Decoder) FromDom(vp unsafe.Pointer, node Node, ctx *context) error { if node.IsNull() { return nil } ret, ok := node.AsF64(ctx) if !ok || ret > math.MaxFloat32 || ret < -math.MaxFloat32 { return error_mismatch(node, ctx, float32Type) } *(*float32)(vp) = float32(ret) return nil } type f64Decoder struct{} func (d *f64Decoder) FromDom(vp unsafe.Pointer, 
node Node, ctx *context) error { if node.IsNull() { return nil } ret, ok := node.AsF64(ctx) if !ok { return error_mismatch(node, ctx, float64Type) } *(*float64)(vp) = float64(ret) return nil } type boolDecoder struct { } func (d *boolDecoder) FromDom(vp unsafe.Pointer, node Node, ctx *context) error { if node.IsNull() { return nil } ret, ok := node.AsBool() if !ok { return error_mismatch(node, ctx, boolType) } *(*bool)(vp) = bool(ret) return nil } type stringDecoder struct { } func (d *stringDecoder) FromDom(vp unsafe.Pointer, node Node, ctx *context) error { if node.IsNull() { return nil } ret, ok := node.AsStr(ctx) if !ok { return error_mismatch(node, ctx, stringType) } *(*string)(vp) = ret return nil } type numberDecoder struct { } func (d *numberDecoder) FromDom(vp unsafe.Pointer, node Node, ctx *context) error { if node.IsNull() { return nil } num, ok := node.AsNumber(ctx) if !ok { return error_mismatch(node, ctx, jsonNumberType) } *(*json.Number)(vp) = num return nil } type recuriveDecoder struct { typ *rt.GoType } func (d *recuriveDecoder) FromDom(vp unsafe.Pointer, node Node, ctx *context) error { dec, err := findOrCompile(d.typ) if err != nil { return err } return dec.FromDom(vp, node, ctx) } type unsupportedTypeDecoder struct { typ *rt.GoType } func (d *unsupportedTypeDecoder) FromDom(vp unsafe.Pointer, node Node, ctx *context) error { if node.IsNull() { return nil } return error_unsuppoted(d.typ) }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/bytedance/sonic/internal/decoder/optdec/stringopts.go
vendor/github.com/bytedance/sonic/internal/decoder/optdec/stringopts.go
package optdec import ( "encoding/json" "math" "unsafe" "github.com/bytedance/sonic/internal/rt" ) type ptrStrDecoder struct { typ *rt.GoType deref decFunc } // Pointer Value is allocated in the Caller func (d *ptrStrDecoder) FromDom(vp unsafe.Pointer, node Node, ctx *context) error { if node.IsNull() { *(*unsafe.Pointer)(vp) = nil return nil } s, ok := node.AsStrRef(ctx) if !ok { return error_mismatch(node, ctx, stringType) } if s == "null" { *(*unsafe.Pointer)(vp) = nil return nil } if *(*unsafe.Pointer)(vp) == nil { *(*unsafe.Pointer)(vp) = rt.Mallocgc(d.typ.Size, d.typ, true) } return d.deref.FromDom(*(*unsafe.Pointer)(vp), node, ctx) } type boolStringDecoder struct { } func (d *boolStringDecoder) FromDom(vp unsafe.Pointer, node Node, ctx *context) error { if node.IsNull() { return nil } s, ok := node.AsStrRef(ctx) if !ok { return error_mismatch(node, ctx, stringType) } if s == "null" { return nil } b, err := ParseBool(s) if err != nil { return error_mismatch(node, ctx, boolType) } *(*bool)(vp) = b return nil } func parseI64(node Node, ctx *context) (int64, error, bool) { if node.IsNull() { return 0, nil, true } s, ok := node.AsStrRef(ctx) if !ok { return 0, error_mismatch(node, ctx, stringType), false } if s == "null" { return 0, nil, true } ret, err := ParseI64(s) return ret, err, false } type i8StringDecoder struct{} func (d *i8StringDecoder) FromDom(vp unsafe.Pointer, node Node, ctx *context) error { ret, err, null := parseI64(node, ctx) if null { return nil } if err != nil { return err } if ret > math.MaxInt8 || ret < math.MinInt8 { return error_mismatch(node, ctx, int8Type) } *(*int8)(vp) = int8(ret) return nil } type i16StringDecoder struct{} func (d *i16StringDecoder) FromDom(vp unsafe.Pointer, node Node, ctx *context) error { ret, err, null := parseI64(node, ctx) if null { return nil } if err != nil { return err } if ret > math.MaxInt16 || ret < math.MinInt16 { return error_mismatch(node, ctx, int16Type) } *(*int16)(vp) = int16(ret) return nil } type 
i32StringDecoder struct{} func (d *i32StringDecoder) FromDom(vp unsafe.Pointer, node Node, ctx *context) error { ret, err, null := parseI64(node, ctx) if null { return nil } if err != nil { return err } if ret > math.MaxInt32 || ret < math.MinInt32 { return error_mismatch(node, ctx, int32Type) } *(*int32)(vp) = int32(ret) return nil } type i64StringDecoder struct{} func (d *i64StringDecoder) FromDom(vp unsafe.Pointer, node Node, ctx *context) error { ret, err, null := parseI64(node, ctx) if null { return nil } if err != nil { return err } *(*int64)(vp) = int64(ret) return nil } func parseU64(node Node, ctx *context) (uint64, error, bool) { if node.IsNull() { return 0, nil, true } s, ok := node.AsStrRef(ctx) if !ok { return 0, error_mismatch(node, ctx, stringType), false } if s == "null" { return 0, nil, true } ret, err := ParseU64(s) return ret, err, false } type u8StringDecoder struct{} func (d *u8StringDecoder) FromDom(vp unsafe.Pointer, node Node, ctx *context) error { ret, err, null := parseU64(node, ctx) if null { return nil } if err != nil { return err } if ret > math.MaxUint8 { return error_mismatch(node, ctx, uint8Type) } *(*uint8)(vp) = uint8(ret) return nil } type u16StringDecoder struct{} func (d *u16StringDecoder) FromDom(vp unsafe.Pointer, node Node, ctx *context) error { ret, err, null := parseU64(node, ctx) if null { return nil } if err != nil { return err } if ret > math.MaxUint16 { return error_mismatch(node, ctx, uint16Type) } *(*uint16)(vp) = uint16(ret) return nil } type u32StringDecoder struct{} func (d *u32StringDecoder) FromDom(vp unsafe.Pointer, node Node, ctx *context) error { ret, err, null := parseU64(node, ctx) if null { return nil } if err != nil { return err } if ret > math.MaxUint32 { return error_mismatch(node, ctx, uint32Type) } *(*uint32)(vp) = uint32(ret) return nil } type u64StringDecoder struct{} func (d *u64StringDecoder) FromDom(vp unsafe.Pointer, node Node, ctx *context) error { ret, err, null := parseU64(node, ctx) if null { 
return nil } if err != nil { return err } *(*uint64)(vp) = uint64(ret) return nil } type f32StringDecoder struct{} func (d *f32StringDecoder) FromDom(vp unsafe.Pointer, node Node, ctx *context) error { if node.IsNull() { return nil } s, ok := node.AsStrRef(ctx) if !ok { return error_mismatch(node, ctx, stringType) } if s == "null" { return nil } ret, err := ParseF64(s) if err != nil || ret > math.MaxFloat32 || ret < -math.MaxFloat32 { return error_mismatch(node, ctx, float32Type) } *(*float32)(vp) = float32(ret) return nil } type f64StringDecoder struct{} func (d *f64StringDecoder) FromDom(vp unsafe.Pointer, node Node, ctx *context) error { if node.IsNull() { return nil } s, ok := node.AsStrRef(ctx) if !ok { return error_mismatch(node, ctx, stringType) } if s == "null" { return nil } ret, err := ParseF64(s) if err != nil { return error_mismatch(node, ctx, float64Type) } *(*float64)(vp) = float64(ret) return nil } /* parse string field with string options */ type strStringDecoder struct{} func (d *strStringDecoder) FromDom(vp unsafe.Pointer, node Node, ctx *context) error { if node.IsNull() { return nil } s, ok := node.AsStrRef(ctx) if !ok { return error_mismatch(node, ctx, stringType) } if s == "null" { return nil } s, err := Unquote(s) if err != nil { return error_mismatch(node, ctx, stringType) } *(*string)(vp) = s return nil } type numberStringDecoder struct{} func (d *numberStringDecoder) FromDom(vp unsafe.Pointer, node Node, ctx *context) error { if node.IsNull() { return nil } s, ok := node.AsStrRef(ctx) if !ok { return error_mismatch(node, ctx, stringType) } if s == "null" { return nil } num, ok := node.ParseNumber(ctx) if !ok { return error_mismatch(node, ctx, jsonNumberType) } end, ok := SkipNumberFast(s, 0) // has error or trailing chars if !ok || end != len(s) { return error_mismatch(node, ctx, jsonNumberType) } *(*json.Number)(vp) = json.Number(num) return nil }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/bytedance/sonic/internal/decoder/optdec/structs.go
vendor/github.com/bytedance/sonic/internal/decoder/optdec/structs.go
package optdec import ( "reflect" "unsafe" "github.com/bytedance/sonic/internal/decoder/consts" caching "github.com/bytedance/sonic/internal/optcaching" "github.com/bytedance/sonic/internal/resolver" ) type fieldEntry struct { resolver.FieldMeta fieldDec decFunc } type structDecoder struct { fieldMap caching.FieldLookup fields []fieldEntry structName string typ reflect.Type } func (d *structDecoder) FromDom(vp unsafe.Pointer, node Node, ctx *context) error { if node.IsNull() { return nil } var gerr error obj, ok := node.AsObj() if !ok { return error_mismatch(node, ctx, d.typ) } next := obj.Children() for i := 0; i < obj.Len(); i++ { key, _ := NewNode(next).AsStrRef(ctx) val := NewNode(PtrOffset(next, 1)) next = val.Next() // find field idx idx := d.fieldMap.Get(key, ctx.Options()&uint64(consts.OptionCaseSensitive) != 0) if idx == -1 { if Options(ctx.Options())&OptionDisableUnknown != 0 { return error_field(key) } continue } offset := d.fields[idx].Path[0].Size elem := unsafe.Pointer(uintptr(vp) + offset) err := d.fields[idx].fieldDec.FromDom(elem, val, ctx) // deal with mismatch type errors if gerr == nil && err != nil { // TODO: better error info gerr = err } } return gerr }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/bytedance/sonic/internal/rt/base64_compat.go
vendor/github.com/bytedance/sonic/internal/rt/base64_compat.go
// +build !amd64 !go1.17 go1.25 package rt import ( "encoding/base64" ) func DecodeBase64(raw []byte) ([]byte, error) { ret := make([]byte, base64.StdEncoding.DecodedLen(len(raw))) n, err := base64.StdEncoding.Decode(ret, raw) if err != nil { return nil, err } return ret[:n], nil } func EncodeBase64ToString(src []byte) string { return base64.StdEncoding.EncodeToString(src) } func EncodeBase64(buf []byte, src []byte) []byte { if len(src) == 0 { return append(buf, '"', '"') } buf = append(buf, '"') need := base64.StdEncoding.EncodedLen(len(src)) if cap(buf) - len(buf) < need { tmp := make([]byte, len(buf), len(buf) + need*2) copy(tmp, buf) buf = tmp } base64.StdEncoding.Encode(buf[len(buf):cap(buf)], src) buf = buf[:len(buf) + need] buf = append(buf, '"') return buf }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/bytedance/sonic/internal/rt/fastvalue.go
vendor/github.com/bytedance/sonic/internal/rt/fastvalue.go
/* * Copyright 2021 ByteDance Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package rt import ( "reflect" "unsafe" ) var ( reflectRtypeItab = findReflectRtypeItab() ) // GoType.KindFlags const const ( F_direct = 1 << 5 F_kind_mask = (1 << 5) - 1 ) // GoType.Flags const const ( tflagUncommon uint8 = 1 << 0 tflagExtraStar uint8 = 1 << 1 tflagNamed uint8 = 1 << 2 tflagRegularMemory uint8 = 1 << 3 ) type GoType struct { Size uintptr PtrData uintptr Hash uint32 Flags uint8 Align uint8 FieldAlign uint8 KindFlags uint8 Traits unsafe.Pointer GCData *byte Str int32 PtrToSelf int32 } func (self *GoType) IsNamed() bool { return (self.Flags & tflagNamed) != 0 } func (self *GoType) Kind() reflect.Kind { return reflect.Kind(self.KindFlags & F_kind_mask) } func (self *GoType) Pack() (t reflect.Type) { (*GoIface)(unsafe.Pointer(&t)).Itab = reflectRtypeItab (*GoIface)(unsafe.Pointer(&t)).Value = unsafe.Pointer(self) return } func (self *GoType) String() string { return self.Pack().String() } func (self *GoType) Indirect() bool { return self.KindFlags&F_direct == 0 } type GoItab struct { it unsafe.Pointer Vt *GoType hv uint32 _ [4]byte fn [1]uintptr } type GoIface struct { Itab *GoItab Value unsafe.Pointer } type GoEface struct { Type *GoType Value unsafe.Pointer } func (self GoEface) Pack() (v interface{}) { *(*GoEface)(unsafe.Pointer(&v)) = self return } type GoPtrType struct { GoType Elem *GoType } type GoMapType struct { GoType Key *GoType Elem *GoType Bucket *GoType Hasher 
func(unsafe.Pointer, uintptr) uintptr KeySize uint8 ElemSize uint8 BucketSize uint16 Flags uint32 } func (self *GoMapType) IndirectElem() bool { return self.Flags&2 != 0 } type GoStructType struct { GoType Pkg *byte Fields []GoStructField } type GoStructField struct { Name *byte Type *GoType OffEmbed uintptr } type GoInterfaceType struct { GoType PkgPath *byte Methods []GoInterfaceMethod } type GoInterfaceMethod struct { Name int32 Type int32 } type GoSlice struct { Ptr unsafe.Pointer Len int Cap int } type GoString struct { Ptr unsafe.Pointer Len int } func PtrElem(t *GoType) *GoType { return (*GoPtrType)(unsafe.Pointer(t)).Elem } func MapType(t *GoType) *GoMapType { return (*GoMapType)(unsafe.Pointer(t)) } func IfaceType(t *GoType) *GoInterfaceType { return (*GoInterfaceType)(unsafe.Pointer(t)) } func UnpackType(t reflect.Type) *GoType { return (*GoType)((*GoIface)(unsafe.Pointer(&t)).Value) } func UnpackEface(v interface{}) GoEface { return *(*GoEface)(unsafe.Pointer(&v)) } func UnpackIface(v interface{}) GoIface { return *(*GoIface)(unsafe.Pointer(&v)) } func findReflectRtypeItab() *GoItab { v := reflect.TypeOf(struct{}{}) return (*GoIface)(unsafe.Pointer(&v)).Itab } func AssertI2I2(t *GoType, i GoIface) (r GoIface) { inter := IfaceType(t) tab := i.Itab if tab == nil { return } if (*GoInterfaceType)(tab.it) != inter { tab = GetItab(inter, tab.Vt, true) if tab == nil { return } } r.Itab = tab r.Value = i.Value return } func (t *GoType) IsInt64() bool { return t.Kind() == reflect.Int64 || (t.Kind() == reflect.Int && t.Size == 8) } func (t *GoType) IsInt32() bool { return t.Kind() == reflect.Int32 || (t.Kind() == reflect.Int && t.Size == 4) } //go:nosplit func (t *GoType) IsUint64() bool { isUint := t.Kind() == reflect.Uint || t.Kind() == reflect.Uintptr return t.Kind() == reflect.Uint64 || (isUint && t.Size == 8) } //go:nosplit func (t *GoType) IsUint32() bool { isUint := t.Kind() == reflect.Uint || t.Kind() == reflect.Uintptr return t.Kind() == reflect.Uint32 || 
(isUint && t.Size == 4) } //go:nosplit func PtrAdd(ptr unsafe.Pointer, offset uintptr) unsafe.Pointer { return unsafe.Pointer(uintptr(ptr) + offset) } //go:noescape //go:linkname GetItab runtime.getitab func GetItab(inter *GoInterfaceType, typ *GoType, canfail bool) *GoItab
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/bytedance/sonic/internal/rt/table.go
vendor/github.com/bytedance/sonic/internal/rt/table.go
// Copyright 2024 CloudWeGo Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package rt import "unicode/utf8" var SafeSet = [utf8.RuneSelf]bool{ ' ': true, '!': true, '"': false, '#': true, '$': true, '%': true, '&': true, '\'': true, '(': true, ')': true, '*': true, '+': true, ',': true, '-': true, '.': true, '/': true, '0': true, '1': true, '2': true, '3': true, '4': true, '5': true, '6': true, '7': true, '8': true, '9': true, ':': true, ';': true, '<': true, '=': true, '>': true, '?': true, '@': true, 'A': true, 'B': true, 'C': true, 'D': true, 'E': true, 'F': true, 'G': true, 'H': true, 'I': true, 'J': true, 'K': true, 'L': true, 'M': true, 'N': true, 'O': true, 'P': true, 'Q': true, 'R': true, 'S': true, 'T': true, 'U': true, 'V': true, 'W': true, 'X': true, 'Y': true, 'Z': true, '[': true, '\\': false, ']': true, '^': true, '_': true, '`': true, 'a': true, 'b': true, 'c': true, 'd': true, 'e': true, 'f': true, 'g': true, 'h': true, 'i': true, 'j': true, 'k': true, 'l': true, 'm': true, 'n': true, 'o': true, 'p': true, 'q': true, 'r': true, 's': true, 't': true, 'u': true, 'v': true, 'w': true, 'x': true, 'y': true, 'z': true, '{': true, '|': true, '}': true, '~': true, '\u007f': true, } var Hex = "0123456789abcdef"
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/bytedance/sonic/internal/rt/types.go
vendor/github.com/bytedance/sonic/internal/rt/types.go
package rt import ( "reflect" "unsafe" "encoding/json" ) func AsGoType(t uintptr) *GoType { return (*GoType)(unsafe.Pointer(t)) } var ( BoolType = UnpackType(reflect.TypeOf(false)) ByteType = UnpackType(reflect.TypeOf(byte(0))) IncntType = UnpackType(reflect.TypeOf(int(0))) Int8Type = UnpackType(reflect.TypeOf(int8(0))) Int16Type = UnpackType(reflect.TypeOf(int16(0))) Int32Type = UnpackType(reflect.TypeOf(int32(0))) Int64Type = UnpackType(reflect.TypeOf(int64(0))) UintType = UnpackType(reflect.TypeOf(uint(0))) Uint8Type = UnpackType(reflect.TypeOf(uint8(0))) Uint16Type = UnpackType(reflect.TypeOf(uint16(0))) Uint32Type = UnpackType(reflect.TypeOf(uint32(0))) Uint64Type = UnpackType(reflect.TypeOf(uint64(0))) Float32Type = UnpackType(reflect.TypeOf(float32(0))) Float64Type = UnpackType(reflect.TypeOf(float64(0))) StringType = UnpackType(reflect.TypeOf("")) BytesType = UnpackType(reflect.TypeOf([]byte(nil))) JsonNumberType = UnpackType(reflect.TypeOf(json.Number(""))) SliceEfaceType = UnpackType(reflect.TypeOf([]interface{}(nil))) SliceStringType = UnpackType(reflect.TypeOf([]string(nil))) SliceI32Type = UnpackType(reflect.TypeOf([]int32(nil))) SliceI64Type = UnpackType(reflect.TypeOf([]int64(nil))) SliceU32Type = UnpackType(reflect.TypeOf([]uint32(nil))) SliceU64Type = UnpackType(reflect.TypeOf([]uint64(nil))) AnyType = UnpackType(reflect.TypeOf((*interface{})(nil)).Elem()) MapEfaceType = UnpackType(reflect.TypeOf(map[string]interface{}(nil))) MapStringType = UnpackType(reflect.TypeOf(map[string]string(nil))) MapEfaceMapType = MapType(UnpackType(reflect.TypeOf(map[string]interface{}(nil)))) )
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/bytedance/sonic/internal/rt/growslice_legacy.go
vendor/github.com/bytedance/sonic/internal/rt/growslice_legacy.go
// +build go1.16,!go1.20 /* * Copyright 2021 ByteDance Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package rt import ( _ `unsafe` ) //go:linkname GrowSlice runtime.growslice //goland:noinspection GoUnusedParameter func GrowSlice(et *GoType, old GoSlice, cap int) GoSlice
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/bytedance/sonic/internal/rt/map_legacy.go
vendor/github.com/bytedance/sonic/internal/rt/map_legacy.go
// +build !go1.24 package rt import ( "unsafe" ) type GoMapIterator struct { K unsafe.Pointer V unsafe.Pointer T *GoMapType H unsafe.Pointer Buckets unsafe.Pointer Bptr *unsafe.Pointer Overflow *[]unsafe.Pointer OldOverflow *[]unsafe.Pointer StartBucket uintptr Offset uint8 Wrapped bool B uint8 I uint8 Bucket uintptr CheckBucket uintptr }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/bytedance/sonic/internal/rt/stubs.go
vendor/github.com/bytedance/sonic/internal/rt/stubs.go
/* * Copyright 2021 ByteDance Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package rt import ( "reflect" "unsafe" ) //go:noescape //go:linkname Memmove runtime.memmove func Memmove(to unsafe.Pointer, from unsafe.Pointer, n uintptr) //go:noescape //go:linkname MemEqual runtime.memequal //goland:noinspection GoUnusedParameter func MemEqual(a unsafe.Pointer, b unsafe.Pointer, size uintptr) bool //go:linkname Mapiternext runtime.mapiternext func Mapiternext(it *GoMapIterator) //go:linkname Mapiterinit runtime.mapiterinit func Mapiterinit(t *GoMapType, m unsafe.Pointer, it *GoMapIterator) //go:linkname Maplen reflect.maplen func Maplen(h unsafe.Pointer) int //go:linkname IsValidNumber encoding/json.isValidNumber func IsValidNumber(s string) bool //go:nosplit //go:linkname MemclrHasPointers runtime.memclrHasPointers //goland:noinspection GoUnusedParameter func MemclrHasPointers(ptr unsafe.Pointer, n uintptr) //go:linkname MemclrNoHeapPointers runtime.memclrNoHeapPointers //goland:noinspection GoUnusedParameter func MemclrNoHeapPointers(ptr unsafe.Pointer, n uintptr) //go:linkname newarray runtime.newarray func newarray(typ *GoType, n int) unsafe.Pointer func add(p unsafe.Pointer, x uintptr) unsafe.Pointer { return unsafe.Pointer(uintptr(p) + x) } func ClearMemory(et *GoType, ptr unsafe.Pointer, size uintptr) { if et.PtrData == 0 { MemclrNoHeapPointers(ptr, size) } else { MemclrHasPointers(ptr, size) } } // runtime.maxElementSize const _max_map_element_size uintptr = 128 func 
IsMapfast(vt reflect.Type) bool { return vt.Elem().Size() <= _max_map_element_size } //go:linkname Mallocgc runtime.mallocgc //goland:noinspection GoUnusedParameter func Mallocgc(size uintptr, typ *GoType, needzero bool) unsafe.Pointer //go:linkname Makemap reflect.makemap func Makemap(*GoType, int) unsafe.Pointer //go:linkname MakemapSmall runtime.makemap_small func MakemapSmall() unsafe.Pointer //go:linkname Mapassign runtime.mapassign //goland:noinspection GoUnusedParameter func Mapassign(t *GoMapType, h unsafe.Pointer, k unsafe.Pointer) unsafe.Pointer //go:linkname Mapassign_fast32 runtime.mapassign_fast32 //goland:noinspection GoUnusedParameter func Mapassign_fast32(t *GoMapType, h unsafe.Pointer, k uint32) unsafe.Pointer //go:linkname Mapassign_fast64 runtime.mapassign_fast64 //goland:noinspection GoUnusedParameter func Mapassign_fast64(t *GoMapType, h unsafe.Pointer, k uint64) unsafe.Pointer //go:linkname Mapassign_faststr runtime.mapassign_faststr //goland:noinspection GoUnusedParameter func Mapassign_faststr(t *GoMapType, h unsafe.Pointer, s string) unsafe.Pointer type MapStrAssign func (t *GoMapType, h unsafe.Pointer, s string) unsafe.Pointer func GetMapStrAssign(vt reflect.Type) MapStrAssign { if IsMapfast(vt) { return Mapassign_faststr } else { return func (t *GoMapType, h unsafe.Pointer, s string) unsafe.Pointer { return Mapassign(t, h, unsafe.Pointer(&s)) } } } type Map32Assign func(t *GoMapType, h unsafe.Pointer, k uint32) unsafe.Pointer func GetMap32Assign(vt reflect.Type) Map32Assign { if IsMapfast(vt) { return Mapassign_fast32 } else { return func (t *GoMapType, h unsafe.Pointer, s uint32) unsafe.Pointer { return Mapassign(t, h, unsafe.Pointer(&s)) } } } type Map64Assign func(t *GoMapType, h unsafe.Pointer, k uint64) unsafe.Pointer func GetMap64Assign(vt reflect.Type) Map64Assign { if IsMapfast(vt) { return Mapassign_fast64 } else { return func (t *GoMapType, h unsafe.Pointer, s uint64) unsafe.Pointer { return Mapassign(t, h, unsafe.Pointer(&s)) } 
} } var emptyBytes = make([]byte, 0, 0) var EmptySlice = *(*GoSlice)(unsafe.Pointer(&emptyBytes)) //go:linkname MakeSliceStd runtime.makeslice //goland:noinspection GoUnusedParameter func MakeSliceStd(et *GoType, len int, cap int) unsafe.Pointer func MakeSlice(oldPtr unsafe.Pointer, et *GoType, newLen int) *GoSlice { if newLen == 0 { return &EmptySlice } if *(*unsafe.Pointer)(oldPtr) == nil { return &GoSlice{ Ptr: MakeSliceStd(et, newLen, newLen), Len: newLen, Cap: newLen, } } old := (*GoSlice)(oldPtr) if old.Cap >= newLen { old.Len = newLen return old } new := GrowSlice(et, *old, newLen) // we should clear the memory from [oldLen:newLen] if et.PtrData == 0 { oldlenmem := uintptr(old.Len) * et.Size newlenmem := uintptr(newLen) * et.Size MemclrNoHeapPointers(add(new.Ptr, oldlenmem), newlenmem-oldlenmem) } new.Len = newLen return &new } //go:nosplit //go:linkname Throw runtime.throw //goland:noinspection GoUnusedParameter func Throw(s string) //go:linkname ConvT64 runtime.convT64 //goland:noinspection GoUnusedParameter func ConvT64(v uint64) unsafe.Pointer //go:linkname ConvTslice runtime.convTslice //goland:noinspection GoUnusedParameter func ConvTslice(v []byte) unsafe.Pointer //go:linkname ConvTstring runtime.convTstring //goland:noinspection GoUnusedParameter func ConvTstring(v string) unsafe.Pointer //go:linkname Mapassign_fast64ptr runtime.mapassign_fast64ptr //goland:noinspection GoUnusedParameter func Mapassign_fast64ptr(t *GoMapType, h unsafe.Pointer, k unsafe.Pointer) unsafe.Pointer //go:noescape //go:linkname Strhash runtime.strhash func Strhash(_ unsafe.Pointer, _ uintptr) uintptr
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/bytedance/sonic/internal/rt/pool.go
vendor/github.com/bytedance/sonic/internal/rt/pool.go
package rt import ( "unsafe" ) type SlicePool struct { pool unsafe.Pointer len int index int typ uintptr } func NewPool(typ *GoType, size int) SlicePool { return SlicePool{pool: newarray(typ, size), len: size, typ: uintptr(unsafe.Pointer(typ))} } func (self *SlicePool) GetSlice(size int) unsafe.Pointer { // pool is full, fallback to normal alloc if size > self.Remain() { return newarray(AsGoType(self.typ), size) } ptr := PtrAdd(self.pool, uintptr(self.index)* AsGoType(self.typ).Size) self.index += size return ptr } func (self *SlicePool) Remain() int { return self.len - self.index }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/bytedance/sonic/internal/rt/fastconv.go
vendor/github.com/bytedance/sonic/internal/rt/fastconv.go
package rt import ( "unsafe" "encoding/json" ) // Copied from Golang var staticuint64s = [...]uint64{ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, } const maxZero = 1024 // must match value in reflect/value.go:maxZero cmd/compile/internal/gc/walk.go:zeroValSize var zeroVal [maxZero]byte type TslicePool struct { pool []GoSlice index int } func NewTslicePool (hint int) TslicePool { return TslicePool{ pool: make([]GoSlice, hint, hint), index: 0, } } func (self *TslicePool) Conv(val GoSlice, typ *GoType, ep 
*interface{}) { var vp unsafe.Pointer if ((*GoSlice)(unsafe.Pointer(&val))).Ptr == nil { vp = unsafe.Pointer(&zeroVal[0]) } else if self.index < len(self.pool) { dst := &(self.pool)[self.index] *dst = val self.index++ vp = unsafe.Pointer(dst) } else { vp = Mallocgc(unsafe.Sizeof(val), BytesType, true) } *((*GoEface)(unsafe.Pointer(ep))) = GoEface{Type: typ, Value: vp} } func (self *TslicePool) Free() { self.pool = nil } type TstringPool struct { pool []string index int } func NewTstringPool (hint int) TstringPool { return TstringPool{ pool: make([]string, hint), index: 0, } } func (self *TstringPool) Conv(val string, ep *interface{}) { var vp unsafe.Pointer if val == "" { vp = unsafe.Pointer(&zeroVal[0]) } else if self.index < len(self.pool) { dst := &(self.pool)[self.index] *dst = val self.index++ vp = unsafe.Pointer(dst) } else { vp = Mallocgc(unsafe.Sizeof(val), StringType, true) } // convert into interface{} *((*GoEface)(unsafe.Pointer(ep))) = GoEface{Type: StringType, Value: vp} } func (self *TstringPool) ConvNum(val json.Number, ep *interface{}) { var vp unsafe.Pointer if val == "" { vp = unsafe.Pointer(&zeroVal[0]) } else if self.index < len(self.pool) { dst := &(self.pool)[self.index] *dst = string(val) self.index++ vp = unsafe.Pointer(dst) } else { vp = Mallocgc(unsafe.Sizeof(val), StringType, true) } // convert into interface{} *((*GoEface)(unsafe.Pointer(ep))) = GoEface{Type: JsonNumberType, Value: vp} } func (self *TstringPool) Free() { self.pool = nil } type T64Pool struct { pool []uint64 index int } func NewT64Pool (hint int) T64Pool { return T64Pool{ pool: make([]uint64, hint, hint), index: 0, } } func (self *T64Pool) Conv(val uint64, typ *GoType, ep *interface{}) { var vp unsafe.Pointer if val < uint64(len(staticuint64s)) { vp = unsafe.Pointer(&staticuint64s[val]) } else if self.index < len(self.pool) { dst := &(self.pool)[self.index] *dst = val self.index++ vp = unsafe.Pointer(dst) } else { vp = Mallocgc(8, Uint64Type, false) } // convert into 
interface{} *((*GoEface)(unsafe.Pointer(ep))) = GoEface{Type: typ, Value: vp} } func (self *T64Pool) Free() { self.pool = nil } func ConvTBool(val bool, ep *interface{}) { var vp unsafe.Pointer if val { vp = unsafe.Pointer(&staticuint64s[1]) } else { vp = unsafe.Pointer(&staticuint64s[0]) } *((*GoEface)(unsafe.Pointer(ep))) = GoEface{Type: BoolType, Value: vp} }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/bytedance/sonic/internal/rt/map_nosiwss_go124.go
vendor/github.com/bytedance/sonic/internal/rt/map_nosiwss_go124.go
//go:build go1.24 && !go1.25 && !goexperiment.swissmap // +build go1.24,!go1.25,!goexperiment.swissmap package rt import ( "unsafe" ) type GoMapIterator struct { K unsafe.Pointer V unsafe.Pointer T *GoMapType H unsafe.Pointer Buckets unsafe.Pointer Bptr *unsafe.Pointer Overflow *[]unsafe.Pointer OldOverflow *[]unsafe.Pointer StartBucket uintptr Offset uint8 Wrapped bool B uint8 I uint8 Bucket uintptr CheckBucket uintptr // different from go1.23 ClearSeq uint64 }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/bytedance/sonic/internal/rt/gcwb.go
vendor/github.com/bytedance/sonic/internal/rt/gcwb.go
// +build go1.21,!go1.25 /* * Copyright 2021 ByteDance Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package rt import ( `sync/atomic` `unsafe` `golang.org/x/arch/x86/x86asm` ) //go:linkname GcWriteBarrier2 runtime.gcWriteBarrier2 func GcWriteBarrier2() //go:linkname RuntimeWriteBarrier runtime.writeBarrier var RuntimeWriteBarrier uintptr const ( _MaxInstr = 15 ) func isvar(arg x86asm.Arg) bool { v, ok := arg.(x86asm.Mem) return ok && v.Base == x86asm.RIP } func iszero(arg x86asm.Arg) bool { v, ok := arg.(x86asm.Imm) return ok && v == 0 } func GcwbAddr() uintptr { var err error var off uintptr var ins x86asm.Inst /* get the function address */ pc := uintptr(0) fp := FuncAddr(atomic.StorePointer) /* search within the first 16 instructions */ for i := 0; i < 16; i++ { mem := unsafe.Pointer(uintptr(fp) + pc) buf := BytesFrom(mem, _MaxInstr, _MaxInstr) /* disassemble the instruction */ if ins, err = x86asm.Decode(buf, 64); err != nil { panic("gcwbaddr: " + err.Error()) } /* check for a byte comparison with zero */ if ins.Op == x86asm.CMP && ins.MemBytes == 1 && isvar(ins.Args[0]) && iszero(ins.Args[1]) { off = pc + uintptr(ins.Len) + uintptr(ins.Args[0].(x86asm.Mem).Disp) break } /* move to next instruction */ nb := ins.Len pc += uintptr(nb) } /* check for address */ if off == 0 { panic("gcwbaddr: could not locate the variable `writeBarrier`") } else { return uintptr(fp) + off } }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/bytedance/sonic/internal/rt/map_siwss_go124.go
vendor/github.com/bytedance/sonic/internal/rt/map_siwss_go124.go
//go:build go1.24 && !go1.25 && goexperiment.swissmap // +build go1.24,!go1.25,goexperiment.swissmap package rt import ( "unsafe" ) type GoMapIterator struct { K unsafe.Pointer V unsafe.Pointer T *GoMapType It unsafe.Pointer }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/bytedance/sonic/internal/rt/assertI2I.go
vendor/github.com/bytedance/sonic/internal/rt/assertI2I.go
// +build go1.17 /* * Copyright 2021 ByteDance Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package rt import ( _ `unsafe` ) func AssertI2I(t *GoType, i GoIface) (r GoIface) { inter := IfaceType(t) tab := i.Itab if tab == nil { return } if (*GoInterfaceType)(tab.it) != inter { tab = GetItab(inter, tab.Vt, true) if tab == nil { return } } r.Itab = tab r.Value = i.Value return }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/bytedance/sonic/internal/rt/int48.go
vendor/github.com/bytedance/sonic/internal/rt/int48.go
/* * Copyright 2021 ByteDance Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package rt const ( MinInt48 int64 = -(1 << 47) MaxInt48 int64 = +(1 << 47) - 1 ) func PackInt(v int) uint64 { if u := uint64(v); int64(v) < MinInt48 || int64(v) > MaxInt48 { panic("int48 out of range") } else { return ((u >> 63) << 47) | (u & 0x00007fffffffffff) } } func UnpackInt(v uint64) int { v &= 0x0000ffffffffffff v |= (v >> 47) * (0xffff << 48) return int(v) }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/bytedance/sonic/internal/rt/growslice.go
vendor/github.com/bytedance/sonic/internal/rt/growslice.go
// +build go1.20 /* * Copyright 2021 ByteDance Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package rt import "unsafe" // Growslice to newCap, not append length // Note: the [old, newCap) will not be zeroed if et does not have any ptr data. func GrowSlice(et *GoType, old GoSlice, newCap int) GoSlice { if newCap < old.Len { panic("growslice's newCap is smaller than old length") } s := growslice(old.Ptr, newCap, old.Cap, newCap - old.Len, et) s.Len = old.Len return s } //go:linkname growslice runtime.growslice //goland:noinspection GoUnusedParameter func growslice(oldPtr unsafe.Pointer, newLen, oldCap, num int, et *GoType) GoSlice
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/bytedance/sonic/internal/rt/fastmem.go
vendor/github.com/bytedance/sonic/internal/rt/fastmem.go
/* * Copyright 2021 ByteDance Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package rt import ( "reflect" "unsafe" "github.com/bytedance/sonic/option" ) //go:nosplit func Get16(v []byte) int16 { return *(*int16)((*GoSlice)(unsafe.Pointer(&v)).Ptr) } //go:nosplit func Get32(v []byte) int32 { return *(*int32)((*GoSlice)(unsafe.Pointer(&v)).Ptr) } //go:nosplit func Get64(v []byte) int64 { return *(*int64)((*GoSlice)(unsafe.Pointer(&v)).Ptr) } //go:nosplit func Mem2Str(v []byte) (s string) { (*GoString)(unsafe.Pointer(&s)).Len = (*GoSlice)(unsafe.Pointer(&v)).Len (*GoString)(unsafe.Pointer(&s)).Ptr = (*GoSlice)(unsafe.Pointer(&v)).Ptr return } //go:nosplit func Str2Mem(s string) (v []byte) { (*GoSlice)(unsafe.Pointer(&v)).Cap = (*GoString)(unsafe.Pointer(&s)).Len (*GoSlice)(unsafe.Pointer(&v)).Len = (*GoString)(unsafe.Pointer(&s)).Len (*GoSlice)(unsafe.Pointer(&v)).Ptr = (*GoString)(unsafe.Pointer(&s)).Ptr return } func BytesFrom(p unsafe.Pointer, n int, c int) (r []byte) { (*GoSlice)(unsafe.Pointer(&r)).Ptr = p (*GoSlice)(unsafe.Pointer(&r)).Len = n (*GoSlice)(unsafe.Pointer(&r)).Cap = c return } func FuncAddr(f interface{}) unsafe.Pointer { if vv := UnpackEface(f); vv.Type.Kind() != reflect.Func { panic("f is not a function") } else { return *(*unsafe.Pointer)(vv.Value) } } //go:nocheckptr func IndexChar(src string, index int) unsafe.Pointer { return unsafe.Pointer(uintptr((*GoString)(unsafe.Pointer(&src)).Ptr) + uintptr(index)) } //go:nocheckptr func IndexByte(ptr []byte, 
index int) unsafe.Pointer { return unsafe.Pointer(uintptr((*GoSlice)(unsafe.Pointer(&ptr)).Ptr) + uintptr(index)) } func GuardSlice(buf *[]byte, n int) { c := cap(*buf) l := len(*buf) if c-l < n { c = c>>1 + n + l if c < 32 { c = 32 } tmp := make([]byte, l, c) copy(tmp, *buf) *buf = tmp } } func GuardSlice2(buf []byte, n int) []byte { c := cap(buf) l := len(buf) if c-l < n { c = c>>1 + n + l if c < 32 { c = 32 } tmp := make([]byte, l, c) copy(tmp, buf) buf = tmp } return buf } //go:nosplit func Ptr2SlicePtr(s unsafe.Pointer, l int, c int) unsafe.Pointer { slice := &GoSlice{ Ptr: s, Len: l, Cap: c, } return unsafe.Pointer(slice) } //go:nosplit func StrPtr(s string) unsafe.Pointer { return (*GoString)(unsafe.Pointer(&s)).Ptr } //go:nosplit func StrFrom(p unsafe.Pointer, n int64) (s string) { (*GoString)(unsafe.Pointer(&s)).Ptr = p (*GoString)(unsafe.Pointer(&s)).Len = int(n) return } // NoEscape hides a pointer from escape analysis. NoEscape is // the identity function but escape analysis doesn't think the // output depends on the input. NoEscape is inlined and currently // compiles down to zero instructions. // USE CAREFULLY! //go:nosplit //goland:noinspection GoVetUnsafePointer func NoEscape(p unsafe.Pointer) unsafe.Pointer { x := uintptr(p) return unsafe.Pointer(x ^ 0) } //go:nosplit func MoreStack(size uintptr) //go:nosplit func Add(ptr unsafe.Pointer, off uintptr) unsafe.Pointer { return unsafe.Pointer(uintptr(ptr) + off) } // CanSizeResue func CanSizeResue(cap int) bool { return cap <= int(option.LimitBufferSize) }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/bytedance/sonic/internal/rt/base64_amd64.go
vendor/github.com/bytedance/sonic/internal/rt/base64_amd64.go
// +build amd64,go1.17,!go1.25 package rt import ( _ "unsafe" "github.com/cloudwego/base64x" ) func DecodeBase64(raw []byte) ([]byte, error) { ret := make([]byte, base64x.StdEncoding.DecodedLen(len(raw))) n, err := base64x.StdEncoding.Decode(ret, raw) if err != nil { return nil, err } return ret[:n], nil } func EncodeBase64ToString(src []byte) string { return base64x.StdEncoding.EncodeToString(src) } func EncodeBase64(buf []byte, src []byte) []byte { if len(src) == 0 { return append(buf, '"', '"') } buf = append(buf, '"') need := base64x.StdEncoding.EncodedLen(len(src)) if cap(buf) - len(buf) < need { tmp := make([]byte, len(buf), len(buf) + need*2) copy(tmp, buf) buf = tmp } base64x.StdEncoding.Encode(buf[len(buf):cap(buf)], src) buf = buf[:len(buf) + need] buf = append(buf, '"') return buf } //go:linkname SubrB64Decode github.com/cloudwego/base64x._subr__b64decode var SubrB64Decode uintptr //go:linkname SubrB64Encode github.com/cloudwego/base64x._subr__b64encode var SubrB64Encode uintptr
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/bytedance/sonic/internal/rt/gcwb_legacy.go
vendor/github.com/bytedance/sonic/internal/rt/gcwb_legacy.go
// +build go1.17,!go1.21 /* * Copyright 2021 ByteDance Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package rt import ( _ `unsafe` ) //go:linkname GcWriteBarrierAX runtime.gcWriteBarrier func GcWriteBarrierAX() //go:linkname RuntimeWriteBarrier runtime.writeBarrier var RuntimeWriteBarrier uintptr
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/bytedance/sonic/internal/caching/pcache.go
vendor/github.com/bytedance/sonic/internal/caching/pcache.go
/* * Copyright 2021 ByteDance Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package caching import ( `sync` `sync/atomic` `unsafe` `github.com/bytedance/sonic/internal/rt` ) /** Program Map **/ const ( _LoadFactor = 0.5 _InitCapacity = 4096 // must be a power of 2 ) type _ProgramMap struct { n uint64 m uint32 b []_ProgramEntry } type _ProgramEntry struct { vt *rt.GoType fn interface{} } func newProgramMap() *_ProgramMap { return &_ProgramMap { n: 0, m: _InitCapacity - 1, b: make([]_ProgramEntry, _InitCapacity), } } func (self *_ProgramMap) copy() *_ProgramMap { fork := &_ProgramMap{ n: self.n, m: self.m, b: make([]_ProgramEntry, len(self.b)), } for i, f := range self.b { fork.b[i] = f } return fork } func (self *_ProgramMap) get(vt *rt.GoType) interface{} { i := self.m + 1 p := vt.Hash & self.m /* linear probing */ for ; i > 0; i-- { if b := self.b[p]; b.vt == vt { return b.fn } else if b.vt == nil { break } else { p = (p + 1) & self.m } } /* not found */ return nil } func (self *_ProgramMap) add(vt *rt.GoType, fn interface{}) *_ProgramMap { p := self.copy() f := float64(atomic.LoadUint64(&p.n) + 1) / float64(p.m + 1) /* check for load factor */ if f > _LoadFactor { p = p.rehash() } /* insert the value */ p.insert(vt, fn) return p } func (self *_ProgramMap) rehash() *_ProgramMap { c := (self.m + 1) << 1 r := &_ProgramMap{m: c - 1, b: make([]_ProgramEntry, int(c))} /* rehash every entry */ for i := uint32(0); i <= self.m; i++ { if b := self.b[i]; b.vt != nil { 
r.insert(b.vt, b.fn) } } /* rebuild successful */ return r } func (self *_ProgramMap) insert(vt *rt.GoType, fn interface{}) { h := vt.Hash p := h & self.m /* linear probing */ for i := uint32(0); i <= self.m; i++ { if b := &self.b[p]; b.vt != nil { p += 1 p &= self.m } else { b.vt = vt b.fn = fn atomic.AddUint64(&self.n, 1) return } } /* should never happens */ panic("no available slots") } /** RCU Program Cache **/ type ProgramCache struct { m sync.Mutex p unsafe.Pointer } func CreateProgramCache() *ProgramCache { return &ProgramCache { m: sync.Mutex{}, p: unsafe.Pointer(newProgramMap()), } } func (self *ProgramCache) Get(vt *rt.GoType) interface{} { return (*_ProgramMap)(atomic.LoadPointer(&self.p)).get(vt) } func (self *ProgramCache) Compute(vt *rt.GoType, compute func(*rt.GoType, ... interface{}) (interface{}, error), ex ...interface{}) (interface{}, error) { var err error var val interface{} /* use defer to prevent inlining of this function */ self.m.Lock() defer self.m.Unlock() /* double check with write lock held */ if val = self.Get(vt); val != nil { return val, nil } /* compute the value */ if val, err = compute(vt, ex...); err != nil { return nil, err } /* update the RCU cache */ atomic.StorePointer(&self.p, unsafe.Pointer((*_ProgramMap)(atomic.LoadPointer(&self.p)).add(vt, val))) return val, nil }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/bytedance/sonic/internal/caching/fcache.go
vendor/github.com/bytedance/sonic/internal/caching/fcache.go
/* * Copyright 2021 ByteDance Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package caching import ( `strings` `unsafe` `github.com/bytedance/sonic/internal/rt` ) type FieldMap struct { N uint64 b unsafe.Pointer m map[string]int } type FieldEntry struct { ID int Name string Hash uint64 } const ( FieldMap_N = int64(unsafe.Offsetof(FieldMap{}.N)) FieldMap_b = int64(unsafe.Offsetof(FieldMap{}.b)) FieldEntrySize = int64(unsafe.Sizeof(FieldEntry{})) ) func newBucket(n int) unsafe.Pointer { v := make([]FieldEntry, n) return (*rt.GoSlice)(unsafe.Pointer(&v)).Ptr } func CreateFieldMap(n int) *FieldMap { return &FieldMap { N: uint64(n * 2), b: newBucket(n * 2), // LoadFactor = 0.5 m: make(map[string]int, n * 2), } } func (self *FieldMap) At(p uint64) *FieldEntry { off := uintptr(p) * uintptr(FieldEntrySize) return (*FieldEntry)(unsafe.Pointer(uintptr(self.b) + off)) } // Get searches FieldMap by name. JIT generated assembly does NOT call this // function, rather it implements its own version directly in assembly. So // we must ensure this function stays in sync with the JIT generated one. 
func (self *FieldMap) Get(name string) int { h := StrHash(name) p := h % self.N s := self.At(p) /* find the element; * the hash map is never full, so the loop will always terminate */ for s.Hash != 0 { if s.Hash == h && s.Name == name { return s.ID } else { p = (p + 1) % self.N s = self.At(p) } } /* not found */ return -1 } func (self *FieldMap) Set(name string, i int) { h := StrHash(name) p := h % self.N s := self.At(p) /* searching for an empty slot; * the hash map is never full, so the loop will always terminate */ for s.Hash != 0 { p = (p + 1) % self.N s = self.At(p) } /* set the value */ s.ID = i s.Hash = h s.Name = name /* add the case-insensitive version, prefer the one with smaller field ID */ key := strings.ToLower(name) if v, ok := self.m[key]; !ok || i < v { self.m[key] = i } } func (self *FieldMap) GetCaseInsensitive(name string) int { if i, ok := self.m[strings.ToLower(name)]; ok { return i } else { return -1 } }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/bytedance/sonic/internal/caching/hashing.go
vendor/github.com/bytedance/sonic/internal/caching/hashing.go
/* * Copyright 2021 ByteDance Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package caching import ( `unsafe` `github.com/bytedance/sonic/internal/rt` ) var ( V_strhash = rt.UnpackEface(rt.Strhash) S_strhash = *(*uintptr)(V_strhash.Value) ) func StrHash(s string) uint64 { if v := rt.Strhash(unsafe.Pointer(&s), 0); v == 0 { return 1 } else { return uint64(v) } }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/bytedance/sonic/unquote/unquote.go
vendor/github.com/bytedance/sonic/unquote/unquote.go
/* * Copyright 2021 ByteDance Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package unquote import ( `unsafe` `runtime` `github.com/bytedance/sonic/internal/native` `github.com/bytedance/sonic/internal/native/types` `github.com/bytedance/sonic/internal/rt` ) // String unescapes an escaped string (not including `"` at beginning and end) // It validates invalid UTF8 and replace with `\ufffd` func String(s string) (ret string, err types.ParsingError) { mm := make([]byte, 0, len(s)) err = intoBytesUnsafe(s, &mm, true) ret = rt.Mem2Str(mm) return } // IntoBytes is same with String besides it output result into a buffer m func IntoBytes(s string, m *[]byte) types.ParsingError { if cap(*m) < len(s) { return types.ERR_EOF } else { return intoBytesUnsafe(s, m, true) } } // String unescapes an escaped string (not including `"` at beginning and end) // - replace enables replacing invalid utf8 escaped char with `\uffd` func _String(s string, replace bool) (ret string, err error) { mm := make([]byte, 0, len(s)) err = intoBytesUnsafe(s, &mm, replace) ret = rt.Mem2Str(mm) return } func intoBytesUnsafe(s string, m *[]byte, replace bool) types.ParsingError { pos := -1 slv := (*rt.GoSlice)(unsafe.Pointer(m)) str := (*rt.GoString)(unsafe.Pointer(&s)) flags := uint64(0) if replace { /* unquote as the default configuration, replace invalid unicode with \ufffd */ flags |= types.F_UNICODE_REPLACE } ret := native.Unquote(str.Ptr, str.Len, slv.Ptr, &pos, flags) /* check for errors */ if ret < 0 
{ return types.ParsingError(-ret) } /* update the length */ slv.Len = ret runtime.KeepAlive(s) return 0 }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/bytedance/sonic/ast/parser.go
vendor/github.com/bytedance/sonic/ast/parser.go
/* * Copyright 2021 ByteDance Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package ast import ( "fmt" "sync" "sync/atomic" "github.com/bytedance/sonic/internal/native/types" "github.com/bytedance/sonic/internal/rt" ) const ( _DEFAULT_NODE_CAP int = 16 _APPEND_GROW_SHIFT = 1 ) const ( _ERR_NOT_FOUND types.ParsingError = 33 _ERR_UNSUPPORT_TYPE types.ParsingError = 34 ) var ( // ErrNotExist means both key and value doesn't exist ErrNotExist error = newError(_ERR_NOT_FOUND, "value not exists") // ErrUnsupportType means API on the node is unsupported ErrUnsupportType error = newError(_ERR_UNSUPPORT_TYPE, "unsupported type") ) type Parser struct { p int s string noLazy bool loadOnce bool skipValue bool dbuf *byte } /** Parser Private Methods **/ func (self *Parser) delim() types.ParsingError { n := len(self.s) p := self.lspace(self.p) /* check for EOF */ if p >= n { return types.ERR_EOF } /* check for the delimtier */ if self.s[p] != ':' { return types.ERR_INVALID_CHAR } /* update the read pointer */ self.p = p + 1 return 0 } func (self *Parser) object() types.ParsingError { n := len(self.s) p := self.lspace(self.p) /* check for EOF */ if p >= n { return types.ERR_EOF } /* check for the delimtier */ if self.s[p] != '{' { return types.ERR_INVALID_CHAR } /* update the read pointer */ self.p = p + 1 return 0 } func (self *Parser) array() types.ParsingError { n := len(self.s) p := self.lspace(self.p) /* check for EOF */ if p >= n { return types.ERR_EOF } /* check for the 
delimtier */ if self.s[p] != '[' { return types.ERR_INVALID_CHAR } /* update the read pointer */ self.p = p + 1 return 0 } func (self *Parser) lspace(sp int) int { ns := len(self.s) for ; sp<ns && isSpace(self.s[sp]); sp+=1 {} return sp } func (self *Parser) backward() { for ; self.p >= 0 && isSpace(self.s[self.p]); self.p-=1 {} } func (self *Parser) decodeArray(ret *linkedNodes) (Node, types.ParsingError) { sp := self.p ns := len(self.s) /* check for EOF */ if self.p = self.lspace(sp); self.p >= ns { return Node{}, types.ERR_EOF } /* check for empty array */ if self.s[self.p] == ']' { self.p++ return Node{t: types.V_ARRAY}, 0 } /* allocate array space and parse every element */ for { var val Node var err types.ParsingError if self.skipValue { /* skip the value */ var start int if start, err = self.skipFast(); err != 0 { return Node{}, err } if self.p > ns { return Node{}, types.ERR_EOF } t := switchRawType(self.s[start]) if t == _V_NONE { return Node{}, types.ERR_INVALID_CHAR } val = newRawNode(self.s[start:self.p], t, false) }else{ /* decode the value */ if val, err = self.Parse(); err != 0 { return Node{}, err } } /* add the value to result */ ret.Push(val) self.p = self.lspace(self.p) /* check for EOF */ if self.p >= ns { return Node{}, types.ERR_EOF } /* check for the next character */ switch self.s[self.p] { case ',' : self.p++ case ']' : self.p++; return newArray(ret), 0 default: // if val.isLazy() { // return newLazyArray(self, ret), 0 // } return Node{}, types.ERR_INVALID_CHAR } } } func (self *Parser) decodeObject(ret *linkedPairs) (Node, types.ParsingError) { sp := self.p ns := len(self.s) /* check for EOF */ if self.p = self.lspace(sp); self.p >= ns { return Node{}, types.ERR_EOF } /* check for empty object */ if self.s[self.p] == '}' { self.p++ return Node{t: types.V_OBJECT}, 0 } /* decode each pair */ for { var val Node var njs types.JsonState var err types.ParsingError /* decode the key */ if njs = self.decodeValue(); njs.Vt != types.V_STRING { 
return Node{}, types.ERR_INVALID_CHAR } /* extract the key */ idx := self.p - 1 key := self.s[njs.Iv:idx] /* check for escape sequence */ if njs.Ep != -1 { if key, err = unquote(key); err != 0 { return Node{}, err } } /* expect a ':' delimiter */ if err = self.delim(); err != 0 { return Node{}, err } if self.skipValue { /* skip the value */ var start int if start, err = self.skipFast(); err != 0 { return Node{}, err } if self.p > ns { return Node{}, types.ERR_EOF } t := switchRawType(self.s[start]) if t == _V_NONE { return Node{}, types.ERR_INVALID_CHAR } val = newRawNode(self.s[start:self.p], t, false) } else { /* decode the value */ if val, err = self.Parse(); err != 0 { return Node{}, err } } /* add the value to result */ // FIXME: ret's address may change here, thus previous referred node in ret may be invalid !! ret.Push(NewPair(key, val)) self.p = self.lspace(self.p) /* check for EOF */ if self.p >= ns { return Node{}, types.ERR_EOF } /* check for the next character */ switch self.s[self.p] { case ',' : self.p++ case '}' : self.p++; return newObject(ret), 0 default: // if val.isLazy() { // return newLazyObject(self, ret), 0 // } return Node{}, types.ERR_INVALID_CHAR } } } func (self *Parser) decodeString(iv int64, ep int) (Node, types.ParsingError) { p := self.p - 1 s := self.s[iv:p] /* fast path: no escape sequence */ if ep == -1 { return NewString(s), 0 } /* unquote the string */ out, err := unquote(s) /* check for errors */ if err != 0 { return Node{}, err } else { return newBytes(rt.Str2Mem(out)), 0 } } /** Parser Interface **/ func (self *Parser) Pos() int { return self.p } // Parse returns a ast.Node representing the parser's JSON. 
// NOTICE: the specific parsing lazy dependens parser's option // It only parse first layer and first child for Object or Array be default func (self *Parser) Parse() (Node, types.ParsingError) { switch val := self.decodeValue(); val.Vt { case types.V_EOF : return Node{}, types.ERR_EOF case types.V_NULL : return nullNode, 0 case types.V_TRUE : return trueNode, 0 case types.V_FALSE : return falseNode, 0 case types.V_STRING : return self.decodeString(val.Iv, val.Ep) case types.V_ARRAY: s := self.p - 1; if p := skipBlank(self.s, self.p); p >= self.p && self.s[p] == ']' { self.p = p + 1 return Node{t: types.V_ARRAY}, 0 } if self.noLazy { if self.loadOnce { self.noLazy = false } return self.decodeArray(new(linkedNodes)) } // NOTICE: loadOnce always keep raw json for object or array if self.loadOnce { self.p = s s, e := self.skipFast() if e != 0 { return Node{}, e } return newRawNode(self.s[s:self.p], types.V_ARRAY, true), 0 } return newLazyArray(self), 0 case types.V_OBJECT: s := self.p - 1; if p := skipBlank(self.s, self.p); p >= self.p && self.s[p] == '}' { self.p = p + 1 return Node{t: types.V_OBJECT}, 0 } // NOTICE: loadOnce always keep raw json for object or array if self.noLazy { if self.loadOnce { self.noLazy = false } return self.decodeObject(new(linkedPairs)) } if self.loadOnce { self.p = s s, e := self.skipFast() if e != 0 { return Node{}, e } return newRawNode(self.s[s:self.p], types.V_OBJECT, true), 0 } return newLazyObject(self), 0 case types.V_DOUBLE : return NewNumber(self.s[val.Ep:self.p]), 0 case types.V_INTEGER : return NewNumber(self.s[val.Ep:self.p]), 0 default : return Node{}, types.ParsingError(-val.Vt) } } func (self *Parser) searchKey(match string) types.ParsingError { ns := len(self.s) if err := self.object(); err != 0 { return err } /* check for EOF */ if self.p = self.lspace(self.p); self.p >= ns { return types.ERR_EOF } /* check for empty object */ if self.s[self.p] == '}' { self.p++ return _ERR_NOT_FOUND } var njs types.JsonState var err 
types.ParsingError /* decode each pair */ for { /* decode the key */ if njs = self.decodeValue(); njs.Vt != types.V_STRING { return types.ERR_INVALID_CHAR } /* extract the key */ idx := self.p - 1 key := self.s[njs.Iv:idx] /* check for escape sequence */ if njs.Ep != -1 { if key, err = unquote(key); err != 0 { return err } } /* expect a ':' delimiter */ if err = self.delim(); err != 0 { return err } /* skip value */ if key != match { if _, err = self.skipFast(); err != 0 { return err } } else { return 0 } /* check for EOF */ self.p = self.lspace(self.p) if self.p >= ns { return types.ERR_EOF } /* check for the next character */ switch self.s[self.p] { case ',': self.p++ case '}': self.p++ return _ERR_NOT_FOUND default: return types.ERR_INVALID_CHAR } } } func (self *Parser) searchIndex(idx int) types.ParsingError { ns := len(self.s) if err := self.array(); err != 0 { return err } /* check for EOF */ if self.p = self.lspace(self.p); self.p >= ns { return types.ERR_EOF } /* check for empty array */ if self.s[self.p] == ']' { self.p++ return _ERR_NOT_FOUND } var err types.ParsingError /* allocate array space and parse every element */ for i := 0; i < idx; i++ { /* decode the value */ if _, err = self.skipFast(); err != 0 { return err } /* check for EOF */ self.p = self.lspace(self.p) if self.p >= ns { return types.ERR_EOF } /* check for the next character */ switch self.s[self.p] { case ',': self.p++ case ']': self.p++ return _ERR_NOT_FOUND default: return types.ERR_INVALID_CHAR } } return 0 } func (self *Node) skipNextNode() *Node { if !self.isLazy() { return nil } parser, stack := self.getParserAndArrayStack() ret := &stack.v sp := parser.p ns := len(parser.s) /* check for EOF */ if parser.p = parser.lspace(sp); parser.p >= ns { return newSyntaxError(parser.syntaxError(types.ERR_EOF)) } /* check for empty array */ if parser.s[parser.p] == ']' { parser.p++ self.setArray(ret) return nil } var val Node /* skip the value */ if start, err := parser.skipFast(); err != 0 { 
return newSyntaxError(parser.syntaxError(err)) } else { t := switchRawType(parser.s[start]) if t == _V_NONE { return newSyntaxError(parser.syntaxError(types.ERR_INVALID_CHAR)) } val = newRawNode(parser.s[start:parser.p], t, false) } /* add the value to result */ ret.Push(val) self.l++ parser.p = parser.lspace(parser.p) /* check for EOF */ if parser.p >= ns { return newSyntaxError(parser.syntaxError(types.ERR_EOF)) } /* check for the next character */ switch parser.s[parser.p] { case ',': parser.p++ return ret.At(ret.Len()-1) case ']': parser.p++ self.setArray(ret) return ret.At(ret.Len()-1) default: return newSyntaxError(parser.syntaxError(types.ERR_INVALID_CHAR)) } } func (self *Node) skipNextPair() (*Pair) { if !self.isLazy() { return nil } parser, stack := self.getParserAndObjectStack() ret := &stack.v sp := parser.p ns := len(parser.s) /* check for EOF */ if parser.p = parser.lspace(sp); parser.p >= ns { return newErrorPair(parser.syntaxError(types.ERR_EOF)) } /* check for empty object */ if parser.s[parser.p] == '}' { parser.p++ self.setObject(ret) return nil } /* decode one pair */ var val Node var njs types.JsonState var err types.ParsingError /* decode the key */ if njs = parser.decodeValue(); njs.Vt != types.V_STRING { return newErrorPair(parser.syntaxError(types.ERR_INVALID_CHAR)) } /* extract the key */ idx := parser.p - 1 key := parser.s[njs.Iv:idx] /* check for escape sequence */ if njs.Ep != -1 { if key, err = unquote(key); err != 0 { return newErrorPair(parser.syntaxError(err)) } } /* expect a ':' delimiter */ if err = parser.delim(); err != 0 { return newErrorPair(parser.syntaxError(err)) } /* skip the value */ if start, err := parser.skipFast(); err != 0 { return newErrorPair(parser.syntaxError(err)) } else { t := switchRawType(parser.s[start]) if t == _V_NONE { return newErrorPair(parser.syntaxError(types.ERR_INVALID_CHAR)) } val = newRawNode(parser.s[start:parser.p], t, false) } /* add the value to result */ ret.Push(NewPair(key, val)) self.l++ 
parser.p = parser.lspace(parser.p) /* check for EOF */ if parser.p >= ns { return newErrorPair(parser.syntaxError(types.ERR_EOF)) } /* check for the next character */ switch parser.s[parser.p] { case ',': parser.p++ return ret.At(ret.Len()-1) case '}': parser.p++ self.setObject(ret) return ret.At(ret.Len()-1) default: return newErrorPair(parser.syntaxError(types.ERR_INVALID_CHAR)) } } /** Parser Factory **/ // Loads parse all json into interface{} func Loads(src string) (int, interface{}, error) { ps := &Parser{s: src} np, err := ps.Parse() /* check for errors */ if err != 0 { return 0, nil, ps.ExportError(err) } else { x, err := np.Interface() if err != nil { return 0, nil, err } return ps.Pos(), x, nil } } // LoadsUseNumber parse all json into interface{}, with numeric nodes casted to json.Number func LoadsUseNumber(src string) (int, interface{}, error) { ps := &Parser{s: src} np, err := ps.Parse() /* check for errors */ if err != 0 { return 0, nil, err } else { x, err := np.InterfaceUseNumber() if err != nil { return 0, nil, err } return ps.Pos(), x, nil } } // NewParser returns pointer of new allocated parser func NewParser(src string) *Parser { return &Parser{s: src} } // NewParser returns new allocated parser func NewParserObj(src string) Parser { return Parser{s: src} } // decodeNumber controls if parser decodes the number values instead of skip them // WARN: once you set decodeNumber(true), please set decodeNumber(false) before you drop the parser // otherwise the memory CANNOT be reused func (self *Parser) decodeNumber(decode bool) { if !decode && self.dbuf != nil { types.FreeDbuf(self.dbuf) self.dbuf = nil return } if decode && self.dbuf == nil { self.dbuf = types.NewDbuf() } } // ExportError converts types.ParsingError to std Error func (self *Parser) ExportError(err types.ParsingError) error { if err == _ERR_NOT_FOUND { return ErrNotExist } return fmt.Errorf("%q", SyntaxError{ Pos : self.p, Src : self.s, Code: err, }.Description()) } func backward(src 
string, i int) int { for ; i>=0 && isSpace(src[i]); i-- {} return i } func newRawNode(str string, typ types.ValueType, lock bool) Node { ret := Node{ t: typ | _V_RAW, p: rt.StrPtr(str), l: uint(len(str)), } if lock { ret.m = new(sync.RWMutex) } return ret } var typeJumpTable = [256]types.ValueType{ '"' : types.V_STRING, '-' : _V_NUMBER, '0' : _V_NUMBER, '1' : _V_NUMBER, '2' : _V_NUMBER, '3' : _V_NUMBER, '4' : _V_NUMBER, '5' : _V_NUMBER, '6' : _V_NUMBER, '7' : _V_NUMBER, '8' : _V_NUMBER, '9' : _V_NUMBER, '[' : types.V_ARRAY, 'f' : types.V_FALSE, 'n' : types.V_NULL, 't' : types.V_TRUE, '{' : types.V_OBJECT, } func switchRawType(c byte) types.ValueType { return typeJumpTable[c] } func (self *Node) loadt() types.ValueType { return (types.ValueType)(atomic.LoadInt64(&self.t)) } func (self *Node) lock() bool { if m := self.m; m != nil { m.Lock() return true } return false } func (self *Node) unlock() { if m := self.m; m != nil { m.Unlock() } } func (self *Node) rlock() bool { if m := self.m; m != nil { m.RLock() return true } return false } func (self *Node) runlock() { if m := self.m; m != nil { m.RUnlock() } }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/bytedance/sonic/ast/api.go
vendor/github.com/bytedance/sonic/ast/api.go
//go:build (amd64 && go1.17 && !go1.25) || (arm64 && go1.20 && !go1.25) // +build amd64,go1.17,!go1.25 arm64,go1.20,!go1.25 /* * Copyright 2022 ByteDance Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package ast import ( `runtime` `unsafe` `github.com/bytedance/sonic/encoder` `github.com/bytedance/sonic/internal/native` `github.com/bytedance/sonic/internal/native/types` `github.com/bytedance/sonic/internal/rt` uq `github.com/bytedance/sonic/unquote` `github.com/bytedance/sonic/utf8` ) var typeByte = rt.UnpackEface(byte(0)).Type //go:nocheckptr func quote(buf *[]byte, val string) { *buf = append(*buf, '"') if len(val) == 0 { *buf = append(*buf, '"') return } sp := rt.IndexChar(val, 0) nb := len(val) b := (*rt.GoSlice)(unsafe.Pointer(buf)) // input buffer for nb > 0 { // output buffer dp := unsafe.Pointer(uintptr(b.Ptr) + uintptr(b.Len)) dn := b.Cap - b.Len // call native.Quote, dn is byte count it outputs ret := native.Quote(sp, nb, dp, &dn, 0) // update *buf length b.Len += dn // no need more output if ret >= 0 { break } // double buf size *b = rt.GrowSlice(typeByte, *b, b.Cap*2) // ret is the complement of consumed input ret = ^ret // update input buffer nb -= ret sp = unsafe.Pointer(uintptr(sp) + uintptr(ret)) } runtime.KeepAlive(buf) runtime.KeepAlive(sp) *buf = append(*buf, '"') } func unquote(src string) (string, types.ParsingError) { return uq.String(src) } func (self *Parser) decodeValue() (val types.JsonState) { sv := (*rt.GoString)(unsafe.Pointer(&self.s)) flag 
:= types.F_USE_NUMBER if self.dbuf != nil { flag = 0 val.Dbuf = self.dbuf val.Dcap = types.MaxDigitNums } self.p = native.Value(sv.Ptr, sv.Len, self.p, &val, uint64(flag)) return } func (self *Parser) skip() (int, types.ParsingError) { fsm := types.NewStateMachine() start := native.SkipOne(&self.s, &self.p, fsm, 0) types.FreeStateMachine(fsm) if start < 0 { return self.p, types.ParsingError(-start) } return start, 0 } func (self *Node) encodeInterface(buf *[]byte) error { //WARN: NOT compatible with json.Encoder return encoder.EncodeInto(buf, self.packAny(), encoder.NoEncoderNewline) } func (self *Parser) skipFast() (int, types.ParsingError) { start := native.SkipOneFast(&self.s, &self.p) if start < 0 { return self.p, types.ParsingError(-start) } return start, 0 } func (self *Parser) getByPath(validate bool, path ...interface{}) (int, types.ParsingError) { var fsm *types.StateMachine if validate { fsm = types.NewStateMachine() } start := native.GetByPath(&self.s, &self.p, &path, fsm) if validate { types.FreeStateMachine(fsm) } runtime.KeepAlive(path) if start < 0 { return self.p, types.ParsingError(-start) } return start, 0 } func validate_utf8(str string) bool { return utf8.ValidateString(str) }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/bytedance/sonic/ast/error.go
vendor/github.com/bytedance/sonic/ast/error.go
package ast import ( `fmt` `strings` `unsafe` `github.com/bytedance/sonic/internal/native/types` ) func newError(err types.ParsingError, msg string) *Node { return &Node{ t: V_ERROR, l: uint(err), p: unsafe.Pointer(&msg), } } func newErrorPair(err SyntaxError) *Pair { return &Pair{0, "", *newSyntaxError(err)} } // Error returns error message if the node is invalid func (self Node) Error() string { if self.t != V_ERROR { return "" } else { return *(*string)(self.p) } } func newSyntaxError(err SyntaxError) *Node { msg := err.Description() return &Node{ t: V_ERROR, l: uint(err.Code), p: unsafe.Pointer(&msg), } } func (self *Parser) syntaxError(err types.ParsingError) SyntaxError { return SyntaxError{ Pos : self.p, Src : self.s, Code: err, } } func unwrapError(err error) *Node { if se, ok := err.(*Node); ok { return se }else if sse, ok := err.(Node); ok { return &sse } else { msg := err.Error() return &Node{ t: V_ERROR, p: unsafe.Pointer(&msg), } } } type SyntaxError struct { Pos int Src string Code types.ParsingError Msg string } func (self SyntaxError) Error() string { return fmt.Sprintf("%q", self.Description()) } func (self SyntaxError) Description() string { return "Syntax error " + self.description() } func (self SyntaxError) description() string { i := 16 p := self.Pos - i q := self.Pos + i /* check for empty source */ if self.Src == "" { return fmt.Sprintf("no sources available, the input json is empty: %#v", self) } /* prevent slicing before the beginning */ if p < 0 { p, q, i = 0, q - p, i + p } /* prevent slicing beyond the end */ if n := len(self.Src); q > n { n = q - n q = len(self.Src) /* move the left bound if possible */ if p > n { i += n p -= n } } /* left and right length */ x := clamp_zero(i) y := clamp_zero(q - p - i - 1) /* compose the error description */ return fmt.Sprintf( "at index %d: %s\n\n\t%s\n\t%s^%s\n", self.Pos, self.Message(), self.Src[p:q], strings.Repeat(".", x), strings.Repeat(".", y), ) } func (self SyntaxError) Message() string { 
if self.Msg == "" { return self.Code.Message() } return self.Msg } func clamp_zero(v int) int { if v < 0 { return 0 } else { return v } }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/bytedance/sonic/ast/node.go
vendor/github.com/bytedance/sonic/ast/node.go
/* * Copyright 2021 ByteDance Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package ast import ( "encoding/json" "fmt" "strconv" "sync" "sync/atomic" "unsafe" "github.com/bytedance/sonic/internal/native/types" "github.com/bytedance/sonic/internal/rt" ) const ( _V_NONE types.ValueType = 0 _V_NODE_BASE types.ValueType = 1 << 5 _V_LAZY types.ValueType = 1 << 7 _V_RAW types.ValueType = 1 << 8 _V_NUMBER = _V_NODE_BASE + 1 _V_ANY = _V_NODE_BASE + 2 _V_ARRAY_LAZY = _V_LAZY | types.V_ARRAY _V_OBJECT_LAZY = _V_LAZY | types.V_OBJECT _MASK_LAZY = _V_LAZY - 1 _MASK_RAW = _V_RAW - 1 ) const ( V_NONE = 0 V_ERROR = 1 V_NULL = int(types.V_NULL) V_TRUE = int(types.V_TRUE) V_FALSE = int(types.V_FALSE) V_ARRAY = int(types.V_ARRAY) V_OBJECT = int(types.V_OBJECT) V_STRING = int(types.V_STRING) V_NUMBER = int(_V_NUMBER) V_ANY = int(_V_ANY) ) type Node struct { t types.ValueType l uint p unsafe.Pointer m *sync.RWMutex } // UnmarshalJSON is just an adapter to json.Unmarshaler. 
// If you want better performance, use Searcher.GetByPath() directly func (self *Node) UnmarshalJSON(data []byte) (err error) { *self = NewRaw(string(data)) return self.Check() } /** Node Type Accessor **/ // Type returns json type represented by the node // It will be one of bellows: // V_NONE = 0 (empty node, key not exists) // V_ERROR = 1 (error node) // V_NULL = 2 (json value `null`, key exists) // V_TRUE = 3 (json value `true`) // V_FALSE = 4 (json value `false`) // V_ARRAY = 5 (json value array) // V_OBJECT = 6 (json value object) // V_STRING = 7 (json value string) // V_NUMBER = 33 (json value number ) // V_ANY = 34 (golang interface{}) // // Deprecated: not concurrent safe. Use TypeSafe instead func (self Node) Type() int { return int(self.t & _MASK_LAZY & _MASK_RAW) } // Type concurrently-safe returns json type represented by the node // It will be one of bellows: // V_NONE = 0 (empty node, key not exists) // V_ERROR = 1 (error node) // V_NULL = 2 (json value `null`, key exists) // V_TRUE = 3 (json value `true`) // V_FALSE = 4 (json value `false`) // V_ARRAY = 5 (json value array) // V_OBJECT = 6 (json value object) // V_STRING = 7 (json value string) // V_NUMBER = 33 (json value number ) // V_ANY = 34 (golang interface{}) func (self *Node) TypeSafe() int { return int(self.loadt() & _MASK_LAZY & _MASK_RAW) } func (self *Node) itype() types.ValueType { return self.t & _MASK_LAZY & _MASK_RAW } // Exists returns false only if the self is nil or empty node V_NONE func (self *Node) Exists() bool { if self == nil { return false } t := self.loadt() return t != V_ERROR && t != _V_NONE } // Valid reports if self is NOT V_ERROR or nil func (self *Node) Valid() bool { if self == nil { return false } return self.loadt() != V_ERROR } // Check checks if the node itself is valid, and return: // - ErrNotExist If the node is nil // - Its underlying error If the node is V_ERROR func (self *Node) Check() error { if self == nil { return ErrNotExist } else if self.loadt() != 
V_ERROR { return nil } else { return self } } // isRaw returns true if node's underlying value is raw json // // Deprecated: not concurrent safe func (self Node) IsRaw() bool { return self.t & _V_RAW != 0 } // IsRaw returns true if node's underlying value is raw json func (self *Node) isRaw() bool { return self.loadt() & _V_RAW != 0 } func (self *Node) isLazy() bool { return self != nil && self.t & _V_LAZY != 0 } func (self *Node) isAny() bool { return self != nil && self.loadt() == _V_ANY } /** Simple Value Methods **/ // Raw returns json representation of the node, func (self *Node) Raw() (string, error) { if self == nil { return "", ErrNotExist } lock := self.rlock() if !self.isRaw() { if lock { self.runlock() } buf, err := self.MarshalJSON() return rt.Mem2Str(buf), err } ret := self.toString() if lock { self.runlock() } return ret, nil } func (self *Node) checkRaw() error { if err := self.Check(); err != nil { return err } if self.isRaw() { self.parseRaw(false) } return self.Check() } // Bool returns bool value represented by this node, // including types.V_TRUE|V_FALSE|V_NUMBER|V_STRING|V_ANY|V_NULL, // V_NONE will return error func (self *Node) Bool() (bool, error) { if err := self.checkRaw(); err != nil { return false, err } switch self.t { case types.V_TRUE : return true , nil case types.V_FALSE : return false, nil case types.V_NULL : return false, nil case _V_NUMBER : if i, err := self.toInt64(); err == nil { return i != 0, nil } else if f, err := self.toFloat64(); err == nil { return f != 0, nil } else { return false, err } case types.V_STRING: return strconv.ParseBool(self.toString()) case _V_ANY : any := self.packAny() switch v := any.(type) { case bool : return v, nil case int : return v != 0, nil case int8 : return v != 0, nil case int16 : return v != 0, nil case int32 : return v != 0, nil case int64 : return v != 0, nil case uint : return v != 0, nil case uint8 : return v != 0, nil case uint16 : return v != 0, nil case uint32 : return v != 0, nil 
case uint64 : return v != 0, nil case float32: return v != 0, nil case float64: return v != 0, nil case string : return strconv.ParseBool(v) case json.Number: if i, err := v.Int64(); err == nil { return i != 0, nil } else if f, err := v.Float64(); err == nil { return f != 0, nil } else { return false, err } default: return false, ErrUnsupportType } default : return false, ErrUnsupportType } } // Int64 casts the node to int64 value, // including V_NUMBER|V_TRUE|V_FALSE|V_ANY|V_STRING // V_NONE it will return error func (self *Node) Int64() (int64, error) { if err := self.checkRaw(); err != nil { return 0, err } switch self.t { case _V_NUMBER, types.V_STRING : if i, err := self.toInt64(); err == nil { return i, nil } else if f, err := self.toFloat64(); err == nil { return int64(f), nil } else { return 0, err } case types.V_TRUE : return 1, nil case types.V_FALSE : return 0, nil case types.V_NULL : return 0, nil case _V_ANY : any := self.packAny() switch v := any.(type) { case bool : if v { return 1, nil } else { return 0, nil } case int : return int64(v), nil case int8 : return int64(v), nil case int16 : return int64(v), nil case int32 : return int64(v), nil case int64 : return int64(v), nil case uint : return int64(v), nil case uint8 : return int64(v), nil case uint16 : return int64(v), nil case uint32 : return int64(v), nil case uint64 : return int64(v), nil case float32: return int64(v), nil case float64: return int64(v), nil case string : if i, err := strconv.ParseInt(v, 10, 64); err == nil { return i, nil } else if f, err := strconv.ParseFloat(v, 64); err == nil { return int64(f), nil } else { return 0, err } case json.Number: if i, err := v.Int64(); err == nil { return i, nil } else if f, err := v.Float64(); err == nil { return int64(f), nil } else { return 0, err } default: return 0, ErrUnsupportType } default : return 0, ErrUnsupportType } } // StrictInt64 exports underlying int64 value, including V_NUMBER, V_ANY func (self *Node) StrictInt64() (int64, error) 
{ if err := self.checkRaw(); err != nil { return 0, err } switch self.t { case _V_NUMBER : return self.toInt64() case _V_ANY : any := self.packAny() switch v := any.(type) { case int : return int64(v), nil case int8 : return int64(v), nil case int16 : return int64(v), nil case int32 : return int64(v), nil case int64 : return int64(v), nil case uint : return int64(v), nil case uint8 : return int64(v), nil case uint16: return int64(v), nil case uint32: return int64(v), nil case uint64: return int64(v), nil case json.Number: if i, err := v.Int64(); err == nil { return i, nil } else { return 0, err } default: return 0, ErrUnsupportType } default : return 0, ErrUnsupportType } } func castNumber(v bool) json.Number { if v { return json.Number("1") } else { return json.Number("0") } } // Number casts node to float64, // including V_NUMBER|V_TRUE|V_FALSE|V_ANY|V_STRING|V_NULL, // V_NONE it will return error func (self *Node) Number() (json.Number, error) { if err := self.checkRaw(); err != nil { return json.Number(""), err } switch self.t { case _V_NUMBER : return self.toNumber(), nil case types.V_STRING : if _, err := self.toInt64(); err == nil { return self.toNumber(), nil } else if _, err := self.toFloat64(); err == nil { return self.toNumber(), nil } else { return json.Number(""), err } case types.V_TRUE : return json.Number("1"), nil case types.V_FALSE : return json.Number("0"), nil case types.V_NULL : return json.Number("0"), nil case _V_ANY : any := self.packAny() switch v := any.(type) { case bool : return castNumber(v), nil case int : return castNumber(v != 0), nil case int8 : return castNumber(v != 0), nil case int16 : return castNumber(v != 0), nil case int32 : return castNumber(v != 0), nil case int64 : return castNumber(v != 0), nil case uint : return castNumber(v != 0), nil case uint8 : return castNumber(v != 0), nil case uint16 : return castNumber(v != 0), nil case uint32 : return castNumber(v != 0), nil case uint64 : return castNumber(v != 0), nil case 
float32: return castNumber(v != 0), nil case float64: return castNumber(v != 0), nil case string : if _, err := strconv.ParseFloat(v, 64); err == nil { return json.Number(v), nil } else { return json.Number(""), err } case json.Number: return v, nil default: return json.Number(""), ErrUnsupportType } default : return json.Number(""), ErrUnsupportType } } // Number exports underlying float64 value, including V_NUMBER, V_ANY of json.Number func (self *Node) StrictNumber() (json.Number, error) { if err := self.checkRaw(); err != nil { return json.Number(""), err } switch self.t { case _V_NUMBER : return self.toNumber() , nil case _V_ANY : if v, ok := self.packAny().(json.Number); ok { return v, nil } else { return json.Number(""), ErrUnsupportType } default : return json.Number(""), ErrUnsupportType } } // String cast node to string, // including V_NUMBER|V_TRUE|V_FALSE|V_ANY|V_STRING|V_NULL, // V_NONE it will return error func (self *Node) String() (string, error) { if err := self.checkRaw(); err != nil { return "", err } switch self.t { case types.V_NULL : return "" , nil case types.V_TRUE : return "true" , nil case types.V_FALSE : return "false", nil case types.V_STRING, _V_NUMBER : return self.toString(), nil case _V_ANY : any := self.packAny() switch v := any.(type) { case bool : return strconv.FormatBool(v), nil case int : return strconv.Itoa(v), nil case int8 : return strconv.Itoa(int(v)), nil case int16 : return strconv.Itoa(int(v)), nil case int32 : return strconv.Itoa(int(v)), nil case int64 : return strconv.Itoa(int(v)), nil case uint : return strconv.Itoa(int(v)), nil case uint8 : return strconv.Itoa(int(v)), nil case uint16 : return strconv.Itoa(int(v)), nil case uint32 : return strconv.Itoa(int(v)), nil case uint64 : return strconv.Itoa(int(v)), nil case float32: return strconv.FormatFloat(float64(v), 'g', -1, 64), nil case float64: return strconv.FormatFloat(float64(v), 'g', -1, 64), nil case string : return v, nil case json.Number: return v.String(), 
nil default: return "", ErrUnsupportType } default : return "" , ErrUnsupportType } } // StrictString returns string value (unescaped), including V_STRING, V_ANY of string. // In other cases, it will return empty string. func (self *Node) StrictString() (string, error) { if err := self.checkRaw(); err != nil { return "", err } switch self.t { case types.V_STRING : return self.toString(), nil case _V_ANY : if v, ok := self.packAny().(string); ok { return v, nil } else { return "", ErrUnsupportType } default : return "", ErrUnsupportType } } // Float64 cast node to float64, // including V_NUMBER|V_TRUE|V_FALSE|V_ANY|V_STRING|V_NULL, // V_NONE it will return error func (self *Node) Float64() (float64, error) { if err := self.checkRaw(); err != nil { return 0.0, err } switch self.t { case _V_NUMBER, types.V_STRING : return self.toFloat64() case types.V_TRUE : return 1.0, nil case types.V_FALSE : return 0.0, nil case types.V_NULL : return 0.0, nil case _V_ANY : any := self.packAny() switch v := any.(type) { case bool : if v { return 1.0, nil } else { return 0.0, nil } case int : return float64(v), nil case int8 : return float64(v), nil case int16 : return float64(v), nil case int32 : return float64(v), nil case int64 : return float64(v), nil case uint : return float64(v), nil case uint8 : return float64(v), nil case uint16 : return float64(v), nil case uint32 : return float64(v), nil case uint64 : return float64(v), nil case float32: return float64(v), nil case float64: return float64(v), nil case string : if f, err := strconv.ParseFloat(v, 64); err == nil { return float64(f), nil } else { return 0, err } case json.Number: if f, err := v.Float64(); err == nil { return float64(f), nil } else { return 0, err } default : return 0, ErrUnsupportType } default : return 0.0, ErrUnsupportType } } // Float64 exports underlying float64 value, including V_NUMBER, V_ANY func (self *Node) StrictFloat64() (float64, error) { if err := self.checkRaw(); err != nil { return 0.0, err } 
switch self.t { case _V_NUMBER : return self.toFloat64() case _V_ANY : any := self.packAny() switch v := any.(type) { case float32 : return float64(v), nil case float64 : return float64(v), nil default : return 0, ErrUnsupportType } default : return 0.0, ErrUnsupportType } } /** Sequential Value Methods **/ // Len returns children count of a array|object|string node // WARN: For partially loaded node, it also works but only counts the parsed children func (self *Node) Len() (int, error) { if err := self.checkRaw(); err != nil { return 0, err } if self.t == types.V_ARRAY || self.t == types.V_OBJECT || self.t == _V_ARRAY_LAZY || self.t == _V_OBJECT_LAZY || self.t == types.V_STRING { return int(self.l), nil } else if self.t == _V_NONE || self.t == types.V_NULL { return 0, nil } else { return 0, ErrUnsupportType } } func (self *Node) len() int { return int(self.l) } // Cap returns malloc capacity of a array|object node for children func (self *Node) Cap() (int, error) { if err := self.checkRaw(); err != nil { return 0, err } switch self.t { case types.V_ARRAY: return (*linkedNodes)(self.p).Cap(), nil case types.V_OBJECT: return (*linkedPairs)(self.p).Cap(), nil case _V_ARRAY_LAZY: return (*parseArrayStack)(self.p).v.Cap(), nil case _V_OBJECT_LAZY: return (*parseObjectStack)(self.p).v.Cap(), nil case _V_NONE, types.V_NULL: return 0, nil default: return 0, ErrUnsupportType } } // Set sets the node of given key under self, and reports if the key has existed. // // If self is V_NONE or V_NULL, it becomes V_OBJECT and sets the node at the key. 
func (self *Node) Set(key string, node Node) (bool, error) { if err := self.checkRaw(); err != nil { return false, err } if err := node.Check(); err != nil { return false, err } if self.t == _V_NONE || self.t == types.V_NULL { *self = NewObject([]Pair{NewPair(key, node)}) return false, nil } else if self.itype() != types.V_OBJECT { return false, ErrUnsupportType } p := self.Get(key) if !p.Exists() { // self must be fully-loaded here if self.len() == 0 { *self = newObject(new(linkedPairs)) } s := (*linkedPairs)(self.p) s.Push(NewPair(key, node)) self.l++ return false, nil } else if err := p.Check(); err != nil { return false, err } *p = node return true, nil } // SetAny wraps val with V_ANY node, and Set() the node. func (self *Node) SetAny(key string, val interface{}) (bool, error) { return self.Set(key, NewAny(val)) } // Unset REMOVE (soft) the node of given key under object parent, and reports if the key has existed. func (self *Node) Unset(key string) (bool, error) { if err := self.should(types.V_OBJECT); err != nil { return false, err } // NOTICE: must get accurate length before deduct if err := self.skipAllKey(); err != nil { return false, err } p, i := self.skipKey(key) if !p.Exists() { return false, nil } else if err := p.Check(); err != nil { return false, err } self.removePairAt(i) return true, nil } // SetByIndex sets the node of given index, and reports if the key has existed. // // The index must be within self's children. func (self *Node) SetByIndex(index int, node Node) (bool, error) { if err := self.checkRaw(); err != nil { return false, err } if err := node.Check(); err != nil { return false, err } if index == 0 && (self.t == _V_NONE || self.t == types.V_NULL) { *self = NewArray([]Node{node}) return false, nil } p := self.Index(index) if !p.Exists() { return false, ErrNotExist } else if err := p.Check(); err != nil { return false, err } *p = node return true, nil } // SetAny wraps val with V_ANY node, and SetByIndex() the node. 
func (self *Node) SetAnyByIndex(index int, val interface{}) (bool, error) { return self.SetByIndex(index, NewAny(val)) } // UnsetByIndex REMOVE (softly) the node of given index. // // WARN: this will change address of elements, which is a dangerous action. // Use Unset() for object or Pop() for array instead. func (self *Node) UnsetByIndex(index int) (bool, error) { if err := self.checkRaw(); err != nil { return false, err } var p *Node it := self.itype() if it == types.V_ARRAY { if err := self.skipAllIndex(); err != nil { return false, err } p = self.nodeAt(index) } else if it == types.V_OBJECT { if err := self.skipAllKey(); err != nil { return false, err } pr := self.pairAt(index) if pr == nil { return false, ErrNotExist } p = &pr.Value } else { return false, ErrUnsupportType } if !p.Exists() { return false, ErrNotExist } // last elem if index == self.len() - 1 { return true, self.Pop() } // not last elem, self.len() change but linked-chunk not change if it == types.V_ARRAY { self.removeNode(index) }else if it == types.V_OBJECT { self.removePair(index) } return true, nil } // Add appends the given node under self. // // If self is V_NONE or V_NULL, it becomes V_ARRAY and sets the node at index 0. func (self *Node) Add(node Node) error { if err := self.checkRaw(); err != nil { return err } if self != nil && (self.t == _V_NONE || self.t == types.V_NULL) { *self = NewArray([]Node{node}) return nil } if err := self.should(types.V_ARRAY); err != nil { return err } s, err := self.unsafeArray() if err != nil { return err } // Notice: array won't have unset node in tail s.Push(node) self.l++ return nil } // Pop remove the last child of the V_Array or V_Object node. 
func (self *Node) Pop() error { if err := self.checkRaw(); err != nil { return err } if it := self.itype(); it == types.V_ARRAY { s, err := self.unsafeArray() if err != nil { return err } // remove tail unset nodes for i := s.Len()-1; i >= 0; i-- { if s.At(i).Exists() { s.Pop() self.l-- break } s.Pop() } } else if it == types.V_OBJECT { s, err := self.unsafeMap() if err != nil { return err } // remove tail unset nodes for i := s.Len()-1; i >= 0; i-- { if p := s.At(i); p != nil && p.Value.Exists() { s.Pop() self.l-- break } s.Pop() } } else { return ErrUnsupportType } return nil } // Move moves the child at src index to dst index, // meanwhile slides sliblings from src+1 to dst. // // WARN: this will change address of elements, which is a dangerous action. func (self *Node) Move(dst, src int) error { if err := self.should(types.V_ARRAY); err != nil { return err } s, err := self.unsafeArray() if err != nil { return err } // check if any unset node exists if l := s.Len(); self.len() != l { di, si := dst, src // find real pos of src and dst for i := 0; i < l; i++ { if s.At(i).Exists() { di-- si-- } if di == -1 { dst = i di-- } if si == -1 { src = i si-- } if di == -2 && si == -2 { break } } } s.MoveOne(src, dst) return nil } // SetAny wraps val with V_ANY node, and Add() the node. func (self *Node) AddAny(val interface{}) error { return self.Add(NewAny(val)) } // GetByPath load given path on demands, // which only ensure nodes before this path got parsed. // // Note, the api expects the json is well-formed at least, // otherwise it may return unexpected result. 
func (self *Node) GetByPath(path ...interface{}) *Node { if !self.Valid() { return self } var s = self for _, p := range path { switch p := p.(type) { case int: s = s.Index(p) if !s.Valid() { return s } case string: s = s.Get(p) if !s.Valid() { return s } default: panic("path must be either int or string") } } return s } // Get loads given key of an object node on demands func (self *Node) Get(key string) *Node { if err := self.should(types.V_OBJECT); err != nil { return unwrapError(err) } n, _ := self.skipKey(key) return n } // Index indexies node at given idx, // node type CAN be either V_OBJECT or V_ARRAY func (self *Node) Index(idx int) *Node { if err := self.checkRaw(); err != nil { return unwrapError(err) } it := self.itype() if it == types.V_ARRAY { return self.skipIndex(idx) }else if it == types.V_OBJECT { pr := self.skipIndexPair(idx) if pr == nil { return newError(_ERR_NOT_FOUND, "value not exists") } return &pr.Value } else { return newError(_ERR_UNSUPPORT_TYPE, fmt.Sprintf("unsupported type: %v", self.itype())) } } // IndexPair indexies pair at given idx, // node type MUST be either V_OBJECT func (self *Node) IndexPair(idx int) *Pair { if err := self.should(types.V_OBJECT); err != nil { return nil } return self.skipIndexPair(idx) } func (self *Node) indexOrGet(idx int, key string) (*Node, int) { if err := self.should(types.V_OBJECT); err != nil { return unwrapError(err), idx } pr := self.skipIndexPair(idx) if pr != nil && pr.Key == key { return &pr.Value, idx } return self.skipKey(key) } // IndexOrGet firstly use idx to index a value and check if its key matches // If not, then use the key to search value func (self *Node) IndexOrGet(idx int, key string) *Node { node, _ := self.indexOrGet(idx, key) return node } // IndexOrGetWithIdx attempts to retrieve a node by index and key, returning the node and its correct index. // If the key does not match at the given index, it searches by key and returns the node with its updated index. 
func (self *Node) IndexOrGetWithIdx(idx int, key string) (*Node, int) { return self.indexOrGet(idx, key) } /** Generic Value Converters **/ // Map loads all keys of an object node func (self *Node) Map() (map[string]interface{}, error) { if self.isAny() { any := self.packAny() if v, ok := any.(map[string]interface{}); ok { return v, nil } else { return nil, ErrUnsupportType } } if err := self.should(types.V_OBJECT); err != nil { return nil, err } if err := self.loadAllKey(false); err != nil { return nil, err } return self.toGenericObject() } // MapUseNumber loads all keys of an object node, with numeric nodes casted to json.Number func (self *Node) MapUseNumber() (map[string]interface{}, error) { if self.isAny() { any := self.packAny() if v, ok := any.(map[string]interface{}); ok { return v, nil } else { return nil, ErrUnsupportType } } if err := self.should(types.V_OBJECT); err != nil { return nil, err } if err := self.loadAllKey(false); err != nil { return nil, err } return self.toGenericObjectUseNumber() } // MapUseNode scans both parsed and non-parsed children nodes, // and map them by their keys func (self *Node) MapUseNode() (map[string]Node, error) { if self.isAny() { any := self.packAny() if v, ok := any.(map[string]Node); ok { return v, nil } else { return nil, ErrUnsupportType } } if err := self.should(types.V_OBJECT); err != nil { return nil, err } if err := self.skipAllKey(); err != nil { return nil, err } return self.toGenericObjectUseNode() } // MapUnsafe exports the underlying pointer to its children map // WARN: don't use it unless you know what you are doing // // Deprecated: this API now returns copied nodes instead of directly reference, // func (self *Node) UnsafeMap() ([]Pair, error) { // if err := self.should(types.V_OBJECT, "an object"); err != nil { // return nil, err // } // if err := self.skipAllKey(); err != nil { // return nil, err // } // return self.toGenericObjectUsePair() // } //go:nocheckptr func (self *Node) unsafeMap() 
(*linkedPairs, error) { if err := self.skipAllKey(); err != nil { return nil, err } if self.p == nil { *self = newObject(new(linkedPairs)) } return (*linkedPairs)(self.p), nil } // SortKeys sorts children of a V_OBJECT node in ascending key-order. // If recurse is true, it recursively sorts children's children as long as a V_OBJECT node is found. func (self *Node) SortKeys(recurse bool) error { // check raw node first if err := self.checkRaw(); err != nil { return err } if self.itype() == types.V_OBJECT { return self.sortKeys(recurse) } else if self.itype() == types.V_ARRAY { var err error err2 := self.ForEach(func(path Sequence, node *Node) bool { it := node.itype() if it == types.V_ARRAY || it == types.V_OBJECT { err = node.SortKeys(recurse) if err != nil { return false } } return true }) if err != nil { return err } return err2 } else { return nil } } func (self *Node) sortKeys(recurse bool) (err error) { // check raw node first if err := self.checkRaw(); err != nil { return err } ps, err := self.unsafeMap() if err != nil { return err } ps.Sort() if recurse { var sc Scanner sc = func(path Sequence, node *Node) bool { if node.itype() == types.V_OBJECT {
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
true
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/bytedance/sonic/ast/buffer.go
vendor/github.com/bytedance/sonic/ast/buffer.go
/** * Copyright 2023 ByteDance Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package ast import ( "sort" "unsafe" "github.com/bytedance/sonic/internal/caching" ) type nodeChunk [_DEFAULT_NODE_CAP]Node type linkedNodes struct { head nodeChunk tail []*nodeChunk size int } func (self *linkedNodes) Cap() int { if self == nil { return 0 } return (len(self.tail)+1)*_DEFAULT_NODE_CAP } func (self *linkedNodes) Len() int { if self == nil { return 0 } return self.size } func (self *linkedNodes) At(i int) (*Node) { if self == nil { return nil } if i >= 0 && i<self.size && i < _DEFAULT_NODE_CAP { return &self.head[i] } else if i >= _DEFAULT_NODE_CAP && i<self.size { a, b := i/_DEFAULT_NODE_CAP-1, i%_DEFAULT_NODE_CAP if a < len(self.tail) { return &self.tail[a][b] } } return nil } func (self *linkedNodes) MoveOne(source int, target int) { if source == target { return } if source < 0 || source >= self.size || target < 0 || target >= self.size { return } // reserve source n := *self.At(source) if source < target { // move every element (source,target] one step back for i:=source; i<target; i++ { *self.At(i) = *self.At(i+1) } } else { // move every element [target,source) one step forward for i:=source; i>target; i-- { *self.At(i) = *self.At(i-1) } } // set target *self.At(target) = n } func (self *linkedNodes) Pop() { if self == nil || self.size == 0 { return } self.Set(self.size-1, Node{}) self.size-- } func (self *linkedNodes) Push(v Node) { self.Set(self.size, v) } func (self 
*linkedNodes) Set(i int, v Node) { if i < _DEFAULT_NODE_CAP { self.head[i] = v if self.size <= i { self.size = i+1 } return } a, b := i/_DEFAULT_NODE_CAP-1, i%_DEFAULT_NODE_CAP if a < 0 { self.head[b] = v } else { self.growTailLength(a+1) var n = &self.tail[a] if *n == nil { *n = new(nodeChunk) } (*n)[b] = v } if self.size <= i { self.size = i+1 } } func (self *linkedNodes) growTailLength(l int) { if l <= len(self.tail) { return } c := cap(self.tail) for c < l { c += 1 + c>>_APPEND_GROW_SHIFT } if c == cap(self.tail) { self.tail = self.tail[:l] return } tmp := make([]*nodeChunk, l, c) copy(tmp, self.tail) self.tail = tmp } func (self *linkedNodes) ToSlice(con []Node) { if len(con) < self.size { return } i := (self.size-1) a, b := i/_DEFAULT_NODE_CAP-1, i%_DEFAULT_NODE_CAP if a < 0 { copy(con, self.head[:b+1]) return } else { copy(con, self.head[:]) con = con[_DEFAULT_NODE_CAP:] } for i:=0; i<a; i++ { copy(con, self.tail[i][:]) con = con[_DEFAULT_NODE_CAP:] } copy(con, self.tail[a][:b+1]) } func (self *linkedNodes) FromSlice(con []Node) { self.size = len(con) i := self.size-1 a, b := i/_DEFAULT_NODE_CAP-1, i%_DEFAULT_NODE_CAP if a < 0 { copy(self.head[:b+1], con) return } else { copy(self.head[:], con) con = con[_DEFAULT_NODE_CAP:] } if cap(self.tail) <= a { c := (a+1) + (a+1)>>_APPEND_GROW_SHIFT self.tail = make([]*nodeChunk, a+1, c) } self.tail = self.tail[:a+1] for i:=0; i<a; i++ { self.tail[i] = new(nodeChunk) copy(self.tail[i][:], con) con = con[_DEFAULT_NODE_CAP:] } self.tail[a] = new(nodeChunk) copy(self.tail[a][:b+1], con) } type pairChunk [_DEFAULT_NODE_CAP]Pair type linkedPairs struct { index map[uint64]int head pairChunk tail []*pairChunk size int } func (self *linkedPairs) BuildIndex() { if self.index == nil { self.index = make(map[uint64]int, self.size) } for i:=0; i<self.size; i++ { p := self.At(i) self.index[p.hash] = i } } func (self *linkedPairs) Cap() int { if self == nil { return 0 } return (len(self.tail)+1)*_DEFAULT_NODE_CAP } func (self 
*linkedPairs) Len() int { if self == nil { return 0 } return self.size } func (self *linkedPairs) At(i int) *Pair { if self == nil { return nil } if i >= 0 && i < _DEFAULT_NODE_CAP && i<self.size { return &self.head[i] } else if i >= _DEFAULT_NODE_CAP && i<self.size { a, b := i/_DEFAULT_NODE_CAP-1, i%_DEFAULT_NODE_CAP if a < len(self.tail) { return &self.tail[a][b] } } return nil } func (self *linkedPairs) Push(v Pair) { self.Set(self.size, v) } func (self *linkedPairs) Pop() { if self == nil || self.size == 0 { return } self.Unset(self.size-1) self.size-- } func (self *linkedPairs) Unset(i int) { if self.index != nil { p := self.At(i) delete(self.index, p.hash) } self.set(i, Pair{}) } func (self *linkedPairs) Set(i int, v Pair) { if self.index != nil { h := v.hash self.index[h] = i } self.set(i, v) } func (self *linkedPairs) set(i int, v Pair) { if i < _DEFAULT_NODE_CAP { self.head[i] = v if self.size <= i { self.size = i+1 } return } a, b := i/_DEFAULT_NODE_CAP-1, i%_DEFAULT_NODE_CAP if a < 0 { self.head[b] = v } else { self.growTailLength(a+1) var n = &self.tail[a] if *n == nil { *n = new(pairChunk) } (*n)[b] = v } if self.size <= i { self.size = i+1 } } func (self *linkedPairs) growTailLength(l int) { if l <= len(self.tail) { return } c := cap(self.tail) for c < l { c += 1 + c>>_APPEND_GROW_SHIFT } if c == cap(self.tail) { self.tail = self.tail[:l] return } tmp := make([]*pairChunk, l, c) copy(tmp, self.tail) self.tail = tmp } // linear search func (self *linkedPairs) Get(key string) (*Pair, int) { if self.index != nil { // fast-path i, ok := self.index[caching.StrHash(key)] if ok { n := self.At(i) if n.Key == key { return n, i } // hash conflicts goto linear_search } else { return nil, -1 } } linear_search: for i:=0; i<self.size; i++ { if n := self.At(i); n.Key == key { return n, i } } return nil, -1 } func (self *linkedPairs) ToSlice(con []Pair) { if len(con) < self.size { return } i := self.size-1 a, b := i/_DEFAULT_NODE_CAP-1, i%_DEFAULT_NODE_CAP if a < 0 { 
copy(con, self.head[:b+1]) return } else { copy(con, self.head[:]) con = con[_DEFAULT_NODE_CAP:] } for i:=0; i<a; i++ { copy(con, self.tail[i][:]) con = con[_DEFAULT_NODE_CAP:] } copy(con, self.tail[a][:b+1]) } func (self *linkedPairs) ToMap(con map[string]Node) { for i:=0; i<self.size; i++ { n := self.At(i) con[n.Key] = n.Value } } func (self *linkedPairs) copyPairs(to []Pair, from []Pair, l int) { copy(to, from) if self.index != nil { for i:=0; i<l; i++ { // NOTICE: in case of user not pass hash, just cal it h := caching.StrHash(from[i].Key) from[i].hash = h self.index[h] = i } } } func (self *linkedPairs) FromSlice(con []Pair) { self.size = len(con) i := self.size-1 a, b := i/_DEFAULT_NODE_CAP-1, i%_DEFAULT_NODE_CAP if a < 0 { self.copyPairs(self.head[:b+1], con, b+1) return } else { self.copyPairs(self.head[:], con, len(self.head)) con = con[_DEFAULT_NODE_CAP:] } if cap(self.tail) <= a { c := (a+1) + (a+1)>>_APPEND_GROW_SHIFT self.tail = make([]*pairChunk, a+1, c) } self.tail = self.tail[:a+1] for i:=0; i<a; i++ { self.tail[i] = new(pairChunk) self.copyPairs(self.tail[i][:], con, len(self.tail[i])) con = con[_DEFAULT_NODE_CAP:] } self.tail[a] = new(pairChunk) self.copyPairs(self.tail[a][:b+1], con, b+1) } func (self *linkedPairs) Less(i, j int) bool { return lessFrom(self.At(i).Key, self.At(j).Key, 0) } func (self *linkedPairs) Swap(i, j int) { a, b := self.At(i), self.At(j) if self.index != nil { self.index[a.hash] = j self.index[b.hash] = i } *a, *b = *b, *a } func (self *linkedPairs) Sort() { sort.Stable(self) } // Compare two strings from the pos d. 
func lessFrom(a, b string, d int) bool { l := len(a) if l > len(b) { l = len(b) } for i := d; i < l; i++ { if a[i] == b[i] { continue } return a[i] < b[i] } return len(a) < len(b) } type parseObjectStack struct { parser Parser v linkedPairs } type parseArrayStack struct { parser Parser v linkedNodes } func newLazyArray(p *Parser) Node { s := new(parseArrayStack) s.parser = *p return Node{ t: _V_ARRAY_LAZY, p: unsafe.Pointer(s), } } func newLazyObject(p *Parser) Node { s := new(parseObjectStack) s.parser = *p return Node{ t: _V_OBJECT_LAZY, p: unsafe.Pointer(s), } } func (self *Node) getParserAndArrayStack() (*Parser, *parseArrayStack) { stack := (*parseArrayStack)(self.p) return &stack.parser, stack } func (self *Node) getParserAndObjectStack() (*Parser, *parseObjectStack) { stack := (*parseObjectStack)(self.p) return &stack.parser, stack }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/bytedance/sonic/ast/stubs.go
vendor/github.com/bytedance/sonic/ast/stubs.go
/* * Copyright 2021 ByteDance Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package ast import ( "unsafe" "github.com/bytedance/sonic/internal/rt" ) //go:nosplit func mem2ptr(s []byte) unsafe.Pointer { return (*rt.GoSlice)(unsafe.Pointer(&s)).Ptr } //go:linkname unquoteBytes encoding/json.unquoteBytes func unquoteBytes(s []byte) (t []byte, ok bool)
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/bytedance/sonic/ast/iterator.go
vendor/github.com/bytedance/sonic/ast/iterator.go
/* * Copyright 2021 ByteDance Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package ast import ( "fmt" "github.com/bytedance/sonic/internal/caching" "github.com/bytedance/sonic/internal/native/types" ) type Pair struct { hash uint64 Key string Value Node } func NewPair(key string, val Node) Pair { return Pair{ hash: caching.StrHash(key), Key: key, Value: val, } } // Values returns iterator for array's children traversal func (self *Node) Values() (ListIterator, error) { if err := self.should(types.V_ARRAY); err != nil { return ListIterator{}, err } return self.values(), nil } func (self *Node) values() ListIterator { return ListIterator{Iterator{p: self}} } // Properties returns iterator for object's children traversal func (self *Node) Properties() (ObjectIterator, error) { if err := self.should(types.V_OBJECT); err != nil { return ObjectIterator{}, err } return self.properties(), nil } func (self *Node) properties() ObjectIterator { return ObjectIterator{Iterator{p: self}} } type Iterator struct { i int p *Node } func (self *Iterator) Pos() int { return self.i } func (self *Iterator) Len() int { return self.p.len() } // HasNext reports if it is the end of iteration or has error. 
func (self *Iterator) HasNext() bool { if !self.p.isLazy() { return self.p.Valid() && self.i < self.p.len() } else if self.p.t == _V_ARRAY_LAZY { return self.p.skipNextNode().Valid() } else if self.p.t == _V_OBJECT_LAZY { pair := self.p.skipNextPair() if pair == nil { return false } return pair.Value.Valid() } return false } // ListIterator is specialized iterator for V_ARRAY type ListIterator struct { Iterator } // ObjectIterator is specialized iterator for V_ARRAY type ObjectIterator struct { Iterator } func (self *ListIterator) next() *Node { next_start: if !self.HasNext() { return nil } else { n := self.p.nodeAt(self.i) self.i++ if !n.Exists() { goto next_start } return n } } // Next scans through children of underlying V_ARRAY, // copies each child to v, and returns .HasNext(). func (self *ListIterator) Next(v *Node) bool { n := self.next() if n == nil { return false } *v = *n return true } func (self *ObjectIterator) next() *Pair { next_start: if !self.HasNext() { return nil } else { n := self.p.pairAt(self.i) self.i++ if n == nil || !n.Value.Exists() { goto next_start } return n } } // Next scans through children of underlying V_OBJECT, // copies each child to v, and returns .HasNext(). func (self *ObjectIterator) Next(p *Pair) bool { n := self.next() if n == nil { return false } *p = *n return true } // Sequence represents scanning path of single-layer nodes. // Index indicates the value's order in both V_ARRAY and V_OBJECT json. // Key is the value's key (for V_OBJECT json only, otherwise it will be nil). type Sequence struct { Index int Key *string // Level int } // String is string representation of one Sequence func (s Sequence) String() string { k := "" if s.Key != nil { k = *s.Key } return fmt.Sprintf("Sequence(%d, %q)", s.Index, k) } type Scanner func(path Sequence, node *Node) bool // ForEach scans one V_OBJECT node's children from JSON head to tail, // and pass the Sequence and Node of corresponding JSON value. 
// // Especially, if the node is not V_ARRAY or V_OBJECT, // the node itself will be returned and Sequence.Index == -1. // // NOTICE: A unsetted node WON'T trigger sc, but its index still counts into Path.Index func (self *Node) ForEach(sc Scanner) error { if err := self.checkRaw(); err != nil { return err } switch self.itype() { case types.V_ARRAY: iter, err := self.Values() if err != nil { return err } v := iter.next() for v != nil { if !sc(Sequence{iter.i-1, nil}, v) { return nil } v = iter.next() } case types.V_OBJECT: iter, err := self.Properties() if err != nil { return err } v := iter.next() for v != nil { if !sc(Sequence{iter.i-1, &v.Key}, &v.Value) { return nil } v = iter.next() } default: if self.Check() != nil { return self } sc(Sequence{-1, nil}, self) } return nil }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/bytedance/sonic/ast/encode.go
vendor/github.com/bytedance/sonic/ast/encode.go
/* * Copyright 2021 ByteDance Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package ast import ( "sync" "unicode/utf8" "github.com/bytedance/sonic/internal/rt" "github.com/bytedance/sonic/option" ) func quoteString(e *[]byte, s string) { *e = append(*e, '"') start := 0 for i := 0; i < len(s); { if b := s[i]; b < utf8.RuneSelf { if rt.SafeSet[b] { i++ continue } if start < i { *e = append(*e, s[start:i]...) } *e = append(*e, '\\') switch b { case '\\', '"': *e = append(*e, b) case '\n': *e = append(*e, 'n') case '\r': *e = append(*e, 'r') case '\t': *e = append(*e, 't') default: // This encodes bytes < 0x20 except for \t, \n and \r. // If escapeHTML is set, it also escapes <, >, and & // because they can lead to security holes when // user-controlled strings are rendered into JSON // and served to some browsers. *e = append(*e, `u00`...) *e = append(*e, rt.Hex[b>>4]) *e = append(*e, rt.Hex[b&0xF]) } i++ start = i continue } c, size := utf8.DecodeRuneInString(s[i:]) // if c == utf8.RuneError && size == 1 { // if start < i { // e.Write(s[start:i]) // } // e.WriteString(`\ufffd`) // i += size // start = i // continue // } if c == '\u2028' || c == '\u2029' { if start < i { *e = append(*e, s[start:i]...) } *e = append(*e, `\u202`...) *e = append(*e, rt.Hex[c&0xF]) i += size start = i continue } i += size } if start < len(s) { *e = append(*e, s[start:]...) 
} *e = append(*e, '"') } var bytesPool = sync.Pool{} func (self *Node) MarshalJSON() ([]byte, error) { if self == nil { return bytesNull, nil } buf := newBuffer() err := self.encode(buf) if err != nil { freeBuffer(buf) return nil, err } var ret []byte if !rt.CanSizeResue(cap(*buf)) { ret = *buf } else { ret = make([]byte, len(*buf)) copy(ret, *buf) freeBuffer(buf) } return ret, err } func newBuffer() *[]byte { if ret := bytesPool.Get(); ret != nil { return ret.(*[]byte) } else { buf := make([]byte, 0, option.DefaultAstBufferSize) return &buf } } func freeBuffer(buf *[]byte) { if !rt.CanSizeResue(cap(*buf)) { return } *buf = (*buf)[:0] bytesPool.Put(buf) } func (self *Node) encode(buf *[]byte) error { if self.isRaw() { return self.encodeRaw(buf) } switch int(self.itype()) { case V_NONE : return ErrNotExist case V_ERROR : return self.Check() case V_NULL : return self.encodeNull(buf) case V_TRUE : return self.encodeTrue(buf) case V_FALSE : return self.encodeFalse(buf) case V_ARRAY : return self.encodeArray(buf) case V_OBJECT: return self.encodeObject(buf) case V_STRING: return self.encodeString(buf) case V_NUMBER: return self.encodeNumber(buf) case V_ANY : return self.encodeInterface(buf) default : return ErrUnsupportType } } func (self *Node) encodeRaw(buf *[]byte) error { lock := self.rlock() if !self.isRaw() { self.runlock() return self.encode(buf) } raw := self.toString() if lock { self.runlock() } *buf = append(*buf, raw...) return nil } func (self *Node) encodeNull(buf *[]byte) error { *buf = append(*buf, strNull...) return nil } func (self *Node) encodeTrue(buf *[]byte) error { *buf = append(*buf, bytesTrue...) return nil } func (self *Node) encodeFalse(buf *[]byte) error { *buf = append(*buf, bytesFalse...) return nil } func (self *Node) encodeNumber(buf *[]byte) error { str := self.toString() *buf = append(*buf, str...) 
return nil } func (self *Node) encodeString(buf *[]byte) error { if self.l == 0 { *buf = append(*buf, '"', '"') return nil } quote(buf, self.toString()) return nil } func (self *Node) encodeArray(buf *[]byte) error { if self.isLazy() { if err := self.skipAllIndex(); err != nil { return err } } nb := self.len() if nb == 0 { *buf = append(*buf, bytesArray...) return nil } *buf = append(*buf, '[') var started bool for i := 0; i < nb; i++ { n := self.nodeAt(i) if !n.Exists() { continue } if started { *buf = append(*buf, ',') } started = true if err := n.encode(buf); err != nil { return err } } *buf = append(*buf, ']') return nil } func (self *Pair) encode(buf *[]byte) error { if len(*buf) == 0 { *buf = append(*buf, '"', '"', ':') return self.Value.encode(buf) } quote(buf, self.Key) *buf = append(*buf, ':') return self.Value.encode(buf) } func (self *Node) encodeObject(buf *[]byte) error { if self.isLazy() { if err := self.skipAllKey(); err != nil { return err } } nb := self.len() if nb == 0 { *buf = append(*buf, bytesObject...) return nil } *buf = append(*buf, '{') var started bool for i := 0; i < nb; i++ { n := self.pairAt(i) if n == nil || !n.Value.Exists() { continue } if started { *buf = append(*buf, ',') } started = true if err := n.encode(buf); err != nil { return err } } *buf = append(*buf, '}') return nil }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/bytedance/sonic/ast/search.go
vendor/github.com/bytedance/sonic/ast/search.go
/* * Copyright 2021 ByteDance Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package ast import ( `github.com/bytedance/sonic/internal/rt` `github.com/bytedance/sonic/internal/native/types` ) // SearchOptions controls Searcher's behavior type SearchOptions struct { // ValidateJSON indicates the searcher to validate the entire JSON ValidateJSON bool // CopyReturn indicates the searcher to copy the result JSON instead of refer from the input // This can help to reduce memory usage if you cache the results CopyReturn bool // ConcurrentRead indicates the searcher to return a concurrently-READ-safe node, // including: GetByPath/Get/Index/GetOrIndex/Int64/Bool/Float64/String/Number/Interface/Array/Map/Raw/MarshalJSON ConcurrentRead bool } type Searcher struct { parser Parser SearchOptions } func NewSearcher(str string) *Searcher { return &Searcher{ parser: Parser{ s: str, noLazy: false, }, SearchOptions: SearchOptions{ ValidateJSON: true, }, } } // GetByPathCopy search in depth from top json and returns a **Copied** json node at the path location func (self *Searcher) GetByPathCopy(path ...interface{}) (Node, error) { self.CopyReturn = true return self.getByPath(path...) } // GetByPathNoCopy search in depth from top json and returns a **Referenced** json node at the path location // // WARN: this search directly refer partial json from top json, which has faster speed, // may consumes more memory. 
func (self *Searcher) GetByPath(path ...interface{}) (Node, error) { return self.getByPath(path...) } func (self *Searcher) getByPath(path ...interface{}) (Node, error) { var err types.ParsingError var start int self.parser.p = 0 start, err = self.parser.getByPath(self.ValidateJSON, path...) if err != 0 { // for compatibility with old version if err == types.ERR_NOT_FOUND { return Node{}, ErrNotExist } if err == types.ERR_UNSUPPORT_TYPE { panic("path must be either int(>=0) or string") } return Node{}, self.parser.syntaxError(err) } t := switchRawType(self.parser.s[start]) if t == _V_NONE { return Node{}, self.parser.ExportError(err) } // copy string to reducing memory usage var raw string if self.CopyReturn { raw = rt.Mem2Str([]byte(self.parser.s[start:self.parser.p])) } else { raw = self.parser.s[start:self.parser.p] } return newRawNode(raw, t, self.ConcurrentRead), nil } // GetByPath searches a path and returns relaction and types of target func _GetByPath(src string, path ...interface{}) (start int, end int, typ int, err error) { p := NewParserObj(src) s, e := p.getByPath(false, path...) if e != 0 { // for compatibility with old version if e == types.ERR_NOT_FOUND { return -1, -1, 0, ErrNotExist } if e == types.ERR_UNSUPPORT_TYPE { panic("path must be either int(>=0) or string") } return -1, -1, 0, p.syntaxError(e) } t := switchRawType(p.s[s]) if t == _V_NONE { return -1, -1, 0, ErrNotExist } if t == _V_NUMBER { p.p = 1 + backward(p.s, p.p-1) } return s, p.p, int(t), nil } // ValidSyntax check if a json has a valid JSON syntax, // while not validate UTF-8 charset func _ValidSyntax(json string) bool { p := NewParserObj(json) _, e := p.skip() if e != 0 { return false } if skipBlank(p.s, p.p) != -int(types.ERR_EOF) { return false } return true } // SkipFast skip a json value in fast-skip algs, // while not strictly validate JSON syntax and UTF-8 charset. 
func _SkipFast(src string, i int) (int, int, error) { p := NewParserObj(src) p.p = i s, e := p.skipFast() if e != 0 { return -1, -1, p.ExportError(e) } t := switchRawType(p.s[s]) if t == _V_NONE { return -1, -1, ErrNotExist } if t == _V_NUMBER { p.p = 1 + backward(p.s, p.p-1) } return s, p.p, nil }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/bytedance/sonic/ast/decode.go
vendor/github.com/bytedance/sonic/ast/decode.go
/* * Copyright 2022 ByteDance Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package ast import ( "encoding/base64" "runtime" "strconv" "unsafe" "github.com/bytedance/sonic/internal/native/types" "github.com/bytedance/sonic/internal/rt" "github.com/bytedance/sonic/internal/utils" ) // Hack: this is used for both checking space and cause friendly compile errors in 32-bit arch. const _Sonic_Not_Support_32Bit_Arch__Checking_32Bit_Arch_Here = (1 << ' ') | (1 << '\t') | (1 << '\r') | (1 << '\n') var bytesNull = []byte("null") const ( strNull = "null" bytesTrue = "true" bytesFalse = "false" bytesObject = "{}" bytesArray = "[]" ) func isSpace(c byte) bool { return (int(1<<c) & _Sonic_Not_Support_32Bit_Arch__Checking_32Bit_Arch_Here) != 0 } //go:nocheckptr func skipBlank(src string, pos int) int { se := uintptr(rt.IndexChar(src, len(src))) sp := uintptr(rt.IndexChar(src, pos)) for sp < se { if !isSpace(*(*byte)(unsafe.Pointer(sp))) { break } sp += 1 } if sp >= se { return -int(types.ERR_EOF) } runtime.KeepAlive(src) return int(sp - uintptr(rt.IndexChar(src, 0))) } func decodeNull(src string, pos int) (ret int) { ret = pos + 4 if ret > len(src) { return -int(types.ERR_EOF) } if src[pos:ret] == strNull { return ret } else { return -int(types.ERR_INVALID_CHAR) } } func decodeTrue(src string, pos int) (ret int) { ret = pos + 4 if ret > len(src) { return -int(types.ERR_EOF) } if src[pos:ret] == bytesTrue { return ret } else { return -int(types.ERR_INVALID_CHAR) } } func 
decodeFalse(src string, pos int) (ret int) { ret = pos + 5 if ret > len(src) { return -int(types.ERR_EOF) } if src[pos:ret] == bytesFalse { return ret } return -int(types.ERR_INVALID_CHAR) } //go:nocheckptr func decodeString(src string, pos int) (ret int, v string) { ret, ep := skipString(src, pos) if ep == -1 { (*rt.GoString)(unsafe.Pointer(&v)).Ptr = rt.IndexChar(src, pos+1) (*rt.GoString)(unsafe.Pointer(&v)).Len = ret - pos - 2 return ret, v } vv, ok := unquoteBytes(rt.Str2Mem(src[pos:ret])) if !ok { return -int(types.ERR_INVALID_CHAR), "" } runtime.KeepAlive(src) return ret, rt.Mem2Str(vv) } func decodeBinary(src string, pos int) (ret int, v []byte) { var vv string ret, vv = decodeString(src, pos) if ret < 0 { return ret, nil } var err error v, err = base64.StdEncoding.DecodeString(vv) if err != nil { return -int(types.ERR_INVALID_CHAR), nil } return ret, v } func isDigit(c byte) bool { return c >= '0' && c <= '9' } //go:nocheckptr func decodeInt64(src string, pos int) (ret int, v int64, err error) { sp := uintptr(rt.IndexChar(src, pos)) ss := uintptr(sp) se := uintptr(rt.IndexChar(src, len(src))) if uintptr(sp) >= se { return -int(types.ERR_EOF), 0, nil } if c := *(*byte)(unsafe.Pointer(sp)); c == '-' { sp += 1 } if sp == se { return -int(types.ERR_EOF), 0, nil } for ; sp < se; sp += uintptr(1) { if !isDigit(*(*byte)(unsafe.Pointer(sp))) { break } } if sp < se { if c := *(*byte)(unsafe.Pointer(sp)); c == '.' 
|| c == 'e' || c == 'E' { return -int(types.ERR_INVALID_NUMBER_FMT), 0, nil } } var vv string ret = int(uintptr(sp) - uintptr((*rt.GoString)(unsafe.Pointer(&src)).Ptr)) (*rt.GoString)(unsafe.Pointer(&vv)).Ptr = unsafe.Pointer(ss) (*rt.GoString)(unsafe.Pointer(&vv)).Len = ret - pos v, err = strconv.ParseInt(vv, 10, 64) if err != nil { //NOTICE: allow overflow here if err.(*strconv.NumError).Err == strconv.ErrRange { return ret, 0, err } return -int(types.ERR_INVALID_CHAR), 0, err } runtime.KeepAlive(src) return ret, v, nil } func isNumberChars(c byte) bool { return (c >= '0' && c <= '9') || c == '+' || c == '-' || c == 'e' || c == 'E' || c == '.' } //go:nocheckptr func decodeFloat64(src string, pos int) (ret int, v float64, err error) { sp := uintptr(rt.IndexChar(src, pos)) ss := uintptr(sp) se := uintptr(rt.IndexChar(src, len(src))) if uintptr(sp) >= se { return -int(types.ERR_EOF), 0, nil } if c := *(*byte)(unsafe.Pointer(sp)); c == '-' { sp += 1 } if sp == se { return -int(types.ERR_EOF), 0, nil } for ; sp < se; sp += uintptr(1) { if !isNumberChars(*(*byte)(unsafe.Pointer(sp))) { break } } var vv string ret = int(uintptr(sp) - uintptr((*rt.GoString)(unsafe.Pointer(&src)).Ptr)) (*rt.GoString)(unsafe.Pointer(&vv)).Ptr = unsafe.Pointer(ss) (*rt.GoString)(unsafe.Pointer(&vv)).Len = ret - pos v, err = strconv.ParseFloat(vv, 64) if err != nil { //NOTICE: allow overflow here if err.(*strconv.NumError).Err == strconv.ErrRange { return ret, 0, err } return -int(types.ERR_INVALID_CHAR), 0, err } runtime.KeepAlive(src) return ret, v, nil } func decodeValue(src string, pos int, skipnum bool) (ret int, v types.JsonState) { pos = skipBlank(src, pos) if pos < 0 { return pos, types.JsonState{Vt: types.ValueType(pos)} } switch c := src[pos]; c { case 'n': ret = decodeNull(src, pos) if ret < 0 { return ret, types.JsonState{Vt: types.ValueType(ret)} } return ret, types.JsonState{Vt: types.V_NULL} case '"': var ep int ret, ep = skipString(src, pos) if ret < 0 { return ret, 
types.JsonState{Vt: types.ValueType(ret)} } return ret, types.JsonState{Vt: types.V_STRING, Iv: int64(pos + 1), Ep: ep} case '{': return pos + 1, types.JsonState{Vt: types.V_OBJECT} case '[': return pos + 1, types.JsonState{Vt: types.V_ARRAY} case 't': ret = decodeTrue(src, pos) if ret < 0 { return ret, types.JsonState{Vt: types.ValueType(ret)} } return ret, types.JsonState{Vt: types.V_TRUE} case 'f': ret = decodeFalse(src, pos) if ret < 0 { return ret, types.JsonState{Vt: types.ValueType(ret)} } return ret, types.JsonState{Vt: types.V_FALSE} case '-', '+', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': if skipnum { ret = skipNumber(src, pos) if ret >= 0 { return ret, types.JsonState{Vt: types.V_DOUBLE, Iv: 0, Ep: pos} } else { return ret, types.JsonState{Vt: types.ValueType(ret)} } } else { var iv int64 ret, iv, _ = decodeInt64(src, pos) if ret >= 0 { return ret, types.JsonState{Vt: types.V_INTEGER, Iv: iv, Ep: pos} } else if ret != -int(types.ERR_INVALID_NUMBER_FMT) { return ret, types.JsonState{Vt: types.ValueType(ret)} } var fv float64 ret, fv, _ = decodeFloat64(src, pos) if ret >= 0 { return ret, types.JsonState{Vt: types.V_DOUBLE, Dv: fv, Ep: pos} } else { return ret, types.JsonState{Vt: types.ValueType(ret)} } } default: return -int(types.ERR_INVALID_CHAR), types.JsonState{Vt:-types.ValueType(types.ERR_INVALID_CHAR)} } } //go:nocheckptr func skipNumber(src string, pos int) (ret int) { return utils.SkipNumber(src, pos) } //go:nocheckptr func skipString(src string, pos int) (ret int, ep int) { if pos+1 >= len(src) { return -int(types.ERR_EOF), -1 } sp := uintptr(rt.IndexChar(src, pos)) se := uintptr(rt.IndexChar(src, len(src))) // not start with quote if *(*byte)(unsafe.Pointer(sp)) != '"' { return -int(types.ERR_INVALID_CHAR), -1 } sp += 1 ep = -1 for sp < se { c := *(*byte)(unsafe.Pointer(sp)) if c == '\\' { if ep == -1 { ep = int(uintptr(sp) - uintptr((*rt.GoString)(unsafe.Pointer(&src)).Ptr)) } sp += 2 continue } sp += 1 if c == '"' { return 
int(uintptr(sp) - uintptr((*rt.GoString)(unsafe.Pointer(&src)).Ptr)), ep } } runtime.KeepAlive(src) // not found the closed quote until EOF return -int(types.ERR_EOF), -1 } //go:nocheckptr func skipPair(src string, pos int, lchar byte, rchar byte) (ret int) { if pos+1 >= len(src) { return -int(types.ERR_EOF) } sp := uintptr(rt.IndexChar(src, pos)) se := uintptr(rt.IndexChar(src, len(src))) if *(*byte)(unsafe.Pointer(sp)) != lchar { return -int(types.ERR_INVALID_CHAR) } sp += 1 nbrace := 1 inquote := false for sp < se { c := *(*byte)(unsafe.Pointer(sp)) if c == '\\' { sp += 2 continue } else if c == '"' { inquote = !inquote } else if c == lchar { if !inquote { nbrace += 1 } } else if c == rchar { if !inquote { nbrace -= 1 if nbrace == 0 { sp += 1 break } } } sp += 1 } if nbrace != 0 { return -int(types.ERR_INVALID_CHAR) } runtime.KeepAlive(src) return int(uintptr(sp) - uintptr((*rt.GoString)(unsafe.Pointer(&src)).Ptr)) } func skipValueFast(src string, pos int) (ret int, start int) { pos = skipBlank(src, pos) if pos < 0 { return pos, -1 } switch c := src[pos]; c { case 'n': ret = decodeNull(src, pos) case '"': ret, _ = skipString(src, pos) case '{': ret = skipPair(src, pos, '{', '}') case '[': ret = skipPair(src, pos, '[', ']') case 't': ret = decodeTrue(src, pos) case 'f': ret = decodeFalse(src, pos) case '-', '+', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': ret = skipNumber(src, pos) default: ret = -int(types.ERR_INVALID_CHAR) } return ret, pos } func skipValue(src string, pos int) (ret int, start int) { pos = skipBlank(src, pos) if pos < 0 { return pos, -1 } switch c := src[pos]; c { case 'n': ret = decodeNull(src, pos) case '"': ret, _ = skipString(src, pos) case '{': ret, _ = skipObject(src, pos) case '[': ret, _ = skipArray(src, pos) case 't': ret = decodeTrue(src, pos) case 'f': ret = decodeFalse(src, pos) case '-', '+', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': ret = skipNumber(src, pos) default: ret = -int(types.ERR_INVALID_CHAR) } return ret, 
pos } func skipObject(src string, pos int) (ret int, start int) { start = skipBlank(src, pos) if start < 0 { return start, -1 } if src[start] != '{' { return -int(types.ERR_INVALID_CHAR), -1 } pos = start + 1 pos = skipBlank(src, pos) if pos < 0 { return pos, -1 } if src[pos] == '}' { return pos + 1, start } for { pos, _ = skipString(src, pos) if pos < 0 { return pos, -1 } pos = skipBlank(src, pos) if pos < 0 { return pos, -1 } if src[pos] != ':' { return -int(types.ERR_INVALID_CHAR), -1 } pos++ pos, _ = skipValue(src, pos) if pos < 0 { return pos, -1 } pos = skipBlank(src, pos) if pos < 0 { return pos, -1 } if src[pos] == '}' { return pos + 1, start } if src[pos] != ',' { return -int(types.ERR_INVALID_CHAR), -1 } pos++ pos = skipBlank(src, pos) if pos < 0 { return pos, -1 } } } func skipArray(src string, pos int) (ret int, start int) { start = skipBlank(src, pos) if start < 0 { return start, -1 } if src[start] != '[' { return -int(types.ERR_INVALID_CHAR), -1 } pos = start + 1 pos = skipBlank(src, pos) if pos < 0 { return pos, -1 } if src[pos] == ']' { return pos + 1, start } for { pos, _ = skipValue(src, pos) if pos < 0 { return pos, -1 } pos = skipBlank(src, pos) if pos < 0 { return pos, -1 } if src[pos] == ']' { return pos + 1, start } if src[pos] != ',' { return -int(types.ERR_INVALID_CHAR), -1 } pos++ } } // DecodeString decodes a JSON string from pos and return golang string. 
// - needEsc indicates if to unescaped escaping chars // - hasEsc tells if the returned string has escaping chars // - validStr enables validating UTF8 charset // func _DecodeString(src string, pos int, needEsc bool, validStr bool) (v string, ret int, hasEsc bool) { p := NewParserObj(src) p.p = pos switch val := p.decodeValue(); val.Vt { case types.V_STRING: str := p.s[val.Iv : p.p-1] if validStr && !validate_utf8(str) { return "", -int(types.ERR_INVALID_UTF8), false } /* fast path: no escape sequence */ if val.Ep == -1 { return str, p.p, false } else if !needEsc { return str, p.p, true } /* unquote the string */ out, err := unquote(str) /* check for errors */ if err != 0 { return "", -int(err), true } else { return out, p.p, true } default: return "", -int(_ERR_UNSUPPORT_TYPE), false } }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/bytedance/sonic/ast/api_compat.go
vendor/github.com/bytedance/sonic/ast/api_compat.go
// +build !amd64,!arm64 go1.25 !go1.17 arm64,!go1.20 /* * Copyright 2022 ByteDance Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package ast import ( `encoding/json` `unicode/utf8` `github.com/bytedance/sonic/internal/native/types` `github.com/bytedance/sonic/internal/rt` `github.com/bytedance/sonic/internal/compat` ) func init() { compat.Warn("sonic/ast") } func quote(buf *[]byte, val string) { quoteString(buf, val) } // unquote unescapes an internal JSON string (it doesn't count quotas at the beginning and end) func unquote(src string) (string, types.ParsingError) { sp := rt.IndexChar(src, -1) out, ok := unquoteBytes(rt.BytesFrom(sp, len(src)+2, len(src)+2)) if !ok { return "", types.ERR_INVALID_ESCAPE } return rt.Mem2Str(out), 0 } func (self *Parser) decodeValue() (val types.JsonState) { e, v := decodeValue(self.s, self.p, self.dbuf == nil) if e < 0 { return v } self.p = e return v } func (self *Parser) skip() (int, types.ParsingError) { e, s := skipValue(self.s, self.p) if e < 0 { return self.p, types.ParsingError(-e) } self.p = e return s, 0 } func (self *Parser) skipFast() (int, types.ParsingError) { e, s := skipValueFast(self.s, self.p) if e < 0 { return self.p, types.ParsingError(-e) } self.p = e return s, 0 } func (self *Node) encodeInterface(buf *[]byte) error { out, err := json.Marshal(self.packAny()) if err != nil { return err } *buf = append(*buf, out...) 
return nil } func (self *Parser) getByPath(validate bool, path ...interface{}) (int, types.ParsingError) { for _, p := range path { if idx, ok := p.(int); ok && idx >= 0 { if err := self.searchIndex(idx); err != 0 { return self.p, err } } else if key, ok := p.(string); ok { if err := self.searchKey(key); err != 0 { return self.p, err } } else { panic("path must be either int(>=0) or string") } } var start int var e types.ParsingError if validate { start, e = self.skip() } else { start, e = self.skipFast() } if e != 0 { return self.p, e } return start, 0 } func validate_utf8(str string) bool { return utf8.ValidString(str) }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/bytedance/sonic/ast/visitor.go
vendor/github.com/bytedance/sonic/ast/visitor.go
/* * Copyright 2021 ByteDance Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package ast import ( `encoding/json` `errors` `github.com/bytedance/sonic/internal/native/types` ) // Visitor handles the callbacks during preorder traversal of a JSON AST. // // According to the JSON RFC8259, a JSON AST can be defined by // the following rules without separator / whitespace tokens. // // JSON-AST = value // value = false / null / true / object / array / number / string // object = begin-object [ member *( member ) ] end-object // member = string value // array = begin-array [ value *( value ) ] end-array // type Visitor interface { // OnNull handles a JSON null value. OnNull() error // OnBool handles a JSON true / false value. OnBool(v bool) error // OnString handles a JSON string value. OnString(v string) error // OnInt64 handles a JSON number value with int64 type. OnInt64(v int64, n json.Number) error // OnFloat64 handles a JSON number value with float64 type. OnFloat64(v float64, n json.Number) error // OnObjectBegin handles the beginning of a JSON object value with a // suggested capacity that can be used to make your custom object container. // // After this point the visitor will receive a sequence of callbacks like // [string, value, string, value, ......, ObjectEnd]. // // Note: // 1. This is a recursive definition which means the value can // also be a JSON object / array described by a sequence of callbacks. // 2. 
The suggested capacity will be 0 if current object is empty. // 3. Currently sonic use a fixed capacity for non-empty object (keep in // sync with ast.Node) which might not be very suitable. This may be // improved in future version. OnObjectBegin(capacity int) error // OnObjectKey handles a JSON object key string in member. OnObjectKey(key string) error // OnObjectEnd handles the ending of a JSON object value. OnObjectEnd() error // OnArrayBegin handles the beginning of a JSON array value with a // suggested capacity that can be used to make your custom array container. // // After this point the visitor will receive a sequence of callbacks like // [value, value, value, ......, ArrayEnd]. // // Note: // 1. This is a recursive definition which means the value can // also be a JSON object / array described by a sequence of callbacks. // 2. The suggested capacity will be 0 if current array is empty. // 3. Currently sonic use a fixed capacity for non-empty array (keep in // sync with ast.Node) which might not be very suitable. This may be // improved in future version. OnArrayBegin(capacity int) error // OnArrayEnd handles the ending of a JSON array value. OnArrayEnd() error } // VisitorOptions contains all Visitor's options. The default value is an // empty VisitorOptions{}. type VisitorOptions struct { // OnlyNumber indicates parser to directly return number value without // conversion, then the first argument of OnInt64 / OnFloat64 will always // be zero. OnlyNumber bool } var defaultVisitorOptions = &VisitorOptions{} // Preorder decodes the whole JSON string and callbacks each AST node to visitor // during preorder traversal. Any visitor method with an error returned will // break the traversal and the given error will be directly returned. The opts // argument can be reused after every call. 
func Preorder(str string, visitor Visitor, opts *VisitorOptions) error { if opts == nil { opts = defaultVisitorOptions } // process VisitorOptions first to guarantee that all options will be // constant during decoding and make options more readable. var ( optDecodeNumber = !opts.OnlyNumber ) tv := &traverser{ parser: Parser{ s: str, noLazy: true, skipValue: false, }, visitor: visitor, } if optDecodeNumber { tv.parser.decodeNumber(true) } err := tv.decodeValue() if optDecodeNumber { tv.parser.decodeNumber(false) } return err } type traverser struct { parser Parser visitor Visitor } // NOTE: keep in sync with (*Parser).Parse method. func (self *traverser) decodeValue() error { switch val := self.parser.decodeValue(); val.Vt { case types.V_EOF: return types.ERR_EOF case types.V_NULL: return self.visitor.OnNull() case types.V_TRUE: return self.visitor.OnBool(true) case types.V_FALSE: return self.visitor.OnBool(false) case types.V_STRING: return self.decodeString(val.Iv, val.Ep) case types.V_DOUBLE: return self.visitor.OnFloat64(val.Dv, json.Number(self.parser.s[val.Ep:self.parser.p])) case types.V_INTEGER: return self.visitor.OnInt64(val.Iv, json.Number(self.parser.s[val.Ep:self.parser.p])) case types.V_ARRAY: return self.decodeArray() case types.V_OBJECT: return self.decodeObject() default: return types.ParsingError(-val.Vt) } } // NOTE: keep in sync with (*Parser).decodeArray method. 
func (self *traverser) decodeArray() error { sp := self.parser.p ns := len(self.parser.s) /* allocate array space and parse every element */ if err := self.visitor.OnArrayBegin(_DEFAULT_NODE_CAP); err != nil { if err == VisitOPSkip { // NOTICE: for user needs to skip entiry object self.parser.p -= 1 if _, e := self.parser.skipFast(); e != 0 { return e } return self.visitor.OnArrayEnd() } return err } /* check for EOF */ self.parser.p = self.parser.lspace(sp) if self.parser.p >= ns { return types.ERR_EOF } /* check for empty array */ if self.parser.s[self.parser.p] == ']' { self.parser.p++ return self.visitor.OnArrayEnd() } for { /* decode the value */ if err := self.decodeValue(); err != nil { return err } self.parser.p = self.parser.lspace(self.parser.p) /* check for EOF */ if self.parser.p >= ns { return types.ERR_EOF } /* check for the next character */ switch self.parser.s[self.parser.p] { case ',': self.parser.p++ case ']': self.parser.p++ return self.visitor.OnArrayEnd() default: return types.ERR_INVALID_CHAR } } } // NOTE: keep in sync with (*Parser).decodeObject method. 
func (self *traverser) decodeObject() error { sp := self.parser.p ns := len(self.parser.s) /* allocate object space and decode each pair */ if err := self.visitor.OnObjectBegin(_DEFAULT_NODE_CAP); err != nil { if err == VisitOPSkip { // NOTICE: for user needs to skip entiry object self.parser.p -= 1 if _, e := self.parser.skipFast(); e != 0 { return e } return self.visitor.OnObjectEnd() } return err } /* check for EOF */ self.parser.p = self.parser.lspace(sp) if self.parser.p >= ns { return types.ERR_EOF } /* check for empty object */ if self.parser.s[self.parser.p] == '}' { self.parser.p++ return self.visitor.OnObjectEnd() } for { var njs types.JsonState var err types.ParsingError /* decode the key */ if njs = self.parser.decodeValue(); njs.Vt != types.V_STRING { return types.ERR_INVALID_CHAR } /* extract the key */ idx := self.parser.p - 1 key := self.parser.s[njs.Iv:idx] /* check for escape sequence */ if njs.Ep != -1 { if key, err = unquote(key); err != 0 { return err } } if err := self.visitor.OnObjectKey(key); err != nil { return err } /* expect a ':' delimiter */ if err = self.parser.delim(); err != 0 { return err } /* decode the value */ if err := self.decodeValue(); err != nil { return err } self.parser.p = self.parser.lspace(self.parser.p) /* check for EOF */ if self.parser.p >= ns { return types.ERR_EOF } /* check for the next character */ switch self.parser.s[self.parser.p] { case ',': self.parser.p++ case '}': self.parser.p++ return self.visitor.OnObjectEnd() default: return types.ERR_INVALID_CHAR } } } // NOTE: keep in sync with (*Parser).decodeString method. 
func (self *traverser) decodeString(iv int64, ep int) error { p := self.parser.p - 1 s := self.parser.s[iv:p] /* fast path: no escape sequence */ if ep == -1 { return self.visitor.OnString(s) } /* unquote the string */ out, err := unquote(s) if err != 0 { return err } return self.visitor.OnString(out) } // If visitor return this error on `OnObjectBegin()` or `OnArrayBegin()`, // the transverer will skip entiry object or array var VisitOPSkip = errors.New("")
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/bytedance/sonic/utf8/utf8.go
vendor/github.com/bytedance/sonic/utf8/utf8.go
/* * Copyright 2022 ByteDance Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package utf8 import ( `runtime` `github.com/bytedance/sonic/internal/rt` `github.com/bytedance/sonic/internal/native/types` `github.com/bytedance/sonic/internal/native` ) // CorrectWith corrects the invalid utf8 byte with repl string. func CorrectWith(dst []byte, src []byte, repl string) []byte { sstr := rt.Mem2Str(src) sidx := 0 /* state machine records the invalid positions */ m := types.NewStateMachine() m.Sp = 0 // invalid utf8 numbers for sidx < len(sstr) { scur := sidx ecode := native.ValidateUTF8(&sstr, &sidx, m) if m.Sp != 0 { if m.Sp > len(sstr) { panic("numbers of invalid utf8 exceed the string len!") } } for i := 0; i < m.Sp; i++ { ipos := m.Vt[i] // invalid utf8 position dst = append(dst, sstr[scur:ipos]...) dst = append(dst, repl...) scur = m.Vt[i] + 1 } /* append the remained valid utf8 bytes */ dst = append(dst, sstr[scur:sidx]...) /* not enough space, reset and continue */ if ecode != 0 { m.Sp = 0 } } types.FreeStateMachine(m) return dst } // Validate is a simd-accelereated drop-in replacement for the standard library's utf8.Valid. func Validate(src []byte) bool { if src == nil { return true } return ValidateString(rt.Mem2Str(src)) } // ValidateString as Validate, but for string. func ValidateString(src string) bool { if src == "" { return true } ret := native.ValidateUTF8Fast(&src) == 0 runtime.KeepAlive(src) return ret }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/twitchyliquid64/golang-asm/obj/data.go
vendor/github.com/twitchyliquid64/golang-asm/obj/data.go
// Derived from Inferno utils/6l/obj.c and utils/6l/span.c // https://bitbucket.org/inferno-os/inferno-os/src/master/utils/6l/obj.c // https://bitbucket.org/inferno-os/inferno-os/src/master/utils/6l/span.c // // Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. // Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) // Portions Copyright © 1997-1999 Vita Nuova Limited // Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com) // Portions Copyright © 2004,2006 Bruce Ellis // Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) // Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others // Portions Copyright © 2009 The Go Authors. All rights reserved. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. package obj import ( "github.com/twitchyliquid64/golang-asm/objabi" "log" "math" ) // Grow increases the length of s.P to lsiz. 
func (s *LSym) Grow(lsiz int64) { siz := int(lsiz) if int64(siz) != lsiz { log.Fatalf("LSym.Grow size %d too long", lsiz) } if len(s.P) >= siz { return } s.P = append(s.P, make([]byte, siz-len(s.P))...) } // GrowCap increases the capacity of s.P to c. func (s *LSym) GrowCap(c int64) { if int64(cap(s.P)) >= c { return } if s.P == nil { s.P = make([]byte, 0, c) return } b := make([]byte, len(s.P), c) copy(b, s.P) s.P = b } // prepwrite prepares to write data of size siz into s at offset off. func (s *LSym) prepwrite(ctxt *Link, off int64, siz int) { if off < 0 || siz < 0 || off >= 1<<30 { ctxt.Diag("prepwrite: bad off=%d siz=%d s=%v", off, siz, s) } switch s.Type { case objabi.Sxxx, objabi.SBSS: s.Type = objabi.SDATA case objabi.SNOPTRBSS: s.Type = objabi.SNOPTRDATA case objabi.STLSBSS: ctxt.Diag("cannot supply data for %v var %v", s.Type, s.Name) } l := off + int64(siz) s.Grow(l) if l > s.Size { s.Size = l } } // WriteFloat32 writes f into s at offset off. func (s *LSym) WriteFloat32(ctxt *Link, off int64, f float32) { s.prepwrite(ctxt, off, 4) ctxt.Arch.ByteOrder.PutUint32(s.P[off:], math.Float32bits(f)) } // WriteFloat64 writes f into s at offset off. func (s *LSym) WriteFloat64(ctxt *Link, off int64, f float64) { s.prepwrite(ctxt, off, 8) ctxt.Arch.ByteOrder.PutUint64(s.P[off:], math.Float64bits(f)) } // WriteInt writes an integer i of size siz into s at offset off. func (s *LSym) WriteInt(ctxt *Link, off int64, siz int, i int64) { s.prepwrite(ctxt, off, siz) switch siz { default: ctxt.Diag("WriteInt: bad integer size: %d", siz) case 1: s.P[off] = byte(i) case 2: ctxt.Arch.ByteOrder.PutUint16(s.P[off:], uint16(i)) case 4: ctxt.Arch.ByteOrder.PutUint32(s.P[off:], uint32(i)) case 8: ctxt.Arch.ByteOrder.PutUint64(s.P[off:], uint64(i)) } } func (s *LSym) writeAddr(ctxt *Link, off int64, siz int, rsym *LSym, roff int64, rtype objabi.RelocType) { // Allow 4-byte addresses for DWARF. 
if siz != ctxt.Arch.PtrSize && siz != 4 { ctxt.Diag("WriteAddr: bad address size %d in %s", siz, s.Name) } s.prepwrite(ctxt, off, siz) r := Addrel(s) r.Off = int32(off) if int64(r.Off) != off { ctxt.Diag("WriteAddr: off overflow %d in %s", off, s.Name) } r.Siz = uint8(siz) r.Sym = rsym r.Type = rtype r.Add = roff } // WriteAddr writes an address of size siz into s at offset off. // rsym and roff specify the relocation for the address. func (s *LSym) WriteAddr(ctxt *Link, off int64, siz int, rsym *LSym, roff int64) { s.writeAddr(ctxt, off, siz, rsym, roff, objabi.R_ADDR) } // WriteCURelativeAddr writes a pointer-sized address into s at offset off. // rsym and roff specify the relocation for the address which will be // resolved by the linker to an offset from the DW_AT_low_pc attribute of // the DWARF Compile Unit of rsym. func (s *LSym) WriteCURelativeAddr(ctxt *Link, off int64, rsym *LSym, roff int64) { s.writeAddr(ctxt, off, ctxt.Arch.PtrSize, rsym, roff, objabi.R_ADDRCUOFF) } // WriteOff writes a 4 byte offset to rsym+roff into s at offset off. // After linking the 4 bytes stored at s+off will be // rsym+roff-(start of section that s is in). func (s *LSym) WriteOff(ctxt *Link, off int64, rsym *LSym, roff int64) { s.prepwrite(ctxt, off, 4) r := Addrel(s) r.Off = int32(off) if int64(r.Off) != off { ctxt.Diag("WriteOff: off overflow %d in %s", off, s.Name) } r.Siz = 4 r.Sym = rsym r.Type = objabi.R_ADDROFF r.Add = roff } // WriteWeakOff writes a weak 4 byte offset to rsym+roff into s at offset off. // After linking the 4 bytes stored at s+off will be // rsym+roff-(start of section that s is in). func (s *LSym) WriteWeakOff(ctxt *Link, off int64, rsym *LSym, roff int64) { s.prepwrite(ctxt, off, 4) r := Addrel(s) r.Off = int32(off) if int64(r.Off) != off { ctxt.Diag("WriteOff: off overflow %d in %s", off, s.Name) } r.Siz = 4 r.Sym = rsym r.Type = objabi.R_WEAKADDROFF r.Add = roff } // WriteString writes a string of size siz into s at offset off. 
func (s *LSym) WriteString(ctxt *Link, off int64, siz int, str string) { if siz < len(str) { ctxt.Diag("WriteString: bad string size: %d < %d", siz, len(str)) } s.prepwrite(ctxt, off, siz) copy(s.P[off:off+int64(siz)], str) } // WriteBytes writes a slice of bytes into s at offset off. func (s *LSym) WriteBytes(ctxt *Link, off int64, b []byte) int64 { s.prepwrite(ctxt, off, len(b)) copy(s.P[off:], b) return off + int64(len(b)) } func Addrel(s *LSym) *Reloc { if s.R == nil { s.R = make([]Reloc, 0, 4) } s.R = append(s.R, Reloc{}) return &s.R[len(s.R)-1] }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/twitchyliquid64/golang-asm/obj/sym.go
vendor/github.com/twitchyliquid64/golang-asm/obj/sym.go
// Derived from Inferno utils/6l/obj.c and utils/6l/span.c // https://bitbucket.org/inferno-os/inferno-os/src/master/utils/6l/obj.c // https://bitbucket.org/inferno-os/inferno-os/src/master/utils/6l/span.c // // Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. // Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) // Portions Copyright © 1997-1999 Vita Nuova Limited // Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com) // Portions Copyright © 2004,2006 Bruce Ellis // Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) // Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others // Portions Copyright © 2009 The Go Authors. All rights reserved. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. 
package obj import ( "github.com/twitchyliquid64/golang-asm/goobj" "github.com/twitchyliquid64/golang-asm/objabi" "fmt" "log" "math" "sort" ) func Linknew(arch *LinkArch) *Link { ctxt := new(Link) ctxt.hash = make(map[string]*LSym) ctxt.funchash = make(map[string]*LSym) ctxt.statichash = make(map[string]*LSym) ctxt.Arch = arch ctxt.Pathname = objabi.WorkingDir() if err := ctxt.Headtype.Set(objabi.GOOS); err != nil { log.Fatalf("unknown goos %s", objabi.GOOS) } ctxt.Flag_optimize = true return ctxt } // LookupDerived looks up or creates the symbol with name name derived from symbol s. // The resulting symbol will be static iff s is. func (ctxt *Link) LookupDerived(s *LSym, name string) *LSym { if s.Static() { return ctxt.LookupStatic(name) } return ctxt.Lookup(name) } // LookupStatic looks up the static symbol with name name. // If it does not exist, it creates it. func (ctxt *Link) LookupStatic(name string) *LSym { s := ctxt.statichash[name] if s == nil { s = &LSym{Name: name, Attribute: AttrStatic} ctxt.statichash[name] = s } return s } // LookupABI looks up a symbol with the given ABI. // If it does not exist, it creates it. func (ctxt *Link) LookupABI(name string, abi ABI) *LSym { return ctxt.LookupABIInit(name, abi, nil) } // LookupABI looks up a symbol with the given ABI. // If it does not exist, it creates it and // passes it to init for one-time initialization. func (ctxt *Link) LookupABIInit(name string, abi ABI, init func(s *LSym)) *LSym { var hash map[string]*LSym switch abi { case ABI0: hash = ctxt.hash case ABIInternal: hash = ctxt.funchash default: panic("unknown ABI") } ctxt.hashmu.Lock() s := hash[name] if s == nil { s = &LSym{Name: name} s.SetABI(abi) hash[name] = s if init != nil { init(s) } } ctxt.hashmu.Unlock() return s } // Lookup looks up the symbol with name name. // If it does not exist, it creates it. func (ctxt *Link) Lookup(name string) *LSym { return ctxt.LookupInit(name, nil) } // LookupInit looks up the symbol with name name. 
// If it does not exist, it creates it and // passes it to init for one-time initialization. func (ctxt *Link) LookupInit(name string, init func(s *LSym)) *LSym { ctxt.hashmu.Lock() s := ctxt.hash[name] if s == nil { s = &LSym{Name: name} ctxt.hash[name] = s if init != nil { init(s) } } ctxt.hashmu.Unlock() return s } func (ctxt *Link) Float32Sym(f float32) *LSym { i := math.Float32bits(f) name := fmt.Sprintf("$f32.%08x", i) return ctxt.LookupInit(name, func(s *LSym) { s.Size = 4 s.WriteFloat32(ctxt, 0, f) s.Type = objabi.SRODATA s.Set(AttrLocal, true) s.Set(AttrContentAddressable, true) ctxt.constSyms = append(ctxt.constSyms, s) }) } func (ctxt *Link) Float64Sym(f float64) *LSym { i := math.Float64bits(f) name := fmt.Sprintf("$f64.%016x", i) return ctxt.LookupInit(name, func(s *LSym) { s.Size = 8 s.WriteFloat64(ctxt, 0, f) s.Type = objabi.SRODATA s.Set(AttrLocal, true) s.Set(AttrContentAddressable, true) ctxt.constSyms = append(ctxt.constSyms, s) }) } func (ctxt *Link) Int64Sym(i int64) *LSym { name := fmt.Sprintf("$i64.%016x", uint64(i)) return ctxt.LookupInit(name, func(s *LSym) { s.Size = 8 s.WriteInt(ctxt, 0, 8, i) s.Type = objabi.SRODATA s.Set(AttrLocal, true) s.Set(AttrContentAddressable, true) ctxt.constSyms = append(ctxt.constSyms, s) }) } // Assign index to symbols. // asm is set to true if this is called by the assembler (i.e. not the compiler), // in which case all the symbols are non-package (for now). func (ctxt *Link) NumberSyms() { if ctxt.Headtype == objabi.Haix { // Data must be sorted to keep a constant order in TOC symbols. // As they are created during Progedit, two symbols can be switched between // two different compilations. Therefore, BuildID will be different. // TODO: find a better place and optimize to only sort TOC symbols sort.Slice(ctxt.Data, func(i, j int) bool { return ctxt.Data[i].Name < ctxt.Data[j].Name }) } // Constant symbols are created late in the concurrent phase. Sort them // to ensure a deterministic order. 
sort.Slice(ctxt.constSyms, func(i, j int) bool { return ctxt.constSyms[i].Name < ctxt.constSyms[j].Name }) ctxt.Data = append(ctxt.Data, ctxt.constSyms...) ctxt.constSyms = nil ctxt.pkgIdx = make(map[string]int32) ctxt.defs = []*LSym{} ctxt.hashed64defs = []*LSym{} ctxt.hasheddefs = []*LSym{} ctxt.nonpkgdefs = []*LSym{} var idx, hashedidx, hashed64idx, nonpkgidx int32 ctxt.traverseSyms(traverseDefs, func(s *LSym) { // if Pkgpath is unknown, cannot hash symbols with relocations, as it // may reference named symbols whose names are not fully expanded. if s.ContentAddressable() && (ctxt.Pkgpath != "" || len(s.R) == 0) { if len(s.P) <= 8 && len(s.R) == 0 { // we can use short hash only for symbols without relocations s.PkgIdx = goobj.PkgIdxHashed64 s.SymIdx = hashed64idx if hashed64idx != int32(len(ctxt.hashed64defs)) { panic("bad index") } ctxt.hashed64defs = append(ctxt.hashed64defs, s) hashed64idx++ } else { s.PkgIdx = goobj.PkgIdxHashed s.SymIdx = hashedidx if hashedidx != int32(len(ctxt.hasheddefs)) { panic("bad index") } ctxt.hasheddefs = append(ctxt.hasheddefs, s) hashedidx++ } } else if isNonPkgSym(ctxt, s) { s.PkgIdx = goobj.PkgIdxNone s.SymIdx = nonpkgidx if nonpkgidx != int32(len(ctxt.nonpkgdefs)) { panic("bad index") } ctxt.nonpkgdefs = append(ctxt.nonpkgdefs, s) nonpkgidx++ } else { s.PkgIdx = goobj.PkgIdxSelf s.SymIdx = idx if idx != int32(len(ctxt.defs)) { panic("bad index") } ctxt.defs = append(ctxt.defs, s) idx++ } s.Set(AttrIndexed, true) }) ipkg := int32(1) // 0 is invalid index nonpkgdef := nonpkgidx ctxt.traverseSyms(traverseRefs|traverseAux, func(rs *LSym) { if rs.PkgIdx != goobj.PkgIdxInvalid { return } if !ctxt.Flag_linkshared { // Assign special index for builtin symbols. // Don't do it when linking against shared libraries, as the runtime // may be in a different library. 
if i := goobj.BuiltinIdx(rs.Name, int(rs.ABI())); i != -1 { rs.PkgIdx = goobj.PkgIdxBuiltin rs.SymIdx = int32(i) rs.Set(AttrIndexed, true) return } } pkg := rs.Pkg if rs.ContentAddressable() { // for now, only support content-addressable symbols that are always locally defined. panic("hashed refs unsupported for now") } if pkg == "" || pkg == "\"\"" || pkg == "_" || !rs.Indexed() { rs.PkgIdx = goobj.PkgIdxNone rs.SymIdx = nonpkgidx rs.Set(AttrIndexed, true) if nonpkgidx != nonpkgdef+int32(len(ctxt.nonpkgrefs)) { panic("bad index") } ctxt.nonpkgrefs = append(ctxt.nonpkgrefs, rs) nonpkgidx++ return } if k, ok := ctxt.pkgIdx[pkg]; ok { rs.PkgIdx = k return } rs.PkgIdx = ipkg ctxt.pkgIdx[pkg] = ipkg ipkg++ }) } // Returns whether s is a non-package symbol, which needs to be referenced // by name instead of by index. func isNonPkgSym(ctxt *Link, s *LSym) bool { if ctxt.IsAsm && !s.Static() { // asm symbols are referenced by name only, except static symbols // which are file-local and can be referenced by index. return true } if ctxt.Flag_linkshared { // The referenced symbol may be in a different shared library so // the linker cannot see its index. return true } if s.Pkg == "_" { // The frontend uses package "_" to mark symbols that should not // be referenced by index, e.g. linkname'd symbols. return true } if s.DuplicateOK() { // Dupok symbol needs to be dedup'd by name. return true } return false } // StaticNamePref is the prefix the front end applies to static temporary // variables. When turned into LSyms, these can be tagged as static so // as to avoid inserting them into the linker's name lookup tables. const StaticNamePref = ".stmp_" type traverseFlag uint32 const ( traverseDefs traverseFlag = 1 << iota traverseRefs traverseAux traverseAll = traverseDefs | traverseRefs | traverseAux ) // Traverse symbols based on flag, call fn for each symbol. 
func (ctxt *Link) traverseSyms(flag traverseFlag, fn func(*LSym)) { lists := [][]*LSym{ctxt.Text, ctxt.Data, ctxt.ABIAliases} for _, list := range lists { for _, s := range list { if flag&traverseDefs != 0 { fn(s) } if flag&traverseRefs != 0 { for _, r := range s.R { if r.Sym != nil { fn(r.Sym) } } } if flag&traverseAux != 0 { if s.Gotype != nil { fn(s.Gotype) } if s.Type == objabi.STEXT { f := func(parent *LSym, aux *LSym) { fn(aux) } ctxt.traverseFuncAux(flag, s, f) } } } } } func (ctxt *Link) traverseFuncAux(flag traverseFlag, fsym *LSym, fn func(parent *LSym, aux *LSym)) { pc := &fsym.Func.Pcln if flag&traverseAux == 0 { // NB: should it become necessary to walk aux sym reloc references // without walking the aux syms themselves, this can be changed. panic("should not be here") } for _, d := range pc.Funcdata { if d != nil { fn(fsym, d) } } files := ctxt.PosTable.FileTable() usedFiles := make([]goobj.CUFileIndex, 0, len(pc.UsedFiles)) for f := range pc.UsedFiles { usedFiles = append(usedFiles, f) } sort.Slice(usedFiles, func(i, j int) bool { return usedFiles[i] < usedFiles[j] }) for _, f := range usedFiles { if filesym := ctxt.Lookup(files[f]); filesym != nil { fn(fsym, filesym) } } for _, call := range pc.InlTree.nodes { if call.Func != nil { fn(fsym, call.Func) } f, _ := linkgetlineFromPos(ctxt, call.Pos) if filesym := ctxt.Lookup(f); filesym != nil { fn(fsym, filesym) } } dwsyms := []*LSym{fsym.Func.dwarfRangesSym, fsym.Func.dwarfLocSym, fsym.Func.dwarfDebugLinesSym, fsym.Func.dwarfInfoSym} for _, dws := range dwsyms { if dws == nil || dws.Size == 0 { continue } fn(fsym, dws) if flag&traverseRefs != 0 { for _, r := range dws.R { if r.Sym != nil { fn(dws, r.Sym) } } } } } // Traverse aux symbols, calling fn for each sym/aux pair. 
func (ctxt *Link) traverseAuxSyms(flag traverseFlag, fn func(parent *LSym, aux *LSym)) { lists := [][]*LSym{ctxt.Text, ctxt.Data, ctxt.ABIAliases} for _, list := range lists { for _, s := range list { if s.Gotype != nil { if flag&traverseDefs != 0 { fn(s, s.Gotype) } } if s.Type != objabi.STEXT { continue } ctxt.traverseFuncAux(flag, s, fn) } } }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/twitchyliquid64/golang-asm/obj/addrtype_string.go
vendor/github.com/twitchyliquid64/golang-asm/obj/addrtype_string.go
// Code generated by "stringer -type AddrType"; DO NOT EDIT. package obj import "strconv" const _AddrType_name = "TYPE_NONETYPE_BRANCHTYPE_TEXTSIZETYPE_MEMTYPE_CONSTTYPE_FCONSTTYPE_SCONSTTYPE_REGTYPE_ADDRTYPE_SHIFTTYPE_REGREGTYPE_REGREG2TYPE_INDIRTYPE_REGLIST" var _AddrType_index = [...]uint8{0, 9, 20, 33, 41, 51, 62, 73, 81, 90, 100, 111, 123, 133, 145} func (i AddrType) String() string { if i >= AddrType(len(_AddrType_index)-1) { return "AddrType(" + strconv.FormatInt(int64(i), 10) + ")" } return _AddrType_name[_AddrType_index[i]:_AddrType_index[i+1]] }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/twitchyliquid64/golang-asm/obj/line.go
vendor/github.com/twitchyliquid64/golang-asm/obj/line.go
// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package obj import ( "github.com/twitchyliquid64/golang-asm/goobj" "github.com/twitchyliquid64/golang-asm/src" ) // AddImport adds a package to the list of imported packages. func (ctxt *Link) AddImport(pkg string, fingerprint goobj.FingerprintType) { ctxt.Imports = append(ctxt.Imports, goobj.ImportedPkg{Pkg: pkg, Fingerprint: fingerprint}) } func linkgetlineFromPos(ctxt *Link, xpos src.XPos) (f string, l int32) { pos := ctxt.PosTable.Pos(xpos) if !pos.IsKnown() { pos = src.Pos{} } // TODO(gri) Should this use relative or absolute line number? return pos.SymFilename(), int32(pos.RelLine()) } // getFileIndexAndLine returns the file index (local to the CU), and the line number for a position. func getFileIndexAndLine(ctxt *Link, xpos src.XPos) (int, int32) { f, l := linkgetlineFromPos(ctxt, xpos) return ctxt.PosTable.FileIndex(f), l }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/twitchyliquid64/golang-asm/obj/pcln.go
vendor/github.com/twitchyliquid64/golang-asm/obj/pcln.go
// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package obj import ( "github.com/twitchyliquid64/golang-asm/goobj" "encoding/binary" "log" ) // funcpctab writes to dst a pc-value table mapping the code in func to the values // returned by valfunc parameterized by arg. The invocation of valfunc to update the // current value is, for each p, // // val = valfunc(func, val, p, 0, arg); // record val as value at p->pc; // val = valfunc(func, val, p, 1, arg); // // where func is the function, val is the current value, p is the instruction being // considered, and arg can be used to further parameterize valfunc. func funcpctab(ctxt *Link, dst *Pcdata, func_ *LSym, desc string, valfunc func(*Link, *LSym, int32, *Prog, int32, interface{}) int32, arg interface{}) { dbg := desc == ctxt.Debugpcln dst.P = dst.P[:0] if dbg { ctxt.Logf("funcpctab %s [valfunc=%s]\n", func_.Name, desc) } val := int32(-1) oldval := val if func_.Func.Text == nil { return } pc := func_.Func.Text.Pc if dbg { ctxt.Logf("%6x %6d %v\n", uint64(pc), val, func_.Func.Text) } buf := make([]byte, binary.MaxVarintLen32) started := false for p := func_.Func.Text; p != nil; p = p.Link { // Update val. If it's not changing, keep going. val = valfunc(ctxt, func_, val, p, 0, arg) if val == oldval && started { val = valfunc(ctxt, func_, val, p, 1, arg) if dbg { ctxt.Logf("%6x %6s %v\n", uint64(p.Pc), "", p) } continue } // If the pc of the next instruction is the same as the // pc of this instruction, this instruction is not a real // instruction. Keep going, so that we only emit a delta // for a true instruction boundary in the program. 
if p.Link != nil && p.Link.Pc == p.Pc { val = valfunc(ctxt, func_, val, p, 1, arg) if dbg { ctxt.Logf("%6x %6s %v\n", uint64(p.Pc), "", p) } continue } // The table is a sequence of (value, pc) pairs, where each // pair states that the given value is in effect from the current position // up to the given pc, which becomes the new current position. // To generate the table as we scan over the program instructions, // we emit a "(value" when pc == func->value, and then // each time we observe a change in value we emit ", pc) (value". // When the scan is over, we emit the closing ", pc)". // // The table is delta-encoded. The value deltas are signed and // transmitted in zig-zag form, where a complement bit is placed in bit 0, // and the pc deltas are unsigned. Both kinds of deltas are sent // as variable-length little-endian base-128 integers, // where the 0x80 bit indicates that the integer continues. if dbg { ctxt.Logf("%6x %6d %v\n", uint64(p.Pc), val, p) } if started { pcdelta := (p.Pc - pc) / int64(ctxt.Arch.MinLC) n := binary.PutUvarint(buf, uint64(pcdelta)) dst.P = append(dst.P, buf[:n]...) pc = p.Pc } delta := val - oldval n := binary.PutVarint(buf, int64(delta)) dst.P = append(dst.P, buf[:n]...) oldval = val started = true val = valfunc(ctxt, func_, val, p, 1, arg) } if started { if dbg { ctxt.Logf("%6x done\n", uint64(func_.Func.Text.Pc+func_.Size)) } v := (func_.Size - pc) / int64(ctxt.Arch.MinLC) if v < 0 { ctxt.Diag("negative pc offset: %v", v) } n := binary.PutUvarint(buf, uint64(v)) dst.P = append(dst.P, buf[:n]...) // add terminating varint-encoded 0, which is just 0 dst.P = append(dst.P, 0) } if dbg { ctxt.Logf("wrote %d bytes to %p\n", len(dst.P), dst) for _, p := range dst.P { ctxt.Logf(" %02x", p) } ctxt.Logf("\n") } } // pctofileline computes either the file number (arg == 0) // or the line number (arg == 1) to use at p. // Because p.Pos applies to p, phase == 0 (before p) // takes care of the update. 
func pctofileline(ctxt *Link, sym *LSym, oldval int32, p *Prog, phase int32, arg interface{}) int32 { if p.As == ATEXT || p.As == ANOP || p.Pos.Line() == 0 || phase == 1 { return oldval } f, l := getFileIndexAndLine(ctxt, p.Pos) if arg == nil { return l } pcln := arg.(*Pcln) pcln.UsedFiles[goobj.CUFileIndex(f)] = struct{}{} return int32(f) } // pcinlineState holds the state used to create a function's inlining // tree and the PC-value table that maps PCs to nodes in that tree. type pcinlineState struct { globalToLocal map[int]int localTree InlTree } // addBranch adds a branch from the global inlining tree in ctxt to // the function's local inlining tree, returning the index in the local tree. func (s *pcinlineState) addBranch(ctxt *Link, globalIndex int) int { if globalIndex < 0 { return -1 } localIndex, ok := s.globalToLocal[globalIndex] if ok { return localIndex } // Since tracebacks don't include column information, we could // use one node for multiple calls of the same function on the // same line (e.g., f(x) + f(y)). For now, we use one node for // each inlined call. call := ctxt.InlTree.nodes[globalIndex] call.Parent = s.addBranch(ctxt, call.Parent) localIndex = len(s.localTree.nodes) s.localTree.nodes = append(s.localTree.nodes, call) s.globalToLocal[globalIndex] = localIndex return localIndex } func (s *pcinlineState) setParentPC(ctxt *Link, globalIndex int, pc int32) { localIndex, ok := s.globalToLocal[globalIndex] if !ok { // We know where to unwind to when we need to unwind a body identified // by globalIndex. But there may be no instructions generated by that // body (it's empty, or its instructions were CSEd with other things, etc.). // In that case, we don't need an unwind entry. // TODO: is this really right? Seems to happen a whole lot... return } s.localTree.setParentPC(localIndex, pc) } // pctoinline computes the index into the local inlining tree to use at p. // If p is not the result of inlining, pctoinline returns -1. 
Because p.Pos // applies to p, phase == 0 (before p) takes care of the update. func (s *pcinlineState) pctoinline(ctxt *Link, sym *LSym, oldval int32, p *Prog, phase int32, arg interface{}) int32 { if phase == 1 { return oldval } posBase := ctxt.PosTable.Pos(p.Pos).Base() if posBase == nil { return -1 } globalIndex := posBase.InliningIndex() if globalIndex < 0 { return -1 } if s.globalToLocal == nil { s.globalToLocal = make(map[int]int) } return int32(s.addBranch(ctxt, globalIndex)) } // pctospadj computes the sp adjustment in effect. // It is oldval plus any adjustment made by p itself. // The adjustment by p takes effect only after p, so we // apply the change during phase == 1. func pctospadj(ctxt *Link, sym *LSym, oldval int32, p *Prog, phase int32, arg interface{}) int32 { if oldval == -1 { // starting oldval = 0 } if phase == 0 { return oldval } if oldval+p.Spadj < -10000 || oldval+p.Spadj > 1100000000 { ctxt.Diag("overflow in spadj: %d + %d = %d", oldval, p.Spadj, oldval+p.Spadj) ctxt.DiagFlush() log.Fatalf("bad code") } return oldval + p.Spadj } // pctopcdata computes the pcdata value in effect at p. // A PCDATA instruction sets the value in effect at future // non-PCDATA instructions. // Since PCDATA instructions have no width in the final code, // it does not matter which phase we use for the update. func pctopcdata(ctxt *Link, sym *LSym, oldval int32, p *Prog, phase int32, arg interface{}) int32 { if phase == 0 || p.As != APCDATA || p.From.Offset != int64(arg.(uint32)) { return oldval } if int64(int32(p.To.Offset)) != p.To.Offset { ctxt.Diag("overflow in PCDATA instruction: %v", p) ctxt.DiagFlush() log.Fatalf("bad code") } return int32(p.To.Offset) } func linkpcln(ctxt *Link, cursym *LSym) { pcln := &cursym.Func.Pcln pcln.UsedFiles = make(map[goobj.CUFileIndex]struct{}) npcdata := 0 nfuncdata := 0 for p := cursym.Func.Text; p != nil; p = p.Link { // Find the highest ID of any used PCDATA table. 
This ignores PCDATA table // that consist entirely of "-1", since that's the assumed default value. // From.Offset is table ID // To.Offset is data if p.As == APCDATA && p.From.Offset >= int64(npcdata) && p.To.Offset != -1 { // ignore -1 as we start at -1, if we only see -1, nothing changed npcdata = int(p.From.Offset + 1) } // Find the highest ID of any FUNCDATA table. // From.Offset is table ID if p.As == AFUNCDATA && p.From.Offset >= int64(nfuncdata) { nfuncdata = int(p.From.Offset + 1) } } pcln.Pcdata = make([]Pcdata, npcdata) pcln.Pcdata = pcln.Pcdata[:npcdata] pcln.Funcdata = make([]*LSym, nfuncdata) pcln.Funcdataoff = make([]int64, nfuncdata) pcln.Funcdataoff = pcln.Funcdataoff[:nfuncdata] funcpctab(ctxt, &pcln.Pcsp, cursym, "pctospadj", pctospadj, nil) funcpctab(ctxt, &pcln.Pcfile, cursym, "pctofile", pctofileline, pcln) funcpctab(ctxt, &pcln.Pcline, cursym, "pctoline", pctofileline, nil) // Check that all the Progs used as inline markers are still reachable. // See issue #40473. inlMarkProgs := make(map[*Prog]struct{}, len(cursym.Func.InlMarks)) for _, inlMark := range cursym.Func.InlMarks { inlMarkProgs[inlMark.p] = struct{}{} } for p := cursym.Func.Text; p != nil; p = p.Link { if _, ok := inlMarkProgs[p]; ok { delete(inlMarkProgs, p) } } if len(inlMarkProgs) > 0 { ctxt.Diag("one or more instructions used as inline markers are no longer reachable") } pcinlineState := new(pcinlineState) funcpctab(ctxt, &pcln.Pcinline, cursym, "pctoinline", pcinlineState.pctoinline, nil) for _, inlMark := range cursym.Func.InlMarks { pcinlineState.setParentPC(ctxt, int(inlMark.id), int32(inlMark.p.Pc)) } pcln.InlTree = pcinlineState.localTree if ctxt.Debugpcln == "pctoinline" && len(pcln.InlTree.nodes) > 0 { ctxt.Logf("-- inlining tree for %s:\n", cursym) dumpInlTree(ctxt, pcln.InlTree) ctxt.Logf("--\n") } // tabulate which pc and func data we have. 
havepc := make([]uint32, (npcdata+31)/32) havefunc := make([]uint32, (nfuncdata+31)/32) for p := cursym.Func.Text; p != nil; p = p.Link { if p.As == AFUNCDATA { if (havefunc[p.From.Offset/32]>>uint64(p.From.Offset%32))&1 != 0 { ctxt.Diag("multiple definitions for FUNCDATA $%d", p.From.Offset) } havefunc[p.From.Offset/32] |= 1 << uint64(p.From.Offset%32) } if p.As == APCDATA && p.To.Offset != -1 { havepc[p.From.Offset/32] |= 1 << uint64(p.From.Offset%32) } } // pcdata. for i := 0; i < npcdata; i++ { if (havepc[i/32]>>uint(i%32))&1 == 0 { continue } funcpctab(ctxt, &pcln.Pcdata[i], cursym, "pctopcdata", pctopcdata, interface{}(uint32(i))) } // funcdata if nfuncdata > 0 { for p := cursym.Func.Text; p != nil; p = p.Link { if p.As != AFUNCDATA { continue } i := int(p.From.Offset) pcln.Funcdataoff[i] = p.To.Offset if p.To.Type != TYPE_CONST { // TODO: Dedup. //funcdata_bytes += p->to.sym->size; pcln.Funcdata[i] = p.To.Sym } } } } // PCIter iterates over encoded pcdata tables. type PCIter struct { p []byte PC uint32 NextPC uint32 PCScale uint32 Value int32 start bool Done bool } // newPCIter creates a PCIter with a scale factor for the PC step size. func NewPCIter(pcScale uint32) *PCIter { it := new(PCIter) it.PCScale = pcScale return it } // Next advances it to the Next pc. func (it *PCIter) Next() { it.PC = it.NextPC if it.Done { return } if len(it.p) == 0 { it.Done = true return } // Value delta val, n := binary.Varint(it.p) if n <= 0 { log.Fatalf("bad Value varint in pciterNext: read %v", n) } it.p = it.p[n:] if val == 0 && !it.start { it.Done = true return } it.start = false it.Value += int32(val) // pc delta pc, n := binary.Uvarint(it.p) if n <= 0 { log.Fatalf("bad pc varint in pciterNext: read %v", n) } it.p = it.p[n:] it.NextPC = it.PC + uint32(pc)*it.PCScale } // init prepares it to iterate over p, // and advances it to the first pc. func (it *PCIter) Init(p []byte) { it.p = p it.PC = 0 it.NextPC = 0 it.Value = -1 it.start = true it.Done = false it.Next() }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/twitchyliquid64/golang-asm/obj/ld.go
vendor/github.com/twitchyliquid64/golang-asm/obj/ld.go
// Derived from Inferno utils/6l/obj.c and utils/6l/span.c // https://bitbucket.org/inferno-os/inferno-os/src/master/utils/6l/obj.c // https://bitbucket.org/inferno-os/inferno-os/src/master/utils/6l/span.c // // Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. // Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) // Portions Copyright © 1997-1999 Vita Nuova Limited // Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com) // Portions Copyright © 2004,2006 Bruce Ellis // Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) // Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others // Portions Copyright © 2009 The Go Authors. All rights reserved. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. package obj /* * add library to library list. 
* srcref: src file referring to package * objref: object file referring to package * file: object file, e.g., /home/rsc/go/pkg/container/vector.a * pkg: package import path, e.g. container/vector */ const ( LOG = 5 ) func mkfwd(sym *LSym) { var dwn [LOG]int32 var cnt [LOG]int32 var lst [LOG]*Prog for i := 0; i < LOG; i++ { if i == 0 { cnt[i] = 1 } else { cnt[i] = LOG * cnt[i-1] } dwn[i] = 1 lst[i] = nil } i := 0 for p := sym.Func.Text; p != nil && p.Link != nil; p = p.Link { i-- if i < 0 { i = LOG - 1 } p.Forwd = nil dwn[i]-- if dwn[i] <= 0 { dwn[i] = cnt[i] if lst[i] != nil { lst[i].Forwd = p } lst[i] = p } } } func Appendp(q *Prog, newprog ProgAlloc) *Prog { p := newprog() p.Link = q.Link q.Link = p p.Pos = q.Pos return p }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/twitchyliquid64/golang-asm/obj/util.go
vendor/github.com/twitchyliquid64/golang-asm/obj/util.go
// Copyright 2015 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package obj import ( "bytes" "github.com/twitchyliquid64/golang-asm/objabi" "fmt" "io" "strings" ) const REG_NONE = 0 // Line returns a string containing the filename and line number for p func (p *Prog) Line() string { return p.Ctxt.OutermostPos(p.Pos).Format(false, true) } func (p *Prog) InnermostLine(w io.Writer) { p.Ctxt.InnermostPos(p.Pos).WriteTo(w, false, true) } // InnermostLineNumber returns a string containing the line number for the // innermost inlined function (if any inlining) at p's position func (p *Prog) InnermostLineNumber() string { return p.Ctxt.InnermostPos(p.Pos).LineNumber() } // InnermostLineNumberHTML returns a string containing the line number for the // innermost inlined function (if any inlining) at p's position func (p *Prog) InnermostLineNumberHTML() string { return p.Ctxt.InnermostPos(p.Pos).LineNumberHTML() } // InnermostFilename returns a string containing the innermost // (in inlining) filename at p's position func (p *Prog) InnermostFilename() string { // TODO For now, this is only used for debugging output, and if we need more/better information, it might change. // An example of what we might want to see is the full stack of positions for inlined code, so we get some visibility into what is recorded there. pos := p.Ctxt.InnermostPos(p.Pos) if !pos.IsKnown() { return "<unknown file name>" } return pos.Filename() } var armCondCode = []string{ ".EQ", ".NE", ".CS", ".CC", ".MI", ".PL", ".VS", ".VC", ".HI", ".LS", ".GE", ".LT", ".GT", ".LE", "", ".NV", } /* ARM scond byte */ const ( C_SCOND = (1 << 4) - 1 C_SBIT = 1 << 4 C_PBIT = 1 << 5 C_WBIT = 1 << 6 C_FBIT = 1 << 7 C_UBIT = 1 << 7 C_SCOND_XOR = 14 ) // CConv formats opcode suffix bits (Prog.Scond). 
func CConv(s uint8) string { if s == 0 { return "" } for i := range opSuffixSpace { sset := &opSuffixSpace[i] if sset.arch == objabi.GOARCH { return sset.cconv(s) } } return fmt.Sprintf("SC???%d", s) } // CConvARM formats ARM opcode suffix bits (mostly condition codes). func CConvARM(s uint8) string { // TODO: could be great to move suffix-related things into // ARM asm backends some day. // obj/x86 can be used as an example. sc := armCondCode[(s&C_SCOND)^C_SCOND_XOR] if s&C_SBIT != 0 { sc += ".S" } if s&C_PBIT != 0 { sc += ".P" } if s&C_WBIT != 0 { sc += ".W" } if s&C_UBIT != 0 { /* ambiguous with FBIT */ sc += ".U" } return sc } func (p *Prog) String() string { if p == nil { return "<nil Prog>" } if p.Ctxt == nil { return "<Prog without ctxt>" } return fmt.Sprintf("%.5d (%v)\t%s", p.Pc, p.Line(), p.InstructionString()) } func (p *Prog) InnermostString(w io.Writer) { if p == nil { io.WriteString(w, "<nil Prog>") return } if p.Ctxt == nil { io.WriteString(w, "<Prog without ctxt>") return } fmt.Fprintf(w, "%.5d (", p.Pc) p.InnermostLine(w) io.WriteString(w, ")\t") p.WriteInstructionString(w) } // InstructionString returns a string representation of the instruction without preceding // program counter or file and line number. func (p *Prog) InstructionString() string { buf := new(bytes.Buffer) p.WriteInstructionString(buf) return buf.String() } // WriteInstructionString writes a string representation of the instruction without preceding // program counter or file and line number. func (p *Prog) WriteInstructionString(w io.Writer) { if p == nil { io.WriteString(w, "<nil Prog>") return } if p.Ctxt == nil { io.WriteString(w, "<Prog without ctxt>") return } sc := CConv(p.Scond) io.WriteString(w, p.As.String()) io.WriteString(w, sc) sep := "\t" if p.From.Type != TYPE_NONE { io.WriteString(w, sep) WriteDconv(w, p, &p.From) sep = ", " } if p.Reg != REG_NONE { // Should not happen but might as well show it if it does. 
fmt.Fprintf(w, "%s%v", sep, Rconv(int(p.Reg))) sep = ", " } for i := range p.RestArgs { io.WriteString(w, sep) WriteDconv(w, p, &p.RestArgs[i]) sep = ", " } if p.As == ATEXT { // If there are attributes, print them. Otherwise, skip the comma. // In short, print one of these two: // TEXT foo(SB), DUPOK|NOSPLIT, $0 // TEXT foo(SB), $0 s := p.From.Sym.Attribute.TextAttrString() if s != "" { fmt.Fprintf(w, "%s%s", sep, s) sep = ", " } } if p.To.Type != TYPE_NONE { io.WriteString(w, sep) WriteDconv(w, p, &p.To) } if p.RegTo2 != REG_NONE { fmt.Fprintf(w, "%s%v", sep, Rconv(int(p.RegTo2))) } } func (ctxt *Link) NewProg() *Prog { p := new(Prog) p.Ctxt = ctxt return p } func (ctxt *Link) CanReuseProgs() bool { return ctxt.Debugasm == 0 } func Dconv(p *Prog, a *Addr) string { buf := new(bytes.Buffer) WriteDconv(buf, p, a) return buf.String() } func WriteDconv(w io.Writer, p *Prog, a *Addr) { switch a.Type { default: fmt.Fprintf(w, "type=%d", a.Type) case TYPE_NONE: if a.Name != NAME_NONE || a.Reg != 0 || a.Sym != nil { a.WriteNameTo(w) fmt.Fprintf(w, "(%v)(NONE)", Rconv(int(a.Reg))) } case TYPE_REG: // TODO(rsc): This special case is for x86 instructions like // PINSRQ CX,$1,X6 // where the $1 is included in the p->to Addr. // Move into a new field. 
if a.Offset != 0 && (a.Reg < RBaseARM64 || a.Reg >= RBaseMIPS) { fmt.Fprintf(w, "$%d,%v", a.Offset, Rconv(int(a.Reg))) return } if a.Name != NAME_NONE || a.Sym != nil { a.WriteNameTo(w) fmt.Fprintf(w, "(%v)(REG)", Rconv(int(a.Reg))) } else { io.WriteString(w, Rconv(int(a.Reg))) } if (RBaseARM64+1<<10+1<<9) /* arm64.REG_ELEM */ <= a.Reg && a.Reg < (RBaseARM64+1<<11) /* arm64.REG_ELEM_END */ { fmt.Fprintf(w, "[%d]", a.Index) } case TYPE_BRANCH: if a.Sym != nil { fmt.Fprintf(w, "%s(SB)", a.Sym.Name) } else if a.Target() != nil { fmt.Fprint(w, a.Target().Pc) } else { fmt.Fprintf(w, "%d(PC)", a.Offset) } case TYPE_INDIR: io.WriteString(w, "*") a.WriteNameTo(w) case TYPE_MEM: a.WriteNameTo(w) if a.Index != REG_NONE { if a.Scale == 0 { // arm64 shifted or extended register offset, scale = 0. fmt.Fprintf(w, "(%v)", Rconv(int(a.Index))) } else { fmt.Fprintf(w, "(%v*%d)", Rconv(int(a.Index)), int(a.Scale)) } } case TYPE_CONST: io.WriteString(w, "$") a.WriteNameTo(w) if a.Reg != 0 { fmt.Fprintf(w, "(%v)", Rconv(int(a.Reg))) } case TYPE_TEXTSIZE: if a.Val.(int32) == objabi.ArgsSizeUnknown { fmt.Fprintf(w, "$%d", a.Offset) } else { fmt.Fprintf(w, "$%d-%d", a.Offset, a.Val.(int32)) } case TYPE_FCONST: str := fmt.Sprintf("%.17g", a.Val.(float64)) // Make sure 1 prints as 1.0 if !strings.ContainsAny(str, ".e") { str += ".0" } fmt.Fprintf(w, "$(%s)", str) case TYPE_SCONST: fmt.Fprintf(w, "$%q", a.Val.(string)) case TYPE_ADDR: io.WriteString(w, "$") a.WriteNameTo(w) case TYPE_SHIFT: v := int(a.Offset) ops := "<<>>->@>" switch objabi.GOARCH { case "arm": op := ops[((v>>5)&3)<<1:] if v&(1<<4) != 0 { fmt.Fprintf(w, "R%d%c%cR%d", v&15, op[0], op[1], (v>>8)&15) } else { fmt.Fprintf(w, "R%d%c%c%d", v&15, op[0], op[1], (v>>7)&31) } if a.Reg != 0 { fmt.Fprintf(w, "(%v)", Rconv(int(a.Reg))) } case "arm64": op := ops[((v>>22)&3)<<1:] r := (v >> 16) & 31 fmt.Fprintf(w, "%s%c%c%d", Rconv(r+RBaseARM64), op[0], op[1], (v>>10)&63) default: panic("TYPE_SHIFT is not supported on " + objabi.GOARCH) } 
case TYPE_REGREG: fmt.Fprintf(w, "(%v, %v)", Rconv(int(a.Reg)), Rconv(int(a.Offset))) case TYPE_REGREG2: fmt.Fprintf(w, "%v, %v", Rconv(int(a.Offset)), Rconv(int(a.Reg))) case TYPE_REGLIST: io.WriteString(w, RLconv(a.Offset)) } } func (a *Addr) WriteNameTo(w io.Writer) { switch a.Name { default: fmt.Fprintf(w, "name=%d", a.Name) case NAME_NONE: switch { case a.Reg == REG_NONE: fmt.Fprint(w, a.Offset) case a.Offset == 0: fmt.Fprintf(w, "(%v)", Rconv(int(a.Reg))) case a.Offset != 0: fmt.Fprintf(w, "%d(%v)", a.Offset, Rconv(int(a.Reg))) } // Note: a.Reg == REG_NONE encodes the default base register for the NAME_ type. case NAME_EXTERN: reg := "SB" if a.Reg != REG_NONE { reg = Rconv(int(a.Reg)) } if a.Sym != nil { fmt.Fprintf(w, "%s%s(%s)", a.Sym.Name, offConv(a.Offset), reg) } else { fmt.Fprintf(w, "%s(%s)", offConv(a.Offset), reg) } case NAME_GOTREF: reg := "SB" if a.Reg != REG_NONE { reg = Rconv(int(a.Reg)) } if a.Sym != nil { fmt.Fprintf(w, "%s%s@GOT(%s)", a.Sym.Name, offConv(a.Offset), reg) } else { fmt.Fprintf(w, "%s@GOT(%s)", offConv(a.Offset), reg) } case NAME_STATIC: reg := "SB" if a.Reg != REG_NONE { reg = Rconv(int(a.Reg)) } if a.Sym != nil { fmt.Fprintf(w, "%s<>%s(%s)", a.Sym.Name, offConv(a.Offset), reg) } else { fmt.Fprintf(w, "<>%s(%s)", offConv(a.Offset), reg) } case NAME_AUTO: reg := "SP" if a.Reg != REG_NONE { reg = Rconv(int(a.Reg)) } if a.Sym != nil { fmt.Fprintf(w, "%s%s(%s)", a.Sym.Name, offConv(a.Offset), reg) } else { fmt.Fprintf(w, "%s(%s)", offConv(a.Offset), reg) } case NAME_PARAM: reg := "FP" if a.Reg != REG_NONE { reg = Rconv(int(a.Reg)) } if a.Sym != nil { fmt.Fprintf(w, "%s%s(%s)", a.Sym.Name, offConv(a.Offset), reg) } else { fmt.Fprintf(w, "%s(%s)", offConv(a.Offset), reg) } case NAME_TOCREF: reg := "SB" if a.Reg != REG_NONE { reg = Rconv(int(a.Reg)) } if a.Sym != nil { fmt.Fprintf(w, "%s%s(%s)", a.Sym.Name, offConv(a.Offset), reg) } else { fmt.Fprintf(w, "%s(%s)", offConv(a.Offset), reg) } } } func offConv(off int64) string { if off == 
0 { return "" } return fmt.Sprintf("%+d", off) } // opSuffixSet is like regListSet, but for opcode suffixes. // // Unlike some other similar structures, uint8 space is not // divided by its own values set (because there are only 256 of them). // Instead, every arch may interpret/format all 8 bits as they like, // as long as they register proper cconv function for it. type opSuffixSet struct { arch string cconv func(suffix uint8) string } var opSuffixSpace []opSuffixSet // RegisterOpSuffix assigns cconv function for formatting opcode suffixes // when compiling for GOARCH=arch. // // cconv is never called with 0 argument. func RegisterOpSuffix(arch string, cconv func(uint8) string) { opSuffixSpace = append(opSuffixSpace, opSuffixSet{ arch: arch, cconv: cconv, }) } type regSet struct { lo int hi int Rconv func(int) string } // Few enough architectures that a linear scan is fastest. // Not even worth sorting. var regSpace []regSet /* Each architecture defines a register space as a unique integer range. Here is the list of architectures and the base of their register spaces. */ const ( // Because of masking operations in the encodings, each register // space should start at 0 modulo some power of 2. RBase386 = 1 * 1024 RBaseAMD64 = 2 * 1024 RBaseARM = 3 * 1024 RBasePPC64 = 4 * 1024 // range [4k, 8k) RBaseARM64 = 8 * 1024 // range [8k, 13k) RBaseMIPS = 13 * 1024 // range [13k, 14k) RBaseS390X = 14 * 1024 // range [14k, 15k) RBaseRISCV = 15 * 1024 // range [15k, 16k) RBaseWasm = 16 * 1024 ) // RegisterRegister binds a pretty-printer (Rconv) for register // numbers to a given register number range. Lo is inclusive, // hi exclusive (valid registers are lo through hi-1). 
func RegisterRegister(lo, hi int, Rconv func(int) string) { regSpace = append(regSpace, regSet{lo, hi, Rconv}) } func Rconv(reg int) string { if reg == REG_NONE { return "NONE" } for i := range regSpace { rs := &regSpace[i] if rs.lo <= reg && reg < rs.hi { return rs.Rconv(reg) } } return fmt.Sprintf("R???%d", reg) } type regListSet struct { lo int64 hi int64 RLconv func(int64) string } var regListSpace []regListSet // Each architecture is allotted a distinct subspace: [Lo, Hi) for declaring its // arch-specific register list numbers. const ( RegListARMLo = 0 RegListARMHi = 1 << 16 // arm64 uses the 60th bit to differentiate from other archs RegListARM64Lo = 1 << 60 RegListARM64Hi = 1<<61 - 1 // x86 uses the 61th bit to differentiate from other archs RegListX86Lo = 1 << 61 RegListX86Hi = 1<<62 - 1 ) // RegisterRegisterList binds a pretty-printer (RLconv) for register list // numbers to a given register list number range. Lo is inclusive, // hi exclusive (valid register list are lo through hi-1). func RegisterRegisterList(lo, hi int64, rlconv func(int64) string) { regListSpace = append(regListSpace, regListSet{lo, hi, rlconv}) } func RLconv(list int64) string { for i := range regListSpace { rls := &regListSpace[i] if rls.lo <= list && list < rls.hi { return rls.RLconv(list) } } return fmt.Sprintf("RL???%d", list) } type opSet struct { lo As names []string } // Not even worth sorting var aSpace []opSet // RegisterOpcode binds a list of instruction names // to a given instruction number range. 
func RegisterOpcode(lo As, Anames []string) { if len(Anames) > AllowedOpCodes { panic(fmt.Sprintf("too many instructions, have %d max %d", len(Anames), AllowedOpCodes)) } aSpace = append(aSpace, opSet{lo, Anames}) } func (a As) String() string { if 0 <= a && int(a) < len(Anames) { return Anames[a] } for i := range aSpace { as := &aSpace[i] if as.lo <= a && int(a-as.lo) < len(as.names) { return as.names[a-as.lo] } } return fmt.Sprintf("A???%d", a) } var Anames = []string{ "XXX", "CALL", "DUFFCOPY", "DUFFZERO", "END", "FUNCDATA", "JMP", "NOP", "PCALIGN", "PCDATA", "RET", "GETCALLERPC", "TEXT", "UNDEF", } func Bool2int(b bool) int { // The compiler currently only optimizes this form. // See issue 6011. var i int if b { i = 1 } else { i = 0 } return i }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/twitchyliquid64/golang-asm/obj/link.go
vendor/github.com/twitchyliquid64/golang-asm/obj/link.go
// Derived from Inferno utils/6l/l.h and related files. // https://bitbucket.org/inferno-os/inferno-os/src/master/utils/6l/l.h // // Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. // Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) // Portions Copyright © 1997-1999 Vita Nuova Limited // Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com) // Portions Copyright © 2004,2006 Bruce Ellis // Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) // Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others // Portions Copyright © 2009 The Go Authors. All rights reserved. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. 
package obj import ( "bufio" "github.com/twitchyliquid64/golang-asm/dwarf" "github.com/twitchyliquid64/golang-asm/goobj" "github.com/twitchyliquid64/golang-asm/objabi" "github.com/twitchyliquid64/golang-asm/src" "github.com/twitchyliquid64/golang-asm/sys" "fmt" "sync" ) // An Addr is an argument to an instruction. // The general forms and their encodings are: // // sym±offset(symkind)(reg)(index*scale) // Memory reference at address &sym(symkind) + offset + reg + index*scale. // Any of sym(symkind), ±offset, (reg), (index*scale), and *scale can be omitted. // If (reg) and *scale are both omitted, the resulting expression (index) is parsed as (reg). // To force a parsing as index*scale, write (index*1). // Encoding: // type = TYPE_MEM // name = symkind (NAME_AUTO, ...) or 0 (NAME_NONE) // sym = sym // offset = ±offset // reg = reg (REG_*) // index = index (REG_*) // scale = scale (1, 2, 4, 8) // // $<mem> // Effective address of memory reference <mem>, defined above. // Encoding: same as memory reference, but type = TYPE_ADDR. // // $<±integer value> // This is a special case of $<mem>, in which only ±offset is present. // It has a separate type for easy recognition. // Encoding: // type = TYPE_CONST // offset = ±integer value // // *<mem> // Indirect reference through memory reference <mem>, defined above. // Only used on x86 for CALL/JMP *sym(SB), which calls/jumps to a function // pointer stored in the data word sym(SB), not a function named sym(SB). // Encoding: same as above, but type = TYPE_INDIR. // // $*$<mem> // No longer used. // On machines with actual SB registers, $*$<mem> forced the // instruction encoding to use a full 32-bit constant, never a // reference relative to SB. // // $<floating point literal> // Floating point constant value. // Encoding: // type = TYPE_FCONST // val = floating point value // // $<string literal, up to 8 chars> // String literal value (raw bytes used for DATA instruction). 
// Encoding: // type = TYPE_SCONST // val = string // // <register name> // Any register: integer, floating point, control, segment, and so on. // If looking for specific register kind, must check type and reg value range. // Encoding: // type = TYPE_REG // reg = reg (REG_*) // // x(PC) // Encoding: // type = TYPE_BRANCH // val = Prog* reference OR ELSE offset = target pc (branch takes priority) // // $±x-±y // Final argument to TEXT, specifying local frame size x and argument size y. // In this form, x and y are integer literals only, not arbitrary expressions. // This avoids parsing ambiguities due to the use of - as a separator. // The ± are optional. // If the final argument to TEXT omits the -±y, the encoding should still // use TYPE_TEXTSIZE (not TYPE_CONST), with u.argsize = ArgsSizeUnknown. // Encoding: // type = TYPE_TEXTSIZE // offset = x // val = int32(y) // // reg<<shift, reg>>shift, reg->shift, reg@>shift // Shifted register value, for ARM and ARM64. // In this form, reg must be a register and shift can be a register or an integer constant. // Encoding: // type = TYPE_SHIFT // On ARM: // offset = (reg&15) | shifttype<<5 | count // shifttype = 0, 1, 2, 3 for <<, >>, ->, @> // count = (reg&15)<<8 | 1<<4 for a register shift count, (n&31)<<7 for an integer constant. // On ARM64: // offset = (reg&31)<<16 | shifttype<<22 | (count&63)<<10 // shifttype = 0, 1, 2 for <<, >>, -> // // (reg, reg) // A destination register pair. When used as the last argument of an instruction, // this form makes clear that both registers are destinations. // Encoding: // type = TYPE_REGREG // reg = first register // offset = second register // // [reg, reg, reg-reg] // Register list for ARM, ARM64, 386/AMD64. // Encoding: // type = TYPE_REGLIST // On ARM: // offset = bit mask of registers in list; R0 is low bit. 
// On ARM64: // offset = register count (Q:size) | arrangement (opcode) | first register // On 386/AMD64: // reg = range low register // offset = 2 packed registers + kind tag (see x86.EncodeRegisterRange) // // reg, reg // Register pair for ARM. // TYPE_REGREG2 // // (reg+reg) // Register pair for PPC64. // Encoding: // type = TYPE_MEM // reg = first register // index = second register // scale = 1 // // reg.[US]XT[BHWX] // Register extension for ARM64 // Encoding: // type = TYPE_REG // reg = REG_[US]XT[BHWX] + register + shift amount // offset = ((reg&31) << 16) | (exttype << 13) | (amount<<10) // // reg.<T> // Register arrangement for ARM64 SIMD register // e.g.: V1.S4, V2.S2, V7.D2, V2.H4, V6.B16 // Encoding: // type = TYPE_REG // reg = REG_ARNG + register + arrangement // // reg.<T>[index] // Register element for ARM64 // Encoding: // type = TYPE_REG // reg = REG_ELEM + register + arrangement // index = element index type Addr struct { Reg int16 Index int16 Scale int16 // Sometimes holds a register. Type AddrType Name AddrName Class int8 Offset int64 Sym *LSym // argument value: // for TYPE_SCONST, a string // for TYPE_FCONST, a float64 // for TYPE_BRANCH, a *Prog (optional) // for TYPE_TEXTSIZE, an int32 (optional) Val interface{} } type AddrName int8 const ( NAME_NONE AddrName = iota NAME_EXTERN NAME_STATIC NAME_AUTO NAME_PARAM // A reference to name@GOT(SB) is a reference to the entry in the global offset // table for 'name'. NAME_GOTREF // Indicates that this is a reference to a TOC anchor. 
NAME_TOCREF ) //go:generate stringer -type AddrType type AddrType uint8 const ( TYPE_NONE AddrType = iota TYPE_BRANCH TYPE_TEXTSIZE TYPE_MEM TYPE_CONST TYPE_FCONST TYPE_SCONST TYPE_REG TYPE_ADDR TYPE_SHIFT TYPE_REGREG TYPE_REGREG2 TYPE_INDIR TYPE_REGLIST ) func (a *Addr) Target() *Prog { if a.Type == TYPE_BRANCH && a.Val != nil { return a.Val.(*Prog) } return nil } func (a *Addr) SetTarget(t *Prog) { if a.Type != TYPE_BRANCH { panic("setting branch target when type is not TYPE_BRANCH") } a.Val = t } // Prog describes a single machine instruction. // // The general instruction form is: // // (1) As.Scond From [, ...RestArgs], To // (2) As.Scond From, Reg [, ...RestArgs], To, RegTo2 // // where As is an opcode and the others are arguments: // From, Reg are sources, and To, RegTo2 are destinations. // RestArgs can hold additional sources and destinations. // Usually, not all arguments are present. // For example, MOVL R1, R2 encodes using only As=MOVL, From=R1, To=R2. // The Scond field holds additional condition bits for systems (like arm) // that have generalized conditional execution. // (2) form is present for compatibility with older code, // to avoid too much changes in a single swing. // (1) scheme is enough to express any kind of operand combination. // // Jump instructions use the To.Val field to point to the target *Prog, // which must be in the same linked list as the jump instruction. // // The Progs for a given function are arranged in a list linked through the Link field. // // Each Prog is charged to a specific source line in the debug information, // specified by Pos.Line(). // Every Prog has a Ctxt field that defines its context. // For performance reasons, Progs usually are usually bulk allocated, cached, and reused; // those bulk allocators should always be used, rather than new(Prog). // // The other fields not yet mentioned are for use by the back ends and should // be left zeroed by creators of Prog lists. 
type Prog struct { Ctxt *Link // linker context Link *Prog // next Prog in linked list From Addr // first source operand RestArgs []Addr // can pack any operands that not fit into {Prog.From, Prog.To} To Addr // destination operand (second is RegTo2 below) Pool *Prog // constant pool entry, for arm,arm64 back ends Forwd *Prog // for x86 back end Rel *Prog // for x86, arm back ends Pc int64 // for back ends or assembler: virtual or actual program counter, depending on phase Pos src.XPos // source position of this instruction Spadj int32 // effect of instruction on stack pointer (increment or decrement amount) As As // assembler opcode Reg int16 // 2nd source operand RegTo2 int16 // 2nd destination operand Mark uint16 // bitmask of arch-specific items Optab uint16 // arch-specific opcode index Scond uint8 // bits that describe instruction suffixes (e.g. ARM conditions) Back uint8 // for x86 back end: backwards branch state Ft uint8 // for x86 back end: type index of Prog.From Tt uint8 // for x86 back end: type index of Prog.To Isize uint8 // for x86 back end: size of the instruction in bytes } // From3Type returns p.GetFrom3().Type, or TYPE_NONE when // p.GetFrom3() returns nil. // // Deprecated: for the same reasons as Prog.GetFrom3. func (p *Prog) From3Type() AddrType { if p.RestArgs == nil { return TYPE_NONE } return p.RestArgs[0].Type } // GetFrom3 returns second source operand (the first is Prog.From). // In combination with Prog.From and Prog.To it makes common 3 operand // case easier to use. // // Should be used only when RestArgs is set with SetFrom3. // // Deprecated: better use RestArgs directly or define backend-specific getters. // Introduced to simplify transition to []Addr. // Usage of this is discouraged due to fragility and lack of guarantees. func (p *Prog) GetFrom3() *Addr { if p.RestArgs == nil { return nil } return &p.RestArgs[0] } // SetFrom3 assigns []Addr{a} to p.RestArgs. // In pair with Prog.GetFrom3 it can help in emulation of Prog.From3. 
// // Deprecated: for the same reasons as Prog.GetFrom3. func (p *Prog) SetFrom3(a Addr) { p.RestArgs = []Addr{a} } // An As denotes an assembler opcode. // There are some portable opcodes, declared here in package obj, // that are common to all architectures. // However, the majority of opcodes are arch-specific // and are declared in their respective architecture's subpackage. type As int16 // These are the portable opcodes. const ( AXXX As = iota ACALL ADUFFCOPY ADUFFZERO AEND AFUNCDATA AJMP ANOP APCALIGN APCDATA ARET AGETCALLERPC ATEXT AUNDEF A_ARCHSPECIFIC ) // Each architecture is allotted a distinct subspace of opcode values // for declaring its arch-specific opcodes. // Within this subspace, the first arch-specific opcode should be // at offset A_ARCHSPECIFIC. // // Subspaces are aligned to a power of two so opcodes can be masked // with AMask and used as compact array indices. const ( ABase386 = (1 + iota) << 11 ABaseARM ABaseAMD64 ABasePPC64 ABaseARM64 ABaseMIPS ABaseRISCV ABaseS390X ABaseWasm AllowedOpCodes = 1 << 11 // The number of opcodes available for any given architecture. AMask = AllowedOpCodes - 1 // AND with this to use the opcode as an array index. ) // An LSym is the sort of symbol that is written to an object file. // It represents Go symbols in a flat pkg+"."+name namespace. type LSym struct { Name string Type objabi.SymKind Attribute RefIdx int // Index of this symbol in the symbol reference list. Size int64 Gotype *LSym P []byte R []Reloc Func *FuncInfo Pkg string PkgIdx int32 SymIdx int32 // TODO: replace RefIdx } // A FuncInfo contains extra fields for STEXT symbols. 
type FuncInfo struct { Args int32 Locals int32 Align int32 FuncID objabi.FuncID Text *Prog Autot map[*LSym]struct{} Pcln Pcln InlMarks []InlMark dwarfInfoSym *LSym dwarfLocSym *LSym dwarfRangesSym *LSym dwarfAbsFnSym *LSym dwarfDebugLinesSym *LSym GCArgs *LSym GCLocals *LSym GCRegs *LSym // Only if !go115ReduceLiveness StackObjects *LSym OpenCodedDeferInfo *LSym FuncInfoSym *LSym } type InlMark struct { // When unwinding from an instruction in an inlined body, mark // where we should unwind to. // id records the global inlining id of the inlined body. // p records the location of an instruction in the parent (inliner) frame. p *Prog id int32 } // Mark p as the instruction to set as the pc when // "unwinding" the inlining global frame id. Usually it should be // instruction with a file:line at the callsite, and occur // just before the body of the inlined function. func (fi *FuncInfo) AddInlMark(p *Prog, id int32) { fi.InlMarks = append(fi.InlMarks, InlMark{p: p, id: id}) } // Record the type symbol for an auto variable so that the linker // an emit DWARF type information for the type. func (fi *FuncInfo) RecordAutoType(gotype *LSym) { if fi.Autot == nil { fi.Autot = make(map[*LSym]struct{}) } fi.Autot[gotype] = struct{}{} } //go:generate stringer -type ABI // ABI is the calling convention of a text symbol. type ABI uint8 const ( // ABI0 is the stable stack-based ABI. It's important that the // value of this is "0": we can't distinguish between // references to data and ABI0 text symbols in assembly code, // and hence this doesn't distinguish between symbols without // an ABI and text symbols with ABI0. ABI0 ABI = iota // ABIInternal is the internal ABI that may change between Go // versions. All Go functions use the internal ABI and the // compiler generates wrappers for calls to and from other // ABIs. ABIInternal ABICount ) // Attribute is a set of symbol attributes. 
type Attribute uint32 const ( AttrDuplicateOK Attribute = 1 << iota AttrCFunc AttrNoSplit AttrLeaf AttrWrapper AttrNeedCtxt AttrNoFrame AttrOnList AttrStatic // MakeTypelink means that the type should have an entry in the typelink table. AttrMakeTypelink // ReflectMethod means the function may call reflect.Type.Method or // reflect.Type.MethodByName. Matching is imprecise (as reflect.Type // can be used through a custom interface), so ReflectMethod may be // set in some cases when the reflect package is not called. // // Used by the linker to determine what methods can be pruned. AttrReflectMethod // Local means make the symbol local even when compiling Go code to reference Go // symbols in other shared libraries, as in this mode symbols are global by // default. "local" here means in the sense of the dynamic linker, i.e. not // visible outside of the module (shared library or executable) that contains its // definition. (When not compiling to support Go shared libraries, all symbols are // local in this sense unless there is a cgo_export_* directive). AttrLocal // For function symbols; indicates that the specified function was the // target of an inline during compilation AttrWasInlined // TopFrame means that this function is an entry point and unwinders should not // keep unwinding beyond this frame. AttrTopFrame // Indexed indicates this symbol has been assigned with an index (when using the // new object file format). AttrIndexed // Only applied on type descriptor symbols, UsedInIface indicates this type is // converted to an interface. // // Used by the linker to determine what methods can be pruned. AttrUsedInIface // ContentAddressable indicates this is a content-addressable symbol. AttrContentAddressable // attrABIBase is the value at which the ABI is encoded in // Attribute. This must be last; all bits after this are // assumed to be an ABI value. // // MUST BE LAST since all bits above this comprise the ABI. 
attrABIBase ) func (a Attribute) DuplicateOK() bool { return a&AttrDuplicateOK != 0 } func (a Attribute) MakeTypelink() bool { return a&AttrMakeTypelink != 0 } func (a Attribute) CFunc() bool { return a&AttrCFunc != 0 } func (a Attribute) NoSplit() bool { return a&AttrNoSplit != 0 } func (a Attribute) Leaf() bool { return a&AttrLeaf != 0 } func (a Attribute) OnList() bool { return a&AttrOnList != 0 } func (a Attribute) ReflectMethod() bool { return a&AttrReflectMethod != 0 } func (a Attribute) Local() bool { return a&AttrLocal != 0 } func (a Attribute) Wrapper() bool { return a&AttrWrapper != 0 } func (a Attribute) NeedCtxt() bool { return a&AttrNeedCtxt != 0 } func (a Attribute) NoFrame() bool { return a&AttrNoFrame != 0 } func (a Attribute) Static() bool { return a&AttrStatic != 0 } func (a Attribute) WasInlined() bool { return a&AttrWasInlined != 0 } func (a Attribute) TopFrame() bool { return a&AttrTopFrame != 0 } func (a Attribute) Indexed() bool { return a&AttrIndexed != 0 } func (a Attribute) UsedInIface() bool { return a&AttrUsedInIface != 0 } func (a Attribute) ContentAddressable() bool { return a&AttrContentAddressable != 0 } func (a *Attribute) Set(flag Attribute, value bool) { if value { *a |= flag } else { *a &^= flag } } func (a Attribute) ABI() ABI { return ABI(a / attrABIBase) } func (a *Attribute) SetABI(abi ABI) { const mask = 1 // Only one ABI bit for now. 
*a = (*a &^ (mask * attrABIBase)) | Attribute(abi)*attrABIBase } var textAttrStrings = [...]struct { bit Attribute s string }{ {bit: AttrDuplicateOK, s: "DUPOK"}, {bit: AttrMakeTypelink, s: ""}, {bit: AttrCFunc, s: "CFUNC"}, {bit: AttrNoSplit, s: "NOSPLIT"}, {bit: AttrLeaf, s: "LEAF"}, {bit: AttrOnList, s: ""}, {bit: AttrReflectMethod, s: "REFLECTMETHOD"}, {bit: AttrLocal, s: "LOCAL"}, {bit: AttrWrapper, s: "WRAPPER"}, {bit: AttrNeedCtxt, s: "NEEDCTXT"}, {bit: AttrNoFrame, s: "NOFRAME"}, {bit: AttrStatic, s: "STATIC"}, {bit: AttrWasInlined, s: ""}, {bit: AttrTopFrame, s: "TOPFRAME"}, {bit: AttrIndexed, s: ""}, {bit: AttrContentAddressable, s: ""}, } // TextAttrString formats a for printing in as part of a TEXT prog. func (a Attribute) TextAttrString() string { var s string for _, x := range textAttrStrings { if a&x.bit != 0 { if x.s != "" { s += x.s + "|" } a &^= x.bit } } switch a.ABI() { case ABI0: case ABIInternal: s += "ABIInternal|" a.SetABI(0) // Clear ABI so we don't print below. } if a != 0 { s += fmt.Sprintf("UnknownAttribute(%d)|", a) } // Chop off trailing |, if present. if len(s) > 0 { s = s[:len(s)-1] } return s } func (s *LSym) String() string { return s.Name } // The compiler needs *LSym to be assignable to cmd/compile/internal/ssa.Sym. func (s *LSym) CanBeAnSSASym() { } type Pcln struct { Pcsp Pcdata Pcfile Pcdata Pcline Pcdata Pcinline Pcdata Pcdata []Pcdata Funcdata []*LSym Funcdataoff []int64 UsedFiles map[goobj.CUFileIndex]struct{} // file indices used while generating pcfile InlTree InlTree // per-function inlining tree extracted from the global tree } type Reloc struct { Off int32 Siz uint8 Type objabi.RelocType Add int64 Sym *LSym } type Auto struct { Asym *LSym Aoffset int32 Name AddrName Gotype *LSym } type Pcdata struct { P []byte } // Link holds the context for writing object code from a compiler // to be linker input or for reading that input into the linker. 
type Link struct { Headtype objabi.HeadType Arch *LinkArch Debugasm int Debugvlog bool Debugpcln string Flag_shared bool Flag_dynlink bool Flag_linkshared bool Flag_optimize bool Flag_locationlists bool Retpoline bool // emit use of retpoline stubs for indirect jmp/call Bso *bufio.Writer Pathname string Pkgpath string // the current package's import path, "" if unknown hashmu sync.Mutex // protects hash, funchash hash map[string]*LSym // name -> sym mapping funchash map[string]*LSym // name -> sym mapping for ABIInternal syms statichash map[string]*LSym // name -> sym mapping for static syms PosTable src.PosTable InlTree InlTree // global inlining tree used by gc/inl.go DwFixups *DwarfFixupTable Imports []goobj.ImportedPkg DiagFunc func(string, ...interface{}) DiagFlush func() DebugInfo func(fn *LSym, info *LSym, curfn interface{}) ([]dwarf.Scope, dwarf.InlCalls) // if non-nil, curfn is a *gc.Node GenAbstractFunc func(fn *LSym) Errors int InParallel bool // parallel backend phase in effect UseBASEntries bool // use Base Address Selection Entries in location lists and PC ranges IsAsm bool // is the source assembly language, which may contain surprising idioms (e.g., call tables) // state for writing objects Text []*LSym Data []*LSym // ABIAliases are text symbols that should be aliased to all // ABIs. These symbols may only be referenced and not defined // by this object, since the need for an alias may appear in a // different object than the definition. Hence, this // information can't be carried in the symbol definition. // // TODO(austin): Replace this with ABI wrappers once the ABIs // actually diverge. ABIAliases []*LSym // Constant symbols (e.g. $i64.*) are data symbols created late // in the concurrent phase. To ensure a deterministic order, we // add them to a separate list, sort at the end, and append it // to Data. constSyms []*LSym // pkgIdx maps package path to index. The index is used for // symbol reference in the object file. 
pkgIdx map[string]int32 defs []*LSym // list of defined symbols in the current package hashed64defs []*LSym // list of defined short (64-bit or less) hashed (content-addressable) symbols hasheddefs []*LSym // list of defined hashed (content-addressable) symbols nonpkgdefs []*LSym // list of defined non-package symbols nonpkgrefs []*LSym // list of referenced non-package symbols Fingerprint goobj.FingerprintType // fingerprint of symbol indices, to catch index mismatch } func (ctxt *Link) Diag(format string, args ...interface{}) { ctxt.Errors++ ctxt.DiagFunc(format, args...) } func (ctxt *Link) Logf(format string, args ...interface{}) { fmt.Fprintf(ctxt.Bso, format, args...) ctxt.Bso.Flush() } // The smallest possible offset from the hardware stack pointer to a local // variable on the stack. Architectures that use a link register save its value // on the stack in the function prologue and so always have a pointer between // the hardware stack pointer and the local variable area. func (ctxt *Link) FixedFrameSize() int64 { switch ctxt.Arch.Family { case sys.AMD64, sys.I386, sys.Wasm: return 0 case sys.PPC64: // PIC code on ppc64le requires 32 bytes of stack, and it's easier to // just use that much stack always on ppc64x. return int64(4 * ctxt.Arch.PtrSize) default: return int64(ctxt.Arch.PtrSize) } } // LinkArch is the definition of a single architecture. type LinkArch struct { *sys.Arch Init func(*Link) Preprocess func(*Link, *LSym, ProgAlloc) Assemble func(*Link, *LSym, ProgAlloc) Progedit func(*Link, *Prog, ProgAlloc) UnaryDst map[As]bool // Instruction takes one operand, a destination. DWARFRegisters map[int16]int16 }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/twitchyliquid64/golang-asm/obj/inl.go
vendor/github.com/twitchyliquid64/golang-asm/obj/inl.go
// Copyright 2017 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package obj import "github.com/twitchyliquid64/golang-asm/src" // InlTree is a collection of inlined calls. The Parent field of an // InlinedCall is the index of another InlinedCall in InlTree. // // The compiler maintains a global inlining tree and adds a node to it // every time a function is inlined. For example, suppose f() calls g() // and g has two calls to h(), and that f, g, and h are inlineable: // // 1 func main() { // 2 f() // 3 } // 4 func f() { // 5 g() // 6 } // 7 func g() { // 8 h() // 9 h() // 10 } // 11 func h() { // 12 println("H") // 13 } // // Assuming the global tree starts empty, inlining will produce the // following tree: // // []InlinedCall{ // {Parent: -1, Func: "f", Pos: <line 2>}, // {Parent: 0, Func: "g", Pos: <line 5>}, // {Parent: 1, Func: "h", Pos: <line 8>}, // {Parent: 1, Func: "h", Pos: <line 9>}, // } // // The nodes of h inlined into main will have inlining indexes 2 and 3. // // Eventually, the compiler extracts a per-function inlining tree from // the global inlining tree (see pcln.go). type InlTree struct { nodes []InlinedCall } // InlinedCall is a node in an InlTree. type InlinedCall struct { Parent int // index of the parent in the InlTree or < 0 if outermost call Pos src.XPos // position of the inlined call Func *LSym // function that was inlined ParentPC int32 // PC of instruction just before inlined body. Only valid in local trees. } // Add adds a new call to the tree, returning its index. 
func (tree *InlTree) Add(parent int, pos src.XPos, func_ *LSym) int { r := len(tree.nodes) call := InlinedCall{ Parent: parent, Pos: pos, Func: func_, } tree.nodes = append(tree.nodes, call) return r } func (tree *InlTree) Parent(inlIndex int) int { return tree.nodes[inlIndex].Parent } func (tree *InlTree) InlinedFunction(inlIndex int) *LSym { return tree.nodes[inlIndex].Func } func (tree *InlTree) CallPos(inlIndex int) src.XPos { return tree.nodes[inlIndex].Pos } func (tree *InlTree) setParentPC(inlIndex int, pc int32) { tree.nodes[inlIndex].ParentPC = pc } // OutermostPos returns the outermost position corresponding to xpos, // which is where xpos was ultimately inlined to. In the example for // InlTree, main() contains inlined AST nodes from h(), but the // outermost position for those nodes is line 2. func (ctxt *Link) OutermostPos(xpos src.XPos) src.Pos { pos := ctxt.InnermostPos(xpos) outerxpos := xpos for ix := pos.Base().InliningIndex(); ix >= 0; { call := ctxt.InlTree.nodes[ix] ix = call.Parent outerxpos = call.Pos } return ctxt.PosTable.Pos(outerxpos) } // InnermostPos returns the innermost position corresponding to xpos, // that is, the code that is inlined and that inlines nothing else. // In the example for InlTree above, the code for println within h // would have an innermost position with line number 12, whether // h was not inlined, inlined into g, g-then-f, or g-then-f-then-main. // This corresponds to what someone debugging main, f, g, or h might // expect to see while single-stepping. func (ctxt *Link) InnermostPos(xpos src.XPos) src.Pos { return ctxt.PosTable.Pos(xpos) } // AllPos returns a slice of the positions inlined at xpos, from // innermost (index zero) to outermost. To avoid gratuitous allocation // the result is passed in and extended if necessary. 
func (ctxt *Link) AllPos(xpos src.XPos, result []src.Pos) []src.Pos { pos := ctxt.InnermostPos(xpos) result = result[:0] result = append(result, ctxt.PosTable.Pos(xpos)) for ix := pos.Base().InliningIndex(); ix >= 0; { call := ctxt.InlTree.nodes[ix] ix = call.Parent result = append(result, ctxt.PosTable.Pos(call.Pos)) } return result } func dumpInlTree(ctxt *Link, tree InlTree) { for i, call := range tree.nodes { pos := ctxt.PosTable.Pos(call.Pos) ctxt.Logf("%0d | %0d | %s (%s) pc=%d\n", i, call.Parent, call.Func, pos, call.ParentPC) } }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/twitchyliquid64/golang-asm/obj/abi_string.go
vendor/github.com/twitchyliquid64/golang-asm/obj/abi_string.go
// Code generated by "stringer -type ABI"; DO NOT EDIT. package obj import "strconv" const _ABI_name = "ABI0ABIInternalABICount" var _ABI_index = [...]uint8{0, 4, 15, 23} func (i ABI) String() string { if i >= ABI(len(_ABI_index)-1) { return "ABI(" + strconv.FormatInt(int64(i), 10) + ")" } return _ABI_name[_ABI_index[i]:_ABI_index[i+1]] }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/twitchyliquid64/golang-asm/obj/pass.go
vendor/github.com/twitchyliquid64/golang-asm/obj/pass.go
// Inferno utils/6l/pass.c // https://bitbucket.org/inferno-os/inferno-os/src/master/utils/6l/pass.c // // Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. // Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) // Portions Copyright © 1997-1999 Vita Nuova Limited // Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com) // Portions Copyright © 2004,2006 Bruce Ellis // Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) // Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others // Portions Copyright © 2009 The Go Authors. All rights reserved. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. package obj // Code and data passes. // brloop returns the ultimate destination of the series of unconditional jumps beginning at p. // In the case of an infinite loop, brloop returns nil. 
func brloop(p *Prog) *Prog { c := 0 for q := p; q != nil; q = q.To.Target() { if q.As != AJMP || q.To.Target() == nil { return q } c++ if c >= 5000 { // infinite loop return nil } } panic("unreachable") } // checkaddr checks that a has an expected encoding, especially TYPE_CONST vs TYPE_ADDR. func checkaddr(ctxt *Link, p *Prog, a *Addr) { switch a.Type { case TYPE_NONE, TYPE_REGREG2, TYPE_REGLIST: return case TYPE_BRANCH, TYPE_TEXTSIZE: if a.Reg != 0 || a.Index != 0 || a.Scale != 0 || a.Name != 0 { break } return case TYPE_MEM: return case TYPE_CONST: // TODO(rsc): After fixing SHRQ, check a.Index != 0 too. if a.Name != 0 || a.Sym != nil || a.Reg != 0 { ctxt.Diag("argument is TYPE_CONST, should be TYPE_ADDR, in %v", p) return } if a.Reg != 0 || a.Scale != 0 || a.Name != 0 || a.Sym != nil || a.Val != nil { break } return case TYPE_FCONST, TYPE_SCONST: if a.Reg != 0 || a.Index != 0 || a.Scale != 0 || a.Name != 0 || a.Offset != 0 || a.Sym != nil { break } return case TYPE_REG: // TODO(rsc): After fixing PINSRQ, check a.Offset != 0 too. // TODO(rsc): After fixing SHRQ, check a.Index != 0 too. if a.Scale != 0 || a.Name != 0 || a.Sym != nil { break } return case TYPE_ADDR: if a.Val != nil { break } if a.Reg == 0 && a.Index == 0 && a.Scale == 0 && a.Name == 0 && a.Sym == nil { ctxt.Diag("argument is TYPE_ADDR, should be TYPE_CONST, in %v", p) } return case TYPE_SHIFT, TYPE_REGREG: if a.Index != 0 || a.Scale != 0 || a.Name != 0 || a.Sym != nil || a.Val != nil { break } return case TYPE_INDIR: // Expect sym and name to be set, nothing else. // Technically more is allowed, but this is only used for *name(SB). 
if a.Reg != 0 || a.Index != 0 || a.Scale != 0 || a.Name == 0 || a.Offset != 0 || a.Sym == nil || a.Val != nil { break } return } ctxt.Diag("invalid encoding for argument %v", p) } func linkpatch(ctxt *Link, sym *LSym, newprog ProgAlloc) { for p := sym.Func.Text; p != nil; p = p.Link { checkaddr(ctxt, p, &p.From) if p.GetFrom3() != nil { checkaddr(ctxt, p, p.GetFrom3()) } checkaddr(ctxt, p, &p.To) if ctxt.Arch.Progedit != nil { ctxt.Arch.Progedit(ctxt, p, newprog) } if p.To.Type != TYPE_BRANCH { continue } if p.To.Val != nil { continue } if p.To.Sym != nil { continue } q := sym.Func.Text for q != nil && p.To.Offset != q.Pc { if q.Forwd != nil && p.To.Offset >= q.Forwd.Pc { q = q.Forwd } else { q = q.Link } } if q == nil { name := "<nil>" if p.To.Sym != nil { name = p.To.Sym.Name } ctxt.Diag("branch out of range (%#x)\n%v [%s]", uint32(p.To.Offset), p, name) p.To.Type = TYPE_NONE } p.To.SetTarget(q) } if !ctxt.Flag_optimize { return } // Collapse series of jumps to jumps. for p := sym.Func.Text; p != nil; p = p.Link { if p.To.Target() == nil { continue } p.To.SetTarget(brloop(p.To.Target())) if p.To.Target() != nil && p.To.Type == TYPE_BRANCH { p.To.Offset = p.To.Target().Pc } } }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/twitchyliquid64/golang-asm/obj/plist.go
vendor/github.com/twitchyliquid64/golang-asm/obj/plist.go
// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package obj import ( "github.com/twitchyliquid64/golang-asm/objabi" "fmt" "strings" ) type Plist struct { Firstpc *Prog Curfn interface{} // holds a *gc.Node, if non-nil } // ProgAlloc is a function that allocates Progs. // It is used to provide access to cached/bulk-allocated Progs to the assemblers. type ProgAlloc func() *Prog func Flushplist(ctxt *Link, plist *Plist, newprog ProgAlloc, myimportpath string) { // Build list of symbols, and assign instructions to lists. var curtext *LSym var etext *Prog var text []*LSym var plink *Prog for p := plist.Firstpc; p != nil; p = plink { if ctxt.Debugasm > 0 && ctxt.Debugvlog { fmt.Printf("obj: %v\n", p) } plink = p.Link p.Link = nil switch p.As { case AEND: continue case ATEXT: s := p.From.Sym if s == nil { // func _() { } curtext = nil continue } text = append(text, s) etext = p curtext = s continue case AFUNCDATA: // Rewrite reference to go_args_stackmap(SB) to the Go-provided declaration information. if curtext == nil { // func _() {} continue } if p.To.Sym.Name == "go_args_stackmap" { if p.From.Type != TYPE_CONST || p.From.Offset != objabi.FUNCDATA_ArgsPointerMaps { ctxt.Diag("FUNCDATA use of go_args_stackmap(SB) without FUNCDATA_ArgsPointerMaps") } p.To.Sym = ctxt.LookupDerived(curtext, curtext.Name+".args_stackmap") } } if curtext == nil { etext = nil continue } etext.Link = p etext = p } if newprog == nil { newprog = ctxt.NewProg } // Add reference to Go arguments for C or assembly functions without them. 
for _, s := range text { if !strings.HasPrefix(s.Name, "\"\".") { continue } found := false for p := s.Func.Text; p != nil; p = p.Link { if p.As == AFUNCDATA && p.From.Type == TYPE_CONST && p.From.Offset == objabi.FUNCDATA_ArgsPointerMaps { found = true break } } if !found { p := Appendp(s.Func.Text, newprog) p.As = AFUNCDATA p.From.Type = TYPE_CONST p.From.Offset = objabi.FUNCDATA_ArgsPointerMaps p.To.Type = TYPE_MEM p.To.Name = NAME_EXTERN p.To.Sym = ctxt.LookupDerived(s, s.Name+".args_stackmap") } } // Turn functions into machine code images. for _, s := range text { mkfwd(s) linkpatch(ctxt, s, newprog) ctxt.Arch.Preprocess(ctxt, s, newprog) ctxt.Arch.Assemble(ctxt, s, newprog) if ctxt.Errors > 0 { continue } linkpcln(ctxt, s) if myimportpath != "" { ctxt.populateDWARF(plist.Curfn, s, myimportpath) } } } func (ctxt *Link) InitTextSym(s *LSym, flag int) { if s == nil { // func _() { } return } if s.Func != nil { ctxt.Diag("InitTextSym double init for %s", s.Name) } s.Func = new(FuncInfo) if s.OnList() { ctxt.Diag("symbol %s listed multiple times", s.Name) } name := strings.Replace(s.Name, "\"\"", ctxt.Pkgpath, -1) s.Func.FuncID = objabi.GetFuncID(name, flag&WRAPPER != 0) s.Set(AttrOnList, true) s.Set(AttrDuplicateOK, flag&DUPOK != 0) s.Set(AttrNoSplit, flag&NOSPLIT != 0) s.Set(AttrReflectMethod, flag&REFLECTMETHOD != 0) s.Set(AttrWrapper, flag&WRAPPER != 0) s.Set(AttrNeedCtxt, flag&NEEDCTXT != 0) s.Set(AttrNoFrame, flag&NOFRAME != 0) s.Set(AttrTopFrame, flag&TOPFRAME != 0) s.Type = objabi.STEXT ctxt.Text = append(ctxt.Text, s) // Set up DWARF entries for s ctxt.dwarfSym(s) } func (ctxt *Link) Globl(s *LSym, size int64, flag int) { if s.OnList() { ctxt.Diag("symbol %s listed multiple times", s.Name) } s.Set(AttrOnList, true) ctxt.Data = append(ctxt.Data, s) s.Size = size if s.Type == 0 { s.Type = objabi.SBSS } if flag&DUPOK != 0 { s.Set(AttrDuplicateOK, true) } if flag&RODATA != 0 { s.Type = objabi.SRODATA } else if flag&NOPTR != 0 { if s.Type == objabi.SDATA { 
s.Type = objabi.SNOPTRDATA } else { s.Type = objabi.SNOPTRBSS } } else if flag&TLSBSS != 0 { s.Type = objabi.STLSBSS } if strings.HasPrefix(s.Name, "\"\"."+StaticNamePref) { s.Set(AttrStatic, true) } } // EmitEntryLiveness generates PCDATA Progs after p to switch to the // liveness map active at the entry of function s. It returns the last // Prog generated. func (ctxt *Link) EmitEntryLiveness(s *LSym, p *Prog, newprog ProgAlloc) *Prog { pcdata := ctxt.EmitEntryStackMap(s, p, newprog) pcdata = ctxt.EmitEntryRegMap(s, pcdata, newprog) return pcdata } // Similar to EmitEntryLiveness, but just emit stack map. func (ctxt *Link) EmitEntryStackMap(s *LSym, p *Prog, newprog ProgAlloc) *Prog { pcdata := Appendp(p, newprog) pcdata.Pos = s.Func.Text.Pos pcdata.As = APCDATA pcdata.From.Type = TYPE_CONST pcdata.From.Offset = objabi.PCDATA_StackMapIndex pcdata.To.Type = TYPE_CONST pcdata.To.Offset = -1 // pcdata starts at -1 at function entry return pcdata } // Similar to EmitEntryLiveness, but just emit register map. func (ctxt *Link) EmitEntryRegMap(s *LSym, p *Prog, newprog ProgAlloc) *Prog { pcdata := Appendp(p, newprog) pcdata.Pos = s.Func.Text.Pos pcdata.As = APCDATA pcdata.From.Type = TYPE_CONST pcdata.From.Offset = objabi.PCDATA_RegMapIndex pcdata.To.Type = TYPE_CONST pcdata.To.Offset = -1 return pcdata } // StartUnsafePoint generates PCDATA Progs after p to mark the // beginning of an unsafe point. The unsafe point starts immediately // after p. // It returns the last Prog generated. func (ctxt *Link) StartUnsafePoint(p *Prog, newprog ProgAlloc) *Prog { pcdata := Appendp(p, newprog) pcdata.As = APCDATA pcdata.From.Type = TYPE_CONST pcdata.From.Offset = objabi.PCDATA_RegMapIndex pcdata.To.Type = TYPE_CONST pcdata.To.Offset = objabi.PCDATA_RegMapUnsafe return pcdata } // EndUnsafePoint generates PCDATA Progs after p to mark the end of an // unsafe point, restoring the register map index to oldval. // The unsafe point ends right after p. 
// It returns the last Prog generated. func (ctxt *Link) EndUnsafePoint(p *Prog, newprog ProgAlloc, oldval int64) *Prog { pcdata := Appendp(p, newprog) pcdata.As = APCDATA pcdata.From.Type = TYPE_CONST pcdata.From.Offset = objabi.PCDATA_RegMapIndex pcdata.To.Type = TYPE_CONST pcdata.To.Offset = oldval return pcdata } // MarkUnsafePoints inserts PCDATAs to mark nonpreemptible and restartable // instruction sequences, based on isUnsafePoint and isRestartable predicate. // p0 is the start of the instruction stream. // isUnsafePoint(p) returns true if p is not safe for async preemption. // isRestartable(p) returns true if we can restart at the start of p (this Prog) // upon async preemption. (Currently multi-Prog restartable sequence is not // supported.) // isRestartable can be nil. In this case it is treated as always returning false. // If isUnsafePoint(p) and isRestartable(p) are both true, it is treated as // an unsafe point. func MarkUnsafePoints(ctxt *Link, p0 *Prog, newprog ProgAlloc, isUnsafePoint, isRestartable func(*Prog) bool) { if isRestartable == nil { // Default implementation: nothing is restartable. isRestartable = func(*Prog) bool { return false } } prev := p0 prevPcdata := int64(-1) // entry PC data value prevRestart := int64(0) for p := prev.Link; p != nil; p, prev = p.Link, p { if p.As == APCDATA && p.From.Offset == objabi.PCDATA_RegMapIndex { prevPcdata = p.To.Offset continue } if prevPcdata == objabi.PCDATA_RegMapUnsafe { continue // already unsafe } if isUnsafePoint(p) { q := ctxt.StartUnsafePoint(prev, newprog) q.Pc = p.Pc q.Link = p // Advance to the end of unsafe point. 
for p.Link != nil && isUnsafePoint(p.Link) { p = p.Link } if p.Link == nil { break // Reached the end, don't bother marking the end } p = ctxt.EndUnsafePoint(p, newprog, prevPcdata) p.Pc = p.Link.Pc continue } if isRestartable(p) { val := int64(objabi.PCDATA_Restart1) if val == prevRestart { val = objabi.PCDATA_Restart2 } prevRestart = val q := Appendp(prev, newprog) q.As = APCDATA q.From.Type = TYPE_CONST q.From.Offset = objabi.PCDATA_RegMapIndex q.To.Type = TYPE_CONST q.To.Offset = val q.Pc = p.Pc q.Link = p if p.Link == nil { break // Reached the end, don't bother marking the end } if isRestartable(p.Link) { // Next Prog is also restartable. No need to mark the end // of this sequence. We'll just go ahead mark the next one. continue } p = Appendp(p, newprog) p.As = APCDATA p.From.Type = TYPE_CONST p.From.Offset = objabi.PCDATA_RegMapIndex p.To.Type = TYPE_CONST p.To.Offset = prevPcdata p.Pc = p.Link.Pc } } }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/twitchyliquid64/golang-asm/obj/dwarf.go
vendor/github.com/twitchyliquid64/golang-asm/obj/dwarf.go
// Copyright 2019 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Writes dwarf information to object files. package obj import ( "github.com/twitchyliquid64/golang-asm/dwarf" "github.com/twitchyliquid64/golang-asm/objabi" "github.com/twitchyliquid64/golang-asm/src" "fmt" "sort" "sync" ) // Generate a sequence of opcodes that is as short as possible. // See section 6.2.5 const ( LINE_BASE = -4 LINE_RANGE = 10 PC_RANGE = (255 - OPCODE_BASE) / LINE_RANGE OPCODE_BASE = 11 ) // generateDebugLinesSymbol fills the debug lines symbol of a given function. // // It's worth noting that this function doesn't generate the full debug_lines // DWARF section, saving that for the linker. This function just generates the // state machine part of debug_lines. The full table is generated by the // linker. Also, we use the file numbers from the full package (not just the // function in question) when generating the state machine. We do this so we // don't have to do a fixup on the indices when writing the full section. func (ctxt *Link) generateDebugLinesSymbol(s, lines *LSym) { dctxt := dwCtxt{ctxt} // Emit a LNE_set_address extended opcode, so as to establish the // starting text address of this function. dctxt.AddUint8(lines, 0) dwarf.Uleb128put(dctxt, lines, 1+int64(ctxt.Arch.PtrSize)) dctxt.AddUint8(lines, dwarf.DW_LNE_set_address) dctxt.AddAddress(lines, s, 0) // Set up the debug_lines state machine to the default values // we expect at the start of a new sequence. stmt := true line := int64(1) pc := s.Func.Text.Pc var lastpc int64 // last PC written to line table, not last PC in func name := "" prologue, wrotePrologue := false, false // Walk the progs, generating the DWARF table. for p := s.Func.Text; p != nil; p = p.Link { prologue = prologue || (p.Pos.Xlogue() == src.PosPrologueEnd) // If we're not at a real instruction, keep looping! 
if p.Pos.Line() == 0 || (p.Link != nil && p.Link.Pc == p.Pc) { continue } newStmt := p.Pos.IsStmt() != src.PosNotStmt newName, newLine := linkgetlineFromPos(ctxt, p.Pos) // Output debug info. wrote := false if name != newName { newFile := ctxt.PosTable.FileIndex(newName) + 1 // 1 indexing for the table. dctxt.AddUint8(lines, dwarf.DW_LNS_set_file) dwarf.Uleb128put(dctxt, lines, int64(newFile)) name = newName wrote = true } if prologue && !wrotePrologue { dctxt.AddUint8(lines, uint8(dwarf.DW_LNS_set_prologue_end)) wrotePrologue = true wrote = true } if stmt != newStmt { dctxt.AddUint8(lines, uint8(dwarf.DW_LNS_negate_stmt)) stmt = newStmt wrote = true } if line != int64(newLine) || wrote { pcdelta := p.Pc - pc lastpc = p.Pc putpclcdelta(ctxt, dctxt, lines, uint64(pcdelta), int64(newLine)-line) line, pc = int64(newLine), p.Pc } } // Because these symbols will be concatenated together by the // linker, we need to reset the state machine that controls the // debug symbols. Do this using an end-of-sequence operator. // // Note: at one point in time, Delve did not support multiple end // sequence ops within a compilation unit (bug for this: // https://github.com/go-delve/delve/issues/1694), however the bug // has since been fixed (Oct 2019). // // Issue 38192: the DWARF standard specifies that when you issue // an end-sequence op, the PC value should be one past the last // text address in the translation unit, so apply a delta to the // text address before the end sequence op. If this isn't done, // GDB will assign a line number of zero the last row in the line // table, which we don't want. 
lastlen := uint64(s.Size - (lastpc - s.Func.Text.Pc)) putpclcdelta(ctxt, dctxt, lines, lastlen, 0) dctxt.AddUint8(lines, 0) // start extended opcode dwarf.Uleb128put(dctxt, lines, 1) dctxt.AddUint8(lines, dwarf.DW_LNE_end_sequence) } func putpclcdelta(linkctxt *Link, dctxt dwCtxt, s *LSym, deltaPC uint64, deltaLC int64) { // Choose a special opcode that minimizes the number of bytes needed to // encode the remaining PC delta and LC delta. var opcode int64 if deltaLC < LINE_BASE { if deltaPC >= PC_RANGE { opcode = OPCODE_BASE + (LINE_RANGE * PC_RANGE) } else { opcode = OPCODE_BASE + (LINE_RANGE * int64(deltaPC)) } } else if deltaLC < LINE_BASE+LINE_RANGE { if deltaPC >= PC_RANGE { opcode = OPCODE_BASE + (deltaLC - LINE_BASE) + (LINE_RANGE * PC_RANGE) if opcode > 255 { opcode -= LINE_RANGE } } else { opcode = OPCODE_BASE + (deltaLC - LINE_BASE) + (LINE_RANGE * int64(deltaPC)) } } else { if deltaPC <= PC_RANGE { opcode = OPCODE_BASE + (LINE_RANGE - 1) + (LINE_RANGE * int64(deltaPC)) if opcode > 255 { opcode = 255 } } else { // Use opcode 249 (pc+=23, lc+=5) or 255 (pc+=24, lc+=1). // // Let x=deltaPC-PC_RANGE. If we use opcode 255, x will be the remaining // deltaPC that we need to encode separately before emitting 255. If we // use opcode 249, we will need to encode x+1. If x+1 takes one more // byte to encode than x, then we use opcode 255. // // In all other cases x and x+1 take the same number of bytes to encode, // so we use opcode 249, which may save us a byte in encoding deltaLC, // for similar reasons. switch deltaPC - PC_RANGE { // PC_RANGE is the largest deltaPC we can encode in one byte, using // DW_LNS_const_add_pc. // // (1<<16)-1 is the largest deltaPC we can encode in three bytes, using // DW_LNS_fixed_advance_pc. // // (1<<(7n))-1 is the largest deltaPC we can encode in n+1 bytes for // n=1,3,4,5,..., using DW_LNS_advance_pc. 
case PC_RANGE, (1 << 7) - 1, (1 << 16) - 1, (1 << 21) - 1, (1 << 28) - 1, (1 << 35) - 1, (1 << 42) - 1, (1 << 49) - 1, (1 << 56) - 1, (1 << 63) - 1: opcode = 255 default: opcode = OPCODE_BASE + LINE_RANGE*PC_RANGE - 1 // 249 } } } if opcode < OPCODE_BASE || opcode > 255 { panic(fmt.Sprintf("produced invalid special opcode %d", opcode)) } // Subtract from deltaPC and deltaLC the amounts that the opcode will add. deltaPC -= uint64((opcode - OPCODE_BASE) / LINE_RANGE) deltaLC -= (opcode-OPCODE_BASE)%LINE_RANGE + LINE_BASE // Encode deltaPC. if deltaPC != 0 { if deltaPC <= PC_RANGE { // Adjust the opcode so that we can use the 1-byte DW_LNS_const_add_pc // instruction. opcode -= LINE_RANGE * int64(PC_RANGE-deltaPC) if opcode < OPCODE_BASE { panic(fmt.Sprintf("produced invalid special opcode %d", opcode)) } dctxt.AddUint8(s, dwarf.DW_LNS_const_add_pc) } else if (1<<14) <= deltaPC && deltaPC < (1<<16) { dctxt.AddUint8(s, dwarf.DW_LNS_fixed_advance_pc) dctxt.AddUint16(s, uint16(deltaPC)) } else { dctxt.AddUint8(s, dwarf.DW_LNS_advance_pc) dwarf.Uleb128put(dctxt, s, int64(deltaPC)) } } // Encode deltaLC. if deltaLC != 0 { dctxt.AddUint8(s, dwarf.DW_LNS_advance_line) dwarf.Sleb128put(dctxt, s, deltaLC) } // Output the special opcode. 
dctxt.AddUint8(s, uint8(opcode)) } // implement dwarf.Context type dwCtxt struct{ *Link } func (c dwCtxt) PtrSize() int { return c.Arch.PtrSize } func (c dwCtxt) AddInt(s dwarf.Sym, size int, i int64) { ls := s.(*LSym) ls.WriteInt(c.Link, ls.Size, size, i) } func (c dwCtxt) AddUint16(s dwarf.Sym, i uint16) { c.AddInt(s, 2, int64(i)) } func (c dwCtxt) AddUint8(s dwarf.Sym, i uint8) { b := []byte{byte(i)} c.AddBytes(s, b) } func (c dwCtxt) AddBytes(s dwarf.Sym, b []byte) { ls := s.(*LSym) ls.WriteBytes(c.Link, ls.Size, b) } func (c dwCtxt) AddString(s dwarf.Sym, v string) { ls := s.(*LSym) ls.WriteString(c.Link, ls.Size, len(v), v) ls.WriteInt(c.Link, ls.Size, 1, 0) } func (c dwCtxt) AddAddress(s dwarf.Sym, data interface{}, value int64) { ls := s.(*LSym) size := c.PtrSize() if data != nil { rsym := data.(*LSym) ls.WriteAddr(c.Link, ls.Size, size, rsym, value) } else { ls.WriteInt(c.Link, ls.Size, size, value) } } func (c dwCtxt) AddCURelativeAddress(s dwarf.Sym, data interface{}, value int64) { ls := s.(*LSym) rsym := data.(*LSym) ls.WriteCURelativeAddr(c.Link, ls.Size, rsym, value) } func (c dwCtxt) AddSectionOffset(s dwarf.Sym, size int, t interface{}, ofs int64) { panic("should be used only in the linker") } func (c dwCtxt) AddDWARFAddrSectionOffset(s dwarf.Sym, t interface{}, ofs int64) { size := 4 if isDwarf64(c.Link) { size = 8 } ls := s.(*LSym) rsym := t.(*LSym) ls.WriteAddr(c.Link, ls.Size, size, rsym, ofs) r := &ls.R[len(ls.R)-1] r.Type = objabi.R_DWARFSECREF } func (c dwCtxt) AddFileRef(s dwarf.Sym, f interface{}) { ls := s.(*LSym) rsym := f.(*LSym) fidx := c.Link.PosTable.FileIndex(rsym.Name) // Note the +1 here -- the value we're writing is going to be an // index into the DWARF line table file section, whose entries // are numbered starting at 1, not 0. 
ls.WriteInt(c.Link, ls.Size, 4, int64(fidx+1)) } func (c dwCtxt) CurrentOffset(s dwarf.Sym) int64 { ls := s.(*LSym) return ls.Size } // Here "from" is a symbol corresponding to an inlined or concrete // function, "to" is the symbol for the corresponding abstract // function, and "dclIdx" is the index of the symbol of interest with // respect to the Dcl slice of the original pre-optimization version // of the inlined function. func (c dwCtxt) RecordDclReference(from dwarf.Sym, to dwarf.Sym, dclIdx int, inlIndex int) { ls := from.(*LSym) tls := to.(*LSym) ridx := len(ls.R) - 1 c.Link.DwFixups.ReferenceChildDIE(ls, ridx, tls, dclIdx, inlIndex) } func (c dwCtxt) RecordChildDieOffsets(s dwarf.Sym, vars []*dwarf.Var, offsets []int32) { ls := s.(*LSym) c.Link.DwFixups.RegisterChildDIEOffsets(ls, vars, offsets) } func (c dwCtxt) Logf(format string, args ...interface{}) { c.Link.Logf(format, args...) } func isDwarf64(ctxt *Link) bool { return ctxt.Headtype == objabi.Haix } func (ctxt *Link) dwarfSym(s *LSym) (dwarfInfoSym, dwarfLocSym, dwarfRangesSym, dwarfAbsFnSym, dwarfDebugLines *LSym) { if s.Type != objabi.STEXT { ctxt.Diag("dwarfSym of non-TEXT %v", s) } if s.Func.dwarfInfoSym == nil { s.Func.dwarfInfoSym = &LSym{ Type: objabi.SDWARFFCN, } if ctxt.Flag_locationlists { s.Func.dwarfLocSym = &LSym{ Type: objabi.SDWARFLOC, } } s.Func.dwarfRangesSym = &LSym{ Type: objabi.SDWARFRANGE, } s.Func.dwarfDebugLinesSym = &LSym{ Type: objabi.SDWARFLINES, } if s.WasInlined() { s.Func.dwarfAbsFnSym = ctxt.DwFixups.AbsFuncDwarfSym(s) } } return s.Func.dwarfInfoSym, s.Func.dwarfLocSym, s.Func.dwarfRangesSym, s.Func.dwarfAbsFnSym, s.Func.dwarfDebugLinesSym } func (s *LSym) Length(dwarfContext interface{}) int64 { return s.Size } // fileSymbol returns a symbol corresponding to the source file of the // first instruction (prog) of the specified function. This will // presumably be the file in which the function is defined. 
func (ctxt *Link) fileSymbol(fn *LSym) *LSym { p := fn.Func.Text if p != nil { f, _ := linkgetlineFromPos(ctxt, p.Pos) fsym := ctxt.Lookup(f) return fsym } return nil } // populateDWARF fills in the DWARF Debugging Information Entries for // TEXT symbol 's'. The various DWARF symbols must already have been // initialized in InitTextSym. func (ctxt *Link) populateDWARF(curfn interface{}, s *LSym, myimportpath string) { info, loc, ranges, absfunc, lines := ctxt.dwarfSym(s) if info.Size != 0 { ctxt.Diag("makeFuncDebugEntry double process %v", s) } var scopes []dwarf.Scope var inlcalls dwarf.InlCalls if ctxt.DebugInfo != nil { scopes, inlcalls = ctxt.DebugInfo(s, info, curfn) } var err error dwctxt := dwCtxt{ctxt} filesym := ctxt.fileSymbol(s) fnstate := &dwarf.FnState{ Name: s.Name, Importpath: myimportpath, Info: info, Filesym: filesym, Loc: loc, Ranges: ranges, Absfn: absfunc, StartPC: s, Size: s.Size, External: !s.Static(), Scopes: scopes, InlCalls: inlcalls, UseBASEntries: ctxt.UseBASEntries, } if absfunc != nil { err = dwarf.PutAbstractFunc(dwctxt, fnstate) if err != nil { ctxt.Diag("emitting DWARF for %s failed: %v", s.Name, err) } err = dwarf.PutConcreteFunc(dwctxt, fnstate) } else { err = dwarf.PutDefaultFunc(dwctxt, fnstate) } if err != nil { ctxt.Diag("emitting DWARF for %s failed: %v", s.Name, err) } // Fill in the debug lines symbol. ctxt.generateDebugLinesSymbol(s, lines) } // DwarfIntConst creates a link symbol for an integer constant with the // given name, type and value. 
func (ctxt *Link) DwarfIntConst(myimportpath, name, typename string, val int64) { if myimportpath == "" { return } s := ctxt.LookupInit(dwarf.ConstInfoPrefix+myimportpath, func(s *LSym) { s.Type = objabi.SDWARFCONST ctxt.Data = append(ctxt.Data, s) }) dwarf.PutIntConst(dwCtxt{ctxt}, s, ctxt.Lookup(dwarf.InfoPrefix+typename), myimportpath+"."+name, val) } func (ctxt *Link) DwarfAbstractFunc(curfn interface{}, s *LSym, myimportpath string) { absfn := ctxt.DwFixups.AbsFuncDwarfSym(s) if absfn.Size != 0 { ctxt.Diag("internal error: DwarfAbstractFunc double process %v", s) } if s.Func == nil { s.Func = new(FuncInfo) } scopes, _ := ctxt.DebugInfo(s, absfn, curfn) dwctxt := dwCtxt{ctxt} filesym := ctxt.fileSymbol(s) fnstate := dwarf.FnState{ Name: s.Name, Importpath: myimportpath, Info: absfn, Filesym: filesym, Absfn: absfn, External: !s.Static(), Scopes: scopes, UseBASEntries: ctxt.UseBASEntries, } if err := dwarf.PutAbstractFunc(dwctxt, &fnstate); err != nil { ctxt.Diag("emitting DWARF for %s failed: %v", s.Name, err) } } // This table is designed to aid in the creation of references between // DWARF subprogram DIEs. // // In most cases when one DWARF DIE has to refer to another DWARF DIE, // the target of the reference has an LSym, which makes it easy to use // the existing relocation mechanism. For DWARF inlined routine DIEs, // however, the subprogram DIE has to refer to a child // parameter/variable DIE of the abstract subprogram. This child DIE // doesn't have an LSym, and also of interest is the fact that when // DWARF generation is happening for inlined function F within caller // G, it's possible that DWARF generation hasn't happened yet for F, // so there is no way to know the offset of a child DIE within F's // abstract function. Making matters more complex, each inlined // instance of F may refer to a subset of the original F's variables // (depending on what happens with optimization, some vars may be // eliminated). 
// // The fixup table below helps overcome this hurdle. At the point // where a parameter/variable reference is made (via a call to // "ReferenceChildDIE"), a fixup record is generate that records // the relocation that is targeting that child variable. At a later // point when the abstract function DIE is emitted, there will be // a call to "RegisterChildDIEOffsets", at which point the offsets // needed to apply fixups are captured. Finally, once the parallel // portion of the compilation is done, fixups can actually be applied // during the "Finalize" method (this can't be done during the // parallel portion of the compile due to the possibility of data // races). // // This table is also used to record the "precursor" function node for // each function that is the target of an inline -- child DIE references // have to be made with respect to the original pre-optimization // version of the function (to allow for the fact that each inlined // body may be optimized differently). type DwarfFixupTable struct { ctxt *Link mu sync.Mutex symtab map[*LSym]int // maps abstract fn LSYM to index in svec svec []symFixups precursor map[*LSym]fnState // maps fn Lsym to precursor Node, absfn sym } type symFixups struct { fixups []relFixup doffsets []declOffset inlIndex int32 defseen bool } type declOffset struct { // Index of variable within DCL list of pre-optimization function dclIdx int32 // Offset of var's child DIE with respect to containing subprogram DIE offset int32 } type relFixup struct { refsym *LSym relidx int32 dclidx int32 } type fnState struct { // precursor function (really *gc.Node) precursor interface{} // abstract function symbol absfn *LSym } func NewDwarfFixupTable(ctxt *Link) *DwarfFixupTable { return &DwarfFixupTable{ ctxt: ctxt, symtab: make(map[*LSym]int), precursor: make(map[*LSym]fnState), } } func (ft *DwarfFixupTable) GetPrecursorFunc(s *LSym) interface{} { if fnstate, found := ft.precursor[s]; found { return fnstate.precursor } return nil } func 
(ft *DwarfFixupTable) SetPrecursorFunc(s *LSym, fn interface{}) { if _, found := ft.precursor[s]; found { ft.ctxt.Diag("internal error: DwarfFixupTable.SetPrecursorFunc double call on %v", s) } // initialize abstract function symbol now. This is done here so // as to avoid data races later on during the parallel portion of // the back end. absfn := ft.ctxt.LookupDerived(s, dwarf.InfoPrefix+s.Name+dwarf.AbstractFuncSuffix) absfn.Set(AttrDuplicateOK, true) absfn.Type = objabi.SDWARFABSFCN ft.ctxt.Data = append(ft.ctxt.Data, absfn) // In the case of "late" inlining (inlines that happen during // wrapper generation as opposed to the main inlining phase) it's // possible that we didn't cache the abstract function sym for the // text symbol -- do so now if needed. See issue 38068. if s.Func != nil && s.Func.dwarfAbsFnSym == nil { s.Func.dwarfAbsFnSym = absfn } ft.precursor[s] = fnState{precursor: fn, absfn: absfn} } // Make a note of a child DIE reference: relocation 'ridx' within symbol 's' // is targeting child 'c' of DIE with symbol 'tgt'. func (ft *DwarfFixupTable) ReferenceChildDIE(s *LSym, ridx int, tgt *LSym, dclidx int, inlIndex int) { // Protect against concurrent access if multiple backend workers ft.mu.Lock() defer ft.mu.Unlock() // Create entry for symbol if not already present. idx, found := ft.symtab[tgt] if !found { ft.svec = append(ft.svec, symFixups{inlIndex: int32(inlIndex)}) idx = len(ft.svec) - 1 ft.symtab[tgt] = idx } // Do we have child DIE offsets available? If so, then apply them, // otherwise create a fixup record. 
sf := &ft.svec[idx] if len(sf.doffsets) > 0 { found := false for _, do := range sf.doffsets { if do.dclIdx == int32(dclidx) { off := do.offset s.R[ridx].Add += int64(off) found = true break } } if !found { ft.ctxt.Diag("internal error: DwarfFixupTable.ReferenceChildDIE unable to locate child DIE offset for dclIdx=%d src=%v tgt=%v", dclidx, s, tgt) } } else { sf.fixups = append(sf.fixups, relFixup{s, int32(ridx), int32(dclidx)}) } } // Called once DWARF generation is complete for a given abstract function, // whose children might have been referenced via a call above. Stores // the offsets for any child DIEs (vars, params) so that they can be // consumed later in on DwarfFixupTable.Finalize, which applies any // outstanding fixups. func (ft *DwarfFixupTable) RegisterChildDIEOffsets(s *LSym, vars []*dwarf.Var, coffsets []int32) { // Length of these two slices should agree if len(vars) != len(coffsets) { ft.ctxt.Diag("internal error: RegisterChildDIEOffsets vars/offsets length mismatch") return } // Generate the slice of declOffset's based in vars/coffsets doffsets := make([]declOffset, len(coffsets)) for i := range coffsets { doffsets[i].dclIdx = vars[i].ChildIndex doffsets[i].offset = coffsets[i] } ft.mu.Lock() defer ft.mu.Unlock() // Store offsets for this symbol. 
idx, found := ft.symtab[s] if !found { sf := symFixups{inlIndex: -1, defseen: true, doffsets: doffsets} ft.svec = append(ft.svec, sf) ft.symtab[s] = len(ft.svec) - 1 } else { sf := &ft.svec[idx] sf.doffsets = doffsets sf.defseen = true } } func (ft *DwarfFixupTable) processFixups(slot int, s *LSym) { sf := &ft.svec[slot] for _, f := range sf.fixups { dfound := false for _, doffset := range sf.doffsets { if doffset.dclIdx == f.dclidx { f.refsym.R[f.relidx].Add += int64(doffset.offset) dfound = true break } } if !dfound { ft.ctxt.Diag("internal error: DwarfFixupTable has orphaned fixup on %v targeting %v relidx=%d dclidx=%d", f.refsym, s, f.relidx, f.dclidx) } } } // return the LSym corresponding to the 'abstract subprogram' DWARF // info entry for a function. func (ft *DwarfFixupTable) AbsFuncDwarfSym(fnsym *LSym) *LSym { // Protect against concurrent access if multiple backend workers ft.mu.Lock() defer ft.mu.Unlock() if fnstate, found := ft.precursor[fnsym]; found { return fnstate.absfn } ft.ctxt.Diag("internal error: AbsFuncDwarfSym requested for %v, not seen during inlining", fnsym) return nil } // Called after all functions have been compiled; the main job of this // function is to identify cases where there are outstanding fixups. // This scenario crops up when we have references to variables of an // inlined routine, but that routine is defined in some other package. // This helper walks through and locate these fixups, then invokes a // helper to create an abstract subprogram DIE for each one. func (ft *DwarfFixupTable) Finalize(myimportpath string, trace bool) { if trace { ft.ctxt.Logf("DwarfFixupTable.Finalize invoked for %s\n", myimportpath) } // Collect up the keys from the precursor map, then sort the // resulting list (don't want to rely on map ordering here). fns := make([]*LSym, len(ft.precursor)) idx := 0 for fn := range ft.precursor { fns[idx] = fn idx++ } sort.Sort(BySymName(fns)) // Should not be called during parallel portion of compilation. 
if ft.ctxt.InParallel { ft.ctxt.Diag("internal error: DwarfFixupTable.Finalize call during parallel backend") } // Generate any missing abstract functions. for _, s := range fns { absfn := ft.AbsFuncDwarfSym(s) slot, found := ft.symtab[absfn] if !found || !ft.svec[slot].defseen { ft.ctxt.GenAbstractFunc(s) } } // Apply fixups. for _, s := range fns { absfn := ft.AbsFuncDwarfSym(s) slot, found := ft.symtab[absfn] if !found { ft.ctxt.Diag("internal error: DwarfFixupTable.Finalize orphan abstract function for %v", s) } else { ft.processFixups(slot, s) } } } type BySymName []*LSym func (s BySymName) Len() int { return len(s) } func (s BySymName) Less(i, j int) bool { return s[i].Name < s[j].Name } func (s BySymName) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/twitchyliquid64/golang-asm/obj/go.go
vendor/github.com/twitchyliquid64/golang-asm/obj/go.go
// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package obj // go-specific code shared across loaders (5l, 6l, 8l). func Nopout(p *Prog) { p.As = ANOP p.Scond = 0 p.From = Addr{} p.RestArgs = nil p.Reg = 0 p.To = Addr{} }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/twitchyliquid64/golang-asm/obj/textflag.go
vendor/github.com/twitchyliquid64/golang-asm/obj/textflag.go
// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // This file defines flags attached to various functions // and data objects. The compilers, assemblers, and linker must // all agree on these values. package obj const ( // Don't profile the marked routine. // // Deprecated: Not implemented, do not use. NOPROF = 1 // It is ok for the linker to get multiple of these symbols. It will // pick one of the duplicates to use. DUPOK = 2 // Don't insert stack check preamble. NOSPLIT = 4 // Put this data in a read-only section. RODATA = 8 // This data contains no pointers. NOPTR = 16 // This is a wrapper function and should not count as disabling 'recover'. WRAPPER = 32 // This function uses its incoming context register. NEEDCTXT = 64 // When passed to ggloblsym, causes Local to be set to true on the LSym it creates. LOCAL = 128 // Allocate a word of thread local storage and store the offset from the // thread local base to the thread local storage in this variable. TLSBSS = 256 // Do not insert instructions to allocate a stack frame for this function. // Only valid on functions that declare a frame size of 0. // TODO(mwhudson): only implemented for ppc64x at present. NOFRAME = 512 // Function can call reflect.Type.Method or reflect.Type.MethodByName. REFLECTMETHOD = 1024 // Function is the top of the call stack. Call stack unwinders should stop // at this function. TOPFRAME = 2048 )
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/twitchyliquid64/golang-asm/obj/objfile.go
vendor/github.com/twitchyliquid64/golang-asm/obj/objfile.go
// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Writing Go object files. package obj import ( "bytes" "github.com/twitchyliquid64/golang-asm/bio" "github.com/twitchyliquid64/golang-asm/goobj" "github.com/twitchyliquid64/golang-asm/objabi" "github.com/twitchyliquid64/golang-asm/sys" "crypto/sha1" "encoding/binary" "fmt" "io" "path/filepath" "sort" "strings" ) // Entry point of writing new object file. func WriteObjFile(ctxt *Link, b *bio.Writer) { debugAsmEmit(ctxt) genFuncInfoSyms(ctxt) w := writer{ Writer: goobj.NewWriter(b), ctxt: ctxt, pkgpath: objabi.PathToPrefix(ctxt.Pkgpath), } start := b.Offset() w.init() // Header // We just reserve the space. We'll fill in the offsets later. flags := uint32(0) if ctxt.Flag_shared { flags |= goobj.ObjFlagShared } if w.pkgpath == "" { flags |= goobj.ObjFlagNeedNameExpansion } if ctxt.IsAsm { flags |= goobj.ObjFlagFromAssembly } h := goobj.Header{ Magic: goobj.Magic, Fingerprint: ctxt.Fingerprint, Flags: flags, } h.Write(w.Writer) // String table w.StringTable() // Autolib h.Offsets[goobj.BlkAutolib] = w.Offset() for i := range ctxt.Imports { ctxt.Imports[i].Write(w.Writer) } // Package references h.Offsets[goobj.BlkPkgIdx] = w.Offset() for _, pkg := range w.pkglist { w.StringRef(pkg) } // File table (for DWARF and pcln generation). 
h.Offsets[goobj.BlkFile] = w.Offset() for _, f := range ctxt.PosTable.FileTable() { w.StringRef(filepath.ToSlash(f)) } // Symbol definitions h.Offsets[goobj.BlkSymdef] = w.Offset() for _, s := range ctxt.defs { w.Sym(s) } // Short hashed symbol definitions h.Offsets[goobj.BlkHashed64def] = w.Offset() for _, s := range ctxt.hashed64defs { w.Sym(s) } // Hashed symbol definitions h.Offsets[goobj.BlkHasheddef] = w.Offset() for _, s := range ctxt.hasheddefs { w.Sym(s) } // Non-pkg symbol definitions h.Offsets[goobj.BlkNonpkgdef] = w.Offset() for _, s := range ctxt.nonpkgdefs { w.Sym(s) } // Non-pkg symbol references h.Offsets[goobj.BlkNonpkgref] = w.Offset() for _, s := range ctxt.nonpkgrefs { w.Sym(s) } // Referenced package symbol flags h.Offsets[goobj.BlkRefFlags] = w.Offset() w.refFlags() // Hashes h.Offsets[goobj.BlkHash64] = w.Offset() for _, s := range ctxt.hashed64defs { w.Hash64(s) } h.Offsets[goobj.BlkHash] = w.Offset() for _, s := range ctxt.hasheddefs { w.Hash(s) } // TODO: hashedrefs unused/unsupported for now // Reloc indexes h.Offsets[goobj.BlkRelocIdx] = w.Offset() nreloc := uint32(0) lists := [][]*LSym{ctxt.defs, ctxt.hashed64defs, ctxt.hasheddefs, ctxt.nonpkgdefs} for _, list := range lists { for _, s := range list { w.Uint32(nreloc) nreloc += uint32(len(s.R)) } } w.Uint32(nreloc) // Symbol Info indexes h.Offsets[goobj.BlkAuxIdx] = w.Offset() naux := uint32(0) for _, list := range lists { for _, s := range list { w.Uint32(naux) naux += uint32(nAuxSym(s)) } } w.Uint32(naux) // Data indexes h.Offsets[goobj.BlkDataIdx] = w.Offset() dataOff := uint32(0) for _, list := range lists { for _, s := range list { w.Uint32(dataOff) dataOff += uint32(len(s.P)) } } w.Uint32(dataOff) // Relocs h.Offsets[goobj.BlkReloc] = w.Offset() for _, list := range lists { for _, s := range list { for i := range s.R { w.Reloc(&s.R[i]) } } } // Aux symbol info h.Offsets[goobj.BlkAux] = w.Offset() for _, list := range lists { for _, s := range list { w.Aux(s) } } // Data 
h.Offsets[goobj.BlkData] = w.Offset() for _, list := range lists { for _, s := range list { w.Bytes(s.P) } } // Pcdata h.Offsets[goobj.BlkPcdata] = w.Offset() for _, s := range ctxt.Text { // iteration order must match genFuncInfoSyms if s.Func != nil { pc := &s.Func.Pcln w.Bytes(pc.Pcsp.P) w.Bytes(pc.Pcfile.P) w.Bytes(pc.Pcline.P) w.Bytes(pc.Pcinline.P) for i := range pc.Pcdata { w.Bytes(pc.Pcdata[i].P) } } } // Blocks used only by tools (objdump, nm). // Referenced symbol names from other packages h.Offsets[goobj.BlkRefName] = w.Offset() w.refNames() h.Offsets[goobj.BlkEnd] = w.Offset() // Fix up block offsets in the header end := start + int64(w.Offset()) b.MustSeek(start, 0) h.Write(w.Writer) b.MustSeek(end, 0) } type writer struct { *goobj.Writer ctxt *Link pkgpath string // the package import path (escaped), "" if unknown pkglist []string // list of packages referenced, indexed by ctxt.pkgIdx } // prepare package index list func (w *writer) init() { w.pkglist = make([]string, len(w.ctxt.pkgIdx)+1) w.pkglist[0] = "" // dummy invalid package for index 0 for pkg, i := range w.ctxt.pkgIdx { w.pkglist[i] = pkg } } func (w *writer) StringTable() { w.AddString("") for _, p := range w.ctxt.Imports { w.AddString(p.Pkg) } for _, pkg := range w.pkglist { w.AddString(pkg) } w.ctxt.traverseSyms(traverseAll, func(s *LSym) { // TODO: this includes references of indexed symbols from other packages, // for which the linker doesn't need the name. Consider moving them to // a separate block (for tools only). if w.pkgpath != "" { s.Name = strings.Replace(s.Name, "\"\".", w.pkgpath+".", -1) } // Don't put names of builtins into the string table (to save // space). if s.PkgIdx == goobj.PkgIdxBuiltin { return } w.AddString(s.Name) }) // All filenames are in the postable. 
for _, f := range w.ctxt.PosTable.FileTable() { w.AddString(filepath.ToSlash(f)) } } func (w *writer) Sym(s *LSym) { abi := uint16(s.ABI()) if s.Static() { abi = goobj.SymABIstatic } flag := uint8(0) if s.DuplicateOK() { flag |= goobj.SymFlagDupok } if s.Local() { flag |= goobj.SymFlagLocal } if s.MakeTypelink() { flag |= goobj.SymFlagTypelink } if s.Leaf() { flag |= goobj.SymFlagLeaf } if s.NoSplit() { flag |= goobj.SymFlagNoSplit } if s.ReflectMethod() { flag |= goobj.SymFlagReflectMethod } if s.TopFrame() { flag |= goobj.SymFlagTopFrame } if strings.HasPrefix(s.Name, "type.") && s.Name[5] != '.' && s.Type == objabi.SRODATA { flag |= goobj.SymFlagGoType } flag2 := uint8(0) if s.UsedInIface() { flag2 |= goobj.SymFlagUsedInIface } if strings.HasPrefix(s.Name, "go.itab.") && s.Type == objabi.SRODATA { flag2 |= goobj.SymFlagItab } name := s.Name if strings.HasPrefix(name, "gofile..") { name = filepath.ToSlash(name) } var align uint32 if s.Func != nil { align = uint32(s.Func.Align) } if s.ContentAddressable() { // We generally assume data symbols are natually aligned, // except for strings. If we dedup a string symbol and a // non-string symbol with the same content, we should keep // the largest alignment. // TODO: maybe the compiler could set the alignment for all // data symbols more carefully. if s.Size != 0 && !strings.HasPrefix(s.Name, "go.string.") { switch { case w.ctxt.Arch.PtrSize == 8 && s.Size%8 == 0: align = 8 case s.Size%4 == 0: align = 4 case s.Size%2 == 0: align = 2 } // don't bother setting align to 1. 
} } var o goobj.Sym o.SetName(name, w.Writer) o.SetABI(abi) o.SetType(uint8(s.Type)) o.SetFlag(flag) o.SetFlag2(flag2) o.SetSiz(uint32(s.Size)) o.SetAlign(align) o.Write(w.Writer) } func (w *writer) Hash64(s *LSym) { if !s.ContentAddressable() || len(s.R) != 0 { panic("Hash of non-content-addresable symbol") } b := contentHash64(s) w.Bytes(b[:]) } func (w *writer) Hash(s *LSym) { if !s.ContentAddressable() { panic("Hash of non-content-addresable symbol") } b := w.contentHash(s) w.Bytes(b[:]) } func contentHash64(s *LSym) goobj.Hash64Type { var b goobj.Hash64Type copy(b[:], s.P) return b } // Compute the content hash for a content-addressable symbol. // We build a content hash based on its content and relocations. // Depending on the category of the referenced symbol, we choose // different hash algorithms such that the hash is globally // consistent. // - For referenced content-addressable symbol, its content hash // is globally consistent. // - For package symbol and builtin symbol, its local index is // globally consistent. // - For non-package symbol, its fully-expanded name is globally // consistent. For now, we require we know the current package // path so we can always expand symbol names. (Otherwise, // symbols with relocations are not considered hashable.) // // For now, we assume there is no circular dependencies among // hashed symbols. func (w *writer) contentHash(s *LSym) goobj.HashType { h := sha1.New() // The compiler trims trailing zeros _sometimes_. We just do // it always. 
h.Write(bytes.TrimRight(s.P, "\x00")) var tmp [14]byte for i := range s.R { r := &s.R[i] binary.LittleEndian.PutUint32(tmp[:4], uint32(r.Off)) tmp[4] = r.Siz tmp[5] = uint8(r.Type) binary.LittleEndian.PutUint64(tmp[6:14], uint64(r.Add)) h.Write(tmp[:]) rs := r.Sym switch rs.PkgIdx { case goobj.PkgIdxHashed64: h.Write([]byte{0}) t := contentHash64(rs) h.Write(t[:]) case goobj.PkgIdxHashed: h.Write([]byte{1}) t := w.contentHash(rs) h.Write(t[:]) case goobj.PkgIdxNone: h.Write([]byte{2}) io.WriteString(h, rs.Name) // name is already expanded at this point case goobj.PkgIdxBuiltin: h.Write([]byte{3}) binary.LittleEndian.PutUint32(tmp[:4], uint32(rs.SymIdx)) h.Write(tmp[:4]) case goobj.PkgIdxSelf: io.WriteString(h, w.pkgpath) binary.LittleEndian.PutUint32(tmp[:4], uint32(rs.SymIdx)) h.Write(tmp[:4]) default: io.WriteString(h, rs.Pkg) binary.LittleEndian.PutUint32(tmp[:4], uint32(rs.SymIdx)) h.Write(tmp[:4]) } } var b goobj.HashType copy(b[:], h.Sum(nil)) return b } func makeSymRef(s *LSym) goobj.SymRef { if s == nil { return goobj.SymRef{} } if s.PkgIdx == 0 || !s.Indexed() { fmt.Printf("unindexed symbol reference: %v\n", s) panic("unindexed symbol reference") } return goobj.SymRef{PkgIdx: uint32(s.PkgIdx), SymIdx: uint32(s.SymIdx)} } func (w *writer) Reloc(r *Reloc) { var o goobj.Reloc o.SetOff(r.Off) o.SetSiz(r.Siz) o.SetType(uint8(r.Type)) o.SetAdd(r.Add) o.SetSym(makeSymRef(r.Sym)) o.Write(w.Writer) } func (w *writer) aux1(typ uint8, rs *LSym) { var o goobj.Aux o.SetType(typ) o.SetSym(makeSymRef(rs)) o.Write(w.Writer) } func (w *writer) Aux(s *LSym) { if s.Gotype != nil { w.aux1(goobj.AuxGotype, s.Gotype) } if s.Func != nil { w.aux1(goobj.AuxFuncInfo, s.Func.FuncInfoSym) for _, d := range s.Func.Pcln.Funcdata { w.aux1(goobj.AuxFuncdata, d) } if s.Func.dwarfInfoSym != nil && s.Func.dwarfInfoSym.Size != 0 { w.aux1(goobj.AuxDwarfInfo, s.Func.dwarfInfoSym) } if s.Func.dwarfLocSym != nil && s.Func.dwarfLocSym.Size != 0 { w.aux1(goobj.AuxDwarfLoc, s.Func.dwarfLocSym) } if 
s.Func.dwarfRangesSym != nil && s.Func.dwarfRangesSym.Size != 0 { w.aux1(goobj.AuxDwarfRanges, s.Func.dwarfRangesSym) } if s.Func.dwarfDebugLinesSym != nil && s.Func.dwarfDebugLinesSym.Size != 0 { w.aux1(goobj.AuxDwarfLines, s.Func.dwarfDebugLinesSym) } } } // Emits flags of referenced indexed symbols. func (w *writer) refFlags() { seen := make(map[*LSym]bool) w.ctxt.traverseSyms(traverseRefs, func(rs *LSym) { // only traverse refs, not auxs, as tools don't need auxs switch rs.PkgIdx { case goobj.PkgIdxNone, goobj.PkgIdxHashed64, goobj.PkgIdxHashed, goobj.PkgIdxBuiltin, goobj.PkgIdxSelf: // not an external indexed reference return case goobj.PkgIdxInvalid: panic("unindexed symbol reference") } if seen[rs] { return } seen[rs] = true symref := makeSymRef(rs) flag2 := uint8(0) if rs.UsedInIface() { flag2 |= goobj.SymFlagUsedInIface } if flag2 == 0 { return // no need to write zero flags } var o goobj.RefFlags o.SetSym(symref) o.SetFlag2(flag2) o.Write(w.Writer) }) } // Emits names of referenced indexed symbols, used by tools (objdump, nm) // only. func (w *writer) refNames() { seen := make(map[*LSym]bool) w.ctxt.traverseSyms(traverseRefs, func(rs *LSym) { // only traverse refs, not auxs, as tools don't need auxs switch rs.PkgIdx { case goobj.PkgIdxNone, goobj.PkgIdxHashed64, goobj.PkgIdxHashed, goobj.PkgIdxBuiltin, goobj.PkgIdxSelf: // not an external indexed reference return case goobj.PkgIdxInvalid: panic("unindexed symbol reference") } if seen[rs] { return } seen[rs] = true symref := makeSymRef(rs) var o goobj.RefName o.SetSym(symref) o.SetName(rs.Name, w.Writer) o.Write(w.Writer) }) // TODO: output in sorted order? // Currently tools (cmd/internal/goobj package) doesn't use mmap, // and it just read it into a map in memory upfront. If it uses // mmap, if the output is sorted, it probably could avoid reading // into memory and just do lookups in the mmap'd object file. } // return the number of aux symbols s have. 
func nAuxSym(s *LSym) int { n := 0 if s.Gotype != nil { n++ } if s.Func != nil { // FuncInfo is an aux symbol, each Funcdata is an aux symbol n += 1 + len(s.Func.Pcln.Funcdata) if s.Func.dwarfInfoSym != nil && s.Func.dwarfInfoSym.Size != 0 { n++ } if s.Func.dwarfLocSym != nil && s.Func.dwarfLocSym.Size != 0 { n++ } if s.Func.dwarfRangesSym != nil && s.Func.dwarfRangesSym.Size != 0 { n++ } if s.Func.dwarfDebugLinesSym != nil && s.Func.dwarfDebugLinesSym.Size != 0 { n++ } } return n } // generate symbols for FuncInfo. func genFuncInfoSyms(ctxt *Link) { infosyms := make([]*LSym, 0, len(ctxt.Text)) var pcdataoff uint32 var b bytes.Buffer symidx := int32(len(ctxt.defs)) for _, s := range ctxt.Text { if s.Func == nil { continue } o := goobj.FuncInfo{ Args: uint32(s.Func.Args), Locals: uint32(s.Func.Locals), FuncID: objabi.FuncID(s.Func.FuncID), } pc := &s.Func.Pcln o.Pcsp = pcdataoff pcdataoff += uint32(len(pc.Pcsp.P)) o.Pcfile = pcdataoff pcdataoff += uint32(len(pc.Pcfile.P)) o.Pcline = pcdataoff pcdataoff += uint32(len(pc.Pcline.P)) o.Pcinline = pcdataoff pcdataoff += uint32(len(pc.Pcinline.P)) o.Pcdata = make([]uint32, len(pc.Pcdata)) for i, pcd := range pc.Pcdata { o.Pcdata[i] = pcdataoff pcdataoff += uint32(len(pcd.P)) } o.PcdataEnd = pcdataoff o.Funcdataoff = make([]uint32, len(pc.Funcdataoff)) for i, x := range pc.Funcdataoff { o.Funcdataoff[i] = uint32(x) } i := 0 o.File = make([]goobj.CUFileIndex, len(pc.UsedFiles)) for f := range pc.UsedFiles { o.File[i] = f i++ } sort.Slice(o.File, func(i, j int) bool { return o.File[i] < o.File[j] }) o.InlTree = make([]goobj.InlTreeNode, len(pc.InlTree.nodes)) for i, inl := range pc.InlTree.nodes { f, l := getFileIndexAndLine(ctxt, inl.Pos) o.InlTree[i] = goobj.InlTreeNode{ Parent: int32(inl.Parent), File: goobj.CUFileIndex(f), Line: l, Func: makeSymRef(inl.Func), ParentPC: inl.ParentPC, } } o.Write(&b) isym := &LSym{ Type: objabi.SDATA, // for now, I don't think it matters PkgIdx: goobj.PkgIdxSelf, SymIdx: symidx, P: 
append([]byte(nil), b.Bytes()...), } isym.Set(AttrIndexed, true) symidx++ infosyms = append(infosyms, isym) s.Func.FuncInfoSym = isym b.Reset() dwsyms := []*LSym{s.Func.dwarfRangesSym, s.Func.dwarfLocSym, s.Func.dwarfDebugLinesSym, s.Func.dwarfInfoSym} for _, s := range dwsyms { if s == nil || s.Size == 0 { continue } s.PkgIdx = goobj.PkgIdxSelf s.SymIdx = symidx s.Set(AttrIndexed, true) symidx++ infosyms = append(infosyms, s) } } ctxt.defs = append(ctxt.defs, infosyms...) } // debugDumpAux is a dumper for selected aux symbols. func writeAuxSymDebug(ctxt *Link, par *LSym, aux *LSym) { // Most aux symbols (ex: funcdata) are not interesting-- // pick out just the DWARF ones for now. if aux.Type != objabi.SDWARFLOC && aux.Type != objabi.SDWARFFCN && aux.Type != objabi.SDWARFABSFCN && aux.Type != objabi.SDWARFLINES && aux.Type != objabi.SDWARFRANGE { return } ctxt.writeSymDebugNamed(aux, "aux for "+par.Name) } func debugAsmEmit(ctxt *Link) { if ctxt.Debugasm > 0 { ctxt.traverseSyms(traverseDefs, ctxt.writeSymDebug) if ctxt.Debugasm > 1 { fn := func(par *LSym, aux *LSym) { writeAuxSymDebug(ctxt, par, aux) } ctxt.traverseAuxSyms(traverseAux, fn) } } } func (ctxt *Link) writeSymDebug(s *LSym) { ctxt.writeSymDebugNamed(s, s.Name) } func (ctxt *Link) writeSymDebugNamed(s *LSym, name string) { ver := "" if ctxt.Debugasm > 1 { ver = fmt.Sprintf("<%d>", s.ABI()) } fmt.Fprintf(ctxt.Bso, "%s%s ", name, ver) if s.Type != 0 { fmt.Fprintf(ctxt.Bso, "%v ", s.Type) } if s.Static() { fmt.Fprint(ctxt.Bso, "static ") } if s.DuplicateOK() { fmt.Fprintf(ctxt.Bso, "dupok ") } if s.CFunc() { fmt.Fprintf(ctxt.Bso, "cfunc ") } if s.NoSplit() { fmt.Fprintf(ctxt.Bso, "nosplit ") } if s.TopFrame() { fmt.Fprintf(ctxt.Bso, "topframe ") } fmt.Fprintf(ctxt.Bso, "size=%d", s.Size) if s.Type == objabi.STEXT { fmt.Fprintf(ctxt.Bso, " args=%#x locals=%#x funcid=%#x", uint64(s.Func.Args), uint64(s.Func.Locals), uint64(s.Func.FuncID)) if s.Leaf() { fmt.Fprintf(ctxt.Bso, " leaf") } } fmt.Fprintf(ctxt.Bso, 
"\n") if s.Type == objabi.STEXT { for p := s.Func.Text; p != nil; p = p.Link { fmt.Fprintf(ctxt.Bso, "\t%#04x ", uint(int(p.Pc))) if ctxt.Debugasm > 1 { io.WriteString(ctxt.Bso, p.String()) } else { p.InnermostString(ctxt.Bso) } fmt.Fprintln(ctxt.Bso) } } for i := 0; i < len(s.P); i += 16 { fmt.Fprintf(ctxt.Bso, "\t%#04x", uint(i)) j := i for ; j < i+16 && j < len(s.P); j++ { fmt.Fprintf(ctxt.Bso, " %02x", s.P[j]) } for ; j < i+16; j++ { fmt.Fprintf(ctxt.Bso, " ") } fmt.Fprintf(ctxt.Bso, " ") for j = i; j < i+16 && j < len(s.P); j++ { c := int(s.P[j]) b := byte('.') if ' ' <= c && c <= 0x7e { b = byte(c) } ctxt.Bso.WriteByte(b) } fmt.Fprintf(ctxt.Bso, "\n") } sort.Sort(relocByOff(s.R)) // generate stable output for _, r := range s.R { name := "" ver := "" if r.Sym != nil { name = r.Sym.Name if ctxt.Debugasm > 1 { ver = fmt.Sprintf("<%d>", s.ABI()) } } else if r.Type == objabi.R_TLS_LE { name = "TLS" } if ctxt.Arch.InFamily(sys.ARM, sys.PPC64) { fmt.Fprintf(ctxt.Bso, "\trel %d+%d t=%d %s%s+%x\n", int(r.Off), r.Siz, r.Type, name, ver, uint64(r.Add)) } else { fmt.Fprintf(ctxt.Bso, "\trel %d+%d t=%d %s%s+%d\n", int(r.Off), r.Siz, r.Type, name, ver, r.Add) } } } // relocByOff sorts relocations by their offsets. type relocByOff []Reloc func (x relocByOff) Len() int { return len(x) } func (x relocByOff) Less(i, j int) bool { return x[i].Off < x[j].Off } func (x relocByOff) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/twitchyliquid64/golang-asm/obj/riscv/anames.go
vendor/github.com/twitchyliquid64/golang-asm/obj/riscv/anames.go
// Code generated by stringer -i cpu.go -o anames.go -p riscv; DO NOT EDIT. package riscv import "github.com/twitchyliquid64/golang-asm/obj" var Anames = []string{ obj.A_ARCHSPECIFIC: "ADDI", "SLTI", "SLTIU", "ANDI", "ORI", "XORI", "SLLI", "SRLI", "SRAI", "LUI", "AUIPC", "ADD", "SLT", "SLTU", "AND", "OR", "XOR", "SLL", "SRL", "SUB", "SRA", "SLLIRV32", "SRLIRV32", "SRAIRV32", "JAL", "JALR", "BEQ", "BNE", "BLT", "BLTU", "BGE", "BGEU", "LW", "LWU", "LH", "LHU", "LB", "LBU", "SW", "SH", "SB", "FENCE", "FENCEI", "FENCETSO", "ADDIW", "SLLIW", "SRLIW", "SRAIW", "ADDW", "SLLW", "SRLW", "SUBW", "SRAW", "LD", "SD", "MUL", "MULH", "MULHU", "MULHSU", "MULW", "DIV", "DIVU", "REM", "REMU", "DIVW", "DIVUW", "REMW", "REMUW", "LRD", "SCD", "LRW", "SCW", "AMOSWAPD", "AMOADDD", "AMOANDD", "AMOORD", "AMOXORD", "AMOMAXD", "AMOMAXUD", "AMOMIND", "AMOMINUD", "AMOSWAPW", "AMOADDW", "AMOANDW", "AMOORW", "AMOXORW", "AMOMAXW", "AMOMAXUW", "AMOMINW", "AMOMINUW", "RDCYCLE", "RDCYCLEH", "RDTIME", "RDTIMEH", "RDINSTRET", "RDINSTRETH", "FRCSR", "FSCSR", "FRRM", "FSRM", "FRFLAGS", "FSFLAGS", "FSRMI", "FSFLAGSI", "FLW", "FSW", "FADDS", "FSUBS", "FMULS", "FDIVS", "FMINS", "FMAXS", "FSQRTS", "FMADDS", "FMSUBS", "FNMADDS", "FNMSUBS", "FCVTWS", "FCVTLS", "FCVTSW", "FCVTSL", "FCVTWUS", "FCVTLUS", "FCVTSWU", "FCVTSLU", "FSGNJS", "FSGNJNS", "FSGNJXS", "FMVXS", "FMVSX", "FMVXW", "FMVWX", "FEQS", "FLTS", "FLES", "FCLASSS", "FLD", "FSD", "FADDD", "FSUBD", "FMULD", "FDIVD", "FMIND", "FMAXD", "FSQRTD", "FMADDD", "FMSUBD", "FNMADDD", "FNMSUBD", "FCVTWD", "FCVTLD", "FCVTDW", "FCVTDL", "FCVTWUD", "FCVTLUD", "FCVTDWU", "FCVTDLU", "FCVTSD", "FCVTDS", "FSGNJD", "FSGNJND", "FSGNJXD", "FMVXD", "FMVDX", "FEQD", "FLTD", "FLED", "FCLASSD", "FLQ", "FSQ", "FADDQ", "FSUBQ", "FMULQ", "FDIVQ", "FMINQ", "FMAXQ", "FSQRTQ", "FMADDQ", "FMSUBQ", "FNMADDQ", "FNMSUBQ", "FCVTWQ", "FCVTLQ", "FCVTSQ", "FCVTDQ", "FCVTQW", "FCVTQL", "FCVTQS", "FCVTQD", "FCVTWUQ", "FCVTLUQ", "FCVTQWU", "FCVTQLU", "FSGNJQ", "FSGNJNQ", "FSGNJXQ", "FMVXQ", 
"FMVQX", "FEQQ", "FLEQ", "FLTQ", "FCLASSQ", "CSRRW", "CSRRS", "CSRRC", "CSRRWI", "CSRRSI", "CSRRCI", "ECALL", "SCALL", "EBREAK", "SBREAK", "MRET", "SRET", "URET", "DRET", "WFI", "SFENCEVMA", "HFENCEGVMA", "HFENCEVVMA", "WORD", "BEQZ", "BGEZ", "BGT", "BGTU", "BGTZ", "BLE", "BLEU", "BLEZ", "BLTZ", "BNEZ", "FNEGD", "FNEGS", "FNED", "FNES", "MOV", "MOVB", "MOVBU", "MOVF", "MOVD", "MOVH", "MOVHU", "MOVW", "MOVWU", "NEG", "NEGW", "NOT", "SEQZ", "SNEZ", "LAST", }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/twitchyliquid64/golang-asm/obj/riscv/inst.go
vendor/github.com/twitchyliquid64/golang-asm/obj/riscv/inst.go
// Code generated by parse_opcodes -go; DO NOT EDIT. package riscv import "github.com/twitchyliquid64/golang-asm/obj" type inst struct { opcode uint32 funct3 uint32 rs2 uint32 csr int64 funct7 uint32 } func encode(a obj.As) *inst { switch a { case ABEQ: return &inst{0x63, 0x0, 0x0, 0, 0x0} case ABNE: return &inst{0x63, 0x1, 0x0, 0, 0x0} case ABLT: return &inst{0x63, 0x4, 0x0, 0, 0x0} case ABGE: return &inst{0x63, 0x5, 0x0, 0, 0x0} case ABLTU: return &inst{0x63, 0x6, 0x0, 0, 0x0} case ABGEU: return &inst{0x63, 0x7, 0x0, 0, 0x0} case AJALR: return &inst{0x67, 0x0, 0x0, 0, 0x0} case AJAL: return &inst{0x6f, 0x0, 0x0, 0, 0x0} case ALUI: return &inst{0x37, 0x0, 0x0, 0, 0x0} case AAUIPC: return &inst{0x17, 0x0, 0x0, 0, 0x0} case AADDI: return &inst{0x13, 0x0, 0x0, 0, 0x0} case ASLLI: return &inst{0x13, 0x1, 0x0, 0, 0x0} case ASLTI: return &inst{0x13, 0x2, 0x0, 0, 0x0} case ASLTIU: return &inst{0x13, 0x3, 0x0, 0, 0x0} case AXORI: return &inst{0x13, 0x4, 0x0, 0, 0x0} case ASRLI: return &inst{0x13, 0x5, 0x0, 0, 0x0} case ASRAI: return &inst{0x13, 0x5, 0x0, 1024, 0x20} case AORI: return &inst{0x13, 0x6, 0x0, 0, 0x0} case AANDI: return &inst{0x13, 0x7, 0x0, 0, 0x0} case AADD: return &inst{0x33, 0x0, 0x0, 0, 0x0} case ASUB: return &inst{0x33, 0x0, 0x0, 1024, 0x20} case ASLL: return &inst{0x33, 0x1, 0x0, 0, 0x0} case ASLT: return &inst{0x33, 0x2, 0x0, 0, 0x0} case ASLTU: return &inst{0x33, 0x3, 0x0, 0, 0x0} case AXOR: return &inst{0x33, 0x4, 0x0, 0, 0x0} case ASRL: return &inst{0x33, 0x5, 0x0, 0, 0x0} case ASRA: return &inst{0x33, 0x5, 0x0, 1024, 0x20} case AOR: return &inst{0x33, 0x6, 0x0, 0, 0x0} case AAND: return &inst{0x33, 0x7, 0x0, 0, 0x0} case AADDIW: return &inst{0x1b, 0x0, 0x0, 0, 0x0} case ASLLIW: return &inst{0x1b, 0x1, 0x0, 0, 0x0} case ASRLIW: return &inst{0x1b, 0x5, 0x0, 0, 0x0} case ASRAIW: return &inst{0x1b, 0x5, 0x0, 1024, 0x20} case AADDW: return &inst{0x3b, 0x0, 0x0, 0, 0x0} case ASUBW: return &inst{0x3b, 0x0, 0x0, 1024, 0x20} case ASLLW: return &inst{0x3b, 
0x1, 0x0, 0, 0x0} case ASRLW: return &inst{0x3b, 0x5, 0x0, 0, 0x0} case ASRAW: return &inst{0x3b, 0x5, 0x0, 1024, 0x20} case ALB: return &inst{0x3, 0x0, 0x0, 0, 0x0} case ALH: return &inst{0x3, 0x1, 0x0, 0, 0x0} case ALW: return &inst{0x3, 0x2, 0x0, 0, 0x0} case ALD: return &inst{0x3, 0x3, 0x0, 0, 0x0} case ALBU: return &inst{0x3, 0x4, 0x0, 0, 0x0} case ALHU: return &inst{0x3, 0x5, 0x0, 0, 0x0} case ALWU: return &inst{0x3, 0x6, 0x0, 0, 0x0} case ASB: return &inst{0x23, 0x0, 0x0, 0, 0x0} case ASH: return &inst{0x23, 0x1, 0x0, 0, 0x0} case ASW: return &inst{0x23, 0x2, 0x0, 0, 0x0} case ASD: return &inst{0x23, 0x3, 0x0, 0, 0x0} case AFENCE: return &inst{0xf, 0x0, 0x0, 0, 0x0} case AFENCEI: return &inst{0xf, 0x1, 0x0, 0, 0x0} case AMUL: return &inst{0x33, 0x0, 0x0, 32, 0x1} case AMULH: return &inst{0x33, 0x1, 0x0, 32, 0x1} case AMULHSU: return &inst{0x33, 0x2, 0x0, 32, 0x1} case AMULHU: return &inst{0x33, 0x3, 0x0, 32, 0x1} case ADIV: return &inst{0x33, 0x4, 0x0, 32, 0x1} case ADIVU: return &inst{0x33, 0x5, 0x0, 32, 0x1} case AREM: return &inst{0x33, 0x6, 0x0, 32, 0x1} case AREMU: return &inst{0x33, 0x7, 0x0, 32, 0x1} case AMULW: return &inst{0x3b, 0x0, 0x0, 32, 0x1} case ADIVW: return &inst{0x3b, 0x4, 0x0, 32, 0x1} case ADIVUW: return &inst{0x3b, 0x5, 0x0, 32, 0x1} case AREMW: return &inst{0x3b, 0x6, 0x0, 32, 0x1} case AREMUW: return &inst{0x3b, 0x7, 0x0, 32, 0x1} case AAMOADDW: return &inst{0x2f, 0x2, 0x0, 0, 0x0} case AAMOXORW: return &inst{0x2f, 0x2, 0x0, 512, 0x10} case AAMOORW: return &inst{0x2f, 0x2, 0x0, 1024, 0x20} case AAMOANDW: return &inst{0x2f, 0x2, 0x0, 1536, 0x30} case AAMOMINW: return &inst{0x2f, 0x2, 0x0, -2048, 0x40} case AAMOMAXW: return &inst{0x2f, 0x2, 0x0, -1536, 0x50} case AAMOMINUW: return &inst{0x2f, 0x2, 0x0, -1024, 0x60} case AAMOMAXUW: return &inst{0x2f, 0x2, 0x0, -512, 0x70} case AAMOSWAPW: return &inst{0x2f, 0x2, 0x0, 128, 0x4} case ALRW: return &inst{0x2f, 0x2, 0x0, 256, 0x8} case ASCW: return &inst{0x2f, 0x2, 0x0, 384, 0xc} case 
AAMOADDD: return &inst{0x2f, 0x3, 0x0, 0, 0x0} case AAMOXORD: return &inst{0x2f, 0x3, 0x0, 512, 0x10} case AAMOORD: return &inst{0x2f, 0x3, 0x0, 1024, 0x20} case AAMOANDD: return &inst{0x2f, 0x3, 0x0, 1536, 0x30} case AAMOMIND: return &inst{0x2f, 0x3, 0x0, -2048, 0x40} case AAMOMAXD: return &inst{0x2f, 0x3, 0x0, -1536, 0x50} case AAMOMINUD: return &inst{0x2f, 0x3, 0x0, -1024, 0x60} case AAMOMAXUD: return &inst{0x2f, 0x3, 0x0, -512, 0x70} case AAMOSWAPD: return &inst{0x2f, 0x3, 0x0, 128, 0x4} case ALRD: return &inst{0x2f, 0x3, 0x0, 256, 0x8} case ASCD: return &inst{0x2f, 0x3, 0x0, 384, 0xc} case AECALL: return &inst{0x73, 0x0, 0x0, 0, 0x0} case AEBREAK: return &inst{0x73, 0x0, 0x1, 1, 0x0} case AURET: return &inst{0x73, 0x0, 0x2, 2, 0x0} case ASRET: return &inst{0x73, 0x0, 0x2, 258, 0x8} case AMRET: return &inst{0x73, 0x0, 0x2, 770, 0x18} case ADRET: return &inst{0x73, 0x0, 0x12, 1970, 0x3d} case ASFENCEVMA: return &inst{0x73, 0x0, 0x0, 288, 0x9} case AWFI: return &inst{0x73, 0x0, 0x5, 261, 0x8} case ACSRRW: return &inst{0x73, 0x1, 0x0, 0, 0x0} case ACSRRS: return &inst{0x73, 0x2, 0x0, 0, 0x0} case ACSRRC: return &inst{0x73, 0x3, 0x0, 0, 0x0} case ACSRRWI: return &inst{0x73, 0x5, 0x0, 0, 0x0} case ACSRRSI: return &inst{0x73, 0x6, 0x0, 0, 0x0} case ACSRRCI: return &inst{0x73, 0x7, 0x0, 0, 0x0} case AHFENCEVVMA: return &inst{0x73, 0x0, 0x0, 544, 0x11} case AHFENCEGVMA: return &inst{0x73, 0x0, 0x0, 1568, 0x31} case AFADDS: return &inst{0x53, 0x0, 0x0, 0, 0x0} case AFSUBS: return &inst{0x53, 0x0, 0x0, 128, 0x4} case AFMULS: return &inst{0x53, 0x0, 0x0, 256, 0x8} case AFDIVS: return &inst{0x53, 0x0, 0x0, 384, 0xc} case AFSGNJS: return &inst{0x53, 0x0, 0x0, 512, 0x10} case AFSGNJNS: return &inst{0x53, 0x1, 0x0, 512, 0x10} case AFSGNJXS: return &inst{0x53, 0x2, 0x0, 512, 0x10} case AFMINS: return &inst{0x53, 0x0, 0x0, 640, 0x14} case AFMAXS: return &inst{0x53, 0x1, 0x0, 640, 0x14} case AFSQRTS: return &inst{0x53, 0x0, 0x0, 1408, 0x2c} case AFADDD: return &inst{0x53, 0x0, 
0x0, 32, 0x1} case AFSUBD: return &inst{0x53, 0x0, 0x0, 160, 0x5} case AFMULD: return &inst{0x53, 0x0, 0x0, 288, 0x9} case AFDIVD: return &inst{0x53, 0x0, 0x0, 416, 0xd} case AFSGNJD: return &inst{0x53, 0x0, 0x0, 544, 0x11} case AFSGNJND: return &inst{0x53, 0x1, 0x0, 544, 0x11} case AFSGNJXD: return &inst{0x53, 0x2, 0x0, 544, 0x11} case AFMIND: return &inst{0x53, 0x0, 0x0, 672, 0x15} case AFMAXD: return &inst{0x53, 0x1, 0x0, 672, 0x15} case AFCVTSD: return &inst{0x53, 0x0, 0x1, 1025, 0x20} case AFCVTDS: return &inst{0x53, 0x0, 0x0, 1056, 0x21} case AFSQRTD: return &inst{0x53, 0x0, 0x0, 1440, 0x2d} case AFADDQ: return &inst{0x53, 0x0, 0x0, 96, 0x3} case AFSUBQ: return &inst{0x53, 0x0, 0x0, 224, 0x7} case AFMULQ: return &inst{0x53, 0x0, 0x0, 352, 0xb} case AFDIVQ: return &inst{0x53, 0x0, 0x0, 480, 0xf} case AFSGNJQ: return &inst{0x53, 0x0, 0x0, 608, 0x13} case AFSGNJNQ: return &inst{0x53, 0x1, 0x0, 608, 0x13} case AFSGNJXQ: return &inst{0x53, 0x2, 0x0, 608, 0x13} case AFMINQ: return &inst{0x53, 0x0, 0x0, 736, 0x17} case AFMAXQ: return &inst{0x53, 0x1, 0x0, 736, 0x17} case AFCVTSQ: return &inst{0x53, 0x0, 0x3, 1027, 0x20} case AFCVTQS: return &inst{0x53, 0x0, 0x0, 1120, 0x23} case AFCVTDQ: return &inst{0x53, 0x0, 0x3, 1059, 0x21} case AFCVTQD: return &inst{0x53, 0x0, 0x1, 1121, 0x23} case AFSQRTQ: return &inst{0x53, 0x0, 0x0, 1504, 0x2f} case AFLES: return &inst{0x53, 0x0, 0x0, -1536, 0x50} case AFLTS: return &inst{0x53, 0x1, 0x0, -1536, 0x50} case AFEQS: return &inst{0x53, 0x2, 0x0, -1536, 0x50} case AFLED: return &inst{0x53, 0x0, 0x0, -1504, 0x51} case AFLTD: return &inst{0x53, 0x1, 0x0, -1504, 0x51} case AFEQD: return &inst{0x53, 0x2, 0x0, -1504, 0x51} case AFLEQ: return &inst{0x53, 0x0, 0x0, -1440, 0x53} case AFLTQ: return &inst{0x53, 0x1, 0x0, -1440, 0x53} case AFEQQ: return &inst{0x53, 0x2, 0x0, -1440, 0x53} case AFCVTWS: return &inst{0x53, 0x0, 0x0, -1024, 0x60} case AFCVTWUS: return &inst{0x53, 0x0, 0x1, -1023, 0x60} case AFCVTLS: return &inst{0x53, 0x0, 0x2, 
-1022, 0x60} case AFCVTLUS: return &inst{0x53, 0x0, 0x3, -1021, 0x60} case AFMVXW: return &inst{0x53, 0x0, 0x0, -512, 0x70} case AFCLASSS: return &inst{0x53, 0x1, 0x0, -512, 0x70} case AFCVTWD: return &inst{0x53, 0x0, 0x0, -992, 0x61} case AFCVTWUD: return &inst{0x53, 0x0, 0x1, -991, 0x61} case AFCVTLD: return &inst{0x53, 0x0, 0x2, -990, 0x61} case AFCVTLUD: return &inst{0x53, 0x0, 0x3, -989, 0x61} case AFMVXD: return &inst{0x53, 0x0, 0x0, -480, 0x71} case AFCLASSD: return &inst{0x53, 0x1, 0x0, -480, 0x71} case AFCVTWQ: return &inst{0x53, 0x0, 0x0, -928, 0x63} case AFCVTWUQ: return &inst{0x53, 0x0, 0x1, -927, 0x63} case AFCVTLQ: return &inst{0x53, 0x0, 0x2, -926, 0x63} case AFCVTLUQ: return &inst{0x53, 0x0, 0x3, -925, 0x63} case AFMVXQ: return &inst{0x53, 0x0, 0x0, -416, 0x73} case AFCLASSQ: return &inst{0x53, 0x1, 0x0, -416, 0x73} case AFCVTSW: return &inst{0x53, 0x0, 0x0, -768, 0x68} case AFCVTSWU: return &inst{0x53, 0x0, 0x1, -767, 0x68} case AFCVTSL: return &inst{0x53, 0x0, 0x2, -766, 0x68} case AFCVTSLU: return &inst{0x53, 0x0, 0x3, -765, 0x68} case AFMVWX: return &inst{0x53, 0x0, 0x0, -256, 0x78} case AFCVTDW: return &inst{0x53, 0x0, 0x0, -736, 0x69} case AFCVTDWU: return &inst{0x53, 0x0, 0x1, -735, 0x69} case AFCVTDL: return &inst{0x53, 0x0, 0x2, -734, 0x69} case AFCVTDLU: return &inst{0x53, 0x0, 0x3, -733, 0x69} case AFMVDX: return &inst{0x53, 0x0, 0x0, -224, 0x79} case AFCVTQW: return &inst{0x53, 0x0, 0x0, -672, 0x6b} case AFCVTQWU: return &inst{0x53, 0x0, 0x1, -671, 0x6b} case AFCVTQL: return &inst{0x53, 0x0, 0x2, -670, 0x6b} case AFCVTQLU: return &inst{0x53, 0x0, 0x3, -669, 0x6b} case AFMVQX: return &inst{0x53, 0x0, 0x0, -160, 0x7b} case AFLW: return &inst{0x7, 0x2, 0x0, 0, 0x0} case AFLD: return &inst{0x7, 0x3, 0x0, 0, 0x0} case AFLQ: return &inst{0x7, 0x4, 0x0, 0, 0x0} case AFSW: return &inst{0x27, 0x2, 0x0, 0, 0x0} case AFSD: return &inst{0x27, 0x3, 0x0, 0, 0x0} case AFSQ: return &inst{0x27, 0x4, 0x0, 0, 0x0} case AFMADDS: return &inst{0x43, 0x0, 0x0, 
0, 0x0} case AFMSUBS: return &inst{0x47, 0x0, 0x0, 0, 0x0} case AFNMSUBS: return &inst{0x4b, 0x0, 0x0, 0, 0x0} case AFNMADDS: return &inst{0x4f, 0x0, 0x0, 0, 0x0} case AFMADDD: return &inst{0x43, 0x0, 0x0, 32, 0x1} case AFMSUBD: return &inst{0x47, 0x0, 0x0, 32, 0x1} case AFNMSUBD: return &inst{0x4b, 0x0, 0x0, 32, 0x1} case AFNMADDD: return &inst{0x4f, 0x0, 0x0, 32, 0x1} case AFMADDQ: return &inst{0x43, 0x0, 0x0, 96, 0x3} case AFMSUBQ: return &inst{0x47, 0x0, 0x0, 96, 0x3} case AFNMSUBQ: return &inst{0x4b, 0x0, 0x0, 96, 0x3} case AFNMADDQ: return &inst{0x4f, 0x0, 0x0, 96, 0x3} case ASLLIRV32: return &inst{0x13, 0x1, 0x0, 0, 0x0} case ASRLIRV32: return &inst{0x13, 0x5, 0x0, 0, 0x0} case ASRAIRV32: return &inst{0x13, 0x5, 0x0, 1024, 0x20} case AFRFLAGS: return &inst{0x73, 0x2, 0x1, 1, 0x0} case AFSFLAGS: return &inst{0x73, 0x1, 0x1, 1, 0x0} case AFSFLAGSI: return &inst{0x73, 0x5, 0x1, 1, 0x0} case AFRRM: return &inst{0x73, 0x2, 0x2, 2, 0x0} case AFSRM: return &inst{0x73, 0x1, 0x2, 2, 0x0} case AFSRMI: return &inst{0x73, 0x5, 0x2, 2, 0x0} case AFSCSR: return &inst{0x73, 0x1, 0x3, 3, 0x0} case AFRCSR: return &inst{0x73, 0x2, 0x3, 3, 0x0} case ARDCYCLE: return &inst{0x73, 0x2, 0x0, -1024, 0x60} case ARDTIME: return &inst{0x73, 0x2, 0x1, -1023, 0x60} case ARDINSTRET: return &inst{0x73, 0x2, 0x2, -1022, 0x60} case ARDCYCLEH: return &inst{0x73, 0x2, 0x0, -896, 0x64} case ARDTIMEH: return &inst{0x73, 0x2, 0x1, -895, 0x64} case ARDINSTRETH: return &inst{0x73, 0x2, 0x2, -894, 0x64} case ASCALL: return &inst{0x73, 0x0, 0x0, 0, 0x0} case ASBREAK: return &inst{0x73, 0x0, 0x1, 1, 0x0} case AFMVXS: return &inst{0x53, 0x0, 0x0, -512, 0x70} case AFMVSX: return &inst{0x53, 0x0, 0x0, -256, 0x78} case AFENCETSO: return &inst{0xf, 0x0, 0x13, -1997, 0x41} } return nil }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/twitchyliquid64/golang-asm/obj/riscv/list.go
vendor/github.com/twitchyliquid64/golang-asm/obj/riscv/list.go
// Copyright 2019 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package riscv import ( "fmt" "github.com/twitchyliquid64/golang-asm/obj" ) func init() { obj.RegisterRegister(obj.RBaseRISCV, REG_END, RegName) obj.RegisterOpcode(obj.ABaseRISCV, Anames) } func RegName(r int) string { switch { case r == 0: return "NONE" case r == REG_G: return "g" case r == REG_SP: return "SP" case REG_X0 <= r && r <= REG_X31: return fmt.Sprintf("X%d", r-REG_X0) case REG_F0 <= r && r <= REG_F31: return fmt.Sprintf("F%d", r-REG_F0) default: return fmt.Sprintf("Rgok(%d)", r-obj.RBaseRISCV) } }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/twitchyliquid64/golang-asm/obj/riscv/obj.go
vendor/github.com/twitchyliquid64/golang-asm/obj/riscv/obj.go
// Copyright © 2015 The Go Authors. All rights reserved. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. package riscv import ( "github.com/twitchyliquid64/golang-asm/obj" "github.com/twitchyliquid64/golang-asm/objabi" "github.com/twitchyliquid64/golang-asm/sys" "fmt" ) func buildop(ctxt *obj.Link) {} // jalrToSym replaces p with a set of Progs needed to jump to the Sym in p. // lr is the link register to use for the JALR. // p must be a CALL, JMP or RET. func jalrToSym(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc, lr int16) *obj.Prog { if p.As != obj.ACALL && p.As != obj.AJMP && p.As != obj.ARET { ctxt.Diag("unexpected Prog in jalrToSym: %v", p) return p } // TODO(jsing): Consider using a single JAL instruction and teaching // the linker to provide trampolines for the case where the destination // offset is too large. This would potentially reduce instructions for // the common case, but would require three instructions to go via the // trampoline. 
to := p.To p.As = AAUIPC p.Mark |= NEED_PCREL_ITYPE_RELOC p.RestArgs = []obj.Addr{obj.Addr{Type: obj.TYPE_CONST, Offset: to.Offset, Sym: to.Sym}} p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: 0} p.Reg = 0 p.To = obj.Addr{Type: obj.TYPE_REG, Reg: REG_TMP} p = obj.Appendp(p, newprog) // Leave Sym only for the CALL reloc in assemble. p.As = AJALR p.From.Type = obj.TYPE_REG p.From.Reg = lr p.Reg = 0 p.To.Type = obj.TYPE_REG p.To.Reg = REG_TMP p.To.Sym = to.Sym return p } // progedit is called individually for each *obj.Prog. It normalizes instruction // formats and eliminates as many pseudo-instructions as possible. func progedit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) { // Expand binary instructions to ternary ones. if p.Reg == 0 { switch p.As { case AADDI, ASLTI, ASLTIU, AANDI, AORI, AXORI, ASLLI, ASRLI, ASRAI, AADD, AAND, AOR, AXOR, ASLL, ASRL, ASUB, ASRA, AMUL, AMULH, AMULHU, AMULHSU, AMULW, ADIV, ADIVU, ADIVW, ADIVUW, AREM, AREMU, AREMW, AREMUW: p.Reg = p.To.Reg } } // Rewrite instructions with constant operands to refer to the immediate // form of the instruction. if p.From.Type == obj.TYPE_CONST { switch p.As { case AADD: p.As = AADDI case ASLT: p.As = ASLTI case ASLTU: p.As = ASLTIU case AAND: p.As = AANDI case AOR: p.As = AORI case AXOR: p.As = AXORI case ASLL: p.As = ASLLI case ASRL: p.As = ASRLI case ASRA: p.As = ASRAI } } switch p.As { case obj.AJMP: // Turn JMP into JAL ZERO or JALR ZERO. p.From.Type = obj.TYPE_REG p.From.Reg = REG_ZERO switch p.To.Type { case obj.TYPE_BRANCH: p.As = AJAL case obj.TYPE_MEM: switch p.To.Name { case obj.NAME_NONE: p.As = AJALR case obj.NAME_EXTERN: // Handled in preprocess. default: ctxt.Diag("unsupported name %d for %v", p.To.Name, p) } default: panic(fmt.Sprintf("unhandled type %+v", p.To.Type)) } case obj.ACALL: switch p.To.Type { case obj.TYPE_MEM: // Handled in preprocess. 
case obj.TYPE_REG: p.As = AJALR p.From.Type = obj.TYPE_REG p.From.Reg = REG_LR default: ctxt.Diag("unknown destination type %+v in CALL: %v", p.To.Type, p) } case obj.AUNDEF: p.As = AEBREAK case ASCALL: // SCALL is the old name for ECALL. p.As = AECALL case ASBREAK: // SBREAK is the old name for EBREAK. p.As = AEBREAK } } // addrToReg extracts the register from an Addr, handling special Addr.Names. func addrToReg(a obj.Addr) int16 { switch a.Name { case obj.NAME_PARAM, obj.NAME_AUTO: return REG_SP } return a.Reg } // movToLoad converts a MOV mnemonic into the corresponding load instruction. func movToLoad(mnemonic obj.As) obj.As { switch mnemonic { case AMOV: return ALD case AMOVB: return ALB case AMOVH: return ALH case AMOVW: return ALW case AMOVBU: return ALBU case AMOVHU: return ALHU case AMOVWU: return ALWU case AMOVF: return AFLW case AMOVD: return AFLD default: panic(fmt.Sprintf("%+v is not a MOV", mnemonic)) } } // movToStore converts a MOV mnemonic into the corresponding store instruction. func movToStore(mnemonic obj.As) obj.As { switch mnemonic { case AMOV: return ASD case AMOVB: return ASB case AMOVH: return ASH case AMOVW: return ASW case AMOVF: return AFSW case AMOVD: return AFSD default: panic(fmt.Sprintf("%+v is not a MOV", mnemonic)) } } // rewriteMOV rewrites MOV pseudo-instructions. 
func rewriteMOV(ctxt *obj.Link, newprog obj.ProgAlloc, p *obj.Prog) { switch p.As { case AMOV, AMOVB, AMOVH, AMOVW, AMOVBU, AMOVHU, AMOVWU, AMOVF, AMOVD: default: panic(fmt.Sprintf("%+v is not a MOV pseudo-instruction", p.As)) } switch p.From.Type { case obj.TYPE_MEM: // MOV c(Rs), Rd -> L $c, Rs, Rd switch p.From.Name { case obj.NAME_AUTO, obj.NAME_PARAM, obj.NAME_NONE: if p.To.Type != obj.TYPE_REG { ctxt.Diag("unsupported load at %v", p) } p.As = movToLoad(p.As) p.From.Reg = addrToReg(p.From) case obj.NAME_EXTERN, obj.NAME_STATIC: // AUIPC $off_hi, R // L $off_lo, R as := p.As to := p.To p.As = AAUIPC p.Mark |= NEED_PCREL_ITYPE_RELOC p.RestArgs = []obj.Addr{obj.Addr{Type: obj.TYPE_CONST, Offset: p.From.Offset, Sym: p.From.Sym}} p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: 0} p.Reg = 0 p.To = obj.Addr{Type: obj.TYPE_REG, Reg: to.Reg} p = obj.Appendp(p, newprog) p.As = movToLoad(as) p.From = obj.Addr{Type: obj.TYPE_MEM, Reg: to.Reg, Offset: 0} p.To = to default: ctxt.Diag("unsupported name %d for %v", p.From.Name, p) } case obj.TYPE_REG: switch p.To.Type { case obj.TYPE_REG: switch p.As { case AMOV: // MOV Ra, Rb -> ADDI $0, Ra, Rb p.As = AADDI p.Reg = p.From.Reg p.From = obj.Addr{Type: obj.TYPE_CONST} case AMOVF: // MOVF Ra, Rb -> FSGNJS Ra, Ra, Rb p.As = AFSGNJS p.Reg = p.From.Reg case AMOVD: // MOVD Ra, Rb -> FSGNJD Ra, Ra, Rb p.As = AFSGNJD p.Reg = p.From.Reg default: ctxt.Diag("unsupported register-register move at %v", p) } case obj.TYPE_MEM: // MOV Rs, c(Rd) -> S $c, Rs, Rd switch p.As { case AMOVBU, AMOVHU, AMOVWU: ctxt.Diag("unsupported unsigned store at %v", p) } switch p.To.Name { case obj.NAME_AUTO, obj.NAME_PARAM, obj.NAME_NONE: p.As = movToStore(p.As) p.To.Reg = addrToReg(p.To) case obj.NAME_EXTERN: // AUIPC $off_hi, TMP // S $off_lo, TMP, R as := p.As from := p.From p.As = AAUIPC p.Mark |= NEED_PCREL_STYPE_RELOC p.RestArgs = []obj.Addr{obj.Addr{Type: obj.TYPE_CONST, Offset: p.To.Offset, Sym: p.To.Sym}} p.From = obj.Addr{Type: obj.TYPE_CONST, 
Offset: 0} p.Reg = 0 p.To = obj.Addr{Type: obj.TYPE_REG, Reg: REG_TMP} p = obj.Appendp(p, newprog) p.As = movToStore(as) p.From = from p.To = obj.Addr{Type: obj.TYPE_MEM, Reg: REG_TMP, Offset: 0} default: ctxt.Diag("unsupported name %d for %v", p.From.Name, p) } default: ctxt.Diag("unsupported MOV at %v", p) } case obj.TYPE_CONST: // MOV $c, R // If c is small enough, convert to: // ADD $c, ZERO, R // If not, convert to: // LUI top20bits(c), R // ADD bottom12bits(c), R, R if p.As != AMOV { ctxt.Diag("unsupported constant load at %v", p) } off := p.From.Offset to := p.To low, high, err := Split32BitImmediate(off) if err != nil { ctxt.Diag("%v: constant %d too large: %v", p, off, err) } // LUI is only necessary if the offset doesn't fit in 12-bits. needLUI := high != 0 if needLUI { p.As = ALUI p.To = to // Pass top 20 bits to LUI. p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: high} p = obj.Appendp(p, newprog) } p.As = AADDIW p.To = to p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: low} p.Reg = REG_ZERO if needLUI { p.Reg = to.Reg } case obj.TYPE_ADDR: // MOV $sym+off(SP/SB), R if p.To.Type != obj.TYPE_REG || p.As != AMOV { ctxt.Diag("unsupported addr MOV at %v", p) } switch p.From.Name { case obj.NAME_EXTERN, obj.NAME_STATIC: // AUIPC $off_hi, R // ADDI $off_lo, R to := p.To p.As = AAUIPC p.Mark |= NEED_PCREL_ITYPE_RELOC p.RestArgs = []obj.Addr{obj.Addr{Type: obj.TYPE_CONST, Offset: p.From.Offset, Sym: p.From.Sym}} p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: 0} p.Reg = 0 p.To = to p = obj.Appendp(p, newprog) p.As = AADDI p.From = obj.Addr{Type: obj.TYPE_CONST} p.Reg = to.Reg p.To = to case obj.NAME_PARAM, obj.NAME_AUTO: p.As = AADDI p.Reg = REG_SP p.From.Type = obj.TYPE_CONST case obj.NAME_NONE: p.As = AADDI p.Reg = p.From.Reg p.From.Type = obj.TYPE_CONST p.From.Reg = 0 default: ctxt.Diag("bad addr MOV from name %v at %v", p.From.Name, p) } default: ctxt.Diag("unsupported MOV at %v", p) } } // InvertBranch inverts the condition of a conditional branch. 
func InvertBranch(as obj.As) obj.As { switch as { case ABEQ: return ABNE case ABEQZ: return ABNEZ case ABGE: return ABLT case ABGEU: return ABLTU case ABGEZ: return ABLTZ case ABGT: return ABLE case ABGTU: return ABLEU case ABGTZ: return ABLEZ case ABLE: return ABGT case ABLEU: return ABGTU case ABLEZ: return ABGTZ case ABLT: return ABGE case ABLTU: return ABGEU case ABLTZ: return ABGEZ case ABNE: return ABEQ case ABNEZ: return ABEQZ default: panic("InvertBranch: not a branch") } } // containsCall reports whether the symbol contains a CALL (or equivalent) // instruction. Must be called after progedit. func containsCall(sym *obj.LSym) bool { // CALLs are CALL or JAL(R) with link register LR. for p := sym.Func.Text; p != nil; p = p.Link { switch p.As { case obj.ACALL: return true case AJAL, AJALR: if p.From.Type == obj.TYPE_REG && p.From.Reg == REG_LR { return true } } } return false } // setPCs sets the Pc field in all instructions reachable from p. // It uses pc as the initial value. func setPCs(p *obj.Prog, pc int64) { for ; p != nil; p = p.Link { p.Pc = pc for _, ins := range instructionsForProg(p) { pc += int64(ins.length()) } } } // stackOffset updates Addr offsets based on the current stack size. // // The stack looks like: // ------------------- // | | // | PARAMs | // | | // | | // ------------------- // | Parent RA | SP on function entry // ------------------- // | | // | | // | AUTOs | // | | // | | // ------------------- // | RA | SP during function execution // ------------------- // // FixedFrameSize makes other packages aware of the space allocated for RA. // // A nicer version of this diagram can be found on slide 21 of the presentation // attached to: // // https://golang.org/issue/16922#issuecomment-243748180 // func stackOffset(a *obj.Addr, stacksize int64) { switch a.Name { case obj.NAME_AUTO: // Adjust to the top of AUTOs. a.Offset += stacksize case obj.NAME_PARAM: // Adjust to the bottom of PARAMs. 
a.Offset += stacksize + 8 } } // preprocess generates prologue and epilogue code, computes PC-relative branch // and jump offsets, and resolves pseudo-registers. // // preprocess is called once per linker symbol. // // When preprocess finishes, all instructions in the symbol are either // concrete, real RISC-V instructions or directive pseudo-ops like TEXT, // PCDATA, and FUNCDATA. func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { if cursym.Func.Text == nil || cursym.Func.Text.Link == nil { return } // Generate the prologue. text := cursym.Func.Text if text.As != obj.ATEXT { ctxt.Diag("preprocess: found symbol that does not start with TEXT directive") return } stacksize := text.To.Offset if stacksize == -8 { // Historical way to mark NOFRAME. text.From.Sym.Set(obj.AttrNoFrame, true) stacksize = 0 } if stacksize < 0 { ctxt.Diag("negative frame size %d - did you mean NOFRAME?", stacksize) } if text.From.Sym.NoFrame() { if stacksize != 0 { ctxt.Diag("NOFRAME functions must have a frame size of 0, not %d", stacksize) } } if !containsCall(cursym) { text.From.Sym.Set(obj.AttrLeaf, true) if stacksize == 0 { // A leaf function with no locals has no frame. text.From.Sym.Set(obj.AttrNoFrame, true) } } // Save LR unless there is no frame. if !text.From.Sym.NoFrame() { stacksize += ctxt.FixedFrameSize() } cursym.Func.Args = text.To.Val.(int32) cursym.Func.Locals = int32(stacksize) prologue := text if !cursym.Func.Text.From.Sym.NoSplit() { prologue = stacksplit(ctxt, prologue, cursym, newprog, stacksize) // emit split check } if stacksize != 0 { prologue = ctxt.StartUnsafePoint(prologue, newprog) // Actually save LR. prologue = obj.Appendp(prologue, newprog) prologue.As = AMOV prologue.From = obj.Addr{Type: obj.TYPE_REG, Reg: REG_LR} prologue.To = obj.Addr{Type: obj.TYPE_MEM, Reg: REG_SP, Offset: -stacksize} // Insert stack adjustment. 
prologue = obj.Appendp(prologue, newprog) prologue.As = AADDI prologue.From = obj.Addr{Type: obj.TYPE_CONST, Offset: -stacksize} prologue.Reg = REG_SP prologue.To = obj.Addr{Type: obj.TYPE_REG, Reg: REG_SP} prologue.Spadj = int32(stacksize) prologue = ctxt.EndUnsafePoint(prologue, newprog, -1) } if cursym.Func.Text.From.Sym.Wrapper() { // if(g->panic != nil && g->panic->argp == FP) g->panic->argp = bottom-of-frame // // MOV g_panic(g), X11 // BNE X11, ZERO, adjust // end: // NOP // ...rest of function.. // adjust: // MOV panic_argp(X11), X12 // ADD $(autosize+FIXED_FRAME), SP, X13 // BNE X12, X13, end // ADD $FIXED_FRAME, SP, X12 // MOV X12, panic_argp(X11) // JMP end // // The NOP is needed to give the jumps somewhere to land. ldpanic := obj.Appendp(prologue, newprog) ldpanic.As = AMOV ldpanic.From = obj.Addr{Type: obj.TYPE_MEM, Reg: REGG, Offset: 4 * int64(ctxt.Arch.PtrSize)} // G.panic ldpanic.Reg = 0 ldpanic.To = obj.Addr{Type: obj.TYPE_REG, Reg: REG_X11} bneadj := obj.Appendp(ldpanic, newprog) bneadj.As = ABNE bneadj.From = obj.Addr{Type: obj.TYPE_REG, Reg: REG_X11} bneadj.Reg = REG_ZERO bneadj.To.Type = obj.TYPE_BRANCH endadj := obj.Appendp(bneadj, newprog) endadj.As = obj.ANOP last := endadj for last.Link != nil { last = last.Link } getargp := obj.Appendp(last, newprog) getargp.As = AMOV getargp.From = obj.Addr{Type: obj.TYPE_MEM, Reg: REG_X11, Offset: 0} // Panic.argp getargp.Reg = 0 getargp.To = obj.Addr{Type: obj.TYPE_REG, Reg: REG_X12} bneadj.To.SetTarget(getargp) calcargp := obj.Appendp(getargp, newprog) calcargp.As = AADDI calcargp.From = obj.Addr{Type: obj.TYPE_CONST, Offset: stacksize + ctxt.FixedFrameSize()} calcargp.Reg = REG_SP calcargp.To = obj.Addr{Type: obj.TYPE_REG, Reg: REG_X13} testargp := obj.Appendp(calcargp, newprog) testargp.As = ABNE testargp.From = obj.Addr{Type: obj.TYPE_REG, Reg: REG_X12} testargp.Reg = REG_X13 testargp.To.Type = obj.TYPE_BRANCH testargp.To.SetTarget(endadj) adjargp := obj.Appendp(testargp, newprog) adjargp.As = 
AADDI adjargp.From = obj.Addr{Type: obj.TYPE_CONST, Offset: int64(ctxt.Arch.PtrSize)} adjargp.Reg = REG_SP adjargp.To = obj.Addr{Type: obj.TYPE_REG, Reg: REG_X12} setargp := obj.Appendp(adjargp, newprog) setargp.As = AMOV setargp.From = obj.Addr{Type: obj.TYPE_REG, Reg: REG_X12} setargp.Reg = 0 setargp.To = obj.Addr{Type: obj.TYPE_MEM, Reg: REG_X11, Offset: 0} // Panic.argp godone := obj.Appendp(setargp, newprog) godone.As = AJAL godone.From = obj.Addr{Type: obj.TYPE_REG, Reg: REG_ZERO} godone.To.Type = obj.TYPE_BRANCH godone.To.SetTarget(endadj) } // Update stack-based offsets. for p := cursym.Func.Text; p != nil; p = p.Link { stackOffset(&p.From, stacksize) stackOffset(&p.To, stacksize) } // Additional instruction rewriting. for p := cursym.Func.Text; p != nil; p = p.Link { switch p.As { case obj.AGETCALLERPC: if cursym.Leaf() { // MOV LR, Rd p.As = AMOV p.From.Type = obj.TYPE_REG p.From.Reg = REG_LR } else { // MOV (RSP), Rd p.As = AMOV p.From.Type = obj.TYPE_MEM p.From.Reg = REG_SP } case obj.ACALL: switch p.To.Type { case obj.TYPE_MEM: jalrToSym(ctxt, p, newprog, REG_LR) } case obj.AJMP: switch p.To.Type { case obj.TYPE_MEM: switch p.To.Name { case obj.NAME_EXTERN: // JMP to symbol. jalrToSym(ctxt, p, newprog, REG_ZERO) } } case obj.ARET: // Replace RET with epilogue. retJMP := p.To.Sym if stacksize != 0 { // Restore LR. p.As = AMOV p.From = obj.Addr{Type: obj.TYPE_MEM, Reg: REG_SP, Offset: 0} p.To = obj.Addr{Type: obj.TYPE_REG, Reg: REG_LR} p = obj.Appendp(p, newprog) p.As = AADDI p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: stacksize} p.Reg = REG_SP p.To = obj.Addr{Type: obj.TYPE_REG, Reg: REG_SP} p.Spadj = int32(-stacksize) p = obj.Appendp(p, newprog) } if retJMP != nil { p.As = obj.ARET p.To.Sym = retJMP p = jalrToSym(ctxt, p, newprog, REG_ZERO) } else { p.As = AJALR p.From = obj.Addr{Type: obj.TYPE_REG, Reg: REG_ZERO} p.Reg = 0 p.To = obj.Addr{Type: obj.TYPE_REG, Reg: REG_LR} } // "Add back" the stack removed in the previous instruction. 
// // This is to avoid confusing pctospadj, which sums // Spadj from function entry to each PC, and shouldn't // count adjustments from earlier epilogues, since they // won't affect later PCs. p.Spadj = int32(stacksize) case AADDI: // Refine Spadjs account for adjustment via ADDI instruction. if p.To.Type == obj.TYPE_REG && p.To.Reg == REG_SP && p.From.Type == obj.TYPE_CONST { p.Spadj = int32(-p.From.Offset) } } } // Rewrite MOV pseudo-instructions. This cannot be done in // progedit, as SP offsets need to be applied before we split // up some of the Addrs. for p := cursym.Func.Text; p != nil; p = p.Link { switch p.As { case AMOV, AMOVB, AMOVH, AMOVW, AMOVBU, AMOVHU, AMOVWU, AMOVF, AMOVD: rewriteMOV(ctxt, newprog, p) } } // Split immediates larger than 12-bits. for p := cursym.Func.Text; p != nil; p = p.Link { switch p.As { // <opi> $imm, REG, TO case AADDI, AANDI, AORI, AXORI: // LUI $high, TMP // ADDI $low, TMP, TMP // <op> TMP, REG, TO q := *p low, high, err := Split32BitImmediate(p.From.Offset) if err != nil { ctxt.Diag("%v: constant %d too large", p, p.From.Offset, err) } if high == 0 { break // no need to split } p.As = ALUI p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: high} p.Reg = 0 p.To = obj.Addr{Type: obj.TYPE_REG, Reg: REG_TMP} p.Spadj = 0 // needed if TO is SP p = obj.Appendp(p, newprog) p.As = AADDIW p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: low} p.Reg = REG_TMP p.To = obj.Addr{Type: obj.TYPE_REG, Reg: REG_TMP} p = obj.Appendp(p, newprog) switch q.As { case AADDI: p.As = AADD case AANDI: p.As = AAND case AORI: p.As = AOR case AXORI: p.As = AXOR default: ctxt.Diag("unsupported instruction %v for splitting", q) } p.Spadj = q.Spadj p.To = q.To p.Reg = q.Reg p.From = obj.Addr{Type: obj.TYPE_REG, Reg: REG_TMP} // <load> $imm, REG, TO (load $imm+(REG), TO) case ALD, ALB, ALH, ALW, ALBU, ALHU, ALWU, AFLW, AFLD: low, high, err := Split32BitImmediate(p.From.Offset) if err != nil { ctxt.Diag("%v: constant %d too large", p, p.From.Offset) } if high == 0 
{ break // no need to split } q := *p // LUI $high, TMP // ADD TMP, REG, TMP // <load> $low, TMP, TO p.As = ALUI p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: high} p.Reg = 0 p.To = obj.Addr{Type: obj.TYPE_REG, Reg: REG_TMP} p.Spadj = 0 // needed if TO is SP p = obj.Appendp(p, newprog) p.As = AADD p.From = obj.Addr{Type: obj.TYPE_REG, Reg: REG_TMP} p.Reg = q.From.Reg p.To = obj.Addr{Type: obj.TYPE_REG, Reg: REG_TMP} p = obj.Appendp(p, newprog) p.As = q.As p.To = q.To p.From = obj.Addr{Type: obj.TYPE_MEM, Reg: REG_TMP, Offset: low} p.Reg = obj.REG_NONE // <store> $imm, REG, TO (store $imm+(TO), REG) case ASD, ASB, ASH, ASW, AFSW, AFSD: low, high, err := Split32BitImmediate(p.To.Offset) if err != nil { ctxt.Diag("%v: constant %d too large", p, p.To.Offset) } if high == 0 { break // no need to split } q := *p // LUI $high, TMP // ADD TMP, TO, TMP // <store> $low, REG, TMP p.As = ALUI p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: high} p.Reg = 0 p.To = obj.Addr{Type: obj.TYPE_REG, Reg: REG_TMP} p.Spadj = 0 // needed if TO is SP p = obj.Appendp(p, newprog) p.As = AADD p.From = obj.Addr{Type: obj.TYPE_REG, Reg: REG_TMP} p.Reg = q.To.Reg p.To = obj.Addr{Type: obj.TYPE_REG, Reg: REG_TMP} p = obj.Appendp(p, newprog) p.As = q.As p.From = obj.Addr{Type: obj.TYPE_REG, Reg: q.From.Reg, Offset: 0} p.To = obj.Addr{Type: obj.TYPE_MEM, Reg: REG_TMP, Offset: low} } } // Compute instruction addresses. Once we do that, we need to check for // overextended jumps and branches. Within each iteration, Pc differences // are always lower bounds (since the program gets monotonically longer, // a fixed point will be reached). No attempt to handle functions > 2GiB. 
for { rescan := false setPCs(cursym.Func.Text, 0) for p := cursym.Func.Text; p != nil; p = p.Link { switch p.As { case ABEQ, ABEQZ, ABGE, ABGEU, ABGEZ, ABGT, ABGTU, ABGTZ, ABLE, ABLEU, ABLEZ, ABLT, ABLTU, ABLTZ, ABNE, ABNEZ: if p.To.Type != obj.TYPE_BRANCH { panic("assemble: instruction with branch-like opcode lacks destination") } offset := p.To.Target().Pc - p.Pc if offset < -4096 || 4096 <= offset { // Branch is long. Replace it with a jump. jmp := obj.Appendp(p, newprog) jmp.As = AJAL jmp.From = obj.Addr{Type: obj.TYPE_REG, Reg: REG_ZERO} jmp.To = obj.Addr{Type: obj.TYPE_BRANCH} jmp.To.SetTarget(p.To.Target()) p.As = InvertBranch(p.As) p.To.SetTarget(jmp.Link) // We may have made previous branches too long, // so recheck them. rescan = true } case AJAL: if p.To.Target() == nil { panic("intersymbol jumps should be expressed as AUIPC+JALR") } offset := p.To.Target().Pc - p.Pc if offset < -(1<<20) || (1<<20) <= offset { // Replace with 2-instruction sequence. This assumes // that TMP is not live across J instructions, since // it is reserved by SSA. jmp := obj.Appendp(p, newprog) jmp.As = AJALR jmp.From = p.From jmp.To = obj.Addr{Type: obj.TYPE_REG, Reg: REG_TMP} // p.From is not generally valid, however will be // fixed up in the next loop. p.As = AAUIPC p.From = obj.Addr{Type: obj.TYPE_BRANCH, Sym: p.From.Sym} p.From.SetTarget(p.To.Target()) p.Reg = 0 p.To = obj.Addr{Type: obj.TYPE_REG, Reg: REG_TMP} rescan = true } } } if !rescan { break } } // Now that there are no long branches, resolve branch and jump targets. // At this point, instruction rewriting which changes the number of // instructions will break everything--don't do it! 
for p := cursym.Func.Text; p != nil; p = p.Link { switch p.As { case ABEQ, ABEQZ, ABGE, ABGEU, ABGEZ, ABGT, ABGTU, ABGTZ, ABLE, ABLEU, ABLEZ, ABLT, ABLTU, ABLTZ, ABNE, ABNEZ, AJAL: switch p.To.Type { case obj.TYPE_BRANCH: p.To.Type, p.To.Offset = obj.TYPE_CONST, p.To.Target().Pc-p.Pc case obj.TYPE_MEM: panic("unhandled type") } case AAUIPC: if p.From.Type == obj.TYPE_BRANCH { low, high, err := Split32BitImmediate(p.From.Target().Pc - p.Pc) if err != nil { ctxt.Diag("%v: jump displacement %d too large", p, p.To.Target().Pc-p.Pc) } p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: high, Sym: cursym} p.Link.From.Offset = low } } } // Validate all instructions - this provides nice error messages. for p := cursym.Func.Text; p != nil; p = p.Link { for _, ins := range instructionsForProg(p) { ins.validate(ctxt) } } } func stacksplit(ctxt *obj.Link, p *obj.Prog, cursym *obj.LSym, newprog obj.ProgAlloc, framesize int64) *obj.Prog { // Leaf function with no frame is effectively NOSPLIT. if framesize == 0 { return p } // MOV g_stackguard(g), X10 p = obj.Appendp(p, newprog) p.As = AMOV p.From.Type = obj.TYPE_MEM p.From.Reg = REGG p.From.Offset = 2 * int64(ctxt.Arch.PtrSize) // G.stackguard0 if cursym.CFunc() { p.From.Offset = 3 * int64(ctxt.Arch.PtrSize) // G.stackguard1 } p.To.Type = obj.TYPE_REG p.To.Reg = REG_X10 var to_done, to_more *obj.Prog if framesize <= objabi.StackSmall { // small stack: SP < stackguard // BLTU SP, stackguard, done p = obj.Appendp(p, newprog) p.As = ABLTU p.From.Type = obj.TYPE_REG p.From.Reg = REG_X10 p.Reg = REG_SP p.To.Type = obj.TYPE_BRANCH to_done = p } else if framesize <= objabi.StackBig { // large stack: SP-framesize < stackguard-StackSmall // ADD $-(framesize-StackSmall), SP, X11 // BLTU X11, stackguard, done p = obj.Appendp(p, newprog) // TODO(sorear): logic inconsistent with comment, but both match all non-x86 arches p.As = AADDI p.From.Type = obj.TYPE_CONST p.From.Offset = -(int64(framesize) - objabi.StackSmall) p.Reg = REG_SP p.To.Type = 
obj.TYPE_REG p.To.Reg = REG_X11 p = obj.Appendp(p, newprog) p.As = ABLTU p.From.Type = obj.TYPE_REG p.From.Reg = REG_X10 p.Reg = REG_X11 p.To.Type = obj.TYPE_BRANCH to_done = p } else { // Such a large stack we need to protect against wraparound. // If SP is close to zero: // SP-stackguard+StackGuard <= framesize + (StackGuard-StackSmall) // The +StackGuard on both sides is required to keep the left side positive: // SP is allowed to be slightly below stackguard. See stack.h. // // Preemption sets stackguard to StackPreempt, a very large value. // That breaks the math above, so we have to check for that explicitly. // // stackguard is X10 // MOV $StackPreempt, X11 // BEQ X10, X11, more // ADD $StackGuard, SP, X11 // SUB X10, X11 // MOV $(framesize+(StackGuard-StackSmall)), X10 // BGTU X11, X10, done p = obj.Appendp(p, newprog) p.As = AMOV p.From.Type = obj.TYPE_CONST p.From.Offset = objabi.StackPreempt p.To.Type = obj.TYPE_REG p.To.Reg = REG_X11 p = obj.Appendp(p, newprog) to_more = p p.As = ABEQ p.From.Type = obj.TYPE_REG p.From.Reg = REG_X10 p.Reg = REG_X11 p.To.Type = obj.TYPE_BRANCH p = obj.Appendp(p, newprog) p.As = AADDI p.From.Type = obj.TYPE_CONST p.From.Offset = int64(objabi.StackGuard) p.Reg = REG_SP p.To.Type = obj.TYPE_REG p.To.Reg = REG_X11 p = obj.Appendp(p, newprog) p.As = ASUB p.From.Type = obj.TYPE_REG p.From.Reg = REG_X10 p.Reg = REG_X11 p.To.Type = obj.TYPE_REG p.To.Reg = REG_X11 p = obj.Appendp(p, newprog) p.As = AMOV p.From.Type = obj.TYPE_CONST p.From.Offset = int64(framesize) + int64(objabi.StackGuard) - objabi.StackSmall p.To.Type = obj.TYPE_REG p.To.Reg = REG_X10 p = obj.Appendp(p, newprog) p.As = ABLTU p.From.Type = obj.TYPE_REG p.From.Reg = REG_X10 p.Reg = REG_X11 p.To.Type = obj.TYPE_BRANCH to_done = p } p = ctxt.EmitEntryLiveness(cursym, p, newprog) // CALL runtime.morestack(SB) p = obj.Appendp(p, newprog) p.As = obj.ACALL p.To.Type = obj.TYPE_BRANCH if cursym.CFunc() { p.To.Sym = ctxt.Lookup("runtime.morestackc") } else if 
!cursym.Func.Text.From.Sym.NeedCtxt() { p.To.Sym = ctxt.Lookup("runtime.morestack_noctxt") } else { p.To.Sym = ctxt.Lookup("runtime.morestack") } if to_more != nil { to_more.To.SetTarget(p) } p = jalrToSym(ctxt, p, newprog, REG_X5) // JMP start p = obj.Appendp(p, newprog) p.As = AJAL p.To = obj.Addr{Type: obj.TYPE_BRANCH} p.From = obj.Addr{Type: obj.TYPE_REG, Reg: REG_ZERO} p.To.SetTarget(cursym.Func.Text.Link) // placeholder for to_done's jump target p = obj.Appendp(p, newprog) p.As = obj.ANOP // zero-width place holder to_done.To.SetTarget(p) return p } // signExtend sign extends val starting at bit bit. func signExtend(val int64, bit uint) int64 { return val << (64 - bit) >> (64 - bit) } // Split32BitImmediate splits a signed 32-bit immediate into a signed 20-bit // upper immediate and a signed 12-bit lower immediate to be added to the upper // result. For example, high may be used in LUI and low in a following ADDI to // generate a full 32-bit constant. func Split32BitImmediate(imm int64) (low, high int64, err error) { if !immIFits(imm, 32) { return 0, 0, fmt.Errorf("immediate does not fit in 32-bits: %d", imm) } // Nothing special needs to be done if the immediate fits in 12-bits. if immIFits(imm, 12) { return imm, 0, nil } high = imm >> 12 // The bottom 12 bits will be treated as signed. // // If that will result in a negative 12 bit number, add 1 to // our upper bits to adjust for the borrow. // // It is not possible for this increment to overflow. To // overflow, the 20 top bits would be 1, and the sign bit for // the low 12 bits would be set, in which case the entire 32 // bit pattern fits in a 12 bit signed value. if imm&(1<<11) != 0 { high++ } low = signExtend(imm, 12) high = signExtend(high, 20) return low, high, nil } func regVal(r, min, max uint32) uint32 { if r < min || r > max { panic(fmt.Sprintf("register out of range, want %d < %d < %d", min, r, max)) } return r - min } // regI returns an integer register. 
func regI(r uint32) uint32 { return regVal(r, REG_X0, REG_X31) } // regF returns a float register. func regF(r uint32) uint32 { return regVal(r, REG_F0, REG_F31) } // regAddr extracts a register from an Addr. func regAddr(a obj.Addr, min, max uint32) uint32 { if a.Type != obj.TYPE_REG { panic(fmt.Sprintf("ill typed: %+v", a)) } return regVal(uint32(a.Reg), min, max) } // regIAddr extracts the integer register from an Addr. func regIAddr(a obj.Addr) uint32 { return regAddr(a, REG_X0, REG_X31) } // regFAddr extracts the float register from an Addr. func regFAddr(a obj.Addr) uint32 { return regAddr(a, REG_F0, REG_F31) } // immIFits reports whether immediate value x fits in nbits bits // as a signed integer. func immIFits(x int64, nbits uint) bool { nbits-- var min int64 = -1 << nbits var max int64 = 1<<nbits - 1 return min <= x && x <= max } // immI extracts the signed integer of the specified size from an immediate. func immI(as obj.As, imm int64, nbits uint) uint32 { if !immIFits(imm, nbits) { panic(fmt.Sprintf("%v\tsigned immediate %d cannot fit in %d bits", as, imm, nbits)) } return uint32(imm) } func wantImmI(ctxt *obj.Link, as obj.As, imm int64, nbits uint) { if !immIFits(imm, nbits) { ctxt.Diag("%v\tsigned immediate cannot be larger than %d bits but got %d", as, nbits, imm) } } func wantReg(ctxt *obj.Link, as obj.As, pos string, descr string, r, min, max uint32) { if r < min || r > max { var suffix string if r != obj.REG_NONE { suffix = fmt.Sprintf(" but got non-%s register %s", descr, RegName(int(r))) } ctxt.Diag("%v\texpected %s register in %s position%s", as, descr, pos, suffix) } } func wantNoneReg(ctxt *obj.Link, as obj.As, pos string, r uint32) { if r != obj.REG_NONE { ctxt.Diag("%v\texpected no register in %s but got register %s", as, pos, RegName(int(r))) } } // wantIntReg checks that r is an integer register. 
func wantIntReg(ctxt *obj.Link, as obj.As, pos string, r uint32) { wantReg(ctxt, as, pos, "integer", r, REG_X0, REG_X31) } // wantFloatReg checks that r is a floating-point register.
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
true
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/twitchyliquid64/golang-asm/obj/riscv/cpu.go
vendor/github.com/twitchyliquid64/golang-asm/obj/riscv/cpu.go
// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. // Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) // Portions Copyright © 1997-1999 Vita Nuova Limited // Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com) // Portions Copyright © 2004,2006 Bruce Ellis // Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) // Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others // Portions Copyright © 2009 The Go Authors. All rights reserved. // Portions Copyright © 2019 The Go Authors. All rights reserved. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. package riscv import "github.com/twitchyliquid64/golang-asm/obj" //go:generate go run ../stringer.go -i $GOFILE -o anames.go -p riscv const ( // Base register numberings. 
REG_X0 = obj.RBaseRISCV + iota REG_X1 REG_X2 REG_X3 REG_X4 REG_X5 REG_X6 REG_X7 REG_X8 REG_X9 REG_X10 REG_X11 REG_X12 REG_X13 REG_X14 REG_X15 REG_X16 REG_X17 REG_X18 REG_X19 REG_X20 REG_X21 REG_X22 REG_X23 REG_X24 REG_X25 REG_X26 REG_X27 REG_X28 REG_X29 REG_X30 REG_X31 // FP register numberings. REG_F0 REG_F1 REG_F2 REG_F3 REG_F4 REG_F5 REG_F6 REG_F7 REG_F8 REG_F9 REG_F10 REG_F11 REG_F12 REG_F13 REG_F14 REG_F15 REG_F16 REG_F17 REG_F18 REG_F19 REG_F20 REG_F21 REG_F22 REG_F23 REG_F24 REG_F25 REG_F26 REG_F27 REG_F28 REG_F29 REG_F30 REG_F31 // This marks the end of the register numbering. REG_END // General registers reassigned to ABI names. REG_ZERO = REG_X0 REG_RA = REG_X1 // aka REG_LR REG_SP = REG_X2 REG_GP = REG_X3 // aka REG_SB REG_TP = REG_X4 // aka REG_G REG_T0 = REG_X5 REG_T1 = REG_X6 REG_T2 = REG_X7 REG_S0 = REG_X8 REG_S1 = REG_X9 REG_A0 = REG_X10 REG_A1 = REG_X11 REG_A2 = REG_X12 REG_A3 = REG_X13 REG_A4 = REG_X14 REG_A5 = REG_X15 REG_A6 = REG_X16 REG_A7 = REG_X17 REG_S2 = REG_X18 REG_S3 = REG_X19 REG_S4 = REG_X20 // aka REG_CTXT REG_S5 = REG_X21 REG_S6 = REG_X22 REG_S7 = REG_X23 REG_S8 = REG_X24 REG_S9 = REG_X25 REG_S10 = REG_X26 REG_S11 = REG_X27 REG_T3 = REG_X28 REG_T4 = REG_X29 REG_T5 = REG_X30 REG_T6 = REG_X31 // aka REG_TMP // Go runtime register names. REG_G = REG_TP // G pointer. REG_CTXT = REG_S4 // Context for closures. REG_LR = REG_RA // Link register. REG_TMP = REG_T6 // Reserved for assembler use. // ABI names for floating point registers. 
REG_FT0 = REG_F0 REG_FT1 = REG_F1 REG_FT2 = REG_F2 REG_FT3 = REG_F3 REG_FT4 = REG_F4 REG_FT5 = REG_F5 REG_FT6 = REG_F6 REG_FT7 = REG_F7 REG_FS0 = REG_F8 REG_FS1 = REG_F9 REG_FA0 = REG_F10 REG_FA1 = REG_F11 REG_FA2 = REG_F12 REG_FA3 = REG_F13 REG_FA4 = REG_F14 REG_FA5 = REG_F15 REG_FA6 = REG_F16 REG_FA7 = REG_F17 REG_FS2 = REG_F18 REG_FS3 = REG_F19 REG_FS4 = REG_F20 REG_FS5 = REG_F21 REG_FS6 = REG_F22 REG_FS7 = REG_F23 REG_FS8 = REG_F24 REG_FS9 = REG_F25 REG_FS10 = REG_F26 REG_FS11 = REG_F27 REG_FT8 = REG_F28 REG_FT9 = REG_F29 REG_FT10 = REG_F30 REG_FT11 = REG_F31 // Names generated by the SSA compiler. REGSP = REG_SP REGG = REG_G ) // https://github.com/riscv/riscv-elf-psabi-doc/blob/master/riscv-elf.md#dwarf-register-numbers var RISCV64DWARFRegisters = map[int16]int16{ // Integer Registers. REG_X0: 0, REG_X1: 1, REG_X2: 2, REG_X3: 3, REG_X4: 4, REG_X5: 5, REG_X6: 6, REG_X7: 7, REG_X8: 8, REG_X9: 9, REG_X10: 10, REG_X11: 11, REG_X12: 12, REG_X13: 13, REG_X14: 14, REG_X15: 15, REG_X16: 16, REG_X17: 17, REG_X18: 18, REG_X19: 19, REG_X20: 20, REG_X21: 21, REG_X22: 22, REG_X23: 23, REG_X24: 24, REG_X25: 25, REG_X26: 26, REG_X27: 27, REG_X28: 28, REG_X29: 29, REG_X30: 30, REG_X31: 31, // Floating-Point Registers. REG_F0: 32, REG_F1: 33, REG_F2: 34, REG_F3: 35, REG_F4: 36, REG_F5: 37, REG_F6: 38, REG_F7: 39, REG_F8: 40, REG_F9: 41, REG_F10: 42, REG_F11: 43, REG_F12: 44, REG_F13: 45, REG_F14: 46, REG_F15: 47, REG_F16: 48, REG_F17: 49, REG_F18: 50, REG_F19: 51, REG_F20: 52, REG_F21: 53, REG_F22: 54, REG_F23: 55, REG_F24: 56, REG_F25: 57, REG_F26: 58, REG_F27: 59, REG_F28: 60, REG_F29: 61, REG_F30: 62, REG_F31: 63, } // Prog.Mark flags. const ( // NEED_PCREL_ITYPE_RELOC is set on AUIPC instructions to indicate that // it is the first instruction in an AUIPC + I-type pair that needs a // R_RISCV_PCREL_ITYPE relocation. 
NEED_PCREL_ITYPE_RELOC = 1 << 0 // NEED_PCREL_STYPE_RELOC is set on AUIPC instructions to indicate that // it is the first instruction in an AUIPC + S-type pair that needs a // R_RISCV_PCREL_STYPE relocation. NEED_PCREL_STYPE_RELOC = 1 << 1 ) // RISC-V mnemonics, as defined in the "opcodes" and "opcodes-pseudo" files // from: // // https://github.com/riscv/riscv-opcodes // // As well as some pseudo-mnemonics (e.g. MOV) used only in the assembler. // // See also "The RISC-V Instruction Set Manual" at: // // https://riscv.org/specifications/ // // If you modify this table, you MUST run 'go generate' to regenerate anames.go! const ( // Unprivileged ISA (Document Version 20190608-Base-Ratified) // 2.4: Integer Computational Instructions AADDI = obj.ABaseRISCV + obj.A_ARCHSPECIFIC + iota ASLTI ASLTIU AANDI AORI AXORI ASLLI ASRLI ASRAI ALUI AAUIPC AADD ASLT ASLTU AAND AOR AXOR ASLL ASRL ASUB ASRA // The SLL/SRL/SRA instructions differ slightly between RV32 and RV64, // hence there are pseudo-opcodes for the RV32 specific versions. 
ASLLIRV32 ASRLIRV32 ASRAIRV32 // 2.5: Control Transfer Instructions AJAL AJALR ABEQ ABNE ABLT ABLTU ABGE ABGEU // 2.6: Load and Store Instructions ALW ALWU ALH ALHU ALB ALBU ASW ASH ASB // 2.7: Memory Ordering Instructions AFENCE AFENCEI AFENCETSO // 5.2: Integer Computational Instructions (RV64I) AADDIW ASLLIW ASRLIW ASRAIW AADDW ASLLW ASRLW ASUBW ASRAW // 5.3: Load and Store Instructions (RV64I) ALD ASD // 7.1: Multiplication Operations AMUL AMULH AMULHU AMULHSU AMULW ADIV ADIVU AREM AREMU ADIVW ADIVUW AREMW AREMUW // 8.2: Load-Reserved/Store-Conditional Instructions ALRD ASCD ALRW ASCW // 8.3: Atomic Memory Operations AAMOSWAPD AAMOADDD AAMOANDD AAMOORD AAMOXORD AAMOMAXD AAMOMAXUD AAMOMIND AAMOMINUD AAMOSWAPW AAMOADDW AAMOANDW AAMOORW AAMOXORW AAMOMAXW AAMOMAXUW AAMOMINW AAMOMINUW // 10.1: Base Counters and Timers ARDCYCLE ARDCYCLEH ARDTIME ARDTIMEH ARDINSTRET ARDINSTRETH // 11.2: Floating-Point Control and Status Register AFRCSR AFSCSR AFRRM AFSRM AFRFLAGS AFSFLAGS AFSRMI AFSFLAGSI // 11.5: Single-Precision Load and Store Instructions AFLW AFSW // 11.6: Single-Precision Floating-Point Computational Instructions AFADDS AFSUBS AFMULS AFDIVS AFMINS AFMAXS AFSQRTS AFMADDS AFMSUBS AFNMADDS AFNMSUBS // 11.7: Single-Precision Floating-Point Conversion and Move Instructions AFCVTWS AFCVTLS AFCVTSW AFCVTSL AFCVTWUS AFCVTLUS AFCVTSWU AFCVTSLU AFSGNJS AFSGNJNS AFSGNJXS AFMVXS AFMVSX AFMVXW AFMVWX // 11.8: Single-Precision Floating-Point Compare Instructions AFEQS AFLTS AFLES // 11.9: Single-Precision Floating-Point Classify Instruction AFCLASSS // 12.3: Double-Precision Load and Store Instructions AFLD AFSD // 12.4: Double-Precision Floating-Point Computational Instructions AFADDD AFSUBD AFMULD AFDIVD AFMIND AFMAXD AFSQRTD AFMADDD AFMSUBD AFNMADDD AFNMSUBD // 12.5: Double-Precision Floating-Point Conversion and Move Instructions AFCVTWD AFCVTLD AFCVTDW AFCVTDL AFCVTWUD AFCVTLUD AFCVTDWU AFCVTDLU AFCVTSD AFCVTDS AFSGNJD AFSGNJND AFSGNJXD AFMVXD AFMVDX // 12.6: 
Double-Precision Floating-Point Compare Instructions AFEQD AFLTD AFLED // 12.7: Double-Precision Floating-Point Classify Instruction AFCLASSD // 13.1 Quad-Precision Load and Store Instructions AFLQ AFSQ // 13.2: Quad-Precision Computational Instructions AFADDQ AFSUBQ AFMULQ AFDIVQ AFMINQ AFMAXQ AFSQRTQ AFMADDQ AFMSUBQ AFNMADDQ AFNMSUBQ // 13.3 Quad-Precision Convert and Move Instructions AFCVTWQ AFCVTLQ AFCVTSQ AFCVTDQ AFCVTQW AFCVTQL AFCVTQS AFCVTQD AFCVTWUQ AFCVTLUQ AFCVTQWU AFCVTQLU AFSGNJQ AFSGNJNQ AFSGNJXQ AFMVXQ AFMVQX // 13.4 Quad-Precision Floating-Point Compare Instructions AFEQQ AFLEQ AFLTQ // 13.5 Quad-Precision Floating-Point Classify Instruction AFCLASSQ // Privileged ISA (Version 20190608-Priv-MSU-Ratified) // 3.1.9: Instructions to Access CSRs ACSRRW ACSRRS ACSRRC ACSRRWI ACSRRSI ACSRRCI // 3.2.1: Environment Call and Breakpoint AECALL ASCALL AEBREAK ASBREAK // 3.2.2: Trap-Return Instructions AMRET ASRET AURET ADRET // 3.2.3: Wait for Interrupt AWFI // 4.2.1: Supervisor Memory-Management Fence Instruction ASFENCEVMA // Hypervisor Memory-Management Instructions AHFENCEGVMA AHFENCEVVMA // The escape hatch. Inserts a single 32-bit word. AWORD // Pseudo-instructions. These get translated by the assembler into other // instructions, based on their operands. ABEQZ ABGEZ ABGT ABGTU ABGTZ ABLE ABLEU ABLEZ ABLTZ ABNEZ AFNEGD AFNEGS AFNED AFNES AMOV AMOVB AMOVBU AMOVF AMOVD AMOVH AMOVHU AMOVW AMOVWU ANEG ANEGW ANOT ASEQZ ASNEZ // End marker ALAST ) // All unary instructions which write to their arguments (as opposed to reading // from them) go here. The assembly parser uses this information to populate // its AST in a semantically reasonable way. // // Any instructions not listed here are assumed to either be non-unary or to read // from its argument. var unaryDst = map[obj.As]bool{ ARDCYCLE: true, ARDCYCLEH: true, ARDTIME: true, ARDTIMEH: true, ARDINSTRET: true, ARDINSTRETH: true, } // Instruction encoding masks. 
const ( // ITypeImmMask is a mask including only the immediate portion of // I-type instructions. ITypeImmMask = 0xfff00000 // STypeImmMask is a mask including only the immediate portion of // S-type instructions. STypeImmMask = 0xfe000f80 // UTypeImmMask is a mask including only the immediate portion of // U-type instructions. UTypeImmMask = 0xfffff000 // UJTypeImmMask is a mask including only the immediate portion of // UJ-type instructions. UJTypeImmMask = UTypeImmMask )
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/twitchyliquid64/golang-asm/obj/arm/a.out.go
vendor/github.com/twitchyliquid64/golang-asm/obj/arm/a.out.go
// Inferno utils/5c/5.out.h // https://bitbucket.org/inferno-os/inferno-os/src/master/utils/5c/5.out.h // // Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. // Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) // Portions Copyright © 1997-1999 Vita Nuova Limited // Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com) // Portions Copyright © 2004,2006 Bruce Ellis // Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) // Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others // Portions Copyright © 2009 The Go Authors. All rights reserved. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. 
package arm import "github.com/twitchyliquid64/golang-asm/obj" //go:generate go run ../stringer.go -i $GOFILE -o anames.go -p arm const ( NSNAME = 8 NSYM = 50 NREG = 16 ) /* -1 disables use of REGARG */ const ( REGARG = -1 ) const ( REG_R0 = obj.RBaseARM + iota // must be 16-aligned REG_R1 REG_R2 REG_R3 REG_R4 REG_R5 REG_R6 REG_R7 REG_R8 REG_R9 REG_R10 REG_R11 REG_R12 REG_R13 REG_R14 REG_R15 REG_F0 // must be 16-aligned REG_F1 REG_F2 REG_F3 REG_F4 REG_F5 REG_F6 REG_F7 REG_F8 REG_F9 REG_F10 REG_F11 REG_F12 REG_F13 REG_F14 REG_F15 REG_FPSR // must be 2-aligned REG_FPCR REG_CPSR // must be 2-aligned REG_SPSR REGRET = REG_R0 /* compiler allocates R1 up as temps */ /* compiler allocates register variables R3 up */ /* compiler allocates external registers R10 down */ REGEXT = REG_R10 /* these two registers are declared in runtime.h */ REGG = REGEXT - 0 REGM = REGEXT - 1 REGCTXT = REG_R7 REGTMP = REG_R11 REGSP = REG_R13 REGLINK = REG_R14 REGPC = REG_R15 NFREG = 16 /* compiler allocates register variables F0 up */ /* compiler allocates external registers F7 down */ FREGRET = REG_F0 FREGEXT = REG_F7 FREGTMP = REG_F15 ) // http://infocenter.arm.com/help/topic/com.arm.doc.ihi0040b/IHI0040B_aadwarf.pdf var ARMDWARFRegisters = map[int16]int16{} func init() { // f assigns dwarfregisters[from:to] = (base):(step*(to-from)+base) f := func(from, to, base, step int16) { for r := int16(from); r <= to; r++ { ARMDWARFRegisters[r] = step*(r-from) + base } } f(REG_R0, REG_R15, 0, 1) f(REG_F0, REG_F15, 64, 2) // Use d0 through D15, aka S0, S2, ..., S30 } // Special registers, after subtracting obj.RBaseARM, bit 9 indicates // a special register and the low bits select the register. 
const ( REG_SPECIAL = obj.RBaseARM + 1<<9 + iota REG_MB_SY REG_MB_ST REG_MB_ISH REG_MB_ISHST REG_MB_NSH REG_MB_NSHST REG_MB_OSH REG_MB_OSHST MAXREG ) const ( C_NONE = iota C_REG C_REGREG C_REGREG2 C_REGLIST C_SHIFT /* register shift R>>x */ C_SHIFTADDR /* memory address with shifted offset R>>x(R) */ C_FREG C_PSR C_FCR C_SPR /* REG_MB_SY */ C_RCON /* 0xff rotated */ C_NCON /* ~RCON */ C_RCON2A /* OR of two disjoint C_RCON constants */ C_RCON2S /* subtraction of two disjoint C_RCON constants */ C_SCON /* 0xffff */ C_LCON C_LCONADDR C_ZFCON C_SFCON C_LFCON C_RACON C_LACON C_SBRA C_LBRA C_HAUTO /* halfword insn offset (-0xff to 0xff) */ C_FAUTO /* float insn offset (0 to 0x3fc, word aligned) */ C_HFAUTO /* both H and F */ C_SAUTO /* -0xfff to 0xfff */ C_LAUTO C_HOREG C_FOREG C_HFOREG C_SOREG C_ROREG C_SROREG /* both nil and R */ C_LOREG C_PC C_SP C_HREG C_ADDR /* reference to relocatable address */ // TLS "var" in local exec mode: will become a constant offset from // thread local base that is ultimately chosen by the program linker. C_TLS_LE // TLS "var" in initial exec mode: will become a memory address (chosen // by the program linker) that the dynamic linker will fill with the // offset from the thread local base. 
C_TLS_IE C_TEXTSIZE C_GOK C_NCLASS /* must be the last */ ) const ( AAND = obj.ABaseARM + obj.A_ARCHSPECIFIC + iota AEOR ASUB ARSB AADD AADC ASBC ARSC ATST ATEQ ACMP ACMN AORR ABIC AMVN /* * Do not reorder or fragment the conditional branch * opcodes, or the predication code will break */ ABEQ ABNE ABCS ABHS ABCC ABLO ABMI ABPL ABVS ABVC ABHI ABLS ABGE ABLT ABGT ABLE AMOVWD AMOVWF AMOVDW AMOVFW AMOVFD AMOVDF AMOVF AMOVD ACMPF ACMPD AADDF AADDD ASUBF ASUBD AMULF AMULD ANMULF ANMULD AMULAF AMULAD ANMULAF ANMULAD AMULSF AMULSD ANMULSF ANMULSD AFMULAF AFMULAD AFNMULAF AFNMULAD AFMULSF AFMULSD AFNMULSF AFNMULSD ADIVF ADIVD ASQRTF ASQRTD AABSF AABSD ANEGF ANEGD ASRL ASRA ASLL AMULU ADIVU AMUL AMMUL ADIV AMOD AMODU ADIVHW ADIVUHW AMOVB AMOVBS AMOVBU AMOVH AMOVHS AMOVHU AMOVW AMOVM ASWPBU ASWPW ARFE ASWI AMULA AMULS AMMULA AMMULS AWORD AMULL AMULAL AMULLU AMULALU ABX ABXRET ADWORD ALDREX ASTREX ALDREXD ASTREXD ADMB APLD ACLZ AREV AREV16 AREVSH ARBIT AXTAB AXTAH AXTABU AXTAHU ABFX ABFXU ABFC ABFI AMULWT AMULWB AMULBB AMULAWT AMULAWB AMULABB AMRC // MRC/MCR ALAST // aliases AB = obj.AJMP ABL = obj.ACALL ) /* scond byte */ const ( C_SCOND = (1 << 4) - 1 C_SBIT = 1 << 4 C_PBIT = 1 << 5 C_WBIT = 1 << 6 C_FBIT = 1 << 7 /* psr flags-only */ C_UBIT = 1 << 7 /* up bit, unsigned bit */ // These constants are the ARM condition codes encodings, // XORed with 14 so that C_SCOND_NONE has value 0, // so that a zeroed Prog.scond means "always execute". 
C_SCOND_XOR = 14 C_SCOND_EQ = 0 ^ C_SCOND_XOR C_SCOND_NE = 1 ^ C_SCOND_XOR C_SCOND_HS = 2 ^ C_SCOND_XOR C_SCOND_LO = 3 ^ C_SCOND_XOR C_SCOND_MI = 4 ^ C_SCOND_XOR C_SCOND_PL = 5 ^ C_SCOND_XOR C_SCOND_VS = 6 ^ C_SCOND_XOR C_SCOND_VC = 7 ^ C_SCOND_XOR C_SCOND_HI = 8 ^ C_SCOND_XOR C_SCOND_LS = 9 ^ C_SCOND_XOR C_SCOND_GE = 10 ^ C_SCOND_XOR C_SCOND_LT = 11 ^ C_SCOND_XOR C_SCOND_GT = 12 ^ C_SCOND_XOR C_SCOND_LE = 13 ^ C_SCOND_XOR C_SCOND_NONE = 14 ^ C_SCOND_XOR C_SCOND_NV = 15 ^ C_SCOND_XOR /* D_SHIFT type */ SHIFT_LL = 0 << 5 SHIFT_LR = 1 << 5 SHIFT_AR = 2 << 5 SHIFT_RR = 3 << 5 )
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/twitchyliquid64/golang-asm/obj/arm/obj5.go
vendor/github.com/twitchyliquid64/golang-asm/obj/arm/obj5.go
// Derived from Inferno utils/5c/swt.c // https://bitbucket.org/inferno-os/inferno-os/src/master/utils/5c/swt.c // // Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. // Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) // Portions Copyright © 1997-1999 Vita Nuova Limited // Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com) // Portions Copyright © 2004,2006 Bruce Ellis // Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) // Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others // Portions Copyright © 2009 The Go Authors. All rights reserved. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. 
package arm import ( "github.com/twitchyliquid64/golang-asm/obj" "github.com/twitchyliquid64/golang-asm/objabi" "github.com/twitchyliquid64/golang-asm/sys" ) var progedit_tlsfallback *obj.LSym func progedit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) { p.From.Class = 0 p.To.Class = 0 c := ctxt5{ctxt: ctxt, newprog: newprog} // Rewrite B/BL to symbol as TYPE_BRANCH. switch p.As { case AB, ABL, obj.ADUFFZERO, obj.ADUFFCOPY: if p.To.Type == obj.TYPE_MEM && (p.To.Name == obj.NAME_EXTERN || p.To.Name == obj.NAME_STATIC) && p.To.Sym != nil { p.To.Type = obj.TYPE_BRANCH } } // Replace TLS register fetches on older ARM processors. switch p.As { // Treat MRC 15, 0, <reg>, C13, C0, 3 specially. case AMRC: if p.To.Offset&0xffff0fff == 0xee1d0f70 { // Because the instruction might be rewritten to a BL which returns in R0 // the register must be zero. if p.To.Offset&0xf000 != 0 { ctxt.Diag("%v: TLS MRC instruction must write to R0 as it might get translated into a BL instruction", p.Line()) } if objabi.GOARM < 7 { // Replace it with BL runtime.read_tls_fallback(SB) for ARM CPUs that lack the tls extension. if progedit_tlsfallback == nil { progedit_tlsfallback = ctxt.Lookup("runtime.read_tls_fallback") } // MOVW LR, R11 p.As = AMOVW p.From.Type = obj.TYPE_REG p.From.Reg = REGLINK p.To.Type = obj.TYPE_REG p.To.Reg = REGTMP // BL runtime.read_tls_fallback(SB) p = obj.Appendp(p, newprog) p.As = ABL p.To.Type = obj.TYPE_BRANCH p.To.Sym = progedit_tlsfallback p.To.Offset = 0 // MOVW R11, LR p = obj.Appendp(p, newprog) p.As = AMOVW p.From.Type = obj.TYPE_REG p.From.Reg = REGTMP p.To.Type = obj.TYPE_REG p.To.Reg = REGLINK break } } // Otherwise, MRC/MCR instructions need no further treatment. p.As = AWORD } // Rewrite float constants to values stored in memory. 
switch p.As { case AMOVF: if p.From.Type == obj.TYPE_FCONST && c.chipfloat5(p.From.Val.(float64)) < 0 && (c.chipzero5(p.From.Val.(float64)) < 0 || p.Scond&C_SCOND != C_SCOND_NONE) { f32 := float32(p.From.Val.(float64)) p.From.Type = obj.TYPE_MEM p.From.Sym = ctxt.Float32Sym(f32) p.From.Name = obj.NAME_EXTERN p.From.Offset = 0 } case AMOVD: if p.From.Type == obj.TYPE_FCONST && c.chipfloat5(p.From.Val.(float64)) < 0 && (c.chipzero5(p.From.Val.(float64)) < 0 || p.Scond&C_SCOND != C_SCOND_NONE) { p.From.Type = obj.TYPE_MEM p.From.Sym = ctxt.Float64Sym(p.From.Val.(float64)) p.From.Name = obj.NAME_EXTERN p.From.Offset = 0 } } if ctxt.Flag_dynlink { c.rewriteToUseGot(p) } } // Rewrite p, if necessary, to access global data via the global offset table. func (c *ctxt5) rewriteToUseGot(p *obj.Prog) { if p.As == obj.ADUFFCOPY || p.As == obj.ADUFFZERO { // ADUFFxxx $offset // becomes // MOVW runtime.duffxxx@GOT, R9 // ADD $offset, R9 // CALL (R9) var sym *obj.LSym if p.As == obj.ADUFFZERO { sym = c.ctxt.Lookup("runtime.duffzero") } else { sym = c.ctxt.Lookup("runtime.duffcopy") } offset := p.To.Offset p.As = AMOVW p.From.Type = obj.TYPE_MEM p.From.Name = obj.NAME_GOTREF p.From.Sym = sym p.To.Type = obj.TYPE_REG p.To.Reg = REG_R9 p.To.Name = obj.NAME_NONE p.To.Offset = 0 p.To.Sym = nil p1 := obj.Appendp(p, c.newprog) p1.As = AADD p1.From.Type = obj.TYPE_CONST p1.From.Offset = offset p1.To.Type = obj.TYPE_REG p1.To.Reg = REG_R9 p2 := obj.Appendp(p1, c.newprog) p2.As = obj.ACALL p2.To.Type = obj.TYPE_MEM p2.To.Reg = REG_R9 return } // We only care about global data: NAME_EXTERN means a global // symbol in the Go sense, and p.Sym.Local is true for a few // internally defined symbols. 
if p.From.Type == obj.TYPE_ADDR && p.From.Name == obj.NAME_EXTERN && !p.From.Sym.Local() { // MOVW $sym, Rx becomes MOVW sym@GOT, Rx // MOVW $sym+<off>, Rx becomes MOVW sym@GOT, Rx; ADD <off>, Rx if p.As != AMOVW { c.ctxt.Diag("do not know how to handle TYPE_ADDR in %v with -dynlink", p) } if p.To.Type != obj.TYPE_REG { c.ctxt.Diag("do not know how to handle LEAQ-type insn to non-register in %v with -dynlink", p) } p.From.Type = obj.TYPE_MEM p.From.Name = obj.NAME_GOTREF if p.From.Offset != 0 { q := obj.Appendp(p, c.newprog) q.As = AADD q.From.Type = obj.TYPE_CONST q.From.Offset = p.From.Offset q.To = p.To p.From.Offset = 0 } } if p.GetFrom3() != nil && p.GetFrom3().Name == obj.NAME_EXTERN { c.ctxt.Diag("don't know how to handle %v with -dynlink", p) } var source *obj.Addr // MOVx sym, Ry becomes MOVW sym@GOT, R9; MOVx (R9), Ry // MOVx Ry, sym becomes MOVW sym@GOT, R9; MOVx Ry, (R9) // An addition may be inserted between the two MOVs if there is an offset. if p.From.Name == obj.NAME_EXTERN && !p.From.Sym.Local() { if p.To.Name == obj.NAME_EXTERN && !p.To.Sym.Local() { c.ctxt.Diag("cannot handle NAME_EXTERN on both sides in %v with -dynlink", p) } source = &p.From } else if p.To.Name == obj.NAME_EXTERN && !p.To.Sym.Local() { source = &p.To } else { return } if p.As == obj.ATEXT || p.As == obj.AFUNCDATA || p.As == obj.ACALL || p.As == obj.ARET || p.As == obj.AJMP { return } if source.Sym.Type == objabi.STLSBSS { return } if source.Type != obj.TYPE_MEM { c.ctxt.Diag("don't know how to handle %v with -dynlink", p) } p1 := obj.Appendp(p, c.newprog) p2 := obj.Appendp(p1, c.newprog) p1.As = AMOVW p1.From.Type = obj.TYPE_MEM p1.From.Sym = source.Sym p1.From.Name = obj.NAME_GOTREF p1.To.Type = obj.TYPE_REG p1.To.Reg = REG_R9 p2.As = p.As p2.From = p.From p2.To = p.To if p.From.Name == obj.NAME_EXTERN { p2.From.Reg = REG_R9 p2.From.Name = obj.NAME_NONE p2.From.Sym = nil } else if p.To.Name == obj.NAME_EXTERN { p2.To.Reg = REG_R9 p2.To.Name = obj.NAME_NONE p2.To.Sym = nil } 
else { return } obj.Nopout(p) } // Prog.mark const ( FOLL = 1 << 0 LABEL = 1 << 1 LEAF = 1 << 2 ) func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { autosize := int32(0) if cursym.Func.Text == nil || cursym.Func.Text.Link == nil { return } c := ctxt5{ctxt: ctxt, cursym: cursym, newprog: newprog} p := c.cursym.Func.Text autoffset := int32(p.To.Offset) if autoffset == -4 { // Historical way to mark NOFRAME. p.From.Sym.Set(obj.AttrNoFrame, true) autoffset = 0 } if autoffset < 0 || autoffset%4 != 0 { c.ctxt.Diag("frame size %d not 0 or a positive multiple of 4", autoffset) } if p.From.Sym.NoFrame() { if autoffset != 0 { c.ctxt.Diag("NOFRAME functions must have a frame size of 0, not %d", autoffset) } } cursym.Func.Locals = autoffset cursym.Func.Args = p.To.Val.(int32) /* * find leaf subroutines */ for p := cursym.Func.Text; p != nil; p = p.Link { switch p.As { case obj.ATEXT: p.Mark |= LEAF case ADIV, ADIVU, AMOD, AMODU: cursym.Func.Text.Mark &^= LEAF case ABL, ABX, obj.ADUFFZERO, obj.ADUFFCOPY: cursym.Func.Text.Mark &^= LEAF } } var q2 *obj.Prog for p := cursym.Func.Text; p != nil; p = p.Link { o := p.As switch o { case obj.ATEXT: autosize = autoffset if p.Mark&LEAF != 0 && autosize == 0 { // A leaf function with no locals has no frame. p.From.Sym.Set(obj.AttrNoFrame, true) } if !p.From.Sym.NoFrame() { // If there is a stack frame at all, it includes // space to save the LR. autosize += 4 } if autosize == 0 && cursym.Func.Text.Mark&LEAF == 0 { // A very few functions that do not return to their caller // are not identified as leaves but still have no frame. if ctxt.Debugvlog { ctxt.Logf("save suppressed in: %s\n", cursym.Name) } cursym.Func.Text.Mark |= LEAF } // FP offsets need an updated p.To.Offset. 
p.To.Offset = int64(autosize) - 4 if cursym.Func.Text.Mark&LEAF != 0 { cursym.Set(obj.AttrLeaf, true) if p.From.Sym.NoFrame() { break } } if !p.From.Sym.NoSplit() { p = c.stacksplit(p, autosize) // emit split check } // MOVW.W R14,$-autosize(SP) p = obj.Appendp(p, c.newprog) p.As = AMOVW p.Scond |= C_WBIT p.From.Type = obj.TYPE_REG p.From.Reg = REGLINK p.To.Type = obj.TYPE_MEM p.To.Offset = int64(-autosize) p.To.Reg = REGSP p.Spadj = autosize if cursym.Func.Text.From.Sym.Wrapper() { // if(g->panic != nil && g->panic->argp == FP) g->panic->argp = bottom-of-frame // // MOVW g_panic(g), R1 // CMP $0, R1 // B.NE checkargp // end: // NOP // ... function ... // checkargp: // MOVW panic_argp(R1), R2 // ADD $(autosize+4), R13, R3 // CMP R2, R3 // B.NE end // ADD $4, R13, R4 // MOVW R4, panic_argp(R1) // B end // // The NOP is needed to give the jumps somewhere to land. // It is a liblink NOP, not an ARM NOP: it encodes to 0 instruction bytes. p = obj.Appendp(p, newprog) p.As = AMOVW p.From.Type = obj.TYPE_MEM p.From.Reg = REGG p.From.Offset = 4 * int64(ctxt.Arch.PtrSize) // G.panic p.To.Type = obj.TYPE_REG p.To.Reg = REG_R1 p = obj.Appendp(p, newprog) p.As = ACMP p.From.Type = obj.TYPE_CONST p.From.Offset = 0 p.Reg = REG_R1 // B.NE checkargp bne := obj.Appendp(p, newprog) bne.As = ABNE bne.To.Type = obj.TYPE_BRANCH // end: NOP end := obj.Appendp(bne, newprog) end.As = obj.ANOP // find end of function var last *obj.Prog for last = end; last.Link != nil; last = last.Link { } // MOVW panic_argp(R1), R2 mov := obj.Appendp(last, newprog) mov.As = AMOVW mov.From.Type = obj.TYPE_MEM mov.From.Reg = REG_R1 mov.From.Offset = 0 // Panic.argp mov.To.Type = obj.TYPE_REG mov.To.Reg = REG_R2 // B.NE branch target is MOVW above bne.To.SetTarget(mov) // ADD $(autosize+4), R13, R3 p = obj.Appendp(mov, newprog) p.As = AADD p.From.Type = obj.TYPE_CONST p.From.Offset = int64(autosize) + 4 p.Reg = REG_R13 p.To.Type = obj.TYPE_REG p.To.Reg = REG_R3 // CMP R2, R3 p = obj.Appendp(p, newprog) p.As 
= ACMP p.From.Type = obj.TYPE_REG p.From.Reg = REG_R2 p.Reg = REG_R3 // B.NE end p = obj.Appendp(p, newprog) p.As = ABNE p.To.Type = obj.TYPE_BRANCH p.To.SetTarget(end) // ADD $4, R13, R4 p = obj.Appendp(p, newprog) p.As = AADD p.From.Type = obj.TYPE_CONST p.From.Offset = 4 p.Reg = REG_R13 p.To.Type = obj.TYPE_REG p.To.Reg = REG_R4 // MOVW R4, panic_argp(R1) p = obj.Appendp(p, newprog) p.As = AMOVW p.From.Type = obj.TYPE_REG p.From.Reg = REG_R4 p.To.Type = obj.TYPE_MEM p.To.Reg = REG_R1 p.To.Offset = 0 // Panic.argp // B end p = obj.Appendp(p, newprog) p.As = AB p.To.Type = obj.TYPE_BRANCH p.To.SetTarget(end) // reset for subsequent passes p = end } case obj.ARET: nocache(p) if cursym.Func.Text.Mark&LEAF != 0 { if autosize == 0 { p.As = AB p.From = obj.Addr{} if p.To.Sym != nil { // retjmp p.To.Type = obj.TYPE_BRANCH } else { p.To.Type = obj.TYPE_MEM p.To.Offset = 0 p.To.Reg = REGLINK } break } } p.As = AMOVW p.Scond |= C_PBIT p.From.Type = obj.TYPE_MEM p.From.Offset = int64(autosize) p.From.Reg = REGSP p.To.Type = obj.TYPE_REG p.To.Reg = REGPC // If there are instructions following // this ARET, they come from a branch // with the same stackframe, so no spadj. if p.To.Sym != nil { // retjmp p.To.Reg = REGLINK q2 = obj.Appendp(p, newprog) q2.As = AB q2.To.Type = obj.TYPE_BRANCH q2.To.Sym = p.To.Sym p.To.Sym = nil p = q2 } case AADD: if p.From.Type == obj.TYPE_CONST && p.From.Reg == 0 && p.To.Type == obj.TYPE_REG && p.To.Reg == REGSP { p.Spadj = int32(-p.From.Offset) } case ASUB: if p.From.Type == obj.TYPE_CONST && p.From.Reg == 0 && p.To.Type == obj.TYPE_REG && p.To.Reg == REGSP { p.Spadj = int32(p.From.Offset) } case ADIV, ADIVU, AMOD, AMODU: if cursym.Func.Text.From.Sym.NoSplit() { ctxt.Diag("cannot divide in NOSPLIT function") } const debugdivmod = false if debugdivmod { break } if p.From.Type != obj.TYPE_REG { break } if p.To.Type != obj.TYPE_REG { break } // Make copy because we overwrite p below. 
q1 := *p if q1.Reg == REGTMP || q1.Reg == 0 && q1.To.Reg == REGTMP { ctxt.Diag("div already using REGTMP: %v", p) } /* MOV m(g),REGTMP */ p.As = AMOVW p.Pos = q1.Pos p.From.Type = obj.TYPE_MEM p.From.Reg = REGG p.From.Offset = 6 * 4 // offset of g.m p.Reg = 0 p.To.Type = obj.TYPE_REG p.To.Reg = REGTMP /* MOV a,m_divmod(REGTMP) */ p = obj.Appendp(p, newprog) p.As = AMOVW p.Pos = q1.Pos p.From.Type = obj.TYPE_REG p.From.Reg = q1.From.Reg p.To.Type = obj.TYPE_MEM p.To.Reg = REGTMP p.To.Offset = 8 * 4 // offset of m.divmod /* MOV b, R8 */ p = obj.Appendp(p, newprog) p.As = AMOVW p.Pos = q1.Pos p.From.Type = obj.TYPE_REG p.From.Reg = q1.Reg if q1.Reg == 0 { p.From.Reg = q1.To.Reg } p.To.Type = obj.TYPE_REG p.To.Reg = REG_R8 p.To.Offset = 0 /* CALL appropriate */ p = obj.Appendp(p, newprog) p.As = ABL p.Pos = q1.Pos p.To.Type = obj.TYPE_BRANCH switch o { case ADIV: p.To.Sym = symdiv case ADIVU: p.To.Sym = symdivu case AMOD: p.To.Sym = symmod case AMODU: p.To.Sym = symmodu } /* MOV REGTMP, b */ p = obj.Appendp(p, newprog) p.As = AMOVW p.Pos = q1.Pos p.From.Type = obj.TYPE_REG p.From.Reg = REGTMP p.From.Offset = 0 p.To.Type = obj.TYPE_REG p.To.Reg = q1.To.Reg case AMOVW: if (p.Scond&C_WBIT != 0) && p.To.Type == obj.TYPE_MEM && p.To.Reg == REGSP { p.Spadj = int32(-p.To.Offset) } if (p.Scond&C_PBIT != 0) && p.From.Type == obj.TYPE_MEM && p.From.Reg == REGSP && p.To.Reg != REGPC { p.Spadj = int32(-p.From.Offset) } if p.From.Type == obj.TYPE_ADDR && p.From.Reg == REGSP && p.To.Type == obj.TYPE_REG && p.To.Reg == REGSP { p.Spadj = int32(-p.From.Offset) } case obj.AGETCALLERPC: if cursym.Leaf() { /* MOVW LR, Rd */ p.As = AMOVW p.From.Type = obj.TYPE_REG p.From.Reg = REGLINK } else { /* MOVW (RSP), Rd */ p.As = AMOVW p.From.Type = obj.TYPE_MEM p.From.Reg = REGSP } } } } func (c *ctxt5) stacksplit(p *obj.Prog, framesize int32) *obj.Prog { // MOVW g_stackguard(g), R1 p = obj.Appendp(p, c.newprog) p.As = AMOVW p.From.Type = obj.TYPE_MEM p.From.Reg = REGG p.From.Offset = 2 * 
int64(c.ctxt.Arch.PtrSize) // G.stackguard0 if c.cursym.CFunc() { p.From.Offset = 3 * int64(c.ctxt.Arch.PtrSize) // G.stackguard1 } p.To.Type = obj.TYPE_REG p.To.Reg = REG_R1 // Mark the stack bound check and morestack call async nonpreemptible. // If we get preempted here, when resumed the preemption request is // cleared, but we'll still call morestack, which will double the stack // unnecessarily. See issue #35470. p = c.ctxt.StartUnsafePoint(p, c.newprog) if framesize <= objabi.StackSmall { // small stack: SP < stackguard // CMP stackguard, SP p = obj.Appendp(p, c.newprog) p.As = ACMP p.From.Type = obj.TYPE_REG p.From.Reg = REG_R1 p.Reg = REGSP } else if framesize <= objabi.StackBig { // large stack: SP-framesize < stackguard-StackSmall // MOVW $-(framesize-StackSmall)(SP), R2 // CMP stackguard, R2 p = obj.Appendp(p, c.newprog) p.As = AMOVW p.From.Type = obj.TYPE_ADDR p.From.Reg = REGSP p.From.Offset = -(int64(framesize) - objabi.StackSmall) p.To.Type = obj.TYPE_REG p.To.Reg = REG_R2 p = obj.Appendp(p, c.newprog) p.As = ACMP p.From.Type = obj.TYPE_REG p.From.Reg = REG_R1 p.Reg = REG_R2 } else { // Such a large stack we need to protect against wraparound // if SP is close to zero. // SP-stackguard+StackGuard < framesize + (StackGuard-StackSmall) // The +StackGuard on both sides is required to keep the left side positive: // SP is allowed to be slightly below stackguard. See stack.h. 
// CMP $StackPreempt, R1 // MOVW.NE $StackGuard(SP), R2 // SUB.NE R1, R2 // MOVW.NE $(framesize+(StackGuard-StackSmall)), R3 // CMP.NE R3, R2 p = obj.Appendp(p, c.newprog) p.As = ACMP p.From.Type = obj.TYPE_CONST p.From.Offset = int64(uint32(objabi.StackPreempt & (1<<32 - 1))) p.Reg = REG_R1 p = obj.Appendp(p, c.newprog) p.As = AMOVW p.From.Type = obj.TYPE_ADDR p.From.Reg = REGSP p.From.Offset = int64(objabi.StackGuard) p.To.Type = obj.TYPE_REG p.To.Reg = REG_R2 p.Scond = C_SCOND_NE p = obj.Appendp(p, c.newprog) p.As = ASUB p.From.Type = obj.TYPE_REG p.From.Reg = REG_R1 p.To.Type = obj.TYPE_REG p.To.Reg = REG_R2 p.Scond = C_SCOND_NE p = obj.Appendp(p, c.newprog) p.As = AMOVW p.From.Type = obj.TYPE_ADDR p.From.Offset = int64(framesize) + (int64(objabi.StackGuard) - objabi.StackSmall) p.To.Type = obj.TYPE_REG p.To.Reg = REG_R3 p.Scond = C_SCOND_NE p = obj.Appendp(p, c.newprog) p.As = ACMP p.From.Type = obj.TYPE_REG p.From.Reg = REG_R3 p.Reg = REG_R2 p.Scond = C_SCOND_NE } // BLS call-to-morestack bls := obj.Appendp(p, c.newprog) bls.As = ABLS bls.To.Type = obj.TYPE_BRANCH end := c.ctxt.EndUnsafePoint(bls, c.newprog, -1) var last *obj.Prog for last = c.cursym.Func.Text; last.Link != nil; last = last.Link { } // Now we are at the end of the function, but logically // we are still in function prologue. We need to fix the // SP data and PCDATA. 
spfix := obj.Appendp(last, c.newprog) spfix.As = obj.ANOP spfix.Spadj = -framesize pcdata := c.ctxt.EmitEntryStackMap(c.cursym, spfix, c.newprog) pcdata = c.ctxt.StartUnsafePoint(pcdata, c.newprog) // MOVW LR, R3 movw := obj.Appendp(pcdata, c.newprog) movw.As = AMOVW movw.From.Type = obj.TYPE_REG movw.From.Reg = REGLINK movw.To.Type = obj.TYPE_REG movw.To.Reg = REG_R3 bls.To.SetTarget(movw) // BL runtime.morestack call := obj.Appendp(movw, c.newprog) call.As = obj.ACALL call.To.Type = obj.TYPE_BRANCH morestack := "runtime.morestack" switch { case c.cursym.CFunc(): morestack = "runtime.morestackc" case !c.cursym.Func.Text.From.Sym.NeedCtxt(): morestack = "runtime.morestack_noctxt" } call.To.Sym = c.ctxt.Lookup(morestack) pcdata = c.ctxt.EndUnsafePoint(call, c.newprog, -1) // B start b := obj.Appendp(pcdata, c.newprog) b.As = obj.AJMP b.To.Type = obj.TYPE_BRANCH b.To.SetTarget(c.cursym.Func.Text.Link) b.Spadj = +framesize return end } var unaryDst = map[obj.As]bool{ ASWI: true, AWORD: true, } var Linkarm = obj.LinkArch{ Arch: sys.ArchARM, Init: buildop, Preprocess: preprocess, Assemble: span5, Progedit: progedit, UnaryDst: unaryDst, DWARFRegisters: ARMDWARFRegisters, }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/twitchyliquid64/golang-asm/obj/arm/anames.go
vendor/github.com/twitchyliquid64/golang-asm/obj/arm/anames.go
// Code generated by stringer -i a.out.go -o anames.go -p arm; DO NOT EDIT. package arm import "github.com/twitchyliquid64/golang-asm/obj" var Anames = []string{ obj.A_ARCHSPECIFIC: "AND", "EOR", "SUB", "RSB", "ADD", "ADC", "SBC", "RSC", "TST", "TEQ", "CMP", "CMN", "ORR", "BIC", "MVN", "BEQ", "BNE", "BCS", "BHS", "BCC", "BLO", "BMI", "BPL", "BVS", "BVC", "BHI", "BLS", "BGE", "BLT", "BGT", "BLE", "MOVWD", "MOVWF", "MOVDW", "MOVFW", "MOVFD", "MOVDF", "MOVF", "MOVD", "CMPF", "CMPD", "ADDF", "ADDD", "SUBF", "SUBD", "MULF", "MULD", "NMULF", "NMULD", "MULAF", "MULAD", "NMULAF", "NMULAD", "MULSF", "MULSD", "NMULSF", "NMULSD", "FMULAF", "FMULAD", "FNMULAF", "FNMULAD", "FMULSF", "FMULSD", "FNMULSF", "FNMULSD", "DIVF", "DIVD", "SQRTF", "SQRTD", "ABSF", "ABSD", "NEGF", "NEGD", "SRL", "SRA", "SLL", "MULU", "DIVU", "MUL", "MMUL", "DIV", "MOD", "MODU", "DIVHW", "DIVUHW", "MOVB", "MOVBS", "MOVBU", "MOVH", "MOVHS", "MOVHU", "MOVW", "MOVM", "SWPBU", "SWPW", "RFE", "SWI", "MULA", "MULS", "MMULA", "MMULS", "WORD", "MULL", "MULAL", "MULLU", "MULALU", "BX", "BXRET", "DWORD", "LDREX", "STREX", "LDREXD", "STREXD", "DMB", "PLD", "CLZ", "REV", "REV16", "REVSH", "RBIT", "XTAB", "XTAH", "XTABU", "XTAHU", "BFX", "BFXU", "BFC", "BFI", "MULWT", "MULWB", "MULBB", "MULAWT", "MULAWB", "MULABB", "MRC", "LAST", }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/twitchyliquid64/golang-asm/obj/arm/list5.go
vendor/github.com/twitchyliquid64/golang-asm/obj/arm/list5.go
// Inferno utils/5c/list.c // https://bitbucket.org/inferno-os/inferno-os/src/master/utils/5c/list.c // // Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. // Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) // Portions Copyright © 1997-1999 Vita Nuova Limited // Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com) // Portions Copyright © 2004,2006 Bruce Ellis // Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) // Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others // Portions Copyright © 2009 The Go Authors. All rights reserved. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. 
package arm import ( "github.com/twitchyliquid64/golang-asm/obj" "fmt" ) func init() { obj.RegisterRegister(obj.RBaseARM, MAXREG, rconv) obj.RegisterOpcode(obj.ABaseARM, Anames) obj.RegisterRegisterList(obj.RegListARMLo, obj.RegListARMHi, rlconv) obj.RegisterOpSuffix("arm", obj.CConvARM) } func rconv(r int) string { if r == 0 { return "NONE" } if r == REGG { // Special case. return "g" } if REG_R0 <= r && r <= REG_R15 { return fmt.Sprintf("R%d", r-REG_R0) } if REG_F0 <= r && r <= REG_F15 { return fmt.Sprintf("F%d", r-REG_F0) } switch r { case REG_FPSR: return "FPSR" case REG_FPCR: return "FPCR" case REG_CPSR: return "CPSR" case REG_SPSR: return "SPSR" case REG_MB_SY: return "MB_SY" case REG_MB_ST: return "MB_ST" case REG_MB_ISH: return "MB_ISH" case REG_MB_ISHST: return "MB_ISHST" case REG_MB_NSH: return "MB_NSH" case REG_MB_NSHST: return "MB_NSHST" case REG_MB_OSH: return "MB_OSH" case REG_MB_OSHST: return "MB_OSHST" } return fmt.Sprintf("Rgok(%d)", r-obj.RBaseARM) } func DRconv(a int) string { s := "C_??" if a >= C_NONE && a <= C_NCLASS { s = cnames5[a] } var fp string fp += s return fp } func rlconv(list int64) string { str := "" for i := 0; i < 16; i++ { if list&(1<<uint(i)) != 0 { if str == "" { str += "[" } else { str += "," } // This is ARM-specific; R10 is g. if i == REGG-REG_R0 { str += "g" } else { str += fmt.Sprintf("R%d", i) } } } str += "]" return str }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/twitchyliquid64/golang-asm/obj/arm/asm5.go
vendor/github.com/twitchyliquid64/golang-asm/obj/arm/asm5.go
// Inferno utils/5l/span.c // https://bitbucket.org/inferno-os/inferno-os/src/master/utils/5l/span.c // // Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. // Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) // Portions Copyright © 1997-1999 Vita Nuova Limited // Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com) // Portions Copyright © 2004,2006 Bruce Ellis // Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) // Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others // Portions Copyright © 2009 The Go Authors. All rights reserved. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. package arm import ( "github.com/twitchyliquid64/golang-asm/obj" "github.com/twitchyliquid64/golang-asm/objabi" "fmt" "log" "math" "sort" ) // ctxt5 holds state while assembling a single function. // Each function gets a fresh ctxt5. 
// This allows for multiple functions to be safely concurrently assembled. type ctxt5 struct { ctxt *obj.Link newprog obj.ProgAlloc cursym *obj.LSym printp *obj.Prog blitrl *obj.Prog elitrl *obj.Prog autosize int64 instoffset int64 pc int64 pool struct { start uint32 size uint32 extra uint32 } } type Optab struct { as obj.As a1 uint8 a2 int8 a3 uint8 type_ uint8 size int8 param int16 flag int8 pcrelsiz uint8 scond uint8 // optional flags accepted by the instruction } type Opcross [32][2][32]uint8 const ( LFROM = 1 << 0 LTO = 1 << 1 LPOOL = 1 << 2 LPCREL = 1 << 3 ) var optab = []Optab{ /* struct Optab: OPCODE, from, prog->reg, to, type, size, param, flag, extra data size, optional suffix */ {obj.ATEXT, C_ADDR, C_NONE, C_TEXTSIZE, 0, 0, 0, 0, 0, 0}, {AADD, C_REG, C_REG, C_REG, 1, 4, 0, 0, 0, C_SBIT}, {AADD, C_REG, C_NONE, C_REG, 1, 4, 0, 0, 0, C_SBIT}, {AAND, C_REG, C_REG, C_REG, 1, 4, 0, 0, 0, C_SBIT}, {AAND, C_REG, C_NONE, C_REG, 1, 4, 0, 0, 0, C_SBIT}, {AORR, C_REG, C_REG, C_REG, 1, 4, 0, 0, 0, C_SBIT}, {AORR, C_REG, C_NONE, C_REG, 1, 4, 0, 0, 0, C_SBIT}, {AMOVW, C_REG, C_NONE, C_REG, 1, 4, 0, 0, 0, C_SBIT}, {AMVN, C_REG, C_NONE, C_REG, 1, 4, 0, 0, 0, C_SBIT}, {ACMP, C_REG, C_REG, C_NONE, 1, 4, 0, 0, 0, 0}, {AADD, C_RCON, C_REG, C_REG, 2, 4, 0, 0, 0, C_SBIT}, {AADD, C_RCON, C_NONE, C_REG, 2, 4, 0, 0, 0, C_SBIT}, {AAND, C_RCON, C_REG, C_REG, 2, 4, 0, 0, 0, C_SBIT}, {AAND, C_RCON, C_NONE, C_REG, 2, 4, 0, 0, 0, C_SBIT}, {AORR, C_RCON, C_REG, C_REG, 2, 4, 0, 0, 0, C_SBIT}, {AORR, C_RCON, C_NONE, C_REG, 2, 4, 0, 0, 0, C_SBIT}, {AMOVW, C_RCON, C_NONE, C_REG, 2, 4, 0, 0, 0, 0}, {AMVN, C_RCON, C_NONE, C_REG, 2, 4, 0, 0, 0, 0}, {ACMP, C_RCON, C_REG, C_NONE, 2, 4, 0, 0, 0, 0}, {AADD, C_SHIFT, C_REG, C_REG, 3, 4, 0, 0, 0, C_SBIT}, {AADD, C_SHIFT, C_NONE, C_REG, 3, 4, 0, 0, 0, C_SBIT}, {AAND, C_SHIFT, C_REG, C_REG, 3, 4, 0, 0, 0, C_SBIT}, {AAND, C_SHIFT, C_NONE, C_REG, 3, 4, 0, 0, 0, C_SBIT}, {AORR, C_SHIFT, C_REG, C_REG, 3, 4, 0, 0, 0, C_SBIT}, {AORR, C_SHIFT, C_NONE, C_REG, 
3, 4, 0, 0, 0, C_SBIT}, {AMVN, C_SHIFT, C_NONE, C_REG, 3, 4, 0, 0, 0, C_SBIT}, {ACMP, C_SHIFT, C_REG, C_NONE, 3, 4, 0, 0, 0, 0}, {AMOVW, C_RACON, C_NONE, C_REG, 4, 4, REGSP, 0, 0, C_SBIT}, {AB, C_NONE, C_NONE, C_SBRA, 5, 4, 0, LPOOL, 0, 0}, {ABL, C_NONE, C_NONE, C_SBRA, 5, 4, 0, 0, 0, 0}, {ABX, C_NONE, C_NONE, C_SBRA, 74, 20, 0, 0, 0, 0}, {ABEQ, C_NONE, C_NONE, C_SBRA, 5, 4, 0, 0, 0, 0}, {ABEQ, C_RCON, C_NONE, C_SBRA, 5, 4, 0, 0, 0, 0}, // prediction hinted form, hint ignored {AB, C_NONE, C_NONE, C_ROREG, 6, 4, 0, LPOOL, 0, 0}, {ABL, C_NONE, C_NONE, C_ROREG, 7, 4, 0, 0, 0, 0}, {ABL, C_REG, C_NONE, C_ROREG, 7, 4, 0, 0, 0, 0}, {ABX, C_NONE, C_NONE, C_ROREG, 75, 12, 0, 0, 0, 0}, {ABXRET, C_NONE, C_NONE, C_ROREG, 76, 4, 0, 0, 0, 0}, {ASLL, C_RCON, C_REG, C_REG, 8, 4, 0, 0, 0, C_SBIT}, {ASLL, C_RCON, C_NONE, C_REG, 8, 4, 0, 0, 0, C_SBIT}, {ASLL, C_REG, C_NONE, C_REG, 9, 4, 0, 0, 0, C_SBIT}, {ASLL, C_REG, C_REG, C_REG, 9, 4, 0, 0, 0, C_SBIT}, {ASWI, C_NONE, C_NONE, C_NONE, 10, 4, 0, 0, 0, 0}, {ASWI, C_NONE, C_NONE, C_LCON, 10, 4, 0, 0, 0, 0}, {AWORD, C_NONE, C_NONE, C_LCON, 11, 4, 0, 0, 0, 0}, {AWORD, C_NONE, C_NONE, C_LCONADDR, 11, 4, 0, 0, 0, 0}, {AWORD, C_NONE, C_NONE, C_ADDR, 11, 4, 0, 0, 0, 0}, {AWORD, C_NONE, C_NONE, C_TLS_LE, 103, 4, 0, 0, 0, 0}, {AWORD, C_NONE, C_NONE, C_TLS_IE, 104, 4, 0, 0, 0, 0}, {AMOVW, C_NCON, C_NONE, C_REG, 12, 4, 0, 0, 0, 0}, {AMOVW, C_SCON, C_NONE, C_REG, 12, 4, 0, 0, 0, 0}, {AMOVW, C_LCON, C_NONE, C_REG, 12, 4, 0, LFROM, 0, 0}, {AMOVW, C_LCONADDR, C_NONE, C_REG, 12, 4, 0, LFROM | LPCREL, 4, 0}, {AMVN, C_NCON, C_NONE, C_REG, 12, 4, 0, 0, 0, 0}, {AADD, C_NCON, C_REG, C_REG, 13, 8, 0, 0, 0, C_SBIT}, {AADD, C_NCON, C_NONE, C_REG, 13, 8, 0, 0, 0, C_SBIT}, {AAND, C_NCON, C_REG, C_REG, 13, 8, 0, 0, 0, C_SBIT}, {AAND, C_NCON, C_NONE, C_REG, 13, 8, 0, 0, 0, C_SBIT}, {AORR, C_NCON, C_REG, C_REG, 13, 8, 0, 0, 0, C_SBIT}, {AORR, C_NCON, C_NONE, C_REG, 13, 8, 0, 0, 0, C_SBIT}, {ACMP, C_NCON, C_REG, C_NONE, 13, 8, 0, 0, 0, 0}, {AADD, C_SCON, C_REG, 
C_REG, 13, 8, 0, 0, 0, C_SBIT}, {AADD, C_SCON, C_NONE, C_REG, 13, 8, 0, 0, 0, C_SBIT}, {AAND, C_SCON, C_REG, C_REG, 13, 8, 0, 0, 0, C_SBIT}, {AAND, C_SCON, C_NONE, C_REG, 13, 8, 0, 0, 0, C_SBIT}, {AORR, C_SCON, C_REG, C_REG, 13, 8, 0, 0, 0, C_SBIT}, {AORR, C_SCON, C_NONE, C_REG, 13, 8, 0, 0, 0, C_SBIT}, {AMVN, C_SCON, C_NONE, C_REG, 13, 8, 0, 0, 0, 0}, {ACMP, C_SCON, C_REG, C_NONE, 13, 8, 0, 0, 0, 0}, {AADD, C_RCON2A, C_REG, C_REG, 106, 8, 0, 0, 0, 0}, {AADD, C_RCON2A, C_NONE, C_REG, 106, 8, 0, 0, 0, 0}, {AORR, C_RCON2A, C_REG, C_REG, 106, 8, 0, 0, 0, 0}, {AORR, C_RCON2A, C_NONE, C_REG, 106, 8, 0, 0, 0, 0}, {AADD, C_RCON2S, C_REG, C_REG, 107, 8, 0, 0, 0, 0}, {AADD, C_RCON2S, C_NONE, C_REG, 107, 8, 0, 0, 0, 0}, {AADD, C_LCON, C_REG, C_REG, 13, 8, 0, LFROM, 0, C_SBIT}, {AADD, C_LCON, C_NONE, C_REG, 13, 8, 0, LFROM, 0, C_SBIT}, {AAND, C_LCON, C_REG, C_REG, 13, 8, 0, LFROM, 0, C_SBIT}, {AAND, C_LCON, C_NONE, C_REG, 13, 8, 0, LFROM, 0, C_SBIT}, {AORR, C_LCON, C_REG, C_REG, 13, 8, 0, LFROM, 0, C_SBIT}, {AORR, C_LCON, C_NONE, C_REG, 13, 8, 0, LFROM, 0, C_SBIT}, {AMVN, C_LCON, C_NONE, C_REG, 13, 8, 0, LFROM, 0, 0}, {ACMP, C_LCON, C_REG, C_NONE, 13, 8, 0, LFROM, 0, 0}, {AMOVB, C_REG, C_NONE, C_REG, 1, 4, 0, 0, 0, 0}, {AMOVBS, C_REG, C_NONE, C_REG, 14, 8, 0, 0, 0, 0}, {AMOVBU, C_REG, C_NONE, C_REG, 58, 4, 0, 0, 0, 0}, {AMOVH, C_REG, C_NONE, C_REG, 1, 4, 0, 0, 0, 0}, {AMOVHS, C_REG, C_NONE, C_REG, 14, 8, 0, 0, 0, 0}, {AMOVHU, C_REG, C_NONE, C_REG, 14, 8, 0, 0, 0, 0}, {AMUL, C_REG, C_REG, C_REG, 15, 4, 0, 0, 0, C_SBIT}, {AMUL, C_REG, C_NONE, C_REG, 15, 4, 0, 0, 0, C_SBIT}, {ADIV, C_REG, C_REG, C_REG, 16, 4, 0, 0, 0, 0}, {ADIV, C_REG, C_NONE, C_REG, 16, 4, 0, 0, 0, 0}, {ADIVHW, C_REG, C_REG, C_REG, 105, 4, 0, 0, 0, 0}, {ADIVHW, C_REG, C_NONE, C_REG, 105, 4, 0, 0, 0, 0}, {AMULL, C_REG, C_REG, C_REGREG, 17, 4, 0, 0, 0, C_SBIT}, {ABFX, C_LCON, C_REG, C_REG, 18, 4, 0, 0, 0, 0}, // width in From, LSB in From3 {ABFX, C_LCON, C_NONE, C_REG, 18, 4, 0, 0, 0, 0}, // width in From, LSB in 
From3 {AMOVW, C_REG, C_NONE, C_SAUTO, 20, 4, REGSP, 0, 0, C_PBIT | C_WBIT | C_UBIT}, {AMOVW, C_REG, C_NONE, C_SOREG, 20, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, {AMOVB, C_REG, C_NONE, C_SAUTO, 20, 4, REGSP, 0, 0, C_PBIT | C_WBIT | C_UBIT}, {AMOVB, C_REG, C_NONE, C_SOREG, 20, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, {AMOVBS, C_REG, C_NONE, C_SAUTO, 20, 4, REGSP, 0, 0, C_PBIT | C_WBIT | C_UBIT}, {AMOVBS, C_REG, C_NONE, C_SOREG, 20, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, {AMOVBU, C_REG, C_NONE, C_SAUTO, 20, 4, REGSP, 0, 0, C_PBIT | C_WBIT | C_UBIT}, {AMOVBU, C_REG, C_NONE, C_SOREG, 20, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, {AMOVW, C_SAUTO, C_NONE, C_REG, 21, 4, REGSP, 0, 0, C_PBIT | C_WBIT | C_UBIT}, {AMOVW, C_SOREG, C_NONE, C_REG, 21, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, {AMOVBU, C_SAUTO, C_NONE, C_REG, 21, 4, REGSP, 0, 0, C_PBIT | C_WBIT | C_UBIT}, {AMOVBU, C_SOREG, C_NONE, C_REG, 21, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, {AXTAB, C_SHIFT, C_REG, C_REG, 22, 4, 0, 0, 0, 0}, {AXTAB, C_SHIFT, C_NONE, C_REG, 22, 4, 0, 0, 0, 0}, {AMOVW, C_SHIFT, C_NONE, C_REG, 23, 4, 0, 0, 0, C_SBIT}, {AMOVB, C_SHIFT, C_NONE, C_REG, 23, 4, 0, 0, 0, 0}, {AMOVBS, C_SHIFT, C_NONE, C_REG, 23, 4, 0, 0, 0, 0}, {AMOVBU, C_SHIFT, C_NONE, C_REG, 23, 4, 0, 0, 0, 0}, {AMOVH, C_SHIFT, C_NONE, C_REG, 23, 4, 0, 0, 0, 0}, {AMOVHS, C_SHIFT, C_NONE, C_REG, 23, 4, 0, 0, 0, 0}, {AMOVHU, C_SHIFT, C_NONE, C_REG, 23, 4, 0, 0, 0, 0}, {AMOVW, C_REG, C_NONE, C_LAUTO, 30, 8, REGSP, LTO, 0, C_PBIT | C_WBIT | C_UBIT}, {AMOVW, C_REG, C_NONE, C_LOREG, 30, 8, 0, LTO, 0, C_PBIT | C_WBIT | C_UBIT}, {AMOVW, C_REG, C_NONE, C_ADDR, 64, 8, 0, LTO | LPCREL, 4, C_PBIT | C_WBIT | C_UBIT}, {AMOVB, C_REG, C_NONE, C_LAUTO, 30, 8, REGSP, LTO, 0, C_PBIT | C_WBIT | C_UBIT}, {AMOVB, C_REG, C_NONE, C_LOREG, 30, 8, 0, LTO, 0, C_PBIT | C_WBIT | C_UBIT}, {AMOVB, C_REG, C_NONE, C_ADDR, 64, 8, 0, LTO | LPCREL, 4, C_PBIT | C_WBIT | C_UBIT}, {AMOVBS, C_REG, C_NONE, C_LAUTO, 30, 8, REGSP, LTO, 0, C_PBIT | C_WBIT | C_UBIT}, {AMOVBS, C_REG, 
C_NONE, C_LOREG, 30, 8, 0, LTO, 0, C_PBIT | C_WBIT | C_UBIT}, {AMOVBS, C_REG, C_NONE, C_ADDR, 64, 8, 0, LTO | LPCREL, 4, C_PBIT | C_WBIT | C_UBIT}, {AMOVBU, C_REG, C_NONE, C_LAUTO, 30, 8, REGSP, LTO, 0, C_PBIT | C_WBIT | C_UBIT}, {AMOVBU, C_REG, C_NONE, C_LOREG, 30, 8, 0, LTO, 0, C_PBIT | C_WBIT | C_UBIT}, {AMOVBU, C_REG, C_NONE, C_ADDR, 64, 8, 0, LTO | LPCREL, 4, C_PBIT | C_WBIT | C_UBIT}, {AMOVW, C_TLS_LE, C_NONE, C_REG, 101, 4, 0, LFROM, 0, 0}, {AMOVW, C_TLS_IE, C_NONE, C_REG, 102, 8, 0, LFROM, 0, 0}, {AMOVW, C_LAUTO, C_NONE, C_REG, 31, 8, REGSP, LFROM, 0, C_PBIT | C_WBIT | C_UBIT}, {AMOVW, C_LOREG, C_NONE, C_REG, 31, 8, 0, LFROM, 0, C_PBIT | C_WBIT | C_UBIT}, {AMOVW, C_ADDR, C_NONE, C_REG, 65, 8, 0, LFROM | LPCREL, 4, C_PBIT | C_WBIT | C_UBIT}, {AMOVBU, C_LAUTO, C_NONE, C_REG, 31, 8, REGSP, LFROM, 0, C_PBIT | C_WBIT | C_UBIT}, {AMOVBU, C_LOREG, C_NONE, C_REG, 31, 8, 0, LFROM, 0, C_PBIT | C_WBIT | C_UBIT}, {AMOVBU, C_ADDR, C_NONE, C_REG, 65, 8, 0, LFROM | LPCREL, 4, C_PBIT | C_WBIT | C_UBIT}, {AMOVW, C_LACON, C_NONE, C_REG, 34, 8, REGSP, LFROM, 0, C_SBIT}, {AMOVW, C_PSR, C_NONE, C_REG, 35, 4, 0, 0, 0, 0}, {AMOVW, C_REG, C_NONE, C_PSR, 36, 4, 0, 0, 0, 0}, {AMOVW, C_RCON, C_NONE, C_PSR, 37, 4, 0, 0, 0, 0}, {AMOVM, C_REGLIST, C_NONE, C_SOREG, 38, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, {AMOVM, C_SOREG, C_NONE, C_REGLIST, 39, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, {ASWPW, C_SOREG, C_REG, C_REG, 40, 4, 0, 0, 0, 0}, {ARFE, C_NONE, C_NONE, C_NONE, 41, 4, 0, 0, 0, 0}, {AMOVF, C_FREG, C_NONE, C_FAUTO, 50, 4, REGSP, 0, 0, C_PBIT | C_WBIT | C_UBIT}, {AMOVF, C_FREG, C_NONE, C_FOREG, 50, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, {AMOVF, C_FAUTO, C_NONE, C_FREG, 51, 4, REGSP, 0, 0, C_PBIT | C_WBIT | C_UBIT}, {AMOVF, C_FOREG, C_NONE, C_FREG, 51, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, {AMOVF, C_FREG, C_NONE, C_LAUTO, 52, 12, REGSP, LTO, 0, C_PBIT | C_WBIT | C_UBIT}, {AMOVF, C_FREG, C_NONE, C_LOREG, 52, 12, 0, LTO, 0, C_PBIT | C_WBIT | C_UBIT}, {AMOVF, C_LAUTO, C_NONE, C_FREG, 53, 
12, REGSP, LFROM, 0, C_PBIT | C_WBIT | C_UBIT}, {AMOVF, C_LOREG, C_NONE, C_FREG, 53, 12, 0, LFROM, 0, C_PBIT | C_WBIT | C_UBIT}, {AMOVF, C_FREG, C_NONE, C_ADDR, 68, 8, 0, LTO | LPCREL, 4, C_PBIT | C_WBIT | C_UBIT}, {AMOVF, C_ADDR, C_NONE, C_FREG, 69, 8, 0, LFROM | LPCREL, 4, C_PBIT | C_WBIT | C_UBIT}, {AADDF, C_FREG, C_NONE, C_FREG, 54, 4, 0, 0, 0, 0}, {AADDF, C_FREG, C_FREG, C_FREG, 54, 4, 0, 0, 0, 0}, {AMOVF, C_FREG, C_NONE, C_FREG, 55, 4, 0, 0, 0, 0}, {ANEGF, C_FREG, C_NONE, C_FREG, 55, 4, 0, 0, 0, 0}, {AMOVW, C_REG, C_NONE, C_FCR, 56, 4, 0, 0, 0, 0}, {AMOVW, C_FCR, C_NONE, C_REG, 57, 4, 0, 0, 0, 0}, {AMOVW, C_SHIFTADDR, C_NONE, C_REG, 59, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, {AMOVBU, C_SHIFTADDR, C_NONE, C_REG, 59, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, {AMOVB, C_SHIFTADDR, C_NONE, C_REG, 60, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, {AMOVBS, C_SHIFTADDR, C_NONE, C_REG, 60, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, {AMOVH, C_SHIFTADDR, C_NONE, C_REG, 60, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, {AMOVHS, C_SHIFTADDR, C_NONE, C_REG, 60, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, {AMOVHU, C_SHIFTADDR, C_NONE, C_REG, 60, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, {AMOVW, C_REG, C_NONE, C_SHIFTADDR, 61, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, {AMOVB, C_REG, C_NONE, C_SHIFTADDR, 61, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, {AMOVBS, C_REG, C_NONE, C_SHIFTADDR, 61, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, {AMOVBU, C_REG, C_NONE, C_SHIFTADDR, 61, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, {AMOVH, C_REG, C_NONE, C_SHIFTADDR, 62, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, {AMOVHS, C_REG, C_NONE, C_SHIFTADDR, 62, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, {AMOVHU, C_REG, C_NONE, C_SHIFTADDR, 62, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, {AMOVH, C_REG, C_NONE, C_HAUTO, 70, 4, REGSP, 0, 0, C_PBIT | C_WBIT | C_UBIT}, {AMOVH, C_REG, C_NONE, C_HOREG, 70, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, {AMOVHS, C_REG, C_NONE, C_HAUTO, 70, 4, REGSP, 0, 0, C_PBIT | C_WBIT | C_UBIT}, {AMOVHS, C_REG, C_NONE, C_HOREG, 70, 4, 
0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, {AMOVHU, C_REG, C_NONE, C_HAUTO, 70, 4, REGSP, 0, 0, C_PBIT | C_WBIT | C_UBIT}, {AMOVHU, C_REG, C_NONE, C_HOREG, 70, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, {AMOVB, C_HAUTO, C_NONE, C_REG, 71, 4, REGSP, 0, 0, C_PBIT | C_WBIT | C_UBIT}, {AMOVB, C_HOREG, C_NONE, C_REG, 71, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, {AMOVBS, C_HAUTO, C_NONE, C_REG, 71, 4, REGSP, 0, 0, C_PBIT | C_WBIT | C_UBIT}, {AMOVBS, C_HOREG, C_NONE, C_REG, 71, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, {AMOVH, C_HAUTO, C_NONE, C_REG, 71, 4, REGSP, 0, 0, C_PBIT | C_WBIT | C_UBIT}, {AMOVH, C_HOREG, C_NONE, C_REG, 71, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, {AMOVHS, C_HAUTO, C_NONE, C_REG, 71, 4, REGSP, 0, 0, C_PBIT | C_WBIT | C_UBIT}, {AMOVHS, C_HOREG, C_NONE, C_REG, 71, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, {AMOVHU, C_HAUTO, C_NONE, C_REG, 71, 4, REGSP, 0, 0, C_PBIT | C_WBIT | C_UBIT}, {AMOVHU, C_HOREG, C_NONE, C_REG, 71, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, {AMOVH, C_REG, C_NONE, C_LAUTO, 72, 8, REGSP, LTO, 0, C_PBIT | C_WBIT | C_UBIT}, {AMOVH, C_REG, C_NONE, C_LOREG, 72, 8, 0, LTO, 0, C_PBIT | C_WBIT | C_UBIT}, {AMOVH, C_REG, C_NONE, C_ADDR, 94, 8, 0, LTO | LPCREL, 4, C_PBIT | C_WBIT | C_UBIT}, {AMOVHS, C_REG, C_NONE, C_LAUTO, 72, 8, REGSP, LTO, 0, C_PBIT | C_WBIT | C_UBIT}, {AMOVHS, C_REG, C_NONE, C_LOREG, 72, 8, 0, LTO, 0, C_PBIT | C_WBIT | C_UBIT}, {AMOVHS, C_REG, C_NONE, C_ADDR, 94, 8, 0, LTO | LPCREL, 4, C_PBIT | C_WBIT | C_UBIT}, {AMOVHU, C_REG, C_NONE, C_LAUTO, 72, 8, REGSP, LTO, 0, C_PBIT | C_WBIT | C_UBIT}, {AMOVHU, C_REG, C_NONE, C_LOREG, 72, 8, 0, LTO, 0, C_PBIT | C_WBIT | C_UBIT}, {AMOVHU, C_REG, C_NONE, C_ADDR, 94, 8, 0, LTO | LPCREL, 4, C_PBIT | C_WBIT | C_UBIT}, {AMOVB, C_LAUTO, C_NONE, C_REG, 73, 8, REGSP, LFROM, 0, C_PBIT | C_WBIT | C_UBIT}, {AMOVB, C_LOREG, C_NONE, C_REG, 73, 8, 0, LFROM, 0, C_PBIT | C_WBIT | C_UBIT}, {AMOVB, C_ADDR, C_NONE, C_REG, 93, 8, 0, LFROM | LPCREL, 4, C_PBIT | C_WBIT | C_UBIT}, {AMOVBS, C_LAUTO, C_NONE, C_REG, 73, 8, REGSP, 
LFROM, 0, C_PBIT | C_WBIT | C_UBIT}, {AMOVBS, C_LOREG, C_NONE, C_REG, 73, 8, 0, LFROM, 0, C_PBIT | C_WBIT | C_UBIT}, {AMOVBS, C_ADDR, C_NONE, C_REG, 93, 8, 0, LFROM | LPCREL, 4, C_PBIT | C_WBIT | C_UBIT}, {AMOVH, C_LAUTO, C_NONE, C_REG, 73, 8, REGSP, LFROM, 0, C_PBIT | C_WBIT | C_UBIT}, {AMOVH, C_LOREG, C_NONE, C_REG, 73, 8, 0, LFROM, 0, C_PBIT | C_WBIT | C_UBIT}, {AMOVH, C_ADDR, C_NONE, C_REG, 93, 8, 0, LFROM | LPCREL, 4, C_PBIT | C_WBIT | C_UBIT}, {AMOVHS, C_LAUTO, C_NONE, C_REG, 73, 8, REGSP, LFROM, 0, C_PBIT | C_WBIT | C_UBIT}, {AMOVHS, C_LOREG, C_NONE, C_REG, 73, 8, 0, LFROM, 0, C_PBIT | C_WBIT | C_UBIT}, {AMOVHS, C_ADDR, C_NONE, C_REG, 93, 8, 0, LFROM | LPCREL, 4, C_PBIT | C_WBIT | C_UBIT}, {AMOVHU, C_LAUTO, C_NONE, C_REG, 73, 8, REGSP, LFROM, 0, C_PBIT | C_WBIT | C_UBIT}, {AMOVHU, C_LOREG, C_NONE, C_REG, 73, 8, 0, LFROM, 0, C_PBIT | C_WBIT | C_UBIT}, {AMOVHU, C_ADDR, C_NONE, C_REG, 93, 8, 0, LFROM | LPCREL, 4, C_PBIT | C_WBIT | C_UBIT}, {ALDREX, C_SOREG, C_NONE, C_REG, 77, 4, 0, 0, 0, 0}, {ASTREX, C_SOREG, C_REG, C_REG, 78, 4, 0, 0, 0, 0}, {ADMB, C_NONE, C_NONE, C_NONE, 110, 4, 0, 0, 0, 0}, {ADMB, C_LCON, C_NONE, C_NONE, 110, 4, 0, 0, 0, 0}, {ADMB, C_SPR, C_NONE, C_NONE, 110, 4, 0, 0, 0, 0}, {AMOVF, C_ZFCON, C_NONE, C_FREG, 80, 8, 0, 0, 0, 0}, {AMOVF, C_SFCON, C_NONE, C_FREG, 81, 4, 0, 0, 0, 0}, {ACMPF, C_FREG, C_FREG, C_NONE, 82, 8, 0, 0, 0, 0}, {ACMPF, C_FREG, C_NONE, C_NONE, 83, 8, 0, 0, 0, 0}, {AMOVFW, C_FREG, C_NONE, C_FREG, 84, 4, 0, 0, 0, C_UBIT}, {AMOVWF, C_FREG, C_NONE, C_FREG, 85, 4, 0, 0, 0, C_UBIT}, {AMOVFW, C_FREG, C_NONE, C_REG, 86, 8, 0, 0, 0, C_UBIT}, {AMOVWF, C_REG, C_NONE, C_FREG, 87, 8, 0, 0, 0, C_UBIT}, {AMOVW, C_REG, C_NONE, C_FREG, 88, 4, 0, 0, 0, 0}, {AMOVW, C_FREG, C_NONE, C_REG, 89, 4, 0, 0, 0, 0}, {ALDREXD, C_SOREG, C_NONE, C_REG, 91, 4, 0, 0, 0, 0}, {ASTREXD, C_SOREG, C_REG, C_REG, 92, 4, 0, 0, 0, 0}, {APLD, C_SOREG, C_NONE, C_NONE, 95, 4, 0, 0, 0, 0}, {obj.AUNDEF, C_NONE, C_NONE, C_NONE, 96, 4, 0, 0, 0, 0}, {ACLZ, C_REG, C_NONE, 
C_REG, 97, 4, 0, 0, 0, 0}, {AMULWT, C_REG, C_REG, C_REG, 98, 4, 0, 0, 0, 0}, {AMULA, C_REG, C_REG, C_REGREG2, 99, 4, 0, 0, 0, C_SBIT}, {AMULAWT, C_REG, C_REG, C_REGREG2, 99, 4, 0, 0, 0, 0}, {obj.APCDATA, C_LCON, C_NONE, C_LCON, 0, 0, 0, 0, 0, 0}, {obj.AFUNCDATA, C_LCON, C_NONE, C_ADDR, 0, 0, 0, 0, 0, 0}, {obj.ANOP, C_NONE, C_NONE, C_NONE, 0, 0, 0, 0, 0, 0}, {obj.ANOP, C_LCON, C_NONE, C_NONE, 0, 0, 0, 0, 0, 0}, // nop variants, see #40689 {obj.ANOP, C_REG, C_NONE, C_NONE, 0, 0, 0, 0, 0, 0}, {obj.ANOP, C_FREG, C_NONE, C_NONE, 0, 0, 0, 0, 0, 0}, {obj.ADUFFZERO, C_NONE, C_NONE, C_SBRA, 5, 4, 0, 0, 0, 0}, // same as ABL {obj.ADUFFCOPY, C_NONE, C_NONE, C_SBRA, 5, 4, 0, 0, 0, 0}, // same as ABL {obj.AXXX, C_NONE, C_NONE, C_NONE, 0, 4, 0, 0, 0, 0}, } var mbOp = []struct { reg int16 enc uint32 }{ {REG_MB_SY, 15}, {REG_MB_ST, 14}, {REG_MB_ISH, 11}, {REG_MB_ISHST, 10}, {REG_MB_NSH, 7}, {REG_MB_NSHST, 6}, {REG_MB_OSH, 3}, {REG_MB_OSHST, 2}, } var oprange [ALAST & obj.AMask][]Optab var xcmp [C_GOK + 1][C_GOK + 1]bool var ( deferreturn *obj.LSym symdiv *obj.LSym symdivu *obj.LSym symmod *obj.LSym symmodu *obj.LSym ) // Note about encoding: Prog.scond holds the condition encoding, // but XOR'ed with C_SCOND_XOR, so that C_SCOND_NONE == 0. // The code that shifts the value << 28 has the responsibility // for XORing with C_SCOND_XOR too. 
func checkSuffix(c *ctxt5, p *obj.Prog, o *Optab) { if p.Scond&C_SBIT != 0 && o.scond&C_SBIT == 0 { c.ctxt.Diag("invalid .S suffix: %v", p) } if p.Scond&C_PBIT != 0 && o.scond&C_PBIT == 0 { c.ctxt.Diag("invalid .P suffix: %v", p) } if p.Scond&C_WBIT != 0 && o.scond&C_WBIT == 0 { c.ctxt.Diag("invalid .W suffix: %v", p) } if p.Scond&C_UBIT != 0 && o.scond&C_UBIT == 0 { c.ctxt.Diag("invalid .U suffix: %v", p) } } func span5(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { if ctxt.Retpoline { ctxt.Diag("-spectre=ret not supported on arm") ctxt.Retpoline = false // don't keep printing } var p *obj.Prog var op *obj.Prog p = cursym.Func.Text if p == nil || p.Link == nil { // handle external functions and ELF section symbols return } if oprange[AAND&obj.AMask] == nil { ctxt.Diag("arm ops not initialized, call arm.buildop first") } c := ctxt5{ctxt: ctxt, newprog: newprog, cursym: cursym, autosize: p.To.Offset + 4} pc := int32(0) op = p p = p.Link var m int var o *Optab for ; p != nil || c.blitrl != nil; op, p = p, p.Link { if p == nil { if c.checkpool(op, pc) { p = op continue } // can't happen: blitrl is not nil, but checkpool didn't flushpool ctxt.Diag("internal inconsistency") break } p.Pc = int64(pc) o = c.oplook(p) m = int(o.size) if m%4 != 0 || p.Pc%4 != 0 { ctxt.Diag("!pc invalid: %v size=%d", p, m) } // must check literal pool here in case p generates many instructions if c.blitrl != nil { // Emit the constant pool just before p if p // would push us over the immediate size limit. if c.checkpool(op, pc+int32(m)) { // Back up to the instruction just // before the pool and continue with // the first instruction of the pool. 
p = op continue } } if m == 0 && (p.As != obj.AFUNCDATA && p.As != obj.APCDATA && p.As != obj.ANOP) { ctxt.Diag("zero-width instruction\n%v", p) continue } switch o.flag & (LFROM | LTO | LPOOL) { case LFROM: c.addpool(p, &p.From) case LTO: c.addpool(p, &p.To) case LPOOL: if p.Scond&C_SCOND == C_SCOND_NONE { c.flushpool(p, 0, 0) } } if p.As == AMOVW && p.To.Type == obj.TYPE_REG && p.To.Reg == REGPC && p.Scond&C_SCOND == C_SCOND_NONE { c.flushpool(p, 0, 0) } pc += int32(m) } c.cursym.Size = int64(pc) /* * if any procedure is large enough to * generate a large SBRA branch, then * generate extra passes putting branches * around jmps to fix. this is rare. */ times := 0 var bflag int var opc int32 var out [6 + 3]uint32 for { bflag = 0 pc = 0 times++ c.cursym.Func.Text.Pc = 0 // force re-layout the code. for p = c.cursym.Func.Text; p != nil; p = p.Link { o = c.oplook(p) if int64(pc) > p.Pc { p.Pc = int64(pc) } /* very large branches if(o->type == 6 && p->pcond) { otxt = p->pcond->pc - c; if(otxt < 0) otxt = -otxt; if(otxt >= (1L<<17) - 10) { q = emallocz(sizeof(Prog)); q->link = p->link; p->link = q; q->as = AB; q->to.type = TYPE_BRANCH; q->pcond = p->pcond; p->pcond = q; q = emallocz(sizeof(Prog)); q->link = p->link; p->link = q; q->as = AB; q->to.type = TYPE_BRANCH; q->pcond = q->link->link; bflag = 1; } } */ opc = int32(p.Pc) m = int(o.size) if p.Pc != int64(opc) { bflag = 1 } //print("%v pc changed %d to %d in iter. %d\n", p, opc, (int32)p->pc, times); pc = int32(p.Pc + int64(m)) if m%4 != 0 || p.Pc%4 != 0 { ctxt.Diag("pc invalid: %v size=%d", p, m) } if m/4 > len(out) { ctxt.Diag("instruction size too large: %d > %d", m/4, len(out)) } if m == 0 && (p.As != obj.AFUNCDATA && p.As != obj.APCDATA && p.As != obj.ANOP) { if p.As == obj.ATEXT { c.autosize = p.To.Offset + 4 continue } ctxt.Diag("zero-width instruction\n%v", p) continue } } c.cursym.Size = int64(pc) if bflag == 0 { break } } if pc%4 != 0 { ctxt.Diag("sym->size=%d, invalid", pc) } /* * lay out the code. 
all the pc-relative code references, * even cross-function, are resolved now; * only data references need to be relocated. * with more work we could leave cross-function * code references to be relocated too, and then * perhaps we'd be able to parallelize the span loop above. */ p = c.cursym.Func.Text c.autosize = p.To.Offset + 4 c.cursym.Grow(c.cursym.Size) bp := c.cursym.P pc = int32(p.Pc) // even p->link might need extra padding var v int for p = p.Link; p != nil; p = p.Link { c.pc = p.Pc o = c.oplook(p) opc = int32(p.Pc) c.asmout(p, o, out[:]) m = int(o.size) if m%4 != 0 || p.Pc%4 != 0 { ctxt.Diag("final stage: pc invalid: %v size=%d", p, m) } if int64(pc) > p.Pc { ctxt.Diag("PC padding invalid: want %#d, has %#d: %v", p.Pc, pc, p) } for int64(pc) != p.Pc { // emit 0xe1a00000 (MOVW R0, R0) bp[0] = 0x00 bp = bp[1:] bp[0] = 0x00 bp = bp[1:] bp[0] = 0xa0 bp = bp[1:] bp[0] = 0xe1 bp = bp[1:] pc += 4 } for i := 0; i < m/4; i++ { v = int(out[i]) bp[0] = byte(v) bp = bp[1:] bp[0] = byte(v >> 8) bp = bp[1:] bp[0] = byte(v >> 16) bp = bp[1:] bp[0] = byte(v >> 24) bp = bp[1:] } pc += int32(m) } } // checkpool flushes the literal pool when the first reference to // it threatens to go out of range of a 12-bit PC-relative offset. // // nextpc is the tentative next PC at which the pool could be emitted. // checkpool should be called *before* emitting the instruction that // would cause the PC to reach nextpc. // If nextpc is too far from the first pool reference, checkpool will // flush the pool immediately after p. // The caller should resume processing a p.Link. 
func (c *ctxt5) checkpool(p *obj.Prog, nextpc int32) bool { poolLast := nextpc poolLast += 4 // the AB instruction to jump around the pool poolLast += int32(c.pool.size) - 4 // the offset of the last pool entry refPC := int32(c.pool.start) // PC of the first pool reference v := poolLast - refPC - 8 // 12-bit PC-relative offset (see omvl) if c.pool.size >= 0xff0 || immaddr(v) == 0 { return c.flushpool(p, 1, 0) } else if p.Link == nil { return c.flushpool(p, 2, 0) } return false } func (c *ctxt5) flushpool(p *obj.Prog, skip int, force int) bool { if c.blitrl != nil { if skip != 0 { if false && skip == 1 { fmt.Printf("note: flush literal pool at %x: len=%d ref=%x\n", uint64(p.Pc+4), c.pool.size, c.pool.start) } q := c.newprog() q.As = AB q.To.Type = obj.TYPE_BRANCH q.To.SetTarget(p.Link) q.Link = c.blitrl q.Pos = p.Pos c.blitrl = q } else if force == 0 && (p.Pc+int64(c.pool.size)-int64(c.pool.start) < 2048) { return false } // The line number for constant pool entries doesn't really matter. // We set it to the line number of the preceding instruction so that // there are no deltas to encode in the pc-line tables. 
for q := c.blitrl; q != nil; q = q.Link { q.Pos = p.Pos } c.elitrl.Link = p.Link p.Link = c.blitrl c.blitrl = nil /* BUG: should refer back to values until out-of-range */ c.elitrl = nil c.pool.size = 0 c.pool.start = 0 c.pool.extra = 0 return true } return false } func (c *ctxt5) addpool(p *obj.Prog, a *obj.Addr) { t := c.newprog() t.As = AWORD switch c.aclass(a) { default: t.To.Offset = a.Offset t.To.Sym = a.Sym t.To.Type = a.Type t.To.Name = a.Name if c.ctxt.Flag_shared && t.To.Sym != nil { t.Rel = p } case C_SROREG, C_LOREG, C_ROREG, C_FOREG, C_SOREG, C_HOREG, C_FAUTO, C_SAUTO, C_LAUTO, C_LACON: t.To.Type = obj.TYPE_CONST t.To.Offset = c.instoffset } if t.Rel == nil { for q := c.blitrl; q != nil; q = q.Link { /* could hash on t.t0.offset */ if q.Rel == nil && q.To == t.To { p.Pool = q return } } } q := c.newprog() *q = *t q.Pc = int64(c.pool.size) if c.blitrl == nil { c.blitrl = q c.pool.start = uint32(p.Pc) } else { c.elitrl.Link = q } c.elitrl = q c.pool.size += 4 // Store the link to the pool entry in Pool. p.Pool = q } func (c *ctxt5) regoff(a *obj.Addr) int32 { c.instoffset = 0 c.aclass(a) return int32(c.instoffset) } func immrot(v uint32) int32 { for i := 0; i < 16; i++ { if v&^0xff == 0 { return int32(uint32(int32(i)<<8) | v | 1<<25) } v = v<<2 | v>>30 } return 0 } // immrot2a returns bits encoding the immediate constant fields of two instructions, // such that the encoded constants x, y satisfy x|y==v, x&y==0. // Returns 0,0 if no such decomposition of v exists. func immrot2a(v uint32) (uint32, uint32) { for i := uint(1); i < 32; i++ { m := uint32(1<<i - 1) if x, y := immrot(v&m), immrot(v&^m); x != 0 && y != 0 { return uint32(x), uint32(y) } } // TODO: handle some more cases, like where // the wraparound from the rotate could help. return 0, 0 } // immrot2s returns bits encoding the immediate constant fields of two instructions, // such that the encoded constants y, x satisfy y-x==v, y&x==0. // Returns 0,0 if no such decomposition of v exists. 
func immrot2s(v uint32) (uint32, uint32) { if immrot(v) != 0 { return v, 0 } // suppose v in the form of {leading 00, upper effective bits, lower 8 effective bits, trailing 00} // omit trailing 00 var i uint32 for i = 2; i < 32; i += 2 { if v&(1<<i-1) != 0 { break } } // i must be <= 24, then adjust i just above lower 8 effective bits of v i += 6 // let x = {the complement of lower 8 effective bits, trailing 00}, y = x + v x := 1<<i - v&(1<<i-1) y := v + x if y, x = uint32(immrot(y)), uint32(immrot(x)); y != 0 && x != 0 { return y, x } return 0, 0 } func immaddr(v int32) int32 { if v >= 0 && v <= 0xfff { return v&0xfff | 1<<24 | 1<<23 /* pre indexing */ /* pre indexing, up */ } if v >= -0xfff && v < 0 { return -v&0xfff | 1<<24 /* pre indexing */ } return 0 } func immfloat(v int32) bool { return v&0xC03 == 0 /* offset will fit in floating-point load/store */ } func immhalf(v int32) bool { if v >= 0 && v <= 0xff { return v|1<<24|1<<23 != 0 /* pre indexing */ /* pre indexing, up */ } if v >= -0xff && v < 0 { return -v&0xff|1<<24 != 0 /* pre indexing */ } return false } func (c *ctxt5) aclass(a *obj.Addr) int { switch a.Type { case obj.TYPE_NONE: return C_NONE case obj.TYPE_REG: c.instoffset = 0 if REG_R0 <= a.Reg && a.Reg <= REG_R15 { return C_REG } if REG_F0 <= a.Reg && a.Reg <= REG_F15 { return C_FREG } if a.Reg == REG_FPSR || a.Reg == REG_FPCR { return C_FCR } if a.Reg == REG_CPSR || a.Reg == REG_SPSR { return C_PSR } if a.Reg >= REG_SPECIAL { return C_SPR } return C_GOK case obj.TYPE_REGREG: return C_REGREG case obj.TYPE_REGREG2: return C_REGREG2 case obj.TYPE_REGLIST: return C_REGLIST case obj.TYPE_SHIFT: if a.Reg == 0 { // register shift R>>i return C_SHIFT } else { // memory address with shifted offset R>>i(R) return C_SHIFTADDR } case obj.TYPE_MEM: switch a.Name { case obj.NAME_EXTERN, obj.NAME_GOTREF, obj.NAME_STATIC: if a.Sym == nil || a.Sym.Name == "" { fmt.Printf("null sym external\n") return C_GOK } c.instoffset = 0 // s.b. 
unused but just in case if a.Sym.Type == objabi.STLSBSS { if c.ctxt.Flag_shared { return C_TLS_IE } else { return C_TLS_LE } } return C_ADDR case obj.NAME_AUTO: if a.Reg == REGSP { // unset base register for better printing, since // a.Offset is still relative to pseudo-SP. a.Reg = obj.REG_NONE } c.instoffset = c.autosize + a.Offset if t := immaddr(int32(c.instoffset)); t != 0 { if immhalf(int32(c.instoffset)) { if immfloat(t) { return C_HFAUTO } return C_HAUTO } if immfloat(t) { return C_FAUTO } return C_SAUTO } return C_LAUTO case obj.NAME_PARAM: if a.Reg == REGSP { // unset base register for better printing, since // a.Offset is still relative to pseudo-FP. a.Reg = obj.REG_NONE } c.instoffset = c.autosize + a.Offset + 4 if t := immaddr(int32(c.instoffset)); t != 0 { if immhalf(int32(c.instoffset)) { if immfloat(t) { return C_HFAUTO } return C_HAUTO } if immfloat(t) { return C_FAUTO } return C_SAUTO } return C_LAUTO case obj.NAME_NONE: c.instoffset = a.Offset if t := immaddr(int32(c.instoffset)); t != 0 { if immhalf(int32(c.instoffset)) { /* n.b. that it will also satisfy immrot */ if immfloat(t) { return C_HFOREG } return C_HOREG } if immfloat(t) { return C_FOREG /* n.b. that it will also satisfy immrot */ } if immrot(uint32(c.instoffset)) != 0 { return C_SROREG } if immhalf(int32(c.instoffset)) { return C_HOREG } return C_SOREG } if immrot(uint32(c.instoffset)) != 0 { return C_ROREG } return C_LOREG } return C_GOK case obj.TYPE_FCONST: if c.chipzero5(a.Val.(float64)) >= 0 {
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
true
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/twitchyliquid64/golang-asm/obj/arm/anames5.go
vendor/github.com/twitchyliquid64/golang-asm/obj/arm/anames5.go
// Copyright 2015 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package arm var cnames5 = []string{ "NONE", "REG", "REGREG", "REGREG2", "REGLIST", "SHIFT", "SHIFTADDR", "FREG", "PSR", "FCR", "SPR", "RCON", "NCON", "RCON2A", "RCON2S", "SCON", "LCON", "LCONADDR", "ZFCON", "SFCON", "LFCON", "RACON", "LACON", "SBRA", "LBRA", "HAUTO", "FAUTO", "HFAUTO", "SAUTO", "LAUTO", "HOREG", "FOREG", "HFOREG", "SOREG", "ROREG", "SROREG", "LOREG", "PC", "SP", "HREG", "ADDR", "C_TLS_LE", "C_TLS_IE", "TEXTSIZE", "GOK", "NCLASS", "SCOND = (1<<4)-1", "SBIT = 1<<4", "PBIT = 1<<5", "WBIT = 1<<6", "FBIT = 1<<7", "UBIT = 1<<7", "SCOND_XOR = 14", "SCOND_EQ = 0 ^ C_SCOND_XOR", "SCOND_NE = 1 ^ C_SCOND_XOR", "SCOND_HS = 2 ^ C_SCOND_XOR", "SCOND_LO = 3 ^ C_SCOND_XOR", "SCOND_MI = 4 ^ C_SCOND_XOR", "SCOND_PL = 5 ^ C_SCOND_XOR", "SCOND_VS = 6 ^ C_SCOND_XOR", "SCOND_VC = 7 ^ C_SCOND_XOR", "SCOND_HI = 8 ^ C_SCOND_XOR", "SCOND_LS = 9 ^ C_SCOND_XOR", "SCOND_GE = 10 ^ C_SCOND_XOR", "SCOND_LT = 11 ^ C_SCOND_XOR", "SCOND_GT = 12 ^ C_SCOND_XOR", "SCOND_LE = 13 ^ C_SCOND_XOR", "SCOND_NONE = 14 ^ C_SCOND_XOR", "SCOND_NV = 15 ^ C_SCOND_XOR", }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/twitchyliquid64/golang-asm/obj/s390x/asmz.go
vendor/github.com/twitchyliquid64/golang-asm/obj/s390x/asmz.go
// Based on cmd/internal/obj/ppc64/asm9.go. // // Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. // Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) // Portions Copyright © 1997-1999 Vita Nuova Limited // Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com) // Portions Copyright © 2004,2006 Bruce Ellis // Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) // Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others // Portions Copyright © 2009 The Go Authors. All rights reserved. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. package s390x import ( "github.com/twitchyliquid64/golang-asm/obj" "github.com/twitchyliquid64/golang-asm/objabi" "fmt" "log" "math" "sort" ) // ctxtz holds state while assembling a single function. // Each function gets a fresh ctxtz. // This allows for multiple functions to be safely concurrently assembled. 
type ctxtz struct { ctxt *obj.Link newprog obj.ProgAlloc cursym *obj.LSym autosize int32 instoffset int64 pc int64 } // instruction layout. const ( funcAlign = 16 ) type Optab struct { as obj.As // opcode i uint8 // handler index a1 uint8 // From a2 uint8 // Reg a3 uint8 // RestArgs[0] a4 uint8 // RestArgs[1] a5 uint8 // RestArgs[2] a6 uint8 // To } var optab = []Optab{ // zero-length instructions {i: 0, as: obj.ATEXT, a1: C_ADDR, a6: C_TEXTSIZE}, {i: 0, as: obj.ATEXT, a1: C_ADDR, a3: C_LCON, a6: C_TEXTSIZE}, {i: 0, as: obj.APCDATA, a1: C_LCON, a6: C_LCON}, {i: 0, as: obj.AFUNCDATA, a1: C_SCON, a6: C_ADDR}, {i: 0, as: obj.ANOP}, {i: 0, as: obj.ANOP, a1: C_SAUTO}, // move register {i: 1, as: AMOVD, a1: C_REG, a6: C_REG}, {i: 1, as: AMOVB, a1: C_REG, a6: C_REG}, {i: 1, as: AMOVBZ, a1: C_REG, a6: C_REG}, {i: 1, as: AMOVW, a1: C_REG, a6: C_REG}, {i: 1, as: AMOVWZ, a1: C_REG, a6: C_REG}, {i: 1, as: AFMOVD, a1: C_FREG, a6: C_FREG}, {i: 1, as: AMOVDBR, a1: C_REG, a6: C_REG}, // load constant {i: 26, as: AMOVD, a1: C_LACON, a6: C_REG}, {i: 26, as: AMOVW, a1: C_LACON, a6: C_REG}, {i: 26, as: AMOVWZ, a1: C_LACON, a6: C_REG}, {i: 3, as: AMOVD, a1: C_DCON, a6: C_REG}, {i: 3, as: AMOVW, a1: C_DCON, a6: C_REG}, {i: 3, as: AMOVWZ, a1: C_DCON, a6: C_REG}, {i: 3, as: AMOVB, a1: C_DCON, a6: C_REG}, {i: 3, as: AMOVBZ, a1: C_DCON, a6: C_REG}, // store constant {i: 72, as: AMOVD, a1: C_SCON, a6: C_LAUTO}, {i: 72, as: AMOVD, a1: C_ADDCON, a6: C_LAUTO}, {i: 72, as: AMOVW, a1: C_SCON, a6: C_LAUTO}, {i: 72, as: AMOVW, a1: C_ADDCON, a6: C_LAUTO}, {i: 72, as: AMOVWZ, a1: C_SCON, a6: C_LAUTO}, {i: 72, as: AMOVWZ, a1: C_ADDCON, a6: C_LAUTO}, {i: 72, as: AMOVB, a1: C_SCON, a6: C_LAUTO}, {i: 72, as: AMOVB, a1: C_ADDCON, a6: C_LAUTO}, {i: 72, as: AMOVBZ, a1: C_SCON, a6: C_LAUTO}, {i: 72, as: AMOVBZ, a1: C_ADDCON, a6: C_LAUTO}, {i: 72, as: AMOVD, a1: C_SCON, a6: C_LOREG}, {i: 72, as: AMOVD, a1: C_ADDCON, a6: C_LOREG}, {i: 72, as: AMOVW, a1: C_SCON, a6: C_LOREG}, {i: 72, as: AMOVW, a1: C_ADDCON, 
a6: C_LOREG}, {i: 72, as: AMOVWZ, a1: C_SCON, a6: C_LOREG}, {i: 72, as: AMOVWZ, a1: C_ADDCON, a6: C_LOREG}, {i: 72, as: AMOVB, a1: C_SCON, a6: C_LOREG}, {i: 72, as: AMOVB, a1: C_ADDCON, a6: C_LOREG}, {i: 72, as: AMOVBZ, a1: C_SCON, a6: C_LOREG}, {i: 72, as: AMOVBZ, a1: C_ADDCON, a6: C_LOREG}, // store {i: 35, as: AMOVD, a1: C_REG, a6: C_LAUTO}, {i: 35, as: AMOVW, a1: C_REG, a6: C_LAUTO}, {i: 35, as: AMOVWZ, a1: C_REG, a6: C_LAUTO}, {i: 35, as: AMOVBZ, a1: C_REG, a6: C_LAUTO}, {i: 35, as: AMOVB, a1: C_REG, a6: C_LAUTO}, {i: 35, as: AMOVDBR, a1: C_REG, a6: C_LAUTO}, {i: 35, as: AMOVHBR, a1: C_REG, a6: C_LAUTO}, {i: 35, as: AMOVD, a1: C_REG, a6: C_LOREG}, {i: 35, as: AMOVW, a1: C_REG, a6: C_LOREG}, {i: 35, as: AMOVWZ, a1: C_REG, a6: C_LOREG}, {i: 35, as: AMOVBZ, a1: C_REG, a6: C_LOREG}, {i: 35, as: AMOVB, a1: C_REG, a6: C_LOREG}, {i: 35, as: AMOVDBR, a1: C_REG, a6: C_LOREG}, {i: 35, as: AMOVHBR, a1: C_REG, a6: C_LOREG}, {i: 74, as: AMOVD, a1: C_REG, a6: C_ADDR}, {i: 74, as: AMOVW, a1: C_REG, a6: C_ADDR}, {i: 74, as: AMOVWZ, a1: C_REG, a6: C_ADDR}, {i: 74, as: AMOVBZ, a1: C_REG, a6: C_ADDR}, {i: 74, as: AMOVB, a1: C_REG, a6: C_ADDR}, // load {i: 36, as: AMOVD, a1: C_LAUTO, a6: C_REG}, {i: 36, as: AMOVW, a1: C_LAUTO, a6: C_REG}, {i: 36, as: AMOVWZ, a1: C_LAUTO, a6: C_REG}, {i: 36, as: AMOVBZ, a1: C_LAUTO, a6: C_REG}, {i: 36, as: AMOVB, a1: C_LAUTO, a6: C_REG}, {i: 36, as: AMOVDBR, a1: C_LAUTO, a6: C_REG}, {i: 36, as: AMOVHBR, a1: C_LAUTO, a6: C_REG}, {i: 36, as: AMOVD, a1: C_LOREG, a6: C_REG}, {i: 36, as: AMOVW, a1: C_LOREG, a6: C_REG}, {i: 36, as: AMOVWZ, a1: C_LOREG, a6: C_REG}, {i: 36, as: AMOVBZ, a1: C_LOREG, a6: C_REG}, {i: 36, as: AMOVB, a1: C_LOREG, a6: C_REG}, {i: 36, as: AMOVDBR, a1: C_LOREG, a6: C_REG}, {i: 36, as: AMOVHBR, a1: C_LOREG, a6: C_REG}, {i: 75, as: AMOVD, a1: C_ADDR, a6: C_REG}, {i: 75, as: AMOVW, a1: C_ADDR, a6: C_REG}, {i: 75, as: AMOVWZ, a1: C_ADDR, a6: C_REG}, {i: 75, as: AMOVBZ, a1: C_ADDR, a6: C_REG}, {i: 75, as: AMOVB, a1: C_ADDR, a6: 
C_REG}, // interlocked load and op {i: 99, as: ALAAG, a1: C_REG, a2: C_REG, a6: C_LOREG}, // integer arithmetic {i: 2, as: AADD, a1: C_REG, a2: C_REG, a6: C_REG}, {i: 2, as: AADD, a1: C_REG, a6: C_REG}, {i: 22, as: AADD, a1: C_LCON, a2: C_REG, a6: C_REG}, {i: 22, as: AADD, a1: C_LCON, a6: C_REG}, {i: 12, as: AADD, a1: C_LOREG, a6: C_REG}, {i: 12, as: AADD, a1: C_LAUTO, a6: C_REG}, {i: 21, as: ASUB, a1: C_LCON, a2: C_REG, a6: C_REG}, {i: 21, as: ASUB, a1: C_LCON, a6: C_REG}, {i: 12, as: ASUB, a1: C_LOREG, a6: C_REG}, {i: 12, as: ASUB, a1: C_LAUTO, a6: C_REG}, {i: 4, as: AMULHD, a1: C_REG, a6: C_REG}, {i: 4, as: AMULHD, a1: C_REG, a2: C_REG, a6: C_REG}, {i: 62, as: AMLGR, a1: C_REG, a6: C_REG}, {i: 2, as: ADIVW, a1: C_REG, a2: C_REG, a6: C_REG}, {i: 2, as: ADIVW, a1: C_REG, a6: C_REG}, {i: 10, as: ASUB, a1: C_REG, a2: C_REG, a6: C_REG}, {i: 10, as: ASUB, a1: C_REG, a6: C_REG}, {i: 47, as: ANEG, a1: C_REG, a6: C_REG}, {i: 47, as: ANEG, a6: C_REG}, // integer logical {i: 6, as: AAND, a1: C_REG, a2: C_REG, a6: C_REG}, {i: 6, as: AAND, a1: C_REG, a6: C_REG}, {i: 23, as: AAND, a1: C_LCON, a6: C_REG}, {i: 12, as: AAND, a1: C_LOREG, a6: C_REG}, {i: 12, as: AAND, a1: C_LAUTO, a6: C_REG}, {i: 6, as: AANDW, a1: C_REG, a2: C_REG, a6: C_REG}, {i: 6, as: AANDW, a1: C_REG, a6: C_REG}, {i: 24, as: AANDW, a1: C_LCON, a6: C_REG}, {i: 12, as: AANDW, a1: C_LOREG, a6: C_REG}, {i: 12, as: AANDW, a1: C_LAUTO, a6: C_REG}, {i: 7, as: ASLD, a1: C_REG, a6: C_REG}, {i: 7, as: ASLD, a1: C_REG, a2: C_REG, a6: C_REG}, {i: 7, as: ASLD, a1: C_SCON, a2: C_REG, a6: C_REG}, {i: 7, as: ASLD, a1: C_SCON, a6: C_REG}, {i: 13, as: ARNSBG, a1: C_SCON, a3: C_SCON, a4: C_SCON, a5: C_REG, a6: C_REG}, // compare and swap {i: 79, as: ACSG, a1: C_REG, a2: C_REG, a6: C_SOREG}, // floating point {i: 32, as: AFADD, a1: C_FREG, a6: C_FREG}, {i: 33, as: AFABS, a1: C_FREG, a6: C_FREG}, {i: 33, as: AFABS, a6: C_FREG}, {i: 34, as: AFMADD, a1: C_FREG, a2: C_FREG, a6: C_FREG}, {i: 32, as: AFMUL, a1: C_FREG, a6: C_FREG}, 
{i: 36, as: AFMOVD, a1: C_LAUTO, a6: C_FREG}, {i: 36, as: AFMOVD, a1: C_LOREG, a6: C_FREG}, {i: 75, as: AFMOVD, a1: C_ADDR, a6: C_FREG}, {i: 35, as: AFMOVD, a1: C_FREG, a6: C_LAUTO}, {i: 35, as: AFMOVD, a1: C_FREG, a6: C_LOREG}, {i: 74, as: AFMOVD, a1: C_FREG, a6: C_ADDR}, {i: 67, as: AFMOVD, a1: C_ZCON, a6: C_FREG}, {i: 81, as: ALDGR, a1: C_REG, a6: C_FREG}, {i: 81, as: ALGDR, a1: C_FREG, a6: C_REG}, {i: 82, as: ACEFBRA, a1: C_REG, a6: C_FREG}, {i: 83, as: ACFEBRA, a1: C_FREG, a6: C_REG}, {i: 48, as: AFIEBR, a1: C_SCON, a2: C_FREG, a6: C_FREG}, {i: 49, as: ACPSDR, a1: C_FREG, a2: C_FREG, a6: C_FREG}, {i: 50, as: ALTDBR, a1: C_FREG, a6: C_FREG}, {i: 51, as: ATCDB, a1: C_FREG, a6: C_SCON}, // load symbol address (plus offset) {i: 19, as: AMOVD, a1: C_SYMADDR, a6: C_REG}, {i: 93, as: AMOVD, a1: C_GOTADDR, a6: C_REG}, {i: 94, as: AMOVD, a1: C_TLS_LE, a6: C_REG}, {i: 95, as: AMOVD, a1: C_TLS_IE, a6: C_REG}, // system call {i: 5, as: ASYSCALL}, {i: 77, as: ASYSCALL, a1: C_SCON}, // branch {i: 16, as: ABEQ, a6: C_SBRA}, {i: 16, as: ABRC, a1: C_SCON, a6: C_SBRA}, {i: 11, as: ABR, a6: C_LBRA}, {i: 16, as: ABC, a1: C_SCON, a2: C_REG, a6: C_LBRA}, {i: 18, as: ABR, a6: C_REG}, {i: 18, as: ABR, a1: C_REG, a6: C_REG}, {i: 15, as: ABR, a6: C_ZOREG}, {i: 15, as: ABC, a6: C_ZOREG}, // compare and branch {i: 89, as: ACGRJ, a1: C_SCON, a2: C_REG, a3: C_REG, a6: C_SBRA}, {i: 89, as: ACMPBEQ, a1: C_REG, a2: C_REG, a6: C_SBRA}, {i: 89, as: ACLGRJ, a1: C_SCON, a2: C_REG, a3: C_REG, a6: C_SBRA}, {i: 89, as: ACMPUBEQ, a1: C_REG, a2: C_REG, a6: C_SBRA}, {i: 90, as: ACGIJ, a1: C_SCON, a2: C_REG, a3: C_ADDCON, a6: C_SBRA}, {i: 90, as: ACGIJ, a1: C_SCON, a2: C_REG, a3: C_SCON, a6: C_SBRA}, {i: 90, as: ACMPBEQ, a1: C_REG, a3: C_ADDCON, a6: C_SBRA}, {i: 90, as: ACMPBEQ, a1: C_REG, a3: C_SCON, a6: C_SBRA}, {i: 90, as: ACLGIJ, a1: C_SCON, a2: C_REG, a3: C_ADDCON, a6: C_SBRA}, {i: 90, as: ACMPUBEQ, a1: C_REG, a3: C_ANDCON, a6: C_SBRA}, // branch on count {i: 41, as: ABRCT, a1: C_REG, a6: C_SBRA}, 
{i: 41, as: ABRCTG, a1: C_REG, a6: C_SBRA}, // move on condition {i: 17, as: AMOVDEQ, a1: C_REG, a6: C_REG}, // load on condition {i: 25, as: ALOCGR, a1: C_SCON, a2: C_REG, a6: C_REG}, // find leftmost one {i: 8, as: AFLOGR, a1: C_REG, a6: C_REG}, // population count {i: 9, as: APOPCNT, a1: C_REG, a6: C_REG}, // compare {i: 70, as: ACMP, a1: C_REG, a6: C_REG}, {i: 71, as: ACMP, a1: C_REG, a6: C_LCON}, {i: 70, as: ACMPU, a1: C_REG, a6: C_REG}, {i: 71, as: ACMPU, a1: C_REG, a6: C_LCON}, {i: 70, as: AFCMPO, a1: C_FREG, a6: C_FREG}, {i: 70, as: AFCMPO, a1: C_FREG, a2: C_REG, a6: C_FREG}, // test under mask {i: 91, as: ATMHH, a1: C_REG, a6: C_ANDCON}, // insert program mask {i: 92, as: AIPM, a1: C_REG}, // set program mask {i: 76, as: ASPM, a1: C_REG}, // 32-bit access registers {i: 68, as: AMOVW, a1: C_AREG, a6: C_REG}, {i: 68, as: AMOVWZ, a1: C_AREG, a6: C_REG}, {i: 69, as: AMOVW, a1: C_REG, a6: C_AREG}, {i: 69, as: AMOVWZ, a1: C_REG, a6: C_AREG}, // macros {i: 96, as: ACLEAR, a1: C_LCON, a6: C_LOREG}, {i: 96, as: ACLEAR, a1: C_LCON, a6: C_LAUTO}, // load/store multiple {i: 97, as: ASTMG, a1: C_REG, a2: C_REG, a6: C_LOREG}, {i: 97, as: ASTMG, a1: C_REG, a2: C_REG, a6: C_LAUTO}, {i: 98, as: ALMG, a1: C_LOREG, a2: C_REG, a6: C_REG}, {i: 98, as: ALMG, a1: C_LAUTO, a2: C_REG, a6: C_REG}, // bytes {i: 40, as: ABYTE, a1: C_SCON}, {i: 40, as: AWORD, a1: C_LCON}, {i: 31, as: ADWORD, a1: C_LCON}, {i: 31, as: ADWORD, a1: C_DCON}, // fast synchronization {i: 80, as: ASYNC}, // store clock {i: 88, as: ASTCK, a6: C_SAUTO}, {i: 88, as: ASTCK, a6: C_SOREG}, // storage and storage {i: 84, as: AMVC, a1: C_SCON, a3: C_LOREG, a6: C_LOREG}, {i: 84, as: AMVC, a1: C_SCON, a3: C_LOREG, a6: C_LAUTO}, {i: 84, as: AMVC, a1: C_SCON, a3: C_LAUTO, a6: C_LAUTO}, // address {i: 85, as: ALARL, a1: C_LCON, a6: C_REG}, {i: 85, as: ALARL, a1: C_SYMADDR, a6: C_REG}, {i: 86, as: ALA, a1: C_SOREG, a6: C_REG}, {i: 86, as: ALA, a1: C_SAUTO, a6: C_REG}, {i: 87, as: AEXRL, a1: C_SYMADDR, a6: C_REG}, // 
undefined (deliberate illegal instruction) {i: 78, as: obj.AUNDEF}, // 2 byte no-operation {i: 66, as: ANOPH}, // vector instructions // VRX store {i: 100, as: AVST, a1: C_VREG, a6: C_SOREG}, {i: 100, as: AVST, a1: C_VREG, a6: C_SAUTO}, {i: 100, as: AVSTEG, a1: C_SCON, a2: C_VREG, a6: C_SOREG}, {i: 100, as: AVSTEG, a1: C_SCON, a2: C_VREG, a6: C_SAUTO}, // VRX load {i: 101, as: AVL, a1: C_SOREG, a6: C_VREG}, {i: 101, as: AVL, a1: C_SAUTO, a6: C_VREG}, {i: 101, as: AVLEG, a1: C_SCON, a3: C_SOREG, a6: C_VREG}, {i: 101, as: AVLEG, a1: C_SCON, a3: C_SAUTO, a6: C_VREG}, // VRV scatter {i: 102, as: AVSCEG, a1: C_SCON, a2: C_VREG, a6: C_SOREG}, {i: 102, as: AVSCEG, a1: C_SCON, a2: C_VREG, a6: C_SAUTO}, // VRV gather {i: 103, as: AVGEG, a1: C_SCON, a3: C_SOREG, a6: C_VREG}, {i: 103, as: AVGEG, a1: C_SCON, a3: C_SAUTO, a6: C_VREG}, // VRS element shift/rotate and load gr to/from vr element {i: 104, as: AVESLG, a1: C_SCON, a2: C_VREG, a6: C_VREG}, {i: 104, as: AVESLG, a1: C_REG, a2: C_VREG, a6: C_VREG}, {i: 104, as: AVESLG, a1: C_SCON, a6: C_VREG}, {i: 104, as: AVESLG, a1: C_REG, a6: C_VREG}, {i: 104, as: AVLGVG, a1: C_SCON, a2: C_VREG, a6: C_REG}, {i: 104, as: AVLGVG, a1: C_REG, a2: C_VREG, a6: C_REG}, {i: 104, as: AVLVGG, a1: C_SCON, a2: C_REG, a6: C_VREG}, {i: 104, as: AVLVGG, a1: C_REG, a2: C_REG, a6: C_VREG}, // VRS store multiple {i: 105, as: AVSTM, a1: C_VREG, a2: C_VREG, a6: C_SOREG}, {i: 105, as: AVSTM, a1: C_VREG, a2: C_VREG, a6: C_SAUTO}, // VRS load multiple {i: 106, as: AVLM, a1: C_SOREG, a2: C_VREG, a6: C_VREG}, {i: 106, as: AVLM, a1: C_SAUTO, a2: C_VREG, a6: C_VREG}, // VRS store with length {i: 107, as: AVSTL, a1: C_REG, a2: C_VREG, a6: C_SOREG}, {i: 107, as: AVSTL, a1: C_REG, a2: C_VREG, a6: C_SAUTO}, // VRS load with length {i: 108, as: AVLL, a1: C_REG, a3: C_SOREG, a6: C_VREG}, {i: 108, as: AVLL, a1: C_REG, a3: C_SAUTO, a6: C_VREG}, // VRI-a {i: 109, as: AVGBM, a1: C_ANDCON, a6: C_VREG}, {i: 109, as: AVZERO, a6: C_VREG}, {i: 109, as: AVREPIG, a1: C_ADDCON, 
a6: C_VREG}, {i: 109, as: AVREPIG, a1: C_SCON, a6: C_VREG}, {i: 109, as: AVLEIG, a1: C_SCON, a3: C_ADDCON, a6: C_VREG}, {i: 109, as: AVLEIG, a1: C_SCON, a3: C_SCON, a6: C_VREG}, // VRI-b generate mask {i: 110, as: AVGMG, a1: C_SCON, a3: C_SCON, a6: C_VREG}, // VRI-c replicate {i: 111, as: AVREPG, a1: C_UCON, a2: C_VREG, a6: C_VREG}, // VRI-d element rotate and insert under mask and // shift left double by byte {i: 112, as: AVERIMG, a1: C_SCON, a2: C_VREG, a3: C_VREG, a6: C_VREG}, {i: 112, as: AVSLDB, a1: C_SCON, a2: C_VREG, a3: C_VREG, a6: C_VREG}, // VRI-d fp test data class immediate {i: 113, as: AVFTCIDB, a1: C_SCON, a2: C_VREG, a6: C_VREG}, // VRR-a load reg {i: 114, as: AVLR, a1: C_VREG, a6: C_VREG}, // VRR-a compare {i: 115, as: AVECG, a1: C_VREG, a6: C_VREG}, // VRR-b {i: 117, as: AVCEQG, a1: C_VREG, a2: C_VREG, a6: C_VREG}, {i: 117, as: AVFAEF, a1: C_VREG, a2: C_VREG, a6: C_VREG}, {i: 117, as: AVPKSG, a1: C_VREG, a2: C_VREG, a6: C_VREG}, // VRR-c {i: 118, as: AVAQ, a1: C_VREG, a2: C_VREG, a6: C_VREG}, {i: 118, as: AVAQ, a1: C_VREG, a6: C_VREG}, {i: 118, as: AVNOT, a1: C_VREG, a6: C_VREG}, {i: 123, as: AVPDI, a1: C_SCON, a2: C_VREG, a3: C_VREG, a6: C_VREG}, // VRR-c shifts {i: 119, as: AVERLLVG, a1: C_VREG, a2: C_VREG, a6: C_VREG}, {i: 119, as: AVERLLVG, a1: C_VREG, a6: C_VREG}, // VRR-d {i: 120, as: AVACQ, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG}, // VRR-e {i: 121, as: AVSEL, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG}, // VRR-f {i: 122, as: AVLVGP, a1: C_REG, a2: C_REG, a6: C_VREG}, } var oprange [ALAST & obj.AMask][]Optab var xcmp [C_NCLASS][C_NCLASS]bool func spanz(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { if ctxt.Retpoline { ctxt.Diag("-spectre=ret not supported on s390x") ctxt.Retpoline = false // don't keep printing } p := cursym.Func.Text if p == nil || p.Link == nil { // handle external functions and ELF section symbols return } if oprange[AORW&obj.AMask] == nil { ctxt.Diag("s390x ops not initialized, call s390x.buildop 
first") } c := ctxtz{ctxt: ctxt, newprog: newprog, cursym: cursym, autosize: int32(p.To.Offset)} buffer := make([]byte, 0) changed := true loop := 0 for changed { if loop > 100 { c.ctxt.Diag("stuck in spanz loop") break } changed = false buffer = buffer[:0] c.cursym.R = make([]obj.Reloc, 0) for p := c.cursym.Func.Text; p != nil; p = p.Link { pc := int64(len(buffer)) if pc != p.Pc { changed = true } p.Pc = pc c.pc = p.Pc c.asmout(p, &buffer) if pc == int64(len(buffer)) { switch p.As { case obj.ANOP, obj.AFUNCDATA, obj.APCDATA, obj.ATEXT: // ok default: c.ctxt.Diag("zero-width instruction\n%v", p) } } } loop++ } c.cursym.Size = int64(len(buffer)) if c.cursym.Size%funcAlign != 0 { c.cursym.Size += funcAlign - (c.cursym.Size % funcAlign) } c.cursym.Grow(c.cursym.Size) copy(c.cursym.P, buffer) // Mark nonpreemptible instruction sequences. // We use REGTMP as a scratch register during call injection, // so instruction sequences that use REGTMP are unsafe to // preempt asynchronously. obj.MarkUnsafePoints(c.ctxt, c.cursym.Func.Text, c.newprog, c.isUnsafePoint, nil) } // Return whether p is an unsafe point. 
func (c *ctxtz) isUnsafePoint(p *obj.Prog) bool { if p.From.Reg == REGTMP || p.To.Reg == REGTMP || p.Reg == REGTMP { return true } for _, a := range p.RestArgs { if a.Reg == REGTMP { return true } } return p.Mark&USETMP != 0 } func isint32(v int64) bool { return int64(int32(v)) == v } func isuint32(v uint64) bool { return uint64(uint32(v)) == v } func (c *ctxtz) aclass(a *obj.Addr) int { switch a.Type { case obj.TYPE_NONE: return C_NONE case obj.TYPE_REG: if REG_R0 <= a.Reg && a.Reg <= REG_R15 { return C_REG } if REG_F0 <= a.Reg && a.Reg <= REG_F15 { return C_FREG } if REG_AR0 <= a.Reg && a.Reg <= REG_AR15 { return C_AREG } if REG_V0 <= a.Reg && a.Reg <= REG_V31 { return C_VREG } return C_GOK case obj.TYPE_MEM: switch a.Name { case obj.NAME_EXTERN, obj.NAME_STATIC: if a.Sym == nil { // must have a symbol break } c.instoffset = a.Offset if a.Sym.Type == objabi.STLSBSS { if c.ctxt.Flag_shared { return C_TLS_IE // initial exec model } return C_TLS_LE // local exec model } return C_ADDR case obj.NAME_GOTREF: return C_GOTADDR case obj.NAME_AUTO: if a.Reg == REGSP { // unset base register for better printing, since // a.Offset is still relative to pseudo-SP. a.Reg = obj.REG_NONE } c.instoffset = int64(c.autosize) + a.Offset if c.instoffset >= -BIG && c.instoffset < BIG { return C_SAUTO } return C_LAUTO case obj.NAME_PARAM: if a.Reg == REGSP { // unset base register for better printing, since // a.Offset is still relative to pseudo-FP. 
a.Reg = obj.REG_NONE } c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.FixedFrameSize() if c.instoffset >= -BIG && c.instoffset < BIG { return C_SAUTO } return C_LAUTO case obj.NAME_NONE: c.instoffset = a.Offset if c.instoffset == 0 { return C_ZOREG } if c.instoffset >= -BIG && c.instoffset < BIG { return C_SOREG } return C_LOREG } return C_GOK case obj.TYPE_TEXTSIZE: return C_TEXTSIZE case obj.TYPE_FCONST: if f64, ok := a.Val.(float64); ok && math.Float64bits(f64) == 0 { return C_ZCON } c.ctxt.Diag("cannot handle the floating point constant %v", a.Val) case obj.TYPE_CONST, obj.TYPE_ADDR: switch a.Name { case obj.NAME_NONE: c.instoffset = a.Offset if a.Reg != 0 { if -BIG <= c.instoffset && c.instoffset <= BIG { return C_SACON } if isint32(c.instoffset) { return C_LACON } return C_DACON } case obj.NAME_EXTERN, obj.NAME_STATIC: s := a.Sym if s == nil { return C_GOK } c.instoffset = a.Offset return C_SYMADDR case obj.NAME_AUTO: if a.Reg == REGSP { // unset base register for better printing, since // a.Offset is still relative to pseudo-SP. a.Reg = obj.REG_NONE } c.instoffset = int64(c.autosize) + a.Offset if c.instoffset >= -BIG && c.instoffset < BIG { return C_SACON } return C_LACON case obj.NAME_PARAM: if a.Reg == REGSP { // unset base register for better printing, since // a.Offset is still relative to pseudo-FP. 
a.Reg = obj.REG_NONE } c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.FixedFrameSize() if c.instoffset >= -BIG && c.instoffset < BIG { return C_SACON } return C_LACON default: return C_GOK } if c.instoffset == 0 { return C_ZCON } if c.instoffset >= 0 { if c.instoffset <= 0x7fff { return C_SCON } if c.instoffset <= 0xffff { return C_ANDCON } if c.instoffset&0xffff == 0 && isuint32(uint64(c.instoffset)) { /* && (instoffset & (1<<31)) == 0) */ return C_UCON } if isint32(c.instoffset) || isuint32(uint64(c.instoffset)) { return C_LCON } return C_DCON } if c.instoffset >= -0x8000 { return C_ADDCON } if c.instoffset&0xffff == 0 && isint32(c.instoffset) { return C_UCON } if isint32(c.instoffset) { return C_LCON } return C_DCON case obj.TYPE_BRANCH: return C_SBRA } return C_GOK } func (c *ctxtz) oplook(p *obj.Prog) *Optab { // Return cached optab entry if available. if p.Optab != 0 { return &optab[p.Optab-1] } if len(p.RestArgs) > 3 { c.ctxt.Diag("too many RestArgs: got %v, maximum is 3\n", len(p.RestArgs)) return nil } // Initialize classes for all arguments. p.From.Class = int8(c.aclass(&p.From) + 1) p.To.Class = int8(c.aclass(&p.To) + 1) for i := range p.RestArgs { p.RestArgs[i].Class = int8(c.aclass(&p.RestArgs[i]) + 1) } // Mirrors the argument list in Optab. args := [...]int8{ p.From.Class - 1, C_NONE, // p.Reg C_NONE, // p.RestArgs[0] C_NONE, // p.RestArgs[1] C_NONE, // p.RestArgs[2] p.To.Class - 1, } // Fill in argument class for p.Reg. switch { case REG_R0 <= p.Reg && p.Reg <= REG_R15: args[1] = C_REG case REG_V0 <= p.Reg && p.Reg <= REG_V31: args[1] = C_VREG case REG_F0 <= p.Reg && p.Reg <= REG_F15: args[1] = C_FREG case REG_AR0 <= p.Reg && p.Reg <= REG_AR15: args[1] = C_AREG } // Fill in argument classes for p.RestArgs. for i, a := range p.RestArgs { args[2+i] = a.Class - 1 } // Lookup op in optab. 
ops := oprange[p.As&obj.AMask] cmp := [len(args)]*[C_NCLASS]bool{} for i := range cmp { cmp[i] = &xcmp[args[i]] } for i := range ops { op := &ops[i] if cmp[0][op.a1] && cmp[1][op.a2] && cmp[2][op.a3] && cmp[3][op.a4] && cmp[4][op.a5] && cmp[5][op.a6] { p.Optab = uint16(cap(optab) - cap(ops) + i + 1) return op } } // Cannot find a case; abort. s := "" for _, a := range args { s += fmt.Sprintf(" %v", DRconv(int(a))) } c.ctxt.Diag("illegal combination %v%v\n", p.As, s) c.ctxt.Diag("prog: %v\n", p) return nil } func cmp(a int, b int) bool { if a == b { return true } switch a { case C_DCON: if b == C_LCON { return true } fallthrough case C_LCON: if b == C_ZCON || b == C_SCON || b == C_UCON || b == C_ADDCON || b == C_ANDCON { return true } case C_ADDCON: if b == C_ZCON || b == C_SCON { return true } case C_ANDCON: if b == C_ZCON || b == C_SCON { return true } case C_UCON: if b == C_ZCON || b == C_SCON { return true } case C_SCON: if b == C_ZCON { return true } case C_LACON: if b == C_SACON { return true } case C_LBRA: if b == C_SBRA { return true } case C_LAUTO: if b == C_SAUTO { return true } case C_LOREG: if b == C_ZOREG || b == C_SOREG { return true } case C_SOREG: if b == C_ZOREG { return true } case C_ANY: return true } return false } type ocmp []Optab func (x ocmp) Len() int { return len(x) } func (x ocmp) Swap(i, j int) { x[i], x[j] = x[j], x[i] } func (x ocmp) Less(i, j int) bool { p1 := &x[i] p2 := &x[j] n := int(p1.as) - int(p2.as) if n != 0 { return n < 0 } n = int(p1.a1) - int(p2.a1) if n != 0 { return n < 0 } n = int(p1.a2) - int(p2.a2) if n != 0 { return n < 0 } n = int(p1.a3) - int(p2.a3) if n != 0 { return n < 0 } n = int(p1.a4) - int(p2.a4) if n != 0 { return n < 0 } return false } func opset(a, b obj.As) { oprange[a&obj.AMask] = oprange[b&obj.AMask] } func buildop(ctxt *obj.Link) { if oprange[AORW&obj.AMask] != nil { // Already initialized; stop now. // This happens in the cmd/asm tests, // each of which re-initializes the arch. 
return } for i := 0; i < C_NCLASS; i++ { for n := 0; n < C_NCLASS; n++ { if cmp(n, i) { xcmp[i][n] = true } } } sort.Sort(ocmp(optab)) for i := 0; i < len(optab); i++ { r := optab[i].as start := i for ; i+1 < len(optab); i++ { if optab[i+1].as != r { break } } oprange[r&obj.AMask] = optab[start : i+1] // opset() aliases optab ranges for similar instructions, to reduce the number of optabs in the array. // oprange[] is used by oplook() to find the Optab entry that applies to a given Prog. switch r { case AADD: opset(AADDC, r) opset(AADDW, r) opset(AADDE, r) opset(AMULLD, r) opset(AMULLW, r) case ADIVW: opset(ADIVD, r) opset(ADIVDU, r) opset(ADIVWU, r) opset(AMODD, r) opset(AMODDU, r) opset(AMODW, r) opset(AMODWU, r) case AMULHD: opset(AMULHDU, r) case AMOVBZ: opset(AMOVH, r) opset(AMOVHZ, r) case ALA: opset(ALAY, r) case AMVC: opset(AMVCIN, r) opset(ACLC, r) opset(AXC, r) opset(AOC, r) opset(ANC, r) case ASTCK: opset(ASTCKC, r) opset(ASTCKE, r) opset(ASTCKF, r) case ALAAG: opset(ALAA, r) opset(ALAAL, r) opset(ALAALG, r) opset(ALAN, r) opset(ALANG, r) opset(ALAX, r) opset(ALAXG, r) opset(ALAO, r) opset(ALAOG, r) case ASTMG: opset(ASTMY, r) case ALMG: opset(ALMY, r) case ABEQ: opset(ABGE, r) opset(ABGT, r) opset(ABLE, r) opset(ABLT, r) opset(ABNE, r) opset(ABVC, r) opset(ABVS, r) opset(ABLEU, r) opset(ABLTU, r) case ABR: opset(ABL, r) case ABC: opset(ABCL, r) case AFABS: opset(AFNABS, r) opset(ALPDFR, r) opset(ALNDFR, r) opset(AFNEG, r) opset(AFNEGS, r) opset(ALEDBR, r) opset(ALDEBR, r) opset(AFSQRT, r) opset(AFSQRTS, r) case AFADD: opset(AFADDS, r) opset(AFDIV, r) opset(AFDIVS, r) opset(AFSUB, r) opset(AFSUBS, r) case AFMADD: opset(AFMADDS, r) opset(AFMSUB, r) opset(AFMSUBS, r) case AFMUL: opset(AFMULS, r) case AFCMPO: opset(AFCMPU, r) opset(ACEBR, r) case AAND: opset(AOR, r) opset(AXOR, r) case AANDW: opset(AORW, r) opset(AXORW, r) case ASLD: opset(ASRD, r) opset(ASLW, r) opset(ASRW, r) opset(ASRAD, r) opset(ASRAW, r) opset(ARLL, r) opset(ARLLG, r) case ARNSBG: 
opset(ARXSBG, r) opset(AROSBG, r) opset(ARNSBGT, r) opset(ARXSBGT, r) opset(AROSBGT, r) opset(ARISBG, r) opset(ARISBGN, r) opset(ARISBGZ, r) opset(ARISBGNZ, r) opset(ARISBHG, r) opset(ARISBLG, r) opset(ARISBHGZ, r) opset(ARISBLGZ, r) case ACSG: opset(ACS, r) case ASUB: opset(ASUBC, r) opset(ASUBE, r) opset(ASUBW, r) case ANEG: opset(ANEGW, r) case AFMOVD: opset(AFMOVS, r) case AMOVDBR: opset(AMOVWBR, r) case ACMP: opset(ACMPW, r) case ACMPU: opset(ACMPWU, r) case ATMHH: opset(ATMHL, r) opset(ATMLH, r) opset(ATMLL, r) case ACEFBRA: opset(ACDFBRA, r) opset(ACEGBRA, r) opset(ACDGBRA, r) opset(ACELFBR, r) opset(ACDLFBR, r) opset(ACELGBR, r) opset(ACDLGBR, r) case ACFEBRA: opset(ACFDBRA, r) opset(ACGEBRA, r) opset(ACGDBRA, r) opset(ACLFEBR, r) opset(ACLFDBR, r) opset(ACLGEBR, r) opset(ACLGDBR, r) case AFIEBR: opset(AFIDBR, r) case ACMPBEQ: opset(ACMPBGE, r) opset(ACMPBGT, r) opset(ACMPBLE, r) opset(ACMPBLT, r) opset(ACMPBNE, r) case ACMPUBEQ: opset(ACMPUBGE, r) opset(ACMPUBGT, r) opset(ACMPUBLE, r) opset(ACMPUBLT, r) opset(ACMPUBNE, r) case ACGRJ: opset(ACRJ, r) case ACLGRJ: opset(ACLRJ, r) case ACGIJ: opset(ACIJ, r) case ACLGIJ: opset(ACLIJ, r) case AMOVDEQ: opset(AMOVDGE, r) opset(AMOVDGT, r) opset(AMOVDLE, r) opset(AMOVDLT, r) opset(AMOVDNE, r) case ALOCGR: opset(ALOCR, r) case ALTDBR: opset(ALTEBR, r) case ATCDB: opset(ATCEB, r) case AVL: opset(AVLLEZB, r) opset(AVLLEZH, r) opset(AVLLEZF, r) opset(AVLLEZG, r) opset(AVLREPB, r) opset(AVLREPH, r) opset(AVLREPF, r) opset(AVLREPG, r) case AVLEG: opset(AVLBB, r) opset(AVLEB, r) opset(AVLEH, r) opset(AVLEF, r) opset(AVLEG, r) opset(AVLREP, r) case AVSTEG: opset(AVSTEB, r) opset(AVSTEH, r) opset(AVSTEF, r) case AVSCEG: opset(AVSCEF, r) case AVGEG: opset(AVGEF, r) case AVESLG: opset(AVESLB, r) opset(AVESLH, r) opset(AVESLF, r) opset(AVERLLB, r) opset(AVERLLH, r) opset(AVERLLF, r) opset(AVERLLG, r) opset(AVESRAB, r) opset(AVESRAH, r) opset(AVESRAF, r) opset(AVESRAG, r) opset(AVESRLB, r) opset(AVESRLH, r) opset(AVESRLF, r) 
opset(AVESRLG, r) case AVLGVG: opset(AVLGVB, r) opset(AVLGVH, r) opset(AVLGVF, r) case AVLVGG: opset(AVLVGB, r) opset(AVLVGH, r) opset(AVLVGF, r) case AVZERO: opset(AVONE, r) case AVREPIG: opset(AVREPIB, r) opset(AVREPIH, r) opset(AVREPIF, r) case AVLEIG: opset(AVLEIB, r) opset(AVLEIH, r) opset(AVLEIF, r) case AVGMG: opset(AVGMB, r) opset(AVGMH, r) opset(AVGMF, r) case AVREPG: opset(AVREPB, r) opset(AVREPH, r) opset(AVREPF, r) case AVERIMG: opset(AVERIMB, r) opset(AVERIMH, r) opset(AVERIMF, r) case AVFTCIDB: opset(AWFTCIDB, r) case AVLR: opset(AVUPHB, r) opset(AVUPHH, r) opset(AVUPHF, r) opset(AVUPLHB, r) opset(AVUPLHH, r) opset(AVUPLHF, r) opset(AVUPLB, r) opset(AVUPLHW, r) opset(AVUPLF, r) opset(AVUPLLB, r) opset(AVUPLLH, r) opset(AVUPLLF, r) opset(AVCLZB, r) opset(AVCLZH, r) opset(AVCLZF, r) opset(AVCLZG, r) opset(AVCTZB, r) opset(AVCTZH, r) opset(AVCTZF, r) opset(AVCTZG, r) opset(AVLDEB, r) opset(AWLDEB, r) opset(AVFLCDB, r) opset(AWFLCDB, r) opset(AVFLNDB, r) opset(AWFLNDB, r) opset(AVFLPDB, r) opset(AWFLPDB, r) opset(AVFSQDB, r) opset(AWFSQDB, r) opset(AVISTRB, r) opset(AVISTRH, r) opset(AVISTRF, r) opset(AVISTRBS, r) opset(AVISTRHS, r) opset(AVISTRFS, r) opset(AVLCB, r) opset(AVLCH, r) opset(AVLCF, r) opset(AVLCG, r) opset(AVLPB, r) opset(AVLPH, r) opset(AVLPF, r) opset(AVLPG, r) opset(AVPOPCT, r) opset(AVSEGB, r) opset(AVSEGH, r) opset(AVSEGF, r) case AVECG: opset(AVECB, r) opset(AVECH, r) opset(AVECF, r) opset(AVECLB, r) opset(AVECLH, r) opset(AVECLF, r) opset(AVECLG, r) opset(AWFCDB, r) opset(AWFKDB, r) case AVCEQG: opset(AVCEQB, r) opset(AVCEQH, r) opset(AVCEQF, r) opset(AVCEQBS, r) opset(AVCEQHS, r) opset(AVCEQFS, r) opset(AVCEQGS, r) opset(AVCHB, r) opset(AVCHH, r) opset(AVCHF, r) opset(AVCHG, r) opset(AVCHBS, r) opset(AVCHHS, r) opset(AVCHFS, r) opset(AVCHGS, r) opset(AVCHLB, r) opset(AVCHLH, r) opset(AVCHLF, r) opset(AVCHLG, r) opset(AVCHLBS, r) opset(AVCHLHS, r) opset(AVCHLFS, r) opset(AVCHLGS, r) case AVFAEF: opset(AVFAEB, r) opset(AVFAEH, r) 
opset(AVFAEBS, r) opset(AVFAEHS, r) opset(AVFAEFS, r) opset(AVFAEZB, r) opset(AVFAEZH, r) opset(AVFAEZF, r) opset(AVFAEZBS, r) opset(AVFAEZHS, r) opset(AVFAEZFS, r) opset(AVFEEB, r) opset(AVFEEH, r) opset(AVFEEF, r) opset(AVFEEBS, r) opset(AVFEEHS, r) opset(AVFEEFS, r) opset(AVFEEZB, r) opset(AVFEEZH, r) opset(AVFEEZF, r) opset(AVFEEZBS, r) opset(AVFEEZHS, r) opset(AVFEEZFS, r) opset(AVFENEB, r) opset(AVFENEH, r) opset(AVFENEF, r) opset(AVFENEBS, r) opset(AVFENEHS, r) opset(AVFENEFS, r) opset(AVFENEZB, r) opset(AVFENEZH, r) opset(AVFENEZF, r) opset(AVFENEZBS, r) opset(AVFENEZHS, r) opset(AVFENEZFS, r) case AVPKSG: opset(AVPKSH, r) opset(AVPKSF, r) opset(AVPKSHS, r) opset(AVPKSFS, r)
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
true
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/twitchyliquid64/golang-asm/obj/s390x/a.out.go
vendor/github.com/twitchyliquid64/golang-asm/obj/s390x/a.out.go
// Based on cmd/internal/obj/ppc64/a.out.go. // // Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. // Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) // Portions Copyright © 1997-1999 Vita Nuova Limited // Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com) // Portions Copyright © 2004,2006 Bruce Ellis // Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) // Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others // Portions Copyright © 2009 The Go Authors. All rights reserved. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. package s390x import "github.com/twitchyliquid64/golang-asm/obj" //go:generate go run ../stringer.go -i $GOFILE -o anames.go -p s390x const ( NSNAME = 8 NSYM = 50 NREG = 16 // number of general purpose registers NFREG = 16 // number of floating point registers ) const ( // General purpose registers (GPRs). 
REG_R0 = obj.RBaseS390X + iota REG_R1 REG_R2 REG_R3 REG_R4 REG_R5 REG_R6 REG_R7 REG_R8 REG_R9 REG_R10 REG_R11 REG_R12 REG_R13 REG_R14 REG_R15 // Floating point registers (FPRs). REG_F0 REG_F1 REG_F2 REG_F3 REG_F4 REG_F5 REG_F6 REG_F7 REG_F8 REG_F9 REG_F10 REG_F11 REG_F12 REG_F13 REG_F14 REG_F15 // Vector registers (VRs) - only available when the vector // facility is installed. // V0-V15 are aliases for F0-F15. // We keep them in a separate space to make printing etc. easier // If the code generator ever emits vector instructions it will // need to take into account the aliasing. REG_V0 REG_V1 REG_V2 REG_V3 REG_V4 REG_V5 REG_V6 REG_V7 REG_V8 REG_V9 REG_V10 REG_V11 REG_V12 REG_V13 REG_V14 REG_V15 REG_V16 REG_V17 REG_V18 REG_V19 REG_V20 REG_V21 REG_V22 REG_V23 REG_V24 REG_V25 REG_V26 REG_V27 REG_V28 REG_V29 REG_V30 REG_V31 // Access registers (ARs). // The thread pointer is typically stored in the register pair // AR0 and AR1. REG_AR0 REG_AR1 REG_AR2 REG_AR3 REG_AR4 REG_AR5 REG_AR6 REG_AR7 REG_AR8 REG_AR9 REG_AR10 REG_AR11 REG_AR12 REG_AR13 REG_AR14 REG_AR15 REG_RESERVED // end of allocated registers REGARG = -1 // -1 disables passing the first argument in register REGRT1 = REG_R3 // used during zeroing of the stack - not reserved REGRT2 = REG_R4 // used during zeroing of the stack - not reserved REGTMP = REG_R10 // scratch register used in the assembler and linker REGTMP2 = REG_R11 // scratch register used in the assembler and linker REGCTXT = REG_R12 // context for closures REGG = REG_R13 // G REG_LR = REG_R14 // link register REGSP = REG_R15 // stack pointer ) // LINUX for zSeries ELF Application Binary Interface Supplement // https://refspecs.linuxfoundation.org/ELF/zSeries/lzsabi0_zSeries/x1472.html var S390XDWARFRegisters = map[int16]int16{} func init() { // f assigns dwarfregisters[from:to by step] = (base):((to-from)/step+base) f := func(from, step, to, base int16) { for r := int16(from); r <= to; r += step { S390XDWARFRegisters[r] = (r-from)/step + base } } 
f(REG_R0, 1, REG_R15, 0) f(REG_F0, 2, REG_F6, 16) f(REG_F1, 2, REG_F7, 20) f(REG_F8, 2, REG_F14, 24) f(REG_F9, 2, REG_F15, 28) f(REG_V0, 2, REG_V6, 16) // V0:15 aliased to F0:15 f(REG_V1, 2, REG_V7, 20) // TODO what about V16:31? f(REG_V8, 2, REG_V14, 24) f(REG_V9, 2, REG_V15, 28) f(REG_AR0, 1, REG_AR15, 48) } const ( BIG = 32768 - 8 DISP12 = 4096 DISP16 = 65536 DISP20 = 1048576 ) const ( // mark flags LEAF = 1 << iota BRANCH USETMP // generated code of this Prog uses REGTMP ) const ( // comments from func aclass in asmz.go C_NONE = iota C_REG // general-purpose register (64-bit) C_FREG // floating-point register (64-bit) C_VREG // vector register (128-bit) C_AREG // access register (32-bit) C_ZCON // constant == 0 C_SCON // 0 <= constant <= 0x7fff (positive int16) C_UCON // constant & 0xffff == 0 (int16 or uint16) C_ADDCON // 0 > constant >= -0x8000 (negative int16) C_ANDCON // constant <= 0xffff C_LCON // constant (int32 or uint32) C_DCON // constant (int64 or uint64) C_SACON // computed address, 16-bit displacement, possibly SP-relative C_LACON // computed address, 32-bit displacement, possibly SP-relative C_DACON // computed address, 64-bit displacment? 
C_SBRA // short branch C_LBRA // long branch C_SAUTO // short auto C_LAUTO // long auto C_ZOREG // heap address, register-based, displacement == 0 C_SOREG // heap address, register-based, int16 displacement C_LOREG // heap address, register-based, int32 displacement C_TLS_LE // TLS - local exec model (for executables) C_TLS_IE // TLS - initial exec model (for shared libraries loaded at program startup) C_GOK // general address C_ADDR // relocation for extern or static symbols (loads and stores) C_SYMADDR // relocation for extern or static symbols (address taking) C_GOTADDR // GOT slot for a symbol in -dynlink mode C_TEXTSIZE // text size C_ANY C_NCLASS // must be the last ) const ( // integer arithmetic AADD = obj.ABaseS390X + obj.A_ARCHSPECIFIC + iota AADDC AADDE AADDW ADIVW ADIVWU ADIVD ADIVDU AMODW AMODWU AMODD AMODDU AMULLW AMULLD AMULHD AMULHDU AMLGR ASUB ASUBC ASUBV ASUBE ASUBW ANEG ANEGW // integer moves AMOVWBR AMOVB AMOVBZ AMOVH AMOVHBR AMOVHZ AMOVW AMOVWZ AMOVD AMOVDBR // conditional moves AMOVDEQ AMOVDGE AMOVDGT AMOVDLE AMOVDLT AMOVDNE ALOCR ALOCGR // find leftmost one AFLOGR // population count APOPCNT // integer bitwise AAND AANDW AOR AORW AXOR AXORW ASLW ASLD ASRW ASRAW ASRD ASRAD ARLL ARLLG ARNSBG ARXSBG AROSBG ARNSBGT ARXSBGT AROSBGT ARISBG ARISBGN ARISBGZ ARISBGNZ ARISBHG ARISBLG ARISBHGZ ARISBLGZ // floating point AFABS AFADD AFADDS AFCMPO AFCMPU ACEBR AFDIV AFDIVS AFMADD AFMADDS AFMOVD AFMOVS AFMSUB AFMSUBS AFMUL AFMULS AFNABS AFNEG AFNEGS ALEDBR ALDEBR ALPDFR ALNDFR AFSUB AFSUBS AFSQRT AFSQRTS AFIEBR AFIDBR ACPSDR ALTEBR ALTDBR ATCEB ATCDB // move from GPR to FPR and vice versa ALDGR ALGDR // convert from int32/int64 to float/float64 ACEFBRA ACDFBRA ACEGBRA ACDGBRA // convert from float/float64 to int32/int64 ACFEBRA ACFDBRA ACGEBRA ACGDBRA // convert from uint32/uint64 to float/float64 ACELFBR ACDLFBR ACELGBR ACDLGBR // convert from float/float64 to uint32/uint64 ACLFEBR ACLFDBR ACLGEBR ACLGDBR // compare ACMP ACMPU ACMPW ACMPWU // test under 
mask ATMHH ATMHL ATMLH ATMLL // insert program mask AIPM // set program mask ASPM // compare and swap ACS ACSG // serialize ASYNC // branch ABC ABCL ABRC ABEQ ABGE ABGT ABLE ABLT ABLEU ABLTU ABNE ABVC ABVS ASYSCALL // branch on count ABRCT ABRCTG // compare and branch ACRJ ACGRJ ACLRJ ACLGRJ ACIJ ACGIJ ACLIJ ACLGIJ ACMPBEQ ACMPBGE ACMPBGT ACMPBLE ACMPBLT ACMPBNE ACMPUBEQ ACMPUBGE ACMPUBGT ACMPUBLE ACMPUBLT ACMPUBNE // storage-and-storage AMVC AMVCIN ACLC AXC AOC ANC // load AEXRL ALARL ALA ALAY // interlocked load and op ALAA ALAAG ALAAL ALAALG ALAN ALANG ALAX ALAXG ALAO ALAOG // load/store multiple ALMY ALMG ASTMY ASTMG // store clock ASTCK ASTCKC ASTCKE ASTCKF // macros ACLEAR // vector AVA AVAB AVAH AVAF AVAG AVAQ AVACC AVACCB AVACCH AVACCF AVACCG AVACCQ AVAC AVACQ AVACCC AVACCCQ AVN AVNC AVAVG AVAVGB AVAVGH AVAVGF AVAVGG AVAVGL AVAVGLB AVAVGLH AVAVGLF AVAVGLG AVCKSM AVCEQ AVCEQB AVCEQH AVCEQF AVCEQG AVCEQBS AVCEQHS AVCEQFS AVCEQGS AVCH AVCHB AVCHH AVCHF AVCHG AVCHBS AVCHHS AVCHFS AVCHGS AVCHL AVCHLB AVCHLH AVCHLF AVCHLG AVCHLBS AVCHLHS AVCHLFS AVCHLGS AVCLZ AVCLZB AVCLZH AVCLZF AVCLZG AVCTZ AVCTZB AVCTZH AVCTZF AVCTZG AVEC AVECB AVECH AVECF AVECG AVECL AVECLB AVECLH AVECLF AVECLG AVERIM AVERIMB AVERIMH AVERIMF AVERIMG AVERLL AVERLLB AVERLLH AVERLLF AVERLLG AVERLLV AVERLLVB AVERLLVH AVERLLVF AVERLLVG AVESLV AVESLVB AVESLVH AVESLVF AVESLVG AVESL AVESLB AVESLH AVESLF AVESLG AVESRA AVESRAB AVESRAH AVESRAF AVESRAG AVESRAV AVESRAVB AVESRAVH AVESRAVF AVESRAVG AVESRL AVESRLB AVESRLH AVESRLF AVESRLG AVESRLV AVESRLVB AVESRLVH AVESRLVF AVESRLVG AVX AVFAE AVFAEB AVFAEH AVFAEF AVFAEBS AVFAEHS AVFAEFS AVFAEZB AVFAEZH AVFAEZF AVFAEZBS AVFAEZHS AVFAEZFS AVFEE AVFEEB AVFEEH AVFEEF AVFEEBS AVFEEHS AVFEEFS AVFEEZB AVFEEZH AVFEEZF AVFEEZBS AVFEEZHS AVFEEZFS AVFENE AVFENEB AVFENEH AVFENEF AVFENEBS AVFENEHS AVFENEFS AVFENEZB AVFENEZH AVFENEZF AVFENEZBS AVFENEZHS AVFENEZFS AVFA AVFADB AWFADB AWFK AWFKDB AVFCE AVFCEDB AVFCEDBS AWFCEDB AWFCEDBS AVFCH AVFCHDB AVFCHDBS AWFCHDB AWFCHDBS 
AVFCHE AVFCHEDB AVFCHEDBS AWFCHEDB AWFCHEDBS AWFC AWFCDB AVCDG AVCDGB AWCDGB AVCDLG AVCDLGB AWCDLGB AVCGD AVCGDB AWCGDB AVCLGD AVCLGDB AWCLGDB AVFD AVFDDB AWFDDB AVLDE AVLDEB AWLDEB AVLED AVLEDB AWLEDB AVFM AVFMDB AWFMDB AVFMA AVFMADB AWFMADB AVFMS AVFMSDB AWFMSDB AVFPSO AVFPSODB AWFPSODB AVFLCDB AWFLCDB AVFLNDB AWFLNDB AVFLPDB AWFLPDB AVFSQ AVFSQDB AWFSQDB AVFS AVFSDB AWFSDB AVFTCI AVFTCIDB AWFTCIDB AVGFM AVGFMB AVGFMH AVGFMF AVGFMG AVGFMA AVGFMAB AVGFMAH AVGFMAF AVGFMAG AVGEF AVGEG AVGBM AVZERO AVONE AVGM AVGMB AVGMH AVGMF AVGMG AVISTR AVISTRB AVISTRH AVISTRF AVISTRBS AVISTRHS AVISTRFS AVL AVLR AVLREP AVLREPB AVLREPH AVLREPF AVLREPG AVLC AVLCB AVLCH AVLCF AVLCG AVLEH AVLEF AVLEG AVLEB AVLEIH AVLEIF AVLEIG AVLEIB AVFI AVFIDB AWFIDB AVLGV AVLGVB AVLGVH AVLGVF AVLGVG AVLLEZ AVLLEZB AVLLEZH AVLLEZF AVLLEZG AVLM AVLP AVLPB AVLPH AVLPF AVLPG AVLBB AVLVG AVLVGB AVLVGH AVLVGF AVLVGG AVLVGP AVLL AVMX AVMXB AVMXH AVMXF AVMXG AVMXL AVMXLB AVMXLH AVMXLF AVMXLG AVMRH AVMRHB AVMRHH AVMRHF AVMRHG AVMRL AVMRLB AVMRLH AVMRLF AVMRLG AVMN AVMNB AVMNH AVMNF AVMNG AVMNL AVMNLB AVMNLH AVMNLF AVMNLG AVMAE AVMAEB AVMAEH AVMAEF AVMAH AVMAHB AVMAHH AVMAHF AVMALE AVMALEB AVMALEH AVMALEF AVMALH AVMALHB AVMALHH AVMALHF AVMALO AVMALOB AVMALOH AVMALOF AVMAL AVMALB AVMALHW AVMALF AVMAO AVMAOB AVMAOH AVMAOF AVME AVMEB AVMEH AVMEF AVMH AVMHB AVMHH AVMHF AVMLE AVMLEB AVMLEH AVMLEF AVMLH AVMLHB AVMLHH AVMLHF AVMLO AVMLOB AVMLOH AVMLOF AVML AVMLB AVMLHW AVMLF AVMO AVMOB AVMOH AVMOF AVNO AVNOT AVO AVPK AVPKH AVPKF AVPKG AVPKLS AVPKLSH AVPKLSF AVPKLSG AVPKLSHS AVPKLSFS AVPKLSGS AVPKS AVPKSH AVPKSF AVPKSG AVPKSHS AVPKSFS AVPKSGS AVPERM AVPDI AVPOPCT AVREP AVREPB AVREPH AVREPF AVREPG AVREPI AVREPIB AVREPIH AVREPIF AVREPIG AVSCEF AVSCEG AVSEL AVSL AVSLB AVSLDB AVSRA AVSRAB AVSRL AVSRLB AVSEG AVSEGB AVSEGH AVSEGF AVST AVSTEH AVSTEF AVSTEG AVSTEB AVSTM AVSTL AVSTRC AVSTRCB AVSTRCH AVSTRCF AVSTRCBS AVSTRCHS AVSTRCFS AVSTRCZB AVSTRCZH AVSTRCZF AVSTRCZBS AVSTRCZHS AVSTRCZFS AVS AVSB AVSH AVSF AVSG AVSQ AVSCBI 
AVSCBIB AVSCBIH AVSCBIF AVSCBIG AVSCBIQ AVSBCBI AVSBCBIQ AVSBI AVSBIQ AVSUMG AVSUMGH AVSUMGF AVSUMQ AVSUMQF AVSUMQG AVSUM AVSUMB AVSUMH AVTM AVUPH AVUPHB AVUPHH AVUPHF AVUPLH AVUPLHB AVUPLHH AVUPLHF AVUPLL AVUPLLB AVUPLLH AVUPLLF AVUPL AVUPLB AVUPLHW AVUPLF AVMSLG AVMSLEG AVMSLOG AVMSLEOG ANOPH // NOP // binary ABYTE AWORD ADWORD // end marker ALAST // aliases ABR = obj.AJMP ABL = obj.ACALL )
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/twitchyliquid64/golang-asm/obj/s390x/anamesz.go
vendor/github.com/twitchyliquid64/golang-asm/obj/s390x/anamesz.go
// Copyright 2016 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package s390x var cnamesz = []string{ "NONE", "REG", "FREG", "VREG", "AREG", "ZCON", "SCON", "UCON", "ADDCON", "ANDCON", "LCON", "DCON", "SACON", "LACON", "DACON", "SBRA", "LBRA", "SAUTO", "LAUTO", "ZOREG", "SOREG", "LOREG", "TLS_LE", "TLS_IE", "GOK", "ADDR", "SYMADDR", "GOTADDR", "TEXTSIZE", "ANY", "NCLASS", }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/twitchyliquid64/golang-asm/obj/s390x/anames.go
vendor/github.com/twitchyliquid64/golang-asm/obj/s390x/anames.go
// Code generated by stringer -i a.out.go -o anames.go -p s390x; DO NOT EDIT. package s390x import "github.com/twitchyliquid64/golang-asm/obj" var Anames = []string{ obj.A_ARCHSPECIFIC: "ADD", "ADDC", "ADDE", "ADDW", "DIVW", "DIVWU", "DIVD", "DIVDU", "MODW", "MODWU", "MODD", "MODDU", "MULLW", "MULLD", "MULHD", "MULHDU", "MLGR", "SUB", "SUBC", "SUBV", "SUBE", "SUBW", "NEG", "NEGW", "MOVWBR", "MOVB", "MOVBZ", "MOVH", "MOVHBR", "MOVHZ", "MOVW", "MOVWZ", "MOVD", "MOVDBR", "MOVDEQ", "MOVDGE", "MOVDGT", "MOVDLE", "MOVDLT", "MOVDNE", "LOCR", "LOCGR", "FLOGR", "POPCNT", "AND", "ANDW", "OR", "ORW", "XOR", "XORW", "SLW", "SLD", "SRW", "SRAW", "SRD", "SRAD", "RLL", "RLLG", "RNSBG", "RXSBG", "ROSBG", "RNSBGT", "RXSBGT", "ROSBGT", "RISBG", "RISBGN", "RISBGZ", "RISBGNZ", "RISBHG", "RISBLG", "RISBHGZ", "RISBLGZ", "FABS", "FADD", "FADDS", "FCMPO", "FCMPU", "CEBR", "FDIV", "FDIVS", "FMADD", "FMADDS", "FMOVD", "FMOVS", "FMSUB", "FMSUBS", "FMUL", "FMULS", "FNABS", "FNEG", "FNEGS", "LEDBR", "LDEBR", "LPDFR", "LNDFR", "FSUB", "FSUBS", "FSQRT", "FSQRTS", "FIEBR", "FIDBR", "CPSDR", "LTEBR", "LTDBR", "TCEB", "TCDB", "LDGR", "LGDR", "CEFBRA", "CDFBRA", "CEGBRA", "CDGBRA", "CFEBRA", "CFDBRA", "CGEBRA", "CGDBRA", "CELFBR", "CDLFBR", "CELGBR", "CDLGBR", "CLFEBR", "CLFDBR", "CLGEBR", "CLGDBR", "CMP", "CMPU", "CMPW", "CMPWU", "TMHH", "TMHL", "TMLH", "TMLL", "IPM", "SPM", "CS", "CSG", "SYNC", "BC", "BCL", "BRC", "BEQ", "BGE", "BGT", "BLE", "BLT", "BLEU", "BLTU", "BNE", "BVC", "BVS", "SYSCALL", "BRCT", "BRCTG", "CRJ", "CGRJ", "CLRJ", "CLGRJ", "CIJ", "CGIJ", "CLIJ", "CLGIJ", "CMPBEQ", "CMPBGE", "CMPBGT", "CMPBLE", "CMPBLT", "CMPBNE", "CMPUBEQ", "CMPUBGE", "CMPUBGT", "CMPUBLE", "CMPUBLT", "CMPUBNE", "MVC", "MVCIN", "CLC", "XC", "OC", "NC", "EXRL", "LARL", "LA", "LAY", "LAA", "LAAG", "LAAL", "LAALG", "LAN", "LANG", "LAX", "LAXG", "LAO", "LAOG", "LMY", "LMG", "STMY", "STMG", "STCK", "STCKC", "STCKE", "STCKF", "CLEAR", "VA", "VAB", "VAH", "VAF", "VAG", "VAQ", "VACC", "VACCB", "VACCH", "VACCF", 
"VACCG", "VACCQ", "VAC", "VACQ", "VACCC", "VACCCQ", "VN", "VNC", "VAVG", "VAVGB", "VAVGH", "VAVGF", "VAVGG", "VAVGL", "VAVGLB", "VAVGLH", "VAVGLF", "VAVGLG", "VCKSM", "VCEQ", "VCEQB", "VCEQH", "VCEQF", "VCEQG", "VCEQBS", "VCEQHS", "VCEQFS", "VCEQGS", "VCH", "VCHB", "VCHH", "VCHF", "VCHG", "VCHBS", "VCHHS", "VCHFS", "VCHGS", "VCHL", "VCHLB", "VCHLH", "VCHLF", "VCHLG", "VCHLBS", "VCHLHS", "VCHLFS", "VCHLGS", "VCLZ", "VCLZB", "VCLZH", "VCLZF", "VCLZG", "VCTZ", "VCTZB", "VCTZH", "VCTZF", "VCTZG", "VEC", "VECB", "VECH", "VECF", "VECG", "VECL", "VECLB", "VECLH", "VECLF", "VECLG", "VERIM", "VERIMB", "VERIMH", "VERIMF", "VERIMG", "VERLL", "VERLLB", "VERLLH", "VERLLF", "VERLLG", "VERLLV", "VERLLVB", "VERLLVH", "VERLLVF", "VERLLVG", "VESLV", "VESLVB", "VESLVH", "VESLVF", "VESLVG", "VESL", "VESLB", "VESLH", "VESLF", "VESLG", "VESRA", "VESRAB", "VESRAH", "VESRAF", "VESRAG", "VESRAV", "VESRAVB", "VESRAVH", "VESRAVF", "VESRAVG", "VESRL", "VESRLB", "VESRLH", "VESRLF", "VESRLG", "VESRLV", "VESRLVB", "VESRLVH", "VESRLVF", "VESRLVG", "VX", "VFAE", "VFAEB", "VFAEH", "VFAEF", "VFAEBS", "VFAEHS", "VFAEFS", "VFAEZB", "VFAEZH", "VFAEZF", "VFAEZBS", "VFAEZHS", "VFAEZFS", "VFEE", "VFEEB", "VFEEH", "VFEEF", "VFEEBS", "VFEEHS", "VFEEFS", "VFEEZB", "VFEEZH", "VFEEZF", "VFEEZBS", "VFEEZHS", "VFEEZFS", "VFENE", "VFENEB", "VFENEH", "VFENEF", "VFENEBS", "VFENEHS", "VFENEFS", "VFENEZB", "VFENEZH", "VFENEZF", "VFENEZBS", "VFENEZHS", "VFENEZFS", "VFA", "VFADB", "WFADB", "WFK", "WFKDB", "VFCE", "VFCEDB", "VFCEDBS", "WFCEDB", "WFCEDBS", "VFCH", "VFCHDB", "VFCHDBS", "WFCHDB", "WFCHDBS", "VFCHE", "VFCHEDB", "VFCHEDBS", "WFCHEDB", "WFCHEDBS", "WFC", "WFCDB", "VCDG", "VCDGB", "WCDGB", "VCDLG", "VCDLGB", "WCDLGB", "VCGD", "VCGDB", "WCGDB", "VCLGD", "VCLGDB", "WCLGDB", "VFD", "VFDDB", "WFDDB", "VLDE", "VLDEB", "WLDEB", "VLED", "VLEDB", "WLEDB", "VFM", "VFMDB", "WFMDB", "VFMA", "VFMADB", "WFMADB", "VFMS", "VFMSDB", "WFMSDB", "VFPSO", "VFPSODB", "WFPSODB", "VFLCDB", "WFLCDB", "VFLNDB", "WFLNDB", "VFLPDB", 
"WFLPDB", "VFSQ", "VFSQDB", "WFSQDB", "VFS", "VFSDB", "WFSDB", "VFTCI", "VFTCIDB", "WFTCIDB", "VGFM", "VGFMB", "VGFMH", "VGFMF", "VGFMG", "VGFMA", "VGFMAB", "VGFMAH", "VGFMAF", "VGFMAG", "VGEF", "VGEG", "VGBM", "VZERO", "VONE", "VGM", "VGMB", "VGMH", "VGMF", "VGMG", "VISTR", "VISTRB", "VISTRH", "VISTRF", "VISTRBS", "VISTRHS", "VISTRFS", "VL", "VLR", "VLREP", "VLREPB", "VLREPH", "VLREPF", "VLREPG", "VLC", "VLCB", "VLCH", "VLCF", "VLCG", "VLEH", "VLEF", "VLEG", "VLEB", "VLEIH", "VLEIF", "VLEIG", "VLEIB", "VFI", "VFIDB", "WFIDB", "VLGV", "VLGVB", "VLGVH", "VLGVF", "VLGVG", "VLLEZ", "VLLEZB", "VLLEZH", "VLLEZF", "VLLEZG", "VLM", "VLP", "VLPB", "VLPH", "VLPF", "VLPG", "VLBB", "VLVG", "VLVGB", "VLVGH", "VLVGF", "VLVGG", "VLVGP", "VLL", "VMX", "VMXB", "VMXH", "VMXF", "VMXG", "VMXL", "VMXLB", "VMXLH", "VMXLF", "VMXLG", "VMRH", "VMRHB", "VMRHH", "VMRHF", "VMRHG", "VMRL", "VMRLB", "VMRLH", "VMRLF", "VMRLG", "VMN", "VMNB", "VMNH", "VMNF", "VMNG", "VMNL", "VMNLB", "VMNLH", "VMNLF", "VMNLG", "VMAE", "VMAEB", "VMAEH", "VMAEF", "VMAH", "VMAHB", "VMAHH", "VMAHF", "VMALE", "VMALEB", "VMALEH", "VMALEF", "VMALH", "VMALHB", "VMALHH", "VMALHF", "VMALO", "VMALOB", "VMALOH", "VMALOF", "VMAL", "VMALB", "VMALHW", "VMALF", "VMAO", "VMAOB", "VMAOH", "VMAOF", "VME", "VMEB", "VMEH", "VMEF", "VMH", "VMHB", "VMHH", "VMHF", "VMLE", "VMLEB", "VMLEH", "VMLEF", "VMLH", "VMLHB", "VMLHH", "VMLHF", "VMLO", "VMLOB", "VMLOH", "VMLOF", "VML", "VMLB", "VMLHW", "VMLF", "VMO", "VMOB", "VMOH", "VMOF", "VNO", "VNOT", "VO", "VPK", "VPKH", "VPKF", "VPKG", "VPKLS", "VPKLSH", "VPKLSF", "VPKLSG", "VPKLSHS", "VPKLSFS", "VPKLSGS", "VPKS", "VPKSH", "VPKSF", "VPKSG", "VPKSHS", "VPKSFS", "VPKSGS", "VPERM", "VPDI", "VPOPCT", "VREP", "VREPB", "VREPH", "VREPF", "VREPG", "VREPI", "VREPIB", "VREPIH", "VREPIF", "VREPIG", "VSCEF", "VSCEG", "VSEL", "VSL", "VSLB", "VSLDB", "VSRA", "VSRAB", "VSRL", "VSRLB", "VSEG", "VSEGB", "VSEGH", "VSEGF", "VST", "VSTEH", "VSTEF", "VSTEG", "VSTEB", "VSTM", "VSTL", "VSTRC", "VSTRCB", "VSTRCH", 
"VSTRCF", "VSTRCBS", "VSTRCHS", "VSTRCFS", "VSTRCZB", "VSTRCZH", "VSTRCZF", "VSTRCZBS", "VSTRCZHS", "VSTRCZFS", "VS", "VSB", "VSH", "VSF", "VSG", "VSQ", "VSCBI", "VSCBIB", "VSCBIH", "VSCBIF", "VSCBIG", "VSCBIQ", "VSBCBI", "VSBCBIQ", "VSBI", "VSBIQ", "VSUMG", "VSUMGH", "VSUMGF", "VSUMQ", "VSUMQF", "VSUMQG", "VSUM", "VSUMB", "VSUMH", "VTM", "VUPH", "VUPHB", "VUPHH", "VUPHF", "VUPLH", "VUPLHB", "VUPLHH", "VUPLHF", "VUPLL", "VUPLLB", "VUPLLH", "VUPLLF", "VUPL", "VUPLB", "VUPLHW", "VUPLF", "VMSLG", "VMSLEG", "VMSLOG", "VMSLEOG", "NOPH", "BYTE", "WORD", "DWORD", "LAST", }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/twitchyliquid64/golang-asm/obj/s390x/objz.go
vendor/github.com/twitchyliquid64/golang-asm/obj/s390x/objz.go
// Based on cmd/internal/obj/ppc64/obj9.go. // // Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. // Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) // Portions Copyright © 1997-1999 Vita Nuova Limited // Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com) // Portions Copyright © 2004,2006 Bruce Ellis // Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) // Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others // Portions Copyright © 2009 The Go Authors. All rights reserved. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. 
package s390x import ( "github.com/twitchyliquid64/golang-asm/obj" "github.com/twitchyliquid64/golang-asm/objabi" "github.com/twitchyliquid64/golang-asm/sys" "math" ) func progedit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) { p.From.Class = 0 p.To.Class = 0 c := ctxtz{ctxt: ctxt, newprog: newprog} // Rewrite BR/BL to symbol as TYPE_BRANCH. switch p.As { case ABR, ABL, obj.ARET, obj.ADUFFZERO, obj.ADUFFCOPY: if p.To.Sym != nil { p.To.Type = obj.TYPE_BRANCH } } // Rewrite float constants to values stored in memory unless they are +0. switch p.As { case AFMOVS: if p.From.Type == obj.TYPE_FCONST { f32 := float32(p.From.Val.(float64)) if math.Float32bits(f32) == 0 { // +0 break } p.From.Type = obj.TYPE_MEM p.From.Sym = ctxt.Float32Sym(f32) p.From.Name = obj.NAME_EXTERN p.From.Offset = 0 } case AFMOVD: if p.From.Type == obj.TYPE_FCONST { f64 := p.From.Val.(float64) if math.Float64bits(f64) == 0 { // +0 break } p.From.Type = obj.TYPE_MEM p.From.Sym = ctxt.Float64Sym(f64) p.From.Name = obj.NAME_EXTERN p.From.Offset = 0 } // put constants not loadable by LOAD IMMEDIATE into memory case AMOVD: if p.From.Type == obj.TYPE_CONST { val := p.From.Offset if int64(int32(val)) != val && int64(uint32(val)) != val && int64(uint64(val)&(0xffffffff<<32)) != val { p.From.Type = obj.TYPE_MEM p.From.Sym = ctxt.Int64Sym(p.From.Offset) p.From.Name = obj.NAME_EXTERN p.From.Offset = 0 } } } // Rewrite SUB constants into ADD. switch p.As { case ASUBC: if p.From.Type == obj.TYPE_CONST && isint32(-p.From.Offset) { p.From.Offset = -p.From.Offset p.As = AADDC } case ASUB: if p.From.Type == obj.TYPE_CONST && isint32(-p.From.Offset) { p.From.Offset = -p.From.Offset p.As = AADD } } if c.ctxt.Flag_dynlink { c.rewriteToUseGot(p) } } // Rewrite p, if necessary, to access global data via the global offset table. func (c *ctxtz) rewriteToUseGot(p *obj.Prog) { // At the moment EXRL instructions are not emitted by the compiler and only reference local symbols in // assembly code. 
if p.As == AEXRL { return } // We only care about global data: NAME_EXTERN means a global // symbol in the Go sense, and p.Sym.Local is true for a few // internally defined symbols. // Rewrites must not clobber flags and therefore cannot use the // ADD instruction. if p.From.Type == obj.TYPE_ADDR && p.From.Name == obj.NAME_EXTERN && !p.From.Sym.Local() { // MOVD $sym, Rx becomes MOVD sym@GOT, Rx // MOVD $sym+<off>, Rx becomes MOVD sym@GOT, Rx or REGTMP2; MOVD $<off>(Rx or REGTMP2), Rx if p.To.Type != obj.TYPE_REG || p.As != AMOVD { c.ctxt.Diag("do not know how to handle LEA-type insn to non-register in %v with -dynlink", p) } p.From.Type = obj.TYPE_MEM p.From.Name = obj.NAME_GOTREF q := p if p.From.Offset != 0 { target := p.To.Reg if target == REG_R0 { // Cannot use R0 as input to address calculation. // REGTMP might be used by the assembler. p.To.Reg = REGTMP2 } q = obj.Appendp(q, c.newprog) q.As = AMOVD q.From.Type = obj.TYPE_ADDR q.From.Offset = p.From.Offset q.From.Reg = p.To.Reg q.To.Type = obj.TYPE_REG q.To.Reg = target p.From.Offset = 0 } } if p.GetFrom3() != nil && p.GetFrom3().Name == obj.NAME_EXTERN { c.ctxt.Diag("don't know how to handle %v with -dynlink", p) } var source *obj.Addr // MOVD sym, Ry becomes MOVD sym@GOT, REGTMP2; MOVD (REGTMP2), Ry // MOVD Ry, sym becomes MOVD sym@GOT, REGTMP2; MOVD Ry, (REGTMP2) // An addition may be inserted between the two MOVs if there is an offset. 
if p.From.Name == obj.NAME_EXTERN && !p.From.Sym.Local() { if p.To.Name == obj.NAME_EXTERN && !p.To.Sym.Local() { c.ctxt.Diag("cannot handle NAME_EXTERN on both sides in %v with -dynlink", p) } source = &p.From } else if p.To.Name == obj.NAME_EXTERN && !p.To.Sym.Local() { source = &p.To } else { return } if p.As == obj.ATEXT || p.As == obj.AFUNCDATA || p.As == obj.ACALL || p.As == obj.ARET || p.As == obj.AJMP { return } if source.Sym.Type == objabi.STLSBSS { return } if source.Type != obj.TYPE_MEM { c.ctxt.Diag("don't know how to handle %v with -dynlink", p) } p1 := obj.Appendp(p, c.newprog) p2 := obj.Appendp(p1, c.newprog) p1.As = AMOVD p1.From.Type = obj.TYPE_MEM p1.From.Sym = source.Sym p1.From.Name = obj.NAME_GOTREF p1.To.Type = obj.TYPE_REG p1.To.Reg = REGTMP2 p2.As = p.As p2.From = p.From p2.To = p.To if p.From.Name == obj.NAME_EXTERN { p2.From.Reg = REGTMP2 p2.From.Name = obj.NAME_NONE p2.From.Sym = nil } else if p.To.Name == obj.NAME_EXTERN { p2.To.Reg = REGTMP2 p2.To.Name = obj.NAME_NONE p2.To.Sym = nil } else { return } obj.Nopout(p) } func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { // TODO(minux): add morestack short-cuts with small fixed frame-size. if cursym.Func.Text == nil || cursym.Func.Text.Link == nil { return } c := ctxtz{ctxt: ctxt, cursym: cursym, newprog: newprog} p := c.cursym.Func.Text textstksiz := p.To.Offset if textstksiz == -8 { // Compatibility hack. 
p.From.Sym.Set(obj.AttrNoFrame, true) textstksiz = 0 } if textstksiz%8 != 0 { c.ctxt.Diag("frame size %d not a multiple of 8", textstksiz) } if p.From.Sym.NoFrame() { if textstksiz != 0 { c.ctxt.Diag("NOFRAME functions must have a frame size of 0, not %d", textstksiz) } } c.cursym.Func.Args = p.To.Val.(int32) c.cursym.Func.Locals = int32(textstksiz) /* * find leaf subroutines * strip NOPs * expand RET */ var q *obj.Prog for p := c.cursym.Func.Text; p != nil; p = p.Link { switch p.As { case obj.ATEXT: q = p p.Mark |= LEAF case ABL, ABCL: q = p c.cursym.Func.Text.Mark &^= LEAF fallthrough case ABC, ABRC, ABEQ, ABGE, ABGT, ABLE, ABLT, ABLEU, ABLTU, ABNE, ABR, ABVC, ABVS, ACRJ, ACGRJ, ACLRJ, ACLGRJ, ACIJ, ACGIJ, ACLIJ, ACLGIJ, ACMPBEQ, ACMPBGE, ACMPBGT, ACMPBLE, ACMPBLT, ACMPBNE, ACMPUBEQ, ACMPUBGE, ACMPUBGT, ACMPUBLE, ACMPUBLT, ACMPUBNE: q = p p.Mark |= BRANCH default: q = p } } autosize := int32(0) var pLast *obj.Prog var pPre *obj.Prog var pPreempt *obj.Prog wasSplit := false for p := c.cursym.Func.Text; p != nil; p = p.Link { pLast = p switch p.As { case obj.ATEXT: autosize = int32(textstksiz) if p.Mark&LEAF != 0 && autosize == 0 { // A leaf function with no locals has no frame. p.From.Sym.Set(obj.AttrNoFrame, true) } if !p.From.Sym.NoFrame() { // If there is a stack frame at all, it includes // space to save the LR. autosize += int32(c.ctxt.FixedFrameSize()) } if p.Mark&LEAF != 0 && autosize < objabi.StackSmall { // A leaf function with a small stack can be marked // NOSPLIT, avoiding a stack check. p.From.Sym.Set(obj.AttrNoSplit, true) } p.To.Offset = int64(autosize) q := p if !p.From.Sym.NoSplit() { p, pPreempt = c.stacksplitPre(p, autosize) // emit pre part of split check pPre = p p = c.ctxt.EndUnsafePoint(p, c.newprog, -1) wasSplit = true //need post part of split } if autosize != 0 { // Make sure to save link register for non-empty frame, even if // it is a leaf function, so that traceback works. 
// Store link register before decrementing SP, so if a signal comes // during the execution of the function prologue, the traceback // code will not see a half-updated stack frame. // This sequence is not async preemptible, as if we open a frame // at the current SP, it will clobber the saved LR. q = c.ctxt.StartUnsafePoint(p, c.newprog) q = obj.Appendp(q, c.newprog) q.As = AMOVD q.From.Type = obj.TYPE_REG q.From.Reg = REG_LR q.To.Type = obj.TYPE_MEM q.To.Reg = REGSP q.To.Offset = int64(-autosize) q = obj.Appendp(q, c.newprog) q.As = AMOVD q.From.Type = obj.TYPE_ADDR q.From.Offset = int64(-autosize) q.From.Reg = REGSP // not actually needed - REGSP is assumed if no reg is provided q.To.Type = obj.TYPE_REG q.To.Reg = REGSP q.Spadj = autosize q = c.ctxt.EndUnsafePoint(q, c.newprog, -1) } else if c.cursym.Func.Text.Mark&LEAF == 0 { // A very few functions that do not return to their caller // (e.g. gogo) are not identified as leaves but still have // no frame. c.cursym.Func.Text.Mark |= LEAF } if c.cursym.Func.Text.Mark&LEAF != 0 { c.cursym.Set(obj.AttrLeaf, true) break } if c.cursym.Func.Text.From.Sym.Wrapper() { // if(g->panic != nil && g->panic->argp == FP) g->panic->argp = bottom-of-frame // // MOVD g_panic(g), R3 // CMP R3, $0 // BEQ end // MOVD panic_argp(R3), R4 // ADD $(autosize+8), R1, R5 // CMP R4, R5 // BNE end // ADD $8, R1, R6 // MOVD R6, panic_argp(R3) // end: // NOP // // The NOP is needed to give the jumps somewhere to land. // It is a liblink NOP, not a s390x NOP: it encodes to 0 instruction bytes. 
q = obj.Appendp(q, c.newprog) q.As = AMOVD q.From.Type = obj.TYPE_MEM q.From.Reg = REGG q.From.Offset = 4 * int64(c.ctxt.Arch.PtrSize) // G.panic q.To.Type = obj.TYPE_REG q.To.Reg = REG_R3 q = obj.Appendp(q, c.newprog) q.As = ACMP q.From.Type = obj.TYPE_REG q.From.Reg = REG_R3 q.To.Type = obj.TYPE_CONST q.To.Offset = 0 q = obj.Appendp(q, c.newprog) q.As = ABEQ q.To.Type = obj.TYPE_BRANCH p1 := q q = obj.Appendp(q, c.newprog) q.As = AMOVD q.From.Type = obj.TYPE_MEM q.From.Reg = REG_R3 q.From.Offset = 0 // Panic.argp q.To.Type = obj.TYPE_REG q.To.Reg = REG_R4 q = obj.Appendp(q, c.newprog) q.As = AADD q.From.Type = obj.TYPE_CONST q.From.Offset = int64(autosize) + c.ctxt.FixedFrameSize() q.Reg = REGSP q.To.Type = obj.TYPE_REG q.To.Reg = REG_R5 q = obj.Appendp(q, c.newprog) q.As = ACMP q.From.Type = obj.TYPE_REG q.From.Reg = REG_R4 q.To.Type = obj.TYPE_REG q.To.Reg = REG_R5 q = obj.Appendp(q, c.newprog) q.As = ABNE q.To.Type = obj.TYPE_BRANCH p2 := q q = obj.Appendp(q, c.newprog) q.As = AADD q.From.Type = obj.TYPE_CONST q.From.Offset = c.ctxt.FixedFrameSize() q.Reg = REGSP q.To.Type = obj.TYPE_REG q.To.Reg = REG_R6 q = obj.Appendp(q, c.newprog) q.As = AMOVD q.From.Type = obj.TYPE_REG q.From.Reg = REG_R6 q.To.Type = obj.TYPE_MEM q.To.Reg = REG_R3 q.To.Offset = 0 // Panic.argp q = obj.Appendp(q, c.newprog) q.As = obj.ANOP p1.To.SetTarget(q) p2.To.SetTarget(q) } case obj.ARET: retTarget := p.To.Sym if c.cursym.Func.Text.Mark&LEAF != 0 { if autosize == 0 { p.As = ABR p.From = obj.Addr{} if retTarget == nil { p.To.Type = obj.TYPE_REG p.To.Reg = REG_LR } else { p.To.Type = obj.TYPE_BRANCH p.To.Sym = retTarget } p.Mark |= BRANCH break } p.As = AADD p.From.Type = obj.TYPE_CONST p.From.Offset = int64(autosize) p.To.Type = obj.TYPE_REG p.To.Reg = REGSP p.Spadj = -autosize q = obj.Appendp(p, c.newprog) q.As = ABR q.From = obj.Addr{} q.To.Type = obj.TYPE_REG q.To.Reg = REG_LR q.Mark |= BRANCH q.Spadj = autosize break } p.As = AMOVD p.From.Type = obj.TYPE_MEM p.From.Reg = REGSP 
p.From.Offset = 0 p.To.Type = obj.TYPE_REG p.To.Reg = REG_LR q = p if autosize != 0 { q = obj.Appendp(q, c.newprog) q.As = AADD q.From.Type = obj.TYPE_CONST q.From.Offset = int64(autosize) q.To.Type = obj.TYPE_REG q.To.Reg = REGSP q.Spadj = -autosize } q = obj.Appendp(q, c.newprog) q.As = ABR q.From = obj.Addr{} if retTarget == nil { q.To.Type = obj.TYPE_REG q.To.Reg = REG_LR } else { q.To.Type = obj.TYPE_BRANCH q.To.Sym = retTarget } q.Mark |= BRANCH q.Spadj = autosize case AADD: if p.To.Type == obj.TYPE_REG && p.To.Reg == REGSP && p.From.Type == obj.TYPE_CONST { p.Spadj = int32(-p.From.Offset) } case obj.AGETCALLERPC: if cursym.Leaf() { /* MOVD LR, Rd */ p.As = AMOVD p.From.Type = obj.TYPE_REG p.From.Reg = REG_LR } else { /* MOVD (RSP), Rd */ p.As = AMOVD p.From.Type = obj.TYPE_MEM p.From.Reg = REGSP } } } if wasSplit { c.stacksplitPost(pLast, pPre, pPreempt, autosize) // emit post part of split check } } func (c *ctxtz) stacksplitPre(p *obj.Prog, framesize int32) (*obj.Prog, *obj.Prog) { var q *obj.Prog // MOVD g_stackguard(g), R3 p = obj.Appendp(p, c.newprog) p.As = AMOVD p.From.Type = obj.TYPE_MEM p.From.Reg = REGG p.From.Offset = 2 * int64(c.ctxt.Arch.PtrSize) // G.stackguard0 if c.cursym.CFunc() { p.From.Offset = 3 * int64(c.ctxt.Arch.PtrSize) // G.stackguard1 } p.To.Type = obj.TYPE_REG p.To.Reg = REG_R3 // Mark the stack bound check and morestack call async nonpreemptible. // If we get preempted here, when resumed the preemption request is // cleared, but we'll still call morestack, which will double the stack // unnecessarily. See issue #35470. 
p = c.ctxt.StartUnsafePoint(p, c.newprog) q = nil if framesize <= objabi.StackSmall { // small stack: SP < stackguard // CMPUBGE stackguard, SP, label-of-call-to-morestack p = obj.Appendp(p, c.newprog) //q1 = p p.From.Type = obj.TYPE_REG p.From.Reg = REG_R3 p.Reg = REGSP p.As = ACMPUBGE p.To.Type = obj.TYPE_BRANCH } else if framesize <= objabi.StackBig { // large stack: SP-framesize < stackguard-StackSmall // ADD $-(framesize-StackSmall), SP, R4 // CMPUBGE stackguard, R4, label-of-call-to-morestack p = obj.Appendp(p, c.newprog) p.As = AADD p.From.Type = obj.TYPE_CONST p.From.Offset = -(int64(framesize) - objabi.StackSmall) p.Reg = REGSP p.To.Type = obj.TYPE_REG p.To.Reg = REG_R4 p = obj.Appendp(p, c.newprog) p.From.Type = obj.TYPE_REG p.From.Reg = REG_R3 p.Reg = REG_R4 p.As = ACMPUBGE p.To.Type = obj.TYPE_BRANCH } else { // Such a large stack we need to protect against wraparound. // If SP is close to zero: // SP-stackguard+StackGuard <= framesize + (StackGuard-StackSmall) // The +StackGuard on both sides is required to keep the left side positive: // SP is allowed to be slightly below stackguard. See stack.h. // // Preemption sets stackguard to StackPreempt, a very large value. // That breaks the math above, so we have to check for that explicitly. 
// // stackguard is R3 // CMP R3, $StackPreempt // BEQ label-of-call-to-morestack // ADD $StackGuard, SP, R4 // SUB R3, R4 // MOVD $(framesize+(StackGuard-StackSmall)), TEMP // CMPUBGE TEMP, R4, label-of-call-to-morestack p = obj.Appendp(p, c.newprog) p.As = ACMP p.From.Type = obj.TYPE_REG p.From.Reg = REG_R3 p.To.Type = obj.TYPE_CONST p.To.Offset = objabi.StackPreempt p = obj.Appendp(p, c.newprog) q = p p.As = ABEQ p.To.Type = obj.TYPE_BRANCH p = obj.Appendp(p, c.newprog) p.As = AADD p.From.Type = obj.TYPE_CONST p.From.Offset = int64(objabi.StackGuard) p.Reg = REGSP p.To.Type = obj.TYPE_REG p.To.Reg = REG_R4 p = obj.Appendp(p, c.newprog) p.As = ASUB p.From.Type = obj.TYPE_REG p.From.Reg = REG_R3 p.To.Type = obj.TYPE_REG p.To.Reg = REG_R4 p = obj.Appendp(p, c.newprog) p.As = AMOVD p.From.Type = obj.TYPE_CONST p.From.Offset = int64(framesize) + int64(objabi.StackGuard) - objabi.StackSmall p.To.Type = obj.TYPE_REG p.To.Reg = REGTMP p = obj.Appendp(p, c.newprog) p.From.Type = obj.TYPE_REG p.From.Reg = REGTMP p.Reg = REG_R4 p.As = ACMPUBGE p.To.Type = obj.TYPE_BRANCH } return p, q } func (c *ctxtz) stacksplitPost(p *obj.Prog, pPre *obj.Prog, pPreempt *obj.Prog, framesize int32) *obj.Prog { // Now we are at the end of the function, but logically // we are still in function prologue. We need to fix the // SP data and PCDATA. 
spfix := obj.Appendp(p, c.newprog) spfix.As = obj.ANOP spfix.Spadj = -framesize pcdata := c.ctxt.EmitEntryStackMap(c.cursym, spfix, c.newprog) pcdata = c.ctxt.StartUnsafePoint(pcdata, c.newprog) // MOVD LR, R5 p = obj.Appendp(pcdata, c.newprog) pPre.To.SetTarget(p) p.As = AMOVD p.From.Type = obj.TYPE_REG p.From.Reg = REG_LR p.To.Type = obj.TYPE_REG p.To.Reg = REG_R5 if pPreempt != nil { pPreempt.To.SetTarget(p) } // BL runtime.morestack(SB) p = obj.Appendp(p, c.newprog) p.As = ABL p.To.Type = obj.TYPE_BRANCH if c.cursym.CFunc() { p.To.Sym = c.ctxt.Lookup("runtime.morestackc") } else if !c.cursym.Func.Text.From.Sym.NeedCtxt() { p.To.Sym = c.ctxt.Lookup("runtime.morestack_noctxt") } else { p.To.Sym = c.ctxt.Lookup("runtime.morestack") } p = c.ctxt.EndUnsafePoint(p, c.newprog, -1) // BR start p = obj.Appendp(p, c.newprog) p.As = ABR p.To.Type = obj.TYPE_BRANCH p.To.SetTarget(c.cursym.Func.Text.Link) return p } var unaryDst = map[obj.As]bool{ ASTCK: true, ASTCKC: true, ASTCKE: true, ASTCKF: true, ANEG: true, ANEGW: true, AVONE: true, AVZERO: true, } var Links390x = obj.LinkArch{ Arch: sys.ArchS390X, Init: buildop, Preprocess: preprocess, Assemble: spanz, Progedit: progedit, UnaryDst: unaryDst, DWARFRegisters: S390XDWARFRegisters, }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/twitchyliquid64/golang-asm/obj/s390x/vector.go
vendor/github.com/twitchyliquid64/golang-asm/obj/s390x/vector.go
// Copyright 2016 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package s390x import ( "github.com/twitchyliquid64/golang-asm/obj" ) // This file contains utility functions for use when // assembling vector instructions. // vop returns the opcode, element size and condition // setting for the given (possibly extended) mnemonic. func vop(as obj.As) (opcode, es, cs uint32) { switch as { default: return 0, 0, 0 case AVA: return op_VA, 0, 0 case AVAB: return op_VA, 0, 0 case AVAH: return op_VA, 1, 0 case AVAF: return op_VA, 2, 0 case AVAG: return op_VA, 3, 0 case AVAQ: return op_VA, 4, 0 case AVACC: return op_VACC, 0, 0 case AVACCB: return op_VACC, 0, 0 case AVACCH: return op_VACC, 1, 0 case AVACCF: return op_VACC, 2, 0 case AVACCG: return op_VACC, 3, 0 case AVACCQ: return op_VACC, 4, 0 case AVAC: return op_VAC, 0, 0 case AVACQ: return op_VAC, 4, 0 case AVMSLG, AVMSLEG, AVMSLOG, AVMSLEOG: return op_VMSL, 3, 0 case AVACCC: return op_VACCC, 0, 0 case AVACCCQ: return op_VACCC, 4, 0 case AVN: return op_VN, 0, 0 case AVNC: return op_VNC, 0, 0 case AVAVG: return op_VAVG, 0, 0 case AVAVGB: return op_VAVG, 0, 0 case AVAVGH: return op_VAVG, 1, 0 case AVAVGF: return op_VAVG, 2, 0 case AVAVGG: return op_VAVG, 3, 0 case AVAVGL: return op_VAVGL, 0, 0 case AVAVGLB: return op_VAVGL, 0, 0 case AVAVGLH: return op_VAVGL, 1, 0 case AVAVGLF: return op_VAVGL, 2, 0 case AVAVGLG: return op_VAVGL, 3, 0 case AVCKSM: return op_VCKSM, 0, 0 case AVCEQ: return op_VCEQ, 0, 0 case AVCEQB: return op_VCEQ, 0, 0 case AVCEQH: return op_VCEQ, 1, 0 case AVCEQF: return op_VCEQ, 2, 0 case AVCEQG: return op_VCEQ, 3, 0 case AVCEQBS: return op_VCEQ, 0, 1 case AVCEQHS: return op_VCEQ, 1, 1 case AVCEQFS: return op_VCEQ, 2, 1 case AVCEQGS: return op_VCEQ, 3, 1 case AVCH: return op_VCH, 0, 0 case AVCHB: return op_VCH, 0, 0 case AVCHH: return op_VCH, 1, 0 case AVCHF: return op_VCH, 2, 0 case AVCHG: return op_VCH, 3, 0 
case AVCHBS: return op_VCH, 0, 1 case AVCHHS: return op_VCH, 1, 1 case AVCHFS: return op_VCH, 2, 1 case AVCHGS: return op_VCH, 3, 1 case AVCHL: return op_VCHL, 0, 0 case AVCHLB: return op_VCHL, 0, 0 case AVCHLH: return op_VCHL, 1, 0 case AVCHLF: return op_VCHL, 2, 0 case AVCHLG: return op_VCHL, 3, 0 case AVCHLBS: return op_VCHL, 0, 1 case AVCHLHS: return op_VCHL, 1, 1 case AVCHLFS: return op_VCHL, 2, 1 case AVCHLGS: return op_VCHL, 3, 1 case AVCLZ: return op_VCLZ, 0, 0 case AVCLZB: return op_VCLZ, 0, 0 case AVCLZH: return op_VCLZ, 1, 0 case AVCLZF: return op_VCLZ, 2, 0 case AVCLZG: return op_VCLZ, 3, 0 case AVCTZ: return op_VCTZ, 0, 0 case AVCTZB: return op_VCTZ, 0, 0 case AVCTZH: return op_VCTZ, 1, 0 case AVCTZF: return op_VCTZ, 2, 0 case AVCTZG: return op_VCTZ, 3, 0 case AVEC: return op_VEC, 0, 0 case AVECB: return op_VEC, 0, 0 case AVECH: return op_VEC, 1, 0 case AVECF: return op_VEC, 2, 0 case AVECG: return op_VEC, 3, 0 case AVECL: return op_VECL, 0, 0 case AVECLB: return op_VECL, 0, 0 case AVECLH: return op_VECL, 1, 0 case AVECLF: return op_VECL, 2, 0 case AVECLG: return op_VECL, 3, 0 case AVERIM: return op_VERIM, 0, 0 case AVERIMB: return op_VERIM, 0, 0 case AVERIMH: return op_VERIM, 1, 0 case AVERIMF: return op_VERIM, 2, 0 case AVERIMG: return op_VERIM, 3, 0 case AVERLL: return op_VERLL, 0, 0 case AVERLLB: return op_VERLL, 0, 0 case AVERLLH: return op_VERLL, 1, 0 case AVERLLF: return op_VERLL, 2, 0 case AVERLLG: return op_VERLL, 3, 0 case AVERLLV: return op_VERLLV, 0, 0 case AVERLLVB: return op_VERLLV, 0, 0 case AVERLLVH: return op_VERLLV, 1, 0 case AVERLLVF: return op_VERLLV, 2, 0 case AVERLLVG: return op_VERLLV, 3, 0 case AVESLV: return op_VESLV, 0, 0 case AVESLVB: return op_VESLV, 0, 0 case AVESLVH: return op_VESLV, 1, 0 case AVESLVF: return op_VESLV, 2, 0 case AVESLVG: return op_VESLV, 3, 0 case AVESL: return op_VESL, 0, 0 case AVESLB: return op_VESL, 0, 0 case AVESLH: return op_VESL, 1, 0 case AVESLF: return op_VESL, 2, 0 case AVESLG: return op_VESL, 3, 
0 case AVESRA: return op_VESRA, 0, 0 case AVESRAB: return op_VESRA, 0, 0 case AVESRAH: return op_VESRA, 1, 0 case AVESRAF: return op_VESRA, 2, 0 case AVESRAG: return op_VESRA, 3, 0 case AVESRAV: return op_VESRAV, 0, 0 case AVESRAVB: return op_VESRAV, 0, 0 case AVESRAVH: return op_VESRAV, 1, 0 case AVESRAVF: return op_VESRAV, 2, 0 case AVESRAVG: return op_VESRAV, 3, 0 case AVESRL: return op_VESRL, 0, 0 case AVESRLB: return op_VESRL, 0, 0 case AVESRLH: return op_VESRL, 1, 0 case AVESRLF: return op_VESRL, 2, 0 case AVESRLG: return op_VESRL, 3, 0 case AVESRLV: return op_VESRLV, 0, 0 case AVESRLVB: return op_VESRLV, 0, 0 case AVESRLVH: return op_VESRLV, 1, 0 case AVESRLVF: return op_VESRLV, 2, 0 case AVESRLVG: return op_VESRLV, 3, 0 case AVX: return op_VX, 0, 0 case AVFAE: return op_VFAE, 0, 0 case AVFAEB: return op_VFAE, 0, 0 case AVFAEH: return op_VFAE, 1, 0 case AVFAEF: return op_VFAE, 2, 0 case AVFAEBS: return op_VFAE, 0, 1 case AVFAEHS: return op_VFAE, 1, 1 case AVFAEFS: return op_VFAE, 2, 1 case AVFAEZB: return op_VFAE, 0, 2 case AVFAEZH: return op_VFAE, 1, 2 case AVFAEZF: return op_VFAE, 2, 2 case AVFAEZBS: return op_VFAE, 0, 3 case AVFAEZHS: return op_VFAE, 1, 3 case AVFAEZFS: return op_VFAE, 2, 3 case AVFEE: return op_VFEE, 0, 0 case AVFEEB: return op_VFEE, 0, 0 case AVFEEH: return op_VFEE, 1, 0 case AVFEEF: return op_VFEE, 2, 0 case AVFEEBS: return op_VFEE, 0, 1 case AVFEEHS: return op_VFEE, 1, 1 case AVFEEFS: return op_VFEE, 2, 1 case AVFEEZB: return op_VFEE, 0, 2 case AVFEEZH: return op_VFEE, 1, 2 case AVFEEZF: return op_VFEE, 2, 2 case AVFEEZBS: return op_VFEE, 0, 3 case AVFEEZHS: return op_VFEE, 1, 3 case AVFEEZFS: return op_VFEE, 2, 3 case AVFENE: return op_VFENE, 0, 0 case AVFENEB: return op_VFENE, 0, 0 case AVFENEH: return op_VFENE, 1, 0 case AVFENEF: return op_VFENE, 2, 0 case AVFENEBS: return op_VFENE, 0, 1 case AVFENEHS: return op_VFENE, 1, 1 case AVFENEFS: return op_VFENE, 2, 1 case AVFENEZB: return op_VFENE, 0, 2 case AVFENEZH: return op_VFENE, 1, 
2 case AVFENEZF: return op_VFENE, 2, 2 case AVFENEZBS: return op_VFENE, 0, 3 case AVFENEZHS: return op_VFENE, 1, 3 case AVFENEZFS: return op_VFENE, 2, 3 case AVFA: return op_VFA, 0, 0 case AVFADB: return op_VFA, 3, 0 case AWFADB: return op_VFA, 3, 0 case AWFK: return op_WFK, 0, 0 case AWFKDB: return op_WFK, 3, 0 case AVFCE: return op_VFCE, 0, 0 case AVFCEDB: return op_VFCE, 3, 0 case AVFCEDBS: return op_VFCE, 3, 1 case AWFCEDB: return op_VFCE, 3, 0 case AWFCEDBS: return op_VFCE, 3, 1 case AVFCH: return op_VFCH, 0, 0 case AVFCHDB: return op_VFCH, 3, 0 case AVFCHDBS: return op_VFCH, 3, 1 case AWFCHDB: return op_VFCH, 3, 0 case AWFCHDBS: return op_VFCH, 3, 1 case AVFCHE: return op_VFCHE, 0, 0 case AVFCHEDB: return op_VFCHE, 3, 0 case AVFCHEDBS: return op_VFCHE, 3, 1 case AWFCHEDB: return op_VFCHE, 3, 0 case AWFCHEDBS: return op_VFCHE, 3, 1 case AWFC: return op_WFC, 0, 0 case AWFCDB: return op_WFC, 3, 0 case AVCDG: return op_VCDG, 0, 0 case AVCDGB: return op_VCDG, 3, 0 case AWCDGB: return op_VCDG, 3, 0 case AVCDLG: return op_VCDLG, 0, 0 case AVCDLGB: return op_VCDLG, 3, 0 case AWCDLGB: return op_VCDLG, 3, 0 case AVCGD: return op_VCGD, 0, 0 case AVCGDB: return op_VCGD, 3, 0 case AWCGDB: return op_VCGD, 3, 0 case AVCLGD: return op_VCLGD, 0, 0 case AVCLGDB: return op_VCLGD, 3, 0 case AWCLGDB: return op_VCLGD, 3, 0 case AVFD: return op_VFD, 0, 0 case AVFDDB: return op_VFD, 3, 0 case AWFDDB: return op_VFD, 3, 0 case AVLDE: return op_VLDE, 0, 0 case AVLDEB: return op_VLDE, 2, 0 case AWLDEB: return op_VLDE, 2, 0 case AVLED: return op_VLED, 0, 0 case AVLEDB: return op_VLED, 3, 0 case AWLEDB: return op_VLED, 3, 0 case AVFM: return op_VFM, 0, 0 case AVFMDB: return op_VFM, 3, 0 case AWFMDB: return op_VFM, 3, 0 case AVFMA: return op_VFMA, 0, 0 case AVFMADB: return op_VFMA, 3, 0 case AWFMADB: return op_VFMA, 3, 0 case AVFMS: return op_VFMS, 0, 0 case AVFMSDB: return op_VFMS, 3, 0 case AWFMSDB: return op_VFMS, 3, 0 case AVFPSO: return op_VFPSO, 0, 0 case AVFPSODB: return op_VFPSO, 
3, 0 case AWFPSODB: return op_VFPSO, 3, 0 case AVFLCDB: return op_VFPSO, 3, 0 case AWFLCDB: return op_VFPSO, 3, 0 case AVFLNDB: return op_VFPSO, 3, 1 case AWFLNDB: return op_VFPSO, 3, 1 case AVFLPDB: return op_VFPSO, 3, 2 case AWFLPDB: return op_VFPSO, 3, 2 case AVFSQ: return op_VFSQ, 0, 0 case AVFSQDB: return op_VFSQ, 3, 0 case AWFSQDB: return op_VFSQ, 3, 0 case AVFS: return op_VFS, 0, 0 case AVFSDB: return op_VFS, 3, 0 case AWFSDB: return op_VFS, 3, 0 case AVFTCI: return op_VFTCI, 0, 0 case AVFTCIDB: return op_VFTCI, 3, 0 case AWFTCIDB: return op_VFTCI, 3, 0 case AVGFM: return op_VGFM, 0, 0 case AVGFMB: return op_VGFM, 0, 0 case AVGFMH: return op_VGFM, 1, 0 case AVGFMF: return op_VGFM, 2, 0 case AVGFMG: return op_VGFM, 3, 0 case AVGFMA: return op_VGFMA, 0, 0 case AVGFMAB: return op_VGFMA, 0, 0 case AVGFMAH: return op_VGFMA, 1, 0 case AVGFMAF: return op_VGFMA, 2, 0 case AVGFMAG: return op_VGFMA, 3, 0 case AVGEF: return op_VGEF, 0, 0 case AVGEG: return op_VGEG, 0, 0 case AVGBM: return op_VGBM, 0, 0 case AVZERO: return op_VGBM, 0, 0 case AVONE: return op_VGBM, 0, 0 case AVGM: return op_VGM, 0, 0 case AVGMB: return op_VGM, 0, 0 case AVGMH: return op_VGM, 1, 0 case AVGMF: return op_VGM, 2, 0 case AVGMG: return op_VGM, 3, 0 case AVISTR: return op_VISTR, 0, 0 case AVISTRB: return op_VISTR, 0, 0 case AVISTRH: return op_VISTR, 1, 0 case AVISTRF: return op_VISTR, 2, 0 case AVISTRBS: return op_VISTR, 0, 1 case AVISTRHS: return op_VISTR, 1, 1 case AVISTRFS: return op_VISTR, 2, 1 case AVL: return op_VL, 0, 0 case AVLR: return op_VLR, 0, 0 case AVLREP: return op_VLREP, 0, 0 case AVLREPB: return op_VLREP, 0, 0 case AVLREPH: return op_VLREP, 1, 0 case AVLREPF: return op_VLREP, 2, 0 case AVLREPG: return op_VLREP, 3, 0 case AVLC: return op_VLC, 0, 0 case AVLCB: return op_VLC, 0, 0 case AVLCH: return op_VLC, 1, 0 case AVLCF: return op_VLC, 2, 0 case AVLCG: return op_VLC, 3, 0 case AVLEH: return op_VLEH, 0, 0 case AVLEF: return op_VLEF, 0, 0 case AVLEG: return op_VLEG, 0, 0 case 
AVLEB: return op_VLEB, 0, 0 case AVLEIH: return op_VLEIH, 0, 0 case AVLEIF: return op_VLEIF, 0, 0 case AVLEIG: return op_VLEIG, 0, 0 case AVLEIB: return op_VLEIB, 0, 0 case AVFI: return op_VFI, 0, 0 case AVFIDB: return op_VFI, 3, 0 case AWFIDB: return op_VFI, 3, 0 case AVLGV: return op_VLGV, 0, 0 case AVLGVB: return op_VLGV, 0, 0 case AVLGVH: return op_VLGV, 1, 0 case AVLGVF: return op_VLGV, 2, 0 case AVLGVG: return op_VLGV, 3, 0 case AVLLEZ: return op_VLLEZ, 0, 0 case AVLLEZB: return op_VLLEZ, 0, 0 case AVLLEZH: return op_VLLEZ, 1, 0 case AVLLEZF: return op_VLLEZ, 2, 0 case AVLLEZG: return op_VLLEZ, 3, 0 case AVLM: return op_VLM, 0, 0 case AVLP: return op_VLP, 0, 0 case AVLPB: return op_VLP, 0, 0 case AVLPH: return op_VLP, 1, 0 case AVLPF: return op_VLP, 2, 0 case AVLPG: return op_VLP, 3, 0 case AVLBB: return op_VLBB, 0, 0 case AVLVG: return op_VLVG, 0, 0 case AVLVGB: return op_VLVG, 0, 0 case AVLVGH: return op_VLVG, 1, 0 case AVLVGF: return op_VLVG, 2, 0 case AVLVGG: return op_VLVG, 3, 0 case AVLVGP: return op_VLVGP, 0, 0 case AVLL: return op_VLL, 0, 0 case AVMX: return op_VMX, 0, 0 case AVMXB: return op_VMX, 0, 0 case AVMXH: return op_VMX, 1, 0 case AVMXF: return op_VMX, 2, 0 case AVMXG: return op_VMX, 3, 0 case AVMXL: return op_VMXL, 0, 0 case AVMXLB: return op_VMXL, 0, 0 case AVMXLH: return op_VMXL, 1, 0 case AVMXLF: return op_VMXL, 2, 0 case AVMXLG: return op_VMXL, 3, 0 case AVMRH: return op_VMRH, 0, 0 case AVMRHB: return op_VMRH, 0, 0 case AVMRHH: return op_VMRH, 1, 0 case AVMRHF: return op_VMRH, 2, 0 case AVMRHG: return op_VMRH, 3, 0 case AVMRL: return op_VMRL, 0, 0 case AVMRLB: return op_VMRL, 0, 0 case AVMRLH: return op_VMRL, 1, 0 case AVMRLF: return op_VMRL, 2, 0 case AVMRLG: return op_VMRL, 3, 0 case AVMN: return op_VMN, 0, 0 case AVMNB: return op_VMN, 0, 0 case AVMNH: return op_VMN, 1, 0 case AVMNF: return op_VMN, 2, 0 case AVMNG: return op_VMN, 3, 0 case AVMNL: return op_VMNL, 0, 0 case AVMNLB: return op_VMNL, 0, 0 case AVMNLH: return op_VMNL, 1, 0 
case AVMNLF: return op_VMNL, 2, 0 case AVMNLG: return op_VMNL, 3, 0 case AVMAE: return op_VMAE, 0, 0 case AVMAEB: return op_VMAE, 0, 0 case AVMAEH: return op_VMAE, 1, 0 case AVMAEF: return op_VMAE, 2, 0 case AVMAH: return op_VMAH, 0, 0 case AVMAHB: return op_VMAH, 0, 0 case AVMAHH: return op_VMAH, 1, 0 case AVMAHF: return op_VMAH, 2, 0 case AVMALE: return op_VMALE, 0, 0 case AVMALEB: return op_VMALE, 0, 0 case AVMALEH: return op_VMALE, 1, 0 case AVMALEF: return op_VMALE, 2, 0 case AVMALH: return op_VMALH, 0, 0 case AVMALHB: return op_VMALH, 0, 0 case AVMALHH: return op_VMALH, 1, 0 case AVMALHF: return op_VMALH, 2, 0 case AVMALO: return op_VMALO, 0, 0 case AVMALOB: return op_VMALO, 0, 0 case AVMALOH: return op_VMALO, 1, 0 case AVMALOF: return op_VMALO, 2, 0 case AVMAL: return op_VMAL, 0, 0 case AVMALB: return op_VMAL, 0, 0 case AVMALHW: return op_VMAL, 1, 0 case AVMALF: return op_VMAL, 2, 0 case AVMAO: return op_VMAO, 0, 0 case AVMAOB: return op_VMAO, 0, 0 case AVMAOH: return op_VMAO, 1, 0 case AVMAOF: return op_VMAO, 2, 0 case AVME: return op_VME, 0, 0 case AVMEB: return op_VME, 0, 0 case AVMEH: return op_VME, 1, 0 case AVMEF: return op_VME, 2, 0 case AVMH: return op_VMH, 0, 0 case AVMHB: return op_VMH, 0, 0 case AVMHH: return op_VMH, 1, 0 case AVMHF: return op_VMH, 2, 0 case AVMLE: return op_VMLE, 0, 0 case AVMLEB: return op_VMLE, 0, 0 case AVMLEH: return op_VMLE, 1, 0 case AVMLEF: return op_VMLE, 2, 0 case AVMLH: return op_VMLH, 0, 0 case AVMLHB: return op_VMLH, 0, 0 case AVMLHH: return op_VMLH, 1, 0 case AVMLHF: return op_VMLH, 2, 0 case AVMLO: return op_VMLO, 0, 0 case AVMLOB: return op_VMLO, 0, 0 case AVMLOH: return op_VMLO, 1, 0 case AVMLOF: return op_VMLO, 2, 0 case AVML: return op_VML, 0, 0 case AVMLB: return op_VML, 0, 0 case AVMLHW: return op_VML, 1, 0 case AVMLF: return op_VML, 2, 0 case AVMO: return op_VMO, 0, 0 case AVMOB: return op_VMO, 0, 0 case AVMOH: return op_VMO, 1, 0 case AVMOF: return op_VMO, 2, 0 case AVNO: return op_VNO, 0, 0 case AVNOT: 
return op_VNO, 0, 0 case AVO: return op_VO, 0, 0 case AVPK: return op_VPK, 0, 0 case AVPKH: return op_VPK, 1, 0 case AVPKF: return op_VPK, 2, 0 case AVPKG: return op_VPK, 3, 0 case AVPKLS: return op_VPKLS, 0, 0 case AVPKLSH: return op_VPKLS, 1, 0 case AVPKLSF: return op_VPKLS, 2, 0 case AVPKLSG: return op_VPKLS, 3, 0 case AVPKLSHS: return op_VPKLS, 1, 1 case AVPKLSFS: return op_VPKLS, 2, 1 case AVPKLSGS: return op_VPKLS, 3, 1 case AVPKS: return op_VPKS, 0, 0 case AVPKSH: return op_VPKS, 1, 0 case AVPKSF: return op_VPKS, 2, 0 case AVPKSG: return op_VPKS, 3, 0 case AVPKSHS: return op_VPKS, 1, 1 case AVPKSFS: return op_VPKS, 2, 1 case AVPKSGS: return op_VPKS, 3, 1 case AVPERM: return op_VPERM, 0, 0 case AVPDI: return op_VPDI, 0, 0 case AVPOPCT: return op_VPOPCT, 0, 0 case AVREP: return op_VREP, 0, 0 case AVREPB: return op_VREP, 0, 0 case AVREPH: return op_VREP, 1, 0 case AVREPF: return op_VREP, 2, 0 case AVREPG: return op_VREP, 3, 0 case AVREPI: return op_VREPI, 0, 0 case AVREPIB: return op_VREPI, 0, 0 case AVREPIH: return op_VREPI, 1, 0 case AVREPIF: return op_VREPI, 2, 0 case AVREPIG: return op_VREPI, 3, 0 case AVSCEF: return op_VSCEF, 0, 0 case AVSCEG: return op_VSCEG, 0, 0 case AVSEL: return op_VSEL, 0, 0 case AVSL: return op_VSL, 0, 0 case AVSLB: return op_VSLB, 0, 0 case AVSLDB: return op_VSLDB, 0, 0 case AVSRA: return op_VSRA, 0, 0 case AVSRAB: return op_VSRAB, 0, 0 case AVSRL: return op_VSRL, 0, 0 case AVSRLB: return op_VSRLB, 0, 0 case AVSEG: return op_VSEG, 0, 0 case AVSEGB: return op_VSEG, 0, 0 case AVSEGH: return op_VSEG, 1, 0 case AVSEGF: return op_VSEG, 2, 0 case AVST: return op_VST, 0, 0 case AVSTEH: return op_VSTEH, 0, 0 case AVSTEF: return op_VSTEF, 0, 0 case AVSTEG: return op_VSTEG, 0, 0 case AVSTEB: return op_VSTEB, 0, 0 case AVSTM: return op_VSTM, 0, 0 case AVSTL: return op_VSTL, 0, 0 case AVSTRC: return op_VSTRC, 0, 0 case AVSTRCB: return op_VSTRC, 0, 0 case AVSTRCH: return op_VSTRC, 1, 0 case AVSTRCF: return op_VSTRC, 2, 0 case AVSTRCBS: return 
op_VSTRC, 0, 1 case AVSTRCHS: return op_VSTRC, 1, 1 case AVSTRCFS: return op_VSTRC, 2, 1 case AVSTRCZB: return op_VSTRC, 0, 2 case AVSTRCZH: return op_VSTRC, 1, 2 case AVSTRCZF: return op_VSTRC, 2, 2 case AVSTRCZBS: return op_VSTRC, 0, 3 case AVSTRCZHS: return op_VSTRC, 1, 3 case AVSTRCZFS: return op_VSTRC, 2, 3 case AVS: return op_VS, 0, 0 case AVSB: return op_VS, 0, 0 case AVSH: return op_VS, 1, 0 case AVSF: return op_VS, 2, 0 case AVSG: return op_VS, 3, 0 case AVSQ: return op_VS, 4, 0 case AVSCBI: return op_VSCBI, 0, 0 case AVSCBIB: return op_VSCBI, 0, 0 case AVSCBIH: return op_VSCBI, 1, 0 case AVSCBIF: return op_VSCBI, 2, 0 case AVSCBIG: return op_VSCBI, 3, 0 case AVSCBIQ: return op_VSCBI, 4, 0 case AVSBCBI: return op_VSBCBI, 0, 0 case AVSBCBIQ: return op_VSBCBI, 4, 0 case AVSBI: return op_VSBI, 0, 0 case AVSBIQ: return op_VSBI, 4, 0 case AVSUMG: return op_VSUMG, 0, 0 case AVSUMGH: return op_VSUMG, 1, 0 case AVSUMGF: return op_VSUMG, 2, 0 case AVSUMQ: return op_VSUMQ, 0, 0 case AVSUMQF: return op_VSUMQ, 2, 0 case AVSUMQG: return op_VSUMQ, 3, 0 case AVSUM: return op_VSUM, 0, 0 case AVSUMB: return op_VSUM, 0, 0 case AVSUMH: return op_VSUM, 1, 0 case AVTM: return op_VTM, 0, 0 case AVUPH: return op_VUPH, 0, 0 case AVUPHB: return op_VUPH, 0, 0 case AVUPHH: return op_VUPH, 1, 0 case AVUPHF: return op_VUPH, 2, 0 case AVUPLH: return op_VUPLH, 0, 0 case AVUPLHB: return op_VUPLH, 0, 0 case AVUPLHH: return op_VUPLH, 1, 0 case AVUPLHF: return op_VUPLH, 2, 0 case AVUPLL: return op_VUPLL, 0, 0 case AVUPLLB: return op_VUPLL, 0, 0 case AVUPLLH: return op_VUPLL, 1, 0 case AVUPLLF: return op_VUPLL, 2, 0 case AVUPL: return op_VUPL, 0, 0 case AVUPLB: return op_VUPL, 0, 0 case AVUPLHW: return op_VUPL, 1, 0 case AVUPLF: return op_VUPL, 2, 0 } } // singleElementMask returns the single element mask bits required for the // given instruction. 
func singleElementMask(as obj.As) uint32 { switch as { case AWFADB, AWFK, AWFKDB, AWFCEDB, AWFCEDBS, AWFCHDB, AWFCHDBS, AWFCHEDB, AWFCHEDBS, AWFC, AWFCDB, AWCDGB, AWCDLGB, AWCGDB, AWCLGDB, AWFDDB, AWLDEB, AWLEDB, AWFMDB, AWFMADB, AWFMSDB, AWFPSODB, AWFLCDB, AWFLNDB, AWFLPDB, AWFSQDB, AWFSDB, AWFTCIDB, AWFIDB: return 8 case AVMSLEG: return 8 case AVMSLOG: return 4 case AVMSLEOG: return 12 } return 0 }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/twitchyliquid64/golang-asm/obj/s390x/listz.go
vendor/github.com/twitchyliquid64/golang-asm/obj/s390x/listz.go
// Based on cmd/internal/obj/ppc64/list9.go. // // Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. // Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) // Portions Copyright © 1997-1999 Vita Nuova Limited // Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com) // Portions Copyright © 2004,2006 Bruce Ellis // Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) // Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others // Portions Copyright © 2009 The Go Authors. All rights reserved. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. package s390x import ( "github.com/twitchyliquid64/golang-asm/obj" "fmt" ) func init() { obj.RegisterRegister(obj.RBaseS390X, REG_R0+1024, rconv) obj.RegisterOpcode(obj.ABaseS390X, Anames) } func rconv(r int) string { if r == 0 { return "NONE" } if r == REGG { // Special case. 
return "g" } if REG_R0 <= r && r <= REG_R15 { return fmt.Sprintf("R%d", r-REG_R0) } if REG_F0 <= r && r <= REG_F15 { return fmt.Sprintf("F%d", r-REG_F0) } if REG_AR0 <= r && r <= REG_AR15 { return fmt.Sprintf("AR%d", r-REG_AR0) } if REG_V0 <= r && r <= REG_V31 { return fmt.Sprintf("V%d", r-REG_V0) } return fmt.Sprintf("Rgok(%d)", r-obj.RBaseS390X) } func DRconv(a int) string { s := "C_??" if a >= C_NONE && a <= C_NCLASS { s = cnamesz[a] } var fp string fp += s return fp }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/twitchyliquid64/golang-asm/obj/s390x/condition_code.go
vendor/github.com/twitchyliquid64/golang-asm/obj/s390x/condition_code.go
// Copyright 2019 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package s390x import ( "fmt" ) // CCMask represents a 4-bit condition code mask. Bits that // are not part of the mask should be 0. // // Condition code masks represent the 4 possible values of // the 2-bit condition code as individual bits. Since IBM Z // is a big-endian platform bits are numbered from left to // right. The lowest value, 0, is represented by 8 (0b1000) // and the highest value, 3, is represented by 1 (0b0001). // // Note that condition code values have different semantics // depending on the instruction that set the condition code. // The names given here assume that the condition code was // set by an integer or floating point comparison. Other // instructions may use these same codes to indicate // different results such as a carry or overflow. type CCMask uint8 const ( Never CCMask = 0 // no-op // 1-bit masks Equal CCMask = 1 << 3 Less CCMask = 1 << 2 Greater CCMask = 1 << 1 Unordered CCMask = 1 << 0 // 2-bit masks EqualOrUnordered CCMask = Equal | Unordered // not less and not greater LessOrEqual CCMask = Less | Equal // ordered and not greater LessOrGreater CCMask = Less | Greater // ordered and not equal LessOrUnordered CCMask = Less | Unordered // not greater and not equal GreaterOrEqual CCMask = Greater | Equal // ordered and not less GreaterOrUnordered CCMask = Greater | Unordered // not less and not equal // 3-bit masks NotEqual CCMask = Always ^ Equal NotLess CCMask = Always ^ Less NotGreater CCMask = Always ^ Greater NotUnordered CCMask = Always ^ Unordered // 4-bit mask Always CCMask = Equal | Less | Greater | Unordered // useful aliases Carry CCMask = GreaterOrUnordered NoCarry CCMask = LessOrEqual Borrow CCMask = NoCarry NoBorrow CCMask = Carry ) // Inverse returns the complement of the condition code mask. 
func (c CCMask) Inverse() CCMask { return c ^ Always } // ReverseComparison swaps the bits at 0b0100 and 0b0010 in the mask, // reversing the behavior of greater than and less than conditions. func (c CCMask) ReverseComparison() CCMask { r := c & EqualOrUnordered if c&Less != 0 { r |= Greater } if c&Greater != 0 { r |= Less } return r } func (c CCMask) String() string { switch c { // 0-bit mask case Never: return "Never" // 1-bit masks case Equal: return "Equal" case Less: return "Less" case Greater: return "Greater" case Unordered: return "Unordered" // 2-bit masks case EqualOrUnordered: return "EqualOrUnordered" case LessOrEqual: return "LessOrEqual" case LessOrGreater: return "LessOrGreater" case LessOrUnordered: return "LessOrUnordered" case GreaterOrEqual: return "GreaterOrEqual" case GreaterOrUnordered: return "GreaterOrUnordered" // 3-bit masks case NotEqual: return "NotEqual" case NotLess: return "NotLess" case NotGreater: return "NotGreater" case NotUnordered: return "NotUnordered" // 4-bit mask case Always: return "Always" } // invalid return fmt.Sprintf("Invalid (%#x)", c) }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/twitchyliquid64/golang-asm/obj/s390x/rotate.go
vendor/github.com/twitchyliquid64/golang-asm/obj/s390x/rotate.go
// Copyright 2019 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package s390x // RotateParams represents the immediates required for a "rotate // then ... selected bits instruction". // // The Start and End values are the indexes that represent // the masked region. They are inclusive and are in big- // endian order (bit 0 is the MSB, bit 63 is the LSB). They // may wrap around. // // Some examples: // // Masked region | Start | End // --------------------------+-------+---- // 0x00_00_00_00_00_00_00_0f | 60 | 63 // 0xf0_00_00_00_00_00_00_00 | 0 | 3 // 0xf0_00_00_00_00_00_00_0f | 60 | 3 // // The Amount value represents the amount to rotate the // input left by. Note that this rotation is performed // before the masked region is used. type RotateParams struct { Start uint8 // big-endian start bit index [0..63] End uint8 // big-endian end bit index [0..63] Amount uint8 // amount to rotate left } func NewRotateParams(start, end, amount int64) RotateParams { if start&^63 != 0 { panic("start out of bounds") } if end&^63 != 0 { panic("end out of bounds") } if amount&^63 != 0 { panic("amount out of bounds") } return RotateParams{ Start: uint8(start), End: uint8(end), Amount: uint8(amount), } }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/twitchyliquid64/golang-asm/obj/wasm/a.out.go
vendor/github.com/twitchyliquid64/golang-asm/obj/wasm/a.out.go
// Copyright 2018 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package wasm import "github.com/twitchyliquid64/golang-asm/obj" //go:generate go run ../stringer.go -i $GOFILE -o anames.go -p wasm const ( /* mark flags */ DONE = 1 << iota PRESERVEFLAGS // not allowed to clobber flags ) /* * wasm */ const ( ACallImport = obj.ABaseWasm + obj.A_ARCHSPECIFIC + iota AGet ASet ATee ANot // alias for I32Eqz // The following are low-level WebAssembly instructions. // Their order matters, since it matches the opcode encoding. // Gaps in the encoding are indicated by comments. AUnreachable // opcode 0x00 ANop ABlock ALoop AIf AElse AEnd // opcode 0x0B ABr ABrIf ABrTable // ACall and AReturn are WebAssembly instructions. obj.ACALL and obj.ARET are higher level instructions // with Go semantics, e.g. they manipulate the Go stack on the linear memory. AReturn ACall ACallIndirect ADrop // opcode 0x1A ASelect ALocalGet // opcode 0x20 ALocalSet ALocalTee AGlobalGet AGlobalSet AI32Load // opcode 0x28 AI64Load AF32Load AF64Load AI32Load8S AI32Load8U AI32Load16S AI32Load16U AI64Load8S AI64Load8U AI64Load16S AI64Load16U AI64Load32S AI64Load32U AI32Store AI64Store AF32Store AF64Store AI32Store8 AI32Store16 AI64Store8 AI64Store16 AI64Store32 ACurrentMemory AGrowMemory AI32Const AI64Const AF32Const AF64Const AI32Eqz AI32Eq AI32Ne AI32LtS AI32LtU AI32GtS AI32GtU AI32LeS AI32LeU AI32GeS AI32GeU AI64Eqz AI64Eq AI64Ne AI64LtS AI64LtU AI64GtS AI64GtU AI64LeS AI64LeU AI64GeS AI64GeU AF32Eq AF32Ne AF32Lt AF32Gt AF32Le AF32Ge AF64Eq AF64Ne AF64Lt AF64Gt AF64Le AF64Ge AI32Clz AI32Ctz AI32Popcnt AI32Add AI32Sub AI32Mul AI32DivS AI32DivU AI32RemS AI32RemU AI32And AI32Or AI32Xor AI32Shl AI32ShrS AI32ShrU AI32Rotl AI32Rotr AI64Clz AI64Ctz AI64Popcnt AI64Add AI64Sub AI64Mul AI64DivS AI64DivU AI64RemS AI64RemU AI64And AI64Or AI64Xor AI64Shl AI64ShrS AI64ShrU AI64Rotl AI64Rotr AF32Abs AF32Neg AF32Ceil AF32Floor 
AF32Trunc AF32Nearest AF32Sqrt AF32Add AF32Sub AF32Mul AF32Div AF32Min AF32Max AF32Copysign AF64Abs AF64Neg AF64Ceil AF64Floor AF64Trunc AF64Nearest AF64Sqrt AF64Add AF64Sub AF64Mul AF64Div AF64Min AF64Max AF64Copysign AI32WrapI64 AI32TruncF32S AI32TruncF32U AI32TruncF64S AI32TruncF64U AI64ExtendI32S AI64ExtendI32U AI64TruncF32S AI64TruncF32U AI64TruncF64S AI64TruncF64U AF32ConvertI32S AF32ConvertI32U AF32ConvertI64S AF32ConvertI64U AF32DemoteF64 AF64ConvertI32S AF64ConvertI32U AF64ConvertI64S AF64ConvertI64U AF64PromoteF32 AI32ReinterpretF32 AI64ReinterpretF64 AF32ReinterpretI32 AF64ReinterpretI64 AI32Extend8S AI32Extend16S AI64Extend8S AI64Extend16S AI64Extend32S AI32TruncSatF32S // opcode 0xFC 0x00 AI32TruncSatF32U AI32TruncSatF64S AI32TruncSatF64U AI64TruncSatF32S AI64TruncSatF32U AI64TruncSatF64S AI64TruncSatF64U ALast // Sentinel: End of low-level WebAssembly instructions. ARESUMEPOINT // ACALLNORESUME is a call which is not followed by a resume point. // It is allowed inside of WebAssembly blocks, whereas obj.ACALL is not. // However, it is not allowed to switch goroutines while inside of an ACALLNORESUME call. ACALLNORESUME ARETUNWIND AMOVB AMOVH AMOVW AMOVD AWORD ALAST ) const ( REG_NONE = 0 ) const ( // globals REG_SP = obj.RBaseWasm + iota // SP is currently 32-bit, until 64-bit memory operations are available REG_CTXT REG_g // RET* are used by runtime.return0 and runtime.reflectcall. These functions pass return values in registers. 
REG_RET0 REG_RET1 REG_RET2 REG_RET3 REG_PAUSE // i32 locals REG_R0 REG_R1 REG_R2 REG_R3 REG_R4 REG_R5 REG_R6 REG_R7 REG_R8 REG_R9 REG_R10 REG_R11 REG_R12 REG_R13 REG_R14 REG_R15 // f32 locals REG_F0 REG_F1 REG_F2 REG_F3 REG_F4 REG_F5 REG_F6 REG_F7 REG_F8 REG_F9 REG_F10 REG_F11 REG_F12 REG_F13 REG_F14 REG_F15 // f64 locals REG_F16 REG_F17 REG_F18 REG_F19 REG_F20 REG_F21 REG_F22 REG_F23 REG_F24 REG_F25 REG_F26 REG_F27 REG_F28 REG_F29 REG_F30 REG_F31 REG_PC_B // also first parameter, i32 MAXREG MINREG = REG_SP REGSP = REG_SP REGCTXT = REG_CTXT REGG = REG_g )
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/twitchyliquid64/golang-asm/obj/wasm/anames.go
vendor/github.com/twitchyliquid64/golang-asm/obj/wasm/anames.go
// Code generated by stringer -i a.out.go -o anames.go -p wasm; DO NOT EDIT. package wasm import "github.com/twitchyliquid64/golang-asm/obj" var Anames = []string{ obj.A_ARCHSPECIFIC: "CallImport", "Get", "Set", "Tee", "Not", "Unreachable", "Nop", "Block", "Loop", "If", "Else", "End", "Br", "BrIf", "BrTable", "Return", "Call", "CallIndirect", "Drop", "Select", "LocalGet", "LocalSet", "LocalTee", "GlobalGet", "GlobalSet", "I32Load", "I64Load", "F32Load", "F64Load", "I32Load8S", "I32Load8U", "I32Load16S", "I32Load16U", "I64Load8S", "I64Load8U", "I64Load16S", "I64Load16U", "I64Load32S", "I64Load32U", "I32Store", "I64Store", "F32Store", "F64Store", "I32Store8", "I32Store16", "I64Store8", "I64Store16", "I64Store32", "CurrentMemory", "GrowMemory", "I32Const", "I64Const", "F32Const", "F64Const", "I32Eqz", "I32Eq", "I32Ne", "I32LtS", "I32LtU", "I32GtS", "I32GtU", "I32LeS", "I32LeU", "I32GeS", "I32GeU", "I64Eqz", "I64Eq", "I64Ne", "I64LtS", "I64LtU", "I64GtS", "I64GtU", "I64LeS", "I64LeU", "I64GeS", "I64GeU", "F32Eq", "F32Ne", "F32Lt", "F32Gt", "F32Le", "F32Ge", "F64Eq", "F64Ne", "F64Lt", "F64Gt", "F64Le", "F64Ge", "I32Clz", "I32Ctz", "I32Popcnt", "I32Add", "I32Sub", "I32Mul", "I32DivS", "I32DivU", "I32RemS", "I32RemU", "I32And", "I32Or", "I32Xor", "I32Shl", "I32ShrS", "I32ShrU", "I32Rotl", "I32Rotr", "I64Clz", "I64Ctz", "I64Popcnt", "I64Add", "I64Sub", "I64Mul", "I64DivS", "I64DivU", "I64RemS", "I64RemU", "I64And", "I64Or", "I64Xor", "I64Shl", "I64ShrS", "I64ShrU", "I64Rotl", "I64Rotr", "F32Abs", "F32Neg", "F32Ceil", "F32Floor", "F32Trunc", "F32Nearest", "F32Sqrt", "F32Add", "F32Sub", "F32Mul", "F32Div", "F32Min", "F32Max", "F32Copysign", "F64Abs", "F64Neg", "F64Ceil", "F64Floor", "F64Trunc", "F64Nearest", "F64Sqrt", "F64Add", "F64Sub", "F64Mul", "F64Div", "F64Min", "F64Max", "F64Copysign", "I32WrapI64", "I32TruncF32S", "I32TruncF32U", "I32TruncF64S", "I32TruncF64U", "I64ExtendI32S", "I64ExtendI32U", "I64TruncF32S", "I64TruncF32U", "I64TruncF64S", "I64TruncF64U", 
"F32ConvertI32S", "F32ConvertI32U", "F32ConvertI64S", "F32ConvertI64U", "F32DemoteF64", "F64ConvertI32S", "F64ConvertI32U", "F64ConvertI64S", "F64ConvertI64U", "F64PromoteF32", "I32ReinterpretF32", "I64ReinterpretF64", "F32ReinterpretI32", "F64ReinterpretI64", "I32Extend8S", "I32Extend16S", "I64Extend8S", "I64Extend16S", "I64Extend32S", "I32TruncSatF32S", "I32TruncSatF32U", "I32TruncSatF64S", "I32TruncSatF64U", "I64TruncSatF32S", "I64TruncSatF32U", "I64TruncSatF64S", "I64TruncSatF64U", "Last", "RESUMEPOINT", "CALLNORESUME", "RETUNWIND", "MOVB", "MOVH", "MOVW", "MOVD", "WORD", "LAST", }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/twitchyliquid64/golang-asm/obj/wasm/wasmobj.go
vendor/github.com/twitchyliquid64/golang-asm/obj/wasm/wasmobj.go
// Copyright 2018 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package wasm import ( "bytes" "github.com/twitchyliquid64/golang-asm/obj" "github.com/twitchyliquid64/golang-asm/objabi" "github.com/twitchyliquid64/golang-asm/sys" "encoding/binary" "fmt" "io" "math" ) var Register = map[string]int16{ "SP": REG_SP, "CTXT": REG_CTXT, "g": REG_g, "RET0": REG_RET0, "RET1": REG_RET1, "RET2": REG_RET2, "RET3": REG_RET3, "PAUSE": REG_PAUSE, "R0": REG_R0, "R1": REG_R1, "R2": REG_R2, "R3": REG_R3, "R4": REG_R4, "R5": REG_R5, "R6": REG_R6, "R7": REG_R7, "R8": REG_R8, "R9": REG_R9, "R10": REG_R10, "R11": REG_R11, "R12": REG_R12, "R13": REG_R13, "R14": REG_R14, "R15": REG_R15, "F0": REG_F0, "F1": REG_F1, "F2": REG_F2, "F3": REG_F3, "F4": REG_F4, "F5": REG_F5, "F6": REG_F6, "F7": REG_F7, "F8": REG_F8, "F9": REG_F9, "F10": REG_F10, "F11": REG_F11, "F12": REG_F12, "F13": REG_F13, "F14": REG_F14, "F15": REG_F15, "F16": REG_F16, "F17": REG_F17, "F18": REG_F18, "F19": REG_F19, "F20": REG_F20, "F21": REG_F21, "F22": REG_F22, "F23": REG_F23, "F24": REG_F24, "F25": REG_F25, "F26": REG_F26, "F27": REG_F27, "F28": REG_F28, "F29": REG_F29, "F30": REG_F30, "F31": REG_F31, "PC_B": REG_PC_B, } var registerNames []string func init() { obj.RegisterRegister(MINREG, MAXREG, rconv) obj.RegisterOpcode(obj.ABaseWasm, Anames) registerNames = make([]string, MAXREG-MINREG) for name, reg := range Register { registerNames[reg-MINREG] = name } } func rconv(r int) string { return registerNames[r-MINREG] } var unaryDst = map[obj.As]bool{ ASet: true, ATee: true, ACall: true, ACallIndirect: true, ACallImport: true, ABr: true, ABrIf: true, ABrTable: true, AI32Store: true, AI64Store: true, AF32Store: true, AF64Store: true, AI32Store8: true, AI32Store16: true, AI64Store8: true, AI64Store16: true, AI64Store32: true, ACALLNORESUME: true, } var Linkwasm = obj.LinkArch{ Arch: sys.ArchWasm, Init: instinit, Preprocess: 
preprocess, Assemble: assemble, UnaryDst: unaryDst, } var ( morestack *obj.LSym morestackNoCtxt *obj.LSym gcWriteBarrier *obj.LSym sigpanic *obj.LSym sigpanic0 *obj.LSym deferreturn *obj.LSym jmpdefer *obj.LSym ) const ( /* mark flags */ WasmImport = 1 << 0 ) func instinit(ctxt *obj.Link) { morestack = ctxt.Lookup("runtime.morestack") morestackNoCtxt = ctxt.Lookup("runtime.morestack_noctxt") gcWriteBarrier = ctxt.Lookup("runtime.gcWriteBarrier") sigpanic = ctxt.LookupABI("runtime.sigpanic", obj.ABIInternal) sigpanic0 = ctxt.LookupABI("runtime.sigpanic", 0) // sigpanic called from assembly, which has ABI0 deferreturn = ctxt.LookupABI("runtime.deferreturn", obj.ABIInternal) // jmpdefer is defined in assembly as ABI0, but what we're // looking for is the *call* to jmpdefer from the Go function // deferreturn, so we're looking for the ABIInternal version // of jmpdefer that's called by Go. jmpdefer = ctxt.LookupABI(`"".jmpdefer`, obj.ABIInternal) } func preprocess(ctxt *obj.Link, s *obj.LSym, newprog obj.ProgAlloc) { appendp := func(p *obj.Prog, as obj.As, args ...obj.Addr) *obj.Prog { if p.As != obj.ANOP { p2 := obj.Appendp(p, newprog) p2.Pc = p.Pc p = p2 } p.As = as switch len(args) { case 0: p.From = obj.Addr{} p.To = obj.Addr{} case 1: if unaryDst[as] { p.From = obj.Addr{} p.To = args[0] } else { p.From = args[0] p.To = obj.Addr{} } case 2: p.From = args[0] p.To = args[1] default: panic("bad args") } return p } framesize := s.Func.Text.To.Offset if framesize < 0 { panic("bad framesize") } s.Func.Args = s.Func.Text.To.Val.(int32) s.Func.Locals = int32(framesize) if s.Func.Text.From.Sym.Wrapper() { // if g._panic != nil && g._panic.argp == FP { // g._panic.argp = bottom-of-frame // } // // MOVD g_panic(g), R0 // Get R0 // I64Eqz // Not // If // Get SP // I64ExtendI32U // I64Const $framesize+8 // I64Add // I64Load panic_argp(R0) // I64Eq // If // MOVD SP, panic_argp(R0) // End // End gpanic := obj.Addr{ Type: obj.TYPE_MEM, Reg: REGG, Offset: 4 * 8, // g_panic } 
panicargp := obj.Addr{ Type: obj.TYPE_MEM, Reg: REG_R0, Offset: 0, // panic.argp } p := s.Func.Text p = appendp(p, AMOVD, gpanic, regAddr(REG_R0)) p = appendp(p, AGet, regAddr(REG_R0)) p = appendp(p, AI64Eqz) p = appendp(p, ANot) p = appendp(p, AIf) p = appendp(p, AGet, regAddr(REG_SP)) p = appendp(p, AI64ExtendI32U) p = appendp(p, AI64Const, constAddr(framesize+8)) p = appendp(p, AI64Add) p = appendp(p, AI64Load, panicargp) p = appendp(p, AI64Eq) p = appendp(p, AIf) p = appendp(p, AMOVD, regAddr(REG_SP), panicargp) p = appendp(p, AEnd) p = appendp(p, AEnd) } if framesize > 0 { p := s.Func.Text p = appendp(p, AGet, regAddr(REG_SP)) p = appendp(p, AI32Const, constAddr(framesize)) p = appendp(p, AI32Sub) p = appendp(p, ASet, regAddr(REG_SP)) p.Spadj = int32(framesize) } // Introduce resume points for CALL instructions // and collect other explicit resume points. numResumePoints := 0 explicitBlockDepth := 0 pc := int64(0) // pc is only incremented when necessary, this avoids bloat of the BrTable instruction var tableIdxs []uint64 tablePC := int64(0) base := ctxt.PosTable.Pos(s.Func.Text.Pos).Base() for p := s.Func.Text; p != nil; p = p.Link { prevBase := base base = ctxt.PosTable.Pos(p.Pos).Base() switch p.As { case ABlock, ALoop, AIf: explicitBlockDepth++ case AEnd: if explicitBlockDepth == 0 { panic("End without block") } explicitBlockDepth-- case ARESUMEPOINT: if explicitBlockDepth != 0 { panic("RESUME can only be used on toplevel") } p.As = AEnd for tablePC <= pc { tableIdxs = append(tableIdxs, uint64(numResumePoints)) tablePC++ } numResumePoints++ pc++ case obj.ACALL: if explicitBlockDepth != 0 { panic("CALL can only be used on toplevel, try CALLNORESUME instead") } appendp(p, ARESUMEPOINT) } p.Pc = pc // Increase pc whenever some pc-value table needs a new entry. Don't increase it // more often to avoid bloat of the BrTable instruction. // The "base != prevBase" condition detects inlined instructions. 
They are an // implicit call, so entering and leaving this section affects the stack trace. if p.As == ACALLNORESUME || p.As == obj.ANOP || p.As == ANop || p.Spadj != 0 || base != prevBase { pc++ if p.To.Sym == sigpanic { // The panic stack trace expects the PC at the call of sigpanic, // not the next one. However, runtime.Caller subtracts 1 from the // PC. To make both PC and PC-1 work (have the same line number), // we advance the PC by 2 at sigpanic. pc++ } } } tableIdxs = append(tableIdxs, uint64(numResumePoints)) s.Size = pc + 1 if !s.Func.Text.From.Sym.NoSplit() { p := s.Func.Text if framesize <= objabi.StackSmall { // small stack: SP <= stackguard // Get SP // Get g // I32WrapI64 // I32Load $stackguard0 // I32GtU p = appendp(p, AGet, regAddr(REG_SP)) p = appendp(p, AGet, regAddr(REGG)) p = appendp(p, AI32WrapI64) p = appendp(p, AI32Load, constAddr(2*int64(ctxt.Arch.PtrSize))) // G.stackguard0 p = appendp(p, AI32LeU) } else { // large stack: SP-framesize <= stackguard-StackSmall // SP <= stackguard+(framesize-StackSmall) // Get SP // Get g // I32WrapI64 // I32Load $stackguard0 // I32Const $(framesize-StackSmall) // I32Add // I32GtU p = appendp(p, AGet, regAddr(REG_SP)) p = appendp(p, AGet, regAddr(REGG)) p = appendp(p, AI32WrapI64) p = appendp(p, AI32Load, constAddr(2*int64(ctxt.Arch.PtrSize))) // G.stackguard0 p = appendp(p, AI32Const, constAddr(int64(framesize)-objabi.StackSmall)) p = appendp(p, AI32Add) p = appendp(p, AI32LeU) } // TODO(neelance): handle wraparound case p = appendp(p, AIf) p = appendp(p, obj.ACALL, constAddr(0)) if s.Func.Text.From.Sym.NeedCtxt() { p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: morestack} } else { p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: morestackNoCtxt} } p = appendp(p, AEnd) } // record the branches targeting the entry loop and the unwind exit, // their targets with be filled in later var entryPointLoopBranches []*obj.Prog var unwindExitBranches []*obj.Prog currentDepth := 0 for p 
:= s.Func.Text; p != nil; p = p.Link { switch p.As { case ABlock, ALoop, AIf: currentDepth++ case AEnd: currentDepth-- } switch p.As { case obj.AJMP: jmp := *p p.As = obj.ANOP if jmp.To.Type == obj.TYPE_BRANCH { // jump to basic block p = appendp(p, AI32Const, constAddr(jmp.To.Val.(*obj.Prog).Pc)) p = appendp(p, ASet, regAddr(REG_PC_B)) // write next basic block to PC_B p = appendp(p, ABr) // jump to beginning of entryPointLoop entryPointLoopBranches = append(entryPointLoopBranches, p) break } // low-level WebAssembly call to function switch jmp.To.Type { case obj.TYPE_MEM: if !notUsePC_B[jmp.To.Sym.Name] { // Set PC_B parameter to function entry. p = appendp(p, AI32Const, constAddr(0)) } p = appendp(p, ACall, jmp.To) case obj.TYPE_NONE: // (target PC is on stack) p = appendp(p, AI32WrapI64) p = appendp(p, AI32Const, constAddr(16)) // only needs PC_F bits (16-31), PC_B bits (0-15) are zero p = appendp(p, AI32ShrU) // Set PC_B parameter to function entry. // We need to push this before pushing the target PC_F, // so temporarily pop PC_F, using our REG_PC_B as a // scratch register, and push it back after pushing 0. p = appendp(p, ASet, regAddr(REG_PC_B)) p = appendp(p, AI32Const, constAddr(0)) p = appendp(p, AGet, regAddr(REG_PC_B)) p = appendp(p, ACallIndirect) default: panic("bad target for JMP") } p = appendp(p, AReturn) case obj.ACALL, ACALLNORESUME: call := *p p.As = obj.ANOP pcAfterCall := call.Link.Pc if call.To.Sym == sigpanic { pcAfterCall-- // sigpanic expects to be called without advancing the pc } // jmpdefer manipulates the return address on the stack so deferreturn gets called repeatedly. // Model this in WebAssembly with a loop. 
if call.To.Sym == deferreturn { p = appendp(p, ALoop) } // SP -= 8 p = appendp(p, AGet, regAddr(REG_SP)) p = appendp(p, AI32Const, constAddr(8)) p = appendp(p, AI32Sub) p = appendp(p, ASet, regAddr(REG_SP)) // write return address to Go stack p = appendp(p, AGet, regAddr(REG_SP)) p = appendp(p, AI64Const, obj.Addr{ Type: obj.TYPE_ADDR, Name: obj.NAME_EXTERN, Sym: s, // PC_F Offset: pcAfterCall, // PC_B }) p = appendp(p, AI64Store, constAddr(0)) // low-level WebAssembly call to function switch call.To.Type { case obj.TYPE_MEM: if !notUsePC_B[call.To.Sym.Name] { // Set PC_B parameter to function entry. p = appendp(p, AI32Const, constAddr(0)) } p = appendp(p, ACall, call.To) case obj.TYPE_NONE: // (target PC is on stack) p = appendp(p, AI32WrapI64) p = appendp(p, AI32Const, constAddr(16)) // only needs PC_F bits (16-31), PC_B bits (0-15) are zero p = appendp(p, AI32ShrU) // Set PC_B parameter to function entry. // We need to push this before pushing the target PC_F, // so temporarily pop PC_F, using our PC_B as a // scratch register, and push it back after pushing 0. p = appendp(p, ASet, regAddr(REG_PC_B)) p = appendp(p, AI32Const, constAddr(0)) p = appendp(p, AGet, regAddr(REG_PC_B)) p = appendp(p, ACallIndirect) default: panic("bad target for CALL") } // gcWriteBarrier has no return value, it never unwinds the stack if call.To.Sym == gcWriteBarrier { break } // jmpdefer removes the frame of deferreturn from the Go stack. // However, its WebAssembly function still returns normally, // so we need to return from deferreturn without removing its // stack frame (no RET), because the frame is already gone. 
if call.To.Sym == jmpdefer { p = appendp(p, AReturn) break } // return value of call is on the top of the stack, indicating whether to unwind the WebAssembly stack if call.As == ACALLNORESUME && call.To.Sym != sigpanic && call.To.Sym != sigpanic0 { // sigpanic unwinds the stack, but it never resumes // trying to unwind WebAssembly stack but call has no resume point, terminate with error p = appendp(p, AIf) p = appendp(p, obj.AUNDEF) p = appendp(p, AEnd) } else { // unwinding WebAssembly stack to switch goroutine, return 1 p = appendp(p, ABrIf) unwindExitBranches = append(unwindExitBranches, p) } // jump to before the call if jmpdefer has reset the return address to the call's PC if call.To.Sym == deferreturn { // get PC_B from -8(SP) p = appendp(p, AGet, regAddr(REG_SP)) p = appendp(p, AI32Const, constAddr(8)) p = appendp(p, AI32Sub) p = appendp(p, AI32Load16U, constAddr(0)) p = appendp(p, ATee, regAddr(REG_PC_B)) p = appendp(p, AI32Const, constAddr(call.Pc)) p = appendp(p, AI32Eq) p = appendp(p, ABrIf, constAddr(0)) p = appendp(p, AEnd) // end of Loop } case obj.ARET, ARETUNWIND: ret := *p p.As = obj.ANOP if framesize > 0 { // SP += framesize p = appendp(p, AGet, regAddr(REG_SP)) p = appendp(p, AI32Const, constAddr(framesize)) p = appendp(p, AI32Add) p = appendp(p, ASet, regAddr(REG_SP)) // TODO(neelance): This should theoretically set Spadj, but it only works without. // p.Spadj = int32(-framesize) } if ret.To.Type == obj.TYPE_MEM { // Set PC_B parameter to function entry. 
p = appendp(p, AI32Const, constAddr(0)) // low-level WebAssembly call to function p = appendp(p, ACall, ret.To) p = appendp(p, AReturn) break } // SP += 8 p = appendp(p, AGet, regAddr(REG_SP)) p = appendp(p, AI32Const, constAddr(8)) p = appendp(p, AI32Add) p = appendp(p, ASet, regAddr(REG_SP)) if ret.As == ARETUNWIND { // function needs to unwind the WebAssembly stack, return 1 p = appendp(p, AI32Const, constAddr(1)) p = appendp(p, AReturn) break } // not unwinding the WebAssembly stack, return 0 p = appendp(p, AI32Const, constAddr(0)) p = appendp(p, AReturn) } } for p := s.Func.Text; p != nil; p = p.Link { switch p.From.Name { case obj.NAME_AUTO: p.From.Offset += int64(framesize) case obj.NAME_PARAM: p.From.Reg = REG_SP p.From.Offset += int64(framesize) + 8 // parameters are after the frame and the 8-byte return address } switch p.To.Name { case obj.NAME_AUTO: p.To.Offset += int64(framesize) case obj.NAME_PARAM: p.To.Reg = REG_SP p.To.Offset += int64(framesize) + 8 // parameters are after the frame and the 8-byte return address } switch p.As { case AGet: if p.From.Type == obj.TYPE_ADDR { get := *p p.As = obj.ANOP switch get.From.Name { case obj.NAME_EXTERN: p = appendp(p, AI64Const, get.From) case obj.NAME_AUTO, obj.NAME_PARAM: p = appendp(p, AGet, regAddr(get.From.Reg)) if get.From.Reg == REG_SP { p = appendp(p, AI64ExtendI32U) } if get.From.Offset != 0 { p = appendp(p, AI64Const, constAddr(get.From.Offset)) p = appendp(p, AI64Add) } default: panic("bad Get: invalid name") } } case AI32Load, AI64Load, AF32Load, AF64Load, AI32Load8S, AI32Load8U, AI32Load16S, AI32Load16U, AI64Load8S, AI64Load8U, AI64Load16S, AI64Load16U, AI64Load32S, AI64Load32U: if p.From.Type == obj.TYPE_MEM { as := p.As from := p.From p.As = AGet p.From = regAddr(from.Reg) if from.Reg != REG_SP { p = appendp(p, AI32WrapI64) } p = appendp(p, as, constAddr(from.Offset)) } case AMOVB, AMOVH, AMOVW, AMOVD: mov := *p p.As = obj.ANOP var loadAs obj.As var storeAs obj.As switch mov.As { case AMOVB: 
loadAs = AI64Load8U storeAs = AI64Store8 case AMOVH: loadAs = AI64Load16U storeAs = AI64Store16 case AMOVW: loadAs = AI64Load32U storeAs = AI64Store32 case AMOVD: loadAs = AI64Load storeAs = AI64Store } appendValue := func() { switch mov.From.Type { case obj.TYPE_CONST: p = appendp(p, AI64Const, constAddr(mov.From.Offset)) case obj.TYPE_ADDR: switch mov.From.Name { case obj.NAME_NONE, obj.NAME_PARAM, obj.NAME_AUTO: p = appendp(p, AGet, regAddr(mov.From.Reg)) if mov.From.Reg == REG_SP { p = appendp(p, AI64ExtendI32U) } p = appendp(p, AI64Const, constAddr(mov.From.Offset)) p = appendp(p, AI64Add) case obj.NAME_EXTERN: p = appendp(p, AI64Const, mov.From) default: panic("bad name for MOV") } case obj.TYPE_REG: p = appendp(p, AGet, mov.From) if mov.From.Reg == REG_SP { p = appendp(p, AI64ExtendI32U) } case obj.TYPE_MEM: p = appendp(p, AGet, regAddr(mov.From.Reg)) if mov.From.Reg != REG_SP { p = appendp(p, AI32WrapI64) } p = appendp(p, loadAs, constAddr(mov.From.Offset)) default: panic("bad MOV type") } } switch mov.To.Type { case obj.TYPE_REG: appendValue() if mov.To.Reg == REG_SP { p = appendp(p, AI32WrapI64) } p = appendp(p, ASet, mov.To) case obj.TYPE_MEM: switch mov.To.Name { case obj.NAME_NONE, obj.NAME_PARAM: p = appendp(p, AGet, regAddr(mov.To.Reg)) if mov.To.Reg != REG_SP { p = appendp(p, AI32WrapI64) } case obj.NAME_EXTERN: p = appendp(p, AI32Const, obj.Addr{Type: obj.TYPE_ADDR, Name: obj.NAME_EXTERN, Sym: mov.To.Sym}) default: panic("bad MOV name") } appendValue() p = appendp(p, storeAs, constAddr(mov.To.Offset)) default: panic("bad MOV type") } case ACallImport: p.As = obj.ANOP p = appendp(p, AGet, regAddr(REG_SP)) p = appendp(p, ACall, obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: s}) p.Mark = WasmImport } } { p := s.Func.Text if len(unwindExitBranches) > 0 { p = appendp(p, ABlock) // unwindExit, used to return 1 when unwinding the stack for _, b := range unwindExitBranches { b.To = obj.Addr{Type: obj.TYPE_BRANCH, Val: p} } } if 
len(entryPointLoopBranches) > 0 { p = appendp(p, ALoop) // entryPointLoop, used to jump between basic blocks for _, b := range entryPointLoopBranches { b.To = obj.Addr{Type: obj.TYPE_BRANCH, Val: p} } } if numResumePoints > 0 { // Add Block instructions for resume points and BrTable to jump to selected resume point. for i := 0; i < numResumePoints+1; i++ { p = appendp(p, ABlock) } p = appendp(p, AGet, regAddr(REG_PC_B)) // read next basic block from PC_B p = appendp(p, ABrTable, obj.Addr{Val: tableIdxs}) p = appendp(p, AEnd) // end of Block } for p.Link != nil { p = p.Link // function instructions } if len(entryPointLoopBranches) > 0 { p = appendp(p, AEnd) // end of entryPointLoop } p = appendp(p, obj.AUNDEF) if len(unwindExitBranches) > 0 { p = appendp(p, AEnd) // end of unwindExit p = appendp(p, AI32Const, constAddr(1)) } } currentDepth = 0 blockDepths := make(map[*obj.Prog]int) for p := s.Func.Text; p != nil; p = p.Link { switch p.As { case ABlock, ALoop, AIf: currentDepth++ blockDepths[p] = currentDepth case AEnd: currentDepth-- } switch p.As { case ABr, ABrIf: if p.To.Type == obj.TYPE_BRANCH { blockDepth, ok := blockDepths[p.To.Val.(*obj.Prog)] if !ok { panic("label not at block") } p.To = constAddr(int64(currentDepth - blockDepth)) } } } } func constAddr(value int64) obj.Addr { return obj.Addr{Type: obj.TYPE_CONST, Offset: value} } func regAddr(reg int16) obj.Addr { return obj.Addr{Type: obj.TYPE_REG, Reg: reg} } // Most of the Go functions has a single parameter (PC_B) in // Wasm ABI. This is a list of exceptions. 
var notUsePC_B = map[string]bool{ "_rt0_wasm_js": true, "wasm_export_run": true, "wasm_export_resume": true, "wasm_export_getsp": true, "wasm_pc_f_loop": true, "runtime.wasmMove": true, "runtime.wasmZero": true, "runtime.wasmDiv": true, "runtime.wasmTruncS": true, "runtime.wasmTruncU": true, "runtime.gcWriteBarrier": true, "cmpbody": true, "memeqbody": true, "memcmp": true, "memchr": true, } func assemble(ctxt *obj.Link, s *obj.LSym, newprog obj.ProgAlloc) { type regVar struct { global bool index uint64 } type varDecl struct { count uint64 typ valueType } hasLocalSP := false regVars := [MAXREG - MINREG]*regVar{ REG_SP - MINREG: {true, 0}, REG_CTXT - MINREG: {true, 1}, REG_g - MINREG: {true, 2}, REG_RET0 - MINREG: {true, 3}, REG_RET1 - MINREG: {true, 4}, REG_RET2 - MINREG: {true, 5}, REG_RET3 - MINREG: {true, 6}, REG_PAUSE - MINREG: {true, 7}, } var varDecls []*varDecl useAssemblyRegMap := func() { for i := int16(0); i < 16; i++ { regVars[REG_R0+i-MINREG] = &regVar{false, uint64(i)} } } // Function starts with declaration of locals: numbers and types. // Some functions use a special calling convention. switch s.Name { case "_rt0_wasm_js", "wasm_export_run", "wasm_export_resume", "wasm_export_getsp", "wasm_pc_f_loop", "runtime.wasmMove", "runtime.wasmZero", "runtime.wasmDiv", "runtime.wasmTruncS", "runtime.wasmTruncU", "memeqbody": varDecls = []*varDecl{} useAssemblyRegMap() case "memchr", "memcmp": varDecls = []*varDecl{{count: 2, typ: i32}} useAssemblyRegMap() case "cmpbody": varDecls = []*varDecl{{count: 2, typ: i64}} useAssemblyRegMap() case "runtime.gcWriteBarrier": varDecls = []*varDecl{{count: 4, typ: i64}} useAssemblyRegMap() default: // Normal calling convention: PC_B as WebAssembly parameter. First local variable is local SP cache. 
regVars[REG_PC_B-MINREG] = &regVar{false, 0} hasLocalSP = true var regUsed [MAXREG - MINREG]bool for p := s.Func.Text; p != nil; p = p.Link { if p.From.Reg != 0 { regUsed[p.From.Reg-MINREG] = true } if p.To.Reg != 0 { regUsed[p.To.Reg-MINREG] = true } } regs := []int16{REG_SP} for reg := int16(REG_R0); reg <= REG_F31; reg++ { if regUsed[reg-MINREG] { regs = append(regs, reg) } } var lastDecl *varDecl for i, reg := range regs { t := regType(reg) if lastDecl == nil || lastDecl.typ != t { lastDecl = &varDecl{ count: 0, typ: t, } varDecls = append(varDecls, lastDecl) } lastDecl.count++ if reg != REG_SP { regVars[reg-MINREG] = &regVar{false, 1 + uint64(i)} } } } w := new(bytes.Buffer) writeUleb128(w, uint64(len(varDecls))) for _, decl := range varDecls { writeUleb128(w, decl.count) w.WriteByte(byte(decl.typ)) } if hasLocalSP { // Copy SP from its global variable into a local variable. Accessing a local variable is more efficient. updateLocalSP(w) } for p := s.Func.Text; p != nil; p = p.Link { switch p.As { case AGet: if p.From.Type != obj.TYPE_REG { panic("bad Get: argument is not a register") } reg := p.From.Reg v := regVars[reg-MINREG] if v == nil { panic("bad Get: invalid register") } if reg == REG_SP && hasLocalSP { writeOpcode(w, ALocalGet) writeUleb128(w, 1) // local SP continue } if v.global { writeOpcode(w, AGlobalGet) } else { writeOpcode(w, ALocalGet) } writeUleb128(w, v.index) continue case ASet: if p.To.Type != obj.TYPE_REG { panic("bad Set: argument is not a register") } reg := p.To.Reg v := regVars[reg-MINREG] if v == nil { panic("bad Set: invalid register") } if reg == REG_SP && hasLocalSP { writeOpcode(w, ALocalTee) writeUleb128(w, 1) // local SP } if v.global { writeOpcode(w, AGlobalSet) } else { if p.Link.As == AGet && p.Link.From.Reg == reg { writeOpcode(w, ALocalTee) p = p.Link } else { writeOpcode(w, ALocalSet) } } writeUleb128(w, v.index) continue case ATee: if p.To.Type != obj.TYPE_REG { panic("bad Tee: argument is not a register") } reg := 
p.To.Reg v := regVars[reg-MINREG] if v == nil { panic("bad Tee: invalid register") } writeOpcode(w, ALocalTee) writeUleb128(w, v.index) continue case ANot: writeOpcode(w, AI32Eqz) continue case obj.AUNDEF: writeOpcode(w, AUnreachable) continue case obj.ANOP, obj.ATEXT, obj.AFUNCDATA, obj.APCDATA: // ignore continue } writeOpcode(w, p.As) switch p.As { case ABlock, ALoop, AIf: if p.From.Offset != 0 { // block type, rarely used, e.g. for code compiled with emscripten w.WriteByte(0x80 - byte(p.From.Offset)) continue } w.WriteByte(0x40) case ABr, ABrIf: if p.To.Type != obj.TYPE_CONST { panic("bad Br/BrIf") } writeUleb128(w, uint64(p.To.Offset)) case ABrTable: idxs := p.To.Val.([]uint64) writeUleb128(w, uint64(len(idxs)-1)) for _, idx := range idxs { writeUleb128(w, idx) } case ACall: switch p.To.Type { case obj.TYPE_CONST: writeUleb128(w, uint64(p.To.Offset)) case obj.TYPE_MEM: if p.To.Name != obj.NAME_EXTERN && p.To.Name != obj.NAME_STATIC { fmt.Println(p.To) panic("bad name for Call") } r := obj.Addrel(s) r.Off = int32(w.Len()) r.Type = objabi.R_CALL if p.Mark&WasmImport != 0 { r.Type = objabi.R_WASMIMPORT } r.Sym = p.To.Sym if hasLocalSP { // The stack may have moved, which changes SP. Update the local SP variable. updateLocalSP(w) } default: panic("bad type for Call") } case ACallIndirect: writeUleb128(w, uint64(p.To.Offset)) w.WriteByte(0x00) // reserved value if hasLocalSP { // The stack may have moved, which changes SP. Update the local SP variable. 
updateLocalSP(w) } case AI32Const, AI64Const: if p.From.Name == obj.NAME_EXTERN { r := obj.Addrel(s) r.Off = int32(w.Len()) r.Type = objabi.R_ADDR r.Sym = p.From.Sym r.Add = p.From.Offset break } writeSleb128(w, p.From.Offset) case AF32Const: b := make([]byte, 4) binary.LittleEndian.PutUint32(b, math.Float32bits(float32(p.From.Val.(float64)))) w.Write(b) case AF64Const: b := make([]byte, 8) binary.LittleEndian.PutUint64(b, math.Float64bits(p.From.Val.(float64))) w.Write(b) case AI32Load, AI64Load, AF32Load, AF64Load, AI32Load8S, AI32Load8U, AI32Load16S, AI32Load16U, AI64Load8S, AI64Load8U, AI64Load16S, AI64Load16U, AI64Load32S, AI64Load32U: if p.From.Offset < 0 { panic("negative offset for *Load") } if p.From.Type != obj.TYPE_CONST { panic("bad type for *Load") } if p.From.Offset > math.MaxUint32 { ctxt.Diag("bad offset in %v", p) } writeUleb128(w, align(p.As)) writeUleb128(w, uint64(p.From.Offset)) case AI32Store, AI64Store, AF32Store, AF64Store, AI32Store8, AI32Store16, AI64Store8, AI64Store16, AI64Store32: if p.To.Offset < 0 { panic("negative offset") } if p.From.Offset > math.MaxUint32 { ctxt.Diag("bad offset in %v", p) } writeUleb128(w, align(p.As)) writeUleb128(w, uint64(p.To.Offset)) case ACurrentMemory, AGrowMemory: w.WriteByte(0x00) } } w.WriteByte(0x0b) // end s.P = w.Bytes() } func updateLocalSP(w *bytes.Buffer) { writeOpcode(w, AGlobalGet) writeUleb128(w, 0) // global SP writeOpcode(w, ALocalSet) writeUleb128(w, 1) // local SP } func writeOpcode(w *bytes.Buffer, as obj.As) { switch { case as < AUnreachable: panic(fmt.Sprintf("unexpected assembler op: %s", as)) case as < AEnd: w.WriteByte(byte(as - AUnreachable + 0x00)) case as < ADrop: w.WriteByte(byte(as - AEnd + 0x0B)) case as < ALocalGet: w.WriteByte(byte(as - ADrop + 0x1A)) case as < AI32Load: w.WriteByte(byte(as - ALocalGet + 0x20)) case as < AI32TruncSatF32S: w.WriteByte(byte(as - AI32Load + 0x28)) case as < ALast: w.WriteByte(0xFC) w.WriteByte(byte(as - AI32TruncSatF32S + 0x00)) default: 
panic(fmt.Sprintf("unexpected assembler op: %s", as)) } } type valueType byte const ( i32 valueType = 0x7F i64 valueType = 0x7E f32 valueType = 0x7D f64 valueType = 0x7C ) func regType(reg int16) valueType { switch { case reg == REG_SP: return i32 case reg >= REG_R0 && reg <= REG_R15: return i64 case reg >= REG_F0 && reg <= REG_F15: return f32 case reg >= REG_F16 && reg <= REG_F31: return f64 default: panic("invalid register") } } func align(as obj.As) uint64 { switch as { case AI32Load8S, AI32Load8U, AI64Load8S, AI64Load8U, AI32Store8, AI64Store8: return 0 case AI32Load16S, AI32Load16U, AI64Load16S, AI64Load16U, AI32Store16, AI64Store16: return 1 case AI32Load, AF32Load, AI64Load32S, AI64Load32U, AI32Store, AF32Store, AI64Store32: return 2 case AI64Load, AF64Load, AI64Store, AF64Store: return 3 default: panic("align: bad op") } } func writeUleb128(w io.ByteWriter, v uint64) { if v < 128 { w.WriteByte(uint8(v)) return } more := true for more { c := uint8(v & 0x7f) v >>= 7 more = v != 0 if more { c |= 0x80 } w.WriteByte(c) } } func writeSleb128(w io.ByteWriter, v int64) { more := true for more { c := uint8(v & 0x7f) s := uint8(v & 0x40) v >>= 7 more = !((v == 0 && s == 0) || (v == -1 && s != 0)) if more { c |= 0x80 } w.WriteByte(c) } }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/twitchyliquid64/golang-asm/obj/mips/a.out.go
vendor/github.com/twitchyliquid64/golang-asm/obj/mips/a.out.go
// cmd/9c/9.out.h from Vita Nuova. // // Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. // Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) // Portions Copyright © 1997-1999 Vita Nuova Limited // Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com) // Portions Copyright © 2004,2006 Bruce Ellis // Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) // Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others // Portions Copyright © 2009 The Go Authors. All rights reserved. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. 
package mips import ( "github.com/twitchyliquid64/golang-asm/obj" ) //go:generate go run ../stringer.go -i $GOFILE -o anames.go -p mips /* * mips 64 */ const ( NSNAME = 8 NSYM = 50 NREG = 32 /* number of general registers */ NFREG = 32 /* number of floating point registers */ NWREG = 32 /* number of MSA registers */ ) const ( REG_R0 = obj.RBaseMIPS + iota // must be a multiple of 32 REG_R1 REG_R2 REG_R3 REG_R4 REG_R5 REG_R6 REG_R7 REG_R8 REG_R9 REG_R10 REG_R11 REG_R12 REG_R13 REG_R14 REG_R15 REG_R16 REG_R17 REG_R18 REG_R19 REG_R20 REG_R21 REG_R22 REG_R23 REG_R24 REG_R25 REG_R26 REG_R27 REG_R28 REG_R29 REG_R30 REG_R31 REG_F0 // must be a multiple of 32 REG_F1 REG_F2 REG_F3 REG_F4 REG_F5 REG_F6 REG_F7 REG_F8 REG_F9 REG_F10 REG_F11 REG_F12 REG_F13 REG_F14 REG_F15 REG_F16 REG_F17 REG_F18 REG_F19 REG_F20 REG_F21 REG_F22 REG_F23 REG_F24 REG_F25 REG_F26 REG_F27 REG_F28 REG_F29 REG_F30 REG_F31 // co-processor 0 control registers REG_M0 // must be a multiple of 32 REG_M1 REG_M2 REG_M3 REG_M4 REG_M5 REG_M6 REG_M7 REG_M8 REG_M9 REG_M10 REG_M11 REG_M12 REG_M13 REG_M14 REG_M15 REG_M16 REG_M17 REG_M18 REG_M19 REG_M20 REG_M21 REG_M22 REG_M23 REG_M24 REG_M25 REG_M26 REG_M27 REG_M28 REG_M29 REG_M30 REG_M31 // FPU control registers REG_FCR0 // must be a multiple of 32 REG_FCR1 REG_FCR2 REG_FCR3 REG_FCR4 REG_FCR5 REG_FCR6 REG_FCR7 REG_FCR8 REG_FCR9 REG_FCR10 REG_FCR11 REG_FCR12 REG_FCR13 REG_FCR14 REG_FCR15 REG_FCR16 REG_FCR17 REG_FCR18 REG_FCR19 REG_FCR20 REG_FCR21 REG_FCR22 REG_FCR23 REG_FCR24 REG_FCR25 REG_FCR26 REG_FCR27 REG_FCR28 REG_FCR29 REG_FCR30 REG_FCR31 // MSA registers // The lower bits of W registers are alias to F registers REG_W0 // must be a multiple of 32 REG_W1 REG_W2 REG_W3 REG_W4 REG_W5 REG_W6 REG_W7 REG_W8 REG_W9 REG_W10 REG_W11 REG_W12 REG_W13 REG_W14 REG_W15 REG_W16 REG_W17 REG_W18 REG_W19 REG_W20 REG_W21 REG_W22 REG_W23 REG_W24 REG_W25 REG_W26 REG_W27 REG_W28 REG_W29 REG_W30 REG_W31 REG_HI REG_LO REG_LAST = REG_LO // the last defined register REG_SPECIAL = 
REG_M0 REGZERO = REG_R0 /* set to zero */ REGSP = REG_R29 REGSB = REG_R28 REGLINK = REG_R31 REGRET = REG_R1 REGARG = -1 /* -1 disables passing the first argument in register */ REGRT1 = REG_R1 /* reserved for runtime, duffzero and duffcopy */ REGRT2 = REG_R2 /* reserved for runtime, duffcopy */ REGCTXT = REG_R22 /* context for closures */ REGG = REG_R30 /* G */ REGTMP = REG_R23 /* used by the linker */ FREGRET = REG_F0 ) // https://llvm.org/svn/llvm-project/llvm/trunk/lib/Target/Mips/MipsRegisterInfo.td search for DwarfRegNum // https://gcc.gnu.org/viewcvs/gcc/trunk/gcc/config/mips/mips.c?view=co&revision=258099&content-type=text%2Fplain search for mips_dwarf_regno // For now, this is adequate for both 32 and 64 bit. var MIPSDWARFRegisters = map[int16]int16{} func init() { // f assigns dwarfregisters[from:to] = (base):(to-from+base) f := func(from, to, base int16) { for r := int16(from); r <= to; r++ { MIPSDWARFRegisters[r] = (r - from) + base } } f(REG_R0, REG_R31, 0) f(REG_F0, REG_F31, 32) // For 32-bit MIPS, compiler only uses even numbered registers -- see cmd/compile/internal/ssa/gen/MIPSOps.go MIPSDWARFRegisters[REG_HI] = 64 MIPSDWARFRegisters[REG_LO] = 65 // The lower bits of W registers are alias to F registers f(REG_W0, REG_W31, 32) } const ( BIG = 32766 ) const ( /* mark flags */ FOLL = 1 << 0 LABEL = 1 << 1 LEAF = 1 << 2 SYNC = 1 << 3 BRANCH = 1 << 4 LOAD = 1 << 5 FCMP = 1 << 6 NOSCHED = 1 << 7 NSCHED = 20 ) const ( C_NONE = iota C_REG C_FREG C_FCREG C_MREG /* special processor register */ C_WREG /* MSA registers */ C_HI C_LO C_ZCON C_SCON /* 16 bit signed */ C_UCON /* 32 bit signed, low 16 bits 0 */ C_ADD0CON C_AND0CON C_ADDCON /* -0x8000 <= v < 0 */ C_ANDCON /* 0 < v <= 0xFFFF */ C_LCON /* other 32 */ C_DCON /* other 64 (could subdivide further) */ C_SACON /* $n(REG) where n <= int16 */ C_SECON C_LACON /* $n(REG) where int16 < n <= int32 */ C_LECON C_DACON /* $n(REG) where int32 < n */ C_STCON /* $tlsvar */ C_SBRA C_LBRA C_SAUTO C_LAUTO C_SEXT C_LEXT 
C_ZOREG C_SOREG C_LOREG C_GOK C_ADDR C_TLS C_TEXTSIZE C_NCLASS /* must be the last */ ) const ( AABSD = obj.ABaseMIPS + obj.A_ARCHSPECIFIC + iota AABSF AABSW AADD AADDD AADDF AADDU AADDW AAND ABEQ ABFPF ABFPT ABGEZ ABGEZAL ABGTZ ABLEZ ABLTZ ABLTZAL ABNE ABREAK ACLO ACLZ ACMOVF ACMOVN ACMOVT ACMOVZ ACMPEQD ACMPEQF ACMPGED ACMPGEF ACMPGTD ACMPGTF ADIV ADIVD ADIVF ADIVU ADIVW AGOK ALL ALLV ALUI AMADD AMOVB AMOVBU AMOVD AMOVDF AMOVDW AMOVF AMOVFD AMOVFW AMOVH AMOVHU AMOVW AMOVWD AMOVWF AMOVWL AMOVWR AMSUB AMUL AMULD AMULF AMULU AMULW ANEGD ANEGF ANEGW ANEGV ANOOP // hardware nop ANOR AOR AREM AREMU ARFE ASC ASCV ASGT ASGTU ASLL ASQRTD ASQRTF ASRA ASRL ASUB ASUBD ASUBF ASUBU ASUBW ASYNC ASYSCALL ATEQ ATLBP ATLBR ATLBWI ATLBWR ATNE AWORD AXOR /* 64-bit */ AMOVV AMOVVL AMOVVR ASLLV ASRAV ASRLV ADIVV ADIVVU AREMV AREMVU AMULV AMULVU AADDV AADDVU ASUBV ASUBVU /* 64-bit FP */ ATRUNCFV ATRUNCDV ATRUNCFW ATRUNCDW AMOVWU AMOVFV AMOVDV AMOVVF AMOVVD /* MSA */ AVMOVB AVMOVH AVMOVW AVMOVD ALAST // aliases AJMP = obj.AJMP AJAL = obj.ACALL ARET = obj.ARET ) func init() { // The asm encoder generally assumes that the lowest 5 bits of the // REG_XX constants match the machine instruction encoding, i.e. // the lowest 5 bits is the register number. // Check this here. if REG_R0%32 != 0 { panic("REG_R0 is not a multiple of 32") } if REG_F0%32 != 0 { panic("REG_F0 is not a multiple of 32") } if REG_M0%32 != 0 { panic("REG_M0 is not a multiple of 32") } if REG_FCR0%32 != 0 { panic("REG_FCR0 is not a multiple of 32") } if REG_W0%32 != 0 { panic("REG_W0 is not a multiple of 32") } }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/twitchyliquid64/golang-asm/obj/mips/anames.go
vendor/github.com/twitchyliquid64/golang-asm/obj/mips/anames.go
// Code generated by stringer -i a.out.go -o anames.go -p mips; DO NOT EDIT. package mips import "github.com/twitchyliquid64/golang-asm/obj" var Anames = []string{ obj.A_ARCHSPECIFIC: "ABSD", "ABSF", "ABSW", "ADD", "ADDD", "ADDF", "ADDU", "ADDW", "AND", "BEQ", "BFPF", "BFPT", "BGEZ", "BGEZAL", "BGTZ", "BLEZ", "BLTZ", "BLTZAL", "BNE", "BREAK", "CLO", "CLZ", "CMOVF", "CMOVN", "CMOVT", "CMOVZ", "CMPEQD", "CMPEQF", "CMPGED", "CMPGEF", "CMPGTD", "CMPGTF", "DIV", "DIVD", "DIVF", "DIVU", "DIVW", "GOK", "LL", "LLV", "LUI", "MADD", "MOVB", "MOVBU", "MOVD", "MOVDF", "MOVDW", "MOVF", "MOVFD", "MOVFW", "MOVH", "MOVHU", "MOVW", "MOVWD", "MOVWF", "MOVWL", "MOVWR", "MSUB", "MUL", "MULD", "MULF", "MULU", "MULW", "NEGD", "NEGF", "NEGW", "NEGV", "NOOP", "NOR", "OR", "REM", "REMU", "RFE", "SC", "SCV", "SGT", "SGTU", "SLL", "SQRTD", "SQRTF", "SRA", "SRL", "SUB", "SUBD", "SUBF", "SUBU", "SUBW", "SYNC", "SYSCALL", "TEQ", "TLBP", "TLBR", "TLBWI", "TLBWR", "TNE", "WORD", "XOR", "MOVV", "MOVVL", "MOVVR", "SLLV", "SRAV", "SRLV", "DIVV", "DIVVU", "REMV", "REMVU", "MULV", "MULVU", "ADDV", "ADDVU", "SUBV", "SUBVU", "TRUNCFV", "TRUNCDV", "TRUNCFW", "TRUNCDW", "MOVWU", "MOVFV", "MOVDV", "MOVVF", "MOVVD", "VMOVB", "VMOVH", "VMOVW", "VMOVD", "LAST", }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/twitchyliquid64/golang-asm/obj/mips/list0.go
vendor/github.com/twitchyliquid64/golang-asm/obj/mips/list0.go
// cmd/9l/list.c from Vita Nuova. // // Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. // Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) // Portions Copyright © 1997-1999 Vita Nuova Limited // Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com) // Portions Copyright © 2004,2006 Bruce Ellis // Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) // Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others // Portions Copyright © 2009 The Go Authors. All rights reserved. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. package mips import ( "github.com/twitchyliquid64/golang-asm/obj" "fmt" ) func init() { obj.RegisterRegister(obj.RBaseMIPS, REG_LAST+1, rconv) obj.RegisterOpcode(obj.ABaseMIPS, Anames) } func rconv(r int) string { if r == 0 { return "NONE" } if r == REGG { // Special case. 
return "g" } if REG_R0 <= r && r <= REG_R31 { return fmt.Sprintf("R%d", r-REG_R0) } if REG_F0 <= r && r <= REG_F31 { return fmt.Sprintf("F%d", r-REG_F0) } if REG_M0 <= r && r <= REG_M31 { return fmt.Sprintf("M%d", r-REG_M0) } if REG_FCR0 <= r && r <= REG_FCR31 { return fmt.Sprintf("FCR%d", r-REG_FCR0) } if REG_W0 <= r && r <= REG_W31 { return fmt.Sprintf("W%d", r-REG_W0) } if r == REG_HI { return "HI" } if r == REG_LO { return "LO" } return fmt.Sprintf("Rgok(%d)", r-obj.RBaseMIPS) } func DRconv(a int) string { s := "C_??" if a >= C_NONE && a <= C_NCLASS { s = cnames0[a] } var fp string fp += s return fp }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/twitchyliquid64/golang-asm/obj/mips/asm0.go
vendor/github.com/twitchyliquid64/golang-asm/obj/mips/asm0.go
// cmd/9l/optab.c, cmd/9l/asmout.c from Vita Nuova. // // Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. // Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) // Portions Copyright © 1997-1999 Vita Nuova Limited // Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com) // Portions Copyright © 2004,2006 Bruce Ellis // Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) // Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others // Portions Copyright © 2009 The Go Authors. All rights reserved. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. package mips import ( "github.com/twitchyliquid64/golang-asm/obj" "github.com/twitchyliquid64/golang-asm/objabi" "github.com/twitchyliquid64/golang-asm/sys" "fmt" "log" "sort" ) // ctxt0 holds state while assembling a single function. // Each function gets a fresh ctxt0. 
// This allows for multiple functions to be safely concurrently assembled. type ctxt0 struct { ctxt *obj.Link newprog obj.ProgAlloc cursym *obj.LSym autosize int32 instoffset int64 pc int64 } // Instruction layout. const ( mips64FuncAlign = 8 ) const ( r0iszero = 1 ) type Optab struct { as obj.As a1 uint8 a2 uint8 a3 uint8 type_ int8 size int8 param int16 family sys.ArchFamily // 0 means both sys.MIPS and sys.MIPS64 flag uint8 } const ( // Optab.flag NOTUSETMP = 1 << iota // p expands to multiple instructions, but does NOT use REGTMP ) var optab = []Optab{ {obj.ATEXT, C_LEXT, C_NONE, C_TEXTSIZE, 0, 0, 0, sys.MIPS64, 0}, {obj.ATEXT, C_ADDR, C_NONE, C_TEXTSIZE, 0, 0, 0, 0, 0}, {AMOVW, C_REG, C_NONE, C_REG, 1, 4, 0, 0, 0}, {AMOVV, C_REG, C_NONE, C_REG, 1, 4, 0, sys.MIPS64, 0}, {AMOVB, C_REG, C_NONE, C_REG, 12, 8, 0, 0, NOTUSETMP}, {AMOVBU, C_REG, C_NONE, C_REG, 13, 4, 0, 0, 0}, {AMOVWU, C_REG, C_NONE, C_REG, 14, 8, 0, sys.MIPS64, NOTUSETMP}, {ASUB, C_REG, C_REG, C_REG, 2, 4, 0, 0, 0}, {ASUBV, C_REG, C_REG, C_REG, 2, 4, 0, sys.MIPS64, 0}, {AADD, C_REG, C_REG, C_REG, 2, 4, 0, 0, 0}, {AADDV, C_REG, C_REG, C_REG, 2, 4, 0, sys.MIPS64, 0}, {AAND, C_REG, C_REG, C_REG, 2, 4, 0, 0, 0}, {ASUB, C_REG, C_NONE, C_REG, 2, 4, 0, 0, 0}, {ASUBV, C_REG, C_NONE, C_REG, 2, 4, 0, sys.MIPS64, 0}, {AADD, C_REG, C_NONE, C_REG, 2, 4, 0, 0, 0}, {AADDV, C_REG, C_NONE, C_REG, 2, 4, 0, sys.MIPS64, 0}, {AAND, C_REG, C_NONE, C_REG, 2, 4, 0, 0, 0}, {ACMOVN, C_REG, C_REG, C_REG, 2, 4, 0, 0, 0}, {ANEGW, C_REG, C_NONE, C_REG, 2, 4, 0, 0, 0}, {ANEGV, C_REG, C_NONE, C_REG, 2, 4, 0, sys.MIPS64, 0}, {ASLL, C_REG, C_NONE, C_REG, 9, 4, 0, 0, 0}, {ASLL, C_REG, C_REG, C_REG, 9, 4, 0, 0, 0}, {ASLLV, C_REG, C_NONE, C_REG, 9, 4, 0, sys.MIPS64, 0}, {ASLLV, C_REG, C_REG, C_REG, 9, 4, 0, sys.MIPS64, 0}, {ACLO, C_REG, C_NONE, C_REG, 9, 4, 0, 0, 0}, {AADDF, C_FREG, C_NONE, C_FREG, 32, 4, 0, 0, 0}, {AADDF, C_FREG, C_REG, C_FREG, 32, 4, 0, 0, 0}, {ACMPEQF, C_FREG, C_REG, C_NONE, 32, 4, 0, 0, 0}, {AABSF, C_FREG, C_NONE, 
C_FREG, 33, 4, 0, 0, 0}, {AMOVVF, C_FREG, C_NONE, C_FREG, 33, 4, 0, sys.MIPS64, 0}, {AMOVF, C_FREG, C_NONE, C_FREG, 33, 4, 0, 0, 0}, {AMOVD, C_FREG, C_NONE, C_FREG, 33, 4, 0, 0, 0}, {AMOVW, C_REG, C_NONE, C_SEXT, 7, 4, REGSB, sys.MIPS64, 0}, {AMOVWU, C_REG, C_NONE, C_SEXT, 7, 4, REGSB, sys.MIPS64, 0}, {AMOVV, C_REG, C_NONE, C_SEXT, 7, 4, REGSB, sys.MIPS64, 0}, {AMOVB, C_REG, C_NONE, C_SEXT, 7, 4, REGSB, sys.MIPS64, 0}, {AMOVBU, C_REG, C_NONE, C_SEXT, 7, 4, REGSB, sys.MIPS64, 0}, {AMOVWL, C_REG, C_NONE, C_SEXT, 7, 4, REGSB, sys.MIPS64, 0}, {AMOVVL, C_REG, C_NONE, C_SEXT, 7, 4, REGSB, sys.MIPS64, 0}, {AMOVW, C_REG, C_NONE, C_SAUTO, 7, 4, REGSP, 0, 0}, {AMOVWU, C_REG, C_NONE, C_SAUTO, 7, 4, REGSP, sys.MIPS64, 0}, {AMOVV, C_REG, C_NONE, C_SAUTO, 7, 4, REGSP, sys.MIPS64, 0}, {AMOVB, C_REG, C_NONE, C_SAUTO, 7, 4, REGSP, 0, 0}, {AMOVBU, C_REG, C_NONE, C_SAUTO, 7, 4, REGSP, 0, 0}, {AMOVWL, C_REG, C_NONE, C_SAUTO, 7, 4, REGSP, 0, 0}, {AMOVVL, C_REG, C_NONE, C_SAUTO, 7, 4, REGSP, sys.MIPS64, 0}, {AMOVW, C_REG, C_NONE, C_SOREG, 7, 4, REGZERO, 0, 0}, {AMOVWU, C_REG, C_NONE, C_SOREG, 7, 4, REGZERO, sys.MIPS64, 0}, {AMOVV, C_REG, C_NONE, C_SOREG, 7, 4, REGZERO, sys.MIPS64, 0}, {AMOVB, C_REG, C_NONE, C_SOREG, 7, 4, REGZERO, 0, 0}, {AMOVBU, C_REG, C_NONE, C_SOREG, 7, 4, REGZERO, 0, 0}, {AMOVWL, C_REG, C_NONE, C_SOREG, 7, 4, REGZERO, 0, 0}, {AMOVVL, C_REG, C_NONE, C_SOREG, 7, 4, REGZERO, sys.MIPS64, 0}, {ASC, C_REG, C_NONE, C_SOREG, 7, 4, REGZERO, 0, 0}, {ASCV, C_REG, C_NONE, C_SOREG, 7, 4, REGZERO, sys.MIPS64, 0}, {AMOVW, C_SEXT, C_NONE, C_REG, 8, 4, REGSB, sys.MIPS64, 0}, {AMOVWU, C_SEXT, C_NONE, C_REG, 8, 4, REGSB, sys.MIPS64, 0}, {AMOVV, C_SEXT, C_NONE, C_REG, 8, 4, REGSB, sys.MIPS64, 0}, {AMOVB, C_SEXT, C_NONE, C_REG, 8, 4, REGSB, sys.MIPS64, 0}, {AMOVBU, C_SEXT, C_NONE, C_REG, 8, 4, REGSB, sys.MIPS64, 0}, {AMOVWL, C_SEXT, C_NONE, C_REG, 8, 4, REGSB, sys.MIPS64, 0}, {AMOVVL, C_SEXT, C_NONE, C_REG, 8, 4, REGSB, sys.MIPS64, 0}, {AMOVW, C_SAUTO, C_NONE, C_REG, 8, 4, REGSP, 0, 0}, 
{AMOVWU, C_SAUTO, C_NONE, C_REG, 8, 4, REGSP, sys.MIPS64, 0}, {AMOVV, C_SAUTO, C_NONE, C_REG, 8, 4, REGSP, sys.MIPS64, 0}, {AMOVB, C_SAUTO, C_NONE, C_REG, 8, 4, REGSP, 0, 0}, {AMOVBU, C_SAUTO, C_NONE, C_REG, 8, 4, REGSP, 0, 0}, {AMOVWL, C_SAUTO, C_NONE, C_REG, 8, 4, REGSP, 0, 0}, {AMOVVL, C_SAUTO, C_NONE, C_REG, 8, 4, REGSP, sys.MIPS64, 0}, {AMOVW, C_SOREG, C_NONE, C_REG, 8, 4, REGZERO, 0, 0}, {AMOVWU, C_SOREG, C_NONE, C_REG, 8, 4, REGZERO, sys.MIPS64, 0}, {AMOVV, C_SOREG, C_NONE, C_REG, 8, 4, REGZERO, sys.MIPS64, 0}, {AMOVB, C_SOREG, C_NONE, C_REG, 8, 4, REGZERO, 0, 0}, {AMOVBU, C_SOREG, C_NONE, C_REG, 8, 4, REGZERO, 0, 0}, {AMOVWL, C_SOREG, C_NONE, C_REG, 8, 4, REGZERO, 0, 0}, {AMOVVL, C_SOREG, C_NONE, C_REG, 8, 4, REGZERO, sys.MIPS64, 0}, {ALL, C_SOREG, C_NONE, C_REG, 8, 4, REGZERO, 0, 0}, {ALLV, C_SOREG, C_NONE, C_REG, 8, 4, REGZERO, sys.MIPS64, 0}, {AMOVW, C_REG, C_NONE, C_LEXT, 35, 12, REGSB, sys.MIPS64, 0}, {AMOVWU, C_REG, C_NONE, C_LEXT, 35, 12, REGSB, sys.MIPS64, 0}, {AMOVV, C_REG, C_NONE, C_LEXT, 35, 12, REGSB, sys.MIPS64, 0}, {AMOVB, C_REG, C_NONE, C_LEXT, 35, 12, REGSB, sys.MIPS64, 0}, {AMOVBU, C_REG, C_NONE, C_LEXT, 35, 12, REGSB, sys.MIPS64, 0}, {AMOVW, C_REG, C_NONE, C_LAUTO, 35, 12, REGSP, 0, 0}, {AMOVWU, C_REG, C_NONE, C_LAUTO, 35, 12, REGSP, sys.MIPS64, 0}, {AMOVV, C_REG, C_NONE, C_LAUTO, 35, 12, REGSP, sys.MIPS64, 0}, {AMOVB, C_REG, C_NONE, C_LAUTO, 35, 12, REGSP, 0, 0}, {AMOVBU, C_REG, C_NONE, C_LAUTO, 35, 12, REGSP, 0, 0}, {AMOVW, C_REG, C_NONE, C_LOREG, 35, 12, REGZERO, 0, 0}, {AMOVWU, C_REG, C_NONE, C_LOREG, 35, 12, REGZERO, sys.MIPS64, 0}, {AMOVV, C_REG, C_NONE, C_LOREG, 35, 12, REGZERO, sys.MIPS64, 0}, {AMOVB, C_REG, C_NONE, C_LOREG, 35, 12, REGZERO, 0, 0}, {AMOVBU, C_REG, C_NONE, C_LOREG, 35, 12, REGZERO, 0, 0}, {ASC, C_REG, C_NONE, C_LOREG, 35, 12, REGZERO, 0, 0}, {AMOVW, C_REG, C_NONE, C_ADDR, 50, 8, 0, sys.MIPS, 0}, {AMOVW, C_REG, C_NONE, C_ADDR, 50, 12, 0, sys.MIPS64, 0}, {AMOVWU, C_REG, C_NONE, C_ADDR, 50, 12, 0, sys.MIPS64, 0}, 
{AMOVV, C_REG, C_NONE, C_ADDR, 50, 12, 0, sys.MIPS64, 0}, {AMOVB, C_REG, C_NONE, C_ADDR, 50, 8, 0, sys.MIPS, 0}, {AMOVB, C_REG, C_NONE, C_ADDR, 50, 12, 0, sys.MIPS64, 0}, {AMOVBU, C_REG, C_NONE, C_ADDR, 50, 8, 0, sys.MIPS, 0}, {AMOVBU, C_REG, C_NONE, C_ADDR, 50, 12, 0, sys.MIPS64, 0}, {AMOVW, C_REG, C_NONE, C_TLS, 53, 8, 0, 0, NOTUSETMP}, {AMOVWU, C_REG, C_NONE, C_TLS, 53, 8, 0, sys.MIPS64, NOTUSETMP}, {AMOVV, C_REG, C_NONE, C_TLS, 53, 8, 0, sys.MIPS64, NOTUSETMP}, {AMOVB, C_REG, C_NONE, C_TLS, 53, 8, 0, 0, NOTUSETMP}, {AMOVBU, C_REG, C_NONE, C_TLS, 53, 8, 0, 0, NOTUSETMP}, {AMOVW, C_LEXT, C_NONE, C_REG, 36, 12, REGSB, sys.MIPS64, 0}, {AMOVWU, C_LEXT, C_NONE, C_REG, 36, 12, REGSB, sys.MIPS64, 0}, {AMOVV, C_LEXT, C_NONE, C_REG, 36, 12, REGSB, sys.MIPS64, 0}, {AMOVB, C_LEXT, C_NONE, C_REG, 36, 12, REGSB, sys.MIPS64, 0}, {AMOVBU, C_LEXT, C_NONE, C_REG, 36, 12, REGSB, sys.MIPS64, 0}, {AMOVW, C_LAUTO, C_NONE, C_REG, 36, 12, REGSP, 0, 0}, {AMOVWU, C_LAUTO, C_NONE, C_REG, 36, 12, REGSP, sys.MIPS64, 0}, {AMOVV, C_LAUTO, C_NONE, C_REG, 36, 12, REGSP, sys.MIPS64, 0}, {AMOVB, C_LAUTO, C_NONE, C_REG, 36, 12, REGSP, 0, 0}, {AMOVBU, C_LAUTO, C_NONE, C_REG, 36, 12, REGSP, 0, 0}, {AMOVW, C_LOREG, C_NONE, C_REG, 36, 12, REGZERO, 0, 0}, {AMOVWU, C_LOREG, C_NONE, C_REG, 36, 12, REGZERO, sys.MIPS64, 0}, {AMOVV, C_LOREG, C_NONE, C_REG, 36, 12, REGZERO, sys.MIPS64, 0}, {AMOVB, C_LOREG, C_NONE, C_REG, 36, 12, REGZERO, 0, 0}, {AMOVBU, C_LOREG, C_NONE, C_REG, 36, 12, REGZERO, 0, 0}, {AMOVW, C_ADDR, C_NONE, C_REG, 51, 8, 0, sys.MIPS, 0}, {AMOVW, C_ADDR, C_NONE, C_REG, 51, 12, 0, sys.MIPS64, 0}, {AMOVWU, C_ADDR, C_NONE, C_REG, 51, 12, 0, sys.MIPS64, 0}, {AMOVV, C_ADDR, C_NONE, C_REG, 51, 12, 0, sys.MIPS64, 0}, {AMOVB, C_ADDR, C_NONE, C_REG, 51, 8, 0, sys.MIPS, 0}, {AMOVB, C_ADDR, C_NONE, C_REG, 51, 12, 0, sys.MIPS64, 0}, {AMOVBU, C_ADDR, C_NONE, C_REG, 51, 8, 0, sys.MIPS, 0}, {AMOVBU, C_ADDR, C_NONE, C_REG, 51, 12, 0, sys.MIPS64, 0}, {AMOVW, C_TLS, C_NONE, C_REG, 54, 8, 0, 0, NOTUSETMP}, 
{AMOVWU, C_TLS, C_NONE, C_REG, 54, 8, 0, sys.MIPS64, NOTUSETMP}, {AMOVV, C_TLS, C_NONE, C_REG, 54, 8, 0, sys.MIPS64, NOTUSETMP}, {AMOVB, C_TLS, C_NONE, C_REG, 54, 8, 0, 0, NOTUSETMP}, {AMOVBU, C_TLS, C_NONE, C_REG, 54, 8, 0, 0, NOTUSETMP}, {AMOVW, C_SECON, C_NONE, C_REG, 3, 4, REGSB, sys.MIPS64, 0}, {AMOVV, C_SECON, C_NONE, C_REG, 3, 4, REGSB, sys.MIPS64, 0}, {AMOVW, C_SACON, C_NONE, C_REG, 3, 4, REGSP, 0, 0}, {AMOVV, C_SACON, C_NONE, C_REG, 3, 4, REGSP, sys.MIPS64, 0}, {AMOVW, C_LECON, C_NONE, C_REG, 52, 8, REGSB, sys.MIPS, NOTUSETMP}, {AMOVW, C_LECON, C_NONE, C_REG, 52, 12, REGSB, sys.MIPS64, NOTUSETMP}, {AMOVV, C_LECON, C_NONE, C_REG, 52, 12, REGSB, sys.MIPS64, NOTUSETMP}, {AMOVW, C_LACON, C_NONE, C_REG, 26, 12, REGSP, 0, 0}, {AMOVV, C_LACON, C_NONE, C_REG, 26, 12, REGSP, sys.MIPS64, 0}, {AMOVW, C_ADDCON, C_NONE, C_REG, 3, 4, REGZERO, 0, 0}, {AMOVV, C_ADDCON, C_NONE, C_REG, 3, 4, REGZERO, sys.MIPS64, 0}, {AMOVW, C_ANDCON, C_NONE, C_REG, 3, 4, REGZERO, 0, 0}, {AMOVV, C_ANDCON, C_NONE, C_REG, 3, 4, REGZERO, sys.MIPS64, 0}, {AMOVW, C_STCON, C_NONE, C_REG, 55, 8, 0, 0, NOTUSETMP}, {AMOVV, C_STCON, C_NONE, C_REG, 55, 8, 0, sys.MIPS64, NOTUSETMP}, {AMOVW, C_UCON, C_NONE, C_REG, 24, 4, 0, 0, 0}, {AMOVV, C_UCON, C_NONE, C_REG, 24, 4, 0, sys.MIPS64, 0}, {AMOVW, C_LCON, C_NONE, C_REG, 19, 8, 0, 0, NOTUSETMP}, {AMOVV, C_LCON, C_NONE, C_REG, 19, 8, 0, sys.MIPS64, NOTUSETMP}, {AMOVW, C_HI, C_NONE, C_REG, 20, 4, 0, 0, 0}, {AMOVV, C_HI, C_NONE, C_REG, 20, 4, 0, sys.MIPS64, 0}, {AMOVW, C_LO, C_NONE, C_REG, 20, 4, 0, 0, 0}, {AMOVV, C_LO, C_NONE, C_REG, 20, 4, 0, sys.MIPS64, 0}, {AMOVW, C_REG, C_NONE, C_HI, 21, 4, 0, 0, 0}, {AMOVV, C_REG, C_NONE, C_HI, 21, 4, 0, sys.MIPS64, 0}, {AMOVW, C_REG, C_NONE, C_LO, 21, 4, 0, 0, 0}, {AMOVV, C_REG, C_NONE, C_LO, 21, 4, 0, sys.MIPS64, 0}, {AMUL, C_REG, C_REG, C_NONE, 22, 4, 0, 0, 0}, {AMUL, C_REG, C_REG, C_REG, 22, 4, 0, 0, 0}, {AMULV, C_REG, C_REG, C_NONE, 22, 4, 0, sys.MIPS64, 0}, {AADD, C_ADD0CON, C_REG, C_REG, 4, 4, 0, 0, 0}, {AADD, 
C_ADD0CON, C_NONE, C_REG, 4, 4, 0, 0, 0}, {AADD, C_ANDCON, C_REG, C_REG, 10, 8, 0, 0, 0}, {AADD, C_ANDCON, C_NONE, C_REG, 10, 8, 0, 0, 0}, {AADDV, C_ADD0CON, C_REG, C_REG, 4, 4, 0, sys.MIPS64, 0}, {AADDV, C_ADD0CON, C_NONE, C_REG, 4, 4, 0, sys.MIPS64, 0}, {AADDV, C_ANDCON, C_REG, C_REG, 10, 8, 0, sys.MIPS64, 0}, {AADDV, C_ANDCON, C_NONE, C_REG, 10, 8, 0, sys.MIPS64, 0}, {AAND, C_AND0CON, C_REG, C_REG, 4, 4, 0, 0, 0}, {AAND, C_AND0CON, C_NONE, C_REG, 4, 4, 0, 0, 0}, {AAND, C_ADDCON, C_REG, C_REG, 10, 8, 0, 0, 0}, {AAND, C_ADDCON, C_NONE, C_REG, 10, 8, 0, 0, 0}, {AADD, C_UCON, C_REG, C_REG, 25, 8, 0, 0, 0}, {AADD, C_UCON, C_NONE, C_REG, 25, 8, 0, 0, 0}, {AADDV, C_UCON, C_REG, C_REG, 25, 8, 0, sys.MIPS64, 0}, {AADDV, C_UCON, C_NONE, C_REG, 25, 8, 0, sys.MIPS64, 0}, {AAND, C_UCON, C_REG, C_REG, 25, 8, 0, 0, 0}, {AAND, C_UCON, C_NONE, C_REG, 25, 8, 0, 0, 0}, {AADD, C_LCON, C_NONE, C_REG, 23, 12, 0, 0, 0}, {AADDV, C_LCON, C_NONE, C_REG, 23, 12, 0, sys.MIPS64, 0}, {AAND, C_LCON, C_NONE, C_REG, 23, 12, 0, 0, 0}, {AADD, C_LCON, C_REG, C_REG, 23, 12, 0, 0, 0}, {AADDV, C_LCON, C_REG, C_REG, 23, 12, 0, sys.MIPS64, 0}, {AAND, C_LCON, C_REG, C_REG, 23, 12, 0, 0, 0}, {ASLL, C_SCON, C_REG, C_REG, 16, 4, 0, 0, 0}, {ASLL, C_SCON, C_NONE, C_REG, 16, 4, 0, 0, 0}, {ASLLV, C_SCON, C_REG, C_REG, 16, 4, 0, sys.MIPS64, 0}, {ASLLV, C_SCON, C_NONE, C_REG, 16, 4, 0, sys.MIPS64, 0}, {ASYSCALL, C_NONE, C_NONE, C_NONE, 5, 4, 0, 0, 0}, {ABEQ, C_REG, C_REG, C_SBRA, 6, 4, 0, 0, 0}, {ABEQ, C_REG, C_NONE, C_SBRA, 6, 4, 0, 0, 0}, {ABLEZ, C_REG, C_NONE, C_SBRA, 6, 4, 0, 0, 0}, {ABFPT, C_NONE, C_NONE, C_SBRA, 6, 8, 0, 0, NOTUSETMP}, {AJMP, C_NONE, C_NONE, C_LBRA, 11, 4, 0, 0, 0}, {AJAL, C_NONE, C_NONE, C_LBRA, 11, 4, 0, 0, 0}, {AJMP, C_NONE, C_NONE, C_ZOREG, 18, 4, REGZERO, 0, 0}, {AJAL, C_NONE, C_NONE, C_ZOREG, 18, 4, REGLINK, 0, 0}, {AMOVW, C_SEXT, C_NONE, C_FREG, 27, 4, REGSB, sys.MIPS64, 0}, {AMOVF, C_SEXT, C_NONE, C_FREG, 27, 4, REGSB, sys.MIPS64, 0}, {AMOVD, C_SEXT, C_NONE, C_FREG, 27, 4, REGSB, 
sys.MIPS64, 0}, {AMOVW, C_SAUTO, C_NONE, C_FREG, 27, 4, REGSP, sys.MIPS64, 0}, {AMOVF, C_SAUTO, C_NONE, C_FREG, 27, 4, REGSP, 0, 0}, {AMOVD, C_SAUTO, C_NONE, C_FREG, 27, 4, REGSP, 0, 0}, {AMOVW, C_SOREG, C_NONE, C_FREG, 27, 4, REGZERO, sys.MIPS64, 0}, {AMOVF, C_SOREG, C_NONE, C_FREG, 27, 4, REGZERO, 0, 0}, {AMOVD, C_SOREG, C_NONE, C_FREG, 27, 4, REGZERO, 0, 0}, {AMOVW, C_LEXT, C_NONE, C_FREG, 27, 12, REGSB, sys.MIPS64, 0}, {AMOVF, C_LEXT, C_NONE, C_FREG, 27, 12, REGSB, sys.MIPS64, 0}, {AMOVD, C_LEXT, C_NONE, C_FREG, 27, 12, REGSB, sys.MIPS64, 0}, {AMOVW, C_LAUTO, C_NONE, C_FREG, 27, 12, REGSP, sys.MIPS64, 0}, {AMOVF, C_LAUTO, C_NONE, C_FREG, 27, 12, REGSP, 0, 0}, {AMOVD, C_LAUTO, C_NONE, C_FREG, 27, 12, REGSP, 0, 0}, {AMOVW, C_LOREG, C_NONE, C_FREG, 27, 12, REGZERO, sys.MIPS64, 0}, {AMOVF, C_LOREG, C_NONE, C_FREG, 27, 12, REGZERO, 0, 0}, {AMOVD, C_LOREG, C_NONE, C_FREG, 27, 12, REGZERO, 0, 0}, {AMOVF, C_ADDR, C_NONE, C_FREG, 51, 8, 0, sys.MIPS, 0}, {AMOVF, C_ADDR, C_NONE, C_FREG, 51, 12, 0, sys.MIPS64, 0}, {AMOVD, C_ADDR, C_NONE, C_FREG, 51, 8, 0, sys.MIPS, 0}, {AMOVD, C_ADDR, C_NONE, C_FREG, 51, 12, 0, sys.MIPS64, 0}, {AMOVW, C_FREG, C_NONE, C_SEXT, 28, 4, REGSB, sys.MIPS64, 0}, {AMOVF, C_FREG, C_NONE, C_SEXT, 28, 4, REGSB, sys.MIPS64, 0}, {AMOVD, C_FREG, C_NONE, C_SEXT, 28, 4, REGSB, sys.MIPS64, 0}, {AMOVW, C_FREG, C_NONE, C_SAUTO, 28, 4, REGSP, sys.MIPS64, 0}, {AMOVF, C_FREG, C_NONE, C_SAUTO, 28, 4, REGSP, 0, 0}, {AMOVD, C_FREG, C_NONE, C_SAUTO, 28, 4, REGSP, 0, 0}, {AMOVW, C_FREG, C_NONE, C_SOREG, 28, 4, REGZERO, sys.MIPS64, 0}, {AMOVF, C_FREG, C_NONE, C_SOREG, 28, 4, REGZERO, 0, 0}, {AMOVD, C_FREG, C_NONE, C_SOREG, 28, 4, REGZERO, 0, 0}, {AMOVW, C_FREG, C_NONE, C_LEXT, 28, 12, REGSB, sys.MIPS64, 0}, {AMOVF, C_FREG, C_NONE, C_LEXT, 28, 12, REGSB, sys.MIPS64, 0}, {AMOVD, C_FREG, C_NONE, C_LEXT, 28, 12, REGSB, sys.MIPS64, 0}, {AMOVW, C_FREG, C_NONE, C_LAUTO, 28, 12, REGSP, sys.MIPS64, 0}, {AMOVF, C_FREG, C_NONE, C_LAUTO, 28, 12, REGSP, 0, 0}, {AMOVD, C_FREG, 
C_NONE, C_LAUTO, 28, 12, REGSP, 0, 0}, {AMOVW, C_FREG, C_NONE, C_LOREG, 28, 12, REGZERO, sys.MIPS64, 0}, {AMOVF, C_FREG, C_NONE, C_LOREG, 28, 12, REGZERO, 0, 0}, {AMOVD, C_FREG, C_NONE, C_LOREG, 28, 12, REGZERO, 0, 0}, {AMOVF, C_FREG, C_NONE, C_ADDR, 50, 8, 0, sys.MIPS, 0}, {AMOVF, C_FREG, C_NONE, C_ADDR, 50, 12, 0, sys.MIPS64, 0}, {AMOVD, C_FREG, C_NONE, C_ADDR, 50, 8, 0, sys.MIPS, 0}, {AMOVD, C_FREG, C_NONE, C_ADDR, 50, 12, 0, sys.MIPS64, 0}, {AMOVW, C_REG, C_NONE, C_FREG, 30, 4, 0, 0, 0}, {AMOVW, C_FREG, C_NONE, C_REG, 31, 4, 0, 0, 0}, {AMOVV, C_REG, C_NONE, C_FREG, 47, 4, 0, sys.MIPS64, 0}, {AMOVV, C_FREG, C_NONE, C_REG, 48, 4, 0, sys.MIPS64, 0}, {AMOVW, C_ADDCON, C_NONE, C_FREG, 34, 8, 0, sys.MIPS64, 0}, {AMOVW, C_ANDCON, C_NONE, C_FREG, 34, 8, 0, sys.MIPS64, 0}, {AMOVW, C_REG, C_NONE, C_MREG, 37, 4, 0, 0, 0}, {AMOVV, C_REG, C_NONE, C_MREG, 37, 4, 0, sys.MIPS64, 0}, {AMOVW, C_MREG, C_NONE, C_REG, 38, 4, 0, 0, 0}, {AMOVV, C_MREG, C_NONE, C_REG, 38, 4, 0, sys.MIPS64, 0}, {AWORD, C_LCON, C_NONE, C_NONE, 40, 4, 0, 0, 0}, {AMOVW, C_REG, C_NONE, C_FCREG, 41, 4, 0, 0, 0}, {AMOVV, C_REG, C_NONE, C_FCREG, 41, 4, 0, sys.MIPS64, 0}, {AMOVW, C_FCREG, C_NONE, C_REG, 42, 4, 0, 0, 0}, {AMOVV, C_FCREG, C_NONE, C_REG, 42, 4, 0, sys.MIPS64, 0}, {ATEQ, C_SCON, C_REG, C_REG, 15, 4, 0, 0, 0}, {ATEQ, C_SCON, C_NONE, C_REG, 15, 4, 0, 0, 0}, {ACMOVT, C_REG, C_NONE, C_REG, 17, 4, 0, 0, 0}, {AVMOVB, C_SCON, C_NONE, C_WREG, 56, 4, 0, sys.MIPS64, 0}, {AVMOVB, C_ADDCON, C_NONE, C_WREG, 56, 4, 0, sys.MIPS64, 0}, {AVMOVB, C_SOREG, C_NONE, C_WREG, 57, 4, 0, sys.MIPS64, 0}, {AVMOVB, C_WREG, C_NONE, C_SOREG, 58, 4, 0, sys.MIPS64, 0}, {ABREAK, C_REG, C_NONE, C_SEXT, 7, 4, REGSB, sys.MIPS64, 0}, /* really CACHE instruction */ {ABREAK, C_REG, C_NONE, C_SAUTO, 7, 4, REGSP, sys.MIPS64, 0}, {ABREAK, C_REG, C_NONE, C_SOREG, 7, 4, REGZERO, sys.MIPS64, 0}, {ABREAK, C_NONE, C_NONE, C_NONE, 5, 4, 0, 0, 0}, {obj.AUNDEF, C_NONE, C_NONE, C_NONE, 49, 4, 0, 0, 0}, {obj.APCDATA, C_LCON, C_NONE, C_LCON, 0, 0, 
0, 0, 0}, {obj.AFUNCDATA, C_SCON, C_NONE, C_ADDR, 0, 0, 0, 0, 0}, {obj.ANOP, C_NONE, C_NONE, C_NONE, 0, 0, 0, 0, 0}, {obj.ANOP, C_LCON, C_NONE, C_NONE, 0, 0, 0, 0, 0}, // nop variants, see #40689 {obj.ANOP, C_REG, C_NONE, C_NONE, 0, 0, 0, 0, 0}, {obj.ANOP, C_FREG, C_NONE, C_NONE, 0, 0, 0, 0, 0}, {obj.ADUFFZERO, C_NONE, C_NONE, C_LBRA, 11, 4, 0, 0, 0}, // same as AJMP {obj.ADUFFCOPY, C_NONE, C_NONE, C_LBRA, 11, 4, 0, 0, 0}, // same as AJMP {obj.AXXX, C_NONE, C_NONE, C_NONE, 0, 4, 0, 0, 0}, } var oprange [ALAST & obj.AMask][]Optab var xcmp [C_NCLASS][C_NCLASS]bool func span0(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { if ctxt.Retpoline { ctxt.Diag("-spectre=ret not supported on mips") ctxt.Retpoline = false // don't keep printing } p := cursym.Func.Text if p == nil || p.Link == nil { // handle external functions and ELF section symbols return } c := ctxt0{ctxt: ctxt, newprog: newprog, cursym: cursym, autosize: int32(p.To.Offset + ctxt.FixedFrameSize())} if oprange[AOR&obj.AMask] == nil { c.ctxt.Diag("mips ops not initialized, call mips.buildop first") } pc := int64(0) p.Pc = pc var m int var o *Optab for p = p.Link; p != nil; p = p.Link { p.Pc = pc o = c.oplook(p) m = int(o.size) if m == 0 { if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA { c.ctxt.Diag("zero-width instruction\n%v", p) } continue } pc += int64(m) } c.cursym.Size = pc /* * if any procedure is large enough to * generate a large SBRA branch, then * generate extra passes putting branches * around jmps to fix. this is rare. 
*/ bflag := 1 var otxt int64 var q *obj.Prog for bflag != 0 { bflag = 0 pc = 0 for p = c.cursym.Func.Text.Link; p != nil; p = p.Link { p.Pc = pc o = c.oplook(p) // very large conditional branches if o.type_ == 6 && p.To.Target() != nil { otxt = p.To.Target().Pc - pc if otxt < -(1<<17)+10 || otxt >= (1<<17)-10 { q = c.newprog() q.Link = p.Link p.Link = q q.As = AJMP q.Pos = p.Pos q.To.Type = obj.TYPE_BRANCH q.To.SetTarget(p.To.Target()) p.To.SetTarget(q) q = c.newprog() q.Link = p.Link p.Link = q q.As = AJMP q.Pos = p.Pos q.To.Type = obj.TYPE_BRANCH q.To.SetTarget(q.Link.Link) c.addnop(p.Link) c.addnop(p) bflag = 1 } } m = int(o.size) if m == 0 { if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA { c.ctxt.Diag("zero-width instruction\n%v", p) } continue } pc += int64(m) } c.cursym.Size = pc } if c.ctxt.Arch.Family == sys.MIPS64 { pc += -pc & (mips64FuncAlign - 1) } c.cursym.Size = pc /* * lay out the code, emitting code and data relocations. */ c.cursym.Grow(c.cursym.Size) bp := c.cursym.P var i int32 var out [4]uint32 for p := c.cursym.Func.Text.Link; p != nil; p = p.Link { c.pc = p.Pc o = c.oplook(p) if int(o.size) > 4*len(out) { log.Fatalf("out array in span0 is too small, need at least %d for %v", o.size/4, p) } c.asmout(p, o, out[:]) for i = 0; i < int32(o.size/4); i++ { c.ctxt.Arch.ByteOrder.PutUint32(bp, out[i]) bp = bp[4:] } } // Mark nonpreemptible instruction sequences. // We use REGTMP as a scratch register during call injection, // so instruction sequences that use REGTMP are unsafe to // preempt asynchronously. obj.MarkUnsafePoints(c.ctxt, c.cursym.Func.Text, c.newprog, c.isUnsafePoint, c.isRestartable) } // isUnsafePoint returns whether p is an unsafe point. func (c *ctxt0) isUnsafePoint(p *obj.Prog) bool { // If p explicitly uses REGTMP, it's unsafe to preempt, because the // preemption sequence clobbers REGTMP. 
return p.From.Reg == REGTMP || p.To.Reg == REGTMP || p.Reg == REGTMP } // isRestartable returns whether p is a multi-instruction sequence that, // if preempted, can be restarted. func (c *ctxt0) isRestartable(p *obj.Prog) bool { if c.isUnsafePoint(p) { return false } // If p is a multi-instruction sequence with uses REGTMP inserted by // the assembler in order to materialize a large constant/offset, we // can restart p (at the start of the instruction sequence), recompute // the content of REGTMP, upon async preemption. Currently, all cases // of assembler-inserted REGTMP fall into this category. // If p doesn't use REGTMP, it can be simply preempted, so we don't // mark it. o := c.oplook(p) return o.size > 4 && o.flag&NOTUSETMP == 0 } func isint32(v int64) bool { return int64(int32(v)) == v } func isuint32(v uint64) bool { return uint64(uint32(v)) == v } func (c *ctxt0) aclass(a *obj.Addr) int { switch a.Type { case obj.TYPE_NONE: return C_NONE case obj.TYPE_REG: if REG_R0 <= a.Reg && a.Reg <= REG_R31 { return C_REG } if REG_F0 <= a.Reg && a.Reg <= REG_F31 { return C_FREG } if REG_M0 <= a.Reg && a.Reg <= REG_M31 { return C_MREG } if REG_FCR0 <= a.Reg && a.Reg <= REG_FCR31 { return C_FCREG } if REG_W0 <= a.Reg && a.Reg <= REG_W31 { return C_WREG } if a.Reg == REG_LO { return C_LO } if a.Reg == REG_HI { return C_HI } return C_GOK case obj.TYPE_MEM: switch a.Name { case obj.NAME_EXTERN, obj.NAME_STATIC: if a.Sym == nil { break } c.instoffset = a.Offset if a.Sym != nil { // use relocation if a.Sym.Type == objabi.STLSBSS { return C_TLS } return C_ADDR } return C_LEXT case obj.NAME_AUTO: if a.Reg == REGSP { // unset base register for better printing, since // a.Offset is still relative to pseudo-SP. 
a.Reg = obj.REG_NONE } c.instoffset = int64(c.autosize) + a.Offset if c.instoffset >= -BIG && c.instoffset < BIG { return C_SAUTO } return C_LAUTO case obj.NAME_PARAM: if a.Reg == REGSP { // unset base register for better printing, since // a.Offset is still relative to pseudo-FP. a.Reg = obj.REG_NONE } c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.FixedFrameSize() if c.instoffset >= -BIG && c.instoffset < BIG { return C_SAUTO } return C_LAUTO case obj.NAME_NONE: c.instoffset = a.Offset if c.instoffset == 0 { return C_ZOREG } if c.instoffset >= -BIG && c.instoffset < BIG { return C_SOREG } return C_LOREG } return C_GOK case obj.TYPE_TEXTSIZE: return C_TEXTSIZE case obj.TYPE_CONST, obj.TYPE_ADDR: switch a.Name { case obj.NAME_NONE: c.instoffset = a.Offset if a.Reg != 0 { if -BIG <= c.instoffset && c.instoffset <= BIG { return C_SACON } if isint32(c.instoffset) { return C_LACON } return C_DACON } case obj.NAME_EXTERN, obj.NAME_STATIC: s := a.Sym if s == nil { return C_GOK } c.instoffset = a.Offset if s.Type == objabi.STLSBSS { return C_STCON // address of TLS variable } return C_LECON case obj.NAME_AUTO: if a.Reg == REGSP { // unset base register for better printing, since // a.Offset is still relative to pseudo-SP. a.Reg = obj.REG_NONE } c.instoffset = int64(c.autosize) + a.Offset if c.instoffset >= -BIG && c.instoffset < BIG { return C_SACON } return C_LACON case obj.NAME_PARAM: if a.Reg == REGSP { // unset base register for better printing, since // a.Offset is still relative to pseudo-FP. 
a.Reg = obj.REG_NONE } c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.FixedFrameSize() if c.instoffset >= -BIG && c.instoffset < BIG { return C_SACON } return C_LACON default: return C_GOK } if c.instoffset >= 0 { if c.instoffset == 0 { return C_ZCON } if c.instoffset <= 0x7fff { return C_SCON } if c.instoffset <= 0xffff { return C_ANDCON } if c.instoffset&0xffff == 0 && isuint32(uint64(c.instoffset)) { /* && (instoffset & (1<<31)) == 0) */ return C_UCON } if isint32(c.instoffset) || isuint32(uint64(c.instoffset)) { return C_LCON } return C_LCON // C_DCON } if c.instoffset >= -0x8000 { return C_ADDCON } if c.instoffset&0xffff == 0 && isint32(c.instoffset) { return C_UCON } if isint32(c.instoffset) { return C_LCON } return C_LCON // C_DCON case obj.TYPE_BRANCH: return C_SBRA } return C_GOK } func prasm(p *obj.Prog) { fmt.Printf("%v\n", p) } func (c *ctxt0) oplook(p *obj.Prog) *Optab { if oprange[AOR&obj.AMask] == nil { c.ctxt.Diag("mips ops not initialized, call mips.buildop first") } a1 := int(p.Optab) if a1 != 0 { return &optab[a1-1] } a1 = int(p.From.Class) if a1 == 0 { a1 = c.aclass(&p.From) + 1 p.From.Class = int8(a1) } a1-- a3 := int(p.To.Class) if a3 == 0 { a3 = c.aclass(&p.To) + 1 p.To.Class = int8(a3) } a3-- a2 := C_NONE if p.Reg != 0 { a2 = C_REG } ops := oprange[p.As&obj.AMask] c1 := &xcmp[a1] c3 := &xcmp[a3] for i := range ops { op := &ops[i] if int(op.a2) == a2 && c1[op.a1] && c3[op.a3] && (op.family == 0 || c.ctxt.Arch.Family == op.family) { p.Optab = uint16(cap(optab) - cap(ops) + i + 1) return op } } c.ctxt.Diag("illegal combination %v %v %v %v", p.As, DRconv(a1), DRconv(a2), DRconv(a3)) prasm(p) // Turn illegal instruction into an UNDEF, avoid crashing in asmout. 
return &Optab{obj.AUNDEF, C_NONE, C_NONE, C_NONE, 49, 4, 0, 0, 0} } func cmp(a int, b int) bool { if a == b { return true } switch a { case C_LCON: if b == C_ZCON || b == C_SCON || b == C_UCON || b == C_ADDCON || b == C_ANDCON { return true } case C_ADD0CON: if b == C_ADDCON { return true } fallthrough case C_ADDCON: if b == C_ZCON || b == C_SCON { return true } case C_AND0CON: if b == C_ANDCON { return true } fallthrough case C_ANDCON: if b == C_ZCON || b == C_SCON { return true } case C_UCON: if b == C_ZCON { return true } case C_SCON: if b == C_ZCON { return true } case C_LACON: if b == C_SACON { return true } case C_LBRA: if b == C_SBRA { return true } case C_LEXT: if b == C_SEXT { return true } case C_LAUTO: if b == C_SAUTO { return true } case C_REG: if b == C_ZCON { return r0iszero != 0 /*TypeKind(100016)*/ } case C_LOREG: if b == C_ZOREG || b == C_SOREG { return true } case C_SOREG: if b == C_ZOREG { return true } } return false } type ocmp []Optab func (x ocmp) Len() int { return len(x) } func (x ocmp) Swap(i, j int) { x[i], x[j] = x[j], x[i] } func (x ocmp) Less(i, j int) bool { p1 := &x[i] p2 := &x[j] n := int(p1.as) - int(p2.as) if n != 0 { return n < 0 } n = int(p1.a1) - int(p2.a1) if n != 0 { return n < 0 } n = int(p1.a2) - int(p2.a2) if n != 0 { return n < 0 } n = int(p1.a3) - int(p2.a3) if n != 0 { return n < 0 } return false } func opset(a, b0 obj.As) { oprange[a&obj.AMask] = oprange[b0] } func buildop(ctxt *obj.Link) { if oprange[AOR&obj.AMask] != nil { // Already initialized; stop now. // This happens in the cmd/asm tests, // each of which re-initializes the arch. 
return } var n int for i := 0; i < C_NCLASS; i++ { for n = 0; n < C_NCLASS; n++ { if cmp(n, i) { xcmp[i][n] = true } } } for n = 0; optab[n].as != obj.AXXX; n++ { } sort.Sort(ocmp(optab[:n])) for i := 0; i < n; i++ { r := optab[i].as r0 := r & obj.AMask start := i for optab[i].as == r { i++ } oprange[r0] = optab[start:i] i-- switch r { default: ctxt.Diag("unknown op in build: %v", r) ctxt.DiagFlush() log.Fatalf("bad code") case AABSF: opset(AMOVFD, r0) opset(AMOVDF, r0) opset(AMOVWF, r0) opset(AMOVFW, r0) opset(AMOVWD, r0) opset(AMOVDW, r0) opset(ANEGF, r0) opset(ANEGD, r0) opset(AABSD, r0) opset(ATRUNCDW, r0) opset(ATRUNCFW, r0) opset(ASQRTF, r0) opset(ASQRTD, r0) case AMOVVF: opset(AMOVVD, r0) opset(AMOVFV, r0) opset(AMOVDV, r0) opset(ATRUNCDV, r0) opset(ATRUNCFV, r0) case AADD: opset(ASGT, r0) opset(ASGTU, r0) opset(AADDU, r0) case AADDV: opset(AADDVU, r0) case AADDF: opset(ADIVF, r0) opset(ADIVD, r0) opset(AMULF, r0) opset(AMULD, r0) opset(ASUBF, r0) opset(ASUBD, r0) opset(AADDD, r0) case AAND: opset(AOR, r0) opset(AXOR, r0) case ABEQ: opset(ABNE, r0) case ABLEZ: opset(ABGEZ, r0) opset(ABGEZAL, r0) opset(ABLTZ, r0) opset(ABLTZAL, r0) opset(ABGTZ, r0) case AMOVB: opset(AMOVH, r0) case AMOVBU: opset(AMOVHU, r0) case AMUL: opset(AREM, r0) opset(AREMU, r0) opset(ADIVU, r0) opset(AMULU, r0) opset(ADIV, r0) opset(AMADD, r0) opset(AMSUB, r0) case AMULV: opset(ADIVV, r0) opset(ADIVVU, r0) opset(AMULVU, r0) opset(AREMV, r0) opset(AREMVU, r0) case ASLL: opset(ASRL, r0) opset(ASRA, r0) case ASLLV: opset(ASRAV, r0) opset(ASRLV, r0) case ASUB: opset(ASUBU, r0) opset(ANOR, r0) case ASUBV: opset(ASUBVU, r0) case ASYSCALL: opset(ASYNC, r0) opset(ANOOP, r0) opset(ATLBP, r0) opset(ATLBR, r0) opset(ATLBWI, r0) opset(ATLBWR, r0) case ACMPEQF: opset(ACMPGTF, r0) opset(ACMPGTD, r0) opset(ACMPGEF, r0) opset(ACMPGED, r0) opset(ACMPEQD, r0) case ABFPT: opset(ABFPF, r0) case AMOVWL: opset(AMOVWR, r0) case AMOVVL: opset(AMOVVR, r0) case AVMOVB: opset(AVMOVH, r0) opset(AVMOVW, r0) 
opset(AVMOVD, r0) case AMOVW, AMOVD, AMOVF, AMOVV, ABREAK, ARFE, AJAL, AJMP, AMOVWU, ALL, ALLV, ASC, ASCV, ANEGW, ANEGV, AWORD, obj.ANOP, obj.ATEXT, obj.AUNDEF, obj.AFUNCDATA, obj.APCDATA, obj.ADUFFZERO, obj.ADUFFCOPY: break case ACMOVN: opset(ACMOVZ, r0) case ACMOVT: opset(ACMOVF, r0) case ACLO: opset(ACLZ, r0) case ATEQ: opset(ATNE, r0) } } } func OP(x uint32, y uint32) uint32 { return x<<3 | y<<0 } func SP(x uint32, y uint32) uint32 { return x<<29 | y<<26 } func BCOND(x uint32, y uint32) uint32 { return x<<19 | y<<16 } func MMU(x uint32, y uint32) uint32 { return SP(2, 0) | 16<<21 | x<<3 | y<<0 } func FPF(x uint32, y uint32) uint32 { return SP(2, 1) | 16<<21 | x<<3 | y<<0 } func FPD(x uint32, y uint32) uint32 { return SP(2, 1) | 17<<21 | x<<3 | y<<0 } func FPW(x uint32, y uint32) uint32 { return SP(2, 1) | 20<<21 | x<<3 | y<<0 } func FPV(x uint32, y uint32) uint32 { return SP(2, 1) | 21<<21 | x<<3 | y<<0 } func OP_RRR(op uint32, r1 uint32, r2 uint32, r3 uint32) uint32 {
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
true