_id
stringlengths
2
7
title
stringlengths
1
118
partition
stringclasses
3 values
text
stringlengths
52
85.5k
language
stringclasses
1 value
meta_information
dict
q15000
MoveNext
train
func (t *NodeIterator) MoveNext() bool { n := t.query.Select(t) if n != nil { if !t.node.MoveTo(n) { t.node = n.Copy() } return true } return false }
go
{ "resource": "" }
q15001
Select
train
// Select creates a NodeIterator over the nodes matched by the compiled
// expression, starting the walk at root. The query is cloned so that
// concurrent iterations do not share state.
func (expr *Expr) Select(root NodeNavigator) *NodeIterator {
	it := &NodeIterator{
		query: expr.q.Clone(),
		node:  root,
	}
	return it
}
go
{ "resource": "" }
q15002
Compile
train
func Compile(expr string) (*Expr, error) { if expr == "" { return nil, errors.New("expr expression is nil") } qy, err := build(expr) if err != nil { return nil, err } return &Expr{s: expr, q: qy}, nil }
go
{ "resource": "" }
q15003
MustCompile
train
func MustCompile(expr string) *Expr { exp, err := Compile(expr) if err != nil { return nil } return exp }
go
{ "resource": "" }
q15004
predicate
train
func predicate(q query) func(NodeNavigator) bool { type Predicater interface { Test(NodeNavigator) bool } if p, ok := q.(Predicater); ok { return p.Test } return func(NodeNavigator) bool { return true } }
go
{ "resource": "" }
q15005
substringFunc
train
// substringFunc implements the XPath substring() function: arg1 is the
// source string (or a query whose first selected node's value is used),
// arg2 the 1-based start position, arg3 the optional length. Panics on
// non-numeric positions or an out-of-range start/length combination.
// NOTE(review): the panic messages call arg2 the "first argument" and
// arg3 the "second argument" (counting after the string) — confirm this
// wording is intended.
func substringFunc(arg1, arg2, arg3 query) func(query, iterator) interface{} { return func(q query, t iterator) interface{} { var m string switch typ := arg1.Evaluate(t).(type) { case string: m = typ case query: node := typ.Select(t) if node == nil { return "" } m = node.Value() } var start, length float64 var ok bool if start, ok = arg2.Evaluate(t).(float64); !ok { panic(errors.New("substring() function first argument type must be int")) } else if start < 1 { panic(errors.New("substring() function first argument type must be >= 1")) } start-- if arg3 != nil { if length, ok = arg3.Evaluate(t).(float64); !ok { panic(errors.New("substring() function second argument type must be int")) } } if (len(m) - int(start)) < int(length) { panic(errors.New("substring() function start and length argument out of range")) } if length > 0 { return m[int(start):int(length+start)] } return m[int(start):] } }
go
{ "resource": "" }
q15006
utpProcessUdp
train
// utpProcessUdp feeds one received UDP datagram into libutp via cgo and
// reports whether libutp consumed it as a uTP packet. Empty buffers and
// zero source ports are rejected up front; the firewall callback runs
// with the global mutex released to avoid re-entrancy deadlocks.
// NOTE(review): the sockaddr is written into the package-level
// staticRsa, so callers must hold the global lock — confirm.
func (s *Socket) utpProcessUdp(b []byte, addr net.Addr) (utp bool) { if len(b) == 0 { // The implementation of utp_process_udp rejects null buffers, and // anything smaller than the UTP header size. It's also prone to // assert on those, which we don't want to trigger. return false } if missinggo.AddrPort(addr) == 0 { return false } mu.Unlock() block := func() bool { if s.firewallCallback == nil { return false } return s.firewallCallback(addr) }() mu.Lock() s.block = block if s.closed { return false } var sal C.socklen_t staticRsa, sal = netAddrToLibSockaddr(addr) ret := C.utp_process_udp(s.ctx, (*C.byte)(&b[0]), C.size_t(len(b)), (*C.struct_sockaddr)(unsafe.Pointer(&staticRsa)), sal) switch ret { case 1: return true case 0: return false default: panic(ret) } }
go
{ "resource": "" }
q15007
DialContext
train
func (s *Socket) DialContext(ctx context.Context, network, addr string) (net.Conn, error) { c, err := s.NewConn() if err != nil { return nil, err } err = c.Connect(ctx, network, addr) if err != nil { c.Close() return nil, err } return c, nil }
go
{ "resource": "" }
q15008
fdget
train
func fdget(fd int, fds *syscall.FdSet) (index, offset int) { index = fd / (syscall.FD_SETSIZE / len(fds.Bits)) % len(fds.Bits) offset = fd % (syscall.FD_SETSIZE / len(fds.Bits)) return }
go
{ "resource": "" }
q15009
fdset
train
func fdset(fd int, fds *syscall.FdSet) { idx, pos := fdget(fd, fds) fds.Bits[idx] = 1 << uint(pos) }
go
{ "resource": "" }
q15010
fdisset
train
func fdisset(fd int, fds *syscall.FdSet) bool { idx, pos := fdget(fd, fds) return fds.Bits[idx]&(1<<uint(pos)) != 0 }
go
{ "resource": "" }
q15011
Read
train
// Read reads from the serial handle via the Windows ReadFile call. A
// successful zero-byte completion means the comm timeout expired, which
// is surfaced as ErrTimeout.
func (p *port) Read(b []byte) (n int, err error) { var done uint32 if err = syscall.ReadFile(p.handle, b, &done, nil); err != nil { return } if done == 0 { err = ErrTimeout return } n = int(done) return }
go
{ "resource": "" }
q15012
syscallSelect
train
// syscallSelect is a thin wrapper over syscall.Select so that
// platform-specific differences in the Select signature can be adapted
// in one place per GOOS.
func syscallSelect(n int, r *syscall.FdSet, w *syscall.FdSet, e *syscall.FdSet, tv *syscall.Timeval) error { return syscall.Select(n, r, w, e, tv) }
go
{ "resource": "" }
q15013
Read
train
// Read waits in select(2) — bounded by the configured timeout, when one
// is set — until the port's fd is readable, retrying on EINTR, then
// performs a single read. Select expiry is reported as ErrTimeout.
// NOTE(review): rfds is not re-armed before retrying after EINTR even
// though select may modify the set — confirm on the target platforms.
func (p *port) Read(b []byte) (n int, err error) { var rfds syscall.FdSet fd := p.fd fdset(fd, &rfds) var tv *syscall.Timeval if p.timeout > 0 { timeout := syscall.NsecToTimeval(p.timeout.Nanoseconds()) tv = &timeout } for { // If syscall.Select() returns EINTR (Interrupted system call), retry it if err = syscallSelect(fd+1, &rfds, nil, nil, tv); err == nil { break } if err != syscall.EINTR { err = fmt.Errorf("serial: could not select: %v", err) return } } if !fdisset(fd, &rfds) { // Timeout err = ErrTimeout return } n, err = syscall.Read(fd, b) return }
go
{ "resource": "" }
q15014
backupTermios
train
func (p *port) backupTermios() { oldTermios := &syscall.Termios{} if err := tcgetattr(p.fd, oldTermios); err != nil { // Warning only. log.Printf("serial: could not get setting: %v\n", err) return } // Will be reloaded when closing. p.oldTermios = oldTermios }
go
{ "resource": "" }
q15015
restoreTermios
train
func (p *port) restoreTermios() { if p.oldTermios == nil { return } if err := tcsetattr(p.fd, p.oldTermios); err != nil { // Warning only. log.Printf("serial: could not restore setting: %v\n", err) return } p.oldTermios = nil }
go
{ "resource": "" }
q15016
newTermios
train
// newTermios translates a Config into a syscall.Termios: baud rate
// (19200 by default), input/output speeds, character size (CS8 by
// default), stop bits, and parity. Even parity is the default per the
// Modbus spec; "O" sets PARODD and deliberately falls through to also
// enable PARENB and INPCK. CREAD|CLOCAL are always set. VMIN/VTIME stay
// zero because the device is opened with NDELAY. Unsupported values
// yield a descriptive error.
func newTermios(c *Config) (termios *syscall.Termios, err error) { termios = &syscall.Termios{} flag := termios.Cflag // Baud rate if c.BaudRate == 0 { // 19200 is the required default. flag = syscall.B19200 } else { var ok bool flag, ok = baudRates[c.BaudRate] if !ok { err = fmt.Errorf("serial: unsupported baud rate %v", c.BaudRate) return } } termios.Cflag |= flag // Input baud. cfSetIspeed(termios, flag) // Output baud. cfSetOspeed(termios, flag) // Character size. if c.DataBits == 0 { flag = syscall.CS8 } else { var ok bool flag, ok = charSizes[c.DataBits] if !ok { err = fmt.Errorf("serial: unsupported character size %v", c.DataBits) return } } termios.Cflag |= flag // Stop bits switch c.StopBits { case 0, 1: // Default is one stop bit. // noop case 2: // CSTOPB: Set two stop bits. termios.Cflag |= syscall.CSTOPB default: err = fmt.Errorf("serial: unsupported stop bits %v", c.StopBits) return } switch c.Parity { case "N": // noop case "O": // PARODD: Parity is odd. termios.Cflag |= syscall.PARODD fallthrough case "", "E": // As mentioned in the modbus spec, the default parity mode must be Even parity // PARENB: Enable parity generation on output. termios.Cflag |= syscall.PARENB // INPCK: Enable input parity checking. termios.Iflag |= syscall.INPCK default: err = fmt.Errorf("serial: unsupported parity %v", c.Parity) return } // Control modes. // CREAD: Enable receiver. // CLOCAL: Ignore control lines. termios.Cflag |= syscall.CREAD | syscall.CLOCAL // Special characters. // VMIN: Minimum number of characters for noncanonical read. // VTIME: Time in deciseconds for noncanonical read. // Both are unused as NDELAY is we utilized when opening device. return }
go
{ "resource": "" }
q15017
enableRS485
train
// enableRS485 issues the RS485 ioctl (rs485Tiocs) on fd to enable RS485
// mode with the configured RTS delays (converted to milliseconds) and
// flag bits. A config with Enabled == false is a no-op. Both an errno
// and a nonzero ioctl result are surfaced as errors.
func enableRS485(fd int, config *RS485Config) error { if !config.Enabled { return nil } rs485 := rs485_ioctl_opts{ rs485Enabled, uint32(config.DelayRtsBeforeSend / time.Millisecond), uint32(config.DelayRtsAfterSend / time.Millisecond), [5]uint32{0, 0, 0, 0, 0}, } if config.RtsHighDuringSend { rs485.flags |= rs485RTSOnSend } if config.RtsHighAfterSend { rs485.flags |= rs485RTSAfterSend } if config.RxDuringTx { rs485.flags |= rs485RXDuringTX } r, _, errno := syscall.Syscall( syscall.SYS_IOCTL, uintptr(fd), uintptr(rs485Tiocs), uintptr(unsafe.Pointer(&rs485))) if errno != 0 { return os.NewSyscallError("SYS_IOCTL (RS485)", errno) } if r != 0 { return errors.New("serial: unknown error from SYS_IOCTL (RS485)") } return nil }
go
{ "resource": "" }
q15018
Open
train
func Open(c *Config) (p Port, err error) { p = New() err = p.Open(c) return }
go
{ "resource": "" }
q15019
NewServer
train
func NewServer(readHandler func(filename string, rf io.ReaderFrom) error, writeHandler func(filename string, wt io.WriterTo) error) *Server { s := &Server{ timeout: defaultTimeout, retries: defaultRetries, runGC: make(chan []string), gcInterval: 1 * time.Minute, packetReadTimeout: 100 * time.Millisecond, readHandler: readHandler, writeHandler: writeHandler, } return s }
go
{ "resource": "" }
q15020
SetAnticipate
train
func (s *Server) SetAnticipate(winsz uint) { if winsz > 1 { s.sendAEnable = true s.sendAWinSz = winsz } else { s.sendAEnable = false s.sendAWinSz = 1 } }
go
{ "resource": "" }
q15021
EnableSinglePort
train
func (s *Server) EnableSinglePort() { s.singlePort = true s.handlers = make(map[string]chan []byte, datagramLength) s.gcCollect = make(chan string) s.bufPool = sync.Pool{ New: func() interface{} { return make([]byte, datagramLength) }, } go s.internalGC() }
go
{ "resource": "" }
q15022
SetTimeout
train
// SetTimeout sets the per-packet timeout; non-positive values select
// the package default.
func (s *Server) SetTimeout(t time.Duration) {
	s.timeout = t
	if t <= 0 {
		s.timeout = defaultTimeout
	}
}
go
{ "resource": "" }
q15023
SetRetries
train
// SetRetries sets how many times a lost packet is retried; values below
// one select the package default.
func (s *Server) SetRetries(count int) {
	s.retries = count
	if count < 1 {
		s.retries = defaultRetries
	}
}
go
{ "resource": "" }
q15024
ListenAndServe
train
func (s *Server) ListenAndServe(addr string) error { a, err := net.ResolveUDPAddr("udp", addr) if err != nil { return err } conn, err := net.ListenUDP("udp", a) if err != nil { return err } return s.Serve(conn) }
go
{ "resource": "" }
q15025
Serve
train
// Serve runs the TFTP request loop on conn until Shutdown. It first
// wraps the socket in an ipv4 or ipv6 packet conn (for destination and
// interface control messages) based on the class of the listening
// address — separate control paths per IP family are required by the
// x/net packages. It then either delegates to the single-port loop or
// dispatches requests one at a time, checking the quit channel between
// requests; per-request errors are currently swallowed (see TODO).
func (s *Server) Serve(conn *net.UDPConn) error { defer conn.Close() laddr := conn.LocalAddr() host, _, err := net.SplitHostPort(laddr.String()) if err != nil { return err } s.conn = conn // Having seperate control paths for IP4 and IP6 is annoying, // but necessary at this point. addr := net.ParseIP(host) if addr == nil { return fmt.Errorf("Failed to determine IP class of listening address") } if addr.To4() != nil { s.conn4 = ipv4.NewPacketConn(conn) if err := s.conn4.SetControlMessage(ipv4.FlagDst|ipv4.FlagInterface, true); err != nil { s.conn4 = nil } } else { s.conn6 = ipv6.NewPacketConn(conn) if err := s.conn6.SetControlMessage(ipv6.FlagDst|ipv6.FlagInterface, true); err != nil { s.conn6 = nil } } s.quit = make(chan chan struct{}) if s.singlePort { s.singlePortProcessRequests() } else { for { select { case q := <-s.quit: q <- struct{}{} return nil default: var err error if s.conn4 != nil { err = s.processRequest4() } else if s.conn6 != nil { err = s.processRequest6() } else { err = s.processRequest() } if err != nil { // TODO: add logging handler } } } } return nil }
go
{ "resource": "" }
q15026
Shutdown
train
func (s *Server) Shutdown() { s.conn.Close() q := make(chan struct{}) s.quit <- q <-q s.wg.Wait() }
go
{ "resource": "" }
q15027
internalGC
train
func (s *Server) internalGC() { var completedHandlers []string for { select { case newHandler := <-s.gcCollect: completedHandlers = append(completedHandlers, newHandler) case <-time.After(s.gcInterval): s.runGC <- completedHandlers completedHandlers = nil } } }
go
{ "resource": "" }
q15028
packRQ
train
// packRQ serializes a read/write request (RRQ/WRQ) into p: a 2-byte
// big-endian opcode followed by NUL-terminated filename, mode, and
// option name/value pairs. It returns the number of bytes written. The
// filename copy is capped at len(p)-10 to leave room for the trailing
// fields. NOTE(review): mode and option copies are not length-capped;
// confirm callers always pass a datagram-sized p.
func packRQ(p []byte, op uint16, filename, mode string, opts options) int { binary.BigEndian.PutUint16(p, op) n := 2 n += copy(p[2:len(p)-10], filename) p[n] = 0 n++ n += copy(p[n:], mode) p[n] = 0 n++ for name, value := range opts { n += copy(p[n:], name) p[n] = 0 n++ n += copy(p[n:], value) p[n] = 0 n++ } return n }
go
{ "resource": "" }
q15029
NewClient
train
func NewClient(addr string) (*Client, error) { a, err := net.ResolveUDPAddr("udp", addr) if err != nil { return nil, fmt.Errorf("resolving address %s: %v", addr, err) } return &Client{ addr: a, timeout: defaultTimeout, retries: defaultRetries, }, nil }
go
{ "resource": "" }
q15030
SetTimeout
train
func (c *Client) SetTimeout(t time.Duration) { if t <= 0 { c.timeout = defaultTimeout } c.timeout = t }
go
{ "resource": "" }
q15031
SetRetries
train
func (c *Client) SetRetries(count int) { if count < 1 { c.retries = defaultRetries } c.retries = count }
go
{ "resource": "" }
q15032
Send
train
// Send initiates a WRQ (upload) of filename to the server, negotiating
// the blksize option when configured, and returns a ReaderFrom through
// which the caller streams the payload. The sender's address is updated
// to the transfer TID the server replies from, and the request options
// are cleared so they are not resent.
func (c Client) Send(filename string, mode string) (io.ReaderFrom, error) { conn, err := net.ListenUDP("udp", &net.UDPAddr{}) if err != nil { return nil, err } s := &sender{ send: make([]byte, datagramLength), receive: make([]byte, datagramLength), conn: &connConnection{conn: conn}, retry: &backoff{handler: c.backoff}, timeout: c.timeout, retries: c.retries, addr: c.addr, mode: mode, } if c.blksize != 0 { s.opts = make(options) s.opts["blksize"] = strconv.Itoa(c.blksize) } n := packRQ(s.send, opWRQ, filename, mode, s.opts) addr, err := s.sendWithRetry(n) if err != nil { return nil, err } s.addr = addr s.opts = nil return s, nil }
go
{ "resource": "" }
q15033
Receive
train
// Receive initiates an RRQ (download) of filename, negotiating the
// blksize and tsize options when configured, and returns a WriterTo
// that streams the file to the caller. l is the server-reported
// transfer size (when tsize was negotiated) and the receiver's address
// is updated to the server's transfer TID. The blksize option is
// removed after the request so it is not sent twice.
func (c Client) Receive(filename string, mode string) (io.WriterTo, error) { conn, err := net.ListenUDP("udp", &net.UDPAddr{}) if err != nil { return nil, err } if c.timeout == 0 { c.timeout = defaultTimeout } r := &receiver{ send: make([]byte, datagramLength), receive: make([]byte, datagramLength), conn: &connConnection{conn: conn}, retry: &backoff{handler: c.backoff}, timeout: c.timeout, retries: c.retries, addr: c.addr, autoTerm: true, block: 1, mode: mode, } if c.blksize != 0 || c.tsize { r.opts = make(options) } if c.blksize != 0 { r.opts["blksize"] = strconv.Itoa(c.blksize) // Clean it up so we don't send options twice defer func() { delete(r.opts, "blksize") }() } if c.tsize { r.opts["tsize"] = "0" } n := packRQ(r.send, opRRQ, filename, mode, r.opts) l, addr, err := r.receiveWithRetry(n) if err != nil { return nil, err } r.l = l r.addr = addr return r, nil }
go
{ "resource": "" }
q15034
AvailableBackends
train
func AvailableBackends() []BackendType { b := []BackendType{} for k := range supportedBackends { if k != FileBackend { b = append(b, k) } } // make sure FileBackend is last return append(b, FileBackend) }
go
{ "resource": "" }
q15035
Open
train
func Open(cfg Config) (Keyring, error) { if cfg.AllowedBackends == nil { cfg.AllowedBackends = AvailableBackends() } debugf("Considering backends: %v", cfg.AllowedBackends) for _, backend := range cfg.AllowedBackends { if opener, ok := supportedBackends[backend]; ok { openBackend, err := opener(cfg) if err != nil { debugf("Failed backend %s: %s", backend, err) continue } return openBackend, nil } } return nil, ErrNoAvailImpl }
go
{ "resource": "" }
q15036
NewArrayKeyring
train
func NewArrayKeyring(initial []Item) *ArrayKeyring { kr := &ArrayKeyring{} for _, i := range initial { _ = kr.Set(i) } return kr }
go
{ "resource": "" }
q15037
Get
train
func (k *ArrayKeyring) Get(key string) (Item, error) { if i, ok := k.items[key]; ok { return i, nil } return Item{}, ErrKeyNotFound }
go
{ "resource": "" }
q15038
Set
train
func (k *ArrayKeyring) Set(i Item) error { if k.items == nil { k.items = map[string]Item{} } k.items[i.Key] = i return nil }
go
{ "resource": "" }
q15039
Remove
train
func (k *ArrayKeyring) Remove(key string) error { delete(k.items, key) return nil }
go
{ "resource": "" }
q15040
Keys
train
func (k *ArrayKeyring) Keys() ([]string, error) { var keys = []string{} for key := range k.items { keys = append(keys, key) } return keys, nil }
go
{ "resource": "" }
q15041
WithHTTPClient
train
// WithHTTPClient returns an option that overrides the metadata client's
// underlying *http.Client.
func WithHTTPClient(client *http.Client) ClientOption {
	return func(c *Client) {
		c.client = client
	}
}
go
{ "resource": "" }
q15042
WithBaseURL
train
// WithBaseURL returns an option that overrides the metadata client's
// base URL.
func WithBaseURL(base *url.URL) ClientOption {
	return func(c *Client) {
		c.baseURL = base
	}
}
go
{ "resource": "" }
q15043
NewClient
train
func NewClient(opts ...ClientOption) *Client { client := &Client{ client: &http.Client{Timeout: defaultTimeout}, baseURL: defaultBaseURL, } for _, opt := range opts { opt(client) } return client }
go
{ "resource": "" }
q15044
Metadata
train
func (c *Client) Metadata() (*Metadata, error) { metadata := new(Metadata) err := c.doGetURL(c.resolve("/metadata/v1.json"), func(r io.Reader) error { return json.NewDecoder(r).Decode(metadata) }) return metadata, err }
go
{ "resource": "" }
q15045
DropletID
train
func (c *Client) DropletID() (int, error) { dropletID := new(int) err := c.doGet("id", func(r io.Reader) error { _, err := fmt.Fscanf(r, "%d", dropletID) return err }) return *dropletID, err }
go
{ "resource": "" }
q15046
Hostname
train
func (c *Client) Hostname() (string, error) { var hostname string err := c.doGet("hostname", func(r io.Reader) error { hostnameraw, err := ioutil.ReadAll(r) hostname = string(hostnameraw) return err }) return hostname, err }
go
{ "resource": "" }
q15047
UserData
train
func (c *Client) UserData() (string, error) { var userdata string err := c.doGet("user-data", func(r io.Reader) error { userdataraw, err := ioutil.ReadAll(r) userdata = string(userdataraw) return err }) return userdata, err }
go
{ "resource": "" }
q15048
VendorData
train
func (c *Client) VendorData() (string, error) { var vendordata string err := c.doGet("vendor-data", func(r io.Reader) error { vendordataraw, err := ioutil.ReadAll(r) vendordata = string(vendordataraw) return err }) return vendordata, err }
go
{ "resource": "" }
q15049
Region
train
func (c *Client) Region() (string, error) { var region string err := c.doGet("region", func(r io.Reader) error { regionraw, err := ioutil.ReadAll(r) region = string(regionraw) return err }) return region, err }
go
{ "resource": "" }
q15050
AuthToken
train
func (c *Client) AuthToken() (string, error) { var authToken string err := c.doGet("auth-token", func(r io.Reader) error { authTokenraw, err := ioutil.ReadAll(r) authToken = string(authTokenraw) return err }) return authToken, err }
go
{ "resource": "" }
q15051
FloatingIPv4Active
train
func (c *Client) FloatingIPv4Active() (bool, error) { var active bool err := c.doGet("floating_ip/ipv4/active", func(r io.Reader) error { activeraw, err := ioutil.ReadAll(r) if string(activeraw) == "true" { active = true } return err }) return active, err }
go
{ "resource": "" }
q15052
Save
train
func Save(path string, v interface{}) error { lock.Lock() defer lock.Unlock() f, err := os.Create(path) if err != nil { return err } defer f.Close() r, err := Marshal(v) if err != nil { return err } _, err = io.Copy(f, r) return err }
go
{ "resource": "" }
q15053
Read
train
// Read implements io.Reader, converting buffered source bytes into p
// via iconv. The internal buffer is refilled whenever it is empty (or
// fully consumed); a stored error/EOF is returned once no data remains.
// E2BIG from iconv is deliberately swallowed as long as at least one
// byte was written, since the caller can simply Read again with more
// room — only a zero-progress E2BIG or any other error is recorded.
func (this *Reader) Read(p []byte) (n int, err error) { // checks for when we have no data for this.writePos == 0 || this.readPos == this.writePos { // if we have an error / EOF, just return it if this.err != nil { return n, this.err } // else, fill our buffer this.fillBuffer() } // TODO: checks for when we have less data than len(p) // we should have an appropriate amount of data, convert it into the given buffer bytesRead, bytesWritten, err := this.converter.Convert(this.buffer[this.readPos:this.writePos], p) // adjust byte counters this.readPos += bytesRead n += bytesWritten // if we experienced an iconv error, check it if err != nil { // E2BIG errors can be ignored (we'll get them often) as long // as at least 1 byte was written. If we experienced an E2BIG // and no bytes were written then the buffer is too small for // even the next character if err != syscall.E2BIG || bytesWritten == 0 { // track anything else this.err = err } } // return our results return n, this.err }
go
{ "resource": "" }
q15054
NewConverter
train
// NewConverter opens an iconv conversion descriptor translating from
// fromEncoding to toEncoding. The temporary C strings are freed before
// returning; the Converter must be Closed to release the descriptor.
func NewConverter(fromEncoding string, toEncoding string) (converter *Converter, err error) { converter = new(Converter) // convert to C strings toEncodingC := C.CString(toEncoding) fromEncodingC := C.CString(fromEncoding) // open an iconv descriptor converter.context, err = C.iconv_open(toEncodingC, fromEncodingC) // free the C Strings C.free(unsafe.Pointer(toEncodingC)) C.free(unsafe.Pointer(fromEncodingC)) // check err if err == nil { // no error, mark the context as open converter.open = true } return }
go
{ "resource": "" }
q15055
Close
train
// Close releases the iconv descriptor if the converter was successfully
// opened; closing an unopened converter is a no-op.
// NOTE(review): open is not cleared after a successful close, so a
// second Close would call iconv_close again — confirm intended.
func (this *Converter) Close() (err error) { if this.open { _, err = C.iconv_close(this.context) } return }
go
{ "resource": "" }
q15056
Convert
train
// Convert runs iconv over input into output, returning the number of
// bytes consumed and produced. With empty input but room in output it
// flushes any pending shift sequence; with both empty it performs a
// shift-state reset. EBADF is returned when the converter is closed.
func (this *Converter) Convert(input []byte, output []byte) (bytesRead int, bytesWritten int, err error) { // make sure we are still open if this.open { inputLeft := C.size_t(len(input)) outputLeft := C.size_t(len(output)) if inputLeft > 0 && outputLeft > 0 { // we have to give iconv a pointer to a pointer of the underlying // storage of each byte slice - so far this is the simplest // way i've found to do that in Go, but it seems ugly inputPointer := (*C.char)(unsafe.Pointer(&input[0])) outputPointer := (*C.char)(unsafe.Pointer(&output[0])) _, err = C.call_iconv(this.context, inputPointer, &inputLeft, outputPointer, &outputLeft) // update byte counters bytesRead = len(input) - int(inputLeft) bytesWritten = len(output) - int(outputLeft) } else if inputLeft == 0 && outputLeft > 0 { // inputPointer will be nil, outputPointer is generated as above outputPointer := (*C.char)(unsafe.Pointer(&output[0])) _, err = C.call_iconv(this.context, nil, &inputLeft, outputPointer, &outputLeft) // update write byte counter bytesWritten = len(output) - int(outputLeft) } else { // both input and output are zero length, do a shift state reset _, err = C.call_iconv(this.context, nil, &inputLeft, nil, &outputLeft) } } else { err = syscall.EBADF } return bytesRead, bytesWritten, err }
go
{ "resource": "" }
q15057
ConvertString
train
// ConvertString converts input to the target encoding and returns the
// result. The output buffer starts at twice the input length and grows
// by one input length whenever iconv reports E2BIG; conversion finishes
// with a shift-state flush. EBADF is returned on a closed converter.
func (this *Converter) ConvertString(input string) (output string, err error) { // make sure we are still open if this.open { // construct the buffers inputBuffer := []byte(input) outputBuffer := make([]byte, len(inputBuffer)*2) // we use a larger buffer to help avoid resizing later // call Convert until all input bytes are read or an error occurs var bytesRead, totalBytesRead, bytesWritten, totalBytesWritten int for totalBytesRead < len(inputBuffer) && err == nil { // use the totals to create buffer slices bytesRead, bytesWritten, err = this.Convert(inputBuffer[totalBytesRead:], outputBuffer[totalBytesWritten:]) totalBytesRead += bytesRead totalBytesWritten += bytesWritten // check for the E2BIG error specifically, we can add to the output // buffer to correct for it and then continue if err == syscall.E2BIG { // increase the size of the output buffer by another input length // first, create a new buffer tempBuffer := make([]byte, len(outputBuffer)+len(inputBuffer)) // copy the existing data copy(tempBuffer, outputBuffer) // switch the buffers outputBuffer = tempBuffer // forget the error err = nil } } if err == nil { // perform a final shift state reset _, bytesWritten, err = this.Convert([]byte{}, outputBuffer[totalBytesWritten:]) // update total count totalBytesWritten += bytesWritten } // construct the final output string output = string(outputBuffer[:totalBytesWritten]) } else { err = syscall.EBADF } return output, err }
go
{ "resource": "" }
q15058
Convert
train
// Convert is a one-shot helper: it opens a temporary Converter for the
// encoding pair, converts input into output (including any trailing
// shift sequence generated by a final nil-input call), and closes the
// converter. The Close error is intentionally ignored.
func Convert(input []byte, output []byte, fromEncoding string, toEncoding string) (bytesRead int, bytesWritten int, err error) { // create a temporary converter converter, err := NewConverter(fromEncoding, toEncoding) if err == nil { // call converter's Convert bytesRead, bytesWritten, err = converter.Convert(input, output) if err == nil { var shiftBytesWritten int // call Convert with a nil input to generate any end shift sequences _, shiftBytesWritten, err = converter.Convert(nil, output[bytesWritten:]) // add shift bytes to total bytes bytesWritten += shiftBytesWritten } // close the converter converter.Close() } return }
go
{ "resource": "" }
q15059
ConvertString
train
func ConvertString(input string, fromEncoding string, toEncoding string) (output string, err error) { // create a temporary converter converter, err := NewConverter(fromEncoding, toEncoding) if err == nil { // convert the string output, err = converter.ConvertString(input) // close the converter converter.Close() } return }
go
{ "resource": "" }
q15060
Write
train
// Write implements io.Writer: it converts p into the internal buffer
// and then drains the buffer downstream, returning the number of bytes
// consumed from p. A previously recorded write error is returned while
// data is still pending.
// NOTE(review): if emptyBuffer makes no progress and sets no error this
// loops forever — confirm emptyBuffer always advances or records err.
func (this *Writer) Write(p []byte) (n int, err error) { // write data into our internal buffer bytesRead, bytesWritten, err := this.converter.Convert(p, this.buffer[this.writePos:]) // update bytes written for return n += bytesRead this.writePos += bytesWritten // checks for when we have a full buffer for this.writePos > 0 { // if we have an error, just return it if this.err != nil { return } // else empty the buffer this.emptyBuffer() } return n, err }
go
{ "resource": "" }
q15061
Transform
train
// Transform culls the region map to the viewport and folds each
// remaining region set into a Recipe keyed by the colour scheme's
// Flavour (via Spice). Profiling probes bracket the region extraction
// and accumulation sub-steps.
func Transform(scheme ColourScheme, data ViewRegionMap, viewport text.Region) Recipe { pe := util.Prof.Enter("render.Transform") defer pe.Exit() data.Cull(viewport) recipe := make(Recipe) for _, v := range data { k := scheme.Spice(&v) rs := recipe[k] a := util.Prof.Enter("render.Transform.(Regions)") r := v.Regions.Regions() a.Exit() a = util.Prof.Enter("render.Transform.(AddAll)") rs.AddAll(r) a.Exit() recipe[k] = rs } return recipe }
go
{ "resource": "" }
q15062
Transcribe
train
func (r Recipe) Transcribe() (ret TranscribedRecipe) { pe := util.Prof.Enter("render.Transcribe") defer pe.Exit() for flav, set := range r { for _, r := range set.Regions() { ret = append(ret, RenderUnit{Flavour: flav, Region: r}) } } sort.Sort(&ret) return }
go
{ "resource": "" }
q15063
String
train
// String renders the edit for debugging: its command, arguments, and
// undo/composite state.
func (e *Edit) String() string {
	return fmt.Sprintf("%s: %v, %v, %v", e.command, e.args, e.bypassUndo, e.composite)
}
go
{ "resource": "" }
q15064
Undo
train
func (e *Edit) Undo() { e.composite.Undo() e.v.Sel().Clear() for _, r := range e.savedSel.Regions() { e.v.Sel().Add(r) } }
go
{ "resource": "" }
q15065
Less
train
// Less orders bindings by the key at the current sequence position
// (sort.Interface).
func (k *KeyBindings) Less(i, j int) bool {
	a := k.Bindings[i].Keys[k.seqIndex].Index()
	b := k.Bindings[j].Keys[k.seqIndex].Index()
	return a < b
}
go
{ "resource": "" }
q15066
Swap
train
// Swap exchanges two bindings (sort.Interface).
func (k *KeyBindings) Swap(i, j int) {
	b := k.Bindings
	b[i], b[j] = b[j], b[i]
}
go
{ "resource": "" }
q15067
DropLessEqualKeys
train
// DropLessEqualKeys removes, from this binding set and every parent
// set, all bindings whose key-sequence length is <= count, using
// swap-with-last deletion (hence the manual index control) and
// re-sorting each level afterwards.
func (k *KeyBindings) DropLessEqualKeys(count int) { for { for i := 0; i < len(k.Bindings); { if len(k.Bindings[i].Keys) <= count { k.Bindings[i] = k.Bindings[len(k.Bindings)-1] k.Bindings = k.Bindings[:len(k.Bindings)-1] } else { i++ } } sort.Sort(k) if k.parent == nil { break } k = k.parent.KeyBindings() } }
go
{ "resource": "" }
q15068
Filter
train
// Filter returns the bindings matching key press kp at the current
// position in the key sequence, with the result's sequence index
// advanced by one. Shorter (already satisfied) bindings are dropped
// first; printable character presses additionally match the wildcard
// Any binding.
func (k *KeyBindings) Filter(kp KeyPress) (ret KeyBindings) { p := Prof.Enter("key.filter") defer p.Exit() kp.fix() k.DropLessEqualKeys(k.seqIndex) ret.seqIndex = k.seqIndex + 1 ki := kp.Index() k.filter(ki, &ret) if kp.IsCharacter() { k.filter(int(Any), &ret) } return }
go
{ "resource": "" }
q15069
Action
train
// Action returns the highest-priority fully-matched binding whose every
// context condition passes qc, walking up parent binding sets until a
// match is found. Bindings whose key sequence is longer than what has
// been probed so far are skipped; a failed context jumps to the next
// candidate via the skip label.
func (k *KeyBindings) Action(qc func(key string, operator Op, operand interface{}, match_all bool) bool) (kb *KeyBinding) { p := Prof.Enter("key.action") defer p.Exit() for { for i := range k.Bindings { if len(k.Bindings[i].Keys) > k.seqIndex { // This key binding is of a key sequence longer than what is currently // probed for. For example, the binding is for the sequence ['a','b','c'], but // the user has only pressed ['a','b'] so far. continue } for _, c := range k.Bindings[i].Context { if !qc(c.Key, c.Operator, c.Operand, c.MatchAll) { goto skip } } if kb == nil || kb.priority < k.Bindings[i].priority { kb = k.Bindings[i] } skip: } if kb != nil || k.parent == nil { break } k = k.parent.KeyBindings() } return }
go
{ "resource": "" }
q15070
IsCharacter
train
// IsCharacter reports whether the press would produce a printable
// character: a printable rune with neither Super nor Ctrl held.
func (k KeyPress) IsCharacter() bool {
	if k.Super || k.Ctrl {
		return false
	}
	return unicode.IsPrint(rune(k.Key))
}
go
{ "resource": "" }
q15071
fix
train
func (k *KeyPress) fix() { lower := Key(unicode.ToLower(rune(k.Key))) if lower != k.Key { k.Shift = true k.Key = lower } }
go
{ "resource": "" }
q15072
LoadJSON
train
func LoadJSON(path string, marshal json.Unmarshaler) error { j := NewJSON(path, marshal) watch(j) j.Load() return j.err }
go
{ "resource": "" }
q15073
reparse
train
func (v *View) reparse(forced bool) { if v.isClosed() { // No point in issuing a re-parse if the view has been closed return } if len(v.reparseChan) < cap(v.reparseChan) || forced { v.reparseChan <- parseReq{forced} } }
go
{ "resource": "" }
q15074
Erase
train
// Erase queues deletion of region r as part of the given edit.
func (v *View) Erase(edit *Edit, r text.Region) {
	action := text.NewEraseAction(v.buffer, r)
	edit.composite.AddExec(action)
}
go
{ "resource": "" }
q15075
Replace
train
// Replace queues replacement of region r with value as part of the
// given edit.
func (v *View) Replace(edit *Edit, r text.Region, value string) {
	action := text.NewReplaceAction(v.buffer, r, value)
	edit.composite.AddExec(action)
}
go
{ "resource": "" }
q15076
EndEdit
train
// EndEdit finishes the given edit, unwinding any still-open child edits
// and pushing the result onto the undo stack (unless the view is
// scratch, the edit bypasses undo, or the edit turned out to be a
// no-op). Fires OnSelectionModified when any unwound edit changed the
// selection.
//
// Fixed: the search loop redeclared `i` with `:=`, shadowing the outer
// `i`. The outer `i` therefore always stayed at len(editstack)-1 — the
// `i == -1` "not in stack" branch was unreachable and an edit that was
// not last in the stack was popped at the wrong position.
func (v *View) EndEdit(edit *Edit) {
	if edit.invalid {
		// This happens when nesting Edits and the child Edit ends after the parent edit.
		log.Fine("This edit has already been invalidated: %v, %v", edit, v.editstack)
		return
	}
	// Find the position of this Edit object in this View's Edit stack.
	// If plugins, commands, etc are well-behaved the ended edit should be
	// last in the stack, but shit happens and we cannot count on this being the case.
	i := -1
	for j := len(v.editstack) - 1; j >= 0; j-- {
		if v.editstack[j] == edit {
			i = j
			break
		}
	}
	if i == -1 {
		// TODO(.): Under what instances does this happen again?
		log.Error("This edit isn't even in the stack... where did it come from? %v, %v", edit, v.editstack)
		return
	}
	var selection_modified bool
	if l := len(v.editstack) - 1; i != l {
		// TODO(.): See TODO in BeginEdit
		log.Error("This edit wasn't last in the stack... %d != %d: %v, %v", i, l, edit, v.editstack)
	}
	// Invalidate all Edits "below" and including this Edit.
	for j := len(v.editstack) - 1; j >= i; j-- {
		current_edit := v.editstack[j]
		current_edit.invalid = true
		sel_same := reflect.DeepEqual(*v.Sel(), current_edit.savedSel)
		buf_same := v.ChangeCount() == current_edit.savedCount
		eq := (sel_same && buf_same && current_edit.composite.Len() == 0)
		if !eq && !sel_same {
			selection_modified = true
		}
		if v.IsScratch() || current_edit.bypassUndo || eq {
			continue
		}
		switch {
		case i == 0:
			// Well-behaved, no nested edits!
			fallthrough
		case j != i:
			// BOO! Someone began another Edit without finishing the first one.
			// In this instance, the parent Edit ended before the child.
			// TODO(.): What would be the correct way to handle this?
			v.undoStack.Add(edit)
		default:
			// BOO! Also poorly-behaved. This Edit object began after the parent began,
			// but was finished before the parent finished.
			//
			// Add it as a child of the parent Edit so that undoing the parent
			// will undo this edit as well.
			v.editstack[i-1].composite.Add(current_edit)
		}
	}
	// Pop this Edit and all the children off the Edit stack.
	v.editstack = v.editstack[:i]
	if selection_modified {
		OnSelectionModified.Call(v)
	}
}
go
{ "resource": "" }
q15077
IsDirty
train
func (v *View) IsDirty() bool { if v.IsScratch() { return false } lastSave := v.Settings().Int("lime.last_save_change_count", -1) return v.ChangeCount() != lastSave }
go
{ "resource": "" }
q15078
SaveAs
train
// SaveAs writes the view's buffer to name. When "atomic_save" is
// enabled and the view already has a file name, the data is written to
// a temp file and renamed into place, falling back to a direct write
// when the rename fails (e.g. across directories). It updates the file
// watch from the old name to the new one, records the saved change
// count (used by IsDirty), and fires the pre/post-save events; the
// "lime.saving" setting is held for the duration.
func (v *View) SaveAs(name string) (err error) { log.Fine("SaveAs(%s)", name) v.Settings().Set("lime.saving", true) defer v.Settings().Erase("lime.saving") OnPreSave.Call(v) if atomic := v.Settings().Bool("atomic_save", true); v.FileName() == "" || !atomic { if err := v.nonAtomicSave(name); err != nil { return err } } else { n, err := ioutil.TempDir(path.Dir(v.FileName()), "lime") if err != nil { return err } tmpf := path.Join(n, "tmp") if err := v.nonAtomicSave(tmpf); err != nil { return err } if err := os.Rename(tmpf, name); err != nil { // When we want to save as a file in another directory // we can't go with os.Rename so we need to force // not atomic saving sometimes as 4th test in TestSaveAsOpenFile if err := v.nonAtomicSave(name); err != nil { return err } } if err := os.RemoveAll(n); err != nil { return err } } ed := GetEditor() if fn := v.FileName(); fn != name { v.SetFileName(name) if fn != "" { ed.UnWatch(fn, v) } ed.Watch(name, v) } v.Settings().Set("lime.last_save_change_count", v.ChangeCount()) OnPostSave.Call(v) return nil }
go
{ "resource": "" }
q15079
AddRegions
train
func (v *View) AddRegions(key string, regions []text.Region, scope, icon string, flags render.ViewRegionFlags) { vr := render.ViewRegions{Scope: scope, Icon: icon, Flags: flags} vr.Regions.AddAll(regions) v.lock.Lock() defer v.lock.Unlock() v.regions[key] = vr }
go
{ "resource": "" }
q15080
GetRegions
train
func (v *View) GetRegions(key string) (ret []text.Region) { v.lock.Lock() defer v.lock.Unlock() vr := v.regions[key] rs := vr.Regions.Regions() ret = make([]text.Region, len(rs)) copy(ret, rs) return }
go
{ "resource": "" }
q15081
EraseRegions
train
func (v *View) EraseRegions(key string) { v.lock.Lock() defer v.lock.Unlock() delete(v.regions, key) }
go
{ "resource": "" }
q15082
Close
train
func (v *View) Close() bool { OnPreClose.Call(v) if v.IsDirty() { close_anyway := GetEditor().Frontend().OkCancelDialog("File has been modified since last save, close anyway?", "Close") if !close_anyway { return false } } if n := v.FileName(); n != "" { GetEditor().UnWatch(n, v) } // Call the event first while there's still access possible to the underlying // buffer OnClose.Call(v) v.window.remove(v) // Closing the reparseChan, and setting to nil will eventually clean up other resources // when the parseThread exits v.lock.Lock() defer v.lock.Unlock() close(v.reparseChan) v.reparseChan = nil return true }
go
{ "resource": "" }
q15083
FindByClass
train
func (v *View) FindByClass(point int, forward bool, classes int) int { i := -1 if forward { i = 1 } size := v.Size() // Sublime doesn't consider initial point even if it matches. for p := point + i; ; p += i { if p <= 0 { return 0 } if p >= size { return size } if v.Classify(p)&classes != 0 { return p } } }
go
{ "resource": "" }
q15084
ExpandByClass
train
func (v *View) ExpandByClass(r text.Region, classes int) text.Region { // Sublime doesn't consider the points the region starts on. // If not already on edge of buffer, expand by 1 in both directions. a := r.A if a > 0 { a -= 1 } else if a < 0 { a = 0 } b := r.B size := v.Size() if b < size { b += 1 } else if b > size { b = size } for ; a > 0 && (v.Classify(a)&classes == 0); a -= 1 { } for ; b < size && (v.Classify(b)&classes == 0); b += 1 { } return text.Region{a, b} }
go
{ "resource": "" }
q15085
init
train
func (ch *commandHandler) init(cmd interface{}, args Args) error { if in, ok := cmd.(CustomInit); ok { return in.Init(args) } v := reflect.ValueOf(cmd).Elem() t := v.Type() for i := 0; i < v.NumField(); i++ { ft := t.Field(i) f := v.Field(i) if ft.Anonymous || !f.CanSet() { continue } key := util.PascalCaseToSnakeCase(ft.Name) fv, ok := args[key] if !ok { fv = reflect.Zero(ft.Type).Interface() if def, ok := cmd.(CustomDefault); ok { if val := def.Default(key); val != nil { fv = val } } } if f.CanAddr() { if f2, ok := f.Addr().Interface().(CustomSet); ok { if err := f2.Set(fv); err != nil { return err } continue } } rv := reflect.ValueOf(fv) rvtype := rv.Type() ftype := f.Type() if !rvtype.AssignableTo(ftype) { if rvtype.ConvertibleTo(ftype) { rv = rv.Convert(ftype) } else { return fmt.Errorf("Command %v arg %v of type %v not assignable or convertable to %v of type %v", t, rv, rvtype, ft.Name, ftype) } } f.Set(rv) } return nil }
go
{ "resource": "" }
q15086
Close
train
func (w *Window) Close() bool { if !w.CloseAllViews() { return false } GetEditor().remove(w) return true }
go
{ "resource": "" }
q15087
CloseAllViews
train
func (w *Window) CloseAllViews() bool { for len(w.views) > 0 { if !w.views[0].Close() { return false } } return true }
go
{ "resource": "" }
q15088
SaveAs
train
func (p *Project) SaveAs(name string) error { log.Fine("Saving project as %s", name) if data, err := json.Marshal(p); err != nil { return err } else if err := ioutil.WriteFile(name, data, 0644); err != nil { return err } if abs, err := filepath.Abs(name); err != nil { p.SetName(name) } else { p.SetName(abs) } return nil }
go
{ "resource": "" }
q15089
call
train
func (ie *InitEvent) call() { log.Debug("OnInit callbacks executing") defer log.Debug("OnInit callbacks finished") for _, ev := range *ie { ev() } }
go
{ "resource": "" }
q15090
flushDir
train
func (w *Watcher) flushDir(name string) { log.Finest("Flusing watched directory %s", name) w.dirs = append(w.dirs, name) for _, p := range w.watchers { if filepath.Dir(p) == name && !util.Exists(w.dirs, p) { if err := w.removeWatch(p); err != nil { log.Error("Couldn't unwatch file %s: %s", p, err) } } } }
go
{ "resource": "" }
q15091
removeDir
train
func (w *Watcher) removeDir(name string) { for p, _ := range w.watched { if filepath.Dir(p) == name { stat, err := os.Stat(p) if err != nil { log.Error("Stat error: %s", err) } if err := w.watch(p, stat.IsDir()); err != nil { log.Error("Could not watch: %s", err) continue } } } w.dirs = util.Remove(w.dirs, name) }
go
{ "resource": "" }
q15092
observe
train
func (w *Watcher) observe() { for { select { case ev, ok := <-w.fsEvent: if !ok { // We get here only when w.fsEvent is stopped when closing the watcher w.watched = nil w.watchers = nil w.dirs = nil close(w.fsEvent) w.fsEvent = nil return } w.parseEv(ev) } } }
go
{ "resource": "" }
q15093
Redo
train
func (us *UndoStack) Redo(hard bool) { if us.position >= len(us.actions) { // No more actions to redo return } to := us.index(1, hard) if to == -1 { to = len(us.actions) } for us.position < to { us.actions[us.position].Apply() us.position++ } }
go
{ "resource": "" }
q15094
GlueFrom
train
func (us *UndoStack) GlueFrom(mark int) { if mark >= us.position { return } var e Edit e.command = "sequence" type entry struct { name string args Args } e.v = us.actions[mark].v e.savedSel.AddAll(us.actions[mark].savedSel.Regions()) entries := make([]entry, us.position-mark) for i := range entries { a := us.actions[i+mark] entries[i].name = a.command entries[i].args = a.args e.composite.Add(a) } us.position = mark us.actions = us.actions[:mark+1] e.args = make(Args) e.args["commands"] = entries us.Add(&e) }
go
{ "resource": "" }
q15095
watch
train
func watch(pkg Package) { if err := watcher.Watch(pkg.Path(), pkg); err != nil { log.Warn("Couldn't watch %s: %s", pkg.Path(), err) } }
go
{ "resource": "" }
q15096
watchDir
train
func watchDir(dir string) { log.Finest("Watching scaned dir: %s", dir) sd := &scanDir{dir} if err := watcher.Watch(sd.path, sd); err != nil { log.Error("Couldn't watch %s: %s", sd.path, err) } }
go
{ "resource": "" }
q15097
Cull
train
func (vrm *ViewRegionMap) Cull(viewport text.Region) { pe := util.Prof.Enter("render.vrm.Cull") defer pe.Exit() rm := []string{} for k, v := range *vrm { v.Cull(viewport) if v.Regions.Len() == 0 { rm = append(rm, k) } else { (*vrm)[k] = v } } for _, r := range rm { delete(*vrm, r) } }
go
{ "resource": "" }
q15098
Cull
train
func (vr *ViewRegions) Cull(viewport text.Region) { pe := util.Prof.Enter("render.vr.Cull") defer pe.Exit() nr := []text.Region{} for _, r := range vr.Regions.Regions() { if viewport.Intersects(r) { in := viewport.Intersection(r) if in.Size() != 0 { nr = append(nr, in) } } } vr.Regions.Clear() vr.Regions.AddAll(nr) }
go
{ "resource": "" }
q15099
Clone
train
func (vr *ViewRegions) Clone() *ViewRegions { ret := ViewRegions{Scope: vr.Scope, Icon: vr.Icon, Flags: vr.Flags} ret.Regions.AddAll(vr.Regions.Regions()) return &ret }
go
{ "resource": "" }