_id stringlengths 2 7 | title stringlengths 1 118 | partition stringclasses 3 values | text stringlengths 52 85.5k | language stringclasses 1 value | meta_information dict |
|---|---|---|---|---|---|
q9800 | ReadMIMEFile | train | func ReadMIMEFile(path string) (*Response, error) {
f, err := os.Open(path)
if err != nil {
return nil, err
}
defer f.Close()
return ReadMIME(f)
} | go | {
"resource": ""
} |
q9801 | NewRequest | train | func NewRequest(query string) (*Request, error) {
req := &Request{Query: query}
if err := req.Prepare(); err != nil {
return nil, err
}
return req, nil
} | go | {
"resource": ""
} |
// Prepare resolves the whois server host and URL for the request's query
// (unless a Host was already set explicitly) and then delegates any
// protocol-specific preparation to the request's adapter.
func (req *Request) Prepare() error {
	var err error
	if req.Host == "" {
		// Server returns an error when no whois server is known for the query.
		if req.Host, req.URL, err = Server(req.Query); err != nil {
			return err
		}
	}
	return req.Adapter().Prepare(req)
}
"resource": ""
} |
q9803 | Fetch | train | func Fetch(query string) (*Response, error) {
req, err := NewRequest(query)
if err != nil {
return nil, err
}
return DefaultClient.Fetch(req)
} | go | {
"resource": ""
} |
q9804 | Server | train | func Server(query string) (string, string, error) {
// Queries on TLDs always against IANA
if strings.Index(query, ".") < 0 {
return IANA, "", nil
}
z := zonedb.PublicZone(query)
if z == nil {
return "", "", fmt.Errorf("no public zone found for %s", query)
}
host := z.WhoisServer()
wu := z.WhoisURL()
if host != "" {
return host, wu, nil
}
u, err := url.Parse(wu)
if err == nil && u.Host != "" {
return u.Host, wu, nil
}
return "", "", fmt.Errorf("no whois server found for %s", query)
} | go | {
"resource": ""
} |
q9805 | Fetch | train | func (c *Client) Fetch(req *Request) (*Response, error) {
return c.FetchContext(context.Background(), req)
} | go | {
"resource": ""
} |
// FetchContext fetches req under ctx, applying the client-level Timeout
// (if any) and dispatching to the HTTP or raw whois transport depending
// on whether the request carries a URL.
func (c *Client) FetchContext(ctx context.Context, req *Request) (*Response, error) {
	if c.Timeout > 0 {
		// Bound the whole fetch by the client's timeout.
		var cancel context.CancelFunc
		ctx, cancel = context.WithTimeout(ctx, c.Timeout)
		defer cancel()
	}
	// A non-empty URL indicates an HTTP-based whois service.
	if req.URL != "" {
		return c.fetchHTTP(ctx, req)
	}
	return c.fetchWhois(ctx, req)
}
"resource": ""
} |
q9807 | Copy | train | func (l ScopeList) Copy() ScopeList {
scopes := make(ScopeList, len(l))
copy(scopes, l)
return scopes
} | go | {
"resource": ""
} |
q9808 | ParentScopes | train | func (l ScopeList) ParentScopes(scope string) ScopeList {
scopes := l.Copy()
for i, s := range scopes {
if s == scope {
return scopes[:i]
}
}
return ScopeList{}
} | go | {
"resource": ""
} |
q9809 | Contains | train | func (l ScopeList) Contains(scope string) bool {
for _, s := range l {
if scope == s {
return true
}
}
return false
} | go | {
"resource": ""
} |
q9810 | Copy | train | func (m DefMap) Copy() DefMap {
defs := DefMap{}
for name, def := range m {
defs[name] = def
}
return defs
} | go | {
"resource": ""
} |
q9811 | Add | train | func (l builtList) Add(name string) builtList {
if l == nil {
return builtList{name: 0}
}
l[name] = len(l)
return l
} | go | {
"resource": ""
} |
q9812 | Has | train | func (l builtList) Has(name string) bool {
_, ok := l[name]
return ok
} | go | {
"resource": ""
} |
q9813 | OrderedList | train | func (l builtList) OrderedList() []string {
s := make([]string, len(l))
for name, i := range l {
s[i] = name
}
return s
} | go | {
"resource": ""
} |
q9814 | Add | train | func (b *multiErrBuilder) Add(err error) {
if err != nil {
b.errs = append(b.errs, err)
}
} | go | {
"resource": ""
} |
q9815 | Build | train | func (b *multiErrBuilder) Build() error {
if len(b.errs) == 0 {
return nil
}
msgs := make([]string, len(b.errs))
for i, err := range b.errs {
msgs[i] = err.Error()
}
return errors.New(strings.Join(msgs, " AND "))
} | go | {
"resource": ""
} |
// fill copies src into the value pointed to by dest using reflection.
// If dest is not a pointer to src's type, the resulting reflect panic is
// recovered and converted into a descriptive error.
func fill(src, dest interface{}) (err error) {
	defer func() {
		if recovered := recover(); recovered != nil {
			destType := reflect.TypeOf(dest)
			srcType := reflect.TypeOf(src)
			err = fmt.Errorf("the fill destination should be a pointer to a `%s`, but you used a `%s`", srcType, destType)
		}
	}()
	reflect.ValueOf(dest).Elem().Set(reflect.ValueOf(src))
	return err
}
"resource": ""
} |
q9817 | IsDefined | train | func (b *Builder) IsDefined(name string) bool {
_, ok := b.definitions[name]
return ok
} | go | {
"resource": ""
} |
q9818 | Add | train | func (b *Builder) Add(defs ...Def) error {
for _, def := range defs {
if err := b.add(def); err != nil {
return err
}
}
return nil
} | go | {
"resource": ""
} |
// Build validates the builder's scopes and assembles the Container.
// Definitions without an explicit scope default to the first (broadest)
// scope, b.scopes[0].
//
// NOTE(review): on invalid scopes this returns nil rather than an
// error, so callers must nil-check the result — confirm this is
// intentional.
func (b *Builder) Build() Container {
	if err := checkScopes(b.scopes); err != nil {
		return nil
	}
	defs := b.Definitions()
	for name, def := range defs {
		if def.Scope == "" {
			// Default to the outermost scope.
			def.Scope = b.scopes[0]
			defs[name] = def
		}
	}
	return &container{
		containerCore: &containerCore{
			scopes: b.scopes,
			scope: b.scopes[0],
			definitions: defs,
			parent: nil,
			children: map[*containerCore]struct{}{},
			objects: map[string]interface{}{},
		},
	}
}
"resource": ""
} |
// HTTPMiddleware wraps h so that every request runs with its own
// sub-container of app, stored in the request context under
// ContainerKey("di"). The sub-container is deleted when the handler
// returns; deletion errors are reported through logFunc when provided.
func HTTPMiddleware(h http.HandlerFunc, app Container, logFunc func(msg string)) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		// create a request-scoped container from the app container
		ctn, err := app.SubContainer()
		if err != nil {
			panic(err)
		}
		defer func() {
			// release per-request objects; log (not fail) on error
			if err := ctn.Delete(); err != nil && logFunc != nil {
				logFunc(err.Error())
			}
		}()
		// call the handler with a new request
		// containing the container in its context
		h(w, r.WithContext(
			context.WithValue(r.Context(), ContainerKey("di"), ctn),
		))
	}
}
"resource": ""
} |
// ParseFooter extracts the optional footer of a PASETO token into the
// footer value. A token has the form version.purpose.payload[.footer];
// with exactly three parts there is no footer and nil is returned, and
// fewer than three parts is a malformed token.
func ParseFooter(token string, footer interface{}) error {
	parts := strings.Split(token, ".")
	if len(parts) == 4 {
		// The footer is the base64-encoded fourth segment.
		b, err := tokenEncoder.DecodeString(parts[3])
		if err != nil {
			return errors.Wrap(err, "failed to decode token")
		}
		return errors.Wrap(fillValue(b, footer), "failed to decode footer")
	}
	if len(parts) < 3 {
		return ErrIncorrectTokenFormat
	}
	return nil
}
"resource": ""
} |
q9822 | ForAudience | train | func ForAudience(audience string) Validator {
return func(token *JSONToken) error {
if token.Audience != audience {
return errors.Wrapf(ErrTokenValidationError, `token was not intended for "%s" audience`, audience)
}
return nil
}
} | go | {
"resource": ""
} |
q9823 | IdentifiedBy | train | func IdentifiedBy(jti string) Validator {
return func(token *JSONToken) error {
if token.Jti != jti {
return errors.Wrapf(ErrTokenValidationError, `token was expected to be identified by "%s"`, jti)
}
return nil
}
} | go | {
"resource": ""
} |
q9824 | IssuedBy | train | func IssuedBy(issuer string) Validator {
return func(token *JSONToken) error {
if token.Issuer != issuer {
return errors.Wrapf(ErrTokenValidationError, `token was not issued by "%s"`, issuer)
}
return nil
}
} | go | {
"resource": ""
} |
q9825 | Subject | train | func Subject(subject string) Validator {
return func(token *JSONToken) error {
if token.Subject != subject {
return errors.Wrapf(ErrTokenValidationError, `token was not related to subject "%s"`, subject)
}
return nil
}
} | go | {
"resource": ""
} |
q9826 | ValidAt | train | func ValidAt(t time.Time) Validator {
return func(token *JSONToken) error {
if !token.IssuedAt.IsZero() && t.Before(token.IssuedAt) {
return errors.Wrapf(ErrTokenValidationError, "token was issued in the future")
}
if !token.NotBefore.IsZero() && t.Before(token.NotBefore) {
return errors.Wrapf(ErrTokenValidationError, "token cannot be used yet")
}
if !token.Expiration.IsZero() && t.After(token.Expiration) {
return errors.Wrapf(ErrTokenValidationError, "token has expired")
}
return nil
}
} | go | {
"resource": ""
} |
// Sign creates a v2.public PASETO token: payload and footer are
// serialized, the payload is signed with the ed25519 private key over a
// pre-authentication encoding (PAE) of header, payload and footer, and
// the signature is appended to the payload before token assembly.
func (*V2) Sign(privateKey crypto.PrivateKey, payload interface{}, footer interface{}) (string, error) {
	key, ok := privateKey.(ed25519.PrivateKey)
	if !ok {
		return "", ErrIncorrectPrivateKeyType
	}
	payloadBytes, err := infToByteArr(payload)
	if err != nil {
		return "", errors.Wrap(err, "failed to encode payload to []byte")
	}
	footerBytes, err := infToByteArr(footer)
	if err != nil {
		return "", errors.Wrap(err, "failed to encode footer to []byte")
	}
	// PAE binds header, payload and footer together under one signature.
	sig := ed25519.Sign(key, preAuthEncode(headerV2Public, payloadBytes, footerBytes))
	return createToken(headerV2Public, append(payloadBytes, sig...), footerBytes), nil
}
"resource": ""
} |
// Verify checks a v2.public PASETO token's ed25519 signature and, on
// success, decodes the payload and footer into the provided values.
// Either payload or footer may be nil to skip decoding that part.
func (*V2) Verify(token string, publicKey crypto.PublicKey, payload interface{}, footer interface{}) error {
	pub, ok := publicKey.(ed25519.PublicKey)
	if !ok {
		return ErrIncorrectPublicKeyType
	}
	data, footerBytes, err := splitToken([]byte(token), headerV2Public)
	if err != nil {
		return errors.Wrap(err, "failed to decode token")
	}
	// The decoded body must at least hold a full signature.
	if len(data) < v2SignSize {
		return errors.Wrap(ErrIncorrectTokenFormat, "incorrect token size")
	}
	// The signature occupies the trailing v2SignSize bytes of the body.
	payloadBytes := data[:len(data)-v2SignSize]
	signature := data[len(data)-v2SignSize:]
	if !ed25519.Verify(pub, preAuthEncode(headerV2Public, payloadBytes, footerBytes), signature) {
		return ErrInvalidSignature
	}
	if payload != nil {
		if err := fillValue(payloadBytes, payload); err != nil {
			return errors.Wrap(err, "failed to decode payload")
		}
	}
	if footer != nil {
		if err := fillValue(footerBytes, footer); err != nil {
			return errors.Wrap(err, "failed to decode footer")
		}
	}
	return nil
}
"resource": ""
} |
q9829 | Set | train | func (t *JSONToken) Set(key string, value string) {
if t.claims == nil {
t.claims = make(map[string]string)
}
t.claims[key] = value
} | go | {
"resource": ""
} |
// MarshalJSON encodes the token as a single JSON object, merging the
// typed standard claims (aud, iss, jti, sub, exp, iat, nbf) into the
// custom claims map. Time claims are formatted as RFC 3339.
//
// NOTE(review): t.claims is a map, so these writes mutate state shared
// with the caller's token despite the value receiver — confirm this
// side effect is acceptable.
func (t JSONToken) MarshalJSON() ([]byte, error) {
	if t.claims == nil {
		t.claims = make(map[string]string)
	}
	if t.Audience != "" {
		t.claims["aud"] = t.Audience
	}
	if t.Issuer != "" {
		t.claims["iss"] = t.Issuer
	}
	if t.Jti != "" {
		t.claims["jti"] = t.Jti
	}
	if t.Subject != "" {
		t.claims["sub"] = t.Subject
	}
	if !t.Expiration.IsZero() {
		t.claims["exp"] = t.Expiration.Format(time.RFC3339)
	}
	if !t.IssuedAt.IsZero() {
		t.claims["iat"] = t.IssuedAt.Format(time.RFC3339)
	}
	if !t.NotBefore.IsZero() {
		t.claims["nbf"] = t.NotBefore.Format(time.RFC3339)
	}
	return json.Marshal(t.claims)
}
"resource": ""
} |
q9831 | NotImmediately | train | func (j *Job) NotImmediately() *Job {
rj, ok := j.schedule.(*recurrent)
if !ok {
j.err = errors.New("bad function chaining")
return j
}
rj.done = true
return j
} | go | {
"resource": ""
} |
// Run starts the scheduled job executing f and returns immediately.
// The returned Job's Quit channel stops the scheduler goroutine, while
// SkipWait triggers an immediate run without waiting for the next tick.
// Any error accumulated while building the schedule chain is returned.
func (j *Job) Run(f func()) (*Job, error) {
	if j.err != nil {
		return nil, j.err
	}
	var next time.Duration
	var err error
	j.Quit = make(chan bool, 1)
	j.SkipWait = make(chan bool, 1)
	j.fn = f
	// Check for possible errors in scheduling
	next, err = j.schedule.nextRun()
	if err != nil {
		return nil, err
	}
	go func(j *Job) {
		for {
			select {
			case <-j.Quit:
				return
			case <-j.SkipWait:
				// run right away, bypassing the timer
				go runJob(j)
			case <-time.After(next):
				go runJob(j)
			}
			// compute the next firing; the error is ignored here because
			// nextRun was validated before this goroutine started
			next, _ = j.schedule.nextRun()
		}
	}(j)
	return j, nil
}
"resource": ""
} |
// Day schedules the job to run daily. If a schedule was already chosen
// by an earlier chained call, a chaining error is recorded.
//
// NOTE(review): the daily schedule is assigned even when the chaining
// error is set, overwriting the previous schedule — confirm intended.
func (j *Job) Day() *Job {
	if j.schedule != nil {
		j.err = errors.New("bad function chaining")
	}
	j.schedule = daily{}
	return j
}
"resource": ""
} |
q9834 | newInfo | train | func newInfo(conn *Connection, commands ...string) (*info, error) {
commandStr := strings.Trim(strings.Join(commands, "\n"), " ")
if strings.Trim(commandStr, " ") != "" {
commandStr += "\n"
}
newInfo := &info{
msg: NewMessage(MSG_INFO, []byte(commandStr)),
}
if err := newInfo.sendCommand(conn); err != nil {
return nil, err
}
return newInfo, nil
} | go | {
"resource": ""
} |
q9835 | RequestInfo | train | func RequestInfo(conn *Connection, names ...string) (map[string]string, error) {
info, err := newInfo(conn, names...)
if err != nil {
return nil, err
}
return info.parseMultiResponse()
} | go | {
"resource": ""
} |
// sendCommand serializes the info message, writes it to conn, reads the
// response header, then resizes the message buffer to the announced
// length and reads the full response body into it.
func (nfo *info) sendCommand(conn *Connection) error {
	// Write.
	if _, err := conn.Write(nfo.msg.Serialize()); err != nil {
		Logger.Debug("Failed to send command.")
		return err
	}
	// Read - reuse input buffer.
	header := bytes.NewBuffer(make([]byte, MSG_HEADER_SIZE))
	if _, err := conn.Read(header.Bytes(), MSG_HEADER_SIZE); err != nil {
		return err
	}
	// Decode the fixed-size big-endian header into the message struct.
	if err := binary.Read(header, binary.BigEndian, &nfo.msg.MessageHeader); err != nil {
		Logger.Debug("Failed to read command response.")
		return err
	}
	// Logger.Debug("Header Response: %v %v %v %v", t.Type, t.Version, t.Length(), t.DataLen)
	// Grow the data buffer to the size announced in the header.
	if err := nfo.msg.Resize(nfo.msg.Length()); err != nil {
		return err
	}
	_, err := conn.Read(nfo.msg.Data, len(nfo.msg.Data))
	return err
}
"resource": ""
} |
q9837 | NewHost | train | func NewHost(name string, port int) *Host {
return &Host{Name: name, Port: port}
} | go | {
"resource": ""
} |
// NewCluster connects to the given seed hosts under policy, waits for
// the cluster view to stabilize, and starts the background tend
// goroutine. When TLS is enabled without InsecureSkipVerify, hosts
// lacking a TLS name default to the cluster name (if set) or the host
// name. The cluster is returned even on a (non-fatal) connect error
// unless policy.FailIfNotConnected is set.
func NewCluster(policy *ClientPolicy, hosts []*Host) (*Cluster, error) {
	// Default TLS names when TLS enabled.
	newHosts := make([]*Host, 0, len(hosts))
	if policy.TlsConfig != nil && !policy.TlsConfig.InsecureSkipVerify {
		useClusterName := len(policy.ClusterName) > 0
		for _, host := range hosts {
			// copy the host so the caller's slice is not mutated
			nh := *host
			if nh.TLSName == "" {
				if useClusterName {
					nh.TLSName = policy.ClusterName
				} else {
					nh.TLSName = host.Name
				}
			}
			newHosts = append(newHosts, &nh)
		}
		hosts = newHosts
	}
	newCluster := &Cluster{
		clientPolicy: *policy,
		infoPolicy: InfoPolicy{Timeout: policy.Timeout},
		tendChannel: make(chan struct{}),
		seeds: NewSyncVal(hosts),
		aliases: NewSyncVal(make(map[Host]*Node)),
		nodesMap: NewSyncVal(make(map[string]*Node)),
		nodes: NewSyncVal([]*Node{}),
		stats: map[string]*nodeStats{},
		password: NewSyncVal(nil),
		supportsFloat: NewAtomicBool(false),
		supportsBatchIndex: NewAtomicBool(false),
		supportsReplicasAll: NewAtomicBool(false),
		supportsGeo: NewAtomicBool(false),
	}
	newCluster.partitionWriteMap.Store(make(partitionMap))
	// setup auth info for cluster
	if policy.RequiresAuthentication() {
		if policy.AuthMode == AuthModeExternal && policy.TlsConfig == nil {
			return nil, errors.New("External Authentication requires TLS configuration to be set, because it sends clear password on the wire.")
		}
		newCluster.user = policy.User
		hashedPass, err := hashPassword(policy.Password)
		if err != nil {
			return nil, err
		}
		newCluster.password = NewSyncVal(hashedPass)
	}
	// try to seed connections for first use
	err := newCluster.waitTillStabilized()
	// apply policy rules
	if policy.FailIfNotConnected && !newCluster.IsConnected() {
		if err != nil {
			return nil, err
		}
		return nil, fmt.Errorf("Failed to connect to host(s): %v. The network connection(s) to cluster nodes may have timed out, or the cluster may be in a state of flux.", hosts)
	}
	// start up cluster maintenance go routine
	newCluster.wgTend.Add(1)
	go newCluster.clusterBoss(&newCluster.clientPolicy)
	if err == nil {
		Logger.Debug("New cluster initialized and ready to be used...")
	} else {
		Logger.Error("New cluster was not initialized successfully, but the client will keep trying to connect to the database. Error: %s", err.Error())
	}
	return newCluster, err
}
"resource": ""
} |
// clusterBoss is the cluster maintenance loop: it tends the cluster on
// every TendInterval tick (clamped to a 10ms minimum) until the tend
// channel is closed, then closes all nodes. A panic in the loop is
// recovered and the goroutine restarts itself.
func (clstr *Cluster) clusterBoss(policy *ClientPolicy) {
	Logger.Info("Starting the cluster tend goroutine...")
	defer func() {
		if r := recover(); r != nil {
			Logger.Error("Cluster tend goroutine crashed: %s", debug.Stack())
			// restart the tend loop after a crash
			go clstr.clusterBoss(&clstr.clientPolicy)
		}
	}()
	defer clstr.wgTend.Done()
	tendInterval := policy.TendInterval
	if tendInterval <= 10*time.Millisecond {
		tendInterval = 10 * time.Millisecond
	}
Loop:
	for {
		select {
		case <-clstr.tendChannel:
			// tend channel closed
			Logger.Debug("Tend channel closed. Shutting down the cluster...")
			break Loop
		case <-time.After(tendInterval):
			tm := time.Now()
			if err := clstr.tend(); err != nil {
				Logger.Warn(err.Error())
			}
			// Tending took longer than requested tend interval.
			// Tending is too slow for the cluster, and may be falling behind schedule.
			if tendDuration := time.Since(tm); tendDuration > clstr.clientPolicy.TendInterval {
				Logger.Warn("Tending took %s, while your requested ClientPolicy.TendInterval is %s. Tends are slower than the interval, and may be falling behind the changes in the cluster.", tendDuration, clstr.clientPolicy.TendInterval)
			}
		}
	}
	// cleanup code goes here
	// close the nodes
	nodeArray := clstr.GetNodes()
	for _, node := range nodeArray {
		node.Close()
	}
}
"resource": ""
} |
q9840 | AddSeeds | train | func (clstr *Cluster) AddSeeds(hosts []*Host) {
clstr.seeds.Update(func(val interface{}) (interface{}, error) {
seeds := val.([]*Host)
seeds = append(seeds, hosts...)
return seeds, nil
})
} | go | {
"resource": ""
} |
// waitTillStabilized repeatedly tends the cluster until the node count
// stops changing (the cluster has stabilized), or the client policy
// timeout elapses. Authentication and cluster-name-mismatch errors abort
// immediately. On timeout or error the cluster may be closed depending
// on FailIfNotConnected.
func (clstr *Cluster) waitTillStabilized() error {
	count := -1
	doneCh := make(chan error, 10)
	// will run until the cluster is stabilized
	go func() {
		var err error
		for {
			if err = clstr.tend(); err != nil {
				if aerr, ok := err.(AerospikeError); ok {
					switch aerr.ResultCode() {
					case NOT_AUTHENTICATED, CLUSTER_NAME_MISMATCH_ERROR:
						// fatal errors: no point retrying
						doneCh <- err
						return
					}
				}
				Logger.Warn(err.Error())
			}
			// // if there are no errors in connecting to the cluster, then validate the partition table
			// if err == nil {
			// 	err = clstr.getPartitions().validate()
			// }
			// Check to see if cluster has changed since the last Tend().
			// If not, assume cluster has stabilized and return.
			if count == len(clstr.GetNodes()) {
				break
			}
			time.Sleep(time.Millisecond)
			count = len(clstr.GetNodes())
		}
		doneCh <- err
	}()
	select {
	case <-time.After(clstr.clientPolicy.Timeout):
		if clstr.clientPolicy.FailIfNotConnected {
			clstr.Close()
		}
		return errors.New("Connecting to the cluster timed out.")
	case err := <-doneCh:
		if err != nil && clstr.clientPolicy.FailIfNotConnected {
			clstr.Close()
		}
		return err
	}
}
"resource": ""
} |
// seedNodes attempts to discover and add cluster nodes from all seed
// hosts concurrently. It returns true as soon as any single seed
// succeeds; otherwise it collects the per-seed errors (authentication
// and cluster-name mismatches are returned as-is) into one failure.
func (clstr *Cluster) seedNodes() (bool, error) {
	// Must copy array reference for copy on write semantics to work.
	seedArrayIfc, _ := clstr.seeds.GetSyncedVia(func(val interface{}) (interface{}, error) {
		seeds := val.([]*Host)
		seeds_copy := make([]*Host, len(seeds))
		copy(seeds_copy, seeds)
		return seeds_copy, nil
	})
	seedArray := seedArrayIfc.([]*Host)
	successChan := make(chan struct{}, len(seedArray))
	errChan := make(chan error, len(seedArray))
	Logger.Info("Seeding the cluster. Seeds count: %d", len(seedArray))
	// Add all nodes at once to avoid copying entire array multiple times.
	for i, seed := range seedArray {
		go func(index int, seed *Host) {
			nodesToAdd := make(nodesToAddT, 128)
			nv := nodeValidator{}
			err := nv.seedNodes(clstr, seed, nodesToAdd)
			if err != nil {
				Logger.Warn("Seed %s failed: %s", seed.String(), err.Error())
				errChan <- err
				return
			}
			clstr.addNodes(nodesToAdd)
			successChan <- struct{}{}
		}(i, seed)
	}
	errorList := make([]error, 0, len(seedArray))
	seedCount := len(seedArray)
L:
	for {
		select {
		case err := <-errChan:
			errorList = append(errorList, err)
			seedCount--
			if seedCount <= 0 {
				break L
			}
		case <-successChan:
			// even one seed is enough
			return true, nil
		case <-time.After(clstr.clientPolicy.Timeout):
			// time is up, no seeds found
			break L
		}
	}
	var errStrs []string
	for _, err := range errorList {
		if err != nil {
			if aerr, ok := err.(AerospikeError); ok {
				switch aerr.ResultCode() {
				case NOT_AUTHENTICATED:
					return false, NewAerospikeError(NOT_AUTHENTICATED)
				case CLUSTER_NAME_MISMATCH_ERROR:
					return false, aerr
				}
			}
			errStrs = append(errStrs, err.Error())
		}
	}
	return false, NewAerospikeError(INVALID_NODE_ERROR, "Failed to connect to hosts:"+strings.Join(errStrs, "\n"))
}
"resource": ""
} |
q9843 | findNodeName | train | func (clstr *Cluster) findNodeName(list []*Node, name string) bool {
for _, node := range list {
if node.GetName() == name {
return true
}
}
return false
} | go | {
"resource": ""
} |
// getSameRackNode returns an active replica node for the partition that
// resides on the client's configured rack, advancing *seq through the
// replica list. If no same-rack node is found, the first non-nil node
// encountered is returned instead; an empty replica table is an error.
// Requires ClientPolicy.RackAware and is unsupported in Strong
// Consistency mode.
func (clstr *Cluster) getSameRackNode(partition *Partition, seq *int) (*Node, error) {
	// RackAware has not been enabled in client policy.
	if !clstr.clientPolicy.RackAware {
		return nil, NewAerospikeError(UNSUPPORTED_FEATURE, "ReplicaPolicy is set to PREFER_RACK but ClientPolicy.RackAware is not set.")
	}
	pmap := clstr.getPartitions()
	partitions := pmap[partition.Namespace]
	if partitions == nil {
		return nil, NewAerospikeError(PARTITION_UNAVAILABLE, "Invalid namespace in partition table:", partition.Namespace)
	}
	// CP mode (Strong Consistency) does not support the RackAware feature.
	if partitions.CPMode {
		return nil, NewAerospikeError(UNSUPPORTED_FEATURE, "ReplicaPolicy is set to PREFER_RACK but the cluster is in Strong Consistency Mode.")
	}
	replicaArray := partitions.Replicas
	var seqNode *Node
	for range replicaArray {
		index := *seq % len(replicaArray)
		node := replicaArray[index][partition.PartitionId]
		*seq++
		if node != nil {
			// assign a node to seqNode in case no node was found on the same rack was found
			if seqNode == nil {
				seqNode = node
			}
			// if the node didn't belong to rack for that namespace, continue
			nodeRack, err := node.Rack(partition.Namespace)
			if err != nil {
				continue
			}
			if node.IsActive() && nodeRack == clstr.clientPolicy.RackId {
				return node, nil
			}
		}
	}
	// if no nodes were found belonging to the same rack, and no other node was also found
	// then the partition table replicas are empty for that namespace
	if seqNode == nil {
		return nil, newInvalidNodeError(len(clstr.GetNodes()), partition)
	}
	return seqNode, nil
}
"resource": ""
} |
// GetRandomNode returns an active node chosen by round-robin over the
// current node list, or INVALID_NODE_ERROR when no active node exists.
func (clstr *Cluster) GetRandomNode() (*Node, error) {
	// Must copy array reference for copy on write semantics to work.
	nodeArray := clstr.GetNodes()
	length := len(nodeArray)
	for i := 0; i < length; i++ {
		// Must handle concurrency with other non-tending goroutines, so nodeIndex is consistent.
		index := int(atomic.AddUint64(&clstr.nodeIndex, 1) % uint64(length))
		node := nodeArray[index]
		if node != nil && node.IsActive() {
			// Logger.Debug("Node `%s` is active. index=%d", node, index)
			return node, nil
		}
	}
	return nil, NewAerospikeError(INVALID_NODE_ERROR, "Cluster is empty.")
}
"resource": ""
} |
q9846 | GetSeeds | train | func (clstr *Cluster) GetSeeds() []Host {
res, _ := clstr.seeds.GetSyncedVia(func(val interface{}) (interface{}, error) {
seeds := val.([]*Host)
res := make([]Host, 0, len(seeds))
for _, seed := range seeds {
res = append(res, *seed)
}
return res, nil
})
return res.([]Host)
} | go | {
"resource": ""
} |
q9847 | GetAliases | train | func (clstr *Cluster) GetAliases() map[Host]*Node {
res, _ := clstr.aliases.GetSyncedVia(func(val interface{}) (interface{}, error) {
aliases := val.(map[Host]*Node)
res := make(map[Host]*Node, len(aliases))
for h, n := range aliases {
res[h] = n
}
return res, nil
})
return res.(map[Host]*Node)
} | go | {
"resource": ""
} |
q9848 | GetNodeByName | train | func (clstr *Cluster) GetNodeByName(nodeName string) (*Node, error) {
node := clstr.findNodeByName(nodeName)
if node == nil {
return nil, NewAerospikeError(INVALID_NODE_ERROR, "Invalid node name"+nodeName)
}
return node, nil
} | go | {
"resource": ""
} |
q9849 | Close | train | func (clstr *Cluster) Close() {
if clstr.closed.CompareAndToggle(false) {
// send close signal to maintenance channel
close(clstr.tendChannel)
// wait until tend is over
clstr.wgTend.Wait()
}
} | go | {
"resource": ""
} |
q9850 | MigrationInProgress | train | func (clstr *Cluster) MigrationInProgress(timeout time.Duration) (res bool, err error) {
if timeout <= 0 {
timeout = _DEFAULT_TIMEOUT
}
done := make(chan bool, 1)
go func() {
// this function is guaranteed to return after _DEFAULT_TIMEOUT
nodes := clstr.GetNodes()
for _, node := range nodes {
if node.IsActive() {
if res, err = node.MigrationInProgress(); res || err != nil {
done <- true
return
}
}
}
res, err = false, nil
done <- false
}()
dealine := time.After(timeout)
for {
select {
case <-dealine:
return false, NewAerospikeError(TIMEOUT)
case <-done:
return res, err
}
}
} | go | {
"resource": ""
} |
q9851 | WaitUntillMigrationIsFinished | train | func (clstr *Cluster) WaitUntillMigrationIsFinished(timeout time.Duration) (err error) {
if timeout <= 0 {
timeout = _NO_TIMEOUT
}
done := make(chan error, 1)
go func() {
// this function is guaranteed to return after timeout
// no go routines will be leaked
for {
if res, err := clstr.MigrationInProgress(timeout); err != nil || !res {
done <- err
return
}
}
}()
dealine := time.After(timeout)
select {
case <-dealine:
return NewAerospikeError(TIMEOUT)
case err = <-done:
return err
}
} | go | {
"resource": ""
} |
q9852 | Password | train | func (clstr *Cluster) Password() (res []byte) {
pass := clstr.password.Get()
if pass != nil {
return pass.([]byte)
}
return nil
} | go | {
"resource": ""
} |
// registerLuaAerospikeType registers the global `aerospike` object in
// the Lua state, exposing the `log` function to user-defined functions.
func registerLuaAerospikeType(L *lua.LState) {
	mt := L.NewTypeMetatable(luaLuaAerospikeTypeName)
	L.SetGlobal("aerospike", mt)
	// static attributes
	L.SetField(mt, "log", L.NewFunction(luaAerospikeLog))
	L.SetMetatable(mt, mt)
}
"resource": ""
} |
q9854 | NewIndexTask | train | func NewIndexTask(cluster *Cluster, namespace string, indexName string) *IndexTask {
return &IndexTask{
baseTask: newTask(cluster),
namespace: namespace,
indexName: indexName,
}
} | go | {
"resource": ""
} |
// shiftContentToHead moves any unread bytes to the front of the buffer,
// growing the buffer when the free tail capacity cannot accommodate
// `length` additional bytes.
func (bc *bufferedConn) shiftContentToHead(length int) {
	// shift data to the head of the byte slice
	if length > bc.emptyCap() {
		// not enough spare room: allocate a buffer sized for the current
		// content plus the requested length
		buf := make([]byte, bc.len()+length)
		copy(buf, bc.buf()[bc.head:bc.tail])
		bc.conn.dataBuffer = buf
		bc.tail -= bc.head
		bc.head = 0
	} else if bc.len() > 0 {
		// room is available: compact in place
		copy(bc.buf(), bc.buf()[bc.head:bc.tail])
		bc.tail -= bc.head
		bc.head = 0
	} else {
		// buffer is empty: just reset the cursors
		bc.tail = 0
		bc.head = 0
	}
}
"resource": ""
} |
// readConn reads more data from the underlying connection into the
// buffer, compacting it first so at least minLength bytes can be
// appended. minLength is sanity-checked against MaxBufferSize and the
// bytes still expected on the stream.
func (bc *bufferedConn) readConn(minLength int) error {
	// Corrupted data streams can result in a huge minLength.
	// Do a sanity check here.
	if minLength > MaxBufferSize || minLength <= 0 || minLength > bc.remaining {
		return NewAerospikeError(PARSE_ERROR, fmt.Sprintf("Invalid readBytes length: %d", minLength))
	}
	bc.shiftContentToHead(minLength)
	// read as much of the remaining stream as fits in the free tail
	toRead := bc.remaining
	if ec := bc.emptyCap(); toRead > ec {
		toRead = ec
	}
	n, err := bc.conn.Read(bc.buf()[bc.tail:], toRead)
	bc.tail += n
	bc.remaining -= n
	if err != nil {
		return fmt.Errorf("Requested to read %d bytes, but %d was read. (%v)", minLength, n, err)
	}
	return nil
}
"resource": ""
} |
// newObjectset creates an objectset that streams scan/query results
// into objChan, which must be of type `chan *T` for a struct T (any
// other channel type panics). goroutines tracks the number of producer
// goroutines expected to signal completion.
func newObjectset(objChan reflect.Value, goroutines int, taskID uint64) *objectset {
	if objChan.Kind() != reflect.Chan ||
		objChan.Type().Elem().Kind() != reflect.Ptr ||
		objChan.Type().Elem().Elem().Kind() != reflect.Struct {
		panic("Scan/Query object channels should be of type `chan *T`")
	}
	rs := &objectset{
		objChan: objChan,
		Errors: make(chan error, goroutines),
		active: NewAtomicBool(true),
		closed: NewAtomicBool(false),
		goroutines: NewAtomicInt(goroutines),
		cancelled: make(chan struct{}),
		taskID: taskID,
	}
	// one Done per producer goroutine
	rs.wgGoroutines.Add(goroutines)
	return rs
}
"resource": ""
} |
// newRecordset creates a Recordset with a buffered record channel.
// The embedded objectset gets a nil object channel because records,
// not typed objects, are streamed. A finalizer cleans up abandoned
// recordsets.
func newRecordset(recSize, goroutines int, taskID uint64) *Recordset {
	var nilChan chan *struct{}
	rs := &Recordset{
		Records: make(chan *Record, recSize),
		objectset: *newObjectset(reflect.ValueOf(nilChan), goroutines, taskID),
	}
	runtime.SetFinalizer(rs, recordsetFinalizer)
	return rs
}
"resource": ""
} |
// Read returns the next record from the recordset, blocking until a
// record or an error arrives. A closed Records channel yields
// ErrRecordsetClosed. A nil value on the Errors channel means that
// channel was closed, so the select is retried to distinguish normal
// end-of-results from cancellation.
func (rcs *Recordset) Read() (record *Record, err error) {
	var ok bool
L:
	select {
	case record, ok = <-rcs.Records:
		if !ok {
			err = ErrRecordsetClosed
		}
	case err = <-rcs.Errors:
		if err == nil {
			// if err == nil, it means the Errors chan has been closed
			// we should not return nil as an error, so we should listen
			// to other chans again to determine either cancellation,
			// or normal EOR
			goto L
		}
	}
	return record, err
}
"resource": ""
} |
// Close cancels the recordset exactly once: it marks it inactive,
// signals cancellation to producers, and blocks until all producer
// goroutines have finished. Calling Close again returns
// ErrRecordsetClosed.
func (rcs *Recordset) Close() error {
	// do it only once
	if !rcs.closed.CompareAndToggle(false) {
		return ErrRecordsetClosed
	}
	// mark the recordset as inactive
	rcs.active.Set(false)
	close(rcs.cancelled)
	// wait till all goroutines are done, and signalEnd is called by the scan command
	rcs.wgGoroutines.Wait()
	return nil
}
"resource": ""
} |
q9861 | NewClientWithPolicy | train | func NewClientWithPolicy(policy *ClientPolicy, hostname string, port int) (*Client, error) {
return NewClientWithPolicyAndHost(policy, NewHost(hostname, port))
} | go | {
"resource": ""
} |
// NewClientWithPolicyAndHost creates a client connected to the given
// seed hosts, defaulting to a fresh ClientPolicy when policy is nil.
// The client is returned with default operation policies even when the
// initial connection fails (unless FailIfNotConnected is set); a
// finalizer closes abandoned clients.
func NewClientWithPolicyAndHost(policy *ClientPolicy, hosts ...*Host) (*Client, error) {
	if policy == nil {
		policy = NewClientPolicy()
	}
	cluster, err := NewCluster(policy, hosts)
	if err != nil && policy.FailIfNotConnected {
		if aerr, ok := err.(AerospikeError); ok {
			Logger.Debug("Failed to connect to host(s): %v; error: %s", hosts, err)
			return nil, aerr
		}
		return nil, fmt.Errorf("Failed to connect to host(s): %v; error: %s", hosts, err)
	}
	client := &Client{
		cluster: cluster,
		DefaultPolicy: NewPolicy(),
		DefaultBatchPolicy: NewBatchPolicy(),
		DefaultWritePolicy: NewWritePolicy(0, 0),
		DefaultScanPolicy: NewScanPolicy(),
		DefaultQueryPolicy: NewQueryPolicy(),
		DefaultAdminPolicy: NewAdminPolicy(),
	}
	runtime.SetFinalizer(client, clientFinalizer)
	return client, err
}
"resource": ""
} |
q9863 | GetNodeNames | train | func (clnt *Client) GetNodeNames() []string {
nodes := clnt.cluster.GetNodes()
names := make([]string, 0, len(nodes))
for _, node := range nodes {
names = append(names, node.GetName())
}
return names
} | go | {
"resource": ""
} |
q9864 | AppendBins | train | func (clnt *Client) AppendBins(policy *WritePolicy, key *Key, bins ...*Bin) error {
policy = clnt.getUsableWritePolicy(policy)
command := newWriteCommand(clnt.cluster, policy, key, bins, nil, _APPEND)
return command.Execute()
} | go | {
"resource": ""
} |
q9865 | PrependBins | train | func (clnt *Client) PrependBins(policy *WritePolicy, key *Key, bins ...*Bin) error {
policy = clnt.getUsableWritePolicy(policy)
command := newWriteCommand(clnt.cluster, policy, key, bins, nil, _PREPEND)
return command.Execute()
} | go | {
"resource": ""
} |
q9866 | AddBins | train | func (clnt *Client) AddBins(policy *WritePolicy, key *Key, bins ...*Bin) error {
policy = clnt.getUsableWritePolicy(policy)
command := newWriteCommand(clnt.cluster, policy, key, bins, nil, _ADD)
return command.Execute()
} | go | {
"resource": ""
} |
// BatchExists checks the existence of multiple keys in one batch call,
// returning a bool per key in the same order as keys.
func (clnt *Client) BatchExists(policy *BatchPolicy, keys []*Key) ([]bool, error) {
	policy = clnt.getUsableBatchPolicy(policy)
	// same array can be used without synchronization;
	// when a key exists, the corresponding index will be marked true
	existsArray := make([]bool, len(keys))
	// pass nil to make sure it will be cloned and prepared
	cmd := newBatchCommandExists(nil, nil, nil, policy, keys, existsArray)
	if err := clnt.batchExecute(policy, keys, cmd); err != nil {
		return nil, err
	}
	return existsArray, nil
}
"resource": ""
} |
q9868 | GetHeader | train | func (clnt *Client) GetHeader(policy *BasePolicy, key *Key) (*Record, error) {
policy = clnt.getUsablePolicy(policy)
command := newReadHeaderCommand(clnt.cluster, policy, key)
if err := command.Execute(); err != nil {
return nil, err
}
return command.GetRecord(), nil
} | go | {
"resource": ""
} |
// BatchGetHeader reads only record metadata (no bin data) for multiple
// keys in one batch call, returning a record per key in the same order.
// With policy.AllowPartialResults, partial results are returned along
// with the error.
func (clnt *Client) BatchGetHeader(policy *BatchPolicy, keys []*Key) ([]*Record, error) {
	policy = clnt.getUsableBatchPolicy(policy)
	// same array can be used without synchronization;
	// when a key exists, the corresponding index will be set to record
	records := make([]*Record, len(keys))
	cmd := newBatchCommandGet(nil, nil, nil, policy, keys, nil, records, _INFO1_READ|_INFO1_NOBINDATA)
	err := clnt.batchExecute(policy, keys, cmd)
	if err != nil && !policy.AllowPartialResults {
		return nil, err
	}
	return records, err
}
"resource": ""
} |
// ScanNode starts an asynchronous scan of the given namespace/set on a
// single node, streaming results (optionally restricted to binNames)
// through the returned Recordset.
func (clnt *Client) ScanNode(apolicy *ScanPolicy, node *Node, namespace string, setName string, binNames ...string) (*Recordset, error) {
	// copy the policy so later caller mutations don't affect the scan
	policy := *clnt.getUsableScanPolicy(apolicy)
	// results channel must be async for performance
	taskID := uint64(xornd.Int64())
	res := newRecordset(policy.RecordQueueSize, 1, taskID)
	go clnt.scanNode(&policy, node, res, namespace, setName, taskID, binNames...)
	return res, nil
}
"resource": ""
} |
// RegisterUDF uploads a UDF module (base64-encoded) to one cluster node
// via an info "udf-put" command; that node distributes it to the rest of
// the cluster. The returned RegisterTask can be polled for completion.
// Server-side registration errors are decoded and returned as
// COMMAND_REJECTED.
func (clnt *Client) RegisterUDF(policy *WritePolicy, udfBody []byte, serverPath string, language Language) (*RegisterTask, error) {
	policy = clnt.getUsableWritePolicy(policy)
	content := base64.StdEncoding.EncodeToString(udfBody)
	var strCmd bytes.Buffer
	// errors are to remove errcheck warnings
	// they will always be nil as stated in golang docs
	_, err := strCmd.WriteString("udf-put:filename=")
	_, err = strCmd.WriteString(serverPath)
	_, err = strCmd.WriteString(";content=")
	_, err = strCmd.WriteString(content)
	_, err = strCmd.WriteString(";content-len=")
	_, err = strCmd.WriteString(strconv.Itoa(len(content)))
	_, err = strCmd.WriteString(";udf-type=")
	_, err = strCmd.WriteString(string(language))
	_, err = strCmd.WriteString(";")
	// Send UDF to one node. That node will distribute the UDF to other nodes.
	responseMap, err := clnt.sendInfoCommand(policy.TotalTimeout, strCmd.String())
	if err != nil {
		return nil, err
	}
	response := responseMap[strCmd.String()]
	// parse the semicolon-separated key=value response
	res := make(map[string]string)
	vals := strings.Split(response, ";")
	for _, pair := range vals {
		t := strings.SplitN(pair, "=", 2)
		if len(t) == 2 {
			res[t[0]] = t[1]
		} else if len(t) == 1 {
			res[t[0]] = ""
		}
	}
	if _, exists := res["error"]; exists {
		// the failure message is base64-encoded by the server
		msg, _ := base64.StdEncoding.DecodeString(res["message"])
		return nil, NewAerospikeError(COMMAND_REJECTED, fmt.Sprintf("Registration failed: %s\nFile: %s\nLine: %s\nMessage: %s",
			res["error"], res["file"], res["line"], msg))
	}
	return NewRegisterTask(clnt.cluster, serverPath), nil
}
"resource": ""
} |
q9872 | RemoveUDF | train | func (clnt *Client) RemoveUDF(policy *WritePolicy, udfName string) (*RemoveTask, error) {
policy = clnt.getUsableWritePolicy(policy)
var strCmd bytes.Buffer
// errors are to remove errcheck warnings
// they will always be nil as stated in golang docs
_, err := strCmd.WriteString("udf-remove:filename=")
_, err = strCmd.WriteString(udfName)
_, err = strCmd.WriteString(";")
// Send command to one node. That node will distribute it to other nodes.
responseMap, err := clnt.sendInfoCommand(policy.TotalTimeout, strCmd.String())
if err != nil {
return nil, err
}
response := responseMap[strCmd.String()]
if response == "ok" {
return NewRemoveTask(clnt.cluster, udfName), nil
}
return nil, NewAerospikeError(SERVER_ERROR, response)
} | go | {
"resource": ""
} |
q9873 | ListUDF | train | func (clnt *Client) ListUDF(policy *BasePolicy) ([]*UDF, error) {
policy = clnt.getUsablePolicy(policy)
var strCmd bytes.Buffer
// errors are to remove errcheck warnings
// they will always be nil as stated in golang docs
_, err := strCmd.WriteString("udf-list")
// Send command to one node. That node will distribute it to other nodes.
responseMap, err := clnt.sendInfoCommand(policy.TotalTimeout, strCmd.String())
if err != nil {
return nil, err
}
response := responseMap[strCmd.String()]
vals := strings.Split(response, ";")
res := make([]*UDF, 0, len(vals))
for _, udfInfo := range vals {
if strings.Trim(udfInfo, " ") == "" {
continue
}
udfParts := strings.Split(udfInfo, ",")
udf := &UDF{}
for _, values := range udfParts {
valueParts := strings.Split(values, "=")
if len(valueParts) == 2 {
switch valueParts[0] {
case "filename":
udf.Filename = valueParts[1]
case "hash":
udf.Hash = valueParts[1]
case "type":
udf.Language = Language(valueParts[1])
}
}
}
res = append(res, udf)
}
return res, nil
} | go | {
"resource": ""
} |
q9874 | ExecuteUDFNode | train | func (clnt *Client) ExecuteUDFNode(policy *QueryPolicy,
node *Node,
statement *Statement,
packageName string,
functionName string,
functionArgs ...Value,
) (*ExecuteTask, error) {
policy = clnt.getUsableQueryPolicy(policy)
if node == nil {
return nil, NewAerospikeError(SERVER_NOT_AVAILABLE, "ExecuteUDFNode failed because node is nil.")
}
statement.SetAggregateFunction(packageName, functionName, functionArgs, false)
command := newServerCommand(node, policy, statement)
err := command.Execute()
return NewExecuteTask(clnt.cluster, statement), err
} | go | {
"resource": ""
} |
q9875 | QueryNode | train | func (clnt *Client) QueryNode(policy *QueryPolicy, node *Node, statement *Statement) (*Recordset, error) {
policy = clnt.getUsableQueryPolicy(policy)
// results channel must be async for performance
recSet := newRecordset(policy.RecordQueueSize, 1, statement.TaskId)
// copy policies to avoid race conditions
newPolicy := *policy
command := newQueryRecordCommand(node, &newPolicy, statement, recSet)
go func() {
command.Execute()
}()
return recSet, nil
} | go | {
"resource": ""
} |
q9876 | CreateIndex | train | func (clnt *Client) CreateIndex(
policy *WritePolicy,
namespace string,
setName string,
indexName string,
binName string,
indexType IndexType,
) (*IndexTask, error) {
policy = clnt.getUsableWritePolicy(policy)
return clnt.CreateComplexIndex(policy, namespace, setName, indexName, binName, indexType, ICT_DEFAULT)
} | go | {
"resource": ""
} |
q9877 | DropIndex | train | func (clnt *Client) DropIndex(
policy *WritePolicy,
namespace string,
setName string,
indexName string,
) error {
policy = clnt.getUsableWritePolicy(policy)
var strCmd bytes.Buffer
_, err := strCmd.WriteString("sindex-delete:ns=")
_, err = strCmd.WriteString(namespace)
if len(setName) > 0 {
_, err = strCmd.WriteString(";set=")
_, err = strCmd.WriteString(setName)
}
_, err = strCmd.WriteString(";indexname=")
_, err = strCmd.WriteString(indexName)
// Send index command to one node. That node will distribute the command to other nodes.
responseMap, err := clnt.sendInfoCommand(policy.TotalTimeout, strCmd.String())
if err != nil {
return err
}
response := responseMap[strCmd.String()]
if strings.ToUpper(response) == "OK" {
// Return task that could optionally be polled for completion.
task := NewDropIndexTask(clnt.cluster, namespace, indexName)
return <-task.OnComplete()
}
if strings.HasPrefix(response, "FAIL:201") {
// Index did not previously exist. Return without error.
return nil
}
return NewAerospikeError(INDEX_GENERIC, "Drop index failed: "+response)
} | go | {
"resource": ""
} |
q9878 | DropUser | train | func (clnt *Client) DropUser(policy *AdminPolicy, user string) error {
policy = clnt.getUsableAdminPolicy(policy)
command := newAdminCommand(nil)
return command.dropUser(clnt.cluster, policy, user)
} | go | {
"resource": ""
} |
q9879 | ChangePassword | train | func (clnt *Client) ChangePassword(policy *AdminPolicy, user string, password string) error {
policy = clnt.getUsableAdminPolicy(policy)
if clnt.cluster.user == "" {
return NewAerospikeError(INVALID_USER)
}
hash, err := hashPassword(password)
if err != nil {
return err
}
command := newAdminCommand(nil)
if user == clnt.cluster.user {
// Change own password.
if err := command.changePassword(clnt.cluster, policy, user, hash); err != nil {
return err
}
} else {
// Change other user's password by user admin.
if err := command.setPassword(clnt.cluster, policy, user, hash); err != nil {
return err
}
}
clnt.cluster.changePassword(user, password, hash)
return nil
} | go | {
"resource": ""
} |
q9880 | GrantRoles | train | func (clnt *Client) GrantRoles(policy *AdminPolicy, user string, roles []string) error {
policy = clnt.getUsableAdminPolicy(policy)
command := newAdminCommand(nil)
return command.grantRoles(clnt.cluster, policy, user, roles)
} | go | {
"resource": ""
} |
q9881 | RevokeRoles | train | func (clnt *Client) RevokeRoles(policy *AdminPolicy, user string, roles []string) error {
policy = clnt.getUsableAdminPolicy(policy)
command := newAdminCommand(nil)
return command.revokeRoles(clnt.cluster, policy, user, roles)
} | go | {
"resource": ""
} |
q9882 | QueryUser | train | func (clnt *Client) QueryUser(policy *AdminPolicy, user string) (*UserRoles, error) {
policy = clnt.getUsableAdminPolicy(policy)
command := newAdminCommand(nil)
return command.queryUser(clnt.cluster, policy, user)
} | go | {
"resource": ""
} |
q9883 | QueryUsers | train | func (clnt *Client) QueryUsers(policy *AdminPolicy) ([]*UserRoles, error) {
policy = clnt.getUsableAdminPolicy(policy)
command := newAdminCommand(nil)
return command.queryUsers(clnt.cluster, policy)
} | go | {
"resource": ""
} |
q9884 | QueryRole | train | func (clnt *Client) QueryRole(policy *AdminPolicy, role string) (*Role, error) {
policy = clnt.getUsableAdminPolicy(policy)
command := newAdminCommand(nil)
return command.queryRole(clnt.cluster, policy, role)
} | go | {
"resource": ""
} |
q9885 | QueryRoles | train | func (clnt *Client) QueryRoles(policy *AdminPolicy) ([]*Role, error) {
policy = clnt.getUsableAdminPolicy(policy)
command := newAdminCommand(nil)
return command.queryRoles(clnt.cluster, policy)
} | go | {
"resource": ""
} |
q9886 | CreateRole | train | func (clnt *Client) CreateRole(policy *AdminPolicy, roleName string, privileges []Privilege) error {
policy = clnt.getUsableAdminPolicy(policy)
command := newAdminCommand(nil)
return command.createRole(clnt.cluster, policy, roleName, privileges)
} | go | {
"resource": ""
} |
q9887 | DropRole | train | func (clnt *Client) DropRole(policy *AdminPolicy, roleName string) error {
policy = clnt.getUsableAdminPolicy(policy)
command := newAdminCommand(nil)
return command.dropRole(clnt.cluster, policy, roleName)
} | go | {
"resource": ""
} |
q9888 | GrantPrivileges | train | func (clnt *Client) GrantPrivileges(policy *AdminPolicy, roleName string, privileges []Privilege) error {
policy = clnt.getUsableAdminPolicy(policy)
command := newAdminCommand(nil)
return command.grantPrivileges(clnt.cluster, policy, roleName, privileges)
} | go | {
"resource": ""
} |
q9889 | RevokePrivileges | train | func (clnt *Client) RevokePrivileges(policy *AdminPolicy, roleName string, privileges []Privilege) error {
policy = clnt.getUsableAdminPolicy(policy)
command := newAdminCommand(nil)
return command.revokePrivileges(clnt.cluster, policy, roleName, privileges)
} | go | {
"resource": ""
} |
q9890 | String | train | func (clnt *Client) String() string {
if clnt.cluster != nil {
return clnt.cluster.String()
}
return ""
} | go | {
"resource": ""
} |
q9891 | Stats | train | func (clnt *Client) Stats() (map[string]interface{}, error) {
resStats := clnt.cluster.statsCopy()
clusterStats := nodeStats{}
for _, stats := range resStats {
clusterStats.aggregate(&stats)
}
resStats["cluster-aggregated-stats"] = clusterStats
b, err := json.Marshal(resStats)
if err != nil {
return nil, err
}
res := map[string]interface{}{}
err = json.Unmarshal(b, &res)
if err != nil {
return nil, err
}
res["open-connections"] = clusterStats.ConnectionsOpen
return res, nil
} | go | {
"resource": ""
} |
q9892 | ReadFileEncodeBase64 | train | func ReadFileEncodeBase64(filename string) (string, error) {
// read whole the file
b, err := ioutil.ReadFile(filename)
if err != nil {
return "", err
}
return base64.StdEncoding.EncodeToString(b), nil
} | go | {
"resource": ""
} |
q9893 | TTL | train | func TTL(secsFromCitrusLeafEpoc uint32) uint32 {
switch secsFromCitrusLeafEpoc {
// don't convert magic values
case 0: // when set to don't expire, this value is returned
return math.MaxUint32
default:
// Record may not have expired on server, but delay or clock differences may
// cause it to look expired on client. Floor at 1, not 0, to avoid old
// "never expires" interpretation.
now := time.Now().Unix()
expiration := int64(CITRUSLEAF_EPOCH + secsFromCitrusLeafEpoc)
if expiration < 0 || expiration > now {
return uint32(expiration - now)
}
return 1
}
} | go | {
"resource": ""
} |
q9894 | BytesToHexString | train | func BytesToHexString(buf []byte) string {
hlist := make([]byte, 3*len(buf))
for i := range buf {
hex := fmt.Sprintf("%02x ", buf[i])
idx := i * 3
copy(hlist[idx:], hex)
}
return string(hlist)
} | go | {
"resource": ""
} |
q9895 | LittleBytesToInt32 | train | func LittleBytesToInt32(buf []byte, offset int) int32 {
l := len(buf[offset:])
if l > uint32sz {
l = uint32sz
}
r := int32(binary.LittleEndian.Uint32(buf[offset : offset+l]))
return r
} | go | {
"resource": ""
} |
q9896 | BytesToInt64 | train | func BytesToInt64(buf []byte, offset int) int64 {
l := len(buf[offset:])
if l > uint64sz {
l = uint64sz
}
r := int64(binary.BigEndian.Uint64(buf[offset : offset+l]))
return r
} | go | {
"resource": ""
} |
q9897 | BytesToInt32 | train | func BytesToInt32(buf []byte, offset int) int32 {
return int32(binary.BigEndian.Uint32(buf[offset : offset+uint32sz]))
} | go | {
"resource": ""
} |
q9898 | BytesToUint32 | train | func BytesToUint32(buf []byte, offset int) uint32 {
return binary.BigEndian.Uint32(buf[offset : offset+uint32sz])
} | go | {
"resource": ""
} |
q9899 | BytesToInt16 | train | func BytesToInt16(buf []byte, offset int) int16 {
return int16(binary.BigEndian.Uint16(buf[offset : offset+uint16sz]))
} | go | {
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.