_id
stringlengths
2
7
title
stringlengths
1
118
partition
stringclasses
3 values
text
stringlengths
52
85.5k
language
stringclasses
1 value
meta_information
dict
q9900
SetValue
train
func (ky *Key) SetValue(val Value) error { ky.userKey = val return ky.computeDigest() }
go
{ "resource": "" }
q9901
Equals
train
// Equals reports whether two keys address the same record,
// determined solely by digest equality.
func (ky *Key) Equals(other *Key) bool {
	return bytes.Equal(ky.digest[:], other.digest[:])
}
go
{ "resource": "" }
q9902
String
train
// String implements fmt.Stringer, rendering the key as
// "namespace:set:userKey:digestHex". A nil receiver yields ""; when the
// user key is absent the userKey segment is left empty.
func (ky *Key) String() string {
	if ky == nil {
		return ""
	}
	if ky.userKey != nil {
		return fmt.Sprintf("%s:%s:%s:%v", ky.namespace, ky.setName, ky.userKey.String(), Buffer.BytesToHexString(ky.digest[:]))
	}
	// no user key was set (e.g. key built from a digest only)
	return fmt.Sprintf("%s:%s::%v", ky.namespace, ky.setName, Buffer.BytesToHexString(ky.digest[:]))
}
go
{ "resource": "" }
q9903
NewKey
train
func NewKey(namespace string, setName string, key interface{}) (*Key, error) { newKey := &Key{ namespace: namespace, setName: setName, userKey: NewValue(key), } if err := newKey.computeDigest(); err != nil { return nil, err } return newKey, nil }
go
{ "resource": "" }
q9904
NewKeyWithDigest
train
func NewKeyWithDigest(namespace string, setName string, key interface{}, digest []byte) (*Key, error) { newKey := &Key{ namespace: namespace, setName: setName, userKey: NewValue(key), } if err := newKey.SetDigest(digest); err != nil { return nil, err } return newKey, nil }
go
{ "resource": "" }
q9905
SetDigest
train
func (ky *Key) SetDigest(digest []byte) error { if len(digest) != 20 { return NewAerospikeError(PARAMETER_ERROR, "Invalid digest: Digest is required to be exactly 20 bytes.") } copy(ky.digest[:], digest) return nil }
go
{ "resource": "" }
q9906
NewBatchRead
train
func NewBatchRead(key *Key, binNames []string) *BatchRead { res := &BatchRead{ Key: key, BinNames: binNames, } if len(binNames) == 0 { res.ReadAllBins = true } return res }
go
{ "resource": "" }
q9907
LValueToInterface
train
func LValueToInterface(val lua.LValue) interface{} { switch val.Type() { case lua.LTNil: return nil case lua.LTBool: return lua.LVAsBool(val) case lua.LTNumber: return float64(lua.LVAsNumber(val)) case lua.LTString: return lua.LVAsString(val) case lua.LTUserData: ud := val.(*lua.LUserData).Value switch v := ud.(type) { case *LuaMap: return v.m case *LuaList: return v.l default: return v } case lua.LTTable: t := val.(*lua.LTable) m := make(map[interface{}]interface{}, t.Len()) t.ForEach(func(k, v lua.LValue) { m[k] = v }) return m default: panic(fmt.Sprintf("unrecognized data type %#v", val)) } }
go
{ "resource": "" }
q9908
ictToString
train
func ictToString(ict IndexCollectionType) string { switch ict { case ICT_LIST: return "LIST" case ICT_MAPKEYS: return "MAPKEYS" case ICT_MAPVALUES: return "MAPVALUES" default: panic(fmt.Sprintf("Unknown IndexCollectionType value %v", ict)) } }
go
{ "resource": "" }
q9909
NewWritePolicy
train
// NewWritePolicy returns a write policy with sensible defaults
// (UPDATE existence semantics, no generation check, full commit) and the
// supplied generation and expiration values.
func NewWritePolicy(generation, expiration uint32) *WritePolicy {
	res := &WritePolicy{
		BasePolicy:         *NewPolicy(),
		RecordExistsAction: UPDATE,
		GenerationPolicy:   NONE,
		CommitLevel:        COMMIT_ALL,
		Generation:         generation,
		Expiration:         expiration,
	}

	// Writes may not be idempotent.
	// do not allow retries on writes by default.
	res.MaxRetries = 0

	return res
}
go
{ "resource": "" }
q9910
NewPolicy
train
// NewPolicy returns a base policy with client defaults: one-copy
// consistency, no total timeout, a 30s socket timeout, two retries with a
// 1ms backoff, and sequence replica selection. The key is not sent by
// default and reads are not linearized.
func NewPolicy() *BasePolicy {
	return &BasePolicy{
		Priority:            DEFAULT,
		ConsistencyLevel:    CONSISTENCY_ONE,
		TotalTimeout:        0 * time.Millisecond,
		SocketTimeout:       30 * time.Second,
		MaxRetries:          2,
		SleepBetweenRetries: 1 * time.Millisecond,
		SleepMultiplier:     1.0,
		ReplicaPolicy:       SEQUENCE,
		SendKey:             false,
		LinearizeRead:       false,
	}
}
go
{ "resource": "" }
q9911
socketTimeout
train
func (p *BasePolicy) socketTimeout() time.Duration { if p.TotalTimeout == 0 && p.SocketTimeout == 0 { return 0 } else if p.TotalTimeout > 0 && p.SocketTimeout == 0 { return p.TotalTimeout } else if p.TotalTimeout == 0 && p.SocketTimeout > 0 { return p.SocketTimeout } else if p.TotalTimeout > 0 && p.SocketTimeout > 0 { if p.SocketTimeout < p.TotalTimeout { return p.SocketTimeout } } return p.TotalTimeout }
go
{ "resource": "" }
q9912
EncodeBlob
train
// EncodeBlob implements the blob encoder interface by serializing the
// person's name as raw bytes.
func (p Person) EncodeBlob() ([]byte, error) {
	// The previous code wrapped the conversion in a pointless
	// single-argument append() call; the plain conversion is equivalent.
	return []byte(p.name), nil
}
go
{ "resource": "" }
q9913
DecodeBlob
train
func (p *Person) DecodeBlob(buf []byte) error { p.name = string(buf) return nil }
go
{ "resource": "" }
q9914
NewStatement
train
// NewStatement creates a query statement for the given namespace and set.
// If binNames are supplied, only those bins are returned; a random
// non-zero task ID is assigned and result data is returned by default.
func NewStatement(ns string, set string, binNames ...string) *Statement {
	return &Statement{
		Namespace:  ns,
		SetName:    set,
		BinNames:   binNames,
		returnData: true,
		TaskId:     uint64(xornd.Int64()),
	}
}
go
{ "resource": "" }
q9915
SetAggregateFunction
train
func (stmt *Statement) SetAggregateFunction(packageName string, functionName string, functionArgs []Value, returnData bool) { stmt.packageName = packageName stmt.functionName = functionName stmt.functionArgs = functionArgs stmt.returnData = returnData }
go
{ "resource": "" }
q9916
setTaskID
train
// setTaskID assigns a random, guaranteed non-zero task ID to the
// statement (zero is reserved, so it re-rolls until non-zero).
func (stmt *Statement) setTaskID() {
	for {
		if stmt.TaskId != 0 {
			return
		}
		stmt.TaskId = uint64(xornd.Int64())
	}
}
go
{ "resource": "" }
q9917
SetLevel
train
func (lgr *logger) SetLevel(level LogPriority) { lgr.mutex.Lock() defer lgr.mutex.Unlock() lgr.level = level }
go
{ "resource": "" }
q9918
LogAtLevel
train
func (lgr *logger) LogAtLevel(level LogPriority, format string, v ...interface{}) { switch level { case DEBUG: lgr.Debug(format, v...) case INFO: lgr.Info(format, v...) case WARNING: lgr.Warn(format, v...) case ERR: lgr.Error(format, v...) } }
go
{ "resource": "" }
q9919
Debug
train
// Debug logs a formatted message when the configured level admits DEBUG.
// When the underlying logger is a *log.Logger, Output is called with call
// depth 2 so the log records the caller's location rather than this
// wrapper's.
func (lgr *logger) Debug(format string, v ...interface{}) {
	lgr.mutex.RLock()
	defer lgr.mutex.RUnlock()
	if lgr.level <= DEBUG {
		if l, ok := lgr.Logger.(*log.Logger); ok {
			l.Output(2, fmt.Sprintf(format, v...))
		} else {
			lgr.Logger.Printf(format, v...)
		}
	}
}
go
{ "resource": "" }
q9920
KeepConnection
train
// KeepConnection reports whether the connection that produced err may be
// returned to the pool. Non-Aerospike errors (e.g. network failures) and a
// known set of fatal/stream-corrupting result codes force the connection
// to be discarded.
func KeepConnection(err error) bool {
	// if error is not an AerospikeError, Throw the connection away conservatively
	ae, ok := err.(AerospikeError)
	if !ok {
		return false
	}

	switch ae.resultCode {
	case 0, // Zero Value
		QUERY_TERMINATED,
		SCAN_TERMINATED,
		PARSE_ERROR,
		SERIALIZE_ERROR,
		SERVER_NOT_AVAILABLE,
		SCAN_ABORT,
		QUERY_ABORTED,
		INVALID_NODE_ERROR,
		SERVER_MEM_ERROR,
		TIMEOUT,
		INDEX_OOM,
		QUERY_TIMEOUT:
		return false

	default:
		return true
	}
}
go
{ "resource": "" }
q9921
NewBatchPolicy
train
// NewBatchPolicy returns a batch policy with defaults: sequential node
// access (one concurrent node), inline processing allowed, partial
// results disallowed, and set names not sent with the batch.
func NewBatchPolicy() *BatchPolicy {
	return &BatchPolicy{
		BasePolicy:          *NewPolicy(),
		ConcurrentNodes:     1,
		AllowInline:         true,
		AllowPartialResults: false,
		SendSetName:         false,
	}
}
go
{ "resource": "" }
q9922
newSingleConnectionHeap
train
func newSingleConnectionHeap(size int) *singleConnectionHeap { if size <= 0 { panic("Heap size cannot be less than 1") } return &singleConnectionHeap{ full: false, data: make([]*Connection, uint32(size)), size: uint32(size), } }
go
{ "resource": "" }
q9923
DropIdleTail
train
// DropIdleTail closes and removes the oldest pooled connection if it is
// disconnected or idle, returning true when a connection was dropped.
// Returns false when the heap is empty or the tail connection is still
// live and in-use-worthy.
func (h *singleConnectionHeap) DropIdleTail() bool {
	h.mutex.Lock()
	defer h.mutex.Unlock()

	// if heap is not empty
	if h.full || (h.tail != h.head) {
		// tail points one slot before the oldest element in the ring
		conn := h.data[(h.tail+1)%h.size]
		if conn.IsConnected() && !conn.isIdle() {
			// oldest connection is healthy and fresh; nothing to drop
			return false
		}

		// advance tail past the dropped slot and release the reference
		h.tail = (h.tail + 1) % h.size
		h.data[h.tail] = nil
		h.full = false
		conn.Close()
		return true
	}
	return false
}
go
{ "resource": "" }
q9924
Len
train
// Len returns the number of connections currently held in the ring
// buffer, accounting for head/tail wrap-around and the full flag.
func (h *singleConnectionHeap) Len() int {
	cnt := 0
	h.mutex.Lock()
	if !h.full {
		if h.head >= h.tail {
			cnt = int(h.head) - int(h.tail)
		} else {
			// head has wrapped around past the end of the buffer
			cnt = int(h.size) - (int(h.tail) - int(h.head))
		}
	} else {
		cnt = int(h.size)
	}
	h.mutex.Unlock()
	return cnt
}
go
{ "resource": "" }
q9925
DropIdle
train
// DropIdle walks every sub-heap and keeps dropping its oldest connection
// until the oldest remaining one is live and not idle.
func (h *connectionHeap) DropIdle() {
	for i := range h.heaps {
		// DropIdleTail reports true while it keeps removing stale tails
		for h.heaps[i].DropIdleTail() {
		}
	}
}
go
{ "resource": "" }
q9926
Len
train
func (h *connectionHeap) Len(hint byte) (cnt int) { if int(hint) < len(h.heaps) { cnt = h.heaps[hint].Len() } else { for i := range h.heaps { cnt += h.heaps[i].Len() } } return cnt }
go
{ "resource": "" }
q9927
clone
train
func (p *Partitions) clone() *Partitions { replicas := make([][]*Node, len(p.Replicas)) for i := range p.Replicas { r := make([]*Node, len(p.Replicas[i])) copy(r, p.Replicas[i]) replicas[i] = r } regimes := make([]int, len(p.regimes)) copy(regimes, p.regimes) return &Partitions{ Replicas: replicas, CPMode: p.CPMode, regimes: regimes, } }
go
{ "resource": "" }
q9928
validate
train
// validate sanity-checks the partition map: every namespace must carry
// exactly _PARTITIONS regimes and _PARTITIONS node entries per replica,
// and no partition slot may be nil. All findings are aggregated and
// returned as one INVALID_CLUSTER_PARTITION_MAP error; nil means the map
// is consistent.
func (pm partitionMap) validate() error {
	// per-namespace lists of partition indexes missing a node assignment
	masterNodePartitionNotDefined := map[string][]int{}
	replicaNodePartitionNotDefined := map[string][]int{}

	var errList []error
	for nsName, partition := range pm {
		if len(partition.regimes) != _PARTITIONS {
			errList = append(errList, fmt.Errorf("Wrong number of regimes for namespace `%s`. Must be %d, but found %d..", nsName, _PARTITIONS, len(partition.regimes)))
		}

		for replica, partitionNodes := range partition.Replicas {
			if len(partitionNodes) != _PARTITIONS {
				errList = append(errList, fmt.Errorf("Wrong number of partitions for namespace `%s`, replica `%d`. Must be %d, but found %d.", nsName, replica, _PARTITIONS, len(partitionNodes)))
			}

			for pIndex, node := range partitionNodes {
				if node == nil {
					// replica 0 is the master copy; track its gaps separately
					if replica == 0 {
						masterNodePartitionNotDefined[nsName] = append(masterNodePartitionNotDefined[nsName], pIndex)
					} else {
						replicaNodePartitionNotDefined[nsName] = append(replicaNodePartitionNotDefined[nsName], pIndex)
					}
				}
			}
		}
	}

	if len(errList) > 0 || len(masterNodePartitionNotDefined) > 0 || len(replicaNodePartitionNotDefined) > 0 {
		for nsName, partitionList := range masterNodePartitionNotDefined {
			errList = append(errList, fmt.Errorf("Master partition nodes not defined for namespace `%s`: %d out of %d", nsName, len(partitionList), _PARTITIONS))
		}
		for nsName, partitionList := range replicaNodePartitionNotDefined {
			errList = append(errList, fmt.Errorf("Replica partition nodes not defined for namespace `%s`: %d out of %d", nsName, len(partitionList), _PARTITIONS))
		}
		errList = append(errList, errors.New("Partition map errors normally occur when the cluster has partitioned due to network anomaly or node crash, or is not configured properly. Refer to https://www.aerospike.com/docs/operations/configure for more information."))
		return NewAerospikeError(INVALID_CLUSTER_PARTITION_MAP, mergeErrors(errList).Error())
	}

	return nil
}
go
{ "resource": "" }
q9929
writeRecords
train
func writeRecords( client *as.Client, keyPrefix string, binName string, valuePrefix string, size int, ) { for i := 1; i <= size; i++ { key, _ := as.NewKey(*shared.Namespace, *shared.Set, keyPrefix+strconv.Itoa(i)) bin := as.NewBin(binName, valuePrefix+strconv.Itoa(i)) log.Printf("Put: ns=%s set=%s key=%s bin=%s value=%s", key.Namespace(), key.SetName(), key.Value(), bin.Name, bin.Value) client.PutBins(shared.WritePolicy, key, bin) } }
go
{ "resource": "" }
q9930
batchExists
train
// batchExists checks existence of `size` records in a single batch call
// and logs the result for each key. Panics on batch failure; individual
// key-construction errors are ignored (example code).
func batchExists(
	client *as.Client,
	keyPrefix string,
	size int,
) {
	// Batch into one call.
	keys := make([]*as.Key, size)
	for i := 0; i < size; i++ {
		keys[i], _ = as.NewKey(*shared.Namespace, *shared.Set, keyPrefix+strconv.Itoa(i+1))
	}

	existsArray, err := client.BatchExists(nil, keys)
	shared.PanicOnError(err)

	for i := 0; i < len(existsArray); i++ {
		key := keys[i]
		exists := existsArray[i]
		log.Printf("Record: ns=%s set=%s key=%s exists=%t",
			key.Namespace(), key.SetName(), key.Value(), exists)
	}
}
go
{ "resource": "" }
q9931
batchReads
train
// batchReads fetches one bin from `size` records in a single batch call,
// logging hits at INFO and misses (nil records) at ERR, then verifies the
// server returned exactly one slot per requested key.
func batchReads(
	client *as.Client,
	keyPrefix string,
	binName string,
	size int,
) {
	// Batch gets into one call.
	keys := make([]*as.Key, size)
	for i := 0; i < size; i++ {
		keys[i], _ = as.NewKey(*shared.Namespace, *shared.Set, keyPrefix+strconv.Itoa(i+1))
	}

	records, err := client.BatchGet(nil, keys, binName)
	shared.PanicOnError(err)

	for i := 0; i < len(records); i++ {
		key := keys[i]
		record := records[i]
		// a nil record means the key was not found; log it as an error
		level := asl.ERR
		var value interface{}

		if record != nil {
			level = asl.INFO
			value = record.Bins[binName]
		}
		asl.Logger.LogAtLevel(level, "Record: ns=%s set=%s key=%s bin=%s value=%s",
			key.Namespace(), key.SetName(), key.Value(), binName, value)
	}

	if len(records) != size {
		log.Fatalf("Record size mismatch. Expected %d. Received %d.", size, len(records))
	}
}
go
{ "resource": "" }
q9932
batchReadHeaders
train
// batchReadHeaders fetches only record metadata (generation/expiration)
// for `size` records in one batch call, logging found records at INFO and
// absent ones at ERR, then verifies the response count matches.
func batchReadHeaders(
	client *as.Client,
	keyPrefix string,
	size int,
) {
	// Batch gets into one call.
	keys := make([]*as.Key, size)
	for i := 0; i < size; i++ {
		keys[i], _ = as.NewKey(*shared.Namespace, *shared.Set, keyPrefix+strconv.Itoa(i+1))
	}

	records, err := client.BatchGetHeader(nil, keys)
	shared.PanicOnError(err)

	for i := 0; i < len(records); i++ {
		key := keys[i]
		record := records[i]
		level := asl.ERR
		generation := uint32(0)
		expiration := uint32(0)

		// a record with zero generation AND zero expiration is treated as absent
		if record != nil && (record.Generation > 0 || record.Expiration > 0) {
			level = asl.INFO
			generation = record.Generation
			expiration = record.Expiration
		}
		asl.Logger.LogAtLevel(level, "Record: ns=%s set=%s key=%s generation=%d expiration=%d",
			key.Namespace(), key.SetName(), key.Value(), generation, expiration)
	}

	if len(records) != size {
		log.Fatalf("Record size mismatch. Expected %d. Received %d.", size, len(records))
	}
}
go
{ "resource": "" }
q9933
NewRegisterTask
train
// NewRegisterTask creates a task that tracks completion of a UDF package
// registration across the cluster's nodes.
func NewRegisterTask(cluster *Cluster, packageName string) *RegisterTask {
	task := &RegisterTask{
		baseTask:    newTask(cluster),
		packageName: packageName,
	}
	return task
}
go
{ "resource": "" }
q9934
IsDone
train
// IsDone polls every cluster node's udf-list and reports whether the
// registered package is visible on all of them. Returns early with false
// as soon as any node's listing lacks the package; info-request failures
// are propagated as errors.
func (tskr *RegisterTask) IsDone() (bool, error) {
	command := "udf-list"
	nodes := tskr.cluster.GetNodes()
	done := false

	for _, node := range nodes {
		// retry up to 5 times per node before giving up
		responseMap, err := node.requestInfoWithRetry(&tskr.cluster.infoPolicy, 5, command)
		if err != nil {
			return false, err
		}

		for _, response := range responseMap {
			find := "filename=" + tskr.packageName
			index := strings.Index(response, find)

			if index < 0 {
				// package not yet visible on this node
				return false, nil
			}
			done = true
		}
	}
	// NOTE(review): a cluster with zero nodes yields done == false here.
	return done, nil
}
go
{ "resource": "" }
q9935
registerLuaListType
train
// registerLuaListType installs the List/list types into the Lua state:
// the "List" global exposes the type metatable, while the "list" package
// carries constructors, static helpers, and the instance metamethods
// (__index, __newindex, __len, __tostring).
func registerLuaListType(L *lua.LState) {
	mt := L.NewTypeMetatable(luaLuaListTypeName)

	// List package
	L.SetGlobal("List", mt)

	// static attributes
	L.SetMetatable(mt, mt)

	// list package
	// NOTE: a fresh metatable is created for the lowercase package on purpose
	mt = L.NewTypeMetatable(luaLuaListTypeName)
	L.SetGlobal("list", mt)

	// static attributes
	L.SetField(mt, "__call", L.NewFunction(newLuaList))

	L.SetField(mt, "create", L.NewFunction(createLuaList))

	L.SetField(mt, "size", L.NewFunction(luaListSize))
	L.SetField(mt, "insert", L.NewFunction(luaListInsert))
	L.SetField(mt, "append", L.NewFunction(luaListAppend))
	L.SetField(mt, "prepend", L.NewFunction(luaListPrepend))
	L.SetField(mt, "take", L.NewFunction(luaListTake))
	L.SetField(mt, "remove", L.NewFunction(luaListRemove))
	L.SetField(mt, "drop", L.NewFunction(luaListDrop))
	L.SetField(mt, "trim", L.NewFunction(luaListTrim))
	L.SetField(mt, "clone", L.NewFunction(luaListClone))
	L.SetField(mt, "concat", L.NewFunction(luaListConcat))
	L.SetField(mt, "merge", L.NewFunction(luaListMerge))
	L.SetField(mt, "iterator", L.NewFunction(luaListIterator))

	// methods
	L.SetFuncs(mt, map[string]lua.LGFunction{
		"__index":    luaListIndex,
		"__newindex": luaListNewIndex,
		"__len":      luaListLen,
		"__tostring": luaListToString,
	})

	L.SetMetatable(mt, mt)
}
go
{ "resource": "" }
q9936
NewAtomicArray
train
// NewAtomicArray allocates a mutex-guarded array of `length` slots,
// each initialized to nil.
func NewAtomicArray(length int) *AtomicArray {
	arr := &AtomicArray{
		length: length,
		items:  make([]interface{}, length),
	}
	return arr
}
go
{ "resource": "" }
q9937
Get
train
func (aa *AtomicArray) Get(idx int) interface{} { // do not lock if not needed if idx < 0 || idx >= aa.length { return nil } aa.mutex.RLock() res := aa.items[idx] aa.mutex.RUnlock() return res }
go
{ "resource": "" }
q9938
Set
train
func (aa *AtomicArray) Set(idx int, node interface{}) error { // do not lock if not needed if idx < 0 || idx >= aa.length { return fmt.Errorf("index %d is larger than array size (%d)", idx, aa.length) } aa.mutex.Lock() aa.items[idx] = node aa.mutex.Unlock() return nil }
go
{ "resource": "" }
q9939
Length
train
func (aa *AtomicArray) Length() int { aa.mutex.RLock() res := aa.length aa.mutex.RUnlock() return res }
go
{ "resource": "" }
q9940
PackList
train
func (ts stringSlice) PackList(buf BufferEx) (int, error) { size := 0 for _, elem := range ts { n, err := PackString(buf, elem) size += n if err != nil { return size, err } } return size, nil }
go
{ "resource": "" }
q9941
PackList
train
func (ts intSlice) PackList(buf BufferEx) (int, error) { size := 0 for _, elem := range ts { n, err := PackInt64(buf, int64(elem)) size += n if err != nil { return size, err } } return size, nil }
go
{ "resource": "" }
q9942
PackList
train
func (ts uint64Slice) PackList(buf BufferEx) (int, error) { size := 0 for _, elem := range ts { n, err := PackUInt64(buf, elem) size += n if err != nil { return size, err } } return size, nil }
go
{ "resource": "" }
q9943
PackList
train
func (ts float32Slice) PackList(buf BufferEx) (int, error) { size := 0 for _, elem := range ts { n, err := PackFloat32(buf, elem) size += n if err != nil { return size, err } } return size, nil }
go
{ "resource": "" }
q9944
PackList
train
func (ts float64Slice) PackList(buf BufferEx) (int, error) { size := 0 for _, elem := range ts { n, err := PackFloat64(buf, elem) size += n if err != nil { return size, err } } return size, nil }
go
{ "resource": "" }
q9945
PackList
train
// PackList msgpack-encodes the list into cmd, returning bytes written.
func PackList(cmd BufferEx, list ListIter) (int, error) { return packList(cmd, list) }
go
{ "resource": "" }
q9946
PackJson
train
// PackJson msgpack-encodes a string-keyed map into cmd, returning bytes written.
func PackJson(cmd BufferEx, theMap map[string]interface{}) (int, error) { return packJsonMap(cmd, theMap) }
go
{ "resource": "" }
q9947
PackMap
train
// PackMap msgpack-encodes the map into cmd, returning bytes written.
func PackMap(cmd BufferEx, theMap MapIter) (int, error) { return packMap(cmd, theMap) }
go
{ "resource": "" }
q9948
PackBytes
train
// PackBytes msgpack-encodes a byte slice into cmd, returning bytes written.
func PackBytes(cmd BufferEx, b []byte) (int, error) { return packBytes(cmd, b) }
go
{ "resource": "" }
q9949
PackInt64
train
// PackInt64 msgpack-encodes a signed 64-bit integer into cmd, returning bytes written.
func PackInt64(cmd BufferEx, val int64) (int, error) { return packAInt64(cmd, val) }
go
{ "resource": "" }
q9950
PackString
train
// PackString msgpack-encodes a string into cmd, returning bytes written.
func PackString(cmd BufferEx, val string) (int, error) { return packString(cmd, val) }
go
{ "resource": "" }
q9951
PackUInt64
train
// PackUInt64 msgpack-encodes an unsigned 64-bit integer into cmd, returning bytes written.
func PackUInt64(cmd BufferEx, val uint64) (int, error) { return packUInt64(cmd, val) }
go
{ "resource": "" }
q9952
PackBool
train
// PackBool msgpack-encodes a boolean into cmd, returning bytes written.
func PackBool(cmd BufferEx, val bool) (int, error) { return packBool(cmd, val) }
go
{ "resource": "" }
q9953
PackFloat32
train
// PackFloat32 msgpack-encodes a 32-bit float into cmd, returning bytes written.
func PackFloat32(cmd BufferEx, val float32) (int, error) { return packFloat32(cmd, val) }
go
{ "resource": "" }
q9954
PackFloat64
train
// PackFloat64 msgpack-encodes a 64-bit float into cmd, returning bytes written.
func PackFloat64(cmd BufferEx, val float64) (int, error) { return packFloat64(cmd, val) }
go
{ "resource": "" }
q9955
cache
train
// cache pre-encodes the operation's value into binValue so repeated use
// of the operation skips re-encoding; the encoder is discarded once the
// cached bytes are stored.
func (op *Operation) cache() error {
	packer := newPacker()

	if _, err := op.encoder(op, packer); err != nil {
		return err
	}

	op.binValue = BytesValue(packer.Bytes())
	op.encoder = nil // do not encode anymore; just use the cache
	op.used = false  // mark as unused so the cached form is picked up
	return nil
}
go
{ "resource": "" }
q9956
GetOpForBin
train
// GetOpForBin creates a read operation for a single named bin.
func GetOpForBin(binName string) *Operation {
	return &Operation{
		opType:   _READ,
		binName:  binName,
		binValue: NewNullValue(),
	}
}
go
{ "resource": "" }
q9957
PutOp
train
// PutOp creates a write operation for the given bin.
func PutOp(bin *Bin) *Operation {
	return &Operation{
		opType:   _WRITE,
		binName:  bin.Name,
		binValue: bin.Value,
	}
}
go
{ "resource": "" }
q9958
AppendOp
train
// AppendOp creates an operation that appends the bin's value to the
// existing bin content on the server.
func AppendOp(bin *Bin) *Operation {
	return &Operation{
		opType:   _APPEND,
		binName:  bin.Name,
		binValue: bin.Value,
	}
}
go
{ "resource": "" }
q9959
PrependOp
train
// PrependOp creates an operation that prepends the bin's value to the
// existing bin content on the server.
func PrependOp(bin *Bin) *Operation {
	return &Operation{
		opType:   _PREPEND,
		binName:  bin.Name,
		binValue: bin.Value,
	}
}
go
{ "resource": "" }
q9960
AddOp
train
// AddOp creates an operation that arithmetically adds the bin's value to
// the existing bin content on the server.
func AddOp(bin *Bin) *Operation {
	return &Operation{
		opType:   _ADD,
		binName:  bin.Name,
		binValue: bin.Value,
	}
}
go
{ "resource": "" }
q9961
NewMultiPolicy
train
// NewMultiPolicy returns a scan/query policy with defaults: unlimited
// concurrent nodes (0), a 50-record result queue, bin data included, and
// failure on cluster change enabled. The socket timeout is explicitly
// pinned to 30s.
func NewMultiPolicy() *MultiPolicy {
	bp := *NewPolicy()
	bp.SocketTimeout = 30 * time.Second
	return &MultiPolicy{
		BasePolicy:          bp,
		MaxConcurrentNodes:  0,
		RecordQueueSize:     50,
		IncludeBinData:      true,
		FailOnClusterChange: true,
	}
}
go
{ "resource": "" }
q9962
Get
train
func (bp *BufferPool) Get() (res []byte) { bp.mutex.Lock() if bp.pos >= 0 { res = bp.pool[bp.pos] bp.pos-- } else { res = make([]byte, bp.initBufSize, bp.initBufSize) } bp.mutex.Unlock() return res }
go
{ "resource": "" }
q9963
newNode
train
// newNode builds a Node from validator results: connection pool sized per
// client policy, generations primed to force an initial refresh, feature
// flags copied from the validator, and session/rack state initialized.
func newNode(cluster *Cluster, nv *nodeValidator) *Node {
	newNode := &Node{
		cluster: cluster,
		name:    nv.name,
		// address: nv.primaryAddress,
		host: nv.primaryHost,

		// Assign host to first IP alias because the server identifies nodes
		// by IP address (not hostname).
		connections:     *newConnectionHeap(cluster.clientPolicy.ConnectionQueueSize),
		connectionCount: *NewAtomicInt(0),
		// -1/-2 guarantee the first tend cycle sees both as stale
		peersGeneration:     *NewAtomicInt(-1),
		partitionGeneration: *NewAtomicInt(-2),
		referenceCount:      *NewAtomicInt(0),
		failures:            *NewAtomicInt(0),
		active:              *NewAtomicBool(true),
		partitionChanged:    *NewAtomicBool(false),

		supportsFloat:             *NewAtomicBool(nv.supportsFloat),
		supportsBatchIndex:        *NewAtomicBool(nv.supportsBatchIndex),
		supportsReplicas:          *NewAtomicBool(nv.supportsReplicas),
		supportsGeo:               *NewAtomicBool(nv.supportsGeo),
		supportsPeers:             *NewAtomicBool(nv.supportsPeers),
		supportsLUTNow:            *NewAtomicBool(nv.supportsLUTNow),
		supportsTruncateNamespace: *NewAtomicBool(nv.supportsTruncateNamespace),
	}

	newNode.aliases.Store(nv.aliases)
	newNode._sessionToken.Store(nv.sessionToken)
	newNode.racks.Store(map[string]int{})

	// this will reset to zero on first aggregation on the cluster,
	// therefore will only be counted once.
	atomic.AddInt64(&newNode.stats.NodeAdded, 1)

	return newNode
}
go
{ "resource": "" }
q9964
refreshSessionToken
train
// refreshSessionToken re-authenticates the node's tend connection when an
// externally-authenticated session token has expired, storing the new
// token and its expiration. No-op when external auth is not in use or the
// current token is still valid.
func (nd *Node) refreshSessionToken() error {
	// no session token to refresh
	if !nd.cluster.clientPolicy.RequiresAuthentication() || nd.cluster.clientPolicy.AuthMode != AuthModeExternal {
		return nil
	}

	var deadline time.Time
	deadlineIfc := nd._sessionExpiration.Load()
	if deadlineIfc != nil {
		deadline = deadlineIfc.(time.Time)
	}

	// token without an expiration, or not yet expired: nothing to do
	if deadline.IsZero() || time.Now().Before(deadline) {
		return nil
	}

	nd.tendConnLock.Lock()
	defer nd.tendConnLock.Unlock()

	if err := nd.initTendConn(nd.cluster.clientPolicy.LoginTimeout); err != nil {
		return err
	}

	command := newLoginCommand(nd.tendConn.dataBuffer)
	if err := command.login(&nd.cluster.clientPolicy, nd.tendConn, nd.cluster.Password()); err != nil {
		// Socket not authenticated. Do not put back into pool.
		nd.tendConn.Close()
		return err
	}

	nd._sessionToken.Store(command.SessionToken)
	nd._sessionExpiration.Store(command.SessionExpiration)
	return nil
}
go
{ "resource": "" }
q9965
GetConnection
train
func (nd *Node) GetConnection(timeout time.Duration) (conn *Connection, err error) { if timeout == 0 { timeout = _DEFAULT_TIMEOUT } deadline := time.Now().Add(timeout) return nd.getConnection(deadline, timeout) }
go
{ "resource": "" }
q9966
getConnection
train
// getConnection fetches a connection using the default pool hint (0).
func (nd *Node) getConnection(deadline time.Time, timeout time.Duration) (*Connection, error) {
	const defaultHint = 0
	return nd.getConnectionWithHint(deadline, timeout, defaultHint)
}
go
{ "resource": "" }
q9967
connectionLimitReached
train
// connectionLimitReached reports whether opening one more connection would
// exceed the configured queue size (only when LimitConnectionsToQueueSize
// is set). The increment-check-decrement dance probes the limit without
// holding a reservation; callers must re-check when actually opening.
func (nd *Node) connectionLimitReached() (res bool) {
	if nd.cluster.clientPolicy.LimitConnectionsToQueueSize {
		cc := nd.connectionCount.IncrementAndGet()
		if cc > nd.cluster.clientPolicy.ConnectionQueueSize {
			res = true
		}
		// release the probe immediately; this is only an advisory check
		nd.connectionCount.DecrementAndGet()
	}
	return res
}
go
{ "resource": "" }
q9968
newConnection
train
// newConnection opens, counts, and authenticates a new connection to this
// node. It enforces the per-node queue-size limit and (unless
// overrideThreshold) the cluster-wide opening-connection throttle.
// Failed connections are not returned to the pool and decrement the count.
func (nd *Node) newConnection(overrideThreshold bool) (*Connection, error) {
	// if connection count is limited and enough connections are already created, don't create a new one
	cc := nd.connectionCount.IncrementAndGet()
	if nd.cluster.clientPolicy.LimitConnectionsToQueueSize && cc > nd.cluster.clientPolicy.ConnectionQueueSize {
		nd.connectionCount.DecrementAndGet()
		atomic.AddInt64(&nd.stats.ConnectionsPoolEmpty, 1)
		return nil, ErrTooManyConnectionsForNode
	}

	// Check for opening connection threshold
	if !overrideThreshold && nd.cluster.clientPolicy.OpeningConnectionThreshold > 0 {
		ct := nd.cluster.connectionThreshold.IncrementAndGet()
		if ct > nd.cluster.clientPolicy.OpeningConnectionThreshold {
			nd.cluster.connectionThreshold.DecrementAndGet()
			return nil, ErrTooManyOpeningConnections
		}
		// release the throttle slot when this open attempt finishes
		defer nd.cluster.connectionThreshold.DecrementAndGet()
	}

	atomic.AddInt64(&nd.stats.ConnectionsAttempts, 1)
	conn, err := NewConnection(&nd.cluster.clientPolicy, nd.host)
	if err != nil {
		nd.connectionCount.DecrementAndGet()
		atomic.AddInt64(&nd.stats.ConnectionsFailed, 1)
		return nil, err
	}
	conn.node = nd

	// need to authenticate
	if err = conn.login(nd.sessionToken()); err != nil {
		atomic.AddInt64(&nd.stats.ConnectionsFailed, 1)
		// Socket not authenticated. Do not put back into pool.
		conn.Close()
		return nil, err
	}

	atomic.AddInt64(&nd.stats.ConnectionsSuccessful, 1)
	return conn, nil
}
go
{ "resource": "" }
q9969
makeConnectionForPool
train
func (nd *Node) makeConnectionForPool(deadline time.Time, hint byte) { if deadline.IsZero() { deadline = time.Now().Add(_DEFAULT_TIMEOUT) } // don't even try to make a new connection if connection limit is reached if nd.connectionLimitReached() { return } L: // don't loop forever; free the goroutine after the deadline if time.Now().After(deadline) { return } conn, err := nd.newConnection(false) if err != nil { // The following check can help break the loop under heavy load and remove // quite a lot of latency. if err == ErrTooManyConnectionsForNode { // The connection pool is already full. No need for more connections. return } Logger.Error("Error trying to making a connection to the node %s: %s", nd.String(), err.Error()) time.Sleep(time.Millisecond) goto L } nd.putConnectionWithHint(conn, hint) }
go
{ "resource": "" }
q9970
getConnectionWithHint
train
// getConnectionWithHint returns a live pooled connection for the given
// hint, lazily spawning a background goroutine to open one when the pool
// is empty. It spins (1ms sleeps) until a connection appears or the
// request's own socket deadline elapses, then configures the connection's
// timeouts and idle tracking before handing it out.
func (nd *Node) getConnectionWithHint(deadline time.Time, timeout time.Duration, hint byte) (conn *Connection, err error) {
	// deadline applied to this wait loop; only armed after the first miss
	var socketDeadline time.Time
	connReqd := false

L:
	// try to get a valid connection from the connection pool
	for conn = nd.connections.Poll(hint); conn != nil; conn = nd.connections.Poll(hint) {
		if conn.IsConnected() {
			break
		}
		// stale connection pulled from the pool; discard and keep polling
		conn.Close()
		conn = nil
	}

	// don't loop forever; return the goroutine after deadline.
	// deadline will always be set when requesting for a connection.
	if connReqd && time.Now().After(socketDeadline) {
		atomic.AddInt64(&nd.stats.ConnectionsPoolEmpty, 1)
		return nil, ErrConnectionPoolEmpty
	}

	if conn == nil {
		if !connReqd {
			// pool was empty: ask for a connection to be opened in the background
			go nd.makeConnectionForPool(deadline, hint)

			// do not request to open a connection more than once.
			connReqd = true

			// only try to set a deadline once without a penalty on each run.
			// most call to this function will find a connection in the queue.
			if timeout > 0 {
				socketDeadline = time.Now().Add(timeout)
			} else {
				socketDeadline = time.Now().Add(_DEFAULT_TIMEOUT)
			}
		}

		time.Sleep(time.Millisecond)
		goto L
	}

	if err = conn.SetTimeout(deadline, timeout); err != nil {
		atomic.AddInt64(&nd.stats.ConnectionsFailed, 1)

		// Do not put back into pool.
		conn.Close()
		return nil, err
	}

	conn.setIdleTimeout(nd.cluster.clientPolicy.IdleTimeout)
	conn.refresh()

	return conn, nil
}
go
{ "resource": "" }
q9971
putConnectionWithHint
train
func (nd *Node) putConnectionWithHint(conn *Connection, hint byte) bool { conn.refresh() if !nd.active.Get() || !nd.connections.Offer(conn, hint) { conn.Close() return false } return true }
go
{ "resource": "" }
q9972
IsActive
train
// IsActive reports whether this node is usable: non-nil, marked active,
// and past its initial partition refresh (generation >= -1).
func (nd *Node) IsActive() bool {
	if nd == nil {
		return false
	}
	return nd.active.Get() && nd.partitionGeneration.Get() >= -1
}
go
{ "resource": "" }
q9973
addAlias
train
func (nd *Node) addAlias(aliasToAdd *Host) { // Aliases are only referenced in the cluster tend goroutine, // so synchronization is not necessary. aliases := nd.GetAliases() if aliases == nil { aliases = []*Host{} } aliases = append(aliases, aliasToAdd) nd.setAliases(aliases) }
go
{ "resource": "" }
q9974
Close
train
func (nd *Node) Close() { nd.active.Set(false) atomic.AddInt64(&nd.stats.NodeRemoved, 1) nd.closeConnections() }
go
{ "resource": "" }
q9975
Equals
train
// Equals reports whether two non-nil nodes refer to the same cluster
// node, either by pointer identity or by matching node name.
func (nd *Node) Equals(other *Node) bool {
	if nd == nil || other == nil {
		return false
	}
	return nd == other || nd.name == other.name
}
go
{ "resource": "" }
q9976
MigrationInProgress
train
// MigrationInProgress queries the node's statistics and reports whether
// partition migration is underway (migrate_partitions_remaining present
// and non-zero).
func (nd *Node) MigrationInProgress() (bool, error) {
	values, err := nd.RequestStats(&nd.cluster.infoPolicy)
	if err != nil {
		return false, err
	}

	// if the migrate_partitions_remaining exists and is not `0`, then migration is in progress
	if migration, exists := values["migrate_partitions_remaining"]; exists && migration != "0" {
		return true, nil
	}

	// migration not in progress
	return false, nil
}
go
{ "resource": "" }
q9977
initTendConn
train
// initTendConn ensures the node's dedicated tend connection exists and is
// connected, then (re)applies the requested timeout to it. A timeout of 0
// yields a zero deadline (no deadline). Caller must hold tendConnLock.
func (nd *Node) initTendConn(timeout time.Duration) error {
	var deadline time.Time
	if timeout > 0 {
		deadline = time.Now().Add(timeout)
	}

	if nd.tendConn == nil || !nd.tendConn.IsConnected() {
		// Tend connection required a long timeout
		tendConn, err := nd.getConnection(deadline, timeout)
		if err != nil {
			return err
		}

		nd.tendConn = tendConn
	}

	// Set timeout for tend conn
	return nd.tendConn.SetTimeout(deadline, timeout)
}
go
{ "resource": "" }
q9978
requestInfoWithRetry
train
func (nd *Node) requestInfoWithRetry(policy *InfoPolicy, n int, name ...string) (res map[string]string, err error) { for i := 0; i < n; i++ { if res, err = nd.requestInfo(policy.Timeout, name...); err == nil { return res, nil } Logger.Error("Error occurred while fetching info from the server node %s: %s", nd.host.String(), err.Error()) time.Sleep(100 * time.Millisecond) } // return the last error return nil, err }
go
{ "resource": "" }
q9979
requestRawInfo
train
func (nd *Node) requestRawInfo(policy *InfoPolicy, name ...string) (*info, error) { nd.tendConnLock.Lock() defer nd.tendConnLock.Unlock() if err := nd.initTendConn(policy.Timeout); err != nil { return nil, err } response, err := newInfo(nd.tendConn, name...) if err != nil { nd.tendConn.Close() return nil, err } return response, nil }
go
{ "resource": "" }
q9980
RequestStats
train
func (node *Node) RequestStats(policy *InfoPolicy) (map[string]string, error) { infoMap, err := node.RequestInfo(policy, "statistics") if err != nil { return nil, err } res := map[string]string{} v, exists := infoMap["statistics"] if !exists { return res, nil } values := strings.Split(v, ";") for i := range values { kv := strings.Split(values[i], "=") if len(kv) > 1 { res[kv[0]] = kv[1] } } return res, nil }
go
{ "resource": "" }
q9981
sessionToken
train
func (nd *Node) sessionToken() []byte { var deadline time.Time deadlineIfc := nd._sessionExpiration.Load() if deadlineIfc != nil { deadline = deadlineIfc.(time.Time) } if deadline.IsZero() || time.Now().After(deadline) { return nil } st := nd._sessionToken.Load() if st != nil { return st.([]byte) } return nil }
go
{ "resource": "" }
q9982
Rack
train
func (nd *Node) Rack(namespace string) (int, error) { racks := nd.racks.Load().(map[string]int) v, exists := racks[namespace] if exists { return v, nil } return -1, newAerospikeNodeError(nd, RACK_NOT_DEFINED) }
go
{ "resource": "" }
q9983
newPartitionByKey
train
// newPartitionByKey derives the partition for a key from the first two
// bytes of its digest, masked into the [0, _PARTITIONS) range.
func newPartitionByKey(key *Key) Partition {
	return Partition{
		Namespace: key.namespace,

		// CAN'T USE MOD directly - mod will give negative numbers.
		// First AND makes positive and negative correctly, then mod.
		// For any x, y : x % 2^y = x & (2^y - 1); the second method is twice as fast
		PartitionId: int(Buffer.LittleBytesToInt32(key.digest[:], 0)&0xFFFF) & (_PARTITIONS - 1),
	}
}
go
{ "resource": "" }
q9984
NewPartition
train
// NewPartition creates a partition handle for an explicit namespace and
// partition ID.
func NewPartition(namespace string, partitionID int) *Partition {
	p := &Partition{
		Namespace:   namespace,
		PartitionId: partitionID,
	}
	return p
}
go
{ "resource": "" }
q9985
Equals
train
// Equals reports whether two partitions identify the same namespace
// partition.
func (ptn *Partition) Equals(other *Partition) bool {
	if ptn.Namespace != other.Namespace {
		return false
	}
	return ptn.PartitionId == other.PartitionId
}
go
{ "resource": "" }
q9986
setWrite
train
// setWrite builds the wire payload for a write-type command against the
// given key. Bins may arrive either as a []*Bin slice or as a BinMap;
// binMap takes precedence when non-nil. The payload size is estimated
// first so the buffer can be allocated once, then header, key fields and
// bin operations are written in protocol order.
func (cmd *baseCommand) setWrite(policy *WritePolicy, operation OperationType, key *Key, bins []*Bin, binMap BinMap) error {
	cmd.begin()

	// Estimate space for the key fields (optionally including the user key).
	fieldCount, err := cmd.estimateKeySize(key, policy.SendKey)
	if err != nil {
		return err
	}

	// Estimate space for each bin operation from whichever container was given.
	if binMap == nil {
		for i := range bins {
			if err := cmd.estimateOperationSizeForBin(bins[i]); err != nil {
				return err
			}
		}
	} else {
		for name, value := range binMap {
			if err := cmd.estimateOperationSizeForBinNameAndValue(name, value); err != nil {
				return err
			}
		}
	}

	if err := cmd.sizeBuffer(); err != nil {
		return err
	}

	// Operation count in the header depends on which container was supplied.
	if binMap == nil {
		cmd.writeHeaderWithPolicy(policy, 0, _INFO2_WRITE, fieldCount, len(bins))
	} else {
		cmd.writeHeaderWithPolicy(policy, 0, _INFO2_WRITE, fieldCount, len(binMap))
	}

	cmd.writeKey(key, policy.SendKey)

	if binMap == nil {
		for i := range bins {
			if err := cmd.writeOperationForBin(bins[i], operation); err != nil {
				return err
			}
		}
	} else {
		for name, value := range binMap {
			if err := cmd.writeOperationForBinNameAndValue(name, value, operation); err != nil {
				return err
			}
		}
	}

	cmd.end()
	return nil
}
go
{ "resource": "" }
q9987
setDelete
train
// setDelete builds the wire payload for a record delete. The user key is
// never sent for deletes (SendKey is effectively ignored), and the
// message carries no operations — just the WRITE|DELETE header flags
// plus the key fields.
func (cmd *baseCommand) setDelete(policy *WritePolicy, key *Key) error {
	cmd.begin()
	fieldCount, err := cmd.estimateKeySize(key, false)
	if err != nil {
		return err
	}
	if err := cmd.sizeBuffer(); err != nil {
		return err
	}
	cmd.writeHeaderWithPolicy(policy, 0, _INFO2_WRITE|_INFO2_DELETE, fieldCount, 0)
	cmd.writeKey(key, false)
	cmd.end()
	return nil
}
go
{ "resource": "" }
q9988
setTouch
train
// setTouch builds the wire payload for a touch command, carrying a single
// TOUCH operation for the given key.
func (cmd *baseCommand) setTouch(policy *WritePolicy, key *Key) error {
	cmd.begin()
	fieldCount, err := cmd.estimateKeySize(key, policy.SendKey)
	if err != nil {
		return err
	}
	// Reserve space for the single TOUCH operation.
	cmd.estimateOperationSize()
	if err := cmd.sizeBuffer(); err != nil {
		return err
	}
	cmd.writeHeaderWithPolicy(policy, 0, _INFO2_WRITE, fieldCount, 1)
	cmd.writeKey(key, policy.SendKey)
	cmd.writeOperationForOperationType(_TOUCH)
	cmd.end()
	return nil
}
go
{ "resource": "" }
q9989
setExists
train
// setExists builds the wire payload for a record existence check: a read
// flagged NOBINDATA so no bin values are returned, and with zero
// operations.
func (cmd *baseCommand) setExists(policy *BasePolicy, key *Key) error {
	cmd.begin()
	fieldCount, err := cmd.estimateKeySize(key, false)
	if err != nil {
		return err
	}
	if err := cmd.sizeBuffer(); err != nil {
		return err
	}
	cmd.writeHeader(policy, _INFO1_READ|_INFO1_NOBINDATA, 0, fieldCount, 0)
	cmd.writeKey(key, false)
	cmd.end()
	return nil
}
go
{ "resource": "" }
q9990
setReadHeader
train
// setReadHeader builds the wire payload for a metadata-only read
// (READ|NOBINDATA). A single operation with an empty bin name is written,
// matching the operation count of 1 declared in the header.
func (cmd *baseCommand) setReadHeader(policy *BasePolicy, key *Key) error {
	cmd.begin()
	fieldCount, err := cmd.estimateKeySize(key, false)
	if err != nil {
		return err
	}
	cmd.estimateOperationSizeForBinName("")
	if err := cmd.sizeBuffer(); err != nil {
		return err
	}
	cmd.writeHeader(policy, _INFO1_READ|_INFO1_NOBINDATA, 0, fieldCount, 1)
	cmd.writeKey(key, false)
	cmd.writeOperationForBinName("", _READ)
	cmd.end()
	return nil
}
go
{ "resource": "" }
q9991
setOperate
train
// setOperate builds the wire payload for a multi-operation command. It
// accumulates read/write protocol flags from the operation types, then
// writes header, key fields and each operation in order. The returned
// bool reports whether the command contains at least one write operation.
func (cmd *baseCommand) setOperate(policy *WritePolicy, key *Key, operations []*Operation) (bool, error) {
	if len(operations) == 0 {
		return false, NewAerospikeError(PARAMETER_ERROR, "No operations were passed.")
	}

	cmd.begin()
	fieldCount := 0
	readAttr := 0
	writeAttr := 0
	hasWrite := false
	readBin := false
	readHeader := false
	RespondPerEachOp := policy.RespondPerEachOp

	for i := range operations {
		switch operations[i].opType {
		case _MAP_READ:
			// Map operations require RespondPerEachOp to be true.
			RespondPerEachOp = true
			// Fall through to read.
			fallthrough
		case _READ, _CDT_READ:
			if !operations[i].headerOnly {
				readAttr |= _INFO1_READ

				// Read all bins if no bin is specified.
				if operations[i].binName == "" {
					readAttr |= _INFO1_GET_ALL
				}
				readBin = true
			} else {
				readAttr |= _INFO1_READ
				readHeader = true
			}
		case _MAP_MODIFY:
			// Map operations require RespondPerEachOp to be true.
			RespondPerEachOp = true
			// Fall through to default.
			fallthrough
		default:
			writeAttr = _INFO2_WRITE
			hasWrite = true
		}
		cmd.estimateOperationSizeForOperation(operations[i])
	}

	// The user key field is only sent alongside writes.
	ksz, err := cmd.estimateKeySize(key, policy.SendKey && hasWrite)
	if err != nil {
		return hasWrite, err
	}
	fieldCount += ksz

	if err := cmd.sizeBuffer(); err != nil {
		return hasWrite, err
	}

	// Only header reads and no bin reads: request metadata only.
	if readHeader && !readBin {
		readAttr |= _INFO1_NOBINDATA
	}

	if RespondPerEachOp {
		writeAttr |= _INFO2_RESPOND_ALL_OPS
	}

	// Write commands need the write-policy header; pure reads use the base one.
	if writeAttr != 0 {
		cmd.writeHeaderWithPolicy(policy, readAttr, writeAttr, fieldCount, len(operations))
	} else {
		cmd.writeHeader(&policy.BasePolicy, readAttr, writeAttr, fieldCount, len(operations))
	}
	cmd.writeKey(key, policy.SendKey && hasWrite)

	for _, operation := range operations {
		if err := cmd.writeOperationForOperation(operation); err != nil {
			return hasWrite, err
		}
	}
	cmd.end()
	return hasWrite, nil
}
go
{ "resource": "" }
q9992
writeHeader
train
// writeHeader writes the generic message header (used by read-style
// commands) into the command buffer at fixed byte offsets. The total
// message size is written later, elsewhere.
func (cmd *baseCommand) writeHeader(policy *BasePolicy, readAttr int, writeAttr int, fieldCount int, operationCount int) {
	infoAttr := 0
	if policy.LinearizeRead {
		infoAttr |= _INFO3_LINEARIZE_READ
	}

	if policy.ConsistencyLevel == CONSISTENCY_ALL {
		readAttr |= _INFO1_CONSISTENCY_ALL
	}

	// Write all header data except total size which must be written last.
	cmd.dataBuffer[8] = _MSG_REMAINING_HEADER_SIZE // Message header length.
	cmd.dataBuffer[9] = byte(readAttr)
	cmd.dataBuffer[10] = byte(writeAttr)
	cmd.dataBuffer[11] = byte(infoAttr)

	// Zero bytes 12-25 (result code, generation, expiration and timeout
	// slots — see writeHeaderWithPolicy for the per-byte layout).
	for i := 12; i < 26; i++ {
		cmd.dataBuffer[i] = 0
	}
	cmd.dataOffset = 26
	cmd.WriteInt16(int16(fieldCount))
	cmd.WriteInt16(int16(operationCount))
	cmd.dataOffset = int(_MSG_TOTAL_HEADER_SIZE)
}
go
{ "resource": "" }
q9993
writeHeaderWithPolicy
train
// writeHeaderWithPolicy writes the message header for write-style
// commands, translating the write policy (record-exists action,
// generation policy, commit level, linearized reads, durable delete)
// into the protocol's INFO1/INFO2/INFO3 flag bits before laying out the
// header bytes at fixed offsets. Total size is written later, elsewhere.
func (cmd *baseCommand) writeHeaderWithPolicy(policy *WritePolicy, readAttr int, writeAttr int, fieldCount int, operationCount int) {
	// Set flags.
	generation := uint32(0)
	infoAttr := 0

	switch policy.RecordExistsAction {
	case UPDATE:
	case UPDATE_ONLY:
		infoAttr |= _INFO3_UPDATE_ONLY
	case REPLACE:
		infoAttr |= _INFO3_CREATE_OR_REPLACE
	case REPLACE_ONLY:
		infoAttr |= _INFO3_REPLACE_ONLY
	case CREATE_ONLY:
		writeAttr |= _INFO2_CREATE_ONLY
	}

	switch policy.GenerationPolicy {
	case NONE:
	case EXPECT_GEN_EQUAL:
		generation = policy.Generation
		writeAttr |= _INFO2_GENERATION
	case EXPECT_GEN_GT:
		generation = policy.Generation
		writeAttr |= _INFO2_GENERATION_GT
	}

	if policy.CommitLevel == COMMIT_MASTER {
		infoAttr |= _INFO3_COMMIT_MASTER
	}

	if policy.LinearizeRead {
		infoAttr |= _INFO3_LINEARIZE_READ
	}

	if policy.ConsistencyLevel == CONSISTENCY_ALL {
		readAttr |= _INFO1_CONSISTENCY_ALL
	}

	if policy.DurableDelete {
		writeAttr |= _INFO2_DURABLE_DELETE
	}

	// Write all header data except total size which must be written last.
	cmd.dataBuffer[8] = _MSG_REMAINING_HEADER_SIZE // Message header length.
	cmd.dataBuffer[9] = byte(readAttr)
	cmd.dataBuffer[10] = byte(writeAttr)
	cmd.dataBuffer[11] = byte(infoAttr)
	cmd.dataBuffer[12] = 0 // unused
	cmd.dataBuffer[13] = 0 // clear the result code
	cmd.dataOffset = 14
	cmd.WriteUint32(generation) // bytes 14-17
	cmd.dataOffset = 18
	cmd.WriteUint32(policy.Expiration) // bytes 18-21

	// Initialize timeout. It will be written later.
	cmd.dataBuffer[22] = 0
	cmd.dataBuffer[23] = 0
	cmd.dataBuffer[24] = 0
	cmd.dataBuffer[25] = 0

	cmd.dataOffset = 26
	cmd.WriteInt16(int16(fieldCount))
	cmd.WriteInt16(int16(operationCount))
	cmd.dataOffset = int(_MSG_TOTAL_HEADER_SIZE)
}
go
{ "resource": "" }
q9994
String
train
// String implements fmt.Stringer, formatting the record as "<key> <bins>".
func (rc *Record) String() string {
	return fmt.Sprintf("%s %v", rc.Key, rc.Bins)
}
go
{ "resource": "" }
q9995
streamtoword
train
// streamtoword packs four consecutive bytes of data, starting at off and
// wrapping around at the end of the slice, into a big-endian word. It
// returns the word together with the advanced (wrapped) offset.
func streamtoword(data []byte, off int) (uint, int) {
	word := uint(0)
	for i := 0; i < 4; i++ {
		word = word<<8 | uint(data[off])
		off++
		if off == len(data) {
			off = 0
		}
	}
	return word, off
}
go
{ "resource": "" }
q9996
key
train
// key mixes the given key bytes into the cipher's P-array and then
// regenerates the P and S boxes from the evolving cipher state — a
// Blowfish-style key schedule (this appears to be a port of jBCrypt;
// NOTE(review): confirm against the reference implementation).
func (c *cipher) key(key []byte) {
	var word uint
	off := 0
	lr := []uint{0, 0}
	plen := len(c.P)
	slen := len(c.S)

	// XOR successive 32-bit words of the cyclically repeated key into P.
	for i := 0; i < plen; i++ {
		word, off = streamtoword(key, off)
		c.P[i] = c.P[i] ^ word
	}

	// Replace P two words at a time with enciphered output; lr carries the
	// state forward between encipher calls.
	for i := 0; i < plen; i += 2 {
		c.encipher(lr, 0)
		c.P[i] = lr[0]
		c.P[i+1] = lr[1]
	}

	// Same for the S boxes.
	for i := 0; i < slen; i += 2 {
		c.encipher(lr, 0)
		c.S[i] = lr[0]
		c.S[i+1] = lr[1]
	}
}
go
{ "resource": "" }
q9997
crypt_raw
train
// crypt_raw runs the bcrypt core: expensive key setup seeded with salt
// and password, 2^log_rounds alternating rekeying passes (the tunable
// work factor), then 64 rounds of enciphering the magic ciphertext.
// It returns the resulting 24-byte digest.
func crypt_raw(password []byte, salt []byte, log_rounds uint) []byte {
	c := &cipher{P: p_orig, S: s_orig, data: bf_crypt_ciphertext}
	rounds := 1 << log_rounds

	c.ekskey(salt, password)
	// Alternate rekeying with password and salt 2^log_rounds times.
	for i := 0; i < rounds; i++ {
		c.key(password)
		c.key(salt)
	}

	// Encrypt the six-word ciphertext 64 times, two words per encipher call.
	for i := 0; i < 64; i++ {
		for j := 0; j < (6 >> 1); j++ {
			c.encipher(c.data[:], j<<1)
		}
	}

	// Serialize the six 32-bit state words big-endian into 24 bytes.
	ret := make([]byte, 24)
	for i := 0; i < 6; i++ {
		k := i << 2
		ret[k] = (byte)((c.data[i] >> 24) & 0xff)
		ret[k+1] = (byte)((c.data[i] >> 16) & 0xff)
		ret[k+2] = (byte)((c.data[i] >> 8) & 0xff)
		ret[k+3] = (byte)(c.data[i] & 0xff)
	}
	return ret
}
go
{ "resource": "" }
q9998
NewMessage
train
// NewMessage wraps data in a Message of the given type. The protocol
// version is fixed at 2; the header's DataLen encodes len(data).
func NewMessage(mtype messageType, data []byte) *Message {
	hdr := MessageHeader{
		Version: 2,
		Type:    uint8(mtype),
		DataLen: msgLenToBytes(int64(len(data))),
	}
	return &Message{MessageHeader: hdr, Data: data}
}
go
{ "resource": "" }
q9999
Resize
train
func (msg *Message) Resize(newSize int64) error { if newSize > maxAllowedBufferSize || newSize < 0 { return fmt.Errorf("Requested new buffer size is invalid. Requested: %d, allowed: 0..%d", newSize, maxAllowedBufferSize) } if int64(len(msg.Data)) == newSize { return nil } msg.Data = make([]byte, newSize) return nil }
go
{ "resource": "" }