| id (string, 2–7 chars) | text (string, 17–51.2k chars) | title (string, 1 distinct value) |
|---|---|---|
c175400 | }
if y.SameKey(key, it.Key()) {
if version := y.ParseTs(it.Key()); maxVs.Version < version {
maxVs = it.Value()
maxVs.Version = version
}
}
}
return maxVs, decr()
} | |
c175401 | int) bool {
return y.CompareKeys(kr.left, s.tables[i].Biggest()) <= 0
})
right := sort.Search(len(s.tables), func(i int) bool {
return y.CompareKeys(kr.right, s.tables[i].Smallest()) < 0
})
return left, right
} | |
c175402 | item.Key(), item.Version(), item.meta)
} | |
c175403 | y.SafeCopy(dst, item.key)
} | |
c175404 |
klen := int64(len(item.key) + 8) // 8 bytes for timestamp.
return int64(vp.Len) - klen - headerBufSize - crc32.Size
} | |
c175405 | = key // This key must be without the timestamp.
opt.prefixIsKey = true
return txn.NewIterator(opt)
} | |
c175406 | bytes.HasPrefix(it.item.key, it.opt.Prefix)
} | |
c175407 | it.Valid() && bytes.HasPrefix(it.item.key, prefix)
} | |
c175408 | item = l.pop()
}
}
waitFor(it.waste)
waitFor(it.data)
// TODO: We could handle this error.
_ = it.txn.db.vlog.decrIteratorCount()
atomic.AddInt32(&it.txn.numIterators, -1)
} | |
c175409 | return false
}
// Only track in forward direction.
// We should update lastKey as soon as we find a different key in our snapshot.
// Consider keys: a 5, b 7 (del), b 5. When iterating, lastKey = a.
// Then we see b 7, which is deleted. If we don't store lastKey = b, we'll then return b 5,
// which is wrong. Therefore, update lastKey here.
it.lastKey = y.SafeCopy(it.lastKey, mi.Key())
}
FILL:
// If deleted, advance and return.
vs := mi.Value()
if isDeletedOrExpired(vs.Meta, vs.ExpiresAt) {
mi.Next()
return false
}
item := it.newItem()
it.fill(item)
// fill item based on current cursor position. All Next calls have returned, so reaching here
// means no Next was called.
mi.Next() // Advance but no fill item yet.
if !it.opt.Reverse || !mi.Valid() { // Forward direction, or invalid.
setItem(item)
return true
}
// Reverse direction.
nextTs := y.ParseTs(mi.Key())
mik := y.ParseKey(mi.Key())
if nextTs <= it.readTs && bytes.Equal(mik, item.key) {
// This is a valid potential candidate.
goto FILL
}
// Ignore the next candidate. Return the current one.
setItem(item)
return true
} | |
c175410 | it.iitr.Rewind()
it.prefetch()
return
}
if !it.opt.Reverse {
key = y.KeyWithTs(key, it.txn.readTs)
} else {
key = y.KeyWithTs(key, 0)
}
it.iitr.Seek(key)
it.prefetch()
} | |
c175411 | db,
key: key,
closer: y.NewCloser(1),
}
go op.runCompactions(dur)
return op
} | |
c175412 | op.iterateAndMerge(txn)
return err
})
if err == errNoMerge {
return existing, nil
}
return existing, err
} | |
c175413 | level.
// Update: We should not be checking size here. Compaction priority already did the size checks.
// Here we should just be executing the wish of others.
thisLevel.ranges = append(thisLevel.ranges, cd.thisRange)
nextLevel.ranges = append(nextLevel.ranges, cd.nextRange)
thisLevel.delSize += cd.thisSize
return true
} | |
c175414 | pointer.
out := &Arena{
n: 1,
buf: make([]byte, n),
}
return out
} | |
c175415 | l)
y.AssertTruef(int(n) <= len(s.buf),
"Arena too small, toWrite:%d newTotal:%d limit:%d",
l, n, len(s.buf))
// Return the aligned offset.
m := (n - l + uint32(nodeAlign)) & ^uint32(nodeAlign)
return m
} | |
c175416 |
return (*node)(unsafe.Pointer(&s.buf[offset]))
} | |
c175417 | return s.buf[offset : offset+uint32(size)]
} | |
c175418 | y.ValueStruct) {
ret.Decode(s.buf[offset : offset+uint32(size)])
return
} | |
c175419 | uint32(uintptr(unsafe.Pointer(nd)) - uintptr(unsafe.Pointer(&s.buf[0])))
} | |
c175420 | expvar.NewMap("badger_lsm_bloom_hits_total")
NumGets = expvar.NewInt("badger_gets_total")
NumPuts = expvar.NewInt("badger_puts_total")
NumBlockedPuts = expvar.NewInt("badger_blocked_puts_total")
NumMemtableGets = expvar.NewInt("badger_memtable_gets_total")
LSMSize = expvar.NewMap("badger_lsm_size_bytes")
VlogSize = expvar.NewMap("badger_vlog_size_bytes")
PendingWrites = expvar.NewMap("badger_pending_writes_total")
} | |
c175421 | not referenced in MANIFEST\n", id)
filename := table.NewFilename(id, kv.opt.Dir)
if err := os.Remove(filename); err != nil {
return y.Wrapf(err, "While removing table %d", id)
}
}
}
return nil
} | |
c175422 |
// Now that manifest has been successfully written, we can delete the tables.
for _, l := range s.levels {
l.Lock()
l.totalSize = 0
l.tables = l.tables[:0]
l.Unlock()
}
for _, table := range all {
if err := table.DecrRef(); err != nil {
return 0, err
}
}
return len(all), nil
} | |
c175423 |
if !absent {
tables = append(tables, table)
}
}
l.RUnlock()
if len(tables) == 0 {
continue
}
cd := compactDef{
elog: trace.New(fmt.Sprintf("Badger.L%d", l.level), "Compact"),
thisLevel: l,
nextLevel: l,
top: []*table.Table{},
bot: tables,
dropPrefix: prefix,
}
if err := s.runCompactDef(l.level, cd); err != nil {
opt.Warningf("While running compact def: %+v. Error: %v", cd, err)
return err
}
}
return nil
} | |
c175424 | s.levels[0].numTables() >= s.kv.opt.NumLevelZeroTables
} | |
c175425 | are expected to
// remain unchanged.
if l == 0 {
if !s.fillTablesL0(&cd) {
return errFillTables
}
} else {
if !s.fillTables(&cd) {
return errFillTables
}
}
defer s.cstatus.delete(cd) // Remove the ranges from compaction status.
s.kv.opt.Infof("Running for level: %d\n", cd.thisLevel.level)
s.cstatus.toLog(cd.elog)
if err := s.runCompactDef(l, cd); err != nil {
// This compaction couldn't be done successfully.
s.kv.opt.Warningf("LOG Compact FAILED with error: %+v: %+v", err, cd)
return err
}
s.cstatus.toLog(cd.elog)
s.kv.opt.Infof("Compaction for level: %d DONE", cd.thisLevel.level)
return nil
} | |
c175426 | h.get(key) // Calls h.RLock() and h.RUnlock().
if err != nil {
return y.ValueStruct{}, errors.Wrapf(err, "get key: %q", key)
}
if vs.Value == nil && vs.Meta == 0 {
continue
}
if maxVs == nil || vs.Version == version {
return vs, nil
}
if maxVs.Version < vs.Version {
*maxVs = vs
}
}
if maxVs != nil {
return *maxVs, nil
}
return y.ValueStruct{}, nil
} | |
c175427 | acc := account{
Id: i,
Bal: toUint64(val),
}
accounts = append(accounts, acc)
total += acc.Bal
}
if total != expected {
log.Printf("Balance did NOT match up. Expected: %d. Received: %d",
expected, total)
atomic.AddInt32(&stopAll, 1)
return accounts, errFailure
}
return accounts, nil
} | |
c175428 | violation found at ts: %d\n", lowTs)
return 0
}
midTs := (lowTs + highTs) / 2
log.Println()
log.Printf("Checking. low=%d. high=%d. mid=%d\n", lowTs, highTs, midTs)
err := checkAt(midTs)
if err == badger.ErrKeyNotFound || err == nil {
// If no failure, move to higher ts.
return findFirstInvalidTxn(db, midTs+1, highTs)
}
// Found an error.
return findFirstInvalidTxn(db, lowTs, midTs)
} | |
c175429 | index,
Term: term,
Peers: encodePeers(configuration, trans),
Configuration: configuration,
ConfigurationIndex: configurationIndex,
},
contents: &bytes.Buffer{},
}
m.hasSnapshot = true
m.latest = sink
return sink, nil
} | |
c175430 | []*SnapshotMeta{}, nil
}
return []*SnapshotMeta{&m.latest.meta}, nil
} | |
c175431 | fmt.Errorf("[ERR] snapshot: failed to open snapshot id: %s", id)
}
return &m.latest.meta, ioutil.NopCloser(m.latest.contents), nil
} | |
c175432 |
s.meta.Size += written
return int(written), err
} | |
c175433 | &FileSnapshotStore{
path: path,
retain: retain,
logger: logger,
}
// Do a permissions test
if err := store.testPermissions(); err != nil {
return nil, fmt.Errorf("permissions test failed: %v", err)
}
return store, nil
} | |
c175434 |
if logOutput == nil {
logOutput = os.Stderr
}
return NewFileSnapshotStoreWithLogger(base, retain, log.New(logOutput, "", log.LstdFlags))
} | |
c175435 |
return fmt.Sprintf("%d-%d-%d", term, index, msec)
} | |
c175436 | ConfigurationIndex: configurationIndex,
},
CRC: nil,
},
}
// Write out the meta data
if err := sink.writeMeta(); err != nil {
f.logger.Printf("[ERR] snapshot: Failed to write metadata: %v", err)
return nil, err
}
// Open the state file
statePath := filepath.Join(path, stateFilePath)
fh, err := os.Create(statePath)
if err != nil {
f.logger.Printf("[ERR] snapshot: Failed to create state file: %v", err)
return nil, err
}
sink.stateFile = fh
// Create a CRC64 hash
sink.stateHash = crc64.New(crc64.MakeTable(crc64.ECMA))
// Wrap both the hash and file in a MultiWriter with buffering
multi := io.MultiWriter(sink.stateFile, sink.stateHash)
sink.buffered = bufio.NewWriter(multi)
// Done
return sink, nil
} | |
c175437 | []*SnapshotMeta
for _, meta := range snapshots {
snapMeta = append(snapMeta, &meta.SnapshotMeta)
if len(snapMeta) == f.retain {
break
}
}
return snapMeta, nil
} | |
c175438 | version.
if meta.Version < SnapshotVersionMin || meta.Version > SnapshotVersionMax {
f.logger.Printf("[WARN] snapshot: Snapshot version for %v not supported: %d", dirName, meta.Version)
continue
}
// Append, but only return up to the retain count
snapMeta = append(snapMeta, meta)
}
// Sort the snapshot, reverse so we get new -> old
sort.Sort(sort.Reverse(snapMetaSlice(snapMeta)))
return snapMeta, nil
} | |
c175439 | err != nil {
return nil, err
}
defer fh.Close()
// Buffer the file IO
buffered := bufio.NewReader(fh)
// Read in the JSON
meta := &fileSnapshotMeta{}
dec := json.NewDecoder(buffered)
if err := dec.Decode(meta); err != nil {
return nil, err
}
return meta, nil
} | |
c175440 | the hash
_, err = io.Copy(stateHash, fh)
if err != nil {
f.logger.Printf("[ERR] snapshot: Failed to read state file: %v", err)
fh.Close()
return nil, nil, err
}
// Verify the hash
computed := stateHash.Sum(nil)
if bytes.Compare(meta.CRC, computed) != 0 {
f.logger.Printf("[ERR] snapshot: CRC checksum failed (stored: %v computed: %v)",
meta.CRC, computed)
fh.Close()
return nil, nil, fmt.Errorf("CRC mismatch")
}
// Seek to the start
if _, err := fh.Seek(0, 0); err != nil {
f.logger.Printf("[ERR] snapshot: State file seek failed: %v", err)
fh.Close()
return nil, nil, err
}
// Return a buffered file
buffered := &bufferedFile{
bh: bufio.NewReader(fh),
fh: fh,
}
return &meta.SnapshotMeta, buffered, nil
} | |
c175441 | path)
if err := os.RemoveAll(path); err != nil {
f.logger.Printf("[ERR] snapshot: Failed to reap snapshot %v: %v", path, err)
return err
}
}
return nil
} | |
c175442 |
return s.buffered.Write(b)
} | |
c175443 | parentFH, err := os.Open(s.parentDir)
defer parentFH.Close()
if err != nil {
s.logger.Printf("[ERR] snapshot: Failed to open snapshot parent directory %v, error: %v", s.parentDir, err)
return err
}
if err = parentFH.Sync(); err != nil {
s.logger.Printf("[ERR] snapshot: Failed syncing parent directory %v, error: %v", s.parentDir, err)
return err
}
}
// Reap any old snapshots
if err := s.store.ReapSnapshots(); err != nil {
return err
}
return nil
} | |
c175444 | handles
if err := s.finalize(); err != nil {
s.logger.Printf("[ERR] snapshot: Failed to finalize snapshot: %v", err)
return err
}
// Attempt to remove all artifacts
return os.RemoveAll(s.dir)
} | |
c175445 |
// Set the file size, check after we close
if statErr != nil {
return statErr
}
s.meta.Size = stat.Size()
// Set the CRC
s.meta.CRC = s.stateHash.Sum(nil)
return nil
} | |
c175446 |
enc := json.NewEncoder(buffered)
if err := enc.Encode(&s.meta); err != nil {
return err
}
if err = buffered.Flush(); err != nil {
return err
}
if err = fh.Sync(); err != nil {
return err
}
return nil
} | |
c175447 |
timeout: config.Timeout,
TimeoutScale: DefaultTimeoutScale,
serverAddressProvider: config.ServerAddressProvider,
}
// Create the connection context and then start our listener.
trans.setupStreamContext()
go trans.listen()
return trans
} | |
c175448 |
n.streamCtx = ctx
n.streamCancel = cancel
} | |
c175449 | defer n.streamCtxLock.RUnlock()
return n.streamCtx
} | |
c175450 | defer n.heartbeatFnLock.Unlock()
n.heartbeatFn = cb
} | |
c175451 | we can create
// connection handlers that are holding a context that will never be
// cancelable.
n.streamCtxLock.Lock()
n.streamCancel()
n.setupStreamContext()
n.streamCtxLock.Unlock()
} | |
c175452 |
if !n.shutdown {
close(n.shutdownCh)
n.stream.Close()
n.shutdown = true
}
return nil
} | |
c175453 |
var conn *netConn
num := len(conns)
conn, conns[num-1] = conns[num-1], nil
n.connPool[target] = conns[:num-1]
return conn
} | |
c175454 | address := n.getProviderAddressOrFallback(id, target)
return n.getConn(address)
} | |
c175455 | bufio.NewReader(conn),
w: bufio.NewWriter(conn),
}
// Setup encoder/decoders
netConn.dec = codec.NewDecoder(netConn.r, &codec.MsgpackHandle{})
netConn.enc = codec.NewEncoder(netConn.w, &codec.MsgpackHandle{})
// Done
return netConn, nil
} | |
c175456 | if !n.IsShutdown() && len(conns) < n.maxPool {
n.connPool[key] = append(conns, conn)
} else {
conn.Release()
}
} | |
c175457 | loopDelay = maxDelay
}
if !n.IsShutdown() {
n.logger.Printf("[ERR] raft-net: Failed to accept connection: %v", err)
}
select {
case <-n.shutdownCh:
return
case <-time.After(loopDelay):
continue
}
}
// No error, reset loop delay
loopDelay = 0
n.logger.Printf("[DEBUG] raft-net: %v accepted connection from: %v", n.LocalAddr(), conn.RemoteAddr())
// Handle the connection in dedicated routine
go n.handleConn(n.getStreamContext(), conn)
}
} | |
c175458 | if err != io.EOF {
n.logger.Printf("[ERR] raft-net: Failed to decode incoming command: %v", err)
}
return
}
if err := w.Flush(); err != nil {
n.logger.Printf("[ERR] raft-net: Failed to flush response: %v", err)
return
}
}
} | |
c175459 | fmt.Errorf("unknown rpc type %d", rpcType)
}
// Check for heartbeat fast-path
if isHeartbeat {
n.heartbeatFnLock.Lock()
fn := n.heartbeatFn
n.heartbeatFnLock.Unlock()
if fn != nil {
fn(rpc)
goto RESP
}
}
// Dispatch the RPC
select {
case n.consumeCh <- rpc:
case <-n.shutdownCh:
return ErrTransportShutdown
}
// Wait for response
RESP:
select {
case resp := <-respCh:
// Send the error first
respErr := ""
if resp.Error != nil {
respErr = resp.Error.Error()
}
if err := enc.Encode(respErr); err != nil {
return err
}
// Send the response
if err := enc.Encode(resp.Response); err != nil {
return err
}
case <-n.shutdownCh:
return ErrTransportShutdown
}
return nil
} | |
c175460 | conn.dec.Decode(resp); err != nil {
conn.Release()
return false, err
}
// Format an error if any
if rpcError != "" {
return true, fmt.Errorf(rpcError)
}
return true, nil
} | |
c175461 | != nil {
conn.Release()
return err
}
// Flush
if err := conn.w.Flush(); err != nil {
conn.Release()
return err
}
return nil
} | |
c175462 |
doneCh: make(chan AppendFuture, rpcMaxPipeline),
inprogressCh: make(chan *appendFuture, rpcMaxPipeline),
shutdownCh: make(chan struct{}),
}
go n.decodeResponses()
return n
} | |
c175463 | select {
case n.doneCh <- future:
case <-n.shutdownCh:
return
}
case <-n.shutdownCh:
return
}
}
} | |
c175464 | Hand-off for decoding, this can also cause back-pressure
// to prevent too many inflight requests
select {
case n.inprogressCh <- future:
return future, nil
case <-n.shutdownCh:
return nil, ErrPipelineShutdown
}
} | |
c175465 | if n.shutdown {
return nil
}
// Release the connection
n.conn.Release()
n.shutdown = true
close(n.shutdownCh)
return nil
} | |
c175466 | blocking,
filter: filter,
id: atomic.AddUint64(&nextObserverID, 1),
}
} | |
c175467 | r.observersLock.Unlock()
r.observers[or.id] = or
} | |
c175468 | r.observersLock.Unlock()
delete(r.observers, or.id)
} | |
c175469 | != nil && !or.filter(&ob) {
continue
}
if or.channel == nil {
continue
}
if or.blocking {
or.channel <- ob
atomic.AddUint64(&or.numObserved, 1)
} else {
select {
case or.channel <- ob:
atomic.AddUint64(&or.numObserved, 1)
default:
atomic.AddUint64(&or.numDropped, 1)
}
}
}
} | |
c175470 | make(map[string][]byte),
kvInt: make(map[string]uint64),
}
return i
} | |
c175471 | defer i.l.RUnlock()
return i.lowIndex, nil
} | |
c175472 | defer i.l.RUnlock()
return i.highIndex, nil
} | |
c175473 | if !ok {
return ErrLogNotFound
}
*log = *l
return nil
} | |
c175474 | return i.StoreLogs([]*Log{log})
} | |
c175475 | = l.Index
}
if l.Index > i.highIndex {
i.highIndex = l.Index
}
}
return nil
} | |
c175476 |
if max >= i.highIndex {
i.highIndex = min - 1
}
if i.lowIndex > i.highIndex {
i.lowIndex = 0
i.highIndex = 0
}
return nil
} | |
c175477 | defer i.l.Unlock()
i.kv[string(key)] = val
return nil
} | |
c175478 | {
return nil, errors.New("not found")
}
return val, nil
} | |
c175479 | error {
i.l.Lock()
defer i.l.Unlock()
i.kvInt[string(key)] = val
return nil
} | |
c175480 | defer i.l.RUnlock()
return i.kvInt[string(key)], nil
} | |
c175481 | := &LogCache{
store: store,
cache: make([]*Log, capacity),
}
return c, nil
} | |
c175482 | t.(*InmemTransport)
i.Lock()
defer i.Unlock()
i.peers[peer] = trans
} | |
c175483 | i.pipelines[idx].Close()
i.pipelines[idx], i.pipelines[n-1] = i.pipelines[n-1], nil
idx--
n--
}
}
i.pipelines = i.pipelines[:n]
} | |
c175484 |
// Handle pipelines
for _, pipeline := range i.pipelines {
pipeline.Close()
}
i.pipelines = nil
} | |
c175485 | r.RespChan <- RPCResponse{resp, err}
} | |
c175486 | times,
// which isn't generally safe.
defer func() {
u.opener = nil
}()
return u.opener()
}
} | |
c175487 | v.votes++
if v.votes >= v.quorumSize {
v.notifyCh <- v
v.notifyCh = nil
}
} else {
v.notifyCh <- v
v.notifyCh = nil
}
} | |
c175488 | s.notifyLock.Unlock()
// Submit our votes
for v, _ := range n {
v.vote(leader)
}
} | |
c175489 | delete(s.notify, v)
s.notifyLock.Unlock()
} | |
c175490 |
last := s.lastContact
s.lastContactLock.RUnlock()
return last
} | |
c175491 |
s.lastContact = time.Now()
s.lastContactLock.Unlock()
} | |
c175492 | && s.allowPipeline {
goto PIPELINE
}
}
return
PIPELINE:
// Disable until re-enabled
s.allowPipeline = false
// Replicates using a pipeline for high performance. This method
// is not able to gracefully recover from errors, and so we fall back
// to standard mode on failure.
if err := r.pipelineReplicate(s); err != nil {
if err != ErrPipelineReplicationNotSupported {
r.logger.Error(fmt.Sprintf("Failed to start pipeline replication to %s: %s", s.peer, err))
}
}
goto RPC
} | |
c175493 | {
r.pipelineSend(s, pipeline, &nextIndex, maxIndex)
}
break SEND
case <-s.triggerCh:
lastLogIdx, _ := r.getLastLog()
shouldStop = r.pipelineSend(s, pipeline, &nextIndex, lastLogIdx)
case <-randomTimeout(r.conf.CommitTimeout):
lastLogIdx, _ := r.getLastLog()
shouldStop = r.pipelineSend(s, pipeline, &nextIndex, lastLogIdx)
}
}
// Stop our decoder, and wait for it to finish
close(stopCh)
select {
case <-finishCh:
case <-r.shutdownCh:
}
return nil
} | |
c175494 |
}
// Pipeline the append entries
if _, err := p.AppendEntries(req, new(AppendEntriesResponse)); err != nil {
r.logger.Error(fmt.Sprintf("Failed to pipeline AppendEntries to %v: %v", s.peer, err))
return true
}
// Increase the next send log to avoid re-sending old logs
if n := len(req.Entries); n > 0 {
last := req.Entries[n-1]
*nextIdx = last.Index + 1
}
return false
} | |
c175495 |
// Update the last contact
s.setLastContact()
// Abort pipeline if not successful
if !resp.Success {
return
}
// Update our replication state
updateLastAppended(s, req)
case <-stopCh:
return
}
}
} | |
c175496 |
if err := r.setPreviousLog(req, nextIndex); err != nil {
return err
}
if err := r.setNewLogs(req, nextIndex, lastIndex); err != nil {
return err
}
return nil
} | |
c175497 |
if err := r.logs.GetLog(nextIndex-1, &l); err != nil {
r.logger.Error(fmt.Sprintf("Failed to get log at index %d: %v", nextIndex-1, err))
return err
}
// Set the previous index and term (0 if nextIndex is 1)
req.PrevLogEntry = l.Index
req.PrevLogTerm = l.Term
}
return nil
} | |
c175498 | if err := r.logs.GetLog(i, oldLog); err != nil {
r.logger.Error(fmt.Sprintf("Failed to get log at index %d: %v", i, err))
return err
}
req.Entries = append(req.Entries, oldLog)
}
return nil
} | |
c175499 | start)
metrics.IncrCounter([]string{"raft", "replication", "appendEntries", "logs", peer}, logs)
} | |