repo
stringlengths
5
67
sha
stringlengths
40
40
path
stringlengths
4
234
url
stringlengths
85
339
language
stringclasses
6 values
split
stringclasses
3 values
doc
stringlengths
3
51.2k
sign
stringlengths
5
8.01k
problem
stringlengths
13
51.2k
output
stringlengths
0
3.87M
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L3162-L3209
go
train
// lookup returns the message for the given sequence number, possibly // reading the message from disk. // Store write lock is assumed to be held on entry
func (ms *FileMsgStore) lookup(seq uint64) (*pb.MsgProto, error)
// lookup returns the message for the given sequence number, possibly // reading the message from disk. // Store write lock is assumed to be held on entry func (ms *FileMsgStore) lookup(seq uint64) (*pb.MsgProto, error)
{ // Reject message for sequence outside valid range if seq < ms.first || seq > ms.last { return nil, nil } // Check first if it's in the cache. msg := ms.cache.get(seq) if msg == nil && ms.bufferedMsgs != nil { // Possibly in bufferedMsgs bm := ms.bufferedMsgs[seq] if bm != nil { msg = bm.msg ms.cache.add(seq, msg, false) } } // If not, we need to read it from disk... if msg == nil { fslice := ms.getFileSliceForSeq(seq) if fslice == nil { return nil, nil } err := ms.lockFiles(fslice) if err != nil { return nil, err } msgIndex, err := ms.readMsgIndex(fslice, seq) if msgIndex != nil { file := fslice.file.handle // Position file to message's offset. 0 means from start. _, err = file.Seek(msgIndex.offset, io.SeekStart) if err == nil { ms.tmpMsgBuf, _, _, err = readRecord(file, ms.tmpMsgBuf, false, ms.fstore.crcTable, ms.fstore.opts.DoCRC) } } ms.unlockFiles(fslice) if err != nil || msgIndex == nil { return nil, err } // Recover this message msg = &pb.MsgProto{} err = msg.Unmarshal(ms.tmpMsgBuf[:msgIndex.msgSize]) if err != nil { return nil, err } ms.cache.add(seq, msg, false) } return msg, nil }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L3220-L3229
go
train
// FirstMsg returns the first message stored.
func (ms *FileMsgStore) FirstMsg() (*pb.MsgProto, error)
// FirstMsg returns the first message stored. func (ms *FileMsgStore) FirstMsg() (*pb.MsgProto, error)
{ var err error ms.RLock() if ms.firstMsg == nil { ms.firstMsg, err = ms.lookup(ms.first) } m := ms.firstMsg ms.RUnlock() return m, err }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L3232-L3241
go
train
// LastMsg returns the last message stored.
func (ms *FileMsgStore) LastMsg() (*pb.MsgProto, error)
// LastMsg returns the last message stored. func (ms *FileMsgStore) LastMsg() (*pb.MsgProto, error)
{ var err error ms.RLock() if ms.lastMsg == nil { ms.lastMsg, err = ms.lookup(ms.last) } m := ms.lastMsg ms.RUnlock() return m, err }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L3245-L3323
go
train
// GetSequenceFromTimestamp returns the sequence of the first message whose // timestamp is greater or equal to given timestamp.
func (ms *FileMsgStore) GetSequenceFromTimestamp(timestamp int64) (uint64, error)
// GetSequenceFromTimestamp returns the sequence of the first message whose // timestamp is greater or equal to given timestamp. func (ms *FileMsgStore) GetSequenceFromTimestamp(timestamp int64) (uint64, error)
{ ms.RLock() defer ms.RUnlock() // No message ever stored if ms.first == 0 { return 0, nil } // All messages have expired if ms.first > ms.last { return ms.last + 1, nil } // If we have some state, try to quickly get the sequence if ms.firstMsg != nil && timestamp <= ms.firstMsg.Timestamp { return ms.first, nil } if ms.lastMsg != nil { if timestamp == ms.lastMsg.Timestamp { return ms.last, nil } if timestamp > ms.lastMsg.Timestamp { return ms.last + 1, nil } } // This will require disk access. for i := ms.firstFSlSeq; i <= ms.lastFSlSeq; i++ { // support possible missing slices slice := ms.files[i] if slice == nil { continue } if err := ms.lockIndexFile(slice); err != nil { return 0, err } seq := slice.firstSeq if firstMsgInSlice, err := ms.getMsgIndex(slice, seq); err != nil { ms.unlockIndexFile(slice) return 0, err } else if timestamp > firstMsgInSlice.timestamp { seq = slice.lastSeq lastMsgInSlice, err := ms.getMsgIndex(slice, seq) if err != nil { ms.unlockIndexFile(slice) return 0, err } if timestamp > lastMsgInSlice.timestamp { // Not there, move to the next slice. ms.unlockIndexFile(slice) continue } // It may be equal, so search only if strictly lower if timestamp < lastMsgInSlice.timestamp { // We know that the timestamp is somewhere in this slice. // Could do binary search, but will be probably more efficient // to do sequential disk reads. The index records are small, // so read of a record will probably bring many consecutive ones // in the system's disk cache, resulting in memory-only access // for the following indexes... for seq = slice.firstSeq + 1; seq <= slice.lastSeq-1; seq++ { mindex, err := ms.getMsgIndex(slice, seq) if err != nil { ms.unlockIndexFile(slice) return 0, err } if mindex.timestamp >= timestamp { break } } } } ms.unlockIndexFile(slice) // We are here if the timestamp is smaller than the first // message in the first slice, or we have found the first // sequence that is >= timestamp. return seq, nil } return ms.last + 1, nil }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L3326-L3330
go
train
// initCache initializes the message cache
func (ms *FileMsgStore) initCache()
// initCache initializes the message cache func (ms *FileMsgStore) initCache()
{ ms.cache = &msgsCache{ seqMaps: make(map[uint64]*cachedMsg), } }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L3334-L3360
go
train
// add adds a message to the cache. // Store write lock is assumed held on entry
func (c *msgsCache) add(seq uint64, msg *pb.MsgProto, isNew bool)
// add adds a message to the cache. // Store write lock is assumed held on entry func (c *msgsCache) add(seq uint64, msg *pb.MsgProto, isNew bool)
{ exp := cacheTTL if isNew { exp += msg.Timestamp } else { exp += time.Now().UnixNano() } cMsg := &cachedMsg{ expiration: exp, msg: msg, } if c.tail == nil { c.head = cMsg } else { c.tail.next = cMsg // Ensure last expiration is at least >= previous one. if cMsg.expiration < c.tail.expiration { cMsg.expiration = c.tail.expiration } } cMsg.prev = c.tail c.tail = cMsg c.seqMaps[seq] = cMsg if len(c.seqMaps) == 1 { atomic.StoreInt32(&c.tryEvict, 1) } }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L3364-L3392
go
train
// get returns a message if available in the cache. // Store write lock is assumed held on entry
func (c *msgsCache) get(seq uint64) *pb.MsgProto
// get returns a message if available in the cache. // Store write lock is assumed held on entry func (c *msgsCache) get(seq uint64) *pb.MsgProto
{ cMsg := c.seqMaps[seq] if cMsg == nil { return nil } // Bump the expiration cMsg.expiration = time.Now().UnixNano() + cacheTTL // If not already at the tail of the list, move it there if cMsg != c.tail { if cMsg.prev != nil { cMsg.prev.next = cMsg.next } if cMsg.next != nil { cMsg.next.prev = cMsg.prev } if cMsg == c.head { c.head = cMsg.next } cMsg.prev = c.tail c.tail.next = cMsg cMsg.next = nil // Ensure last expiration is at least >= previous one. if cMsg.expiration < c.tail.expiration { cMsg.expiration = c.tail.expiration } c.tail = cMsg } return cMsg.msg }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L3396-L3417
go
train
// evict move down the cache maps, evicting the last one. // Store write lock is assumed held on entry
func (c *msgsCache) evict(now int64)
// evict move down the cache maps, evicting the last one. // Store write lock is assumed held on entry func (c *msgsCache) evict(now int64)
{ if c.head == nil { return } if now >= c.tail.expiration { // Bulk remove c.seqMaps = make(map[uint64]*cachedMsg) c.head, c.tail, c.tryEvict = nil, nil, 0 return } cMsg := c.head for cMsg != nil && cMsg.expiration <= now { delete(c.seqMaps, cMsg.msg.Sequence) cMsg = cMsg.next } if cMsg != c.head { // There should be at least one left, otherwise, they // would all have been bulk removed at top of this function. cMsg.prev = nil c.head = cMsg } }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L3420-L3424
go
train
// empty empties the cache
func (c *msgsCache) empty()
// empty empties the cache func (c *msgsCache) empty()
{ atomic.StoreInt32(&c.tryEvict, 0) c.head, c.tail = nil, nil c.seqMaps = make(map[uint64]*cachedMsg) }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L3427-L3466
go
train
// Close closes the store.
func (ms *FileMsgStore) Close() error
// Close closes the store. func (ms *FileMsgStore) Close() error
{ ms.Lock() if ms.closed { ms.Unlock() return nil } ms.closed = true // Signal the background tasks go-routine to exit ms.bkgTasksDone <- true ms.Unlock() // Wait on go routines/timers to finish ms.allDone.Wait() ms.Lock() var err error if ms.writeSlice != nil { // Flush current file slice where writes happen ms.lockFiles(ms.writeSlice) err = ms.flush(ms.writeSlice) ms.unlockFiles(ms.writeSlice) } // Remove/close all file slices for _, slice := range ms.files { ms.fm.remove(slice.file) ms.fm.remove(slice.idxFile) if slice.file.handle != nil { err = util.CloseFile(err, slice.file.handle) } if slice.idxFile.handle != nil { err = util.CloseFile(err, slice.idxFile.handle) } } ms.Unlock() return err }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L3495-L3507
go
train
// Flush flushes outstanding data into the store.
func (ms *FileMsgStore) Flush() error
// Flush flushes outstanding data into the store. func (ms *FileMsgStore) Flush() error
{ ms.Lock() var err error if ms.writeSlice != nil { err = ms.lockFiles(ms.writeSlice) if err == nil { err = ms.flush(ms.writeSlice) ms.unlockFiles(ms.writeSlice) } } ms.Unlock() return err }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L3510-L3550
go
train
// Empty implements the MsgStore interface
func (ms *FileMsgStore) Empty() error
// Empty implements the MsgStore interface func (ms *FileMsgStore) Empty() error
{ ms.Lock() defer ms.Unlock() var err error // Remove/close all file slices for sliceID, slice := range ms.files { ms.fm.remove(slice.file) ms.fm.remove(slice.idxFile) if slice.file.handle != nil { err = util.CloseFile(err, slice.file.handle) } if lerr := os.Remove(slice.file.name); lerr != nil && err == nil { err = lerr } if slice.idxFile.handle != nil { err = util.CloseFile(err, slice.idxFile.handle) } if lerr := os.Remove(slice.idxFile.name); lerr != nil && err == nil { err = lerr } delete(ms.files, sliceID) } // Reset generic counters ms.empty() // FileMsgStore specific ms.writer = nil ms.writeSlice = nil ms.cache.empty() ms.wOffset = 0 ms.firstMsg, ms.lastMsg = nil, nil ms.expiration = 0 ms.firstFSlSeq, ms.lastFSlSeq = 0, 0 // If we are running in buffered mode... if ms.bw != nil { ms.bw = newBufferWriter(msgBufMinShrinkSize, ms.fstore.opts.BufferSize) ms.bufferedSeqs = make([]uint64, 0, 1) ms.bufferedMsgs = make(map[uint64]*bufferedMsg) } return err }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L3557-L3611
go
train
//////////////////////////////////////////////////////////////////////////// // FileSubStore methods //////////////////////////////////////////////////////////////////////////// // newFileSubStore returns a new instace of a file SubStore.
func (fs *FileStore) newFileSubStore(channel string, limits *SubStoreLimits, doRecover bool) (*FileSubStore, error)
//////////////////////////////////////////////////////////////////////////// // FileSubStore methods //////////////////////////////////////////////////////////////////////////// // newFileSubStore returns a new instace of a file SubStore. func (fs *FileStore) newFileSubStore(channel string, limits *SubStoreLimits, doRecover bool) (*FileSubStore, error)
{ ss := &FileSubStore{ fstore: fs, fm: fs.fm, opts: &fs.opts, crcTable: fs.crcTable, } ss.init(fs.log, limits) // Convert the CompactInterval in time.Duration ss.compactItvl = time.Duration(ss.opts.CompactInterval) * time.Second var err error fileName := filepath.Join(channel, subsFileName) ss.file, err = fs.fm.createFile(fileName, defaultFileFlags, func() error { ss.writer = nil return ss.flush() }) if err != nil { return nil, err } maxBufSize := ss.opts.BufferSize ss.writer = ss.file.handle // If we allow buffering, then create the buffered writer and // set ss's writer to that buffer. if maxBufSize > 0 { ss.bw = newBufferWriter(subBufMinShrinkSize, maxBufSize) ss.writer = ss.bw.createNewWriter(ss.file.handle) } if doRecover { if err := ss.recoverSubscriptions(); err != nil { fs.fm.unlockFile(ss.file) ss.Close() return nil, fmt.Errorf("unable to recover subscription store for [%s]: %v", channel, err) } } // Do not attempt to shrink unless the option is greater than the // minimum shrinkable size. if maxBufSize > subBufMinShrinkSize { // Use lock to avoid RACE report between setting shrinkTimer and // execution of the callback itself. ss.Lock() ss.allDone.Add(1) ss.shrinkTimer = time.AfterFunc(bufShrinkInterval, func() { ss.shrinkBuffer(true) }) ss.Unlock() } if doRecover { fs.fm.closeLockedFile(ss.file) } else { fs.fm.unlockFile(ss.file) } return ss, nil }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L3617-L3631
go
train
// getFile ensures that the store's file handle is valid, opening // the file if needed. If file needs to be opened, the store's writer // is set to either the bare file or the buffered writer (based on // store's configuration).
func (ss *FileSubStore) lockFile() error
// getFile ensures that the store's file handle is valid, opening // the file if needed. If file needs to be opened, the store's writer // is set to either the bare file or the buffered writer (based on // store's configuration). func (ss *FileSubStore) lockFile() error
{ wasOpened, err := ss.fm.lockFile(ss.file) if err != nil { return err } // If file was not opened, we need to reset ss.writer if !wasOpened { if ss.bw != nil { ss.writer = ss.bw.createNewWriter(ss.file.handle) } else { ss.writer = ss.file.handle } } return nil }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L3638-L3661
go
train
// shrinkBuffer is a timer callback that shrinks the buffer writer when possible. // Since this function is called directly in tests, the boolean `fromTimer` is // used to indicate if this function is invoked from the timer callback (in which // case, the timer need to be Reset()) or not. Reseting a timer while timer fires // can lead to unexpected behavior.
func (ss *FileSubStore) shrinkBuffer(fromTimer bool)
// shrinkBuffer is a timer callback that shrinks the buffer writer when possible. // Since this function is called directly in tests, the boolean `fromTimer` is // used to indicate if this function is invoked from the timer callback (in which // case, the timer need to be Reset()) or not. Reseting a timer while timer fires // can lead to unexpected behavior. func (ss *FileSubStore) shrinkBuffer(fromTimer bool)
{ ss.Lock() defer ss.Unlock() if ss.closed { ss.allDone.Done() return } // Fire again if fromTimer { ss.shrinkTimer.Reset(bufShrinkInterval) } // If file currently opened, lock it, otherwise we are done for now. if !ss.fm.lockFileIfOpened(ss.file) { return } // If error, the buffer (in bufio) memorizes the error // so any other write/flush on that buffer will fail. We will get the // error at the next "synchronous" operation where we can report back // to the user. ss.writer, _ = ss.bw.tryShrinkBuffer(ss.file.handle) ss.fm.unlockFile(ss.file) }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L3664-L3783
go
train
// recoverSubscriptions recovers subscriptions state for this store.
func (ss *FileSubStore) recoverSubscriptions() error
// recoverSubscriptions recovers subscriptions state for this store. func (ss *FileSubStore) recoverSubscriptions() error
{ var err error var recType recordType recSize := 0 offset := int64(4) // Create a buffered reader to speed-up recovery br := bufio.NewReaderSize(ss.file.handle, defaultBufSize) for { ss.tmpSubBuf, recSize, recType, err = readRecord(br, ss.tmpSubBuf, true, ss.crcTable, ss.opts.DoCRC) if err != nil { switch err { case io.EOF: // We are done, reset err err = nil case errNeedRewind: err = ss.fm.truncateFile(ss.file, offset) default: err = ss.fstore.handleUnexpectedEOF(err, ss.file, offset, true) } if err == nil { break } return err } readBytes := int64(recSize + recordHeaderSize) offset += readBytes ss.fileSize += readBytes // Based on record type... switch recType { case subRecNew: newSub := &spb.SubState{} if err := newSub.Unmarshal(ss.tmpSubBuf[:recSize]); err != nil { return err } sub := &subscription{ sub: newSub, seqnos: make(map[uint64]struct{}), } ss.subs[newSub.ID] = sub // Keep track of max subscription ID found. if newSub.ID > ss.maxSubID { ss.maxSubID = newSub.ID } ss.numRecs++ case subRecUpdate: modifiedSub := &spb.SubState{} if err := modifiedSub.Unmarshal(ss.tmpSubBuf[:recSize]); err != nil { return err } // Search if the create has been recovered. subi, exists := ss.subs[modifiedSub.ID] if exists { sub := subi.(*subscription) sub.sub = modifiedSub // An update means that the previous version is free space. ss.delRecs++ } else { sub := &subscription{ sub: modifiedSub, seqnos: make(map[uint64]struct{}), } ss.subs[modifiedSub.ID] = sub } // Keep track of max subscription ID found. if modifiedSub.ID > ss.maxSubID { ss.maxSubID = modifiedSub.ID } ss.numRecs++ case subRecDel: delSub := spb.SubStateDelete{} if err := delSub.Unmarshal(ss.tmpSubBuf[:recSize]); err != nil { return err } if si, exists := ss.subs[delSub.ID]; exists { s := si.(*subscription) delete(ss.subs, delSub.ID) // Delete and count all non-ack'ed messages free space. ss.delRecs++ ss.delRecs += len(s.seqnos) } // Keep track of max subscription ID found. 
if delSub.ID > ss.maxSubID { ss.maxSubID = delSub.ID } case subRecMsg: updateSub := spb.SubStateUpdate{} if err := updateSub.Unmarshal(ss.tmpSubBuf[:recSize]); err != nil { return err } if subi, exists := ss.subs[updateSub.ID]; exists { sub := subi.(*subscription) seqno := updateSub.Seqno // Same seqno/ack can appear several times for the same sub. // See queue subscribers redelivery. if seqno > sub.sub.LastSent { sub.sub.LastSent = seqno } sub.seqnos[seqno] = struct{}{} ss.numRecs++ } case subRecAck: updateSub := spb.SubStateUpdate{} if err := updateSub.Unmarshal(ss.tmpSubBuf[:recSize]); err != nil { return err } if subi, exists := ss.subs[updateSub.ID]; exists { sub := subi.(*subscription) delete(sub.seqnos, updateSub.Seqno) // A message is ack'ed ss.delRecs++ } default: return fmt.Errorf("unexpected record type: %v", recType) } } return nil }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L3787-L3805
go
train
// CreateSub records a new subscription represented by SubState. On success, // it returns an id that is used by the other methods.
func (ss *FileSubStore) CreateSub(sub *spb.SubState) error
// CreateSub records a new subscription represented by SubState. On success, // it returns an id that is used by the other methods. func (ss *FileSubStore) CreateSub(sub *spb.SubState) error
{ // Check if we can create the subscription (check limits and update // subscription count) ss.Lock() defer ss.Unlock() if err := ss.createSub(sub); err != nil { return err } if err := ss.writeRecord(nil, subRecNew, sub); err != nil { delete(ss.subs, sub.ID) return err } // We need to get a copy of the passed sub, we can't hold a reference // to it. csub := *sub s := &subscription{sub: &csub, seqnos: make(map[uint64]struct{})} ss.subs[sub.ID] = s return nil }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L3808-L3826
go
train
// UpdateSub updates a given subscription represented by SubState.
func (ss *FileSubStore) UpdateSub(sub *spb.SubState) error
// UpdateSub updates a given subscription represented by SubState. func (ss *FileSubStore) UpdateSub(sub *spb.SubState) error
{ ss.Lock() defer ss.Unlock() if err := ss.writeRecord(nil, subRecUpdate, sub); err != nil { return err } // We need to get a copy of the passed sub, we can't hold a reference // to it. csub := *sub si := ss.subs[sub.ID] if si != nil { s := si.(*subscription) s.sub = &csub } else { s := &subscription{sub: &csub, seqnos: make(map[uint64]struct{})} ss.subs[sub.ID] = s } return nil }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L3829-L3849
go
train
// DeleteSub invalidates this subscription.
func (ss *FileSubStore) DeleteSub(subid uint64) error
// DeleteSub invalidates this subscription. func (ss *FileSubStore) DeleteSub(subid uint64) error
{ ss.Lock() ss.delSub.ID = subid err := ss.writeRecord(nil, subRecDel, &ss.delSub) // Even if there is an error, continue with cleanup. If later // a compact is successful, the sub won't be present in the compacted file. if si, exists := ss.subs[subid]; exists { s := si.(*subscription) delete(ss.subs, subid) // writeRecord has already accounted for the count of the // delete record. We add to this the number of pending messages ss.delRecs += len(s.seqnos) // Check if this triggers a need for compaction if ss.shouldCompact() { ss.fm.closeFileIfOpened(ss.file) ss.compact(ss.file.name) } } ss.Unlock() return err }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L3853-L3878
go
train
// shouldCompact returns a boolean indicating if we should compact // Lock is held by caller
func (ss *FileSubStore) shouldCompact() bool
// shouldCompact returns a boolean indicating if we should compact // Lock is held by caller func (ss *FileSubStore) shouldCompact() bool
{ // Gobal switch if !ss.opts.CompactEnabled { return false } // Check that if minimum file size is set, the client file // is at least at the minimum. if ss.opts.CompactMinFileSize > 0 && ss.fileSize < ss.opts.CompactMinFileSize { return false } // Check fragmentation frag := 0 if ss.numRecs == 0 { frag = 100 } else { frag = ss.delRecs * 100 / ss.numRecs } if frag < ss.opts.CompactFragmentation { return false } // Check that we don't compact too often if time.Since(ss.compactTS) < ss.compactItvl { return false } return true }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L3881-L3898
go
train
// AddSeqPending adds the given message seqno to the given subscription.
func (ss *FileSubStore) AddSeqPending(subid, seqno uint64) error
// AddSeqPending adds the given message seqno to the given subscription. func (ss *FileSubStore) AddSeqPending(subid, seqno uint64) error
{ ss.Lock() ss.updateSub.ID, ss.updateSub.Seqno = subid, seqno if err := ss.writeRecord(nil, subRecMsg, &ss.updateSub); err != nil { ss.Unlock() return err } si := ss.subs[subid] if si != nil { s := si.(*subscription) if seqno > s.sub.LastSent { s.sub.LastSent = seqno } s.seqnos[seqno] = struct{}{} } ss.Unlock() return nil }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L3902-L3921
go
train
// AckSeqPending records that the given message seqno has been acknowledged // by the given subscription.
func (ss *FileSubStore) AckSeqPending(subid, seqno uint64) error
// AckSeqPending records that the given message seqno has been acknowledged // by the given subscription. func (ss *FileSubStore) AckSeqPending(subid, seqno uint64) error
{ ss.Lock() ss.updateSub.ID, ss.updateSub.Seqno = subid, seqno if err := ss.writeRecord(nil, subRecAck, &ss.updateSub); err != nil { ss.Unlock() return err } si := ss.subs[subid] if si != nil { s := si.(*subscription) delete(s.seqnos, seqno) // Test if we should compact if ss.shouldCompact() { ss.fm.closeFileIfOpened(ss.file) ss.compact(ss.file.name) } } ss.Unlock() return nil }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L3928-L3990
go
train
// compact rewrites all subscriptions on a temporary file, reducing the size // since we get rid of deleted subscriptions and message sequences that have // been acknowledged. On success, the subscriptions file is replaced by this // temporary file. // Lock is held by caller
func (ss *FileSubStore) compact(orgFileName string) error
// compact rewrites all subscriptions on a temporary file, reducing the size // since we get rid of deleted subscriptions and message sequences that have // been acknowledged. On success, the subscriptions file is replaced by this // temporary file. // Lock is held by caller func (ss *FileSubStore) compact(orgFileName string) error
{ tmpFile, err := getTempFile(ss.fm.rootDir, "subs") if err != nil { return err } tmpBW := bufio.NewWriterSize(tmpFile, defaultBufSize) // Save values in case of failed compaction savedNumRecs := ss.numRecs savedDelRecs := ss.delRecs savedFileSize := ss.fileSize // Cleanup in case of error during compact defer func() { if tmpFile != nil { tmpFile.Close() os.Remove(tmpFile.Name()) // Since we failed compaction, restore values ss.numRecs = savedNumRecs ss.delRecs = savedDelRecs ss.fileSize = savedFileSize } }() // Reset to 0 since writeRecord() is updating the values. ss.numRecs = 0 ss.delRecs = 0 ss.fileSize = 0 for _, subi := range ss.subs { sub := subi.(*subscription) err = ss.writeRecord(tmpBW, subRecNew, sub.sub) if err != nil { return err } ss.updateSub.ID = sub.sub.ID for seqno := range sub.seqnos { ss.updateSub.Seqno = seqno err = ss.writeRecord(tmpBW, subRecMsg, &ss.updateSub) if err != nil { return err } } } // Flush and sync the temporary file err = tmpBW.Flush() if err != nil { return err } err = tmpFile.Sync() if err != nil { return err } // Start by closing the temporary file. if err := tmpFile.Close(); err != nil { return err } // Rename the tmp file to original file name if err := os.Rename(tmpFile.Name(), orgFileName); err != nil { return err } // Prevent cleanup on success tmpFile = nil // Update the timestamp of this last successful compact ss.compactTS = time.Now() return nil }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L3994-L4060
go
train
// writes a record in the subscriptions file. // store's lock is held on entry.
func (ss *FileSubStore) writeRecord(w io.Writer, recType recordType, rec record) error
// writes a record in the subscriptions file. // store's lock is held on entry. func (ss *FileSubStore) writeRecord(w io.Writer, recType recordType, rec record) error
{ var err error totalSize := 0 recSize := rec.Size() var bwBuf *bufio.Writer needsUnlock := false if w == nil { if err := ss.lockFile(); err != nil { return err } needsUnlock = true if ss.bw != nil { bwBuf = ss.bw.buf // If we are using the buffer writer on this call, and the buffer is // not already at the max size... if bwBuf != nil && ss.bw.bufSize != ss.opts.BufferSize { // Check if record fits required := recSize + recordHeaderSize if required > bwBuf.Available() { ss.writer, err = ss.bw.expand(ss.file.handle, required) if err != nil { ss.fm.unlockFile(ss.file) return err } bwBuf = ss.bw.buf } } } w = ss.writer } ss.tmpSubBuf, totalSize, err = writeRecord(w, ss.tmpSubBuf, recType, rec, recSize, ss.crcTable) if err != nil { if needsUnlock { ss.fm.unlockFile(ss.file) } return err } if bwBuf != nil && ss.bw.shrinkReq { ss.bw.checkShrinkRequest() } // Indicate that we wrote something to the buffer/file ss.activity = true switch recType { case subRecNew: ss.numRecs++ case subRecMsg: ss.numRecs++ case subRecAck: // An ack makes the message record free space ss.delRecs++ case subRecUpdate: ss.numRecs++ // An update makes the old record free space ss.delRecs++ case subRecDel: ss.delRecs++ default: panic(fmt.Errorf("record type %v unknown", recType)) } ss.fileSize += int64(totalSize) if needsUnlock { ss.fm.unlockFile(ss.file) } return nil }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L4081-L4090
go
train
// Flush persists buffered operations to disk.
func (ss *FileSubStore) Flush() error
// Flush persists buffered operations to disk. func (ss *FileSubStore) Flush() error
{ ss.Lock() err := ss.lockFile() if err == nil { err = ss.flush() ss.fm.unlockFile(ss.file) } ss.Unlock() return err }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L4093-L4125
go
train
// Close closes this store
func (ss *FileSubStore) Close() error
// Close closes this store func (ss *FileSubStore) Close() error
{ ss.Lock() if ss.closed { ss.Unlock() return nil } ss.closed = true if ss.shrinkTimer != nil { if ss.shrinkTimer.Stop() { // If we can stop, timer callback won't fire, // so we need to decrement the wait group. ss.allDone.Done() } } ss.Unlock() // Wait on timers/callbacks ss.allDone.Wait() ss.Lock() var err error if ss.fm.remove(ss.file) { if ss.file.handle != nil { err = ss.flush() err = util.CloseFile(err, ss.file.handle) } } ss.Unlock() return err }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/raft_transport.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/raft_transport.go#L225-L276
go
train
// Dial creates a new net.Conn with the remote address. This is implemented by // performing a handshake over NATS which establishes unique inboxes at each // endpoint for streaming data.
func (n *natsStreamLayer) Dial(address raft.ServerAddress, timeout time.Duration) (net.Conn, error)
// Dial creates a new net.Conn with the remote address. This is implemented by // performing a handshake over NATS which establishes unique inboxes at each // endpoint for streaming data. func (n *natsStreamLayer) Dial(address raft.ServerAddress, timeout time.Duration) (net.Conn, error)
{ if !n.conn.IsConnected() { return nil, errors.New("raft-nats: dial failed, not connected") } // QUESTION: The Raft NetTransport does connection pooling, which is useful // for TCP sockets. The NATS transport simulates a socket using a // subscription at each endpoint, but everything goes over the same NATS // socket. This means there is little advantage to pooling here currently. // Should we actually Dial a new NATS connection here and rely on pooling? connect := &connectRequestProto{ ID: n.localAddr.String(), Inbox: fmt.Sprintf(natsRequestInbox, n.localAddr.String(), nats.NewInbox()), } data, err := json.Marshal(connect) if err != nil { panic(err) } peerConn := n.newNATSConn(string(address)) // Setup inbox. sub, err := n.conn.Subscribe(connect.Inbox, peerConn.msgHandler) if err != nil { return nil, err } sub.SetPendingLimits(-1, -1) if err := n.conn.FlushTimeout(n.timeout); err != nil { sub.Unsubscribe() return nil, err } // Make connect request to peer. msg, err := n.conn.Request(fmt.Sprintf(natsConnectInbox, address), data, timeout) if err != nil { sub.Unsubscribe() return nil, err } var resp connectResponseProto if err := json.Unmarshal(msg.Data, &resp); err != nil { sub.Unsubscribe() return nil, err } peerConn.sub = sub peerConn.outbox = resp.Inbox n.mu.Lock() n.conns[peerConn] = struct{}{} n.mu.Unlock() return peerConn, nil }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/raft_transport.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/raft_transport.go#L279-L329
go
train
// Accept waits for and returns the next connection to the listener.
func (n *natsStreamLayer) Accept() (net.Conn, error)
// Accept waits for and returns the next connection to the listener. func (n *natsStreamLayer) Accept() (net.Conn, error)
{ for { msg, err := n.sub.NextMsgWithContext(context.TODO()) if err != nil { return nil, err } if msg.Reply == "" { n.logger.Println("[ERR] raft-nats: Invalid connect message (missing reply inbox)") continue } var connect connectRequestProto if err := json.Unmarshal(msg.Data, &connect); err != nil { n.logger.Println("[ERR] raft-nats: Invalid connect message (invalid data)") continue } peerConn := n.newNATSConn(connect.ID) peerConn.outbox = connect.Inbox // Setup inbox for peer. inbox := fmt.Sprintf(natsRequestInbox, n.localAddr.String(), nats.NewInbox()) sub, err := n.conn.Subscribe(inbox, peerConn.msgHandler) if err != nil { n.logger.Printf("[ERR] raft-nats: Failed to create inbox for remote peer: %v", err) continue } sub.SetPendingLimits(-1, -1) // Reply to peer. resp := &connectResponseProto{Inbox: inbox} data, err := json.Marshal(resp) if err != nil { panic(err) } if err := n.conn.Publish(msg.Reply, data); err != nil { n.logger.Printf("[ERR] raft-nats: Failed to send connect response to remote peer: %v", err) sub.Unsubscribe() continue } if err := n.conn.FlushTimeout(n.timeout); err != nil { n.logger.Printf("[ERR] raft-nats: Failed to flush connect response to remote peer: %v", err) sub.Unsubscribe() continue } peerConn.sub = sub n.mu.Lock() n.conns[peerConn] = struct{}{} n.mu.Unlock() return peerConn, nil } }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/raft_transport.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/raft_transport.go#L350-L355
go
train
// newNATSTransport creates a new raft.NetworkTransport implemented with NATS // as the transport layer.
func newNATSTransport(id string, conn *nats.Conn, timeout time.Duration, logOutput io.Writer) (*raft.NetworkTransport, error)
// newNATSTransport creates a new raft.NetworkTransport implemented with NATS // as the transport layer. func newNATSTransport(id string, conn *nats.Conn, timeout time.Duration, logOutput io.Writer) (*raft.NetworkTransport, error)
{ if logOutput == nil { logOutput = os.Stderr } return newNATSTransportWithLogger(id, conn, timeout, log.New(logOutput, "", log.LstdFlags)) }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/raft_transport.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/raft_transport.go#L359-L363
go
train
// newNATSTransportWithLogger creates a new raft.NetworkTransport implemented // with NATS as the transport layer using the provided Logger.
func newNATSTransportWithLogger(id string, conn *nats.Conn, timeout time.Duration, logger *log.Logger) (*raft.NetworkTransport, error)
// newNATSTransportWithLogger creates a new raft.NetworkTransport implemented // with NATS as the transport layer using the provided Logger. func newNATSTransportWithLogger(id string, conn *nats.Conn, timeout time.Duration, logger *log.Logger) (*raft.NetworkTransport, error)
{ return createNATSTransport(id, conn, logger, timeout, func(stream raft.StreamLayer) *raft.NetworkTransport { return raft.NewNetworkTransportWithLogger(stream, 3, timeout, logger) }) }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/raft_transport.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/raft_transport.go#L367-L375
go
train
// newNATSTransportWithConfig returns a raft.NetworkTransport implemented // with NATS as the transport layer, using the given config struct.
func newNATSTransportWithConfig(id string, conn *nats.Conn, config *raft.NetworkTransportConfig) (*raft.NetworkTransport, error)
// newNATSTransportWithConfig returns a raft.NetworkTransport implemented // with NATS as the transport layer, using the given config struct. func newNATSTransportWithConfig(id string, conn *nats.Conn, config *raft.NetworkTransportConfig) (*raft.NetworkTransport, error)
{ if config.Timeout == 0 { config.Timeout = 2 * time.Second } return createNATSTransport(id, conn, config.Logger, config.Timeout, func(stream raft.StreamLayer) *raft.NetworkTransport { config.Stream = stream return raft.NewNetworkTransportWithConfig(config) }) }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/memstore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/memstore.go#L51-L57
go
train
//////////////////////////////////////////////////////////////////////////// // MemoryStore methods //////////////////////////////////////////////////////////////////////////// // NewMemoryStore returns a factory for stores held in memory. // If not limits are provided, the store will be created with // DefaultStoreLimits.
func NewMemoryStore(log logger.Logger, limits *StoreLimits) (*MemoryStore, error)
//////////////////////////////////////////////////////////////////////////// // MemoryStore methods //////////////////////////////////////////////////////////////////////////// // NewMemoryStore returns a factory for stores held in memory. // If not limits are provided, the store will be created with // DefaultStoreLimits. func NewMemoryStore(log logger.Logger, limits *StoreLimits) (*MemoryStore, error)
{ ms := &MemoryStore{} if err := ms.init(TypeMemory, log, limits); err != nil { return nil, err } return ms, nil }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/memstore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/memstore.go#L60-L84
go
train
// CreateChannel implements the Store interface
func (ms *MemoryStore) CreateChannel(channel string) (*Channel, error)
// CreateChannel implements the Store interface func (ms *MemoryStore) CreateChannel(channel string) (*Channel, error)
{ ms.Lock() defer ms.Unlock() // Verify that it does not already exist or that we did not hit the limits if err := ms.canAddChannel(channel); err != nil { return nil, err } channelLimits := ms.genericStore.getChannelLimits(channel) msgStore := &MemoryMsgStore{msgs: make(map[uint64]*pb.MsgProto, 64)} msgStore.init(channel, ms.log, &channelLimits.MsgStoreLimits) subStore := &MemorySubStore{} subStore.init(ms.log, &channelLimits.SubStoreLimits) c := &Channel{ Subs: subStore, Msgs: msgStore, } ms.channels[channel] = c return c, nil }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/memstore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/memstore.go#L91-L130
go
train
//////////////////////////////////////////////////////////////////////////// // MemoryMsgStore methods //////////////////////////////////////////////////////////////////////////// // Store a given message.
func (ms *MemoryMsgStore) Store(m *pb.MsgProto) (uint64, error)
//////////////////////////////////////////////////////////////////////////// // MemoryMsgStore methods //////////////////////////////////////////////////////////////////////////// // Store a given message. func (ms *MemoryMsgStore) Store(m *pb.MsgProto) (uint64, error)
{ ms.Lock() defer ms.Unlock() if m.Sequence <= ms.last { // We've already seen this message. return m.Sequence, nil } if ms.first == 0 { ms.first = m.Sequence } ms.last = m.Sequence ms.msgs[ms.last] = m ms.totalCount++ ms.totalBytes += uint64(m.Size()) // If there is an age limit and no timer yet created, do so now if ms.limits.MaxAge > time.Duration(0) && ms.ageTimer == nil { ms.wg.Add(1) ms.ageTimer = time.AfterFunc(ms.limits.MaxAge, ms.expireMsgs) } // Check if we need to remove any (but leave at least the last added) maxMsgs := ms.limits.MaxMsgs maxBytes := ms.limits.MaxBytes if maxMsgs > 0 || maxBytes > 0 { for ms.totalCount > 1 && ((maxMsgs > 0 && ms.totalCount > maxMsgs) || (maxBytes > 0 && (ms.totalBytes > uint64(maxBytes)))) { ms.removeFirstMsg() if !ms.hitLimit { ms.hitLimit = true ms.log.Warnf(droppingMsgsFmt, ms.subject, ms.totalCount, ms.limits.MaxMsgs, util.FriendlyBytes(int64(ms.totalBytes)), util.FriendlyBytes(ms.limits.MaxBytes)) } } } return ms.last, nil }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/memstore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/memstore.go#L133-L138
go
train
// Lookup returns the stored message with given sequence number.
func (ms *MemoryMsgStore) Lookup(seq uint64) (*pb.MsgProto, error)
// Lookup returns the stored message with given sequence number. func (ms *MemoryMsgStore) Lookup(seq uint64) (*pb.MsgProto, error)
{ ms.RLock() m := ms.msgs[seq] ms.RUnlock() return m, nil }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/memstore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/memstore.go#L141-L146
go
train
// FirstMsg returns the first message stored.
func (ms *MemoryMsgStore) FirstMsg() (*pb.MsgProto, error)
// FirstMsg returns the first message stored. func (ms *MemoryMsgStore) FirstMsg() (*pb.MsgProto, error)
{ ms.RLock() m := ms.msgs[ms.first] ms.RUnlock() return m, nil }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/memstore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/memstore.go#L149-L154
go
train
// LastMsg returns the last message stored.
func (ms *MemoryMsgStore) LastMsg() (*pb.MsgProto, error)
// LastMsg returns the last message stored. func (ms *MemoryMsgStore) LastMsg() (*pb.MsgProto, error)
{ ms.RLock() m := ms.msgs[ms.last] ms.RUnlock() return m, nil }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/memstore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/memstore.go#L158-L185
go
train
// GetSequenceFromTimestamp returns the sequence of the first message whose // timestamp is greater or equal to given timestamp.
func (ms *MemoryMsgStore) GetSequenceFromTimestamp(timestamp int64) (uint64, error)
// GetSequenceFromTimestamp returns the sequence of the first message whose // timestamp is greater or equal to given timestamp. func (ms *MemoryMsgStore) GetSequenceFromTimestamp(timestamp int64) (uint64, error)
{ ms.RLock() defer ms.RUnlock() // No message ever stored if ms.first == 0 { return 0, nil } // All messages have expired if ms.first > ms.last { return ms.last + 1, nil } if timestamp <= ms.msgs[ms.first].Timestamp { return ms.first, nil } if timestamp == ms.msgs[ms.last].Timestamp { return ms.last, nil } if timestamp > ms.msgs[ms.last].Timestamp { return ms.last + 1, nil } index := sort.Search(len(ms.msgs), func(i int) bool { return ms.msgs[uint64(i)+ms.first].Timestamp >= timestamp }) return uint64(index) + ms.first, nil }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/memstore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/memstore.go#L189-L222
go
train
// expireMsgs ensures that messages don't stay in the log longer than the // limit's MaxAge.
func (ms *MemoryMsgStore) expireMsgs()
// expireMsgs ensures that messages don't stay in the log longer than the // limit's MaxAge. func (ms *MemoryMsgStore) expireMsgs()
{ ms.Lock() defer ms.Unlock() if ms.closed { ms.wg.Done() return } now := time.Now().UnixNano() maxAge := int64(ms.limits.MaxAge) for { m, ok := ms.msgs[ms.first] if !ok { if ms.first < ms.last { ms.first++ continue } ms.ageTimer = nil ms.wg.Done() return } elapsed := now - m.Timestamp if elapsed >= maxAge { ms.removeFirstMsg() } else { if elapsed < 0 { ms.ageTimer.Reset(time.Duration(m.Timestamp - now + maxAge)) } else { ms.ageTimer.Reset(time.Duration(maxAge - elapsed)) } return } } }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/memstore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/memstore.go#L225-L231
go
train
// removeFirstMsg removes the first message and updates totals.
func (ms *MemoryMsgStore) removeFirstMsg()
// removeFirstMsg removes the first message and updates totals. func (ms *MemoryMsgStore) removeFirstMsg()
{ firstMsg := ms.msgs[ms.first] ms.totalBytes -= uint64(firstMsg.Size()) ms.totalCount-- delete(ms.msgs, ms.first) ms.first++ }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/memstore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/memstore.go#L234-L246
go
train
// Empty implements the MsgStore interface
func (ms *MemoryMsgStore) Empty() error
// Empty implements the MsgStore interface func (ms *MemoryMsgStore) Empty() error
{ ms.Lock() if ms.ageTimer != nil { if ms.ageTimer.Stop() { ms.wg.Done() } ms.ageTimer = nil } ms.empty() ms.msgs = make(map[uint64]*pb.MsgProto) ms.Unlock() return nil }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/memstore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/memstore.go#L249-L265
go
train
// Close implements the MsgStore interface
func (ms *MemoryMsgStore) Close() error
// Close implements the MsgStore interface func (ms *MemoryMsgStore) Close() error
{ ms.Lock() if ms.closed { ms.Unlock() return nil } ms.closed = true if ms.ageTimer != nil { if ms.ageTimer.Stop() { ms.wg.Done() } } ms.Unlock() ms.wg.Wait() return nil }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/sqlstore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/sqlstore.go#L231-L236
go
train
// SQLNoCaching sets the NoCaching option
func SQLNoCaching(noCaching bool) SQLStoreOption
// SQLNoCaching sets the NoCaching option func SQLNoCaching(noCaching bool) SQLStoreOption
{ return func(o *SQLStoreOptions) error { o.NoCaching = noCaching return nil } }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/sqlstore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/sqlstore.go#L239-L244
go
train
// SQLMaxOpenConns sets the MaxOpenConns option
func SQLMaxOpenConns(max int) SQLStoreOption
// SQLMaxOpenConns sets the MaxOpenConns option func SQLMaxOpenConns(max int) SQLStoreOption
{ return func(o *SQLStoreOptions) error { o.MaxOpenConns = max return nil } }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/sqlstore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/sqlstore.go#L248-L254
go
train
// SQLAllOptions is a convenient option to pass all options from a SQLStoreOptions // structure to the constructor.
func SQLAllOptions(opts *SQLStoreOptions) SQLStoreOption
// SQLAllOptions is a convenient option to pass all options from a SQLStoreOptions // structure to the constructor. func SQLAllOptions(opts *SQLStoreOptions) SQLStoreOption
{ return func(o *SQLStoreOptions) error { o.NoCaching = opts.NoCaching o.MaxOpenConns = opts.MaxOpenConns return nil } }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/sqlstore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/sqlstore.go#L353-L355
go
train
// sqlStmtError returns an error including the text of the offending SQL statement.
func sqlStmtError(code int, err error) error
// sqlStmtError returns an error including the text of the offending SQL statement. func sqlStmtError(code int, err error) error
{ return fmt.Errorf("sql: error executing %q: %v", sqlStmts[code], err) }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/sqlstore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/sqlstore.go#L378-L424
go
train
//////////////////////////////////////////////////////////////////////////// // SQLStore methods //////////////////////////////////////////////////////////////////////////// // NewSQLStore returns a factory for stores held in memory. // If not limits are provided, the store will be created with // DefaultStoreLimits.
func NewSQLStore(log logger.Logger, driver, source string, limits *StoreLimits, options ...SQLStoreOption) (*SQLStore, error)
//////////////////////////////////////////////////////////////////////////// // SQLStore methods //////////////////////////////////////////////////////////////////////////// // NewSQLStore returns a factory for stores held in memory. // If not limits are provided, the store will be created with // DefaultStoreLimits. func NewSQLStore(log logger.Logger, driver, source string, limits *StoreLimits, options ...SQLStoreOption) (*SQLStore, error)
{ initSQLStmts.Do(func() { initSQLStmtsTable(driver) }) db, err := sql.Open(driver, source) if err != nil { return nil, err } if err := db.Ping(); err != nil { db.Close() return nil, err } // Start with empty options opts := DefaultSQLStoreOptions() // And apply whatever is given to us as options. for _, opt := range options { if err := opt(opts); err != nil { return nil, err } } db.SetMaxOpenConns(opts.MaxOpenConns) s := &SQLStore{ opts: opts, db: db, doneCh: make(chan struct{}), preparedStmts: make([]*sql.Stmt, 0, len(sqlStmts)), } if err := s.init(TypeSQL, log, limits); err != nil { s.Close() return nil, err } if err := s.createPreparedStmts(); err != nil { s.Close() return nil, err } s.Lock() s.wg.Add(1) go s.timeTick() if !s.opts.NoCaching { s.wg.Add(1) s.ssFlusher = &subStoresFlusher{ stores: make(map[*SQLSubStore]struct{}), signalCh: make(chan struct{}, 1), } go s.subStoresFlusher() } s.Unlock() return s, nil }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/sqlstore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/sqlstore.go#L427-L475
go
train
// GetExclusiveLock implements the Store interface
func (s *SQLStore) GetExclusiveLock() (bool, error)
// GetExclusiveLock implements the Store interface func (s *SQLStore) GetExclusiveLock() (bool, error)
{ s.Lock() defer s.Unlock() if s.closed { return false, nil } if s.dbLock == nil { s.dbLock = &sqlDBLock{ id: nuid.Next(), db: s.db, } } if s.dbLock.isOwner { return true, nil } hasLock, id, tick, err := s.acquireDBLock(false) if err != nil { return false, err } if !hasLock { // We did not get the lock. Try to see if the table is updated // after 1 interval. If so, consider the lock "healthy" and just // return that we did not get the lock. If after a configured // number of tries the tick for current owner is not updated, // steal the lock. prevID := id prevTick := tick for i := 0; i < sqlLockLostCount; i++ { time.Sleep(time.Duration(1.5 * float64(sqlLockUpdateInterval))) hasLock, id, tick, err = s.acquireDBLock(false) if hasLock || err != nil || id != prevID || tick != prevTick { return hasLock, err } prevTick = tick } // Try to steal. hasLock, _, _, err = s.acquireDBLock(true) } if hasLock { // Success. Keep track that we own the lock so we can clear // the table on clean shutdown to release the lock immediately. s.dbLock.Lock() s.dbLock.isOwner = true s.wg.Add(1) go s.updateDBLock() s.dbLock.Unlock() } return hasLock, err }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/sqlstore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/sqlstore.go#L478-L514
go
train
// This go-routine updates the DB store lock at regular intervals.
func (s *SQLStore) updateDBLock()
// This go-routine updates the DB store lock at regular intervals. func (s *SQLStore) updateDBLock()
{ defer s.wg.Done() var ( ticker = time.NewTicker(sqlLockUpdateInterval) hasLock = true err error failed int ) for { select { case <-ticker.C: hasLock, _, _, err = s.acquireDBLock(false) if !hasLock || err != nil { // If there is no error but we did not get the lock, // something is really wrong, abort right away. stopNow := !hasLock && err == nil if err != nil { failed++ s.log.Errorf("Unable to update store lock (failed=%v err=%v)", failed, err) } if stopNow || failed == sqlLockLostCount { if sqlNoPanic { s.log.Fatalf("Aborting") return } panic("lost store lock, aborting") } } else { failed = 0 } case <-s.doneCh: ticker.Stop() return } } }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/sqlstore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/sqlstore.go#L517-L558
go
train
// Returns if lock is acquired, the owner and tick value of the lock record.
func (s *SQLStore) acquireDBLock(steal bool) (bool, string, uint64, error)
// Returns if lock is acquired, the owner and tick value of the lock record. func (s *SQLStore) acquireDBLock(steal bool) (bool, string, uint64, error)
{ s.dbLock.Lock() defer s.dbLock.Unlock() var ( lockID string tick uint64 hasLock bool ) tx, err := s.dbLock.db.Begin() if err != nil { return false, "", 0, err } defer func() { if tx != nil { tx.Rollback() } }() r := tx.QueryRow(sqlStmts[sqlDBLockSelect]) err = r.Scan(&lockID, &tick) if err != nil && err != sql.ErrNoRows { return false, "", 0, sqlStmtError(sqlDBLockSelect, err) } if err == sql.ErrNoRows || steal || lockID == "" || lockID == s.dbLock.id { // If we are stealing, reset tick to 0 (so it will become 1 in update statement) if steal { tick = 0 } stmt := sqlStmts[sqlDBLockUpdate] if err == sql.ErrNoRows { stmt = sqlStmts[sqlDBLockInsert] } if _, err := tx.Exec(stmt, s.dbLock.id, tick+1); err != nil { return false, "", 0, sqlStmtError(sqlDBLockUpdate, err) } hasLock = true } if err := tx.Commit(); err != nil { return false, "", 0, err } tx = nil return hasLock, lockID, tick, nil }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/sqlstore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/sqlstore.go#L561-L567
go
train
// Release the store lock if this store was the owner of the lock
func (s *SQLStore) releaseDBLockIfOwner()
// Release the store lock if this store was the owner of the lock func (s *SQLStore) releaseDBLockIfOwner()
{ s.dbLock.Lock() defer s.dbLock.Unlock() if s.dbLock.isOwner { s.dbLock.db.Exec(sqlStmts[sqlDBLockUpdate], "", 0) } }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/sqlstore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/sqlstore.go#L576-L612
go
train
// When a SubStore adds a pending message or an ack, it will // notify this go-routine so that the store gets flushed after // some time should it not be flushed explicitly. // This go routine waits to be signaled and when that happens // reset a timer to fire in a short period of time. It then // go through the list of SubStore that have been registered // as needing a flush and call Flush() on them.
func (s *SQLStore) subStoresFlusher()
// When a SubStore adds a pending message or an ack, it will // notify this go-routine so that the store gets flushed after // some time should it not be flushed explicitly. // This go routine waits to be signaled and when that happens // reset a timer to fire in a short period of time. It then // go through the list of SubStore that have been registered // as needing a flush and call Flush() on them. func (s *SQLStore) subStoresFlusher()
{ defer s.wg.Done() s.Lock() flusher := s.ssFlusher s.Unlock() var ( stores []*SQLSubStore tm = time.NewTimer(sqlSubStoreFlushIdleInterval) ) for { select { case <-s.doneCh: return case <-flusher.signalCh: if !tm.Stop() { <-tm.C } tm.Reset(sqlSubStoreFlushInterval) case <-tm.C: flusher.Lock() for ss := range flusher.stores { stores = append(stores, ss) delete(flusher.stores, ss) } flusher.signaled = false flusher.Unlock() for _, ss := range stores { ss.Flush() } stores = stores[:0] tm.Reset(sqlSubStoreFlushIdleInterval) } } }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/sqlstore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/sqlstore.go#L617-L633
go
train
// Add this store to the list of SubStore needing flushing // and signal the go-routine responsible for flushing if // need be.
func (s *SQLStore) scheduleSubStoreFlush(ss *SQLSubStore)
// Add this store to the list of SubStore needing flushing // and signal the go-routine responsible for flushing if // need be. func (s *SQLStore) scheduleSubStoreFlush(ss *SQLSubStore)
{ needSignal := false f := s.ssFlusher f.Lock() f.stores[ss] = struct{}{} if !f.signaled { f.signaled = true needSignal = true } f.Unlock() if needSignal { select { case f.signalCh <- struct{}{}: default: } } }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/sqlstore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/sqlstore.go#L636-L646
go
train
// creates an instance of a SQLMsgStore
func (s *SQLStore) newSQLMsgStore(channel string, channelID int64, limits *MsgStoreLimits) *SQLMsgStore
// creates an instance of a SQLMsgStore func (s *SQLStore) newSQLMsgStore(channel string, channelID int64, limits *MsgStoreLimits) *SQLMsgStore
{ msgStore := &SQLMsgStore{ sqlStore: s, channelID: channelID, } msgStore.init(channel, s.log, limits) if !s.opts.NoCaching { msgStore.writeCache = &sqlMsgsCache{msgs: make(map[uint64]*sqlCachedMsg)} } return msgStore }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/sqlstore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/sqlstore.go#L649-L665
go
train
// creates an instance of SQLSubStore
func (s *SQLStore) newSQLSubStore(channelID int64, limits *SubStoreLimits) *SQLSubStore
// creates an instance of SQLSubStore func (s *SQLStore) newSQLSubStore(channelID int64, limits *SubStoreLimits) *SQLSubStore
{ subStore := &SQLSubStore{ sqlStore: s, channelID: channelID, maxSubID: &s.maxSubID, limits: *limits, } subStore.log = s.log if s.opts.NoCaching { subStore.subLastSent = make(map[uint64]uint64) } else { subStore.cache = &sqlSubAcksPendingCache{ subs: make(map[uint64]*sqlSubAcksPending), } } return subStore }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/sqlstore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/sqlstore.go#L680-L705
go
train
// initialize the global sqlStmts table to driver's one.
func initSQLStmtsTable(driver string)
// initialize the global sqlStmts table to driver's one. func initSQLStmtsTable(driver string)
{ // The sqlStmts table is initialized with MySQL statements. // Update the statements for the selected driver. switch driver { case driverPostgres: // Replace ? with $1, $2, etc... for i, stmt := range sqlStmts { n := 0 for strings.IndexByte(stmt, '?') != -1 { n++ param := "$" + strconv.Itoa(n) stmt = strings.Replace(stmt, "?", param, 1) } sqlStmts[i] = stmt } // Replace `row` with row for i, stmt := range sqlStmts { stmt := strings.Replace(stmt, "`row`", "row", -1) sqlStmts[i] = stmt } // OVER (PARTITION ...) is not supported in older MySQL servers. // So the default SQL statement is specific to MySQL and uses variables. // For Postgres, replace with this statement: sqlStmts[sqlRecoverGetSeqFloorForMaxBytes] = "SELECT COALESCE(MIN(seq), 0) FROM (SELECT seq, SUM(size) OVER (PARTITION BY id ORDER BY seq DESC) AS total FROM Messages WHERE id=$1)t WHERE t.total<=$2" } }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/sqlstore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/sqlstore.go#L708-L727
go
train
// Init implements the Store interface
func (s *SQLStore) Init(info *spb.ServerInfo) error
// Init implements the Store interface func (s *SQLStore) Init(info *spb.ServerInfo) error
{ s.Lock() defer s.Unlock() count := 0 r := s.db.QueryRow(sqlStmts[sqlHasServerInfoRow]) if err := r.Scan(&count); err != nil && err != sql.ErrNoRows { return sqlStmtError(sqlHasServerInfoRow, err) } infoBytes, _ := info.Marshal() if count == 0 { if _, err := s.db.Exec(sqlStmts[sqlAddServerInfo], info.ClusterID, infoBytes, sqlVersion); err != nil { return sqlStmtError(sqlAddServerInfo, err) } } else { if _, err := s.db.Exec(sqlStmts[sqlUpdateServerInfo], info.ClusterID, infoBytes, sqlVersion); err != nil { return sqlStmtError(sqlUpdateServerInfo, err) } } return nil }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/sqlstore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/sqlstore.go#L730-L968
go
train
// Recover implements the Store interface
func (s *SQLStore) Recover() (*RecoveredState, error)
// Recover implements the Store interface func (s *SQLStore) Recover() (*RecoveredState, error)
{ s.Lock() defer s.Unlock() var ( clusterID string data []byte version int err error ) r := s.db.QueryRow(sqlStmts[sqlRecoverServerInfo]) if err := r.Scan(&clusterID, &data, &version); err != nil { // If there is no row, that means nothing to recover. Return nil for the // state and no error. if err == sql.ErrNoRows { return nil, nil } return nil, sqlStmtError(sqlRecoverServerInfo, err) } if version != sqlVersion { return nil, fmt.Errorf("sql: unsupported version: %v (supports [1..%v])", version, sqlVersion) } info := &spb.ServerInfo{} if err := info.Unmarshal(data); err != nil { return nil, err } if info.ClusterID != clusterID { return nil, fmt.Errorf("sql: id %q in column does not match cluster ID in data %q", clusterID, info.ClusterID) } // Create recovered state structure and fill it with server info. rs := &RecoveredState{ Info: info, } var clients []*Client cliRows, err := s.db.Query(sqlStmts[sqlRecoverClients]) if err != nil && err != sql.ErrNoRows { return nil, sqlStmtError(sqlRecoverClients, err) } defer cliRows.Close() for cliRows.Next() { var ( clientID string hbInbox string proto []byte ) if err := cliRows.Scan(&clientID, &hbInbox, &proto); err != nil { return nil, err } var client *Client if len(proto) == 0 { client = &Client{spb.ClientInfo{ID: clientID, HbInbox: hbInbox}} } else { info := spb.ClientInfo{} info.Unmarshal(proto) client = &Client{info} } clients = append(clients, client) } cliRows.Close() // Set clients into recovered state. 
rs.Clients = clients // Get the maxChannelID r = s.db.QueryRow(sqlStmts[sqlRecoverMaxChannelID]) err = r.Scan(&s.maxChannelID) if err != nil && err != sql.ErrNoRows { return nil, sqlStmtError(sqlRecoverMaxChannelID, err) } // If there was no channel recovered, we are done if s.maxChannelID == 0 { return rs, nil } // Get the maxSubID r = s.db.QueryRow(sqlStmts[sqlRecoverMaxSubID]) if err := r.Scan(&s.maxSubID); err != nil && err != sql.ErrNoRows { return nil, sqlStmtError(sqlRecoverMaxSubID, err) } // Recover individual channels var channels map[string]*RecoveredChannel channelRows, err := s.db.Query(sqlStmts[sqlRecoverChannelsList]) if err != nil && err != sql.ErrNoRows { return nil, sqlStmtError(sqlRecoverChannelsList, err) } defer channelRows.Close() for channelRows.Next() { var ( channelID int64 name string maxseq uint64 // We get that from the Channels table. mmseq uint64 // This is the max seq found in the Messages table for given channel. ) if err := channelRows.Scan(&channelID, &name, &maxseq); err != nil { return nil, err } channelLimits := s.genericStore.getChannelLimits(name) msgStore := s.newSQLMsgStore(name, channelID, &channelLimits.MsgStoreLimits) // We need to get the last seq from messages table before possibly expiring messages. r = s.preparedStmts[sqlGetLastSeq].QueryRow(channelID) if err := r.Scan(&mmseq); err != nil { return nil, sqlStmtError(sqlGetLastSeq, err) } // If it is more than the one that was updated in the Channel row, then use this one. 
if mmseq > maxseq { maxseq = mmseq } if err := s.applyLimitsOnRecovery(msgStore); err != nil { return nil, err } r = s.preparedStmts[sqlRecoverChannelMsgs].QueryRow(channelID) var ( totalCount int first uint64 last uint64 totalBytes uint64 lastTimestamp int64 ) if err := r.Scan(&totalCount, &first, &last, &totalBytes, &lastTimestamp); err != nil && err != sql.ErrNoRows { return nil, sqlStmtError(sqlRecoverChannelMsgs, err) } msgStore.first = first msgStore.last = last msgStore.totalCount = totalCount msgStore.totalBytes = totalBytes // Since messages may have been removed due to limits, update first/last // based on known max sequence. if maxseq > msgStore.last { msgStore.first = maxseq + 1 msgStore.last = maxseq } subStore := s.newSQLSubStore(channelID, &channelLimits.SubStoreLimits) // Prevent scheduling to flusher while we are recovering if !s.opts.NoCaching { // By setting this to true, we prevent scheduling since // scheduling would occur only if needsFlush is false. subStore.cache.needsFlush = true } var subscriptions []*RecoveredSubscription subRows, err := s.preparedStmts[sqlRecoverChannelSubs].Query(channelID) if err != nil { return nil, sqlStmtError(sqlRecoverChannelSubs, err) } defer subRows.Close() for subRows.Next() { var ( lastSent uint64 protoBytes []byte ap *sqlSubAcksPending ) if err := subRows.Scan(&lastSent, &protoBytes); err != nil && err != sql.ErrNoRows { return nil, err } if protoBytes != nil { sub := &spb.SubState{} if err := sub.Unmarshal(protoBytes); err != nil { return nil, err } // We need to use the max of lastSent column or the one in the proto if lastSent > sub.LastSent { sub.LastSent = lastSent } if s.opts.NoCaching { // We can remove entries for sequence that are below the smallest // sequence that was found in Messages. 
if _, err := s.preparedStmts[sqlRecoverDoPurgeSubsPending].Exec(sub.ID, msgStore.first); err != nil { return nil, sqlStmtError(sqlRecoverDoPurgeSubsPending, err) } } else { ap = subStore.getOrCreateAcksPending(sub.ID, 0) } rows, err := s.preparedStmts[sqlRecoverSubPending].Query(sub.ID) if err != nil { return nil, sqlStmtError(sqlRecoverSubPending, err) } defer rows.Close() pendingAcks := make(PendingAcks) var gcedRows map[uint64]struct{} if !s.opts.NoCaching { gcedRows = make(map[uint64]struct{}) } for rows.Next() { if err := subStore.recoverPendingRow(rows, sub, ap, pendingAcks, gcedRows); err != nil { return nil, err } } rows.Close() if s.opts.NoCaching { // Update the in-memory map tracking last sent subStore.subLastSent[sub.ID] = sub.LastSent } else { // Go over garbage collected rows and delete them for rowID := range gcedRows { if err := subStore.deleteSubPendingRow(sub.ID, rowID); err != nil { return nil, err } } } // Add to the recovered subscriptions subscriptions = append(subscriptions, &RecoveredSubscription{Sub: sub, Pending: pendingAcks}) } } subRows.Close() if !s.opts.NoCaching { // Clear but also allow scheduling now that the recovery is complete. subStore.cache.needsFlush = false } rc := &RecoveredChannel{ Channel: &Channel{ Msgs: msgStore, Subs: subStore, }, Subscriptions: subscriptions, } if channels == nil { channels = make(map[string]*RecoveredChannel) } channels[name] = rc s.channels[name] = rc.Channel } channelRows.Close() // Set channels into recovered state rs.Channels = channels return rs, nil }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/sqlstore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/sqlstore.go#L1063-L1091
go
train
// CreateChannel implements the Store interface
func (s *SQLStore) CreateChannel(channel string) (*Channel, error)
// CreateChannel implements the Store interface func (s *SQLStore) CreateChannel(channel string) (*Channel, error)
{ s.Lock() defer s.Unlock() // Verify that it does not already exist or that we did not hit the limits if err := s.canAddChannel(channel); err != nil { return nil, err } channelLimits := s.genericStore.getChannelLimits(channel) cid := s.maxChannelID + 1 if _, err := s.preparedStmts[sqlAddChannel].Exec(cid, channel, channelLimits.MaxMsgs, channelLimits.MaxBytes, int64(channelLimits.MaxAge)); err != nil { return nil, sqlStmtError(sqlAddChannel, err) } s.maxChannelID = cid msgStore := s.newSQLMsgStore(channel, cid, &channelLimits.MsgStoreLimits) subStore := s.newSQLSubStore(cid, &channelLimits.SubStoreLimits) c := &Channel{ Subs: subStore, Msgs: msgStore, } s.channels[channel] = c return c, nil }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/sqlstore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/sqlstore.go#L1094-L1132
go
train
// DeleteChannel implements the Store interface
func (s *SQLStore) DeleteChannel(channel string) error
// DeleteChannel implements the Store interface func (s *SQLStore) DeleteChannel(channel string) error
{ s.Lock() defer s.Unlock() c := s.channels[channel] if c == nil { return ErrNotFound } // Get the channel ID from Msgs store var cid int64 if cms, ok := c.Msgs.(*CryptoMsgStore); ok { cid = cms.MsgStore.(*SQLMsgStore).channelID } else { cid = c.Msgs.(*SQLMsgStore).channelID } // Fast delete just marks the channel row as deleted if _, err := s.preparedStmts[sqlDeleteChannelFast].Exec(cid); err != nil { return err } // If that succeeds, proceed with deletion of channel delete(s.channels, channel) // Close the messages and subs stores c.Msgs.Close() c.Subs.Close() // Now trigger in a go routine the longer deletion of entries // in all other tables. s.wg.Add(1) go func() { defer s.wg.Done() if err := s.deepChannelDelete(cid); err != nil { s.log.Errorf("Unable to completely delete channel %q: %v", channel, err) } }() return nil }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/sqlstore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/sqlstore.go#L1141-L1202
go
train
// This function is called after a channel has been marked // as deleted. It will do a "deep" delete of the channel, // which means removing all rows from any table that has // a reference to the deleted channel. It is executed in // a separate go-routine (as to not block DeleteChannel() // call). It will run to completion possibly delaying // the closing of the store.
func (s *SQLStore) deepChannelDelete(channelID int64) error
// This function is called after a channel has been marked // as deleted. It will do a "deep" delete of the channel, // which means removing all rows from any table that has // a reference to the deleted channel. It is executed in // a separate go-routine (as to not block DeleteChannel() // call). It will run to completion possibly delaying // the closing of the store. func (s *SQLStore) deepChannelDelete(channelID int64) error
{ // On Store.Close(), the prepared statements and DB // won't be closed until after this call returns, // so we don't need explicit store locking. // We start by removing from SubsPending. limit := 1000 for { // This will get us a set of subscription ids. We need // to repeat since we have a limit in the query rows, err := s.preparedStmts[sqlDeleteChannelGetSubIds].Query(channelID, limit) // If no more row, we are done, continue with other tables. if err == sql.ErrNoRows { break } if err != nil { return err } defer rows.Close() count := 0 for rows.Next() { var subid uint64 if err := rows.Scan(&subid); err != nil { return err } _, err := s.preparedStmts[sqlDeleteChannelDelSubsPending].Exec(subid) if err != nil { return err } count++ } rows.Close() if count < limit { break } } // Same for messages, we will get a certain number of messages // to delete and repeat the operation. for { var maxSeq uint64 row := s.preparedStmts[sqlDeleteChannelGetSomeMessagesSeq].QueryRow(channelID, limit) if err := row.Scan(&maxSeq); err != nil { return err } if maxSeq == 0 { break } _, err := s.preparedStmts[sqlDeleteChannelDelSomeMessages].Exec(channelID, maxSeq) if err != nil { return err } } // Now with the subscriptions and channel _, err := s.preparedStmts[sqlDeleteChannelDelSubscriptions].Exec(channelID) if err == nil { _, err = s.preparedStmts[sqlDeleteChannelDelChannel].Exec(channelID) } return err }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/sqlstore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/sqlstore.go#L1205-L1239
go
train
// AddClient implements the Store interface
func (s *SQLStore) AddClient(info *spb.ClientInfo) (*Client, error)
// AddClient implements the Store interface func (s *SQLStore) AddClient(info *spb.ClientInfo) (*Client, error)
{ s.Lock() defer s.Unlock() var ( protoBytes []byte err error ) protoBytes, err = info.Marshal() if err != nil { return nil, err } client := &Client{*info} for i := 0; i < 2; i++ { _, err = s.preparedStmts[sqlAddClient].Exec(client.ID, client.HbInbox, protoBytes) if err == nil { break } // We stop if this is the second AddClient failed attempt. if i > 0 { err = sqlStmtError(sqlAddClient, err) break } // This is the first AddClient failed attempt. It could be because // client was already in db, so delete now and try again. _, err = s.preparedStmts[sqlDeleteClient].Exec(client.ID) if err != nil { err = sqlStmtError(sqlDeleteClient, err) break } } if err != nil { return nil, err } return client, nil }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/sqlstore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/sqlstore.go#L1242-L1250
go
train
// DeleteClient implements the Store interface
func (s *SQLStore) DeleteClient(clientID string) error
// DeleteClient implements the Store interface func (s *SQLStore) DeleteClient(clientID string) error
{ s.Lock() _, err := s.preparedStmts[sqlDeleteClient].Exec(clientID) if err != nil { err = sqlStmtError(sqlDeleteClient, err) } s.Unlock() return err }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/sqlstore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/sqlstore.go#L1257-L1269
go
train
// timeTick updates the store's time in nanosecond at regular // interval. The time is used in Lookup() to compensate for possible // delay in expiring messages. The Lookup() will check the message's // expiration time against the time captured here. If it is expired // even though it is still in the database, Lookup() will return nil.
func (s *SQLStore) timeTick()
// timeTick updates the store's time in nanosecond at regular // interval. The time is used in Lookup() to compensate for possible // delay in expiring messages. The Lookup() will check the message's // expiration time against the time captured here. If it is expired // even though it is still in the database, Lookup() will return nil. func (s *SQLStore) timeTick()
{ defer s.wg.Done() timer := time.NewTicker(sqlTimeTickInterval) for { select { case <-s.doneCh: timer.Stop() return case <-timer.C: atomic.StoreInt64(&s.nowInNano, time.Now().UnixNano()) } } }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/sqlstore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/sqlstore.go#L1272-L1308
go
train
// Close implements the Store interface
func (s *SQLStore) Close() error
// Close implements the Store interface func (s *SQLStore) Close() error
{ s.Lock() if s.closed { s.Unlock() return nil } s.closed = true // This will cause MsgStore's and SubStore's to be closed. err := s.close() db := s.db wg := &s.wg // Signal background go-routines to quit if s.doneCh != nil { close(s.doneCh) } s.Unlock() // Wait for go routine(s) to finish wg.Wait() s.Lock() for _, ps := range s.preparedStmts { if lerr := ps.Close(); lerr != nil && err == nil { err = lerr } } if db != nil { if s.dbLock != nil { s.releaseDBLockIfOwner() } if lerr := db.Close(); lerr != nil && err == nil { err = lerr } } s.Unlock() return err }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/sqlstore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/sqlstore.go#L1314-L1333
go
train
//////////////////////////////////////////////////////////////////////////// // SQLMsgStore methods ////////////////////////////////////////////////////////////////////////////
func (mc *sqlMsgsCache) add(msg *pb.MsgProto, data []byte)
//////////////////////////////////////////////////////////////////////////// // SQLMsgStore methods //////////////////////////////////////////////////////////////////////////// func (mc *sqlMsgsCache) add(msg *pb.MsgProto, data []byte)
{ cachedMsg := mc.free if cachedMsg != nil { mc.free = cachedMsg.next cachedMsg.next = nil // Remove old message from the map delete(mc.msgs, cachedMsg.msg.Sequence) } else { cachedMsg = &sqlCachedMsg{} } cachedMsg.msg = msg cachedMsg.data = data mc.msgs[msg.Sequence] = cachedMsg if mc.head == nil { mc.head = cachedMsg } else { mc.tail.next = cachedMsg } mc.tail = cachedMsg }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/sqlstore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/sqlstore.go#L1357-L1429
go
train
// Store implements the MsgStore interface
func (ms *SQLMsgStore) Store(m *pb.MsgProto) (uint64, error)
// Store implements the MsgStore interface func (ms *SQLMsgStore) Store(m *pb.MsgProto) (uint64, error)
{ ms.Lock() defer ms.Unlock() if m.Sequence <= ms.last { // We've already seen this message. return m.Sequence, nil } seq := m.Sequence msgBytes, _ := m.Marshal() dataLen := uint64(len(msgBytes)) useCache := !ms.sqlStore.opts.NoCaching if useCache { ms.writeCache.add(m, msgBytes) } else { if _, err := ms.sqlStore.preparedStmts[sqlStoreMsg].Exec(ms.channelID, seq, m.Timestamp, dataLen, msgBytes); err != nil { return 0, sqlStmtError(sqlStoreMsg, err) } } if ms.first == 0 { ms.first = seq } ms.last = seq ms.totalCount++ ms.totalBytes += dataLen // Check if we need to remove any (but leave at least the last added) maxMsgs := ms.limits.MaxMsgs maxBytes := ms.limits.MaxBytes if maxMsgs > 0 || maxBytes > 0 { for ms.totalCount > 1 && ((maxMsgs > 0 && ms.totalCount > maxMsgs) || (maxBytes > 0 && (ms.totalBytes > uint64(maxBytes)))) { didSQL := false delBytes := uint64(0) if useCache && ms.writeCache.head.msg.Sequence == ms.first { firstCachedMsg := ms.writeCache.pop() delBytes = uint64(len(firstCachedMsg.data)) } else { r := ms.sqlStore.preparedStmts[sqlGetSizeOfMessage].QueryRow(ms.channelID, ms.first) if err := r.Scan(&delBytes); err != nil && err != sql.ErrNoRows { return 0, sqlStmtError(sqlGetSizeOfMessage, err) } didSQL = true } if delBytes > 0 { if didSQL { if _, err := ms.sqlStore.preparedStmts[sqlDeleteMessage].Exec(ms.channelID, ms.first); err != nil { return 0, sqlStmtError(sqlDeleteMessage, err) } } ms.totalCount-- ms.totalBytes -= delBytes ms.first++ } if !ms.hitLimit { ms.hitLimit = true ms.log.Warnf(droppingMsgsFmt, ms.subject, ms.totalCount, ms.limits.MaxMsgs, util.FriendlyBytes(int64(ms.totalBytes)), util.FriendlyBytes(ms.limits.MaxBytes)) } } } if !useCache && ms.limits.MaxAge > 0 && ms.expireTimer == nil { ms.createExpireTimer() } return seq, nil }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/sqlstore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/sqlstore.go#L1437-L1442
go
train
// Lookup implements the MsgStore interface
func (ms *SQLMsgStore) Lookup(seq uint64) (*pb.MsgProto, error)
// Lookup implements the MsgStore interface func (ms *SQLMsgStore) Lookup(seq uint64) (*pb.MsgProto, error)
{ ms.Lock() msg, err := ms.lookup(seq) ms.Unlock() return msg, err }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/sqlstore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/sqlstore.go#L1481-L1502
go
train
// GetSequenceFromTimestamp implements the MsgStore interface
func (ms *SQLMsgStore) GetSequenceFromTimestamp(timestamp int64) (uint64, error)
// GetSequenceFromTimestamp implements the MsgStore interface func (ms *SQLMsgStore) GetSequenceFromTimestamp(timestamp int64) (uint64, error)
{ ms.Lock() defer ms.Unlock() // No message ever stored if ms.first == 0 { return 0, nil } // All messages have expired if ms.first > ms.last { return ms.last + 1, nil } r := ms.sqlStore.preparedStmts[sqlGetSequenceFromTimestamp].QueryRow(ms.channelID, timestamp) seq := uint64(0) err := r.Scan(&seq) if err == sql.ErrNoRows { return ms.last + 1, nil } if err != nil { return 0, sqlStmtError(sqlGetSequenceFromTimestamp, err) } return seq, nil }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/sqlstore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/sqlstore.go#L1505-L1510
go
train
// FirstMsg implements the MsgStore interface
func (ms *SQLMsgStore) FirstMsg() (*pb.MsgProto, error)
// FirstMsg implements the MsgStore interface func (ms *SQLMsgStore) FirstMsg() (*pb.MsgProto, error)
{ ms.Lock() msg, err := ms.lookup(ms.first) ms.Unlock() return msg, err }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/sqlstore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/sqlstore.go#L1513-L1518
go
train
// LastMsg implements the MsgStore interface
func (ms *SQLMsgStore) LastMsg() (*pb.MsgProto, error)
// LastMsg implements the MsgStore interface func (ms *SQLMsgStore) LastMsg() (*pb.MsgProto, error)
{ ms.Lock() msg, err := ms.lookup(ms.last) ms.Unlock() return msg, err }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/sqlstore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/sqlstore.go#L1522-L1591
go
train
// expireMsgsLocked removes all messages that have expired in this channel. // Store lock is assumed held on entry
func (ms *SQLMsgStore) expireMsgs()
// expireMsgsLocked removes all messages that have expired in this channel. // Store lock is assumed held on entry func (ms *SQLMsgStore) expireMsgs()
{ ms.Lock() defer ms.Unlock() if ms.closed { ms.wg.Done() return } var ( count int maxSeq uint64 totalSize uint64 timestamp int64 ) processErr := func(errCode int, err error) { ms.log.Errorf("Unable to perform expiration for channel %q: %v", ms.subject, sqlStmtError(errCode, err)) ms.expireTimer.Reset(sqlExpirationIntervalOnError) } for { expiredTimestamp := time.Now().UnixNano() - int64(ms.limits.MaxAge) r := ms.sqlStore.preparedStmts[sqlGetExpiredMessages].QueryRow(ms.channelID, expiredTimestamp) if err := r.Scan(&count, &maxSeq, &totalSize); err != nil { processErr(sqlGetExpiredMessages, err) return } // It could be that messages that should have expired have been // removed due to count/size limit. We still need to adjust the // expiration timer based on the first message that need to expire. if count > 0 { if maxSeq == ms.last { if _, err := ms.sqlStore.preparedStmts[sqlUpdateChannelMaxSeq].Exec(maxSeq, ms.channelID); err != nil { processErr(sqlUpdateChannelMaxSeq, err) return } } if _, err := ms.sqlStore.preparedStmts[sqlDeletedMsgsWithSeqLowerThan].Exec(ms.channelID, maxSeq); err != nil { processErr(sqlDeletedMsgsWithSeqLowerThan, err) return } ms.first = maxSeq + 1 ms.totalCount -= count ms.totalBytes -= totalSize } // Reset since we are in a loop timestamp = 0 // If there is any message left in the channel, find out what the expiration // timer needs to be set to. if ms.totalCount > 0 { r = ms.sqlStore.preparedStmts[sqlGetFirstMsgTimestamp].QueryRow(ms.channelID, ms.first) if err := r.Scan(&timestamp); err != nil { processErr(sqlGetFirstMsgTimestamp, err) return } } // No message left or no message to expire. The timer will be recreated when // a new message is added to the channel. if timestamp == 0 { ms.wg.Done() ms.expireTimer = nil return } elapsed := time.Duration(time.Now().UnixNano() - timestamp) if elapsed < ms.limits.MaxAge { ms.expireTimer.Reset(ms.limits.MaxAge - elapsed) // Done with the for loop return } } }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/sqlstore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/sqlstore.go#L1643-L1671
go
train
// Empty implements the MsgStore interface
func (ms *SQLMsgStore) Empty() error
// Empty implements the MsgStore interface func (ms *SQLMsgStore) Empty() error
{ ms.Lock() tx, err := ms.sqlStore.db.Begin() if err != nil { return err } defer tx.Rollback() if _, err := tx.Exec(sqlStmts[sqlDeletedMsgsWithSeqLowerThan], ms.channelID, ms.last); err != nil { return err } if _, err := tx.Exec(sqlStmts[sqlUpdateChannelMaxSeq], 0, ms.channelID); err != nil { return err } if err := tx.Commit(); err != nil { return err } ms.empty() if ms.expireTimer != nil { if ms.expireTimer.Stop() { ms.wg.Done() } ms.expireTimer = nil } if ms.writeCache != nil { ms.writeCache.transferToFreeList() } ms.Unlock() return err }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/sqlstore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/sqlstore.go#L1674-L1679
go
train
// Flush implements the MsgStore interface
func (ms *SQLMsgStore) Flush() error
// Flush implements the MsgStore interface func (ms *SQLMsgStore) Flush() error
{ ms.Lock() err := ms.flush() ms.Unlock() return err }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/sqlstore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/sqlstore.go#L1682-L1700
go
train
// Close implements the MsgStore interface
func (ms *SQLMsgStore) Close() error
// Close implements the MsgStore interface func (ms *SQLMsgStore) Close() error
{ ms.Lock() if ms.closed { ms.Unlock() return nil } // Flush before switching the state to closed err := ms.flush() ms.closed = true if ms.expireTimer != nil { if ms.expireTimer.Stop() { ms.wg.Done() } } ms.Unlock() ms.wg.Wait() return err }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/sqlstore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/sqlstore.go#L1707-L1734
go
train
//////////////////////////////////////////////////////////////////////////// // SQLSubStore methods //////////////////////////////////////////////////////////////////////////// // CreateSub implements the SubStore interface
func (ss *SQLSubStore) CreateSub(sub *spb.SubState) error
//////////////////////////////////////////////////////////////////////////// // SQLSubStore methods //////////////////////////////////////////////////////////////////////////// // CreateSub implements the SubStore interface func (ss *SQLSubStore) CreateSub(sub *spb.SubState) error
{ ss.Lock() defer ss.Unlock() // Check limits only if needed if ss.limits.MaxSubscriptions > 0 { r := ss.sqlStore.preparedStmts[sqlCheckMaxSubs].QueryRow(ss.channelID) count := 0 if err := r.Scan(&count); err != nil { return sqlStmtError(sqlCheckMaxSubs, err) } if count >= ss.limits.MaxSubscriptions { return ErrTooManySubs } } sub.ID = atomic.AddUint64(ss.maxSubID, 1) subBytes, _ := sub.Marshal() if _, err := ss.sqlStore.preparedStmts[sqlCreateSub].Exec(ss.channelID, sub.ID, subBytes); err != nil { sub.ID = 0 return sqlStmtError(sqlCreateSub, err) } if ss.hasMarkedAsDel { if _, err := ss.sqlStore.preparedStmts[sqlDeleteSubMarkedAsDeleted].Exec(ss.channelID); err != nil { return sqlStmtError(sqlDeleteSubMarkedAsDeleted, err) } ss.hasMarkedAsDel = false } return nil }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/sqlstore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/sqlstore.go#L1737-L1758
go
train
// UpdateSub implements the SubStore interface
func (ss *SQLSubStore) UpdateSub(sub *spb.SubState) error
// UpdateSub implements the SubStore interface func (ss *SQLSubStore) UpdateSub(sub *spb.SubState) error
{ ss.Lock() defer ss.Unlock() subBytes, _ := sub.Marshal() r, err := ss.sqlStore.preparedStmts[sqlUpdateSub].Exec(subBytes, ss.channelID, sub.ID) if err != nil { return sqlStmtError(sqlUpdateSub, err) } // FileSubStoe supports updating a subscription for which there was no CreateSub. // Not sure if this is necessary, since I think server would never do that. // Stay consistent. c, err := r.RowsAffected() if err != nil { return err } if c == 0 { if _, err := ss.sqlStore.preparedStmts[sqlCreateSub].Exec(ss.channelID, sub.ID, subBytes); err != nil { return sqlStmtError(sqlCreateSub, err) } } return nil }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/sqlstore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/sqlstore.go#L1761-L1783
go
train
// DeleteSub implements the SubStore interface
func (ss *SQLSubStore) DeleteSub(subid uint64) error
// DeleteSub implements the SubStore interface func (ss *SQLSubStore) DeleteSub(subid uint64) error
{ ss.Lock() defer ss.Unlock() if subid == atomic.LoadUint64(ss.maxSubID) { if _, err := ss.sqlStore.preparedStmts[sqlMarkSubscriptionAsDeleted].Exec(ss.channelID, subid); err != nil { return sqlStmtError(sqlMarkSubscriptionAsDeleted, err) } ss.hasMarkedAsDel = true } else { if _, err := ss.sqlStore.preparedStmts[sqlDeleteSubscription].Exec(ss.channelID, subid); err != nil { return sqlStmtError(sqlDeleteSubscription, err) } } if ss.cache != nil { delete(ss.cache.subs, subid) } else { delete(ss.subLastSent, subid) } // Ignore error on this since subscription would not be recovered // if above executed ok. ss.sqlStore.preparedStmts[sqlDeleteSubPendingMessages].Exec(subid) return nil }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/sqlstore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/sqlstore.go#L1787-L1806
go
train
// This returns the structure responsible to keep track of // pending messages and acks for a given subscription ID.
func (ss *SQLSubStore) getOrCreateAcksPending(subid, seqno uint64) *sqlSubAcksPending
// This returns the structure responsible to keep track of // pending messages and acks for a given subscription ID. func (ss *SQLSubStore) getOrCreateAcksPending(subid, seqno uint64) *sqlSubAcksPending
{ if !ss.cache.needsFlush { ss.cache.needsFlush = true ss.sqlStore.scheduleSubStoreFlush(ss) } ap := ss.cache.subs[subid] if ap == nil { ap = &sqlSubAcksPending{ msgToRow: make(map[uint64]*sqlSubsPendingRow), ackToRow: make(map[uint64]*sqlSubsPendingRow), msgs: make(map[uint64]struct{}), acks: make(map[uint64]struct{}), } ss.cache.subs[subid] = ap } if seqno > ap.lastSent { ap.lastSent = seqno } return ap }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/sqlstore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/sqlstore.go#L1812-L1816
go
train
// Adds the given sequence to the list of pending messages. // Returns true if the number of pending messages has // reached a certain threshold, indicating that the // store should be flushed.
func (ss *SQLSubStore) addSeq(subid, seqno uint64) bool
// Adds the given sequence to the list of pending messages. // Returns true if the number of pending messages has // reached a certain threshold, indicating that the // store should be flushed. func (ss *SQLSubStore) addSeq(subid, seqno uint64) bool
{ ap := ss.getOrCreateAcksPending(subid, seqno) ap.msgs[seqno] = struct{}{} return len(ap.msgs) >= sqlMaxPendingAcks }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/sqlstore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/sqlstore.go#L1822-L1877
go
train
// ackSeq adds the given sequence to the list of acks and possibly
// deletes rows that have all their pending messages acknowledged.
// Returns true if the number of acks has reached a certain threshold,
// indicating that the store should be flushed.
//
// The bookkeeping relies on two reference counts per persisted row:
// msgsRefs (pending messages still unacked) and acksRefs (acks that
// other rows still need for recovery). A row is deletable only when
// both drop to zero.
func (ss *SQLSubStore) ackSeq(subid, seqno uint64) (bool, error) {
	ap := ss.getOrCreateAcksPending(subid, seqno)
	// If still in cache and not persisted into a row,
	// then simply remove from map and do not persist the ack.
	if _, exists := ap.msgs[seqno]; exists {
		delete(ap.msgs, seqno)
	} else if row := ap.msgToRow[seqno]; row != nil {
		ap.acks[seqno] = struct{}{}
		// This is an ack for a pending msg that was persisted
		// in a row. Update the row's msgRef count.
		delete(ap.msgToRow, seqno)
		row.msgsRefs--
		// If all pending messages in that row have been ack'ed
		if row.msgsRefs == 0 {
			// and if all acks on that row are no longer needed
			// (or there was none)
			if row.acksRefs == 0 {
				// then this row can be deleted.
				if err := ss.deleteSubPendingRow(subid, row.ID); err != nil {
					return false, err
				}
				// If there is no error, we don't even need
				// to persist this ack.
				delete(ap.acks, seqno)
			}
			// Since there is no pending message left in this
			// row, let's find all the corresponding acks' rows
			// for these sequences and update their acksRefs
			for seq := range row.msgs {
				delete(row.msgs, seq)
				ackRow := ap.ackToRow[seq]
				if ackRow != nil {
					// We found the row for the ack of this sequence,
					// remove from map and update reference count.
					delete(ap.ackToRow, seq)
					ackRow.acksRefs--
					// If all acks for that row are no longer needed and
					// that row has also no pending messages, then ok to
					// delete.
					if ackRow.acksRefs == 0 && ackRow.msgsRefs == 0 {
						if err := ss.deleteSubPendingRow(subid, ackRow.ID); err != nil {
							return false, err
						}
					}
				} else {
					// That means the ack is in current cache so we won't
					// need to persist it.
					delete(ap.acks, seq)
				}
			}
			// Return the (now empty) msgs map to the pool for reuse.
			sqlSeqMapPool.Put(row.msgs)
			row.msgs = nil
		}
	}
	return len(ap.acks) >= sqlMaxPendingAcks, nil
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/sqlstore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/sqlstore.go#L1880-L1902
go
train
// AddSeqPending implements the SubStore interface
func (ss *SQLSubStore) AddSeqPending(subid, seqno uint64) error
// AddSeqPending implements the SubStore interface func (ss *SQLSubStore) AddSeqPending(subid, seqno uint64) error
{ var err error ss.Lock() if !ss.closed { if ss.cache != nil { if isFull := ss.addSeq(subid, seqno); isFull { err = ss.flush() } } else { ls := ss.subLastSent[subid] if seqno > ls { ss.subLastSent[subid] = seqno } ss.curRow++ _, err = ss.sqlStore.preparedStmts[sqlSubAddPending].Exec(subid, ss.curRow, seqno) if err != nil { err = sqlStmtError(sqlSubAddPending, err) } } } ss.Unlock() return err }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/sqlstore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/sqlstore.go#L1905-L1938
go
train
// AckSeqPending implements the SubStore interface
func (ss *SQLSubStore) AckSeqPending(subid, seqno uint64) error
// AckSeqPending implements the SubStore interface func (ss *SQLSubStore) AckSeqPending(subid, seqno uint64) error
{ var err error ss.Lock() if !ss.closed { if ss.cache != nil { var isFull bool isFull, err = ss.ackSeq(subid, seqno) if err == nil && isFull { err = ss.flush() } } else { updateLastSent := false ls := ss.subLastSent[subid] if seqno >= ls { if seqno > ls { ss.subLastSent[subid] = seqno } updateLastSent = true } if updateLastSent { if _, err := ss.sqlStore.preparedStmts[sqlSubUpdateLastSent].Exec(seqno, ss.channelID, subid); err != nil { ss.Unlock() return sqlStmtError(sqlSubUpdateLastSent, err) } } _, err = ss.sqlStore.preparedStmts[sqlSubDeletePending].Exec(subid, seqno) if err != nil { err = sqlStmtError(sqlSubDeletePending, err) } } } ss.Unlock() return err }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/sqlstore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/sqlstore.go#L2015-L2020
go
train
// Flush implements the SubStore interface
func (ss *SQLSubStore) Flush() error
// Flush implements the SubStore interface func (ss *SQLSubStore) Flush() error
{ ss.Lock() err := ss.flush() ss.Unlock() return err }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/sqlstore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/sqlstore.go#L2136-L2147
go
train
// Close implements the SubStore interface
func (ss *SQLSubStore) Close() error
// Close implements the SubStore interface func (ss *SQLSubStore) Close() error
{ ss.Lock() if ss.closed { ss.Unlock() return nil } // Flush before switching the state to closed. err := ss.flush() ss.closed = true ss.Unlock() return err }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
util/channels.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/util/channels.go#L30-L57
go
train
// SendChannelsList sends the list of channels to the given subject, possibly
// splitting the list in several requests if it cannot fit in a single message.
func SendChannelsList(channels []string, sendInbox, replyInbox string, nc *nats.Conn, serverID string) error {
	// Since the NATS message payload is limited, we need to repeat
	// requests if all channels can't fit in a request.
	maxPayload := int(nc.MaxPayload())
	// Reuse this request object to send the (possibly many) protocol message(s).
	header := &spb.CtrlMsg{
		ServerID: serverID,
		MsgType:  spb.CtrlMsg_Partitioning,
	}
	// The Data field (a byte array) will require 1+len(array)+(encoded size of array).
	// To be conservative, let's just use a 8 bytes integer
	headerSize := header.Size() + 1 + 8
	var (
		bytes []byte // Reused buffer in which the request is to marshal info
		n     int    // Size of the serialized request in the above buffer
		count int    // Number of channels added to the request
	)
	// Each iteration encodes as many channels as fit in one message,
	// starting where the previous request left off.
	for start := 0; start != len(channels); start += count {
		bytes, n, count = encodeChannelsRequest(header, channels, bytes, headerSize, maxPayload, start)
		if count == 0 {
			// Not even one channel fit in the payload: give up.
			return errors.New("message payload too small to send channels list")
		}
		if err := nc.PublishRequest(sendInbox, replyInbox, bytes[:n]); err != nil {
			return err
		}
	}
	return nc.Flush()
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
util/channels.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/util/channels.go#L61-L80
go
train
// DecodeChannels decodes from the given byte array the list of channel names // and return them as an array of strings.
func DecodeChannels(data []byte) ([]string, error)
// DecodeChannels decodes from the given byte array the list of channel names // and return them as an array of strings. func DecodeChannels(data []byte) ([]string, error)
{ channels := []string{} pos := 0 for pos < len(data) { if pos+2 > len(data) { return nil, fmt.Errorf("unable to decode size, pos=%v len=%v", pos, len(data)) } cl := int(ByteOrder.Uint16(data[pos:])) pos += encodedChannelLen end := pos + cl if end > len(data) { return nil, fmt.Errorf("unable to decode channel, pos=%v len=%v max=%v (string=%v)", pos, cl, len(data), string(data[pos:])) } c := string(data[pos:end]) channels = append(channels, c) pos = end } return channels, nil }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
util/channels.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/util/channels.go#L87-L130
go
train
// encodeChannelsRequest adds as many channels as possible (based on the NATS
// max message payload) and returns a serialized request. The buffer `reqBytes`
// is passed (and returned) so that it can be reused if more than one request
// is needed. This call will expand the size as needed. The number of bytes
// used in this buffer is returned along with the number of encoded channels.
func encodeChannelsRequest(request *spb.CtrlMsg, channels []string, reqBytes []byte, headerSize, maxPayload, start int) ([]byte, int, int) {
	// Each string will be encoded in the form:
	// - length (2 bytes)
	// - string as a byte array.
	var _encodedSize = [encodedChannelLen]byte{}
	encodedSize := _encodedSize[:]
	// We are going to encode the channels in this buffer
	chanBuf := make([]byte, 0, maxPayload)
	var (
		count         int               // Number of encoded channels
		estimatedSize = headerSize      // This is not an overestimation of the total size
		numBytes      int               // This is what is returned by MarshalTo
	)
	for i := start; i < len(channels); i++ {
		c := []byte(channels[i])
		cl := len(c)
		needed := encodedChannelLen + cl
		// Check if adding this channel to current buffer makes us go over
		if estimatedSize+needed > maxPayload {
			// Special case if we cannot even encode 1 channel
			if count == 0 {
				return reqBytes, 0, 0
			}
			break
		}
		// Encoding the channel here. First the size, then the channel name as byte array.
		ByteOrder.PutUint16(encodedSize, uint16(cl))
		chanBuf = append(chanBuf, encodedSize...)
		chanBuf = append(chanBuf, c...)
		count++
		estimatedSize += needed
	}
	if count > 0 {
		request.Data = chanBuf
		// Grow (if needed) the reusable marshaling buffer, then serialize.
		reqBytes = EnsureBufBigEnough(reqBytes, estimatedSize)
		numBytes, _ = request.MarshalTo(reqBytes)
		if numBytes > maxPayload {
			// Should never happen given the estimate above; a bug if it does.
			panic(fmt.Errorf("request size is %v (max payload is %v)", numBytes, maxPayload))
		}
	}
	return reqBytes, numBytes, count
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/ft.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/ft.go#L56-L102
go
train
// ftStart will return only when this server has become active
// and was able to get the store's exclusive lock.
// This is running in a separate go-routine so if server state
// changes, take care of using the server's lock.
func (s *StanServer) ftStart() (retErr error) {
	s.log.Noticef("Starting in standby mode")
	// For tests purposes
	if ftPauseBeforeFirstAttempt {
		<-ftPauseCh
	}
	// Backoff printer so repeated "lock not acquired" notices don't flood the log.
	print, _ := util.NewBackoffTimeCheck(time.Second, 2, time.Minute)
	for {
		select {
		case <-s.ftQuit:
			// we are done
			return nil
		case <-s.ftHBCh:
			// Received a heartbeat from the active server:
			// go back to the beginning of the for loop
			continue
		case <-time.After(s.ftHBMissedInterval):
			// Missed heartbeats: fall through and try to lock the store
		}
		locked, err := s.ftGetStoreLock()
		if err != nil {
			// Log the error, but go back and wait for the next interval and
			// try again. It is possible that the error resolves (for instance
			// the connection to the database is restored - for SQL stores).
			s.log.Errorf("ft: error attempting to get the store lock: %v", err)
			continue
		} else if locked {
			break
		}
		// Here, we did not get the lock, print and go back to standby.
		// Use some backoff for the printing to not fill up the log
		if print.Ok() {
			s.log.Noticef("ft: unable to get store lock at this time, going back to standby")
		}
	}
	// Capture the time this server activated. It will be used in case several
	// servers claim to be active. Not bulletproof since there could be clock
	// differences, etc... but when more than one server has acquired the store
	// lock it means we are already in trouble, so just trying to minimize the
	// possible store corruption...
	activationTime := time.Now()
	s.log.Noticef("Server is active")
	s.startGoRoutine(func() {
		s.ftSendHBLoop(activationTime)
	})
	// Start the recovery process, etc..
	return s.start(FTActive)
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/ft.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/ft.go#L106-L123
go
train
// ftGetStoreLock returns true if the server was able to get the // exclusive store lock, false othewise, or if there was a fatal error doing so.
func (s *StanServer) ftGetStoreLock() (bool, error)
// ftGetStoreLock returns true if the server was able to get the // exclusive store lock, false othewise, or if there was a fatal error doing so. func (s *StanServer) ftGetStoreLock() (bool, error)
{ // Normally, the store would be set early and is immutable, but some // FT tests do set a mock store after the server is created, so use // locking here to avoid race reports. s.mu.Lock() store := s.store s.mu.Unlock() if ok, err := store.GetExclusiveLock(); !ok || err != nil { // We got an error not related to locking (could be not supported, // permissions error, file not reachable, etc..) if err != nil { return false, fmt.Errorf("ft: fatal error getting the store lock: %v", err) } // If ok is false, it means that we did not get the lock. return false, nil } return true, nil }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/ft.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/ft.go#L128-L181
go
train
// ftSendHBLoop is used by an active server to send HB to the FT subject. // Standby servers receiving those HBs do not attempt to lock the store. // When they miss HBs, they will.
func (s *StanServer) ftSendHBLoop(activationTime time.Time)
// ftSendHBLoop is used by an active server to send HB to the FT subject. // Standby servers receiving those HBs do not attempt to lock the store. // When they miss HBs, they will. func (s *StanServer) ftSendHBLoop(activationTime time.Time)
{ // Release the wait group on exit defer s.wg.Done() timeAsBytes, _ := activationTime.MarshalBinary() ftHB := &spb.CtrlMsg{ MsgType: spb.CtrlMsg_FTHeartbeat, ServerID: s.serverID, Data: timeAsBytes, } ftHBBytes, _ := ftHB.Marshal() print, _ := util.NewBackoffTimeCheck(time.Second, 2, time.Minute) for { if err := s.ftnc.Publish(s.ftSubject, ftHBBytes); err != nil { if print.Ok() { s.log.Errorf("Unable to send FT heartbeat: %v", err) } } startSelect: select { case m := <-s.ftHBCh: hb := spb.CtrlMsg{} if err := hb.Unmarshal(m.Data); err != nil { goto startSelect } // Ignore our own message if hb.MsgType != spb.CtrlMsg_FTHeartbeat || hb.ServerID == s.serverID { goto startSelect } // Another server claims to be active peerActivationTime := time.Time{} if err := peerActivationTime.UnmarshalBinary(hb.Data); err != nil { s.log.Errorf("Error decoding activation time: %v", err) } else { // Step down if the peer's activation time is earlier than ours. err := fmt.Errorf("ft: serverID %q claims to be active", hb.ServerID) if peerActivationTime.Before(activationTime) { err = fmt.Errorf("%s, aborting", err) if ftNoPanic { s.setLastError(err) return } panic(err) } else { s.log.Errorf(err.Error()) } } case <-time.After(s.ftHBInterval): // We'll send the ping at the top of the for loop case <-s.ftQuit: return } } }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/ft.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/ft.go#L187-L225
go
train
// ftSetup checks that all required FT parameters have been specified and // create the channel required for shutdown. // Note that FTGroupName has to be set before server invokes this function, // so this parameter is not checked here.
func (s *StanServer) ftSetup() error
// ftSetup checks that all required FT parameters have been specified and // create the channel required for shutdown. // Note that FTGroupName has to be set before server invokes this function, // so this parameter is not checked here. func (s *StanServer) ftSetup() error
{ // Check that store type is ok. So far only support for FileStore if s.opts.StoreType != stores.TypeFile && s.opts.StoreType != stores.TypeSQL { return fmt.Errorf("ft: only %v or %v stores supported in FT mode", stores.TypeFile, stores.TypeSQL) } // So far, those are not exposed to users, just used in tests. // Still make sure that the missed HB interval is > than the HB // interval. if ftHBMissedInterval < time.Duration(float64(ftHBInterval)*1.1) { return fmt.Errorf("ft: the missed heartbeat interval needs to be"+ " at least 10%% of the heartbeat interval (hb=%v missed hb=%v", ftHBInterval, ftHBMissedInterval) } // Set the HB and MissedHB intervals, using a bit of randomness rand.Seed(time.Now().UnixNano()) s.ftHBInterval = ftGetRandomInterval(ftHBInterval) s.ftHBMissedInterval = ftGetRandomInterval(ftHBMissedInterval) // Subscribe to FT subject s.ftSubject = fmt.Sprintf("%s.%s.%s", ftHBPrefix, s.opts.ID, s.opts.FTGroupName) s.ftHBCh = make(chan *nats.Msg) sub, err := s.ftnc.Subscribe(s.ftSubject, func(m *nats.Msg) { // Dropping incoming FT HBs is not crucial, we will then check for // store lock. select { case s.ftHBCh <- m: default: } }) if err != nil { return fmt.Errorf("ft: unable to subscribe on ft subject: %v", err) } // We don't want to cause possible slow consumer error sub.SetPendingLimits(-1, -1) // Create channel to notify FT go routine to quit. s.ftQuit = make(chan struct{}, 1) // Set the state as standby initially s.state = FTStandby return nil }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/ft.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/ft.go#L229-L236
go
train
// ftGetRandomInterval returns a random interval with at most +/- 10% // of the given interval.
func ftGetRandomInterval(interval time.Duration) time.Duration
// ftGetRandomInterval returns a random interval with at most +/- 10% // of the given interval. func ftGetRandomInterval(interval time.Duration) time.Duration
{ tenPercent := int(float64(interval) * 0.10) random := time.Duration(rand.Intn(tenPercent)) if rand.Intn(2) == 1 { return interval + random } return interval - random }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
util/lockfile_win.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/util/lockfile_win.go#L34-L58
go
train
// CreateLockFile attempt to lock the given file, creating it
// if necessary. On success, the file is returned, otherwise
// an error is returned.
// The file returned should be closed to release the lock
// quicker than if left to the operating system.
func CreateLockFile(file string) (LockFile, error) {
	fname, err := syscall.UTF16PtrFromString(file)
	if err != nil {
		return nil, err
	}
	// Open with exclusive access: a zero share mode makes any other
	// process's attempt to open the file fail while we hold the handle.
	f, err := syscall.CreateFile(fname,
		syscall.GENERIC_READ|syscall.GENERIC_WRITE,
		0, // dwShareMode: 0 means "Prevents other processes from opening a file or device if they request delete, read, or write access."
		nil,
		syscall.CREATE_ALWAYS,
		syscall.FILE_ATTRIBUTE_NORMAL,
		0,
	)
	if err != nil {
		// TODO: There HAS to be a better way, but I can't seem to
		// find how to get Windows error codes (also syscall.GetLastError()
		// returns nil here).
		// NOTE(review): matching on the error text is fragile and
		// locale-dependent — confirm against ERROR_SHARING_VIOLATION.
		if strings.Contains(err.Error(), "used by another process") {
			err = ErrUnableToLockNow
		}
		syscall.CloseHandle(f)
		return nil, err
	}
	return &lockFile{f: f}, nil
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
util/lockfile_win.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/util/lockfile_win.go#L61-L70
go
train
// Close implements the LockFile interface
func (lf *lockFile) Close() error {
	lf.Lock()
	defer lf.Unlock()
	// Already closed: nothing to do, and safe to call multiple times.
	if lf.f == syscall.InvalidHandle {
		return nil
	}
	err := syscall.CloseHandle(lf.f)
	// Mark as closed regardless of the CloseHandle outcome.
	lf.f = syscall.InvalidHandle
	return err
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
util/lockfile_win.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/util/lockfile_win.go#L73-L77
go
train
// IsClosed implements the LockFile interface
func (lf *lockFile) IsClosed() bool {
	lf.Lock()
	defer lf.Unlock()
	// The handle is reset to InvalidHandle by Close().
	return lf.f == syscall.InvalidHandle
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/client.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/client.go#L51-L58
go
train
// newClientStore creates a new clientStore instance using `store` as the backing storage.
func newClientStore(store stores.Store) *clientStore
// newClientStore creates a new clientStore instance using `store` as the backing storage. func newClientStore(store stores.Store) *clientStore
{ return &clientStore{ clients: make(map[string]*client), connIDs: make(map[string]*client), knownInvalid: make(map[string]struct{}), store: store, } }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/client.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/client.go#L62-L66
go
train
// getSubsCopy returns a copy of the client's subscribers array. // At least Read-lock must be held by the caller.
func (c *client) getSubsCopy() []*subState
// getSubsCopy returns a copy of the client's subscribers array. // At least Read-lock must be held by the caller. func (c *client) getSubsCopy() []*subState
{ subs := make([]*subState, len(c.subs)) copy(subs, c.subs) return subs }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/client.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/client.go#L76-L101
go
train
// Register a new client. Returns ErrInvalidClient if client is already registered.
func (cs *clientStore) register(info *spb.ClientInfo) (*client, error)
// Register a new client. Returns ErrInvalidClient if client is already registered. func (cs *clientStore) register(info *spb.ClientInfo) (*client, error)
{ cs.Lock() defer cs.Unlock() c := cs.clients[info.ID] if c != nil { return nil, ErrInvalidClient } sc, err := cs.store.AddClient(info) if err != nil { return nil, err } c = &client{info: sc, subs: make([]*subState, 0, 4)} cs.clients[c.info.ID] = c if len(c.info.ConnID) > 0 { cs.connIDs[string(c.info.ConnID)] = c } delete(cs.knownInvalid, getKnownInvalidKey(info.ID, info.ConnID)) if cs.waitOnRegister != nil { ch := cs.waitOnRegister[c.info.ID] if ch != nil { ch <- struct{}{} delete(cs.waitOnRegister, c.info.ID) } } return c, nil }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/client.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/client.go#L104-L127
go
train
// Unregister a client.
func (cs *clientStore) unregister(ID string) (*client, error)
// Unregister a client. func (cs *clientStore) unregister(ID string) (*client, error)
{ cs.Lock() defer cs.Unlock() c := cs.clients[ID] if c == nil { return nil, nil } c.Lock() if c.hbt != nil { c.hbt.Stop() c.hbt = nil } connID := c.info.ConnID c.Unlock() delete(cs.clients, ID) if len(connID) > 0 { delete(cs.connIDs, string(connID)) } if cs.waitOnRegister != nil { delete(cs.waitOnRegister, ID) } err := cs.store.DeleteClient(ID) return c, err }