repo
stringlengths
5
67
sha
stringlengths
40
40
path
stringlengths
4
234
url
stringlengths
85
339
language
stringclasses
6 values
split
stringclasses
3 values
doc
stringlengths
3
51.2k
sign
stringlengths
5
8.01k
problem
stringlengths
13
51.2k
output
stringlengths
0
3.87M
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L4442-L4453
go
train
// addSubscription adds `sub` to the client and store.
func (s *StanServer) addSubscription(ss *subStore, sub *subState) error
// addSubscription adds `sub` to the client and store. func (s *StanServer) addSubscription(ss *subStore, sub *subState) error
{ // Store in client if !s.clients.addSub(sub.ClientID, sub) { return fmt.Errorf("can't find clientID: %v", sub.ClientID) } // Store this subscription in subStore if err := ss.Store(sub); err != nil { s.clients.removeSub(sub.ClientID, sub) return err } return nil }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L4457-L4481
go
train
// updateDurable adds back `sub` to the client and updates the store. // No lock is needed for `sub` since it has just been created.
func (s *StanServer) updateDurable(ss *subStore, sub *subState) error
// updateDurable adds back `sub` to the client and updates the store. // No lock is needed for `sub` since it has just been created. func (s *StanServer) updateDurable(ss *subStore, sub *subState) error
{ // Reset the hasFailedHB boolean since it may have been set // if the client previously crashed and server set this // flag to its subs. sub.hasFailedHB = false // Store in the client if !s.clients.addSub(sub.ClientID, sub) { return fmt.Errorf("can't find clientID: %v", sub.ClientID) } // Update this subscription in the store if err := sub.store.UpdateSub(&sub.SubState); err != nil { return err } ss.Lock() // Do this only for durable subscribers (not durable queue subscribers). if sub.isDurableSubscriber() { // Add back into plain subscribers ss.psubs = append(ss.psubs, sub) } // And in ackInbox lookup map. ss.acks[sub.AckInbox] = sub ss.Unlock() return nil }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L4484-L4653
go
train
// processSub adds the subscription to the server.
//
// `c` may be nil, in which case the channel is looked up (or created) from
// sr.Subject. In cluster mode the caller supplies the subscription ID via
// `subID` so that IDs are stable across Raft replays; standalone callers
// pass 0. Returns the (possibly resumed) subState or an error.
func (s *StanServer) processSub(c *channel, sr *pb.SubscriptionRequest, ackInbox string, subID uint64) (*subState, error) {
	// If channel not provided, we have to look it up
	var err error
	if c == nil {
		c, err = s.lookupOrCreateChannel(sr.Subject)
		if err != nil {
			s.log.Errorf("Unable to create channel for subscription on %q", sr.Subject)
			return nil, err
		}
	}
	var (
		sub *subState
		ss  = c.ss
	)
	// Will be true for durable queue subscribers and durable subscribers alike.
	isDurable := false
	// Will be set to false for an existing durable subscriber or existing
	// queue group (durable or not).
	setStartPos := true
	// Check for durable queue subscribers
	if sr.QGroup != "" {
		if sr.DurableName != "" {
			// For queue subscribers, we prevent DurableName to contain
			// the ':' character, since we use it for the compound name.
			if strings.Contains(sr.DurableName, ":") {
				s.log.Errorf("[Client:%s] Invalid DurableName (%q) for queue subscriber from %s",
					sr.ClientID, sr.DurableName, sr.Subject)
				return nil, ErrInvalidDurName
			}
			isDurable = true
			// Make the queue group a compound name between durable name and q group.
			sr.QGroup = fmt.Sprintf("%s:%s", sr.DurableName, sr.QGroup)
			// Clear DurableName from this subscriber.
			sr.DurableName = ""
		}
		// Lookup for an existing group. Only interested in situation where
		// the group exist, but is empty and had a shadow subscriber.
		ss.RLock()
		qs := ss.qsubs[sr.QGroup]
		if qs != nil {
			qs.Lock()
			if qs.shadow != nil {
				sub = qs.shadow
				qs.shadow = nil
				qs.subs = append(qs.subs, sub)
			}
			qs.Unlock()
			setStartPos = false
		}
		ss.RUnlock()
	} else if sr.DurableName != "" {
		// Check for DurableSubscriber status
		if sub = ss.LookupByDurable(durableKey(sr)); sub != nil {
			sub.RLock()
			clientID := sub.ClientID
			sub.RUnlock()
			// A non-empty ClientID means the durable is still bound to a
			// live client: reject the duplicate registration.
			if clientID != "" {
				s.log.Errorf("[Client:%s] Duplicate durable subscription registration", sr.ClientID)
				return nil, ErrDupDurable
			}
			setStartPos = false
		}
		isDurable = true
	}
	var (
		subStartTrace string
		subIsNew      bool
	)
	if sub != nil {
		// ok we have a remembered subscription
		sub.Lock()
		// Set ClientID and new AckInbox but leave LastSent to the
		// remembered value.
		sub.AckInbox = ackInbox
		sub.ClientID = sr.ClientID
		sub.Inbox = sr.Inbox
		sub.IsDurable = true
		// Use some of the new options, but ignore the ones regarding start position
		sub.MaxInFlight = sr.MaxInFlight
		sub.AckWaitInSecs = sr.AckWaitInSecs
		sub.ackWait = computeAckWait(sr.AckWaitInSecs)
		sub.stalled = false
		if len(sub.acksPending) > 0 {
			// We have a durable with pending messages, set newOnHold
			// until we have performed the initial redelivery.
			sub.newOnHold = true
			if !s.isClustered || s.isLeader() {
				s.setupAckTimer(sub, sub.ackWait)
			}
		}
		// Clear the IsClosed flags that were set during a Close()
		sub.IsClosed = false
		sub.Unlock()
		// Case of restarted durable subscriber, or first durable queue
		// subscriber re-joining a group that was left with pending messages.
		err = s.updateDurable(ss, sub)
	} else {
		subIsNew = true
		// Create sub here (can be plain, durable or queue subscriber)
		sub = &subState{
			SubState: spb.SubState{
				ClientID:      sr.ClientID,
				QGroup:        sr.QGroup,
				Inbox:         sr.Inbox,
				AckInbox:      ackInbox,
				MaxInFlight:   sr.MaxInFlight,
				AckWaitInSecs: sr.AckWaitInSecs,
				DurableName:   sr.DurableName,
				IsDurable:     isDurable,
			},
			subject:     sr.Subject,
			ackWait:     computeAckWait(sr.AckWaitInSecs),
			acksPending: make(map[uint64]int64),
			store:       c.store.Subs,
		}
		if setStartPos {
			// set the start sequence of the subscriber.
			var lastSent uint64
			subStartTrace, lastSent, err = s.setSubStartSequence(c, sr)
			if err == nil {
				sub.LastSent = lastSent
			}
		}
		if err == nil {
			// add the subscription to stan.
			// In cluster mode, the server decides of the subscription ID
			// (so that subscriptions have the same ID on replay). So
			// set it prior to this call.
			sub.ID = subID
			err = s.addSubscription(ss, sub)
			if err == nil && subID > 0 {
				ss.Lock()
				// Keep nextSubID monotonically increasing past any
				// replayed/replicated subID.
				if subID >= c.nextSubID {
					c.nextSubID = subID + 1
				}
				ss.Unlock()
			}
		}
	}
	if err == nil && (!s.isClustered || s.isLeader()) {
		err = sub.startAckSub(s.nca, s.processAckMsg)
		if err == nil {
			// Need to make sure that this subscription is processed by
			// NATS Server before sending response (since we use different
			// connection to send the response)
			s.nca.Flush()
		}
	}
	if err != nil {
		// Try to undo what has been done.
		s.closeMu.Lock()
		ss.Remove(c, sub, false)
		s.closeMu.Unlock()
		s.log.Errorf("Unable to add subscription for %s: %v", sr.Subject, err)
		return nil, err
	}
	if s.debug {
		traceCtx := subStateTraceCtx{clientID: sr.ClientID, isNew: subIsNew, startTrace: subStartTrace}
		traceSubState(s.log, sub, &traceCtx)
	}
	s.monMu.Lock()
	s.numSubs++
	s.monMu.Unlock()
	return sub, nil
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L4656-L4795
go
train
// processSubscriptionRequest will process a subscription request.
//
// Validates the request (client ID, AckWait, MaxInFlight, start position,
// channel name), then creates the subscription — through Raft when
// clustered, directly otherwise — and publishes the SubscriptionResponse.
func (s *StanServer) processSubscriptionRequest(m *nats.Msg) {
	sr := &pb.SubscriptionRequest{}
	err := sr.Unmarshal(m.Data)
	if err != nil {
		s.log.Errorf("Invalid Subscription request from %s: %v", m.Subject, err)
		s.sendSubscriptionResponseErr(m.Reply, ErrInvalidSubReq)
		return
	}
	// ClientID must not be empty.
	if sr.ClientID == "" {
		s.log.Errorf("Missing ClientID in subscription request from %s", m.Subject)
		s.sendSubscriptionResponseErr(m.Reply, ErrMissingClient)
		return
	}
	// AckWait must be >= 1s (except in test mode where negative value means that
	// duration should be interpreted as Milliseconds)
	if !testAckWaitIsInMillisecond && sr.AckWaitInSecs <= 0 {
		s.log.Errorf("[Client:%s] Invalid AckWait (%v) in subscription request from %s",
			sr.ClientID, sr.AckWaitInSecs, m.Subject)
		s.sendSubscriptionResponseErr(m.Reply, ErrInvalidAckWait)
		return
	}
	// MaxInflight must be >= 1
	if sr.MaxInFlight <= 0 {
		s.log.Errorf("[Client:%s] Invalid MaxInflight (%v) in subscription request from %s",
			sr.ClientID, sr.MaxInFlight, m.Subject)
		s.sendSubscriptionResponseErr(m.Reply, ErrInvalidMaxInflight)
		return
	}
	// StartPosition between StartPosition_NewOnly and StartPosition_First
	if sr.StartPosition < pb.StartPosition_NewOnly || sr.StartPosition > pb.StartPosition_First {
		s.log.Errorf("[Client:%s] Invalid StartPosition (%v) in subscription request from %s",
			sr.ClientID, int(sr.StartPosition), m.Subject)
		s.sendSubscriptionResponseErr(m.Reply, ErrInvalidStart)
		return
	}
	// Make sure subject is valid
	if !util.IsChannelNameValid(sr.Subject, false) {
		s.log.Errorf("[Client:%s] Invalid channel %q in subscription request from %s",
			sr.ClientID, sr.Subject, m.Subject)
		s.sendSubscriptionResponseErr(m.Reply, ErrInvalidSubject)
		return
	}
	// In partitioning mode, do not fail the subscription request
	// if this server does not have the channel. It could be that there
	// is another server out there that will accept the subscription.
	// If not, the client will get a subscription request timeout.
	if s.partitions != nil {
		if r := s.partitions.sl.Match(sr.Subject); len(r) == 0 {
			return
		}
		// Also check that the connection request has already
		// been processed. Check clientCheckTimeout doc for details.
		if !s.clients.isValidWithTimeout(sr.ClientID, nil, clientCheckTimeout) {
			s.log.Errorf("[Client:%s] Rejecting subscription on %q: connection not created yet",
				sr.ClientID, sr.Subject)
			s.sendSubscriptionResponseErr(m.Reply, ErrInvalidSubReq)
			return
		}
	}
	var (
		sub      *subState
		ackInbox = nats.NewInbox()
	)
	// Lookup/create the channel and prevent this channel to be deleted
	// until we are done with this subscription. This will also stop
	// the delete timer if one was set.
	c, preventDelete, err := s.lookupOrCreateChannelPreventDelete(sr.Subject)
	// Immediately register a defer action to turn off preventing the
	// deletion of the channel if it was turned on
	if preventDelete {
		defer s.channels.turnOffPreventDelete(c)
	}
	if err == nil {
		// If clustered, thread operations through Raft.
		if s.isClustered {
			// For start requests other than SequenceStart, we MUST convert the request
			// to a SequenceStart, otherwise, during the replay on server restart, the
			// subscription would be created with whatever is the seq at that time.
			// For instance, a request with new-only could originally be created with
			// the current max seq of 100, but when the cluster is restarted and sub
			// request is replayed, the channel's current max may be 200, which
			// would cause the subscription to be created at start 200, which could cause
			// subscription to miss all messages in between.
			if sr.StartPosition != pb.StartPosition_SequenceStart {
				// Figure out what the sequence should be based on original StartPosition
				// request.
				var seq uint64
				_, seq, err = s.setSubStartSequence(c, sr)
				if err == nil {
					// Convert to a SequenceStart start position with the proper sequence
					// number. Since setSubStartSequence() is returning what should be
					// the lastSent, we need to bump the count by 1.
					sr.StartPosition = pb.StartPosition_SequenceStart
					sr.StartSequence = seq + 1
				}
			}
			if err == nil {
				c.ss.Lock()
				subID := c.nextSubID
				c.ss.Unlock()
				sub, err = s.replicateSub(sr, ackInbox, subID)
			}
		} else {
			sub, err = s.processSub(c, sr, ackInbox, 0)
		}
	}
	if err != nil {
		s.channels.maybeStartChannelDeleteTimer(sr.Subject, c)
		s.sendSubscriptionResponseErr(m.Reply, err)
		return
	}
	// In case this is a durable, sub already exists so we need to protect access
	sub.Lock()
	// Create a non-error response
	resp := &pb.SubscriptionResponse{AckInbox: sub.AckInbox}
	b, _ := resp.Marshal()
	s.ncs.Publish(m.Reply, b)
	// Capture under lock here
	qs := sub.qstate
	// Now that we have sent the response, we set the subscription to initialized,
	// which allows messages to be sent to it - but not sooner (which could happen
	// without this since the subscription is added to the system earlier and
	// incoming messages to the channel would trigger delivery).
	sub.initialized = true
	sub.Unlock()
	s.subStartCh <- &subStartInfo{c: c, sub: sub, qs: qs, isDurable: sub.IsDurable}
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L4879-L4896
go
train
// processAckMsg processes inbound acks from clients for delivered messages.
func (s *StanServer) processAckMsg(m *nats.Msg)
// processAckMsg processes inbound acks from clients for delivered messages. func (s *StanServer) processAckMsg(m *nats.Msg)
{ ack := &pb.Ack{} if ack.Unmarshal(m.Data) != nil { if s.processCtrlMsg(m) { return } } c := s.channels.get(ack.Subject) if c == nil { s.log.Errorf("Unable to process ack seq=%d, channel %s not found", ack.Sequence, ack.Subject) return } sub := c.ss.LookupByAckInbox(m.Subject) if sub == nil { return } s.processAck(c, sub, ack.Sequence, true) }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L4899-L4994
go
train
// processAck processes an ack and if needed sends more messages.
//
// `fromUser` is true when the ack came from a client (vs internally
// generated); lock order is queue-state lock before sub lock.
func (s *StanServer) processAck(c *channel, sub *subState, sequence uint64, fromUser bool) {
	var stalled bool
	// This is immutable, so can grab outside of sub's lock.
	// If we have a queue group, we want to grab queue's lock before
	// sub's lock.
	qs := sub.qstate
	if qs != nil {
		qs.Lock()
	}
	sub.Lock()
	// Helper persisting the ack for a given sub; returns false on store error.
	persistAck := func(aSub *subState) bool {
		if err := aSub.store.AckSeqPending(aSub.ID, sequence); err != nil {
			s.log.Errorf("[Client:%s] Unable to persist ack for subid=%d, subject=%s, seq=%d, err=%v",
				aSub.ClientID, aSub.ID, aSub.subject, sequence, err)
			return false
		}
		return true
	}
	if _, found := sub.acksPending[sequence]; found {
		// If in cluster mode, schedule replication of the ack.
		if s.isClustered {
			s.collectSentOrAck(sub, replicateAck, sequence)
		}
		if s.trace && fromUser {
			s.log.Tracef("[Client:%s] Processing ack for subid=%d, subject=%s, seq=%d",
				sub.ClientID, sub.ID, sub.subject, sequence)
		}
		if !persistAck(sub) {
			sub.Unlock()
			if qs != nil {
				qs.Unlock()
			}
			return
		}
		delete(sub.acksPending, sequence)
	} else if qs != nil && fromUser {
		// For queue members, if this is not an internally generated ACK
		// and we don't find the sequence in this sub's pending, we are
		// going to look for it in other members and process it if found.
		sub.Unlock()
		for _, qsub := range qs.subs {
			if qsub == sub {
				continue
			}
			qsub.Lock()
			if _, found := qsub.acksPending[sequence]; found {
				delete(qsub.acksPending, sequence)
				persistAck(qsub)
				qsub.Unlock()
				break
			}
			qsub.Unlock()
		}
		sub.Lock()
		// Proceed with original sub (regardless if member was found
		// or not) so that server sends more messages if needed.
	}
	if sub.stalled && int32(len(sub.acksPending)) < sub.MaxInFlight {
		// For queue, we must not check the queue stalled count here. The queue
		// as a whole may not be stalled, yet, if this sub was stalled, it is
		// not now since the pending acks is below MaxInflight. The server should
		// try to send available messages.
		// It works also if the queue *was* stalled (all members were stalled),
		// then this member is no longer stalled, which release the queue.
		// Trigger send of available messages by setting this to true.
		stalled = true
		// Clear the stalled flag from this sub
		sub.stalled = false
		// .. and update the queue's stalled members count if this is a queue sub.
		if qs != nil && qs.stalledSubCount > 0 {
			qs.stalledSubCount--
		}
	}
	sub.Unlock()
	if qs != nil {
		qs.Unlock()
	}
	// Leave the reset/cancel of the ackTimer to the redelivery cb.
	if !stalled {
		return
	}
	if sub.qstate != nil {
		s.sendAvailableMessagesToQueue(c, sub.qstate)
	} else {
		s.sendAvailableMessages(c, sub)
	}
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L4997-L5023
go
train
// Send any messages that are ready to be sent that have been queued to the group.
func (s *StanServer) sendAvailableMessagesToQueue(c *channel, qs *queueState)
// Send any messages that are ready to be sent that have been queued to the group. func (s *StanServer) sendAvailableMessagesToQueue(c *channel, qs *queueState)
{ if c == nil || qs == nil { return } qs.Lock() // Short circuit if no active members if len(qs.subs) == 0 { qs.Unlock() return } // If redelivery at startup in progress, don't attempt to deliver new messages if qs.newOnHold { qs.Unlock() return } for nextSeq := qs.lastSent + 1; qs.stalledSubCount < len(qs.subs); nextSeq++ { nextMsg := s.getNextMsg(c, &nextSeq, &qs.lastSent) if nextMsg == nil { break } if _, sent, sendMore := s.sendMsgToQueueGroup(qs, nextMsg, honorMaxInFlight); !sent || !sendMore { break } } qs.Unlock() }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L5026-L5038
go
train
// Send any messages that are ready to be sent that have been queued.
func (s *StanServer) sendAvailableMessages(c *channel, sub *subState)
// Send any messages that are ready to be sent that have been queued. func (s *StanServer) sendAvailableMessages(c *channel, sub *subState)
{ sub.Lock() for nextSeq := sub.LastSent + 1; !sub.stalled; nextSeq++ { nextMsg := s.getNextMsg(c, &nextSeq, &sub.LastSent) if nextMsg == nil { break } if sent, sendMore := s.sendMsgToSub(sub, nextMsg, honorMaxInFlight); !sent || !sendMore { break } } sub.Unlock() }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L5070-L5148
go
train
// Setup the start position for the subscriber.
//
// Returns a debug trace string (empty unless s.debug), the computed
// `lastSent` (the subscriber will receive lastSent+1 first), and any
// store error.
func (s *StanServer) setSubStartSequence(c *channel, sr *pb.SubscriptionRequest) (string, uint64, error) {
	lastSent := uint64(0)
	debugTrace := ""
	// In all start position cases, if there is no message, ensure
	// lastSent stays at 0.
	switch sr.StartPosition {
	case pb.StartPosition_NewOnly:
		var err error
		lastSent, err = c.store.Msgs.LastSequence()
		if err != nil {
			return "", 0, err
		}
		if s.debug {
			debugTrace = fmt.Sprintf("new-only, seq=%d", lastSent+1)
		}
	case pb.StartPosition_LastReceived:
		lastSeq, err := c.store.Msgs.LastSequence()
		if err != nil {
			return "", 0, err
		}
		if lastSeq > 0 {
			lastSent = lastSeq - 1
		}
		if s.debug {
			debugTrace = fmt.Sprintf("last message, seq=%d", lastSent+1)
		}
	case pb.StartPosition_TimeDeltaStart:
		startTime := time.Now().UnixNano() - sr.StartTimeDelta
		// If there is no message, seq will be 0.
		seq, err := c.store.Msgs.GetSequenceFromTimestamp(startTime)
		if err != nil {
			return "", 0, err
		}
		if seq > 0 {
			// If the time delta is in the future relative to the last
			// message in the log, 'seq' will be equal to last sequence + 1,
			// so this would translate to "new only" semantic.
			lastSent = seq - 1
		}
		if s.debug {
			debugTrace = fmt.Sprintf("from time time='%v' seq=%d", time.Unix(0, startTime), lastSent+1)
		}
	case pb.StartPosition_SequenceStart:
		// If there is no message, firstSeq and lastSeq will be equal to 0.
		firstSeq, lastSeq, err := c.store.Msgs.FirstAndLastSequence()
		if err != nil {
			return "", 0, err
		}
		// StartSequence is an uint64, so can't be lower than 0.
		if sr.StartSequence < firstSeq {
			// That translates to sending the first message available.
			lastSent = firstSeq - 1
		} else if sr.StartSequence > lastSeq {
			// That translates to "new only"
			lastSent = lastSeq
		} else if sr.StartSequence > 0 {
			// That translates to sending the message with StartSequence
			// sequence number.
			lastSent = sr.StartSequence - 1
		}
		if s.debug {
			debugTrace = fmt.Sprintf("from sequence, asked_seq=%d actual_seq=%d", sr.StartSequence, lastSent+1)
		}
	case pb.StartPosition_First:
		firstSeq, err := c.store.Msgs.FirstSequence()
		if err != nil {
			return "", 0, err
		}
		if firstSeq > 0 {
			lastSent = firstSeq - 1
		}
		if s.debug {
			debugTrace = fmt.Sprintf("from beginning, seq=%d", lastSent+1)
		}
	}
	return debugTrace, lastSent, nil
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L5153-L5160
go
train
// startGoRoutine starts the given function as a go routine if and only if // the server was not shutdown at that time. This is required because // we cannot increment the wait group after the shutdown process has started.
func (s *StanServer) startGoRoutine(f func())
// startGoRoutine starts the given function as a go routine if and only if // the server was not shutdown at that time. This is required because // we cannot increment the wait group after the shutdown process has started. func (s *StanServer) startGoRoutine(f func())
{ s.mu.Lock() if !s.shutdown { s.wg.Add(1) go f() } s.mu.Unlock() }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L5163-L5167
go
train
// ClusterID returns the NATS Streaming Server's ID.
func (s *StanServer) ClusterID() string
// ClusterID returns the NATS Streaming Server's ID. func (s *StanServer) ClusterID() string
{ s.mu.RLock() defer s.mu.RUnlock() return s.info.ClusterID }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L5170-L5174
go
train
// State returns the state of this server.
func (s *StanServer) State() State
// State returns the state of this server. func (s *StanServer) State() State
{ s.mu.RLock() defer s.mu.RUnlock() return s.state }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L5179-L5185
go
train
// setLastError sets the last fatal error that occurred. This is
// used in case of an async error that cannot directly be reported
// to the user.
func (s *StanServer) setLastError(err error) {
	s.mu.Lock()
	s.lastError = err
	s.state = Failed
	s.mu.Unlock()
	// Fatalf is intentionally called after Unlock: if the logger exits
	// the process, the lock must not still be held.
	s.log.Fatalf("%v", err)
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L5188-L5192
go
train
// LastError returns the last fatal error the server experienced.
func (s *StanServer) LastError() error
// LastError returns the last fatal error the server experienced. func (s *StanServer) LastError() error
{ s.mu.RLock() defer s.mu.RUnlock() return s.lastError }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L5195-L5289
go
train
// Shutdown will close our NATS connection and shutdown any embedded NATS server.
//
// Idempotent: subsequent calls return immediately. Teardown order matters:
// stop loops under lock, wait for the IO loop, shut down Raft, then close
// stores/connections/embedded server, and finally wait for go-routines.
func (s *StanServer) Shutdown() {
	s.log.Noticef("Shutting down.")
	s.mu.Lock()
	if s.shutdown {
		s.mu.Unlock()
		return
	}
	close(s.shutdownCh)
	// Allows Shutdown() to be idempotent
	s.shutdown = true
	// Change the state too
	s.state = Shutdown
	// We need to make sure that the storeIOLoop returns before
	// closing the Store
	waitForIOStoreLoop := true
	// Capture under lock
	store := s.store
	ns := s.natsServer
	// Do not close and nil the connections here, they are used in many places
	// without locking. Once closed, s.nc.xxx() calls will simply fail, but
	// we won't panic.
	ncs := s.ncs
	ncr := s.ncr
	ncsr := s.ncsr
	nc := s.nc
	ftnc := s.ftnc
	nca := s.nca
	// Stop processing subscriptions start requests
	s.subStartQuit <- struct{}{}
	if s.ioChannel != nil {
		// Notify the IO channel that we are shutting down
		close(s.ioChannelQuit)
	} else {
		waitForIOStoreLoop = false
	}
	// In case we are running in FT mode.
	if s.ftQuit != nil {
		s.ftQuit <- struct{}{}
	}
	// In case we are running in Partitioning mode
	if s.partitions != nil {
		s.partitions.shutdown()
	}
	s.mu.Unlock()
	// Make sure the StoreIOLoop returns before closing the Store
	if waitForIOStoreLoop {
		s.ioChannelWG.Wait()
	}
	// Close Raft group before closing store.
	if s.raft != nil {
		if err := s.raft.shutdown(); err != nil {
			s.log.Errorf("Failed to stop Raft node: %v", err)
		}
	}
	// Close/Shutdown resources. Note that unless one instantiates StanServer
	// directly (instead of calling RunServer() and the like), these should
	// not be nil.
	if store != nil {
		store.Close()
	}
	if ncs != nil {
		ncs.Close()
	}
	if ncr != nil {
		ncr.Close()
	}
	if ncsr != nil {
		ncsr.Close()
	}
	if nc != nil {
		nc.Close()
	}
	if ftnc != nil {
		ftnc.Close()
	}
	if nca != nil {
		nca.Close()
	}
	if ns != nil {
		ns.Shutdown()
	}
	// Wait for go-routines to return
	s.wg.Wait()
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
logger/logger.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/logger/logger.go#L48-L56
go
train
// SetLogger sets the logger, debug and trace
func (s *StanLogger) SetLogger(log Logger, logtime, debug, trace bool, logfile string)
// SetLogger sets the logger, debug and trace func (s *StanLogger) SetLogger(log Logger, logtime, debug, trace bool, logfile string)
{ s.mu.Lock() s.log = log s.ltime = logtime s.debug = debug s.trace = trace s.lfile = logfile s.mu.Unlock() }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
logger/logger.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/logger/logger.go#L59-L64
go
train
// GetLogger returns the logger
func (s *StanLogger) GetLogger() Logger
// GetLogger returns the logger func (s *StanLogger) GetLogger() Logger
{ s.mu.RLock() l := s.log s.mu.RUnlock() return l }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
logger/logger.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/logger/logger.go#L68-L86
go
train
// ReopenLogFile closes and reopen the logfile.
// Does nothing if the logger is not a file based.
//
// Note: Noticef/Errorf are deliberately called after Unlock — they
// re-acquire the logger's lock internally.
func (s *StanLogger) ReopenLogFile() {
	s.mu.Lock()
	if s.lfile == "" {
		s.mu.Unlock()
		s.Noticef("File log re-open ignored, not a file logger")
		return
	}
	if l, ok := s.log.(io.Closer); ok {
		if err := l.Close(); err != nil {
			s.mu.Unlock()
			s.Errorf("Unable to close logger: %v", err)
			return
		}
	}
	fileLog := natsdLogger.NewFileLogger(s.lfile, s.ltime, s.debug, s.trace, true)
	s.log = fileLog
	s.mu.Unlock()
	s.Noticef("File log re-opened")
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
logger/logger.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/logger/logger.go#L89-L96
go
train
// Close closes this logger, releasing possible held resources.
func (s *StanLogger) Close() error
// Close closes this logger, releasing possible held resources. func (s *StanLogger) Close() error
{ s.mu.Lock() defer s.mu.Unlock() if l, ok := s.log.(io.Closer); ok { return l.Close() } return nil }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
logger/logger.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/logger/logger.go#L106-L110
go
train
// Errorf logs an error
func (s *StanLogger) Errorf(format string, v ...interface{})
// Errorf logs an error func (s *StanLogger) Errorf(format string, v ...interface{})
{ s.executeLogCall(func(log Logger, format string, v ...interface{}) { log.Errorf(format, v...) }, format, v...) }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
logger/logger.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/logger/logger.go#L120-L127
go
train
// Debugf logs a debug statement
func (s *StanLogger) Debugf(format string, v ...interface{})
// Debugf logs a debug statement func (s *StanLogger) Debugf(format string, v ...interface{})
{ s.executeLogCall(func(log Logger, format string, v ...interface{}) { // This is running under the protection of StanLogging's lock if s.debug { log.Debugf(format, v...) } }, format, v...) }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
logger/logger.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/logger/logger.go#L130-L136
go
train
// Tracef logs a trace statement
func (s *StanLogger) Tracef(format string, v ...interface{})
// Tracef logs a trace statement func (s *StanLogger) Tracef(format string, v ...interface{})
{ s.executeLogCall(func(logger Logger, format string, v ...interface{}) { if s.trace { logger.Tracef(format, v...) } }, format, v...) }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
logger/logger.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/logger/logger.go#L139-L143
go
train
// Warnf logs a warning statement
func (s *StanLogger) Warnf(format string, v ...interface{})
// Warnf logs a warning statement func (s *StanLogger) Warnf(format string, v ...interface{})
{ s.executeLogCall(func(logger Logger, format string, v ...interface{}) { logger.Warnf(format, v...) }, format, v...) }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/signal.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/signal.go#L33-L53
go
train
// Signal Handling
func (s *StanServer) handleSignals()
// Signal Handling func (s *StanServer) handleSignals()
{ c := make(chan os.Signal, 1) signal.Notify(c, syscall.SIGINT, syscall.SIGTERM, syscall.SIGUSR1, syscall.SIGHUP) go func() { for sig := range c { // Notify will relay only the signals that we have // registered, so we don't need a "default" in the // switch statement. switch sig { case syscall.SIGINT, syscall.SIGTERM: s.Shutdown() os.Exit(0) case syscall.SIGUSR1: // File log re-open for rotating file logs. s.log.ReopenLogFile() case syscall.SIGHUP: // Ignore for now } } }() }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/service.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/service.go#L24-L26
go
train
// Run starts the NATS Streaming server. This wrapper function allows Windows to add a
// hook for running NATS Streaming as a service.
func Run(sOpts *Options, nOpts *natsd.Options) (*StanServer, error) {
	return RunServerWithOpts(sOpts, nOpts)
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L210-L218
go
train
// BufferSize is a FileStore option that sets the size of the buffer used // during store writes. This can help improve write performance.
func BufferSize(size int) FileStoreOption
// BufferSize is a FileStore option that sets the size of the buffer used // during store writes. This can help improve write performance. func BufferSize(size int) FileStoreOption
{ return func(o *FileStoreOptions) error { if size < 0 { return fmt.Errorf("buffer size value must be a positive number") } o.BufferSize = size return nil } }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L222-L227
go
train
// CompactEnabled is a FileStore option that enables or disables file compaction. // The value false will disable compaction.
func CompactEnabled(enabled bool) FileStoreOption
// CompactEnabled is a FileStore option that enables or disables file compaction. // The value false will disable compaction. func CompactEnabled(enabled bool) FileStoreOption
{ return func(o *FileStoreOptions) error { o.CompactEnabled = enabled return nil } }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L232-L240
go
train
// CompactInterval is a FileStore option that defines the minimum compaction interval. // Compaction is not timer based, but instead when things get "deleted". This value // prevents compaction to happen too often.
func CompactInterval(seconds int) FileStoreOption
// CompactInterval is a FileStore option that defines the minimum compaction interval. // Compaction is not timer based, but instead when things get "deleted". This value // prevents compaction to happen too often. func CompactInterval(seconds int) FileStoreOption
{ return func(o *FileStoreOptions) error { if seconds <= 0 { return fmt.Errorf("compact interval value must at least be 1 seconds") } o.CompactInterval = seconds return nil } }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L246-L254
go
train
// CompactFragmentation is a FileStore option that defines the fragmentation ratio // below which compaction would not occur. For instance, specifying 50 means that // if other variables would allow for compaction, the compaction would occur only // after 50% of the file has data that is no longer valid.
func CompactFragmentation(fragmentation int) FileStoreOption
// CompactFragmentation is a FileStore option that defines the fragmentation ratio // below which compaction would not occur. For instance, specifying 50 means that // if other variables would allow for compaction, the compaction would occur only // after 50% of the file has data that is no longer valid. func CompactFragmentation(fragmentation int) FileStoreOption
{ return func(o *FileStoreOptions) error { if fragmentation <= 0 { return fmt.Errorf("compact fragmentation value must at least be 1") } o.CompactFragmentation = fragmentation return nil } }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L258-L266
go
train
// CompactMinFileSize is a FileStore option that defines the minimum file size below // which compaction would not occur. Specify `0` if you don't want any minimum.
func CompactMinFileSize(fileSize int64) FileStoreOption
// CompactMinFileSize is a FileStore option that defines the minimum file size below // which compaction would not occur. Specify `0` if you don't want any minimum. func CompactMinFileSize(fileSize int64) FileStoreOption
{ return func(o *FileStoreOptions) error { if fileSize < 0 { return fmt.Errorf("compact minimum file size value must be a positive number") } o.CompactMinFileSize = fileSize return nil } }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L270-L275
go
train
// DoCRC is a FileStore option that defines if a CRC checksum verification should // be performed when records are read from disk.
func DoCRC(enableCRC bool) FileStoreOption
// DoCRC is a FileStore option that defines if a CRC checksum verification should // be performed when records are read from disk. func DoCRC(enableCRC bool) FileStoreOption
{ return func(o *FileStoreOptions) error { o.DoCRC = enableCRC return nil } }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L280-L288
go
train
// CRCPolynomial is a FileStore option that defines the polynomial to use to create // the table used for CRC-32 Checksum. // See https://golang.org/pkg/hash/crc32/#MakeTable
func CRCPolynomial(polynomial int64) FileStoreOption
// CRCPolynomial is a FileStore option that defines the polynomial to use to create // the table used for CRC-32 Checksum. // See https://golang.org/pkg/hash/crc32/#MakeTable func CRCPolynomial(polynomial int64) FileStoreOption
{ return func(o *FileStoreOptions) error { if polynomial <= 0 || polynomial > int64(0xFFFFFFFF) { return fmt.Errorf("crc polynomial should be between 1 and %v", int64(0xFFFFFFFF)) } o.CRCPolynomial = polynomial return nil } }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L292-L297
go
train
// DoSync is a FileStore option that defines if `File.Sync()` should be called // during a `Flush()` call.
func DoSync(enableFileSync bool) FileStoreOption
// DoSync is a FileStore option that defines if `File.Sync()` should be called // during a `Flush()` call. func DoSync(enableFileSync bool) FileStoreOption
{ return func(o *FileStoreOptions) error { o.DoSync = enableFileSync return nil } }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L301-L312
go
train
// SliceConfig is a FileStore option that allows the configuration of // file slice limits and optional archive script file name.
func SliceConfig(maxMsgs int, maxBytes int64, maxAge time.Duration, script string) FileStoreOption
// SliceConfig is a FileStore option that allows the configuration of // file slice limits and optional archive script file name. func SliceConfig(maxMsgs int, maxBytes int64, maxAge time.Duration, script string) FileStoreOption
{ return func(o *FileStoreOptions) error { if maxMsgs < 0 || maxBytes < 0 || maxAge < 0 { return fmt.Errorf("slice max values must be positive numbers") } o.SliceMaxMsgs = maxMsgs o.SliceMaxBytes = maxBytes o.SliceMaxAge = maxAge o.SliceArchiveScript = script return nil } }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L316-L324
go
train
// FileDescriptorsLimit is a soft limit hinting at FileStore to try to // limit the number of concurrent opened files to that limit.
func FileDescriptorsLimit(limit int64) FileStoreOption
// FileDescriptorsLimit is a soft limit hinting at FileStore to try to // limit the number of concurrent opened files to that limit. func FileDescriptorsLimit(limit int64) FileStoreOption
{ return func(o *FileStoreOptions) error { if limit < 0 { return fmt.Errorf("file descriptor limit must be a positive number") } o.FileDescriptorsLimit = limit return nil } }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L330-L338
go
train
// ParallelRecovery is a FileStore option that allows the parallel // recovery of channels. When running with SSDs, try to use a higher // value than the default number of 1. When running with HDDs, // performance may be better if it stays at 1.
func ParallelRecovery(count int) FileStoreOption
// ParallelRecovery is a FileStore option that allows the parallel // recovery of channels. When running with SSDs, try to use a higher // value than the default number of 1. When running with HDDs, // performance may be better if it stays at 1. func ParallelRecovery(count int) FileStoreOption
{ return func(o *FileStoreOptions) error { if count <= 0 { return fmt.Errorf("parallel recovery value must be at least 1") } o.ParallelRecovery = count return nil } }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L345-L350
go
train
// TruncateUnexpectedEOF indicates if on recovery the store should // truncate a file that reports an unexpected end-of-file (EOF) on recovery. // If set to true, the invalid record byte content is printed but the store // will truncate the file prior to this bad record and proceed with recovery. // Dataloss may occur.
func TruncateUnexpectedEOF(truncate bool) FileStoreOption
// TruncateUnexpectedEOF indicates if on recovery the store should // truncate a file that reports an unexpected end-of-file (EOF) on recovery. // If set to true, the invalid record byte content is printed but the store // will truncate the file prior to this bad record and proceed with recovery. // Dataloss may occur. func TruncateUnexpectedEOF(truncate bool) FileStoreOption
{ return func(o *FileStoreOptions) error { o.TruncateUnexpectedEOF = truncate return nil } }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L354-L386
go
train
// AllOptions is a convenient option to pass all options from a FileStoreOptions // structure to the constructor.
func AllOptions(opts *FileStoreOptions) FileStoreOption
// AllOptions is a convenient option to pass all options from a FileStoreOptions // structure to the constructor. func AllOptions(opts *FileStoreOptions) FileStoreOption
{ return func(o *FileStoreOptions) error { if err := BufferSize(opts.BufferSize)(o); err != nil { return err } if err := CompactInterval(opts.CompactInterval)(o); err != nil { return err } if err := CompactFragmentation(opts.CompactFragmentation)(o); err != nil { return err } if err := CompactMinFileSize(opts.CompactMinFileSize)(o); err != nil { return err } if err := CRCPolynomial(opts.CRCPolynomial)(o); err != nil { return err } if err := SliceConfig(opts.SliceMaxMsgs, opts.SliceMaxBytes, opts.SliceMaxAge, opts.SliceArchiveScript)(o); err != nil { return err } if err := FileDescriptorsLimit(opts.FileDescriptorsLimit)(o); err != nil { return err } if err := ParallelRecovery(opts.ParallelRecovery)(o); err != nil { return err } o.CompactEnabled = opts.CompactEnabled o.DoCRC = opts.DoCRC o.DoSync = opts.DoSync o.TruncateUnexpectedEOF = opts.TruncateUnexpectedEOF return nil } }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L634-L657
go
train
// openFileWithModes opens the file specified by `filename`, using // the `modes` as open flags. // If the file exists, it checks that the version is supported. // If no open mode override is provided, the file is created if not present, // opened in Read/Write and Append mode.
func openFileWithFlags(fileName string, flags int) (*os.File, error)
// openFileWithModes opens the file specified by `filename`, using // the `modes` as open flags. // If the file exists, it checks that the version is supported. // If no open mode override is provided, the file is created if not present, // opened in Read/Write and Append mode. func openFileWithFlags(fileName string, flags int) (*os.File, error)
{ checkVersion := false // Check if file already exists if s, err := os.Stat(fileName); s != nil && err == nil { checkVersion = true } file, err := os.OpenFile(fileName, flags, 0666) if err != nil { return nil, err } if checkVersion { err = checkFileVersion(file) } else { // This is a new file, write our file version err = util.WriteInt(file, fileVersion) } if err != nil { file.Close() file = nil } return file, err }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L660-L669
go
train
// check that the version of the file is understood by this interface
func checkFileVersion(r io.Reader) error
// check that the version of the file is understood by this interface func checkFileVersion(r io.Reader) error
{ fv, err := util.ReadInt(r) if err != nil { return fmt.Errorf("unable to verify file version: %v", err) } if fv == 0 || fv > fileVersion { return fmt.Errorf("unsupported file version: %v (supports [1..%v])", fv, fileVersion) } return nil }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L679-L723
go
train
// writeRecord writes a record to `w`.
// The record layout is as follows:
// 8 bytes: 4 bytes for type and/or size combined
//          4 bytes for CRC-32
// variable bytes: payload.
// If a buffer is provided, this function uses it and expands it if necessary.
// The function returns the buffer (possibly changed due to expansion) and the
// number of bytes written into that buffer.
func writeRecord(w io.Writer, buf []byte, recType recordType, rec record, recSize int, crcTable *crc32.Table) ([]byte, int, error) {
	// This is the header + payload size
	totalSize := recordHeaderSize + recSize
	// Alloc or realloc as needed
	buf = util.EnsureBufBigEnough(buf, totalSize)
	// If there is a record type, encode it
	headerFirstInt := 0
	if recType != recNoType {
		// Only 3 bytes are available for the size when a type is present.
		if recSize > 0xFFFFFF {
			panic("record size too big")
		}
		// Encode the type in the high byte of the header
		headerFirstInt = int(recType)<<24 | recSize
	} else {
		// The header is the size of the record
		headerFirstInt = recSize
	}
	// Write the first part of the header at the beginning of the buffer
	util.ByteOrder.PutUint32(buf[:4], uint32(headerFirstInt))
	// Marshal the record into the given buffer, after the header offset
	if _, err := rec.MarshalTo(buf[recordHeaderSize:totalSize]); err != nil {
		// Return the buffer because the caller may have provided one
		return buf, 0, err
	}
	// Compute CRC over the payload only (header bytes excluded).
	crc := crc32.Checksum(buf[recordHeaderSize:totalSize], crcTable)
	// Write it in the buffer
	util.ByteOrder.PutUint32(buf[4:recordHeaderSize], crc)
	// Are we dealing with a buffered writer?
	bw, isBuffered := w.(*bufio.Writer)
	// if so, make sure that if what we are about to "write" is more
	// than what's available, then first flush the buffer.
	// This is to reduce the risk of partial writes.
	if isBuffered && (bw.Buffered() > 0) && (bw.Available() < totalSize) {
		if err := bw.Flush(); err != nil {
			return buf, 0, err
		}
	}
	// Write the content of our slice into the writer `w`
	if _, err := w.Write(buf[:totalSize]); err != nil {
		// Return the tmpBuf because the caller may have provided one
		return buf, 0, err
	}
	return buf, totalSize, nil
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L730-L764
go
train
// readRecord reads a record from `r`, possibly checking the CRC-32 checksum.
// When `buf` is not nil, this function ensures the buffer is big enough to
// hold the payload (expanding if necessary). Therefore, this call always
// return `buf`, regardless if there is an error or not.
// The caller is indicating if the record is supposed to be typed or not.
func readRecord(r io.Reader, buf []byte, recTyped bool, crcTable *crc32.Table, checkCRC bool) ([]byte, int, recordType, error) {
	// Fixed-size header: 4 bytes type/size + 4 bytes CRC-32.
	_header := [recordHeaderSize]byte{}
	header := _header[:]
	if _, err := io.ReadFull(r, header); err != nil {
		return buf, 0, recNoType, err
	}
	recType := recNoType
	recSize := 0
	firstInt := int(util.ByteOrder.Uint32(header[:4]))
	if recTyped {
		// High byte carries the record type, low 3 bytes the payload size.
		recType = recordType(firstInt >> 24 & 0xFF)
		recSize = firstInt & 0xFFFFFF
	} else {
		recSize = firstInt
	}
	if recSize == 0 && recType == 0 {
		crc := util.ByteOrder.Uint32(header[4:recordHeaderSize])
		if crc == 0 {
			// All-zero header: presumably we read past the last record into
			// zero-filled space, so signal the caller to rewind.
			return buf, 0, 0, errNeedRewind
		}
	}
	// Now we are going to read the payload
	buf = util.EnsureBufBigEnough(buf, recSize)
	if _, err := io.ReadFull(r, buf[:recSize]); err != nil {
		return buf, 0, recNoType, err
	}
	if checkCRC {
		crc := util.ByteOrder.Uint32(header[4:recordHeaderSize])
		// check CRC against what was stored
		if c := crc32.Checksum(buf[:recSize], crcTable); c != crc {
			return buf, 0, recNoType, fmt.Errorf("corrupted data, expected crc to be 0x%08x, got 0x%08x", crc, c)
		}
	}
	return buf, recSize, recType, nil
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L767-L777
go
train
// setSize sets the initial buffer size and keep track of min/max allowed sizes
func newBufferWriter(minShrinkSize, maxSize int) *bufferedWriter
// setSize sets the initial buffer size and keep track of min/max allowed sizes func newBufferWriter(minShrinkSize, maxSize int) *bufferedWriter
{ w := &bufferedWriter{minShrinkSize: minShrinkSize, maxSize: maxSize} w.bufSize = minShrinkSize // The minSize is the minimum size the buffer can shrink to. // However, if the given max size is smaller than the min // shrink size, use that instead. if maxSize < minShrinkSize { w.bufSize = maxSize } return w }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L781-L784
go
train
// createNewWriter creates a new buffer writer for `file` with // the bufferedWriter's current buffer size.
func (w *bufferedWriter) createNewWriter(file *os.File) io.Writer
// createNewWriter creates a new buffer writer for `file` with // the bufferedWriter's current buffer size. func (w *bufferedWriter) createNewWriter(file *os.File) io.Writer
{ w.buf = bufio.NewWriterSize(file, w.bufSize) return w.buf }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L787-L808
go
train
// expand the buffer (first flushing the buffer if not empty)
func (w *bufferedWriter) expand(file *os.File, required int) (io.Writer, error)
// expand the buffer (first flushing the buffer if not empty) func (w *bufferedWriter) expand(file *os.File, required int) (io.Writer, error)
{ // If there was a request to shrink the buffer, cancel that. w.shrinkReq = false // If there was something, flush first if w.buf.Buffered() > 0 { if err := w.buf.Flush(); err != nil { return w.buf, err } } // Double the size w.bufSize *= 2 // If still smaller than what is required, adjust if w.bufSize < required { w.bufSize = required } // But cap it. if w.bufSize > w.maxSize { w.bufSize = w.maxSize } w.buf = bufio.NewWriterSize(file, w.bufSize) return w.buf, nil }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L811-L840
go
train
// tryShrinkBuffer checks and possibly shrinks the buffer.
// Shrinking is a two-phase process: the first call that finds the buffer
// under-utilized only records a shrink request; a later call (next tick)
// that still finds the request pending performs the actual shrink.
func (w *bufferedWriter) tryShrinkBuffer(file *os.File) (io.Writer, error) {
	// Nothing to do if we are already at the lowest
	// or file not set/opened.
	if w.bufSize == w.minShrinkSize || file == nil {
		return w.buf, nil
	}

	if !w.shrinkReq {
		percentFilled := w.buf.Buffered() * 100 / w.bufSize
		if percentFilled <= bufShrinkThreshold {
			w.shrinkReq = true
		}
		// Wait for next tick to see if we can shrink
		return w.buf, nil
	}
	if err := w.buf.Flush(); err != nil {
		return w.buf, err
	}
	// Reduce size, but ensure it does not go below the limit
	w.bufSize /= 2
	if w.bufSize < w.minShrinkSize {
		w.bufSize = w.minShrinkSize
	}
	w.buf = bufio.NewWriterSize(file, w.bufSize)
	// Don't reset shrinkReq unless we are down to the limit
	// NOTE(review): shrinkReq is already true here, so this assignment is a
	// no-op; the comment suggests `= false` may have been intended when NOT
	// at the limit — confirm intended semantics before changing.
	if w.bufSize == w.minShrinkSize {
		w.shrinkReq = true
	}
	return w.buf, nil
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L844-L850
go
train
// checkShrinkRequest checks how full the buffer is, and if is above a certain // threshold, cancels the shrink request
func (w *bufferedWriter) checkShrinkRequest()
// checkShrinkRequest checks how full the buffer is, and if is above a certain // threshold, cancels the shrink request func (w *bufferedWriter) checkShrinkRequest()
{ percentFilled := w.buf.Buffered() * 100 / w.bufSize // If above the threshold, cancel the request. if percentFilled > bufShrinkThreshold { w.shrinkReq = false } }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L857-L864
go
train
//////////////////////////////////////////////////////////////////////////// // filesManager methods //////////////////////////////////////////////////////////////////////////// // createFilesManager returns an instance of the files manager.
func createFilesManager(rootDir string, openedFilesLimit int64) *filesManager
//////////////////////////////////////////////////////////////////////////// // filesManager methods //////////////////////////////////////////////////////////////////////////// // createFilesManager returns an instance of the files manager. func createFilesManager(rootDir string, openedFilesLimit int64) *filesManager
{ fm := &filesManager{ rootDir: rootDir, limit: openedFilesLimit, files: make(map[fileID]*file), } return fm }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L872-L884
go
train
// closeUnusedFiles closes files that are opened and not currently in-use.
// Since the number of opened files is a soft limit, and if this function
// is unable to close any file, the caller will still attempt to create/open
// the requested file. If the system's file descriptor limit is reached,
// opening the file will fail and that error will be returned to the caller.
// Lock is required on entry.
func (fm *filesManager) closeUnusedFiles(idToSkip fileID) {
	for _, file := range fm.files {
		// Never close the file the caller is about to (re)open.
		if file.id == idToSkip {
			continue
		}
		// The CAS only succeeds for files that are opened but not locked;
		// files that are closed, in-use or removed are skipped.
		if atomic.CompareAndSwapInt32(&file.state, fileOpened, fileClosing) {
			fm.doClose(file)
			// doClose decrements openedFDs; stop once back under the limit.
			if fm.openedFDs < fm.limit {
				break
			}
		}
	}
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L890-L918
go
train
// createFile creates a file, open it, adds it to the list of files and returns
// an instance of `*file` with the state sets to `fileInUse`.
// This call will possibly cause opened but unused files to be closed if the
// number of open file requests is above the set limit.
func (fm *filesManager) createFile(name string, flags int, bfc beforeFileClose) (*file, error) {
	fm.Lock()
	if fm.isClosed {
		fm.Unlock()
		return nil, fmt.Errorf("unable to create file %q, store is being closed", name)
	}
	// Make room if we are at (or above) the soft FD limit; a limit of 0
	// means unlimited. Passing 0 skips no file since ids start at 1.
	if fm.limit > 0 && fm.openedFDs >= fm.limit {
		fm.closeUnusedFiles(0)
	}
	fileName := filepath.Join(fm.rootDir, name)
	handle, err := openFileWithFlags(fileName, flags)
	if err != nil {
		fm.Unlock()
		return nil, err
	}
	// Assign a unique id and register the new file, already locked (in-use)
	// on behalf of the caller.
	fm.nextID++
	newFile := &file{
		state:       fileInUse,
		id:          fm.nextID,
		handle:      handle,
		name:        fileName,
		flags:       flags,
		beforeClose: bfc,
	}
	fm.files[newFile.id] = newFile
	fm.openedFDs++
	fm.Unlock()
	return newFile, nil
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L926-L952
go
train
// openFile opens the given file and sets its state to `fileInUse`.
// If the file manager has been closed or the file removed, this call
// returns an error.
// Otherwise, if the file's state is not `fileClosed` this call will panic.
// This call will possibly cause opened but unused files to be closed if the
// number of open file requests is above the set limit.
func (fm *filesManager) openFile(file *file) error {
	fm.Lock()
	if fm.isClosed {
		fm.Unlock()
		return fmt.Errorf("unable to open file %q, store is being closed", file.name)
	}
	curState := atomic.LoadInt32(&file.state)
	if curState == fileRemoved {
		fm.Unlock()
		return fmt.Errorf("unable to open file %q, it has been removed", file.name)
	}
	// Anything other than a cleanly closed file with no handle at this
	// point is a programming error.
	if curState != fileClosed || file.handle != nil {
		fm.Unlock()
		panic(fmt.Errorf("request to open file %q but invalid state: handle=%v - state=%v", file.name, file.handle, file.state))
	}
	var err error
	// Make room if we are at (or above) the soft FD limit (0 = unlimited),
	// skipping the file we are about to open.
	if fm.limit > 0 && fm.openedFDs >= fm.limit {
		fm.closeUnusedFiles(file.id)
	}
	file.handle, err = openFileWithFlags(file.name, file.flags)
	if err == nil {
		// Success: mark the file locked on behalf of the caller.
		atomic.StoreInt32(&file.state, fileInUse)
		fm.openedFDs++
	}
	fm.Unlock()
	return err
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L958-L966
go
train
// closeLockedFile closes the handle of the given file, but only if the caller // has locked the file. Will panic otherwise. // If the file's beforeClose callback is not nil, this callback is invoked // before the file handle is closed.
func (fm *filesManager) closeLockedFile(file *file) error
// closeLockedFile closes the handle of the given file, but only if the caller // has locked the file. Will panic otherwise. // If the file's beforeClose callback is not nil, this callback is invoked // before the file handle is closed. func (fm *filesManager) closeLockedFile(file *file) error
{ if !atomic.CompareAndSwapInt32(&file.state, fileInUse, fileClosing) { panic(fmt.Errorf("file %q is requested to be closed but was not locked by caller", file.name)) } fm.Lock() err := fm.doClose(file) fm.Unlock() return err }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L973-L981
go
train
// closeFileIfOpened closes the handle of the given file, but only if the // file is opened and not currently locked. Does not return any error or panic // if file is in any other state. // If the file's beforeClose callback is not nil, this callback is invoked // before the file handle is closed.
func (fm *filesManager) closeFileIfOpened(file *file) error
// closeFileIfOpened closes the handle of the given file, but only if the // file is opened and not currently locked. Does not return any error or panic // if file is in any other state. // If the file's beforeClose callback is not nil, this callback is invoked // before the file handle is closed. func (fm *filesManager) closeFileIfOpened(file *file) error
{ if !atomic.CompareAndSwapInt32(&file.state, fileOpened, fileClosing) { return nil } fm.Lock() err := fm.doClose(file) fm.Unlock() return err }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L1006-L1017
go
train
// doClose closes the file handle, setting it to nil and switching state to `fileClosed`. // If a `beforeClose` callback was registered on file creation, it is invoked // before the file handler is actually closed. // Lock is required on entry.
func (fm *filesManager) doClose(file *file) error
// doClose closes the file handle, setting it to nil and switching state to `fileClosed`. // If a `beforeClose` callback was registered on file creation, it is invoked // before the file handler is actually closed. // Lock is required on entry. func (fm *filesManager) doClose(file *file) error
{ var err error if file.beforeClose != nil { err = file.beforeClose() } util.CloseFile(err, file.handle) // Regardless of error, we need to change the state to closed. file.handle = nil atomic.StoreInt32(&file.state, fileClosed) fm.openedFDs-- return err }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L1022-L1027
go
train
// lockFile locks the given file.
// If the file was already opened, the boolean returned is true,
// otherwise, the file is opened and the call returns false.
func (fm *filesManager) lockFile(file *file) (bool, error) {
	// Fast path: an already-opened file transitions straight to in-use.
	if atomic.CompareAndSwapInt32(&file.state, fileOpened, fileInUse) {
		return true, nil
	}
	// File was closed: (re)open it, which also leaves it locked on success.
	return false, fm.openFile(file)
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L1031-L1033
go
train
// lockFileIfOpened is like lockFile but returns true only if the
// file is already opened, false otherwise (and the file remain closed).
func (fm *filesManager) lockFileIfOpened(file *file) bool {
	return atomic.CompareAndSwapInt32(&file.state, fileOpened, fileInUse)
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L1036-L1041
go
train
// unlockFile unlocks the file if currently locked, otherwise panic.
func (fm *filesManager) unlockFile(file *file) {
	// Failing this CAS means the caller did not hold the lock, which is
	// a programming error — fail loudly rather than corrupt state.
	if !atomic.CompareAndSwapInt32(&file.state, fileInUse, fileOpened) {
		panic(fmt.Errorf("failed to switch state from fileInUse to fileOpened for file %q, state=%v", file.name, file.state))
	}
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L1047-L1067
go
train
// trySwitchState attempts to switch an initial state of `fileOpened`
// or `fileClosed` to the given newState. If it can't it will return an
// error, otherwise, returned a boolean to indicate if the initial state
// was `fileOpened`.
func (fm *filesManager) trySwitchState(file *file, newState int32) (bool, error) {
	wasOpened := false
	wasClosed := false
	// Spin for a bounded number of attempts: the file may be transiently
	// locked (fileInUse) by another caller.
	for i := 0; i < 10000; i++ {
		if atomic.CompareAndSwapInt32(&file.state, fileOpened, newState) {
			wasOpened = true
			break
		}
		if atomic.CompareAndSwapInt32(&file.state, fileClosed, newState) {
			wasClosed = true
			break
		}
		// Back off periodically so we don't burn CPU while waiting.
		if i%1000 == 1 {
			time.Sleep(time.Millisecond)
		}
	}
	if !wasOpened && !wasClosed {
		return false, fmt.Errorf("file %q is still probably locked", file.name)
	}
	return wasOpened, nil
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L1076-L1091
go
train
// remove a file from the list of files. The initial state must be either `fileOpened`
// or `fileClosed`. This call will loop until it can switch the file's state from
// one of these states to `fileRemoved`, or return an error if the change can't
// be made after a certain number of attempts.
// When removed, this call returns true and the given `file` is untouched (except
// for its state). So it is still possible for caller to read/write (if handle is
// valid) or close this file.
func (fm *filesManager) remove(file *file) bool {
	fm.Lock()
	wasOpened, err := fm.trySwitchState(file, fileRemoved)
	if err != nil {
		fm.Unlock()
		return false
	}
	// With code above, we can't be removing a file twice, so no need to check if
	// file is present in map.
	delete(fm.files, file.id)
	if wasOpened {
		fm.openedFDs--
	}
	fm.Unlock()
	return true
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L1098-L1102
go
train
// setBeforeCloseCb sets the beforeFileClose callback for this file.
// When this callback is set, and the files manager closes a file,
// the callback is invoked prior to actual closing of the file handle.
// This allows the caller to perfom some work before the file is
// asynchronously (form its perspective) closed.
func (fm *filesManager) setBeforeCloseCb(file *file, bccb beforeFileClose) {
	// Guard with the manager's lock so doClose never races with the update.
	fm.Lock()
	file.beforeClose = bccb
	fm.Unlock()
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L1109-L1143
go
train
// truncateFile truncates the file to the given offset.
// The file is assumed to be locked on entry.
// If the file's flags indicate that this file is opened with O_APPEND, it
// is first closed, reopened in non append mode, truncated, then reopened
// (and locked) with original flags.
func (fm *filesManager) truncateFile(file *file, offset int64) error {
	reopen := false
	fd := file.handle
	if file.flags&os.O_APPEND != 0 {
		// Truncate+Seek do not behave as needed on an O_APPEND handle:
		// close the locked file and reopen a plain RDWR handle instead.
		if err := fm.closeLockedFile(file); err != nil {
			return err
		}
		var err error
		fd, err = openFileWithFlags(file.name, os.O_RDWR)
		if err != nil {
			return err
		}
		reopen = true
	}
	newPos := offset
	if err := fd.Truncate(newPos); err != nil {
		return err
	}
	// Truncate does not move the file offset; reposition explicitly.
	pos, err := fd.Seek(newPos, io.SeekStart) // or Seek(0, io.SeekEnd)
	if err != nil {
		return err
	}
	if pos != newPos {
		return fmt.Errorf("unable to set position of file %q to %v", file.name, newPos)
	}
	if reopen {
		// Close the temporary RDWR handle and re-open (and re-lock)
		// through the manager with the file's original flags.
		if err := fd.Close(); err != nil {
			return err
		}
		if err := fm.openFile(file); err != nil {
			return err
		}
	}
	return nil
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L1147-L1178
go
train
// close the files manager, including all files currently opened.
// Returns the first error encountered when closing the files.
func (fm *filesManager) close() error {
	fm.Lock()
	if fm.isClosed {
		fm.Unlock()
		return nil
	}
	fm.isClosed = true
	// Snapshot the files under the lock, then release it: closing each
	// file may need to wait for its state to settle (trySwitchState).
	files := make([]*file, 0, len(fm.files))
	for _, file := range fm.files {
		files = append(files, file)
	}
	fm.files = nil
	fm.Unlock()

	var err error
	for _, file := range files {
		wasOpened, sserr := fm.trySwitchState(file, fmClosed)
		if sserr != nil {
			// Keep only the first error encountered.
			if err == nil {
				err = sserr
			}
		} else if wasOpened {
			fm.Lock()
			if cerr := fm.doClose(file); cerr != nil && err == nil {
				err = cerr
			}
			fm.Unlock()
		}
	}
	return err
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L1187-L1236
go
train
////////////////////////////////////////////////////////////////////////////
// FileStore methods
////////////////////////////////////////////////////////////////////////////

// NewFileStore returns a factory for stores backed by files.
// If not limits are provided, the store will be created with
// DefaultStoreLimits.
func NewFileStore(log logger.Logger, rootDir string, limits *StoreLimits, options ...FileStoreOption) (*FileStore, error) {
	if rootDir == "" {
		return nil, fmt.Errorf("for %v stores, root directory must be specified", TypeFile)
	}
	fs := &FileStore{opts: DefaultFileStoreOptions, clients: make(map[string]*Client)}
	if err := fs.init(TypeFile, log, limits); err != nil {
		return nil, err
	}
	// Apply the functional options on top of the defaults.
	for _, opt := range options {
		if err := opt(&fs.opts); err != nil {
			return nil, err
		}
	}
	// Create filesManager based on options' FD limit
	fs.fm = createFilesManager(rootDir, fs.opts.FileDescriptorsLimit)
	// Convert the compact interval in time.Duration
	fs.compactItvl = time.Duration(fs.opts.CompactInterval) * time.Second
	// Create the table using polynomial in options
	if fs.opts.CRCPolynomial == int64(crc32.IEEE) {
		fs.crcTable = crc32.IEEETable
	} else {
		fs.crcTable = crc32.MakeTable(uint32(fs.opts.CRCPolynomial))
	}
	if err := os.MkdirAll(rootDir, os.ModeDir+os.ModePerm); err != nil && !os.IsExist(err) {
		return nil, fmt.Errorf("unable to create the root directory [%s]: %v", rootDir, err)
	}
	// If the TruncateUnexpectedEOF is set, check that the witness
	// file is not present. If it is, fail starting. If it isn't,
	// create the witness file.
	truncateFName := filepath.Join(rootDir, truncateBadEOFFileName)
	if fs.opts.TruncateUnexpectedEOF {
		// Try to create the file, if it exists, this is an error.
		f, err := os.OpenFile(truncateFName, os.O_CREATE|os.O_EXCL, 0666)
		if f != nil {
			f.Close()
		}
		if err != nil {
			return nil, fmt.Errorf("file store should not be opened consecutively with the TruncateUnexpectedEOF option set to true")
		}
	} else {
		// Delete possible TruncateUnexpectedEOF witness file
		os.Remove(truncateFName)
	}
	return fs, nil
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L1251-L1360
go
train
// Recover implements the Store interface
func (fs *FileStore) Recover() (*RecoveredState, error) {
	fs.Lock()
	defer fs.Unlock()
	var (
		err               error
		recoveredState    *RecoveredState
		serverInfo        *spb.ServerInfo
		recoveredClients  []*Client
		recoveredChannels = make(map[string]*RecoveredChannel)
		channels          []os.FileInfo
	)
	// Ensure store is closed in case of return with error
	defer func() {
		if fs.serverFile != nil {
			fs.fm.unlockFile(fs.serverFile)
		}
		if fs.clientsFile != nil {
			fs.fm.unlockFile(fs.clientsFile)
		}
	}()
	// Open/Create the server file (note that this file must not be opened,
	// in APPEND mode to allow truncate to work).
	fs.serverFile, err = fs.fm.createFile(serverFileName, os.O_RDWR|os.O_CREATE, nil)
	if err != nil {
		return nil, err
	}
	// Open/Create the client file.
	fs.clientsFile, err = fs.fm.createFile(clientsFileName, defaultFileFlags, nil)
	if err != nil {
		return nil, err
	}
	// Recover the server file.
	serverInfo, err = fs.recoverServerInfo()
	if err != nil {
		return nil, fmt.Errorf("unable to recover server file %q: %v", fs.serverFile.name, err)
	}
	// If the server file is empty, then we are done
	if serverInfo == nil {
		// We return the file store instance, but no recovered state.
		return nil, nil
	}
	// Recover the clients file
	recoveredClients, err = fs.recoverClients()
	if err != nil {
		return nil, fmt.Errorf("unable to recover client file %q: %v", fs.clientsFile.name, err)
	}
	// Get the channels (there are subdirectories of rootDir)
	channels, err = ioutil.ReadDir(fs.fm.rootDir)
	if err != nil {
		return nil, err
	}
	if len(channels) > 0 {
		wg, poolCh, errCh, recoverCh := initParalleRecovery(fs.opts.ParallelRecovery, len(channels))
		ctx := &channelRecoveryCtx{wg: wg, poolCh: poolCh, errCh: errCh, recoverCh: recoverCh}
		for _, c := range channels {
			// Channels are directories. Ignore simple files
			if !c.IsDir() {
				continue
			}
			channel := c.Name()
			channelDirName := filepath.Join(fs.fm.rootDir, channel)
			limits := fs.genericStore.getChannelLimits(channel)
			// This will block if the max number of go-routines is reached.
			// When one of the go-routine finishes, it will add back to the
			// pool and we will be able to start the recovery of another
			// channel.
			<-poolCh
			wg.Add(1)
			go fs.recoverOneChannel(channelDirName, channel, limits, ctx)
			// Fail as soon as we detect that a go routine has encountered
			// an error
			if len(errCh) > 0 {
				break
			}
		}
		// We need to wait for all current go routines to exit
		wg.Wait()
		// Also, even if there was an error, we need to collect
		// all channels that were recovered so that we can close
		// the msgs/subs stores on exit.
		done := false
		for !done {
			select {
			case rc := <-recoverCh:
				recoveredChannels[rc.name] = rc.rc
				fs.channels[rc.name] = rc.rc.Channel
			default:
				done = true
			}
		}
		// Report the first recovery error, if any.
		select {
		case err = <-errCh:
			return nil, err
		default:
		}
	}
	// Create the recovered state to return
	recoveredState = &RecoveredState{
		Info:     serverInfo,
		Clients:  recoveredClients,
		Channels: recoveredChannels,
	}
	return recoveredState, nil
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L1441-L1458
go
train
// GetExclusiveLock implements the Store interface
func (fs *FileStore) GetExclusiveLock() (bool, error)
// GetExclusiveLock implements the Store interface func (fs *FileStore) GetExclusiveLock() (bool, error)
{ fs.Lock() defer fs.Unlock() if fs.lockFile != nil { return true, nil } f, err := util.CreateLockFile(filepath.Join(fs.fm.rootDir, lockFileName)) if err != nil { if err == util.ErrUnableToLockNow { return false, nil } return false, err } // We must keep a reference to the file, otherwise, it `f` is GC'ed, // its file descriptor is closed, which automatically releases the lock. fs.lockFile = f return true, nil }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L1461-L1495
go
train
// Init is used to persist server's information after the first start
func (fs *FileStore) Init(info *spb.ServerInfo) error
// Init is used to persist server's information after the first start func (fs *FileStore) Init(info *spb.ServerInfo) error
{ fs.Lock() defer fs.Unlock() if fs.serverFile == nil { var err error // Open/Create the server file (note that this file must not be opened, // in APPEND mode to allow truncate to work). fs.serverFile, err = fs.fm.createFile(serverFileName, os.O_RDWR|os.O_CREATE, nil) if err != nil { return err } } else { if _, err := fs.fm.lockFile(fs.serverFile); err != nil { return err } } f := fs.serverFile.handle // defer is ok for this function... defer fs.fm.unlockFile(fs.serverFile) // Truncate the file (4 is the size of the fileVersion record) if err := f.Truncate(4); err != nil { return err } // Move offset to 4 (truncate does not do that) if _, err := f.Seek(4, io.SeekStart); err != nil { return err } // ServerInfo record is not typed. We also don't pass a reusable buffer. if _, _, err := writeRecord(f, nil, recNoType, info, info.Size(), fs.crcTable); err != nil { return err } return nil }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L1498-L1557
go
train
// recoverClients reads the client files and returns an array of RecoveredClient
func (fs *FileStore) recoverClients() ([]*Client, error)
// recoverClients reads the client files and returns an array of RecoveredClient func (fs *FileStore) recoverClients() ([]*Client, error)
{ var err error var recType recordType var recSize int _buf := [256]byte{} buf := _buf[:] offset := int64(4) // Create a buffered reader to speed-up recovery br := bufio.NewReaderSize(fs.clientsFile.handle, defaultBufSize) for { buf, recSize, recType, err = readRecord(br, buf, true, fs.crcTable, fs.opts.DoCRC) if err != nil { switch err { case io.EOF: err = nil case errNeedRewind: err = fs.fm.truncateFile(fs.clientsFile, offset) default: err = fs.handleUnexpectedEOF(err, fs.clientsFile, offset, true) } if err == nil { break } return nil, err } readBytes := int64(recSize + recordHeaderSize) offset += readBytes fs.cliFileSize += readBytes switch recType { case addClient: c := &Client{} if err := c.ClientInfo.Unmarshal(buf[:recSize]); err != nil { return nil, err } // Add to the map. Note that if one already exists, which should // not, just replace with this most recent one. fs.clients[c.ID] = c case delClient: c := spb.ClientDelete{} if err := c.Unmarshal(buf[:recSize]); err != nil { return nil, err } delete(fs.clients, c.ID) fs.cliDeleteRecs++ default: return nil, fmt.Errorf("invalid client record type: %v", recType) } } clients := make([]*Client, len(fs.clients)) i := 0 // Convert the map into an array for _, c := range fs.clients { clients[i] = c i++ } return clients, nil }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L1560-L1590
go
train
// recoverServerInfo reads the server file and returns a ServerInfo structure
func (fs *FileStore) recoverServerInfo() (*spb.ServerInfo, error)
// recoverServerInfo reads the server file and returns a ServerInfo structure func (fs *FileStore) recoverServerInfo() (*spb.ServerInfo, error)
{ info := &spb.ServerInfo{} buf, size, _, err := readRecord(fs.serverFile.handle, nil, false, fs.crcTable, fs.opts.DoCRC) if err != nil { if err == io.EOF { // We are done, no state recovered return nil, nil } fs.log.Errorf("Server file %q corrupted: %v", fs.serverFile.name, err) fs.log.Errorf("Follow instructions in documentation in order to recover from this") return nil, err } // Check that the size of the file is consistent with the size // of the record we are supposed to recover. Account for the // 12 bytes (4 + recordHeaderSize) corresponding to the fileVersion and // record header. fstat, err := fs.serverFile.handle.Stat() if err != nil { return nil, err } expectedSize := int64(size + 4 + recordHeaderSize) if fstat.Size() != expectedSize { return nil, fmt.Errorf("incorrect file size, expected %v bytes, got %v bytes", expectedSize, fstat.Size()) } // Reconstruct now if err := info.Unmarshal(buf[:size]); err != nil { return nil, err } return info, nil }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L1593-L1633
go
train
// CreateChannel implements the Store interface
func (fs *FileStore) CreateChannel(channel string) (*Channel, error)
// CreateChannel implements the Store interface func (fs *FileStore) CreateChannel(channel string) (*Channel, error)
{ fs.Lock() defer fs.Unlock() // Verify that it does not already exist or that we did not hit the limits if err := fs.canAddChannel(channel); err != nil { return nil, err } // We create the channel here... channelDirName := filepath.Join(fs.fm.rootDir, channel) if err := os.MkdirAll(channelDirName, os.ModeDir+os.ModePerm); err != nil { return nil, err } var err error var msgStore MsgStore var subStore SubStore channelLimits := fs.genericStore.getChannelLimits(channel) msgStore, err = fs.newFileMsgStore(channelDirName, channel, &channelLimits.MsgStoreLimits, false) if err != nil { return nil, err } subStore, err = fs.newFileSubStore(channel, &channelLimits.SubStoreLimits, false) if err != nil { msgStore.Close() return nil, err } c := &Channel{ Subs: subStore, Msgs: msgStore, } fs.channels[channel] = c return c, nil }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L1636-L1644
go
train
// DeleteChannel implements the Store interface
func (fs *FileStore) DeleteChannel(channel string) error
// DeleteChannel implements the Store interface func (fs *FileStore) DeleteChannel(channel string) error
{ fs.Lock() defer fs.Unlock() err := fs.deleteChannel(channel) if err != nil { return err } return os.RemoveAll(filepath.Join(fs.fm.rootDir, channel)) }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L1647-L1665
go
train
// AddClient implements the Store interface
func (fs *FileStore) AddClient(info *spb.ClientInfo) (*Client, error)
// AddClient implements the Store interface func (fs *FileStore) AddClient(info *spb.ClientInfo) (*Client, error)
{ fs.Lock() if _, err := fs.fm.lockFile(fs.clientsFile); err != nil { fs.Unlock() return nil, err } _, size, err := writeRecord(fs.clientsFile.handle, nil, addClient, info, info.Size(), fs.crcTable) if err != nil { fs.fm.unlockFile(fs.clientsFile) fs.Unlock() return nil, err } fs.cliFileSize += int64(size) fs.fm.unlockFile(fs.clientsFile) client := &Client{*info} fs.clients[client.ID] = client fs.Unlock() return client, nil }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L1668-L1700
go
train
// DeleteClient implements the Store interface
func (fs *FileStore) DeleteClient(clientID string) error
// DeleteClient implements the Store interface func (fs *FileStore) DeleteClient(clientID string) error
{ fs.Lock() if _, err := fs.fm.lockFile(fs.clientsFile); err != nil { fs.Unlock() return err } fs.delClientRec = spb.ClientDelete{ID: clientID} _, size, err := writeRecord(fs.clientsFile.handle, nil, delClient, &fs.delClientRec, fs.delClientRec.Size(), fs.crcTable) // Even if there is an error, proceed. If we compact the file, // this may resolve the issue. delete(fs.clients, clientID) fs.cliDeleteRecs++ fs.cliFileSize += int64(size) // Check if this triggers a need for compaction if fs.shouldCompactClientFile() { // close the file now // If we can't close the file, it does not make sense // to proceed with compaction. if lerr := fs.fm.closeLockedFile(fs.clientsFile); lerr != nil { fs.Unlock() return lerr } // compact (this uses a temporary file) // Override writeRecord error with the result of compaction. // If compaction works, the original error is no longer an issue // since the file has been replaced. err = fs.compactClientFile(fs.clientsFile.name) } else { fs.fm.unlockFile(fs.clientsFile) } fs.Unlock() return err }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L1704-L1724
go
train
// shouldCompactClientFile returns true if the client file should be compacted // Lock is held by caller
func (fs *FileStore) shouldCompactClientFile() bool
// shouldCompactClientFile returns true if the client file should be compacted // Lock is held by caller func (fs *FileStore) shouldCompactClientFile() bool
{ // Global switch if !fs.opts.CompactEnabled { return false } // Check that if minimum file size is set, the client file // is at least at the minimum. if fs.opts.CompactMinFileSize > 0 && fs.cliFileSize < fs.opts.CompactMinFileSize { return false } // Check fragmentation frag := fs.cliDeleteRecs * 100 / (fs.cliDeleteRecs + len(fs.clients)) if frag < fs.opts.CompactFragmentation { return false } // Check that we don't do too often if time.Since(fs.cliCompactTS) < fs.compactItvl { return false } return true }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L1729-L1773
go
train
// Rewrite the content of the clients map into a temporary file, // then swap back to active file. // Store lock held on entry
func (fs *FileStore) compactClientFile(orgFileName string) error
// Rewrite the content of the clients map into a temporary file, // then swap back to active file. // Store lock held on entry func (fs *FileStore) compactClientFile(orgFileName string) error
{ // Open a temporary file tmpFile, err := getTempFile(fs.fm.rootDir, clientsFileName) if err != nil { return err } defer func() { if tmpFile != nil { tmpFile.Close() os.Remove(tmpFile.Name()) } }() bw := bufio.NewWriterSize(tmpFile, defaultBufSize) fileSize := int64(0) size := 0 _buf := [256]byte{} buf := _buf[:] // Dump the content of active clients into the temporary file. for _, c := range fs.clients { buf, size, err = writeRecord(bw, buf, addClient, &c.ClientInfo, c.ClientInfo.Size(), fs.crcTable) if err != nil { return err } fileSize += int64(size) } // Flush the buffer on disk if err := bw.Flush(); err != nil { return err } // Start by closing the temporary file. if err := tmpFile.Close(); err != nil { return err } // Rename the tmp file to original file name if err := os.Rename(tmpFile.Name(), orgFileName); err != nil { return err } // Avoid unnecessary attempt to cleanup tmpFile = nil fs.cliDeleteRecs = 0 fs.cliFileSize = fileSize fs.cliCompactTS = time.Now() return nil }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L1776-L1785
go
train
// Return a temporary file (including file version)
func getTempFile(rootDir, prefix string) (*os.File, error)
// Return a temporary file (including file version) func getTempFile(rootDir, prefix string) (*os.File, error)
{ tmpFile, err := ioutil.TempFile(rootDir, prefix) if err != nil { return nil, err } if err := util.WriteInt(tmpFile, fileVersion); err != nil { return nil, err } return tmpFile, nil }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L1788-L1811
go
train
// Close closes all stores.
func (fs *FileStore) Close() error
// Close closes all stores. func (fs *FileStore) Close() error
{ fs.Lock() if fs.closed { fs.Unlock() return nil } fs.closed = true err := fs.genericStore.close() fm := fs.fm lockFile := fs.lockFile fs.Unlock() if fm != nil { if fmerr := fm.close(); fmerr != nil && err == nil { err = fmerr } } if lockFile != nil { err = util.CloseFile(err, lockFile) } return err }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L1922-L2062
go
train
//////////////////////////////////////////////////////////////////////////// // FileMsgStore methods //////////////////////////////////////////////////////////////////////////// // newFileMsgStore returns a new instace of a file MsgStore.
func (fs *FileStore) newFileMsgStore(channelDirName, channel string, limits *MsgStoreLimits, doRecover bool) (*FileMsgStore, error)
//////////////////////////////////////////////////////////////////////////// // FileMsgStore methods //////////////////////////////////////////////////////////////////////////// // newFileMsgStore returns a new instace of a file MsgStore. func (fs *FileStore) newFileMsgStore(channelDirName, channel string, limits *MsgStoreLimits, doRecover bool) (*FileMsgStore, error)
{ // Create an instance and initialize ms := &FileMsgStore{ fm: fs.fm, hasFDsLimit: fs.opts.FileDescriptorsLimit > 0, fstore: fs, wOffset: int64(4), // The very first record starts after the file version record files: make(map[int]*fileSlice), channelName: channel, bkgTasksDone: make(chan bool, 1), bkgTasksWake: make(chan bool, 1), } ms.init(channel, fs.log, limits) ms.setSliceLimits() ms.initCache() maxBufSize := fs.opts.BufferSize if maxBufSize > 0 { ms.bw = newBufferWriter(msgBufMinShrinkSize, maxBufSize) ms.bufferedSeqs = make([]uint64, 0, 1) ms.bufferedMsgs = make(map[uint64]*bufferedMsg) } // Use this variable for all errors below so we can do the cleanup var err error // Recovery case if doRecover { var dirFiles []os.FileInfo var fseq int64 var datFile, idxFile *file var useIdxFile bool dirFiles, err = ioutil.ReadDir(channelDirName) for _, file := range dirFiles { if file.IsDir() { continue } fileName := file.Name() if !strings.HasPrefix(fileName, msgFilesPrefix) || !strings.HasSuffix(fileName, datSuffix) { continue } // Remove suffix fileNameWithoutSuffix := strings.TrimSuffix(fileName, datSuffix) // Remove prefix fileNameWithoutPrefixAndSuffix := strings.TrimPrefix(fileNameWithoutSuffix, msgFilesPrefix) // Get the file sequence number fseq, err = strconv.ParseInt(fileNameWithoutPrefixAndSuffix, 10, 64) if err != nil { err = fmt.Errorf("message log has an invalid name: %v", fileName) break } idxFName := fmt.Sprintf("%s%v%s", msgFilesPrefix, fseq, idxSuffix) useIdxFile = false if s, statErr := os.Stat(filepath.Join(channelDirName, idxFName)); s != nil && statErr == nil { useIdxFile = true } datFile, err = ms.fm.createFile(filepath.Join(channel, fileName), defaultFileFlags, nil) if err != nil { break } idxFile, err = ms.fm.createFile(filepath.Join(channel, idxFName), defaultFileFlags, nil) if err != nil { ms.fm.unlockFile(datFile) break } // Create the slice fslice := &fileSlice{file: datFile, idxFile: idxFile, lastUsed: time.Now().UnixNano()} // Recover the 
file slice err = ms.recoverOneMsgFile(fslice, int(fseq), useIdxFile) if err != nil { break } } if err == nil && ms.lastFSlSeq > 0 { // Now that all file slices have been recovered, we know which // one is the last, so use it as the write slice. ms.writeSlice = ms.files[ms.lastFSlSeq] // Need to set the writer, etc.. ms.fm.lockFile(ms.writeSlice.file) err = ms.setFile(ms.writeSlice, -1) ms.fm.unlockFile(ms.writeSlice.file) if err == nil { // Set the beforeFileClose callback to the slices now that // we are done recovering. for _, fslice := range ms.files { ms.fm.setBeforeCloseCb(fslice.file, ms.beforeDataFileCloseCb(fslice)) ms.fm.setBeforeCloseCb(fslice.idxFile, ms.beforeIndexFileCloseCb(fslice)) } ms.checkSlices = 1 } } if err == nil { // Apply message limits (no need to check if there are limits // defined, the call won't do anything if they aren't). err = ms.enforceLimits(false, true) } } if err == nil { ms.Lock() ms.allDone.Add(1) // Capture the time here first, it will then be captured // in the go routine we are about to start. ms.timeTick = time.Now().UnixNano() // On recovery, if there is age limit set and at least one message... if doRecover { if ms.limits.MaxAge > 0 && ms.totalCount > 0 { // Force the execution of the expireMsgs method. // This will take care of expiring messages that should have // expired while the server was stopped. 
ms.expireMsgs(ms.timeTick, int64(ms.limits.MaxAge)) } // Now that we are done with recovery, close the write slice if ms.writeSlice != nil { ms.fm.closeFileIfOpened(ms.writeSlice.file) ms.fm.closeFileIfOpened(ms.writeSlice.idxFile) } } // Start the background tasks go routine go ms.backgroundTasks() ms.Unlock() } // Cleanup on error if err != nil { // The buffer writer may not be fully set yet if ms.bw != nil && ms.bw.buf == nil { ms.bw = nil } ms.Close() ms = nil action := "create" if doRecover { action = "recover" } err = fmt.Errorf("unable to %s message store for [%s]: %v", action, channel, err) return nil, err } return ms, nil }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L2069-L2087
go
train
// beforeDataFileCloseCb returns a beforeFileClose callback to be used // by FileMsgStore's files when a data file for that slice is being closed. // This is invoked asynchronously and should not acquire the store's lock. // That being said, we have the guarantee that this will be not be invoked // concurrently for a given file and that the store will not be using this file.
func (ms *FileMsgStore) beforeDataFileCloseCb(fslice *fileSlice) beforeFileClose
// beforeDataFileCloseCb returns a beforeFileClose callback to be used // by FileMsgStore's files when a data file for that slice is being closed. // This is invoked asynchronously and should not acquire the store's lock. // That being said, we have the guarantee that this will be not be invoked // concurrently for a given file and that the store will not be using this file. func (ms *FileMsgStore) beforeDataFileCloseCb(fslice *fileSlice) beforeFileClose
{ return func() error { if fslice != ms.writeSlice { return nil } if ms.bw != nil && ms.bw.buf != nil && ms.bw.buf.Buffered() > 0 { if err := ms.bw.buf.Flush(); err != nil { return err } } if ms.fstore.opts.DoSync { if err := fslice.file.handle.Sync(); err != nil { return err } } ms.writer = nil return nil } }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L2094-L2111
go
train
// beforeIndexFileCloseCb returns a beforeFileClose callback to be used // by FileMsgStore's files when an index file for that slice is being closed. // This is invoked asynchronously and should not acquire the store's lock. // That being said, we have the guarantee that this will be not be invoked // concurrently for a given file and that the store will not be using this file.
func (ms *FileMsgStore) beforeIndexFileCloseCb(fslice *fileSlice) beforeFileClose
// beforeIndexFileCloseCb returns a beforeFileClose callback to be used // by FileMsgStore's files when an index file for that slice is being closed. // This is invoked asynchronously and should not acquire the store's lock. // That being said, we have the guarantee that this will be not be invoked // concurrently for a given file and that the store will not be using this file. func (ms *FileMsgStore) beforeIndexFileCloseCb(fslice *fileSlice) beforeFileClose
{ return func() error { if fslice != ms.writeSlice { return nil } if len(ms.bufferedMsgs) > 0 { if err := ms.processBufferedMsgs(fslice); err != nil { return err } } if ms.fstore.opts.DoSync { if err := fslice.idxFile.handle.Sync(); err != nil { return err } } return nil } }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L2115-L2128
go
train
// setFile sets the current data and index file. // The buffered writer is recreated.
func (ms *FileMsgStore) setFile(fslice *fileSlice, offset int64) error
// setFile sets the current data and index file. // The buffered writer is recreated. func (ms *FileMsgStore) setFile(fslice *fileSlice, offset int64) error
{ var err error file := fslice.file.handle ms.writer = file if file != nil && ms.bw != nil { ms.writer = ms.bw.createNewWriter(file) } if offset == -1 { ms.wOffset, err = file.Seek(0, io.SeekEnd) } else { ms.wOffset = offset } return err }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L2174-L2176
go
train
// lockFiles locks the data and index files of the given file slice. // If files were closed they are opened in this call, and if so, // and if this slice is the write slice, the writer and offset are reset.
func (ms *FileMsgStore) lockFiles(fslice *fileSlice) error
// lockFiles locks the data and index files of the given file slice. // If files were closed they are opened in this call, and if so, // and if this slice is the write slice, the writer and offset are reset. func (ms *FileMsgStore) lockFiles(fslice *fileSlice) error
{ return ms.doLockFiles(fslice, false) }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L2180-L2182
go
train
// lockIndexFile locks the index file of the given file slice. // If the file was closed it is opened in this call.
func (ms *FileMsgStore) lockIndexFile(fslice *fileSlice) error
// lockIndexFile locks the index file of the given file slice. // If the file was closed it is opened in this call. func (ms *FileMsgStore) lockIndexFile(fslice *fileSlice) error
{ return ms.doLockFiles(fslice, true) }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L2185-L2187
go
train
// unlockIndexFile unlocks the already locked index file of the given file slice.
func (ms *FileMsgStore) unlockIndexFile(fslice *fileSlice)
// unlockIndexFile unlocks the already locked index file of the given file slice. func (ms *FileMsgStore) unlockIndexFile(fslice *fileSlice)
{ ms.fm.unlockFile(fslice.idxFile) }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L2190-L2193
go
train
// unlockFiles unlocks both data and index files of the given file slice.
func (ms *FileMsgStore) unlockFiles(fslice *fileSlice)
// unlockFiles unlocks both data and index files of the given file slice. func (ms *FileMsgStore) unlockFiles(fslice *fileSlice)
{ ms.fm.unlockFile(fslice.file) ms.fm.unlockFile(fslice.idxFile) }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L2196-L2202
go
train
// closeLockedFiles (unlocks and) closes the files of the given file slice.
func (ms *FileMsgStore) closeLockedFiles(fslice *fileSlice) error
// closeLockedFiles (unlocks and) closes the files of the given file slice. func (ms *FileMsgStore) closeLockedFiles(fslice *fileSlice) error
{ err := ms.fm.closeLockedFile(fslice.file) if idxErr := ms.fm.closeLockedFile(fslice.idxFile); idxErr != nil && err == nil { err = idxErr } return err }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L2205-L2404
go
train
// recovers one of the file
func (ms *FileMsgStore) recoverOneMsgFile(fslice *fileSlice, fseq int, useIdxFile bool) error
// recovers one of the file func (ms *FileMsgStore) recoverOneMsgFile(fslice *fileSlice, fseq int, useIdxFile bool) error
{ var err error msgSize := 0 var msg *pb.MsgProto var mindex *msgIndex var seq uint64 // Select which file to recover based on presence of index file file := fslice.file if useIdxFile { file = fslice.idxFile } // Create a buffered reader to speed-up recovery br := bufio.NewReaderSize(file.handle, defaultBufSize) // The first record starts after the file version record offset := int64(4) if useIdxFile { var ( lastIndex *msgIndex lastSeq uint64 ) for { seq, mindex, err = ms.readIndex(br) if err != nil { switch err { case io.EOF: // We are done, reset err err = nil case errNeedRewind: err = ms.fm.truncateFile(file, offset) } break } // Update file slice if fslice.firstSeq == 0 { fslice.firstSeq = seq } fslice.lastSeq = seq fslice.msgsCount++ // For size, add the message record size, the record header and the size // required for the corresponding index record. fslice.msgsSize += uint64(mindex.msgSize + msgRecordOverhead) if fslice.firstWrite == 0 { fslice.firstWrite = mindex.timestamp } lastIndex = mindex lastSeq = seq offset += msgIndexRecSize } if err == nil { if lastIndex != nil { err = ms.ensureLastMsgAndIndexMatch(fslice, lastSeq, lastIndex) if err != nil { ms.fstore.log.Errorf(err.Error()) if _, serr := fslice.file.handle.Seek(4, io.SeekStart); serr != nil { panic(fmt.Errorf("file %q: unable to set position to beginning of file: %v", fslice.file.name, serr)) } } } else { // Nothing recovered from the index file, try to recover // from data file in case it is not empty. useIdxFile = false } } // We can get an error either because the index file was corrupted, // or because the data file is. In both case, we truncate the index // file and recover from data file. The handling of unexpected EOF // is handled in the data file recovery down below. if err != nil { ms.fstore.log.Errorf("Error with index file %q: %v. 
Truncating and recovering from data file", fslice.idxFile.name, err) if terr := ms.fm.truncateFile(fslice.idxFile, 4); terr != nil { panic(fmt.Errorf("error during recovery of file %q: %v, you need "+ "to manually remove index file %q (truncate failed with err: %v)", fslice.file.name, err, fslice.idxFile.name, terr)) } fslice.firstSeq = 0 fslice.lastSeq = 0 fslice.msgsCount = 0 fslice.msgsSize = 0 fslice.firstWrite = 0 file = fslice.file err = nil useIdxFile = false } } // No `else` here because in case of error recovering index file, we will do data file recovery if !useIdxFile { // Get these from the file store object crcTable := ms.fstore.crcTable doCRC := ms.fstore.opts.DoCRC // Create a buffered reader from the data file to speed-up recovery br := bufio.NewReaderSize(fslice.file.handle, defaultBufSize) // We are going to write the index file while recovering the data file bw := bufio.NewWriterSize(fslice.idxFile.handle, msgIndexRecSize*1000) for { ms.tmpMsgBuf, msgSize, _, err = readRecord(br, ms.tmpMsgBuf, false, crcTable, doCRC) if err != nil { switch err { case io.EOF: // We are done, reset err err = nil case errNeedRewind: err = ms.fm.truncateFile(file, offset) default: err = ms.fstore.handleUnexpectedEOF(err, file, offset, false) } break } // Recover this message msg = &pb.MsgProto{} err = msg.Unmarshal(ms.tmpMsgBuf[:msgSize]) if err != nil { break } if fslice.firstSeq == 0 { fslice.firstSeq = msg.Sequence } fslice.lastSeq = msg.Sequence fslice.msgsCount++ // For size, add the message record size, the record header and the size // required for the corresponding index record. 
fslice.msgsSize += uint64(msgSize + msgRecordOverhead) if fslice.firstWrite == 0 { fslice.firstWrite = msg.Timestamp } // There was no index file, update it err = ms.writeIndex(bw, msg.Sequence, offset, msg.Timestamp, msgSize) if err != nil { break } // Move offset offset += int64(recordHeaderSize + msgSize) } if err == nil { err = bw.Flush() if err == nil { err = fslice.idxFile.handle.Sync() } } // Since there was no index and there was an error, remove the index // file so when server restarts, it recovers again from the data file. if err != nil { // Close the index file ms.fm.closeLockedFile(fslice.idxFile) // Remove form store's map ms.fm.remove(fslice.idxFile) // Remove it, and panic if we can't if rmErr := os.Remove(fslice.idxFile.name); rmErr != nil { panic(fmt.Errorf("error during recovery of file %q: %v, you need "+ "to manually remove index file %q (remove failed with err: %v)", fslice.file.name, err, fslice.idxFile.name, rmErr)) } // Close the data file ms.fm.closeLockedFile(fslice.file) return err } } // Close the files ms.fm.closeLockedFile(fslice.file) ms.fm.closeLockedFile(fslice.idxFile) // If no error and slice is not empty... if fslice.msgsCount > 0 { if ms.first == 0 || ms.first > fslice.firstSeq { ms.first = fslice.firstSeq } if ms.last < fslice.lastSeq { ms.last = fslice.lastSeq } ms.totalCount += fslice.msgsCount ms.totalBytes += fslice.msgsSize // On success, add to the map of file slices and // update first/last file slice sequence. ms.files[fseq] = fslice if ms.firstFSlSeq == 0 || ms.firstFSlSeq > fseq { ms.firstFSlSeq = fseq } if ms.lastFSlSeq < fseq { ms.lastFSlSeq = fseq } return nil } // Slice was empty and not recovered. Need to remove those from store's files manager. ms.fm.remove(fslice.file) ms.fm.remove(fslice.idxFile) return nil }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L2439-L2477
go
train
// setSliceLimits sets the limits of a file slice based on options and/or // channel limits.
func (ms *FileMsgStore) setSliceLimits()
// setSliceLimits sets the limits of a file slice based on options and/or // channel limits. func (ms *FileMsgStore) setSliceLimits()
{ // First set slice limits based on slice configuration. ms.slCountLim = ms.fstore.opts.SliceMaxMsgs ms.slSizeLim = uint64(ms.fstore.opts.SliceMaxBytes) ms.slAgeLim = int64(ms.fstore.opts.SliceMaxAge) // Did we configure any of the "dimension"? ms.slHasLimits = ms.slCountLim > 0 || ms.slSizeLim > 0 || ms.slAgeLim > 0 // If so, we are done. We will use those limits to decide // when to move to a new slice. if ms.slHasLimits { return } // Slices limits were not configured. We will set a limit based on channel limits. if ms.limits.MaxMsgs > 0 { limit := ms.limits.MaxMsgs / 4 if limit == 0 { limit = 1 } ms.slCountLim = limit } if ms.limits.MaxBytes > 0 { limit := uint64(ms.limits.MaxBytes) / 4 if limit == 0 { limit = 1 } ms.slSizeLim = limit } if ms.limits.MaxAge > 0 { limit := time.Duration(int64(ms.limits.MaxAge) / 4) if limit < time.Second { limit = time.Second } ms.slAgeLim = int64(limit) } // Refresh our view of slices having limits. ms.slHasLimits = ms.slCountLim > 0 || ms.slSizeLim > 0 || ms.slAgeLim > 0 }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L2480-L2486
go
train
// writeIndex writes a message index record to the writer `w`
func (ms *FileMsgStore) writeIndex(w io.Writer, seq uint64, offset, timestamp int64, msgSize int) error
// writeIndex writes a message index record to the writer `w` func (ms *FileMsgStore) writeIndex(w io.Writer, seq uint64, offset, timestamp int64, msgSize int) error
{ _buf := [msgIndexRecSize]byte{} buf := _buf[:] ms.addIndex(buf, seq, offset, timestamp, msgSize) _, err := w.Write(buf[:msgIndexRecSize]) return err }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L2489-L2496
go
train
// addIndex adds a message index record in the given buffer
func (ms *FileMsgStore) addIndex(buf []byte, seq uint64, offset, timestamp int64, msgSize int)
// addIndex adds a message index record in the given buffer func (ms *FileMsgStore) addIndex(buf []byte, seq uint64, offset, timestamp int64, msgSize int)
{ util.ByteOrder.PutUint64(buf, seq) util.ByteOrder.PutUint64(buf[8:], uint64(offset)) util.ByteOrder.PutUint64(buf[16:], uint64(timestamp)) util.ByteOrder.PutUint32(buf[24:], uint32(msgSize)) crc := crc32.Checksum(buf[:msgIndexRecSize-crcSize], ms.fstore.crcTable) util.ByteOrder.PutUint32(buf[msgIndexRecSize-crcSize:], crc) }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L2500-L2526
go
train
// readIndex reads a message index record from the given reader // and returns an allocated msgIndex object.
func (ms *FileMsgStore) readIndex(r io.Reader) (uint64, *msgIndex, error)
// readIndex reads a message index record from the given reader // and returns an allocated msgIndex object. func (ms *FileMsgStore) readIndex(r io.Reader) (uint64, *msgIndex, error)
{ _buf := [msgIndexRecSize]byte{} buf := _buf[:] if _, err := io.ReadFull(r, buf); err != nil { return 0, nil, err } mindex := &msgIndex{} seq := util.ByteOrder.Uint64(buf) mindex.offset = int64(util.ByteOrder.Uint64(buf[8:])) mindex.timestamp = int64(util.ByteOrder.Uint64(buf[16:])) mindex.msgSize = util.ByteOrder.Uint32(buf[24:]) // If all zeros, return that caller should rewind (for recovery) if seq == 0 && mindex.offset == 0 && mindex.timestamp == 0 && mindex.msgSize == 0 { storedCRC := util.ByteOrder.Uint32(buf[msgIndexRecSize-crcSize:]) if storedCRC == 0 { return 0, nil, errNeedRewind } } if ms.fstore.opts.DoCRC { storedCRC := util.ByteOrder.Uint32(buf[msgIndexRecSize-crcSize:]) crc := crc32.Checksum(buf[:msgIndexRecSize-crcSize], ms.fstore.crcTable) if storedCRC != crc { return 0, nil, fmt.Errorf("corrupted data, expected crc to be 0x%08x, got 0x%08x", storedCRC, crc) } } return seq, mindex, nil }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L2529-L2724
go
train
// Store a given message.
func (ms *FileMsgStore) Store(m *pb.MsgProto) (uint64, error)
// Store a given message. func (ms *FileMsgStore) Store(m *pb.MsgProto) (uint64, error)
{ ms.Lock() defer ms.Unlock() if m.Sequence <= ms.last { // We've already seen this message. return m.Sequence, nil } fslice := ms.writeSlice if fslice != nil { if err := ms.lockFiles(fslice); err != nil { return 0, err } } // Is there a gap in message sequence? if ms.last > 0 && m.Sequence > ms.last+1 { if err := ms.fillGaps(fslice, m); err != nil { ms.unlockFiles(fslice) return 0, err } } // Check if we need to move to next file slice if fslice == nil || ms.slHasLimits { if fslice == nil || (ms.slSizeLim > 0 && fslice.msgsSize >= ms.slSizeLim) || (ms.slCountLim > 0 && fslice.msgsCount >= ms.slCountLim) || (ms.slAgeLim > 0 && atomic.LoadInt64(&ms.timeTick)-fslice.firstWrite >= ms.slAgeLim) { // Don't change store variable until success... newSliceSeq := ms.lastFSlSeq + 1 // Close the current file slice (if applicable) and open the next slice if fslice != nil { if err := ms.closeLockedFiles(fslice); err != nil { return 0, err } } // Create new slice datFName := filepath.Join(ms.channelName, fmt.Sprintf("%s%v%s", msgFilesPrefix, newSliceSeq, datSuffix)) idxFName := filepath.Join(ms.channelName, fmt.Sprintf("%s%v%s", msgFilesPrefix, newSliceSeq, idxSuffix)) datFile, err := ms.fm.createFile(datFName, defaultFileFlags, nil) if err != nil { return 0, err } idxFile, err := ms.fm.createFile(idxFName, defaultFileFlags, nil) if err != nil { ms.fm.closeLockedFile(datFile) ms.fm.remove(datFile) return 0, err } // Success, update the store's variables newSlice := &fileSlice{ file: datFile, idxFile: idxFile, lastUsed: atomic.LoadInt64(&ms.timeTick), } ms.fm.setBeforeCloseCb(datFile, ms.beforeDataFileCloseCb(newSlice)) ms.fm.setBeforeCloseCb(idxFile, ms.beforeIndexFileCloseCb(newSlice)) ms.files[newSliceSeq] = newSlice ms.writeSlice = newSlice if ms.firstFSlSeq == 0 { ms.firstFSlSeq = newSliceSeq } ms.lastFSlSeq = newSliceSeq ms.setFile(newSlice, 4) // If we added a second slice and the first slice was empty but not removed // because it was the only one, we remove it now. 
if len(ms.files) == 2 && fslice.msgsCount == fslice.rmCount { ms.removeFirstSlice() } // Update the fslice reference to new slice for rest of function fslice = ms.writeSlice } } // !! IMPORTANT !! // We want to reduce use of defer in functions that are in the fast path, // so after this point, on error, use goto processErr instead of return. // It means that we should not use local errors like this: // if err := this(); err != nil { // goto processErr // } seq := m.Sequence msgInBuffer := false var recSize int var err error var mindex *msgIndex var size uint64 var bwBuf *bufio.Writer if ms.bw != nil { bwBuf = ms.bw.buf } msgSize := m.Size() if bwBuf != nil { required := msgSize + recordHeaderSize if required > bwBuf.Available() { ms.writer, err = ms.bw.expand(fslice.file.handle, required) if err != nil { goto processErr } if len(ms.bufferedMsgs) > 0 { err = ms.processBufferedMsgs(fslice) if err != nil { goto processErr } } // Refresh this since it has changed. bwBuf = ms.bw.buf } } ms.tmpMsgBuf, recSize, err = writeRecord(ms.writer, ms.tmpMsgBuf, recNoType, m, msgSize, ms.fstore.crcTable) if err != nil { goto processErr } if bwBuf != nil { // Check to see if we should cancel a buffer shrink request if ms.bw.shrinkReq { ms.bw.checkShrinkRequest() } // If message was added to the buffer we need to also save a reference // to that message outside of the cache, until the buffer is flushed. if bwBuf.Buffered() >= recSize { ms.bufferedSeqs = append(ms.bufferedSeqs, seq) mindex = &msgIndex{offset: ms.wOffset, timestamp: m.Timestamp, msgSize: uint32(msgSize)} ms.bufferedMsgs[seq] = &bufferedMsg{msg: m, index: mindex} msgInBuffer = true } } // Message was flushed to disk, write corresponding index if !msgInBuffer { err = ms.writeIndex(fslice.idxFile.handle, seq, ms.wOffset, m.Timestamp, msgSize) if err != nil { goto processErr } } if ms.first == 0 || ms.first == seq { // First ever message or after all messages expired and this is the // first new message. 
ms.first = seq ms.firstMsg = m if maxAge := ms.limits.MaxAge; maxAge > 0 { ms.expiration = m.Timestamp + int64(maxAge) if len(ms.bkgTasksWake) == 0 { ms.bkgTasksWake <- true } } } ms.last = seq ms.lastMsg = m ms.cache.add(seq, m, true) ms.wOffset += int64(recSize) // For size, add the message record size, the record header and the size // required for the corresponding index record. size = uint64(msgSize + msgRecordOverhead) // Total stats ms.totalCount++ ms.totalBytes += size // Stats per file slice fslice.msgsCount++ fslice.msgsSize += size if fslice.firstWrite == 0 { fslice.firstWrite = m.Timestamp } // Save references to first and last sequences for this slice if fslice.firstSeq == 0 { fslice.firstSeq = seq } fslice.lastSeq = seq if ms.limits.MaxMsgs > 0 || ms.limits.MaxBytes > 0 { // Enfore limits and update file slice if needed. err = ms.enforceLimits(true, false) if err != nil { goto processErr } } ms.unlockFiles(fslice) return seq, nil processErr: ms.unlockFiles(fslice) return 0, err }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L2767-L2789
go
train
// processBufferedMsgs adds message index records in the given buffer // for every pending buffered messages.
func (ms *FileMsgStore) processBufferedMsgs(fslice *fileSlice) error
// processBufferedMsgs adds message index records in the given buffer // for every pending buffered messages. func (ms *FileMsgStore) processBufferedMsgs(fslice *fileSlice) error
{ idxBufferSize := len(ms.bufferedMsgs) * msgIndexRecSize ms.tmpMsgBuf = util.EnsureBufBigEnough(ms.tmpMsgBuf, idxBufferSize) bufOffset := 0 for _, pseq := range ms.bufferedSeqs { bm := ms.bufferedMsgs[pseq] if bm != nil { mindex := bm.index // We add the index info for this flushed message ms.addIndex(ms.tmpMsgBuf[bufOffset:], pseq, mindex.offset, mindex.timestamp, int(mindex.msgSize)) bufOffset += msgIndexRecSize delete(ms.bufferedMsgs, pseq) } } if bufOffset > 0 { if _, err := fslice.idxFile.handle.Write(ms.tmpMsgBuf[:bufOffset]); err != nil { return err } } ms.bufferedSeqs = ms.bufferedSeqs[:0] return nil }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L2795-L2862
go
train
// expireMsgs ensures that messages don't stay in the log longer than the // limit's MaxAge. // Returns the time of the next expiration (possibly 0 if no message left) // The store's lock is assumed to be held on entry
func (ms *FileMsgStore) expireMsgs(now, maxAge int64) int64
// expireMsgs ensures that messages don't stay in the log longer than the // limit's MaxAge. // Returns the time of the next expiration (possibly 0 if no message left) // The store's lock is assumed to be held on entry func (ms *FileMsgStore) expireMsgs(now, maxAge int64) int64
{ if ms.first == 0 { ms.expiration = 0 return ms.expiration } var m *msgIndex var slice *fileSlice for { m = nil if ms.first <= ms.last { if slice == nil || ms.first > slice.lastSeq { // If slice is not nil, it means that we have expired all // messages belong to that slice, and the slice itslef. // So there is no need to unlock it since this has already // been done. slice = ms.getFileSliceForSeq(ms.first) if slice == nil { // If we did not find a slice for this sequence, it could // be cause there is a gap in message sequence due to // file truncation following unexpected EOF on recovery. // So set the first seq to the first sequence of the now // first slice. slice = ms.files[ms.firstFSlSeq] if slice != nil { ms.first = slice.firstSeq } } if slice != nil { if err := ms.lockIndexFile(slice); err != nil { slice = nil break } } } if slice != nil { var err error if m, err = ms.getMsgIndex(slice, ms.first); err != nil { ms.log.Errorf("Error during expiration: %v", err) if slice != nil { ms.unlockIndexFile(slice) } // Try again in 5 secs. ms.expiration = now + int64(5*time.Second) return ms.expiration } } } if m == nil { ms.expiration = 0 break } elapsed := now - m.timestamp if elapsed >= maxAge { ms.removeFirstMsg(m, false) } else { if elapsed < 0 { ms.expiration = m.timestamp + maxAge } else { ms.expiration = now + (maxAge - elapsed) } break } } if slice != nil { ms.unlockIndexFile(slice) } return ms.expiration }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L2866-L2893
go
train
// enforceLimits checks total counts with current msg store's limits, // removing a file slice and/or updating slices' count as necessary.
func (ms *FileMsgStore) enforceLimits(reportHitLimit, lockFile bool) error
// enforceLimits checks total counts with current msg store's limits, // removing a file slice and/or updating slices' count as necessary. func (ms *FileMsgStore) enforceLimits(reportHitLimit, lockFile bool) error
{ // Check if we need to remove any (but leave at least the last added). // Note that we may have to remove more than one msg if we are here // after a restart with smaller limits than originally set, or if // message is quite big, etc... maxMsgs := ms.limits.MaxMsgs maxBytes := ms.limits.MaxBytes for ms.totalCount > 1 && ((maxMsgs > 0 && ms.totalCount > maxMsgs) || (maxBytes > 0 && ms.totalBytes > uint64(maxBytes))) { // Remove first message from first slice, potentially removing // the slice, etc... if err := ms.removeFirstMsg(nil, lockFile); err != nil { // We are not going to fail the publish, just report // the error removing the first message. // TODO: Is this the right thing to do? ms.log.Errorf("Unable to remove first message: %v", err) return nil } if reportHitLimit && !ms.hitLimit { ms.hitLimit = true ms.log.Warnf(droppingMsgsFmt, ms.subject, ms.totalCount, ms.limits.MaxMsgs, util.FriendlyBytes(int64(ms.totalBytes)), util.FriendlyBytes(ms.limits.MaxBytes)) } } return nil }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L2901-L2907
go
train
// getMsgIndex returns a msgIndex object for message with sequence `seq`, // or nil if message is not found (or no longer valid: expired, removed // due to limits, etc). // This call first checks that the record is not present in // ms.bufferedMsgs since it is possible that message and index are not // yet stored on disk.
func (ms *FileMsgStore) getMsgIndex(slice *fileSlice, seq uint64) (*msgIndex, error)
// getMsgIndex returns a msgIndex object for message with sequence `seq`, // or nil if message is not found (or no longer valid: expired, removed // due to limits, etc). // This call first checks that the record is not present in // ms.bufferedMsgs since it is possible that message and index are not // yet stored on disk. func (ms *FileMsgStore) getMsgIndex(slice *fileSlice, seq uint64) (*msgIndex, error)
{ bm := ms.bufferedMsgs[seq] if bm != nil { return bm.index, nil } return ms.readMsgIndex(slice, seq) }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L2912-L2928
go
train
// readMsgIndex reads a message index record from disk and returns a msgIndex // object. Same than getMsgIndex but without checking for message in // ms.bufferedMsgs first.
func (ms *FileMsgStore) readMsgIndex(slice *fileSlice, seq uint64) (*msgIndex, error)
// readMsgIndex reads a message index record from disk and returns a msgIndex // object. Same than getMsgIndex but without checking for message in // ms.bufferedMsgs first. func (ms *FileMsgStore) readMsgIndex(slice *fileSlice, seq uint64) (*msgIndex, error)
{ // Compute the offset in the index file itself. idxFileOffset := 4 + (int64(seq-slice.firstSeq)+int64(slice.rmCount))*msgIndexRecSize // Then position the file pointer of the index file. if _, err := slice.idxFile.handle.Seek(idxFileOffset, io.SeekStart); err != nil { return nil, err } // Read the index record and ensure we have what we expect seqInIndexFile, msgIndex, err := ms.readIndex(slice.idxFile.handle) if err != nil { return nil, err } if seqInIndexFile != seq { return nil, fmt.Errorf("wrong sequence, wanted %v got %v", seq, seqInIndexFile) } return msgIndex, nil }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L2932-L2975
go
train
// removeFirstMsg "removes" the first message of the first slice. // If the slice is "empty" the file slice is removed.
func (ms *FileMsgStore) removeFirstMsg(mindex *msgIndex, lockFile bool) error
// removeFirstMsg "removes" the first message of the first slice. // If the slice is "empty" the file slice is removed. func (ms *FileMsgStore) removeFirstMsg(mindex *msgIndex, lockFile bool) error
{ // Work with the first slice slice := ms.files[ms.firstFSlSeq] // Get the message index for the first valid message in this slice if mindex == nil { if lockFile || slice != ms.writeSlice { ms.lockIndexFile(slice) } var err error mindex, err = ms.getMsgIndex(slice, slice.firstSeq) if lockFile || slice != ms.writeSlice { ms.unlockIndexFile(slice) } if err != nil { return err } } // Size of the first message in this slice firstMsgSize := mindex.msgSize // For size, we count the size of serialized message + record header + // the corresponding index record size := uint64(firstMsgSize + msgRecordOverhead) // Keep track of number of "removed" messages in this slice slice.rmCount++ // Update total counts ms.totalCount-- ms.totalBytes -= size // Messages sequence is incremental with no gap on a given msgstore. ms.first++ // Invalidate ms.firstMsg, it will be looked-up on demand. ms.firstMsg = nil // Invalidate ms.lastMsg if it was the last message being removed. if ms.first > ms.last { ms.lastMsg = nil } // Is file slice is "empty" and not the last one if slice.msgsCount == slice.rmCount && len(ms.files) > 1 { ms.removeFirstSlice() } else { // This is the new first message in this slice. slice.firstSeq = ms.first } return nil }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L2979-L3043
go
train
// removeFirstSlice removes the first file slice.
// Should not be called if first slice is also last!
// If an archive script is configured, the data and index files are renamed
// with a backup suffix and the script is invoked asynchronously; otherwise
// the files are deleted.
func (ms *FileMsgStore) removeFirstSlice() {
	sl := ms.files[ms.firstFSlSeq]
	// We may or may not have the first slice locked, so need to close
	// the file knowing that files can be in either state.
	ms.fm.closeLockedOrOpenedFile(sl.file)
	ms.fm.remove(sl.file)
	// Close index file too.
	ms.fm.closeLockedOrOpenedFile(sl.idxFile)
	ms.fm.remove(sl.idxFile)
	// Assume we will remove the files
	remove := true
	// If there is an archive script invoke it first
	script := ms.fstore.opts.SliceArchiveScript
	if script != "" {
		datBak := sl.file.name + bakSuffix
		idxBak := sl.idxFile.name + bakSuffix
		var err error
		if err = os.Rename(sl.file.name, datBak); err == nil {
			if err = os.Rename(sl.idxFile.name, idxBak); err != nil {
				// Remove first backup file
				os.Remove(datBak)
			}
		}
		if err == nil {
			// Files have been successfully renamed, so don't attempt
			// to remove the original files.
			remove = false
			// We run the script in a go routine to not block the server.
			// allDone lets the store wait for this goroutine on close.
			ms.allDone.Add(1)
			go func(subj, dat, idx string) {
				defer ms.allDone.Done()
				cmd := exec.Command(script, subj, dat, idx)
				output, err := cmd.CombinedOutput()
				if err != nil {
					ms.log.Noticef("Error invoking archive script %q: %v (output=%v)", script, err, string(output))
				} else {
					ms.log.Noticef("Output of archive script for %s (%s and %s): %v", subj, dat, idx, string(output))
				}
			}(ms.subject, datBak, idxBak)
		}
	}
	// Remove files
	if remove {
		os.Remove(sl.file.name)
		os.Remove(sl.idxFile.name)
	}
	// Remove slice from map
	delete(ms.files, ms.firstFSlSeq)
	// Normally, file slices have an incremental sequence number with
	// no gap. However, we want to support the fact that an user could
	// copy back some old file slice to be recovered, and so there
	// may be a gap. So find out what is the new first file sequence.
	for ms.firstFSlSeq < ms.lastFSlSeq {
		ms.firstFSlSeq++
		if _, ok := ms.files[ms.firstFSlSeq]; ok {
			break
		}
	}
	// This should not happen! (caller must never remove the last slice)
	if ms.firstFSlSeq > ms.lastFSlSeq {
		panic("Removed last slice!")
	}
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L3048-L3066
go
train
// getFileSliceForSeq returns the file slice where the message of the // given sequence is stored, or nil if the message is not found in any // of the file slices.
func (ms *FileMsgStore) getFileSliceForSeq(seq uint64) *fileSlice
// getFileSliceForSeq returns the file slice where the message of the // given sequence is stored, or nil if the message is not found in any // of the file slices. func (ms *FileMsgStore) getFileSliceForSeq(seq uint64) *fileSlice
{ if len(ms.files) == 0 { return nil } // Start with write slice slice := ms.writeSlice if (slice.firstSeq <= seq) && (seq <= slice.lastSeq) { return slice } // We want to support possible gaps in file slice sequence, so // no dichotomy, but simple iteration of the map, which in Go is // random. for _, slice := range ms.files { if (slice.firstSeq <= seq) && (seq <= slice.lastSeq) { return slice } } return nil }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
stores/filestore.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L3070-L3157
go
train
// backgroundTasks performs some background tasks related to this
// messages store: closing unused file slices, shrinking the write
// buffer, expiring messages past MaxAge and evicting cached messages.
// Runs as a goroutine until ms.bkgTasksDone is signaled; allDone is
// decremented on exit so the store can wait for completion.
func (ms *FileMsgStore) backgroundTasks() {
	defer ms.allDone.Done()

	// Snapshot state under the read lock; these are refreshed as needed
	// inside the loop.
	ms.RLock()
	hasBuffer := ms.bw != nil
	maxAge := int64(ms.limits.MaxAge)
	nextExpiration := ms.expiration
	lastCacheCheck := ms.timeTick
	lastBufShrink := ms.timeTick
	ms.RUnlock()

	for {
		// Update time (shared with other goroutines, hence atomic).
		timeTick := time.Now().UnixNano()
		atomic.StoreInt64(&ms.timeTick, timeTick)

		// Close unused file slices
		if atomic.LoadInt64(&ms.checkSlices) == 1 {
			ms.Lock()
			opened := 0
			for _, slice := range ms.files {
				// If no FD limit and this is the write slice, skip.
				if !ms.hasFDsLimit && slice == ms.writeSlice {
					continue
				}
				opened++
				// Close slices that have not been used for a while.
				if slice.lastUsed > 0 && time.Duration(timeTick-slice.lastUsed) >= sliceCloseInterval {
					slice.lastUsed = 0
					ms.fm.closeFileIfOpened(slice.file)
					ms.fm.closeFileIfOpened(slice.idxFile)
					opened--
				}
			}
			if opened == 0 {
				// We can update this without atomic since we are under store lock
				// and this go routine is the only place where we check the value.
				ms.checkSlices = 0
			}
			ms.Unlock()
		}

		// Shrink the buffer if applicable
		if hasBuffer && time.Duration(timeTick-lastBufShrink) >= bufShrinkInterval {
			ms.Lock()
			if ms.writeSlice != nil {
				file := ms.writeSlice.file
				if ms.fm.lockFileIfOpened(file) {
					ms.writer, _ = ms.bw.tryShrinkBuffer(file.handle)
					ms.fm.unlockFile(file)
				}
			}
			ms.Unlock()
			lastBufShrink = timeTick
		}

		// Check for expiration
		if maxAge > 0 && nextExpiration > 0 && timeTick >= nextExpiration {
			ms.Lock()
			// Expire messages; returns the time of the next expiration.
			nextExpiration = ms.expireMsgs(timeTick, maxAge)
			ms.Unlock()
		}

		// Check for message caching
		if timeTick >= lastCacheCheck+cacheTTL {
			tryEvict := atomic.LoadInt32(&ms.cache.tryEvict)
			if tryEvict == 1 {
				ms.Lock()
				// Possibly remove some/all cached messages
				ms.cache.evict(timeTick)
				ms.Unlock()
			}
			lastCacheCheck = timeTick
		}

		select {
		case <-ms.bkgTasksDone:
			// Store is closing: exit the goroutine.
			return
		case <-ms.bkgTasksWake:
			// wake up from a possible sleep to run the loop;
			// re-read the expiration deadline which may have changed.
			ms.RLock()
			nextExpiration = ms.expiration
			ms.RUnlock()
		case <-time.After(bkgTasksSleepDuration):
			// go back to top of for loop.
		}
	}
}