repo
stringlengths
5
67
sha
stringlengths
40
40
path
stringlengths
4
234
url
stringlengths
85
339
language
stringclasses
6 values
split
stringclasses
3 values
doc
stringlengths
3
51.2k
sign
stringlengths
5
8.01k
problem
stringlengths
13
51.2k
output
stringlengths
0
3.87M
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/conf.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/conf.go#L334-L386
go
train
// parseChannelLimits updates `cl` with channel limits.
func parseChannelLimits(cl *stores.ChannelLimits, k, name string, v interface{}, isGlobal bool) error
// parseChannelLimits updates `cl` with channel limits. func parseChannelLimits(cl *stores.ChannelLimits, k, name string, v interface{}, isGlobal bool) error
{ switch name { case "msu", "max_subs", "max_subscriptions", "maxsubscriptions": if err := checkType(k, reflect.Int64, v); err != nil { return err } cl.MaxSubscriptions = int(v.(int64)) if !isGlobal && cl.MaxSubscriptions == 0 { cl.MaxSubscriptions = -1 } case "mm", "max_msgs", "maxmsgs", "max_count", "maxcount": if err := checkType(k, reflect.Int64, v); err != nil { return err } cl.MaxMsgs = int(v.(int64)) if !isGlobal && cl.MaxMsgs == 0 { cl.MaxMsgs = -1 } case "mb", "max_bytes", "maxbytes": if err := checkType(k, reflect.Int64, v); err != nil { return err } cl.MaxBytes = v.(int64) if !isGlobal && cl.MaxBytes == 0 { cl.MaxBytes = -1 } case "ma", "max_age", "maxage": if err := checkType(k, reflect.String, v); err != nil { return err } dur, err := time.ParseDuration(v.(string)) if err != nil { return err } cl.MaxAge = dur if !isGlobal && cl.MaxAge == 0 { cl.MaxAge = -1 } case "mi", "max_inactivity", "maxinactivity": if err := checkType(k, reflect.String, v); err != nil { return err } dur, err := time.ParseDuration(v.(string)) if err != nil { return err } cl.MaxInactivity = dur if !isGlobal && cl.MaxInactivity == 0 { cl.MaxInactivity = -1 } } return nil }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/conf.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/conf.go#L389-L413
go
train
// parsePerChannelLimits updates `opts` with per channel limits.
func parsePerChannelLimits(itf interface{}, opts *Options) error
// parsePerChannelLimits updates `opts` with per channel limits. func parsePerChannelLimits(itf interface{}, opts *Options) error
{ m, ok := itf.(map[string]interface{}) if !ok { return fmt.Errorf("expected per channel limits to be a map/struct, got %v", itf) } for channelName, limits := range m { limitsMap, ok := limits.(map[string]interface{}) if !ok { return fmt.Errorf("expected channel limits to be a map/struct, got %v", limits) } if !util.IsChannelNameValid(channelName, true) { return fmt.Errorf("invalid channel name %q", channelName) } cl := &stores.ChannelLimits{} for k, v := range limitsMap { name := strings.ToLower(k) if err := parseChannelLimits(cl, k, name, v, false); err != nil { return err } } sl := &opts.StoreLimits sl.AddPerChannel(channelName, cl) } return nil }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/conf.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/conf.go#L540-L699
go
train
// ConfigureOptions accepts a flag set and augment it with NATS Streaming Server
// specific flags. It then invokes the corresponding function from NATS Server.
// On success, Streaming and NATS options structures are returned configured
// based on the selected flags and/or configuration files.
// The command line options take precedence to the ones in the configuration files.
func ConfigureOptions(fs *flag.FlagSet, args []string, printVersion, printHelp, printTLSHelp func()) (*Options, *natsd.Options, error) {
	sopts := GetDefaultOptions()
	var (
		stanConfigFile string
		natsConfigFile string
		clusterPeers   string
		encryptionKey  string
	)
	// Register the Streaming flags. Most usage strings simply name the
	// stan.Options field the flag sets. Size flags (max_bytes, mb,
	// file_compact_min_size, file_buffer_size, file_slice_max_bytes) are
	// registered as plain strings so that suffixed values like "1KB" can
	// be parsed later by getBytes() in the fs.Visit() pass below.
	fs.StringVar(&sopts.ID, "cluster_id", DefaultClusterID, "stan.ID")
	fs.StringVar(&sopts.ID, "cid", DefaultClusterID, "stan.ID")
	fs.StringVar(&sopts.StoreType, "store", stores.TypeMemory, "stan.StoreType")
	fs.StringVar(&sopts.StoreType, "st", stores.TypeMemory, "stan.StoreType")
	fs.StringVar(&sopts.FilestoreDir, "dir", "", "stan.FilestoreDir")
	fs.IntVar(&sopts.MaxChannels, "max_channels", stores.DefaultStoreLimits.MaxChannels, "stan.MaxChannels")
	fs.IntVar(&sopts.MaxChannels, "mc", stores.DefaultStoreLimits.MaxChannels, "stan.MaxChannels")
	fs.IntVar(&sopts.MaxSubscriptions, "max_subs", stores.DefaultStoreLimits.MaxSubscriptions, "stan.MaxSubscriptions")
	fs.IntVar(&sopts.MaxSubscriptions, "msu", stores.DefaultStoreLimits.MaxSubscriptions, "stan.MaxSubscriptions")
	fs.IntVar(&sopts.MaxMsgs, "max_msgs", stores.DefaultStoreLimits.MaxMsgs, "stan.MaxMsgs")
	fs.IntVar(&sopts.MaxMsgs, "mm", stores.DefaultStoreLimits.MaxMsgs, "stan.MaxMsgs")
	fs.String("max_bytes", fmt.Sprintf("%v", stores.DefaultStoreLimits.MaxBytes), "stan.MaxBytes")
	fs.String("mb", fmt.Sprintf("%v", stores.DefaultStoreLimits.MaxBytes), "stan.MaxBytes")
	fs.DurationVar(&sopts.MaxAge, "max_age", stores.DefaultStoreLimits.MaxAge, "stan.MaxAge")
	fs.DurationVar(&sopts.MaxAge, "ma", stores.DefaultStoreLimits.MaxAge, "stan.MaxAge")
	fs.DurationVar(&sopts.MaxInactivity, "max_inactivity", stores.DefaultStoreLimits.MaxInactivity, "Maximum inactivity (no new message, no subscription) after which a channel can be garbage collected")
	fs.DurationVar(&sopts.MaxInactivity, "mi", stores.DefaultStoreLimits.MaxInactivity, "Maximum inactivity (no new message, no subscription) after which a channel can be garbage collected")
	fs.DurationVar(&sopts.ClientHBInterval, "hbi", DefaultHeartBeatInterval, "stan.ClientHBInterval")
	fs.DurationVar(&sopts.ClientHBInterval, "hb_interval", DefaultHeartBeatInterval, "stan.ClientHBInterval")
	fs.DurationVar(&sopts.ClientHBTimeout, "hbt", DefaultClientHBTimeout, "stan.ClientHBTimeout")
	fs.DurationVar(&sopts.ClientHBTimeout, "hb_timeout", DefaultClientHBTimeout, "stan.ClientHBTimeout")
	fs.IntVar(&sopts.ClientHBFailCount, "hbf", DefaultMaxFailedHeartBeats, "stan.ClientHBFailCount")
	fs.IntVar(&sopts.ClientHBFailCount, "hb_fail_count", DefaultMaxFailedHeartBeats, "stan.ClientHBFailCount")
	fs.BoolVar(&sopts.Debug, "SD", false, "stan.Debug")
	fs.BoolVar(&sopts.Debug, "stan_debug", false, "stan.Debug")
	fs.BoolVar(&sopts.Trace, "SV", false, "stan.Trace")
	fs.BoolVar(&sopts.Trace, "stan_trace", false, "stan.Trace")
	// -SDV sets both Debug and Trace; it has no bound variable and is
	// handled in the fs.Visit() pass below so -SDV=false also works.
	fs.Bool("SDV", false, "")
	fs.BoolVar(&sopts.Secure, "secure", false, "stan.Secure")
	fs.StringVar(&sopts.ClientCert, "tls_client_cert", "", "stan.ClientCert")
	fs.StringVar(&sopts.ClientKey, "tls_client_key", "", "stan.ClientKey")
	fs.StringVar(&sopts.ClientCA, "tls_client_cacert", "", "stan.ClientCA")
	fs.StringVar(&sopts.NATSServerURL, "nats_server", "", "stan.NATSServerURL")
	fs.StringVar(&sopts.NATSServerURL, "ns", "", "stan.NATSServerURL")
	fs.StringVar(&stanConfigFile, "sc", "", "")
	fs.StringVar(&stanConfigFile, "stan_config", "", "")
	fs.BoolVar(&sopts.FileStoreOpts.CompactEnabled, "file_compact_enabled", stores.DefaultFileStoreOptions.CompactEnabled, "stan.FileStoreOpts.CompactEnabled")
	fs.IntVar(&sopts.FileStoreOpts.CompactFragmentation, "file_compact_frag", stores.DefaultFileStoreOptions.CompactFragmentation, "stan.FileStoreOpts.CompactFragmentation")
	fs.IntVar(&sopts.FileStoreOpts.CompactInterval, "file_compact_interval", stores.DefaultFileStoreOptions.CompactInterval, "stan.FileStoreOpts.CompactInterval")
	fs.String("file_compact_min_size", fmt.Sprintf("%v", stores.DefaultFileStoreOptions.CompactMinFileSize), "stan.FileStoreOpts.CompactMinFileSize")
	fs.String("file_buffer_size", fmt.Sprintf("%v", stores.DefaultFileStoreOptions.BufferSize), "stan.FileStoreOpts.BufferSize")
	fs.BoolVar(&sopts.FileStoreOpts.DoCRC, "file_crc", stores.DefaultFileStoreOptions.DoCRC, "stan.FileStoreOpts.DoCRC")
	fs.Int64Var(&sopts.FileStoreOpts.CRCPolynomial, "file_crc_poly", stores.DefaultFileStoreOptions.CRCPolynomial, "stan.FileStoreOpts.CRCPolynomial")
	fs.BoolVar(&sopts.FileStoreOpts.DoSync, "file_sync", stores.DefaultFileStoreOptions.DoSync, "stan.FileStoreOpts.DoSync")
	fs.IntVar(&sopts.FileStoreOpts.SliceMaxMsgs, "file_slice_max_msgs", stores.DefaultFileStoreOptions.SliceMaxMsgs, "stan.FileStoreOpts.SliceMaxMsgs")
	fs.String("file_slice_max_bytes", fmt.Sprintf("%v", stores.DefaultFileStoreOptions.SliceMaxBytes), "stan.FileStoreOpts.SliceMaxBytes")
	fs.DurationVar(&sopts.FileStoreOpts.SliceMaxAge, "file_slice_max_age", stores.DefaultFileStoreOptions.SliceMaxAge, "stan.FileStoreOpts.SliceMaxAge")
	fs.StringVar(&sopts.FileStoreOpts.SliceArchiveScript, "file_slice_archive_script", "", "stan.FileStoreOpts.SliceArchiveScript")
	fs.Int64Var(&sopts.FileStoreOpts.FileDescriptorsLimit, "file_fds_limit", stores.DefaultFileStoreOptions.FileDescriptorsLimit, "stan.FileStoreOpts.FileDescriptorsLimit")
	fs.IntVar(&sopts.FileStoreOpts.ParallelRecovery, "file_parallel_recovery", stores.DefaultFileStoreOptions.ParallelRecovery, "stan.FileStoreOpts.ParallelRecovery")
	fs.BoolVar(&sopts.FileStoreOpts.TruncateUnexpectedEOF, "file_truncate_bad_eof", stores.DefaultFileStoreOptions.TruncateUnexpectedEOF, "Truncate files for which there is an unexpected EOF on recovery, dataloss may occur")
	fs.IntVar(&sopts.IOBatchSize, "io_batch_size", DefaultIOBatchSize, "stan.IOBatchSize")
	fs.Int64Var(&sopts.IOSleepTime, "io_sleep_time", DefaultIOSleepTime, "stan.IOSleepTime")
	fs.StringVar(&sopts.FTGroupName, "ft_group", "", "stan.FTGroupName")
	fs.BoolVar(&sopts.Clustering.Clustered, "clustered", false, "stan.Clustering.Clustered")
	fs.StringVar(&sopts.Clustering.NodeID, "cluster_node_id", "", "stan.Clustering.NodeID")
	fs.BoolVar(&sopts.Clustering.Bootstrap, "cluster_bootstrap", false, "stan.Clustering.Bootstrap")
	fs.StringVar(&clusterPeers, "cluster_peers", "", "stan.Clustering.Peers")
	fs.StringVar(&sopts.Clustering.RaftLogPath, "cluster_log_path", "", "stan.Clustering.RaftLogPath")
	fs.IntVar(&sopts.Clustering.LogCacheSize, "cluster_log_cache_size", DefaultLogCacheSize, "stan.Clustering.LogCacheSize")
	fs.IntVar(&sopts.Clustering.LogSnapshots, "cluster_log_snapshots", DefaultLogSnapshots, "stan.Clustering.LogSnapshots")
	fs.Int64Var(&sopts.Clustering.TrailingLogs, "cluster_trailing_logs", DefaultTrailingLogs, "stan.Clustering.TrailingLogs")
	fs.BoolVar(&sopts.Clustering.Sync, "cluster_sync", false, "stan.Clustering.Sync")
	fs.BoolVar(&sopts.Clustering.RaftLogging, "cluster_raft_logging", false, "")
	fs.StringVar(&sopts.SQLStoreOpts.Driver, "sql_driver", "", "SQL Driver")
	fs.StringVar(&sopts.SQLStoreOpts.Source, "sql_source", "", "SQL Data Source")
	defSQLOpts := stores.DefaultSQLStoreOptions()
	fs.BoolVar(&sopts.SQLStoreOpts.NoCaching, "sql_no_caching", defSQLOpts.NoCaching, "Enable/Disable caching")
	fs.IntVar(&sopts.SQLStoreOpts.MaxOpenConns, "sql_max_open_conns", defSQLOpts.MaxOpenConns, "Max opened connections to the database")
	fs.StringVar(&sopts.SyslogName, "syslog_name", "", "Syslog Name")
	fs.BoolVar(&sopts.Encrypt, "encrypt", false, "Specify if server should use encryption at rest")
	fs.StringVar(&sopts.EncryptionCipher, "encryption_cipher", stores.CryptoCipherAutoSelect, "Encryption cipher. Supported are AES and CHACHA (default is AES)")
	fs.StringVar(&encryptionKey, "encryption_key", "", "Encryption Key. It is recommended to specify it through the NATS_STREAMING_ENCRYPTION_KEY environment variable instead")
	// First, we need to call NATS's ConfigureOptions() with above flag set.
	// It will be augmented with NATS specific flags and call fs.Parse(args) for us.
	nopts, err := natsd.ConfigureOptions(fs, args, printVersion, printHelp, printTLSHelp)
	if err != nil {
		return nil, nil, err
	}
	// At this point, if NATS config file was specified in the command line (-c of -config)
	// nopts.ConfigFile will not be empty.
	natsConfigFile = nopts.ConfigFile
	if clusterPeers != "" {
		// A node never lists itself as a peer, so filter out our own ID.
		sopts.Clustering.Peers = []string{}
		for _, p := range strings.Split(clusterPeers, ",") {
			if p = strings.TrimSpace(p); p != sopts.Clustering.NodeID {
				sopts.Clustering.Peers = append(sopts.Clustering.Peers, p)
			}
		}
	}
	// Providing a key on the command line implies encryption.
	if encryptionKey != "" {
		sopts.Encrypt = true
		sopts.EncryptionKey = []byte(encryptionKey)
	}
	// If both nats and streaming configuration files are used, then
	// we only use the config file for the corresponding module.
	// However, if only one command line parameter was specified,
	// we use the same config file for both modules.
	if stanConfigFile != "" || natsConfigFile != "" {
		// If NATS config file was not specified, but streaming was, use
		// streaming config file for NATS too.
		if natsConfigFile == "" {
			if err := nopts.ProcessConfigFile(stanConfigFile); err != nil {
				return nil, nil, err
			}
		}
		// If NATS config file was specified, but not the streaming one,
		// use nats config file for streaming too.
		if stanConfigFile == "" {
			stanConfigFile = natsConfigFile
		}
		if err := ProcessConfigFile(stanConfigFile, sopts); err != nil {
			return nil, nil, err
		}
		// Need to call Parse() again to override with command line params.
		// No need to check for errors since this has already been called
		// in natsd.ConfigureOptions()
		fs.Parse(args)
	}
	// Special handling for some command line params
	var flagErr error
	fs.Visit(func(f *flag.Flag) {
		// Stop processing after the first error; fs.Visit cannot be
		// aborted early.
		if flagErr != nil {
			return
		}
		switch f.Name {
		case "SDV":
			// Check value to support -SDV=false
			boolValue, _ := strconv.ParseBool(f.Value.String())
			sopts.Trace, sopts.Debug = boolValue, boolValue
		case "max_bytes", "mb":
			sopts.MaxBytes, flagErr = getBytes(f)
		case "file_compact_min_size":
			sopts.FileStoreOpts.CompactMinFileSize, flagErr = getBytes(f)
		case "file_buffer_size":
			var i64 int64
			i64, flagErr = getBytes(f)
			sopts.FileStoreOpts.BufferSize = int(i64)
		}
	})
	if flagErr != nil {
		return nil, nil, flagErr
	}
	return sopts, nopts, nil
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/conf.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/conf.go#L703-L715
go
train
// getBytes returns the number of bytes from the flag's String size. // For instance, 1KB would return 1024.
func getBytes(f *flag.Flag) (int64, error)
// getBytes returns the number of bytes from the flag's String size. // For instance, 1KB would return 1024. func getBytes(f *flag.Flag) (int64, error)
{ var res map[string]interface{} // Use NATS parser to do the conversion for us. res, err := conf.Parse(fmt.Sprintf("bytes: %v", f.Value.String())) if err != nil { return 0, err } resVal := res["bytes"] if resVal == nil || reflect.TypeOf(resVal).Kind() != reflect.Int64 { return 0, fmt.Errorf("%v should be a size, got '%v'", f.Name, resVal) } return resVal.(int64), nil }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
util/sublist.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/util/sublist.go#L81-L135
go
train
// Insert adds a subscription into the sublist
func (s *Sublist) Insert(subject string, element interface{}) error {
	// Split the subject into tokens on btsep. The fixed-size array backs
	// the slice so subjects with up to 32 tokens need no allocation.
	tsa := [32]string{}
	tokens := tsa[:0]
	start := 0
	for i := 0; i < len(subject); i++ {
		if subject[i] == btsep {
			tokens = append(tokens, subject[start:i])
			start = i + 1
		}
	}
	tokens = append(tokens, subject[start:])
	s.Lock()
	// sfwc is set once the full wildcard token has been seen: no further
	// token is allowed after it.
	sfwc := false
	l := s.root
	var n *node
	for _, t := range tokens {
		// An empty token (e.g. "a..b") or any token after ">" makes the
		// subject invalid.
		if len(t) == 0 || sfwc {
			s.Unlock()
			return ErrInvalidSubject
		}
		// Look up the node for this token at the current level.
		switch t[0] {
		case pwc:
			n = l.pwc
		case fwc:
			n = l.fwc
			sfwc = true
		default:
			n = l.nodes[t]
		}
		// Create and attach the node if this token was not present yet.
		if n == nil {
			n = newNode()
			switch t[0] {
			case pwc:
				l.pwc = n
			case fwc:
				l.fwc = n
			default:
				l.nodes[t] = n
			}
		}
		if n.next == nil {
			n.next = newLevel()
		}
		l = n.next
	}
	// n is now the node for the last token: store the element there.
	n.elements = append(n.elements, element)
	// Update any cached result sets that this subject matches.
	s.addToCache(subject, element)
	s.count++
	s.Unlock()
	return nil
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
util/sublist.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/util/sublist.go#L139-L148
go
train
// addToCache will add the new entry to existing cache // entries if needed. Assumes write lock is held.
func (s *Sublist) addToCache(subject string, element interface{})
// addToCache will add the new entry to existing cache // entries if needed. Assumes write lock is held. func (s *Sublist) addToCache(subject string, element interface{})
{ for k, r := range s.cache { if matchLiteral(k, subject) { // Copy since others may have a reference. nr := append([]interface{}(nil), r...) nr = append(nr, element) s.cache[k] = nr } } }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
util/sublist.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/util/sublist.go#L152-L161
go
train
// removeFromCache will remove any active cache entries on that subject. // Assumes write lock is held.
func (s *Sublist) removeFromCache(subject string)
// removeFromCache will remove any active cache entries on that subject. // Assumes write lock is held. func (s *Sublist) removeFromCache(subject string)
{ for k := range s.cache { if !matchLiteral(k, subject) { continue } // Since someone else may be referencing, can't modify the list // safely, just let it re-populate. delete(s.cache, k) } }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
util/sublist.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/util/sublist.go#L165-L200
go
train
// Match will match all entries to the literal subject. // It will return a set of results.
func (s *Sublist) Match(subject string) []interface{}
// Match will match all entries to the literal subject. // It will return a set of results. func (s *Sublist) Match(subject string) []interface{}
{ s.RLock() rc, ok := s.cache[subject] s.RUnlock() if ok { return rc } tsa := [32]string{} tokens := tsa[:0] start := 0 for i := 0; i < len(subject); i++ { if subject[i] == btsep { tokens = append(tokens, subject[start:i]) start = i + 1 } } tokens = append(tokens, subject[start:]) result := make([]interface{}, 0, 4) s.Lock() matchLevel(s.root, tokens, &result) // Add to our cache s.cache[subject] = result // Bound the number of entries to sublistMaxCache if len(s.cache) > slCacheMax { for k := range s.cache { delete(s.cache, k) break } } s.Unlock() return result }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
util/sublist.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/util/sublist.go#L203-L228
go
train
// matchLevel is used to recursively descend into the trie.
func matchLevel(l *level, toks []string, results *[]interface{}) {
	var pwc, n *node
	for i, t := range toks {
		// Ran out of levels: nothing more can match.
		if l == nil {
			return
		}
		// A full wildcard (">") at this level matches the remainder of
		// the subject regardless of the remaining tokens.
		if l.fwc != nil {
			*results = append(*results, l.fwc.elements...)
		}
		// A partial wildcard ("*") matches this token: recurse to match
		// the remaining tokens against its sub-level.
		if pwc = l.pwc; pwc != nil {
			matchLevel(pwc.next, toks[i+1:], results)
		}
		// Follow the literal token edge, if any.
		n = l.nodes[t]
		if n != nil {
			l = n.next
		} else {
			l = nil
		}
	}
	// Collect elements attached at the final token's nodes: both the
	// partial wildcard node and the literal node (if present) end here.
	if pwc != nil {
		*results = append(*results, pwc.elements...)
	}
	if n != nil {
		*results = append(*results, n.elements...)
	}
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
util/sublist.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/util/sublist.go#L238-L296
go
train
// Remove will remove an element from the sublist.
func (s *Sublist) Remove(subject string, element interface{}) error {
	// Tokenize the subject on btsep (same scheme as Insert).
	tsa := [32]string{}
	tokens := tsa[:0]
	start := 0
	for i := 0; i < len(subject); i++ {
		if subject[i] == btsep {
			tokens = append(tokens, subject[start:i])
			start = i + 1
		}
	}
	tokens = append(tokens, subject[start:])
	s.Lock()
	defer s.Unlock()
	// sfwc is set once the full wildcard token has been seen: no further
	// token is allowed after it.
	sfwc := false
	l := s.root
	var n *node
	// Track levels for pruning
	var lnts [32]lnt
	levels := lnts[:0]
	for _, t := range tokens {
		if len(t) == 0 || sfwc {
			return ErrInvalidSubject
		}
		// The trie does not go this deep: the subject was never inserted.
		if l == nil {
			return ErrNotFound
		}
		switch t[0] {
		case pwc:
			n = l.pwc
		case fwc:
			n = l.fwc
			sfwc = true
		default:
			n = l.nodes[t]
		}
		if n != nil {
			// Remember the (level, node, token) triplet so that nodes
			// emptied by this removal can be pruned bottom-up below.
			levels = append(levels, lnt{l, n, t})
			l = n.next
		} else {
			l = nil
		}
	}
	// n is the node for the last token; remove the element from it.
	if !s.removeFromNode(n, element) {
		return ErrNotFound
	}
	s.count--
	// Prune now-empty nodes, deepest level first.
	for i := len(levels) - 1; i >= 0; i-- {
		l, n, t := levels[i].l, levels[i].n, levels[i].t
		if n.isEmpty() {
			l.pruneNode(n, t)
		}
	}
	// Invalidate cached result sets that included this subject.
	s.removeFromCache(subject)
	return nil
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
util/sublist.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/util/sublist.go#L314-L321
go
train
// isEmpty will test if the node has any entries. Used // in pruning.
func (n *node) isEmpty() bool
// isEmpty will test if the node has any entries. Used // in pruning. func (n *node) isEmpty() bool
{ if len(n.elements) == 0 { if n.next == nil || n.next.numNodes() == 0 { return true } } return false }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
util/sublist.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/util/sublist.go#L336-L347
go
train
// Removes an element from a list.
func removeFromList(element interface{}, l []interface{}) ([]interface{}, bool)
// Removes an element from a list. func removeFromList(element interface{}, l []interface{}) ([]interface{}, bool)
{ for i := 0; i < len(l); i++ { if l[i] == element { last := len(l) - 1 l[i] = l[last] l[last] = nil l = l[:last] return shrinkAsNeeded(l), true } } return l, false }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
util/sublist.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/util/sublist.go#L350-L356
go
train
// Remove the sub for the given node.
func (s *Sublist) removeFromNode(n *node, element interface{}) (found bool)
// Remove the sub for the given node. func (s *Sublist) removeFromNode(n *node, element interface{}) (found bool)
{ if n == nil { return false } n.elements, found = removeFromList(element, n.elements) return found }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
util/sublist.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/util/sublist.go#L360-L372
go
train
// Checks if we need to do a resize. This is for very large growth then
// subsequent return to a more normal size from unsubscribe.
func shrinkAsNeeded(l []interface{}) []interface{} {
	length, capacity := len(l), cap(l)
	// Small backing arrays are not worth shrinking.
	if capacity <= 8 {
		return l
	}
	// Reallocate when more than half of the capacity is unused.
	if free := float32(capacity-length) / float32(capacity); free > 0.50 {
		return append([]interface{}(nil), l...)
	}
	return l
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
util/sublist.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/util/sublist.go#L382-L386
go
train
// CacheCount returns the number of result sets in the cache.
func (s *Sublist) CacheCount() int
// CacheCount returns the number of result sets in the cache. func (s *Sublist) CacheCount() int
{ s.RLock() defer s.RUnlock() return len(s.cache) }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
util/sublist.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/util/sublist.go#L390-L420
go
train
// matchLiteral is used to test literal subjects, those that do not have any // wildcards, with a target subject. This is used in the cache layer.
func matchLiteral(literal, subject string) bool
// matchLiteral is used to test literal subjects, those that do not have any // wildcards, with a target subject. This is used in the cache layer. func matchLiteral(literal, subject string) bool
{ li := 0 ll := len(literal) for i := 0; i < len(subject); i++ { if li >= ll { return false } b := subject[i] switch b { case pwc: // Skip token in literal ll := len(literal) for { if li >= ll || literal[li] == btsep { li-- break } li++ } case fwc: return true default: if b != literal[li] { return false } } li++ } // Make sure we have processed all of the literal's chars.. return li >= ll }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
util/sublist.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/util/sublist.go#L483-L489
go
train
// Subjects returns an array of all subjects in this sublist // ordered from the widest to the narrowest of subjects. // Order between non wildcard tokens in a given level is // random though. // // For instance, if the sublist contains (in any inserted order): // // *.*, foo.>, *.>, foo.*.>, >, bar.>, foo.bar.>, bar.baz // // the returned array will be one of the two possibilities: // // >, *.>, *.*, foo.>, foo.*.>, foo.bar.>, bar.>, bar.baz // // or // // >, *.>, *.*, bar.>, bar.baz, foo.>, foo.*.>, foo.bar.> // // For a given level, the order will still always be from // wider to narrower, that is, foo.> comes before foo.*.> // which comes before foo.bar.>, and bar.> always comes // before bar.baz, but all the "bar" subjects may be // before or after all the "foo" subjects.
func (s *Sublist) Subjects() []string
// Subjects returns an array of all subjects in this sublist // ordered from the widest to the narrowest of subjects. // Order between non wildcard tokens in a given level is // random though. // // For instance, if the sublist contains (in any inserted order): // // *.*, foo.>, *.>, foo.*.>, >, bar.>, foo.bar.>, bar.baz // // the returned array will be one of the two possibilities: // // >, *.>, *.*, foo.>, foo.*.>, foo.bar.>, bar.>, bar.baz // // or // // >, *.>, *.*, bar.>, bar.baz, foo.>, foo.*.>, foo.bar.> // // For a given level, the order will still always be from // wider to narrower, that is, foo.> comes before foo.*.> // which comes before foo.bar.>, and bar.> always comes // before bar.baz, but all the "bar" subjects may be // before or after all the "foo" subjects. func (s *Sublist) Subjects() []string
{ s.RLock() defer s.RUnlock() subjects := make([]string, 0, s.count) getSubjects(s.root, "", &subjects) return subjects }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
util/lockfile_unix.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/util/lockfile_unix.go#L35-L71
go
train
// CreateLockFile attempt to lock the given file, creating it
// if necessary. On success, the file is returned, otherwise
// an error is returned.
// The file returned should be closed to release the lock
// quicker than if left to the operating system.
func CreateLockFile(file string) (LockFile, error) {
	f, err := os.Create(file)
	if err != nil {
		// Consider those fatal, others may be considered transient
		// (for instance FD limit reached, etc...)
		if os.IsNotExist(err) || os.IsPermission(err) {
			return nil, err
		}
		return nil, ErrUnableToLockNow
	}
	// Request an exclusive (write) advisory lock over the whole file.
	spec := &syscall.Flock_t{
		Type:   syscall.F_WRLCK,
		Whence: int16(io.SeekStart),
		Start:  0,
		Len:    0, // 0 means to lock the entire file.
	}
	// F_SETLK is non-blocking: it fails immediately if the lock is held.
	if err := syscall.FcntlFlock(f.Fd(), syscall.F_SETLK, spec); err != nil {
		// Try to gather all errors that we deem transient and return
		// ErrUnableToLockNow in this case to indicate the caller that
		// the lock could not be acquired at this time but it could
		// try later.
		// Basing this from possible ERRORS from this page:
		// http://pubs.opengroup.org/onlinepubs/009695399/functions/fcntl.html
		if err == syscall.EAGAIN || err == syscall.EACCES || err == syscall.EINTR || err == syscall.ENOLCK {
			err = ErrUnableToLockNow
		}
		// TODO: If error is not ErrUnableToLockNow, it may mean that
		// the call is not supported on that platform, etc...
		// We should have another level of verification, for instance
		// check content of the lockfile is not being updated by the
		// owner of the file, etc...
		f.Close()
		return nil, err
	}
	return &lockFile{f: f}, nil
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
util/lockfile_unix.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/util/lockfile_unix.go#L74-L90
go
train
// Close implements the LockFile interface
func (lf *lockFile) Close() error {
	lf.Lock()
	defer lf.Unlock()
	// Already closed: nothing to do (Close is idempotent).
	if lf.f == nil {
		return nil
	}
	// Release the advisory lock before closing the file descriptor.
	spec := &syscall.Flock_t{
		Type:   syscall.F_UNLCK,
		Whence: int16(io.SeekStart),
		Start:  0,
		Len:    0, // 0 means to lock the entire file.
	}
	err := syscall.FcntlFlock(lf.f.Fd(), syscall.F_SETLK, spec)
	// NOTE(review): CloseFile appears to close lf.f and combine any close
	// error with the unlock error above — confirm against util.CloseFile.
	err = CloseFile(err, lf.f)
	lf.f = nil
	return err
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
util/lockfile_unix.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/util/lockfile_unix.go#L93-L97
go
train
// IsClosed implements the LockFile interface
func (lf *lockFile) IsClosed() bool
// IsClosed implements the LockFile interface func (lf *lockFile) IsClosed() bool
{ lf.Lock() defer lf.Unlock() return lf.f == nil }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L340-L353
go
train
// low-level creation and storage in memory of a *channel // Lock is held on entry or not needed.
func (cs *channelStore) create(s *StanServer, name string, sc *stores.Channel) (*channel, error)
// low-level creation and storage in memory of a *channel // Lock is held on entry or not needed. func (cs *channelStore) create(s *StanServer, name string, sc *stores.Channel) (*channel, error)
{ c := &channel{name: name, store: sc, ss: s.createSubStore(), stan: s, nextSubID: 1} lastSequence, err := c.store.Msgs.LastSequence() if err != nil { return nil, err } c.nextSequence = lastSequence + 1 cs.channels[name] = c cl := cs.store.GetChannelLimits(name) if cl.MaxInactivity > 0 { c.activity = &channelActivity{maxInactivity: cl.MaxInactivity} } return c, nil }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L453-L456
go
train
// Starts the delete timer that when firing will post // a channel delete request to the ioLoop. // The channelStore's mutex must be held on entry.
func (c *channel) startDeleteTimer()
// Starts the delete timer that when firing will post // a channel delete request to the ioLoop. // The channelStore's mutex must be held on entry. func (c *channel) startDeleteTimer()
{ c.activity.last = time.Now() c.resetDeleteTimer(c.activity.maxInactivity) }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L460-L468
go
train
// Stops the delete timer. // The channelStore's mutex must be held on entry.
func (c *channel) stopDeleteTimer()
// Stops the delete timer. // The channelStore's mutex must be held on entry. func (c *channel) stopDeleteTimer()
{ if c.activity.timer != nil { c.activity.timer.Stop() c.activity.timerSet = false if c.stan.debug { c.stan.log.Debugf("Channel %q delete timer stopped", c.name) } } }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L473-L486
go
train
// Resets the delete timer to the given duration. // If the timer was not created, this call will create it. // The channelStore's mutex must be held on entry.
func (c *channel) resetDeleteTimer(newDuration time.Duration)
// Resets the delete timer to the given duration. // If the timer was not created, this call will create it. // The channelStore's mutex must be held on entry. func (c *channel) resetDeleteTimer(newDuration time.Duration)
{ a := c.activity if a.timer == nil { a.timer = time.AfterFunc(newDuration, func() { c.stan.sendDeleteChannelRequest(c) }) } else { a.timer.Reset(newDuration) } if c.stan.debug { c.stan.log.Debugf("Channel %q delete timer set to fire in %v", c.name, newDuration) } a.timerSet = true }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L490-L503
go
train
// pubMsgToMsgProto converts a PubMsg to a MsgProto and assigns a timestamp // which is monotonic with respect to the channel.
func (c *channel) pubMsgToMsgProto(pm *pb.PubMsg, seq uint64) *pb.MsgProto
// pubMsgToMsgProto converts a PubMsg to a MsgProto and assigns a timestamp // which is monotonic with respect to the channel. func (c *channel) pubMsgToMsgProto(pm *pb.PubMsg, seq uint64) *pb.MsgProto
{ m := &pb.MsgProto{ Sequence: seq, Subject: pm.Subject, Reply: pm.Reply, Data: pm.Data, Timestamp: time.Now().UnixNano(), } if c.lTimestamp > 0 && m.Timestamp < c.lTimestamp { m.Timestamp = c.lTimestamp } c.lTimestamp = m.Timestamp return m }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L506-L564
go
train
// Sets a subscription that will handle snapshot restore requests from followers.
func (s *StanServer) subToSnapshotRestoreRequests() error
// Sets a subscription that will handle snapshot restore requests from followers. func (s *StanServer) subToSnapshotRestoreRequests() error
{
	// Request subject is "<defaultSnapshotPrefix>.<clusterID>.<channel>"; the
	// 16-byte payload encodes the first and last sequence (8 bytes each) the
	// follower needs. Messages are published one at a time to the reply subject;
	// an empty payload signals that a given sequence is not available.
	var (
		msgBuf                []byte
		buf                   []byte
		snapshotRestorePrefix = fmt.Sprintf("%s.%s.", defaultSnapshotPrefix, s.info.ClusterID)
		prefixLen             = len(snapshotRestorePrefix)
	)
	sub, err := s.ncsr.Subscribe(snapshotRestorePrefix+">", func(m *nats.Msg) {
		// Payload must be exactly two uint64's (start and end sequences).
		if len(m.Data) != 16 {
			s.log.Errorf("Invalid snapshot request, data len=%v", len(m.Data))
			return
		}
		cname := m.Subject[prefixLen:]
		c := s.channels.getIfNotAboutToBeDeleted(cname)
		if c == nil {
			// Channel does not exist (or is being deleted): reply empty.
			s.ncsr.Publish(m.Reply, nil)
			return
		}
		start := util.ByteOrder.Uint64(m.Data[:8])
		end := util.ByteOrder.Uint64(m.Data[8:])
		for seq := start; seq <= end; seq++ {
			msg, err := c.store.Msgs.Lookup(seq)
			if err != nil {
				s.log.Errorf("Snapshot restore request error for channel %q, error looking up message %v: %v", c.name, seq, err)
				return
			}
			if msg == nil {
				// We don't have this message because of channel limits.
				// Return nil to caller to signal this state.
				buf = nil
			} else {
				msgBuf = util.EnsureBufBigEnough(msgBuf, msg.Size())
				n, err := msg.MarshalTo(msgBuf)
				if err != nil {
					panic(err)
				}
				buf = msgBuf[:n]
			}
			if err := s.ncsr.Publish(m.Reply, buf); err != nil {
				s.log.Errorf("Snapshot restore request error for channel %q, unable to send response for seq %v: %v", c.name, seq, err)
			}
			if buf == nil {
				return
			}
			// Stop streaming early if the server is shutting down.
			select {
			case <-s.shutdownCh:
				return
			default:
			}
		}
	})
	if err != nil {
		return err
	}
	// Internal subscription: no pending limits.
	sub.SetPendingLimits(-1, -1)
	s.snapReqSub = sub
	return nil
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L746-L760
go
train
// Looks up, or create a new channel if it does not exist
func (s *StanServer) lookupOrCreateChannel(name string) (*channel, error)
// Looks up, or create a new channel if it does not exist func (s *StanServer) lookupOrCreateChannel(name string) (*channel, error)
{ cs := s.channels cs.RLock() c := cs.channels[name] if c != nil { if c.activity != nil && c.activity.deleteInProgress { cs.RUnlock() return nil, ErrChanDelInProgress } cs.RUnlock() return c, nil } cs.RUnlock() return cs.createChannel(s, name) }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L788-L797
go
train
// createSubStore creates a new instance of `subStore`.
func (s *StanServer) createSubStore() *subStore
// createSubStore creates a new instance of `subStore`. func (s *StanServer) createSubStore() *subStore
{ subs := &subStore{ psubs: make([]*subState, 0, 4), qsubs: make(map[string]*queueState), durables: make(map[string]*subState), acks: make(map[string]*subState), stan: s, } return subs }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L800-L823
go
train
// Store adds this subscription to the server's `subStore` and also in storage
func (ss *subStore) Store(sub *subState) error
// Store adds this subscription to the server's `subStore` and also in storage func (ss *subStore) Store(sub *subState) error
{ if sub == nil { return nil } // Adds to storage. // Use sub lock to avoid race with waitForAcks in some tests sub.Lock() err := sub.store.CreateSub(&sub.SubState) sub.Unlock() if err == nil { err = sub.store.Flush() } if err != nil { ss.stan.log.Errorf("Unable to store subscription [%v:%v] on [%s]: %v", sub.ClientID, sub.Inbox, sub.subject, err) return err } ss.Lock() ss.updateState(sub) ss.Unlock() return nil }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L828-L889
go
train
// Updates the subStore state with this sub. // The subStore is locked on entry (or does not need, as during server restart). // However, `sub` does not need locking since it has just been created.
func (ss *subStore) updateState(sub *subState)
// Updates the subStore state with this sub. // The subStore is locked on entry (or does not need, as during server restart). // However, `sub` does not need locking since it has just been created. func (ss *subStore) updateState(sub *subState)
{
	// Store by type
	if sub.isQueueSubscriber() {
		// Queue subscriber: find or create the group's state.
		qs := ss.qsubs[sub.QGroup]
		if qs == nil {
			qs = &queueState{
				subs: make([]*subState, 0, 4),
			}
			ss.qsubs[sub.QGroup] = qs
		}
		qs.Lock()
		// The recovered shadow queue sub will have ClientID=="",
		// keep a reference to it until a member re-joins the group.
		if sub.ClientID == "" {
			// There should be only one shadow queue subscriber, but
			// we found in https://github.com/nats-io/nats-streaming-server/issues/322
			// that some datastore had 2 of those (not sure how this happened except
			// maybe due to upgrades from much older releases that had bugs?).
			// So don't panic and use as the shadow the one with the highest LastSent
			// value.
			if qs.shadow == nil || sub.LastSent > qs.lastSent {
				qs.shadow = sub
			}
		} else {
			// Store by ackInbox for ack direct lookup
			ss.acks[sub.AckInbox] = sub
			qs.subs = append(qs.subs, sub)
			// If the added sub has newOnHold it means that we are doing recovery and
			// that this member had unacknowledged messages. Mark the queue group
			// with newOnHold
			if sub.newOnHold {
				qs.newOnHold = true
			}
			// Update stalled (on recovery)
			if sub.stalled {
				qs.stalledSubCount++
			}
		}
		// Needed in the case of server restart, where
		// the queue group's last sent needs to be updated
		// based on the recovered subscriptions.
		if sub.LastSent > qs.lastSent {
			qs.lastSent = sub.LastSent
		}
		qs.Unlock()
		sub.qstate = qs
	} else {
		// First store by ackInbox for ack direct lookup
		ss.acks[sub.AckInbox] = sub
		// Plain subscriber.
		ss.psubs = append(ss.psubs, sub)
		// Hold onto durables in special lookup.
		if sub.isDurableSubscriber() {
			ss.durables[sub.durableKey()] = sub
		}
	}
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L892-L903
go
train
// returns an array of all subscriptions (plain, online durables and queue members).
func (ss *subStore) getAllSubs() []*subState
// returns an array of all subscriptions (plain, online durables and queue members). func (ss *subStore) getAllSubs() []*subState
{ ss.RLock() subs := make([]*subState, 0, len(ss.psubs)) subs = append(subs, ss.psubs...) for _, qs := range ss.qsubs { qs.RLock() subs = append(subs, qs.subs...) qs.RUnlock() } ss.RUnlock() return subs }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L907-L922
go
train
// hasSubs returns true if there is any active subscription for this subStore. // That is, offline durable subscriptions are ignored.
func (ss *subStore) hasActiveSubs() bool
// hasSubs returns true if there is any active subscription for this subStore. // That is, offline durable subscriptions are ignored. func (ss *subStore) hasActiveSubs() bool
{ ss.RLock() defer ss.RUnlock() if len(ss.psubs) > 0 { return true } for _, qsub := range ss.qsubs { // For a durable queue group, when the group is offline, // qsub.shadow is not nil, but the qsub.subs array should be // empty. if len(qsub.subs) > 0 { return true } } return false }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L926-L1135
go
train
// Remove a subscriber from the subscription store, leaving durable // subscriptions unless `unsubscribe` is true.
func (ss *subStore) Remove(c *channel, sub *subState, unsubscribe bool)
// Remove a subscriber from the subscription store, leaving durable // subscriptions unless `unsubscribe` is true. func (ss *subStore) Remove(c *channel, sub *subState, unsubscribe bool)
{
	if sub == nil {
		return
	}
	var (
		log               logger.Logger
		queueGroupIsEmpty bool
	)
	ss.Lock()
	if ss.stan.debug {
		log = ss.stan.log
	}
	// Snapshot the fields we need while holding the sub lock, then release
	// it before touching storage.
	sub.Lock()
	subject := sub.subject
	clientID := sub.ClientID
	durableKey := ""
	// Do this before clearing the sub.ClientID since this is part of the key!!!
	if sub.isDurableSubscriber() {
		durableKey = sub.durableKey()
	}
	// This is needed when doing a snapshot in clustering mode or for monitoring endpoints
	sub.savedClientID = sub.ClientID
	// Clear the subscriptions clientID
	sub.ClientID = ""
	ackInbox := sub.AckInbox
	qs := sub.qstate
	isDurable := sub.IsDurable
	subid := sub.ID
	store := sub.store
	sub.stopAckSub()
	sub.Unlock()
	reportError := func(err error) {
		ss.stan.log.Errorf("Error deleting subscription subid=%d, subject=%s, err=%v", subid, subject, err)
	}
	// Delete from storage non durable subscribers on either connection
	// close or call to Unsubscribe(), and durable subscribers only on
	// Unsubscribe(). Leave durable queue subs for now, they need to
	// be treated differently.
	if !isDurable || (unsubscribe && durableKey != "") {
		if err := store.DeleteSub(subid); err != nil {
			reportError(err)
		}
	}
	// Delete from ackInbox lookup.
	delete(ss.acks, ackInbox)
	// Delete from durable if needed
	if unsubscribe && durableKey != "" {
		delete(ss.durables, durableKey)
	}
	var qsubs map[uint64]*subState
	// Delete ourselves from the list
	if qs != nil {
		storageUpdate := false
		// For queue state, we need to lock specifically,
		// because qs.subs can be modified by findBestQueueSub,
		// for which we don't have substore lock held.
		qs.Lock()
		sub.Lock()
		sub.clearAckTimer()
		qgroup := sub.QGroup
		sub.Unlock()
		qs.subs, _ = sub.deleteFromList(qs.subs)
		if len(qs.subs) == 0 {
			queueGroupIsEmpty = true
			// If it was the last being removed, also remove the
			// queue group from the subStore map, but only if
			// non durable or explicit unsubscribe.
			if !isDurable || unsubscribe {
				delete(ss.qsubs, qgroup)
				// Delete from storage too.
				if err := store.DeleteSub(subid); err != nil {
					reportError(err)
				}
			} else {
				// Group is durable and last member just left the group,
				// but didn't call Unsubscribe(). Need to keep a reference
				// to this sub to maintain the state.
				qs.shadow = sub
				// Clear the number of stalled members
				qs.stalledSubCount = 0
				// Will need to update the LastSent and clear the ClientID
				// with a storage update.
				storageUpdate = true
			}
		} else {
			if sub.stalled && qs.stalledSubCount > 0 {
				qs.stalledSubCount--
			}
			// Set expiration in the past to force redelivery
			expirationTime := time.Now().UnixNano() - int64(time.Second)
			// If there are pending messages in this sub, they need to be
			// transferred to remaining queue subscribers.
			numQSubs := len(qs.subs)
			idx := 0
			sub.RLock()
			// Need to update if this member was the one with the last
			// message of the group.
			storageUpdate = sub.LastSent == qs.lastSent
			sortedPendingMsgs := makeSortedPendingMsgs(sub.acksPending)
			for _, pm := range sortedPendingMsgs {
				// Get one of the remaining queue subscribers.
				qsub := qs.subs[idx]
				qsub.Lock()
				// Store in storage
				if err := qsub.store.AddSeqPending(qsub.ID, pm.seq); err != nil {
					ss.stan.log.Errorf("[Client:%s] Unable to transfer message to subid=%d, subject=%s, seq=%d, err=%v", clientID, subid, subject, pm.seq, err)
					qsub.Unlock()
					continue
				}
				// We don't need to update if the sub's lastSent is transferred
				// to another queue subscriber.
				if storageUpdate && pm.seq == qs.lastSent {
					storageUpdate = false
				}
				// Update LastSent if applicable
				if pm.seq > qsub.LastSent {
					qsub.LastSent = pm.seq
				}
				// Store in ackPending.
				qsub.acksPending[pm.seq] = expirationTime
				// Keep track of this qsub
				if qsubs == nil {
					qsubs = make(map[uint64]*subState)
				}
				if _, tracked := qsubs[qsub.ID]; !tracked {
					qsubs[qsub.ID] = qsub
				}
				qsub.Unlock()
				// Move to the next queue subscriber, going back to first if needed.
				idx++
				if idx == numQSubs {
					idx = 0
				}
			}
			sub.RUnlock()
			// Even for durable queue subscribers, if this is not the last
			// member, we need to delete from storage (we did that higher in
			// that function for non durable case). Issue #215.
			if isDurable {
				if err := store.DeleteSub(subid); err != nil {
					reportError(err)
				}
			}
		}
		if storageUpdate {
			// If we have a shadow sub, use that one, otherwise any queue subscriber
			// will do, so use the first.
			qsub := qs.shadow
			if qsub == nil {
				qsub = qs.subs[0]
			}
			qsub.Lock()
			qsub.LastSent = qs.lastSent
			qsub.store.UpdateSub(&qsub.SubState)
			qsub.Unlock()
		}
		qs.Unlock()
	} else {
		sub.Lock()
		sub.clearAckTimer()
		sub.Unlock()
		ss.psubs, _ = sub.deleteFromList(ss.psubs)
		// When closing a durable subscription (calling sub.Close(), not sub.Unsubscribe()),
		// we need to update the record on store to prevent the server from adding
		// this durable to the list of active subscriptions. This is especially important
		// if the client closing this durable is itself not closed when the server is
		// restarted. The server would have no way to detect if the durable subscription
		// is offline or not.
		if isDurable && !unsubscribe {
			sub.Lock()
			// ClientID is required on store because this is used on recovery to
			// "compute" the durable key (clientID+subject+durable name).
			sub.ClientID = clientID
			sub.IsClosed = true
			store.UpdateSub(&sub.SubState)
			// After storage, clear the ClientID.
			sub.ClientID = ""
			sub.Unlock()
		}
	}
	ss.Unlock()
	if !ss.stan.isClustered || ss.stan.isLeader() {
		// Calling this will sort current pending messages and ensure
		// that the ackTimer is properly set. It does not necessarily
		// mean that messages are going to be redelivered on the spot.
		for _, qsub := range qsubs {
			ss.stan.performAckExpirationRedelivery(qsub, false)
		}
	}
	if log != nil {
		traceCtx := subStateTraceCtx{clientID: clientID, isRemove: true, isUnsubscribe: unsubscribe, isGroupEmpty: queueGroupIsEmpty}
		traceSubState(log, sub, &traceCtx)
	}
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L1138-L1143
go
train
// Lookup by durable name.
func (ss *subStore) LookupByDurable(durableName string) *subState
// Lookup by durable name. func (ss *subStore) LookupByDurable(durableName string) *subState
{ ss.RLock() sub := ss.durables[durableName] ss.RUnlock() return sub }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L1146-L1151
go
train
// Lookup by ackInbox name.
func (ss *subStore) LookupByAckInbox(ackInbox string) *subState
// Lookup by ackInbox name. func (ss *subStore) LookupByAckInbox(ackInbox string) *subState
{ ss.RLock() sub := ss.acks[ackInbox] ss.RUnlock() return sub }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L1188-L1200
go
train
// Clone returns a deep copy of the Options object.
func (o *Options) Clone() *Options
// Clone returns a deep copy of the Options object. func (o *Options) Clone() *Options
{ // A simple copy covers pretty much everything clone := *o // But we have the problem of the PerChannel map that needs // to be copied. clone.PerChannel = (&o.StoreLimits).ClonePerChannelMap() // Make a copy of the clustering peers if len(o.Clustering.Peers) > 0 { clone.Clustering.Peers = make([]string, 0, len(o.Clustering.Peers)) clone.Clustering.Peers = append(clone.Clustering.Peers, o.Clustering.Peers...) } return &clone }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L1217-L1221
go
train
// GetDefaultOptions returns default options for the NATS Streaming Server
func GetDefaultOptions() (o *Options)
// GetDefaultOptions returns default options for the NATS Streaming Server func GetDefaultOptions() (o *Options)
{ opts := defaultOptions opts.StoreLimits = stores.DefaultStoreLimits return &opts }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L1319-L1379
go
train
// createNatsClientConn creates a connection to the NATS server, using // TLS if configured. Pass in the NATS server options to derive a // connection url, and for other future items (e.g. auth)
func (s *StanServer) createNatsClientConn(name string) (*nats.Conn, error)
// createNatsClientConn creates a connection to the NATS server, using // TLS if configured. Pass in the NATS server options to derive a // connection url, and for other future items (e.g. auth) func (s *StanServer) createNatsClientConn(name string) (*nats.Conn, error)
{
	var err error
	ncOpts := nats.DefaultOptions
	// Apply caller-provided NATS client options first; the settings below
	// then override where the server needs specific behavior.
	for _, o := range s.opts.NATSClientOpts {
		o(&ncOpts)
	}
	ncOpts.Servers, err = s.buildServerURLs()
	if err != nil {
		return nil, err
	}
	// Credentials are taken from the embedded/attached NATS server options.
	ncOpts.User = s.natsOpts.Username
	ncOpts.Password = s.natsOpts.Password
	ncOpts.Token = s.natsOpts.Authorization
	ncOpts.Name = fmt.Sprintf("_NSS-%s-%s", s.opts.ID, name)
	// Wire the server's connection event handlers.
	if err = nats.ErrorHandler(s.stanErrorHandler)(&ncOpts); err != nil {
		return nil, err
	}
	if err = nats.ReconnectHandler(s.stanReconnectedHandler)(&ncOpts); err != nil {
		return nil, err
	}
	if err = nats.ClosedHandler(s.stanClosedHandler)(&ncOpts); err != nil {
		return nil, err
	}
	if err = nats.DisconnectHandler(s.stanDisconnectedHandler)(&ncOpts); err != nil {
		return nil, err
	}
	if s.opts.Secure {
		if err = nats.Secure()(&ncOpts); err != nil {
			return nil, err
		}
	}
	if s.opts.ClientCA != "" {
		if err = nats.RootCAs(s.opts.ClientCA)(&ncOpts); err != nil {
			return nil, err
		}
	}
	if s.opts.ClientCert != "" {
		if err = nats.ClientCert(s.opts.ClientCert, s.opts.ClientKey)(&ncOpts); err != nil {
			return nil, err
		}
	}
	// Shorten the time we wait to try to reconnect.
	// Don't make it too often because it may exhaust the number of FDs.
	ncOpts.ReconnectWait = 250 * time.Millisecond
	// Make it try to reconnect for ever.
	ncOpts.MaxReconnect = -1
	// To avoid possible duplicate redeliveries, etc.., set the reconnect
	// buffer to -1 to avoid any buffering in the nats library and flush
	// on reconnect.
	ncOpts.ReconnectBufSize = -1
	var nc *nats.Conn
	if nc, err = ncOpts.Connect(); err != nil {
		return nil, err
	}
	return nc, err
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L1411-L1416
go
train
// RunServer will startup an embedded NATS Streaming Server and a nats-server to support it.
func RunServer(ID string) (*StanServer, error)
// RunServer will startup an embedded NATS Streaming Server and a nats-server to support it. func RunServer(ID string) (*StanServer, error)
{ sOpts := GetDefaultOptions() sOpts.ID = ID nOpts := DefaultNatsServerOptions return RunServerWithOpts(sOpts, &nOpts) }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L1420-L1627
go
train
// RunServerWithOpts allows you to run a NATS Streaming Server with full control // on the Streaming and NATS Server configuration.
func RunServerWithOpts(stanOpts *Options, natsOpts *server.Options) (newServer *StanServer, returnedError error)
// RunServerWithOpts allows you to run a NATS Streaming Server with full control // on the Streaming and NATS Server configuration. func RunServerWithOpts(stanOpts *Options, natsOpts *server.Options) (newServer *StanServer, returnedError error)
{
	var sOpts *Options
	var nOpts *server.Options
	// Make a copy of the options so we own them.
	if stanOpts == nil {
		sOpts = GetDefaultOptions()
	} else {
		sOpts = stanOpts.Clone()
	}
	if natsOpts == nil {
		no := DefaultNatsServerOptions
		nOpts = &no
	} else {
		nOpts = natsOpts.Clone()
	}
	// For now, no support for partitioning and clustering at the same time
	if sOpts.Partitioning && sOpts.Clustering.Clustered {
		return nil, fmt.Errorf("stan: channels partitioning in clustering mode is not supported")
	}
	if sOpts.Clustering.Clustered {
		if sOpts.StoreType == stores.TypeMemory {
			return nil, fmt.Errorf("stan: clustering mode not supported with %s store type", stores.TypeMemory)
		}
		// Override store sync configuration with cluster sync.
		sOpts.FileStoreOpts.DoSync = sOpts.Clustering.Sync
		// Remove cluster's node ID (if present) from the list of peers.
		if len(sOpts.Clustering.Peers) > 0 && sOpts.Clustering.NodeID != "" {
			nodeID := sOpts.Clustering.NodeID
			peers := make([]string, 0, len(sOpts.Clustering.Peers))
			for _, p := range sOpts.Clustering.Peers {
				if p != nodeID {
					peers = append(peers, p)
				}
			}
			if len(peers) != len(sOpts.Clustering.Peers) {
				sOpts.Clustering.Peers = peers
			}
		}
	}
	s := StanServer{
		serverID:      nuid.Next(),
		opts:          sOpts,
		natsOpts:      nOpts,
		dupCIDTimeout: defaultCheckDupCIDTimeout,
		ioChannelQuit: make(chan struct{}),
		trace:         sOpts.Trace,
		debug:         sOpts.Debug,
		subStartCh:    make(chan *subStartInfo, defaultSubStartChanLen),
		subStartQuit:  make(chan struct{}, 1),
		startTime:     time.Now(),
		log:           logger.NewStanLogger(),
		shutdownCh:    make(chan struct{}),
		isClustered:   sOpts.Clustering.Clustered,
		raftLogging:   sOpts.Clustering.RaftLogging,
		cliDipCIDsMap: make(map[string]struct{}),
	}
	// If a custom logger is provided, use this one, otherwise, check
	// if we should configure the logger or not.
	if sOpts.CustomLogger != nil {
		s.log.SetLogger(sOpts.CustomLogger, nOpts.Logtime, sOpts.Debug, sOpts.Trace, "")
	} else if sOpts.EnableLogging {
		s.configureLogger()
	}
	s.log.Noticef("Starting nats-streaming-server[%s] version %s", sOpts.ID, VERSION)
	// ServerID is used to check that a broadcast protocol is not ours,
	// for instance with FT. Some err/warn messages may be printed
	// regarding other instance's ID, so print it on startup.
	s.log.Noticef("ServerID: %v", s.serverID)
	s.log.Noticef("Go version: %v", runtime.Version())
	gc := gitCommit
	if gc == "" {
		gc = "not set"
	}
	s.log.Noticef("Git commit: [%s]", gc)
	// Ensure that we shutdown the server if there is a panic/error during startup.
	// This will ensure that stores are closed (which otherwise would cause
	// issues during testing) and that the NATS Server (if started) is also
	// properly shutdown. To do so, we recover from the panic in order to
	// call Shutdown, then issue the original panic.
	defer func() {
		// We used to issue panic for common errors but now return error
		// instead. Still we want to log the reason for the panic.
		if r := recover(); r != nil {
			s.Shutdown()
			s.log.Noticef("Failed to start: %v", r)
			panic(r)
		} else if returnedError != nil {
			s.Shutdown()
			// Log it as a fatal error, process will exit (if
			// running from executable or logger is configured).
			s.log.Fatalf("Failed to start: %v", returnedError)
		}
	}()
	storeLimits := &s.opts.StoreLimits
	var (
		err   error
		store stores.Store
	)
	// Ensure store type option is in upper-case
	sOpts.StoreType = strings.ToUpper(sOpts.StoreType)
	// Create the store.
	switch sOpts.StoreType {
	case stores.TypeFile:
		store, err = stores.NewFileStore(s.log, sOpts.FilestoreDir, storeLimits, stores.AllOptions(&sOpts.FileStoreOpts))
	case stores.TypeSQL:
		store, err = stores.NewSQLStore(s.log, sOpts.SQLStoreOpts.Driver, sOpts.SQLStoreOpts.Source, storeLimits, stores.SQLAllOptions(&sOpts.SQLStoreOpts))
	case stores.TypeMemory:
		store, err = stores.NewMemoryStore(s.log, storeLimits)
	default:
		err = fmt.Errorf("unsupported store type: %v", sOpts.StoreType)
	}
	if err != nil {
		return nil, err
	}
	// StanServer.store (s.store here) is of type stores.Store, which is an
	// interface. If we assign s.store in the call of the constructor and there
	// is an error, although the call returns "nil" for the store, we can no
	// longer have a test such as "if s.store != nil" (as we do in shutdown).
	// This is because the constructors return a store implementation.
	// We would need to use reflection such as reflect.ValueOf(s.store).IsNil().
	// So to not do that, we simply delay the setting of s.store when we know
	// that it was successful.
	if s.isClustered {
		// Wrap our store with a RaftStore instance that avoids persisting
		// data that we don't need because they are handled by the actual
		// raft logs.
		store = stores.NewRaftStore(s.log, store, storeLimits)
	}
	if sOpts.Encrypt || len(sOpts.EncryptionKey) > 0 {
		// In clustering mode, RAFT is using its own logs (not the one above),
		// so we need to keep the key intact until we call newRaftLog().
		var key []byte
		if s.isClustered && len(sOpts.EncryptionKey) > 0 {
			key = append(key, sOpts.EncryptionKey...)
		} else {
			key = sOpts.EncryptionKey
		}
		store, err = stores.NewCryptoStore(store, sOpts.EncryptionCipher, key)
		if err != nil {
			return nil, err
		}
	}
	s.store = store
	// Start the IO Loop before creating the channel store since the
	// go routine watching for channel inactivity may schedule events
	// to the IO loop.
	s.startIOLoop()
	s.clients = newClientStore(s.store)
	s.channels = newChannelStore(&s, s.store)
	// If no NATS server url is provided, it means that we embed the NATS Server
	if sOpts.NATSServerURL == "" {
		if err := s.startNATSServer(); err != nil {
			return nil, err
		}
	}
	// Check for monitoring
	if nOpts.HTTPPort != 0 || nOpts.HTTPSPort != 0 {
		if err := s.startMonitoring(nOpts); err != nil {
			return nil, err
		}
	}
	// Create our connections
	if err := s.createNatsConnections(); err != nil {
		return nil, err
	}
	// In FT mode, server cannot recover the store until it is elected leader.
	if s.opts.FTGroupName != "" {
		if err := s.ftSetup(); err != nil {
			return nil, err
		}
		s.wg.Add(1)
		go func() {
			defer s.wg.Done()
			if err := s.ftStart(); err != nil {
				s.setLastError(err)
			}
		}()
	} else {
		state := Standalone
		if s.isClustered {
			state = Clustered
		}
		if err := s.start(state); err != nil {
			return nil, err
		}
	}
	if s.opts.HandleSignals {
		s.handleSignals()
	}
	return &s, nil
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L1639-L1678
go
train
// Logging in STAN // // The STAN logger is an instance of a NATS logger, (basically duplicated // from the NATS server code), and is passed into the NATS server. // // A note on Debugf and Tracef: These will be enabled within the log if // either STAN or the NATS server enables them. However, STAN will only // trace/debug if the local STAN debug/trace flags are set. NATS will do // the same with it's logger flags. This enables us to use the same logger, // but differentiate between STAN and NATS debug/trace.
func (s *StanServer) configureLogger()
// Logging in STAN // // The STAN logger is an instance of a NATS logger, (basically duplicated // from the NATS server code), and is passed into the NATS server. // // A note on Debugf and Tracef: These will be enabled within the log if // either STAN or the NATS server enables them. However, STAN will only // trace/debug if the local STAN debug/trace flags are set. NATS will do // the same with it's logger flags. This enables us to use the same logger, // but differentiate between STAN and NATS debug/trace. func (s *StanServer) configureLogger()
{
	var newLogger logger.Logger
	sOpts := s.opts
	nOpts := s.natsOpts
	// Debug/trace are enabled in the shared logger if either STAN or NATS
	// requests them; each side then filters with its own flags.
	enableDebug := nOpts.Debug || sOpts.Debug
	enableTrace := nOpts.Trace || sOpts.Trace
	syslog := nOpts.Syslog
	// Enable syslog if no log file is specified and we're running as a
	// Windows service so that logs are written to the Windows event log.
	if isWindowsService() && nOpts.LogFile == "" {
		syslog = true
	}
	// If we have a syslog name specified, make sure we will use this name.
	// This is for syslog and remote syslogs running on Windows.
	if sOpts.SyslogName != "" {
		natsdLogger.SetSyslogName(sOpts.SyslogName)
	}
	// Destination priority: log file > remote syslog > syslog > stderr.
	if nOpts.LogFile != "" {
		newLogger = natsdLogger.NewFileLogger(nOpts.LogFile, nOpts.Logtime, enableDebug, enableTrace, true)
	} else if nOpts.RemoteSyslog != "" {
		newLogger = natsdLogger.NewRemoteSysLogger(nOpts.RemoteSyslog, enableDebug, enableTrace)
	} else if syslog {
		newLogger = natsdLogger.NewSysLogger(enableDebug, enableTrace)
	} else {
		colors := true
		// Check to see if stderr is being redirected and if so turn off color
		// Also turn off colors if we're running on Windows where os.Stderr.Stat() returns an invalid handle-error
		stat, err := os.Stderr.Stat()
		if err != nil || (stat.Mode()&os.ModeCharDevice) == 0 {
			colors = false
		}
		newLogger = natsdLogger.NewStdLogger(nOpts.Logtime, enableDebug, enableTrace, colors, true)
	}
	s.log.SetLogger(newLogger, nOpts.Logtime, sOpts.Debug, sOpts.Trace, nOpts.LogFile)
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L1686-L1874
go
train
// This is either running inside RunServerWithOpts() and before any reference // to the server is returned, so locking is not really an issue, or it is // running from a go-routine when the server has been elected the FT active. // Therefore, this function grabs the server lock for the duration of this // call and so care must be taken to not invoke - directly or indirectly - // code that would attempt to grab the server lock.
func (s *StanServer) start(runningState State) error
// This is either running inside RunServerWithOpts() and before any reference // to the server is returned, so locking is not really an issue, or it is // running from a go-routine when the server has been elected the FT active. // Therefore, this function grabs the server lock for the duration of this // call and so care must be taken to not invoke - directly or indirectly - // code that would attempt to grab the server lock. func (s *StanServer) start(runningState State) error
{
	// Runs under the full server lock; see the function doc about not
	// calling anything that would re-acquire it.
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.shutdown {
		return nil
	}

	// If using partitioning, send our list and start go routines handling
	// channels list requests.
	if s.opts.Partitioning {
		if err := s.initPartitions(); err != nil {
			return err
		}
	}

	s.state = runningState

	var (
		err            error
		recoveredState *stores.RecoveredState
		recoveredSubs  []*subState
		callStoreInit  bool
	)

	// Recover the state.
	s.log.Noticef("Recovering the state...")
	recoveredState, err = s.store.Recover()
	if err != nil {
		return err
	}
	if recoveredState != nil {
		s.log.Noticef("Recovered %v channel(s)", len(recoveredState.Channels))
	} else {
		s.log.Noticef("No recovered state")
	}
	subjID := s.opts.ID
	// In FT or with static channels (aka partitioning), we use the cluster ID
	// as part of the subjects prefix, not a NUID.
	if runningState == Standalone && s.partitions == nil {
		subjID = nuid.Next()
	}
	if recoveredState != nil {
		// Copy content
		s.info = *recoveredState.Info

		// Check cluster IDs match
		if s.opts.ID != s.info.ClusterID {
			return fmt.Errorf("cluster ID %q does not match recovered value of %q",
				s.opts.ID, s.info.ClusterID)
		}
		// Check to see if SubClose subject is present or not.
		// If not, it means we recovered from an older server, so
		// need to update.
		if s.info.SubClose == "" {
			s.info.SubClose = fmt.Sprintf("%s.%s", DefaultSubClosePrefix, subjID)
			// Update the store with the server info
			callStoreInit = true
		}

		// If clustering was enabled but we are recovering a server that was
		// previously not clustered, return an error. This is not allowed
		// because there is preexisting state that is not represented in the
		// Raft log.
		if s.isClustered && s.info.NodeID == "" {
			return ErrClusteredRestart
		}
		// Use recovered clustering node ID.
		s.opts.Clustering.NodeID = s.info.NodeID

		// Restore clients state
		s.processRecoveredClients(recoveredState.Clients)

		// Default Raft log path to ./<cluster-id>/<node-id> if not set. This
		// must be done here before recovering channels since that will
		// initialize Raft groups if clustered.
		if s.opts.Clustering.RaftLogPath == "" {
			s.opts.Clustering.RaftLogPath = filepath.Join(s.opts.ID, s.opts.Clustering.NodeID)
		}

		// Process recovered channels (if any).
		recoveredSubs, err = s.processRecoveredChannels(recoveredState.Channels)
		if err != nil {
			return err
		}
	} else {
		s.info.ClusterID = s.opts.ID

		// Generate Subjects
		s.info.Discovery = fmt.Sprintf("%s.%s", s.opts.DiscoverPrefix, s.info.ClusterID)
		s.info.Publish = fmt.Sprintf("%s.%s", DefaultPubPrefix, subjID)
		s.info.Subscribe = fmt.Sprintf("%s.%s", DefaultSubPrefix, subjID)
		s.info.SubClose = fmt.Sprintf("%s.%s", DefaultSubClosePrefix, subjID)
		s.info.Unsubscribe = fmt.Sprintf("%s.%s", DefaultUnSubPrefix, subjID)
		s.info.Close = fmt.Sprintf("%s.%s", DefaultClosePrefix, subjID)
		s.info.AcksSubs = fmt.Sprintf("%s.%s", defaultAcksPrefix, subjID)
		if s.opts.Clustering.Clustered {
			// If clustered, assign a random cluster node ID if not provided.
			if s.opts.Clustering.NodeID == "" {
				s.opts.Clustering.NodeID = nuid.Next()
			}
			s.info.NodeID = s.opts.Clustering.NodeID
		}
		callStoreInit = true
	}
	if callStoreInit {
		// Initialize the store with the server info
		if err := s.store.Init(&s.info); err != nil {
			return fmt.Errorf("unable to initialize the store: %v", err)
		}
	}

	// We don't do the check if we are running FT and/or if
	// static channels (partitioning) is in play.
	if runningState == Standalone && s.partitions == nil {
		if err := s.ensureRunningStandAlone(); err != nil {
			return err
		}
	}

	// If clustered, start Raft group.
	if s.isClustered {
		s.ssarepl = &subsSentAndAckReplication{
			ready:    &sync.Map{},
			waiting:  &sync.Map{},
			gates:    &sync.Map{},
			notifyCh: make(chan struct{}, 1),
		}
		s.wg.Add(1)
		go s.subsSentAndAckReplicator()
		// Default Raft log path to ./<cluster-id>/<node-id> if not set
		// (covers the no-recovered-state path; the recovered path set it above).
		if s.opts.Clustering.RaftLogPath == "" {
			s.opts.Clustering.RaftLogPath = filepath.Join(s.opts.ID, s.opts.Clustering.NodeID)
		}
		s.log.Noticef("Cluster Node ID : %s", s.info.NodeID)
		s.log.Noticef("Cluster Log Path: %s", s.opts.Clustering.RaftLogPath)
		if err := s.startRaftNode(recoveredState != nil); err != nil {
			return err
		}
	}

	// Start the go-routine responsible to start sending messages to newly
	// started subscriptions. We do that before opening the gates in
	// s.initSubscriptions() (which is where the internal subscriptions
	// are created).
	s.wg.Add(1)
	go s.processSubscriptionsStart()

	if err := s.initSubscriptions(); err != nil {
		return err
	}

	if recoveredState != nil {
		// Do some post recovery processing (setup some timers, etc...)
		s.postRecoveryProcessing(recoveredState.Clients, recoveredSubs)
	}

	// Flush to make sure all subscriptions are processed before
	// we return control to the user.
	if err := s.nc.Flush(); err != nil {
		return fmt.Errorf("could not flush the subscriptions, %v", err)
	}

	s.log.Noticef("Message store is %s", s.store.Name())
	if s.opts.FilestoreDir != "" {
		s.log.Noticef("Store location: %v", s.opts.FilestoreDir)
	}
	// The store has a copy of the limits and the inheritance
	// was not applied to our limits. To have them displayed correctly,
	// call Build() on them (we know that this is not going to fail,
	// otherwise we would not have been able to create the store).
	s.opts.StoreLimits.Build()
	storeLimitsLines := (&s.opts.StoreLimits).Print()
	for _, l := range storeLimitsLines {
		s.log.Noticef(l)
	}

	// Execute (in a go routine) redelivery of unacknowledged messages,
	// and release newOnHold. We only do this if not clustered. If
	// clustered, the leader will handle redelivery upon election.
	if !s.isClustered {
		s.wg.Add(1)
		go func() {
			s.performRedeliveryOnStartup(recoveredSubs)
			s.wg.Done()
		}()
	}

	return nil
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L1878-L1933
go
train
// startRaftNode creates and starts the Raft group. // This should only be called if the server is running in clustered mode.
func (s *StanServer) startRaftNode(hasStreamingState bool) error
// startRaftNode creates and starts the Raft group. // This should only be called if the server is running in clustered mode. func (s *StanServer) startRaftNode(hasStreamingState bool) error
{
	// Create the Raft node itself; `hasStreamingState` indicates whether
	// a streaming state was recovered from the store.
	if err := s.createServerRaftNode(hasStreamingState); err != nil {
		return err
	}
	node := s.raft

	// leaderWait is used to block the caller until this node either
	// becomes leader (and finishes promotion), learns it is not the
	// leader, or the server shuts down. Buffered so leaderReady never
	// blocks; the select/default makes repeated signals harmless.
	leaderWait := make(chan struct{}, 1)
	leaderReady := func() {
		select {
		case leaderWait <- struct{}{}:
		default:
		}
	}
	// If we are not (yet) the leader, do not hold the caller hostage
	// waiting for an election.
	if node.State() != raft.Leader {
		leaderReady()
	}

	// Long-lived goroutine reacting to leadership changes until shutdown.
	s.wg.Add(1)
	go func() {
		defer s.wg.Done()
		for {
			select {
			case isLeader := <-node.notifyCh:
				if isLeader {
					err := s.leadershipAcquired()
					// Unblock the caller regardless of promotion outcome.
					leaderReady()
					if err != nil {
						s.log.Errorf("Error on leadership acquired: %v", err)
						switch {
						case err == raft.ErrRaftShutdown:
							// Node shutdown, just return.
							return
						case err == raft.ErrLeadershipLost:
							// Leadership lost: fall out of the switch and
							// keep looping (same effect as continue).
						case err == raft.ErrNotLeader:
							// Node lost leadership, continue loop.
							continue
						default:
							// TODO: probably step down as leader?
							panic(err)
						}
					}
				} else {
					s.leadershipLost()
				}
			case <-s.shutdownCh:
				// Signal channel here to handle edge case where we might
				// otherwise block forever on the channel when shutdown.
				leaderReady()
				return
			}
		}
	}()

	// Block until leadership status is resolved (or shutdown).
	<-leaderWait
	return nil
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L1945-L2037
go
train
// leadershipAcquired should be called when this node is elected leader. // This should only be called when the server is running in clustered mode.
func (s *StanServer) leadershipAcquired() error
// leadershipAcquired should be called when this node is elected leader. // This should only be called when the server is running in clustered mode. func (s *StanServer) leadershipAcquired() error
{
	s.log.Noticef("server became leader, performing leader promotion actions")
	defer s.log.Noticef("finished leader promotion actions")

	// If we were not the leader, there should be nothing in the ioChannel
	// (processing of client publishes). However, since a node could go
	// from leader to follower to leader again, let's make sure that we
	// synchronize with the ioLoop before we touch the channels' nextSequence.
	sc, sdc := s.sendSynchronziationRequest()

	// Wait for the ioLoop to reach that special iopm and notify us (or
	// give up if server is shutting down).
	select {
	case <-sc:
	case <-s.ioChannelQuit:
		close(sdc)
		return nil
	}
	// Then, we will notify it back to unlock it when we are done here.
	defer close(sdc)

	// Start listening to snapshot restore requests here...
	if err := s.subToSnapshotRestoreRequests(); err != nil {
		return err
	}

	// Use a barrier to ensure all preceding operations are applied to the FSM
	if err := s.raft.Barrier(0).Error(); err != nil {
		return err
	}

	channels := s.channels.getAll()
	for _, c := range channels {
		// Update next sequence to assign.
		lastSequence, err := c.store.Msgs.LastSequence()
		if err != nil {
			return err
		}
		// It is possible that nextSequence be set when restoring
		// from snapshots. Set it to the max value.
		if c.nextSequence <= lastSequence {
			c.nextSequence = lastSequence + 1
		}
	}

	// Setup client heartbeats and subscribe to acks for each sub.
	for _, client := range s.clients.getClients() {
		client.RLock()
		cID := client.info.ID
		for _, sub := range client.subs {
			if err := sub.startAckSub(s.nca, s.processAckMsg); err != nil {
				client.RUnlock()
				return err
			}
		}
		client.RUnlock()
		// cID is captured per-iteration for the heartbeat closure.
		s.clients.setClientHB(cID, s.opts.ClientHBInterval, func() {
			s.checkClientHealth(cID)
		})
	}

	// Start the internal subscriptions so we receive protocols from clients.
	if err := s.initInternalSubs(true); err != nil {
		return err
	}

	// Collect all subscriptions so unacknowledged messages can be
	// redelivered, and (re)arm delete timers on inactive channels.
	var allSubs []*subState
	for _, c := range channels {
		subs := c.ss.getAllSubs()
		if len(subs) > 0 {
			allSubs = append(allSubs, subs...)
		}
		if c.activity != nil {
			s.channels.maybeStartChannelDeleteTimer(c.name, c)
		}
	}
	if len(allSubs) > 0 {
		s.startGoRoutine(func() {
			s.performRedeliveryOnStartup(allSubs)
			s.wg.Done()
		})
	}

	// Make sure the internal subscriptions on both connections are
	// processed by the NATS server before accepting traffic.
	if err := s.nc.Flush(); err != nil {
		return err
	}
	if err := s.nca.Flush(); err != nil {
		return err
	}

	// Only now advertise this node as leader.
	atomic.StoreInt64(&s.raft.leader, 1)
	return nil
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L2041-L2074
go
train
// leadershipLost should be called when this node loses leadership. // This should only be called when the server is running in clustered mode.
func (s *StanServer) leadershipLost()
// leadershipLost should be called when this node loses leadership. // This should only be called when the server is running in clustered mode. func (s *StanServer) leadershipLost()
{
	s.log.Noticef("server lost leadership, performing leader stepdown actions")
	defer s.log.Noticef("finished leader stepdown actions")

	// Cancel outstanding client heartbeats. We aren't concerned about races
	// where new clients might be connecting because at this point, the server
	// will no longer accept new client connections, but even if it did, the
	// heartbeat would be automatically removed when it fires.
	for _, client := range s.clients.getClients() {
		s.clients.removeClientHB(client)
		// Ensure subs ackTimer is stopped
		subs := client.getSubsCopy()
		for _, sub := range subs {
			sub.Lock()
			sub.stopAckSub()
			sub.clearAckTimer()
			s.clearSentAndAck(sub)
			sub.Unlock()
		}
	}

	// Stop the delete timer of channels that have a max-inactivity
	// activity tracker, since only the leader manages channel deletion.
	// (NOTE(review): original comment mentioned snapshot-request
	// unsubscription, which does not match this loop.)
	for _, c := range s.channels.getAll() {
		if c.activity != nil {
			s.channels.stopDeleteTimer(c)
		}
	}

	// Only the leader will receive protocols from clients
	s.unsubscribeInternalSubs()

	atomic.StoreInt64(&s.raft.leader, 0)
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L2077-L2133
go
train
// TODO: Explore parameter passing in gnatsd. Keep separate for now.
func (s *StanServer) configureClusterOpts() error
// TODO: Explore parameter passing in gnatsd. Keep separate for now. func (s *StanServer) configureClusterOpts() error
{
	// configureClusterOpts validates and normalizes the NATS clustering
	// options (listen string override, routes, cluster credentials).
	opts := s.natsOpts

	// If we don't have cluster defined in the configuration
	// file and no cluster listen string override, but we do
	// have a routes override, we need to report misconfiguration.
	if opts.Cluster.ListenStr == "" && opts.Cluster.Host == "" &&
		opts.Cluster.Port == 0 {
		if opts.RoutesStr != "" {
			err := fmt.Errorf("solicited routes require cluster capabilities, e.g. --cluster")
			// Use an explicit verb instead of passing err.Error() as the
			// format string (go vet printf diagnostic; a '%' in the message
			// would be misinterpreted).
			s.log.Fatalf("%v", err)
			// Also return error in case server is started from application
			// and no logger has been set.
			return err
		}
		return nil
	}

	// If cluster flag override, process it
	if opts.Cluster.ListenStr != "" {
		clusterURL, err := url.Parse(opts.Cluster.ListenStr)
		if err != nil {
			return err
		}
		h, p, err := net.SplitHostPort(clusterURL.Host)
		if err != nil {
			return err
		}
		opts.Cluster.Host = h
		// Parse the port portion into the int field.
		_, err = fmt.Sscan(p, &opts.Cluster.Port)
		if err != nil {
			return err
		}

		if clusterURL.User != nil {
			pass, hasPassword := clusterURL.User.Password()
			if !hasPassword {
				return fmt.Errorf("expected cluster password to be set")
			}
			opts.Cluster.Password = pass

			user := clusterURL.User.Username()
			opts.Cluster.Username = user
		} else {
			// Since we override from flag and there is no user/pwd, make
			// sure we clear what we may have gotten from config file.
			opts.Cluster.Username = ""
			opts.Cluster.Password = ""
		}
	}

	// If we have routes but no config file, fill in here.
	if opts.RoutesStr != "" && opts.Routes == nil {
		opts.Routes = server.RoutesFromStr(opts.RoutesStr)
	}

	return nil
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L2138-L2168
go
train
// configureNATSServerTLS sets up TLS for the NATS Server. // Additional TLS parameters (e.g. cipher suites) will need to be placed // in a configuration file specified through the -config parameter.
func (s *StanServer) configureNATSServerTLS() error
// configureNATSServerTLS sets up TLS for the NATS Server. // Additional TLS parameters (e.g. cipher suites) will need to be placed // in a configuration file specified through the -config parameter. func (s *StanServer) configureNATSServerTLS() error
{ opts := s.natsOpts tlsSet := false tc := server.TLSConfigOpts{} if opts.TLSCert != "" { tc.CertFile = opts.TLSCert tlsSet = true } if opts.TLSKey != "" { tc.KeyFile = opts.TLSKey tlsSet = true } if opts.TLSCaCert != "" { tc.CaFile = opts.TLSCaCert tlsSet = true } if opts.TLSVerify { tc.Verify = true tlsSet = true } var err error if tlsSet { if opts.TLSConfig, err = server.GenTLSConfig(&tc); err != nil { // The connection will fail later if the problem is severe enough. return fmt.Errorf("unable to setup NATS Server TLS: %v", err) } } return nil }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L2172-L2194
go
train
// startNATSServer starts the embedded NATS server, possibly updating // the NATS Server's clustering and/or TLS options.
func (s *StanServer) startNATSServer() error
// startNATSServer starts the embedded NATS server, possibly updating // the NATS Server's clustering and/or TLS options. func (s *StanServer) startNATSServer() error
{
	// Normalize clustering and TLS options before creating the server.
	if err := s.configureClusterOpts(); err != nil {
		return err
	}
	if err := s.configureNATSServerTLS(); err != nil {
		return err
	}
	opts := s.natsOpts
	s.natsServer = server.New(opts)
	if s.natsServer == nil {
		return fmt.Errorf("no NATS Server object returned")
	}
	// Hand the streaming logger to the embedded NATS server, if one is set.
	if stanLogger := s.log.GetLogger(); stanLogger != nil {
		s.natsServer.SetLogger(stanLogger, opts.Debug, opts.Trace)
	}

	// Run server in Go routine.
	go s.natsServer.Start()

	// Wait for accept loop(s) to be started
	if !s.natsServer.ReadyForConnections(10 * time.Second) {
		return fmt.Errorf("unable to start a NATS Server on %s:%d", opts.Host, opts.Port)
	}
	return nil
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L2200-L2232
go
train
// ensureRunningStandAlone prevents this streaming server from starting // if another is found using the same cluster ID - a possibility when // routing is enabled. // This runs under server's lock so nothing should grab the server lock here.
func (s *StanServer) ensureRunningStandAlone() error
// ensureRunningStandAlone prevents this streaming server from starting // if another is found using the same cluster ID - a possibility when // routing is enabled. // This runs under server's lock so nothing should grab the server lock here. func (s *StanServer) ensureRunningStandAlone() error
{
	// Probe the discovery subject with a connect request using the cluster
	// ID as the client ID; a valid response means another server with the
	// same cluster ID is already running.
	clusterID := s.info.ClusterID
	hbInbox := nats.NewInbox()
	timeout := time.Millisecond * 250

	// We cannot use the client's API here as it will create a dependency
	// cycle in the streaming client, so build our request and see if we
	// get a response.
	req := &pb.ConnectRequest{ClientID: clusterID, HeartbeatInbox: hbInbox}
	// Marshal error deliberately ignored: the request is fully populated
	// from in-memory values.
	b, _ := req.Marshal()
	reply, err := s.nc.Request(s.info.Discovery, b, timeout)
	if err == nats.ErrTimeout {
		// Nobody answered: we are alone, which is the expected case.
		s.log.Debugf("Did not detect another server instance")
		return nil
	}
	if err != nil {
		return fmt.Errorf("request error detecting another server instance: %v", err)
	}
	// See if the response is valid and can be unmarshalled.
	cr := &pb.ConnectResponse{}
	err = cr.Unmarshal(reply.Data)
	if err != nil {
		// Something other than a compatible streaming server responded.
		// This may cause other problems in the long run, so better fail
		// the startup early.
		return fmt.Errorf("unmarshall error while detecting another server instance: %v", err)
	}
	// Another streaming server was found, cleanup then return error.
	// The close request is best effort: its error is intentionally ignored.
	clreq := &pb.CloseRequest{ClientID: clusterID}
	b, _ = clreq.Marshal()
	s.nc.Request(cr.CloseRequests, b, timeout)
	return fmt.Errorf("discovered another streaming server with cluster ID %q", clusterID)
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L2235-L2239
go
train
// Binds server's view of a client with stored Client objects.
func (s *StanServer) processRecoveredClients(clients []*stores.Client)
// Binds server's view of a client with stored Client objects. func (s *StanServer) processRecoveredClients(clients []*stores.Client)
{
	// In clustered mode, recovered clients are not bound here; nothing to do.
	if s.isClustered {
		return
	}
	s.clients.recoverClients(clients)
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L2242-L2270
go
train
// Reconstruct the subscription state on restart.
func (s *StanServer) processRecoveredChannels(channels map[string]*stores.RecoveredChannel) ([]*subState, error)
// Reconstruct the subscription state on restart. func (s *StanServer) processRecoveredChannels(channels map[string]*stores.RecoveredChannel) ([]*subState, error)
{
	// Re-create each recovered channel and (in standalone mode) its
	// subscriptions; returns all recovered subscriptions.
	allSubs := make([]*subState, 0, 16)

	for channelName, recoveredChannel := range channels {
		channel, err := s.channels.create(s, channelName, recoveredChannel.Channel)
		if err != nil {
			return nil, err
		}
		// In clustered mode subscriptions are not recovered here (allSubs
		// stays empty and no delete timer is started).
		if !s.isClustered {
			// Get the recovered subscriptions for this channel.
			for _, recSub := range recoveredChannel.Subscriptions {
				sub := s.recoverOneSub(channel, recSub.Sub, recSub.Pending, nil)
				if sub != nil {
					// Subscribe to subscription ACKs
					if err := sub.startAckSub(s.nca, s.processAckMsg); err != nil {
						return nil, err
					}
					allSubs = append(allSubs, sub)
				}
			}
			// Now that we have recovered possible subscriptions for this channel,
			// check if we should start the delete timer.
			if channel.activity != nil {
				s.channels.maybeStartChannelDeleteTimer(channelName, channel)
			}
		}
	}
	return allSubs, nil
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L2363-L2385
go
train
// Do some final setup. Be minded of locking here since the server // has started communication with NATS server/clients.
func (s *StanServer) postRecoveryProcessing(recoveredClients []*stores.Client, recoveredSubs []*subState)
// Do some final setup. Be minded of locking here since the server // has started communication with NATS server/clients. func (s *StanServer) postRecoveryProcessing(recoveredClients []*stores.Client, recoveredSubs []*subState)
{
	// Mark every recovered subscription as initialized so it can take part
	// in message delivery.
	for _, sub := range recoveredSubs {
		sub.Lock()
		// Consider this subscription initialized. Note that it may
		// still have newOnHold == true, which would prevent incoming
		// messages to be delivered before we attempt to redeliver
		// unacknowledged messages in performRedeliveryOnStartup.
		sub.initialized = true
		sub.Unlock()
	}
	// Go through the list of clients and ensure their Hb timer is set. Only do
	// this for standalone mode. If clustered, timers will be setup on leader
	// election.
	if !s.isClustered {
		for _, sc := range recoveredClients {
			// Because of the loop, we need to make a copy for the closure
			cID := sc.ID
			s.clients.setClientHB(cID, s.opts.ClientHBInterval, func() {
				s.checkClientHealth(cID)
			})
		}
	}
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L2389-L2432
go
train
// Redelivers unacknowledged messages, releases the hold for new messages delivery, // and kicks delivery of available messages.
func (s *StanServer) performRedeliveryOnStartup(recoveredSubs []*subState)
// Redelivers unacknowledged messages, releases the hold for new messages delivery, // and kicks delivery of available messages. func (s *StanServer) performRedeliveryOnStartup(recoveredSubs []*subState)
{
	// Queue groups whose delivery must be kicked after all members have
	// been processed (deduplicated by queue state).
	queues := make(map[*queueState]*channel)
	for _, sub := range recoveredSubs {
		sub.Lock()
		// Consider this subscription ready to receive messages
		sub.initialized = true
		// If this is a durable and it is offline, then skip the rest
		// (offline durables are only redelivered once they resume).
		if sub.isOfflineDurableSubscriber() {
			sub.newOnHold = false
			sub.Unlock()
			continue
		}
		// Unlock in order to call function below
		sub.Unlock()
		// Send old messages (lock is acquired in that function)
		s.performAckExpirationRedelivery(sub, true)
		// Regrab lock
		sub.Lock()
		// Allow new messages to be delivered
		sub.newOnHold = false
		subject := sub.subject
		qs := sub.qstate
		sub.Unlock()
		c := s.channels.get(subject)
		if c == nil {
			continue
		}
		// Kick delivery of (possible) new messages
		if qs != nil {
			queues[qs] = c
		} else {
			s.sendAvailableMessages(c, sub)
		}
	}
	// Kick delivery for queues that had members with newOnHold
	for qs, c := range queues {
		qs.Lock()
		qs.newOnHold = false
		qs.Unlock()
		s.sendAvailableMessagesToQueue(c, qs)
	}
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L2435-L2471
go
train
// initSubscriptions will setup initial subscriptions for discovery etc.
func (s *StanServer) initSubscriptions() error
// initSubscriptions will setup initial subscriptions for discovery etc. func (s *StanServer) initSubscriptions() error
{
	// Do not create internal subscriptions in clustered mode,
	// the leader will when it gets elected.
	if !s.isClustered {
		createSubOnClientPublish := true

		if s.partitions != nil {
			// Receive published messages from clients, but only on the list
			// of static channels.
			if err := s.partitions.initSubscriptions(); err != nil {
				return err
			}
			// Since we create a subscription per channel, do not create
			// the internal subscription on the > wildcard
			createSubOnClientPublish = false
		}
		if err := s.initInternalSubs(createSubOnClientPublish); err != nil {
			return err
		}
	}

	// Log the protocol subjects so operators can see what the server listens on.
	s.log.Debugf("Discover subject:           %s", s.info.Discovery)
	// For partitions, we actually print the list of channels
	// in the startup banner, so we don't need to repeat them here.
	if s.partitions != nil {
		s.log.Debugf("Publish subjects root:      %s", s.info.Publish)
	} else {
		s.log.Debugf("Publish subject:            %s.>", s.info.Publish)
	}
	s.log.Debugf("Subscribe subject:          %s", s.info.Subscribe)
	s.log.Debugf("Subscription Close subject: %s", s.info.SubClose)
	s.log.Debugf("Unsubscribe subject:        %s", s.info.Unsubscribe)
	s.log.Debugf("Close subject:              %s", s.info.Close)
	return nil
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L2558-L2613
go
train
// Process a client connect request
func (s *StanServer) connectCB(m *nats.Msg)
// Process a client connect request func (s *StanServer) connectCB(m *nats.Msg)
{
	// Decode and validate the connect request.
	req := &pb.ConnectRequest{}
	err := req.Unmarshal(m.Data)
	if err != nil || req.HeartbeatInbox == "" {
		s.log.Errorf("[Client:?] Invalid conn request: ClientID=%s, Inbox=%s, err=%v",
			req.ClientID, req.HeartbeatInbox, err)
		s.sendConnectErr(m.Reply, ErrInvalidConnReq.Error())
		return
	}
	if !clientIDRegEx.MatchString(req.ClientID) {
		s.log.Errorf("[Client:%s] Invalid ClientID, only alphanumeric and `-` or `_` characters allowed", req.ClientID)
		s.sendConnectErr(m.Reply, ErrInvalidClientID.Error())
		return
	}

	// If the client ID is already registered, check to see if it's the case
	// that the client refreshed (e.g. it crashed and came back) or if the
	// connection is a duplicate. If it refreshed, we will close the old
	// client and open a new one.
	client := s.clients.lookup(req.ClientID)
	if client != nil {
		// When detecting a duplicate, the processing of the connect request
		// is going to be processed in a go-routine. We need however to keep
		// track and fail another request on the same client ID until the
		// current one has finished.
		// NOTE(review): the map field is spelled `cliDipCIDsMap` (likely a
		// typo for "Dup") at its declaration site; it cannot be renamed here.
		s.cliDupCIDsMu.Lock()
		if _, exists := s.cliDipCIDsMap[req.ClientID]; exists {
			s.cliDupCIDsMu.Unlock()
			s.log.Debugf("[Client:%s] Connect failed; already connected", req.ClientID)
			s.sendConnectErr(m.Reply, ErrInvalidClient.Error())
			return
		}
		s.cliDipCIDsMap[req.ClientID] = struct{}{}
		s.cliDupCIDsMu.Unlock()

		// Duplicate detection pings the old client's heartbeat inbox and can
		// block up to the dup timeout, so do it off the NATS callback.
		s.startGoRoutine(func() {
			defer s.wg.Done()

			isDup := false
			if s.isDuplicateConnect(client) {
				s.log.Debugf("[Client:%s] Connect failed; already connected", req.ClientID)
				s.sendConnectErr(m.Reply, ErrInvalidClient.Error())
				isDup = true
			}
			s.cliDupCIDsMu.Lock()
			if !isDup {
				s.handleConnect(req, m, true)
			}
			delete(s.cliDipCIDsMap, req.ClientID)
			s.cliDupCIDsMu.Unlock()
		})
		return
	}
	// Fast path: unknown client ID, handle the connect inline.
	s.cliDupCIDsMu.Lock()
	s.handleConnect(req, m, false)
	s.cliDupCIDsMu.Unlock()
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L2638-L2650
go
train
// isDuplicateConnect determines if the given client ID is a duplicate // connection by pinging the old client's heartbeat inbox and checking if it // responds. If it does, it's a duplicate connection.
func (s *StanServer) isDuplicateConnect(client *client) bool
// isDuplicateConnect determines if the given client ID is a duplicate // connection by pinging the old client's heartbeat inbox and checking if it // responds. If it does, it's a duplicate connection. func (s *StanServer) isDuplicateConnect(client *client) bool
{
	// Grab the heartbeat inbox of the currently registered ("old") client.
	client.RLock()
	inbox := client.info.HbInbox
	client.RUnlock()

	// Ping that inbox: a reply means the old client is still alive, so the
	// incoming connection using the same client ID is a duplicate.
	if _, err := s.nc.Request(inbox, nil, s.dupCIDTimeout); err != nil {
		return false
	}
	return true
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L2658-L2669
go
train
// When calling ApplyFuture.Error(), if we get an error it means // that raft failed to commit to its log. // But if we also want the result of FSM.Apply(), which in this // case is StanServer.Apply(), we need to check the Response(). // So we first check error from future.Error(). If nil, then we // check the Response.
func waitForReplicationErrResponse(f raft.ApplyFuture) error
// When calling ApplyFuture.Error(), if we get an error it means // that raft failed to commit to its log. // But if we also want the result of FSM.Apply(), which in this // case is StanServer.Apply(), we need to check the Response(). // So we first check error from future.Error(). If nil, then we // check the Response. func waitForReplicationErrResponse(f raft.ApplyFuture) error
{
	// First check whether Raft failed to commit the operation to its log.
	if err := f.Error(); err != nil {
		return err
	}
	// The commit succeeded; the FSM.Apply() result (StanServer.Apply here)
	// is available via Response(). Call it once and reuse the value —
	// the original called f.Response() twice.
	// Callers only use this helper when the response, if non-nil, is known
	// to be an error object, hence the unchecked type assertion.
	if resp := f.Response(); resp != nil {
		return resp.(error)
	}
	return nil
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L2672-L2692
go
train
// Leader invokes this to replicate the command to delete a channel.
func (s *StanServer) replicateDeleteChannel(channel string)
// Leader invokes this to replicate the command to delete a channel. func (s *StanServer) replicateDeleteChannel(channel string)
{
	// Build the Raft operation for the channel deletion.
	op := &spb.RaftOperation{
		OpType:  spb.RaftOperation_DeleteChannel,
		Channel: channel,
	}
	data, err := op.Marshal()
	if err != nil {
		// Marshal failure of an in-memory protobuf is a programmer error.
		panic(err)
	}
	// Wait on result of replication.
	if err = s.raft.Apply(data, 0).Error(); err != nil {
		// If we have lost leadership, clear the deleteInProgress flag
		// so a future leader (or this node again) can retry the delete.
		cs := s.channels
		cs.Lock()
		c := cs.channels[channel]
		if c != nil && c.activity != nil {
			c.activity.deleteInProgress = false
		}
		cs.Unlock()
	}
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L2696-L2745
go
train
// Check if the channel can be deleted. If so, do it in place. // This is called from the ioLoop by the leader or a standalone server.
func (s *StanServer) handleChannelDelete(c *channel)
// Check if the channel can be deleted. If so, do it in place. // This is called from the ioLoop by the leader or a standlone server. func (s *StanServer) handleChannelDelete(c *channel)
{ delete := false cs := s.channels cs.Lock() a := c.activity if a.preventDelete || a.deleteInProgress || c.ss.hasActiveSubs() { if s.debug { s.log.Debugf("Channel %q cannot be deleted: preventDelete=%v inProgress=%v hasActiveSubs=%v", c.name, a.preventDelete, a.deleteInProgress, c.ss.hasActiveSubs()) } c.stopDeleteTimer() } else { elapsed := time.Since(a.last) if elapsed >= a.maxInactivity { if s.debug { s.log.Debugf("Channel %q is being deleted", c.name) } c.stopDeleteTimer() // Leave in map for now, but mark as deleted. If we removed before // completion of the removal, a new lookup could re-create while // in the process of deleting it. a.deleteInProgress = true delete = true } else { var next time.Duration if elapsed < 0 { next = a.maxInactivity } else { // elapsed < a.maxInactivity next = a.maxInactivity - elapsed } if s.debug { s.log.Debugf("Channel %q cannot be deleted now, reset timer to fire in %v", c.name, next) } c.resetDeleteTimer(next) } } cs.Unlock() if delete { if testDeleteChannel { time.Sleep(time.Second) } if s.isClustered { s.replicateDeleteChannel(c.name) } else { s.processDeleteChannel(c.name) } } }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L2748-L2772
go
train
// Actual deletion of the channel: removes it from the store and from the
// in-memory channels map. On failure the delete timer is re-armed so the
// deletion can be retried later.
func (s *StanServer) processDeleteChannel(channel string) {
	cs := s.channels
	cs.Lock()
	defer cs.Unlock()
	c := cs.channels[channel]
	if c == nil {
		s.log.Errorf("Error deleting channel %q: not found", channel)
		return
	}
	// A subscription may have been created between scheduling and executing
	// the delete; in that case the channel must be kept.
	if c.activity != nil && c.activity.preventDelete {
		s.log.Errorf("The channel %q cannot be deleted at this time since a subscription has been created", channel)
		return
	}
	// Delete from store
	if err := cs.store.DeleteChannel(channel); err != nil {
		s.log.Errorf("Error deleting channel %q: %v", channel, err)
		if c.activity != nil {
			// Clear the in-progress flag and restart the inactivity timer
			// so the delete is attempted again.
			c.activity.deleteInProgress = false
			c.startDeleteTimer()
		}
		return
	}
	delete(s.channels.channels, channel)
	s.log.Noticef("Channel %q has been deleted", channel)
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L2859-L2944
go
train
// Send a heartbeat call to the client. Invoked from the client's HB timer.
// Tracks consecutive failed heartbeats and closes the client once the
// configured failure count is exceeded.
func (s *StanServer) checkClientHealth(clientID string) {
	client := s.clients.lookup(clientID)
	if client == nil {
		return
	}

	// If clustered and we lost leadership, we should stop
	// heartbeating as the new leader will take over.
	if s.isClustered && !s.isLeader() {
		// Do not remove client HB here. We do that in
		// leadershipLost. We could be here because the
		// callback fired while we are not yet finished
		// acquiring leadership.
		client.Lock()
		if client.hbt != nil {
			client.hbt.Reset(s.opts.ClientHBInterval)
		}
		client.Unlock()
		return
	}

	client.RLock()
	hbInbox := client.info.HbInbox
	client.RUnlock()

	// Sends the HB request. This call blocks for ClientHBTimeout,
	// do not hold the lock for that long!
	_, err := s.nc.Request(hbInbox, nil, s.opts.ClientHBTimeout)
	// Grab the lock now.
	client.Lock()
	// Client could have been unregistered, in which case
	// client.hbt will be nil.
	if client.hbt == nil {
		client.Unlock()
		return
	}
	// Remember the failed state before this attempt so we can detect a
	// transition (failed <-> healthy) below.
	hadFailed := client.fhb > 0
	// If we did not get the reply, increase the number of
	// failed heartbeats.
	if err != nil {
		client.fhb++
		// If we have reached the max number of failures
		if client.fhb > s.opts.ClientHBFailCount {
			s.log.Debugf("[Client:%s] Timed out on heartbeats", clientID)
			// close the client (connection). This locks the
			// client object internally so unlock here.
			client.Unlock()
			// If clustered, thread operations through Raft.
			if s.isClustered {
				s.barrier(func() {
					if err := s.replicateConnClose(&pb.CloseRequest{ClientID: clientID}); err != nil {
						s.log.Errorf("[Client:%s] Failed to replicate disconnect on heartbeat expiration: %v",
							clientID, err)
					}
				})
			} else {
				s.closeClient(clientID)
			}
			return
		}
	} else {
		// We got the reply, reset the number of failed heartbeats.
		client.fhb = 0
	}
	// Reset the timer to fire again.
	client.hbt.Reset(s.opts.ClientHBInterval)
	var (
		subs        []*subState
		hasFailedHB = client.fhb > 0
	)
	// Only fan out to subscriptions when the failed-HB state actually
	// changed, to avoid touching every sub on each heartbeat.
	if (hadFailed && !hasFailedHB) || (!hadFailed && hasFailedHB) {
		// Get a copy of subscribers and client.fhb while under lock
		subs = client.getSubsCopy()
	}
	client.Unlock()
	if len(subs) > 0 {
		// Push the info about presence of failed heartbeats down to
		// subscribers, so they have easier access to that info in
		// the redelivery attempt code.
		for _, sub := range subs {
			sub.Lock()
			sub.hasFailedHB = hasFailedHB
			sub.Unlock()
		}
	}
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L2947-L2972
go
train
// Close a client: removes its non-durable subscriptions and unregisters it
// from the client store. Returns ErrUnknownClient if the client is not known.
func (s *StanServer) closeClient(clientID string) error {
	// Serialize close operations.
	s.closeMu.Lock()
	defer s.closeMu.Unlock()
	// Lookup client first, will unregister only after removing its subscriptions
	client := s.clients.lookup(clientID)
	if client == nil {
		s.log.Errorf("Unknown client %q in close request", clientID)
		return ErrUnknownClient
	}

	// Remove all non-durable subscribers.
	s.removeAllNonDurableSubscribers(client)

	// Remove from our clientStore.
	if _, err := s.clients.unregister(clientID); err != nil {
		// Log but do not fail the close: subscriptions are already removed.
		s.log.Errorf("Error unregistering client %q: %v", clientID, err)
	}

	if s.debug {
		client.RLock()
		hbInbox := client.info.HbInbox
		client.RUnlock()
		s.log.Debugf("[Client:%s] Closed (Inbox=%v)", clientID, hbInbox)
	}
	return nil
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L2975-L2997
go
train
// processCloseRequest will process connection close requests from clients.
// The close is executed behind a barrier so it is ordered with respect to
// other in-flight operations, and replicated through raft when clustered.
func (s *StanServer) processCloseRequest(m *nats.Msg) {
	req := &pb.CloseRequest{}
	err := req.Unmarshal(m.Data)
	if err != nil {
		s.log.Errorf("Received invalid close request, subject=%s", m.Subject)
		s.sendCloseResponse(m.Reply, ErrInvalidCloseReq)
		return
	}

	s.barrier(func() {
		var err error
		// If clustered, thread operations through Raft.
		if s.isClustered {
			err = s.replicateConnClose(req)
		} else {
			err = s.closeClient(req.ClientID)
		}
		// If there was an error, it has been already logged.

		// Send response, if err is nil, will be a success response.
		s.sendCloseResponse(m.Reply, err)
	})
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L3030-L3070
go
train
// processClientPublish process inbound messages from clients.
// Validates the publish request and the publishing client, then hands the
// message to the ioLoop through the ioChannel.
func (s *StanServer) processClientPublish(m *nats.Msg) {
	iopm := &ioPendingMsg{m: m}
	pm := &iopm.pm
	if pm.Unmarshal(m.Data) != nil {
		// Might be a (legacy) control message from an older server; if so,
		// ignore it silently.
		if s.processCtrlMsg(m) {
			return
		}
		// else we will report an error below...
	}

	// Make sure we have a guid and valid channel name.
	if pm.Guid == "" || !util.IsChannelNameValid(pm.Subject, false) {
		s.log.Errorf("Received invalid client publish message %v", pm)
		s.sendPublishErr(m.Reply, pm.Guid, ErrInvalidPubReq)
		return
	}

	if s.debug {
		s.log.Tracef("[Client:%s] Received message from publisher subj=%s guid=%s",
			pm.ClientID, pm.Subject, pm.Guid)
	}

	// Check if the client is valid. We do this after the clustered check so
	// that only the leader performs this check.
	valid := false
	if s.partitions != nil {
		// In partitioning mode it is possible that we get there
		// before the connect request is processed. If so, make sure we wait
		// for conn request	to be processed first. Check clientCheckTimeout
		// doc for details.
		valid = s.clients.isValidWithTimeout(pm.ClientID, pm.ConnID, clientCheckTimeout)
	} else {
		valid = s.clients.isValid(pm.ClientID, pm.ConnID)
	}
	if !valid {
		s.log.Errorf("Received invalid client publish message %v", pm)
		s.sendPublishErr(m.Reply, pm.Guid, ErrInvalidPubReq)
		return
	}

	// Queue the message for the ioLoop.
	s.ioChannel <- iopm
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L3075-L3113
go
train
// processClientPings receives a PING from a client. The payload is the client's UID.
// If the client is present, a response with nil payload is sent back to indicate
// success, otherwise the payload contains an error message.
func (s *StanServer) processClientPings(m *nats.Msg) {
	if len(m.Data) == 0 {
		return
	}
	ping := &pb.Ping{}
	if err := ping.Unmarshal(m.Data); err != nil {
		return
	}
	var reply []byte
	client := s.clients.lookupByConnID(ping.ConnID)
	if client != nil {
		// If the client has failed heartbeats and since the
		// server just received a PING from the client, reset
		// the server-to-client HB timer so that a PING is
		// sent soon and the client's subscriptions failedHB
		// is cleared.
		client.RLock()
		hasFailedHBs := client.fhb > 0
		client.RUnlock()
		if hasFailedHBs {
			client.Lock()
			client.hbt.Reset(time.Millisecond)
			client.Unlock()
		}
		// Lazily build and cache the success response bytes.
		if s.pingResponseOKBytes == nil {
			s.pingResponseOKBytes, _ = (&pb.PingResponse{}).Marshal()
		}
		reply = s.pingResponseOKBytes
	} else {
		// Lazily build and cache the "unknown client" error response bytes.
		if s.pingResponseInvalidClientBytes == nil {
			pingError := &pb.PingResponse{
				Error: "client has been replaced or is no longer registered",
			}
			s.pingResponseInvalidClientBytes, _ = pingError.Marshal()
		}
		reply = s.pingResponseInvalidClientBytes
	}
	s.ncs.Publish(m.Reply, reply)
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L3122-L3127
go
train
// CtrlMsg are no longer used to solve connection and subscription close/unsub // ordering issues. However, a (newer) server may still receive those from // older servers in the same NATS cluster. // Since original behavior was to ignore control messages sent from a server // other than itself, and since new server do not send those (in this context // at least), this function simply make sure that if it is a properly formed // CtrlMsg, we just ignore.
func (s *StanServer) processCtrlMsg(m *nats.Msg) bool
// CtrlMsg are no longer used to solve connection and subscription close/unsub // ordering issues. However, a (newer) server may still receive those from // older servers in the same NATS cluster. // Since original behavior was to ignore control messages sent from a server // other than itself, and since new server do not send those (in this context // at least), this function simply make sure that if it is a properly formed // CtrlMsg, we just ignore. func (s *StanServer) processCtrlMsg(m *nats.Msg) bool
{ cm := &spb.CtrlMsg{} // Since we don't use CtrlMsg for connection/subscription close/unsub, // simply return true if CtrlMsg is valid so that this message is ignored. return cm.Unmarshal(m.Data) == nil }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L3138-L3170
go
train
// FIXME(dlc) - place holder to pick sub that has least outstanding, should just sort, // or use insertion sort, etc.
func findBestQueueSub(sl []*subState) *subState
// FIXME(dlc) - place holder to pick sub that has least outstanding, should just sort, // or use insertion sort, etc. func findBestQueueSub(sl []*subState) *subState
{ var ( leastOutstanding = int(^uint(0) >> 1) rsub *subState ) for _, sub := range sl { sub.RLock() sOut := len(sub.acksPending) sStalled := sub.stalled sHasFailedHB := sub.hasFailedHB sub.RUnlock() // Favor non stalled subscribers and clients that do not have failed heartbeats if !sStalled && !sHasFailedHB { if sOut < leastOutstanding { leastOutstanding = sOut rsub = sub } } } len := len(sl) if rsub == nil && len > 0 { rsub = sl[0] } if len > 1 && rsub == sl[0] { copy(sl, sl[1:len]) sl[len-1] = rsub } return rsub }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L3174-L3192
go
train
// Send a message to the queue group
// Assumes qs lock held for write
// Returns the member picked for delivery, whether the message was sent,
// and whether more messages can be sent to this group.
func (s *StanServer) sendMsgToQueueGroup(qs *queueState, m *pb.MsgProto, force bool) (*subState, bool, bool) {
	sub := findBestQueueSub(qs.subs)
	if sub == nil {
		return nil, false, false
	}
	sub.Lock()
	wasStalled := sub.stalled
	didSend, sendMore := s.sendMsgToSub(sub, m, force)
	// If this is not a redelivery and the sub was not stalled, but now is,
	// bump the number of stalled members.
	if !force && !wasStalled && sub.stalled {
		qs.stalledSubCount++
	}
	// Track the group's high-water mark of delivered sequences.
	if didSend && sub.LastSent > qs.lastSent {
		qs.lastSent = sub.LastSent
	}
	sub.Unlock()
	return sub, didSend, sendMore
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L3195-L3210
go
train
// processMsg will process a message, and possibly send to clients, etc.
func (s *StanServer) processMsg(c *channel)
// processMsg will process a message, and possibly send to clients, etc. func (s *StanServer) processMsg(c *channel)
{ ss := c.ss // Since we iterate through them all. ss.RLock() // Walk the plain subscribers and deliver to each one for _, sub := range ss.psubs { s.sendAvailableMessages(c, sub) } // Check the queue subscribers for _, qs := range ss.qsubs { s.sendAvailableMessagesToQueue(c, qs) } ss.RUnlock() }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L3220-L3227
go
train
// Returns an array of message sequence numbers ordered by sequence
// (ascending), extracted from the keys of the given map.
func makeSortedSequences(sequences map[uint64]int64) []uint64 {
	seqs := make([]uint64, 0, len(sequences))
	for seq := range sequences {
		seqs = append(seqs, seq)
	}
	sort.Slice(seqs, func(i, j int) bool { return seqs[i] < seqs[j] })
	return seqs
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L3246-L3253
go
train
// Returns an array of pendingMsg ordered by expiration date, unless // the expiration date in the pendingMsgs map is not set (0), which // happens after a server restart. In this case, the array is ordered // by message sequence numbers.
func makeSortedPendingMsgs(pendingMsgs map[uint64]int64) []*pendingMsg
// Returns an array of pendingMsg ordered by expiration date, unless // the expiration date in the pendingMsgs map is not set (0), which // happens after a server restart. In this case, the array is ordered // by message sequence numbers. func makeSortedPendingMsgs(pendingMsgs map[uint64]int64) []*pendingMsg
{ results := make([]*pendingMsg, 0, len(pendingMsgs)) for seq, expire := range pendingMsgs { results = append(results, &pendingMsg{seq: seq, expire: expire}) } sort.Sort(byExpire(results)) return results }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L3256-L3303
go
train
// Redeliver all outstanding messages to a durable subscriber, used on resubscribe.
// Messages are redelivered in ascending sequence order and forced out
// regardless of the subscriber's MaxInFlight limit.
func (s *StanServer) performDurableRedelivery(c *channel, sub *subState) {
	// Sort our messages outstanding from acksPending, grab some state and unlock.
	sub.RLock()
	sortedSeqs := makeSortedSequences(sub.acksPending)
	clientID := sub.ClientID
	newOnHold := sub.newOnHold
	subID := sub.ID
	sub.RUnlock()

	if s.debug && len(sortedSeqs) > 0 {
		sub.RLock()
		durName := sub.DurableName
		if durName == "" {
			durName = sub.QGroup
		}
		sub.RUnlock()
		s.log.Debugf("[Client:%s] Redelivering to subid=%d, durable=%s", clientID, subID, durName)
	}

	// If we don't find the client, we are done.
	if s.clients.lookup(clientID) != nil {
		// Go through all messages
		for _, seq := range sortedSeqs {
			m := s.getMsgForRedelivery(c, sub, seq)
			if m == nil {
				// Message no longer in store (hit limits); it was acked
				// inside getMsgForRedelivery.
				continue
			}

			if s.trace {
				s.log.Tracef("[Client:%s] Redelivering to subid=%d, seq=%d", clientID, subID, m.Sequence)
			}

			// Flag as redelivered.
			m.Redelivered = true

			sub.Lock()
			// Force delivery
			s.sendMsgToSub(sub, m, forceDelivery)
			sub.Unlock()
		}
	}
	// Release newOnHold if needed.
	if newOnHold {
		sub.Lock()
		sub.newOnHold = false
		sub.Unlock()
	}
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L3306-L3431
go
train
// Redeliver all outstanding messages that have expired.
// Invoked from a subscription's ack timer (and at startup). Walks the
// pending messages sorted by expiration, redelivers the expired ones and
// re-arms the timer for the next expiration.
func (s *StanServer) performAckExpirationRedelivery(sub *subState, isStartup bool) {
	// Sort our messages outstanding from acksPending, grab some state and unlock.
	sub.Lock()
	sortedPendingMsgs := makeSortedPendingMsgs(sub.acksPending)
	if len(sortedPendingMsgs) == 0 {
		sub.clearAckTimer()
		sub.Unlock()
		return
	}
	expTime := int64(sub.ackWait)
	subject := sub.subject
	qs := sub.qstate
	clientID := sub.ClientID
	subID := sub.ID
	if sub.ackTimer == nil {
		s.setupAckTimer(sub, sub.ackWait)
	}
	if qs == nil {
		// If the client has some failed heartbeats, ignore this request.
		if sub.hasFailedHB {
			// Reset the timer
			sub.ackTimer.Reset(sub.ackWait)
			sub.Unlock()
			if s.debug {
				s.log.Debugf("[Client:%s] Skipping redelivery to subid=%d due to missed client heartbeat", clientID, subID)
			}
			return
		}
	}
	sub.Unlock()

	c := s.channels.get(subject)
	if c == nil {
		s.log.Errorf("[Client:%s] Aborting redelivery to subid=%d for non existing channel %s", clientID, subID, subject)
		sub.Lock()
		sub.clearAckTimer()
		sub.Unlock()
		return
	}

	// In cluster mode we will always redeliver to the same queue member.
	// This is to avoid to have to replicated sent/ack when a message would
	// be redelivered (removed from one member to be sent to another member)
	isClustered := s.isClustered

	now := time.Now().UnixNano()
	// limit is now plus a buffer of 15ms to avoid repeated timer callbacks.
	limit := now + int64(15*time.Millisecond)

	var (
		pick               *subState
		sent               bool
		tracePrinted       bool
		foundWithZero      bool
		nextExpirationTime int64
	)

	// We will move through acksPending(sorted) and see what needs redelivery.
	for _, pm := range sortedPendingMsgs {
		m := s.getMsgForRedelivery(c, sub, pm.seq)
		if m == nil {
			continue
		}
		// If we found any pm.expire with 0 in the array (due to a server restart),
		// ensure that all have now an expiration set, then reschedule right away.
		if foundWithZero || pm.expire == 0 {
			foundWithZero = true
			if pm.expire == 0 {
				sub.Lock()
				// Is message still pending?
				if _, present := sub.acksPending[pm.seq]; present {
					sub.acksPending[pm.seq] = m.Timestamp + expTime
				}
				sub.Unlock()
			}
			continue
		}

		// If this message has not yet expired, reset timer for next callback
		if pm.expire > limit {
			nextExpirationTime = pm.expire
			if !tracePrinted && s.trace {
				tracePrinted = true
				s.log.Tracef("[Client:%s] Redelivery for subid=%d, skipping seq=%d", clientID, subID, m.Sequence)
			}
			// Pending messages are sorted by expiration: everything after
			// this one is also not expired yet.
			break
		}

		// Flag as redelivered.
		m.Redelivered = true

		// Handle QueueSubscribers differently, since we will choose best subscriber
		// to redeliver to, not necessarily the same one.
		// However, on startup, resends only to member that had previously this message
		// otherwise this could cause a message to be redelivered to multiple members.
		if !isClustered && qs != nil && !isStartup {
			qs.Lock()
			pick, sent, _ = s.sendMsgToQueueGroup(qs, m, forceDelivery)
			qs.Unlock()
			if pick == nil {
				s.log.Errorf("[Client:%s] Unable to find queue subscriber for subid=%d", clientID, subID)
				break
			}
			// If the message is redelivered to a different queue subscriber,
			// we need to process an implicit ack for the original subscriber.
			// We do this only after confirmation that it was successfully added
			// as pending on the other queue subscriber.
			if pick != sub && sent {
				s.processAck(c, sub, m.Sequence, false)
			}
		} else {
			sub.Lock()
			s.sendMsgToSub(sub, m, forceDelivery)
			sub.Unlock()
		}
	}
	if foundWithZero {
		// Restart expiration now that ackPending map's expire values are properly
		// set. Note that messages may have been added/removed in the meantime.
		s.performAckExpirationRedelivery(sub, isStartup)
		return
	}

	// Adjust the timer
	sub.adjustAckTimer(nextExpirationTime)
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L3437-L3451
go
train
// getMsgForRedelivery looks up the message from storage. If not found - // because it has been removed due to limit - processes an ACK for this // sub/sequence number and returns nil, otherwise return a copy of the // message (since it is going to be modified: m.Redelivered = true)
func (s *StanServer) getMsgForRedelivery(c *channel, sub *subState, seq uint64) *pb.MsgProto
// getMsgForRedelivery looks up the message from storage. If not found - // because it has been removed due to limit - processes an ACK for this // sub/sequence number and returns nil, otherwise return a copy of the // message (since it is going to be modified: m.Redelivered = true) func (s *StanServer) getMsgForRedelivery(c *channel, sub *subState, seq uint64) *pb.MsgProto
{ m, err := c.store.Msgs.Lookup(seq) if m == nil || err != nil { if err != nil { s.log.Errorf("Error getting message for redelivery subid=%d, seq=%d, err=%v", sub.ID, seq, err) } // Ack it so that it does not reincarnate on restart s.processAck(c, sub, seq, false) return nil } // The store implementation does not return a copy, we need one mcopy := *m return &mcopy }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L3466-L3491
go
train
// Keep track of sent or ack messages.
// If the number of operations reach a certain threshold,
// the sub is added to list of subs that should be flushed asap.
// This call does not do actual RAFT replication and should not block.
// Caller holds the sub's Lock.
func (s *StanServer) collectSentOrAck(sub *subState, sent bool, sequence uint64) {
	sr := s.ssarepl
	// Lazily create the per-sub sent/ack accumulation buffers.
	if sub.replicate == nil {
		sub.replicate = &subSentAndAck{
			sent: make([]uint64, 0, 100),
			ack:  make([]uint64, 0, 100),
		}
	}
	r := sub.replicate
	if sent {
		r.sent = append(r.sent, sequence)
	} else {
		r.ack = append(r.ack, sequence)
	}
	// This function is called with exactly one event at a time.
	// Use exact count to decide when to add to given map. This
	// avoid the need for booleans to not add more than once.
	l := len(r.sent) + len(r.ack)
	if l == 1 {
		// First buffered event: the lazy replicator will pick it up.
		sr.waiting.Store(sub, struct{}{})
	} else if l == 100 {
		// Threshold reached: promote to the "ready" set and wake the
		// replicator so it flushes right away.
		sr.waiting.Delete(sub)
		sr.ready.Store(sub, struct{}{})
		signalCh(sr.notifyCh)
	}
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L3494-L3530
go
train
// Replicates through RAFT the accumulated sent/ack sequences of this
// subscription, then clears the buffers. Coordinates with
// endSubSentAndAckReplication() through the "gates" map when the sub is
// being closed concurrently.
func (s *StanServer) replicateSubSentAndAck(sub *subState) {
	var data []byte

	sr := s.ssarepl
	sub.Lock()
	r := sub.replicate
	if r != nil && len(r.sent)+len(r.ack) > 0 {
		data = createSubSentAndAckProto(sub, r)
		// Reset the buffers (keeping capacity) and mark that a raft
		// Apply is in flight for this sub.
		r.sent = r.sent[:0]
		r.ack = r.ack[:0]
		r.applying = true
	}
	sub.Unlock()

	if data != nil {
		if testSubSentAndAckSlowApply {
			time.Sleep(100 * time.Millisecond)
		}
		s.raft.Apply(data, 0)

		sub.Lock()
		r = sub.replicate
		// If r is nil it means either that the leader lost leadership,
		// in which case we don't do anything, or the sub/conn is being
		// closed and endSubSentAndAckReplication() is waiting on a
		// channel stored in "gates" map. If we find it, signal.
		if r == nil {
			if c, ok := sr.gates.Load(sub); ok {
				sr.gates.Delete(sub)
				signalCh(c.(chan struct{}))
			}
		} else {
			r.applying = false
		}
		sub.Unlock()
	}
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L3534-L3549
go
train
// Little helper function to create a RaftOperation_SendAndAck protocol // and serialize it.
func createSubSentAndAckProto(sub *subState, r *subSentAndAck) []byte
// Little helper function to create a RaftOperation_SendAndAck protocol // and serialize it. func createSubSentAndAckProto(sub *subState, r *subSentAndAck) []byte
{ op := &spb.RaftOperation{ OpType: spb.RaftOperation_SendAndAck, SubSentAck: &spb.SubSentAndAck{ Channel: sub.subject, AckInbox: sub.AckInbox, Sent: r.sent, Ack: r.ack, }, } data, err := op.Marshal() if err != nil { panic(err) } return data }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L3556-L3585
go
train
// This is called when a subscription is closed or unsubscribed, or
// a connection is closed, but prior to the RAFT replication of such
// event.
// Depending on the type of event, we want to make sure that we flush
// the possibly remaining sent/ack events.
func (s *StanServer) endSubSentAndAckReplication(sub *subState, unsub bool) {
	var ch chan struct{}
	var data []byte

	sub.Lock()
	r := sub.replicate
	if r == nil {
		sub.Unlock()
		return
	}
	// Only durables and queue subs survive a close (not an unsubscribe),
	// so only they need their pending sent/ack flushed.
	if !unsub && (sub.IsDurable || sub.qstate != nil) && len(r.sent)+len(r.ack) > 0 {
		data = createSubSentAndAckProto(sub, r)
	}
	// If the replicator is about to apply, or in middle of it, we
	// want to wait for it to finish regardless if we have to replicate
	// something or not. We are not expecting this situation to occur often.
	if r.applying {
		ch = make(chan struct{}, 1)
		s.ssarepl.gates.Store(sub, ch)
	}
	s.clearSentAndAck(sub)
	sub.Unlock()

	// Wait for the in-flight replication (if any) to signal completion.
	if ch != nil {
		<-ch
	}
	if data != nil {
		s.raft.Apply(data, 0)
	}
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L3588-L3593
go
train
// Sub lock is held on entry
func (s *StanServer) clearSentAndAck(sub *subState)
// Sub lock is held on entry func (s *StanServer) clearSentAndAck(sub *subState)
{ sr := s.ssarepl sr.waiting.Delete(sub) sr.ready.Delete(sub) sub.replicate = nil }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L3597-L3639
go
train
// long-lived go-routine that performs RAFT replication of subscriptions' // sent/ack operations.
func (s *StanServer) subsSentAndAckReplicator()
// long-lived go-routine that performs RAFT replication of subscriptions' // sent/ack operations. func (s *StanServer) subsSentAndAckReplicator()
{ defer s.wg.Done() s.mu.Lock() sr := s.ssarepl ready := sr.ready waiting := sr.waiting notifyCh := sr.notifyCh s.mu.Unlock() ticker := time.NewTicker(lazyReplicationInterval) flush := func() { ready.Range(func(k, _ interface{}) bool { ready.Delete(k) s.replicateSubSentAndAck(k.(*subState)) return true }) } for { select { case <-s.shutdownCh: return case <-ticker.C: addedToReady := false waiting.Range(func(k, _ interface{}) bool { waiting.Delete(k) // Move to ready map ready.Store(k, struct{}{}) addedToReady = true return true }) // If some were transferred and nobody has signaled // to flush the ready ones, do it here if addedToReady && len(notifyCh) == 0 { flush() } case <-notifyCh: flush() } } }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L3645-L3681
go
train
// This is invoked from raft thread on a follower. It persists given // sequence number to subscription of given AckInbox. It updates the // sub (and queue state) LastSent value. It adds the sequence to the // map of acksPending.
func (s *StanServer) processReplicatedSendAndAck(ssa *spb.SubSentAndAck)
// This is invoked from raft thread on a follower. It persists given
// sequence number to subscription of given AckInbox. It updates the
// sub (and queue state) LastSent value. It adds the sequence to the
// map of acksPending.
func (s *StanServer) processReplicatedSendAndAck(ssa *spb.SubSentAndAck) {
	c, err := s.lookupOrCreateChannel(ssa.Channel)
	if err != nil {
		// Nothing useful can be done from the raft apply path; skip.
		return
	}
	sub := c.ss.LookupByAckInbox(ssa.AckInbox)
	if sub == nil {
		// Subscription may already be gone on this node; skip.
		return
	}
	sub.Lock()
	defer sub.Unlock()
	// This is not optimized. The leader sent all accumulated sent and ack
	// sequences. For queue members, there is no much that can be done
	// because by nature seq will not be contiguous, but for non queue
	// subs, this could be optimized.
	for _, sequence := range ssa.Sent {
		// Update LastSent if applicable
		if sequence > sub.LastSent {
			sub.LastSent = sequence
		}
		// In case this is a queue member, update queue state's LastSent.
		if sub.qstate != nil && sequence > sub.qstate.lastSent {
			sub.qstate.lastSent = sequence
		}
		// Set 0 for expiration time. This will be computed
		// when the follower becomes leader and attempts to
		// redeliver messages.
		sub.acksPending[sequence] = 0
	}
	// Now remove the acks pending that we potentially just added ;-)
	for _, sequence := range ssa.Ack {
		delete(sub.acksPending, sequence)
	}
	// Don't set the sub.stalled here. Let that be done if the server
	// becomes leader and attempt the first deliveries.
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L3688-L3778
go
train
// Sends the message to the subscriber // Unless `force` is true, in which case message is always sent, if the number // of acksPending is greater or equal to the sub's MaxInFlight limit, messages // are not sent and subscriber is marked as stalled. // Sub lock should be held before calling.
func (s *StanServer) sendMsgToSub(sub *subState, m *pb.MsgProto, force bool) (bool, bool)
// Sends the message to the subscriber
// Unless `force` is true, in which case message is always sent, if the number
// of acksPending is greater or equal to the sub's MaxInFlight limit, messages
// are not sent and subscriber is marked as stalled.
// Sub lock should be held before calling.
// Returns (sent, sendMore): `sent` tells if this message was delivered,
// `sendMore` tells the caller whether it may keep sending (false once the
// sub has reached MaxInFlight).
func (s *StanServer) sendMsgToSub(sub *subState, m *pb.MsgProto, force bool) (bool, bool) {
	// newOnHold blocks new deliveries but still allows redeliveries.
	if sub == nil || m == nil || !sub.initialized || (sub.newOnHold && !m.Redelivered) {
		return false, false
	}

	// Don't send if we have too many outstanding already, unless forced to send.
	ap := int32(len(sub.acksPending))
	if !force && (ap >= sub.MaxInFlight) {
		sub.stalled = true
		return false, false
	}

	if s.trace {
		var action string
		if m.Redelivered {
			action = "Redelivering"
		} else {
			action = "Delivering"
		}
		s.log.Tracef("[Client:%s] %s msg to subid=%d, subject=%s, seq=%d",
			sub.ClientID, action, sub.ID, m.Subject, m.Sequence)
	}

	// Marshal of a pb.MsgProto cannot fail
	b, _ := m.Marshal()
	// but protect against a store implementation that may incorrectly
	// return an empty message.
	if len(b) == 0 {
		panic("store implementation returned an empty message")
	}
	if err := s.ncs.Publish(sub.Inbox, b); err != nil {
		s.log.Errorf("[Client:%s] Failed sending to subid=%d, subject=%s, seq=%d, err=%v",
			sub.ClientID, sub.ID, m.Subject, m.Sequence, err)
		return false, false
	}

	// Setup the ackTimer as needed now. I don't want to use defer in this
	// function, and want to make sure that if we exit before the end, the
	// timer is set. It will be adjusted/stopped as needed.
	if sub.ackTimer == nil {
		s.setupAckTimer(sub, sub.ackWait)
	}

	// If this message is already pending, do not add it again to the store.
	if expTime, present := sub.acksPending[m.Sequence]; present {
		// However, update the next expiration time.
		if expTime == 0 {
			// That can happen after a server restart, so need to use
			// the current time.
			expTime = time.Now().UnixNano()
		}
		// bump the next expiration time with the sub's ackWait.
		expTime += int64(sub.ackWait)
		sub.acksPending[m.Sequence] = expTime
		return true, true
	}

	// If in cluster mode, schedule replication of the sent event.
	if s.isClustered {
		s.collectSentOrAck(sub, replicateSent, m.Sequence)
	}

	// Store in storage
	if err := sub.store.AddSeqPending(sub.ID, m.Sequence); err != nil {
		s.log.Errorf("[Client:%s] Unable to add pending message to subid=%d, subject=%s, seq=%d, err=%v",
			sub.ClientID, sub.ID, sub.subject, m.Sequence, err)
		return false, false
	}

	// Update LastSent if applicable
	if m.Sequence > sub.LastSent {
		sub.LastSent = m.Sequence
	}

	// Store in ackPending.
	// Use current time to compute expiration time instead of m.Timestamp.
	// A message can be persisted in the log and send much later to a
	// new subscriber. Basing expiration time on m.Timestamp would
	// likely set the expiration time in the past!
	sub.acksPending[m.Sequence] = time.Now().UnixNano() + int64(sub.ackWait)

	// Now that we have added to acksPending, check again if we
	// have reached the max and tell the caller that it should not
	// be sending more at this time.
	if !force && (ap+1 == sub.MaxInFlight) {
		sub.stalled = true
		return true, false
	}
	return true, true
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L3782-L3786
go
train
// Sets up the ackTimer to fire at the given duration. // sub's lock held on entry.
func (s *StanServer) setupAckTimer(sub *subState, d time.Duration)
// Sets up the ackTimer to fire at the given duration.
// sub's lock held on entry.
func (s *StanServer) setupAckTimer(sub *subState, d time.Duration) {
	sub.ackTimer = time.AfterFunc(d, func() {
		// Second argument false: not a forced redelivery.
		s.performAckExpirationRedelivery(sub, false)
	})
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L4007-L4010
go
train
// Sends a special ioPendingMsg to indicate that we should attempt // to delete the given channel.
func (s *StanServer) sendDeleteChannelRequest(c *channel)
// Sends a special ioPendingMsg to indicate that we should attempt // to delete the given channel. func (s *StanServer) sendDeleteChannelRequest(c *channel)
{ iopm := &ioPendingMsg{c: c, dc: true} s.ioChannel <- iopm }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L4016-L4049
go
train
// replicate will replicate the batch of messages to followers and return // futures (one for each channel messages were replicated for) which, when // waited upon, will indicate if the replication was successful or not. This // should only be called if running in clustered mode.
func (s *StanServer) replicate(iopms []*ioPendingMsg) (map[*channel]raft.Future, error)
// replicate will replicate the batch of messages to followers and return
// futures (one for each channel messages were replicated for) which, when
// waited upon, will indicate if the replication was successful or not. This
// should only be called if running in clustered mode.
func (s *StanServer) replicate(iopms []*ioPendingMsg) (map[*channel]raft.Future, error) {
	var (
		futures = make(map[*channel]raft.Future)
		batches = make(map[*channel]*spb.Batch)
	)
	// Group the pending messages into one batch per channel, assigning
	// channel sequence numbers as we go.
	for _, iopm := range iopms {
		pm := &iopm.pm
		c, err := s.lookupOrCreateChannel(pm.Subject)
		if err != nil {
			return nil, err
		}
		msg := c.pubMsgToMsgProto(pm, c.nextSequence)
		batch := batches[c]
		if batch == nil {
			batch = &spb.Batch{}
			batches[c] = batch
		}
		batch.Messages = append(batch.Messages, msg)
		iopm.c = c
		c.nextSequence++
	}
	// Apply one Raft operation per channel batch.
	for c, batch := range batches {
		op := &spb.RaftOperation{
			OpType:       spb.RaftOperation_Publish,
			PublishBatch: batch,
		}
		data, err := op.Marshal()
		if err != nil {
			// Marshaling our own protobuf should never fail; programmer bug.
			panic(err)
		}
		futures[c] = s.raft.Apply(data, 0)
	}
	return futures, nil
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L4052-L4063
go
train
// ackPublisher sends the ack for a message.
func (s *StanServer) ackPublisher(iopm *ioPendingMsg)
// ackPublisher sends the ack for a message.
func (s *StanServer) ackPublisher(iopm *ioPendingMsg) {
	msgAck := &iopm.pa
	msgAck.Guid = iopm.pm.Guid
	needed := msgAck.Size()
	// Reuse the server's scratch buffer to avoid an allocation per ack.
	// NOTE(review): assumes acks are published from a single routine,
	// otherwise s.tmpBuf would race — confirm against callers.
	s.tmpBuf = util.EnsureBufBigEnough(s.tmpBuf, needed)
	n, _ := msgAck.MarshalTo(s.tmpBuf)
	if s.trace {
		pm := &iopm.pm
		s.log.Tracef("[Client:%s] Acking Publisher subj=%s guid=%s", pm.ClientID, pm.Subject, pm.Guid)
	}
	s.ncs.Publish(iopm.m.Reply, s.tmpBuf[:n])
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L4066-L4076
go
train
// Delete a sub from a given list.
func (sub *subState) deleteFromList(sl []*subState) ([]*subState, bool)
// Delete a sub from a given list. func (sub *subState) deleteFromList(sl []*subState) ([]*subState, bool)
{ for i := 0; i < len(sl); i++ { if sl[i] == sub { sl[i] = sl[len(sl)-1] sl[len(sl)-1] = nil sl = sl[:len(sl)-1] return shrinkSubListIfNeeded(sl), true } } return sl, false }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L4080-L4092
go
train
// Checks if we need to do a resize. This is for very large growth then // subsequent return to a more normal size.
func shrinkSubListIfNeeded(sl []*subState) []*subState
// Checks if we need to do a resize. This is for very large growth then // subsequent return to a more normal size. func shrinkSubListIfNeeded(sl []*subState) []*subState
{ lsl := len(sl) csl := cap(sl) // Don't bother if list not too big if csl <= 8 { return sl } pFree := float32(csl-lsl) / float32(csl) if pFree > 0.50 { return append([]*subState(nil), sl...) } return sl }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L4095-L4137
go
train
// removeAllNonDurableSubscribers will remove all non-durable subscribers for the client.
func (s *StanServer) removeAllNonDurableSubscribers(client *client)
// removeAllNonDurableSubscribers will remove all non-durable subscribers for the client.
func (s *StanServer) removeAllNonDurableSubscribers(client *client) {
	// client has been unregistered and no other routine can add/remove
	// subscriptions, so it is safe to use the original.
	client.RLock()
	subs := client.subs
	clientID := client.info.ID
	client.RUnlock()
	var (
		storesToFlush = map[string]stores.SubStore{}
		channels      = map[string]struct{}{}
	)
	for _, sub := range subs {
		sub.RLock()
		subject := sub.subject
		isDurable := sub.IsDurable
		subStore := sub.store
		sub.RUnlock()
		// Get the channel
		c := s.channels.get(subject)
		if c == nil {
			continue
		}
		// Don't remove durables (third argument `false` presumably keeps
		// durable state — confirm against ss.Remove).
		c.ss.Remove(c, sub, false)
		// If the sub is a durable, there may have been an update to storage,
		// so we will want to flush the store. In clustering, during replay,
		// subStore may be nil.
		if isDurable && subStore != nil {
			storesToFlush[subject] = subStore
		}
		channels[subject] = struct{}{}
	}
	if len(storesToFlush) > 0 {
		for subject, subStore := range storesToFlush {
			if err := subStore.Flush(); err != nil {
				s.log.Errorf("[Client:%s] Error flushing store while removing subscriptions: subject=%s, err=%v", clientID, subject, err)
			}
		}
	}
	// Channels with MaxInactivity and no remaining active subscription may
	// now be eligible for deletion.
	for channel := range channels {
		s.channels.maybeStartChannelDeleteTimer(channel, nil)
	}
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L4140-L4149
go
train
// processUnsubscribeRequest will process a unsubscribe request.
func (s *StanServer) processUnsubscribeRequest(m *nats.Msg)
// processUnsubscribeRequest will process a unsubscribe request. func (s *StanServer) processUnsubscribeRequest(m *nats.Msg)
{ req := &pb.UnsubscribeRequest{} err := req.Unmarshal(m.Data) if err != nil { s.log.Errorf("Invalid unsub request from %s", m.Subject) s.sendSubscriptionResponseErr(m.Reply, ErrInvalidUnsubReq) return } s.performmUnsubOrCloseSubscription(m, req, false) }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L4172-L4180
go
train
// Used when processing protocol messages to guarantee ordering. // Since protocols handlers use different subscriptions, a client // may send a message then close the connection, but those protocols // are processed by different internal subscriptions in the server. // Using nats's Conn.Barrier() we ensure that messages have been // processed in their respective callbacks before invoking `f`. // Since we also use a separate connection to handle acks, we // also need to flush the connection used to process ack's and // chained Barrier calls between s.nc and s.nca.
func (s *StanServer) barrier(f func())
// Used when processing protocol messages to guarantee ordering.
// Since protocols handlers use different subscriptions, a client
// may send a message then close the connection, but those protocols
// are processed by different internal subscriptions in the server.
// Using nats's Conn.Barrier() we ensure that messages have been
// processed in their respective callbacks before invoking `f`.
// Since we also use a separate connection to handle acks, we
// also need to flush the connection used to process ack's and
// chained Barrier calls between s.nc and s.nca.
func (s *StanServer) barrier(f func()) {
	s.nc.Barrier(func() {
		// Ensure all pending acks are received by the connection
		s.nca.Flush()
		// Then ensure that all acks have been processed in processAckMsg callbacks
		// before executing the closing function.
		s.nca.Barrier(f)
	})
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L4184-L4219
go
train
// performmUnsubOrCloseSubscription processes the unsub or close subscription // request.
func (s *StanServer) performmUnsubOrCloseSubscription(m *nats.Msg, req *pb.UnsubscribeRequest, isSubClose bool)
// performmUnsubOrCloseSubscription processes the unsub or close subscription
// request.
// NOTE(review): "performm" looks like a typo for "perform"; kept as-is
// because renaming would touch all callers.
func (s *StanServer) performmUnsubOrCloseSubscription(m *nats.Msg, req *pb.UnsubscribeRequest, isSubClose bool) {
	// With partitioning, first verify that this server is handling this
	// channel. If not, do not return an error, since another server will
	// handle it. If no other server is, the client will get a timeout.
	if s.partitions != nil {
		if r := s.partitions.sl.Match(req.Subject); len(r) == 0 {
			return
		}
	}
	// Run after all in-flight protocol/ack callbacks have completed, to
	// guarantee ordering with respect to other client messages.
	s.barrier(func() {
		var err error
		if s.isClustered {
			if isSubClose {
				err = s.replicateCloseSubscription(req)
			} else {
				err = s.replicateRemoveSubscription(req)
			}
		} else {
			s.closeMu.Lock()
			err = s.unsubscribe(req, isSubClose)
			s.closeMu.Unlock()
		}
		// If there was an error, it has been already logged.
		if err == nil {
			// This will check if the channel has MaxInactivity defined,
			// if so and there is no active subscription, it will start the
			// delete timer.
			s.channels.maybeStartChannelDeleteTimer(req.Subject, nil)
		}
		// If err is nil, it will be a non-error response
		s.sendSubscriptionResponseErr(m.Reply, err)
	})
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L4305-L4310
go
train
// Clear the ackTimer. // sub Lock held in entry.
func (sub *subState) clearAckTimer()
// Clear the ackTimer. // sub Lock held in entry. func (sub *subState) clearAckTimer()
{ if sub.ackTimer != nil { sub.ackTimer.Stop() sub.ackTimer = nil } }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L4319-L4347
go
train
// adjustAckTimer adjusts the timer based on a given next // expiration time. // The timer will be stopped if there is no more pending ack. // If there are pending acks, the timer will be reset to the // default sub.ackWait value if the given expiration time is // 0 or in the past. Otherwise, it is set to the remaining time // between the given expiration time and now.
func (sub *subState) adjustAckTimer(nextExpirationTime int64)
// adjustAckTimer adjusts the timer based on a given next // expiration time. // The timer will be stopped if there is no more pending ack. // If there are pending acks, the timer will be reset to the // default sub.ackWait value if the given expiration time is // 0 or in the past. Otherwise, it is set to the remaining time // between the given expiration time and now. func (sub *subState) adjustAckTimer(nextExpirationTime int64)
{ sub.Lock() defer sub.Unlock() // Possible that the subscriber has been destroyed, and timer cleared if sub.ackTimer == nil { return } // Check if there are still pending acks if len(sub.acksPending) > 0 { // Capture time now := time.Now().UnixNano() // If the next expiration time is 0 or less than now, // use the default ackWait if nextExpirationTime <= now { sub.ackTimer.Reset(sub.ackWait) } else { // Compute the time the ackTimer should fire, based // on the given next expiration time and now. fireIn := (nextExpirationTime - now) sub.ackTimer.Reset(time.Duration(fireIn)) } } else { // No more pending acks, clear the timer. sub.clearAckTimer() } }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L4352-L4365
go
train
// Subscribes to the AckInbox subject in order to process subscription's acks // if not already done. // This function grabs and releases the sub's lock.
func (sub *subState) startAckSub(nc *nats.Conn, cb nats.MsgHandler) error
// Subscribes to the AckInbox subject in order to process subscription's acks
// if not already done.
// This function grabs and releases the sub's lock.
func (sub *subState) startAckSub(nc *nats.Conn, cb nats.MsgHandler) error {
	ackSub, err := nc.Subscribe(sub.AckInbox, cb)
	if err != nil {
		return err
	}
	sub.Lock()
	// Should not occur, but if it was already set,
	// unsubscribe old and replace.
	sub.stopAckSub()
	sub.ackSub = ackSub
	// No pending msgs/bytes limit on the ack subscription (-1 = unlimited).
	sub.ackSub.SetPendingLimits(-1, -1)
	sub.Unlock()
	return nil
}
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L4369-L4374
go
train
// Stops subscribing to AckInbox. // Lock assumed held on entry.
func (sub *subState) stopAckSub()
// Stops subscribing to AckInbox. // Lock assumed held on entry. func (sub *subState) stopAckSub()
{ if sub.ackSub != nil { sub.ackSub.Unsubscribe() sub.ackSub = nil } }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L4377-L4382
go
train
// Used to generate durable key. This should not be called on non-durables.
func (sub *subState) durableKey() string
// Used to generate durable key. This should not be called on non-durables. func (sub *subState) durableKey() string
{ if sub.DurableName == "" { return "" } return fmt.Sprintf("%s-%s-%s", sub.ClientID, sub.subject, sub.DurableName) }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L4395-L4397
go
train
// Returns true if this is a "shadow" durable queue subscriber
func (sub *subState) isShadowQueueDurable() bool
// Returns true if this is a "shadow" durable queue subscriber func (sub *subState) isShadowQueueDurable() bool
{ return sub.IsDurable && sub.QGroup != "" && sub.ClientID == "" }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L4410-L4415
go
train
// Used to generate durable key. This should not be called on non-durables.
func durableKey(sr *pb.SubscriptionRequest) string
// Used to generate durable key. This should not be called on non-durables. func durableKey(sr *pb.SubscriptionRequest) string
{ if sr.DurableName == "" { return "" } return fmt.Sprintf("%s-%s-%s", sr.ClientID, sr.Subject, sr.DurableName) }
nats-io/nats-streaming-server
57c6c84265c0012a1efef365703c221329804d4c
server/server.go
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L4419-L4439
go
train
// replicateSub replicates the SubscriptionRequest to nodes in the cluster via // Raft.
func (s *StanServer) replicateSub(sr *pb.SubscriptionRequest, ackInbox string, subID uint64) (*subState, error)
// replicateSub replicates the SubscriptionRequest to nodes in the cluster via
// Raft. Blocks until the raft apply completes; returns the created sub
// (from the FSM response) or an error.
func (s *StanServer) replicateSub(sr *pb.SubscriptionRequest, ackInbox string, subID uint64) (*subState, error) {
	op := &spb.RaftOperation{
		OpType: spb.RaftOperation_Subscribe,
		Sub: &spb.AddSubscription{
			Request:  sr,
			AckInbox: ackInbox,
			ID:       subID,
		},
	}
	data, err := op.Marshal()
	if err != nil {
		// Marshaling our own protobuf should never fail; programmer bug.
		panic(err)
	}
	// Replicate operation and wait on result.
	future := s.raft.Apply(data, 0)
	if err := future.Error(); err != nil {
		return nil, err
	}
	// FSM response carries both the created sub and any apply-side error.
	rs := future.Response().(*replicatedSub)
	return rs.sub, rs.err
}