// Copyright (c) 2017 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
// Package server contains the code to run the dbnode server.
package server
import (
"context"
"errors"
"fmt"
"io"
"math"
"net/http"
"os"
"path"
"runtime"
"runtime/debug"
"strings"
"sync"
"time"
clusterclient "github.com/m3db/m3/src/cluster/client"
"github.com/m3db/m3/src/cluster/client/etcd"
"github.com/m3db/m3/src/cluster/generated/proto/commonpb"
"github.com/m3db/m3/src/cluster/generated/proto/kvpb"
"github.com/m3db/m3/src/cluster/kv"
"github.com/m3db/m3/src/cmd/services/m3dbnode/config"
queryconfig "github.com/m3db/m3/src/cmd/services/m3query/config"
"github.com/m3db/m3/src/dbnode/client"
"github.com/m3db/m3/src/dbnode/encoding"
"github.com/m3db/m3/src/dbnode/encoding/m3tsz"
"github.com/m3db/m3/src/dbnode/encoding/proto"
"github.com/m3db/m3/src/dbnode/environment"
"github.com/m3db/m3/src/dbnode/kvconfig"
"github.com/m3db/m3/src/dbnode/namespace"
hjcluster "github.com/m3db/m3/src/dbnode/network/server/httpjson/cluster"
hjnode "github.com/m3db/m3/src/dbnode/network/server/httpjson/node"
"github.com/m3db/m3/src/dbnode/network/server/tchannelthrift"
ttcluster "github.com/m3db/m3/src/dbnode/network/server/tchannelthrift/cluster"
ttnode "github.com/m3db/m3/src/dbnode/network/server/tchannelthrift/node"
"github.com/m3db/m3/src/dbnode/persist/fs"
"github.com/m3db/m3/src/dbnode/persist/fs/commitlog"
"github.com/m3db/m3/src/dbnode/ratelimit"
"github.com/m3db/m3/src/dbnode/retention"
m3dbruntime "github.com/m3db/m3/src/dbnode/runtime"
"github.com/m3db/m3/src/dbnode/sharding"
"github.com/m3db/m3/src/dbnode/storage"
"github.com/m3db/m3/src/dbnode/storage/block"
"github.com/m3db/m3/src/dbnode/storage/bootstrap/result"
"github.com/m3db/m3/src/dbnode/storage/cluster"
"github.com/m3db/m3/src/dbnode/storage/index"
"github.com/m3db/m3/src/dbnode/storage/limits"
"github.com/m3db/m3/src/dbnode/storage/limits/permits"
"github.com/m3db/m3/src/dbnode/storage/series"
"github.com/m3db/m3/src/dbnode/topology"
"github.com/m3db/m3/src/dbnode/ts"
"github.com/m3db/m3/src/dbnode/ts/writes"
xtchannel "github.com/m3db/m3/src/dbnode/x/tchannel"
"github.com/m3db/m3/src/dbnode/x/xio"
"github.com/m3db/m3/src/dbnode/x/xpool"
m3ninxindex "github.com/m3db/m3/src/m3ninx/index"
"github.com/m3db/m3/src/m3ninx/postings"
"github.com/m3db/m3/src/m3ninx/postings/roaring"
"github.com/m3db/m3/src/query/api/v1/handler/placement"
"github.com/m3db/m3/src/query/api/v1/handler/prometheus/handleroptions"
xconfig "github.com/m3db/m3/src/x/config"
xcontext "github.com/m3db/m3/src/x/context"
xdebug "github.com/m3db/m3/src/x/debug"
xdocs "github.com/m3db/m3/src/x/docs"
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/instrument"
"github.com/m3db/m3/src/x/mmap"
xos "github.com/m3db/m3/src/x/os"
"github.com/m3db/m3/src/x/pool"
"github.com/m3db/m3/src/x/serialize"
xsync "github.com/m3db/m3/src/x/sync"
apachethrift "github.com/apache/thrift/lib/go/thrift"
"github.com/m3dbx/vellum/levenshtein"
"github.com/m3dbx/vellum/levenshtein2"
"github.com/m3dbx/vellum/regexp"
opentracing "github.com/opentracing/opentracing-go"
"github.com/uber-go/tally"
"github.com/uber/tchannel-go"
"go.etcd.io/etcd/embed"
"go.uber.org/zap"
)
// Compile-time defaults and identifiers for the dbnode server runtime.
// Most user-tunable knobs live in config.DBConfiguration instead; the values
// here are fixed at build time.
const (
	// bootstrapConfigInitTimeout bounds how long startup waits on the initial
	// bootstrap configuration. NOTE(review): consumer is outside this chunk —
	// confirm against the rest of the package.
	bootstrapConfigInitTimeout = 10 * time.Second

	// serverGracefulCloseTimeout bounds how long a graceful server shutdown
	// may take before it is abandoned. NOTE(review): consumer is outside this
	// chunk — confirm.
	serverGracefulCloseTimeout = 10 * time.Second

	// bgProcessLimitInterval and maxBgProcessLimitMonitorDuration presumably
	// control the cadence and total lifetime of the background process-limits
	// validator started via bgValidateProcessLimits — its implementation is
	// outside this chunk, so verify there.
	bgProcessLimitInterval           = 10 * time.Second
	maxBgProcessLimitMonitorDuration = 5 * time.Minute

	// cpuProfileDuration is the CPU profile length passed to the debug
	// endpoint's zip writer (xdebug.NewPlacementAndNamespaceZipWriterWithDefaultSources).
	cpuProfileDuration = 5 * time.Second

	// filePathPrefixLockFile is the name of the lock file created under the
	// configured filesystem path prefix so that only one dbnode process uses
	// a given data directory at a time.
	filePathPrefixLockFile = ".lock"

	// defaultServiceName is the tracing service name used when
	// cfg.Tracing.ServiceName is empty.
	defaultServiceName = "m3dbnode"

	// skipRaiseProcessLimitsEnvVar, when set to
	// skipRaiseProcessLimitsEnvVarTrue, disables the benign attempt to raise
	// the process file-descriptor limit at startup.
	skipRaiseProcessLimitsEnvVar     = "SKIP_PROCESS_LIMITS_RAISE"
	skipRaiseProcessLimitsEnvVarTrue = "true"

	// mmapReporterMetricName and mmapReporterTagName name the metric and tag
	// key for mmap'd byte reporting. NOTE(review): consumed by newMmapReporter,
	// which is outside this chunk — confirm.
	mmapReporterMetricName = "mmap-mapped-bytes"
	mmapReporterTagName    = "map-name"
)
// RunOptions provides options for running the server. To keep the struct
// backwards compatible, extend it only by adding new fields.
type RunOptions struct {
	// ConfigFile is the YAML configuration file to use to run the server.
	ConfigFile string

	// Config is an alternate way to provide configuration and will be used
	// instead of parsing ConfigFile if ConfigFile is not specified.
	Config config.DBConfiguration

	// BootstrapCh is a channel to listen on to be notified of bootstrap.
	BootstrapCh chan<- struct{}

	// EmbeddedKVCh is a channel to listen on to be notified that the
	// embedded KV (etcd) has bootstrapped.
	EmbeddedKVCh chan<- struct{}

	// ClientCh is a channel to listen on to share the same m3db client that
	// this server uses.
	ClientCh chan<- client.Client

	// ClusterClientCh is a channel to listen on to share the same m3 cluster
	// client that this server uses.
	ClusterClientCh chan<- clusterclient.Client

	// KVStoreCh is a channel to listen on to share the same m3 kv store
	// client that this server uses.
	KVStoreCh chan<- kv.Store

	// InterruptCh is a programmatic interrupt channel to supply to
	// interrupt and shutdown the server.
	InterruptCh <-chan error

	// CustomOptions are custom options to apply to the session.
	CustomOptions []client.CustomAdminOption

	// Transforms are transforms to apply to the database storage options.
	Transforms []storage.OptionTransform

	// StorageOptions are additional storage options.
	StorageOptions StorageOptions

	// CustomBuildTags are additional tags to be added to the instrument
	// build reporter.
	CustomBuildTags map[string]string
}
// Run runs the server programmatically given a filename for the
// configuration file.
func Run(runOpts RunOptions) {
var cfg config.DBConfiguration
if runOpts.ConfigFile != "" {
var rootCfg config.Configuration
if err := xconfig.LoadFile(&rootCfg, runOpts.ConfigFile, xconfig.Options{}); err != nil {
// NB(r): Use fmt.Fprintf(os.Stderr, ...) to avoid etcd.SetGlobals()
// sending stdlib "log" to black hole. Don't remove unless with good reason.
fmt.Fprintf(os.Stderr, "unable to load %s: %v", runOpts.ConfigFile, err)
os.Exit(1)
}
cfg = *rootCfg.DB
} else {
cfg = runOpts.Config
}
err := cfg.Validate()
if err != nil {
// NB(r): Use fmt.Fprintf(os.Stderr, ...) to avoid etcd.SetGlobals()
// sending stdlib "log" to black hole. Don't remove unless with good reason.
fmt.Fprintf(os.Stderr, "error initializing config defaults and validating config: %v", err)
os.Exit(1)
}
logger, err := cfg.LoggingOrDefault().BuildLogger()
if err != nil {
// NB(r): Use fmt.Fprintf(os.Stderr, ...) to avoid etcd.SetGlobals()
// sending stdlib "log" to black hole. Don't remove unless with good reason.
fmt.Fprintf(os.Stderr, "unable to create logger: %v", err)
os.Exit(1)
}
defer logger.Sync()
cfg.Debug.SetRuntimeValues(logger)
xconfig.WarnOnDeprecation(cfg, logger)
// By default attempt to raise process limits, which is a benign operation.
skipRaiseLimits := strings.TrimSpace(os.Getenv(skipRaiseProcessLimitsEnvVar))
if skipRaiseLimits != skipRaiseProcessLimitsEnvVarTrue {
// Raise fd limits to nr_open system limit
result, err := xos.RaiseProcessNoFileToNROpen()
if err != nil {
logger.Warn("unable to raise rlimit", zap.Error(err))
} else {
logger.Info("raised rlimit no file fds limit",
zap.Bool("required", result.RaisePerformed),
zap.Uint64("sysNROpenValue", result.NROpenValue),
zap.Uint64("noFileMaxValue", result.NoFileMaxValue),
zap.Uint64("noFileCurrValue", result.NoFileCurrValue))
}
}
// Parse file and directory modes
newFileMode, err := cfg.Filesystem.ParseNewFileMode()
if err != nil {
logger.Fatal("could not parse new file mode", zap.Error(err))
}
newDirectoryMode, err := cfg.Filesystem.ParseNewDirectoryMode()
if err != nil {
logger.Fatal("could not parse new directory mode", zap.Error(err))
}
// Obtain a lock on `filePathPrefix`, or exit if another process already has it.
// The lock consists of a lock file (on the file system) and a lock in memory.
// When the process exits gracefully, both the lock file and the lock will be removed.
// If the process exits ungracefully, only the lock in memory will be removed, the lock
// file will remain on the file system. When a dbnode starts after an ungraceful stop,
// it will be able to acquire the lock despite the fact that the lock file exists.
lockPath := path.Join(cfg.Filesystem.FilePathPrefixOrDefault(), filePathPrefixLockFile)
fslock, err := createAndAcquireLockfile(lockPath, newDirectoryMode)
if err != nil {
logger.Fatal("could not acqurie lock", zap.String("path", lockPath), zap.Error(err))
}
// nolint: errcheck
defer fslock.releaseLockfile()
go bgValidateProcessLimits(logger)
debug.SetGCPercent(cfg.GCPercentageOrDefault())
scope, _, err := cfg.MetricsOrDefault().NewRootScope()
if err != nil {
logger.Fatal("could not connect to metrics", zap.Error(err))
}
hostID, err := cfg.HostIDOrDefault().Resolve()
if err != nil {
logger.Fatal("could not resolve local host ID", zap.Error(err))
}
var (
tracer opentracing.Tracer
traceCloser io.Closer
)
if cfg.Tracing == nil {
tracer = opentracing.NoopTracer{}
logger.Info("tracing disabled; set `tracing.backend` to enable")
} else {
// setup tracer
serviceName := cfg.Tracing.ServiceName
if serviceName == "" {
serviceName = defaultServiceName
}
tracer, traceCloser, err = cfg.Tracing.NewTracer(serviceName, scope.SubScope("jaeger"), logger)
if err != nil {
tracer = opentracing.NoopTracer{}
logger.Warn("could not initialize tracing; using no-op tracer instead",
zap.String("service", serviceName), zap.Error(err))
} else {
defer traceCloser.Close()
logger.Info("tracing enabled", zap.String("service", serviceName))
}
}
// Presence of KV server config indicates embedded etcd cluster
discoveryConfig := cfg.DiscoveryOrDefault()
envConfig, err := discoveryConfig.EnvironmentConfig(hostID)
if err != nil {
logger.Fatal("could not get env config from discovery config", zap.Error(err))
}
if envConfig.SeedNodes == nil {
logger.Info("no seed nodes set, using dedicated etcd cluster")
} else {
// Default etcd client clusters if not set already
service, err := envConfig.Services.SyncCluster()
if err != nil {
logger.Fatal("invalid cluster configuration", zap.Error(err))
}
clusters := service.Service.ETCDClusters
seedNodes := envConfig.SeedNodes.InitialCluster
if len(clusters) == 0 {
endpoints, err := config.InitialClusterEndpoints(seedNodes)
if err != nil {
logger.Fatal("unable to create etcd clusters", zap.Error(err))
}
zone := service.Service.Zone
logger.Info("using seed nodes etcd cluster",
zap.String("zone", zone), zap.Strings("endpoints", endpoints))
service.Service.ETCDClusters = []etcd.ClusterConfig{{
Zone: zone,
Endpoints: endpoints,
}}
}
seedNodeHostIDs := make([]string, 0, len(seedNodes))
for _, entry := range seedNodes {
seedNodeHostIDs = append(seedNodeHostIDs, entry.HostID)
}
logger.Info("resolving seed node configuration",
zap.String("hostID", hostID), zap.Strings("seedNodeHostIDs", seedNodeHostIDs),
)
if !config.IsSeedNode(seedNodes, hostID) {
logger.Info("not a seed node, using cluster seed nodes")
} else {
logger.Info("seed node, starting etcd server")
etcdCfg, err := config.NewEtcdEmbedConfig(cfg)
if err != nil {
logger.Fatal("unable to create etcd config", zap.Error(err))
}
e, err := embed.StartEtcd(etcdCfg)
if err != nil {
logger.Fatal("could not start embedded etcd", zap.Error(err))
}
if runOpts.EmbeddedKVCh != nil {
// Notify on embedded KV bootstrap chan if specified
runOpts.EmbeddedKVCh <- struct{}{}
}
defer e.Close()
}
}
// By default use histogram timers for timers that
// are constructed allowing for type to be picked
// by the caller using instrument.NewTimer(...).
timerOpts := instrument.NewHistogramTimerOptions(instrument.HistogramTimerOptions{})
timerOpts.StandardSampleRate = cfg.MetricsOrDefault().SampleRate()
var (
opts = storage.NewOptions()
iOpts = opts.InstrumentOptions().
SetLogger(logger).
SetMetricsScope(scope).
SetTimerOptions(timerOpts).
SetTracer(tracer).
SetCustomBuildTags(runOpts.CustomBuildTags)
)
opts = opts.SetInstrumentOptions(iOpts)
// Only override the default MemoryTracker (which has default limits) if a custom limit has
// been set.
if cfg.Limits.MaxOutstandingRepairedBytes > 0 {
memTrackerOptions := storage.NewMemoryTrackerOptions(cfg.Limits.MaxOutstandingRepairedBytes)
memTracker := storage.NewMemoryTracker(memTrackerOptions)
opts = opts.SetMemoryTracker(memTracker)
}
opentracing.SetGlobalTracer(tracer)
if cfg.Index.MaxQueryIDsConcurrency != 0 {
queryIDsWorkerPool := xsync.NewWorkerPool(cfg.Index.MaxQueryIDsConcurrency)
queryIDsWorkerPool.Init()
opts = opts.SetQueryIDsWorkerPool(queryIDsWorkerPool)
} else {
logger.Warn("max index query IDs concurrency was not set, falling back to default value")
}
// Set global index options.
if n := cfg.Index.RegexpDFALimitOrDefault(); n > 0 {
regexp.SetStateLimit(n)
levenshtein.SetStateLimit(n)
levenshtein2.SetStateLimit(n)
}
if n := cfg.Index.RegexpFSALimitOrDefault(); n > 0 {
regexp.SetDefaultLimit(n)
}
buildReporter := instrument.NewBuildReporter(iOpts)
if err := buildReporter.Start(); err != nil {
logger.Fatal("unable to start build reporter", zap.Error(err))
}
defer buildReporter.Stop()
mmapCfg := cfg.Filesystem.MmapConfigurationOrDefault()
shouldUseHugeTLB := mmapCfg.HugeTLB.Enabled
if shouldUseHugeTLB {
// Make sure the host supports HugeTLB before proceeding with it to prevent
// excessive log spam.
shouldUseHugeTLB, err = hostSupportsHugeTLB()
if err != nil {
logger.Fatal("could not determine if host supports HugeTLB", zap.Error(err))
}
if !shouldUseHugeTLB {
logger.Warn("host doesn't support HugeTLB, proceeding without it")
}
}
mmapReporter := newMmapReporter(scope)
mmapReporterCtx, cancel := context.WithCancel(context.Background())
defer cancel()
go mmapReporter.Run(mmapReporterCtx)
opts = opts.SetMmapReporter(mmapReporter)
runtimeOpts := m3dbruntime.NewOptions().
SetPersistRateLimitOptions(ratelimit.NewOptions().
SetLimitEnabled(true).
SetLimitMbps(cfg.Filesystem.ThroughputLimitMbpsOrDefault()).
SetLimitCheckEvery(cfg.Filesystem.ThroughputCheckEveryOrDefault())).
SetWriteNewSeriesAsync(cfg.WriteNewSeriesAsyncOrDefault()).
SetWriteNewSeriesBackoffDuration(cfg.WriteNewSeriesBackoffDurationOrDefault())
if lruCfg := cfg.Cache.SeriesConfiguration().LRU; lruCfg != nil {
runtimeOpts = runtimeOpts.SetMaxWiredBlocks(lruCfg.MaxBlocks)
}
// Setup query stats tracking.
var (
docsLimit = limits.DefaultLookbackLimitOptions()
bytesReadLimit = limits.DefaultLookbackLimitOptions()
diskSeriesReadLimit = limits.DefaultLookbackLimitOptions()
aggDocsLimit = limits.DefaultLookbackLimitOptions()
)
if limitConfig := runOpts.Config.Limits.MaxRecentlyQueriedSeriesBlocks; limitConfig != nil {
docsLimit.Limit = limitConfig.Value
docsLimit.Lookback = limitConfig.Lookback
}
if limitConfig := runOpts.Config.Limits.MaxRecentlyQueriedSeriesDiskBytesRead; limitConfig != nil {
bytesReadLimit.Limit = limitConfig.Value
bytesReadLimit.Lookback = limitConfig.Lookback
}
if limitConfig := runOpts.Config.Limits.MaxRecentlyQueriedSeriesDiskRead; limitConfig != nil {
diskSeriesReadLimit.Limit = limitConfig.Value
diskSeriesReadLimit.Lookback = limitConfig.Lookback
}
if limitConfig := runOpts.Config.Limits.MaxRecentlyQueriedMetadata; limitConfig != nil {
aggDocsLimit.Limit = limitConfig.Value
aggDocsLimit.Lookback = limitConfig.Lookback
}
limitOpts := limits.NewOptions().
SetDocsLimitOpts(docsLimit).
SetBytesReadLimitOpts(bytesReadLimit).
SetDiskSeriesReadLimitOpts(diskSeriesReadLimit).
SetAggregateDocsLimitOpts(aggDocsLimit).
SetInstrumentOptions(iOpts)
if builder := opts.SourceLoggerBuilder(); builder != nil {
limitOpts = limitOpts.SetSourceLoggerBuilder(builder)
}
queryLimits, err := limits.NewQueryLimits(limitOpts)
if err != nil {
logger.Fatal("could not construct docs query limits from config", zap.Error(err))
}
queryLimits.Start()
defer queryLimits.Stop()
seriesReadPermits := permits.NewLookbackLimitPermitsManager(
"disk-series-read",
diskSeriesReadLimit,
iOpts,
limitOpts.SourceLoggerBuilder(),
runOpts.Config.FetchTagged.SeriesBlocksPerBatchOrDefault(),
)
seriesReadPermits.Start()
defer seriesReadPermits.Stop()
opts = opts.SetPermitsOptions(opts.PermitsOptions().
SetSeriesReadPermitsManager(seriesReadPermits))
// Setup postings list cache.
var (
plCacheConfig = cfg.Cache.PostingsListConfiguration()
plCacheSize = plCacheConfig.SizeOrDefault()
plCacheOptions = index.PostingsListCacheOptions{
InstrumentOptions: opts.InstrumentOptions().
SetMetricsScope(scope.SubScope("postings-list-cache")),
}
)
postingsListCache, stopReporting, err := index.NewPostingsListCache(plCacheSize, plCacheOptions)
if err != nil {
logger.Fatal("could not construct postings list cache", zap.Error(err))
}
defer stopReporting()
// Setup index regexp compilation cache.
m3ninxindex.SetRegexpCacheOptions(m3ninxindex.RegexpCacheOptions{
Size: cfg.Cache.RegexpConfiguration().SizeOrDefault(),
Scope: iOpts.MetricsScope(),
})
for _, transform := range runOpts.Transforms {
opts = transform(opts)
}
// FOLLOWUP(prateek): remove this once we have the runtime options<->index wiring done
indexOpts := opts.IndexOptions()
insertMode := index.InsertSync
if cfg.WriteNewSeriesAsyncOrDefault() {
insertMode = index.InsertAsync
}
indexOpts = indexOpts.SetInsertMode(insertMode).
SetPostingsListCache(postingsListCache).
SetReadThroughSegmentOptions(index.ReadThroughSegmentOptions{
CacheRegexp: plCacheConfig.CacheRegexpOrDefault(),
CacheTerms: plCacheConfig.CacheTermsOrDefault(),
}).
SetMmapReporter(mmapReporter).
SetQueryLimits(queryLimits)
opts = opts.SetIndexOptions(indexOpts)
if tick := cfg.Tick; tick != nil {
runtimeOpts = runtimeOpts.
SetTickSeriesBatchSize(tick.SeriesBatchSize).
SetTickPerSeriesSleepDuration(tick.PerSeriesSleepDuration).
SetTickMinimumInterval(tick.MinimumInterval)
}
runtimeOptsMgr := m3dbruntime.NewOptionsManager()
if err := runtimeOptsMgr.Update(runtimeOpts); err != nil {
logger.Fatal("could not set initial runtime options", zap.Error(err))
}
defer runtimeOptsMgr.Close()
opts = opts.SetRuntimeOptionsManager(runtimeOptsMgr)
policy, err := cfg.PoolingPolicyOrDefault()
if err != nil {
logger.Fatal("could not get pooling policy", zap.Error(err))
}
tagEncoderPool := serialize.NewTagEncoderPool(
serialize.NewTagEncoderOptions(),
poolOptions(
policy.TagEncoderPool,
scope.SubScope("tag-encoder-pool")))
tagEncoderPool.Init()
tagDecoderPool := serialize.NewTagDecoderPool(
serialize.NewTagDecoderOptions(serialize.TagDecoderOptionsConfig{}),
poolOptions(
policy.TagDecoderPool,
scope.SubScope("tag-decoder-pool")))
tagDecoderPool.Init()
// Pass nil for block.LeaseVerifier for now and it will be set after the
// db is constructed (since the db is required to construct a
// block.LeaseVerifier). Initialized here because it needs to be propagated
// to both the DB and the blockRetriever.
blockLeaseManager := block.NewLeaseManager(nil)
opts = opts.SetBlockLeaseManager(blockLeaseManager)
fsopts := fs.NewOptions().
SetClockOptions(opts.ClockOptions()).
SetInstrumentOptions(opts.InstrumentOptions().
SetMetricsScope(scope.SubScope("database.fs"))).
SetFilePathPrefix(cfg.Filesystem.FilePathPrefixOrDefault()).
SetNewFileMode(newFileMode).
SetNewDirectoryMode(newDirectoryMode).
SetWriterBufferSize(cfg.Filesystem.WriteBufferSizeOrDefault()).
SetDataReaderBufferSize(cfg.Filesystem.DataReadBufferSizeOrDefault()).
SetInfoReaderBufferSize(cfg.Filesystem.InfoReadBufferSizeOrDefault()).
SetSeekReaderBufferSize(cfg.Filesystem.SeekReadBufferSizeOrDefault()).
SetMmapEnableHugeTLB(shouldUseHugeTLB).
SetMmapHugeTLBThreshold(mmapCfg.HugeTLB.Threshold).
SetRuntimeOptionsManager(runtimeOptsMgr).
SetTagEncoderPool(tagEncoderPool).
SetTagDecoderPool(tagDecoderPool).
SetForceIndexSummariesMmapMemory(cfg.Filesystem.ForceIndexSummariesMmapMemoryOrDefault()).
SetForceBloomFilterMmapMemory(cfg.Filesystem.ForceBloomFilterMmapMemoryOrDefault()).
SetIndexBloomFilterFalsePositivePercent(cfg.Filesystem.BloomFilterFalsePositivePercentOrDefault()).
SetMmapReporter(mmapReporter)
var commitLogQueueSize int
cfgCommitLog := cfg.CommitLogOrDefault()
specified := cfgCommitLog.Queue.Size
switch cfgCommitLog.Queue.CalculationType {
case config.CalculationTypeFixed:
commitLogQueueSize = specified
case config.CalculationTypePerCPU:
commitLogQueueSize = specified * runtime.NumCPU()
default:
logger.Fatal("unknown commit log queue size type",
zap.Any("type", cfgCommitLog.Queue.CalculationType))
}
var commitLogQueueChannelSize int
if cfgCommitLog.QueueChannel != nil {
specified := cfgCommitLog.QueueChannel.Size
switch cfgCommitLog.Queue.CalculationType {
case config.CalculationTypeFixed:
commitLogQueueChannelSize = specified
case config.CalculationTypePerCPU:
commitLogQueueChannelSize = specified * runtime.NumCPU()
default:
logger.Fatal("unknown commit log queue channel size type",
zap.Any("type", cfgCommitLog.Queue.CalculationType))
}
} else {
commitLogQueueChannelSize = int(float64(commitLogQueueSize) / commitlog.MaximumQueueSizeQueueChannelSizeRatio)
}
// Set the series cache policy.
seriesCachePolicy := cfg.Cache.SeriesConfiguration().Policy
opts = opts.SetSeriesCachePolicy(seriesCachePolicy)
// Apply pooling options.
poolingPolicy, err := cfg.PoolingPolicyOrDefault()
if err != nil {
logger.Fatal("could not get pooling policy", zap.Error(err))
}
opts = withEncodingAndPoolingOptions(cfg, logger, opts, poolingPolicy)
opts = opts.SetCommitLogOptions(opts.CommitLogOptions().
SetInstrumentOptions(opts.InstrumentOptions()).
SetFilesystemOptions(fsopts).
SetStrategy(commitlog.StrategyWriteBehind).
SetFlushSize(cfgCommitLog.FlushMaxBytes).
SetFlushInterval(cfgCommitLog.FlushEvery).
SetBacklogQueueSize(commitLogQueueSize).
SetBacklogQueueChannelSize(commitLogQueueChannelSize))
// Setup the block retriever
switch seriesCachePolicy {
case series.CacheAll:
// No options needed to be set
default:
// All other caching strategies require retrieving series from disk
// to service a cache miss
retrieverOpts := fs.NewBlockRetrieverOptions().
SetBytesPool(opts.BytesPool()).
SetRetrieveRequestPool(opts.RetrieveRequestPool()).
SetIdentifierPool(opts.IdentifierPool()).
SetBlockLeaseManager(blockLeaseManager).
SetQueryLimits(queryLimits)
if blockRetrieveCfg := cfg.BlockRetrieve; blockRetrieveCfg != nil {
if v := blockRetrieveCfg.FetchConcurrency; v != nil {
retrieverOpts = retrieverOpts.SetFetchConcurrency(*v)
}
if v := blockRetrieveCfg.CacheBlocksOnRetrieve; v != nil {
retrieverOpts = retrieverOpts.SetCacheBlocksOnRetrieve(*v)
}
}
blockRetrieverMgr := block.NewDatabaseBlockRetrieverManager(
func(md namespace.Metadata, shardSet sharding.ShardSet) (block.DatabaseBlockRetriever, error) {
retriever, err := fs.NewBlockRetriever(retrieverOpts, fsopts)
if err != nil {
return nil, err
}
if err := retriever.Open(md, shardSet); err != nil {
return nil, err
}
return retriever, nil
})
opts = opts.SetDatabaseBlockRetrieverManager(blockRetrieverMgr)
}
// Set the persistence manager
pm, err := fs.NewPersistManager(fsopts)
if err != nil {
logger.Fatal("could not create persist manager", zap.Error(err))
}
opts = opts.SetPersistManager(pm)
// Set the index claims manager
icm, err := fs.NewIndexClaimsManager(fsopts)
if err != nil {
logger.Fatal("could not create index claims manager", zap.Error(err))
}
defer func() {
// Reset counter of index claims managers after server teardown.
fs.ResetIndexClaimsManagersUnsafe()
}()
opts = opts.SetIndexClaimsManager(icm)
if value := cfg.ForceColdWritesEnabled; value != nil {
// Allow forcing cold writes to be enabled by config.
opts = opts.SetForceColdWritesEnabled(*value)
}
forceColdWrites := opts.ForceColdWritesEnabled()
var envCfgResults environment.ConfigureResults
if len(envConfig.Statics) == 0 {
logger.Info("creating dynamic config service client with m3cluster")
envCfgResults, err = envConfig.Configure(environment.ConfigurationParameters{
InstrumentOpts: iOpts,
HashingSeed: cfg.Hashing.Seed,
NewDirectoryMode: newDirectoryMode,
ForceColdWritesEnabled: forceColdWrites,
})
if err != nil {
logger.Fatal("could not initialize dynamic config", zap.Error(err))
}
} else {
logger.Info("creating static config service client with m3cluster")
envCfgResults, err = envConfig.Configure(environment.ConfigurationParameters{
InstrumentOpts: iOpts,
HostID: hostID,
ForceColdWritesEnabled: forceColdWrites,
})
if err != nil {
logger.Fatal("could not initialize static config", zap.Error(err))
}
}
syncCfg, err := envCfgResults.SyncCluster()
if err != nil {
logger.Fatal("invalid cluster config", zap.Error(err))
}
if runOpts.ClusterClientCh != nil {
runOpts.ClusterClientCh <- syncCfg.ClusterClient
}
if runOpts.KVStoreCh != nil {
runOpts.KVStoreCh <- syncCfg.KVStore
}
opts = opts.SetNamespaceInitializer(syncCfg.NamespaceInitializer)
// Set tchannelthrift options.
ttopts := tchannelthrift.NewOptions().
SetClockOptions(opts.ClockOptions()).
SetInstrumentOptions(opts.InstrumentOptions()).
SetTopologyInitializer(syncCfg.TopologyInitializer).
SetIdentifierPool(opts.IdentifierPool()).
SetTagEncoderPool(tagEncoderPool).
SetTagDecoderPool(tagDecoderPool).
SetCheckedBytesWrapperPool(opts.CheckedBytesWrapperPool()).
SetMaxOutstandingWriteRequests(cfg.Limits.MaxOutstandingWriteRequests).
SetMaxOutstandingReadRequests(cfg.Limits.MaxOutstandingReadRequests).
SetQueryLimits(queryLimits).
SetPermitsOptions(opts.PermitsOptions())
// Start servers before constructing the DB so orchestration tools can check health endpoints
// before topology is set.
var (
contextPool = opts.ContextPool()
tchannelOpts = xtchannel.NewDefaultChannelOptions()
// Pass nil for the database argument because we haven't constructed it yet. We'll call
// SetDatabase() once we've initialized it.
service = ttnode.NewService(nil, ttopts)
)
if cfg.TChannel != nil {
tchannelOpts.MaxIdleTime = cfg.TChannel.MaxIdleTime
tchannelOpts.IdleCheckInterval = cfg.TChannel.IdleCheckInterval
}
tchanOpts := ttnode.NewOptions(tchannelOpts).
SetInstrumentOptions(opts.InstrumentOptions())
if fn := runOpts.StorageOptions.TChanChannelFn; fn != nil {
tchanOpts = tchanOpts.SetTChanChannelFn(fn)
}
if fn := runOpts.StorageOptions.TChanNodeServerFn; fn != nil {
tchanOpts = tchanOpts.SetTChanNodeServerFn(fn)
}
listenAddress := cfg.ListenAddressOrDefault()
tchannelthriftNodeClose, err := ttnode.NewServer(service,
listenAddress, contextPool, tchanOpts).ListenAndServe()
if err != nil {
logger.Fatal("could not open tchannelthrift interface",
zap.String("address", listenAddress), zap.Error(err))
}
defer tchannelthriftNodeClose()
logger.Info("node tchannelthrift: listening", zap.String("address", listenAddress))
httpListenAddress := cfg.HTTPNodeListenAddressOrDefault()
httpjsonNodeClose, err := hjnode.NewServer(service,
httpListenAddress, contextPool, nil).ListenAndServe()
if err != nil {
logger.Fatal("could not open httpjson interface",
zap.String("address", httpListenAddress), zap.Error(err))
}
defer httpjsonNodeClose()
logger.Info("node httpjson: listening", zap.String("address", httpListenAddress))
debugListenAddress := cfg.DebugListenAddressOrDefault()
if debugListenAddress != "" {
var debugWriter xdebug.ZipWriter
handlerOpts, err := placement.NewHandlerOptions(syncCfg.ClusterClient,
queryconfig.Configuration{}, nil, iOpts)
if err != nil {
logger.Warn("could not create handler options for debug writer", zap.Error(err))
} else {
envCfgCluster, err := envConfig.Services.SyncCluster()
if err != nil || envCfgCluster.Service == nil {
logger.Warn("could not get cluster config for debug writer",
zap.Error(err),
zap.Bool("envCfgClusterServiceIsNil", envCfgCluster.Service == nil))
} else {
debugWriter, err = xdebug.NewPlacementAndNamespaceZipWriterWithDefaultSources(
cpuProfileDuration,
syncCfg.ClusterClient,
handlerOpts,
[]handleroptions.ServiceNameAndDefaults{
{
ServiceName: handleroptions.M3DBServiceName,
Defaults: []handleroptions.ServiceOptionsDefault{
handleroptions.WithDefaultServiceEnvironment(envCfgCluster.Service.Env),
handleroptions.WithDefaultServiceZone(envCfgCluster.Service.Zone),
},
},
},
iOpts)
if err != nil {
logger.Error("unable to create debug writer", zap.Error(err))
}
}
}
go func() {
mux := http.DefaultServeMux
if debugWriter != nil {
if err := debugWriter.RegisterHandler(xdebug.DebugURL, mux); err != nil {
logger.Error("unable to register debug writer endpoint", zap.Error(err))
}
}
if err := http.ListenAndServe(debugListenAddress, mux); err != nil {
logger.Error("debug server could not listen",
zap.String("address", debugListenAddress), zap.Error(err))
} else {
logger.Info("debug server listening",
zap.String("address", debugListenAddress),
)
}
}()
}
topo, err := syncCfg.TopologyInitializer.Init()
if err != nil {
logger.Fatal("could not initialize m3db topology", zap.Error(err))
}
var protoEnabled bool
if cfg.Proto != nil && cfg.Proto.Enabled {
protoEnabled = true
}
schemaRegistry := namespace.NewSchemaRegistry(protoEnabled, logger)
// For application m3db client integration test convenience (where a local dbnode is started as a docker container),
// we allow loading user schema from local file into schema registry.
if protoEnabled {
for nsID, protoConfig := range cfg.Proto.SchemaRegistry {
dummyDeployID := "fromconfig"
if err := namespace.LoadSchemaRegistryFromFile(schemaRegistry, ident.StringID(nsID),
dummyDeployID,
protoConfig.SchemaFilePath, protoConfig.MessageName); err != nil {
logger.Fatal("could not load schema from configuration", zap.Error(err))
}
}
}
origin := topology.NewHost(hostID, "")
m3dbClient, err := newAdminClient(
cfg.Client, iOpts, tchannelOpts, syncCfg.TopologyInitializer,
runtimeOptsMgr, origin, protoEnabled, schemaRegistry,
syncCfg.KVStore, logger, runOpts.CustomOptions)
if err != nil {
logger.Fatal("could not create m3db client", zap.Error(err))
}
if runOpts.ClientCh != nil {
runOpts.ClientCh <- m3dbClient
}
documentsBuilderAlloc := index.NewBootstrapResultDocumentsBuilderAllocator(
opts.IndexOptions())
rsOpts := result.NewOptions().
SetInstrumentOptions(opts.InstrumentOptions()).
SetDatabaseBlockOptions(opts.DatabaseBlockOptions()).
SetSeriesCachePolicy(opts.SeriesCachePolicy()).
SetIndexDocumentsBuilderAllocator(documentsBuilderAlloc)
var repairClients []client.AdminClient
if cfg.Repair != nil && cfg.Repair.Enabled {
repairClients = append(repairClients, m3dbClient)
}
if cfg.Replication != nil {
for _, cluster := range cfg.Replication.Clusters {
if !cluster.RepairEnabled {
continue
}
// Pass nil for the topology initializer because we want to create
// a new one for the cluster we wish to replicate from, not use the
// same one as the cluster this node belongs to.
var topologyInitializer topology.Initializer
// Guaranteed to not be nil if repair is enabled by config validation.
clientCfg := *cluster.Client
clusterClient, err := newAdminClient(
clientCfg, iOpts, tchannelOpts, topologyInitializer,
runtimeOptsMgr, origin, protoEnabled, schemaRegistry,
syncCfg.KVStore, logger, runOpts.CustomOptions)
if err != nil {
logger.Fatal(
"unable to create client for replicated cluster",
zap.String("clusterName", cluster.Name), zap.Error(err))
}
repairClients = append(repairClients, clusterClient)
}
}
repairEnabled := len(repairClients) > 0
if repairEnabled {
repairOpts := opts.RepairOptions().
SetAdminClients(repairClients)
if cfg.Repair != nil {
repairOpts = repairOpts.
SetResultOptions(rsOpts).
SetDebugShadowComparisonsEnabled(cfg.Repair.DebugShadowComparisonsEnabled)
if cfg.Repair.Throttle > 0 {
repairOpts = repairOpts.SetRepairThrottle(cfg.Repair.Throttle)
}
if cfg.Repair.CheckInterval > 0 {
repairOpts = repairOpts.SetRepairCheckInterval(cfg.Repair.CheckInterval)
}
if cfg.Repair.DebugShadowComparisonsPercentage > 0 {
// Set conditionally to avoid stomping on the default value of 1.0.
repairOpts = repairOpts.SetDebugShadowComparisonsPercentage(cfg.Repair.DebugShadowComparisonsPercentage)
}
}
opts = opts.
SetRepairEnabled(true).
SetRepairOptions(repairOpts)
} else {
opts = opts.SetRepairEnabled(false)
}
// Set bootstrap options - We need to create a topology map provider from the
// same topology that will be passed to the cluster so that when we make
// bootstrapping decisions they are in sync with the clustered database
// which is triggering the actual bootstraps. This way, when the clustered
// database receives a topology update and decides to kick off a bootstrap,
	// the bootstrap process will receive a topology map that is at least as
// recent as the one that triggered the bootstrap, if not newer.
// See GitHub issue #1013 for more details.
topoMapProvider := newTopoMapProvider(topo)
bs, err := cfg.Bootstrap.New(
rsOpts, opts, topoMapProvider, origin, m3dbClient,
)
if err != nil {
logger.Fatal("could not create bootstrap process", zap.Error(err))
}
opts = opts.SetBootstrapProcessProvider(bs)
// Start the cluster services now that the M3DB client is available.
clusterListenAddress := cfg.ClusterListenAddressOrDefault()
tchannelthriftClusterClose, err := ttcluster.NewServer(m3dbClient,
clusterListenAddress, contextPool, tchannelOpts).ListenAndServe()
if err != nil {
logger.Fatal("could not open tchannelthrift interface",
zap.String("address", clusterListenAddress), zap.Error(err))
}
defer tchannelthriftClusterClose()
logger.Info("cluster tchannelthrift: listening", zap.String("address", clusterListenAddress))
httpClusterListenAddress := cfg.HTTPClusterListenAddressOrDefault()
httpjsonClusterClose, err := hjcluster.NewServer(m3dbClient,
httpClusterListenAddress, contextPool, nil).ListenAndServe()
if err != nil {
logger.Fatal("could not open httpjson interface",
zap.String("address", httpClusterListenAddress), zap.Error(err))
}
defer httpjsonClusterClose()
logger.Info("cluster httpjson: listening", zap.String("address", httpClusterListenAddress))
// Initialize clustered database.
clusterTopoWatch, err := topo.Watch()
if err != nil {
logger.Fatal("could not create cluster topology watch", zap.Error(err))
}
opts = opts.SetSchemaRegistry(schemaRegistry).
SetAdminClient(m3dbClient)
if cfg.WideConfig != nil && cfg.WideConfig.BatchSize > 0 {
opts = opts.SetWideBatchSize(cfg.WideConfig.BatchSize)
}
db, err := cluster.NewDatabase(hostID, topo, clusterTopoWatch, opts)
if err != nil {
logger.Fatal("could not construct database", zap.Error(err))
}
// Now that the database has been created it can be set as the block lease verifier
// on the block lease manager.
leaseVerifier := storage.NewLeaseVerifier(db)
blockLeaseManager.SetLeaseVerifier(leaseVerifier)
if err := db.Open(); err != nil {
logger.Fatal("could not open database", zap.Error(err))
}
// Now that we've initialized the database we can set it on the service.
service.SetDatabase(db)
go func() {
if runOpts.BootstrapCh != nil {
// Notify on bootstrap chan if specified.
defer func() {
runOpts.BootstrapCh <- struct{}{}
}()
}
// Bootstrap asynchronously so we can handle interrupt.
if err := db.Bootstrap(); err != nil {
logger.Fatal("could not bootstrap database", zap.Error(err))
}
logger.Info("bootstrapped")
// Only set the write new series limit after bootstrapping
kvWatchNewSeriesLimitPerShard(syncCfg.KVStore, logger, topo,
runtimeOptsMgr, cfg.Limits.WriteNewSeriesPerSecond)
kvWatchEncodersPerBlockLimit(syncCfg.KVStore, logger,
runtimeOptsMgr, cfg.Limits.MaxEncodersPerBlock)
kvWatchQueryLimit(syncCfg.KVStore, logger,
queryLimits.FetchDocsLimit(),
queryLimits.BytesReadLimit(),
// For backwards compatibility as M3 moves toward permits instead of time-based limits,
// the series-read path uses permits which are implemented with limits, and so we support
// dynamic updates to this limit-based permit still be passing downstream the limit itself.
seriesReadPermits.Limit,
queryLimits.AggregateDocsLimit(),
limitOpts,
)
}()
// Wait for process interrupt.
xos.WaitForInterrupt(logger, xos.InterruptOptions{
InterruptCh: runOpts.InterruptCh,
})
// Attempt graceful server close.
closedCh := make(chan struct{})
go func() {
err := db.Terminate()
if err != nil {
logger.Error("close database error", zap.Error(err))
}
closedCh <- struct{}{}
}()
// Wait then close or hard close.
closeTimeout := serverGracefulCloseTimeout
select {
case <-closedCh:
logger.Info("server closed")
case <-time.After(closeTimeout):
logger.Error("server closed after timeout", zap.Duration("timeout", closeTimeout))
}
}
// bgValidateProcessLimits periodically re-validates OS process limits in
// the background, warning (with a docs link) until the limits validate or
// the monitoring window elapses. It returns immediately when the current
// configuration cannot be validated at all.
func bgValidateProcessLimits(logger *zap.Logger) {
	// Bail out early if process limits cannot be validated on this
	// configuration; running the background task would be pointless.
	canValidate, message := canValidateProcessLimits()
	if !canValidate {
		logger.Warn("cannot validate process limits: invalid configuration found",
			zap.String("message", message))
		return
	}

	var (
		start  = time.Now()
		ticker = time.NewTicker(bgProcessLimitInterval)
	)
	defer ticker.Stop()

	// Only monitor for the first `maxBgProcessLimitMonitorDuration` of the
	// process lifetime, re-checking once per tick until limits validate.
	for time.Since(start) <= maxBgProcessLimitMonitorDuration {
		err := validateProcessLimits()
		if err == nil {
			return
		}

		logger.Warn("invalid configuration found, refer to linked documentation for more information",
			zap.String("url", xdocs.Path("operational_guide/kernel_configuration")),
			zap.Error(err),
		)

		<-ticker.C
	}
}
// kvWatchNewSeriesLimitPerShard resolves the cluster-wide new series
// insert limit from KV, applies it (converted to a per-placed-shard value)
// via the runtime options manager, and then watches the KV key so future
// updates take effect dynamically.
func kvWatchNewSeriesLimitPerShard(
	store kv.Store,
	logger *zap.Logger,
	topo topology.Topology,
	runtimeOptsMgr m3dbruntime.OptionsManager,
	defaultClusterNewSeriesLimit int,
) {
	// Eagerly resolve the current value so the limit takes effect
	// immediately rather than waiting for the first watch notification.
	var initClusterLimit int

	value, err := store.Get(kvconfig.ClusterNewSeriesInsertLimitKey)
	if err == nil {
		protoValue := &commonpb.Int64Proto{}
		err = value.Unmarshal(protoValue)
		if err == nil {
			initClusterLimit = int(protoValue.Value)
		}
	}

	if err != nil {
		// Fall back to the configured default when the key is missing or
		// the stored value failed to unmarshal.
		if err != kv.ErrNotFound {
			logger.Warn("error resolving cluster new series insert limit", zap.Error(err))
		}
		initClusterLimit = defaultClusterNewSeriesLimit
	}

	err = setNewSeriesLimitPerShardOnChange(topo, runtimeOptsMgr, initClusterLimit)
	if err != nil {
		logger.Warn("unable to set cluster new series insert limit", zap.Error(err))
	}

	watch, err := store.Watch(kvconfig.ClusterNewSeriesInsertLimitKey)
	if err != nil {
		logger.Error("could not watch cluster new series insert limit", zap.Error(err))
		return
	}

	// Apply each update for as long as the watch channel stays open; when
	// the key is deleted (Get returns nil) revert to the default limit.
	go func() {
		protoValue := &commonpb.Int64Proto{}
		for range watch.C() {
			value := defaultClusterNewSeriesLimit
			if newValue := watch.Get(); newValue != nil {
				if err := newValue.Unmarshal(protoValue); err != nil {
					logger.Warn("unable to parse new cluster new series insert limit", zap.Error(err))
					continue
				}
				value = int(protoValue.Value)
			}

			err = setNewSeriesLimitPerShardOnChange(topo, runtimeOptsMgr, value)
			if err != nil {
				logger.Warn("unable to set cluster new series insert limit", zap.Error(err))
				continue
			}
		}
	}()
}
// kvWatchEncodersPerBlockLimit resolves the max encoders-per-block limit
// from KV, applies it via the runtime options manager, and then watches
// the KV key so future updates take effect dynamically.
func kvWatchEncodersPerBlockLimit(
	store kv.Store,
	logger *zap.Logger,
	runtimeOptsMgr m3dbruntime.OptionsManager,
	defaultEncodersPerBlockLimit int,
) {
	// Eagerly resolve the current value so the limit takes effect
	// immediately rather than waiting for the first watch notification.
	var initEncoderLimit int

	value, err := store.Get(kvconfig.EncodersPerBlockLimitKey)
	if err == nil {
		protoValue := &commonpb.Int64Proto{}
		err = value.Unmarshal(protoValue)
		if err == nil {
			initEncoderLimit = int(protoValue.Value)
		}
	}

	if err != nil {
		// Fall back to the configured default when the key is missing or
		// the stored value failed to unmarshal.
		if err != kv.ErrNotFound {
			logger.Warn("error resolving encoder per block limit", zap.Error(err))
		}
		initEncoderLimit = defaultEncodersPerBlockLimit
	}

	err = setEncodersPerBlockLimitOnChange(runtimeOptsMgr, initEncoderLimit)
	if err != nil {
		logger.Warn("unable to set encoder per block limit", zap.Error(err))
	}

	watch, err := store.Watch(kvconfig.EncodersPerBlockLimitKey)
	if err != nil {
		logger.Error("could not watch encoder per block limit", zap.Error(err))
		return
	}

	// Apply each update for as long as the watch channel stays open; when
	// the key is deleted (Get returns nil) revert to the default limit.
	go func() {
		protoValue := &commonpb.Int64Proto{}
		for range watch.C() {
			value := defaultEncodersPerBlockLimit
			if newValue := watch.Get(); newValue != nil {
				if err := newValue.Unmarshal(protoValue); err != nil {
					logger.Warn("unable to parse new encoder per block limit", zap.Error(err))
					continue
				}
				value = int(protoValue.Value)
			}

			err = setEncodersPerBlockLimitOnChange(runtimeOptsMgr, value)
			if err != nil {
				logger.Warn("unable to set encoder per block limit", zap.Error(err))
				continue
			}
		}
	}()
}
// kvWatchQueryLimit resolves dynamic query limits from KV and applies them
// over the config-based defaults, then watches the key so limit changes
// take effect without a process restart.
func kvWatchQueryLimit(
	store kv.Store,
	logger *zap.Logger,
	docsLimit limits.LookbackLimit,
	bytesReadLimit limits.LookbackLimit,
	diskSeriesReadLimit limits.LookbackLimit,
	aggregateDocsLimit limits.LookbackLimit,
	defaultOpts limits.Options,
) {
	// Eagerly apply any limits already stored in KV.
	value, err := store.Get(kvconfig.QueryLimits)
	if err == nil {
		dynamicLimits := &kvpb.QueryLimits{}
		err = value.Unmarshal(dynamicLimits)
		if err == nil {
			updateQueryLimits(
				logger, docsLimit, bytesReadLimit, diskSeriesReadLimit,
				aggregateDocsLimit, dynamicLimits, defaultOpts)
		}
	} else if !errors.Is(err, kv.ErrNotFound) {
		logger.Warn("error resolving query limit", zap.Error(err))
	}

	watch, err := store.Watch(kvconfig.QueryLimits)
	if err != nil {
		logger.Error("could not watch query limit", zap.Error(err))
		return
	}

	// NOTE(review): unlike the new-series/encoder watches above, deleting
	// the KV key does not revert limits to the config defaults here — the
	// last applied dynamic limits remain in effect. Confirm whether this
	// asymmetry is intentional.
	go func() {
		dynamicLimits := &kvpb.QueryLimits{}
		for range watch.C() {
			if newValue := watch.Get(); newValue != nil {
				if err := newValue.Unmarshal(dynamicLimits); err != nil {
					logger.Warn("unable to parse new query limits", zap.Error(err))
					continue
				}
				updateQueryLimits(
					logger, docsLimit, bytesReadLimit, diskSeriesReadLimit,
					aggregateDocsLimit, dynamicLimits, defaultOpts)
			}
		}
	}()
}
// updateQueryLimits applies query limit options to each lookback limit.
// Each limit starts from its config-based options and is overridden by the
// corresponding dynamic (KV-sourced) limit when that field is set. Errors
// are logged per limit; one failure does not stop the others.
func updateQueryLimits(
	logger *zap.Logger,
	docsLimit limits.LookbackLimit,
	bytesReadLimit limits.LookbackLimit,
	diskSeriesReadLimit limits.LookbackLimit,
	aggregateDocsLimit limits.LookbackLimit,
	dynamicOpts *kvpb.QueryLimits,
	configOpts limits.Options,
) {
	// Default to the config-based limits, then override with any dynamic
	// limit that is present.
	docsOpts := configOpts.DocsLimitOpts()
	bytesOpts := configOpts.BytesReadLimitOpts()
	diskSeriesOpts := configOpts.DiskSeriesReadLimitOpts()
	aggDocsOpts := configOpts.AggregateDocsLimitOpts()

	if dynamicOpts != nil {
		if l := dynamicOpts.MaxRecentlyQueriedSeriesBlocks; l != nil {
			docsOpts = dynamicLimitToLimitOpts(l)
		}
		if l := dynamicOpts.MaxRecentlyQueriedSeriesDiskBytesRead; l != nil {
			bytesOpts = dynamicLimitToLimitOpts(l)
		}
		if l := dynamicOpts.MaxRecentlyQueriedSeriesDiskRead; l != nil {
			diskSeriesOpts = dynamicLimitToLimitOpts(l)
		}
		if l := dynamicOpts.MaxRecentlyQueriedMetadataRead; l != nil {
			aggDocsOpts = dynamicLimitToLimitOpts(l)
		}
	}

	// Apply each limit in turn, logging (not aborting) on failure.
	updates := []struct {
		name  string
		limit limits.LookbackLimit
		opts  limits.LookbackLimitOptions
	}{
		{"docs", docsLimit, docsOpts},
		{"bytes read", bytesReadLimit, bytesOpts},
		{"series read", diskSeriesReadLimit, diskSeriesOpts},
		{"metadata read", aggregateDocsLimit, aggDocsOpts},
	}
	for _, u := range updates {
		if err := updateQueryLimit(u.limit, u.opts); err != nil {
			logger.Error("error updating "+u.name+" limit", zap.Error(err))
		}
	}
}
// updateQueryLimit applies newOpts to the limit, skipping the update
// entirely when the options are unchanged to avoid spurious churn.
func updateQueryLimit(
	limit limits.LookbackLimit,
	newOpts limits.LookbackLimitOptions,
) error {
	if limit.Options().Equals(newOpts) {
		// Nothing changed; no update required.
		return nil
	}
	return limit.Update(newOpts)
}
// dynamicLimitToLimitOpts converts a KV-sourced query limit proto into
// lookback limit options, interpreting LookbackSeconds as seconds.
func dynamicLimitToLimitOpts(dynamicLimit *kvpb.QueryLimit) limits.LookbackLimitOptions {
	lookback := time.Duration(dynamicLimit.LookbackSeconds) * time.Second
	return limits.LookbackLimitOptions{
		Limit:         dynamicLimit.Limit,
		Lookback:      lookback,
		ForceExceeded: dynamicLimit.ForceExceeded,
	}
}
// kvWatchClientConsistencyLevels watches the KV keys that control the
// client bootstrap, read and write consistency levels, applying valid
// updates to the runtime options. When a key is deleted, the level reverts
// to the value from the static client options.
func kvWatchClientConsistencyLevels(
	store kv.Store,
	logger *zap.Logger,
	clientOpts client.AdminOptions,
	runtimeOptsMgr m3dbruntime.OptionsManager,
) {
	// Parses a read consistency level string and, when valid, applies it
	// to the runtime options via applyFn.
	setReadConsistencyLevel := func(
		v string,
		applyFn func(topology.ReadConsistencyLevel, m3dbruntime.Options) m3dbruntime.Options,
	) error {
		for _, level := range topology.ValidReadConsistencyLevels() {
			if level.String() == v {
				runtimeOpts := applyFn(level, runtimeOptsMgr.Get())
				return runtimeOptsMgr.Update(runtimeOpts)
			}
		}
		return fmt.Errorf("invalid read consistency level set: %s", v)
	}

	// Parses a (write) consistency level string and, when valid, applies
	// it to the runtime options via applyFn.
	setConsistencyLevel := func(
		v string,
		applyFn func(topology.ConsistencyLevel, m3dbruntime.Options) m3dbruntime.Options,
	) error {
		for _, level := range topology.ValidConsistencyLevels() {
			if level.String() == v {
				runtimeOpts := applyFn(level, runtimeOptsMgr.Get())
				return runtimeOptsMgr.Update(runtimeOpts)
			}
		}
		return fmt.Errorf("invalid consistency level set: %s", v)
	}

	// Bootstrap consistency level: dynamic value, reverting to the static
	// client option on key deletion.
	kvWatchStringValue(store, logger,
		kvconfig.ClientBootstrapConsistencyLevel,
		func(value string) error {
			return setReadConsistencyLevel(value,
				func(level topology.ReadConsistencyLevel, opts m3dbruntime.Options) m3dbruntime.Options {
					return opts.SetClientBootstrapConsistencyLevel(level)
				})
		},
		func() error {
			return runtimeOptsMgr.Update(runtimeOptsMgr.Get().
				SetClientBootstrapConsistencyLevel(clientOpts.BootstrapConsistencyLevel()))
		})

	// Read consistency level.
	kvWatchStringValue(store, logger,
		kvconfig.ClientReadConsistencyLevel,
		func(value string) error {
			return setReadConsistencyLevel(value,
				func(level topology.ReadConsistencyLevel, opts m3dbruntime.Options) m3dbruntime.Options {
					return opts.SetClientReadConsistencyLevel(level)
				})
		},
		func() error {
			return runtimeOptsMgr.Update(runtimeOptsMgr.Get().
				SetClientReadConsistencyLevel(clientOpts.ReadConsistencyLevel()))
		})

	// Write consistency level.
	kvWatchStringValue(store, logger,
		kvconfig.ClientWriteConsistencyLevel,
		func(value string) error {
			return setConsistencyLevel(value,
				func(level topology.ConsistencyLevel, opts m3dbruntime.Options) m3dbruntime.Options {
					return opts.SetClientWriteConsistencyLevel(level)
				})
		},
		func() error {
			return runtimeOptsMgr.Update(runtimeOptsMgr.Get().
				SetClientWriteConsistencyLevel(clientOpts.WriteConsistencyLevel()))
		})
}
// kvWatchStringValue watches a string-proto KV key, invoking onValue for
// each new value and onDelete when the key is removed. The current value
// (if any) is applied eagerly before the watch is established so callers
// do not wait for the first watch notification.
func kvWatchStringValue(
	store kv.Store,
	logger *zap.Logger,
	key string,
	onValue func(value string) error,
	onDelete func() error,
) {
	protoValue := &commonpb.StringProto{}

	// First try to eagerly set the value so it doesn't flap if the
	// watch returns but not immediately for an existing value
	value, err := store.Get(key)
	if err != nil && err != kv.ErrNotFound {
		logger.Error("could not resolve KV", zap.String("key", key), zap.Error(err))
	}

	if err == nil {
		if err := value.Unmarshal(protoValue); err != nil {
			logger.Error("could not unmarshal KV key", zap.String("key", key), zap.Error(err))
		} else if err := onValue(protoValue.Value); err != nil {
			logger.Error("could not process value of KV", zap.String("key", key), zap.Error(err))
		} else {
			logger.Info("set KV key", zap.String("key", key), zap.Any("value", protoValue.Value))
		}
	}

	watch, err := store.Watch(key)
	if err != nil {
		logger.Error("could not watch KV key", zap.String("key", key), zap.Error(err))
		return
	}

	// The goroutine reuses protoValue across iterations; the eager read
	// above completes before the goroutine starts, so there is no
	// concurrent access to it.
	go func() {
		for range watch.C() {
			newValue := watch.Get()
			if newValue == nil {
				// Key deleted: restore the default via onDelete.
				if err := onDelete(); err != nil {
					logger.Warn("could not set default for KV key", zap.String("key", key), zap.Error(err))
				}
				continue
			}

			err := newValue.Unmarshal(protoValue)
			if err != nil {
				logger.Warn("could not unmarshal KV key", zap.String("key", key), zap.Error(err))
				continue
			}
			if err := onValue(protoValue.Value); err != nil {
				logger.Warn("could not process change for KV key", zap.String("key", key), zap.Error(err))
				continue
			}

			logger.Info("set KV key", zap.String("key", key), zap.Any("value", protoValue.Value))
		}
	}()
}
// setNewSeriesLimitPerShardOnChange converts the cluster-wide new series
// limit into a per-placed-shard limit and applies it to the runtime
// options, skipping the update when the value is unchanged.
func setNewSeriesLimitPerShardOnChange(
	topo topology.Topology,
	runtimeOptsMgr m3dbruntime.OptionsManager,
	clusterLimit int,
) error {
	newLimit := clusterLimitToPlacedShardLimit(topo, clusterLimit)
	current := runtimeOptsMgr.Get()
	if current.WriteNewSeriesLimitPerShardPerSecond() == newLimit {
		// Already at the desired value; avoid triggering a runtime
		// options update for no change.
		return nil
	}

	return runtimeOptsMgr.Update(
		current.SetWriteNewSeriesLimitPerShardPerSecond(newLimit))
}
// clusterLimitToPlacedShardLimit converts a cluster-wide limit into a
// per-placed-shard limit by dividing (rounding up) across all shard
// replicas in the current topology. A non-positive cluster limit, or a
// topology with no placed shards, yields 0 (no limit share).
func clusterLimitToPlacedShardLimit(topo topology.Topology, clusterLimit int) int {
	if clusterLimit < 1 {
		return 0
	}

	topoMap := topo.Get()
	placedShards := len(topoMap.ShardSet().AllIDs()) * topoMap.Replicas()
	if placedShards < 1 {
		return 0
	}

	// Round up so the sum across shards covers the full cluster limit.
	return int(math.Ceil(float64(clusterLimit) / float64(placedShards)))
}
// setEncodersPerBlockLimitOnChange applies the encoders-per-block limit to
// the runtime options, skipping the update when the value is unchanged.
func setEncodersPerBlockLimitOnChange(
	runtimeOptsMgr m3dbruntime.OptionsManager,
	encoderLimit int,
) error {
	current := runtimeOptsMgr.Get()
	if current.EncodersPerBlockLimit() == encoderLimit {
		// Already at the desired value; avoid triggering a runtime
		// options update for no change.
		return nil
	}

	return runtimeOptsMgr.Update(current.SetEncodersPerBlockLimit(encoderLimit))
}
// withEncodingAndPoolingOptions constructs and wires all object pools
// (bytes, context, encoder, iterator, identifier, write batch, block,
// series and index pools) according to the pooling policy, and returns the
// storage options updated with encoding, block, series, commit log and
// index options that share those pools. Ordering matters: pools are
// initialized only after the options that reference them are assembled.
func withEncodingAndPoolingOptions(
	cfg config.DBConfiguration,
	logger *zap.Logger,
	opts storage.Options,
	policy config.PoolingPolicy,
) storage.Options {
	iOpts := opts.InstrumentOptions()
	scope := opts.InstrumentOptions().MetricsScope()

	// Set the max bytes pool byte slice alloc size for the thrift pooling.
	thriftBytesAllocSize := policy.ThriftBytesPoolAllocSizeOrDefault()
	logger.Info("set thrift bytes pool alloc size",
		zap.Int("size", thriftBytesAllocSize))
	apachethrift.SetMaxBytesPoolAlloc(thriftBytesAllocSize)

	bytesPoolOpts := pool.NewObjectPoolOptions().
		SetInstrumentOptions(iOpts.SetMetricsScope(scope.SubScope("bytes-pool")))
	checkedBytesPoolOpts := bytesPoolOpts.
		SetInstrumentOptions(iOpts.SetMetricsScope(scope.SubScope("checked-bytes-pool")))

	// Build the bucketed bytes pool from the configured bucket policy.
	buckets := make([]pool.Bucket, len(policy.BytesPool.Buckets))
	for i, bucket := range policy.BytesPool.Buckets {
		var b pool.Bucket
		b.Capacity = bucket.CapacityOrDefault()
		b.Count = bucket.SizeOrDefault()
		b.Options = bytesPoolOpts.
			SetRefillLowWatermark(bucket.RefillLowWaterMarkOrDefault()).
			SetRefillHighWatermark(bucket.RefillHighWaterMarkOrDefault())
		buckets[i] = b
		logger.Info("bytes pool configured",
			zap.Int("capacity", bucket.CapacityOrDefault()),
			zap.Int("size", bucket.SizeOrDefault()),
			zap.Float64("refillLowWaterMark", bucket.RefillLowWaterMarkOrDefault()),
			zap.Float64("refillHighWaterMark", bucket.RefillHighWaterMarkOrDefault()))
	}

	// Only simple pooling is recognized; anything else is fatal.
	var bytesPool pool.CheckedBytesPool
	switch policy.TypeOrDefault() {
	case config.SimplePooling:
		bytesPool = pool.NewCheckedBytesPool(
			buckets,
			checkedBytesPoolOpts,
			func(s []pool.Bucket) pool.BytesPool {
				return pool.NewBytesPool(s, bytesPoolOpts)
			})
	default:
		logger.Fatal("unrecognized pooling type", zap.Any("type", policy.Type))
	}

	{
		// Avoid polluting the rest of the function with `l` var
		l := logger
		if t := policy.Type; t != nil {
			l = l.With(zap.String("policy", string(*t)))
		}

		l.Info("bytes pool init start")
		bytesPool.Init()
		l.Info("bytes pool init end")
	}

	segmentReaderPool := xio.NewSegmentReaderPool(
		poolOptions(
			policy.SegmentReaderPool,
			scope.SubScope("segment-reader-pool")))
	segmentReaderPool.Init()

	encoderPool := encoding.NewEncoderPool(
		poolOptions(
			policy.EncoderPool,
			scope.SubScope("encoder-pool")))

	closersPoolOpts := poolOptions(
		policy.ClosersPool,
		scope.SubScope("closers-pool"))

	contextPoolOpts := poolOptions(
		policy.ContextPool,
		scope.SubScope("context-pool"))

	contextPool := xcontext.NewPool(xcontext.NewOptions().
		SetContextPoolOptions(contextPoolOpts).
		SetFinalizerPoolOptions(closersPoolOpts))

	iteratorPool := encoding.NewReaderIteratorPool(
		poolOptions(
			policy.IteratorPool,
			scope.SubScope("iterator-pool")))

	multiIteratorPool := encoding.NewMultiReaderIteratorPool(
		poolOptions(
			policy.IteratorPool,
			scope.SubScope("multi-iterator-pool")))

	var writeBatchPoolInitialBatchSize *int
	if policy.WriteBatchPool.InitialBatchSize != nil {
		// Use config value if available.
		writeBatchPoolInitialBatchSize = policy.WriteBatchPool.InitialBatchSize
	} else {
		// Otherwise use the default batch size that the client will use.
		clientDefaultSize := client.DefaultWriteBatchSize
		writeBatchPoolInitialBatchSize = &clientDefaultSize
	}

	var writeBatchPoolMaxBatchSize *int
	if policy.WriteBatchPool.MaxBatchSize != nil {
		writeBatchPoolMaxBatchSize = policy.WriteBatchPool.MaxBatchSize
	}

	var writeBatchPoolSize int
	if policy.WriteBatchPool.Size != nil {
		writeBatchPoolSize = *policy.WriteBatchPool.Size
	} else {
		// If no value set, calculate a reasonable value based on the commit log
		// queue size. We base it off the commitlog queue size because we will
		// want to be able to buffer at least one full commitlog queues worth of
		// writes without allocating because these objects are very expensive to
		// allocate.
		commitlogQueueSize := opts.CommitLogOptions().BacklogQueueSize()
		expectedBatchSize := *writeBatchPoolInitialBatchSize
		writeBatchPoolSize = commitlogQueueSize / expectedBatchSize
	}

	writeBatchPoolOpts := pool.NewObjectPoolOptions()
	writeBatchPoolOpts = writeBatchPoolOpts.
		SetSize(writeBatchPoolSize).
		// Set watermarks to zero because this pool is sized to be as large as we
		// ever need it to be, so background allocations are usually wasteful.
		SetRefillLowWatermark(0.0).
		SetRefillHighWatermark(0.0).
		SetInstrumentOptions(
			writeBatchPoolOpts.
				InstrumentOptions().
				SetMetricsScope(scope.SubScope("write-batch-pool")))

	writeBatchPool := writes.NewWriteBatchPool(
		writeBatchPoolOpts,
		writeBatchPoolInitialBatchSize,
		writeBatchPoolMaxBatchSize)

	tagPoolPolicy := policy.TagsPool
	identifierPool := ident.NewPool(bytesPool, ident.PoolOptions{
		IDPoolOptions: poolOptions(
			policy.IdentifierPool, scope.SubScope("identifier-pool")),
		TagsPoolOptions: maxCapacityPoolOptions(tagPoolPolicy, scope.SubScope("tags-pool")),
		TagsCapacity:    tagPoolPolicy.CapacityOrDefault(),
		TagsMaxCapacity: tagPoolPolicy.MaxCapacityOrDefault(),
		TagsIteratorPoolOptions: poolOptions(
			policy.TagsIteratorPool,
			scope.SubScope("tags-iterator-pool")),
	})

	fetchBlockMetadataResultsPoolPolicy := policy.FetchBlockMetadataResultsPool
	fetchBlockMetadataResultsPool := block.NewFetchBlockMetadataResultsPool(
		capacityPoolOptions(
			fetchBlockMetadataResultsPoolPolicy,
			scope.SubScope("fetch-block-metadata-results-pool")),
		fetchBlockMetadataResultsPoolPolicy.CapacityOrDefault())

	fetchBlocksMetadataResultsPoolPolicy := policy.FetchBlocksMetadataResultsPool
	fetchBlocksMetadataResultsPool := block.NewFetchBlocksMetadataResultsPool(
		capacityPoolOptions(
			fetchBlocksMetadataResultsPoolPolicy,
			scope.SubScope("fetch-blocks-metadata-results-pool")),
		fetchBlocksMetadataResultsPoolPolicy.CapacityOrDefault())

	bytesWrapperPoolOpts := poolOptions(
		policy.CheckedBytesWrapperPool,
		scope.SubScope("checked-bytes-wrapper-pool"))
	bytesWrapperPool := xpool.NewCheckedBytesWrapperPool(
		bytesWrapperPoolOpts)
	bytesWrapperPool.Init()

	encodingOpts := encoding.NewOptions().
		SetEncoderPool(encoderPool).
		SetReaderIteratorPool(iteratorPool).
		SetBytesPool(bytesPool).
		SetSegmentReaderPool(segmentReaderPool).
		SetCheckedBytesWrapperPool(bytesWrapperPool)

	// Encoder/iterator allocators switch on proto vs m3tsz encoding based
	// on the static proto configuration.
	encoderPool.Init(func() encoding.Encoder {
		if cfg.Proto != nil && cfg.Proto.Enabled {
			enc := proto.NewEncoder(time.Time{}, encodingOpts)
			return enc
		}

		return m3tsz.NewEncoder(time.Time{}, nil, m3tsz.DefaultIntOptimizationEnabled, encodingOpts)
	})

	iteratorPool.Init(func(r xio.Reader64, descr namespace.SchemaDescr) encoding.ReaderIterator {
		if cfg.Proto != nil && cfg.Proto.Enabled {
			return proto.NewIterator(r, descr, encodingOpts)
		}
		return m3tsz.NewReaderIterator(r, m3tsz.DefaultIntOptimizationEnabled, encodingOpts)
	})

	multiIteratorPool.Init(func(r xio.Reader64, descr namespace.SchemaDescr) encoding.ReaderIterator {
		iter := iteratorPool.Get()
		iter.Reset(r, descr)
		return iter
	})

	writeBatchPool.Init()

	bucketPool := series.NewBufferBucketPool(
		poolOptions(policy.BufferBucketPool, scope.SubScope("buffer-bucket-pool")))
	bucketVersionsPool := series.NewBufferBucketVersionsPool(
		poolOptions(policy.BufferBucketVersionsPool, scope.SubScope("buffer-bucket-versions-pool")))

	retrieveRequestPool := fs.NewRetrieveRequestPool(segmentReaderPool,
		poolOptions(policy.RetrieveRequestPool, scope.SubScope("retrieve-request-pool")))
	retrieveRequestPool.Init()

	// Wire all constructed pools into the storage options.
	opts = opts.
		SetBytesPool(bytesPool).
		SetContextPool(contextPool).
		SetEncoderPool(encoderPool).
		SetReaderIteratorPool(iteratorPool).
		SetMultiReaderIteratorPool(multiIteratorPool).
		SetIdentifierPool(identifierPool).
		SetFetchBlockMetadataResultsPool(fetchBlockMetadataResultsPool).
		SetFetchBlocksMetadataResultsPool(fetchBlocksMetadataResultsPool).
		SetWriteBatchPool(writeBatchPool).
		SetBufferBucketPool(bucketPool).
		SetBufferBucketVersionsPool(bucketVersionsPool).
		SetRetrieveRequestPool(retrieveRequestPool).
		SetCheckedBytesWrapperPool(bytesWrapperPool)

	blockOpts := opts.DatabaseBlockOptions().
		SetDatabaseBlockAllocSize(policy.BlockAllocSizeOrDefault()).
		SetContextPool(contextPool).
		SetEncoderPool(encoderPool).
		SetReaderIteratorPool(iteratorPool).
		SetMultiReaderIteratorPool(multiIteratorPool).
		SetSegmentReaderPool(segmentReaderPool).
		SetBytesPool(bytesPool)

	// LRU series caching requires a wired list to track block residency.
	if opts.SeriesCachePolicy() == series.CacheLRU {
		var (
			runtimeOpts   = opts.RuntimeOptionsManager()
			wiredListOpts = block.WiredListOptions{
				RuntimeOptionsManager: runtimeOpts,
				InstrumentOptions:     iOpts,
				ClockOptions:          opts.ClockOptions(),
			}
			lruCfg = cfg.Cache.SeriesConfiguration().LRU
		)

		if lruCfg != nil && lruCfg.EventsChannelSize > 0 {
			wiredListOpts.EventsChannelSize = int(lruCfg.EventsChannelSize)
		}
		wiredList := block.NewWiredList(wiredListOpts)
		blockOpts = blockOpts.SetWiredList(wiredList)
	}

	blockPool := block.NewDatabaseBlockPool(
		poolOptions(
			policy.BlockPool,
			scope.SubScope("block-pool")))

	blockPool.Init(func() block.DatabaseBlock {
		return block.NewDatabaseBlock(time.Time{}, 0, ts.Segment{}, blockOpts, namespace.Context{})
	})

	blockOpts = blockOpts.SetDatabaseBlockPool(blockPool)

	opts = opts.SetDatabaseBlockOptions(blockOpts)

	// NB(prateek): retention opts are overridden per namespace during series creation
	retentionOpts := retention.NewOptions()
	seriesOpts := storage.NewSeriesOptionsFromOptions(opts, retentionOpts).
		SetFetchBlockMetadataResultsPool(opts.FetchBlockMetadataResultsPool())

	seriesPool := series.NewDatabaseSeriesPool(
		poolOptions(
			policy.SeriesPool,
			scope.SubScope("series-pool")))

	opts = opts.
		SetSeriesOptions(seriesOpts).
		SetDatabaseSeriesPool(seriesPool)
	opts = opts.SetCommitLogOptions(opts.CommitLogOptions().
		SetBytesPool(bytesPool).
		SetIdentifierPool(identifierPool))

	postingsListOpts := poolOptions(policy.PostingsListPool, scope.SubScope("postingslist-pool"))
	postingsList := postings.NewPool(postingsListOpts, roaring.NewPostingsList)

	queryResultsPool := index.NewQueryResultsPool(
		poolOptions(policy.IndexResultsPool, scope.SubScope("index-query-results-pool")))
	aggregateQueryResultsPool := index.NewAggregateResultsPool(
		poolOptions(policy.IndexResultsPool, scope.SubScope("index-aggregate-results-pool")))
	aggregateQueryValuesPool := index.NewAggregateValuesPool(
		poolOptions(policy.IndexResultsPool, scope.SubScope("index-aggregate-values-pool")))

	// Set value transformation options.
	opts = opts.SetTruncateType(cfg.Transforms.TruncateBy)
	forcedValue := cfg.Transforms.ForcedValue
	if forcedValue != nil {
		opts = opts.SetWriteTransformOptions(series.WriteTransformOptions{
			ForceValueEnabled: true,
			ForceValue:        *forcedValue,
		})
	}

	// Set index options.
	indexOpts := opts.IndexOptions().
		SetInstrumentOptions(iOpts).
		SetMemSegmentOptions(
			opts.IndexOptions().MemSegmentOptions().
				SetPostingsListPool(postingsList).
				SetInstrumentOptions(iOpts)).
		SetFSTSegmentOptions(
			opts.IndexOptions().FSTSegmentOptions().
				SetPostingsListPool(postingsList).
				SetInstrumentOptions(iOpts).
				SetContextPool(opts.ContextPool())).
		SetSegmentBuilderOptions(
			opts.IndexOptions().SegmentBuilderOptions().
				SetPostingsListPool(postingsList)).
		SetIdentifierPool(identifierPool).
		SetCheckedBytesPool(bytesPool).
		SetQueryResultsPool(queryResultsPool).
		SetAggregateResultsPool(aggregateQueryResultsPool).
		SetAggregateValuesPool(aggregateQueryValuesPool).
		SetForwardIndexProbability(cfg.Index.ForwardIndexProbability).
		SetForwardIndexThreshold(cfg.Index.ForwardIndexThreshold)

	queryResultsPool.Init(func() index.QueryResults {
		// NB(r): Need to initialize after setting the index opts so
		// it sees the same reference of the options as is set for the DB.
		return index.NewQueryResults(nil, index.QueryResultsOptions{}, indexOpts)
	})
	aggregateQueryResultsPool.Init(func() index.AggregateResults {
		// NB(r): Need to initialize after setting the index opts so
		// it sees the same reference of the options as is set for the DB.
		return index.NewAggregateResults(nil, index.AggregateResultsOptions{}, indexOpts)
	})
	aggregateQueryValuesPool.Init(func() index.AggregateValues {
		// NB(r): Need to initialize after setting the index opts so
		// it sees the same reference of the options as is set for the DB.
		return index.NewAggregateValues(indexOpts)
	})

	return opts.SetIndexOptions(indexOpts)
}
// newAdminClient constructs an M3DB admin client from the given client
// configuration, applying channel options, the runtime options manager,
// origin host, optional proto encoding and the schema registry, plus any
// caller-supplied custom options. Once the client is built it kicks off
// KV watches that keep the client consistency levels in sync with dynamic
// configuration.
//
// Note: the first parameter is named clientCfg (not config) to avoid
// shadowing the imported config package.
func newAdminClient(
	clientCfg client.Configuration,
	iOpts instrument.Options,
	tchannelOpts *tchannel.ChannelOptions,
	topologyInitializer topology.Initializer,
	runtimeOptsMgr m3dbruntime.OptionsManager,
	origin topology.Host,
	protoEnabled bool,
	schemaRegistry namespace.SchemaRegistry,
	kvStore kv.Store,
	logger *zap.Logger,
	custom []client.CustomAdminOption,
) (client.AdminClient, error) {
	if clientCfg.EnvironmentConfig != nil {
		// If the user has provided an override for the dynamic client configuration
		// then we need to honor it by not passing our own topology initializer.
		topologyInitializer = nil
	}

	// NB: append custom options coming from run options to existing options.
	options := []client.CustomAdminOption{
		func(opts client.AdminOptions) client.AdminOptions {
			return opts.SetChannelOptions(tchannelOpts).(client.AdminOptions)
		},
		func(opts client.AdminOptions) client.AdminOptions {
			return opts.SetRuntimeOptionsManager(runtimeOptsMgr).(client.AdminOptions)
		},
		func(opts client.AdminOptions) client.AdminOptions {
			return opts.SetContextPool(opts.ContextPool()).(client.AdminOptions)
		},
		func(opts client.AdminOptions) client.AdminOptions {
			return opts.SetOrigin(origin).(client.AdminOptions)
		},
		func(opts client.AdminOptions) client.AdminOptions {
			if protoEnabled {
				return opts.SetEncodingProto(encoding.NewOptions()).(client.AdminOptions)
			}
			return opts
		},
		func(opts client.AdminOptions) client.AdminOptions {
			return opts.SetSchemaRegistry(schemaRegistry).(client.AdminOptions)
		},
	}
	options = append(options, custom...)

	m3dbClient, err := clientCfg.NewAdminClient(
		client.ConfigurationParameters{
			InstrumentOptions: iOpts.
				SetMetricsScope(iOpts.MetricsScope().SubScope("m3dbclient")),
			TopologyInitializer: topologyInitializer,
		},
		options...,
	)
	if err != nil {
		return nil, err
	}

	// Kick off runtime options manager KV watches.
	clientAdminOpts := m3dbClient.Options().(client.AdminOptions)
	kvWatchClientConsistencyLevels(kvStore, logger,
		clientAdminOpts, runtimeOptsMgr)
	return m3dbClient, nil
}
// poolOptions builds object pool options from a pool policy, applying
// size, refill watermarks (only when both are positive and correctly
// ordered) and, when a scope is given, a metrics scope.
func poolOptions(
	policy config.PoolPolicy,
	scope tally.Scope,
) pool.ObjectPoolOptions {
	opts := pool.NewObjectPoolOptions()

	size := policy.SizeOrDefault()
	lowWatermark := policy.RefillLowWaterMarkOrDefault()
	highWatermark := policy.RefillHighWaterMarkOrDefault()

	if size > 0 {
		opts = opts.SetSize(size)

		watermarksValid := lowWatermark > 0 &&
			highWatermark > 0 &&
			highWatermark > lowWatermark
		if watermarksValid {
			opts = opts.
				SetRefillLowWatermark(lowWatermark).
				SetRefillHighWatermark(highWatermark)
		}
	}

	if scope != nil {
		instrumentOpts := opts.InstrumentOptions().SetMetricsScope(scope)
		opts = opts.SetInstrumentOptions(instrumentOpts)
	}
	return opts
}
// capacityPoolOptions builds object pool options from a capacity pool
// policy, applying size, refill watermarks (only when both are positive
// and correctly ordered) and, when a scope is given, a metrics scope.
func capacityPoolOptions(
	policy config.CapacityPoolPolicy,
	scope tally.Scope,
) pool.ObjectPoolOptions {
	opts := pool.NewObjectPoolOptions()

	size := policy.SizeOrDefault()
	lowWatermark := policy.RefillLowWaterMarkOrDefault()
	highWatermark := policy.RefillHighWaterMarkOrDefault()

	if size > 0 {
		opts = opts.SetSize(size)

		watermarksValid := lowWatermark > 0 &&
			highWatermark > 0 &&
			highWatermark > lowWatermark
		if watermarksValid {
			opts = opts.
				SetRefillLowWatermark(lowWatermark).
				SetRefillHighWatermark(highWatermark)
		}
	}

	if scope != nil {
		instrumentOpts := opts.InstrumentOptions().SetMetricsScope(scope)
		opts = opts.SetInstrumentOptions(instrumentOpts)
	}
	return opts
}
// maxCapacityPoolOptions builds object pool options from a max capacity
// pool policy: the pool size, refill watermarks (only when both are
// positive and correctly ordered), and a metrics scope when supplied.
func maxCapacityPoolOptions(
	policy config.MaxCapacityPoolPolicy,
	scope tally.Scope,
) pool.ObjectPoolOptions {
	opts := pool.NewObjectPoolOptions()

	size := policy.SizeOrDefault()
	if size > 0 {
		opts = opts.SetSize(size)

		low := policy.RefillLowWaterMarkOrDefault()
		high := policy.RefillHighWaterMarkOrDefault()
		// Watermarks apply only when both are set and high exceeds low.
		if low > 0 && high > low {
			opts = opts.SetRefillLowWatermark(low)
			opts = opts.SetRefillHighWatermark(high)
		}
	}

	if scope != nil {
		instrumentOpts := opts.InstrumentOptions().SetMetricsScope(scope)
		opts = opts.SetInstrumentOptions(instrumentOpts)
	}
	return opts
}
// hostSupportsHugeTLB probes whether the host can service HugeTLB-backed
// mmaps by performing a small anonymous mapping with HugeTLB enabled.
// It reports true when that mapping raises no warning, or when an
// equivalent mapping without HugeTLB raises the same warning (i.e. the
// warning is unrelated to HugeTLB support). It reports false only when
// the warning disappears once HugeTLB is disabled.
func hostSupportsHugeTLB() (bool, error) {
	// Try and determine if the host supports HugeTLB in the first place.
	withHugeTLB, err := mmap.Bytes(10, mmap.Options{
		HugeTLB: mmap.HugeTLBOptions{
			Enabled:   true,
			Threshold: 0,
		},
	})
	if err != nil {
		// Distinguish this failure from the non-HugeTLB probe below and
		// preserve the error chain with %w.
		return false, fmt.Errorf("could not mmap anonymous region with HugeTLB: %w", err)
	}
	defer mmap.Munmap(withHugeTLB)

	if withHugeTLB.Warning == nil {
		// If there was no warning, then the host didn't complain about
		// use of huge TLB.
		return true, nil
	}

	// If we got a warning, try mmap'ing without HugeTLB.
	withoutHugeTLB, err := mmap.Bytes(10, mmap.Options{})
	if err != nil {
		return false, fmt.Errorf("could not mmap anonymous region without HugeTLB: %w", err)
	}
	defer mmap.Munmap(withoutHugeTLB)

	if withoutHugeTLB.Warning == nil {
		// The machine doesn't support HugeTLB, proceed without it.
		return false, nil
	}
	// The warning was probably caused by something else, proceed using HugeTLB.
	return true, nil
}
// newTopoMapProvider wraps the given topology in a topoMapProvider.
func newTopoMapProvider(t topology.Topology) *topoMapProvider {
	return &topoMapProvider{t: t}
}
// topoMapProvider adapts a topology.Topology so that its current
// topology.Map can be fetched on demand via TopologyMap.
type topoMapProvider struct {
	t topology.Topology
}
// TopologyMap returns the current topology map, or an error when the
// underlying topology has not been set yet.
func (t *topoMapProvider) TopologyMap() (topology.Map, error) {
	if t.t == nil {
		// Fixed grammar in the error message ("has not be set").
		return nil, errors.New("topology map provider has not been set yet")
	}

	return t.t.Get(), nil
}
// Compile-time assertion that *mmapReporter satisfies mmap.Reporter.
var _ mmap.Reporter = (*mmapReporter)(nil)
// mmapReporter tracks the cumulative number of mmap'd bytes per named
// mapping (plus metadata tags) and periodically emits them as gauges.
// The embedded mutex guards the entries map.
type mmapReporter struct {
	sync.Mutex
	scope   tally.Scope
	entries map[string]*mmapReporterEntry
}
// mmapReporterEntry pairs the current mapped byte total for a tag set
// with the gauge used to report it.
type mmapReporterEntry struct {
	value int64
	gauge tally.Gauge
}
// newMmapReporter creates an mmapReporter that emits per-mapping gauges
// to the supplied metrics scope.
func newMmapReporter(scope tally.Scope) *mmapReporter {
	reporter := &mmapReporter{
		scope:   scope,
		entries: map[string]*mmapReporterEntry{},
	}
	return reporter
}
// Run reports the current mapped byte totals as gauges every 30 seconds
// until ctx is cancelled. It is intended to run in its own goroutine.
func (r *mmapReporter) Run(ctx context.Context) {
	ticker := time.NewTicker(30 * time.Second)
	defer ticker.Stop()

	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			r.Lock()
			// NB: previously the loop variable shadowed the receiver "r"
			// (which is also the mutex); renamed to avoid that footgun.
			for _, entry := range r.entries {
				entry.gauge.Update(float64(entry.value))
			}
			r.Unlock()
		}
	}
}
// entryKeyAndTags derives the canonical entry key and metric tags for an
// mmap context: the mapping name plus any metadata key/value pairs.
func (r *mmapReporter) entryKeyAndTags(ctx mmap.Context) (string, map[string]string) {
	// len of a nil map is zero and ranging over a nil map is a no-op,
	// so no explicit nil checks for ctx.Metadata are required.
	tags := make(map[string]string, 1+len(ctx.Metadata))
	tags[mmapReporterTagName] = ctx.Name
	for k, v := range ctx.Metadata {
		tags[k] = v
	}

	entryKey := tally.KeyForStringMap(tags)
	return entryKey, tags
}
// ReportMap records an mmap of ctx.Size bytes against the entry keyed by
// the context's name and metadata, lazily creating the entry and its
// gauge on first use. It errors when the context has no name.
func (r *mmapReporter) ReportMap(ctx mmap.Context) error {
	if ctx.Name == "" {
		return fmt.Errorf("report mmap map missing context name: %+v", ctx)
	}

	key, tags := r.entryKeyAndTags(ctx)

	r.Lock()
	defer r.Unlock()

	existing, found := r.entries[key]
	if !found {
		existing = &mmapReporterEntry{
			gauge: r.scope.Tagged(tags).Gauge(mmapReporterMetricName),
		}
		r.entries[key] = existing
	}
	existing.value += ctx.Size

	return nil
}
// ReportUnmap records an munmap of ctx.Size bytes against the entry keyed
// by the context's name and metadata, removing the entry entirely once
// its total drops to zero. It errors when the context has no name or no
// matching entry exists.
func (r *mmapReporter) ReportUnmap(ctx mmap.Context) error {
	if ctx.Name == "" {
		return fmt.Errorf("report mmap unmap missing context name: %+v", ctx)
	}

	key, _ := r.entryKeyAndTags(ctx)

	r.Lock()
	defer r.Unlock()

	existing, found := r.entries[key]
	if !found {
		return fmt.Errorf("report mmap unmap missing entry for context: %+v", ctx)
	}

	existing.value -= ctx.Size
	if existing.value == 0 {
		// No more similar mmaps active for this context name, garbage collect.
		delete(r.entries, key)
	}

	return nil
}
// [dbnode] Set blocksPerBatch on fetchTaggedIter from config (#3249)
// Copyright (c) 2017 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
// Package server contains the code to run the dbnode server.
package server
import (
"context"
"errors"
"fmt"
"io"
"math"
"net/http"
"os"
"path"
"runtime"
"runtime/debug"
"strings"
"sync"
"time"
clusterclient "github.com/m3db/m3/src/cluster/client"
"github.com/m3db/m3/src/cluster/client/etcd"
"github.com/m3db/m3/src/cluster/generated/proto/commonpb"
"github.com/m3db/m3/src/cluster/generated/proto/kvpb"
"github.com/m3db/m3/src/cluster/kv"
"github.com/m3db/m3/src/cmd/services/m3dbnode/config"
queryconfig "github.com/m3db/m3/src/cmd/services/m3query/config"
"github.com/m3db/m3/src/dbnode/client"
"github.com/m3db/m3/src/dbnode/encoding"
"github.com/m3db/m3/src/dbnode/encoding/m3tsz"
"github.com/m3db/m3/src/dbnode/encoding/proto"
"github.com/m3db/m3/src/dbnode/environment"
"github.com/m3db/m3/src/dbnode/kvconfig"
"github.com/m3db/m3/src/dbnode/namespace"
hjcluster "github.com/m3db/m3/src/dbnode/network/server/httpjson/cluster"
hjnode "github.com/m3db/m3/src/dbnode/network/server/httpjson/node"
"github.com/m3db/m3/src/dbnode/network/server/tchannelthrift"
ttcluster "github.com/m3db/m3/src/dbnode/network/server/tchannelthrift/cluster"
ttnode "github.com/m3db/m3/src/dbnode/network/server/tchannelthrift/node"
"github.com/m3db/m3/src/dbnode/persist/fs"
"github.com/m3db/m3/src/dbnode/persist/fs/commitlog"
"github.com/m3db/m3/src/dbnode/ratelimit"
"github.com/m3db/m3/src/dbnode/retention"
m3dbruntime "github.com/m3db/m3/src/dbnode/runtime"
"github.com/m3db/m3/src/dbnode/sharding"
"github.com/m3db/m3/src/dbnode/storage"
"github.com/m3db/m3/src/dbnode/storage/block"
"github.com/m3db/m3/src/dbnode/storage/bootstrap/result"
"github.com/m3db/m3/src/dbnode/storage/cluster"
"github.com/m3db/m3/src/dbnode/storage/index"
"github.com/m3db/m3/src/dbnode/storage/limits"
"github.com/m3db/m3/src/dbnode/storage/limits/permits"
"github.com/m3db/m3/src/dbnode/storage/series"
"github.com/m3db/m3/src/dbnode/topology"
"github.com/m3db/m3/src/dbnode/ts"
"github.com/m3db/m3/src/dbnode/ts/writes"
xtchannel "github.com/m3db/m3/src/dbnode/x/tchannel"
"github.com/m3db/m3/src/dbnode/x/xio"
"github.com/m3db/m3/src/dbnode/x/xpool"
m3ninxindex "github.com/m3db/m3/src/m3ninx/index"
"github.com/m3db/m3/src/m3ninx/postings"
"github.com/m3db/m3/src/m3ninx/postings/roaring"
"github.com/m3db/m3/src/query/api/v1/handler/placement"
"github.com/m3db/m3/src/query/api/v1/handler/prometheus/handleroptions"
xconfig "github.com/m3db/m3/src/x/config"
xcontext "github.com/m3db/m3/src/x/context"
xdebug "github.com/m3db/m3/src/x/debug"
xdocs "github.com/m3db/m3/src/x/docs"
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/instrument"
"github.com/m3db/m3/src/x/mmap"
xos "github.com/m3db/m3/src/x/os"
"github.com/m3db/m3/src/x/pool"
"github.com/m3db/m3/src/x/serialize"
xsync "github.com/m3db/m3/src/x/sync"
apachethrift "github.com/apache/thrift/lib/go/thrift"
"github.com/m3dbx/vellum/levenshtein"
"github.com/m3dbx/vellum/levenshtein2"
"github.com/m3dbx/vellum/regexp"
opentracing "github.com/opentracing/opentracing-go"
"github.com/uber-go/tally"
"github.com/uber/tchannel-go"
"go.etcd.io/etcd/embed"
"go.uber.org/zap"
)
const (
	// bootstrapConfigInitTimeout bounds how long to wait for the initial
	// bootstrap configuration value.
	bootstrapConfigInitTimeout = 10 * time.Second
	// serverGracefulCloseTimeout bounds graceful server shutdown.
	serverGracefulCloseTimeout = 10 * time.Second
	// bgProcessLimitInterval is the polling interval for the background
	// process-limits validation.
	bgProcessLimitInterval = 10 * time.Second
	// maxBgProcessLimitMonitorDuration caps how long the background
	// process-limits monitor runs.
	maxBgProcessLimitMonitorDuration = 5 * time.Minute
	// cpuProfileDuration is the sampling window for CPU profiles taken by
	// the debug endpoint.
	cpuProfileDuration = 5 * time.Second
	// filePathPrefixLockFile is the name of the lock file created under
	// the configured file path prefix.
	filePathPrefixLockFile = ".lock"
	// defaultServiceName is used for tracing when no service name is set.
	defaultServiceName = "m3dbnode"
	// skipRaiseProcessLimitsEnvVar, when set to "true", skips raising
	// process fd limits at startup.
	skipRaiseProcessLimitsEnvVar     = "SKIP_PROCESS_LIMITS_RAISE"
	skipRaiseProcessLimitsEnvVarTrue = "true"
	// mmapReporterMetricName/mmapReporterTagName are the gauge name and
	// tag key used by the mmap byte reporter.
	mmapReporterMetricName = "mmap-mapped-bytes"
	mmapReporterTagName    = "map-name"
)
// RunOptions provides options for running the server. Backwards
// compatibility is preserved as long as new fields are only added,
// never changed or removed.
type RunOptions struct {
	// ConfigFile is the YAML configuration file to use to run the server.
	ConfigFile string

	// Config is an alternate way to provide configuration and will be used
	// instead of parsing ConfigFile if ConfigFile is not specified.
	Config config.DBConfiguration

	// BootstrapCh is a channel to listen on to be notified of bootstrap.
	BootstrapCh chan<- struct{}

	// EmbeddedKVCh is a channel to listen on to be notified that the embedded KV has bootstrapped.
	EmbeddedKVCh chan<- struct{}

	// ClientCh is a channel to listen on to share the same m3db client that this server uses.
	ClientCh chan<- client.Client

	// ClusterClientCh is a channel to listen on to share the same m3 cluster client that this server uses.
	ClusterClientCh chan<- clusterclient.Client

	// KVStoreCh is a channel to listen on to share the same m3 kv store client that this server uses.
	KVStoreCh chan<- kv.Store

	// InterruptCh is a programmatic interrupt channel to supply to
	// interrupt and shutdown the server.
	InterruptCh <-chan error

	// CustomOptions are custom options to apply to the session.
	CustomOptions []client.CustomAdminOption

	// Transforms are transforms to apply to the database storage options.
	Transforms []storage.OptionTransform

	// StorageOptions are additional storage options.
	StorageOptions StorageOptions

	// CustomBuildTags are additional tags to be added to the instrument build
	// reporter.
	CustomBuildTags map[string]string
}
// Run runs the server programmatically given a filename for the
// configuration file.
func Run(runOpts RunOptions) {
var cfg config.DBConfiguration
if runOpts.ConfigFile != "" {
var rootCfg config.Configuration
if err := xconfig.LoadFile(&rootCfg, runOpts.ConfigFile, xconfig.Options{}); err != nil {
// NB(r): Use fmt.Fprintf(os.Stderr, ...) to avoid etcd.SetGlobals()
// sending stdlib "log" to black hole. Don't remove unless with good reason.
fmt.Fprintf(os.Stderr, "unable to load %s: %v", runOpts.ConfigFile, err)
os.Exit(1)
}
cfg = *rootCfg.DB
} else {
cfg = runOpts.Config
}
err := cfg.Validate()
if err != nil {
// NB(r): Use fmt.Fprintf(os.Stderr, ...) to avoid etcd.SetGlobals()
// sending stdlib "log" to black hole. Don't remove unless with good reason.
fmt.Fprintf(os.Stderr, "error initializing config defaults and validating config: %v", err)
os.Exit(1)
}
logger, err := cfg.LoggingOrDefault().BuildLogger()
if err != nil {
// NB(r): Use fmt.Fprintf(os.Stderr, ...) to avoid etcd.SetGlobals()
// sending stdlib "log" to black hole. Don't remove unless with good reason.
fmt.Fprintf(os.Stderr, "unable to create logger: %v", err)
os.Exit(1)
}
defer logger.Sync()
cfg.Debug.SetRuntimeValues(logger)
xconfig.WarnOnDeprecation(cfg, logger)
// By default attempt to raise process limits, which is a benign operation.
skipRaiseLimits := strings.TrimSpace(os.Getenv(skipRaiseProcessLimitsEnvVar))
if skipRaiseLimits != skipRaiseProcessLimitsEnvVarTrue {
// Raise fd limits to nr_open system limit
result, err := xos.RaiseProcessNoFileToNROpen()
if err != nil {
logger.Warn("unable to raise rlimit", zap.Error(err))
} else {
logger.Info("raised rlimit no file fds limit",
zap.Bool("required", result.RaisePerformed),
zap.Uint64("sysNROpenValue", result.NROpenValue),
zap.Uint64("noFileMaxValue", result.NoFileMaxValue),
zap.Uint64("noFileCurrValue", result.NoFileCurrValue))
}
}
// Parse file and directory modes
newFileMode, err := cfg.Filesystem.ParseNewFileMode()
if err != nil {
logger.Fatal("could not parse new file mode", zap.Error(err))
}
newDirectoryMode, err := cfg.Filesystem.ParseNewDirectoryMode()
if err != nil {
logger.Fatal("could not parse new directory mode", zap.Error(err))
}
// Obtain a lock on `filePathPrefix`, or exit if another process already has it.
// The lock consists of a lock file (on the file system) and a lock in memory.
// When the process exits gracefully, both the lock file and the lock will be removed.
// If the process exits ungracefully, only the lock in memory will be removed, the lock
// file will remain on the file system. When a dbnode starts after an ungracefully stop,
// it will be able to acquire the lock despite the fact the the lock file exists.
lockPath := path.Join(cfg.Filesystem.FilePathPrefixOrDefault(), filePathPrefixLockFile)
fslock, err := createAndAcquireLockfile(lockPath, newDirectoryMode)
if err != nil {
logger.Fatal("could not acqurie lock", zap.String("path", lockPath), zap.Error(err))
}
// nolint: errcheck
defer fslock.releaseLockfile()
go bgValidateProcessLimits(logger)
debug.SetGCPercent(cfg.GCPercentageOrDefault())
scope, _, err := cfg.MetricsOrDefault().NewRootScope()
if err != nil {
logger.Fatal("could not connect to metrics", zap.Error(err))
}
hostID, err := cfg.HostIDOrDefault().Resolve()
if err != nil {
logger.Fatal("could not resolve local host ID", zap.Error(err))
}
var (
tracer opentracing.Tracer
traceCloser io.Closer
)
if cfg.Tracing == nil {
tracer = opentracing.NoopTracer{}
logger.Info("tracing disabled; set `tracing.backend` to enable")
} else {
// setup tracer
serviceName := cfg.Tracing.ServiceName
if serviceName == "" {
serviceName = defaultServiceName
}
tracer, traceCloser, err = cfg.Tracing.NewTracer(serviceName, scope.SubScope("jaeger"), logger)
if err != nil {
tracer = opentracing.NoopTracer{}
logger.Warn("could not initialize tracing; using no-op tracer instead",
zap.String("service", serviceName), zap.Error(err))
} else {
defer traceCloser.Close()
logger.Info("tracing enabled", zap.String("service", serviceName))
}
}
// Presence of KV server config indicates embedded etcd cluster
discoveryConfig := cfg.DiscoveryOrDefault()
envConfig, err := discoveryConfig.EnvironmentConfig(hostID)
if err != nil {
logger.Fatal("could not get env config from discovery config", zap.Error(err))
}
if envConfig.SeedNodes == nil {
logger.Info("no seed nodes set, using dedicated etcd cluster")
} else {
// Default etcd client clusters if not set already
service, err := envConfig.Services.SyncCluster()
if err != nil {
logger.Fatal("invalid cluster configuration", zap.Error(err))
}
clusters := service.Service.ETCDClusters
seedNodes := envConfig.SeedNodes.InitialCluster
if len(clusters) == 0 {
endpoints, err := config.InitialClusterEndpoints(seedNodes)
if err != nil {
logger.Fatal("unable to create etcd clusters", zap.Error(err))
}
zone := service.Service.Zone
logger.Info("using seed nodes etcd cluster",
zap.String("zone", zone), zap.Strings("endpoints", endpoints))
service.Service.ETCDClusters = []etcd.ClusterConfig{{
Zone: zone,
Endpoints: endpoints,
}}
}
seedNodeHostIDs := make([]string, 0, len(seedNodes))
for _, entry := range seedNodes {
seedNodeHostIDs = append(seedNodeHostIDs, entry.HostID)
}
logger.Info("resolving seed node configuration",
zap.String("hostID", hostID), zap.Strings("seedNodeHostIDs", seedNodeHostIDs),
)
if !config.IsSeedNode(seedNodes, hostID) {
logger.Info("not a seed node, using cluster seed nodes")
} else {
logger.Info("seed node, starting etcd server")
etcdCfg, err := config.NewEtcdEmbedConfig(cfg)
if err != nil {
logger.Fatal("unable to create etcd config", zap.Error(err))
}
e, err := embed.StartEtcd(etcdCfg)
if err != nil {
logger.Fatal("could not start embedded etcd", zap.Error(err))
}
if runOpts.EmbeddedKVCh != nil {
// Notify on embedded KV bootstrap chan if specified
runOpts.EmbeddedKVCh <- struct{}{}
}
defer e.Close()
}
}
// By default use histogram timers for timers that
// are constructed allowing for type to be picked
// by the caller using instrument.NewTimer(...).
timerOpts := instrument.NewHistogramTimerOptions(instrument.HistogramTimerOptions{})
timerOpts.StandardSampleRate = cfg.MetricsOrDefault().SampleRate()
var (
opts = storage.NewOptions()
iOpts = opts.InstrumentOptions().
SetLogger(logger).
SetMetricsScope(scope).
SetTimerOptions(timerOpts).
SetTracer(tracer).
SetCustomBuildTags(runOpts.CustomBuildTags)
)
opts = opts.SetInstrumentOptions(iOpts)
// Only override the default MemoryTracker (which has default limits) if a custom limit has
// been set.
if cfg.Limits.MaxOutstandingRepairedBytes > 0 {
memTrackerOptions := storage.NewMemoryTrackerOptions(cfg.Limits.MaxOutstandingRepairedBytes)
memTracker := storage.NewMemoryTracker(memTrackerOptions)
opts = opts.SetMemoryTracker(memTracker)
}
opentracing.SetGlobalTracer(tracer)
if cfg.Index.MaxQueryIDsConcurrency != 0 {
queryIDsWorkerPool := xsync.NewWorkerPool(cfg.Index.MaxQueryIDsConcurrency)
queryIDsWorkerPool.Init()
opts = opts.SetQueryIDsWorkerPool(queryIDsWorkerPool)
} else {
logger.Warn("max index query IDs concurrency was not set, falling back to default value")
}
// Set global index options.
if n := cfg.Index.RegexpDFALimitOrDefault(); n > 0 {
regexp.SetStateLimit(n)
levenshtein.SetStateLimit(n)
levenshtein2.SetStateLimit(n)
}
if n := cfg.Index.RegexpFSALimitOrDefault(); n > 0 {
regexp.SetDefaultLimit(n)
}
buildReporter := instrument.NewBuildReporter(iOpts)
if err := buildReporter.Start(); err != nil {
logger.Fatal("unable to start build reporter", zap.Error(err))
}
defer buildReporter.Stop()
mmapCfg := cfg.Filesystem.MmapConfigurationOrDefault()
shouldUseHugeTLB := mmapCfg.HugeTLB.Enabled
if shouldUseHugeTLB {
// Make sure the host supports HugeTLB before proceeding with it to prevent
// excessive log spam.
shouldUseHugeTLB, err = hostSupportsHugeTLB()
if err != nil {
logger.Fatal("could not determine if host supports HugeTLB", zap.Error(err))
}
if !shouldUseHugeTLB {
logger.Warn("host doesn't support HugeTLB, proceeding without it")
}
}
mmapReporter := newMmapReporter(scope)
mmapReporterCtx, cancel := context.WithCancel(context.Background())
defer cancel()
go mmapReporter.Run(mmapReporterCtx)
opts = opts.SetMmapReporter(mmapReporter)
runtimeOpts := m3dbruntime.NewOptions().
SetPersistRateLimitOptions(ratelimit.NewOptions().
SetLimitEnabled(true).
SetLimitMbps(cfg.Filesystem.ThroughputLimitMbpsOrDefault()).
SetLimitCheckEvery(cfg.Filesystem.ThroughputCheckEveryOrDefault())).
SetWriteNewSeriesAsync(cfg.WriteNewSeriesAsyncOrDefault()).
SetWriteNewSeriesBackoffDuration(cfg.WriteNewSeriesBackoffDurationOrDefault())
if lruCfg := cfg.Cache.SeriesConfiguration().LRU; lruCfg != nil {
runtimeOpts = runtimeOpts.SetMaxWiredBlocks(lruCfg.MaxBlocks)
}
// Setup query stats tracking.
var (
docsLimit = limits.DefaultLookbackLimitOptions()
bytesReadLimit = limits.DefaultLookbackLimitOptions()
diskSeriesReadLimit = limits.DefaultLookbackLimitOptions()
aggDocsLimit = limits.DefaultLookbackLimitOptions()
)
if limitConfig := runOpts.Config.Limits.MaxRecentlyQueriedSeriesBlocks; limitConfig != nil {
docsLimit.Limit = limitConfig.Value
docsLimit.Lookback = limitConfig.Lookback
}
if limitConfig := runOpts.Config.Limits.MaxRecentlyQueriedSeriesDiskBytesRead; limitConfig != nil {
bytesReadLimit.Limit = limitConfig.Value
bytesReadLimit.Lookback = limitConfig.Lookback
}
if limitConfig := runOpts.Config.Limits.MaxRecentlyQueriedSeriesDiskRead; limitConfig != nil {
diskSeriesReadLimit.Limit = limitConfig.Value
diskSeriesReadLimit.Lookback = limitConfig.Lookback
}
if limitConfig := runOpts.Config.Limits.MaxRecentlyQueriedMetadata; limitConfig != nil {
aggDocsLimit.Limit = limitConfig.Value
aggDocsLimit.Lookback = limitConfig.Lookback
}
limitOpts := limits.NewOptions().
SetDocsLimitOpts(docsLimit).
SetBytesReadLimitOpts(bytesReadLimit).
SetDiskSeriesReadLimitOpts(diskSeriesReadLimit).
SetAggregateDocsLimitOpts(aggDocsLimit).
SetInstrumentOptions(iOpts)
if builder := opts.SourceLoggerBuilder(); builder != nil {
limitOpts = limitOpts.SetSourceLoggerBuilder(builder)
}
queryLimits, err := limits.NewQueryLimits(limitOpts)
if err != nil {
logger.Fatal("could not construct docs query limits from config", zap.Error(err))
}
queryLimits.Start()
defer queryLimits.Stop()
seriesReadPermits := permits.NewLookbackLimitPermitsManager(
"disk-series-read",
diskSeriesReadLimit,
iOpts,
limitOpts.SourceLoggerBuilder(),
runOpts.Config.FetchTagged.SeriesBlocksPerBatchOrDefault(),
)
seriesReadPermits.Start()
defer seriesReadPermits.Stop()
opts = opts.SetPermitsOptions(opts.PermitsOptions().
SetSeriesReadPermitsManager(seriesReadPermits))
// Setup postings list cache.
var (
plCacheConfig = cfg.Cache.PostingsListConfiguration()
plCacheSize = plCacheConfig.SizeOrDefault()
plCacheOptions = index.PostingsListCacheOptions{
InstrumentOptions: opts.InstrumentOptions().
SetMetricsScope(scope.SubScope("postings-list-cache")),
}
)
postingsListCache, stopReporting, err := index.NewPostingsListCache(plCacheSize, plCacheOptions)
if err != nil {
logger.Fatal("could not construct postings list cache", zap.Error(err))
}
defer stopReporting()
// Setup index regexp compilation cache.
m3ninxindex.SetRegexpCacheOptions(m3ninxindex.RegexpCacheOptions{
Size: cfg.Cache.RegexpConfiguration().SizeOrDefault(),
Scope: iOpts.MetricsScope(),
})
for _, transform := range runOpts.Transforms {
opts = transform(opts)
}
// FOLLOWUP(prateek): remove this once we have the runtime options<->index wiring done
indexOpts := opts.IndexOptions()
insertMode := index.InsertSync
if cfg.WriteNewSeriesAsyncOrDefault() {
insertMode = index.InsertAsync
}
indexOpts = indexOpts.SetInsertMode(insertMode).
SetPostingsListCache(postingsListCache).
SetReadThroughSegmentOptions(index.ReadThroughSegmentOptions{
CacheRegexp: plCacheConfig.CacheRegexpOrDefault(),
CacheTerms: plCacheConfig.CacheTermsOrDefault(),
}).
SetMmapReporter(mmapReporter).
SetQueryLimits(queryLimits)
opts = opts.SetIndexOptions(indexOpts)
if tick := cfg.Tick; tick != nil {
runtimeOpts = runtimeOpts.
SetTickSeriesBatchSize(tick.SeriesBatchSize).
SetTickPerSeriesSleepDuration(tick.PerSeriesSleepDuration).
SetTickMinimumInterval(tick.MinimumInterval)
}
runtimeOptsMgr := m3dbruntime.NewOptionsManager()
if err := runtimeOptsMgr.Update(runtimeOpts); err != nil {
logger.Fatal("could not set initial runtime options", zap.Error(err))
}
defer runtimeOptsMgr.Close()
opts = opts.SetRuntimeOptionsManager(runtimeOptsMgr)
policy, err := cfg.PoolingPolicyOrDefault()
if err != nil {
logger.Fatal("could not get pooling policy", zap.Error(err))
}
tagEncoderPool := serialize.NewTagEncoderPool(
serialize.NewTagEncoderOptions(),
poolOptions(
policy.TagEncoderPool,
scope.SubScope("tag-encoder-pool")))
tagEncoderPool.Init()
tagDecoderPool := serialize.NewTagDecoderPool(
serialize.NewTagDecoderOptions(serialize.TagDecoderOptionsConfig{}),
poolOptions(
policy.TagDecoderPool,
scope.SubScope("tag-decoder-pool")))
tagDecoderPool.Init()
// Pass nil for block.LeaseVerifier for now and it will be set after the
// db is constructed (since the db is required to construct a
// block.LeaseVerifier). Initialized here because it needs to be propagated
// to both the DB and the blockRetriever.
blockLeaseManager := block.NewLeaseManager(nil)
opts = opts.SetBlockLeaseManager(blockLeaseManager)
fsopts := fs.NewOptions().
SetClockOptions(opts.ClockOptions()).
SetInstrumentOptions(opts.InstrumentOptions().
SetMetricsScope(scope.SubScope("database.fs"))).
SetFilePathPrefix(cfg.Filesystem.FilePathPrefixOrDefault()).
SetNewFileMode(newFileMode).
SetNewDirectoryMode(newDirectoryMode).
SetWriterBufferSize(cfg.Filesystem.WriteBufferSizeOrDefault()).
SetDataReaderBufferSize(cfg.Filesystem.DataReadBufferSizeOrDefault()).
SetInfoReaderBufferSize(cfg.Filesystem.InfoReadBufferSizeOrDefault()).
SetSeekReaderBufferSize(cfg.Filesystem.SeekReadBufferSizeOrDefault()).
SetMmapEnableHugeTLB(shouldUseHugeTLB).
SetMmapHugeTLBThreshold(mmapCfg.HugeTLB.Threshold).
SetRuntimeOptionsManager(runtimeOptsMgr).
SetTagEncoderPool(tagEncoderPool).
SetTagDecoderPool(tagDecoderPool).
SetForceIndexSummariesMmapMemory(cfg.Filesystem.ForceIndexSummariesMmapMemoryOrDefault()).
SetForceBloomFilterMmapMemory(cfg.Filesystem.ForceBloomFilterMmapMemoryOrDefault()).
SetIndexBloomFilterFalsePositivePercent(cfg.Filesystem.BloomFilterFalsePositivePercentOrDefault()).
SetMmapReporter(mmapReporter)
var commitLogQueueSize int
cfgCommitLog := cfg.CommitLogOrDefault()
specified := cfgCommitLog.Queue.Size
switch cfgCommitLog.Queue.CalculationType {
case config.CalculationTypeFixed:
commitLogQueueSize = specified
case config.CalculationTypePerCPU:
commitLogQueueSize = specified * runtime.NumCPU()
default:
logger.Fatal("unknown commit log queue size type",
zap.Any("type", cfgCommitLog.Queue.CalculationType))
}
var commitLogQueueChannelSize int
if cfgCommitLog.QueueChannel != nil {
specified := cfgCommitLog.QueueChannel.Size
switch cfgCommitLog.Queue.CalculationType {
case config.CalculationTypeFixed:
commitLogQueueChannelSize = specified
case config.CalculationTypePerCPU:
commitLogQueueChannelSize = specified * runtime.NumCPU()
default:
logger.Fatal("unknown commit log queue channel size type",
zap.Any("type", cfgCommitLog.Queue.CalculationType))
}
} else {
commitLogQueueChannelSize = int(float64(commitLogQueueSize) / commitlog.MaximumQueueSizeQueueChannelSizeRatio)
}
// Set the series cache policy.
seriesCachePolicy := cfg.Cache.SeriesConfiguration().Policy
opts = opts.SetSeriesCachePolicy(seriesCachePolicy)
// Apply pooling options.
poolingPolicy, err := cfg.PoolingPolicyOrDefault()
if err != nil {
logger.Fatal("could not get pooling policy", zap.Error(err))
}
opts = withEncodingAndPoolingOptions(cfg, logger, opts, poolingPolicy)
opts = opts.SetCommitLogOptions(opts.CommitLogOptions().
SetInstrumentOptions(opts.InstrumentOptions()).
SetFilesystemOptions(fsopts).
SetStrategy(commitlog.StrategyWriteBehind).
SetFlushSize(cfgCommitLog.FlushMaxBytes).
SetFlushInterval(cfgCommitLog.FlushEvery).
SetBacklogQueueSize(commitLogQueueSize).
SetBacklogQueueChannelSize(commitLogQueueChannelSize))
// Setup the block retriever
switch seriesCachePolicy {
case series.CacheAll:
// No options needed to be set
default:
// All other caching strategies require retrieving series from disk
// to service a cache miss
retrieverOpts := fs.NewBlockRetrieverOptions().
SetBytesPool(opts.BytesPool()).
SetRetrieveRequestPool(opts.RetrieveRequestPool()).
SetIdentifierPool(opts.IdentifierPool()).
SetBlockLeaseManager(blockLeaseManager).
SetQueryLimits(queryLimits)
if blockRetrieveCfg := cfg.BlockRetrieve; blockRetrieveCfg != nil {
if v := blockRetrieveCfg.FetchConcurrency; v != nil {
retrieverOpts = retrieverOpts.SetFetchConcurrency(*v)
}
if v := blockRetrieveCfg.CacheBlocksOnRetrieve; v != nil {
retrieverOpts = retrieverOpts.SetCacheBlocksOnRetrieve(*v)
}
}
blockRetrieverMgr := block.NewDatabaseBlockRetrieverManager(
func(md namespace.Metadata, shardSet sharding.ShardSet) (block.DatabaseBlockRetriever, error) {
retriever, err := fs.NewBlockRetriever(retrieverOpts, fsopts)
if err != nil {
return nil, err
}
if err := retriever.Open(md, shardSet); err != nil {
return nil, err
}
return retriever, nil
})
opts = opts.SetDatabaseBlockRetrieverManager(blockRetrieverMgr)
}
// Set the persistence manager
pm, err := fs.NewPersistManager(fsopts)
if err != nil {
logger.Fatal("could not create persist manager", zap.Error(err))
}
opts = opts.SetPersistManager(pm)
// Set the index claims manager
icm, err := fs.NewIndexClaimsManager(fsopts)
if err != nil {
logger.Fatal("could not create index claims manager", zap.Error(err))
}
defer func() {
// Reset counter of index claims managers after server teardown.
fs.ResetIndexClaimsManagersUnsafe()
}()
opts = opts.SetIndexClaimsManager(icm)
if value := cfg.ForceColdWritesEnabled; value != nil {
// Allow forcing cold writes to be enabled by config.
opts = opts.SetForceColdWritesEnabled(*value)
}
forceColdWrites := opts.ForceColdWritesEnabled()
var envCfgResults environment.ConfigureResults
if len(envConfig.Statics) == 0 {
logger.Info("creating dynamic config service client with m3cluster")
envCfgResults, err = envConfig.Configure(environment.ConfigurationParameters{
InstrumentOpts: iOpts,
HashingSeed: cfg.Hashing.Seed,
NewDirectoryMode: newDirectoryMode,
ForceColdWritesEnabled: forceColdWrites,
})
if err != nil {
logger.Fatal("could not initialize dynamic config", zap.Error(err))
}
} else {
logger.Info("creating static config service client with m3cluster")
envCfgResults, err = envConfig.Configure(environment.ConfigurationParameters{
InstrumentOpts: iOpts,
HostID: hostID,
ForceColdWritesEnabled: forceColdWrites,
})
if err != nil {
logger.Fatal("could not initialize static config", zap.Error(err))
}
}
syncCfg, err := envCfgResults.SyncCluster()
if err != nil {
logger.Fatal("invalid cluster config", zap.Error(err))
}
if runOpts.ClusterClientCh != nil {
runOpts.ClusterClientCh <- syncCfg.ClusterClient
}
if runOpts.KVStoreCh != nil {
runOpts.KVStoreCh <- syncCfg.KVStore
}
opts = opts.SetNamespaceInitializer(syncCfg.NamespaceInitializer)
// Set tchannelthrift options.
ttopts := tchannelthrift.NewOptions().
SetClockOptions(opts.ClockOptions()).
SetInstrumentOptions(opts.InstrumentOptions()).
SetTopologyInitializer(syncCfg.TopologyInitializer).
SetIdentifierPool(opts.IdentifierPool()).
SetTagEncoderPool(tagEncoderPool).
SetTagDecoderPool(tagDecoderPool).
SetCheckedBytesWrapperPool(opts.CheckedBytesWrapperPool()).
SetMaxOutstandingWriteRequests(cfg.Limits.MaxOutstandingWriteRequests).
SetMaxOutstandingReadRequests(cfg.Limits.MaxOutstandingReadRequests).
SetQueryLimits(queryLimits).
SetFetchTaggedSeriesBlocksPerBatch(cfg.FetchTagged.SeriesBlocksPerBatchOrDefault()).
SetPermitsOptions(opts.PermitsOptions())
// Start servers before constructing the DB so orchestration tools can check health endpoints
// before topology is set.
var (
contextPool = opts.ContextPool()
tchannelOpts = xtchannel.NewDefaultChannelOptions()
// Pass nil for the database argument because we haven't constructed it yet. We'll call
// SetDatabase() once we've initialized it.
service = ttnode.NewService(nil, ttopts)
)
if cfg.TChannel != nil {
tchannelOpts.MaxIdleTime = cfg.TChannel.MaxIdleTime
tchannelOpts.IdleCheckInterval = cfg.TChannel.IdleCheckInterval
}
tchanOpts := ttnode.NewOptions(tchannelOpts).
SetInstrumentOptions(opts.InstrumentOptions())
if fn := runOpts.StorageOptions.TChanChannelFn; fn != nil {
tchanOpts = tchanOpts.SetTChanChannelFn(fn)
}
if fn := runOpts.StorageOptions.TChanNodeServerFn; fn != nil {
tchanOpts = tchanOpts.SetTChanNodeServerFn(fn)
}
listenAddress := cfg.ListenAddressOrDefault()
tchannelthriftNodeClose, err := ttnode.NewServer(service,
listenAddress, contextPool, tchanOpts).ListenAndServe()
if err != nil {
logger.Fatal("could not open tchannelthrift interface",
zap.String("address", listenAddress), zap.Error(err))
}
defer tchannelthriftNodeClose()
logger.Info("node tchannelthrift: listening", zap.String("address", listenAddress))
httpListenAddress := cfg.HTTPNodeListenAddressOrDefault()
httpjsonNodeClose, err := hjnode.NewServer(service,
httpListenAddress, contextPool, nil).ListenAndServe()
if err != nil {
logger.Fatal("could not open httpjson interface",
zap.String("address", httpListenAddress), zap.Error(err))
}
defer httpjsonNodeClose()
logger.Info("node httpjson: listening", zap.String("address", httpListenAddress))
debugListenAddress := cfg.DebugListenAddressOrDefault()
if debugListenAddress != "" {
var debugWriter xdebug.ZipWriter
handlerOpts, err := placement.NewHandlerOptions(syncCfg.ClusterClient,
queryconfig.Configuration{}, nil, iOpts)
if err != nil {
logger.Warn("could not create handler options for debug writer", zap.Error(err))
} else {
envCfgCluster, err := envConfig.Services.SyncCluster()
if err != nil || envCfgCluster.Service == nil {
logger.Warn("could not get cluster config for debug writer",
zap.Error(err),
zap.Bool("envCfgClusterServiceIsNil", envCfgCluster.Service == nil))
} else {
debugWriter, err = xdebug.NewPlacementAndNamespaceZipWriterWithDefaultSources(
cpuProfileDuration,
syncCfg.ClusterClient,
handlerOpts,
[]handleroptions.ServiceNameAndDefaults{
{
ServiceName: handleroptions.M3DBServiceName,
Defaults: []handleroptions.ServiceOptionsDefault{
handleroptions.WithDefaultServiceEnvironment(envCfgCluster.Service.Env),
handleroptions.WithDefaultServiceZone(envCfgCluster.Service.Zone),
},
},
},
iOpts)
if err != nil {
logger.Error("unable to create debug writer", zap.Error(err))
}
}
}
go func() {
mux := http.DefaultServeMux
if debugWriter != nil {
if err := debugWriter.RegisterHandler(xdebug.DebugURL, mux); err != nil {
logger.Error("unable to register debug writer endpoint", zap.Error(err))
}
}
if err := http.ListenAndServe(debugListenAddress, mux); err != nil {
logger.Error("debug server could not listen",
zap.String("address", debugListenAddress), zap.Error(err))
} else {
logger.Info("debug server listening",
zap.String("address", debugListenAddress),
)
}
}()
}
topo, err := syncCfg.TopologyInitializer.Init()
if err != nil {
logger.Fatal("could not initialize m3db topology", zap.Error(err))
}
var protoEnabled bool
if cfg.Proto != nil && cfg.Proto.Enabled {
protoEnabled = true
}
schemaRegistry := namespace.NewSchemaRegistry(protoEnabled, logger)
// For application m3db client integration test convenience (where a local dbnode is started as a docker container),
// we allow loading user schema from local file into schema registry.
if protoEnabled {
for nsID, protoConfig := range cfg.Proto.SchemaRegistry {
dummyDeployID := "fromconfig"
if err := namespace.LoadSchemaRegistryFromFile(schemaRegistry, ident.StringID(nsID),
dummyDeployID,
protoConfig.SchemaFilePath, protoConfig.MessageName); err != nil {
logger.Fatal("could not load schema from configuration", zap.Error(err))
}
}
}
origin := topology.NewHost(hostID, "")
m3dbClient, err := newAdminClient(
cfg.Client, iOpts, tchannelOpts, syncCfg.TopologyInitializer,
runtimeOptsMgr, origin, protoEnabled, schemaRegistry,
syncCfg.KVStore, logger, runOpts.CustomOptions)
if err != nil {
logger.Fatal("could not create m3db client", zap.Error(err))
}
if runOpts.ClientCh != nil {
runOpts.ClientCh <- m3dbClient
}
documentsBuilderAlloc := index.NewBootstrapResultDocumentsBuilderAllocator(
opts.IndexOptions())
rsOpts := result.NewOptions().
SetInstrumentOptions(opts.InstrumentOptions()).
SetDatabaseBlockOptions(opts.DatabaseBlockOptions()).
SetSeriesCachePolicy(opts.SeriesCachePolicy()).
SetIndexDocumentsBuilderAllocator(documentsBuilderAlloc)
var repairClients []client.AdminClient
if cfg.Repair != nil && cfg.Repair.Enabled {
repairClients = append(repairClients, m3dbClient)
}
if cfg.Replication != nil {
for _, cluster := range cfg.Replication.Clusters {
if !cluster.RepairEnabled {
continue
}
// Pass nil for the topology initializer because we want to create
// a new one for the cluster we wish to replicate from, not use the
// same one as the cluster this node belongs to.
var topologyInitializer topology.Initializer
// Guaranteed to not be nil if repair is enabled by config validation.
clientCfg := *cluster.Client
clusterClient, err := newAdminClient(
clientCfg, iOpts, tchannelOpts, topologyInitializer,
runtimeOptsMgr, origin, protoEnabled, schemaRegistry,
syncCfg.KVStore, logger, runOpts.CustomOptions)
if err != nil {
logger.Fatal(
"unable to create client for replicated cluster",
zap.String("clusterName", cluster.Name), zap.Error(err))
}
repairClients = append(repairClients, clusterClient)
}
}
repairEnabled := len(repairClients) > 0
if repairEnabled {
repairOpts := opts.RepairOptions().
SetAdminClients(repairClients)
if cfg.Repair != nil {
repairOpts = repairOpts.
SetResultOptions(rsOpts).
SetDebugShadowComparisonsEnabled(cfg.Repair.DebugShadowComparisonsEnabled)
if cfg.Repair.Throttle > 0 {
repairOpts = repairOpts.SetRepairThrottle(cfg.Repair.Throttle)
}
if cfg.Repair.CheckInterval > 0 {
repairOpts = repairOpts.SetRepairCheckInterval(cfg.Repair.CheckInterval)
}
if cfg.Repair.DebugShadowComparisonsPercentage > 0 {
// Set conditionally to avoid stomping on the default value of 1.0.
repairOpts = repairOpts.SetDebugShadowComparisonsPercentage(cfg.Repair.DebugShadowComparisonsPercentage)
}
}
opts = opts.
SetRepairEnabled(true).
SetRepairOptions(repairOpts)
} else {
opts = opts.SetRepairEnabled(false)
}
// Set bootstrap options - We need to create a topology map provider from the
// same topology that will be passed to the cluster so that when we make
// bootstrapping decisions they are in sync with the clustered database
// which is triggering the actual bootstraps. This way, when the clustered
// database receives a topology update and decides to kick off a bootstrap,
// the bootstrap process will receaive a topology map that is at least as
// recent as the one that triggered the bootstrap, if not newer.
// See GitHub issue #1013 for more details.
topoMapProvider := newTopoMapProvider(topo)
bs, err := cfg.Bootstrap.New(
rsOpts, opts, topoMapProvider, origin, m3dbClient,
)
if err != nil {
logger.Fatal("could not create bootstrap process", zap.Error(err))
}
opts = opts.SetBootstrapProcessProvider(bs)
// Start the cluster services now that the M3DB client is available.
clusterListenAddress := cfg.ClusterListenAddressOrDefault()
tchannelthriftClusterClose, err := ttcluster.NewServer(m3dbClient,
clusterListenAddress, contextPool, tchannelOpts).ListenAndServe()
if err != nil {
logger.Fatal("could not open tchannelthrift interface",
zap.String("address", clusterListenAddress), zap.Error(err))
}
defer tchannelthriftClusterClose()
logger.Info("cluster tchannelthrift: listening", zap.String("address", clusterListenAddress))
httpClusterListenAddress := cfg.HTTPClusterListenAddressOrDefault()
httpjsonClusterClose, err := hjcluster.NewServer(m3dbClient,
httpClusterListenAddress, contextPool, nil).ListenAndServe()
if err != nil {
logger.Fatal("could not open httpjson interface",
zap.String("address", httpClusterListenAddress), zap.Error(err))
}
defer httpjsonClusterClose()
logger.Info("cluster httpjson: listening", zap.String("address", httpClusterListenAddress))
// Initialize clustered database.
clusterTopoWatch, err := topo.Watch()
if err != nil {
logger.Fatal("could not create cluster topology watch", zap.Error(err))
}
opts = opts.SetSchemaRegistry(schemaRegistry).
SetAdminClient(m3dbClient)
if cfg.WideConfig != nil && cfg.WideConfig.BatchSize > 0 {
opts = opts.SetWideBatchSize(cfg.WideConfig.BatchSize)
}
db, err := cluster.NewDatabase(hostID, topo, clusterTopoWatch, opts)
if err != nil {
logger.Fatal("could not construct database", zap.Error(err))
}
// Now that the database has been created it can be set as the block lease verifier
// on the block lease manager.
leaseVerifier := storage.NewLeaseVerifier(db)
blockLeaseManager.SetLeaseVerifier(leaseVerifier)
if err := db.Open(); err != nil {
logger.Fatal("could not open database", zap.Error(err))
}
// Now that we've initialized the database we can set it on the service.
service.SetDatabase(db)
go func() {
if runOpts.BootstrapCh != nil {
// Notify on bootstrap chan if specified.
defer func() {
runOpts.BootstrapCh <- struct{}{}
}()
}
// Bootstrap asynchronously so we can handle interrupt.
if err := db.Bootstrap(); err != nil {
logger.Fatal("could not bootstrap database", zap.Error(err))
}
logger.Info("bootstrapped")
// Only set the write new series limit after bootstrapping
kvWatchNewSeriesLimitPerShard(syncCfg.KVStore, logger, topo,
runtimeOptsMgr, cfg.Limits.WriteNewSeriesPerSecond)
kvWatchEncodersPerBlockLimit(syncCfg.KVStore, logger,
runtimeOptsMgr, cfg.Limits.MaxEncodersPerBlock)
kvWatchQueryLimit(syncCfg.KVStore, logger,
queryLimits.FetchDocsLimit(),
queryLimits.BytesReadLimit(),
// For backwards compatibility as M3 moves toward permits instead of time-based limits,
// the series-read path uses permits which are implemented with limits, and so we support
// dynamic updates to this limit-based permit still be passing downstream the limit itself.
seriesReadPermits.Limit,
queryLimits.AggregateDocsLimit(),
limitOpts,
)
}()
// Wait for process interrupt.
xos.WaitForInterrupt(logger, xos.InterruptOptions{
InterruptCh: runOpts.InterruptCh,
})
// Attempt graceful server close.
closedCh := make(chan struct{})
go func() {
err := db.Terminate()
if err != nil {
logger.Error("close database error", zap.Error(err))
}
closedCh <- struct{}{}
}()
// Wait then close or hard close.
closeTimeout := serverGracefulCloseTimeout
select {
case <-closedCh:
logger.Info("server closed")
case <-time.After(closeTimeout):
logger.Error("server closed after timeout", zap.Duration("timeout", closeTimeout))
}
}
// bgValidateProcessLimits periodically re-validates process resource limits in
// the background for the early part of the process lifetime, warning (with a
// documentation link) while the limits remain invalid.
func bgValidateProcessLimits(logger *zap.Logger) {
	// If the current configuration does not support validating process
	// limits at all, skip running the background validator task.
	canValidate, message := canValidateProcessLimits()
	if !canValidate {
		logger.Warn("cannot validate process limits: invalid configuration found",
			zap.String("message", message))
		return
	}

	// Only monitor for the first maxBgProcessLimitMonitorDuration of the
	// process lifetime.
	deadline := time.Now().Add(maxBgProcessLimitMonitorDuration)
	ticker := time.NewTicker(bgProcessLimitInterval)
	defer ticker.Stop()

	for {
		if time.Now().After(deadline) {
			return
		}
		if err := validateProcessLimits(); err == nil {
			return
		} else {
			logger.Warn("invalid configuration found, refer to linked documentation for more information",
				zap.String("url", xdocs.Path("operational_guide/kernel_configuration")),
				zap.Error(err),
			)
		}
		<-ticker.C
	}
}
// kvWatchNewSeriesLimitPerShard resolves the cluster-wide new series insert
// limit from KV (falling back to the config default when unset or
// unreadable), applies it as a per-shard limit, and then watches the KV key
// to re-apply the limit on every change.
func kvWatchNewSeriesLimitPerShard(
	store kv.Store,
	logger *zap.Logger,
	topo topology.Topology,
	runtimeOptsMgr m3dbruntime.OptionsManager,
	defaultClusterNewSeriesLimit int,
) {
	var initClusterLimit int

	value, err := store.Get(kvconfig.ClusterNewSeriesInsertLimitKey)
	if err == nil {
		protoValue := &commonpb.Int64Proto{}
		err = value.Unmarshal(protoValue)
		if err == nil {
			initClusterLimit = int(protoValue.Value)
		}
	}

	if err != nil {
		// ErrNotFound just means no dynamic override has been set yet; any
		// other error is worth surfacing. Use errors.Is so wrapped errors
		// are matched too, consistent with the rest of this file.
		if !errors.Is(err, kv.ErrNotFound) {
			logger.Warn("error resolving cluster new series insert limit", zap.Error(err))
		}
		initClusterLimit = defaultClusterNewSeriesLimit
	}

	err = setNewSeriesLimitPerShardOnChange(topo, runtimeOptsMgr, initClusterLimit)
	if err != nil {
		logger.Warn("unable to set cluster new series insert limit", zap.Error(err))
	}

	watch, err := store.Watch(kvconfig.ClusterNewSeriesInsertLimitKey)
	if err != nil {
		logger.Error("could not watch cluster new series insert limit", zap.Error(err))
		return
	}

	go func() {
		protoValue := &commonpb.Int64Proto{}
		for range watch.C() {
			value := defaultClusterNewSeriesLimit
			if newValue := watch.Get(); newValue != nil {
				if err := newValue.Unmarshal(protoValue); err != nil {
					logger.Warn("unable to parse new cluster new series insert limit", zap.Error(err))
					continue
				}
				value = int(protoValue.Value)
			}

			// Use a goroutine-local err rather than assigning to the
			// captured outer err variable, which would race with the
			// parent goroutine.
			if err := setNewSeriesLimitPerShardOnChange(topo, runtimeOptsMgr, value); err != nil {
				logger.Warn("unable to set cluster new series insert limit", zap.Error(err))
				continue
			}
		}
	}()
}
// kvWatchEncodersPerBlockLimit resolves the encoders-per-block limit from KV
// (falling back to the config default when unset or unreadable), applies it
// via the runtime options manager, and then watches the KV key to re-apply
// the limit on every change.
func kvWatchEncodersPerBlockLimit(
	store kv.Store,
	logger *zap.Logger,
	runtimeOptsMgr m3dbruntime.OptionsManager,
	defaultEncodersPerBlockLimit int,
) {
	var initEncoderLimit int

	value, err := store.Get(kvconfig.EncodersPerBlockLimitKey)
	if err == nil {
		protoValue := &commonpb.Int64Proto{}
		err = value.Unmarshal(protoValue)
		if err == nil {
			initEncoderLimit = int(protoValue.Value)
		}
	}

	if err != nil {
		// ErrNotFound just means no dynamic override has been set yet; any
		// other error is worth surfacing. Use errors.Is so wrapped errors
		// are matched too, consistent with the rest of this file.
		if !errors.Is(err, kv.ErrNotFound) {
			logger.Warn("error resolving encoder per block limit", zap.Error(err))
		}
		initEncoderLimit = defaultEncodersPerBlockLimit
	}

	err = setEncodersPerBlockLimitOnChange(runtimeOptsMgr, initEncoderLimit)
	if err != nil {
		logger.Warn("unable to set encoder per block limit", zap.Error(err))
	}

	watch, err := store.Watch(kvconfig.EncodersPerBlockLimitKey)
	if err != nil {
		logger.Error("could not watch encoder per block limit", zap.Error(err))
		return
	}

	go func() {
		protoValue := &commonpb.Int64Proto{}
		for range watch.C() {
			value := defaultEncodersPerBlockLimit
			if newValue := watch.Get(); newValue != nil {
				if err := newValue.Unmarshal(protoValue); err != nil {
					logger.Warn("unable to parse new encoder per block limit", zap.Error(err))
					continue
				}
				value = int(protoValue.Value)
			}

			// Use a goroutine-local err rather than assigning to the
			// captured outer err variable, which would race with the
			// parent goroutine.
			if err := setEncodersPerBlockLimitOnChange(runtimeOptsMgr, value); err != nil {
				logger.Warn("unable to set encoder per block limit", zap.Error(err))
				continue
			}
		}
	}()
}
// kvWatchQueryLimit applies any dynamic query limits stored in KV at startup
// and then keeps the lookback limits in sync with subsequent KV updates.
func kvWatchQueryLimit(
	store kv.Store,
	logger *zap.Logger,
	docsLimit limits.LookbackLimit,
	bytesReadLimit limits.LookbackLimit,
	diskSeriesReadLimit limits.LookbackLimit,
	aggregateDocsLimit limits.LookbackLimit,
	defaultOpts limits.Options,
) {
	// Eagerly apply any existing dynamic limits before starting the watch.
	existing, err := store.Get(kvconfig.QueryLimits)
	switch {
	case err == nil:
		dynamicLimits := &kvpb.QueryLimits{}
		if unmarshalErr := existing.Unmarshal(dynamicLimits); unmarshalErr == nil {
			updateQueryLimits(
				logger, docsLimit, bytesReadLimit, diskSeriesReadLimit,
				aggregateDocsLimit, dynamicLimits, defaultOpts)
		}
	case !errors.Is(err, kv.ErrNotFound):
		logger.Warn("error resolving query limit", zap.Error(err))
	}

	watch, watchErr := store.Watch(kvconfig.QueryLimits)
	if watchErr != nil {
		logger.Error("could not watch query limit", zap.Error(watchErr))
		return
	}

	go func() {
		dynamicLimits := &kvpb.QueryLimits{}
		for range watch.C() {
			newValue := watch.Get()
			if newValue == nil {
				continue
			}
			if err := newValue.Unmarshal(dynamicLimits); err != nil {
				logger.Warn("unable to parse new query limits", zap.Error(err))
				continue
			}
			updateQueryLimits(
				logger, docsLimit, bytesReadLimit, diskSeriesReadLimit,
				aggregateDocsLimit, dynamicLimits, defaultOpts)
		}
	}()
}
// updateQueryLimits applies dynamic (KV-sourced) query limit overrides on top
// of the config-based defaults and pushes the result to each lookback limit,
// logging (but not aborting on) individual update failures.
func updateQueryLimits(
	logger *zap.Logger,
	docsLimit limits.LookbackLimit,
	bytesReadLimit limits.LookbackLimit,
	diskSeriesReadLimit limits.LookbackLimit,
	aggregateDocsLimit limits.LookbackLimit,
	dynamicOpts *kvpb.QueryLimits,
	configOpts limits.Options,
) {
	// Start from the config-based limits and let any dynamic limit that is
	// set override its config counterpart.
	docsOpts := configOpts.DocsLimitOpts()
	bytesReadOpts := configOpts.BytesReadLimitOpts()
	diskSeriesReadOpts := configOpts.DiskSeriesReadLimitOpts()
	aggDocsOpts := configOpts.AggregateDocsLimitOpts()

	if dynamicOpts != nil {
		if dyn := dynamicOpts.MaxRecentlyQueriedSeriesBlocks; dyn != nil {
			docsOpts = dynamicLimitToLimitOpts(dyn)
		}
		if dyn := dynamicOpts.MaxRecentlyQueriedSeriesDiskBytesRead; dyn != nil {
			bytesReadOpts = dynamicLimitToLimitOpts(dyn)
		}
		if dyn := dynamicOpts.MaxRecentlyQueriedSeriesDiskRead; dyn != nil {
			diskSeriesReadOpts = dynamicLimitToLimitOpts(dyn)
		}
		if dyn := dynamicOpts.MaxRecentlyQueriedMetadataRead; dyn != nil {
			aggDocsOpts = dynamicLimitToLimitOpts(dyn)
		}
	}

	// Apply every limit in turn; a failure on one does not prevent the rest.
	updates := []struct {
		name  string
		limit limits.LookbackLimit
		opts  limits.LookbackLimitOptions
	}{
		{name: "docs", limit: docsLimit, opts: docsOpts},
		{name: "bytes read", limit: bytesReadLimit, opts: bytesReadOpts},
		{name: "series read", limit: diskSeriesReadLimit, opts: diskSeriesReadOpts},
		{name: "metadata read", limit: aggregateDocsLimit, opts: aggDocsOpts},
	}
	for _, u := range updates {
		if err := updateQueryLimit(u.limit, u.opts); err != nil {
			logger.Error("error updating "+u.name+" limit", zap.Error(err))
		}
	}
}
// updateQueryLimit pushes newOpts to the given lookback limit, skipping the
// update entirely when the options are already equal to avoid needless churn.
func updateQueryLimit(
	limit limits.LookbackLimit,
	newOpts limits.LookbackLimitOptions,
) error {
	if limit.Options().Equals(newOpts) {
		// No change; nothing to apply.
		return nil
	}
	return limit.Update(newOpts)
}
// dynamicLimitToLimitOpts converts a KV-sourced dynamic query limit into the
// equivalent lookback limit options.
func dynamicLimitToLimitOpts(dynamicLimit *kvpb.QueryLimit) limits.LookbackLimitOptions {
	lookback := time.Duration(dynamicLimit.LookbackSeconds) * time.Second
	opts := limits.LookbackLimitOptions{}
	opts.Limit = dynamicLimit.Limit
	opts.Lookback = lookback
	opts.ForceExceeded = dynamicLimit.ForceExceeded
	return opts
}
// kvWatchClientConsistencyLevels watches the client bootstrap/read/write
// consistency level KV keys and applies valid updates to the runtime options;
// when a key is deleted the level reverts to the static client configuration.
func kvWatchClientConsistencyLevels(
	store kv.Store,
	logger *zap.Logger,
	clientOpts client.AdminOptions,
	runtimeOptsMgr m3dbruntime.OptionsManager,
) {
	// applyReadLevel parses v as a read consistency level and, if valid,
	// applies it to the current runtime options via applyFn.
	applyReadLevel := func(
		v string,
		applyFn func(topology.ReadConsistencyLevel, m3dbruntime.Options) m3dbruntime.Options,
	) error {
		for _, level := range topology.ValidReadConsistencyLevels() {
			if level.String() != v {
				continue
			}
			return runtimeOptsMgr.Update(applyFn(level, runtimeOptsMgr.Get()))
		}
		return fmt.Errorf("invalid read consistency level set: %s", v)
	}

	// applyWriteLevel parses v as a (write) consistency level and, if valid,
	// applies it to the current runtime options via applyFn.
	applyWriteLevel := func(
		v string,
		applyFn func(topology.ConsistencyLevel, m3dbruntime.Options) m3dbruntime.Options,
	) error {
		for _, level := range topology.ValidConsistencyLevels() {
			if level.String() != v {
				continue
			}
			return runtimeOptsMgr.Update(applyFn(level, runtimeOptsMgr.Get()))
		}
		return fmt.Errorf("invalid consistency level set: %s", v)
	}

	// Bootstrap consistency level (a read level).
	kvWatchStringValue(store, logger,
		kvconfig.ClientBootstrapConsistencyLevel,
		func(value string) error {
			return applyReadLevel(value,
				func(level topology.ReadConsistencyLevel, opts m3dbruntime.Options) m3dbruntime.Options {
					return opts.SetClientBootstrapConsistencyLevel(level)
				})
		},
		func() error {
			return runtimeOptsMgr.Update(runtimeOptsMgr.Get().
				SetClientBootstrapConsistencyLevel(clientOpts.BootstrapConsistencyLevel()))
		})

	// Read consistency level.
	kvWatchStringValue(store, logger,
		kvconfig.ClientReadConsistencyLevel,
		func(value string) error {
			return applyReadLevel(value,
				func(level topology.ReadConsistencyLevel, opts m3dbruntime.Options) m3dbruntime.Options {
					return opts.SetClientReadConsistencyLevel(level)
				})
		},
		func() error {
			return runtimeOptsMgr.Update(runtimeOptsMgr.Get().
				SetClientReadConsistencyLevel(clientOpts.ReadConsistencyLevel()))
		})

	// Write consistency level.
	kvWatchStringValue(store, logger,
		kvconfig.ClientWriteConsistencyLevel,
		func(value string) error {
			return applyWriteLevel(value,
				func(level topology.ConsistencyLevel, opts m3dbruntime.Options) m3dbruntime.Options {
					return opts.SetClientWriteConsistencyLevel(level)
				})
		},
		func() error {
			return runtimeOptsMgr.Update(runtimeOptsMgr.Get().
				SetClientWriteConsistencyLevel(clientOpts.WriteConsistencyLevel()))
		})
}
// kvWatchStringValue eagerly resolves the string value at the given KV key
// and invokes onValue with it, then watches the key: subsequent updates
// invoke onValue, while deletion of the key invokes onDelete so the caller
// can restore a default.
func kvWatchStringValue(
	store kv.Store,
	logger *zap.Logger,
	key string,
	onValue func(value string) error,
	onDelete func() error,
) {
	protoValue := &commonpb.StringProto{}

	// First try to eagerly set the value so it doesn't flap if the
	// watch returns but not immediately for an existing value.
	value, err := store.Get(key)
	// Use errors.Is so wrapped kv.ErrNotFound is matched too, consistent
	// with the rest of this file; a missing key is not an error here.
	if err != nil && !errors.Is(err, kv.ErrNotFound) {
		logger.Error("could not resolve KV", zap.String("key", key), zap.Error(err))
	}
	if err == nil {
		if err := value.Unmarshal(protoValue); err != nil {
			logger.Error("could not unmarshal KV key", zap.String("key", key), zap.Error(err))
		} else if err := onValue(protoValue.Value); err != nil {
			logger.Error("could not process value of KV", zap.String("key", key), zap.Error(err))
		} else {
			logger.Info("set KV key", zap.String("key", key), zap.Any("value", protoValue.Value))
		}
	}

	watch, err := store.Watch(key)
	if err != nil {
		logger.Error("could not watch KV key", zap.String("key", key), zap.Error(err))
		return
	}

	go func() {
		for range watch.C() {
			newValue := watch.Get()
			if newValue == nil {
				// Key was deleted: let the caller fall back to its default.
				if err := onDelete(); err != nil {
					logger.Warn("could not set default for KV key", zap.String("key", key), zap.Error(err))
				}
				continue
			}

			err := newValue.Unmarshal(protoValue)
			if err != nil {
				logger.Warn("could not unmarshal KV key", zap.String("key", key), zap.Error(err))
				continue
			}
			if err := onValue(protoValue.Value); err != nil {
				logger.Warn("could not process change for KV key", zap.String("key", key), zap.Error(err))
				continue
			}
			logger.Info("set KV key", zap.String("key", key), zap.Any("value", protoValue.Value))
		}
	}()
}
// setNewSeriesLimitPerShardOnChange converts the cluster-wide new series
// limit into a per-placed-shard limit and applies it to the runtime options,
// skipping the update entirely when the limit is unchanged.
func setNewSeriesLimitPerShardOnChange(
	topo topology.Topology,
	runtimeOptsMgr m3dbruntime.OptionsManager,
	clusterLimit int,
) error {
	perShardLimit := clusterLimitToPlacedShardLimit(topo, clusterLimit)
	current := runtimeOptsMgr.Get()
	if current.WriteNewSeriesLimitPerShardPerSecond() == perShardLimit {
		// Unchanged: avoid triggering a runtime options update.
		return nil
	}
	return runtimeOptsMgr.Update(
		current.SetWriteNewSeriesLimitPerShardPerSecond(perShardLimit))
}
// clusterLimitToPlacedShardLimit spreads a cluster-wide limit across every
// placed shard (shards times replicas), rounding up; it returns 0 when the
// limit is non-positive or no shards are placed.
func clusterLimitToPlacedShardLimit(topo topology.Topology, clusterLimit int) int {
	if clusterLimit < 1 {
		return 0
	}

	topoMap := topo.Get()
	placedShards := len(topoMap.ShardSet().AllIDs()) * topoMap.Replicas()
	if placedShards < 1 {
		return 0
	}

	// Divide evenly across placed shards, rounding up so the sum of the
	// per-shard limits is never below the cluster limit.
	return int(math.Ceil(float64(clusterLimit) / float64(placedShards)))
}
// setEncodersPerBlockLimitOnChange applies the encoders-per-block limit to
// the runtime options, skipping the update entirely when the limit is
// unchanged.
func setEncodersPerBlockLimitOnChange(
	runtimeOptsMgr m3dbruntime.OptionsManager,
	encoderLimit int,
) error {
	current := runtimeOptsMgr.Get()
	if current.EncodersPerBlockLimit() == encoderLimit {
		// Unchanged: avoid triggering a runtime options update.
		return nil
	}
	return runtimeOptsMgr.Update(current.SetEncodersPerBlockLimit(encoderLimit))
}
// withEncodingAndPoolingOptions configures the byte/object pools, the
// encoding scheme (proto when enabled in cfg, otherwise m3tsz), and the
// commit log, series, block, and index options on the provided storage
// options according to the supplied pooling policy, returning the updated
// options. Pool Init calls are deliberately ordered after the options they
// capture are fully constructed.
func withEncodingAndPoolingOptions(
	cfg config.DBConfiguration,
	logger *zap.Logger,
	opts storage.Options,
	policy config.PoolingPolicy,
) storage.Options {
	iOpts := opts.InstrumentOptions()
	scope := opts.InstrumentOptions().MetricsScope()
	// Set the max bytes pool byte slice alloc size for the thrift pooling.
	thriftBytesAllocSize := policy.ThriftBytesPoolAllocSizeOrDefault()
	logger.Info("set thrift bytes pool alloc size",
		zap.Int("size", thriftBytesAllocSize))
	apachethrift.SetMaxBytesPoolAlloc(thriftBytesAllocSize)
	// Each pool gets its own metrics sub-scope so utilization is observable
	// per pool.
	bytesPoolOpts := pool.NewObjectPoolOptions().
		SetInstrumentOptions(iOpts.SetMetricsScope(scope.SubScope("bytes-pool")))
	checkedBytesPoolOpts := bytesPoolOpts.
		SetInstrumentOptions(iOpts.SetMetricsScope(scope.SubScope("checked-bytes-pool")))
	// Build the bucketed bytes pool from the policy's bucket configuration.
	buckets := make([]pool.Bucket, len(policy.BytesPool.Buckets))
	for i, bucket := range policy.BytesPool.Buckets {
		var b pool.Bucket
		b.Capacity = bucket.CapacityOrDefault()
		b.Count = bucket.SizeOrDefault()
		b.Options = bytesPoolOpts.
			SetRefillLowWatermark(bucket.RefillLowWaterMarkOrDefault()).
			SetRefillHighWatermark(bucket.RefillHighWaterMarkOrDefault())
		buckets[i] = b
		logger.Info("bytes pool configured",
			zap.Int("capacity", bucket.CapacityOrDefault()),
			zap.Int("size", bucket.SizeOrDefault()),
			zap.Float64("refillLowWaterMark", bucket.RefillLowWaterMarkOrDefault()),
			zap.Float64("refillHighWaterMark", bucket.RefillHighWaterMarkOrDefault()))
	}
	// Construct the checked bytes pool per the configured pooling type;
	// unknown types are fatal since no pool could be built.
	var bytesPool pool.CheckedBytesPool
	switch policy.TypeOrDefault() {
	case config.SimplePooling:
		bytesPool = pool.NewCheckedBytesPool(
			buckets,
			checkedBytesPoolOpts,
			func(s []pool.Bucket) pool.BytesPool {
				return pool.NewBytesPool(s, bytesPoolOpts)
			})
	default:
		logger.Fatal("unrecognized pooling type", zap.Any("type", policy.Type))
	}
	{
		// Avoid polluting the rest of the function with `l` var
		l := logger
		if t := policy.Type; t != nil {
			l = l.With(zap.String("policy", string(*t)))
		}
		l.Info("bytes pool init start")
		bytesPool.Init()
		l.Info("bytes pool init end")
	}
	// Construct the remaining object pools, each with its own metrics scope.
	segmentReaderPool := xio.NewSegmentReaderPool(
		poolOptions(
			policy.SegmentReaderPool,
			scope.SubScope("segment-reader-pool")))
	segmentReaderPool.Init()
	// NOTE: encoderPool is initialized later, once encodingOpts exists.
	encoderPool := encoding.NewEncoderPool(
		poolOptions(
			policy.EncoderPool,
			scope.SubScope("encoder-pool")))
	closersPoolOpts := poolOptions(
		policy.ClosersPool,
		scope.SubScope("closers-pool"))
	contextPoolOpts := poolOptions(
		policy.ContextPool,
		scope.SubScope("context-pool"))
	contextPool := xcontext.NewPool(xcontext.NewOptions().
		SetContextPoolOptions(contextPoolOpts).
		SetFinalizerPoolOptions(closersPoolOpts))
	iteratorPool := encoding.NewReaderIteratorPool(
		poolOptions(
			policy.IteratorPool,
			scope.SubScope("iterator-pool")))
	multiIteratorPool := encoding.NewMultiReaderIteratorPool(
		poolOptions(
			policy.IteratorPool,
			scope.SubScope("multi-iterator-pool")))
	var writeBatchPoolInitialBatchSize *int
	if policy.WriteBatchPool.InitialBatchSize != nil {
		// Use config value if available.
		writeBatchPoolInitialBatchSize = policy.WriteBatchPool.InitialBatchSize
	} else {
		// Otherwise use the default batch size that the client will use.
		clientDefaultSize := client.DefaultWriteBatchSize
		writeBatchPoolInitialBatchSize = &clientDefaultSize
	}
	var writeBatchPoolMaxBatchSize *int
	if policy.WriteBatchPool.MaxBatchSize != nil {
		writeBatchPoolMaxBatchSize = policy.WriteBatchPool.MaxBatchSize
	}
	var writeBatchPoolSize int
	if policy.WriteBatchPool.Size != nil {
		writeBatchPoolSize = *policy.WriteBatchPool.Size
	} else {
		// If no value set, calculate a reasonable value based on the commit log
		// queue size. We base it off the commitlog queue size because we will
		// want to be able to buffer at least one full commitlog queues worth of
		// writes without allocating because these objects are very expensive to
		// allocate.
		commitlogQueueSize := opts.CommitLogOptions().BacklogQueueSize()
		expectedBatchSize := *writeBatchPoolInitialBatchSize
		writeBatchPoolSize = commitlogQueueSize / expectedBatchSize
	}
	writeBatchPoolOpts := pool.NewObjectPoolOptions()
	writeBatchPoolOpts = writeBatchPoolOpts.
		SetSize(writeBatchPoolSize).
		// Set watermarks to zero because this pool is sized to be as large as we
		// ever need it to be, so background allocations are usually wasteful.
		SetRefillLowWatermark(0.0).
		SetRefillHighWatermark(0.0).
		SetInstrumentOptions(
			writeBatchPoolOpts.
				InstrumentOptions().
				SetMetricsScope(scope.SubScope("write-batch-pool")))
	writeBatchPool := writes.NewWriteBatchPool(
		writeBatchPoolOpts,
		writeBatchPoolInitialBatchSize,
		writeBatchPoolMaxBatchSize)
	tagPoolPolicy := policy.TagsPool
	identifierPool := ident.NewPool(bytesPool, ident.PoolOptions{
		IDPoolOptions: poolOptions(
			policy.IdentifierPool, scope.SubScope("identifier-pool")),
		TagsPoolOptions:         maxCapacityPoolOptions(tagPoolPolicy, scope.SubScope("tags-pool")),
		TagsCapacity:            tagPoolPolicy.CapacityOrDefault(),
		TagsMaxCapacity:         tagPoolPolicy.MaxCapacityOrDefault(),
		TagsIteratorPoolOptions: poolOptions(
			policy.TagsIteratorPool,
			scope.SubScope("tags-iterator-pool")),
	})
	fetchBlockMetadataResultsPoolPolicy := policy.FetchBlockMetadataResultsPool
	fetchBlockMetadataResultsPool := block.NewFetchBlockMetadataResultsPool(
		capacityPoolOptions(
			fetchBlockMetadataResultsPoolPolicy,
			scope.SubScope("fetch-block-metadata-results-pool")),
		fetchBlockMetadataResultsPoolPolicy.CapacityOrDefault())
	fetchBlocksMetadataResultsPoolPolicy := policy.FetchBlocksMetadataResultsPool
	fetchBlocksMetadataResultsPool := block.NewFetchBlocksMetadataResultsPool(
		capacityPoolOptions(
			fetchBlocksMetadataResultsPoolPolicy,
			scope.SubScope("fetch-blocks-metadata-results-pool")),
		fetchBlocksMetadataResultsPoolPolicy.CapacityOrDefault())
	bytesWrapperPoolOpts := poolOptions(
		policy.CheckedBytesWrapperPool,
		scope.SubScope("checked-bytes-wrapper-pool"))
	bytesWrapperPool := xpool.NewCheckedBytesWrapperPool(
		bytesWrapperPoolOpts)
	bytesWrapperPool.Init()
	// Encoding options must exist before the encoder/iterator pools are
	// initialized below, since the allocator closures capture them.
	encodingOpts := encoding.NewOptions().
		SetEncoderPool(encoderPool).
		SetReaderIteratorPool(iteratorPool).
		SetBytesPool(bytesPool).
		SetSegmentReaderPool(segmentReaderPool).
		SetCheckedBytesWrapperPool(bytesWrapperPool)
	// Allocators pick proto encoding when enabled in config, m3tsz otherwise.
	encoderPool.Init(func() encoding.Encoder {
		if cfg.Proto != nil && cfg.Proto.Enabled {
			enc := proto.NewEncoder(time.Time{}, encodingOpts)
			return enc
		}
		return m3tsz.NewEncoder(time.Time{}, nil, m3tsz.DefaultIntOptimizationEnabled, encodingOpts)
	})
	iteratorPool.Init(func(r xio.Reader64, descr namespace.SchemaDescr) encoding.ReaderIterator {
		if cfg.Proto != nil && cfg.Proto.Enabled {
			return proto.NewIterator(r, descr, encodingOpts)
		}
		return m3tsz.NewReaderIterator(r, m3tsz.DefaultIntOptimizationEnabled, encodingOpts)
	})
	multiIteratorPool.Init(func(r xio.Reader64, descr namespace.SchemaDescr) encoding.ReaderIterator {
		iter := iteratorPool.Get()
		iter.Reset(r, descr)
		return iter
	})
	writeBatchPool.Init()
	bucketPool := series.NewBufferBucketPool(
		poolOptions(policy.BufferBucketPool, scope.SubScope("buffer-bucket-pool")))
	bucketVersionsPool := series.NewBufferBucketVersionsPool(
		poolOptions(policy.BufferBucketVersionsPool, scope.SubScope("buffer-bucket-versions-pool")))
	retrieveRequestPool := fs.NewRetrieveRequestPool(segmentReaderPool,
		poolOptions(policy.RetrieveRequestPool, scope.SubScope("retrieve-request-pool")))
	retrieveRequestPool.Init()
	// Wire all constructed pools into the storage options.
	opts = opts.
		SetBytesPool(bytesPool).
		SetContextPool(contextPool).
		SetEncoderPool(encoderPool).
		SetReaderIteratorPool(iteratorPool).
		SetMultiReaderIteratorPool(multiIteratorPool).
		SetIdentifierPool(identifierPool).
		SetFetchBlockMetadataResultsPool(fetchBlockMetadataResultsPool).
		SetFetchBlocksMetadataResultsPool(fetchBlocksMetadataResultsPool).
		SetWriteBatchPool(writeBatchPool).
		SetBufferBucketPool(bucketPool).
		SetBufferBucketVersionsPool(bucketVersionsPool).
		SetRetrieveRequestPool(retrieveRequestPool).
		SetCheckedBytesWrapperPool(bytesWrapperPool)
	// Database block options share the same pools as the storage options.
	blockOpts := opts.DatabaseBlockOptions().
		SetDatabaseBlockAllocSize(policy.BlockAllocSizeOrDefault()).
		SetContextPool(contextPool).
		SetEncoderPool(encoderPool).
		SetReaderIteratorPool(iteratorPool).
		SetMultiReaderIteratorPool(multiIteratorPool).
		SetSegmentReaderPool(segmentReaderPool).
		SetBytesPool(bytesPool)
	// Only the LRU series cache policy uses a wired list on the block options.
	if opts.SeriesCachePolicy() == series.CacheLRU {
		var (
			runtimeOpts   = opts.RuntimeOptionsManager()
			wiredListOpts = block.WiredListOptions{
				RuntimeOptionsManager: runtimeOpts,
				InstrumentOptions:     iOpts,
				ClockOptions:          opts.ClockOptions(),
			}
			lruCfg = cfg.Cache.SeriesConfiguration().LRU
		)
		if lruCfg != nil && lruCfg.EventsChannelSize > 0 {
			wiredListOpts.EventsChannelSize = int(lruCfg.EventsChannelSize)
		}
		wiredList := block.NewWiredList(wiredListOpts)
		blockOpts = blockOpts.SetWiredList(wiredList)
	}
	blockPool := block.NewDatabaseBlockPool(
		poolOptions(
			policy.BlockPool,
			scope.SubScope("block-pool")))
	blockPool.Init(func() block.DatabaseBlock {
		return block.NewDatabaseBlock(time.Time{}, 0, ts.Segment{}, blockOpts, namespace.Context{})
	})
	blockOpts = blockOpts.SetDatabaseBlockPool(blockPool)
	opts = opts.SetDatabaseBlockOptions(blockOpts)
	// NB(prateek): retention opts are overridden per namespace during series creation
	retentionOpts := retention.NewOptions()
	seriesOpts := storage.NewSeriesOptionsFromOptions(opts, retentionOpts).
		SetFetchBlockMetadataResultsPool(opts.FetchBlockMetadataResultsPool())
	seriesPool := series.NewDatabaseSeriesPool(
		poolOptions(
			policy.SeriesPool,
			scope.SubScope("series-pool")))
	opts = opts.
		SetSeriesOptions(seriesOpts).
		SetDatabaseSeriesPool(seriesPool)
	opts = opts.SetCommitLogOptions(opts.CommitLogOptions().
		SetBytesPool(bytesPool).
		SetIdentifierPool(identifierPool))
	// Index-related pools; the results pools are initialized after indexOpts
	// is built (see the NB(r) comments below).
	postingsListOpts := poolOptions(policy.PostingsListPool, scope.SubScope("postingslist-pool"))
	postingsList := postings.NewPool(postingsListOpts, roaring.NewPostingsList)
	queryResultsPool := index.NewQueryResultsPool(
		poolOptions(policy.IndexResultsPool, scope.SubScope("index-query-results-pool")))
	aggregateQueryResultsPool := index.NewAggregateResultsPool(
		poolOptions(policy.IndexResultsPool, scope.SubScope("index-aggregate-results-pool")))
	aggregateQueryValuesPool := index.NewAggregateValuesPool(
		poolOptions(policy.IndexResultsPool, scope.SubScope("index-aggregate-values-pool")))
	// Set value transformation options.
	opts = opts.SetTruncateType(cfg.Transforms.TruncateBy)
	forcedValue := cfg.Transforms.ForcedValue
	if forcedValue != nil {
		opts = opts.SetWriteTransformOptions(series.WriteTransformOptions{
			ForceValueEnabled: true,
			ForceValue:        *forcedValue,
		})
	}
	// Set index options.
	indexOpts := opts.IndexOptions().
		SetInstrumentOptions(iOpts).
		SetMemSegmentOptions(
			opts.IndexOptions().MemSegmentOptions().
				SetPostingsListPool(postingsList).
				SetInstrumentOptions(iOpts)).
		SetFSTSegmentOptions(
			opts.IndexOptions().FSTSegmentOptions().
				SetPostingsListPool(postingsList).
				SetInstrumentOptions(iOpts).
				SetContextPool(opts.ContextPool())).
		SetSegmentBuilderOptions(
			opts.IndexOptions().SegmentBuilderOptions().
				SetPostingsListPool(postingsList)).
		SetIdentifierPool(identifierPool).
		SetCheckedBytesPool(bytesPool).
		SetQueryResultsPool(queryResultsPool).
		SetAggregateResultsPool(aggregateQueryResultsPool).
		SetAggregateValuesPool(aggregateQueryValuesPool).
		SetForwardIndexProbability(cfg.Index.ForwardIndexProbability).
		SetForwardIndexThreshold(cfg.Index.ForwardIndexThreshold)
	queryResultsPool.Init(func() index.QueryResults {
		// NB(r): Need to initialize after setting the index opts so
		// it sees the same reference of the options as is set for the DB.
		return index.NewQueryResults(nil, index.QueryResultsOptions{}, indexOpts)
	})
	aggregateQueryResultsPool.Init(func() index.AggregateResults {
		// NB(r): Need to initialize after setting the index opts so
		// it sees the same reference of the options as is set for the DB.
		return index.NewAggregateResults(nil, index.AggregateResultsOptions{}, indexOpts)
	})
	aggregateQueryValuesPool.Init(func() index.AggregateValues {
		// NB(r): Need to initialize after setting the index opts so
		// it sees the same reference of the options as is set for the DB.
		return index.NewAggregateValues(indexOpts)
	})
	return opts.SetIndexOptions(indexOpts)
}
// newAdminClient constructs an admin client from the given client
// configuration, layering channel, runtime-options-manager, origin,
// proto-encoding and schema-registry options on top of any custom
// options supplied by the caller, then kicks off KV watches for the
// client consistency levels.
func newAdminClient(
	config client.Configuration,
	iOpts instrument.Options,
	tchannelOpts *tchannel.ChannelOptions,
	topologyInitializer topology.Initializer,
	runtimeOptsMgr m3dbruntime.OptionsManager,
	origin topology.Host,
	protoEnabled bool,
	schemaRegistry namespace.SchemaRegistry,
	kvStore kv.Store,
	logger *zap.Logger,
	custom []client.CustomAdminOption,
) (client.AdminClient, error) {
	if config.EnvironmentConfig != nil {
		// If the user has provided an override for the dynamic client configuration
		// then we need to honor it by not passing our own topology initializer.
		topologyInitializer = nil
	}

	// Base options applied first; custom options from run options follow
	// so they can override anything set here.
	adminOpts := make([]client.CustomAdminOption, 0, 6+len(custom))
	adminOpts = append(adminOpts,
		func(opts client.AdminOptions) client.AdminOptions {
			return opts.SetChannelOptions(tchannelOpts).(client.AdminOptions)
		},
		func(opts client.AdminOptions) client.AdminOptions {
			return opts.SetRuntimeOptionsManager(runtimeOptsMgr).(client.AdminOptions)
		},
		func(opts client.AdminOptions) client.AdminOptions {
			return opts.SetContextPool(opts.ContextPool()).(client.AdminOptions)
		},
		func(opts client.AdminOptions) client.AdminOptions {
			return opts.SetOrigin(origin).(client.AdminOptions)
		},
		func(opts client.AdminOptions) client.AdminOptions {
			if !protoEnabled {
				return opts
			}
			return opts.SetEncodingProto(encoding.NewOptions()).(client.AdminOptions)
		},
		func(opts client.AdminOptions) client.AdminOptions {
			return opts.SetSchemaRegistry(schemaRegistry).(client.AdminOptions)
		},
	)
	adminOpts = append(adminOpts, custom...)

	params := client.ConfigurationParameters{
		InstrumentOptions: iOpts.
			SetMetricsScope(iOpts.MetricsScope().SubScope("m3dbclient")),
		TopologyInitializer: topologyInitializer,
	}
	m3dbClient, err := config.NewAdminClient(params, adminOpts...)
	if err != nil {
		return nil, err
	}

	// Kick off runtime options manager KV watches.
	adminClientOpts := m3dbClient.Options().(client.AdminOptions)
	kvWatchClientConsistencyLevels(kvStore, logger,
		adminClientOpts, runtimeOptsMgr)
	return m3dbClient, nil
}
// poolOptions converts a pool policy into object pool options, applying
// the configured size, refill watermarks and an optional metrics scope.
func poolOptions(
	policy config.PoolPolicy,
	scope tally.Scope,
) pool.ObjectPoolOptions {
	opts := pool.NewObjectPoolOptions()

	var (
		size = policy.SizeOrDefault()
		low  = policy.RefillLowWaterMarkOrDefault()
		high = policy.RefillHighWaterMarkOrDefault()
	)
	if size > 0 {
		opts = opts.SetSize(size)
		// Watermarks only apply when both are set and ordered sensibly
		// (high > low implies high > 0 as well).
		if low > 0 && high > low {
			opts = opts.SetRefillLowWatermark(low)
			opts = opts.SetRefillHighWatermark(high)
		}
	}
	if scope != nil {
		opts = opts.SetInstrumentOptions(opts.InstrumentOptions().
			SetMetricsScope(scope))
	}
	return opts
}
// capacityPoolOptions converts a capacity pool policy into object pool
// options, applying the configured size, refill watermarks and an
// optional metrics scope.
func capacityPoolOptions(
	policy config.CapacityPoolPolicy,
	scope tally.Scope,
) pool.ObjectPoolOptions {
	opts := pool.NewObjectPoolOptions()

	var (
		size = policy.SizeOrDefault()
		low  = policy.RefillLowWaterMarkOrDefault()
		high = policy.RefillHighWaterMarkOrDefault()
	)
	if size > 0 {
		opts = opts.SetSize(size)
		// Watermarks only apply when both are set and ordered sensibly
		// (high > low implies high > 0 as well).
		if low > 0 && high > low {
			opts = opts.SetRefillLowWatermark(low)
			opts = opts.SetRefillHighWatermark(high)
		}
	}
	if scope != nil {
		opts = opts.SetInstrumentOptions(opts.InstrumentOptions().
			SetMetricsScope(scope))
	}
	return opts
}
// maxCapacityPoolOptions converts a max-capacity pool policy into object
// pool options, applying the configured size, refill watermarks and an
// optional metrics scope.
func maxCapacityPoolOptions(
	policy config.MaxCapacityPoolPolicy,
	scope tally.Scope,
) pool.ObjectPoolOptions {
	opts := pool.NewObjectPoolOptions()

	var (
		size = policy.SizeOrDefault()
		low  = policy.RefillLowWaterMarkOrDefault()
		high = policy.RefillHighWaterMarkOrDefault()
	)
	if size > 0 {
		opts = opts.SetSize(size)
		// Watermarks only apply when both are set and ordered sensibly
		// (high > low implies high > 0 as well).
		if low > 0 && high > low {
			opts = opts.SetRefillLowWatermark(low)
			opts = opts.SetRefillHighWatermark(high)
		}
	}
	if scope != nil {
		opts = opts.SetInstrumentOptions(opts.InstrumentOptions().
			SetMetricsScope(scope))
	}
	return opts
}
// hostSupportsHugeTLB probes whether the host accepts anonymous mmaps
// with huge TLB pages enabled, by comparing warnings from a mapping with
// huge TLB requested against one without.
func hostSupportsHugeTLB() (bool, error) {
	// First, map a tiny anonymous region with huge TLB requested.
	hugeTLBResult, err := mmap.Bytes(10, mmap.Options{
		HugeTLB: mmap.HugeTLBOptions{
			Enabled:   true,
			Threshold: 0,
		},
	})
	if err != nil {
		return false, fmt.Errorf("could not mmap anonymous region: %v", err)
	}
	defer mmap.Munmap(hugeTLBResult)

	if hugeTLBResult.Warning == nil {
		// No warning: the host did not complain about use of huge TLB.
		return true, nil
	}

	// We got a warning; check whether a plain mapping warns as well.
	plainResult, err := mmap.Bytes(10, mmap.Options{})
	if err != nil {
		return false, fmt.Errorf("could not mmap anonymous region: %v", err)
	}
	defer mmap.Munmap(plainResult)

	if plainResult.Warning == nil {
		// Only the huge TLB mapping warned: the machine doesn't support
		// huge TLB, proceed without it.
		return false, nil
	}

	// Both mappings warned, so the warning was probably caused by
	// something else; proceed using huge TLB.
	return true, nil
}
// newTopoMapProvider returns a topology map provider backed by the given
// topology.
func newTopoMapProvider(t topology.Topology) *topoMapProvider {
	return &topoMapProvider{t}
}

// topoMapProvider adapts a topology.Topology into a TopologyMap getter.
type topoMapProvider struct {
	t topology.Topology
}

// TopologyMap returns the current topology map, or an error if no
// underlying topology has been set.
func (t *topoMapProvider) TopologyMap() (topology.Map, error) {
	if t.t == nil {
		// Fix grammar of the original message ("has not be set yet").
		return nil, errors.New("topology map provider has not been set yet")
	}

	return t.t.Get(), nil
}
// Ensure mmap reporter implements mmap.Reporter
var _ mmap.Reporter = (*mmapReporter)(nil)

// mmapReporter tracks the total bytes currently mapped per unique
// context (name plus metadata tags) and exposes each total as a gauge.
// The embedded mutex guards the entries map.
type mmapReporter struct {
	sync.Mutex
	scope   tally.Scope
	entries map[string]*mmapReporterEntry
}

// mmapReporterEntry pairs the running mapped-bytes total for one context
// with the gauge used to emit it.
type mmapReporterEntry struct {
	value int64
	gauge tally.Gauge
}

// newMmapReporter returns an mmapReporter that emits metrics to scope.
func newMmapReporter(scope tally.Scope) *mmapReporter {
	return &mmapReporter{
		scope:   scope,
		entries: make(map[string]*mmapReporterEntry),
	}
}
// Run periodically flushes the current mapped-bytes totals to their
// gauges every 30 seconds until ctx is canceled.
func (r *mmapReporter) Run(ctx context.Context) {
	ticker := time.NewTicker(30 * time.Second)
	defer ticker.Stop()

	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			r.Lock()
			// NB: name the loop variable distinctly so it does not
			// shadow the receiver r whose lock we are holding.
			for _, entry := range r.entries {
				entry.gauge.Update(float64(entry.value))
			}
			r.Unlock()
		}
	}
}
// entryKeyAndTags derives the canonical entry key and metric tags for an
// mmap context: the context name plus any metadata key/values.
func (r *mmapReporter) entryKeyAndTags(ctx mmap.Context) (string, map[string]string) {
	// len and range both treat a nil metadata map as empty, so the
	// original explicit nil checks are unnecessary.
	tags := make(map[string]string, 1+len(ctx.Metadata))
	tags[mmapReporterTagName] = ctx.Name
	for k, v := range ctx.Metadata {
		tags[k] = v
	}

	entryKey := tally.KeyForStringMap(tags)
	return entryKey, tags
}
// ReportMap records a new mapping for the given context, creating the
// gauge entry on first use and adding the mapped size to its total.
func (r *mmapReporter) ReportMap(ctx mmap.Context) error {
	if ctx.Name == "" {
		return fmt.Errorf("report mmap map missing context name: %+v", ctx)
	}

	entryKey, entryTags := r.entryKeyAndTags(ctx)

	r.Lock()
	defer r.Unlock()

	entry := r.entries[entryKey]
	if entry == nil {
		// First mapping for this context: materialize its gauge.
		entry = &mmapReporterEntry{
			gauge: r.scope.Tagged(entryTags).Gauge(mmapReporterMetricName),
		}
		r.entries[entryKey] = entry
	}

	entry.value += ctx.Size

	return nil
}
// ReportUnmap records an unmapping for the given context, subtracting
// its size from the running total and deleting the entry once nothing
// remains mapped under that context.
func (r *mmapReporter) ReportUnmap(ctx mmap.Context) error {
	if ctx.Name == "" {
		return fmt.Errorf("report mmap unmap missing context name: %+v", ctx)
	}

	entryKey, _ := r.entryKeyAndTags(ctx)

	r.Lock()
	defer r.Unlock()

	entry := r.entries[entryKey]
	if entry == nil {
		return fmt.Errorf("report mmap unmap missing entry for context: %+v", ctx)
	}

	entry.value -= ctx.Size

	if entry.value == 0 {
		// No more similar mmaps active for this context name, garbage collect
		delete(r.entries, entryKey)
	}

	return nil
}
|
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Represents JSON data structure using native Go types: booleans, floats,
// strings, arrays, and maps.
package json
import (
"encoding"
"encoding/base64"
"fmt"
"reflect"
"strconv"
"strings"
"unicode"
"unicode/utf16"
"unicode/utf8"
)
// Unmarshal parses the JSON-encoded data and stores the result
// in the value pointed to by v. If v is nil or not a pointer,
// Unmarshal returns an InvalidUnmarshalError.
//
// Unmarshal uses the inverse of the encodings that
// Marshal uses, allocating maps, slices, and pointers as necessary,
// with the following additional rules:
//
// To unmarshal JSON into a pointer, Unmarshal first handles the case of
// the JSON being the JSON literal null. In that case, Unmarshal sets
// the pointer to nil. Otherwise, Unmarshal unmarshals the JSON into
// the value pointed at by the pointer. If the pointer is nil, Unmarshal
// allocates a new value for it to point to.
//
// To unmarshal JSON into a value implementing the Unmarshaler interface,
// Unmarshal calls that value's UnmarshalJSON method, including
// when the input is a JSON null.
// Otherwise, if the value implements encoding.TextUnmarshaler
// and the input is a JSON quoted string, Unmarshal calls that value's
// UnmarshalText method with the unquoted form of the string.
//
// To unmarshal JSON into a struct, Unmarshal matches incoming object
// keys to the keys used by Marshal (either the struct field name or its tag),
// preferring an exact match but also accepting a case-insensitive match. By
// default, object keys which don't have a corresponding struct field are
// ignored (see Decoder.DisallowUnknownFields for an alternative).
//
// To unmarshal JSON into an interface value,
// Unmarshal stores one of these in the interface value:
//
// bool, for JSON booleans
// float64, for JSON numbers
// string, for JSON strings
// []interface{}, for JSON arrays
// map[string]interface{}, for JSON objects
// nil for JSON null
//
// To unmarshal a JSON array into a slice, Unmarshal resets the slice length
// to zero and then appends each element to the slice.
// As a special case, to unmarshal an empty JSON array into a slice,
// Unmarshal replaces the slice with a new empty slice.
//
// To unmarshal a JSON array into a Go array, Unmarshal decodes
// JSON array elements into corresponding Go array elements.
// If the Go array is smaller than the JSON array,
// the additional JSON array elements are discarded.
// If the JSON array is smaller than the Go array,
// the additional Go array elements are set to zero values.
//
// To unmarshal a JSON object into a map, Unmarshal first establishes a map to
// use. If the map is nil, Unmarshal allocates a new map. Otherwise Unmarshal
// reuses the existing map, keeping existing entries. Unmarshal then stores
// key-value pairs from the JSON object into the map. The map's key type must
// either be a string, an integer, or implement encoding.TextUnmarshaler.
//
// If a JSON value is not appropriate for a given target type,
// or if a JSON number overflows the target type, Unmarshal
// skips that field and completes the unmarshaling as best it can.
// If no more serious errors are encountered, Unmarshal returns
// an UnmarshalTypeError describing the earliest such error. In any
// case, it's not guaranteed that all the remaining fields following
// the problematic one will be unmarshaled into the target object.
//
// The JSON null value unmarshals into an interface, map, pointer, or slice
// by setting that Go value to nil. Because null is often used in JSON to mean
// ``not present,'' unmarshaling a JSON null into any other Go type has no effect
// on the value and produces no error.
//
// When unmarshaling quoted strings, invalid UTF-8 or
// invalid UTF-16 surrogate pairs are not treated as an error.
// Instead, they are replaced by the Unicode replacement
// character U+FFFD.
//
func Unmarshal(data []byte, v interface{}) error {
	// Validate the input up front so we never fill out half a data
	// structure before discovering a JSON syntax error.
	var d decodeState
	if err := checkValid(data, &d.scan); err != nil {
		return err
	}

	d.init(data)
	return d.unmarshal(v)
}
// Unmarshaler is the interface implemented by types
// that can unmarshal a JSON description of themselves.
// The input can be assumed to be a valid encoding of
// a JSON value. UnmarshalJSON must copy the JSON data
// if it wishes to retain the data after returning.
//
// By convention, to approximate the behavior of Unmarshal itself,
// Unmarshalers implement UnmarshalJSON([]byte("null")) as a no-op.
type Unmarshaler interface {
	// UnmarshalJSON decodes the JSON-encoded data into the receiver.
	UnmarshalJSON([]byte) error
}
// An UnmarshalTypeError describes a JSON value that was
// not appropriate for a value of a specific Go type.
type UnmarshalTypeError struct {
Value string // description of JSON value - "bool", "array", "number -5"
Type reflect.Type // type of Go value it could not be assigned to
Offset int64 // error occurred after reading Offset bytes
Struct string // name of the struct type containing the field
Field string // the full path from root node to the field
}
func (e *UnmarshalTypeError) Error() string {
if e.Struct != "" || e.Field != "" {
return "json: cannot unmarshal " + e.Value + " into Go struct field " + e.Struct + "." + e.Field + " of type " + e.Type.String()
}
return "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String()
}
// An UnmarshalFieldError describes a JSON object key that
// led to an unexported (and therefore unwritable) struct field.
//
// Deprecated: No longer used; kept for compatibility.
type UnmarshalFieldError struct {
Key string
Type reflect.Type
Field reflect.StructField
}
func (e *UnmarshalFieldError) Error() string {
return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String()
}
// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal.
// (The argument to Unmarshal must be a non-nil pointer.)
type InvalidUnmarshalError struct {
Type reflect.Type
}
func (e *InvalidUnmarshalError) Error() string {
if e.Type == nil {
return "json: Unmarshal(nil)"
}
if e.Type.Kind() != reflect.Ptr {
return "json: Unmarshal(non-pointer " + e.Type.String() + ")"
}
return "json: Unmarshal(nil " + e.Type.String() + ")"
}
// unmarshal decodes the previously-initialized input into v, which must
// be a non-nil pointer, returning the first saved decoding error if any.
func (d *decodeState) unmarshal(v interface{}) error {
	rv := reflect.ValueOf(v)
	if rv.Kind() != reflect.Ptr || rv.IsNil() {
		return &InvalidUnmarshalError{reflect.TypeOf(v)}
	}

	// Advance the scanner to the first token of the value.
	d.scan.reset()
	d.scanWhile(scanSkipSpace)
	// We decode rv not rv.Elem because the Unmarshaler interface
	// test must be applied at the top level of the value.
	err := d.value(rv)
	if err != nil {
		return d.addErrorContext(err)
	}
	return d.savedError
}
// A Number represents a JSON number literal.
type Number string

// String returns the literal text of the number.
func (n Number) String() string { return string(n) }

// Float64 returns the number as a float64.
func (n Number) Float64() (float64, error) {
	return strconv.ParseFloat(n.String(), 64)
}

// Int64 returns the number as an int64.
func (n Number) Int64() (int64, error) {
	return strconv.ParseInt(n.String(), 10, 64)
}
// decodeState represents the state while decoding a JSON value.
type decodeState struct {
	data         []byte
	off          int // next read offset in data
	opcode       int // last read result
	scan         scanner
	errorContext struct { // provides context for type errors
		Struct     reflect.Type
		FieldStack []string
	}
	savedError            error // first decoding error, reported at the end
	useNumber             bool  // decode numbers as Number instead of float64
	disallowUnknownFields bool  // error on object keys with no matching field
}

// readIndex returns the position of the last byte read.
func (d *decodeState) readIndex() int {
	return d.off - 1
}

// phasePanicMsg is used as a panic message when we end up with something that
// shouldn't happen. It can indicate a bug in the JSON decoder, or that
// something is editing the data slice while the decoder executes.
const phasePanicMsg = "JSON decoder out of sync - data changing underfoot?"
// init readies d to decode data, clearing any state left over from a
// previous use while keeping allocated scratch space.
func (d *decodeState) init(data []byte) *decodeState {
	d.data = data
	d.off = 0
	d.savedError = nil
	d.errorContext.Struct = nil

	// Reuse the allocated space for the FieldStack slice.
	d.errorContext.FieldStack = d.errorContext.FieldStack[:0]
	return d
}

// saveError saves the first err it is called with,
// for reporting at the end of the unmarshal.
func (d *decodeState) saveError(err error) {
	if d.savedError == nil {
		d.savedError = d.addErrorContext(err)
	}
}
// addErrorContext returns a new error enhanced with information from d.errorContext
func (d *decodeState) addErrorContext(err error) error {
	// Nothing to add when no struct/field context has been recorded.
	if d.errorContext.Struct == nil && len(d.errorContext.FieldStack) == 0 {
		return err
	}
	// Only type errors carry struct/field context.
	if typeErr, ok := err.(*UnmarshalTypeError); ok {
		typeErr.Struct = d.errorContext.Struct.Name()
		typeErr.Field = strings.Join(d.errorContext.FieldStack, ".")
		return typeErr
	}
	return err
}
// skip scans to the end of what was started.
func (d *decodeState) skip() {
	s, data, i := &d.scan, d.data, d.off
	// Record the parse-stack depth at entry; once the scanner's stack
	// shrinks below it, the value we were inside has ended.
	depth := len(s.parseState)
	for {
		op := s.step(s, data[i])
		i++
		if len(s.parseState) < depth {
			d.off = i
			d.opcode = op
			return
		}
	}
}

// scanNext processes the byte at d.data[d.off].
func (d *decodeState) scanNext() {
	if d.off < len(d.data) {
		d.opcode = d.scan.step(&d.scan, d.data[d.off])
		d.off++
	} else {
		d.opcode = d.scan.eof()
		d.off = len(d.data) + 1 // mark processed EOF with len+1
	}
}

// scanWhile processes bytes in d.data[d.off:] until it
// receives a scan code not equal to op.
func (d *decodeState) scanWhile(op int) {
	s, data, i := &d.scan, d.data, d.off
	for i < len(data) {
		newOp := s.step(s, data[i])
		i++
		if newOp != op {
			d.opcode = newOp
			d.off = i
			return
		}
	}

	// Ran off the end of the input without seeing a different op.
	d.off = len(data) + 1 // mark processed EOF with len+1
	d.opcode = d.scan.eof()
}
// rescanLiteral is similar to scanWhile(scanContinue), but it specialises the
// common case where we're decoding a literal. The decoder scans the input
// twice, once for syntax errors and to check the length of the value, and the
// second to perform the decoding.
//
// Only in the second step do we use decodeState to tokenize literals, so we
// know there aren't any syntax errors. We can take advantage of that knowledge,
// and scan a literal's bytes much more quickly.
func (d *decodeState) rescanLiteral() {
	data, i := d.data, d.off

Switch:
	// Dispatch on the literal's first byte (already consumed, at i-1).
	switch data[i-1] {
	case '"': // string
		for ; i < len(data); i++ {
			switch data[i] {
			case '\\':
				i++ // escaped char
			case '"':
				i++ // tokenize the closing quote too
				break Switch
			}
		}
	case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '-': // number
		for ; i < len(data); i++ {
			switch data[i] {
			case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
				'.', 'e', 'E', '+', '-':
			default:
				break Switch
			}
		}
	case 't': // true
		i += len("rue")
	case 'f': // false
		i += len("alse")
	case 'n': // null
		i += len("ull")
	}
	// Advance the scanner past the literal so d.opcode reflects the
	// byte that follows it (or EOF).
	if i < len(data) {
		d.opcode = stateEndValue(&d.scan, data[i])
	} else {
		d.opcode = scanEnd
	}
	d.off = i + 1
}
// value consumes a JSON value from d.data[d.off-1:], decoding into v, and
// reads the following byte ahead. If v is invalid, the value is discarded.
// The first byte of the value has been read already.
func (d *decodeState) value(v reflect.Value) error {
	switch d.opcode {
	default:
		panic(phasePanicMsg)

	case scanBeginArray:
		if v.IsValid() {
			if err := d.array(v); err != nil {
				return err
			}
		} else {
			// No destination: skip over the array wholesale.
			d.skip()
		}
		d.scanNext()

	case scanBeginObject:
		if v.IsValid() {
			if err := d.object(v); err != nil {
				return err
			}
		} else {
			// No destination: skip over the object wholesale.
			d.skip()
		}
		d.scanNext()

	case scanBeginLiteral:
		// All bytes inside literal return scanContinue op code.
		start := d.readIndex()
		d.rescanLiteral()

		if v.IsValid() {
			if err := d.literalStore(d.data[start:d.readIndex()], v, false); err != nil {
				return err
			}
		}
	}
	return nil
}
// unquotedValue is a sentinel returned by valueQuoted when the value was
// not a quoted string literal or null.
type unquotedValue struct{}

// valueQuoted is like value but decodes a
// quoted string literal or literal null into an interface value.
// If it finds anything other than a quoted string literal or null,
// valueQuoted returns unquotedValue{}.
func (d *decodeState) valueQuoted() interface{} {
	switch d.opcode {
	default:
		panic(phasePanicMsg)

	case scanBeginArray, scanBeginObject:
		// Arrays and objects are never valid here; skip them entirely.
		d.skip()
		d.scanNext()

	case scanBeginLiteral:
		v := d.literalInterface()
		switch v.(type) {
		case nil, string:
			return v
		}
	}
	return unquotedValue{}
}
// indirect walks down v allocating pointers as needed,
// until it gets to a non-pointer.
// if it encounters an Unmarshaler, indirect stops and returns that.
// if decodingNull is true, indirect stops at the last pointer so it can be set to nil.
func indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnmarshaler, reflect.Value) {
	// Issue #24153 indicates that it is generally not a guaranteed property
	// that you may round-trip a reflect.Value by calling Value.Addr().Elem()
	// and expect the value to still be settable for values derived from
	// unexported embedded struct fields.
	//
	// The logic below effectively does this when it first addresses the value
	// (to satisfy possible pointer methods) and continues to dereference
	// subsequent pointers as necessary.
	//
	// After the first round-trip, we set v back to the original value to
	// preserve the original RW flags contained in reflect.Value.
	v0 := v
	haveAddr := false

	// If v is a named type and is addressable,
	// start with its address, so that if the type has pointer methods,
	// we find them.
	if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() {
		haveAddr = true
		v = v.Addr()
	}
	for {
		// Load value from interface, but only if the result will be
		// usefully addressable.
		if v.Kind() == reflect.Interface && !v.IsNil() {
			e := v.Elem()
			if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) {
				haveAddr = false
				v = e
				continue
			}
		}

		if v.Kind() != reflect.Ptr {
			break
		}

		// When decoding null, stop at a settable pointer so the caller
		// can set it to nil rather than allocating through it.
		if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() {
			break
		}

		// Prevent infinite loop if v is an interface pointing to its own address:
		//     var v interface{}
		//     v = &v
		if v.Elem().Kind() == reflect.Interface && v.Elem().Elem() == v {
			v = v.Elem()
			break
		}
		if v.IsNil() {
			v.Set(reflect.New(v.Type().Elem()))
		}
		// Check for the unmarshaler interfaces at each pointer level.
		if v.Type().NumMethod() > 0 && v.CanInterface() {
			if u, ok := v.Interface().(Unmarshaler); ok {
				return u, nil, reflect.Value{}
			}
			if !decodingNull {
				if u, ok := v.Interface().(encoding.TextUnmarshaler); ok {
					return nil, u, reflect.Value{}
				}
			}
		}

		if haveAddr {
			v = v0 // restore original value after round-trip Value.Addr().Elem()
			haveAddr = false
		} else {
			v = v.Elem()
		}
	}
	return nil, nil, v
}
// array consumes an array from d.data[d.off-1:], decoding into v.
// The first byte of the array ('[') has been read already.
func (d *decodeState) array(v reflect.Value) error {
	// Check for unmarshaler.
	u, ut, pv := indirect(v, false)
	if u != nil {
		// Custom unmarshaler: hand it the raw bytes of the array.
		start := d.readIndex()
		d.skip()
		return u.UnmarshalJSON(d.data[start:d.off])
	}
	if ut != nil {
		// TextUnmarshaler cannot accept an array.
		d.saveError(&UnmarshalTypeError{Value: "array", Type: v.Type(), Offset: int64(d.off)})
		d.skip()
		return nil
	}
	v = pv

	// Check type of target.
	switch v.Kind() {
	case reflect.Interface:
		if v.NumMethod() == 0 {
			// Decoding into nil interface? Switch to non-reflect code.
			ai := d.arrayInterface()
			v.Set(reflect.ValueOf(ai))
			return nil
		}
		// Otherwise it's invalid.
		fallthrough
	default:
		d.saveError(&UnmarshalTypeError{Value: "array", Type: v.Type(), Offset: int64(d.off)})
		d.skip()
		return nil
	case reflect.Array, reflect.Slice:
		break
	}

	// Element-by-element decode loop.
	i := 0
	for {
		// Look ahead for ] - can only happen on first iteration.
		d.scanWhile(scanSkipSpace)
		if d.opcode == scanEndArray {
			break
		}

		// Get element of array, growing if necessary.
		if v.Kind() == reflect.Slice {
			// Grow slice if necessary
			if i >= v.Cap() {
				newcap := v.Cap() + v.Cap()/2
				if newcap < 4 {
					newcap = 4
				}
				newv := reflect.MakeSlice(v.Type(), v.Len(), newcap)
				reflect.Copy(newv, v)
				v.Set(newv)
			}
			if i >= v.Len() {
				v.SetLen(i + 1)
			}
		}

		if i < v.Len() {
			// Decode into element.
			if err := d.value(v.Index(i)); err != nil {
				return err
			}
		} else {
			// Ran out of fixed array: skip.
			if err := d.value(reflect.Value{}); err != nil {
				return err
			}
		}
		i++

		// Next token must be , or ].
		if d.opcode == scanSkipSpace {
			d.scanWhile(scanSkipSpace)
		}
		if d.opcode == scanEndArray {
			break
		}
		if d.opcode != scanArrayValue {
			panic(phasePanicMsg)
		}
	}

	// The JSON array was shorter than the Go destination.
	if i < v.Len() {
		if v.Kind() == reflect.Array {
			// Array. Zero the rest.
			z := reflect.Zero(v.Type().Elem())
			for ; i < v.Len(); i++ {
				v.Index(i).Set(z)
			}
		} else {
			v.SetLen(i)
		}
	}
	// An empty JSON array decodes into a fresh empty (non-nil) slice.
	if i == 0 && v.Kind() == reflect.Slice {
		v.Set(reflect.MakeSlice(v.Type(), 0, 0))
	}
	return nil
}
// nullLiteral is the raw bytes of the JSON null literal.
var nullLiteral = []byte("null")

// textUnmarshalerType is the reflected encoding.TextUnmarshaler interface type.
var textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
// object consumes an object from d.data[d.off-1:], decoding into v.
// The first byte ('{') of the object has been read already.
func (d *decodeState) object(v reflect.Value) error {
	// Check for unmarshaler.
	u, ut, pv := indirect(v, false)
	if u != nil {
		// Custom unmarshaler: hand it the raw bytes of the object.
		start := d.readIndex()
		d.skip()
		return u.UnmarshalJSON(d.data[start:d.off])
	}
	if ut != nil {
		// TextUnmarshaler cannot accept an object.
		d.saveError(&UnmarshalTypeError{Value: "object", Type: v.Type(), Offset: int64(d.off)})
		d.skip()
		return nil
	}
	v = pv
	t := v.Type()

	// Decoding into nil interface? Switch to non-reflect code.
	if v.Kind() == reflect.Interface && v.NumMethod() == 0 {
		oi := d.objectInterface()
		v.Set(reflect.ValueOf(oi))
		return nil
	}

	var fields structFields

	// Check type of target:
	//   struct or
	//   map[T1]T2 where T1 is string, an integer type,
	//             or an encoding.TextUnmarshaler
	switch v.Kind() {
	case reflect.Map:
		// Map key must either have string kind, have an integer kind,
		// or be an encoding.TextUnmarshaler.
		switch t.Key().Kind() {
		case reflect.String,
			reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
			reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		default:
			if !reflect.PtrTo(t.Key()).Implements(textUnmarshalerType) {
				d.saveError(&UnmarshalTypeError{Value: "object", Type: t, Offset: int64(d.off)})
				d.skip()
				return nil
			}
		}
		if v.IsNil() {
			v.Set(reflect.MakeMap(t))
		}
	case reflect.Struct:
		fields = cachedTypeFields(t)
		// ok
	default:
		d.saveError(&UnmarshalTypeError{Value: "object", Type: t, Offset: int64(d.off)})
		d.skip()
		return nil
	}

	var mapElem reflect.Value
	// Remember the error context so it can be restored after each key,
	// keeping field paths accurate for nested objects.
	origErrorContext := d.errorContext

	for {
		// Read opening " of string key or closing }.
		d.scanWhile(scanSkipSpace)
		if d.opcode == scanEndObject {
			// closing } - can only happen on first iteration.
			break
		}
		if d.opcode != scanBeginLiteral {
			panic(phasePanicMsg)
		}

		// Read key.
		start := d.readIndex()
		d.rescanLiteral()
		item := d.data[start:d.readIndex()]
		key, ok := unquoteBytes(item)
		if !ok {
			panic(phasePanicMsg)
		}

		// Figure out field corresponding to key.
		var subv reflect.Value
		destring := false // whether the value is wrapped in a string to be decoded first

		if v.Kind() == reflect.Map {
			// Map target: decode into a reusable scratch element.
			elemType := t.Elem()
			if !mapElem.IsValid() {
				mapElem = reflect.New(elemType).Elem()
			} else {
				mapElem.Set(reflect.Zero(elemType))
			}
			subv = mapElem
		} else {
			var f *field
			if i, ok := fields.nameIndex[string(key)]; ok {
				// Found an exact name match.
				f = &fields.list[i]
			} else {
				// Fall back to the expensive case-insensitive
				// linear search.
				for i := range fields.list {
					ff := &fields.list[i]
					if ff.equalFold(ff.nameBytes, key) {
						f = ff
						break
					}
				}
			}
			if f != nil {
				subv = v
				destring = f.quoted
				// Walk the field's index path, allocating embedded
				// pointers along the way.
				for _, i := range f.index {
					if subv.Kind() == reflect.Ptr {
						if subv.IsNil() {
							// If a struct embeds a pointer to an unexported type,
							// it is not possible to set a newly allocated value
							// since the field is unexported.
							//
							// See https://golang.org/issue/21357
							if !subv.CanSet() {
								d.saveError(fmt.Errorf("json: cannot set embedded pointer to unexported struct: %v", subv.Type().Elem()))
								// Invalidate subv to ensure d.value(subv) skips over
								// the JSON value without assigning it to subv.
								subv = reflect.Value{}
								destring = false
								break
							}
							subv.Set(reflect.New(subv.Type().Elem()))
						}
						subv = subv.Elem()
					}
					subv = subv.Field(i)
				}
				d.errorContext.FieldStack = append(d.errorContext.FieldStack, f.name)
				d.errorContext.Struct = t
			} else if d.disallowUnknownFields {
				d.saveError(fmt.Errorf("json: unknown field %q", key))
			}
		}

		// Read : before value.
		if d.opcode == scanSkipSpace {
			d.scanWhile(scanSkipSpace)
		}
		if d.opcode != scanObjectKey {
			panic(phasePanicMsg)
		}
		d.scanWhile(scanSkipSpace)

		if destring {
			// ",string" tagged field: the value is a quoted string that
			// must itself be decoded.
			switch qv := d.valueQuoted().(type) {
			case nil:
				if err := d.literalStore(nullLiteral, subv, false); err != nil {
					return err
				}
			case string:
				if err := d.literalStore([]byte(qv), subv, true); err != nil {
					return err
				}
			default:
				d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into %v", subv.Type()))
			}
		} else {
			if err := d.value(subv); err != nil {
				return err
			}
		}

		// Write value back to map;
		// if using struct, subv points into struct already.
		if v.Kind() == reflect.Map {
			kt := t.Key()
			var kv reflect.Value
			switch {
			case kt.Kind() == reflect.String:
				kv = reflect.ValueOf(key).Convert(kt)
			case reflect.PtrTo(kt).Implements(textUnmarshalerType):
				kv = reflect.New(kt)
				if err := d.literalStore(item, kv, true); err != nil {
					return err
				}
				kv = kv.Elem()
			default:
				switch kt.Kind() {
				case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
					s := string(key)
					n, err := strconv.ParseInt(s, 10, 64)
					if err != nil || reflect.Zero(kt).OverflowInt(n) {
						d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: kt, Offset: int64(start + 1)})
						break
					}
					kv = reflect.ValueOf(n).Convert(kt)
				case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
					s := string(key)
					n, err := strconv.ParseUint(s, 10, 64)
					if err != nil || reflect.Zero(kt).OverflowUint(n) {
						d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: kt, Offset: int64(start + 1)})
						break
					}
					kv = reflect.ValueOf(n).Convert(kt)
				default:
					panic("json: Unexpected key type") // should never occur
				}
			}
			if kv.IsValid() {
				v.SetMapIndex(kv, subv)
			}
		}

		// Next token must be , or }.
		if d.opcode == scanSkipSpace {
			d.scanWhile(scanSkipSpace)
		}
		// Reset errorContext to its original state.
		// Keep the same underlying array for FieldStack, to reuse the
		// space and avoid unnecessary allocs.
		d.errorContext.FieldStack = d.errorContext.FieldStack[:len(origErrorContext.FieldStack)]
		d.errorContext.Struct = origErrorContext.Struct
		if d.opcode == scanEndObject {
			break
		}
		if d.opcode != scanObjectValue {
			panic(phasePanicMsg)
		}
	}
	return nil
}
// convertNumber converts the number literal s to a float64 or a Number
// depending on the setting of d.useNumber.
func (d *decodeState) convertNumber(s string) (interface{}, error) {
	if !d.useNumber {
		f, err := strconv.ParseFloat(s, 64)
		if err != nil {
			return nil, &UnmarshalTypeError{Value: "number " + s, Type: reflect.TypeOf(0.0), Offset: int64(d.off)}
		}
		return f, nil
	}
	// Caller opted in to Number: keep the literal text verbatim.
	return Number(s), nil
}

// numberType is the reflected type of Number, used by literalStore.
var numberType = reflect.TypeOf(Number(""))
// literalStore decodes a literal stored in item into v.
//
// fromQuoted indicates whether this literal came from unwrapping a
// string from the ",string" struct tag option. this is used only to
// produce more helpful error messages.
func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) error {
	// Check for unmarshaler.
	if len(item) == 0 {
		// Empty string given; can only occur via the ",string" path.
		d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
		return nil
	}
	isNull := item[0] == 'n' // null
	u, ut, pv := indirect(v, isNull)
	if u != nil {
		return u.UnmarshalJSON(item)
	}
	if ut != nil {
		// TextUnmarshaler targets only accept JSON strings.
		if item[0] != '"' {
			if fromQuoted {
				d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
				return nil
			}
			val := "number"
			switch item[0] {
			case 'n':
				val = "null"
			case 't', 'f':
				val = "bool"
			}
			d.saveError(&UnmarshalTypeError{Value: val, Type: v.Type(), Offset: int64(d.readIndex())})
			return nil
		}
		s, ok := unquoteBytes(item)
		if !ok {
			if fromQuoted {
				return fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())
			}
			panic(phasePanicMsg)
		}
		return ut.UnmarshalText(s)
	}
	v = pv
	switch c := item[0]; c {
	case 'n': // null
		// The main parser guarantees that only the literal null reaches here,
		// but if this was a quoted string input, it could be anything.
		if fromQuoted && string(item) != "null" {
			d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
			break
		}
		switch v.Kind() {
		case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice:
			v.Set(reflect.Zero(v.Type()))
			// otherwise, ignore null for primitives/string
		}
	case 't', 'f': // true, false
		value := item[0] == 't'
		// The main parser checks that only true and false can reach here,
		// but if this was a quoted string input, it could be anything.
		if fromQuoted && string(item) != "true" && string(item) != "false" {
			d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
			break
		}
		switch v.Kind() {
		default:
			if fromQuoted {
				d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
			} else {
				d.saveError(&UnmarshalTypeError{Value: "bool", Type: v.Type(), Offset: int64(d.readIndex())})
			}
		case reflect.Bool:
			v.SetBool(value)
		case reflect.Interface:
			// Only the empty interface can hold an untyped bool.
			if v.NumMethod() == 0 {
				v.Set(reflect.ValueOf(value))
			} else {
				d.saveError(&UnmarshalTypeError{Value: "bool", Type: v.Type(), Offset: int64(d.readIndex())})
			}
		}
	case '"': // string
		s, ok := unquoteBytes(item)
		if !ok {
			if fromQuoted {
				return fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())
			}
			panic(phasePanicMsg)
		}
		switch v.Kind() {
		default:
			d.saveError(&UnmarshalTypeError{Value: "string", Type: v.Type(), Offset: int64(d.readIndex())})
		case reflect.Slice:
			// A JSON string decodes into []byte via base64.
			if v.Type().Elem().Kind() != reflect.Uint8 {
				d.saveError(&UnmarshalTypeError{Value: "string", Type: v.Type(), Offset: int64(d.readIndex())})
				break
			}
			b := make([]byte, base64.StdEncoding.DecodedLen(len(s)))
			n, err := base64.StdEncoding.Decode(b, s)
			if err != nil {
				d.saveError(err)
				break
			}
			v.SetBytes(b[:n])
		case reflect.String:
			v.SetString(string(s))
		case reflect.Interface:
			if v.NumMethod() == 0 {
				v.Set(reflect.ValueOf(string(s)))
			} else {
				d.saveError(&UnmarshalTypeError{Value: "string", Type: v.Type(), Offset: int64(d.readIndex())})
			}
		}
	default: // number
		if c != '-' && (c < '0' || c > '9') {
			if fromQuoted {
				return fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())
			}
			panic(phasePanicMsg)
		}
		s := string(item)
		switch v.Kind() {
		default:
			if v.Kind() == reflect.String && v.Type() == numberType {
				// s must be a valid number, because it's
				// already been tokenized.
				v.SetString(s)
				break
			}
			if fromQuoted {
				return fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())
			}
			d.saveError(&UnmarshalTypeError{Value: "number", Type: v.Type(), Offset: int64(d.readIndex())})
		case reflect.Interface:
			n, err := d.convertNumber(s)
			if err != nil {
				d.saveError(err)
				break
			}
			if v.NumMethod() != 0 {
				d.saveError(&UnmarshalTypeError{Value: "number", Type: v.Type(), Offset: int64(d.readIndex())})
				break
			}
			v.Set(reflect.ValueOf(n))
		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
			n, err := strconv.ParseInt(s, 10, 64)
			if err != nil || v.OverflowInt(n) {
				d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: v.Type(), Offset: int64(d.readIndex())})
				break
			}
			v.SetInt(n)
		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
			n, err := strconv.ParseUint(s, 10, 64)
			if err != nil || v.OverflowUint(n) {
				d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: v.Type(), Offset: int64(d.readIndex())})
				break
			}
			v.SetUint(n)
		case reflect.Float32, reflect.Float64:
			n, err := strconv.ParseFloat(s, v.Type().Bits())
			if err != nil || v.OverflowFloat(n) {
				d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: v.Type(), Offset: int64(d.readIndex())})
				break
			}
			v.SetFloat(n)
		}
	}
	return nil
}
// The xxxInterface routines build up a value to be stored
// in an empty interface. They are not strictly necessary,
// but they avoid the weight of reflection in this common case.

// valueInterface is like value but returns interface{}; it dispatches
// on the pending scanner opcode.
func (d *decodeState) valueInterface() (val interface{}) {
	switch d.opcode {
	case scanBeginArray:
		val = d.arrayInterface()
		d.scanNext()
	case scanBeginObject:
		val = d.objectInterface()
		d.scanNext()
	case scanBeginLiteral:
		val = d.literalInterface()
	default:
		panic(phasePanicMsg)
	}
	return val
}
// arrayInterface is like array but returns []interface{}.
// It consumes elements until the scanner reports the closing ']'.
func (d *decodeState) arrayInterface() []interface{} {
	// Non-nil so an empty JSON array encodes back as [] rather than null.
	var v = make([]interface{}, 0)
	for {
		// Look ahead for ] - can only happen on first iteration.
		d.scanWhile(scanSkipSpace)
		if d.opcode == scanEndArray {
			break
		}
		v = append(v, d.valueInterface())
		// Next token must be , or ].
		if d.opcode == scanSkipSpace {
			d.scanWhile(scanSkipSpace)
		}
		if d.opcode == scanEndArray {
			break
		}
		if d.opcode != scanArrayValue {
			panic(phasePanicMsg)
		}
	}
	return v
}
// objectInterface is like object but returns map[string]interface{}.
// Keys are decoded as plain strings; values recurse through valueInterface.
func (d *decodeState) objectInterface() map[string]interface{} {
	m := make(map[string]interface{})
	for {
		// Read opening " of string key or closing }.
		d.scanWhile(scanSkipSpace)
		if d.opcode == scanEndObject {
			// closing } - can only happen on first iteration.
			break
		}
		if d.opcode != scanBeginLiteral {
			panic(phasePanicMsg)
		}
		// Read string key.
		start := d.readIndex()
		d.rescanLiteral()
		item := d.data[start:d.readIndex()]
		key, ok := unquote(item)
		if !ok {
			panic(phasePanicMsg)
		}
		// Read : before value.
		if d.opcode == scanSkipSpace {
			d.scanWhile(scanSkipSpace)
		}
		if d.opcode != scanObjectKey {
			panic(phasePanicMsg)
		}
		d.scanWhile(scanSkipSpace)
		// Read value. Duplicate keys overwrite earlier entries.
		m[key] = d.valueInterface()
		// Next token must be , or }.
		if d.opcode == scanSkipSpace {
			d.scanWhile(scanSkipSpace)
		}
		if d.opcode == scanEndObject {
			break
		}
		if d.opcode != scanObjectValue {
			panic(phasePanicMsg)
		}
	}
	return m
}
// literalInterface consumes and returns a literal from d.data[d.off-1:] and
// it reads the following byte ahead. The first byte of the literal has been
// read already (that's how the caller knows it's a literal).
func (d *decodeState) literalInterface() interface{} {
	// All bytes inside literal return scanContinue op code.
	start := d.readIndex()
	d.rescanLiteral()
	item := d.data[start:d.readIndex()]
	// The first byte is enough to classify the literal: the scanner has
	// already validated its syntax.
	switch c := item[0]; c {
	case 'n': // null
		return nil
	case 't', 'f': // true, false
		return c == 't'
	case '"': // string
		s, ok := unquote(item)
		if !ok {
			panic(phasePanicMsg)
		}
		return s
	default: // number
		if c != '-' && (c < '0' || c > '9') {
			panic(phasePanicMsg)
		}
		n, err := d.convertNumber(string(item))
		if err != nil {
			d.saveError(err)
		}
		return n
	}
}
// getu4 decodes \uXXXX from the beginning of s, returning the hex value,
// or it returns -1 if s is too short or not a \u escape with four hex digits.
func getu4(s []byte) rune {
	if len(s) < 6 || s[0] != '\\' || s[1] != 'u' {
		return -1
	}
	var r rune
	for _, c := range s[2:6] {
		var digit rune
		switch {
		case c >= '0' && c <= '9':
			digit = rune(c - '0')
		case c >= 'a' && c <= 'f':
			digit = rune(c-'a') + 10
		case c >= 'A' && c <= 'F':
			digit = rune(c-'A') + 10
		default:
			return -1
		}
		r = r<<4 | digit
	}
	return r
}
// unquote converts a quoted JSON string literal s into an actual string t.
// The rules are different than for Go, so cannot use strconv.Unquote.
func unquote(s []byte) (t string, ok bool) {
	var b []byte
	b, ok = unquoteBytes(s)
	t = string(b)
	return t, ok
}
// unquoteBytes is the []byte form of unquote. It strips the surrounding
// quotes, resolves backslash escapes, and coerces malformed UTF-8 to
// U+FFFD. When the literal needs no unquoting it returns a subslice of
// the original bytes with no allocation.
func unquoteBytes(s []byte) (t []byte, ok bool) {
	if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' {
		return
	}
	s = s[1 : len(s)-1]
	// Check for unusual characters. If there are none,
	// then no unquoting is needed, so return a slice of the
	// original bytes.
	r := 0
	for r < len(s) {
		c := s[r]
		if c == '\\' || c == '"' || c < ' ' {
			break
		}
		if c < utf8.RuneSelf {
			r++
			continue
		}
		rr, size := utf8.DecodeRune(s[r:])
		if rr == utf8.RuneError && size == 1 {
			break
		}
		r += size
	}
	if r == len(s) {
		return s, true
	}
	// Slow path: copy the prefix already scanned, then decode the rest
	// escape by escape into b.
	b := make([]byte, len(s)+2*utf8.UTFMax)
	w := copy(b, s[0:r])
	for r < len(s) {
		// Out of room? Can only happen if s is full of
		// malformed UTF-8 and we're replacing each
		// byte with RuneError.
		if w >= len(b)-2*utf8.UTFMax {
			nb := make([]byte, (len(b)+utf8.UTFMax)*2)
			copy(nb, b[0:w])
			b = nb
		}
		switch c := s[r]; {
		case c == '\\':
			r++
			if r >= len(s) {
				return
			}
			switch s[r] {
			default:
				// Unknown escape: reject the whole literal.
				return
			case '"', '\\', '/', '\'':
				b[w] = s[r]
				r++
				w++
			case 'b':
				b[w] = '\b'
				r++
				w++
			case 'f':
				b[w] = '\f'
				r++
				w++
			case 'n':
				b[w] = '\n'
				r++
				w++
			case 'r':
				b[w] = '\r'
				r++
				w++
			case 't':
				b[w] = '\t'
				r++
				w++
			case 'u':
				// Back up so getu4 sees the full \uXXXX sequence.
				r--
				rr := getu4(s[r:])
				if rr < 0 {
					return
				}
				r += 6
				if utf16.IsSurrogate(rr) {
					rr1 := getu4(s[r:])
					if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar {
						// A valid pair; consume.
						r += 6
						w += utf8.EncodeRune(b[w:], dec)
						break
					}
					// Invalid surrogate; fall back to replacement rune.
					rr = unicode.ReplacementChar
				}
				w += utf8.EncodeRune(b[w:], rr)
			}
		// Quote, control characters are invalid.
		case c == '"', c < ' ':
			return
		// ASCII
		case c < utf8.RuneSelf:
			b[w] = c
			r++
			w++
		// Coerce to well-formed UTF-8.
		default:
			rr, size := utf8.DecodeRune(s[r:])
			r += size
			w += utf8.EncodeRune(b[w:], rr)
		}
	}
	return b[0:w], true
}
encoding/json: clarify Unmarshal behavior for map keys
This is a documentation-only change.
Fixes #33298
Change-Id: I816058a872b57dc868dff11887214d9de92d9342
Reviewed-on: https://go-review.googlesource.com/c/go/+/188821
Reviewed-by: Daniel Martí <29bf28b42a938ace12a5819da4ed4ba03d82f315@mvdan.cc>
Run-TryBot: Daniel Martí <29bf28b42a938ace12a5819da4ed4ba03d82f315@mvdan.cc>
TryBot-Result: Gobot Gobot <66cb808b70d30c07676d5e946fee83fd561249e5@golang.org>
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Represents JSON data structure using native Go types: booleans, floats,
// strings, arrays, and maps.
package json
import (
"encoding"
"encoding/base64"
"fmt"
"reflect"
"strconv"
"strings"
"unicode"
"unicode/utf16"
"unicode/utf8"
)
// Unmarshal parses the JSON-encoded data and stores the result
// in the value pointed to by v. If v is nil or not a pointer,
// Unmarshal returns an InvalidUnmarshalError.
//
// Unmarshal uses the inverse of the encodings that
// Marshal uses, allocating maps, slices, and pointers as necessary,
// with the following additional rules:
//
// To unmarshal JSON into a pointer, Unmarshal first handles the case of
// the JSON being the JSON literal null. In that case, Unmarshal sets
// the pointer to nil. Otherwise, Unmarshal unmarshals the JSON into
// the value pointed at by the pointer. If the pointer is nil, Unmarshal
// allocates a new value for it to point to.
//
// To unmarshal JSON into a value implementing the Unmarshaler interface,
// Unmarshal calls that value's UnmarshalJSON method, including
// when the input is a JSON null.
// Otherwise, if the value implements encoding.TextUnmarshaler
// and the input is a JSON quoted string, Unmarshal calls that value's
// UnmarshalText method with the unquoted form of the string.
//
// To unmarshal JSON into a struct, Unmarshal matches incoming object
// keys to the keys used by Marshal (either the struct field name or its tag),
// preferring an exact match but also accepting a case-insensitive match. By
// default, object keys which don't have a corresponding struct field are
// ignored (see Decoder.DisallowUnknownFields for an alternative).
//
// To unmarshal JSON into an interface value,
// Unmarshal stores one of these in the interface value:
//
//	bool, for JSON booleans
//	float64, for JSON numbers
//	string, for JSON strings
//	[]interface{}, for JSON arrays
//	map[string]interface{}, for JSON objects
//	nil for JSON null
//
// To unmarshal a JSON array into a slice, Unmarshal resets the slice length
// to zero and then appends each element to the slice.
// As a special case, to unmarshal an empty JSON array into a slice,
// Unmarshal replaces the slice with a new empty slice.
//
// To unmarshal a JSON array into a Go array, Unmarshal decodes
// JSON array elements into corresponding Go array elements.
// If the Go array is smaller than the JSON array,
// the additional JSON array elements are discarded.
// If the JSON array is smaller than the Go array,
// the additional Go array elements are set to zero values.
//
// To unmarshal a JSON object into a map, Unmarshal first establishes a map to
// use. If the map is nil, Unmarshal allocates a new map. Otherwise Unmarshal
// reuses the existing map, keeping existing entries. Unmarshal then stores
// key-value pairs from the JSON object into the map. The map's key type must
// either be any string type, an integer, implement json.Unmarshaler, or
// implement encoding.TextUnmarshaler.
//
// If a JSON value is not appropriate for a given target type,
// or if a JSON number overflows the target type, Unmarshal
// skips that field and completes the unmarshaling as best it can.
// If no more serious errors are encountered, Unmarshal returns
// an UnmarshalTypeError describing the earliest such error. In any
// case, it's not guaranteed that all the remaining fields following
// the problematic one will be unmarshaled into the target object.
//
// The JSON null value unmarshals into an interface, map, pointer, or slice
// by setting that Go value to nil. Because null is often used in JSON to mean
// "not present," unmarshaling a JSON null into any other Go type has no effect
// on the value and produces no error.
//
// When unmarshaling quoted strings, invalid UTF-8 or
// invalid UTF-16 surrogate pairs are not treated as an error.
// Instead, they are replaced by the Unicode replacement
// character U+FFFD.
func Unmarshal(data []byte, v interface{}) error {
	// Check for well-formedness.
	// Avoids filling out half a data structure
	// before discovering a JSON syntax error.
	var d decodeState
	err := checkValid(data, &d.scan)
	if err != nil {
		return err
	}
	d.init(data)
	return d.unmarshal(v)
}
// Unmarshaler is the interface implemented by types
// that can unmarshal a JSON description of themselves.
// The input can be assumed to be a valid encoding of
// a JSON value. UnmarshalJSON must copy the JSON data
// if it wishes to retain the data after returning.
//
// By convention, to approximate the behavior of Unmarshal itself,
// Unmarshalers implement UnmarshalJSON([]byte("null")) as a no-op.
type Unmarshaler interface {
	// UnmarshalJSON decodes a single valid JSON value into the receiver.
	UnmarshalJSON([]byte) error
}
// An UnmarshalTypeError describes a JSON value that was
// not appropriate for a value of a specific Go type.
type UnmarshalTypeError struct {
Value string // description of JSON value - "bool", "array", "number -5"
Type reflect.Type // type of Go value it could not be assigned to
Offset int64 // error occurred after reading Offset bytes
Struct string // name of the struct type containing the field
Field string // the full path from root node to the field
}
func (e *UnmarshalTypeError) Error() string {
if e.Struct != "" || e.Field != "" {
return "json: cannot unmarshal " + e.Value + " into Go struct field " + e.Struct + "." + e.Field + " of type " + e.Type.String()
}
return "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String()
}
// An UnmarshalFieldError describes a JSON object key that
// led to an unexported (and therefore unwritable) struct field.
//
// Deprecated: No longer used; kept for compatibility.
type UnmarshalFieldError struct {
Key string
Type reflect.Type
Field reflect.StructField
}
func (e *UnmarshalFieldError) Error() string {
return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String()
}
// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal.
// (The argument to Unmarshal must be a non-nil pointer.)
type InvalidUnmarshalError struct {
Type reflect.Type
}
func (e *InvalidUnmarshalError) Error() string {
if e.Type == nil {
return "json: Unmarshal(nil)"
}
if e.Type.Kind() != reflect.Ptr {
return "json: Unmarshal(non-pointer " + e.Type.String() + ")"
}
return "json: Unmarshal(nil " + e.Type.String() + ")"
}
// unmarshal decodes the JSON value at the start of d.data into v.
// v must be a non-nil pointer; otherwise an InvalidUnmarshalError
// is returned without consuming any input.
func (d *decodeState) unmarshal(v interface{}) error {
	rv := reflect.ValueOf(v)
	if rv.Kind() != reflect.Ptr || rv.IsNil() {
		return &InvalidUnmarshalError{reflect.TypeOf(v)}
	}
	d.scan.reset()
	d.scanWhile(scanSkipSpace)
	// We decode rv not rv.Elem because the Unmarshaler interface
	// test must be applied at the top level of the value.
	err := d.value(rv)
	if err != nil {
		return d.addErrorContext(err)
	}
	// Fatal errors return above; savedError holds the first non-fatal
	// error (e.g. an UnmarshalTypeError) recorded during decoding.
	return d.savedError
}
// A Number represents a JSON number literal.
type Number string

// String returns the literal text of the number.
func (n Number) String() string { return string(n) }

// Float64 returns the number as a float64.
func (n Number) Float64() (float64, error) {
	return strconv.ParseFloat(n.String(), 64)
}

// Int64 returns the number as an int64.
func (n Number) Int64() (int64, error) {
	return strconv.ParseInt(n.String(), 10, 64)
}
// decodeState represents the state while decoding a JSON value.
type decodeState struct {
	data   []byte
	off    int // next read offset in data
	opcode int // last read result
	scan   scanner
	errorContext struct { // provides context for type errors
		Struct     reflect.Type // struct type currently being decoded into
		FieldStack []string     // path of field names from the root
	}
	savedError            error // first non-fatal decode error, reported at the end
	useNumber             bool  // when set, numbers decode as Number instead of float64
	disallowUnknownFields bool  // when set, unknown object keys produce an error
}
// readIndex returns the position of the last byte read
// (d.off always points one past it).
func (d *decodeState) readIndex() int {
	return d.off - 1
}
// phasePanicMsg is used as a panic message when we end up with something that
// shouldn't happen. It can indicate a bug in the JSON decoder, or that
// something is editing the data slice while the decoder executes.
const phasePanicMsg = "JSON decoder out of sync - data changing underfoot?"
// init prepares d to decode data, clearing any state left over from a
// previous decode so decodeState values can be reused. It returns d for
// call chaining.
func (d *decodeState) init(data []byte) *decodeState {
	d.data = data
	d.off = 0
	d.savedError = nil
	d.errorContext.Struct = nil
	// Reuse the allocated space for the FieldStack slice.
	d.errorContext.FieldStack = d.errorContext.FieldStack[:0]
	return d
}
// saveError saves the first err it is called with,
// for reporting at the end of the unmarshal.
func (d *decodeState) saveError(err error) {
	if d.savedError != nil {
		// An earlier error wins; later ones are dropped.
		return
	}
	d.savedError = d.addErrorContext(err)
}
// addErrorContext returns err enhanced with the struct/field context
// tracked in d.errorContext, when both a context and a type error exist.
func (d *decodeState) addErrorContext(err error) error {
	if d.errorContext.Struct == nil && len(d.errorContext.FieldStack) == 0 {
		return err
	}
	if typeErr, ok := err.(*UnmarshalTypeError); ok {
		typeErr.Struct = d.errorContext.Struct.Name()
		typeErr.Field = strings.Join(d.errorContext.FieldStack, ".")
		return typeErr
	}
	return err
}
// skip scans to the end of what was started: it feeds bytes to the
// scanner until the parse stack shrinks below its depth on entry,
// i.e. until the current compound value has been fully consumed.
func (d *decodeState) skip() {
	s, data, i := &d.scan, d.data, d.off
	depth := len(s.parseState)
	for {
		op := s.step(s, data[i])
		i++
		if len(s.parseState) < depth {
			// The value (and its closing delimiter) is consumed.
			d.off = i
			d.opcode = op
			return
		}
	}
}
// scanNext processes the byte at d.data[d.off], or signals EOF to the
// scanner once the input is exhausted.
func (d *decodeState) scanNext() {
	if d.off >= len(d.data) {
		d.opcode = d.scan.eof()
		d.off = len(d.data) + 1 // mark processed EOF with len+1
		return
	}
	d.opcode = d.scan.step(&d.scan, d.data[d.off])
	d.off++
}
// scanWhile processes bytes in d.data[d.off:] until it
// receives a scan code not equal to op.
func (d *decodeState) scanWhile(op int) {
	// Work on locals to avoid repeated field loads in the hot loop.
	s, data, i := &d.scan, d.data, d.off
	for i < len(data) {
		newOp := s.step(s, data[i])
		i++
		if newOp != op {
			d.opcode = newOp
			d.off = i
			return
		}
	}
	d.off = len(data) + 1 // mark processed EOF with len+1
	d.opcode = d.scan.eof()
}
// rescanLiteral is similar to scanWhile(scanContinue), but it specialises the
// common case where we're decoding a literal. The decoder scans the input
// twice, once for syntax errors and to check the length of the value, and the
// second to perform the decoding.
//
// Only in the second step do we use decodeState to tokenize literals, so we
// know there aren't any syntax errors. We can take advantage of that knowledge,
// and scan a literal's bytes much more quickly.
func (d *decodeState) rescanLiteral() {
	data, i := d.data, d.off
Switch:
	// data[i-1] is the first byte of the literal, already consumed.
	switch data[i-1] {
	case '"': // string
		for ; i < len(data); i++ {
			switch data[i] {
			case '\\':
				i++ // escaped char
			case '"':
				i++ // tokenize the closing quote too
				break Switch
			}
		}
	case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '-': // number
		for ; i < len(data); i++ {
			switch data[i] {
			case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
				'.', 'e', 'E', '+', '-':
			default:
				break Switch
			}
		}
	case 't': // true
		i += len("rue")
	case 'f': // false
		i += len("alse")
	case 'n': // null
		i += len("ull")
	}
	// Re-synchronize the scanner with the byte after the literal.
	if i < len(data) {
		d.opcode = stateEndValue(&d.scan, data[i])
	} else {
		d.opcode = scanEnd
	}
	d.off = i + 1
}
// value consumes a JSON value from d.data[d.off-1:], decoding into v, and
// reads the following byte ahead. If v is invalid, the value is discarded.
// The first byte of the value has been read already.
func (d *decodeState) value(v reflect.Value) error {
	switch d.opcode {
	default:
		panic(phasePanicMsg)
	case scanBeginArray:
		if v.IsValid() {
			if err := d.array(v); err != nil {
				return err
			}
		} else {
			// No destination: consume the array without decoding it.
			d.skip()
		}
		d.scanNext()
	case scanBeginObject:
		if v.IsValid() {
			if err := d.object(v); err != nil {
				return err
			}
		} else {
			// No destination: consume the object without decoding it.
			d.skip()
		}
		d.scanNext()
	case scanBeginLiteral:
		// All bytes inside literal return scanContinue op code.
		start := d.readIndex()
		d.rescanLiteral()
		if v.IsValid() {
			if err := d.literalStore(d.data[start:d.readIndex()], v, false); err != nil {
				return err
			}
		}
	}
	return nil
}
// unquotedValue is a sentinel returned by valueQuoted when the value
// was not a quoted string or null.
type unquotedValue struct{}

// valueQuoted is like value but decodes a
// quoted string literal or literal null into an interface value.
// If it finds anything other than a quoted string literal or null,
// valueQuoted returns unquotedValue{}.
func (d *decodeState) valueQuoted() interface{} {
	switch d.opcode {
	default:
		panic(phasePanicMsg)
	case scanBeginArray, scanBeginObject:
		// Arrays and objects are never valid ",string" payloads;
		// consume and report the sentinel.
		d.skip()
		d.scanNext()
	case scanBeginLiteral:
		v := d.literalInterface()
		switch v.(type) {
		case nil, string:
			return v
		}
	}
	return unquotedValue{}
}
// indirect walks down v allocating pointers as needed,
// until it gets to a non-pointer.
// if it encounters an Unmarshaler, indirect stops and returns that.
// if decodingNull is true, indirect stops at the last pointer so it can be set to nil.
func indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnmarshaler, reflect.Value) {
	// Issue #24153 indicates that it is generally not a guaranteed property
	// that you may round-trip a reflect.Value by calling Value.Addr().Elem()
	// and expect the value to still be settable for values derived from
	// unexported embedded struct fields.
	//
	// The logic below effectively does this when it first addresses the value
	// (to satisfy possible pointer methods) and continues to dereference
	// subsequent pointers as necessary.
	//
	// After the first round-trip, we set v back to the original value to
	// preserve the original RW flags contained in reflect.Value.
	v0 := v
	haveAddr := false
	// If v is a named type and is addressable,
	// start with its address, so that if the type has pointer methods,
	// we find them.
	if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() {
		haveAddr = true
		v = v.Addr()
	}
	for {
		// Load value from interface, but only if the result will be
		// usefully addressable.
		if v.Kind() == reflect.Interface && !v.IsNil() {
			e := v.Elem()
			if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) {
				haveAddr = false
				v = e
				continue
			}
		}
		if v.Kind() != reflect.Ptr {
			break
		}
		// When decoding null, stop at a settable pointer so the caller
		// can set it to nil.
		if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() {
			break
		}
		// Prevent infinite loop if v is an interface pointing to its own address:
		//     var v interface{}
		//     v = &v
		if v.Elem().Kind() == reflect.Interface && v.Elem().Elem() == v {
			v = v.Elem()
			break
		}
		if v.IsNil() {
			v.Set(reflect.New(v.Type().Elem()))
		}
		if v.Type().NumMethod() > 0 && v.CanInterface() {
			if u, ok := v.Interface().(Unmarshaler); ok {
				return u, nil, reflect.Value{}
			}
			// TextUnmarshaler does not handle null, so skip it when
			// decoding null.
			if !decodingNull {
				if u, ok := v.Interface().(encoding.TextUnmarshaler); ok {
					return nil, u, reflect.Value{}
				}
			}
		}
		if haveAddr {
			v = v0 // restore original value after round-trip Value.Addr().Elem()
			haveAddr = false
		} else {
			v = v.Elem()
		}
	}
	return nil, nil, v
}
// array consumes an array from d.data[d.off-1:], decoding into v.
// The first byte of the array ('[') has been read already.
func (d *decodeState) array(v reflect.Value) error {
	// Check for unmarshaler.
	u, ut, pv := indirect(v, false)
	if u != nil {
		// Hand the raw bytes of the whole array to the custom unmarshaler.
		start := d.readIndex()
		d.skip()
		return u.UnmarshalJSON(d.data[start:d.off])
	}
	if ut != nil {
		// A TextUnmarshaler cannot accept an array.
		d.saveError(&UnmarshalTypeError{Value: "array", Type: v.Type(), Offset: int64(d.off)})
		d.skip()
		return nil
	}
	v = pv
	// Check type of target.
	switch v.Kind() {
	case reflect.Interface:
		if v.NumMethod() == 0 {
			// Decoding into nil interface? Switch to non-reflect code.
			ai := d.arrayInterface()
			v.Set(reflect.ValueOf(ai))
			return nil
		}
		// Otherwise it's invalid.
		fallthrough
	default:
		d.saveError(&UnmarshalTypeError{Value: "array", Type: v.Type(), Offset: int64(d.off)})
		d.skip()
		return nil
	case reflect.Array, reflect.Slice:
		break
	}
	i := 0
	for {
		// Look ahead for ] - can only happen on first iteration.
		d.scanWhile(scanSkipSpace)
		if d.opcode == scanEndArray {
			break
		}
		// Get element of array, growing if necessary.
		if v.Kind() == reflect.Slice {
			// Grow slice if necessary
			if i >= v.Cap() {
				// Grow by 1.5x, with a floor of 4.
				newcap := v.Cap() + v.Cap()/2
				if newcap < 4 {
					newcap = 4
				}
				newv := reflect.MakeSlice(v.Type(), v.Len(), newcap)
				reflect.Copy(newv, v)
				v.Set(newv)
			}
			if i >= v.Len() {
				v.SetLen(i + 1)
			}
		}
		if i < v.Len() {
			// Decode into element.
			if err := d.value(v.Index(i)); err != nil {
				return err
			}
		} else {
			// Ran out of fixed array: skip.
			if err := d.value(reflect.Value{}); err != nil {
				return err
			}
		}
		i++
		// Next token must be , or ].
		if d.opcode == scanSkipSpace {
			d.scanWhile(scanSkipSpace)
		}
		if d.opcode == scanEndArray {
			break
		}
		if d.opcode != scanArrayValue {
			panic(phasePanicMsg)
		}
	}
	if i < v.Len() {
		if v.Kind() == reflect.Array {
			// Array. Zero the rest.
			z := reflect.Zero(v.Type().Elem())
			for ; i < v.Len(); i++ {
				v.Index(i).Set(z)
			}
		} else {
			v.SetLen(i)
		}
	}
	// An empty JSON array yields a non-nil empty slice.
	if i == 0 && v.Kind() == reflect.Slice {
		v.Set(reflect.MakeSlice(v.Type(), 0, 0))
	}
	return nil
}
// nullLiteral is the raw JSON null literal, used when a ",string" field
// receives a literal null.
var nullLiteral = []byte("null")

// textUnmarshalerType caches the reflect.Type of encoding.TextUnmarshaler
// for map-key checks in object decoding.
var textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
// object consumes an object from d.data[d.off-1:], decoding into v.
// The first byte ('{') of the object has been read already.
func (d *decodeState) object(v reflect.Value) error {
	// Check for unmarshaler.
	u, ut, pv := indirect(v, false)
	if u != nil {
		// Hand the raw bytes of the whole object to the custom unmarshaler.
		start := d.readIndex()
		d.skip()
		return u.UnmarshalJSON(d.data[start:d.off])
	}
	if ut != nil {
		// A TextUnmarshaler cannot accept an object.
		d.saveError(&UnmarshalTypeError{Value: "object", Type: v.Type(), Offset: int64(d.off)})
		d.skip()
		return nil
	}
	v = pv
	t := v.Type()
	// Decoding into nil interface? Switch to non-reflect code.
	if v.Kind() == reflect.Interface && v.NumMethod() == 0 {
		oi := d.objectInterface()
		v.Set(reflect.ValueOf(oi))
		return nil
	}
	var fields structFields
	// Check type of target:
	//   struct or
	//   map[T1]T2 where T1 is string, an integer type,
	//             or an encoding.TextUnmarshaler
	switch v.Kind() {
	case reflect.Map:
		// Map key must either have string kind, have an integer kind,
		// or be an encoding.TextUnmarshaler.
		switch t.Key().Kind() {
		case reflect.String,
			reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
			reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		default:
			if !reflect.PtrTo(t.Key()).Implements(textUnmarshalerType) {
				d.saveError(&UnmarshalTypeError{Value: "object", Type: t, Offset: int64(d.off)})
				d.skip()
				return nil
			}
		}
		if v.IsNil() {
			v.Set(reflect.MakeMap(t))
		}
	case reflect.Struct:
		fields = cachedTypeFields(t)
		// ok
	default:
		d.saveError(&UnmarshalTypeError{Value: "object", Type: t, Offset: int64(d.off)})
		d.skip()
		return nil
	}
	// mapElem is reused across iterations to avoid per-key allocation.
	var mapElem reflect.Value
	origErrorContext := d.errorContext
	for {
		// Read opening " of string key or closing }.
		d.scanWhile(scanSkipSpace)
		if d.opcode == scanEndObject {
			// closing } - can only happen on first iteration.
			break
		}
		if d.opcode != scanBeginLiteral {
			panic(phasePanicMsg)
		}
		// Read key.
		start := d.readIndex()
		d.rescanLiteral()
		item := d.data[start:d.readIndex()]
		key, ok := unquoteBytes(item)
		if !ok {
			panic(phasePanicMsg)
		}
		// Figure out field corresponding to key.
		var subv reflect.Value
		destring := false // whether the value is wrapped in a string to be decoded first
		if v.Kind() == reflect.Map {
			elemType := t.Elem()
			if !mapElem.IsValid() {
				mapElem = reflect.New(elemType).Elem()
			} else {
				mapElem.Set(reflect.Zero(elemType))
			}
			subv = mapElem
		} else {
			var f *field
			if i, ok := fields.nameIndex[string(key)]; ok {
				// Found an exact name match.
				f = &fields.list[i]
			} else {
				// Fall back to the expensive case-insensitive
				// linear search.
				for i := range fields.list {
					ff := &fields.list[i]
					if ff.equalFold(ff.nameBytes, key) {
						f = ff
						break
					}
				}
			}
			if f != nil {
				subv = v
				destring = f.quoted
				// Walk the field index path, allocating embedded
				// pointers along the way.
				for _, i := range f.index {
					if subv.Kind() == reflect.Ptr {
						if subv.IsNil() {
							// If a struct embeds a pointer to an unexported type,
							// it is not possible to set a newly allocated value
							// since the field is unexported.
							//
							// See https://golang.org/issue/21357
							if !subv.CanSet() {
								d.saveError(fmt.Errorf("json: cannot set embedded pointer to unexported struct: %v", subv.Type().Elem()))
								// Invalidate subv to ensure d.value(subv) skips over
								// the JSON value without assigning it to subv.
								subv = reflect.Value{}
								destring = false
								break
							}
							subv.Set(reflect.New(subv.Type().Elem()))
						}
						subv = subv.Elem()
					}
					subv = subv.Field(i)
				}
				d.errorContext.FieldStack = append(d.errorContext.FieldStack, f.name)
				d.errorContext.Struct = t
			} else if d.disallowUnknownFields {
				d.saveError(fmt.Errorf("json: unknown field %q", key))
			}
		}
		// Read : before value.
		if d.opcode == scanSkipSpace {
			d.scanWhile(scanSkipSpace)
		}
		if d.opcode != scanObjectKey {
			panic(phasePanicMsg)
		}
		d.scanWhile(scanSkipSpace)
		if destring {
			// ",string" field: the value must be a quoted string (or null)
			// that is itself decoded as the field's JSON value.
			switch qv := d.valueQuoted().(type) {
			case nil:
				if err := d.literalStore(nullLiteral, subv, false); err != nil {
					return err
				}
			case string:
				if err := d.literalStore([]byte(qv), subv, true); err != nil {
					return err
				}
			default:
				d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into %v", subv.Type()))
			}
		} else {
			if err := d.value(subv); err != nil {
				return err
			}
		}
		// Write value back to map;
		// if using struct, subv points into struct already.
		if v.Kind() == reflect.Map {
			kt := t.Key()
			var kv reflect.Value
			switch {
			case kt.Kind() == reflect.String:
				kv = reflect.ValueOf(key).Convert(kt)
			case reflect.PtrTo(kt).Implements(textUnmarshalerType):
				kv = reflect.New(kt)
				if err := d.literalStore(item, kv, true); err != nil {
					return err
				}
				kv = kv.Elem()
			default:
				switch kt.Kind() {
				case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
					s := string(key)
					n, err := strconv.ParseInt(s, 10, 64)
					if err != nil || reflect.Zero(kt).OverflowInt(n) {
						d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: kt, Offset: int64(start + 1)})
						break
					}
					kv = reflect.ValueOf(n).Convert(kt)
				case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
					s := string(key)
					n, err := strconv.ParseUint(s, 10, 64)
					if err != nil || reflect.Zero(kt).OverflowUint(n) {
						d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: kt, Offset: int64(start + 1)})
						break
					}
					kv = reflect.ValueOf(n).Convert(kt)
				default:
					panic("json: Unexpected key type") // should never occur
				}
			}
			if kv.IsValid() {
				v.SetMapIndex(kv, subv)
			}
		}
		// Next token must be , or }.
		if d.opcode == scanSkipSpace {
			d.scanWhile(scanSkipSpace)
		}
		// Reset errorContext to its original state.
		// Keep the same underlying array for FieldStack, to reuse the
		// space and avoid unnecessary allocs.
		d.errorContext.FieldStack = d.errorContext.FieldStack[:len(origErrorContext.FieldStack)]
		d.errorContext.Struct = origErrorContext.Struct
		if d.opcode == scanEndObject {
			break
		}
		if d.opcode != scanObjectValue {
			panic(phasePanicMsg)
		}
	}
	return nil
}
// convertNumber converts the number literal s to a float64 or a Number
// depending on the setting of d.useNumber.
func (d *decodeState) convertNumber(s string) (interface{}, error) {
	// Number preserves the literal text, avoiding float64 precision loss.
	if d.useNumber {
		return Number(s), nil
	}
	f, err := strconv.ParseFloat(s, 64)
	if err != nil {
		return nil, &UnmarshalTypeError{Value: "number " + s, Type: reflect.TypeOf(0.0), Offset: int64(d.off)}
	}
	return f, nil
}
// numberType caches the reflect.Type of Number so literalStore can detect
// string-kind destinations that should receive the raw number text.
var numberType = reflect.TypeOf(Number(""))
// literalStore decodes a literal stored in item into v.
//
// fromQuoted indicates whether this literal came from unwrapping a
// string from the ",string" struct tag option. this is used only to
// produce more helpful error messages.
func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) error {
	// Check for unmarshaler.
	if len(item) == 0 {
		// Empty string given.
		d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
		return nil
	}
	isNull := item[0] == 'n' // null
	u, ut, pv := indirect(v, isNull)
	if u != nil {
		// json.Unmarshaler takes precedence and consumes the raw bytes.
		return u.UnmarshalJSON(item)
	}
	if ut != nil {
		// encoding.TextUnmarshaler only accepts a JSON string literal.
		if item[0] != '"' {
			if fromQuoted {
				d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
				return nil
			}
			// Name the literal's kind (null/bool/number) in the type error.
			val := "number"
			switch item[0] {
			case 'n':
				val = "null"
			case 't', 'f':
				val = "bool"
			}
			d.saveError(&UnmarshalTypeError{Value: val, Type: v.Type(), Offset: int64(d.readIndex())})
			return nil
		}
		s, ok := unquoteBytes(item)
		if !ok {
			if fromQuoted {
				return fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())
			}
			panic(phasePanicMsg)
		}
		return ut.UnmarshalText(s)
	}

	v = pv

	switch c := item[0]; c {
	case 'n': // null
		// The main parser checks that only true and false can reach here,
		// but if this was a quoted string input, it could be anything.
		if fromQuoted && string(item) != "null" {
			d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
			break
		}
		switch v.Kind() {
		case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice:
			v.Set(reflect.Zero(v.Type()))
			// otherwise, ignore null for primitives/string
		}
	case 't', 'f': // true, false
		value := item[0] == 't'
		// The main parser checks that only true and false can reach here,
		// but if this was a quoted string input, it could be anything.
		if fromQuoted && string(item) != "true" && string(item) != "false" {
			d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
			break
		}
		switch v.Kind() {
		default:
			if fromQuoted {
				d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
			} else {
				d.saveError(&UnmarshalTypeError{Value: "bool", Type: v.Type(), Offset: int64(d.readIndex())})
			}
		case reflect.Bool:
			v.SetBool(value)
		case reflect.Interface:
			// Only the empty interface can hold a plain bool.
			if v.NumMethod() == 0 {
				v.Set(reflect.ValueOf(value))
			} else {
				d.saveError(&UnmarshalTypeError{Value: "bool", Type: v.Type(), Offset: int64(d.readIndex())})
			}
		}

	case '"': // string
		s, ok := unquoteBytes(item)
		if !ok {
			if fromQuoted {
				return fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())
			}
			panic(phasePanicMsg)
		}
		switch v.Kind() {
		default:
			d.saveError(&UnmarshalTypeError{Value: "string", Type: v.Type(), Offset: int64(d.readIndex())})
		case reflect.Slice:
			// A JSON string decodes into a byte slice via base64.
			if v.Type().Elem().Kind() != reflect.Uint8 {
				d.saveError(&UnmarshalTypeError{Value: "string", Type: v.Type(), Offset: int64(d.readIndex())})
				break
			}
			b := make([]byte, base64.StdEncoding.DecodedLen(len(s)))
			n, err := base64.StdEncoding.Decode(b, s)
			if err != nil {
				d.saveError(err)
				break
			}
			v.SetBytes(b[:n])
		case reflect.String:
			v.SetString(string(s))
		case reflect.Interface:
			if v.NumMethod() == 0 {
				v.Set(reflect.ValueOf(string(s)))
			} else {
				d.saveError(&UnmarshalTypeError{Value: "string", Type: v.Type(), Offset: int64(d.readIndex())})
			}
		}

	default: // number
		if c != '-' && (c < '0' || c > '9') {
			if fromQuoted {
				return fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())
			}
			panic(phasePanicMsg)
		}
		s := string(item)
		switch v.Kind() {
		default:
			// json.Number is a string kind that stores the raw literal text.
			if v.Kind() == reflect.String && v.Type() == numberType {
				// s must be a valid number, because it's
				// already been tokenized.
				v.SetString(s)
				break
			}
			if fromQuoted {
				return fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())
			}
			d.saveError(&UnmarshalTypeError{Value: "number", Type: v.Type(), Offset: int64(d.readIndex())})
		case reflect.Interface:
			n, err := d.convertNumber(s)
			if err != nil {
				d.saveError(err)
				break
			}
			if v.NumMethod() != 0 {
				d.saveError(&UnmarshalTypeError{Value: "number", Type: v.Type(), Offset: int64(d.readIndex())})
				break
			}
			v.Set(reflect.ValueOf(n))
		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
			n, err := strconv.ParseInt(s, 10, 64)
			if err != nil || v.OverflowInt(n) {
				d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: v.Type(), Offset: int64(d.readIndex())})
				break
			}
			v.SetInt(n)
		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
			n, err := strconv.ParseUint(s, 10, 64)
			if err != nil || v.OverflowUint(n) {
				d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: v.Type(), Offset: int64(d.readIndex())})
				break
			}
			v.SetUint(n)
		case reflect.Float32, reflect.Float64:
			n, err := strconv.ParseFloat(s, v.Type().Bits())
			if err != nil || v.OverflowFloat(n) {
				d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: v.Type(), Offset: int64(d.readIndex())})
				break
			}
			v.SetFloat(n)
		}
	}
	return nil
}
// The xxxInterface routines build up a value to be stored
// in an empty interface. They are not strictly necessary,
// but they avoid the weight of reflection in this common case.

// valueInterface is like value but returns interface{}.
func (d *decodeState) valueInterface() interface{} {
	switch d.opcode {
	case scanBeginArray:
		arr := d.arrayInterface()
		d.scanNext()
		return arr
	case scanBeginObject:
		obj := d.objectInterface()
		d.scanNext()
		return obj
	case scanBeginLiteral:
		return d.literalInterface()
	}
	panic(phasePanicMsg)
}
// arrayInterface is like array but returns []interface{}.
func (d *decodeState) arrayInterface() []interface{} {
	elems := make([]interface{}, 0)
	for {
		// Advance to the first token of the element, or to ].
		d.scanWhile(scanSkipSpace)
		if d.opcode == scanEndArray {
			// Empty array - only possible on the first iteration.
			return elems
		}

		elems = append(elems, d.valueInterface())

		// After the value: expect either , or ].
		if d.opcode == scanSkipSpace {
			d.scanWhile(scanSkipSpace)
		}
		switch d.opcode {
		case scanEndArray:
			return elems
		case scanArrayValue:
			// Comma - continue with the next element.
		default:
			panic(phasePanicMsg)
		}
	}
}
// objectInterface is like object but returns map[string]interface{}.
func (d *decodeState) objectInterface() map[string]interface{} {
	result := make(map[string]interface{})
	for {
		// Advance to the opening " of a key, or to the closing }.
		d.scanWhile(scanSkipSpace)
		if d.opcode == scanEndObject {
			// Immediate } - only possible on the first iteration.
			return result
		}
		if d.opcode != scanBeginLiteral {
			panic(phasePanicMsg)
		}

		// Capture the raw key bytes and unquote them.
		keyStart := d.readIndex()
		d.rescanLiteral()
		rawKey := d.data[keyStart:d.readIndex()]
		key, ok := unquote(rawKey)
		if !ok {
			panic(phasePanicMsg)
		}

		// Consume the : separating key from value.
		if d.opcode == scanSkipSpace {
			d.scanWhile(scanSkipSpace)
		}
		if d.opcode != scanObjectKey {
			panic(phasePanicMsg)
		}
		d.scanWhile(scanSkipSpace)

		// Decode the value and store it under the key.
		result[key] = d.valueInterface()

		// After the value: expect either , or }.
		if d.opcode == scanSkipSpace {
			d.scanWhile(scanSkipSpace)
		}
		switch d.opcode {
		case scanEndObject:
			return result
		case scanObjectValue:
			// Comma - continue with the next member.
		default:
			panic(phasePanicMsg)
		}
	}
}
// literalInterface consumes and returns a literal from d.data[d.off-1:] and
// it reads the following byte ahead. The first byte of the literal has been
// read already (that's how the caller knows it's a literal).
func (d *decodeState) literalInterface() interface{} {
	// All bytes inside literal return scanContinue op code.
	start := d.readIndex()
	d.rescanLiteral()
	lit := d.data[start:d.readIndex()]

	first := lit[0]
	switch {
	case first == 'n': // null
		return nil
	case first == 't' || first == 'f': // true, false
		return first == 't'
	case first == '"': // string
		s, ok := unquote(lit)
		if !ok {
			panic(phasePanicMsg)
		}
		return s
	default: // number
		if first != '-' && (first < '0' || first > '9') {
			panic(phasePanicMsg)
		}
		n, err := d.convertNumber(string(lit))
		if err != nil {
			d.saveError(err)
		}
		return n
	}
}
// getu4 decodes \uXXXX from the beginning of s, returning the hex value,
// or it returns -1 when s does not begin with a well-formed escape.
func getu4(s []byte) rune {
	// Need the full 6-byte escape: backslash, 'u', four hex digits.
	if len(s) < 6 || s[0] != '\\' || s[1] != 'u' {
		return -1
	}
	var v rune
	for _, digit := range s[2:6] {
		var nibble rune
		switch {
		case digit >= '0' && digit <= '9':
			nibble = rune(digit - '0')
		case digit >= 'a' && digit <= 'f':
			nibble = rune(digit-'a') + 10
		case digit >= 'A' && digit <= 'F':
			nibble = rune(digit-'A') + 10
		default:
			return -1
		}
		v = v<<4 | nibble
	}
	return v
}
// unquote converts a quoted JSON string literal s into an actual string t.
// The rules are different than for Go, so cannot use strconv.Unquote.
func unquote(s []byte) (string, bool) {
	decoded, ok := unquoteBytes(s)
	return string(decoded), ok
}
// unquoteBytes converts a quoted JSON string literal s into the decoded
// bytes, reporting ok=false for a malformed literal. When the literal
// contains no escapes and only valid UTF-8, a sub-slice of s is returned
// without copying.
func unquoteBytes(s []byte) (t []byte, ok bool) {
	// The literal must be wrapped in double quotes.
	if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' {
		return
	}
	s = s[1 : len(s)-1]

	// Check for unusual characters. If there are none,
	// then no unquoting is needed, so return a slice of the
	// original bytes.
	r := 0
	for r < len(s) {
		c := s[r]
		if c == '\\' || c == '"' || c < ' ' {
			break
		}
		if c < utf8.RuneSelf {
			r++
			continue
		}
		rr, size := utf8.DecodeRune(s[r:])
		if rr == utf8.RuneError && size == 1 {
			break
		}
		r += size
	}
	if r == len(s) {
		return s, true
	}

	// Slow path: copy the clean prefix scanned above, then decode the rest
	// byte by byte. r reads from s, w writes into b.
	b := make([]byte, len(s)+2*utf8.UTFMax)
	w := copy(b, s[0:r])
	for r < len(s) {
		// Out of room? Can only happen if s is full of
		// malformed UTF-8 and we're replacing each
		// byte with RuneError.
		if w >= len(b)-2*utf8.UTFMax {
			nb := make([]byte, (len(b)+utf8.UTFMax)*2)
			copy(nb, b[0:w])
			b = nb
		}
		switch c := s[r]; {
		case c == '\\':
			r++
			if r >= len(s) {
				return
			}
			switch s[r] {
			default:
				// Unrecognized escape: the literal is invalid.
				return
			case '"', '\\', '/', '\'':
				b[w] = s[r]
				r++
				w++
			case 'b':
				b[w] = '\b'
				r++
				w++
			case 'f':
				b[w] = '\f'
				r++
				w++
			case 'n':
				b[w] = '\n'
				r++
				w++
			case 'r':
				b[w] = '\r'
				r++
				w++
			case 't':
				b[w] = '\t'
				r++
				w++
			case 'u':
				// \uXXXX escape: back up one byte so getu4 sees the backslash.
				r--
				rr := getu4(s[r:])
				if rr < 0 {
					return
				}
				r += 6
				if utf16.IsSurrogate(rr) {
					// Try to pair with a following \uXXXX low surrogate.
					rr1 := getu4(s[r:])
					if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar {
						// A valid pair; consume.
						r += 6
						w += utf8.EncodeRune(b[w:], dec)
						break
					}
					// Invalid surrogate; fall back to replacement rune.
					rr = unicode.ReplacementChar
				}
				w += utf8.EncodeRune(b[w:], rr)
			}

		// Quote, control characters are invalid.
		case c == '"', c < ' ':
			return

		// ASCII
		case c < utf8.RuneSelf:
			b[w] = c
			r++
			w++

		// Coerce to well-formed UTF-8.
		default:
			rr, size := utf8.DecodeRune(s[r:])
			r += size
			w += utf8.EncodeRune(b[w:], rr)
		}
	}
	return b[0:w], true
}
|
package deploymentconfig
import (
"fmt"
"reflect"
"strings"
"github.com/golang/glog"
"k8s.io/api/core/v1"
kapierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
kutilerrors "k8s.io/apimachinery/pkg/util/errors"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
kcoreclient "k8s.io/client-go/kubernetes/typed/core/v1"
kcorelisters "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/retry"
"k8s.io/client-go/util/workqueue"
kcontroller "k8s.io/kubernetes/pkg/controller"
appsv1 "github.com/openshift/api/apps/v1"
appsv1client "github.com/openshift/client-go/apps/clientset/versioned/typed/apps/v1"
appsv1lister "github.com/openshift/client-go/apps/listers/apps/v1"
appsutil "github.com/openshift/origin/pkg/apps/util"
)
const (
	// maxRetryCount is the number of times a deployment config will be retried before it is dropped out
	// of the queue (see handleErr).
	maxRetryCount = 15
)
// fatalError is an error which can't be retried.
type fatalError string

// Error implements the error interface, prefixing the underlying message.
func (e fatalError) Error() string {
	return "fatal error handling deployment config: " + string(e)
}
// DeploymentConfigController is responsible for creating a new deployment
// when:
//
// 1. The config version is > 0 and,
// 2. No deployment for the version exists.
//
// The controller reconciles deployments with the replica count specified on
// the config. The active deployment (that is, the latest successful
// deployment) will always be scaled to the config replica count. All other
// deployments will be scaled to zero.
//
// If a new version is observed for which no deployment exists, any running
// deployments will be cancelled. The controller will not attempt to scale
// running deployments.
type DeploymentConfigController struct {
	// appsClient provides access to deploymentconfigs.
	appsClient appsv1client.DeploymentConfigsGetter
	// kubeClient provides access to replication controllers.
	kubeClient kcoreclient.ReplicationControllersGetter

	// queue contains deployment configs that need to be synced.
	queue workqueue.RateLimitingInterface

	// dcIndex is the cache.Indexer backing the deployment config cache.
	dcIndex cache.Indexer
	// dcLister provides a local cache for deployment configs.
	dcLister appsv1lister.DeploymentConfigLister
	// dcStoreSynced makes sure the dc store is synced before reconciling any deployment config.
	dcStoreSynced func() bool
	// rcLister can list/get replication controllers from a shared informer's cache.
	rcLister kcorelisters.ReplicationControllerLister
	// rcListerSynced makes sure the rc shared informer is synced before reconciling any deployment config.
	rcListerSynced func() bool
	// rcControl is used for adopting/releasing replication controllers.
	rcControl RCControlInterface

	// codec is used to build deployments from configs.
	codec runtime.Codec
	// recorder is used to record events.
	recorder record.EventRecorder
}
// Handle implements the loop that processes deployment configs. Since this controller started
// using caches, the provided config MUST be deep-copied beforehand (see work() in factory.go).
//
// The reconcile steps, in order: claim the replication controllers that belong
// to the config, cancel stale rollouts, bump LatestVersion when a trigger
// fired, reconcile replica counts when the latest deployment already exists,
// and otherwise create the replication controller for the new rollout.
func (c *DeploymentConfigController) Handle(config *appsv1.DeploymentConfig) error {
	glog.V(5).Infof("Reconciling %s/%s", config.Namespace, config.Name)
	// There's nothing to reconcile until the version is nonzero.
	if appsutil.IsInitialDeployment(config) && !appsutil.HasTrigger(config) {
		return c.updateStatus(config, []*v1.ReplicationController{}, true)
	}

	// List all ReplicationControllers to find also those we own but that no longer match our selector.
	// They will be orphaned by ClaimReplicationControllers().
	rcList, err := c.rcLister.ReplicationControllers(config.Namespace).List(labels.Everything())
	if err != nil {
		return fmt.Errorf("error while deploymentConfigController listing replication controllers: %v", err)
	}

	// If any adoptions are attempted, we should first recheck for deletion with
	// an uncached quorum read sometime after listing ReplicationControllers (see Kubernetes #42639).
	canAdoptFunc := kcontroller.RecheckDeletionTimestamp(func() (metav1.Object, error) {
		fresh, err := c.appsClient.DeploymentConfigs(config.Namespace).Get(config.Name, metav1.GetOptions{})
		if err != nil {
			return nil, err
		}
		if fresh.UID != config.UID {
			return nil, fmt.Errorf("original DeploymentConfig %s/%s is gone: got uid %s, wanted %s", config.Namespace, config.Name, fresh.UID, config.UID)
		}
		return fresh, nil
	})
	cm := NewRCControllerRefManager(c.rcControl, config, appsutil.ConfigSelector(config.Name), appsv1.GroupVersion.WithKind("DeploymentConfig"), canAdoptFunc)
	existingDeployments, err := cm.ClaimReplicationControllers(rcList)
	if err != nil {
		return fmt.Errorf("error while deploymentConfigController claiming replication controllers: %v", err)
	}

	// In case the deployment config has been marked for deletion, merely update its status with
	// the latest available information. Some deletions may take some time to complete so there
	// is value in doing this.
	if config.DeletionTimestamp != nil {
		return c.updateStatus(config, existingDeployments, true)
	}

	// If the config is paused we shouldn't create new deployments for it.
	if config.Spec.Paused {
		// in order for revision history limit cleanup to work for paused
		// deployments, we need to trigger it here
		if err := c.cleanupOldDeployments(existingDeployments, config); err != nil {
			c.recorder.Eventf(config, v1.EventTypeWarning, "DeploymentCleanupFailed", "Couldn't clean up deployments: %v", err)
		}

		return c.updateStatus(config, existingDeployments, true)
	}

	latestExists, latestDeployment := appsutil.LatestDeploymentInfo(config, existingDeployments)

	// If the latest deployment doesn't exist yet, cancel any rollouts still
	// running so the new version won't compete with them.
	if !latestExists {
		if err := c.cancelRunningRollouts(config, existingDeployments, cm); err != nil {
			return err
		}
	}

	// Never deploy with invalid or unresolved images
	for i, container := range config.Spec.Template.Spec.Containers {
		if len(strings.TrimSpace(container.Image)) == 0 {
			glog.V(4).Infof("Postponing rollout #%d for DeploymentConfig %s/%s because of invalid or unresolved image for container #%d (name=%s)", config.Status.LatestVersion, config.Namespace, config.Name, i, container.Name)
			return c.updateStatus(config, existingDeployments, true)
		}
	}

	configCopy := config.DeepCopy()
	// Process triggers and start an initial rollout if one was activated.
	shouldTrigger, shouldSkip, err := triggerActivated(configCopy, latestExists, latestDeployment)
	if err != nil {
		return fmt.Errorf("triggerActivated failed: %v", err)
	}

	if shouldSkip {
		return c.updateStatus(configCopy, existingDeployments, true)
	}

	if shouldTrigger {
		configCopy.Status.LatestVersion++
		_, err := c.appsClient.DeploymentConfigs(configCopy.Namespace).UpdateStatus(configCopy)
		return err
	}

	// If the latest deployment already exists, reconcile existing deployments
	// and return early.
	if latestExists {
		// If the latest deployment is still running, try again later. We don't
		// want to compete with the deployer.
		if !appsutil.IsTerminatedDeployment(latestDeployment) {
			return c.updateStatus(config, existingDeployments, false)
		}

		return c.reconcileDeployments(existingDeployments, config, cm)
	}

	// No deployments are running and the latest deployment doesn't exist, so
	// create the new deployment.
	deployment, err := appsutil.MakeDeployment(config)
	if err != nil {
		return fatalError(fmt.Sprintf("couldn't make deployment from (potentially invalid) deployment config %s: %v", appsutil.LabelForDeploymentConfig(config), err))
	}
	created, err := c.kubeClient.ReplicationControllers(config.Namespace).Create(deployment)
	if err != nil {
		// We need to find out if our controller owns that deployment and report error if not
		if kapierrors.IsAlreadyExists(err) {
			rc, err := c.rcLister.ReplicationControllers(deployment.Namespace).Get(deployment.Name)
			if err != nil {
				return fmt.Errorf("error while deploymentConfigController getting the replication controller %s/%s: %v", deployment.Namespace, deployment.Name, err)
			}
			// We need to make sure we own that RC or adopt it if possible
			isOurs, err := cm.ClaimReplicationController(rc)
			if err != nil {
				return fmt.Errorf("error while deploymentConfigController claiming the replication controller: %v", err)
			}
			if isOurs {
				// If the deployment was already created, just move on. The cache could be
				// stale, or another process could have already handled this update.
				return c.updateStatus(config, existingDeployments, true)
			}
			err = fmt.Errorf("replication controller %s already exists and deployment config is not allowed to claim it", deployment.Name)
			c.recorder.Eventf(config, v1.EventTypeWarning, "DeploymentCreationFailed", "Couldn't deploy version %d: %v", config.Status.LatestVersion, err)
			return c.updateStatus(config, existingDeployments, true)
		}
		c.recorder.Eventf(config, v1.EventTypeWarning, "DeploymentCreationFailed", "Couldn't deploy version %d: %s", config.Status.LatestVersion, err)
		// We don't care about this error since we need to report the create failure.
		cond := appsutil.NewDeploymentCondition(appsv1.DeploymentProgressing, v1.ConditionFalse, appsutil.FailedRcCreateReason, err.Error())
		_ = c.updateStatus(config, existingDeployments, true, *cond)
		return fmt.Errorf("couldn't create deployment for deployment config %s: %v", appsutil.LabelForDeploymentConfig(config), err)
	}
	msg := fmt.Sprintf("Created new replication controller %q for version %d", created.Name, config.Status.LatestVersion)
	// Pass msg through an explicit %s verb: it is preformatted and must not be
	// treated as a format string itself (a '%' in the RC name would corrupt
	// the event; go vet's printf check flags the old call).
	c.recorder.Eventf(config, v1.EventTypeNormal, "DeploymentCreated", "%s", msg)

	// As we've just created a new deployment, we need to make sure to clean
	// up old deployments if we have reached our deployment history quota
	existingDeployments = append(existingDeployments, created)
	if err := c.cleanupOldDeployments(existingDeployments, config); err != nil {
		c.recorder.Eventf(config, v1.EventTypeWarning, "DeploymentCleanupFailed", "Couldn't clean up deployments: %v", err)
	}

	cond := appsutil.NewDeploymentCondition(appsv1.DeploymentProgressing, v1.ConditionTrue, appsutil.NewReplicationControllerReason, msg)
	return c.updateStatus(config, existingDeployments, true, *cond)
}
// reconcileDeployments reconciles existing deployment replica counts which
// could have diverged outside the deployment process (e.g. due to auto or
// manual scaling, or partial deployments). The active deployment is the last
// successful deployment, not necessarily the latest in terms of the config
// version. The active deployment replica count should follow the config, and
// all other deployments should be scaled to zero.
func (c *DeploymentConfigController) reconcileDeployments(existingDeployments []*v1.ReplicationController, config *appsv1.DeploymentConfig, cm *RCControllerRefManager) error {
	activeDeployment := appsutil.ActiveDeployment(existingDeployments)

	// Reconcile deployments. The active deployment follows the config, and all
	// other deployments should be scaled to zero.
	var updatedDeployments []*v1.ReplicationController
	for i := range existingDeployments {
		deployment := existingDeployments[i]
		toAppend := deployment

		isActiveDeployment := activeDeployment != nil && deployment.Name == activeDeployment.Name

		oldReplicaCount := deployment.Spec.Replicas
		if oldReplicaCount == nil {
			// Treat an unset replica count as zero.
			zero := int32(0)
			oldReplicaCount = &zero
		}
		newReplicaCount := int32(0)
		if isActiveDeployment {
			newReplicaCount = config.Spec.Replicas
		}
		if config.Spec.Test {
			glog.V(4).Infof("Deployment config %q is test and deployment %q will be scaled down", appsutil.LabelForDeploymentConfig(config), appsutil.LabelForDeployment(deployment))
			newReplicaCount = 0
		}

		// Only update if necessary.
		var copied *v1.ReplicationController
		if newReplicaCount != *oldReplicaCount {
			if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
				// refresh the replication controller version
				rc, err := c.rcLister.ReplicationControllers(deployment.Namespace).Get(deployment.Name)
				if err != nil {
					return err
				}
				// We need to make sure we own that RC or adopt it if possible
				isOurs, err := cm.ClaimReplicationController(rc)
				if err != nil {
					return fmt.Errorf("error while deploymentConfigController claiming the replication controller %s/%s: %v", rc.Namespace, rc.Name, err)
				}
				if !isOurs {
					return fmt.Errorf("deployment config %s/%s (%v) no longer owns replication controller %s/%s (%v)",
						config.Namespace, config.Name, config.UID,
						deployment.Namespace, deployment.Name, deployment.UID,
					)
				}

				copied = rc.DeepCopy()
				copied.Spec.Replicas = &newReplicaCount
				copied, err = c.kubeClient.ReplicationControllers(copied.Namespace).Update(copied)
				return err
			}); err != nil {
				// NOTE: "controller" spelling fixed in the event message below.
				c.recorder.Eventf(config, v1.EventTypeWarning, "ReplicationControllerScaleFailed",
					"Failed to scale replication controller %q from %d to %d: %v", deployment.Name, *oldReplicaCount, newReplicaCount, err)
				return err
			}

			c.recorder.Eventf(config, v1.EventTypeNormal, "ReplicationControllerScaled", "Scaled replication controller %q from %d to %d", copied.Name, *oldReplicaCount, newReplicaCount)
			toAppend = copied
		}

		updatedDeployments = append(updatedDeployments, toAppend)
	}

	// As the deployment configuration has changed, we need to make sure to clean
	// up old deployments if we have now reached our deployment history quota
	if err := c.cleanupOldDeployments(updatedDeployments, config); err != nil {
		c.recorder.Eventf(config, v1.EventTypeWarning, "ReplicationControllerCleanupFailed", "Couldn't clean up replication controllers: %v", err)
	}

	return c.updateStatus(config, updatedDeployments, true)
}
// updateStatus updates the status of the provided deployment config. Additional conditions will
// override any other condition in the deployment config status.
func (c *DeploymentConfigController) updateStatus(config *appsv1.DeploymentConfig, deployments []*v1.ReplicationController, updateObservedGeneration bool, additional ...appsv1.DeploymentCondition) error {
	newStatus := calculateStatus(config, deployments, updateObservedGeneration, additional...)

	// NOTE: We should update the status of the deployment config only if we need to, otherwise
	// we hotloop between updates.
	if reflect.DeepEqual(newStatus, config.Status) {
		return nil
	}

	// Mutate a deep copy so the shared cache object stays untouched.
	copied := config.DeepCopy()
	copied.Status = newStatus
	// TODO: Retry update conflicts
	if _, err := c.appsClient.DeploymentConfigs(copied.Namespace).UpdateStatus(copied); err != nil {
		return err
	}
	glog.V(4).Infof(fmt.Sprintf("Updated status for DeploymentConfig: %s, ", appsutil.LabelForDeploymentConfig(config)) +
		fmt.Sprintf("replicas %d->%d (need %d), ", config.Status.Replicas, newStatus.Replicas, config.Spec.Replicas) +
		fmt.Sprintf("readyReplicas %d->%d, ", config.Status.ReadyReplicas, newStatus.ReadyReplicas) +
		fmt.Sprintf("availableReplicas %d->%d, ", config.Status.AvailableReplicas, newStatus.AvailableReplicas) +
		fmt.Sprintf("unavailableReplicas %d->%d, ", config.Status.UnavailableReplicas, newStatus.UnavailableReplicas) +
		fmt.Sprintf("sequence No: %v->%v", config.Status.ObservedGeneration, newStatus.ObservedGeneration))
	return nil
}
// cancelRunningRollouts cancels existing rollouts when the latest deployment does not
// exist yet, so the rollout for the new config version does not compete with older
// in-flight deployments. It returns an error (forcing a requeue) while any
// cancellation is still pending.
func (c *DeploymentConfigController) cancelRunningRollouts(config *appsv1.DeploymentConfig, existingDeployments []*v1.ReplicationController, cm *RCControllerRefManager) error {
	awaitingCancellations := false
	for i := range existingDeployments {
		deployment := existingDeployments[i]
		// Skip deployments with an outcome.
		if appsutil.IsTerminatedDeployment(deployment) {
			continue
		}
		// Cancel running deployments.
		awaitingCancellations = true
		// Already marked cancelled but not yet terminated: just wait for it.
		if appsutil.IsDeploymentCancelled(deployment) {
			continue
		}

		// Retry faster on conflicts
		var updatedDeployment *v1.ReplicationController
		err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
			// Re-read the RC so each retry mutates a fresh copy.
			rc, err := c.rcLister.ReplicationControllers(deployment.Namespace).Get(deployment.Name)
			if kapierrors.IsNotFound(err) {
				// Already gone; nothing to cancel.
				return nil
			}
			if err != nil {
				return err
			}

			// We need to make sure we own that RC or adopt it if possible
			isOurs, err := cm.ClaimReplicationController(rc)
			if err != nil {
				return fmt.Errorf("error while deploymentConfigController claiming the replication controller %s/%s: %v", rc.Namespace, rc.Name, err)
			}
			if !isOurs {
				// Not ours and not adoptable; leave it alone.
				return nil
			}

			copied := rc.DeepCopy()
			appsutil.SetCancelledByNewerDeployment(copied)
			updatedDeployment, err = c.kubeClient.ReplicationControllers(copied.Namespace).Update(copied)
			return err
		})
		if err != nil {
			c.recorder.Eventf(config, v1.EventTypeWarning, "DeploymentCancellationFailed", "Failed to cancel deployment %q superceded by version %d: %s", deployment.Name, config.Status.LatestVersion, err)

			return err
		}
		if updatedDeployment != nil {
			// replace the current deployment with the updated copy so that a future update has a chance at working
			existingDeployments[i] = updatedDeployment
			c.recorder.Eventf(config, v1.EventTypeNormal, "DeploymentCancelled", "Cancelled deployment %q superceded by version %d", deployment.Name, config.Status.LatestVersion)
		}
	}

	// Wait for deployment cancellations before reconciling or creating a new
	// deployment to avoid competing with existing deployment processes.
	if awaitingCancellations {
		c.recorder.Eventf(config, v1.EventTypeNormal, "DeploymentAwaitingCancellation", "Deployment of version %d awaiting cancellation of older running deployments", config.Status.LatestVersion)
		return fmt.Errorf("found previous inflight deployment for %s - requeuing", appsutil.LabelForDeploymentConfig(config))
	}

	return nil
}
// calculateStatus builds a fresh DeploymentConfigStatus from the config and
// its replication controllers, then layers any additional conditions on top.
func calculateStatus(config *appsv1.DeploymentConfig, rcs []*v1.ReplicationController, updateObservedGeneration bool, additional ...appsv1.DeploymentCondition) appsv1.DeploymentConfigStatus {
	// UpdatedReplicas represents the replicas that use the current deployment
	// config template, i.e. the replicas of the latest deployment, not the
	// active one.
	var updatedReplicas int32
	latestExists, latestRC := appsutil.LatestDeploymentInfo(config, rcs)
	if latestExists {
		updatedReplicas = appsutil.GetStatusReplicaCountForDeployments([]*v1.ReplicationController{latestRC})
	} else {
		latestRC = nil
	}

	availableReplicas := appsutil.GetAvailableReplicaCountForReplicationControllers(rcs)
	totalReplicas := appsutil.GetReplicaCountForDeployments(rcs)
	unavailableReplicas := totalReplicas - availableReplicas
	if unavailableReplicas < 0 {
		unavailableReplicas = 0
	}

	observedGeneration := config.Status.ObservedGeneration
	if updateObservedGeneration {
		observedGeneration = config.Generation
	}

	status := appsv1.DeploymentConfigStatus{
		LatestVersion:       config.Status.LatestVersion,
		Details:             config.Status.Details,
		ObservedGeneration:  observedGeneration,
		Replicas:            appsutil.GetStatusReplicaCountForDeployments(rcs),
		UpdatedReplicas:     updatedReplicas,
		AvailableReplicas:   availableReplicas,
		ReadyReplicas:       appsutil.GetReadyReplicaCountForReplicationControllers(rcs),
		UnavailableReplicas: unavailableReplicas,
		Conditions:          config.Status.Conditions,
	}

	updateConditions(config, &status, latestRC)
	for _, cond := range additional {
		appsutil.SetDeploymentCondition(&status, cond)
	}

	return status
}
// updateConditions recomputes the Available condition from the replica counts
// in newStatus and, when latestRC is non-nil, the Progressing condition from
// the deployment status of the latest replication controller.
func updateConditions(config *appsv1.DeploymentConfig, newStatus *appsv1.DeploymentConfigStatus, latestRC *v1.ReplicationController) {
	// Availability condition.
	if newStatus.AvailableReplicas >= config.Spec.Replicas-appsutil.MaxUnavailable(config) && newStatus.AvailableReplicas > 0 {
		minAvailability := appsutil.NewDeploymentCondition(appsv1.DeploymentAvailable, v1.ConditionTrue, "",
			"Deployment config has minimum availability.")
		appsutil.SetDeploymentCondition(newStatus, *minAvailability)
	} else {
		noMinAvailability := appsutil.NewDeploymentCondition(appsv1.DeploymentAvailable, v1.ConditionFalse, "",
			"Deployment config does not have minimum availability.")
		appsutil.SetDeploymentCondition(newStatus, *noMinAvailability)
	}

	// Condition about progress.
	if latestRC != nil {
		switch appsutil.DeploymentStatusFor(latestRC) {
		case appsv1.DeploymentStatusPending:
			msg := fmt.Sprintf("replication controller %q is waiting for pod %q to run", latestRC.Name, appsutil.DeployerPodNameForDeployment(latestRC.Name))
			condition := appsutil.NewDeploymentCondition(appsv1.DeploymentProgressing, v1.ConditionUnknown, "", msg)
			appsutil.SetDeploymentCondition(newStatus, *condition)
		case appsv1.DeploymentStatusRunning:
			if appsutil.IsProgressing(config, newStatus) {
				// Remove first so the condition's timestamp refreshes on re-add.
				appsutil.RemoveDeploymentCondition(newStatus, appsv1.DeploymentProgressing)
				msg := fmt.Sprintf("replication controller %q is progressing", latestRC.Name)
				condition := appsutil.NewDeploymentCondition(appsv1.DeploymentProgressing, v1.ConditionTrue,
					string(appsv1.ReplicationControllerUpdatedReason), msg)
				// TODO: Right now, we use lastTransitionTime for storing the last time we had any progress instead
				// of the last time the condition transitioned to a new status. We should probably change that.
				appsutil.SetDeploymentCondition(newStatus, *condition)
			}
		case appsv1.DeploymentStatusFailed:
			var condition *appsv1.DeploymentCondition
			if appsutil.IsDeploymentCancelled(latestRC) {
				msg := fmt.Sprintf("rollout of replication controller %q was cancelled", latestRC.Name)
				condition = appsutil.NewDeploymentCondition(appsv1.DeploymentProgressing, v1.ConditionFalse,
					appsutil.CancelledRolloutReason, msg)
			} else {
				msg := fmt.Sprintf("replication controller %q has failed progressing", latestRC.Name)
				condition = appsutil.NewDeploymentCondition(appsv1.DeploymentProgressing, v1.ConditionFalse, appsutil.TimedOutReason, msg)
			}
			appsutil.SetDeploymentCondition(newStatus, *condition)
		case appsv1.DeploymentStatusComplete:
			msg := fmt.Sprintf("replication controller %q successfully rolled out", latestRC.Name)
			condition := appsutil.NewDeploymentCondition(appsv1.DeploymentProgressing, v1.ConditionTrue, appsutil.NewRcAvailableReason, msg)
			appsutil.SetDeploymentCondition(newStatus, *condition)
		}
	}
}
// handleErr decides whether a failed sync of the deployment config identified
// by key should be retried or dropped from the work queue.
func (c *DeploymentConfigController) handleErr(err error, key interface{}) {
	if err == nil {
		// Successful sync: reset the rate-limiting history for this key.
		c.queue.Forget(key)
		return
	}

	// Fatal errors cannot be fixed by retrying, so report and drop immediately.
	if _, isFatal := err.(fatalError); isFatal {
		utilruntime.HandleError(err)
		c.queue.Forget(key)
		return
	}

	// Transient errors are requeued with rate limiting up to maxRetryCount times.
	if c.queue.NumRequeues(key) < maxRetryCount {
		glog.V(2).Infof("Error syncing deployment config %v: %v", key, err)
		c.queue.AddRateLimited(key)
		return
	}

	// Retry budget exhausted: surface the error and stop requeuing this key.
	utilruntime.HandleError(err)
	glog.V(2).Infof("Dropping deployment config %q out of the queue: %v", key, err)
	c.queue.Forget(key)
}
// cleanupOldDeployments deletes old replication controller deployments if their quota has been reached.
//
// Candidates returned by DeploymentsForCleanup are deleted front-to-back until the
// count fits within Spec.RevisionHistoryLimit. Deletion errors are collected and
// returned as a single aggregate; NotFound errors are ignored because the desired
// end state has already been reached.
func (c *DeploymentConfigController) cleanupOldDeployments(existingDeployments []*v1.ReplicationController, deploymentConfig *appsv1.DeploymentConfig) error {
	if deploymentConfig.Spec.RevisionHistoryLimit == nil {
		// there is no past deployment quota set
		return nil
	}

	prunableDeployments := appsutil.DeploymentsForCleanup(deploymentConfig, existingDeployments)
	if len(prunableDeployments) <= int(*deploymentConfig.Spec.RevisionHistoryLimit) {
		// the past deployment quota has not been exceeded
		return nil
	}

	deletionErrors := []error{}
	for i := 0; i < (len(prunableDeployments) - int(*deploymentConfig.Spec.RevisionHistoryLimit)); i++ {
		deployment := prunableDeployments[i]
		// Spec.Replicas is a pointer and may be unset; guard the dereference
		// (reconcileDeployments guards the same field) and treat nil as zero.
		if deployment.Spec.Replicas != nil && *deployment.Spec.Replicas != 0 {
			// we do not want to clobber active older deployments, but we *do* want them to count
			// against the quota so that they will be pruned when they're scaled down
			continue
		}

		// Background propagation lets the garbage collector remove dependents
		// asynchronously instead of blocking this controller.
		policy := metav1.DeletePropagationBackground
		err := c.kubeClient.ReplicationControllers(deployment.Namespace).Delete(deployment.Name, &metav1.DeleteOptions{
			PropagationPolicy: &policy,
		})
		if err != nil && !kapierrors.IsNotFound(err) {
			deletionErrors = append(deletionErrors, err)
		}
	}

	return kutilerrors.NewAggregate(deletionErrors)
}
// triggerActivated indicates whether we should proceed with new rollout as one of the
// triggers were activated (config change or image change). The first bool indicates that
// the triggers are active and the second indicates that we should skip the rollout because we
// are waiting for the trigger to complete its update (waiting for an image, for example).
// Trigger-cause recording mutates config, so callers pass a deep copy.
func triggerActivated(config *appsv1.DeploymentConfig, latestExists bool, latestDeployment *v1.ReplicationController) (bool, bool, error) {
	// Paused configs never trigger a rollout.
	if config.Spec.Paused {
		return false, false, nil
	}
	imageTrigger := appsutil.HasImageChangeTrigger(config)
	configTrigger := appsutil.HasChangeTrigger(config)
	hasTrigger := imageTrigger || configTrigger

	// no-op when no triggers are defined.
	if !hasTrigger {
		return false, false, nil
	}

	// Handle initial rollouts
	if appsutil.IsInitialDeployment(config) {
		hasAvailableImages := appsutil.HasLastTriggeredImage(config)
		// When config has an image trigger, wait until its images are available to trigger.
		if imageTrigger {
			if hasAvailableImages {
				glog.V(4).Infof("Rolling out initial deployment for %s/%s as it now have images available", config.Namespace, config.Name)
				// TODO: Technically this is not a config change cause, but we will have to report the image that caused the trigger.
				// In some cases it might be difficult because config can have multiple ICT.
				appsutil.RecordConfigChangeCause(config)
				return true, false, nil
			}
			glog.V(4).Infof("Rolling out initial deployment for %s/%s deferred until its images are ready", config.Namespace, config.Name)
			// Skip: the rollout must wait for the image trigger to resolve.
			return false, true, nil
		}
		// Rollout if we only have config change trigger.
		if configTrigger {
			glog.V(4).Infof("Rolling out initial deployment for %s/%s", config.Namespace, config.Name)
			appsutil.RecordConfigChangeCause(config)
			return true, false, nil
		}
		// We are waiting for the initial RC to be created.
		return false, false, nil
	}

	// Wait for the RC to be created
	if !latestExists {
		return false, false, nil
	}

	// We need existing deployment at this point to compare its template with current config template.
	if latestDeployment == nil {
		return false, false, nil
	}

	if imageTrigger {
		if ok, imageNames := appsutil.HasUpdatedImages(config, latestDeployment); ok {
			glog.V(4).Infof("Rolling out #%d deployment for %s/%s caused by image changes (%s)", config.Status.LatestVersion+1, config.Namespace, config.Name, strings.Join(imageNames, ","))
			appsutil.RecordImageChangeCauses(config, imageNames)
			return true, false, nil
		}
	}

	if configTrigger {
		isLatest, changes, err := appsutil.HasLatestPodTemplate(config, latestDeployment)
		if err != nil {
			return false, false, fmt.Errorf("error while checking for latest pod template in replication controller: %v", err)
		}
		if !isLatest {
			glog.V(4).Infof("Rolling out #%d deployment for %s/%s caused by config change, diff: %s", config.Status.LatestVersion+1, config.Namespace, config.Name, changes)
			appsutil.RecordConfigChangeCause(config)
			return true, false, nil
		}
	}

	return false, false, nil
}
deploymentconfig: log update conflicts using higher log level in controller
package deploymentconfig
import (
"fmt"
"reflect"
"strings"
"github.com/golang/glog"
"k8s.io/api/core/v1"
kapierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
kutilerrors "k8s.io/apimachinery/pkg/util/errors"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
kcoreclient "k8s.io/client-go/kubernetes/typed/core/v1"
kcorelisters "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/retry"
"k8s.io/client-go/util/workqueue"
kcontroller "k8s.io/kubernetes/pkg/controller"
appsv1 "github.com/openshift/api/apps/v1"
appsv1client "github.com/openshift/client-go/apps/clientset/versioned/typed/apps/v1"
appsv1lister "github.com/openshift/client-go/apps/listers/apps/v1"
appsutil "github.com/openshift/origin/pkg/apps/util"
)
const (
	// maxRetryCount is the number of times a deployment config will be retried before it is dropped out
	// of the queue. Below this limit, transient sync errors are requeued with rate limiting.
	maxRetryCount = 15
)
// fatalError is an error which can't be retried; syncs that return it are
// dropped from the queue instead of being requeued.
type fatalError string

// Error implements the error interface for fatalError.
func (e fatalError) Error() string {
	const prefix = "fatal error handling deployment config: "
	return fmt.Sprintf("%s%s", prefix, string(e))
}
// DeploymentConfigController is responsible for creating a new deployment
// when:
//
// 1. The config version is > 0 and,
// 2. No deployment for the version exists.
//
// The controller reconciles deployments with the replica count specified on
// the config. The active deployment (that is, the latest successful
// deployment) will always be scaled to the config replica count. All other
// deployments will be scaled to zero.
//
// If a new version is observed for which no deployment exists, any running
// deployments will be cancelled. The controller will not attempt to scale
// running deployments.
type DeploymentConfigController struct {
	// appsClient provides access to deploymentconfigs.
	appsClient appsv1client.DeploymentConfigsGetter
	// kubeClient provides access to replication controllers.
	kubeClient kcoreclient.ReplicationControllersGetter

	// queue contains deployment configs that need to be synced.
	queue workqueue.RateLimitingInterface

	// dcIndex is the cache indexer for deployment configs — presumably the
	// store backing dcLister; TODO confirm against the factory wiring.
	dcIndex cache.Indexer
	// dcLister provides a local cache for deployment configs.
	dcLister appsv1lister.DeploymentConfigLister
	// dcStoreSynced makes sure the dc store is synced before reconciling any deployment config.
	dcStoreSynced func() bool
	// rcLister can list/get replication controllers from a shared informer's cache.
	rcLister kcorelisters.ReplicationControllerLister
	// rcListerSynced makes sure the rc shared informer is synced before reconciling any deployment config.
	rcListerSynced func() bool
	// rcControl is used for adopting/releasing replication controllers.
	rcControl RCControlInterface

	// codec is used to build deployments from configs.
	codec runtime.Codec
	// recorder is used to record events.
	recorder record.EventRecorder
}
// Handle implements the loop that processes deployment configs. Since this controller started
// using caches, the provided config MUST be deep-copied beforehand (see work() in factory.go).
func (c *DeploymentConfigController) Handle(config *appsv1.DeploymentConfig) error {
	glog.V(5).Infof("Reconciling %s/%s", config.Namespace, config.Name)
	// There's nothing to reconcile until the version is nonzero.
	if appsutil.IsInitialDeployment(config) && !appsutil.HasTrigger(config) {
		return c.updateStatus(config, []*v1.ReplicationController{}, true)
	}

	// List all ReplicationControllers to find also those we own but that no longer match our selector.
	// They will be orphaned by ClaimReplicationControllers().
	rcList, err := c.rcLister.ReplicationControllers(config.Namespace).List(labels.Everything())
	if err != nil {
		return fmt.Errorf("error while deploymentConfigController listing replication controllers: %v", err)
	}
	// If any adoptions are attempted, we should first recheck for deletion with
	// an uncached quorum read sometime after listing ReplicationControllers (see Kubernetes #42639).
	canAdoptFunc := kcontroller.RecheckDeletionTimestamp(func() (metav1.Object, error) {
		fresh, err := c.appsClient.DeploymentConfigs(config.Namespace).Get(config.Name, metav1.GetOptions{})
		if err != nil {
			return nil, err
		}
		// A UID mismatch means the config was deleted and recreated; refuse to
		// adopt on behalf of the stale object.
		if fresh.UID != config.UID {
			return nil, fmt.Errorf("original DeploymentConfig %s/%s is gone: got uid %s, wanted %s", config.Namespace, config.Name, fresh.UID, config.UID)
		}
		return fresh, nil
	})
	cm := NewRCControllerRefManager(c.rcControl, config, appsutil.ConfigSelector(config.Name), appsv1.GroupVersion.WithKind("DeploymentConfig"), canAdoptFunc)
	existingDeployments, err := cm.ClaimReplicationControllers(rcList)
	if err != nil {
		return fmt.Errorf("error while deploymentConfigController claiming replication controllers: %v", err)
	}

	// In case the deployment config has been marked for deletion, merely update its status with
	// the latest available information. Some deletions may take some time to complete so there
	// is value in doing this.
	if config.DeletionTimestamp != nil {
		return c.updateStatus(config, existingDeployments, true)
	}

	// If the config is paused we shouldn't create new deployments for it.
	if config.Spec.Paused {
		// in order for revision history limit cleanup to work for paused
		// deployments, we need to trigger it here
		if err := c.cleanupOldDeployments(existingDeployments, config); err != nil {
			c.recorder.Eventf(config, v1.EventTypeWarning, "DeploymentCleanupFailed", "Couldn't clean up deployments: %v", err)
		}
		return c.updateStatus(config, existingDeployments, true)
	}

	latestExists, latestDeployment := appsutil.LatestDeploymentInfo(config, existingDeployments)

	// When the RC for the latest version doesn't exist yet, older running
	// rollouts must be cancelled before deploying the new version.
	if !latestExists {
		if err := c.cancelRunningRollouts(config, existingDeployments, cm); err != nil {
			return err
		}
	}
	// Never deploy with invalid or unresolved images
	for i, container := range config.Spec.Template.Spec.Containers {
		if len(strings.TrimSpace(container.Image)) == 0 {
			glog.V(4).Infof("Postponing rollout #%d for DeploymentConfig %s/%s because of invalid or unresolved image for container #%d (name=%s)", config.Status.LatestVersion, config.Namespace, config.Name, i, container.Name)
			return c.updateStatus(config, existingDeployments, true)
		}
	}

	// triggerActivated records trigger causes on the config, so work on a copy.
	configCopy := config.DeepCopy()
	// Process triggers and start an initial rollouts
	shouldTrigger, shouldSkip, err := triggerActivated(configCopy, latestExists, latestDeployment)
	if err != nil {
		return fmt.Errorf("triggerActivated failed: %v", err)
	}

	if shouldSkip {
		return c.updateStatus(configCopy, existingDeployments, true)
	}

	// Bumping LatestVersion requeues the config; the new RC is created on a
	// subsequent sync once the status update is observed.
	if shouldTrigger {
		configCopy.Status.LatestVersion++
		_, err := c.appsClient.DeploymentConfigs(configCopy.Namespace).UpdateStatus(configCopy)
		return err
	}

	// If the latest deployment already exists, reconcile existing deployments
	// and return early.
	if latestExists {
		// If the latest deployment is still running, try again later. We don't
		// want to compete with the deployer.
		if !appsutil.IsTerminatedDeployment(latestDeployment) {
			return c.updateStatus(config, existingDeployments, false)
		}

		return c.reconcileDeployments(existingDeployments, config, cm)
	}

	// No deployments are running and the latest deployment doesn't exist, so
	// create the new deployment.
	deployment, err := appsutil.MakeDeployment(config)
	if err != nil {
		// fatalError: an unbuildable config cannot be fixed by retrying.
		return fatalError(fmt.Sprintf("couldn't make deployment from (potentially invalid) deployment config %s: %v", appsutil.LabelForDeploymentConfig(config), err))
	}
	created, err := c.kubeClient.ReplicationControllers(config.Namespace).Create(deployment)
	if err != nil {
		// We need to find out if our controller owns that deployment and report error if not
		if kapierrors.IsAlreadyExists(err) {
			rc, err := c.rcLister.ReplicationControllers(deployment.Namespace).Get(deployment.Name)
			if err != nil {
				return fmt.Errorf("error while deploymentConfigController getting the replication controller %s/%s: %v", deployment.Namespace, deployment.Name, err)
			}
			// We need to make sure we own that RC or adopt it if possible
			isOurs, err := cm.ClaimReplicationController(rc)
			if err != nil {
				return fmt.Errorf("error while deploymentConfigController claiming the replication controller: %v", err)
			}
			if isOurs {
				// If the deployment was already created, just move on. The cache could be
				// stale, or another process could have already handled this update.
				return c.updateStatus(config, existingDeployments, true)
			} else {
				err = fmt.Errorf("replication controller %s already exists and deployment config is not allowed to claim it", deployment.Name)
				c.recorder.Eventf(config, v1.EventTypeWarning, "DeploymentCreationFailed", "Couldn't deploy version %d: %v", config.Status.LatestVersion, err)
				return c.updateStatus(config, existingDeployments, true)
			}
		}
		c.recorder.Eventf(config, v1.EventTypeWarning, "DeploymentCreationFailed", "Couldn't deploy version %d: %s", config.Status.LatestVersion, err)
		// We don't care about this error since we need to report the create failure.
		cond := appsutil.NewDeploymentCondition(appsv1.DeploymentProgressing, v1.ConditionFalse, appsutil.FailedRcCreateReason, err.Error())
		_ = c.updateStatus(config, existingDeployments, true, *cond)
		return fmt.Errorf("couldn't create deployment for deployment config %s: %v", appsutil.LabelForDeploymentConfig(config), err)
	}
	msg := fmt.Sprintf("Created new replication controller %q for version %d", created.Name, config.Status.LatestVersion)
	c.recorder.Eventf(config, v1.EventTypeNormal, "DeploymentCreated", msg)

	// As we've just created a new deployment, we need to make sure to clean
	// up old deployments if we have reached our deployment history quota
	existingDeployments = append(existingDeployments, created)
	if err := c.cleanupOldDeployments(existingDeployments, config); err != nil {
		c.recorder.Eventf(config, v1.EventTypeWarning, "DeploymentCleanupFailed", "Couldn't clean up deployments: %v", err)
	}

	cond := appsutil.NewDeploymentCondition(appsv1.DeploymentProgressing, v1.ConditionTrue, appsutil.NewReplicationControllerReason, msg)
	return c.updateStatus(config, existingDeployments, true, *cond)
}
// reconcileDeployments reconciles existing deployment replica counts which
// could have diverged outside the deployment process (e.g. due to auto or
// manual scaling, or partial deployments). The active deployment is the last
// successful deployment, not necessarily the latest in terms of the config
// version. The active deployment replica count should follow the config, and
// all other deployments should be scaled to zero.
func (c *DeploymentConfigController) reconcileDeployments(existingDeployments []*v1.ReplicationController, config *appsv1.DeploymentConfig, cm *RCControllerRefManager) error {
	activeDeployment := appsutil.ActiveDeployment(existingDeployments)

	// Reconcile deployments. The active deployment follows the config, and all
	// other deployments should be scaled to zero.
	var updatedDeployments []*v1.ReplicationController
	for i := range existingDeployments {
		deployment := existingDeployments[i]
		toAppend := deployment

		isActiveDeployment := activeDeployment != nil && deployment.Name == activeDeployment.Name

		// Spec.Replicas is a pointer; treat nil as zero replicas.
		oldReplicaCount := deployment.Spec.Replicas
		if oldReplicaCount == nil {
			zero := int32(0)
			oldReplicaCount = &zero
		}
		// Only the active deployment keeps the configured replica count.
		newReplicaCount := int32(0)
		if isActiveDeployment {
			newReplicaCount = config.Spec.Replicas
		}
		// Test configs are always scaled down after the deployment completes.
		if config.Spec.Test {
			glog.V(4).Infof("Deployment config %q is test and deployment %q will be scaled down", appsutil.LabelForDeploymentConfig(config), appsutil.LabelForDeployment(deployment))
			newReplicaCount = 0
		}

		// Only update if necessary.
		var copied *v1.ReplicationController
		if newReplicaCount != *oldReplicaCount {
			if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
				// refresh the replication controller version
				rc, err := c.rcLister.ReplicationControllers(deployment.Namespace).Get(deployment.Name)
				if err != nil {
					return err
				}
				// We need to make sure we own that RC or adopt it if possible
				isOurs, err := cm.ClaimReplicationController(rc)
				if err != nil {
					return fmt.Errorf("error while deploymentConfigController claiming the replication controller %s/%s: %v", rc.Namespace, rc.Name, err)
				}
				if !isOurs {
					return fmt.Errorf("deployment config %s/%s (%v) no longer owns replication controller %s/%s (%v)",
						config.Namespace, config.Name, config.UID,
						deployment.Namespace, deployment.Name, deployment.UID,
					)
				}

				// Never mutate the informer cache's object; update a deep copy.
				copied = rc.DeepCopy()
				copied.Spec.Replicas = &newReplicaCount
				copied, err = c.kubeClient.ReplicationControllers(copied.Namespace).Update(copied)
				return err
			}); err != nil {
				c.recorder.Eventf(config, v1.EventTypeWarning, "ReplicationControllerScaleFailed",
					"Failed to scale replication controler %q from %d to %d: %v", deployment.Name, *oldReplicaCount, newReplicaCount, err)
				return err
			}

			c.recorder.Eventf(config, v1.EventTypeNormal, "ReplicationControllerScaled", "Scaled replication controller %q from %d to %d", copied.Name, *oldReplicaCount, newReplicaCount)
			toAppend = copied
		}

		updatedDeployments = append(updatedDeployments, toAppend)
	}

	// As the deployment configuration has changed, we need to make sure to clean
	// up old deployments if we have now reached our deployment history quota
	if err := c.cleanupOldDeployments(updatedDeployments, config); err != nil {
		c.recorder.Eventf(config, v1.EventTypeWarning, "ReplicationControllerCleanupFailed", "Couldn't clean up replication controllers: %v", err)
	}

	return c.updateStatus(config, updatedDeployments, true)
}
// updateStatus updates the status of the provided deployment config. Additional
// conditions will override any other condition in the deployment config status.
func (c *DeploymentConfigController) updateStatus(config *appsv1.DeploymentConfig, deployments []*v1.ReplicationController, updateObservedGeneration bool, additional ...appsv1.DeploymentCondition) error {
	newStatus := calculateStatus(config, deployments, updateObservedGeneration, additional...)

	// NOTE: We should update the status of the deployment config only if we need to, otherwise
	// we hotloop between updates.
	if reflect.DeepEqual(newStatus, config.Status) {
		return nil
	}

	// Mutate a deep copy so the shared cache object stays untouched.
	copied := config.DeepCopy()
	copied.Status = newStatus
	// TODO: Retry update conflicts
	if _, err := c.appsClient.DeploymentConfigs(copied.Namespace).UpdateStatus(copied); err != nil {
		return err
	}
	glog.V(4).Infof(fmt.Sprintf("Updated status for DeploymentConfig: %s, ", appsutil.LabelForDeploymentConfig(config)) +
		fmt.Sprintf("replicas %d->%d (need %d), ", config.Status.Replicas, newStatus.Replicas, config.Spec.Replicas) +
		fmt.Sprintf("readyReplicas %d->%d, ", config.Status.ReadyReplicas, newStatus.ReadyReplicas) +
		fmt.Sprintf("availableReplicas %d->%d, ", config.Status.AvailableReplicas, newStatus.AvailableReplicas) +
		fmt.Sprintf("unavailableReplicas %d->%d, ", config.Status.UnavailableReplicas, newStatus.UnavailableReplicas) +
		fmt.Sprintf("sequence No: %v->%v", config.Status.ObservedGeneration, newStatus.ObservedGeneration))
	return nil
}
// cancelRunningRollouts cancels existing rollouts when the latest deployment does not
// exist yet, to allow the new rollout superseding the current config version to proceed.
func (c *DeploymentConfigController) cancelRunningRollouts(config *appsv1.DeploymentConfig, existingDeployments []*v1.ReplicationController, cm *RCControllerRefManager) error {
	awaitingCancellations := false
	for i := range existingDeployments {
		deployment := existingDeployments[i]
		// Skip deployments with an outcome.
		if appsutil.IsTerminatedDeployment(deployment) {
			continue
		}
		// Cancel running deployments.
		awaitingCancellations = true
		// Already-cancelled deployments only need to finish terminating.
		if appsutil.IsDeploymentCancelled(deployment) {
			continue
		}

		// Retry faster on conflicts
		var updatedDeployment *v1.ReplicationController
		err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
			rc, err := c.rcLister.ReplicationControllers(deployment.Namespace).Get(deployment.Name)
			// The RC is already gone; there is nothing left to cancel.
			if kapierrors.IsNotFound(err) {
				return nil
			}
			if err != nil {
				return err
			}

			// We need to make sure we own that RC or adopt it if possible
			isOurs, err := cm.ClaimReplicationController(rc)
			if err != nil {
				return fmt.Errorf("error while deploymentConfigController claiming the replication controller %s/%s: %v", rc.Namespace, rc.Name, err)
			}
			if !isOurs {
				// Not ours and not adoptable; leave it alone.
				return nil
			}

			// Mutate a deep copy, never the lister's cached object.
			copied := rc.DeepCopy()
			appsutil.SetCancelledByNewerDeployment(copied)
			updatedDeployment, err = c.kubeClient.ReplicationControllers(copied.Namespace).Update(copied)
			return err
		})
		if err != nil {
			c.recorder.Eventf(config, v1.EventTypeWarning, "DeploymentCancellationFailed", "Failed to cancel deployment %q superceded by version %d: %s", deployment.Name, config.Status.LatestVersion, err)
			return err
		}
		if updatedDeployment != nil {
			// replace the current deployment with the updated copy so that a future update has a chance at working
			existingDeployments[i] = updatedDeployment
			c.recorder.Eventf(config, v1.EventTypeNormal, "DeploymentCancelled", "Cancelled deployment %q superceded by version %d", deployment.Name, config.Status.LatestVersion)
		}
	}

	// Wait for deployment cancellations before reconciling or creating a new
	// deployment to avoid competing with existing deployment processes.
	if awaitingCancellations {
		c.recorder.Eventf(config, v1.EventTypeNormal, "DeploymentAwaitingCancellation", "Deployment of version %d awaiting cancellation of older running deployments", config.Status.LatestVersion)
		// Returning an error requeues the config; a later sync proceeds once
		// the cancelled deployments terminate.
		return fmt.Errorf("found previous inflight deployment for %s - requeuing", appsutil.LabelForDeploymentConfig(config))
	}

	return nil
}
// calculateStatus builds a fresh DeploymentConfigStatus for config from its
// replication controllers. ObservedGeneration is only advanced when
// updateObservedGeneration is true. Conditions from additional are applied
// last, so they override conditions computed by updateConditions.
func calculateStatus(config *appsv1.DeploymentConfig, rcs []*v1.ReplicationController, updateObservedGeneration bool, additional ...appsv1.DeploymentCondition) appsv1.DeploymentConfigStatus {
	// UpdatedReplicas represents the replicas that use the current deployment config template which means
	// we should inform about the replicas of the latest deployment and not the active.
	latestReplicas := int32(0)
	latestExists, latestRC := appsutil.LatestDeploymentInfo(config, rcs)
	if !latestExists {
		latestRC = nil
	} else {
		latestReplicas = appsutil.GetStatusReplicaCountForDeployments([]*v1.ReplicationController{latestRC})
	}

	available := appsutil.GetAvailableReplicaCountForReplicationControllers(rcs)
	total := appsutil.GetReplicaCountForDeployments(rcs)
	// Clamp at zero — presumably available can transiently exceed the desired
	// total; TODO confirm when this occurs.
	unavailableReplicas := total - available
	if unavailableReplicas < 0 {
		unavailableReplicas = 0
	}

	generation := config.Status.ObservedGeneration
	if updateObservedGeneration {
		generation = config.Generation
	}

	status := appsv1.DeploymentConfigStatus{
		LatestVersion:       config.Status.LatestVersion,
		Details:             config.Status.Details,
		ObservedGeneration:  generation,
		Replicas:            appsutil.GetStatusReplicaCountForDeployments(rcs),
		UpdatedReplicas:     latestReplicas,
		AvailableReplicas:   available,
		ReadyReplicas:       appsutil.GetReadyReplicaCountForReplicationControllers(rcs),
		UnavailableReplicas: unavailableReplicas,
		Conditions:          config.Status.Conditions,
	}

	updateConditions(config, &status, latestRC)
	for _, cond := range additional {
		appsutil.SetDeploymentCondition(&status, cond)
	}

	return status
}
// updateConditions sets the Available and Progressing conditions on newStatus
// based on the config's replica counts and the state of the latest RC, which
// may be nil when the latest deployment does not exist yet (no Progressing
// condition is computed in that case).
func updateConditions(config *appsv1.DeploymentConfig, newStatus *appsv1.DeploymentConfigStatus, latestRC *v1.ReplicationController) {
	// Availability condition.
	if newStatus.AvailableReplicas >= config.Spec.Replicas-appsutil.MaxUnavailable(config) && newStatus.AvailableReplicas > 0 {
		minAvailability := appsutil.NewDeploymentCondition(appsv1.DeploymentAvailable, v1.ConditionTrue, "",
			"Deployment config has minimum availability.")
		appsutil.SetDeploymentCondition(newStatus, *minAvailability)
	} else {
		noMinAvailability := appsutil.NewDeploymentCondition(appsv1.DeploymentAvailable, v1.ConditionFalse, "",
			"Deployment config does not have minimum availability.")
		appsutil.SetDeploymentCondition(newStatus, *noMinAvailability)
	}

	// Condition about progress.
	if latestRC != nil {
		switch appsutil.DeploymentStatusFor(latestRC) {
		case appsv1.DeploymentStatusPending:
			// The deployer pod is not running yet, so progress is unknown.
			msg := fmt.Sprintf("replication controller %q is waiting for pod %q to run", latestRC.Name, appsutil.DeployerPodNameForDeployment(latestRC.Name))
			condition := appsutil.NewDeploymentCondition(appsv1.DeploymentProgressing, v1.ConditionUnknown, "", msg)
			appsutil.SetDeploymentCondition(newStatus, *condition)
		case appsv1.DeploymentStatusRunning:
			if appsutil.IsProgressing(config, newStatus) {
				// Remove first so the re-added condition records a fresh
				// lastTransitionTime (see the TODO below).
				appsutil.RemoveDeploymentCondition(newStatus, appsv1.DeploymentProgressing)
				msg := fmt.Sprintf("replication controller %q is progressing", latestRC.Name)
				condition := appsutil.NewDeploymentCondition(appsv1.DeploymentProgressing, v1.ConditionTrue,
					string(appsv1.ReplicationControllerUpdatedReason), msg)
				// TODO: Right now, we use lastTransitionTime for storing the last time we had any progress instead
				// of the last time the condition transitioned to a new status. We should probably change that.
				appsutil.SetDeploymentCondition(newStatus, *condition)
			}
		case appsv1.DeploymentStatusFailed:
			var condition *appsv1.DeploymentCondition
			// Distinguish a cancelled rollout from a timed-out one.
			if appsutil.IsDeploymentCancelled(latestRC) {
				msg := fmt.Sprintf("rollout of replication controller %q was cancelled", latestRC.Name)
				condition = appsutil.NewDeploymentCondition(appsv1.DeploymentProgressing, v1.ConditionFalse,
					appsutil.CancelledRolloutReason, msg)
			} else {
				msg := fmt.Sprintf("replication controller %q has failed progressing", latestRC.Name)
				condition = appsutil.NewDeploymentCondition(appsv1.DeploymentProgressing, v1.ConditionFalse, appsutil.TimedOutReason, msg)
			}
			appsutil.SetDeploymentCondition(newStatus, *condition)
		case appsv1.DeploymentStatusComplete:
			msg := fmt.Sprintf("replication controller %q successfully rolled out", latestRC.Name)
			condition := appsutil.NewDeploymentCondition(appsv1.DeploymentProgressing, v1.ConditionTrue, appsutil.NewRcAvailableReason, msg)
			appsutil.SetDeploymentCondition(newStatus, *condition)
		}
	}
}
// handleErr decides what to do with a sync error for the deployment config
// identified by key: forget on success or fatal error, requeue transient
// errors with rate limiting, and drop the key after maxRetryCount retries.
func (c *DeploymentConfigController) handleErr(err error, key interface{}) {
	if err == nil {
		// Successful sync: reset the rate-limiting history for this key.
		c.queue.Forget(key)
		return
	}

	// Fatal errors cannot be fixed by retrying, so report and drop immediately.
	if _, isFatal := err.(fatalError); isFatal {
		utilruntime.HandleError(err)
		c.queue.Forget(key)
		return
	}

	verbosity := glog.Level(2)
	if c.queue.NumRequeues(key) < maxRetryCount {
		// Update conflicts resolve themselves on retry, so log them at a
		// higher verbosity level to reduce log noise.
		if kapierrors.IsConflict(err) {
			verbosity = glog.Level(4)
		}
		glog.V(verbosity).Infof("Error syncing deployment config %v: %v", key, err)
		c.queue.AddRateLimited(key)
		return
	}

	// Retry budget exhausted: surface the error and stop requeuing this key.
	utilruntime.HandleError(err)
	glog.V(2).Infof("Dropping deployment config %q out of the queue: %v", key, err)
	c.queue.Forget(key)
}
// cleanupOldDeployments deletes old replication controller deployments if their quota has been reached.
//
// Candidates returned by DeploymentsForCleanup are deleted front-to-back until the
// count fits within Spec.RevisionHistoryLimit. Deletion errors are collected and
// returned as a single aggregate; NotFound errors are ignored because the desired
// end state has already been reached.
func (c *DeploymentConfigController) cleanupOldDeployments(existingDeployments []*v1.ReplicationController, deploymentConfig *appsv1.DeploymentConfig) error {
	if deploymentConfig.Spec.RevisionHistoryLimit == nil {
		// there is no past deployment quota set
		return nil
	}

	prunableDeployments := appsutil.DeploymentsForCleanup(deploymentConfig, existingDeployments)
	if len(prunableDeployments) <= int(*deploymentConfig.Spec.RevisionHistoryLimit) {
		// the past deployment quota has not been exceeded
		return nil
	}

	deletionErrors := []error{}
	for i := 0; i < (len(prunableDeployments) - int(*deploymentConfig.Spec.RevisionHistoryLimit)); i++ {
		deployment := prunableDeployments[i]
		// Spec.Replicas is a pointer and may be unset; guard the dereference
		// (reconcileDeployments guards the same field) and treat nil as zero.
		if deployment.Spec.Replicas != nil && *deployment.Spec.Replicas != 0 {
			// we do not want to clobber active older deployments, but we *do* want them to count
			// against the quota so that they will be pruned when they're scaled down
			continue
		}

		// Background propagation lets the garbage collector remove dependents
		// asynchronously instead of blocking this controller.
		policy := metav1.DeletePropagationBackground
		err := c.kubeClient.ReplicationControllers(deployment.Namespace).Delete(deployment.Name, &metav1.DeleteOptions{
			PropagationPolicy: &policy,
		})
		if err != nil && !kapierrors.IsNotFound(err) {
			deletionErrors = append(deletionErrors, err)
		}
	}

	return kutilerrors.NewAggregate(deletionErrors)
}
// triggerActivated indicates whether we should proceed with new rollout as one of the
// triggers were activated (config change or image change). The first bool indicates that
// the triggers are active and the second indicates that we should skip the rollout because we
// are waiting for the trigger to complete its update (waiting for an image, for example).
// Trigger-cause recording mutates config, so callers pass a deep copy.
func triggerActivated(config *appsv1.DeploymentConfig, latestExists bool, latestDeployment *v1.ReplicationController) (bool, bool, error) {
	// Paused configs never trigger a rollout.
	if config.Spec.Paused {
		return false, false, nil
	}
	imageTrigger := appsutil.HasImageChangeTrigger(config)
	configTrigger := appsutil.HasChangeTrigger(config)
	hasTrigger := imageTrigger || configTrigger

	// no-op when no triggers are defined.
	if !hasTrigger {
		return false, false, nil
	}

	// Handle initial rollouts
	if appsutil.IsInitialDeployment(config) {
		hasAvailableImages := appsutil.HasLastTriggeredImage(config)
		// When config has an image trigger, wait until its images are available to trigger.
		if imageTrigger {
			if hasAvailableImages {
				glog.V(4).Infof("Rolling out initial deployment for %s/%s as it now have images available", config.Namespace, config.Name)
				// TODO: Technically this is not a config change cause, but we will have to report the image that caused the trigger.
				// In some cases it might be difficult because config can have multiple ICT.
				appsutil.RecordConfigChangeCause(config)
				return true, false, nil
			}
			glog.V(4).Infof("Rolling out initial deployment for %s/%s deferred until its images are ready", config.Namespace, config.Name)
			// Skip: the rollout must wait for the image trigger to resolve.
			return false, true, nil
		}
		// Rollout if we only have config change trigger.
		if configTrigger {
			glog.V(4).Infof("Rolling out initial deployment for %s/%s", config.Namespace, config.Name)
			appsutil.RecordConfigChangeCause(config)
			return true, false, nil
		}
		// We are waiting for the initial RC to be created.
		return false, false, nil
	}

	// Wait for the RC to be created
	if !latestExists {
		return false, false, nil
	}

	// We need existing deployment at this point to compare its template with current config template.
	if latestDeployment == nil {
		return false, false, nil
	}

	if imageTrigger {
		if ok, imageNames := appsutil.HasUpdatedImages(config, latestDeployment); ok {
			glog.V(4).Infof("Rolling out #%d deployment for %s/%s caused by image changes (%s)", config.Status.LatestVersion+1, config.Namespace, config.Name, strings.Join(imageNames, ","))
			appsutil.RecordImageChangeCauses(config, imageNames)
			return true, false, nil
		}
	}

	if configTrigger {
		isLatest, changes, err := appsutil.HasLatestPodTemplate(config, latestDeployment)
		if err != nil {
			return false, false, fmt.Errorf("error while checking for latest pod template in replication controller: %v", err)
		}
		if !isLatest {
			glog.V(4).Infof("Rolling out #%d deployment for %s/%s caused by config change, diff: %s", config.Status.LatestVersion+1, config.Namespace, config.Name, changes)
			appsutil.RecordConfigChangeCause(config)
			return true, false, nil
		}
	}

	return false, false, nil
}
|
eb8bed9a-2e54-11e5-9284-b827eb9e62be
eb910c6c-2e54-11e5-9284-b827eb9e62be
eb910c6c-2e54-11e5-9284-b827eb9e62be |
b17d106e-2e56-11e5-9284-b827eb9e62be
b182455c-2e56-11e5-9284-b827eb9e62be
b182455c-2e56-11e5-9284-b827eb9e62be |
c9d82c08-2e55-11e5-9284-b827eb9e62be
c9dd45a8-2e55-11e5-9284-b827eb9e62be
c9dd45a8-2e55-11e5-9284-b827eb9e62be |
6a820d82-2e55-11e5-9284-b827eb9e62be
6a872ede-2e55-11e5-9284-b827eb9e62be
6a872ede-2e55-11e5-9284-b827eb9e62be |
0e11b9ec-2e57-11e5-9284-b827eb9e62be
0e16f1fa-2e57-11e5-9284-b827eb9e62be
0e16f1fa-2e57-11e5-9284-b827eb9e62be |
d9624e3e-2e54-11e5-9284-b827eb9e62be
d9678b42-2e54-11e5-9284-b827eb9e62be
d9678b42-2e54-11e5-9284-b827eb9e62be |
035531b4-2e57-11e5-9284-b827eb9e62be
035a93b6-2e57-11e5-9284-b827eb9e62be
035a93b6-2e57-11e5-9284-b827eb9e62be |
bc4a27f8-2e55-11e5-9284-b827eb9e62be
bc4f429c-2e55-11e5-9284-b827eb9e62be
bc4f429c-2e55-11e5-9284-b827eb9e62be |
a9b22632-2e54-11e5-9284-b827eb9e62be
a9b750c6-2e54-11e5-9284-b827eb9e62be
a9b750c6-2e54-11e5-9284-b827eb9e62be |
5ca1a65a-2e55-11e5-9284-b827eb9e62be
5ca6cd42-2e55-11e5-9284-b827eb9e62be
5ca6cd42-2e55-11e5-9284-b827eb9e62be |
7ee8e304-2e55-11e5-9284-b827eb9e62be
7eee100e-2e55-11e5-9284-b827eb9e62be
7eee100e-2e55-11e5-9284-b827eb9e62be |
ac1bf04c-2e54-11e5-9284-b827eb9e62be
ac210a1e-2e54-11e5-9284-b827eb9e62be
ac210a1e-2e54-11e5-9284-b827eb9e62be |
e410bd10-2e55-11e5-9284-b827eb9e62be
e415d8b8-2e55-11e5-9284-b827eb9e62be
e415d8b8-2e55-11e5-9284-b827eb9e62be |
df5a2fdc-2e54-11e5-9284-b827eb9e62be
df5f9634-2e54-11e5-9284-b827eb9e62be
df5f9634-2e54-11e5-9284-b827eb9e62be |
95dca8d4-2e55-11e5-9284-b827eb9e62be
95e1bc48-2e55-11e5-9284-b827eb9e62be
95e1bc48-2e55-11e5-9284-b827eb9e62be |
08b97148-2e56-11e5-9284-b827eb9e62be
08bea762-2e56-11e5-9284-b827eb9e62be
08bea762-2e56-11e5-9284-b827eb9e62be |
f0f78294-2e54-11e5-9284-b827eb9e62be
f0fcb566-2e54-11e5-9284-b827eb9e62be
f0fcb566-2e54-11e5-9284-b827eb9e62be |
221971fc-2e55-11e5-9284-b827eb9e62be
221ec9ae-2e55-11e5-9284-b827eb9e62be
221ec9ae-2e55-11e5-9284-b827eb9e62be |
1beabb96-2e56-11e5-9284-b827eb9e62be
1beffe62-2e56-11e5-9284-b827eb9e62be
1beffe62-2e56-11e5-9284-b827eb9e62be |
51a3d6d2-2e56-11e5-9284-b827eb9e62be
51a90f26-2e56-11e5-9284-b827eb9e62be
51a90f26-2e56-11e5-9284-b827eb9e62be |
44c74634-2e55-11e5-9284-b827eb9e62be
44cc9134-2e55-11e5-9284-b827eb9e62be
44cc9134-2e55-11e5-9284-b827eb9e62be |
3be74e00-2e56-11e5-9284-b827eb9e62be
3bec95a4-2e56-11e5-9284-b827eb9e62be
3bec95a4-2e56-11e5-9284-b827eb9e62be |
fdf4e080-2e55-11e5-9284-b827eb9e62be
fdfa2e78-2e55-11e5-9284-b827eb9e62be
fdfa2e78-2e55-11e5-9284-b827eb9e62be |
0a658878-2e57-11e5-9284-b827eb9e62be
0a6aa79a-2e57-11e5-9284-b827eb9e62be
0a6aa79a-2e57-11e5-9284-b827eb9e62be |
767ceefe-2e55-11e5-9284-b827eb9e62be
76822888-2e55-11e5-9284-b827eb9e62be
76822888-2e55-11e5-9284-b827eb9e62be |
bc2c2f4c-2e54-11e5-9284-b827eb9e62be
bc316264-2e54-11e5-9284-b827eb9e62be
bc316264-2e54-11e5-9284-b827eb9e62be |
9564a920-2e54-11e5-9284-b827eb9e62be
9569badc-2e54-11e5-9284-b827eb9e62be
9569badc-2e54-11e5-9284-b827eb9e62be |
92bf02c8-2e55-11e5-9284-b827eb9e62be
92c413c6-2e55-11e5-9284-b827eb9e62be
92c413c6-2e55-11e5-9284-b827eb9e62be |
1a7a21a8-2e55-11e5-9284-b827eb9e62be
1a7f6f28-2e55-11e5-9284-b827eb9e62be
1a7f6f28-2e55-11e5-9284-b827eb9e62be |
0eaaac8a-2e55-11e5-9284-b827eb9e62be
0eaff884-2e55-11e5-9284-b827eb9e62be
0eaff884-2e55-11e5-9284-b827eb9e62be |
0b30a7e2-2e57-11e5-9284-b827eb9e62be
0b35ef9a-2e57-11e5-9284-b827eb9e62be
0b35ef9a-2e57-11e5-9284-b827eb9e62be |
61c8f7ea-2e56-11e5-9284-b827eb9e62be
61ce18ec-2e56-11e5-9284-b827eb9e62be
61ce18ec-2e56-11e5-9284-b827eb9e62be |
07dd4d6c-2e56-11e5-9284-b827eb9e62be
07e2867e-2e56-11e5-9284-b827eb9e62be
07e2867e-2e56-11e5-9284-b827eb9e62be |
1a63328a-2e56-11e5-9284-b827eb9e62be
1a685c7e-2e56-11e5-9284-b827eb9e62be
1a685c7e-2e56-11e5-9284-b827eb9e62be |
540677f4-2e56-11e5-9284-b827eb9e62be
540babfc-2e56-11e5-9284-b827eb9e62be
540babfc-2e56-11e5-9284-b827eb9e62be |
bce93550-2e55-11e5-9284-b827eb9e62be
bcee52ce-2e55-11e5-9284-b827eb9e62be
bcee52ce-2e55-11e5-9284-b827eb9e62be |
e742c12c-2e55-11e5-9284-b827eb9e62be
e747db26-2e55-11e5-9284-b827eb9e62be
e747db26-2e55-11e5-9284-b827eb9e62be |
3fc33d5a-2e55-11e5-9284-b827eb9e62be
3fc87d56-2e55-11e5-9284-b827eb9e62be
3fc87d56-2e55-11e5-9284-b827eb9e62be |
b0bea94e-2e56-11e5-9284-b827eb9e62be
b0c3c686-2e56-11e5-9284-b827eb9e62be
b0c3c686-2e56-11e5-9284-b827eb9e62be |
ab5c7fcc-2e55-11e5-9284-b827eb9e62be
ab619ae8-2e55-11e5-9284-b827eb9e62be
ab619ae8-2e55-11e5-9284-b827eb9e62be |
08d857e2-2e57-11e5-9284-b827eb9e62be
08dd7704-2e57-11e5-9284-b827eb9e62be
08dd7704-2e57-11e5-9284-b827eb9e62be |
e890ff86-2e54-11e5-9284-b827eb9e62be
e8961be2-2e54-11e5-9284-b827eb9e62be
e8961be2-2e54-11e5-9284-b827eb9e62be |
d54bbf90-2e56-11e5-9284-b827eb9e62be
d550dbce-2e56-11e5-9284-b827eb9e62be
d550dbce-2e56-11e5-9284-b827eb9e62be |
b57e8242-2e56-11e5-9284-b827eb9e62be
b583e02a-2e56-11e5-9284-b827eb9e62be
b583e02a-2e56-11e5-9284-b827eb9e62be |
c3c662d4-2e56-11e5-9284-b827eb9e62be
c3cb83ea-2e56-11e5-9284-b827eb9e62be
c3cb83ea-2e56-11e5-9284-b827eb9e62be |
d88e423c-2e55-11e5-9284-b827eb9e62be
d8935ca4-2e55-11e5-9284-b827eb9e62be
d8935ca4-2e55-11e5-9284-b827eb9e62be |
7f601fc2-2e56-11e5-9284-b827eb9e62be
7f6551ae-2e56-11e5-9284-b827eb9e62be
7f6551ae-2e56-11e5-9284-b827eb9e62be |
1d9ce56e-2e55-11e5-9284-b827eb9e62be
1da2432e-2e55-11e5-9284-b827eb9e62be
1da2432e-2e55-11e5-9284-b827eb9e62be |
607bee60-2e56-11e5-9284-b827eb9e62be
608109fe-2e56-11e5-9284-b827eb9e62be
608109fe-2e56-11e5-9284-b827eb9e62be |
f5772daa-2e55-11e5-9284-b827eb9e62be
f57c5ed8-2e55-11e5-9284-b827eb9e62be
f57c5ed8-2e55-11e5-9284-b827eb9e62be |
671d2d70-2e55-11e5-9284-b827eb9e62be
67224486-2e55-11e5-9284-b827eb9e62be
67224486-2e55-11e5-9284-b827eb9e62be |
7a677948-2e56-11e5-9284-b827eb9e62be
7a6c99b4-2e56-11e5-9284-b827eb9e62be
7a6c99b4-2e56-11e5-9284-b827eb9e62be |
be216d56-2e56-11e5-9284-b827eb9e62be
be268bf6-2e56-11e5-9284-b827eb9e62be
be268bf6-2e56-11e5-9284-b827eb9e62be |
d4d2e8f6-2e54-11e5-9284-b827eb9e62be
d4d80b92-2e54-11e5-9284-b827eb9e62be
d4d80b92-2e54-11e5-9284-b827eb9e62be |
45b543ca-2e55-11e5-9284-b827eb9e62be
45ba8c86-2e55-11e5-9284-b827eb9e62be
45ba8c86-2e55-11e5-9284-b827eb9e62be |
471a24e2-2e55-11e5-9284-b827eb9e62be
471f733e-2e55-11e5-9284-b827eb9e62be
471f733e-2e55-11e5-9284-b827eb9e62be |
d41a0aca-2e54-11e5-9284-b827eb9e62be
d41f3932-2e54-11e5-9284-b827eb9e62be
d41f3932-2e54-11e5-9284-b827eb9e62be |
2f5d846e-2e57-11e5-9284-b827eb9e62be
2f629f12-2e57-11e5-9284-b827eb9e62be
2f629f12-2e57-11e5-9284-b827eb9e62be |
3cbc4140-2e57-11e5-9284-b827eb9e62be
3cc16864-2e57-11e5-9284-b827eb9e62be
3cc16864-2e57-11e5-9284-b827eb9e62be |
35ce64f0-2e55-11e5-9284-b827eb9e62be
35d39c54-2e55-11e5-9284-b827eb9e62be
35d39c54-2e55-11e5-9284-b827eb9e62be |
3516fde2-2e55-11e5-9284-b827eb9e62be
351c376c-2e55-11e5-9284-b827eb9e62be
351c376c-2e55-11e5-9284-b827eb9e62be |
ddc39c3a-2e54-11e5-9284-b827eb9e62be
ddc8f7fc-2e54-11e5-9284-b827eb9e62be
ddc8f7fc-2e54-11e5-9284-b827eb9e62be |
c5d8d8d8-2e54-11e5-9284-b827eb9e62be
c5de0b8c-2e54-11e5-9284-b827eb9e62be
c5de0b8c-2e54-11e5-9284-b827eb9e62be |
c4652ff6-2e54-11e5-9284-b827eb9e62be
c46a7f2e-2e54-11e5-9284-b827eb9e62be
c46a7f2e-2e54-11e5-9284-b827eb9e62be |
d4713854-2e54-11e5-9284-b827eb9e62be
d4765438-2e54-11e5-9284-b827eb9e62be
d4765438-2e54-11e5-9284-b827eb9e62be |
426e88fc-2e55-11e5-9284-b827eb9e62be
4273b3cc-2e55-11e5-9284-b827eb9e62be
4273b3cc-2e55-11e5-9284-b827eb9e62be |
dd224a5a-2e55-11e5-9284-b827eb9e62be
dd2763be-2e55-11e5-9284-b827eb9e62be
dd2763be-2e55-11e5-9284-b827eb9e62be |
8f8c38a0-2e55-11e5-9284-b827eb9e62be
8f915402-2e55-11e5-9284-b827eb9e62be
8f915402-2e55-11e5-9284-b827eb9e62be |
03a50924-2e56-11e5-9284-b827eb9e62be
03aa8912-2e56-11e5-9284-b827eb9e62be
03aa8912-2e56-11e5-9284-b827eb9e62be |
0caf64a6-2e56-11e5-9284-b827eb9e62be
0cb4b10e-2e56-11e5-9284-b827eb9e62be
0cb4b10e-2e56-11e5-9284-b827eb9e62be |
bf8db3f2-2e56-11e5-9284-b827eb9e62be
bf92d8b4-2e56-11e5-9284-b827eb9e62be
bf92d8b4-2e56-11e5-9284-b827eb9e62be |
ad6f5a54-2e56-11e5-9284-b827eb9e62be
ad747aac-2e56-11e5-9284-b827eb9e62be
ad747aac-2e56-11e5-9284-b827eb9e62be |
cba5e4f4-2e54-11e5-9284-b827eb9e62be
cbab10be-2e54-11e5-9284-b827eb9e62be
cbab10be-2e54-11e5-9284-b827eb9e62be |
3fe9b0d8-2e56-11e5-9284-b827eb9e62be
3feeccbc-2e56-11e5-9284-b827eb9e62be
3feeccbc-2e56-11e5-9284-b827eb9e62be |
d1272a1e-2e54-11e5-9284-b827eb9e62be
d12c44b8-2e54-11e5-9284-b827eb9e62be
d12c44b8-2e54-11e5-9284-b827eb9e62be |
900c5648-2e55-11e5-9284-b827eb9e62be
901173e4-2e55-11e5-9284-b827eb9e62be
901173e4-2e55-11e5-9284-b827eb9e62be |
95181496-2e56-11e5-9284-b827eb9e62be
951d2f44-2e56-11e5-9284-b827eb9e62be
951d2f44-2e56-11e5-9284-b827eb9e62be |
235552ac-2e55-11e5-9284-b827eb9e62be
235a8ab0-2e55-11e5-9284-b827eb9e62be
235a8ab0-2e55-11e5-9284-b827eb9e62be |
392f0f72-2e56-11e5-9284-b827eb9e62be
393cfce0-2e56-11e5-9284-b827eb9e62be
393cfce0-2e56-11e5-9284-b827eb9e62be |
bd174828-2e55-11e5-9284-b827eb9e62be
bd1c63c6-2e55-11e5-9284-b827eb9e62be
bd1c63c6-2e55-11e5-9284-b827eb9e62be |
62a7cdf4-2e55-11e5-9284-b827eb9e62be
62ad167e-2e55-11e5-9284-b827eb9e62be
62ad167e-2e55-11e5-9284-b827eb9e62be |
3ba14f32-2e55-11e5-9284-b827eb9e62be
3ba68236-2e55-11e5-9284-b827eb9e62be
3ba68236-2e55-11e5-9284-b827eb9e62be |
96ecdc44-2e55-11e5-9284-b827eb9e62be
96f1fd64-2e55-11e5-9284-b827eb9e62be
96f1fd64-2e55-11e5-9284-b827eb9e62be |
d7470194-2e54-11e5-9284-b827eb9e62be
d74c3718-2e54-11e5-9284-b827eb9e62be
d74c3718-2e54-11e5-9284-b827eb9e62be |
02eaaa16-2e56-11e5-9284-b827eb9e62be
02efe152-2e56-11e5-9284-b827eb9e62be
02efe152-2e56-11e5-9284-b827eb9e62be |
66921348-2e55-11e5-9284-b827eb9e62be
6697601e-2e55-11e5-9284-b827eb9e62be
6697601e-2e55-11e5-9284-b827eb9e62be |
daf5f59c-2e55-11e5-9284-b827eb9e62be
dafb0c62-2e55-11e5-9284-b827eb9e62be
dafb0c62-2e55-11e5-9284-b827eb9e62be |
e86f426e-2e55-11e5-9284-b827eb9e62be
e8745f60-2e55-11e5-9284-b827eb9e62be
e8745f60-2e55-11e5-9284-b827eb9e62be |
d28eb5f2-2e54-11e5-9284-b827eb9e62be
d293d5aa-2e54-11e5-9284-b827eb9e62be
d293d5aa-2e54-11e5-9284-b827eb9e62be |
3c295952-2e57-11e5-9284-b827eb9e62be
3c2e75a4-2e57-11e5-9284-b827eb9e62be
3c2e75a4-2e57-11e5-9284-b827eb9e62be |
0d65196c-2e57-11e5-9284-b827eb9e62be
0d6a4fa4-2e57-11e5-9284-b827eb9e62be
0d6a4fa4-2e57-11e5-9284-b827eb9e62be |
2749601a-2e55-11e5-9284-b827eb9e62be
274e8fae-2e55-11e5-9284-b827eb9e62be
274e8fae-2e55-11e5-9284-b827eb9e62be |
eaa10832-2e56-11e5-9284-b827eb9e62be
eaa66494-2e56-11e5-9284-b827eb9e62be
eaa66494-2e56-11e5-9284-b827eb9e62be |
84530120-2e56-11e5-9284-b827eb9e62be
845839b0-2e56-11e5-9284-b827eb9e62be
845839b0-2e56-11e5-9284-b827eb9e62be |
84436a08-2e56-11e5-9284-b827eb9e62be
844897e4-2e56-11e5-9284-b827eb9e62be
844897e4-2e56-11e5-9284-b827eb9e62be |
987ba238-2e56-11e5-9284-b827eb9e62be
9880be80-2e56-11e5-9284-b827eb9e62be
9880be80-2e56-11e5-9284-b827eb9e62be |
d2e0f3c0-2e55-11e5-9284-b827eb9e62be
d2e6112a-2e55-11e5-9284-b827eb9e62be
d2e6112a-2e55-11e5-9284-b827eb9e62be |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.