repo
stringlengths 5
54
| path
stringlengths 4
155
| func_name
stringlengths 1
118
| original_string
stringlengths 52
85.5k
| language
stringclasses 1
value | code
stringlengths 52
85.5k
| code_tokens
list | docstring
stringlengths 6
2.61k
| docstring_tokens
list | sha
stringlengths 40
40
| url
stringlengths 85
252
| partition
stringclasses 1
value |
|---|---|---|---|---|---|---|---|---|---|---|---|
grafana/metrictank
|
mdata/cache/ccache.go
|
Search
|
// Search looks up cached chunks for metric covering [from, until) and returns
// a CCSearchResult recording which parts of the range were satisfied.
// from is inclusive, until is exclusive; from must be < until or
// ErrInvalidRange is returned.
// Safe to call on a nil *CCache: it reports a miss and returns an empty result.
func (c *CCache) Search(ctx context.Context, metric schema.AMKey, from, until uint32) (*CCSearchResult, error) {
if from >= until {
return nil, ErrInvalidRange
}
res := &CCSearchResult{
From: from,
Until: until,
}
// nil cache: behave as a full miss so callers need no nil checks
if c == nil {
accnt.CacheMetricMiss.Inc()
return res, nil
}
ctx, span := tracing.NewSpan(ctx, c.tracer, "CCache.Search")
defer span.Finish()
c.RLock()
defer c.RUnlock()
cm, ok := c.metricCache[metric]
if !ok {
span.SetTag("cache", "miss")
accnt.CacheMetricMiss.Inc()
return res, nil
}
// delegate to the per-metric cache; it fills res.Start / res.End / res.Complete
cm.Search(ctx, metric, res, from, until)
if len(res.Start) == 0 && len(res.End) == 0 {
span.SetTag("cache", "miss")
accnt.CacheMetricMiss.Inc()
} else {
accnt.CacheChunkHit.Add(len(res.Start) + len(res.End))
// hit accounting is done off the request path.
// NOTE(review): the goroutine reads res.Start/res.End concurrently with the
// caller — assumes the caller treats them as read-only; confirm.
go func() {
c.accnt.HitChunks(metric, res.Start)
c.accnt.HitChunks(metric, res.End)
}()
if res.Complete {
span.SetTag("cache", "hit-full")
accnt.CacheMetricHitFull.Inc()
} else {
span.SetTag("cache", "hit-partial")
accnt.CacheMetricHitPartial.Inc()
}
}
return res, nil
}
|
go
|
// Search looks up cached chunks for metric covering [from, until) and returns
// a CCSearchResult recording which parts of the range were satisfied.
// from is inclusive, until is exclusive; from must be < until or
// ErrInvalidRange is returned.
// Safe to call on a nil *CCache: it reports a miss and returns an empty result.
func (c *CCache) Search(ctx context.Context, metric schema.AMKey, from, until uint32) (*CCSearchResult, error) {
if from >= until {
return nil, ErrInvalidRange
}
res := &CCSearchResult{
From: from,
Until: until,
}
// nil cache: behave as a full miss so callers need no nil checks
if c == nil {
accnt.CacheMetricMiss.Inc()
return res, nil
}
ctx, span := tracing.NewSpan(ctx, c.tracer, "CCache.Search")
defer span.Finish()
c.RLock()
defer c.RUnlock()
cm, ok := c.metricCache[metric]
if !ok {
span.SetTag("cache", "miss")
accnt.CacheMetricMiss.Inc()
return res, nil
}
// delegate to the per-metric cache; it fills res.Start / res.End / res.Complete
cm.Search(ctx, metric, res, from, until)
if len(res.Start) == 0 && len(res.End) == 0 {
span.SetTag("cache", "miss")
accnt.CacheMetricMiss.Inc()
} else {
accnt.CacheChunkHit.Add(len(res.Start) + len(res.End))
// hit accounting is done off the request path.
// NOTE(review): the goroutine reads res.Start/res.End concurrently with the
// caller — assumes the caller treats them as read-only; confirm.
go func() {
c.accnt.HitChunks(metric, res.Start)
c.accnt.HitChunks(metric, res.End)
}()
if res.Complete {
span.SetTag("cache", "hit-full")
accnt.CacheMetricHitFull.Inc()
} else {
span.SetTag("cache", "hit-partial")
accnt.CacheMetricHitPartial.Inc()
}
}
return res, nil
}
|
[
"func",
"(",
"c",
"*",
"CCache",
")",
"Search",
"(",
"ctx",
"context",
".",
"Context",
",",
"metric",
"schema",
".",
"AMKey",
",",
"from",
",",
"until",
"uint32",
")",
"(",
"*",
"CCSearchResult",
",",
"error",
")",
"{",
"if",
"from",
">=",
"until",
"{",
"return",
"nil",
",",
"ErrInvalidRange",
"\n",
"}",
"\n\n",
"res",
":=",
"&",
"CCSearchResult",
"{",
"From",
":",
"from",
",",
"Until",
":",
"until",
",",
"}",
"\n\n",
"if",
"c",
"==",
"nil",
"{",
"accnt",
".",
"CacheMetricMiss",
".",
"Inc",
"(",
")",
"\n",
"return",
"res",
",",
"nil",
"\n",
"}",
"\n\n",
"ctx",
",",
"span",
":=",
"tracing",
".",
"NewSpan",
"(",
"ctx",
",",
"c",
".",
"tracer",
",",
"\"",
"\"",
")",
"\n",
"defer",
"span",
".",
"Finish",
"(",
")",
"\n\n",
"c",
".",
"RLock",
"(",
")",
"\n",
"defer",
"c",
".",
"RUnlock",
"(",
")",
"\n\n",
"cm",
",",
"ok",
":=",
"c",
".",
"metricCache",
"[",
"metric",
"]",
"\n",
"if",
"!",
"ok",
"{",
"span",
".",
"SetTag",
"(",
"\"",
"\"",
",",
"\"",
"\"",
")",
"\n",
"accnt",
".",
"CacheMetricMiss",
".",
"Inc",
"(",
")",
"\n",
"return",
"res",
",",
"nil",
"\n",
"}",
"\n\n",
"cm",
".",
"Search",
"(",
"ctx",
",",
"metric",
",",
"res",
",",
"from",
",",
"until",
")",
"\n",
"if",
"len",
"(",
"res",
".",
"Start",
")",
"==",
"0",
"&&",
"len",
"(",
"res",
".",
"End",
")",
"==",
"0",
"{",
"span",
".",
"SetTag",
"(",
"\"",
"\"",
",",
"\"",
"\"",
")",
"\n",
"accnt",
".",
"CacheMetricMiss",
".",
"Inc",
"(",
")",
"\n",
"}",
"else",
"{",
"accnt",
".",
"CacheChunkHit",
".",
"Add",
"(",
"len",
"(",
"res",
".",
"Start",
")",
"+",
"len",
"(",
"res",
".",
"End",
")",
")",
"\n",
"go",
"func",
"(",
")",
"{",
"c",
".",
"accnt",
".",
"HitChunks",
"(",
"metric",
",",
"res",
".",
"Start",
")",
"\n",
"c",
".",
"accnt",
".",
"HitChunks",
"(",
"metric",
",",
"res",
".",
"End",
")",
"\n",
"}",
"(",
")",
"\n\n",
"if",
"res",
".",
"Complete",
"{",
"span",
".",
"SetTag",
"(",
"\"",
"\"",
",",
"\"",
"\"",
")",
"\n",
"accnt",
".",
"CacheMetricHitFull",
".",
"Inc",
"(",
")",
"\n",
"}",
"else",
"{",
"span",
".",
"SetTag",
"(",
"\"",
"\"",
",",
"\"",
"\"",
")",
"\n",
"accnt",
".",
"CacheMetricHitPartial",
".",
"Inc",
"(",
")",
"\n",
"}",
"\n",
"}",
"\n\n",
"return",
"res",
",",
"nil",
"\n",
"}"
] |
// Search looks for the requested metric and returns a complete-as-possible CCSearchResult
// from is inclusive, until is exclusive
|
[
"Search",
"looks",
"for",
"the",
"requested",
"metric",
"and",
"returns",
"a",
"complete",
"-",
"as",
"-",
"possible",
"CCSearchResult",
"from",
"is",
"inclusive",
"until",
"is",
"exclusive"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/mdata/cache/ccache.go#L258-L308
|
train
|
grafana/metrictank
|
mdata/aggmetrics.go
|
GC
|
// GC runs forever. Once per gcInterval (plus a minute of slack past the
// interval boundary) it scans every org's metrics, persists/purges stale
// chunks and metrics, removes empty orgs, and refreshes the per-org and
// total active-metric gauges. It must be the only goroutine that deletes
// from ms.Metrics.
func (ms *AggMetrics) GC() {
for {
// sleep until one minute past the next gcInterval boundary
unix := time.Duration(time.Now().UnixNano())
diff := ms.gcInterval - (unix % ms.gcInterval)
time.Sleep(diff + time.Minute)
log.Info("checking for stale chunks that need persisting.")
now := uint32(time.Now().Unix())
chunkMinTs := now - uint32(ms.chunkMaxStale)
metricMinTs := now - uint32(ms.metricMaxStale)
// as this is the only goroutine that can delete from ms.Metrics
// we only need to lock long enough to get the list of orgs, then for each org
// get the list of active metrics.
// It doesn't matter if new orgs or metrics are added while we iterate these lists.
ms.RLock()
orgs := make([]uint32, 0, len(ms.Metrics))
for o := range ms.Metrics {
orgs = append(orgs, o)
}
ms.RUnlock()
for _, org := range orgs {
orgActiveMetrics := promActiveMetrics.WithLabelValues(strconv.Itoa(int(org)))
// NOTE(review): len(ms.Metrics[org]) below is read without holding the
// lock — only used as a capacity hint, but technically racy; confirm.
keys := make([]schema.Key, 0, len(ms.Metrics[org]))
ms.RLock()
for k := range ms.Metrics[org] {
keys = append(keys, k)
}
ms.RUnlock()
for _, key := range keys {
gcMetric.Inc()
ms.RLock()
a := ms.Metrics[org][key]
ms.RUnlock()
// run the metric's GC outside the lock; it may persist chunks, which can be slow
points, stale := a.GC(now, chunkMinTs, metricMinTs)
if stale {
log.Debugf("metric %s is stale. Purging data from memory.", key)
ms.Lock()
delete(ms.Metrics[org], key)
orgActiveMetrics.Set(float64(len(ms.Metrics[org])))
// note: this is racey. if a metric has just become unstale, it may have created a new chunk,
// pruning an older one. in which case we double-subtract those points
// hard to fix and super rare. see https://github.com/grafana/metrictank/pull/1242
totalPoints.DecUint64(uint64(points))
ms.Unlock()
}
}
ms.RLock()
orgActive := len(ms.Metrics[org])
orgActiveMetrics.Set(float64(orgActive))
ms.RUnlock()
// If this org has no keys, then delete the org from the map
if orgActive == 0 {
// To prevent races, we need to check that there are still no metrics for the org while holding a write lock
ms.Lock()
orgActive = len(ms.Metrics[org])
if orgActive == 0 {
delete(ms.Metrics, org)
}
ms.Unlock()
}
}
// Get the totalActive across all orgs.
totalActive := 0
ms.RLock()
for o := range ms.Metrics {
totalActive += len(ms.Metrics[o])
}
ms.RUnlock()
metricsActive.Set(totalActive)
}
}
|
go
|
// GC runs forever. Once per gcInterval (plus a minute of slack past the
// interval boundary) it scans every org's metrics, persists/purges stale
// chunks and metrics, removes empty orgs, and refreshes the per-org and
// total active-metric gauges. It must be the only goroutine that deletes
// from ms.Metrics.
func (ms *AggMetrics) GC() {
for {
// sleep until one minute past the next gcInterval boundary
unix := time.Duration(time.Now().UnixNano())
diff := ms.gcInterval - (unix % ms.gcInterval)
time.Sleep(diff + time.Minute)
log.Info("checking for stale chunks that need persisting.")
now := uint32(time.Now().Unix())
chunkMinTs := now - uint32(ms.chunkMaxStale)
metricMinTs := now - uint32(ms.metricMaxStale)
// as this is the only goroutine that can delete from ms.Metrics
// we only need to lock long enough to get the list of orgs, then for each org
// get the list of active metrics.
// It doesn't matter if new orgs or metrics are added while we iterate these lists.
ms.RLock()
orgs := make([]uint32, 0, len(ms.Metrics))
for o := range ms.Metrics {
orgs = append(orgs, o)
}
ms.RUnlock()
for _, org := range orgs {
orgActiveMetrics := promActiveMetrics.WithLabelValues(strconv.Itoa(int(org)))
// NOTE(review): len(ms.Metrics[org]) below is read without holding the
// lock — only used as a capacity hint, but technically racy; confirm.
keys := make([]schema.Key, 0, len(ms.Metrics[org]))
ms.RLock()
for k := range ms.Metrics[org] {
keys = append(keys, k)
}
ms.RUnlock()
for _, key := range keys {
gcMetric.Inc()
ms.RLock()
a := ms.Metrics[org][key]
ms.RUnlock()
// run the metric's GC outside the lock; it may persist chunks, which can be slow
points, stale := a.GC(now, chunkMinTs, metricMinTs)
if stale {
log.Debugf("metric %s is stale. Purging data from memory.", key)
ms.Lock()
delete(ms.Metrics[org], key)
orgActiveMetrics.Set(float64(len(ms.Metrics[org])))
// note: this is racey. if a metric has just become unstale, it may have created a new chunk,
// pruning an older one. in which case we double-subtract those points
// hard to fix and super rare. see https://github.com/grafana/metrictank/pull/1242
totalPoints.DecUint64(uint64(points))
ms.Unlock()
}
}
ms.RLock()
orgActive := len(ms.Metrics[org])
orgActiveMetrics.Set(float64(orgActive))
ms.RUnlock()
// If this org has no keys, then delete the org from the map
if orgActive == 0 {
// To prevent races, we need to check that there are still no metrics for the org while holding a write lock
ms.Lock()
orgActive = len(ms.Metrics[org])
if orgActive == 0 {
delete(ms.Metrics, org)
}
ms.Unlock()
}
}
// Get the totalActive across all orgs.
totalActive := 0
ms.RLock()
for o := range ms.Metrics {
totalActive += len(ms.Metrics[o])
}
ms.RUnlock()
metricsActive.Set(totalActive)
}
}
|
[
"func",
"(",
"ms",
"*",
"AggMetrics",
")",
"GC",
"(",
")",
"{",
"for",
"{",
"unix",
":=",
"time",
".",
"Duration",
"(",
"time",
".",
"Now",
"(",
")",
".",
"UnixNano",
"(",
")",
")",
"\n",
"diff",
":=",
"ms",
".",
"gcInterval",
"-",
"(",
"unix",
"%",
"ms",
".",
"gcInterval",
")",
"\n",
"time",
".",
"Sleep",
"(",
"diff",
"+",
"time",
".",
"Minute",
")",
"\n",
"log",
".",
"Info",
"(",
"\"",
"\"",
")",
"\n",
"now",
":=",
"uint32",
"(",
"time",
".",
"Now",
"(",
")",
".",
"Unix",
"(",
")",
")",
"\n",
"chunkMinTs",
":=",
"now",
"-",
"uint32",
"(",
"ms",
".",
"chunkMaxStale",
")",
"\n",
"metricMinTs",
":=",
"now",
"-",
"uint32",
"(",
"ms",
".",
"metricMaxStale",
")",
"\n\n",
"// as this is the only goroutine that can delete from ms.Metrics",
"// we only need to lock long enough to get the list of orgs, then for each org",
"// get the list of active metrics.",
"// It doesn't matter if new orgs or metrics are added while we iterate these lists.",
"ms",
".",
"RLock",
"(",
")",
"\n",
"orgs",
":=",
"make",
"(",
"[",
"]",
"uint32",
",",
"0",
",",
"len",
"(",
"ms",
".",
"Metrics",
")",
")",
"\n",
"for",
"o",
":=",
"range",
"ms",
".",
"Metrics",
"{",
"orgs",
"=",
"append",
"(",
"orgs",
",",
"o",
")",
"\n",
"}",
"\n",
"ms",
".",
"RUnlock",
"(",
")",
"\n",
"for",
"_",
",",
"org",
":=",
"range",
"orgs",
"{",
"orgActiveMetrics",
":=",
"promActiveMetrics",
".",
"WithLabelValues",
"(",
"strconv",
".",
"Itoa",
"(",
"int",
"(",
"org",
")",
")",
")",
"\n",
"keys",
":=",
"make",
"(",
"[",
"]",
"schema",
".",
"Key",
",",
"0",
",",
"len",
"(",
"ms",
".",
"Metrics",
"[",
"org",
"]",
")",
")",
"\n",
"ms",
".",
"RLock",
"(",
")",
"\n",
"for",
"k",
":=",
"range",
"ms",
".",
"Metrics",
"[",
"org",
"]",
"{",
"keys",
"=",
"append",
"(",
"keys",
",",
"k",
")",
"\n",
"}",
"\n",
"ms",
".",
"RUnlock",
"(",
")",
"\n",
"for",
"_",
",",
"key",
":=",
"range",
"keys",
"{",
"gcMetric",
".",
"Inc",
"(",
")",
"\n",
"ms",
".",
"RLock",
"(",
")",
"\n",
"a",
":=",
"ms",
".",
"Metrics",
"[",
"org",
"]",
"[",
"key",
"]",
"\n",
"ms",
".",
"RUnlock",
"(",
")",
"\n",
"points",
",",
"stale",
":=",
"a",
".",
"GC",
"(",
"now",
",",
"chunkMinTs",
",",
"metricMinTs",
")",
"\n",
"if",
"stale",
"{",
"log",
".",
"Debugf",
"(",
"\"",
"\"",
",",
"key",
")",
"\n",
"ms",
".",
"Lock",
"(",
")",
"\n",
"delete",
"(",
"ms",
".",
"Metrics",
"[",
"org",
"]",
",",
"key",
")",
"\n",
"orgActiveMetrics",
".",
"Set",
"(",
"float64",
"(",
"len",
"(",
"ms",
".",
"Metrics",
"[",
"org",
"]",
")",
")",
")",
"\n",
"// note: this is racey. if a metric has just become unstale, it may have created a new chunk,",
"// pruning an older one. in which case we double-subtract those points",
"// hard to fix and super rare. see https://github.com/grafana/metrictank/pull/1242",
"totalPoints",
".",
"DecUint64",
"(",
"uint64",
"(",
"points",
")",
")",
"\n",
"ms",
".",
"Unlock",
"(",
")",
"\n",
"}",
"\n",
"}",
"\n",
"ms",
".",
"RLock",
"(",
")",
"\n",
"orgActive",
":=",
"len",
"(",
"ms",
".",
"Metrics",
"[",
"org",
"]",
")",
"\n",
"orgActiveMetrics",
".",
"Set",
"(",
"float64",
"(",
"orgActive",
")",
")",
"\n",
"ms",
".",
"RUnlock",
"(",
")",
"\n\n",
"// If this org has no keys, then delete the org from the map",
"if",
"orgActive",
"==",
"0",
"{",
"// To prevent races, we need to check that there are still no metrics for the org while holding a write lock",
"ms",
".",
"Lock",
"(",
")",
"\n",
"orgActive",
"=",
"len",
"(",
"ms",
".",
"Metrics",
"[",
"org",
"]",
")",
"\n",
"if",
"orgActive",
"==",
"0",
"{",
"delete",
"(",
"ms",
".",
"Metrics",
",",
"org",
")",
"\n",
"}",
"\n",
"ms",
".",
"Unlock",
"(",
")",
"\n",
"}",
"\n",
"}",
"\n\n",
"// Get the totalActive across all orgs.",
"totalActive",
":=",
"0",
"\n",
"ms",
".",
"RLock",
"(",
")",
"\n",
"for",
"o",
":=",
"range",
"ms",
".",
"Metrics",
"{",
"totalActive",
"+=",
"len",
"(",
"ms",
".",
"Metrics",
"[",
"o",
"]",
")",
"\n",
"}",
"\n",
"ms",
".",
"RUnlock",
"(",
")",
"\n",
"metricsActive",
".",
"Set",
"(",
"totalActive",
")",
"\n",
"}",
"\n",
"}"
] |
// periodically scan chunks and close any that have not received data in a while
|
[
"periodically",
"scan",
"chunks",
"and",
"close",
"any",
"that",
"have",
"not",
"received",
"data",
"in",
"a",
"while"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/mdata/aggmetrics.go#L47-L119
|
train
|
grafana/metrictank
|
cmd/mt-store-cat/chunk.go
|
printChunkSummary
|
// printChunkSummary prints, per table, a summary of the chunks in the store
// matching the given metrics (or of all chunks when metrics is empty),
// grouped in buckets of groupTTL size by their remaining TTL.
// NOTE(review): ctx is currently unused; kept for interface compatibility.
func printChunkSummary(ctx context.Context, store *cassandra.CassandraStore, tables []cassandra.Table, metrics []Metric, groupTTL string) error {
	now := uint32(time.Now().Unix())
	endMonth := now / cassandra.Month_sec
	for _, tbl := range tables {
		// per store.FindExistingTables(), actual TTL may be up to 2x what's in tablename.
		// we query up to 4x so that we also include data that should have been dropped already but still sticks around for whatever reason.
		start := now - uint32(4*tbl.TTL*3600)
		startMonth := start / cassandra.Month_sec
		fmt.Println("## Table", tbl.Name)
		if len(metrics) == 0 {
			// no metrics specified: summarize every key in the table
			query := fmt.Sprintf("select key, ttl(data) from %s", tbl.Name)
			iter := store.Session.Query(query).Iter()
			showKeyTTL(iter, groupTTL)
		} else {
			for _, metric := range metrics {
				// chunk row keys are per-month: "<amkey>_<monthnumber>"
				for num := startMonth; num <= endMonth; num++ {
					rowKey := fmt.Sprintf("%s_%d", metric.AMKey.String(), num)
					query := fmt.Sprintf("select key, ttl(data) from %s where key=?", tbl.Name)
					iter := store.Session.Query(query, rowKey).Iter()
					showKeyTTL(iter, groupTTL)
				}
			}
		}
	}
	return nil
}
|
go
|
// printChunkSummary prints, per table, a summary of the chunks in the store
// matching the given metrics (or of all chunks when metrics is empty),
// grouped in buckets of groupTTL size by their remaining TTL.
// NOTE(review): ctx is currently unused; kept for interface compatibility.
func printChunkSummary(ctx context.Context, store *cassandra.CassandraStore, tables []cassandra.Table, metrics []Metric, groupTTL string) error {
	now := uint32(time.Now().Unix())
	endMonth := now / cassandra.Month_sec
	for _, tbl := range tables {
		// per store.FindExistingTables(), actual TTL may be up to 2x what's in tablename.
		// we query up to 4x so that we also include data that should have been dropped already but still sticks around for whatever reason.
		start := now - uint32(4*tbl.TTL*3600)
		startMonth := start / cassandra.Month_sec
		fmt.Println("## Table", tbl.Name)
		if len(metrics) == 0 {
			// no metrics specified: summarize every key in the table
			query := fmt.Sprintf("select key, ttl(data) from %s", tbl.Name)
			iter := store.Session.Query(query).Iter()
			showKeyTTL(iter, groupTTL)
		} else {
			for _, metric := range metrics {
				// chunk row keys are per-month: "<amkey>_<monthnumber>"
				for num := startMonth; num <= endMonth; num++ {
					rowKey := fmt.Sprintf("%s_%d", metric.AMKey.String(), num)
					query := fmt.Sprintf("select key, ttl(data) from %s where key=?", tbl.Name)
					iter := store.Session.Query(query, rowKey).Iter()
					showKeyTTL(iter, groupTTL)
				}
			}
		}
	}
	return nil
}
|
[
"func",
"printChunkSummary",
"(",
"ctx",
"context",
".",
"Context",
",",
"store",
"*",
"cassandra",
".",
"CassandraStore",
",",
"tables",
"[",
"]",
"cassandra",
".",
"Table",
",",
"metrics",
"[",
"]",
"Metric",
",",
"groupTTL",
"string",
")",
"error",
"{",
"now",
":=",
"uint32",
"(",
"time",
".",
"Now",
"(",
")",
".",
"Unix",
"(",
")",
")",
"\n",
"endMonth",
":=",
"now",
"/",
"cassandra",
".",
"Month_sec",
"\n\n",
"for",
"_",
",",
"tbl",
":=",
"range",
"tables",
"{",
"// per store.FindExistingTables(), actual TTL may be up to 2x what's in tablename.",
"// we query up to 4x so that we also include data that should have been dropped already but still sticks around for whatever reason.",
"start",
":=",
"now",
"-",
"uint32",
"(",
"4",
"*",
"tbl",
".",
"TTL",
"*",
"3600",
")",
"\n",
"startMonth",
":=",
"start",
"/",
"cassandra",
".",
"Month_sec",
"\n",
"fmt",
".",
"Println",
"(",
"\"",
"\"",
",",
"tbl",
".",
"Name",
")",
"\n",
"if",
"len",
"(",
"metrics",
")",
"==",
"0",
"{",
"query",
":=",
"fmt",
".",
"Sprintf",
"(",
"\"",
"\"",
",",
"tbl",
".",
"Name",
")",
"\n",
"iter",
":=",
"store",
".",
"Session",
".",
"Query",
"(",
"query",
")",
".",
"Iter",
"(",
")",
"\n",
"showKeyTTL",
"(",
"iter",
",",
"groupTTL",
")",
"\n",
"}",
"else",
"{",
"for",
"_",
",",
"metric",
":=",
"range",
"metrics",
"{",
"for",
"num",
":=",
"startMonth",
";",
"num",
"<=",
"endMonth",
";",
"num",
"+=",
"1",
"{",
"row_key",
":=",
"fmt",
".",
"Sprintf",
"(",
"\"",
"\"",
",",
"metric",
".",
"AMKey",
".",
"String",
"(",
")",
",",
"num",
")",
"\n",
"query",
":=",
"fmt",
".",
"Sprintf",
"(",
"\"",
"\"",
",",
"tbl",
".",
"Name",
")",
"\n",
"iter",
":=",
"store",
".",
"Session",
".",
"Query",
"(",
"query",
",",
"row_key",
")",
".",
"Iter",
"(",
")",
"\n",
"showKeyTTL",
"(",
"iter",
",",
"groupTTL",
")",
"\n",
"}",
"\n",
"}",
"\n",
"}",
"\n",
"}",
"\n",
"return",
"nil",
"\n",
"}"
] |
// printChunkSummary prints a summary of chunks in the store matching the given conditions, grouped in buckets of groupTTL size by their TTL
|
[
"printChunkSummary",
"prints",
"a",
"summary",
"of",
"chunks",
"in",
"the",
"store",
"matching",
"the",
"given",
"conditions",
"grouped",
"in",
"buckets",
"of",
"groupTTL",
"size",
"by",
"their",
"TTL"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/cmd/mt-store-cat/chunk.go#L13-L39
|
train
|
grafana/metrictank
|
cmd/mt-whisper-importer-reader/main.go
|
getMetricName
|
// getMetricName generates the metric name for a whisper file: it strips the
// whisper base directory and the ".wsp" suffix, converts path separators to
// dots, and prepends the configured name prefix.
func getMetricName(file string) string {
	file = strings.TrimPrefix(file, *whisperDirectory)
	// remove all leading '/' from the file name. TrimLeft also safely handles
	// the degenerate case where nothing remains after trimming, where the
	// original `for file[0] == '/'` loop would panic on the empty string.
	file = strings.TrimLeft(file, "/")
	return *namePrefix + strings.Replace(strings.TrimSuffix(file, ".wsp"), "/", ".", -1)
}
|
go
|
// getMetricName generates the metric name for a whisper file: it strips the
// whisper base directory and the ".wsp" suffix, converts path separators to
// dots, and prepends the configured name prefix.
func getMetricName(file string) string {
	file = strings.TrimPrefix(file, *whisperDirectory)
	// remove all leading '/' from the file name. TrimLeft also safely handles
	// the degenerate case where nothing remains after trimming, where the
	// original `for file[0] == '/'` loop would panic on the empty string.
	file = strings.TrimLeft(file, "/")
	return *namePrefix + strings.Replace(strings.TrimSuffix(file, ".wsp"), "/", ".", -1)
}
|
[
"func",
"getMetricName",
"(",
"file",
"string",
")",
"string",
"{",
"// remove all leading '/' from file name",
"file",
"=",
"strings",
".",
"TrimPrefix",
"(",
"file",
",",
"*",
"whisperDirectory",
")",
"\n",
"for",
"file",
"[",
"0",
"]",
"==",
"'/'",
"{",
"file",
"=",
"file",
"[",
"1",
":",
"]",
"\n",
"}",
"\n\n",
"return",
"*",
"namePrefix",
"+",
"strings",
".",
"Replace",
"(",
"strings",
".",
"TrimSuffix",
"(",
"file",
",",
"\"",
"\"",
")",
",",
"\"",
"\"",
",",
"\"",
"\"",
",",
"-",
"1",
")",
"\n",
"}"
] |
// generate the metric name based on the file name and given prefix
|
[
"generate",
"the",
"metric",
"name",
"based",
"on",
"the",
"file",
"name",
"and",
"given",
"prefix"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/cmd/mt-whisper-importer-reader/main.go#L231-L239
|
train
|
grafana/metrictank
|
cmd/mt-whisper-importer-reader/main.go
|
getFileListIntoChan
|
// getFileListIntoChan scans the whisper directory and feeds the path of every
// whisper file that passes the name filter (and is not already marked done in
// pos) into fileChan. fileChan is closed when the walk completes.
func getFileListIntoChan(pos *posTracker, fileChan chan string) {
	filepath.Walk(
		*whisperDirectory,
		func(path string, info os.FileInfo, err error) error {
			if err != nil {
				// best-effort import: log unreadable entries and keep walking
				// (the original silently ignored this error)
				log.Debugf("Skipping %s due to walk error: %s", path, err.Error())
				return nil
			}
			if path == *whisperDirectory {
				return nil
			}
			// only whisper files are of interest; check the extension before
			// the name filter so directories and unrelated files don't get
			// counted as "skipped"
			if len(path) < 4 || path[len(path)-4:] != ".wsp" {
				return nil
			}
			name := getMetricName(path)
			// bug fix: match the filter against the metric name itself; the
			// original applied getMetricName a second time to the already
			// converted name
			if !nameFilter.Match([]byte(name)) {
				log.Debugf("Skipping file %s with name %s", path, name)
				atomic.AddUint32(&skippedCount, 1)
				return nil
			}
			if pos != nil && pos.IsDone(path) {
				log.Debugf("Skipping file %s because it was listed as already done", path)
				return nil
			}
			fileChan <- path
			return nil
		},
	)
	close(fileChan)
}
|
go
|
// getFileListIntoChan scans the whisper directory and feeds the path of every
// whisper file that passes the name filter (and is not already marked done in
// pos) into fileChan. fileChan is closed when the walk completes.
func getFileListIntoChan(pos *posTracker, fileChan chan string) {
	filepath.Walk(
		*whisperDirectory,
		func(path string, info os.FileInfo, err error) error {
			if err != nil {
				// best-effort import: log unreadable entries and keep walking
				// (the original silently ignored this error)
				log.Debugf("Skipping %s due to walk error: %s", path, err.Error())
				return nil
			}
			if path == *whisperDirectory {
				return nil
			}
			// only whisper files are of interest; check the extension before
			// the name filter so directories and unrelated files don't get
			// counted as "skipped"
			if len(path) < 4 || path[len(path)-4:] != ".wsp" {
				return nil
			}
			name := getMetricName(path)
			// bug fix: match the filter against the metric name itself; the
			// original applied getMetricName a second time to the already
			// converted name
			if !nameFilter.Match([]byte(name)) {
				log.Debugf("Skipping file %s with name %s", path, name)
				atomic.AddUint32(&skippedCount, 1)
				return nil
			}
			if pos != nil && pos.IsDone(path) {
				log.Debugf("Skipping file %s because it was listed as already done", path)
				return nil
			}
			fileChan <- path
			return nil
		},
	)
	close(fileChan)
}
|
[
"func",
"getFileListIntoChan",
"(",
"pos",
"*",
"posTracker",
",",
"fileChan",
"chan",
"string",
")",
"{",
"filepath",
".",
"Walk",
"(",
"*",
"whisperDirectory",
",",
"func",
"(",
"path",
"string",
",",
"info",
"os",
".",
"FileInfo",
",",
"err",
"error",
")",
"error",
"{",
"if",
"path",
"==",
"*",
"whisperDirectory",
"{",
"return",
"nil",
"\n",
"}",
"\n",
"name",
":=",
"getMetricName",
"(",
"path",
")",
"\n",
"if",
"!",
"nameFilter",
".",
"Match",
"(",
"[",
"]",
"byte",
"(",
"getMetricName",
"(",
"name",
")",
")",
")",
"{",
"log",
".",
"Debugf",
"(",
"\"",
"\"",
",",
"path",
",",
"name",
")",
"\n",
"atomic",
".",
"AddUint32",
"(",
"&",
"skippedCount",
",",
"1",
")",
"\n",
"return",
"nil",
"\n",
"}",
"\n",
"if",
"len",
"(",
"path",
")",
"<",
"4",
"||",
"path",
"[",
"len",
"(",
"path",
")",
"-",
"4",
":",
"]",
"!=",
"\"",
"\"",
"{",
"return",
"nil",
"\n",
"}",
"\n",
"if",
"pos",
"!=",
"nil",
"&&",
"pos",
".",
"IsDone",
"(",
"path",
")",
"{",
"log",
".",
"Debugf",
"(",
"\"",
"\"",
",",
"path",
")",
"\n",
"return",
"nil",
"\n",
"}",
"\n\n",
"fileChan",
"<-",
"path",
"\n",
"return",
"nil",
"\n",
"}",
",",
")",
"\n\n",
"close",
"(",
"fileChan",
")",
"\n",
"}"
] |
// scan a directory and feed the list of whisper files relative to base into the given channel
|
[
"scan",
"a",
"directory",
"and",
"feed",
"the",
"list",
"of",
"whisper",
"files",
"relative",
"to",
"base",
"into",
"the",
"given",
"channel"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/cmd/mt-whisper-importer-reader/main.go#L399-L426
|
train
|
grafana/metrictank
|
store/cassandra/cassandra.go
|
ConvertTimeout
|
// ConvertTimeout parses a timeout setting that may be given either as a bare
// integer (deprecated; interpreted in units of defaultUnit) or as a duration
// string such as "5s". An unparseable value is fatal.
func ConvertTimeout(timeout string, defaultUnit time.Duration) time.Duration {
	// legacy form: a plain integer, scaled by the default unit
	if n, err := strconv.Atoi(timeout); err == nil {
		log.Warn("cassandra_store: specifying the timeout as integer is deprecated, please use a duration value")
		return defaultUnit * time.Duration(n)
	}
	d, err := time.ParseDuration(timeout)
	if err != nil {
		log.Fatalf("cassandra_store: invalid duration value %q", timeout)
	}
	return d
}
|
go
|
// ConvertTimeout parses a timeout setting that may be given either as a bare
// integer (deprecated; interpreted in units of defaultUnit) or as a duration
// string such as "5s". An unparseable value is fatal.
func ConvertTimeout(timeout string, defaultUnit time.Duration) time.Duration {
	// legacy form: a plain integer, scaled by the default unit
	if n, err := strconv.Atoi(timeout); err == nil {
		log.Warn("cassandra_store: specifying the timeout as integer is deprecated, please use a duration value")
		return defaultUnit * time.Duration(n)
	}
	d, err := time.ParseDuration(timeout)
	if err != nil {
		log.Fatalf("cassandra_store: invalid duration value %q", timeout)
	}
	return d
}
|
[
"func",
"ConvertTimeout",
"(",
"timeout",
"string",
",",
"defaultUnit",
"time",
".",
"Duration",
")",
"time",
".",
"Duration",
"{",
"if",
"timeoutI",
",",
"err",
":=",
"strconv",
".",
"Atoi",
"(",
"timeout",
")",
";",
"err",
"==",
"nil",
"{",
"log",
".",
"Warn",
"(",
"\"",
"\"",
")",
"\n",
"return",
"time",
".",
"Duration",
"(",
"timeoutI",
")",
"*",
"defaultUnit",
"\n",
"}",
"\n",
"timeoutD",
",",
"err",
":=",
"time",
".",
"ParseDuration",
"(",
"timeout",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"log",
".",
"Fatalf",
"(",
"\"",
"\"",
",",
"timeout",
")",
"\n",
"}",
"\n",
"return",
"timeoutD",
"\n",
"}"
] |
// ConvertTimeout provides backwards compatibility for values that used to be specified as integers,
// while also allowing them to be specified as durations.
|
[
"ConvertTimeout",
"provides",
"backwards",
"compatibility",
"for",
"values",
"that",
"used",
"to",
"be",
"specified",
"as",
"integers",
"while",
"also",
"allowing",
"them",
"to",
"be",
"specified",
"as",
"durations",
"."
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/store/cassandra/cassandra.go#L96-L106
|
train
|
grafana/metrictank
|
store/cassandra/cassandra.go
|
Search
|
// Search looks up chunks for key within [start, end) in the cassandra table
// configured for the given ttl.
// start is inclusive, end is exclusive.
// Returns errTableNotFound if no table exists for that ttl.
func (c *CassandraStore) Search(ctx context.Context, key schema.AMKey, ttl, start, end uint32) ([]chunk.IterGen, error) {
table, ok := c.TTLTables[ttl]
if !ok {
return nil, errTableNotFound
}
return c.SearchTable(ctx, key, table, start, end)
}
|
go
|
// Search looks up chunks for key within [start, end) in the cassandra table
// configured for the given ttl.
// start is inclusive, end is exclusive.
// Returns errTableNotFound if no table exists for that ttl.
func (c *CassandraStore) Search(ctx context.Context, key schema.AMKey, ttl, start, end uint32) ([]chunk.IterGen, error) {
table, ok := c.TTLTables[ttl]
if !ok {
return nil, errTableNotFound
}
return c.SearchTable(ctx, key, table, start, end)
}
|
[
"func",
"(",
"c",
"*",
"CassandraStore",
")",
"Search",
"(",
"ctx",
"context",
".",
"Context",
",",
"key",
"schema",
".",
"AMKey",
",",
"ttl",
",",
"start",
",",
"end",
"uint32",
")",
"(",
"[",
"]",
"chunk",
".",
"IterGen",
",",
"error",
")",
"{",
"table",
",",
"ok",
":=",
"c",
".",
"TTLTables",
"[",
"ttl",
"]",
"\n",
"if",
"!",
"ok",
"{",
"return",
"nil",
",",
"errTableNotFound",
"\n",
"}",
"\n",
"return",
"c",
".",
"SearchTable",
"(",
"ctx",
",",
"key",
",",
"table",
",",
"start",
",",
"end",
")",
"\n",
"}"
] |
// Basic search of cassandra in the table for given ttl
// start inclusive, end exclusive
|
[
"Basic",
"search",
"of",
"cassandra",
"in",
"the",
"table",
"for",
"given",
"ttl",
"start",
"inclusive",
"end",
"exclusive"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/store/cassandra/cassandra.go#L411-L417
|
train
|
grafana/metrictank
|
conf/indexrules.go
|
ReadIndexRules
|
// ReadIndexRules parses an index-rules.conf file and returns the IndexRules
// defined in it (in file order), appended onto the rule set created by
// NewIndexRules.
// Per section:
//   - the rule name is the section header: the first line, trimmed of spaces
//     and square brackets; sections with an empty name or a name starting
//     with '#' are skipped
//   - "pattern" must compile as a regular expression
//   - "max-stale" must parse as a duration, interpreted in seconds
func ReadIndexRules(file string) (IndexRules, error) {
config, err := configparser.Read(file)
if err != nil {
return IndexRules{}, err
}
sections, err := config.AllSections()
if err != nil {
return IndexRules{}, err
}
result := NewIndexRules()
for _, s := range sections {
item := IndexRule{}
// the section header is the first line of the section's string form
item.Name = strings.Trim(strings.SplitN(s.String(), "\n", 2)[0], " []")
if item.Name == "" || strings.HasPrefix(item.Name, "#") {
continue
}
item.Pattern, err = regexp.Compile(s.ValueOf("pattern"))
if err != nil {
return IndexRules{}, fmt.Errorf("[%s]: failed to parse pattern %q: %s", item.Name, s.ValueOf("pattern"), err.Error())
}
duration, err := dur.ParseDuration(s.ValueOf("max-stale"))
if err != nil {
return IndexRules{}, fmt.Errorf("[%s]: failed to parse max-stale %q: %s", item.Name, s.ValueOf("max-stale"), err.Error())
}
// dur.ParseDuration yields a count of seconds
item.MaxStale = time.Duration(duration) * time.Second
result.Rules = append(result.Rules, item)
}
return result, nil
}
|
go
|
// ReadIndexRules parses an index-rules.conf file and returns the IndexRules
// defined in it (in file order), appended onto the rule set created by
// NewIndexRules.
// Per section:
//   - the rule name is the section header: the first line, trimmed of spaces
//     and square brackets; sections with an empty name or a name starting
//     with '#' are skipped
//   - "pattern" must compile as a regular expression
//   - "max-stale" must parse as a duration, interpreted in seconds
func ReadIndexRules(file string) (IndexRules, error) {
config, err := configparser.Read(file)
if err != nil {
return IndexRules{}, err
}
sections, err := config.AllSections()
if err != nil {
return IndexRules{}, err
}
result := NewIndexRules()
for _, s := range sections {
item := IndexRule{}
// the section header is the first line of the section's string form
item.Name = strings.Trim(strings.SplitN(s.String(), "\n", 2)[0], " []")
if item.Name == "" || strings.HasPrefix(item.Name, "#") {
continue
}
item.Pattern, err = regexp.Compile(s.ValueOf("pattern"))
if err != nil {
return IndexRules{}, fmt.Errorf("[%s]: failed to parse pattern %q: %s", item.Name, s.ValueOf("pattern"), err.Error())
}
duration, err := dur.ParseDuration(s.ValueOf("max-stale"))
if err != nil {
return IndexRules{}, fmt.Errorf("[%s]: failed to parse max-stale %q: %s", item.Name, s.ValueOf("max-stale"), err.Error())
}
// dur.ParseDuration yields a count of seconds
item.MaxStale = time.Duration(duration) * time.Second
result.Rules = append(result.Rules, item)
}
return result, nil
}
|
[
"func",
"ReadIndexRules",
"(",
"file",
"string",
")",
"(",
"IndexRules",
",",
"error",
")",
"{",
"config",
",",
"err",
":=",
"configparser",
".",
"Read",
"(",
"file",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"IndexRules",
"{",
"}",
",",
"err",
"\n",
"}",
"\n",
"sections",
",",
"err",
":=",
"config",
".",
"AllSections",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"IndexRules",
"{",
"}",
",",
"err",
"\n",
"}",
"\n\n",
"result",
":=",
"NewIndexRules",
"(",
")",
"\n\n",
"for",
"_",
",",
"s",
":=",
"range",
"sections",
"{",
"item",
":=",
"IndexRule",
"{",
"}",
"\n",
"item",
".",
"Name",
"=",
"strings",
".",
"Trim",
"(",
"strings",
".",
"SplitN",
"(",
"s",
".",
"String",
"(",
")",
",",
"\"",
"\\n",
"\"",
",",
"2",
")",
"[",
"0",
"]",
",",
"\"",
"\"",
")",
"\n",
"if",
"item",
".",
"Name",
"==",
"\"",
"\"",
"||",
"strings",
".",
"HasPrefix",
"(",
"item",
".",
"Name",
",",
"\"",
"\"",
")",
"{",
"continue",
"\n",
"}",
"\n\n",
"item",
".",
"Pattern",
",",
"err",
"=",
"regexp",
".",
"Compile",
"(",
"s",
".",
"ValueOf",
"(",
"\"",
"\"",
")",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"IndexRules",
"{",
"}",
",",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"item",
".",
"Name",
",",
"s",
".",
"ValueOf",
"(",
"\"",
"\"",
")",
",",
"err",
".",
"Error",
"(",
")",
")",
"\n",
"}",
"\n",
"duration",
",",
"err",
":=",
"dur",
".",
"ParseDuration",
"(",
"s",
".",
"ValueOf",
"(",
"\"",
"\"",
")",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"IndexRules",
"{",
"}",
",",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"item",
".",
"Name",
",",
"s",
".",
"ValueOf",
"(",
"\"",
"\"",
")",
",",
"err",
".",
"Error",
"(",
")",
")",
"\n",
"}",
"\n",
"item",
".",
"MaxStale",
"=",
"time",
".",
"Duration",
"(",
"duration",
")",
"*",
"time",
".",
"Second",
"\n\n",
"result",
".",
"Rules",
"=",
"append",
"(",
"result",
".",
"Rules",
",",
"item",
")",
"\n",
"}",
"\n\n",
"return",
"result",
",",
"nil",
"\n",
"}"
] |
// ReadIndexRules returns the index rules defined in an index-rules.conf file
// and adds the default
|
[
"ReadIndexRules",
"returns",
"the",
"defined",
"index",
"rule",
"from",
"a",
"index",
"-",
"rules",
".",
"conf",
"file",
"and",
"adds",
"the",
"default"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/conf/indexrules.go#L38-L71
|
train
|
grafana/metrictank
|
conf/indexrules.go
|
Match
|
// Match returns the first rule whose pattern matches the given metric name,
// together with its position in the rule list. When no configured rule
// matches, the default catch-all rule is returned with index len(Rules),
// so a valid setting is always found.
func (a IndexRules) Match(metric string) (uint16, IndexRule) {
	for idx, rule := range a.Rules {
		if rule.Pattern.MatchString(metric) {
			return uint16(idx), rule
		}
	}
	// nothing matched: fall back to the default catch-all
	return uint16(len(a.Rules)), a.Default
}
|
go
|
// Match returns the first rule whose pattern matches the given metric name,
// together with its position in the rule list. When no configured rule
// matches, the default catch-all rule is returned with index len(Rules),
// so a valid setting is always found.
func (a IndexRules) Match(metric string) (uint16, IndexRule) {
	for idx, rule := range a.Rules {
		if rule.Pattern.MatchString(metric) {
			return uint16(idx), rule
		}
	}
	// nothing matched: fall back to the default catch-all
	return uint16(len(a.Rules)), a.Default
}
|
[
"func",
"(",
"a",
"IndexRules",
")",
"Match",
"(",
"metric",
"string",
")",
"(",
"uint16",
",",
"IndexRule",
")",
"{",
"for",
"i",
",",
"s",
":=",
"range",
"a",
".",
"Rules",
"{",
"if",
"s",
".",
"Pattern",
".",
"MatchString",
"(",
"metric",
")",
"{",
"return",
"uint16",
"(",
"i",
")",
",",
"s",
"\n",
"}",
"\n",
"}",
"\n",
"return",
"uint16",
"(",
"len",
"(",
"a",
".",
"Rules",
")",
")",
",",
"a",
".",
"Default",
"\n",
"}"
] |
// Match returns the correct index rule setting for the given metric
// it can always find a valid setting, because there's a default catch all
// also returns the index of the setting, to efficiently reference it
|
[
"Match",
"returns",
"the",
"correct",
"index",
"rule",
"setting",
"for",
"the",
"given",
"metric",
"it",
"can",
"always",
"find",
"a",
"valid",
"setting",
"because",
"there",
"s",
"a",
"default",
"catch",
"all",
"also",
"returns",
"the",
"index",
"of",
"the",
"setting",
"to",
"efficiently",
"reference",
"it"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/conf/indexrules.go#L76-L83
|
train
|
grafana/metrictank
|
conf/indexrules.go
|
Get
|
func (a IndexRules) Get(i uint16) IndexRule {
if i >= uint16(len(a.Rules)) {
return a.Default
}
return a.Rules[i]
}
|
go
|
func (a IndexRules) Get(i uint16) IndexRule {
if i >= uint16(len(a.Rules)) {
return a.Default
}
return a.Rules[i]
}
|
[
"func",
"(",
"a",
"IndexRules",
")",
"Get",
"(",
"i",
"uint16",
")",
"IndexRule",
"{",
"if",
"i",
">=",
"uint16",
"(",
"len",
"(",
"a",
".",
"Rules",
")",
")",
"{",
"return",
"a",
".",
"Default",
"\n",
"}",
"\n",
"return",
"a",
".",
"Rules",
"[",
"i",
"]",
"\n",
"}"
] |
// Get returns the index rule setting corresponding to the given index
|
[
"Get",
"returns",
"the",
"index",
"rule",
"setting",
"corresponding",
"to",
"the",
"given",
"index"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/conf/indexrules.go#L86-L91
|
train
|
grafana/metrictank
|
conf/indexrules.go
|
Prunable
|
func (a IndexRules) Prunable() bool {
for _, r := range a.Rules {
if r.MaxStale > 0 {
return true
}
}
return (a.Default.MaxStale > 0)
}
|
go
|
func (a IndexRules) Prunable() bool {
for _, r := range a.Rules {
if r.MaxStale > 0 {
return true
}
}
return (a.Default.MaxStale > 0)
}
|
[
"func",
"(",
"a",
"IndexRules",
")",
"Prunable",
"(",
")",
"bool",
"{",
"for",
"_",
",",
"r",
":=",
"range",
"a",
".",
"Rules",
"{",
"if",
"r",
".",
"MaxStale",
">",
"0",
"{",
"return",
"true",
"\n",
"}",
"\n",
"}",
"\n",
"return",
"(",
"a",
".",
"Default",
".",
"MaxStale",
">",
"0",
")",
"\n",
"}"
] |
// Prunable returns whether there's any entries that require pruning
|
[
"Prunable",
"returns",
"whether",
"there",
"s",
"any",
"entries",
"that",
"require",
"pruning"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/conf/indexrules.go#L94-L101
|
train
|
grafana/metrictank
|
conf/indexrules.go
|
Cutoffs
|
func (a IndexRules) Cutoffs(now time.Time) []int64 {
out := make([]int64, len(a.Rules)+1)
for i := 0; i <= len(a.Rules); i++ {
var rule IndexRule
if i < len(a.Rules) {
rule = a.Rules[i]
} else {
rule = a.Default
}
if rule.MaxStale == 0 {
out[i] = 0
} else {
out[i] = int64(now.Add(rule.MaxStale * -1).Unix())
}
}
return out
}
|
go
|
func (a IndexRules) Cutoffs(now time.Time) []int64 {
out := make([]int64, len(a.Rules)+1)
for i := 0; i <= len(a.Rules); i++ {
var rule IndexRule
if i < len(a.Rules) {
rule = a.Rules[i]
} else {
rule = a.Default
}
if rule.MaxStale == 0 {
out[i] = 0
} else {
out[i] = int64(now.Add(rule.MaxStale * -1).Unix())
}
}
return out
}
|
[
"func",
"(",
"a",
"IndexRules",
")",
"Cutoffs",
"(",
"now",
"time",
".",
"Time",
")",
"[",
"]",
"int64",
"{",
"out",
":=",
"make",
"(",
"[",
"]",
"int64",
",",
"len",
"(",
"a",
".",
"Rules",
")",
"+",
"1",
")",
"\n",
"for",
"i",
":=",
"0",
";",
"i",
"<=",
"len",
"(",
"a",
".",
"Rules",
")",
";",
"i",
"++",
"{",
"var",
"rule",
"IndexRule",
"\n",
"if",
"i",
"<",
"len",
"(",
"a",
".",
"Rules",
")",
"{",
"rule",
"=",
"a",
".",
"Rules",
"[",
"i",
"]",
"\n",
"}",
"else",
"{",
"rule",
"=",
"a",
".",
"Default",
"\n",
"}",
"\n",
"if",
"rule",
".",
"MaxStale",
"==",
"0",
"{",
"out",
"[",
"i",
"]",
"=",
"0",
"\n",
"}",
"else",
"{",
"out",
"[",
"i",
"]",
"=",
"int64",
"(",
"now",
".",
"Add",
"(",
"rule",
".",
"MaxStale",
"*",
"-",
"1",
")",
".",
"Unix",
"(",
")",
")",
"\n",
"}",
"\n",
"}",
"\n",
"return",
"out",
"\n",
"}"
] |
// Cutoffs returns a set of cutoffs corresponding to a given timestamp and the set of all rules
|
[
"Cutoffs",
"returns",
"a",
"set",
"of",
"cutoffs",
"corresponding",
"to",
"a",
"given",
"timestamp",
"and",
"the",
"set",
"of",
"all",
"rules"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/conf/indexrules.go#L104-L120
|
train
|
grafana/metrictank
|
expr/plan.go
|
newplan
|
func newplan(e *expr, context Context, stable bool, reqs []Req) (GraphiteFunc, []Req, error) {
if e.etype != etFunc && e.etype != etName {
return nil, nil, errors.New("request must be a function call or metric pattern")
}
if e.etype == etName {
req := NewReq(e.str, context.from, context.to, context.consol)
reqs = append(reqs, req)
return NewGet(req), reqs, nil
} else if e.etype == etFunc && e.str == "seriesByTag" {
// `seriesByTag` function requires resolving expressions to series
// (similar to path expressions handled above). Since we need the
// arguments of seriesByTag to do the resolution, we store the function
// string back into the Query member of a new request to be parsed later.
// TODO - find a way to prevent this parse/encode/parse/encode loop
expressionStr := "seriesByTag(" + e.argsStr + ")"
req := NewReq(expressionStr, context.from, context.to, context.consol)
reqs = append(reqs, req)
return NewGet(req), reqs, nil
}
// here e.type is guaranteed to be etFunc
fdef, ok := funcs[e.str]
if !ok {
return nil, nil, ErrUnknownFunction(e.str)
}
if stable && !fdef.stable {
return nil, nil, ErrUnknownFunction(e.str)
}
fn := fdef.constr()
reqs, err := newplanFunc(e, fn, context, stable, reqs)
return fn, reqs, err
}
|
go
|
func newplan(e *expr, context Context, stable bool, reqs []Req) (GraphiteFunc, []Req, error) {
if e.etype != etFunc && e.etype != etName {
return nil, nil, errors.New("request must be a function call or metric pattern")
}
if e.etype == etName {
req := NewReq(e.str, context.from, context.to, context.consol)
reqs = append(reqs, req)
return NewGet(req), reqs, nil
} else if e.etype == etFunc && e.str == "seriesByTag" {
// `seriesByTag` function requires resolving expressions to series
// (similar to path expressions handled above). Since we need the
// arguments of seriesByTag to do the resolution, we store the function
// string back into the Query member of a new request to be parsed later.
// TODO - find a way to prevent this parse/encode/parse/encode loop
expressionStr := "seriesByTag(" + e.argsStr + ")"
req := NewReq(expressionStr, context.from, context.to, context.consol)
reqs = append(reqs, req)
return NewGet(req), reqs, nil
}
// here e.type is guaranteed to be etFunc
fdef, ok := funcs[e.str]
if !ok {
return nil, nil, ErrUnknownFunction(e.str)
}
if stable && !fdef.stable {
return nil, nil, ErrUnknownFunction(e.str)
}
fn := fdef.constr()
reqs, err := newplanFunc(e, fn, context, stable, reqs)
return fn, reqs, err
}
|
[
"func",
"newplan",
"(",
"e",
"*",
"expr",
",",
"context",
"Context",
",",
"stable",
"bool",
",",
"reqs",
"[",
"]",
"Req",
")",
"(",
"GraphiteFunc",
",",
"[",
"]",
"Req",
",",
"error",
")",
"{",
"if",
"e",
".",
"etype",
"!=",
"etFunc",
"&&",
"e",
".",
"etype",
"!=",
"etName",
"{",
"return",
"nil",
",",
"nil",
",",
"errors",
".",
"New",
"(",
"\"",
"\"",
")",
"\n",
"}",
"\n",
"if",
"e",
".",
"etype",
"==",
"etName",
"{",
"req",
":=",
"NewReq",
"(",
"e",
".",
"str",
",",
"context",
".",
"from",
",",
"context",
".",
"to",
",",
"context",
".",
"consol",
")",
"\n",
"reqs",
"=",
"append",
"(",
"reqs",
",",
"req",
")",
"\n",
"return",
"NewGet",
"(",
"req",
")",
",",
"reqs",
",",
"nil",
"\n",
"}",
"else",
"if",
"e",
".",
"etype",
"==",
"etFunc",
"&&",
"e",
".",
"str",
"==",
"\"",
"\"",
"{",
"// `seriesByTag` function requires resolving expressions to series",
"// (similar to path expressions handled above). Since we need the",
"// arguments of seriesByTag to do the resolution, we store the function",
"// string back into the Query member of a new request to be parsed later.",
"// TODO - find a way to prevent this parse/encode/parse/encode loop",
"expressionStr",
":=",
"\"",
"\"",
"+",
"e",
".",
"argsStr",
"+",
"\"",
"\"",
"\n",
"req",
":=",
"NewReq",
"(",
"expressionStr",
",",
"context",
".",
"from",
",",
"context",
".",
"to",
",",
"context",
".",
"consol",
")",
"\n",
"reqs",
"=",
"append",
"(",
"reqs",
",",
"req",
")",
"\n",
"return",
"NewGet",
"(",
"req",
")",
",",
"reqs",
",",
"nil",
"\n",
"}",
"\n",
"// here e.type is guaranteed to be etFunc",
"fdef",
",",
"ok",
":=",
"funcs",
"[",
"e",
".",
"str",
"]",
"\n",
"if",
"!",
"ok",
"{",
"return",
"nil",
",",
"nil",
",",
"ErrUnknownFunction",
"(",
"e",
".",
"str",
")",
"\n",
"}",
"\n",
"if",
"stable",
"&&",
"!",
"fdef",
".",
"stable",
"{",
"return",
"nil",
",",
"nil",
",",
"ErrUnknownFunction",
"(",
"e",
".",
"str",
")",
"\n",
"}",
"\n\n",
"fn",
":=",
"fdef",
".",
"constr",
"(",
")",
"\n",
"reqs",
",",
"err",
":=",
"newplanFunc",
"(",
"e",
",",
"fn",
",",
"context",
",",
"stable",
",",
"reqs",
")",
"\n",
"return",
"fn",
",",
"reqs",
",",
"err",
"\n",
"}"
] |
// newplan adds requests as needed for the given expr, resolving function calls as needed
|
[
"newplan",
"adds",
"requests",
"as",
"needed",
"for",
"the",
"given",
"expr",
"resolving",
"function",
"calls",
"as",
"needed"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/expr/plan.go#L92-L123
|
train
|
grafana/metrictank
|
expr/plan.go
|
newplanFunc
|
func newplanFunc(e *expr, fn GraphiteFunc, context Context, stable bool, reqs []Req) ([]Req, error) {
// first comes the interesting task of validating the arguments as specified by the function,
// against the arguments that were parsed.
argsExp, _ := fn.Signature()
var err error
// note:
// * signature may have seriesLists in it, which means one or more args of type seriesList
// so it's legal to have more e.args than signature args in that case.
// * we can't do extensive, accurate validation of the type here because what the output from a function we depend on
// might be dynamically typed. e.g. movingAvg returns 1..N series depending on how many it got as input
// first validate the mandatory args
pos := 0 // e.args[pos] : next given arg to process
cutoff := 0 // argsExp[cutoff] : will be first optional arg (if any)
var argExp Arg
for cutoff, argExp = range argsExp {
if argExp.Optional() {
break
}
if len(e.args) <= pos {
return nil, ErrMissingArg
}
pos, err = e.consumeBasicArg(pos, argExp)
if err != nil {
return nil, err
}
}
if !argExp.Optional() {
cutoff++
}
// we stopped iterating the mandatory args.
// any remaining args should be due to optional args otherwise there's too many
// we also track here which keywords can also be used for the given optional args
// so that those args should not be specified via their keys anymore.
seenKwargs := make(map[string]struct{})
for _, argOpt := range argsExp[cutoff:] {
if len(e.args) <= pos {
break // no more args specified. we're done.
}
pos, err = e.consumeBasicArg(pos, argOpt)
if err != nil {
return nil, err
}
seenKwargs[argOpt.Key()] = struct{}{}
}
if len(e.args) > pos {
return nil, ErrTooManyArg
}
// for any provided keyword args, verify that they are what the function stipulated
// and that they have not already been specified via their position
for key := range e.namedArgs {
_, ok := seenKwargs[key]
if ok {
return nil, ErrKwargSpecifiedTwice{key}
}
err = e.consumeKwarg(key, argsExp[cutoff:])
if err != nil {
return nil, err
}
seenKwargs[key] = struct{}{}
}
// functions now have their non-series input args set,
// so they should now be able to specify any context alterations
context = fn.Context(context)
// now that we know the needed context for the data coming into
// this function, we can set up the input arguments for the function
// that are series
pos = 0
for _, argExp = range argsExp {
if pos >= len(e.args) {
break // no more args specified. we're done.
}
switch argExp.(type) {
case ArgSeries, ArgSeriesList, ArgSeriesLists, ArgIn:
pos, reqs, err = e.consumeSeriesArg(pos, argExp, context, stable, reqs)
if err != nil {
return nil, err
}
default:
pos++
}
}
return reqs, err
}
|
go
|
func newplanFunc(e *expr, fn GraphiteFunc, context Context, stable bool, reqs []Req) ([]Req, error) {
// first comes the interesting task of validating the arguments as specified by the function,
// against the arguments that were parsed.
argsExp, _ := fn.Signature()
var err error
// note:
// * signature may have seriesLists in it, which means one or more args of type seriesList
// so it's legal to have more e.args than signature args in that case.
// * we can't do extensive, accurate validation of the type here because what the output from a function we depend on
// might be dynamically typed. e.g. movingAvg returns 1..N series depending on how many it got as input
// first validate the mandatory args
pos := 0 // e.args[pos] : next given arg to process
cutoff := 0 // argsExp[cutoff] : will be first optional arg (if any)
var argExp Arg
for cutoff, argExp = range argsExp {
if argExp.Optional() {
break
}
if len(e.args) <= pos {
return nil, ErrMissingArg
}
pos, err = e.consumeBasicArg(pos, argExp)
if err != nil {
return nil, err
}
}
if !argExp.Optional() {
cutoff++
}
// we stopped iterating the mandatory args.
// any remaining args should be due to optional args otherwise there's too many
// we also track here which keywords can also be used for the given optional args
// so that those args should not be specified via their keys anymore.
seenKwargs := make(map[string]struct{})
for _, argOpt := range argsExp[cutoff:] {
if len(e.args) <= pos {
break // no more args specified. we're done.
}
pos, err = e.consumeBasicArg(pos, argOpt)
if err != nil {
return nil, err
}
seenKwargs[argOpt.Key()] = struct{}{}
}
if len(e.args) > pos {
return nil, ErrTooManyArg
}
// for any provided keyword args, verify that they are what the function stipulated
// and that they have not already been specified via their position
for key := range e.namedArgs {
_, ok := seenKwargs[key]
if ok {
return nil, ErrKwargSpecifiedTwice{key}
}
err = e.consumeKwarg(key, argsExp[cutoff:])
if err != nil {
return nil, err
}
seenKwargs[key] = struct{}{}
}
// functions now have their non-series input args set,
// so they should now be able to specify any context alterations
context = fn.Context(context)
// now that we know the needed context for the data coming into
// this function, we can set up the input arguments for the function
// that are series
pos = 0
for _, argExp = range argsExp {
if pos >= len(e.args) {
break // no more args specified. we're done.
}
switch argExp.(type) {
case ArgSeries, ArgSeriesList, ArgSeriesLists, ArgIn:
pos, reqs, err = e.consumeSeriesArg(pos, argExp, context, stable, reqs)
if err != nil {
return nil, err
}
default:
pos++
}
}
return reqs, err
}
|
[
"func",
"newplanFunc",
"(",
"e",
"*",
"expr",
",",
"fn",
"GraphiteFunc",
",",
"context",
"Context",
",",
"stable",
"bool",
",",
"reqs",
"[",
"]",
"Req",
")",
"(",
"[",
"]",
"Req",
",",
"error",
")",
"{",
"// first comes the interesting task of validating the arguments as specified by the function,",
"// against the arguments that were parsed.",
"argsExp",
",",
"_",
":=",
"fn",
".",
"Signature",
"(",
")",
"\n",
"var",
"err",
"error",
"\n\n",
"// note:",
"// * signature may have seriesLists in it, which means one or more args of type seriesList",
"// so it's legal to have more e.args than signature args in that case.",
"// * we can't do extensive, accurate validation of the type here because what the output from a function we depend on",
"// might be dynamically typed. e.g. movingAvg returns 1..N series depending on how many it got as input",
"// first validate the mandatory args",
"pos",
":=",
"0",
"// e.args[pos] : next given arg to process",
"\n",
"cutoff",
":=",
"0",
"// argsExp[cutoff] : will be first optional arg (if any)",
"\n",
"var",
"argExp",
"Arg",
"\n",
"for",
"cutoff",
",",
"argExp",
"=",
"range",
"argsExp",
"{",
"if",
"argExp",
".",
"Optional",
"(",
")",
"{",
"break",
"\n",
"}",
"\n",
"if",
"len",
"(",
"e",
".",
"args",
")",
"<=",
"pos",
"{",
"return",
"nil",
",",
"ErrMissingArg",
"\n",
"}",
"\n",
"pos",
",",
"err",
"=",
"e",
".",
"consumeBasicArg",
"(",
"pos",
",",
"argExp",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n",
"}",
"\n",
"if",
"!",
"argExp",
".",
"Optional",
"(",
")",
"{",
"cutoff",
"++",
"\n",
"}",
"\n\n",
"// we stopped iterating the mandatory args.",
"// any remaining args should be due to optional args otherwise there's too many",
"// we also track here which keywords can also be used for the given optional args",
"// so that those args should not be specified via their keys anymore.",
"seenKwargs",
":=",
"make",
"(",
"map",
"[",
"string",
"]",
"struct",
"{",
"}",
")",
"\n",
"for",
"_",
",",
"argOpt",
":=",
"range",
"argsExp",
"[",
"cutoff",
":",
"]",
"{",
"if",
"len",
"(",
"e",
".",
"args",
")",
"<=",
"pos",
"{",
"break",
"// no more args specified. we're done.",
"\n",
"}",
"\n",
"pos",
",",
"err",
"=",
"e",
".",
"consumeBasicArg",
"(",
"pos",
",",
"argOpt",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n",
"seenKwargs",
"[",
"argOpt",
".",
"Key",
"(",
")",
"]",
"=",
"struct",
"{",
"}",
"{",
"}",
"\n",
"}",
"\n",
"if",
"len",
"(",
"e",
".",
"args",
")",
">",
"pos",
"{",
"return",
"nil",
",",
"ErrTooManyArg",
"\n",
"}",
"\n\n",
"// for any provided keyword args, verify that they are what the function stipulated",
"// and that they have not already been specified via their position",
"for",
"key",
":=",
"range",
"e",
".",
"namedArgs",
"{",
"_",
",",
"ok",
":=",
"seenKwargs",
"[",
"key",
"]",
"\n",
"if",
"ok",
"{",
"return",
"nil",
",",
"ErrKwargSpecifiedTwice",
"{",
"key",
"}",
"\n",
"}",
"\n",
"err",
"=",
"e",
".",
"consumeKwarg",
"(",
"key",
",",
"argsExp",
"[",
"cutoff",
":",
"]",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n",
"seenKwargs",
"[",
"key",
"]",
"=",
"struct",
"{",
"}",
"{",
"}",
"\n",
"}",
"\n\n",
"// functions now have their non-series input args set,",
"// so they should now be able to specify any context alterations",
"context",
"=",
"fn",
".",
"Context",
"(",
"context",
")",
"\n",
"// now that we know the needed context for the data coming into",
"// this function, we can set up the input arguments for the function",
"// that are series",
"pos",
"=",
"0",
"\n",
"for",
"_",
",",
"argExp",
"=",
"range",
"argsExp",
"{",
"if",
"pos",
">=",
"len",
"(",
"e",
".",
"args",
")",
"{",
"break",
"// no more args specified. we're done.",
"\n",
"}",
"\n",
"switch",
"argExp",
".",
"(",
"type",
")",
"{",
"case",
"ArgSeries",
",",
"ArgSeriesList",
",",
"ArgSeriesLists",
",",
"ArgIn",
":",
"pos",
",",
"reqs",
",",
"err",
"=",
"e",
".",
"consumeSeriesArg",
"(",
"pos",
",",
"argExp",
",",
"context",
",",
"stable",
",",
"reqs",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n",
"default",
":",
"pos",
"++",
"\n",
"}",
"\n",
"}",
"\n",
"return",
"reqs",
",",
"err",
"\n",
"}"
] |
// newplanFunc adds requests as needed for the given expr, and validates the function input
// provided you already know the expression is a function call to the given function
|
[
"newplanFunc",
"adds",
"requests",
"as",
"needed",
"for",
"the",
"given",
"expr",
"and",
"validates",
"the",
"function",
"input",
"provided",
"you",
"already",
"know",
"the",
"expression",
"is",
"a",
"function",
"call",
"to",
"the",
"given",
"function"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/expr/plan.go#L127-L216
|
train
|
grafana/metrictank
|
mdata/aggregator.go
|
AggBoundary
|
func AggBoundary(ts uint32, span uint32) uint32 {
return ts + span - ((ts-1)%span + 1)
}
|
go
|
func AggBoundary(ts uint32, span uint32) uint32 {
return ts + span - ((ts-1)%span + 1)
}
|
[
"func",
"AggBoundary",
"(",
"ts",
"uint32",
",",
"span",
"uint32",
")",
"uint32",
"{",
"return",
"ts",
"+",
"span",
"-",
"(",
"(",
"ts",
"-",
"1",
")",
"%",
"span",
"+",
"1",
")",
"\n",
"}"
] |
// AggBoundary returns ts if it is a boundary, or the next boundary otherwise.
// see description for Aggregator and unit tests, for more details
|
[
"AggBoundary",
"returns",
"ts",
"if",
"it",
"is",
"a",
"boundary",
"or",
"the",
"next",
"boundary",
"otherwise",
".",
"see",
"description",
"for",
"Aggregator",
"and",
"unit",
"tests",
"for",
"more",
"details"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/mdata/aggregator.go#L11-L13
|
train
|
grafana/metrictank
|
mdata/aggregator.go
|
flush
|
func (agg *Aggregator) flush() {
if agg.minMetric != nil {
agg.minMetric.Add(agg.currentBoundary, agg.agg.Min)
}
if agg.maxMetric != nil {
agg.maxMetric.Add(agg.currentBoundary, agg.agg.Max)
}
if agg.sumMetric != nil {
agg.sumMetric.Add(agg.currentBoundary, agg.agg.Sum)
}
if agg.cntMetric != nil {
agg.cntMetric.Add(agg.currentBoundary, agg.agg.Cnt)
}
if agg.lstMetric != nil {
agg.lstMetric.Add(agg.currentBoundary, agg.agg.Lst)
}
//msg := fmt.Sprintf("flushed cnt %v sum %f min %f max %f, reset the block", agg.agg.cnt, agg.agg.sum, agg.agg.min, agg.agg.max)
agg.agg.Reset()
}
|
go
|
func (agg *Aggregator) flush() {
if agg.minMetric != nil {
agg.minMetric.Add(agg.currentBoundary, agg.agg.Min)
}
if agg.maxMetric != nil {
agg.maxMetric.Add(agg.currentBoundary, agg.agg.Max)
}
if agg.sumMetric != nil {
agg.sumMetric.Add(agg.currentBoundary, agg.agg.Sum)
}
if agg.cntMetric != nil {
agg.cntMetric.Add(agg.currentBoundary, agg.agg.Cnt)
}
if agg.lstMetric != nil {
agg.lstMetric.Add(agg.currentBoundary, agg.agg.Lst)
}
//msg := fmt.Sprintf("flushed cnt %v sum %f min %f max %f, reset the block", agg.agg.cnt, agg.agg.sum, agg.agg.min, agg.agg.max)
agg.agg.Reset()
}
|
[
"func",
"(",
"agg",
"*",
"Aggregator",
")",
"flush",
"(",
")",
"{",
"if",
"agg",
".",
"minMetric",
"!=",
"nil",
"{",
"agg",
".",
"minMetric",
".",
"Add",
"(",
"agg",
".",
"currentBoundary",
",",
"agg",
".",
"agg",
".",
"Min",
")",
"\n",
"}",
"\n",
"if",
"agg",
".",
"maxMetric",
"!=",
"nil",
"{",
"agg",
".",
"maxMetric",
".",
"Add",
"(",
"agg",
".",
"currentBoundary",
",",
"agg",
".",
"agg",
".",
"Max",
")",
"\n",
"}",
"\n",
"if",
"agg",
".",
"sumMetric",
"!=",
"nil",
"{",
"agg",
".",
"sumMetric",
".",
"Add",
"(",
"agg",
".",
"currentBoundary",
",",
"agg",
".",
"agg",
".",
"Sum",
")",
"\n",
"}",
"\n",
"if",
"agg",
".",
"cntMetric",
"!=",
"nil",
"{",
"agg",
".",
"cntMetric",
".",
"Add",
"(",
"agg",
".",
"currentBoundary",
",",
"agg",
".",
"agg",
".",
"Cnt",
")",
"\n",
"}",
"\n",
"if",
"agg",
".",
"lstMetric",
"!=",
"nil",
"{",
"agg",
".",
"lstMetric",
".",
"Add",
"(",
"agg",
".",
"currentBoundary",
",",
"agg",
".",
"agg",
".",
"Lst",
")",
"\n",
"}",
"\n",
"//msg := fmt.Sprintf(\"flushed cnt %v sum %f min %f max %f, reset the block\", agg.agg.cnt, agg.agg.sum, agg.agg.min, agg.agg.max)",
"agg",
".",
"agg",
".",
"Reset",
"(",
")",
"\n",
"}"
] |
// flush adds points to the aggregation-series and resets aggregation state
|
[
"flush",
"adds",
"points",
"to",
"the",
"aggregation",
"-",
"series",
"and",
"resets",
"aggregation",
"state"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/mdata/aggregator.go#L78-L96
|
train
|
grafana/metrictank
|
mdata/aggregator.go
|
GC
|
func (agg *Aggregator) GC(now, chunkMinTs, metricMinTs, lastWriteTime uint32) (uint32, bool) {
var points uint32
stale := true
if lastWriteTime+agg.span > chunkMinTs {
// Last datapoint was less than one aggregation window before chunkMinTs, hold out for more data
return 0, false
}
// Haven't seen datapoints in an entire aggregation window before chunkMinTs, time to flush
if agg.agg.Cnt != 0 {
agg.flush()
}
if agg.minMetric != nil {
p, s := agg.minMetric.GC(now, chunkMinTs, metricMinTs)
stale = stale && s
points += p
}
if agg.maxMetric != nil {
p, s := agg.maxMetric.GC(now, chunkMinTs, metricMinTs)
stale = stale && s
points += p
}
if agg.sumMetric != nil {
p, s := agg.sumMetric.GC(now, chunkMinTs, metricMinTs)
stale = stale && s
points += p
}
if agg.cntMetric != nil {
p, s := agg.cntMetric.GC(now, chunkMinTs, metricMinTs)
stale = stale && s
points += p
}
if agg.lstMetric != nil {
p, s := agg.lstMetric.GC(now, chunkMinTs, metricMinTs)
stale = stale && s
points += p
}
return points, stale
}
|
go
|
func (agg *Aggregator) GC(now, chunkMinTs, metricMinTs, lastWriteTime uint32) (uint32, bool) {
var points uint32
stale := true
if lastWriteTime+agg.span > chunkMinTs {
// Last datapoint was less than one aggregation window before chunkMinTs, hold out for more data
return 0, false
}
// Haven't seen datapoints in an entire aggregation window before chunkMinTs, time to flush
if agg.agg.Cnt != 0 {
agg.flush()
}
if agg.minMetric != nil {
p, s := agg.minMetric.GC(now, chunkMinTs, metricMinTs)
stale = stale && s
points += p
}
if agg.maxMetric != nil {
p, s := agg.maxMetric.GC(now, chunkMinTs, metricMinTs)
stale = stale && s
points += p
}
if agg.sumMetric != nil {
p, s := agg.sumMetric.GC(now, chunkMinTs, metricMinTs)
stale = stale && s
points += p
}
if agg.cntMetric != nil {
p, s := agg.cntMetric.GC(now, chunkMinTs, metricMinTs)
stale = stale && s
points += p
}
if agg.lstMetric != nil {
p, s := agg.lstMetric.GC(now, chunkMinTs, metricMinTs)
stale = stale && s
points += p
}
return points, stale
}
|
[
"func",
"(",
"agg",
"*",
"Aggregator",
")",
"GC",
"(",
"now",
",",
"chunkMinTs",
",",
"metricMinTs",
",",
"lastWriteTime",
"uint32",
")",
"(",
"uint32",
",",
"bool",
")",
"{",
"var",
"points",
"uint32",
"\n",
"stale",
":=",
"true",
"\n\n",
"if",
"lastWriteTime",
"+",
"agg",
".",
"span",
">",
"chunkMinTs",
"{",
"// Last datapoint was less than one aggregation window before chunkMinTs, hold out for more data",
"return",
"0",
",",
"false",
"\n",
"}",
"\n\n",
"// Haven't seen datapoints in an entire aggregation window before chunkMinTs, time to flush",
"if",
"agg",
".",
"agg",
".",
"Cnt",
"!=",
"0",
"{",
"agg",
".",
"flush",
"(",
")",
"\n",
"}",
"\n\n",
"if",
"agg",
".",
"minMetric",
"!=",
"nil",
"{",
"p",
",",
"s",
":=",
"agg",
".",
"minMetric",
".",
"GC",
"(",
"now",
",",
"chunkMinTs",
",",
"metricMinTs",
")",
"\n",
"stale",
"=",
"stale",
"&&",
"s",
"\n",
"points",
"+=",
"p",
"\n\n",
"}",
"\n",
"if",
"agg",
".",
"maxMetric",
"!=",
"nil",
"{",
"p",
",",
"s",
":=",
"agg",
".",
"maxMetric",
".",
"GC",
"(",
"now",
",",
"chunkMinTs",
",",
"metricMinTs",
")",
"\n",
"stale",
"=",
"stale",
"&&",
"s",
"\n",
"points",
"+=",
"p",
"\n",
"}",
"\n",
"if",
"agg",
".",
"sumMetric",
"!=",
"nil",
"{",
"p",
",",
"s",
":=",
"agg",
".",
"sumMetric",
".",
"GC",
"(",
"now",
",",
"chunkMinTs",
",",
"metricMinTs",
")",
"\n",
"stale",
"=",
"stale",
"&&",
"s",
"\n",
"points",
"+=",
"p",
"\n",
"}",
"\n",
"if",
"agg",
".",
"cntMetric",
"!=",
"nil",
"{",
"p",
",",
"s",
":=",
"agg",
".",
"cntMetric",
".",
"GC",
"(",
"now",
",",
"chunkMinTs",
",",
"metricMinTs",
")",
"\n",
"stale",
"=",
"stale",
"&&",
"s",
"\n",
"points",
"+=",
"p",
"\n",
"}",
"\n",
"if",
"agg",
".",
"lstMetric",
"!=",
"nil",
"{",
"p",
",",
"s",
":=",
"agg",
".",
"lstMetric",
".",
"GC",
"(",
"now",
",",
"chunkMinTs",
",",
"metricMinTs",
")",
"\n",
"stale",
"=",
"stale",
"&&",
"s",
"\n",
"points",
"+=",
"p",
"\n",
"}",
"\n\n",
"return",
"points",
",",
"stale",
"\n",
"}"
] |
// GC returns whether all of the associated series are stale and can be removed, and their combined pointcount if so
|
[
"GC",
"returns",
"whether",
"all",
"of",
"the",
"associated",
"series",
"are",
"stale",
"and",
"can",
"be",
"removed",
"and",
"their",
"combined",
"pointcount",
"if",
"so"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/mdata/aggregator.go#L120-L162
|
train
|
grafana/metrictank
|
expr/func_aspercent.go
|
getTotalSeries
|
func getTotalSeries(totalSeriesLists, include map[string][]models.Series, cache map[Req][]models.Series) map[string]models.Series {
totalSeries := make(map[string]models.Series, len(totalSeriesLists))
for key := range totalSeriesLists {
if _, ok := include[key]; ok {
totalSeries[key] = sumSeries(totalSeriesLists[key], cache)
} else {
totalSeries[key] = totalSeriesLists[key][0]
}
}
return totalSeries
}
|
go
|
func getTotalSeries(totalSeriesLists, include map[string][]models.Series, cache map[Req][]models.Series) map[string]models.Series {
totalSeries := make(map[string]models.Series, len(totalSeriesLists))
for key := range totalSeriesLists {
if _, ok := include[key]; ok {
totalSeries[key] = sumSeries(totalSeriesLists[key], cache)
} else {
totalSeries[key] = totalSeriesLists[key][0]
}
}
return totalSeries
}
|
[
"func",
"getTotalSeries",
"(",
"totalSeriesLists",
",",
"include",
"map",
"[",
"string",
"]",
"[",
"]",
"models",
".",
"Series",
",",
"cache",
"map",
"[",
"Req",
"]",
"[",
"]",
"models",
".",
"Series",
")",
"map",
"[",
"string",
"]",
"models",
".",
"Series",
"{",
"totalSeries",
":=",
"make",
"(",
"map",
"[",
"string",
"]",
"models",
".",
"Series",
",",
"len",
"(",
"totalSeriesLists",
")",
")",
"\n",
"for",
"key",
":=",
"range",
"totalSeriesLists",
"{",
"if",
"_",
",",
"ok",
":=",
"include",
"[",
"key",
"]",
";",
"ok",
"{",
"totalSeries",
"[",
"key",
"]",
"=",
"sumSeries",
"(",
"totalSeriesLists",
"[",
"key",
"]",
",",
"cache",
")",
"\n",
"}",
"else",
"{",
"totalSeries",
"[",
"key",
"]",
"=",
"totalSeriesLists",
"[",
"key",
"]",
"[",
"0",
"]",
"\n",
"}",
"\n\n",
"}",
"\n",
"return",
"totalSeries",
"\n",
"}"
] |
// Sums each seriesList in map of seriesLists
|
[
"Sums",
"each",
"seriesList",
"in",
"map",
"of",
"seriesLists"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/expr/func_aspercent.go#L229-L240
|
train
|
grafana/metrictank
|
expr/func_aspercent.go
|
sumSeries
|
func sumSeries(series []models.Series, cache map[Req][]models.Series) models.Series {
if len(series) == 1 {
return series[0]
}
out := pointSlicePool.Get().([]schema.Point)
crossSeriesSum(series, &out)
var queryPatts []string
Loop:
for _, v := range series {
// avoid duplicates
for _, qp := range queryPatts {
if qp == v.QueryPatt {
continue Loop
}
}
queryPatts = append(queryPatts, v.QueryPatt)
}
name := fmt.Sprintf("sumSeries(%s)", strings.Join(queryPatts, ","))
cons, queryCons := summarizeCons(series)
sum := models.Series{
Target: name,
QueryPatt: name,
Datapoints: out,
Interval: series[0].Interval,
Consolidator: cons,
QueryCons: queryCons,
Tags: map[string]string{"name": name},
}
cache[Req{}] = append(cache[Req{}], sum)
return sum
}
|
go
|
func sumSeries(series []models.Series, cache map[Req][]models.Series) models.Series {
if len(series) == 1 {
return series[0]
}
out := pointSlicePool.Get().([]schema.Point)
crossSeriesSum(series, &out)
var queryPatts []string
Loop:
for _, v := range series {
// avoid duplicates
for _, qp := range queryPatts {
if qp == v.QueryPatt {
continue Loop
}
}
queryPatts = append(queryPatts, v.QueryPatt)
}
name := fmt.Sprintf("sumSeries(%s)", strings.Join(queryPatts, ","))
cons, queryCons := summarizeCons(series)
sum := models.Series{
Target: name,
QueryPatt: name,
Datapoints: out,
Interval: series[0].Interval,
Consolidator: cons,
QueryCons: queryCons,
Tags: map[string]string{"name": name},
}
cache[Req{}] = append(cache[Req{}], sum)
return sum
}
|
[
"func",
"sumSeries",
"(",
"series",
"[",
"]",
"models",
".",
"Series",
",",
"cache",
"map",
"[",
"Req",
"]",
"[",
"]",
"models",
".",
"Series",
")",
"models",
".",
"Series",
"{",
"if",
"len",
"(",
"series",
")",
"==",
"1",
"{",
"return",
"series",
"[",
"0",
"]",
"\n",
"}",
"\n",
"out",
":=",
"pointSlicePool",
".",
"Get",
"(",
")",
".",
"(",
"[",
"]",
"schema",
".",
"Point",
")",
"\n",
"crossSeriesSum",
"(",
"series",
",",
"&",
"out",
")",
"\n",
"var",
"queryPatts",
"[",
"]",
"string",
"\n\n",
"Loop",
":",
"for",
"_",
",",
"v",
":=",
"range",
"series",
"{",
"// avoid duplicates",
"for",
"_",
",",
"qp",
":=",
"range",
"queryPatts",
"{",
"if",
"qp",
"==",
"v",
".",
"QueryPatt",
"{",
"continue",
"Loop",
"\n",
"}",
"\n",
"}",
"\n",
"queryPatts",
"=",
"append",
"(",
"queryPatts",
",",
"v",
".",
"QueryPatt",
")",
"\n",
"}",
"\n",
"name",
":=",
"fmt",
".",
"Sprintf",
"(",
"\"",
"\"",
",",
"strings",
".",
"Join",
"(",
"queryPatts",
",",
"\"",
"\"",
")",
")",
"\n",
"cons",
",",
"queryCons",
":=",
"summarizeCons",
"(",
"series",
")",
"\n",
"sum",
":=",
"models",
".",
"Series",
"{",
"Target",
":",
"name",
",",
"QueryPatt",
":",
"name",
",",
"Datapoints",
":",
"out",
",",
"Interval",
":",
"series",
"[",
"0",
"]",
".",
"Interval",
",",
"Consolidator",
":",
"cons",
",",
"QueryCons",
":",
"queryCons",
",",
"Tags",
":",
"map",
"[",
"string",
"]",
"string",
"{",
"\"",
"\"",
":",
"name",
"}",
",",
"}",
"\n",
"cache",
"[",
"Req",
"{",
"}",
"]",
"=",
"append",
"(",
"cache",
"[",
"Req",
"{",
"}",
"]",
",",
"sum",
")",
"\n",
"return",
"sum",
"\n",
"}"
] |
// sumSeries returns a copy-on-write series that is the sum of the inputs
|
[
"sumSeries",
"returns",
"a",
"copy",
"-",
"on",
"-",
"write",
"series",
"that",
"is",
"the",
"sum",
"of",
"the",
"inputs"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/expr/func_aspercent.go#L243-L274
|
train
|
grafana/metrictank
|
expr/func_removeabovebelowpercentile.go
|
getPercentileValue
|
func getPercentileValue(datapoints []schema.Point, n float64, sortedDatapointVals []float64) float64 {
sortedDatapointVals = sortedDatapointVals[:0]
for _, p := range datapoints {
if !math.IsNaN(p.Val) {
sortedDatapointVals = append(sortedDatapointVals, p.Val)
}
}
sort.Float64s(sortedDatapointVals)
index := math.Min(math.Ceil(n/100.0*float64(len(sortedDatapointVals)+1)), float64(len(sortedDatapointVals))) - 1
return sortedDatapointVals[int(index)]
}
|
go
|
func getPercentileValue(datapoints []schema.Point, n float64, sortedDatapointVals []float64) float64 {
sortedDatapointVals = sortedDatapointVals[:0]
for _, p := range datapoints {
if !math.IsNaN(p.Val) {
sortedDatapointVals = append(sortedDatapointVals, p.Val)
}
}
sort.Float64s(sortedDatapointVals)
index := math.Min(math.Ceil(n/100.0*float64(len(sortedDatapointVals)+1)), float64(len(sortedDatapointVals))) - 1
return sortedDatapointVals[int(index)]
}
|
[
"func",
"getPercentileValue",
"(",
"datapoints",
"[",
"]",
"schema",
".",
"Point",
",",
"n",
"float64",
",",
"sortedDatapointVals",
"[",
"]",
"float64",
")",
"float64",
"{",
"sortedDatapointVals",
"=",
"sortedDatapointVals",
"[",
":",
"0",
"]",
"\n",
"for",
"_",
",",
"p",
":=",
"range",
"datapoints",
"{",
"if",
"!",
"math",
".",
"IsNaN",
"(",
"p",
".",
"Val",
")",
"{",
"sortedDatapointVals",
"=",
"append",
"(",
"sortedDatapointVals",
",",
"p",
".",
"Val",
")",
"\n",
"}",
"\n",
"}",
"\n\n",
"sort",
".",
"Float64s",
"(",
"sortedDatapointVals",
")",
"\n\n",
"index",
":=",
"math",
".",
"Min",
"(",
"math",
".",
"Ceil",
"(",
"n",
"/",
"100.0",
"*",
"float64",
"(",
"len",
"(",
"sortedDatapointVals",
")",
"+",
"1",
")",
")",
",",
"float64",
"(",
"len",
"(",
"sortedDatapointVals",
")",
")",
")",
"-",
"1",
"\n\n",
"return",
"sortedDatapointVals",
"[",
"int",
"(",
"index",
")",
"]",
"\n",
"}"
] |
// sortedDatapointVals is an empty slice to be used for sorting datapoints.
// n must be > 0. if n > 100, the largest value is returned.
|
[
"sortedDatapointVals",
"is",
"an",
"empty",
"slice",
"to",
"be",
"used",
"for",
"sorting",
"datapoints",
".",
"n",
"must",
"be",
">",
"0",
".",
"if",
"n",
">",
"100",
"the",
"largest",
"value",
"is",
"returned",
"."
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/expr/func_removeabovebelowpercentile.go#L92-L105
|
train
|
grafana/metrictank
|
cluster/node.go
|
MarshalJSON
|
func (n NodeMode) MarshalJSON() ([]byte, error) {
buffer := bytes.NewBufferString(`"`)
buffer.WriteString(n.String())
buffer.WriteString(`"`)
return buffer.Bytes(), nil
}
|
go
|
func (n NodeMode) MarshalJSON() ([]byte, error) {
buffer := bytes.NewBufferString(`"`)
buffer.WriteString(n.String())
buffer.WriteString(`"`)
return buffer.Bytes(), nil
}
|
[
"func",
"(",
"n",
"NodeMode",
")",
"MarshalJSON",
"(",
")",
"(",
"[",
"]",
"byte",
",",
"error",
")",
"{",
"buffer",
":=",
"bytes",
".",
"NewBufferString",
"(",
"`\"`",
")",
"\n",
"buffer",
".",
"WriteString",
"(",
"n",
".",
"String",
"(",
")",
")",
"\n",
"buffer",
".",
"WriteString",
"(",
"`\"`",
")",
"\n",
"return",
"buffer",
".",
"Bytes",
"(",
")",
",",
"nil",
"\n",
"}"
] |
// MarshalJSON marshals a NodeMode
|
[
"MarshalJSON",
"marshals",
"a",
"NodeMode"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/cluster/node.go#L58-L63
|
train
|
grafana/metrictank
|
cluster/node.go
|
UnmarshalJSON
|
func (n *NodeMode) UnmarshalJSON(b []byte) error {
var j string
err := json.Unmarshal(b, &j)
if err != nil {
return err
}
*n, err = NodeModeFromString(j)
return err
}
|
go
|
func (n *NodeMode) UnmarshalJSON(b []byte) error {
var j string
err := json.Unmarshal(b, &j)
if err != nil {
return err
}
*n, err = NodeModeFromString(j)
return err
}
|
[
"func",
"(",
"n",
"*",
"NodeMode",
")",
"UnmarshalJSON",
"(",
"b",
"[",
"]",
"byte",
")",
"error",
"{",
"var",
"j",
"string",
"\n",
"err",
":=",
"json",
".",
"Unmarshal",
"(",
"b",
",",
"&",
"j",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"*",
"n",
",",
"err",
"=",
"NodeModeFromString",
"(",
"j",
")",
"\n",
"return",
"err",
"\n",
"}"
] |
// UnmarshalJSON unmashals a NodeMode
|
[
"UnmarshalJSON",
"unmashals",
"a",
"NodeMode"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/cluster/node.go#L66-L74
|
train
|
grafana/metrictank
|
cluster/node.go
|
UnmarshalJSON
|
func (n *NodeState) UnmarshalJSON(data []byte) error {
s := string(data)
switch s {
case "0", `"NodeNotReady"`:
*n = NodeNotReady
case "1", `"NodeReady"`:
*n = NodeReady
case "2", `"NodeUnreachable"`:
*n = NodeUnreachable
default:
return fmt.Errorf("unrecognized NodeState %q", s)
}
return nil
}
|
go
|
func (n *NodeState) UnmarshalJSON(data []byte) error {
s := string(data)
switch s {
case "0", `"NodeNotReady"`:
*n = NodeNotReady
case "1", `"NodeReady"`:
*n = NodeReady
case "2", `"NodeUnreachable"`:
*n = NodeUnreachable
default:
return fmt.Errorf("unrecognized NodeState %q", s)
}
return nil
}
|
[
"func",
"(",
"n",
"*",
"NodeState",
")",
"UnmarshalJSON",
"(",
"data",
"[",
"]",
"byte",
")",
"error",
"{",
"s",
":=",
"string",
"(",
"data",
")",
"\n",
"switch",
"s",
"{",
"case",
"\"",
"\"",
",",
"`\"NodeNotReady\"`",
":",
"*",
"n",
"=",
"NodeNotReady",
"\n",
"case",
"\"",
"\"",
",",
"`\"NodeReady\"`",
":",
"*",
"n",
"=",
"NodeReady",
"\n",
"case",
"\"",
"\"",
",",
"`\"NodeUnreachable\"`",
":",
"*",
"n",
"=",
"NodeUnreachable",
"\n",
"default",
":",
"return",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"s",
")",
"\n",
"}",
"\n",
"return",
"nil",
"\n",
"}"
] |
// UnmarshalJSON supports unmarshalling according to the older
// integer based, as well as the new string based, representation
|
[
"UnmarshalJSON",
"supports",
"unmarshalling",
"according",
"to",
"the",
"older",
"integer",
"based",
"as",
"well",
"as",
"the",
"new",
"string",
"based",
"representation"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/cluster/node.go#L100-L113
|
train
|
grafana/metrictank
|
cluster/node.go
|
readyStateGCHandler
|
func (n HTTPNode) readyStateGCHandler() {
if gcPercent == gcPercentNotReady {
return
}
var err error
if n.IsReady() {
prev := debug.SetGCPercent(gcPercent)
if prev != gcPercent {
log.Infof("CLU: node is ready. changing GOGC from %d to %d", prev, gcPercent)
err = os.Setenv("GOGC", strconv.Itoa(gcPercent))
}
} else {
prev := debug.SetGCPercent(gcPercentNotReady)
if prev != gcPercentNotReady {
log.Infof("CLU: node is not ready. changing GOGC from %d to %d", prev, gcPercentNotReady)
err = os.Setenv("GOGC", strconv.Itoa(gcPercentNotReady))
}
}
if err != nil {
log.Warnf("CLU: could not set GOGC environment variable. gcPercent metric will be incorrect. %s", err.Error())
}
}
|
go
|
func (n HTTPNode) readyStateGCHandler() {
if gcPercent == gcPercentNotReady {
return
}
var err error
if n.IsReady() {
prev := debug.SetGCPercent(gcPercent)
if prev != gcPercent {
log.Infof("CLU: node is ready. changing GOGC from %d to %d", prev, gcPercent)
err = os.Setenv("GOGC", strconv.Itoa(gcPercent))
}
} else {
prev := debug.SetGCPercent(gcPercentNotReady)
if prev != gcPercentNotReady {
log.Infof("CLU: node is not ready. changing GOGC from %d to %d", prev, gcPercentNotReady)
err = os.Setenv("GOGC", strconv.Itoa(gcPercentNotReady))
}
}
if err != nil {
log.Warnf("CLU: could not set GOGC environment variable. gcPercent metric will be incorrect. %s", err.Error())
}
}
|
[
"func",
"(",
"n",
"HTTPNode",
")",
"readyStateGCHandler",
"(",
")",
"{",
"if",
"gcPercent",
"==",
"gcPercentNotReady",
"{",
"return",
"\n",
"}",
"\n",
"var",
"err",
"error",
"\n",
"if",
"n",
".",
"IsReady",
"(",
")",
"{",
"prev",
":=",
"debug",
".",
"SetGCPercent",
"(",
"gcPercent",
")",
"\n",
"if",
"prev",
"!=",
"gcPercent",
"{",
"log",
".",
"Infof",
"(",
"\"",
"\"",
",",
"prev",
",",
"gcPercent",
")",
"\n",
"err",
"=",
"os",
".",
"Setenv",
"(",
"\"",
"\"",
",",
"strconv",
".",
"Itoa",
"(",
"gcPercent",
")",
")",
"\n",
"}",
"\n",
"}",
"else",
"{",
"prev",
":=",
"debug",
".",
"SetGCPercent",
"(",
"gcPercentNotReady",
")",
"\n",
"if",
"prev",
"!=",
"gcPercentNotReady",
"{",
"log",
".",
"Infof",
"(",
"\"",
"\"",
",",
"prev",
",",
"gcPercentNotReady",
")",
"\n",
"err",
"=",
"os",
".",
"Setenv",
"(",
"\"",
"\"",
",",
"strconv",
".",
"Itoa",
"(",
"gcPercentNotReady",
")",
")",
"\n",
"}",
"\n",
"}",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"log",
".",
"Warnf",
"(",
"\"",
"\"",
",",
"err",
".",
"Error",
"(",
")",
")",
"\n",
"}",
"\n",
"}"
] |
// readyStateGCHandler adjusts the gcPercent value based on the node ready state
|
[
"readyStateGCHandler",
"adjusts",
"the",
"gcPercent",
"value",
"based",
"on",
"the",
"node",
"ready",
"state"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/cluster/node.go#L192-L213
|
train
|
grafana/metrictank
|
cluster/node.go
|
SetState
|
func (n *HTTPNode) SetState(state NodeState) bool {
if n.State == state {
return false
}
n.State = state
now := time.Now()
n.Updated = now
n.StateChange = now
n.readyStateGCHandler()
return true
}
|
go
|
func (n *HTTPNode) SetState(state NodeState) bool {
if n.State == state {
return false
}
n.State = state
now := time.Now()
n.Updated = now
n.StateChange = now
n.readyStateGCHandler()
return true
}
|
[
"func",
"(",
"n",
"*",
"HTTPNode",
")",
"SetState",
"(",
"state",
"NodeState",
")",
"bool",
"{",
"if",
"n",
".",
"State",
"==",
"state",
"{",
"return",
"false",
"\n",
"}",
"\n",
"n",
".",
"State",
"=",
"state",
"\n",
"now",
":=",
"time",
".",
"Now",
"(",
")",
"\n",
"n",
".",
"Updated",
"=",
"now",
"\n",
"n",
".",
"StateChange",
"=",
"now",
"\n",
"n",
".",
"readyStateGCHandler",
"(",
")",
"\n",
"return",
"true",
"\n",
"}"
] |
// SetState sets the state of the node and returns whether the state changed
|
[
"SetState",
"sets",
"the",
"state",
"of",
"the",
"node",
"and",
"returns",
"whether",
"the",
"state",
"changed"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/cluster/node.go#L216-L226
|
train
|
grafana/metrictank
|
cluster/node.go
|
SetPriority
|
func (n *HTTPNode) SetPriority(prio int) bool {
if n.Priority == prio {
return false
}
n.Priority = prio
n.Updated = time.Now()
n.readyStateGCHandler()
return true
}
|
go
|
func (n *HTTPNode) SetPriority(prio int) bool {
if n.Priority == prio {
return false
}
n.Priority = prio
n.Updated = time.Now()
n.readyStateGCHandler()
return true
}
|
[
"func",
"(",
"n",
"*",
"HTTPNode",
")",
"SetPriority",
"(",
"prio",
"int",
")",
"bool",
"{",
"if",
"n",
".",
"Priority",
"==",
"prio",
"{",
"return",
"false",
"\n",
"}",
"\n",
"n",
".",
"Priority",
"=",
"prio",
"\n",
"n",
".",
"Updated",
"=",
"time",
".",
"Now",
"(",
")",
"\n",
"n",
".",
"readyStateGCHandler",
"(",
")",
"\n",
"return",
"true",
"\n",
"}"
] |
// SetPriority sets the priority of the node and returns whether it changed
|
[
"SetPriority",
"sets",
"the",
"priority",
"of",
"the",
"node",
"and",
"returns",
"whether",
"it",
"changed"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/cluster/node.go#L229-L237
|
train
|
grafana/metrictank
|
cluster/node.go
|
SetPrimary
|
func (n *HTTPNode) SetPrimary(primary bool) bool {
if n.Primary == primary {
return false
}
now := time.Now()
n.Primary = primary
n.Updated = now
n.PrimaryChange = now
return true
}
|
go
|
func (n *HTTPNode) SetPrimary(primary bool) bool {
if n.Primary == primary {
return false
}
now := time.Now()
n.Primary = primary
n.Updated = now
n.PrimaryChange = now
return true
}
|
[
"func",
"(",
"n",
"*",
"HTTPNode",
")",
"SetPrimary",
"(",
"primary",
"bool",
")",
"bool",
"{",
"if",
"n",
".",
"Primary",
"==",
"primary",
"{",
"return",
"false",
"\n",
"}",
"\n",
"now",
":=",
"time",
".",
"Now",
"(",
")",
"\n",
"n",
".",
"Primary",
"=",
"primary",
"\n",
"n",
".",
"Updated",
"=",
"now",
"\n",
"n",
".",
"PrimaryChange",
"=",
"now",
"\n",
"return",
"true",
"\n",
"}"
] |
// SetPrimary sets the primary state of the node and returns whether it changed
|
[
"SetPrimary",
"sets",
"the",
"primary",
"state",
"of",
"the",
"node",
"and",
"returns",
"whether",
"it",
"changed"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/cluster/node.go#L240-L249
|
train
|
grafana/metrictank
|
cluster/node.go
|
SetPartitions
|
func (n *HTTPNode) SetPartitions(part []int32) {
n.Partitions = part
n.Updated = time.Now()
}
|
go
|
func (n *HTTPNode) SetPartitions(part []int32) {
n.Partitions = part
n.Updated = time.Now()
}
|
[
"func",
"(",
"n",
"*",
"HTTPNode",
")",
"SetPartitions",
"(",
"part",
"[",
"]",
"int32",
")",
"{",
"n",
".",
"Partitions",
"=",
"part",
"\n",
"n",
".",
"Updated",
"=",
"time",
".",
"Now",
"(",
")",
"\n",
"}"
] |
// SetPartitions sets the partitions that this node is handling
|
[
"SetPartitions",
"sets",
"the",
"partitions",
"that",
"this",
"node",
"is",
"handling"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/cluster/node.go#L252-L255
|
train
|
grafana/metrictank
|
kafka/partitions.go
|
DiffPartitions
|
func DiffPartitions(a []int32, b []int32) []int32 {
var diff []int32
Iter:
for _, eA := range a {
for _, eB := range b {
if eA == eB {
continue Iter
}
}
diff = append(diff, eA)
}
return diff
}
|
go
|
func DiffPartitions(a []int32, b []int32) []int32 {
var diff []int32
Iter:
for _, eA := range a {
for _, eB := range b {
if eA == eB {
continue Iter
}
}
diff = append(diff, eA)
}
return diff
}
|
[
"func",
"DiffPartitions",
"(",
"a",
"[",
"]",
"int32",
",",
"b",
"[",
"]",
"int32",
")",
"[",
"]",
"int32",
"{",
"var",
"diff",
"[",
"]",
"int32",
"\n",
"Iter",
":",
"for",
"_",
",",
"eA",
":=",
"range",
"a",
"{",
"for",
"_",
",",
"eB",
":=",
"range",
"b",
"{",
"if",
"eA",
"==",
"eB",
"{",
"continue",
"Iter",
"\n",
"}",
"\n",
"}",
"\n",
"diff",
"=",
"append",
"(",
"diff",
",",
"eA",
")",
"\n",
"}",
"\n",
"return",
"diff",
"\n",
"}"
] |
// returns elements that are in a but not in b
|
[
"returns",
"elements",
"that",
"are",
"in",
"a",
"but",
"not",
"in",
"b"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/kafka/partitions.go#L10-L22
|
train
|
grafana/metrictank
|
api/models/series.go
|
MarshalJSONFast
|
func (series SeriesByTarget) MarshalJSONFast(b []byte) ([]byte, error) {
b = append(b, '[')
for _, s := range series {
b = append(b, `{"target":`...)
b = strconv.AppendQuoteToASCII(b, s.Target)
if len(s.Tags) != 0 {
b = append(b, `,"tags":{`...)
for name, value := range s.Tags {
b = strconv.AppendQuoteToASCII(b, name)
b = append(b, ':')
b = strconv.AppendQuoteToASCII(b, value)
b = append(b, ',')
}
// Replace trailing comma with a closing bracket
b[len(b)-1] = '}'
}
b = append(b, `,"datapoints":[`...)
for _, p := range s.Datapoints {
b = append(b, '[')
if math.IsNaN(p.Val) {
b = append(b, `null,`...)
} else {
b = strconv.AppendFloat(b, p.Val, 'f', -1, 64)
b = append(b, ',')
}
b = strconv.AppendUint(b, uint64(p.Ts), 10)
b = append(b, `],`...)
}
if len(s.Datapoints) != 0 {
b = b[:len(b)-1] // cut last comma
}
b = append(b, `]},`...)
}
if len(series) != 0 {
b = b[:len(b)-1] // cut last comma
}
b = append(b, ']')
return b, nil
}
|
go
|
func (series SeriesByTarget) MarshalJSONFast(b []byte) ([]byte, error) {
b = append(b, '[')
for _, s := range series {
b = append(b, `{"target":`...)
b = strconv.AppendQuoteToASCII(b, s.Target)
if len(s.Tags) != 0 {
b = append(b, `,"tags":{`...)
for name, value := range s.Tags {
b = strconv.AppendQuoteToASCII(b, name)
b = append(b, ':')
b = strconv.AppendQuoteToASCII(b, value)
b = append(b, ',')
}
// Replace trailing comma with a closing bracket
b[len(b)-1] = '}'
}
b = append(b, `,"datapoints":[`...)
for _, p := range s.Datapoints {
b = append(b, '[')
if math.IsNaN(p.Val) {
b = append(b, `null,`...)
} else {
b = strconv.AppendFloat(b, p.Val, 'f', -1, 64)
b = append(b, ',')
}
b = strconv.AppendUint(b, uint64(p.Ts), 10)
b = append(b, `],`...)
}
if len(s.Datapoints) != 0 {
b = b[:len(b)-1] // cut last comma
}
b = append(b, `]},`...)
}
if len(series) != 0 {
b = b[:len(b)-1] // cut last comma
}
b = append(b, ']')
return b, nil
}
|
[
"func",
"(",
"series",
"SeriesByTarget",
")",
"MarshalJSONFast",
"(",
"b",
"[",
"]",
"byte",
")",
"(",
"[",
"]",
"byte",
",",
"error",
")",
"{",
"b",
"=",
"append",
"(",
"b",
",",
"'['",
")",
"\n",
"for",
"_",
",",
"s",
":=",
"range",
"series",
"{",
"b",
"=",
"append",
"(",
"b",
",",
"`{\"target\":`",
"...",
")",
"\n",
"b",
"=",
"strconv",
".",
"AppendQuoteToASCII",
"(",
"b",
",",
"s",
".",
"Target",
")",
"\n",
"if",
"len",
"(",
"s",
".",
"Tags",
")",
"!=",
"0",
"{",
"b",
"=",
"append",
"(",
"b",
",",
"`,\"tags\":{`",
"...",
")",
"\n",
"for",
"name",
",",
"value",
":=",
"range",
"s",
".",
"Tags",
"{",
"b",
"=",
"strconv",
".",
"AppendQuoteToASCII",
"(",
"b",
",",
"name",
")",
"\n",
"b",
"=",
"append",
"(",
"b",
",",
"':'",
")",
"\n",
"b",
"=",
"strconv",
".",
"AppendQuoteToASCII",
"(",
"b",
",",
"value",
")",
"\n",
"b",
"=",
"append",
"(",
"b",
",",
"','",
")",
"\n",
"}",
"\n",
"// Replace trailing comma with a closing bracket",
"b",
"[",
"len",
"(",
"b",
")",
"-",
"1",
"]",
"=",
"'}'",
"\n",
"}",
"\n",
"b",
"=",
"append",
"(",
"b",
",",
"`,\"datapoints\":[`",
"...",
")",
"\n",
"for",
"_",
",",
"p",
":=",
"range",
"s",
".",
"Datapoints",
"{",
"b",
"=",
"append",
"(",
"b",
",",
"'['",
")",
"\n",
"if",
"math",
".",
"IsNaN",
"(",
"p",
".",
"Val",
")",
"{",
"b",
"=",
"append",
"(",
"b",
",",
"`null,`",
"...",
")",
"\n",
"}",
"else",
"{",
"b",
"=",
"strconv",
".",
"AppendFloat",
"(",
"b",
",",
"p",
".",
"Val",
",",
"'f'",
",",
"-",
"1",
",",
"64",
")",
"\n",
"b",
"=",
"append",
"(",
"b",
",",
"','",
")",
"\n",
"}",
"\n",
"b",
"=",
"strconv",
".",
"AppendUint",
"(",
"b",
",",
"uint64",
"(",
"p",
".",
"Ts",
")",
",",
"10",
")",
"\n",
"b",
"=",
"append",
"(",
"b",
",",
"`],`",
"...",
")",
"\n",
"}",
"\n",
"if",
"len",
"(",
"s",
".",
"Datapoints",
")",
"!=",
"0",
"{",
"b",
"=",
"b",
"[",
":",
"len",
"(",
"b",
")",
"-",
"1",
"]",
"// cut last comma",
"\n",
"}",
"\n",
"b",
"=",
"append",
"(",
"b",
",",
"`]},`",
"...",
")",
"\n",
"}",
"\n",
"if",
"len",
"(",
"series",
")",
"!=",
"0",
"{",
"b",
"=",
"b",
"[",
":",
"len",
"(",
"b",
")",
"-",
"1",
"]",
"// cut last comma",
"\n",
"}",
"\n",
"b",
"=",
"append",
"(",
"b",
",",
"']'",
")",
"\n",
"return",
"b",
",",
"nil",
"\n",
"}"
] |
// regular graphite output
|
[
"regular",
"graphite",
"output"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/api/models/series.go#L100-L138
|
train
|
grafana/metrictank
|
idx/memory/time_limit.go
|
NewTimeLimiter
|
func NewTimeLimiter(window, limit time.Duration, now time.Time) *TimeLimiter {
l := TimeLimiter{
since: now,
next: now.Add(window),
window: window,
limit: limit,
factor: float64(window) / float64(limit),
}
return &l
}
|
go
|
func NewTimeLimiter(window, limit time.Duration, now time.Time) *TimeLimiter {
l := TimeLimiter{
since: now,
next: now.Add(window),
window: window,
limit: limit,
factor: float64(window) / float64(limit),
}
return &l
}
|
[
"func",
"NewTimeLimiter",
"(",
"window",
",",
"limit",
"time",
".",
"Duration",
",",
"now",
"time",
".",
"Time",
")",
"*",
"TimeLimiter",
"{",
"l",
":=",
"TimeLimiter",
"{",
"since",
":",
"now",
",",
"next",
":",
"now",
".",
"Add",
"(",
"window",
")",
",",
"window",
":",
"window",
",",
"limit",
":",
"limit",
",",
"factor",
":",
"float64",
"(",
"window",
")",
"/",
"float64",
"(",
"limit",
")",
",",
"}",
"\n",
"return",
"&",
"l",
"\n",
"}"
] |
// NewTimeLimiter creates a new TimeLimiter.
// limit must <= window
|
[
"NewTimeLimiter",
"creates",
"a",
"new",
"TimeLimiter",
".",
"limit",
"must",
"<",
"=",
"window"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/idx/memory/time_limit.go#L25-L34
|
train
|
grafana/metrictank
|
idx/memory/time_limit.go
|
Add
|
func (l *TimeLimiter) Add(d time.Duration) {
l.add(time.Now(), d)
}
|
go
|
func (l *TimeLimiter) Add(d time.Duration) {
l.add(time.Now(), d)
}
|
[
"func",
"(",
"l",
"*",
"TimeLimiter",
")",
"Add",
"(",
"d",
"time",
".",
"Duration",
")",
"{",
"l",
".",
"add",
"(",
"time",
".",
"Now",
"(",
")",
",",
"d",
")",
"\n",
"}"
] |
// Add increments the "time spent" counter by "d"
|
[
"Add",
"increments",
"the",
"time",
"spent",
"counter",
"by",
"d"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/idx/memory/time_limit.go#L37-L39
|
train
|
grafana/metrictank
|
idx/memory/time_limit.go
|
add
|
func (l *TimeLimiter) add(now time.Time, d time.Duration) {
if now.After(l.next) {
l.timeSpent = d
l.since = now.Add(-d)
l.next = l.since.Add(l.window)
return
}
l.timeSpent += d
}
|
go
|
func (l *TimeLimiter) add(now time.Time, d time.Duration) {
if now.After(l.next) {
l.timeSpent = d
l.since = now.Add(-d)
l.next = l.since.Add(l.window)
return
}
l.timeSpent += d
}
|
[
"func",
"(",
"l",
"*",
"TimeLimiter",
")",
"add",
"(",
"now",
"time",
".",
"Time",
",",
"d",
"time",
".",
"Duration",
")",
"{",
"if",
"now",
".",
"After",
"(",
"l",
".",
"next",
")",
"{",
"l",
".",
"timeSpent",
"=",
"d",
"\n",
"l",
".",
"since",
"=",
"now",
".",
"Add",
"(",
"-",
"d",
")",
"\n",
"l",
".",
"next",
"=",
"l",
".",
"since",
".",
"Add",
"(",
"l",
".",
"window",
")",
"\n",
"return",
"\n",
"}",
"\n",
"l",
".",
"timeSpent",
"+=",
"d",
"\n",
"}"
] |
// add increments the "time spent" counter by "d" at a given time
|
[
"add",
"increments",
"the",
"time",
"spent",
"counter",
"by",
"d",
"at",
"a",
"given",
"time"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/idx/memory/time_limit.go#L42-L50
|
train
|
grafana/metrictank
|
idx/memory/memory.go
|
AddOrUpdate
|
func (m *UnpartitionedMemoryIdx) AddOrUpdate(mkey schema.MKey, data *schema.MetricData, partition int32) (idx.Archive, int32, bool) {
pre := time.Now()
// Optimistically read lock
m.RLock()
existing, ok := m.defById[mkey]
if ok {
if log.IsLevelEnabled(log.DebugLevel) {
log.Debugf("memory-idx: metricDef with id %s already in index.", mkey)
}
bumpLastUpdate(&existing.LastUpdate, data.Time)
oldPart := atomic.SwapInt32(&existing.Partition, partition)
statUpdate.Inc()
statUpdateDuration.Value(time.Since(pre))
m.RUnlock()
return *existing, oldPart, ok
}
m.RUnlock()
m.Lock()
defer m.Unlock()
def := schema.MetricDefinitionFromMetricData(data)
def.Partition = partition
archive := m.add(def)
statMetricsActive.Inc()
statAddDuration.Value(time.Since(pre))
if TagSupport {
m.indexTags(def)
}
return archive, 0, false
}
|
go
|
func (m *UnpartitionedMemoryIdx) AddOrUpdate(mkey schema.MKey, data *schema.MetricData, partition int32) (idx.Archive, int32, bool) {
pre := time.Now()
// Optimistically read lock
m.RLock()
existing, ok := m.defById[mkey]
if ok {
if log.IsLevelEnabled(log.DebugLevel) {
log.Debugf("memory-idx: metricDef with id %s already in index.", mkey)
}
bumpLastUpdate(&existing.LastUpdate, data.Time)
oldPart := atomic.SwapInt32(&existing.Partition, partition)
statUpdate.Inc()
statUpdateDuration.Value(time.Since(pre))
m.RUnlock()
return *existing, oldPart, ok
}
m.RUnlock()
m.Lock()
defer m.Unlock()
def := schema.MetricDefinitionFromMetricData(data)
def.Partition = partition
archive := m.add(def)
statMetricsActive.Inc()
statAddDuration.Value(time.Since(pre))
if TagSupport {
m.indexTags(def)
}
return archive, 0, false
}
|
[
"func",
"(",
"m",
"*",
"UnpartitionedMemoryIdx",
")",
"AddOrUpdate",
"(",
"mkey",
"schema",
".",
"MKey",
",",
"data",
"*",
"schema",
".",
"MetricData",
",",
"partition",
"int32",
")",
"(",
"idx",
".",
"Archive",
",",
"int32",
",",
"bool",
")",
"{",
"pre",
":=",
"time",
".",
"Now",
"(",
")",
"\n\n",
"// Optimistically read lock",
"m",
".",
"RLock",
"(",
")",
"\n\n",
"existing",
",",
"ok",
":=",
"m",
".",
"defById",
"[",
"mkey",
"]",
"\n",
"if",
"ok",
"{",
"if",
"log",
".",
"IsLevelEnabled",
"(",
"log",
".",
"DebugLevel",
")",
"{",
"log",
".",
"Debugf",
"(",
"\"",
"\"",
",",
"mkey",
")",
"\n",
"}",
"\n",
"bumpLastUpdate",
"(",
"&",
"existing",
".",
"LastUpdate",
",",
"data",
".",
"Time",
")",
"\n",
"oldPart",
":=",
"atomic",
".",
"SwapInt32",
"(",
"&",
"existing",
".",
"Partition",
",",
"partition",
")",
"\n",
"statUpdate",
".",
"Inc",
"(",
")",
"\n",
"statUpdateDuration",
".",
"Value",
"(",
"time",
".",
"Since",
"(",
"pre",
")",
")",
"\n",
"m",
".",
"RUnlock",
"(",
")",
"\n",
"return",
"*",
"existing",
",",
"oldPart",
",",
"ok",
"\n",
"}",
"\n\n",
"m",
".",
"RUnlock",
"(",
")",
"\n",
"m",
".",
"Lock",
"(",
")",
"\n",
"defer",
"m",
".",
"Unlock",
"(",
")",
"\n\n",
"def",
":=",
"schema",
".",
"MetricDefinitionFromMetricData",
"(",
"data",
")",
"\n",
"def",
".",
"Partition",
"=",
"partition",
"\n",
"archive",
":=",
"m",
".",
"add",
"(",
"def",
")",
"\n",
"statMetricsActive",
".",
"Inc",
"(",
")",
"\n",
"statAddDuration",
".",
"Value",
"(",
"time",
".",
"Since",
"(",
"pre",
")",
")",
"\n\n",
"if",
"TagSupport",
"{",
"m",
".",
"indexTags",
"(",
"def",
")",
"\n",
"}",
"\n\n",
"return",
"archive",
",",
"0",
",",
"false",
"\n",
"}"
] |
// AddOrUpdate returns the corresponding Archive for the MetricData.
// if it is existing -> updates lastUpdate based on .Time, and partition
// if was new -> adds new MetricDefinition to index
|
[
"AddOrUpdate",
"returns",
"the",
"corresponding",
"Archive",
"for",
"the",
"MetricData",
".",
"if",
"it",
"is",
"existing",
"-",
">",
"updates",
"lastUpdate",
"based",
"on",
".",
"Time",
"and",
"partition",
"if",
"was",
"new",
"-",
">",
"adds",
"new",
"MetricDefinition",
"to",
"index"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/idx/memory/memory.go#L320-L354
|
train
|
grafana/metrictank
|
idx/memory/memory.go
|
indexTags
|
func (m *UnpartitionedMemoryIdx) indexTags(def *schema.MetricDefinition) {
tags, ok := m.tags[def.OrgId]
if !ok {
tags = make(TagIndex)
m.tags[def.OrgId] = tags
}
for _, tag := range def.Tags {
tagSplits := strings.SplitN(tag, "=", 2)
if len(tagSplits) < 2 {
// should never happen because every tag in the index
// must have a valid format
invalidTag.Inc()
log.Errorf("memory-idx: Tag %q of id %q has an invalid format", tag, def.Id)
continue
}
tagName := tagSplits[0]
tagValue := tagSplits[1]
tags.addTagId(tagName, tagValue, def.Id)
}
tags.addTagId("name", def.Name, def.Id)
m.defByTagSet.add(def)
}
|
go
|
func (m *UnpartitionedMemoryIdx) indexTags(def *schema.MetricDefinition) {
tags, ok := m.tags[def.OrgId]
if !ok {
tags = make(TagIndex)
m.tags[def.OrgId] = tags
}
for _, tag := range def.Tags {
tagSplits := strings.SplitN(tag, "=", 2)
if len(tagSplits) < 2 {
// should never happen because every tag in the index
// must have a valid format
invalidTag.Inc()
log.Errorf("memory-idx: Tag %q of id %q has an invalid format", tag, def.Id)
continue
}
tagName := tagSplits[0]
tagValue := tagSplits[1]
tags.addTagId(tagName, tagValue, def.Id)
}
tags.addTagId("name", def.Name, def.Id)
m.defByTagSet.add(def)
}
|
[
"func",
"(",
"m",
"*",
"UnpartitionedMemoryIdx",
")",
"indexTags",
"(",
"def",
"*",
"schema",
".",
"MetricDefinition",
")",
"{",
"tags",
",",
"ok",
":=",
"m",
".",
"tags",
"[",
"def",
".",
"OrgId",
"]",
"\n",
"if",
"!",
"ok",
"{",
"tags",
"=",
"make",
"(",
"TagIndex",
")",
"\n",
"m",
".",
"tags",
"[",
"def",
".",
"OrgId",
"]",
"=",
"tags",
"\n",
"}",
"\n\n",
"for",
"_",
",",
"tag",
":=",
"range",
"def",
".",
"Tags",
"{",
"tagSplits",
":=",
"strings",
".",
"SplitN",
"(",
"tag",
",",
"\"",
"\"",
",",
"2",
")",
"\n",
"if",
"len",
"(",
"tagSplits",
")",
"<",
"2",
"{",
"// should never happen because every tag in the index",
"// must have a valid format",
"invalidTag",
".",
"Inc",
"(",
")",
"\n",
"log",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"tag",
",",
"def",
".",
"Id",
")",
"\n",
"continue",
"\n",
"}",
"\n\n",
"tagName",
":=",
"tagSplits",
"[",
"0",
"]",
"\n",
"tagValue",
":=",
"tagSplits",
"[",
"1",
"]",
"\n",
"tags",
".",
"addTagId",
"(",
"tagName",
",",
"tagValue",
",",
"def",
".",
"Id",
")",
"\n",
"}",
"\n",
"tags",
".",
"addTagId",
"(",
"\"",
"\"",
",",
"def",
".",
"Name",
",",
"def",
".",
"Id",
")",
"\n\n",
"m",
".",
"defByTagSet",
".",
"add",
"(",
"def",
")",
"\n",
"}"
] |
// indexTags reads the tags of a given metric definition and creates the
// corresponding tag index entries to refer to it. It assumes a lock is
// already held.
|
[
"indexTags",
"reads",
"the",
"tags",
"of",
"a",
"given",
"metric",
"definition",
"and",
"creates",
"the",
"corresponding",
"tag",
"index",
"entries",
"to",
"refer",
"to",
"it",
".",
"It",
"assumes",
"a",
"lock",
"is",
"already",
"held",
"."
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/idx/memory/memory.go#L369-L393
|
train
|
grafana/metrictank
|
idx/memory/memory.go
|
deindexTags
|
func (m *UnpartitionedMemoryIdx) deindexTags(tags TagIndex, def *schema.MetricDefinition) bool {
for _, tag := range def.Tags {
tagSplits := strings.SplitN(tag, "=", 2)
if len(tagSplits) < 2 {
// should never happen because every tag in the index
// must have a valid format
invalidTag.Inc()
log.Errorf("memory-idx: Tag %q of id %q has an invalid format", tag, def.Id)
continue
}
tagName := tagSplits[0]
tagValue := tagSplits[1]
tags.delTagId(tagName, tagValue, def.Id)
}
tags.delTagId("name", def.Name, def.Id)
m.defByTagSet.del(def)
return true
}
|
go
|
func (m *UnpartitionedMemoryIdx) deindexTags(tags TagIndex, def *schema.MetricDefinition) bool {
for _, tag := range def.Tags {
tagSplits := strings.SplitN(tag, "=", 2)
if len(tagSplits) < 2 {
// should never happen because every tag in the index
// must have a valid format
invalidTag.Inc()
log.Errorf("memory-idx: Tag %q of id %q has an invalid format", tag, def.Id)
continue
}
tagName := tagSplits[0]
tagValue := tagSplits[1]
tags.delTagId(tagName, tagValue, def.Id)
}
tags.delTagId("name", def.Name, def.Id)
m.defByTagSet.del(def)
return true
}
|
[
"func",
"(",
"m",
"*",
"UnpartitionedMemoryIdx",
")",
"deindexTags",
"(",
"tags",
"TagIndex",
",",
"def",
"*",
"schema",
".",
"MetricDefinition",
")",
"bool",
"{",
"for",
"_",
",",
"tag",
":=",
"range",
"def",
".",
"Tags",
"{",
"tagSplits",
":=",
"strings",
".",
"SplitN",
"(",
"tag",
",",
"\"",
"\"",
",",
"2",
")",
"\n",
"if",
"len",
"(",
"tagSplits",
")",
"<",
"2",
"{",
"// should never happen because every tag in the index",
"// must have a valid format",
"invalidTag",
".",
"Inc",
"(",
")",
"\n",
"log",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"tag",
",",
"def",
".",
"Id",
")",
"\n",
"continue",
"\n",
"}",
"\n\n",
"tagName",
":=",
"tagSplits",
"[",
"0",
"]",
"\n",
"tagValue",
":=",
"tagSplits",
"[",
"1",
"]",
"\n",
"tags",
".",
"delTagId",
"(",
"tagName",
",",
"tagValue",
",",
"def",
".",
"Id",
")",
"\n",
"}",
"\n\n",
"tags",
".",
"delTagId",
"(",
"\"",
"\"",
",",
"def",
".",
"Name",
",",
"def",
".",
"Id",
")",
"\n\n",
"m",
".",
"defByTagSet",
".",
"del",
"(",
"def",
")",
"\n\n",
"return",
"true",
"\n",
"}"
] |
// deindexTags takes a given metric definition and removes all references
// to it from the tag index. It assumes a lock is already held.
// a return value of "false" means there was an error and the deindexing was
// unsuccessful, "true" means the indexing was at least partially or completely
// successful
|
[
"deindexTags",
"takes",
"a",
"given",
"metric",
"definition",
"and",
"removes",
"all",
"references",
"to",
"it",
"from",
"the",
"tag",
"index",
".",
"It",
"assumes",
"a",
"lock",
"is",
"already",
"held",
".",
"a",
"return",
"value",
"of",
"false",
"means",
"there",
"was",
"an",
"error",
"and",
"the",
"deindexing",
"was",
"unsuccessful",
"true",
"means",
"the",
"indexing",
"was",
"at",
"least",
"partially",
"or",
"completely",
"successful"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/idx/memory/memory.go#L400-L421
|
train
|
grafana/metrictank
|
idx/memory/memory.go
|
LoadPartition
|
func (m *UnpartitionedMemoryIdx) LoadPartition(partition int32, defs []schema.MetricDefinition) int {
// UnpartitionedMemoryIdx isnt partitioned, so just ignore the partition passed and call Load()
return m.Load(defs)
}
|
go
|
func (m *UnpartitionedMemoryIdx) LoadPartition(partition int32, defs []schema.MetricDefinition) int {
// UnpartitionedMemoryIdx isnt partitioned, so just ignore the partition passed and call Load()
return m.Load(defs)
}
|
[
"func",
"(",
"m",
"*",
"UnpartitionedMemoryIdx",
")",
"LoadPartition",
"(",
"partition",
"int32",
",",
"defs",
"[",
"]",
"schema",
".",
"MetricDefinition",
")",
"int",
"{",
"// UnpartitionedMemoryIdx isnt partitioned, so just ignore the partition passed and call Load()",
"return",
"m",
".",
"Load",
"(",
"defs",
")",
"\n",
"}"
] |
// Used to rebuild the index from an existing set of metricDefinitions for a specific paritition.
|
[
"Used",
"to",
"rebuild",
"the",
"index",
"from",
"an",
"existing",
"set",
"of",
"metricDefinitions",
"for",
"a",
"specific",
"paritition",
"."
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/idx/memory/memory.go#L424-L427
|
train
|
grafana/metrictank
|
idx/memory/memory.go
|
GetPath
|
func (m *UnpartitionedMemoryIdx) GetPath(orgId uint32, path string) []idx.Archive {
m.RLock()
defer m.RUnlock()
tree, ok := m.tree[orgId]
if !ok {
return nil
}
node := tree.Items[path]
if node == nil {
return nil
}
archives := make([]idx.Archive, len(node.Defs))
for i, def := range node.Defs {
archive := m.defById[def]
archives[i] = *archive
}
return archives
}
|
go
|
func (m *UnpartitionedMemoryIdx) GetPath(orgId uint32, path string) []idx.Archive {
m.RLock()
defer m.RUnlock()
tree, ok := m.tree[orgId]
if !ok {
return nil
}
node := tree.Items[path]
if node == nil {
return nil
}
archives := make([]idx.Archive, len(node.Defs))
for i, def := range node.Defs {
archive := m.defById[def]
archives[i] = *archive
}
return archives
}
|
[
"func",
"(",
"m",
"*",
"UnpartitionedMemoryIdx",
")",
"GetPath",
"(",
"orgId",
"uint32",
",",
"path",
"string",
")",
"[",
"]",
"idx",
".",
"Archive",
"{",
"m",
".",
"RLock",
"(",
")",
"\n",
"defer",
"m",
".",
"RUnlock",
"(",
")",
"\n",
"tree",
",",
"ok",
":=",
"m",
".",
"tree",
"[",
"orgId",
"]",
"\n",
"if",
"!",
"ok",
"{",
"return",
"nil",
"\n",
"}",
"\n",
"node",
":=",
"tree",
".",
"Items",
"[",
"path",
"]",
"\n",
"if",
"node",
"==",
"nil",
"{",
"return",
"nil",
"\n",
"}",
"\n",
"archives",
":=",
"make",
"(",
"[",
"]",
"idx",
".",
"Archive",
",",
"len",
"(",
"node",
".",
"Defs",
")",
")",
"\n",
"for",
"i",
",",
"def",
":=",
"range",
"node",
".",
"Defs",
"{",
"archive",
":=",
"m",
".",
"defById",
"[",
"def",
"]",
"\n",
"archives",
"[",
"i",
"]",
"=",
"*",
"archive",
"\n",
"}",
"\n",
"return",
"archives",
"\n",
"}"
] |
// GetPath returns the node under the given org and path.
// this is an alternative to Find for when you have a path, not a pattern, and want to lookup in a specific org tree only.
|
[
"GetPath",
"returns",
"the",
"node",
"under",
"the",
"given",
"org",
"and",
"path",
".",
"this",
"is",
"an",
"alternative",
"to",
"Find",
"for",
"when",
"you",
"have",
"a",
"path",
"not",
"a",
"pattern",
"and",
"want",
"to",
"lookup",
"in",
"a",
"specific",
"org",
"tree",
"only",
"."
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/idx/memory/memory.go#L575-L592
|
train
|
grafana/metrictank
|
idx/memory/memory.go
|
Tags
|
func (m *UnpartitionedMemoryIdx) Tags(orgId uint32, filter string, from int64) ([]string, error) {
if !TagSupport {
log.Warn("memory-idx: received tag query, but tag support is disabled")
return nil, nil
}
var re *regexp.Regexp
if len(filter) > 0 {
if filter[0] != byte('^') {
filter = "^(?:" + filter + ")"
}
var err error
re, err = regexp.Compile(filter)
if err != nil {
return nil, err
}
}
m.RLock()
defer m.RUnlock()
tags, ok := m.tags[orgId]
if !ok {
return nil, nil
}
var res []string
// if there is no filter/from given we know how much space we'll need
// and can preallocate it
if re == nil && from == 0 {
res = make([]string, 0, len(tags))
}
for tag := range tags {
// filter by pattern if one was given
if re != nil && !re.MatchString(tag) {
continue
}
// if from is > 0 we need to find at least one metric definition where
// LastUpdate >= from before we add the tag to the result set
if (from > 0 && m.hasOneMetricFrom(tags, tag, from)) || from == 0 {
res = append(res, tag)
}
}
return res, nil
}
|
go
|
func (m *UnpartitionedMemoryIdx) Tags(orgId uint32, filter string, from int64) ([]string, error) {
if !TagSupport {
log.Warn("memory-idx: received tag query, but tag support is disabled")
return nil, nil
}
var re *regexp.Regexp
if len(filter) > 0 {
if filter[0] != byte('^') {
filter = "^(?:" + filter + ")"
}
var err error
re, err = regexp.Compile(filter)
if err != nil {
return nil, err
}
}
m.RLock()
defer m.RUnlock()
tags, ok := m.tags[orgId]
if !ok {
return nil, nil
}
var res []string
// if there is no filter/from given we know how much space we'll need
// and can preallocate it
if re == nil && from == 0 {
res = make([]string, 0, len(tags))
}
for tag := range tags {
// filter by pattern if one was given
if re != nil && !re.MatchString(tag) {
continue
}
// if from is > 0 we need to find at least one metric definition where
// LastUpdate >= from before we add the tag to the result set
if (from > 0 && m.hasOneMetricFrom(tags, tag, from)) || from == 0 {
res = append(res, tag)
}
}
return res, nil
}
|
[
"func",
"(",
"m",
"*",
"UnpartitionedMemoryIdx",
")",
"Tags",
"(",
"orgId",
"uint32",
",",
"filter",
"string",
",",
"from",
"int64",
")",
"(",
"[",
"]",
"string",
",",
"error",
")",
"{",
"if",
"!",
"TagSupport",
"{",
"log",
".",
"Warn",
"(",
"\"",
"\"",
")",
"\n",
"return",
"nil",
",",
"nil",
"\n",
"}",
"\n\n",
"var",
"re",
"*",
"regexp",
".",
"Regexp",
"\n",
"if",
"len",
"(",
"filter",
")",
">",
"0",
"{",
"if",
"filter",
"[",
"0",
"]",
"!=",
"byte",
"(",
"'^'",
")",
"{",
"filter",
"=",
"\"",
"\"",
"+",
"filter",
"+",
"\"",
"\"",
"\n",
"}",
"\n",
"var",
"err",
"error",
"\n",
"re",
",",
"err",
"=",
"regexp",
".",
"Compile",
"(",
"filter",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n",
"}",
"\n\n",
"m",
".",
"RLock",
"(",
")",
"\n",
"defer",
"m",
".",
"RUnlock",
"(",
")",
"\n\n",
"tags",
",",
"ok",
":=",
"m",
".",
"tags",
"[",
"orgId",
"]",
"\n",
"if",
"!",
"ok",
"{",
"return",
"nil",
",",
"nil",
"\n",
"}",
"\n\n",
"var",
"res",
"[",
"]",
"string",
"\n\n",
"// if there is no filter/from given we know how much space we'll need",
"// and can preallocate it",
"if",
"re",
"==",
"nil",
"&&",
"from",
"==",
"0",
"{",
"res",
"=",
"make",
"(",
"[",
"]",
"string",
",",
"0",
",",
"len",
"(",
"tags",
")",
")",
"\n",
"}",
"\n\n",
"for",
"tag",
":=",
"range",
"tags",
"{",
"// filter by pattern if one was given",
"if",
"re",
"!=",
"nil",
"&&",
"!",
"re",
".",
"MatchString",
"(",
"tag",
")",
"{",
"continue",
"\n",
"}",
"\n\n",
"// if from is > 0 we need to find at least one metric definition where",
"// LastUpdate >= from before we add the tag to the result set",
"if",
"(",
"from",
">",
"0",
"&&",
"m",
".",
"hasOneMetricFrom",
"(",
"tags",
",",
"tag",
",",
"from",
")",
")",
"||",
"from",
"==",
"0",
"{",
"res",
"=",
"append",
"(",
"res",
",",
"tag",
")",
"\n",
"}",
"\n",
"}",
"\n\n",
"return",
"res",
",",
"nil",
"\n",
"}"
] |
// Tags returns a list of all tag keys associated with the metrics of a given
// organization. The return values are filtered by the regex in the second parameter.
// If the third parameter is >0 then only metrics will be accounted of which the
// LastUpdate time is >= the given value.
|
[
"Tags",
"returns",
"a",
"list",
"of",
"all",
"tag",
"keys",
"associated",
"with",
"the",
"metrics",
"of",
"a",
"given",
"organization",
".",
"The",
"return",
"values",
"are",
"filtered",
"by",
"the",
"regex",
"in",
"the",
"second",
"parameter",
".",
"If",
"the",
"third",
"parameter",
"is",
">",
"0",
"then",
"only",
"metrics",
"will",
"be",
"accounted",
"of",
"which",
"the",
"LastUpdate",
"time",
"is",
">",
"=",
"the",
"given",
"value",
"."
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/idx/memory/memory.go#L852-L900
|
train
|
grafana/metrictank
|
idx/memory/memory.go
|
deleteTaggedByIdSet
|
func (m *UnpartitionedMemoryIdx) deleteTaggedByIdSet(orgId uint32, ids IdSet) []idx.Archive {
tags, ok := m.tags[orgId]
if !ok {
return nil
}
deletedDefs := make([]idx.Archive, 0, len(ids))
for id := range ids {
idStr := id
def, ok := m.defById[idStr]
if !ok {
// not necessarily a corruption, the id could have been deleted
// while we switched from read to write lock
continue
}
if !m.deindexTags(tags, &def.MetricDefinition) {
continue
}
deletedDefs = append(deletedDefs, *def)
delete(m.defById, idStr)
}
statMetricsActive.Set(len(m.defById))
return deletedDefs
}
|
go
|
func (m *UnpartitionedMemoryIdx) deleteTaggedByIdSet(orgId uint32, ids IdSet) []idx.Archive {
tags, ok := m.tags[orgId]
if !ok {
return nil
}
deletedDefs := make([]idx.Archive, 0, len(ids))
for id := range ids {
idStr := id
def, ok := m.defById[idStr]
if !ok {
// not necessarily a corruption, the id could have been deleted
// while we switched from read to write lock
continue
}
if !m.deindexTags(tags, &def.MetricDefinition) {
continue
}
deletedDefs = append(deletedDefs, *def)
delete(m.defById, idStr)
}
statMetricsActive.Set(len(m.defById))
return deletedDefs
}
|
[
"func",
"(",
"m",
"*",
"UnpartitionedMemoryIdx",
")",
"deleteTaggedByIdSet",
"(",
"orgId",
"uint32",
",",
"ids",
"IdSet",
")",
"[",
"]",
"idx",
".",
"Archive",
"{",
"tags",
",",
"ok",
":=",
"m",
".",
"tags",
"[",
"orgId",
"]",
"\n",
"if",
"!",
"ok",
"{",
"return",
"nil",
"\n",
"}",
"\n\n",
"deletedDefs",
":=",
"make",
"(",
"[",
"]",
"idx",
".",
"Archive",
",",
"0",
",",
"len",
"(",
"ids",
")",
")",
"\n",
"for",
"id",
":=",
"range",
"ids",
"{",
"idStr",
":=",
"id",
"\n",
"def",
",",
"ok",
":=",
"m",
".",
"defById",
"[",
"idStr",
"]",
"\n",
"if",
"!",
"ok",
"{",
"// not necessarily a corruption, the id could have been deleted",
"// while we switched from read to write lock",
"continue",
"\n",
"}",
"\n",
"if",
"!",
"m",
".",
"deindexTags",
"(",
"tags",
",",
"&",
"def",
".",
"MetricDefinition",
")",
"{",
"continue",
"\n",
"}",
"\n",
"deletedDefs",
"=",
"append",
"(",
"deletedDefs",
",",
"*",
"def",
")",
"\n",
"delete",
"(",
"m",
".",
"defById",
",",
"idStr",
")",
"\n",
"}",
"\n\n",
"statMetricsActive",
".",
"Set",
"(",
"len",
"(",
"m",
".",
"defById",
")",
")",
"\n\n",
"return",
"deletedDefs",
"\n",
"}"
] |
// deleteTaggedByIdSet deletes a map of ids from the tag index and also the DefByIds
// it is important that only IDs of series with tags get passed in here, because
// otherwise the result might be inconsistencies between DefByIDs and the tree index.
|
[
"deleteTaggedByIdSet",
"deletes",
"a",
"map",
"of",
"ids",
"from",
"the",
"tag",
"index",
"and",
"also",
"the",
"DefByIds",
"it",
"is",
"important",
"that",
"only",
"IDs",
"of",
"series",
"with",
"tags",
"get",
"passed",
"in",
"here",
"because",
"otherwise",
"the",
"result",
"might",
"be",
"inconsistencies",
"between",
"DefByIDs",
"and",
"the",
"tree",
"index",
"."
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/idx/memory/memory.go#L1212-L1237
|
train
|
grafana/metrictank
|
util/util.go
|
Lcm
|
func Lcm(vals []uint32) uint32 {
out := vals[0]
for i := 1; i < len(vals); i++ {
max := Max(uint32(vals[i]), out)
min := Min(uint32(vals[i]), out)
r := max % min
if r != 0 {
for j := uint32(2); j <= min; j++ {
if (j*max)%min == 0 {
out = j * max
break
}
}
} else {
out = max
}
}
return out
}
|
go
|
func Lcm(vals []uint32) uint32 {
out := vals[0]
for i := 1; i < len(vals); i++ {
max := Max(uint32(vals[i]), out)
min := Min(uint32(vals[i]), out)
r := max % min
if r != 0 {
for j := uint32(2); j <= min; j++ {
if (j*max)%min == 0 {
out = j * max
break
}
}
} else {
out = max
}
}
return out
}
|
[
"func",
"Lcm",
"(",
"vals",
"[",
"]",
"uint32",
")",
"uint32",
"{",
"out",
":=",
"vals",
"[",
"0",
"]",
"\n",
"for",
"i",
":=",
"1",
";",
"i",
"<",
"len",
"(",
"vals",
")",
";",
"i",
"++",
"{",
"max",
":=",
"Max",
"(",
"uint32",
"(",
"vals",
"[",
"i",
"]",
")",
",",
"out",
")",
"\n",
"min",
":=",
"Min",
"(",
"uint32",
"(",
"vals",
"[",
"i",
"]",
")",
",",
"out",
")",
"\n",
"r",
":=",
"max",
"%",
"min",
"\n",
"if",
"r",
"!=",
"0",
"{",
"for",
"j",
":=",
"uint32",
"(",
"2",
")",
";",
"j",
"<=",
"min",
";",
"j",
"++",
"{",
"if",
"(",
"j",
"*",
"max",
")",
"%",
"min",
"==",
"0",
"{",
"out",
"=",
"j",
"*",
"max",
"\n",
"break",
"\n",
"}",
"\n",
"}",
"\n",
"}",
"else",
"{",
"out",
"=",
"max",
"\n",
"}",
"\n",
"}",
"\n",
"return",
"out",
"\n",
"}"
] |
// Lcm returns the least common multiple
|
[
"Lcm",
"returns",
"the",
"least",
"common",
"multiple"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/util/util.go#L25-L43
|
train
|
grafana/metrictank
|
input/input.go
|
ProcessMetricPoint
|
func (in DefaultHandler) ProcessMetricPoint(point schema.MetricPoint, format msg.Format, partition int32) {
if format == msg.FormatMetricPoint {
in.receivedMP.Inc()
} else {
in.receivedMPNO.Inc()
}
// in cassandra we store timestamps as 32bit signed integers.
// math.MaxInt32 = Jan 19 03:14:07 UTC 2038
if !point.Valid() || point.Time >= math.MaxInt32 {
in.invalidMP.Inc()
mdata.PromDiscardedSamples.WithLabelValues(invalidTimestamp, strconv.Itoa(int(point.MKey.Org))).Inc()
log.Debugf("in: Invalid metric %v", point)
return
}
archive, _, ok := in.metricIndex.Update(point, partition)
if !ok {
in.unknownMP.Inc()
mdata.PromDiscardedSamples.WithLabelValues(unknownPointId, strconv.Itoa(int(point.MKey.Org))).Inc()
return
}
m := in.metrics.GetOrCreate(point.MKey, archive.SchemaId, archive.AggId, uint32(archive.Interval))
m.Add(point.Time, point.Value)
}
|
go
|
func (in DefaultHandler) ProcessMetricPoint(point schema.MetricPoint, format msg.Format, partition int32) {
if format == msg.FormatMetricPoint {
in.receivedMP.Inc()
} else {
in.receivedMPNO.Inc()
}
// in cassandra we store timestamps as 32bit signed integers.
// math.MaxInt32 = Jan 19 03:14:07 UTC 2038
if !point.Valid() || point.Time >= math.MaxInt32 {
in.invalidMP.Inc()
mdata.PromDiscardedSamples.WithLabelValues(invalidTimestamp, strconv.Itoa(int(point.MKey.Org))).Inc()
log.Debugf("in: Invalid metric %v", point)
return
}
archive, _, ok := in.metricIndex.Update(point, partition)
if !ok {
in.unknownMP.Inc()
mdata.PromDiscardedSamples.WithLabelValues(unknownPointId, strconv.Itoa(int(point.MKey.Org))).Inc()
return
}
m := in.metrics.GetOrCreate(point.MKey, archive.SchemaId, archive.AggId, uint32(archive.Interval))
m.Add(point.Time, point.Value)
}
|
[
"func",
"(",
"in",
"DefaultHandler",
")",
"ProcessMetricPoint",
"(",
"point",
"schema",
".",
"MetricPoint",
",",
"format",
"msg",
".",
"Format",
",",
"partition",
"int32",
")",
"{",
"if",
"format",
"==",
"msg",
".",
"FormatMetricPoint",
"{",
"in",
".",
"receivedMP",
".",
"Inc",
"(",
")",
"\n",
"}",
"else",
"{",
"in",
".",
"receivedMPNO",
".",
"Inc",
"(",
")",
"\n",
"}",
"\n",
"// in cassandra we store timestamps as 32bit signed integers.",
"// math.MaxInt32 = Jan 19 03:14:07 UTC 2038",
"if",
"!",
"point",
".",
"Valid",
"(",
")",
"||",
"point",
".",
"Time",
">=",
"math",
".",
"MaxInt32",
"{",
"in",
".",
"invalidMP",
".",
"Inc",
"(",
")",
"\n",
"mdata",
".",
"PromDiscardedSamples",
".",
"WithLabelValues",
"(",
"invalidTimestamp",
",",
"strconv",
".",
"Itoa",
"(",
"int",
"(",
"point",
".",
"MKey",
".",
"Org",
")",
")",
")",
".",
"Inc",
"(",
")",
"\n",
"log",
".",
"Debugf",
"(",
"\"",
"\"",
",",
"point",
")",
"\n",
"return",
"\n",
"}",
"\n\n",
"archive",
",",
"_",
",",
"ok",
":=",
"in",
".",
"metricIndex",
".",
"Update",
"(",
"point",
",",
"partition",
")",
"\n\n",
"if",
"!",
"ok",
"{",
"in",
".",
"unknownMP",
".",
"Inc",
"(",
")",
"\n",
"mdata",
".",
"PromDiscardedSamples",
".",
"WithLabelValues",
"(",
"unknownPointId",
",",
"strconv",
".",
"Itoa",
"(",
"int",
"(",
"point",
".",
"MKey",
".",
"Org",
")",
")",
")",
".",
"Inc",
"(",
")",
"\n",
"return",
"\n",
"}",
"\n\n",
"m",
":=",
"in",
".",
"metrics",
".",
"GetOrCreate",
"(",
"point",
".",
"MKey",
",",
"archive",
".",
"SchemaId",
",",
"archive",
".",
"AggId",
",",
"uint32",
"(",
"archive",
".",
"Interval",
")",
")",
"\n",
"m",
".",
"Add",
"(",
"point",
".",
"Time",
",",
"point",
".",
"Value",
")",
"\n",
"}"
] |
// ProcessMetricPoint updates the index if possible, and stores the data if we have an index entry
// concurrency-safe.
|
[
"ProcessMetricPoint",
"updates",
"the",
"index",
"if",
"possible",
"and",
"stores",
"the",
"data",
"if",
"we",
"have",
"an",
"index",
"entry",
"concurrency",
"-",
"safe",
"."
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/input/input.go#L72-L97
|
train
|
grafana/metrictank
|
input/input.go
|
ProcessMetricData
|
func (in DefaultHandler) ProcessMetricData(md *schema.MetricData, partition int32) {
in.receivedMD.Inc()
err := md.Validate()
if err != nil {
in.invalidMD.Inc()
log.Debugf("in: Invalid metric %v: %s", md, err)
var reason string
switch err {
case schema.ErrInvalidIntervalzero:
reason = invalidInterval
case schema.ErrInvalidOrgIdzero:
reason = invalidOrgId
case schema.ErrInvalidEmptyName:
reason = invalidName
case schema.ErrInvalidMtype:
reason = invalidMtype
case schema.ErrInvalidTagFormat:
reason = invalidTagFormat
default:
reason = "unknown"
}
mdata.PromDiscardedSamples.WithLabelValues(reason, strconv.Itoa(md.OrgId)).Inc()
return
}
// in cassandra we store timestamps and interval as 32bit signed integers.
// math.MaxInt32 = Jan 19 03:14:07 UTC 2038
if md.Time <= 0 || md.Time >= math.MaxInt32 {
in.invalidMD.Inc()
mdata.PromDiscardedSamples.WithLabelValues(invalidTimestamp, strconv.Itoa(md.OrgId)).Inc()
log.Warnf("in: invalid metric %q: .Time %d out of range", md.Id, md.Time)
return
}
if md.Interval <= 0 || md.Interval >= math.MaxInt32 {
in.invalidMD.Inc()
mdata.PromDiscardedSamples.WithLabelValues(invalidInterval, strconv.Itoa(md.OrgId)).Inc()
log.Warnf("in: invalid metric %q. .Interval %d out of range", md.Id, md.Interval)
return
}
mkey, err := schema.MKeyFromString(md.Id)
if err != nil {
log.Errorf("in: Invalid metric %v: could not parse ID: %s", md, err)
return
}
archive, _, _ := in.metricIndex.AddOrUpdate(mkey, md, partition)
m := in.metrics.GetOrCreate(mkey, archive.SchemaId, archive.AggId, uint32(md.Interval))
m.Add(uint32(md.Time), md.Value)
}
|
go
|
func (in DefaultHandler) ProcessMetricData(md *schema.MetricData, partition int32) {
in.receivedMD.Inc()
err := md.Validate()
if err != nil {
in.invalidMD.Inc()
log.Debugf("in: Invalid metric %v: %s", md, err)
var reason string
switch err {
case schema.ErrInvalidIntervalzero:
reason = invalidInterval
case schema.ErrInvalidOrgIdzero:
reason = invalidOrgId
case schema.ErrInvalidEmptyName:
reason = invalidName
case schema.ErrInvalidMtype:
reason = invalidMtype
case schema.ErrInvalidTagFormat:
reason = invalidTagFormat
default:
reason = "unknown"
}
mdata.PromDiscardedSamples.WithLabelValues(reason, strconv.Itoa(md.OrgId)).Inc()
return
}
// in cassandra we store timestamps and interval as 32bit signed integers.
// math.MaxInt32 = Jan 19 03:14:07 UTC 2038
if md.Time <= 0 || md.Time >= math.MaxInt32 {
in.invalidMD.Inc()
mdata.PromDiscardedSamples.WithLabelValues(invalidTimestamp, strconv.Itoa(md.OrgId)).Inc()
log.Warnf("in: invalid metric %q: .Time %d out of range", md.Id, md.Time)
return
}
if md.Interval <= 0 || md.Interval >= math.MaxInt32 {
in.invalidMD.Inc()
mdata.PromDiscardedSamples.WithLabelValues(invalidInterval, strconv.Itoa(md.OrgId)).Inc()
log.Warnf("in: invalid metric %q. .Interval %d out of range", md.Id, md.Interval)
return
}
mkey, err := schema.MKeyFromString(md.Id)
if err != nil {
log.Errorf("in: Invalid metric %v: could not parse ID: %s", md, err)
return
}
archive, _, _ := in.metricIndex.AddOrUpdate(mkey, md, partition)
m := in.metrics.GetOrCreate(mkey, archive.SchemaId, archive.AggId, uint32(md.Interval))
m.Add(uint32(md.Time), md.Value)
}
|
[
"func",
"(",
"in",
"DefaultHandler",
")",
"ProcessMetricData",
"(",
"md",
"*",
"schema",
".",
"MetricData",
",",
"partition",
"int32",
")",
"{",
"in",
".",
"receivedMD",
".",
"Inc",
"(",
")",
"\n",
"err",
":=",
"md",
".",
"Validate",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"in",
".",
"invalidMD",
".",
"Inc",
"(",
")",
"\n",
"log",
".",
"Debugf",
"(",
"\"",
"\"",
",",
"md",
",",
"err",
")",
"\n\n",
"var",
"reason",
"string",
"\n",
"switch",
"err",
"{",
"case",
"schema",
".",
"ErrInvalidIntervalzero",
":",
"reason",
"=",
"invalidInterval",
"\n",
"case",
"schema",
".",
"ErrInvalidOrgIdzero",
":",
"reason",
"=",
"invalidOrgId",
"\n",
"case",
"schema",
".",
"ErrInvalidEmptyName",
":",
"reason",
"=",
"invalidName",
"\n",
"case",
"schema",
".",
"ErrInvalidMtype",
":",
"reason",
"=",
"invalidMtype",
"\n",
"case",
"schema",
".",
"ErrInvalidTagFormat",
":",
"reason",
"=",
"invalidTagFormat",
"\n",
"default",
":",
"reason",
"=",
"\"",
"\"",
"\n",
"}",
"\n",
"mdata",
".",
"PromDiscardedSamples",
".",
"WithLabelValues",
"(",
"reason",
",",
"strconv",
".",
"Itoa",
"(",
"md",
".",
"OrgId",
")",
")",
".",
"Inc",
"(",
")",
"\n\n",
"return",
"\n",
"}",
"\n",
"// in cassandra we store timestamps and interval as 32bit signed integers.",
"// math.MaxInt32 = Jan 19 03:14:07 UTC 2038",
"if",
"md",
".",
"Time",
"<=",
"0",
"||",
"md",
".",
"Time",
">=",
"math",
".",
"MaxInt32",
"{",
"in",
".",
"invalidMD",
".",
"Inc",
"(",
")",
"\n",
"mdata",
".",
"PromDiscardedSamples",
".",
"WithLabelValues",
"(",
"invalidTimestamp",
",",
"strconv",
".",
"Itoa",
"(",
"md",
".",
"OrgId",
")",
")",
".",
"Inc",
"(",
")",
"\n",
"log",
".",
"Warnf",
"(",
"\"",
"\"",
",",
"md",
".",
"Id",
",",
"md",
".",
"Time",
")",
"\n",
"return",
"\n",
"}",
"\n",
"if",
"md",
".",
"Interval",
"<=",
"0",
"||",
"md",
".",
"Interval",
">=",
"math",
".",
"MaxInt32",
"{",
"in",
".",
"invalidMD",
".",
"Inc",
"(",
")",
"\n",
"mdata",
".",
"PromDiscardedSamples",
".",
"WithLabelValues",
"(",
"invalidInterval",
",",
"strconv",
".",
"Itoa",
"(",
"md",
".",
"OrgId",
")",
")",
".",
"Inc",
"(",
")",
"\n",
"log",
".",
"Warnf",
"(",
"\"",
"\"",
",",
"md",
".",
"Id",
",",
"md",
".",
"Interval",
")",
"\n",
"return",
"\n",
"}",
"\n\n",
"mkey",
",",
"err",
":=",
"schema",
".",
"MKeyFromString",
"(",
"md",
".",
"Id",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"log",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"md",
",",
"err",
")",
"\n",
"return",
"\n",
"}",
"\n\n",
"archive",
",",
"_",
",",
"_",
":=",
"in",
".",
"metricIndex",
".",
"AddOrUpdate",
"(",
"mkey",
",",
"md",
",",
"partition",
")",
"\n\n",
"m",
":=",
"in",
".",
"metrics",
".",
"GetOrCreate",
"(",
"mkey",
",",
"archive",
".",
"SchemaId",
",",
"archive",
".",
"AggId",
",",
"uint32",
"(",
"md",
".",
"Interval",
")",
")",
"\n",
"m",
".",
"Add",
"(",
"uint32",
"(",
"md",
".",
"Time",
")",
",",
"md",
".",
"Value",
")",
"\n",
"}"
] |
// ProcessMetricData assures the data is stored and the metadata is in the index
// concurrency-safe.
|
[
"ProcessMetricData",
"assures",
"the",
"data",
"is",
"stored",
"and",
"the",
"metadata",
"is",
"in",
"the",
"index",
"concurrency",
"-",
"safe",
"."
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/input/input.go#L101-L152
|
train
|
grafana/metrictank
|
idx/bigtable/schema.go
|
FormatRowKey
|
func FormatRowKey(mkey schema.MKey, partition int32) string {
return strconv.Itoa(int(partition)) + "_" + mkey.String()
}
|
go
|
func FormatRowKey(mkey schema.MKey, partition int32) string {
return strconv.Itoa(int(partition)) + "_" + mkey.String()
}
|
[
"func",
"FormatRowKey",
"(",
"mkey",
"schema",
".",
"MKey",
",",
"partition",
"int32",
")",
"string",
"{",
"return",
"strconv",
".",
"Itoa",
"(",
"int",
"(",
"partition",
")",
")",
"+",
"\"",
"\"",
"+",
"mkey",
".",
"String",
"(",
")",
"\n",
"}"
] |
// FormatRowKey formats an MKey and partition into a rowKey
|
[
"FormatRowKey",
"formats",
"an",
"MKey",
"and",
"partition",
"into",
"a",
"rowKey"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/idx/bigtable/schema.go#L16-L18
|
train
|
grafana/metrictank
|
idx/bigtable/schema.go
|
SchemaToRow
|
func SchemaToRow(def *schema.MetricDefinition) (string, map[string][]byte) {
row := map[string][]byte{
//"Id" omitted as it is part of the rowKey
"OrgId": make([]byte, 8),
"Name": []byte(def.Name),
"Interval": make([]byte, 8),
"Unit": []byte(def.Unit),
"Mtype": []byte(def.Mtype),
"Tags": []byte(strings.Join(def.Tags, ";")),
"LastUpdate": make([]byte, 8),
//"Partition" omitted as it is part of te rowKey
}
binary.PutVarint(row["OrgId"], int64(def.OrgId))
binary.PutVarint(row["Interval"], int64(def.Interval))
binary.PutVarint(row["LastUpdate"], def.LastUpdate)
return FormatRowKey(def.Id, def.Partition), row
}
|
go
|
func SchemaToRow(def *schema.MetricDefinition) (string, map[string][]byte) {
row := map[string][]byte{
//"Id" omitted as it is part of the rowKey
"OrgId": make([]byte, 8),
"Name": []byte(def.Name),
"Interval": make([]byte, 8),
"Unit": []byte(def.Unit),
"Mtype": []byte(def.Mtype),
"Tags": []byte(strings.Join(def.Tags, ";")),
"LastUpdate": make([]byte, 8),
//"Partition" omitted as it is part of te rowKey
}
binary.PutVarint(row["OrgId"], int64(def.OrgId))
binary.PutVarint(row["Interval"], int64(def.Interval))
binary.PutVarint(row["LastUpdate"], def.LastUpdate)
return FormatRowKey(def.Id, def.Partition), row
}
|
[
"func",
"SchemaToRow",
"(",
"def",
"*",
"schema",
".",
"MetricDefinition",
")",
"(",
"string",
",",
"map",
"[",
"string",
"]",
"[",
"]",
"byte",
")",
"{",
"row",
":=",
"map",
"[",
"string",
"]",
"[",
"]",
"byte",
"{",
"//\"Id\" omitted as it is part of the rowKey",
"\"",
"\"",
":",
"make",
"(",
"[",
"]",
"byte",
",",
"8",
")",
",",
"\"",
"\"",
":",
"[",
"]",
"byte",
"(",
"def",
".",
"Name",
")",
",",
"\"",
"\"",
":",
"make",
"(",
"[",
"]",
"byte",
",",
"8",
")",
",",
"\"",
"\"",
":",
"[",
"]",
"byte",
"(",
"def",
".",
"Unit",
")",
",",
"\"",
"\"",
":",
"[",
"]",
"byte",
"(",
"def",
".",
"Mtype",
")",
",",
"\"",
"\"",
":",
"[",
"]",
"byte",
"(",
"strings",
".",
"Join",
"(",
"def",
".",
"Tags",
",",
"\"",
"\"",
")",
")",
",",
"\"",
"\"",
":",
"make",
"(",
"[",
"]",
"byte",
",",
"8",
")",
",",
"//\"Partition\" omitted as it is part of te rowKey",
"}",
"\n",
"binary",
".",
"PutVarint",
"(",
"row",
"[",
"\"",
"\"",
"]",
",",
"int64",
"(",
"def",
".",
"OrgId",
")",
")",
"\n",
"binary",
".",
"PutVarint",
"(",
"row",
"[",
"\"",
"\"",
"]",
",",
"int64",
"(",
"def",
".",
"Interval",
")",
")",
"\n",
"binary",
".",
"PutVarint",
"(",
"row",
"[",
"\"",
"\"",
"]",
",",
"def",
".",
"LastUpdate",
")",
"\n",
"return",
"FormatRowKey",
"(",
"def",
".",
"Id",
",",
"def",
".",
"Partition",
")",
",",
"row",
"\n",
"}"
] |
// SchemaToRow takes a metricDefintion and returns a rowKey and column data.
|
[
"SchemaToRow",
"takes",
"a",
"metricDefintion",
"and",
"returns",
"a",
"rowKey",
"and",
"column",
"data",
"."
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/idx/bigtable/schema.go#L21-L37
|
train
|
grafana/metrictank
|
idx/bigtable/schema.go
|
DecodeRowKey
|
func DecodeRowKey(key string) (schema.MKey, int32, error) {
parts := strings.SplitN(key, "_", 2)
partition, err := strconv.Atoi(parts[0])
if err != nil {
return schema.MKey{}, 0, err
}
mkey, err := schema.MKeyFromString(parts[1])
if err != nil {
return schema.MKey{}, 0, err
}
return mkey, int32(partition), nil
}
|
go
|
func DecodeRowKey(key string) (schema.MKey, int32, error) {
parts := strings.SplitN(key, "_", 2)
partition, err := strconv.Atoi(parts[0])
if err != nil {
return schema.MKey{}, 0, err
}
mkey, err := schema.MKeyFromString(parts[1])
if err != nil {
return schema.MKey{}, 0, err
}
return mkey, int32(partition), nil
}
|
[
"func",
"DecodeRowKey",
"(",
"key",
"string",
")",
"(",
"schema",
".",
"MKey",
",",
"int32",
",",
"error",
")",
"{",
"parts",
":=",
"strings",
".",
"SplitN",
"(",
"key",
",",
"\"",
"\"",
",",
"2",
")",
"\n",
"partition",
",",
"err",
":=",
"strconv",
".",
"Atoi",
"(",
"parts",
"[",
"0",
"]",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"schema",
".",
"MKey",
"{",
"}",
",",
"0",
",",
"err",
"\n",
"}",
"\n",
"mkey",
",",
"err",
":=",
"schema",
".",
"MKeyFromString",
"(",
"parts",
"[",
"1",
"]",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"schema",
".",
"MKey",
"{",
"}",
",",
"0",
",",
"err",
"\n",
"}",
"\n",
"return",
"mkey",
",",
"int32",
"(",
"partition",
")",
",",
"nil",
"\n",
"}"
] |
// DecodeRowKey takes a rowKey string and returns the corresponding MKey and partition
|
[
"DecodeRowKey",
"takes",
"a",
"rowKey",
"string",
"and",
"returns",
"the",
"corresponding",
"MKey",
"and",
"partition"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/idx/bigtable/schema.go#L40-L51
|
train
|
grafana/metrictank
|
idx/bigtable/schema.go
|
RowToSchema
|
func RowToSchema(row bigtable.Row, def *schema.MetricDefinition) error {
if def == nil {
return fmt.Errorf("cant write row to nil MetricDefinition")
}
columns, ok := row[COLUMN_FAMILY]
if !ok {
return fmt.Errorf("no columns in columnFamly %s", COLUMN_FAMILY)
}
*def = schema.MetricDefinition{}
var err error
var val int64
def.Id, def.Partition, err = DecodeRowKey(row.Key())
if err != nil {
return err
}
for _, col := range columns {
switch strings.SplitN(col.Column, ":", 2)[1] {
case "OrgId":
val, err = binary.ReadVarint(bytes.NewReader(col.Value))
if err != nil {
return err
}
if val < 0 {
def.OrgId = idx.OrgIdPublic
} else {
def.OrgId = uint32(val)
}
case "Name":
def.Name = string(col.Value)
case "Interval":
val, err = binary.ReadVarint(bytes.NewReader(col.Value))
if err != nil {
return err
}
def.Interval = int(val)
case "Unit":
def.Unit = string(col.Value)
case "Mtype":
def.Mtype = string(col.Value)
case "Tags":
if len(col.Value) == 0 {
def.Tags = nil
} else {
def.Tags = strings.Split(string(col.Value), ";")
}
case "LastUpdate":
def.LastUpdate, err = binary.ReadVarint(bytes.NewReader(col.Value))
if err != nil {
return err
}
default:
return fmt.Errorf("unknown column: %s", col.Column)
}
}
return nil
}
|
go
|
func RowToSchema(row bigtable.Row, def *schema.MetricDefinition) error {
if def == nil {
return fmt.Errorf("cant write row to nil MetricDefinition")
}
columns, ok := row[COLUMN_FAMILY]
if !ok {
return fmt.Errorf("no columns in columnFamly %s", COLUMN_FAMILY)
}
*def = schema.MetricDefinition{}
var err error
var val int64
def.Id, def.Partition, err = DecodeRowKey(row.Key())
if err != nil {
return err
}
for _, col := range columns {
switch strings.SplitN(col.Column, ":", 2)[1] {
case "OrgId":
val, err = binary.ReadVarint(bytes.NewReader(col.Value))
if err != nil {
return err
}
if val < 0 {
def.OrgId = idx.OrgIdPublic
} else {
def.OrgId = uint32(val)
}
case "Name":
def.Name = string(col.Value)
case "Interval":
val, err = binary.ReadVarint(bytes.NewReader(col.Value))
if err != nil {
return err
}
def.Interval = int(val)
case "Unit":
def.Unit = string(col.Value)
case "Mtype":
def.Mtype = string(col.Value)
case "Tags":
if len(col.Value) == 0 {
def.Tags = nil
} else {
def.Tags = strings.Split(string(col.Value), ";")
}
case "LastUpdate":
def.LastUpdate, err = binary.ReadVarint(bytes.NewReader(col.Value))
if err != nil {
return err
}
default:
return fmt.Errorf("unknown column: %s", col.Column)
}
}
return nil
}
|
[
"func",
"RowToSchema",
"(",
"row",
"bigtable",
".",
"Row",
",",
"def",
"*",
"schema",
".",
"MetricDefinition",
")",
"error",
"{",
"if",
"def",
"==",
"nil",
"{",
"return",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
")",
"\n",
"}",
"\n",
"columns",
",",
"ok",
":=",
"row",
"[",
"COLUMN_FAMILY",
"]",
"\n",
"if",
"!",
"ok",
"{",
"return",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"COLUMN_FAMILY",
")",
"\n",
"}",
"\n",
"*",
"def",
"=",
"schema",
".",
"MetricDefinition",
"{",
"}",
"\n",
"var",
"err",
"error",
"\n",
"var",
"val",
"int64",
"\n\n",
"def",
".",
"Id",
",",
"def",
".",
"Partition",
",",
"err",
"=",
"DecodeRowKey",
"(",
"row",
".",
"Key",
"(",
")",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"for",
"_",
",",
"col",
":=",
"range",
"columns",
"{",
"switch",
"strings",
".",
"SplitN",
"(",
"col",
".",
"Column",
",",
"\"",
"\"",
",",
"2",
")",
"[",
"1",
"]",
"{",
"case",
"\"",
"\"",
":",
"val",
",",
"err",
"=",
"binary",
".",
"ReadVarint",
"(",
"bytes",
".",
"NewReader",
"(",
"col",
".",
"Value",
")",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"if",
"val",
"<",
"0",
"{",
"def",
".",
"OrgId",
"=",
"idx",
".",
"OrgIdPublic",
"\n",
"}",
"else",
"{",
"def",
".",
"OrgId",
"=",
"uint32",
"(",
"val",
")",
"\n",
"}",
"\n",
"case",
"\"",
"\"",
":",
"def",
".",
"Name",
"=",
"string",
"(",
"col",
".",
"Value",
")",
"\n",
"case",
"\"",
"\"",
":",
"val",
",",
"err",
"=",
"binary",
".",
"ReadVarint",
"(",
"bytes",
".",
"NewReader",
"(",
"col",
".",
"Value",
")",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"def",
".",
"Interval",
"=",
"int",
"(",
"val",
")",
"\n",
"case",
"\"",
"\"",
":",
"def",
".",
"Unit",
"=",
"string",
"(",
"col",
".",
"Value",
")",
"\n",
"case",
"\"",
"\"",
":",
"def",
".",
"Mtype",
"=",
"string",
"(",
"col",
".",
"Value",
")",
"\n",
"case",
"\"",
"\"",
":",
"if",
"len",
"(",
"col",
".",
"Value",
")",
"==",
"0",
"{",
"def",
".",
"Tags",
"=",
"nil",
"\n",
"}",
"else",
"{",
"def",
".",
"Tags",
"=",
"strings",
".",
"Split",
"(",
"string",
"(",
"col",
".",
"Value",
")",
",",
"\"",
"\"",
")",
"\n",
"}",
"\n",
"case",
"\"",
"\"",
":",
"def",
".",
"LastUpdate",
",",
"err",
"=",
"binary",
".",
"ReadVarint",
"(",
"bytes",
".",
"NewReader",
"(",
"col",
".",
"Value",
")",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"default",
":",
"return",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"col",
".",
"Column",
")",
"\n",
"}",
"\n",
"}",
"\n",
"return",
"nil",
"\n",
"}"
] |
// RowToSchema takes a row and unmarshals the data into the provided MetricDefinition.
|
[
"RowToSchema",
"takes",
"a",
"row",
"and",
"unmarshals",
"the",
"data",
"into",
"the",
"provided",
"MetricDefinition",
"."
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/idx/bigtable/schema.go#L54-L110
|
train
|
grafana/metrictank
|
consolidation/consolidation.go
|
String
|
func (c Consolidator) String() string {
switch c {
case None:
return "NoneConsolidator"
case Avg:
return "AverageConsolidator"
case Cnt:
return "CountConsolidator"
case Lst:
return "LastConsolidator"
case Min:
return "MinimumConsolidator"
case Max:
return "MaximumConsolidator"
case Mult:
return "MultiplyConsolidator"
case Med:
return "MedianConsolidator"
case Diff:
return "DifferenceConsolidator"
case StdDev:
return "StdDevConsolidator"
case Range:
return "RangeConsolidator"
case Sum:
return "SumConsolidator"
}
panic(fmt.Sprintf("Consolidator.String(): unknown consolidator %d", c))
}
|
go
|
func (c Consolidator) String() string {
switch c {
case None:
return "NoneConsolidator"
case Avg:
return "AverageConsolidator"
case Cnt:
return "CountConsolidator"
case Lst:
return "LastConsolidator"
case Min:
return "MinimumConsolidator"
case Max:
return "MaximumConsolidator"
case Mult:
return "MultiplyConsolidator"
case Med:
return "MedianConsolidator"
case Diff:
return "DifferenceConsolidator"
case StdDev:
return "StdDevConsolidator"
case Range:
return "RangeConsolidator"
case Sum:
return "SumConsolidator"
}
panic(fmt.Sprintf("Consolidator.String(): unknown consolidator %d", c))
}
|
[
"func",
"(",
"c",
"Consolidator",
")",
"String",
"(",
")",
"string",
"{",
"switch",
"c",
"{",
"case",
"None",
":",
"return",
"\"",
"\"",
"\n",
"case",
"Avg",
":",
"return",
"\"",
"\"",
"\n",
"case",
"Cnt",
":",
"return",
"\"",
"\"",
"\n",
"case",
"Lst",
":",
"return",
"\"",
"\"",
"\n",
"case",
"Min",
":",
"return",
"\"",
"\"",
"\n",
"case",
"Max",
":",
"return",
"\"",
"\"",
"\n",
"case",
"Mult",
":",
"return",
"\"",
"\"",
"\n",
"case",
"Med",
":",
"return",
"\"",
"\"",
"\n",
"case",
"Diff",
":",
"return",
"\"",
"\"",
"\n",
"case",
"StdDev",
":",
"return",
"\"",
"\"",
"\n",
"case",
"Range",
":",
"return",
"\"",
"\"",
"\n",
"case",
"Sum",
":",
"return",
"\"",
"\"",
"\n",
"}",
"\n",
"panic",
"(",
"fmt",
".",
"Sprintf",
"(",
"\"",
"\"",
",",
"c",
")",
")",
"\n",
"}"
] |
// String provides human friendly names
|
[
"String",
"provides",
"human",
"friendly",
"names"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/consolidation/consolidation.go#L36-L64
|
train
|
grafana/metrictank
|
consolidation/consolidation.go
|
Archive
|
func (c Consolidator) Archive() schema.Method {
switch c {
case None:
panic("cannot get an archive for no consolidation")
case Avg:
panic("avg consolidator has no matching Archive(). you need sum and cnt")
case Cnt:
return schema.Cnt
case Lst:
return schema.Lst
case Min:
return schema.Min
case Max:
return schema.Max
case Sum:
return schema.Sum
}
panic(fmt.Sprintf("Consolidator.Archive(): unknown consolidator %q", c))
}
|
go
|
func (c Consolidator) Archive() schema.Method {
switch c {
case None:
panic("cannot get an archive for no consolidation")
case Avg:
panic("avg consolidator has no matching Archive(). you need sum and cnt")
case Cnt:
return schema.Cnt
case Lst:
return schema.Lst
case Min:
return schema.Min
case Max:
return schema.Max
case Sum:
return schema.Sum
}
panic(fmt.Sprintf("Consolidator.Archive(): unknown consolidator %q", c))
}
|
[
"func",
"(",
"c",
"Consolidator",
")",
"Archive",
"(",
")",
"schema",
".",
"Method",
"{",
"switch",
"c",
"{",
"case",
"None",
":",
"panic",
"(",
"\"",
"\"",
")",
"\n",
"case",
"Avg",
":",
"panic",
"(",
"\"",
"\"",
")",
"\n",
"case",
"Cnt",
":",
"return",
"schema",
".",
"Cnt",
"\n",
"case",
"Lst",
":",
"return",
"schema",
".",
"Lst",
"\n",
"case",
"Min",
":",
"return",
"schema",
".",
"Min",
"\n",
"case",
"Max",
":",
"return",
"schema",
".",
"Max",
"\n",
"case",
"Sum",
":",
"return",
"schema",
".",
"Sum",
"\n",
"}",
"\n",
"panic",
"(",
"fmt",
".",
"Sprintf",
"(",
"\"",
"\"",
",",
"c",
")",
")",
"\n",
"}"
] |
// provide the name of a stored archive
// see aggregator.go for which archives are available
|
[
"provide",
"the",
"name",
"of",
"a",
"stored",
"archive",
"see",
"aggregator",
".",
"go",
"for",
"which",
"archives",
"are",
"available"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/consolidation/consolidation.go#L68-L86
|
train
|
grafana/metrictank
|
consolidation/consolidation.go
|
GetAggFunc
|
func GetAggFunc(consolidator Consolidator) batch.AggFunc {
var consFunc batch.AggFunc
switch consolidator {
case Avg:
consFunc = batch.Avg
case Cnt:
consFunc = batch.Cnt
case Lst:
consFunc = batch.Lst
case Min:
consFunc = batch.Min
case Max:
consFunc = batch.Max
case Mult:
consFunc = batch.Mult
case Med:
consFunc = batch.Med
case Diff:
consFunc = batch.Diff
case StdDev:
consFunc = batch.StdDev
case Range:
consFunc = batch.Range
case Sum:
consFunc = batch.Sum
}
return consFunc
}
|
go
|
func GetAggFunc(consolidator Consolidator) batch.AggFunc {
var consFunc batch.AggFunc
switch consolidator {
case Avg:
consFunc = batch.Avg
case Cnt:
consFunc = batch.Cnt
case Lst:
consFunc = batch.Lst
case Min:
consFunc = batch.Min
case Max:
consFunc = batch.Max
case Mult:
consFunc = batch.Mult
case Med:
consFunc = batch.Med
case Diff:
consFunc = batch.Diff
case StdDev:
consFunc = batch.StdDev
case Range:
consFunc = batch.Range
case Sum:
consFunc = batch.Sum
}
return consFunc
}
|
[
"func",
"GetAggFunc",
"(",
"consolidator",
"Consolidator",
")",
"batch",
".",
"AggFunc",
"{",
"var",
"consFunc",
"batch",
".",
"AggFunc",
"\n",
"switch",
"consolidator",
"{",
"case",
"Avg",
":",
"consFunc",
"=",
"batch",
".",
"Avg",
"\n",
"case",
"Cnt",
":",
"consFunc",
"=",
"batch",
".",
"Cnt",
"\n",
"case",
"Lst",
":",
"consFunc",
"=",
"batch",
".",
"Lst",
"\n",
"case",
"Min",
":",
"consFunc",
"=",
"batch",
".",
"Min",
"\n",
"case",
"Max",
":",
"consFunc",
"=",
"batch",
".",
"Max",
"\n",
"case",
"Mult",
":",
"consFunc",
"=",
"batch",
".",
"Mult",
"\n",
"case",
"Med",
":",
"consFunc",
"=",
"batch",
".",
"Med",
"\n",
"case",
"Diff",
":",
"consFunc",
"=",
"batch",
".",
"Diff",
"\n",
"case",
"StdDev",
":",
"consFunc",
"=",
"batch",
".",
"StdDev",
"\n",
"case",
"Range",
":",
"consFunc",
"=",
"batch",
".",
"Range",
"\n",
"case",
"Sum",
":",
"consFunc",
"=",
"batch",
".",
"Sum",
"\n",
"}",
"\n",
"return",
"consFunc",
"\n",
"}"
] |
// map the consolidation to the respective aggregation function, if applicable.
|
[
"map",
"the",
"consolidation",
"to",
"the",
"respective",
"aggregation",
"function",
"if",
"applicable",
"."
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/consolidation/consolidation.go#L133-L160
|
train
|
grafana/metrictank
|
idx/bigtable/bigtable.go
|
updateBigtable
|
func (b *BigtableIdx) updateBigtable(now uint32, inMemory bool, archive idx.Archive, partition int32) idx.Archive {
// if the entry has not been saved for 1.5x updateInterval
// then perform a blocking save.
if archive.LastSave < (now - b.cfg.updateInterval32 - (b.cfg.updateInterval32 / 2)) {
log.Debugf("bigtable-idx: updating def %s in index.", archive.MetricDefinition.Id)
b.writeQueue <- writeReq{recvTime: time.Now(), def: &archive.MetricDefinition}
archive.LastSave = now
b.MemoryIndex.UpdateArchive(archive)
} else {
// perform a non-blocking write to the writeQueue. If the queue is full, then
// this will fail and we won't update the LastSave timestamp. The next time
// the metric is seen, the previous lastSave timestamp will still be in place and so
// we will try and save again. This will continue until we are successful or the
// lastSave timestamp become more then 1.5 x UpdateInterval, in which case we will
// do a blocking write to the queue.
select {
case b.writeQueue <- writeReq{recvTime: time.Now(), def: &archive.MetricDefinition}:
archive.LastSave = now
b.MemoryIndex.UpdateArchive(archive)
default:
statSaveSkipped.Inc()
log.Debugf("bigtable-idx: writeQueue is full, update of %s not saved this time", archive.MetricDefinition.Id)
}
}
return archive
}
|
go
|
func (b *BigtableIdx) updateBigtable(now uint32, inMemory bool, archive idx.Archive, partition int32) idx.Archive {
// if the entry has not been saved for 1.5x updateInterval
// then perform a blocking save.
if archive.LastSave < (now - b.cfg.updateInterval32 - (b.cfg.updateInterval32 / 2)) {
log.Debugf("bigtable-idx: updating def %s in index.", archive.MetricDefinition.Id)
b.writeQueue <- writeReq{recvTime: time.Now(), def: &archive.MetricDefinition}
archive.LastSave = now
b.MemoryIndex.UpdateArchive(archive)
} else {
// perform a non-blocking write to the writeQueue. If the queue is full, then
// this will fail and we won't update the LastSave timestamp. The next time
// the metric is seen, the previous lastSave timestamp will still be in place and so
// we will try and save again. This will continue until we are successful or the
// lastSave timestamp become more then 1.5 x UpdateInterval, in which case we will
// do a blocking write to the queue.
select {
case b.writeQueue <- writeReq{recvTime: time.Now(), def: &archive.MetricDefinition}:
archive.LastSave = now
b.MemoryIndex.UpdateArchive(archive)
default:
statSaveSkipped.Inc()
log.Debugf("bigtable-idx: writeQueue is full, update of %s not saved this time", archive.MetricDefinition.Id)
}
}
return archive
}
|
[
"func",
"(",
"b",
"*",
"BigtableIdx",
")",
"updateBigtable",
"(",
"now",
"uint32",
",",
"inMemory",
"bool",
",",
"archive",
"idx",
".",
"Archive",
",",
"partition",
"int32",
")",
"idx",
".",
"Archive",
"{",
"// if the entry has not been saved for 1.5x updateInterval",
"// then perform a blocking save.",
"if",
"archive",
".",
"LastSave",
"<",
"(",
"now",
"-",
"b",
".",
"cfg",
".",
"updateInterval32",
"-",
"(",
"b",
".",
"cfg",
".",
"updateInterval32",
"/",
"2",
")",
")",
"{",
"log",
".",
"Debugf",
"(",
"\"",
"\"",
",",
"archive",
".",
"MetricDefinition",
".",
"Id",
")",
"\n",
"b",
".",
"writeQueue",
"<-",
"writeReq",
"{",
"recvTime",
":",
"time",
".",
"Now",
"(",
")",
",",
"def",
":",
"&",
"archive",
".",
"MetricDefinition",
"}",
"\n",
"archive",
".",
"LastSave",
"=",
"now",
"\n",
"b",
".",
"MemoryIndex",
".",
"UpdateArchive",
"(",
"archive",
")",
"\n",
"}",
"else",
"{",
"// perform a non-blocking write to the writeQueue. If the queue is full, then",
"// this will fail and we won't update the LastSave timestamp. The next time",
"// the metric is seen, the previous lastSave timestamp will still be in place and so",
"// we will try and save again. This will continue until we are successful or the",
"// lastSave timestamp become more then 1.5 x UpdateInterval, in which case we will",
"// do a blocking write to the queue.",
"select",
"{",
"case",
"b",
".",
"writeQueue",
"<-",
"writeReq",
"{",
"recvTime",
":",
"time",
".",
"Now",
"(",
")",
",",
"def",
":",
"&",
"archive",
".",
"MetricDefinition",
"}",
":",
"archive",
".",
"LastSave",
"=",
"now",
"\n",
"b",
".",
"MemoryIndex",
".",
"UpdateArchive",
"(",
"archive",
")",
"\n",
"default",
":",
"statSaveSkipped",
".",
"Inc",
"(",
")",
"\n",
"log",
".",
"Debugf",
"(",
"\"",
"\"",
",",
"archive",
".",
"MetricDefinition",
".",
"Id",
")",
"\n",
"}",
"\n",
"}",
"\n\n",
"return",
"archive",
"\n",
"}"
] |
// updateBigtable saves the archive to bigtable and
// updates the memory index with the updated fields.
|
[
"updateBigtable",
"saves",
"the",
"archive",
"to",
"bigtable",
"and",
"updates",
"the",
"memory",
"index",
"with",
"the",
"updated",
"fields",
"."
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/idx/bigtable/bigtable.go#L282-L308
|
train
|
grafana/metrictank
|
mdata/cache/ccache_metric.go
|
NewCCacheMetric
|
func NewCCacheMetric(mkey schema.MKey) *CCacheMetric {
return &CCacheMetric{
MKey: mkey,
chunks: make(map[uint32]*CCacheChunk),
}
}
|
go
|
func NewCCacheMetric(mkey schema.MKey) *CCacheMetric {
return &CCacheMetric{
MKey: mkey,
chunks: make(map[uint32]*CCacheChunk),
}
}
|
[
"func",
"NewCCacheMetric",
"(",
"mkey",
"schema",
".",
"MKey",
")",
"*",
"CCacheMetric",
"{",
"return",
"&",
"CCacheMetric",
"{",
"MKey",
":",
"mkey",
",",
"chunks",
":",
"make",
"(",
"map",
"[",
"uint32",
"]",
"*",
"CCacheChunk",
")",
",",
"}",
"\n",
"}"
] |
// NewCCacheMetric creates a CCacheMetric
|
[
"NewCCacheMetric",
"creates",
"a",
"CCacheMetric"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/mdata/cache/ccache_metric.go#L29-L34
|
train
|
grafana/metrictank
|
mdata/cache/ccache_metric.go
|
Del
|
func (mc *CCacheMetric) Del(ts uint32) int {
mc.Lock()
defer mc.Unlock()
if _, ok := mc.chunks[ts]; !ok {
return len(mc.chunks)
}
prev := mc.chunks[ts].Prev
next := mc.chunks[ts].Next
if prev != 0 {
if _, ok := mc.chunks[prev]; ok {
mc.chunks[prev].Next = 0
}
}
if next != 0 {
if _, ok := mc.chunks[next]; ok {
mc.chunks[next].Prev = 0
}
}
delete(mc.chunks, ts)
// regenerate the list of sorted keys after deleting a chunk
// NOTE: we can improve perf by just taking out the ts (partially rewriting
// the slice in one go), can we also batch deletes?
mc.generateKeys()
return len(mc.chunks)
}
|
go
|
func (mc *CCacheMetric) Del(ts uint32) int {
mc.Lock()
defer mc.Unlock()
if _, ok := mc.chunks[ts]; !ok {
return len(mc.chunks)
}
prev := mc.chunks[ts].Prev
next := mc.chunks[ts].Next
if prev != 0 {
if _, ok := mc.chunks[prev]; ok {
mc.chunks[prev].Next = 0
}
}
if next != 0 {
if _, ok := mc.chunks[next]; ok {
mc.chunks[next].Prev = 0
}
}
delete(mc.chunks, ts)
// regenerate the list of sorted keys after deleting a chunk
// NOTE: we can improve perf by just taking out the ts (partially rewriting
// the slice in one go), can we also batch deletes?
mc.generateKeys()
return len(mc.chunks)
}
|
[
"func",
"(",
"mc",
"*",
"CCacheMetric",
")",
"Del",
"(",
"ts",
"uint32",
")",
"int",
"{",
"mc",
".",
"Lock",
"(",
")",
"\n",
"defer",
"mc",
".",
"Unlock",
"(",
")",
"\n\n",
"if",
"_",
",",
"ok",
":=",
"mc",
".",
"chunks",
"[",
"ts",
"]",
";",
"!",
"ok",
"{",
"return",
"len",
"(",
"mc",
".",
"chunks",
")",
"\n",
"}",
"\n\n",
"prev",
":=",
"mc",
".",
"chunks",
"[",
"ts",
"]",
".",
"Prev",
"\n",
"next",
":=",
"mc",
".",
"chunks",
"[",
"ts",
"]",
".",
"Next",
"\n\n",
"if",
"prev",
"!=",
"0",
"{",
"if",
"_",
",",
"ok",
":=",
"mc",
".",
"chunks",
"[",
"prev",
"]",
";",
"ok",
"{",
"mc",
".",
"chunks",
"[",
"prev",
"]",
".",
"Next",
"=",
"0",
"\n",
"}",
"\n",
"}",
"\n\n",
"if",
"next",
"!=",
"0",
"{",
"if",
"_",
",",
"ok",
":=",
"mc",
".",
"chunks",
"[",
"next",
"]",
";",
"ok",
"{",
"mc",
".",
"chunks",
"[",
"next",
"]",
".",
"Prev",
"=",
"0",
"\n",
"}",
"\n",
"}",
"\n\n",
"delete",
"(",
"mc",
".",
"chunks",
",",
"ts",
")",
"\n\n",
"// regenerate the list of sorted keys after deleting a chunk",
"// NOTE: we can improve perf by just taking out the ts (partially rewriting",
"// the slice in one go), can we also batch deletes?",
"mc",
".",
"generateKeys",
"(",
")",
"\n\n",
"return",
"len",
"(",
"mc",
".",
"chunks",
")",
"\n",
"}"
] |
// Del deletes chunks for the given timestamp
|
[
"Del",
"deletes",
"chunks",
"for",
"the",
"given",
"timestamp"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/mdata/cache/ccache_metric.go#L37-L68
|
train
|
grafana/metrictank
|
mdata/cache/ccache_metric.go
|
Add
|
func (mc *CCacheMetric) Add(prev uint32, itergen chunk.IterGen) {
ts := itergen.T0
mc.Lock()
defer mc.Unlock()
if _, ok := mc.chunks[ts]; ok {
// chunk is already present. no need to error on that, just ignore it
return
}
mc.chunks[ts] = &CCacheChunk{
Ts: ts,
Prev: 0,
Next: 0,
Itgen: itergen,
}
nextTs := mc.nextTs(ts)
log.Debugf("CCacheMetric Add: caching chunk ts %d, nextTs %d", ts, nextTs)
// if previous chunk has not been passed we try to be smart and figure it out.
// this is common in a scenario where a metric continuously gets queried
// for a range that starts less than one chunkspan before now().
res, ok := mc.seekDesc(ts - 1)
if ok {
if prev == 0 {
prev = res
} else if prev != res {
log.Warnf("CCacheMetric Add: 'prev' param disagrees with seek: key = %s, prev = %d, seek = %d",
mc.MKey.String(), prev, res)
}
}
// if the previous chunk is cached, link in both directions
if _, ok := mc.chunks[prev]; ok {
mc.chunks[prev].Next = ts
mc.chunks[ts].Prev = prev
}
// if nextTs() can't figure out the end date it returns ts
if nextTs > ts {
// if the next chunk is cached, link in both directions
if _, ok := mc.chunks[nextTs]; ok {
mc.chunks[nextTs].Prev = ts
mc.chunks[ts].Next = nextTs
}
}
// assure key is added to mc.keys
// if no keys yet, just add it and it's sorted
if len(mc.keys) == 0 {
mc.keys = append(mc.keys, ts)
return
}
// add the ts, and sort if necessary
mc.keys = append(mc.keys, ts)
if mc.keys[len(mc.keys)-1] < mc.keys[len(mc.keys)-2] {
sort.Sort(accnt.Uint32Asc(mc.keys))
}
}
|
go
|
func (mc *CCacheMetric) Add(prev uint32, itergen chunk.IterGen) {
ts := itergen.T0
mc.Lock()
defer mc.Unlock()
if _, ok := mc.chunks[ts]; ok {
// chunk is already present. no need to error on that, just ignore it
return
}
mc.chunks[ts] = &CCacheChunk{
Ts: ts,
Prev: 0,
Next: 0,
Itgen: itergen,
}
nextTs := mc.nextTs(ts)
log.Debugf("CCacheMetric Add: caching chunk ts %d, nextTs %d", ts, nextTs)
// if previous chunk has not been passed we try to be smart and figure it out.
// this is common in a scenario where a metric continuously gets queried
// for a range that starts less than one chunkspan before now().
res, ok := mc.seekDesc(ts - 1)
if ok {
if prev == 0 {
prev = res
} else if prev != res {
log.Warnf("CCacheMetric Add: 'prev' param disagrees with seek: key = %s, prev = %d, seek = %d",
mc.MKey.String(), prev, res)
}
}
// if the previous chunk is cached, link in both directions
if _, ok := mc.chunks[prev]; ok {
mc.chunks[prev].Next = ts
mc.chunks[ts].Prev = prev
}
// if nextTs() can't figure out the end date it returns ts
if nextTs > ts {
// if the next chunk is cached, link in both directions
if _, ok := mc.chunks[nextTs]; ok {
mc.chunks[nextTs].Prev = ts
mc.chunks[ts].Next = nextTs
}
}
// assure key is added to mc.keys
// if no keys yet, just add it and it's sorted
if len(mc.keys) == 0 {
mc.keys = append(mc.keys, ts)
return
}
// add the ts, and sort if necessary
mc.keys = append(mc.keys, ts)
if mc.keys[len(mc.keys)-1] < mc.keys[len(mc.keys)-2] {
sort.Sort(accnt.Uint32Asc(mc.keys))
}
}
|
[
"func",
"(",
"mc",
"*",
"CCacheMetric",
")",
"Add",
"(",
"prev",
"uint32",
",",
"itergen",
"chunk",
".",
"IterGen",
")",
"{",
"ts",
":=",
"itergen",
".",
"T0",
"\n\n",
"mc",
".",
"Lock",
"(",
")",
"\n",
"defer",
"mc",
".",
"Unlock",
"(",
")",
"\n\n",
"if",
"_",
",",
"ok",
":=",
"mc",
".",
"chunks",
"[",
"ts",
"]",
";",
"ok",
"{",
"// chunk is already present. no need to error on that, just ignore it",
"return",
"\n",
"}",
"\n\n",
"mc",
".",
"chunks",
"[",
"ts",
"]",
"=",
"&",
"CCacheChunk",
"{",
"Ts",
":",
"ts",
",",
"Prev",
":",
"0",
",",
"Next",
":",
"0",
",",
"Itgen",
":",
"itergen",
",",
"}",
"\n\n",
"nextTs",
":=",
"mc",
".",
"nextTs",
"(",
"ts",
")",
"\n\n",
"log",
".",
"Debugf",
"(",
"\"",
"\"",
",",
"ts",
",",
"nextTs",
")",
"\n\n",
"// if previous chunk has not been passed we try to be smart and figure it out.",
"// this is common in a scenario where a metric continuously gets queried",
"// for a range that starts less than one chunkspan before now().",
"res",
",",
"ok",
":=",
"mc",
".",
"seekDesc",
"(",
"ts",
"-",
"1",
")",
"\n",
"if",
"ok",
"{",
"if",
"prev",
"==",
"0",
"{",
"prev",
"=",
"res",
"\n",
"}",
"else",
"if",
"prev",
"!=",
"res",
"{",
"log",
".",
"Warnf",
"(",
"\"",
"\"",
",",
"mc",
".",
"MKey",
".",
"String",
"(",
")",
",",
"prev",
",",
"res",
")",
"\n",
"}",
"\n",
"}",
"\n\n",
"// if the previous chunk is cached, link in both directions",
"if",
"_",
",",
"ok",
":=",
"mc",
".",
"chunks",
"[",
"prev",
"]",
";",
"ok",
"{",
"mc",
".",
"chunks",
"[",
"prev",
"]",
".",
"Next",
"=",
"ts",
"\n",
"mc",
".",
"chunks",
"[",
"ts",
"]",
".",
"Prev",
"=",
"prev",
"\n",
"}",
"\n\n",
"// if nextTs() can't figure out the end date it returns ts",
"if",
"nextTs",
">",
"ts",
"{",
"// if the next chunk is cached, link in both directions",
"if",
"_",
",",
"ok",
":=",
"mc",
".",
"chunks",
"[",
"nextTs",
"]",
";",
"ok",
"{",
"mc",
".",
"chunks",
"[",
"nextTs",
"]",
".",
"Prev",
"=",
"ts",
"\n",
"mc",
".",
"chunks",
"[",
"ts",
"]",
".",
"Next",
"=",
"nextTs",
"\n",
"}",
"\n",
"}",
"\n\n",
"// assure key is added to mc.keys",
"// if no keys yet, just add it and it's sorted",
"if",
"len",
"(",
"mc",
".",
"keys",
")",
"==",
"0",
"{",
"mc",
".",
"keys",
"=",
"append",
"(",
"mc",
".",
"keys",
",",
"ts",
")",
"\n",
"return",
"\n",
"}",
"\n\n",
"// add the ts, and sort if necessary",
"mc",
".",
"keys",
"=",
"append",
"(",
"mc",
".",
"keys",
",",
"ts",
")",
"\n",
"if",
"mc",
".",
"keys",
"[",
"len",
"(",
"mc",
".",
"keys",
")",
"-",
"1",
"]",
"<",
"mc",
".",
"keys",
"[",
"len",
"(",
"mc",
".",
"keys",
")",
"-",
"2",
"]",
"{",
"sort",
".",
"Sort",
"(",
"accnt",
".",
"Uint32Asc",
"(",
"mc",
".",
"keys",
")",
")",
"\n",
"}",
"\n",
"}"
] |
// Add adds a chunk to the cache
|
[
"Add",
"adds",
"a",
"chunk",
"to",
"the",
"cache"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/mdata/cache/ccache_metric.go#L192-L255
|
train
|
grafana/metrictank
|
mdata/cache/ccache_metric.go
|
generateKeys
|
func (mc *CCacheMetric) generateKeys() {
keys := make([]uint32, 0, len(mc.chunks))
for k := range mc.chunks {
keys = append(keys, k)
}
sort.Sort(accnt.Uint32Asc(keys))
mc.keys = keys
}
|
go
|
func (mc *CCacheMetric) generateKeys() {
keys := make([]uint32, 0, len(mc.chunks))
for k := range mc.chunks {
keys = append(keys, k)
}
sort.Sort(accnt.Uint32Asc(keys))
mc.keys = keys
}
|
[
"func",
"(",
"mc",
"*",
"CCacheMetric",
")",
"generateKeys",
"(",
")",
"{",
"keys",
":=",
"make",
"(",
"[",
"]",
"uint32",
",",
"0",
",",
"len",
"(",
"mc",
".",
"chunks",
")",
")",
"\n",
"for",
"k",
":=",
"range",
"mc",
".",
"chunks",
"{",
"keys",
"=",
"append",
"(",
"keys",
",",
"k",
")",
"\n",
"}",
"\n",
"sort",
".",
"Sort",
"(",
"accnt",
".",
"Uint32Asc",
"(",
"keys",
")",
")",
"\n",
"mc",
".",
"keys",
"=",
"keys",
"\n",
"}"
] |
// generateKeys generates sorted slice of all chunk timestamps
// assumes we have at least read lock
|
[
"generateKeys",
"generates",
"sorted",
"slice",
"of",
"all",
"chunk",
"timestamps",
"assumes",
"we",
"have",
"at",
"least",
"read",
"lock"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/mdata/cache/ccache_metric.go#L259-L266
|
train
|
grafana/metrictank
|
mdata/cache/ccache_metric.go
|
lastTs
|
func (mc *CCacheMetric) lastTs() uint32 {
mc.RLock()
defer mc.RUnlock()
return mc.nextTs(mc.keys[len(mc.keys)-1])
}
|
go
|
func (mc *CCacheMetric) lastTs() uint32 {
mc.RLock()
defer mc.RUnlock()
return mc.nextTs(mc.keys[len(mc.keys)-1])
}
|
[
"func",
"(",
"mc",
"*",
"CCacheMetric",
")",
"lastTs",
"(",
")",
"uint32",
"{",
"mc",
".",
"RLock",
"(",
")",
"\n",
"defer",
"mc",
".",
"RUnlock",
"(",
")",
"\n",
"return",
"mc",
".",
"nextTs",
"(",
"mc",
".",
"keys",
"[",
"len",
"(",
"mc",
".",
"keys",
")",
"-",
"1",
"]",
")",
"\n",
"}"
] |
// lastTs returns the last Ts of this metric cache
// since ranges are exclusive at the end this is actually the first Ts that is not cached
|
[
"lastTs",
"returns",
"the",
"last",
"Ts",
"of",
"this",
"metric",
"cache",
"since",
"ranges",
"are",
"exclusive",
"at",
"the",
"end",
"this",
"is",
"actually",
"the",
"first",
"Ts",
"that",
"is",
"not",
"cached"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/mdata/cache/ccache_metric.go#L299-L303
|
train
|
grafana/metrictank
|
mdata/cache/ccache_metric.go
|
seekAsc
|
func (mc *CCacheMetric) seekAsc(ts uint32) (uint32, bool) {
log.Debugf("CCacheMetric seekAsc: seeking for %d in the keys %+d", ts, mc.keys)
for i := 0; i < len(mc.keys) && mc.keys[i] <= ts; i++ {
if mc.nextTs(mc.keys[i]) > ts {
log.Debugf("CCacheMetric seekAsc: seek found ts %d is between %d and %d", ts, mc.keys[i], mc.nextTs(mc.keys[i]))
return mc.keys[i], true
}
}
log.Debug("CCacheMetric seekAsc: seekAsc unsuccessful")
return 0, false
}
|
go
|
func (mc *CCacheMetric) seekAsc(ts uint32) (uint32, bool) {
log.Debugf("CCacheMetric seekAsc: seeking for %d in the keys %+d", ts, mc.keys)
for i := 0; i < len(mc.keys) && mc.keys[i] <= ts; i++ {
if mc.nextTs(mc.keys[i]) > ts {
log.Debugf("CCacheMetric seekAsc: seek found ts %d is between %d and %d", ts, mc.keys[i], mc.nextTs(mc.keys[i]))
return mc.keys[i], true
}
}
log.Debug("CCacheMetric seekAsc: seekAsc unsuccessful")
return 0, false
}
|
[
"func",
"(",
"mc",
"*",
"CCacheMetric",
")",
"seekAsc",
"(",
"ts",
"uint32",
")",
"(",
"uint32",
",",
"bool",
")",
"{",
"log",
".",
"Debugf",
"(",
"\"",
"\"",
",",
"ts",
",",
"mc",
".",
"keys",
")",
"\n\n",
"for",
"i",
":=",
"0",
";",
"i",
"<",
"len",
"(",
"mc",
".",
"keys",
")",
"&&",
"mc",
".",
"keys",
"[",
"i",
"]",
"<=",
"ts",
";",
"i",
"++",
"{",
"if",
"mc",
".",
"nextTs",
"(",
"mc",
".",
"keys",
"[",
"i",
"]",
")",
">",
"ts",
"{",
"log",
".",
"Debugf",
"(",
"\"",
"\"",
",",
"ts",
",",
"mc",
".",
"keys",
"[",
"i",
"]",
",",
"mc",
".",
"nextTs",
"(",
"mc",
".",
"keys",
"[",
"i",
"]",
")",
")",
"\n",
"return",
"mc",
".",
"keys",
"[",
"i",
"]",
",",
"true",
"\n",
"}",
"\n",
"}",
"\n\n",
"log",
".",
"Debug",
"(",
"\"",
"\"",
")",
"\n",
"return",
"0",
",",
"false",
"\n",
"}"
] |
// seekAsc finds the t0 of the chunk that contains ts, by searching from old to recent
// if not found or can't be sure returns 0, false
// assumes we already have at least a read lock
|
[
"seekAsc",
"finds",
"the",
"t0",
"of",
"the",
"chunk",
"that",
"contains",
"ts",
"by",
"searching",
"from",
"old",
"to",
"recent",
"if",
"not",
"found",
"or",
"can",
"t",
"be",
"sure",
"returns",
"0",
"false",
"assumes",
"we",
"already",
"have",
"at",
"least",
"a",
"read",
"lock"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/mdata/cache/ccache_metric.go#L308-L320
|
train
|
grafana/metrictank
|
stats/out_graphite.go
|
writer
|
func (g *Graphite) writer() {
var conn net.Conn
var err error
var wg sync.WaitGroup
assureConn := func() {
connected.Set(conn != nil)
for conn == nil {
time.Sleep(time.Second)
conn, err = net.Dial("tcp", g.addr)
if err == nil {
log.Infof("stats now connected to %s", g.addr)
wg.Add(1)
go g.checkEOF(conn, &wg)
} else {
log.Warnf("stats dialing %s failed: %s. will retry", g.addr, err.Error())
}
connected.Set(conn != nil)
}
}
for buf := range g.toGraphite {
queueItems.Value(len(g.toGraphite))
var ok bool
for !ok {
assureConn()
conn.SetWriteDeadline(time.Now().Add(g.timeout))
pre := time.Now()
_, err = conn.Write(buf)
if err == nil {
ok = true
flushDuration.Value(time.Since(pre))
} else {
log.Warnf("stats failed to write to graphite: %s (took %s). will retry...", err, time.Now().Sub(pre))
conn.Close()
wg.Wait()
conn = nil
}
}
}
}
|
go
|
func (g *Graphite) writer() {
var conn net.Conn
var err error
var wg sync.WaitGroup
assureConn := func() {
connected.Set(conn != nil)
for conn == nil {
time.Sleep(time.Second)
conn, err = net.Dial("tcp", g.addr)
if err == nil {
log.Infof("stats now connected to %s", g.addr)
wg.Add(1)
go g.checkEOF(conn, &wg)
} else {
log.Warnf("stats dialing %s failed: %s. will retry", g.addr, err.Error())
}
connected.Set(conn != nil)
}
}
for buf := range g.toGraphite {
queueItems.Value(len(g.toGraphite))
var ok bool
for !ok {
assureConn()
conn.SetWriteDeadline(time.Now().Add(g.timeout))
pre := time.Now()
_, err = conn.Write(buf)
if err == nil {
ok = true
flushDuration.Value(time.Since(pre))
} else {
log.Warnf("stats failed to write to graphite: %s (took %s). will retry...", err, time.Now().Sub(pre))
conn.Close()
wg.Wait()
conn = nil
}
}
}
}
|
[
"func",
"(",
"g",
"*",
"Graphite",
")",
"writer",
"(",
")",
"{",
"var",
"conn",
"net",
".",
"Conn",
"\n",
"var",
"err",
"error",
"\n",
"var",
"wg",
"sync",
".",
"WaitGroup",
"\n\n",
"assureConn",
":=",
"func",
"(",
")",
"{",
"connected",
".",
"Set",
"(",
"conn",
"!=",
"nil",
")",
"\n",
"for",
"conn",
"==",
"nil",
"{",
"time",
".",
"Sleep",
"(",
"time",
".",
"Second",
")",
"\n",
"conn",
",",
"err",
"=",
"net",
".",
"Dial",
"(",
"\"",
"\"",
",",
"g",
".",
"addr",
")",
"\n",
"if",
"err",
"==",
"nil",
"{",
"log",
".",
"Infof",
"(",
"\"",
"\"",
",",
"g",
".",
"addr",
")",
"\n",
"wg",
".",
"Add",
"(",
"1",
")",
"\n",
"go",
"g",
".",
"checkEOF",
"(",
"conn",
",",
"&",
"wg",
")",
"\n",
"}",
"else",
"{",
"log",
".",
"Warnf",
"(",
"\"",
"\"",
",",
"g",
".",
"addr",
",",
"err",
".",
"Error",
"(",
")",
")",
"\n",
"}",
"\n",
"connected",
".",
"Set",
"(",
"conn",
"!=",
"nil",
")",
"\n",
"}",
"\n",
"}",
"\n\n",
"for",
"buf",
":=",
"range",
"g",
".",
"toGraphite",
"{",
"queueItems",
".",
"Value",
"(",
"len",
"(",
"g",
".",
"toGraphite",
")",
")",
"\n",
"var",
"ok",
"bool",
"\n",
"for",
"!",
"ok",
"{",
"assureConn",
"(",
")",
"\n",
"conn",
".",
"SetWriteDeadline",
"(",
"time",
".",
"Now",
"(",
")",
".",
"Add",
"(",
"g",
".",
"timeout",
")",
")",
"\n",
"pre",
":=",
"time",
".",
"Now",
"(",
")",
"\n",
"_",
",",
"err",
"=",
"conn",
".",
"Write",
"(",
"buf",
")",
"\n",
"if",
"err",
"==",
"nil",
"{",
"ok",
"=",
"true",
"\n",
"flushDuration",
".",
"Value",
"(",
"time",
".",
"Since",
"(",
"pre",
")",
")",
"\n",
"}",
"else",
"{",
"log",
".",
"Warnf",
"(",
"\"",
"\"",
",",
"err",
",",
"time",
".",
"Now",
"(",
")",
".",
"Sub",
"(",
"pre",
")",
")",
"\n",
"conn",
".",
"Close",
"(",
")",
"\n",
"wg",
".",
"Wait",
"(",
")",
"\n",
"conn",
"=",
"nil",
"\n",
"}",
"\n",
"}",
"\n",
"}",
"\n",
"}"
] |
// writer connects to graphite and submits all pending data to it
|
[
"writer",
"connects",
"to",
"graphite",
"and",
"submits",
"all",
"pending",
"data",
"to",
"it"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/stats/out_graphite.go#L87-L127
|
train
|
grafana/metrictank
|
mdata/reorder_buffer.go
|
Add
|
func (rob *ReorderBuffer) Add(ts uint32, val float64) ([]schema.Point, error) {
ts = AggBoundary(ts, rob.interval)
// out of order and too old
if rob.buf[rob.newest].Ts != 0 && ts <= rob.buf[rob.newest].Ts-(uint32(cap(rob.buf))*rob.interval) {
return nil, errors.ErrMetricTooOld
}
var res []schema.Point
oldest := (rob.newest + 1) % uint32(cap(rob.buf))
index := (ts / rob.interval) % uint32(cap(rob.buf))
if ts == rob.buf[index].Ts {
return nil, errors.ErrMetricNewValueForTimestamp
} else if ts > rob.buf[rob.newest].Ts {
flushCount := (ts - rob.buf[rob.newest].Ts) / rob.interval
if flushCount > uint32(cap(rob.buf)) {
flushCount = uint32(cap(rob.buf))
}
for i := uint32(0); i < flushCount; i++ {
if rob.buf[oldest].Ts != 0 {
res = append(res, rob.buf[oldest])
rob.buf[oldest].Ts = 0
}
oldest = (oldest + 1) % uint32(cap(rob.buf))
}
rob.buf[index].Ts = ts
rob.buf[index].Val = val
rob.newest = index
} else {
metricsReordered.Inc()
rob.buf[index].Ts = ts
rob.buf[index].Val = val
}
return res, nil
}
|
go
|
func (rob *ReorderBuffer) Add(ts uint32, val float64) ([]schema.Point, error) {
ts = AggBoundary(ts, rob.interval)
// out of order and too old
if rob.buf[rob.newest].Ts != 0 && ts <= rob.buf[rob.newest].Ts-(uint32(cap(rob.buf))*rob.interval) {
return nil, errors.ErrMetricTooOld
}
var res []schema.Point
oldest := (rob.newest + 1) % uint32(cap(rob.buf))
index := (ts / rob.interval) % uint32(cap(rob.buf))
if ts == rob.buf[index].Ts {
return nil, errors.ErrMetricNewValueForTimestamp
} else if ts > rob.buf[rob.newest].Ts {
flushCount := (ts - rob.buf[rob.newest].Ts) / rob.interval
if flushCount > uint32(cap(rob.buf)) {
flushCount = uint32(cap(rob.buf))
}
for i := uint32(0); i < flushCount; i++ {
if rob.buf[oldest].Ts != 0 {
res = append(res, rob.buf[oldest])
rob.buf[oldest].Ts = 0
}
oldest = (oldest + 1) % uint32(cap(rob.buf))
}
rob.buf[index].Ts = ts
rob.buf[index].Val = val
rob.newest = index
} else {
metricsReordered.Inc()
rob.buf[index].Ts = ts
rob.buf[index].Val = val
}
return res, nil
}
|
[
"func",
"(",
"rob",
"*",
"ReorderBuffer",
")",
"Add",
"(",
"ts",
"uint32",
",",
"val",
"float64",
")",
"(",
"[",
"]",
"schema",
".",
"Point",
",",
"error",
")",
"{",
"ts",
"=",
"AggBoundary",
"(",
"ts",
",",
"rob",
".",
"interval",
")",
"\n\n",
"// out of order and too old",
"if",
"rob",
".",
"buf",
"[",
"rob",
".",
"newest",
"]",
".",
"Ts",
"!=",
"0",
"&&",
"ts",
"<=",
"rob",
".",
"buf",
"[",
"rob",
".",
"newest",
"]",
".",
"Ts",
"-",
"(",
"uint32",
"(",
"cap",
"(",
"rob",
".",
"buf",
")",
")",
"*",
"rob",
".",
"interval",
")",
"{",
"return",
"nil",
",",
"errors",
".",
"ErrMetricTooOld",
"\n",
"}",
"\n\n",
"var",
"res",
"[",
"]",
"schema",
".",
"Point",
"\n",
"oldest",
":=",
"(",
"rob",
".",
"newest",
"+",
"1",
")",
"%",
"uint32",
"(",
"cap",
"(",
"rob",
".",
"buf",
")",
")",
"\n",
"index",
":=",
"(",
"ts",
"/",
"rob",
".",
"interval",
")",
"%",
"uint32",
"(",
"cap",
"(",
"rob",
".",
"buf",
")",
")",
"\n",
"if",
"ts",
"==",
"rob",
".",
"buf",
"[",
"index",
"]",
".",
"Ts",
"{",
"return",
"nil",
",",
"errors",
".",
"ErrMetricNewValueForTimestamp",
"\n",
"}",
"else",
"if",
"ts",
">",
"rob",
".",
"buf",
"[",
"rob",
".",
"newest",
"]",
".",
"Ts",
"{",
"flushCount",
":=",
"(",
"ts",
"-",
"rob",
".",
"buf",
"[",
"rob",
".",
"newest",
"]",
".",
"Ts",
")",
"/",
"rob",
".",
"interval",
"\n",
"if",
"flushCount",
">",
"uint32",
"(",
"cap",
"(",
"rob",
".",
"buf",
")",
")",
"{",
"flushCount",
"=",
"uint32",
"(",
"cap",
"(",
"rob",
".",
"buf",
")",
")",
"\n",
"}",
"\n\n",
"for",
"i",
":=",
"uint32",
"(",
"0",
")",
";",
"i",
"<",
"flushCount",
";",
"i",
"++",
"{",
"if",
"rob",
".",
"buf",
"[",
"oldest",
"]",
".",
"Ts",
"!=",
"0",
"{",
"res",
"=",
"append",
"(",
"res",
",",
"rob",
".",
"buf",
"[",
"oldest",
"]",
")",
"\n",
"rob",
".",
"buf",
"[",
"oldest",
"]",
".",
"Ts",
"=",
"0",
"\n",
"}",
"\n",
"oldest",
"=",
"(",
"oldest",
"+",
"1",
")",
"%",
"uint32",
"(",
"cap",
"(",
"rob",
".",
"buf",
")",
")",
"\n",
"}",
"\n",
"rob",
".",
"buf",
"[",
"index",
"]",
".",
"Ts",
"=",
"ts",
"\n",
"rob",
".",
"buf",
"[",
"index",
"]",
".",
"Val",
"=",
"val",
"\n",
"rob",
".",
"newest",
"=",
"index",
"\n",
"}",
"else",
"{",
"metricsReordered",
".",
"Inc",
"(",
")",
"\n",
"rob",
".",
"buf",
"[",
"index",
"]",
".",
"Ts",
"=",
"ts",
"\n",
"rob",
".",
"buf",
"[",
"index",
"]",
".",
"Val",
"=",
"val",
"\n",
"}",
"\n\n",
"return",
"res",
",",
"nil",
"\n",
"}"
] |
// Add adds the point if it falls within the window.
// it returns points that have been purged out of the buffer, as well as whether the add succeeded.
|
[
"Add",
"adds",
"the",
"point",
"if",
"it",
"falls",
"within",
"the",
"window",
".",
"it",
"returns",
"points",
"that",
"have",
"been",
"purged",
"out",
"of",
"the",
"buffer",
"as",
"well",
"as",
"whether",
"the",
"add",
"succeeded",
"."
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/mdata/reorder_buffer.go#L31-L67
|
train
|
grafana/metrictank
|
mdata/reorder_buffer.go
|
Get
|
func (rob *ReorderBuffer) Get() []schema.Point {
res := make([]schema.Point, 0, cap(rob.buf))
oldest := (rob.newest + 1) % uint32(cap(rob.buf))
for {
if rob.buf[oldest].Ts != 0 {
res = append(res, rob.buf[oldest])
}
if oldest == rob.newest {
break
}
oldest = (oldest + 1) % uint32(cap(rob.buf))
}
return res
}
|
go
|
func (rob *ReorderBuffer) Get() []schema.Point {
res := make([]schema.Point, 0, cap(rob.buf))
oldest := (rob.newest + 1) % uint32(cap(rob.buf))
for {
if rob.buf[oldest].Ts != 0 {
res = append(res, rob.buf[oldest])
}
if oldest == rob.newest {
break
}
oldest = (oldest + 1) % uint32(cap(rob.buf))
}
return res
}
|
[
"func",
"(",
"rob",
"*",
"ReorderBuffer",
")",
"Get",
"(",
")",
"[",
"]",
"schema",
".",
"Point",
"{",
"res",
":=",
"make",
"(",
"[",
"]",
"schema",
".",
"Point",
",",
"0",
",",
"cap",
"(",
"rob",
".",
"buf",
")",
")",
"\n",
"oldest",
":=",
"(",
"rob",
".",
"newest",
"+",
"1",
")",
"%",
"uint32",
"(",
"cap",
"(",
"rob",
".",
"buf",
")",
")",
"\n\n",
"for",
"{",
"if",
"rob",
".",
"buf",
"[",
"oldest",
"]",
".",
"Ts",
"!=",
"0",
"{",
"res",
"=",
"append",
"(",
"res",
",",
"rob",
".",
"buf",
"[",
"oldest",
"]",
")",
"\n",
"}",
"\n",
"if",
"oldest",
"==",
"rob",
".",
"newest",
"{",
"break",
"\n",
"}",
"\n",
"oldest",
"=",
"(",
"oldest",
"+",
"1",
")",
"%",
"uint32",
"(",
"cap",
"(",
"rob",
".",
"buf",
")",
")",
"\n",
"}",
"\n\n",
"return",
"res",
"\n",
"}"
] |
// Get returns the points in the buffer
|
[
"Get",
"returns",
"the",
"points",
"in",
"the",
"buffer"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/mdata/reorder_buffer.go#L70-L85
|
train
|
grafana/metrictank
|
cmd/mt-whisper-importer-reader/conversion.go
|
incResolution
|
func incResolution(points []whisper.Point, method string, inRes, outRes, rawRes uint32) map[string][]whisper.Point {
out := make(map[string][]whisper.Point)
resFactor := float64(outRes) / float64(rawRes)
for _, inPoint := range points {
if inPoint.Timestamp == 0 {
continue
}
// inPoints are guaranteed to be quantized by whisper
// outRes is < inRes, otherwise this function should never be called
// rangeEnd is the the TS of the last datapoint that will be generated based on inPoint
rangeEnd := inPoint.Timestamp - (inPoint.Timestamp % outRes)
// generate datapoints based on inPoint in reverse order
var outPoints []whisper.Point
for ts := rangeEnd; ts > inPoint.Timestamp-inRes; ts = ts - outRes {
if ts > uint32(*importUpTo) || ts < uint32(*importAfter) {
continue
}
outPoints = append(outPoints, whisper.Point{Timestamp: ts})
}
for _, outPoint := range outPoints {
if method == "sum" {
outPoint.Value = inPoint.Value / float64(len(outPoints))
out["sum"] = append(out["sum"], outPoint)
out["cnt"] = append(out["cnt"], whisper.Point{Timestamp: outPoint.Timestamp, Value: resFactor})
} else if method == "fakeavg" {
outPoint.Value = inPoint.Value * resFactor
out["sum"] = append(out["sum"], outPoint)
out["cnt"] = append(out["cnt"], whisper.Point{Timestamp: outPoint.Timestamp, Value: resFactor})
} else {
outPoint.Value = inPoint.Value
out[method] = append(out[method], outPoint)
}
}
}
for m := range out {
out[m] = sortPoints(out[m])
}
return out
}
|
go
|
func incResolution(points []whisper.Point, method string, inRes, outRes, rawRes uint32) map[string][]whisper.Point {
out := make(map[string][]whisper.Point)
resFactor := float64(outRes) / float64(rawRes)
for _, inPoint := range points {
if inPoint.Timestamp == 0 {
continue
}
// inPoints are guaranteed to be quantized by whisper
// outRes is < inRes, otherwise this function should never be called
// rangeEnd is the the TS of the last datapoint that will be generated based on inPoint
rangeEnd := inPoint.Timestamp - (inPoint.Timestamp % outRes)
// generate datapoints based on inPoint in reverse order
var outPoints []whisper.Point
for ts := rangeEnd; ts > inPoint.Timestamp-inRes; ts = ts - outRes {
if ts > uint32(*importUpTo) || ts < uint32(*importAfter) {
continue
}
outPoints = append(outPoints, whisper.Point{Timestamp: ts})
}
for _, outPoint := range outPoints {
if method == "sum" {
outPoint.Value = inPoint.Value / float64(len(outPoints))
out["sum"] = append(out["sum"], outPoint)
out["cnt"] = append(out["cnt"], whisper.Point{Timestamp: outPoint.Timestamp, Value: resFactor})
} else if method == "fakeavg" {
outPoint.Value = inPoint.Value * resFactor
out["sum"] = append(out["sum"], outPoint)
out["cnt"] = append(out["cnt"], whisper.Point{Timestamp: outPoint.Timestamp, Value: resFactor})
} else {
outPoint.Value = inPoint.Value
out[method] = append(out[method], outPoint)
}
}
}
for m := range out {
out[m] = sortPoints(out[m])
}
return out
}
|
[
"func",
"incResolution",
"(",
"points",
"[",
"]",
"whisper",
".",
"Point",
",",
"method",
"string",
",",
"inRes",
",",
"outRes",
",",
"rawRes",
"uint32",
")",
"map",
"[",
"string",
"]",
"[",
"]",
"whisper",
".",
"Point",
"{",
"out",
":=",
"make",
"(",
"map",
"[",
"string",
"]",
"[",
"]",
"whisper",
".",
"Point",
")",
"\n",
"resFactor",
":=",
"float64",
"(",
"outRes",
")",
"/",
"float64",
"(",
"rawRes",
")",
"\n",
"for",
"_",
",",
"inPoint",
":=",
"range",
"points",
"{",
"if",
"inPoint",
".",
"Timestamp",
"==",
"0",
"{",
"continue",
"\n",
"}",
"\n\n",
"// inPoints are guaranteed to be quantized by whisper",
"// outRes is < inRes, otherwise this function should never be called",
"// rangeEnd is the the TS of the last datapoint that will be generated based on inPoint",
"rangeEnd",
":=",
"inPoint",
".",
"Timestamp",
"-",
"(",
"inPoint",
".",
"Timestamp",
"%",
"outRes",
")",
"\n\n",
"// generate datapoints based on inPoint in reverse order",
"var",
"outPoints",
"[",
"]",
"whisper",
".",
"Point",
"\n",
"for",
"ts",
":=",
"rangeEnd",
";",
"ts",
">",
"inPoint",
".",
"Timestamp",
"-",
"inRes",
";",
"ts",
"=",
"ts",
"-",
"outRes",
"{",
"if",
"ts",
">",
"uint32",
"(",
"*",
"importUpTo",
")",
"||",
"ts",
"<",
"uint32",
"(",
"*",
"importAfter",
")",
"{",
"continue",
"\n",
"}",
"\n",
"outPoints",
"=",
"append",
"(",
"outPoints",
",",
"whisper",
".",
"Point",
"{",
"Timestamp",
":",
"ts",
"}",
")",
"\n",
"}",
"\n\n",
"for",
"_",
",",
"outPoint",
":=",
"range",
"outPoints",
"{",
"if",
"method",
"==",
"\"",
"\"",
"{",
"outPoint",
".",
"Value",
"=",
"inPoint",
".",
"Value",
"/",
"float64",
"(",
"len",
"(",
"outPoints",
")",
")",
"\n",
"out",
"[",
"\"",
"\"",
"]",
"=",
"append",
"(",
"out",
"[",
"\"",
"\"",
"]",
",",
"outPoint",
")",
"\n",
"out",
"[",
"\"",
"\"",
"]",
"=",
"append",
"(",
"out",
"[",
"\"",
"\"",
"]",
",",
"whisper",
".",
"Point",
"{",
"Timestamp",
":",
"outPoint",
".",
"Timestamp",
",",
"Value",
":",
"resFactor",
"}",
")",
"\n",
"}",
"else",
"if",
"method",
"==",
"\"",
"\"",
"{",
"outPoint",
".",
"Value",
"=",
"inPoint",
".",
"Value",
"*",
"resFactor",
"\n",
"out",
"[",
"\"",
"\"",
"]",
"=",
"append",
"(",
"out",
"[",
"\"",
"\"",
"]",
",",
"outPoint",
")",
"\n",
"out",
"[",
"\"",
"\"",
"]",
"=",
"append",
"(",
"out",
"[",
"\"",
"\"",
"]",
",",
"whisper",
".",
"Point",
"{",
"Timestamp",
":",
"outPoint",
".",
"Timestamp",
",",
"Value",
":",
"resFactor",
"}",
")",
"\n",
"}",
"else",
"{",
"outPoint",
".",
"Value",
"=",
"inPoint",
".",
"Value",
"\n",
"out",
"[",
"method",
"]",
"=",
"append",
"(",
"out",
"[",
"method",
"]",
",",
"outPoint",
")",
"\n",
"}",
"\n",
"}",
"\n",
"}",
"\n",
"for",
"m",
":=",
"range",
"out",
"{",
"out",
"[",
"m",
"]",
"=",
"sortPoints",
"(",
"out",
"[",
"m",
"]",
")",
"\n",
"}",
"\n",
"return",
"out",
"\n",
"}"
] |
// increase resolution of given points according to defined specs by generating
// additional datapoints to bridge the gaps between the given points. depending
// on what aggregation method is specified, those datapoints may be generated in
// slightly different ways.
|
[
"increase",
"resolution",
"of",
"given",
"points",
"according",
"to",
"defined",
"specs",
"by",
"generating",
"additional",
"datapoints",
"to",
"bridge",
"the",
"gaps",
"between",
"the",
"given",
"points",
".",
"depending",
"on",
"what",
"aggregation",
"method",
"is",
"specified",
"those",
"datapoints",
"may",
"be",
"generated",
"in",
"slightly",
"different",
"ways",
"."
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/cmd/mt-whisper-importer-reader/conversion.go#L135-L176
|
train
|
grafana/metrictank
|
cmd/mt-whisper-importer-reader/conversion.go
|
decResolution
|
func decResolution(points []whisper.Point, method string, inRes, outRes, rawRes uint32) map[string][]whisper.Point {
out := make(map[string][]whisper.Point)
agg := mdata.NewAggregation()
currentBoundary := uint32(0)
flush := func() {
if agg.Cnt == 0 {
return
}
var value float64
switch method {
case "min":
value = agg.Min
case "max":
value = agg.Max
case "lst":
value = agg.Lst
case "avg":
value = agg.Sum / agg.Cnt
case "sum":
out["cnt"] = append(out["cnt"], whisper.Point{
Timestamp: currentBoundary,
Value: agg.Cnt * float64(inRes) / float64(rawRes),
})
out["sum"] = append(out["sum"], whisper.Point{
Timestamp: currentBoundary,
Value: agg.Sum,
})
agg.Reset()
return
case "fakeavg":
cnt := agg.Cnt * float64(inRes) / float64(rawRes)
out["cnt"] = append(out["cnt"], whisper.Point{
Timestamp: currentBoundary,
Value: cnt,
})
out["sum"] = append(out["sum"], whisper.Point{
Timestamp: currentBoundary,
Value: (agg.Sum / agg.Cnt) * cnt,
})
agg.Reset()
return
default:
return
}
out[method] = append(out[method], whisper.Point{
Timestamp: currentBoundary,
Value: value,
})
agg.Reset()
}
for _, inPoint := range sortPoints(points) {
if inPoint.Timestamp == 0 {
continue
}
boundary := mdata.AggBoundary(inPoint.Timestamp, outRes)
if boundary > uint32(*importUpTo) {
break
}
if boundary < uint32(*importAfter) {
continue
}
if boundary == currentBoundary {
agg.Add(inPoint.Value)
if inPoint.Timestamp == boundary {
flush()
}
} else {
flush()
currentBoundary = boundary
agg.Add(inPoint.Value)
}
}
return out
}
|
go
|
func decResolution(points []whisper.Point, method string, inRes, outRes, rawRes uint32) map[string][]whisper.Point {
out := make(map[string][]whisper.Point)
agg := mdata.NewAggregation()
currentBoundary := uint32(0)
flush := func() {
if agg.Cnt == 0 {
return
}
var value float64
switch method {
case "min":
value = agg.Min
case "max":
value = agg.Max
case "lst":
value = agg.Lst
case "avg":
value = agg.Sum / agg.Cnt
case "sum":
out["cnt"] = append(out["cnt"], whisper.Point{
Timestamp: currentBoundary,
Value: agg.Cnt * float64(inRes) / float64(rawRes),
})
out["sum"] = append(out["sum"], whisper.Point{
Timestamp: currentBoundary,
Value: agg.Sum,
})
agg.Reset()
return
case "fakeavg":
cnt := agg.Cnt * float64(inRes) / float64(rawRes)
out["cnt"] = append(out["cnt"], whisper.Point{
Timestamp: currentBoundary,
Value: cnt,
})
out["sum"] = append(out["sum"], whisper.Point{
Timestamp: currentBoundary,
Value: (agg.Sum / agg.Cnt) * cnt,
})
agg.Reset()
return
default:
return
}
out[method] = append(out[method], whisper.Point{
Timestamp: currentBoundary,
Value: value,
})
agg.Reset()
}
for _, inPoint := range sortPoints(points) {
if inPoint.Timestamp == 0 {
continue
}
boundary := mdata.AggBoundary(inPoint.Timestamp, outRes)
if boundary > uint32(*importUpTo) {
break
}
if boundary < uint32(*importAfter) {
continue
}
if boundary == currentBoundary {
agg.Add(inPoint.Value)
if inPoint.Timestamp == boundary {
flush()
}
} else {
flush()
currentBoundary = boundary
agg.Add(inPoint.Value)
}
}
return out
}
|
[
"func",
"decResolution",
"(",
"points",
"[",
"]",
"whisper",
".",
"Point",
",",
"method",
"string",
",",
"inRes",
",",
"outRes",
",",
"rawRes",
"uint32",
")",
"map",
"[",
"string",
"]",
"[",
"]",
"whisper",
".",
"Point",
"{",
"out",
":=",
"make",
"(",
"map",
"[",
"string",
"]",
"[",
"]",
"whisper",
".",
"Point",
")",
"\n",
"agg",
":=",
"mdata",
".",
"NewAggregation",
"(",
")",
"\n",
"currentBoundary",
":=",
"uint32",
"(",
"0",
")",
"\n\n",
"flush",
":=",
"func",
"(",
")",
"{",
"if",
"agg",
".",
"Cnt",
"==",
"0",
"{",
"return",
"\n",
"}",
"\n\n",
"var",
"value",
"float64",
"\n",
"switch",
"method",
"{",
"case",
"\"",
"\"",
":",
"value",
"=",
"agg",
".",
"Min",
"\n",
"case",
"\"",
"\"",
":",
"value",
"=",
"agg",
".",
"Max",
"\n",
"case",
"\"",
"\"",
":",
"value",
"=",
"agg",
".",
"Lst",
"\n",
"case",
"\"",
"\"",
":",
"value",
"=",
"agg",
".",
"Sum",
"/",
"agg",
".",
"Cnt",
"\n",
"case",
"\"",
"\"",
":",
"out",
"[",
"\"",
"\"",
"]",
"=",
"append",
"(",
"out",
"[",
"\"",
"\"",
"]",
",",
"whisper",
".",
"Point",
"{",
"Timestamp",
":",
"currentBoundary",
",",
"Value",
":",
"agg",
".",
"Cnt",
"*",
"float64",
"(",
"inRes",
")",
"/",
"float64",
"(",
"rawRes",
")",
",",
"}",
")",
"\n",
"out",
"[",
"\"",
"\"",
"]",
"=",
"append",
"(",
"out",
"[",
"\"",
"\"",
"]",
",",
"whisper",
".",
"Point",
"{",
"Timestamp",
":",
"currentBoundary",
",",
"Value",
":",
"agg",
".",
"Sum",
",",
"}",
")",
"\n",
"agg",
".",
"Reset",
"(",
")",
"\n",
"return",
"\n",
"case",
"\"",
"\"",
":",
"cnt",
":=",
"agg",
".",
"Cnt",
"*",
"float64",
"(",
"inRes",
")",
"/",
"float64",
"(",
"rawRes",
")",
"\n",
"out",
"[",
"\"",
"\"",
"]",
"=",
"append",
"(",
"out",
"[",
"\"",
"\"",
"]",
",",
"whisper",
".",
"Point",
"{",
"Timestamp",
":",
"currentBoundary",
",",
"Value",
":",
"cnt",
",",
"}",
")",
"\n",
"out",
"[",
"\"",
"\"",
"]",
"=",
"append",
"(",
"out",
"[",
"\"",
"\"",
"]",
",",
"whisper",
".",
"Point",
"{",
"Timestamp",
":",
"currentBoundary",
",",
"Value",
":",
"(",
"agg",
".",
"Sum",
"/",
"agg",
".",
"Cnt",
")",
"*",
"cnt",
",",
"}",
")",
"\n",
"agg",
".",
"Reset",
"(",
")",
"\n",
"return",
"\n",
"default",
":",
"return",
"\n",
"}",
"\n",
"out",
"[",
"method",
"]",
"=",
"append",
"(",
"out",
"[",
"method",
"]",
",",
"whisper",
".",
"Point",
"{",
"Timestamp",
":",
"currentBoundary",
",",
"Value",
":",
"value",
",",
"}",
")",
"\n",
"agg",
".",
"Reset",
"(",
")",
"\n",
"}",
"\n\n",
"for",
"_",
",",
"inPoint",
":=",
"range",
"sortPoints",
"(",
"points",
")",
"{",
"if",
"inPoint",
".",
"Timestamp",
"==",
"0",
"{",
"continue",
"\n",
"}",
"\n",
"boundary",
":=",
"mdata",
".",
"AggBoundary",
"(",
"inPoint",
".",
"Timestamp",
",",
"outRes",
")",
"\n",
"if",
"boundary",
">",
"uint32",
"(",
"*",
"importUpTo",
")",
"{",
"break",
"\n",
"}",
"\n",
"if",
"boundary",
"<",
"uint32",
"(",
"*",
"importAfter",
")",
"{",
"continue",
"\n",
"}",
"\n\n",
"if",
"boundary",
"==",
"currentBoundary",
"{",
"agg",
".",
"Add",
"(",
"inPoint",
".",
"Value",
")",
"\n",
"if",
"inPoint",
".",
"Timestamp",
"==",
"boundary",
"{",
"flush",
"(",
")",
"\n",
"}",
"\n",
"}",
"else",
"{",
"flush",
"(",
")",
"\n",
"currentBoundary",
"=",
"boundary",
"\n",
"agg",
".",
"Add",
"(",
"inPoint",
".",
"Value",
")",
"\n",
"}",
"\n",
"}",
"\n\n",
"return",
"out",
"\n",
"}"
] |
// decreases the resolution of given points by using the aggregation method specified
// in the second argument. emulates the way metrictank aggregates data when it generates
// rollups of the raw data.
|
[
"decreases",
"the",
"resolution",
"of",
"given",
"points",
"by",
"using",
"the",
"aggregation",
"method",
"specified",
"in",
"the",
"second",
"argument",
".",
"emulates",
"the",
"way",
"metrictank",
"aggregates",
"data",
"when",
"it",
"generates",
"rollups",
"of",
"the",
"raw",
"data",
"."
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/cmd/mt-whisper-importer-reader/conversion.go#L181-L259
|
train
|
grafana/metrictank
|
mdata/store_mock.go
|
Add
|
func (c *MockStore) Add(cwr *ChunkWriteRequest) {
if !c.Drop {
intervalHint := cwr.Key.Archive.Span()
itgen, err := chunk.NewIterGen(cwr.Chunk.Series.T0, intervalHint, cwr.Chunk.Encode(cwr.Span))
if err != nil {
panic(err)
}
c.results[cwr.Key] = append(c.results[cwr.Key], itgen)
c.items++
}
}
|
go
|
func (c *MockStore) Add(cwr *ChunkWriteRequest) {
if !c.Drop {
intervalHint := cwr.Key.Archive.Span()
itgen, err := chunk.NewIterGen(cwr.Chunk.Series.T0, intervalHint, cwr.Chunk.Encode(cwr.Span))
if err != nil {
panic(err)
}
c.results[cwr.Key] = append(c.results[cwr.Key], itgen)
c.items++
}
}
|
[
"func",
"(",
"c",
"*",
"MockStore",
")",
"Add",
"(",
"cwr",
"*",
"ChunkWriteRequest",
")",
"{",
"if",
"!",
"c",
".",
"Drop",
"{",
"intervalHint",
":=",
"cwr",
".",
"Key",
".",
"Archive",
".",
"Span",
"(",
")",
"\n",
"itgen",
",",
"err",
":=",
"chunk",
".",
"NewIterGen",
"(",
"cwr",
".",
"Chunk",
".",
"Series",
".",
"T0",
",",
"intervalHint",
",",
"cwr",
".",
"Chunk",
".",
"Encode",
"(",
"cwr",
".",
"Span",
")",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"panic",
"(",
"err",
")",
"\n",
"}",
"\n",
"c",
".",
"results",
"[",
"cwr",
".",
"Key",
"]",
"=",
"append",
"(",
"c",
".",
"results",
"[",
"cwr",
".",
"Key",
"]",
",",
"itgen",
")",
"\n",
"c",
".",
"items",
"++",
"\n",
"}",
"\n",
"}"
] |
// Add adds a chunk to the store
|
[
"Add",
"adds",
"a",
"chunk",
"to",
"the",
"store"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/mdata/store_mock.go#L40-L50
|
train
|
grafana/metrictank
|
mdata/chunk/encode.go
|
encode
|
func encode(span uint32, format Format, data []byte) []byte {
switch format {
case FormatStandardGoTszWithSpan, FormatGoTszLongWithSpan:
buf := new(bytes.Buffer)
binary.Write(buf, binary.LittleEndian, format)
spanCode, ok := RevChunkSpans[span]
if !ok {
// it's probably better to panic than to persist the chunk with a wrong length
panic(fmt.Sprintf("Chunk span invalid: %d", span))
}
binary.Write(buf, binary.LittleEndian, spanCode)
buf.Write(data)
return buf.Bytes()
case FormatStandardGoTsz:
buf := new(bytes.Buffer)
binary.Write(buf, binary.LittleEndian, format)
buf.Write(data)
return buf.Bytes()
}
return nil
}
|
go
|
func encode(span uint32, format Format, data []byte) []byte {
switch format {
case FormatStandardGoTszWithSpan, FormatGoTszLongWithSpan:
buf := new(bytes.Buffer)
binary.Write(buf, binary.LittleEndian, format)
spanCode, ok := RevChunkSpans[span]
if !ok {
// it's probably better to panic than to persist the chunk with a wrong length
panic(fmt.Sprintf("Chunk span invalid: %d", span))
}
binary.Write(buf, binary.LittleEndian, spanCode)
buf.Write(data)
return buf.Bytes()
case FormatStandardGoTsz:
buf := new(bytes.Buffer)
binary.Write(buf, binary.LittleEndian, format)
buf.Write(data)
return buf.Bytes()
}
return nil
}
|
[
"func",
"encode",
"(",
"span",
"uint32",
",",
"format",
"Format",
",",
"data",
"[",
"]",
"byte",
")",
"[",
"]",
"byte",
"{",
"switch",
"format",
"{",
"case",
"FormatStandardGoTszWithSpan",
",",
"FormatGoTszLongWithSpan",
":",
"buf",
":=",
"new",
"(",
"bytes",
".",
"Buffer",
")",
"\n",
"binary",
".",
"Write",
"(",
"buf",
",",
"binary",
".",
"LittleEndian",
",",
"format",
")",
"\n\n",
"spanCode",
",",
"ok",
":=",
"RevChunkSpans",
"[",
"span",
"]",
"\n",
"if",
"!",
"ok",
"{",
"// it's probably better to panic than to persist the chunk with a wrong length",
"panic",
"(",
"fmt",
".",
"Sprintf",
"(",
"\"",
"\"",
",",
"span",
")",
")",
"\n",
"}",
"\n",
"binary",
".",
"Write",
"(",
"buf",
",",
"binary",
".",
"LittleEndian",
",",
"spanCode",
")",
"\n",
"buf",
".",
"Write",
"(",
"data",
")",
"\n",
"return",
"buf",
".",
"Bytes",
"(",
")",
"\n",
"case",
"FormatStandardGoTsz",
":",
"buf",
":=",
"new",
"(",
"bytes",
".",
"Buffer",
")",
"\n",
"binary",
".",
"Write",
"(",
"buf",
",",
"binary",
".",
"LittleEndian",
",",
"format",
")",
"\n",
"buf",
".",
"Write",
"(",
"data",
")",
"\n",
"return",
"buf",
".",
"Bytes",
"(",
")",
"\n",
"}",
"\n",
"return",
"nil",
"\n",
"}"
] |
// encode is a helper function to encode a chunk of data into various formats
// input data is copied
|
[
"encode",
"is",
"a",
"helper",
"function",
"to",
"encode",
"a",
"chunk",
"of",
"data",
"into",
"various",
"formats",
"input",
"data",
"is",
"copied"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/mdata/chunk/encode.go#L11-L32
|
train
|
grafana/metrictank
|
idx/memory/tag_query.go
|
parseExpression
|
func parseExpression(expr string) (expression, error) {
var pos int
prefix, regex, not := false, false, false
res := expression{}
// scan up to operator to get key
FIND_OPERATOR:
for ; pos < len(expr); pos++ {
switch expr[pos] {
case '=':
break FIND_OPERATOR
case '!':
not = true
break FIND_OPERATOR
case '^':
prefix = true
break FIND_OPERATOR
case ';':
return res, errInvalidQuery
}
}
// key must not be empty
if pos == 0 {
return res, errInvalidQuery
}
res.key = expr[:pos]
// shift over the !/^ characters
if not || prefix {
pos++
}
if len(expr) <= pos || expr[pos] != '=' {
return res, errInvalidQuery
}
pos++
if len(expr) > pos && expr[pos] == '~' {
// ^=~ is not a valid operator
if prefix {
return res, errInvalidQuery
}
regex = true
pos++
}
valuePos := pos
for ; pos < len(expr); pos++ {
// disallow ; in value
if expr[pos] == 59 {
return res, errInvalidQuery
}
}
res.value = expr[valuePos:]
// special key to match on tag instead of a value
if res.key == "__tag" {
// currently ! (not) queries on tags are not supported
// and unlike normal queries a value must be set
if not || len(res.value) == 0 {
return res, errInvalidQuery
}
if regex {
res.operator = MATCH_TAG
} else if prefix {
res.operator = PREFIX_TAG
} else {
// currently only match & prefix operator are supported on tag
return res, errInvalidQuery
}
return res, nil
}
if not {
if regex {
res.operator = NOT_MATCH
} else {
res.operator = NOT_EQUAL
}
} else {
if regex {
res.operator = MATCH
} else if prefix {
res.operator = PREFIX
} else {
res.operator = EQUAL
}
}
return res, nil
}
|
go
|
func parseExpression(expr string) (expression, error) {
var pos int
prefix, regex, not := false, false, false
res := expression{}
// scan up to operator to get key
FIND_OPERATOR:
for ; pos < len(expr); pos++ {
switch expr[pos] {
case '=':
break FIND_OPERATOR
case '!':
not = true
break FIND_OPERATOR
case '^':
prefix = true
break FIND_OPERATOR
case ';':
return res, errInvalidQuery
}
}
// key must not be empty
if pos == 0 {
return res, errInvalidQuery
}
res.key = expr[:pos]
// shift over the !/^ characters
if not || prefix {
pos++
}
if len(expr) <= pos || expr[pos] != '=' {
return res, errInvalidQuery
}
pos++
if len(expr) > pos && expr[pos] == '~' {
// ^=~ is not a valid operator
if prefix {
return res, errInvalidQuery
}
regex = true
pos++
}
valuePos := pos
for ; pos < len(expr); pos++ {
// disallow ; in value
if expr[pos] == 59 {
return res, errInvalidQuery
}
}
res.value = expr[valuePos:]
// special key to match on tag instead of a value
if res.key == "__tag" {
// currently ! (not) queries on tags are not supported
// and unlike normal queries a value must be set
if not || len(res.value) == 0 {
return res, errInvalidQuery
}
if regex {
res.operator = MATCH_TAG
} else if prefix {
res.operator = PREFIX_TAG
} else {
// currently only match & prefix operator are supported on tag
return res, errInvalidQuery
}
return res, nil
}
if not {
if regex {
res.operator = NOT_MATCH
} else {
res.operator = NOT_EQUAL
}
} else {
if regex {
res.operator = MATCH
} else if prefix {
res.operator = PREFIX
} else {
res.operator = EQUAL
}
}
return res, nil
}
|
[
"func",
"parseExpression",
"(",
"expr",
"string",
")",
"(",
"expression",
",",
"error",
")",
"{",
"var",
"pos",
"int",
"\n",
"prefix",
",",
"regex",
",",
"not",
":=",
"false",
",",
"false",
",",
"false",
"\n",
"res",
":=",
"expression",
"{",
"}",
"\n\n",
"// scan up to operator to get key",
"FIND_OPERATOR",
":",
"for",
";",
"pos",
"<",
"len",
"(",
"expr",
")",
";",
"pos",
"++",
"{",
"switch",
"expr",
"[",
"pos",
"]",
"{",
"case",
"'='",
":",
"break",
"FIND_OPERATOR",
"\n",
"case",
"'!'",
":",
"not",
"=",
"true",
"\n",
"break",
"FIND_OPERATOR",
"\n",
"case",
"'^'",
":",
"prefix",
"=",
"true",
"\n",
"break",
"FIND_OPERATOR",
"\n",
"case",
"';'",
":",
"return",
"res",
",",
"errInvalidQuery",
"\n",
"}",
"\n",
"}",
"\n\n",
"// key must not be empty",
"if",
"pos",
"==",
"0",
"{",
"return",
"res",
",",
"errInvalidQuery",
"\n",
"}",
"\n\n",
"res",
".",
"key",
"=",
"expr",
"[",
":",
"pos",
"]",
"\n\n",
"// shift over the !/^ characters",
"if",
"not",
"||",
"prefix",
"{",
"pos",
"++",
"\n",
"}",
"\n\n",
"if",
"len",
"(",
"expr",
")",
"<=",
"pos",
"||",
"expr",
"[",
"pos",
"]",
"!=",
"'='",
"{",
"return",
"res",
",",
"errInvalidQuery",
"\n",
"}",
"\n",
"pos",
"++",
"\n\n",
"if",
"len",
"(",
"expr",
")",
">",
"pos",
"&&",
"expr",
"[",
"pos",
"]",
"==",
"'~'",
"{",
"// ^=~ is not a valid operator",
"if",
"prefix",
"{",
"return",
"res",
",",
"errInvalidQuery",
"\n",
"}",
"\n",
"regex",
"=",
"true",
"\n",
"pos",
"++",
"\n",
"}",
"\n\n",
"valuePos",
":=",
"pos",
"\n",
"for",
";",
"pos",
"<",
"len",
"(",
"expr",
")",
";",
"pos",
"++",
"{",
"// disallow ; in value",
"if",
"expr",
"[",
"pos",
"]",
"==",
"59",
"{",
"return",
"res",
",",
"errInvalidQuery",
"\n",
"}",
"\n",
"}",
"\n",
"res",
".",
"value",
"=",
"expr",
"[",
"valuePos",
":",
"]",
"\n\n",
"// special key to match on tag instead of a value",
"if",
"res",
".",
"key",
"==",
"\"",
"\"",
"{",
"// currently ! (not) queries on tags are not supported",
"// and unlike normal queries a value must be set",
"if",
"not",
"||",
"len",
"(",
"res",
".",
"value",
")",
"==",
"0",
"{",
"return",
"res",
",",
"errInvalidQuery",
"\n",
"}",
"\n\n",
"if",
"regex",
"{",
"res",
".",
"operator",
"=",
"MATCH_TAG",
"\n",
"}",
"else",
"if",
"prefix",
"{",
"res",
".",
"operator",
"=",
"PREFIX_TAG",
"\n",
"}",
"else",
"{",
"// currently only match & prefix operator are supported on tag",
"return",
"res",
",",
"errInvalidQuery",
"\n",
"}",
"\n\n",
"return",
"res",
",",
"nil",
"\n",
"}",
"\n\n",
"if",
"not",
"{",
"if",
"regex",
"{",
"res",
".",
"operator",
"=",
"NOT_MATCH",
"\n",
"}",
"else",
"{",
"res",
".",
"operator",
"=",
"NOT_EQUAL",
"\n",
"}",
"\n",
"}",
"else",
"{",
"if",
"regex",
"{",
"res",
".",
"operator",
"=",
"MATCH",
"\n",
"}",
"else",
"if",
"prefix",
"{",
"res",
".",
"operator",
"=",
"PREFIX",
"\n",
"}",
"else",
"{",
"res",
".",
"operator",
"=",
"EQUAL",
"\n",
"}",
"\n",
"}",
"\n",
"return",
"res",
",",
"nil",
"\n",
"}"
] |
// parseExpression returns an expression that's been generated from the given
// string, in case of error the operator will be PARSING_ERROR.
|
[
"parseExpression",
"returns",
"an",
"expression",
"that",
"s",
"been",
"generated",
"from",
"the",
"given",
"string",
"in",
"case",
"of",
"error",
"the",
"operator",
"will",
"be",
"PARSING_ERROR",
"."
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/idx/memory/tag_query.go#L125-L218
|
train
|
grafana/metrictank
|
idx/memory/tag_query.go
|
getInitialByEqual
|
func (q *TagQuery) getInitialByEqual(expr kv, idCh chan schema.MKey, stopCh chan struct{}) {
defer q.wg.Done()
KEYS:
for k := range q.index[expr.key][expr.value] {
select {
case <-stopCh:
break KEYS
case idCh <- k:
}
}
close(idCh)
}
|
go
|
func (q *TagQuery) getInitialByEqual(expr kv, idCh chan schema.MKey, stopCh chan struct{}) {
defer q.wg.Done()
KEYS:
for k := range q.index[expr.key][expr.value] {
select {
case <-stopCh:
break KEYS
case idCh <- k:
}
}
close(idCh)
}
|
[
"func",
"(",
"q",
"*",
"TagQuery",
")",
"getInitialByEqual",
"(",
"expr",
"kv",
",",
"idCh",
"chan",
"schema",
".",
"MKey",
",",
"stopCh",
"chan",
"struct",
"{",
"}",
")",
"{",
"defer",
"q",
".",
"wg",
".",
"Done",
"(",
")",
"\n\n",
"KEYS",
":",
"for",
"k",
":=",
"range",
"q",
".",
"index",
"[",
"expr",
".",
"key",
"]",
"[",
"expr",
".",
"value",
"]",
"{",
"select",
"{",
"case",
"<-",
"stopCh",
":",
"break",
"KEYS",
"\n",
"case",
"idCh",
"<-",
"k",
":",
"}",
"\n",
"}",
"\n\n",
"close",
"(",
"idCh",
")",
"\n",
"}"
] |
// getInitialByEqual generates the initial resultset by executing the given equal expression
|
[
"getInitialByEqual",
"generates",
"the",
"initial",
"resultset",
"by",
"executing",
"the",
"given",
"equal",
"expression"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/idx/memory/tag_query.go#L338-L351
|
train
|
grafana/metrictank
|
idx/memory/tag_query.go
|
getInitialByPrefix
|
func (q *TagQuery) getInitialByPrefix(expr kv, idCh chan schema.MKey, stopCh chan struct{}) {
defer q.wg.Done()
VALUES:
for v, ids := range q.index[expr.key] {
if !strings.HasPrefix(v, expr.value) {
continue
}
for id := range ids {
select {
case <-stopCh:
break VALUES
case idCh <- id:
}
}
}
close(idCh)
}
|
go
|
func (q *TagQuery) getInitialByPrefix(expr kv, idCh chan schema.MKey, stopCh chan struct{}) {
defer q.wg.Done()
VALUES:
for v, ids := range q.index[expr.key] {
if !strings.HasPrefix(v, expr.value) {
continue
}
for id := range ids {
select {
case <-stopCh:
break VALUES
case idCh <- id:
}
}
}
close(idCh)
}
|
[
"func",
"(",
"q",
"*",
"TagQuery",
")",
"getInitialByPrefix",
"(",
"expr",
"kv",
",",
"idCh",
"chan",
"schema",
".",
"MKey",
",",
"stopCh",
"chan",
"struct",
"{",
"}",
")",
"{",
"defer",
"q",
".",
"wg",
".",
"Done",
"(",
")",
"\n\n",
"VALUES",
":",
"for",
"v",
",",
"ids",
":=",
"range",
"q",
".",
"index",
"[",
"expr",
".",
"key",
"]",
"{",
"if",
"!",
"strings",
".",
"HasPrefix",
"(",
"v",
",",
"expr",
".",
"value",
")",
"{",
"continue",
"\n",
"}",
"\n\n",
"for",
"id",
":=",
"range",
"ids",
"{",
"select",
"{",
"case",
"<-",
"stopCh",
":",
"break",
"VALUES",
"\n",
"case",
"idCh",
"<-",
"id",
":",
"}",
"\n",
"}",
"\n",
"}",
"\n\n",
"close",
"(",
"idCh",
")",
"\n",
"}"
] |
// getInitialByPrefix generates the initial resultset by executing the given prefix match expression
|
[
"getInitialByPrefix",
"generates",
"the",
"initial",
"resultset",
"by",
"executing",
"the",
"given",
"prefix",
"match",
"expression"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/idx/memory/tag_query.go#L354-L373
|
train
|
grafana/metrictank
|
idx/memory/tag_query.go
|
getInitialByMatch
|
func (q *TagQuery) getInitialByMatch(expr kvRe, idCh chan schema.MKey, stopCh chan struct{}) {
defer q.wg.Done()
// shortcut if value == nil.
// this will simply match any value, like ^.+. since we know that every value
// in the index must not be empty, we can skip the matching.
if expr.value == nil {
VALUES1:
for _, ids := range q.index[expr.key] {
for id := range ids {
select {
case <-stopCh:
break VALUES1
case idCh <- id:
}
}
}
close(idCh)
return
}
VALUES2:
for v, ids := range q.index[expr.key] {
if !expr.value.MatchString(v) {
continue
}
for id := range ids {
select {
case <-stopCh:
break VALUES2
case idCh <- id:
}
}
}
close(idCh)
}
|
go
|
func (q *TagQuery) getInitialByMatch(expr kvRe, idCh chan schema.MKey, stopCh chan struct{}) {
defer q.wg.Done()
// shortcut if value == nil.
// this will simply match any value, like ^.+. since we know that every value
// in the index must not be empty, we can skip the matching.
if expr.value == nil {
VALUES1:
for _, ids := range q.index[expr.key] {
for id := range ids {
select {
case <-stopCh:
break VALUES1
case idCh <- id:
}
}
}
close(idCh)
return
}
VALUES2:
for v, ids := range q.index[expr.key] {
if !expr.value.MatchString(v) {
continue
}
for id := range ids {
select {
case <-stopCh:
break VALUES2
case idCh <- id:
}
}
}
close(idCh)
}
|
[
"func",
"(",
"q",
"*",
"TagQuery",
")",
"getInitialByMatch",
"(",
"expr",
"kvRe",
",",
"idCh",
"chan",
"schema",
".",
"MKey",
",",
"stopCh",
"chan",
"struct",
"{",
"}",
")",
"{",
"defer",
"q",
".",
"wg",
".",
"Done",
"(",
")",
"\n\n",
"// shortcut if value == nil.",
"// this will simply match any value, like ^.+. since we know that every value",
"// in the index must not be empty, we can skip the matching.",
"if",
"expr",
".",
"value",
"==",
"nil",
"{",
"VALUES1",
":",
"for",
"_",
",",
"ids",
":=",
"range",
"q",
".",
"index",
"[",
"expr",
".",
"key",
"]",
"{",
"for",
"id",
":=",
"range",
"ids",
"{",
"select",
"{",
"case",
"<-",
"stopCh",
":",
"break",
"VALUES1",
"\n",
"case",
"idCh",
"<-",
"id",
":",
"}",
"\n",
"}",
"\n",
"}",
"\n",
"close",
"(",
"idCh",
")",
"\n",
"return",
"\n",
"}",
"\n\n",
"VALUES2",
":",
"for",
"v",
",",
"ids",
":=",
"range",
"q",
".",
"index",
"[",
"expr",
".",
"key",
"]",
"{",
"if",
"!",
"expr",
".",
"value",
".",
"MatchString",
"(",
"v",
")",
"{",
"continue",
"\n",
"}",
"\n\n",
"for",
"id",
":=",
"range",
"ids",
"{",
"select",
"{",
"case",
"<-",
"stopCh",
":",
"break",
"VALUES2",
"\n",
"case",
"idCh",
"<-",
"id",
":",
"}",
"\n",
"}",
"\n",
"}",
"\n\n",
"close",
"(",
"idCh",
")",
"\n",
"}"
] |
// getInitialByMatch generates the initial resultset by executing the given match expression
|
[
"getInitialByMatch",
"generates",
"the",
"initial",
"resultset",
"by",
"executing",
"the",
"given",
"match",
"expression"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/idx/memory/tag_query.go#L376-L413
|
train
|
grafana/metrictank
|
idx/memory/tag_query.go
|
getInitialByTagPrefix
|
func (q *TagQuery) getInitialByTagPrefix(idCh chan schema.MKey, stopCh chan struct{}) {
defer q.wg.Done()
TAGS:
for tag, values := range q.index {
if !strings.HasPrefix(tag, q.tagPrefix) {
continue
}
for _, ids := range values {
for id := range ids {
select {
case <-stopCh:
break TAGS
case idCh <- id:
}
}
}
}
close(idCh)
}
|
go
|
func (q *TagQuery) getInitialByTagPrefix(idCh chan schema.MKey, stopCh chan struct{}) {
defer q.wg.Done()
TAGS:
for tag, values := range q.index {
if !strings.HasPrefix(tag, q.tagPrefix) {
continue
}
for _, ids := range values {
for id := range ids {
select {
case <-stopCh:
break TAGS
case idCh <- id:
}
}
}
}
close(idCh)
}
|
[
"func",
"(",
"q",
"*",
"TagQuery",
")",
"getInitialByTagPrefix",
"(",
"idCh",
"chan",
"schema",
".",
"MKey",
",",
"stopCh",
"chan",
"struct",
"{",
"}",
")",
"{",
"defer",
"q",
".",
"wg",
".",
"Done",
"(",
")",
"\n\n",
"TAGS",
":",
"for",
"tag",
",",
"values",
":=",
"range",
"q",
".",
"index",
"{",
"if",
"!",
"strings",
".",
"HasPrefix",
"(",
"tag",
",",
"q",
".",
"tagPrefix",
")",
"{",
"continue",
"\n",
"}",
"\n\n",
"for",
"_",
",",
"ids",
":=",
"range",
"values",
"{",
"for",
"id",
":=",
"range",
"ids",
"{",
"select",
"{",
"case",
"<-",
"stopCh",
":",
"break",
"TAGS",
"\n",
"case",
"idCh",
"<-",
"id",
":",
"}",
"\n",
"}",
"\n",
"}",
"\n",
"}",
"\n\n",
"close",
"(",
"idCh",
")",
"\n",
"}"
] |
// getInitialByTagPrefix generates the initial resultset by creating a list of
// metric IDs of which at least one tag starts with the defined prefix
|
[
"getInitialByTagPrefix",
"generates",
"the",
"initial",
"resultset",
"by",
"creating",
"a",
"list",
"of",
"metric",
"IDs",
"of",
"which",
"at",
"least",
"one",
"tag",
"starts",
"with",
"the",
"defined",
"prefix"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/idx/memory/tag_query.go#L417-L438
|
train
|
grafana/metrictank
|
idx/memory/tag_query.go
|
getInitialByTagMatch
|
func (q *TagQuery) getInitialByTagMatch(idCh chan schema.MKey, stopCh chan struct{}) {
defer q.wg.Done()
TAGS:
for tag, values := range q.index {
if q.tagMatch.value.MatchString(tag) {
for _, ids := range values {
for id := range ids {
select {
case <-stopCh:
break TAGS
case idCh <- id:
}
}
}
}
}
close(idCh)
}
|
go
|
func (q *TagQuery) getInitialByTagMatch(idCh chan schema.MKey, stopCh chan struct{}) {
defer q.wg.Done()
TAGS:
for tag, values := range q.index {
if q.tagMatch.value.MatchString(tag) {
for _, ids := range values {
for id := range ids {
select {
case <-stopCh:
break TAGS
case idCh <- id:
}
}
}
}
}
close(idCh)
}
|
[
"func",
"(",
"q",
"*",
"TagQuery",
")",
"getInitialByTagMatch",
"(",
"idCh",
"chan",
"schema",
".",
"MKey",
",",
"stopCh",
"chan",
"struct",
"{",
"}",
")",
"{",
"defer",
"q",
".",
"wg",
".",
"Done",
"(",
")",
"\n\n",
"TAGS",
":",
"for",
"tag",
",",
"values",
":=",
"range",
"q",
".",
"index",
"{",
"if",
"q",
".",
"tagMatch",
".",
"value",
".",
"MatchString",
"(",
"tag",
")",
"{",
"for",
"_",
",",
"ids",
":=",
"range",
"values",
"{",
"for",
"id",
":=",
"range",
"ids",
"{",
"select",
"{",
"case",
"<-",
"stopCh",
":",
"break",
"TAGS",
"\n",
"case",
"idCh",
"<-",
"id",
":",
"}",
"\n",
"}",
"\n",
"}",
"\n",
"}",
"\n",
"}",
"\n\n",
"close",
"(",
"idCh",
")",
"\n",
"}"
] |
// getInitialByTagMatch generates the initial resultset by creating a list of
// metric IDs of which at least one tag matches the defined regex
|
[
"getInitialByTagMatch",
"generates",
"the",
"initial",
"resultset",
"by",
"creating",
"a",
"list",
"of",
"metric",
"IDs",
"of",
"which",
"at",
"least",
"one",
"tag",
"matches",
"the",
"defined",
"regex"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/idx/memory/tag_query.go#L442-L461
|
train
|
grafana/metrictank
|
idx/memory/tag_query.go
|
filterIdsFromChan
|
func (q *TagQuery) filterIdsFromChan(idCh, resCh chan schema.MKey) {
for id := range idCh {
var def *idx.Archive
var ok bool
if def, ok = q.byId[id]; !ok {
// should never happen because every ID in the tag index
// must be present in the byId lookup table
corruptIndex.Inc()
log.Errorf("memory-idx: ID %q is in tag index but not in the byId lookup table", id)
continue
}
// we always omit tag filters because Run() does not support filtering by tags
if q.testByAllExpressions(id, def, false) {
resCh <- id
}
}
q.wg.Done()
}
|
go
|
func (q *TagQuery) filterIdsFromChan(idCh, resCh chan schema.MKey) {
for id := range idCh {
var def *idx.Archive
var ok bool
if def, ok = q.byId[id]; !ok {
// should never happen because every ID in the tag index
// must be present in the byId lookup table
corruptIndex.Inc()
log.Errorf("memory-idx: ID %q is in tag index but not in the byId lookup table", id)
continue
}
// we always omit tag filters because Run() does not support filtering by tags
if q.testByAllExpressions(id, def, false) {
resCh <- id
}
}
q.wg.Done()
}
|
[
"func",
"(",
"q",
"*",
"TagQuery",
")",
"filterIdsFromChan",
"(",
"idCh",
",",
"resCh",
"chan",
"schema",
".",
"MKey",
")",
"{",
"for",
"id",
":=",
"range",
"idCh",
"{",
"var",
"def",
"*",
"idx",
".",
"Archive",
"\n",
"var",
"ok",
"bool",
"\n\n",
"if",
"def",
",",
"ok",
"=",
"q",
".",
"byId",
"[",
"id",
"]",
";",
"!",
"ok",
"{",
"// should never happen because every ID in the tag index",
"// must be present in the byId lookup table",
"corruptIndex",
".",
"Inc",
"(",
")",
"\n",
"log",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"id",
")",
"\n",
"continue",
"\n",
"}",
"\n\n",
"// we always omit tag filters because Run() does not support filtering by tags",
"if",
"q",
".",
"testByAllExpressions",
"(",
"id",
",",
"def",
",",
"false",
")",
"{",
"resCh",
"<-",
"id",
"\n",
"}",
"\n",
"}",
"\n\n",
"q",
".",
"wg",
".",
"Done",
"(",
")",
"\n",
"}"
] |
// filterIdsFromChan takes a channel of metric ids and runs them through the
// required tests to decide whether a metric should be part of the final
// result set or not
// it returns the final result set via the given resCh parameter
|
[
"filterIdsFromChan",
"takes",
"a",
"channel",
"of",
"metric",
"ids",
"and",
"runs",
"them",
"through",
"the",
"required",
"tests",
"to",
"decide",
"whether",
"a",
"metric",
"should",
"be",
"part",
"of",
"the",
"final",
"result",
"set",
"or",
"not",
"it",
"returns",
"the",
"final",
"result",
"set",
"via",
"the",
"given",
"resCh",
"parameter"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/idx/memory/tag_query.go#L727-L747
|
train
|
grafana/metrictank
|
idx/memory/tag_query.go
|
sortByCost
|
func (q *TagQuery) sortByCost() {
for i, kv := range q.equal {
q.equal[i].cost = uint(len(q.index[kv.key][kv.value]))
}
// for prefix and match clauses we can't determine the actual cost
// without actually evaluating them, so we estimate based on
// cardinality of the key
for i, kv := range q.prefix {
q.prefix[i].cost = uint(len(q.index[kv.key]))
}
for i, kvRe := range q.match {
q.match[i].cost = uint(len(q.index[kvRe.key]))
}
sort.Sort(KvByCost(q.equal))
sort.Sort(KvByCost(q.notEqual))
sort.Sort(KvByCost(q.prefix))
sort.Sort(KvReByCost(q.match))
sort.Sort(KvReByCost(q.notMatch))
}
|
go
|
func (q *TagQuery) sortByCost() {
for i, kv := range q.equal {
q.equal[i].cost = uint(len(q.index[kv.key][kv.value]))
}
// for prefix and match clauses we can't determine the actual cost
// without actually evaluating them, so we estimate based on
// cardinality of the key
for i, kv := range q.prefix {
q.prefix[i].cost = uint(len(q.index[kv.key]))
}
for i, kvRe := range q.match {
q.match[i].cost = uint(len(q.index[kvRe.key]))
}
sort.Sort(KvByCost(q.equal))
sort.Sort(KvByCost(q.notEqual))
sort.Sort(KvByCost(q.prefix))
sort.Sort(KvReByCost(q.match))
sort.Sort(KvReByCost(q.notMatch))
}
|
[
"func",
"(",
"q",
"*",
"TagQuery",
")",
"sortByCost",
"(",
")",
"{",
"for",
"i",
",",
"kv",
":=",
"range",
"q",
".",
"equal",
"{",
"q",
".",
"equal",
"[",
"i",
"]",
".",
"cost",
"=",
"uint",
"(",
"len",
"(",
"q",
".",
"index",
"[",
"kv",
".",
"key",
"]",
"[",
"kv",
".",
"value",
"]",
")",
")",
"\n",
"}",
"\n\n",
"// for prefix and match clauses we can't determine the actual cost",
"// without actually evaluating them, so we estimate based on",
"// cardinality of the key",
"for",
"i",
",",
"kv",
":=",
"range",
"q",
".",
"prefix",
"{",
"q",
".",
"prefix",
"[",
"i",
"]",
".",
"cost",
"=",
"uint",
"(",
"len",
"(",
"q",
".",
"index",
"[",
"kv",
".",
"key",
"]",
")",
")",
"\n",
"}",
"\n\n",
"for",
"i",
",",
"kvRe",
":=",
"range",
"q",
".",
"match",
"{",
"q",
".",
"match",
"[",
"i",
"]",
".",
"cost",
"=",
"uint",
"(",
"len",
"(",
"q",
".",
"index",
"[",
"kvRe",
".",
"key",
"]",
")",
")",
"\n",
"}",
"\n\n",
"sort",
".",
"Sort",
"(",
"KvByCost",
"(",
"q",
".",
"equal",
")",
")",
"\n",
"sort",
".",
"Sort",
"(",
"KvByCost",
"(",
"q",
".",
"notEqual",
")",
")",
"\n",
"sort",
".",
"Sort",
"(",
"KvByCost",
"(",
"q",
".",
"prefix",
")",
")",
"\n",
"sort",
".",
"Sort",
"(",
"KvReByCost",
"(",
"q",
".",
"match",
")",
")",
"\n",
"sort",
".",
"Sort",
"(",
"KvReByCost",
"(",
"q",
".",
"notMatch",
")",
")",
"\n",
"}"
] |
// sortByCost tries to estimate the cost of different expressions and sort them
// in increasing order
// this is to reduce the result set cheaply and only apply expensive tests to an
// already reduced set of results
|
[
"sortByCost",
"tries",
"to",
"estimate",
"the",
"cost",
"of",
"different",
"expressions",
"and",
"sort",
"them",
"in",
"increasing",
"order",
"this",
"is",
"to",
"reduce",
"the",
"result",
"set",
"cheaply",
"and",
"only",
"apply",
"expensive",
"tests",
"to",
"an",
"already",
"reduced",
"set",
"of",
"results"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/idx/memory/tag_query.go#L753-L774
|
train
|
grafana/metrictank
|
idx/memory/tag_query.go
|
Run
|
func (q *TagQuery) Run(index TagIndex, byId map[schema.MKey]*idx.Archive) IdSet {
q.index = index
q.byId = byId
q.sortByCost()
idCh, _ := q.getInitialIds()
resCh := make(chan schema.MKey)
// start the tag query workers. they'll consume the ids on the idCh and
// evaluate for each of them whether it satisfies all the conditions
// defined in the query expressions. those that satisfy all conditions
// will be pushed into the resCh
q.wg.Add(TagQueryWorkers)
for i := 0; i < TagQueryWorkers; i++ {
go q.filterIdsFromChan(idCh, resCh)
}
go func() {
q.wg.Wait()
close(resCh)
}()
result := make(IdSet)
for id := range resCh {
result[id] = struct{}{}
}
return result
}
|
go
|
func (q *TagQuery) Run(index TagIndex, byId map[schema.MKey]*idx.Archive) IdSet {
q.index = index
q.byId = byId
q.sortByCost()
idCh, _ := q.getInitialIds()
resCh := make(chan schema.MKey)
// start the tag query workers. they'll consume the ids on the idCh and
// evaluate for each of them whether it satisfies all the conditions
// defined in the query expressions. those that satisfy all conditions
// will be pushed into the resCh
q.wg.Add(TagQueryWorkers)
for i := 0; i < TagQueryWorkers; i++ {
go q.filterIdsFromChan(idCh, resCh)
}
go func() {
q.wg.Wait()
close(resCh)
}()
result := make(IdSet)
for id := range resCh {
result[id] = struct{}{}
}
return result
}
|
[
"func",
"(",
"q",
"*",
"TagQuery",
")",
"Run",
"(",
"index",
"TagIndex",
",",
"byId",
"map",
"[",
"schema",
".",
"MKey",
"]",
"*",
"idx",
".",
"Archive",
")",
"IdSet",
"{",
"q",
".",
"index",
"=",
"index",
"\n",
"q",
".",
"byId",
"=",
"byId",
"\n\n",
"q",
".",
"sortByCost",
"(",
")",
"\n\n",
"idCh",
",",
"_",
":=",
"q",
".",
"getInitialIds",
"(",
")",
"\n",
"resCh",
":=",
"make",
"(",
"chan",
"schema",
".",
"MKey",
")",
"\n\n",
"// start the tag query workers. they'll consume the ids on the idCh and",
"// evaluate for each of them whether it satisfies all the conditions",
"// defined in the query expressions. those that satisfy all conditions",
"// will be pushed into the resCh",
"q",
".",
"wg",
".",
"Add",
"(",
"TagQueryWorkers",
")",
"\n",
"for",
"i",
":=",
"0",
";",
"i",
"<",
"TagQueryWorkers",
";",
"i",
"++",
"{",
"go",
"q",
".",
"filterIdsFromChan",
"(",
"idCh",
",",
"resCh",
")",
"\n",
"}",
"\n\n",
"go",
"func",
"(",
")",
"{",
"q",
".",
"wg",
".",
"Wait",
"(",
")",
"\n",
"close",
"(",
"resCh",
")",
"\n",
"}",
"(",
")",
"\n\n",
"result",
":=",
"make",
"(",
"IdSet",
")",
"\n\n",
"for",
"id",
":=",
"range",
"resCh",
"{",
"result",
"[",
"id",
"]",
"=",
"struct",
"{",
"}",
"{",
"}",
"\n",
"}",
"\n\n",
"return",
"result",
"\n",
"}"
] |
// Run executes the tag query on the given index and returns a list of ids
|
[
"Run",
"executes",
"the",
"tag",
"query",
"on",
"the",
"given",
"index",
"and",
"returns",
"a",
"list",
"of",
"ids"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/idx/memory/tag_query.go#L777-L807
|
train
|
grafana/metrictank
|
idx/memory/tag_query.go
|
filterTagsFromChan
|
func (q *TagQuery) filterTagsFromChan(idCh chan schema.MKey, tagCh chan string, stopCh chan struct{}, omitTagFilters bool) {
// used to prevent that this worker thread will push the same result into
// the chan twice
resultsCache := make(map[string]struct{})
IDS:
for id := range idCh {
var def *idx.Archive
var ok bool
if def, ok = q.byId[id]; !ok {
// should never happen because every ID in the tag index
// must be present in the byId lookup table
corruptIndex.Inc()
log.Errorf("memory-idx: ID %q is in tag index but not in the byId lookup table", id)
continue
}
// generate a set of all tags of the current metric that satisfy the
// tag filter condition
metricTags := make(map[string]struct{}, 0)
for _, tag := range def.Tags {
equal := strings.Index(tag, "=")
if equal < 0 {
corruptIndex.Inc()
log.Errorf("memory-idx: ID %q has tag %q in index without '=' sign", id, tag)
continue
}
key := tag[:equal]
// this tag has already been pushed into tagCh, so we can stop evaluating
if _, ok := resultsCache[key]; ok {
continue
}
if q.tagClause == PREFIX_TAG {
if !strings.HasPrefix(key, q.tagPrefix) {
continue
}
} else if q.tagClause == MATCH_TAG {
if _, ok := q.tagMatch.missCache.Load(key); ok || !q.tagMatch.value.MatchString(tag) {
if !ok {
q.tagMatch.missCache.Store(key, struct{}{})
}
continue
}
}
metricTags[key] = struct{}{}
}
// if we don't filter tags, then we can assume that "name" should always be part of the result set
if omitTagFilters {
if _, ok := resultsCache["name"]; !ok {
metricTags["name"] = struct{}{}
}
}
// if some tags satisfy the current tag filter condition then we run
// the metric through all tag expression tests in order to decide
// whether those tags should be part of the final result set
if len(metricTags) > 0 {
if q.testByAllExpressions(id, def, omitTagFilters) {
for key := range metricTags {
select {
case tagCh <- key:
case <-stopCh:
// if execution of query has stopped because the max tag
// count has been reached then tagCh <- might block
// because that channel will not be consumed anymore. in
// that case the stop channel will have been closed so
// we so we exit here
break IDS
}
resultsCache[key] = struct{}{}
}
} else {
// check if we need to stop
select {
case <-stopCh:
break IDS
default:
}
}
}
}
q.wg.Done()
}
|
go
|
func (q *TagQuery) filterTagsFromChan(idCh chan schema.MKey, tagCh chan string, stopCh chan struct{}, omitTagFilters bool) {
// used to prevent that this worker thread will push the same result into
// the chan twice
resultsCache := make(map[string]struct{})
IDS:
for id := range idCh {
var def *idx.Archive
var ok bool
if def, ok = q.byId[id]; !ok {
// should never happen because every ID in the tag index
// must be present in the byId lookup table
corruptIndex.Inc()
log.Errorf("memory-idx: ID %q is in tag index but not in the byId lookup table", id)
continue
}
// generate a set of all tags of the current metric that satisfy the
// tag filter condition
metricTags := make(map[string]struct{}, 0)
for _, tag := range def.Tags {
equal := strings.Index(tag, "=")
if equal < 0 {
corruptIndex.Inc()
log.Errorf("memory-idx: ID %q has tag %q in index without '=' sign", id, tag)
continue
}
key := tag[:equal]
// this tag has already been pushed into tagCh, so we can stop evaluating
if _, ok := resultsCache[key]; ok {
continue
}
if q.tagClause == PREFIX_TAG {
if !strings.HasPrefix(key, q.tagPrefix) {
continue
}
} else if q.tagClause == MATCH_TAG {
if _, ok := q.tagMatch.missCache.Load(key); ok || !q.tagMatch.value.MatchString(tag) {
if !ok {
q.tagMatch.missCache.Store(key, struct{}{})
}
continue
}
}
metricTags[key] = struct{}{}
}
// if we don't filter tags, then we can assume that "name" should always be part of the result set
if omitTagFilters {
if _, ok := resultsCache["name"]; !ok {
metricTags["name"] = struct{}{}
}
}
// if some tags satisfy the current tag filter condition then we run
// the metric through all tag expression tests in order to decide
// whether those tags should be part of the final result set
if len(metricTags) > 0 {
if q.testByAllExpressions(id, def, omitTagFilters) {
for key := range metricTags {
select {
case tagCh <- key:
case <-stopCh:
// if execution of query has stopped because the max tag
// count has been reached then tagCh <- might block
// because that channel will not be consumed anymore. in
// that case the stop channel will have been closed so
// we so we exit here
break IDS
}
resultsCache[key] = struct{}{}
}
} else {
// check if we need to stop
select {
case <-stopCh:
break IDS
default:
}
}
}
}
q.wg.Done()
}
|
[
"func",
"(",
"q",
"*",
"TagQuery",
")",
"filterTagsFromChan",
"(",
"idCh",
"chan",
"schema",
".",
"MKey",
",",
"tagCh",
"chan",
"string",
",",
"stopCh",
"chan",
"struct",
"{",
"}",
",",
"omitTagFilters",
"bool",
")",
"{",
"// used to prevent that this worker thread will push the same result into",
"// the chan twice",
"resultsCache",
":=",
"make",
"(",
"map",
"[",
"string",
"]",
"struct",
"{",
"}",
")",
"\n\n",
"IDS",
":",
"for",
"id",
":=",
"range",
"idCh",
"{",
"var",
"def",
"*",
"idx",
".",
"Archive",
"\n",
"var",
"ok",
"bool",
"\n\n",
"if",
"def",
",",
"ok",
"=",
"q",
".",
"byId",
"[",
"id",
"]",
";",
"!",
"ok",
"{",
"// should never happen because every ID in the tag index",
"// must be present in the byId lookup table",
"corruptIndex",
".",
"Inc",
"(",
")",
"\n",
"log",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"id",
")",
"\n",
"continue",
"\n",
"}",
"\n\n",
"// generate a set of all tags of the current metric that satisfy the",
"// tag filter condition",
"metricTags",
":=",
"make",
"(",
"map",
"[",
"string",
"]",
"struct",
"{",
"}",
",",
"0",
")",
"\n",
"for",
"_",
",",
"tag",
":=",
"range",
"def",
".",
"Tags",
"{",
"equal",
":=",
"strings",
".",
"Index",
"(",
"tag",
",",
"\"",
"\"",
")",
"\n",
"if",
"equal",
"<",
"0",
"{",
"corruptIndex",
".",
"Inc",
"(",
")",
"\n",
"log",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"id",
",",
"tag",
")",
"\n",
"continue",
"\n",
"}",
"\n\n",
"key",
":=",
"tag",
"[",
":",
"equal",
"]",
"\n",
"// this tag has already been pushed into tagCh, so we can stop evaluating",
"if",
"_",
",",
"ok",
":=",
"resultsCache",
"[",
"key",
"]",
";",
"ok",
"{",
"continue",
"\n",
"}",
"\n\n",
"if",
"q",
".",
"tagClause",
"==",
"PREFIX_TAG",
"{",
"if",
"!",
"strings",
".",
"HasPrefix",
"(",
"key",
",",
"q",
".",
"tagPrefix",
")",
"{",
"continue",
"\n",
"}",
"\n",
"}",
"else",
"if",
"q",
".",
"tagClause",
"==",
"MATCH_TAG",
"{",
"if",
"_",
",",
"ok",
":=",
"q",
".",
"tagMatch",
".",
"missCache",
".",
"Load",
"(",
"key",
")",
";",
"ok",
"||",
"!",
"q",
".",
"tagMatch",
".",
"value",
".",
"MatchString",
"(",
"tag",
")",
"{",
"if",
"!",
"ok",
"{",
"q",
".",
"tagMatch",
".",
"missCache",
".",
"Store",
"(",
"key",
",",
"struct",
"{",
"}",
"{",
"}",
")",
"\n",
"}",
"\n",
"continue",
"\n",
"}",
"\n",
"}",
"\n",
"metricTags",
"[",
"key",
"]",
"=",
"struct",
"{",
"}",
"{",
"}",
"\n",
"}",
"\n\n",
"// if we don't filter tags, then we can assume that \"name\" should always be part of the result set",
"if",
"omitTagFilters",
"{",
"if",
"_",
",",
"ok",
":=",
"resultsCache",
"[",
"\"",
"\"",
"]",
";",
"!",
"ok",
"{",
"metricTags",
"[",
"\"",
"\"",
"]",
"=",
"struct",
"{",
"}",
"{",
"}",
"\n",
"}",
"\n",
"}",
"\n\n",
"// if some tags satisfy the current tag filter condition then we run",
"// the metric through all tag expression tests in order to decide",
"// whether those tags should be part of the final result set",
"if",
"len",
"(",
"metricTags",
")",
">",
"0",
"{",
"if",
"q",
".",
"testByAllExpressions",
"(",
"id",
",",
"def",
",",
"omitTagFilters",
")",
"{",
"for",
"key",
":=",
"range",
"metricTags",
"{",
"select",
"{",
"case",
"tagCh",
"<-",
"key",
":",
"case",
"<-",
"stopCh",
":",
"// if execution of query has stopped because the max tag",
"// count has been reached then tagCh <- might block",
"// because that channel will not be consumed anymore. in",
"// that case the stop channel will have been closed so",
"// we so we exit here",
"break",
"IDS",
"\n",
"}",
"\n",
"resultsCache",
"[",
"key",
"]",
"=",
"struct",
"{",
"}",
"{",
"}",
"\n",
"}",
"\n",
"}",
"else",
"{",
"// check if we need to stop",
"select",
"{",
"case",
"<-",
"stopCh",
":",
"break",
"IDS",
"\n",
"default",
":",
"}",
"\n",
"}",
"\n",
"}",
"\n",
"}",
"\n\n",
"q",
".",
"wg",
".",
"Done",
"(",
")",
"\n",
"}"
] |
// filterTagsFromChan takes a channel of metric IDs and evaluates each of them
// according to the criteria associated with this query
// those that pass all the tests will have their relevant tags extracted, which
// are then pushed into the given tag channel
|
[
"filterTagsFromChan",
"takes",
"a",
"channel",
"of",
"metric",
"IDs",
"and",
"evaluates",
"each",
"of",
"them",
"according",
"to",
"the",
"criteria",
"associated",
"with",
"this",
"query",
"those",
"that",
"pass",
"all",
"the",
"tests",
"will",
"have",
"their",
"relevant",
"tags",
"extracted",
"which",
"are",
"then",
"pushed",
"into",
"the",
"given",
"tag",
"channel"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/idx/memory/tag_query.go#L841-L928
|
train
|
grafana/metrictank
|
idx/memory/tag_query.go
|
RunGetTags
|
func (q *TagQuery) RunGetTags(index TagIndex, byId map[schema.MKey]*idx.Archive) map[string]struct{} {
q.index = index
q.byId = byId
maxTagCount := int32(math.MaxInt32)
// start a thread to calculate the maximum possible number of tags.
// this might not always complete before the query execution, but in most
// cases it likely will. when it does end before the execution of the query,
// the value of maxTagCount will be used to abort the query execution once
// the max number of possible tags has been reached
q.wg.Add(1)
go atomic.StoreInt32(&maxTagCount, int32(q.getMaxTagCount()))
q.sortByCost()
idCh, stopCh := q.getInitialIds()
tagCh := make(chan string)
// we know there can only be 1 tag filter, so if we detect that the given
// tag condition matches the special tag "name", we can omit the filtering
// because every metric has a name.
matchName := q.tagFilterMatchesName()
// start the tag query workers. they'll consume the ids on the idCh and
// evaluate for each of them whether it satisfies all the conditions
// defined in the query expressions. then they will extract the tags of
// those that satisfy all conditions and push them into tagCh.
q.wg.Add(TagQueryWorkers)
for i := 0; i < TagQueryWorkers; i++ {
go q.filterTagsFromChan(idCh, tagCh, stopCh, matchName)
}
go func() {
q.wg.Wait()
close(tagCh)
}()
result := make(map[string]struct{})
for tag := range tagCh {
result[tag] = struct{}{}
// if we know that there can't be more results than what we have
// abort the query execution
if int32(len(result)) >= atomic.LoadInt32(&maxTagCount) {
break
}
}
// abort query execution and wait for all workers to end
close(stopCh)
q.wg.Wait()
return result
}
|
go
|
func (q *TagQuery) RunGetTags(index TagIndex, byId map[schema.MKey]*idx.Archive) map[string]struct{} {
q.index = index
q.byId = byId
maxTagCount := int32(math.MaxInt32)
// start a thread to calculate the maximum possible number of tags.
// this might not always complete before the query execution, but in most
// cases it likely will. when it does end before the execution of the query,
// the value of maxTagCount will be used to abort the query execution once
// the max number of possible tags has been reached
q.wg.Add(1)
go atomic.StoreInt32(&maxTagCount, int32(q.getMaxTagCount()))
q.sortByCost()
idCh, stopCh := q.getInitialIds()
tagCh := make(chan string)
// we know there can only be 1 tag filter, so if we detect that the given
// tag condition matches the special tag "name", we can omit the filtering
// because every metric has a name.
matchName := q.tagFilterMatchesName()
// start the tag query workers. they'll consume the ids on the idCh and
// evaluate for each of them whether it satisfies all the conditions
// defined in the query expressions. then they will extract the tags of
// those that satisfy all conditions and push them into tagCh.
q.wg.Add(TagQueryWorkers)
for i := 0; i < TagQueryWorkers; i++ {
go q.filterTagsFromChan(idCh, tagCh, stopCh, matchName)
}
go func() {
q.wg.Wait()
close(tagCh)
}()
result := make(map[string]struct{})
for tag := range tagCh {
result[tag] = struct{}{}
// if we know that there can't be more results than what we have
// abort the query execution
if int32(len(result)) >= atomic.LoadInt32(&maxTagCount) {
break
}
}
// abort query execution and wait for all workers to end
close(stopCh)
q.wg.Wait()
return result
}
|
[
"func",
"(",
"q",
"*",
"TagQuery",
")",
"RunGetTags",
"(",
"index",
"TagIndex",
",",
"byId",
"map",
"[",
"schema",
".",
"MKey",
"]",
"*",
"idx",
".",
"Archive",
")",
"map",
"[",
"string",
"]",
"struct",
"{",
"}",
"{",
"q",
".",
"index",
"=",
"index",
"\n",
"q",
".",
"byId",
"=",
"byId",
"\n\n",
"maxTagCount",
":=",
"int32",
"(",
"math",
".",
"MaxInt32",
")",
"\n\n",
"// start a thread to calculate the maximum possible number of tags.",
"// this might not always complete before the query execution, but in most",
"// cases it likely will. when it does end before the execution of the query,",
"// the value of maxTagCount will be used to abort the query execution once",
"// the max number of possible tags has been reached",
"q",
".",
"wg",
".",
"Add",
"(",
"1",
")",
"\n",
"go",
"atomic",
".",
"StoreInt32",
"(",
"&",
"maxTagCount",
",",
"int32",
"(",
"q",
".",
"getMaxTagCount",
"(",
")",
")",
")",
"\n\n",
"q",
".",
"sortByCost",
"(",
")",
"\n",
"idCh",
",",
"stopCh",
":=",
"q",
".",
"getInitialIds",
"(",
")",
"\n",
"tagCh",
":=",
"make",
"(",
"chan",
"string",
")",
"\n\n",
"// we know there can only be 1 tag filter, so if we detect that the given",
"// tag condition matches the special tag \"name\", we can omit the filtering",
"// because every metric has a name.",
"matchName",
":=",
"q",
".",
"tagFilterMatchesName",
"(",
")",
"\n\n",
"// start the tag query workers. they'll consume the ids on the idCh and",
"// evaluate for each of them whether it satisfies all the conditions",
"// defined in the query expressions. then they will extract the tags of",
"// those that satisfy all conditions and push them into tagCh.",
"q",
".",
"wg",
".",
"Add",
"(",
"TagQueryWorkers",
")",
"\n",
"for",
"i",
":=",
"0",
";",
"i",
"<",
"TagQueryWorkers",
";",
"i",
"++",
"{",
"go",
"q",
".",
"filterTagsFromChan",
"(",
"idCh",
",",
"tagCh",
",",
"stopCh",
",",
"matchName",
")",
"\n",
"}",
"\n\n",
"go",
"func",
"(",
")",
"{",
"q",
".",
"wg",
".",
"Wait",
"(",
")",
"\n",
"close",
"(",
"tagCh",
")",
"\n",
"}",
"(",
")",
"\n\n",
"result",
":=",
"make",
"(",
"map",
"[",
"string",
"]",
"struct",
"{",
"}",
")",
"\n\n",
"for",
"tag",
":=",
"range",
"tagCh",
"{",
"result",
"[",
"tag",
"]",
"=",
"struct",
"{",
"}",
"{",
"}",
"\n\n",
"// if we know that there can't be more results than what we have",
"// abort the query execution",
"if",
"int32",
"(",
"len",
"(",
"result",
")",
")",
">=",
"atomic",
".",
"LoadInt32",
"(",
"&",
"maxTagCount",
")",
"{",
"break",
"\n",
"}",
"\n",
"}",
"\n\n",
"// abort query execution and wait for all workers to end",
"close",
"(",
"stopCh",
")",
"\n\n",
"q",
".",
"wg",
".",
"Wait",
"(",
")",
"\n",
"return",
"result",
"\n",
"}"
] |
// RunGetTags executes the tag query and returns all the tags of the
// resulting metrics
|
[
"RunGetTags",
"executes",
"the",
"tag",
"query",
"and",
"returns",
"all",
"the",
"tags",
"of",
"the",
"resulting",
"metrics"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/idx/memory/tag_query.go#L958-L1012
|
train
|
grafana/metrictank
|
mdata/chunk/tsz/tsz.go
|
NewSeries4h
|
func NewSeries4h(t0 uint32) *Series4h {
s := Series4h{
T0: t0,
leading: ^uint8(0),
}
// block header
s.bw.writeBits(uint64(t0), 32)
return &s
}
|
go
|
func NewSeries4h(t0 uint32) *Series4h {
s := Series4h{
T0: t0,
leading: ^uint8(0),
}
// block header
s.bw.writeBits(uint64(t0), 32)
return &s
}
|
[
"func",
"NewSeries4h",
"(",
"t0",
"uint32",
")",
"*",
"Series4h",
"{",
"s",
":=",
"Series4h",
"{",
"T0",
":",
"t0",
",",
"leading",
":",
"^",
"uint8",
"(",
"0",
")",
",",
"}",
"\n\n",
"// block header",
"s",
".",
"bw",
".",
"writeBits",
"(",
"uint64",
"(",
"t0",
")",
",",
"32",
")",
"\n\n",
"return",
"&",
"s",
"\n\n",
"}"
] |
// NewSeries4h creates a new Series4h
|
[
"NewSeries4h",
"creates",
"a",
"new",
"Series4h"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/mdata/chunk/tsz/tsz.go#L39-L50
|
train
|
grafana/metrictank
|
mdata/chunk/tsz/tsz.go
|
Push
|
func (s *Series4h) Push(t uint32, v float64) {
s.Lock()
defer s.Unlock()
if s.t == 0 {
// first point
s.t = t
s.val = v
s.tDelta = t - s.T0
s.bw.writeBits(uint64(s.tDelta), 14)
s.bw.writeBits(math.Float64bits(v), 64)
return
}
tDelta := t - s.t
dod := int32(tDelta - s.tDelta)
switch {
case dod == 0:
s.bw.writeBit(zero)
case -63 <= dod && dod <= 64:
s.bw.writeBits(0x02, 2) // '10'
s.bw.writeBits(uint64(dod), 7)
case -255 <= dod && dod <= 256:
s.bw.writeBits(0x06, 3) // '110'
s.bw.writeBits(uint64(dod), 9)
case -2047 <= dod && dod <= 2048:
s.bw.writeBits(0x0e, 4) // '1110'
s.bw.writeBits(uint64(dod), 12)
default:
s.bw.writeBits(0x0f, 4) // '1111'
s.bw.writeBits(uint64(dod), 32)
}
vDelta := math.Float64bits(v) ^ math.Float64bits(s.val)
if vDelta == 0 {
s.bw.writeBit(zero)
} else {
s.bw.writeBit(one)
leading := uint8(bits.LeadingZeros64(vDelta))
trailing := uint8(bits.TrailingZeros64(vDelta))
// clamp number of leading zeros to avoid overflow when encoding
if leading >= 32 {
leading = 31
}
// TODO(dgryski): check if it's 'cheaper' to reset the leading/trailing bits instead
if s.leading != ^uint8(0) && leading >= s.leading && trailing >= s.trailing {
s.bw.writeBit(zero)
s.bw.writeBits(vDelta>>s.trailing, 64-int(s.leading)-int(s.trailing))
} else {
s.leading, s.trailing = leading, trailing
s.bw.writeBit(one)
s.bw.writeBits(uint64(leading), 5)
// Note that if leading == trailing == 0, then sigbits == 64. But that value doesn't actually fit into the 6 bits we have.
// Luckily, we never need to encode 0 significant bits, since that would put us in the other case (vdelta == 0).
// So instead we write out a 0 and adjust it back to 64 on unpacking.
sigbits := 64 - leading - trailing
s.bw.writeBits(uint64(sigbits), 6)
s.bw.writeBits(vDelta>>trailing, int(sigbits))
}
}
s.tDelta = tDelta
s.t = t
s.val = v
}
|
go
|
func (s *Series4h) Push(t uint32, v float64) {
s.Lock()
defer s.Unlock()
if s.t == 0 {
// first point
s.t = t
s.val = v
s.tDelta = t - s.T0
s.bw.writeBits(uint64(s.tDelta), 14)
s.bw.writeBits(math.Float64bits(v), 64)
return
}
tDelta := t - s.t
dod := int32(tDelta - s.tDelta)
switch {
case dod == 0:
s.bw.writeBit(zero)
case -63 <= dod && dod <= 64:
s.bw.writeBits(0x02, 2) // '10'
s.bw.writeBits(uint64(dod), 7)
case -255 <= dod && dod <= 256:
s.bw.writeBits(0x06, 3) // '110'
s.bw.writeBits(uint64(dod), 9)
case -2047 <= dod && dod <= 2048:
s.bw.writeBits(0x0e, 4) // '1110'
s.bw.writeBits(uint64(dod), 12)
default:
s.bw.writeBits(0x0f, 4) // '1111'
s.bw.writeBits(uint64(dod), 32)
}
vDelta := math.Float64bits(v) ^ math.Float64bits(s.val)
if vDelta == 0 {
s.bw.writeBit(zero)
} else {
s.bw.writeBit(one)
leading := uint8(bits.LeadingZeros64(vDelta))
trailing := uint8(bits.TrailingZeros64(vDelta))
// clamp number of leading zeros to avoid overflow when encoding
if leading >= 32 {
leading = 31
}
// TODO(dgryski): check if it's 'cheaper' to reset the leading/trailing bits instead
if s.leading != ^uint8(0) && leading >= s.leading && trailing >= s.trailing {
s.bw.writeBit(zero)
s.bw.writeBits(vDelta>>s.trailing, 64-int(s.leading)-int(s.trailing))
} else {
s.leading, s.trailing = leading, trailing
s.bw.writeBit(one)
s.bw.writeBits(uint64(leading), 5)
// Note that if leading == trailing == 0, then sigbits == 64. But that value doesn't actually fit into the 6 bits we have.
// Luckily, we never need to encode 0 significant bits, since that would put us in the other case (vdelta == 0).
// So instead we write out a 0 and adjust it back to 64 on unpacking.
sigbits := 64 - leading - trailing
s.bw.writeBits(uint64(sigbits), 6)
s.bw.writeBits(vDelta>>trailing, int(sigbits))
}
}
s.tDelta = tDelta
s.t = t
s.val = v
}
|
[
"func",
"(",
"s",
"*",
"Series4h",
")",
"Push",
"(",
"t",
"uint32",
",",
"v",
"float64",
")",
"{",
"s",
".",
"Lock",
"(",
")",
"\n",
"defer",
"s",
".",
"Unlock",
"(",
")",
"\n\n",
"if",
"s",
".",
"t",
"==",
"0",
"{",
"// first point",
"s",
".",
"t",
"=",
"t",
"\n",
"s",
".",
"val",
"=",
"v",
"\n",
"s",
".",
"tDelta",
"=",
"t",
"-",
"s",
".",
"T0",
"\n",
"s",
".",
"bw",
".",
"writeBits",
"(",
"uint64",
"(",
"s",
".",
"tDelta",
")",
",",
"14",
")",
"\n",
"s",
".",
"bw",
".",
"writeBits",
"(",
"math",
".",
"Float64bits",
"(",
"v",
")",
",",
"64",
")",
"\n",
"return",
"\n",
"}",
"\n\n",
"tDelta",
":=",
"t",
"-",
"s",
".",
"t",
"\n",
"dod",
":=",
"int32",
"(",
"tDelta",
"-",
"s",
".",
"tDelta",
")",
"\n\n",
"switch",
"{",
"case",
"dod",
"==",
"0",
":",
"s",
".",
"bw",
".",
"writeBit",
"(",
"zero",
")",
"\n",
"case",
"-",
"63",
"<=",
"dod",
"&&",
"dod",
"<=",
"64",
":",
"s",
".",
"bw",
".",
"writeBits",
"(",
"0x02",
",",
"2",
")",
"// '10'",
"\n",
"s",
".",
"bw",
".",
"writeBits",
"(",
"uint64",
"(",
"dod",
")",
",",
"7",
")",
"\n",
"case",
"-",
"255",
"<=",
"dod",
"&&",
"dod",
"<=",
"256",
":",
"s",
".",
"bw",
".",
"writeBits",
"(",
"0x06",
",",
"3",
")",
"// '110'",
"\n",
"s",
".",
"bw",
".",
"writeBits",
"(",
"uint64",
"(",
"dod",
")",
",",
"9",
")",
"\n",
"case",
"-",
"2047",
"<=",
"dod",
"&&",
"dod",
"<=",
"2048",
":",
"s",
".",
"bw",
".",
"writeBits",
"(",
"0x0e",
",",
"4",
")",
"// '1110'",
"\n",
"s",
".",
"bw",
".",
"writeBits",
"(",
"uint64",
"(",
"dod",
")",
",",
"12",
")",
"\n",
"default",
":",
"s",
".",
"bw",
".",
"writeBits",
"(",
"0x0f",
",",
"4",
")",
"// '1111'",
"\n",
"s",
".",
"bw",
".",
"writeBits",
"(",
"uint64",
"(",
"dod",
")",
",",
"32",
")",
"\n",
"}",
"\n\n",
"vDelta",
":=",
"math",
".",
"Float64bits",
"(",
"v",
")",
"^",
"math",
".",
"Float64bits",
"(",
"s",
".",
"val",
")",
"\n\n",
"if",
"vDelta",
"==",
"0",
"{",
"s",
".",
"bw",
".",
"writeBit",
"(",
"zero",
")",
"\n",
"}",
"else",
"{",
"s",
".",
"bw",
".",
"writeBit",
"(",
"one",
")",
"\n\n",
"leading",
":=",
"uint8",
"(",
"bits",
".",
"LeadingZeros64",
"(",
"vDelta",
")",
")",
"\n",
"trailing",
":=",
"uint8",
"(",
"bits",
".",
"TrailingZeros64",
"(",
"vDelta",
")",
")",
"\n\n",
"// clamp number of leading zeros to avoid overflow when encoding",
"if",
"leading",
">=",
"32",
"{",
"leading",
"=",
"31",
"\n",
"}",
"\n\n",
"// TODO(dgryski): check if it's 'cheaper' to reset the leading/trailing bits instead",
"if",
"s",
".",
"leading",
"!=",
"^",
"uint8",
"(",
"0",
")",
"&&",
"leading",
">=",
"s",
".",
"leading",
"&&",
"trailing",
">=",
"s",
".",
"trailing",
"{",
"s",
".",
"bw",
".",
"writeBit",
"(",
"zero",
")",
"\n",
"s",
".",
"bw",
".",
"writeBits",
"(",
"vDelta",
">>",
"s",
".",
"trailing",
",",
"64",
"-",
"int",
"(",
"s",
".",
"leading",
")",
"-",
"int",
"(",
"s",
".",
"trailing",
")",
")",
"\n",
"}",
"else",
"{",
"s",
".",
"leading",
",",
"s",
".",
"trailing",
"=",
"leading",
",",
"trailing",
"\n\n",
"s",
".",
"bw",
".",
"writeBit",
"(",
"one",
")",
"\n",
"s",
".",
"bw",
".",
"writeBits",
"(",
"uint64",
"(",
"leading",
")",
",",
"5",
")",
"\n\n",
"// Note that if leading == trailing == 0, then sigbits == 64. But that value doesn't actually fit into the 6 bits we have.",
"// Luckily, we never need to encode 0 significant bits, since that would put us in the other case (vdelta == 0).",
"// So instead we write out a 0 and adjust it back to 64 on unpacking.",
"sigbits",
":=",
"64",
"-",
"leading",
"-",
"trailing",
"\n",
"s",
".",
"bw",
".",
"writeBits",
"(",
"uint64",
"(",
"sigbits",
")",
",",
"6",
")",
"\n",
"s",
".",
"bw",
".",
"writeBits",
"(",
"vDelta",
">>",
"trailing",
",",
"int",
"(",
"sigbits",
")",
")",
"\n",
"}",
"\n",
"}",
"\n\n",
"s",
".",
"tDelta",
"=",
"tDelta",
"\n",
"s",
".",
"t",
"=",
"t",
"\n",
"s",
".",
"val",
"=",
"v",
"\n\n",
"}"
] |
// Push a timestamp and value to the series
|
[
"Push",
"a",
"timestamp",
"and",
"value",
"to",
"the",
"series"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/mdata/chunk/tsz/tsz.go#L70-L142
|
train
|
grafana/metrictank
|
mdata/chunk/tsz/tsz.go
|
Iter
|
func (s *Series4h) Iter(intervalHint uint32) *Iter4h {
s.Lock()
w := s.bw.clone()
s.Unlock()
finishV1(w)
iter, _ := bstreamIterator4h(w, intervalHint)
return iter
}
|
go
|
func (s *Series4h) Iter(intervalHint uint32) *Iter4h {
s.Lock()
w := s.bw.clone()
s.Unlock()
finishV1(w)
iter, _ := bstreamIterator4h(w, intervalHint)
return iter
}
|
[
"func",
"(",
"s",
"*",
"Series4h",
")",
"Iter",
"(",
"intervalHint",
"uint32",
")",
"*",
"Iter4h",
"{",
"s",
".",
"Lock",
"(",
")",
"\n",
"w",
":=",
"s",
".",
"bw",
".",
"clone",
"(",
")",
"\n",
"s",
".",
"Unlock",
"(",
")",
"\n\n",
"finishV1",
"(",
"w",
")",
"\n",
"iter",
",",
"_",
":=",
"bstreamIterator4h",
"(",
"w",
",",
"intervalHint",
")",
"\n",
"return",
"iter",
"\n",
"}"
] |
// Iter4h lets you iterate over a series. It is not concurrency-safe.
|
[
"Iter4h",
"lets",
"you",
"iterate",
"over",
"a",
"series",
".",
"It",
"is",
"not",
"concurrency",
"-",
"safe",
"."
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/mdata/chunk/tsz/tsz.go#L145-L153
|
train
|
grafana/metrictank
|
mdata/chunk/tsz/tsz.go
|
NewIterator4h
|
func NewIterator4h(b []byte, intervalHint uint32) (*Iter4h, error) {
return bstreamIterator4h(newBReader(b), intervalHint)
}
|
go
|
func NewIterator4h(b []byte, intervalHint uint32) (*Iter4h, error) {
return bstreamIterator4h(newBReader(b), intervalHint)
}
|
[
"func",
"NewIterator4h",
"(",
"b",
"[",
"]",
"byte",
",",
"intervalHint",
"uint32",
")",
"(",
"*",
"Iter4h",
",",
"error",
")",
"{",
"return",
"bstreamIterator4h",
"(",
"newBReader",
"(",
"b",
")",
",",
"intervalHint",
")",
"\n",
"}"
] |
// NewIterator4h creates an Iter4h
|
[
"NewIterator4h",
"creates",
"an",
"Iter4h"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/mdata/chunk/tsz/tsz.go#L191-L193
|
train
|
grafana/metrictank
|
cluster/manager.go
|
clusterStats
|
func (c *MemberlistManager) clusterStats() {
primReady := 0
primNotReady := 0
secReady := 0
secNotReady := 0
queryReady := 0
queryNotReady := 0
partitions := make(map[int32]int)
for _, p := range c.members {
if p.Primary {
if p.IsReady() {
primReady++
} else {
primNotReady++
}
} else if p.Mode != ModeQuery {
if p.IsReady() {
secReady++
} else {
secNotReady++
}
} else {
if p.IsReady() {
queryReady++
} else {
queryNotReady++
}
}
for _, partition := range p.Partitions {
partitions[partition]++
}
}
totalPrimaryReady.Set(primReady)
totalPrimaryNotReady.Set(primNotReady)
totalSecondaryReady.Set(secReady)
totalSecondaryNotReady.Set(secNotReady)
totalQueryReady.Set(queryReady)
totalQueryNotReady.Set(queryNotReady)
totalPartitions.Set(len(partitions))
}
|
go
|
func (c *MemberlistManager) clusterStats() {
primReady := 0
primNotReady := 0
secReady := 0
secNotReady := 0
queryReady := 0
queryNotReady := 0
partitions := make(map[int32]int)
for _, p := range c.members {
if p.Primary {
if p.IsReady() {
primReady++
} else {
primNotReady++
}
} else if p.Mode != ModeQuery {
if p.IsReady() {
secReady++
} else {
secNotReady++
}
} else {
if p.IsReady() {
queryReady++
} else {
queryNotReady++
}
}
for _, partition := range p.Partitions {
partitions[partition]++
}
}
totalPrimaryReady.Set(primReady)
totalPrimaryNotReady.Set(primNotReady)
totalSecondaryReady.Set(secReady)
totalSecondaryNotReady.Set(secNotReady)
totalQueryReady.Set(queryReady)
totalQueryNotReady.Set(queryNotReady)
totalPartitions.Set(len(partitions))
}
|
[
"func",
"(",
"c",
"*",
"MemberlistManager",
")",
"clusterStats",
"(",
")",
"{",
"primReady",
":=",
"0",
"\n",
"primNotReady",
":=",
"0",
"\n",
"secReady",
":=",
"0",
"\n",
"secNotReady",
":=",
"0",
"\n",
"queryReady",
":=",
"0",
"\n",
"queryNotReady",
":=",
"0",
"\n",
"partitions",
":=",
"make",
"(",
"map",
"[",
"int32",
"]",
"int",
")",
"\n",
"for",
"_",
",",
"p",
":=",
"range",
"c",
".",
"members",
"{",
"if",
"p",
".",
"Primary",
"{",
"if",
"p",
".",
"IsReady",
"(",
")",
"{",
"primReady",
"++",
"\n",
"}",
"else",
"{",
"primNotReady",
"++",
"\n",
"}",
"\n",
"}",
"else",
"if",
"p",
".",
"Mode",
"!=",
"ModeQuery",
"{",
"if",
"p",
".",
"IsReady",
"(",
")",
"{",
"secReady",
"++",
"\n",
"}",
"else",
"{",
"secNotReady",
"++",
"\n",
"}",
"\n",
"}",
"else",
"{",
"if",
"p",
".",
"IsReady",
"(",
")",
"{",
"queryReady",
"++",
"\n",
"}",
"else",
"{",
"queryNotReady",
"++",
"\n",
"}",
"\n",
"}",
"\n",
"for",
"_",
",",
"partition",
":=",
"range",
"p",
".",
"Partitions",
"{",
"partitions",
"[",
"partition",
"]",
"++",
"\n",
"}",
"\n",
"}",
"\n\n",
"totalPrimaryReady",
".",
"Set",
"(",
"primReady",
")",
"\n",
"totalPrimaryNotReady",
".",
"Set",
"(",
"primNotReady",
")",
"\n",
"totalSecondaryReady",
".",
"Set",
"(",
"secReady",
")",
"\n",
"totalSecondaryNotReady",
".",
"Set",
"(",
"secNotReady",
")",
"\n",
"totalQueryReady",
".",
"Set",
"(",
"queryReady",
")",
"\n",
"totalQueryNotReady",
".",
"Set",
"(",
"queryNotReady",
")",
"\n\n",
"totalPartitions",
".",
"Set",
"(",
"len",
"(",
"partitions",
")",
")",
"\n",
"}"
] |
// report the cluster stats every time there is a change to the cluster state.
// it is assumed that the lock is acquired before calling this method.
|
[
"report",
"the",
"cluster",
"stats",
"every",
"time",
"there",
"is",
"a",
"change",
"to",
"the",
"cluster",
"state",
".",
"it",
"is",
"assumed",
"that",
"the",
"lock",
"is",
"acquired",
"before",
"calling",
"this",
"method",
"."
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/cluster/manager.go#L190-L231
|
train
|
grafana/metrictank
|
cluster/manager.go
|
NodeMeta
|
func (c *MemberlistManager) NodeMeta(limit int) []byte {
c.RLock()
meta, err := json.Marshal(c.members[c.nodeName])
c.RUnlock()
if err != nil {
log.Fatalf("CLU manager: %s", err.Error())
}
return meta
}
|
go
|
func (c *MemberlistManager) NodeMeta(limit int) []byte {
c.RLock()
meta, err := json.Marshal(c.members[c.nodeName])
c.RUnlock()
if err != nil {
log.Fatalf("CLU manager: %s", err.Error())
}
return meta
}
|
[
"func",
"(",
"c",
"*",
"MemberlistManager",
")",
"NodeMeta",
"(",
"limit",
"int",
")",
"[",
"]",
"byte",
"{",
"c",
".",
"RLock",
"(",
")",
"\n",
"meta",
",",
"err",
":=",
"json",
".",
"Marshal",
"(",
"c",
".",
"members",
"[",
"c",
".",
"nodeName",
"]",
")",
"\n",
"c",
".",
"RUnlock",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"log",
".",
"Fatalf",
"(",
"\"",
"\"",
",",
"err",
".",
"Error",
"(",
")",
")",
"\n",
"}",
"\n",
"return",
"meta",
"\n",
"}"
] |
// NodeMeta is used to retrieve meta-data about the current node
// when broadcasting an alive message. It's length is limited to
// the given byte size. This metadata is available in the HTTPNode structure.
|
[
"NodeMeta",
"is",
"used",
"to",
"retrieve",
"meta",
"-",
"data",
"about",
"the",
"current",
"node",
"when",
"broadcasting",
"an",
"alive",
"message",
".",
"It",
"s",
"length",
"is",
"limited",
"to",
"the",
"given",
"byte",
"size",
".",
"This",
"metadata",
"is",
"available",
"in",
"the",
"HTTPNode",
"structure",
"."
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/cluster/manager.go#L331-L339
|
train
|
grafana/metrictank
|
cluster/manager.go
|
IsReady
|
func (c *MemberlistManager) IsReady() bool {
c.RLock()
defer c.RUnlock()
return c.members[c.nodeName].IsReady()
}
|
go
|
func (c *MemberlistManager) IsReady() bool {
c.RLock()
defer c.RUnlock()
return c.members[c.nodeName].IsReady()
}
|
[
"func",
"(",
"c",
"*",
"MemberlistManager",
")",
"IsReady",
"(",
")",
"bool",
"{",
"c",
".",
"RLock",
"(",
")",
"\n",
"defer",
"c",
".",
"RUnlock",
"(",
")",
"\n",
"return",
"c",
".",
"members",
"[",
"c",
".",
"nodeName",
"]",
".",
"IsReady",
"(",
")",
"\n",
"}"
] |
// Returns true if this node is a ready to accept requests
// from users.
|
[
"Returns",
"true",
"if",
"this",
"node",
"is",
"a",
"ready",
"to",
"accept",
"requests",
"from",
"users",
"."
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/cluster/manager.go#L377-L381
|
train
|
grafana/metrictank
|
cluster/manager.go
|
SetState
|
func (c *MemberlistManager) SetState(state NodeState) {
c.Lock()
node := c.members[c.nodeName]
if !node.SetState(state) {
c.Unlock()
return
}
c.members[c.nodeName] = node
c.Unlock()
nodeReady.Set(state == NodeReady)
c.BroadcastUpdate()
}
|
go
|
func (c *MemberlistManager) SetState(state NodeState) {
c.Lock()
node := c.members[c.nodeName]
if !node.SetState(state) {
c.Unlock()
return
}
c.members[c.nodeName] = node
c.Unlock()
nodeReady.Set(state == NodeReady)
c.BroadcastUpdate()
}
|
[
"func",
"(",
"c",
"*",
"MemberlistManager",
")",
"SetState",
"(",
"state",
"NodeState",
")",
"{",
"c",
".",
"Lock",
"(",
")",
"\n",
"node",
":=",
"c",
".",
"members",
"[",
"c",
".",
"nodeName",
"]",
"\n",
"if",
"!",
"node",
".",
"SetState",
"(",
"state",
")",
"{",
"c",
".",
"Unlock",
"(",
")",
"\n",
"return",
"\n",
"}",
"\n",
"c",
".",
"members",
"[",
"c",
".",
"nodeName",
"]",
"=",
"node",
"\n",
"c",
".",
"Unlock",
"(",
")",
"\n",
"nodeReady",
".",
"Set",
"(",
"state",
"==",
"NodeReady",
")",
"\n",
"c",
".",
"BroadcastUpdate",
"(",
")",
"\n",
"}"
] |
// Set the state of this node.
|
[
"Set",
"the",
"state",
"of",
"this",
"node",
"."
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/cluster/manager.go#L389-L400
|
train
|
grafana/metrictank
|
cluster/manager.go
|
IsPrimary
|
func (c *MemberlistManager) IsPrimary() bool {
c.RLock()
defer c.RUnlock()
return c.members[c.nodeName].Primary
}
|
go
|
func (c *MemberlistManager) IsPrimary() bool {
c.RLock()
defer c.RUnlock()
return c.members[c.nodeName].Primary
}
|
[
"func",
"(",
"c",
"*",
"MemberlistManager",
")",
"IsPrimary",
"(",
")",
"bool",
"{",
"c",
".",
"RLock",
"(",
")",
"\n",
"defer",
"c",
".",
"RUnlock",
"(",
")",
"\n",
"return",
"c",
".",
"members",
"[",
"c",
".",
"nodeName",
"]",
".",
"Primary",
"\n",
"}"
] |
// Returns true if the this node is a set as a primary node that should write data to cassandra.
|
[
"Returns",
"true",
"if",
"the",
"this",
"node",
"is",
"a",
"set",
"as",
"a",
"primary",
"node",
"that",
"should",
"write",
"data",
"to",
"cassandra",
"."
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/cluster/manager.go#L403-L407
|
train
|
grafana/metrictank
|
cluster/manager.go
|
SetPrimary
|
func (c *MemberlistManager) SetPrimary(primary bool) {
c.Lock()
node := c.members[c.nodeName]
if !node.SetPrimary(primary) {
c.Unlock()
return
}
c.members[c.nodeName] = node
c.Unlock()
nodePrimary.Set(primary)
c.BroadcastUpdate()
}
|
go
|
func (c *MemberlistManager) SetPrimary(primary bool) {
c.Lock()
node := c.members[c.nodeName]
if !node.SetPrimary(primary) {
c.Unlock()
return
}
c.members[c.nodeName] = node
c.Unlock()
nodePrimary.Set(primary)
c.BroadcastUpdate()
}
|
[
"func",
"(",
"c",
"*",
"MemberlistManager",
")",
"SetPrimary",
"(",
"primary",
"bool",
")",
"{",
"c",
".",
"Lock",
"(",
")",
"\n",
"node",
":=",
"c",
".",
"members",
"[",
"c",
".",
"nodeName",
"]",
"\n",
"if",
"!",
"node",
".",
"SetPrimary",
"(",
"primary",
")",
"{",
"c",
".",
"Unlock",
"(",
")",
"\n",
"return",
"\n",
"}",
"\n",
"c",
".",
"members",
"[",
"c",
".",
"nodeName",
"]",
"=",
"node",
"\n",
"c",
".",
"Unlock",
"(",
")",
"\n",
"nodePrimary",
".",
"Set",
"(",
"primary",
")",
"\n",
"c",
".",
"BroadcastUpdate",
"(",
")",
"\n",
"}"
] |
// SetPrimary sets the primary status of this node
|
[
"SetPrimary",
"sets",
"the",
"primary",
"status",
"of",
"this",
"node"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/cluster/manager.go#L410-L421
|
train
|
grafana/metrictank
|
expr/func_movingaverage.go
|
Signature
|
func (s *FuncMovingAverage) Signature() ([]Arg, []Arg) {
return []Arg{
ArgSeriesList{val: &s.in},
// this could be an int OR a string.
// we need to figure out the interval of the data we will consume
// and request from -= interval * points
// interestingly the from adjustment might mean the archive TTL is no longer sufficient and push the request into a different rollup archive, which we should probably
// account for. let's solve all of this later.
ArgInt{val: &s.window},
}, []Arg{ArgSeriesList{}}
}
|
go
|
func (s *FuncMovingAverage) Signature() ([]Arg, []Arg) {
return []Arg{
ArgSeriesList{val: &s.in},
// this could be an int OR a string.
// we need to figure out the interval of the data we will consume
// and request from -= interval * points
// interestingly the from adjustment might mean the archive TTL is no longer sufficient and push the request into a different rollup archive, which we should probably
// account for. let's solve all of this later.
ArgInt{val: &s.window},
}, []Arg{ArgSeriesList{}}
}
|
[
"func",
"(",
"s",
"*",
"FuncMovingAverage",
")",
"Signature",
"(",
")",
"(",
"[",
"]",
"Arg",
",",
"[",
"]",
"Arg",
")",
"{",
"return",
"[",
"]",
"Arg",
"{",
"ArgSeriesList",
"{",
"val",
":",
"&",
"s",
".",
"in",
"}",
",",
"// this could be an int OR a string.",
"// we need to figure out the interval of the data we will consume",
"// and request from -= interval * points",
"// interestingly the from adjustment might mean the archive TTL is no longer sufficient and push the request into a different rollup archive, which we should probably",
"// account for. let's solve all of this later.",
"ArgInt",
"{",
"val",
":",
"&",
"s",
".",
"window",
"}",
",",
"}",
",",
"[",
"]",
"Arg",
"{",
"ArgSeriesList",
"{",
"}",
"}",
"\n",
"}"
] |
// note if input is 1 series, then output is too. not sure how to communicate that
|
[
"note",
"if",
"input",
"is",
"1",
"series",
"then",
"output",
"is",
"too",
".",
"not",
"sure",
"how",
"to",
"communicate",
"that"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/expr/func_movingaverage.go#L18-L28
|
train
|
grafana/metrictank
|
expr/func_aggregate.go
|
NewAggregateConstructor
|
func NewAggregateConstructor(aggDescription string, aggFunc crossSeriesAggFunc) func() GraphiteFunc {
return func() GraphiteFunc {
return &FuncAggregate{agg: seriesAggregator{function: aggFunc, name: aggDescription}}
}
}
|
go
|
func NewAggregateConstructor(aggDescription string, aggFunc crossSeriesAggFunc) func() GraphiteFunc {
return func() GraphiteFunc {
return &FuncAggregate{agg: seriesAggregator{function: aggFunc, name: aggDescription}}
}
}
|
[
"func",
"NewAggregateConstructor",
"(",
"aggDescription",
"string",
",",
"aggFunc",
"crossSeriesAggFunc",
")",
"func",
"(",
")",
"GraphiteFunc",
"{",
"return",
"func",
"(",
")",
"GraphiteFunc",
"{",
"return",
"&",
"FuncAggregate",
"{",
"agg",
":",
"seriesAggregator",
"{",
"function",
":",
"aggFunc",
",",
"name",
":",
"aggDescription",
"}",
"}",
"\n",
"}",
"\n",
"}"
] |
// NewAggregateConstructor takes an agg string and returns a constructor function
|
[
"NewAggregateConstructor",
"takes",
"an",
"agg",
"string",
"and",
"returns",
"a",
"constructor",
"function"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/expr/func_aggregate.go#L16-L20
|
train
|
grafana/metrictank
|
input/kafkamdm/kafkamdm.go
|
tryGetOffset
|
func (k *KafkaMdm) tryGetOffset(topic string, partition int32, offset int64, attempts int, sleep time.Duration) (int64, error) {
var val int64
var err error
var offsetStr string
switch offset {
case sarama.OffsetNewest:
offsetStr = "newest"
case sarama.OffsetOldest:
offsetStr = "oldest"
default:
offsetStr = strconv.FormatInt(offset, 10)
}
attempt := 1
for {
val, err = k.client.GetOffset(topic, partition, offset)
if err == nil {
break
}
err = fmt.Errorf("failed to get offset %s of partition %s:%d. %s (attempt %d/%d)", offsetStr, topic, partition, err, attempt, attempts)
if attempt == attempts {
break
}
log.Warnf("kafkamdm: %s", err.Error())
attempt += 1
time.Sleep(sleep)
}
return val, err
}
|
go
|
func (k *KafkaMdm) tryGetOffset(topic string, partition int32, offset int64, attempts int, sleep time.Duration) (int64, error) {
var val int64
var err error
var offsetStr string
switch offset {
case sarama.OffsetNewest:
offsetStr = "newest"
case sarama.OffsetOldest:
offsetStr = "oldest"
default:
offsetStr = strconv.FormatInt(offset, 10)
}
attempt := 1
for {
val, err = k.client.GetOffset(topic, partition, offset)
if err == nil {
break
}
err = fmt.Errorf("failed to get offset %s of partition %s:%d. %s (attempt %d/%d)", offsetStr, topic, partition, err, attempt, attempts)
if attempt == attempts {
break
}
log.Warnf("kafkamdm: %s", err.Error())
attempt += 1
time.Sleep(sleep)
}
return val, err
}
|
[
"func",
"(",
"k",
"*",
"KafkaMdm",
")",
"tryGetOffset",
"(",
"topic",
"string",
",",
"partition",
"int32",
",",
"offset",
"int64",
",",
"attempts",
"int",
",",
"sleep",
"time",
".",
"Duration",
")",
"(",
"int64",
",",
"error",
")",
"{",
"var",
"val",
"int64",
"\n",
"var",
"err",
"error",
"\n",
"var",
"offsetStr",
"string",
"\n\n",
"switch",
"offset",
"{",
"case",
"sarama",
".",
"OffsetNewest",
":",
"offsetStr",
"=",
"\"",
"\"",
"\n",
"case",
"sarama",
".",
"OffsetOldest",
":",
"offsetStr",
"=",
"\"",
"\"",
"\n",
"default",
":",
"offsetStr",
"=",
"strconv",
".",
"FormatInt",
"(",
"offset",
",",
"10",
")",
"\n",
"}",
"\n\n",
"attempt",
":=",
"1",
"\n",
"for",
"{",
"val",
",",
"err",
"=",
"k",
".",
"client",
".",
"GetOffset",
"(",
"topic",
",",
"partition",
",",
"offset",
")",
"\n",
"if",
"err",
"==",
"nil",
"{",
"break",
"\n",
"}",
"\n\n",
"err",
"=",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"offsetStr",
",",
"topic",
",",
"partition",
",",
"err",
",",
"attempt",
",",
"attempts",
")",
"\n",
"if",
"attempt",
"==",
"attempts",
"{",
"break",
"\n",
"}",
"\n",
"log",
".",
"Warnf",
"(",
"\"",
"\"",
",",
"err",
".",
"Error",
"(",
")",
")",
"\n",
"attempt",
"+=",
"1",
"\n",
"time",
".",
"Sleep",
"(",
"sleep",
")",
"\n",
"}",
"\n",
"return",
"val",
",",
"err",
"\n",
"}"
] |
// tryGetOffset will to query kafka repeatedly for the requested offset and give up after attempts unsuccesfull attempts
// an error is returned when it had to give up
|
[
"tryGetOffset",
"will",
"to",
"query",
"kafka",
"repeatedly",
"for",
"the",
"requested",
"offset",
"and",
"give",
"up",
"after",
"attempts",
"unsuccesfull",
"attempts",
"an",
"error",
"is",
"returned",
"when",
"it",
"had",
"to",
"give",
"up"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/input/kafkamdm/kafkamdm.go#L218-L249
|
train
|
grafana/metrictank
|
input/kafkamdm/kafkamdm.go
|
consumePartition
|
func (k *KafkaMdm) consumePartition(topic string, partition int32, currentOffset int64) {
defer k.wg.Done()
// determine the pos of the topic and the initial offset of our consumer
newest, err := k.tryGetOffset(topic, partition, sarama.OffsetNewest, 7, time.Second*10)
if err != nil {
log.Errorf("kafkamdm: %s", err.Error())
k.cancel()
return
}
if currentOffset == sarama.OffsetNewest {
currentOffset = newest
} else if currentOffset == sarama.OffsetOldest {
currentOffset, err = k.tryGetOffset(topic, partition, sarama.OffsetOldest, 7, time.Second*10)
if err != nil {
log.Errorf("kafkamdm: %s", err.Error())
k.cancel()
return
}
}
kafkaStats := kafkaStats[partition]
kafkaStats.Offset.Set(int(currentOffset))
kafkaStats.LogSize.Set(int(newest))
kafkaStats.Lag.Set(int(newest - currentOffset))
go k.trackStats(topic, partition)
log.Infof("kafkamdm: consuming from %s:%d from offset %d", topic, partition, currentOffset)
pc, err := k.consumer.ConsumePartition(topic, partition, currentOffset)
if err != nil {
log.Errorf("kafkamdm: failed to start partitionConsumer for %s:%d. %s", topic, partition, err)
k.cancel()
return
}
messages := pc.Messages()
for {
select {
case msg, ok := <-messages:
// https://github.com/Shopify/sarama/wiki/Frequently-Asked-Questions#why-am-i-getting-a-nil-message-from-the-sarama-consumer
if !ok {
log.Errorf("kafkamdm: kafka consumer for %s:%d has shutdown. stop consuming", topic, partition)
k.cancel()
return
}
if log.IsLevelEnabled(log.DebugLevel) {
log.Debugf("kafkamdm: received message: Topic %s, Partition: %d, Offset: %d, Key: %x", msg.Topic, msg.Partition, msg.Offset, msg.Key)
}
k.handleMsg(msg.Value, partition)
kafkaStats.Offset.Set(int(msg.Offset))
case <-k.shutdown:
pc.Close()
log.Infof("kafkamdm: consumer for %s:%d ended.", topic, partition)
return
}
}
}
|
go
|
func (k *KafkaMdm) consumePartition(topic string, partition int32, currentOffset int64) {
defer k.wg.Done()
// determine the pos of the topic and the initial offset of our consumer
newest, err := k.tryGetOffset(topic, partition, sarama.OffsetNewest, 7, time.Second*10)
if err != nil {
log.Errorf("kafkamdm: %s", err.Error())
k.cancel()
return
}
if currentOffset == sarama.OffsetNewest {
currentOffset = newest
} else if currentOffset == sarama.OffsetOldest {
currentOffset, err = k.tryGetOffset(topic, partition, sarama.OffsetOldest, 7, time.Second*10)
if err != nil {
log.Errorf("kafkamdm: %s", err.Error())
k.cancel()
return
}
}
kafkaStats := kafkaStats[partition]
kafkaStats.Offset.Set(int(currentOffset))
kafkaStats.LogSize.Set(int(newest))
kafkaStats.Lag.Set(int(newest - currentOffset))
go k.trackStats(topic, partition)
log.Infof("kafkamdm: consuming from %s:%d from offset %d", topic, partition, currentOffset)
pc, err := k.consumer.ConsumePartition(topic, partition, currentOffset)
if err != nil {
log.Errorf("kafkamdm: failed to start partitionConsumer for %s:%d. %s", topic, partition, err)
k.cancel()
return
}
messages := pc.Messages()
for {
select {
case msg, ok := <-messages:
// https://github.com/Shopify/sarama/wiki/Frequently-Asked-Questions#why-am-i-getting-a-nil-message-from-the-sarama-consumer
if !ok {
log.Errorf("kafkamdm: kafka consumer for %s:%d has shutdown. stop consuming", topic, partition)
k.cancel()
return
}
if log.IsLevelEnabled(log.DebugLevel) {
log.Debugf("kafkamdm: received message: Topic %s, Partition: %d, Offset: %d, Key: %x", msg.Topic, msg.Partition, msg.Offset, msg.Key)
}
k.handleMsg(msg.Value, partition)
kafkaStats.Offset.Set(int(msg.Offset))
case <-k.shutdown:
pc.Close()
log.Infof("kafkamdm: consumer for %s:%d ended.", topic, partition)
return
}
}
}
|
[
"func",
"(",
"k",
"*",
"KafkaMdm",
")",
"consumePartition",
"(",
"topic",
"string",
",",
"partition",
"int32",
",",
"currentOffset",
"int64",
")",
"{",
"defer",
"k",
".",
"wg",
".",
"Done",
"(",
")",
"\n\n",
"// determine the pos of the topic and the initial offset of our consumer",
"newest",
",",
"err",
":=",
"k",
".",
"tryGetOffset",
"(",
"topic",
",",
"partition",
",",
"sarama",
".",
"OffsetNewest",
",",
"7",
",",
"time",
".",
"Second",
"*",
"10",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"log",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"err",
".",
"Error",
"(",
")",
")",
"\n",
"k",
".",
"cancel",
"(",
")",
"\n",
"return",
"\n",
"}",
"\n",
"if",
"currentOffset",
"==",
"sarama",
".",
"OffsetNewest",
"{",
"currentOffset",
"=",
"newest",
"\n",
"}",
"else",
"if",
"currentOffset",
"==",
"sarama",
".",
"OffsetOldest",
"{",
"currentOffset",
",",
"err",
"=",
"k",
".",
"tryGetOffset",
"(",
"topic",
",",
"partition",
",",
"sarama",
".",
"OffsetOldest",
",",
"7",
",",
"time",
".",
"Second",
"*",
"10",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"log",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"err",
".",
"Error",
"(",
")",
")",
"\n",
"k",
".",
"cancel",
"(",
")",
"\n",
"return",
"\n",
"}",
"\n",
"}",
"\n\n",
"kafkaStats",
":=",
"kafkaStats",
"[",
"partition",
"]",
"\n",
"kafkaStats",
".",
"Offset",
".",
"Set",
"(",
"int",
"(",
"currentOffset",
")",
")",
"\n",
"kafkaStats",
".",
"LogSize",
".",
"Set",
"(",
"int",
"(",
"newest",
")",
")",
"\n",
"kafkaStats",
".",
"Lag",
".",
"Set",
"(",
"int",
"(",
"newest",
"-",
"currentOffset",
")",
")",
"\n",
"go",
"k",
".",
"trackStats",
"(",
"topic",
",",
"partition",
")",
"\n\n",
"log",
".",
"Infof",
"(",
"\"",
"\"",
",",
"topic",
",",
"partition",
",",
"currentOffset",
")",
"\n",
"pc",
",",
"err",
":=",
"k",
".",
"consumer",
".",
"ConsumePartition",
"(",
"topic",
",",
"partition",
",",
"currentOffset",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"log",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"topic",
",",
"partition",
",",
"err",
")",
"\n",
"k",
".",
"cancel",
"(",
")",
"\n",
"return",
"\n",
"}",
"\n",
"messages",
":=",
"pc",
".",
"Messages",
"(",
")",
"\n",
"for",
"{",
"select",
"{",
"case",
"msg",
",",
"ok",
":=",
"<-",
"messages",
":",
"// https://github.com/Shopify/sarama/wiki/Frequently-Asked-Questions#why-am-i-getting-a-nil-message-from-the-sarama-consumer",
"if",
"!",
"ok",
"{",
"log",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"topic",
",",
"partition",
")",
"\n",
"k",
".",
"cancel",
"(",
")",
"\n",
"return",
"\n",
"}",
"\n",
"if",
"log",
".",
"IsLevelEnabled",
"(",
"log",
".",
"DebugLevel",
")",
"{",
"log",
".",
"Debugf",
"(",
"\"",
"\"",
",",
"msg",
".",
"Topic",
",",
"msg",
".",
"Partition",
",",
"msg",
".",
"Offset",
",",
"msg",
".",
"Key",
")",
"\n",
"}",
"\n",
"k",
".",
"handleMsg",
"(",
"msg",
".",
"Value",
",",
"partition",
")",
"\n",
"kafkaStats",
".",
"Offset",
".",
"Set",
"(",
"int",
"(",
"msg",
".",
"Offset",
")",
")",
"\n",
"case",
"<-",
"k",
".",
"shutdown",
":",
"pc",
".",
"Close",
"(",
")",
"\n",
"log",
".",
"Infof",
"(",
"\"",
"\"",
",",
"topic",
",",
"partition",
")",
"\n",
"return",
"\n",
"}",
"\n",
"}",
"\n",
"}"
] |
// consumePartition consumes from the topic until k.shutdown is triggered.
|
[
"consumePartition",
"consumes",
"from",
"the",
"topic",
"until",
"k",
".",
"shutdown",
"is",
"triggered",
"."
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/input/kafkamdm/kafkamdm.go#L252-L307
|
train
|
grafana/metrictank
|
idx/cassandra/cassandra.go
|
InitBare
|
func (c *CasIdx) InitBare() error {
var err error
tmpSession, err := c.cluster.CreateSession()
if err != nil {
return fmt.Errorf("failed to create cassandra session: %s", err)
}
// read templates
schemaKeyspace := util.ReadEntry(c.cfg.schemaFile, "schema_keyspace").(string)
schemaTable := util.ReadEntry(c.cfg.schemaFile, "schema_table").(string)
// create the keyspace or ensure it exists
if c.cfg.createKeyspace {
log.Infof("cassandra-idx: ensuring that keyspace %s exists.", c.cfg.keyspace)
err = tmpSession.Query(fmt.Sprintf(schemaKeyspace, c.cfg.keyspace)).Exec()
if err != nil {
return fmt.Errorf("failed to initialize cassandra keyspace: %s", err)
}
log.Info("cassandra-idx: ensuring that table metric_idx exists.")
err = tmpSession.Query(fmt.Sprintf(schemaTable, c.cfg.keyspace)).Exec()
if err != nil {
return fmt.Errorf("failed to initialize cassandra table: %s", err)
}
c.EnsureArchiveTableExists(tmpSession)
} else {
var keyspaceMetadata *gocql.KeyspaceMetadata
for attempt := 1; attempt > 0; attempt++ {
keyspaceMetadata, err = tmpSession.KeyspaceMetadata(c.cfg.keyspace)
if err != nil {
if attempt >= 5 {
return fmt.Errorf("cassandra keyspace not found. %d attempts", attempt)
}
log.Warnf("cassandra-idx: cassandra keyspace not found. retrying in 5s. attempt: %d", attempt)
time.Sleep(5 * time.Second)
} else {
if _, ok := keyspaceMetadata.Tables["metric_idx"]; ok {
break
} else {
if attempt >= 5 {
return fmt.Errorf("cassandra table not found. %d attempts", attempt)
}
log.Warnf("cassandra-idx: cassandra table not found. retrying in 5s. attempt: %d", attempt)
time.Sleep(5 * time.Second)
}
}
}
}
tmpSession.Close()
c.cluster.Keyspace = c.cfg.keyspace
session, err := c.cluster.CreateSession()
if err != nil {
return fmt.Errorf("failed to create cassandra session: %s", err)
}
c.session = session
return nil
}
|
go
|
func (c *CasIdx) InitBare() error {
var err error
tmpSession, err := c.cluster.CreateSession()
if err != nil {
return fmt.Errorf("failed to create cassandra session: %s", err)
}
// read templates
schemaKeyspace := util.ReadEntry(c.cfg.schemaFile, "schema_keyspace").(string)
schemaTable := util.ReadEntry(c.cfg.schemaFile, "schema_table").(string)
// create the keyspace or ensure it exists
if c.cfg.createKeyspace {
log.Infof("cassandra-idx: ensuring that keyspace %s exists.", c.cfg.keyspace)
err = tmpSession.Query(fmt.Sprintf(schemaKeyspace, c.cfg.keyspace)).Exec()
if err != nil {
return fmt.Errorf("failed to initialize cassandra keyspace: %s", err)
}
log.Info("cassandra-idx: ensuring that table metric_idx exists.")
err = tmpSession.Query(fmt.Sprintf(schemaTable, c.cfg.keyspace)).Exec()
if err != nil {
return fmt.Errorf("failed to initialize cassandra table: %s", err)
}
c.EnsureArchiveTableExists(tmpSession)
} else {
var keyspaceMetadata *gocql.KeyspaceMetadata
for attempt := 1; attempt > 0; attempt++ {
keyspaceMetadata, err = tmpSession.KeyspaceMetadata(c.cfg.keyspace)
if err != nil {
if attempt >= 5 {
return fmt.Errorf("cassandra keyspace not found. %d attempts", attempt)
}
log.Warnf("cassandra-idx: cassandra keyspace not found. retrying in 5s. attempt: %d", attempt)
time.Sleep(5 * time.Second)
} else {
if _, ok := keyspaceMetadata.Tables["metric_idx"]; ok {
break
} else {
if attempt >= 5 {
return fmt.Errorf("cassandra table not found. %d attempts", attempt)
}
log.Warnf("cassandra-idx: cassandra table not found. retrying in 5s. attempt: %d", attempt)
time.Sleep(5 * time.Second)
}
}
}
}
tmpSession.Close()
c.cluster.Keyspace = c.cfg.keyspace
session, err := c.cluster.CreateSession()
if err != nil {
return fmt.Errorf("failed to create cassandra session: %s", err)
}
c.session = session
return nil
}
|
[
"func",
"(",
"c",
"*",
"CasIdx",
")",
"InitBare",
"(",
")",
"error",
"{",
"var",
"err",
"error",
"\n",
"tmpSession",
",",
"err",
":=",
"c",
".",
"cluster",
".",
"CreateSession",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"err",
")",
"\n",
"}",
"\n\n",
"// read templates",
"schemaKeyspace",
":=",
"util",
".",
"ReadEntry",
"(",
"c",
".",
"cfg",
".",
"schemaFile",
",",
"\"",
"\"",
")",
".",
"(",
"string",
")",
"\n",
"schemaTable",
":=",
"util",
".",
"ReadEntry",
"(",
"c",
".",
"cfg",
".",
"schemaFile",
",",
"\"",
"\"",
")",
".",
"(",
"string",
")",
"\n\n",
"// create the keyspace or ensure it exists",
"if",
"c",
".",
"cfg",
".",
"createKeyspace",
"{",
"log",
".",
"Infof",
"(",
"\"",
"\"",
",",
"c",
".",
"cfg",
".",
"keyspace",
")",
"\n",
"err",
"=",
"tmpSession",
".",
"Query",
"(",
"fmt",
".",
"Sprintf",
"(",
"schemaKeyspace",
",",
"c",
".",
"cfg",
".",
"keyspace",
")",
")",
".",
"Exec",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"err",
")",
"\n",
"}",
"\n",
"log",
".",
"Info",
"(",
"\"",
"\"",
")",
"\n",
"err",
"=",
"tmpSession",
".",
"Query",
"(",
"fmt",
".",
"Sprintf",
"(",
"schemaTable",
",",
"c",
".",
"cfg",
".",
"keyspace",
")",
")",
".",
"Exec",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"err",
")",
"\n",
"}",
"\n",
"c",
".",
"EnsureArchiveTableExists",
"(",
"tmpSession",
")",
"\n",
"}",
"else",
"{",
"var",
"keyspaceMetadata",
"*",
"gocql",
".",
"KeyspaceMetadata",
"\n",
"for",
"attempt",
":=",
"1",
";",
"attempt",
">",
"0",
";",
"attempt",
"++",
"{",
"keyspaceMetadata",
",",
"err",
"=",
"tmpSession",
".",
"KeyspaceMetadata",
"(",
"c",
".",
"cfg",
".",
"keyspace",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"if",
"attempt",
">=",
"5",
"{",
"return",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"attempt",
")",
"\n",
"}",
"\n",
"log",
".",
"Warnf",
"(",
"\"",
"\"",
",",
"attempt",
")",
"\n",
"time",
".",
"Sleep",
"(",
"5",
"*",
"time",
".",
"Second",
")",
"\n",
"}",
"else",
"{",
"if",
"_",
",",
"ok",
":=",
"keyspaceMetadata",
".",
"Tables",
"[",
"\"",
"\"",
"]",
";",
"ok",
"{",
"break",
"\n",
"}",
"else",
"{",
"if",
"attempt",
">=",
"5",
"{",
"return",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"attempt",
")",
"\n",
"}",
"\n",
"log",
".",
"Warnf",
"(",
"\"",
"\"",
",",
"attempt",
")",
"\n",
"time",
".",
"Sleep",
"(",
"5",
"*",
"time",
".",
"Second",
")",
"\n",
"}",
"\n",
"}",
"\n",
"}",
"\n",
"}",
"\n\n",
"tmpSession",
".",
"Close",
"(",
")",
"\n",
"c",
".",
"cluster",
".",
"Keyspace",
"=",
"c",
".",
"cfg",
".",
"keyspace",
"\n",
"session",
",",
"err",
":=",
"c",
".",
"cluster",
".",
"CreateSession",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"err",
")",
"\n",
"}",
"\n\n",
"c",
".",
"session",
"=",
"session",
"\n\n",
"return",
"nil",
"\n",
"}"
] |
// InitBare makes sure the keyspace, tables, and index exists in cassandra and creates a session
|
[
"InitBare",
"makes",
"sure",
"the",
"keyspace",
"tables",
"and",
"index",
"exists",
"in",
"cassandra",
"and",
"creates",
"a",
"session"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/idx/cassandra/cassandra.go#L113-L171
|
train
|
grafana/metrictank
|
idx/cassandra/cassandra.go
|
EnsureArchiveTableExists
|
func (c *CasIdx) EnsureArchiveTableExists(session *gocql.Session) error {
var err error
if session == nil {
session, err = c.cluster.CreateSession()
if err != nil {
return fmt.Errorf("failed to create cassandra session: %s", err)
}
}
schemaArchiveTable := util.ReadEntry(c.cfg.schemaFile, "schema_archive_table").(string)
if c.cfg.createKeyspace {
log.Info("cassandra-idx: ensuring that table metric_idx_archive exists.")
err = session.Query(fmt.Sprintf(schemaArchiveTable, c.cfg.keyspace)).Exec()
if err != nil {
return fmt.Errorf("failed to initialize cassandra table: %s", err)
}
} else {
var keyspaceMetadata *gocql.KeyspaceMetadata
keyspaceMetadata, err = session.KeyspaceMetadata(c.cfg.keyspace)
if err != nil {
return fmt.Errorf("failed to read cassandra tables: %s", err)
}
if _, ok := keyspaceMetadata.Tables["metric_idx_archive"]; !ok {
return fmt.Errorf("table metric_idx_archive does not exist")
}
}
return nil
}
|
go
|
func (c *CasIdx) EnsureArchiveTableExists(session *gocql.Session) error {
var err error
if session == nil {
session, err = c.cluster.CreateSession()
if err != nil {
return fmt.Errorf("failed to create cassandra session: %s", err)
}
}
schemaArchiveTable := util.ReadEntry(c.cfg.schemaFile, "schema_archive_table").(string)
if c.cfg.createKeyspace {
log.Info("cassandra-idx: ensuring that table metric_idx_archive exists.")
err = session.Query(fmt.Sprintf(schemaArchiveTable, c.cfg.keyspace)).Exec()
if err != nil {
return fmt.Errorf("failed to initialize cassandra table: %s", err)
}
} else {
var keyspaceMetadata *gocql.KeyspaceMetadata
keyspaceMetadata, err = session.KeyspaceMetadata(c.cfg.keyspace)
if err != nil {
return fmt.Errorf("failed to read cassandra tables: %s", err)
}
if _, ok := keyspaceMetadata.Tables["metric_idx_archive"]; !ok {
return fmt.Errorf("table metric_idx_archive does not exist")
}
}
return nil
}
|
[
"func",
"(",
"c",
"*",
"CasIdx",
")",
"EnsureArchiveTableExists",
"(",
"session",
"*",
"gocql",
".",
"Session",
")",
"error",
"{",
"var",
"err",
"error",
"\n",
"if",
"session",
"==",
"nil",
"{",
"session",
",",
"err",
"=",
"c",
".",
"cluster",
".",
"CreateSession",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"err",
")",
"\n",
"}",
"\n",
"}",
"\n\n",
"schemaArchiveTable",
":=",
"util",
".",
"ReadEntry",
"(",
"c",
".",
"cfg",
".",
"schemaFile",
",",
"\"",
"\"",
")",
".",
"(",
"string",
")",
"\n\n",
"if",
"c",
".",
"cfg",
".",
"createKeyspace",
"{",
"log",
".",
"Info",
"(",
"\"",
"\"",
")",
"\n",
"err",
"=",
"session",
".",
"Query",
"(",
"fmt",
".",
"Sprintf",
"(",
"schemaArchiveTable",
",",
"c",
".",
"cfg",
".",
"keyspace",
")",
")",
".",
"Exec",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"err",
")",
"\n",
"}",
"\n",
"}",
"else",
"{",
"var",
"keyspaceMetadata",
"*",
"gocql",
".",
"KeyspaceMetadata",
"\n",
"keyspaceMetadata",
",",
"err",
"=",
"session",
".",
"KeyspaceMetadata",
"(",
"c",
".",
"cfg",
".",
"keyspace",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"err",
")",
"\n",
"}",
"\n",
"if",
"_",
",",
"ok",
":=",
"keyspaceMetadata",
".",
"Tables",
"[",
"\"",
"\"",
"]",
";",
"!",
"ok",
"{",
"return",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
")",
"\n",
"}",
"\n",
"}",
"\n",
"return",
"nil",
"\n",
"}"
] |
// EnsureArchiveTableExists checks if the index archive table exists or not. If it does not exist and
// the create-keyspace flag is true, then it will create it, if it doesn't exist and the create-keyspace
// flag is false, then it will return an error. If the table exists then it just returns nil.
// The index archive table is not required for Metrictank to run, it's only required by the
// mt-index-prune utility to archive old metrics from the index.
|
[
"EnsureArchiveTableExists",
"checks",
"if",
"the",
"index",
"archive",
"table",
"exists",
"or",
"not",
".",
"If",
"it",
"does",
"not",
"exist",
"and",
"the",
"create",
"-",
"keyspace",
"flag",
"is",
"true",
"then",
"it",
"will",
"create",
"it",
"if",
"it",
"doesn",
"t",
"exist",
"and",
"the",
"create",
"-",
"keyspace",
"flag",
"is",
"false",
"then",
"it",
"will",
"return",
"an",
"error",
".",
"If",
"the",
"table",
"exists",
"then",
"it",
"just",
"returns",
"nil",
".",
"The",
"index",
"archive",
"table",
"is",
"not",
"required",
"for",
"Metrictank",
"to",
"run",
"it",
"s",
"only",
"required",
"by",
"the",
"mt",
"-",
"index",
"-",
"prune",
"utility",
"to",
"archive",
"old",
"metrics",
"from",
"the",
"index",
"."
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/idx/cassandra/cassandra.go#L178-L206
|
train
|
grafana/metrictank
|
idx/cassandra/cassandra.go
|
Init
|
func (c *CasIdx) Init() error {
log.Infof("initializing cassandra-idx. Hosts=%s", c.cfg.hosts)
if err := c.MemoryIndex.Init(); err != nil {
return err
}
if err := c.InitBare(); err != nil {
return err
}
if c.cfg.updateCassIdx {
c.wg.Add(c.cfg.numConns)
for i := 0; i < c.cfg.numConns; i++ {
go c.processWriteQueue()
}
log.Infof("cassandra-idx: started %d writeQueue handlers", c.cfg.numConns)
}
//Rebuild the in-memory index.
c.rebuildIndex()
if memory.IndexRules.Prunable() {
go c.prune()
}
return nil
}
|
go
|
func (c *CasIdx) Init() error {
log.Infof("initializing cassandra-idx. Hosts=%s", c.cfg.hosts)
if err := c.MemoryIndex.Init(); err != nil {
return err
}
if err := c.InitBare(); err != nil {
return err
}
if c.cfg.updateCassIdx {
c.wg.Add(c.cfg.numConns)
for i := 0; i < c.cfg.numConns; i++ {
go c.processWriteQueue()
}
log.Infof("cassandra-idx: started %d writeQueue handlers", c.cfg.numConns)
}
//Rebuild the in-memory index.
c.rebuildIndex()
if memory.IndexRules.Prunable() {
go c.prune()
}
return nil
}
|
[
"func",
"(",
"c",
"*",
"CasIdx",
")",
"Init",
"(",
")",
"error",
"{",
"log",
".",
"Infof",
"(",
"\"",
"\"",
",",
"c",
".",
"cfg",
".",
"hosts",
")",
"\n",
"if",
"err",
":=",
"c",
".",
"MemoryIndex",
".",
"Init",
"(",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n\n",
"if",
"err",
":=",
"c",
".",
"InitBare",
"(",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n\n",
"if",
"c",
".",
"cfg",
".",
"updateCassIdx",
"{",
"c",
".",
"wg",
".",
"Add",
"(",
"c",
".",
"cfg",
".",
"numConns",
")",
"\n",
"for",
"i",
":=",
"0",
";",
"i",
"<",
"c",
".",
"cfg",
".",
"numConns",
";",
"i",
"++",
"{",
"go",
"c",
".",
"processWriteQueue",
"(",
")",
"\n",
"}",
"\n",
"log",
".",
"Infof",
"(",
"\"",
"\"",
",",
"c",
".",
"cfg",
".",
"numConns",
")",
"\n",
"}",
"\n\n",
"//Rebuild the in-memory index.",
"c",
".",
"rebuildIndex",
"(",
")",
"\n\n",
"if",
"memory",
".",
"IndexRules",
".",
"Prunable",
"(",
")",
"{",
"go",
"c",
".",
"prune",
"(",
")",
"\n",
"}",
"\n",
"return",
"nil",
"\n",
"}"
] |
// Init makes sure the needed keyspace, table, index in cassandra exists, creates the session,
// rebuilds the in-memory index, sets up write queues, metrics and pruning routines
|
[
"Init",
"makes",
"sure",
"the",
"needed",
"keyspace",
"table",
"index",
"in",
"cassandra",
"exists",
"creates",
"the",
"session",
"rebuilds",
"the",
"in",
"-",
"memory",
"index",
"sets",
"up",
"write",
"queues",
"metrics",
"and",
"pruning",
"routines"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/idx/cassandra/cassandra.go#L210-L235
|
train
|
grafana/metrictank
|
idx/cassandra/cassandra.go
|
updateCassandra
|
func (c *CasIdx) updateCassandra(now uint32, inMemory bool, archive idx.Archive, partition int32) idx.Archive {
// if the entry has not been saved for 1.5x updateInterval
// then perform a blocking save.
if archive.LastSave < (now - c.updateInterval32 - c.updateInterval32/2) {
log.Debugf("cassandra-idx: updating def %s in index.", archive.MetricDefinition.Id)
c.writeQueue <- writeReq{recvTime: time.Now(), def: &archive.MetricDefinition}
archive.LastSave = now
c.MemoryIndex.UpdateArchive(archive)
} else {
// perform a non-blocking write to the writeQueue. If the queue is full, then
// this will fail and we won't update the LastSave timestamp. The next time
// the metric is seen, the previous lastSave timestamp will still be in place and so
// we will try and save again. This will continue until we are successful or the
// lastSave timestamp become more then 1.5 x UpdateInterval, in which case we will
// do a blocking write to the queue.
select {
case c.writeQueue <- writeReq{recvTime: time.Now(), def: &archive.MetricDefinition}:
archive.LastSave = now
c.MemoryIndex.UpdateArchive(archive)
default:
statSaveSkipped.Inc()
log.Debugf("cassandra-idx: writeQueue is full, update of %s not saved this time.", archive.MetricDefinition.Id)
}
}
return archive
}
|
go
|
func (c *CasIdx) updateCassandra(now uint32, inMemory bool, archive idx.Archive, partition int32) idx.Archive {
// if the entry has not been saved for 1.5x updateInterval
// then perform a blocking save.
if archive.LastSave < (now - c.updateInterval32 - c.updateInterval32/2) {
log.Debugf("cassandra-idx: updating def %s in index.", archive.MetricDefinition.Id)
c.writeQueue <- writeReq{recvTime: time.Now(), def: &archive.MetricDefinition}
archive.LastSave = now
c.MemoryIndex.UpdateArchive(archive)
} else {
// perform a non-blocking write to the writeQueue. If the queue is full, then
// this will fail and we won't update the LastSave timestamp. The next time
// the metric is seen, the previous lastSave timestamp will still be in place and so
// we will try and save again. This will continue until we are successful or the
// lastSave timestamp become more then 1.5 x UpdateInterval, in which case we will
// do a blocking write to the queue.
select {
case c.writeQueue <- writeReq{recvTime: time.Now(), def: &archive.MetricDefinition}:
archive.LastSave = now
c.MemoryIndex.UpdateArchive(archive)
default:
statSaveSkipped.Inc()
log.Debugf("cassandra-idx: writeQueue is full, update of %s not saved this time.", archive.MetricDefinition.Id)
}
}
return archive
}
|
[
"func",
"(",
"c",
"*",
"CasIdx",
")",
"updateCassandra",
"(",
"now",
"uint32",
",",
"inMemory",
"bool",
",",
"archive",
"idx",
".",
"Archive",
",",
"partition",
"int32",
")",
"idx",
".",
"Archive",
"{",
"// if the entry has not been saved for 1.5x updateInterval",
"// then perform a blocking save.",
"if",
"archive",
".",
"LastSave",
"<",
"(",
"now",
"-",
"c",
".",
"updateInterval32",
"-",
"c",
".",
"updateInterval32",
"/",
"2",
")",
"{",
"log",
".",
"Debugf",
"(",
"\"",
"\"",
",",
"archive",
".",
"MetricDefinition",
".",
"Id",
")",
"\n",
"c",
".",
"writeQueue",
"<-",
"writeReq",
"{",
"recvTime",
":",
"time",
".",
"Now",
"(",
")",
",",
"def",
":",
"&",
"archive",
".",
"MetricDefinition",
"}",
"\n",
"archive",
".",
"LastSave",
"=",
"now",
"\n",
"c",
".",
"MemoryIndex",
".",
"UpdateArchive",
"(",
"archive",
")",
"\n",
"}",
"else",
"{",
"// perform a non-blocking write to the writeQueue. If the queue is full, then",
"// this will fail and we won't update the LastSave timestamp. The next time",
"// the metric is seen, the previous lastSave timestamp will still be in place and so",
"// we will try and save again. This will continue until we are successful or the",
"// lastSave timestamp become more then 1.5 x UpdateInterval, in which case we will",
"// do a blocking write to the queue.",
"select",
"{",
"case",
"c",
".",
"writeQueue",
"<-",
"writeReq",
"{",
"recvTime",
":",
"time",
".",
"Now",
"(",
")",
",",
"def",
":",
"&",
"archive",
".",
"MetricDefinition",
"}",
":",
"archive",
".",
"LastSave",
"=",
"now",
"\n",
"c",
".",
"MemoryIndex",
".",
"UpdateArchive",
"(",
"archive",
")",
"\n",
"default",
":",
"statSaveSkipped",
".",
"Inc",
"(",
")",
"\n",
"log",
".",
"Debugf",
"(",
"\"",
"\"",
",",
"archive",
".",
"MetricDefinition",
".",
"Id",
")",
"\n",
"}",
"\n",
"}",
"\n\n",
"return",
"archive",
"\n",
"}"
] |
// updateCassandra saves the archive to cassandra and
// updates the memory index with the updated fields.
|
[
"updateCassandra",
"saves",
"the",
"archive",
"to",
"cassandra",
"and",
"updates",
"the",
"memory",
"index",
"with",
"the",
"updated",
"fields",
"."
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/idx/cassandra/cassandra.go#L316-L342
|
train
|
grafana/metrictank
|
idx/cassandra/cassandra.go
|
LoadPartitions
|
func (c *CasIdx) LoadPartitions(partitions []int32, defs []schema.MetricDefinition, now time.Time) []schema.MetricDefinition {
placeholders := make([]string, len(partitions))
for i, p := range partitions {
placeholders[i] = strconv.Itoa(int(p))
}
q := fmt.Sprintf("SELECT id, orgid, partition, name, interval, unit, mtype, tags, lastupdate from metric_idx where partition in (%s)", strings.Join(placeholders, ","))
iter := c.session.Query(q).Iter()
return c.load(defs, iter, now)
}
|
go
|
func (c *CasIdx) LoadPartitions(partitions []int32, defs []schema.MetricDefinition, now time.Time) []schema.MetricDefinition {
placeholders := make([]string, len(partitions))
for i, p := range partitions {
placeholders[i] = strconv.Itoa(int(p))
}
q := fmt.Sprintf("SELECT id, orgid, partition, name, interval, unit, mtype, tags, lastupdate from metric_idx where partition in (%s)", strings.Join(placeholders, ","))
iter := c.session.Query(q).Iter()
return c.load(defs, iter, now)
}
|
[
"func",
"(",
"c",
"*",
"CasIdx",
")",
"LoadPartitions",
"(",
"partitions",
"[",
"]",
"int32",
",",
"defs",
"[",
"]",
"schema",
".",
"MetricDefinition",
",",
"now",
"time",
".",
"Time",
")",
"[",
"]",
"schema",
".",
"MetricDefinition",
"{",
"placeholders",
":=",
"make",
"(",
"[",
"]",
"string",
",",
"len",
"(",
"partitions",
")",
")",
"\n",
"for",
"i",
",",
"p",
":=",
"range",
"partitions",
"{",
"placeholders",
"[",
"i",
"]",
"=",
"strconv",
".",
"Itoa",
"(",
"int",
"(",
"p",
")",
")",
"\n",
"}",
"\n",
"q",
":=",
"fmt",
".",
"Sprintf",
"(",
"\"",
"\"",
",",
"strings",
".",
"Join",
"(",
"placeholders",
",",
"\"",
"\"",
")",
")",
"\n",
"iter",
":=",
"c",
".",
"session",
".",
"Query",
"(",
"q",
")",
".",
"Iter",
"(",
")",
"\n",
"return",
"c",
".",
"load",
"(",
"defs",
",",
"iter",
",",
"now",
")",
"\n",
"}"
] |
// LoadPartitions appends MetricDefinitions from the given partitions to defs and returns the modified defs, honoring pruning settings relative to now
|
[
"LoadPartitions",
"appends",
"MetricDefinitions",
"from",
"the",
"given",
"partitions",
"to",
"defs",
"and",
"returns",
"the",
"modified",
"defs",
"honoring",
"pruning",
"settings",
"relative",
"to",
"now"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/idx/cassandra/cassandra.go#L379-L387
|
train
|
grafana/metrictank
|
idx/cassandra/cassandra.go
|
load
|
func (c *CasIdx) load(defs []schema.MetricDefinition, iter cqlIterator, now time.Time) []schema.MetricDefinition {
defsByNames := make(map[string][]*schema.MetricDefinition)
var id, name, unit, mtype string
var orgId, interval int
var partition int32
var lastupdate int64
var tags []string
for iter.Scan(&id, &orgId, &partition, &name, &interval, &unit, &mtype, &tags, &lastupdate) {
mkey, err := schema.MKeyFromString(id)
if err != nil {
log.Errorf("cassandra-idx: load() could not parse ID %q: %s -> skipping", id, err)
continue
}
if orgId < 0 {
orgId = int(idx.OrgIdPublic)
}
mdef := &schema.MetricDefinition{
Id: mkey,
OrgId: uint32(orgId),
Partition: partition,
Name: name,
Interval: interval,
Unit: unit,
Mtype: mtype,
Tags: tags,
LastUpdate: lastupdate,
}
nameWithTags := mdef.NameWithTags()
defsByNames[nameWithTags] = append(defsByNames[nameWithTags], mdef)
}
if err := iter.Close(); err != nil {
log.Fatalf("Could not close iterator: %s", err.Error())
}
// getting all cutoffs once saves having to recompute everytime we have a match
cutoffs := memory.IndexRules.Cutoffs(now)
NAMES:
for nameWithTags, defsByName := range defsByNames {
irId, _ := memory.IndexRules.Match(nameWithTags)
cutoff := cutoffs[irId]
for _, def := range defsByName {
if def.LastUpdate >= cutoff {
// if any of the defs for a given nameWithTags is not stale, then we need to load
// all the defs for that nameWithTags.
for _, defToAdd := range defsByNames[nameWithTags] {
defs = append(defs, *defToAdd)
}
continue NAMES
}
}
}
return defs
}
|
go
|
func (c *CasIdx) load(defs []schema.MetricDefinition, iter cqlIterator, now time.Time) []schema.MetricDefinition {
defsByNames := make(map[string][]*schema.MetricDefinition)
var id, name, unit, mtype string
var orgId, interval int
var partition int32
var lastupdate int64
var tags []string
for iter.Scan(&id, &orgId, &partition, &name, &interval, &unit, &mtype, &tags, &lastupdate) {
mkey, err := schema.MKeyFromString(id)
if err != nil {
log.Errorf("cassandra-idx: load() could not parse ID %q: %s -> skipping", id, err)
continue
}
if orgId < 0 {
orgId = int(idx.OrgIdPublic)
}
mdef := &schema.MetricDefinition{
Id: mkey,
OrgId: uint32(orgId),
Partition: partition,
Name: name,
Interval: interval,
Unit: unit,
Mtype: mtype,
Tags: tags,
LastUpdate: lastupdate,
}
nameWithTags := mdef.NameWithTags()
defsByNames[nameWithTags] = append(defsByNames[nameWithTags], mdef)
}
if err := iter.Close(); err != nil {
log.Fatalf("Could not close iterator: %s", err.Error())
}
// getting all cutoffs once saves having to recompute everytime we have a match
cutoffs := memory.IndexRules.Cutoffs(now)
NAMES:
for nameWithTags, defsByName := range defsByNames {
irId, _ := memory.IndexRules.Match(nameWithTags)
cutoff := cutoffs[irId]
for _, def := range defsByName {
if def.LastUpdate >= cutoff {
// if any of the defs for a given nameWithTags is not stale, then we need to load
// all the defs for that nameWithTags.
for _, defToAdd := range defsByNames[nameWithTags] {
defs = append(defs, *defToAdd)
}
continue NAMES
}
}
}
return defs
}
|
[
"func",
"(",
"c",
"*",
"CasIdx",
")",
"load",
"(",
"defs",
"[",
"]",
"schema",
".",
"MetricDefinition",
",",
"iter",
"cqlIterator",
",",
"now",
"time",
".",
"Time",
")",
"[",
"]",
"schema",
".",
"MetricDefinition",
"{",
"defsByNames",
":=",
"make",
"(",
"map",
"[",
"string",
"]",
"[",
"]",
"*",
"schema",
".",
"MetricDefinition",
")",
"\n",
"var",
"id",
",",
"name",
",",
"unit",
",",
"mtype",
"string",
"\n",
"var",
"orgId",
",",
"interval",
"int",
"\n",
"var",
"partition",
"int32",
"\n",
"var",
"lastupdate",
"int64",
"\n",
"var",
"tags",
"[",
"]",
"string",
"\n",
"for",
"iter",
".",
"Scan",
"(",
"&",
"id",
",",
"&",
"orgId",
",",
"&",
"partition",
",",
"&",
"name",
",",
"&",
"interval",
",",
"&",
"unit",
",",
"&",
"mtype",
",",
"&",
"tags",
",",
"&",
"lastupdate",
")",
"{",
"mkey",
",",
"err",
":=",
"schema",
".",
"MKeyFromString",
"(",
"id",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"log",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"id",
",",
"err",
")",
"\n",
"continue",
"\n",
"}",
"\n",
"if",
"orgId",
"<",
"0",
"{",
"orgId",
"=",
"int",
"(",
"idx",
".",
"OrgIdPublic",
")",
"\n",
"}",
"\n\n",
"mdef",
":=",
"&",
"schema",
".",
"MetricDefinition",
"{",
"Id",
":",
"mkey",
",",
"OrgId",
":",
"uint32",
"(",
"orgId",
")",
",",
"Partition",
":",
"partition",
",",
"Name",
":",
"name",
",",
"Interval",
":",
"interval",
",",
"Unit",
":",
"unit",
",",
"Mtype",
":",
"mtype",
",",
"Tags",
":",
"tags",
",",
"LastUpdate",
":",
"lastupdate",
",",
"}",
"\n",
"nameWithTags",
":=",
"mdef",
".",
"NameWithTags",
"(",
")",
"\n",
"defsByNames",
"[",
"nameWithTags",
"]",
"=",
"append",
"(",
"defsByNames",
"[",
"nameWithTags",
"]",
",",
"mdef",
")",
"\n",
"}",
"\n",
"if",
"err",
":=",
"iter",
".",
"Close",
"(",
")",
";",
"err",
"!=",
"nil",
"{",
"log",
".",
"Fatalf",
"(",
"\"",
"\"",
",",
"err",
".",
"Error",
"(",
")",
")",
"\n",
"}",
"\n\n",
"// getting all cutoffs once saves having to recompute everytime we have a match",
"cutoffs",
":=",
"memory",
".",
"IndexRules",
".",
"Cutoffs",
"(",
"now",
")",
"\n\n",
"NAMES",
":",
"for",
"nameWithTags",
",",
"defsByName",
":=",
"range",
"defsByNames",
"{",
"irId",
",",
"_",
":=",
"memory",
".",
"IndexRules",
".",
"Match",
"(",
"nameWithTags",
")",
"\n",
"cutoff",
":=",
"cutoffs",
"[",
"irId",
"]",
"\n",
"for",
"_",
",",
"def",
":=",
"range",
"defsByName",
"{",
"if",
"def",
".",
"LastUpdate",
">=",
"cutoff",
"{",
"// if any of the defs for a given nameWithTags is not stale, then we need to load",
"// all the defs for that nameWithTags.",
"for",
"_",
",",
"defToAdd",
":=",
"range",
"defsByNames",
"[",
"nameWithTags",
"]",
"{",
"defs",
"=",
"append",
"(",
"defs",
",",
"*",
"defToAdd",
")",
"\n",
"}",
"\n",
"continue",
"NAMES",
"\n",
"}",
"\n",
"}",
"\n",
"}",
"\n\n",
"return",
"defs",
"\n",
"}"
] |
// load appends MetricDefinitions from the iterator to defs and returns the modified defs, honoring pruning settings relative to now
|
[
"load",
"appends",
"MetricDefinitions",
"from",
"the",
"iterator",
"to",
"defs",
"and",
"returns",
"the",
"modified",
"defs",
"honoring",
"pruning",
"settings",
"relative",
"to",
"now"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/idx/cassandra/cassandra.go#L390-L445
|
train
|
grafana/metrictank
|
idx/cassandra/cassandra.go
|
ArchiveDefs
|
func (c *CasIdx) ArchiveDefs(defs []schema.MetricDefinition) (int, error) {
defChan := make(chan *schema.MetricDefinition, c.cfg.numConns)
g, ctx := errgroup.WithContext(context.Background())
// keep track of how many defs were successfully archived.
success := make([]int, c.cfg.numConns)
for i := 0; i < c.cfg.numConns; i++ {
i := i
g.Go(func() error {
for {
select {
case def, ok := <-defChan:
if !ok {
return nil
}
err := c.addDefToArchive(*def)
if err != nil {
// If we failed to add the def to the archive table then just continue on to the next def.
// As we havnet yet removed the this def from the metric_idx table yet, the next time archiving
// is performed the this def will be processed again. As no action is needed by an operator, we
// just log this as a warning.
log.Warnf("cassandra-idx: Failed add def to archive table. error=%s. def=%+v", err, *def)
continue
}
err = c.deleteDef(def.Id, def.Partition)
if err != nil {
// The next time archiving is performed this def will be processed again. Re-adding the def to the archive
// table will just be treated like an update with only the archived_at field changing. As no action is needed
// by an operator, we just log this as a warning.
log.Warnf("cassandra-idx: Failed to remove archived def from metric_idx table. error=%s. def=%+v", err, *def)
continue
}
// increment counter of defs successfully archived
success[i] = success[i] + 1
case <-ctx.Done():
return ctx.Err()
}
}
})
}
for i := range defs {
defChan <- &defs[i]
}
close(defChan)
// wait for all goroutines to complete.
err := g.Wait()
// get the count of defs successfully archived.
total := 0
for _, count := range success {
total = total + count
}
return total, err
}
|
go
|
func (c *CasIdx) ArchiveDefs(defs []schema.MetricDefinition) (int, error) {
defChan := make(chan *schema.MetricDefinition, c.cfg.numConns)
g, ctx := errgroup.WithContext(context.Background())
// keep track of how many defs were successfully archived.
success := make([]int, c.cfg.numConns)
for i := 0; i < c.cfg.numConns; i++ {
i := i
g.Go(func() error {
for {
select {
case def, ok := <-defChan:
if !ok {
return nil
}
err := c.addDefToArchive(*def)
if err != nil {
// If we failed to add the def to the archive table then just continue on to the next def.
// As we havnet yet removed the this def from the metric_idx table yet, the next time archiving
// is performed the this def will be processed again. As no action is needed by an operator, we
// just log this as a warning.
log.Warnf("cassandra-idx: Failed add def to archive table. error=%s. def=%+v", err, *def)
continue
}
err = c.deleteDef(def.Id, def.Partition)
if err != nil {
// The next time archiving is performed this def will be processed again. Re-adding the def to the archive
// table will just be treated like an update with only the archived_at field changing. As no action is needed
// by an operator, we just log this as a warning.
log.Warnf("cassandra-idx: Failed to remove archived def from metric_idx table. error=%s. def=%+v", err, *def)
continue
}
// increment counter of defs successfully archived
success[i] = success[i] + 1
case <-ctx.Done():
return ctx.Err()
}
}
})
}
for i := range defs {
defChan <- &defs[i]
}
close(defChan)
// wait for all goroutines to complete.
err := g.Wait()
// get the count of defs successfully archived.
total := 0
for _, count := range success {
total = total + count
}
return total, err
}
|
[
"func",
"(",
"c",
"*",
"CasIdx",
")",
"ArchiveDefs",
"(",
"defs",
"[",
"]",
"schema",
".",
"MetricDefinition",
")",
"(",
"int",
",",
"error",
")",
"{",
"defChan",
":=",
"make",
"(",
"chan",
"*",
"schema",
".",
"MetricDefinition",
",",
"c",
".",
"cfg",
".",
"numConns",
")",
"\n",
"g",
",",
"ctx",
":=",
"errgroup",
".",
"WithContext",
"(",
"context",
".",
"Background",
"(",
")",
")",
"\n\n",
"// keep track of how many defs were successfully archived.",
"success",
":=",
"make",
"(",
"[",
"]",
"int",
",",
"c",
".",
"cfg",
".",
"numConns",
")",
"\n\n",
"for",
"i",
":=",
"0",
";",
"i",
"<",
"c",
".",
"cfg",
".",
"numConns",
";",
"i",
"++",
"{",
"i",
":=",
"i",
"\n",
"g",
".",
"Go",
"(",
"func",
"(",
")",
"error",
"{",
"for",
"{",
"select",
"{",
"case",
"def",
",",
"ok",
":=",
"<-",
"defChan",
":",
"if",
"!",
"ok",
"{",
"return",
"nil",
"\n",
"}",
"\n",
"err",
":=",
"c",
".",
"addDefToArchive",
"(",
"*",
"def",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"// If we failed to add the def to the archive table then just continue on to the next def.",
"// As we havnet yet removed the this def from the metric_idx table yet, the next time archiving",
"// is performed the this def will be processed again. As no action is needed by an operator, we",
"// just log this as a warning.",
"log",
".",
"Warnf",
"(",
"\"",
"\"",
",",
"err",
",",
"*",
"def",
")",
"\n",
"continue",
"\n",
"}",
"\n\n",
"err",
"=",
"c",
".",
"deleteDef",
"(",
"def",
".",
"Id",
",",
"def",
".",
"Partition",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"// The next time archiving is performed this def will be processed again. Re-adding the def to the archive",
"// table will just be treated like an update with only the archived_at field changing. As no action is needed",
"// by an operator, we just log this as a warning.",
"log",
".",
"Warnf",
"(",
"\"",
"\"",
",",
"err",
",",
"*",
"def",
")",
"\n",
"continue",
"\n",
"}",
"\n\n",
"// increment counter of defs successfully archived",
"success",
"[",
"i",
"]",
"=",
"success",
"[",
"i",
"]",
"+",
"1",
"\n",
"case",
"<-",
"ctx",
".",
"Done",
"(",
")",
":",
"return",
"ctx",
".",
"Err",
"(",
")",
"\n",
"}",
"\n",
"}",
"\n",
"}",
")",
"\n",
"}",
"\n",
"for",
"i",
":=",
"range",
"defs",
"{",
"defChan",
"<-",
"&",
"defs",
"[",
"i",
"]",
"\n",
"}",
"\n",
"close",
"(",
"defChan",
")",
"\n\n",
"// wait for all goroutines to complete.",
"err",
":=",
"g",
".",
"Wait",
"(",
")",
"\n\n",
"// get the count of defs successfully archived.",
"total",
":=",
"0",
"\n",
"for",
"_",
",",
"count",
":=",
"range",
"success",
"{",
"total",
"=",
"total",
"+",
"count",
"\n",
"}",
"\n\n",
"return",
"total",
",",
"err",
"\n",
"}"
] |
// ArchiveDefs writes each of the provided defs to the archive table and
// then deletes the defs from the metric_idx table.
|
[
"ArchiveDefs",
"writes",
"each",
"of",
"the",
"provided",
"defs",
"to",
"the",
"archive",
"table",
"and",
"then",
"deletes",
"the",
"defs",
"from",
"the",
"metric_idx",
"table",
"."
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/idx/cassandra/cassandra.go#L449-L507
|
train
|
grafana/metrictank
|
mdata/notifierKafka/notifierKafka.go
|
flush
|
func (c *NotifierKafka) flush() {
if len(c.buf) == 0 {
return
}
// In order to correctly route the saveMessages to the correct partition,
// we can't send them in batches anymore.
payload := make([]*sarama.ProducerMessage, 0, len(c.buf))
var pMsg mdata.PersistMessageBatch
for i, msg := range c.buf {
amkey, err := schema.AMKeyFromString(msg.Key)
if err != nil {
log.Errorf("kafka-cluster: failed to parse key %q", msg.Key)
continue
}
partition, ok := c.handler.PartitionOf(amkey.MKey)
if !ok {
log.Errorf("kafka-cluster: failed to lookup metricDef with id %s", msg.Key)
continue
}
buf := bytes.NewBuffer(c.bPool.Get())
binary.Write(buf, binary.LittleEndian, uint8(mdata.PersistMessageBatchV1))
encoder := json.NewEncoder(buf)
pMsg = mdata.PersistMessageBatch{Instance: c.instance, SavedChunks: c.buf[i : i+1]}
err = encoder.Encode(&pMsg)
if err != nil {
log.Fatalf("kafka-cluster: failed to marshal persistMessage to json.")
}
messagesSize.Value(buf.Len())
kafkaMsg := &sarama.ProducerMessage{
Topic: topic,
Value: sarama.ByteEncoder(buf.Bytes()),
Partition: partition,
}
payload = append(payload, kafkaMsg)
}
c.buf = nil
go func() {
log.Debugf("kafka-cluster: sending %d batch metricPersist messages", len(payload))
sent := false
for !sent {
err := c.producer.SendMessages(payload)
if err != nil {
log.Warnf("kafka-cluster: publisher %s", err)
} else {
sent = true
}
time.Sleep(time.Second)
}
messagesPublished.Add(len(payload))
// put our buffers back in the bufferPool
for _, msg := range payload {
c.bPool.Put([]byte(msg.Value.(sarama.ByteEncoder)))
}
}()
}
|
go
|
func (c *NotifierKafka) flush() {
if len(c.buf) == 0 {
return
}
// In order to correctly route the saveMessages to the correct partition,
// we can't send them in batches anymore.
payload := make([]*sarama.ProducerMessage, 0, len(c.buf))
var pMsg mdata.PersistMessageBatch
for i, msg := range c.buf {
amkey, err := schema.AMKeyFromString(msg.Key)
if err != nil {
log.Errorf("kafka-cluster: failed to parse key %q", msg.Key)
continue
}
partition, ok := c.handler.PartitionOf(amkey.MKey)
if !ok {
log.Errorf("kafka-cluster: failed to lookup metricDef with id %s", msg.Key)
continue
}
buf := bytes.NewBuffer(c.bPool.Get())
binary.Write(buf, binary.LittleEndian, uint8(mdata.PersistMessageBatchV1))
encoder := json.NewEncoder(buf)
pMsg = mdata.PersistMessageBatch{Instance: c.instance, SavedChunks: c.buf[i : i+1]}
err = encoder.Encode(&pMsg)
if err != nil {
log.Fatalf("kafka-cluster: failed to marshal persistMessage to json.")
}
messagesSize.Value(buf.Len())
kafkaMsg := &sarama.ProducerMessage{
Topic: topic,
Value: sarama.ByteEncoder(buf.Bytes()),
Partition: partition,
}
payload = append(payload, kafkaMsg)
}
c.buf = nil
go func() {
log.Debugf("kafka-cluster: sending %d batch metricPersist messages", len(payload))
sent := false
for !sent {
err := c.producer.SendMessages(payload)
if err != nil {
log.Warnf("kafka-cluster: publisher %s", err)
} else {
sent = true
}
time.Sleep(time.Second)
}
messagesPublished.Add(len(payload))
// put our buffers back in the bufferPool
for _, msg := range payload {
c.bPool.Put([]byte(msg.Value.(sarama.ByteEncoder)))
}
}()
}
|
[
"func",
"(",
"c",
"*",
"NotifierKafka",
")",
"flush",
"(",
")",
"{",
"if",
"len",
"(",
"c",
".",
"buf",
")",
"==",
"0",
"{",
"return",
"\n",
"}",
"\n\n",
"// In order to correctly route the saveMessages to the correct partition,",
"// we can't send them in batches anymore.",
"payload",
":=",
"make",
"(",
"[",
"]",
"*",
"sarama",
".",
"ProducerMessage",
",",
"0",
",",
"len",
"(",
"c",
".",
"buf",
")",
")",
"\n",
"var",
"pMsg",
"mdata",
".",
"PersistMessageBatch",
"\n",
"for",
"i",
",",
"msg",
":=",
"range",
"c",
".",
"buf",
"{",
"amkey",
",",
"err",
":=",
"schema",
".",
"AMKeyFromString",
"(",
"msg",
".",
"Key",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"log",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"msg",
".",
"Key",
")",
"\n",
"continue",
"\n",
"}",
"\n\n",
"partition",
",",
"ok",
":=",
"c",
".",
"handler",
".",
"PartitionOf",
"(",
"amkey",
".",
"MKey",
")",
"\n",
"if",
"!",
"ok",
"{",
"log",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"msg",
".",
"Key",
")",
"\n",
"continue",
"\n",
"}",
"\n",
"buf",
":=",
"bytes",
".",
"NewBuffer",
"(",
"c",
".",
"bPool",
".",
"Get",
"(",
")",
")",
"\n",
"binary",
".",
"Write",
"(",
"buf",
",",
"binary",
".",
"LittleEndian",
",",
"uint8",
"(",
"mdata",
".",
"PersistMessageBatchV1",
")",
")",
"\n",
"encoder",
":=",
"json",
".",
"NewEncoder",
"(",
"buf",
")",
"\n",
"pMsg",
"=",
"mdata",
".",
"PersistMessageBatch",
"{",
"Instance",
":",
"c",
".",
"instance",
",",
"SavedChunks",
":",
"c",
".",
"buf",
"[",
"i",
":",
"i",
"+",
"1",
"]",
"}",
"\n",
"err",
"=",
"encoder",
".",
"Encode",
"(",
"&",
"pMsg",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"log",
".",
"Fatalf",
"(",
"\"",
"\"",
")",
"\n",
"}",
"\n",
"messagesSize",
".",
"Value",
"(",
"buf",
".",
"Len",
"(",
")",
")",
"\n",
"kafkaMsg",
":=",
"&",
"sarama",
".",
"ProducerMessage",
"{",
"Topic",
":",
"topic",
",",
"Value",
":",
"sarama",
".",
"ByteEncoder",
"(",
"buf",
".",
"Bytes",
"(",
")",
")",
",",
"Partition",
":",
"partition",
",",
"}",
"\n",
"payload",
"=",
"append",
"(",
"payload",
",",
"kafkaMsg",
")",
"\n",
"}",
"\n\n",
"c",
".",
"buf",
"=",
"nil",
"\n\n",
"go",
"func",
"(",
")",
"{",
"log",
".",
"Debugf",
"(",
"\"",
"\"",
",",
"len",
"(",
"payload",
")",
")",
"\n",
"sent",
":=",
"false",
"\n",
"for",
"!",
"sent",
"{",
"err",
":=",
"c",
".",
"producer",
".",
"SendMessages",
"(",
"payload",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"log",
".",
"Warnf",
"(",
"\"",
"\"",
",",
"err",
")",
"\n",
"}",
"else",
"{",
"sent",
"=",
"true",
"\n",
"}",
"\n",
"time",
".",
"Sleep",
"(",
"time",
".",
"Second",
")",
"\n",
"}",
"\n",
"messagesPublished",
".",
"Add",
"(",
"len",
"(",
"payload",
")",
")",
"\n",
"// put our buffers back in the bufferPool",
"for",
"_",
",",
"msg",
":=",
"range",
"payload",
"{",
"c",
".",
"bPool",
".",
"Put",
"(",
"[",
"]",
"byte",
"(",
"msg",
".",
"Value",
".",
"(",
"sarama",
".",
"ByteEncoder",
")",
")",
")",
"\n",
"}",
"\n",
"}",
"(",
")",
"\n",
"}"
] |
// flush makes sure the batch gets sent, asynchronously.
|
[
"flush",
"makes",
"sure",
"the",
"batch",
"gets",
"sent",
"asynchronously",
"."
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/mdata/notifierKafka/notifierKafka.go#L201-L259
|
train
|
grafana/metrictank
|
api/cluster.go
|
indexFind
|
func (s *Server) indexFind(ctx *middleware.Context, req models.IndexFind) {
resp := models.NewIndexFindResp()
// query nodes don't own any data
if s.MetricIndex == nil {
response.Write(ctx, response.NewMsgp(200, resp))
return
}
for _, pattern := range req.Patterns {
nodes, err := s.MetricIndex.Find(req.OrgId, pattern, req.From)
if err != nil {
response.Write(ctx, response.WrapError(err))
return
}
resp.Nodes[pattern] = nodes
}
response.Write(ctx, response.NewMsgp(200, resp))
}
|
go
|
func (s *Server) indexFind(ctx *middleware.Context, req models.IndexFind) {
resp := models.NewIndexFindResp()
// query nodes don't own any data
if s.MetricIndex == nil {
response.Write(ctx, response.NewMsgp(200, resp))
return
}
for _, pattern := range req.Patterns {
nodes, err := s.MetricIndex.Find(req.OrgId, pattern, req.From)
if err != nil {
response.Write(ctx, response.WrapError(err))
return
}
resp.Nodes[pattern] = nodes
}
response.Write(ctx, response.NewMsgp(200, resp))
}
|
[
"func",
"(",
"s",
"*",
"Server",
")",
"indexFind",
"(",
"ctx",
"*",
"middleware",
".",
"Context",
",",
"req",
"models",
".",
"IndexFind",
")",
"{",
"resp",
":=",
"models",
".",
"NewIndexFindResp",
"(",
")",
"\n\n",
"// query nodes don't own any data",
"if",
"s",
".",
"MetricIndex",
"==",
"nil",
"{",
"response",
".",
"Write",
"(",
"ctx",
",",
"response",
".",
"NewMsgp",
"(",
"200",
",",
"resp",
")",
")",
"\n",
"return",
"\n",
"}",
"\n\n",
"for",
"_",
",",
"pattern",
":=",
"range",
"req",
".",
"Patterns",
"{",
"nodes",
",",
"err",
":=",
"s",
".",
"MetricIndex",
".",
"Find",
"(",
"req",
".",
"OrgId",
",",
"pattern",
",",
"req",
".",
"From",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"response",
".",
"Write",
"(",
"ctx",
",",
"response",
".",
"WrapError",
"(",
"err",
")",
")",
"\n",
"return",
"\n",
"}",
"\n",
"resp",
".",
"Nodes",
"[",
"pattern",
"]",
"=",
"nodes",
"\n",
"}",
"\n",
"response",
".",
"Write",
"(",
"ctx",
",",
"response",
".",
"NewMsgp",
"(",
"200",
",",
"resp",
")",
")",
"\n",
"}"
] |
// IndexFind returns a sequence of msgp encoded idx.Node's
|
[
"IndexFind",
"returns",
"a",
"sequence",
"of",
"msgp",
"encoded",
"idx",
".",
"Node",
"s"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/api/cluster.go#L114-L132
|
train
|
grafana/metrictank
|
api/cluster.go
|
indexGet
|
func (s *Server) indexGet(ctx *middleware.Context, req models.IndexGet) {
// query nodes don't own any data.
if s.MetricIndex == nil {
response.Write(ctx, response.NewMsgp(404, nil))
return
}
def, ok := s.MetricIndex.Get(req.MKey)
if !ok {
response.Write(ctx, response.NewError(http.StatusNotFound, "Not Found"))
return
}
response.Write(ctx, response.NewMsgp(200, &def))
}
|
go
|
func (s *Server) indexGet(ctx *middleware.Context, req models.IndexGet) {
// query nodes don't own any data.
if s.MetricIndex == nil {
response.Write(ctx, response.NewMsgp(404, nil))
return
}
def, ok := s.MetricIndex.Get(req.MKey)
if !ok {
response.Write(ctx, response.NewError(http.StatusNotFound, "Not Found"))
return
}
response.Write(ctx, response.NewMsgp(200, &def))
}
|
[
"func",
"(",
"s",
"*",
"Server",
")",
"indexGet",
"(",
"ctx",
"*",
"middleware",
".",
"Context",
",",
"req",
"models",
".",
"IndexGet",
")",
"{",
"// query nodes don't own any data.",
"if",
"s",
".",
"MetricIndex",
"==",
"nil",
"{",
"response",
".",
"Write",
"(",
"ctx",
",",
"response",
".",
"NewMsgp",
"(",
"404",
",",
"nil",
")",
")",
"\n",
"return",
"\n",
"}",
"\n\n",
"def",
",",
"ok",
":=",
"s",
".",
"MetricIndex",
".",
"Get",
"(",
"req",
".",
"MKey",
")",
"\n",
"if",
"!",
"ok",
"{",
"response",
".",
"Write",
"(",
"ctx",
",",
"response",
".",
"NewError",
"(",
"http",
".",
"StatusNotFound",
",",
"\"",
"\"",
")",
")",
"\n",
"return",
"\n",
"}",
"\n\n",
"response",
".",
"Write",
"(",
"ctx",
",",
"response",
".",
"NewMsgp",
"(",
"200",
",",
"&",
"def",
")",
")",
"\n",
"}"
] |
// IndexGet returns a msgp encoded schema.MetricDefinition
|
[
"IndexGet",
"returns",
"a",
"msgp",
"encoded",
"schema",
".",
"MetricDefinition"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/api/cluster.go#L236-L251
|
train
|
grafana/metrictank
|
api/cluster.go
|
indexList
|
func (s *Server) indexList(ctx *middleware.Context, req models.IndexList) {
// query nodes don't own any data.
if s.MetricIndex == nil {
response.Write(ctx, response.NewMsgpArray(200, nil))
return
}
defs := s.MetricIndex.List(req.OrgId)
resp := make([]msgp.Marshaler, len(defs))
for i := range defs {
d := defs[i]
resp[i] = &d
}
response.Write(ctx, response.NewMsgpArray(200, resp))
}
|
go
|
func (s *Server) indexList(ctx *middleware.Context, req models.IndexList) {
// query nodes don't own any data.
if s.MetricIndex == nil {
response.Write(ctx, response.NewMsgpArray(200, nil))
return
}
defs := s.MetricIndex.List(req.OrgId)
resp := make([]msgp.Marshaler, len(defs))
for i := range defs {
d := defs[i]
resp[i] = &d
}
response.Write(ctx, response.NewMsgpArray(200, resp))
}
|
[
"func",
"(",
"s",
"*",
"Server",
")",
"indexList",
"(",
"ctx",
"*",
"middleware",
".",
"Context",
",",
"req",
"models",
".",
"IndexList",
")",
"{",
"// query nodes don't own any data.",
"if",
"s",
".",
"MetricIndex",
"==",
"nil",
"{",
"response",
".",
"Write",
"(",
"ctx",
",",
"response",
".",
"NewMsgpArray",
"(",
"200",
",",
"nil",
")",
")",
"\n",
"return",
"\n",
"}",
"\n\n",
"defs",
":=",
"s",
".",
"MetricIndex",
".",
"List",
"(",
"req",
".",
"OrgId",
")",
"\n",
"resp",
":=",
"make",
"(",
"[",
"]",
"msgp",
".",
"Marshaler",
",",
"len",
"(",
"defs",
")",
")",
"\n",
"for",
"i",
":=",
"range",
"defs",
"{",
"d",
":=",
"defs",
"[",
"i",
"]",
"\n",
"resp",
"[",
"i",
"]",
"=",
"&",
"d",
"\n",
"}",
"\n",
"response",
".",
"Write",
"(",
"ctx",
",",
"response",
".",
"NewMsgpArray",
"(",
"200",
",",
"resp",
")",
")",
"\n",
"}"
] |
// IndexList returns msgp encoded schema.MetricDefinition's
|
[
"IndexList",
"returns",
"msgp",
"encoded",
"schema",
".",
"MetricDefinition",
"s"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/api/cluster.go#L254-L269
|
train
|
Subsets and Splits
SQL Console for semeru/code-text-go
Retrieves a limited set of code samples with their languages, with a specific case adjustment for 'Go' language.