repo
stringlengths 5
54
| path
stringlengths 4
155
| func_name
stringlengths 1
118
| original_string
stringlengths 52
85.5k
| language
stringclasses 1
value | code
stringlengths 52
85.5k
| code_tokens
list | docstring
stringlengths 6
2.61k
| docstring_tokens
list | sha
stringlengths 40
40
| url
stringlengths 85
252
| partition
stringclasses 1
value |
|---|---|---|---|---|---|---|---|---|---|---|---|
grafana/metrictank
|
tracing/tracing.go
|
Error
|
// Error records err as an error log field on the given span.
func Error(span opentracing.Span, err error) {
	field := log.Error(err)
	span.LogFields(field)
}
|
go
|
func Error(span opentracing.Span, err error) {
span.LogFields(log.Error(err))
}
|
[
"func",
"Error",
"(",
"span",
"opentracing",
".",
"Span",
",",
"err",
"error",
")",
"{",
"span",
".",
"LogFields",
"(",
"log",
".",
"Error",
"(",
"err",
")",
")",
"\n",
"}"
] |
// Error logs error
|
[
"Error",
"logs",
"error"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/tracing/tracing.go#L26-L28
|
train
|
grafana/metrictank
|
tracing/tracing.go
|
Errorf
|
// Errorf formats its arguments according to format and records the
// resulting error as a log field on the given span.
func Errorf(span opentracing.Span, format string, a ...interface{}) {
	err := fmt.Errorf(format, a...)
	span.LogFields(log.Error(err))
}
|
go
|
func Errorf(span opentracing.Span, format string, a ...interface{}) {
span.LogFields(log.Error(fmt.Errorf(format, a...)))
}
|
[
"func",
"Errorf",
"(",
"span",
"opentracing",
".",
"Span",
",",
"format",
"string",
",",
"a",
"...",
"interface",
"{",
"}",
")",
"{",
"span",
".",
"LogFields",
"(",
"log",
".",
"Error",
"(",
"fmt",
".",
"Errorf",
"(",
"format",
",",
"a",
"...",
")",
")",
")",
"\n",
"}"
] |
// Errorf logs error
|
[
"Errorf",
"logs",
"error"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/tracing/tracing.go#L31-L33
|
train
|
grafana/metrictank
|
clock/clock.go
|
AlignedTick
|
func AlignedTick(period time.Duration) <-chan time.Time {
// note that time.Ticker is not an interface,
// and that if we instantiate one, we can't write to its channel
// hence we can't leverage that type.
c := make(chan time.Time)
go func() {
for {
unix := time.Now().UnixNano()
diff := time.Duration(period - (time.Duration(unix) % period))
time.Sleep(diff)
select {
case c <- time.Now():
default:
}
}
}()
return c
}
|
go
|
func AlignedTick(period time.Duration) <-chan time.Time {
// note that time.Ticker is not an interface,
// and that if we instantiate one, we can't write to its channel
// hence we can't leverage that type.
c := make(chan time.Time)
go func() {
for {
unix := time.Now().UnixNano()
diff := time.Duration(period - (time.Duration(unix) % period))
time.Sleep(diff)
select {
case c <- time.Now():
default:
}
}
}()
return c
}
|
[
"func",
"AlignedTick",
"(",
"period",
"time",
".",
"Duration",
")",
"<-",
"chan",
"time",
".",
"Time",
"{",
"// note that time.Ticker is not an interface,",
"// and that if we instantiate one, we can't write to its channel",
"// hence we can't leverage that type.",
"c",
":=",
"make",
"(",
"chan",
"time",
".",
"Time",
")",
"\n",
"go",
"func",
"(",
")",
"{",
"for",
"{",
"unix",
":=",
"time",
".",
"Now",
"(",
")",
".",
"UnixNano",
"(",
")",
"\n",
"diff",
":=",
"time",
".",
"Duration",
"(",
"period",
"-",
"(",
"time",
".",
"Duration",
"(",
"unix",
")",
"%",
"period",
")",
")",
"\n",
"time",
".",
"Sleep",
"(",
"diff",
")",
"\n",
"select",
"{",
"case",
"c",
"<-",
"time",
".",
"Now",
"(",
")",
":",
"default",
":",
"}",
"\n",
"}",
"\n",
"}",
"(",
")",
"\n",
"return",
"c",
"\n",
"}"
] |
// AlignedTick returns a tick channel so that, let's say interval is a second
// then it will tick at every whole second, or if it's 60s than it's every whole
// minute. Note that in my testing this is about .0001 to 0.0002 seconds later due
// to scheduling etc.
|
[
"AlignedTick",
"returns",
"a",
"tick",
"channel",
"so",
"that",
"let",
"s",
"say",
"interval",
"is",
"a",
"second",
"then",
"it",
"will",
"tick",
"at",
"every",
"whole",
"second",
"or",
"if",
"it",
"s",
"60s",
"than",
"it",
"s",
"every",
"whole",
"minute",
".",
"Note",
"that",
"in",
"my",
"testing",
"this",
"is",
"about",
".",
"0001",
"to",
"0",
".",
"0002",
"seconds",
"later",
"due",
"to",
"scheduling",
"etc",
"."
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/clock/clock.go#L9-L26
|
train
|
grafana/metrictank
|
api/middleware/tracer.go
|
Tracer
|
// Tracer returns a macaron middleware that starts an opentracing span for
// each incoming HTTP request, exposes it via the request context, records
// method/url/component tags plus status and size, and finishes the span
// once the request has been handled (unless handlers opt out via the
// "noTrace" context data key).
func Tracer(tracer opentracing.Tracer) macaron.Handler {
	return func(macCtx *macaron.Context) {
		path := pathSlug(macCtx.Req.URL.Path)
		// graphite cluster requests use local=1
		// this way we can differentiate "full" render requests from client to MT (encompassing data processing, proxing to graphite, etc)
		// from "subrequests" where metrictank is called by graphite and graphite does the processing and returns to the client
		if macCtx.Req.Request.Form.Get("local") == "1" {
			path += "-local"
		}
		// continue a trace from inbound headers when present; the Extract
		// error is deliberately ignored so header-less requests simply
		// start a fresh root span
		spanCtx, _ := tracer.Extract(opentracing.HTTPHeaders, opentracing.HTTPHeadersCarrier(macCtx.Req.Header))
		span := tracer.StartSpan("HTTP "+macCtx.Req.Method+" "+path, ext.RPCServerOption(spanCtx))
		ext.HTTPMethod.Set(span, macCtx.Req.Method)
		ext.HTTPUrl.Set(span, macCtx.Req.URL.String())
		ext.Component.Set(span, "metrictank/api")
		// make the span reachable by downstream handlers via the request context
		macCtx.Req = macaron.Request{macCtx.Req.WithContext(opentracing.ContextWithSpan(macCtx.Req.Context(), span))}
		// wrap the response writer so status, size and error body can be
		// captured after the handlers run
		macCtx.Resp = &TracingResponseWriter{
			ResponseWriter: macCtx.Resp,
		}
		macCtx.MapTo(macCtx.Resp, (*http.ResponseWriter)(nil))
		rw := macCtx.Resp.(*TracingResponseWriter)
		// if tracing is enabled (context is not a opentracing.noopSpanContext)
		// store traceID in output headers
		if spanCtx, ok := span.Context().(jaeger.SpanContext); ok {
			traceID := spanCtx.TraceID().String()
			headers := macCtx.Resp.Header()
			headers["Trace-Id"] = []string{traceID}
		}
		// call next handler. This will return after all handlers
		// have completed and the request has been sent.
		macCtx.Next()
		// if tracing has been disabled we return directly without calling
		// span.Finish()
		if noTrace, ok := macCtx.Data["noTrace"]; ok && noTrace.(bool) {
			return
		}
		status := rw.Status()
		ext.HTTPStatusCode.Set(span, uint16(status))
		if status >= 200 && status < 300 {
			// only successful responses get a size tag
			span.SetTag("http.size", rw.Size())
		}
		if status >= 400 {
			// attach the captured error body to the span; 5xx additionally
			// marks the span as failed
			tracing.Error(span, errors.New(string(rw.errBody)))
			if status >= http.StatusInternalServerError {
				tracing.Failure(span)
			}
		}
		span.Finish()
	}
}
|
go
|
func Tracer(tracer opentracing.Tracer) macaron.Handler {
return func(macCtx *macaron.Context) {
path := pathSlug(macCtx.Req.URL.Path)
// graphite cluster requests use local=1
// this way we can differentiate "full" render requests from client to MT (encompassing data processing, proxing to graphite, etc)
// from "subrequests" where metrictank is called by graphite and graphite does the processing and returns to the client
if macCtx.Req.Request.Form.Get("local") == "1" {
path += "-local"
}
spanCtx, _ := tracer.Extract(opentracing.HTTPHeaders, opentracing.HTTPHeadersCarrier(macCtx.Req.Header))
span := tracer.StartSpan("HTTP "+macCtx.Req.Method+" "+path, ext.RPCServerOption(spanCtx))
ext.HTTPMethod.Set(span, macCtx.Req.Method)
ext.HTTPUrl.Set(span, macCtx.Req.URL.String())
ext.Component.Set(span, "metrictank/api")
macCtx.Req = macaron.Request{macCtx.Req.WithContext(opentracing.ContextWithSpan(macCtx.Req.Context(), span))}
macCtx.Resp = &TracingResponseWriter{
ResponseWriter: macCtx.Resp,
}
macCtx.MapTo(macCtx.Resp, (*http.ResponseWriter)(nil))
rw := macCtx.Resp.(*TracingResponseWriter)
// if tracing is enabled (context is not a opentracing.noopSpanContext)
// store traceID in output headers
if spanCtx, ok := span.Context().(jaeger.SpanContext); ok {
traceID := spanCtx.TraceID().String()
headers := macCtx.Resp.Header()
headers["Trace-Id"] = []string{traceID}
}
// call next handler. This will return after all handlers
// have completed and the request has been sent.
macCtx.Next()
// if tracing has been disabled we return directly without calling
// span.Finish()
if noTrace, ok := macCtx.Data["noTrace"]; ok && noTrace.(bool) {
return
}
status := rw.Status()
ext.HTTPStatusCode.Set(span, uint16(status))
if status >= 200 && status < 300 {
span.SetTag("http.size", rw.Size())
}
if status >= 400 {
tracing.Error(span, errors.New(string(rw.errBody)))
if status >= http.StatusInternalServerError {
tracing.Failure(span)
}
}
span.Finish()
}
}
|
[
"func",
"Tracer",
"(",
"tracer",
"opentracing",
".",
"Tracer",
")",
"macaron",
".",
"Handler",
"{",
"return",
"func",
"(",
"macCtx",
"*",
"macaron",
".",
"Context",
")",
"{",
"path",
":=",
"pathSlug",
"(",
"macCtx",
".",
"Req",
".",
"URL",
".",
"Path",
")",
"\n",
"// graphite cluster requests use local=1",
"// this way we can differentiate \"full\" render requests from client to MT (encompassing data processing, proxing to graphite, etc)",
"// from \"subrequests\" where metrictank is called by graphite and graphite does the processing and returns to the client",
"if",
"macCtx",
".",
"Req",
".",
"Request",
".",
"Form",
".",
"Get",
"(",
"\"",
"\"",
")",
"==",
"\"",
"\"",
"{",
"path",
"+=",
"\"",
"\"",
"\n",
"}",
"\n\n",
"spanCtx",
",",
"_",
":=",
"tracer",
".",
"Extract",
"(",
"opentracing",
".",
"HTTPHeaders",
",",
"opentracing",
".",
"HTTPHeadersCarrier",
"(",
"macCtx",
".",
"Req",
".",
"Header",
")",
")",
"\n",
"span",
":=",
"tracer",
".",
"StartSpan",
"(",
"\"",
"\"",
"+",
"macCtx",
".",
"Req",
".",
"Method",
"+",
"\"",
"\"",
"+",
"path",
",",
"ext",
".",
"RPCServerOption",
"(",
"spanCtx",
")",
")",
"\n\n",
"ext",
".",
"HTTPMethod",
".",
"Set",
"(",
"span",
",",
"macCtx",
".",
"Req",
".",
"Method",
")",
"\n",
"ext",
".",
"HTTPUrl",
".",
"Set",
"(",
"span",
",",
"macCtx",
".",
"Req",
".",
"URL",
".",
"String",
"(",
")",
")",
"\n",
"ext",
".",
"Component",
".",
"Set",
"(",
"span",
",",
"\"",
"\"",
")",
"\n\n",
"macCtx",
".",
"Req",
"=",
"macaron",
".",
"Request",
"{",
"macCtx",
".",
"Req",
".",
"WithContext",
"(",
"opentracing",
".",
"ContextWithSpan",
"(",
"macCtx",
".",
"Req",
".",
"Context",
"(",
")",
",",
"span",
")",
")",
"}",
"\n",
"macCtx",
".",
"Resp",
"=",
"&",
"TracingResponseWriter",
"{",
"ResponseWriter",
":",
"macCtx",
".",
"Resp",
",",
"}",
"\n",
"macCtx",
".",
"MapTo",
"(",
"macCtx",
".",
"Resp",
",",
"(",
"*",
"http",
".",
"ResponseWriter",
")",
"(",
"nil",
")",
")",
"\n\n",
"rw",
":=",
"macCtx",
".",
"Resp",
".",
"(",
"*",
"TracingResponseWriter",
")",
"\n\n",
"// if tracing is enabled (context is not a opentracing.noopSpanContext)",
"// store traceID in output headers",
"if",
"spanCtx",
",",
"ok",
":=",
"span",
".",
"Context",
"(",
")",
".",
"(",
"jaeger",
".",
"SpanContext",
")",
";",
"ok",
"{",
"traceID",
":=",
"spanCtx",
".",
"TraceID",
"(",
")",
".",
"String",
"(",
")",
"\n",
"headers",
":=",
"macCtx",
".",
"Resp",
".",
"Header",
"(",
")",
"\n",
"headers",
"[",
"\"",
"\"",
"]",
"=",
"[",
"]",
"string",
"{",
"traceID",
"}",
"\n",
"}",
"\n\n",
"// call next handler. This will return after all handlers",
"// have completed and the request has been sent.",
"macCtx",
".",
"Next",
"(",
")",
"\n\n",
"// if tracing has been disabled we return directly without calling",
"// span.Finish()",
"if",
"noTrace",
",",
"ok",
":=",
"macCtx",
".",
"Data",
"[",
"\"",
"\"",
"]",
";",
"ok",
"&&",
"noTrace",
".",
"(",
"bool",
")",
"{",
"return",
"\n",
"}",
"\n\n",
"status",
":=",
"rw",
".",
"Status",
"(",
")",
"\n",
"ext",
".",
"HTTPStatusCode",
".",
"Set",
"(",
"span",
",",
"uint16",
"(",
"status",
")",
")",
"\n",
"if",
"status",
">=",
"200",
"&&",
"status",
"<",
"300",
"{",
"span",
".",
"SetTag",
"(",
"\"",
"\"",
",",
"rw",
".",
"Size",
"(",
")",
")",
"\n",
"}",
"\n",
"if",
"status",
">=",
"400",
"{",
"tracing",
".",
"Error",
"(",
"span",
",",
"errors",
".",
"New",
"(",
"string",
"(",
"rw",
".",
"errBody",
")",
")",
")",
"\n",
"if",
"status",
">=",
"http",
".",
"StatusInternalServerError",
"{",
"tracing",
".",
"Failure",
"(",
"span",
")",
"\n",
"}",
"\n",
"}",
"\n",
"span",
".",
"Finish",
"(",
")",
"\n",
"}",
"\n",
"}"
] |
// Tracer returns a middleware that traces requests
|
[
"Tracer",
"returns",
"a",
"middleware",
"that",
"traces",
"requests"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/api/middleware/tracer.go#L32-L88
|
train
|
grafana/metrictank
|
conf/schemas.go
|
Get
|
// Get returns the schema setting corresponding to the given index.
// Out-of-range indexes fall back to the default schema.
func (s Schemas) Get(i uint16) Schema {
	// Compare in int space. The previous form `i+1 > uint16(len(s.index))`
	// had two overflow hazards: i+1 wraps to 0 when i == math.MaxUint16
	// (defeating the guard and panicking on the index below), and the
	// uint16(len(...)) conversion truncates when the index holds more
	// than 65535 entries.
	if int(i) >= len(s.index) {
		return s.DefaultSchema
	}
	return s.index[i]
}
|
go
|
func (s Schemas) Get(i uint16) Schema {
if i+1 > uint16(len(s.index)) {
return s.DefaultSchema
}
return s.index[i]
}
|
[
"func",
"(",
"s",
"Schemas",
")",
"Get",
"(",
"i",
"uint16",
")",
"Schema",
"{",
"if",
"i",
"+",
"1",
">",
"uint16",
"(",
"len",
"(",
"s",
".",
"index",
")",
")",
"{",
"return",
"s",
".",
"DefaultSchema",
"\n",
"}",
"\n",
"return",
"s",
".",
"index",
"[",
"i",
"]",
"\n",
"}"
] |
// Get returns the schema setting corresponding to the given index
|
[
"Get",
"returns",
"the",
"schema",
"setting",
"corresponding",
"to",
"the",
"given",
"index"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/conf/schemas.go#L215-L220
|
train
|
grafana/metrictank
|
conf/schemas.go
|
TTLs
|
// TTLs returns the distinct TTLs seen across all retentions of all raw
// schemas plus the default schema. Order is unspecified (map iteration).
func (schemas Schemas) TTLs() []uint32 {
	seen := make(map[uint32]struct{})
	for _, schema := range schemas.raw {
		for _, ret := range schema.Retentions {
			seen[uint32(ret.MaxRetention())] = struct{}{}
		}
	}
	for _, ret := range schemas.DefaultSchema.Retentions {
		seen[uint32(ret.MaxRetention())] = struct{}{}
	}
	var out []uint32
	for ttl := range seen {
		out = append(out, ttl)
	}
	return out
}
|
go
|
func (schemas Schemas) TTLs() []uint32 {
ttls := make(map[uint32]struct{})
for _, s := range schemas.raw {
for _, r := range s.Retentions {
ttls[uint32(r.MaxRetention())] = struct{}{}
}
}
for _, r := range schemas.DefaultSchema.Retentions {
ttls[uint32(r.MaxRetention())] = struct{}{}
}
var ttlSlice []uint32
for ttl := range ttls {
ttlSlice = append(ttlSlice, ttl)
}
return ttlSlice
}
|
[
"func",
"(",
"schemas",
"Schemas",
")",
"TTLs",
"(",
")",
"[",
"]",
"uint32",
"{",
"ttls",
":=",
"make",
"(",
"map",
"[",
"uint32",
"]",
"struct",
"{",
"}",
")",
"\n",
"for",
"_",
",",
"s",
":=",
"range",
"schemas",
".",
"raw",
"{",
"for",
"_",
",",
"r",
":=",
"range",
"s",
".",
"Retentions",
"{",
"ttls",
"[",
"uint32",
"(",
"r",
".",
"MaxRetention",
"(",
")",
")",
"]",
"=",
"struct",
"{",
"}",
"{",
"}",
"\n",
"}",
"\n",
"}",
"\n",
"for",
"_",
",",
"r",
":=",
"range",
"schemas",
".",
"DefaultSchema",
".",
"Retentions",
"{",
"ttls",
"[",
"uint32",
"(",
"r",
".",
"MaxRetention",
"(",
")",
")",
"]",
"=",
"struct",
"{",
"}",
"{",
"}",
"\n",
"}",
"\n",
"var",
"ttlSlice",
"[",
"]",
"uint32",
"\n",
"for",
"ttl",
":=",
"range",
"ttls",
"{",
"ttlSlice",
"=",
"append",
"(",
"ttlSlice",
",",
"ttl",
")",
"\n",
"}",
"\n",
"return",
"ttlSlice",
"\n",
"}"
] |
// TTLs returns a slice of all TTL's seen amongst all archives of all schemas
|
[
"TTLs",
"returns",
"a",
"slice",
"of",
"all",
"TTL",
"s",
"seen",
"amongst",
"all",
"archives",
"of",
"all",
"schemas"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/conf/schemas.go#L223-L238
|
train
|
grafana/metrictank
|
conf/schemas.go
|
MaxChunkSpan
|
// MaxChunkSpan returns the largest chunk span configured across all
// retentions of all raw schemas and the default schema.
func (schemas Schemas) MaxChunkSpan() uint32 {
	var largest uint32
	for _, schema := range schemas.raw {
		for _, ret := range schema.Retentions {
			largest = util.Max(largest, ret.ChunkSpan)
		}
	}
	for _, ret := range schemas.DefaultSchema.Retentions {
		largest = util.Max(largest, ret.ChunkSpan)
	}
	return largest
}
|
go
|
func (schemas Schemas) MaxChunkSpan() uint32 {
max := uint32(0)
for _, s := range schemas.raw {
for _, r := range s.Retentions {
max = util.Max(max, r.ChunkSpan)
}
}
for _, r := range schemas.DefaultSchema.Retentions {
max = util.Max(max, r.ChunkSpan)
}
return max
}
|
[
"func",
"(",
"schemas",
"Schemas",
")",
"MaxChunkSpan",
"(",
")",
"uint32",
"{",
"max",
":=",
"uint32",
"(",
"0",
")",
"\n",
"for",
"_",
",",
"s",
":=",
"range",
"schemas",
".",
"raw",
"{",
"for",
"_",
",",
"r",
":=",
"range",
"s",
".",
"Retentions",
"{",
"max",
"=",
"util",
".",
"Max",
"(",
"max",
",",
"r",
".",
"ChunkSpan",
")",
"\n",
"}",
"\n",
"}",
"\n",
"for",
"_",
",",
"r",
":=",
"range",
"schemas",
".",
"DefaultSchema",
".",
"Retentions",
"{",
"max",
"=",
"util",
".",
"Max",
"(",
"max",
",",
"r",
".",
"ChunkSpan",
")",
"\n",
"}",
"\n",
"return",
"max",
"\n",
"}"
] |
// MaxChunkSpan returns the largest chunkspan seen amongst all archives of all schemas
|
[
"MaxChunkSpan",
"returns",
"the",
"largest",
"chunkspan",
"seen",
"amongst",
"all",
"archives",
"of",
"all",
"schemas"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/conf/schemas.go#L241-L252
|
train
|
grafana/metrictank
|
cmd/mt-index-cat/out/tpl_pattern_custom.go
|
patternCustom
|
// patternCustom turns the trailing input string into a query pattern,
// chosen by a weighted random pick over caller-provided operations.
// Arguments are one or more "<chance> <operation>" pairs followed by the
// input string; the chances must add up to 100. Operations: "pass"
// (passthrough), "<n>rcnw" (replace n consecutive nodes with wildcards)
// and "<n>rccw" (replace n consecutive chars with wildcards), where n is
// a single digit. Any malformed argument prints usage and exits.
func patternCustom(in ...interface{}) string {
	usage := func() {
		PatternCustomUsage("")
		os.Exit(-1)
	}
	// one or more of "<chance> <operation>" followed by an input string at the end.
	if len(in) < 3 || len(in)%2 != 1 {
		usage()
	}
	input, ok := in[len(in)-1].(string)
	if !ok {
		usage()
	}
	var buckets []bucket
	var sum int
	for i := 0; i < len(in)-2; i += 2 {
		chance, ok := in[i].(int)
		if !ok {
			usage()
		}
		patt, ok := in[i+1].(string)
		if !ok {
			usage()
		}
		if patt == "pass" {
			sum += chance
			buckets = append(buckets, bucket{
				chance: chance,
				fn:     Passthrough,
			})
			continue
		}
		// guard against an empty operation string before indexing patt[0],
		// which would otherwise panic
		if len(patt) == 0 || patt[0] < '0' || patt[0] > '9' {
			usage()
		}
		num := int(patt[0] - '0') // parse ascii number to int
		if patt[1:] != "rcnw" && patt[1:] != "rccw" {
			usage()
		}
		var fn func(in string) string
		if patt[1:] == "rcnw" {
			fn = ReplaceRandomConsecutiveNodesWildcard(num)
		} else {
			fn = ReplaceRandomConsecutiveCharsWildcard(num)
		}
		sum += chance
		buckets = append(buckets, bucket{
			chance: chance,
			fn:     fn,
		})
	}
	if sum != 100 {
		usage()
	}
	// pick a bucket proportionally to its chance
	pos := rand.Intn(100)
	sum = 0
	for _, b := range buckets {
		if pos < sum+b.chance {
			return b.fn(input)
		}
		sum += b.chance
	}
	// chances sum to 100 and pos < 100, so a bucket always matches;
	// the unreachable `return "foo"` that followed this panic was removed
	panic("should never happen")
}
|
go
|
func patternCustom(in ...interface{}) string {
usage := func() {
PatternCustomUsage("")
os.Exit(-1)
}
// one or more of "<chance> <operation>" followed by an input string at the end.
if len(in) < 3 || len(in)%2 != 1 {
usage()
}
input, ok := in[len(in)-1].(string)
if !ok {
usage()
}
var buckets []bucket
var sum int
for i := 0; i < len(in)-2; i += 2 {
chance, ok := in[i].(int)
if !ok {
usage()
}
patt, ok := in[i+1].(string)
if !ok {
usage()
}
if patt == "pass" {
sum += chance
buckets = append(buckets, bucket{
chance: chance,
fn: Passthrough,
})
continue
}
if patt[0] < '0' || patt[0] > '9' {
usage()
}
num := int(patt[0] - '0') // parse ascii number to int
if patt[1:] != "rcnw" && patt[1:] != "rccw" {
usage()
}
var fn func(in string) string
if patt[1:] == "rcnw" {
fn = ReplaceRandomConsecutiveNodesWildcard(num)
} else {
fn = ReplaceRandomConsecutiveCharsWildcard(num)
}
sum += chance
buckets = append(buckets, bucket{
chance: chance,
fn: fn,
})
}
if sum != 100 {
usage()
}
pos := rand.Intn(100)
sum = 0
for _, b := range buckets {
if pos < sum+b.chance {
return b.fn(input)
}
sum += b.chance
}
panic("should never happen")
return "foo"
}
|
[
"func",
"patternCustom",
"(",
"in",
"...",
"interface",
"{",
"}",
")",
"string",
"{",
"usage",
":=",
"func",
"(",
")",
"{",
"PatternCustomUsage",
"(",
"\"",
"\"",
")",
"\n",
"os",
".",
"Exit",
"(",
"-",
"1",
")",
"\n",
"}",
"\n\n",
"// one or more of \"<chance> <operation>\" followed by an input string at the end.",
"if",
"len",
"(",
"in",
")",
"<",
"3",
"||",
"len",
"(",
"in",
")",
"%",
"2",
"!=",
"1",
"{",
"usage",
"(",
")",
"\n",
"}",
"\n",
"input",
",",
"ok",
":=",
"in",
"[",
"len",
"(",
"in",
")",
"-",
"1",
"]",
".",
"(",
"string",
")",
"\n",
"if",
"!",
"ok",
"{",
"usage",
"(",
")",
"\n",
"}",
"\n",
"var",
"buckets",
"[",
"]",
"bucket",
"\n",
"var",
"sum",
"int",
"\n",
"for",
"i",
":=",
"0",
";",
"i",
"<",
"len",
"(",
"in",
")",
"-",
"2",
";",
"i",
"+=",
"2",
"{",
"chance",
",",
"ok",
":=",
"in",
"[",
"i",
"]",
".",
"(",
"int",
")",
"\n",
"if",
"!",
"ok",
"{",
"usage",
"(",
")",
"\n",
"}",
"\n",
"patt",
",",
"ok",
":=",
"in",
"[",
"i",
"+",
"1",
"]",
".",
"(",
"string",
")",
"\n",
"if",
"!",
"ok",
"{",
"usage",
"(",
")",
"\n",
"}",
"\n",
"if",
"patt",
"==",
"\"",
"\"",
"{",
"sum",
"+=",
"chance",
"\n",
"buckets",
"=",
"append",
"(",
"buckets",
",",
"bucket",
"{",
"chance",
":",
"chance",
",",
"fn",
":",
"Passthrough",
",",
"}",
")",
"\n",
"continue",
"\n",
"}",
"\n",
"if",
"patt",
"[",
"0",
"]",
"<",
"'0'",
"||",
"patt",
"[",
"0",
"]",
">",
"'9'",
"{",
"usage",
"(",
")",
"\n",
"}",
"\n",
"num",
":=",
"int",
"(",
"patt",
"[",
"0",
"]",
"-",
"'0'",
")",
"// parse ascii number to int",
"\n",
"if",
"patt",
"[",
"1",
":",
"]",
"!=",
"\"",
"\"",
"&&",
"patt",
"[",
"1",
":",
"]",
"!=",
"\"",
"\"",
"{",
"usage",
"(",
")",
"\n",
"}",
"\n",
"var",
"fn",
"func",
"(",
"in",
"string",
")",
"string",
"\n",
"if",
"patt",
"[",
"1",
":",
"]",
"==",
"\"",
"\"",
"{",
"fn",
"=",
"ReplaceRandomConsecutiveNodesWildcard",
"(",
"num",
")",
"\n",
"}",
"else",
"{",
"fn",
"=",
"ReplaceRandomConsecutiveCharsWildcard",
"(",
"num",
")",
"\n",
"}",
"\n\n",
"sum",
"+=",
"chance",
"\n",
"buckets",
"=",
"append",
"(",
"buckets",
",",
"bucket",
"{",
"chance",
":",
"chance",
",",
"fn",
":",
"fn",
",",
"}",
")",
"\n",
"}",
"\n",
"if",
"sum",
"!=",
"100",
"{",
"usage",
"(",
")",
"\n",
"}",
"\n",
"pos",
":=",
"rand",
".",
"Intn",
"(",
"100",
")",
"\n",
"sum",
"=",
"0",
"\n",
"for",
"_",
",",
"b",
":=",
"range",
"buckets",
"{",
"if",
"pos",
"<",
"sum",
"+",
"b",
".",
"chance",
"{",
"return",
"b",
".",
"fn",
"(",
"input",
")",
"\n",
"}",
"\n",
"sum",
"+=",
"b",
".",
"chance",
"\n\n",
"}",
"\n",
"panic",
"(",
"\"",
"\"",
")",
"\n",
"return",
"\"",
"\"",
"\n",
"}"
] |
// percentage chance, and function
|
[
"percentage",
"chance",
"and",
"function"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/cmd/mt-index-cat/out/tpl_pattern_custom.go#L28-L95
|
train
|
grafana/metrictank
|
cmd/mt-index-cat/out/tpl_pattern_custom.go
|
ReplaceRandomConsecutiveNodesWildcard
|
// ReplaceRandomConsecutiveNodesWildcard returns a function that replaces
// num consecutive, randomly positioned nodes of a dot-delimited metric
// name with "*" wildcards. It fatals if the metric has fewer than num
// nodes. The implementation is rather naive and can be optimized.
func ReplaceRandomConsecutiveNodesWildcard(num int) func(in string) string {
	return func(in string) string {
		parts := strings.Split(in, ".")
		if len(parts) < num {
			log.Fatalf("metric %q has not enough nodes to replace %d nodes", in, num)
		}
		pos := rand.Intn(len(parts) - num + 1)
		// bug fix: index with the loop variable i, not pos; the original
		// wrote parts[pos] num times, so only one node ever got replaced
		for i := pos; i < pos+num; i++ {
			parts[i] = "*"
		}
		return strings.Join(parts, ".")
	}
}
|
go
|
func ReplaceRandomConsecutiveNodesWildcard(num int) func(in string) string {
return func(in string) string {
parts := strings.Split(in, ".")
if len(parts) < num {
log.Fatalf("metric %q has not enough nodes to replace %d nodes", in, num)
}
pos := rand.Intn(len(parts) - num + 1)
for i := pos; i < pos+num; i++ {
parts[pos] = "*"
}
return strings.Join(parts, ".")
}
}
|
[
"func",
"ReplaceRandomConsecutiveNodesWildcard",
"(",
"num",
"int",
")",
"func",
"(",
"in",
"string",
")",
"string",
"{",
"return",
"func",
"(",
"in",
"string",
")",
"string",
"{",
"parts",
":=",
"strings",
".",
"Split",
"(",
"in",
",",
"\"",
"\"",
")",
"\n",
"if",
"len",
"(",
"parts",
")",
"<",
"num",
"{",
"log",
".",
"Fatalf",
"(",
"\"",
"\"",
",",
"in",
",",
"num",
")",
"\n",
"}",
"\n",
"pos",
":=",
"rand",
".",
"Intn",
"(",
"len",
"(",
"parts",
")",
"-",
"num",
"+",
"1",
")",
"\n",
"for",
"i",
":=",
"pos",
";",
"i",
"<",
"pos",
"+",
"num",
";",
"i",
"++",
"{",
"parts",
"[",
"pos",
"]",
"=",
"\"",
"\"",
"\n",
"}",
"\n",
"return",
"strings",
".",
"Join",
"(",
"parts",
",",
"\"",
"\"",
")",
"\n",
"}",
"\n",
"}"
] |
// ReplaceRandomConsecutiveNodesWildcard returns a function that will replace num consecutive random nodes with wildcards
// the implementation is rather naive and can be optimized
|
[
"ReplaceRandomConsecutiveNodesWildcard",
"returns",
"a",
"function",
"that",
"will",
"replace",
"num",
"consecutive",
"random",
"nodes",
"with",
"wildcards",
"the",
"implementation",
"is",
"rather",
"naive",
"and",
"can",
"be",
"optimized"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/cmd/mt-index-cat/out/tpl_pattern_custom.go#L103-L115
|
train
|
grafana/metrictank
|
cmd/mt-store-cat/series.go
|
printPointSummary
|
// printPointSummary prints a summarized view of the points stored for each
// given metric in each given table, between fromUnix and toUnix.
// When fix is non-zero the series is fetched via getSeries with that value
// (presumably a fixed interval to normalize to — confirm against getSeries)
// and summarized point-wise; otherwise raw chunk iterators from the store
// are summarized. Store lookup errors are fatal (panic) — acceptable for
// this CLI tool.
func printPointSummary(ctx context.Context, store *cassandra.CassandraStore, tables []cassandra.Table, metrics []Metric, fromUnix, toUnix, fix uint32) {
	for _, metric := range metrics {
		fmt.Println("## Metric", metric)
		for _, table := range tables {
			fmt.Println("### Table", table.Name)
			if fix != 0 {
				points := getSeries(ctx, store, table, metric.AMKey, fromUnix, toUnix, fix)
				printPointsSummary(points, fromUnix, toUnix)
			} else {
				igens, err := store.SearchTable(ctx, metric.AMKey, table, fromUnix, toUnix)
				if err != nil {
					panic(err)
				}
				printSummary(igens, fromUnix, toUnix)
			}
		}
	}
}
|
go
|
func printPointSummary(ctx context.Context, store *cassandra.CassandraStore, tables []cassandra.Table, metrics []Metric, fromUnix, toUnix, fix uint32) {
for _, metric := range metrics {
fmt.Println("## Metric", metric)
for _, table := range tables {
fmt.Println("### Table", table.Name)
if fix != 0 {
points := getSeries(ctx, store, table, metric.AMKey, fromUnix, toUnix, fix)
printPointsSummary(points, fromUnix, toUnix)
} else {
igens, err := store.SearchTable(ctx, metric.AMKey, table, fromUnix, toUnix)
if err != nil {
panic(err)
}
printSummary(igens, fromUnix, toUnix)
}
}
}
}
|
[
"func",
"printPointSummary",
"(",
"ctx",
"context",
".",
"Context",
",",
"store",
"*",
"cassandra",
".",
"CassandraStore",
",",
"tables",
"[",
"]",
"cassandra",
".",
"Table",
",",
"metrics",
"[",
"]",
"Metric",
",",
"fromUnix",
",",
"toUnix",
",",
"fix",
"uint32",
")",
"{",
"for",
"_",
",",
"metric",
":=",
"range",
"metrics",
"{",
"fmt",
".",
"Println",
"(",
"\"",
"\"",
",",
"metric",
")",
"\n",
"for",
"_",
",",
"table",
":=",
"range",
"tables",
"{",
"fmt",
".",
"Println",
"(",
"\"",
"\"",
",",
"table",
".",
"Name",
")",
"\n",
"if",
"fix",
"!=",
"0",
"{",
"points",
":=",
"getSeries",
"(",
"ctx",
",",
"store",
",",
"table",
",",
"metric",
".",
"AMKey",
",",
"fromUnix",
",",
"toUnix",
",",
"fix",
")",
"\n",
"printPointsSummary",
"(",
"points",
",",
"fromUnix",
",",
"toUnix",
")",
"\n",
"}",
"else",
"{",
"igens",
",",
"err",
":=",
"store",
".",
"SearchTable",
"(",
"ctx",
",",
"metric",
".",
"AMKey",
",",
"table",
",",
"fromUnix",
",",
"toUnix",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"panic",
"(",
"err",
")",
"\n",
"}",
"\n",
"printSummary",
"(",
"igens",
",",
"fromUnix",
",",
"toUnix",
")",
"\n",
"}",
"\n",
"}",
"\n",
"}",
"\n",
"}"
] |
// printPointSummary prints a summarized view of the points in the store corresponding to the given requirements
|
[
"printPointSummary",
"prints",
"a",
"summarized",
"view",
"of",
"the",
"points",
"in",
"the",
"store",
"corresponding",
"to",
"the",
"given",
"requirements"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/cmd/mt-store-cat/series.go#L36-L53
|
train
|
grafana/metrictank
|
cmd/mt-index-cat/out/template_functions.go
|
pattern
|
// pattern makes a uniform random choice between three transformations of
// in: replacing one node with a wildcard, replacing a few consecutive
// chars with a wildcard, or passing the input through unchanged.
func pattern(in string) string {
	switch rand.Intn(3) {
	case 0:
		// replace a randomly chosen node with a wildcard
		nodes := strings.Split(in, ".")
		nodes[rand.Intn(len(nodes))] = "*"
		return strings.Join(nodes, ".")
	case 1:
		// randomly replace chars with a *
		// note that in 1/5 cases, nothing happens
		// and otherwise, sometimes valid patterns are produced,
		// but it's also possible to produce patterns that won't match anything (if '.' was taken out)
		if len(in) < 5 {
			log.Fatalf("metric %q too short for pattern replacement", in)
		}
		width := rand.Intn(5)
		start := rand.Intn(len(in) - width)
		return in[:start] + "*" + in[start+width:]
	}
	// remaining case: pass the input through unchanged
	return in
}
|
go
|
func pattern(in string) string {
mode := rand.Intn(3)
if mode == 0 {
// in this mode, replaces a node with a wildcard
parts := strings.Split(in, ".")
parts[rand.Intn(len(parts))] = "*"
return strings.Join(parts, ".")
} else if mode == 1 {
// randomly replace chars with a *
// note that in 1/5 cases, nothing happens
// and otherwise, sometimes valid patterns are produced,
// but it's also possible to produce patterns that won't match anything (if '.' was taken out)
if len(in) < 5 {
log.Fatalf("metric %q too short for pattern replacement", in)
}
chars := rand.Intn(5)
pos := rand.Intn(len(in) - chars)
return in[0:pos] + "*" + in[pos+chars:]
}
// mode 3: do nothing :)
return in
}
|
[
"func",
"pattern",
"(",
"in",
"string",
")",
"string",
"{",
"mode",
":=",
"rand",
".",
"Intn",
"(",
"3",
")",
"\n",
"if",
"mode",
"==",
"0",
"{",
"// in this mode, replaces a node with a wildcard",
"parts",
":=",
"strings",
".",
"Split",
"(",
"in",
",",
"\"",
"\"",
")",
"\n",
"parts",
"[",
"rand",
".",
"Intn",
"(",
"len",
"(",
"parts",
")",
")",
"]",
"=",
"\"",
"\"",
"\n",
"return",
"strings",
".",
"Join",
"(",
"parts",
",",
"\"",
"\"",
")",
"\n",
"}",
"else",
"if",
"mode",
"==",
"1",
"{",
"// randomly replace chars with a *",
"// note that in 1/5 cases, nothing happens",
"// and otherwise, sometimes valid patterns are produced,",
"// but it's also possible to produce patterns that won't match anything (if '.' was taken out)",
"if",
"len",
"(",
"in",
")",
"<",
"5",
"{",
"log",
".",
"Fatalf",
"(",
"\"",
"\"",
",",
"in",
")",
"\n",
"}",
"\n",
"chars",
":=",
"rand",
".",
"Intn",
"(",
"5",
")",
"\n",
"pos",
":=",
"rand",
".",
"Intn",
"(",
"len",
"(",
"in",
")",
"-",
"chars",
")",
"\n",
"return",
"in",
"[",
"0",
":",
"pos",
"]",
"+",
"\"",
"\"",
"+",
"in",
"[",
"pos",
"+",
"chars",
":",
"]",
"\n",
"}",
"\n",
"// mode 3: do nothing :)",
"return",
"in",
"\n",
"}"
] |
// random choice between replacing a node with a wildcard, a char with a wildcard, and passthrough
|
[
"random",
"choice",
"between",
"replacing",
"a",
"node",
"with",
"a",
"wildcard",
"a",
"char",
"with",
"a",
"wildcard",
"and",
"passthrough"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/cmd/mt-index-cat/out/template_functions.go#L10-L31
|
train
|
grafana/metrictank
|
cmd/mt-index-cat/out/template_functions.go
|
roundDuration
|
// roundDuration rounds a duration given in seconds for rough
// classification: the larger the magnitude, the coarser the bucket.
func roundDuration(in int64) int64 {
	abs := in
	if abs < 0 {
		abs = -abs
	}
	const (
		day   = int64(3600 * 24)
		week  = day * 7
		month = day * 30
	)
	switch {
	case abs <= 10: // up to 10s: keep exact
		return in
	case abs <= 60: // up to 1min: round to 10s
		return round(in, 10)
	case abs <= 600: // up to 10min: round to 1min
		return round(in, 60)
	case abs <= 3600: // up to 1h: round to 10min
		return round(in, 600)
	case abs <= day: // up to 24h: round to 1h
		return round(in, 3600)
	case abs <= week: // up to 7d: round to 1d
		return round(in, day)
	case abs <= month: // up to 30d: round to 7d
		return round(in, week)
	default: // anything larger: round to months
		return round(in, month)
	}
}
|
go
|
func roundDuration(in int64) int64 {
abs := in
if abs < 0 {
abs = -abs
}
if abs <= 10 { // 10s -> don't round
return in
} else if abs <= 60 { // 1min -> round to 10s
return round(in, 10)
} else if abs <= 600 { // 10min -> round to 1min
return round(in, 60)
} else if abs <= 3600 { // 1h -> round to 10min
return round(in, 600)
} else if abs <= 3600*24 { // 24h -> round to 1h
return round(in, 3600)
} else if abs <= 3600*24*7 { // 7d -> round to 1d
return round(in, 3600*24)
} else if abs <= 3600*24*30 { // 30d -> round to 7d
return round(in, 3600*24*7)
}
// default to rounding to months
return round(in, 3600*24*30)
}
|
[
"func",
"roundDuration",
"(",
"in",
"int64",
")",
"int64",
"{",
"abs",
":=",
"in",
"\n",
"if",
"abs",
"<",
"0",
"{",
"abs",
"=",
"-",
"abs",
"\n",
"}",
"\n",
"if",
"abs",
"<=",
"10",
"{",
"// 10s -> don't round",
"return",
"in",
"\n",
"}",
"else",
"if",
"abs",
"<=",
"60",
"{",
"// 1min -> round to 10s",
"return",
"round",
"(",
"in",
",",
"10",
")",
"\n",
"}",
"else",
"if",
"abs",
"<=",
"600",
"{",
"// 10min -> round to 1min",
"return",
"round",
"(",
"in",
",",
"60",
")",
"\n",
"}",
"else",
"if",
"abs",
"<=",
"3600",
"{",
"// 1h -> round to 10min",
"return",
"round",
"(",
"in",
",",
"600",
")",
"\n",
"}",
"else",
"if",
"abs",
"<=",
"3600",
"*",
"24",
"{",
"// 24h -> round to 1h",
"return",
"round",
"(",
"in",
",",
"3600",
")",
"\n",
"}",
"else",
"if",
"abs",
"<=",
"3600",
"*",
"24",
"*",
"7",
"{",
"// 7d -> round to 1d",
"return",
"round",
"(",
"in",
",",
"3600",
"*",
"24",
")",
"\n",
"}",
"else",
"if",
"abs",
"<=",
"3600",
"*",
"24",
"*",
"30",
"{",
"// 30d -> round to 7d",
"return",
"round",
"(",
"in",
",",
"3600",
"*",
"24",
"*",
"7",
")",
"\n",
"}",
"\n",
"// default to rounding to months",
"return",
"round",
"(",
"in",
",",
"3600",
"*",
"24",
"*",
"30",
")",
"\n",
"}"
] |
// roundDuration rounds a second-specified duration for rough classification
|
[
"roundDuration",
"rounds",
"a",
"second",
"-",
"specified",
"duration",
"for",
"rough",
"classification"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/cmd/mt-index-cat/out/template_functions.go#L39-L61
|
train
|
grafana/metrictank
|
cmd/mt-index-cat/out/template_functions.go
|
round
|
func round(d, r int64) int64 {
neg := d < 0
if neg {
d = -d
}
if m := d % r; m+m < r {
d = d - m
} else {
d = d + r - m
}
if neg {
return -d
}
return d
}
|
go
|
func round(d, r int64) int64 {
neg := d < 0
if neg {
d = -d
}
if m := d % r; m+m < r {
d = d - m
} else {
d = d + r - m
}
if neg {
return -d
}
return d
}
|
[
"func",
"round",
"(",
"d",
",",
"r",
"int64",
")",
"int64",
"{",
"neg",
":=",
"d",
"<",
"0",
"\n",
"if",
"neg",
"{",
"d",
"=",
"-",
"d",
"\n",
"}",
"\n",
"if",
"m",
":=",
"d",
"%",
"r",
";",
"m",
"+",
"m",
"<",
"r",
"{",
"d",
"=",
"d",
"-",
"m",
"\n",
"}",
"else",
"{",
"d",
"=",
"d",
"+",
"r",
"-",
"m",
"\n",
"}",
"\n",
"if",
"neg",
"{",
"return",
"-",
"d",
"\n",
"}",
"\n",
"return",
"d",
"\n",
"}"
] |
// round rounds number d to the nearest r-boundary
|
[
"round",
"rounds",
"number",
"d",
"to",
"the",
"nearest",
"r",
"-",
"boundary"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/cmd/mt-index-cat/out/template_functions.go#L64-L78
|
train
|
grafana/metrictank
|
cmd/mt-store-cat/chunk_by_ttl.go
|
showKeyTTL
|
func showKeyTTL(iter *gocql.Iter, groupTTL string) {
roundTTL := 1
switch groupTTL {
case "m":
roundTTL = 60
case "h":
roundTTL = 60 * 60
case "d":
roundTTL = 60 * 60 * 24
}
var b bucket
bucketMap := make(map[bucket]int)
for iter.Scan(&b.key, &b.ttl) {
b.ttl /= roundTTL
bucketMap[b] += 1
}
var bucketList []bucketWithCount
for b, count := range bucketMap {
bucketList = append(bucketList, bucketWithCount{
b.key,
b.ttl,
count,
})
}
sort.Sort(byTTL(bucketList))
for _, b := range bucketList {
fmt.Printf("%s %d%s %d\n", b.key, b.ttl, groupTTL, b.c)
}
err := iter.Close()
if err != nil {
log.Errorf("cassandra query error. %s", err)
}
}
|
go
|
func showKeyTTL(iter *gocql.Iter, groupTTL string) {
roundTTL := 1
switch groupTTL {
case "m":
roundTTL = 60
case "h":
roundTTL = 60 * 60
case "d":
roundTTL = 60 * 60 * 24
}
var b bucket
bucketMap := make(map[bucket]int)
for iter.Scan(&b.key, &b.ttl) {
b.ttl /= roundTTL
bucketMap[b] += 1
}
var bucketList []bucketWithCount
for b, count := range bucketMap {
bucketList = append(bucketList, bucketWithCount{
b.key,
b.ttl,
count,
})
}
sort.Sort(byTTL(bucketList))
for _, b := range bucketList {
fmt.Printf("%s %d%s %d\n", b.key, b.ttl, groupTTL, b.c)
}
err := iter.Close()
if err != nil {
log.Errorf("cassandra query error. %s", err)
}
}
|
[
"func",
"showKeyTTL",
"(",
"iter",
"*",
"gocql",
".",
"Iter",
",",
"groupTTL",
"string",
")",
"{",
"roundTTL",
":=",
"1",
"\n",
"switch",
"groupTTL",
"{",
"case",
"\"",
"\"",
":",
"roundTTL",
"=",
"60",
"\n",
"case",
"\"",
"\"",
":",
"roundTTL",
"=",
"60",
"*",
"60",
"\n",
"case",
"\"",
"\"",
":",
"roundTTL",
"=",
"60",
"*",
"60",
"*",
"24",
"\n",
"}",
"\n\n",
"var",
"b",
"bucket",
"\n",
"bucketMap",
":=",
"make",
"(",
"map",
"[",
"bucket",
"]",
"int",
")",
"\n",
"for",
"iter",
".",
"Scan",
"(",
"&",
"b",
".",
"key",
",",
"&",
"b",
".",
"ttl",
")",
"{",
"b",
".",
"ttl",
"/=",
"roundTTL",
"\n",
"bucketMap",
"[",
"b",
"]",
"+=",
"1",
"\n",
"}",
"\n\n",
"var",
"bucketList",
"[",
"]",
"bucketWithCount",
"\n",
"for",
"b",
",",
"count",
":=",
"range",
"bucketMap",
"{",
"bucketList",
"=",
"append",
"(",
"bucketList",
",",
"bucketWithCount",
"{",
"b",
".",
"key",
",",
"b",
".",
"ttl",
",",
"count",
",",
"}",
")",
"\n",
"}",
"\n\n",
"sort",
".",
"Sort",
"(",
"byTTL",
"(",
"bucketList",
")",
")",
"\n",
"for",
"_",
",",
"b",
":=",
"range",
"bucketList",
"{",
"fmt",
".",
"Printf",
"(",
"\"",
"\\n",
"\"",
",",
"b",
".",
"key",
",",
"b",
".",
"ttl",
",",
"groupTTL",
",",
"b",
".",
"c",
")",
"\n",
"}",
"\n",
"err",
":=",
"iter",
".",
"Close",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"log",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"err",
")",
"\n",
"}",
"\n",
"}"
] |
// shows an overview of all keys and their ttls and closes the iter
// iter must return rows of key and ttl.
|
[
"shows",
"an",
"overview",
"of",
"all",
"keys",
"and",
"their",
"ttls",
"and",
"closes",
"the",
"iter",
"iter",
"must",
"return",
"rows",
"of",
"key",
"and",
"ttl",
"."
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/cmd/mt-store-cat/chunk_by_ttl.go#L30-L65
|
train
|
grafana/metrictank
|
conf/retention.go
|
ParseRetentions
|
func ParseRetentions(defs string) (Retentions, error) {
retentions := make(Retentions, 0)
for i, def := range strings.Split(defs, ",") {
def = strings.TrimSpace(def)
parts := strings.Split(def, ":")
if len(parts) < 2 || len(parts) > 5 {
return nil, fmt.Errorf("bad retentions spec %q", def)
}
// try old format
val1, err1 := strconv.ParseInt(parts[0], 10, 0)
val2, err2 := strconv.ParseInt(parts[1], 10, 0)
var retention Retention
var err error
if err1 == nil && err2 == nil {
retention = NewRetention(int(val1), int(val2))
} else {
// try new format
retention, err = ParseRetentionNew(def)
if err != nil {
return nil, err
}
}
if i != 0 && !schema.IsSpanValid(uint32(retention.SecondsPerPoint)) {
return nil, fmt.Errorf("invalid retention: can't encode span of %d", retention.SecondsPerPoint)
}
if len(parts) >= 3 {
retention.ChunkSpan, err = dur.ParseNDuration(parts[2])
if err != nil {
return nil, err
}
if (Month_sec % retention.ChunkSpan) != 0 {
return nil, errors.New("chunkSpan must fit without remainders into month_sec (28*24*60*60)")
}
_, ok := chunk.RevChunkSpans[retention.ChunkSpan]
if !ok {
return nil, fmt.Errorf("chunkSpan %s is not a valid value (https://github.com/grafana/metrictank/blob/master/docs/memory-server.md#valid-chunk-spans)", parts[2])
}
} else {
// default to a valid chunkspan that can hold at least 100 points, or select the largest one otherwise.
approxSpan := uint32(retention.SecondsPerPoint * 100)
var span uint32
for _, span = range chunk.ChunkSpans {
if span >= approxSpan {
break
}
}
retention.ChunkSpan = span
}
retention.NumChunks = 2
if len(parts) >= 4 {
i, err := strconv.Atoi(parts[3])
if err != nil {
return nil, err
}
retention.NumChunks = uint32(i)
}
if len(parts) == 5 {
// user is allowed to specify both a bool or a timestamp.
// internally we map both to timestamp.
// 0 (default) is effectively the same as 'true'
// math.MaxUint32 is effectively the same as 'false'
readyInt, err := strconv.ParseUint(parts[4], 10, 32)
if err == nil {
retention.Ready = uint32(readyInt)
} else {
readyBool, err := strconv.ParseBool(parts[4])
if err != nil {
return nil, errReadyFormat
}
if !readyBool {
retention.Ready = math.MaxUint32
}
}
}
retentions = append(retentions, retention)
}
return retentions, retentions.Validate()
}
|
go
|
func ParseRetentions(defs string) (Retentions, error) {
retentions := make(Retentions, 0)
for i, def := range strings.Split(defs, ",") {
def = strings.TrimSpace(def)
parts := strings.Split(def, ":")
if len(parts) < 2 || len(parts) > 5 {
return nil, fmt.Errorf("bad retentions spec %q", def)
}
// try old format
val1, err1 := strconv.ParseInt(parts[0], 10, 0)
val2, err2 := strconv.ParseInt(parts[1], 10, 0)
var retention Retention
var err error
if err1 == nil && err2 == nil {
retention = NewRetention(int(val1), int(val2))
} else {
// try new format
retention, err = ParseRetentionNew(def)
if err != nil {
return nil, err
}
}
if i != 0 && !schema.IsSpanValid(uint32(retention.SecondsPerPoint)) {
return nil, fmt.Errorf("invalid retention: can't encode span of %d", retention.SecondsPerPoint)
}
if len(parts) >= 3 {
retention.ChunkSpan, err = dur.ParseNDuration(parts[2])
if err != nil {
return nil, err
}
if (Month_sec % retention.ChunkSpan) != 0 {
return nil, errors.New("chunkSpan must fit without remainders into month_sec (28*24*60*60)")
}
_, ok := chunk.RevChunkSpans[retention.ChunkSpan]
if !ok {
return nil, fmt.Errorf("chunkSpan %s is not a valid value (https://github.com/grafana/metrictank/blob/master/docs/memory-server.md#valid-chunk-spans)", parts[2])
}
} else {
// default to a valid chunkspan that can hold at least 100 points, or select the largest one otherwise.
approxSpan := uint32(retention.SecondsPerPoint * 100)
var span uint32
for _, span = range chunk.ChunkSpans {
if span >= approxSpan {
break
}
}
retention.ChunkSpan = span
}
retention.NumChunks = 2
if len(parts) >= 4 {
i, err := strconv.Atoi(parts[3])
if err != nil {
return nil, err
}
retention.NumChunks = uint32(i)
}
if len(parts) == 5 {
// user is allowed to specify both a bool or a timestamp.
// internally we map both to timestamp.
// 0 (default) is effectively the same as 'true'
// math.MaxUint32 is effectively the same as 'false'
readyInt, err := strconv.ParseUint(parts[4], 10, 32)
if err == nil {
retention.Ready = uint32(readyInt)
} else {
readyBool, err := strconv.ParseBool(parts[4])
if err != nil {
return nil, errReadyFormat
}
if !readyBool {
retention.Ready = math.MaxUint32
}
}
}
retentions = append(retentions, retention)
}
return retentions, retentions.Validate()
}
|
[
"func",
"ParseRetentions",
"(",
"defs",
"string",
")",
"(",
"Retentions",
",",
"error",
")",
"{",
"retentions",
":=",
"make",
"(",
"Retentions",
",",
"0",
")",
"\n",
"for",
"i",
",",
"def",
":=",
"range",
"strings",
".",
"Split",
"(",
"defs",
",",
"\"",
"\"",
")",
"{",
"def",
"=",
"strings",
".",
"TrimSpace",
"(",
"def",
")",
"\n",
"parts",
":=",
"strings",
".",
"Split",
"(",
"def",
",",
"\"",
"\"",
")",
"\n",
"if",
"len",
"(",
"parts",
")",
"<",
"2",
"||",
"len",
"(",
"parts",
")",
">",
"5",
"{",
"return",
"nil",
",",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"def",
")",
"\n",
"}",
"\n\n",
"// try old format",
"val1",
",",
"err1",
":=",
"strconv",
".",
"ParseInt",
"(",
"parts",
"[",
"0",
"]",
",",
"10",
",",
"0",
")",
"\n",
"val2",
",",
"err2",
":=",
"strconv",
".",
"ParseInt",
"(",
"parts",
"[",
"1",
"]",
",",
"10",
",",
"0",
")",
"\n\n",
"var",
"retention",
"Retention",
"\n",
"var",
"err",
"error",
"\n",
"if",
"err1",
"==",
"nil",
"&&",
"err2",
"==",
"nil",
"{",
"retention",
"=",
"NewRetention",
"(",
"int",
"(",
"val1",
")",
",",
"int",
"(",
"val2",
")",
")",
"\n",
"}",
"else",
"{",
"// try new format",
"retention",
",",
"err",
"=",
"ParseRetentionNew",
"(",
"def",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n",
"}",
"\n",
"if",
"i",
"!=",
"0",
"&&",
"!",
"schema",
".",
"IsSpanValid",
"(",
"uint32",
"(",
"retention",
".",
"SecondsPerPoint",
")",
")",
"{",
"return",
"nil",
",",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"retention",
".",
"SecondsPerPoint",
")",
"\n\n",
"}",
"\n",
"if",
"len",
"(",
"parts",
")",
">=",
"3",
"{",
"retention",
".",
"ChunkSpan",
",",
"err",
"=",
"dur",
".",
"ParseNDuration",
"(",
"parts",
"[",
"2",
"]",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n",
"if",
"(",
"Month_sec",
"%",
"retention",
".",
"ChunkSpan",
")",
"!=",
"0",
"{",
"return",
"nil",
",",
"errors",
".",
"New",
"(",
"\"",
"\"",
")",
"\n",
"}",
"\n",
"_",
",",
"ok",
":=",
"chunk",
".",
"RevChunkSpans",
"[",
"retention",
".",
"ChunkSpan",
"]",
"\n",
"if",
"!",
"ok",
"{",
"return",
"nil",
",",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"parts",
"[",
"2",
"]",
")",
"\n",
"}",
"\n",
"}",
"else",
"{",
"// default to a valid chunkspan that can hold at least 100 points, or select the largest one otherwise.",
"approxSpan",
":=",
"uint32",
"(",
"retention",
".",
"SecondsPerPoint",
"*",
"100",
")",
"\n",
"var",
"span",
"uint32",
"\n",
"for",
"_",
",",
"span",
"=",
"range",
"chunk",
".",
"ChunkSpans",
"{",
"if",
"span",
">=",
"approxSpan",
"{",
"break",
"\n",
"}",
"\n",
"}",
"\n",
"retention",
".",
"ChunkSpan",
"=",
"span",
"\n",
"}",
"\n",
"retention",
".",
"NumChunks",
"=",
"2",
"\n",
"if",
"len",
"(",
"parts",
")",
">=",
"4",
"{",
"i",
",",
"err",
":=",
"strconv",
".",
"Atoi",
"(",
"parts",
"[",
"3",
"]",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n",
"retention",
".",
"NumChunks",
"=",
"uint32",
"(",
"i",
")",
"\n",
"}",
"\n",
"if",
"len",
"(",
"parts",
")",
"==",
"5",
"{",
"// user is allowed to specify both a bool or a timestamp.",
"// internally we map both to timestamp.",
"// 0 (default) is effectively the same as 'true'",
"// math.MaxUint32 is effectively the same as 'false'",
"readyInt",
",",
"err",
":=",
"strconv",
".",
"ParseUint",
"(",
"parts",
"[",
"4",
"]",
",",
"10",
",",
"32",
")",
"\n",
"if",
"err",
"==",
"nil",
"{",
"retention",
".",
"Ready",
"=",
"uint32",
"(",
"readyInt",
")",
"\n",
"}",
"else",
"{",
"readyBool",
",",
"err",
":=",
"strconv",
".",
"ParseBool",
"(",
"parts",
"[",
"4",
"]",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"errReadyFormat",
"\n",
"}",
"\n",
"if",
"!",
"readyBool",
"{",
"retention",
".",
"Ready",
"=",
"math",
".",
"MaxUint32",
"\n",
"}",
"\n",
"}",
"\n",
"}",
"\n\n",
"retentions",
"=",
"append",
"(",
"retentions",
",",
"retention",
")",
"\n",
"}",
"\n",
"return",
"retentions",
",",
"retentions",
".",
"Validate",
"(",
")",
"\n",
"}"
] |
// ParseRetentions parses retention definitions into a Retentions structure
|
[
"ParseRetentions",
"parses",
"retention",
"definitions",
"into",
"a",
"Retentions",
"structure"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/conf/retention.go#L92-L173
|
train
|
grafana/metrictank
|
cmd/mt-store-cat/metrics.go
|
getMetrics
|
func getMetrics(store *cassandra.CassandraStore, prefix, substr, glob string, archive schema.Archive) ([]Metric, error) {
var metrics []Metric
iter := store.Session.Query("select id, name from metric_idx").Iter()
var m Metric
var idString string
for iter.Scan(&idString, &m.name) {
if match(prefix, substr, glob, m) {
mkey, err := schema.MKeyFromString(idString)
if err != nil {
panic(err)
}
m.AMKey = schema.AMKey{
MKey: mkey,
Archive: archive,
}
metrics = append(metrics, m)
}
}
err := iter.Close()
if err != nil {
return metrics, err
}
sort.Sort(MetricsByName(metrics))
return metrics, nil
}
|
go
|
func getMetrics(store *cassandra.CassandraStore, prefix, substr, glob string, archive schema.Archive) ([]Metric, error) {
var metrics []Metric
iter := store.Session.Query("select id, name from metric_idx").Iter()
var m Metric
var idString string
for iter.Scan(&idString, &m.name) {
if match(prefix, substr, glob, m) {
mkey, err := schema.MKeyFromString(idString)
if err != nil {
panic(err)
}
m.AMKey = schema.AMKey{
MKey: mkey,
Archive: archive,
}
metrics = append(metrics, m)
}
}
err := iter.Close()
if err != nil {
return metrics, err
}
sort.Sort(MetricsByName(metrics))
return metrics, nil
}
|
[
"func",
"getMetrics",
"(",
"store",
"*",
"cassandra",
".",
"CassandraStore",
",",
"prefix",
",",
"substr",
",",
"glob",
"string",
",",
"archive",
"schema",
".",
"Archive",
")",
"(",
"[",
"]",
"Metric",
",",
"error",
")",
"{",
"var",
"metrics",
"[",
"]",
"Metric",
"\n",
"iter",
":=",
"store",
".",
"Session",
".",
"Query",
"(",
"\"",
"\"",
")",
".",
"Iter",
"(",
")",
"\n",
"var",
"m",
"Metric",
"\n",
"var",
"idString",
"string",
"\n",
"for",
"iter",
".",
"Scan",
"(",
"&",
"idString",
",",
"&",
"m",
".",
"name",
")",
"{",
"if",
"match",
"(",
"prefix",
",",
"substr",
",",
"glob",
",",
"m",
")",
"{",
"mkey",
",",
"err",
":=",
"schema",
".",
"MKeyFromString",
"(",
"idString",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"panic",
"(",
"err",
")",
"\n",
"}",
"\n",
"m",
".",
"AMKey",
"=",
"schema",
".",
"AMKey",
"{",
"MKey",
":",
"mkey",
",",
"Archive",
":",
"archive",
",",
"}",
"\n",
"metrics",
"=",
"append",
"(",
"metrics",
",",
"m",
")",
"\n",
"}",
"\n",
"}",
"\n",
"err",
":=",
"iter",
".",
"Close",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"metrics",
",",
"err",
"\n",
"}",
"\n",
"sort",
".",
"Sort",
"(",
"MetricsByName",
"(",
"metrics",
")",
")",
"\n",
"return",
"metrics",
",",
"nil",
"\n",
"}"
] |
// getMetrics lists all metrics from the store matching the given condition.
|
[
"getMetrics",
"lists",
"all",
"metrics",
"from",
"the",
"store",
"matching",
"the",
"given",
"condition",
"."
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/cmd/mt-store-cat/metrics.go#L53-L77
|
train
|
grafana/metrictank
|
cmd/mt-store-cat/metrics.go
|
getMetric
|
func getMetric(store *cassandra.CassandraStore, amkey schema.AMKey) ([]Metric, error) {
var metrics []Metric
// index only stores MKey's, not AMKey's.
iter := store.Session.Query("select name from metric_idx where id=? ALLOW FILTERING", amkey.MKey.String()).Iter()
var m Metric
for iter.Scan(&m.name) {
m.AMKey = amkey
metrics = append(metrics, m)
}
if len(metrics) > 1 {
panic(fmt.Sprintf("wtf. found more than one entry for id %s: %v", amkey.String(), metrics))
}
err := iter.Close()
if err != nil {
return metrics, err
}
return metrics, nil
}
|
go
|
func getMetric(store *cassandra.CassandraStore, amkey schema.AMKey) ([]Metric, error) {
var metrics []Metric
// index only stores MKey's, not AMKey's.
iter := store.Session.Query("select name from metric_idx where id=? ALLOW FILTERING", amkey.MKey.String()).Iter()
var m Metric
for iter.Scan(&m.name) {
m.AMKey = amkey
metrics = append(metrics, m)
}
if len(metrics) > 1 {
panic(fmt.Sprintf("wtf. found more than one entry for id %s: %v", amkey.String(), metrics))
}
err := iter.Close()
if err != nil {
return metrics, err
}
return metrics, nil
}
|
[
"func",
"getMetric",
"(",
"store",
"*",
"cassandra",
".",
"CassandraStore",
",",
"amkey",
"schema",
".",
"AMKey",
")",
"(",
"[",
"]",
"Metric",
",",
"error",
")",
"{",
"var",
"metrics",
"[",
"]",
"Metric",
"\n",
"// index only stores MKey's, not AMKey's.",
"iter",
":=",
"store",
".",
"Session",
".",
"Query",
"(",
"\"",
"\"",
",",
"amkey",
".",
"MKey",
".",
"String",
"(",
")",
")",
".",
"Iter",
"(",
")",
"\n",
"var",
"m",
"Metric",
"\n",
"for",
"iter",
".",
"Scan",
"(",
"&",
"m",
".",
"name",
")",
"{",
"m",
".",
"AMKey",
"=",
"amkey",
"\n",
"metrics",
"=",
"append",
"(",
"metrics",
",",
"m",
")",
"\n",
"}",
"\n",
"if",
"len",
"(",
"metrics",
")",
">",
"1",
"{",
"panic",
"(",
"fmt",
".",
"Sprintf",
"(",
"\"",
"\"",
",",
"amkey",
".",
"String",
"(",
")",
",",
"metrics",
")",
")",
"\n",
"}",
"\n",
"err",
":=",
"iter",
".",
"Close",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"metrics",
",",
"err",
"\n",
"}",
"\n",
"return",
"metrics",
",",
"nil",
"\n",
"}"
] |
// getMetric returns the metric for the given AMKey
|
[
"getMetric",
"returns",
"the",
"metric",
"for",
"the",
"given",
"AMKey"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/cmd/mt-store-cat/metrics.go#L80-L97
|
train
|
grafana/metrictank
|
consolidation/consolidate.go
|
ConsolidateStable
|
func ConsolidateStable(points []schema.Point, interval, maxDataPoints uint32, consolidator Consolidator) ([]schema.Point, uint32) {
aggNum := AggEvery(uint32(len(points)), maxDataPoints)
// note that the amount of points to strip is always < 1 postAggInterval's worth.
// there's 2 important considerations here:
// 1) we shouldn't make any too drastic alterations of the timerange returned compared to the requested time range
// 2) the stripping effort shouldn't significantly alter the output otherwise things get confusing
// these 2 remarks boil down to "the amount of points stripped should be a small fraction of the amount of input points"
// we use this simple heuristic:
// only nudge if we have points > 2 * postAggInterval's worth where "postAggInterval's worth is aggNum points"
//
// this also assures that in the special case where people request MaxDataPoints=1 we will always consolidate
// all points together and don't trim a significant amount of the points
// that are expected to go into the aggregation
// e.g. consider a case where we have points with ts 140,150,160,170
// aggNum = aggEvery(4/1) = 4, postAggInterval is thus 40.
// strict application of the logic would return 1 point with ts=200 (aggregation of all points 170-200 which is 1 point)
// and strip the first 3 points,
// which is not what we want. since we only have a small set of points, better to incorporate all points into 1 bucket with ts 170.
// note that in this case (where we don't nudge) the timestamps in output are not cleanly divisible by postAggInterval
// we only start stripping if we have more than 2*4=8 points
// see the unit tests which explore cases like this (TestConsolidateStableNoTrimDueToNotManyPoints)
if len(points) > int(2*aggNum) {
_, num := nudge(points[0].Ts, interval, aggNum)
points = points[num:]
}
points = Consolidate(points, aggNum, consolidator)
interval *= aggNum
return points, interval
}
|
go
|
func ConsolidateStable(points []schema.Point, interval, maxDataPoints uint32, consolidator Consolidator) ([]schema.Point, uint32) {
aggNum := AggEvery(uint32(len(points)), maxDataPoints)
// note that the amount of points to strip is always < 1 postAggInterval's worth.
// there's 2 important considerations here:
// 1) we shouldn't make any too drastic alterations of the timerange returned compared to the requested time range
// 2) the stripping effort shouldn't significantly alter the output otherwise things get confusing
// these 2 remarks boil down to "the amount of points stripped should be a small fraction of the amount of input points"
// we use this simple heuristic:
// only nudge if we have points > 2 * postAggInterval's worth where "postAggInterval's worth is aggNum points"
//
// this also assures that in the special case where people request MaxDataPoints=1 we will always consolidate
// all points together and don't trim a significant amount of the points
// that are expected to go into the aggregation
// e.g. consider a case where we have points with ts 140,150,160,170
// aggNum = aggEvery(4/1) = 4, postAggInterval is thus 40.
// strict application of the logic would return 1 point with ts=200 (aggregation of all points 170-200 which is 1 point)
// and strip the first 3 points,
// which is not what we want. since we only have a small set of points, better to incorporate all points into 1 bucket with ts 170.
// note that in this case (where we don't nudge) the timestamps in output are not cleanly divisible by postAggInterval
// we only start stripping if we have more than 2*4=8 points
// see the unit tests which explore cases like this (TestConsolidateStableNoTrimDueToNotManyPoints)
if len(points) > int(2*aggNum) {
_, num := nudge(points[0].Ts, interval, aggNum)
points = points[num:]
}
points = Consolidate(points, aggNum, consolidator)
interval *= aggNum
return points, interval
}
|
[
"func",
"ConsolidateStable",
"(",
"points",
"[",
"]",
"schema",
".",
"Point",
",",
"interval",
",",
"maxDataPoints",
"uint32",
",",
"consolidator",
"Consolidator",
")",
"(",
"[",
"]",
"schema",
".",
"Point",
",",
"uint32",
")",
"{",
"aggNum",
":=",
"AggEvery",
"(",
"uint32",
"(",
"len",
"(",
"points",
")",
")",
",",
"maxDataPoints",
")",
"\n",
"// note that the amount of points to strip is always < 1 postAggInterval's worth.",
"// there's 2 important considerations here:",
"// 1) we shouldn't make any too drastic alterations of the timerange returned compared to the requested time range",
"// 2) the stripping effort shouldn't significantly alter the output otherwise things get confusing",
"// these 2 remarks boil down to \"the amount of points stripped should be a small fraction of the amount of input points\"",
"// we use this simple heuristic:",
"// only nudge if we have points > 2 * postAggInterval's worth where \"postAggInterval's worth is aggNum points\"",
"//",
"// this also assures that in the special case where people request MaxDataPoints=1 we will always consolidate",
"// all points together and don't trim a significant amount of the points",
"// that are expected to go into the aggregation",
"// e.g. consider a case where we have points with ts 140,150,160,170",
"// aggNum = aggEvery(4/1) = 4, postAggInterval is thus 40.",
"// strict application of the logic would return 1 point with ts=200 (aggregation of all points 170-200 which is 1 point)",
"// and strip the first 3 points,",
"// which is not what we want. since we only have a small set of points, better to incorporate all points into 1 bucket with ts 170.",
"// note that in this case (where we don't nudge) the timestamps in output are not cleanly divisible by postAggInterval",
"// we only start stripping if we have more than 2*4=8 points",
"// see the unit tests which explore cases like this (TestConsolidateStableNoTrimDueToNotManyPoints)",
"if",
"len",
"(",
"points",
")",
">",
"int",
"(",
"2",
"*",
"aggNum",
")",
"{",
"_",
",",
"num",
":=",
"nudge",
"(",
"points",
"[",
"0",
"]",
".",
"Ts",
",",
"interval",
",",
"aggNum",
")",
"\n",
"points",
"=",
"points",
"[",
"num",
":",
"]",
"\n",
"}",
"\n",
"points",
"=",
"Consolidate",
"(",
"points",
",",
"aggNum",
",",
"consolidator",
")",
"\n",
"interval",
"*=",
"aggNum",
"\n",
"return",
"points",
",",
"interval",
"\n",
"}"
] |
// ConsolidateStable consolidates points in a "stable" way, meaning if you run the same function again so that the input
// receives new points at the end and old points get removed at the beginning, we keep picking the same points to consolidate together
// interval is the interval between the input points
|
[
"ConsolidateStable",
"consolidates",
"points",
"in",
"a",
"stable",
"way",
"meaning",
"if",
"you",
"run",
"the",
"same",
"function",
"again",
"so",
"that",
"the",
"input",
"receives",
"new",
"points",
"at",
"the",
"end",
"and",
"old",
"points",
"get",
"removed",
"at",
"the",
"beginning",
"we",
"keep",
"picking",
"the",
"same",
"points",
"to",
"consolidate",
"together",
"interval",
"is",
"the",
"interval",
"between",
"the",
"input",
"points"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/consolidation/consolidate.go#L79-L108
|
train
|
grafana/metrictank
|
api/dataprocessor.go
|
doRecover
|
func doRecover(errp *error) {
e := recover()
if e != nil {
if _, ok := e.(runtime.Error); ok {
panic(e)
}
if err, ok := e.(error); ok {
*errp = err
} else if errStr, ok := e.(string); ok {
*errp = errors.New(errStr)
} else {
*errp = fmt.Errorf("%v", e)
}
}
return
}
|
go
|
func doRecover(errp *error) {
e := recover()
if e != nil {
if _, ok := e.(runtime.Error); ok {
panic(e)
}
if err, ok := e.(error); ok {
*errp = err
} else if errStr, ok := e.(string); ok {
*errp = errors.New(errStr)
} else {
*errp = fmt.Errorf("%v", e)
}
}
return
}
|
[
"func",
"doRecover",
"(",
"errp",
"*",
"error",
")",
"{",
"e",
":=",
"recover",
"(",
")",
"\n",
"if",
"e",
"!=",
"nil",
"{",
"if",
"_",
",",
"ok",
":=",
"e",
".",
"(",
"runtime",
".",
"Error",
")",
";",
"ok",
"{",
"panic",
"(",
"e",
")",
"\n",
"}",
"\n",
"if",
"err",
",",
"ok",
":=",
"e",
".",
"(",
"error",
")",
";",
"ok",
"{",
"*",
"errp",
"=",
"err",
"\n",
"}",
"else",
"if",
"errStr",
",",
"ok",
":=",
"e",
".",
"(",
"string",
")",
";",
"ok",
"{",
"*",
"errp",
"=",
"errors",
".",
"New",
"(",
"errStr",
")",
"\n",
"}",
"else",
"{",
"*",
"errp",
"=",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"e",
")",
"\n",
"}",
"\n",
"}",
"\n",
"return",
"\n",
"}"
] |
// doRecover is the handler that turns panics into returns from the top level of getTarget.
|
[
"doRecover",
"is",
"the",
"handler",
"that",
"turns",
"panics",
"into",
"returns",
"from",
"the",
"top",
"level",
"of",
"getTarget",
"."
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/api/dataprocessor.go#L25-L40
|
train
|
grafana/metrictank
|
api/dataprocessor.go
|
getTargetsRemote
|
func (s *Server) getTargetsRemote(ctx context.Context, remoteReqs map[string][]models.Req) ([]models.Series, error) {
responses := make(chan getTargetsResp, len(remoteReqs))
rCtx, cancel := context.WithCancel(ctx)
defer cancel()
wg := sync.WaitGroup{}
wg.Add(len(remoteReqs))
for _, nodeReqs := range remoteReqs {
log.Debugf("DP getTargetsRemote: handling %d reqs from %s", len(nodeReqs), nodeReqs[0].Node.GetName())
go func(reqs []models.Req) {
defer wg.Done()
node := reqs[0].Node
buf, err := node.Post(rCtx, "getTargetsRemote", "/getdata", models.GetData{Requests: reqs})
if err != nil {
cancel()
responses <- getTargetsResp{nil, err}
return
}
var resp models.GetDataResp
_, err = resp.UnmarshalMsg(buf)
if err != nil {
cancel()
log.Errorf("DP getTargetsRemote: error unmarshaling body from %s/getdata: %q", node.GetName(), err.Error())
responses <- getTargetsResp{nil, err}
return
}
log.Debugf("DP getTargetsRemote: %s returned %d series", node.GetName(), len(resp.Series))
responses <- getTargetsResp{resp.Series, nil}
}(nodeReqs)
}
// wait for all getTargetsRemote goroutines to end, then close our responses channel
go func() {
wg.Wait()
close(responses)
}()
out := make([]models.Series, 0)
for resp := range responses {
if resp.err != nil {
return nil, resp.err
}
out = append(out, resp.series...)
}
log.Debugf("DP getTargetsRemote: total of %d series found on peers", len(out))
return out, nil
}
|
go
|
func (s *Server) getTargetsRemote(ctx context.Context, remoteReqs map[string][]models.Req) ([]models.Series, error) {
responses := make(chan getTargetsResp, len(remoteReqs))
rCtx, cancel := context.WithCancel(ctx)
defer cancel()
wg := sync.WaitGroup{}
wg.Add(len(remoteReqs))
for _, nodeReqs := range remoteReqs {
log.Debugf("DP getTargetsRemote: handling %d reqs from %s", len(nodeReqs), nodeReqs[0].Node.GetName())
go func(reqs []models.Req) {
defer wg.Done()
node := reqs[0].Node
buf, err := node.Post(rCtx, "getTargetsRemote", "/getdata", models.GetData{Requests: reqs})
if err != nil {
cancel()
responses <- getTargetsResp{nil, err}
return
}
var resp models.GetDataResp
_, err = resp.UnmarshalMsg(buf)
if err != nil {
cancel()
log.Errorf("DP getTargetsRemote: error unmarshaling body from %s/getdata: %q", node.GetName(), err.Error())
responses <- getTargetsResp{nil, err}
return
}
log.Debugf("DP getTargetsRemote: %s returned %d series", node.GetName(), len(resp.Series))
responses <- getTargetsResp{resp.Series, nil}
}(nodeReqs)
}
// wait for all getTargetsRemote goroutines to end, then close our responses channel
go func() {
wg.Wait()
close(responses)
}()
out := make([]models.Series, 0)
for resp := range responses {
if resp.err != nil {
return nil, resp.err
}
out = append(out, resp.series...)
}
log.Debugf("DP getTargetsRemote: total of %d series found on peers", len(out))
return out, nil
}
|
[
"func",
"(",
"s",
"*",
"Server",
")",
"getTargetsRemote",
"(",
"ctx",
"context",
".",
"Context",
",",
"remoteReqs",
"map",
"[",
"string",
"]",
"[",
"]",
"models",
".",
"Req",
")",
"(",
"[",
"]",
"models",
".",
"Series",
",",
"error",
")",
"{",
"responses",
":=",
"make",
"(",
"chan",
"getTargetsResp",
",",
"len",
"(",
"remoteReqs",
")",
")",
"\n",
"rCtx",
",",
"cancel",
":=",
"context",
".",
"WithCancel",
"(",
"ctx",
")",
"\n",
"defer",
"cancel",
"(",
")",
"\n",
"wg",
":=",
"sync",
".",
"WaitGroup",
"{",
"}",
"\n",
"wg",
".",
"Add",
"(",
"len",
"(",
"remoteReqs",
")",
")",
"\n",
"for",
"_",
",",
"nodeReqs",
":=",
"range",
"remoteReqs",
"{",
"log",
".",
"Debugf",
"(",
"\"",
"\"",
",",
"len",
"(",
"nodeReqs",
")",
",",
"nodeReqs",
"[",
"0",
"]",
".",
"Node",
".",
"GetName",
"(",
")",
")",
"\n",
"go",
"func",
"(",
"reqs",
"[",
"]",
"models",
".",
"Req",
")",
"{",
"defer",
"wg",
".",
"Done",
"(",
")",
"\n",
"node",
":=",
"reqs",
"[",
"0",
"]",
".",
"Node",
"\n",
"buf",
",",
"err",
":=",
"node",
".",
"Post",
"(",
"rCtx",
",",
"\"",
"\"",
",",
"\"",
"\"",
",",
"models",
".",
"GetData",
"{",
"Requests",
":",
"reqs",
"}",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"cancel",
"(",
")",
"\n",
"responses",
"<-",
"getTargetsResp",
"{",
"nil",
",",
"err",
"}",
"\n",
"return",
"\n",
"}",
"\n",
"var",
"resp",
"models",
".",
"GetDataResp",
"\n",
"_",
",",
"err",
"=",
"resp",
".",
"UnmarshalMsg",
"(",
"buf",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"cancel",
"(",
")",
"\n",
"log",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"node",
".",
"GetName",
"(",
")",
",",
"err",
".",
"Error",
"(",
")",
")",
"\n",
"responses",
"<-",
"getTargetsResp",
"{",
"nil",
",",
"err",
"}",
"\n",
"return",
"\n",
"}",
"\n",
"log",
".",
"Debugf",
"(",
"\"",
"\"",
",",
"node",
".",
"GetName",
"(",
")",
",",
"len",
"(",
"resp",
".",
"Series",
")",
")",
"\n",
"responses",
"<-",
"getTargetsResp",
"{",
"resp",
".",
"Series",
",",
"nil",
"}",
"\n",
"}",
"(",
"nodeReqs",
")",
"\n",
"}",
"\n\n",
"// wait for all getTargetsRemote goroutines to end, then close our responses channel",
"go",
"func",
"(",
")",
"{",
"wg",
".",
"Wait",
"(",
")",
"\n",
"close",
"(",
"responses",
")",
"\n",
"}",
"(",
")",
"\n\n",
"out",
":=",
"make",
"(",
"[",
"]",
"models",
".",
"Series",
",",
"0",
")",
"\n",
"for",
"resp",
":=",
"range",
"responses",
"{",
"if",
"resp",
".",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"resp",
".",
"err",
"\n",
"}",
"\n",
"out",
"=",
"append",
"(",
"out",
",",
"resp",
".",
"series",
"...",
")",
"\n",
"}",
"\n",
"log",
".",
"Debugf",
"(",
"\"",
"\"",
",",
"len",
"(",
"out",
")",
")",
"\n",
"return",
"out",
",",
"nil",
"\n",
"}"
] |
// getTargetsRemote issues the requests on other nodes
// it's nothing more than a thin network wrapper around getTargetsLocal of a peer.
|
[
"getTargetsRemote",
"issues",
"the",
"requests",
"on",
"other",
"nodes",
"it",
"s",
"nothing",
"more",
"than",
"a",
"thin",
"network",
"wrapper",
"around",
"getTargetsLocal",
"of",
"a",
"peer",
"."
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/api/dataprocessor.go#L194-L239
|
train
|
grafana/metrictank
|
api/dataprocessor.go
|
getTargetsLocal
|
func (s *Server) getTargetsLocal(ctx context.Context, reqs []models.Req) ([]models.Series, error) {
log.Debugf("DP getTargetsLocal: handling %d reqs locally", len(reqs))
responses := make(chan getTargetsResp, len(reqs))
var wg sync.WaitGroup
reqLimiter := util.NewLimiter(getTargetsConcurrency)
rCtx, cancel := context.WithCancel(ctx)
defer cancel()
LOOP:
for _, req := range reqs {
// if there are already getDataConcurrency goroutines running, then block
// until a slot becomes free or our context is canceled.
if !reqLimiter.Acquire(rCtx) {
//request canceled
break LOOP
}
wg.Add(1)
go func(req models.Req) {
rCtx, span := tracing.NewSpan(rCtx, s.Tracer, "getTargetsLocal")
req.Trace(span)
pre := time.Now()
points, interval, err := s.getTarget(rCtx, req)
if err != nil {
tags.Error.Set(span, true)
cancel() // cancel all other requests.
responses <- getTargetsResp{nil, err}
} else {
getTargetDuration.Value(time.Now().Sub(pre))
responses <- getTargetsResp{[]models.Series{{
Target: req.Target, // always simply the metric name from index
Datapoints: points,
Interval: interval,
QueryPatt: req.Pattern, // foo.* or foo.bar whatever the etName arg was
QueryFrom: req.From,
QueryTo: req.To,
QueryCons: req.ConsReq,
Consolidator: req.Consolidator,
}}, nil}
}
wg.Done()
// pop an item of our limiter so that other requests can be processed.
reqLimiter.Release()
span.Finish()
}(req)
}
go func() {
wg.Wait()
close(responses)
}()
out := make([]models.Series, 0, len(reqs))
for resp := range responses {
if resp.err != nil {
return nil, resp.err
}
out = append(out, resp.series...)
}
log.Debugf("DP getTargetsLocal: %d series found locally", len(out))
return out, nil
}
|
go
|
func (s *Server) getTargetsLocal(ctx context.Context, reqs []models.Req) ([]models.Series, error) {
log.Debugf("DP getTargetsLocal: handling %d reqs locally", len(reqs))
responses := make(chan getTargetsResp, len(reqs))
var wg sync.WaitGroup
reqLimiter := util.NewLimiter(getTargetsConcurrency)
rCtx, cancel := context.WithCancel(ctx)
defer cancel()
LOOP:
for _, req := range reqs {
// if there are already getDataConcurrency goroutines running, then block
// until a slot becomes free or our context is canceled.
if !reqLimiter.Acquire(rCtx) {
//request canceled
break LOOP
}
wg.Add(1)
go func(req models.Req) {
rCtx, span := tracing.NewSpan(rCtx, s.Tracer, "getTargetsLocal")
req.Trace(span)
pre := time.Now()
points, interval, err := s.getTarget(rCtx, req)
if err != nil {
tags.Error.Set(span, true)
cancel() // cancel all other requests.
responses <- getTargetsResp{nil, err}
} else {
getTargetDuration.Value(time.Now().Sub(pre))
responses <- getTargetsResp{[]models.Series{{
Target: req.Target, // always simply the metric name from index
Datapoints: points,
Interval: interval,
QueryPatt: req.Pattern, // foo.* or foo.bar whatever the etName arg was
QueryFrom: req.From,
QueryTo: req.To,
QueryCons: req.ConsReq,
Consolidator: req.Consolidator,
}}, nil}
}
wg.Done()
// pop an item of our limiter so that other requests can be processed.
reqLimiter.Release()
span.Finish()
}(req)
}
go func() {
wg.Wait()
close(responses)
}()
out := make([]models.Series, 0, len(reqs))
for resp := range responses {
if resp.err != nil {
return nil, resp.err
}
out = append(out, resp.series...)
}
log.Debugf("DP getTargetsLocal: %d series found locally", len(out))
return out, nil
}
|
[
"func",
"(",
"s",
"*",
"Server",
")",
"getTargetsLocal",
"(",
"ctx",
"context",
".",
"Context",
",",
"reqs",
"[",
"]",
"models",
".",
"Req",
")",
"(",
"[",
"]",
"models",
".",
"Series",
",",
"error",
")",
"{",
"log",
".",
"Debugf",
"(",
"\"",
"\"",
",",
"len",
"(",
"reqs",
")",
")",
"\n",
"responses",
":=",
"make",
"(",
"chan",
"getTargetsResp",
",",
"len",
"(",
"reqs",
")",
")",
"\n\n",
"var",
"wg",
"sync",
".",
"WaitGroup",
"\n",
"reqLimiter",
":=",
"util",
".",
"NewLimiter",
"(",
"getTargetsConcurrency",
")",
"\n\n",
"rCtx",
",",
"cancel",
":=",
"context",
".",
"WithCancel",
"(",
"ctx",
")",
"\n",
"defer",
"cancel",
"(",
")",
"\n",
"LOOP",
":",
"for",
"_",
",",
"req",
":=",
"range",
"reqs",
"{",
"// if there are already getDataConcurrency goroutines running, then block",
"// until a slot becomes free or our context is canceled.",
"if",
"!",
"reqLimiter",
".",
"Acquire",
"(",
"rCtx",
")",
"{",
"//request canceled",
"break",
"LOOP",
"\n",
"}",
"\n",
"wg",
".",
"Add",
"(",
"1",
")",
"\n",
"go",
"func",
"(",
"req",
"models",
".",
"Req",
")",
"{",
"rCtx",
",",
"span",
":=",
"tracing",
".",
"NewSpan",
"(",
"rCtx",
",",
"s",
".",
"Tracer",
",",
"\"",
"\"",
")",
"\n",
"req",
".",
"Trace",
"(",
"span",
")",
"\n",
"pre",
":=",
"time",
".",
"Now",
"(",
")",
"\n",
"points",
",",
"interval",
",",
"err",
":=",
"s",
".",
"getTarget",
"(",
"rCtx",
",",
"req",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"tags",
".",
"Error",
".",
"Set",
"(",
"span",
",",
"true",
")",
"\n",
"cancel",
"(",
")",
"// cancel all other requests.",
"\n",
"responses",
"<-",
"getTargetsResp",
"{",
"nil",
",",
"err",
"}",
"\n",
"}",
"else",
"{",
"getTargetDuration",
".",
"Value",
"(",
"time",
".",
"Now",
"(",
")",
".",
"Sub",
"(",
"pre",
")",
")",
"\n",
"responses",
"<-",
"getTargetsResp",
"{",
"[",
"]",
"models",
".",
"Series",
"{",
"{",
"Target",
":",
"req",
".",
"Target",
",",
"// always simply the metric name from index",
"Datapoints",
":",
"points",
",",
"Interval",
":",
"interval",
",",
"QueryPatt",
":",
"req",
".",
"Pattern",
",",
"// foo.* or foo.bar whatever the etName arg was",
"QueryFrom",
":",
"req",
".",
"From",
",",
"QueryTo",
":",
"req",
".",
"To",
",",
"QueryCons",
":",
"req",
".",
"ConsReq",
",",
"Consolidator",
":",
"req",
".",
"Consolidator",
",",
"}",
"}",
",",
"nil",
"}",
"\n",
"}",
"\n",
"wg",
".",
"Done",
"(",
")",
"\n",
"// pop an item of our limiter so that other requests can be processed.",
"reqLimiter",
".",
"Release",
"(",
")",
"\n",
"span",
".",
"Finish",
"(",
")",
"\n",
"}",
"(",
"req",
")",
"\n",
"}",
"\n",
"go",
"func",
"(",
")",
"{",
"wg",
".",
"Wait",
"(",
")",
"\n",
"close",
"(",
"responses",
")",
"\n",
"}",
"(",
")",
"\n",
"out",
":=",
"make",
"(",
"[",
"]",
"models",
".",
"Series",
",",
"0",
",",
"len",
"(",
"reqs",
")",
")",
"\n",
"for",
"resp",
":=",
"range",
"responses",
"{",
"if",
"resp",
".",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"resp",
".",
"err",
"\n",
"}",
"\n",
"out",
"=",
"append",
"(",
"out",
",",
"resp",
".",
"series",
"...",
")",
"\n",
"}",
"\n",
"log",
".",
"Debugf",
"(",
"\"",
"\"",
",",
"len",
"(",
"out",
")",
")",
"\n",
"return",
"out",
",",
"nil",
"\n\n",
"}"
] |
// error is the error of the first failing target request
|
[
"error",
"is",
"the",
"error",
"of",
"the",
"first",
"failing",
"target",
"request"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/api/dataprocessor.go#L242-L302
|
train
|
grafana/metrictank
|
api/dataprocessor.go
|
mergeSeries
|
func mergeSeries(in []models.Series) []models.Series {
type segment struct {
target string
query string
from uint32
to uint32
con consolidation.Consolidator
}
seriesByTarget := make(map[segment][]models.Series)
for _, series := range in {
s := segment{
series.Target,
series.QueryPatt,
series.QueryFrom,
series.QueryTo,
series.Consolidator,
}
seriesByTarget[s] = append(seriesByTarget[s], series)
}
merged := make([]models.Series, len(seriesByTarget))
i := 0
for _, series := range seriesByTarget {
if len(series) == 1 {
merged[i] = series[0]
} else {
//we use the first series in the list as our result. We check over every
// point and if it is null, we then check the other series for a non null
// value to use instead.
log.Debugf("DP mergeSeries: %s has multiple series.", series[0].Target)
for i := range series[0].Datapoints {
for j := 0; j < len(series); j++ {
if !math.IsNaN(series[j].Datapoints[i].Val) {
series[0].Datapoints[i].Val = series[j].Datapoints[i].Val
break
}
}
}
merged[i] = series[0]
}
i++
}
return merged
}
|
go
|
func mergeSeries(in []models.Series) []models.Series {
type segment struct {
target string
query string
from uint32
to uint32
con consolidation.Consolidator
}
seriesByTarget := make(map[segment][]models.Series)
for _, series := range in {
s := segment{
series.Target,
series.QueryPatt,
series.QueryFrom,
series.QueryTo,
series.Consolidator,
}
seriesByTarget[s] = append(seriesByTarget[s], series)
}
merged := make([]models.Series, len(seriesByTarget))
i := 0
for _, series := range seriesByTarget {
if len(series) == 1 {
merged[i] = series[0]
} else {
//we use the first series in the list as our result. We check over every
// point and if it is null, we then check the other series for a non null
// value to use instead.
log.Debugf("DP mergeSeries: %s has multiple series.", series[0].Target)
for i := range series[0].Datapoints {
for j := 0; j < len(series); j++ {
if !math.IsNaN(series[j].Datapoints[i].Val) {
series[0].Datapoints[i].Val = series[j].Datapoints[i].Val
break
}
}
}
merged[i] = series[0]
}
i++
}
return merged
}
|
[
"func",
"mergeSeries",
"(",
"in",
"[",
"]",
"models",
".",
"Series",
")",
"[",
"]",
"models",
".",
"Series",
"{",
"type",
"segment",
"struct",
"{",
"target",
"string",
"\n",
"query",
"string",
"\n",
"from",
"uint32",
"\n",
"to",
"uint32",
"\n",
"con",
"consolidation",
".",
"Consolidator",
"\n",
"}",
"\n",
"seriesByTarget",
":=",
"make",
"(",
"map",
"[",
"segment",
"]",
"[",
"]",
"models",
".",
"Series",
")",
"\n",
"for",
"_",
",",
"series",
":=",
"range",
"in",
"{",
"s",
":=",
"segment",
"{",
"series",
".",
"Target",
",",
"series",
".",
"QueryPatt",
",",
"series",
".",
"QueryFrom",
",",
"series",
".",
"QueryTo",
",",
"series",
".",
"Consolidator",
",",
"}",
"\n",
"seriesByTarget",
"[",
"s",
"]",
"=",
"append",
"(",
"seriesByTarget",
"[",
"s",
"]",
",",
"series",
")",
"\n",
"}",
"\n",
"merged",
":=",
"make",
"(",
"[",
"]",
"models",
".",
"Series",
",",
"len",
"(",
"seriesByTarget",
")",
")",
"\n",
"i",
":=",
"0",
"\n",
"for",
"_",
",",
"series",
":=",
"range",
"seriesByTarget",
"{",
"if",
"len",
"(",
"series",
")",
"==",
"1",
"{",
"merged",
"[",
"i",
"]",
"=",
"series",
"[",
"0",
"]",
"\n",
"}",
"else",
"{",
"//we use the first series in the list as our result. We check over every",
"// point and if it is null, we then check the other series for a non null",
"// value to use instead.",
"log",
".",
"Debugf",
"(",
"\"",
"\"",
",",
"series",
"[",
"0",
"]",
".",
"Target",
")",
"\n",
"for",
"i",
":=",
"range",
"series",
"[",
"0",
"]",
".",
"Datapoints",
"{",
"for",
"j",
":=",
"0",
";",
"j",
"<",
"len",
"(",
"series",
")",
";",
"j",
"++",
"{",
"if",
"!",
"math",
".",
"IsNaN",
"(",
"series",
"[",
"j",
"]",
".",
"Datapoints",
"[",
"i",
"]",
".",
"Val",
")",
"{",
"series",
"[",
"0",
"]",
".",
"Datapoints",
"[",
"i",
"]",
".",
"Val",
"=",
"series",
"[",
"j",
"]",
".",
"Datapoints",
"[",
"i",
"]",
".",
"Val",
"\n",
"break",
"\n",
"}",
"\n",
"}",
"\n",
"}",
"\n",
"merged",
"[",
"i",
"]",
"=",
"series",
"[",
"0",
"]",
"\n",
"}",
"\n",
"i",
"++",
"\n",
"}",
"\n",
"return",
"merged",
"\n",
"}"
] |
// check for duplicate series names for the same query. If found merge the results.
|
[
"check",
"for",
"duplicate",
"series",
"names",
"for",
"the",
"same",
"query",
".",
"If",
"found",
"merge",
"the",
"results",
"."
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/api/dataprocessor.go#L592-L634
|
train
|
grafana/metrictank
|
mdata/cwr.go
|
NewChunkWriteRequest
|
func NewChunkWriteRequest(metric *AggMetric, key schema.AMKey, chunk *chunk.Chunk, ttl, span uint32, ts time.Time) ChunkWriteRequest {
return ChunkWriteRequest{metric, key, chunk, ttl, span, ts}
}
|
go
|
func NewChunkWriteRequest(metric *AggMetric, key schema.AMKey, chunk *chunk.Chunk, ttl, span uint32, ts time.Time) ChunkWriteRequest {
return ChunkWriteRequest{metric, key, chunk, ttl, span, ts}
}
|
[
"func",
"NewChunkWriteRequest",
"(",
"metric",
"*",
"AggMetric",
",",
"key",
"schema",
".",
"AMKey",
",",
"chunk",
"*",
"chunk",
".",
"Chunk",
",",
"ttl",
",",
"span",
"uint32",
",",
"ts",
"time",
".",
"Time",
")",
"ChunkWriteRequest",
"{",
"return",
"ChunkWriteRequest",
"{",
"metric",
",",
"key",
",",
"chunk",
",",
"ttl",
",",
"span",
",",
"ts",
"}",
"\n",
"}"
] |
// NewChunkWriteRequest creates a new ChunkWriteRequest
|
[
"NewChunkWriteRequest",
"creates",
"a",
"new",
"ChunkWriteRequest"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/mdata/cwr.go#L21-L23
|
train
|
grafana/metrictank
|
mdata/chunk/tsz/tszlong.go
|
Iter
|
func (s *SeriesLong) Iter() *IterLong {
s.Lock()
w := s.bw.clone()
s.Unlock()
finishV2(w)
iter, _ := bstreamIteratorLong(s.T0, w)
return iter
}
|
go
|
func (s *SeriesLong) Iter() *IterLong {
s.Lock()
w := s.bw.clone()
s.Unlock()
finishV2(w)
iter, _ := bstreamIteratorLong(s.T0, w)
return iter
}
|
[
"func",
"(",
"s",
"*",
"SeriesLong",
")",
"Iter",
"(",
")",
"*",
"IterLong",
"{",
"s",
".",
"Lock",
"(",
")",
"\n",
"w",
":=",
"s",
".",
"bw",
".",
"clone",
"(",
")",
"\n",
"s",
".",
"Unlock",
"(",
")",
"\n\n",
"finishV2",
"(",
"w",
")",
"\n",
"iter",
",",
"_",
":=",
"bstreamIteratorLong",
"(",
"s",
".",
"T0",
",",
"w",
")",
"\n",
"return",
"iter",
"\n",
"}"
] |
// IterLong lets you iterate over a series. It is not concurrency-safe.
|
[
"IterLong",
"lets",
"you",
"iterate",
"over",
"a",
"series",
".",
"It",
"is",
"not",
"concurrency",
"-",
"safe",
"."
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/mdata/chunk/tsz/tszlong.go#L143-L151
|
train
|
grafana/metrictank
|
mdata/chunk/tsz/tszlong.go
|
NewIteratorLong
|
func NewIteratorLong(t0 uint32, b []byte) (*IterLong, error) {
return bstreamIteratorLong(t0, newBReader(b))
}
|
go
|
func NewIteratorLong(t0 uint32, b []byte) (*IterLong, error) {
return bstreamIteratorLong(t0, newBReader(b))
}
|
[
"func",
"NewIteratorLong",
"(",
"t0",
"uint32",
",",
"b",
"[",
"]",
"byte",
")",
"(",
"*",
"IterLong",
",",
"error",
")",
"{",
"return",
"bstreamIteratorLong",
"(",
"t0",
",",
"newBReader",
"(",
"b",
")",
")",
"\n",
"}"
] |
// NewIteratorLong for the series
|
[
"NewIteratorLong",
"for",
"the",
"series"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/mdata/chunk/tsz/tszlong.go#L182-L184
|
train
|
grafana/metrictank
|
mdata/chunk/tsz/tszlong.go
|
Next
|
func (it *IterLong) Next() bool {
if it.err != nil || it.finished {
return false
}
var first bool
if it.t == 0 {
it.t = it.T0
first = true
}
// read delta-of-delta
dod, ok := it.dod()
if !ok {
return false
}
it.tDelta += uint32(dod)
it.t = it.t + it.tDelta
if first {
// first point. read the float raw
v, err := it.br.readBits(64)
if err != nil {
it.err = err
return false
}
it.val = math.Float64frombits(v)
return true
}
// read compressed value
bit, err := it.br.readBit()
if err != nil {
it.err = err
return false
}
if bit == zero {
// it.val = it.val
} else {
bit, itErr := it.br.readBit()
if itErr != nil {
it.err = err
return false
}
if bit == zero {
// reuse leading/trailing zero bits
// it.leading, it.trailing = it.leading, it.trailing
} else {
bits, err := it.br.readBits(5)
if err != nil {
it.err = err
return false
}
it.leading = uint8(bits)
bits, err = it.br.readBits(6)
if err != nil {
it.err = err
return false
}
mbits := uint8(bits)
// 0 significant bits here means we overflowed and we actually need 64; see comment in encoder
if mbits == 0 {
mbits = 64
}
it.trailing = 64 - it.leading - mbits
}
mbits := int(64 - it.leading - it.trailing)
bits, err := it.br.readBits(mbits)
if err != nil {
it.err = err
return false
}
vbits := math.Float64bits(it.val)
vbits ^= (bits << it.trailing)
it.val = math.Float64frombits(vbits)
}
return true
}
|
go
|
func (it *IterLong) Next() bool {
if it.err != nil || it.finished {
return false
}
var first bool
if it.t == 0 {
it.t = it.T0
first = true
}
// read delta-of-delta
dod, ok := it.dod()
if !ok {
return false
}
it.tDelta += uint32(dod)
it.t = it.t + it.tDelta
if first {
// first point. read the float raw
v, err := it.br.readBits(64)
if err != nil {
it.err = err
return false
}
it.val = math.Float64frombits(v)
return true
}
// read compressed value
bit, err := it.br.readBit()
if err != nil {
it.err = err
return false
}
if bit == zero {
// it.val = it.val
} else {
bit, itErr := it.br.readBit()
if itErr != nil {
it.err = err
return false
}
if bit == zero {
// reuse leading/trailing zero bits
// it.leading, it.trailing = it.leading, it.trailing
} else {
bits, err := it.br.readBits(5)
if err != nil {
it.err = err
return false
}
it.leading = uint8(bits)
bits, err = it.br.readBits(6)
if err != nil {
it.err = err
return false
}
mbits := uint8(bits)
// 0 significant bits here means we overflowed and we actually need 64; see comment in encoder
if mbits == 0 {
mbits = 64
}
it.trailing = 64 - it.leading - mbits
}
mbits := int(64 - it.leading - it.trailing)
bits, err := it.br.readBits(mbits)
if err != nil {
it.err = err
return false
}
vbits := math.Float64bits(it.val)
vbits ^= (bits << it.trailing)
it.val = math.Float64frombits(vbits)
}
return true
}
|
[
"func",
"(",
"it",
"*",
"IterLong",
")",
"Next",
"(",
")",
"bool",
"{",
"if",
"it",
".",
"err",
"!=",
"nil",
"||",
"it",
".",
"finished",
"{",
"return",
"false",
"\n",
"}",
"\n\n",
"var",
"first",
"bool",
"\n",
"if",
"it",
".",
"t",
"==",
"0",
"{",
"it",
".",
"t",
"=",
"it",
".",
"T0",
"\n",
"first",
"=",
"true",
"\n",
"}",
"\n\n",
"// read delta-of-delta",
"dod",
",",
"ok",
":=",
"it",
".",
"dod",
"(",
")",
"\n",
"if",
"!",
"ok",
"{",
"return",
"false",
"\n",
"}",
"\n\n",
"it",
".",
"tDelta",
"+=",
"uint32",
"(",
"dod",
")",
"\n",
"it",
".",
"t",
"=",
"it",
".",
"t",
"+",
"it",
".",
"tDelta",
"\n\n",
"if",
"first",
"{",
"// first point. read the float raw",
"v",
",",
"err",
":=",
"it",
".",
"br",
".",
"readBits",
"(",
"64",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"it",
".",
"err",
"=",
"err",
"\n",
"return",
"false",
"\n",
"}",
"\n\n",
"it",
".",
"val",
"=",
"math",
".",
"Float64frombits",
"(",
"v",
")",
"\n",
"return",
"true",
"\n",
"}",
"\n\n",
"// read compressed value",
"bit",
",",
"err",
":=",
"it",
".",
"br",
".",
"readBit",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"it",
".",
"err",
"=",
"err",
"\n",
"return",
"false",
"\n",
"}",
"\n\n",
"if",
"bit",
"==",
"zero",
"{",
"// it.val = it.val",
"}",
"else",
"{",
"bit",
",",
"itErr",
":=",
"it",
".",
"br",
".",
"readBit",
"(",
")",
"\n",
"if",
"itErr",
"!=",
"nil",
"{",
"it",
".",
"err",
"=",
"err",
"\n",
"return",
"false",
"\n",
"}",
"\n",
"if",
"bit",
"==",
"zero",
"{",
"// reuse leading/trailing zero bits",
"// it.leading, it.trailing = it.leading, it.trailing",
"}",
"else",
"{",
"bits",
",",
"err",
":=",
"it",
".",
"br",
".",
"readBits",
"(",
"5",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"it",
".",
"err",
"=",
"err",
"\n",
"return",
"false",
"\n",
"}",
"\n",
"it",
".",
"leading",
"=",
"uint8",
"(",
"bits",
")",
"\n\n",
"bits",
",",
"err",
"=",
"it",
".",
"br",
".",
"readBits",
"(",
"6",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"it",
".",
"err",
"=",
"err",
"\n",
"return",
"false",
"\n",
"}",
"\n",
"mbits",
":=",
"uint8",
"(",
"bits",
")",
"\n",
"// 0 significant bits here means we overflowed and we actually need 64; see comment in encoder",
"if",
"mbits",
"==",
"0",
"{",
"mbits",
"=",
"64",
"\n",
"}",
"\n",
"it",
".",
"trailing",
"=",
"64",
"-",
"it",
".",
"leading",
"-",
"mbits",
"\n",
"}",
"\n\n",
"mbits",
":=",
"int",
"(",
"64",
"-",
"it",
".",
"leading",
"-",
"it",
".",
"trailing",
")",
"\n",
"bits",
",",
"err",
":=",
"it",
".",
"br",
".",
"readBits",
"(",
"mbits",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"it",
".",
"err",
"=",
"err",
"\n",
"return",
"false",
"\n",
"}",
"\n",
"vbits",
":=",
"math",
".",
"Float64bits",
"(",
"it",
".",
"val",
")",
"\n",
"vbits",
"^=",
"(",
"bits",
"<<",
"it",
".",
"trailing",
")",
"\n",
"it",
".",
"val",
"=",
"math",
".",
"Float64frombits",
"(",
"vbits",
")",
"\n",
"}",
"\n\n",
"return",
"true",
"\n",
"}"
] |
// Next iteration of the series iterator
|
[
"Next",
"iteration",
"of",
"the",
"series",
"iterator"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/mdata/chunk/tsz/tszlong.go#L241-L325
|
train
|
grafana/metrictank
|
stats/tick.go
|
tick
|
func tick(period time.Duration) chan time.Time {
ch := make(chan time.Time)
go func() {
for {
now := time.Now()
nowUnix := now.UnixNano()
diff := period - (time.Duration(nowUnix) % period)
ideal := now.Add(diff)
time.Sleep(diff)
// try to write, if it blocks, skip the tick
select {
case ch <- ideal:
default:
}
}
}()
return ch
}
|
go
|
func tick(period time.Duration) chan time.Time {
ch := make(chan time.Time)
go func() {
for {
now := time.Now()
nowUnix := now.UnixNano()
diff := period - (time.Duration(nowUnix) % period)
ideal := now.Add(diff)
time.Sleep(diff)
// try to write, if it blocks, skip the tick
select {
case ch <- ideal:
default:
}
}
}()
return ch
}
|
[
"func",
"tick",
"(",
"period",
"time",
".",
"Duration",
")",
"chan",
"time",
".",
"Time",
"{",
"ch",
":=",
"make",
"(",
"chan",
"time",
".",
"Time",
")",
"\n",
"go",
"func",
"(",
")",
"{",
"for",
"{",
"now",
":=",
"time",
".",
"Now",
"(",
")",
"\n",
"nowUnix",
":=",
"now",
".",
"UnixNano",
"(",
")",
"\n",
"diff",
":=",
"period",
"-",
"(",
"time",
".",
"Duration",
"(",
"nowUnix",
")",
"%",
"period",
")",
"\n",
"ideal",
":=",
"now",
".",
"Add",
"(",
"diff",
")",
"\n",
"time",
".",
"Sleep",
"(",
"diff",
")",
"\n\n",
"// try to write, if it blocks, skip the tick",
"select",
"{",
"case",
"ch",
"<-",
"ideal",
":",
"default",
":",
"}",
"\n",
"}",
"\n",
"}",
"(",
")",
"\n",
"return",
"ch",
"\n",
"}"
] |
// provides "clean" ticks at precise intervals, and delivers them shortly after
|
[
"provides",
"clean",
"ticks",
"at",
"precise",
"intervals",
"and",
"delivers",
"them",
"shortly",
"after"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/stats/tick.go#L6-L24
|
train
|
grafana/metrictank
|
api/models/request.go
|
Trace
|
func (r Req) Trace(span opentracing.Span) {
span.SetTag("key", r.MKey)
span.SetTag("target", r.Target)
span.SetTag("pattern", r.Pattern)
span.SetTag("from", r.From)
span.SetTag("to", r.To)
span.SetTag("span", r.To-r.From-1)
span.SetTag("mdp", r.MaxPoints)
span.SetTag("rawInterval", r.RawInterval)
span.SetTag("cons", r.Consolidator)
span.SetTag("consReq", r.ConsReq)
span.SetTag("schemaId", r.SchemaId)
span.SetTag("aggId", r.AggId)
span.SetTag("archive", r.Archive)
span.SetTag("archInterval", r.ArchInterval)
span.SetTag("TTL", r.TTL)
span.SetTag("outInterval", r.OutInterval)
span.SetTag("aggNum", r.AggNum)
}
|
go
|
func (r Req) Trace(span opentracing.Span) {
span.SetTag("key", r.MKey)
span.SetTag("target", r.Target)
span.SetTag("pattern", r.Pattern)
span.SetTag("from", r.From)
span.SetTag("to", r.To)
span.SetTag("span", r.To-r.From-1)
span.SetTag("mdp", r.MaxPoints)
span.SetTag("rawInterval", r.RawInterval)
span.SetTag("cons", r.Consolidator)
span.SetTag("consReq", r.ConsReq)
span.SetTag("schemaId", r.SchemaId)
span.SetTag("aggId", r.AggId)
span.SetTag("archive", r.Archive)
span.SetTag("archInterval", r.ArchInterval)
span.SetTag("TTL", r.TTL)
span.SetTag("outInterval", r.OutInterval)
span.SetTag("aggNum", r.AggNum)
}
|
[
"func",
"(",
"r",
"Req",
")",
"Trace",
"(",
"span",
"opentracing",
".",
"Span",
")",
"{",
"span",
".",
"SetTag",
"(",
"\"",
"\"",
",",
"r",
".",
"MKey",
")",
"\n",
"span",
".",
"SetTag",
"(",
"\"",
"\"",
",",
"r",
".",
"Target",
")",
"\n",
"span",
".",
"SetTag",
"(",
"\"",
"\"",
",",
"r",
".",
"Pattern",
")",
"\n",
"span",
".",
"SetTag",
"(",
"\"",
"\"",
",",
"r",
".",
"From",
")",
"\n",
"span",
".",
"SetTag",
"(",
"\"",
"\"",
",",
"r",
".",
"To",
")",
"\n",
"span",
".",
"SetTag",
"(",
"\"",
"\"",
",",
"r",
".",
"To",
"-",
"r",
".",
"From",
"-",
"1",
")",
"\n",
"span",
".",
"SetTag",
"(",
"\"",
"\"",
",",
"r",
".",
"MaxPoints",
")",
"\n",
"span",
".",
"SetTag",
"(",
"\"",
"\"",
",",
"r",
".",
"RawInterval",
")",
"\n",
"span",
".",
"SetTag",
"(",
"\"",
"\"",
",",
"r",
".",
"Consolidator",
")",
"\n",
"span",
".",
"SetTag",
"(",
"\"",
"\"",
",",
"r",
".",
"ConsReq",
")",
"\n",
"span",
".",
"SetTag",
"(",
"\"",
"\"",
",",
"r",
".",
"SchemaId",
")",
"\n",
"span",
".",
"SetTag",
"(",
"\"",
"\"",
",",
"r",
".",
"AggId",
")",
"\n",
"span",
".",
"SetTag",
"(",
"\"",
"\"",
",",
"r",
".",
"Archive",
")",
"\n",
"span",
".",
"SetTag",
"(",
"\"",
"\"",
",",
"r",
".",
"ArchInterval",
")",
"\n",
"span",
".",
"SetTag",
"(",
"\"",
"\"",
",",
"r",
".",
"TTL",
")",
"\n",
"span",
".",
"SetTag",
"(",
"\"",
"\"",
",",
"r",
".",
"OutInterval",
")",
"\n",
"span",
".",
"SetTag",
"(",
"\"",
"\"",
",",
"r",
".",
"AggNum",
")",
"\n",
"}"
] |
// Trace puts all request properties as tags in a span
// good for when a span deals with 1 request
|
[
"Trace",
"puts",
"all",
"request",
"properties",
"as",
"tags",
"in",
"a",
"span",
"good",
"for",
"when",
"a",
"span",
"deals",
"with",
"1",
"request"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/api/models/request.go#L75-L93
|
train
|
grafana/metrictank
|
api/models/request.go
|
TraceLog
|
func (r Req) TraceLog(span opentracing.Span) {
span.LogFields(
log.Object("key", r.MKey),
log.String("target", r.Target),
log.String("pattern", r.Pattern),
log.Int("from", int(r.From)),
log.Int("to", int(r.To)),
log.Int("span", int(r.To-r.From-1)),
log.Int("mdp", int(r.MaxPoints)),
log.Int("rawInterval", int(r.RawInterval)),
log.String("cons", r.Consolidator.String()),
log.String("consReq", r.ConsReq.String()),
log.Int("schemaId", int(r.SchemaId)),
log.Int("aggId", int(r.AggId)),
log.Int("archive", r.Archive),
log.Int("archInterval", int(r.ArchInterval)),
log.Int("TTL", int(r.TTL)),
log.Int("outInterval", int(r.OutInterval)),
log.Int("aggNum", int(r.AggNum)),
)
}
|
go
|
func (r Req) TraceLog(span opentracing.Span) {
span.LogFields(
log.Object("key", r.MKey),
log.String("target", r.Target),
log.String("pattern", r.Pattern),
log.Int("from", int(r.From)),
log.Int("to", int(r.To)),
log.Int("span", int(r.To-r.From-1)),
log.Int("mdp", int(r.MaxPoints)),
log.Int("rawInterval", int(r.RawInterval)),
log.String("cons", r.Consolidator.String()),
log.String("consReq", r.ConsReq.String()),
log.Int("schemaId", int(r.SchemaId)),
log.Int("aggId", int(r.AggId)),
log.Int("archive", r.Archive),
log.Int("archInterval", int(r.ArchInterval)),
log.Int("TTL", int(r.TTL)),
log.Int("outInterval", int(r.OutInterval)),
log.Int("aggNum", int(r.AggNum)),
)
}
|
[
"func",
"(",
"r",
"Req",
")",
"TraceLog",
"(",
"span",
"opentracing",
".",
"Span",
")",
"{",
"span",
".",
"LogFields",
"(",
"log",
".",
"Object",
"(",
"\"",
"\"",
",",
"r",
".",
"MKey",
")",
",",
"log",
".",
"String",
"(",
"\"",
"\"",
",",
"r",
".",
"Target",
")",
",",
"log",
".",
"String",
"(",
"\"",
"\"",
",",
"r",
".",
"Pattern",
")",
",",
"log",
".",
"Int",
"(",
"\"",
"\"",
",",
"int",
"(",
"r",
".",
"From",
")",
")",
",",
"log",
".",
"Int",
"(",
"\"",
"\"",
",",
"int",
"(",
"r",
".",
"To",
")",
")",
",",
"log",
".",
"Int",
"(",
"\"",
"\"",
",",
"int",
"(",
"r",
".",
"To",
"-",
"r",
".",
"From",
"-",
"1",
")",
")",
",",
"log",
".",
"Int",
"(",
"\"",
"\"",
",",
"int",
"(",
"r",
".",
"MaxPoints",
")",
")",
",",
"log",
".",
"Int",
"(",
"\"",
"\"",
",",
"int",
"(",
"r",
".",
"RawInterval",
")",
")",
",",
"log",
".",
"String",
"(",
"\"",
"\"",
",",
"r",
".",
"Consolidator",
".",
"String",
"(",
")",
")",
",",
"log",
".",
"String",
"(",
"\"",
"\"",
",",
"r",
".",
"ConsReq",
".",
"String",
"(",
")",
")",
",",
"log",
".",
"Int",
"(",
"\"",
"\"",
",",
"int",
"(",
"r",
".",
"SchemaId",
")",
")",
",",
"log",
".",
"Int",
"(",
"\"",
"\"",
",",
"int",
"(",
"r",
".",
"AggId",
")",
")",
",",
"log",
".",
"Int",
"(",
"\"",
"\"",
",",
"r",
".",
"Archive",
")",
",",
"log",
".",
"Int",
"(",
"\"",
"\"",
",",
"int",
"(",
"r",
".",
"ArchInterval",
")",
")",
",",
"log",
".",
"Int",
"(",
"\"",
"\"",
",",
"int",
"(",
"r",
".",
"TTL",
")",
")",
",",
"log",
".",
"Int",
"(",
"\"",
"\"",
",",
"int",
"(",
"r",
".",
"OutInterval",
")",
")",
",",
"log",
".",
"Int",
"(",
"\"",
"\"",
",",
"int",
"(",
"r",
".",
"AggNum",
")",
")",
",",
")",
"\n",
"}"
] |
// TraceLog puts all request properties in a span log entry
// good for when a span deals with multiple requests
// note that the amount of data generated here can be up to
// 1000~1500 bytes
|
[
"TraceLog",
"puts",
"all",
"request",
"properties",
"in",
"a",
"span",
"log",
"entry",
"good",
"for",
"when",
"a",
"span",
"deals",
"with",
"multiple",
"requests",
"note",
"that",
"the",
"amount",
"of",
"data",
"generated",
"here",
"can",
"be",
"up",
"to",
"1000~1500",
"bytes"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/api/models/request.go#L99-L119
|
train
|
grafana/metrictank
|
conf/aggregations.go
|
NewAggregations
|
func NewAggregations() Aggregations {
return Aggregations{
Data: make([]Aggregation, 0),
DefaultAggregation: Aggregation{
Name: "default",
Pattern: regexp.MustCompile(".*"),
XFilesFactor: 0.5,
AggregationMethod: []Method{Avg},
},
}
}
|
go
|
func NewAggregations() Aggregations {
return Aggregations{
Data: make([]Aggregation, 0),
DefaultAggregation: Aggregation{
Name: "default",
Pattern: regexp.MustCompile(".*"),
XFilesFactor: 0.5,
AggregationMethod: []Method{Avg},
},
}
}
|
[
"func",
"NewAggregations",
"(",
")",
"Aggregations",
"{",
"return",
"Aggregations",
"{",
"Data",
":",
"make",
"(",
"[",
"]",
"Aggregation",
",",
"0",
")",
",",
"DefaultAggregation",
":",
"Aggregation",
"{",
"Name",
":",
"\"",
"\"",
",",
"Pattern",
":",
"regexp",
".",
"MustCompile",
"(",
"\"",
"\"",
")",
",",
"XFilesFactor",
":",
"0.5",
",",
"AggregationMethod",
":",
"[",
"]",
"Method",
"{",
"Avg",
"}",
",",
"}",
",",
"}",
"\n",
"}"
] |
// NewAggregations create instance of Aggregations
|
[
"NewAggregations",
"create",
"instance",
"of",
"Aggregations"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/conf/aggregations.go#L26-L36
|
train
|
grafana/metrictank
|
conf/aggregations.go
|
ReadAggregations
|
func ReadAggregations(file string) (Aggregations, error) {
config, err := configparser.Read(file)
if err != nil {
return Aggregations{}, err
}
sections, err := config.AllSections()
if err != nil {
return Aggregations{}, err
}
result := NewAggregations()
for _, s := range sections {
item := Aggregation{}
item.Name = strings.Trim(strings.SplitN(s.String(), "\n", 2)[0], " []")
if item.Name == "" || strings.HasPrefix(item.Name, "#") {
continue
}
item.Pattern, err = regexp.Compile(s.ValueOf("pattern"))
if err != nil {
return Aggregations{}, fmt.Errorf("[%s]: failed to parse pattern %q: %s", item.Name, s.ValueOf("pattern"), err.Error())
}
item.XFilesFactor, err = strconv.ParseFloat(s.ValueOf("xFilesFactor"), 64)
if err != nil {
return Aggregations{}, fmt.Errorf("[%s]: failed to parse xFilesFactor %q: %s", item.Name, s.ValueOf("xFilesFactor"), err.Error())
}
aggregationMethodStr := s.ValueOf("aggregationMethod")
methodStrs := strings.Split(aggregationMethodStr, ",")
for _, methodStr := range methodStrs {
switch methodStr {
case "average", "avg":
item.AggregationMethod = append(item.AggregationMethod, Avg)
case "sum":
item.AggregationMethod = append(item.AggregationMethod, Sum)
case "last":
item.AggregationMethod = append(item.AggregationMethod, Lst)
case "max":
item.AggregationMethod = append(item.AggregationMethod, Max)
case "min":
item.AggregationMethod = append(item.AggregationMethod, Min)
default:
return result, fmt.Errorf("[%s]: unknown aggregation method %q", item.Name, methodStr)
}
}
result.Data = append(result.Data, item)
}
return result, nil
}
|
go
|
func ReadAggregations(file string) (Aggregations, error) {
config, err := configparser.Read(file)
if err != nil {
return Aggregations{}, err
}
sections, err := config.AllSections()
if err != nil {
return Aggregations{}, err
}
result := NewAggregations()
for _, s := range sections {
item := Aggregation{}
item.Name = strings.Trim(strings.SplitN(s.String(), "\n", 2)[0], " []")
if item.Name == "" || strings.HasPrefix(item.Name, "#") {
continue
}
item.Pattern, err = regexp.Compile(s.ValueOf("pattern"))
if err != nil {
return Aggregations{}, fmt.Errorf("[%s]: failed to parse pattern %q: %s", item.Name, s.ValueOf("pattern"), err.Error())
}
item.XFilesFactor, err = strconv.ParseFloat(s.ValueOf("xFilesFactor"), 64)
if err != nil {
return Aggregations{}, fmt.Errorf("[%s]: failed to parse xFilesFactor %q: %s", item.Name, s.ValueOf("xFilesFactor"), err.Error())
}
aggregationMethodStr := s.ValueOf("aggregationMethod")
methodStrs := strings.Split(aggregationMethodStr, ",")
for _, methodStr := range methodStrs {
switch methodStr {
case "average", "avg":
item.AggregationMethod = append(item.AggregationMethod, Avg)
case "sum":
item.AggregationMethod = append(item.AggregationMethod, Sum)
case "last":
item.AggregationMethod = append(item.AggregationMethod, Lst)
case "max":
item.AggregationMethod = append(item.AggregationMethod, Max)
case "min":
item.AggregationMethod = append(item.AggregationMethod, Min)
default:
return result, fmt.Errorf("[%s]: unknown aggregation method %q", item.Name, methodStr)
}
}
result.Data = append(result.Data, item)
}
return result, nil
}
|
[
"func",
"ReadAggregations",
"(",
"file",
"string",
")",
"(",
"Aggregations",
",",
"error",
")",
"{",
"config",
",",
"err",
":=",
"configparser",
".",
"Read",
"(",
"file",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"Aggregations",
"{",
"}",
",",
"err",
"\n",
"}",
"\n",
"sections",
",",
"err",
":=",
"config",
".",
"AllSections",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"Aggregations",
"{",
"}",
",",
"err",
"\n",
"}",
"\n\n",
"result",
":=",
"NewAggregations",
"(",
")",
"\n\n",
"for",
"_",
",",
"s",
":=",
"range",
"sections",
"{",
"item",
":=",
"Aggregation",
"{",
"}",
"\n",
"item",
".",
"Name",
"=",
"strings",
".",
"Trim",
"(",
"strings",
".",
"SplitN",
"(",
"s",
".",
"String",
"(",
")",
",",
"\"",
"\\n",
"\"",
",",
"2",
")",
"[",
"0",
"]",
",",
"\"",
"\"",
")",
"\n",
"if",
"item",
".",
"Name",
"==",
"\"",
"\"",
"||",
"strings",
".",
"HasPrefix",
"(",
"item",
".",
"Name",
",",
"\"",
"\"",
")",
"{",
"continue",
"\n",
"}",
"\n\n",
"item",
".",
"Pattern",
",",
"err",
"=",
"regexp",
".",
"Compile",
"(",
"s",
".",
"ValueOf",
"(",
"\"",
"\"",
")",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"Aggregations",
"{",
"}",
",",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"item",
".",
"Name",
",",
"s",
".",
"ValueOf",
"(",
"\"",
"\"",
")",
",",
"err",
".",
"Error",
"(",
")",
")",
"\n",
"}",
"\n\n",
"item",
".",
"XFilesFactor",
",",
"err",
"=",
"strconv",
".",
"ParseFloat",
"(",
"s",
".",
"ValueOf",
"(",
"\"",
"\"",
")",
",",
"64",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"Aggregations",
"{",
"}",
",",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"item",
".",
"Name",
",",
"s",
".",
"ValueOf",
"(",
"\"",
"\"",
")",
",",
"err",
".",
"Error",
"(",
")",
")",
"\n",
"}",
"\n\n",
"aggregationMethodStr",
":=",
"s",
".",
"ValueOf",
"(",
"\"",
"\"",
")",
"\n",
"methodStrs",
":=",
"strings",
".",
"Split",
"(",
"aggregationMethodStr",
",",
"\"",
"\"",
")",
"\n",
"for",
"_",
",",
"methodStr",
":=",
"range",
"methodStrs",
"{",
"switch",
"methodStr",
"{",
"case",
"\"",
"\"",
",",
"\"",
"\"",
":",
"item",
".",
"AggregationMethod",
"=",
"append",
"(",
"item",
".",
"AggregationMethod",
",",
"Avg",
")",
"\n",
"case",
"\"",
"\"",
":",
"item",
".",
"AggregationMethod",
"=",
"append",
"(",
"item",
".",
"AggregationMethod",
",",
"Sum",
")",
"\n",
"case",
"\"",
"\"",
":",
"item",
".",
"AggregationMethod",
"=",
"append",
"(",
"item",
".",
"AggregationMethod",
",",
"Lst",
")",
"\n",
"case",
"\"",
"\"",
":",
"item",
".",
"AggregationMethod",
"=",
"append",
"(",
"item",
".",
"AggregationMethod",
",",
"Max",
")",
"\n",
"case",
"\"",
"\"",
":",
"item",
".",
"AggregationMethod",
"=",
"append",
"(",
"item",
".",
"AggregationMethod",
",",
"Min",
")",
"\n",
"default",
":",
"return",
"result",
",",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"item",
".",
"Name",
",",
"methodStr",
")",
"\n",
"}",
"\n",
"}",
"\n\n",
"result",
".",
"Data",
"=",
"append",
"(",
"result",
".",
"Data",
",",
"item",
")",
"\n",
"}",
"\n\n",
"return",
"result",
",",
"nil",
"\n",
"}"
] |
// ReadAggregations returns the defined aggregations from a storage-aggregation.conf file
// and adds the default
|
[
"ReadAggregations",
"returns",
"the",
"defined",
"aggregations",
"from",
"a",
"storage",
"-",
"aggregation",
".",
"conf",
"file",
"and",
"adds",
"the",
"default"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/conf/aggregations.go#L40-L92
|
train
|
grafana/metrictank
|
conf/aggregations.go
|
Match
|
func (a Aggregations) Match(metric string) (uint16, Aggregation) {
for i, s := range a.Data {
if s.Pattern.MatchString(metric) {
return uint16(i), s
}
}
return uint16(len(a.Data)), a.DefaultAggregation
}
|
go
|
func (a Aggregations) Match(metric string) (uint16, Aggregation) {
for i, s := range a.Data {
if s.Pattern.MatchString(metric) {
return uint16(i), s
}
}
return uint16(len(a.Data)), a.DefaultAggregation
}
|
[
"func",
"(",
"a",
"Aggregations",
")",
"Match",
"(",
"metric",
"string",
")",
"(",
"uint16",
",",
"Aggregation",
")",
"{",
"for",
"i",
",",
"s",
":=",
"range",
"a",
".",
"Data",
"{",
"if",
"s",
".",
"Pattern",
".",
"MatchString",
"(",
"metric",
")",
"{",
"return",
"uint16",
"(",
"i",
")",
",",
"s",
"\n",
"}",
"\n",
"}",
"\n",
"return",
"uint16",
"(",
"len",
"(",
"a",
".",
"Data",
")",
")",
",",
"a",
".",
"DefaultAggregation",
"\n",
"}"
] |
// Match returns the correct aggregation setting for the given metric
// it can always find a valid setting, because there's a default catch all
// also returns the index of the setting, to efficiently reference it
|
[
"Match",
"returns",
"the",
"correct",
"aggregation",
"setting",
"for",
"the",
"given",
"metric",
"it",
"can",
"always",
"find",
"a",
"valid",
"setting",
"because",
"there",
"s",
"a",
"default",
"catch",
"all",
"also",
"returns",
"the",
"index",
"of",
"the",
"setting",
"to",
"efficiently",
"reference",
"it"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/conf/aggregations.go#L97-L104
|
train
|
grafana/metrictank
|
conf/aggregations.go
|
Get
|
func (a Aggregations) Get(i uint16) Aggregation {
if i+1 > uint16(len(a.Data)) {
return a.DefaultAggregation
}
return a.Data[i]
}
|
go
|
func (a Aggregations) Get(i uint16) Aggregation {
if i+1 > uint16(len(a.Data)) {
return a.DefaultAggregation
}
return a.Data[i]
}
|
[
"func",
"(",
"a",
"Aggregations",
")",
"Get",
"(",
"i",
"uint16",
")",
"Aggregation",
"{",
"if",
"i",
"+",
"1",
">",
"uint16",
"(",
"len",
"(",
"a",
".",
"Data",
")",
")",
"{",
"return",
"a",
".",
"DefaultAggregation",
"\n",
"}",
"\n",
"return",
"a",
".",
"Data",
"[",
"i",
"]",
"\n",
"}"
] |
// Get returns the aggregation setting corresponding to the given index
|
[
"Get",
"returns",
"the",
"aggregation",
"setting",
"corresponding",
"to",
"the",
"given",
"index"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/conf/aggregations.go#L107-L112
|
train
|
grafana/metrictank
|
idx/idx.go
|
NewArchiveBare
|
func NewArchiveBare(name string) Archive {
return Archive{
MetricDefinition: schema.MetricDefinition{
Name: name,
},
}
}
|
go
|
func NewArchiveBare(name string) Archive {
return Archive{
MetricDefinition: schema.MetricDefinition{
Name: name,
},
}
}
|
[
"func",
"NewArchiveBare",
"(",
"name",
"string",
")",
"Archive",
"{",
"return",
"Archive",
"{",
"MetricDefinition",
":",
"schema",
".",
"MetricDefinition",
"{",
"Name",
":",
"name",
",",
"}",
",",
"}",
"\n",
"}"
] |
// used primarily by tests, for convenience
|
[
"used",
"primarily",
"by",
"tests",
"for",
"convenience"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/idx/idx.go#L30-L36
|
train
|
grafana/metrictank
|
api/middleware/stats.go
|
RequestStats
|
func RequestStats() macaron.Handler {
stats := requestStats{
responseCounts: make(map[string]map[int]*stats.Counter32),
latencyHistograms: make(map[string]*stats.LatencyHistogram15s32),
sizeMeters: make(map[string]*stats.Meter32),
}
return func(ctx *macaron.Context) {
start := time.Now()
rw := ctx.Resp.(macaron.ResponseWriter)
// call next handler. This will return after all handlers
// have completed and the request has been sent.
ctx.Next()
status := rw.Status()
path := pathSlug(ctx.Req.URL.Path)
// graphite cluster requests use local=1
// this way we can differentiate "full" render requests from client to MT (encompassing data processing, proxing to graphite, etc)
// from "subrequests" where metrictank is called by graphite and graphite does the processing and returns to the client
if ctx.Req.Request.Form.Get("local") == "1" {
path += "-local"
}
stats.PathStatusCount(path, status)
stats.PathLatency(path, time.Since(start))
// only record the request size if the request succeeded.
if status < 300 {
stats.PathSize(path, rw.Size())
}
}
}
|
go
|
func RequestStats() macaron.Handler {
stats := requestStats{
responseCounts: make(map[string]map[int]*stats.Counter32),
latencyHistograms: make(map[string]*stats.LatencyHistogram15s32),
sizeMeters: make(map[string]*stats.Meter32),
}
return func(ctx *macaron.Context) {
start := time.Now()
rw := ctx.Resp.(macaron.ResponseWriter)
// call next handler. This will return after all handlers
// have completed and the request has been sent.
ctx.Next()
status := rw.Status()
path := pathSlug(ctx.Req.URL.Path)
// graphite cluster requests use local=1
// this way we can differentiate "full" render requests from client to MT (encompassing data processing, proxing to graphite, etc)
// from "subrequests" where metrictank is called by graphite and graphite does the processing and returns to the client
if ctx.Req.Request.Form.Get("local") == "1" {
path += "-local"
}
stats.PathStatusCount(path, status)
stats.PathLatency(path, time.Since(start))
// only record the request size if the request succeeded.
if status < 300 {
stats.PathSize(path, rw.Size())
}
}
}
|
[
"func",
"RequestStats",
"(",
")",
"macaron",
".",
"Handler",
"{",
"stats",
":=",
"requestStats",
"{",
"responseCounts",
":",
"make",
"(",
"map",
"[",
"string",
"]",
"map",
"[",
"int",
"]",
"*",
"stats",
".",
"Counter32",
")",
",",
"latencyHistograms",
":",
"make",
"(",
"map",
"[",
"string",
"]",
"*",
"stats",
".",
"LatencyHistogram15s32",
")",
",",
"sizeMeters",
":",
"make",
"(",
"map",
"[",
"string",
"]",
"*",
"stats",
".",
"Meter32",
")",
",",
"}",
"\n\n",
"return",
"func",
"(",
"ctx",
"*",
"macaron",
".",
"Context",
")",
"{",
"start",
":=",
"time",
".",
"Now",
"(",
")",
"\n",
"rw",
":=",
"ctx",
".",
"Resp",
".",
"(",
"macaron",
".",
"ResponseWriter",
")",
"\n",
"// call next handler. This will return after all handlers",
"// have completed and the request has been sent.",
"ctx",
".",
"Next",
"(",
")",
"\n",
"status",
":=",
"rw",
".",
"Status",
"(",
")",
"\n",
"path",
":=",
"pathSlug",
"(",
"ctx",
".",
"Req",
".",
"URL",
".",
"Path",
")",
"\n",
"// graphite cluster requests use local=1",
"// this way we can differentiate \"full\" render requests from client to MT (encompassing data processing, proxing to graphite, etc)",
"// from \"subrequests\" where metrictank is called by graphite and graphite does the processing and returns to the client",
"if",
"ctx",
".",
"Req",
".",
"Request",
".",
"Form",
".",
"Get",
"(",
"\"",
"\"",
")",
"==",
"\"",
"\"",
"{",
"path",
"+=",
"\"",
"\"",
"\n",
"}",
"\n",
"stats",
".",
"PathStatusCount",
"(",
"path",
",",
"status",
")",
"\n",
"stats",
".",
"PathLatency",
"(",
"path",
",",
"time",
".",
"Since",
"(",
"start",
")",
")",
"\n",
"// only record the request size if the request succeeded.",
"if",
"status",
"<",
"300",
"{",
"stats",
".",
"PathSize",
"(",
"path",
",",
"rw",
".",
"Size",
"(",
")",
")",
"\n",
"}",
"\n",
"}",
"\n",
"}"
] |
// RequestStats returns a middleware that tracks request metrics.
|
[
"RequestStats",
"returns",
"a",
"middleware",
"that",
"tracks",
"request",
"metrics",
"."
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/api/middleware/stats.go#L64-L92
|
train
|
grafana/metrictank
|
api/response/error.go
|
WrapErrorForTagDB
|
func WrapErrorForTagDB(e error) *ErrorResp {
b, err := json.Marshal(TagDBError{Error: e.Error()})
if err != nil {
return &ErrorResp{
err: "{\"error\": \"failed to encode error message\"}",
code: http.StatusInternalServerError,
}
}
resp := &ErrorResp{
err: string(b),
code: http.StatusInternalServerError,
}
if _, ok := e.(Error); ok {
resp.code = e.(Error).Code()
}
resp.ValidateAndFixCode()
return resp
}
|
go
|
func WrapErrorForTagDB(e error) *ErrorResp {
b, err := json.Marshal(TagDBError{Error: e.Error()})
if err != nil {
return &ErrorResp{
err: "{\"error\": \"failed to encode error message\"}",
code: http.StatusInternalServerError,
}
}
resp := &ErrorResp{
err: string(b),
code: http.StatusInternalServerError,
}
if _, ok := e.(Error); ok {
resp.code = e.(Error).Code()
}
resp.ValidateAndFixCode()
return resp
}
|
[
"func",
"WrapErrorForTagDB",
"(",
"e",
"error",
")",
"*",
"ErrorResp",
"{",
"b",
",",
"err",
":=",
"json",
".",
"Marshal",
"(",
"TagDBError",
"{",
"Error",
":",
"e",
".",
"Error",
"(",
")",
"}",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"&",
"ErrorResp",
"{",
"err",
":",
"\"",
"\\\"",
"\\\"",
"\\\"",
"\\\"",
"\"",
",",
"code",
":",
"http",
".",
"StatusInternalServerError",
",",
"}",
"\n",
"}",
"\n\n",
"resp",
":=",
"&",
"ErrorResp",
"{",
"err",
":",
"string",
"(",
"b",
")",
",",
"code",
":",
"http",
".",
"StatusInternalServerError",
",",
"}",
"\n\n",
"if",
"_",
",",
"ok",
":=",
"e",
".",
"(",
"Error",
")",
";",
"ok",
"{",
"resp",
".",
"code",
"=",
"e",
".",
"(",
"Error",
")",
".",
"Code",
"(",
")",
"\n",
"}",
"\n\n",
"resp",
".",
"ValidateAndFixCode",
"(",
")",
"\n",
"return",
"resp",
"\n",
"}"
] |
// graphite's http tagdb client requires a specific error format
|
[
"graphite",
"s",
"http",
"tagdb",
"client",
"requires",
"a",
"specific",
"error",
"format"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/api/response/error.go#L43-L63
|
train
|
grafana/metrictank
|
idx/cassandra/config.go
|
NewIdxConfig
|
func NewIdxConfig() *IdxConfig {
return &IdxConfig{
Enabled: true,
hosts: "localhost:9042",
keyspace: "metrictank",
consistency: "one",
timeout: time.Second,
numConns: 10,
writeQueueSize: 100000,
updateCassIdx: true,
updateInterval: time.Hour * 3,
pruneInterval: time.Hour * 3,
protoVer: 4,
createKeyspace: true,
schemaFile: "/etc/metrictank/schema-idx-cassandra.toml",
disableInitialHostLookup: false,
ssl: false,
capath: "/etc/metrictank/ca.pem",
hostverification: true,
auth: false,
username: "cassandra",
password: "cassandra",
initLoadConcurrency: 1,
}
}
|
go
|
func NewIdxConfig() *IdxConfig {
return &IdxConfig{
Enabled: true,
hosts: "localhost:9042",
keyspace: "metrictank",
consistency: "one",
timeout: time.Second,
numConns: 10,
writeQueueSize: 100000,
updateCassIdx: true,
updateInterval: time.Hour * 3,
pruneInterval: time.Hour * 3,
protoVer: 4,
createKeyspace: true,
schemaFile: "/etc/metrictank/schema-idx-cassandra.toml",
disableInitialHostLookup: false,
ssl: false,
capath: "/etc/metrictank/ca.pem",
hostverification: true,
auth: false,
username: "cassandra",
password: "cassandra",
initLoadConcurrency: 1,
}
}
|
[
"func",
"NewIdxConfig",
"(",
")",
"*",
"IdxConfig",
"{",
"return",
"&",
"IdxConfig",
"{",
"Enabled",
":",
"true",
",",
"hosts",
":",
"\"",
"\"",
",",
"keyspace",
":",
"\"",
"\"",
",",
"consistency",
":",
"\"",
"\"",
",",
"timeout",
":",
"time",
".",
"Second",
",",
"numConns",
":",
"10",
",",
"writeQueueSize",
":",
"100000",
",",
"updateCassIdx",
":",
"true",
",",
"updateInterval",
":",
"time",
".",
"Hour",
"*",
"3",
",",
"pruneInterval",
":",
"time",
".",
"Hour",
"*",
"3",
",",
"protoVer",
":",
"4",
",",
"createKeyspace",
":",
"true",
",",
"schemaFile",
":",
"\"",
"\"",
",",
"disableInitialHostLookup",
":",
"false",
",",
"ssl",
":",
"false",
",",
"capath",
":",
"\"",
"\"",
",",
"hostverification",
":",
"true",
",",
"auth",
":",
"false",
",",
"username",
":",
"\"",
"\"",
",",
"password",
":",
"\"",
"\"",
",",
"initLoadConcurrency",
":",
"1",
",",
"}",
"\n",
"}"
] |
// NewIdxConfig returns IdxConfig with default values set.
|
[
"NewIdxConfig",
"returns",
"IdxConfig",
"with",
"default",
"values",
"set",
"."
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/idx/cassandra/config.go#L46-L70
|
train
|
grafana/metrictank
|
idx/cassandra/config.go
|
Validate
|
func (cfg *IdxConfig) Validate() error {
if cfg.pruneInterval == 0 {
return errors.New("pruneInterval must be greater then 0. " + timeUnits)
}
if cfg.timeout == 0 {
return errors.New("timeout must be greater than 0. " + timeUnits)
}
return nil
}
|
go
|
func (cfg *IdxConfig) Validate() error {
if cfg.pruneInterval == 0 {
return errors.New("pruneInterval must be greater then 0. " + timeUnits)
}
if cfg.timeout == 0 {
return errors.New("timeout must be greater than 0. " + timeUnits)
}
return nil
}
|
[
"func",
"(",
"cfg",
"*",
"IdxConfig",
")",
"Validate",
"(",
")",
"error",
"{",
"if",
"cfg",
".",
"pruneInterval",
"==",
"0",
"{",
"return",
"errors",
".",
"New",
"(",
"\"",
"\"",
"+",
"timeUnits",
")",
"\n",
"}",
"\n",
"if",
"cfg",
".",
"timeout",
"==",
"0",
"{",
"return",
"errors",
".",
"New",
"(",
"\"",
"\"",
"+",
"timeUnits",
")",
"\n",
"}",
"\n",
"return",
"nil",
"\n",
"}"
] |
// Validate validates IdxConfig settings
|
[
"Validate",
"validates",
"IdxConfig",
"settings"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/idx/cassandra/config.go#L73-L81
|
train
|
grafana/metrictank
|
mdata/aggmetric.go
|
NewAggMetric
|
func NewAggMetric(store Store, cachePusher cache.CachePusher, key schema.AMKey, retentions conf.Retentions, reorderWindow, interval uint32, agg *conf.Aggregation, dropFirstChunk bool) *AggMetric {
// note: during parsing of retentions, we assure there's at least 1.
ret := retentions[0]
m := AggMetric{
cachePusher: cachePusher,
store: store,
key: key,
chunkSpan: ret.ChunkSpan,
numChunks: ret.NumChunks,
chunks: make([]*chunk.Chunk, 0, ret.NumChunks),
dropFirstChunk: dropFirstChunk,
ttl: uint32(ret.MaxRetention()),
// we set LastWrite here to make sure a new Chunk doesn't get immediately
// garbage collected right after creating it, before we can push to it.
lastWrite: uint32(time.Now().Unix()),
}
if reorderWindow != 0 {
m.rob = NewReorderBuffer(reorderWindow, interval)
}
for _, ret := range retentions[1:] {
m.aggregators = append(m.aggregators, NewAggregator(store, cachePusher, key, ret, *agg, dropFirstChunk))
}
return &m
}
|
go
|
func NewAggMetric(store Store, cachePusher cache.CachePusher, key schema.AMKey, retentions conf.Retentions, reorderWindow, interval uint32, agg *conf.Aggregation, dropFirstChunk bool) *AggMetric {
// note: during parsing of retentions, we assure there's at least 1.
ret := retentions[0]
m := AggMetric{
cachePusher: cachePusher,
store: store,
key: key,
chunkSpan: ret.ChunkSpan,
numChunks: ret.NumChunks,
chunks: make([]*chunk.Chunk, 0, ret.NumChunks),
dropFirstChunk: dropFirstChunk,
ttl: uint32(ret.MaxRetention()),
// we set LastWrite here to make sure a new Chunk doesn't get immediately
// garbage collected right after creating it, before we can push to it.
lastWrite: uint32(time.Now().Unix()),
}
if reorderWindow != 0 {
m.rob = NewReorderBuffer(reorderWindow, interval)
}
for _, ret := range retentions[1:] {
m.aggregators = append(m.aggregators, NewAggregator(store, cachePusher, key, ret, *agg, dropFirstChunk))
}
return &m
}
|
[
"func",
"NewAggMetric",
"(",
"store",
"Store",
",",
"cachePusher",
"cache",
".",
"CachePusher",
",",
"key",
"schema",
".",
"AMKey",
",",
"retentions",
"conf",
".",
"Retentions",
",",
"reorderWindow",
",",
"interval",
"uint32",
",",
"agg",
"*",
"conf",
".",
"Aggregation",
",",
"dropFirstChunk",
"bool",
")",
"*",
"AggMetric",
"{",
"// note: during parsing of retentions, we assure there's at least 1.",
"ret",
":=",
"retentions",
"[",
"0",
"]",
"\n\n",
"m",
":=",
"AggMetric",
"{",
"cachePusher",
":",
"cachePusher",
",",
"store",
":",
"store",
",",
"key",
":",
"key",
",",
"chunkSpan",
":",
"ret",
".",
"ChunkSpan",
",",
"numChunks",
":",
"ret",
".",
"NumChunks",
",",
"chunks",
":",
"make",
"(",
"[",
"]",
"*",
"chunk",
".",
"Chunk",
",",
"0",
",",
"ret",
".",
"NumChunks",
")",
",",
"dropFirstChunk",
":",
"dropFirstChunk",
",",
"ttl",
":",
"uint32",
"(",
"ret",
".",
"MaxRetention",
"(",
")",
")",
",",
"// we set LastWrite here to make sure a new Chunk doesn't get immediately",
"// garbage collected right after creating it, before we can push to it.",
"lastWrite",
":",
"uint32",
"(",
"time",
".",
"Now",
"(",
")",
".",
"Unix",
"(",
")",
")",
",",
"}",
"\n",
"if",
"reorderWindow",
"!=",
"0",
"{",
"m",
".",
"rob",
"=",
"NewReorderBuffer",
"(",
"reorderWindow",
",",
"interval",
")",
"\n",
"}",
"\n\n",
"for",
"_",
",",
"ret",
":=",
"range",
"retentions",
"[",
"1",
":",
"]",
"{",
"m",
".",
"aggregators",
"=",
"append",
"(",
"m",
".",
"aggregators",
",",
"NewAggregator",
"(",
"store",
",",
"cachePusher",
",",
"key",
",",
"ret",
",",
"*",
"agg",
",",
"dropFirstChunk",
")",
")",
"\n",
"}",
"\n\n",
"return",
"&",
"m",
"\n",
"}"
] |
// NewAggMetric creates a metric with given key, it retains the given number of chunks each chunkSpan seconds long
// it optionally also creates aggregations with the given settings
// the 0th retention is the native archive of this metric. if there's several others, we create aggregators, using agg.
// it's the callers responsibility to make sure agg is not nil in that case!
|
[
"NewAggMetric",
"creates",
"a",
"metric",
"with",
"given",
"key",
"it",
"retains",
"the",
"given",
"number",
"of",
"chunks",
"each",
"chunkSpan",
"seconds",
"long",
"it",
"optionally",
"also",
"creates",
"aggregations",
"with",
"the",
"given",
"settings",
"the",
"0th",
"retention",
"is",
"the",
"native",
"archive",
"of",
"this",
"metric",
".",
"if",
"there",
"s",
"several",
"others",
"we",
"create",
"aggregators",
"using",
"agg",
".",
"it",
"s",
"the",
"callers",
"responsibility",
"to",
"make",
"sure",
"agg",
"is",
"not",
"nil",
"in",
"that",
"case!"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/mdata/aggmetric.go#L53-L80
|
train
|
grafana/metrictank
|
mdata/aggmetric.go
|
addAggregators
|
func (a *AggMetric) addAggregators(ts uint32, val float64) {
for _, agg := range a.aggregators {
log.Debugf("AM: %s pushing %d,%f to aggregator %d", a.key, ts, val, agg.span)
agg.Add(ts, val)
}
}
|
go
|
func (a *AggMetric) addAggregators(ts uint32, val float64) {
for _, agg := range a.aggregators {
log.Debugf("AM: %s pushing %d,%f to aggregator %d", a.key, ts, val, agg.span)
agg.Add(ts, val)
}
}
|
[
"func",
"(",
"a",
"*",
"AggMetric",
")",
"addAggregators",
"(",
"ts",
"uint32",
",",
"val",
"float64",
")",
"{",
"for",
"_",
",",
"agg",
":=",
"range",
"a",
".",
"aggregators",
"{",
"log",
".",
"Debugf",
"(",
"\"",
"\"",
",",
"a",
".",
"key",
",",
"ts",
",",
"val",
",",
"agg",
".",
"span",
")",
"\n",
"agg",
".",
"Add",
"(",
"ts",
",",
"val",
")",
"\n",
"}",
"\n",
"}"
] |
// caller must hold lock
|
[
"caller",
"must",
"hold",
"lock"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/mdata/aggmetric.go#L315-L320
|
train
|
grafana/metrictank
|
mdata/aggmetric.go
|
pushToCache
|
func (a *AggMetric) pushToCache(c *chunk.Chunk) {
if a.cachePusher == nil {
return
}
// push into cache
intervalHint := a.key.Archive.Span()
itergen, err := chunk.NewIterGen(c.Series.T0, intervalHint, c.Encode(a.chunkSpan))
if err != nil {
log.Errorf("AM: %s failed to generate IterGen. this should never happen: %s", a.key, err)
}
go a.cachePusher.AddIfHot(a.key, 0, itergen)
}
|
go
|
func (a *AggMetric) pushToCache(c *chunk.Chunk) {
if a.cachePusher == nil {
return
}
// push into cache
intervalHint := a.key.Archive.Span()
itergen, err := chunk.NewIterGen(c.Series.T0, intervalHint, c.Encode(a.chunkSpan))
if err != nil {
log.Errorf("AM: %s failed to generate IterGen. this should never happen: %s", a.key, err)
}
go a.cachePusher.AddIfHot(a.key, 0, itergen)
}
|
[
"func",
"(",
"a",
"*",
"AggMetric",
")",
"pushToCache",
"(",
"c",
"*",
"chunk",
".",
"Chunk",
")",
"{",
"if",
"a",
".",
"cachePusher",
"==",
"nil",
"{",
"return",
"\n",
"}",
"\n",
"// push into cache",
"intervalHint",
":=",
"a",
".",
"key",
".",
"Archive",
".",
"Span",
"(",
")",
"\n\n",
"itergen",
",",
"err",
":=",
"chunk",
".",
"NewIterGen",
"(",
"c",
".",
"Series",
".",
"T0",
",",
"intervalHint",
",",
"c",
".",
"Encode",
"(",
"a",
".",
"chunkSpan",
")",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"log",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"a",
".",
"key",
",",
"err",
")",
"\n",
"}",
"\n",
"go",
"a",
".",
"cachePusher",
".",
"AddIfHot",
"(",
"a",
".",
"key",
",",
"0",
",",
"itergen",
")",
"\n",
"}"
] |
// pushToCache adds the chunk into the cache if it is hot
// caller must hold lock
|
[
"pushToCache",
"adds",
"the",
"chunk",
"into",
"the",
"cache",
"if",
"it",
"is",
"hot",
"caller",
"must",
"hold",
"lock"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/mdata/aggmetric.go#L324-L336
|
train
|
grafana/metrictank
|
mdata/aggmetric.go
|
Add
|
func (a *AggMetric) Add(ts uint32, val float64) {
a.Lock()
defer a.Unlock()
if a.rob == nil {
// write directly
a.add(ts, val)
} else {
// write through reorder buffer
res, err := a.rob.Add(ts, val)
if err == nil {
if len(res) == 0 {
a.lastWrite = uint32(time.Now().Unix())
} else {
for _, p := range res {
a.add(p.Ts, p.Val)
}
}
} else {
log.Debugf("AM: failed to add metric to reorder buffer for %s. %s", a.key, err)
a.discardedMetricsInc(err)
}
}
}
|
go
|
func (a *AggMetric) Add(ts uint32, val float64) {
a.Lock()
defer a.Unlock()
if a.rob == nil {
// write directly
a.add(ts, val)
} else {
// write through reorder buffer
res, err := a.rob.Add(ts, val)
if err == nil {
if len(res) == 0 {
a.lastWrite = uint32(time.Now().Unix())
} else {
for _, p := range res {
a.add(p.Ts, p.Val)
}
}
} else {
log.Debugf("AM: failed to add metric to reorder buffer for %s. %s", a.key, err)
a.discardedMetricsInc(err)
}
}
}
|
[
"func",
"(",
"a",
"*",
"AggMetric",
")",
"Add",
"(",
"ts",
"uint32",
",",
"val",
"float64",
")",
"{",
"a",
".",
"Lock",
"(",
")",
"\n",
"defer",
"a",
".",
"Unlock",
"(",
")",
"\n\n",
"if",
"a",
".",
"rob",
"==",
"nil",
"{",
"// write directly",
"a",
".",
"add",
"(",
"ts",
",",
"val",
")",
"\n",
"}",
"else",
"{",
"// write through reorder buffer",
"res",
",",
"err",
":=",
"a",
".",
"rob",
".",
"Add",
"(",
"ts",
",",
"val",
")",
"\n\n",
"if",
"err",
"==",
"nil",
"{",
"if",
"len",
"(",
"res",
")",
"==",
"0",
"{",
"a",
".",
"lastWrite",
"=",
"uint32",
"(",
"time",
".",
"Now",
"(",
")",
".",
"Unix",
"(",
")",
")",
"\n",
"}",
"else",
"{",
"for",
"_",
",",
"p",
":=",
"range",
"res",
"{",
"a",
".",
"add",
"(",
"p",
".",
"Ts",
",",
"p",
".",
"Val",
")",
"\n",
"}",
"\n",
"}",
"\n",
"}",
"else",
"{",
"log",
".",
"Debugf",
"(",
"\"",
"\"",
",",
"a",
".",
"key",
",",
"err",
")",
"\n",
"a",
".",
"discardedMetricsInc",
"(",
"err",
")",
"\n",
"}",
"\n",
"}",
"\n",
"}"
] |
// don't ever call with a ts of 0, cause we use 0 to mean not initialized!
|
[
"don",
"t",
"ever",
"call",
"with",
"a",
"ts",
"of",
"0",
"cause",
"we",
"use",
"0",
"to",
"mean",
"not",
"initialized!"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/mdata/aggmetric.go#L405-L429
|
train
|
grafana/metrictank
|
mdata/aggmetric.go
|
GC
|
func (a *AggMetric) GC(now, chunkMinTs, metricMinTs uint32) (uint32, bool) {
a.Lock()
defer a.Unlock()
// unless it looks like the AggMetric is collectable, abort and mark as not stale
if !a.collectable(now, chunkMinTs) {
return 0, false
}
// make sure any points in the reorderBuffer are moved into our chunks so we can save the data
if a.rob != nil {
tmpLastWrite := a.lastWrite
pts := a.rob.Flush()
for _, p := range pts {
a.add(p.Ts, p.Val)
}
// adding points will cause our lastWrite to be updated, but we want to keep the old value
a.lastWrite = tmpLastWrite
}
// this aggMetric has never had metrics written to it.
if len(a.chunks) == 0 {
return a.gcAggregators(now, chunkMinTs, metricMinTs)
}
currentChunk := a.chunks[a.currentChunkPos]
// we must check collectable again. Imagine this scenario:
// * we didn't have any chunks when calling collectable() the first time so it returned true
// * data from the ROB is flushed and moved into a new chunk
// * this new chunk is active so we're not collectable, even though earlier we thought we were.
if !a.collectable(now, chunkMinTs) {
return 0, false
}
if !currentChunk.Series.Finished {
// chunk hasn't been written to in a while, and is not yet closed.
// Let's close it and persist it if we are a primary
log.Debugf("AM: Found stale Chunk, adding end-of-stream bytes. key: %v T0: %d", a.key, currentChunk.Series.T0)
currentChunk.Finish()
a.pushToCache(currentChunk)
if cluster.Manager.IsPrimary() {
log.Debugf("AM: persist(): node is primary, saving chunk. %v T0: %d", a.key, currentChunk.Series.T0)
// persist the chunk. If the writeQueue is full, then this will block.
a.persist(a.currentChunkPos)
}
}
var points uint32
for _, chunk := range a.chunks {
points += chunk.NumPoints
}
p, stale := a.gcAggregators(now, chunkMinTs, metricMinTs)
points += p
return points, stale && a.lastWrite < metricMinTs
}
|
go
|
func (a *AggMetric) GC(now, chunkMinTs, metricMinTs uint32) (uint32, bool) {
a.Lock()
defer a.Unlock()
// unless it looks like the AggMetric is collectable, abort and mark as not stale
if !a.collectable(now, chunkMinTs) {
return 0, false
}
// make sure any points in the reorderBuffer are moved into our chunks so we can save the data
if a.rob != nil {
tmpLastWrite := a.lastWrite
pts := a.rob.Flush()
for _, p := range pts {
a.add(p.Ts, p.Val)
}
// adding points will cause our lastWrite to be updated, but we want to keep the old value
a.lastWrite = tmpLastWrite
}
// this aggMetric has never had metrics written to it.
if len(a.chunks) == 0 {
return a.gcAggregators(now, chunkMinTs, metricMinTs)
}
currentChunk := a.chunks[a.currentChunkPos]
// we must check collectable again. Imagine this scenario:
// * we didn't have any chunks when calling collectable() the first time so it returned true
// * data from the ROB is flushed and moved into a new chunk
// * this new chunk is active so we're not collectable, even though earlier we thought we were.
if !a.collectable(now, chunkMinTs) {
return 0, false
}
if !currentChunk.Series.Finished {
// chunk hasn't been written to in a while, and is not yet closed.
// Let's close it and persist it if we are a primary
log.Debugf("AM: Found stale Chunk, adding end-of-stream bytes. key: %v T0: %d", a.key, currentChunk.Series.T0)
currentChunk.Finish()
a.pushToCache(currentChunk)
if cluster.Manager.IsPrimary() {
log.Debugf("AM: persist(): node is primary, saving chunk. %v T0: %d", a.key, currentChunk.Series.T0)
// persist the chunk. If the writeQueue is full, then this will block.
a.persist(a.currentChunkPos)
}
}
var points uint32
for _, chunk := range a.chunks {
points += chunk.NumPoints
}
p, stale := a.gcAggregators(now, chunkMinTs, metricMinTs)
points += p
return points, stale && a.lastWrite < metricMinTs
}
|
[
"func",
"(",
"a",
"*",
"AggMetric",
")",
"GC",
"(",
"now",
",",
"chunkMinTs",
",",
"metricMinTs",
"uint32",
")",
"(",
"uint32",
",",
"bool",
")",
"{",
"a",
".",
"Lock",
"(",
")",
"\n",
"defer",
"a",
".",
"Unlock",
"(",
")",
"\n\n",
"// unless it looks like the AggMetric is collectable, abort and mark as not stale",
"if",
"!",
"a",
".",
"collectable",
"(",
"now",
",",
"chunkMinTs",
")",
"{",
"return",
"0",
",",
"false",
"\n",
"}",
"\n\n",
"// make sure any points in the reorderBuffer are moved into our chunks so we can save the data",
"if",
"a",
".",
"rob",
"!=",
"nil",
"{",
"tmpLastWrite",
":=",
"a",
".",
"lastWrite",
"\n",
"pts",
":=",
"a",
".",
"rob",
".",
"Flush",
"(",
")",
"\n",
"for",
"_",
",",
"p",
":=",
"range",
"pts",
"{",
"a",
".",
"add",
"(",
"p",
".",
"Ts",
",",
"p",
".",
"Val",
")",
"\n",
"}",
"\n\n",
"// adding points will cause our lastWrite to be updated, but we want to keep the old value",
"a",
".",
"lastWrite",
"=",
"tmpLastWrite",
"\n",
"}",
"\n\n",
"// this aggMetric has never had metrics written to it.",
"if",
"len",
"(",
"a",
".",
"chunks",
")",
"==",
"0",
"{",
"return",
"a",
".",
"gcAggregators",
"(",
"now",
",",
"chunkMinTs",
",",
"metricMinTs",
")",
"\n",
"}",
"\n\n",
"currentChunk",
":=",
"a",
".",
"chunks",
"[",
"a",
".",
"currentChunkPos",
"]",
"\n\n",
"// we must check collectable again. Imagine this scenario:",
"// * we didn't have any chunks when calling collectable() the first time so it returned true",
"// * data from the ROB is flushed and moved into a new chunk",
"// * this new chunk is active so we're not collectable, even though earlier we thought we were.",
"if",
"!",
"a",
".",
"collectable",
"(",
"now",
",",
"chunkMinTs",
")",
"{",
"return",
"0",
",",
"false",
"\n",
"}",
"\n\n",
"if",
"!",
"currentChunk",
".",
"Series",
".",
"Finished",
"{",
"// chunk hasn't been written to in a while, and is not yet closed.",
"// Let's close it and persist it if we are a primary",
"log",
".",
"Debugf",
"(",
"\"",
"\"",
",",
"a",
".",
"key",
",",
"currentChunk",
".",
"Series",
".",
"T0",
")",
"\n",
"currentChunk",
".",
"Finish",
"(",
")",
"\n",
"a",
".",
"pushToCache",
"(",
"currentChunk",
")",
"\n",
"if",
"cluster",
".",
"Manager",
".",
"IsPrimary",
"(",
")",
"{",
"log",
".",
"Debugf",
"(",
"\"",
"\"",
",",
"a",
".",
"key",
",",
"currentChunk",
".",
"Series",
".",
"T0",
")",
"\n",
"// persist the chunk. If the writeQueue is full, then this will block.",
"a",
".",
"persist",
"(",
"a",
".",
"currentChunkPos",
")",
"\n",
"}",
"\n",
"}",
"\n\n",
"var",
"points",
"uint32",
"\n",
"for",
"_",
",",
"chunk",
":=",
"range",
"a",
".",
"chunks",
"{",
"points",
"+=",
"chunk",
".",
"NumPoints",
"\n",
"}",
"\n",
"p",
",",
"stale",
":=",
"a",
".",
"gcAggregators",
"(",
"now",
",",
"chunkMinTs",
",",
"metricMinTs",
")",
"\n",
"points",
"+=",
"p",
"\n",
"return",
"points",
",",
"stale",
"&&",
"a",
".",
"lastWrite",
"<",
"metricMinTs",
"\n",
"}"
] |
// GC returns whether or not this AggMetric is stale and can be removed, and its pointcount if so
// chunkMinTs -> min timestamp of a chunk before to be considered stale and to be persisted to Cassandra
// metricMinTs -> min timestamp for a metric before to be considered stale and to be purged from the tank
|
[
"GC",
"returns",
"whether",
"or",
"not",
"this",
"AggMetric",
"is",
"stale",
"and",
"can",
"be",
"removed",
"and",
"its",
"pointcount",
"if",
"so",
"chunkMinTs",
"-",
">",
"min",
"timestamp",
"of",
"a",
"chunk",
"before",
"to",
"be",
"considered",
"stale",
"and",
"to",
"be",
"persisted",
"to",
"Cassandra",
"metricMinTs",
"-",
">",
"min",
"timestamp",
"for",
"a",
"metric",
"before",
"to",
"be",
"considered",
"stale",
"and",
"to",
"be",
"purged",
"from",
"the",
"tank"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/mdata/aggmetric.go#L558-L614
|
train
|
grafana/metrictank
|
mdata/aggmetric.go
|
gcAggregators
|
func (a *AggMetric) gcAggregators(now, chunkMinTs, metricMinTs uint32) (uint32, bool) {
var points uint32
stale := true
for _, agg := range a.aggregators {
p, s := agg.GC(now, chunkMinTs, metricMinTs, a.lastWrite)
points += p
stale = stale && s
}
return points, stale
}
|
go
|
func (a *AggMetric) gcAggregators(now, chunkMinTs, metricMinTs uint32) (uint32, bool) {
var points uint32
stale := true
for _, agg := range a.aggregators {
p, s := agg.GC(now, chunkMinTs, metricMinTs, a.lastWrite)
points += p
stale = stale && s
}
return points, stale
}
|
[
"func",
"(",
"a",
"*",
"AggMetric",
")",
"gcAggregators",
"(",
"now",
",",
"chunkMinTs",
",",
"metricMinTs",
"uint32",
")",
"(",
"uint32",
",",
"bool",
")",
"{",
"var",
"points",
"uint32",
"\n",
"stale",
":=",
"true",
"\n",
"for",
"_",
",",
"agg",
":=",
"range",
"a",
".",
"aggregators",
"{",
"p",
",",
"s",
":=",
"agg",
".",
"GC",
"(",
"now",
",",
"chunkMinTs",
",",
"metricMinTs",
",",
"a",
".",
"lastWrite",
")",
"\n",
"points",
"+=",
"p",
"\n",
"stale",
"=",
"stale",
"&&",
"s",
"\n",
"}",
"\n",
"return",
"points",
",",
"stale",
"\n",
"}"
] |
// gcAggregators returns whether all aggregators are stale and can be removed, and their pointcount if so
|
[
"gcAggregators",
"returns",
"whether",
"all",
"aggregators",
"are",
"stale",
"and",
"can",
"be",
"removed",
"and",
"their",
"pointcount",
"if",
"so"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/mdata/aggmetric.go#L617-L626
|
train
|
grafana/metrictank
|
idx/memory/find_cache.go
|
Purge
|
func (c *FindCache) Purge(orgId uint32) {
c.RLock()
cache, ok := c.cache[orgId]
c.RUnlock()
if !ok {
return
}
cache.Purge()
}
|
go
|
func (c *FindCache) Purge(orgId uint32) {
c.RLock()
cache, ok := c.cache[orgId]
c.RUnlock()
if !ok {
return
}
cache.Purge()
}
|
[
"func",
"(",
"c",
"*",
"FindCache",
")",
"Purge",
"(",
"orgId",
"uint32",
")",
"{",
"c",
".",
"RLock",
"(",
")",
"\n",
"cache",
",",
"ok",
":=",
"c",
".",
"cache",
"[",
"orgId",
"]",
"\n",
"c",
".",
"RUnlock",
"(",
")",
"\n",
"if",
"!",
"ok",
"{",
"return",
"\n",
"}",
"\n",
"cache",
".",
"Purge",
"(",
")",
"\n",
"}"
] |
// Purge clears the cache for the specified orgId
|
[
"Purge",
"clears",
"the",
"cache",
"for",
"the",
"specified",
"orgId"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/idx/memory/find_cache.go#L129-L137
|
train
|
grafana/metrictank
|
idx/memory/find_cache.go
|
PurgeAll
|
func (c *FindCache) PurgeAll() {
c.RLock()
orgs := make([]uint32, len(c.cache))
i := 0
for k := range c.cache {
orgs[i] = k
i++
}
c.RUnlock()
for _, org := range orgs {
c.Purge(org)
}
}
|
go
|
func (c *FindCache) PurgeAll() {
c.RLock()
orgs := make([]uint32, len(c.cache))
i := 0
for k := range c.cache {
orgs[i] = k
i++
}
c.RUnlock()
for _, org := range orgs {
c.Purge(org)
}
}
|
[
"func",
"(",
"c",
"*",
"FindCache",
")",
"PurgeAll",
"(",
")",
"{",
"c",
".",
"RLock",
"(",
")",
"\n",
"orgs",
":=",
"make",
"(",
"[",
"]",
"uint32",
",",
"len",
"(",
"c",
".",
"cache",
")",
")",
"\n",
"i",
":=",
"0",
"\n",
"for",
"k",
":=",
"range",
"c",
".",
"cache",
"{",
"orgs",
"[",
"i",
"]",
"=",
"k",
"\n",
"i",
"++",
"\n",
"}",
"\n",
"c",
".",
"RUnlock",
"(",
")",
"\n",
"for",
"_",
",",
"org",
":=",
"range",
"orgs",
"{",
"c",
".",
"Purge",
"(",
"org",
")",
"\n",
"}",
"\n",
"}"
] |
// PurgeAll clears the caches for all orgIds
|
[
"PurgeAll",
"clears",
"the",
"caches",
"for",
"all",
"orgIds"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/idx/memory/find_cache.go#L140-L152
|
train
|
grafana/metrictank
|
idx/memory/find_cache.go
|
InvalidateFor
|
func (c *FindCache) InvalidateFor(orgId uint32, path string) {
c.Lock()
findCacheInvalidationsReceived.Inc()
defer c.Unlock()
if c.backoff {
findCacheInvalidationsDropped.Inc()
return
}
cache, ok := c.cache[orgId]
if !ok || cache.Len() < 1 {
findCacheInvalidationsDropped.Inc()
return
}
req := invalidateRequest{
orgId: orgId,
path: path,
}
select {
case c.invalidateReqs <- req:
default:
c.triggerBackoff()
}
}
|
go
|
func (c *FindCache) InvalidateFor(orgId uint32, path string) {
c.Lock()
findCacheInvalidationsReceived.Inc()
defer c.Unlock()
if c.backoff {
findCacheInvalidationsDropped.Inc()
return
}
cache, ok := c.cache[orgId]
if !ok || cache.Len() < 1 {
findCacheInvalidationsDropped.Inc()
return
}
req := invalidateRequest{
orgId: orgId,
path: path,
}
select {
case c.invalidateReqs <- req:
default:
c.triggerBackoff()
}
}
|
[
"func",
"(",
"c",
"*",
"FindCache",
")",
"InvalidateFor",
"(",
"orgId",
"uint32",
",",
"path",
"string",
")",
"{",
"c",
".",
"Lock",
"(",
")",
"\n",
"findCacheInvalidationsReceived",
".",
"Inc",
"(",
")",
"\n",
"defer",
"c",
".",
"Unlock",
"(",
")",
"\n",
"if",
"c",
".",
"backoff",
"{",
"findCacheInvalidationsDropped",
".",
"Inc",
"(",
")",
"\n",
"return",
"\n",
"}",
"\n\n",
"cache",
",",
"ok",
":=",
"c",
".",
"cache",
"[",
"orgId",
"]",
"\n",
"if",
"!",
"ok",
"||",
"cache",
".",
"Len",
"(",
")",
"<",
"1",
"{",
"findCacheInvalidationsDropped",
".",
"Inc",
"(",
")",
"\n",
"return",
"\n",
"}",
"\n\n",
"req",
":=",
"invalidateRequest",
"{",
"orgId",
":",
"orgId",
",",
"path",
":",
"path",
",",
"}",
"\n",
"select",
"{",
"case",
"c",
".",
"invalidateReqs",
"<-",
"req",
":",
"default",
":",
"c",
".",
"triggerBackoff",
"(",
")",
"\n",
"}",
"\n",
"}"
] |
// InvalidateFor removes entries from the cache for 'orgId'
// that match the provided path. If lots of InvalidateFor calls
// are made at once and we end up with `invalidateQueueSize` concurrent
// goroutines processing the invalidations, we purge the cache and
// disable it for `backoffTime`. Future InvalidateFor calls made during
// the backoff time will then return immediately.
|
[
"InvalidateFor",
"removes",
"entries",
"from",
"the",
"cache",
"for",
"orgId",
"that",
"match",
"the",
"provided",
"path",
".",
"If",
"lots",
"of",
"InvalidateFor",
"calls",
"are",
"made",
"at",
"once",
"and",
"we",
"end",
"up",
"with",
"invalidateQueueSize",
"concurrent",
"goroutines",
"processing",
"the",
"invalidations",
"we",
"purge",
"the",
"cache",
"and",
"disable",
"it",
"for",
"backoffTime",
".",
"Future",
"InvalidateFor",
"calls",
"made",
"during",
"the",
"backoff",
"time",
"will",
"then",
"return",
"immediately",
"."
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/idx/memory/find_cache.go#L160-L184
|
train
|
grafana/metrictank
|
idx/memory/find_cache.go
|
triggerBackoff
|
func (c *FindCache) triggerBackoff() {
log.Infof("memory-idx: findCache invalidate-queue full. Disabling cache for %s", c.backoffTime.String())
findCacheBackoff.Inc()
c.backoff = true
time.AfterFunc(c.backoffTime, func() {
findCacheBackoff.Dec()
c.Lock()
c.backoff = false
c.Unlock()
})
c.cache = make(map[uint32]*lru.Cache)
// drain queue
L:
for {
select {
case <-c.invalidateReqs:
default:
break L
}
}
}
|
go
|
func (c *FindCache) triggerBackoff() {
log.Infof("memory-idx: findCache invalidate-queue full. Disabling cache for %s", c.backoffTime.String())
findCacheBackoff.Inc()
c.backoff = true
time.AfterFunc(c.backoffTime, func() {
findCacheBackoff.Dec()
c.Lock()
c.backoff = false
c.Unlock()
})
c.cache = make(map[uint32]*lru.Cache)
// drain queue
L:
for {
select {
case <-c.invalidateReqs:
default:
break L
}
}
}
|
[
"func",
"(",
"c",
"*",
"FindCache",
")",
"triggerBackoff",
"(",
")",
"{",
"log",
".",
"Infof",
"(",
"\"",
"\"",
",",
"c",
".",
"backoffTime",
".",
"String",
"(",
")",
")",
"\n",
"findCacheBackoff",
".",
"Inc",
"(",
")",
"\n",
"c",
".",
"backoff",
"=",
"true",
"\n",
"time",
".",
"AfterFunc",
"(",
"c",
".",
"backoffTime",
",",
"func",
"(",
")",
"{",
"findCacheBackoff",
".",
"Dec",
"(",
")",
"\n",
"c",
".",
"Lock",
"(",
")",
"\n",
"c",
".",
"backoff",
"=",
"false",
"\n",
"c",
".",
"Unlock",
"(",
")",
"\n",
"}",
")",
"\n",
"c",
".",
"cache",
"=",
"make",
"(",
"map",
"[",
"uint32",
"]",
"*",
"lru",
".",
"Cache",
")",
"\n",
"// drain queue",
"L",
":",
"for",
"{",
"select",
"{",
"case",
"<-",
"c",
".",
"invalidateReqs",
":",
"default",
":",
"break",
"L",
"\n",
"}",
"\n",
"}",
"\n",
"}"
] |
// caller must hold lock!
|
[
"caller",
"must",
"hold",
"lock!"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/idx/memory/find_cache.go#L187-L207
|
train
|
grafana/metrictank
|
idx/memory/find_cache.go
|
PurgeFindCache
|
func (p *PartitionedMemoryIdx) PurgeFindCache() {
for _, m := range p.Partition {
if m.findCache != nil {
m.findCache.PurgeAll()
}
}
}
|
go
|
func (p *PartitionedMemoryIdx) PurgeFindCache() {
for _, m := range p.Partition {
if m.findCache != nil {
m.findCache.PurgeAll()
}
}
}
|
[
"func",
"(",
"p",
"*",
"PartitionedMemoryIdx",
")",
"PurgeFindCache",
"(",
")",
"{",
"for",
"_",
",",
"m",
":=",
"range",
"p",
".",
"Partition",
"{",
"if",
"m",
".",
"findCache",
"!=",
"nil",
"{",
"m",
".",
"findCache",
".",
"PurgeAll",
"(",
")",
"\n",
"}",
"\n",
"}",
"\n",
"}"
] |
// PurgeFindCache purges the findCaches for all orgIds
// across all partitions
|
[
"PurgeFindCache",
"purges",
"the",
"findCaches",
"for",
"all",
"orgIds",
"across",
"all",
"partitions"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/idx/memory/find_cache.go#L351-L357
|
train
|
grafana/metrictank
|
idx/memory/find_cache.go
|
ForceInvalidationFindCache
|
func (p *PartitionedMemoryIdx) ForceInvalidationFindCache() {
for _, m := range p.Partition {
if m.findCache != nil {
m.findCache.forceInvalidation()
}
}
}
|
go
|
func (p *PartitionedMemoryIdx) ForceInvalidationFindCache() {
for _, m := range p.Partition {
if m.findCache != nil {
m.findCache.forceInvalidation()
}
}
}
|
[
"func",
"(",
"p",
"*",
"PartitionedMemoryIdx",
")",
"ForceInvalidationFindCache",
"(",
")",
"{",
"for",
"_",
",",
"m",
":=",
"range",
"p",
".",
"Partition",
"{",
"if",
"m",
".",
"findCache",
"!=",
"nil",
"{",
"m",
".",
"findCache",
".",
"forceInvalidation",
"(",
")",
"\n",
"}",
"\n",
"}",
"\n",
"}"
] |
// ForceInvalidationFindCache forces a full invalidation cycle of the find cache
|
[
"ForceInvalidationFindCache",
"forces",
"a",
"full",
"invalidation",
"cycle",
"of",
"the",
"find",
"cache"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/idx/memory/find_cache.go#L360-L366
|
train
|
grafana/metrictank
|
cmd/mt-store-cat/tables.go
|
getTables
|
func getTables(store *cassandra.CassandraStore, match string) ([]cassandra.Table, error) {
var tables []cassandra.Table
if match == "*" || match == "" {
for _, table := range store.TTLTables {
if table.Name == "metric_idx" || !strings.HasPrefix(table.Name, "metric_") {
continue
}
tables = append(tables, table)
}
sort.Sort(TablesByTTL(tables))
} else {
for _, table := range store.TTLTables {
if table.Name == match {
tables = append(tables, table)
return tables, nil
}
}
return nil, fmt.Errorf("table %q not found", match)
}
return tables, nil
}
|
go
|
func getTables(store *cassandra.CassandraStore, match string) ([]cassandra.Table, error) {
var tables []cassandra.Table
if match == "*" || match == "" {
for _, table := range store.TTLTables {
if table.Name == "metric_idx" || !strings.HasPrefix(table.Name, "metric_") {
continue
}
tables = append(tables, table)
}
sort.Sort(TablesByTTL(tables))
} else {
for _, table := range store.TTLTables {
if table.Name == match {
tables = append(tables, table)
return tables, nil
}
}
return nil, fmt.Errorf("table %q not found", match)
}
return tables, nil
}
|
[
"func",
"getTables",
"(",
"store",
"*",
"cassandra",
".",
"CassandraStore",
",",
"match",
"string",
")",
"(",
"[",
"]",
"cassandra",
".",
"Table",
",",
"error",
")",
"{",
"var",
"tables",
"[",
"]",
"cassandra",
".",
"Table",
"\n",
"if",
"match",
"==",
"\"",
"\"",
"||",
"match",
"==",
"\"",
"\"",
"{",
"for",
"_",
",",
"table",
":=",
"range",
"store",
".",
"TTLTables",
"{",
"if",
"table",
".",
"Name",
"==",
"\"",
"\"",
"||",
"!",
"strings",
".",
"HasPrefix",
"(",
"table",
".",
"Name",
",",
"\"",
"\"",
")",
"{",
"continue",
"\n",
"}",
"\n",
"tables",
"=",
"append",
"(",
"tables",
",",
"table",
")",
"\n",
"}",
"\n",
"sort",
".",
"Sort",
"(",
"TablesByTTL",
"(",
"tables",
")",
")",
"\n",
"}",
"else",
"{",
"for",
"_",
",",
"table",
":=",
"range",
"store",
".",
"TTLTables",
"{",
"if",
"table",
".",
"Name",
"==",
"match",
"{",
"tables",
"=",
"append",
"(",
"tables",
",",
"table",
")",
"\n",
"return",
"tables",
",",
"nil",
"\n",
"}",
"\n",
"}",
"\n",
"return",
"nil",
",",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"match",
")",
"\n",
"}",
"\n",
"return",
"tables",
",",
"nil",
"\n",
"}"
] |
// getTables returns the requested cassandra store tables in TTL asc order based on match string
|
[
"getTables",
"returns",
"the",
"requested",
"cassandra",
"store",
"tables",
"in",
"TTL",
"asc",
"order",
"based",
"on",
"match",
"string"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/cmd/mt-store-cat/tables.go#L19-L39
|
train
|
grafana/metrictank
|
cmd/mt-store-cat/tables.go
|
printTables
|
func printTables(store *cassandra.CassandraStore) {
tables, err := getTables(store, "")
if err != nil {
log.Fatal(err.Error())
}
for _, table := range tables {
fmt.Printf("%s (%d hours <= ttl < %d hours)\n", table.Name, table.TTL, table.TTL*2)
}
}
|
go
|
func printTables(store *cassandra.CassandraStore) {
tables, err := getTables(store, "")
if err != nil {
log.Fatal(err.Error())
}
for _, table := range tables {
fmt.Printf("%s (%d hours <= ttl < %d hours)\n", table.Name, table.TTL, table.TTL*2)
}
}
|
[
"func",
"printTables",
"(",
"store",
"*",
"cassandra",
".",
"CassandraStore",
")",
"{",
"tables",
",",
"err",
":=",
"getTables",
"(",
"store",
",",
"\"",
"\"",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"log",
".",
"Fatal",
"(",
"err",
".",
"Error",
"(",
")",
")",
"\n",
"}",
"\n",
"for",
"_",
",",
"table",
":=",
"range",
"tables",
"{",
"fmt",
".",
"Printf",
"(",
"\"",
"\\n",
"\"",
",",
"table",
".",
"Name",
",",
"table",
".",
"TTL",
",",
"table",
".",
"TTL",
"*",
"2",
")",
"\n",
"}",
"\n",
"}"
] |
//printTables prints all tables in the store
|
[
"printTables",
"prints",
"all",
"tables",
"in",
"the",
"store"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/cmd/mt-store-cat/tables.go#L42-L50
|
train
|
grafana/metrictank
|
api/prometheus_querier.go
|
Querier
|
func (s *Server) Querier(ctx context.Context, min, max int64) (storage.Querier, error) {
from := uint32(min / 1000)
to := uint32(max / 1000)
return NewQuerier(ctx, s, from, to, ctx.Value(orgID("org-id")).(uint32), false), nil
}
|
go
|
func (s *Server) Querier(ctx context.Context, min, max int64) (storage.Querier, error) {
from := uint32(min / 1000)
to := uint32(max / 1000)
return NewQuerier(ctx, s, from, to, ctx.Value(orgID("org-id")).(uint32), false), nil
}
|
[
"func",
"(",
"s",
"*",
"Server",
")",
"Querier",
"(",
"ctx",
"context",
".",
"Context",
",",
"min",
",",
"max",
"int64",
")",
"(",
"storage",
".",
"Querier",
",",
"error",
")",
"{",
"from",
":=",
"uint32",
"(",
"min",
"/",
"1000",
")",
"\n",
"to",
":=",
"uint32",
"(",
"max",
"/",
"1000",
")",
"\n",
"return",
"NewQuerier",
"(",
"ctx",
",",
"s",
",",
"from",
",",
"to",
",",
"ctx",
".",
"Value",
"(",
"orgID",
"(",
"\"",
"\"",
")",
")",
".",
"(",
"uint32",
")",
",",
"false",
")",
",",
"nil",
"\n",
"}"
] |
// Querier creates a new querier that will operate on the subject server
// it needs the org-id stored in a context value
|
[
"Querier",
"creates",
"a",
"new",
"querier",
"that",
"will",
"operate",
"on",
"the",
"subject",
"server",
"it",
"needs",
"the",
"org",
"-",
"id",
"stored",
"in",
"a",
"context",
"value"
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/api/prometheus_querier.go#L21-L25
|
train
|
grafana/metrictank
|
api/prometheus_querier.go
|
Select
|
func (q *querier) Select(matchers ...*labels.Matcher) (storage.SeriesSet, error) {
minFrom := uint32(math.MaxUint32)
var maxTo uint32
var target string
var reqs []models.Req
expressions := []string{}
for _, matcher := range matchers {
if matcher.Name == model.MetricNameLabel {
matcher.Name = "name"
}
if matcher.Type == labels.MatchNotRegexp {
expressions = append(expressions, fmt.Sprintf("%s!=~%s", matcher.Name, matcher.Value))
} else {
expressions = append(expressions, fmt.Sprintf("%s%s%s", matcher.Name, matcher.Type, matcher.Value))
}
}
series, err := q.clusterFindByTag(q.ctx, q.OrgID, expressions, 0, maxSeriesPerReq)
if err != nil {
return nil, err
}
if q.metadataOnly {
return BuildMetadataSeriesSet(series)
}
minFrom = util.Min(minFrom, q.from)
maxTo = util.Max(maxTo, q.to)
for _, s := range series {
for _, metric := range s.Series {
for _, archive := range metric.Defs {
consReq := consolidation.None
fn := mdata.Aggregations.Get(archive.AggId).AggregationMethod[0]
cons := consolidation.Consolidator(fn)
newReq := models.NewReq(archive.Id, archive.NameWithTags(), target, q.from, q.to, math.MaxUint32, uint32(archive.Interval), cons, consReq, s.Node, archive.SchemaId, archive.AggId)
reqs = append(reqs, newReq)
}
}
}
select {
case <-q.ctx.Done():
//request canceled
return nil, fmt.Errorf("request canceled")
default:
}
reqRenderSeriesCount.Value(len(reqs))
if len(reqs) == 0 {
return nil, fmt.Errorf("no series found")
}
// note: if 1 series has a movingAvg that requires a long time range extension, it may push other reqs into another archive. can be optimized later
reqs, _, _, err = alignRequests(uint32(time.Now().Unix()), minFrom, maxTo, reqs)
if err != nil {
log.Errorf("HTTP Render alignReq error: %s", err.Error())
return nil, err
}
out, err := q.getTargets(q.ctx, reqs)
if err != nil {
log.Errorf("HTTP Render %s", err.Error())
return nil, err
}
return SeriesToSeriesSet(out)
}
|
go
|
func (q *querier) Select(matchers ...*labels.Matcher) (storage.SeriesSet, error) {
minFrom := uint32(math.MaxUint32)
var maxTo uint32
var target string
var reqs []models.Req
expressions := []string{}
for _, matcher := range matchers {
if matcher.Name == model.MetricNameLabel {
matcher.Name = "name"
}
if matcher.Type == labels.MatchNotRegexp {
expressions = append(expressions, fmt.Sprintf("%s!=~%s", matcher.Name, matcher.Value))
} else {
expressions = append(expressions, fmt.Sprintf("%s%s%s", matcher.Name, matcher.Type, matcher.Value))
}
}
series, err := q.clusterFindByTag(q.ctx, q.OrgID, expressions, 0, maxSeriesPerReq)
if err != nil {
return nil, err
}
if q.metadataOnly {
return BuildMetadataSeriesSet(series)
}
minFrom = util.Min(minFrom, q.from)
maxTo = util.Max(maxTo, q.to)
for _, s := range series {
for _, metric := range s.Series {
for _, archive := range metric.Defs {
consReq := consolidation.None
fn := mdata.Aggregations.Get(archive.AggId).AggregationMethod[0]
cons := consolidation.Consolidator(fn)
newReq := models.NewReq(archive.Id, archive.NameWithTags(), target, q.from, q.to, math.MaxUint32, uint32(archive.Interval), cons, consReq, s.Node, archive.SchemaId, archive.AggId)
reqs = append(reqs, newReq)
}
}
}
select {
case <-q.ctx.Done():
//request canceled
return nil, fmt.Errorf("request canceled")
default:
}
reqRenderSeriesCount.Value(len(reqs))
if len(reqs) == 0 {
return nil, fmt.Errorf("no series found")
}
// note: if 1 series has a movingAvg that requires a long time range extension, it may push other reqs into another archive. can be optimized later
reqs, _, _, err = alignRequests(uint32(time.Now().Unix()), minFrom, maxTo, reqs)
if err != nil {
log.Errorf("HTTP Render alignReq error: %s", err.Error())
return nil, err
}
out, err := q.getTargets(q.ctx, reqs)
if err != nil {
log.Errorf("HTTP Render %s", err.Error())
return nil, err
}
return SeriesToSeriesSet(out)
}
|
[
"func",
"(",
"q",
"*",
"querier",
")",
"Select",
"(",
"matchers",
"...",
"*",
"labels",
".",
"Matcher",
")",
"(",
"storage",
".",
"SeriesSet",
",",
"error",
")",
"{",
"minFrom",
":=",
"uint32",
"(",
"math",
".",
"MaxUint32",
")",
"\n",
"var",
"maxTo",
"uint32",
"\n",
"var",
"target",
"string",
"\n",
"var",
"reqs",
"[",
"]",
"models",
".",
"Req",
"\n\n",
"expressions",
":=",
"[",
"]",
"string",
"{",
"}",
"\n",
"for",
"_",
",",
"matcher",
":=",
"range",
"matchers",
"{",
"if",
"matcher",
".",
"Name",
"==",
"model",
".",
"MetricNameLabel",
"{",
"matcher",
".",
"Name",
"=",
"\"",
"\"",
"\n",
"}",
"\n",
"if",
"matcher",
".",
"Type",
"==",
"labels",
".",
"MatchNotRegexp",
"{",
"expressions",
"=",
"append",
"(",
"expressions",
",",
"fmt",
".",
"Sprintf",
"(",
"\"",
"\"",
",",
"matcher",
".",
"Name",
",",
"matcher",
".",
"Value",
")",
")",
"\n",
"}",
"else",
"{",
"expressions",
"=",
"append",
"(",
"expressions",
",",
"fmt",
".",
"Sprintf",
"(",
"\"",
"\"",
",",
"matcher",
".",
"Name",
",",
"matcher",
".",
"Type",
",",
"matcher",
".",
"Value",
")",
")",
"\n",
"}",
"\n",
"}",
"\n\n",
"series",
",",
"err",
":=",
"q",
".",
"clusterFindByTag",
"(",
"q",
".",
"ctx",
",",
"q",
".",
"OrgID",
",",
"expressions",
",",
"0",
",",
"maxSeriesPerReq",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n\n",
"if",
"q",
".",
"metadataOnly",
"{",
"return",
"BuildMetadataSeriesSet",
"(",
"series",
")",
"\n",
"}",
"\n\n",
"minFrom",
"=",
"util",
".",
"Min",
"(",
"minFrom",
",",
"q",
".",
"from",
")",
"\n",
"maxTo",
"=",
"util",
".",
"Max",
"(",
"maxTo",
",",
"q",
".",
"to",
")",
"\n",
"for",
"_",
",",
"s",
":=",
"range",
"series",
"{",
"for",
"_",
",",
"metric",
":=",
"range",
"s",
".",
"Series",
"{",
"for",
"_",
",",
"archive",
":=",
"range",
"metric",
".",
"Defs",
"{",
"consReq",
":=",
"consolidation",
".",
"None",
"\n",
"fn",
":=",
"mdata",
".",
"Aggregations",
".",
"Get",
"(",
"archive",
".",
"AggId",
")",
".",
"AggregationMethod",
"[",
"0",
"]",
"\n",
"cons",
":=",
"consolidation",
".",
"Consolidator",
"(",
"fn",
")",
"\n\n",
"newReq",
":=",
"models",
".",
"NewReq",
"(",
"archive",
".",
"Id",
",",
"archive",
".",
"NameWithTags",
"(",
")",
",",
"target",
",",
"q",
".",
"from",
",",
"q",
".",
"to",
",",
"math",
".",
"MaxUint32",
",",
"uint32",
"(",
"archive",
".",
"Interval",
")",
",",
"cons",
",",
"consReq",
",",
"s",
".",
"Node",
",",
"archive",
".",
"SchemaId",
",",
"archive",
".",
"AggId",
")",
"\n",
"reqs",
"=",
"append",
"(",
"reqs",
",",
"newReq",
")",
"\n",
"}",
"\n",
"}",
"\n",
"}",
"\n\n",
"select",
"{",
"case",
"<-",
"q",
".",
"ctx",
".",
"Done",
"(",
")",
":",
"//request canceled",
"return",
"nil",
",",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
")",
"\n",
"default",
":",
"}",
"\n\n",
"reqRenderSeriesCount",
".",
"Value",
"(",
"len",
"(",
"reqs",
")",
")",
"\n",
"if",
"len",
"(",
"reqs",
")",
"==",
"0",
"{",
"return",
"nil",
",",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
")",
"\n",
"}",
"\n\n",
"// note: if 1 series has a movingAvg that requires a long time range extension, it may push other reqs into another archive. can be optimized later",
"reqs",
",",
"_",
",",
"_",
",",
"err",
"=",
"alignRequests",
"(",
"uint32",
"(",
"time",
".",
"Now",
"(",
")",
".",
"Unix",
"(",
")",
")",
",",
"minFrom",
",",
"maxTo",
",",
"reqs",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"log",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"err",
".",
"Error",
"(",
")",
")",
"\n",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n\n",
"out",
",",
"err",
":=",
"q",
".",
"getTargets",
"(",
"q",
".",
"ctx",
",",
"reqs",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"log",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"err",
".",
"Error",
"(",
")",
")",
"\n",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n\n",
"return",
"SeriesToSeriesSet",
"(",
"out",
")",
"\n",
"}"
] |
// Select returns a set of series that matches the given label matchers.
|
[
"Select",
"returns",
"a",
"set",
"of",
"series",
"that",
"matches",
"the",
"given",
"label",
"matchers",
"."
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/api/prometheus_querier.go#L49-L117
|
train
|
grafana/metrictank
|
api/prometheus_querier.go
|
LabelValues
|
func (q *querier) LabelValues(name string) ([]string, error) {
expressions := []string{"name=~[a-zA-Z_][a-zA-Z0-9_]*$"}
if name == model.MetricNameLabel {
name = "name"
expressions = append(expressions, "name=~[a-zA-Z_:][a-zA-Z0-9_:]*$")
}
return q.MetricIndex.FindTagValues(q.OrgID, name, "", expressions, 0, 100000)
}
|
go
|
func (q *querier) LabelValues(name string) ([]string, error) {
expressions := []string{"name=~[a-zA-Z_][a-zA-Z0-9_]*$"}
if name == model.MetricNameLabel {
name = "name"
expressions = append(expressions, "name=~[a-zA-Z_:][a-zA-Z0-9_:]*$")
}
return q.MetricIndex.FindTagValues(q.OrgID, name, "", expressions, 0, 100000)
}
|
[
"func",
"(",
"q",
"*",
"querier",
")",
"LabelValues",
"(",
"name",
"string",
")",
"(",
"[",
"]",
"string",
",",
"error",
")",
"{",
"expressions",
":=",
"[",
"]",
"string",
"{",
"\"",
"\"",
"}",
"\n",
"if",
"name",
"==",
"model",
".",
"MetricNameLabel",
"{",
"name",
"=",
"\"",
"\"",
"\n",
"expressions",
"=",
"append",
"(",
"expressions",
",",
"\"",
"\"",
")",
"\n",
"}",
"\n",
"return",
"q",
".",
"MetricIndex",
".",
"FindTagValues",
"(",
"q",
".",
"OrgID",
",",
"name",
",",
"\"",
"\"",
",",
"expressions",
",",
"0",
",",
"100000",
")",
"\n",
"}"
] |
// LabelValues returns all potential values for a label name.
|
[
"LabelValues",
"returns",
"all",
"potential",
"values",
"for",
"a",
"label",
"name",
"."
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/api/prometheus_querier.go#L120-L127
|
train
|
grafana/metrictank
|
api/graphite.go
|
closestAggMethod
|
func closestAggMethod(requested consolidation.Consolidator, available []conf.Method) consolidation.Consolidator {
// if there is only 1 consolidation method available, then that is all we can return.
if len(available) == 1 {
return consolidation.Consolidator(available[0])
}
avail := map[consolidation.Consolidator]struct{}{}
for _, a := range available {
avail[consolidation.Consolidator(a)] = struct{}{}
}
var orderOfPreference []consolidation.Consolidator
orderOfPreference, ok := rollupPreference[requested]
if !ok {
return consolidation.Consolidator(available[0])
}
for _, p := range orderOfPreference {
if _, ok := avail[p]; ok {
return p
}
}
// fall back to the default aggregation method.
return consolidation.Consolidator(available[0])
}
|
go
|
func closestAggMethod(requested consolidation.Consolidator, available []conf.Method) consolidation.Consolidator {
// if there is only 1 consolidation method available, then that is all we can return.
if len(available) == 1 {
return consolidation.Consolidator(available[0])
}
avail := map[consolidation.Consolidator]struct{}{}
for _, a := range available {
avail[consolidation.Consolidator(a)] = struct{}{}
}
var orderOfPreference []consolidation.Consolidator
orderOfPreference, ok := rollupPreference[requested]
if !ok {
return consolidation.Consolidator(available[0])
}
for _, p := range orderOfPreference {
if _, ok := avail[p]; ok {
return p
}
}
// fall back to the default aggregation method.
return consolidation.Consolidator(available[0])
}
|
[
"func",
"closestAggMethod",
"(",
"requested",
"consolidation",
".",
"Consolidator",
",",
"available",
"[",
"]",
"conf",
".",
"Method",
")",
"consolidation",
".",
"Consolidator",
"{",
"// if there is only 1 consolidation method available, then that is all we can return.",
"if",
"len",
"(",
"available",
")",
"==",
"1",
"{",
"return",
"consolidation",
".",
"Consolidator",
"(",
"available",
"[",
"0",
"]",
")",
"\n",
"}",
"\n\n",
"avail",
":=",
"map",
"[",
"consolidation",
".",
"Consolidator",
"]",
"struct",
"{",
"}",
"{",
"}",
"\n",
"for",
"_",
",",
"a",
":=",
"range",
"available",
"{",
"avail",
"[",
"consolidation",
".",
"Consolidator",
"(",
"a",
")",
"]",
"=",
"struct",
"{",
"}",
"{",
"}",
"\n",
"}",
"\n",
"var",
"orderOfPreference",
"[",
"]",
"consolidation",
".",
"Consolidator",
"\n",
"orderOfPreference",
",",
"ok",
":=",
"rollupPreference",
"[",
"requested",
"]",
"\n",
"if",
"!",
"ok",
"{",
"return",
"consolidation",
".",
"Consolidator",
"(",
"available",
"[",
"0",
"]",
")",
"\n",
"}",
"\n",
"for",
"_",
",",
"p",
":=",
"range",
"orderOfPreference",
"{",
"if",
"_",
",",
"ok",
":=",
"avail",
"[",
"p",
"]",
";",
"ok",
"{",
"return",
"p",
"\n",
"}",
"\n",
"}",
"\n",
"// fall back to the default aggregation method.",
"return",
"consolidation",
".",
"Consolidator",
"(",
"available",
"[",
"0",
"]",
")",
"\n",
"}"
] |
// find the best consolidation method based on what was requested and what aggregations are available.
|
[
"find",
"the",
"best",
"consolidation",
"method",
"based",
"on",
"what",
"was",
"requested",
"and",
"what",
"aggregations",
"are",
"available",
"."
] |
dd9b92db72d27553d9a8214bff5f01d2531f63b0
|
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/api/graphite.go#L760-L782
|
train
|
lightninglabs/neutrino
|
mock_store.go
|
newMockBlockHeaderStore
|
func newMockBlockHeaderStore() headerfs.BlockHeaderStore {
return &mockBlockHeaderStore{
headers: make(map[chainhash.Hash]wire.BlockHeader),
}
}
|
go
|
func newMockBlockHeaderStore() headerfs.BlockHeaderStore {
return &mockBlockHeaderStore{
headers: make(map[chainhash.Hash]wire.BlockHeader),
}
}
|
[
"func",
"newMockBlockHeaderStore",
"(",
")",
"headerfs",
".",
"BlockHeaderStore",
"{",
"return",
"&",
"mockBlockHeaderStore",
"{",
"headers",
":",
"make",
"(",
"map",
"[",
"chainhash",
".",
"Hash",
"]",
"wire",
".",
"BlockHeader",
")",
",",
"}",
"\n",
"}"
] |
// NewMockBlockHeaderStore returns a version of the BlockHeaderStore that's
// backed by an in-memory map. This instance is meant to be used by callers
// outside the package to unit test components that require a BlockHeaderStore
// interface.
|
[
"NewMockBlockHeaderStore",
"returns",
"a",
"version",
"of",
"the",
"BlockHeaderStore",
"that",
"s",
"backed",
"by",
"an",
"in",
"-",
"memory",
"map",
".",
"This",
"instance",
"is",
"meant",
"to",
"be",
"used",
"by",
"callers",
"outside",
"the",
"package",
"to",
"unit",
"test",
"components",
"that",
"require",
"a",
"BlockHeaderStore",
"interface",
"."
] |
a655679fe131a5d1b4417872cc834fc3862ac70e
|
https://github.com/lightninglabs/neutrino/blob/a655679fe131a5d1b4417872cc834fc3862ac70e/mock_store.go#L27-L31
|
train
|
lightninglabs/neutrino
|
cache/cacheable_block.go
|
Size
|
func (c *CacheableBlock) Size() (uint64, error) {
return uint64(c.Block.MsgBlock().SerializeSize()), nil
}
|
go
|
func (c *CacheableBlock) Size() (uint64, error) {
return uint64(c.Block.MsgBlock().SerializeSize()), nil
}
|
[
"func",
"(",
"c",
"*",
"CacheableBlock",
")",
"Size",
"(",
")",
"(",
"uint64",
",",
"error",
")",
"{",
"return",
"uint64",
"(",
"c",
".",
"Block",
".",
"MsgBlock",
"(",
")",
".",
"SerializeSize",
"(",
")",
")",
",",
"nil",
"\n",
"}"
] |
// Size returns size of this block in bytes.
|
[
"Size",
"returns",
"size",
"of",
"this",
"block",
"in",
"bytes",
"."
] |
a655679fe131a5d1b4417872cc834fc3862ac70e
|
https://github.com/lightninglabs/neutrino/blob/a655679fe131a5d1b4417872cc834fc3862ac70e/cache/cacheable_block.go#L12-L14
|
train
|
lightninglabs/neutrino
|
headerfs/index.go
|
newHeaderIndex
|
func newHeaderIndex(db walletdb.DB, indexType HeaderType) (*headerIndex, error) {
// As an initially step, we'll attempt to create all the buckets
// necessary for functioning of the index. If these buckets has already
// been created, then we can exit early.
err := walletdb.Update(db, func(tx walletdb.ReadWriteTx) error {
_, err := tx.CreateTopLevelBucket(indexBucket)
return err
})
if err != nil && err != walletdb.ErrBucketExists {
return nil, err
}
return &headerIndex{
db: db,
indexType: indexType,
}, nil
}
|
go
|
func newHeaderIndex(db walletdb.DB, indexType HeaderType) (*headerIndex, error) {
// As an initially step, we'll attempt to create all the buckets
// necessary for functioning of the index. If these buckets has already
// been created, then we can exit early.
err := walletdb.Update(db, func(tx walletdb.ReadWriteTx) error {
_, err := tx.CreateTopLevelBucket(indexBucket)
return err
})
if err != nil && err != walletdb.ErrBucketExists {
return nil, err
}
return &headerIndex{
db: db,
indexType: indexType,
}, nil
}
|
[
"func",
"newHeaderIndex",
"(",
"db",
"walletdb",
".",
"DB",
",",
"indexType",
"HeaderType",
")",
"(",
"*",
"headerIndex",
",",
"error",
")",
"{",
"// As an initially step, we'll attempt to create all the buckets",
"// necessary for functioning of the index. If these buckets has already",
"// been created, then we can exit early.",
"err",
":=",
"walletdb",
".",
"Update",
"(",
"db",
",",
"func",
"(",
"tx",
"walletdb",
".",
"ReadWriteTx",
")",
"error",
"{",
"_",
",",
"err",
":=",
"tx",
".",
"CreateTopLevelBucket",
"(",
"indexBucket",
")",
"\n",
"return",
"err",
"\n\n",
"}",
")",
"\n",
"if",
"err",
"!=",
"nil",
"&&",
"err",
"!=",
"walletdb",
".",
"ErrBucketExists",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n\n",
"return",
"&",
"headerIndex",
"{",
"db",
":",
"db",
",",
"indexType",
":",
"indexType",
",",
"}",
",",
"nil",
"\n",
"}"
] |
// newHeaderIndex creates a new headerIndex given an already open database, and
// a particular header type.
|
[
"newHeaderIndex",
"creates",
"a",
"new",
"headerIndex",
"given",
"an",
"already",
"open",
"database",
"and",
"a",
"particular",
"header",
"type",
"."
] |
a655679fe131a5d1b4417872cc834fc3862ac70e
|
https://github.com/lightninglabs/neutrino/blob/a655679fe131a5d1b4417872cc834fc3862ac70e/headerfs/index.go#L83-L100
|
train
|
lightninglabs/neutrino
|
headerfs/index.go
|
addHeaders
|
func (h *headerIndex) addHeaders(batch headerBatch) error {
// If we're writing a 0-length batch, make no changes and return.
if len(batch) == 0 {
return nil
}
// In order to ensure optimal write performance, we'll ensure that the
// items are sorted by their hash before insertion into the database.
sort.Sort(batch)
return walletdb.Update(h.db, func(tx walletdb.ReadWriteTx) error {
rootBucket := tx.ReadWriteBucket(indexBucket)
var tipKey []byte
// Based on the specified index type of this instance of the
// index, we'll grab the key that tracks the tip of the chain
// so we can update the index once all the header entries have
// been updated.
// TODO(roasbeef): only need block tip?
switch h.indexType {
case Block:
tipKey = bitcoinTip
case RegularFilter:
tipKey = regFilterTip
default:
return fmt.Errorf("unknown index type: %v", h.indexType)
}
var (
chainTipHash chainhash.Hash
chainTipHeight uint32
)
for _, header := range batch {
var heightBytes [4]byte
binary.BigEndian.PutUint32(heightBytes[:], header.height)
err := rootBucket.Put(header.hash[:], heightBytes[:])
if err != nil {
return err
}
// TODO(roasbeef): need to remedy if side-chain
// tracking added
if header.height >= chainTipHeight {
chainTipHash = header.hash
chainTipHeight = header.height
}
}
return rootBucket.Put(tipKey, chainTipHash[:])
})
}
|
go
|
func (h *headerIndex) addHeaders(batch headerBatch) error {
// If we're writing a 0-length batch, make no changes and return.
if len(batch) == 0 {
return nil
}
// In order to ensure optimal write performance, we'll ensure that the
// items are sorted by their hash before insertion into the database.
sort.Sort(batch)
return walletdb.Update(h.db, func(tx walletdb.ReadWriteTx) error {
rootBucket := tx.ReadWriteBucket(indexBucket)
var tipKey []byte
// Based on the specified index type of this instance of the
// index, we'll grab the key that tracks the tip of the chain
// so we can update the index once all the header entries have
// been updated.
// TODO(roasbeef): only need block tip?
switch h.indexType {
case Block:
tipKey = bitcoinTip
case RegularFilter:
tipKey = regFilterTip
default:
return fmt.Errorf("unknown index type: %v", h.indexType)
}
var (
chainTipHash chainhash.Hash
chainTipHeight uint32
)
for _, header := range batch {
var heightBytes [4]byte
binary.BigEndian.PutUint32(heightBytes[:], header.height)
err := rootBucket.Put(header.hash[:], heightBytes[:])
if err != nil {
return err
}
// TODO(roasbeef): need to remedy if side-chain
// tracking added
if header.height >= chainTipHeight {
chainTipHash = header.hash
chainTipHeight = header.height
}
}
return rootBucket.Put(tipKey, chainTipHash[:])
})
}
|
[
"func",
"(",
"h",
"*",
"headerIndex",
")",
"addHeaders",
"(",
"batch",
"headerBatch",
")",
"error",
"{",
"// If we're writing a 0-length batch, make no changes and return.",
"if",
"len",
"(",
"batch",
")",
"==",
"0",
"{",
"return",
"nil",
"\n",
"}",
"\n\n",
"// In order to ensure optimal write performance, we'll ensure that the",
"// items are sorted by their hash before insertion into the database.",
"sort",
".",
"Sort",
"(",
"batch",
")",
"\n\n",
"return",
"walletdb",
".",
"Update",
"(",
"h",
".",
"db",
",",
"func",
"(",
"tx",
"walletdb",
".",
"ReadWriteTx",
")",
"error",
"{",
"rootBucket",
":=",
"tx",
".",
"ReadWriteBucket",
"(",
"indexBucket",
")",
"\n\n",
"var",
"tipKey",
"[",
"]",
"byte",
"\n\n",
"// Based on the specified index type of this instance of the",
"// index, we'll grab the key that tracks the tip of the chain",
"// so we can update the index once all the header entries have",
"// been updated.",
"// TODO(roasbeef): only need block tip?",
"switch",
"h",
".",
"indexType",
"{",
"case",
"Block",
":",
"tipKey",
"=",
"bitcoinTip",
"\n",
"case",
"RegularFilter",
":",
"tipKey",
"=",
"regFilterTip",
"\n",
"default",
":",
"return",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"h",
".",
"indexType",
")",
"\n",
"}",
"\n\n",
"var",
"(",
"chainTipHash",
"chainhash",
".",
"Hash",
"\n",
"chainTipHeight",
"uint32",
"\n",
")",
"\n\n",
"for",
"_",
",",
"header",
":=",
"range",
"batch",
"{",
"var",
"heightBytes",
"[",
"4",
"]",
"byte",
"\n",
"binary",
".",
"BigEndian",
".",
"PutUint32",
"(",
"heightBytes",
"[",
":",
"]",
",",
"header",
".",
"height",
")",
"\n",
"err",
":=",
"rootBucket",
".",
"Put",
"(",
"header",
".",
"hash",
"[",
":",
"]",
",",
"heightBytes",
"[",
":",
"]",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n\n",
"// TODO(roasbeef): need to remedy if side-chain",
"// tracking added",
"if",
"header",
".",
"height",
">=",
"chainTipHeight",
"{",
"chainTipHash",
"=",
"header",
".",
"hash",
"\n",
"chainTipHeight",
"=",
"header",
".",
"height",
"\n",
"}",
"\n",
"}",
"\n\n",
"return",
"rootBucket",
".",
"Put",
"(",
"tipKey",
",",
"chainTipHash",
"[",
":",
"]",
")",
"\n",
"}",
")",
"\n",
"}"
] |
// addHeaders writes a batch of header entries in a single atomic batch
|
[
"addHeaders",
"writes",
"a",
"batch",
"of",
"header",
"entries",
"in",
"a",
"single",
"atomic",
"batch"
] |
a655679fe131a5d1b4417872cc834fc3862ac70e
|
https://github.com/lightninglabs/neutrino/blob/a655679fe131a5d1b4417872cc834fc3862ac70e/headerfs/index.go#L139-L191
|
train
|
lightninglabs/neutrino
|
headerfs/index.go
|
heightFromHash
|
func (h *headerIndex) heightFromHash(hash *chainhash.Hash) (uint32, error) {
var height uint32
err := walletdb.View(h.db, func(tx walletdb.ReadTx) error {
rootBucket := tx.ReadBucket(indexBucket)
heightBytes := rootBucket.Get(hash[:])
if heightBytes == nil {
// If the hash wasn't found, then we don't know of this
// hash within the index.
return ErrHashNotFound
}
height = binary.BigEndian.Uint32(heightBytes)
return nil
})
if err != nil {
return 0, err
}
return height, nil
}
|
go
|
func (h *headerIndex) heightFromHash(hash *chainhash.Hash) (uint32, error) {
var height uint32
err := walletdb.View(h.db, func(tx walletdb.ReadTx) error {
rootBucket := tx.ReadBucket(indexBucket)
heightBytes := rootBucket.Get(hash[:])
if heightBytes == nil {
// If the hash wasn't found, then we don't know of this
// hash within the index.
return ErrHashNotFound
}
height = binary.BigEndian.Uint32(heightBytes)
return nil
})
if err != nil {
return 0, err
}
return height, nil
}
|
[
"func",
"(",
"h",
"*",
"headerIndex",
")",
"heightFromHash",
"(",
"hash",
"*",
"chainhash",
".",
"Hash",
")",
"(",
"uint32",
",",
"error",
")",
"{",
"var",
"height",
"uint32",
"\n",
"err",
":=",
"walletdb",
".",
"View",
"(",
"h",
".",
"db",
",",
"func",
"(",
"tx",
"walletdb",
".",
"ReadTx",
")",
"error",
"{",
"rootBucket",
":=",
"tx",
".",
"ReadBucket",
"(",
"indexBucket",
")",
"\n\n",
"heightBytes",
":=",
"rootBucket",
".",
"Get",
"(",
"hash",
"[",
":",
"]",
")",
"\n",
"if",
"heightBytes",
"==",
"nil",
"{",
"// If the hash wasn't found, then we don't know of this",
"// hash within the index.",
"return",
"ErrHashNotFound",
"\n",
"}",
"\n\n",
"height",
"=",
"binary",
".",
"BigEndian",
".",
"Uint32",
"(",
"heightBytes",
")",
"\n",
"return",
"nil",
"\n",
"}",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"0",
",",
"err",
"\n",
"}",
"\n\n",
"return",
"height",
",",
"nil",
"\n",
"}"
] |
// heightFromHash returns the height of the entry that matches the specified
// height. With this height, the caller is then able to seek to the appropriate
// spot in the flat files in order to extract the true header.
|
[
"heightFromHash",
"returns",
"the",
"height",
"of",
"the",
"entry",
"that",
"matches",
"the",
"specified",
"height",
".",
"With",
"this",
"height",
"the",
"caller",
"is",
"then",
"able",
"to",
"seek",
"to",
"the",
"appropriate",
"spot",
"in",
"the",
"flat",
"files",
"in",
"order",
"to",
"extract",
"the",
"true",
"header",
"."
] |
a655679fe131a5d1b4417872cc834fc3862ac70e
|
https://github.com/lightninglabs/neutrino/blob/a655679fe131a5d1b4417872cc834fc3862ac70e/headerfs/index.go#L196-L216
|
train
|
lightninglabs/neutrino
|
headerfs/index.go
|
chainTip
|
func (h *headerIndex) chainTip() (*chainhash.Hash, uint32, error) {
var (
tipHeight uint32
tipHash *chainhash.Hash
)
err := walletdb.View(h.db, func(tx walletdb.ReadTx) error {
rootBucket := tx.ReadBucket(indexBucket)
var tipKey []byte
// Based on the specified index type of this instance of the
// index, we'll grab the particular key that tracks the chain
// tip.
switch h.indexType {
case Block:
tipKey = bitcoinTip
case RegularFilter:
tipKey = regFilterTip
default:
return fmt.Errorf("unknown chain tip index type: %v", h.indexType)
}
// Now that we have the particular tip key for this header
// type, we'll fetch the hash for this tip, then using that
// we'll fetch the height that corresponds to that hash.
tipHashBytes := rootBucket.Get(tipKey)
tipHeightBytes := rootBucket.Get(tipHashBytes)
if len(tipHeightBytes) != 4 {
return ErrHeightNotFound
}
// With the height fetched, we can now populate our return
// parameters.
h, err := chainhash.NewHash(tipHashBytes)
if err != nil {
return err
}
tipHash = h
tipHeight = binary.BigEndian.Uint32(tipHeightBytes)
return nil
})
if err != nil {
return nil, 0, err
}
return tipHash, tipHeight, nil
}
|
go
|
func (h *headerIndex) chainTip() (*chainhash.Hash, uint32, error) {
var (
tipHeight uint32
tipHash *chainhash.Hash
)
err := walletdb.View(h.db, func(tx walletdb.ReadTx) error {
rootBucket := tx.ReadBucket(indexBucket)
var tipKey []byte
// Based on the specified index type of this instance of the
// index, we'll grab the particular key that tracks the chain
// tip.
switch h.indexType {
case Block:
tipKey = bitcoinTip
case RegularFilter:
tipKey = regFilterTip
default:
return fmt.Errorf("unknown chain tip index type: %v", h.indexType)
}
// Now that we have the particular tip key for this header
// type, we'll fetch the hash for this tip, then using that
// we'll fetch the height that corresponds to that hash.
tipHashBytes := rootBucket.Get(tipKey)
tipHeightBytes := rootBucket.Get(tipHashBytes)
if len(tipHeightBytes) != 4 {
return ErrHeightNotFound
}
// With the height fetched, we can now populate our return
// parameters.
h, err := chainhash.NewHash(tipHashBytes)
if err != nil {
return err
}
tipHash = h
tipHeight = binary.BigEndian.Uint32(tipHeightBytes)
return nil
})
if err != nil {
return nil, 0, err
}
return tipHash, tipHeight, nil
}
|
[
"func",
"(",
"h",
"*",
"headerIndex",
")",
"chainTip",
"(",
")",
"(",
"*",
"chainhash",
".",
"Hash",
",",
"uint32",
",",
"error",
")",
"{",
"var",
"(",
"tipHeight",
"uint32",
"\n",
"tipHash",
"*",
"chainhash",
".",
"Hash",
"\n",
")",
"\n\n",
"err",
":=",
"walletdb",
".",
"View",
"(",
"h",
".",
"db",
",",
"func",
"(",
"tx",
"walletdb",
".",
"ReadTx",
")",
"error",
"{",
"rootBucket",
":=",
"tx",
".",
"ReadBucket",
"(",
"indexBucket",
")",
"\n\n",
"var",
"tipKey",
"[",
"]",
"byte",
"\n\n",
"// Based on the specified index type of this instance of the",
"// index, we'll grab the particular key that tracks the chain",
"// tip.",
"switch",
"h",
".",
"indexType",
"{",
"case",
"Block",
":",
"tipKey",
"=",
"bitcoinTip",
"\n",
"case",
"RegularFilter",
":",
"tipKey",
"=",
"regFilterTip",
"\n",
"default",
":",
"return",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"h",
".",
"indexType",
")",
"\n",
"}",
"\n\n",
"// Now that we have the particular tip key for this header",
"// type, we'll fetch the hash for this tip, then using that",
"// we'll fetch the height that corresponds to that hash.",
"tipHashBytes",
":=",
"rootBucket",
".",
"Get",
"(",
"tipKey",
")",
"\n",
"tipHeightBytes",
":=",
"rootBucket",
".",
"Get",
"(",
"tipHashBytes",
")",
"\n",
"if",
"len",
"(",
"tipHeightBytes",
")",
"!=",
"4",
"{",
"return",
"ErrHeightNotFound",
"\n",
"}",
"\n\n",
"// With the height fetched, we can now populate our return",
"// parameters.",
"h",
",",
"err",
":=",
"chainhash",
".",
"NewHash",
"(",
"tipHashBytes",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"tipHash",
"=",
"h",
"\n",
"tipHeight",
"=",
"binary",
".",
"BigEndian",
".",
"Uint32",
"(",
"tipHeightBytes",
")",
"\n\n",
"return",
"nil",
"\n",
"}",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"0",
",",
"err",
"\n",
"}",
"\n\n",
"return",
"tipHash",
",",
"tipHeight",
",",
"nil",
"\n",
"}"
] |
// chainTip returns the best hash and height that the index knows of.
|
[
"chainTip",
"returns",
"the",
"best",
"hash",
"and",
"height",
"that",
"the",
"index",
"knows",
"of",
"."
] |
a655679fe131a5d1b4417872cc834fc3862ac70e
|
https://github.com/lightninglabs/neutrino/blob/a655679fe131a5d1b4417872cc834fc3862ac70e/headerfs/index.go#L219-L267
|
train
|
lightninglabs/neutrino
|
headerfs/index.go
|
truncateIndex
|
func (h *headerIndex) truncateIndex(newTip *chainhash.Hash, delete bool) error {
return walletdb.Update(h.db, func(tx walletdb.ReadWriteTx) error {
rootBucket := tx.ReadWriteBucket(indexBucket)
var tipKey []byte
// Based on the specified index type of this instance of the
// index, we'll grab the key that tracks the tip of the chain
// we need to update.
switch h.indexType {
case Block:
tipKey = bitcoinTip
case RegularFilter:
tipKey = regFilterTip
default:
return fmt.Errorf("unknown index type: %v", h.indexType)
}
// If the delete flag is set, then we'll also delete this entry
// from the database as the primary index (block headers) is
// being rolled back.
if delete {
prevTipHash := rootBucket.Get(tipKey)
if err := rootBucket.Delete(prevTipHash); err != nil {
return err
}
}
// With the now stale entry deleted, we'll update the chain tip
// to point to the new hash.
return rootBucket.Put(tipKey, newTip[:])
})
}
|
go
|
func (h *headerIndex) truncateIndex(newTip *chainhash.Hash, delete bool) error {
return walletdb.Update(h.db, func(tx walletdb.ReadWriteTx) error {
rootBucket := tx.ReadWriteBucket(indexBucket)
var tipKey []byte
// Based on the specified index type of this instance of the
// index, we'll grab the key that tracks the tip of the chain
// we need to update.
switch h.indexType {
case Block:
tipKey = bitcoinTip
case RegularFilter:
tipKey = regFilterTip
default:
return fmt.Errorf("unknown index type: %v", h.indexType)
}
// If the delete flag is set, then we'll also delete this entry
// from the database as the primary index (block headers) is
// being rolled back.
if delete {
prevTipHash := rootBucket.Get(tipKey)
if err := rootBucket.Delete(prevTipHash); err != nil {
return err
}
}
// With the now stale entry deleted, we'll update the chain tip
// to point to the new hash.
return rootBucket.Put(tipKey, newTip[:])
})
}
|
[
"func",
"(",
"h",
"*",
"headerIndex",
")",
"truncateIndex",
"(",
"newTip",
"*",
"chainhash",
".",
"Hash",
",",
"delete",
"bool",
")",
"error",
"{",
"return",
"walletdb",
".",
"Update",
"(",
"h",
".",
"db",
",",
"func",
"(",
"tx",
"walletdb",
".",
"ReadWriteTx",
")",
"error",
"{",
"rootBucket",
":=",
"tx",
".",
"ReadWriteBucket",
"(",
"indexBucket",
")",
"\n\n",
"var",
"tipKey",
"[",
"]",
"byte",
"\n\n",
"// Based on the specified index type of this instance of the",
"// index, we'll grab the key that tracks the tip of the chain",
"// we need to update.",
"switch",
"h",
".",
"indexType",
"{",
"case",
"Block",
":",
"tipKey",
"=",
"bitcoinTip",
"\n",
"case",
"RegularFilter",
":",
"tipKey",
"=",
"regFilterTip",
"\n",
"default",
":",
"return",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"h",
".",
"indexType",
")",
"\n",
"}",
"\n\n",
"// If the delete flag is set, then we'll also delete this entry",
"// from the database as the primary index (block headers) is",
"// being rolled back.",
"if",
"delete",
"{",
"prevTipHash",
":=",
"rootBucket",
".",
"Get",
"(",
"tipKey",
")",
"\n",
"if",
"err",
":=",
"rootBucket",
".",
"Delete",
"(",
"prevTipHash",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"}",
"\n\n",
"// With the now stale entry deleted, we'll update the chain tip",
"// to point to the new hash.",
"return",
"rootBucket",
".",
"Put",
"(",
"tipKey",
",",
"newTip",
"[",
":",
"]",
")",
"\n",
"}",
")",
"\n",
"}"
] |
// truncateIndex truncates the index for a particluar header type by a single
// header entry. The passed newTip pointer should point to the hash of the new
// chain tip. Optionally, if the entry is to be deleted as well, then the
// delete flag should be set to true.
|
[
"truncateIndex",
"truncates",
"the",
"index",
"for",
"a",
"particluar",
"header",
"type",
"by",
"a",
"single",
"header",
"entry",
".",
"The",
"passed",
"newTip",
"pointer",
"should",
"point",
"to",
"the",
"hash",
"of",
"the",
"new",
"chain",
"tip",
".",
"Optionally",
"if",
"the",
"entry",
"is",
"to",
"be",
"deleted",
"as",
"well",
"then",
"the",
"delete",
"flag",
"should",
"be",
"set",
"to",
"true",
"."
] |
a655679fe131a5d1b4417872cc834fc3862ac70e
|
https://github.com/lightninglabs/neutrino/blob/a655679fe131a5d1b4417872cc834fc3862ac70e/headerfs/index.go#L273-L305
|
train
|
lightninglabs/neutrino
|
batch_spend_reporter.go
|
newBatchSpendReporter
|
func newBatchSpendReporter() *batchSpendReporter {
return &batchSpendReporter{
requests: make(map[wire.OutPoint][]*GetUtxoRequest),
initialTxns: make(map[wire.OutPoint]*SpendReport),
outpoints: make(map[wire.OutPoint][]byte),
}
}
|
go
|
func newBatchSpendReporter() *batchSpendReporter {
return &batchSpendReporter{
requests: make(map[wire.OutPoint][]*GetUtxoRequest),
initialTxns: make(map[wire.OutPoint]*SpendReport),
outpoints: make(map[wire.OutPoint][]byte),
}
}
|
[
"func",
"newBatchSpendReporter",
"(",
")",
"*",
"batchSpendReporter",
"{",
"return",
"&",
"batchSpendReporter",
"{",
"requests",
":",
"make",
"(",
"map",
"[",
"wire",
".",
"OutPoint",
"]",
"[",
"]",
"*",
"GetUtxoRequest",
")",
",",
"initialTxns",
":",
"make",
"(",
"map",
"[",
"wire",
".",
"OutPoint",
"]",
"*",
"SpendReport",
")",
",",
"outpoints",
":",
"make",
"(",
"map",
"[",
"wire",
".",
"OutPoint",
"]",
"[",
"]",
"byte",
")",
",",
"}",
"\n",
"}"
] |
// newBatchSpendReporter instantiates a fresh batchSpendReporter.
|
[
"newBatchSpendReporter",
"instantiates",
"a",
"fresh",
"batchSpendReporter",
"."
] |
a655679fe131a5d1b4417872cc834fc3862ac70e
|
https://github.com/lightninglabs/neutrino/blob/a655679fe131a5d1b4417872cc834fc3862ac70e/batch_spend_reporter.go#L37-L43
|
train
|
lightninglabs/neutrino
|
batch_spend_reporter.go
|
NotifyUnspentAndUnfound
|
func (b *batchSpendReporter) NotifyUnspentAndUnfound() {
log.Debugf("Finished batch, %d unspent outpoints", len(b.requests))
for outpoint, requests := range b.requests {
// A nil SpendReport indicates the output was not found.
tx, ok := b.initialTxns[outpoint]
if !ok {
log.Warnf("Unknown initial txn for getuxo request %v",
outpoint)
}
b.notifyRequests(&outpoint, requests, tx, nil)
}
}
|
go
|
func (b *batchSpendReporter) NotifyUnspentAndUnfound() {
log.Debugf("Finished batch, %d unspent outpoints", len(b.requests))
for outpoint, requests := range b.requests {
// A nil SpendReport indicates the output was not found.
tx, ok := b.initialTxns[outpoint]
if !ok {
log.Warnf("Unknown initial txn for getuxo request %v",
outpoint)
}
b.notifyRequests(&outpoint, requests, tx, nil)
}
}
|
[
"func",
"(",
"b",
"*",
"batchSpendReporter",
")",
"NotifyUnspentAndUnfound",
"(",
")",
"{",
"log",
".",
"Debugf",
"(",
"\"",
"\"",
",",
"len",
"(",
"b",
".",
"requests",
")",
")",
"\n\n",
"for",
"outpoint",
",",
"requests",
":=",
"range",
"b",
".",
"requests",
"{",
"// A nil SpendReport indicates the output was not found.",
"tx",
",",
"ok",
":=",
"b",
".",
"initialTxns",
"[",
"outpoint",
"]",
"\n",
"if",
"!",
"ok",
"{",
"log",
".",
"Warnf",
"(",
"\"",
"\"",
",",
"outpoint",
")",
"\n",
"}",
"\n\n",
"b",
".",
"notifyRequests",
"(",
"&",
"outpoint",
",",
"requests",
",",
"tx",
",",
"nil",
")",
"\n",
"}",
"\n",
"}"
] |
// NotifyUnspentAndUnfound iterates through any requests for which no spends
// were detected. If we were able to find the initial output, this will be
// delivered signaling that no spend was detected. If the original output could
// not be found, a nil spend report is returned.
|
[
"NotifyUnspentAndUnfound",
"iterates",
"through",
"any",
"requests",
"for",
"which",
"no",
"spends",
"were",
"detected",
".",
"If",
"we",
"were",
"able",
"to",
"find",
"the",
"initial",
"output",
"this",
"will",
"be",
"delivered",
"signaling",
"that",
"no",
"spend",
"was",
"detected",
".",
"If",
"the",
"original",
"output",
"could",
"not",
"be",
"found",
"a",
"nil",
"spend",
"report",
"is",
"returned",
"."
] |
a655679fe131a5d1b4417872cc834fc3862ac70e
|
https://github.com/lightninglabs/neutrino/blob/a655679fe131a5d1b4417872cc834fc3862ac70e/batch_spend_reporter.go#L49-L62
|
train
|
lightninglabs/neutrino
|
batch_spend_reporter.go
|
ProcessBlock
|
func (b *batchSpendReporter) ProcessBlock(blk *wire.MsgBlock,
newReqs []*GetUtxoRequest, height uint32) {
// If any requests want the UTXOs at this height, scan the block to find
// the original outputs that might be spent from.
if len(newReqs) > 0 {
b.addNewRequests(newReqs)
b.findInitialTransactions(blk, newReqs, height)
}
// Next, filter the block for any spends using the current set of
// watched outpoints. This will include any new requests added above.
spends := b.notifySpends(blk, height)
// Finally, rebuild filter entries from cached entries remaining in
// outpoints map. This will provide an updated watchlist used to scan
// the subsequent filters.
rebuildWatchlist := len(newReqs) > 0 || len(spends) > 0
if rebuildWatchlist {
b.filterEntries = b.filterEntries[:0]
for _, entry := range b.outpoints {
b.filterEntries = append(b.filterEntries, entry)
}
}
}
|
go
|
func (b *batchSpendReporter) ProcessBlock(blk *wire.MsgBlock,
newReqs []*GetUtxoRequest, height uint32) {
// If any requests want the UTXOs at this height, scan the block to find
// the original outputs that might be spent from.
if len(newReqs) > 0 {
b.addNewRequests(newReqs)
b.findInitialTransactions(blk, newReqs, height)
}
// Next, filter the block for any spends using the current set of
// watched outpoints. This will include any new requests added above.
spends := b.notifySpends(blk, height)
// Finally, rebuild filter entries from cached entries remaining in
// outpoints map. This will provide an updated watchlist used to scan
// the subsequent filters.
rebuildWatchlist := len(newReqs) > 0 || len(spends) > 0
if rebuildWatchlist {
b.filterEntries = b.filterEntries[:0]
for _, entry := range b.outpoints {
b.filterEntries = append(b.filterEntries, entry)
}
}
}
|
[
"func",
"(",
"b",
"*",
"batchSpendReporter",
")",
"ProcessBlock",
"(",
"blk",
"*",
"wire",
".",
"MsgBlock",
",",
"newReqs",
"[",
"]",
"*",
"GetUtxoRequest",
",",
"height",
"uint32",
")",
"{",
"// If any requests want the UTXOs at this height, scan the block to find",
"// the original outputs that might be spent from.",
"if",
"len",
"(",
"newReqs",
")",
">",
"0",
"{",
"b",
".",
"addNewRequests",
"(",
"newReqs",
")",
"\n",
"b",
".",
"findInitialTransactions",
"(",
"blk",
",",
"newReqs",
",",
"height",
")",
"\n",
"}",
"\n\n",
"// Next, filter the block for any spends using the current set of",
"// watched outpoints. This will include any new requests added above.",
"spends",
":=",
"b",
".",
"notifySpends",
"(",
"blk",
",",
"height",
")",
"\n\n",
"// Finally, rebuild filter entries from cached entries remaining in",
"// outpoints map. This will provide an updated watchlist used to scan",
"// the subsequent filters.",
"rebuildWatchlist",
":=",
"len",
"(",
"newReqs",
")",
">",
"0",
"||",
"len",
"(",
"spends",
")",
">",
"0",
"\n",
"if",
"rebuildWatchlist",
"{",
"b",
".",
"filterEntries",
"=",
"b",
".",
"filterEntries",
"[",
":",
"0",
"]",
"\n",
"for",
"_",
",",
"entry",
":=",
"range",
"b",
".",
"outpoints",
"{",
"b",
".",
"filterEntries",
"=",
"append",
"(",
"b",
".",
"filterEntries",
",",
"entry",
")",
"\n",
"}",
"\n",
"}",
"\n",
"}"
] |
// ProcessBlock accepts a block, block height, and any new requests whose start
// height matches the provided height. If a non-zero number of new requests are
// presented, the block will first be checked for the initial outputs from which
// spends may occur. Afterwards, any spends detected in the block are
// immediately dispatched, and the watchlist updated in preparation of filtering
// the next block.
|
[
"ProcessBlock",
"accepts",
"a",
"block",
"block",
"height",
"and",
"any",
"new",
"requests",
"whose",
"start",
"height",
"matches",
"the",
"provided",
"height",
".",
"If",
"a",
"non",
"-",
"zero",
"number",
"of",
"new",
"requests",
"are",
"presented",
"the",
"block",
"will",
"first",
"be",
"checked",
"for",
"the",
"initial",
"outputs",
"from",
"which",
"spends",
"may",
"occur",
".",
"Afterwards",
"any",
"spends",
"detected",
"in",
"the",
"block",
"are",
"immediately",
"dispatched",
"and",
"the",
"watchlist",
"updated",
"in",
"preparation",
"of",
"filtering",
"the",
"next",
"block",
"."
] |
a655679fe131a5d1b4417872cc834fc3862ac70e
|
https://github.com/lightninglabs/neutrino/blob/a655679fe131a5d1b4417872cc834fc3862ac70e/batch_spend_reporter.go#L100-L124
|
train
|
lightninglabs/neutrino
|
batch_spend_reporter.go
|
addNewRequests
|
func (b *batchSpendReporter) addNewRequests(reqs []*GetUtxoRequest) {
for _, req := range reqs {
outpoint := req.Input.OutPoint
log.Debugf("Adding outpoint=%s height=%d to watchlist",
outpoint, req.BirthHeight)
b.requests[outpoint] = append(b.requests[outpoint], req)
// Build the filter entry only if it is the first time seeing
// the outpoint.
if _, ok := b.outpoints[outpoint]; !ok {
entry := req.Input.PkScript
b.outpoints[outpoint] = entry
b.filterEntries = append(b.filterEntries, entry)
}
}
}
|
go
|
func (b *batchSpendReporter) addNewRequests(reqs []*GetUtxoRequest) {
for _, req := range reqs {
outpoint := req.Input.OutPoint
log.Debugf("Adding outpoint=%s height=%d to watchlist",
outpoint, req.BirthHeight)
b.requests[outpoint] = append(b.requests[outpoint], req)
// Build the filter entry only if it is the first time seeing
// the outpoint.
if _, ok := b.outpoints[outpoint]; !ok {
entry := req.Input.PkScript
b.outpoints[outpoint] = entry
b.filterEntries = append(b.filterEntries, entry)
}
}
}
|
[
"func",
"(",
"b",
"*",
"batchSpendReporter",
")",
"addNewRequests",
"(",
"reqs",
"[",
"]",
"*",
"GetUtxoRequest",
")",
"{",
"for",
"_",
",",
"req",
":=",
"range",
"reqs",
"{",
"outpoint",
":=",
"req",
".",
"Input",
".",
"OutPoint",
"\n\n",
"log",
".",
"Debugf",
"(",
"\"",
"\"",
",",
"outpoint",
",",
"req",
".",
"BirthHeight",
")",
"\n\n",
"b",
".",
"requests",
"[",
"outpoint",
"]",
"=",
"append",
"(",
"b",
".",
"requests",
"[",
"outpoint",
"]",
",",
"req",
")",
"\n\n",
"// Build the filter entry only if it is the first time seeing",
"// the outpoint.",
"if",
"_",
",",
"ok",
":=",
"b",
".",
"outpoints",
"[",
"outpoint",
"]",
";",
"!",
"ok",
"{",
"entry",
":=",
"req",
".",
"Input",
".",
"PkScript",
"\n",
"b",
".",
"outpoints",
"[",
"outpoint",
"]",
"=",
"entry",
"\n",
"b",
".",
"filterEntries",
"=",
"append",
"(",
"b",
".",
"filterEntries",
",",
"entry",
")",
"\n",
"}",
"\n",
"}",
"\n",
"}"
] |
// addNewRequests adds a set of new GetUtxoRequests to the spend reporter's
// state. This method immediately adds the request's outpoints to the reporter's
// watchlist.
|
[
"addNewRequests",
"adds",
"a",
"set",
"of",
"new",
"GetUtxoRequests",
"to",
"the",
"spend",
"reporter",
"s",
"state",
".",
"This",
"method",
"immediately",
"adds",
"the",
"request",
"s",
"outpoints",
"to",
"the",
"reporter",
"s",
"watchlist",
"."
] |
a655679fe131a5d1b4417872cc834fc3862ac70e
|
https://github.com/lightninglabs/neutrino/blob/a655679fe131a5d1b4417872cc834fc3862ac70e/batch_spend_reporter.go#L129-L146
|
train
|
lightninglabs/neutrino
|
batch_spend_reporter.go
|
findInitialTransactions
|
func (b *batchSpendReporter) findInitialTransactions(block *wire.MsgBlock,
newReqs []*GetUtxoRequest, height uint32) map[wire.OutPoint]*SpendReport {
// First, construct a reverse index from txid to all a list of requests
// whose outputs share the same txid.
txidReverseIndex := make(map[chainhash.Hash][]*GetUtxoRequest)
for _, req := range newReqs {
txidReverseIndex[req.Input.OutPoint.Hash] = append(
txidReverseIndex[req.Input.OutPoint.Hash], req,
)
}
// Iterate over the transactions in this block, hashing each and
// querying our reverse index to see if any requests depend on the txn.
initialTxns := make(map[wire.OutPoint]*SpendReport)
for _, tx := range block.Transactions {
// If our reverse index has been cleared, we are done.
if len(txidReverseIndex) == 0 {
break
}
hash := tx.TxHash()
txidReqs, ok := txidReverseIndex[hash]
if !ok {
continue
}
delete(txidReverseIndex, hash)
// For all requests that are watching this txid, use the output
// index of each to grab the initial output.
txOuts := tx.TxOut
for _, req := range txidReqs {
op := req.Input.OutPoint
// Ensure that the outpoint's index references an actual
// output on the transaction. If not, we will be unable
// to find the initial output.
if op.Index >= uint32(len(txOuts)) {
log.Errorf("Failed to find outpoint %s -- "+
"invalid output index", op)
initialTxns[op] = nil
continue
}
initialTxns[op] = &SpendReport{
Output: txOuts[op.Index],
}
}
}
// Finally, we must reconcile any requests for which the txid did not
// exist in this block. A nil spend report is saved for every initial
// txn that could not be found, otherwise the result is copied from scan
// above. The copied values can include valid initial txns, as well as
// nil spend report if the output index was invalid.
for _, req := range newReqs {
tx, ok := initialTxns[req.Input.OutPoint]
switch {
case !ok:
log.Errorf("Failed to find outpoint %s -- "+
"txid not found in block", req.Input.OutPoint)
initialTxns[req.Input.OutPoint] = nil
case tx != nil:
log.Tracef("Block %d creates output %s",
height, req.Input.OutPoint)
default:
}
b.initialTxns[req.Input.OutPoint] = tx
}
return initialTxns
}
|
go
|
func (b *batchSpendReporter) findInitialTransactions(block *wire.MsgBlock,
newReqs []*GetUtxoRequest, height uint32) map[wire.OutPoint]*SpendReport {
// First, construct a reverse index from txid to all a list of requests
// whose outputs share the same txid.
txidReverseIndex := make(map[chainhash.Hash][]*GetUtxoRequest)
for _, req := range newReqs {
txidReverseIndex[req.Input.OutPoint.Hash] = append(
txidReverseIndex[req.Input.OutPoint.Hash], req,
)
}
// Iterate over the transactions in this block, hashing each and
// querying our reverse index to see if any requests depend on the txn.
initialTxns := make(map[wire.OutPoint]*SpendReport)
for _, tx := range block.Transactions {
// If our reverse index has been cleared, we are done.
if len(txidReverseIndex) == 0 {
break
}
hash := tx.TxHash()
txidReqs, ok := txidReverseIndex[hash]
if !ok {
continue
}
delete(txidReverseIndex, hash)
// For all requests that are watching this txid, use the output
// index of each to grab the initial output.
txOuts := tx.TxOut
for _, req := range txidReqs {
op := req.Input.OutPoint
// Ensure that the outpoint's index references an actual
// output on the transaction. If not, we will be unable
// to find the initial output.
if op.Index >= uint32(len(txOuts)) {
log.Errorf("Failed to find outpoint %s -- "+
"invalid output index", op)
initialTxns[op] = nil
continue
}
initialTxns[op] = &SpendReport{
Output: txOuts[op.Index],
}
}
}
// Finally, we must reconcile any requests for which the txid did not
// exist in this block. A nil spend report is saved for every initial
// txn that could not be found, otherwise the result is copied from scan
// above. The copied values can include valid initial txns, as well as
// nil spend report if the output index was invalid.
for _, req := range newReqs {
tx, ok := initialTxns[req.Input.OutPoint]
switch {
case !ok:
log.Errorf("Failed to find outpoint %s -- "+
"txid not found in block", req.Input.OutPoint)
initialTxns[req.Input.OutPoint] = nil
case tx != nil:
log.Tracef("Block %d creates output %s",
height, req.Input.OutPoint)
default:
}
b.initialTxns[req.Input.OutPoint] = tx
}
return initialTxns
}
|
[
"func",
"(",
"b",
"*",
"batchSpendReporter",
")",
"findInitialTransactions",
"(",
"block",
"*",
"wire",
".",
"MsgBlock",
",",
"newReqs",
"[",
"]",
"*",
"GetUtxoRequest",
",",
"height",
"uint32",
")",
"map",
"[",
"wire",
".",
"OutPoint",
"]",
"*",
"SpendReport",
"{",
"// First, construct a reverse index from txid to all a list of requests",
"// whose outputs share the same txid.",
"txidReverseIndex",
":=",
"make",
"(",
"map",
"[",
"chainhash",
".",
"Hash",
"]",
"[",
"]",
"*",
"GetUtxoRequest",
")",
"\n",
"for",
"_",
",",
"req",
":=",
"range",
"newReqs",
"{",
"txidReverseIndex",
"[",
"req",
".",
"Input",
".",
"OutPoint",
".",
"Hash",
"]",
"=",
"append",
"(",
"txidReverseIndex",
"[",
"req",
".",
"Input",
".",
"OutPoint",
".",
"Hash",
"]",
",",
"req",
",",
")",
"\n",
"}",
"\n\n",
"// Iterate over the transactions in this block, hashing each and",
"// querying our reverse index to see if any requests depend on the txn.",
"initialTxns",
":=",
"make",
"(",
"map",
"[",
"wire",
".",
"OutPoint",
"]",
"*",
"SpendReport",
")",
"\n",
"for",
"_",
",",
"tx",
":=",
"range",
"block",
".",
"Transactions",
"{",
"// If our reverse index has been cleared, we are done.",
"if",
"len",
"(",
"txidReverseIndex",
")",
"==",
"0",
"{",
"break",
"\n",
"}",
"\n\n",
"hash",
":=",
"tx",
".",
"TxHash",
"(",
")",
"\n",
"txidReqs",
",",
"ok",
":=",
"txidReverseIndex",
"[",
"hash",
"]",
"\n",
"if",
"!",
"ok",
"{",
"continue",
"\n",
"}",
"\n",
"delete",
"(",
"txidReverseIndex",
",",
"hash",
")",
"\n\n",
"// For all requests that are watching this txid, use the output",
"// index of each to grab the initial output.",
"txOuts",
":=",
"tx",
".",
"TxOut",
"\n",
"for",
"_",
",",
"req",
":=",
"range",
"txidReqs",
"{",
"op",
":=",
"req",
".",
"Input",
".",
"OutPoint",
"\n\n",
"// Ensure that the outpoint's index references an actual",
"// output on the transaction. If not, we will be unable",
"// to find the initial output.",
"if",
"op",
".",
"Index",
">=",
"uint32",
"(",
"len",
"(",
"txOuts",
")",
")",
"{",
"log",
".",
"Errorf",
"(",
"\"",
"\"",
"+",
"\"",
"\"",
",",
"op",
")",
"\n",
"initialTxns",
"[",
"op",
"]",
"=",
"nil",
"\n",
"continue",
"\n",
"}",
"\n\n",
"initialTxns",
"[",
"op",
"]",
"=",
"&",
"SpendReport",
"{",
"Output",
":",
"txOuts",
"[",
"op",
".",
"Index",
"]",
",",
"}",
"\n",
"}",
"\n",
"}",
"\n\n",
"// Finally, we must reconcile any requests for which the txid did not",
"// exist in this block. A nil spend report is saved for every initial",
"// txn that could not be found, otherwise the result is copied from scan",
"// above. The copied values can include valid initial txns, as well as",
"// nil spend report if the output index was invalid.",
"for",
"_",
",",
"req",
":=",
"range",
"newReqs",
"{",
"tx",
",",
"ok",
":=",
"initialTxns",
"[",
"req",
".",
"Input",
".",
"OutPoint",
"]",
"\n",
"switch",
"{",
"case",
"!",
"ok",
":",
"log",
".",
"Errorf",
"(",
"\"",
"\"",
"+",
"\"",
"\"",
",",
"req",
".",
"Input",
".",
"OutPoint",
")",
"\n",
"initialTxns",
"[",
"req",
".",
"Input",
".",
"OutPoint",
"]",
"=",
"nil",
"\n",
"case",
"tx",
"!=",
"nil",
":",
"log",
".",
"Tracef",
"(",
"\"",
"\"",
",",
"height",
",",
"req",
".",
"Input",
".",
"OutPoint",
")",
"\n",
"default",
":",
"}",
"\n\n",
"b",
".",
"initialTxns",
"[",
"req",
".",
"Input",
".",
"OutPoint",
"]",
"=",
"tx",
"\n",
"}",
"\n\n",
"return",
"initialTxns",
"\n",
"}"
] |
// findInitialTransactions searches the given block for the creation of the
// UTXOs that are supposed to be birthed in this block. If any are found, a
// spend report containing the initial outpoint will be saved in case the
// outpoint is not spent later on. Requests corresponding to outpoints that are
// not found in the block will return a nil spend report to indicate that the
// UTXO was not found.
|
[
"findInitialTransactions",
"searches",
"the",
"given",
"block",
"for",
"the",
"creation",
"of",
"the",
"UTXOs",
"that",
"are",
"supposed",
"to",
"be",
"birthed",
"in",
"this",
"block",
".",
"If",
"any",
"are",
"found",
"a",
"spend",
"report",
"containing",
"the",
"initial",
"outpoint",
"will",
"be",
"saved",
"in",
"case",
"the",
"outpoint",
"is",
"not",
"spent",
"later",
"on",
".",
"Requests",
"corresponding",
"to",
"outpoints",
"that",
"are",
"not",
"found",
"in",
"the",
"block",
"will",
"return",
"a",
"nil",
"spend",
"report",
"to",
"indicate",
"that",
"the",
"UTXO",
"was",
"not",
"found",
"."
] |
a655679fe131a5d1b4417872cc834fc3862ac70e
|
https://github.com/lightninglabs/neutrino/blob/a655679fe131a5d1b4417872cc834fc3862ac70e/batch_spend_reporter.go#L154-L226
|
train
|
lightninglabs/neutrino
|
batch_spend_reporter.go
|
notifySpends
|
func (b *batchSpendReporter) notifySpends(block *wire.MsgBlock,
height uint32) map[wire.OutPoint]*SpendReport {
spends := make(map[wire.OutPoint]*SpendReport)
for _, tx := range block.Transactions {
// Check each input to see if this transaction spends one of our
// watched outpoints.
for i, ti := range tx.TxIn {
outpoint := ti.PreviousOutPoint
// Find the requests this spend relates to.
requests, ok := b.requests[outpoint]
if !ok {
continue
}
log.Debugf("UTXO %v spent by txn %v", outpoint,
tx.TxHash())
spend := &SpendReport{
SpendingTx: tx,
SpendingInputIndex: uint32(i),
SpendingTxHeight: height,
}
spends[outpoint] = spend
// With the requests located, we remove this outpoint
// from both the requests, outpoints, and initial txns
// map. This will ensures we don't continue watching
// this outpoint.
b.notifyRequests(&outpoint, requests, spend, nil)
}
}
return spends
}
|
go
|
func (b *batchSpendReporter) notifySpends(block *wire.MsgBlock,
height uint32) map[wire.OutPoint]*SpendReport {
spends := make(map[wire.OutPoint]*SpendReport)
for _, tx := range block.Transactions {
// Check each input to see if this transaction spends one of our
// watched outpoints.
for i, ti := range tx.TxIn {
outpoint := ti.PreviousOutPoint
// Find the requests this spend relates to.
requests, ok := b.requests[outpoint]
if !ok {
continue
}
log.Debugf("UTXO %v spent by txn %v", outpoint,
tx.TxHash())
spend := &SpendReport{
SpendingTx: tx,
SpendingInputIndex: uint32(i),
SpendingTxHeight: height,
}
spends[outpoint] = spend
// With the requests located, we remove this outpoint
// from both the requests, outpoints, and initial txns
// map. This will ensures we don't continue watching
// this outpoint.
b.notifyRequests(&outpoint, requests, spend, nil)
}
}
return spends
}
|
[
"func",
"(",
"b",
"*",
"batchSpendReporter",
")",
"notifySpends",
"(",
"block",
"*",
"wire",
".",
"MsgBlock",
",",
"height",
"uint32",
")",
"map",
"[",
"wire",
".",
"OutPoint",
"]",
"*",
"SpendReport",
"{",
"spends",
":=",
"make",
"(",
"map",
"[",
"wire",
".",
"OutPoint",
"]",
"*",
"SpendReport",
")",
"\n",
"for",
"_",
",",
"tx",
":=",
"range",
"block",
".",
"Transactions",
"{",
"// Check each input to see if this transaction spends one of our",
"// watched outpoints.",
"for",
"i",
",",
"ti",
":=",
"range",
"tx",
".",
"TxIn",
"{",
"outpoint",
":=",
"ti",
".",
"PreviousOutPoint",
"\n\n",
"// Find the requests this spend relates to.",
"requests",
",",
"ok",
":=",
"b",
".",
"requests",
"[",
"outpoint",
"]",
"\n",
"if",
"!",
"ok",
"{",
"continue",
"\n",
"}",
"\n\n",
"log",
".",
"Debugf",
"(",
"\"",
"\"",
",",
"outpoint",
",",
"tx",
".",
"TxHash",
"(",
")",
")",
"\n\n",
"spend",
":=",
"&",
"SpendReport",
"{",
"SpendingTx",
":",
"tx",
",",
"SpendingInputIndex",
":",
"uint32",
"(",
"i",
")",
",",
"SpendingTxHeight",
":",
"height",
",",
"}",
"\n\n",
"spends",
"[",
"outpoint",
"]",
"=",
"spend",
"\n\n",
"// With the requests located, we remove this outpoint",
"// from both the requests, outpoints, and initial txns",
"// map. This will ensures we don't continue watching",
"// this outpoint.",
"b",
".",
"notifyRequests",
"(",
"&",
"outpoint",
",",
"requests",
",",
"spend",
",",
"nil",
")",
"\n",
"}",
"\n",
"}",
"\n\n",
"return",
"spends",
"\n",
"}"
] |
// notifySpends finds any transactions in the block that spend from our watched
// outpoints. If a spend is detected, it is immediately delivered and cleaned up
// from the reporter's internal state.
|
[
"notifySpends",
"finds",
"any",
"transactions",
"in",
"the",
"block",
"that",
"spend",
"from",
"our",
"watched",
"outpoints",
".",
"If",
"a",
"spend",
"is",
"detected",
"it",
"is",
"immediately",
"delivered",
"and",
"cleaned",
"up",
"from",
"the",
"reporter",
"s",
"internal",
"state",
"."
] |
a655679fe131a5d1b4417872cc834fc3862ac70e
|
https://github.com/lightninglabs/neutrino/blob/a655679fe131a5d1b4417872cc834fc3862ac70e/batch_spend_reporter.go#L231-L267
|
train
|
lightninglabs/neutrino
|
headerfs/file.go
|
appendRaw
|
func (h *headerStore) appendRaw(header []byte) error {
if _, err := h.file.Write(header); err != nil {
return err
}
return nil
}
|
go
|
func (h *headerStore) appendRaw(header []byte) error {
if _, err := h.file.Write(header); err != nil {
return err
}
return nil
}
|
[
"func",
"(",
"h",
"*",
"headerStore",
")",
"appendRaw",
"(",
"header",
"[",
"]",
"byte",
")",
"error",
"{",
"if",
"_",
",",
"err",
":=",
"h",
".",
"file",
".",
"Write",
"(",
"header",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n\n",
"return",
"nil",
"\n",
"}"
] |
// appendRaw appends a new raw header to the end of the flat file.
|
[
"appendRaw",
"appends",
"a",
"new",
"raw",
"header",
"to",
"the",
"end",
"of",
"the",
"flat",
"file",
"."
] |
a655679fe131a5d1b4417872cc834fc3862ac70e
|
https://github.com/lightninglabs/neutrino/blob/a655679fe131a5d1b4417872cc834fc3862ac70e/headerfs/file.go#L13-L19
|
train
|
lightninglabs/neutrino
|
headerfs/file.go
|
readRaw
|
func (h *headerStore) readRaw(seekDist uint64) ([]byte, error) {
var headerSize uint32
// Based on the defined header type, we'll determine the number of
// bytes that we need to read past the sync point.
switch h.indexType {
case Block:
headerSize = 80
case RegularFilter:
headerSize = 32
default:
return nil, fmt.Errorf("unknown index type: %v", h.indexType)
}
// TODO(roasbeef): add buffer pool
// With the number of bytes to read determined, we'll create a slice
// for that number of bytes, and read directly from the file into the
// buffer.
rawHeader := make([]byte, headerSize)
if _, err := h.file.ReadAt(rawHeader[:], int64(seekDist)); err != nil {
return nil, err
}
return rawHeader[:], nil
}
|
go
|
func (h *headerStore) readRaw(seekDist uint64) ([]byte, error) {
var headerSize uint32
// Based on the defined header type, we'll determine the number of
// bytes that we need to read past the sync point.
switch h.indexType {
case Block:
headerSize = 80
case RegularFilter:
headerSize = 32
default:
return nil, fmt.Errorf("unknown index type: %v", h.indexType)
}
// TODO(roasbeef): add buffer pool
// With the number of bytes to read determined, we'll create a slice
// for that number of bytes, and read directly from the file into the
// buffer.
rawHeader := make([]byte, headerSize)
if _, err := h.file.ReadAt(rawHeader[:], int64(seekDist)); err != nil {
return nil, err
}
return rawHeader[:], nil
}
|
[
"func",
"(",
"h",
"*",
"headerStore",
")",
"readRaw",
"(",
"seekDist",
"uint64",
")",
"(",
"[",
"]",
"byte",
",",
"error",
")",
"{",
"var",
"headerSize",
"uint32",
"\n\n",
"// Based on the defined header type, we'll determine the number of",
"// bytes that we need to read past the sync point.",
"switch",
"h",
".",
"indexType",
"{",
"case",
"Block",
":",
"headerSize",
"=",
"80",
"\n\n",
"case",
"RegularFilter",
":",
"headerSize",
"=",
"32",
"\n\n",
"default",
":",
"return",
"nil",
",",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"h",
".",
"indexType",
")",
"\n",
"}",
"\n\n",
"// TODO(roasbeef): add buffer pool",
"// With the number of bytes to read determined, we'll create a slice",
"// for that number of bytes, and read directly from the file into the",
"// buffer.",
"rawHeader",
":=",
"make",
"(",
"[",
"]",
"byte",
",",
"headerSize",
")",
"\n",
"if",
"_",
",",
"err",
":=",
"h",
".",
"file",
".",
"ReadAt",
"(",
"rawHeader",
"[",
":",
"]",
",",
"int64",
"(",
"seekDist",
")",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n\n",
"return",
"rawHeader",
"[",
":",
"]",
",",
"nil",
"\n",
"}"
] |
// readRaw reads a raw header from disk from a particular seek distance. The
// amount of bytes read past the seek distance is determined by the specified
// header type.
|
[
"readRaw",
"reads",
"a",
"raw",
"header",
"from",
"disk",
"from",
"a",
"particular",
"seek",
"distance",
".",
"The",
"amount",
"of",
"bytes",
"read",
"past",
"the",
"seek",
"distance",
"is",
"determined",
"by",
"the",
"specified",
"header",
"type",
"."
] |
a655679fe131a5d1b4417872cc834fc3862ac70e
|
https://github.com/lightninglabs/neutrino/blob/a655679fe131a5d1b4417872cc834fc3862ac70e/headerfs/file.go#L24-L51
|
train
|
lightninglabs/neutrino
|
headerfs/file.go
|
readHeader
|
func (h *blockHeaderStore) readHeader(height uint32) (wire.BlockHeader, error) {
var header wire.BlockHeader
// Each header is 80 bytes, so using this information, we'll seek a
// distance to cover that height based on the size of block headers.
seekDistance := uint64(height) * 80
// With the distance calculated, we'll raw a raw header start from that
// offset.
rawHeader, err := h.readRaw(seekDistance)
if err != nil {
return header, err
}
headerReader := bytes.NewReader(rawHeader)
// Finally, decode the raw bytes into a proper bitcoin header.
if err := header.Deserialize(headerReader); err != nil {
return header, err
}
return header, nil
}
|
go
|
func (h *blockHeaderStore) readHeader(height uint32) (wire.BlockHeader, error) {
var header wire.BlockHeader
// Each header is 80 bytes, so using this information, we'll seek a
// distance to cover that height based on the size of block headers.
seekDistance := uint64(height) * 80
// With the distance calculated, we'll raw a raw header start from that
// offset.
rawHeader, err := h.readRaw(seekDistance)
if err != nil {
return header, err
}
headerReader := bytes.NewReader(rawHeader)
// Finally, decode the raw bytes into a proper bitcoin header.
if err := header.Deserialize(headerReader); err != nil {
return header, err
}
return header, nil
}
|
[
"func",
"(",
"h",
"*",
"blockHeaderStore",
")",
"readHeader",
"(",
"height",
"uint32",
")",
"(",
"wire",
".",
"BlockHeader",
",",
"error",
")",
"{",
"var",
"header",
"wire",
".",
"BlockHeader",
"\n\n",
"// Each header is 80 bytes, so using this information, we'll seek a",
"// distance to cover that height based on the size of block headers.",
"seekDistance",
":=",
"uint64",
"(",
"height",
")",
"*",
"80",
"\n\n",
"// With the distance calculated, we'll raw a raw header start from that",
"// offset.",
"rawHeader",
",",
"err",
":=",
"h",
".",
"readRaw",
"(",
"seekDistance",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"header",
",",
"err",
"\n",
"}",
"\n",
"headerReader",
":=",
"bytes",
".",
"NewReader",
"(",
"rawHeader",
")",
"\n\n",
"// Finally, decode the raw bytes into a proper bitcoin header.",
"if",
"err",
":=",
"header",
".",
"Deserialize",
"(",
"headerReader",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"header",
",",
"err",
"\n",
"}",
"\n\n",
"return",
"header",
",",
"nil",
"\n",
"}"
] |
// readHeader reads a full block header from the flat-file. The header read is
// determined by the hight value.
|
[
"readHeader",
"reads",
"a",
"full",
"block",
"header",
"from",
"the",
"flat",
"-",
"file",
".",
"The",
"header",
"read",
"is",
"determined",
"by",
"the",
"hight",
"value",
"."
] |
a655679fe131a5d1b4417872cc834fc3862ac70e
|
https://github.com/lightninglabs/neutrino/blob/a655679fe131a5d1b4417872cc834fc3862ac70e/headerfs/file.go#L90-L111
|
train
|
lightninglabs/neutrino
|
headerfs/file.go
|
readHeader
|
func (f *FilterHeaderStore) readHeader(height uint32) (*chainhash.Hash, error) {
seekDistance := uint64(height) * 32
rawHeader, err := f.readRaw(seekDistance)
if err != nil {
return nil, err
}
return chainhash.NewHash(rawHeader)
}
|
go
|
func (f *FilterHeaderStore) readHeader(height uint32) (*chainhash.Hash, error) {
seekDistance := uint64(height) * 32
rawHeader, err := f.readRaw(seekDistance)
if err != nil {
return nil, err
}
return chainhash.NewHash(rawHeader)
}
|
[
"func",
"(",
"f",
"*",
"FilterHeaderStore",
")",
"readHeader",
"(",
"height",
"uint32",
")",
"(",
"*",
"chainhash",
".",
"Hash",
",",
"error",
")",
"{",
"seekDistance",
":=",
"uint64",
"(",
"height",
")",
"*",
"32",
"\n\n",
"rawHeader",
",",
"err",
":=",
"f",
".",
"readRaw",
"(",
"seekDistance",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n\n",
"return",
"chainhash",
".",
"NewHash",
"(",
"rawHeader",
")",
"\n",
"}"
] |
// readHeader reads a single filter header at the specified height from the
// flat files on disk.
|
[
"readHeader",
"reads",
"a",
"single",
"filter",
"header",
"at",
"the",
"specified",
"height",
"from",
"the",
"flat",
"files",
"on",
"disk",
"."
] |
a655679fe131a5d1b4417872cc834fc3862ac70e
|
https://github.com/lightninglabs/neutrino/blob/a655679fe131a5d1b4417872cc834fc3862ac70e/headerfs/file.go#L115-L124
|
train
|
lightninglabs/neutrino
|
headerfs/file.go
|
readHeadersFromFile
|
func readHeadersFromFile(f *os.File, headerSize, startHeight,
endHeight uint32) (*bytes.Reader, error) {
// Each header is headerSize bytes, so using this information, we'll
// seek a distance to cover that height based on the size the headers.
seekDistance := uint64(startHeight) * uint64(headerSize)
// Based on the number of headers in the range, we'll allocate a single
// slice that's able to hold the entire range of headers.
numHeaders := endHeight - startHeight + 1
rawHeaderBytes := make([]byte, headerSize*numHeaders)
// Now that we have our slice allocated, we'll read out the entire
// range of headers with a single system call.
_, err := f.ReadAt(rawHeaderBytes, int64(seekDistance))
if err != nil {
return nil, err
}
return bytes.NewReader(rawHeaderBytes), nil
}
|
go
|
func readHeadersFromFile(f *os.File, headerSize, startHeight,
endHeight uint32) (*bytes.Reader, error) {
// Each header is headerSize bytes, so using this information, we'll
// seek a distance to cover that height based on the size the headers.
seekDistance := uint64(startHeight) * uint64(headerSize)
// Based on the number of headers in the range, we'll allocate a single
// slice that's able to hold the entire range of headers.
numHeaders := endHeight - startHeight + 1
rawHeaderBytes := make([]byte, headerSize*numHeaders)
// Now that we have our slice allocated, we'll read out the entire
// range of headers with a single system call.
_, err := f.ReadAt(rawHeaderBytes, int64(seekDistance))
if err != nil {
return nil, err
}
return bytes.NewReader(rawHeaderBytes), nil
}
|
[
"func",
"readHeadersFromFile",
"(",
"f",
"*",
"os",
".",
"File",
",",
"headerSize",
",",
"startHeight",
",",
"endHeight",
"uint32",
")",
"(",
"*",
"bytes",
".",
"Reader",
",",
"error",
")",
"{",
"// Each header is headerSize bytes, so using this information, we'll",
"// seek a distance to cover that height based on the size the headers.",
"seekDistance",
":=",
"uint64",
"(",
"startHeight",
")",
"*",
"uint64",
"(",
"headerSize",
")",
"\n\n",
"// Based on the number of headers in the range, we'll allocate a single",
"// slice that's able to hold the entire range of headers.",
"numHeaders",
":=",
"endHeight",
"-",
"startHeight",
"+",
"1",
"\n",
"rawHeaderBytes",
":=",
"make",
"(",
"[",
"]",
"byte",
",",
"headerSize",
"*",
"numHeaders",
")",
"\n\n",
"// Now that we have our slice allocated, we'll read out the entire",
"// range of headers with a single system call.",
"_",
",",
"err",
":=",
"f",
".",
"ReadAt",
"(",
"rawHeaderBytes",
",",
"int64",
"(",
"seekDistance",
")",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n\n",
"return",
"bytes",
".",
"NewReader",
"(",
"rawHeaderBytes",
")",
",",
"nil",
"\n",
"}"
] |
// readHeadersFromFile reads a chunk of headers, each of size headerSize, from
// the given file, from startHeight to endHeight.
|
[
"readHeadersFromFile",
"reads",
"a",
"chunk",
"of",
"headers",
"each",
"of",
"size",
"headerSize",
"from",
"the",
"given",
"file",
"from",
"startHeight",
"to",
"endHeight",
"."
] |
a655679fe131a5d1b4417872cc834fc3862ac70e
|
https://github.com/lightninglabs/neutrino/blob/a655679fe131a5d1b4417872cc834fc3862ac70e/headerfs/file.go#L163-L183
|
train
|
lightninglabs/neutrino
|
neutrino.go
|
Count
|
func (ps *peerState) Count() int {
return len(ps.outboundPeers) + len(ps.persistentPeers)
}
|
go
|
func (ps *peerState) Count() int {
return len(ps.outboundPeers) + len(ps.persistentPeers)
}
|
[
"func",
"(",
"ps",
"*",
"peerState",
")",
"Count",
"(",
")",
"int",
"{",
"return",
"len",
"(",
"ps",
".",
"outboundPeers",
")",
"+",
"len",
"(",
"ps",
".",
"persistentPeers",
")",
"\n",
"}"
] |
// Count returns the count of all known peers.
|
[
"Count",
"returns",
"the",
"count",
"of",
"all",
"known",
"peers",
"."
] |
a655679fe131a5d1b4417872cc834fc3862ac70e
|
https://github.com/lightninglabs/neutrino/blob/a655679fe131a5d1b4417872cc834fc3862ac70e/neutrino.go#L108-L110
|
train
|
lightninglabs/neutrino
|
neutrino.go
|
forAllOutboundPeers
|
func (ps *peerState) forAllOutboundPeers(closure func(sp *ServerPeer)) {
for _, e := range ps.outboundPeers {
closure(e)
}
for _, e := range ps.persistentPeers {
closure(e)
}
}
|
go
|
func (ps *peerState) forAllOutboundPeers(closure func(sp *ServerPeer)) {
for _, e := range ps.outboundPeers {
closure(e)
}
for _, e := range ps.persistentPeers {
closure(e)
}
}
|
[
"func",
"(",
"ps",
"*",
"peerState",
")",
"forAllOutboundPeers",
"(",
"closure",
"func",
"(",
"sp",
"*",
"ServerPeer",
")",
")",
"{",
"for",
"_",
",",
"e",
":=",
"range",
"ps",
".",
"outboundPeers",
"{",
"closure",
"(",
"e",
")",
"\n",
"}",
"\n",
"for",
"_",
",",
"e",
":=",
"range",
"ps",
".",
"persistentPeers",
"{",
"closure",
"(",
"e",
")",
"\n",
"}",
"\n",
"}"
] |
// forAllOutboundPeers is a helper function that runs closure on all outbound
// peers known to peerState.
|
[
"forAllOutboundPeers",
"is",
"a",
"helper",
"function",
"that",
"runs",
"closure",
"on",
"all",
"outbound",
"peers",
"known",
"to",
"peerState",
"."
] |
a655679fe131a5d1b4417872cc834fc3862ac70e
|
https://github.com/lightninglabs/neutrino/blob/a655679fe131a5d1b4417872cc834fc3862ac70e/neutrino.go#L114-L121
|
train
|
lightninglabs/neutrino
|
neutrino.go
|
newServerPeer
|
func newServerPeer(s *ChainService, isPersistent bool) *ServerPeer {
return &ServerPeer{
server: s,
persistent: isPersistent,
knownAddresses: make(map[string]struct{}),
quit: make(chan struct{}),
recvSubscribers: make(map[spMsgSubscription]struct{}),
}
}
|
go
|
func newServerPeer(s *ChainService, isPersistent bool) *ServerPeer {
return &ServerPeer{
server: s,
persistent: isPersistent,
knownAddresses: make(map[string]struct{}),
quit: make(chan struct{}),
recvSubscribers: make(map[spMsgSubscription]struct{}),
}
}
|
[
"func",
"newServerPeer",
"(",
"s",
"*",
"ChainService",
",",
"isPersistent",
"bool",
")",
"*",
"ServerPeer",
"{",
"return",
"&",
"ServerPeer",
"{",
"server",
":",
"s",
",",
"persistent",
":",
"isPersistent",
",",
"knownAddresses",
":",
"make",
"(",
"map",
"[",
"string",
"]",
"struct",
"{",
"}",
")",
",",
"quit",
":",
"make",
"(",
"chan",
"struct",
"{",
"}",
")",
",",
"recvSubscribers",
":",
"make",
"(",
"map",
"[",
"spMsgSubscription",
"]",
"struct",
"{",
"}",
")",
",",
"}",
"\n",
"}"
] |
// newServerPeer returns a new ServerPeer instance. The peer needs to be set by
// the caller.
|
[
"newServerPeer",
"returns",
"a",
"new",
"ServerPeer",
"instance",
".",
"The",
"peer",
"needs",
"to",
"be",
"set",
"by",
"the",
"caller",
"."
] |
a655679fe131a5d1b4417872cc834fc3862ac70e
|
https://github.com/lightninglabs/neutrino/blob/a655679fe131a5d1b4417872cc834fc3862ac70e/neutrino.go#L171-L179
|
train
|
lightninglabs/neutrino
|
neutrino.go
|
addKnownAddresses
|
func (sp *ServerPeer) addKnownAddresses(addresses []*wire.NetAddress) {
for _, na := range addresses {
sp.knownAddresses[addrmgr.NetAddressKey(na)] = struct{}{}
}
}
|
go
|
func (sp *ServerPeer) addKnownAddresses(addresses []*wire.NetAddress) {
for _, na := range addresses {
sp.knownAddresses[addrmgr.NetAddressKey(na)] = struct{}{}
}
}
|
[
"func",
"(",
"sp",
"*",
"ServerPeer",
")",
"addKnownAddresses",
"(",
"addresses",
"[",
"]",
"*",
"wire",
".",
"NetAddress",
")",
"{",
"for",
"_",
",",
"na",
":=",
"range",
"addresses",
"{",
"sp",
".",
"knownAddresses",
"[",
"addrmgr",
".",
"NetAddressKey",
"(",
"na",
")",
"]",
"=",
"struct",
"{",
"}",
"{",
"}",
"\n",
"}",
"\n",
"}"
] |
// addKnownAddresses adds the given addresses to the set of known addresses to
// the peer to prevent sending duplicate addresses.
|
[
"addKnownAddresses",
"adds",
"the",
"given",
"addresses",
"to",
"the",
"set",
"of",
"known",
"addresses",
"to",
"the",
"peer",
"to",
"prevent",
"sending",
"duplicate",
"addresses",
"."
] |
a655679fe131a5d1b4417872cc834fc3862ac70e
|
https://github.com/lightninglabs/neutrino/blob/a655679fe131a5d1b4417872cc834fc3862ac70e/neutrino.go#L194-L198
|
train
|
lightninglabs/neutrino
|
neutrino.go
|
pushSendHeadersMsg
|
func (sp *ServerPeer) pushSendHeadersMsg() error {
if sp.VersionKnown() {
if sp.ProtocolVersion() > wire.SendHeadersVersion {
sp.QueueMessage(wire.NewMsgSendHeaders(), nil)
}
}
return nil
}
|
go
|
func (sp *ServerPeer) pushSendHeadersMsg() error {
if sp.VersionKnown() {
if sp.ProtocolVersion() > wire.SendHeadersVersion {
sp.QueueMessage(wire.NewMsgSendHeaders(), nil)
}
}
return nil
}
|
[
"func",
"(",
"sp",
"*",
"ServerPeer",
")",
"pushSendHeadersMsg",
"(",
")",
"error",
"{",
"if",
"sp",
".",
"VersionKnown",
"(",
")",
"{",
"if",
"sp",
".",
"ProtocolVersion",
"(",
")",
">",
"wire",
".",
"SendHeadersVersion",
"{",
"sp",
".",
"QueueMessage",
"(",
"wire",
".",
"NewMsgSendHeaders",
"(",
")",
",",
"nil",
")",
"\n",
"}",
"\n",
"}",
"\n",
"return",
"nil",
"\n",
"}"
] |
// pushSendHeadersMsg sends a sendheaders message to the connected peer.
|
[
"pushSendHeadersMsg",
"sends",
"a",
"sendheaders",
"message",
"to",
"the",
"connected",
"peer",
"."
] |
a655679fe131a5d1b4417872cc834fc3862ac70e
|
https://github.com/lightninglabs/neutrino/blob/a655679fe131a5d1b4417872cc834fc3862ac70e/neutrino.go#L238-L245
|
train
|
lightninglabs/neutrino
|
neutrino.go
|
OnVerAck
|
func (sp *ServerPeer) OnVerAck(_ *peer.Peer, msg *wire.MsgVerAck) {
sp.pushSendHeadersMsg()
}
|
go
|
func (sp *ServerPeer) OnVerAck(_ *peer.Peer, msg *wire.MsgVerAck) {
sp.pushSendHeadersMsg()
}
|
[
"func",
"(",
"sp",
"*",
"ServerPeer",
")",
"OnVerAck",
"(",
"_",
"*",
"peer",
".",
"Peer",
",",
"msg",
"*",
"wire",
".",
"MsgVerAck",
")",
"{",
"sp",
".",
"pushSendHeadersMsg",
"(",
")",
"\n",
"}"
] |
// OnVerAck is invoked when a peer receives a verack bitcoin message and is used
// to send the "sendheaders" command to peers that are of a sufficienty new
// protocol version.
|
[
"OnVerAck",
"is",
"invoked",
"when",
"a",
"peer",
"receives",
"a",
"verack",
"bitcoin",
"message",
"and",
"is",
"used",
"to",
"send",
"the",
"sendheaders",
"command",
"to",
"peers",
"that",
"are",
"of",
"a",
"sufficienty",
"new",
"protocol",
"version",
"."
] |
a655679fe131a5d1b4417872cc834fc3862ac70e
|
https://github.com/lightninglabs/neutrino/blob/a655679fe131a5d1b4417872cc834fc3862ac70e/neutrino.go#L250-L252
|
train
|
lightninglabs/neutrino
|
neutrino.go
|
OnHeaders
|
func (sp *ServerPeer) OnHeaders(p *peer.Peer, msg *wire.MsgHeaders) {
log.Tracef("Got headers with %d items from %s", len(msg.Headers),
p.Addr())
sp.server.blockManager.QueueHeaders(msg, sp)
}
|
go
|
func (sp *ServerPeer) OnHeaders(p *peer.Peer, msg *wire.MsgHeaders) {
log.Tracef("Got headers with %d items from %s", len(msg.Headers),
p.Addr())
sp.server.blockManager.QueueHeaders(msg, sp)
}
|
[
"func",
"(",
"sp",
"*",
"ServerPeer",
")",
"OnHeaders",
"(",
"p",
"*",
"peer",
".",
"Peer",
",",
"msg",
"*",
"wire",
".",
"MsgHeaders",
")",
"{",
"log",
".",
"Tracef",
"(",
"\"",
"\"",
",",
"len",
"(",
"msg",
".",
"Headers",
")",
",",
"p",
".",
"Addr",
"(",
")",
")",
"\n",
"sp",
".",
"server",
".",
"blockManager",
".",
"QueueHeaders",
"(",
"msg",
",",
"sp",
")",
"\n",
"}"
] |
// OnHeaders is invoked when a peer receives a headers bitcoin
// message. The message is passed down to the block manager.
|
[
"OnHeaders",
"is",
"invoked",
"when",
"a",
"peer",
"receives",
"a",
"headers",
"bitcoin",
"message",
".",
"The",
"message",
"is",
"passed",
"down",
"to",
"the",
"block",
"manager",
"."
] |
a655679fe131a5d1b4417872cc834fc3862ac70e
|
https://github.com/lightninglabs/neutrino/blob/a655679fe131a5d1b4417872cc834fc3862ac70e/neutrino.go#L357-L361
|
train
|
lightninglabs/neutrino
|
neutrino.go
|
subscribeRecvMsg
|
func (sp *ServerPeer) subscribeRecvMsg(subscription spMsgSubscription) {
sp.mtxSubscribers.Lock()
defer sp.mtxSubscribers.Unlock()
sp.recvSubscribers[subscription] = struct{}{}
}
|
go
|
func (sp *ServerPeer) subscribeRecvMsg(subscription spMsgSubscription) {
sp.mtxSubscribers.Lock()
defer sp.mtxSubscribers.Unlock()
sp.recvSubscribers[subscription] = struct{}{}
}
|
[
"func",
"(",
"sp",
"*",
"ServerPeer",
")",
"subscribeRecvMsg",
"(",
"subscription",
"spMsgSubscription",
")",
"{",
"sp",
".",
"mtxSubscribers",
".",
"Lock",
"(",
")",
"\n",
"defer",
"sp",
".",
"mtxSubscribers",
".",
"Unlock",
"(",
")",
"\n",
"sp",
".",
"recvSubscribers",
"[",
"subscription",
"]",
"=",
"struct",
"{",
"}",
"{",
"}",
"\n",
"}"
] |
// subscribeRecvMsg handles adding OnRead subscriptions to the server peer.
|
[
"subscribeRecvMsg",
"handles",
"adding",
"OnRead",
"subscriptions",
"to",
"the",
"server",
"peer",
"."
] |
a655679fe131a5d1b4417872cc834fc3862ac70e
|
https://github.com/lightninglabs/neutrino/blob/a655679fe131a5d1b4417872cc834fc3862ac70e/neutrino.go#L476-L480
|
train
|
lightninglabs/neutrino
|
neutrino.go
|
unsubscribeRecvMsgs
|
func (sp *ServerPeer) unsubscribeRecvMsgs(subscription spMsgSubscription) {
sp.mtxSubscribers.Lock()
defer sp.mtxSubscribers.Unlock()
delete(sp.recvSubscribers, subscription)
}
|
go
|
func (sp *ServerPeer) unsubscribeRecvMsgs(subscription spMsgSubscription) {
sp.mtxSubscribers.Lock()
defer sp.mtxSubscribers.Unlock()
delete(sp.recvSubscribers, subscription)
}
|
[
"func",
"(",
"sp",
"*",
"ServerPeer",
")",
"unsubscribeRecvMsgs",
"(",
"subscription",
"spMsgSubscription",
")",
"{",
"sp",
".",
"mtxSubscribers",
".",
"Lock",
"(",
")",
"\n",
"defer",
"sp",
".",
"mtxSubscribers",
".",
"Unlock",
"(",
")",
"\n",
"delete",
"(",
"sp",
".",
"recvSubscribers",
",",
"subscription",
")",
"\n",
"}"
] |
// unsubscribeRecvMsgs handles removing OnRead subscriptions from the server
// peer.
|
[
"unsubscribeRecvMsgs",
"handles",
"removing",
"OnRead",
"subscriptions",
"from",
"the",
"server",
"peer",
"."
] |
a655679fe131a5d1b4417872cc834fc3862ac70e
|
https://github.com/lightninglabs/neutrino/blob/a655679fe131a5d1b4417872cc834fc3862ac70e/neutrino.go#L484-L488
|
train
|
lightninglabs/neutrino
|
neutrino.go
|
BestBlock
|
func (s *ChainService) BestBlock() (*waddrmgr.BlockStamp, error) {
bestHeader, bestHeight, err := s.BlockHeaders.ChainTip()
if err != nil {
return nil, err
}
_, filterHeight, err := s.RegFilterHeaders.ChainTip()
if err != nil {
return nil, err
}
// Filter headers might lag behind block headers, so we can can fetch a
// previous block header if the filter headers are not caught up.
if filterHeight < bestHeight {
bestHeight = filterHeight
bestHeader, err = s.BlockHeaders.FetchHeaderByHeight(
bestHeight,
)
if err != nil {
return nil, err
}
}
return &waddrmgr.BlockStamp{
Height: int32(bestHeight),
Hash: bestHeader.BlockHash(),
}, nil
}
|
go
|
func (s *ChainService) BestBlock() (*waddrmgr.BlockStamp, error) {
bestHeader, bestHeight, err := s.BlockHeaders.ChainTip()
if err != nil {
return nil, err
}
_, filterHeight, err := s.RegFilterHeaders.ChainTip()
if err != nil {
return nil, err
}
// Filter headers might lag behind block headers, so we can can fetch a
// previous block header if the filter headers are not caught up.
if filterHeight < bestHeight {
bestHeight = filterHeight
bestHeader, err = s.BlockHeaders.FetchHeaderByHeight(
bestHeight,
)
if err != nil {
return nil, err
}
}
return &waddrmgr.BlockStamp{
Height: int32(bestHeight),
Hash: bestHeader.BlockHash(),
}, nil
}
|
[
"func",
"(",
"s",
"*",
"ChainService",
")",
"BestBlock",
"(",
")",
"(",
"*",
"waddrmgr",
".",
"BlockStamp",
",",
"error",
")",
"{",
"bestHeader",
",",
"bestHeight",
",",
"err",
":=",
"s",
".",
"BlockHeaders",
".",
"ChainTip",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n\n",
"_",
",",
"filterHeight",
",",
"err",
":=",
"s",
".",
"RegFilterHeaders",
".",
"ChainTip",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n\n",
"// Filter headers might lag behind block headers, so we can can fetch a",
"// previous block header if the filter headers are not caught up.",
"if",
"filterHeight",
"<",
"bestHeight",
"{",
"bestHeight",
"=",
"filterHeight",
"\n",
"bestHeader",
",",
"err",
"=",
"s",
".",
"BlockHeaders",
".",
"FetchHeaderByHeight",
"(",
"bestHeight",
",",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n",
"}",
"\n\n",
"return",
"&",
"waddrmgr",
".",
"BlockStamp",
"{",
"Height",
":",
"int32",
"(",
"bestHeight",
")",
",",
"Hash",
":",
"bestHeader",
".",
"BlockHash",
"(",
")",
",",
"}",
",",
"nil",
"\n",
"}"
] |
// BestBlock retrieves the most recent block's height and hash where we
// have both the header and filter header ready.
|
[
"BestBlock",
"retrieves",
"the",
"most",
"recent",
"block",
"s",
"height",
"and",
"hash",
"where",
"we",
"have",
"both",
"the",
"header",
"and",
"filter",
"header",
"ready",
"."
] |
a655679fe131a5d1b4417872cc834fc3862ac70e
|
https://github.com/lightninglabs/neutrino/blob/a655679fe131a5d1b4417872cc834fc3862ac70e/neutrino.go#L838-L865
|
train
|
lightninglabs/neutrino
|
neutrino.go
|
GetBlockHash
|
func (s *ChainService) GetBlockHash(height int64) (*chainhash.Hash, error) {
header, err := s.BlockHeaders.FetchHeaderByHeight(uint32(height))
if err != nil {
return nil, err
}
hash := header.BlockHash()
return &hash, err
}
|
go
|
func (s *ChainService) GetBlockHash(height int64) (*chainhash.Hash, error) {
header, err := s.BlockHeaders.FetchHeaderByHeight(uint32(height))
if err != nil {
return nil, err
}
hash := header.BlockHash()
return &hash, err
}
|
[
"func",
"(",
"s",
"*",
"ChainService",
")",
"GetBlockHash",
"(",
"height",
"int64",
")",
"(",
"*",
"chainhash",
".",
"Hash",
",",
"error",
")",
"{",
"header",
",",
"err",
":=",
"s",
".",
"BlockHeaders",
".",
"FetchHeaderByHeight",
"(",
"uint32",
"(",
"height",
")",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n",
"hash",
":=",
"header",
".",
"BlockHash",
"(",
")",
"\n",
"return",
"&",
"hash",
",",
"err",
"\n",
"}"
] |
// GetBlockHash returns the block hash at the given height.
|
[
"GetBlockHash",
"returns",
"the",
"block",
"hash",
"at",
"the",
"given",
"height",
"."
] |
a655679fe131a5d1b4417872cc834fc3862ac70e
|
https://github.com/lightninglabs/neutrino/blob/a655679fe131a5d1b4417872cc834fc3862ac70e/neutrino.go#L868-L875
|
train
|
lightninglabs/neutrino
|
neutrino.go
|
GetBlockHeader
|
func (s *ChainService) GetBlockHeader(
blockHash *chainhash.Hash) (*wire.BlockHeader, error) {
header, _, err := s.BlockHeaders.FetchHeader(blockHash)
return header, err
}
|
go
|
func (s *ChainService) GetBlockHeader(
blockHash *chainhash.Hash) (*wire.BlockHeader, error) {
header, _, err := s.BlockHeaders.FetchHeader(blockHash)
return header, err
}
|
[
"func",
"(",
"s",
"*",
"ChainService",
")",
"GetBlockHeader",
"(",
"blockHash",
"*",
"chainhash",
".",
"Hash",
")",
"(",
"*",
"wire",
".",
"BlockHeader",
",",
"error",
")",
"{",
"header",
",",
"_",
",",
"err",
":=",
"s",
".",
"BlockHeaders",
".",
"FetchHeader",
"(",
"blockHash",
")",
"\n",
"return",
"header",
",",
"err",
"\n",
"}"
] |
// GetBlockHeader returns the block header for the given block hash, or an
// error if the hash doesn't exist or is unknown.
|
[
"GetBlockHeader",
"returns",
"the",
"block",
"header",
"for",
"the",
"given",
"block",
"hash",
"or",
"an",
"error",
"if",
"the",
"hash",
"doesn",
"t",
"exist",
"or",
"is",
"unknown",
"."
] |
a655679fe131a5d1b4417872cc834fc3862ac70e
|
https://github.com/lightninglabs/neutrino/blob/a655679fe131a5d1b4417872cc834fc3862ac70e/neutrino.go#L879-L883
|
train
|
lightninglabs/neutrino
|
neutrino.go
|
GetBlockHeight
|
func (s *ChainService) GetBlockHeight(hash *chainhash.Hash) (int32, error) {
_, height, err := s.BlockHeaders.FetchHeader(hash)
if err != nil {
return 0, err
}
return int32(height), nil
}
|
go
|
func (s *ChainService) GetBlockHeight(hash *chainhash.Hash) (int32, error) {
_, height, err := s.BlockHeaders.FetchHeader(hash)
if err != nil {
return 0, err
}
return int32(height), nil
}
|
[
"func",
"(",
"s",
"*",
"ChainService",
")",
"GetBlockHeight",
"(",
"hash",
"*",
"chainhash",
".",
"Hash",
")",
"(",
"int32",
",",
"error",
")",
"{",
"_",
",",
"height",
",",
"err",
":=",
"s",
".",
"BlockHeaders",
".",
"FetchHeader",
"(",
"hash",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"0",
",",
"err",
"\n",
"}",
"\n",
"return",
"int32",
"(",
"height",
")",
",",
"nil",
"\n",
"}"
] |
// GetBlockHeight gets the height of a block by its hash. An error is returned
// if the given block hash is unknown.
|
[
"GetBlockHeight",
"gets",
"the",
"height",
"of",
"a",
"block",
"by",
"its",
"hash",
".",
"An",
"error",
"is",
"returned",
"if",
"the",
"given",
"block",
"hash",
"is",
"unknown",
"."
] |
a655679fe131a5d1b4417872cc834fc3862ac70e
|
https://github.com/lightninglabs/neutrino/blob/a655679fe131a5d1b4417872cc834fc3862ac70e/neutrino.go#L887-L893
|
train
|
lightninglabs/neutrino
|
neutrino.go
|
BanPeer
|
func (s *ChainService) BanPeer(sp *ServerPeer) {
select {
case s.banPeers <- sp:
case <-s.quit:
return
}
}
|
go
|
func (s *ChainService) BanPeer(sp *ServerPeer) {
select {
case s.banPeers <- sp:
case <-s.quit:
return
}
}
|
[
"func",
"(",
"s",
"*",
"ChainService",
")",
"BanPeer",
"(",
"sp",
"*",
"ServerPeer",
")",
"{",
"select",
"{",
"case",
"s",
".",
"banPeers",
"<-",
"sp",
":",
"case",
"<-",
"s",
".",
"quit",
":",
"return",
"\n",
"}",
"\n",
"}"
] |
// BanPeer bans a peer that has already been connected to the server by ip.
|
[
"BanPeer",
"bans",
"a",
"peer",
"that",
"has",
"already",
"been",
"connected",
"to",
"the",
"server",
"by",
"ip",
"."
] |
a655679fe131a5d1b4417872cc834fc3862ac70e
|
https://github.com/lightninglabs/neutrino/blob/a655679fe131a5d1b4417872cc834fc3862ac70e/neutrino.go#L896-L902
|
train
|
lightninglabs/neutrino
|
neutrino.go
|
AddPeer
|
func (s *ChainService) AddPeer(sp *ServerPeer) {
select {
case s.newPeers <- sp:
case <-s.quit:
return
}
}
|
go
|
func (s *ChainService) AddPeer(sp *ServerPeer) {
select {
case s.newPeers <- sp:
case <-s.quit:
return
}
}
|
[
"func",
"(",
"s",
"*",
"ChainService",
")",
"AddPeer",
"(",
"sp",
"*",
"ServerPeer",
")",
"{",
"select",
"{",
"case",
"s",
".",
"newPeers",
"<-",
"sp",
":",
"case",
"<-",
"s",
".",
"quit",
":",
"return",
"\n",
"}",
"\n",
"}"
] |
// AddPeer adds a new peer that has already been connected to the server.
|
[
"AddPeer",
"adds",
"a",
"new",
"peer",
"that",
"has",
"already",
"been",
"connected",
"to",
"the",
"server",
"."
] |
a655679fe131a5d1b4417872cc834fc3862ac70e
|
https://github.com/lightninglabs/neutrino/blob/a655679fe131a5d1b4417872cc834fc3862ac70e/neutrino.go#L905-L911
|
train
|
lightninglabs/neutrino
|
neutrino.go
|
rollBackToHeight
|
func (s *ChainService) rollBackToHeight(height uint32) (*waddrmgr.BlockStamp, error) {
header, headerHeight, err := s.BlockHeaders.ChainTip()
if err != nil {
return nil, err
}
bs := &waddrmgr.BlockStamp{
Height: int32(headerHeight),
Hash: header.BlockHash(),
}
_, regHeight, err := s.RegFilterHeaders.ChainTip()
if err != nil {
return nil, err
}
for uint32(bs.Height) > height {
header, _, err := s.BlockHeaders.FetchHeader(&bs.Hash)
if err != nil {
return nil, err
}
newTip := &header.PrevBlock
// Only roll back filter headers if they've caught up this far.
if uint32(bs.Height) <= regHeight {
newFilterTip, err := s.RegFilterHeaders.RollbackLastBlock(newTip)
if err != nil {
return nil, err
}
regHeight = uint32(newFilterTip.Height)
}
bs, err = s.BlockHeaders.RollbackLastBlock()
if err != nil {
return nil, err
}
// Notifications are asynchronous, so we include the previous
// header in the disconnected notification in case we're rolling
// back farther and the notification subscriber needs it but
// can't read it before it's deleted from the store.
prevHeader, _, err := s.BlockHeaders.FetchHeader(newTip)
if err != nil {
return nil, err
}
// Now we send the block disconnected notifications.
s.blockManager.onBlockDisconnected(
*header, headerHeight, *prevHeader,
)
}
return bs, nil
}
|
go
|
func (s *ChainService) rollBackToHeight(height uint32) (*waddrmgr.BlockStamp, error) {
header, headerHeight, err := s.BlockHeaders.ChainTip()
if err != nil {
return nil, err
}
bs := &waddrmgr.BlockStamp{
Height: int32(headerHeight),
Hash: header.BlockHash(),
}
_, regHeight, err := s.RegFilterHeaders.ChainTip()
if err != nil {
return nil, err
}
for uint32(bs.Height) > height {
header, _, err := s.BlockHeaders.FetchHeader(&bs.Hash)
if err != nil {
return nil, err
}
newTip := &header.PrevBlock
// Only roll back filter headers if they've caught up this far.
if uint32(bs.Height) <= regHeight {
newFilterTip, err := s.RegFilterHeaders.RollbackLastBlock(newTip)
if err != nil {
return nil, err
}
regHeight = uint32(newFilterTip.Height)
}
bs, err = s.BlockHeaders.RollbackLastBlock()
if err != nil {
return nil, err
}
// Notifications are asynchronous, so we include the previous
// header in the disconnected notification in case we're rolling
// back farther and the notification subscriber needs it but
// can't read it before it's deleted from the store.
prevHeader, _, err := s.BlockHeaders.FetchHeader(newTip)
if err != nil {
return nil, err
}
// Now we send the block disconnected notifications.
s.blockManager.onBlockDisconnected(
*header, headerHeight, *prevHeader,
)
}
return bs, nil
}
|
[
"func",
"(",
"s",
"*",
"ChainService",
")",
"rollBackToHeight",
"(",
"height",
"uint32",
")",
"(",
"*",
"waddrmgr",
".",
"BlockStamp",
",",
"error",
")",
"{",
"header",
",",
"headerHeight",
",",
"err",
":=",
"s",
".",
"BlockHeaders",
".",
"ChainTip",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n",
"bs",
":=",
"&",
"waddrmgr",
".",
"BlockStamp",
"{",
"Height",
":",
"int32",
"(",
"headerHeight",
")",
",",
"Hash",
":",
"header",
".",
"BlockHash",
"(",
")",
",",
"}",
"\n\n",
"_",
",",
"regHeight",
",",
"err",
":=",
"s",
".",
"RegFilterHeaders",
".",
"ChainTip",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n\n",
"for",
"uint32",
"(",
"bs",
".",
"Height",
")",
">",
"height",
"{",
"header",
",",
"_",
",",
"err",
":=",
"s",
".",
"BlockHeaders",
".",
"FetchHeader",
"(",
"&",
"bs",
".",
"Hash",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n\n",
"newTip",
":=",
"&",
"header",
".",
"PrevBlock",
"\n\n",
"// Only roll back filter headers if they've caught up this far.",
"if",
"uint32",
"(",
"bs",
".",
"Height",
")",
"<=",
"regHeight",
"{",
"newFilterTip",
",",
"err",
":=",
"s",
".",
"RegFilterHeaders",
".",
"RollbackLastBlock",
"(",
"newTip",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n",
"regHeight",
"=",
"uint32",
"(",
"newFilterTip",
".",
"Height",
")",
"\n",
"}",
"\n\n",
"bs",
",",
"err",
"=",
"s",
".",
"BlockHeaders",
".",
"RollbackLastBlock",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n\n",
"// Notifications are asynchronous, so we include the previous",
"// header in the disconnected notification in case we're rolling",
"// back farther and the notification subscriber needs it but",
"// can't read it before it's deleted from the store.",
"prevHeader",
",",
"_",
",",
"err",
":=",
"s",
".",
"BlockHeaders",
".",
"FetchHeader",
"(",
"newTip",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n\n",
"// Now we send the block disconnected notifications.",
"s",
".",
"blockManager",
".",
"onBlockDisconnected",
"(",
"*",
"header",
",",
"headerHeight",
",",
"*",
"prevHeader",
",",
")",
"\n",
"}",
"\n",
"return",
"bs",
",",
"nil",
"\n",
"}"
] |
// rollBackToHeight rolls back all blocks until it hits the specified height.
// It sends notifications along the way.
|
[
"rollBackToHeight",
"rolls",
"back",
"all",
"blocks",
"until",
"it",
"hits",
"the",
"specified",
"height",
".",
"It",
"sends",
"notifications",
"along",
"the",
"way",
"."
] |
a655679fe131a5d1b4417872cc834fc3862ac70e
|
https://github.com/lightninglabs/neutrino/blob/a655679fe131a5d1b4417872cc834fc3862ac70e/neutrino.go#L934-L986
|
train
|
lightninglabs/neutrino
|
neutrino.go
|
isBanned
|
func (s *ChainService) isBanned(addr string, state *peerState) bool {
// First, we'll extract the host so we can consider it without taking
// into account the target port.
host, _, err := net.SplitHostPort(addr)
if err != nil {
log.Debugf("can't split host/port: %s", err)
return false
}
// With the host obtained, we'll check on the ban status of this peer.
if banEnd, ok := state.banned[host]; ok {
// If the ban duration of this peer is still active, then we'll
// ignore it for now as it's still banned.
if time.Now().Before(banEnd) {
log.Debugf("Peer %s is banned for another %v - ignoring",
host, banEnd.Sub(time.Now()))
return true
}
// Otherwise, the peer was banned in the past, but is no longer
// banned, so we'll remove this ban entry and return back to
// the caller.
log.Infof("Peer %s is no longer banned", host)
delete(state.banned, host)
return false
}
return false
}
|
go
|
func (s *ChainService) isBanned(addr string, state *peerState) bool {
// First, we'll extract the host so we can consider it without taking
// into account the target port.
host, _, err := net.SplitHostPort(addr)
if err != nil {
log.Debugf("can't split host/port: %s", err)
return false
}
// With the host obtained, we'll check on the ban status of this peer.
if banEnd, ok := state.banned[host]; ok {
// If the ban duration of this peer is still active, then we'll
// ignore it for now as it's still banned.
if time.Now().Before(banEnd) {
log.Debugf("Peer %s is banned for another %v - ignoring",
host, banEnd.Sub(time.Now()))
return true
}
// Otherwise, the peer was banned in the past, but is no longer
// banned, so we'll remove this ban entry and return back to
// the caller.
log.Infof("Peer %s is no longer banned", host)
delete(state.banned, host)
return false
}
return false
}
|
[
"func",
"(",
"s",
"*",
"ChainService",
")",
"isBanned",
"(",
"addr",
"string",
",",
"state",
"*",
"peerState",
")",
"bool",
"{",
"// First, we'll extract the host so we can consider it without taking",
"// into account the target port.",
"host",
",",
"_",
",",
"err",
":=",
"net",
".",
"SplitHostPort",
"(",
"addr",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"log",
".",
"Debugf",
"(",
"\"",
"\"",
",",
"err",
")",
"\n",
"return",
"false",
"\n",
"}",
"\n\n",
"// With the host obtained, we'll check on the ban status of this peer.",
"if",
"banEnd",
",",
"ok",
":=",
"state",
".",
"banned",
"[",
"host",
"]",
";",
"ok",
"{",
"// If the ban duration of this peer is still active, then we'll",
"// ignore it for now as it's still banned.",
"if",
"time",
".",
"Now",
"(",
")",
".",
"Before",
"(",
"banEnd",
")",
"{",
"log",
".",
"Debugf",
"(",
"\"",
"\"",
",",
"host",
",",
"banEnd",
".",
"Sub",
"(",
"time",
".",
"Now",
"(",
")",
")",
")",
"\n",
"return",
"true",
"\n",
"}",
"\n\n",
"// Otherwise, the peer was banned in the past, but is no longer",
"// banned, so we'll remove this ban entry and return back to",
"// the caller.",
"log",
".",
"Infof",
"(",
"\"",
"\"",
",",
"host",
")",
"\n",
"delete",
"(",
"state",
".",
"banned",
",",
"host",
")",
"\n",
"return",
"false",
"\n",
"}",
"\n\n",
"return",
"false",
"\n",
"}"
] |
// isBanned returns true if the passed peer address is still considered to be
// banned.
|
[
"isBanned",
"returns",
"true",
"if",
"the",
"passed",
"peer",
"address",
"is",
"still",
"considered",
"to",
"be",
"banned",
"."
] |
a655679fe131a5d1b4417872cc834fc3862ac70e
|
https://github.com/lightninglabs/neutrino/blob/a655679fe131a5d1b4417872cc834fc3862ac70e/neutrino.go#L1146-L1174
|
train
|
lightninglabs/neutrino
|
neutrino.go
|
SendTransaction
|
func (s *ChainService) SendTransaction(tx *wire.MsgTx) error {
// TODO(roasbeef): pipe through querying interface
return s.broadcaster.Broadcast(tx)
}
|
go
|
func (s *ChainService) SendTransaction(tx *wire.MsgTx) error {
// TODO(roasbeef): pipe through querying interface
return s.broadcaster.Broadcast(tx)
}
|
[
"func",
"(",
"s",
"*",
"ChainService",
")",
"SendTransaction",
"(",
"tx",
"*",
"wire",
".",
"MsgTx",
")",
"error",
"{",
"// TODO(roasbeef): pipe through querying interface",
"return",
"s",
".",
"broadcaster",
".",
"Broadcast",
"(",
"tx",
")",
"\n",
"}"
] |
// SendTransaction broadcasts the transaction to all currently active peers so
// it can be propagated to other nodes and eventually mined. An error won't be
// returned if the transaction already exists within the mempool. Any
// transaction broadcast through this method will be rebroadcast upon every
// change of the tip of the chain.
|
[
"SendTransaction",
"broadcasts",
"the",
"transaction",
"to",
"all",
"currently",
"active",
"peers",
"so",
"it",
"can",
"be",
"propagated",
"to",
"other",
"nodes",
"and",
"eventually",
"mined",
".",
"An",
"error",
"won",
"t",
"be",
"returned",
"if",
"the",
"transaction",
"already",
"exists",
"within",
"the",
"mempool",
".",
"Any",
"transaction",
"broadcast",
"through",
"this",
"method",
"will",
"be",
"rebroadcast",
"upon",
"every",
"change",
"of",
"the",
"tip",
"of",
"the",
"chain",
"."
] |
a655679fe131a5d1b4417872cc834fc3862ac70e
|
https://github.com/lightninglabs/neutrino/blob/a655679fe131a5d1b4417872cc834fc3862ac70e/neutrino.go#L1311-L1314
|
train
|
lightninglabs/neutrino
|
neutrino.go
|
newPeerConfig
|
func newPeerConfig(sp *ServerPeer) *peer.Config {
return &peer.Config{
Listeners: peer.MessageListeners{
OnVersion: sp.OnVersion,
//OnVerAck: sp.OnVerAck, // Don't use sendheaders yet
OnInv: sp.OnInv,
OnHeaders: sp.OnHeaders,
OnReject: sp.OnReject,
OnFeeFilter: sp.OnFeeFilter,
OnAddr: sp.OnAddr,
OnRead: sp.OnRead,
OnWrite: sp.OnWrite,
// Note: The reference client currently bans peers that send alerts
// not signed with its key. We could verify against their key, but
// since the reference client is currently unwilling to support
// other implementations' alert messages, we will not relay theirs.
OnAlert: nil,
},
NewestBlock: sp.newestBlock,
HostToNetAddress: sp.server.addrManager.HostToNetAddress,
UserAgentName: sp.server.userAgentName,
UserAgentVersion: sp.server.userAgentVersion,
ChainParams: &sp.server.chainParams,
Services: sp.server.services,
ProtocolVersion: wire.FeeFilterVersion,
DisableRelayTx: true,
}
}
|
go
|
func newPeerConfig(sp *ServerPeer) *peer.Config {
return &peer.Config{
Listeners: peer.MessageListeners{
OnVersion: sp.OnVersion,
//OnVerAck: sp.OnVerAck, // Don't use sendheaders yet
OnInv: sp.OnInv,
OnHeaders: sp.OnHeaders,
OnReject: sp.OnReject,
OnFeeFilter: sp.OnFeeFilter,
OnAddr: sp.OnAddr,
OnRead: sp.OnRead,
OnWrite: sp.OnWrite,
// Note: The reference client currently bans peers that send alerts
// not signed with its key. We could verify against their key, but
// since the reference client is currently unwilling to support
// other implementations' alert messages, we will not relay theirs.
OnAlert: nil,
},
NewestBlock: sp.newestBlock,
HostToNetAddress: sp.server.addrManager.HostToNetAddress,
UserAgentName: sp.server.userAgentName,
UserAgentVersion: sp.server.userAgentVersion,
ChainParams: &sp.server.chainParams,
Services: sp.server.services,
ProtocolVersion: wire.FeeFilterVersion,
DisableRelayTx: true,
}
}
|
[
"func",
"newPeerConfig",
"(",
"sp",
"*",
"ServerPeer",
")",
"*",
"peer",
".",
"Config",
"{",
"return",
"&",
"peer",
".",
"Config",
"{",
"Listeners",
":",
"peer",
".",
"MessageListeners",
"{",
"OnVersion",
":",
"sp",
".",
"OnVersion",
",",
"//OnVerAck: sp.OnVerAck, // Don't use sendheaders yet",
"OnInv",
":",
"sp",
".",
"OnInv",
",",
"OnHeaders",
":",
"sp",
".",
"OnHeaders",
",",
"OnReject",
":",
"sp",
".",
"OnReject",
",",
"OnFeeFilter",
":",
"sp",
".",
"OnFeeFilter",
",",
"OnAddr",
":",
"sp",
".",
"OnAddr",
",",
"OnRead",
":",
"sp",
".",
"OnRead",
",",
"OnWrite",
":",
"sp",
".",
"OnWrite",
",",
"// Note: The reference client currently bans peers that send alerts",
"// not signed with its key. We could verify against their key, but",
"// since the reference client is currently unwilling to support",
"// other implementations' alert messages, we will not relay theirs.",
"OnAlert",
":",
"nil",
",",
"}",
",",
"NewestBlock",
":",
"sp",
".",
"newestBlock",
",",
"HostToNetAddress",
":",
"sp",
".",
"server",
".",
"addrManager",
".",
"HostToNetAddress",
",",
"UserAgentName",
":",
"sp",
".",
"server",
".",
"userAgentName",
",",
"UserAgentVersion",
":",
"sp",
".",
"server",
".",
"userAgentVersion",
",",
"ChainParams",
":",
"&",
"sp",
".",
"server",
".",
"chainParams",
",",
"Services",
":",
"sp",
".",
"server",
".",
"services",
",",
"ProtocolVersion",
":",
"wire",
".",
"FeeFilterVersion",
",",
"DisableRelayTx",
":",
"true",
",",
"}",
"\n",
"}"
] |
// newPeerConfig returns the configuration for the given ServerPeer.
|
[
"newPeerConfig",
"returns",
"the",
"configuration",
"for",
"the",
"given",
"ServerPeer",
"."
] |
a655679fe131a5d1b4417872cc834fc3862ac70e
|
https://github.com/lightninglabs/neutrino/blob/a655679fe131a5d1b4417872cc834fc3862ac70e/neutrino.go#L1317-L1345
|
train
|
lightninglabs/neutrino
|
neutrino.go
|
Start
|
func (s *ChainService) Start() error {
// Already started?
if atomic.AddInt32(&s.started, 1) != 1 {
return nil
}
// Start the address manager and block manager, both of which are
// needed by peers.
s.addrManager.Start()
s.blockManager.Start()
s.blockSubscriptionMgr.Start()
s.utxoScanner.Start()
if err := s.broadcaster.Start(); err != nil {
return fmt.Errorf("unable to start transaction broadcaster: %v",
err)
}
go s.connManager.Start()
// Start the peer handler which in turn starts the address and block
// managers.
s.wg.Add(1)
go s.peerHandler()
return nil
}
|
go
|
func (s *ChainService) Start() error {
// Already started?
if atomic.AddInt32(&s.started, 1) != 1 {
return nil
}
// Start the address manager and block manager, both of which are
// needed by peers.
s.addrManager.Start()
s.blockManager.Start()
s.blockSubscriptionMgr.Start()
s.utxoScanner.Start()
if err := s.broadcaster.Start(); err != nil {
return fmt.Errorf("unable to start transaction broadcaster: %v",
err)
}
go s.connManager.Start()
// Start the peer handler which in turn starts the address and block
// managers.
s.wg.Add(1)
go s.peerHandler()
return nil
}
|
[
"func",
"(",
"s",
"*",
"ChainService",
")",
"Start",
"(",
")",
"error",
"{",
"// Already started?",
"if",
"atomic",
".",
"AddInt32",
"(",
"&",
"s",
".",
"started",
",",
"1",
")",
"!=",
"1",
"{",
"return",
"nil",
"\n",
"}",
"\n\n",
"// Start the address manager and block manager, both of which are",
"// needed by peers.",
"s",
".",
"addrManager",
".",
"Start",
"(",
")",
"\n",
"s",
".",
"blockManager",
".",
"Start",
"(",
")",
"\n",
"s",
".",
"blockSubscriptionMgr",
".",
"Start",
"(",
")",
"\n\n",
"s",
".",
"utxoScanner",
".",
"Start",
"(",
")",
"\n\n",
"if",
"err",
":=",
"s",
".",
"broadcaster",
".",
"Start",
"(",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"err",
")",
"\n",
"}",
"\n\n",
"go",
"s",
".",
"connManager",
".",
"Start",
"(",
")",
"\n\n",
"// Start the peer handler which in turn starts the address and block",
"// managers.",
"s",
".",
"wg",
".",
"Add",
"(",
"1",
")",
"\n",
"go",
"s",
".",
"peerHandler",
"(",
")",
"\n\n",
"return",
"nil",
"\n",
"}"
] |
// Start begins connecting to peers and syncing the blockchain.
|
[
"Start",
"begins",
"connecting",
"to",
"peers",
"and",
"syncing",
"the",
"blockchain",
"."
] |
a655679fe131a5d1b4417872cc834fc3862ac70e
|
https://github.com/lightninglabs/neutrino/blob/a655679fe131a5d1b4417872cc834fc3862ac70e/neutrino.go#L1423-L1450
|
train
|
lightninglabs/neutrino
|
neutrino.go
|
PeerByAddr
|
func (s *ChainService) PeerByAddr(addr string) *ServerPeer {
for _, peer := range s.Peers() {
if peer.Addr() == addr {
return peer
}
}
return nil
}
|
go
|
func (s *ChainService) PeerByAddr(addr string) *ServerPeer {
for _, peer := range s.Peers() {
if peer.Addr() == addr {
return peer
}
}
return nil
}
|
[
"func",
"(",
"s",
"*",
"ChainService",
")",
"PeerByAddr",
"(",
"addr",
"string",
")",
"*",
"ServerPeer",
"{",
"for",
"_",
",",
"peer",
":=",
"range",
"s",
".",
"Peers",
"(",
")",
"{",
"if",
"peer",
".",
"Addr",
"(",
")",
"==",
"addr",
"{",
"return",
"peer",
"\n",
"}",
"\n",
"}",
"\n",
"return",
"nil",
"\n",
"}"
] |
// PeerByAddr lets the caller look up a peer address in the service's peer
// table, if connected to that peer address.
|
[
"PeerByAddr",
"lets",
"the",
"caller",
"look",
"up",
"a",
"peer",
"address",
"in",
"the",
"service",
"s",
"peer",
"table",
"if",
"connected",
"to",
"that",
"peer",
"address",
"."
] |
a655679fe131a5d1b4417872cc834fc3862ac70e
|
https://github.com/lightninglabs/neutrino/blob/a655679fe131a5d1b4417872cc834fc3862ac70e/neutrino.go#L1481-L1488
|
train
|
lightninglabs/neutrino
|
neutrino.go
|
GetBlockHeaderByHeight
|
func (s *RescanChainSource) GetBlockHeaderByHeight(
height uint32) (*wire.BlockHeader, error) {
return s.BlockHeaders.FetchHeaderByHeight(height)
}
|
go
|
func (s *RescanChainSource) GetBlockHeaderByHeight(
height uint32) (*wire.BlockHeader, error) {
return s.BlockHeaders.FetchHeaderByHeight(height)
}
|
[
"func",
"(",
"s",
"*",
"RescanChainSource",
")",
"GetBlockHeaderByHeight",
"(",
"height",
"uint32",
")",
"(",
"*",
"wire",
".",
"BlockHeader",
",",
"error",
")",
"{",
"return",
"s",
".",
"BlockHeaders",
".",
"FetchHeaderByHeight",
"(",
"height",
")",
"\n",
"}"
] |
// GetBlockHeaderByHeight returns the header of the block with the given height.
|
[
"GetBlockHeaderByHeight",
"returns",
"the",
"header",
"of",
"the",
"block",
"with",
"the",
"given",
"height",
"."
] |
a655679fe131a5d1b4417872cc834fc3862ac70e
|
https://github.com/lightninglabs/neutrino/blob/a655679fe131a5d1b4417872cc834fc3862ac70e/neutrino.go#L1501-L1504
|
train
|
lightninglabs/neutrino
|
neutrino.go
|
GetBlockHeader
|
func (s *RescanChainSource) GetBlockHeader(
hash *chainhash.Hash) (*wire.BlockHeader, uint32, error) {
return s.BlockHeaders.FetchHeader(hash)
}
|
go
|
func (s *RescanChainSource) GetBlockHeader(
hash *chainhash.Hash) (*wire.BlockHeader, uint32, error) {
return s.BlockHeaders.FetchHeader(hash)
}
|
[
"func",
"(",
"s",
"*",
"RescanChainSource",
")",
"GetBlockHeader",
"(",
"hash",
"*",
"chainhash",
".",
"Hash",
")",
"(",
"*",
"wire",
".",
"BlockHeader",
",",
"uint32",
",",
"error",
")",
"{",
"return",
"s",
".",
"BlockHeaders",
".",
"FetchHeader",
"(",
"hash",
")",
"\n",
"}"
] |
// GetBlockHeader returns the header of the block with the given hash.
|
[
"GetBlockHeader",
"returns",
"the",
"header",
"of",
"the",
"block",
"with",
"the",
"given",
"hash",
"."
] |
a655679fe131a5d1b4417872cc834fc3862ac70e
|
https://github.com/lightninglabs/neutrino/blob/a655679fe131a5d1b4417872cc834fc3862ac70e/neutrino.go#L1507-L1510
|
train
|
lightninglabs/neutrino
|
neutrino.go
|
GetFilterHeaderByHeight
|
func (s *RescanChainSource) GetFilterHeaderByHeight(
height uint32) (*chainhash.Hash, error) {
return s.RegFilterHeaders.FetchHeaderByHeight(height)
}
|
go
|
func (s *RescanChainSource) GetFilterHeaderByHeight(
height uint32) (*chainhash.Hash, error) {
return s.RegFilterHeaders.FetchHeaderByHeight(height)
}
|
[
"func",
"(",
"s",
"*",
"RescanChainSource",
")",
"GetFilterHeaderByHeight",
"(",
"height",
"uint32",
")",
"(",
"*",
"chainhash",
".",
"Hash",
",",
"error",
")",
"{",
"return",
"s",
".",
"RegFilterHeaders",
".",
"FetchHeaderByHeight",
"(",
"height",
")",
"\n",
"}"
] |
// GetFilterHeaderByHeight returns the filter header of the block with the given
// height.
|
[
"GetFilterHeaderByHeight",
"returns",
"the",
"filter",
"header",
"of",
"the",
"block",
"with",
"the",
"given",
"height",
"."
] |
a655679fe131a5d1b4417872cc834fc3862ac70e
|
https://github.com/lightninglabs/neutrino/blob/a655679fe131a5d1b4417872cc834fc3862ac70e/neutrino.go#L1514-L1517
|
train
|
lightninglabs/neutrino
|
neutrino.go
|
Subscribe
|
func (s *RescanChainSource) Subscribe(
bestHeight uint32) (*blockntfns.Subscription, error) {
return s.blockSubscriptionMgr.NewSubscription(bestHeight)
}
|
go
|
func (s *RescanChainSource) Subscribe(
bestHeight uint32) (*blockntfns.Subscription, error) {
return s.blockSubscriptionMgr.NewSubscription(bestHeight)
}
|
[
"func",
"(",
"s",
"*",
"RescanChainSource",
")",
"Subscribe",
"(",
"bestHeight",
"uint32",
")",
"(",
"*",
"blockntfns",
".",
"Subscription",
",",
"error",
")",
"{",
"return",
"s",
".",
"blockSubscriptionMgr",
".",
"NewSubscription",
"(",
"bestHeight",
")",
"\n",
"}"
] |
// Subscribe returns a block subscription that delivers block notifications in
// order. The bestHeight parameter can be used to signal that a backlog of
// notifications should be delivered from this height. When providing a height
// of 0, a backlog will not be delivered.
|
[
"Subscribe",
"returns",
"a",
"block",
"subscription",
"that",
"delivers",
"block",
"notifications",
"in",
"order",
".",
"The",
"bestHeight",
"parameter",
"can",
"be",
"used",
"to",
"signal",
"that",
"a",
"backlog",
"of",
"notifications",
"should",
"be",
"delivered",
"from",
"this",
"height",
".",
"When",
"providing",
"a",
"height",
"of",
"0",
"a",
"backlog",
"will",
"not",
"be",
"delivered",
"."
] |
a655679fe131a5d1b4417872cc834fc3862ac70e
|
https://github.com/lightninglabs/neutrino/blob/a655679fe131a5d1b4417872cc834fc3862ac70e/neutrino.go#L1523-L1526
|
train
|
lightninglabs/neutrino
|
blockntfns/notification.go
|
NewBlockConnected
|
func NewBlockConnected(header wire.BlockHeader, height uint32) *Connected {
return &Connected{header: header, height: height}
}
|
go
|
func NewBlockConnected(header wire.BlockHeader, height uint32) *Connected {
return &Connected{header: header, height: height}
}
|
[
"func",
"NewBlockConnected",
"(",
"header",
"wire",
".",
"BlockHeader",
",",
"height",
"uint32",
")",
"*",
"Connected",
"{",
"return",
"&",
"Connected",
"{",
"header",
":",
"header",
",",
"height",
":",
"height",
"}",
"\n",
"}"
] |
// NewBlockConnected creates a new Connected notification for the given block.
|
[
"NewBlockConnected",
"creates",
"a",
"new",
"Connected",
"notification",
"for",
"the",
"given",
"block",
"."
] |
a655679fe131a5d1b4417872cc834fc3862ac70e
|
https://github.com/lightninglabs/neutrino/blob/a655679fe131a5d1b4417872cc834fc3862ac70e/blockntfns/notification.go#L36-L38
|
train
|
Subsets and Splits
SQL Console for semeru/code-text-go
Retrieves a limited set of code samples with their languages, with a specific case adjustment for 'Go' language.