_id
stringlengths
2
7
title
stringlengths
1
118
partition
stringclasses
3 values
text
stringlengths
52
85.5k
language
stringclasses
1 value
meta_information
dict
q13300
Sample
train
// Sample adds a sample to the counter, scaled up by the inverse of the sample
// rate to estimate the unsampled count.
// NOTE(review): int64(1/sampleRate) truncates the scaling factor (e.g. a rate
// of 0.4 scales by 2, not 2.5), which undercounts for non-reciprocal-integer
// rates — confirm whether this truncation is intentional.
func (c *Counter) Sample(sample float64, sampleRate float32) { c.value += int64(sample) * int64(1/sampleRate) }
go
{ "resource": "" }
q13301
Flush
train
// Flush converts the counter into a single InterMetric of type CounterMetric,
// stamped with the current time. Tags are copied so the returned metric does
// not alias the counter's own slice. The interval parameter is accepted but
// not used here.
func (c *Counter) Flush(interval time.Duration) []InterMetric { tags := make([]string, len(c.Tags)) copy(tags, c.Tags) return []InterMetric{{ Name: c.Name, Timestamp: time.Now().Unix(), Value: float64(c.value), Tags: tags, Type: CounterMetric, Sinks: routeInfo(tags), }} }
go
{ "resource": "" }
q13302
Metric
train
// Metric converts the counter into its protobuf representation
// (metricpb.Type_Counter with the current accumulated value). Never errors.
func (c *Counter) Metric() (*metricpb.Metric, error) { return &metricpb.Metric{ Name: c.Name, Tags: c.Tags, Type: metricpb.Type_Counter, Value: &metricpb.Metric_Counter{&metricpb.CounterValue{Value: c.value}}, }, nil }
go
{ "resource": "" }
q13303
Merge
train
// Merge adds the value from a protobuf CounterValue into this counter
// (counters combine by summation).
func (c *Counter) Merge(v *metricpb.CounterValue) { c.value += v.Value }
go
{ "resource": "" }
q13304
NewCounter
train
// NewCounter returns a new Counter with the given name and tags and a zero
// initial value.
// (Parameters renamed from the exported-style "Name"/"Tags" to idiomatic
// lowercase; Go parameters are positional, so callers are unaffected.)
func NewCounter(name string, tags []string) *Counter {
	return &Counter{Name: name, Tags: tags}
}
go
{ "resource": "" }
q13305
Flush
train
// Flush converts the gauge into a single InterMetric of type GaugeMetric,
// stamped with the current time. Tags are copied to avoid aliasing.
func (g *Gauge) Flush() []InterMetric { tags := make([]string, len(g.Tags)) copy(tags, g.Tags) return []InterMetric{{ Name: g.Name, Timestamp: time.Now().Unix(), Value: float64(g.value), Tags: tags, Type: GaugeMetric, Sinks: routeInfo(tags), }} }
go
{ "resource": "" }
q13306
Export
train
// Export serializes the gauge's value as little-endian bytes inside a
// JSONMetric keyed by name, type "gauge", and the comma-joined tags.
func (g *Gauge) Export() (JSONMetric, error) { var buf bytes.Buffer err := binary.Write(&buf, binary.LittleEndian, g.value) if err != nil { return JSONMetric{}, err } return JSONMetric{ MetricKey: MetricKey{ Name: g.Name, Type: "gauge", JoinedTags: strings.Join(g.Tags, ","), }, Tags: g.Tags, Value: buf.Bytes(), }, nil }
go
{ "resource": "" }
q13307
Metric
train
// Metric converts the gauge into its protobuf representation
// (metricpb.Type_Gauge with the current value). Never errors.
func (g *Gauge) Metric() (*metricpb.Metric, error) { return &metricpb.Metric{ Name: g.Name, Tags: g.Tags, Type: metricpb.Type_Gauge, Value: &metricpb.Metric_Gauge{&metricpb.GaugeValue{Value: g.value}}, }, nil }
go
{ "resource": "" }
q13308
Merge
train
// Merge overwrites the gauge with the incoming value — gauges combine
// last-write-wins, unlike counters which sum.
func (g *Gauge) Merge(v *metricpb.GaugeValue) { g.value = v.Value }
go
{ "resource": "" }
q13309
Flush
train
// Flush stamps the embedded InterMetric with the current time, marks it as a
// StatusMetric, computes its sink routing from its tags, and returns it.
// Note: this mutates the StatusCheck's embedded InterMetric in place.
func (s *StatusCheck) Flush() []InterMetric { s.Timestamp = time.Now().Unix() s.Type = StatusMetric s.Sinks = routeInfo(s.Tags) return []InterMetric{s.InterMetric} }
go
{ "resource": "" }
q13310
Export
train
// Export serializes the status check's value as little-endian bytes inside a
// JSONMetric keyed by name, type "status", and the comma-joined tags.
func (s *StatusCheck) Export() (JSONMetric, error) { var buf bytes.Buffer err := binary.Write(&buf, binary.LittleEndian, s.Value) if err != nil { return JSONMetric{}, err } return JSONMetric{ MetricKey: MetricKey{ Name: s.Name, Type: "status", JoinedTags: strings.Join(s.Tags, ","), }, Tags: s.Tags, Value: buf.Bytes(), }, nil }
go
{ "resource": "" }
q13311
Sample
train
// Sample inserts the sample string into the set's HyperLogLog sketch.
// sampleRate is ignored: set cardinality cannot be meaningfully scaled by a
// sample rate, since re-inserting the same member is idempotent.
func (s *Set) Sample(sample string, sampleRate float32) { s.Hll.Insert([]byte(sample)) }
go
{ "resource": "" }
q13312
NewSet
train
func NewSet(Name string, Tags []string) *Set { // error is only returned if precision is outside the 4-18 range // TODO: this is the maximum precision, should it be configurable? Hll := hyperloglog.New() return &Set{ Name: Name, Tags: Tags, Hll: Hll, } }
go
{ "resource": "" }
q13313
Flush
train
// Flush converts the set into a single InterMetric carrying the HLL's
// cardinality estimate. Note the emitted type is GaugeMetric: the estimate is
// reported as a point-in-time gauge value. Tags are copied to avoid aliasing.
func (s *Set) Flush() []InterMetric { tags := make([]string, len(s.Tags)) copy(tags, s.Tags) return []InterMetric{{ Name: s.Name, Timestamp: time.Now().Unix(), Value: float64(s.Hll.Estimate()), Tags: tags, Type: GaugeMetric, Sinks: routeInfo(tags), }} }
go
{ "resource": "" }
q13314
Export
train
// Export serializes the set's HyperLogLog sketch (via MarshalBinary) inside a
// JSONMetric keyed by name, type "set", and the comma-joined tags, so a
// downstream aggregator can merge sketches rather than raw values.
func (s *Set) Export() (JSONMetric, error) { val, err := s.Hll.MarshalBinary() if err != nil { return JSONMetric{}, err } return JSONMetric{ MetricKey: MetricKey{ Name: s.Name, Type: "set", JoinedTags: strings.Join(s.Tags, ","), }, Tags: s.Tags, Value: val, }, nil }
go
{ "resource": "" }
q13315
Metric
train
// Metric converts the set into its protobuf representation, embedding the
// binary-encoded HyperLogLog sketch. Returns an error only if the sketch
// fails to marshal.
func (s *Set) Metric() (*metricpb.Metric, error) { encoded, err := s.Hll.MarshalBinary() if err != nil { return nil, fmt.Errorf("failed to encode the HyperLogLog: %v", err) } return &metricpb.Metric{ Name: s.Name, Tags: s.Tags, Type: metricpb.Type_Set, Value: &metricpb.Metric_Set{&metricpb.SetValue{HyperLogLog: encoded}}, }, nil }
go
{ "resource": "" }
q13316
Merge
train
// Merge combines the encoded HyperLogLog from a protobuf SetValue into this
// set's sketch, delegating decode-and-union to Combine.
func (s *Set) Merge(v *metricpb.SetValue) error { return s.Combine(v.HyperLogLog) }
go
{ "resource": "" }
q13317
Sample
train
// Sample adds a weighted observation to the histogram's t-digest and updates
// the locally-tracked aggregates (weight, min, max, sum, reciprocal sum),
// weighting by 1/sampleRate to estimate unsampled totals.
// NOTE(review): a sample of 0 makes (1 / sample) +Inf, poisoning
// LocalReciprocalSum — confirm whether zero samples are excluded upstream.
func (h *Histo) Sample(sample float64, sampleRate float32) { weight := float64(1 / sampleRate) h.Value.Add(sample, weight) h.LocalWeight += weight h.LocalMin = math.Min(h.LocalMin, sample) h.LocalMax = math.Max(h.LocalMax, sample) h.LocalSum += sample * weight h.LocalReciprocalSum += (1 / sample) * weight }
go
{ "resource": "" }
q13318
NewHist
train
func NewHist(Name string, Tags []string) *Histo { return &Histo{ Name: Name, Tags: Tags, // we're going to allocate a lot of these, so we don't want them to be huge Value: tdigest.NewMerging(100, false), LocalMin: math.Inf(+1), LocalMax: math.Inf(-1), LocalSum: 0, } }
go
{ "resource": "" }
q13319
Export
train
// Export serializes the histogram's t-digest (via GobEncode) inside a
// JSONMetric keyed by name, type "histogram", and the comma-joined tags.
func (h *Histo) Export() (JSONMetric, error) { val, err := h.Value.GobEncode() if err != nil { return JSONMetric{}, err } return JSONMetric{ MetricKey: MetricKey{ Name: h.Name, Type: "histogram", JoinedTags: strings.Join(h.Tags, ","), }, Tags: h.Tags, Value: val, }, nil }
go
{ "resource": "" }
q13320
Metric
train
// Metric converts the histogram into its protobuf representation, embedding
// the raw t-digest data. Never errors.
func (h *Histo) Metric() (*metricpb.Metric, error) { return &metricpb.Metric{ Name: h.Name, Tags: h.Tags, Type: metricpb.Type_Histogram, Value: &metricpb.Metric_Histogram{&metricpb.HistogramValue{ TDigest: h.Value.Data(), }}, }, nil }
go
{ "resource": "" }
q13321
Merge
train
// Merge folds the t-digest from a protobuf HistogramValue into this
// histogram's digest; a nil TDigest is silently ignored.
func (h *Histo) Merge(v *metricpb.HistogramValue) { if v.TDigest != nil { h.Value.Merge(tdigest.NewMergingFromData(v.TDigest)) } }
go
{ "resource": "" }
q13322
ToPB
train
// ToPB maps a MetricScope to its protobuf Scope equivalent. Unknown scopes
// map to the zero protobuf value.
func (m MetricScope) ToPB() metricpb.Scope { switch m { case MixedScope: return metricpb.Scope_Mixed case LocalOnly: return metricpb.Scope_Local case GlobalOnly: return metricpb.Scope_Global } return 0 }
go
{ "resource": "" }
q13323
ScopeFromPB
train
// ScopeFromPB is the inverse of MetricScope.ToPB: it maps a protobuf Scope
// back to a MetricScope, with unknown values mapping to the zero scope.
func ScopeFromPB(scope metricpb.Scope) MetricScope { switch scope { case metricpb.Scope_Global: return GlobalOnly case metricpb.Scope_Local: return LocalOnly case metricpb.Scope_Mixed: return MixedScope } return 0 }
go
{ "resource": "" }
q13324
NewMetricKeyFromMetric
train
// NewMetricKeyFromMetric builds a MetricKey from a protobuf Metric: the name,
// the lowercased type name, and the tags joined by commas.
func NewMetricKeyFromMetric(m *metricpb.Metric) MetricKey { return MetricKey{ Name: m.Name, Type: strings.ToLower(m.Type.String()), JoinedTags: strings.Join(m.Tags, ","), } }
go
{ "resource": "" }
q13325
String
train
func (m MetricKey) String() string { var buff bytes.Buffer buff.WriteString(m.Name) buff.WriteString(m.Type) buff.WriteString(m.JoinedTags) return buff.String() }
go
{ "resource": "" }
q13326
ConvertMetrics
train
// ConvertMetrics parses every SSF sample attached to the span into a
// UDPMetric. Samples that fail to parse or validate are collected rather than
// aborting; when any exist, the successfully-parsed metrics are returned
// together with an *invalidMetrics error describing the rejects.
func ConvertMetrics(m *ssf.SSFSpan) ([]UDPMetric, error) { samples := m.Metrics metrics := make([]UDPMetric, 0, len(samples)+1) invalid := []*ssf.SSFSample{} for _, metricPacket := range samples { metric, err := ParseMetricSSF(metricPacket) if err != nil || !ValidMetric(metric) { invalid = append(invalid, metricPacket) continue } metrics = append(metrics, metric) } if len(invalid) != 0 { return metrics, &invalidMetrics{invalid} } return metrics, nil }
go
{ "resource": "" }
q13327
ValidMetric
train
func ValidMetric(sample UDPMetric) bool { ret := true ret = ret && sample.Name != "" ret = ret && sample.Value != nil return ret }
go
{ "resource": "" }
q13328
ParseMetricSSF
train
// ParseMetricSSF converts a single SSF sample into a UDPMetric. It maps the
// SSF metric kind to the string type name ("counter", "gauge", ...), selects
// the value field appropriate to the kind (Message for sets, Status for
// status checks, the float Value otherwise), turns the tag map into a sorted
// "key:value" slice, and computes an FNV-1a digest over name + type + joined
// tags for consistent-hash routing. The magic tags "veneurlocalonly" and
// "veneurglobalonly" set the metric's scope and are stripped from the tag
// list. Returns invalidMetricTypeError for unrecognized metric kinds.
func ParseMetricSSF(metric *ssf.SSFSample) (UDPMetric, error) { ret := UDPMetric{ SampleRate: 1.0, } h := fnv1a.Init32 h = fnv1a.AddString32(h, metric.Name) ret.Name = metric.Name switch metric.Metric { case ssf.SSFSample_COUNTER: ret.Type = "counter" case ssf.SSFSample_GAUGE: ret.Type = "gauge" case ssf.SSFSample_HISTOGRAM: ret.Type = "histogram" case ssf.SSFSample_SET: ret.Type = "set" case ssf.SSFSample_STATUS: ret.Type = "status" default: return UDPMetric{}, invalidMetricTypeError } h = fnv1a.AddString32(h, ret.Type) switch metric.Metric { case ssf.SSFSample_SET: ret.Value = metric.Message case ssf.SSFSample_STATUS: ret.Value = metric.Status default: ret.Value = float64(metric.Value) } ret.SampleRate = metric.SampleRate tempTags := make([]string, 0, len(metric.Tags)) for key, value := range metric.Tags { if key == "veneurlocalonly" { ret.Scope = LocalOnly continue } if key == "veneurglobalonly" { ret.Scope = GlobalOnly continue } tempTags = append(tempTags, key+":"+value) } sort.Strings(tempTags) ret.Tags = tempTags ret.JoinedTags = strings.Join(tempTags, ",") h = fnv1a.AddString32(h, ret.JoinedTags) ret.Digest = h return ret, nil }
go
{ "resource": "" }
q13329
WithForwardTimeout
train
// WithForwardTimeout returns an Option that sets the timeout for forwarding
// a batch of metrics.
func WithForwardTimeout(d time.Duration) Option { return func(opts *options) { opts.forwardTimeout = d } }
go
{ "resource": "" }
q13330
WithLog
train
// WithLog returns an Option that sets the server's logger entry.
func WithLog(e *logrus.Entry) Option { return func(opts *options) { opts.log = e } }
go
{ "resource": "" }
q13331
WithStatsInterval
train
// WithStatsInterval returns an Option that sets how often internal server
// stats are reported.
func WithStatsInterval(d time.Duration) Option { return func(opts *options) { opts.statsInterval = d } }
go
{ "resource": "" }
q13332
WithTraceClient
train
// WithTraceClient returns an Option that sets the trace client used to
// report internal metrics.
func WithTraceClient(c *trace.Client) Option { return func(opts *options) { opts.traceClient = c } }
go
{ "resource": "" }
q13333
EncodeInterMetricCSV
train
func EncodeInterMetricCSV(d samplers.InterMetric, w *csv.Writer, partitionDate *time.Time, hostName string, interval int) error { // TODO(aditya) some better error handling for this // to guarantee that the result is proper JSON tags := "{" + strings.Join(d.Tags, ",") + "}" metricType := "" metricValue := d.Value switch d.Type { case samplers.CounterMetric: metricValue = d.Value / float64(interval) metricType = "rate" case samplers.GaugeMetric: metricType = "gauge" default: return errors.New(fmt.Sprintf("Encountered an unknown metric type %s", d.Type.String())) } fields := [...]string{ // the order here doesn't actually matter // as long as the keys are right TsvName: d.Name, TsvTags: tags, TsvMetricType: metricType, TsvInterval: strconv.Itoa(interval), TsvVeneurHostname: hostName, TsvValue: strconv.FormatFloat(metricValue, 'f', -1, 64), TsvTimestamp: time.Unix(d.Timestamp, 0).UTC().Format(RedshiftDateFormat), // TODO avoid edge case at midnight TsvPartition: partitionDate.UTC().Format(PartitionDateFormat), } w.Write(fields[:]) return w.Error() }
go
{ "resource": "" }
q13334
New
train
// New creates a gRPC forwarding proxy Server hashing metrics across the given
// destinations. Options are applied in order; if no logger option was given,
// a logger that discards all output is installed. The forward RPC service is
// registered on the embedded grpc.Server before returning.
func New(destinations *consistent.Consistent, opts ...Option) (*Server, error) { res := &Server{ Server: grpc.NewServer(), opts: &options{ forwardTimeout: defaultForwardTimeout, statsInterval: defaultReportStatsInterval, }, conns: newClientConnMap(grpc.WithInsecure()), activeProxyHandlers: new(int64), } for _, opt := range opts { opt(res.opts) } if res.opts.log == nil { log := logrus.New() log.Out = ioutil.Discard res.opts.log = logrus.NewEntry(log) } if err := res.SetDestinations(destinations); err != nil { return nil, fmt.Errorf("failed to set the destinations: %v", err) } forwardrpc.RegisterForwardServer(res.Server, res) return res, nil }
go
{ "resource": "" }
q13335
SetDestinations
train
func (s *Server) SetDestinations(dests *consistent.Consistent) error { s.updateMtx.Lock() defer s.updateMtx.Unlock() var current []string if s.destinations != nil { current = s.destinations.Members() } new := dests.Members() // for every connection in the map that isn't in either the current or // previous list of destinations, delete it for _, k := range s.conns.Keys() { if !strInSlice(k, current) && !strInSlice(k, new) { s.conns.Delete(k) } } // create a connection for each destination for _, dest := range new { if err := s.conns.Add(dest); err != nil { return fmt.Errorf("failed to setup a connection for the "+ "destination '%s': %v", dest, err) } } s.destinations = dests return nil }
go
{ "resource": "" }
q13336
SendMetrics
train
// SendMetrics implements the Forward RPC: it acknowledges immediately and
// proxies the metric list asynchronously, tracking the goroutine in
// activeProxyHandlers. The background work deliberately uses
// context.Background() rather than the RPC's ctx, so forwarding outlives the
// client's call; the forwarding error is intentionally discarded (fire and
// forget).
// NOTE(review): the counter increment happens inside the goroutine, so
// reportStats may briefly undercount handlers that have been launched but not
// yet scheduled.
func (s *Server) SendMetrics(ctx context.Context, mlist *forwardrpc.MetricList) (*empty.Empty, error) { go func() { // Track the number of active goroutines in a counter atomic.AddInt64(s.activeProxyHandlers, 1) _ = s.sendMetrics(context.Background(), mlist) atomic.AddInt64(s.activeProxyHandlers, -1) }() return &empty.Empty{}, nil }
go
{ "resource": "" }
q13337
destForMetric
train
// destForMetric hashes the metric's key (name + type + tags) onto the
// consistent-hash ring to pick the destination host for forwarding.
func (s *Server) destForMetric(m *metricpb.Metric) (string, error) { key := samplers.NewMetricKeyFromMetric(m) dest, err := s.destinations.Get(key.String()) if err != nil { return "", fmt.Errorf("failed to hash the MetricKey '%s' to a "+ "destination: %v", key.String(), err) } return dest, nil }
go
{ "resource": "" }
q13338
forward
train
// forward sends a batch of metrics to a single destination over its cached
// gRPC connection. On success it samples (10%) a count of metrics sent per
// destination; the reporting error is deliberately ignored.
func (s *Server) forward(ctx context.Context, dest string, ms []*metricpb.Metric) (err error) { conn, ok := s.conns.Get(dest) if !ok { return fmt.Errorf("no connection was found for the host '%s'", dest) } c := forwardrpc.NewForwardClient(conn) _, err = c.SendMetrics(ctx, &forwardrpc.MetricList{Metrics: ms}) if err != nil { return fmt.Errorf("failed to send %d metrics over gRPC: %v", len(ms), err) } _ = metrics.ReportBatch(s.opts.traceClient, ssf.RandomlySample(0.1, ssf.Count("metrics_by_destination", float32(len(ms)), map[string]string{"destination": dest, "protocol": "grpc"}), )) return nil }
go
{ "resource": "" }
q13339
reportStats
train
// reportStats emits a gauge of the number of currently-active proxy handler
// goroutines; the reporting error is deliberately ignored (best effort).
func (s *Server) reportStats() { _ = metrics.ReportOne(s.opts.traceClient, ssf.Gauge("proxy.active_goroutines", float32(atomic.LoadInt64(s.activeProxyHandlers)), globalProtocolTags)) }
go
{ "resource": "" }
q13340
Error
train
// Error formats the forwarding failure with its cause tag, affected metric
// count, and the underlying error.
func (e forwardError) Error() string { return fmt.Sprintf("%s (cause=%s, metrics=%d): %v", e.msg, e.cause, e.numMetrics, e.err) }
go
{ "resource": "" }
q13341
reportMetrics
train
// reportMetrics attaches failure counters for this forwarding error to the
// span: the number of metrics that failed and one error event, both tagged
// with the error's cause.
func (e forwardError) reportMetrics(span *trace.Span) { tags := map[string]string{ "cause": e.cause, "protocol": "grpc", } span.Add( ssf.Count("proxy.proxied_metrics_failed", float32(e.numMetrics), tags), ssf.Count("proxy.forward_errors", 1, tags), ) }
go
{ "resource": "" }
q13342
Error
train
// Error joins up to the first 10 constituent errors with "\n * " separators;
// if more exist, a trailing "and N more..." summary is appended instead of
// printing them all.
func (errs forwardErrors) Error() string { // Only print 10 errors strsLen := len(errs) if len(errs) > 10 { strsLen = 10 } // convert the errors into a slice of strings strs := make([]string, strsLen) for i, err := range errs[:len(strs)] { strs[i] = err.Error() } // If there are errors that weren't printed, add a message to the end str := strings.Join(strs, "\n * ") if len(strs) < len(errs) { str += fmt.Sprintf("\nand %d more...", len(errs)-len(strs)) } return str }
go
{ "resource": "" }
q13343
StartStatsd
train
// StartStatsd starts listening for statsd traffic on the given address,
// dispatching by address type to the UDP, TCP, or unixgram listener and
// returning the concrete bound address. Unsupported address types panic,
// since the server cannot run without its configured listener.
func StartStatsd(s *Server, a net.Addr, packetPool *sync.Pool) net.Addr { switch addr := a.(type) { case *net.UDPAddr: return startStatsdUDP(s, addr, packetPool) case *net.TCPAddr: return startStatsdTCP(s, addr, packetPool) case *net.UnixAddr: _, b := startStatsdUnix(s, addr, packetPool) return b default: panic(fmt.Sprintf("Can't listen on %v: only TCP, UDP and unixgram:// are supported", a)) } }
go
{ "resource": "" }
q13344
startProcessingOnUDP
train
// startProcessingOnUDP spawns s.numReaders goroutines, each with its own UDP
// socket on addr, and runs proc on each socket. With multiple readers the
// sockets are opened with SO_REUSEPORT so the kernel load-balances datagrams
// across them. It blocks until the first reader has bound and reported its
// concrete local address, which is returned (important for tests using port
// 0). A reader that cannot bind panics, since partial listener setup likely
// indicates missing SO_REUSEPORT support.
func startProcessingOnUDP(s *Server, protocol string, addr *net.UDPAddr, pool *sync.Pool, proc udpProcessor) net.Addr { reusePort := s.numReaders != 1 // If we're reusing the port, make sure we're listening on the // exact same address always; this is mostly relevant for // tests, where port is typically 0 and the initial ListenUDP // call results in a contrete port. if reusePort { sock, err := NewSocket(addr, s.RcvbufBytes, reusePort) if err != nil { panic(fmt.Sprintf("couldn't listen on UDP socket %v: %v", addr, err)) } defer sock.Close() addr = sock.LocalAddr().(*net.UDPAddr) } addrChan := make(chan net.Addr, 1) once := sync.Once{} for i := 0; i < s.numReaders; i++ { go func() { defer func() { ConsumePanic(s.Sentry, s.TraceClient, s.Hostname, recover()) }() // each goroutine gets its own socket // if the sockets support SO_REUSEPORT, then this will cause the // kernel to distribute datagrams across them, for better read // performance sock, err := NewSocket(addr, s.RcvbufBytes, reusePort) if err != nil { // if any goroutine fails to create the socket, we can't really // recover, so we just blow up // this probably indicates a systemic issue, eg lack of // SO_REUSEPORT support panic(fmt.Sprintf("couldn't listen on UDP socket %v: %v", addr, err)) } // Pass the address that we are listening on // back to whoever spawned this goroutine so // it can return that address. once.Do(func() { addrChan <- sock.LocalAddr() log.WithFields(logrus.Fields{ "address": sock.LocalAddr(), "protocol": protocol, "listeners": s.numReaders, }).Info("Listening on UDP address") close(addrChan) }) proc(sock, pool) }() } return <-addrChan }
go
{ "resource": "" }
q13345
startStatsdUnix
train
// startStatsdUnix listens for statsd datagrams on a unixgram socket. It takes
// an exclusive flock on "<socket>.lock" so only one process serves the path,
// chmods the socket world-writable, and starts s.numReaders datagram readers.
// A watcher goroutine closes the socket and releases the lock when s.shutdown
// is closed; the returned channel is closed once teardown is complete.
func startStatsdUnix(s *Server, addr *net.UnixAddr, packetPool *sync.Pool) (<-chan struct{}, net.Addr) { done := make(chan struct{}) // ensure we are the only ones locking this socket: lock := acquireLockForSocket(addr) conn, err := net.ListenUnixgram(addr.Network(), addr) if err != nil { panic(fmt.Sprintf("Couldn't listen on UNIX socket %v: %v", addr, err)) } // Make the socket connectable by everyone with access to the socket pathname: err = os.Chmod(addr.String(), 0666) if err != nil { panic(fmt.Sprintf("Couldn't set permissions on %v: %v", addr, err)) } go func() { defer func() { lock.Unlock() close(done) }() for { _, open := <-s.shutdown // occurs when cleanly shutting down the server e.g. in tests; ignore errors if !open { conn.Close() return } } }() for i := 0; i < s.numReaders; i++ { go s.ReadStatsdDatagramSocket(conn, packetPool) } return done, addr }
go
{ "resource": "" }
q13346
StartSSF
train
// StartSSF starts listening for SSF traces on the given address, dispatching
// by address type to the UDP or unix-domain listener, logging and returning
// the concrete bound address. Unsupported address types panic.
func StartSSF(s *Server, a net.Addr, tracePool *sync.Pool) net.Addr { switch addr := a.(type) { case *net.UDPAddr: a = startSSFUDP(s, addr, tracePool) case *net.UnixAddr: _, a = startSSFUnix(s, addr) default: panic(fmt.Sprintf("Can't listen for SSF on %v: only udp:// & unix:// are supported", a)) } log.WithFields(logrus.Fields{ "address": a.String(), "network": a.Network(), }).Info("Listening for SSF traces") return a }
go
{ "resource": "" }
q13347
startSSFUnix
train
// startSSFUnix listens for framed SSF spans on a streaming unix socket. It
// takes an exclusive flock on the socket path, binds, chmods the socket
// world-writable, and runs an accept loop that hands each connection to
// ReadSSFStreamSocket. Closing s.shutdown stops the listener; accept errors
// during shutdown are logged and ignored, while any other accept failure is
// fatal. The returned channel is closed when the accept loop exits.
func startSSFUnix(s *Server, addr *net.UnixAddr) (<-chan struct{}, net.Addr) { done := make(chan struct{}) if addr.Network() != "unix" { panic(fmt.Sprintf("Can't listen for SSF on %v: only udp:// and unix:// addresses are supported", addr)) } // ensure we are the only ones locking this socket: lock := acquireLockForSocket(addr) listener, err := net.ListenUnix(addr.Network(), addr) if err != nil { panic(fmt.Sprintf("Couldn't listen on UNIX socket %v: %v", addr, err)) } // Make the socket connectable by everyone with access to the socket pathname: err = os.Chmod(addr.String(), 0666) if err != nil { panic(fmt.Sprintf("Couldn't set permissions on %v: %v", addr, err)) } go func() { conns := make(chan net.Conn) go func() { defer func() { lock.Unlock() close(done) }() for { conn, err := listener.AcceptUnix() if err != nil { select { case <-s.shutdown: // occurs when cleanly shutting down the server e.g. in tests; ignore errors log.WithError(err).Info("Ignoring Accept error while shutting down") return default: log.WithError(err).Fatal("Unix accept failed") } } conns <- conn } }() for { select { case conn := <-conns: go s.ReadSSFStreamSocket(conn) case <-s.shutdown: listener.Close() return } } }() return done, listener.Addr() }
go
{ "resource": "" }
q13348
acquireLockForSocket
train
// acquireLockForSocket takes an exclusive flock on "<socket path>.lock",
// panicking if the lock cannot be acquired or is already held by another
// process. Once held, any stale socket file at the path is removed so the
// caller can bind cleanly. The caller must Unlock the returned flock.
func acquireLockForSocket(addr *net.UnixAddr) *flock.Flock { lockname := fmt.Sprintf("%s.lock", addr.String()) lock := flock.NewFlock(lockname) locked, err := lock.TryLock() if err != nil { panic(fmt.Sprintf("Could not acquire the lock %q to listen on %v: %v", lockname, addr, err)) } if !locked { panic(fmt.Sprintf("Lock file %q for %v is in use by another process already", lockname, addr)) } // We have the exclusive use of the socket, clear away any old sockets and listen: _ = os.Remove(addr.String()) return lock }
go
{ "resource": "" }
q13349
Clone
train
// Clone returns an independent copy of the text map carrier.
func (t textMapReaderWriter) Clone() textMapReaderWriter { clone := textMapReaderWriter(map[string]string{}) t.CloneTo(clone) return clone }
go
{ "resource": "" }
q13350
CloneTo
train
// CloneTo copies every key/value pair of this carrier into w.
func (t textMapReaderWriter) CloneTo(w opentracing.TextMapWriter) { t.ForeachKey(func(k, v string) error { w.Set(k, v) return nil }) }
go
{ "resource": "" }
q13351
parseBaggageInt64
train
func (c *spanContext) parseBaggageInt64(key string) int64 { var val int64 c.ForeachBaggageItem(func(k, v string) bool { if strings.ToLower(k) == strings.ToLower(key) { i, err := strconv.ParseInt(v, 10, 64) if err != nil { // TODO handle err return true } val = i return false } return true }) return val }
go
{ "resource": "" }
q13352
Resource
train
// Resource returns the value of the ResourceKey baggage item, or "" if absent.
// NOTE(review): the comparison lowercases k but not ResourceKey, so this
// assumes ResourceKey is already all-lowercase — confirm at its declaration.
func (c *spanContext) Resource() string { var resource string c.ForeachBaggageItem(func(k, v string) bool { if strings.ToLower(k) == ResourceKey { resource = v return false } return true }) return resource }
go
{ "resource": "" }
q13353
ClientFinish
train
// ClientFinish finishes the span, recording it via the given client with the
// current time as the finish time. A nil receiver is a no-op so that
// `defer span.ClientFinish(cl)` is always safe.
func (s *Span) ClientFinish(cl *Client) { // This should never happen, // but calling defer span.Finish() should always be // a safe operation. if s == nil { return } s.ClientFinishWithOptions(cl, opentracing.FinishOptions{ FinishTime: time.Now(), LogRecords: nil, BulkLogData: nil, }) }
go
{ "resource": "" }
q13354
FinishWithOptions
train
// FinishWithOptions implements opentracing.Span by finishing against the
// package's DefaultClient.
func (s *Span) FinishWithOptions(opts opentracing.FinishOptions) { s.ClientFinishWithOptions(DefaultClient, opts) }
go
{ "resource": "" }
q13355
ClientFinishWithOptions
train
// ClientFinishWithOptions finishes and records the span via cl, storing any
// recording error in s.recordErr. A nil receiver is a no-op so deferred
// finishes are always safe. opts is currently unused by the recording path.
func (s *Span) ClientFinishWithOptions(cl *Client, opts opentracing.FinishOptions) { // This should never happen, // but calling defer span.FinishWithOptions() should always be // a safe operation. if s == nil { return } // TODO remove the name tag from the slice of tags s.recordErr = s.ClientRecord(cl, s.Name, s.Tags) }
go
{ "resource": "" }
q13356
SetOperationName
train
// SetOperationName implements opentracing.Span by storing the name as the
// trace's Resource, returning the span for chaining.
func (s *Span) SetOperationName(name string) opentracing.Span { s.Trace.Resource = name return s }
go
{ "resource": "" }
q13357
SetTag
train
// SetTag stores a tag on the span, stringifying the value: strings pass
// through, fmt.Stringer values use String(), and anything else is rendered
// with %#v. Lazily initializes the tag map. Not safe for concurrent use
// (see the TODO about a mutex).
func (s *Span) SetTag(key string, value interface{}) opentracing.Span { if s.Tags == nil { s.Tags = map[string]string{} } var val string // TODO mutex switch v := value.(type) { case string: val = v case fmt.Stringer: val = v.String() default: // TODO maybe just ban non-strings? val = fmt.Sprintf("%#v", value) } s.Tags[key] = val return s }
go
{ "resource": "" }
q13358
Attach
train
// Attach returns a context carrying this span, via the opentracing helper.
func (s *Span) Attach(ctx context.Context) context.Context { return opentracing.ContextWithSpan(ctx, s) }
go
{ "resource": "" }
q13359
LogFields
train
// LogFields appends structured log fields to the span. Not safe for
// concurrent use (see TODO).
func (s *Span) LogFields(fields ...opentracinglog.Field) { // TODO mutex this s.logLines = append(s.logLines, fields...) }
go
{ "resource": "" }
q13360
SetBaggageItem
train
// SetBaggageItem stores a baggage key/value on the span's context, returning
// the span for chaining.
func (s *Span) SetBaggageItem(restrictedKey, value string) opentracing.Span { s.contextAsParent().baggageItems[restrictedKey] = value return s }
go
{ "resource": "" }
q13361
BaggageItem
train
// BaggageItem returns the baggage value for the key, or "" if unset.
func (s *Span) BaggageItem(restrictedKey string) string { return s.contextAsParent().baggageItems[restrictedKey] }
go
{ "resource": "" }
q13362
customSpanStart
train
// customSpanStart returns a StartSpanOption that forces the span's start
// time to t.
func customSpanStart(t time.Time) opentracing.StartSpanOption { return &spanOption{ apply: func(sso *opentracing.StartSpanOptions) { sso.StartTime = t }, } }
go
{ "resource": "" }
q13363
InjectRequest
train
// InjectRequest injects the trace's propagation headers into the outgoing
// HTTP request, delegating to InjectHeader.
func (tracer Tracer) InjectRequest(t *Trace, req *http.Request) error { return tracer.InjectHeader(t, req.Header) }
go
{ "resource": "" }
q13364
InjectHeader
train
// InjectHeader serializes the trace's span context into h using the
// opentracing HTTPHeaders carrier format.
func (tracer Tracer) InjectHeader(t *Trace, h http.Header) error { carrier := opentracing.HTTPHeadersCarrier(h) return tracer.Inject(t.context(), opentracing.HTTPHeaders, carrier) }
go
{ "resource": "" }
q13365
ExtractRequestChild
train
// ExtractRequestChild extracts a span context from the incoming request's
// headers and starts a child span of it with the given resource and name.
// NOTE(review): the unchecked assertion parentSpan.(*spanContext) will panic
// if Extract ever returns a different SpanContext implementation — confirm
// Extract can only produce *spanContext here.
func (tracer Tracer) ExtractRequestChild(resource string, req *http.Request, name string) (*Span, error) { carrier := opentracing.HTTPHeadersCarrier(req.Header) parentSpan, err := tracer.Extract(opentracing.HTTPHeaders, carrier) if err != nil { return nil, err } parent := parentSpan.(*spanContext) t := StartChildSpan(&Trace{ SpanID: parent.SpanID(), TraceID: parent.TraceID(), ParentID: parent.ParentID(), Resource: resource, }) t.Name = name return &Span{ tracer: tracer, Trace: t, }, nil }
go
{ "resource": "" }
q13366
EncodeInterMetricsCSV
train
func EncodeInterMetricsCSV(metrics []samplers.InterMetric, delimiter rune, includeHeaders bool, hostname string, interval int) (io.ReadSeeker, error) { b := &bytes.Buffer{} gzw := gzip.NewWriter(b) w := csv.NewWriter(gzw) w.Comma = delimiter if includeHeaders { // Write the headers first headers := [...]string{ // the order here doesn't actually matter // as long as the keys are right TsvName: TsvName.String(), TsvTags: TsvTags.String(), TsvMetricType: TsvMetricType.String(), TsvInterval: TsvInterval.String(), TsvVeneurHostname: TsvVeneurHostname.String(), TsvValue: TsvValue.String(), TsvTimestamp: TsvTimestamp.String(), TsvPartition: TsvPartition.String(), } w.Write(headers[:]) } // TODO avoid edge case at midnight partitionDate := time.Now() for _, metric := range metrics { EncodeInterMetricCSV(metric, w, &partitionDate, hostname, interval) } w.Flush() gzw.Close() return bytes.NewReader(b.Bytes()), w.Error() }
go
{ "resource": "" }
q13367
NewSplitBytes
train
// NewSplitBytes returns an iterator over buf's chunks separated by delim;
// advance with Next and read the current chunk between calls.
func NewSplitBytes(buf []byte, delim byte) *SplitBytes { return &SplitBytes{ buf: buf, delim: delim, } }
go
{ "resource": "" }
q13368
Next
train
// Next advances the iterator to the next delimiter-separated chunk, returning
// false once all chunks have been consumed. The final chunk (after the last
// delimiter, or the whole buffer when no delimiter exists) is yielded even
// when empty; subsequent slices alias the original buffer (no copying).
func (sb *SplitBytes) Next() bool { if sb.lastChunk { // we do not check the length here, this ensures that we return the // last chunk in the sequence (even if it's empty) return false } next := bytes.IndexByte(sb.buf, sb.delim) if next == -1 { // no newline, consume the entire buffer sb.currentChunk = sb.buf sb.buf = nil sb.lastChunk = true } else { sb.currentChunk = sb.buf[:next] sb.buf = sb.buf[next+1:] } return true }
go
{ "resource": "" }
q13369
Start
train
// Start stores the trace client and launches a goroutine that watches the
// gRPC connection's state: each state transition resets the
// loggedSinceTransition flag so Ingest may log one error per transition.
// NOTE(review): the goroutine loops forever with no stop channel/context —
// it lives for the life of the process.
func (gs *GRPCSpanSink) Start(cl *trace.Client) error { gs.traceClient = cl // Run a background goroutine to do a little bit of connection state // tracking. go func() { for { // This call will block on a channel receive until the gRPC connection // state changes. When it does, flip the marker over to allow another // error to be logged from Ingest(). gs.grpcConn.WaitForStateChange(ocontext.Background(), gs.grpcConn.GetState()) atomic.StoreUint32(&gs.loggedSinceTransition, 0) } }() return nil }
go
{ "resource": "" }
q13370
Ingest
train
// Ingest validates the span and sends it over gRPC with the trace ID (hex)
// attached as outgoing metadata, counting successes and drops atomically.
// On failure it logs at most one error per connection-state transition —
// except in the Ready state, where every error is logged because failing
// while Ready is unexpected — to avoid log spew while the connection is
// degraded. The send error is returned either way.
func (gs *GRPCSpanSink) Ingest(ssfSpan *ssf.SSFSpan) error { if err := protocol.ValidateTrace(ssfSpan); err != nil { return err } ctx := metadata.AppendToOutgoingContext(ocontext.Background(), "x-veneur-trace-id", strconv.FormatInt(ssfSpan.TraceId, 16)) _, err := gs.ssc.SendSpan(ctx, ssfSpan) if err != nil { atomic.AddUint32(&gs.dropCount, 1) // gRPC guarantees that an error returned from an RPC call will be of // type status.Status. In the unexpected event that they're not, this // call creates a dummy type, so there's no risk of panic. serr := status.Convert(err) state := gs.grpcConn.GetState() // Log all errors that occur in Ready state - that's weird. Otherwise, // Log only one error per underlying connection state transition. This // should be a reasonable heuristic to get an indicator that problems // are occurring, without resulting in massive log spew while // connections are under duress. if state == connectivity.Ready || atomic.CompareAndSwapUint32(&gs.loggedSinceTransition, 0, 1) { gs.log.WithFields(logrus.Fields{ logrus.ErrorKey: err, "target": gs.target, "name": gs.name, "chanstate": state.String(), "code": serr.Code(), "details": serr.Details(), "message": serr.Message(), }).Error("Error sending span to gRPC sink target") } } else { atomic.AddUint32(&gs.sentCount, 1) } return err }
go
{ "resource": "" }
q13371
newRequest
train
// newRequest builds a hecRequest with a freshly generated event-collector URL
// and auth header, wiring an io.Pipe so the body can be streamed by a writer
// goroutine while the HTTP client reads it.
func (c *hecClient) newRequest() *hecRequest { req := &hecRequest{url: c.url(c.idGen.String()), authHeader: c.authHeader()} req.r, req.w = io.Pipe() return req }
go
{ "resource": "" }
q13372
newHTTPClientTracer
train
// newHTTPClientTracer creates a tracer for instrumenting an HTTP client
// request, opening an initial "http.start" span as the current span; each
// subsequent lifecycle event finishes the current span and starts the next.
func newHTTPClientTracer(ctx context.Context, tc *trace.Client, prefix string) *httpClientTracer { span, _ := trace.StartSpanFromContext(ctx, "http.start") return &httpClientTracer{ prefix: prefix, traceClient: tc, mutex: &sync.Mutex{}, ctx: ctx, currentSpan: span, } }
go
{ "resource": "" }
q13373
getClientTrace
train
// getClientTrace returns an httptrace.ClientTrace whose lifecycle hooks
// (connection, DNS, first byte, headers, request written) are bound to this
// tracer's span-per-phase callbacks.
func (hct *httpClientTracer) getClientTrace() *httptrace.ClientTrace { return &httptrace.ClientTrace{ GotConn: hct.gotConn, DNSStart: hct.dnsStart, GotFirstResponseByte: hct.gotFirstResponseByte, ConnectStart: hct.connectStart, WroteHeaders: hct.wroteHeaders, WroteRequest: hct.wroteRequest, } }
go
{ "resource": "" }
q13374
startSpan
train
// startSpan finishes the current phase span and makes a new span named name
// the current one, under the tracer's mutex, returning the new span.
func (hct *httpClientTracer) startSpan(name string) *trace.Span { hct.mutex.Lock() defer hct.mutex.Unlock() newSpan, _ := trace.StartSpanFromContext(hct.ctx, name) hct.currentSpan.ClientFinish(hct.traceClient) hct.currentSpan = newSpan return newSpan }
go
{ "resource": "" }
q13375
finishSpan
train
// finishSpan finishes the current phase span under the tracer's mutex,
// ending the instrumented request lifecycle.
func (hct *httpClientTracer) finishSpan() { hct.mutex.Lock() defer hct.mutex.Unlock() hct.currentSpan.ClientFinish(hct.traceClient) }
go
{ "resource": "" }
q13376
NewXRaySpanSink
train
// NewXRaySpanSink creates an AWS X-Ray span sink. The sample rate percentage
// is clamped to [0, 100] (with a warning) and converted to a uint32 threshold
// — (rate/100) * MaxUint32 — so sampling decisions can compare a uint32 hash
// directly against it. A regex of characters invalid in X-Ray segment names
// is compiled for later name cleaning, and annotationTags is converted to a
// set for O(1) membership checks.
// NOTE(review): sampleRatePercentage * math.MaxUint32 is computed in int,
// which overflows on 32-bit platforms where int is 32 bits — confirm target
// platforms, or compute in uint64.
func NewXRaySpanSink(daemonAddr string, sampleRatePercentage int, commonTags map[string]string, annotationTags []string, log *logrus.Logger) (*XRaySpanSink, error) { log.WithFields(logrus.Fields{ "Address": daemonAddr, }).Info("Creating X-Ray client") var sampleThreshold uint32 if sampleRatePercentage < 0 { log.WithField("sampleRatePercentage", sampleRatePercentage).Warn("Sample rate < 0 is invalid, defaulting to 0") sampleRatePercentage = 0 } if sampleRatePercentage > 100 { log.WithField("sampleRatePercentage", sampleRatePercentage).Warn("Sample rate > 100 is invalid, defaulting to 100") sampleRatePercentage = 100 } // Set the sample threshold to (sample rate) * (maximum value of uint32), so that // we can store it as a uint32 instead of a float64 and compare apples-to-apples // with the output of our hashing algorithm. sampleThreshold = uint32(sampleRatePercentage * math.MaxUint32 / 100) // Build a regex for cleaning names based on valid characters from: // https://docs.aws.amazon.com/xray/latest/devguide/xray-api-segmentdocuments.html#api-segmentdocuments-fields reg, err := regexp.Compile(`[^a-zA-Z0-9_\.\:\/\%\&#=+\-\@\s\\]+`) if err != nil { return nil, err } annotationTagsMap := map[string]struct{}{} for _, key := range annotationTags { annotationTagsMap[key] = struct{}{} } return &XRaySpanSink{ daemonAddr: daemonAddr, sampleThreshold: sampleThreshold, commonTags: commonTags, log: log, nameRegex: reg, annotationTags: annotationTagsMap, }, nil }
go
{ "resource": "" }
q13377
Start
train
// Start stores the trace client and dials a UDP connection to the X-Ray
// daemon; any resolve or dial failure is returned to the caller.
func (x *XRaySpanSink) Start(cl *trace.Client) error { x.traceClient = cl xrayDaemon, err := net.ResolveUDPAddr("udp", x.daemonAddr) if err != nil { return err } conn, err := net.DialUDP("udp", nil, xrayDaemon) if err != nil { return err } x.conn = conn return nil }
go
{ "resource": "" }
q13378
Flush
train
// Flush logs and reports the counts of spans flushed/dropped since the last
// flush, atomically swapping the counters back to zero. The error from
// ReportBatch is ignored — flush reporting is best effort.
func (x *XRaySpanSink) Flush() { x.log.WithFields(logrus.Fields{ "flushed_spans": atomic.LoadInt64(&x.spansHandled), "dropped_spans": atomic.LoadInt64(&x.spansDropped), }).Debug("Checkpointing flushed spans for X-Ray") metrics.ReportBatch(x.traceClient, []*ssf.SSFSample{ ssf.Count(sinks.MetricKeyTotalSpansFlushed, float32(atomic.SwapInt64(&x.spansHandled, 0)), map[string]string{"sink": x.Name()}), ssf.Count(sinks.MetricKeyTotalSpansDropped, float32(atomic.SwapInt64(&x.spansDropped, 0)), map[string]string{"sink": x.Name()}), }) }
go
{ "resource": "" }
q13379
Report
train
// Report submits the batch held by samples via ReportBatch.
func Report(cl *trace.Client, samples *ssf.Samples) error { return ReportBatch(cl, samples.Batch) }
go
{ "resource": "" }
q13380
ReportBatch
train
// ReportBatch submits a slice of samples synchronously (no done channel).
func ReportBatch(cl *trace.Client, samples []*ssf.SSFSample) error { return ReportAsync(cl, samples, nil) }
go
{ "resource": "" }
q13381
ReportOne
train
// ReportOne submits a single sample synchronously (no done channel).
func ReportOne(cl *trace.Client, metric *ssf.SSFSample) error { return ReportAsync(cl, []*ssf.SSFSample{metric}, nil) }
go
{ "resource": "" }
q13382
finish
train
// finish stamps the trace's End time with now, but only on the first call —
// an already-set End is preserved.
func (t *Trace) finish() { if t.End.IsZero() { t.End = time.Now() } }
go
{ "resource": "" }
q13383
Duration
train
// Duration returns End minus Start, or -1 if the trace has not finished yet.
func (t *Trace) Duration() time.Duration { if t.End.IsZero() { return -1 } return t.End.Sub(t.Start) }
go
{ "resource": "" }
q13384
ProtoMarshalTo
train
func (t *Trace) ProtoMarshalTo(w io.Writer) error { packet, err := proto.Marshal(t.SSFSpan()) if err != nil { return err } _, err = w.Write(packet) return err }
go
{ "resource": "" }
q13385
Record
train
// Record finalizes the span and submits it through the package-level
// DefaultClient; see ClientRecord for the name/tag merging semantics.
func (t *Trace) Record(name string, tags map[string]string) error {
	return t.ClientRecord(DefaultClient, name, tags)
}
go
{ "resource": "" }
q13386
ClientRecord
train
func (t *Trace) ClientRecord(cl *Client, name string, tags map[string]string) error { if t.Tags == nil { t.Tags = map[string]string{} } t.finish() for k, v := range tags { t.Tags[k] = v } if name == "" { name = t.Name } span := t.SSFSpan() span.Name = name return Record(cl, span, t.Sent) }
go
{ "resource": "" }
q13387
Attach
train
// Attach returns a child context that carries this trace under the
// package's trace key, so downstream callees can retrieve it.
func (t *Trace) Attach(c context.Context) context.Context {
	return context.WithValue(c, traceKey, t)
}
go
{ "resource": "" }
q13388
StartTrace
train
func StartTrace(resource string) *Trace { traceID := proto.Int64(rand.Int63()) t := &Trace{ TraceID: *traceID, SpanID: *traceID, ParentID: 0, Resource: resource, Tags: map[string]string{}, } t.Start = time.Now() return t }
go
{ "resource": "" }
q13389
StartChildSpan
train
func StartChildSpan(parent *Trace) *Trace { spanID := proto.Int64(rand.Int63()) span := &Trace{ SpanID: *spanID, } span.SetParent(parent) span.Start = time.Now() return span }
go
{ "resource": "" }
q13390
NewKafkaMetricSink
train
func NewKafkaMetricSink(logger *logrus.Logger, cl *trace.Client, brokers string, checkTopic string, eventTopic string, metricTopic string, ackRequirement string, partitioner string, retries int, bufferBytes int, bufferMessages int, bufferDuration string) (*KafkaMetricSink, error) { if logger == nil { logger = &logrus.Logger{Out: ioutil.Discard} } if checkTopic == "" && eventTopic == "" && metricTopic == "" { return nil, errors.New("Unable to start Kafka sink with no valid topic names") } ll := logger.WithField("metric_sink", "kafka") var finalBufferDuration time.Duration if bufferDuration != "" { var err error finalBufferDuration, err = time.ParseDuration(bufferDuration) if err != nil { return nil, err } } config, _ := newProducerConfig(ll, ackRequirement, partitioner, retries, bufferBytes, bufferMessages, finalBufferDuration) ll.WithFields(logrus.Fields{ "brokers": brokers, "check_topic": checkTopic, "event_topic": eventTopic, "metric_topic": metricTopic, "partitioner": partitioner, "ack_requirement": ackRequirement, "max_retries": retries, "buffer_bytes": bufferBytes, "buffer_messages": bufferMessages, "buffer_duration": bufferDuration, }).Info("Created Kafka metric sink") return &KafkaMetricSink{ logger: ll, checkTopic: checkTopic, eventTopic: eventTopic, metricTopic: metricTopic, brokers: brokers, config: config, traceClient: cl, }, nil }
go
{ "resource": "" }
q13391
newConfiguredProducer
train
func newConfiguredProducer(logger *logrus.Entry, brokerString string, config *sarama.Config) (sarama.AsyncProducer, error) { brokerList := strings.Split(brokerString, ",") if len(brokerList) < 1 { logger.WithField("addrs", brokerString).Error("No brokers?") return nil, errors.New("No brokers in broker list") } logger.WithField("addrs", brokerList).Info("Connecting to Kafka") producer, err := sarama.NewAsyncProducer(brokerList, config) if err != nil { logger.Error("Error Connecting to Kafka. client error: ", err) } return producer, nil }
go
{ "resource": "" }
q13392
Flush
train
func (k *KafkaMetricSink) Flush(ctx context.Context, interMetrics []samplers.InterMetric) error { samples := &ssf.Samples{} defer metrics.Report(k.traceClient, samples) if len(interMetrics) == 0 { k.logger.Info("Nothing to flush, skipping.") return nil } successes := int64(0) for _, metric := range interMetrics { if !sinks.IsAcceptableMetric(metric, k) { continue } k.logger.Debug("Emitting Metric: ", metric.Name) j, err := json.Marshal(metric) if err != nil { k.logger.Error("Error marshalling metric: ", metric.Name) samples.Add(ssf.Count("kafka.marshal.error_total", 1, nil)) return err } k.producer.Input() <- &sarama.ProducerMessage{ Topic: k.metricTopic, Value: sarama.StringEncoder(j), } successes++ } samples.Add(ssf.Count(sinks.MetricKeyTotalMetricsFlushed, float32(successes), map[string]string{"sink": k.Name()})) return nil }
go
{ "resource": "" }
q13393
NewKafkaSpanSink
train
func NewKafkaSpanSink(logger *logrus.Logger, cl *trace.Client, brokers string, topic string, partitioner string, ackRequirement string, retries int, bufferBytes int, bufferMessages int, bufferDuration string, serializationFormat string, sampleTag string, sampleRatePercentage int) (*KafkaSpanSink, error) { if logger == nil { logger = &logrus.Logger{Out: ioutil.Discard} } if topic == "" { return nil, errors.New("Cannot start Kafka span sink with no span topic") } ll := logger.WithField("span_sink", "kafka") serializer := serializationFormat if serializer != "json" && serializer != "protobuf" { ll.WithField("serializer", serializer).Warn("Unknown serializer, defaulting to protobuf") serializer = "protobuf" } var sampleThreshold uint32 if sampleRatePercentage <= 0 || sampleRatePercentage > 100 { return nil, errors.New("Span sample rate percentage must be greater than 0%% and less than or equal to 100%%") } // Set the sample threshold to (sample rate) * (maximum value of uint32), so that // we can store it as a uint32 instead of a float64 and compare apples-to-apples // with the output of our hashing algorithm. sampleThreshold = uint32(sampleRatePercentage * math.MaxUint32 / 100) var finalBufferDuration time.Duration if bufferDuration != "" { var err error finalBufferDuration, err = time.ParseDuration(bufferDuration) if err != nil { return nil, err } } config, _ := newProducerConfig(ll, ackRequirement, partitioner, retries, bufferBytes, bufferMessages, finalBufferDuration) ll.WithFields(logrus.Fields{ "brokers": brokers, "topic": topic, "partitioner": partitioner, "ack_requirement": ackRequirement, "max_retries": retries, "buffer_bytes": bufferBytes, "buffer_messages": bufferMessages, "buffer_duration": bufferDuration, }).Info("Started Kafka span sink") return &KafkaSpanSink{ logger: ll, topic: topic, brokers: brokers, config: config, serializer: serializer, sampleTag: sampleTag, sampleThreshold: sampleThreshold, }, nil }
go
{ "resource": "" }
q13394
Flush
train
func (k *KafkaSpanSink) Flush() { // TODO We have no stuff in here for detecting failed writes from the async // producer. We should add that. k.logger.WithFields(logrus.Fields{ "flushed_spans": atomic.LoadInt64(&k.spansFlushed), }).Debug("Checkpointing flushed spans for Kafka") metrics.ReportOne(k.traceClient, ssf.Count(sinks.MetricKeyTotalSpansFlushed, float32(atomic.LoadInt64(&k.spansFlushed)), map[string]string{"sink": k.Name()})) atomic.SwapInt64(&k.spansFlushed, 0) }
go
{ "resource": "" }
q13395
New
train
func New(metricOuts []MetricIngester, opts ...Option) *Server { res := &Server{ Server: grpc.NewServer(), metricOuts: metricOuts, opts: &options{}, } for _, opt := range opts { opt(res.opts) } if res.opts.traceClient == nil { res.opts.traceClient = trace.DefaultClient } forwardrpc.RegisterForwardServer(res.Server, res) return res }
go
{ "resource": "" }
q13396
NewWorkerMetrics
train
// NewWorkerMetrics initializes a WorkerMetrics value with an empty sampler
// map for every supported metric kind, covering the plain, "global" and
// "local" variants of each.
func NewWorkerMetrics() WorkerMetrics {
	var wm WorkerMetrics

	// Plain sampler maps.
	wm.counters = map[samplers.MetricKey]*samplers.Counter{}
	wm.gauges = map[samplers.MetricKey]*samplers.Gauge{}
	wm.histograms = map[samplers.MetricKey]*samplers.Histo{}
	wm.sets = map[samplers.MetricKey]*samplers.Set{}
	wm.timers = map[samplers.MetricKey]*samplers.Histo{}

	// Global variants.
	wm.globalCounters = map[samplers.MetricKey]*samplers.Counter{}
	wm.globalGauges = map[samplers.MetricKey]*samplers.Gauge{}
	wm.globalHistograms = map[samplers.MetricKey]*samplers.Histo{}
	wm.globalTimers = map[samplers.MetricKey]*samplers.Histo{}

	// Local variants.
	wm.localHistograms = map[samplers.MetricKey]*samplers.Histo{}
	wm.localSets = map[samplers.MetricKey]*samplers.Set{}
	wm.localTimers = map[samplers.MetricKey]*samplers.Histo{}
	wm.localStatusChecks = map[samplers.MetricKey]*samplers.StatusCheck{}

	return wm
}
go
{ "resource": "" }
q13397
appendExportedMetric
train
func (wm WorkerMetrics) appendExportedMetric(res []*metricpb.Metric, exp metricExporter, mType metricpb.Type, cl *trace.Client, scope samplers.MetricScope) []*metricpb.Metric { m, err := exp.Metric() m.Scope = scope.ToPB() if err != nil { log.WithFields(logrus.Fields{ logrus.ErrorKey: err, "type": mType, "name": exp.GetName(), }).Error("Could not export metric") metrics.ReportOne(cl, ssf.Count("worker_metrics.export_metric.errors", 1, map[string]string{ "type": mType.String(), }), ) return res } m.Type = mType return append(res, m) }
go
{ "resource": "" }
q13398
NewWorker
train
// NewWorker creates, but does not start, a Worker with the given id,
// freshly allocated ingest channels, and zeroed counters.
func NewWorker(id int, cl *trace.Client, logger *logrus.Logger, stats *statsd.Client) *Worker {
	w := &Worker{
		id:               id,
		PacketChan:       make(chan samplers.UDPMetric, 32),
		ImportChan:       make(chan []samplers.JSONMetric, 32),
		ImportMetricChan: make(chan []*metricpb.Metric, 32),
		QuitChan:         make(chan struct{}),
		processed:        0,
		imported:         0,
		mutex:            &sync.Mutex{},
		traceClient:      cl,
		logger:           logger,
		stats:            stats,
	}
	w.wm = NewWorkerMetrics()
	return w
}
go
{ "resource": "" }
q13399
MetricsProcessedCount
train
func (w *Worker) MetricsProcessedCount() int64 { w.mutex.Lock() defer w.mutex.Unlock() return w.processed }
go
{ "resource": "" }