_id
stringlengths
2
7
title
stringlengths
1
118
partition
stringclasses
3 values
text
stringlengths
52
85.5k
language
stringclasses
1 value
meta_information
dict
q13400
ImportMetric
train
// ImportMetric folds a metric merged and forwarded by another veneur
// instance into this worker's current aggregates. Counters and gauges are
// upserted as global-only; every other type is upserted with mixed scope.
// Unknown metric types are logged and dropped.
func (w *Worker) ImportMetric(other samplers.JSONMetric) {
	w.mutex.Lock()
	defer w.mutex.Unlock()

	// we don't increment the processed metric counter here, it was already
	// counted by the original veneur that sent this to us
	w.imported++
	if other.Type == counterTypeName || other.Type == gaugeTypeName {
		// this is an odd special case -- counters that are imported are global
		w.wm.Upsert(other.MetricKey, samplers.GlobalOnly, other.Tags)
	} else {
		w.wm.Upsert(other.MetricKey, samplers.MixedScope, other.Tags)
	}

	// Merge the incoming value into the sampler map selected by type; the
	// Upsert above guarantees the key exists in the relevant map.
	switch other.Type {
	case counterTypeName:
		if err := w.wm.globalCounters[other.MetricKey].Combine(other.Value); err != nil {
			log.WithError(err).Error("Could not merge counters")
		}
	case gaugeTypeName:
		if err := w.wm.globalGauges[other.MetricKey].Combine(other.Value); err != nil {
			log.WithError(err).Error("Could not merge gauges")
		}
	case setTypeName:
		if err := w.wm.sets[other.MetricKey].Combine(other.Value); err != nil {
			log.WithError(err).Error("Could not merge sets")
		}
	case histogramTypeName:
		if err := w.wm.histograms[other.MetricKey].Combine(other.Value); err != nil {
			log.WithError(err).Error("Could not merge histograms")
		}
	case timerTypeName:
		if err := w.wm.timers[other.MetricKey].Combine(other.Value); err != nil {
			log.WithError(err).Error("Could not merge timers")
		}
	default:
		log.WithField("type", other.Type).Error("Unknown metric type for importing")
	}
}
go
{ "resource": "" }
q13401
ImportMetricGRPC
train
func (w *Worker) ImportMetricGRPC(other *metricpb.Metric) (err error) { w.mutex.Lock() defer w.mutex.Unlock() key := samplers.NewMetricKeyFromMetric(other) scope := samplers.ScopeFromPB(other.Scope) if other.Type == metricpb.Type_Counter || other.Type == metricpb.Type_Gauge { scope = samplers.GlobalOnly } if scope == samplers.LocalOnly { return fmt.Errorf("gRPC import does not accept local metrics") } w.wm.Upsert(key, scope, other.Tags) w.imported++ switch v := other.GetValue().(type) { case *metricpb.Metric_Counter: w.wm.globalCounters[key].Merge(v.Counter) case *metricpb.Metric_Gauge: w.wm.globalGauges[key].Merge(v.Gauge) case *metricpb.Metric_Set: if merr := w.wm.sets[key].Merge(v.Set); merr != nil { err = fmt.Errorf("could not merge a set: %v", err) } case *metricpb.Metric_Histogram: switch other.Type { case metricpb.Type_Histogram: if other.Scope == metricpb.Scope_Mixed { w.wm.histograms[key].Merge(v.Histogram) } else if other.Scope == metricpb.Scope_Global { w.wm.globalHistograms[key].Merge(v.Histogram) } case metricpb.Type_Timer: if other.Scope == metricpb.Scope_Mixed { w.wm.timers[key].Merge(v.Histogram) } else if other.Scope == metricpb.Scope_Global { w.wm.globalTimers[key].Merge(v.Histogram) } } case nil: err = errors.New("Can't import a metric with a nil value") default: err = fmt.Errorf("Unknown metric type for importing: %T", v) } if err != nil { log.WithError(err).WithFields(logrus.Fields{ "type": other.Type, "name": other.Name, "protocol": "grpc", }).Error("Failed to import a metric") } return err }
go
{ "resource": "" }
q13402
Flush
train
func (w *Worker) Flush() WorkerMetrics { // This is a critical spot. The worker can't process metrics while this // mutex is held! So we try and minimize it by copying the maps of values // and assigning new ones. wm := NewWorkerMetrics() w.mutex.Lock() ret := w.wm processed := w.processed imported := w.imported w.wm = wm w.processed = 0 w.imported = 0 w.mutex.Unlock() w.stats.Count("worker.metrics_processed_total", processed, []string{}, 1.0) w.stats.Count("worker.metrics_imported_total", imported, []string{}, 1.0) return ret }
go
{ "resource": "" }
q13403
NewEventWorker
train
// NewEventWorker creates an EventWorker ready to accept SSF samples on its
// channel.
func NewEventWorker(cl *trace.Client, stats *statsd.Client) *EventWorker {
	worker := &EventWorker{
		sampleChan:  make(chan ssf.SSFSample),
		mutex:       &sync.Mutex{},
		traceClient: cl,
		stats:       stats,
	}
	return worker
}
go
{ "resource": "" }
q13404
Work
train
func (ew *EventWorker) Work() { for { select { case s := <-ew.sampleChan: ew.mutex.Lock() ew.samples = append(ew.samples, s) ew.mutex.Unlock() } } }
go
{ "resource": "" }
q13405
Flush
train
func (ew *EventWorker) Flush() []ssf.SSFSample { ew.mutex.Lock() retsamples := ew.samples // these slices will be allocated again at append time ew.samples = nil ew.mutex.Unlock() if len(retsamples) != 0 { ew.stats.Count("worker.other_samples_flushed_total", int64(len(retsamples)), nil, 1.0) } return retsamples }
go
{ "resource": "" }
q13406
NewSpanWorker
train
func NewSpanWorker(sinks []sinks.SpanSink, cl *trace.Client, statsd *statsd.Client, spanChan <-chan *ssf.SSFSpan, commonTags map[string]string) *SpanWorker { tags := make([]map[string]string, len(sinks)) for i, sink := range sinks { tags[i] = map[string]string{ "sink": sink.Name(), } } return &SpanWorker{ SpanChan: spanChan, sinks: sinks, sinkTags: tags, commonTags: commonTags, cumulativeTimes: make([]int64, len(sinks)), traceClient: cl, statsd: statsd, } }
go
{ "resource": "" }
q13407
Flush
train
// Flush flushes every sink (timing each flush), reports the cumulative
// per-sink ingest durations accumulated since the previous flush, and
// emits the channel-capacity and empty-SSF counters.
func (tw *SpanWorker) Flush() {
	samples := &ssf.Samples{}

	// Flush and time each sink.
	for i, s := range tw.sinks {
		// render this sink's tag map into "k:v" statsd tags
		tags := make([]string, 0, len(tw.sinkTags[i]))
		for k, v := range tw.sinkTags[i] {
			tags = append(tags, fmt.Sprintf("%s:%s", k, v))
		}
		sinkFlushStart := time.Now()
		s.Flush()
		tw.statsd.Timing("worker.span.flush_duration_ns", time.Since(sinkFlushStart), tags, 1.0)

		// cumulative time is measured in nanoseconds
		cumulative := time.Duration(atomic.SwapInt64(&tw.cumulativeTimes[i], 0)) * time.Nanosecond
		tw.statsd.Timing(sinks.MetricKeySpanIngestDuration, cumulative, tags, 1.0)
	}

	metrics.Report(tw.traceClient, samples)
	tw.statsd.Count("worker.span.hit_chan_cap", atomic.SwapInt64(&tw.capCount, 0), nil, 1.0)
	tw.statsd.Count("worker.ssf.empty_total", atomic.SwapInt64(&tw.emptySSFCount, 0), nil, 1.0)
}
go
{ "resource": "" }
q13408
ReadProxyConfig
train
func ReadProxyConfig(path string) (c ProxyConfig, err error) { f, err := os.Open(path) if err != nil { return c, err } defer f.Close() c, err = readProxyConfig(f) c.applyDefaults() return }
go
{ "resource": "" }
q13409
ParseInterval
train
// ParseInterval parses the configured Interval string (e.g. "10s") into a
// time.Duration.
func (c Config) ParseInterval() (time.Duration, error) {
	interval, err := time.ParseDuration(c.Interval)
	return interval, err
}
go
{ "resource": "" }
q13410
RefreshDestinations
train
func (p *Proxy) RefreshDestinations(serviceName string, ring *consistent.Consistent, mtx *sync.Mutex) { samples := &ssf.Samples{} defer metrics.Report(p.TraceClient, samples) srvTags := map[string]string{"service": serviceName} start := time.Now() destinations, err := p.Discoverer.GetDestinationsForService(serviceName) samples.Add(ssf.Timing("discoverer.update_duration_ns", time.Since(start), time.Nanosecond, srvTags)) log.WithFields(logrus.Fields{ "destinations": destinations, "service": serviceName, }).Debug("Got destinations") samples.Add(ssf.Timing("discoverer.update_duration_ns", time.Since(start), time.Nanosecond, srvTags)) if err != nil || len(destinations) == 0 { log.WithError(err).WithFields(logrus.Fields{ "service": serviceName, "errorType": reflect.TypeOf(err), "numDestinations": len(destinations), }).Error("Discoverer found zero destinations and/or returned an error. Destinations may be stale!") samples.Add(ssf.Count("discoverer.errors", 1, srvTags)) // Return since we got no hosts. We don't want to zero out the list. This // should result in us leaving the "last good" values in the ring. return } mtx.Lock() ring.Set(destinations) mtx.Unlock() samples.Add(ssf.Gauge("discoverer.destination_number", float32(len(destinations)), srvTags)) }
go
{ "resource": "" }
q13411
ProxyMetrics
train
// ProxyMetrics distributes jsonMetrics across the forward destinations in
// the consistent hash ring (keyed by each metric's MetricKey) and posts
// every batch in parallel, waiting for all posts to finish. origin is used
// only to tag the import counter.
func (p *Proxy) ProxyMetrics(ctx context.Context, jsonMetrics []samplers.JSONMetric, origin string) {
	span, _ := trace.StartSpanFromContext(ctx, "veneur.opentracing.proxy.proxy_metrics")
	defer span.ClientFinish(p.TraceClient)

	if p.ForwardTimeout > 0 {
		var cancel func()
		// bound the entire forwarding fan-out by the configured timeout
		ctx, cancel = context.WithTimeout(ctx, p.ForwardTimeout)
		defer cancel()
	}
	metricCount := len(jsonMetrics)
	span.Add(ssf.RandomlySample(0.1,
		ssf.Count("import.metrics_total", float32(metricCount), map[string]string{
			"remote_addr":      origin,
			"veneurglobalonly": "",
		}),
	)...)

	jsonMetricsByDestination := make(map[string][]samplers.JSONMetric)
	for _, h := range p.ForwardDestinations.Members() {
		jsonMetricsByDestination[h] = make([]samplers.JSONMetric, 0)
	}

	// bucket each metric by the destination its key hashes to on the ring
	for _, jm := range jsonMetrics {
		dest, _ := p.ForwardDestinations.Get(jm.MetricKey.String())
		jsonMetricsByDestination[dest] = append(jsonMetricsByDestination[dest], jm)
	}

	// nb The response has already been returned at this point, because we
	// forward after responding — NOTE(review): this comment was truncated in
	// the original source; confirm against the caller.
	wg := sync.WaitGroup{}
	wg.Add(len(jsonMetricsByDestination)) // Make our waitgroup the size of our destinations

	for dest, batch := range jsonMetricsByDestination {
		go p.doPost(ctx, &wg, dest, batch)
	}
	wg.Wait() // Wait for all the above goroutines to complete
	log.WithField("count", metricCount).Debug("Completed forward")

	span.Add(ssf.RandomlySample(0.1,
		ssf.Timing("proxy.duration_ns", time.Since(span.Start), time.Nanosecond, nil),
		ssf.Count("proxy.proxied_metrics_total", float32(len(jsonMetrics)), nil),
	)...)
}
go
{ "resource": "" }
q13412
ImportMetrics
train
// ImportMetrics distributes imported JSON metrics across the server's
// workers, sending each worker one contiguous chunk rather than individual
// metrics.
func (s *Server) ImportMetrics(ctx context.Context, jsonMetrics []samplers.JSONMetric) {
	span, _ := trace.StartSpanFromContext(ctx, "veneur.opentracing.import.import_metrics")
	defer span.Finish()

	// we have a slice of json metrics that we need to divide up across the workers
	// we don't want to push one metric at a time (too much channel contention
	// and goroutine switching) and we also don't want to allocate a temp
	// slice for each worker (which we'll have to append to, therefore lots
	// of allocations)
	// instead, we'll compute the fnv hash of every metric in the array,
	// and sort the array by the hashes
	sortedIter := newJSONMetricsByWorker(jsonMetrics, len(s.Workers))
	for sortedIter.Next() {
		nextChunk, workerIndex := sortedIter.Chunk()
		s.Workers[workerIndex].ImportChan <- nextChunk
	}
	metrics.ReportOne(s.TraceClient,
		ssf.Timing("import.response_duration_ns", time.Since(span.Start), time.Nanosecond, map[string]string{"part": "merge"}))
}
go
{ "resource": "" }
q13413
newJSONMetricsByWorker
train
func newJSONMetricsByWorker(metrics []samplers.JSONMetric, numWorkers int) *jsonMetricsByWorker { ret := &jsonMetricsByWorker{ sjm: newSortableJSONMetrics(metrics, numWorkers), } sort.Sort(ret.sjm) return ret }
go
{ "resource": "" }
q13414
submitter
train
func (sss *splunkSpanSink) submitter(sync chan struct{}, ready chan struct{}) { ctx := context.Background() for { exit := sss.submitBatch(ctx, sync, ready) if exit { return } } }
go
{ "resource": "" }
q13415
setupHTTPRequest
train
// setupHTTPRequest opens a new HEC request and starts sending it in the
// background, returning the request context's cancel function, the request
// handle, and the writer events should be encoded to.
func (sss *splunkSpanSink) setupHTTPRequest(ctx context.Context) (context.CancelFunc, *hecRequest, io.Writer, error) {
	ctx, cancel := context.WithCancel(ctx)
	hecReq := sss.hec.newRequest()
	req, w, err := hecReq.Start(ctx)
	if err != nil {
		// avoid leaking the derived context when the request cannot start
		cancel()
		return nil, nil, nil, err
	}

	// At this point, we have a workable HTTP connection;
	// open it in the background:
	go sss.makeHTTPRequest(req, cancel)
	return cancel, hecReq, w, nil
}
go
{ "resource": "" }
q13416
submitOneEvent
train
// submitOneEvent JSON-encodes a single event onto w, bounded by the sink's
// send timeout when one is configured. It returns the encoding error, or
// the context's error if the deadline/cancellation fires first.
func (sss *splunkSpanSink) submitOneEvent(ctx context.Context, w io.Writer, ev *Event) error {
	if sss.sendTimeout > 0 {
		var cancel context.CancelFunc
		ctx, cancel = context.WithTimeout(ctx, sss.sendTimeout)
		defer cancel()
	}
	encodeErrors := make(chan error)
	enc := json.NewEncoder(w)
	go func() {
		// Encode can block on the writer, so run it in a goroutine and race
		// it against the context below. The ctx.Done case lets this
		// goroutine exit even when nobody is left to receive the error.
		err := enc.Encode(ev)
		select {
		case encodeErrors <- err:
		case <-ctx.Done():
		}
	}()
	select {
	case <-ctx.Done():
		return ctx.Err()
	case err := <-encodeErrors:
		return err
	}
}
go
{ "resource": "" }
q13417
Flush
train
// Flush reports this sink's ingested/dropped/skipped span counters,
// atomically resetting each one to zero.
func (sss *splunkSpanSink) Flush() {
	// report the sink stats:
	samples := &ssf.Samples{}
	samples.Add(
		ssf.Count(
			sinks.MetricKeyTotalSpansFlushed,
			float32(atomic.SwapUint32(&sss.ingestedSpans, 0)),
			map[string]string{"sink": sss.Name()}),
		ssf.Count(
			sinks.MetricKeyTotalSpansDropped,
			float32(atomic.SwapUint32(&sss.droppedSpans, 0)),
			map[string]string{"sink": sss.Name()},
		),
		ssf.Count(
			sinks.MetricKeyTotalSpansSkipped,
			float32(atomic.SwapUint32(&sss.skippedSpans, 0)),
			map[string]string{"sink": sss.Name()},
		),
	)
	metrics.Report(sss.traceClient, samples)
}
go
{ "resource": "" }
q13418
NewLightStepSpanSink
train
// NewLightStepSpanSink builds a LightStep span sink talking to the given
// collector URL. A bad port or reconnect period falls back to defaults
// with a warning; only an unparseable collector URL is a hard error.
// numClients <= 0 defaults to one multiplexed tracer client.
func NewLightStepSpanSink(collector string, reconnectPeriod string, maximumSpans int, numClients int, accessToken string, log *logrus.Logger) (*LightStepSpanSink, error) {
	var host *url.URL
	host, err := url.Parse(collector)
	if err != nil {
		log.WithError(err).WithField(
			"host", collector,
		).Error("Error parsing LightStep collector URL")
		return &LightStepSpanSink{}, err
	}

	port, err := strconv.Atoi(host.Port())
	if err != nil {
		log.WithError(err).WithFields(logrus.Fields{
			"port":         port,
			"default_port": lightstepDefaultPort,
		}).Warn("Error parsing LightStep port, using default")
		port = lightstepDefaultPort
	}

	reconPeriod := lightstepDefaultInterval
	if reconnectPeriod != "" {
		reconPeriod, err = time.ParseDuration(reconnectPeriod)
		if err != nil {
			log.WithError(err).WithFields(logrus.Fields{
				"interval":         reconnectPeriod,
				"default_interval": lightstepDefaultInterval,
			}).Warn("Failed to parse reconnect duration, using default.")
			reconPeriod = lightstepDefaultInterval
		}
	}

	log.WithFields(logrus.Fields{
		"Host": host.Hostname(),
		"Port": port,
	}).Info("Dialing lightstep host")

	lightstepMultiplexTracerNum := numClients
	// If config value is missing, this value should default to one client
	if lightstepMultiplexTracerNum <= 0 {
		lightstepMultiplexTracerNum = 1
	}

	tracers := make([]opentracing.Tracer, 0, lightstepMultiplexTracerNum)

	// an http:// collector scheme means plaintext transport
	plaintext := false
	if host.Scheme == "http" {
		plaintext = true
	}

	for i := 0; i < lightstepMultiplexTracerNum; i++ {
		tracers = append(tracers, lightstep.NewTracer(lightstep.Options{
			AccessToken:     accessToken,
			ReconnectPeriod: reconPeriod,
			Collector: lightstep.Endpoint{
				Host:      host.Hostname(),
				Port:      port,
				Plaintext: plaintext,
			},
			UseGRPC:          true,
			MaxBufferedSpans: maximumSpans,
		}))
	}

	return &LightStepSpanSink{
		tracers:      tracers,
		serviceCount: sync.Map{},
		mutex:        &sync.Mutex{},
		log:          log,
	}, nil
}
go
{ "resource": "" }
q13419
Ingest
train
// Ingest validates an SSF span, converts it to an OpenTracing span on one
// of the LightStep tracer clients (chosen by trace id), finishes it with
// the span's end time, and bumps the per-service flushed-span counter.
func (ls *LightStepSpanSink) Ingest(ssfSpan *ssf.SSFSpan) error {
	if err := protocol.ValidateTrace(ssfSpan); err != nil {
		return err
	}

	parentID := ssfSpan.ParentId
	// normalize negative parent ids to zero (no parent)
	if parentID <= 0 {
		parentID = 0
	}

	var errorCode int64
	if ssfSpan.Error {
		errorCode = 1
	}

	// SSF timestamps are nanoseconds since the epoch
	timestamp := time.Unix(ssfSpan.StartTimestamp/1e9, ssfSpan.StartTimestamp%1e9)

	if len(ls.tracers) == 0 {
		err := fmt.Errorf("No lightstep tracer clients initialized")
		ls.log.Error(err)
		return err
	}
	// pick the tracer to use
	tracerIndex := ssfSpan.TraceId % int64(len(ls.tracers))
	tracer := ls.tracers[tracerIndex]

	sp := tracer.StartSpan(
		ssfSpan.Name,
		opentracing.StartTime(timestamp),
		lightstep.SetTraceID(uint64(ssfSpan.TraceId)),
		lightstep.SetSpanID(uint64(ssfSpan.Id)),
		lightstep.SetParentSpanID(uint64(parentID)))

	sp.SetTag(trace.ResourceKey, ssfSpan.Tags[trace.ResourceKey]) // TODO Why is this here?
	sp.SetTag(lightstep.ComponentNameKey, ssfSpan.Service)
	sp.SetTag(indicatorSpanTagName, strconv.FormatBool(ssfSpan.Indicator))
	// TODO don't hardcode
	sp.SetTag("type", "http")
	sp.SetTag("error-code", errorCode)
	for k, v := range ssfSpan.Tags {
		sp.SetTag(k, v)
	}
	// TODO add metrics as tags to the span as well?

	if errorCode > 0 {
		// Note: this sets the OT-standard "error" tag, which
		// LightStep uses to flag error spans.
		ext.Error.Set(sp, true)
	}

	endTime := time.Unix(ssfSpan.EndTimestamp/1e9, ssfSpan.EndTimestamp%1e9)
	finishOpts := opentracing.FinishOptions{FinishTime: endTime}
	sp.FinishWithOptions(finishOpts)

	service := ssfSpan.Service
	if service == "" {
		service = "unknown"
	}

	count, ok := ls.serviceCount.Load(service)
	if !ok {
		// ensure the value is in the map
		// we only do this if the value was not found in the map once already, to save an
		// allocation and more expensive operation in the typical case
		var c int64 = 0
		count, _ = ls.serviceCount.LoadOrStore(service, &c)
	}

	c, ok := count.(*int64)
	if !ok {
		ls.log.WithField("type", reflect.TypeOf(count)).Debug(unexpectedCountTypeErr.Error())
		return unexpectedCountTypeErr
	}
	atomic.AddInt64(c, 1)
	return nil
}
go
{ "resource": "" }
q13420
Flush
train
// Flush reports, per service, the number of spans ingested since the last
// flush, atomically resetting each per-service counter to zero.
func (ls *LightStepSpanSink) Flush() {
	ls.mutex.Lock()
	defer ls.mutex.Unlock()

	samples := &ssf.Samples{}
	defer metrics.Report(ls.traceClient, samples)

	totalCount := int64(0)
	ls.serviceCount.Range(func(keyI, valueI interface{}) bool {
		// keys are service names; values are *int64 counters — anything
		// else is logged and skipped
		service, ok := keyI.(string)
		if !ok {
			ls.log.WithFields(logrus.Fields{
				"key":  keyI,
				"type": reflect.TypeOf(keyI),
			}).Error("Invalid key type in map when flushing Lightstep client")
			return true
		}
		value, ok := valueI.(*int64)
		if !ok {
			ls.log.WithFields(logrus.Fields{
				"value": valueI,
				"type":  reflect.TypeOf(valueI),
			}).Error("Invalid value type in map when flushing Lightstep client")
			return true
		}
		count := atomic.SwapInt64(value, 0)
		totalCount += count
		samples.Add(ssf.Count(sinks.MetricKeyTotalSpansFlushed, float32(count), map[string]string{"sink": ls.Name(), "service": service}))
		return true
	})

	ls.log.WithField("total_spans", totalCount).Debug("Checkpointing flushed spans for Lightstep")
}
go
{ "resource": "" }
q13421
NewConsul
train
func NewConsul(config *api.Config) (*Consul, error) { consulClient, err := api.NewClient(config) if err != nil { return nil, err } return &Consul{ ConsulHealth: consulClient.Health(), }, nil }
go
{ "resource": "" }
q13422
sendMetrics
train
// sendMetrics routes each metric to the worker selected by its digest.
func (m *metricExtractionSink) sendMetrics(metrics []samplers.UDPMetric) {
	workerCount := uint32(len(m.workers))
	for i := range metrics {
		m.workers[metrics[i].Digest%workerCount].IngestUDP(metrics[i])
	}
}
go
{ "resource": "" }
q13423
Ingest
train
// Ingest extracts metrics from an SSF span and routes them to the workers.
// Plain samples are always extracted; if the span is also a valid trace
// span, indicator and uniqueness metrics are extracted as well. Counters
// for generated metrics and processed spans are updated even on error.
func (m *metricExtractionSink) Ingest(span *ssf.SSFSpan) error {
	var metricsCount int
	defer func() {
		atomic.AddInt64(&m.metricsGenerated, int64(metricsCount))
		atomic.AddInt64(&m.spansProcessed, 1)
	}()

	metrics, err := samplers.ConvertMetrics(span)
	if err != nil {
		if _, ok := err.(samplers.InvalidMetrics); ok {
			// invalid metrics are a client problem: warn and keep going
			m.log.WithError(err).
				Warn("Could not parse metrics from SSF Message")
			m.SendSample(ssf.Count("ssf.error_total", 1, map[string]string{
				"packet_type": "ssf_metric",
				"step":        "extract_metrics",
				"reason":      "invalid_metrics",
			}))
		} else {
			m.log.WithError(err).Error("Unexpected error extracting metrics from SSF Message")
			m.SendSample(ssf.Count("ssf.error_total", 1, map[string]string{
				"packet_type": "ssf_metric",
				"step":        "extract_metrics",
				"reason":      "unexpected_error",
				"error":       err.Error(),
			}))
			return err
		}
	}
	metricsCount += len(metrics)
	m.sendMetrics(metrics)

	if err := protocol.ValidateTrace(span); err != nil {
		return err
	}

	// If we made it here, we are dealing with a fully-fledged
	// trace span, not just a mere carrier for Samples:
	indicatorMetrics, err := samplers.ConvertIndicatorMetrics(span, m.indicatorSpanTimerName, m.objectiveSpanTimerName)
	if err != nil {
		m.log.WithError(err).
			WithField("span_name", span.Name).
			Warn("Couldn't extract indicator metrics for span")
		return err
	}
	metricsCount += len(indicatorMetrics)

	spanMetrics, err := samplers.ConvertSpanUniquenessMetrics(span, 0.01)
	if err != nil {
		m.log.WithError(err).
			WithField("span_name", span.Name).
			Warn("Couldn't extract uniqueness metrics for span")
		return err
	}
	metricsCount += len(spanMetrics)

	m.sendMetrics(append(indicatorMetrics, spanMetrics...))
	return nil
}
go
{ "resource": "" }
q13424
NewSocket
train
// NewSocket opens a UDP listening socket on addr with the requested read
// buffer size. This platform variant does not support SO_REUSEPORT and
// panics if reuseport is requested.
func NewSocket(addr *net.UDPAddr, recvBuf int, reuseport bool) (net.PacketConn, error) {
	if reuseport {
		panic("SO_REUSEPORT not supported on this platform")
	}
	serverConn, err := net.ListenUDP("udp", addr)
	if err != nil {
		return nil, err
	}
	if err := serverConn.SetReadBuffer(recvBuf); err != nil {
		return nil, err
	}
	return serverConn, nil
}
go
{ "resource": "" }
q13425
Flush
train
func (p *Plugin) Flush(ctx context.Context, metrics []samplers.InterMetric) error { f, err := os.OpenFile(p.FilePath, os.O_RDWR|os.O_APPEND|os.O_CREATE, os.ModePerm) defer f.Close() if err != nil { return fmt.Errorf("couldn't open %s for appending: %s", p.FilePath, err) } appendToWriter(f, metrics, p.hostname, p.interval) return nil }
go
{ "resource": "" }
q13426
IsFramingError
train
func IsFramingError(err error) bool { switch err.(type) { case *errFrameVersion: return true case *errFramingIO: return true case *errFrameLength: return true } return false }
go
{ "resource": "" }
q13427
Add
train
func (s *Samples) Add(sample ...*SSFSample) { if s.Batch == nil { s.Batch = []*SSFSample{} } s.Batch = append(s.Batch, sample...) }
go
{ "resource": "" }
q13428
Timestamp
train
// Timestamp returns a SampleOption that stamps the sample with ts,
// expressed as nanoseconds since the Unix epoch.
func Timestamp(ts time.Time) SampleOption {
	return func(s *SSFSample) {
		s.Timestamp = ts.UnixNano()
	}
}
go
{ "resource": "" }
q13429
TimeUnit
train
// TimeUnit returns a SampleOption that sets the sample's unit name from
// the given resolution, when the resolution is one of the known entries in
// the resolutions table; otherwise the sample is left unchanged.
func TimeUnit(resolution time.Duration) SampleOption {
	return func(s *SSFSample) {
		if unit, ok := resolutions[resolution]; ok {
			s.Unit = unit
		}
	}
}
go
{ "resource": "" }
q13430
Gauge
train
// Gauge constructs a gauge SSFSample with the given name, value and tags
// at a sample rate of 1.0, applying opts in order.
func Gauge(name string, value float32, tags map[string]string, opts ...SampleOption) *SSFSample {
	return create(&SSFSample{
		Metric:     SSFSample_GAUGE,
		Name:       name,
		Value:      value,
		Tags:       tags,
		SampleRate: 1.0,
	}, opts)
}
go
{ "resource": "" }
q13431
Histogram
train
// Histogram constructs a histogram SSFSample with the given name, value
// and tags at a sample rate of 1.0, applying opts in order.
func Histogram(name string, value float32, tags map[string]string, opts ...SampleOption) *SSFSample {
	return create(&SSFSample{
		Metric:     SSFSample_HISTOGRAM,
		Name:       name,
		Value:      value,
		Tags:       tags,
		SampleRate: 1.0,
	}, opts)
}
go
{ "resource": "" }
q13432
Set
train
// Set constructs a set SSFSample with the given name and tags at a sample
// rate of 1.0. The set member is a string, so it is carried in the
// Message field rather than Value. opts are applied in order.
func Set(name string, value string, tags map[string]string, opts ...SampleOption) *SSFSample {
	return create(&SSFSample{
		Metric:     SSFSample_SET,
		Name:       name,
		Message:    value,
		Tags:       tags,
		SampleRate: 1.0,
	}, opts)
}
go
{ "resource": "" }
q13433
Status
train
// Status constructs a status-check SSFSample with the given name, state
// and tags at a sample rate of 1.0, applying opts in order.
func Status(name string, state SSFSample_Status, tags map[string]string, opts ...SampleOption) *SSFSample {
	return create(&SSFSample{
		Metric:     SSFSample_STATUS,
		Name:       name,
		Status:     state,
		Tags:       tags,
		SampleRate: 1.0,
	}, opts)
}
go
{ "resource": "" }
q13434
IsAcceptableMetric
train
func IsAcceptableMetric(metric samplers.InterMetric, sink MetricSink) bool { if metric.Sinks == nil { return true } return metric.Sinks.RouteTo(sink.Name()) }
go
{ "resource": "" }
q13435
runOnce
train
// runOnce draws `samples` values from distribution into a merging t-digest
// at the given compression, then writes accuracy diagnostics to four CSV
// writers (deviations, centroidErrors, sizes per centroid, plus errors for
// a fixed 0..1 quantile sweep), mirroring the CSVs produced by Dunning's
// reference t-digest tests. distname and run label every row.
func runOnce(distribution func() float64, compression float64, samples int, distname string, run int, deviations, centroidErrors, errors, sizes *csv.Writer) {
	td := tdigest.NewMerging(compression, true)
	allSamples := make([]float64, samples)
	for i := 0; i < samples; i++ {
		sample := distribution()
		td.Add(sample, 1)
		allSamples[i] = sample
	}
	// sorted samples give us the empirical ("real") CDF/quantiles below
	sort.Float64s(allSamples)

	centroids := td.Centroids()
	for i, centroid := range centroids {
		// compute the approximate cdf for this centroid's approximate mean
		// this is roughly equivalent to the sum of all previous centroids'
		// weights, plus half this centroid's weight, divided by the total weight
		// https://github.com/tdunning/t-digest/blob/master/src/test/java/com/tdunning/math/stats/TDigestTest.java#L357
		thisCDF := td.CDF(centroid.Mean)
		// compute the cdf of the centroid's approximate mean, but over the real sample set
		realCDF := floatCDF(allSamples, centroid.Mean)
		// find the real sample that matches this centroid's approximate cdf
		// this should be close to the centroid's real mean
		realMean := floatQuantile(allSamples, thisCDF)
		// compute distances to previous and next centroids (ie the range
		// that this centroid is expected to cover)
		distanceToPrev := centroid.Mean - td.Min()
		if i > 0 {
			distanceToPrev = centroid.Mean - centroids[i-1].Mean
		}
		distanceToNext := td.Max() - centroid.Mean
		if i < len(centroids)-1 {
			distanceToNext = centroids[i+1].Mean - centroid.Mean
		}
		// compute the centroid's real mean using its sample set
		sampledMean := 0.0
		for _, sample := range centroid.Samples {
			sampledMean += sample
			// equivalent to deviations.csv from dunning's tests
			deviations.Write(stringifySlice(
				distname,
				run,
				thisCDF,
				centroid.Weight,
				sample,
				centroid.Mean,
				distanceToPrev,
				distanceToNext,
				// where is this sample, as a proportion of the range covered by its centroid?
				(sample-centroid.Mean)/(distanceToNext+distanceToPrev),
			))
		}
		sampledMean /= float64(len(centroid.Samples))
		// and compute the CDF corresponding to this value
		sampledCDF := floatCDF(allSamples, sampledMean)

		// this csv is equivalent to errors.csv from dunning's tests, but
		// instead of testing a fixed range of quantiles, we test every centroid
		centroidErrors.Write(stringifySlice(
			distname,
			run,
			centroid.Mean,
			realMean, // this column is equivalent to the quantile section
			sampledMean,
			thisCDF,
			realCDF, // this column is equivalent to the cdf section
			sampledCDF,
			centroid.Weight,
			distanceToPrev,
			distanceToNext,
		))
		// this csv is equivalent to sizes.csv from dunning's tests
		sizes.Write(stringifySlice(
			distname,
			run,
			i,
			thisCDF,
			centroid.Weight,
		))
	}

	// now we compute errors for a fixed set of quantiles, as with errors.csv
	// in dunning's tests
	// we cover a wider range of quantiles just for the sake of completeness
	for i := 0; i <= 1000; i++ {
		quantile := float64(i) / 1000.0
		// find the real sample for the target quantile
		realQuantile := floatQuantile(allSamples, quantile)
		// find the estimated location of the target quantile
		estimatedQuantile := td.Quantile(quantile)
		// find the estimated cdf of the real sample
		estimatedCDF := td.CDF(realQuantile)
		errors.Write(stringifySlice(
			distname,
			run,
			quantile,
			estimatedCDF, // this column is equivalent to the cdf section
			realQuantile,
			estimatedQuantile, // this column is equivalent to the quantile section
		))
	}
}
go
{ "resource": "" }
q13436
Get
train
func (m *clientConnMap) Get(dest string) (conn *grpc.ClientConn, ok bool) { m.RLock() conn, ok = m.conns[dest] m.RUnlock() return }
go
{ "resource": "" }
q13437
Add
train
// Add dials dest and stores the resulting connection, unless one already
// exists. Safe for concurrent use: the map is re-checked under the write
// lock after dialing, and if another goroutine won the race the freshly
// dialed connection is closed and the existing one kept. Returns the dial
// error, if any.
func (m *clientConnMap) Add(dest string) error {
	// If the connection already exists, just exit early
	if _, ok := m.Get(dest); ok {
		return nil
	}

	// dial before taking the write lock
	conn, err := grpc.Dial(dest, m.options...)

	m.Lock()
	// re-check under the lock: someone may have added dest meanwhile
	_, ok := m.conns[dest]
	if !ok && err == nil {
		m.conns[dest] = conn
	}
	m.Unlock()

	if ok && err == nil {
		// lost the race; discard the redundant connection
		_ = conn.Close()
	}

	return err
}
go
{ "resource": "" }
q13438
Delete
train
func (m *clientConnMap) Delete(dest string) { m.Lock() if conn, ok := m.conns[dest]; ok { _ = conn.Close() } delete(m.conns, dest) m.Unlock() }
go
{ "resource": "" }
q13439
Keys
train
func (m *clientConnMap) Keys() []string { m.RLock() res := make([]string, 0, len(m.conns)) for k := range m.conns { res = append(res, k) } m.RUnlock() return res }
go
{ "resource": "" }
q13440
Clear
train
func (m *clientConnMap) Clear() { m.Lock() for _, conn := range m.conns { _ = conn.Close() } m.conns = make(map[string]*grpc.ClientConn) m.Unlock() }
go
{ "resource": "" }
q13441
NewClient
train
// NewClient constructs a SignalFx datapoint client pointed at endpoint and
// authenticated with apiKey, using the supplied HTTP client. It panics if
// the endpoint URL cannot be parsed (treated as a configuration error).
func NewClient(endpoint, apiKey string, client *http.Client) DPClient {
	baseURL, err := url.Parse(endpoint)
	if err != nil {
		panic(fmt.Sprintf("Could not parse endpoint base URL %q: %v", endpoint, err))
	}
	httpSink := sfxclient.NewHTTPSink()
	httpSink.AuthToken = apiKey
	// resolve the datapoint and event paths relative to the base endpoint
	httpSink.DatapointEndpoint = baseURL.ResolveReference(datapointURL).String()
	httpSink.EventEndpoint = baseURL.ResolveReference(eventURL).String()
	httpSink.Client = client
	return httpSink
}
go
{ "resource": "" }
q13442
NewSignalFxSink
train
// NewSignalFxSink constructs a SignalFxSink from its configuration. It
// never fails; the error return exists for constructor-interface symmetry.
func NewSignalFxSink(hostnameTag string, hostname string, commonDimensions map[string]string, log *logrus.Logger, client DPClient, varyBy string, perTagClients map[string]DPClient, metricNamePrefixDrops []string, metricTagPrefixDrops []string, derivedMetrics samplers.DerivedMetricsProcessor, maxPointsInBatch int) (*SignalFxSink, error) {
	return &SignalFxSink{
		defaultClient:         client,
		clientsByTagValue:     perTagClients,
		hostnameTag:           hostnameTag,
		hostname:              hostname,
		commonDimensions:      commonDimensions,
		log:                   log,
		varyBy:                varyBy,
		metricNamePrefixDrops: metricNamePrefixDrops,
		metricTagPrefixDrops:  metricTagPrefixDrops,
		derivedMetrics:        derivedMetrics,
		maxPointsInBatch:      maxPointsInBatch,
	}, nil
}
go
{ "resource": "" }
q13443
Start
train
// Start records the trace client the sink will report with; it never fails.
func (sfx *SignalFxSink) Start(traceClient *trace.Client) error {
	sfx.traceClient = traceClient
	return nil
}
go
{ "resource": "" }
q13444
client
train
func (sfx *SignalFxSink) client(key string) DPClient { if cl, ok := sfx.clientsByTagValue[key]; ok { return cl } return sfx.defaultClient }
go
{ "resource": "" }
q13445
newPointCollection
train
// newPointCollection returns an empty datapoint collection bound to this
// sink.
func (sfx *SignalFxSink) newPointCollection() *collection {
	return &collection{
		sink:        sfx,
		points:      []*datapoint.Datapoint{},
		pointsByKey: map[string][]*datapoint.Datapoint{},
	}
}
go
{ "resource": "" }
q13446
FlushOtherSamples
train
// FlushOtherSamples forwards dogstatsd events (samples carrying the event
// identifier tag) to SignalFx, reporting success and failure counts;
// samples without the tag are ignored.
func (sfx *SignalFxSink) FlushOtherSamples(ctx context.Context, samples []ssf.SSFSample) {
	span, _ := trace.StartSpanFromContext(ctx, "")
	defer span.ClientFinish(sfx.traceClient)
	var countFailed = 0
	var countSuccess = 0
	for _, sample := range samples {
		if _, ok := sample.Tags[dogstatsd.EventIdentifierKey]; ok {
			err := sfx.reportEvent(ctx, &sample)
			if err != nil {
				countFailed++
			} else {
				countSuccess++
			}
		}
	}
	if countSuccess > 0 {
		span.Add(ssf.Count(sinks.EventReportedCount, float32(countSuccess), successSpanTags))
	}
	if countFailed > 0 {
		span.Add(ssf.Count(sinks.EventReportedCount, float32(countFailed), failureSpanTags))
	}
}
go
{ "resource": "" }
q13447
HandleTracePacket
train
func (s *Server) HandleTracePacket(packet []byte) { samples := &ssf.Samples{} defer metrics.Report(s.TraceClient, samples) // Unlike metrics, protobuf shouldn't have an issue with 0-length packets if len(packet) == 0 { s.Statsd.Count("ssf.error_total", 1, []string{"ssf_format:packet", "packet_type:unknown", "reason:zerolength"}, 1.0) log.Warn("received zero-length trace packet") return } s.Statsd.Histogram("ssf.packet_size", float64(len(packet)), nil, .1) span, err := protocol.ParseSSF(packet) if err != nil { reason := "reason:" + err.Error() s.Statsd.Count("ssf.error_total", 1, []string{"ssf_format:packet", "packet_type:ssf_metric", reason}, 1.0) log.WithError(err).Warn("ParseSSF") return } // we want to keep track of this, because it's a client problem, but still // handle the span normally if span.Id == 0 { reason := "reason:" + "empty_id" s.Statsd.Count("ssf.error_total", 1, []string{"ssf_format:packet", "packet_type:ssf_metric", reason}, 1.0) log.WithError(err).Warn("ParseSSF") } s.handleSSF(span, "packet") }
go
{ "resource": "" }
q13448
ReadMetricSocket
train
// ReadMetricSocket reads UDP metric datagrams from serverConn forever,
// dispatching each one to processMetricPacket. Buffers come from
// packetPool and are recycled downstream.
func (s *Server) ReadMetricSocket(serverConn net.PacketConn, packetPool *sync.Pool) {
	for {
		buf := packetPool.Get().([]byte)
		n, _, err := serverConn.ReadFrom(buf)
		if err != nil {
			// keep serving; a read error should not kill the reader loop
			log.WithError(err).Error("Error reading from UDP metrics socket")
			continue
		}
		s.processMetricPacket(n, buf, packetPool)
	}
}
go
{ "resource": "" }
q13449
processMetricPacket
train
func (s *Server) processMetricPacket(numBytes int, buf []byte, packetPool *sync.Pool) { if numBytes > s.metricMaxLength { metrics.ReportOne(s.TraceClient, ssf.Count("packet.error_total", 1, map[string]string{"packet_type": "unknown", "reason": "toolong"})) return } // statsd allows multiple packets to be joined by newlines and sent as // one larger packet // note that spurious newlines are not allowed in this format, it has // to be exactly one newline between each packet, with no leading or // trailing newlines splitPacket := samplers.NewSplitBytes(buf[:numBytes], '\n') for splitPacket.Next() { s.HandleMetricPacket(splitPacket.Chunk()) } // the Metric struct created by HandleMetricPacket has no byte slices in it, // only strings // therefore there are no outstanding references to this byte slice, we // can return it to the pool packetPool.Put(buf) }
go
{ "resource": "" }
q13450
ReadStatsdDatagramSocket
train
// ReadStatsdDatagramSocket reads statsd datagrams from a Unix domain
// socket forever. A read error during shutdown ends the loop; any other
// read error is logged and the loop continues.
func (s *Server) ReadStatsdDatagramSocket(serverConn *net.UnixConn, packetPool *sync.Pool) {
	for {
		buf := packetPool.Get().([]byte)
		n, _, err := serverConn.ReadFromUnix(buf)
		if err != nil {
			select {
			case <-s.shutdown:
				log.WithError(err).Info("Ignoring ReadFrom error while shutting down")
				return
			default:
				log.WithError(err).Error("Error reading packet from Unix domain socket")
				continue
			}
		}

		s.processMetricPacket(n, buf, packetPool)
	}
}
go
{ "resource": "" }
q13451
ReadSSFPacketSocket
train
func (s *Server) ReadSSFPacketSocket(serverConn net.PacketConn, packetPool *sync.Pool) { // TODO This is duplicated from ReadMetricSocket and feels like it could be it's // own function? p := packetPool.Get().([]byte) if len(p) == 0 { log.WithField("len", len(p)).Fatal( "packetPool making empty slices: trace_max_length_bytes must be >= 0") } packetPool.Put(p) for { buf := packetPool.Get().([]byte) n, _, err := serverConn.ReadFrom(buf) if err != nil { // In tests, the probably-best way to // terminate this reader is to issue a shutdown and close the listening // socket, which returns an error, so let's handle it here: select { case <-s.shutdown: log.WithError(err).Info("Ignoring ReadFrom error while shutting down") return default: log.WithError(err).Error("Error reading from UDP trace socket") continue } } s.HandleTracePacket(buf[:n]) packetPool.Put(buf) } }
go
{ "resource": "" }
q13452
ReadTCPSocket
train
func (s *Server) ReadTCPSocket(listener net.Listener) { for { conn, err := listener.Accept() if err != nil { select { case <-s.shutdown: // occurs when cleanly shutting down the server e.g. in tests; ignore errors log.WithError(err).Info("Ignoring Accept error while shutting down") return default: log.WithError(err).Fatal("TCP accept failed") } } go s.handleTCPGoroutine(conn) } }
go
{ "resource": "" }
q13453
HTTPServe
train
func (s *Server) HTTPServe() { var prf interface { Stop() } // We want to make sure the profile is stopped // exactly once (and only once), even if the // shutdown pre-hook does not run (which it may not) profileStopOnce := sync.Once{} if s.enableProfiling { profileStartOnce.Do(func() { prf = profile.Start() }) defer func() { profileStopOnce.Do(prf.Stop) }() } httpSocket := bind.Socket(s.HTTPAddr) graceful.Timeout(10 * time.Second) graceful.PreHook(func() { if prf != nil { profileStopOnce.Do(prf.Stop) } log.Info("Terminating HTTP listener") }) // Ensure that the server responds to SIGUSR2 even // when *not* running under einhorn. graceful.AddSignal(syscall.SIGUSR2, syscall.SIGHUP) graceful.HandleSignals() gracefulSocket := graceful.WrapListener(httpSocket) log.WithField("address", s.HTTPAddr).Info("HTTP server listening") // Signal that the HTTP server is starting atomic.AddInt32(s.numListeningHTTP, 1) defer atomic.AddInt32(s.numListeningHTTP, -1) bind.Ready() if err := http.Serve(gracefulSocket, s.Handler()); err != nil { log.WithError(err).Error("HTTP server shut down due to error") } log.Info("Stopped HTTP server") graceful.Shutdown() }
go
{ "resource": "" }
q13454
registerPlugin
train
func (s *Server) registerPlugin(p plugins.Plugin) { s.pluginMtx.Lock() defer s.pluginMtx.Unlock() s.plugins = append(s.plugins, p) }
go
{ "resource": "" }
q13455
CalculateTickDelay
train
func CalculateTickDelay(interval time.Duration, t time.Time) time.Duration { return t.Truncate(interval).Add(interval).Sub(t) }
go
{ "resource": "" }
q13456
setSinkExcludedTags
train
func setSinkExcludedTags(excludeRules []string, metricSinks []sinks.MetricSink) { type excludableSink interface { SetExcludedTags([]string) } for _, sink := range metricSinks { if s, ok := sink.(excludableSink); ok { excludedTags := generateExcludedTags(excludeRules, sink.Name()) log.WithFields(logrus.Fields{ "sink": sink.Name(), "excludedTags": excludedTags, }).Debug("Setting excluded tags on sink") s.SetExcludedTags(excludedTags) } } }
go
{ "resource": "" }
q13457
ValidTrace
train
// ValidTrace reports whether span carries the minimum fields required of a
// usable trace span: non-zero ids and timestamps, and a non-empty name.
func ValidTrace(span *ssf.SSFSpan) bool {
	switch {
	case span.Id == 0, span.TraceId == 0:
		return false
	case span.StartTimestamp == 0, span.EndTimestamp == 0:
		return false
	case span.Name == "":
		return false
	}
	return true
}
go
{ "resource": "" }
q13458
ValidateTrace
train
func ValidateTrace(span *ssf.SSFSpan) error { if !ValidTrace(span) { return &InvalidTrace{span} } return nil }
go
{ "resource": "" }
q13459
WriteSSF
train
func WriteSSF(out io.Writer, ssf *ssf.SSFSpan) (int, error) { pbuf := pbufPool.Get().(*proto.Buffer) err := pbuf.Marshal(ssf) if err != nil { // This is not a framing error, as we haven't written // anything to the stream yet. return 0, err } defer func() { // Make sure we reset the scratch protobuffer (by default, it // would retain its contents) and put it back into the pool: pbuf.Reset() pbufPool.Put(pbuf) }() if err = binary.Write(out, binary.BigEndian, version0); err != nil { return 0, &errFramingIO{err} } if err = binary.Write(out, binary.BigEndian, uint32(len(pbuf.Bytes()))); err != nil { return 0, &errFramingIO{err} } n, err := out.Write(pbuf.Bytes()) if err != nil { return n, &errFramingIO{err} } return n, nil }
go
{ "resource": "" }
q13460
NewDatadogMetricSink
train
// NewDatadogMetricSink builds a Datadog metric sink that flushes metrics in
// bodies of at most flushMaxPerBody points to the given Datadog host using
// apiKey. It never returns a non-nil error.
func NewDatadogMetricSink(interval float64, flushMaxPerBody int, hostname string, tags []string, ddHostname string, apiKey string, httpClient *http.Client, log *logrus.Logger) (*DatadogMetricSink, error) {
	sink := &DatadogMetricSink{
		APIKey:          apiKey,
		DDHostname:      ddHostname,
		HTTPClient:      httpClient,
		flushMaxPerBody: flushMaxPerBody,
		hostname:        hostname,
		interval:        interval,
		log:             log,
		tags:            tags,
	}
	return sink, nil
}
go
{ "resource": "" }
q13461
Flush
train
func (dd *DatadogMetricSink) Flush(ctx context.Context, interMetrics []samplers.InterMetric) error { span, _ := trace.StartSpanFromContext(ctx, "") defer span.ClientFinish(dd.traceClient) ddmetrics, checks := dd.finalizeMetrics(interMetrics) if len(checks) != 0 { // this endpoint is not documented to take an array... but it does // another curious constraint of this endpoint is that it does not // support "Content-Encoding: deflate" err := vhttp.PostHelper(context.TODO(), dd.HTTPClient, dd.traceClient, http.MethodPost, fmt.Sprintf("%s/api/v1/check_run?api_key=%s", dd.DDHostname, dd.APIKey), checks, "flush_checks", false, map[string]string{"sink": "datadog"}, dd.log) if err == nil { dd.log.WithField("checks", len(checks)).Info("Completed flushing service checks to Datadog") } else { dd.log.WithFields(logrus.Fields{ "checks": len(checks), logrus.ErrorKey: err}).Warn("Error flushing checks to Datadog") } } // break the metrics into chunks of approximately equal size, such that // each chunk is less than the limit // we compute the chunks using rounding-up integer division workers := ((len(ddmetrics) - 1) / dd.flushMaxPerBody) + 1 chunkSize := ((len(ddmetrics) - 1) / workers) + 1 dd.log.WithField("workers", workers).Debug("Worker count chosen") dd.log.WithField("chunkSize", chunkSize).Debug("Chunk size chosen") var wg sync.WaitGroup flushStart := time.Now() for i := 0; i < workers; i++ { chunk := ddmetrics[i*chunkSize:] if i < workers-1 { // trim to chunk size unless this is the last one chunk = chunk[:chunkSize] } wg.Add(1) go dd.flushPart(span.Attach(ctx), chunk, &wg) } wg.Wait() tags := map[string]string{"sink": dd.Name()} span.Add( ssf.Timing(sinks.MetricKeyMetricFlushDuration, time.Since(flushStart), time.Nanosecond, tags), ssf.Count(sinks.MetricKeyTotalMetricsFlushed, float32(len(ddmetrics)), tags), ) dd.log.WithField("metrics", len(ddmetrics)).Info("Completed flush to Datadog") return nil }
go
{ "resource": "" }
q13462
NewDatadogSpanSink
train
func NewDatadogSpanSink(address string, bufferSize int, httpClient *http.Client, log *logrus.Logger) (*DatadogSpanSink, error) { if bufferSize == 0 { bufferSize = datadogSpanBufferSize } return &DatadogSpanSink{ HTTPClient: httpClient, bufferSize: bufferSize, buffer: ring.New(bufferSize), mutex: &sync.Mutex{}, traceAddress: address, log: log, }, nil }
go
{ "resource": "" }
q13463
Ingest
train
func (dd *DatadogSpanSink) Ingest(span *ssf.SSFSpan) error { if err := protocol.ValidateTrace(span); err != nil { return err } dd.mutex.Lock() defer dd.mutex.Unlock() dd.buffer.Value = span dd.buffer = dd.buffer.Next() return nil }
go
{ "resource": "" }
q13464
Capacity
train
func Capacity(n uint) ClientParam { return func(cl *Client) error { cl.cap = n return nil } }
go
{ "resource": "" }
q13465
BufferedSize
train
func BufferedSize(size uint) ClientParam { return func(cl *Client) error { if cl.backendParams != nil { cl.backendParams.bufferSize = size return nil } return ErrClientNotNetworked } }
go
{ "resource": "" }
q13466
FlushInterval
train
func FlushInterval(interval time.Duration) ClientParam { t := time.NewTicker(interval) return FlushChannel(t.C, t.Stop) }
go
{ "resource": "" }
q13467
FlushChannel
train
func FlushChannel(ch <-chan time.Time, stop func()) ClientParam { return func(cl *Client) error { if cl.backendParams == nil { return ErrClientNotNetworked } cl.flush = func(ctx context.Context) { defer stop() for { select { case <-ch: _ = Flush(cl) case <-ctx.Done(): return } } } return nil } }
go
{ "resource": "" }
q13468
MaxBackoffTime
train
func MaxBackoffTime(t time.Duration) ClientParam { return func(cl *Client) error { if cl.backendParams != nil { cl.backendParams.maxBackoff = t return nil } return ErrClientNotNetworked } }
go
{ "resource": "" }
q13469
ParallelBackends
train
func ParallelBackends(nBackends uint) ClientParam { return func(cl *Client) error { if cl.backendParams == nil { return ErrClientNotNetworked } cl.nBackends = nBackends return nil } }
go
{ "resource": "" }
q13470
NewChannelClient
train
func NewChannelClient(spanChan chan<- *ssf.SSFSpan, opts ...ClientParam) (*Client, error) { cl := &Client{} cl.flushBackends = []flushNotifier{} cl.spans = spanChan for _, opt := range opts { if err := opt(cl); err != nil { return nil, err } } ctx := context.Background() ctx, cl.cancel = context.WithCancel(ctx) cl.run(ctx) return cl, nil }
go
{ "resource": "" }
q13471
SetDefaultClient
train
func SetDefaultClient(client *Client) { oldClient := DefaultClient DefaultClient = client // Ensure the old client is closed so it does not leak connections if oldClient != nil { oldClient.Close() } }
go
{ "resource": "" }
q13472
NeutralizeClient
train
func NeutralizeClient(client *Client) { client.Close() client.records = nil client.spans = nil client.flushBackends = []flushNotifier{} }
go
{ "resource": "" }
q13473
Record
train
func Record(cl *Client, span *ssf.SSFSpan, done chan<- error) error { if cl == nil { return ErrNoClient } op := &recordOp{span: span, result: done} select { case cl.spans <- span: atomic.AddInt64(&cl.successfulRecords, 1) if done != nil { go func() { done <- nil }() } return nil case cl.records <- op: atomic.AddInt64(&cl.successfulRecords, 1) return nil default: } atomic.AddInt64(&cl.failedRecords, 1) return ErrWouldBlock }
go
{ "resource": "" }
q13474
NewMerging
train
func NewMerging(compression float64, debug bool) *MergingDigest { // this is a provable upper bound on the size of the centroid list // TODO: derive it myself sizeBound := int((math.Pi * compression / 2) + 0.5) return &MergingDigest{ compression: compression, mainCentroids: make([]Centroid, 0, sizeBound), tempCentroids: make([]Centroid, 0, estimateTempBuffer(compression)), min: math.Inf(+1), max: math.Inf(-1), debug: debug, } }
go
{ "resource": "" }
q13475
NewMergingFromData
train
func NewMergingFromData(d *MergingDigestData) *MergingDigest { td := &MergingDigest{ compression: d.Compression, mainCentroids: d.MainCentroids, tempCentroids: make([]Centroid, 0, estimateTempBuffer(d.Compression)), min: d.Min, max: d.Max, reciprocalSum: d.ReciprocalSum, } // Initialize the weight to the sum of the weights of the centroids td.mainWeight = 0 for _, c := range td.mainCentroids { td.mainWeight += c.Weight } return td }
go
{ "resource": "" }
q13476
Add
train
func (td *MergingDigest) Add(value float64, weight float64) { if math.IsNaN(value) || math.IsInf(value, 0) || weight <= 0 { panic("invalid value added") } if len(td.tempCentroids) == cap(td.tempCentroids) { td.mergeAllTemps() } td.min = math.Min(td.min, value) td.max = math.Max(td.max, value) td.reciprocalSum += (1 / value) * weight next := Centroid{ Mean: value, Weight: weight, } if td.debug { next.Samples = []float64{value} } td.tempCentroids = append(td.tempCentroids, next) td.tempWeight += weight }
go
{ "resource": "" }
q13477
mergeAllTemps
train
func (td *MergingDigest) mergeAllTemps() { // this optimization is really important! if you remove it, the main list // will get merged into itself every time this is called if len(td.tempCentroids) == 0 { return } // we iterate over both centroid lists from least to greatest mean, so first // we have to sort this one sort.Sort(centroidList(td.tempCentroids)) tempIndex := 0 // total weight that the final t-digest will have, after everything is merged totalWeight := td.mainWeight + td.tempWeight // how much weight has been merged so far mergedWeight := 0.0 // the index of the last quantile to be merged into the previous centroid // this value gets updated each time we split a new centroid out instead of // merging into the current one lastMergedIndex := 0.0 // since we will be merging in-place into td.mainCentroids, we need to keep // track of the indices of the remaining elements actualMainCentroids := td.mainCentroids td.mainCentroids = td.mainCentroids[:0] // to facilitate the in-place merge, we will need a place to store the main // centroids that would be overwritten - we will use space from the start // of tempCentroids for this swappedCentroids := td.tempCentroids[:0] for len(actualMainCentroids)+len(swappedCentroids) != 0 || tempIndex < len(td.tempCentroids) { nextTemp := Centroid{ Mean: math.Inf(+1), Weight: 0, } if tempIndex < len(td.tempCentroids) { nextTemp = td.tempCentroids[tempIndex] } nextMain := Centroid{ Mean: math.Inf(+1), Weight: 0, } if len(swappedCentroids) != 0 { nextMain = swappedCentroids[0] } else if len(actualMainCentroids) != 0 { nextMain = actualMainCentroids[0] } if nextMain.Mean < nextTemp.Mean { if len(actualMainCentroids) != 0 { if len(swappedCentroids) != 0 { // if this came from swap, before merging, we have to save // the next main centroid at the end // this copy is probably the most expensive part of the // in-place merge, compared to merging into a separate buffer copy(swappedCentroids, swappedCentroids[1:]) 
swappedCentroids[len(swappedCentroids)-1] = actualMainCentroids[0] } actualMainCentroids = actualMainCentroids[1:] } else { // the real main has been completely exhausted, so we're just // cleaning out swapped mains now swappedCentroids = swappedCentroids[1:] } lastMergedIndex = td.mergeOne(mergedWeight, totalWeight, lastMergedIndex, nextMain) mergedWeight += nextMain.Weight } else { // before merging, we have to save the next main centroid somewhere // else, so that we don't overwrite it if len(actualMainCentroids) != 0 { swappedCentroids = append(swappedCentroids, actualMainCentroids[0]) actualMainCentroids = actualMainCentroids[1:] } tempIndex++ lastMergedIndex = td.mergeOne(mergedWeight, totalWeight, lastMergedIndex, nextTemp) mergedWeight += nextTemp.Weight } } td.tempCentroids = td.tempCentroids[:0] td.tempWeight = 0 td.mainWeight = totalWeight }
go
{ "resource": "" }
q13478
indexEstimate
train
func (td *MergingDigest) indexEstimate(quantile float64) float64 { // TODO: a polynomial approximation of arcsine should be a lot faster return td.compression * ((math.Asin(2*quantile-1) / math.Pi) + 0.5) }
go
{ "resource": "" }
q13479
Quantile
train
func (td *MergingDigest) Quantile(quantile float64) float64 { if quantile < 0 || quantile > 1 { panic("quantile out of bounds") } td.mergeAllTemps() // add up the weights of centroids in ascending order until we reach a // centroid that pushes us over the quantile q := quantile * td.mainWeight weightSoFar := 0.0 lowerBound := td.min for i, c := range td.mainCentroids { upperBound := td.centroidUpperBound(i) if q <= weightSoFar+c.Weight { // the target quantile is somewhere inside this centroid // we compute how much of this centroid's weight falls into the quantile proportion := (q - weightSoFar) / c.Weight // and interpolate what value that corresponds to inside a uniform // distribution return lowerBound + (proportion * (upperBound - lowerBound)) } // the quantile is above this centroid, so sum the weight and carry on weightSoFar += c.Weight lowerBound = upperBound } // should never be reached unless empty, since the final comparison is // q <= td.mainWeight return math.NaN() }
go
{ "resource": "" }
q13480
Merge
train
func (td *MergingDigest) Merge(other *MergingDigest) { oldReciprocalSum := td.reciprocalSum shuffledIndices := rand.Perm(len(other.mainCentroids)) for _, i := range shuffledIndices { td.Add(other.mainCentroids[i].Mean, other.mainCentroids[i].Weight) } // we did not merge other's temps, so we need to add those too // they're unsorted so there's no need to shuffle them for i := range other.tempCentroids { td.Add(other.tempCentroids[i].Mean, other.tempCentroids[i].Weight) } td.reciprocalSum = oldReciprocalSum + other.reciprocalSum }
go
{ "resource": "" }
q13481
Centroids
train
func (td *MergingDigest) Centroids() []Centroid { if !td.debug { panic("must enable debug to call Centroids()") } td.mergeAllTemps() return td.mainCentroids }
go
{ "resource": "" }
q13482
tallyMetrics
train
func (s *Server) tallyMetrics(percentiles []float64) ([]WorkerMetrics, metricsSummary) { // allocating this long array to count up the sizes is cheaper than appending // the []WorkerMetrics together one at a time tempMetrics := make([]WorkerMetrics, 0, len(s.Workers)) ms := metricsSummary{} for i, w := range s.Workers { log.WithField("worker", i).Debug("Flushing") wm := w.Flush() tempMetrics = append(tempMetrics, wm) ms.totalCounters += len(wm.counters) ms.totalGauges += len(wm.gauges) ms.totalHistograms += len(wm.histograms) ms.totalSets += len(wm.sets) ms.totalTimers += len(wm.timers) ms.totalGlobalCounters += len(wm.globalCounters) ms.totalGlobalGauges += len(wm.globalGauges) ms.totalGlobalHistograms += len(wm.globalHistograms) ms.totalGlobalTimers += len(wm.globalTimers) ms.totalLocalHistograms += len(wm.localHistograms) ms.totalLocalSets += len(wm.localSets) ms.totalLocalTimers += len(wm.localTimers) ms.totalLocalStatusChecks += len(wm.localStatusChecks) } ms.totalLength = ms.totalCounters + ms.totalGauges + // histograms and timers each report a metric point for each percentile // plus a point for each of their aggregates (ms.totalTimers+ms.totalHistograms)*(s.HistogramAggregates.Count+len(percentiles)) + // local-only histograms will be flushed with percentiles, so we intentionally // use the original percentile list here. // remember that both the global veneur and the local instances have // 'local-only' histograms. 
ms.totalLocalSets + (ms.totalLocalTimers+ms.totalLocalHistograms)*(s.HistogramAggregates.Count+len(s.HistogramPercentiles)) // Global instances also flush sets and global counters, so be sure and add // them to the total size if !s.IsLocal() { ms.totalLength += ms.totalSets ms.totalLength += ms.totalGlobalCounters ms.totalLength += ms.totalGlobalGauges ms.totalLength += ms.totalGlobalHistograms * (s.HistogramAggregates.Count + len(s.HistogramPercentiles)) ms.totalLength += ms.totalGlobalTimers * (s.HistogramAggregates.Count + len(s.HistogramPercentiles)) } return tempMetrics, ms }
go
{ "resource": "" }
q13483
forwardGRPC
train
func (s *Server) forwardGRPC(ctx context.Context, wms []WorkerMetrics) { span, _ := trace.StartSpanFromContext(ctx, "") span.SetTag("protocol", "grpc") defer span.ClientFinish(s.TraceClient) exportStart := time.Now() // Collect all of the forwardable metrics from the various WorkerMetrics. var metrics []*metricpb.Metric for _, wm := range wms { metrics = append(metrics, wm.ForwardableMetrics(s.TraceClient)...) } span.Add( ssf.Timing("forward.duration_ns", time.Since(exportStart), time.Nanosecond, map[string]string{"part": "export"}), ssf.Gauge("forward.metrics_total", float32(len(metrics)), nil), // Maintain compatibility with metrics used in HTTP-based forwarding ssf.Count("forward.post_metrics_total", float32(len(metrics)), nil), ) if len(metrics) == 0 { log.Debug("Nothing to forward, skipping.") return } entry := log.WithFields(logrus.Fields{ "metrics": len(metrics), "destination": s.ForwardAddr, "protocol": "grpc", "grpcstate": s.grpcForwardConn.GetState().String(), }) c := forwardrpc.NewForwardClient(s.grpcForwardConn) grpcStart := time.Now() _, err := c.SendMetrics(ctx, &forwardrpc.MetricList{Metrics: metrics}) if err != nil { if ctx.Err() != nil { // We exceeded the deadline of the flush context. span.Add(ssf.Count("forward.error_total", 1, map[string]string{"cause": "deadline_exceeded"})) } else if statErr, ok := status.FromError(err); ok && (statErr.Message() == "all SubConns are in TransientFailure" || statErr.Message() == "transport is closing") { // We could check statErr.Code() == codes.Unavailable, but we don't know all of the cases that // could return that code. These two particular cases are fairly safe and usually associated // with connection rebalancing or host replacement, so we don't want them going to sentry. 
span.Add(ssf.Count("forward.error_total", 1, map[string]string{"cause": "transient_unavailable"})) } else { span.Add(ssf.Count("forward.error_total", 1, map[string]string{"cause": "send"})) entry.WithError(err).Error("Failed to forward to an upstream Veneur") } } else { entry.Info("Completed forward to an upstream Veneur") } span.Add( ssf.Timing("forward.duration_ns", time.Since(grpcStart), time.Nanosecond, map[string]string{"part": "grpc"}), ssf.Count("forward.error_total", 0, nil), ) }
go
{ "resource": "" }
q13484
EvaluateAsNodeset
train
func (xpath *XPath) EvaluateAsNodeset(nodePtr unsafe.Pointer, xpathExpr *Expression) (nodes []unsafe.Pointer, err error) { if nodePtr == nil { //evaluating xpath on a nil node returns no result. return } err = xpath.Evaluate(nodePtr, xpathExpr) if err != nil { return } nodes, err = xpath.ResultAsNodeset() return }
go
{ "resource": "" }
q13485
ResultAsNodeset
train
func (xpath *XPath) ResultAsNodeset() (nodes []unsafe.Pointer, err error) { if xpath.ResultPtr == nil { return } if xpath.ReturnType() != XPATH_NODESET { err = errors.New("Cannot convert XPath result to nodeset") } if nodesetPtr := xpath.ResultPtr.nodesetval; nodesetPtr != nil { if nodesetSize := int(nodesetPtr.nodeNr); nodesetSize > 0 { nodes = make([]unsafe.Pointer, nodesetSize) for i := 0; i < nodesetSize; i++ { nodes[i] = unsafe.Pointer(C.fetchNode(nodesetPtr, C.int(i))) } } } return }
go
{ "resource": "" }
q13486
ResultAsString
train
func (xpath *XPath) ResultAsString() (val string, err error) { if xpath.ReturnType() != XPATH_STRING { xpath.ResultPtr = C.xmlXPathConvertString(xpath.ResultPtr) } val = C.GoString((*C.char)(unsafe.Pointer(xpath.ResultPtr.stringval))) return }
go
{ "resource": "" }
q13487
ResultAsNumber
train
func (xpath *XPath) ResultAsNumber() (val float64, err error) { if xpath.ReturnType() != XPATH_NUMBER { xpath.ResultPtr = C.xmlXPathConvertNumber(xpath.ResultPtr) } val = float64(xpath.ResultPtr.floatval) return }
go
{ "resource": "" }
q13488
ResultAsBoolean
train
func (xpath *XPath) ResultAsBoolean() (val bool, err error) { xpath.ResultPtr = C.xmlXPathConvertBoolean(xpath.ResultPtr) val = xpath.ResultPtr.boolval != 0 return }
go
{ "resource": "" }
q13489
SetResolver
train
func (xpath *XPath) SetResolver(v VariableScope) { C.set_var_lookup(xpath.ContextPtr, unsafe.Pointer(&v)) C.set_function_lookup(xpath.ContextPtr, unsafe.Pointer(&v)) }
go
{ "resource": "" }
q13490
NewNode
train
func NewNode(nodePtr unsafe.Pointer, document Document) (node Node) { if nodePtr == nil { return nil } xmlNode := &XmlNode{ Ptr: (*C.xmlNode)(nodePtr), Document: document, valid: true, } nodeType := NodeType(C.getNodeType((*C.xmlNode)(nodePtr))) switch nodeType { default: node = xmlNode case XML_ATTRIBUTE_NODE: node = &AttributeNode{XmlNode: xmlNode} case XML_ELEMENT_NODE: node = &ElementNode{XmlNode: xmlNode} case XML_CDATA_SECTION_NODE: node = &CDataNode{XmlNode: xmlNode} case XML_COMMENT_NODE: node = &CommentNode{XmlNode: xmlNode} case XML_PI_NODE: node = &ProcessingInstructionNode{XmlNode: xmlNode} case XML_TEXT_NODE: node = &TextNode{XmlNode: xmlNode} } return }
go
{ "resource": "" }
q13491
AddChild
train
func (xmlNode *XmlNode) AddChild(data interface{}) (err error) { switch t := data.(type) { default: if nodes, err := xmlNode.coerce(data); err == nil { for _, node := range nodes { if err = xmlNode.addChild(node); err != nil { break } } } case *DocumentFragment: if nodes, err := xmlNode.coerce(data); err == nil { for _, node := range nodes { if err = xmlNode.addChild(node); err != nil { break } } } case Node: err = xmlNode.addChild(t) } return }
go
{ "resource": "" }
q13492
AddPreviousSibling
train
func (xmlNode *XmlNode) AddPreviousSibling(data interface{}) (err error) { switch t := data.(type) { default: if nodes, err := xmlNode.coerce(data); err == nil { for _, node := range nodes { if err = xmlNode.addPreviousSibling(node); err != nil { break } } } case *DocumentFragment: if nodes, err := xmlNode.coerce(data); err == nil { for _, node := range nodes { if err = xmlNode.addPreviousSibling(node); err != nil { break } } } case Node: err = xmlNode.addPreviousSibling(t) } return }
go
{ "resource": "" }
q13493
AddNextSibling
train
func (xmlNode *XmlNode) AddNextSibling(data interface{}) (err error) { switch t := data.(type) { default: if nodes, err := xmlNode.coerce(data); err == nil { for i := len(nodes) - 1; i >= 0; i-- { node := nodes[i] if err = xmlNode.addNextSibling(node); err != nil { break } } } case *DocumentFragment: if nodes, err := xmlNode.coerce(data); err == nil { for i := len(nodes) - 1; i >= 0; i-- { node := nodes[i] if err = xmlNode.addNextSibling(node); err != nil { break } } } case Node: err = xmlNode.addNextSibling(t) } return }
go
{ "resource": "" }
q13494
NodePtr
train
func (xmlNode *XmlNode) NodePtr() (p unsafe.Pointer) { p = unsafe.Pointer(xmlNode.Ptr) return }
go
{ "resource": "" }
q13495
Path
train
func (xmlNode *XmlNode) Path() (path string) { pathPtr := C.xmlGetNodePath(xmlNode.Ptr) if pathPtr != nil { p := (*C.char)(unsafe.Pointer(pathPtr)) defer C.xmlFreeChars(p) path = C.GoString(p) } return }
go
{ "resource": "" }
q13496
Attribute
train
func (xmlNode *XmlNode) Attribute(name string) (attribute *AttributeNode) { if xmlNode.NodeType() != XML_ELEMENT_NODE { return } nameBytes := GetCString([]byte(name)) namePtr := unsafe.Pointer(&nameBytes[0]) attrPtr := C.xmlHasNsProp(xmlNode.Ptr, (*C.xmlChar)(namePtr), nil) if attrPtr == nil { return } else { node := NewNode(unsafe.Pointer(attrPtr), xmlNode.Document) if node, ok := node.(*AttributeNode); ok { attribute = node } } return }
go
{ "resource": "" }
q13497
Attr
train
func (xmlNode *XmlNode) Attr(name string) (val string) { if xmlNode.NodeType() != XML_ELEMENT_NODE { return } nameBytes := GetCString([]byte(name)) namePtr := unsafe.Pointer(&nameBytes[0]) valPtr := C.xmlGetProp(xmlNode.Ptr, (*C.xmlChar)(namePtr)) if valPtr == nil { return } p := unsafe.Pointer(valPtr) defer C.xmlFreeChars((*C.char)(p)) val = C.GoString((*C.char)(p)) return }
go
{ "resource": "" }
q13498
Search
train
func (xmlNode *XmlNode) Search(data interface{}) (result []Node, err error) { switch data := data.(type) { default: err = ERR_UNDEFINED_SEARCH_PARAM case string: if xpathExpr := xpath.Compile(data); xpathExpr != nil { defer xpathExpr.Free() result, err = xmlNode.Search(xpathExpr) } else { err = errors.New("cannot compile xpath: " + data) } case []byte: result, err = xmlNode.Search(string(data)) case *xpath.Expression: xpathCtx := xmlNode.Document.DocXPathCtx() nodePtrs, err := xpathCtx.EvaluateAsNodeset(unsafe.Pointer(xmlNode.Ptr), data) if nodePtrs == nil || err != nil { return nil, err } for _, nodePtr := range nodePtrs { result = append(result, NewNode(nodePtr, xmlNode.Document)) } } return }
go
{ "resource": "" }
q13499
EvalXPathAsBoolean
train
func (xmlNode *XmlNode) EvalXPathAsBoolean(data interface{}, v xpath.VariableScope) (result bool) { switch data := data.(type) { case string: if xpathExpr := xpath.Compile(data); xpathExpr != nil { defer xpathExpr.Free() result = xmlNode.EvalXPathAsBoolean(xpathExpr, v) } else { //err = errors.New("cannot compile xpath: " + data) } case []byte: result = xmlNode.EvalXPathAsBoolean(string(data), v) case *xpath.Expression: xpathCtx := xmlNode.Document.DocXPathCtx() xpathCtx.SetResolver(v) err := xpathCtx.Evaluate(unsafe.Pointer(xmlNode.Ptr), data) if err != nil { return false } result, _ = xpathCtx.ResultAsBoolean() default: //err = ERR_UNDEFINED_SEARCH_PARAM } return }
go
{ "resource": "" }