Dataset Viewer
Auto-converted to Parquet Duplicate
document_id
stringlengths
6
12
document
stringlengths
22
960k
split
stringclasses
1 value
d_go_0
func Show(f func(dx, dy int) [][]uint8) { const ( dx = 256 dy = 256 ) data := f(dx, dy) m := image.NewNRGBA(image.Rect(0, 0, dx, dy)) for y := 0; y < dy; y++ { for x := 0; x < dx; x++ { v := data[y][x] i := y*m.Stride + x*4 m.Pix[i] = v m.Pix[i+1] = v m.Pix[i+2] = 255 m.Pix[i+3] = 255 } } ShowImage(m) }
go
d_go_1
// ShowImage writes m to stdout as a single line of the form
// "IMAGE:<base64 png>\n", using best PNG compression.
func ShowImage(m image.Image) {
	out := bufio.NewWriter(os.Stdout)
	defer out.Flush()
	io.WriteString(out, "IMAGE:")
	enc := base64.NewEncoder(base64.StdEncoding, out)
	encoder := &png.Encoder{CompressionLevel: png.BestCompression}
	if err := encoder.Encode(enc, m); err != nil {
		panic(err)
	}
	// Close flushes the final base64 quantum before the trailing newline.
	enc.Close()
	io.WriteString(out, "\n")
}
go
d_go_2
func New(k int) *Tree { var t *Tree for _, v := range rand.Perm(10) { t = insert(t, (1+v)*k) } return t }
go
d_go_3
func DecodeData(ctx context.Context, in []byte, out interface{}) error { outmsg, ok := out.(proto.Message) if !ok { return fmt.Errorf("can only decode protobuf into proto.Message. got %T", out) } if err := proto.Unmarshal(in, outmsg); err != nil { return fmt.Errorf("failed to unmarshal message: %s", err) } return nil }
go
d_go_4
func EncodeData(ctx context.Context, in interface{}) ([]byte, error) { if b, ok := in.([]byte); ok { return b, nil } var pbmsg proto.Message var ok bool if pbmsg, ok = in.(proto.Message); !ok { return nil, fmt.Errorf("protobuf encoding only works with protobuf messages. got %T", in) } return proto.Marshal(pbmsg) }
go
d_go_5
func StringOfApplicationCloudEventsProtobuf() *string { a := ApplicationCloudEventsProtobuf return &a }
go
d_go_6
// ToProto converts an SDK event.Event into its protobuf wire representation.
// Optional context attributes (datacontenttype, dataschema, subject, time) are
// set only when non-zero on the event; extensions are copied verbatim. The
// payload defaults to BinaryData; when the content type is protobuf it is
// instead wrapped in an anypb.Any whose TypeUrl is the event's data schema.
func ToProto(e *event.Event) (*pb.CloudEvent, error) {
	container := &pb.CloudEvent{
		Id:          e.ID(),
		Source:      e.Source(),
		SpecVersion: e.SpecVersion(),
		Type:        e.Type(),
		Attributes:  make(map[string]*pb.CloudEventAttributeValue),
	}
	if e.DataContentType() != "" {
		// error intentionally dropped — presumably attributeFor cannot fail
		// for a plain string; TODO(review) confirm.
		container.Attributes[datacontenttype], _ = attributeFor(e.DataContentType())
	}
	if e.DataSchema() != "" {
		dataSchemaStr := e.DataSchema()
		uri, err := url.Parse(dataSchemaStr)
		if err != nil {
			return nil, fmt.Errorf("failed to url.Parse %s: %w", dataSchemaStr, err)
		}
		container.Attributes[dataschema], _ = attributeFor(uri)
	}
	if e.Subject() != "" {
		container.Attributes[subject], _ = attributeFor(e.Subject())
	}
	if e.Time() != zeroTime {
		container.Attributes[time], _ = attributeFor(e.Time())
	}
	for name, value := range e.Extensions() {
		attr, err := attributeFor(value)
		if err != nil {
			return nil, fmt.Errorf("failed to encode attribute %s: %s", name, err)
		}
		container.Attributes[name] = attr
	}
	// Default: carry the payload as raw binary data.
	container.Data = &pb.CloudEvent_BinaryData{
		BinaryData: e.Data(),
	}
	if e.DataContentType() == ContentTypeProtobuf {
		// Protobuf payloads are re-wrapped in an Any; the event's data schema
		// doubles as the Any type URL.
		anymsg := &anypb.Any{
			TypeUrl: e.DataSchema(),
			Value:   e.Data(),
		}
		container.Data = &pb.CloudEvent_ProtoData{
			ProtoData: anymsg,
		}
	}
	return container, nil
}
go
d_go_7
// FromProto converts a protobuf-encoded CloudEvent envelope back into an SDK
// event.Event. The data content type (when present) is read first because the
// data branch below needs it; attributes are then mapped onto their typed
// setters, with unknown names stored as extensions.
func FromProto(container *pb.CloudEvent) (*event.Event, error) {
	e := event.New()
	e.SetID(container.Id)
	e.SetSource(container.Source)
	e.SetSpecVersion(container.SpecVersion)
	e.SetType(container.Type)
	// NOTE: There are some issues around missing data content type values that
	// are still unresolved. It is an optional field and if unset then it is
	// implied that the encoding used for the envelope was also used for the
	// data. However, there is no mapping that exists between data content types
	// and the envelope content types. For example, how would this system know
	// that receiving an envelope in application/cloudevents+protobuf know that
	// the implied data content type if missing is application/protobuf.
	//
	// It is also not clear what should happen if the data content type is unset
	// but it is known that the data content type is _not_ the same as the
	// envelope. For example, a JSON encoded data value would be stored within
	// the BinaryData attribute of the protobuf formatted envelope. Protobuf
	// data values, however, are _always_ stored as a protobuf encoded Any type
	// within the ProtoData field. Any use of the BinaryData or TextData fields
	// means the value is _not_ protobuf. If content type is not set then have
	// no way of knowing what the data encoding actually is. Currently, this
	// code does not address this and only loads explicitly set data content
	// type values.
	contentType := ""
	if container.Attributes != nil {
		attr := container.Attributes[datacontenttype]
		if attr != nil {
			if stattr, ok := attr.Attr.(*pb.CloudEventAttributeValue_CeString); ok {
				contentType = stattr.CeString
			}
		}
	}
	switch dt := container.Data.(type) {
	case *pb.CloudEvent_BinaryData:
		e.DataEncoded = dt.BinaryData
		// NOTE: If we use SetData then the current implementation always sets
		// the Base64 bit to true. Direct assignment appears to be the only way
		// to set non-base64 encoded binary data.
		// if err := e.SetData(contentType, dt.BinaryData); err != nil {
		// 	return nil, fmt.Errorf("failed to convert binary type (%s) data: %s", contentType, err)
		// }
	case *pb.CloudEvent_TextData:
		if err := e.SetData(contentType, dt.TextData); err != nil {
			return nil, fmt.Errorf("failed to convert text type (%s) data: %s", contentType, err)
		}
	case *pb.CloudEvent_ProtoData:
		// Protobuf payloads always arrive as an Any; unwrap its raw bytes.
		e.SetDataContentType(ContentTypeProtobuf)
		e.DataEncoded = dt.ProtoData.Value
	}
	for name, value := range container.Attributes {
		v, err := valueFrom(value)
		if err != nil {
			return nil, fmt.Errorf("failed to convert attribute %s: %s", name, err)
		}
		switch name {
		case datacontenttype:
			vs, _ := v.(string)
			e.SetDataContentType(vs)
		case dataschema:
			vs, _ := v.(types.URI)
			e.SetDataSchema(vs.String())
		case subject:
			vs, _ := v.(string)
			e.SetSubject(vs)
		case time:
			vs, _ := v.(types.Timestamp)
			e.SetTime(vs.Time)
		default:
			e.SetExtension(name, v)
		}
	}
	return &e, nil
}
go
d_go_8
// Descriptor returns the gzipped FileDescriptorProto bytes for this file and
// the index path of CloudEvent within it. Generated-style protobuf accessor;
// kept byte-stable for compatibility.
func (*CloudEvent) Descriptor() ([]byte, []int) {
	return file_cloudevent_proto_rawDescGZIP(), []int{0}
}
go
d_go_9
// Descriptor returns the gzipped FileDescriptorProto bytes for this file and
// the index path of CloudEventAttributeValue within it. Generated-style
// protobuf accessor; kept byte-stable for compatibility.
func (*CloudEventAttributeValue) Descriptor() ([]byte, []int) {
	return file_cloudevent_proto_rawDescGZIP(), []int{1}
}
go
d_go_10
func NewReporter(ctx context.Context, on Observable) (context.Context, Reporter) { r := &reporter{ ctx: ctx, on: on, start: time.Now(), } r.tagMethod() return ctx, r }
go
d_go_11
// Error records an error result exactly once; subsequent calls are no-ops.
func (r *reporter) Error() {
	r.once.Do(func() {
		r.result(ResultError)
	})
}
go
d_go_12
// OK records a successful result exactly once; subsequent calls are no-ops.
func (r *reporter) OK() {
	r.once.Do(func() {
		r.result(ResultOK)
	})
}
go
d_go_13
// NewObservedHTTP builds an HTTP protocol whose round tripper and middleware
// report observability data; caller options are applied after the defaults.
func NewObservedHTTP(opts ...cehttp.Option) (*cehttp.Protocol, error) {
	defaults := []cehttp.Option{
		cehttp.WithRoundTripperDecorator(roundtripperDecorator),
		cehttp.WithMiddleware(tracecontextMiddleware),
	}
	return cehttp.New(append(defaults, opts...)...)
}
go
d_go_14
func NewClientHTTP(topt []cehttp.Option, copt []client.Option, obsOpts ...OTelObservabilityServiceOption) (client.Client, error) { t, err := obshttp.NewObservedHTTP(topt...) if err != nil { return nil, err } copt = append( copt, client.WithTimeNow(), client.WithUUIDs(), client.WithObservabilityService(NewOTelObservabilityService(obsOpts...)), ) c, err := client.New(t, copt...) if err != nil { return nil, err } return c, nil }
go
d_go_15
// NewCloudEventCarrier returns a carrier backed by an empty distributed
// tracing extension.
func NewCloudEventCarrier() CloudEventCarrier {
	ext := &extensions.DistributedTracingExtension{}
	return CloudEventCarrier{Extension: ext}
}
go
d_go_16
func NewCloudEventCarrierWithEvent(ctx context.Context, event cloudevents.Event) CloudEventCarrier { var te, ok = extensions.GetDistributedTracingExtension(event) if !ok { cecontext.LoggerFrom(ctx).Warn("Could not get the distributed tracing extension from the event.") return CloudEventCarrier{Extension: &extensions.DistributedTracingExtension{}} } return CloudEventCarrier{Extension: &te} }
go
d_go_17
func (cec CloudEventCarrier) Get(key string) string { switch key { case extensions.TraceParentExtension: return cec.Extension.TraceParent case extensions.TraceStateExtension: return cec.Extension.TraceState default: return "" } }
go
d_go_18
func (cec CloudEventCarrier) Set(key string, value string) { switch key { case extensions.TraceParentExtension: cec.Extension.TraceParent = value case extensions.TraceStateExtension: cec.Extension.TraceState = value } }
go
d_go_19
// Keys lists the header keys this carrier can transport.
func (cec CloudEventCarrier) Keys() []string {
	return []string{
		extensions.TraceParentExtension,
		extensions.TraceStateExtension,
	}
}
go
d_go_20
func InjectDistributedTracingExtension(ctx context.Context, event cloudevents.Event) { tc := propagation.TraceContext{} carrier := NewCloudEventCarrier() tc.Inject(ctx, carrier) carrier.Extension.AddTracingAttributes(&event) }
go
d_go_21
func ExtractDistributedTracingExtension(ctx context.Context, event cloudevents.Event) context.Context { tc := propagation.TraceContext{} carrier := NewCloudEventCarrierWithEvent(ctx, event) return tc.Extract(ctx, carrier) }
go
d_go_22
func NewOTelObservabilityService(opts ...OTelObservabilityServiceOption) *OTelObservabilityService { tracerProvider := otel.GetTracerProvider() o := &OTelObservabilityService{ tracer: tracerProvider.Tracer( instrumentationName, // TODO: Can we have the package version here? // trace.WithInstrumentationVersion("1.0.0"), ), spanNameFormatter: defaultSpanNameFormatter, } // apply passed options for _, opt := range opts { opt(o) } return o }
go
d_go_23
// InboundContextDecorators exposes the decorator that re-parents incoming
// contexts onto the span carried by a received message.
func (o OTelObservabilityService) InboundContextDecorators() []func(context.Context, binding.Message) context.Context {
	decorators := []func(context.Context, binding.Message) context.Context{
		tracePropagatorContextDecorator,
	}
	return decorators
}
go
d_go_24
func (o OTelObservabilityService) RecordReceivedMalformedEvent(ctx context.Context, err error) { spanName := observability.ClientSpanName + ".malformed receive" _, span := o.tracer.Start( ctx, spanName, trace.WithSpanKind(trace.SpanKindConsumer), trace.WithAttributes(attribute.String(string(semconv.CodeFunctionKey), getFuncName()))) recordSpanError(span, err) span.End() }
go
d_go_25
func (o OTelObservabilityService) RecordCallingInvoker(ctx context.Context, event *cloudevents.Event) (context.Context, func(errOrResult error)) { spanName := o.getSpanName(event, "process") ctx, span := o.tracer.Start( ctx, spanName, trace.WithSpanKind(trace.SpanKindConsumer), trace.WithAttributes(GetDefaultSpanAttributes(event, getFuncName())...)) if span.IsRecording() && o.spanAttributesGetter != nil { span.SetAttributes(o.spanAttributesGetter(*event)...) } return ctx, func(errOrResult error) { recordSpanError(span, errOrResult) span.End() } }
go
d_go_26
func (o OTelObservabilityService) RecordSendingEvent(ctx context.Context, event cloudevents.Event) (context.Context, func(errOrResult error)) { spanName := o.getSpanName(&event, "send") ctx, span := o.tracer.Start( ctx, spanName, trace.WithSpanKind(trace.SpanKindProducer), trace.WithAttributes(GetDefaultSpanAttributes(&event, getFuncName())...)) if span.IsRecording() && o.spanAttributesGetter != nil { span.SetAttributes(o.spanAttributesGetter(event)...) } return ctx, func(errOrResult error) { recordSpanError(span, errOrResult) span.End() } }
go
d_go_27
func GetDefaultSpanAttributes(e *cloudevents.Event, method string) []attribute.KeyValue { attr := []attribute.KeyValue{ attribute.String(string(semconv.CodeFunctionKey), method), attribute.String(observability.SpecversionAttr, e.SpecVersion()), attribute.String(observability.IdAttr, e.ID()), attribute.String(observability.TypeAttr, e.Type()), attribute.String(observability.SourceAttr, e.Source()), } if sub := e.Subject(); sub != "" { attr = append(attr, attribute.String(observability.SubjectAttr, sub)) } if dct := e.DataContentType(); dct != "" { attr = append(attr, attribute.String(observability.DatacontenttypeAttr, dct)) } return attr }
go
d_go_28
// tracePropagatorContextDecorator copies the trace span carried by an inbound
// message's context (if any) onto ctx so downstream handlers join the trace.
func tracePropagatorContextDecorator(ctx context.Context, msg binding.Message) context.Context {
	var messageCtx context.Context
	if mctx, ok := msg.(binding.MessageContext); ok {
		messageCtx = mctx.Context()
	} else if mctx, ok := binding.UnwrapMessage(msg).(binding.MessageContext); ok {
		// The message may be wrapped (e.g. with finish handlers); look inside.
		messageCtx = mctx.Context()
	}
	if messageCtx == nil {
		return ctx
	}
	span := trace.SpanFromContext(messageCtx)
	// NOTE(review): recent otel-go versions return a no-op span rather than
	// nil here, so this check may be vestigial — confirm against the pinned
	// otel version before removing.
	if span == nil {
		return ctx
	}
	return trace.ContextWithSpan(ctx, span)
}
go
d_go_29
func (o OTelObservabilityService) getSpanName(e *cloudevents.Event, suffix string) string { name := o.spanNameFormatter(*e) // make sure the span name ends with the suffix from the semantic conventions (receive, send, process) if !strings.HasSuffix(name, suffix) { return name + " " + suffix } return name }
go
d_go_30
// WithSpanAttributesGetter installs a callback that contributes extra span
// attributes per event; a nil getter is ignored.
func WithSpanAttributesGetter(attrGetter func(cloudevents.Event) []attribute.KeyValue) OTelObservabilityServiceOption {
	return func(os *OTelObservabilityService) {
		if attrGetter == nil {
			return
		}
		os.spanAttributesGetter = attrGetter
	}
}
go
d_go_31
// WithSpanNameFormatter overrides how span names are derived from events;
// a nil formatter is ignored, keeping the default.
func WithSpanNameFormatter(nameFormatter func(cloudevents.Event) string) OTelObservabilityServiceOption {
	return func(os *OTelObservabilityService) {
		if nameFormatter == nil {
			return
		}
		os.spanNameFormatter = nameFormatter
	}
}
go
d_go_32
func NewObservedHTTP(opts ...cehttp.Option) (*cehttp.Protocol, error) { // appends the OpenTelemetry Http transport + Middleware wrapper // to properly trace outgoing and incoming requests from the client using this protocol return cehttp.New(append( []cehttp.Option{ cehttp.WithRoundTripper(otelhttp.NewTransport(http.DefaultTransport)), cehttp.WithMiddleware(func(next http.Handler) http.Handler { return otelhttp.NewHandler(next, "cloudevents.http.receiver") }), }, opts..., )...) }
go
d_go_33
func NewMessage(message *amqp.Message, receiver *amqp.Receiver) *Message { var vn spec.Version var fmt format.Format if message.Properties != nil && message.Properties.ContentType != nil && format.IsFormat(*message.Properties.ContentType) { fmt = format.Lookup(*message.Properties.ContentType) } else if sv := getSpecVersion(message); sv != nil { vn = sv } return &Message{AMQP: message, AMQPrcv: receiver, format: fmt, version: vn} }
go
d_go_34
func (m *Message) getAmqpData() []byte { var data []byte amqpData := m.AMQP.Data // TODO: replace with slices.Concat once go mod bumped to 1.22 for idx := range amqpData { data = append(data, amqpData[idx]...) } return data }
go
d_go_35
func WithConnOpt(opt amqp.ConnOption) Option { return func(t *Protocol) error { t.connOpts = append(t.connOpts, opt) return nil } }
go
d_go_36
// WithConnSASLPlain configures SASL PLAIN authentication on the AMQP
// connection using the given credentials.
func WithConnSASLPlain(username, password string) Option {
	opt := amqp.ConnSASLPlain(username, password)
	return WithConnOpt(opt)
}
go
d_go_37
func WithSessionOpt(opt amqp.SessionOption) Option { return func(t *Protocol) error { t.sessionOpts = append(t.sessionOpts, opt) return nil } }
go
d_go_38
func WithSenderLinkOption(opt amqp.LinkOption) Option { return func(t *Protocol) error { t.senderLinkOpts = append(t.senderLinkOpts, opt) return nil } }
go
d_go_39
func WithReceiverLinkOption(opt amqp.LinkOption) Option { return func(t *Protocol) error { t.receiverLinkOpts = append(t.receiverLinkOpts, opt) return nil } }
go
d_go_40
// NewSenderProtocolFromClient builds a send-only AMQP protocol on top of an
// existing client and session, targeting address as the link target.
// NOTE(review): on sender-creation failure this closes the caller-supplied
// client and session — surprising for a "FromClient" constructor; confirm
// callers expect to lose the connection on error.
func NewSenderProtocolFromClient(client *amqp.Client, session *amqp.Session, address string, opts ...Option) (*Protocol, error) {
	t := &Protocol{
		Node:             address,
		senderLinkOpts:   []amqp.LinkOption(nil),
		receiverLinkOpts: []amqp.LinkOption(nil),
		Client:           client,
		Session:          session,
	}
	if err := t.applyOptions(opts...); err != nil {
		return nil, err
	}
	t.senderLinkOpts = append(t.senderLinkOpts, amqp.LinkTargetAddress(address))
	// Create a sender
	amqpSender, err := session.NewSender(t.senderLinkOpts...)
	if err != nil {
		_ = client.Close()
		_ = session.Close(context.Background())
		return nil, err
	}
	t.Sender = NewSender(amqpSender).(*sender)
	t.SenderContextDecorators = []func(context.Context) context.Context{}
	return t, nil
}
go
d_go_41
func NewReceiverProtocolFromClient(client *amqp.Client, session *amqp.Session, address string, opts ...Option) (*Protocol, error) { t := &Protocol{ Node: address, senderLinkOpts: []amqp.LinkOption(nil), receiverLinkOpts: []amqp.LinkOption(nil), Client: client, Session: session, } if err := t.applyOptions(opts...); err != nil { return nil, err } t.Node = address t.receiverLinkOpts = append(t.receiverLinkOpts, amqp.LinkSourceAddress(address)) amqpReceiver, err := t.Session.NewReceiver(t.receiverLinkOpts...) if err != nil { return nil, err } t.Receiver = NewReceiver(amqpReceiver).(*receiver) return t, nil }
go
d_go_42
func NewSenderProtocol(server, address string, connOption []amqp.ConnOption, sessionOption []amqp.SessionOption, opts ...Option) (*Protocol, error) { client, err := amqp.Dial(server, connOption...) if err != nil { return nil, err } // Open a session session, err := client.NewSession(sessionOption...) if err != nil { _ = client.Close() return nil, err } p, err := NewSenderProtocolFromClient(client, session, address, opts...) if err != nil { return nil, err } p.ownedClient = true return p, nil }
go
d_go_43
func NewReceiverProtocol(server, address string, connOption []amqp.ConnOption, sessionOption []amqp.SessionOption, opts ...Option) (*Protocol, error) { client, err := amqp.Dial(server, connOption...) if err != nil { return nil, err } // Open a session session, err := client.NewSession(sessionOption...) if err != nil { _ = client.Close() return nil, err } p, err := NewReceiverProtocolFromClient(client, session, address, opts...) if err != nil { return nil, err } p.ownedClient = true return p, nil }
go
d_go_44
// NewReceiver wraps an AMQP receiver link as a protocol.Receiver.
// The parameter was renamed from `amqp`, which shadowed the amqp package.
func NewReceiver(rcv *amqp.Receiver) protocol.Receiver {
	return &receiver{amqp: rcv}
}
go
d_go_45
func NewSender(amqpSender *amqp.Sender, options ...SenderOptionFunc) protocol.Sender { s := &sender{amqp: amqpSender} for _, o := range options { o(s) } return s }
go
d_go_46
func WriteMessage(ctx context.Context, m binding.Message, amqpMessage *amqp.Message, transformers ...binding.Transformer) error { structuredWriter := (*amqpMessageWriter)(amqpMessage) binaryWriter := (*amqpMessageWriter)(amqpMessage) _, err := binding.Write( ctx, m, structuredWriter, binaryWriter, transformers..., ) return err }
go
d_go_47
// NewMessage adapts a confluent-kafka consumer record into a binding Message.
// It panics on nil messages, nil topics, or negative partition/offset (these
// indicate a programming error in the caller). Headers are lower-cased into a
// properties map, the Kafka coordinates are added as prefixed pseudo-headers,
// and the encoding mode is chosen from content type (structured) or spec
// version header (binary).
func NewMessage(msg *kafka.Message) *Message {
	if msg == nil {
		panic("the kafka.Message shouldn't be nil")
	}
	if msg.TopicPartition.Topic == nil {
		panic("the topic of kafka.Message shouldn't be nil")
	}
	if msg.TopicPartition.Partition < 0 || msg.TopicPartition.Offset < 0 {
		panic("the partition or offset of the kafka.Message must be non-negative")
	}
	var contentType, contentVersion string
	properties := make(map[string][]byte, len(msg.Headers)+3)
	for _, header := range msg.Headers {
		k := strings.ToLower(string(header.Key))
		if k == strings.ToLower(contentTypeKey) {
			contentType = string(header.Value)
		}
		if k == specs.PrefixedSpecVersionName() {
			contentVersion = string(header.Value)
		}
		properties[k] = header.Value
	}
	// add the kafka message key, topic, partition and partition key to the properties
	properties[prefix+KafkaOffsetKey] = []byte(strconv.FormatInt(int64(msg.TopicPartition.Offset), 10))
	properties[prefix+KafkaPartitionKey] = []byte(strconv.FormatInt(int64(msg.TopicPartition.Partition), 10))
	properties[prefix+KafkaTopicKey] = []byte(*msg.TopicPartition.Topic)
	if msg.Key != nil {
		properties[prefix+KafkaMessageKey] = msg.Key
	}
	message := &Message{
		internal:   msg,
		properties: properties,
	}
	// A recognized content type means structured mode; otherwise a spec
	// version header selects binary mode; with neither, encoding is unknown.
	if ft := format.Lookup(contentType); ft != nil {
		message.format = ft
	} else if v := specs.Version(contentVersion); v != nil {
		message.version = v
	}
	return message
}
go
d_go_48
func WithConfigMap(config *kafka.ConfigMap) Option { return func(p *Protocol) error { if config == nil { return errors.New("the kafka.ConfigMap option must not be nil") } p.kafkaConfigMap = config return nil } }
go
d_go_49
func WithSenderTopic(defaultTopic string) Option { return func(p *Protocol) error { if defaultTopic == "" { return errors.New("the producer topic option must not be nil") } p.producerDefaultTopic = defaultTopic return nil } }
go
d_go_50
func WithReceiverTopics(topics []string) Option { return func(p *Protocol) error { if topics == nil { return errors.New("the consumer topics option must not be nil") } p.consumerTopics = topics return nil } }
go
d_go_51
func WithRebalanceCallBack(rebalanceCb kafka.RebalanceCb) Option { return func(p *Protocol) error { if rebalanceCb == nil { return errors.New("the consumer group rebalance callback must not be nil") } p.consumerRebalanceCb = rebalanceCb return nil } }
go
d_go_52
func WithPollTimeout(timeoutMs int) Option { return func(p *Protocol) error { p.consumerPollTimeout = timeoutMs return nil } }
go
d_go_53
func WithSender(producer *kafka.Producer) Option { return func(p *Protocol) error { if producer == nil { return errors.New("the producer option must not be nil") } p.producer = producer return nil } }
go
d_go_54
func WithErrorHandler(handler func(ctx context.Context, err kafka.Error)) Option { return func(p *Protocol) error { p.consumerErrorHandler = handler return nil } }
go
d_go_55
func WithReceiver(consumer *kafka.Consumer) Option { return func(p *Protocol) error { if consumer == nil { return errors.New("the consumer option must not be nil") } p.consumer = consumer return nil } }
go
d_go_56
func WithTopicPartitionOffsets(ctx context.Context, topicPartitionOffsets []kafka.TopicPartition) context.Context { if len(topicPartitionOffsets) == 0 { panic("the topicPartitionOffsets cannot be empty") } for _, offset := range topicPartitionOffsets { if offset.Topic == nil || *(offset.Topic) == "" { panic("the kafka topic cannot be nil or empty") } if offset.Partition < 0 || offset.Offset < 0 { panic("the kafka partition/offset must be non-negative") } } return context.WithValue(ctx, offsetKey, topicPartitionOffsets) }
go
d_go_57
func TopicPartitionOffsetsFrom(ctx context.Context) []kafka.TopicPartition { c := ctx.Value(offsetKey) if c != nil { if s, ok := c.([]kafka.TopicPartition); ok { return s } } return nil }
go
d_go_58
// WithMessageKey stores the Kafka message key in the context; Send picks it
// up to key the produced record.
func WithMessageKey(ctx context.Context, messageKey string) context.Context {
	return context.WithValue(ctx, keyForMessageKey, messageKey)
}
go
d_go_59
func MessageKeyFrom(ctx context.Context) string { c := ctx.Value(keyForMessageKey) if c != nil { if s, ok := c.(string); ok { return s } } return "" }
go
d_go_60
// Events exposes the producer's delivery-report channel; it errors when no
// producer has been configured.
func (p *Protocol) Events() (chan kafka.Event, error) {
	if p.producer == nil {
		return nil, errors.New("producer not set")
	}
	return p.producer.Events(), nil
}
go
d_go_61
// Send encodes in into a kafka.Message and produces it asynchronously.
// The closer mutex is held for the whole call so Send cannot race with Close.
func (p *Protocol) Send(ctx context.Context, in binding.Message, transformers ...binding.Transformer) (err error) {
	if p.producer == nil {
		return errors.New("producer client must be set")
	}
	p.closerMux.Lock()
	defer p.closerMux.Unlock()
	if p.producer.IsClosed() {
		return errors.New("producer is closed")
	}
	// Finish is evaluated at return time with the named err, ack/nack'ing in.
	defer in.Finish(err)
	kafkaMsg := &kafka.Message{
		TopicPartition: kafka.TopicPartition{
			Topic:     &p.producerDefaultTopic,
			Partition: kafka.PartitionAny,
		},
	}
	// Context overrides: target topic and message key.
	if topic := cecontext.TopicFrom(ctx); topic != "" {
		kafkaMsg.TopicPartition.Topic = &topic
	}
	if messageKey := MessageKeyFrom(ctx); messageKey != "" {
		kafkaMsg.Key = []byte(messageKey)
	}
	if err = WriteProducerMessage(ctx, in, kafkaMsg, transformers...); err != nil {
		return fmt.Errorf("create producer message: %w", err)
	}
	// nil delivery channel: reports go to the producer's Events() channel.
	if err = p.producer.Produce(kafkaMsg, nil); err != nil {
		return fmt.Errorf("produce message: %w", err)
	}
	return nil
}
go
d_go_62
// Close stops the consumer loop via its cancel func and, when a producer
// exists and is still open, flushes pending deliveries before closing it.
func (p *Protocol) Close(ctx context.Context) error {
	p.closerMux.Lock()
	defer p.closerMux.Unlock()
	logger := cecontext.LoggerFrom(ctx)
	if p.consumerCancel != nil {
		p.consumerCancel()
	}
	if p.producer != nil && !p.producer.IsClosed() {
		// Flush and close the producer with a 10 seconds timeout (closes Events channel)
		for p.producer.Flush(10000) > 0 {
			logger.Info("Flushing outstanding messages")
		}
		p.producer.Close()
	}
	return nil
}
go
d_go_63
func WriteProducerMessage(ctx context.Context, in binding.Message, kafkaMsg *kafka.Message, transformers ...binding.Transformer, ) error { structuredWriter := (*kafkaMessageWriter)(kafkaMsg) binaryWriter := (*kafkaMessageWriter)(kafkaMsg) _, err := binding.Write( ctx, in, structuredWriter, binaryWriter, transformers..., ) return err }
go
d_go_64
func NewMessageFromConsumerMessage(cm *sarama.ConsumerMessage) *Message { var contentType string headers := make(map[string][]byte, len(cm.Headers)+3) for _, r := range cm.Headers { k := strings.ToLower(string(r.Key)) if k == contentTypeHeader { contentType = string(r.Value) } headers[k] = r.Value } headers[prefix+"kafkaoffset"] = []byte(strconv.FormatInt(cm.Offset, 10)) headers[prefix+"kafkapartition"] = []byte(strconv.FormatInt(int64(cm.Partition), 10)) headers[prefix+"kafkatopic"] = []byte(cm.Topic) return NewMessage(cm.Value, contentType, headers) }
go
d_go_65
func NewMessage(value []byte, contentType string, headers map[string][]byte) *Message { if ft := format.Lookup(contentType); ft != nil { return &Message{ Value: value, ContentType: contentType, Headers: headers, format: ft, } } else if v := specs.Version(string(headers[specs.PrefixedSpecVersionName()])); v != nil { return &Message{ Value: value, ContentType: contentType, Headers: headers, version: v, } } return &Message{ Value: value, ContentType: contentType, Headers: headers, } }
go
d_go_66
// NewProtocolFromClient assembles a sarama-backed Protocol from an existing
// client: it applies options, then builds the sender for sendToTopic and the
// consumer group for receiveFromTopic. Both topics are mandatory (options may
// not clear them). The protocol does not own the client.
func NewProtocolFromClient(client sarama.Client, sendToTopic string, receiveFromTopic string, opts ...ProtocolOptionFunc) (*Protocol, error) {
	p := &Protocol{
		Client:                  client,
		SenderContextDecorators: make([]func(context.Context) context.Context, 0),
		receiverGroupId:         defaultGroupId,
		senderTopic:             sendToTopic,
		receiverTopic:           receiveFromTopic,
		ownsClient:              false,
	}
	var err error
	if err = p.applyOptions(opts...); err != nil {
		return nil, err
	}
	if p.senderTopic == "" {
		return nil, errors.New("you didn't specify the topic to send to")
	}
	p.Sender, err = NewSenderFromClient(p.Client, p.senderTopic)
	if err != nil {
		return nil, err
	}
	if p.receiverTopic == "" {
		return nil, errors.New("you didn't specify the topic to receive from")
	}
	p.Consumer = NewConsumerFromClient(p.Client, p.receiverGroupId, p.receiverTopic)
	return p, nil
}
go
d_go_67
func (p *Protocol) OpenInbound(ctx context.Context) error { p.consumerMux.Lock() defer p.consumerMux.Unlock() logger := cecontext.LoggerFrom(ctx) logger.Infof("Starting consumer group to topic %s and group id %s", p.receiverTopic, p.receiverGroupId) return p.Consumer.OpenInbound(ctx) }
go
d_go_68
// NewReceiver returns a Receiver with its delivery channel initialized
// (unbuffered, so delivery is rendezvous with the consumer loop).
func NewReceiver() *Receiver {
	r := &Receiver{incoming: make(chan msgErr)}
	return r
}
go
d_go_69
// ConsumeClaim drains claim.Messages(), adapting each record into a binding
// message whose Finish callback marks the offset only on ACK, and forwards it
// on r.incoming. It exits cleanly when the claim channel closes or the
// session context is cancelled.
func (r *Receiver) ConsumeClaim(session sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
	// NOTE:
	// Do not move the code below to a goroutine.
	// The `ConsumeClaim` itself is called within a goroutine, see:
	// https://github.com/Shopify/sarama/blob/main/consumer_group.go#L27-L29
	for {
		select {
		case msg, ok := <-claim.Messages():
			if !ok {
				return nil
			}
			m := NewMessageFromConsumerMessage(msg)
			msgErrObj := msgErr{
				msg: binding.WithFinish(m, func(err error) {
					if protocol.IsACK(err) {
						session.MarkMessage(msg, "")
					}
				}),
			}
			// Need to use select clause here, otherwise r.incoming <- msgErrObj can become a blocking operation,
			// resulting in never reaching outside block's case <-session.Context().Done()
			select {
			case r.incoming <- msgErrObj:
				// do nothing
			case <-session.Context().Done():
				return nil
			}
			// Should return when `session.Context()` is done.
			// If not, will raise `ErrRebalanceInProgress` or `read tcp <ip>:<port>: i/o timeout` when kafka rebalance. see:
			// https://github.com/Shopify/sarama/issues/1192
			// https://github.com/Shopify/sarama/issues/2118
			// Also checked Shopify/sarama code which calls this ConsumeClaim method, and don't see if there is any difference
			// whether this method returns error or not. If it returns the error, as per current implementation, it could
			// get printed in logs and later drained when the ConsumerGroup gets closed.
			// For now, to be on safer side, returning nil instead of session.Context().Err() as suggested in
			// https://github.com/Shopify/sarama/blob/5e2c2ef0e429f895c86152189f625bfdad7d3452/examples/consumergroup/main.go
		case <-session.Context().Done():
			return nil
		}
	}
}
go
d_go_70
func NewSender(brokers []string, saramaConfig *sarama.Config, topic string, options ...SenderOptionFunc) (*Sender, error) { // Force this setting because it's required by sarama SyncProducer saramaConfig.Producer.Return.Successes = true producer, err := sarama.NewSyncProducer(brokers, saramaConfig) if err != nil { return nil, err } return makeSender(producer, topic, options...), nil }
go
d_go_71
func NewSenderFromClient(client sarama.Client, topic string, options ...SenderOptionFunc) (*Sender, error) { producer, err := sarama.NewSyncProducerFromClient(client) if err != nil { return nil, err } return makeSender(producer, topic, options...), nil }
go
d_go_72
// NewSenderFromSyncProducer wraps an existing SyncProducer as a Sender for
// topic. The error return is always nil, kept for signature symmetry with the
// other constructors.
func NewSenderFromSyncProducer(topic string, syncProducer sarama.SyncProducer, options ...SenderOptionFunc) (*Sender, error) {
	return makeSender(syncProducer, topic, options...), nil
}
go
d_go_73
// WithMessageKey stores the sarama message key encoder in the context for the
// producer to pick up.
func WithMessageKey(ctx context.Context, key sarama.Encoder) context.Context {
	return context.WithValue(ctx, withMessageKey{}, key)
}
go
d_go_74
// WriteProducerMessage encodes m into producerMessage. Unless skipKeyKey is
// set in ctx, a transformer is appended that captures the partitionkey
// extension while encoding; the captured value is applied to the Kafka
// message key after binding.Write returns.
func WriteProducerMessage(ctx context.Context, m binding.Message, producerMessage *sarama.ProducerMessage, transformers ...binding.Transformer) error {
	writer := (*kafkaProducerMessageWriter)(producerMessage)
	skipKey := binding.GetOrDefaultFromCtx(ctx, skipKeyKey{}, false).(bool)
	var key string
	// If skipKey = false, then we add a transformer that extracts the key
	if !skipKey {
		transformers = append(transformers, binding.TransformerFunc(func(r binding.MessageMetadataReader, w binding.MessageMetadataWriter) error {
			ext := r.GetExtension(partitionKey)
			if !types.IsZero(ext) {
				extStr, err := types.Format(ext)
				if err != nil {
					return err
				}
				// captured by the enclosing scope; applied after Write below
				key = extStr
			}
			return nil
		}))
	}
	_, err := binding.Write(
		ctx,
		m,
		writer,
		writer,
		transformers...,
	)
	if key != "" {
		producerMessage.Key = sarama.StringEncoder(key)
	}
	return err
}
go
d_go_75
func WithConnect(connOpt *paho.Connect) Option { return func(p *Protocol) error { if connOpt == nil { return fmt.Errorf("the paho.Connect option must not be nil") } p.connOption = connOpt return nil } }
go
d_go_76
func WithPublish(publishOpt *paho.Publish) Option { return func(p *Protocol) error { if publishOpt == nil { return fmt.Errorf("the paho.Publish option must not be nil") } p.publishOption = publishOpt return nil } }
go
d_go_77
func WithSubscribe(subscribeOpt *paho.Subscribe) Option { return func(p *Protocol) error { if subscribeOpt == nil { return fmt.Errorf("the paho.Subscribe option must not be nil") } p.subscribeOption = subscribeOpt return nil } }
go
d_go_78
// publishMsg clones the configured publish template (QoS, retain flag, topic,
// properties) into a fresh paho.Publish for a single send.
func (p *Protocol) publishMsg() *paho.Publish {
	msg := &paho.Publish{
		QoS:        p.publishOption.QoS,
		Retain:     p.publishOption.Retain,
		Topic:      p.publishOption.Topic,
		Properties: p.publishOption.Properties,
	}
	return msg
}
go
d_go_79
func WritePubMessage(ctx context.Context, m binding.Message, pubMessage *paho.Publish, transformers ...binding.Transformer) error { structuredWriter := (*pubMessageWriter)(pubMessage) binaryWriter := (*pubMessageWriter)(pubMessage) _, err := binding.Write( ctx, m, structuredWriter, binaryWriter, transformers..., ) return err }
go
d_go_80
// NewMessage wraps a NATS message as a binding Message; this transport always
// carries CloudEvents in structured mode.
func NewMessage(msg *nats.Msg) *Message {
	return &Message{Msg: msg, encoding: binding.EncodingStructured}
}
go
d_go_81
// Send encodes in as a structured NATS message on s.Subject and publishes it
// through JetStream. in.Finish always runs on return with the final (named)
// error; a Finish failure is reported, wrapping any earlier error.
func (s *Sender) Send(ctx context.Context, in binding.Message, transformers ...binding.Transformer) (err error) {
	defer func() {
		if err2 := in.Finish(err); err2 != nil {
			if err == nil {
				err = err2
			} else {
				err = fmt.Errorf("failed to call in.Finish() when error already occurred: %s: %w", err2.Error(), err)
			}
		}
	}()
	writer := new(bytes.Buffer)
	header, err := WriteMsg(ctx, in, writer, transformers...)
	if err != nil {
		return err
	}
	natsMsg := &nats.Msg{
		Subject: s.Subject,
		Data:    writer.Bytes(),
		Header:  header,
	}
	// Publish ack/sequence is discarded; errors are surfaced to the caller.
	_, err = s.Jsm.PublishMsg(natsMsg)
	return err
}
go
d_go_82
func WithURL(url string) ProtocolOption { return func(p *Protocol) error { p.url = url return nil } }
go
d_go_83
func WithNatsOptions(natsOpts []nats.Option) ProtocolOption { return func(p *Protocol) error { p.natsOpts = natsOpts return nil } }
go
d_go_84
func WithConnection(conn *nats.Conn) ProtocolOption { return func(p *Protocol) error { p.conn = conn return nil } }
go
d_go_85
func WithJetStreamOptions(jetStreamOpts []jetstream.JetStreamOpt) ProtocolOption { return func(p *Protocol) error { p.jetStreamOpts = jetStreamOpts return nil } }
go
d_go_86
func WithPublishOptions(publishOpts []jetstream.PublishOpt) ProtocolOption { return func(p *Protocol) error { p.publishOpts = publishOpts return nil } }
go
d_go_87
func WithSendSubject(sendSubject string) ProtocolOption { return func(p *Protocol) error { p.sendSubject = sendSubject return nil } }
go
d_go_88
func WithConsumerConfig(consumerConfig *jetstream.ConsumerConfig) ProtocolOption { return func(p *Protocol) error { p.consumerConfig = consumerConfig return nil } }
go
d_go_89
func WithOrderedConsumerConfig(orderedConsumerConfig *jetstream.OrderedConsumerConfig) ProtocolOption { return func(p *Protocol) error { p.orderedConsumerConfig = orderedConsumerConfig return nil } }
go
d_go_90
func WithPullConsumerOptions(pullConsumeOpts []jetstream.PullConsumeOpt) ProtocolOption { return func(p *Protocol) error { p.pullConsumeOpts = pullConsumeOpts return nil } }
go
d_go_91
// WithSubject returns a child context carrying a per-call NATS subject under
// ctxKeySubject (presumably read back by getSendSubject to override the
// protocol's default send subject — confirm there).
func WithSubject(ctx context.Context, subject string) context.Context {
	return context.WithValue(ctx, ctxKeySubject, subject)
}
go
d_go_92
// Send serializes the CloudEvents binding.Message in and publishes it to the
// subject resolved from the context (falling back to the protocol's
// configured send subject). It returns a validation error when no subject is
// available and an error when no stream covers the subject. The inbound
// message is always finished before returning; if both the send and the
// Finish fail, the Finish error wraps the original send error.
func (p *Protocol) Send(ctx context.Context, in binding.Message, transformers ...binding.Transformer) (err error) {
	subject := p.getSendSubject(ctx)
	if subject == "" {
		return newValidationError(fieldSendSubject, messageNoSendSubject)
	}
	defer func() {
		// Always release the inbound message. Preserve whichever error
		// happened first; merge both when Finish fails on top of a send error.
		if err2 := in.Finish(err); err2 != nil {
			if err == nil {
				err = err2
			} else {
				err = fmt.Errorf("failed to call in.Finish() when error already occurred: %s: %w", err2.Error(), err)
			}
		}
	}()
	// Fail fast if no JetStream stream is bound to this subject.
	if _, err = p.jetStream.StreamNameBySubject(ctx, subject); err != nil {
		return err
	}
	// Serialize the event body; WriteMsg also returns the NATS header
	// (presumably carrying binary-mode attributes — confirm in WriteMsg).
	writer := new(bytes.Buffer)
	header, err := WriteMsg(ctx, in, writer, transformers...)
	if err != nil {
		return err
	}
	natsMsg := &nats.Msg{
		Subject: subject,
		Data:    writer.Bytes(),
		Header:  header,
	}
	// Publish with any configured publish options; the ack is discarded.
	_, err = p.jetStream.PublishMsg(ctx, natsMsg, p.publishOpts...)
	return err
}
go
d_go_93
// Close shuts the protocol down: it signals OpenInbound to stop, waits (via
// subMtx) for it to finish draining, closes the NATS connection only when the
// protocol opened it itself from a URL, and finally closes the internal
// signal channel.
func (p *Protocol) Close(ctx context.Context) error {
	// Before closing, let's be sure OpenInbound completes
	// We send a signal to close and then we lock on subMtx in order
	// to wait OpenInbound to finish draining the queue
	p.internalClose <- struct{}{}
	p.subMtx.Lock()
	defer p.subMtx.Unlock()
	// if an URL was provided, then we must close the internally opened NATS connection
	// since the connection is not exposed.
	// If the connection was passed in, then leave the connection available.
	if p.url != "" && p.conn != nil {
		p.conn.Close()
	}
	close(p.internalClose)
	return nil
}
go
d_go_94
func (p *Protocol) applyOptions(opts ...ProtocolOption) error { for _, fn := range opts { if err := fn(p); err != nil { return err } } return nil }
go
d_go_95
func (p *Protocol) createJetstreamConsumer(ctx context.Context) error { var err error var stream string if stream, err = p.getStreamFromSubjects(ctx); err != nil { return err } var consumerErr error if p.consumerConfig != nil { p.jetstreamConsumer, consumerErr = p.jetStream.CreateOrUpdateConsumer(ctx, stream, *p.consumerConfig) } else if p.orderedConsumerConfig != nil { p.jetstreamConsumer, consumerErr = p.jetStream.OrderedConsumer(ctx, stream, *p.orderedConsumerConfig) } else { return newValidationError(fieldConsumerConfig, messageNoConsumerConfig) } return consumerErr }
go
d_go_96
func (p *Protocol) getStreamFromSubjects(ctx context.Context) (string, error) { var subjects []string if p.consumerConfig != nil && p.consumerConfig.FilterSubject != "" { subjects = []string{p.consumerConfig.FilterSubject} } if p.consumerConfig != nil && len(p.consumerConfig.FilterSubjects) > 0 { subjects = p.consumerConfig.FilterSubjects } if p.orderedConsumerConfig != nil && len(p.orderedConsumerConfig.FilterSubjects) > 0 { subjects = p.orderedConsumerConfig.FilterSubjects } if len(subjects) == 0 { return "", newValidationError(fieldFilterSubjects, messageNoFilterSubjects) } var finalStream string for i, subject := range subjects { currentStream, err := p.jetStream.StreamNameBySubject(ctx, subject) if err != nil { return "", err } if i == 0 { finalStream = currentStream continue } if finalStream != currentStream { return "", newValidationError(fieldFilterSubjects, messageMoreThanOneStream) } } return finalStream, nil }
go
d_go_97
func validateOptions(p *Protocol) error { if p.url == "" && p.conn == nil { return newValidationError(fieldURL, messageNoConnection) } if p.url != "" && p.conn != nil { return newValidationError(fieldURL, messageConflictingConnection) } consumerConfigOptions := 0 if p.consumerConfig != nil { consumerConfigOptions++ } if p.orderedConsumerConfig != nil { consumerConfigOptions++ } if consumerConfigOptions > 1 { return newValidationError(fieldConsumerConfig, messageMoreThanOneConsumerConfig) } if len(p.pullConsumeOpts) > 0 && consumerConfigOptions == 0 { return newValidationError(fieldPullConsumerOpts, messageReceiverOptionsWithoutConfig) } if len(p.publishOpts) > 0 && p.sendSubject == "" { return newValidationError(fieldPublishOptions, messageSenderOptionsWithoutSubject) } return nil }
go
d_go_98
// Error implements the error interface, reporting the offending field
// (quoted) and the validation message.
func (e validationError) Error() string {
	return fmt.Sprintf("invalid parameters provided: %q: %s", e.field, e.message)
}
go
d_go_99
// WithCustomAttributes returns a child context carrying the given attribute
// map under the withCustomAttributes key (presumably read back by the
// observability layer — confirm at the lookup site).
func WithCustomAttributes(ctx context.Context, attrs map[string]string) context.Context {
	key := withCustomAttributes{}
	return context.WithValue(ctx, key, attrs)
}
go
End of preview. Expand in Data Studio

Owl Code Search Hard Negative Datasets

Knowledge Distillation (KD) ベースのハードネガティブ付きコード検索データセットです。
コード検索モデルShuu12121/CodeSearch-ModernBERT-Crow-v3-large-len1024-Plusを教師モデルとして、コードと説明コメントのペアからなるデータセットに対し、各クエリに対する各文書(関数)の類似度スコアを計算し、ハードネガティブ(正解に類似しているが不正解の文書)を付与しています。

概要

  • 目的: コード検索モデルの Contrastive Learning / Knowledge Distillation ファインチューニング
  • 言語: Go, Java, JavaScript, PHP, Python, Ruby, Rust, TypeScript(8言語)
  • 総サンプル数: 4,787,740
  • データサイズ: 8.73 GB(展開後) / 3.37 GB(ダウンロード時)
  • フォーマット: Per-language config 形式(scores_{lang}, queries_{lang}, documents_{lang})

データ構造

各言語ごとに 3 つの config が存在します:

queries_{lang}

各クエリ(自然言語による検索文)を格納。

カラム 説明
query_id string クエリの一意識別子
query string 自然言語のクエリテキスト(docstring / コメント)
split string 元データの分割情報

documents_{lang}

各文書(ソースコード)を格納。

カラム 説明
document_id string 文書の一意識別子
document string ソースコード本文
split string 元データの分割情報

scores_{lang}

教師モデルによる類似度スコアを格納。各クエリに対して、スコア順にソートされた文書 ID リストとスコアリストを保持。

カラム 説明
query_id string 対応するクエリの ID
document_ids list[string] スコア順にソートされた文書 ID のリスト
scores list[float64] 対応する類似度スコアのリスト
split string 元データの分割情報

スコアの解釈:

  • scores[0] / document_ids[0] が正例(実際のペアだったもの)
  • scores[0] = -1 は、正解の文書が検索結果の上位 32 件に含まれていなかった場合を示します

言語別統計

言語 クエリ数 文書数 スコア数
Go 1,361,475 1,361,475 1,361,475
Java 1,281,018 1,281,018 1,281,018
JavaScript 129,007 129,007 129,007
PHP 424,463 424,463 424,463
Python 776,900 776,900 776,900
Ruby 104,899 104,899 104,899
Rust 381,521 381,521 381,521
TypeScript 328,457 328,457 328,457
合計 4,787,740 4,787,740 4,787,740

注意点

全データをメモリに載せようとするとOOMになる可能性があります!!

使い方

基本的な読み込み

from datasets import load_dataset

# Python の scores を読み込む
scores = load_dataset(
    "Shuu12121/owl_code_search_hard_negative_datasets-Pre_kd",
    name="scores_python",
    split="train",
)

# Python の queries を読み込む
queries = load_dataset(
    "Shuu12121/owl_code_search_hard_negative_datasets-Pre_kd",
    name="queries_python",
    split="train",
)

# Python の documents を読み込む
documents = load_dataset(
    "Shuu12121/owl_code_search_hard_negative_datasets-Pre_kd",
    name="documents_python",
    split="train",
)

ハードネガティブの抽出

# クエリ・文書テキストの辞書を構築
query_texts = dict(zip(queries["query_id"], queries["query"]))
doc_texts = dict(zip(documents["document_id"], documents["document"]))

# 閾値の設定
nv_threshold = 0.99  # positive スコアの 99% 未満をネガティブとする

# 1 サンプルの処理例
sample = scores[0]
query_text = query_texts[sample["query_id"]]
positive_doc = doc_texts[sample["document_ids"][0]]  # scores[0] が正例
positive_score = sample["scores"][0]

hard_negatives = []
for doc_id, score in zip(sample["document_ids"][1:], sample["scores"][1:]):
    if score < nv_threshold * positive_score and score != -1:
        hard_negatives.append(doc_texts[doc_id])

print(f"Query: {query_text[:100]}...")
print(f"Positive: {positive_doc[:100]}...")
print(f"Hard negatives: {len(hard_negatives)}")

作成に使用されたプログラム

リポジトリはこちら

Downloads last month
139