_id
stringlengths
2
7
title
stringlengths
1
118
partition
stringclasses
3 values
text
stringlengths
52
85.5k
language
stringclasses
1 value
meta_information
dict
q11800
oneServiceHandler
train
// oneServiceHandler serves a JSON document describing every known instance of
// a single service, looked up by name from the URL params. Responds 404 for a
// non-json extension, a missing name, or an unknown service; 500 when state is
// missing or marshaling fails.
// NOTE(review): the read lock is held through JSON marshaling and the response
// write — a long critical section; confirm state size keeps this cheap.
func (s *SidecarApi) oneServiceHandler(response http.ResponseWriter, req *http.Request, params map[string]string) {
	defer req.Body.Close()
	response.Header().Set("Access-Control-Allow-Origin", "*")
	response.Header().Set("Access-Control-Allow-Methods", "GET")
	response.Header().Set("Content-Type", "application/json")
	if params["extension"] != "json" {
		sendJsonError(response, 404, "Not Found - Invalid content type extension")
		return
	}
	name, ok := params["name"]
	if !ok {
		sendJsonError(response, 404, "Not Found - No service name provided")
		return
	}
	if s.state == nil {
		sendJsonError(response, 500, "Internal Server Error - Something went terribly wrong")
		return
	}
	var instances []*service.Service
	// Enter critical section
	s.state.RLock()
	defer s.state.RUnlock()
	s.state.EachService(func(hostname *string, id *string, svc *service.Service) {
		if svc.Name == name {
			instances = append(instances, svc)
		}
	})
	// Did we have any entries for this service in the catalog?
	if len(instances) == 0 {
		sendJsonError(response, 404, fmt.Sprintf("no instances of %s found", name))
		return
	}
	clusterName := ""
	if s.list != nil {
		clusterName = s.list.ClusterName()
	}
	// Everything went fine, we found entries for this service.
	// Send the json back.
	svcInstances := make(map[string][]*service.Service)
	svcInstances[name] = instances
	result := ApiServices{
		Services:    svcInstances,
		ClusterName: clusterName,
	}
	jsonBytes, err := json.MarshalIndent(&result, "", " ")
	if err != nil {
		log.Errorf("Error marshaling state in oneServiceHandler: %s", err.Error())
		sendJsonError(response, 500, "Internal server error")
		return
	}
	response.Write(jsonBytes)
}
go
{ "resource": "" }
q11801
servicesHandler
train
// servicesHandler serves a JSON summary of all services in the catalog plus
// per-member cluster metadata. Members missing from state get a zero-time
// LastUpdated placeholder. Marshaling happens inside the wrapped critical
// section so the snapshot is consistent.
func (s *SidecarApi) servicesHandler(response http.ResponseWriter, req *http.Request, params map[string]string) {
	defer req.Body.Close()
	response.Header().Set("Access-Control-Allow-Origin", "*")
	response.Header().Set("Access-Control-Allow-Methods", "GET")
	// We only support JSON
	if params["extension"] != "json" {
		sendJsonError(response, 404, "Not Found - Invalid content type extension")
		return
	}
	response.Header().Set("Content-Type", "application/json")
	var listMembers []*memberlist.Node
	var clusterName string
	if s.list != nil {
		listMembers = s.list.Members()
		sort.Sort(catalog.ListByName(listMembers))
		clusterName = s.list.ClusterName()
	}
	members := make(map[string]*ApiServer, len(listMembers))
	var jsonBytes []byte
	var err error
	func() { // Wrap critical section
		s.state.RLock()
		defer s.state.RUnlock()
		for _, member := range listMembers {
			if s.state.HasServer(member.Name) {
				members[member.Name] = &ApiServer{
					Name:         member.Name,
					LastUpdated:  s.state.Servers[member.Name].LastUpdated,
					ServiceCount: len(s.state.Servers[member.Name].Services),
				}
			} else {
				// Member known to the cluster but absent from state: report
				// a placeholder with the zero Unix time.
				members[member.Name] = &ApiServer{
					Name:         member.Name,
					LastUpdated:  time.Unix(0, 0),
					ServiceCount: 0,
				}
			}
		}
		result := ApiServices{
			Services:       s.state.ByService(),
			ClusterMembers: members,
			ClusterName:    clusterName,
		}
		jsonBytes, err = json.MarshalIndent(&result, "", " ")
	}()
	if err != nil {
		log.Errorf("Error marshaling state in servicesHandler: %s", err.Error())
		sendJsonError(response, 500, "Internal server error")
		return
	}
	response.Write(jsonBytes)
}
go
{ "resource": "" }
q11802
stateHandler
train
func (s *SidecarApi) stateHandler(response http.ResponseWriter, req *http.Request, params map[string]string) { defer req.Body.Close() s.state.RLock() defer s.state.RUnlock() if params["extension"] != "json" { sendJsonError(response, 404, "Not Found - Invalid content type extension") return } response.Header().Set("Content-Type", "application/json") response.Header().Set("Access-Control-Allow-Origin", "*") response.Header().Set("Access-Control-Allow-Methods", "GET") response.Write(s.state.Encode()) return }
go
{ "resource": "" }
q11803
sendJsonError
train
func sendJsonError(response http.ResponseWriter, status int, message string) { output := map[string]string{ "status": "error", "message": message, } jsonBytes, err := json.Marshal(output) if err != nil { log.Errorf("Error encoding json error response: %s", err.Error()) response.WriteHeader(500) response.Write([]byte("Interval server error")) return } response.Header().Set("Content-Type", "application/json") response.WriteHeader(status) response.Write(jsonBytes) }
go
{ "resource": "" }
q11804
defaultCheckForService
train
// defaultCheckForService builds the fallback health check for a service: an
// HTTP GET against the first TCP port using the default status endpoint, or
// an always-successful no-op check when the service exposes no TCP port.
func (m *Monitor) defaultCheckForService(svc *service.Service) *Check {
	port := findFirstTCPPort(svc)
	if port == nil {
		// Nothing to probe — mark the service healthy unconditionally.
		return &Check{ID: svc.ID, Command: &AlwaysSuccessfulCmd{}}
	}
	// Use the const default unless we've been provided something else
	defaultCheckEndpoint := DEFAULT_STATUS_ENDPOINT
	if len(m.DefaultCheckEndpoint) != 0 {
		defaultCheckEndpoint = m.DefaultCheckEndpoint
	}
	url := fmt.Sprintf("http://%v:%v%v", m.DefaultCheckHost, port.Port, defaultCheckEndpoint)
	return &Check{
		ID:      svc.ID,
		Type:    "HttpGet",
		Args:    url,
		Status:  FAILED,
		Command: &HttpGetCmd{},
	}
}
go
{ "resource": "" }
q11805
fetchCheckForService
train
// fetchCheckForService builds a check from the discovery backend's advertised
// health check for the service. Returns nil when the backend supplies no
// check type, letting the caller fall back to a default.
func (m *Monitor) fetchCheckForService(svc *service.Service, disco discovery.Discoverer) *Check {
	check := &Check{}
	check.Type, check.Args = disco.HealthCheck(svc)
	if check.Type == "" {
		log.Warnf("Got empty check type for service %s (id: %s) with args: %s!", svc.Name, svc.ID, check.Args)
		return nil
	}
	// Setup some other parts of the check that don't come from discovery
	check.ID = svc.ID
	check.Command = m.GetCommandNamed(check.Type)
	check.Status = FAILED
	return check
}
go
{ "resource": "" }
q11806
templateCheckArgs
train
func (m *Monitor) templateCheckArgs(check *Check, svc *service.Service) string { funcMap := template.FuncMap{ "tcp": func(p int64) int64 { return svc.PortForServicePort(p, "tcp") }, "udp": func(p int64) int64 { return svc.PortForServicePort(p, "udp") }, "host": func() string { return m.DefaultCheckHost }, "container": func() string { return svc.Hostname }, } t, err := template.New("check").Funcs(funcMap).Parse(check.Args) if err != nil { log.Errorf("Unable to parse check Args: '%s'", check.Args) return check.Args } var output bytes.Buffer t.Execute(&output, svc) return output.String() }
go
{ "resource": "" }
q11807
CheckForService
train
// CheckForService resolves the health check for a service: prefer the one
// advertised by discovery, fall back to the default check, and run the Args
// through templating either way.
func (m *Monitor) CheckForService(svc *service.Service, disco discovery.Discoverer) *Check {
	check := m.fetchCheckForService(svc, disco)
	if check == nil { // We got nothing
		log.Warnf("Using default check for service %s (id: %s).", svc.Name, svc.ID)
		check = m.defaultCheckForService(svc)
	}
	check.Args = m.templateCheckArgs(check, svc)
	return check
}
go
{ "resource": "" }
q11808
Watch
train
// Watch polls discovery on the looper's schedule, adding checks for newly
// discovered services and pruning checks whose services have disappeared.
// NOTE(review): the m.Checks[svc.ID] read below happens before the Lock is
// taken — confirm single-writer assumptions make that safe.
func (m *Monitor) Watch(disco discovery.Discoverer, looper director.Looper) {
	m.DiscoveryFn = disco.Services // Store this so we can use it from Services()
	looper.Loop(func() error {
		services := disco.Services()
		// Add checks when new services are found
		for _, svc := range services {
			if m.Checks[svc.ID] == nil {
				// &svc is only used synchronously inside this iteration, so
				// taking the loop variable's address is safe here.
				check := m.CheckForService(&svc, disco)
				if check.Command == nil {
					log.Errorf(
						"Attempted to add %s (id: %s) but no check configured!",
						svc.Name, svc.ID,
					)
				} else {
					m.AddCheck(check)
				}
			}
		}
		m.Lock()
		defer m.Unlock()
	OUTER:
		// We remove checks when encountering a missing service. This
		// prevents us from storing up checks forever. This is the only
		// way we'll find out about a service going away.
		for _, check := range m.Checks {
			for _, svc := range services {
				// Continue if we have a matching service/check pair
				if svc.ID == check.ID {
					continue OUTER
				}
			}
			// Remove checks for services that are no longer running
			delete(m.Checks, check.ID)
		}
		return nil
	})
}
go
{ "resource": "" }
q11809
Services
train
func (d *StaticDiscovery) Services() []service.Service { var services []service.Service for _, target := range d.Targets { target.Service.Updated = time.Now().UTC() services = append(services, target.Service) } return services }
go
{ "resource": "" }
q11810
Listeners
train
func (d *StaticDiscovery) Listeners() []ChangeListener { var listeners []ChangeListener for _, target := range d.Targets { if target.ListenPort > 0 { listener := ChangeListener{ Name: target.Service.ListenerName(), Url: fmt.Sprintf("http://%s:%d/sidecar/update", d.Hostname, target.ListenPort), } listeners = append(listeners, listener) } } return listeners }
go
{ "resource": "" }
q11811
Run
train
// Run loads the static config once; static discovery has nothing to poll
// afterward. On parse failure it logs and signals the looper done.
// NOTE(review): Done is passed nil rather than the parse error — confirm
// that swallowing the error here is intentional.
func (d *StaticDiscovery) Run(looper director.Looper) {
	var err error
	d.Targets, err = d.ParseConfig(d.ConfigFile)
	if err != nil {
		log.Errorf("StaticDiscovery cannot parse: %s", err.Error())
		looper.Done(nil)
	}
}
go
{ "resource": "" }
q11812
ParseConfig
train
// ParseConfig reads a static announcements JSON file and returns the targets
// it defines, assigning each service a random 12-hex-char ID, a creation
// timestamp, a default hostname, and default IPs on ports that lack one.
func (d *StaticDiscovery) ParseConfig(filename string) ([]*Target, error) {
	file, err := ioutil.ReadFile(filename)
	if err != nil {
		log.Errorf("Unable to read announcements file: '%s!'", err.Error())
		return nil, err
	}
	var targets []*Target
	err = json.Unmarshal(file, &targets)
	if err != nil {
		return nil, fmt.Errorf("Unable to unmarshal Target: %s", err)
	}
	// Each element is a pointer, so mutating through `target` updates the
	// slice contents in place.
	for _, target := range targets {
		idBytes, err := RandomHex(6)
		if err != nil {
			log.Errorf("ParseConfig(): Unable to get random bytes (%s)", err.Error())
			return nil, err
		}
		target.Service.ID = string(idBytes)
		target.Service.Created = time.Now().UTC()
		// We _can_ export services for a 3rd party. If we don't specify
		// the hostname, then it's for this host.
		if target.Service.Hostname == "" {
			target.Service.Hostname = d.Hostname
		}
		// Make sure we have an IP address on ports
		for i, port := range target.Service.Ports {
			if len(port.IP) == 0 {
				target.Service.Ports[i].IP = d.DefaultIP
			}
		}
		log.Printf("Discovered service: %s, ID: %s",
			target.Service.Name,
			target.Service.ID,
		)
	}
	return targets, nil
}
go
{ "resource": "" }
q11813
RandomHex
train
// RandomHex returns count cryptographically random bytes hex-encoded, so the
// result is 2*count bytes long.
func RandomHex(count int) ([]byte, error) {
	raw := make([]byte, count)
	if _, err := rand.Read(raw); err != nil {
		// Fixed: the old log line misnamed this function "RandomBytes()" and
		// both logged and returned the error; wrap it once instead and let
		// the caller decide how to report it.
		return nil, fmt.Errorf("RandomHex(): %s", err)
	}
	encoded := make([]byte, hex.EncodedLen(count))
	hex.Encode(encoded, raw)
	return encoded, nil
}
go
{ "resource": "" }
q11814
optionsHandler
train
func (s *EnvoyApi) optionsHandler(response http.ResponseWriter, req *http.Request) { response.Header().Set("Access-Control-Allow-Origin", "*") response.Header().Set("Access-Control-Allow-Methods", "GET") return }
go
{ "resource": "" }
q11815
registrationHandler
train
// registrationHandler implements the Envoy SDS endpoint: it returns the alive
// instances of the service encoded in the URL as "<name><sep><port>". 404 on
// a missing or unparseable service name.
func (s *EnvoyApi) registrationHandler(response http.ResponseWriter, req *http.Request, params map[string]string) {
	defer req.Body.Close()
	response.Header().Set("Content-Type", "application/json")
	name, ok := params["service"]
	if !ok {
		log.Debug("No service name provided to Envoy registrationHandler")
		sendJsonError(response, 404, "Not Found - No service name provided")
		return
	}
	svcName, svcPort, err := SvcNameSplit(name)
	if err != nil {
		log.Debugf("Envoy Service '%s' not found in registrationHandler: %s", name, err)
		sendJsonError(response, 404, "Not Found - "+err.Error())
		return
	}
	instances := make([]*EnvoyService, 0)
	// Enter critical section
	func() {
		s.state.RLock()
		defer s.state.RUnlock()
		s.state.EachService(func(hostname *string, id *string, svc *service.Service) {
			// Only alive instances of the requested service are reported.
			if svc.Name == svcName && svc.IsAlive() {
				newInstance := s.EnvoyServiceFromService(svc, svcPort)
				if newInstance != nil {
					instances = append(instances, newInstance)
				}
			}
		})
	}()
	clusterName := ""
	if s.list != nil {
		clusterName = s.list.ClusterName()
	}
	result := SDSResult{
		Env:     clusterName,
		Hosts:   instances,
		Service: name,
	}
	jsonBytes, err := result.MarshalJSON()
	// NOTE(review): Pool is deferred before the error check — confirm ffjson's
	// Pool tolerates the buffer it receives on the error path.
	defer ffjson.Pool(jsonBytes)
	if err != nil {
		log.Errorf("Error marshaling state in registrationHandler: %s", err.Error())
		sendJsonError(response, 500, "Internal server error")
		return
	}
	response.Write(jsonBytes)
}
go
{ "resource": "" }
q11816
clustersHandler
train
func (s *EnvoyApi) clustersHandler(response http.ResponseWriter, req *http.Request, params map[string]string) { defer req.Body.Close() response.Header().Set("Content-Type", "application/json") clusters := s.EnvoyClustersFromState() log.Debugf("Reporting Envoy cluster information for cluster '%s' and node '%s'", params["service_cluster"], params["service_node"]) result := CDSResult{clusters} jsonBytes, err := result.MarshalJSON() defer ffjson.Pool(jsonBytes) if err != nil { log.Errorf("Error marshaling state in servicesHandler: %s", err.Error()) sendJsonError(response, 500, "Internal server error") return } response.Write(jsonBytes) }
go
{ "resource": "" }
q11817
listenersHandler
train
func (s *EnvoyApi) listenersHandler(response http.ResponseWriter, req *http.Request, params map[string]string) { defer req.Body.Close() response.Header().Set("Content-Type", "application/json") log.Debugf("Reporting Envoy cluster information for cluster '%s' and node '%s'", params["service_cluster"], params["service_node"]) listeners := s.EnvoyListenersFromState() result := LDSResult{listeners} jsonBytes, err := result.MarshalJSON() defer ffjson.Pool(jsonBytes) if err != nil { log.Errorf("Error marshaling state in servicesHandler: %s", err.Error()) sendJsonError(response, 500, "Internal server error") return } response.Write(jsonBytes) }
go
{ "resource": "" }
q11818
lookupHost
train
// lookupHost resolves hostname via the system resolver and returns the first
// address found.
func lookupHost(hostname string) (string, error) {
	addrs, err := net.LookupHost(hostname)
	if err != nil {
		return "", err
	}
	// Fixed: guard against an empty (but error-free) result instead of
	// panicking on addrs[0].
	if len(addrs) == 0 {
		return "", fmt.Errorf("no addresses found for %s", hostname)
	}
	return addrs[0], nil
}
go
{ "resource": "" }
q11819
EnvoyServiceFromService
train
func (s *EnvoyApi) EnvoyServiceFromService(svc *service.Service, svcPort int64) *EnvoyService { if len(svc.Ports) < 1 { return nil } for _, port := range svc.Ports { // No sense worrying about unexposed ports if port.ServicePort == svcPort { address := port.IP // NOT recommended... this is very slow. Useful in dev modes where you // need to resolve to a different IP address only. if s.config.UseHostnames { var err error address, err = lookupHost(svc.Hostname) if err != nil { log.Warnf("Unable to resolve %s, using IP address", svc.Hostname) } address = port.IP } return &EnvoyService{ IPAddress: address, LastCheckIn: svc.Updated.String(), Port: port.Port, Revision: svc.Version(), Service: SvcName(svc.Name, port.ServicePort), ServiceRepoName: svc.Image, Tags: map[string]string{}, } } } return nil }
go
{ "resource": "" }
q11820
EnvoyClustersFromState
train
// EnvoyClustersFromState builds one Envoy cluster definition per
// (service, ServicePort) pair, using the first alive endpoint of each service
// as the representative. Holds the state read lock for the duration.
func (s *EnvoyApi) EnvoyClustersFromState() []*EnvoyCluster {
	clusters := make([]*EnvoyCluster, 0)
	s.state.RLock()
	defer s.state.RUnlock()
	svcs := s.state.ByService()
	for svcName, endpoints := range svcs {
		if len(endpoints) < 1 {
			continue
		}
		// Find the first alive endpoint; skip the service if none is alive.
		var svc *service.Service
		for _, endpoint := range endpoints {
			if endpoint.IsAlive() {
				svc = endpoint
				break
			}
		}
		if svc == nil {
			continue
		}
		for _, port := range svc.Ports {
			// Only ServicePorts become clusters.
			if port.ServicePort < 1 {
				continue
			}
			clusters = append(clusters, &EnvoyCluster{
				Name:             SvcName(svcName, port.ServicePort),
				Type:             "sds", // use Sidecar's SDS endpoint for the hosts
				ConnectTimeoutMs: 500,
				LBType:           "round_robin", // TODO figure this out!
				ServiceName:      SvcName(svcName, port.ServicePort),
			})
		}
	}
	return clusters
}
go
{ "resource": "" }
q11821
EnvoyListenersFromState
train
// EnvoyListenersFromState builds one named Envoy listener per
// (service, ServicePort) pair, using the first alive endpoint of each service
// as the definition. Holds the state read lock for the duration.
func (s *EnvoyApi) EnvoyListenersFromState() []*EnvoyListener {
	listeners := make([]*EnvoyListener, 0)
	s.state.RLock()
	defer s.state.RUnlock()
	svcs := s.state.ByService()
	// Loop over all the services by service name
	for _, endpoints := range svcs {
		if len(endpoints) < 1 {
			continue
		}
		var svc *service.Service
		// Find the first alive service and use that as the definition.
		// If none are alive, we won't open the port.
		for _, endpoint := range endpoints {
			if endpoint.IsAlive() {
				svc = endpoint
				break
			}
		}
		if svc == nil {
			continue
		}
		// Loop over the ports and generate a named listener for
		// each port.
		for _, port := range svc.Ports {
			// Only listen on ServicePorts
			if port.ServicePort < 1 {
				continue
			}
			listeners = append(listeners, s.EnvoyListenerFromService(svc, port.ServicePort))
		}
	}
	return listeners
}
go
{ "resource": "" }
q11822
SvcName
train
// SvcName builds the Envoy-facing service identifier
// "<name><separator><port>" — the inverse of SvcNameSplit.
func SvcName(name string, port int64) string {
	return fmt.Sprintf("%s%s%d", name, ServiceNameSeparator, port)
}
go
{ "resource": "" }
q11823
SvcNameSplit
train
func SvcNameSplit(name string) (string, int64, error) { parts := strings.Split(name, ServiceNameSeparator) if len(parts) < 2 { return "", -1, fmt.Errorf("%s", "Unable to split service name and port!") } svcName := parts[0] svcPort, err := strconv.ParseInt(parts[1], 10, 64) if err != nil { return "", -1, fmt.Errorf("%s", "Unable to parse port!") } return svcName, svcPort, nil }
go
{ "resource": "" }
q11824
HttpMux
train
// HttpMux wires up the Envoy xDS-style HTTP routes: SDS registration, CDS
// clusters, LDS listeners (each with and without cluster/node path params),
// plus a catch-all OPTIONS handler for CORS preflight.
func (s *EnvoyApi) HttpMux() http.Handler {
	router := mux.NewRouter()
	router.HandleFunc("/registration/{service}", wrap(s.registrationHandler)).Methods("GET")
	router.HandleFunc("/clusters/{service_cluster}/{service_node}", wrap(s.clustersHandler)).Methods("GET")
	router.HandleFunc("/clusters", wrap(s.clustersHandler)).Methods("GET")
	router.HandleFunc("/listeners/{service_cluster}/{service_node}", wrap(s.listenersHandler)).Methods("GET")
	router.HandleFunc("/listeners", wrap(s.listenersHandler)).Methods("GET")
	router.HandleFunc("/{path}", s.optionsHandler).Methods("OPTIONS")
	return router
}
go
{ "resource": "" }
q11825
NewCheck
train
func NewCheck(id string) *Check { check := Check{ ID: id, Count: 0, Type: "http", Command: &HttpGetCmd{}, MaxCount: 1, Status: UNKNOWN, } return &check }
go
{ "resource": "" }
q11826
UpdateStatus
train
// UpdateStatus folds one check execution result into the check's state. An
// execution error forces UNKNOWN and records it; a HEALTHY result resets the
// failure counter; any other result increments it, flipping the status to
// FAILED once MaxCount consecutive non-healthy results have accumulated.
func (check *Check) UpdateStatus(status int, err error) {
	if err != nil {
		log.Debugf("Error executing check, status UNKNOWN: (id %s)", check.ID)
		check.Status = UNKNOWN
		check.LastError = err
	} else {
		check.Status = status
	}
	// A healthy result clears the failure streak entirely.
	if status == HEALTHY {
		check.Count = 0
		return
	}
	check.Count = check.Count + 1
	if check.Count >= check.MaxCount {
		check.Status = FAILED
	}
}
go
{ "resource": "" }
q11827
NewMonitor
train
func NewMonitor(defaultCheckHost string, defaultCheckEndpoint string) *Monitor { monitor := Monitor{ Checks: make(map[string]*Check, 5), CheckInterval: HEALTH_INTERVAL, DefaultCheckHost: defaultCheckHost, DefaultCheckEndpoint: defaultCheckEndpoint, } return &monitor }
go
{ "resource": "" }
q11828
AddCheck
train
// AddCheck registers (or replaces) the health check keyed by its ID, under
// the monitor's write lock.
func (m *Monitor) AddCheck(check *Check) {
	m.Lock()
	defer m.Unlock()
	log.Printf("Adding health check: %s (ID: %s), Args: %s", check.Type, check.ID, check.Args)
	m.Checks[check.ID] = check
}
go
{ "resource": "" }
q11829
MarkService
train
// MarkService copies the current health-check status onto the service, or
// service.UNKNOWN when no check exists for it. Takes only the read lock and
// mutates the service, not the monitor.
// NOTE(review): the original comment here described removing checks on
// Tombstone records, which this function does not do — the comment appears
// stale; verify intent against the caller.
func (m *Monitor) MarkService(svc *service.Service) {
	m.RLock()
	if _, ok := m.Checks[svc.ID]; ok {
		svc.Status = m.Checks[svc.ID].ServiceStatus()
	} else {
		svc.Status = service.UNKNOWN
	}
	m.RUnlock()
}
go
{ "resource": "" }
q11830
Run
train
// Run executes all registered checks in parallel on every looper tick. Each
// check gets two goroutines: one runs the command, the other collects the
// result or times it out just under the check interval. The check map is
// snapshotted under the read lock so checks added/removed mid-cycle don't
// race the iteration (the Check values themselves remain shared/mutable).
func (m *Monitor) Run(looper director.Looper) {
	looper.Loop(func() error {
		log.Debugf("Running checks")
		var wg sync.WaitGroup
		// Make immutable copy of m.Checks (checks are still mutable)
		m.RLock()
		checks := make(map[string]*Check, len(m.Checks))
		for k, v := range m.Checks {
			checks[k] = v
		}
		m.RUnlock()
		wg.Add(len(checks))
		for _, check := range checks {
			// Run all checks in parallel in goroutines
			resultChan := make(chan checkResult, 1)
			go func(check *Check, resultChan chan checkResult) {
				result, err := check.Command.Run(check.Args)
				resultChan <- checkResult{result, err}
			}(check, resultChan) // copy check pointer for the goroutine
			go func(check *Check, resultChan chan checkResult) {
				defer wg.Done()
				// We make the call but we time out if it gets too close to the
				// m.CheckInterval.
				select {
				case result := <-resultChan:
					check.UpdateStatus(result.status, result.err)
				case <-time.After(m.CheckInterval - 1*time.Millisecond):
					log.Errorf("Error, check %s timed out! (%v)", check.ID, check.Args)
					check.UpdateStatus(UNKNOWN, errors.New("Timed out!"))
				}
			}(check, resultChan) // copy check pointer for the goroutine
		}
		// Let's make sure we don't continue to spool up
		// huge quantities of goroutines. Wait on all of them
		// to complete before moving on. This could slow down
		// our check loop if something doesn't time out properly.
		wg.Wait()
		return nil
	})
}
go
{ "resource": "" }
q11831
ServiceName
train
func (r *RegexpNamer) ServiceName(container *docker.APIContainers) string { if container == nil { log.Warn("ServiceName() called with nil service passed!") return "" } if r.expression == nil { log.Errorf("Invalid regex can't match using: %s", r.ServiceNameMatch) return container.Image } var svcName string toMatch := []byte(container.Names[0]) matches := r.expression.FindSubmatch(toMatch) if len(matches) < 1 { svcName = container.Image } else { svcName = string(matches[1]) } return svcName }
go
{ "resource": "" }
q11832
ServiceName
train
func (d *DockerLabelNamer) ServiceName(container *docker.APIContainers) string { if container == nil { log.Warn("ServiceName() called with nil service passed!") return "" } for label, value := range container.Labels { if label == d.Label { return value } } log.Debugf( "Found container with no '%s' label: %s (%s), returning '%s'", d.Label, container.ID, container.Names[0], container.Image, ) return container.Image }
go
{ "resource": "" }
q11833
UnmarshalJSON
train
func (j *EnvoyListener) UnmarshalJSON(input []byte) error { fs := fflib.NewFFLexer(input) return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) }
go
{ "resource": "" }
q11834
MarshalJSON
train
func (j *EnvoyRoute) MarshalJSON() ([]byte, error) { var buf fflib.Buffer if j == nil { buf.WriteString("null") return buf.Bytes(), nil } err := j.MarshalJSONBuf(&buf) if err != nil { return nil, err } return buf.Bytes(), nil }
go
{ "resource": "" }
q11835
Start
train
// Start launches a goroutine that decodes incoming gossip notifications and
// forwards each decoded service entry to the state's message channel, then
// marks the delegate as started.
func (d *servicesDelegate) Start() {
	go func() {
		for message := range d.notifications {
			entry := service.Decode(message)
			if entry == nil {
				log.Errorf("NotifyMsg(): error decoding!")
				continue
			}
			d.state.ServiceMsgs <- *entry
		}
	}()
	d.Started = true
	d.StartedAt = time.Now().UTC()
}
go
{ "resource": "" }
q11836
packPacket
train
// packPacket greedily packs broadcast messages, in order, into a single
// packet of at most `limit` bytes (each message costing its length plus
// `overhead`). It returns the messages that fit and the leftovers; if even
// the first message doesn't fit, everything is returned as leftover.
func (d *servicesDelegate) packPacket(broadcasts [][]byte, limit int, overhead int) (packet [][]byte, leftover [][]byte) {
	total := 0
	lastItem := -1
	// Find the index of the last item that fits into the packet we're building
	for i, message := range broadcasts {
		if total+len(message)+overhead > limit {
			break
		}
		lastItem = i
		total += len(message) + overhead
	}
	if lastItem < 0 && len(broadcasts) > 0 {
		// Don't warn on startup... it's fairly normal
		gracePeriod := time.Now().UTC().Add(0 - (5 * time.Second))
		if d.StartedAt.Before(gracePeriod) {
			log.Warnf("All messages were too long to fit! No broadcasts!")
		}
		// There could be a scenario here where one hugely long broadcast could
		// get stuck forever and prevent anything else from going out. There
		// may be a better way to handle this. Scanning for the next message that
		// does fit results in lots of memory copying and doesn't perform at scale.
		return nil, broadcasts
	}
	// Save the leftover messages after the last one that fit. If this is too
	// much, then set it to the lastItem.
	firstLeftover := lastItem + 1
	return broadcasts[:lastItem+1], broadcasts[firstLeftover:]
}
go
{ "resource": "" }
q11837
Version
train
// Version returns the segment after the first colon of the service's image
// name (conventionally the tag), or the whole image string when it contains
// no colon. Note that for images with multiple colons only the segment
// between the first and second colon is returned.
func (svc *Service) Version() string {
	parts := strings.Split(svc.Image, ":")
	if len(parts) > 1 {
		return parts[1]
	}
	return parts[0]
}
go
{ "resource": "" }
q11838
ToService
train
// ToService converts a Docker API container record into a Sidecar Service:
// short container ID, first container name, image, timestamps, proxy mode
// from the "ProxyMode" label (defaulting to "http"), and one Port entry per
// publicly bound container port.
func ToService(container *docker.APIContainers, ip string) Service {
	var svc Service
	hostname, _ := os.Hostname()
	svc.ID = container.ID[0:12]   // Use short IDs
	svc.Name = container.Names[0] // Use the first name
	svc.Image = container.Image
	svc.Created = time.Unix(container.Created, 0).UTC()
	svc.Updated = time.Now().UTC()
	svc.Hostname = hostname
	svc.Status = ALIVE
	if _, ok := container.Labels["ProxyMode"]; ok {
		svc.ProxyMode = container.Labels["ProxyMode"]
	} else {
		svc.ProxyMode = "http"
	}
	svc.Ports = make([]Port, 0)
	for _, port := range container.Ports {
		// Only publicly exposed ports are recorded.
		if port.PublicPort != 0 {
			svc.Ports = append(svc.Ports, buildPortFor(&port, container, ip))
		}
	}
	return svc
}
go
{ "resource": "" }
q11839
buildPortFor
train
// buildPortFor converts one Docker port mapping into a Sidecar Port,
// resolving the IP (container-specific binding wins over the supplied
// default) and the logical ServicePort from a "ServicePort_<private>" label.
func buildPortFor(port *docker.APIPort, container *docker.APIContainers, ip string) Port {
	// We look up service port labels by convention in the format "ServicePort_80=8080"
	svcPortLabel := fmt.Sprintf("ServicePort_%d", port.PrivatePort)
	// You can override the default IP by binding your container on a specific IP
	if port.IP != "0.0.0.0" && port.IP != "" {
		ip = port.IP
	}
	returnPort := Port{Port: port.PublicPort, Type: port.Type, IP: ip}
	if svcPort, ok := container.Labels[svcPortLabel]; ok {
		svcPortInt, err := strconv.Atoi(svcPort)
		if err != nil {
			log.Errorf("Error converting label value for %s to integer: %s",
				svcPortLabel, err.Error(),
			)
			return returnPort
		}
		// Everything was good, set the service port
		returnPort.ServicePort = int64(svcPortInt)
	}
	return returnPort
}
go
{ "resource": "" }
q11840
Eval
train
// Eval runs every configured anomaly-detection method over the data and
// returns their weighted-mean probability of anomaly in [0, 1]. Returns 0
// while delayed data is still filling the windows, or when the "magnitude"
// method's probability falls below the configured sensitivity.
func (a Anomalyzer) Eval() float64 {
	threshold := a.Conf.referenceSize + a.Conf.ActiveSize
	if a.Conf.Delay && len(a.Data) < threshold {
		return 0.0
	}
	probmap := make(map[string]float64)
	for _, method := range a.Conf.Methods {
		algorithm := Algorithms[method]
		prob := cap(algorithm(a.Data, *a.Conf), 0, 1)
		if prob != NA {
			// if highrank and lowrank methods exist then only listen to
			// the max of either
			if method == "highrank" || method == "lowrank" {
				// NOTE(review): a missing map key yields 0, never NaN, so this
				// IsNaN guard looks like dead code — confirm before removing.
				if math.IsNaN(probmap["rank"]) {
					probmap["rank"] = 0
				}
				probmap["rank"] = math.Max(probmap["rank"], prob)
			} else {
				probmap[method] = prob
			}
		}
	}
	probs := make(govector.Vector, 0, len(probmap))
	weights := make(govector.Vector, 0, len(probmap))
	for method, prob := range probmap {
		if method == "magnitude" && prob < a.Conf.Sensitivity {
			return 0.0
		}
		probs = append(probs, prob)
		weights = append(weights, a.getWeight(method, prob))
	}
	// ignore the error since we force the length of probs
	// and the weights to be equal
	weighted, _ := probs.WeightedMean(weights)
	// if all the weights are zero, then our weighted mean
	// function attempts to divide by zero which returns a
	// NaN. we'd like it to return 0.
	if math.IsNaN(weighted) {
		weighted = 0
	}
	return weighted
}
go
{ "resource": "" }
q11841
getWeight
train
func (a Anomalyzer) getWeight(name string, prob float64) float64 { weight := 0.5 dynamicWeights := []string{"magnitude", "fence"} // If either the magnitude and fence methods don't have any // probability to contribute, we don't want to hear about it. // If they do, we upweight them substantially. if exists(name, dynamicWeights) { if prob > 0.8 { weight = 5.0 } else { weight = 0.5 } } return weight }
go
{ "resource": "" }
q11842
extractWindows
train
// extractWindows splits the tail of vector into a reference window followed
// by an active window, shrinking both to fit short inputs. Errors when the
// shrunken reference window ends up smaller than minRefSize.
// NOTE(review): the error text talks about active size but the comparison is
// against minRefSize — accurate when callers pass minRefSize == ActiveSize;
// confirm for other call sites.
func extractWindows(vector govector.Vector, refSize, activeSize, minRefSize int) (govector.Vector, govector.Vector, error) {
	n := len(vector)
	activeSize = min(activeSize, n)
	refSize = min(refSize, n-activeSize)
	// make sure the reference size is at least as big as the active size
	// note that this penalty might be overly severe for some tests
	if refSize < minRefSize {
		return nil, nil, fmt.Errorf("Reference size must be at least as big as active size")
	}
	// return reference and active windows
	return vector[n-activeSize-refSize : n-activeSize], vector[n-activeSize:], nil
}
go
{ "resource": "" }
q11843
weightExp
train
// weightExp maps x onto an exponential curve with the given base, normalized
// so that weightExp(0, b) == 0 and weightExp(1, b) == 1.
func weightExp(x, base float64) float64 {
	numerator := math.Pow(base, x) - 1
	denominator := math.Pow(base, 1) - 1
	return numerator / denominator
}
go
{ "resource": "" }
q11844
KsStat
train
// KsStat computes the Kolmogorov-Smirnov statistic (the maximum displacement
// between the empirical CDFs of the reference and active windows), evaluated
// over points interpolated across both windows' combined range. Returns NA
// when the windows can't be extracted or their lengths aren't commensurate.
func KsStat(vector govector.Vector, conf AnomalyzerConf) float64 {
	reference, active, err := extractWindows(vector, conf.referenceSize, conf.ActiveSize, conf.ActiveSize)
	if err != nil {
		return NA
	}
	n1 := len(reference)
	n2 := len(active)
	if n1%n2 != 0 {
		return NA
	}
	// First sort the active data and generate a cummulative distribution function
	// using that data. Do the same for the reference data.
	activeEcdf := active.Ecdf()
	refEcdf := reference.Ecdf()
	// We want the reference and active vectors to have the same length n, so we
	// consider the min and max for each and interpolated the points between.
	min := math.Min(reference.Min(), active.Min())
	max := math.Max(reference.Max(), active.Max())
	interpolated := interpolate(min, max, n1+n2)
	// Then we apply the distribution function over the interpolated data.
	activeDist := interpolated.Apply(activeEcdf)
	refDist := interpolated.Apply(refEcdf)
	// Find the maximum displacement between both distributions.
	d := 0.0
	for i := 0; i < n1+n2; i++ {
		d = math.Max(d, math.Abs(activeDist[i]-refDist[i]))
	}
	return d
}
go
{ "resource": "" }
q11845
interpolate
train
func interpolate(min, max float64, npoints int) govector.Vector { interp := make(govector.Vector, npoints) step := (max - min) / (float64(npoints) - 1) interp[0] = min i := 1 for i < npoints { interp[i] = interp[i-1] + step i++ } return interp }
go
{ "resource": "" }
q11846
marshalValue
train
// marshalValue recursively converts a reflect.Value into a plain
// interface{} tree suitable for JSON encoding, honoring the sheriff
// Marshaller interface and deferring to json.Marshal-compatible types.
// Returns nil for invalid/unexportable values. Map keys must be strings.
func marshalValue(options *Options, v reflect.Value) (interface{}, error) {
	// return nil on nil pointer struct fields
	if !v.IsValid() || !v.CanInterface() {
		return nil, nil
	}
	val := v.Interface()
	if marshaller, ok := val.(Marshaller); ok {
		return marshaller.Marshal(options)
	}
	// types which are e.g. structs, slices or maps and implement one of the following interfaces should not be
	// marshalled by sheriff because they'll be correctly marshalled by json.Marshal instead.
	// Otherwise (e.g. net.IP) a byte slice may be output as a list of uints instead of as an IP string.
	switch val.(type) {
	case json.Marshaler, encoding.TextMarshaler, fmt.Stringer:
		return val, nil
	}
	k := v.Kind()
	// Dereference pointers before inspecting the underlying kind.
	if k == reflect.Ptr {
		v = v.Elem()
		val = v.Interface()
		k = v.Kind()
	}
	if k == reflect.Interface || k == reflect.Struct {
		return Marshal(options, val)
	}
	if k == reflect.Slice {
		l := v.Len()
		dest := make([]interface{}, l)
		for i := 0; i < l; i++ {
			d, err := marshalValue(options, v.Index(i))
			if err != nil {
				return nil, err
			}
			dest[i] = d
		}
		return dest, nil
	}
	if k == reflect.Map {
		mapKeys := v.MapKeys()
		if len(mapKeys) == 0 {
			return nil, nil
		}
		if mapKeys[0].Kind() != reflect.String {
			return nil, MarshalInvalidTypeError{t: mapKeys[0].Kind(), data: val}
		}
		dest := make(map[string]interface{})
		for _, key := range mapKeys {
			d, err := marshalValue(options, v.MapIndex(key))
			if err != nil {
				return nil, err
			}
			dest[key.Interface().(string)] = d
		}
		return dest, nil
	}
	return val, nil
}
go
{ "resource": "" }
q11847
contains
train
// contains reports whether key appears anywhere in list.
func contains(key string, list []string) bool {
	for i := range list {
		if list[i] == key {
			return true
		}
	}
	return false
}
go
{ "resource": "" }
q11848
listContains
train
func listContains(a []string, b []string) bool { for _, key := range a { if contains(key, b) { return true } } return false }
go
{ "resource": "" }
q11849
Error
train
// Error renders the RPC error as "<code>:<message>".
func (e *RPCError) Error() string {
	code := strconv.Itoa(e.Code)
	return code + ":" + e.Message
}
go
{ "resource": "" }
q11850
AsMap
train
func (res RPCResponses) AsMap() map[int]*RPCResponse { resMap := make(map[int]*RPCResponse, 0) for _, r := range res { resMap[r.ID] = r } return resMap }
go
{ "resource": "" }
q11851
GetByID
train
func (res RPCResponses) GetByID(id int) *RPCResponse { for _, r := range res { if r.ID == id { return r } } return nil }
go
{ "resource": "" }
q11852
HasError
train
func (res RPCResponses) HasError() bool { for _, res := range res { if res.Error != nil { return true } } return false }
go
{ "resource": "" }
q11853
GetInt
train
func (RPCResponse *RPCResponse) GetInt() (int64, error) { val, ok := RPCResponse.Result.(json.Number) if !ok { return 0, fmt.Errorf("could not parse int64 from %s", RPCResponse.Result) } i, err := val.Int64() if err != nil { return 0, err } return i, nil }
go
{ "resource": "" }
q11854
GetFloat
train
func (RPCResponse *RPCResponse) GetFloat() (float64, error) { val, ok := RPCResponse.Result.(json.Number) if !ok { return 0, fmt.Errorf("could not parse float64 from %s", RPCResponse.Result) } f, err := val.Float64() if err != nil { return 0, err } return f, nil }
go
{ "resource": "" }
q11855
GetBool
train
func (RPCResponse *RPCResponse) GetBool() (bool, error) { val, ok := RPCResponse.Result.(bool) if !ok { return false, fmt.Errorf("could not parse bool from %s", RPCResponse.Result) } return val, nil }
go
{ "resource": "" }
q11856
GetString
train
func (RPCResponse *RPCResponse) GetString() (string, error) { val, ok := RPCResponse.Result.(string) if !ok { return "", fmt.Errorf("could not parse string from %s", RPCResponse.Result) } return val, nil }
go
{ "resource": "" }
q11857
NewBatch
train
func NewBatch(id []byte) *Batch { return &Batch{ ID: id, ReplaySet: NewReplaySet(), entries: make(map[uint16]batchEntry), replayCache: make(map[HashPrefix]struct{}), } }
go
{ "resource": "" }
q11858
ForEach
train
func (b *Batch) ForEach(fn func(seqNum uint16, hashPrefix *HashPrefix, cltv uint32) error) error { for seqNum, entry := range b.entries { if err := fn(seqNum, &entry.hashPrefix, entry.cltv); err != nil { return err } } return nil }
go
{ "resource": "" }
q11859
hashSharedSecret
train
func hashSharedSecret(sharedSecret *Hash256) *HashPrefix { // Sha256 hash of sharedSecret h := sha256.New() h.Write(sharedSecret[:]) var sharedHash HashPrefix // Copy bytes to sharedHash copy(sharedHash[:], h.Sum(nil)) return &sharedHash }
go
{ "resource": "" }
q11860
Start
train
func (rl *MemoryReplayLog) Start() error { rl.batches = make(map[string]*ReplaySet) rl.entries = make(map[HashPrefix]uint32) return nil }
go
{ "resource": "" }
q11861
Stop
train
func (rl *MemoryReplayLog) Stop() error { if rl.entries == nil || rl.batches == nil { return errReplayLogNotStarted } rl.batches = nil rl.entries = nil return nil }
go
{ "resource": "" }
q11862
Get
train
func (rl *MemoryReplayLog) Get(hash *HashPrefix) (uint32, error) { if rl.entries == nil || rl.batches == nil { return 0, errReplayLogNotStarted } cltv, exists := rl.entries[*hash] if !exists { return 0, ErrLogEntryNotFound } return cltv, nil }
go
{ "resource": "" }
q11863
Put
train
func (rl *MemoryReplayLog) Put(hash *HashPrefix, cltv uint32) error { if rl.entries == nil || rl.batches == nil { return errReplayLogNotStarted } _, exists := rl.entries[*hash] if exists { return ErrReplayedPacket } rl.entries[*hash] = cltv return nil }
go
{ "resource": "" }
q11864
Delete
train
func (rl *MemoryReplayLog) Delete(hash *HashPrefix) error { if rl.entries == nil || rl.batches == nil { return errReplayLogNotStarted } delete(rl.entries, *hash) return nil }
go
{ "resource": "" }
q11865
PutBatch
train
func (rl *MemoryReplayLog) PutBatch(batch *Batch) (*ReplaySet, error) { if rl.entries == nil || rl.batches == nil { return nil, errReplayLogNotStarted } // Return the result when the batch was first processed to provide // idempotence. replays, exists := rl.batches[string(batch.ID)] if !exists { replays = NewReplaySet() err := batch.ForEach(func(seqNum uint16, hashPrefix *HashPrefix, cltv uint32) error { err := rl.Put(hashPrefix, cltv) if err == ErrReplayedPacket { replays.Add(seqNum) return nil } // An error would be bad because we have already updated the entries // map, but no errors other than ErrReplayedPacket should occur. return err }) if err != nil { return nil, err } replays.Merge(batch.ReplaySet) rl.batches[string(batch.ID)] = replays } batch.ReplaySet = replays batch.IsCommitted = true return replays, nil }
go
{ "resource": "" }
q11866
main
train
func main() { args := os.Args assocData := bytes.Repeat([]byte{'B'}, 32) if len(args) == 1 { fmt.Printf("Usage: %s (generate|decode) <private-keys>\n", args[0]) } else if args[1] == "generate" { var path sphinx.PaymentPath for i, hexKey := range args[2:] { binKey, err := hex.DecodeString(hexKey) if err != nil || len(binKey) != 33 { log.Fatalf("%s is not a valid hex pubkey %s", hexKey, err) } pubkey, err := btcec.ParsePubKey(binKey, btcec.S256()) if err != nil { panic(err) } path[i] = sphinx.OnionHop{ NodePub: *pubkey, HopData: sphinx.HopData{ Realm: [1]byte{0x00}, ForwardAmount: uint64(i), OutgoingCltv: uint32(i), }, } copy(path[i].HopData.NextAddress[:], bytes.Repeat([]byte{byte(i)}, 8)) fmt.Fprintf(os.Stderr, "Node %d pubkey %x\n", i, pubkey.SerializeCompressed()) } sessionKey, _ := btcec.PrivKeyFromBytes(btcec.S256(), bytes.Repeat([]byte{'A'}, 32)) msg, err := sphinx.NewOnionPacket(&path, sessionKey, assocData) if err != nil { log.Fatalf("Error creating message: %v", err) } w := bytes.NewBuffer([]byte{}) err = msg.Encode(w) if err != nil { log.Fatalf("Error serializing message: %v", err) } fmt.Printf("%x\n", w.Bytes()) } else if args[1] == "decode" { binKey, err := hex.DecodeString(args[2]) if len(binKey) != 32 || err != nil { log.Fatalf("Argument not a valid hex private key") } hexBytes, _ := ioutil.ReadAll(os.Stdin) binMsg, err := hex.DecodeString(strings.TrimSpace(string(hexBytes))) if err != nil { log.Fatalf("Error decoding message: %s", err) } privkey, _ := btcec.PrivKeyFromBytes(btcec.S256(), binKey) s := sphinx.NewRouter(privkey, &chaincfg.TestNet3Params, sphinx.NewMemoryReplayLog()) var packet sphinx.OnionPacket err = packet.Decode(bytes.NewBuffer(binMsg)) if err != nil { log.Fatalf("Error parsing message: %v", err) } p, err := s.ProcessOnionPacket(&packet, assocData, 10) if err != nil { log.Fatalf("Failed to decode message: %s", err) } w := bytes.NewBuffer([]byte{}) err = p.NextPacket.Encode(w) if err != nil { log.Fatalf("Error serializing message: %v", 
err) } fmt.Printf("%x\n", w.Bytes()) } }
go
{ "resource": "" }
q11867
Encode
train
func (hd *HopData) Encode(w io.Writer) error { if _, err := w.Write(hd.Realm[:]); err != nil { return err } if _, err := w.Write(hd.NextAddress[:]); err != nil { return err } if err := binary.Write(w, binary.BigEndian, hd.ForwardAmount); err != nil { return err } if err := binary.Write(w, binary.BigEndian, hd.OutgoingCltv); err != nil { return err } if _, err := w.Write(hd.ExtraBytes[:]); err != nil { return err } if _, err := w.Write(hd.HMAC[:]); err != nil { return err } return nil }
go
{ "resource": "" }
q11868
Decode
train
func (hd *HopData) Decode(r io.Reader) error { if _, err := io.ReadFull(r, hd.Realm[:]); err != nil { return err } if _, err := io.ReadFull(r, hd.NextAddress[:]); err != nil { return err } if err := binary.Read(r, binary.BigEndian, &hd.ForwardAmount); err != nil { return err } if err := binary.Read(r, binary.BigEndian, &hd.OutgoingCltv); err != nil { return err } if _, err := io.ReadFull(r, hd.ExtraBytes[:]); err != nil { return err } if _, err := io.ReadFull(r, hd.HMAC[:]); err != nil { return err } return nil }
go
{ "resource": "" }
q11869
generateSharedSecrets
train
func generateSharedSecrets(paymentPath []*btcec.PublicKey, sessionKey *btcec.PrivateKey) []Hash256 { // Each hop performs ECDH with our ephemeral key pair to arrive at a // shared secret. Additionally, each hop randomizes the group element // for the next hop by multiplying it by the blinding factor. This way // we only need to transmit a single group element, and hops can't link // a session back to us if they have several nodes in the path. numHops := len(paymentPath) hopSharedSecrets := make([]Hash256, numHops) // Compute the triplet for the first hop outside of the main loop. // Within the loop each new triplet will be computed recursively based // off of the blinding factor of the last hop. lastEphemeralPubKey := sessionKey.PubKey() hopSharedSecrets[0] = generateSharedSecret(paymentPath[0], sessionKey) lastBlindingFactor := computeBlindingFactor(lastEphemeralPubKey, hopSharedSecrets[0][:]) // The cached blinding factor will contain the running product of the // session private key x and blinding factors b_i, computed as // c_0 = x // c_i = c_{i-1} * b_{i-1} (mod |F(G)|). // = x * b_0 * b_1 * ... * b_{i-1} (mod |F(G)|). // // We begin with just the session private key x, so that base case // c_0 = x. At the beginning of each iteration, the previous blinding // factor is aggregated into the modular product, and used as the scalar // value in deriving the hop ephemeral keys and shared secrets. var cachedBlindingFactor big.Int cachedBlindingFactor.SetBytes(sessionKey.D.Bytes()) // Now recursively compute the cached blinding factor, ephemeral ECDH // pub keys, and shared secret for each hop. var nextBlindingFactor big.Int for i := 1; i <= numHops-1; i++ { // Update the cached blinding factor with b_{i-1}. nextBlindingFactor.SetBytes(lastBlindingFactor[:]) cachedBlindingFactor.Mul(&cachedBlindingFactor, &nextBlindingFactor) cachedBlindingFactor.Mod(&cachedBlindingFactor, btcec.S256().Params().N) // a_i = g ^ c_i // = g^( x * b_0 * ... * b_{i-1} ) // = X^( b_0 * ... 
* b_{i-1} ) // X_our_session_pub_key x all prev blinding factors lastEphemeralPubKey = blindBaseElement(cachedBlindingFactor.Bytes()) // e_i = Y_i ^ c_i // = ( Y_i ^ x )^( b_0 * ... * b_{i-1} ) // (Y_their_pub_key x x_our_priv) x all prev blinding factors hopBlindedPubKey := blindGroupElement( paymentPath[i], cachedBlindingFactor.Bytes(), ) // s_i = sha256( e_i ) // = sha256( Y_i ^ (x * b_0 * ... * b_{i-1} ) hopSharedSecrets[i] = sha256.Sum256(hopBlindedPubKey.SerializeCompressed()) // Only need to evaluate up to the penultimate blinding factor. if i >= numHops-1 { break } // b_i = sha256( a_i || s_i ) lastBlindingFactor = computeBlindingFactor( lastEphemeralPubKey, hopSharedSecrets[i][:], ) } return hopSharedSecrets }
go
{ "resource": "" }
q11870
NewOnionPacket
train
func NewOnionPacket(paymentPath *PaymentPath, sessionKey *btcec.PrivateKey, assocData []byte) (*OnionPacket, error) { numHops := paymentPath.TrueRouteLength() hopSharedSecrets := generateSharedSecrets( paymentPath.NodeKeys(), sessionKey, ) // Generate the padding, called "filler strings" in the paper. filler := generateHeaderPadding( "rho", numHops, HopDataSize, hopSharedSecrets, ) // Allocate zero'd out byte slices to store the final mix header packet // and the hmac for each hop. var ( mixHeader [routingInfoSize]byte nextHmac [HMACSize]byte hopDataBuf bytes.Buffer ) // Now we compute the routing information for each hop, along with a // MAC of the routing info using the shared key for that hop. for i := numHops - 1; i >= 0; i-- { // We'll derive the two keys we need for each hop in order to: // generate our stream cipher bytes for the mixHeader, and // calculate the MAC over the entire constructed packet. rhoKey := generateKey("rho", &hopSharedSecrets[i]) muKey := generateKey("mu", &hopSharedSecrets[i]) // The HMAC for the final hop is simply zeroes. This allows the // last hop to recognize that it is the destination for a // particular payment. paymentPath[i].HopData.HMAC = nextHmac // Next, using the key dedicated for our stream cipher, we'll // generate enough bytes to obfuscate this layer of the onion // packet. streamBytes := generateCipherStream(rhoKey, numStreamBytes) // Before we assemble the packet, we'll shift the current // mix-header to the write in order to make room for this next // per-hop data. rightShift(mixHeader[:], HopDataSize) // With the mix header right-shifted, we'll encode the current // hop data into a buffer we'll re-use during the packet // construction. err := paymentPath[i].HopData.Encode(&hopDataBuf) if err != nil { return nil, err } copy(mixHeader[:], hopDataBuf.Bytes()) // Once the packet for this hop has been assembled, we'll // re-encrypt the packet by XOR'ing with a stream of bytes // generated using our shared secret. 
xor(mixHeader[:], mixHeader[:], streamBytes[:routingInfoSize]) // If this is the "last" hop, then we'll override the tail of // the hop data. if i == numHops-1 { copy(mixHeader[len(mixHeader)-len(filler):], filler) } // The packet for this hop consists of: mixHeader. When // calculating the MAC, we'll also include the optional // associated data which can allow higher level applications to // prevent replay attacks. packet := append(mixHeader[:], assocData...) nextHmac = calcMac(muKey, packet) hopDataBuf.Reset() } return &OnionPacket{ Version: baseVersion, EphemeralKey: sessionKey.PubKey(), RoutingInfo: mixHeader, HeaderMAC: nextHmac, }, nil }
go
{ "resource": "" }
q11871
rightShift
train
func rightShift(slice []byte, num int) { for i := len(slice) - num - 1; i >= 0; i-- { slice[num+i] = slice[i] } for i := 0; i < num; i++ { slice[i] = 0 } }
go
{ "resource": "" }
q11872
Encode
train
func (f *OnionPacket) Encode(w io.Writer) error { ephemeral := f.EphemeralKey.SerializeCompressed() if _, err := w.Write([]byte{f.Version}); err != nil { return err } if _, err := w.Write(ephemeral); err != nil { return err } if _, err := w.Write(f.RoutingInfo[:]); err != nil { return err } if _, err := w.Write(f.HeaderMAC[:]); err != nil { return err } return nil }
go
{ "resource": "" }
q11873
Decode
train
func (f *OnionPacket) Decode(r io.Reader) error { var err error var buf [1]byte if _, err := io.ReadFull(r, buf[:]); err != nil { return err } f.Version = buf[0] // If version of the onion packet protocol unknown for us than in might // lead to improperly decoded data. if f.Version != baseVersion { return ErrInvalidOnionVersion } var ephemeral [33]byte if _, err := io.ReadFull(r, ephemeral[:]); err != nil { return err } f.EphemeralKey, err = btcec.ParsePubKey(ephemeral[:], btcec.S256()) if err != nil { return ErrInvalidOnionKey } if _, err := io.ReadFull(r, f.RoutingInfo[:]); err != nil { return err } if _, err := io.ReadFull(r, f.HeaderMAC[:]); err != nil { return err } return nil }
go
{ "resource": "" }
q11874
NewRouter
train
func NewRouter(nodeKey *btcec.PrivateKey, net *chaincfg.Params, log ReplayLog) *Router { var nodeID [AddressSize]byte copy(nodeID[:], btcutil.Hash160(nodeKey.PubKey().SerializeCompressed())) // Safe to ignore the error here, nodeID is 20 bytes. nodeAddr, _ := btcutil.NewAddressPubKeyHash(nodeID[:], net) return &Router{ nodeID: nodeID, nodeAddr: nodeAddr, onionKey: &btcec.PrivateKey{ PublicKey: ecdsa.PublicKey{ Curve: btcec.S256(), X: nodeKey.X, Y: nodeKey.Y, }, D: nodeKey.D, }, log: log, } }
go
{ "resource": "" }
q11875
unwrapPacket
train
func unwrapPacket(onionPkt *OnionPacket, sharedSecret *Hash256, assocData []byte) (*OnionPacket, *HopData, error) { dhKey := onionPkt.EphemeralKey routeInfo := onionPkt.RoutingInfo headerMac := onionPkt.HeaderMAC // Using the derived shared secret, ensure the integrity of the routing // information by checking the attached MAC without leaking timing // information. message := append(routeInfo[:], assocData...) calculatedMac := calcMac(generateKey("mu", sharedSecret), message) if !hmac.Equal(headerMac[:], calculatedMac[:]) { return nil, nil, ErrInvalidOnionHMAC } // Attach the padding zeroes in order to properly strip an encryption // layer off the routing info revealing the routing information for the // next hop. streamBytes := generateCipherStream( generateKey("rho", sharedSecret), numStreamBytes, ) zeroBytes := bytes.Repeat([]byte{0}, HopDataSize) headerWithPadding := append(routeInfo[:], zeroBytes...) var hopInfo [numStreamBytes]byte xor(hopInfo[:], headerWithPadding, streamBytes) // Randomize the DH group element for the next hop using the // deterministic blinding factor. blindingFactor := computeBlindingFactor(dhKey, sharedSecret[:]) nextDHKey := blindGroupElement(dhKey, blindingFactor[:]) // With the MAC checked, and the payload decrypted, we can now parse // out the per-hop data so we can derive the specified forwarding // instructions. var hopData HopData if err := hopData.Decode(bytes.NewReader(hopInfo[:])); err != nil { return nil, nil, err } // With the necessary items extracted, we'll copy of the onion packet // for the next node, snipping off our per-hop data. var nextMixHeader [routingInfoSize]byte copy(nextMixHeader[:], hopInfo[HopDataSize:]) innerPkt := &OnionPacket{ Version: onionPkt.Version, EphemeralKey: nextDHKey, RoutingInfo: nextMixHeader, HeaderMAC: hopData.HMAC, } return innerPkt, &hopData, nil }
go
{ "resource": "" }
q11876
processOnionPacket
train
func processOnionPacket(onionPkt *OnionPacket, sharedSecret *Hash256, assocData []byte, sharedSecretGen sharedSecretGenerator) (*ProcessedPacket, error) { // First, we'll unwrap an initial layer of the onion packet. Typically, // we'll only have a single layer to unwrap, However, if the sender has // additional data for us within the Extra Onion Blobs (EOBs), then we // may have to unwrap additional layers. By default, the inner most // mix header is the one that we'll want to pass onto the next hop so // they can properly check the HMAC and unwrap a layer for their // handoff hop. innerPkt, outerHopData, err := unwrapPacket( onionPkt, sharedSecret, assocData, ) if err != nil { return nil, err } // By default we'll assume that there are additional hops in the route. // However if the uncovered 'nextMac' is all zeroes, then this // indicates that we're the final hop in the route. var action ProcessCode = MoreHops if bytes.Compare(zeroHMAC[:], outerHopData.HMAC[:]) == 0 { action = ExitNode } // Finally, we'll return a fully processed packet with the outer most // hop data (where the primary forwarding instructions lie) and the // inner most onion packet that we unwrapped. return &ProcessedPacket{ Action: action, ForwardingInstructions: *outerHopData, NextPacket: innerPkt, }, nil }
go
{ "resource": "" }
q11877
Commit
train
func (t *Tx) Commit() ([]ProcessedPacket, *ReplaySet, error) { if t.batch.IsCommitted { return t.packets, t.batch.ReplaySet, nil } rs, err := t.router.log.PutBatch(t.batch) return t.packets, rs, err }
go
{ "resource": "" }
q11878
IsEmpty
train
func (o OnionHop) IsEmpty() bool { return o.NodePub.X == nil || o.NodePub.Y == nil }
go
{ "resource": "" }
q11879
calcMac
train
func calcMac(key [keyLen]byte, msg []byte) [HMACSize]byte { hmac := hmac.New(sha256.New, key[:]) hmac.Write(msg) h := hmac.Sum(nil) var mac [HMACSize]byte copy(mac[:], h[:HMACSize]) return mac }
go
{ "resource": "" }
q11880
generateCipherStream
train
func generateCipherStream(key [keyLen]byte, numBytes uint) []byte { var ( nonce [8]byte ) cipher, err := chacha20.NewCipher(nonce[:], key[:]) if err != nil { panic(err) } output := make([]byte, numBytes) cipher.XORKeyStream(output, output) return output }
go
{ "resource": "" }
q11881
generateSharedSecret
train
func (r *Router) generateSharedSecret(dhKey *btcec.PublicKey) (Hash256, error) { var sharedSecret Hash256 // Ensure that the public key is on our curve. if !btcec.S256().IsOnCurve(dhKey.X, dhKey.Y) { return sharedSecret, ErrInvalidOnionKey } // Compute our shared secret. sharedSecret = generateSharedSecret(dhKey, r.onionKey) return sharedSecret, nil }
go
{ "resource": "" }
q11882
generateSharedSecret
train
func generateSharedSecret(pub *btcec.PublicKey, priv *btcec.PrivateKey) Hash256 { s := &btcec.PublicKey{} s.X, s.Y = btcec.S256().ScalarMult(pub.X, pub.Y, priv.D.Bytes()) return sha256.Sum256(s.SerializeCompressed()) }
go
{ "resource": "" }
q11883
DecryptError
train
func (o *OnionErrorDecrypter) DecryptError(encryptedData []byte) (*btcec.PublicKey, []byte, error) { // Ensure the error message length is as expected. if len(encryptedData) != onionErrorLength { return nil, nil, fmt.Errorf("invalid error length: "+ "expected %v got %v", onionErrorLength, len(encryptedData)) } sharedSecrets := generateSharedSecrets( o.circuit.PaymentPath, o.circuit.SessionKey, ) var ( sender *btcec.PublicKey msg []byte dummySecret Hash256 ) copy(dummySecret[:], bytes.Repeat([]byte{1}, 32)) // We'll iterate a constant amount of hops to ensure that we don't give // away an timing information pertaining to the position in the route // that the error emanated from. for i := 0; i < NumMaxHops; i++ { var sharedSecret Hash256 // If we've already found the sender, then we'll use our dummy // secret to continue decryption attempts to fill out the rest // of the loop. Otherwise, we'll use the next shared secret in // line. if sender != nil || i > len(sharedSecrets)-1 { sharedSecret = dummySecret } else { sharedSecret = sharedSecrets[i] } // With the shared secret, we'll now strip off a layer of // encryption from the encrypted error payload. encryptedData = onionEncrypt(&sharedSecret, encryptedData) // Next, we'll need to separate the data, from the MAC itself // so we can reconstruct and verify it. expectedMac := encryptedData[:sha256.Size] data := encryptedData[sha256.Size:] // With the data split, we'll now re-generate the MAC using its // specified key. umKey := generateKey("um", &sharedSecret) h := hmac.New(sha256.New, umKey[:]) h.Write(data) // If the MAC matches up, then we've found the sender of the // error and have also obtained the fully decrypted message. realMac := h.Sum(nil) if hmac.Equal(realMac, expectedMac) && sender == nil { sender = o.circuit.PaymentPath[i] msg = data } } // If the sender pointer is still nil, then we haven't found the // sender, meaning we've failed to decrypt. 
if sender == nil { return nil, nil, errors.New("unable to retrieve onion failure") } return sender, msg, nil }
go
{ "resource": "" }
q11884
EncryptError
train
func (o *OnionErrorEncrypter) EncryptError(initial bool, data []byte) []byte { if initial { umKey := generateKey("um", &o.sharedSecret) hash := hmac.New(sha256.New, umKey[:]) hash.Write(data) h := hash.Sum(nil) data = append(h, data...) } return onionEncrypt(&o.sharedSecret, data) }
go
{ "resource": "" }
q11885
NewOnionErrorEncrypter
train
func NewOnionErrorEncrypter(router *Router, ephemeralKey *btcec.PublicKey) (*OnionErrorEncrypter, error) { sharedSecret, err := router.generateSharedSecret(ephemeralKey) if err != nil { return nil, err } return &OnionErrorEncrypter{ sharedSecret: sharedSecret, }, nil }
go
{ "resource": "" }
q11886
Encode
train
func (o *OnionErrorEncrypter) Encode(w io.Writer) error { _, err := w.Write(o.sharedSecret[:]) return err }
go
{ "resource": "" }
q11887
Decode
train
func (o *OnionErrorEncrypter) Decode(r io.Reader) error { _, err := io.ReadFull(r, o.sharedSecret[:]) return err }
go
{ "resource": "" }
q11888
Decode
train
func (c *Circuit) Decode(r io.Reader) error { var keyLength [1]byte if _, err := r.Read(keyLength[:]); err != nil { return err } sessionKeyData := make([]byte, uint8(keyLength[0])) if _, err := r.Read(sessionKeyData[:]); err != nil { return err } c.SessionKey, _ = btcec.PrivKeyFromBytes(btcec.S256(), sessionKeyData) var pathLength [1]byte if _, err := r.Read(pathLength[:]); err != nil { return err } c.PaymentPath = make([]*btcec.PublicKey, uint8(pathLength[0])) for i := 0; i < len(c.PaymentPath); i++ { var pubKeyData [btcec.PubKeyBytesLenCompressed]byte if _, err := r.Read(pubKeyData[:]); err != nil { return err } pubKey, err := btcec.ParsePubKey(pubKeyData[:], btcec.S256()) if err != nil { return err } c.PaymentPath[i] = pubKey } return nil }
go
{ "resource": "" }
q11889
Encode
train
func (c *Circuit) Encode(w io.Writer) error { var keyLength [1]byte keyLength[0] = uint8(len(c.SessionKey.Serialize())) if _, err := w.Write(keyLength[:]); err != nil { return err } if _, err := w.Write(c.SessionKey.Serialize()); err != nil { return err } var pathLength [1]byte pathLength[0] = uint8(len(c.PaymentPath)) if _, err := w.Write(pathLength[:]); err != nil { return err } for _, pubKey := range c.PaymentPath { if _, err := w.Write(pubKey.SerializeCompressed()); err != nil { return err } } return nil }
go
{ "resource": "" }
q11890
Contains
train
func (rs *ReplaySet) Contains(idx uint16) bool { _, ok := rs.replays[idx] return ok }
go
{ "resource": "" }
q11891
Merge
train
func (rs *ReplaySet) Merge(rs2 *ReplaySet) { for seqNum := range rs2.replays { rs.Add(seqNum) } }
go
{ "resource": "" }
q11892
Encode
train
func (rs *ReplaySet) Encode(w io.Writer) error { for seqNum := range rs.replays { err := binary.Write(w, binary.BigEndian, seqNum) if err != nil { return err } } return nil }
go
{ "resource": "" }
q11893
Decode
train
func (rs *ReplaySet) Decode(r io.Reader) error { for { // seqNum provides to buffer to read the next uint16 index. var seqNum uint16 err := binary.Read(r, binary.BigEndian, &seqNum) switch err { case nil: // Successful read, proceed. case io.EOF: return nil default: // Can return ErrShortBuffer or ErrUnexpectedEOF. return err } // Add this decoded sequence number to the set. rs.Add(seqNum) } }
go
{ "resource": "" }
q11894
SetHttpContext
train
func (hook *SentryHook) SetHttpContext(h *raven.Http) { hook.client.SetHttpContext(h) }
go
{ "resource": "" }
q11895
SetIgnoreErrors
train
func (hook *SentryHook) SetIgnoreErrors(errs ...string) error { return hook.client.SetIgnoreErrors(errs) }
go
{ "resource": "" }
q11896
SetSampleRate
train
func (hook *SentryHook) SetSampleRate(rate float32) error { return hook.client.SetSampleRate(rate) }
go
{ "resource": "" }
q11897
SetTagsContext
train
func (hook *SentryHook) SetTagsContext(t map[string]string) { hook.client.SetTagsContext(t) }
go
{ "resource": "" }
q11898
SetUserContext
train
func (hook *SentryHook) SetUserContext(u *raven.User) { hook.client.SetUserContext(u) }
go
{ "resource": "" }
q11899
xtob
train
func xtob(x string) (byte, bool) { b1 := xvalues[x[0]] b2 := xvalues[x[1]] return (b1 << 4) | b2, b1 != 255 && b2 != 255 }
go
{ "resource": "" }