_id
stringlengths
2
7
title
stringlengths
1
118
partition
stringclasses
3 values
text
stringlengths
52
85.5k
language
stringclasses
1 value
meta_information
dict
q7000
Unregister
train
func (r *Registry) Unregister(puID string) error { r.Lock() defer r.Unlock() delete(r.indexByName, puID) r.indexByPort.DeleteByID(puID, true) r.indexByPort.DeleteByID(puID, false) return nil }
go
{ "resource": "" }
q7001
RetrieveServiceByID
train
func (r *Registry) RetrieveServiceByID(id string) (*ServiceContext, error) { r.Lock() defer r.Unlock() svc, ok := r.indexByName[id] if !ok { return nil, fmt.Errorf("Service not found: %s", id) } return svc, nil }
go
{ "resource": "" }
q7002
RetrieveExposedServiceContext
train
func (r *Registry) RetrieveExposedServiceContext(ip net.IP, port int, host string) (*PortContext, error) { r.Lock() defer r.Unlock() data := r.indexByPort.Find(ip, port, host, true) if data == nil { return nil, fmt.Errorf("Service information not found: %s %d %s", ip.String(), port, host) } portContext, ok := data.(*PortContext) if !ok { return nil, fmt.Errorf("Internal server error") } return portContext, nil }
go
{ "resource": "" }
q7003
updateExposedPortAssociations
train
func (r *Registry) updateExposedPortAssociations(sctx *ServiceContext, service *policy.ApplicationService, secrets secrets.Secrets) error { // Do All the basic validations first. if service.PrivateNetworkInfo == nil { return fmt.Errorf("Private network is required for exposed services") } port, err := service.PrivateNetworkInfo.Ports.SinglePort() if err != nil { return fmt.Errorf("Multi-port is not supported for exposed services: %s", err) } if service.PublicNetworkInfo != nil { if _, err := service.PublicNetworkInfo.Ports.SinglePort(); err != nil { return fmt.Errorf("Multi-port is not supported for public network services: %s", err) } } // Find any existing state and get the authorizer. We do not want // to re-initialize the authorizer for every policy update. authProcessor, err := r.createOrUpdateAuthProcessor(sctx, service, secrets) if err != nil { return err } clientCAs := x509.NewCertPool() if (service.UserAuthorizationType == policy.UserAuthorizationMutualTLS || service.UserAuthorizationType == policy.UserAuthorizationJWT) && len(service.MutualTLSTrustedRoots) > 0 { if !clientCAs.AppendCertsFromPEM(service.MutualTLSTrustedRoots) { return fmt.Errorf("Unable to process client CAs") } } // Add the new references. 
if err := r.indexByPort.Add( service.PrivateNetworkInfo, sctx.PU.ContextID, &PortContext{ ID: sctx.PU.ContextID, Service: service, TargetPort: int(port), Type: serviceTypeToNetworkListenerType(service.Type, false), Authorizer: authProcessor, ClientTrustedRoots: clientCAs, PUContext: sctx.PUContext, }, true, ); err != nil { return fmt.Errorf("Possible port overlap: %s", err) } if service.Type == policy.ServiceHTTP && service.PublicNetworkInfo != nil { if err := r.indexByPort.Add( service.PublicNetworkInfo, sctx.PU.ContextID, &PortContext{ ID: sctx.PU.ContextID, Service: service, TargetPort: int(port), Type: serviceTypeToNetworkListenerType(service.Type, service.PublicServiceNoTLS), Authorizer: authProcessor, ClientTrustedRoots: clientCAs, PUContext: sctx.PUContext, }, true, ); err != nil { return fmt.Errorf("Possible port overlap with public services: %s", err) } } return nil }
go
{ "resource": "" }
q7004
updateExposedServices
train
func (r *Registry) updateExposedServices(sctx *ServiceContext, secrets secrets.Secrets) error { for _, service := range sctx.PU.Policy.ExposedServices() { if service.Type != policy.ServiceHTTP && service.Type != policy.ServiceTCP { continue } if err := r.updateExposedPortAssociations(sctx, service, secrets); err != nil { return err } } return nil }
go
{ "resource": "" }
q7005
updateDependentServices
train
func (r *Registry) updateDependentServices(sctx *ServiceContext) error { for _, service := range sctx.PU.Policy.DependentServices() { if len(service.CACert) != 0 { sctx.RootCA = append(sctx.RootCA, service.CACert) } serviceData := &DependentServiceData{ ServiceType: serviceTypeToApplicationListenerType(service.Type), } if service.Type == policy.ServiceHTTP { serviceData.APICache = urisearch.NewAPICache(service.HTTPRules, service.ID, service.External) } if err := sctx.dependentServiceCache.Add( service.NetworkInfo, sctx.PU.ContextID, serviceData, false, ); err != nil { return fmt.Errorf("Possible overlap in dependent services: %s", err) } } return nil }
go
{ "resource": "" }
q7006
resync
train
func (d *Datapath) resync(newPortMap map[string]map[string]bool) { iptablesInstance := iptablesctrl.GetInstance() if iptablesInstance == nil { return } for k, vs := range d.puToPortsMap { m := newPortMap[k] for v := range vs { if m == nil || !m[v] { err := iptablesInstance.DeletePortFromPortSet(k, v) if err != nil { zap.L().Debug("Delete port set returned error", zap.Error(err)) } // delete the port from contextIDFromTCPPort cache err = d.contextIDFromTCPPort.RemoveStringPorts(v) if err != nil { zap.L().Debug("can not remove port from cache", zap.Error(err)) } } } } for k, vs := range newPortMap { m := d.puToPortsMap[k] for v := range vs { if m == nil || !m[v] { portSpec, err := portspec.NewPortSpecFromString(v, k) if err != nil { continue } d.contextIDFromTCPPort.AddPortSpec(portSpec) err = iptablesInstance.AddPortToPortSet(k, v) if err != nil { zap.L().Error("Failed to add port to portset", zap.String("context", k), zap.String("port", v)) } } } } d.puToPortsMap = newPortMap }
go
{ "resource": "" }
q7007
FlattenClaim
train
// FlattenClaim converts a single claim value into "key=value" attribute
// strings. A plain string yields one attribute; a string slice yields one
// per element; a map of string values yields "key:subkey=value" entries.
// Claims decoded by encoding/json arrive as []interface{} rather than
// []string, so string elements of such slices are flattened as well
// (generalization: the original silently dropped []interface{} claims).
// Unsupported claim types produce an empty (non-nil) slice.
func FlattenClaim(key string, claim interface{}) []string {
	attributes := []string{}
	switch v := claim.(type) {
	case string:
		attributes = append(attributes, key+"="+v)
	case []string:
		for _, data := range v {
			attributes = append(attributes, key+"="+data)
		}
	case []interface{}:
		// encoding/json decodes JSON arrays into []interface{};
		// non-string elements are ignored.
		for _, data := range v {
			if s, ok := data.(string); ok {
				attributes = append(attributes, key+"="+s)
			}
		}
	case map[string]interface{}:
		for ikey, ivalue := range v {
			if attr, ok := ivalue.(string); ok {
				attributes = append(attributes, key+":"+ikey+"="+attr)
			}
		}
	}
	return attributes
}
go
{ "resource": "" }
q7008
SetupConfig
train
func (d *DockerMonitor) SetupConfig(registerer registerer.Registerer, cfg interface{}) (err error) { defaultConfig := DefaultConfig() if cfg == nil { cfg = defaultConfig } dockerConfig, ok := cfg.(*Config) if !ok { return fmt.Errorf("Invalid configuration specified") } // Setup defaults dockerConfig = SetupDefaultConfig(dockerConfig) d.socketType = dockerConfig.SocketType d.socketAddress = dockerConfig.SocketAddress d.metadataExtractor = dockerConfig.EventMetadataExtractor d.syncAtStart = dockerConfig.SyncAtStart d.killContainerOnPolicyError = dockerConfig.KillContainerOnPolicyError d.handlers = make(map[Event]func(ctx context.Context, event *events.Message) error) d.stoplistener = make(chan bool) d.netcls = cgnetcls.NewDockerCgroupNetController() d.numberOfQueues = runtime.NumCPU() * 8 d.eventnotifications = make([]chan *events.Message, d.numberOfQueues) d.stopprocessor = make([]chan bool, d.numberOfQueues) for i := 0; i < d.numberOfQueues; i++ { d.eventnotifications[i] = make(chan *events.Message, 1000) d.stopprocessor[i] = make(chan bool) } // Add handlers for the events that we know how to process d.addHandler(EventCreate, d.handleCreateEvent) d.addHandler(EventStart, d.handleStartEvent) d.addHandler(EventDie, d.handleDieEvent) d.addHandler(EventDestroy, d.handleDestroyEvent) d.addHandler(EventPause, d.handlePauseEvent) d.addHandler(EventUnpause, d.handleUnpauseEvent) return nil }
go
{ "resource": "" }
q7009
Run
train
func (d *DockerMonitor) Run(ctx context.Context) error { if err := d.config.IsComplete(); err != nil { return fmt.Errorf("docker: %s", err) } err := d.waitForDockerDaemon(ctx) if err != nil { zap.L().Error("Docker daemon is not running - skipping container processing", zap.Error(err)) return nil } if d.syncAtStart && d.config.Policy != nil { options := types.ContainerListOptions{All: true} containers, err := d.dockerClient.ContainerList(ctx, options) if err != nil { return fmt.Errorf("unable to get container list: %s", err) } // Starting the eventListener and wait to hear on channel for it to be ready. // Need to start before the resync process so that we don't loose any events. // They will be buffered. We don't want to start the listener before // getting the list from docker though, to avoid duplicates. listenerReady := make(chan struct{}) go d.eventListener(ctx, listenerReady) <-listenerReady zap.L().Debug("Syncing all existing containers") // Syncing all Existing containers depending on MonitorSetting if err := d.resyncContainers(ctx, containers); err != nil { zap.L().Error("Unable to sync existing containers", zap.Error(err)) } } else { // Starting the eventListener and wait to hear on channel for it to be ready. // We are not doing resync. We just start the listener. listenerReady := make(chan struct{}) go d.eventListener(ctx, listenerReady) <-listenerReady } // Start processing the events go d.eventProcessors(ctx) return nil }
go
{ "resource": "" }
q7010
sendRequestToQueue
train
func (d *DockerMonitor) sendRequestToQueue(r *events.Message) { key0 := uint64(256203161) key1 := uint64(982451653) key := d.getHashKey(r) h := siphash.Hash(key0, key1, []byte(key)) d.eventnotifications[int(h%uint64(d.numberOfQueues))] <- r }
go
{ "resource": "" }
q7011
eventProcessors
train
func (d *DockerMonitor) eventProcessors(ctx context.Context) { for i := 0; i < d.numberOfQueues; i++ { go func(i int) { for { select { case event := <-d.eventnotifications[i]: if f, ok := d.handlers[Event(event.Action)]; ok { if err := f(ctx, event); err != nil { zap.L().Error("Unable to handle docker event", zap.String("action", event.Action), zap.Error(err), ) } continue } case <-ctx.Done(): return } } }(i) } }
go
{ "resource": "" }
q7012
eventListener
train
func (d *DockerMonitor) eventListener(ctx context.Context, listenerReady chan struct{}) { f := filters.NewArgs() f.Add("type", "container") options := types.EventsOptions{ Filters: f, } messages, errs := d.dockerClient.Events(context.Background(), options) // Once the buffered event channel was returned by Docker we return the ready status. listenerReady <- struct{}{} for { select { case message := <-messages: zap.L().Debug("Got message from docker client", zap.String("action", message.Action), zap.String("ID", message.ID), ) d.sendRequestToQueue(&message) case err := <-errs: if err != nil && err != io.EOF { zap.L().Warn("Received docker event error", zap.Error(err), ) } case <-ctx.Done(): return } } }
go
{ "resource": "" }
q7013
Resync
train
func (d *DockerMonitor) Resync(ctx context.Context) error { if !d.syncAtStart || d.config.Policy == nil { zap.L().Debug("No synchronization of containers performed") return nil } zap.L().Debug("Syncing all existing containers") options := types.ContainerListOptions{All: true} containers, err := d.dockerClient.ContainerList(ctx, options) if err != nil { return fmt.Errorf("unable to get container list: %s", err) } return d.resyncContainers(ctx, containers) }
go
{ "resource": "" }
q7014
resyncContainersByOrder
train
func (d *DockerMonitor) resyncContainersByOrder(ctx context.Context, containers []types.Container, syncHost bool) error { for _, c := range containers { container, err := d.dockerClient.ContainerInspect(ctx, c.ID) if err != nil { continue } if (syncHost && container.HostConfig.NetworkMode != constants.DockerHostMode) || (!syncHost && container.HostConfig.NetworkMode == constants.DockerHostMode) { continue } puID, _ := puIDFromDockerID(container.ID) runtime, err := d.extractMetadata(&container) if err != nil { continue } event := common.EventStop if container.State.Running { if !container.State.Paused { event = common.EventStart } else { event = common.EventPause } } // If it is a host container, we need to activate it as a Linux process. We will // override the options that the metadata extractor provided. if container.HostConfig.NetworkMode == constants.DockerHostMode { options := hostModeOptions(&container) options.PolicyExtensions = runtime.Options().PolicyExtensions runtime.SetOptions(*options) runtime.SetPUType(common.LinuxProcessPU) } runtime.SetOptions(runtime.Options()) if err := d.config.Policy.HandlePUEvent(ctx, puID, event, runtime); err != nil { zap.L().Error("Unable to sync existing Container", zap.String("dockerID", c.ID), zap.Error(err), ) } // if the container has hostnet set to true or is linked // to container with hostnet set to true, program the cgroup. if isHostNetworkContainer(runtime) { if err = d.setupHostMode(puID, runtime, &container); err != nil { return fmt.Errorf("unable to setup host mode for container %s: %s", puID, err) } } } return nil }
go
{ "resource": "" }
q7015
setupHostMode
train
func (d *DockerMonitor) setupHostMode(puID string, runtimeInfo policy.RuntimeReader, dockerInfo *types.ContainerJSON) (err error) { pausePUID := puID if dockerInfo.HostConfig.NetworkMode == constants.DockerHostMode { if err = d.netcls.Creategroup(puID); err != nil { return err } // Clean the cgroup on exit, if we have failed t activate. defer func() { if err != nil { if derr := d.netcls.DeleteCgroup(puID); derr != nil { zap.L().Warn("Failed to clean cgroup", zap.String("puID", puID), zap.Error(derr), zap.Error(err), ) } } }() markval := runtimeInfo.Options().CgroupMark if markval == "" { return errors.New("mark value not found") } mark, _ := strconv.ParseUint(markval, 10, 32) if err := d.netcls.AssignMark(puID, mark); err != nil { return err } } else { // Add the container pid that is linked to hostnet to // the cgroup of the parent container. pausePUID = getPausePUID(policyExtensions(runtimeInfo)) } return d.netcls.AddProcess(pausePUID, dockerInfo.State.Pid) }
go
{ "resource": "" }
q7016
extractMetadata
train
func (d *DockerMonitor) extractMetadata(dockerInfo *types.ContainerJSON) (*policy.PURuntime, error) { if dockerInfo == nil { return nil, errors.New("docker info is empty") } if d.metadataExtractor != nil { return d.metadataExtractor(dockerInfo) } return extractors.DefaultMetadataExtractor(dockerInfo) }
go
{ "resource": "" }
q7017
handleCreateEvent
train
func (d *DockerMonitor) handleCreateEvent(ctx context.Context, event *events.Message) error { puID, err := puIDFromDockerID(event.ID) if err != nil { return err } container, err := d.retrieveDockerInfo(ctx, event) if err != nil { return err } runtime, err := d.extractMetadata(container) if err != nil { return err } // If it is a host container, we need to activate it as a Linux process. We will // override the options that the metadata extractor provided. We will maintain // any policy extensions in the object. if container.HostConfig.NetworkMode == constants.DockerHostMode { options := hostModeOptions(container) options.PolicyExtensions = runtime.Options().PolicyExtensions runtime.SetOptions(*options) runtime.SetPUType(common.LinuxProcessPU) } runtime.SetOptions(runtime.Options()) return d.config.Policy.HandlePUEvent(ctx, puID, tevents.EventCreate, runtime) }
go
{ "resource": "" }
q7018
handleStartEvent
train
func (d *DockerMonitor) handleStartEvent(ctx context.Context, event *events.Message) error { container, err := d.retrieveDockerInfo(ctx, event) if err != nil { return err } if !container.State.Running { return nil } puID, err := puIDFromDockerID(container.ID) if err != nil { return err } runtime, err := d.extractMetadata(container) if err != nil { return err } // If it is a host container, we need to activate it as a Linux process. We will // override the options that the metadata extractor provided. if container.HostConfig.NetworkMode == constants.DockerHostMode { options := hostModeOptions(container) options.PolicyExtensions = runtime.Options().PolicyExtensions runtime.SetOptions(*options) runtime.SetPUType(common.LinuxProcessPU) } runtime.SetOptions(runtime.Options()) if err = d.config.Policy.HandlePUEvent(ctx, puID, tevents.EventStart, runtime); err != nil { if d.killContainerOnPolicyError { timeout := 0 * time.Second if err1 := d.dockerClient.ContainerStop(ctx, event.ID, &timeout); err1 != nil { zap.L().Warn("Unable to stop illegal container", zap.String("dockerID", event.ID), zap.Error(err1), ) } d.config.Collector.CollectContainerEvent(&collector.ContainerRecord{ ContextID: event.ID, IPAddress: nil, Tags: nil, Event: collector.ContainerFailed, }) return fmt.Errorf("unable to start container because of policy: container %s killed: %s", event.ID, err) } return fmt.Errorf("unable to set policy: container %s kept alive per policy: %s", puID, err) } // if the container has hostnet set to true or is linked // to container with hostnet set to true, program the cgroup. if isHostNetworkContainer(runtime) { if err = d.setupHostMode(puID, runtime, container); err != nil { return fmt.Errorf("unable to setup host mode for container %s: %s", puID, err) } } return nil }
go
{ "resource": "" }
q7019
handleDestroyEvent
train
func (d *DockerMonitor) handleDestroyEvent(ctx context.Context, event *events.Message) error { puID, err := puIDFromDockerID(event.ID) if err != nil { return err } runtime := policy.NewPURuntimeWithDefaults() runtime.SetOptions(runtime.Options()) err = d.config.Policy.HandlePUEvent(ctx, puID, tevents.EventDestroy, runtime) if err != nil { zap.L().Error("Failed to handle delete event", zap.Error(err), ) } if err := d.netcls.DeleteCgroup(puID); err != nil { zap.L().Warn("Failed to clean netcls group", zap.String("puID", puID), zap.Error(err), ) } return nil }
go
{ "resource": "" }
q7020
handlePauseEvent
train
func (d *DockerMonitor) handlePauseEvent(ctx context.Context, event *events.Message) error { zap.L().Info("UnPause Event for nativeID", zap.String("ID", event.ID)) puID, err := puIDFromDockerID(event.ID) if err != nil { return err } runtime := policy.NewPURuntimeWithDefaults() runtime.SetOptions(runtime.Options()) return d.config.Policy.HandlePUEvent(ctx, puID, tevents.EventPause, runtime) }
go
{ "resource": "" }
q7021
handleUnpauseEvent
train
func (d *DockerMonitor) handleUnpauseEvent(ctx context.Context, event *events.Message) error { puID, err := puIDFromDockerID(event.ID) if err != nil { return err } runtime := policy.NewPURuntimeWithDefaults() runtime.SetOptions(runtime.Options()) return d.config.Policy.HandlePUEvent(ctx, puID, tevents.EventUnpause, runtime) }
go
{ "resource": "" }
q7022
waitForDockerDaemon
train
func (d *DockerMonitor) waitForDockerDaemon(ctx context.Context) (err error) { done := make(chan bool) go func() { for errg := d.setupDockerDaemon(); errg != nil; { zap.L().Debug("Unable to init docker client. Retrying...", zap.Error(errg)) <-time.After(dockerRetryTimer) continue } done <- true }() select { case <-ctx.Done(): return nil case <-time.After(dockerInitializationWait): return fmt.Errorf("Unable to connect to docker daemon") case <-done: } return nil }
go
{ "resource": "" }
q7023
NewSecrets
train
func NewSecrets(s PublicSecrets) (Secrets, error) { switch s.SecretsType() { case PKICompactType: t := s.(*CompactPKIPublicSecrets) return NewCompactPKIWithTokenCA(t.Key, t.Certificate, t.CA, t.TokenCAs, t.Token, t.Compressed) default: return nil, fmt.Errorf("Unsupported type") } }
go
{ "resource": "" }
q7024
NewTable
train
// NewTable allocates an empty ServiceCache with all indices initialized.
func NewTable() *ServiceCache {
	return &ServiceCache{
		local:       make(map[int]map[uint32]entryList),
		remote:      make(map[int]map[uint32]entryList),
		remoteHosts: make(map[string]entryList),
		localHosts:  make(map[string]entryList),
	}
}
go
{ "resource": "" }
q7025
Add
train
func (s *ServiceCache) Add(e *common.Service, id string, data interface{}, local bool) error { s.Lock() defer s.Unlock() record := &entry{ ports: e.Ports, data: data, id: id, } if err := s.addPorts(e, record, local); err != nil { return err } if err := s.addHostService(e, record, local); err != nil { return err } return s.addIPService(e, record, local) }
go
{ "resource": "" }
q7026
Find
train
func (s *ServiceCache) Find(ip net.IP, port int, host string, local bool) interface{} { s.RLock() defer s.RUnlock() if host != "" { if data := s.findHost(host, port, local); data != nil { return data } } return s.findIP(ip, port, local) }
go
{ "resource": "" }
q7027
FindListeningServicesForPU
train
func (s *ServiceCache) FindListeningServicesForPU(id string) (interface{}, *portspec.PortSpec) { s.RLock() defer s.RUnlock() for _, spec := range s.localPorts { if spec.id == id { return spec.data, spec.ports } } return nil, nil }
go
{ "resource": "" }
q7028
DeleteByID
train
func (s *ServiceCache) DeleteByID(id string, local bool) { s.Lock() defer s.Unlock() hosts := s.remoteHosts prefixes := s.remote if local { hosts = s.localHosts prefixes = s.local } if local { s.localPorts = deleteMatchingPorts(s.localPorts, id) } else { s.remotePorts = deleteMatchingPorts(s.remotePorts, id) } for host, ports := range hosts { hosts[host] = deleteMatchingPorts(ports, id) if len(hosts[host]) == 0 { delete(hosts, host) } } for l, prefix := range prefixes { for ip, ports := range prefix { prefix[ip] = deleteMatchingPorts(ports, id) if len(prefix[ip]) == 0 { delete(prefix, ip) } } if len(prefix) == 0 { delete(prefixes, l) } } }
go
{ "resource": "" }
q7029
addPorts
train
func (s *ServiceCache) addPorts(e *common.Service, record *entry, local bool) error { if !local { return nil } for _, spec := range s.localPorts { if spec.ports.Overlaps(e.Ports) { return fmt.Errorf("service port overlap in the global port list: %+v %s", e.Addresses, e.Ports.String()) } } s.localPorts = append(s.localPorts, record) return nil }
go
{ "resource": "" }
q7030
Supervise
train
// Supervise is a no-op for the proxy implementation; it exists only to
// satisfy the supervisor interface.
func (s *ProxyInfo) Supervise(contextID string, puInfo *policy.PUInfo) error {
	return nil
}
go
{ "resource": "" }
q7031
NewMockMonitor
train
func NewMockMonitor(ctrl *gomock.Controller) *MockMonitor { mock := &MockMonitor{ctrl: ctrl} mock.recorder = &MockMonitorMockRecorder{mock} return mock }
go
{ "resource": "" }
q7032
NewMockImplementation
train
func NewMockImplementation(ctrl *gomock.Controller) *MockImplementation { mock := &MockImplementation{ctrl: ctrl} mock.recorder = &MockImplementationMockRecorder{mock} return mock }
go
{ "resource": "" }
q7033
SetupConfig
train
func (m *MockImplementation) SetupConfig(registerer registerer.Registerer, cfg interface{}) error { ret := m.ctrl.Call(m, "SetupConfig", registerer, cfg) ret0, _ := ret[0].(error) return ret0 }
go
{ "resource": "" }
q7034
SetupConfig
train
// SetupConfig registers an expected call to SetupConfig on the mock.
func (mr *MockImplementationMockRecorder) SetupConfig(registerer, cfg interface{}) *gomock.Call {
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetupConfig",
		reflect.TypeOf((*MockImplementation)(nil).SetupConfig), registerer, cfg)
}
go
{ "resource": "" }
q7035
SetupHandlers
train
// SetupHandlers replays the mocked SetupHandlers call.
func (m *MockImplementation) SetupHandlers(c *config.ProcessorConfig) {
	m.ctrl.Call(m, "SetupHandlers", c)
}
go
{ "resource": "" }
q7036
SetupHandlers
train
// SetupHandlers registers an expected call to SetupHandlers on the mock.
func (mr *MockImplementationMockRecorder) SetupHandlers(c interface{}) *gomock.Call {
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetupHandlers",
		reflect.TypeOf((*MockImplementation)(nil).SetupHandlers), c)
}
go
{ "resource": "" }
q7037
NewIpset
train
// NewIpset creates a new ipset with the given name, hash type and params.
func (i *goIpsetProvider) NewIpset(name string, hasht string, p *ipset.Params) (Ipset, error) {
	return ipset.New(name, hasht, p)
}
go
{ "resource": "" }
q7038
GetIpset
train
// GetIpset returns a handle to an existing ipset by name.
func (i *goIpsetProvider) GetIpset(name string) Ipset {
	return &ipset.IPSet{Name: name}
}
go
{ "resource": "" }
q7039
DestroyAll
train
func (i *goIpsetProvider) DestroyAll(prefix string) error { sets, err := i.ListIPSets() if err != nil { return ipset.DestroyAll() } for _, s := range sets { if !strings.HasPrefix(s, prefix) { continue } ips := i.GetIpset(s) if err := ips.Destroy(); err != nil { return ipset.DestroyAll() } } return nil }
go
{ "resource": "" }
q7040
UpdateConfiguration
train
func (m *monitors) UpdateConfiguration(ctx context.Context, config *config.MonitorConfig) error { // Monitor configuration cannot change at this time. // TODO: return nil }
go
{ "resource": "" }
q7041
Resync
train
func (m *monitors) Resync(ctx context.Context) error { failure := false var errs string for _, i := range m.monitors { if err := i.Resync(ctx); err != nil { errs = errs + err.Error() failure = true } } if failure { return fmt.Errorf("Monitor resync failed: %s", errs) } return nil }
go
{ "resource": "" }
q7042
Enforce
train
func (d *Datapath) Enforce(contextID string, puInfo *policy.PUInfo) error { // Always create a new PU context pu, err := pucontext.NewPU(contextID, puInfo, d.ExternalIPCacheTimeout) if err != nil { return fmt.Errorf("error creating new pu: %s", err) } // Cache PUs for retrieval based on packet information if pu.Type() != common.ContainerPU { mark, tcpPorts, udpPorts := pu.GetProcessKeys() d.puFromMark.AddOrUpdate(mark, pu) if pu.Type() == common.UIDLoginPU { user := puInfo.Runtime.Options().UserID d.puFromUser.AddOrUpdate(user, pu) } for _, port := range tcpPorts { if port == "0" { continue } portSpec, err := portspec.NewPortSpecFromString(port, contextID) if err != nil { continue } if puInfo.Runtime.PUType() == common.HostPU { d.contextIDFromTCPPort.AddPortSpecToEnd(portSpec) } else { d.contextIDFromTCPPort.AddPortSpec(portSpec) } } for _, port := range udpPorts { portSpec, err := portspec.NewPortSpecFromString(port, contextID) if err != nil { continue } // check for host pu and add its ports to the end. if puInfo.Runtime.PUType() == common.HostPU { d.contextIDFromUDPPort.AddPortSpecToEnd(portSpec) } else { d.contextIDFromUDPPort.AddPortSpec(portSpec) } } } else { d.puFromIP = pu } // pucontext launches a go routine to periodically // lookup dns names. ctx cancel signals the go routine to exit if prevPU, _ := d.puFromContextID.Get(contextID); prevPU != nil { prevPU.(*pucontext.PUContext).CancelFunc() } // Cache PU from contextID for management and policy updates d.puFromContextID.AddOrUpdate(contextID, pu) return nil }
go
{ "resource": "" }
q7043
Unenforce
train
func (d *Datapath) Unenforce(contextID string) error { var err error puContext, err := d.puFromContextID.Get(contextID) if err != nil { return fmt.Errorf("contextid not found in enforcer: %s", err) } // Cleanup the IP based lookup pu := puContext.(*pucontext.PUContext) // Cleanup the mark information if err = d.puFromMark.Remove(pu.Mark()); err != nil { zap.L().Debug("Unable to remove cache entry during unenforcement", zap.String("Mark", pu.Mark()), zap.Error(err), ) } // Cleanup the username if pu.Type() == common.UIDLoginPU { if err = d.puFromUser.Remove(pu.Username()); err != nil { zap.L().Debug("PU not found for the username", zap.String("username", pu.Username())) } } // Cleanup the port cache for _, port := range pu.TCPPorts() { if port == "0" { continue } if err := d.contextIDFromTCPPort.RemoveStringPorts(port); err != nil { zap.L().Debug("Unable to remove cache entry during unenforcement", zap.String("TCPPort", port), zap.Error(err), ) } } for _, port := range pu.UDPPorts() { if err := d.contextIDFromUDPPort.RemoveStringPorts(port); err != nil { zap.L().Debug("Unable to remove cache entry during unenforcement", zap.String("UDPPort", port), zap.Error(err), ) } } // Cleanup the contextID cache if err := d.puFromContextID.RemoveWithDelay(contextID, 10*time.Second); err != nil { zap.L().Warn("Unable to remove context from cache", zap.String("contextID", contextID), zap.Error(err), ) } return nil }
go
{ "resource": "" }
q7044
SetTargetNetworks
train
func (d *Datapath) SetTargetNetworks(cfg *runtime.Configuration) error { networks := cfg.TCPTargetNetworks if len(networks) == 0 { networks = []string{"0.0.0.0/1", "128.0.0.0/1"} } d.targetNetworks = acls.NewACLCache() targetacl := createPolicy(networks) return d.targetNetworks.AddRuleList(targetacl) }
go
{ "resource": "" }
q7045
Run
train
func (d *Datapath) Run(ctx context.Context) error { zap.L().Debug("Start enforcer", zap.Int("mode", int(d.mode))) if d.conntrack == nil { conntrackClient, err := flowtracking.NewClient(ctx) if err != nil { return err } d.conntrack = conntrackClient } d.startApplicationInterceptor(ctx) d.startNetworkInterceptor(ctx) go d.nflogger.Run(ctx) return nil }
go
{ "resource": "" }
q7046
contextFromIP
train
func (d *Datapath) contextFromIP(app bool, mark string, port uint16, protocol uint8) (*pucontext.PUContext, error) { if d.puFromIP != nil { return d.puFromIP, nil } if app { pu, err := d.puFromMark.Get(mark) if err != nil { zap.L().Error("Unable to find context for application flow with mark", zap.String("mark", mark), zap.Int("protocol", int(protocol)), zap.Int("port", int(port)), ) return nil, errMarkNotFound } return pu.(*pucontext.PUContext), nil } // Network packets for non container traffic if protocol == packet.IPProtocolTCP { contextID, err := d.contextIDFromTCPPort.GetSpecValueFromPort(port) if err != nil { zap.L().Debug("Could not find PU context for TCP server port ", zap.Uint16("port", port)) return nil, errPortNotFound } pu, err := d.puFromContextID.Get(contextID) if err != nil { return nil, errContextIDNotFound } return pu.(*pucontext.PUContext), nil } if protocol == packet.IPProtocolUDP { contextID, err := d.contextIDFromUDPPort.GetSpecValueFromPort(port) if err != nil { zap.L().Debug("Could not find PU context for UDP server port ", zap.Uint16("port", port)) return nil, errPortNotFound } pu, err := d.puFromContextID.Get(contextID) if err != nil { return nil, errContextIDNotFound } return pu.(*pucontext.PUContext), nil } zap.L().Error("Invalid protocol ", zap.Uint8("protocol", protocol)) return nil, errInvalidProtocol }
go
{ "resource": "" }
q7047
AssignMark
train
func (s *netCls) AssignMark(cgroupname string, mark uint64) error { _, err := os.Stat(filepath.Join(basePath, s.TriremePath, cgroupname)) if os.IsNotExist(err) { return fmt.Errorf("cgroup does not exist: %s", err) } //16 is the base since the mark file expects hexadecimal values markval := "0x" + (strconv.FormatUint(mark, 16)) if err := ioutil.WriteFile(filepath.Join(basePath, s.TriremePath, cgroupname, markFile), []byte(markval), 0644); err != nil { return fmt.Errorf("failed to write to net_cls.classid file for new cgroup: %s", err) } return nil }
go
{ "resource": "" }
q7048
AddProcess
train
func (s *netCls) AddProcess(cgroupname string, pid int) error { _, err := os.Stat(filepath.Join(basePath, s.TriremePath, cgroupname)) if os.IsNotExist(err) { return fmt.Errorf("cannot add process. cgroup does not exist: %s", err) } PID := []byte(strconv.Itoa(pid)) if err := syscall.Kill(pid, 0); err != nil { return nil } if err := ioutil.WriteFile(filepath.Join(basePath, s.TriremePath, cgroupname, procs), PID, 0644); err != nil { return fmt.Errorf("cannot add process: %s", err) } return nil }
go
{ "resource": "" }
q7049
RemoveProcess
train
func (s *netCls) RemoveProcess(cgroupname string, pid int) error { _, err := os.Stat(filepath.Join(basePath, s.TriremePath, cgroupname)) if os.IsNotExist(err) { return fmt.Errorf("cannot clean up process. cgroup does not exist: %s", err) } data, err := ioutil.ReadFile(filepath.Join(basePath, procs)) if err != nil { return fmt.Errorf("cannot cleanup process: %s", err) } if !strings.Contains(string(data), strconv.Itoa(pid)) { return errors.New("cannot cleanup process. process is not a part of this cgroup") } if err := ioutil.WriteFile(filepath.Join(basePath, procs), []byte(strconv.Itoa(pid)), 0644); err != nil { return fmt.Errorf("cannot clean up process: %s", err) } return nil }
go
{ "resource": "" }
q7050
DeleteCgroup
train
func (s *netCls) DeleteCgroup(cgroupname string) error { _, err := os.Stat(filepath.Join(basePath, s.TriremePath, cgroupname)) if os.IsNotExist(err) { zap.L().Debug("Group already deleted", zap.Error(err)) return nil } err = os.Remove(filepath.Join(basePath, s.TriremePath, cgroupname)) if err != nil { return fmt.Errorf("unable to delete cgroup %s: %s", cgroupname, err) } return nil }
go
{ "resource": "" }
q7051
Deletebasepath
train
func (s *netCls) Deletebasepath(cgroupName string) bool { if cgroupName == s.TriremePath { if err := os.Remove(filepath.Join(basePath, cgroupName)); err != nil { zap.L().Error("Error when removing Trireme Base Path", zap.Error(err)) } return true } return false }
go
{ "resource": "" }
q7052
ListCgroupProcesses
train
// ListCgroupProcesses returns the pids (as strings) listed in the
// cgroup.procs file of the cgroup named cgroupname. It returns an empty
// slice together with an error when the cgroup does not exist or its
// procs file cannot be read.
func (s *netCls) ListCgroupProcesses(cgroupname string) ([]string, error) {

	_, err := os.Stat(filepath.Join(basePath, s.TriremePath, cgroupname))
	if os.IsNotExist(err) {
		return []string{}, fmt.Errorf("cgroup %s does not exist: %s", cgroupname, err)
	}

	// NOTE(review): this uses the literal "cgroup.procs" while sibling
	// methods use the procs constant — confirm both name the same file.
	data, err := ioutil.ReadFile(filepath.Join(basePath, s.TriremePath, cgroupname, "cgroup.procs"))
	if err != nil {
		return []string{}, fmt.Errorf("cannot read procs file: %s", err)
	}

	procs := []string{}

	// One pid per line; skip blank lines (trailing newline).
	for _, line := range strings.Split(string(data), "\n") {
		if len(line) > 0 {
			procs = append(procs, line)
		}
	}

	return procs, nil
}
go
{ "resource": "" }
q7053
ListAllCgroups
train
func (s *netCls) ListAllCgroups(path string) []string { cgroups, err := ioutil.ReadDir(filepath.Join(basePath, s.TriremePath, path)) if err != nil { return []string{} } names := make([]string, len(cgroups)) for i := 0; i < len(cgroups); i++ { names[i] = cgroups[i].Name() } return names }
go
{ "resource": "" }
q7054
NewDockerCgroupNetController
train
func NewDockerCgroupNetController() Cgroupnetcls { controller := &netCls{ markchan: make(chan uint64), ReleaseAgentPath: "", TriremePath: "", } return controller }
go
{ "resource": "" }
q7055
NewCgroupNetController
train
// NewCgroupNetController returns a net_cls cgroup controller, mounting
// the net_cls controller on first use. The release agent defaults to
// the current executable's path and may be overridden by releasePath;
// triremepath, when non-empty, sets the sub-hierarchy used for all
// cgroup operations.
func NewCgroupNetController(triremepath string, releasePath string) Cgroupnetcls {
	// NOTE(review): the package-level `mounted` flag is read and written
	// without synchronization — confirm constructors run on one goroutine.
	if !mounted {
		mounted = true
		if err := mountCgroupController(); err != nil {
			zap.L().Error("Unable to mount net_cls controller - Linux process isolation not possible", zap.Error(err))
		}
	}

	// Error deliberately ignored: an empty binpath just leaves the release
	// agent unset unless releasePath overrides it below.
	binpath, _ := osext.Executable()
	controller := &netCls{
		markchan:         make(chan uint64),
		ReleaseAgentPath: binpath,
		TriremePath:      "",
	}

	if releasePath != "" {
		controller.ReleaseAgentPath = releasePath
	}

	if triremepath != "" {
		controller.TriremePath = triremepath
	}

	return controller
}
go
{ "resource": "" }
q7056
NewVerifier
train
func NewVerifier(s secrets.Secrets, globalCertificate *x509.Certificate) *Verifier { return &Verifier{ secrets: s, globalCert: globalCertificate, // tokenCache will cache the token results to accelerate performance tokenCache: gcache.New(2048).LRU().Expiration(20 * time.Second).Build(), } }
go
{ "resource": "" }
q7057
UpdateSecrets
train
// UpdateSecrets replaces the verifier's secrets and global certificate
// under the verifier's lock.
func (p *Verifier) UpdateSecrets(s secrets.Secrets, globalCert *x509.Certificate) {
	p.Lock()
	defer p.Unlock()

	p.secrets = s
	p.globalCert = globalCert
}
go
{ "resource": "" }
q7058
CreateAndSign
train
// CreateAndSign issues an ES256-signed JWT for the given identity.
// server becomes the issuer claim, id the subject, and profile/scopes
// are carried as custom claims. Tokens are cached per id, so repeated
// calls for the same id return the cached token. gkey must be an
// *ecdsa.PrivateKey; any other type is rejected.
func CreateAndSign(server string, profile, scopes []string, id string, validity time.Duration, gkey interface{}) (string, error) {

	key, ok := gkey.(*ecdsa.PrivateKey)
	if !ok {
		return "", fmt.Errorf("Not a valid private key format")
	}

	// NOTE(review): the cache is keyed only by id — confirm its TTL is
	// shorter than `validity`, otherwise an expired token could be served.
	if token, err := localCache.Get(id); err == nil {
		return token.(string), nil
	}

	claims := &JWTClaims{
		StandardClaims: jwt.StandardClaims{
			Issuer:    server,
			ExpiresAt: time.Now().Add(validity).Unix(),
			Subject:   id,
		},
		Profile: profile,
		Scopes:  scopes,
	}

	token, err := jwt.NewWithClaims(jwt.SigningMethodES256, claims).SignedString(key)
	if err != nil {
		return "", err
	}

	localCache.AddOrUpdate(id, token)

	return token, nil
}
go
{ "resource": "" }
q7059
generateContextID
train
func generateContextID(eventInfo *common.EventInfo) (string, error) { if eventInfo.PUID == "" { return "", errors.New("puid is empty from event info") } if len(eventInfo.PUID) < 12 { return "", errors.New("puid smaller than 12 characters") } return eventInfo.PUID[:12], nil }
go
{ "resource": "" }
q7060
NewClient
train
// NewClient dials a conntrack netlink connection and returns a Client
// wrapping it. The connection is closed automatically when ctx is done.
func NewClient(ctx context.Context) (*Client, error) {
	c, err := conntrack.Dial(&netlink.Config{})
	if err != nil {
		return nil, fmt.Errorf("flow tracker is unable to dial netlink: %s", err)
	}

	client := &Client{conn: c}

	// Tie the connection lifetime to the context.
	go func() {
		<-ctx.Done()
		client.conn.Close() // nolint errcheck
	}()

	return client, nil
}
go
{ "resource": "" }
q7061
UpdateApplicationFlowMark
train
// UpdateApplicationFlowMark sets the conntrack mark of the flow
// identified by the given protocol/address/port tuple to newmark.
func (c *Client) UpdateApplicationFlowMark(ipSrc, ipDst net.IP, protonum uint8, srcport, dstport uint16, newmark uint32) error {
	f := conntrack.NewFlow(
		protonum, 0,
		ipSrc, ipDst,
		srcport, dstport,
		0, newmark,
	)

	return c.conn.Update(f)
}
go
{ "resource": "" }
q7062
newReplyFlow
train
// newReplyFlow builds a conntrack Flow with the given status, timeout,
// and mark, populating only the reply-direction tuple (TupleReply); the
// forward tuple is left at its zero value.
func newReplyFlow(proto uint8, status conntrack.StatusFlag, srcAddr, destAddr net.IP, srcPort, destPort uint16, timeout, mark uint32) conntrack.Flow {

	var f conntrack.Flow
	f.Status.Value = status

	f.Timeout = timeout
	f.Mark = mark

	// Only the reply tuple is filled in with the provided endpoints.
	f.TupleReply.IP.SourceAddress = srcAddr
	f.TupleReply.IP.DestinationAddress = destAddr
	f.TupleReply.Proto.SourcePort = srcPort
	f.TupleReply.Proto.DestinationPort = destPort
	f.TupleReply.Proto.Protocol = proto

	return f
}
go
{ "resource": "" }
q7063
String
train
func (c *MonitorConfig) String() string { buf := fmt.Sprintf("MergeTags:[%s] ", strings.Join(c.MergeTags, ",")) buf += fmt.Sprintf("Common:%+v ", c.Common) buf += fmt.Sprintf("Monitors:{") for k, v := range c.Monitors { buf += fmt.Sprintf("{%d:%+v},", k, v) } buf += fmt.Sprintf("}") return buf }
go
{ "resource": "" }
q7064
IsComplete
train
func (c *ProcessorConfig) IsComplete() error { if c.Collector == nil { return fmt.Errorf("Missing configuration: collector") } if c.Policy == nil { return fmt.Errorf("Missing configuration: puHandler") } return nil }
go
{ "resource": "" }
q7065
CollectFlowEvent
train
func (c *collectorImpl) CollectFlowEvent(record *collector.FlowRecord) { hash := collector.StatsFlowHash(record) // If flow event doesn't have a count make it equal to 1. At least one flow is collected if record.Count == 0 { record.Count = 1 } c.Lock() defer c.Unlock() if r, ok := c.Flows[hash]; ok { r.Count = r.Count + record.Count return } c.Flows[hash] = record c.Flows[hash].Tags = record.Tags }
go
{ "resource": "" }
q7066
CollectUserEvent
train
// CollectUserEvent stores a user record the first time its ID is seen;
// records with an already-processed ID are ignored. Records that cannot
// be hashed are dropped with an error log.
func (c *collectorImpl) CollectUserEvent(record *collector.UserRecord) {
	if err := collector.StatsUserHash(record); err != nil {
		zap.L().Error("Cannot store user record", zap.Error(err))
		return
	}

	c.Lock()
	defer c.Unlock()
	if _, ok := c.ProcessedUsers[record.ID]; !ok {
		c.Users[record.ID] = record
		c.ProcessedUsers[record.ID] = true
	}
}
go
{ "resource": "" }
q7067
CollectPacketEvent
train
// CollectPacketEvent appends a datapath packet report to the local
// buffer under the collector lock.
// NOTE(review): an earlier comment here claimed this was unimplemented,
// yet the report IS buffered — confirm which behavior is intended.
func (c *collectorImpl) CollectPacketEvent(report *collector.PacketReport) {
	c.Lock()
	defer c.Unlock()
	zap.L().Debug("Collected Packet Event")
	c.DatapathPacketReports = append(c.DatapathPacketReports, report)
}
go
{ "resource": "" }
q7068
NewKubeClient
train
// NewKubeClient builds a Kubernetes clientset from the kubeconfig file
// at the given path.
func NewKubeClient(kubeconfig string) (*kubernetes.Clientset, error) {
	config, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
	if err != nil {
		return nil, fmt.Errorf("Error Building config from Kubeconfig: %v", err)
	}
	return kubernetes.NewForConfig(config)
}
go
{ "resource": "" }
q7069
CreateLocalPodController
train
// CreateLocalPodController creates a cache/controller pair watching
// pods in the given namespace, filtered by m.localNodeSelector() (so
// only pods on the local node are observed). The supplied
// add/delete/update callbacks are wrapped so that their errors are
// logged rather than propagated to the informer machinery.
func (m *KubernetesMonitor) CreateLocalPodController(namespace string,
	addFunc func(addedApiStruct *api.Pod) error,
	deleteFunc func(deletedApiStruct *api.Pod) error,
	updateFunc func(oldApiStruct, updatedApiStruct *api.Pod) error) (kubecache.Store, kubecache.Controller) {

	return CreateResourceController(m.kubeClient.CoreV1().RESTClient(), "pods", namespace, &api.Pod{}, m.localNodeSelector(),
		func(addedApiStruct interface{}) {
			if err := addFunc(addedApiStruct.(*api.Pod)); err != nil {
				zap.L().Error("Error while handling Add Pod", zap.Error(err))
			}
		},
		func(deletedApiStruct interface{}) {
			if err := deleteFunc(deletedApiStruct.(*api.Pod)); err != nil {
				zap.L().Error("Error while handling Delete Pod", zap.Error(err))
			}
		},
		func(oldApiStruct, updatedApiStruct interface{}) {
			if err := updateFunc(oldApiStruct.(*api.Pod), updatedApiStruct.(*api.Pod)); err != nil {
				zap.L().Error("Error while handling Update Pod", zap.Error(err))
			}
		})
}
go
{ "resource": "" }
q7070
Pod
train
// Pod fetches the pod with the given name and namespace from the
// Kubernetes API server.
func (m *KubernetesMonitor) Pod(podName string, namespace string) (*api.Pod, error) {
	targetPod, err := m.kubeClient.CoreV1().Pods(namespace).Get(podName, metav1.GetOptions{})
	if err != nil {
		return nil, fmt.Errorf("error getting Kubernetes labels & IP for pod %v : %v ", podName, err)
	}
	return targetPod, nil
}
go
{ "resource": "" }
q7071
DialMarkedWithContext
train
// DialMarkedWithContext is a stub: it ignores all arguments and returns
// a nil connection with a nil error.
// NOTE(review): presumably this is the non-Linux build of a
// socket-mark dialer — callers must tolerate a nil net.Conn; confirm.
func DialMarkedWithContext(ctx context.Context, network string, addr string, mark int) (net.Conn, error) {
	return nil, nil
}
go
{ "resource": "" }
q7072
NewSocketListener
train
// NewSocketListener is a stub: it ignores all arguments and returns a
// nil listener with a nil error.
// NOTE(review): presumably the non-Linux build of a marked-socket
// listener — callers must tolerate a nil net.Listener; confirm.
func NewSocketListener(ctx context.Context, port string, mark int) (net.Listener, error) {
	return nil, nil
}
go
{ "resource": "" }
q7073
GetOriginalDestination
train
// GetOriginalDestination returns the original destination IP and port
// recorded for this proxied connection.
func (p *ProxiedConnection) GetOriginalDestination() (net.IP, int) {
	ip, port := p.originalIP, p.originalPort
	return ip, port
}
go
{ "resource": "" }
q7074
Read
train
// Read delegates to the wrapped net connection.
func (c UIDConnection) Read(b []byte) (n int, err error) {
	return c.nc.Read(b)
}
go
{ "resource": "" }
q7075
RemoteAddr
train
func (c UIDConnection) RemoteAddr() net.Addr { uidAddr := &UIDAddr{ NetworkAddress: c.nc.RemoteAddr().Network(), } f, err := c.nc.File() if err != nil { uidAddr.Address = "NotAvailable" } defer f.Close() // nolint cred, err := syscall.GetsockoptUcred(int(f.Fd()), syscall.SOL_SOCKET, syscall.SO_PEERCRED) if err != nil { uidAddr.Address = "NotAvailable" } uidAddr.Address = strconv.Itoa(int(cred.Uid)) + ":" + strconv.Itoa(int(cred.Gid)) + ":" + strconv.Itoa(int(cred.Pid)) return uidAddr }
go
{ "resource": "" }
q7076
SetDeadline
train
// SetDeadline delegates to the wrapped net connection.
func (c UIDConnection) SetDeadline(t time.Time) error {
	return c.nc.SetDeadline(t)
}
go
{ "resource": "" }
q7077
SetReadDeadline
train
// SetReadDeadline delegates to the wrapped net connection.
func (c UIDConnection) SetReadDeadline(t time.Time) error {
	return c.nc.SetReadDeadline(t)
}
go
{ "resource": "" }
q7078
SetWriteDeadline
train
// SetWriteDeadline delegates to the wrapped net connection.
func (c UIDConnection) SetWriteDeadline(t time.Time) error {
	return c.nc.SetWriteDeadline(t)
}
go
{ "resource": "" }
q7079
newPortAction
train
func newPortAction(tcpport string, policy *policy.FlowPolicy) (*portAction, error) { p := &portAction{} if strings.Contains(tcpport, ":") { parts := strings.Split(tcpport, ":") if len(parts) != 2 { return nil, fmt.Errorf("invalid port: %s", tcpport) } port, err := strconv.Atoi(parts[0]) if err != nil { return nil, err } p.min = uint16(port) port, err = strconv.Atoi(parts[1]) if err != nil { return nil, err } p.max = uint16(port) } else { port, err := strconv.Atoi(tcpport) if err != nil { return nil, err } p.min = uint16(port) p.max = p.min } if p.min > p.max { return nil, errors.New("min port is greater than max port") } p.policy = policy return p, nil }
go
{ "resource": "" }
q7080
DefaultHostMetadataExtractor
train
// DefaultHostMetadataExtractor translates a host-service event into a
// PURuntime. Every event tag must be "key=value"; keys are namespaced
// under "@usr:". The runtime is given a fresh cgroup mark and is mapped
// to all addresses ("bridge": 0.0.0.0/0).
func DefaultHostMetadataExtractor(event *common.EventInfo) (*policy.PURuntime, error) {

	runtimeTags := policy.NewTagStore()

	for _, tag := range event.Tags {
		parts := strings.SplitN(tag, "=", 2)
		if len(parts) != 2 {
			return nil, fmt.Errorf("invalid tag: %s", tag)
		}
		runtimeTags.AppendKeyValue("@usr:"+parts[0], parts[1])
	}

	options := &policy.OptionsType{
		CgroupName: event.PUID,
		// Allocate a fresh net_cls mark for this PU.
		CgroupMark: strconv.FormatUint(cgnetcls.MarkVal(), 10),
		Services:   event.Services,
	}

	runtimeIps := policy.ExtendedMap{"bridge": "0.0.0.0/0"}

	return policy.NewPURuntime(event.Name, int(event.PID), "", runtimeTags, runtimeIps, event.PUType, options), nil
}
go
{ "resource": "" }
q7081
ProcessInfo
train
// ProcessInfo collects identity metadata for the process with the given
// pid as prefixed strings: "uid:", "gid:", "username:", and — for the
// owner's supplementary groups — "gids:" (id) and "groups:" (name).
// Lookups are best-effort: on any failure, whatever has been gathered
// so far is returned.
func ProcessInfo(pid int32) []string {
	userdata := []string{}

	p, err := process.NewProcess(pid)
	if err != nil {
		return userdata
	}

	uids, err := p.Uids()
	if err != nil {
		return userdata
	}

	groups, err := p.Gids()
	if err != nil {
		return userdata
	}

	username, err := p.Username()
	if err != nil {
		return userdata
	}

	for _, uid := range uids {
		userdata = append(userdata, "uid:"+strconv.Itoa(int(uid)))
	}

	for _, gid := range groups {
		userdata = append(userdata, "gid:"+strconv.Itoa(int(gid)))
	}

	userdata = append(userdata, "username:"+username)

	// Resolve the process owner's supplementary groups via the user db.
	userid, err := user.Lookup(username)
	if err != nil {
		return userdata
	}

	gids, err := userid.GroupIds()
	if err != nil {
		return userdata
	}

	for i := 0; i < len(gids); i++ {
		userdata = append(userdata, "gids:"+gids[i])
		// A group id that cannot be resolved to a name is skipped.
		group, err := user.LookupGroupId(gids[i])
		if err != nil {
			continue
		}
		userdata = append(userdata, "groups:"+group.Name)
	}

	return userdata
}
go
{ "resource": "" }
q7082
libs
train
// libs returns the names of the dynamic libraries imported by the ELF
// binary at binpath. A file that cannot be opened or parsed as ELF
// yields an empty slice.
func libs(binpath string) []string {
	f, err := elf.Open(binpath)
	if err != nil {
		return []string{}
	}
	// The original leaked the file handle; close it when done.
	defer f.Close() // nolint errcheck -- read-only handle

	libraries, _ := f.ImportedLibraries()
	return libraries
}
go
{ "resource": "" }
q7083
IsHostPU
train
func IsHostPU(runtime policy.RuntimeReader, mode constants.ModeType) bool { if runtime == nil { return false } if mode != constants.LocalServer { return false } return runtime.PUType() == common.HostPU }
go
{ "resource": "" }
q7084
NewAppProxy
train
// NewAppProxy creates the application proxy. The TLS trust pool is
// seeded with the system roots plus the CA carried in the provided
// secrets; failure to parse that CA aborts construction.
func NewAppProxy(tp tokenaccessor.TokenAccessor, c collector.EventCollector, puFromID cache.DataStore, certificate *tls.Certificate, s secrets.Secrets) (*AppProxy, error) {

	systemPool, err := x509.SystemCertPool()
	if err != nil {
		return nil, err
	}

	if ok := systemPool.AppendCertsFromPEM(s.PublicSecrets().CertAuthority()); !ok {
		return nil, fmt.Errorf("error while adding provided CA")
	}

	return &AppProxy{
		collector:     c,
		tokenaccessor: tp,
		secrets:       s,
		puFromID:      puFromID,
		cert:          certificate,
		clients:       cache.NewCache("clients"),
		systemCAPool:  systemPool,
		registry:      serviceregistry.NewServiceRegistry(),
	}, nil
}
go
{ "resource": "" }
q7085
Unenforce
train
// Unenforce tears down proxying for the PU identified by puID: it
// unregisters the PU from the service registry, unregisters and shuts
// down every per-protocol server, closes the connection multiplexer,
// and finally removes the client from the cache.
// NOTE(review): the registry entry is removed before the client lookup,
// so a missing client leaves the PU unregistered — confirm intended.
func (p *AppProxy) Unenforce(ctx context.Context, puID string) error {
	p.Lock()
	defer p.Unlock()

	// Remove pu from registry
	if err := p.registry.Unregister(puID); err != nil {
		return err
	}

	// Find the correct client.
	c, err := p.clients.Get(puID)
	if err != nil {
		return fmt.Errorf("Unable to find client")
	}
	client := c.(*clientData)

	// Shutdown all the servers and unregister listeners.
	for t, server := range client.netserver {
		if err := client.protomux.UnregisterListener(t); err != nil {
			zap.L().Error("Unable to unregister client", zap.Int("type", int(t)), zap.Error(err))
		}
		if err := server.ShutDown(); err != nil {
			zap.L().Debug("Unable to shutdown client server", zap.Error(err))
		}
	}

	// Terminate the connection multiplexer.
	client.protomux.Close()

	// Remove the client from the cache.
	return p.clients.Remove(puID)
}
go
{ "resource": "" }
q7086
UpdateSecrets
train
func (p *AppProxy) UpdateSecrets(secret secrets.Secrets) error { p.Lock() defer p.Unlock() p.secrets = secret return nil }
go
{ "resource": "" }
q7087
registerAndRun
train
// registerAndRun registers a sub-listener of the given type on the
// multiplexer and starts the matching proxy server on it: an HTTP proxy
// for the HTTP/HTTPS listener types, a TCP proxy otherwise. HTTPS
// network listeners are run with TLS enabled.
func (p *AppProxy) registerAndRun(ctx context.Context, puID string, ltype common.ListenerType, mux *protomux.MultiplexedListener, caPool *x509.CertPool, appproxy bool) (ServerInterface, error) {
	var listener net.Listener
	var err error

	// Create a new subordinate listener and register it for the requested type.
	listener, err = mux.RegisterListener(ltype)
	if err != nil {
		return nil, fmt.Errorf("Cannot register listener: %s", err)
	}

	// If the protocol is encrypted, wrap it with TLS.
	encrypted := false
	if ltype == common.HTTPSNetwork {
		encrypted = true
	}

	// Start the corresponding proxy
	switch ltype {
	case common.HTTPApplication, common.HTTPSApplication, common.HTTPNetwork, common.HTTPSNetwork:
		c := httpproxy.NewHTTPProxy(p.collector, puID, caPool, appproxy, proxyMarkInt, p.secrets, p.registry)
		return c, c.RunNetworkServer(ctx, listener, encrypted)
	default:
		c := tcp.NewTCPProxy(p.tokenaccessor, p.collector, puID, p.registry, p.cert, caPool)
		return c, c.RunNetworkServer(ctx, listener, encrypted)
	}
}
go
{ "resource": "" }
q7088
processCertificateUpdates
train
// processCertificateUpdates pushes any service certificates carried in
// the policy down to every per-protocol server. It returns true when an
// update was applied, and false (with no error) when the policy carries
// no cert/key pair. A CA bundle, when present, is merged into caPool;
// a merge failure is only logged.
func (p *AppProxy) processCertificateUpdates(puInfo *policy.PUInfo, client *clientData, caPool *x509.CertPool) (bool, error) {

	// If there are certificates provided, we will need to update them for the
	// services. If the certificates are nil, we ignore them.
	certPEM, keyPEM, caPEM := puInfo.Policy.ServiceCertificates()
	if certPEM == "" || keyPEM == "" {
		return false, nil
	}

	// Process any updates on the cert pool
	if caPEM != "" {
		if !caPool.AppendCertsFromPEM([]byte(caPEM)) {
			zap.L().Warn("Failed to add Services CA")
		}
	}

	// Create the TLS certificate
	tlsCert, err := tls.X509KeyPair([]byte(certPEM), []byte(keyPEM))
	if err != nil {
		return false, fmt.Errorf("Invalid certificates: %s", err)
	}

	for _, server := range client.netserver {
		server.UpdateSecrets(&tlsCert, caPool, p.secrets, certPEM, keyPEM)
	}

	return true, nil
}
go
{ "resource": "" }
q7089
startNetworkInterceptor
train
// startNetworkInterceptor initializes and starts every netfilter queue
// that receives network (incoming) traffic. Each failed queue creation
// is retried up to 5 times, 3 seconds apart, before failing fatally.
func (d *Datapath) startNetworkInterceptor(ctx context.Context) {
	var err error

	nfq := make([]nfqueue.Verdict, d.filterQueue.GetNumNetworkQueues())

	for i := uint16(0); i < d.filterQueue.GetNumNetworkQueues(); i++ {
		// Initialize all the queues
		nfq[i], err = nfqueue.CreateAndStartNfQueue(ctx, d.filterQueue.GetNetworkQueueStart()+i, d.filterQueue.GetNetworkQueueSize(), nfqueue.NfDefaultPacketSize, networkCallback, errorCallback, d)
		if err != nil {
			for retry := 0; retry < 5 && err != nil; retry++ {
				nfq[i], err = nfqueue.CreateAndStartNfQueue(ctx, d.filterQueue.GetNetworkQueueStart()+i, d.filterQueue.GetNetworkQueueSize(), nfqueue.NfDefaultPacketSize, networkCallback, errorCallback, d)
				// NOTE(review): the 3s wait runs after each attempt, even a
				// successful final one — confirm the ordering is intended.
				<-time.After(3 * time.Second)
			}
			if err != nil {
				zap.L().Fatal("Unable to initialize netfilter queue", zap.Error(err))
			}
		}
	}
}
go
{ "resource": "" }
q7090
startApplicationInterceptor
train
func (d *Datapath) startApplicationInterceptor(ctx context.Context) { var err error nfq := make([]nfqueue.Verdict, d.filterQueue.GetNumApplicationQueues()) for i := uint16(0); i < d.filterQueue.GetNumApplicationQueues(); i++ { nfq[i], err = nfqueue.CreateAndStartNfQueue(ctx, d.filterQueue.GetApplicationQueueStart()+i, d.filterQueue.GetApplicationQueueSize(), nfqueue.NfDefaultPacketSize, appCallBack, errorCallback, d) if err != nil { for retry := 0; retry < 5 && err != nil; retry++ { nfq[i], err = nfqueue.CreateAndStartNfQueue(ctx, d.filterQueue.GetApplicationQueueStart()+i, d.filterQueue.GetApplicationQueueSize(), nfqueue.NfDefaultPacketSize, appCallBack, errorCallback, d) <-time.After(3 * time.Second) } if err != nil { zap.L().Fatal("Unable to initialize netfilter queue", zap.Int("QueueNum", int(d.filterQueue.GetNetworkQueueStart()+i)), zap.Error(err)) } } } }
go
{ "resource": "" }
q7091
NewPURuntime
train
func NewPURuntime(name string, pid int, nsPath string, tags *TagStore, ips ExtendedMap, puType common.PUType, options *OptionsType) *PURuntime { if tags == nil { tags = NewTagStore() } if ips == nil { ips = ExtendedMap{} } if options == nil { options = &OptionsType{} } return &PURuntime{ puType: puType, tags: tags, ips: ips, options: options, pid: pid, nsPath: nsPath, name: name, } }
go
{ "resource": "" }
q7092
MarshalJSON
train
// MarshalJSON serializes the runtime through the exported
// PURuntimeJSON mirror struct, since the PURuntime fields themselves
// are unexported.
func (r *PURuntime) MarshalJSON() ([]byte, error) {
	return json.Marshal(&PURuntimeJSON{
		PUType:      r.puType,
		Pid:         r.pid,
		NSPath:      r.nsPath,
		Name:        r.name,
		IPAddresses: r.ips,
		Tags:        r.tags,
		Options:     r.options,
	})
}
go
{ "resource": "" }
q7093
UnmarshalJSON
train
// UnmarshalJSON deserializes into the exported PURuntimeJSON mirror
// struct and copies each field onto the unexported PURuntime fields.
func (r *PURuntime) UnmarshalJSON(param []byte) error {
	a := &PURuntimeJSON{}
	if err := json.Unmarshal(param, &a); err != nil {
		return err
	}
	r.pid = a.Pid
	r.nsPath = a.NSPath
	r.name = a.Name
	r.ips = a.IPAddresses
	r.tags = a.Tags
	r.options = a.Options
	r.puType = a.PUType
	return nil
}
go
{ "resource": "" }
q7094
Pid
train
func (r *PURuntime) Pid() int { r.Lock() defer r.Unlock() return r.pid }
go
{ "resource": "" }
q7095
SetPid
train
func (r *PURuntime) SetPid(pid int) { r.Lock() defer r.Unlock() r.pid = pid }
go
{ "resource": "" }
q7096
NSPath
train
func (r *PURuntime) NSPath() string { r.Lock() defer r.Unlock() return r.nsPath }
go
{ "resource": "" }
q7097
SetNSPath
train
func (r *PURuntime) SetNSPath(nsPath string) { r.Lock() defer r.Unlock() r.nsPath = nsPath }
go
{ "resource": "" }
q7098
SetOptions
train
func (r *PURuntime) SetOptions(options OptionsType) { r.Lock() defer r.Unlock() r.options = &options }
go
{ "resource": "" }
q7099
Name
train
func (r *PURuntime) Name() string { r.Lock() defer r.Unlock() return r.name }
go
{ "resource": "" }