_id
stringlengths 2
7
| title
stringlengths 1
118
| partition
stringclasses 3
values | text
stringlengths 52
85.5k
| language
stringclasses 1
value | meta_information
dict |
|---|---|---|---|---|---|
q6800
|
NewRPCClient
|
train
|
// NewRPCClient indicates an expected call of NewRPCClient.
// Generated gomock recorder method — do not edit by hand.
func (mr *MockRPCClientMockRecorder) NewRPCClient(contextID, channel, rpcSecret interface{}) *gomock.Call {
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewRPCClient", reflect.TypeOf((*MockRPCClient)(nil).NewRPCClient), contextID, channel, rpcSecret)
}
|
go
|
{
"resource": ""
}
|
q6801
|
GetRPCClient
|
train
|
// GetRPCClient mocks the base method: it records the call with the
// controller and returns whatever values the test expectation supplied.
// Generated gomock code — do not edit by hand.
func (m *MockRPCClient) GetRPCClient(contextID string) (*rpcwrapper.RPCHdl, error) {
	ret := m.ctrl.Call(m, "GetRPCClient", contextID)
	ret0, _ := ret[0].(*rpcwrapper.RPCHdl)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}
|
go
|
{
"resource": ""
}
|
q6802
|
RemoteCall
|
train
|
// RemoteCall mocks the base method: it records the call and returns the
// stubbed error, if any. Generated gomock code — do not edit by hand.
func (m *MockRPCClient) RemoteCall(contextID, methodName string, req *rpcwrapper.Request, resp *rpcwrapper.Response) error {
	ret := m.ctrl.Call(m, "RemoteCall", contextID, methodName, req, resp)
	ret0, _ := ret[0].(error)
	return ret0
}
|
go
|
{
"resource": ""
}
|
q6803
|
RemoteCall
|
train
|
// RemoteCall indicates an expected call of RemoteCall.
// Generated gomock recorder method — do not edit by hand.
func (mr *MockRPCClientMockRecorder) RemoteCall(contextID, methodName, req, resp interface{}) *gomock.Call {
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoteCall", reflect.TypeOf((*MockRPCClient)(nil).RemoteCall), contextID, methodName, req, resp)
}
|
go
|
{
"resource": ""
}
|
q6804
|
DestroyRPCClient
|
train
|
// DestroyRPCClient mocks the base method; it has no return values.
// Generated gomock code — do not edit by hand.
func (m *MockRPCClient) DestroyRPCClient(contextID string) {
	m.ctrl.Call(m, "DestroyRPCClient", contextID)
}
|
go
|
{
"resource": ""
}
|
q6805
|
DestroyRPCClient
|
train
|
// DestroyRPCClient indicates an expected call of DestroyRPCClient.
// Generated gomock recorder method — do not edit by hand.
func (mr *MockRPCClientMockRecorder) DestroyRPCClient(contextID interface{}) *gomock.Call {
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DestroyRPCClient", reflect.TypeOf((*MockRPCClient)(nil).DestroyRPCClient), contextID)
}
|
go
|
{
"resource": ""
}
|
q6806
|
ContextList
|
train
|
// ContextList mocks the base method and returns the stubbed slice of
// context IDs. Generated gomock code — do not edit by hand.
func (m *MockRPCClient) ContextList() []string {
	ret := m.ctrl.Call(m, "ContextList")
	ret0, _ := ret[0].([]string)
	return ret0
}
|
go
|
{
"resource": ""
}
|
q6807
|
CheckValidity
|
train
|
// CheckValidity indicates an expected call of CheckValidity.
// Generated gomock recorder method — do not edit by hand.
func (mr *MockRPCClientMockRecorder) CheckValidity(req, secret interface{}) *gomock.Call {
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CheckValidity", reflect.TypeOf((*MockRPCClient)(nil).CheckValidity), req, secret)
}
|
go
|
{
"resource": ""
}
|
q6808
|
StartServer
|
train
|
// StartServer mocks the base method: it records the call and returns the
// stubbed error, if any. Generated gomock code — do not edit by hand.
func (m *MockRPCServer) StartServer(ctx context.Context, protocol, path string, handler interface{}) error {
	ret := m.ctrl.Call(m, "StartServer", ctx, protocol, path, handler)
	ret0, _ := ret[0].(error)
	return ret0
}
|
go
|
{
"resource": ""
}
|
q6809
|
StartServer
|
train
|
// StartServer indicates an expected call of StartServer.
// Generated gomock recorder method — do not edit by hand.
func (mr *MockRPCServerMockRecorder) StartServer(ctx, protocol, path, handler interface{}) *gomock.Call {
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StartServer", reflect.TypeOf((*MockRPCServer)(nil).StartServer), ctx, protocol, path, handler)
}
|
go
|
{
"resource": ""
}
|
q6810
|
ProcessMessage
|
train
|
// ProcessMessage indicates an expected call of ProcessMessage.
// Generated gomock recorder method — do not edit by hand.
func (mr *MockRPCServerMockRecorder) ProcessMessage(req, secret interface{}) *gomock.Call {
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ProcessMessage", reflect.TypeOf((*MockRPCServer)(nil).ProcessMessage), req, secret)
}
|
go
|
{
"resource": ""
}
|
q6811
|
CheckValidity
|
train
|
// CheckValidity mocks the base method and returns the stubbed boolean.
// Generated gomock code — do not edit by hand.
func (m *MockRPCServer) CheckValidity(req *rpcwrapper.Request, secret string) bool {
	ret := m.ctrl.Call(m, "CheckValidity", req, secret)
	ret0, _ := ret[0].(bool)
	return ret0
}
|
go
|
{
"resource": ""
}
|
q6812
|
Map
|
train
|
// Map converts a watched Pod event into reconcile requests for this
// node. Non-Pod objects, pods scheduled on other nodes, and
// host-network pods (when host-pod handling is disabled) map to no
// requests at all.
func (w *WatchPodMapper) Map(obj handler.MapObject) []reconcile.Request {
	pod, ok := obj.Object.(*corev1.Pod)
	switch {
	case !ok:
		return nil
	case pod.Spec.NodeName != w.nodeName:
		return nil
	case pod.Spec.HostNetwork && !w.enableHostPods:
		return nil
	}
	request := reconcile.Request{
		NamespacedName: types.NamespacedName{
			Name:      pod.Name,
			Namespace: pod.Namespace,
		},
	}
	return []reconcile.Request{request}
}
|
go
|
{
"resource": ""
}
|
q6813
|
NewPKIIssuer
|
train
|
// NewPKIIssuer returns a PKITokenIssuer that signs tokens with the
// supplied ECDSA private key using the ES256 JWT signing method.
func NewPKIIssuer(privateKey *ecdsa.PrivateKey) PKITokenIssuer {
	tm := &tokenManager{
		signMethod: jwt.SigningMethodES256,
		privateKey: privateKey,
	}
	return tm
}
|
go
|
{
"resource": ""
}
|
q6814
|
NewPKIVerifier
|
train
|
// NewPKIVerifier returns a PKITokenVerifier that validates tokens
// against any of the given ECDSA public keys. Verified keys are cached
// for cacheValidity; a non-positive cacheValidity falls back to the
// package default.
func NewPKIVerifier(publicKeys []*ecdsa.PublicKey, cacheValidity time.Duration) PKITokenVerifier {
	validity := cacheValidity
	if validity <= 0 {
		validity = defaultValidity * time.Second
	}
	return &tokenManager{
		publicKeys: publicKeys,
		signMethod: jwt.SigningMethodES256,
		keycache:   cache.NewCacheWithExpiration("PKIVerifierKey", validity),
		validity:   validity,
	}
}
|
go
|
{
"resource": ""
}
|
q6815
|
Verify
|
train
|
// Verify validates a JWT against the configured public keys and returns
// the datapath key material carried in its claims. Successful results
// are cached by the raw token string so repeat verifications are cheap.
func (p *tokenManager) Verify(token []byte) (*DatapathKey, error) {
	tokenString := string(token)
	// Fast path: a previously verified token is served from the cache.
	if pk, err := p.keycache.Get(tokenString); err == nil {
		return pk.(*DatapathKey), err
	}
	claims := &verifierClaims{}
	var JWTToken *jwt.Token
	var err error
	// Try every configured key; the token is accepted if any one of
	// them validates the signature.
	for _, pk := range p.publicKeys {
		if pk == nil {
			continue
		}
		JWTToken, err = jwt.ParseWithClaims(tokenString, claims, func(_ *jwt.Token) (interface{}, error) { // nolint
			return pk, nil
		})
		if err != nil || !JWTToken.Valid {
			continue
		}
		dp := &DatapathKey{
			PublicKey: KeyFromClaims(claims),
			Tags:      claims.Tags,
		}
		// Only cache the result if the token remains valid for at least
		// one full cache-validity window; otherwise a cached entry could
		// outlive the token's own expiry.
		if time.Now().Add(p.validity).Unix() <= claims.ExpiresAt {
			p.keycache.AddOrUpdate(tokenString, dp)
		}
		return dp, nil
	}
	return nil, errors.New("unable to verify token against any available public key")
}
|
go
|
{
"resource": ""
}
|
q6816
|
CreateTokenFromCertificate
|
train
|
// CreateTokenFromCertificate creates a signed JWT carrying the
// certificate's ECDSA public-key coordinates and the provided tags.
// The token expiry is pinned to the certificate's NotAfter so the token
// can never outlive the certificate.
//
// Fix: the original performed an unchecked type assertion on
// cert.PublicKey and would panic on a certificate that does not carry
// an ECDSA key (e.g. RSA); it now returns an error instead.
func (p *tokenManager) CreateTokenFromCertificate(cert *x509.Certificate, tags []string) ([]byte, error) {
	ecdsaKey, ok := cert.PublicKey.(*ecdsa.PublicKey)
	if !ok {
		// Non-ECDSA certificates cannot be represented in the claims.
		return []byte{}, jwt.ErrInvalidKey
	}
	// Combine the application claims with the standard claims.
	claims := &verifierClaims{
		X:    ecdsaKey.X,
		Y:    ecdsaKey.Y,
		Tags: tags,
	}
	claims.ExpiresAt = cert.NotAfter.Unix()
	// Create the token and sign with our key.
	strtoken, err := jwt.NewWithClaims(p.signMethod, claims).SignedString(p.privateKey)
	if err != nil {
		return []byte{}, err
	}
	return []byte(strtoken), nil
}
|
go
|
{
"resource": ""
}
|
q6817
|
KeyFromClaims
|
train
|
// KeyFromClaims reconstructs the ECDSA P-256 public key from the X/Y
// coordinates embedded in the verifier claims.
func KeyFromClaims(claims *verifierClaims) *ecdsa.PublicKey {
	key := new(ecdsa.PublicKey)
	key.Curve = elliptic.P256()
	key.X = claims.X
	key.Y = claims.Y
	return key
}
|
go
|
{
"resource": ""
}
|
q6818
|
NewTCPProxy
|
train
|
// NewTCPProxy creates a new instance of the TCP proxy, capturing the
// local interface addresses at construction time.
func NewTCPProxy(
	tp tokenaccessor.TokenAccessor,
	c collector.EventCollector,
	puContext string,
	registry *serviceregistry.Registry,
	certificate *tls.Certificate,
	caPool *x509.CertPool,
) *Proxy {
	proxy := &Proxy{
		tokenaccessor: tp,
		collector:     c,
		puContext:     puContext,
		registry:      registry,
		certificate:   certificate,
		ca:            caPool,
		// Snapshot the local IPs once; used to distinguish local traffic.
		localIPs: markedconn.GetInterfaces(),
	}
	return proxy
}
|
go
|
{
"resource": ""
}
|
q6819
|
RunNetworkServer
|
train
|
// RunNetworkServer starts serving connections accepted on the listener
// in a background goroutine and returns immediately. The encrypted flag
// is accepted for interface compatibility; encryption is handled
// transparently for TCP.
func (p *Proxy) RunNetworkServer(ctx context.Context, listener net.Listener, encrypted bool) error {
	// Encryption is done transparently for TCP.
	go p.serve(ctx, listener)
	return nil
}
|
go
|
{
"resource": ""
}
|
q6820
|
UpdateSecrets
|
train
|
// UpdateSecrets atomically swaps the TLS certificate and CA pool used
// for new connections. The secrets and PEM arguments are accepted for
// interface compatibility but are not used by this proxy.
func (p *Proxy) UpdateSecrets(cert *tls.Certificate, caPool *x509.CertPool, s secrets.Secrets, certPEM, keyPEM string) {
	p.Lock()
	defer p.Unlock()
	p.certificate = cert
	p.ca = caPool
}
|
go
|
{
"resource": ""
}
|
q6821
|
handle
|
train
|
// handle proxies a single accepted upstream connection: it dials the
// original destination, runs the end-point authorization handshake, and
// then shuttles data between the two sides until either closes.
func (p *Proxy) handle(ctx context.Context, upConn net.Conn) {
	defer upConn.Close() // nolint
	// The listener hands us ProxiedConnections, which preserve the
	// pre-NAT destination of the intercepted flow.
	ip, port := upConn.(*markedconn.ProxiedConnection).GetOriginalDestination()
	downConn, err := p.downConnection(ctx, ip, port)
	if err != nil {
		// Dial failed: report a rejected flow so the failure is visible
		// in the collector, then bail out.
		flowproperties := &proxyFlowProperties{
			DestIP:     ip.String(),
			DestPort:   uint16(port),
			SourceIP:   upConn.RemoteAddr().(*net.TCPAddr).IP.String(),
			DestType:   collector.EndPointTypeExternalIP,
			SourceType: collector.EnpointTypePU,
		}
		puContext, perr := p.puContextFromContextID(p.puContext)
		if perr != nil {
			zap.L().Error("Unable to find policy context for tcp connection",
				zap.String("Context", p.puContext),
				zap.Error(perr))
			return
		}
		p.reportRejectedFlow(flowproperties, puContext.ManagementID(), "default", puContext, collector.UnableToDial, nil, nil)
		return
	}
	defer downConn.Close() // nolint
	// Now let us handle the state machine for the down connection
	isEncrypted, err := p.CompleteEndPointAuthorization(ip, port, upConn, downConn)
	if err != nil {
		zap.L().Error("Error on Authorization", zap.Error(err))
		return
	}
	// Authorization succeeded: pump bytes in both directions.
	if err := p.proxyData(ctx, isEncrypted, upConn, downConn); err != nil {
		zap.L().Debug("Error will proxying data", zap.Error(err))
	}
}
|
go
|
{
"resource": ""
}
|
q6822
|
downConnection
|
train
|
// downConnection opens a marked TCP connection toward the original
// destination; the socket mark keeps the dialed flow from being
// re-intercepted by our own rules.
func (p *Proxy) downConnection(ctx context.Context, ip net.IP, port int) (net.Conn, error) {
	target := &net.TCPAddr{IP: ip, Port: port}
	return markedconn.DialMarkedWithContext(ctx, "tcp4", target.String(), proxyMarkInt)
}
|
go
|
{
"resource": ""
}
|
q6823
|
RefreshPUs
|
train
|
// RefreshPUs re-runs the Kubernetes extractor for every PU associated
// with the given pod and pushes an update event to the policy handler
// for each managed container. Returns an error if the pod is nil or if
// extraction/update fails for any PU.
func (m *KubernetesMonitor) RefreshPUs(ctx context.Context, pod *api.Pod) error {
	if pod == nil {
		return fmt.Errorf("pod is nil")
	}
	podNamespace := pod.GetNamespace()
	podName := pod.GetName()
	puIDs := m.cache.getPUIDsbyPod(podNamespace, podName)
	for _, puid := range puIDs {
		dockerRuntime := m.cache.getDockerRuntimeByPUID(puid)
		if dockerRuntime == nil {
			// No docker runtime cached for this PU; nothing to refresh.
			continue
		}
		kubernetesRuntime, managedContainer, err := m.kubernetesExtractor(dockerRuntime, pod)
		if err != nil {
			return fmt.Errorf("error while processing Kubernetes pod %s/%s for container %s %s", podNamespace, podName, puid, err)
		}
		// UnmanagedContainers are simply ignored. It should not come this far if it is a non managed container anyways.
		if !managedContainer {
			zap.L().Debug("unmanaged Kubernetes container", zap.String("puID", puid), zap.String("podNamespace", podNamespace), zap.String("podName", podName))
			continue
		}
		// We keep the cache uptoDate for future queries
		m.cache.updatePUIDCache(podNamespace, podName, puid, dockerRuntime, kubernetesRuntime)
		if err := m.handlers.Policy.HandlePUEvent(ctx, puid, common.EventUpdate, kubernetesRuntime); err != nil {
			return err
		}
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q6824
|
getKubernetesInformation
|
train
|
// getKubernetesInformation extracts the pod namespace and pod name from
// the identifier tags attached to the runtime. It returns an error when
// either identifier tag is missing.
//
// Fix: error strings were capitalized ("Error getting ..."), which
// violates the Go convention that error strings are lowercase and not
// prefixed with "Error".
func getKubernetesInformation(runtime policy.RuntimeReader) (string, string, error) {
	podNamespace, ok := runtime.Tag(KubernetesPodNamespaceIdentifier)
	if !ok {
		return "", "", fmt.Errorf("unable to get Kubernetes pod namespace")
	}
	podName, ok := runtime.Tag(KubernetesPodNameIdentifier)
	if !ok {
		return "", "", fmt.Errorf("unable to get Kubernetes pod name")
	}
	return podNamespace, podName, nil
}
|
go
|
{
"resource": ""
}
|
q6825
|
decorateRuntime
|
train
|
// decorateRuntime attaches policy extensions to a PU runtime on the
// start event. Host-network pause containers are marked as host-mode
// with their own PUID; other containers are linked to their pod's
// pause-container PUID so the datapath can associate them.
func (m *KubernetesMonitor) decorateRuntime(puID string, runtimeInfo policy.RuntimeReader, event common.Event,
	podName, podNamespace string) (err error) {
	// Do nothing on other events apart from start event.
	if event != common.EventStart {
		return nil
	}
	puRuntime, ok := runtimeInfo.(*policy.PURuntime)
	if !ok {
		zap.L().Error("Found invalid runtime for puid", zap.String("puid", puID))
		return fmt.Errorf("invalid runtime for puid:%s", puID)
	}
	extensions := policy.ExtendedMap{}
	// pause container with host net set to true.
	if runtimeInfo.PUType() == common.LinuxProcessPU {
		extensions[constants.DockerHostMode] = "true"
		extensions[constants.DockerHostPUID] = puID
		options := puRuntime.Options()
		options.PolicyExtensions = extensions
		// set Options on docker runtime.
		puRuntime.SetOptions(options)
		return nil
	}
	pausePUID := ""
	puIDs := m.cache.getPUIDsbyPod(podNamespace, podName)
	// get the puid of the pause container.
	for _, id := range puIDs {
		rtm := m.cache.getDockerRuntimeByPUID(id)
		if rtm == nil {
			continue
		}
		// Host-net pause container found: remember its PUID and stop.
		if isPodInfraContainer(rtm) && rtm.PUType() == common.LinuxProcessPU {
			pausePUID = id
			break
		}
		// if the pause container is not host net container, nothing to do.
		if isPodInfraContainer(rtm) {
			return nil
		}
	}
	extensions[constants.DockerHostPUID] = pausePUID
	options := puRuntime.Options()
	options.PolicyExtensions = extensions
	// set Options on docker runtime.
	puRuntime.SetOptions(options)
	return nil
}
|
go
|
{
"resource": ""
}
|
q6826
|
NewMockStatsClient
|
train
|
// NewMockStatsClient creates a new mock instance bound to the given
// gomock controller. Generated gomock code — do not edit by hand.
func NewMockStatsClient(ctrl *gomock.Controller) *MockStatsClient {
	mock := &MockStatsClient{ctrl: ctrl}
	mock.recorder = &MockStatsClientMockRecorder{mock}
	return mock
}
|
go
|
{
"resource": ""
}
|
q6827
|
SendStats
|
train
|
// SendStats indicates an expected call of SendStats.
// Generated gomock recorder method — do not edit by hand.
func (mr *MockStatsClientMockRecorder) SendStats() *gomock.Call {
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendStats", reflect.TypeOf((*MockStatsClient)(nil).SendStats))
}
|
go
|
{
"resource": ""
}
|
q6828
|
processNetUDPPacket
|
train
|
// processNetUDPPacket drives the receiver side of the UDP handshake
// state machine for a network packet: Syn -> SynAck -> Ack, after which
// the connection transitions to the data state. It returns the policy
// action and connection claims when the packet carries identity.
func (d *Datapath) processNetUDPPacket(udpPacket *packet.Packet, context *pucontext.PUContext, conn *connection.UDPConnection) (action interface{}, claims *tokens.ConnectionClaims, err error) {
	// Extra check, just in case the caller didn't provide a connection.
	if conn == nil {
		return nil, nil, fmt.Errorf("no connection provided")
	}
	udpPacketType := udpPacket.GetUDPType()
	// Update connection state in the internal state machine tracker
	switch udpPacketType {
	case packet.UDPSynMask:
		// Parse the packet for the identity information.
		action, claims, err = d.processNetworkUDPSynPacket(context, conn, udpPacket)
		if err != nil {
			return nil, nil, err
		}
		// Send the return packet.
		if err = d.sendUDPSynAckPacket(udpPacket, context, conn); err != nil {
			return nil, nil, err
		}
		// Mark the state that we have transmitted a SynAck packet.
		conn.SetState(connection.UDPReceiverSendSynAck)
		return action, claims, nil
	case packet.UDPAckMask:
		// Retrieve the header and parse the signatures.
		if err = d.processNetworkUDPAckPacket(udpPacket, context, conn); err != nil {
			zap.L().Error("Error during authorization", zap.Error(err))
			return nil, nil, err
		}
		// Handshake complete on the receiver side.
		conn.SetState(connection.UDPReceiverProcessedAck)
		return nil, nil, nil
	case packet.UDPSynAckMask:
		// Process the synack header and claims of the other side.
		action, claims, err = d.processNetworkUDPSynAckPacket(udpPacket, context, conn)
		if err != nil {
			zap.L().Error("UDP Syn ack failed with", zap.Error(err))
			return nil, nil, err
		}
		// Send back the acknowledgement.
		err = d.sendUDPAckPacket(udpPacket, context, conn)
		if err != nil {
			zap.L().Error("Unable to send udp Syn ack failed", zap.Error(err))
			return nil, nil, err
		}
		conn.SetState(connection.UDPClientSendAck)
		return action, claims, nil
	default:
		// Plain data packet: only legal once the handshake has finished
		// on either side of the connection.
		state := conn.GetState()
		if state == connection.UDPReceiverProcessedAck || state == connection.UDPClientSendAck || state == connection.UDPData {
			conn.SetState(connection.UDPData)
			return nil, nil, nil
		}
		return nil, nil, fmt.Errorf("invalid packet at state: %d", state)
	}
}
|
go
|
{
"resource": ""
}
|
q6829
|
ProcessApplicationUDPPacket
|
train
|
// ProcessApplicationUDPPacket handles an outgoing application UDP
// packet. Until the connection is authorized, packets are queued and
// the identity negotiation is triggered; afterwards packets flow as
// data. Returned errors of the "Drop in nfq - buffered" form signal
// the caller to drop the packet because it was queued for later replay.
func (d *Datapath) ProcessApplicationUDPPacket(p *packet.Packet) (conn *connection.UDPConnection, err error) {
	if d.packetLogs {
		zap.L().Debug("Processing application UDP packet ",
			zap.String("flow", p.L4FlowHash()),
		)
		defer zap.L().Debug("Finished Processing UDP application packet ",
			zap.String("flow", p.L4FlowHash()),
			zap.Error(err),
		)
	}
	// First retrieve the connection state.
	conn, err = d.appUDPRetrieveState(p)
	if err != nil {
		zap.L().Debug("Connection not found", zap.Error(err))
		return nil, fmt.Errorf("Received packet from unenforced process: %s", err)
	}
	// We are processing only one packet from a given connection at a time.
	conn.Lock()
	defer conn.Unlock()
	// do some pre processing.
	if d.service != nil {
		// PreProcessServiceInterface
		if !d.service.PreProcessUDPAppPacket(p, conn.Context, conn, packet.UDPSynMask) {
			p.Print(packet.PacketFailureService)
			return nil, fmt.Errorf("pre service processing failed for UDP application packet")
		}
	}
	triggerControlProtocol := false
	switch conn.GetState() {
	case connection.UDPStart:
		// Queue the packet. We will send it after we authorize the session.
		if err = conn.QueuePackets(p); err != nil {
			// unable to queue packets, perhaps queue is full. if start
			// machine is still in start state, we can start authorisation
			// again. A drop counter is incremented.
			zap.L().Debug("udp queue full for connection", zap.String("flow", p.L4FlowHash()))
		}
		// Set the state indicating that we send out a Syn packet
		conn.SetState(connection.UDPClientSendSyn)
		// Drop the packet. We stored it in the queue.
		triggerControlProtocol = true
	case connection.UDPReceiverProcessedAck, connection.UDPClientSendAck, connection.UDPData:
		// Handshake already completed: this is a data packet.
		conn.SetState(connection.UDPData)
	default:
		// Handshake still in flight: buffer the packet and drop it here.
		zap.L().Debug("Packet is added to the queue", zap.String("flow", p.L4FlowHash()))
		if err = conn.QueuePackets(p); err != nil {
			return conn, fmt.Errorf("Unable to queue packets:%s", err)
		}
		return conn, fmt.Errorf("Drop in nfq - buffered")
	}
	if d.service != nil {
		// PostProcessServiceInterface
		if !d.service.PostProcessUDPAppPacket(p, nil, conn.Context, conn) {
			p.Print(packet.PacketFailureService)
			return conn, fmt.Errorf("Encryption failed for application packet")
		}
	}
	if triggerControlProtocol {
		err = d.triggerNegotiation(p, conn.Context, conn)
		if err != nil {
			return conn, err
		}
		return conn, fmt.Errorf("Drop in nfq - buffered")
	}
	return conn, nil
}
|
go
|
{
"resource": ""
}
|
q6830
|
triggerNegotiation
|
train
|
// triggerNegotiation starts the UDP identity handshake for a new
// connection: it clones the packet headers, attaches a Syn token, sends
// it with retransmission, and registers the connection in the tracking
// caches.
func (d *Datapath) triggerNegotiation(udpPacket *packet.Packet, context *pucontext.PUContext, conn *connection.UDPConnection) (err error) {
	udpOptions := packet.CreateUDPAuthMarker(packet.UDPSynMask)
	udpData, err := d.tokenAccessor.CreateSynPacketToken(context, &conn.Auth)
	if err != nil {
		return err
	}
	newPacket, err := d.clonePacketHeaders(udpPacket)
	if err != nil {
		return fmt.Errorf("Unable to clone packet: %s", err)
	}
	// Attach the UDP data and token
	newPacket.UDPTokenAttach(udpOptions, udpData)
	// send packet; retransmission stops when the SynAck arrives and
	// closes the Syn channel.
	err = d.writeWithRetransmit(newPacket.GetBuffer(0), conn, conn.SynChannel())
	if err != nil {
		zap.L().Error("Unable to send syn token on raw socket", zap.Error(err))
		return fmt.Errorf("unable to transmit syn packet")
	}
	// Populate the caches to track the connection
	hash := udpPacket.L4FlowHash()
	d.udpAppOrigConnectionTracker.AddOrUpdate(hash, conn)
	d.udpSourcePortConnectionCache.AddOrUpdate(newPacket.SourcePortHash(packet.PacketTypeApplication), conn)
	d.udpNatConnectionTracker.AddOrUpdate(newPacket.SourcePortHash(packet.PacketTypeApplication), newPacket.SourcePortHash(packet.PacketTypeNetwork))
	return nil
}
|
go
|
{
"resource": ""
}
|
q6831
|
sendUDPSynAckPacket
|
train
|
// sendUDPSynAckPacket reverses the incoming Syn packet into a SynAck,
// attaches the SynAck identity token, and transmits it with
// retransmission until the peer's Ack arrives.
func (d *Datapath) sendUDPSynAckPacket(udpPacket *packet.Packet, context *pucontext.PUContext, conn *connection.UDPConnection) (err error) {
	// Create UDP Option
	udpOptions := packet.CreateUDPAuthMarker(packet.UDPSynAckMask)
	udpData, err := d.tokenAccessor.CreateSynAckPacketToken(context, &conn.Auth, claimsheader.NewClaimsHeader())
	if err != nil {
		return err
	}
	udpPacket.CreateReverseFlowPacket(udpPacket.SourceAddress(), udpPacket.SourcePort())
	// Attach the UDP data and token
	udpPacket.UDPTokenAttach(udpOptions, udpData)
	// If we already have a background re-transmit session, stop it at this
	// point. We will start from the beginning.
	if conn.GetState() == connection.UDPReceiverSendSynAck {
		conn.SynAckStop()
	}
	// Only start the retransmission timer once. Not on every packet.
	if err := d.writeWithRetransmit(udpPacket.GetBuffer(0), conn, conn.SynAckChannel()); err != nil {
		zap.L().Debug("Unable to send synack token on raw socket", zap.Error(err))
		return err
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q6832
|
processNetworkUDPSynPacket
|
train
|
// processNetworkUDPSynPacket validates the identity token of an
// incoming UDP Syn, evaluates receive policy against the peer's claims,
// and — when accepted — records the connection in the conntrack caches
// along with the matched policies.
func (d *Datapath) processNetworkUDPSynPacket(context *pucontext.PUContext, conn *connection.UDPConnection, udpPacket *packet.Packet) (action interface{}, claims *tokens.ConnectionClaims, err error) {
	claims, err = d.tokenAccessor.ParsePacketToken(&conn.Auth, udpPacket.ReadUDPToken())
	if err != nil {
		d.reportUDPRejectedFlow(udpPacket, conn, collector.DefaultEndPoint, context.ManagementID(), context, tokens.CodeFromErr(err), nil, nil, false)
		return nil, nil, fmt.Errorf("UDP Syn packet dropped because of invalid token: %s", err)
	}
	// if there are no claims we must drop the connection and we drop the Syn
	// packet. The source will retry but we have no state to maintain here.
	if claims == nil {
		d.reportUDPRejectedFlow(udpPacket, conn, collector.DefaultEndPoint, context.ManagementID(), context, collector.InvalidToken, nil, nil, false)
		return nil, nil, fmt.Errorf("UDP Syn packet dropped because of no claims")
	}
	// Why is this required. Take a look.
	txLabel, _ := claims.T.Get(enforcerconstants.TransmitterLabel)
	// Add the port as a label with an @ prefix. These labels are invalid otherwise
	// If all policies are restricted by port numbers this will allow port-specific policies
	claims.T.AppendKeyValue(enforcerconstants.PortNumberLabelString, strconv.Itoa(int(udpPacket.DestPort())))
	report, pkt := context.SearchRcvRules(claims.T)
	if pkt.Action.Rejected() {
		d.reportUDPRejectedFlow(udpPacket, conn, txLabel, context.ManagementID(), context, collector.PolicyDrop, report, pkt, false)
		return nil, nil, fmt.Errorf("connection rejected because of policy: %s", claims.T.String())
	}
	hash := udpPacket.L4FlowHash()
	// conntrack
	d.udpNetOrigConnectionTracker.AddOrUpdate(hash, conn)
	d.udpAppReplyConnectionTracker.AddOrUpdate(udpPacket.L4ReverseFlowHash(), conn)
	// Record actions
	conn.ReportFlowPolicy = report
	conn.PacketFlowPolicy = pkt
	return pkt, claims, nil
}
|
go
|
{
"resource": ""
}
|
q6833
|
sendUDPFinPacket
|
train
|
// sendUDPFinPacket reverses the packet into a FinAck with an empty
// token and sends it once on the raw socket to signal connection
// teardown to the peer.
func (d *Datapath) sendUDPFinPacket(udpPacket *packet.Packet) (err error) {
	// Create UDP Option
	udpOptions := packet.CreateUDPAuthMarker(packet.UDPFinAckMask)
	udpPacket.CreateReverseFlowPacket(udpPacket.SourceAddress(), udpPacket.SourcePort())
	// Attach the UDP data and token
	udpPacket.UDPTokenAttach(udpOptions, []byte{})
	zap.L().Info("Sending udp fin ack packet", zap.String("packet", udpPacket.L4FlowHash()))
	// no need for retransmits here.
	err = d.udpSocketWriter.WriteSocket(udpPacket.GetBuffer(0))
	if err != nil {
		zap.L().Debug("Unable to send fin packet on raw socket", zap.Error(err))
		return err
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q6834
|
processUDPFinPacket
|
train
|
// processUDPFinPacket tears down local state for a flow whose peer has
// sent a Fin: it marks the flow as finished (idempotently), evicts the
// connection caches, and clears the conntrack mark so subsequent
// packets are re-evaluated.
func (d *Datapath) processUDPFinPacket(udpPacket *packet.Packet) (err error) { // nolint
	// add it to the udp fin cache. If we have already received the fin packet
	// for this flow. There is no need to change the connmark label again.
	if d.udpFinPacketTracker.AddOrUpdate(udpPacket.L4ReverseFlowHash(), true) {
		return nil
	}
	// clear cache entries.
	if err := d.udpAppOrigConnectionTracker.Remove(udpPacket.L4ReverseFlowHash()); err != nil {
		zap.L().Debug("Failed to clean cache udpappOrigConnectionTracker", zap.Error(err))
	}
	if err := d.udpSourcePortConnectionCache.Remove(udpPacket.SourcePortHash(packet.PacketTypeNetwork)); err != nil {
		zap.L().Debug("Failed to clean cache udpsourcePortConnectionCache", zap.Error(err))
	}
	zap.L().Debug("Updating the connmark label", zap.String("flow", udpPacket.L4FlowHash()))
	// Conntrack update failures are logged but not fatal: the flow will
	// eventually age out of the conntrack table on its own.
	if err = d.conntrack.UpdateNetworkFlowMark(
		udpPacket.SourceAddress(),
		udpPacket.DestinationAddress(),
		udpPacket.IPProto(),
		udpPacket.SourcePort(),
		udpPacket.DestPort(),
		constants.DeleteConnmark,
	); err != nil {
		zap.L().Error("Failed to update conntrack table for flow to terminate connection",
			zap.String("app-conn", udpPacket.L4FlowHash()),
			zap.Error(err),
		)
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q6835
|
newRemoteEnforcer
|
train
|
// newRemoteEnforcer builds a RemoteEnforcer, creating default stats,
// debug, and collector clients for any that are passed as nil, and
// resolving the proc mount point from the environment (falling back to
// the package default).
func newRemoteEnforcer(
	ctx context.Context,
	cancel context.CancelFunc,
	service packetprocessor.PacketProcessor,
	rpcHandle rpcwrapper.RPCServer,
	secret string,
	statsClient statsclient.StatsClient,
	collector statscollector.Collector,
	debugClient debugclient.DebugClient,
) (*RemoteEnforcer, error) {
	var err error
	if collector == nil {
		collector = statscollector.NewCollector()
	}
	if statsClient == nil {
		statsClient, err = statsclient.NewStatsClient(collector)
		if err != nil {
			return nil, err
		}
	}
	if debugClient == nil {
		debugClient, err = debugclient.NewDebugClient(collector)
		if err != nil {
			return nil, err
		}
	}
	procMountPoint := os.Getenv(constants.EnvMountPoint)
	if procMountPoint == "" {
		procMountPoint = constants.DefaultProcMountPoint
	}
	return &RemoteEnforcer{
		collector:      collector,
		service:        service,
		rpcSecret:      secret,
		rpcHandle:      rpcHandle,
		procMountPoint: procMountPoint,
		statsClient:    statsClient,
		debugClient:    debugClient,
		ctx:            ctx,
		cancel:         cancel,
		exit:           make(chan bool),
	}, nil
}
|
go
|
{
"resource": ""
}
|
q6836
|
Enforce
|
train
|
// Enforce receives the enforce command from the controller, validates
// the RPC secret, translates the payload into a PUInfo, and programs
// both the supervisor and the enforcer. On failure after initialization
// all local state is cleaned up so the controller can recover by
// launching a fresh remote.
//
// Fix: errors were built with fmt.Errorf(resp.Status), passing a
// non-constant string as the format (go vet printf failure; a '%' in
// the status would garble the message). Use an explicit "%s" verb.
func (s *RemoteEnforcer) Enforce(req rpcwrapper.Request, resp *rpcwrapper.Response) error {
	if !s.rpcHandle.CheckValidity(&req, s.rpcSecret) {
		resp.Status = "enforce message auth failed"
		return fmt.Errorf("%s", resp.Status)
	}
	cmdLock.Lock()
	defer cmdLock.Unlock()
	payload, ok := req.Payload.(rpcwrapper.EnforcePayload)
	if !ok {
		resp.Status = "invalid enforcer payload"
		return fmt.Errorf("%s", resp.Status)
	}
	plc, err := payload.Policy.ToPrivatePolicy(true)
	if err != nil {
		return err
	}
	puInfo := &policy.PUInfo{
		ContextID: payload.ContextID,
		Policy:    plc,
		Runtime:   policy.NewPURuntimeWithDefaults(),
	}
	if s.enforcer == nil || s.supervisor == nil {
		resp.Status = "enforcer not initialized - cannot enforce"
		return fmt.Errorf("%s", resp.Status)
	}
	// If any error happens, cleanup everything on exit so that we can
	// recover by launching a new remote.
	defer func() {
		if err != nil {
			s.cleanup()
		}
	}()
	if err = s.supervisor.Supervise(payload.ContextID, puInfo); err != nil {
		resp.Status = err.Error()
		return err
	}
	if err = s.enforcer.Enforce(payload.ContextID, puInfo); err != nil {
		resp.Status = err.Error()
		return err
	}
	resp.Status = ""
	return nil
}
|
go
|
{
"resource": ""
}
|
q6837
|
Unenforce
|
train
|
// Unenforce flushes pending stats, then removes supervision and
// enforcement for the PU in the payload. On failure all local state is
// cleaned up so the controller can recover by launching a new remote.
//
// Fix: errors were built with fmt.Errorf(resp.Status), passing a
// non-constant string as the format (go vet printf failure; a '%' in
// the status would garble the message). Use an explicit "%s" verb.
func (s *RemoteEnforcer) Unenforce(req rpcwrapper.Request, resp *rpcwrapper.Response) error {
	if !s.rpcHandle.CheckValidity(&req, s.rpcSecret) {
		resp.Status = "unenforce message auth failed"
		return fmt.Errorf("%s", resp.Status)
	}
	cmdLock.Lock()
	defer cmdLock.Unlock()
	// Flush outstanding stats before the PU disappears.
	s.statsClient.SendStats()
	payload, ok := req.Payload.(rpcwrapper.UnEnforcePayload)
	if !ok {
		resp.Status = "invalid unenforcer payload"
		return fmt.Errorf("%s", resp.Status)
	}
	var err error
	// If any error happens, cleanup everything on exit so that we can
	// recover by launching a new remote.
	defer func() {
		if err != nil {
			s.cleanup()
		}
	}()
	if err = s.supervisor.Unsupervise(payload.ContextID); err != nil {
		resp.Status = err.Error()
		return fmt.Errorf("unable to clean supervisor: %s", err)
	}
	if err = s.enforcer.Unenforce(payload.ContextID); err != nil {
		resp.Status = err.Error()
		return fmt.Errorf("unable to stop enforcer: %s", err)
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q6838
|
SetTargetNetworks
|
train
|
// SetTargetNetworks updates the target networks of both the enforcer
// and the supervisor. On failure all local state is cleaned up so the
// controller can recover by launching a new remote.
//
// Fixes: (1) the nil-enforcer path returned an error built from the
// still-empty resp.Status, yielding an empty error message; it now sets
// and reports a meaningful status. (2) the payload type assertion was
// unchecked and would panic on a malformed request. (3) errors used a
// non-constant fmt.Errorf format string (go vet printf failure).
func (s *RemoteEnforcer) SetTargetNetworks(req rpcwrapper.Request, resp *rpcwrapper.Response) error {
	var err error
	if !s.rpcHandle.CheckValidity(&req, s.rpcSecret) {
		resp.Status = "SetTargetNetworks message auth failed" //nolint
		return fmt.Errorf("%s", resp.Status)
	}
	cmdLock.Lock()
	defer cmdLock.Unlock()
	if s.enforcer == nil || s.supervisor == nil {
		resp.Status = "enforcer not initialized - cannot set target networks"
		return fmt.Errorf("%s", resp.Status)
	}
	payload, ok := req.Payload.(rpcwrapper.SetTargetNetworksPayload)
	if !ok {
		resp.Status = "invalid set target networks payload"
		return fmt.Errorf("%s", resp.Status)
	}
	// If any error happens, cleanup everything on exit so that we can
	// recover by launching a new remote.
	defer func() {
		if err != nil {
			s.cleanup()
		}
	}()
	if err = s.enforcer.SetTargetNetworks(payload.Configuration); err != nil {
		return err
	}
	err = s.supervisor.SetTargetNetworks(payload.Configuration)
	return err
}
|
go
|
{
"resource": ""
}
|
q6839
|
EnforcerExit
|
train
|
// EnforcerExit cleans up all enforcer/supervisor/service state and
// signals the main loop (LaunchRemoteEnforcer) to exit.
func (s *RemoteEnforcer) EnforcerExit(req rpcwrapper.Request, resp *rpcwrapper.Response) error {
	s.cleanup()
	s.exit <- true
	return nil
}
|
go
|
{
"resource": ""
}
|
q6840
|
UpdateSecrets
|
train
|
// UpdateSecrets installs new secrets into the remote enforcer. On
// failure all local state is cleaned up so the controller can recover
// by launching a new remote.
//
// Fixes: (1) the nil-enforcer path returned an error built from the
// still-empty resp.Status, yielding an empty error message; it now sets
// and reports a meaningful status. (2) the payload type assertion was
// unchecked and would panic on a malformed request. (3) errors used a
// non-constant fmt.Errorf format string (go vet printf failure).
func (s *RemoteEnforcer) UpdateSecrets(req rpcwrapper.Request, resp *rpcwrapper.Response) error {
	var err error
	if !s.rpcHandle.CheckValidity(&req, s.rpcSecret) {
		resp.Status = "updatesecrets auth failed"
		return fmt.Errorf("%s", resp.Status)
	}
	cmdLock.Lock()
	defer cmdLock.Unlock()
	if s.enforcer == nil {
		resp.Status = "enforcer not initialized - cannot update secrets"
		return fmt.Errorf("%s", resp.Status)
	}
	// If any error happens, cleanup everything on exit so that we can
	// recover by launching a new remote.
	defer func() {
		if err != nil {
			s.cleanup()
		}
	}()
	payload, ok := req.Payload.(rpcwrapper.UpdateSecretsPayload)
	if !ok {
		resp.Status = "invalid update secrets payload"
		return fmt.Errorf("%s", resp.Status)
	}
	s.secrets, err = secrets.NewSecrets(payload.Secrets)
	if err != nil {
		return err
	}
	err = s.enforcer.UpdateSecrets(s.secrets)
	return err
}
|
go
|
{
"resource": ""
}
|
q6841
|
EnableIPTablesPacketTracing
|
train
|
// EnableIPTablesPacketTracing enables iptables-level packet tracing for
// the PU named in the payload, for the requested interval.
//
// Fixes: (1) the payload type assertion was unchecked and would panic
// on a malformed request; (2) the auth-failure error used a
// non-constant fmt.Errorf format string (go vet printf failure).
func (s *RemoteEnforcer) EnableIPTablesPacketTracing(req rpcwrapper.Request, resp *rpcwrapper.Response) error {
	if !s.rpcHandle.CheckValidity(&req, s.rpcSecret) {
		resp.Status = "enable iptable packet tracing auth failed"
		return fmt.Errorf("%s", resp.Status)
	}
	cmdLock.Lock()
	defer cmdLock.Unlock()
	payload, ok := req.Payload.(rpcwrapper.EnableIPTablesPacketTracingPayLoad)
	if !ok {
		resp.Status = "invalid iptables packet tracing payload"
		return fmt.Errorf("%s", resp.Status)
	}
	if err := s.supervisor.EnableIPTablesPacketTracing(context.Background(), payload.ContextID, payload.Interval); err != nil {
		resp.Status = err.Error()
		return err
	}
	resp.Status = ""
	return nil
}
|
go
|
{
"resource": ""
}
|
q6842
|
setupEnforcer
|
train
|
// setupEnforcer initializes the secrets from the init payload and
// creates the datapath enforcer for the remote-container mode.
func (s *RemoteEnforcer) setupEnforcer(payload *rpcwrapper.InitRequestPayload) error {
	var err error
	s.secrets, err = secrets.NewSecrets(payload.Secrets)
	if err != nil {
		return err
	}
	// createEnforcer may return a nil enforcer without an error; treat
	// that as a failure too.
	if s.enforcer, err = createEnforcer(
		payload.MutualAuth,
		payload.FqConfig,
		s.collector,
		s.service,
		s.secrets,
		payload.ServerID,
		payload.Validity,
		constants.RemoteContainer,
		s.procMountPoint,
		payload.ExternalIPCacheTimeout,
		payload.PacketLogs,
		payload.Configuration,
	); err != nil || s.enforcer == nil {
		return fmt.Errorf("Error while initializing remote enforcer, %s", err)
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q6843
|
cleanup
|
train
|
// cleanup tears down supervisor, enforcer, and service state (each
// best-effort: failures are logged, not propagated) and cancels the
// main context so all background goroutines stop.
func (s *RemoteEnforcer) cleanup() {
	if s.supervisor != nil {
		if err := s.supervisor.CleanUp(); err != nil {
			zap.L().Error("unable to clean supervisor state", zap.Error(err))
		}
	}
	if s.enforcer != nil {
		if err := s.enforcer.CleanUp(); err != nil {
			zap.L().Error("unable to clean enforcer state", zap.Error(err))
		}
	}
	if s.service != nil {
		if err := s.service.Stop(); err != nil {
			zap.L().Error("unable to clean service state", zap.Error(err))
		}
	}
	s.cancel()
}
|
go
|
{
"resource": ""
}
|
q6844
|
LaunchRemoteEnforcer
|
train
|
// LaunchRemoteEnforcer launches a remote enforcer instance: it validates
// the namespace, reads the RPC socket and secret from the environment,
// arranges to receive SIGHUP if the parent dies, starts the RPC server,
// and then blocks until either a termination signal or an explicit
// EnforcerExit request arrives.
func LaunchRemoteEnforcer(service packetprocessor.PacketProcessor) error {
	// Before doing anything validate that we are in the right namespace.
	if err := validateNamespace(); err != nil {
		return err
	}
	ctx, cancelMainCtx := context.WithCancel(context.Background())
	defer cancelMainCtx()
	namedPipe := os.Getenv(constants.EnvContextSocket)
	secret := os.Getenv(constants.EnvRPCClientSecret)
	if secret == "" {
		zap.L().Fatal("No secret found")
	}
	// Ask the kernel to deliver SIGHUP when the parent process exits, so
	// an orphaned remote enforcer does not linger.
	flag := unix.SIGHUP
	if err := unix.Prctl(unix.PR_SET_PDEATHSIG, uintptr(flag), 0, 0, 0); err != nil {
		return err
	}
	rpcHandle := rpcwrapper.NewRPCServer()
	re, err := newRemoteEnforcer(ctx, cancelMainCtx, service, rpcHandle, secret, nil, nil, nil)
	if err != nil {
		return err
	}
	go func() {
		if err := rpcHandle.StartServer(ctx, "unix", namedPipe, re); err != nil {
			zap.L().Fatal("Failed to start the server", zap.Error(err))
		}
	}()
	c := make(chan os.Signal, 1)
	signal.Notify(c, syscall.SIGTERM, syscall.SIGINT, syscall.SIGQUIT)
	select {
	case <-c:
		re.cleanup()
	case <-re.exit:
		zap.L().Info("Remote enforcer exiting ...")
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q6845
|
getCEnvVariable
|
train
|
// getCEnvVariable reads an environment variable through the C runtime
// (getenv), returning "" when the variable is unset.
// NOTE(review): C.CString allocates with malloc and is never freed
// here, leaking len(name)+1 bytes per call — confirm and free via
// C.free if the cgo preamble includes <stdlib.h>.
func getCEnvVariable(name string) string {
	val := C.getenv(C.CString(name))
	if val == nil {
		return ""
	}
	return C.GoString(val)
}
|
go
|
{
"resource": ""
}
|
q6846
|
NewAPICache
|
train
|
// NewAPICache builds an APICache from the given HTTP rules. Every
// (method, URI) pair of each rule inserts a scopeRule wrapper into the
// per-method prefix tree so lookups are rooted by HTTP verb.
func NewAPICache(rules []*policy.HTTPRule, id string, external bool) *APICache {
	cache := &APICache{
		ID:          id,
		External:    external,
		methodRoots: make(map[string]*node),
	}
	for _, rule := range rules {
		wrapped := &scopeRule{rule: rule}
		for _, method := range rule.Methods {
			root, exists := cache.methodRoots[method]
			if !exists {
				root = &node{}
				cache.methodRoots[method] = root
			}
			for _, uri := range rule.URIs {
				insert(root, uri, wrapped)
			}
		}
	}
	return cache
}
|
go
|
{
"resource": ""
}
|
q6847
|
FindRule
|
train
|
// FindRule looks up the HTTP rule matching the verb and URI. The
// boolean reports whether a tree entry was found; the rule is nil when
// nothing matched or the stored data is not a scopeRule.
func (c *APICache) FindRule(verb, uri string) (bool, *policy.HTTPRule) {
	found, data := c.Find(verb, uri)
	if data == nil {
		return found, nil
	}
	sr, ok := data.(*scopeRule)
	if !ok {
		return false, nil
	}
	return found, sr.rule
}
|
go
|
{
"resource": ""
}
|
q6848
|
FindAndMatchScope
|
train
|
// FindAndMatchScope finds the rule for verb/uri and evaluates the caller's
// attributes against it. The first result is whether access is granted; the
// second is whether the matched rule is public (public rules admit anyone).
func (c *APICache) FindAndMatchScope(verb, uri string, attributes []string) (bool, bool) {
	found, data := c.Find(verb, uri)
	if !found || data == nil {
		return false, false
	}
	sr, ok := data.(*scopeRule)
	if !ok {
		return false, false
	}
	if sr.rule.Public {
		// Public APIs need no claim matching.
		return true, true
	}
	return c.MatchClaims(sr.rule.ClaimMatchingRules, attributes), false
}
|
go
|
{
"resource": ""
}
|
q6849
|
MatchClaims
|
train
|
// MatchClaims reports whether the presented claims satisfy at least one
// clause of rules. Rules are in OR-of-ANDs form: every claim listed in a
// clause must be present for that clause to match.
func (c *APICache) MatchClaims(rules [][]string, claims []string) bool {
	present := make(map[string]struct{}, len(claims))
	for _, claim := range claims {
		present[claim] = struct{}{}
	}
	for _, clause := range rules {
		remaining := len(clause)
		for _, want := range clause {
			if _, ok := present[want]; ok {
				remaining--
			}
			if remaining == 0 {
				// All requirements of this clause were met.
				return true
			}
		}
	}
	return false
}
|
go
|
{
"resource": ""
}
|
q6850
|
Find
|
train
|
// Find returns whether verb/uri matches a cached entry and the stored data
// for the match. An HTTP verb with no tree yields (false, nil).
func (c *APICache) Find(verb, uri string) (bool, interface{}) {
	if root, ok := c.methodRoots[verb]; ok {
		return search(root, uri)
	}
	return false, nil
}
|
go
|
{
"resource": ""
}
|
q6851
|
parse
|
train
|
// parse splits a URI into its first path segment and the remainder
// (remainder keeps its leading '/'). The root path "/" is returned whole,
// and a string with no further slash is returned with an empty remainder.
func parse(s string) (string, string) {
	if s == "/" {
		return s, ""
	}
	// Start at 1 so a leading '/' stays attached to the first segment.
	for i := 1; i < len(s); i++ {
		if s[i] != '/' {
			continue
		}
		return s[:i], s[i:]
	}
	return s, ""
}
|
go
|
{
"resource": ""
}
|
q6852
|
insert
|
train
|
// insert stores data for the given api path in the tree rooted at n,
// creating one child node per path segment along the way.
func insert(n *node, api string, data interface{}) {
	if api == "" {
		// Path exhausted: this node terminates the URI.
		n.data = data
		n.leaf = true
		return
	}
	prefix, suffix := parse(api)
	if prefix == "/" {
		// Root or terminal slash: store the payload here.
		n.data = data
		n.leaf = true
		return
	}
	if n.children == nil {
		n.children = map[string]*node{}
	}
	child, ok := n.children[prefix]
	if !ok {
		child = &node{}
		n.children[prefix] = child
	}
	insert(child, suffix, data)
}
|
go
|
{
"resource": ""
}
|
q6853
|
getPausePUID
|
train
|
// getPausePUID extracts the pause-container PUID from the policy
// extensions, returning "" when the extensions are nil or the key is
// absent.
func getPausePUID(extensions policy.ExtendedMap) string {
	if extensions == nil {
		return ""
	}
	puid, ok := extensions.Get(constants.DockerHostPUID)
	if !ok {
		return ""
	}
	zap.L().Debug("puid of pause container is", zap.String("puid", puid))
	return puid
}
|
go
|
{
"resource": ""
}
|
q6854
|
policyExtensions
|
train
|
// policyExtensions returns the runtime's PolicyExtensions as an
// ExtendedMap, or nil when the runtime is nil, the extensions are unset, or
// they are not an ExtendedMap.
func policyExtensions(runtime policy.RuntimeReader) policy.ExtendedMap {
	if runtime == nil {
		return nil
	}
	if runtime.Options().PolicyExtensions == nil {
		return nil
	}
	m, ok := runtime.Options().PolicyExtensions.(policy.ExtendedMap)
	if !ok {
		return nil
	}
	return m
}
|
go
|
{
"resource": ""
}
|
q6855
|
isHostNetworkContainer
|
train
|
// isHostNetworkContainer reports whether the runtime is a Linux process PU
// or a container associated with a pause container (non-empty pause PUID).
func isHostNetworkContainer(runtime policy.RuntimeReader) bool {
	if runtime.PUType() == common.LinuxProcessPU {
		return true
	}
	return getPausePUID(policyExtensions(runtime)) != ""
}
|
go
|
{
"resource": ""
}
|
q6856
|
isKubernetesContainer
|
train
|
// isKubernetesContainer reports whether the labels carry the Kubernetes
// pod-namespace label, the marker of a pod-managed container.
func isKubernetesContainer(labels map[string]string) bool {
	_, ok := labels[constants.K8sPodNamespace]
	return ok
}
|
go
|
{
"resource": ""
}
|
q6857
|
kubePodIdentifier
|
train
|
// kubePodIdentifier returns the "namespace/name" identifier of the pod this
// container belongs to, or "" when the container is not Kubernetes-managed
// or the pod name/namespace labels are missing.
func kubePodIdentifier(labels map[string]string) string {
	if !isKubernetesContainer(labels) {
		return ""
	}
	// Map lookups already yield "" for absent keys, so the previous
	// explicit zero-value declarations and !ok branches were dead code.
	podNamespace := labels[constants.K8sPodNamespace]
	podName := labels[constants.K8sPodName]
	if podName == "" || podNamespace == "" {
		zap.L().Warn("K8s pod does not have podname/podnamespace labels")
		return ""
	}
	return podNamespace + "/" + podName
}
|
go
|
{
"resource": ""
}
|
q6858
|
NewPolicyDB
|
train
|
// NewPolicyDB allocates an empty PolicyDB with every lookup table
// initialized and no default not-exists policy.
func NewPolicyDB() *PolicyDB {
	return &PolicyDB{
		numberOfPolicies:       0,
		equalPrefixes:          map[string]intList{},
		equalMapTable:          map[string]map[string][]*ForwardingPolicy{},
		equalIDMapTable:        map[string][]*ForwardingPolicy{},
		notEqualMapTable:       map[string]map[string][]*ForwardingPolicy{},
		notStarTable:           map[string][]*ForwardingPolicy{},
		defaultNotExistsPolicy: nil,
	}
}
|
go
|
{
"resource": ""
}
|
q6859
|
tagSplit
|
train
|
// tagSplit splits a "key=value" tag into *k and *v. It rejects tags shorter
// than three characters, tags with an empty key, tags with an empty value,
// and tags missing the '=' separator.
func (m *PolicyDB) tagSplit(tag string, k *string, v *string) error {
	if len(tag) < 3 {
		return fmt.Errorf("Invalid tag: invalid length '%s'", tag)
	}
	if tag[0] == '=' {
		return fmt.Errorf("Invalid tag: missing key '%s'", tag)
	}
	for i := 0; i < len(tag); i++ {
		if tag[i] != '=' {
			continue
		}
		if i == len(tag)-1 {
			// '=' is the last character: nothing after it.
			return fmt.Errorf("Invalid tag: missing value '%s'", tag)
		}
		*k = tag[:i]
		*v = tag[i+1:]
		return nil
	}
	return fmt.Errorf("Invalid tag: missing equal symbol '%s'", tag)
}
|
go
|
{
"resource": ""
}
|
q6860
|
Search
|
train
|
// Search evaluates the tag store against the policy DB and returns the
// index and actions of the first matching policy, or (-1, nil) when nothing
// matches. Per tag, candidates are tried in order: exact tag id, exact
// key=value, key prefixes of the value, then not-equal rules; the default
// not-exists policy applies last if it was not disabled.
func (m *PolicyDB) Search(tags *policy.TagStore) (int, interface{}) {
	// count accumulates per-policy clause progress inside searchInMapTable;
	// skip marks policies disabled for this search.
	count := make([]int, m.numberOfPolicies+1)
	skip := make([]bool, m.numberOfPolicies+1)
	// Disable all policies that fail the not key exists
	copiedTags := tags.GetSlice()
	var k, v string
	for _, t := range copiedTags {
		if err := m.tagSplit(t, &k, &v); err != nil {
			// Malformed tags are skipped rather than failing the search.
			continue
		}
		for _, policy := range m.notStarTable[k] {
			skip[policy.index] = true
		}
	}
	// Go through the list of tags
	for _, t := range copiedTags {
		// Search for matches of t (tag id)
		if index, action := searchInMapTable(m.equalIDMapTable[t], count, skip); index >= 0 {
			return index, action
		}
		if err := m.tagSplit(t, &k, &v); err != nil {
			continue
		}
		// Search for matches of k=v
		if index, action := searchInMapTable(m.equalMapTable[k][v], count, skip); index >= 0 {
			return index, action
		}
		// Search for matches in prefixes
		for _, i := range m.equalPrefixes[k] {
			if i <= len(v) {
				if index, action := searchInMapTable(m.equalMapTable[k][v[:i]], count, skip); index >= 0 {
					return index, action
				}
			}
		}
		// Parse all of the policies that have a key that matches the incoming tag key
		// and a not equal operator and that has a not match rule
		for value, policies := range m.notEqualMapTable[k] {
			if v == value {
				continue
			}
			if index, action := searchInMapTable(policies, count, skip); index >= 0 {
				return index, action
			}
		}
	}
	// Fall back to the default not-exists policy unless a not-star rule
	// disabled it for this tag set.
	if m.defaultNotExistsPolicy != nil && !skip[m.defaultNotExistsPolicy.index] {
		return m.defaultNotExistsPolicy.index, m.defaultNotExistsPolicy.actions
	}
	return -1, nil
}
|
go
|
{
"resource": ""
}
|
q6861
|
PrintPolicyDB
|
train
|
// PrintPolicyDB dumps the equal, equal-ID, and not-equal lookup tables to
// the debug log, one entry per key/value pair.
func (m *PolicyDB) PrintPolicyDB() {
	zap.L().Debug("Print Policy DB: equal table")
	for k, byValue := range m.equalMapTable {
		for v, plist := range byValue {
			zap.L().Debug("Print Policy DB",
				zap.String("policies", fmt.Sprintf("%#v", plist)),
				zap.String("key", k),
				zap.String("value", v),
			)
		}
	}
	zap.L().Debug("Print Policy DB: equal id table")
	for k, plist := range m.equalIDMapTable {
		for _, p := range plist {
			zap.L().Debug("Print Policy DB",
				zap.String("policies", fmt.Sprintf("%#v", p)),
				zap.String("key", k),
			)
		}
	}
	zap.L().Debug("Print Policy DB - not equal table")
	for k, byValue := range m.notEqualMapTable {
		for v, plist := range byValue {
			zap.L().Debug("Print Policy DB",
				zap.String("policies", fmt.Sprintf("%#v", plist)),
				zap.String("key", k),
				zap.String("value", v),
			)
		}
	}
}
|
go
|
{
"resource": ""
}
|
q6862
|
cgroupChainRules
|
train
|
// cgroupChainRules builds the traffic-capture rules for a cgroup-marked PU.
// On legacy kernels, host PUs fall back to source-port based trapping
// (no cgroup match module); otherwise the cgroup capture template is
// rendered and the PU's proxy rules are appended.
func (i *Instance) cgroupChainRules(cfg *ACLInfo) [][]string {
	// Rules for older distros (eg RH 6.9/Ubuntu 14.04), due to absence of
	// cgroup match modules, source ports are used to trap outgoing traffic.
	if i.isLegacyKernel && (cfg.PUType == common.HostNetworkPU || cfg.PUType == common.HostPU) {
		return i.legacyPuChainRules(
			cfg.ContextID,
			cfg.AppChain,
			cfg.NetChain,
			cfg.CgroupMark,
			cfg.TCPPorts,
			cfg.UDPPorts,
			cfg.ProxyPort,
			cfg.ProxySetName,
			cfg.AppSection,
			cfg.NetSection,
			cfg.PUType,
		)
	}
	// Template helpers: "0" in the port strings means no ports configured
	// for that protocol.
	tmpl := template.Must(template.New(cgroupCaptureTemplate).Funcs(template.FuncMap{
		"isUDPPorts": func() bool {
			return cfg.UDPPorts != "0"
		},
		"isTCPPorts": func() bool {
			return cfg.TCPPorts != "0"
		},
		"isHostPU": func() bool {
			return cfg.AppSection == HostModeOutput && cfg.NetSection == HostModeInput
		},
	}).Parse(cgroupCaptureTemplate))
	rules, err := extractRulesFromTemplate(tmpl, cfg)
	if err != nil {
		// Best effort: log and continue with whatever rules were produced.
		zap.L().Warn("unable to extract rules", zap.Error(err))
	}
	return append(rules, i.proxyRules(cfg)...)
}
|
go
|
{
"resource": ""
}
|
q6863
|
containerChainRules
|
train
|
// containerChainRules renders the capture rules for a container PU from the
// container-chain template and appends the PU's proxy rules.
func (i *Instance) containerChainRules(cfg *ACLInfo) [][]string {
	tmpl := template.Must(template.New(containerChainTemplate).Parse(containerChainTemplate))
	rules, err := extractRulesFromTemplate(tmpl, cfg)
	if err != nil {
		// Best effort: log and continue with whatever rules were produced.
		zap.L().Warn("unable to extract rules", zap.Error(err))
	}
	rules = append(rules, i.proxyRules(cfg)...)
	return rules
}
|
go
|
{
"resource": ""
}
|
q6864
|
proxyRules
|
train
|
// proxyRules renders the proxy-chain rules for a PU from the proxy
// template; the isCgroupSet helper tells the template whether a cgroup mark
// is configured.
func (i *Instance) proxyRules(cfg *ACLInfo) [][]string {
	funcs := template.FuncMap{
		"isCgroupSet": func() bool {
			return cfg.CgroupMark != ""
		},
	}
	tmpl := template.Must(template.New(proxyChainTemplate).Funcs(funcs).Parse(proxyChainTemplate))
	rules, err := extractRulesFromTemplate(tmpl, cfg)
	if err != nil {
		zap.L().Warn("unable to extract rules", zap.Error(err))
	}
	return rules
}
|
go
|
{
"resource": ""
}
|
q6865
|
trapRules
|
train
|
// trapRules renders the packet-capture (trap) rules for a PU. DNS rules are
// emitted for sidecar mode, host PUs, and legacy kernels; UID-specific
// handling is enabled when cfg.UID is set.
func (i *Instance) trapRules(cfg *ACLInfo, isHostPU bool) [][]string {
	funcs := template.FuncMap{
		"needDnsRules": func() bool {
			return i.mode == constants.Sidecar || isHostPU || i.isLegacyKernel
		},
		"isUIDProcess": func() bool {
			return cfg.UID != ""
		},
	}
	tmpl := template.Must(template.New(packetCaptureTemplate).Funcs(funcs).Parse(packetCaptureTemplate))
	rules, err := extractRulesFromTemplate(tmpl, cfg)
	if err != nil {
		zap.L().Warn("unable to extract rules", zap.Error(err))
	}
	return rules
}
|
go
|
{
"resource": ""
}
|
q6866
|
addContainerChain
|
train
|
// addContainerChain creates the application and network iptables chains for
// a PU, returning a descriptive error if either creation fails.
func (i *Instance) addContainerChain(appChain string, netChain string) error {
	var err error
	if err = i.ipt.NewChain(i.appPacketIPTableContext, appChain); err != nil {
		return fmt.Errorf("unable to add chain %s of context %s: %s", appChain, i.appPacketIPTableContext, err)
	}
	if err = i.ipt.NewChain(i.netPacketIPTableContext, netChain); err != nil {
		return fmt.Errorf("unable to add netchain %s of context %s: %s", netChain, i.netPacketIPTableContext, err)
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q6867
|
processRulesFromList
|
train
|
// processRulesFromList applies every rule in rulelist using methodType
// ("Append", "Insert", or "Delete"). Each rule is retried up to three times
// to work around transient iptables "invalid argument" errors. Failures
// abort with an error except for Delete, which is best-effort.
func (i *Instance) processRulesFromList(rulelist [][]string, methodType string) error {
	var err error
	for _, cr := range rulelist {
		// HACK: Adding a retry loop to avoid iptables error of "invalid argument"
		// Once in a while iptables
	L:
		for retry := 0; retry < 3; retry++ {
			switch methodType {
			case "Append":
				if err = i.ipt.Append(cr[0], cr[1], cr[2:]...); err == nil {
					break L
				}
			case "Insert":
				order, convErr := strconv.Atoi(cr[2])
				if convErr != nil {
					zap.L().Error("Incorrect format for iptables insert")
					return errors.New("invalid format")
				}
				// Assign to the outer err: the original declared a new err
				// with :=, shadowing it and silently dropping Insert
				// failures from the post-loop check below.
				if err = i.ipt.Insert(cr[0], cr[1], order, cr[3:]...); err == nil {
					break L
				}
			case "Delete":
				if err = i.ipt.Delete(cr[0], cr[1], cr[2:]...); err == nil {
					break L
				}
			default:
				return errors.New("invalid method type")
			}
		}
		if err != nil && methodType != "Delete" {
			return fmt.Errorf("unable to %s rule for table %s and chain %s with error %s", methodType, cr[0], cr[1], err)
		}
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q6868
|
addPacketTrap
|
train
|
// addPacketTrap installs the packet-capture trap rules for a PU by
// appending the rendered trap rules.
func (i *Instance) addPacketTrap(cfg *ACLInfo, isHostPU bool) error {
	rules := i.trapRules(cfg, isHostPU)
	return i.processRulesFromList(rules, "Append")
}
|
go
|
{
"resource": ""
}
|
q6869
|
programExtensionsRules
|
train
|
// programExtensionsRules appends one iptables rule per extension of the
// given ACL ipset rule to chain. Each rule matches the protocol, the ipset
// in the requested direction, and the rule's destination ports, followed by
// that extension's parsed arguments.
func (i *Instance) programExtensionsRules(rule *aclIPset, chain, proto, ipMatchDirection string) error {
	baseSpec := []string{
		"-p", proto,
		"-m", "set", "--match-set", rule.ipset, ipMatchDirection,
		"--match", "multiport", "--dports", strings.Join(rule.ports, ","),
	}
	for _, ext := range rule.extensions {
		args, err := shellwords.Parse(ext)
		if err != nil {
			return fmt.Errorf("unable to parse extension %s: %v", ext, err)
		}
		// Build a fresh spec for every extension. The original appended each
		// extension's args onto one shared slice, so every rule after the
		// first also carried the arguments of all previous extensions.
		spec := append(append([]string{}, baseSpec...), args...)
		if err := i.ipt.Append(i.appPacketIPTableContext, chain, spec...); err != nil {
			return fmt.Errorf("unable to program extension rules: %v", err)
		}
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q6870
|
sortACLsInBuckets
|
train
|
// sortACLsInBuckets expands every ACL rule per protocol and classifies the
// generated iptables rules into six buckets (accept/reject crossed with
// observe-apply / not-observed / observe-continue), plus the reverse rules.
// Bucket order later controls rule installation precedence. App ACLs match
// on destination and log to NFLOG group 10; network ACLs match on source
// and log to group 11.
func (i *Instance) sortACLsInBuckets(contextID, chain string, reverseChain string, rules []aclIPset, isAppACLs bool) *rulesInfo {
	rulesBucket := &rulesInfo{
		RejectObserveApply:    [][]string{},
		RejectNotObserved:     [][]string{},
		RejectObserveContinue: [][]string{},
		AcceptObserveApply:    [][]string{},
		AcceptNotObserved:     [][]string{},
		AcceptObserveContinue: [][]string{},
		ReverseRules:          [][]string{},
	}
	// Defaults are for the network (input) direction; flipped for app ACLs.
	direction := "src"
	reverse := "dst"
	nflogGroup := "11"
	if isAppACLs {
		direction = "dst"
		reverse = "src"
		nflogGroup = "10"
	}
	for _, rule := range rules {
		for _, proto := range rule.protocols {
			acls, r := i.generateACLRules(contextID, &rule, chain, reverseChain, nflogGroup, proto, direction, reverse)
			rulesBucket.ReverseRules = append(rulesBucket.ReverseRules, r...)
			// Route the generated rules into the bucket matching their
			// action and observation mode.
			if testReject(rule.policy) && testObserveApply(rule.policy) {
				rulesBucket.RejectObserveApply = append(rulesBucket.RejectObserveApply, acls...)
			}
			if testReject(rule.policy) && testNotObserved(rule.policy) {
				rulesBucket.RejectNotObserved = append(rulesBucket.RejectNotObserved, acls...)
			}
			if testReject(rule.policy) && testObserveContinue(rule.policy) {
				rulesBucket.RejectObserveContinue = append(rulesBucket.RejectObserveContinue, acls...)
			}
			if testAccept(rule.policy) && testObserveContinue(rule.policy) {
				rulesBucket.AcceptObserveContinue = append(rulesBucket.AcceptObserveContinue, acls...)
			}
			if testAccept(rule.policy) && testNotObserved(rule.policy) {
				rulesBucket.AcceptNotObserved = append(rulesBucket.AcceptNotObserved, acls...)
			}
			if testAccept(rule.policy) && testObserveApply(rule.policy) {
				rulesBucket.AcceptObserveApply = append(rulesBucket.AcceptObserveApply, acls...)
			}
		}
	}
	return rulesBucket
}
|
go
|
{
"resource": ""
}
|
q6871
|
addExternalACLs
|
train
|
// addExternalACLs programs the external-network ACL rules for a PU chain:
// rules are classified into accept/reject observation buckets, rendered
// through the acls template, and appended in one pass.
func (i *Instance) addExternalACLs(contextID string, chain string, reverseChain string, rules []aclIPset, isAppAcls bool) error {
	buckets := i.sortACLsInBuckets(contextID, chain, reverseChain, rules, isAppAcls)
	tmpl := template.Must(template.New(acls).Funcs(template.FuncMap{
		"joinRule": func(rule []string) string {
			return strings.Join(rule, " ")
		},
	}).Parse(acls))
	rendered, err := extractRulesFromTemplate(tmpl, *buckets)
	if err != nil {
		return fmt.Errorf("unable to extract rules from template: %s", err)
	}
	if err := i.processRulesFromList(rendered, "Append"); err != nil {
		return fmt.Errorf("unable to install rules - mode :%s %v", err, isAppAcls)
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q6872
|
deleteChainRules
|
train
|
// deleteChainRules removes a PU's chain rules. Non-local-server mode uses
// the container rules; local-server mode uses UID rules for UID-based PUs
// and cgroup rules otherwise.
func (i *Instance) deleteChainRules(cfg *ACLInfo) error {
	switch {
	case i.mode != constants.LocalServer:
		return i.processRulesFromList(i.containerChainRules(cfg), "Delete")
	case cfg.UID != "":
		return i.processRulesFromList(i.uidChainRules(cfg), "Delete")
	default:
		return i.processRulesFromList(i.cgroupChainRules(cfg), "Delete")
	}
}
|
go
|
{
"resource": ""
}
|
q6873
|
deletePUChains
|
train
|
// deletePUChains clears and deletes a PU's application and network chains.
// Every failure is logged as a warning and teardown continues, so the
// function always returns nil.
func (i *Instance) deletePUChains(appChain, netChain string) error {
	warn := func(msg, field, chain, context string, err error) {
		zap.L().Warn(msg,
			zap.String(field, chain),
			zap.String("context", context),
			zap.Error(err),
		)
	}
	if err := i.ipt.ClearChain(i.appPacketIPTableContext, appChain); err != nil {
		warn("Failed to clear the container ack packets chain", "appChain", appChain, i.appPacketIPTableContext, err)
	}
	if err := i.ipt.DeleteChain(i.appPacketIPTableContext, appChain); err != nil {
		warn("Failed to delete the container ack packets chain", "appChain", appChain, i.appPacketIPTableContext, err)
	}
	if err := i.ipt.ClearChain(i.netPacketIPTableContext, netChain); err != nil {
		warn("Failed to clear the container net packets chain", "netChain", netChain, i.netPacketIPTableContext, err)
	}
	if err := i.ipt.DeleteChain(i.netPacketIPTableContext, netChain); err != nil {
		warn("Failed to delete the container net packets chain", "netChain", netChain, i.netPacketIPTableContext, err)
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q6874
|
setGlobalRules
|
train
|
// setGlobalRules installs the chain-independent global rules: the templated
// global capture rules first, then the NAT proxy-redirection rules, which
// are programmed directly because templating them interferes with Docker.
func (i *Instance) setGlobalRules() error {
	cfg, err := i.newACLInfo(0, "", nil, 0)
	if err != nil {
		return err
	}
	globalTmpl := template.Must(template.New(globalRules).Funcs(template.FuncMap{
		"isLocalServer": func() bool {
			return i.mode == constants.LocalServer
		},
	}).Parse(globalRules))
	templated, err := extractRulesFromTemplate(globalTmpl, cfg)
	if err != nil {
		zap.L().Warn("unable to extract rules", zap.Error(err))
	}
	if err := i.processRulesFromList(templated, "Append"); err != nil {
		return fmt.Errorf("unable to install global rules:%s", err)
	}
	// nat rules cannot be templated, since they interfere with Docker.
	if err := i.ipt.Insert(i.appProxyIPTableContext,
		ipTableSectionPreRouting, 1,
		"-p", "tcp",
		"-m", "addrtype", "--dst-type", "LOCAL",
		"-m", "set", "!", "--match-set", excludedNetworkSet, "src",
		"-j", natProxyInputChain); err != nil {
		return fmt.Errorf("unable to add default allow for marked packets at net: %s", err)
	}
	if err := i.ipt.Insert(i.appProxyIPTableContext,
		ipTableSectionOutput, 1,
		"-m", "set", "!", "--match-set", excludedNetworkSet, "dst",
		"-j", natProxyOutputChain); err != nil {
		return fmt.Errorf("unable to add default allow for marked packets at net: %s", err)
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q6875
|
cleanACLSection
|
train
|
// cleanACLSection clears and deletes every chain in the given table whose
// name contains chainPrefix. List, clear, and delete failures are logged
// and otherwise ignored.
func (i *Instance) cleanACLSection(context, chainPrefix string) {
	chains, err := i.ipt.ListChains(context)
	if err != nil {
		zap.L().Warn("Failed to list chains",
			zap.String("context", context),
			zap.Error(err),
		)
	}
	for _, chain := range chains {
		if !strings.Contains(chain, chainPrefix) {
			continue
		}
		if err := i.ipt.ClearChain(context, chain); err != nil {
			zap.L().Warn("Can not clear the chain",
				zap.String("context", context),
				zap.String("section", chain),
				zap.Error(err),
			)
		}
		if err := i.ipt.DeleteChain(context, chain); err != nil {
			zap.L().Warn("Can not delete the chain",
				zap.String("context", context),
				zap.String("section", chain),
				zap.Error(err),
			)
		}
	}
}
|
go
|
{
"resource": ""
}
|
q6876
|
NewMockRuntimeReader
|
train
|
// NewMockRuntimeReader creates a new mock instance wired to the given
// gomock controller.
func NewMockRuntimeReader(ctrl *gomock.Controller) *MockRuntimeReader {
	m := &MockRuntimeReader{ctrl: ctrl}
	m.recorder = &MockRuntimeReaderMockRecorder{m}
	return m
}
|
go
|
{
"resource": ""
}
|
q6877
|
Pid
|
train
|
// Pid mocks the base method.
func (m *MockRuntimeReader) Pid() int {
	results := m.ctrl.Call(m, "Pid")
	pid, _ := results[0].(int)
	return pid
}
|
go
|
{
"resource": ""
}
|
q6878
|
Tag
|
train
|
// Tag mocks the base method.
func (m *MockRuntimeReader) Tag(arg0 string) (string, bool) {
	results := m.ctrl.Call(m, "Tag", arg0)
	value, _ := results[0].(string)
	present, _ := results[1].(bool)
	return value, present
}
|
go
|
{
"resource": ""
}
|
q6879
|
Tag
|
train
|
// Tag indicates an expected call of Tag.
func (mr *MockRuntimeReaderMockRecorder) Tag(arg0 interface{}) *gomock.Call {
	method := reflect.TypeOf((*MockRuntimeReader)(nil).Tag)
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Tag", method, arg0)
}
|
go
|
{
"resource": ""
}
|
q6880
|
Tags
|
train
|
// Tags mocks the base method.
func (m *MockRuntimeReader) Tags() *policy.TagStore {
	results := m.ctrl.Call(m, "Tags")
	tags, _ := results[0].(*policy.TagStore)
	return tags
}
|
go
|
{
"resource": ""
}
|
q6881
|
Options
|
train
|
// Options mocks the base method.
func (m *MockRuntimeReader) Options() policy.OptionsType {
	results := m.ctrl.Call(m, "Options")
	opts, _ := results[0].(policy.OptionsType)
	return opts
}
|
go
|
{
"resource": ""
}
|
q6882
|
IPAddresses
|
train
|
// IPAddresses mocks the base method.
func (m *MockRuntimeReader) IPAddresses() policy.ExtendedMap {
	results := m.ctrl.Call(m, "IPAddresses")
	addrs, _ := results[0].(policy.ExtendedMap)
	return addrs
}
|
go
|
{
"resource": ""
}
|
q6883
|
PUType
|
train
|
// PUType mocks the base method.
func (m *MockRuntimeReader) PUType() common.PUType {
	results := m.ctrl.Call(m, "PUType")
	puType, _ := results[0].(common.PUType)
	return puType
}
|
go
|
{
"resource": ""
}
|
q6884
|
PUType
|
train
|
// PUType indicates an expected call of PUType.
func (mr *MockRuntimeReaderMockRecorder) PUType() *gomock.Call {
	method := reflect.TypeOf((*MockRuntimeReader)(nil).PUType)
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PUType", method)
}
|
go
|
{
"resource": ""
}
|
q6885
|
SetServices
|
train
|
// SetServices mocks the base method. It returns nothing; the call is only
// recorded on the controller.
func (m *MockRuntimeReader) SetServices(services []common.Service) {
	m.ctrl.Call(m, "SetServices", services)
}
|
go
|
{
"resource": ""
}
|
q6886
|
SetServices
|
train
|
// SetServices indicates an expected call of SetServices.
func (mr *MockRuntimeReaderMockRecorder) SetServices(services interface{}) *gomock.Call {
	method := reflect.TypeOf((*MockRuntimeReader)(nil).SetServices)
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetServices", method, services)
}
|
go
|
{
"resource": ""
}
|
q6887
|
PortMap
|
train
|
// PortMap mocks the base method.
func (m *MockRuntimeReader) PortMap() map[nat.Port][]string {
	results := m.ctrl.Call(m, "PortMap")
	ports, _ := results[0].(map[nat.Port][]string)
	return ports
}
|
go
|
{
"resource": ""
}
|
q6888
|
NewMockResolver
|
train
|
// NewMockResolver creates a new mock instance wired to the given gomock
// controller.
func NewMockResolver(ctrl *gomock.Controller) *MockResolver {
	m := &MockResolver{ctrl: ctrl}
	m.recorder = &MockResolverMockRecorder{m}
	return m
}
|
go
|
{
"resource": ""
}
|
q6889
|
HandlePUEvent
|
train
|
// HandlePUEvent mocks the base method.
func (m *MockResolver) HandlePUEvent(ctx context.Context, puID string, event common.Event, runtime policy.RuntimeReader) error {
	results := m.ctrl.Call(m, "HandlePUEvent", ctx, puID, event, runtime)
	err, _ := results[0].(error)
	return err
}
|
go
|
{
"resource": ""
}
|
q6890
|
HandlePUEvent
|
train
|
// HandlePUEvent indicates an expected call of HandlePUEvent.
func (mr *MockResolverMockRecorder) HandlePUEvent(ctx, puID, event, runtime interface{}) *gomock.Call {
	method := reflect.TypeOf((*MockResolver)(nil).HandlePUEvent)
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HandlePUEvent", method, ctx, puID, event, runtime)
}
|
go
|
{
"resource": ""
}
|
q6891
|
Code
|
train
|
// Code maps the token error message to the matching collector error code.
// Unrecognized messages map to collector.InvalidToken.
func (e *ErrToken) Code() string {
	if e.message == errCompressedTagMismatch {
		return collector.CompressedTagMismatch
	}
	if e.message == errDatapathVersionMismatch {
		return collector.DatapathVersionMismatch
	}
	return collector.InvalidToken
}
|
go
|
{
"resource": ""
}
|
q6892
|
CodeFromErr
|
train
|
// CodeFromErr returns the collector error code for err, falling back to
// collector.InvalidToken when err is not an *ErrToken.
func CodeFromErr(err error) string {
	if tokenErr, ok := err.(*ErrToken); ok {
		return tokenErr.Code()
	}
	return collector.InvalidToken
}
|
go
|
{
"resource": ""
}
|
q6893
|
Run
|
train
|
// Run binds the nflog handles for the IPv4 source and destination log
// groups and spawns a goroutine that closes both handles once ctx is
// cancelled. The handles are guarded by the embedded lock.
// NOTE(review): the errors from BindAndListenForLogs are discarded; if a
// bind fails the corresponding handle is presumably nil and NFlogClose on
// shutdown could panic — worth confirming upstream.
func (a *nfLog) Run(ctx context.Context) {
	a.Lock()
	a.srcNflogHandle, _ = nflog.BindAndListenForLogs([]uint16{a.ipv4groupSource}, 64, a.sourceNFLogsHanlder, a.nflogErrorHandler)
	a.dstNflogHandle, _ = nflog.BindAndListenForLogs([]uint16{a.ipv4groupDest}, 64, a.destNFLogsHandler, a.nflogErrorHandler)
	a.Unlock()
	go func() {
		// Block until shutdown, then release both nflog handles.
		<-ctx.Done()
		a.Lock()
		a.srcNflogHandle.NFlogClose()
		a.dstNflogHandle.NFlogClose()
		a.Unlock()
	}()
}
|
go
|
{
"resource": ""
}
|
q6894
|
newAppConnectionState
|
train
|
// newAppConnectionState builds the initial, reject-by-default flow record
// for an outgoing HTTP request from a local PU to an external destination.
func newAppConnectionState(nativeID, serviceID string, p *pucontext.PUContext, r *http.Request, originalDestination *net.TCPAddr) *connectionState {
	srcIP := "0.0.0.0/0"
	srcPort := 0
	// Best effort: keep the wildcard address when the remote address does
	// not parse.
	if addr, err := net.ResolveTCPAddr("tcp", r.RemoteAddr); err == nil {
		srcIP = addr.IP.String()
		srcPort = addr.Port
	}
	uri := r.Method + " " + r.RequestURI
	record := &collector.FlowRecord{
		ContextID: nativeID,
		Source: &collector.EndPoint{
			Type:       collector.EnpointTypePU,
			ID:         p.ManagementID(),
			IP:         srcIP,
			Port:       uint16(srcPort),
			HTTPMethod: r.Method,
			URI:        uri,
		},
		Destination: &collector.EndPoint{
			Type:       collector.EndPointTypeExternalIP,
			ID:         collector.DefaultEndPoint,
			IP:         originalDestination.IP.String(),
			Port:       uint16(originalDestination.Port),
			HTTPMethod: r.Method,
			URI:        uri,
		},
		Action:      policy.Reject,
		L4Protocol:  packet.IPProtocolTCP,
		ServiceType: policy.ServiceHTTP,
		ServiceID:   serviceID,
		Tags:        p.Annotations(),
		PolicyID:    "default",
		Count:       1,
	}
	return &connectionState{stats: record}
}
|
go
|
{
"resource": ""
}
|
q6895
|
newNetworkConnectionState
|
train
|
// newNetworkConnectionState builds the initial, reject-by-default flow
// record for an incoming HTTP request from an external source to a local
// PU.
func newNetworkConnectionState(nativeID string, pctx *serviceregistry.PortContext, r *http.Request, source, dest *net.TCPAddr) *connectionState {
	uri := r.Method + " " + r.RequestURI
	record := &collector.FlowRecord{
		ContextID: nativeID,
		Source: &collector.EndPoint{
			Type: collector.EndPointTypeExternalIP,
			IP:   source.IP.String(),
			ID:   collector.DefaultEndPoint,
			Port: uint16(source.Port),
		},
		Destination: &collector.EndPoint{
			ID:         pctx.PUContext.ManagementID(),
			URI:        uri,
			HTTPMethod: r.Method,
			Type:       collector.EnpointTypePU,
			IP:         dest.IP.String(),
			Port:       uint16(dest.Port),
		},
		Action:      policy.Reject,
		L4Protocol:  packet.IPProtocolTCP,
		ServiceType: policy.ServiceHTTP,
		PolicyID:    "default",
		ServiceID:   pctx.Service.ID,
		Tags:        pctx.PUContext.Annotations(),
		Count:       1,
	}
	return &connectionState{stats: record}
}
|
go
|
{
"resource": ""
}
|
q6896
|
KubernetesMetadataExtractor
|
train
|
// KubernetesMetadataExtractor converts a CNI event into a PURuntime. Tags
// must be "key=value" pairs and are namespaced under "@usr:"; the namespace
// path in the event is mandatory.
func KubernetesMetadataExtractor(event *common.EventInfo) (*policy.PURuntime, error) {
	if event.NS == "" {
		return nil, errors.New("namespace path is required when using cni")
	}
	runtimeTags := policy.NewTagStore()
	for _, tag := range event.Tags {
		// SplitN keeps any '=' characters inside the value intact; the
		// previous strings.Split rejected values containing '='.
		parts := strings.SplitN(tag, "=", 2)
		if len(parts) != 2 {
			return nil, fmt.Errorf("invalid tag: %s", tag)
		}
		runtimeTags.AppendKeyValue("@usr:"+parts[0], parts[1])
	}
	runtimeIps := policy.ExtendedMap{"bridge": "0.0.0.0/0"}
	return policy.NewPURuntime(event.Name, 1, "", runtimeTags, runtimeIps, common.LinuxProcessPU, nil), nil
}
|
go
|
{
"resource": ""
}
|
q6897
|
CleanAllTriremeACLs
|
train
|
// CleanAllTriremeACLs removes every Trireme iptables chain and rule by
// instantiating a throwaway iptables controller and running its cleanup.
func CleanAllTriremeACLs() error {
	controller, err := iptablesctrl.NewInstance(fqconfig.NewFilterQueueWithDefaults(), constants.LocalServer, &runtime.Configuration{})
	if err != nil {
		return fmt.Errorf("unable to initialize cleaning iptables controller: %s", err)
	}
	return controller.CleanUp()
}
|
go
|
{
"resource": ""
}
|
q6898
|
ActionString
|
train
|
// ActionString renders the action as "accept", "reject", or "passthrough"
// (the latter when both flags are set or both are clear).
func (f ActionType) ActionString() string {
	accepted, rejected := f.Accepted(), f.Rejected()
	switch {
	case accepted && !rejected:
		return actionAccept
	case rejected && !accepted:
		return actionReject
	default:
		return actionPassthrough
	}
}
|
go
|
{
"resource": ""
}
|
q6899
|
LogPrefix
|
train
|
// LogPrefix builds the NFLOG prefix for this flow policy:
// "<contextID>:<policyID>:<serviceID>" followed by the encoded action.
func (f *FlowPolicy) LogPrefix(contextID string) string {
	return contextID + ":" + f.PolicyID + ":" + f.ServiceID + f.EncodedActionString()
}
|
go
|
{
"resource": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.