_id stringlengths 2 7 | title stringlengths 1 118 | partition stringclasses 3 values | text stringlengths 52 85.5k | language stringclasses 1 value | meta_information dict |
|---|---|---|---|---|---|
q7400 | NewVerifier | train | func NewVerifier(v Verifier) (Verifier, error) {
if v == nil {
return nil, nil
}
switch v.VerifierType() {
case common.PKI:
p := v.(*pkitokens.PKIJWTVerifier)
v, err := pkitokens.NewVerifier(p)
if err != nil {
return nil, err
}
return v, nil
case common.OIDC:
p := v.(*oidc.TokenVerifier)
verifier, err := oidc.NewClient(context.Background(), p)
if err != nil {
return nil, err
}
return verifier, nil
}
return nil, fmt.Errorf("uknown verifier type")
} | go | {
"resource": ""
} |
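The factory above dispatches on `VerifierType()` and runs the matching type-specific constructor. A minimal, self-contained sketch of the same pattern, with hypothetical stand-ins for the PKI/OIDC verifier types (none of these names come from the repo):

```go
package main

import (
	"errors"
	"fmt"
)

// Hypothetical stand-ins for common.PKI / common.OIDC and the Verifier interface.
type VerifierType int

const (
	PKI VerifierType = iota
	OIDC
)

type Verifier interface{ VerifierType() VerifierType }

type pkiVerifier struct{}

func (p *pkiVerifier) VerifierType() VerifierType { return PKI }

// newVerifier mirrors the dispatch above: switch on the declared type,
// then run the type-specific constructor (elided here).
func newVerifier(v Verifier) (Verifier, error) {
	if v == nil {
		return nil, nil
	}
	switch v.VerifierType() {
	case PKI:
		return v, nil // the real code calls pkitokens.NewVerifier here
	case OIDC:
		return v, nil // the real code calls oidc.NewClient here
	}
	return nil, errors.New("unknown verifier type")
}

func main() {
	v, err := newVerifier(&pkiVerifier{})
	fmt.Println(v, err)
}
```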
q7401 | NewMockEnforcer | train | func NewMockEnforcer(ctrl *gomock.Controller) *MockEnforcer {
mock := &MockEnforcer{ctrl: ctrl}
mock.recorder = &MockEnforcerMockRecorder{mock}
return mock
} | go | {
"resource": ""
} |
q7402 | Unenforce | train | func (m *MockEnforcer) Unenforce(contextID string) error {
ret := m.ctrl.Call(m, "Unenforce", contextID)
ret0, _ := ret[0].(error)
return ret0
} | go | {
"resource": ""
} |
q7403 | GetFilterQueue | train | func (m *MockEnforcer) GetFilterQueue() *fqconfig.FilterQueue {
ret := m.ctrl.Call(m, "GetFilterQueue")
ret0, _ := ret[0].(*fqconfig.FilterQueue)
return ret0
} | go | {
"resource": ""
} |
q7404 | GetFilterQueue | train | func (mr *MockEnforcerMockRecorder) GetFilterQueue() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetFilterQueue", reflect.TypeOf((*MockEnforcer)(nil).GetFilterQueue))
} | go | {
"resource": ""
} |
q7405 | CleanUp | train | func (m *MockEnforcer) CleanUp() error {
ret := m.ctrl.Call(m, "CleanUp")
ret0, _ := ret[0].(error)
return ret0
} | go | {
"resource": ""
} |
q7406 | EnableIPTablesPacketTracing | train | func (m *MockDebugInfo) EnableIPTablesPacketTracing(ctx context.Context, contextID string, interval time.Duration) error {
ret := m.ctrl.Call(m, "EnableIPTablesPacketTracing", ctx, contextID, interval)
ret0, _ := ret[0].(error)
return ret0
} | go | {
"resource": ""
} |
q7407 | GetUDPData | train | func (p *Packet) GetUDPData() []byte {
return p.ipHdr.Buffer[p.ipHdr.ipHeaderLen+UDPDataPos:]
} | go | {
"resource": ""
} |
q7408 | DropTCPDetachedBytes | train | func (p *Packet) DropTCPDetachedBytes() {
p.tcpHdr.tcpOptions = []byte{}
p.tcpHdr.tcpData = []byte{}
} | go | {
"resource": ""
} |
q7409 | Print | train | func (p *Packet) Print(context uint64) {
if p.ipHdr.ipProto != IPProtocolTCP {
return
}
logPkt := false
detailed := false
if PacketLogLevel || context == 0 {
logPkt = true
detailed = true
}
var buf string
print := false
if logPkt {
if printCount%200 == 0 {
buf += fmt.Sprintf("Packet: %5s %5s %25s %15s %5s %15s %5s %6s %20s %20s %6s %20s %20s %2s %5s %5s\n",
"IPID", "Dir", "Comment", "SIP", "SP", "DIP", "DP", "Flags", "TCPSeq", "TCPAck", "TCPLen", "ExpAck", "ExpSeq", "DO", "Acsum", "Ccsum")
}
printCount++
offset := 0
if (p.GetTCPFlags() & TCPSynMask) == TCPSynMask {
offset = 1
}
expAck := p.tcpHdr.tcpSeq + uint32(uint16(len(p.ipHdr.Buffer[p.ipHdr.ipHeaderLen:]))-p.TCPDataStartBytes()) + uint32(offset)
ccsum := p.computeTCPChecksum()
csumValidationStr := ""
if p.tcpHdr.tcpChecksum != ccsum {
csumValidationStr = "Bad Checksum"
}
buf += fmt.Sprintf("Packet: %5d %5s %25s %15s %5d %15s %5d %6s %20d %20d %6d %20d %20d %2d %5d %5d %12s\n",
p.ipHdr.ipID,
flagsToDir(p.context|context),
flagsToStr(p.context|context),
p.ipHdr.sourceAddress.To4().String(), p.tcpHdr.sourcePort,
p.ipHdr.destinationAddress.To4().String(), p.tcpHdr.destinationPort,
tcpFlagsToStr(p.GetTCPFlags()),
p.tcpHdr.tcpSeq, p.tcpHdr.tcpAck, uint16(len(p.ipHdr.Buffer[p.ipHdr.ipHeaderLen:]))-p.TCPDataStartBytes(),
expAck, expAck, p.tcpHdr.tcpDataOffset,
p.tcpHdr.tcpChecksum, ccsum, csumValidationStr)
print = true
}
if detailed {
pktBytes := []byte{0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 2, 8, 0}
pktBytes = append(pktBytes, p.ipHdr.Buffer...)
pktBytes = append(pktBytes, p.tcpHdr.tcpOptions...)
pktBytes = append(pktBytes, p.tcpHdr.tcpData...)
buf += fmt.Sprintf("%s\n", hex.Dump(pktBytes))
print = true
}
if print {
zap.L().Debug(buf)
}
} | go | {
"resource": ""
} |
q7410 | GetTCPBytes | train | func (p *Packet) GetTCPBytes() []byte {
pktBytes := []byte{}
pktBytes = append(pktBytes, p.ipHdr.Buffer...)
pktBytes = append(pktBytes, p.tcpHdr.tcpOptions...)
pktBytes = append(pktBytes, p.tcpHdr.tcpData...)
return pktBytes
} | go | {
"resource": ""
} |
q7411 | ReadTCPData | train | func (p *Packet) ReadTCPData() []byte {
return p.ipHdr.Buffer[uint16(p.ipHdr.ipHeaderLen)+p.TCPDataStartBytes():]
} | go | {
"resource": ""
} |
q7412 | CheckTCPAuthenticationOption | train | func (p *Packet) CheckTCPAuthenticationOption(iOptionLength int) (err error) {
tcpDataStart := p.TCPDataStartBytes()
if tcpDataStart <= minTCPIPPacketLen {
return errTCPAuthOption
}
optionLength := uint16(iOptionLength)
buffer := p.ipHdr.Buffer[p.ipHdr.ipHeaderLen:]
// Our option was not found in the right place. We don't do anything
// for this packet.
if buffer[tcpDataStart-optionLength] != TCPAuthenticationOption {
return errTCPAuthOption
}
return
} | go | {
"resource": ""
} |
q7413 | FixupIPHdrOnDataModify | train | func (p *Packet) FixupIPHdrOnDataModify(old, new uint16) {
// IP Header Processing
// IP checksum fixup.
p.ipHdr.ipChecksum = incCsum16(p.ipHdr.ipChecksum, old, new)
// Update IP Total Length.
p.ipHdr.ipTotalLength = p.ipHdr.ipTotalLength + new - old
binary.BigEndian.PutUint16(p.ipHdr.Buffer[ipv4LengthPos:ipv4LengthPos+2], p.ipHdr.ipTotalLength)
binary.BigEndian.PutUint16(p.ipHdr.Buffer[ipv4ChecksumPos:ipv4ChecksumPos+2], p.ipHdr.ipChecksum)
} | go | {
"resource": ""
} |
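`FixupIPHdrOnDataModify` leans on `incCsum16` to patch the IP checksum incrementally instead of recomputing it over the whole header. A common RFC 1624-style implementation looks like the sketch below; this is an assumption about what `incCsum16` does, not the repository's literal code:

```go
// incCsum16 updates a 16-bit ones'-complement checksum when one 16-bit
// word of the covered data changes from old to new (RFC 1624, eqn. 3):
// HC' = ~(~HC + ~m + m'). Sketch only.
func incCsum16(csum, old, new uint16) uint16 {
	sum := uint32(^csum) + uint32(^old) + uint32(new)
	// Fold the carries back into the low 16 bits.
	for sum>>16 != 0 {
		sum = (sum & 0xffff) + (sum >> 16)
	}
	return ^uint16(sum)
}
```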
q7414 | IncreaseTCPSeq | train | func (p *Packet) IncreaseTCPSeq(incr uint32) {
buffer := p.ipHdr.Buffer[p.ipHdr.ipHeaderLen:]
p.tcpHdr.tcpSeq = p.tcpHdr.tcpSeq + incr
binary.BigEndian.PutUint32(buffer[tcpSeqPos:tcpSeqPos+4], p.tcpHdr.tcpSeq)
} | go | {
"resource": ""
} |
q7415 | DecreaseTCPSeq | train | func (p *Packet) DecreaseTCPSeq(decr uint32) {
buffer := p.ipHdr.Buffer[p.ipHdr.ipHeaderLen:]
p.tcpHdr.tcpSeq = p.tcpHdr.tcpSeq - decr
binary.BigEndian.PutUint32(buffer[tcpSeqPos:tcpSeqPos+4], p.tcpHdr.tcpSeq)
} | go | {
"resource": ""
} |
q7416 | IncreaseTCPAck | train | func (p *Packet) IncreaseTCPAck(incr uint32) {
buffer := p.ipHdr.Buffer[p.ipHdr.ipHeaderLen:]
p.tcpHdr.tcpAck = p.tcpHdr.tcpAck + incr
binary.BigEndian.PutUint32(buffer[tcpAckPos:tcpAckPos+4], p.tcpHdr.tcpAck)
} | go | {
"resource": ""
} |
q7417 | DecreaseTCPAck | train | func (p *Packet) DecreaseTCPAck(decr uint32) {
buffer := p.ipHdr.Buffer[p.ipHdr.ipHeaderLen:]
p.tcpHdr.tcpAck = p.tcpHdr.tcpAck - decr
binary.BigEndian.PutUint32(buffer[tcpAckPos:tcpAckPos+4], p.tcpHdr.tcpAck)
} | go | {
"resource": ""
} |
q7418 | FixuptcpHdrOnTCPDataDetach | train | func (p *Packet) FixuptcpHdrOnTCPDataDetach(dataLength uint16, optionLength uint16) {
// Update DataOffset
buffer := p.ipHdr.Buffer[p.ipHdr.ipHeaderLen:]
p.tcpHdr.tcpDataOffset = p.tcpHdr.tcpDataOffset - uint8(optionLength/4)
buffer[tcpDataOffsetPos] = p.tcpHdr.tcpDataOffset << 4
} | go | {
"resource": ""
} |
q7419 | FixuptcpHdrOnTCPDataAttach | train | func (p *Packet) FixuptcpHdrOnTCPDataAttach(tcpOptions []byte, tcpData []byte) {
buffer := p.ipHdr.Buffer[p.ipHdr.ipHeaderLen:]
numberOfOptions := len(tcpOptions) / 4
// Modify the fields
p.tcpHdr.tcpDataOffset = p.tcpHdr.tcpDataOffset + uint8(numberOfOptions)
binary.BigEndian.PutUint16(buffer[tcpChecksumPos:tcpChecksumPos+2], p.tcpHdr.tcpChecksum)
buffer[tcpDataOffsetPos] = p.tcpHdr.tcpDataOffset << 4
} | go | {
"resource": ""
} |
q7420 | TCPDataAttach | train | func (p *Packet) TCPDataAttach(tcpOptions []byte, tcpData []byte) (err error) {
if err = p.tcpDataAttach(tcpOptions, tcpData); err != nil {
return fmt.Errorf("tcp data attachment failed: %s", err)
}
// We are increasing tcpOptions by 1 32-bit word. We are always adding
// our option last.
packetLenIncrease := uint16(len(tcpData) + len(tcpOptions))
// IP Header Processing
p.FixupIPHdrOnDataModify(p.ipHdr.ipTotalLength, p.ipHdr.ipTotalLength+packetLenIncrease)
// TCP Header Processing
p.FixuptcpHdrOnTCPDataAttach(tcpOptions, tcpData)
return
} | go | {
"resource": ""
} |
q7421 | L4FlowHash | train | func (p *Packet) L4FlowHash() string {
return p.ipHdr.sourceAddress.String() + ":" + p.ipHdr.destinationAddress.String() + ":" + strconv.Itoa(int(p.SourcePort())) + ":" + strconv.Itoa(int(p.DestPort()))
} | go | {
"resource": ""
} |
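The flow hash is just the colon-joined 4-tuple, so a segment from 10.0.0.1:80 to 10.0.0.2:443 keys as `10.0.0.1:10.0.0.2:80:443`. A standalone version for illustration:

```go
import (
	"net"
	"strconv"
)

// l4FlowHash reproduces the same 4-tuple key outside the Packet type.
func l4FlowHash(src, dst net.IP, sport, dport uint16) string {
	return src.String() + ":" + dst.String() + ":" +
		strconv.Itoa(int(sport)) + ":" + strconv.Itoa(int(dport))
}

// l4FlowHash(net.ParseIP("10.0.0.1"), net.ParseIP("10.0.0.2"), 80, 443)
// => "10.0.0.1:10.0.0.2:80:443"
```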
q7422 | ID | train | func (p *Packet) ID() string {
return strconv.Itoa(int(p.ipHdr.ipID))
} | go | {
"resource": ""
} |
q7423 | GetBuffer | train | func (p *Packet) GetBuffer(offset int) []byte {
return p.ipHdr.Buffer[offset:]
} | go | {
"resource": ""
} |
q7424 | NewInstance | train | func NewInstance(fqc *fqconfig.FilterQueue, mode constants.ModeType, cfg *runtime.Configuration) (*Instance, error) {
ipt, err := provider.NewGoIPTablesProvider([]string{"mangle"})
if err != nil {
return nil, fmt.Errorf("unable to initialize iptables provider: %s", err)
}
ips := provider.NewGoIPsetProvider()
return newInstanceWithProviders(fqc, mode, cfg, ipt, ips)
} | go | {
"resource": ""
} |
q7425 | newInstanceWithProviders | train | func newInstanceWithProviders(fqc *fqconfig.FilterQueue, mode constants.ModeType, cfg *runtime.Configuration, ipt provider.IptablesProvider, ips provider.IpsetProvider) (*Instance, error) {
if cfg == nil {
cfg = &runtime.Configuration{}
}
i := &Instance{
fqc: fqc,
ipt: ipt,
ipset: ips,
appPacketIPTableContext: "mangle",
netPacketIPTableContext: "mangle",
appProxyIPTableContext: "nat",
mode: mode,
appPacketIPTableSection: ipTableSectionOutput,
appCgroupIPTableSection: TriremeOutput,
netLinuxIPTableSection: TriremeInput,
netPacketIPTableSection: ipTableSectionInput,
appSynAckIPTableSection: ipTableSectionOutput,
contextIDToPortSetMap: cache.NewCache("contextIDToPortSetMap"),
createPUPortSet: ipsetCreatePortset,
isLegacyKernel: buildflags.IsLegacyKernel(),
serviceIDToIPsets: map[string]*ipsetInfo{},
puToServiceIDs: map[string][]string{},
cfg: cfg,
conntrackCmd: flushUDPConntrack,
}
lock.Lock()
instance = i
defer lock.Unlock()
return i, nil
} | go | {
"resource": ""
} |
q7426 | Run | train | func (i *Instance) Run(ctx context.Context) error {
go func() {
<-ctx.Done()
zap.L().Debug("Stop the supervisor")
i.CleanUp() // nolint
}()
// Clean any previous ACLs. This is needed in case we crashed at some
// earlier point or there are other ACLs that create conflicts. We
// try to clean only ACLs related to Trireme.
if err := i.cleanACLs(); err != nil {
return fmt.Errorf("Unable to clean previous acls while starting the supervisor: %s", err)
}
// Create all the basic target sets. These are the global target sets
// that do not depend on policy configuration. If they already exist
// we will delete them and start again.
targetTCPSet, targetUDPSet, excludedSet, err := createGlobalSets(i.ipset)
if err != nil {
return fmt.Errorf("unable to create global sets: %s", err)
}
i.targetTCPSet = targetTCPSet
i.targetUDPSet = targetUDPSet
i.excludedNetworksSet = excludedSet
if err := i.updateAllTargetNetworks(i.cfg, &runtime.Configuration{}); err != nil {
// If there is a failure try to clean up on exit.
i.ipset.DestroyAll(chainPrefix) // nolint errcheck
return fmt.Errorf("unable to initialize target networks: %s", err)
}
// Initialize all the global Trireme chains. There are several global chains
// that apply to all PUs:
// Tri-App/Tri-Net are the main chains for the egress/ingress directions
// UID related chains for any UID PUs.
// Host, Service, Pid chains for the different modes of operation (host mode, pu mode, host service).
// The priority is explicit (Pid activations take precedence over Service activations and Host Services).
if err := i.initializeChains(); err != nil {
return fmt.Errorf("Unable to initialize chains: %s", err)
}
// Insert the global ACLs. These are the main ACLs that will direct traffic from
// the INPUT/OUTPUT chains to the Trireme chains. They also include the main
// rules of the main chains. These rules are never touched again unless
// we gracefully terminate.
if err := i.setGlobalRules(); err != nil {
return fmt.Errorf("failed to update synack networks: %s", err)
}
return nil
} | go | {
"resource": ""
} |
q7427 | ConfigureRules | train | func (i *Instance) ConfigureRules(version int, contextID string, pu *policy.PUInfo) error {
var err error
var cfg *ACLInfo
// First we create an IPSet for destination matching ports. This only
// applies to Linux type PUs. A port set is associated with every PU,
// and packets matching this destination get associated with the context
// of the PU.
if err = i.createPortSet(contextID, pu); err != nil {
return err
}
// We create the generic ACL object that is used for all the templates.
cfg, err = i.newACLInfo(version, contextID, pu, pu.Runtime.PUType())
if err != nil {
return err
}
// Create the proxy sets. These are the target sets that will match
// traffic towards the L4 and L7 services. There are two sets created
// for every PU in this context (for outgoing and incoming traffic).
// The outgoing sets capture all traffic towards specific destinations
// as proxied traffic. Incoming sets correspond to the listening
// services.
if err = i.createProxySets(cfg.ProxySetName); err != nil {
return err
}
// At this point we can install all the ACL rules that will direct
// traffic to user space, allow for external access or direct
// traffic towards the proxies
if err = i.installRules(cfg, pu); err != nil {
return err
}
// We commit the ACLs at the end. Note, that some of the ACLs in the
// NAT table are not committed as a group. The commit function only
// applies when newer versions of tables are installed (1.6.2 and above).
if err = i.ipt.Commit(); err != nil {
zap.L().Error("unable to configure rules", zap.Error(err))
return err
}
i.conntrackCmd(i.cfg.UDPTargetNetworks)
return nil
} | go | {
"resource": ""
} |
q7428 | DeleteRules | train | func (i *Instance) DeleteRules(version int, contextID string, tcpPorts, udpPorts string, mark string, username string, proxyPort string, puType common.PUType) error {
cfg, err := i.newACLInfo(version, contextID, nil, puType)
if err != nil {
zap.L().Error("unable to create cleanup configuration", zap.Error(err))
return err
}
cfg.UDPPorts = udpPorts
cfg.TCPPorts = tcpPorts
cfg.CgroupMark = mark
cfg.Mark = mark
cfg.UID = username
cfg.PUType = puType
cfg.ProxyPort = proxyPort
// We clean up the chain rules first, so that we can delete the chains.
// If any rule is not deleted, then the chain will show as busy.
if err := i.deleteChainRules(cfg); err != nil {
zap.L().Warn("Failed to clean rules", zap.Error(err))
}
// We can now delete the chains we have created for this PU. Note that
// in every case we only create two chains for every PU. All other
// chains are global.
if err = i.deletePUChains(cfg.AppChain, cfg.NetChain); err != nil {
zap.L().Warn("Failed to clean container chains while deleting the rules", zap.Error(err))
}
// We call commit to update all the changes, before destroying the ipsets.
// References must be deleted for ipset deletion to succeed.
if err := i.ipt.Commit(); err != nil {
zap.L().Warn("Failed to commit ACL changes", zap.Error(err))
}
// We delete the set that captures all destination ports of the
// PU. This only holds for Linux PUs.
if err := i.deletePortSet(contextID); err != nil {
zap.L().Warn("Failed to remove port set")
}
// We delete the proxy port sets that were created for this PU.
if err := i.deleteProxySets(cfg.ProxySetName); err != nil {
zap.L().Warn("Failed to delete proxy sets", zap.Error(err))
}
// Destroy all the ACL related IPSets that were created
// on demand for any external services.
i.destroyACLIPsets(contextID)
return nil
} | go | {
"resource": ""
} |
q7429 | UpdateRules | train | func (i *Instance) UpdateRules(version int, contextID string, containerInfo *policy.PUInfo, oldContainerInfo *policy.PUInfo) error {
policyrules := containerInfo.Policy
if policyrules == nil {
return errors.New("policy rules cannot be nil")
}
// We cache the old config and use it to delete the previous
// rules. Every time we update the policy, the version toggles
// its lowest bit (version XOR 1).
newCfg, err := i.newACLInfo(version, contextID, containerInfo, containerInfo.Runtime.PUType())
if err != nil {
return err
}
oldCfg, err := i.newACLInfo(version^1, contextID, oldContainerInfo, containerInfo.Runtime.PUType())
if err != nil {
return err
}
// Install all the new rules. The hooks to the new chains are appended
// and do not take effect yet.
if err := i.installRules(newCfg, containerInfo); err != nil {
return err
}
// Remove mapping from old chain. By removing the old hooks the new
// hooks take priority.
if err := i.deleteChainRules(oldCfg); err != nil {
return err
}
// Delete the old chains, since there are not references any more.
if err := i.deletePUChains(oldCfg.AppChain, oldCfg.NetChain); err != nil {
return err
}
// Commit all actions in one iptables-restore invocation.
if err := i.ipt.Commit(); err != nil {
return err
}
// Sync all the IPSets with any new information coming from the policy.
i.synchronizePUACLs(contextID, policyrules.ApplicationACLs(), policyrules.NetworkACLs())
return nil
} | go | {
"resource": ""
} |
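Note how `UpdateRules` derives the previous generation as `version^1`: the scheme assumes the caller alternates the version's lowest bit on every update, so the old and new chain names never collide. In miniature:

```go
// Assuming the caller toggles the version on every policy update:
v := 0
for i := 0; i < 4; i++ {
	fmt.Printf("install v%d, delete v%d\n", v, v^1)
	v ^= 1 // the next update flips back
}
// install v0, delete v1
// install v1, delete v0
// ...
```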
q7430 | CleanUp | train | func (i *Instance) CleanUp() error {
if err := i.cleanACLs(); err != nil {
zap.L().Error("Failed to clean acls while stopping the supervisor", zap.Error(err))
}
if err := i.ipset.DestroyAll(chainPrefix); err != nil {
zap.L().Error("Failed to clean up ipsets", zap.Error(err))
}
return nil
} | go | {
"resource": ""
} |
q7431 | initializeChains | train | func (i *Instance) initializeChains() error {
cfg, err := i.newACLInfo(0, "", nil, 0)
if err != nil {
return err
}
tmpl := template.Must(template.New(triremChains).Funcs(template.FuncMap{
"isLocalServer": func() bool {
return i.mode == constants.LocalServer
},
}).Parse(triremChains))
rules, err := extractRulesFromTemplate(tmpl, cfg)
if err != nil {
return fmt.Errorf("unable to create trireme chains:%s", err)
}
for _, rule := range rules {
if len(rule) != 4 {
continue
}
if err := i.ipt.NewChain(rule[1], rule[3]); err != nil {
return err
}
}
return nil
} | go | {
"resource": ""
} |
q7432 | configureLinuxRules | train | func (i *Instance) configureLinuxRules(cfg *ACLInfo) error {
// These checks are for rather unusual error scenarios. We should
// never see errors here. But better safe than sorry.
if cfg.CgroupMark == "" {
return errors.New("no mark value found")
}
if cfg.TCPPortSet == "" {
return fmt.Errorf("port set was not found for the contextID. This should not happen")
}
return i.addChainRules(cfg)
} | go | {
"resource": ""
} |
q7433 | installRules | train | func (i *Instance) installRules(cfg *ACLInfo, containerInfo *policy.PUInfo) error {
var err error
var appACLIPset, netACLIPset []aclIPset
policyrules := containerInfo.Policy
if err := i.updateProxySet(containerInfo.Policy, cfg.ProxySetName); err != nil {
return err
}
if appACLIPset, err = i.createACLIPSets(cfg.ContextID, policyrules.ApplicationACLs()); err != nil {
return err
}
if netACLIPset, err = i.createACLIPSets(cfg.ContextID, policyrules.NetworkACLs()); err != nil {
return err
}
// Install the PU specific chain first.
if err := i.addContainerChain(cfg.AppChain, cfg.NetChain); err != nil {
return err
}
// If it's a remote (and thus a container), configure container rules.
if i.mode == constants.RemoteContainer || i.mode == constants.Sidecar {
if err := i.configureContainerRules(cfg); err != nil {
return err
}
}
// If it's a Linux process, configure the Linux rules.
if i.mode == constants.LocalServer {
if err := i.configureLinuxRules(cfg); err != nil {
return err
}
}
isHostPU := extractors.IsHostPU(containerInfo.Runtime, i.mode)
if err := i.addExternalACLs(cfg.ContextID, cfg.AppChain, cfg.NetChain, appACLIPset, true); err != nil {
return err
}
if err := i.addExternalACLs(cfg.ContextID, cfg.NetChain, cfg.AppChain, netACLIPset, false); err != nil {
return err
}
if err := i.addPacketTrap(cfg, isHostPU); err != nil {
return err
}
return nil
} | go | {
"resource": ""
} |
q7434 | puPortSetName | train | func puPortSetName(contextID string, prefix string) string {
hash := murmur3.New64()
if _, err := io.WriteString(hash, contextID); err != nil {
return ""
}
output := base64.URLEncoding.EncodeToString(hash.Sum(nil))
if len(contextID) > 4 {
contextID = contextID[:4] + output[:4]
} else {
contextID = contextID + output[:4]
}
if len(prefix) > 16 {
prefix = prefix[:16]
}
return (prefix + contextID)
} | go | {
"resource": ""
} |
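`puPortSetName` keeps ipset names short, deterministic, and collision-resistant: hash the contextID with murmur3, base64-encode the digest, splice four digest characters onto at most four characters of the contextID, and cap the prefix at 16 characters. A runnable stand-alone version (the `github.com/spaolacci/murmur3` import is an assumption about which murmur3 package is in use):

```go
package main

import (
	"encoding/base64"
	"fmt"
	"io"

	"github.com/spaolacci/murmur3" // assumed murmur3 implementation
)

func portSetName(contextID, prefix string) string {
	h := murmur3.New64()
	io.WriteString(h, contextID) // nolint: errcheck
	out := base64.URLEncoding.EncodeToString(h.Sum(nil))
	if len(contextID) > 4 {
		contextID = contextID[:4] + out[:4]
	} else {
		contextID += out[:4]
	}
	if len(prefix) > 16 {
		prefix = prefix[:16]
	}
	return prefix + contextID
}

func main() {
	// Identical inputs always produce the identical short name.
	fmt.Println(portSetName("web-frontend-1234", "TRI-Port-"))
}
```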
q7435 | NewProtoListener | train | func NewProtoListener(mark int) *ProtoListener {
return &ProtoListener{
connection: make(chan net.Conn),
mark: mark,
}
} | go | {
"resource": ""
} |
q7436 | Accept | train | func (p *ProtoListener) Accept() (net.Conn, error) {
c, ok := <-p.connection
if !ok {
return nil, fmt.Errorf("mux: listener closed")
}
return c, nil
} | go | {
"resource": ""
} |
q7437 | NewMultiplexedListener | train | func NewMultiplexedListener(l net.Listener, mark int, registry *serviceregistry.Registry, puID string) *MultiplexedListener {
return &MultiplexedListener{
root: l,
done: make(chan struct{}),
shutdown: make(chan struct{}),
wg: sync.WaitGroup{},
protomap: map[common.ListenerType]*ProtoListener{},
registry: registry,
localIPs: markedconn.GetInterfaces(),
mark: mark,
puID: puID,
}
} | go | {
"resource": ""
} |
q7438 | RegisterListener | train | func (m *MultiplexedListener) RegisterListener(ltype common.ListenerType) (*ProtoListener, error) {
m.Lock()
defer m.Unlock()
if _, ok := m.protomap[ltype]; ok {
return nil, fmt.Errorf("Cannot register same listener type multiple times")
}
p := &ProtoListener{
Listener: m.root,
connection: make(chan net.Conn),
mark: m.mark,
}
m.protomap[ltype] = p
return p, nil
} | go | {
"resource": ""
} |
q7439 | UnregisterListener | train | func (m *MultiplexedListener) UnregisterListener(ltype common.ListenerType) error {
m.Lock()
defer m.Unlock()
delete(m.protomap, ltype)
return nil
} | go | {
"resource": ""
} |
q7440 | RegisterDefaultListener | train | func (m *MultiplexedListener) RegisterDefaultListener(p *ProtoListener) error {
m.Lock()
defer m.Unlock()
if m.defaultListener != nil {
return fmt.Errorf("Default listener already registered")
}
m.defaultListener = p
return nil
} | go | {
"resource": ""
} |
q7441 | UnregisterDefaultListener | train | func (m *MultiplexedListener) UnregisterDefaultListener() error {
m.Lock()
defer m.Unlock()
if m.defaultListener == nil {
return fmt.Errorf("No default listener registered")
}
m.defaultListener = nil
return nil
} | go | {
"resource": ""
} |
q7442 | Serve | train | func (m *MultiplexedListener) Serve(ctx context.Context) error {
defer func() {
close(m.done)
m.wg.Wait()
m.RLock()
defer m.RUnlock()
for _, l := range m.protomap {
close(l.connection)
// Drain the connections enqueued for the listener.
for c := range l.connection {
c.Close() // nolint
}
}
}()
for {
select {
case <-ctx.Done():
return nil
case <-m.shutdown:
return nil
default:
c, err := m.root.Accept()
if err != nil {
return err
}
m.wg.Add(1)
go m.serve(c)
}
}
} | go | {
"resource": ""
} |
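Putting the multiplexer together: wrap a root listener, register per-protocol listeners, then let `Serve` pump accepted connections to whichever `ProtoListener` claims them. A hedged wiring sketch; the concrete `ListenerType` value and the handler body are placeholders, not the repo's literal API:

```go
func runMux(ctx context.Context, root net.Listener, registry *serviceregistry.Registry) error {
	m := NewMultiplexedListener(root, 0x1000 /* example mark */, registry, "pu-1")

	httpL, err := m.RegisterListener(common.ListenerType(0)) // placeholder type
	if err != nil {
		return err
	}

	go func() {
		for {
			c, err := httpL.Accept() // blocks until Serve routes a conn here
			if err != nil {
				return // channel closed: the mux is shutting down
			}
			go c.Close() // placeholder for a real handler
		}
	}()

	return m.Serve(ctx) // returns when ctx is done or root.Accept fails
}
```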
q7443 | GetParameters | train | func GetParameters() (logToConsole bool, logID string, logLevel string, logFormat string, compressedTagsVersion claimsheader.CompressionType) {
logLevel = os.Getenv(constants.EnvLogLevel)
if logLevel == "" {
logLevel = "info"
}
logFormat = os.Getenv(constants.EnvLogFormat)
if logLevel == "" {
logFormat = "json"
}
if console := os.Getenv(constants.EnvLogToConsole); console == constants.EnvLogToConsoleEnable {
logToConsole = true
}
logID = os.Getenv(constants.EnvLogID)
compressedTagsVersion = claimsheader.CompressionTypeNone
if console := os.Getenv(constants.EnvCompressedTags); console != string(claimsheader.CompressionTypeNone) {
if console == string(claimsheader.CompressionTypeV1) {
compressedTagsVersion = claimsheader.CompressionTypeV1
} else if console == string(claimsheader.CompressionTypeV2) {
compressedTagsVersion = claimsheader.CompressionTypeV2
}
}
return
} | go | {
"resource": ""
} |
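Because `GetParameters` reads everything from the environment, it can be driven entirely with `os.Setenv` in a test or wrapper. Sketch only; the literal variable names below are hypothetical stand-ins for the `constants.Env*` values referenced above:

```go
// Hypothetical env var names; the real ones live in the constants package.
os.Setenv("TRIREME_LOG_LEVEL", "debug")
os.Setenv("TRIREME_LOG_FORMAT", "console")
toConsole, logID, level, format, compression := GetParameters()
fmt.Println(toConsole, logID, level, format, compression)
```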
q7444 | isPodInfraContainer | train | func isPodInfraContainer(runtime policy.RuntimeReader) bool {
// The Infra container can be found by checking env. variable.
tagContent, ok := runtime.Tag(KubernetesContainerNameIdentifier)
if !ok || tagContent != KubernetesInfraContainerName {
return false
}
return true
} | go | {
"resource": ""
} |
q7445 | NewStatsClient | train | func NewStatsClient(cr statscollector.Collector) (StatsClient, error) {
sc := &statsClient{
collector: cr,
rpchdl: rpcwrapper.NewRPCWrapper(),
secret: os.Getenv(constants.EnvStatsSecret),
statsChannel: os.Getenv(constants.EnvStatsChannel),
statsInterval: defaultStatsIntervalMiliseconds * time.Millisecond,
userRetention: defaultUserRetention * time.Minute,
stop: make(chan bool),
}
if sc.statsChannel == "" {
return nil, errors.New("no path to stats socket provided")
}
if sc.secret == "" {
return nil, errors.New("no secret provided for stats channel")
}
return sc, nil
} | go | {
"resource": ""
} |
q7446 | sendStats | train | func (s *statsClient) sendStats(ctx context.Context) {
ticker := time.NewTicker(s.statsInterval)
userTicker := time.NewTicker(s.userRetention)
// nolint : gosimple
for {
select {
case <-ticker.C:
flows := s.collector.GetAllRecords()
users := s.collector.GetUserRecords()
if flows == nil && users == nil {
continue
}
s.sendRequest(flows, users)
case <-userTicker.C:
s.collector.FlushUserCache()
case <-ctx.Done():
return
}
}
} | go | {
"resource": ""
} |
q7447 | SendStats | train | func (s *statsClient) SendStats() {
flows := s.collector.GetAllRecords()
users := s.collector.GetUserRecords()
if flows == nil && users == nil {
zap.L().Debug("Flows and UserRecords are nil while sending stats to collector")
return
}
s.sendRequest(flows, users)
} | go | {
"resource": ""
} |
q7448 | Run | train | func (s *statsClient) Run(ctx context.Context) error {
if err := s.rpchdl.NewRPCClient(statsContextID, s.statsChannel, s.secret); err != nil {
zap.L().Error("Stats RPC client cannot connect", zap.Error(err))
return err
}
go s.sendStats(ctx)
return nil
} | go | {
"resource": ""
} |
q7449 | New | train | func New(serverID string, mode constants.ModeType, opts ...Option) TriremeController {
c := &config{
serverID: serverID,
collector: collector.NewDefaultCollector(),
mode: mode,
fq: fqconfig.NewFilterQueueWithDefaults(),
mutualAuth: true,
validity: time.Hour * 8760,
procMountPoint: constants.DefaultProcMountPoint,
externalIPcacheTimeout: -1,
remoteParameters: &env.RemoteParameters{
LogToConsole: true,
LogFormat: "console",
LogLevel: "info",
LogWithID: false,
CompressedTags: claimsheader.CompressionTypeV1,
},
}
for _, opt := range opts {
opt(c)
}
zap.L().Debug("Trireme configuration", zap.String("configuration", fmt.Sprintf("%+v", c)))
return newTrireme(c)
} | go | {
"resource": ""
} |
q7450 | Run | train | func (t *trireme) Run(ctx context.Context) error {
// Start all the supervisors.
for _, s := range t.supervisors {
if err := s.Run(ctx); err != nil {
zap.L().Error("Error when starting the supervisor", zap.Error(err))
return fmt.Errorf("Error while starting supervisor %v", err)
}
}
// Start all the enforcers.
for _, e := range t.enforcers {
if err := e.Run(ctx); err != nil {
return fmt.Errorf("unable to start the enforcer: %s", err)
}
}
go t.runIPTraceCollector(ctx)
return nil
} | go | {
"resource": ""
} |
q7451 | CleanUp | train | func (t *trireme) CleanUp() error {
for _, s := range t.supervisors {
s.CleanUp() // nolint
}
for _, e := range t.enforcers {
e.CleanUp() // nolint
}
return nil
} | go | {
"resource": ""
} |
q7452 | Enforce | train | func (t *trireme) Enforce(ctx context.Context, puID string, policy *policy.PUPolicy, runtime *policy.PURuntime) error {
lock, _ := t.locks.LoadOrStore(puID, &sync.Mutex{})
lock.(*sync.Mutex).Lock()
defer lock.(*sync.Mutex).Unlock()
return t.doHandleCreate(puID, policy, runtime)
} | go | {
"resource": ""
} |
q7453 | UpdateSecrets | train | func (t *trireme) UpdateSecrets(secrets secrets.Secrets) error {
for _, enforcer := range t.enforcers {
if err := enforcer.UpdateSecrets(secrets); err != nil {
zap.L().Error("unable to update secrets", zap.Error(err))
}
}
return nil
} | go | {
"resource": ""
} |
q7454 | UpdateConfiguration | train | func (t *trireme) UpdateConfiguration(cfg *runtime.Configuration) error {
failure := false
for _, s := range t.supervisors {
err := s.SetTargetNetworks(cfg)
if err != nil {
zap.L().Error("Failed to update target networks in supervisor", zap.Error(err))
failure = true
}
}
for _, e := range t.enforcers {
err := e.SetTargetNetworks(cfg)
if err != nil {
zap.L().Error("Failed to update target networks in cotnroller", zap.Error(err))
failure = true
}
}
if failure {
return fmt.Errorf("configuration update failed")
}
return nil
} | go | {
"resource": ""
} |
q7455 | doHandleCreate | train | func (t *trireme) doHandleCreate(contextID string, policyInfo *policy.PUPolicy, runtimeInfo *policy.PURuntime) error {
containerInfo := policy.PUInfoFromPolicyAndRuntime(contextID, policyInfo, runtimeInfo)
logEvent := &collector.ContainerRecord{
ContextID: contextID,
IPAddress: policyInfo.IPAddresses(),
Tags: policyInfo.Annotations(),
Event: collector.ContainerStart,
}
defer func() {
t.config.collector.CollectContainerEvent(logEvent)
}()
addTransmitterLabel(contextID, containerInfo)
if !mustEnforce(contextID, containerInfo) {
logEvent.Event = collector.ContainerIgnored
return nil
}
if err := t.enforcers[t.puTypeToEnforcerType[containerInfo.Runtime.PUType()]].Enforce(contextID, containerInfo); err != nil {
logEvent.Event = collector.ContainerFailed
return fmt.Errorf("unable to setup enforcer: %s", err)
}
if err := t.supervisors[t.puTypeToEnforcerType[containerInfo.Runtime.PUType()]].Supervise(contextID, containerInfo); err != nil {
if werr := t.enforcers[t.puTypeToEnforcerType[containerInfo.Runtime.PUType()]].Unenforce(contextID); werr != nil {
zap.L().Warn("Failed to clean up state after failures",
zap.String("contextID", contextID),
zap.Error(werr),
)
}
logEvent.Event = collector.ContainerFailed
return fmt.Errorf("unable to setup supervisor: %s", err)
}
return nil
} | go | {
"resource": ""
} |
q7456 | doHandleDelete | train | func (t *trireme) doHandleDelete(contextID string, runtime *policy.PURuntime) error {
errS := t.supervisors[t.puTypeToEnforcerType[runtime.PUType()]].Unsupervise(contextID)
errE := t.enforcers[t.puTypeToEnforcerType[runtime.PUType()]].Unenforce(contextID)
t.config.collector.CollectContainerEvent(&collector.ContainerRecord{
ContextID: contextID,
IPAddress: runtime.IPAddresses(),
Tags: nil,
Event: collector.ContainerDelete,
})
if errS != nil || errE != nil {
return fmt.Errorf("unable to delete context id %s, supervisor %s, enforcer %s", contextID, errS, errE)
}
return nil
} | go | {
"resource": ""
} |
q7457 | doUpdatePolicy | train | func (t *trireme) doUpdatePolicy(contextID string, newPolicy *policy.PUPolicy, runtime *policy.PURuntime) error {
containerInfo := policy.PUInfoFromPolicyAndRuntime(contextID, newPolicy, runtime)
addTransmitterLabel(contextID, containerInfo)
if !mustEnforce(contextID, containerInfo) {
return nil
}
if err := t.enforcers[t.puTypeToEnforcerType[containerInfo.Runtime.PUType()]].Enforce(contextID, containerInfo); err != nil {
// We lost communication with the remote and killed it; restart it here by feeding a create event into the request channel.
if werr := t.supervisors[t.puTypeToEnforcerType[containerInfo.Runtime.PUType()]].Unsupervise(contextID); werr != nil {
zap.L().Warn("Failed to clean up after enforcerments failures",
zap.String("contextID", contextID),
zap.Error(werr),
)
}
return fmt.Errorf("unable to update policy for pu %s: %s", contextID, err)
}
if err := t.supervisors[t.puTypeToEnforcerType[containerInfo.Runtime.PUType()]].Supervise(contextID, containerInfo); err != nil {
if werr := t.enforcers[t.puTypeToEnforcerType[containerInfo.Runtime.PUType()]].Unenforce(contextID); werr != nil {
zap.L().Warn("Failed to clean up after enforcerments failures",
zap.String("contextID", contextID),
zap.Error(werr),
)
}
return fmt.Errorf("supervisor failed to update policy for pu %s: %s", contextID, err)
}
t.config.collector.CollectContainerEvent(&collector.ContainerRecord{
ContextID: contextID,
IPAddress: runtime.IPAddresses(),
Tags: containerInfo.Runtime.Tags(),
Event: collector.ContainerUpdate,
})
return nil
} | go | {
"resource": ""
} |
q7458 | TCPFlagsToStr | train | func TCPFlagsToStr(flags uint8) string {
s := ""
if flags&0x20 == 0 {
s = s + "."
} else {
s = s + "U"
}
if flags&0x10 == 0 {
s = s + "."
} else {
s = s + "A"
}
if flags&0x08 == 0 {
s = s + "."
} else {
s = s + "P"
}
if flags&0x04 == 0 {
s = s + "."
} else {
s = s + "R"
}
if flags&0x02 == 0 {
s = s + "."
} else {
s = s + "S"
}
if flags&0x01 == 0 {
s = s + "."
} else {
s = s + "F"
}
return s
} | go | {
"resource": ""
} |
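Each flag owns a fixed column (URG, ACK, PSH, RST, SYN, FIN), so the rendering is positional:

```go
fmt.Println(TCPFlagsToStr(0x12)) // SYN|ACK -> ".A..S."
fmt.Println(TCPFlagsToStr(0x02)) // SYN     -> "....S."
fmt.Println(TCPFlagsToStr(0x11)) // FIN|ACK -> ".A...F"
```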
q7459 | DialMarkedWithContext | train | func DialMarkedWithContext(ctx context.Context, network string, addr string, mark int) (net.Conn, error) {
d := net.Dialer{
Control: func(_, _ string, c syscall.RawConn) error {
return c.Control(func(fd uintptr) {
if err := syscall.SetNonblock(int(fd), false); err != nil {
zap.L().Error("unable to set socket options", zap.Error(err))
}
if err := syscall.SetsockoptInt(int(fd), syscall.SOL_SOCKET, syscall.SO_MARK, mark); err != nil {
zap.L().Error("Failed to assing mark to socket", zap.Error(err))
}
if err := syscall.SetsockoptInt(int(fd), syscall.SOL_TCP, 30, 1); err != nil {
zap.L().Debug("Failed to set fast open socket option", zap.Error(err))
}
})
},
}
conn, err := d.DialContext(ctx, network, addr)
if err != nil {
zap.L().Error("Failed to dial to downstream node",
zap.Error(err),
zap.String("Address", addr),
zap.String("Network type", network),
)
}
return conn, err
} | go | {
"resource": ""
} |
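Setting SO_MARK requires CAP_NET_ADMIN, so this dial only works from a privileged process, and the mark must match whatever iptables rules classify the proxied traffic (option 30 on SOL_TCP is TCP_FASTOPEN_CONNECT). A usage sketch with an arbitrary example mark:

```go
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()

// 0x1000 is an example value; it must agree with the datapath's rules.
conn, err := DialMarkedWithContext(ctx, "tcp4", "10.0.0.5:8080", 0x1000)
if err != nil {
	return err
}
defer conn.Close()
```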
q7460 | NewSocketListener | train | func NewSocketListener(ctx context.Context, port string, mark int) (net.Listener, error) {
listenerCfg := net.ListenConfig{
Control: func(_, _ string, c syscall.RawConn) error {
return c.Control(func(fd uintptr) {
if err := syscall.SetsockoptInt(int(fd), syscall.SOL_SOCKET, syscall.SO_MARK, mark); err != nil {
zap.L().Error("Failed to mark connection", zap.Error(err))
}
if err := syscall.SetsockoptInt(int(fd), syscall.SOL_TCP, 23, 16*1024); err != nil {
zap.L().Error("Cannot set tcp fast open options", zap.Error(err))
}
})
},
}
listener, err := listenerCfg.Listen(ctx, "tcp4", port)
if err != nil {
return nil, fmt.Errorf("Failed to create listener: %s", err)
}
return ProxiedListener{netListener: listener, mark: mark}, nil
} | go | {
"resource": ""
} |
q7461 | LocalAddr | train | func (p *ProxiedConnection) LocalAddr() net.Addr {
addr, err := net.ResolveTCPAddr("tcp", p.originalIP.String()+":"+strconv.Itoa(p.originalPort))
if err != nil {
return nil
}
return addr
} | go | {
"resource": ""
} |
q7462 | SetDeadline | train | func (p *ProxiedConnection) SetDeadline(t time.Time) error {
return p.originalTCPConnection.SetDeadline(t)
} | go | {
"resource": ""
} |
q7463 | SetReadDeadline | train | func (p *ProxiedConnection) SetReadDeadline(t time.Time) error {
return p.originalTCPConnection.SetReadDeadline(t)
} | go | {
"resource": ""
} |
q7464 | SetWriteDeadline | train | func (p *ProxiedConnection) SetWriteDeadline(t time.Time) error {
return p.originalTCPConnection.SetWriteDeadline(t)
} | go | {
"resource": ""
} |
q7465 | GetInterfaces | train | func GetInterfaces() map[string]struct{} {
ipmap := map[string]struct{}{}
ifaces, _ := net.Interfaces()
for _, intf := range ifaces {
addrs, _ := intf.Addrs()
for _, addr := range addrs {
ip, _, _ := net.ParseCIDR(addr.String())
if ip.To4() != nil {
ipmap[ip.String()] = struct{}{}
}
}
}
return ipmap
} | go | {
"resource": ""
} |
q7466 | GetCgroupList | train | func GetCgroupList() []string {
var cgroupList []string
filelist, err := ioutil.ReadDir(filepath.Join(basePath, TriremeBasePath))
if err != nil {
return cgroupList
}
for _, file := range filelist {
if file.IsDir() {
cgroupList = append(cgroupList, file.Name())
}
}
return cgroupList
} | go | {
"resource": ""
} |
q7467 | OptionCollector | train | func OptionCollector(c collector.EventCollector) Option {
return func(cfg *config) {
cfg.collector = c
}
} | go | {
"resource": ""
} |
q7468 | OptionDatapathService | train | func OptionDatapathService(s packetprocessor.PacketProcessor) Option {
return func(cfg *config) {
cfg.service = s
}
} | go | {
"resource": ""
} |
q7469 | OptionSecret | train | func OptionSecret(s secrets.Secrets) Option {
return func(cfg *config) {
cfg.secret = s
}
} | go | {
"resource": ""
} |
q7470 | OptionEnforceFqConfig | train | func OptionEnforceFqConfig(f *fqconfig.FilterQueue) Option {
return func(cfg *config) {
cfg.fq = f
}
} | go | {
"resource": ""
} |
q7471 | OptionRuntimeConfiguration | train | func OptionRuntimeConfiguration(c *runtime.Configuration) Option {
return func(cfg *config) {
cfg.runtimeCfg = c
}
} | go | {
"resource": ""
} |
q7472 | OptionRuntimeErrorChannel | train | func OptionRuntimeErrorChannel(errorChannel chan *policy.RuntimeError) Option {
return func(cfg *config) {
cfg.runtimeErrorChannel = errorChannel
}
} | go | {
"resource": ""
} |
q7473 | OptionRemoteParameters | train | func OptionRemoteParameters(p *env.RemoteParameters) Option {
return func(cfg *config) {
cfg.remoteParameters = p
}
} | go | {
"resource": ""
} |
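All of these helpers feed the functional-options pattern consumed by `New` (shown earlier): each returns a closure that mutates the private `config` before `newTrireme` runs. Composed usage, with placeholder values for whatever the caller already has:

```go
ctrl := New(
	"my-server-id",
	constants.LocalServer,
	OptionCollector(collector.NewDefaultCollector()),
	OptionRuntimeConfiguration(&runtime.Configuration{}),
)
if ctrl == nil {
	// newTrireme returns nil when enforcer/supervisor setup fails.
	log.Fatal("controller initialization failed")
}
```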
q7474 | newTrireme | train | func newTrireme(c *config) TriremeController {
var err error
t := &trireme{
config: c,
enforcers: map[constants.ModeType]enforcer.Enforcer{},
supervisors: map[constants.ModeType]supervisor.Supervisor{},
puTypeToEnforcerType: map[common.PUType]constants.ModeType{},
locks: sync.Map{},
enablingTrace: make(chan *traceTrigger, 10),
}
zap.L().Debug("Creating Enforcers")
if err = t.newEnforcers(); err != nil {
zap.L().Error("Unable to create datapath enforcers", zap.Error(err))
return nil
}
zap.L().Debug("Creating Supervisors")
if err = t.newSupervisors(); err != nil {
zap.L().Error("Unable to start datapath supervisor", zap.Error(err))
return nil
}
if c.linuxProcess {
t.puTypeToEnforcerType[common.LinuxProcessPU] = constants.LocalServer
t.puTypeToEnforcerType[common.UIDLoginPU] = constants.LocalServer
t.puTypeToEnforcerType[common.HostPU] = constants.LocalServer
t.puTypeToEnforcerType[common.HostNetworkPU] = constants.LocalServer
t.puTypeToEnforcerType[common.SSHSessionPU] = constants.LocalServer
}
if t.config.mode == constants.RemoteContainer {
t.puTypeToEnforcerType[common.ContainerPU] = constants.RemoteContainer
t.puTypeToEnforcerType[common.KubernetesPU] = constants.RemoteContainer
}
if t.config.mode == constants.Sidecar {
t.puTypeToEnforcerType[common.ContainerPU] = constants.Sidecar
}
return t
} | go | {
"resource": ""
} |
q7475 | generateContextID | train | func (l *linuxProcessor) generateContextID(eventInfo *common.EventInfo) (string, error) {
puID := eventInfo.PUID
if eventInfo.Cgroup == "" {
return puID, nil
}
if !l.regStop.Match([]byte(eventInfo.Cgroup)) {
return "", fmt.Errorf("invalid pu id: %s", eventInfo.Cgroup)
}
puID = baseName(eventInfo.Cgroup, "/")
if eventInfo.PUType == common.SSHSessionPU {
return "ssh-" + puID, nil
}
return puID, nil
} | go | {
"resource": ""
} |
q7476 | pam_sm_open_session | train | func pam_sm_open_session(pamh *C.pam_handle_t, flags, argc int, argv **C.char) C.int {
C.initLog()
user := C.get_user(pamh)
service := C.get_service(pamh)
metadatamap := []string{}
userstring := "user=" + C.GoString(user)
metadatamap = append(metadatamap, userstring)
if groups, err := getGroupList(C.GoString(user)); err == nil {
metadatamap = append(metadatamap, groups...)
}
if service != nil {
metadatamap = append(metadatamap, "SessionType="+C.GoString(service))
} else {
metadatamap = append(metadatamap, "SessionType=login")
}
request := &common.EventInfo{
PUType: common.UIDLoginPU,
PUID: C.GoString(user),
Name: "login-" + C.GoString(user),
PID: int32(os.Getpid()),
Tags: metadatamap,
EventType: "start",
}
if C.is_root(user) == 1 {
// Do nothing; this is the root login account.
} else {
// Non-root user: notify the Trireme policy server.
slog, _ := syslog.New(syslog.LOG_ALERT|syslog.LOG_AUTH, "mypam")
defer func() {
_ = slog.Close()
}()
client, err := client.NewClient(common.TriremeSocket)
if err != nil {
return C.PAM_SUCCESS
}
slog.Alert("Calling Trireme") // nolit
if err := client.SendRequest(request); err != nil {
err = fmt.Errorf("Policy Server call failed %s", err)
_ = slog.Alert(err.Error())
return C.PAM_SESSION_ERR
}
}
return C.PAM_SUCCESS
} | go | {
"resource": ""
} |
q7477 | pam_sm_close_session | train | func pam_sm_close_session(pamh *C.pam_handle_t, flags, argc int, argv **C.char) C.int {
slog, _ := syslog.New(syslog.LOG_ALERT|syslog.LOG_AUTH, "mypam")
slog.Alert("pam_sm_close_session") // nolint
slog.Close() // nolint
return C.PAM_SUCCESS
} | go | {
"resource": ""
} |
q7478 | NewMockProcessManager | train | func NewMockProcessManager(ctrl *gomock.Controller) *MockProcessManager {
mock := &MockProcessManager{ctrl: ctrl}
mock.recorder = &MockProcessManagerMockRecorder{mock}
return mock
} | go | {
"resource": ""
} |
q7479 | KillRemoteEnforcer | train | func (m *MockProcessManager) KillRemoteEnforcer(contextID string, force bool) error {
ret := m.ctrl.Call(m, "KillRemoteEnforcer", contextID, force)
ret0, _ := ret[0].(error)
return ret0
} | go | {
"resource": ""
} |
q7480 | KillRemoteEnforcer | train | func (mr *MockProcessManagerMockRecorder) KillRemoteEnforcer(contextID, force interface{}) *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "KillRemoteEnforcer", reflect.TypeOf((*MockProcessManager)(nil).KillRemoteEnforcer), contextID, force)
} | go | {
"resource": ""
} |
q7481 | LaunchRemoteEnforcer | train | func (m *MockProcessManager) LaunchRemoteEnforcer(contextID string, refPid int, refNsPath, arg, statssecret, procMountPoint string) (bool, error) {
ret := m.ctrl.Call(m, "LaunchRemoteEnforcer", contextID, refPid, refNsPath, arg, statssecret, procMountPoint)
ret0, _ := ret[0].(bool)
ret1, _ := ret[1].(error)
return ret0, ret1
} | go | {
"resource": ""
} |
q7482 | LaunchRemoteEnforcer | train | func (mr *MockProcessManagerMockRecorder) LaunchRemoteEnforcer(contextID, refPid, refNsPath, arg, statssecret, procMountPoint interface{}) *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LaunchRemoteEnforcer", reflect.TypeOf((*MockProcessManager)(nil).LaunchRemoteEnforcer), contextID, refPid, refNsPath, arg, statssecret, procMountPoint)
} | go | {
"resource": ""
} |
q7483 | NewClaimsHeader | train | func NewClaimsHeader(opts ...Option) *ClaimsHeader {
c := &ClaimsHeader{}
for _, opt := range opts {
opt(c)
}
return c
} | go | {
"resource": ""
} |
q7484 | NewCompactPKI | train | func NewCompactPKI(keyPEM []byte, certPEM []byte, caPEM []byte, txKey []byte, compress claimsheader.CompressionType) (*CompactPKI, error) {
zap.L().Warn("DEPRECATED. secrets.NewCompactPKI is deprecated in favor of secrets.NewCompactPKIWithTokenCA")
return NewCompactPKIWithTokenCA(keyPEM, certPEM, caPEM, [][]byte{caPEM}, txKey, compress)
} | go | {
"resource": ""
} |
q7485 | PublicSecrets | train | func (p *CompactPKI) PublicSecrets() PublicSecrets {
return &CompactPKIPublicSecrets{
Type: PKICompactType,
Key: p.PrivateKeyPEM,
Certificate: p.PublicKeyPEM,
CA: p.AuthorityPEM,
Token: p.txKey,
TokenCAs: p.TokenKeyPEMs,
Compressed: p.Compressed,
}
} | go | {
"resource": ""
} |
q7486 | NewPUPolicyWithDefaults | train | func NewPUPolicyWithDefaults() *PUPolicy {
return NewPUPolicy("", AllowAll, nil, nil, nil, nil, nil, nil, nil, nil, 0, nil, nil, []string{})
} | go | {
"resource": ""
} |
q7487 | ManagementID | train | func (p *PUPolicy) ManagementID() string {
p.Lock()
defer p.Unlock()
return p.managementID
} | go | {
"resource": ""
} |
q7488 | TriremeAction | train | func (p *PUPolicy) TriremeAction() PUAction {
p.Lock()
defer p.Unlock()
return p.triremeAction
} | go | {
"resource": ""
} |
q7489 | SetTriremeAction | train | func (p *PUPolicy) SetTriremeAction(action PUAction) {
p.Lock()
defer p.Unlock()
p.triremeAction = action
} | go | {
"resource": ""
} |
q7490 | ApplicationACLs | train | func (p *PUPolicy) ApplicationACLs() IPRuleList {
p.Lock()
defer p.Unlock()
return p.applicationACLs.Copy()
} | go | {
"resource": ""
} |
q7491 | NetworkACLs | train | func (p *PUPolicy) NetworkACLs() IPRuleList {
p.Lock()
defer p.Unlock()
return p.networkACLs.Copy()
} | go | {
"resource": ""
} |
q7492 | DNSNameACLs | train | func (p *PUPolicy) DNSNameACLs() DNSRuleList {
p.Lock()
defer p.Unlock()
return p.DNSACLs.Copy()
} | go | {
"resource": ""
} |
q7493 | ReceiverRules | train | func (p *PUPolicy) ReceiverRules() TagSelectorList {
p.Lock()
defer p.Unlock()
return p.receiverRules.Copy()
} | go | {
"resource": ""
} |
q7494 | AddReceiverRules | train | func (p *PUPolicy) AddReceiverRules(t TagSelector) {
p.Lock()
defer p.Unlock()
p.receiverRules = append(p.receiverRules, t)
} | go | {
"resource": ""
} |
q7495 | TransmitterRules | train | func (p *PUPolicy) TransmitterRules() TagSelectorList {
p.Lock()
defer p.Unlock()
return p.transmitterRules.Copy()
} | go | {
"resource": ""
} |
q7496 | AddTransmitterRules | train | func (p *PUPolicy) AddTransmitterRules(t TagSelector) {
p.Lock()
defer p.Unlock()
p.transmitterRules = append(p.transmitterRules, t)
} | go | {
"resource": ""
} |
q7497 | Identity | train | func (p *PUPolicy) Identity() *TagStore {
p.Lock()
defer p.Unlock()
return p.identity.Copy()
} | go | {
"resource": ""
} |
q7498 | Annotations | train | func (p *PUPolicy) Annotations() *TagStore {
p.Lock()
defer p.Unlock()
return p.annotations.Copy()
} | go | {
"resource": ""
} |
q7499 | AddIdentityTag | train | func (p *PUPolicy) AddIdentityTag(k, v string) {
p.Lock()
defer p.Unlock()
p.identity.AppendKeyValue(k, v)
} | go | {
"resource": ""
} |