_id
stringlengths 2
7
| title
stringlengths 1
118
| partition
stringclasses 3
values | text
stringlengths 52
85.5k
| language
stringclasses 1
value | meta_information
dict |
|---|---|---|---|---|---|
q6900
|
EncodedActionString
|
train
|
// EncodedActionString encodes the policy's action/observe-action pair as a
// single digit "1".."9": accepted flows map to 1-3, rejected flows to 4-6,
// and everything else to 7-9, with the observe mode selecting the digit
// inside each group.
func (f *FlowPolicy) EncodedActionString() string {
	accepted := f.Action.Accepted() && !f.Action.Rejected()
	rejected := !f.Action.Accepted() && f.Action.Rejected()

	switch {
	case accepted && f.ObserveAction.ObserveContinue():
		return "1"
	case accepted && f.ObserveAction.ObserveApply():
		return "2"
	case accepted:
		return "3"
	case rejected && f.ObserveAction.ObserveContinue():
		return "4"
	case rejected && f.ObserveAction.ObserveApply():
		return "5"
	case rejected:
		return "6"
	case f.ObserveAction.ObserveContinue():
		return "7"
	case f.ObserveAction.ObserveApply():
		return "8"
	default:
		return "9"
	}
}
|
go
|
{
"resource": ""
}
|
q6901
|
EncodedStringToAction
|
train
|
// EncodedStringToAction is the inverse of EncodedActionString: it maps a
// digit "1".."9" back to the (ActionType, ObserveActionType) pair. Any
// other input yields an error.
func EncodedStringToAction(e string) (ActionType, ObserveActionType, error) {
	switch e {
	case "1":
		return Observe | Accept, ObserveContinue, nil
	case "2":
		return Observe | Accept, ObserveApply, nil
	case "3":
		return Accept, ObserveNone, nil
	case "4":
		return Observe | Reject, ObserveContinue, nil
	case "5":
		return Observe | Reject, ObserveApply, nil
	case "6":
		return Reject, ObserveNone, nil
	case "7":
		return Observe, ObserveContinue, nil
	case "8":
		return Observe, ObserveApply, nil
	case "9":
		return 0, ObserveNone, nil
	default:
		return 0, 0, errors.New("Invalid encoding")
	}
}
|
go
|
{
"resource": ""
}
|
q6902
|
Copy
|
train
|
// Copy returns a shallow copy of the DNS rule list. The builtin copy
// replaces the element-by-element loop; it lowers to memmove and is the
// idiomatic slice-duplication form.
func (l DNSRuleList) Copy() DNSRuleList {
	list := make(DNSRuleList, len(l))
	copy(list, l)
	return list
}
|
go
|
{
"resource": ""
}
|
q6903
|
Copy
|
train
|
// Copy returns a shallow copy of the IP rule list. Uses the builtin copy
// instead of a manual index loop (idiomatic, memmove-backed).
func (l IPRuleList) Copy() IPRuleList {
	list := make(IPRuleList, len(l))
	copy(list, l)
	return list
}
|
go
|
{
"resource": ""
}
|
q6904
|
Copy
|
train
|
// Copy returns a shallow copy of the tag selector list. Uses the builtin
// copy instead of a manual index loop (idiomatic, memmove-backed).
func (t TagSelectorList) Copy() TagSelectorList {
	list := make(TagSelectorList, len(t))
	copy(list, t)
	return list
}
|
go
|
{
"resource": ""
}
|
q6905
|
Copy
|
train
|
// Copy returns a new ExtendedMap holding the same key/value pairs as the
// receiver (a shallow copy).
func (s ExtendedMap) Copy() ExtendedMap {
	duplicate := make(ExtendedMap, len(s))
	for key, value := range s {
		duplicate[key] = value
	}
	return duplicate
}
|
go
|
{
"resource": ""
}
|
q6906
|
Get
|
train
|
// Get looks up key in the map and reports whether it was present.
func (s ExtendedMap) Get(key string) (string, bool) {
	if value, ok := s[key]; ok {
		return value, true
	}
	return "", false
}
|
go
|
{
"resource": ""
}
|
q6907
|
newRemoteEnforcer
|
train
|
// newRemoteEnforcer constructs a RemoteEnforcer. In this chunk it is a
// stub: it accepts the full dependency set (so callers keep a stable
// signature) but returns (nil, nil).
// NOTE(review): a nil enforcer with a nil error forces every caller to
// nil-check the result — confirm this stub is intentional in this build.
func newRemoteEnforcer(
	ctx context.Context,
	cancel context.CancelFunc,
	service packetprocessor.PacketProcessor,
	rpcHandle rpcwrapper.RPCServer,
	secret string,
	statsClient statsclient.StatsClient,
	collector statscollector.Collector,
	debugClient debugclient.DebugClient,
) (*RemoteEnforcer, error) {
	return nil, nil
}
|
go
|
{
"resource": ""
}
|
q6908
|
NewSecretsProxy
|
train
|
// NewSecretsProxy creates a SecretsProxy listening on the default secrets
// socket path, with freshly initialized driver, API-mapping, cgroup, and
// policy caches.
func NewSecretsProxy() *SecretsProxy {
	proxy := &SecretsProxy{
		socketPath:      constants.DefaultSecretsPath,
		drivers:         cache.NewCache("secrets driver cache"),
		apiCacheMapping: cache.NewCache("secrets api cache"),
		cgroupCache:     cache.NewCache("secrets pu cache"),
		policyCache:     cache.NewCache("policy cache"),
	}
	return proxy
}
|
go
|
{
"resource": ""
}
|
q6909
|
Run
|
train
|
// Run starts the secrets API server on the proxy's Unix socket and arranges
// for it to shut down when ctx is cancelled. Fix: the original discarded the
// error from net.ResolveUnixAddr, which would surface later as a confusing
// nil-address failure in ListenUnix; it is now checked explicitly.
func (s *SecretsProxy) Run(ctx context.Context) error {
	s.Lock()
	defer s.Unlock()

	// Start a custom listener on the secrets Unix socket.
	addr, err := net.ResolveUnixAddr("unix", s.socketPath)
	if err != nil {
		return fmt.Errorf("Unable to start API server: %s", err)
	}
	nl, err := net.ListenUnix("unix", addr)
	if err != nil {
		return fmt.Errorf("Unable to start API server: %s", err)
	}

	s.server = &http.Server{
		Handler: http.HandlerFunc(s.apiProcessor),
	}

	// Close the server when the context is cancelled.
	go func() {
		<-ctx.Done()
		s.server.Close() // nolint errcheck
	}()

	// Serve on a listener that exposes caller uid/gid/pid in RemoteAddr.
	go s.server.Serve(server.NewUIDListener(nl)) // nolint errcheck

	return nil
}
|
go
|
{
"resource": ""
}
|
q6910
|
Enforce
|
train
|
// Enforce implements the enforcer interface for the secrets proxy. For this
// proxy, enforcing a PU only means refreshing its service mappings; no
// datapath state is created here.
func (s *SecretsProxy) Enforce(puInfo *policy.PUInfo) error {
	return s.updateService(puInfo)
}
|
go
|
{
"resource": ""
}
|
q6911
|
apiProcessor
|
train
|
// apiProcessor authorizes and forwards a secrets API call from a local
// process. The caller is identified via the Unix-socket remote address
// (uid:gid:pid), mapped to its parent cgroup, authorized against the cached
// API permissions and PU identity, and finally proxied through the per-PU
// secrets driver.
func (s *SecretsProxy) apiProcessor(w http.ResponseWriter, r *http.Request) {
	zap.L().Info("Processing secrets call",
		zap.String("URI", r.RequestURI),
		zap.String("Host", r.Host),
		zap.String("Remote address", r.RemoteAddr),
	)

	// The remote address contains the uid, gid and pid of the calling
	// process, because of the specific UID socket listener we are using.
	parts := strings.Split(r.RemoteAddr, ":")
	if len(parts) != 3 {
		httpError(w, fmt.Errorf("Bad Remote Address"), "Unauthorized request", http.StatusUnauthorized)
		return
	}

	// We only care about the originating PID (third component).
	pid := parts[2]
	cgroup, err := findParentCgroup(pid)
	if err != nil {
		httpError(w, err, "Unauthorized client - not the first process", http.StatusUnauthorized)
		return
	}

	data, err := s.apiCacheMapping.Get(cgroup)
	if err != nil {
		httpError(w, err, "Unauthorized client", http.StatusUnauthorized)
		return
	}

	// Find the corresponding API cache with the access permissions for
	// this particular client.
	apiCache, ok := data.(*urisearch.APICache)
	if !ok {
		httpError(w, fmt.Errorf("Invalid data types"), "Internal server error - invalid type", http.StatusInternalServerError)
		return
	}

	// Find the identity (scopes) of the PU.
	policyData, err := s.policyCache.Get(cgroup)
	if err != nil {
		httpError(w, err, "Unauthorized client", http.StatusUnauthorized)
		return
	}
	scopes, ok := policyData.([]string)
	if !ok {
		httpError(w, fmt.Errorf("Invalid data types"), "Internal server error - invalid type", http.StatusInternalServerError)
		return
	}

	// Search the API cache for rules matching this method/URI/scopes.
	found, _ := apiCache.FindAndMatchScope(r.Method, r.RequestURI, scopes)
	if !found {
		httpError(w, fmt.Errorf("Unauthorized service"), "Unauthorized access", http.StatusUnauthorized)
		return
	}

	// Retrieve the secrets driver registered for this client's cgroup.
	driverData, err := s.drivers.Get(cgroup)
	if err != nil {
		httpError(w, err, "No secrets driver for this client", http.StatusBadRequest)
		return
	}
	driver, ok := driverData.(SecretsDriver)
	if !ok {
		httpError(w, fmt.Errorf("driver not found"), "Bad driver", http.StatusInternalServerError)
		return
	}

	// Transform the request based on the driver (e.g. inject credentials).
	if err := driver.Transform(r); err != nil {
		httpError(w, err, "Secrets driver error", http.StatusInternalServerError)
		return
	}

	// Forward the request. TODO .. we need to massage the return here.
	forwarder, err := forward.New(forward.RoundTripper(driver.Transport()))
	if err != nil {
		httpError(w, err, "Failed to configure forwarder", http.StatusInternalServerError)
		return
	}
	forwarder.ServeHTTP(w, r)
}
|
go
|
{
"resource": ""
}
|
q6912
|
ValidateOriginProcess
|
train
|
// ValidateOriginProcess verifies that the process identified by pid has a
// parent in the root net_cls cgroup (i.e. it is the first process of its
// unit) and, on success, returns the parent cgroup of the calling process.
// Fix: the local variable previously named `process` shadowed the imported
// `process` package inside this function; renamed to `proc`.
func ValidateOriginProcess(pid string) (string, error) {
	pidNumber, err := strconv.Atoi(pid)
	if err != nil {
		return "", fmt.Errorf("Invalid PID %s", pid)
	}

	proc, err := process.NewProcess(int32(pidNumber))
	if err != nil {
		return "", fmt.Errorf("Process not found: %s", err)
	}

	ppid, err := proc.Ppid()
	if err != nil {
		return "", fmt.Errorf("Parent process not found: %s", err)
	}

	parentPidCgroup, err := processCgroups(strconv.Itoa(int(ppid)), "net_cls,net_prio")
	if err != nil {
		return "", fmt.Errorf("Parent cgroup not found: %s", err)
	}

	// The parent must live in the root cgroup; otherwise the caller is not
	// the first process of the unit and is rejected.
	if parentPidCgroup != "/" {
		return "", fmt.Errorf("Parent is not root cgroup - authorization fail")
	}

	return findParentCgroup(pid)
}
|
go
|
{
"resource": ""
}
|
q6913
|
findParentCgroup
|
train
|
// findParentCgroup returns the parent of the net_cls cgroup of pid. For
// docker-managed cgroups ("/docker/<containerID>") it returns the short
// 12-character container ID instead of a path.
//
// Fixes: (1) the docker check was placed after the generic parent-path
// scan, which returns on the "/" following "docker" — making the docker
// branch unreachable; it now runs first. (2) the old guard `len > 8`
// allowed `cgroup[8:20]` to panic when 8 < len < 20; the bound is now >= 20.
func findParentCgroup(pid string) (string, error) {
	cgroup, err := processCgroups(pid, "net_cls,net_prio")
	if err != nil {
		return "", fmt.Errorf("Invalid cgroup: %s", err)
	}

	// Docker cgroups: return the short container ID (first 12 hex chars).
	if strings.HasPrefix(cgroup, "/docker/") && len(cgroup) >= 20 {
		return cgroup[8:20], nil
	}

	// Otherwise strip the last path component to get the parent cgroup.
	// Index 0 is excluded so the root "/" is never treated as a separator.
	for i := len(cgroup) - 1; i > 0; i-- {
		if cgroup[i:i+1] == "/" {
			return cgroup[:i], nil
		}
	}

	return "", fmt.Errorf("Cannot find parent cgroup: %s", pid)
}
|
go
|
{
"resource": ""
}
|
q6914
|
NewSupervisor
|
train
|
// NewSupervisor creates a new supervisor Config backed by an iptables
// controller. Both the collector and the enforcer instance are required,
// and the enforcer must expose non-nil filter queues.
func NewSupervisor(
	collector collector.EventCollector,
	enforcerInstance enforcer.Enforcer,
	mode constants.ModeType,
	cfg *runtime.Configuration,
	p packetprocessor.PacketProcessor,
) (Supervisor, error) {

	if collector == nil || enforcerInstance == nil {
		return nil, errors.New("Invalid parameters")
	}

	fq := enforcerInstance.GetFilterQueue()
	if fq == nil {
		return nil, errors.New("enforcer filter queues cannot be nil")
	}

	controller, err := iptablesctrl.NewInstance(fq, mode, cfg)
	if err != nil {
		return nil, fmt.Errorf("unable to initialize supervisor controllers: %s", err)
	}

	supervisor := &Config{
		mode:           mode,
		impl:           controller,
		versionTracker: cache.NewCache("SupVersionTracker"),
		collector:      collector,
		filterQueue:    fq,
		service:        p,
		cfg:            cfg,
	}
	return supervisor, nil
}
|
go
|
{
"resource": ""
}
|
q6915
|
Run
|
train
|
// Run starts the supervisor: it launches the iptables implementer, pushes
// the current target-network configuration, and initializes the optional
// packet-processing service.
func (s *Config) Run(ctx context.Context) error {
	s.Lock()
	defer s.Unlock()

	err := s.impl.Run(ctx)
	if err != nil {
		return fmt.Errorf("unable to start the implementer: %s", err)
	}

	if err = s.impl.SetTargetNetworks(s.cfg); err != nil {
		return err
	}

	if s.service == nil {
		return nil
	}
	s.service.Initialize(s.filterQueue, s.impl.ACLProvider())
	return nil
}
|
go
|
{
"resource": ""
}
|
q6916
|
Supervise
|
train
|
// Supervise creates or updates the rules for a processing unit. A PU not
// yet present in the version tracker is created; an existing PU is updated.
func (s *Config) Supervise(contextID string, pu *policy.PUInfo) error {
	if pu == nil || pu.Policy == nil || pu.Runtime == nil {
		return errors.New("Invalid PU or policy info")
	}

	// Cache hit means the PU already exists: update in place. A miss means
	// this is a new PU and its rules must be created from scratch.
	if _, err := s.versionTracker.Get(contextID); err != nil {
		return s.doCreatePU(contextID, pu)
	}
	return s.doUpdatePU(contextID, pu)
}
|
go
|
{
"resource": ""
}
|
q6917
|
Unsupervise
|
train
|
// Unsupervise removes all rules for a processing unit and drops it from the
// version tracker. Rule-deletion and cache-cleanup failures are logged but
// do not fail the call. Fix: the bare type assertion on the cached value
// could panic the supervisor on a corrupted cache entry; it is now checked.
func (s *Config) Unsupervise(contextID string) error {
	s.Lock()
	defer s.Unlock()

	data, err := s.versionTracker.Get(contextID)
	if err != nil {
		return fmt.Errorf("cannot find policy version: %s", err)
	}

	cfg, ok := data.(*cacheData)
	if !ok {
		return fmt.Errorf("invalid data type in version tracker for %s", contextID)
	}

	port := cfg.containerInfo.Policy.ServicesListeningPort()

	// If local server, delete pu specific chains in Trireme/NetworkSvc/Hostmode chains.
	puType := cfg.containerInfo.Runtime.PUType()

	// TODO (varks): Similar to configureRules and UpdateRules, DeleteRules should take
	// only contextID and *policy.PUInfo as function parameters.
	if err := s.impl.DeleteRules(cfg.version, contextID, cfg.tcpPorts, cfg.udpPorts, cfg.mark, cfg.username, port, puType); err != nil {
		zap.L().Warn("Some rules were not deleted during unsupervise", zap.Error(err))
	}

	if err := s.versionTracker.Remove(contextID); err != nil {
		zap.L().Warn("Failed to clean the rule version cache", zap.Error(err))
	}

	return nil
}
|
go
|
{
"resource": ""
}
|
q6918
|
CleanUp
|
train
|
// CleanUp removes all supervisor-installed state by delegating to the
// iptables implementer, under the supervisor lock.
func (s *Config) CleanUp() error {
	s.Lock()
	err := s.impl.CleanUp()
	s.Unlock()
	return err
}
|
go
|
{
"resource": ""
}
|
q6919
|
SetTargetNetworks
|
train
|
// SetTargetNetworks forwards the new target-network configuration to the
// iptables implementer, under the supervisor lock.
func (s *Config) SetTargetNetworks(cfg *runtime.Configuration) error {
	s.Lock()
	err := s.impl.SetTargetNetworks(cfg)
	s.Unlock()
	return err
}
|
go
|
{
"resource": ""
}
|
q6920
|
doUpdatePU
|
train
|
// doUpdatePU applies an updated policy to an already-supervised PU.
// NOTE: locking is deliberately manual (no defer) because Unsupervise,
// called on the error path, acquires the same lock — deferring the unlock
// here would deadlock.
func (s *Config) doUpdatePU(contextID string, pu *policy.PUInfo) error {
	s.Lock()
	// Atomically bump the cached rule version; `revert` is a package-level
	// modifier function not visible in this chunk.
	data, err := s.versionTracker.LockedModify(contextID, revert, 1)
	if err != nil {
		return fmt.Errorf("unable to find pu %s in cache: %s", contextID, err)
	}
	c := data.(*cacheData)
	if err := s.impl.UpdateRules(c.version, contextID, pu, c.containerInfo); err != nil {
		// Try to clean up, even though this is fatal and it will most likely fail
		s.Unlock()
		s.Unsupervise(contextID) // nolint
		return err
	}
	// Updated the policy in the cached processing unit.
	c.containerInfo.Policy = pu.Policy
	s.Unlock()
	return nil
}
|
go
|
{
"resource": ""
}
|
q6921
|
EnableIPTablesPacketTracing
|
train
|
// EnableIPTablesPacketTracing installs temporary trace rules for a PU and
// removes them once the interval elapses (or the context is cancelled,
// whichever comes first).
//
// Fix: the cleanup goroutine previously looped forever; after ctx was
// cancelled its `<-ctx.Done()` case (with no return) fired on every select
// iteration, busy-spinning on the closed channel and leaking the goroutine.
// The goroutine now flushes the rules exactly once and exits.
func (s *Config) EnableIPTablesPacketTracing(ctx context.Context, contextID string, interval time.Duration) error {

	data, err := s.versionTracker.Get(contextID)
	if err != nil {
		return fmt.Errorf("cannot find policy version: %s", err)
	}

	cfg := data.(*cacheData)
	iptablesRules := debugRules(cfg, s.mode)
	ipt := s.impl.ACLProvider()

	for _, rule := range iptablesRules {
		if err := ipt.Insert(rule[0], rule[1], 1, rule[2:]...); err != nil {
			zap.L().Error("Unable to install rule", zap.Error(err))
		}
	}

	// Flush the debug rules after the interval (or on cancellation), then
	// terminate the goroutine.
	go func() {
		select {
		case <-ctx.Done():
		case <-time.After(interval):
		}
		for _, rule := range iptablesRules {
			if err := ipt.Delete(rule[0], rule[1], rule[2:]...); err != nil {
				zap.L().Debug("Unable to delete trace rules", zap.Error(err))
			}
		}
	}()

	return nil
}
|
go
|
{
"resource": ""
}
|
q6922
|
New
|
train
|
// New creates a TokenAccessor backed by a JWT token engine configured with
// the given server identity, token validity window, and secrets.
func New(serverID string, validity time.Duration, secret secrets.Secrets) (TokenAccessor, error) {
	engine, err := tokens.NewJWT(validity, serverID, secret)
	if err != nil {
		return nil, err
	}

	accessor := &tokenAccessor{
		tokens:   engine,
		serverID: serverID,
		validity: validity,
	}
	return accessor, nil
}
|
go
|
{
"resource": ""
}
|
q6923
|
SetToken
|
train
|
// SetToken replaces the accessor's token engine with a new JWT engine
// built from the given identity, validity, and secrets. The swap happens
// under the accessor lock; on failure the old engine is kept.
func (t *tokenAccessor) SetToken(serverID string, validity time.Duration, secret secrets.Secrets) error {
	engine, err := tokens.NewJWT(validity, serverID, secret)
	if err != nil {
		return err
	}

	t.Lock()
	t.tokens = engine
	t.Unlock()
	return nil
}
|
go
|
{
"resource": ""
}
|
q6924
|
CreateAckPacketToken
|
train
|
// CreateAckPacketToken creates a signed ACK token carrying the local and
// remote connection contexts.
func (t *tokenAccessor) CreateAckPacketToken(context *pucontext.PUContext, auth *connection.AuthInfo) ([]byte, error) {
	ackClaims := &tokens.ConnectionClaims{
		LCL: auth.LocalContext,
		RMT: auth.RemoteContext,
	}

	signed, err := t.getToken().CreateAndSign(true, ackClaims, auth.LocalContext, claimsheader.NewClaimsHeader())
	if err != nil {
		return []byte{}, err
	}
	return signed, nil
}
|
go
|
{
"resource": ""
}
|
q6925
|
CreateSynPacketToken
|
train
|
// CreateSynPacketToken returns a SYN token for the connection, reusing the
// PU's cached token (with a re-randomized nonce) when the service context
// is unchanged, and creating/signing a fresh one otherwise.
//
// Fix: on CreateAndSign failure the original returned `[]byte{}, nil`,
// silently handing callers an empty token with no error; it now propagates
// the error.
func (t *tokenAccessor) CreateSynPacketToken(context *pucontext.PUContext, auth *connection.AuthInfo) (token []byte, err error) {

	token, serviceContext, err := context.GetCachedTokenAndServiceContext()
	if err == nil && bytes.Equal(auth.LocalServiceContext, serviceContext) {
		// Randomize the nonce and send it
		err = t.getToken().Randomize(token, auth.LocalContext)
		if err == nil {
			return token, nil
		}
		// If there is an error, let's try to create a new one
	}

	claims := &tokens.ConnectionClaims{
		T:  context.Identity(),
		EK: auth.LocalServiceContext,
	}

	if token, err = t.getToken().CreateAndSign(false, claims, auth.LocalContext, claimsheader.NewClaimsHeader()); err != nil {
		return []byte{}, err
	}

	context.UpdateCachedTokenAndServiceContext(token, auth.LocalServiceContext)

	return token, nil
}
|
go
|
{
"resource": ""
}
|
q6926
|
CreateSynAckPacketToken
|
train
|
// CreateSynAckPacketToken creates a signed SYN-ACK token carrying the PU
// identity, the remote context, and the local service context.
//
// Fix: on CreateAndSign failure the original returned `[]byte{}, nil`,
// swallowing the error; it now propagates it.
func (t *tokenAccessor) CreateSynAckPacketToken(context *pucontext.PUContext, auth *connection.AuthInfo, claimsHeader *claimsheader.ClaimsHeader) (token []byte, err error) {

	claims := &tokens.ConnectionClaims{
		T:   context.Identity(),
		RMT: auth.RemoteContext,
		EK:  auth.LocalServiceContext,
	}

	if token, err = t.getToken().CreateAndSign(false, claims, auth.LocalContext, claimsHeader); err != nil {
		return []byte{}, err
	}

	return token, nil
}
|
go
|
{
"resource": ""
}
|
q6927
|
ParsePacketToken
|
train
|
// ParsePacketToken validates and decodes a packet token, then updates the
// auth info with the remote public key, nonce, context ID, and service
// context extracted from it.
func (t *tokenAccessor) ParsePacketToken(auth *connection.AuthInfo, data []byte) (*tokens.ConnectionClaims, error) {

	// Validate the certificate and parse the token.
	claims, nonce, cert, err := t.getToken().Decode(false, data, auth.RemotePublicKey)
	if err != nil {
		return nil, err
	}

	// A valid remote context ID is mandatory.
	if claims.T == nil {
		return nil, errors.New("no claims found")
	}
	remoteContextID, found := claims.T.Get(enforcerconstants.TransmitterLabel)
	if !found {
		return nil, errors.New("no transmitter label")
	}

	// Record the remote identity for the rest of the handshake.
	auth.RemotePublicKey = cert
	auth.RemoteContext = nonce
	auth.RemoteContextID = remoteContextID
	auth.RemoteServiceContext = claims.EK

	return claims, nil
}
|
go
|
{
"resource": ""
}
|
q6928
|
ParseAckToken
|
train
|
// ParseAckToken validates and decodes an ACK token and verifies that the
// nonces it carries match this connection's stored local/remote contexts.
//
// Fixes: (1) the nil-checked `gt` is now actually used for Decode — the
// original called t.getToken() a second time, which could return a
// different (or nil) engine after the check; (2) bytes.Equal replaces the
// bytes.Compare != 0 idiom.
func (t *tokenAccessor) ParseAckToken(auth *connection.AuthInfo, data []byte) (*tokens.ConnectionClaims, error) {
	gt := t.getToken()
	if gt == nil {
		return nil, errors.New("token is nil")
	}
	if auth == nil {
		return nil, errors.New("auth is nil")
	}

	// Validate the certificate and parse the token.
	claims, _, _, err := gt.Decode(true, data, auth.RemotePublicKey)
	if err != nil {
		return nil, err
	}

	// Compare the incoming random contexts with the stored contexts.
	if !bytes.Equal(claims.RMT, auth.LocalContext) || !bytes.Equal(claims.LCL, auth.RemoteContext) {
		return nil, errors.New("failed to match context in ack packet")
	}

	return claims, nil
}
|
go
|
{
"resource": ""
}
|
q6929
|
NewMockEventCollector
|
train
|
// NewMockEventCollector creates a new mock instance wired to the given
// gomock controller, with its call recorder attached.
func NewMockEventCollector(ctrl *gomock.Controller) *MockEventCollector {
	m := &MockEventCollector{ctrl: ctrl}
	m.recorder = &MockEventCollectorMockRecorder{m}
	return m
}
|
go
|
{
"resource": ""
}
|
q6930
|
CollectFlowEvent
|
train
|
// CollectFlowEvent indicates an expected call of CollectFlowEvent.
// gomock-generated recorder method; regenerate rather than edit by hand.
func (mr *MockEventCollectorMockRecorder) CollectFlowEvent(record interface{}) *gomock.Call {
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CollectFlowEvent", reflect.TypeOf((*MockEventCollector)(nil).CollectFlowEvent), record)
}
|
go
|
{
"resource": ""
}
|
q6931
|
NewClient
|
train
|
// NewClient configures the given TokenVerifier as an OIDC client: it
// auto-discovers the provider configuration, builds the ID-token verifier
// and OAuth2 client config, and applies provider-specific quirks.
// NOTE(review): the package-level stateCache/tokenCache are lazily
// initialized here without synchronization — presumably NewClient is only
// called from a single goroutine at startup; confirm, or guard with
// sync.Once.
func NewClient(ctx context.Context, v *TokenVerifier) (*TokenVerifier, error) {
	// Initialize caches only once if they are nil.
	if stateCache == nil {
		stateCache = gcache.New(2048).LRU().Expiration(120 * time.Second).Build()
	}
	if tokenCache == nil {
		tokenCache = gcache.New(2048).LRU().Build()
	}

	// Create a new generic OIDC provider based on the provider URL.
	// The library will auto-discover the configuration of the provider.
	// If it is not a compliant provider we report an error here.
	provider, err := oidc.NewProvider(ctx, v.ProviderURL)
	if err != nil {
		return nil, fmt.Errorf("Failed to initialize provider: %s", err)
	}
	oidConfig := &oidc.Config{
		ClientID:          v.ClientID,
		SkipClientIDCheck: true,
	}
	v.oauthVerifier = provider.Verifier(oidConfig)

	// Always request the standard scopes; append any extra caller scopes
	// that are not already in the standard set.
	scopes := []string{oidc.ScopeOpenID, "profile", "email"}
	for _, scope := range v.Scopes {
		if scope != oidc.ScopeOpenID && scope != "profile" && scope != "email" {
			scopes = append(scopes, scope)
		}
	}

	v.clientConfig = &oauth2.Config{
		ClientID:     v.ClientID,
		ClientSecret: v.ClientSecret,
		Endpoint:     provider.Endpoint(),
		RedirectURL:  v.RedirectURL,
		Scopes:       scopes,
	}

	// Google does not honor the OIDC standard to refresh tokens
	// with a proper scope. Instead it requires a prompt parameter
	// to be passed. In order to deal with this, we have to detect
	// Google as the OIDC provider and pass the extra parameters.
	if strings.Contains(v.ProviderURL, "accounts.google.com") {
		v.googleHack = true
	}

	return v, nil
}
|
go
|
{
"resource": ""
}
|
q6932
|
Callback
|
train
|
// Callback handles the OIDC authorization-code callback. It validates the
// state parameter against the state cache, exchanges the code for an OAuth2
// token, extracts and caches the raw ID token, and returns (rawIDToken,
// originURL, httpStatus, error) so the caller can redirect the client back
// to its original request.
func (v *TokenVerifier) Callback(r *http.Request) (string, string, int, error) {

	// We first validate that the callback state matches the original redirect
	// state. We clean up the cache once it is validated. During this process
	// we recover the original URL that initiated the protocol. This allows
	// us to redirect the client to their original request.
	receivedState := r.URL.Query().Get("state")
	originURL, err := stateCache.Get(receivedState)
	if err != nil {
		return "", "", http.StatusBadRequest, fmt.Errorf("bad state")
	}
	stateCache.Remove(receivedState)

	// We exchange the authorization code with an OAUTH token. This is the main
	// step where the OAUTH provider will match the code to the token.
	oauth2Token, err := v.clientConfig.Exchange(r.Context(), r.URL.Query().Get("code"), oauth2.AccessTypeOffline)
	if err != nil {
		return "", "", http.StatusInternalServerError, fmt.Errorf("bad code: %s", err)
	}

	// We extract the rawID token.
	rawIDToken, ok := oauth2Token.Extra("id_token").(string)
	if !ok {
		return "", "", http.StatusInternalServerError, fmt.Errorf("bad ID")
	}

	// Cache the token source keyed by the raw ID token. The cache entry is
	// kept one hour past the token expiry so refreshes can still find it.
	if err := tokenCache.SetWithExpire(
		rawIDToken,
		&clientData{
			tokenSource: v.clientConfig.TokenSource(context.Background(), oauth2Token),
			expiry:      oauth2Token.Expiry,
		},
		time.Until(oauth2Token.Expiry.Add(3600*time.Second)),
	); err != nil {
		return "", "", http.StatusInternalServerError, fmt.Errorf("failed to insert token in the cache: %s", err)
	}

	return rawIDToken, originURL.(string), http.StatusTemporaryRedirect, nil
}
|
go
|
{
"resource": ""
}
|
q6933
|
AddRule
|
train
|
// AddRule routes a rule into the matching sub-cache: observe-apply rules go
// to the observe cache, accepted rules to the accept cache, and everything
// else to the reject cache.
func (c *ACLCache) AddRule(rule policy.IPRule) (err error) {
	switch {
	case rule.Policy.ObserveAction.ObserveApply():
		return c.observe.addRule(rule)
	case rule.Policy.Action.Accepted():
		return c.accept.addRule(rule)
	default:
		return c.reject.addRule(rule)
	}
}
|
go
|
{
"resource": ""
}
|
q6934
|
AddRuleList
|
train
|
// AddRuleList inserts every rule in the list and then reverse-sorts the
// three sub-caches so longest-prefix matches take precedence. Insertion
// stops on the first error.
func (c *ACLCache) AddRuleList(rules policy.IPRuleList) (err error) {
	for _, r := range rules {
		if err = c.AddRule(r); err != nil {
			return err
		}
	}

	c.reject.reverseSort()
	c.accept.reverseSort()
	c.observe.reverseSort()
	return nil
}
|
go
|
{
"resource": ""
}
|
q6935
|
GetMatchingAction
|
train
|
// GetMatchingAction returns the report and packet policies matching ip/port.
// Lookup precedence is reject, then accept, then observe-only rules: the
// first category that matches (nil error) returns immediately. The `report`
// accumulator is threaded through the successive lookups so an observe
// match found in an earlier category is preserved for reporting even when
// the packet decision comes from a later one. If nothing matches, both
// policies default to the package catch-all and a "no match" error is
// returned.
func (c *ACLCache) GetMatchingAction(ip []byte, port uint16) (report *policy.FlowPolicy, packet *policy.FlowPolicy, err error) {

	report, packet, err = c.reject.getMatchingAction(ip, port, report)
	if err == nil {
		return
	}

	report, packet, err = c.accept.getMatchingAction(ip, port, report)
	if err == nil {
		return
	}

	report, packet, err = c.observe.getMatchingAction(ip, port, report)
	if err == nil {
		return
	}

	// No category matched: fall back to the catch-all policy for both the
	// report and the packet decisions.
	if report == nil {
		report = catchAllPolicy
	}

	if packet == nil {
		packet = catchAllPolicy
	}

	return report, packet, errors.New("no match")
}
|
go
|
{
"resource": ""
}
|
q6936
|
Enforce
|
train
|
// Enforce launches (or reuses) the remote enforcer process for the PU,
// initializes it on first launch, and ships the PU policy plus the current
// secrets over RPC. On any failure after launch the remote enforcer is
// killed so no half-configured process is left running.
func (s *ProxyInfo) Enforce(contextID string, puInfo *policy.PUInfo) error {

	// initEnforcer is true when a new remote process was actually launched
	// (as opposed to an already-running one being reused).
	initEnforcer, err := s.prochdl.LaunchRemoteEnforcer(
		contextID,
		puInfo.Runtime.Pid(),
		puInfo.Runtime.NSPath(),
		s.commandArg,
		s.statsServerSecret,
		s.procMountPoint,
	)
	if err != nil {
		return err
	}

	zap.L().Debug("Called enforce and launched process", zap.String("contextID", contextID),
		zap.Reflect("Policy Object", puInfo))

	if initEnforcer {
		if err := s.initRemoteEnforcer(contextID); err != nil {
			s.prochdl.KillRemoteEnforcer(contextID, true) // nolint errcheck
			return err
		}
	}

	enforcerPayload := &rpcwrapper.EnforcePayload{
		ContextID: contextID,
		Policy:    puInfo.Policy.ToPublicPolicy(),
	}

	//Only the secrets need to be under lock. They can change async to the enforce call from Updatesecrets
	s.RLock()
	enforcerPayload.Secrets = s.Secrets.PublicSecrets()
	s.RUnlock()
	request := &rpcwrapper.Request{
		Payload: enforcerPayload,
	}

	if err := s.rpchdl.RemoteCall(contextID, remoteenforcer.Enforce, request, &rpcwrapper.Response{}); err != nil {
		s.prochdl.KillRemoteEnforcer(contextID, true) // nolint errcheck
		return fmt.Errorf("failed to send message to remote enforcer: %s", err)
	}

	return nil
}
|
go
|
{
"resource": ""
}
|
q6937
|
Unenforce
|
train
|
// Unenforce tells the remote enforcer to stop enforcing the PU and then
// kills the remote process. An RPC failure is logged but does not prevent
// the kill.
func (s *ProxyInfo) Unenforce(contextID string) error {
	req := &rpcwrapper.Request{
		Payload: &rpcwrapper.UnEnforcePayload{ContextID: contextID},
	}

	err := s.rpchdl.RemoteCall(contextID, remoteenforcer.Unenforce, req, &rpcwrapper.Response{})
	if err != nil {
		zap.L().Error("failed to send message to remote enforcer", zap.Error(err))
	}

	return s.prochdl.KillRemoteEnforcer(contextID, true)
}
|
go
|
{
"resource": ""
}
|
q6938
|
CleanUp
|
train
|
// CleanUp kills every remote enforcer known to the RPC handler, collecting
// failures into a single aggregate error.
//
// Fix: the aggregated message previously read " contextID:<err>" without
// the actual context ID; it now includes it, matching the format used by
// SetTargetNetworks. Also drops a stale commented-out line.
func (s *ProxyInfo) CleanUp() error {
	var allErrors string
	for _, contextID := range s.rpchdl.ContextList() {
		if err := s.prochdl.KillRemoteEnforcer(contextID, false); err != nil {
			allErrors = allErrors + " contextID " + contextID + ":" + err.Error()
		}
	}

	if len(allErrors) > 0 {
		return fmt.Errorf("Remote enforcers failed: %s", allErrors)
	}

	return nil
}
|
go
|
{
"resource": ""
}
|
q6939
|
EnableDatapathPacketTracing
|
train
|
// EnableDatapathPacketTracing asks the remote enforcer for contextID to
// trace datapath packets in the given direction for the given interval.
func (s *ProxyInfo) EnableDatapathPacketTracing(contextID string, direction packettracing.TracingDirection, interval time.Duration) error {
	payload := &rpcwrapper.EnableDatapathPacketTracingPayLoad{
		Direction: direction,
		Interval:  interval,
		ContextID: contextID,
	}
	req := &rpcwrapper.Request{Payload: payload}
	resp := &rpcwrapper.Response{}

	err := s.rpchdl.RemoteCall(contextID, remoteenforcer.EnableDatapathPacketTracing, req, resp)
	if err != nil {
		return fmt.Errorf("unable to enable datapath packet tracing %s -- %s", err, resp.Status)
	}

	return nil
}
|
go
|
{
"resource": ""
}
|
q6940
|
SetTargetNetworks
|
train
|
// SetTargetNetworks pushes the new target-network configuration to every
// remote enforcer, then stores it locally. Per-enforcer RPC failures are
// accumulated and reported as a single error after all have been tried.
func (s *ProxyInfo) SetTargetNetworks(cfg *runtime.Configuration) error {
	req := &rpcwrapper.Request{
		Payload: &rpcwrapper.SetTargetNetworksPayload{Configuration: cfg},
	}
	resp := &rpcwrapper.Response{}

	var failures string
	for _, id := range s.rpchdl.ContextList() {
		if err := s.rpchdl.RemoteCall(id, remoteenforcer.SetTargetNetworks, req, resp); err != nil {
			failures += " contextID " + id + ":" + err.Error()
		}
	}

	// Record the configuration locally regardless of remote failures.
	s.Lock()
	s.cfg = cfg
	s.Unlock()

	if len(failures) > 0 {
		return fmt.Errorf("Remote enforcers failed: %s", failures)
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q6941
|
Run
|
train
|
// Run starts the statistics-collection RPC server for the remote enforcers
// on the stats Unix channel. The server goroutine lives until ctx is done.
func (s *ProxyInfo) Run(ctx context.Context) error {
	statsServer := rpcwrapper.NewRPCWrapper()
	rpcServer := &StatsServer{
		rpchdl:    statsServer,
		collector: s.collector,
		secret:    s.statsServerSecret,
	}

	// Start the server for statistics collection.
	go statsServer.StartServer(ctx, "unix", constants.StatsChannel, rpcServer) // nolint

	return nil
}
|
go
|
{
"resource": ""
}
|
q6942
|
initRemoteEnforcer
|
train
|
// initRemoteEnforcer sends the initial configuration (filter queues, auth
// settings, secrets, target networks) to a freshly launched remote
// enforcer.
//
// Fix: s.Secrets was read without the read lock, while Enforce guards the
// same field with RLock because UpdateSecrets mutates it concurrently; the
// read is now locked consistently.
func (s *ProxyInfo) initRemoteEnforcer(contextID string) error {
	resp := &rpcwrapper.Response{}

	// Secrets can change asynchronously via UpdateSecrets; snapshot them
	// under the read lock, consistent with Enforce.
	s.RLock()
	publicSecrets := s.Secrets.PublicSecrets()
	s.RUnlock()

	request := &rpcwrapper.Request{
		Payload: &rpcwrapper.InitRequestPayload{
			FqConfig:               s.filterQueue,
			MutualAuth:             s.mutualAuth,
			Validity:               s.validity,
			ServerID:               s.serverID,
			ExternalIPCacheTimeout: s.ExternalIPCacheTimeout,
			PacketLogs:             s.packetLogs,
			Secrets:                publicSecrets,
			Configuration:          s.cfg,
		},
	}

	return s.rpchdl.RemoteCall(contextID, remoteenforcer.InitEnforcer, request, resp)
}
|
go
|
{
"resource": ""
}
|
q6943
|
NewProxyEnforcer
|
train
|
// NewProxyEnforcer creates an enforcer that proxies all datapath work to
// per-PU remote enforcer processes over RPC. A random per-run secret
// protects the stats channel between the remotes and this process.
func NewProxyEnforcer(
	mutualAuth bool,
	filterQueue *fqconfig.FilterQueue,
	collector collector.EventCollector,
	secrets secrets.Secrets,
	serverID string,
	validity time.Duration,
	cmdArg string,
	procMountPoint string,
	ExternalIPCacheTimeout time.Duration,
	packetLogs bool,
	cfg *runtime.Configuration,
	runtimeError chan *policy.RuntimeError,
	remoteParameters *env.RemoteParameters,
) enforcer.Enforcer {
	statsServersecret, err := crypto.GenerateRandomString(32)
	if err != nil {
		// There is a very small chance of this happening we will log an error here.
		zap.L().Error("Failed to generate random secret for stats reporting", zap.Error(err))
		// Fall back to the current time as the secret. NOTE(review): this is a
		// weak secret; presumably acceptable only because the stats channel is
		// a local Unix socket — confirm.
		statsServersecret = time.Now().String()
	}

	// One RPC client is shared by the process monitor and the proxy itself.
	rpcClient := rpcwrapper.NewRPCWrapper()

	return &ProxyInfo{
		mutualAuth:             mutualAuth,
		Secrets:                secrets,
		serverID:               serverID,
		validity:               validity,
		prochdl:                processmon.New(context.Background(), remoteParameters, runtimeError, rpcClient),
		rpchdl:                 rpcClient,
		filterQueue:            filterQueue,
		commandArg:             cmdArg,
		statsServerSecret:      statsServersecret,
		procMountPoint:         procMountPoint,
		ExternalIPCacheTimeout: ExternalIPCacheTimeout,
		packetLogs:             packetLogs,
		collector:              collector,
		cfg:                    cfg,
	}
}
|
go
|
{
"resource": ""
}
|
q6944
|
GetStats
|
train
|
// GetStats is the RPC endpoint where remote enforcers push flow and user
// statistics; verified records are forwarded to the collector.
//
// Fix: the bare payload type assertion could panic the RPC server on a
// malformed (but authenticated) request; it is now checked and returns an
// error instead.
func (r *StatsServer) GetStats(req rpcwrapper.Request, resp *rpcwrapper.Response) error {
	if !r.rpchdl.ProcessMessage(&req, r.secret) {
		return errors.New("message sender cannot be verified")
	}

	payload, ok := req.Payload.(rpcwrapper.StatsPayload)
	if !ok {
		return errors.New("invalid stats payload")
	}

	for _, record := range payload.Flows {
		r.collector.CollectFlowEvent(record)
	}

	for _, record := range payload.Users {
		r.collector.CollectUserEvent(record)
	}

	return nil
}
|
go
|
{
"resource": ""
}
|
q6945
|
PostPacketEvent
|
train
|
// PostPacketEvent is the RPC endpoint where remote enforcers push debug
// packet traces; verified records are forwarded to the collector.
//
// Fix: the bare payload type assertion could panic the RPC server on a
// malformed request; it is now checked, matching the hardened GetStats.
func (r *StatsServer) PostPacketEvent(req rpcwrapper.Request, resp *rpcwrapper.Response) error {
	if !r.rpchdl.ProcessMessage(&req, r.secret) {
		return errors.New("message sender cannot be verified")
	}

	payload, ok := req.Payload.(rpcwrapper.DebugPacketPayload)
	if !ok {
		return errors.New("invalid debug packet payload")
	}

	for _, record := range payload.PacketRecords {
		r.collector.CollectPacketEvent(record)
	}

	return nil
}
|
go
|
{
"resource": ""
}
|
q6946
|
NewHTTPProxy
|
train
|
// NewHTTPProxy creates an HTTP proxy Config for a PU, seeding the TLS
// client configuration with the given CA pool and capturing the local
// interface addresses at construction time.
func NewHTTPProxy(
	c collector.EventCollector,
	puContext string,
	caPool *x509.CertPool,
	applicationProxy bool,
	mark int,
	secrets secrets.Secrets,
	registry *serviceregistry.Registry,
) *Config {

	clientTLS := &tls.Config{RootCAs: caPool}

	return &Config{
		collector:        c,
		puContext:        puContext,
		ca:               caPool,
		applicationProxy: applicationProxy,
		mark:             mark,
		secrets:          secrets,
		localIPs:         markedconn.GetInterfaces(),
		registry:         registry,
		tlsClientConfig:  clientTLS,
	}
}
|
go
|
{
"resource": ""
}
|
q6947
|
clientTLSConfiguration
|
train
|
// clientTLSConfiguration picks the TLS config for an incoming connection.
// Services using mutual-TLS or JWT user authorization get a config that
// requests (but does not require) client certificates against the service's
// trusted roots; all other services keep the original config.
func (p *Config) clientTLSConfiguration(conn net.Conn, originalConfig *tls.Config) (*tls.Config, error) {
	mconn, ok := conn.(*markedconn.ProxiedConnection)
	if !ok {
		return nil, fmt.Errorf("Invalid connection")
	}

	ip, port := mconn.GetOriginalDestination()
	portContext, err := p.registry.RetrieveExposedServiceContext(ip, port, "")
	if err != nil {
		return nil, fmt.Errorf("Unknown service: %s", err)
	}

	authType := portContext.Service.UserAuthorizationType
	if authType != policy.UserAuthorizationMutualTLS && authType != policy.UserAuthorizationJWT {
		return originalConfig, nil
	}

	// Prefer the service's own trusted roots when it provides them.
	clientCAs := p.ca
	if portContext.ClientTrustedRoots != nil {
		clientCAs = portContext.ClientTrustedRoots
	}

	config := p.newBaseTLSConfig()
	config.ClientAuth = tls.VerifyClientCertIfGiven
	config.ClientCAs = clientCAs
	return config, nil
}
|
go
|
{
"resource": ""
}
|
q6948
|
newBaseTLSConfig
|
train
|
// newBaseTLSConfig returns the proxy's baseline server TLS configuration:
// dynamic certificate selection via GetCertificateFunc, HTTP/2 via ALPN,
// session tickets disabled, and a restricted modern AEAD cipher list.
func (p *Config) newBaseTLSConfig() *tls.Config {
	return &tls.Config{
		GetCertificate:           p.GetCertificateFunc(),
		NextProtos:               []string{"h2"},
		PreferServerCipherSuites: true,
		SessionTicketsDisabled:   true,
		CipherSuites: []uint16{
			tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
			tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
			tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
			tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
			tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
		},
	}
}
|
go
|
{
"resource": ""
}
|
q6949
|
UpdateSecrets
|
train
|
// UpdateSecrets atomically replaces the proxy's certificate, CA pool,
// secrets, and PEM material under the write lock, and re-points the TLS
// client configuration at the new CA pool.
func (p *Config) UpdateSecrets(cert *tls.Certificate, caPool *x509.CertPool, s secrets.Secrets, certPEM, keyPEM string) {
	p.Lock()
	defer p.Unlock()

	p.cert = cert
	p.ca = caPool
	p.secrets = s
	p.certPEM = certPEM
	p.keyPEM = keyPEM
	p.tlsClientConfig.RootCAs = caPool
}
|
go
|
{
"resource": ""
}
|
q6950
|
GetCertificateFunc
|
train
|
// GetCertificateFunc returns the tls.Config.GetCertificate callback: direct
// hits on a service's public port are served with the service's public
// certificate, while all other (internal) connections get the enforcer
// certificate. The closure reads p.cert and p.registry under the read lock.
func (p *Config) GetCertificateFunc() func(*tls.ClientHelloInfo) (*tls.Certificate, error) {
	return func(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) {
		p.RLock()
		defer p.RUnlock()
		// First we check if this is a direct access to the public port. In this case
		// we will use the service public certificate. Otherwise, we will return the
		// enforcer certificate since this is internal access.
		if mconn, ok := clientHello.Conn.(*markedconn.ProxiedConnection); ok {
			ip, port := mconn.GetOriginalDestination()
			portContext, err := p.registry.RetrieveExposedServiceContext(ip, port, "")
			if err != nil {
				return nil, fmt.Errorf("service not available: %s %d", ip.String(), port)
			}
			service := portContext.Service
			// Public access: destination port matches the service's public
			// port and a public certificate has been configured.
			if service.PublicNetworkInfo != nil && service.PublicNetworkInfo.Ports.Min == uint16(port) && len(service.PublicServiceCertificate) > 0 {
				tlsCert, err := tls.X509KeyPair(service.PublicServiceCertificate, service.PublicServiceCertificateKey)
				if err != nil {
					return nil, fmt.Errorf("failed to parse server certificate: %s", err)
				}
				return &tlsCert, nil
			}
		}
		// Internal access: fall back to the enforcer certificate.
		if p.cert != nil {
			return p.cert, nil
		}
		return nil, fmt.Errorf("no cert available - cert is nil")
	}
}
|
go
|
{
"resource": ""
}
|
q6951
|
NewGoIPTablesProvider
|
train
|
// NewGoIPTablesProvider creates a BatchProvider backed by go-iptables.
// Batch mode (via iptables-restore) is enabled only for the given tables
// and only when the local iptables-restore supports --wait; otherwise all
// tables fall back to classic per-rule iptables calls, which keeps older
// kernels/userlands working.
func NewGoIPTablesProvider(batchTables []string) (*BatchProvider, error) {
	ipt, err := iptables.New()
	if err != nil {
		return nil, err
	}

	batchable := map[string]bool{}
	if restoreHasWait() {
		for _, table := range batchTables {
			batchable[table] = true
		}
	}

	provider := &BatchProvider{
		ipt:         ipt,
		rules:       map[string]map[string][]string{},
		batchTables: batchable,
	}
	provider.commitFunc = provider.restore
	return provider, nil
}
|
go
|
{
"resource": ""
}
|
q6952
|
NewCustomBatchProvider
|
train
|
// NewCustomBatchProvider creates a BatchProvider with a caller-supplied
// iptables implementation and commit function — primarily a test seam.
// All listed tables are treated as batchable.
func NewCustomBatchProvider(ipt BaseIPTables, commit func(buf *bytes.Buffer) error, batchTables []string) *BatchProvider {
	batchable := make(map[string]bool, len(batchTables))
	for _, table := range batchTables {
		batchable[table] = true
	}

	return &BatchProvider{
		ipt:         ipt,
		rules:       map[string]map[string][]string{},
		batchTables: batchable,
		commitFunc:  commit,
	}
}
|
go
|
{
"resource": ""
}
|
q6953
|
Append
|
train
|
// Append adds a rule at the end of the chain. For tables managed in batch
// mode the rule is only recorded in the local shadow copy and written out
// on Commit; all other tables are updated immediately through the
// underlying iptables implementation.
func (b *BatchProvider) Append(table, chain string, rulespec ...string) error {
	b.Lock()
	defer b.Unlock()
	if !b.batchTables[table] {
		return b.ipt.Append(table, chain, rulespec...)
	}
	if b.rules[table] == nil {
		b.rules[table] = map[string][]string{}
	}
	b.rules[table][chain] = append(b.rules[table][chain], strings.Join(rulespec, " "))
	return nil
}
|
go
|
{
"resource": ""
}
|
q6954
|
Insert
|
train
|
// Insert places a rule at 1-based position pos within the chain. For
// batched tables the local shadow copy is updated (flushed on Commit);
// other tables go straight to iptables. Position 1 prepends and any
// position past the end appends.
func (b *BatchProvider) Insert(table, chain string, pos int, rulespec ...string) error {
	b.Lock()
	defer b.Unlock()
	if !b.batchTables[table] {
		return b.ipt.Insert(table, chain, pos, rulespec...)
	}
	if b.rules[table] == nil {
		b.rules[table] = map[string][]string{}
	}
	current := b.rules[table][chain]
	rule := strings.Join(rulespec, " ")
	switch {
	case pos == 1:
		current = append([]string{rule}, current...)
	case pos > len(current):
		current = append(current, rule)
	default:
		// Open a slot at index pos-1 by shifting the tail right by one.
		current = append(current, "")
		copy(current[pos:], current[pos-1:])
		current[pos-1] = rule
	}
	b.rules[table][chain] = current
	return nil
}
|
go
|
{
"resource": ""
}
|
q6955
|
Delete
|
train
|
// Delete removes the first occurrence of the given rule from the chain.
// For batched tables only the local shadow copy is touched; for all other
// tables the delete is executed immediately. Deleting from an unknown
// table or chain is a no-op.
func (b *BatchProvider) Delete(table, chain string, rulespec ...string) error {
	b.Lock()
	defer b.Unlock()
	if !b.batchTables[table] {
		return b.ipt.Delete(table, chain, rulespec...)
	}
	chains, ok := b.rules[table]
	if !ok {
		return nil
	}
	current, ok := chains[chain]
	if !ok {
		return nil
	}
	target := strings.Join(rulespec, " ")
	for i, existing := range current {
		if existing != target {
			continue
		}
		if len(current) == 1 {
			chains[chain] = []string{}
		} else {
			chains[chain] = append(current[:i], current[i+1:]...)
		}
		break
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q6956
|
ListChains
|
train
|
// ListChains returns the chains of the given table as reported by the
// underlying iptables implementation. Batched (uncommitted) state is not
// consulted.
func (b *BatchProvider) ListChains(table string) ([]string, error) {
	b.Lock()
	defer b.Unlock()
	chains, err := b.ipt.ListChains(table)
	return chains, err
}
|
go
|
{
"resource": ""
}
|
q6957
|
NewChain
|
train
|
// NewChain creates a chain. For batched tables the chain is only created
// (or reset to empty) in the local shadow copy and materialized on Commit;
// other tables are updated immediately.
func (b *BatchProvider) NewChain(table, chain string) error {
	b.Lock()
	defer b.Unlock()
	if !b.batchTables[table] {
		return b.ipt.NewChain(table, chain)
	}
	if b.rules[table] == nil {
		b.rules[table] = map[string][]string{}
	}
	b.rules[table][chain] = []string{}
	return nil
}
|
go
|
{
"resource": ""
}
|
q6958
|
Commit
|
train
|
// Commit flushes all batched rules to the kernel through the configured
// commit function (iptables-restore by default). It is a no-op when no
// tables are managed in batch mode (old-kernel compatibility), since all
// rules were already applied individually.
//
// Fix: corrected the "Failed to crete buffer" typo in the error message
// and switched to a lowercase, unpunctuated error string per Go convention.
func (b *BatchProvider) Commit() error {
	b.Lock()
	defer b.Unlock()
	// We don't commit if we don't have any tables. This is old
	// kernel compatibility mode.
	if len(b.batchTables) == 0 {
		return nil
	}
	buf, err := b.createDataBuffer()
	if err != nil {
		return fmt.Errorf("failed to create buffer: %s", err)
	}
	return b.commitFunc(buf)
}
|
go
|
{
"resource": ""
}
|
q6959
|
RetrieveTable
|
train
|
// RetrieveTable returns the in-memory shadow copy of the batched rules,
// keyed by table then chain. Primarily useful for tests and debugging.
//
// NOTE(review): the returned map is the provider's internal state, not a
// copy; callers must not mutate it, and it may change after the lock is
// released — confirm callers treat it as read-only.
func (b *BatchProvider) RetrieveTable() map[string]map[string][]string {
	b.Lock()
	defer b.Unlock()
	return b.rules
}
|
go
|
{
"resource": ""
}
|
q6960
|
restore
|
train
|
// restore pipes the batched rules to iptables-restore, holding the xtables
// lock via --wait. On failure it logs both the command output and a fresh
// dump of the rule buffer to aid debugging.
//
// Fixes: the two zap fields previously shared the key "Output", so one
// shadowed the other in the structured log; the error from re-creating the
// data buffer was silently ignored.
func (b *BatchProvider) restore(buf *bytes.Buffer) error {
	cmd := exec.Command(restoreCmd, "--wait")
	cmd.Stdin = buf
	out, err := cmd.CombinedOutput()
	if err != nil {
		// Re-create the buffer for logging: cmd consumed the original.
		rules := ""
		if again, berr := b.createDataBuffer(); berr == nil {
			rules = again.String()
		}
		zap.L().Error("Failed to execute command", zap.Error(err),
			zap.ByteString("Output", out),
			zap.String("Rules", rules),
		)
		return fmt.Errorf("Failed to execute iptables-restore: %s", err)
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q6961
|
MockNewRPCClient
|
train
|
// MockNewRPCClient registers a mock implementation of NewRPCClient for the
// given test.
func (m *testRPC) MockNewRPCClient(t *testing.T, impl func(contextID string, channel string, secret string) error) {
	mocks := m.currentMocks(t)
	mocks.NewRPCClientMock = impl
}
|
go
|
{
"resource": ""
}
|
q6962
|
MockGetRPCClient
|
train
|
// MockGetRPCClient registers a mock implementation of GetRPCClient for the
// given test.
func (m *testRPC) MockGetRPCClient(t *testing.T, impl func(contextID string) (*RPCHdl, error)) {
	mocks := m.currentMocks(t)
	mocks.GetRPCClientMock = impl
}
|
go
|
{
"resource": ""
}
|
q6963
|
MockRemoteCall
|
train
|
// MockRemoteCall registers a mock implementation of RemoteCall for the
// given test.
func (m *testRPC) MockRemoteCall(t *testing.T, impl func(contextID string, methodName string, req *Request, resp *Response) error) {
	mocks := m.currentMocks(t)
	mocks.RemoteCallMock = impl
}
|
go
|
{
"resource": ""
}
|
q6964
|
MockDestroyRPCClient
|
train
|
// MockDestroyRPCClient registers a mock implementation of DestroyRPCClient
// for the given test.
func (m *testRPC) MockDestroyRPCClient(t *testing.T, impl func(contextID string)) {
	mocks := m.currentMocks(t)
	mocks.DestroyRPCClientMock = impl
}
|
go
|
{
"resource": ""
}
|
q6965
|
MockStartServer
|
train
|
// MockStartServer registers a mock implementation of StartServer for the
// given test.
func (m *testRPC) MockStartServer(t *testing.T, impl func(ctx context.Context, protocol string, path string, handler interface{}) error) {
	mocks := m.currentMocks(t)
	mocks.StartServerMock = impl
}
|
go
|
{
"resource": ""
}
|
q6966
|
MockProcessMessage
|
train
|
// MockProcessMessage registers a mock implementation of ProcessMessage for
// the given test.
func (m *testRPC) MockProcessMessage(t *testing.T, impl func(req *Request, secret string) bool) {
	mocks := m.currentMocks(t)
	mocks.ProcessMessageMock = impl
}
|
go
|
{
"resource": ""
}
|
q6967
|
MockContextList
|
train
|
// MockContextList registers a mock implementation of ContextList for the
// given test.
func (m *testRPC) MockContextList(t *testing.T, impl func() []string) {
	mocks := m.currentMocks(t)
	mocks.ContextListMock = impl
}
|
go
|
{
"resource": ""
}
|
q6968
|
MockCheckValidity
|
train
|
// MockCheckValidity registers a mock implementation of CheckValidity for
// the given test.
func (m *testRPC) MockCheckValidity(t *testing.T, impl func(req *Request, secret string) bool) {
	mocks := m.currentMocks(t)
	mocks.CheckValidityMock = impl
}
|
go
|
{
"resource": ""
}
|
q6969
|
NewRPCClient
|
train
|
// NewRPCClient invokes the registered mock when one exists; otherwise it is
// a no-op returning nil.
func (m *testRPC) NewRPCClient(contextID string, channel string, secret string) error {
	mock := m.currentMocks(nil)
	if mock == nil || mock.NewRPCClientMock == nil {
		return nil
	}
	return mock.NewRPCClientMock(contextID, channel, secret)
}
|
go
|
{
"resource": ""
}
|
q6970
|
GetRPCClient
|
train
|
// GetRPCClient invokes the registered mock when one exists; otherwise it
// returns (nil, nil).
func (m *testRPC) GetRPCClient(contextID string) (*RPCHdl, error) {
	mock := m.currentMocks(nil)
	if mock == nil || mock.GetRPCClientMock == nil {
		return nil, nil
	}
	return mock.GetRPCClientMock(contextID)
}
|
go
|
{
"resource": ""
}
|
q6971
|
RemoteCall
|
train
|
// RemoteCall invokes the registered mock when one exists; otherwise it is a
// no-op returning nil.
func (m *testRPC) RemoteCall(contextID string, methodName string, req *Request, resp *Response) error {
	mock := m.currentMocks(nil)
	if mock == nil || mock.RemoteCallMock == nil {
		return nil
	}
	return mock.RemoteCallMock(contextID, methodName, req, resp)
}
|
go
|
{
"resource": ""
}
|
q6972
|
DestroyRPCClient
|
train
|
// DestroyRPCClient invokes the registered mock when one exists; otherwise
// it does nothing.
func (m *testRPC) DestroyRPCClient(contextID string) {
	mock := m.currentMocks(nil)
	if mock != nil && mock.DestroyRPCClientMock != nil {
		mock.DestroyRPCClientMock(contextID)
	}
}
|
go
|
{
"resource": ""
}
|
q6973
|
CheckValidity
|
train
|
// CheckValidity invokes the registered mock when one exists; otherwise it
// reports false.
//
// Fix: the guard previously tested DestroyRPCClientMock instead of
// CheckValidityMock, so a test that registered only DestroyRPCClientMock
// would call a nil CheckValidityMock and panic.
func (m *testRPC) CheckValidity(req *Request, secret string) bool {
	if mock := m.currentMocks(nil); mock != nil && mock.CheckValidityMock != nil {
		return mock.CheckValidityMock(req, secret)
	}
	return false
}
|
go
|
{
"resource": ""
}
|
q6974
|
StartServer
|
train
|
// StartServer invokes the registered mock when one exists; otherwise it is
// a no-op returning nil.
func (m *testRPC) StartServer(ctx context.Context, protocol string, path string, handler interface{}) error {
	mock := m.currentMocks(nil)
	if mock == nil || mock.StartServerMock == nil {
		return nil
	}
	return mock.StartServerMock(ctx, protocol, path, handler)
}
|
go
|
{
"resource": ""
}
|
q6975
|
ProcessMessage
|
train
|
// ProcessMessage invokes the registered mock when one exists; otherwise it
// reports true.
func (m *testRPC) ProcessMessage(req *Request, secret string) bool {
	mock := m.currentMocks(nil)
	if mock == nil || mock.ProcessMessageMock == nil {
		return true
	}
	return mock.ProcessMessageMock(req, secret)
}
|
go
|
{
"resource": ""
}
|
q6976
|
ContextList
|
train
|
// ContextList invokes the registered mock when one exists; otherwise it
// returns an empty list.
//
// Fix: previously this read m.currentTest without holding m.lock and
// passed it to currentMocks, racing with concurrent mock registration.
// Passing nil lets currentMocks resolve the current test under its own
// lock, consistent with every other mocked method in this type.
func (m *testRPC) ContextList() []string {
	if mock := m.currentMocks(nil); mock != nil && mock.ContextListMock != nil {
		return mock.ContextListMock()
	}
	return []string{}
}
|
go
|
{
"resource": ""
}
|
q6977
|
currentMocks
|
train
|
// currentMocks returns the mock set for test t, creating one on first use.
// A nil t selects the most recently registered test; a non-nil t becomes
// the current test. Safe for concurrent use.
func (m *testRPC) currentMocks(t *testing.T) *mockedMethods {
	m.lock.Lock()
	defer m.lock.Unlock()
	if t != nil {
		m.currentTest = t
	} else {
		t = m.currentTest
	}
	if existing := m.mocks[t]; existing != nil {
		return existing
	}
	created := &mockedMethods{}
	m.mocks[t] = created
	return created
}
|
go
|
{
"resource": ""
}
|
q6978
|
NewPU
|
train
|
// NewPU creates a new processing-unit context from the supplied policy
// information. It builds the identity/ACL lookup structures, derives the
// TCP/UDP port lists from the runtime services, and starts asynchronous
// DNS-name ACL resolution. timeout bounds how long external-IP policy
// decisions stay in the cache. The returned context's CancelFunc stops the
// DNS resolution started here.
func NewPU(contextID string, puInfo *policy.PUInfo, timeout time.Duration) (*PUContext, error) {
	ctx := context.Background()
	// The cancel function is retained on the context so the DNS work
	// started below can be stopped when the PU goes away.
	ctx, cancelFunc := context.WithCancel(ctx)
	pu := &PUContext{
		id: contextID,
		username: puInfo.Runtime.Options().UserID,
		autoport: puInfo.Runtime.Options().AutoPort,
		managementID: puInfo.Policy.ManagementID(),
		puType: puInfo.Runtime.PUType(),
		identity: puInfo.Policy.Identity(),
		annotations: puInfo.Policy.Annotations(),
		externalIPCache: cache.NewCacheWithExpiration("External IP Cache", timeout),
		ApplicationACLs: acls.NewACLCache(),
		networkACLs: acls.NewACLCache(),
		mark: puInfo.Runtime.Options().CgroupMark,
		scopes: puInfo.Policy.Scopes(),
		CancelFunc: cancelFunc,
	}
	// Build the receive/transmit policy lookup databases.
	pu.CreateRcvRules(puInfo.Policy.ReceiverRules())
	pu.CreateTxtRules(puInfo.Policy.TransmitterRules())
	// Services are flattened into comma-separated protocol port lists.
	tcpPorts, udpPorts := common.ConvertServicesToProtocolPortList(puInfo.Runtime.Options().Services)
	pu.tcpPorts = strings.Split(tcpPorts, ",")
	pu.udpPorts = strings.Split(udpPorts, ",")
	if err := pu.UpdateApplicationACLs(puInfo.Policy.ApplicationACLs()); err != nil {
		return nil, err
	}
	if err := pu.UpdateNetworkACLs(puInfo.Policy.NetworkACLs()); err != nil {
		return nil, err
	}
	dnsACL := puInfo.Policy.DNSNameACLs()
	pu.startDNS(ctx, &dnsACL)
	return pu, nil
}
|
go
|
{
"resource": ""
}
|
q6979
|
RetrieveCachedExternalFlowPolicy
|
train
|
// RetrieveCachedExternalFlowPolicy looks up a previously cached policy
// decision for an external flow identified by id (addr:port form — see
// CacheExternalFlowPolicy).
func (p *PUContext) RetrieveCachedExternalFlowPolicy(id string) (interface{}, error) {
	return p.externalIPCache.Get(id)
}
|
go
|
{
"resource": ""
}
|
q6980
|
NetworkACLPolicy
|
train
|
// NetworkACLPolicy checks the network ACLs against the packet's source
// address and destination port and returns the matching report and packet
// policies.
func (p *PUContext) NetworkACLPolicy(packet *packet.Packet) (report *policy.FlowPolicy, action *policy.FlowPolicy, err error) {
	p.RLock()
	defer p.RUnlock()
	report, action, err = p.networkACLs.GetMatchingAction(packet.SourceAddress(), packet.DestPort())
	return report, action, err
}
|
go
|
{
"resource": ""
}
|
q6981
|
NetworkACLPolicyFromAddr
|
train
|
// NetworkACLPolicyFromAddr checks the network ACLs for the given address
// and port and returns the matching report and packet policies.
func (p *PUContext) NetworkACLPolicyFromAddr(addr net.IP, port uint16) (report *policy.FlowPolicy, action *policy.FlowPolicy, err error) {
	p.RLock()
	defer p.RUnlock()
	report, action, err = p.networkACLs.GetMatchingAction(addr, port)
	return report, action, err
}
|
go
|
{
"resource": ""
}
|
q6982
|
UpdateApplicationACLs
|
train
|
// UpdateApplicationACLs adds the given rule list to the application
// (egress) ACL cache.
func (p *PUContext) UpdateApplicationACLs(rules policy.IPRuleList) error {
	p.Lock()
	defer p.Unlock()
	return p.ApplicationACLs.AddRuleList(rules)
}
|
go
|
{
"resource": ""
}
|
q6983
|
UpdateNetworkACLs
|
train
|
// UpdateNetworkACLs adds the given rule list to the network (ingress) ACL
// cache.
func (p *PUContext) UpdateNetworkACLs(rules policy.IPRuleList) error {
	p.Lock()
	defer p.Unlock()
	return p.networkACLs.AddRuleList(rules)
}
|
go
|
{
"resource": ""
}
|
q6984
|
CacheExternalFlowPolicy
|
train
|
// CacheExternalFlowPolicy stores a policy decision for an external flow,
// keyed by the packet's "sourceAddr:sourcePort" string, in the expiring
// external IP cache.
func (p *PUContext) CacheExternalFlowPolicy(packet *packet.Packet, plc interface{}) {
	p.externalIPCache.AddOrUpdate(packet.SourceAddress().String()+":"+strconv.Itoa(int(packet.SourcePort())), plc)
}
|
go
|
{
"resource": ""
}
|
q6985
|
GetProcessKeys
|
train
|
// GetProcessKeys returns the cgroup mark and the TCP and UDP port lists of
// the processing unit.
//
// NOTE(review): unlike the sibling accessors this reads fields without
// taking the lock — confirm these fields are immutable after NewPU.
func (p *PUContext) GetProcessKeys() (string, []string, []string) {
	return p.mark, p.tcpPorts, p.udpPorts
}
|
go
|
{
"resource": ""
}
|
q6986
|
SynServiceContext
|
train
|
// SynServiceContext returns the cached service context used for syn
// packets.
func (p *PUContext) SynServiceContext() []byte {
	p.RLock()
	serviceContext := p.synServiceContext
	p.RUnlock()
	return serviceContext
}
|
go
|
{
"resource": ""
}
|
q6987
|
UpdateSynServiceContext
|
train
|
// UpdateSynServiceContext replaces the cached syn service context.
func (p *PUContext) UpdateSynServiceContext(synServiceContext []byte) {
	p.Lock()
	defer p.Unlock()
	p.synServiceContext = synServiceContext
}
|
go
|
{
"resource": ""
}
|
q6988
|
GetCachedTokenAndServiceContext
|
train
|
// GetCachedTokenAndServiceContext returns a copy of the cached syn token
// together with the service context, or an error when the token is unset
// or has expired.
func (p *PUContext) GetCachedTokenAndServiceContext() ([]byte, []byte, error) {
	p.RLock()
	defer p.RUnlock()
	if len(p.synToken) == 0 || !p.synExpiration.After(time.Now()) {
		return nil, nil, fmt.Errorf("expired Token")
	}
	// Hand out a copy so callers cannot mutate the cached token.
	token := append([]byte(nil), p.synToken...)
	return token, p.synServiceContext, nil
}
|
go
|
{
"resource": ""
}
|
q6989
|
UpdateCachedTokenAndServiceContext
|
train
|
// UpdateCachedTokenAndServiceContext caches a new syn token and service
// context; the token is considered valid for 500ms.
func (p *PUContext) UpdateCachedTokenAndServiceContext(token []byte, serviceContext []byte) {
	p.Lock()
	defer p.Unlock()
	p.synToken = token
	p.synServiceContext = serviceContext
	p.synExpiration = time.Now().Add(500 * time.Millisecond)
}
|
go
|
{
"resource": ""
}
|
q6990
|
Scopes
|
train
|
// Scopes returns the policy scopes of the processing unit.
func (p *PUContext) Scopes() []string {
	p.RLock()
	scopes := p.scopes
	p.RUnlock()
	return scopes
}
|
go
|
{
"resource": ""
}
|
q6991
|
GetJWT
|
train
|
// GetJWT returns the cached JWT, or an error when it is unset or has
// expired.
func (p *PUContext) GetJWT() (string, error) {
	p.RLock()
	defer p.RUnlock()
	if len(p.jwt) == 0 || !p.jwtExpiration.After(time.Now()) {
		return "", fmt.Errorf("expired token")
	}
	return p.jwt, nil
}
|
go
|
{
"resource": ""
}
|
q6992
|
UpdateJWT
|
train
|
// UpdateJWT caches a new JWT together with its expiration time.
func (p *PUContext) UpdateJWT(jwt string, expiration time.Time) {
	p.Lock()
	p.jwt = jwt
	p.jwtExpiration = expiration
	p.Unlock()
}
|
go
|
{
"resource": ""
}
|
q6993
|
createRuleDBs
|
train
|
// createRuleDBs partitions the given tag selectors into per-action lookup
// databases. Each rule lands in exactly one of: observe-accept,
// observe-reject, observe-apply, accept, or reject (tested in that order);
// encrypted rules are additionally indexed in the encrypt DB regardless of
// that classification. Rules matching none of the categories are dropped.
func (p *PUContext) createRuleDBs(policyRules policy.TagSelectorList) *policies {
	policyDB := &policies{
		rejectRules: lookup.NewPolicyDB(),
		observeRejectRules: lookup.NewPolicyDB(),
		acceptRules: lookup.NewPolicyDB(),
		observeAcceptRules: lookup.NewPolicyDB(),
		observeApplyRules: lookup.NewPolicyDB(),
		encryptRules: lookup.NewPolicyDB(),
	}
	for _, rule := range policyRules {
		// Add encrypt rule to encrypt table.
		if rule.Policy.Action.Encrypted() {
			policyDB.encryptRules.AddPolicy(rule)
		}
		if rule.Policy.ObserveAction.ObserveContinue() {
			if rule.Policy.Action.Accepted() {
				policyDB.observeAcceptRules.AddPolicy(rule)
			} else if rule.Policy.Action.Rejected() {
				policyDB.observeRejectRules.AddPolicy(rule)
			}
		} else if rule.Policy.ObserveAction.ObserveApply() {
			policyDB.observeApplyRules.AddPolicy(rule)
		} else if rule.Policy.Action.Accepted() {
			policyDB.acceptRules.AddPolicy(rule)
		} else if rule.Policy.Action.Rejected() {
			policyDB.rejectRules.AddPolicy(rule)
		} else {
			continue
		}
	}
	return policyDB
}
|
go
|
{
"resource": ""
}
|
q6994
|
CreateRcvRules
|
train
|
// CreateRcvRules (re)builds the receive-side policy lookup databases from
// the given tag selectors.
func (p *PUContext) CreateRcvRules(policyRules policy.TagSelectorList) {
	p.rcv = p.createRuleDBs(policyRules)
}
|
go
|
{
"resource": ""
}
|
q6995
|
CreateTxtRules
|
train
|
// CreateTxtRules (re)builds the transmit-side policy lookup databases from
// the given tag selectors.
func (p *PUContext) CreateTxtRules(policyRules policy.TagSelectorList) {
	p.txt = p.createRuleDBs(policyRules)
}
|
go
|
{
"resource": ""
}
|
q6996
|
searchRules
|
train
|
// searchRules evaluates the given policy databases against the tag set and
// returns the reporting policy and the packet (enforcement) policy.
//
// Enforcement precedence: reject > accept > observe-apply > default reject
// (PolicyID "default"). Observe-continue rules only influence the reported
// policy, never the enforced one; when no observe-continue rule matches,
// the report mirrors the enforced action. skipRejectPolicies bypasses the
// reject lookup entirely. An accept that also matches an encrypt rule is
// upgraded to Accept|Encrypt while keeping the accept rule's IDs.
func (p *PUContext) searchRules(
	policies *policies,
	tags *policy.TagStore,
	skipRejectPolicies bool,
) (report *policy.FlowPolicy, packet *policy.FlowPolicy) {
	var reportingAction *policy.FlowPolicy
	var packetAction *policy.FlowPolicy
	if !skipRejectPolicies {
		// Look for rejection rules
		observeIndex, observeAction := policies.observeRejectRules.Search(tags)
		if observeIndex >= 0 {
			reportingAction = observeAction.(*policy.FlowPolicy)
		}
		index, action := policies.rejectRules.Search(tags)
		if index >= 0 {
			packetAction = action.(*policy.FlowPolicy)
			if reportingAction == nil {
				reportingAction = packetAction
			}
			return reportingAction, packetAction
		}
	}
	if reportingAction == nil {
		// Look for allow rules
		observeIndex, observeAction := policies.observeAcceptRules.Search(tags)
		if observeIndex >= 0 {
			reportingAction = observeAction.(*policy.FlowPolicy)
		}
	}
	index, action := policies.acceptRules.Search(tags)
	if index >= 0 {
		packetAction = action.(*policy.FlowPolicy)
		// Look for encrypt rules
		encryptIndex, _ := policies.encryptRules.Search(tags)
		if encryptIndex >= 0 {
			// Do not overwrite the action for accept rules.
			finalAction := action.(*policy.FlowPolicy)
			packetAction = &policy.FlowPolicy{
				Action: policy.Accept | policy.Encrypt,
				PolicyID: finalAction.PolicyID,
				ServiceID: finalAction.ServiceID,
			}
		}
		if reportingAction == nil {
			reportingAction = packetAction
		}
		return reportingAction, packetAction
	}
	// Look for observe apply rules
	observeIndex, observeAction := policies.observeApplyRules.Search(tags)
	if observeIndex >= 0 {
		packetAction = observeAction.(*policy.FlowPolicy)
		if reportingAction == nil {
			reportingAction = packetAction
		}
		return reportingAction, packetAction
	}
	// Handle default if nothing provides to drop with no policyID.
	packetAction = &policy.FlowPolicy{
		Action: policy.Reject,
		PolicyID: "default",
	}
	if reportingAction == nil {
		reportingAction = packetAction
	}
	return reportingAction, packetAction
}
|
go
|
{
"resource": ""
}
|
q6997
|
SearchTxtRules
|
train
|
// SearchTxtRules evaluates the transmit-side policy databases against the
// tag set; see searchRules for the precedence semantics.
func (p *PUContext) SearchTxtRules(
	tags *policy.TagStore,
	skipRejectPolicies bool,
) (report *policy.FlowPolicy, packet *policy.FlowPolicy) {
	return p.searchRules(p.txt, tags, skipRejectPolicies)
}
|
go
|
{
"resource": ""
}
|
q6998
|
SearchRcvRules
|
train
|
// SearchRcvRules evaluates the receive-side policy databases against the
// tag set; reject policies are always considered on the receive path.
func (p *PUContext) SearchRcvRules(
	tags *policy.TagStore,
) (report *policy.FlowPolicy, packet *policy.FlowPolicy) {
	return p.searchRules(p.rcv, tags, false)
}
|
go
|
{
"resource": ""
}
|
q6999
|
Register
|
train
|
// Register creates a service context for the PU and indexes its exposed
// and dependent services in the registry, replacing any previous
// registration for the same puID. The whole operation happens under the
// registry lock, so readers never see a partially updated state.
func (r *Registry) Register(
	puID string,
	pu *policy.PUInfo,
	puContext *pucontext.PUContext,
	secrets secrets.Secrets,
) (*ServiceContext, error) {
	r.Lock()
	defer r.Unlock()
	sctx := &ServiceContext{
		PU: pu,
		PUContext: puContext,
		dependentServiceCache: servicecache.NewTable(),
		RootCA: [][]byte{},
	}
	// Delete all old references first. Since the registry is locked
	// nobody will be affected.
	r.indexByPort.DeleteByID(puID, true)
	r.indexByPort.DeleteByID(puID, false)
	if err := r.updateDependentServices(sctx); err != nil {
		return nil, err
	}
	if err := r.updateExposedServices(sctx, secrets); err != nil {
		return nil, err
	}
	r.indexByName[puID] = sctx
	return sctx, nil
}
|
go
|
{
"resource": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.