_id stringlengths 2 7 | title stringlengths 1 118 | partition stringclasses 3 values | text stringlengths 52 85.5k | language stringclasses 1 value | meta_information dict |
|---|---|---|---|---|---|
q7300 | RemoteCall | train | func (r *RPCWrapper) RemoteCall(contextID string, methodName string, req *Request, resp *Response) error {
rpcClient, err := r.GetRPCClient(contextID)
if err != nil {
return err
}
digest := hmac.New(sha256.New, []byte(rpcClient.Secret))
hash, err := payloadHash(req.Payload)
if err != nil {
return err
}
if _, err := digest.Write(hash); err != nil {
return err
}
req.HashAuth = digest.Sum(nil)
return rpcClient.Client.Call(methodName, req, resp)
} | go | {
"resource": ""
} |
q7301 | CheckValidity | train | func (r *RPCWrapper) CheckValidity(req *Request, secret string) bool {
digest := hmac.New(sha256.New, []byte(secret))
hash, err := payloadHash(req.Payload)
if err != nil {
return false
}
if _, err := digest.Write(hash); err != nil {
return false
}
return hmac.Equal(req.HashAuth, digest.Sum(nil))
} | go | {
"resource": ""
} |
q7302 | StartServer | train | func (r *RPCWrapper) StartServer(ctx context.Context, protocol string, path string, handler interface{}) error {
if len(path) == 0 {
zap.L().Fatal("Sock param not passed in environment")
}
// Register RPC Type
RegisterTypes()
// Register handlers
if err := rpc.Register(handler); err != nil {
return err
}
rpc.HandleHTTP()
// removing old path in case it exists already - error if we can't remove it
if _, err := os.Stat(path); err == nil {
zap.L().Debug("Socket path already exists: removing", zap.String("path", path))
if rerr := os.Remove(path); rerr != nil {
return fmt.Errorf("unable to delete existing socket path %s: %s", path, rerr)
}
}
// Get listener
listen, err := net.Listen(protocol, path)
if err != nil {
return err
}
go http.Serve(listen, nil) // nolint
<-ctx.Done()
if merr := listen.Close(); merr != nil {
zap.L().Warn("Connection already closed", zap.Error(merr))
}
_, err = os.Stat(path)
if !os.IsNotExist(err) {
if err := os.Remove(path); err != nil {
zap.L().Warn("failed to remove old path", zap.Error(err))
}
}
return nil
} | go | {
"resource": ""
} |
q7303 | DestroyRPCClient | train | func (r *RPCWrapper) DestroyRPCClient(contextID string) {
r.Lock()
defer r.Unlock()
rpcHdl, err := r.rpcClientMap.Get(contextID)
if err != nil {
return
}
if err = rpcHdl.(*RPCHdl).Client.Close(); err != nil {
zap.L().Warn("Failed to close channel",
zap.String("contextID", contextID),
zap.Error(err),
)
}
if err = os.Remove(rpcHdl.(*RPCHdl).Channel); err != nil {
zap.L().Debug("Failed to remove channel - already closed",
zap.String("contextID", contextID),
zap.Error(err),
)
}
if err = r.rpcClientMap.Remove(contextID); err != nil {
zap.L().Warn("Failed to remove item from cache",
zap.String("contextID", contextID),
zap.Error(err),
)
}
} | go | {
"resource": ""
} |
q7304 | ContextList | train | func (r *RPCWrapper) ContextList() []string {
keylist := r.rpcClientMap.KeyList()
contextArray := []string{}
for _, key := range keylist {
if kstring, ok := key.(string); ok {
contextArray = append(contextArray, kstring)
}
}
return contextArray
} | go | {
"resource": ""
} |
// ProcessMessage reports whether req carries a valid HMAC for the given
// secret. It is a thin alias for CheckValidity.
func (r *RPCWrapper) ProcessMessage(req *Request, secret string) bool {
	return r.CheckValidity(req, secret)
}
"resource": ""
} |
q7306 | payloadHash | train | func payloadHash(payload interface{}) ([]byte, error) {
hash, err := hashstructure.Hash(payload, nil)
if err != nil {
return []byte{}, err
}
buf := make([]byte, 8)
binary.BigEndian.PutUint64(buf, hash)
return buf, nil
} | go | {
"resource": ""
} |
q7307 | RegisterTypes | train | func RegisterTypes() {
gob.Register(&secrets.CompactPKIPublicSecrets{})
gob.Register(&pkitokens.PKIJWTVerifier{})
gob.Register(&oidc.TokenVerifier{})
gob.RegisterName("go.aporeto.io/trireme-lib/controller/internal/enforcer/utils/rpcwrapper.Init_Request_Payload", *(&InitRequestPayload{}))
gob.RegisterName("go.aporeto.io/trireme-lib/controller/internal/enforcer/utils/rpcwrapper.Init_Response_Payload", *(&InitResponsePayload{}))
gob.RegisterName("go.aporeto.io/trireme-lib/controller/internal/enforcer/utils/rpcwrapper.Enforce_Payload", *(&EnforcePayload{}))
gob.RegisterName("go.aporeto.io/trireme-lib/controller/internal/enforcer/utils/rpcwrapper.UnEnforce_Payload", *(&UnEnforcePayload{}))
gob.RegisterName("go.aporeto.io/trireme-lib/controller/internal/enforcer/utils/rpcwrapper.Stats_Payload", *(&StatsPayload{}))
gob.RegisterName("go.aporeto.io/trireme-lib/controller/internal/enforcer/utils/rpcwrapper.UpdateSecrets_Payload", *(&UpdateSecretsPayload{}))
gob.RegisterName("go.aporeto.io/trireme-lib/controller/internal/enforcer/utils/rpcwrapper.SetTargetNetworks_Payload", *(&SetTargetNetworksPayload{}))
gob.RegisterName("go.aporeto.io/trireme-lib/controller/internal/enforcer/utils/rpcwrapper.EnableIPTablesPacketTracing_PayLoad", *(&EnableIPTablesPacketTracingPayLoad{}))
gob.RegisterName("go.aporeto.io/trireme-lib/controller/internal/enforcer/utils/rpcwrapper.EnableDatapathPacketTracing_PayLoad", *(&EnableDatapathPacketTracingPayLoad{}))
gob.RegisterName("go.aporeto.io/trireme-lib/controller/internal/enforcer/utils/rpcwrapper.DebugPacket_Payload", *(&DebugPacketPayload{}))
} | go | {
"resource": ""
} |
q7308 | New | train | func New() Registerer {
return ®isterer{
handlers: map[common.PUType]map[common.Event]common.EventHandler{},
}
} | go | {
"resource": ""
} |
q7309 | RegisterProcessor | train | func (r *registerer) RegisterProcessor(puType common.PUType, ep processor.Processor) error {
if _, ok := r.handlers[puType]; ok {
return fmt.Errorf("Processor already registered for this PU type %d ", puType)
}
r.handlers[puType] = map[common.Event]common.EventHandler{}
r.addHandler(puType, common.EventStart, ep.Start)
r.addHandler(puType, common.EventStop, ep.Stop)
r.addHandler(puType, common.EventCreate, ep.Create)
r.addHandler(puType, common.EventDestroy, ep.Destroy)
r.addHandler(puType, common.EventPause, ep.Pause)
r.addHandler(puType, common.EventResync, ep.Resync)
return nil
} | go | {
"resource": ""
} |
q7310 | processApplicationTCPPacket | train | func (d *Datapath) processApplicationTCPPacket(tcpPacket *packet.Packet, context *pucontext.PUContext, conn *connection.TCPConnection) (interface{}, error) {
if conn == nil {
return nil, nil
}
// State machine based on the flags
switch tcpPacket.GetTCPFlags() & packet.TCPSynAckMask {
case packet.TCPSynMask: //Processing SYN packet from Application
return d.processApplicationSynPacket(tcpPacket, context, conn)
case packet.TCPAckMask:
return nil, d.processApplicationAckPacket(tcpPacket, context, conn)
case packet.TCPSynAckMask:
return nil, d.processApplicationSynAckPacket(tcpPacket, context, conn)
default:
return nil, nil
}
} | go | {
"resource": ""
} |
// processApplicationSynPacket handles an outgoing application SYN. Flows
// destined outside the target networks are matched against the external
// ACLs; cached external-flow policies short-circuit token handling; all
// other flows get the Trireme authentication option and SYN token attached.
func (d *Datapath) processApplicationSynPacket(tcpPacket *packet.Packet, context *pucontext.PUContext, conn *connection.TCPConnection) (interface{}, error) {
	// If the packet is not in target networks then look into the external services application cache to
	// make a decision whether the packet should be forwarded. For target networks with external services
	// network syn/ack accepts the packet if it belongs to external services.
	_, pkt, perr := d.targetNetworks.GetMatchingAction(tcpPacket.DestinationAddress(), tcpPacket.DestPort())
	if perr != nil {
		// Destination is outside the target networks: consult the ACLs.
		report, policy, perr := context.ApplicationACLPolicyFromAddr(tcpPacket.DestinationAddress(), tcpPacket.DestPort())
		if perr == nil && policy.Action.Accepted() {
			return nil, nil
		}
		// Rejected or no ACL match: report the flow and drop the packet.
		d.reportExternalServiceFlow(context, report, pkt, true, tcpPacket)
		return nil, fmt.Errorf("No acls found for external services. Dropping application syn packet")
	}
	// A previously cached external-flow policy means no token is needed;
	// only the connection trackers are refreshed.
	if policy, err := context.RetrieveCachedExternalFlowPolicy(tcpPacket.DestinationAddress().String() + ":" + strconv.Itoa(int(tcpPacket.DestPort()))); err == nil {
		d.appOrigConnectionTracker.AddOrUpdate(tcpPacket.L4FlowHash(), conn)
		d.sourcePortConnectionCache.AddOrUpdate(tcpPacket.SourcePortHash(packet.PacketTypeApplication), conn)
		return policy, nil
	}
	// We are now processing as a Trireme packet that needs authorization headers
	// Create TCP Option
	tcpOptions := d.createTCPAuthenticationOption([]byte{})
	// Create a token
	tcpData, err := d.tokenAccessor.CreateSynPacketToken(context, &conn.Auth)
	if err != nil {
		return nil, err
	}
	// Set the state indicating that we send out a Syn packet
	conn.SetState(connection.TCPSynSend)
	// Populate the caches to track the connection: keyed by both the L4
	// flow hash and the source-port hash.
	hash := tcpPacket.L4FlowHash()
	d.appOrigConnectionTracker.AddOrUpdate(hash, conn)
	d.sourcePortConnectionCache.AddOrUpdate(tcpPacket.SourcePortHash(packet.PacketTypeApplication), conn)
	// Attach the tags to the packet and accept the packet
	return nil, tcpPacket.TCPDataAttach(tcpOptions, tcpData)
}
"resource": ""
} |
// processApplicationSynAckPacket handles an outgoing SYN/ACK from a server
// PU. Loopback same-PU traffic passes through; established external flows
// are released to the kernel via conntrack and their cache entries removed;
// all other flows get a SYN/ACK identity token attached.
func (d *Datapath) processApplicationSynAckPacket(tcpPacket *packet.Packet, context *pucontext.PUContext, conn *connection.TCPConnection) error {
	// if the traffic belongs to the same pu, let it go
	if conn.GetState() == connection.TCPData && conn.IsLoopbackConnection() {
		return nil
	}
	// If we are already in the connection.TCPData, it means that this is an external flow
	// At this point we can release the flow to the kernel by updating conntrack
	// We can also clean up the state since we are not going to see any more
	// packets from this connection.
	if conn.GetState() == connection.TCPData && !conn.ServiceConnection {
		if err := d.conntrack.UpdateApplicationFlowMark(
			tcpPacket.SourceAddress(),
			tcpPacket.DestinationAddress(),
			tcpPacket.IPProto(),
			tcpPacket.SourcePort(),
			tcpPacket.DestPort(),
			constants.DefaultConnMark,
		); err != nil {
			// Failure is logged but not fatal: the flow simply keeps
			// traversing the datapath instead of being offloaded.
			zap.L().Error("Failed to update conntrack entry for flow at SynAck packet",
				zap.String("context", string(conn.Auth.LocalContext)),
				zap.String("app-conn", tcpPacket.L4ReverseFlowHash()),
				zap.String("state", fmt.Sprintf("%d", conn.GetState())),
				zap.Error(err),
			)
		}
		err1 := d.netOrigConnectionTracker.Remove(tcpPacket.L4ReverseFlowHash())
		err2 := d.appReplyConnectionTracker.Remove(tcpPacket.L4FlowHash())
		if err1 != nil || err2 != nil {
			zap.L().Debug("Failed to remove cache entries")
		}
		return nil
	}
	// We now process packets that need authorization options
	// Create TCP Option
	tcpOptions := d.createTCPAuthenticationOption([]byte{})
	// Propagate the packet-flow policy's encryption decision in the claims
	// header so the peer knows whether the flow is to be encrypted.
	claimsHeader := claimsheader.NewClaimsHeader(
		claimsheader.OptionEncrypt(conn.PacketFlowPolicy.Action.Encrypted()),
	)
	tcpData, err := d.tokenAccessor.CreateSynAckPacketToken(context, &conn.Auth, claimsHeader)
	if err != nil {
		return err
	}
	// Set the state for future reference
	conn.SetState(connection.TCPSynAckSend)
	// Attach the tags to the packet
	return tcpPacket.TCPDataAttach(tcpOptions, tcpData)
}
"resource": ""
} |
q7313 | processNetworkTCPPacket | train | func (d *Datapath) processNetworkTCPPacket(tcpPacket *packet.Packet, context *pucontext.PUContext, conn *connection.TCPConnection) (action interface{}, claims *tokens.ConnectionClaims, err error) {
if conn == nil {
return nil, nil, nil
}
// Update connection state in the internal state machine tracker
switch tcpPacket.GetTCPFlags() & packet.TCPSynAckMask {
case packet.TCPSynMask:
return d.processNetworkSynPacket(context, conn, tcpPacket)
case packet.TCPAckMask:
return d.processNetworkAckPacket(context, conn, tcpPacket)
case packet.TCPSynAckMask:
return d.processNetworkSynAckPacket(context, conn, tcpPacket)
default: // Ignore any other packet
return nil, nil, nil
}
} | go | {
"resource": ""
} |
q7314 | createTCPAuthenticationOption | train | func (d *Datapath) createTCPAuthenticationOption(token []byte) []byte {
tokenLen := uint8(len(token))
options := []byte{packet.TCPAuthenticationOption, enforcerconstants.TCPAuthenticationOptionBaseLen + tokenLen, 0, 0}
if tokenLen != 0 {
options = append(options, token...)
}
return options
} | go | {
"resource": ""
} |
q7315 | appSynRetrieveState | train | func (d *Datapath) appSynRetrieveState(p *packet.Packet) (*connection.TCPConnection, error) {
context, err := d.contextFromIP(true, p.Mark, p.SourcePort(), packet.IPProtocolTCP)
if err != nil {
return nil, err
}
if conn, err := d.appOrigConnectionTracker.GetReset(p.L4FlowHash(), 0); err == nil {
return conn.(*connection.TCPConnection), nil
}
return connection.NewTCPConnection(context, p), nil
} | go | {
"resource": ""
} |
// appRetrieveState finds connection state for an outgoing ACK-class packet.
// Lookup order: the server-side reply tracker, then the client-side
// original tracker; for plain ACKs with no tracked flow a fresh connection
// in UnknownState is synthesized so the flow can be re-evaluated.
func (d *Datapath) appRetrieveState(p *packet.Packet) (*connection.TCPConnection, error) {
	hash := p.L4FlowHash()
	// If this ack packet is from Server, Did we see a network Syn for this server PU?
	conn, err := d.appReplyConnectionTracker.GetReset(hash, 0)
	if err == nil {
		// Refresh the cache timeout for long-lived service connections.
		if uerr := updateTimer(d.appReplyConnectionTracker, hash, conn.(*connection.TCPConnection)); uerr != nil {
			zap.L().Error("entry expired just before updating the timer", zap.String("flow", hash))
			return nil, uerr
		}
		return conn.(*connection.TCPConnection), nil
	}
	// If this ack packet is from client, Did we see an Application Syn packet before?
	conn, err = d.appOrigConnectionTracker.GetReset(hash, 0)
	if err == nil {
		if uerr := updateTimer(d.appOrigConnectionTracker, hash, conn.(*connection.TCPConnection)); uerr != nil {
			return nil, uerr
		}
		return conn.(*connection.TCPConnection), nil
	}
	// Only plain ACKs (no SYN bit set) qualify for the unknown-connection path.
	if p.GetTCPFlags()&packet.TCPSynAckMask == packet.TCPAckMask {
		// Let's try if its an existing connection
		context, err := d.contextFromIP(true, p.Mark, p.SourcePort(), packet.IPProtocolTCP)
		if err != nil {
			return nil, errors.New("No context in app processing")
		}
		conn = connection.NewTCPConnection(context, p)
		conn.(*connection.TCPConnection).SetState(connection.UnknownState)
		return conn.(*connection.TCPConnection), nil
	}
	return nil, errNoConnFound
}
"resource": ""
} |
// netSynRetrieveState resolves connection state for an incoming SYN. With a
// matching PU context it reuses or creates a tracked connection. Without
// one, in non-remote mode the Trireme TCP option and data are stripped from
// the packet and the flow is classified as non-PU traffic; in
// remote-container mode the state is invalid.
func (d *Datapath) netSynRetrieveState(p *packet.Packet) (*connection.TCPConnection, error) {
	context, err := d.contextFromIP(false, p.Mark, p.DestPort(), packet.IPProtocolTCP)
	if err == nil {
		if conn, err := d.netOrigConnectionTracker.GetReset(p.L4FlowHash(), 0); err == nil {
			return conn.(*connection.TCPConnection), nil
		}
		return connection.NewTCPConnection(context, p), nil
	}
	//This needs to hit only for local processes never for containers
	//Don't return an error create a dummy context and return it so we truncate the packet before we send it up
	if d.mode != constants.RemoteContainer {
		//we will create the bare minimum needed to exercise our stack
		//We need this syn to look similar to what we will pass on the retry
		//so we setup enough for us to identify this request in the later stages
		// Remove any of our data from the packet.
		if err = p.CheckTCPAuthenticationOption(enforcerconstants.TCPAuthenticationOptionBaseLen); err != nil {
			zap.L().Error("Syn received with tcp option not set", zap.Error(err))
			return nil, errNonPUTraffic
		}
		if err = p.TCPDataDetach(enforcerconstants.TCPAuthenticationOptionBaseLen); err != nil {
			zap.L().Error("Error removing TCP Data", zap.Error(err))
			return nil, errNonPUTraffic
		}
		// Recompute the checksum after mutating the packet.
		p.DropTCPDetachedBytes()
		p.UpdateTCPChecksum()
		return nil, errNonPUTraffic
	}
	return nil, errInvalidNetState
}
"resource": ""
} |
q7318 | netSynAckRetrieveState | train | func (d *Datapath) netSynAckRetrieveState(p *packet.Packet) (*connection.TCPConnection, error) {
conn, err := d.sourcePortConnectionCache.GetReset(p.SourcePortHash(packet.PacketTypeNetwork), 0)
if err != nil {
return nil, errNonPUTraffic
}
return conn.(*connection.TCPConnection), nil
} | go | {
"resource": ""
} |
q7319 | netRetrieveState | train | func (d *Datapath) netRetrieveState(p *packet.Packet) (*connection.TCPConnection, error) {
hash := p.L4FlowHash()
// Did we see a network syn/ack packet? (PU is a client)
conn, err := d.netReplyConnectionTracker.GetReset(hash, 0)
if err == nil {
if err = updateTimer(d.netReplyConnectionTracker, hash, conn.(*connection.TCPConnection)); err != nil {
return nil, err
}
return conn.(*connection.TCPConnection), nil
}
// Did we see a network Syn packet before? (PU is a server)
conn, err = d.netOrigConnectionTracker.GetReset(hash, 0)
if err == nil {
if err = updateTimer(d.netOrigConnectionTracker, hash, conn.(*connection.TCPConnection)); err != nil {
return nil, err
}
return conn.(*connection.TCPConnection), nil
}
// We reach in this state for a client PU only when the service connection(encrypt) sends data sparsely.
// Packets are dropped when this happens, and that is a BUG!!!
// For the server PU we mark the connection in the unknown state.
if p.GetTCPFlags()&packet.TCPSynAckMask == packet.TCPAckMask {
// Let's try if its an existing connection
context, cerr := d.contextFromIP(false, p.Mark, p.DestPort(), packet.IPProtocolTCP)
if cerr != nil {
return nil, err
}
conn = connection.NewTCPConnection(context, p)
conn.(*connection.TCPConnection).SetState(connection.UnknownState)
return conn.(*connection.TCPConnection), nil
}
return nil, errInvalidNetState
} | go | {
"resource": ""
} |
q7320 | updateTimer | train | func updateTimer(c cache.DataStore, hash string, conn *connection.TCPConnection) error {
conn.RLock()
defer conn.RUnlock()
if conn.ServiceConnection && conn.TimeOut > 0 {
return c.SetTimeOut(hash, conn.TimeOut)
}
return nil
} | go | {
"resource": ""
} |
q7321 | releaseFlow | train | func (d *Datapath) releaseFlow(context *pucontext.PUContext, report *policy.FlowPolicy, action *policy.FlowPolicy, tcpPacket *packet.Packet) {
if err := d.appOrigConnectionTracker.Remove(tcpPacket.L4ReverseFlowHash()); err != nil {
zap.L().Debug("Failed to clean cache appOrigConnectionTracker", zap.Error(err))
}
if err := d.sourcePortConnectionCache.Remove(tcpPacket.SourcePortHash(packet.PacketTypeNetwork)); err != nil {
zap.L().Debug("Failed to clean cache sourcePortConnectionCache", zap.Error(err))
}
if err := d.conntrack.UpdateNetworkFlowMark(
tcpPacket.SourceAddress(),
tcpPacket.DestinationAddress(),
tcpPacket.IPProto(),
tcpPacket.SourcePort(),
tcpPacket.DestPort(),
constants.DefaultConnMark,
); err != nil {
zap.L().Error("Failed to update conntrack table",
zap.String("app-conn", tcpPacket.L4ReverseFlowHash()),
zap.Error(err))
}
d.reportReverseExternalServiceFlow(context, report, action, true, tcpPacket)
} | go | {
"resource": ""
} |
q7322 | releaseUnmonitoredFlow | train | func (d *Datapath) releaseUnmonitoredFlow(tcpPacket *packet.Packet) {
zap.L().Debug("Releasing flow", zap.String("flow", tcpPacket.L4FlowHash()))
if err := d.conntrack.UpdateNetworkFlowMark(
tcpPacket.SourceAddress(),
tcpPacket.DestinationAddress(),
tcpPacket.IPProto(),
tcpPacket.SourcePort(),
tcpPacket.DestPort(),
constants.DefaultConnMark,
); err != nil && !netlink.IsNotExist(errors.Cause(err)) {
zap.L().Error("Failed to update conntrack table", zap.Error(err))
}
} | go | {
"resource": ""
} |
q7323 | NewMockSupervisor | train | func NewMockSupervisor(ctrl *gomock.Controller) *MockSupervisor {
mock := &MockSupervisor{ctrl: ctrl}
mock.recorder = &MockSupervisorMockRecorder{mock}
return mock
} | go | {
"resource": ""
} |
q7324 | Supervise | train | func (m *MockSupervisor) Supervise(contextID string, puInfo *policy.PUInfo) error {
ret := m.ctrl.Call(m, "Supervise", contextID, puInfo)
ret0, _ := ret[0].(error)
return ret0
} | go | {
"resource": ""
} |
q7325 | Supervise | train | func (mr *MockSupervisorMockRecorder) Supervise(contextID, puInfo interface{}) *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Supervise", reflect.TypeOf((*MockSupervisor)(nil).Supervise), contextID, puInfo)
} | go | {
"resource": ""
} |
q7326 | Unsupervise | train | func (m *MockSupervisor) Unsupervise(contextID string) error {
ret := m.ctrl.Call(m, "Unsupervise", contextID)
ret0, _ := ret[0].(error)
return ret0
} | go | {
"resource": ""
} |
q7327 | SetTargetNetworks | train | func (m *MockSupervisor) SetTargetNetworks(cfg *runtime.Configuration) error {
ret := m.ctrl.Call(m, "SetTargetNetworks", cfg)
ret0, _ := ret[0].(error)
return ret0
} | go | {
"resource": ""
} |
q7328 | NewMockImplementor | train | func NewMockImplementor(ctrl *gomock.Controller) *MockImplementor {
mock := &MockImplementor{ctrl: ctrl}
mock.recorder = &MockImplementorMockRecorder{mock}
return mock
} | go | {
"resource": ""
} |
q7329 | ConfigureRules | train | func (m *MockImplementor) ConfigureRules(version int, contextID string, containerInfo *policy.PUInfo) error {
ret := m.ctrl.Call(m, "ConfigureRules", version, contextID, containerInfo)
ret0, _ := ret[0].(error)
return ret0
} | go | {
"resource": ""
} |
q7330 | ConfigureRules | train | func (mr *MockImplementorMockRecorder) ConfigureRules(version, contextID, containerInfo interface{}) *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ConfigureRules", reflect.TypeOf((*MockImplementor)(nil).ConfigureRules), version, contextID, containerInfo)
} | go | {
"resource": ""
} |
q7331 | DeleteRules | train | func (m *MockImplementor) DeleteRules(version int, context, tcpPorts, udpPorts, mark, uid, proxyPort string, puType common.PUType) error {
ret := m.ctrl.Call(m, "DeleteRules", version, context, tcpPorts, udpPorts, mark, uid, proxyPort, puType)
ret0, _ := ret[0].(error)
return ret0
} | go | {
"resource": ""
} |
q7332 | DeleteRules | train | func (mr *MockImplementorMockRecorder) DeleteRules(version, context, tcpPorts, udpPorts, mark, uid, proxyPort, puType interface{}) *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteRules", reflect.TypeOf((*MockImplementor)(nil).DeleteRules), version, context, tcpPorts, udpPorts, mark, uid, proxyPort, puType)
} | go | {
"resource": ""
} |
q7333 | ACLProvider | train | func (m *MockImplementor) ACLProvider() aclprovider.IptablesProvider {
ret := m.ctrl.Call(m, "ACLProvider")
ret0, _ := ret[0].(aclprovider.IptablesProvider)
return ret0
} | go | {
"resource": ""
} |
q7334 | NewEventServer | train | func NewEventServer(address string, registerer registerer.Registerer) (*EventServer, error) {
// Cleanup the socket first.
if _, err := os.Stat(address); err == nil {
if err := os.Remove(address); err != nil {
return nil, fmt.Errorf("Cannot create clean up socket: %s", err)
}
}
return &EventServer{
socketPath: address,
registerer: registerer,
}, nil
} | go | {
"resource": ""
} |
// ServeHTTP implements http.Handler: every request is treated as an event
// creation and delegated to create.
func (e *EventServer) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	e.create(w, r)
}
"resource": ""
} |
q7336 | Run | train | func (e *EventServer) Run(ctx context.Context) error {
// Create the handler
e.server = &http.Server{
Handler: e,
}
// Start a custom listener
addr, _ := net.ResolveUnixAddr("unix", e.socketPath)
nl, err := net.ListenUnix("unix", addr)
if err != nil {
return fmt.Errorf("Unable to start API server: %s", err)
}
// We make the socket accesible to all users of the system.
// TODO: create a trireme group for this
if err := os.Chmod(addr.String(), 0766); err != nil {
return fmt.Errorf("Cannot make the socket accessible to all users: %s", err)
}
listener := &UIDListener{nl}
// Start serving HTTP requests in the background
go e.server.Serve(listener) // nolint
// Listen for context cancellation to close the socket.
go func() {
<-ctx.Done()
nl.Close() // nolint
}()
return nil
} | go | {
"resource": ""
} |
q7337 | create | train | func (e *EventServer) create(w http.ResponseWriter, r *http.Request) {
event := &common.EventInfo{}
defer r.Body.Close() // nolint
if err := json.NewDecoder(r.Body).Decode(event); err != nil {
http.Error(w, "Invalid request", http.StatusBadRequest)
return
}
if err := validateTypes(event); err != nil {
http.Error(w, fmt.Sprintf("Invalid request fields: %s", err), http.StatusBadRequest)
return
}
if err := validateUser(r, event); err != nil {
http.Error(w, fmt.Sprintf("Invalid user to pid mapping found: %s", err), http.StatusForbidden)
return
}
if err := validateEvent(event); err != nil {
http.Error(w, fmt.Sprintf("Bad request: %s", err), http.StatusBadRequest)
return
}
if err := e.processEvent(r.Context(), event); err != nil {
zap.L().Error("Error in processing event", zap.Error(err), zap.Reflect("Event", event))
http.Error(w, fmt.Sprintf("Cannot handle request: %s", err), http.StatusInternalServerError)
return
}
w.WriteHeader(http.StatusAccepted)
} | go | {
"resource": ""
} |
q7338 | processEvent | train | func (e *EventServer) processEvent(ctx context.Context, eventInfo *common.EventInfo) (err error) {
if e.registerer == nil {
return fmt.Errorf("No registered handlers")
}
f, err := e.registerer.GetHandler(eventInfo.PUType, eventInfo.EventType)
if err != nil {
return fmt.Errorf("Handler not found: %s", err)
}
return f(ctx, eventInfo)
} | go | {
"resource": ""
} |
// validateUser authorizes the caller of an event request against the target
// process named in the event. Caller credentials come from r.RemoteAddr,
// split into three colon-separated parts with parts[0] used as the caller
// UID (NOTE(review): presumably "uid:gid:pid" populated by UIDListener —
// confirm against the listener implementation). Root (UID 0) is always
// allowed; otherwise the caller's UID must match one of the UIDs of the
// process identified by event.PID.
func validateUser(r *http.Request, event *common.EventInfo) error {
	// Find the calling user.
	parts := strings.Split(r.RemoteAddr, ":")
	if len(parts) != 3 {
		return fmt.Errorf("Invalid user context")
	}
	// Accept all requests from root users
	if parts[0] == "0" {
		return nil
	}
	// The target process must be valid.
	p, err := process.NewProcess(event.PID)
	if err != nil {
		return fmt.Errorf("Process not found")
	}
	// The UID of the calling process must match the UID of the target process.
	uids, err := p.Uids()
	if err != nil {
		return fmt.Errorf("Unknown user ID")
	}
	match := false
	for _, uid := range uids {
		if strconv.Itoa(int(uid)) == parts[0] {
			match = true
		}
	}
	if !match {
		return fmt.Errorf("Invalid user - no access to this process: %+v PARTS: %+v", event, parts)
	}
	return nil
}
"resource": ""
} |
q7340 | validateTypes | train | func validateTypes(event *common.EventInfo) error {
regexStrings := regexp.MustCompile("^[a-zA-Z0-9_:.$%/-]{0,256}$")
regexNS := regexp.MustCompile("^[a-zA-Z0-9/-]{0,128}$")
regexCgroup := regexp.MustCompile("^/trireme/(uid/){0,1}[a-zA-Z0-9_:.$%]{1,64}$")
if _, ok := common.EventMap[event.EventType]; !ok {
return fmt.Errorf("invalid event: %s", string(event.EventType))
}
if event.PUType > common.TransientPU {
return fmt.Errorf("invalid pu type %v", event.PUType)
}
if !regexStrings.Match([]byte(event.Name)) {
return fmt.Errorf("Name is not of the right format")
}
if len(event.Cgroup) > 0 && !regexCgroup.Match([]byte(event.Cgroup)) {
return fmt.Errorf("Invalid cgroup format: %s", event.Cgroup)
}
if !regexNS.Match([]byte(event.NS)) {
return fmt.Errorf("Namespace is not of the right format")
}
for k, v := range event.IPs {
if !regexStrings.Match([]byte(k)) {
return fmt.Errorf("Invalid IP name: %s", k)
}
if ip := net.ParseIP(v); ip == nil {
return fmt.Errorf("Invalid IP address: %s", v)
}
}
return nil
} | go | {
"resource": ""
} |
q7341 | validateEvent | train | func validateEvent(event *common.EventInfo) error {
if event.EventType == common.EventCreate || event.EventType == common.EventStart {
if event.HostService {
if event.NetworkOnlyTraffic {
if event.Name == "" {
return fmt.Errorf("Service name must be provided and must not be default")
}
}
} else {
if event.PUID == "" {
event.PUID = strconv.Itoa(int(event.PID))
}
}
}
if event.EventType == common.EventStop || event.EventType == common.EventDestroy {
regStop := regexp.MustCompile("^/trireme/[a-zA-Z0-9_]{1,11}$")
if event.Cgroup != "" && !regStop.Match([]byte(event.Cgroup)) {
return fmt.Errorf("Cgroup is not of the right format")
}
}
return nil
} | go | {
"resource": ""
} |
q7342 | createPortSet | train | func (i *Instance) createPortSet(contextID string, puInfo *policy.PUInfo) error {
if i.mode == constants.RemoteContainer {
return nil
}
username := puInfo.Runtime.Options().UserID
prefix := ""
if username != "" {
prefix = uidPortSetPrefix
} else {
prefix = processPortSetPrefix
}
portSetName := puPortSetName(contextID, prefix)
if puseterr := i.createPUPortSet(portSetName); puseterr != nil {
return puseterr
}
i.contextIDToPortSetMap.AddOrUpdate(contextID, portSetName)
return nil
} | go | {
"resource": ""
} |
q7343 | deletePortSet | train | func (i *Instance) deletePortSet(contextID string) error {
if i.mode == constants.RemoteContainer {
return nil
}
portSetName := i.getPortSet(contextID)
if portSetName == "" {
return fmt.Errorf("Failed to find port set")
}
ips := ipset.IPSet{
Name: portSetName,
}
if err := ips.Destroy(); err != nil {
return fmt.Errorf("Failed to delete pu port set "+portSetName, zap.Error(err))
}
if err := i.contextIDToPortSetMap.Remove(contextID); err != nil {
zap.L().Debug("portset not found for the contextID", zap.String("contextID", contextID))
}
return nil
} | go | {
"resource": ""
} |
q7344 | DeletePortFromPortSet | train | func (i *Instance) DeletePortFromPortSet(contextID string, port string) error {
portSetName := i.getPortSet(contextID)
if portSetName == "" {
return fmt.Errorf("unable to get portset for contextID %s", contextID)
}
ips := ipset.IPSet{
Name: portSetName,
}
if _, err := strconv.Atoi(port); err != nil {
return fmt.Errorf("invalid port: %s", err)
}
if err := ips.Del(port); err != nil {
return fmt.Errorf("unable to delete port from portset: %s", err)
}
return nil
} | go | {
"resource": ""
} |
q7345 | NewCollector | train | func NewCollector() Collector {
return &collectorImpl{
Flows: map[string]*collector.FlowRecord{},
Users: map[string]*collector.UserRecord{},
ProcessedUsers: map[string]bool{},
DatapathPacketReports: []*collector.PacketReport{},
}
} | go | {
"resource": ""
} |
q7346 | DefaultMetadataExtractor | train | func DefaultMetadataExtractor(info *types.ContainerJSON) (*policy.PURuntime, error) {
// trigger new build
tags := policy.NewTagStore()
// TODO: Remove OLDTAGS
tags.AppendKeyValue("@sys:image", info.Config.Image)
tags.AppendKeyValue("@sys:name", info.Name)
tags.AppendKeyValue("@app:image", info.Config.Image)
tags.AppendKeyValue("@app:extractor", "docker")
tags.AppendKeyValue("@app:docker:name", info.Name)
for k, v := range info.Config.Labels {
if len(strings.TrimSpace(k)) == 0 {
continue
}
value := v
if len(v) == 0 {
value = "<empty>"
}
if !strings.HasPrefix(k, UserLabelPrefix) {
tags.AppendKeyValue(UserLabelPrefix+k, value)
} else {
tags.AppendKeyValue(k, value)
}
}
ipa := policy.ExtendedMap{
"bridge": info.NetworkSettings.IPAddress,
}
if info.HostConfig.NetworkMode == constants.DockerHostMode {
return policy.NewPURuntime(info.Name, info.State.Pid, "", tags, ipa, common.LinuxProcessPU, hostModeOptions(info)), nil
}
return policy.NewPURuntime(info.Name, info.State.Pid, "", tags, ipa, common.ContainerPU, nil), nil
} | go | {
"resource": ""
} |
q7347 | hostModeOptions | train | func hostModeOptions(dockerInfo *types.ContainerJSON) *policy.OptionsType {
options := policy.OptionsType{
CgroupName: strconv.Itoa(dockerInfo.State.Pid),
CgroupMark: strconv.FormatUint(cgnetcls.MarkVal(), 10),
}
for p := range dockerInfo.Config.ExposedPorts {
if p.Proto() == "tcp" {
s, err := portspec.NewPortSpecFromString(p.Port(), nil)
if err != nil {
continue
}
options.Services = append(options.Services, common.Service{
Protocol: uint8(6),
Ports: s,
})
}
}
return &options
} | go | {
"resource": ""
} |
q7348 | StatsFlowHash | train | func StatsFlowHash(r *FlowRecord) string {
hash := xxhash.New()
hash.Write([]byte(r.Source.ID)) // nolint errcheck
hash.Write([]byte(r.Destination.ID)) // nolint errcheck
port := make([]byte, 2)
binary.BigEndian.PutUint16(port, r.Destination.Port)
hash.Write(port) // nolint errcheck
hash.Write([]byte(r.Action.String())) // nolint errcheck
hash.Write([]byte(r.ObservedAction.String())) // nolint errcheck
hash.Write([]byte(r.DropReason)) // nolint errcheck
hash.Write([]byte(r.Destination.URI)) // nolint errcheck
return fmt.Sprintf("%d", hash.Sum64())
} | go | {
"resource": ""
} |
q7349 | StatsUserHash | train | func StatsUserHash(r *UserRecord) error {
// Order matters for the hash function loop
sort.Strings(r.Claims)
hash := xxhash.New()
for i := 0; i < len(r.Claims); i++ {
if strings.HasPrefix(r.Claims[i], "sub") {
continue
}
if _, err := hash.Write([]byte(r.Claims[i])); err != nil {
return fmt.Errorf("Cannot create hash")
}
}
r.ID = fmt.Sprintf("%d", hash.Sum64())
return nil
} | go | {
"resource": ""
} |
q7350 | NewPortCache | train | func NewPortCache(name string) *PortCache {
return &PortCache{
ports: cache.NewCache(name),
ranges: []*portspec.PortSpec{},
}
} | go | {
"resource": ""
} |
q7351 | AddPortSpec | train | func (p *PortCache) AddPortSpec(s *portspec.PortSpec) {
if s.Min == s.Max {
p.ports.AddOrUpdate(s.Min, s)
} else {
// Remove the range if it exists
p.Remove(s) // nolint
// Insert the portspec
p.Lock()
p.ranges = append([]*portspec.PortSpec{s}, p.ranges...)
p.Unlock()
}
} | go | {
"resource": ""
} |
q7352 | AddPortSpecToEnd | train | func (p *PortCache) AddPortSpecToEnd(s *portspec.PortSpec) {
// Remove the range if it exists
p.Remove(s) // nolint
p.Lock()
p.ranges = append(p.ranges, s)
p.Unlock()
} | go | {
"resource": ""
} |
q7353 | AddUnique | train | func (p *PortCache) AddUnique(s *portspec.PortSpec) error {
p.Lock()
defer p.Unlock()
if s.Min == s.Max {
if err, _ := p.ports.Get(s.Min); err != nil {
return fmt.Errorf("Port already exists: %s", err)
}
}
for _, r := range p.ranges {
if r.Max <= s.Min || r.Min >= s.Max {
continue
}
return fmt.Errorf("Overlap detected: %d %d", r.Max, r.Min)
}
if s.Min == s.Max {
return p.ports.Add(s.Min, s)
}
p.ranges = append(p.ranges, s)
return nil
} | go | {
"resource": ""
} |
q7354 | GetSpecValueFromPort | train | func (p *PortCache) GetSpecValueFromPort(port uint16) (interface{}, error) {
if spec, err := p.ports.Get(port); err == nil {
return spec.(*portspec.PortSpec).Value(), nil
}
p.Lock()
defer p.Unlock()
for _, s := range p.ranges {
if s.Min <= port && port < s.Max {
return s.Value(), nil
}
}
return nil, fmt.Errorf("No match for port %d", port)
} | go | {
"resource": ""
} |
q7355 | Remove | train | func (p *PortCache) Remove(s *portspec.PortSpec) error {
if s.Min == s.Max {
return p.ports.Remove(s.Min)
}
p.Lock()
defer p.Unlock()
for i, r := range p.ranges {
if r.Min == s.Min && r.Max == s.Max {
left := p.ranges[:i]
right := p.ranges[i+1:]
p.ranges = append(left, right...)
return nil
}
}
return fmt.Errorf("port not found")
} | go | {
"resource": ""
} |
q7356 | RemoveStringPorts | train | func (p *PortCache) RemoveStringPorts(ports string) error {
s, err := portspec.NewPortSpecFromString(ports, nil)
if err != nil {
return err
}
return p.Remove(s)
} | go | {
"resource": ""
} |
q7357 | NewMockRemoteIntf | train | func NewMockRemoteIntf(ctrl *gomock.Controller) *MockRemoteIntf {
mock := &MockRemoteIntf{ctrl: ctrl}
mock.recorder = &MockRemoteIntfMockRecorder{mock}
return mock
} | go | {
"resource": ""
} |
q7358 | InitEnforcer | train | func (m *MockRemoteIntf) InitEnforcer(req rpcwrapper.Request, resp *rpcwrapper.Response) error {
ret := m.ctrl.Call(m, "InitEnforcer", req, resp)
ret0, _ := ret[0].(error)
return ret0
} | go | {
"resource": ""
} |
q7359 | InitEnforcer | train | func (mr *MockRemoteIntfMockRecorder) InitEnforcer(req, resp interface{}) *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InitEnforcer", reflect.TypeOf((*MockRemoteIntf)(nil).InitEnforcer), req, resp)
} | go | {
"resource": ""
} |
q7360 | Run | train | func (e *enforcer) Run(ctx context.Context) error {
if e.proxy != nil {
if err := e.proxy.Run(ctx); err != nil {
return err
}
}
if e.transport != nil {
if err := e.transport.Run(ctx); err != nil {
return err
}
}
if e.secrets != nil {
if err := e.secrets.Run(ctx); err != nil {
return err
}
}
return nil
} | go | {
"resource": ""
} |
q7361 | Enforce | train | func (e *enforcer) Enforce(contextID string, puInfo *policy.PUInfo) error {
if e.transport != nil {
if err := e.transport.Enforce(contextID, puInfo); err != nil {
return fmt.Errorf("unable to enforce in nfq: %s", err)
}
}
if e.proxy != nil {
if err := e.proxy.Enforce(context.Background(), contextID, puInfo); err != nil {
return fmt.Errorf("unable to enforce in proxy: %s", err)
}
}
if e.secrets != nil {
if err := e.secrets.Enforce(puInfo); err != nil {
return fmt.Errorf("unable to enforce in secrets proxy: %s", err)
}
}
return nil
} | go | {
"resource": ""
} |
q7362 | Unenforce | train | func (e *enforcer) Unenforce(contextID string) error {
var perr, nerr, serr error
if e.proxy != nil {
if perr = e.proxy.Unenforce(context.Background(), contextID); perr != nil {
zap.L().Error("Failed to unenforce contextID in proxy",
zap.String("ContextID", contextID),
zap.Error(perr),
)
}
}
if e.transport != nil {
if nerr = e.transport.Unenforce(contextID); nerr != nil {
zap.L().Error("Failed to unenforce contextID in transport",
zap.String("ContextID", contextID),
zap.Error(nerr),
)
}
}
if e.secrets != nil {
if serr = e.secrets.Unenforce(contextID); nerr != nil {
zap.L().Error("Failed to unenforce contextID in transport",
zap.String("ContextID", contextID),
zap.Error(nerr),
)
}
}
if perr != nil || nerr != nil || serr != nil {
return fmt.Errorf("Failed to unenforce %s %s", perr, nerr)
}
return nil
} | go | {
"resource": ""
} |
q7363 | UpdateSecrets | train | func (e *enforcer) UpdateSecrets(secrets secrets.Secrets) error {
if e.proxy != nil {
if err := e.proxy.UpdateSecrets(secrets); err != nil {
return err
}
}
if e.transport != nil {
if err := e.transport.UpdateSecrets(secrets); err != nil {
return err
}
}
if e.secrets != nil {
if err := e.secrets.UpdateSecrets(secrets); err != nil {
return err
}
}
return nil
} | go | {
"resource": ""
} |
q7364 | EnableDatapathPacketTracing | train | func (e *enforcer) EnableDatapathPacketTracing(contextID string, direction packettracing.TracingDirection, interval time.Duration) error {
return e.transport.EnableDatapathPacketTracing(contextID, direction, interval)
} | go | {
"resource": ""
} |
q7365 | New | train | func New(
mutualAuthorization bool,
fqConfig *fqconfig.FilterQueue,
collector collector.EventCollector,
service packetprocessor.PacketProcessor,
secrets secrets.Secrets,
serverID string,
validity time.Duration,
mode constants.ModeType,
procMountPoint string,
externalIPCacheTimeout time.Duration,
packetLogs bool,
cfg *runtime.Configuration,
) (Enforcer, error) {
tokenAccessor, err := tokenaccessor.New(serverID, validity, secrets)
if err != nil {
zap.L().Fatal("Cannot create a token engine")
}
puFromContextID := cache.NewCache("puFromContextID")
transport := nfqdatapath.New(
mutualAuthorization,
fqConfig,
collector,
serverID,
validity,
service,
secrets,
mode,
procMountPoint,
externalIPCacheTimeout,
packetLogs,
tokenAccessor,
puFromContextID,
cfg,
)
tcpProxy, err := applicationproxy.NewAppProxy(tokenAccessor, collector, puFromContextID, nil, secrets)
if err != nil {
return nil, err
}
return &enforcer{
proxy: tcpProxy,
transport: transport,
secrets: secretsproxy.NewSecretsProxy(),
}, nil
} | go | {
"resource": ""
} |
q7366 | extractHeaderAttribute | train | func (h HeaderBytes) extractHeaderAttribute(mask uint32) uint32 {
data := binary.LittleEndian.Uint32(h)
return data & mask
} | go | {
"resource": ""
} |
q7367 | toMask | train | func (ct CompressionType) toMask() compressionTypeMask {
switch ct {
case CompressionTypeV1:
return compressionTypeV1Mask
case CompressionTypeV2:
return compressionTypeV2Mask
default:
return compressionTypeNoneMask
}
} | go | {
"resource": ""
} |
q7368 | toType | train | func (cm compressionTypeMask) toType() CompressionType {
switch cm {
case compressionTypeV1Mask:
return CompressionTypeV1
case compressionTypeV2Mask:
return CompressionTypeV2
default:
return CompressionTypeNone
}
} | go | {
"resource": ""
} |
q7369 | String2CompressionType | train | func String2CompressionType(s string) CompressionType {
switch s {
case CompressionTypeV1.toString():
return CompressionTypeV1
case CompressionTypeV2.toString():
return CompressionTypeV2
default:
return CompressionTypeNone
}
} | go | {
"resource": ""
} |
q7370 | newReconciler | train | func newReconciler(mgr manager.Manager, handler *config.ProcessorConfig, metadataExtractor extractors.PodMetadataExtractor, netclsProgrammer extractors.PodNetclsProgrammer, nodeName string, enableHostPods bool) *ReconcilePod {
return &ReconcilePod{
client: mgr.GetClient(),
scheme: mgr.GetScheme(),
recorder: mgr.GetRecorder("trireme-pod-controller"),
handler: handler,
metadataExtractor: metadataExtractor,
netclsProgrammer: netclsProgrammer,
nodeName: nodeName,
enableHostPods: enableHostPods,
// TODO: might move into configuration
handlePUEventTimeout: 5 * time.Second,
metadataExtractTimeout: 3 * time.Second,
netclsProgramTimeout: 2 * time.Second,
}
} | go | {
"resource": ""
} |
q7371 | addController | train | func addController(mgr manager.Manager, r *ReconcilePod, eventsCh <-chan event.GenericEvent) error {
// Create a new controller
c, err := controller.New("trireme-pod-controller", mgr, controller.Options{Reconciler: r})
if err != nil {
return err
}
// we use this mapper in both of our event sources
mapper := &WatchPodMapper{
client: mgr.GetClient(),
nodeName: r.nodeName,
enableHostPods: r.enableHostPods,
}
// use the our watch pod mapper which filters pods before we reconcile
if err := c.Watch(
&source.Kind{Type: &corev1.Pod{}},
&handler.EnqueueRequestsFromMapFunc{ToRequests: mapper},
); err != nil {
return err
}
// we pass in a custom channel for events generated by resync
if err := c.Watch(
&source.Channel{Source: eventsCh},
&handler.EnqueueRequestsFromMapFunc{ToRequests: mapper},
); err != nil {
return err
}
return nil
} | go | {
"resource": ""
} |
q7372 | NewProcessor | train | func NewProcessor(s secrets.Secrets, trustedCertificate *x509.Certificate) *Processor {
return &Processor{
aporetoJWT: servicetokens.NewVerifier(s, trustedCertificate),
}
} | go | {
"resource": ""
} |
q7373 | UpdateSecrets | train | func (p *Processor) UpdateSecrets(s secrets.Secrets, trustedCertificate *x509.Certificate) {
p.aporetoJWT.UpdateSecrets(s, trustedCertificate)
} | go | {
"resource": ""
} |
q7374 | AddOrUpdateService | train | func (p *Processor) AddOrUpdateService(apis *urisearch.APICache, serviceType policy.UserAuthorizationTypeValues, handler usertokens.Verifier, mappings map[string]string) {
p.Lock()
defer p.Unlock()
p.apis = apis
p.userTokenMappings = mappings
p.userTokenHandler = handler
p.userAuthorizationType = serviceType
} | go | {
"resource": ""
} |
q7375 | UpdateServiceAPIs | train | func (p *Processor) UpdateServiceAPIs(apis *urisearch.APICache) error {
p.Lock()
defer p.Unlock()
p.apis = apis
return nil
} | go | {
"resource": ""
} |
q7376 | DecodeUserClaims | train | func (p *Processor) DecodeUserClaims(name, userToken string, certs []*x509.Certificate, r *http.Request) ([]string, bool, string, error) {
switch p.userAuthorizationType {
case policy.UserAuthorizationMutualTLS, policy.UserAuthorizationJWT:
// First parse any incoming certificates and retrieve attributes from them.
// This is used in case of client authorization with certificates.
attributes := []string{}
for _, cert := range certs {
attributes = append(attributes, "CN="+cert.Subject.CommonName)
for _, email := range cert.EmailAddresses {
attributes = append(attributes, "Email="+email)
}
for _, org := range cert.Subject.Organization {
attributes = append(attributes, "O="+org)
}
for _, org := range cert.Subject.OrganizationalUnit {
attributes = append(attributes, "OU="+org)
}
}
if p.userAuthorizationType == policy.UserAuthorizationJWT && p.userTokenHandler != nil {
jwtAttributes, _, _, err := p.userTokenHandler.Validate(r.Context(), userToken)
if err != nil {
return attributes, false, userToken, fmt.Errorf("Unable to decode JWT: %s", err)
}
attributes = append(attributes, jwtAttributes...)
}
return attributes, false, userToken, nil
case policy.UserAuthorizationOIDC:
// Now we can parse the user claims.
if p.userTokenHandler == nil {
zap.L().Error("Internal Server Error: OIDC User Token Handler not configured")
return []string{}, false, userToken, nil
}
return p.userTokenHandler.Validate(r.Context(), userToken)
default:
return []string{}, false, userToken, nil
}
} | go | {
"resource": ""
} |
q7377 | DecodeAporetoClaims | train | func (p *Processor) DecodeAporetoClaims(aporetoToken string, publicKey string) (string, []string, error) {
if len(aporetoToken) == 0 || p.aporetoJWT == nil {
return "", []string{}, nil
}
// Finally we can parse the Aporeto token.
id, scopes, profile, err := p.aporetoJWT.ParseToken(aporetoToken, publicKey)
if err != nil {
return "", []string{}, fmt.Errorf("Invalid Aporeto Token: %s", err)
}
return id, append(profile, scopes...), nil
} | go | {
"resource": ""
} |
q7378 | Callback | train | func (p *Processor) Callback(w http.ResponseWriter, r *http.Request) {
p.RLock()
defer p.RUnlock()
// Validate the JWT token through the handler.
token, originURL, status, err := p.userTokenHandler.Callback(r)
if err != nil {
http.Error(w, fmt.Sprintf("Invalid code %s:", err), http.StatusInternalServerError)
return
}
cookie := &http.Cookie{
Name: "X-APORETO-AUTH",
Value: token,
HttpOnly: true,
Secure: true,
Path: "/",
}
http.SetCookie(w, cookie)
// We transmit the information in the return payload for applications
// that choose to use it directly without a cookie.
data, err := json.MarshalIndent(cookie, " ", " ")
if err != nil {
http.Error(w, "Bad data", http.StatusInternalServerError)
return
}
// We redirect here to the original URL that the application attempted
// to access.
w.Header().Add("Location", originURL)
http.Error(w, string(data), status)
} | go | {
"resource": ""
} |
q7379 | Check | train | func (p *Processor) Check(method, uri string, claims []string) (bool, bool) {
p.RLock()
defer p.RUnlock()
return p.apis.FindAndMatchScope(method, uri, claims)
} | go | {
"resource": ""
} |
q7380 | RedirectURI | train | func (p *Processor) RedirectURI(originURL string) string {
p.RLock()
defer p.RUnlock()
return p.userTokenHandler.IssueRedirect(originURL)
} | go | {
"resource": ""
} |
q7381 | UpdateRequestHeaders | train | func (p *Processor) UpdateRequestHeaders(r *http.Request, claims []string) {
p.RLock()
defer p.RUnlock()
if len(p.userTokenMappings) == 0 {
return
}
for _, claim := range claims {
parts := strings.SplitN(claim, "=", 2)
if header, ok := p.userTokenMappings[parts[0]]; ok && len(parts) == 2 {
r.Header.Add(header, parts[1])
}
}
} | go | {
"resource": ""
} |
q7382 | NewFilterQueueWithDefaults | train | func NewFilterQueueWithDefaults() *FilterQueue {
return NewFilterQueue(
DefaultQueueSeperation,
DefaultMarkValue,
DefaultQueueStart,
DefaultNumberOfQueues,
DefaultNumberOfQueues,
DefaultQueueSize,
DefaultQueueSize,
)
} | go | {
"resource": ""
} |
q7383 | hasSynced | train | func hasSynced(sync chan struct{}, controller kubecache.Controller) {
for {
if controller.HasSynced() {
sync <- struct{}{}
return
}
<-time.After(100 * time.Millisecond)
}
} | go | {
"resource": ""
} |
q7384 | Synchronize | train | func (n *NFQParser) Synchronize() error {
n.Lock()
defer n.Unlock()
data, err := ioutil.ReadFile(n.filePath)
if err != nil {
return err
}
n.nfqStr = string(data)
scanner := bufio.NewScanner(bytes.NewReader(data))
for scanner.Scan() {
line := scanner.Text()
lineParts := strings.Fields(line)
newNFQ := makeNFQLayout(lineParts)
n.contents[newNFQ.QueueNum] = newNFQ
}
return nil
} | go | {
"resource": ""
} |
q7385 | RetrieveByQueue | train | func (n *NFQParser) RetrieveByQueue(queueNum string) *NFQLayout {
n.Lock()
defer n.Unlock()
content, ok := n.contents[queueNum]
if ok {
return &content
}
return nil
} | go | {
"resource": ""
} |
q7386 | RetrieveAll | train | func (n *NFQParser) RetrieveAll() map[string]NFQLayout {
n.Lock()
defer n.Unlock()
return n.contents
} | go | {
"resource": ""
} |
q7387 | String | train | func (n *NFQParser) String() string {
n.Lock()
defer n.Unlock()
return n.nfqStr
} | go | {
"resource": ""
} |
q7388 | NewMockProcessor | train | func NewMockProcessor(ctrl *gomock.Controller) *MockProcessor {
mock := &MockProcessor{ctrl: ctrl}
mock.recorder = &MockProcessorMockRecorder{mock}
return mock
} | go | {
"resource": ""
} |
q7389 | Start | train | func (m *MockProcessor) Start(ctx context.Context, eventInfo *common.EventInfo) error {
ret := m.ctrl.Call(m, "Start", ctx, eventInfo)
ret0, _ := ret[0].(error)
return ret0
} | go | {
"resource": ""
} |
q7390 | Start | train | func (mr *MockProcessorMockRecorder) Start(ctx, eventInfo interface{}) *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Start", reflect.TypeOf((*MockProcessor)(nil).Start), ctx, eventInfo)
} | go | {
"resource": ""
} |
q7391 | NewTagStoreFromMap | train | func NewTagStoreFromMap(tags map[string]string) *TagStore {
t := &TagStore{make([]string, len(tags))}
i := 0
for k, v := range tags {
t.Tags[i] = k + "=" + v
i++
}
return t
} | go | {
"resource": ""
} |
q7392 | Copy | train | func (t *TagStore) Copy() *TagStore {
c := make([]string, len(t.Tags))
copy(c, t.Tags)
return &TagStore{c}
} | go | {
"resource": ""
} |
q7393 | Get | train | func (t *TagStore) Get(key string) (string, bool) {
for _, kv := range t.Tags {
parts := strings.SplitN(kv, "=", 2)
if len(parts) != 2 {
continue
}
if key == parts[0] {
return parts[1], true
}
}
return "", false
} | go | {
"resource": ""
} |
q7394 | Merge | train | func (t *TagStore) Merge(m *TagStore) (merged int) {
for _, kv := range m.Tags {
parts := strings.SplitN(kv, "=", 2)
if len(parts) != 2 {
continue
}
if _, ok := t.Get(parts[0]); !ok {
t.AppendKeyValue(parts[0], parts[1])
merged++
}
}
return merged
} | go | {
"resource": ""
} |
q7395 | AppendKeyValue | train | func (t *TagStore) AppendKeyValue(key, value string) {
t.Tags = append(t.Tags, key+"="+value)
} | go | {
"resource": ""
} |
q7396 | RoundTrip | train | func (t *TriremeRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
res, err := t.RoundTripper.RoundTrip(req)
if err != nil || res == nil {
return res, err
}
data := req.Context().Value(statsContextKey)
if data == nil {
return res, nil
}
state, ok := data.(*connectionState)
if ok && state.cookie == nil {
return res, nil
}
if v := state.cookie.String(); v != "" {
res.Header.Add("Set-Cookie", v)
}
return res, nil
} | go | {
"resource": ""
} |
q7397 | Resync | train | func (u *UIDMonitor) Resync(ctx context.Context) error {
return u.proc.Resync(ctx, nil)
} | go | {
"resource": ""
} |
q7398 | NewMockDebugClient | train | func NewMockDebugClient(ctrl *gomock.Controller) *MockDebugClient {
mock := &MockDebugClient{ctrl: ctrl}
mock.recorder = &MockDebugClientMockRecorder{mock}
return mock
} | go | {
"resource": ""
} |
q7399 | Run | train | func (m *MockDebugClient) Run(ctx context.Context) error {
ret := m.ctrl.Call(m, "Run", ctx)
ret0, _ := ret[0].(error)
return ret0
} | go | {
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.