_id stringlengths 2 7 | title stringlengths 1 118 | partition stringclasses 3 values | text stringlengths 52 85.5k | language stringclasses 1 value | meta_information dict |
|---|---|---|---|---|---|
q8000 | InspectNameserver | train | // InspectNameserver returns the current nameserver state encoded as
// JSON. When no nameserver is configured it returns an empty byte slice
// and no error.
func (d *OvsDriver) InspectNameserver() ([]byte, error) {
	// No nameserver configured: report empty state rather than an error.
	if d.nameServer == nil {
		return []byte{}, nil
	}
	// Fetch the state and fail fast if the read itself fails. The
	// original discarded this error (it was immediately overwritten by
	// json.Marshal's error) and could marshal a bogus value.
	ns, err := d.nameServer.InspectState()
	if err != nil {
		log.Errorf("Error fetching nameserver state. Err: %v", err)
		return []byte{}, err
	}
	jsonState, err := json.Marshal(ns)
	if err != nil {
		log.Errorf("Error encoding nameserver state. Err: %v", err)
		return []byte{}, err
	}
	return jsonState, nil
} | go | {
"resource": ""
} |
q8001 | NewClient | train | // NewClient creates a consul client for the given endpoints. When no
// endpoint is supplied it defaults to the local agent at 127.0.0.1:8500.
// Connectivity is verified before returning, retrying transient server
// errors up to maxConsulRetries times.
func (cp *consulPlugin) NewClient(endpoints []string) (API, error) {
	cc := new(ConsulClient)
	if len(endpoints) == 0 {
		endpoints = []string{"127.0.0.1:8500"}
	}

	// default consul config; only the first endpoint is used
	cc.consulConfig = api.Config{Address: strings.TrimPrefix(endpoints[0], "http://")}

	// Initialize service DB
	cc.serviceDb = make(map[string]*consulServiceState)

	// Init consul client. Use Errorf, not Fatalf: Fatalf exits the
	// process, which made the original's error return unreachable.
	client, err := api.NewClient(&cc.consulConfig)
	if err != nil {
		log.Errorf("Error initializing consul client")
		return nil, err
	}
	cc.client = client

	// verify we can reach the consul server
	_, _, err = client.KV().List("/", nil)
	if err != nil {
		// Only retry errors that look transient.
		if api.IsServerError(err) || strings.Contains(err.Error(), "EOF") ||
			strings.Contains(err.Error(), "connection refused") {
			for i := 0; i < maxConsulRetries; i++ {
				_, _, err = client.KV().List("/", nil)
				if err == nil {
					break
				}
				// Retry after a delay
				time.Sleep(time.Second)
			}
		}
		// return error if it failed after retries
		if err != nil {
			log.Errorf("Error connecting to consul. Err: %v", err)
			return nil, err
		}
	}

	return cc, nil
} | go | {
"resource": ""
} |
q8002 | GetObj | train | func (cp *ConsulClient) GetObj(key string, retVal interface{}) error {
// GetObj reads the JSON object stored under the contiv object prefix
// into retVal using a consistent read. Transient errors (server error,
// EOF, connection refused) are retried up to maxConsulRetries times
// with a one-second delay between attempts.
key = processKey("/contiv.io/obj/" + processKey(key))
resp, _, err := cp.client.KV().Get(key, &api.QueryOptions{RequireConsistent: true})
if err != nil {
// Only errors that look transient enter the retry loop; anything else
// falls straight through to the error return below.
if api.IsServerError(err) || strings.Contains(err.Error(), "EOF") ||
strings.Contains(err.Error(), "connection refused") {
for i := 0; i < maxConsulRetries; i++ {
resp, _, err = cp.client.KV().Get(key, &api.QueryOptions{RequireConsistent: true})
if err == nil {
break
}
// Retry after a delay
time.Sleep(time.Second)
}
}
// return error if it failed after retries
if err != nil {
return err
}
}
// Consul returns success and a nil kv when a key is not found,
// translate it to 'Key not found' error
if resp == nil {
return errors.New("Key not found")
}
// Parse JSON response
if err := json.Unmarshal(resp.Value, retVal); err != nil {
log.Errorf("Error parsing object %v, Err %v", resp.Value, err)
return err
}
return nil
} | go | {
"resource": ""
} |
q8003 | ListDir | train | func (cp *ConsulClient) ListDir(key string) ([]string, error) {
// ListDir lists everything stored under the given directory prefix.
// Note that the returned strings are the stored VALUES (kv.Value) of
// each entry, not the key names. Transient errors are retried up to
// maxConsulRetries times with a one-second delay.
key = processKey("/contiv.io/obj/" + processKey(key))
kvs, _, err := cp.client.KV().List(key, nil)
if err != nil {
// Only retry errors that look transient.
if api.IsServerError(err) || strings.Contains(err.Error(), "EOF") ||
strings.Contains(err.Error(), "connection refused") {
for i := 0; i < maxConsulRetries; i++ {
kvs, _, err = cp.client.KV().List(key, nil)
if err == nil {
break
}
// Retry after a delay
time.Sleep(time.Second)
}
}
// return error if it failed after retries
if err != nil {
return nil, err
}
}
// Consul returns success and a nil kv when a key is not found,
// translate it to 'Key not found' error
if kvs == nil {
return []string{}, nil
}
var keys []string
for _, kv := range kvs {
keys = append(keys, string(kv.Value))
}
return keys, nil
} | go | {
"resource": ""
} |
q8004 | SetObj | train | func (cp *ConsulClient) SetObj(key string, value interface{}) error {
// SetObj JSON-encodes value and stores it under the contiv object
// prefix. Transient put errors are retried up to maxConsulRetries
// times with a one-second delay; the final error (if any) is returned.
key = processKey("/contiv.io/obj/" + processKey(key))
// JSON format the object
jsonVal, err := json.Marshal(value)
if err != nil {
log.Errorf("Json conversion error. Err %v", err)
return err
}
_, err = cp.client.KV().Put(&api.KVPair{Key: key, Value: jsonVal}, nil)
if err != nil {
// Only retry errors that look transient.
if api.IsServerError(err) || strings.Contains(err.Error(), "EOF") ||
strings.Contains(err.Error(), "connection refused") {
for i := 0; i < maxConsulRetries; i++ {
_, err = cp.client.KV().Put(&api.KVPair{Key: key, Value: jsonVal}, nil)
if err == nil {
break
}
// Retry after a delay
time.Sleep(time.Second)
}
}
}
return err
} | go | {
"resource": ""
} |
q8005 | DelObj | train | func (cp *ConsulClient) DelObj(key string) error {
// DelObj deletes the object stored under the contiv object prefix.
// Transient delete errors are retried up to maxConsulRetries times
// with a one-second delay; the final error (if any) is returned.
key = processKey("/contiv.io/obj/" + processKey(key))
_, err := cp.client.KV().Delete(key, nil)
if err != nil {
// Only retry errors that look transient.
if api.IsServerError(err) || strings.Contains(err.Error(), "EOF") ||
strings.Contains(err.Error(), "connection refused") {
for i := 0; i < maxConsulRetries; i++ {
_, err = cp.client.KV().Delete(key, nil)
if err == nil {
break
}
// Retry after a delay
time.Sleep(time.Second)
}
}
}
return err
} | go | {
"resource": ""
} |
q8006 | ClearState | train | func (d *ConsulStateDriver) ClearState(key string) error {
// ClearState removes the state stored at key. The key is normalized
// via processKey before issuing the consul delete.
key = processKey(key)
_, err := d.Client.KV().Delete(key, nil)
return err
} | go | {
"resource": ""
} |
q8007 | ReadState | train | func (d *ConsulStateDriver) ReadState(key string, value core.State,
unmarshal func([]byte, interface{}) error) error {
// ReadState reads the raw state stored at key and decodes it into
// value using the caller-supplied unmarshal function.
key = processKey(key)
encodedState, err := d.Read(key)
if err != nil {
return err
}
return unmarshal(encodedState, value)
} | go | {
"resource": ""
} |
q8008 | ReadAllState | train | func (d *ConsulStateDriver) ReadAllState(baseKey string, sType core.State,
unmarshal func([]byte, interface{}) error) ([]core.State, error) {
// ReadAllState reads all state objects under baseKey, decoding each
// with unmarshal; the heavy lifting is shared via readAllStateCommon.
baseKey = processKey(baseKey)
return readAllStateCommon(d, baseKey, sType, unmarshal)
} | go | {
"resource": ""
} |
q8009 | AddLink | train | func AddLink(link *Link, obj ModelObj) {
// AddLink points a one-to-one link at the given object by recording
// the object's type and key on the link.
link.ObjType = obj.GetType()
link.ObjKey = obj.GetKey()
} | go | {
"resource": ""
} |
q8010 | AddLinkSet | train | // AddLinkSet adds a one-to-many link for obj into the given link set,
// allocating the map on first use. The entry is indexed by the
// object's key. Always returns nil.
func AddLinkSet(linkSet *(map[string]Link), obj ModelObj) error {
	// Lazily allocate the underlying map.
	if *linkSet == nil {
		*linkSet = make(map[string]Link)
	}

	// Record the link, keyed by the object's key.
	newLink := Link{ObjType: obj.GetType(), ObjKey: obj.GetKey()}
	(*linkSet)[obj.GetKey()] = newLink

	return nil
} | go | {
"resource": ""
} |
q8011 | RemoveLinkSet | train | // RemoveLinkSet removes the link for obj (keyed by its key) from the
// given link set. A nil set is treated as empty. Always returns nil.
func RemoveLinkSet(linkSet *(map[string]Link), obj ModelObj) error {
	// Only touch the map when it has actually been allocated.
	if *linkSet != nil {
		delete(*linkSet, obj.GetKey())
	}
	return nil
} | go | {
"resource": ""
} |
q8012 | WriteObj | train | // WriteObj stores value under the modeldb key for the given object
// type and key, logging and returning any store error.
func WriteObj(objType, objKey string, value interface{}) error {
	key := "/modeldb/" + objType + "/" + objKey
	if err := cdb.SetObj(key, value); err != nil {
		log.Errorf("Error storing object %s. Err: %v", key, err)
		return err
	}
	return nil
} | go | {
"resource": ""
} |
q8013 | ReadObj | train | // ReadObj loads the modeldb object for the given type and key into
// retVal, logging and returning any store error.
func ReadObj(objType, objKey string, retVal interface{}) error {
	key := "/modeldb/" + objType + "/" + objKey
	if err := cdb.GetObj(key, retVal); err != nil {
		log.Errorf("Error reading object: %s. Err: %v", key, err)
		return err
	}
	return nil
} | go | {
"resource": ""
} |
q8014 | DeleteObj | train | // DeleteObj deletes the modeldb object for the given type and key.
// The store error is now propagated to the caller; the original logged
// it but always returned nil, silently swallowing delete failures
// (inconsistent with WriteObj/ReadObj, which both return their errors).
func DeleteObj(objType, objKey string) error {
	key := "/modeldb/" + objType + "/" + objKey
	if err := cdb.DelObj(key); err != nil {
		log.Errorf("Error deleting object: %s. Err: %v", key, err)
		return err
	}
	return nil
} | go | {
"resource": ""
} |
q8015 | ReadAllObj | train | // ReadAllObj lists all stored objects of the given model type by
// listing the type's modeldb directory prefix.
func ReadAllObj(objType string) ([]string, error) {
	return cdb.ListDir("/modeldb/" + objType + "/")
} | go | {
"resource": ""
} |
q8016 | GetEndpointGroupID | train | func GetEndpointGroupID(stateDriver core.StateDriver, groupName, tenantName string) (int, error) {
// GetEndpointGroupID looks up the numeric ID of the endpoint group
// named groupName in tenantName. An empty group name returns (0, nil).
// If service name is not specified, we are done
if groupName == "" {
return 0, nil
}
epgKey := GetEndpointGroupKey(groupName, tenantName)
cfgEpGroup := &EndpointGroupState{}
cfgEpGroup.StateDriver = stateDriver
err := cfgEpGroup.Read(epgKey)
if err != nil {
// The underlying read error is logged; callers see a generic
// "EPG not found" error.
log.Errorf("Error finding epg: %s. Err: %v", epgKey, err)
return 0, core.Errorf("EPG not found")
}
// return endpoint group id
return cfgEpGroup.EndpointGroupID, nil
} | go | {
"resource": ""
} |
q8017 | getGwCIDR | train | func getGwCIDR(epgObj *contivModel.EndpointGroup, stateDriver core.StateDriver) (string, error) {
// getGwCIDR returns the gateway address of the EPG's network in CIDR
// form ("<gateway>/<subnet-len>").
// get the subnet info and add it to ans
nwCfg := &mastercfg.CfgNetworkState{}
nwCfg.StateDriver = stateDriver
// Network state is keyed as "<network>.<tenant>".
networkID := epgObj.NetworkName + "." + epgObj.TenantName
nErr := nwCfg.Read(networkID)
if nErr != nil {
log.Errorf("Failed to network info %v %v ", networkID, nErr)
return "", nErr
}
gw := nwCfg.Gateway + "/" + strconv.Itoa(int(nwCfg.SubnetLen))
log.Debugf("GW is %s for epg %s", gw, epgObj.GroupName)
return gw, nil
} | go | {
"resource": ""
} |
q8018 | addPolicyContracts | train | func addPolicyContracts(csMap map[string]*contrSpec, epg *epgSpec, policy *contivModel.Policy) error {
// addPolicyContracts translates the rules of a policy into ACI
// contract specs (accumulated in csMap) and contract links on the EPG.
// IP/network-based rules are rejected, deny rules are skipped, and
// each allow rule contributes a protocol/port filter to its contract.
for ruleName := range policy.LinkSets.Rules {
rule := contivModel.FindRule(ruleName)
if rule == nil {
errStr := fmt.Sprintf("rule %v not found", ruleName)
return errors.New(errStr)
}
// ACI mode only supports EPG-based match criteria; reject any rule
// that specifies IP addresses or networks.
if rule.FromIpAddress != "" || rule.FromNetwork != "" ||
rule.ToIpAddress != "" || rule.ToNetwork != "" {
log.Errorf("rule: %+v is invalid for ACI mode", rule)
errStr := fmt.Sprintf("rule %s is invalid, only From/ToEndpointGroup may be specified in ACI mode", ruleName)
return errors.New(errStr)
}
// Deny rules are not translated into contracts.
if rule.Action == "deny" {
log.Debugf("==Ignoring deny rule %v", ruleName)
continue
}
filter := filterInfo{Protocol: rule.Protocol, ServPort: strconv.Itoa(rule.Port)}
cn := getContractName(policy.PolicyName, rule.FromEndpointGroup,
rule.ToEndpointGroup)
spec, found := csMap[cn]
if !found {
// add a link for this contract
// The link direction depends on which side the rule names: a
// ToEndpointGroup makes this EPG a consumer, otherwise a provider.
lKind := cProvide
if rule.ToEndpointGroup != "" {
lKind = cConsume
}
cLink := contrLink{LinkKind: lKind,
ContractName: cn,
ContractKind: cInternal,
}
epg.ContractLinks = append(epg.ContractLinks, cLink)
spec = &contrSpec{Name: cn}
csMap[cn] = spec
}
// Every rule mapping to the same contract adds one filter entry.
spec.Filters = append(spec.Filters, filter)
}
return nil
} | go | {
"resource": ""
} |
q8019 | CreateAppNw | train | func CreateAppNw(app *contivModel.AppProfile) error {
// CreateAppNw builds an ACI application-network spec (EPGs and
// contracts) from the app profile and pushes it to the ACI gateway.
// It is a no-op when ACI is not configured.
aciPresent, aErr := master.IsAciConfigured()
if aErr != nil {
log.Errorf("Couldn't read global config %v", aErr)
return aErr
}
if !aciPresent {
log.Debugf("ACI not configured")
return nil
}
// Get the state driver
stateDriver, uErr := utils.GetStateDriver()
if uErr != nil {
return uErr
}
eMap := &epgMap{}
eMap.Specs = make(map[string]epgSpec)
eMap.Contracts = make(map[string]*contrSpec)
ans := &appNwSpec{}
ans.ACIGwAPIVersion = aciGwAPIVersion
// Gateway config is optional; without it the gw falls back to its
// environment settings.
gwConfig := contivModel.FindAciGw("aciGw")
if gwConfig == nil {
log.Infof("aciGw object not found -- gw will use env settings")
} else {
ans.GWConfig = gwConfig
log.Infof("gwConfig: %+v", gwConfig)
}
ans.TenantName = app.TenantName
ans.AppName = app.AppProfileName
// Gather all basic epg info into the epg map
for epgKey := range app.LinkSets.EndpointGroups {
epgObj := contivModel.FindEndpointGroup(epgKey)
if epgObj == nil {
err := fmt.Sprintf("Epg %v does not exist", epgKey)
log.Errorf("%v", err)
return errors.New(err)
}
if err := appendEpgInfo(eMap, epgObj, stateDriver); err != nil {
log.Errorf("Error getting epg info %v", err)
return err
}
}
// walk the map and add to ANS
for _, epg := range eMap.Specs {
ans.Epgs = append(ans.Epgs, epg)
log.Debugf("Added epg %v", epg.Name)
}
for _, contract := range eMap.Contracts {
ans.ContractDefs = append(ans.ContractDefs, *contract)
log.Debugf("Added contract %v", contract.Name)
}
log.Infof("Launching appNwSpec: %+v", ans)
// The dataplane is notified after a fixed delay regardless of the
// launch outcome; the launch error is still returned to the caller.
lErr := ans.launch()
time.Sleep(2 * time.Second)
ans.notifyDP()
return lErr
} | go | {
"resource": ""
} |
q8020 | DeleteAppNw | train | func DeleteAppNw(app *contivModel.AppProfile) error {
// DeleteAppNw asks the ACI gateway proxy to delete the application
// profile. It is a no-op when ACI is not configured. A non-success
// result from the proxy is logged but deliberately not returned as an
// error (best-effort delete); only the HTTP call failure is.
aciPresent, aErr := master.IsAciConfigured()
if aErr != nil {
log.Errorf("Couldn't read global config %v", aErr)
return aErr
}
if !aciPresent {
log.Debugf("ACI not configured")
return nil
}
ans := &appNwSpec{}
ans.TenantName = app.TenantName
ans.AppName = app.AppProfileName
url := proxyURL + "deleteAppProf"
resp, err := httpPost(url, ans)
if err != nil {
log.Errorf("Delete failed. Error: %v", err)
return err
}
if resp.Result != "success" {
log.Errorf("Delete failed %v - %v", resp.Result, resp.Info)
}
time.Sleep(time.Second)
return nil
} | go | {
"resource": ""
} |
q8021 | NewLock | train | func (cp *ConsulClient) NewLock(name string, myID string, ttl uint64) (LockInterface, error) {
// NewLock creates (but does not acquire) a consul-backed distributed
// lock. The ttl is given in seconds and stored in consul's "<n>s"
// duration format. Never returns an error.
// Create a lock
return &consulLock{
name: name,
keyName: "contiv.io/lock/" + name,
myID: myID,
ttl: fmt.Sprintf("%ds", ttl),
eventChan: make(chan LockEvent, 1),
stopChan: make(chan struct{}, 1),
mutex: new(sync.Mutex),
client: cp.client,
}, nil
} | go | {
"resource": ""
} |
q8022 | IsReleased | train | func (lk *consulLock) IsReleased() bool {
// IsReleased reports whether the lock has been released. The mutex
// guards the isReleased flag against concurrent writers.
lk.mutex.Lock()
defer lk.mutex.Unlock()
return lk.isReleased
} | go | {
"resource": ""
} |
q8023 | createSession | train | func (lk *consulLock) createSession() error {
// createSession creates a new consul session for this lock and stores
// its ID on the lock (under the mutex). Behavior "delete" means the
// lock key is removed when the session is invalidated/expires.
// session configuration
sessCfg := api.SessionEntry{
Name: lk.keyName,
Behavior: "delete",
LockDelay: 10 * time.Millisecond,
TTL: lk.ttl,
}
// Create consul session
sessionID, _, err := lk.client.Session().CreateNoChecks(&sessCfg, nil)
if err != nil {
log.Errorf("Error Creating session for lock %s. Err: %v", lk.keyName, err)
return err
}
log.Infof("Created session: %s for lock %s/%s", sessionID, lk.name, lk.myID)
// save the session ID for later
lk.mutex.Lock()
lk.sessionID = sessionID
lk.mutex.Unlock()
return nil
} | go | {
"resource": ""
} |
q8024 | renewSession | train | func (lk *consulLock) renewSession() {
// renewSession keeps the lock's consul session alive. RenewPeriodic
// blocks until it fails or stopChan is closed; on failure a fresh
// session is created and the loop continues until the lock is
// released.
for {
err := lk.client.Session().RenewPeriodic(lk.ttl, lk.sessionID, nil, lk.stopChan)
if err == nil || lk.IsReleased() {
// If lock was released, exit this go routine
return
}
// Create new consul session
// NOTE(review): lk.sessionID is read above without holding lk.mutex
// while createSession writes it under the mutex — confirm this is
// safe. Also, repeated createSession failures loop with no backoff
// beyond whatever RenewPeriodic provides — verify intended.
err = lk.createSession()
if err != nil {
log.Errorf("Error Creating session for lock %s. Err: %v", lk.keyName, err)
}
}
} | go | {
"resource": ""
} |
q8025 | GetEndpoint | train | // GetEndpoint loads the operational endpoint state for the given
// endpoint ID from the configured state driver.
func GetEndpoint(epID string) (*drivers.OperEndpointState, error) {
	// Get hold of the state driver
	stateDriver, err := GetStateDriver()
	if err != nil {
		return nil, err
	}

	operEp := &drivers.OperEndpointState{StateDriver: stateDriver}
	if err := operEp.Read(epID); err != nil {
		return nil, err
	}
	return operEp, nil
} | go | {
"resource": ""
} |
q8026 | NewOvsdbDriver | train | // NewOvsdbDriver connects to the local OVS database, starts monitoring
// it, and ensures the named bridge exists (creating it with the given
// fail mode when missing).
func NewOvsdbDriver(bridgeName string, failMode string, vxlanUDPPort int) (*OvsdbDriver, error) {
	// Create a new driver instance
	d := new(OvsdbDriver)
	d.bridgeName = bridgeName
	d.vxlanUDPPort = fmt.Sprintf("%d", vxlanUDPPort)

	// Connect to OVS. Use Errorf, not Fatalf: Fatalf exits the process,
	// which made the original's error returns unreachable.
	ovs, err := libovsdb.ConnectUnix("")
	if err != nil {
		log.Errorf("Error connecting to OVS. Err: %v", err)
		return nil, err
	}
	d.ovs = ovs

	// Initialize the cache
	d.cache = make(map[string]map[libovsdb.UUID]libovsdb.Row)
	d.ovs.Register(d)
	// The original discarded MonitorAll's error and would then
	// dereference a nil update set; check it instead.
	initial, err := d.ovs.MonitorAll(ovsDataBase, "")
	if err != nil {
		log.Errorf("Error monitoring OVS database. Err: %v", err)
		return nil, err
	}
	d.populateCache(*initial)

	// Create a bridge after registering for events as we depend on ovsdb cache.
	// Since the same dirver is used as endpoint driver, only create the bridge
	// if it's not already created
	// XXX: revisit if the bridge-name needs to be configurable
	brCreated := false
	for _, row := range d.cache[bridgeTable] {
		if row.Fields["name"] == bridgeName {
			brCreated = true
			break
		}
	}

	if !brCreated {
		err = d.createDeleteBridge(bridgeName, failMode, operCreateBridge)
		if err != nil {
			log.Errorf("Error creating bridge %s. Err: %v", bridgeName, err)
			return nil, err
		}
	}

	return d, nil
} | go | {
"resource": ""
} |
q8027 | Update | train | // Update is the libovsdb callback invoked on table updates. It
// refreshes the local cache and propagates per-interface LACP status
// changes to the ofnet switch.
func (d *OvsdbDriver) Update(context interface{}, tableUpdates libovsdb.TableUpdates) {
	d.populateCache(tableUpdates)
	intfUpds, ok := tableUpdates.Updates["Interface"]
	if !ok {
		return
	}
	// No switch attached yet: nothing to notify.
	if d.ovsSwitch == nil {
		return
	}
	for _, intfUpd := range intfUpds.Rows {
		intf := intfUpd.New.Fields["name"]
		// Skip rows without LACP state or without a status change,
		// instead of returning: the original aborted the whole loop on
		// the first such row, dropping updates for all remaining rows.
		oldLacpStatus, ok := intfUpd.Old.Fields["lacp_current"]
		if !ok {
			continue
		}
		newLacpStatus, ok := intfUpd.New.Fields["lacp_current"]
		if !ok {
			continue
		}
		if oldLacpStatus == newLacpStatus {
			continue
		}
		linkUpd := ofnet.LinkUpdateInfo{
			LinkName:   intf.(string),
			LacpStatus: newLacpStatus.(bool),
		}
		log.Debugf("LACP_UPD: Interface: %+v. LACP Status - (Old: %+v, New: %+v)\n", intf, oldLacpStatus, newLacpStatus)
		d.ovsSwitch.HandleLinkUpdates(linkUpd)
	}
} | go | {
"resource": ""
} |
q8028 | createDeleteBridge | train | func (d *OvsdbDriver) createDeleteBridge(bridgeName, failMode string, op oper) error {
// createDeleteBridge inserts or deletes the named bridge row and
// applies the matching mutation on the Open_vSwitch root table in a
// single ovsdb transaction.
namedUUIDStr := "netplugin"
brUUID := []libovsdb.UUID{{GoUuid: namedUUIDStr}}
protocols := []string{"OpenFlow10", "OpenFlow11", "OpenFlow12", "OpenFlow13"}
opStr := "insert"
if op != operCreateBridge {
opStr = "delete"
}
// simple insert/delete operation
brOp := libovsdb.Operation{}
if op == operCreateBridge {
bridge := make(map[string]interface{})
bridge["name"] = bridgeName
// Enable Openflow1.3
bridge["protocols"], _ = libovsdb.NewOvsSet(protocols)
// set fail-mode if required
// Any non-empty failMode is mapped to "secure"; the value itself is
// not used.
if failMode != "" {
bridge["fail_mode"] = "secure"
}
brOp = libovsdb.Operation{
Op: opStr,
Table: bridgeTable,
Row: bridge,
UUIDName: namedUUIDStr,
}
} else {
condition := libovsdb.NewCondition("name", "==", bridgeName)
brOp = libovsdb.Operation{
Op: opStr,
Table: bridgeTable,
Where: []interface{}{condition},
}
// also fetch the br-uuid from cache
// On delete the real row UUID must be mutated out of the root table,
// so look it up from the local cache.
for uuid, row := range d.cache[bridgeTable] {
name := row.Fields["name"].(string)
if name == bridgeName {
brUUID = []libovsdb.UUID{uuid}
break
}
}
}
// Inserting/Deleting a Bridge row in Bridge table requires mutating
// the open_vswitch table.
mutateUUID := brUUID
mutateSet, _ := libovsdb.NewOvsSet(mutateUUID)
mutation := libovsdb.NewMutation("bridges", opStr, mutateSet)
condition := libovsdb.NewCondition("_uuid", "==", d.getRootUUID())
// simple mutate operation
mutateOp := libovsdb.Operation{
Op: "mutate",
Table: rootTable,
Mutations: []interface{}{mutation},
Where: []interface{}{condition},
}
operations := []libovsdb.Operation{brOp, mutateOp}
return d.performOvsdbOps(operations)
} | go | {
"resource": ""
} |
q8029 | GetPortOrIntfNameFromID | train | func (d *OvsdbDriver) GetPortOrIntfNameFromID(id string, isPort bool) (string, error) {
// GetPortOrIntfNameFromID finds the OVS port (isPort=true) or
// interface (isPort=false) whose external_ids carry the given
// endpoint-id, scanning the local ovsdb cache under a read lock.
table := portTable
if !isPort {
table = interfaceTable
}
d.cacheLock.RLock()
defer d.cacheLock.RUnlock()
// walk thru all ports
for _, row := range d.cache[table] {
if extIDs, ok := row.Fields["external_ids"]; ok {
extIDMap := extIDs.(libovsdb.OvsMap).GoMap
if portID, ok := extIDMap["endpoint-id"]; ok && portID == id {
return row.Fields["name"].(string), nil
}
}
}
return "", core.Errorf("Ovs port/intf not found for id: %s", id)
} | go | {
"resource": ""
} |
q8030 | CreatePort | train | func (d *OvsdbDriver) CreatePort(intfName, intfType, id string, tag, burst int, bandwidth int64) error {
// CreatePort adds an interface row, a port row, and a bridge mutation
// in one ovsdb transaction. The endpoint id is stored in external_ids
// on both rows; tag!=0 makes the port an access port on that VLAN,
// otherwise a trunk. bandwidth/burst configure ingress policing when
// non-zero.
// intfName is assumed to be unique enough to become uuid
portUUIDStr := intfName
intfUUIDStr := fmt.Sprintf("Intf%s", intfName)
portUUID := []libovsdb.UUID{{GoUuid: portUUIDStr}}
intfUUID := []libovsdb.UUID{{GoUuid: intfUUIDStr}}
opStr := "insert"
var err error
// insert/delete a row in Interface table
idMap := make(map[string]string)
intfOp := libovsdb.Operation{}
intf := make(map[string]interface{})
intf["name"] = intfName
intf["type"] = intfType
if bandwidth != 0 {
intf["ingress_policing_rate"] = bandwidth
}
if burst != 0 {
intf["ingress_policing_burst"] = burst
}
idMap["endpoint-id"] = id
intf["external_ids"], err = libovsdb.NewOvsMap(idMap)
if err != nil {
return err
}
// interface table ops
intfOp = libovsdb.Operation{
Op: opStr,
Table: interfaceTable,
Row: intf,
UUIDName: intfUUIDStr,
}
// insert/delete a row in Port table
portOp := libovsdb.Operation{}
port := make(map[string]interface{})
port["name"] = intfName
if tag != 0 {
port["vlan_mode"] = "access"
port["tag"] = tag
} else {
port["vlan_mode"] = "trunk"
}
port["interfaces"], err = libovsdb.NewOvsSet(intfUUID)
if err != nil {
return err
}
port["external_ids"], err = libovsdb.NewOvsMap(idMap)
if err != nil {
return err
}
portOp = libovsdb.Operation{
Op: opStr,
Table: portTable,
Row: port,
UUIDName: portUUIDStr,
}
// mutate the Ports column of the row in the Bridge table
mutateSet, _ := libovsdb.NewOvsSet(portUUID)
mutation := libovsdb.NewMutation("ports", opStr, mutateSet)
condition := libovsdb.NewCondition("name", "==", d.bridgeName)
mutateOp := libovsdb.Operation{
Op: "mutate",
Table: bridgeTable,
Mutations: []interface{}{mutation},
Where: []interface{}{condition},
}
operations := []libovsdb.Operation{intfOp, portOp, mutateOp}
return d.performOvsdbOps(operations)
} | go | {
"resource": ""
} |
q8031 | GetInterfacesInPort | train | func (d *OvsdbDriver) GetInterfacesInPort(portName string) []string {
// GetInterfacesInPort returns the sorted names of the interfaces that
// belong to the named port. A single-interface port stores a bare
// UUID; a bond stores a set of UUIDs.
// NOTE(review): GetIntfInfo re-acquires cacheLock.RLock while this
// function already holds it; recursive RLock can deadlock if a writer
// is queued between the two acquisitions — confirm.
var intfList []string
d.cacheLock.RLock()
defer d.cacheLock.RUnlock()
for _, row := range d.cache["Port"] {
name := row.Fields["name"].(string)
if name == portName {
// Port found
// Iterate over the list of interfaces
switch (row.Fields["interfaces"]).(type) {
case libovsdb.UUID: // Individual interface case
intfUUID := row.Fields["interfaces"].(libovsdb.UUID)
intfInfo := d.GetIntfInfo(intfUUID)
if reflect.DeepEqual(intfInfo, libovsdb.Row{}) {
log.Errorf("could not find interface with UUID: %+v", intfUUID)
break
}
intfList = append(intfList, intfInfo.Fields["name"].(string))
case libovsdb.OvsSet: // Port bond case
intfUUIDList := row.Fields["interfaces"].(libovsdb.OvsSet)
for _, intfUUID := range intfUUIDList.GoSet {
intfInfo := d.GetIntfInfo(intfUUID.(libovsdb.UUID))
if reflect.DeepEqual(intfInfo, libovsdb.Row{}) {
continue
}
intfList = append(intfList, intfInfo.Fields["name"].(string))
}
}
sort.Strings(intfList)
break
}
}
return intfList
} | go | {
"resource": ""
} |
q8032 | GetIntfInfo | train | // GetIntfInfo returns the cached Interface table row for the given
// UUID, or an empty libovsdb.Row when it is not in the cache.
func (d *OvsdbDriver) GetIntfInfo(uuid libovsdb.UUID) libovsdb.Row {
	d.cacheLock.RLock()
	defer d.cacheLock.RUnlock()

	// The cache is keyed by row UUID, so a direct map lookup replaces
	// the original's linear scan comparing every key against uuid.
	if row, ok := d.cache["Interface"][uuid]; ok {
		return row
	}
	return libovsdb.Row{}
} | go | {
"resource": ""
} |
q8033 | CreatePortBond | train | // CreatePortBond creates a LACP bond named bondName over the given
// interfaces on the driver's bridge: one Interface row per member, one
// Port row in balance-tcp/active-LACP mode (with active-backup
// fallback), and a Bridge mutation linking the new port in — all in a
// single ovsdb transaction.
func (d *OvsdbDriver) CreatePortBond(intfList []string, bondName string) error {
	var err error
	var ops []libovsdb.Operation
	var intfUUIDList []libovsdb.UUID
	opStr := "insert"

	// Add all the interfaces to the interface table
	for _, intf := range intfList {
		intfUUIDStr := fmt.Sprintf("Intf%s", intf)
		intfUUID := []libovsdb.UUID{{GoUuid: intfUUIDStr}}
		intfUUIDList = append(intfUUIDList, intfUUID...)

		// one Interface row per bond member
		iface := make(map[string]interface{})
		iface["name"] = intf
		intfOp := libovsdb.Operation{
			Op:       opStr,
			Table:    interfaceTable,
			Row:      iface,
			UUIDName: intfUUIDStr,
		}
		ops = append(ops, intfOp)
	}

	// Insert bond information in Port table
	port := make(map[string]interface{})
	port["name"] = bondName
	port["vlan_mode"] = "trunk"
	port["interfaces"], err = libovsdb.NewOvsSet(intfUUIDList)
	if err != nil {
		return err
	}

	// Set LACP and Hash properties
	// "balance-tcp" - balances flows among slaves based on L2, L3, and L4 protocol information such as
	// destination MAC address, IP address, and TCP port
	// lacp-fallback-ab:true - Fall back to activ-backup mode when LACP negotiation fails
	port["bond_mode"] = "balance-tcp"
	port["lacp"] = "active"
	lacpMap := make(map[string]string)
	lacpMap["lacp-fallback-ab"] = "true"
	port["other_config"], err = libovsdb.NewOvsMap(lacpMap)
	if err != nil {
		// The original ignored this error, which could submit a
		// transaction with a nil other_config value.
		return err
	}

	portUUIDStr := bondName
	portUUID := []libovsdb.UUID{{GoUuid: portUUIDStr}}
	portOp := libovsdb.Operation{
		Op:       opStr,
		Table:    portTable,
		Row:      port,
		UUIDName: portUUIDStr,
	}
	ops = append(ops, portOp)

	// Mutate the Ports column of the row in the Bridge table to include bond name
	mutateSet, _ := libovsdb.NewOvsSet(portUUID)
	mutation := libovsdb.NewMutation("ports", opStr, mutateSet)
	condition := libovsdb.NewCondition("name", "==", d.bridgeName)
	mutateOp := libovsdb.Operation{
		Op:        "mutate",
		Table:     bridgeTable,
		Mutations: []interface{}{mutation},
		Where:     []interface{}{condition},
	}
	ops = append(ops, mutateOp)

	return d.performOvsdbOps(ops)
} | go | {
"resource": ""
} |
q8034 | DeletePortBond | train | func (d *OvsdbDriver) DeletePortBond(bondName string, intfList []string) error {
// DeletePortBond removes the bond port, its member interface rows, and
// the port reference from the bridge row in one ovsdb transaction.
var ops []libovsdb.Operation
var condition []interface{}
portUUIDStr := bondName
portUUID := []libovsdb.UUID{{GoUuid: portUUIDStr}}
opStr := "delete"
for _, intfName := range intfList {
// insert/delete a row in Interface table
condition = libovsdb.NewCondition("name", "==", intfName)
intfOp := libovsdb.Operation{
Op: opStr,
Table: interfaceTable,
Where: []interface{}{condition},
}
ops = append(ops, intfOp)
}
// insert/delete a row in Port table
condition = libovsdb.NewCondition("name", "==", bondName)
portOp := libovsdb.Operation{
Op: opStr,
Table: portTable,
Where: []interface{}{condition},
}
ops = append(ops, portOp)
// also fetch the port-uuid from cache
// The bridge mutation needs the real row UUID, not the named UUID, so
// look it up from the local cache under the read lock.
d.cacheLock.RLock()
for uuid, row := range d.cache["Port"] {
name := row.Fields["name"].(string)
if name == bondName {
portUUID = []libovsdb.UUID{uuid}
break
}
}
d.cacheLock.RUnlock()
// mutate the Ports column of the row in the Bridge table
mutateSet, _ := libovsdb.NewOvsSet(portUUID)
mutation := libovsdb.NewMutation("ports", opStr, mutateSet)
condition = libovsdb.NewCondition("name", "==", d.bridgeName)
mutateOp := libovsdb.Operation{
Op: "mutate",
Table: bridgeTable,
Mutations: []interface{}{mutation},
Where: []interface{}{condition},
}
ops = append(ops, mutateOp)
// Perform OVS transaction
return d.performOvsdbOps(ops)
} | go | {
"resource": ""
} |
q8035 | UpdatePolicingRate | train | func (d *OvsdbDriver) UpdatePolicingRate(intfName string, burst int, bandwidth int64) error {
// UpdatePolicingRate updates the ingress policing rate and burst on
// the named interface row via an ovsdb update operation.
bw := int(bandwidth)
intf := make(map[string]interface{})
intf["ingress_policing_rate"] = bw
intf["ingress_policing_burst"] = burst
condition := libovsdb.NewCondition("name", "==", intfName)
if condition == nil {
return errors.New("error getting the new condition")
}
mutateOp := libovsdb.Operation{
Op: "update",
Table: interfaceTable,
Row: intf,
Where: []interface{}{condition},
}
operations := []libovsdb.Operation{mutateOp}
return d.performOvsdbOps(operations)
} | go | {
"resource": ""
} |
q8036 | CreateVtep | train | func (d *OvsdbDriver) CreateVtep(intfName string, vtepRemoteIP string) error {
// CreateVtep creates a VXLAN tunnel endpoint port towards the given
// remote IP: an Interface row of type "vxlan" with tunnel options, a
// trunk Port row, and a bridge mutation, all in one ovsdb transaction.
portUUIDStr := intfName
intfUUIDStr := fmt.Sprintf("Intf%s", intfName)
portUUID := []libovsdb.UUID{{GoUuid: portUUIDStr}}
intfUUID := []libovsdb.UUID{{GoUuid: intfUUIDStr}}
opStr := "insert"
intfType := "vxlan"
var err error
// insert/delete a row in Interface table
intf := make(map[string]interface{})
intf["name"] = intfName
intf["type"] = intfType
// Special handling for VTEP ports
intfOptions := make(map[string]interface{})
intfOptions["remote_ip"] = vtepRemoteIP
intfOptions["key"] = "flow" // Insert VNI per flow
intfOptions["tos"] = "inherit" // Copy DSCP from inner to outer IP header
intfOptions["dst_port"] = d.vxlanUDPPort // Set the UDP port for VXLAN
intf["options"], err = libovsdb.NewOvsMap(intfOptions)
if err != nil {
log.Errorf("error '%s' creating options from %v \n", err, intfOptions)
return err
}
// Add an entry in Interface table
intfOp := libovsdb.Operation{
Op: opStr,
Table: interfaceTable,
Row: intf,
UUIDName: intfUUIDStr,
}
// insert/delete a row in Port table
port := make(map[string]interface{})
port["name"] = intfName
port["vlan_mode"] = "trunk"
port["interfaces"], err = libovsdb.NewOvsSet(intfUUID)
if err != nil {
return err
}
// Add an entry in Port table
portOp := libovsdb.Operation{
Op: opStr,
Table: portTable,
Row: port,
UUIDName: portUUIDStr,
}
// mutate the Ports column of the row in the Bridge table
mutateSet, _ := libovsdb.NewOvsSet(portUUID)
mutation := libovsdb.NewMutation("ports", opStr, mutateSet)
condition := libovsdb.NewCondition("name", "==", d.bridgeName)
mutateOp := libovsdb.Operation{
Op: "mutate",
Table: bridgeTable,
Mutations: []interface{}{mutation},
Where: []interface{}{condition},
}
// Perform OVS transaction
operations := []libovsdb.Operation{intfOp, portOp, mutateOp}
return d.performOvsdbOps(operations)
} | go | {
"resource": ""
} |
q8037 | GetOfpPortNo | train | func (d *OvsdbDriver) GetOfpPortNo(intfName string) (uint32, error) {
// GetOfpPortNo resolves the OpenFlow port number for the named
// interface by selecting its row from ovsdb. Because port creation is
// asynchronous, the "ofport" column may be missing or -1 at first, so
// the lookup is retried every 300ms up to maxOfportRetry times.
retryNo := 0
condition := libovsdb.NewCondition("name", "==", intfName)
selectOp := libovsdb.Operation{
Op: "select",
Table: "Interface",
Where: []interface{}{condition},
}
for {
row, err := d.ovs.Transact(ovsDataBase, selectOp)
if err == nil && len(row) > 0 && len(row[0].Rows) > 0 {
value := row[0].Rows[0]["ofport"]
// ovsdb numbers arrive as float64; -1 means "not assigned yet",
// so keep retrying in that case.
if reflect.TypeOf(value).Kind() == reflect.Float64 {
//retry few more time. Due to asynchronous call between
//port creation and populating ovsdb entry for the interface
//may not be populated instantly.
if ofpPort := reflect.ValueOf(value).Float(); ofpPort != -1 {
return uint32(ofpPort), nil
}
}
}
time.Sleep(300 * time.Millisecond)
if retryNo == maxOfportRetry {
return 0, errors.New("ofPort not found")
}
retryNo++
}
} | go | {
"resource": ""
} |
q8038 | IsVtepPresent | train | func (d *OvsdbDriver) IsVtepPresent(remoteIP string) (bool, string) {
// IsVtepPresent reports whether a VTEP interface towards remoteIP
// already exists in the cached Interface table, returning its name
// when found and (false, "") otherwise.
d.cacheLock.RLock()
defer d.cacheLock.RUnlock()
// walk the local cache
for tName, table := range d.cache {
if tName == "Interface" {
for _, row := range table {
options := row.Fields["options"]
switch optMap := options.(type) {
case libovsdb.OvsMap:
// Match on the tunnel's remote_ip option.
if optMap.GoMap["remote_ip"] == remoteIP {
value := row.Fields["name"]
switch t := value.(type) {
case string:
return true, t
default:
// keep scanning: the name field had an unexpected type
// return false, ""
}
}
default:
// keep scanning: this row has no options map
// return false, ""
}
}
}
}
// We could not find the interface name
return false, ""
} | go | {
"resource": ""
} |
q8039 | SetClusterMode | train | // SetClusterMode validates cm against the supported cluster modes and
// records it in the master runtime config. core.Test is accepted as an
// internal mode used by integration tests (and is deliberately absent
// from the error message).
func SetClusterMode(cm string) error {
	switch cm {
	case core.Docker, core.Kubernetes, core.SwarmMode, core.Test:
		// accepted
	default:
		return core.Errorf("%s not a valid cluster mode {%s | %s | %s}",
			cm, core.Docker, core.Kubernetes, core.SwarmMode)
	}
	masterRTCfg.clusterMode = cm
	return nil
} | go | {
"resource": ""
} |
q8040 | CreateGlobal | train | func CreateGlobal(stateDriver core.StateDriver, gc *intent.ConfigGlobal) error {
// CreateGlobal applies a global-config intent: it validates and stores
// infra type, forwarding/ARP mode and private subnet in the master
// global config, and updates the VLAN/VXLAN resource ranges in gstate
// (creating or updating resources depending on whether oper state
// already exists). Held under GlobalMutex for the whole update.
log.Infof("Received global create with intent {%v}", gc)
var err error
gcfgUpdateList := []string{}
masterGc := &mastercfg.GlobConfig{}
masterGc.StateDriver = stateDriver
// Read errors are ignored here: a missing record simply leaves the
// zero-value config to be filled in below.
masterGc.Read("global")
gstate.GlobalMutex.Lock()
defer gstate.GlobalMutex.Unlock()
gCfg := &gstate.Cfg{}
gCfg.StateDriver = stateDriver
gCfg.Read("global")
// check for valid values
if gc.NwInfraType != "" {
switch gc.NwInfraType {
case "default", "aci", "aci-opflex":
// These values are acceptable.
default:
return errors.New("invalid fabric mode")
}
masterGc.NwInfraType = gc.NwInfraType
}
if gc.VLANs != "" {
_, err := netutils.ParseTagRanges(gc.VLANs, "vlan")
if err != nil {
return err
}
gCfg.Auto.VLANs = gc.VLANs
gcfgUpdateList = append(gcfgUpdateList, "vlan")
}
if gc.VXLANs != "" {
_, err = netutils.ParseTagRanges(gc.VXLANs, "vxlan")
if err != nil {
return err
}
gCfg.Auto.VXLANs = gc.VXLANs
gcfgUpdateList = append(gcfgUpdateList, "vxlan")
}
if gc.FwdMode != "" {
masterGc.FwdMode = gc.FwdMode
}
if gc.ArpMode != "" {
masterGc.ArpMode = gc.ArpMode
}
if gc.PvtSubnet != "" {
masterGc.PvtSubnet = gc.PvtSubnet
}
if len(gcfgUpdateList) > 0 {
// Delete old state
// If oper state exists the changed resources are updated in place;
// otherwise they are processed (set up) from scratch.
gOper := &gstate.Oper{}
gOper.StateDriver = stateDriver
err = gOper.Read("")
if err == nil {
for _, res := range gcfgUpdateList {
err = gCfg.UpdateResources(res)
if err != nil {
return err
}
}
} else {
for _, res := range gcfgUpdateList {
// setup resources
err = gCfg.Process(res)
if err != nil {
log.Errorf("Error updating the config %+v. Error: %s", gCfg, err)
return err
}
}
}
err = gCfg.Write()
if err != nil {
log.Errorf("error updating global config.Error: %s", err)
return err
}
}
return masterGc.Write()
} | go | {
"resource": ""
} |
q8041 | DeleteGlobal | train | func DeleteGlobal(stateDriver core.StateDriver) error {
// DeleteGlobal clears all global state: the master global config, the
// gstate config (including its vlan/vxlan resources), and the gstate
// oper state. Each piece is only cleared if its Read succeeds, so a
// missing record is treated as already deleted.
masterGc := &mastercfg.GlobConfig{}
masterGc.StateDriver = stateDriver
err := masterGc.Read("")
if err == nil {
err = masterGc.Clear()
if err != nil {
return err
}
}
// Setup global state
gCfg := &gstate.Cfg{}
gCfg.StateDriver = stateDriver
err = gCfg.Read("")
if err == nil {
// Release the allocated tag ranges before clearing the config itself.
err = gCfg.DeleteResources("vlan")
if err != nil {
return err
}
err = gCfg.DeleteResources("vxlan")
if err != nil {
return err
}
err = gCfg.Clear()
if err != nil {
return err
}
}
// Delete old state
gOper := &gstate.Oper{}
gOper.StateDriver = stateDriver
err = gOper.Read("")
if err == nil {
err = gOper.Clear()
if err != nil {
return err
}
}
return nil
} | go | {
"resource": ""
} |
q8042 | DeleteTenant | train | func DeleteTenant(stateDriver core.StateDriver, tenant *intent.ConfigTenant) error {
return validateTenantConfig(tenant)
} | go | {
"resource": ""
} |
q8043 | IsAciConfigured | train | func IsAciConfigured() (res bool, err error) {
// Get the state driver
stateDriver, uErr := utils.GetStateDriver()
if uErr != nil {
log.Warnf("Couldn't read global config %v", uErr)
return false, uErr
}
// read global config
masterGc := &mastercfg.GlobConfig{}
masterGc.StateDriver = stateDriver
uErr = masterGc.Read("config")
if core.ErrIfKeyExists(uErr) != nil {
log.Errorf("Couldn't read global config %v", uErr)
return false, uErr
}
if uErr != nil {
log.Warnf("Couldn't read global config %v", uErr)
return false, nil
}
if masterGc.NwInfraType != "aci" {
log.Debugf("NwInfra type is %v, no ACI", masterGc.NwInfraType)
return false, nil
}
return true, nil
} | go | {
"resource": ""
} |
q8044 | getVersion | train | func getVersion(w http.ResponseWriter, r *http.Request) {
ver := version.Get()
resp, err := json.Marshal(ver)
if err != nil {
http.Error(w,
core.Errorf("marshaling json failed. Error: %s", err).Error(),
http.StatusInternalServerError)
return
}
w.Write(resp)
return
} | go | {
"resource": ""
} |
q8045 | slaveProxyHandler | train | func slaveProxyHandler(w http.ResponseWriter, r *http.Request) {
log.Infof("proxy handler for %q ", r.URL.Path)
localIP, err := netutils.GetDefaultAddr()
if err != nil {
log.Fatalf("Error getting local IP address. Err: %v", err)
}
// get current holder of master lock
masterNode := leaderLock.GetHolder()
if masterNode == "" {
http.Error(w, "Leader not found", http.StatusInternalServerError)
return
}
// If we are the master, return
if localIP == masterNode {
http.Error(w, "Self proxying error", http.StatusInternalServerError)
return
}
// build the proxy url
url, _ := url.Parse(fmt.Sprintf("http://%s", masterNode))
// Create a proxy for the URL
proxy := httputil.NewSingleHostReverseProxy(url)
// modify the request url
newReq := *r
// newReq.URL = url
// Serve http
proxy.ServeHTTP(w, &newReq)
} | go | {
"resource": ""
} |
q8046 | ReadAll | train | func (s *CfgEndpointState) ReadAll() ([]core.State, error) {
return s.StateDriver.ReadAllState(endpointConfigPathPrefix, s, json.Unmarshal)
} | go | {
"resource": ""
} |
q8047 | WatchAll | train | func (s *CfgEndpointState) WatchAll(rsps chan core.WatchState) error {
return s.StateDriver.WatchAllState(endpointConfigPathPrefix, s, json.Unmarshal,
rsps)
} | go | {
"resource": ""
} |
q8048 | Init | train | func (d *MasterDaemon) Init() {
// set cluster mode
err := master.SetClusterMode(d.ClusterMode)
if err != nil {
log.Fatalf("Failed to set cluster-mode %q. Error: %s", d.ClusterMode, err)
}
// initialize state driver
d.stateDriver, err = utils.NewStateDriver(d.ClusterStoreDriver, &core.InstanceInfo{DbURL: d.ClusterStoreURL})
if err != nil {
log.Fatalf("Failed to init state-store: driver %q, URLs %q. Error: %s", d.ClusterStoreDriver, d.ClusterStoreURL, err)
}
// Initialize resource manager
d.resmgr, err = resources.NewStateResourceManager(d.stateDriver)
if err != nil {
log.Fatalf("Failed to init resource manager. Error: %s", err)
}
// Create an objdb client
d.objdbClient, err = objdb.InitClient(d.ClusterStoreDriver, []string{d.ClusterStoreURL})
if err != nil {
log.Fatalf("Error connecting to state store: driver %q, URLs %q. Err: %v", d.ClusterStoreDriver, d.ClusterStoreURL, err)
}
} | go | {
"resource": ""
} |
q8049 | agentDiscoveryLoop | train | func (d *MasterDaemon) agentDiscoveryLoop() {
// Create channels for watch thread
agentEventCh := make(chan objdb.WatchServiceEvent, 1)
watchStopCh := make(chan bool, 1)
// Start a watch on netplugin service
err := d.objdbClient.WatchService("netplugin", agentEventCh, watchStopCh)
if err != nil {
log.Fatalf("Could not start a watch on netplugin service. Err: %v", err)
}
for {
agentEv := <-agentEventCh
log.Debugf("Received netplugin watch event: %+v", agentEv)
// build host info
nodeInfo := ofnet.OfnetNode{
HostAddr: agentEv.ServiceInfo.HostAddr,
HostPort: uint16(agentEv.ServiceInfo.Port),
}
if agentEv.EventType == objdb.WatchServiceEventAdd {
err = d.ofnetMaster.AddNode(nodeInfo)
if err != nil {
log.Errorf("Error adding node %v. Err: %v", nodeInfo, err)
}
} else if agentEv.EventType == objdb.WatchServiceEventDel {
var res bool
log.Infof("Unregister node %+v", nodeInfo)
d.ofnetMaster.UnRegisterNode(&nodeInfo, &res)
go d.startDeferredCleanup(nodeInfo, agentEv.ServiceInfo.Hostname)
}
// Dont process next peer event for another 100ms
time.Sleep(100 * time.Millisecond)
}
} | go | {
"resource": ""
} |
q8050 | getPluginAddress | train | func (d *MasterDaemon) getPluginAddress(hostName string) (string, error) {
srvList, err := d.objdbClient.GetService("netplugin.vtep")
if err != nil {
log.Errorf("Error getting netplugin nodes. Err: %v", err)
return "", err
}
for _, srv := range srvList {
if srv.Hostname == hostName {
return srv.HostAddr, nil
}
}
return "", fmt.Errorf("Could not find plugin instance with name: %s", hostName)
} | go | {
"resource": ""
} |
q8051 | ClearEndpoints | train | func (d *MasterDaemon) ClearEndpoints(stateDriver core.StateDriver, epCfgs *[]core.State, id, matchField string) error {
for _, epCfg := range *epCfgs {
ep := epCfg.(*mastercfg.CfgEndpointState)
if (matchField == "net" && ep.NetID == id) ||
(matchField == "group" && ep.ServiceName == id) ||
(matchField == "ep" && strings.Contains(ep.EndpointID, id)) {
// Delete the endpoint state from netmaster
_, err := master.DeleteEndpointID(stateDriver, ep.ID)
if err != nil {
return fmt.Errorf("Cannot cleanup EP: %s. Err: %+v", ep.EndpointID, err)
}
pluginAddress, err := d.getPluginAddress(ep.HomingHost)
if err != nil {
return err
}
epDelURL := "http://" + pluginAddress + ":9090/debug/reclaimEndpoint/" + ep.ID
err = utils.HTTPDel(epDelURL)
if err != nil {
return fmt.Errorf("Error sending HTTP delete request to %s. Err: %+v", pluginAddress, err)
}
}
}
return nil
} | go | {
"resource": ""
} |
q8052 | runLeader | train | func (d *MasterDaemon) runLeader() {
router := mux.NewRouter()
// Create a new api controller
apiConfig := &objApi.APIControllerConfig{
NetForwardMode: d.NetForwardMode,
NetInfraType: d.NetInfraType,
}
d.apiController = objApi.NewAPIController(router, d.objdbClient, apiConfig)
//Restore state from clusterStore
d.restoreCache()
// Register netmaster service
d.registerService()
// initialize policy manager
mastercfg.InitPolicyMgr(d.stateDriver, d.ofnetMaster)
// setup HTTP routes
d.registerRoutes(router)
d.startListeners(router, d.stopLeaderChan)
log.Infof("Exiting Leader mode")
} | go | {
"resource": ""
} |
q8053 | runFollower | train | func (d *MasterDaemon) runFollower() {
router := mux.NewRouter()
router.PathPrefix("/").HandlerFunc(slaveProxyHandler)
// Register netmaster service
d.registerService()
// just wait on stop channel
log.Infof("Listening in follower mode")
d.startListeners(router, d.stopFollowerChan)
log.Info("Exiting follower mode")
} | go | {
"resource": ""
} |
q8054 | becomeLeader | train | func (d *MasterDaemon) becomeLeader() {
// ask listener to stop
d.stopFollowerChan <- true
// set current state
d.currState = "leader"
// Run the HTTP listener
go d.runLeader()
} | go | {
"resource": ""
} |
q8055 | becomeFollower | train | func (d *MasterDaemon) becomeFollower() {
// ask listener to stop
d.stopLeaderChan <- true
time.Sleep(time.Second)
// set current state
d.currState = "follower"
// run follower loop
go d.runFollower()
} | go | {
"resource": ""
} |
q8056 | InitServices | train | func (d *MasterDaemon) InitServices() {
if d.ClusterMode == "kubernetes" {
isLeader := func() bool {
return d.currState == "leader"
}
networkpolicy.InitK8SServiceWatch(d.ControlURL, isLeader)
}
} | go | {
"resource": ""
} |
q8057 | RunMasterFsm | train | func (d *MasterDaemon) RunMasterFsm() {
var err error
masterURL := strings.Split(d.ControlURL, ":")
masterIP, masterPort := masterURL[0], masterURL[1]
if len(masterURL) != 2 {
log.Fatalf("Invalid netmaster URL")
}
// create new ofnet master
d.ofnetMaster = ofnet.NewOfnetMaster(masterIP, ofnet.OFNET_MASTER_PORT)
if d.ofnetMaster == nil {
log.Fatalf("Error creating ofnet master")
}
// Register all existing netplugins in the background
go d.agentDiscoveryLoop()
// Create the lock
leaderLock, err = d.objdbClient.NewLock("netmaster/leader", masterIP+":"+masterPort, leaderLockTTL)
if err != nil {
log.Fatalf("Could not create leader lock. Err: %v", err)
}
// Try to acquire the lock
err = leaderLock.Acquire(0)
if err != nil {
// We dont expect any error during acquire.
log.Fatalf("Error while acquiring lock. Err: %v", err)
}
// Initialize the stop channel
d.stopLeaderChan = make(chan bool, 1)
d.stopFollowerChan = make(chan bool, 1)
// set current state
d.currState = "follower"
// Start off being a follower
go d.runFollower()
// Main run loop waiting on leader lock
for {
// Wait for lock events
select {
case event := <-leaderLock.EventChan():
if event.EventType == objdb.LockAcquired {
log.Infof("Leader lock acquired")
d.becomeLeader()
} else if event.EventType == objdb.LockLost {
log.Infof("Leader lock lost. Becoming follower")
d.becomeFollower()
}
}
}
} | go | {
"resource": ""
} |
q8058 | getMasterInfo | train | func (d *MasterDaemon) getMasterInfo() (map[string]interface{}, error) {
info := make(map[string]interface{})
// get local ip
localIP, err := netutils.GetDefaultAddr()
if err != nil {
return nil, errors.New("error getting local IP address")
}
// get current holder of master lock
leader := leaderLock.GetHolder()
if leader == "" {
return nil, errors.New("leader not found")
}
// Get all netplugin services
srvList, err := d.objdbClient.GetService("netplugin")
if err != nil {
log.Errorf("Error getting netplugin nodes. Err: %v", err)
return nil, err
}
// Add each node
pluginNodes := []string{}
for _, srv := range srvList {
pluginNodes = append(pluginNodes, srv.HostAddr)
}
// Get all netmaster services
srvList, err = d.objdbClient.GetService("netmaster")
if err != nil {
log.Errorf("Error getting netmaster nodes. Err: %v", err)
return nil, err
}
// Add each node
masterNodes := []string{}
for _, srv := range srvList {
masterNodes = append(masterNodes, srv.HostAddr)
}
// setup info map
info["local-ip"] = localIP
info["leader-ip"] = leader
info["current-state"] = d.currState
info["netplugin-nodes"] = pluginNodes
info["netmaster-nodes"] = masterNodes
return info, nil
} | go | {
"resource": ""
} |
q8059 | PolicyAttach | train | func PolicyAttach(epg *contivModel.EndpointGroup, policy *contivModel.Policy) error {
// Dont install policies in ACI mode
if !isPolicyEnabled() {
return nil
}
epgpKey := epg.Key + ":" + policy.Key
// See if it already exists
gp := mastercfg.FindEpgPolicy(epgpKey)
if gp != nil {
log.Errorf("EPG policy %s already exists", epgpKey)
return EpgPolicyExists
}
stateDriver, err := utils.GetStateDriver()
if err != nil {
log.Errorf("Could not get StateDriver while attaching policy %+v", policy)
return err
}
epgID, err := mastercfg.GetEndpointGroupID(stateDriver, epg.GroupName, epg.TenantName)
if err != nil {
log.Errorf("Error getting epgID for %s. Err: %v", epgpKey, err)
return err
}
// Create the epg policy
gp, err = mastercfg.NewEpgPolicy(epgpKey, epgID, policy)
if err != nil {
log.Errorf("Error creating EPG policy. Err: %v", err)
return err
}
return nil
} | go | {
"resource": ""
} |
q8060 | PolicyDetach | train | func PolicyDetach(epg *contivModel.EndpointGroup, policy *contivModel.Policy) error {
// Dont install policies in ACI mode
if !isPolicyEnabled() {
return nil
}
epgpKey := epg.Key + ":" + policy.Key
// find the policy
gp := mastercfg.FindEpgPolicy(epgpKey)
if gp == nil {
log.Errorf("Epg policy %s does not exist", epgpKey)
return core.Errorf("epg policy does not exist")
}
// Delete all rules within the policy
for ruleKey := range policy.LinkSets.Rules {
// find the rule
rule := contivModel.FindRule(ruleKey)
if rule == nil {
log.Errorf("Error finding the rule %s", ruleKey)
continue
}
log.Infof("Deleting Rule %s from epgp policy %s", ruleKey, epgpKey)
// Add the rule to epg Policy
err := gp.DelRule(rule)
if err != nil {
log.Errorf("Error deleting rule %s from epg polict %s. Err: %v", ruleKey, epgpKey, err)
}
}
// delete it
return gp.Delete()
} | go | {
"resource": ""
} |
q8061 | PolicyAddRule | train | func PolicyAddRule(policy *contivModel.Policy, rule *contivModel.Rule) error {
// Dont install policies in ACI mode
if !isPolicyEnabled() {
return nil
}
// Walk all associated endpoint groups
for epgKey := range policy.LinkSets.EndpointGroups {
gpKey := epgKey + ":" + policy.Key
// Find the epg policy
gp := mastercfg.FindEpgPolicy(gpKey)
if gp == nil {
log.Errorf("Failed to find the epg policy %s", gpKey)
return core.Errorf("epg policy not found")
}
// Add the Rule
err := gp.AddRule(rule)
if err != nil {
log.Errorf("Error adding the rule %s to epg policy %s. Err: %v", rule.Key, gpKey, err)
return err
}
// Save the policy state
err = gp.Write()
if err != nil {
return err
}
}
return nil
} | go | {
"resource": ""
} |
q8062 | initStateDriver | train | func initStateDriver(clusterStore string) (core.StateDriver, error) {
// parse the state store URL
parts := strings.Split(clusterStore, "://")
if len(parts) < 2 {
return nil, core.Errorf("Invalid state-store URL %q", clusterStore)
}
stateStore := parts[0]
// Make sure we support the statestore type
switch stateStore {
case utils.EtcdNameStr:
case utils.ConsulNameStr:
default:
return nil, core.Errorf("Unsupported state-store %q", stateStore)
}
// Setup instance info
instInfo := core.InstanceInfo{
DbURL: clusterStore,
}
return utils.NewStateDriver(stateStore, &instInfo)
} | go | {
"resource": ""
} |
q8063 | parseRange | train | func parseRange(rangeStr string) ([]uint, error) {
var values []uint
if rangeStr == "" {
return []uint{}, nil
}
// split ranges based on "," char
rangeList := strings.Split(rangeStr, ",")
for _, subrange := range rangeList {
minMax := strings.Split(strings.TrimSpace(subrange), "-")
if len(minMax) == 2 {
min, err := strconv.Atoi(minMax[0])
if err != nil {
log.Errorf("Invalid range: %v", subrange)
return nil, err
}
max, err := strconv.Atoi(minMax[1])
if err != nil {
log.Errorf("Invalid range: %v", subrange)
return nil, err
}
// some error checking
if min > max || min < 0 || max < 0 {
log.Errorf("Invalid range values: %v", subrange)
return nil, fmt.Errorf("invalid range values")
}
for i := min; i <= max; i++ {
values = append(values, uint(i))
}
} else if len(minMax) == 1 {
val, err := strconv.Atoi(minMax[0])
if err != nil {
log.Errorf("Invalid range: %v", subrange)
return nil, err
}
values = append(values, uint(val))
} else {
log.Errorf("Invalid range: %v", subrange)
return nil, fmt.Errorf("invalid range format")
}
}
return values, nil
} | go | {
"resource": ""
} |
q8064 | processResource | train | func processResource(stateDriver core.StateDriver, rsrcName, rsrcVal string) error {
// Read global config
gCfg := gstate.Cfg{}
gCfg.StateDriver = stateDriver
err := gCfg.Read("")
if err != nil {
log.Errorf("error reading tenant cfg state. Error: %s", err)
return err
}
// process resource based on name
if rsrcName == "vlan" {
numVlans, vlansInUse := gCfg.GetVlansInUse()
fmt.Printf("Num Vlans: %d\n Current Vlans in Use: %s\n", numVlans, vlansInUse)
// see if we need to set the resource
if rsrcVal != "" {
values, err := parseRange(rsrcVal)
if err != nil {
log.Errorf("Error parsing range: %v", err)
return err
}
log.Infof("Setting vlan values: %v", values)
// set vlan values
for _, val := range values {
_, err = gCfg.AllocVLAN(val)
if err != nil {
log.Errorf("Error setting vlan: %d. Err: %v", val, err)
}
}
log.Infof("Finished setting VLANs")
}
} else if rsrcName == "vxlan" {
numVxlans, vxlansInUse := gCfg.GetVxlansInUse()
fmt.Printf("Num Vxlans: %d\n Current Vxlans in Use: %s\n", numVxlans, vxlansInUse)
// see if we need to set the resource
if rsrcVal != "" {
values, err := parseRange(rsrcVal)
if err != nil {
log.Errorf("Error parsing range: %v", err)
return err
}
log.Infof("Setting vxlan values: %v", values)
// set vlan values
for _, val := range values {
_, _, err = gCfg.AllocVXLAN(val)
if err != nil {
log.Errorf("Error setting vxlan: %d. Err: %v", val, err)
}
}
log.Infof("Finished setting VXLANs")
}
} else {
log.Errorf("Unknown resource: %v", rsrcName)
return fmt.Errorf("unknown resource")
}
return nil
} | go | {
"resource": ""
} |
q8065 | ipnsExecute | train | func (cniReq *cniServer) ipnsExecute(namespace string, args []string) ([]byte, error) {
ipCmd := "ip"
ipArgs := []string{"netns", "exec", namespace}
ipArgs = append(ipArgs, args...)
cniLog.Infof("processing cmd: %v", ipArgs)
return exec.Command(ipCmd, ipArgs...).CombinedOutput()
} | go | {
"resource": ""
} |
q8066 | ipnsBatchExecute | train | func (cniReq *cniServer) ipnsBatchExecute(namespace string, args [][]string) ([]byte, error) {
for idx, arg1 := range args {
if out, err := cniReq.ipnsExecute(namespace, arg1); err != nil {
cniLog.Errorf("failed to execute [%d] %v %s, %s", idx, err, arg1, string(out))
return out, err
}
}
return nil, nil
} | go | {
"resource": ""
} |
q8067 | setUpAPIClient | train | func setUpAPIClient() *APIClient {
// Read config
err := k8sutils.GetK8SConfig(&contivK8Config)
if err != nil {
log.Errorf("Failed: %v", err)
return nil
}
return NewAPIClient(contivK8Config.K8sAPIServer, contivK8Config.K8sCa,
contivK8Config.K8sKey, contivK8Config.K8sCert, contivK8Config.K8sToken)
} | go | {
"resource": ""
} |
q8068 | InitKubServiceWatch | train | func InitKubServiceWatch(np *plugin.NetPlugin) {
watchClient := setUpAPIClient()
if watchClient == nil {
log.Fatalf("Could not init kubernetes API client")
}
svcCh := make(chan SvcWatchResp, 1)
epCh := make(chan EpWatchResp, 1)
go func() {
for {
select {
case svcEvent := <-svcCh:
switch svcEvent.opcode {
case "WARN":
log.Debugf("svcWatch : %s", svcEvent.errStr)
break
case "FATAL":
log.Errorf("svcWatch : %s", svcEvent.errStr)
break
case "ERROR":
log.Warnf("svcWatch : %s", svcEvent.errStr)
watchClient.WatchServices(svcCh)
break
case "DELETED":
np.DelSvcSpec(svcEvent.svcName, &svcEvent.svcSpec)
break
default:
np.AddSvcSpec(svcEvent.svcName, &svcEvent.svcSpec)
}
case epEvent := <-epCh:
switch epEvent.opcode {
case "WARN":
log.Debugf("epWatch : %s", epEvent.errStr)
break
case "FATAL":
log.Errorf("epWatch : %s", epEvent.errStr)
break
case "ERROR":
log.Warnf("epWatch : %s", epEvent.errStr)
watchClient.WatchSvcEps(epCh)
break
default:
np.SvcProviderUpdate(epEvent.svcName, epEvent.providers)
}
}
}
}()
watchClient.WatchServices(svcCh)
watchClient.WatchSvcEps(epCh)
} | go | {
"resource": ""
} |
q8069 | InitCNIServer | train | func InitCNIServer(netplugin *plugin.NetPlugin) error {
netPlugin = netplugin
hostname, err := os.Hostname()
if err != nil {
log.Fatalf("Could not retrieve hostname: %v", err)
}
pluginHost = hostname
// Set up the api client instance
kubeAPIClient = setUpAPIClient()
if kubeAPIClient == nil {
log.Fatalf("Could not init kubernetes API client")
}
log.Debugf("Configuring router")
router := mux.NewRouter()
// register handlers for cni
t := router.Headers("Content-Type", "application/json").Methods("POST").Subrouter()
t.HandleFunc(cniapi.EPAddURL, utils.MakeHTTPHandler(addPod))
t.HandleFunc(cniapi.EPDelURL, utils.MakeHTTPHandler(deletePod))
t.HandleFunc("/ContivCNI.{*}", utils.UnknownAction)
driverPath := cniapi.ContivCniSocket
os.Remove(driverPath)
os.MkdirAll(cniapi.PluginPath, 0700)
go func() {
l, err := net.ListenUnix("unix", &net.UnixAddr{Name: driverPath, Net: "unix"})
if err != nil {
panic(err)
}
log.Infof("k8s plugin listening on %s", driverPath)
http.Serve(l, router)
l.Close()
log.Infof("k8s plugin closing %s", driverPath)
}()
//InitKubServiceWatch(netplugin)
return nil
} | go | {
"resource": ""
} |
q8070 | epCleanUp | train | func epCleanUp(req *epSpec) error {
// first delete from netplugin
// ignore any errors as this is best effort
netID := req.Network + "." + req.Tenant
pluginErr := netPlugin.DeleteEndpoint(netID + "-" + req.EndpointID)
// now delete from master
delReq := master.DeleteEndpointRequest{
TenantName: req.Tenant,
NetworkName: req.Network,
ServiceName: req.Group,
EndpointID: req.EndpointID,
}
var delResp master.DeleteEndpointResponse
masterErr := cluster.MasterPostReq("/plugin/deleteEndpoint", &delReq, &delResp)
if pluginErr != nil {
log.Errorf("failed to delete endpoint: %s from netplugin %s",
netID+"-"+req.EndpointID, pluginErr)
return pluginErr
}
if masterErr != nil {
log.Errorf("failed to delete endpoint %+v from netmaster, %s", delReq, masterErr)
}
return masterErr
} | go | {
"resource": ""
} |
q8071 | createEP | train | func createEP(req *epSpec) (*epAttr, error) {
// if the ep already exists, treat as error for now.
netID := req.Network + "." + req.Tenant
ep, err := utils.GetEndpoint(netID + "-" + req.EndpointID)
if err == nil {
return nil, fmt.Errorf("the EP %s already exists", req.EndpointID)
}
// Build endpoint request
mreq := master.CreateEndpointRequest{
TenantName: req.Tenant,
NetworkName: req.Network,
ServiceName: req.Group,
EndpointID: req.EndpointID,
EPCommonName: req.Name,
ConfigEP: intent.ConfigEP{
Container: req.EndpointID,
Host: pluginHost,
ServiceName: req.Group,
},
}
var mresp master.CreateEndpointResponse
err = cluster.MasterPostReq("/plugin/createEndpoint", &mreq, &mresp)
if err != nil {
epCleanUp(req)
return nil, err
}
// this response should contain IPv6 if the underlying network is configured with IPv6
log.Infof("Got endpoint create resp from master: %+v", mresp)
// Ask netplugin to create the endpoint
err = netPlugin.CreateEndpoint(netID + "-" + req.EndpointID)
if err != nil {
log.Errorf("Endpoint creation failed. Error: %s", err)
epCleanUp(req)
return nil, err
}
ep, err = utils.GetEndpoint(netID + "-" + req.EndpointID)
if err != nil {
epCleanUp(req)
return nil, err
}
log.Debug(ep)
// need to get the subnetlen from nw state.
nw, err := utils.GetNetwork(netID)
if err != nil {
epCleanUp(req)
return nil, err
}
epResponse := epAttr{}
epResponse.PortName = ep.PortName
epResponse.IPAddress = ep.IPAddress + "/" + strconv.Itoa(int(nw.SubnetLen))
epResponse.Gateway = nw.Gateway
if ep.IPv6Address != "" {
epResponse.IPv6Address = ep.IPv6Address + "/" + strconv.Itoa(int(nw.IPv6SubnetLen))
epResponse.IPv6Gateway = nw.IPv6Gateway
}
return &epResponse, nil
} | go | {
"resource": ""
} |
q8072 | getLink | train | func getLink(ifname string) (netlink.Link, error) {
// find the link
link, err := netlink.LinkByName(ifname)
if err != nil {
if !strings.Contains(err.Error(), "Link not found") {
log.Errorf("unable to find link %q. Error: %q", ifname, err)
return link, err
}
// try once more as sometimes (somehow) link creation is taking
// sometime, causing link not found error
time.Sleep(1 * time.Second)
link, err = netlink.LinkByName(ifname)
if err != nil {
log.Errorf("unable to find link %q. Error %q", ifname, err)
}
return link, err
}
return link, err
} | go | {
"resource": ""
} |
// nsToPID extracts the process id from a network namespace path of the
// form "/proc/<pid>/ns/net".
func nsToPID(ns string) (int, error) {
	// Make sure ns is well formed
	if !strings.HasPrefix(ns, "/proc/") {
		return -1, fmt.Errorf("invalid nw name space: %v", ns)
	}

	// path element 2 ("" / "proc" / "<pid>" / ...) carries the pid
	return strconv.Atoi(strings.Split(ns, "/")[2])
}
"resource": ""
} |
q8074 | setIfAttrs | train | func setIfAttrs(pid int, ifname, cidr, cidr6, newname string) error {
nsenterPath, err := osexec.LookPath("nsenter")
if err != nil {
return err
}
ipPath, err := osexec.LookPath("ip")
if err != nil {
return err
}
// find the link
link, err := getLink(ifname)
if err != nil {
log.Errorf("unable to find link %q. Error %q", ifname, err)
return err
}
// move to the desired netns
err = netlink.LinkSetNsPid(link, pid)
if err != nil {
log.Errorf("unable to move interface %s to pid %d. Error: %s",
ifname, pid, err)
return err
}
// rename to the desired ifname
nsPid := fmt.Sprintf("%d", pid)
rename, err := osexec.Command(nsenterPath, "-t", nsPid, "-n", "-F", "--", ipPath, "link",
"set", "dev", ifname, "name", newname).CombinedOutput()
if err != nil {
log.Errorf("unable to rename interface %s to %s. Error: %s",
ifname, newname, err)
return nil
}
log.Infof("Output from rename: %v", rename)
// set the ip address
assignIP, err := osexec.Command(nsenterPath, "-t", nsPid, "-n", "-F", "--", ipPath,
"address", "add", cidr, "dev", newname).CombinedOutput()
if err != nil {
log.Errorf("unable to assign ip %s to %s. Error: %s",
cidr, newname, err)
return nil
}
log.Infof("Output from ip assign: %v", assignIP)
if cidr6 != "" {
out, err := osexec.Command(nsenterPath, "-t", nsPid, "-n", "-F", "--", ipPath,
"-6", "address", "add", cidr6, "dev", newname).CombinedOutput()
if err != nil {
log.Errorf("unable to assign IPv6 %s to %s. Error: %s",
cidr6, newname, err)
return nil
}
log.Infof("Output of IPv6 assign: %v", out)
}
// Finally, mark the link up
bringUp, err := osexec.Command(nsenterPath, "-t", nsPid, "-n", "-F", "--", ipPath,
"link", "set", "dev", newname, "up").CombinedOutput()
if err != nil {
log.Errorf("unable to assign ip %s to %s. Error: %s",
cidr, newname, err)
return nil
}
log.Debugf("Output from ip assign: %v", bringUp)
return nil
} | go | {
"resource": ""
} |
q8075 | setDefGw | train | func setDefGw(pid int, gw, gw6, intfName string) error {
nsenterPath, err := osexec.LookPath("nsenter")
if err != nil {
return err
}
routePath, err := osexec.LookPath("route")
if err != nil {
return err
}
// set default gw
nsPid := fmt.Sprintf("%d", pid)
out, err := osexec.Command(nsenterPath, "-t", nsPid, "-n", "-F", "--", routePath, "add",
"default", "gw", gw, intfName).CombinedOutput()
if err != nil {
log.Errorf("unable to set default gw %s. Error: %s - %s", gw, err, out)
return nil
}
if gw6 != "" {
out, err := osexec.Command(nsenterPath, "-t", nsPid, "-n", "-F", "--", routePath,
"-6", "add", "default", "gw", gw6, intfName).CombinedOutput()
if err != nil {
log.Errorf("unable to set default IPv6 gateway %s. Error: %s - %s", gw6, err, out)
return nil
}
}
return nil
} | go | {
"resource": ""
} |
q8076 | getEPSpec | train | func getEPSpec(pInfo *cniapi.CNIPodAttr) (*epSpec, error) {
resp := epSpec{}
// Get labels from the kube api server
epg, err := kubeAPIClient.GetPodLabel(pInfo.K8sNameSpace, pInfo.Name,
"io.contiv.net-group")
if err != nil {
log.Errorf("Error getting epg. Err: %v", err)
return &resp, err
}
// Safe to ignore the error return for subsequent invocations of GetPodLabel
netw, _ := kubeAPIClient.GetPodLabel(pInfo.K8sNameSpace, pInfo.Name,
"io.contiv.network")
tenant, _ := kubeAPIClient.GetPodLabel(pInfo.K8sNameSpace, pInfo.Name,
"io.contiv.tenant")
log.Infof("labels is %s/%s/%s for pod %s\n", tenant, netw, epg, pInfo.Name)
resp.Tenant = tenant
resp.Network = netw
resp.Group = epg
resp.EndpointID = pInfo.InfraContainerID
resp.Name = pInfo.Name
return &resp, nil
} | go | {
"resource": ""
} |
q8077 | deletePod | train | func deletePod(w http.ResponseWriter, r *http.Request, vars map[string]string) (interface{}, error) {
resp := cniapi.RspAddPod{}
logEvent("del pod")
content, err := ioutil.ReadAll(r.Body)
if err != nil {
log.Errorf("Failed to read request: %v", err)
return resp, err
}
pInfo := cniapi.CNIPodAttr{}
if err := json.Unmarshal(content, &pInfo); err != nil {
return resp, err
}
// Get labels from the kube api server
epReq, err := getEPSpec(&pInfo)
if err != nil {
log.Errorf("Error getting labels. Err: %v", err)
setErrorResp(&resp, "Error getting labels", err)
return resp, err
}
netPlugin.DeleteHostAccPort(epReq.EndpointID)
if err = epCleanUp(epReq); err != nil {
log.Errorf("failed to delete pod, error: %s", err)
}
resp.Result = 0
resp.EndpointID = pInfo.InfraContainerID
return resp, nil
} | go | {
"resource": ""
} |
q8078 | freeAddrOnErr | train | func freeAddrOnErr(nwCfg *mastercfg.CfgNetworkState, epgCfg *mastercfg.EndpointGroupState,
ipAddress string, pErr *error) {
if *pErr != nil {
log.Infof("Freeing %s on error", ipAddress)
networkReleaseAddress(nwCfg, epgCfg, ipAddress)
}
} | go | {
"resource": ""
} |
q8079 | CreateEndpoints | train | func CreateEndpoints(stateDriver core.StateDriver, tenant *intent.ConfigTenant) error {
err := validateEndpointConfig(stateDriver, tenant)
if err != nil {
log.Errorf("error validating endpoint config. Error: %s", err)
return err
}
for _, network := range tenant.Networks {
nwCfg := &mastercfg.CfgNetworkState{}
nwCfg.StateDriver = stateDriver
networkID := network.Name + "." + tenant.Name
err = nwCfg.Read(networkID)
if err != nil {
log.Errorf("error reading oper network %s. Error: %s", network.Name, err)
return err
}
for _, ep := range network.Endpoints {
epReq := CreateEndpointRequest{}
epReq.ConfigEP = ep
_, err = CreateEndpoint(stateDriver, nwCfg, &epReq)
if err != nil {
log.Errorf("Error creating endpoint %+v. Err: %v", ep, err)
return err
}
}
err = nwCfg.Write()
if err != nil {
log.Errorf("error writing nw config. Error: %s", err)
return err
}
}
return err
} | go | {
"resource": ""
} |
q8080 | DeleteEndpoints | train | func DeleteEndpoints(hostAddr string) error {
// Get the state driver
stateDriver, err := utils.GetStateDriver()
if err != nil {
return err
}
readEp := &mastercfg.CfgEndpointState{}
readEp.StateDriver = stateDriver
epCfgs, err := readEp.ReadAll()
if err != nil {
return err
}
for _, epCfg := range epCfgs {
ep := epCfg.(*mastercfg.CfgEndpointState)
nwCfg := &mastercfg.CfgNetworkState{}
nwCfg.StateDriver = stateDriver
err = nwCfg.Read(ep.NetID)
if err != nil {
log.Errorf("Network not found for NetID: %+v", ep.NetID)
continue
}
netID := nwCfg.NetworkName + "." + nwCfg.Tenant
epID := getEpName(netID, &intent.ConfigEP{Container: ep.EndpointID})
if ep.HomingHost == hostAddr {
log.Infof("Sending DeleteEndpoint for %+v", ep)
_, err = DeleteEndpointID(stateDriver, epID)
if err != nil {
log.Errorf("Error delete endpoint: %+v. Err: %+v", ep, err)
}
epOper := &drivers.OperEndpointState{}
epOper.StateDriver = stateDriver
err := epOper.Read(epID)
if err != nil {
log.Errorf("Failed to read epOper: %+v", epOper)
return err
}
err = epOper.Clear()
if err != nil {
log.Errorf("Error deleting oper state for %+v", epOper)
} else {
log.Infof("Deleted EP oper: %+v", epOper)
}
} else {
log.Infof("EP not on host: %+v", hostAddr)
}
}
return err
} | go | {
"resource": ""
} |
q8081 | DeleteEndpointID | train | func DeleteEndpointID(stateDriver core.StateDriver, epID string) (*mastercfg.CfgEndpointState, error) {
epCfg := &mastercfg.CfgEndpointState{}
var epgCfg *mastercfg.EndpointGroupState
epCfg.StateDriver = stateDriver
err := epCfg.Read(epID)
if err != nil {
return nil, err
}
nwCfg := &mastercfg.CfgNetworkState{}
nwCfg.StateDriver = stateDriver
err = nwCfg.Read(epCfg.NetID)
// Network may already be deleted if infra nw
// If network present, free up nw resources
if err == nil && epCfg.IPAddress != "" {
if len(epCfg.ServiceName) > 0 {
epgCfg = &mastercfg.EndpointGroupState{}
epgCfg.StateDriver = stateDriver
if err := epgCfg.Read(epCfg.ServiceName + ":" + nwCfg.Tenant); err != nil {
log.Errorf("failed to read endpoint group %s, error %s",
epCfg.ServiceName+":"+epgCfg.TenantName, err)
return nil, err
}
}
err = networkReleaseAddress(nwCfg, epgCfg, epCfg.IPAddress)
if err != nil {
log.Errorf("Error releasing endpoint state for: %s. Err: %v", epCfg.IPAddress, err)
}
if epCfg.EndpointGroupKey != "" {
epgCfg := &mastercfg.EndpointGroupState{}
epgCfg.StateDriver = stateDriver
err = epgCfg.Read(epCfg.EndpointGroupKey)
if err != nil {
log.Errorf("Error reading EPG for endpoint: %+v", epCfg)
}
epgCfg.EpCount--
// write updated epg state
err = epgCfg.Write()
if err != nil {
log.Errorf("error writing epg config. Error: %s", err)
}
}
// decrement ep count
nwCfg.EpCount--
// write modified nw state
err = nwCfg.Write()
if err != nil {
log.Errorf("error writing nw config. Error: %s", err)
}
}
// Even if network not present (already deleted), cleanup ep cfg
err = epCfg.Clear()
if err != nil {
log.Errorf("error writing ep config. Error: %s", err)
return nil, err
}
return epCfg, err
} | go | {
"resource": ""
} |
q8082 | CreateEpBindings | train | func CreateEpBindings(epBindings *[]intent.ConfigEP) error {
stateDriver, err := utils.GetStateDriver()
if err != nil {
return err
}
err = validateEpBindings(epBindings)
if err != nil {
log.Errorf("error validating the ep bindings. Error: %s", err)
return err
}
readEp := &mastercfg.CfgEndpointState{}
readEp.StateDriver = stateDriver
epCfgs, err := readEp.ReadAll()
if err != nil {
log.Errorf("error fetching eps. Error: %s", err)
return err
}
for _, ep := range *epBindings {
log.Infof("creating binding between container '%s' and host '%s'",
ep.Container, ep.Host)
for _, epCfg := range epCfgs {
cfg := epCfg.(*mastercfg.CfgEndpointState)
if cfg.EndpointID != ep.Container {
continue
}
cfg.HomingHost = ep.Host
err = cfg.Write()
if err != nil {
log.Errorf("error updating epCfg. Error: %s", err)
return err
}
}
}
return nil
} | go | {
"resource": ""
} |
q8083 | cleanupExternalContracts | train | func cleanupExternalContracts(endpointGroup *contivModel.EndpointGroup) error {
tenant := endpointGroup.TenantName
for _, contractsGrp := range endpointGroup.ExtContractsGrps {
contractsGrpKey := tenant + ":" + contractsGrp
contractsGrpObj := contivModel.FindExtContractsGroup(contractsGrpKey)
if contractsGrpObj != nil {
// Break any linkeage we might have set.
modeldb.RemoveLinkSet(&contractsGrpObj.LinkSets.EndpointGroups, endpointGroup)
modeldb.RemoveLinkSet(&endpointGroup.LinkSets.ExtContractsGrps, contractsGrpObj)
// Links broken, update the contracts group object.
err := contractsGrpObj.Write()
if err != nil {
return err
}
} else {
log.Errorf("Error cleaning up consumed ext contract %s", contractsGrp)
continue
}
}
return nil
} | go | {
"resource": ""
} |
q8084 | setupExternalContracts | train | func setupExternalContracts(endpointGroup *contivModel.EndpointGroup, extContractsGrps []string) error {
// Validate presence and register consumed external contracts
tenant := endpointGroup.TenantName
for _, contractsGrp := range extContractsGrps {
contractsGrpKey := tenant + ":" + contractsGrp
contractsGrpObj := contivModel.FindExtContractsGroup(contractsGrpKey)
if contractsGrpObj == nil {
errStr := fmt.Sprintf("External contracts group %s not found", contractsGrp)
log.Errorf(errStr)
return core.Errorf(errStr)
}
// Establish the necessary links.
modeldb.AddLinkSet(&contractsGrpObj.LinkSets.EndpointGroups, endpointGroup)
modeldb.AddLinkSet(&endpointGroup.LinkSets.ExtContractsGrps, contractsGrpObj)
// Links made, write the policy set object.
err := contractsGrpObj.Write()
if err != nil {
return err
}
}
return nil
} | go | {
"resource": ""
} |
q8085 | ExtContractsGroupCreate | train | func (ac *APIController) ExtContractsGroupCreate(contractsGroup *contivModel.ExtContractsGroup) error {
log.Infof("Received ExtContractsGroupCreate: %+v", contractsGroup)
// Validate contracts type
if contractsGroup.ContractsType != "provided" && contractsGroup.ContractsType != "consumed" {
return core.Errorf("Contracts group need to be either 'provided' or 'consumed'")
}
// Make sure the tenant exists
tenant := contivModel.FindTenant(contractsGroup.TenantName)
if tenant == nil {
return core.Errorf("Tenant %s not found", contractsGroup.TenantName)
}
// NOTE: Nothing more needs to be done here. This object
// need not be created in the masterCfg.
return nil
} | go | {
"resource": ""
} |
q8086 | ExtContractsGroupUpdate | train | func (ac *APIController) ExtContractsGroupUpdate(contractsGroup, params *contivModel.ExtContractsGroup) error {
log.Infof("Received ExtContractsGroupUpdate: %+v, params: %+v", contractsGroup, params)
log.Errorf("Error: external contracts update not supported: %s", contractsGroup.ContractsGroupName)
return core.Errorf("external contracts update not supported")
} | go | {
"resource": ""
} |
q8087 | ExtContractsGroupDelete | train | func (ac *APIController) ExtContractsGroupDelete(contractsGroup *contivModel.ExtContractsGroup) error {
log.Infof("Received ExtContractsGroupDelete: %+v", contractsGroup)
// At this moment, we let the external contracts to be deleted only
// if there are no consumers of this external contracts group
if isExtContractsGroupUsed(contractsGroup) == true {
log.Errorf("Error: External contracts groups is being used: %s", contractsGroup.ContractsGroupName)
return core.Errorf("External contracts group is in-use")
}
return nil
} | go | {
"resource": ""
} |
q8088 | MakeHTTPHandler | train | func MakeHTTPHandler(handlerFunc httpAPIFunc) http.HandlerFunc {
// Create a closure and return an anonymous function
return func(w http.ResponseWriter, r *http.Request) {
// Call the handler
resp, err := handlerFunc(w, r, mux.Vars(r))
if err != nil {
// Log error
log.Errorf("Handler for %s %s returned error: %s", r.Method, r.URL, err)
if resp == nil {
// Send HTTP response
http.Error(w, err.Error(), http.StatusInternalServerError)
} else {
// Send HTTP response as Json
content, err := json.Marshal(resp)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.WriteHeader(http.StatusInternalServerError)
w.Write(content)
}
} else {
// Send HTTP response as Json
err = writeJSON(w, http.StatusOK, resp)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
}
}
} | go | {
"resource": ""
} |
q8089 | UnknownAction | train | func UnknownAction(w http.ResponseWriter, r *http.Request) {
log.Infof("Unknown action at %q", r.URL.Path)
content, _ := ioutil.ReadAll(r.Body)
log.Infof("Body content: %s", string(content))
http.NotFound(w, r)
} | go | {
"resource": ""
} |
q8090 | HTTPPost | train | func HTTPPost(url string, req interface{}, resp interface{}) error {
// Convert the req to json
jsonStr, err := json.Marshal(req)
if err != nil {
log.Errorf("Error converting request data(%#v) to Json. Err: %v", req, err)
return err
}
// Perform HTTP POST operation
res, err := http.Post(url, "application/json", strings.NewReader(string(jsonStr)))
if err != nil {
log.Errorf("Error during http POST. Err: %v", err)
return err
}
defer res.Body.Close()
// Check the response code
if res.StatusCode == http.StatusInternalServerError {
eBody, err := ioutil.ReadAll(res.Body)
if err != nil {
return errors.New("HTTP StatusInternalServerError" + err.Error())
}
return errors.New(string(eBody))
}
if res.StatusCode != http.StatusOK {
log.Errorf("HTTP error response. Status: %s, StatusCode: %d", res.Status, res.StatusCode)
return fmt.Errorf("HTTP error response. Status: %s, StatusCode: %d", res.Status, res.StatusCode)
}
// Read the entire response
body, err := ioutil.ReadAll(res.Body)
if err != nil {
log.Errorf("Error during ioutil readall. Err: %v", err)
return err
}
// Convert response json to struct
err = json.Unmarshal(body, resp)
if err != nil {
log.Errorf("Error during json unmarshall. Err: %v", err)
return err
}
log.Debugf("Results for (%s): %+v\n", url, resp)
return nil
} | go | {
"resource": ""
} |
q8091 | HTTPDel | train | func HTTPDel(url string) error {
req, err := http.NewRequest("DELETE", url, nil)
res, err := http.DefaultClient.Do(req)
if err != nil {
panic(err)
}
defer res.Body.Close()
// Check the response code
if res.StatusCode == http.StatusInternalServerError {
eBody, err := ioutil.ReadAll(res.Body)
if err != nil {
return errors.New("HTTP StatusInternalServerError " + err.Error())
}
return errors.New(string(eBody))
}
if res.StatusCode != http.StatusOK {
log.Errorf("HTTP error response. Status: %s, StatusCode: %d", res.Status, res.StatusCode)
return fmt.Errorf("HTTP error response. Status: %s, StatusCode: %d", res.Status, res.StatusCode)
}
log.Debugf("Results for (%s): %+v\n", url, res)
return nil
} | go | {
"resource": ""
} |
q8092 | NewContivClient | train | func NewContivClient(baseURL string) (*ContivClient, error) {
ok, err := regexp.Match(`^https?://`, []byte(baseURL))
if !ok {
return nil, errors.New("invalid URL: must begin with http:// or https://")
} else if err != nil {
return nil, err
}
client := ContivClient{
baseURL: baseURL,
customRequestHeaders: [][2]string{},
httpClient: &http.Client{},
}
return &client, nil
} | go | {
"resource": ""
} |
q8093 | SetAuthToken | train | func (c *ContivClient) SetAuthToken(token string) error {
// setting an auth token is only allowed on secure requests.
// if we didn't enforce this, the client could potentially send auth
// tokens in plain text across the network.
if !c.isHTTPS() {
return errors.New("setting auth token requires a https auth_proxy URL")
}
// having multiple auth token headers is confusing and makes no sense and
// which one is actually used depends on the implementation of the server.
// therefore, we will raise an error if there's already an auth token set.
for _, pair := range c.customRequestHeaders {
if pair[0] == authTokenHeader {
return errors.New("an auth token has already been set")
}
}
c.addCustomRequestHeader(authTokenHeader, token)
return nil
} | go | {
"resource": ""
} |
q8094 | Login | train | func (c *ContivClient) Login(username, password string) (*http.Response, []byte, error) {
// login is only allowed over a secure channel
if !c.isHTTPS() {
return nil, nil, errors.New("login requires a https auth_proxy URL")
}
url := c.baseURL + LoginPath
// create the POST payload for login
lp := loginPayload{
Username: username,
Password: password,
}
payload, err := json.Marshal(lp)
if err != nil {
return nil, nil, err
}
// send the login POST request
resp, err := c.httpClient.Post(url, "application/json", bytes.NewBuffer(payload))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, nil, err
}
return resp, body, nil
} | go | {
"resource": ""
} |
q8095 | addCustomRequestHeader | train | func (c *ContivClient) addCustomRequestHeader(name, value string) {
// lowercase the header name so we can easily check for duplicates in other places.
// there can legitimately be many headers with the same name, but in some cases
// (e.g., auth token) we want to enforce that there is only one.
// Go internally canonicalizes them when we call Header.Add() anyways.
name = strings.ToLower(name)
c.customRequestHeaders = append(c.customRequestHeaders, [2]string{name, value})
} | go | {
"resource": ""
} |
q8096 | processCustomHeaders | train | func (c *ContivClient) processCustomHeaders(req *http.Request) {
for _, pair := range c.customRequestHeaders {
req.Header.Add(pair[0], pair[1])
}
} | go | {
"resource": ""
} |
q8097 | AciGwPost | train | func (c *ContivClient) AciGwPost(obj *AciGw) error {
// build key and URL
keyStr := obj.Name
url := c.baseURL + "/api/v1/aciGws/" + keyStr + "/"
// http post the object
err := c.httpPost(url, obj)
if err != nil {
log.Debugf("Error creating aciGw %+v. Err: %v", obj, err)
return err
}
return nil
} | go | {
"resource": ""
} |
q8098 | AciGwList | train | func (c *ContivClient) AciGwList() (*[]*AciGw, error) {
// build key and URL
url := c.baseURL + "/api/v1/aciGws/"
// http get the object
var objList []*AciGw
err := c.httpGet(url, &objList)
if err != nil {
log.Debugf("Error getting aciGws. Err: %v", err)
return nil, err
}
return &objList, nil
} | go | {
"resource": ""
} |
q8099 | AciGwDelete | train | func (c *ContivClient) AciGwDelete(name string) error {
// build key and URL
keyStr := name
url := c.baseURL + "/api/v1/aciGws/" + keyStr + "/"
// http get the object
err := c.httpDelete(url)
if err != nil {
log.Debugf("Error deleting aciGw %s. Err: %v", keyStr, err)
return err
}
return nil
} | go | {
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.