repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1
value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1
value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
openstack/networking-cisco | networking_cisco/apps/saf/server/dfa_openstack_helper.py | DfaNeutronHelper.is_subnet_present | def is_subnet_present(self, subnet_addr):
"""Returns if a subnet is present. """
try:
subnet_list = self.neutronclient.list_subnets(body={})
subnet_dat = subnet_list.get('subnets')
for sub in subnet_dat:
if sub.get('cidr') == subnet_addr:
return True
return False
except Exception as exc:
LOG.error("Failed to list subnet %(sub)s, Exc %(exc)s",
{'sub': subnet_addr, 'exc': str(exc)})
return False | python | def is_subnet_present(self, subnet_addr):
"""Returns if a subnet is present. """
try:
subnet_list = self.neutronclient.list_subnets(body={})
subnet_dat = subnet_list.get('subnets')
for sub in subnet_dat:
if sub.get('cidr') == subnet_addr:
return True
return False
except Exception as exc:
LOG.error("Failed to list subnet %(sub)s, Exc %(exc)s",
{'sub': subnet_addr, 'exc': str(exc)})
return False | [
"def",
"is_subnet_present",
"(",
"self",
",",
"subnet_addr",
")",
":",
"try",
":",
"subnet_list",
"=",
"self",
".",
"neutronclient",
".",
"list_subnets",
"(",
"body",
"=",
"{",
"}",
")",
"subnet_dat",
"=",
"subnet_list",
".",
"get",
"(",
"'subnets'",
")",
... | Returns if a subnet is present. | [
"Returns",
"if",
"a",
"subnet",
"is",
"present",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/dfa_openstack_helper.py#L119-L131 | train | 37,700 |
openstack/networking-cisco | networking_cisco/apps/saf/server/dfa_openstack_helper.py | DfaNeutronHelper.get_all_subnets_cidr | def get_all_subnets_cidr(self, no_mask=False):
"""Returns all the subnets. """
body = {}
subnet_cidrs = []
try:
subnet_list = self.neutronclient.list_subnets(body=body)
subnet_dat = subnet_list.get('subnets')
for sub in subnet_dat:
if no_mask:
subnet_cidrs.append(sub.get('cidr').split('/')[0])
else:
subnet_cidrs.append(sub.get('cidr'))
except Exception as exc:
LOG.error("Failed to list subnet Exc %s", str(exc))
return subnet_cidrs | python | def get_all_subnets_cidr(self, no_mask=False):
"""Returns all the subnets. """
body = {}
subnet_cidrs = []
try:
subnet_list = self.neutronclient.list_subnets(body=body)
subnet_dat = subnet_list.get('subnets')
for sub in subnet_dat:
if no_mask:
subnet_cidrs.append(sub.get('cidr').split('/')[0])
else:
subnet_cidrs.append(sub.get('cidr'))
except Exception as exc:
LOG.error("Failed to list subnet Exc %s", str(exc))
return subnet_cidrs | [
"def",
"get_all_subnets_cidr",
"(",
"self",
",",
"no_mask",
"=",
"False",
")",
":",
"body",
"=",
"{",
"}",
"subnet_cidrs",
"=",
"[",
"]",
"try",
":",
"subnet_list",
"=",
"self",
".",
"neutronclient",
".",
"list_subnets",
"(",
"body",
"=",
"body",
")",
... | Returns all the subnets. | [
"Returns",
"all",
"the",
"subnets",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/dfa_openstack_helper.py#L133-L147 | train | 37,701 |
openstack/networking-cisco | networking_cisco/apps/saf/server/dfa_openstack_helper.py | DfaNeutronHelper.get_subnets_for_net | def get_subnets_for_net(self, net):
"""Returns the subnets in a network. """
try:
subnet_list = self.neutronclient.list_subnets(network_id=net)
subnet_dat = subnet_list.get('subnets')
return subnet_dat
except Exception as exc:
LOG.error("Failed to list subnet net %(net)s, Exc: %(exc)s",
{'net': net, 'exc': str(exc)})
return None | python | def get_subnets_for_net(self, net):
"""Returns the subnets in a network. """
try:
subnet_list = self.neutronclient.list_subnets(network_id=net)
subnet_dat = subnet_list.get('subnets')
return subnet_dat
except Exception as exc:
LOG.error("Failed to list subnet net %(net)s, Exc: %(exc)s",
{'net': net, 'exc': str(exc)})
return None | [
"def",
"get_subnets_for_net",
"(",
"self",
",",
"net",
")",
":",
"try",
":",
"subnet_list",
"=",
"self",
".",
"neutronclient",
".",
"list_subnets",
"(",
"network_id",
"=",
"net",
")",
"subnet_dat",
"=",
"subnet_list",
".",
"get",
"(",
"'subnets'",
")",
"re... | Returns the subnets in a network. | [
"Returns",
"the",
"subnets",
"in",
"a",
"network",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/dfa_openstack_helper.py#L149-L158 | train | 37,702 |
openstack/networking-cisco | networking_cisco/apps/saf/server/dfa_openstack_helper.py | DfaNeutronHelper.get_subnet_cidr | def get_subnet_cidr(self, subnet_id):
"""retrieve the CIDR associated with a subnet, given its ID. """
try:
subnet_list = self.neutronclient.list_subnets(id=subnet_id)
subnet_dat = subnet_list.get('subnets')[0]
return subnet_dat.get('cidr')
except Exception as exc:
LOG.error("Failed to list subnet for ID %(subnet)s, "
"exc %(exc)s", {'subnet': subnet_id, 'exc': exc})
return None | python | def get_subnet_cidr(self, subnet_id):
"""retrieve the CIDR associated with a subnet, given its ID. """
try:
subnet_list = self.neutronclient.list_subnets(id=subnet_id)
subnet_dat = subnet_list.get('subnets')[0]
return subnet_dat.get('cidr')
except Exception as exc:
LOG.error("Failed to list subnet for ID %(subnet)s, "
"exc %(exc)s", {'subnet': subnet_id, 'exc': exc})
return None | [
"def",
"get_subnet_cidr",
"(",
"self",
",",
"subnet_id",
")",
":",
"try",
":",
"subnet_list",
"=",
"self",
".",
"neutronclient",
".",
"list_subnets",
"(",
"id",
"=",
"subnet_id",
")",
"subnet_dat",
"=",
"subnet_list",
".",
"get",
"(",
"'subnets'",
")",
"["... | retrieve the CIDR associated with a subnet, given its ID. | [
"retrieve",
"the",
"CIDR",
"associated",
"with",
"a",
"subnet",
"given",
"its",
"ID",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/dfa_openstack_helper.py#L160-L169 | train | 37,703 |
openstack/networking-cisco | networking_cisco/apps/saf/server/dfa_openstack_helper.py | DfaNeutronHelper.delete_network_subname | def delete_network_subname(self, sub_name):
"""Delete the network by part of its name, use with caution. """
try:
body = {}
net_list = self.neutronclient.list_networks(body=body)
for net in net_list:
if net.get('name').find(sub_name) != -1:
self.delete_network_all_subnets(net.get('net_id'))
except Exception as exc:
LOG.error("Failed to get network by subname %(name)s, "
"Exc %(exc)s",
{'name': sub_name, 'exc': str(exc)}) | python | def delete_network_subname(self, sub_name):
"""Delete the network by part of its name, use with caution. """
try:
body = {}
net_list = self.neutronclient.list_networks(body=body)
for net in net_list:
if net.get('name').find(sub_name) != -1:
self.delete_network_all_subnets(net.get('net_id'))
except Exception as exc:
LOG.error("Failed to get network by subname %(name)s, "
"Exc %(exc)s",
{'name': sub_name, 'exc': str(exc)}) | [
"def",
"delete_network_subname",
"(",
"self",
",",
"sub_name",
")",
":",
"try",
":",
"body",
"=",
"{",
"}",
"net_list",
"=",
"self",
".",
"neutronclient",
".",
"list_networks",
"(",
"body",
"=",
"body",
")",
"for",
"net",
"in",
"net_list",
":",
"if",
"... | Delete the network by part of its name, use with caution. | [
"Delete",
"the",
"network",
"by",
"part",
"of",
"its",
"name",
"use",
"with",
"caution",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/dfa_openstack_helper.py#L171-L182 | train | 37,704 |
openstack/networking-cisco | networking_cisco/apps/saf/server/dfa_openstack_helper.py | DfaNeutronHelper.get_network_by_name | def get_network_by_name(self, nwk_name):
"""Search for a openstack network by name. """
ret_net_lst = []
try:
body = {}
net_list = self.neutronclient.list_networks(body=body)
net_list = net_list.get('networks')
for net in net_list:
if net.get('name') == nwk_name:
ret_net_lst.append(net)
except Exception as exc:
LOG.error("Failed to get network by name %(name)s, "
"Exc %(exc)s",
{'name': nwk_name, 'exc': str(exc)})
return ret_net_lst | python | def get_network_by_name(self, nwk_name):
"""Search for a openstack network by name. """
ret_net_lst = []
try:
body = {}
net_list = self.neutronclient.list_networks(body=body)
net_list = net_list.get('networks')
for net in net_list:
if net.get('name') == nwk_name:
ret_net_lst.append(net)
except Exception as exc:
LOG.error("Failed to get network by name %(name)s, "
"Exc %(exc)s",
{'name': nwk_name, 'exc': str(exc)})
return ret_net_lst | [
"def",
"get_network_by_name",
"(",
"self",
",",
"nwk_name",
")",
":",
"ret_net_lst",
"=",
"[",
"]",
"try",
":",
"body",
"=",
"{",
"}",
"net_list",
"=",
"self",
".",
"neutronclient",
".",
"list_networks",
"(",
"body",
"=",
"body",
")",
"net_list",
"=",
... | Search for a openstack network by name. | [
"Search",
"for",
"a",
"openstack",
"network",
"by",
"name",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/dfa_openstack_helper.py#L184-L198 | train | 37,705 |
openstack/networking-cisco | networking_cisco/apps/saf/server/dfa_openstack_helper.py | DfaNeutronHelper.get_network_by_tenant | def get_network_by_tenant(self, tenant_id):
"""Returns the network of a given tenant. """
ret_net_lst = []
try:
net_list = self.neutronclient.list_networks(body={})
for net in net_list.get('networks'):
if net.get('tenant_id') == tenant_id:
ret_net_lst.append(net)
except Exception as exc:
LOG.error("Failed to get network by tenant %(tenant)s, "
"Exc %(exc)s",
{'tenant': tenant_id, 'exc': str(exc)})
return ret_net_lst | python | def get_network_by_tenant(self, tenant_id):
"""Returns the network of a given tenant. """
ret_net_lst = []
try:
net_list = self.neutronclient.list_networks(body={})
for net in net_list.get('networks'):
if net.get('tenant_id') == tenant_id:
ret_net_lst.append(net)
except Exception as exc:
LOG.error("Failed to get network by tenant %(tenant)s, "
"Exc %(exc)s",
{'tenant': tenant_id, 'exc': str(exc)})
return ret_net_lst | [
"def",
"get_network_by_tenant",
"(",
"self",
",",
"tenant_id",
")",
":",
"ret_net_lst",
"=",
"[",
"]",
"try",
":",
"net_list",
"=",
"self",
".",
"neutronclient",
".",
"list_networks",
"(",
"body",
"=",
"{",
"}",
")",
"for",
"net",
"in",
"net_list",
".",
... | Returns the network of a given tenant. | [
"Returns",
"the",
"network",
"of",
"a",
"given",
"tenant",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/dfa_openstack_helper.py#L200-L212 | train | 37,706 |
openstack/networking-cisco | networking_cisco/apps/saf/server/dfa_openstack_helper.py | DfaNeutronHelper.get_rtr_by_name | def get_rtr_by_name(self, rtr_name):
"""Search a router by its name. """
upd_rtr_list = []
try:
rtr_list = self.neutronclient.list_routers()
for rtr in rtr_list.get('routers'):
if rtr_name == rtr['name']:
upd_rtr_list.append(rtr)
except Exception as exc:
LOG.error("Failed to get router by name %(name)s, "
"Exc %(exc)s",
{'name': rtr_name, 'exc': str(exc)})
return upd_rtr_list | python | def get_rtr_by_name(self, rtr_name):
"""Search a router by its name. """
upd_rtr_list = []
try:
rtr_list = self.neutronclient.list_routers()
for rtr in rtr_list.get('routers'):
if rtr_name == rtr['name']:
upd_rtr_list.append(rtr)
except Exception as exc:
LOG.error("Failed to get router by name %(name)s, "
"Exc %(exc)s",
{'name': rtr_name, 'exc': str(exc)})
return upd_rtr_list | [
"def",
"get_rtr_by_name",
"(",
"self",
",",
"rtr_name",
")",
":",
"upd_rtr_list",
"=",
"[",
"]",
"try",
":",
"rtr_list",
"=",
"self",
".",
"neutronclient",
".",
"list_routers",
"(",
")",
"for",
"rtr",
"in",
"rtr_list",
".",
"get",
"(",
"'routers'",
")",
... | Search a router by its name. | [
"Search",
"a",
"router",
"by",
"its",
"name",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/dfa_openstack_helper.py#L215-L227 | train | 37,707 |
openstack/networking-cisco | networking_cisco/apps/saf/server/dfa_openstack_helper.py | DfaNeutronHelper.create_router | def create_router(self, name, tenant_id, subnet_lst):
"""Create a openstack router and add the interfaces. """
try:
body = {'router': {'name': name, 'tenant_id': tenant_id,
'admin_state_up': True}}
router = self.neutronclient.create_router(body=body)
rout_dict = router.get('router')
rout_id = rout_dict.get('id')
except Exception as exc:
LOG.error("Failed to create router with name %(name)s"
" Exc %(exc)s", {'name': name, 'exc': str(exc)})
return None
ret = self.add_intf_router(rout_id, tenant_id, subnet_lst)
if not ret:
try:
ret = self.neutronclient.delete_router(rout_id)
except Exception as exc:
LOG.error("Failed to delete router %(name)s, Exc %(exc)s",
{'name': name, 'exc': str(exc)})
return None
return rout_id | python | def create_router(self, name, tenant_id, subnet_lst):
"""Create a openstack router and add the interfaces. """
try:
body = {'router': {'name': name, 'tenant_id': tenant_id,
'admin_state_up': True}}
router = self.neutronclient.create_router(body=body)
rout_dict = router.get('router')
rout_id = rout_dict.get('id')
except Exception as exc:
LOG.error("Failed to create router with name %(name)s"
" Exc %(exc)s", {'name': name, 'exc': str(exc)})
return None
ret = self.add_intf_router(rout_id, tenant_id, subnet_lst)
if not ret:
try:
ret = self.neutronclient.delete_router(rout_id)
except Exception as exc:
LOG.error("Failed to delete router %(name)s, Exc %(exc)s",
{'name': name, 'exc': str(exc)})
return None
return rout_id | [
"def",
"create_router",
"(",
"self",
",",
"name",
",",
"tenant_id",
",",
"subnet_lst",
")",
":",
"try",
":",
"body",
"=",
"{",
"'router'",
":",
"{",
"'name'",
":",
"name",
",",
"'tenant_id'",
":",
"tenant_id",
",",
"'admin_state_up'",
":",
"True",
"}",
... | Create a openstack router and add the interfaces. | [
"Create",
"a",
"openstack",
"router",
"and",
"add",
"the",
"interfaces",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/dfa_openstack_helper.py#L229-L250 | train | 37,708 |
openstack/networking-cisco | networking_cisco/apps/saf/server/dfa_openstack_helper.py | DfaNeutronHelper.add_intf_router | def add_intf_router(self, rout_id, tenant_id, subnet_lst):
"""Add the interfaces to a router. """
try:
for subnet_id in subnet_lst:
body = {'subnet_id': subnet_id}
intf = self.neutronclient.add_interface_router(rout_id,
body=body)
intf.get('port_id')
except Exception as exc:
LOG.error("Failed to create router intf ID %(id)s,"
" Exc %(exc)s", {'id': rout_id, 'exc': str(exc)})
return False
return True | python | def add_intf_router(self, rout_id, tenant_id, subnet_lst):
"""Add the interfaces to a router. """
try:
for subnet_id in subnet_lst:
body = {'subnet_id': subnet_id}
intf = self.neutronclient.add_interface_router(rout_id,
body=body)
intf.get('port_id')
except Exception as exc:
LOG.error("Failed to create router intf ID %(id)s,"
" Exc %(exc)s", {'id': rout_id, 'exc': str(exc)})
return False
return True | [
"def",
"add_intf_router",
"(",
"self",
",",
"rout_id",
",",
"tenant_id",
",",
"subnet_lst",
")",
":",
"try",
":",
"for",
"subnet_id",
"in",
"subnet_lst",
":",
"body",
"=",
"{",
"'subnet_id'",
":",
"subnet_id",
"}",
"intf",
"=",
"self",
".",
"neutronclient"... | Add the interfaces to a router. | [
"Add",
"the",
"interfaces",
"to",
"a",
"router",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/dfa_openstack_helper.py#L252-L264 | train | 37,709 |
openstack/networking-cisco | networking_cisco/apps/saf/server/dfa_openstack_helper.py | DfaNeutronHelper.delete_router | def delete_router(self, name, tenant_id, rout_id, subnet_lst):
"""Delete the openstack router.
Delete the router and remove the interfaces attached to it.
"""
ret = self.delete_intf_router(name, tenant_id, rout_id, subnet_lst)
if not ret:
return False
try:
ret = self.neutronclient.delete_router(rout_id)
except Exception as exc:
LOG.error("Failed to delete router %(name)s ret %(ret)s "
"Exc %(exc)s",
{'name': name, 'ret': str(ret), 'exc': str(exc)})
return False
return True | python | def delete_router(self, name, tenant_id, rout_id, subnet_lst):
"""Delete the openstack router.
Delete the router and remove the interfaces attached to it.
"""
ret = self.delete_intf_router(name, tenant_id, rout_id, subnet_lst)
if not ret:
return False
try:
ret = self.neutronclient.delete_router(rout_id)
except Exception as exc:
LOG.error("Failed to delete router %(name)s ret %(ret)s "
"Exc %(exc)s",
{'name': name, 'ret': str(ret), 'exc': str(exc)})
return False
return True | [
"def",
"delete_router",
"(",
"self",
",",
"name",
",",
"tenant_id",
",",
"rout_id",
",",
"subnet_lst",
")",
":",
"ret",
"=",
"self",
".",
"delete_intf_router",
"(",
"name",
",",
"tenant_id",
",",
"rout_id",
",",
"subnet_lst",
")",
"if",
"not",
"ret",
":"... | Delete the openstack router.
Delete the router and remove the interfaces attached to it. | [
"Delete",
"the",
"openstack",
"router",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/dfa_openstack_helper.py#L267-L283 | train | 37,710 |
openstack/networking-cisco | networking_cisco/apps/saf/server/dfa_openstack_helper.py | DfaNeutronHelper.delete_intf_router | def delete_intf_router(self, name, tenant_id, rout_id, subnet_lst):
"""Delete the openstack router and remove the interfaces attached. """
try:
for subnet_id in subnet_lst:
body = {'subnet_id': subnet_id}
intf = self.neutronclient.remove_interface_router(rout_id,
body=body)
intf.get('id')
except Exception as exc:
LOG.error("Failed to delete router interface %(name)s, "
" Exc %(exc)s", {'name': name, 'exc': str(exc)})
return False
return True | python | def delete_intf_router(self, name, tenant_id, rout_id, subnet_lst):
"""Delete the openstack router and remove the interfaces attached. """
try:
for subnet_id in subnet_lst:
body = {'subnet_id': subnet_id}
intf = self.neutronclient.remove_interface_router(rout_id,
body=body)
intf.get('id')
except Exception as exc:
LOG.error("Failed to delete router interface %(name)s, "
" Exc %(exc)s", {'name': name, 'exc': str(exc)})
return False
return True | [
"def",
"delete_intf_router",
"(",
"self",
",",
"name",
",",
"tenant_id",
",",
"rout_id",
",",
"subnet_lst",
")",
":",
"try",
":",
"for",
"subnet_id",
"in",
"subnet_lst",
":",
"body",
"=",
"{",
"'subnet_id'",
":",
"subnet_id",
"}",
"intf",
"=",
"self",
".... | Delete the openstack router and remove the interfaces attached. | [
"Delete",
"the",
"openstack",
"router",
"and",
"remove",
"the",
"interfaces",
"attached",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/dfa_openstack_helper.py#L285-L297 | train | 37,711 |
openstack/networking-cisco | networking_cisco/apps/saf/server/dfa_openstack_helper.py | DfaNeutronHelper.delete_router_by_name | def delete_router_by_name(self, rtr_name, tenant_id):
"""Delete the openstack router and its interfaces given its name.
The interfaces should be already removed prior to calling this
function.
"""
try:
routers = self.neutronclient.list_routers()
rtr_list = routers.get('routers')
for rtr in rtr_list:
if rtr_name == rtr['name']:
self.neutronclient.delete_router(rtr['id'])
except Exception as exc:
LOG.error("Failed to get and delete router by name %(name)s, "
"Exc %(exc)s",
{'name': rtr_name, 'exc': str(exc)})
return False
return True | python | def delete_router_by_name(self, rtr_name, tenant_id):
"""Delete the openstack router and its interfaces given its name.
The interfaces should be already removed prior to calling this
function.
"""
try:
routers = self.neutronclient.list_routers()
rtr_list = routers.get('routers')
for rtr in rtr_list:
if rtr_name == rtr['name']:
self.neutronclient.delete_router(rtr['id'])
except Exception as exc:
LOG.error("Failed to get and delete router by name %(name)s, "
"Exc %(exc)s",
{'name': rtr_name, 'exc': str(exc)})
return False
return True | [
"def",
"delete_router_by_name",
"(",
"self",
",",
"rtr_name",
",",
"tenant_id",
")",
":",
"try",
":",
"routers",
"=",
"self",
".",
"neutronclient",
".",
"list_routers",
"(",
")",
"rtr_list",
"=",
"routers",
".",
"get",
"(",
"'routers'",
")",
"for",
"rtr",
... | Delete the openstack router and its interfaces given its name.
The interfaces should be already removed prior to calling this
function. | [
"Delete",
"the",
"openstack",
"router",
"and",
"its",
"interfaces",
"given",
"its",
"name",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/dfa_openstack_helper.py#L299-L316 | train | 37,712 |
openstack/networking-cisco | networking_cisco/apps/saf/server/dfa_openstack_helper.py | DfaNeutronHelper.get_rtr_name | def get_rtr_name(self, router_id):
"""Retrieve the router name. Incomplete. """
try:
body = {}
router = self.neutronclient.show_router(router_id, body=body)
return router.get('router').get('name')
except Exception as exc:
LOG.error("Failed to show router interface %(id)s "
"Exc %(exc)s", {'id': router_id, 'exc': str(exc)}) | python | def get_rtr_name(self, router_id):
"""Retrieve the router name. Incomplete. """
try:
body = {}
router = self.neutronclient.show_router(router_id, body=body)
return router.get('router').get('name')
except Exception as exc:
LOG.error("Failed to show router interface %(id)s "
"Exc %(exc)s", {'id': router_id, 'exc': str(exc)}) | [
"def",
"get_rtr_name",
"(",
"self",
",",
"router_id",
")",
":",
"try",
":",
"body",
"=",
"{",
"}",
"router",
"=",
"self",
".",
"neutronclient",
".",
"show_router",
"(",
"router_id",
",",
"body",
"=",
"body",
")",
"return",
"router",
".",
"get",
"(",
... | Retrieve the router name. Incomplete. | [
"Retrieve",
"the",
"router",
"name",
".",
"Incomplete",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/dfa_openstack_helper.py#L344-L352 | train | 37,713 |
openstack/networking-cisco | networking_cisco/apps/saf/server/dfa_openstack_helper.py | DfaNeutronHelper.find_rtr_namespace | def find_rtr_namespace(self, rout_id):
"""Find the namespace associated with the router. """
if rout_id is None:
return None
args = ['ip', 'netns', 'list']
try:
ns_list = utils.execute(args, root_helper=self.root_helper)
except Exception as exc:
LOG.error("Unable to find the namespace list Exception %s",
exc)
return None
for ns in ns_list.split():
if 'router' in ns and rout_id in ns:
return ns | python | def find_rtr_namespace(self, rout_id):
"""Find the namespace associated with the router. """
if rout_id is None:
return None
args = ['ip', 'netns', 'list']
try:
ns_list = utils.execute(args, root_helper=self.root_helper)
except Exception as exc:
LOG.error("Unable to find the namespace list Exception %s",
exc)
return None
for ns in ns_list.split():
if 'router' in ns and rout_id in ns:
return ns | [
"def",
"find_rtr_namespace",
"(",
"self",
",",
"rout_id",
")",
":",
"if",
"rout_id",
"is",
"None",
":",
"return",
"None",
"args",
"=",
"[",
"'ip'",
",",
"'netns'",
",",
"'list'",
"]",
"try",
":",
"ns_list",
"=",
"utils",
".",
"execute",
"(",
"args",
... | Find the namespace associated with the router. | [
"Find",
"the",
"namespace",
"associated",
"with",
"the",
"router",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/dfa_openstack_helper.py#L354-L367 | train | 37,714 |
openstack/networking-cisco | networking_cisco/apps/saf/server/dfa_openstack_helper.py | DfaNeutronHelper.program_rtr | def program_rtr(self, args, rout_id, namespace=None):
"""Execute the command against the namespace. """
if namespace is None:
namespace = self.find_rtr_namespace(rout_id)
if namespace is None:
LOG.error("Unable to find namespace for router %s", rout_id)
return False
final_args = ['ip', 'netns', 'exec', namespace] + args
try:
utils.execute(final_args, root_helper=self.root_helper)
except Exception as e:
LOG.error("Unable to execute %(cmd)s. "
"Exception: %(exception)s",
{'cmd': final_args, 'exception': e})
return False
return True | python | def program_rtr(self, args, rout_id, namespace=None):
"""Execute the command against the namespace. """
if namespace is None:
namespace = self.find_rtr_namespace(rout_id)
if namespace is None:
LOG.error("Unable to find namespace for router %s", rout_id)
return False
final_args = ['ip', 'netns', 'exec', namespace] + args
try:
utils.execute(final_args, root_helper=self.root_helper)
except Exception as e:
LOG.error("Unable to execute %(cmd)s. "
"Exception: %(exception)s",
{'cmd': final_args, 'exception': e})
return False
return True | [
"def",
"program_rtr",
"(",
"self",
",",
"args",
",",
"rout_id",
",",
"namespace",
"=",
"None",
")",
":",
"if",
"namespace",
"is",
"None",
":",
"namespace",
"=",
"self",
".",
"find_rtr_namespace",
"(",
"rout_id",
")",
"if",
"namespace",
"is",
"None",
":",... | Execute the command against the namespace. | [
"Execute",
"the",
"command",
"against",
"the",
"namespace",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/dfa_openstack_helper.py#L369-L384 | train | 37,715 |
openstack/networking-cisco | networking_cisco/apps/saf/server/dfa_openstack_helper.py | DfaNeutronHelper.program_rtr_default_gw | def program_rtr_default_gw(self, tenant_id, rout_id, gw):
"""Program the default gateway of a router. """
args = ['route', 'add', 'default', 'gw', gw]
ret = self.program_rtr(args, rout_id)
if not ret:
LOG.error("Program router returned error for %s", rout_id)
return False
return True | python | def program_rtr_default_gw(self, tenant_id, rout_id, gw):
"""Program the default gateway of a router. """
args = ['route', 'add', 'default', 'gw', gw]
ret = self.program_rtr(args, rout_id)
if not ret:
LOG.error("Program router returned error for %s", rout_id)
return False
return True | [
"def",
"program_rtr_default_gw",
"(",
"self",
",",
"tenant_id",
",",
"rout_id",
",",
"gw",
")",
":",
"args",
"=",
"[",
"'route'",
",",
"'add'",
",",
"'default'",
",",
"'gw'",
",",
"gw",
"]",
"ret",
"=",
"self",
".",
"program_rtr",
"(",
"args",
",",
"... | Program the default gateway of a router. | [
"Program",
"the",
"default",
"gateway",
"of",
"a",
"router",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/dfa_openstack_helper.py#L402-L409 | train | 37,716 |
openstack/networking-cisco | networking_cisco/apps/saf/server/dfa_openstack_helper.py | DfaNeutronHelper.get_subnet_nwk_excl | def get_subnet_nwk_excl(self, tenant_id, excl_list, excl_part=False):
"""Retrieve the subnets of a network.
Get the subnets inside a network after applying the exclusion
list.
"""
net_list = self.get_network_by_tenant(tenant_id)
ret_subnet_list = []
for net in net_list:
if excl_part:
name = net.get('name')
part = name.partition('::')[2]
if part:
continue
subnet_lst = self.get_subnets_for_net(net.get('id'))
for subnet_elem in subnet_lst:
subnet = subnet_elem.get('cidr').split('/')[0]
subnet_and_mask = subnet_elem.get('cidr')
if subnet not in excl_list:
ret_subnet_list.append(subnet_and_mask)
return ret_subnet_list | python | def get_subnet_nwk_excl(self, tenant_id, excl_list, excl_part=False):
"""Retrieve the subnets of a network.
Get the subnets inside a network after applying the exclusion
list.
"""
net_list = self.get_network_by_tenant(tenant_id)
ret_subnet_list = []
for net in net_list:
if excl_part:
name = net.get('name')
part = name.partition('::')[2]
if part:
continue
subnet_lst = self.get_subnets_for_net(net.get('id'))
for subnet_elem in subnet_lst:
subnet = subnet_elem.get('cidr').split('/')[0]
subnet_and_mask = subnet_elem.get('cidr')
if subnet not in excl_list:
ret_subnet_list.append(subnet_and_mask)
return ret_subnet_list | [
"def",
"get_subnet_nwk_excl",
"(",
"self",
",",
"tenant_id",
",",
"excl_list",
",",
"excl_part",
"=",
"False",
")",
":",
"net_list",
"=",
"self",
".",
"get_network_by_tenant",
"(",
"tenant_id",
")",
"ret_subnet_list",
"=",
"[",
"]",
"for",
"net",
"in",
"net_... | Retrieve the subnets of a network.
Get the subnets inside a network after applying the exclusion
list. | [
"Retrieve",
"the",
"subnets",
"of",
"a",
"network",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/dfa_openstack_helper.py#L411-L431 | train | 37,717 |
openstack/networking-cisco | networking_cisco/apps/saf/server/dfa_openstack_helper.py | DfaNeutronHelper.remove_rtr_nwk_next_hop | def remove_rtr_nwk_next_hop(self, rout_id, next_hop, subnet_lst,
excl_list):
"""Remove the next hop for all networks of a tenant. """
namespace = self.find_rtr_namespace(rout_id)
if namespace is None:
LOG.error("Unable to find namespace for router %s", rout_id)
return False
args = ['ip', 'route']
ret = self.program_rtr_return(args, rout_id, namespace=namespace)
if ret is None:
LOG.error("Get routes return None %s", rout_id)
return False
routes = ret.split('\n')
concat_lst = subnet_lst + excl_list
for rout in routes:
if len(rout) == 0:
continue
nwk = rout.split()[0]
if nwk == 'default':
continue
nwk_no_mask = nwk.split('/')[0]
if nwk_no_mask not in concat_lst and nwk not in concat_lst:
args = ['route', 'del', '-net', nwk, 'gw', next_hop]
ret = self.program_rtr(args, rout_id, namespace=namespace)
if not ret:
LOG.error("Program router returned error for %s",
rout_id)
return False
return True | python | def remove_rtr_nwk_next_hop(self, rout_id, next_hop, subnet_lst,
excl_list):
"""Remove the next hop for all networks of a tenant. """
namespace = self.find_rtr_namespace(rout_id)
if namespace is None:
LOG.error("Unable to find namespace for router %s", rout_id)
return False
args = ['ip', 'route']
ret = self.program_rtr_return(args, rout_id, namespace=namespace)
if ret is None:
LOG.error("Get routes return None %s", rout_id)
return False
routes = ret.split('\n')
concat_lst = subnet_lst + excl_list
for rout in routes:
if len(rout) == 0:
continue
nwk = rout.split()[0]
if nwk == 'default':
continue
nwk_no_mask = nwk.split('/')[0]
if nwk_no_mask not in concat_lst and nwk not in concat_lst:
args = ['route', 'del', '-net', nwk, 'gw', next_hop]
ret = self.program_rtr(args, rout_id, namespace=namespace)
if not ret:
LOG.error("Program router returned error for %s",
rout_id)
return False
return True | [
"def",
"remove_rtr_nwk_next_hop",
"(",
"self",
",",
"rout_id",
",",
"next_hop",
",",
"subnet_lst",
",",
"excl_list",
")",
":",
"namespace",
"=",
"self",
".",
"find_rtr_namespace",
"(",
"rout_id",
")",
"if",
"namespace",
"is",
"None",
":",
"LOG",
".",
"error"... | Remove the next hop for all networks of a tenant. | [
"Remove",
"the",
"next",
"hop",
"for",
"all",
"networks",
"of",
"a",
"tenant",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/dfa_openstack_helper.py#L471-L500 | train | 37,718 |
openstack/networking-cisco | networking_cisco/apps/saf/server/dfa_openstack_helper.py | DfaNeutronHelper.get_fw | def get_fw(self, fw_id):
"""Return the Firewall given its ID. """
fw = None
try:
fw = self.neutronclient.show_firewall(fw_id)
except Exception as exc:
LOG.error("Failed to get firewall list for id %(id)s, "
"Exc %(exc)s", {'id': fw_id, 'exc': str(exc)})
return fw | python | def get_fw(self, fw_id):
"""Return the Firewall given its ID. """
fw = None
try:
fw = self.neutronclient.show_firewall(fw_id)
except Exception as exc:
LOG.error("Failed to get firewall list for id %(id)s, "
"Exc %(exc)s", {'id': fw_id, 'exc': str(exc)})
return fw | [
"def",
"get_fw",
"(",
"self",
",",
"fw_id",
")",
":",
"fw",
"=",
"None",
"try",
":",
"fw",
"=",
"self",
".",
"neutronclient",
".",
"show_firewall",
"(",
"fw_id",
")",
"except",
"Exception",
"as",
"exc",
":",
"LOG",
".",
"error",
"(",
"\"Failed to get f... | Return the Firewall given its ID. | [
"Return",
"the",
"Firewall",
"given",
"its",
"ID",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/dfa_openstack_helper.py#L502-L510 | train | 37,719 |
openstack/networking-cisco | networking_cisco/apps/saf/server/dfa_openstack_helper.py | DfaNeutronHelper.get_fw_rule | def get_fw_rule(self, rule_id):
"""Return the firewall rule, given its ID. """
rule = None
try:
rule = self.neutronclient.show_firewall_rule(rule_id)
except Exception as exc:
LOG.error("Failed to get firewall rule for id %(id)s "
"Exc %(exc)s", {'id': rule_id, 'exc': str(exc)})
return rule | python | def get_fw_rule(self, rule_id):
"""Return the firewall rule, given its ID. """
rule = None
try:
rule = self.neutronclient.show_firewall_rule(rule_id)
except Exception as exc:
LOG.error("Failed to get firewall rule for id %(id)s "
"Exc %(exc)s", {'id': rule_id, 'exc': str(exc)})
return rule | [
"def",
"get_fw_rule",
"(",
"self",
",",
"rule_id",
")",
":",
"rule",
"=",
"None",
"try",
":",
"rule",
"=",
"self",
".",
"neutronclient",
".",
"show_firewall_rule",
"(",
"rule_id",
")",
"except",
"Exception",
"as",
"exc",
":",
"LOG",
".",
"error",
"(",
... | Return the firewall rule, given its ID. | [
"Return",
"the",
"firewall",
"rule",
"given",
"its",
"ID",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/dfa_openstack_helper.py#L513-L521 | train | 37,720 |
openstack/networking-cisco | networking_cisco/apps/saf/server/dfa_openstack_helper.py | DfaNeutronHelper.get_fw_policy | def get_fw_policy(self, policy_id):
"""Return the firewall policy, given its ID. """
policy = None
try:
policy = self.neutronclient.show_firewall_policy(policy_id)
except Exception as exc:
LOG.error("Failed to get firewall plcy for id %(id)s "
"Exc %(exc)s",
{'id': policy_id, 'exc': str(exc)})
return policy | python | def get_fw_policy(self, policy_id):
"""Return the firewall policy, given its ID. """
policy = None
try:
policy = self.neutronclient.show_firewall_policy(policy_id)
except Exception as exc:
LOG.error("Failed to get firewall plcy for id %(id)s "
"Exc %(exc)s",
{'id': policy_id, 'exc': str(exc)})
return policy | [
"def",
"get_fw_policy",
"(",
"self",
",",
"policy_id",
")",
":",
"policy",
"=",
"None",
"try",
":",
"policy",
"=",
"self",
".",
"neutronclient",
".",
"show_firewall_policy",
"(",
"policy_id",
")",
"except",
"Exception",
"as",
"exc",
":",
"LOG",
".",
"error... | Return the firewall policy, given its ID. | [
"Return",
"the",
"firewall",
"policy",
"given",
"its",
"ID",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/dfa_openstack_helper.py#L524-L533 | train | 37,721 |
openstack/networking-cisco | networking_cisco/plugins/cisco/db/l3/ha_db.py | HA_db_mixin._add_redundancy_routers | def _add_redundancy_routers(self, context, start_index, stop_index,
user_visible_router, ports=None,
ha_settings_db=None, create_ha_group=True):
"""Creates a redundancy router and its interfaces on
the specified subnets.
"""
priority = (DEFAULT_MASTER_PRIORITY +
(start_index - 1) * PRIORITY_INCREASE_STEP)
r = copy.deepcopy(user_visible_router)
# No tenant_id so redundancy routers are hidden from user
r['tenant_id'] = ''
name = r['name']
redundancy_r_ids = []
for i in range(start_index, stop_index):
del r['id']
# We don't replicate the user visible router's routes, instead
# they are populated to redundancy routers for get router(s) ops
r.pop('routes', None)
# Redundancy routers will never have a route spec themselves
# The redundancy routers must have HA disabled
r[ha.ENABLED] = False
r['name'] = name + REDUNDANCY_ROUTER_SUFFIX + str(i)
# set role so that purpose of this router can be easily determined
r[routerrole.ROUTER_ROLE_ATTR] = ROUTER_ROLE_HA_REDUNDANCY
gw_info = r[EXTERNAL_GW_INFO]
if gw_info and gw_info['external_fixed_ips']:
# Ensure ip addresses are not specified as they cannot be
# same as visible router's ip addresses.
for e_fixed_ip in gw_info['external_fixed_ips']:
e_fixed_ip.pop('ip_address', None)
r = self.create_router(context, {'router': r})
LOG.debug("Created redundancy router %(index)d with router id "
"%(r_id)s", {'index': i, 'r_id': r['id']})
priority += PRIORITY_INCREASE_STEP
r_b_b = RouterRedundancyBinding(
redundancy_router_id=r['id'],
priority=priority,
user_router_id=user_visible_router['id'])
context.session.add(r_b_b)
redundancy_r_ids.append(r['id'])
for port_db in ports or []:
port = self._core_plugin._make_port_dict(port_db)
self._add_redundancy_router_interfaces(
context, user_visible_router, None, port,
redundancy_r_ids, ha_settings_db, create_ha_group) | python | def _add_redundancy_routers(self, context, start_index, stop_index,
user_visible_router, ports=None,
ha_settings_db=None, create_ha_group=True):
"""Creates a redundancy router and its interfaces on
the specified subnets.
"""
priority = (DEFAULT_MASTER_PRIORITY +
(start_index - 1) * PRIORITY_INCREASE_STEP)
r = copy.deepcopy(user_visible_router)
# No tenant_id so redundancy routers are hidden from user
r['tenant_id'] = ''
name = r['name']
redundancy_r_ids = []
for i in range(start_index, stop_index):
del r['id']
# We don't replicate the user visible router's routes, instead
# they are populated to redundancy routers for get router(s) ops
r.pop('routes', None)
# Redundancy routers will never have a route spec themselves
# The redundancy routers must have HA disabled
r[ha.ENABLED] = False
r['name'] = name + REDUNDANCY_ROUTER_SUFFIX + str(i)
# set role so that purpose of this router can be easily determined
r[routerrole.ROUTER_ROLE_ATTR] = ROUTER_ROLE_HA_REDUNDANCY
gw_info = r[EXTERNAL_GW_INFO]
if gw_info and gw_info['external_fixed_ips']:
# Ensure ip addresses are not specified as they cannot be
# same as visible router's ip addresses.
for e_fixed_ip in gw_info['external_fixed_ips']:
e_fixed_ip.pop('ip_address', None)
r = self.create_router(context, {'router': r})
LOG.debug("Created redundancy router %(index)d with router id "
"%(r_id)s", {'index': i, 'r_id': r['id']})
priority += PRIORITY_INCREASE_STEP
r_b_b = RouterRedundancyBinding(
redundancy_router_id=r['id'],
priority=priority,
user_router_id=user_visible_router['id'])
context.session.add(r_b_b)
redundancy_r_ids.append(r['id'])
for port_db in ports or []:
port = self._core_plugin._make_port_dict(port_db)
self._add_redundancy_router_interfaces(
context, user_visible_router, None, port,
redundancy_r_ids, ha_settings_db, create_ha_group) | [
"def",
"_add_redundancy_routers",
"(",
"self",
",",
"context",
",",
"start_index",
",",
"stop_index",
",",
"user_visible_router",
",",
"ports",
"=",
"None",
",",
"ha_settings_db",
"=",
"None",
",",
"create_ha_group",
"=",
"True",
")",
":",
"priority",
"=",
"("... | Creates a redundancy router and its interfaces on
the specified subnets. | [
"Creates",
"a",
"redundancy",
"router",
"and",
"its",
"interfaces",
"on",
"the",
"specified",
"subnets",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/plugins/cisco/db/l3/ha_db.py#L469-L513 | train | 37,722 |
openstack/networking-cisco | networking_cisco/plugins/cisco/db/l3/ha_db.py | HA_db_mixin._remove_redundancy_routers | def _remove_redundancy_routers(self, context, router_ids, ports,
delete_ha_groups=False):
"""Deletes all interfaces of the specified redundancy routers
and then the redundancy routers themselves.
"""
subnets_info = [{'subnet_id': port['fixed_ips'][0]['subnet_id']}
for port in ports]
for r_id in router_ids:
for i in range(len(subnets_info)):
self.remove_router_interface(context, r_id, subnets_info[i])
LOG.debug("Removed interface on %(s_id)s to redundancy router "
"with %(r_id)s",
{'s_id': ports[i]['network_id'], 'r_id': r_id})
# There is only one ha group per network so only delete once
if delete_ha_groups and r_id == router_ids[0]:
self._delete_ha_group(context, ports[i]['id'])
self.delete_router(context, r_id)
LOG.debug("Deleted redundancy router %s", r_id) | python | def _remove_redundancy_routers(self, context, router_ids, ports,
delete_ha_groups=False):
"""Deletes all interfaces of the specified redundancy routers
and then the redundancy routers themselves.
"""
subnets_info = [{'subnet_id': port['fixed_ips'][0]['subnet_id']}
for port in ports]
for r_id in router_ids:
for i in range(len(subnets_info)):
self.remove_router_interface(context, r_id, subnets_info[i])
LOG.debug("Removed interface on %(s_id)s to redundancy router "
"with %(r_id)s",
{'s_id': ports[i]['network_id'], 'r_id': r_id})
# There is only one ha group per network so only delete once
if delete_ha_groups and r_id == router_ids[0]:
self._delete_ha_group(context, ports[i]['id'])
self.delete_router(context, r_id)
LOG.debug("Deleted redundancy router %s", r_id) | [
"def",
"_remove_redundancy_routers",
"(",
"self",
",",
"context",
",",
"router_ids",
",",
"ports",
",",
"delete_ha_groups",
"=",
"False",
")",
":",
"subnets_info",
"=",
"[",
"{",
"'subnet_id'",
":",
"port",
"[",
"'fixed_ips'",
"]",
"[",
"0",
"]",
"[",
"'su... | Deletes all interfaces of the specified redundancy routers
and then the redundancy routers themselves. | [
"Deletes",
"all",
"interfaces",
"of",
"the",
"specified",
"redundancy",
"routers",
"and",
"then",
"the",
"redundancy",
"routers",
"themselves",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/plugins/cisco/db/l3/ha_db.py#L515-L532 | train | 37,723 |
openstack/networking-cisco | networking_cisco/plugins/cisco/db/l3/ha_db.py | HA_db_mixin._update_redundancy_router_interfaces | def _update_redundancy_router_interfaces(self, context, router,
port, modified_port_data,
redundancy_router_ids=None,
ha_settings_db=None):
"""To be called when the router interfaces are updated,
like in the case of change in port admin_state_up status
"""
router_id = router['id']
if ha_settings_db is None:
ha_settings_db = self._get_ha_settings_by_router_id(context,
router_id)
if ha_settings_db is None:
return
e_context = context.elevated()
rr_ids = self._get_redundancy_router_ids(e_context, router_id)
port_info_list = self._core_plugin.get_ports(
e_context, filters={'device_id': rr_ids,
'network_id': [port['network_id']]},
fields=['device_id', 'id'])
for port_info in port_info_list:
self._core_plugin.update_port(e_context, port_info['id'],
modified_port_data)
self._update_hidden_port(e_context, port['id'], modified_port_data) | python | def _update_redundancy_router_interfaces(self, context, router,
port, modified_port_data,
redundancy_router_ids=None,
ha_settings_db=None):
"""To be called when the router interfaces are updated,
like in the case of change in port admin_state_up status
"""
router_id = router['id']
if ha_settings_db is None:
ha_settings_db = self._get_ha_settings_by_router_id(context,
router_id)
if ha_settings_db is None:
return
e_context = context.elevated()
rr_ids = self._get_redundancy_router_ids(e_context, router_id)
port_info_list = self._core_plugin.get_ports(
e_context, filters={'device_id': rr_ids,
'network_id': [port['network_id']]},
fields=['device_id', 'id'])
for port_info in port_info_list:
self._core_plugin.update_port(e_context, port_info['id'],
modified_port_data)
self._update_hidden_port(e_context, port['id'], modified_port_data) | [
"def",
"_update_redundancy_router_interfaces",
"(",
"self",
",",
"context",
",",
"router",
",",
"port",
",",
"modified_port_data",
",",
"redundancy_router_ids",
"=",
"None",
",",
"ha_settings_db",
"=",
"None",
")",
":",
"router_id",
"=",
"router",
"[",
"'id'",
"... | To be called when the router interfaces are updated,
like in the case of change in port admin_state_up status | [
"To",
"be",
"called",
"when",
"the",
"router",
"interfaces",
"are",
"updated",
"like",
"in",
"the",
"case",
"of",
"change",
"in",
"port",
"admin_state_up",
"status"
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/plugins/cisco/db/l3/ha_db.py#L632-L655 | train | 37,724 |
openstack/networking-cisco | networking_cisco/plugins/cisco/db/l3/ha_db.py | HA_db_mixin._create_hidden_port | def _create_hidden_port(self, context, network_id, device_id, fixed_ips,
port_type=DEVICE_OWNER_ROUTER_INTF):
"""Creates port used specially for HA purposes."""
port = {'port': {
'tenant_id': '', # intentionally not set
'network_id': network_id,
'mac_address': ATTR_NOT_SPECIFIED,
'fixed_ips': fixed_ips,
'device_id': device_id,
'device_owner': port_type,
'admin_state_up': True,
'name': ''}}
if extensions.is_extension_supported(self._core_plugin,
"dns-integration"):
port['port'].update(dns_name='')
core_plugin = bc.get_plugin()
return core_plugin.create_port(context, port) | python | def _create_hidden_port(self, context, network_id, device_id, fixed_ips,
port_type=DEVICE_OWNER_ROUTER_INTF):
"""Creates port used specially for HA purposes."""
port = {'port': {
'tenant_id': '', # intentionally not set
'network_id': network_id,
'mac_address': ATTR_NOT_SPECIFIED,
'fixed_ips': fixed_ips,
'device_id': device_id,
'device_owner': port_type,
'admin_state_up': True,
'name': ''}}
if extensions.is_extension_supported(self._core_plugin,
"dns-integration"):
port['port'].update(dns_name='')
core_plugin = bc.get_plugin()
return core_plugin.create_port(context, port) | [
"def",
"_create_hidden_port",
"(",
"self",
",",
"context",
",",
"network_id",
",",
"device_id",
",",
"fixed_ips",
",",
"port_type",
"=",
"DEVICE_OWNER_ROUTER_INTF",
")",
":",
"port",
"=",
"{",
"'port'",
":",
"{",
"'tenant_id'",
":",
"''",
",",
"# intentionally... | Creates port used specially for HA purposes. | [
"Creates",
"port",
"used",
"specially",
"for",
"HA",
"purposes",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/plugins/cisco/db/l3/ha_db.py#L936-L952 | train | 37,725 |
openstack/networking-cisco | networking_cisco/apps/saf/server/services/firewall/native/drivers/dev_mgr.py | MaxSched.allocate_fw_dev | def allocate_fw_dev(self, fw_id):
"""Allocate firewall device.
Allocate the first Firewall device which has resources available.
"""
for cnt in self.res:
used = self.res.get(cnt).get('used')
if used < self.res.get(cnt).get('quota'):
self.res[cnt]['used'] = used + 1
self.res[cnt]['fw_id_lst'].append(fw_id)
return self.res[cnt].get('obj_dict'), (
self.res[cnt].get('mgmt_ip'))
return None, None | python | def allocate_fw_dev(self, fw_id):
"""Allocate firewall device.
Allocate the first Firewall device which has resources available.
"""
for cnt in self.res:
used = self.res.get(cnt).get('used')
if used < self.res.get(cnt).get('quota'):
self.res[cnt]['used'] = used + 1
self.res[cnt]['fw_id_lst'].append(fw_id)
return self.res[cnt].get('obj_dict'), (
self.res[cnt].get('mgmt_ip'))
return None, None | [
"def",
"allocate_fw_dev",
"(",
"self",
",",
"fw_id",
")",
":",
"for",
"cnt",
"in",
"self",
".",
"res",
":",
"used",
"=",
"self",
".",
"res",
".",
"get",
"(",
"cnt",
")",
".",
"get",
"(",
"'used'",
")",
"if",
"used",
"<",
"self",
".",
"res",
"."... | Allocate firewall device.
Allocate the first Firewall device which has resources available. | [
"Allocate",
"firewall",
"device",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/services/firewall/native/drivers/dev_mgr.py#L51-L63 | train | 37,726 |
openstack/networking-cisco | networking_cisco/apps/saf/server/services/firewall/native/drivers/dev_mgr.py | MaxSched.populate_fw_dev | def populate_fw_dev(self, fw_id, mgmt_ip, new):
"""Populate the class after a restart. """
for cnt in self.res:
used = self.res.get(cnt).get('used')
if mgmt_ip == self.res[cnt].get('mgmt_ip'):
if new:
self.res[cnt]['used'] = used + 1
self.res[cnt]['fw_id_lst'].append(fw_id)
return self.res[cnt].get('obj_dict'), (
self.res[cnt].get('mgmt_ip'))
return None, None | python | def populate_fw_dev(self, fw_id, mgmt_ip, new):
"""Populate the class after a restart. """
for cnt in self.res:
used = self.res.get(cnt).get('used')
if mgmt_ip == self.res[cnt].get('mgmt_ip'):
if new:
self.res[cnt]['used'] = used + 1
self.res[cnt]['fw_id_lst'].append(fw_id)
return self.res[cnt].get('obj_dict'), (
self.res[cnt].get('mgmt_ip'))
return None, None | [
"def",
"populate_fw_dev",
"(",
"self",
",",
"fw_id",
",",
"mgmt_ip",
",",
"new",
")",
":",
"for",
"cnt",
"in",
"self",
".",
"res",
":",
"used",
"=",
"self",
".",
"res",
".",
"get",
"(",
"cnt",
")",
".",
"get",
"(",
"'used'",
")",
"if",
"mgmt_ip",... | Populate the class after a restart. | [
"Populate",
"the",
"class",
"after",
"a",
"restart",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/services/firewall/native/drivers/dev_mgr.py#L65-L75 | train | 37,727 |
openstack/networking-cisco | networking_cisco/apps/saf/server/services/firewall/native/drivers/dev_mgr.py | MaxSched.get_fw_dev_map | def get_fw_dev_map(self, fw_id):
"""Return the object dict and mgmt ip for a firewall. """
for cnt in self.res:
if fw_id in self.res.get(cnt).get('fw_id_lst'):
return self.res[cnt].get('obj_dict'), (
self.res[cnt].get('mgmt_ip'))
return None, None | python | def get_fw_dev_map(self, fw_id):
"""Return the object dict and mgmt ip for a firewall. """
for cnt in self.res:
if fw_id in self.res.get(cnt).get('fw_id_lst'):
return self.res[cnt].get('obj_dict'), (
self.res[cnt].get('mgmt_ip'))
return None, None | [
"def",
"get_fw_dev_map",
"(",
"self",
",",
"fw_id",
")",
":",
"for",
"cnt",
"in",
"self",
".",
"res",
":",
"if",
"fw_id",
"in",
"self",
".",
"res",
".",
"get",
"(",
"cnt",
")",
".",
"get",
"(",
"'fw_id_lst'",
")",
":",
"return",
"self",
".",
"res... | Return the object dict and mgmt ip for a firewall. | [
"Return",
"the",
"object",
"dict",
"and",
"mgmt",
"ip",
"for",
"a",
"firewall",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/services/firewall/native/drivers/dev_mgr.py#L77-L83 | train | 37,728 |
openstack/networking-cisco | networking_cisco/apps/saf/server/services/firewall/native/drivers/dev_mgr.py | MaxSched.deallocate_fw_dev | def deallocate_fw_dev(self, fw_id):
"""Release the firewall resource. """
for cnt in self.res:
if fw_id in self.res.get(cnt).get('fw_id_lst'):
self.res[cnt]['used'] = self.res[cnt]['used'] - 1
self.res.get(cnt).get('fw_id_lst').remove(fw_id)
return | python | def deallocate_fw_dev(self, fw_id):
"""Release the firewall resource. """
for cnt in self.res:
if fw_id in self.res.get(cnt).get('fw_id_lst'):
self.res[cnt]['used'] = self.res[cnt]['used'] - 1
self.res.get(cnt).get('fw_id_lst').remove(fw_id)
return | [
"def",
"deallocate_fw_dev",
"(",
"self",
",",
"fw_id",
")",
":",
"for",
"cnt",
"in",
"self",
".",
"res",
":",
"if",
"fw_id",
"in",
"self",
".",
"res",
".",
"get",
"(",
"cnt",
")",
".",
"get",
"(",
"'fw_id_lst'",
")",
":",
"self",
".",
"res",
"[",... | Release the firewall resource. | [
"Release",
"the",
"firewall",
"resource",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/services/firewall/native/drivers/dev_mgr.py#L85-L91 | train | 37,729 |
openstack/networking-cisco | networking_cisco/apps/saf/server/services/firewall/native/drivers/dev_mgr.py | DeviceMgr.populate_local_sch_cache | def populate_local_sch_cache(self, fw_dict):
"""Populate the local cache from FW DB after restart. """
for fw_id in fw_dict:
fw_data = fw_dict.get(fw_id)
mgmt_ip = fw_data.get('fw_mgmt_ip')
dev_status = fw_data.get('device_status')
if dev_status == 'SUCCESS':
new = True
else:
new = False
if mgmt_ip is not None:
drvr_dict, mgmt_ip = self.sched_obj.populate_fw_dev(fw_id,
mgmt_ip,
new)
if drvr_dict is None or mgmt_ip is None:
LOG.info("Pop cache for FW sch: drvr_dict or mgmt_ip "
"is None") | python | def populate_local_sch_cache(self, fw_dict):
"""Populate the local cache from FW DB after restart. """
for fw_id in fw_dict:
fw_data = fw_dict.get(fw_id)
mgmt_ip = fw_data.get('fw_mgmt_ip')
dev_status = fw_data.get('device_status')
if dev_status == 'SUCCESS':
new = True
else:
new = False
if mgmt_ip is not None:
drvr_dict, mgmt_ip = self.sched_obj.populate_fw_dev(fw_id,
mgmt_ip,
new)
if drvr_dict is None or mgmt_ip is None:
LOG.info("Pop cache for FW sch: drvr_dict or mgmt_ip "
"is None") | [
"def",
"populate_local_sch_cache",
"(",
"self",
",",
"fw_dict",
")",
":",
"for",
"fw_id",
"in",
"fw_dict",
":",
"fw_data",
"=",
"fw_dict",
".",
"get",
"(",
"fw_id",
")",
"mgmt_ip",
"=",
"fw_data",
".",
"get",
"(",
"'fw_mgmt_ip'",
")",
"dev_status",
"=",
... | Populate the local cache from FW DB after restart. | [
"Populate",
"the",
"local",
"cache",
"from",
"FW",
"DB",
"after",
"restart",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/services/firewall/native/drivers/dev_mgr.py#L132-L148 | train | 37,730 |
openstack/networking-cisco | networking_cisco/apps/saf/server/services/firewall/native/drivers/dev_mgr.py | DeviceMgr.drvr_initialize | def drvr_initialize(self, cfg):
"""Initialize the driver routines. """
cnt = 0
for ip in self.obj_dict:
cfg_dict = {}
drvr_obj = self.obj_dict.get(ip).get('drvr_obj')
cfg_dict['mgmt_ip_addr'] = ip
if self.user_list is not None:
cfg_dict['user'] = self.user_list[cnt]
if self.pwd_list is not None:
cfg_dict['pwd'] = self.pwd_list[cnt]
if self.interface_in_list is not None:
cfg_dict['interface_in'] = self.interface_in_list[cnt]
if self.interface_out_list is not None:
cfg_dict['interface_out'] = self.interface_out_list[cnt]
drvr_obj.initialize(cfg_dict)
cnt = cnt + 1 | python | def drvr_initialize(self, cfg):
"""Initialize the driver routines. """
cnt = 0
for ip in self.obj_dict:
cfg_dict = {}
drvr_obj = self.obj_dict.get(ip).get('drvr_obj')
cfg_dict['mgmt_ip_addr'] = ip
if self.user_list is not None:
cfg_dict['user'] = self.user_list[cnt]
if self.pwd_list is not None:
cfg_dict['pwd'] = self.pwd_list[cnt]
if self.interface_in_list is not None:
cfg_dict['interface_in'] = self.interface_in_list[cnt]
if self.interface_out_list is not None:
cfg_dict['interface_out'] = self.interface_out_list[cnt]
drvr_obj.initialize(cfg_dict)
cnt = cnt + 1 | [
"def",
"drvr_initialize",
"(",
"self",
",",
"cfg",
")",
":",
"cnt",
"=",
"0",
"for",
"ip",
"in",
"self",
".",
"obj_dict",
":",
"cfg_dict",
"=",
"{",
"}",
"drvr_obj",
"=",
"self",
".",
"obj_dict",
".",
"get",
"(",
"ip",
")",
".",
"get",
"(",
"'drv... | Initialize the driver routines. | [
"Initialize",
"the",
"driver",
"routines",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/services/firewall/native/drivers/dev_mgr.py#L150-L166 | train | 37,731 |
openstack/networking-cisco | networking_cisco/apps/saf/server/services/firewall/native/drivers/dev_mgr.py | DeviceMgr.populate_event_que | def populate_event_que(self, que_obj):
"""Populates the event queue object.
This is for sending router events to event handler.
"""
for ip in self.obj_dict:
drvr_obj = self.obj_dict.get(ip).get('drvr_obj')
drvr_obj.populate_event_que(que_obj) | python | def populate_event_que(self, que_obj):
"""Populates the event queue object.
This is for sending router events to event handler.
"""
for ip in self.obj_dict:
drvr_obj = self.obj_dict.get(ip).get('drvr_obj')
drvr_obj.populate_event_que(que_obj) | [
"def",
"populate_event_que",
"(",
"self",
",",
"que_obj",
")",
":",
"for",
"ip",
"in",
"self",
".",
"obj_dict",
":",
"drvr_obj",
"=",
"self",
".",
"obj_dict",
".",
"get",
"(",
"ip",
")",
".",
"get",
"(",
"'drvr_obj'",
")",
"drvr_obj",
".",
"populate_ev... | Populates the event queue object.
This is for sending router events to event handler. | [
"Populates",
"the",
"event",
"queue",
"object",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/services/firewall/native/drivers/dev_mgr.py#L168-L175 | train | 37,732 |
openstack/networking-cisco | networking_cisco/apps/saf/server/services/firewall/native/drivers/dev_mgr.py | DeviceMgr.populate_dcnm_obj | def populate_dcnm_obj(self, dcnm_obj):
"""Populates the DCNM object. """
for ip in self.obj_dict:
drvr_obj = self.obj_dict.get(ip).get('drvr_obj')
drvr_obj.populate_dcnm_obj(dcnm_obj) | python | def populate_dcnm_obj(self, dcnm_obj):
"""Populates the DCNM object. """
for ip in self.obj_dict:
drvr_obj = self.obj_dict.get(ip).get('drvr_obj')
drvr_obj.populate_dcnm_obj(dcnm_obj) | [
"def",
"populate_dcnm_obj",
"(",
"self",
",",
"dcnm_obj",
")",
":",
"for",
"ip",
"in",
"self",
".",
"obj_dict",
":",
"drvr_obj",
"=",
"self",
".",
"obj_dict",
".",
"get",
"(",
"ip",
")",
".",
"get",
"(",
"'drvr_obj'",
")",
"drvr_obj",
".",
"populate_dc... | Populates the DCNM object. | [
"Populates",
"the",
"DCNM",
"object",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/services/firewall/native/drivers/dev_mgr.py#L177-L181 | train | 37,733 |
openstack/networking-cisco | networking_cisco/apps/saf/server/services/firewall/native/drivers/dev_mgr.py | DeviceMgr.is_device_virtual | def is_device_virtual(self):
"""Returns if the device is physical or virtual. """
for ip in self.obj_dict:
drvr_obj = self.obj_dict.get(ip).get('drvr_obj')
ret = drvr_obj.is_device_virtual()
# No way to pin a device as of now, so return the first
# TODO(padkrish)
return ret | python | def is_device_virtual(self):
"""Returns if the device is physical or virtual. """
for ip in self.obj_dict:
drvr_obj = self.obj_dict.get(ip).get('drvr_obj')
ret = drvr_obj.is_device_virtual()
# No way to pin a device as of now, so return the first
# TODO(padkrish)
return ret | [
"def",
"is_device_virtual",
"(",
"self",
")",
":",
"for",
"ip",
"in",
"self",
".",
"obj_dict",
":",
"drvr_obj",
"=",
"self",
".",
"obj_dict",
".",
"get",
"(",
"ip",
")",
".",
"get",
"(",
"'drvr_obj'",
")",
"ret",
"=",
"drvr_obj",
".",
"is_device_virtua... | Returns if the device is physical or virtual. | [
"Returns",
"if",
"the",
"device",
"is",
"physical",
"or",
"virtual",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/services/firewall/native/drivers/dev_mgr.py#L183-L190 | train | 37,734 |
openstack/networking-cisco | networking_cisco/apps/saf/server/services/firewall/native/drivers/dev_mgr.py | DeviceMgr.create_fw_device | def create_fw_device(self, tenant_id, fw_id, data):
"""Creates the Firewall. """
drvr_dict, mgmt_ip = self.sched_obj.allocate_fw_dev(fw_id)
if drvr_dict is not None and mgmt_ip is not None:
self.update_fw_db_mgmt_ip(fw_id, mgmt_ip)
ret = drvr_dict.get('drvr_obj').create_fw(tenant_id, data)
if not ret:
self.sched_obj.deallocate_fw_dev(fw_id)
return ret
else:
return False | python | def create_fw_device(self, tenant_id, fw_id, data):
"""Creates the Firewall. """
drvr_dict, mgmt_ip = self.sched_obj.allocate_fw_dev(fw_id)
if drvr_dict is not None and mgmt_ip is not None:
self.update_fw_db_mgmt_ip(fw_id, mgmt_ip)
ret = drvr_dict.get('drvr_obj').create_fw(tenant_id, data)
if not ret:
self.sched_obj.deallocate_fw_dev(fw_id)
return ret
else:
return False | [
"def",
"create_fw_device",
"(",
"self",
",",
"tenant_id",
",",
"fw_id",
",",
"data",
")",
":",
"drvr_dict",
",",
"mgmt_ip",
"=",
"self",
".",
"sched_obj",
".",
"allocate_fw_dev",
"(",
"fw_id",
")",
"if",
"drvr_dict",
"is",
"not",
"None",
"and",
"mgmt_ip",
... | Creates the Firewall. | [
"Creates",
"the",
"Firewall",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/services/firewall/native/drivers/dev_mgr.py#L192-L202 | train | 37,735 |
openstack/networking-cisco | networking_cisco/apps/saf/server/services/firewall/native/drivers/dev_mgr.py | DeviceMgr.modify_fw_device | def modify_fw_device(self, tenant_id, fw_id, data):
"""Modifies the firewall cfg. """
drvr_dict, mgmt_ip = self.sched_obj.get_fw_dev_map(fw_id)
return drvr_dict.get('drvr_obj').modify_fw(tenant_id, data) | python | def modify_fw_device(self, tenant_id, fw_id, data):
"""Modifies the firewall cfg. """
drvr_dict, mgmt_ip = self.sched_obj.get_fw_dev_map(fw_id)
return drvr_dict.get('drvr_obj').modify_fw(tenant_id, data) | [
"def",
"modify_fw_device",
"(",
"self",
",",
"tenant_id",
",",
"fw_id",
",",
"data",
")",
":",
"drvr_dict",
",",
"mgmt_ip",
"=",
"self",
".",
"sched_obj",
".",
"get_fw_dev_map",
"(",
"fw_id",
")",
"return",
"drvr_dict",
".",
"get",
"(",
"'drvr_obj'",
")",
... | Modifies the firewall cfg. | [
"Modifies",
"the",
"firewall",
"cfg",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/services/firewall/native/drivers/dev_mgr.py#L213-L216 | train | 37,736 |
openstack/networking-cisco | networking_cisco/apps/saf/server/services/firewall/native/drivers/dev_mgr.py | DeviceMgr.network_create_notif | def network_create_notif(self, tenant_id, tenant_name, cidr):
"""Notification for Network create.
Since FW ID not present, it's not possible to know which FW instance
to call. So, calling everyone, each instance will figure out if it
applies to them.
"""
for ip in self.obj_dict:
drvr_obj = self.obj_dict.get(ip).get('drvr_obj')
ret = drvr_obj.network_create_notif(tenant_id, tenant_name, cidr)
LOG.info("Driver with IP %(ip)s return %(ret)s",
{'ip': ip, 'ret': ret}) | python | def network_create_notif(self, tenant_id, tenant_name, cidr):
"""Notification for Network create.
Since FW ID not present, it's not possible to know which FW instance
to call. So, calling everyone, each instance will figure out if it
applies to them.
"""
for ip in self.obj_dict:
drvr_obj = self.obj_dict.get(ip).get('drvr_obj')
ret = drvr_obj.network_create_notif(tenant_id, tenant_name, cidr)
LOG.info("Driver with IP %(ip)s return %(ret)s",
{'ip': ip, 'ret': ret}) | [
"def",
"network_create_notif",
"(",
"self",
",",
"tenant_id",
",",
"tenant_name",
",",
"cidr",
")",
":",
"for",
"ip",
"in",
"self",
".",
"obj_dict",
":",
"drvr_obj",
"=",
"self",
".",
"obj_dict",
".",
"get",
"(",
"ip",
")",
".",
"get",
"(",
"'drvr_obj'... | Notification for Network create.
Since FW ID not present, it's not possible to know which FW instance
to call. So, calling everyone, each instance will figure out if it
applies to them. | [
"Notification",
"for",
"Network",
"create",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/services/firewall/native/drivers/dev_mgr.py#L218-L229 | train | 37,737 |
openstack/networking-cisco | networking_cisco/apps/saf/server/services/firewall/native/drivers/dev_mgr.py | DeviceMgr.network_delete_notif | def network_delete_notif(self, tenant_id, tenant_name, net_id):
"""Notification for Network delete.
Since FW ID not present, it's not possible to know which FW instance
to call. So, calling everyone, each instance will figure out if it
applies to them.
"""
for ip in self.obj_dict:
drvr_obj = self.obj_dict.get(ip).get('drvr_obj')
ret = drvr_obj.network_delete_notif(tenant_id, tenant_name,
net_id)
LOG.info("Driver with IP %(ip)s return %(ret)s for network "
"delete notification", {'ip': ip, 'ret': ret}) | python | def network_delete_notif(self, tenant_id, tenant_name, net_id):
"""Notification for Network delete.
Since FW ID not present, it's not possible to know which FW instance
to call. So, calling everyone, each instance will figure out if it
applies to them.
"""
for ip in self.obj_dict:
drvr_obj = self.obj_dict.get(ip).get('drvr_obj')
ret = drvr_obj.network_delete_notif(tenant_id, tenant_name,
net_id)
LOG.info("Driver with IP %(ip)s return %(ret)s for network "
"delete notification", {'ip': ip, 'ret': ret}) | [
"def",
"network_delete_notif",
"(",
"self",
",",
"tenant_id",
",",
"tenant_name",
",",
"net_id",
")",
":",
"for",
"ip",
"in",
"self",
".",
"obj_dict",
":",
"drvr_obj",
"=",
"self",
".",
"obj_dict",
".",
"get",
"(",
"ip",
")",
".",
"get",
"(",
"'drvr_ob... | Notification for Network delete.
Since FW ID not present, it's not possible to know which FW instance
to call. So, calling everyone, each instance will figure out if it
applies to them. | [
"Notification",
"for",
"Network",
"delete",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/services/firewall/native/drivers/dev_mgr.py#L231-L243 | train | 37,738 |
openstack/networking-cisco | networking_cisco/plugins/cisco/l3/rpc/l3_router_cfg_agent_rpc_cb.py | L3RouterCfgRpcCallback.cfg_sync_routers | def cfg_sync_routers(self, context, host, router_ids=None,
hosting_device_ids=None):
"""Sync routers according to filters to a specific Cisco cfg agent.
:param context: contains user information
:param host: originator of callback
:param router_ids: list of router ids to return information about
:param hosting_device_ids: list of hosting device ids to get
routers for.
:returns: a list of routers with their hosting devices, interfaces and
floating_ips
"""
adm_context = bc.context.get_admin_context()
try:
routers = (
self._l3plugin.list_active_sync_routers_on_hosting_devices(
adm_context, host, router_ids, hosting_device_ids))
except AttributeError:
routers = []
LOG.debug('Routers returned to Cisco cfg agent@%(agt)s:\n %(routers)s',
{'agt': host, 'routers': jsonutils.dumps(routers, indent=5)})
return routers | python | def cfg_sync_routers(self, context, host, router_ids=None,
hosting_device_ids=None):
"""Sync routers according to filters to a specific Cisco cfg agent.
:param context: contains user information
:param host: originator of callback
:param router_ids: list of router ids to return information about
:param hosting_device_ids: list of hosting device ids to get
routers for.
:returns: a list of routers with their hosting devices, interfaces and
floating_ips
"""
adm_context = bc.context.get_admin_context()
try:
routers = (
self._l3plugin.list_active_sync_routers_on_hosting_devices(
adm_context, host, router_ids, hosting_device_ids))
except AttributeError:
routers = []
LOG.debug('Routers returned to Cisco cfg agent@%(agt)s:\n %(routers)s',
{'agt': host, 'routers': jsonutils.dumps(routers, indent=5)})
return routers | [
"def",
"cfg_sync_routers",
"(",
"self",
",",
"context",
",",
"host",
",",
"router_ids",
"=",
"None",
",",
"hosting_device_ids",
"=",
"None",
")",
":",
"adm_context",
"=",
"bc",
".",
"context",
".",
"get_admin_context",
"(",
")",
"try",
":",
"routers",
"=",... | Sync routers according to filters to a specific Cisco cfg agent.
:param context: contains user information
:param host: originator of callback
:param router_ids: list of router ids to return information about
:param hosting_device_ids: list of hosting device ids to get
routers for.
:returns: a list of routers with their hosting devices, interfaces and
floating_ips | [
"Sync",
"routers",
"according",
"to",
"filters",
"to",
"a",
"specific",
"Cisco",
"cfg",
"agent",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/plugins/cisco/l3/rpc/l3_router_cfg_agent_rpc_cb.py#L47-L68 | train | 37,739 |
openstack/networking-cisco | networking_cisco/plugins/cisco/l3/rpc/l3_router_cfg_agent_rpc_cb.py | L3RouterCfgRpcCallback.update_floatingip_statuses_cfg | def update_floatingip_statuses_cfg(self, context, router_id, fip_statuses):
"""Update operational status for one or several floating IPs.
This is called by Cisco cfg agent to update the status of one or
several floatingips.
:param context: contains user information
:param router_id: id of router associated with the floatingips
:param router_id: dict with floatingip_id as key and status as value
"""
with context.session.begin(subtransactions=True):
for (floatingip_id, status) in six.iteritems(fip_statuses):
LOG.debug("New status for floating IP %(floatingip_id)s: "
"%(status)s", {'floatingip_id': floatingip_id,
'status': status})
try:
self._l3plugin.update_floatingip_status(
context, floatingip_id, status)
except l3_exceptions.FloatingIPNotFound:
LOG.debug("Floating IP: %s no longer present.",
floatingip_id)
# Find all floating IPs known to have been the given router
# for which an update was not received. Set them DOWN mercilessly
# This situation might occur for some asynchronous backends if
# notifications were missed
known_router_fips = self._l3plugin.get_floatingips(
context, {'last_known_router_id': [router_id]})
# Consider only floating ips which were disassociated in the API
fips_to_disable = (fip['id'] for fip in known_router_fips
if not fip['router_id'])
for fip_id in fips_to_disable:
LOG.debug("update_fip_statuses: disable: %s", fip_id)
self._l3plugin.update_floatingip_status(
context, fip_id, bc.constants.FLOATINGIP_STATUS_DOWN) | python | def update_floatingip_statuses_cfg(self, context, router_id, fip_statuses):
"""Update operational status for one or several floating IPs.
This is called by Cisco cfg agent to update the status of one or
several floatingips.
:param context: contains user information
:param router_id: id of router associated with the floatingips
:param router_id: dict with floatingip_id as key and status as value
"""
with context.session.begin(subtransactions=True):
for (floatingip_id, status) in six.iteritems(fip_statuses):
LOG.debug("New status for floating IP %(floatingip_id)s: "
"%(status)s", {'floatingip_id': floatingip_id,
'status': status})
try:
self._l3plugin.update_floatingip_status(
context, floatingip_id, status)
except l3_exceptions.FloatingIPNotFound:
LOG.debug("Floating IP: %s no longer present.",
floatingip_id)
# Find all floating IPs known to have been the given router
# for which an update was not received. Set them DOWN mercilessly
# This situation might occur for some asynchronous backends if
# notifications were missed
known_router_fips = self._l3plugin.get_floatingips(
context, {'last_known_router_id': [router_id]})
# Consider only floating ips which were disassociated in the API
fips_to_disable = (fip['id'] for fip in known_router_fips
if not fip['router_id'])
for fip_id in fips_to_disable:
LOG.debug("update_fip_statuses: disable: %s", fip_id)
self._l3plugin.update_floatingip_status(
context, fip_id, bc.constants.FLOATINGIP_STATUS_DOWN) | [
"def",
"update_floatingip_statuses_cfg",
"(",
"self",
",",
"context",
",",
"router_id",
",",
"fip_statuses",
")",
":",
"with",
"context",
".",
"session",
".",
"begin",
"(",
"subtransactions",
"=",
"True",
")",
":",
"for",
"(",
"floatingip_id",
",",
"status",
... | Update operational status for one or several floating IPs.
This is called by Cisco cfg agent to update the status of one or
several floatingips.
:param context: contains user information
:param router_id: id of router associated with the floatingips
:param router_id: dict with floatingip_id as key and status as value | [
"Update",
"operational",
"status",
"for",
"one",
"or",
"several",
"floating",
"IPs",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/plugins/cisco/l3/rpc/l3_router_cfg_agent_rpc_cb.py#L112-L145 | train | 37,740 |
openstack/networking-cisco | networking_cisco/plugins/cisco/l3/rpc/l3_router_cfg_agent_rpc_cb.py | L3RouterCfgRpcCallback.update_port_statuses_cfg | def update_port_statuses_cfg(self, context, port_ids, status):
"""Update the operational statuses of a list of router ports.
This is called by the Cisco cfg agent to update the status of a list
of ports.
:param context: contains user information
:param port_ids: list of ids of all the ports for the given status
:param status: PORT_STATUS_ACTIVE/PORT_STATUS_DOWN.
"""
self._l3plugin.update_router_port_statuses(context, port_ids,
status) | python | def update_port_statuses_cfg(self, context, port_ids, status):
"""Update the operational statuses of a list of router ports.
This is called by the Cisco cfg agent to update the status of a list
of ports.
:param context: contains user information
:param port_ids: list of ids of all the ports for the given status
:param status: PORT_STATUS_ACTIVE/PORT_STATUS_DOWN.
"""
self._l3plugin.update_router_port_statuses(context, port_ids,
status) | [
"def",
"update_port_statuses_cfg",
"(",
"self",
",",
"context",
",",
"port_ids",
",",
"status",
")",
":",
"self",
".",
"_l3plugin",
".",
"update_router_port_statuses",
"(",
"context",
",",
"port_ids",
",",
"status",
")"
] | Update the operational statuses of a list of router ports.
This is called by the Cisco cfg agent to update the status of a list
of ports.
:param context: contains user information
:param port_ids: list of ids of all the ports for the given status
:param status: PORT_STATUS_ACTIVE/PORT_STATUS_DOWN. | [
"Update",
"the",
"operational",
"statuses",
"of",
"a",
"list",
"of",
"router",
"ports",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/plugins/cisco/l3/rpc/l3_router_cfg_agent_rpc_cb.py#L148-L159 | train | 37,741 |
openstack/networking-cisco | tools/saf_prepare_setup.py | get_mysql_credentials | def get_mysql_credentials(cfg_file):
"""Get the credentials and database name from options in config file."""
try:
parser = ConfigParser.ConfigParser()
cfg_fp = open(cfg_file)
parser.readfp(cfg_fp)
cfg_fp.close()
except ConfigParser.NoOptionError:
cfg_fp.close()
print('Failed to find mysql connections credentials.')
sys.exit(1)
except IOError:
print('ERROR: Cannot open %s.', cfg_file)
sys.exit(1)
value = parser.get('dfa_mysql', 'connection')
try:
# Find location of pattern in connection parameter as shown below:
# http://username:password@host/databasename?characterset=encoding'
sobj = re.search(r"(://).*(@).*(/).*(\?)", value)
# The list parameter contains:
# indices[0], is the index of '://'
# indices[1], is the index of '@'
# indices[2], is the index of '/'
# indices[3], is the index of '?'
indices = [sobj.start(1), sobj.start(2), sobj.start(3), sobj.start(4)]
# Get the credentials
cred = value[indices[0] + 3:indices[1]].split(':')
# Get the host name
host = value[indices[1] + 1:indices[2]]
# Get the database name
db_name = value[indices[2] + 1:indices[3]]
# Get the character encoding
charset = value[indices[3] + 1:].split('=')[1]
return cred[0], cred[1], host, db_name, charset
except (ValueError, IndexError, AttributeError):
print('Failed to find mysql connections credentials.')
sys.exit(1) | python | def get_mysql_credentials(cfg_file):
"""Get the credentials and database name from options in config file."""
try:
parser = ConfigParser.ConfigParser()
cfg_fp = open(cfg_file)
parser.readfp(cfg_fp)
cfg_fp.close()
except ConfigParser.NoOptionError:
cfg_fp.close()
print('Failed to find mysql connections credentials.')
sys.exit(1)
except IOError:
print('ERROR: Cannot open %s.', cfg_file)
sys.exit(1)
value = parser.get('dfa_mysql', 'connection')
try:
# Find location of pattern in connection parameter as shown below:
# http://username:password@host/databasename?characterset=encoding'
sobj = re.search(r"(://).*(@).*(/).*(\?)", value)
# The list parameter contains:
# indices[0], is the index of '://'
# indices[1], is the index of '@'
# indices[2], is the index of '/'
# indices[3], is the index of '?'
indices = [sobj.start(1), sobj.start(2), sobj.start(3), sobj.start(4)]
# Get the credentials
cred = value[indices[0] + 3:indices[1]].split(':')
# Get the host name
host = value[indices[1] + 1:indices[2]]
# Get the database name
db_name = value[indices[2] + 1:indices[3]]
# Get the character encoding
charset = value[indices[3] + 1:].split('=')[1]
return cred[0], cred[1], host, db_name, charset
except (ValueError, IndexError, AttributeError):
print('Failed to find mysql connections credentials.')
sys.exit(1) | [
"def",
"get_mysql_credentials",
"(",
"cfg_file",
")",
":",
"try",
":",
"parser",
"=",
"ConfigParser",
".",
"ConfigParser",
"(",
")",
"cfg_fp",
"=",
"open",
"(",
"cfg_file",
")",
"parser",
".",
"readfp",
"(",
"cfg_fp",
")",
"cfg_fp",
".",
"close",
"(",
")... | Get the credentials and database name from options in config file. | [
"Get",
"the",
"credentials",
"and",
"database",
"name",
"from",
"options",
"in",
"config",
"file",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/tools/saf_prepare_setup.py#L103-L148 | train | 37,742 |
openstack/networking-cisco | tools/saf_prepare_setup.py | modify_conf | def modify_conf(cfgfile, service_name, outfn):
"""Modify config file neutron and keystone to include enabler options."""
if not cfgfile or not outfn:
print('ERROR: There is no config file.')
sys.exit(0)
options = service_options[service_name]
with open(cfgfile, 'r') as cf:
lines = cf.readlines()
for opt in options:
op = opt.get('option')
res = [line for line in lines if line.startswith(op)]
if len(res) > 1:
print('ERROR: There are more than one %s option.' % res)
sys.exit(0)
if res:
(op, sep, val) = (res[0].strip('\n').replace(' ', '').
partition('='))
new_val = None
if opt.get('is_list'):
# Value for this option can contain list of values.
# Append the value if it does not exist.
if not any(opt.get('value') == value for value in
val.split(',')):
new_val = ','.join((val, opt.get('value')))
else:
if val != opt.get('value'):
new_val = opt.get('value')
if new_val:
opt_idx = lines.index(res[0])
# The setting is different, replace it with new one.
lines.pop(opt_idx)
lines.insert(opt_idx, '='.join((opt.get('option'),
new_val + '\n')))
else:
# Option does not exist. Add the option.
try:
sec_idx = lines.index('[' + opt.get('section') + ']\n')
lines.insert(sec_idx + 1, '='.join(
(opt.get('option'), opt.get('value') + '\n')))
except ValueError:
print('Invalid %s section name.' % opt.get('section'))
sys.exit(0)
with open(outfn, 'w') as fwp:
all_lines = ''
for line in lines:
all_lines += line
fwp.write(all_lines) | python | def modify_conf(cfgfile, service_name, outfn):
"""Modify config file neutron and keystone to include enabler options."""
if not cfgfile or not outfn:
print('ERROR: There is no config file.')
sys.exit(0)
options = service_options[service_name]
with open(cfgfile, 'r') as cf:
lines = cf.readlines()
for opt in options:
op = opt.get('option')
res = [line for line in lines if line.startswith(op)]
if len(res) > 1:
print('ERROR: There are more than one %s option.' % res)
sys.exit(0)
if res:
(op, sep, val) = (res[0].strip('\n').replace(' ', '').
partition('='))
new_val = None
if opt.get('is_list'):
# Value for this option can contain list of values.
# Append the value if it does not exist.
if not any(opt.get('value') == value for value in
val.split(',')):
new_val = ','.join((val, opt.get('value')))
else:
if val != opt.get('value'):
new_val = opt.get('value')
if new_val:
opt_idx = lines.index(res[0])
# The setting is different, replace it with new one.
lines.pop(opt_idx)
lines.insert(opt_idx, '='.join((opt.get('option'),
new_val + '\n')))
else:
# Option does not exist. Add the option.
try:
sec_idx = lines.index('[' + opt.get('section') + ']\n')
lines.insert(sec_idx + 1, '='.join(
(opt.get('option'), opt.get('value') + '\n')))
except ValueError:
print('Invalid %s section name.' % opt.get('section'))
sys.exit(0)
with open(outfn, 'w') as fwp:
all_lines = ''
for line in lines:
all_lines += line
fwp.write(all_lines) | [
"def",
"modify_conf",
"(",
"cfgfile",
",",
"service_name",
",",
"outfn",
")",
":",
"if",
"not",
"cfgfile",
"or",
"not",
"outfn",
":",
"print",
"(",
"'ERROR: There is no config file.'",
")",
"sys",
".",
"exit",
"(",
"0",
")",
"options",
"=",
"service_options"... | Modify config file neutron and keystone to include enabler options. | [
"Modify",
"config",
"file",
"neutron",
"and",
"keystone",
"to",
"include",
"enabler",
"options",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/tools/saf_prepare_setup.py#L151-L202 | train | 37,743 |
openstack/networking-cisco | networking_cisco/plugins/cisco/cfg_agent/device_drivers/asr1k/asr1k_auto_config_check.py | CiscoDevMgrRPC.get_all_hosting_devices | def get_all_hosting_devices(self, context):
"""Get a list of all hosting devices."""
cctxt = self.client.prepare()
return cctxt.call(context,
'get_all_hosting_devices',
host=self.host) | python | def get_all_hosting_devices(self, context):
"""Get a list of all hosting devices."""
cctxt = self.client.prepare()
return cctxt.call(context,
'get_all_hosting_devices',
host=self.host) | [
"def",
"get_all_hosting_devices",
"(",
"self",
",",
"context",
")",
":",
"cctxt",
"=",
"self",
".",
"client",
".",
"prepare",
"(",
")",
"return",
"cctxt",
".",
"call",
"(",
"context",
",",
"'get_all_hosting_devices'",
",",
"host",
"=",
"self",
".",
"host",... | Get a list of all hosting devices. | [
"Get",
"a",
"list",
"of",
"all",
"hosting",
"devices",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/plugins/cisco/cfg_agent/device_drivers/asr1k/asr1k_auto_config_check.py#L46-L51 | train | 37,744 |
openstack/networking-cisco | networking_cisco/plugins/cisco/cfg_agent/device_drivers/asr1k/asr1k_auto_config_check.py | CiscoRoutingPluginRPC.get_all_hosted_routers | def get_all_hosted_routers(self, context):
"""Make a remote process call to retrieve the sync data for
routers that have been scheduled to a hosting device.
:param context: session context
"""
cctxt = self.client.prepare()
return cctxt.call(context, 'cfg_sync_all_hosted_routers',
host=self.host) | python | def get_all_hosted_routers(self, context):
"""Make a remote process call to retrieve the sync data for
routers that have been scheduled to a hosting device.
:param context: session context
"""
cctxt = self.client.prepare()
return cctxt.call(context, 'cfg_sync_all_hosted_routers',
host=self.host) | [
"def",
"get_all_hosted_routers",
"(",
"self",
",",
"context",
")",
":",
"cctxt",
"=",
"self",
".",
"client",
".",
"prepare",
"(",
")",
"return",
"cctxt",
".",
"call",
"(",
"context",
",",
"'cfg_sync_all_hosted_routers'",
",",
"host",
"=",
"self",
".",
"hos... | Make a remote process call to retrieve the sync data for
routers that have been scheduled to a hosting device.
:param context: session context | [
"Make",
"a",
"remote",
"process",
"call",
"to",
"retrieve",
"the",
"sync",
"data",
"for",
"routers",
"that",
"have",
"been",
"scheduled",
"to",
"a",
"hosting",
"device",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/plugins/cisco/cfg_agent/device_drivers/asr1k/asr1k_auto_config_check.py#L62-L70 | train | 37,745 |
openstack/networking-cisco | networking_cisco/plugins/cisco/cfg_agent/device_drivers/asr1k/asr1k_auto_config_check.py | CiscoRoutingPluginRPC.get_hardware_router_type_id | def get_hardware_router_type_id(self, context):
"""Get the ID for the ASR1k hardware router type."""
cctxt = self.client.prepare()
return cctxt.call(context,
'get_hardware_router_type_id',
host=self.host) | python | def get_hardware_router_type_id(self, context):
"""Get the ID for the ASR1k hardware router type."""
cctxt = self.client.prepare()
return cctxt.call(context,
'get_hardware_router_type_id',
host=self.host) | [
"def",
"get_hardware_router_type_id",
"(",
"self",
",",
"context",
")",
":",
"cctxt",
"=",
"self",
".",
"client",
".",
"prepare",
"(",
")",
"return",
"cctxt",
".",
"call",
"(",
"context",
",",
"'get_hardware_router_type_id'",
",",
"host",
"=",
"self",
".",
... | Get the ID for the ASR1k hardware router type. | [
"Get",
"the",
"ID",
"for",
"the",
"ASR1k",
"hardware",
"router",
"type",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/plugins/cisco/cfg_agent/device_drivers/asr1k/asr1k_auto_config_check.py#L72-L77 | train | 37,746 |
openstack/networking-cisco | networking_cisco/apps/saf/server/dfa_server.py | RpcCallBacks.heartbeat | def heartbeat(self, context, msg):
"""Process heartbeat message from agents on compute nodes."""
args = jsonutils.loads(msg)
when = args.get('when')
agent = args.get('agent')
# The configurations in here, only used once when creating entry
# for an agent in DB for the first time.
configurations = {'uplink': ''}
LOG.debug('heartbeat received: %(time)s - %(agent)s', (
{'time': when, 'agent': agent}))
if self.obj.neutron_event:
self.obj.neutron_event.create_rpc_client(agent)
# Other option is to add the event to the queue for processig it later.
self.obj.update_agent_status(agent, when)
# Update the agents database.
agent_info = dict(timestamp=utils.utc_time(when), host=agent,
config=jsonutils.dumps(configurations))
self.obj.update_agent_db(agent_info) | python | def heartbeat(self, context, msg):
"""Process heartbeat message from agents on compute nodes."""
args = jsonutils.loads(msg)
when = args.get('when')
agent = args.get('agent')
# The configurations in here, only used once when creating entry
# for an agent in DB for the first time.
configurations = {'uplink': ''}
LOG.debug('heartbeat received: %(time)s - %(agent)s', (
{'time': when, 'agent': agent}))
if self.obj.neutron_event:
self.obj.neutron_event.create_rpc_client(agent)
# Other option is to add the event to the queue for processig it later.
self.obj.update_agent_status(agent, when)
# Update the agents database.
agent_info = dict(timestamp=utils.utc_time(when), host=agent,
config=jsonutils.dumps(configurations))
self.obj.update_agent_db(agent_info) | [
"def",
"heartbeat",
"(",
"self",
",",
"context",
",",
"msg",
")",
":",
"args",
"=",
"jsonutils",
".",
"loads",
"(",
"msg",
")",
"when",
"=",
"args",
".",
"get",
"(",
"'when'",
")",
"agent",
"=",
"args",
".",
"get",
"(",
"'agent'",
")",
"# The confi... | Process heartbeat message from agents on compute nodes. | [
"Process",
"heartbeat",
"message",
"from",
"agents",
"on",
"compute",
"nodes",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/dfa_server.py#L69-L90 | train | 37,747 |
openstack/networking-cisco | networking_cisco/apps/saf/server/dfa_server.py | RpcCallBacks.request_uplink_info | def request_uplink_info(self, context, agent):
"""Process uplink message from an agent."""
LOG.debug('request_uplink_info from %(agent)s', {'agent': agent})
# Add the request into queue for processing.
event_type = 'agent.request.uplink'
payload = {'agent': agent}
timestamp = time.ctime()
data = (event_type, payload)
pri = self.obj.PRI_LOW_START + 1
self.obj.pqueue.put((pri, timestamp, data))
LOG.debug('Added request uplink info into queue.')
return 0 | python | def request_uplink_info(self, context, agent):
"""Process uplink message from an agent."""
LOG.debug('request_uplink_info from %(agent)s', {'agent': agent})
# Add the request into queue for processing.
event_type = 'agent.request.uplink'
payload = {'agent': agent}
timestamp = time.ctime()
data = (event_type, payload)
pri = self.obj.PRI_LOW_START + 1
self.obj.pqueue.put((pri, timestamp, data))
LOG.debug('Added request uplink info into queue.')
return 0 | [
"def",
"request_uplink_info",
"(",
"self",
",",
"context",
",",
"agent",
")",
":",
"LOG",
".",
"debug",
"(",
"'request_uplink_info from %(agent)s'",
",",
"{",
"'agent'",
":",
"agent",
"}",
")",
"# Add the request into queue for processing.",
"event_type",
"=",
"'age... | Process uplink message from an agent. | [
"Process",
"uplink",
"message",
"from",
"an",
"agent",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/dfa_server.py#L92-L106 | train | 37,748 |
openstack/networking-cisco | networking_cisco/apps/saf/server/dfa_server.py | RpcCallBacks.set_static_ip_address | def set_static_ip_address(self, context, msg):
"""Process request for setting rules in iptables.
In cases that static ip address is assigned for a VM, it is needed
to update the iptables rule for that address.
"""
args = jsonutils.loads(msg)
macaddr = args.get('mac')
ipaddr = args.get('ip')
LOG.debug('set_static_ip_address received: %(mac)s %(ip)s', (
{'mac': macaddr, 'ip': ipaddr}))
# Add the request into queue for processing.
event_type = 'cli.static_ip.set'
payload = {'mac': macaddr, 'ip': ipaddr}
timestamp = time.ctime()
data = (event_type, payload)
pri = self.obj.PRI_LOW_START
self.obj.pqueue.put((pri, timestamp, data))
LOG.debug('Added request to add static ip into queue.')
return 0 | python | def set_static_ip_address(self, context, msg):
"""Process request for setting rules in iptables.
In cases that static ip address is assigned for a VM, it is needed
to update the iptables rule for that address.
"""
args = jsonutils.loads(msg)
macaddr = args.get('mac')
ipaddr = args.get('ip')
LOG.debug('set_static_ip_address received: %(mac)s %(ip)s', (
{'mac': macaddr, 'ip': ipaddr}))
# Add the request into queue for processing.
event_type = 'cli.static_ip.set'
payload = {'mac': macaddr, 'ip': ipaddr}
timestamp = time.ctime()
data = (event_type, payload)
pri = self.obj.PRI_LOW_START
self.obj.pqueue.put((pri, timestamp, data))
LOG.debug('Added request to add static ip into queue.')
return 0 | [
"def",
"set_static_ip_address",
"(",
"self",
",",
"context",
",",
"msg",
")",
":",
"args",
"=",
"jsonutils",
".",
"loads",
"(",
"msg",
")",
"macaddr",
"=",
"args",
".",
"get",
"(",
"'mac'",
")",
"ipaddr",
"=",
"args",
".",
"get",
"(",
"'ip'",
")",
... | Process request for setting rules in iptables.
In cases that static ip address is assigned for a VM, it is needed
to update the iptables rule for that address. | [
"Process",
"request",
"for",
"setting",
"rules",
"in",
"iptables",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/dfa_server.py#L199-L220 | train | 37,749 |
openstack/networking-cisco | networking_cisco/apps/saf/server/dfa_server.py | RpcCallBacks.update_vm_result | def update_vm_result(self, context, msg):
"""Update VM's result field in the DB.
The result reflects the success of failure of operation when an
agent processes the vm info.
"""
args = jsonutils.loads(msg)
agent = context.get('agent')
port_id = args.get('port_uuid')
result = args.get('result')
LOG.debug('update_vm_result received from %(agent)s: '
'%(port_id)s %(result)s', {'agent': agent,
'port_id': port_id,
'result': result})
# Add the request into queue for processing.
event_type = 'agent.vm_result.update'
payload = {'port_id': port_id, 'result': result}
timestamp = time.ctime()
data = (event_type, payload)
# TODO(nlahouti) use value defined in constants
pri = self.obj.PRI_LOW_START + 10
self.obj.pqueue.put((pri, timestamp, data))
LOG.debug('Added request vm result update into queue.')
return 0 | python | def update_vm_result(self, context, msg):
"""Update VM's result field in the DB.
The result reflects the success of failure of operation when an
agent processes the vm info.
"""
args = jsonutils.loads(msg)
agent = context.get('agent')
port_id = args.get('port_uuid')
result = args.get('result')
LOG.debug('update_vm_result received from %(agent)s: '
'%(port_id)s %(result)s', {'agent': agent,
'port_id': port_id,
'result': result})
# Add the request into queue for processing.
event_type = 'agent.vm_result.update'
payload = {'port_id': port_id, 'result': result}
timestamp = time.ctime()
data = (event_type, payload)
# TODO(nlahouti) use value defined in constants
pri = self.obj.PRI_LOW_START + 10
self.obj.pqueue.put((pri, timestamp, data))
LOG.debug('Added request vm result update into queue.')
return 0 | [
"def",
"update_vm_result",
"(",
"self",
",",
"context",
",",
"msg",
")",
":",
"args",
"=",
"jsonutils",
".",
"loads",
"(",
"msg",
")",
"agent",
"=",
"context",
".",
"get",
"(",
"'agent'",
")",
"port_id",
"=",
"args",
".",
"get",
"(",
"'port_uuid'",
"... | Update VM's result field in the DB.
The result reflects the success of failure of operation when an
agent processes the vm info. | [
"Update",
"VM",
"s",
"result",
"field",
"in",
"the",
"DB",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/dfa_server.py#L222-L247 | train | 37,750 |
openstack/networking-cisco | networking_cisco/apps/saf/server/dfa_server.py | DfaServer._setup_rpc | def _setup_rpc(self):
"""Setup RPC server for dfa server."""
endpoints = RpcCallBacks(self)
self.server = rpc.DfaRpcServer(self.ser_q, self._host,
self.cfg.dfa_rpc.transport_url,
endpoints,
exchange=constants.DFA_EXCHANGE) | python | def _setup_rpc(self):
"""Setup RPC server for dfa server."""
endpoints = RpcCallBacks(self)
self.server = rpc.DfaRpcServer(self.ser_q, self._host,
self.cfg.dfa_rpc.transport_url,
endpoints,
exchange=constants.DFA_EXCHANGE) | [
"def",
"_setup_rpc",
"(",
"self",
")",
":",
"endpoints",
"=",
"RpcCallBacks",
"(",
"self",
")",
"self",
".",
"server",
"=",
"rpc",
".",
"DfaRpcServer",
"(",
"self",
".",
"ser_q",
",",
"self",
".",
"_host",
",",
"self",
".",
"cfg",
".",
"dfa_rpc",
"."... | Setup RPC server for dfa server. | [
"Setup",
"RPC",
"server",
"for",
"dfa",
"server",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/dfa_server.py#L406-L413 | train | 37,751 |
openstack/networking-cisco | networking_cisco/apps/saf/server/dfa_server.py | DfaServer.register_segment_dcnm | def register_segment_dcnm(self, cfg, seg_id_min, seg_id_max):
"""Register segmentation id pool with DCNM. """
orch_id = cfg.dcnm.orchestrator_id
try:
segid_range = self.dcnm_client.get_segmentid_range(orch_id)
if segid_range is None:
self.dcnm_client.set_segmentid_range(orch_id, seg_id_min,
seg_id_max)
else:
conf_min, _, conf_max = segid_range[
"segmentIdRanges"].partition("-")
if int(conf_min) != seg_id_min or int(conf_max) != seg_id_max:
self.dcnm_client.update_segmentid_range(orch_id,
seg_id_min,
seg_id_max)
except dexc.DfaClientRequestFailed as exc:
LOG.error("Segment ID range could not be created/updated"
" on DCNM: %s", exc)
raise SystemExit(exc) | python | def register_segment_dcnm(self, cfg, seg_id_min, seg_id_max):
"""Register segmentation id pool with DCNM. """
orch_id = cfg.dcnm.orchestrator_id
try:
segid_range = self.dcnm_client.get_segmentid_range(orch_id)
if segid_range is None:
self.dcnm_client.set_segmentid_range(orch_id, seg_id_min,
seg_id_max)
else:
conf_min, _, conf_max = segid_range[
"segmentIdRanges"].partition("-")
if int(conf_min) != seg_id_min or int(conf_max) != seg_id_max:
self.dcnm_client.update_segmentid_range(orch_id,
seg_id_min,
seg_id_max)
except dexc.DfaClientRequestFailed as exc:
LOG.error("Segment ID range could not be created/updated"
" on DCNM: %s", exc)
raise SystemExit(exc) | [
"def",
"register_segment_dcnm",
"(",
"self",
",",
"cfg",
",",
"seg_id_min",
",",
"seg_id_max",
")",
":",
"orch_id",
"=",
"cfg",
".",
"dcnm",
".",
"orchestrator_id",
"try",
":",
"segid_range",
"=",
"self",
".",
"dcnm_client",
".",
"get_segmentid_range",
"(",
... | Register segmentation id pool with DCNM. | [
"Register",
"segmentation",
"id",
"pool",
"with",
"DCNM",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/dfa_server.py#L426-L445 | train | 37,752 |
openstack/networking-cisco | networking_cisco/apps/saf/server/dfa_server.py | DfaServer.project_create_func | def project_create_func(self, proj_id, proj=None):
"""Create project given project uuid"""
if self.get_project_name(proj_id):
LOG.info("project %s exists, returning", proj_id)
return
if not proj:
try:
proj = self.keystone_event._service.projects.get(proj_id)
except Exception:
LOG.error("Failed to find project %s.", proj_id)
return
# In the project name, dci_id may be included. Check if this is the
# case and extact the dci_id from the name, and provide dci_id when
# creating the project.
proj_name, dci_id = self._get_dci_id_and_proj_name(proj.name)
if proj_name in reserved_project_name:
proj_name = "_".join((proj_name, self.cfg.dcnm.orchestrator_id))
# The default partition name is 'os' (i.e. openstack) which reflects
# it is created by openstack.
part_name = self.cfg.dcnm.default_partition_name
if len(':'.join((proj_name, part_name))) > 32:
LOG.error('Invalid project name length: %s. The length of '
'org:part name is greater than 32',
len(':'.join((proj_name, part_name))))
return
try:
self.dcnm_client.create_project(self.cfg.dcnm.orchestrator_id,
proj_name, part_name, dci_id,
proj.description)
except dexc.DfaClientRequestFailed:
# Failed to send create project in DCNM.
# Save the info and mark it as failure and retry it later.
self.update_project_info_cache(proj_id, name=proj_name,
dci_id=dci_id,
result=constants.CREATE_FAIL)
LOG.error("Failed to create project %s on DCNM.", proj_name)
else:
self.update_project_info_cache(proj_id, name=proj_name,
dci_id=dci_id)
LOG.debug('project %(name)s %(dci)s %(desc)s', (
{'name': proj_name, 'dci': dci_id, 'desc': proj.description}))
self.project_create_notif(proj_id, proj_name) | python | def project_create_func(self, proj_id, proj=None):
"""Create project given project uuid"""
if self.get_project_name(proj_id):
LOG.info("project %s exists, returning", proj_id)
return
if not proj:
try:
proj = self.keystone_event._service.projects.get(proj_id)
except Exception:
LOG.error("Failed to find project %s.", proj_id)
return
# In the project name, dci_id may be included. Check if this is the
# case and extact the dci_id from the name, and provide dci_id when
# creating the project.
proj_name, dci_id = self._get_dci_id_and_proj_name(proj.name)
if proj_name in reserved_project_name:
proj_name = "_".join((proj_name, self.cfg.dcnm.orchestrator_id))
# The default partition name is 'os' (i.e. openstack) which reflects
# it is created by openstack.
part_name = self.cfg.dcnm.default_partition_name
if len(':'.join((proj_name, part_name))) > 32:
LOG.error('Invalid project name length: %s. The length of '
'org:part name is greater than 32',
len(':'.join((proj_name, part_name))))
return
try:
self.dcnm_client.create_project(self.cfg.dcnm.orchestrator_id,
proj_name, part_name, dci_id,
proj.description)
except dexc.DfaClientRequestFailed:
# Failed to send create project in DCNM.
# Save the info and mark it as failure and retry it later.
self.update_project_info_cache(proj_id, name=proj_name,
dci_id=dci_id,
result=constants.CREATE_FAIL)
LOG.error("Failed to create project %s on DCNM.", proj_name)
else:
self.update_project_info_cache(proj_id, name=proj_name,
dci_id=dci_id)
LOG.debug('project %(name)s %(dci)s %(desc)s', (
{'name': proj_name, 'dci': dci_id, 'desc': proj.description}))
self.project_create_notif(proj_id, proj_name) | [
"def",
"project_create_func",
"(",
"self",
",",
"proj_id",
",",
"proj",
"=",
"None",
")",
":",
"if",
"self",
".",
"get_project_name",
"(",
"proj_id",
")",
":",
"LOG",
".",
"info",
"(",
"\"project %s exists, returning\"",
",",
"proj_id",
")",
"return",
"if",
... | Create project given project uuid | [
"Create",
"project",
"given",
"project",
"uuid"
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/dfa_server.py#L484-L528 | train | 37,753 |
openstack/networking-cisco | networking_cisco/apps/saf/server/dfa_server.py | DfaServer.project_update_event | def project_update_event(self, proj_info):
"""Process project update event.
There could be change in project name. DCNM doesn't allow change in
project (a.k.a tenant). This event may be received for the DCI update.
If the change is for DCI, update the DCI portion of the project name
and send the update event to the DCNM.
"""
LOG.debug("Processing project_update_event %(proj)s.",
{'proj': proj_info})
proj_id = proj_info.get('resource_info')
try:
proj = self.keystone_event._service.projects.get(proj_id)
except Exception:
LOG.error("Failed to find project %s.", proj_id)
return
new_proj_name, new_dci_id = self._get_dci_id_and_proj_name(proj.name)
# Check if project name and dci_id are the same, there is no change.
orig_proj_name = self.get_project_name(proj_id)
orig_dci_id = self.get_dci_id(proj_id)
if orig_proj_name == new_proj_name and new_dci_id == orig_dci_id:
# This is an invalid update event.
LOG.warning('Project update event for %(proj)s is received '
'without changing in the project name: '
'%(orig_proj)s. Ignoring the event.',
{'proj': proj_id, 'orig_proj': orig_proj_name})
return
if orig_proj_name != new_proj_name:
# Project has new name and in DCNM the name of project cannot be
# modified. It is an invalid update. Do not process the event.
LOG.debug('Update request cannot be processed as name of project'
' is changed: %(proj)s %(orig_name)s %(orig_dci)s to '
'%(new_name)s %(new_dci)s.', (
{'proj': proj_id, 'orig_name': orig_proj_name,
'orig_dci': orig_dci_id, 'new_name': new_proj_name,
'new_dci': new_dci_id}))
return
# Valid update request.
LOG.debug('Changing project DCI id for %(proj)s from %(orig_dci)s to '
'%(new_dci)s.', {'proj': proj_id,
'orig_dci': orig_dci_id,
'new_dci': new_dci_id})
try:
self.dcnm_client.update_project(new_proj_name,
self.cfg.dcnm.
default_partition_name,
dci_id=new_dci_id)
except dexc.DfaClientRequestFailed:
# Failed to update project in DCNM.
# Save the info and mark it as failure and retry it later.
LOG.error("Failed to update project %s on DCNM.",
new_proj_name)
self.update_project_info_cache(proj_id, name=new_proj_name,
dci_id=new_dci_id,
opcode='update',
result=constants.UPDATE_FAIL)
else:
self.update_project_info_cache(proj_id, name=new_proj_name,
dci_id=new_dci_id,
opcode='update')
LOG.debug('Updated project %(proj)s %(name)s.',
{'proj': proj_id, 'name': proj.name}) | python | def project_update_event(self, proj_info):
"""Process project update event.
There could be change in project name. DCNM doesn't allow change in
project (a.k.a tenant). This event may be received for the DCI update.
If the change is for DCI, update the DCI portion of the project name
and send the update event to the DCNM.
"""
LOG.debug("Processing project_update_event %(proj)s.",
{'proj': proj_info})
proj_id = proj_info.get('resource_info')
try:
proj = self.keystone_event._service.projects.get(proj_id)
except Exception:
LOG.error("Failed to find project %s.", proj_id)
return
new_proj_name, new_dci_id = self._get_dci_id_and_proj_name(proj.name)
# Check if project name and dci_id are the same, there is no change.
orig_proj_name = self.get_project_name(proj_id)
orig_dci_id = self.get_dci_id(proj_id)
if orig_proj_name == new_proj_name and new_dci_id == orig_dci_id:
# This is an invalid update event.
LOG.warning('Project update event for %(proj)s is received '
'without changing in the project name: '
'%(orig_proj)s. Ignoring the event.',
{'proj': proj_id, 'orig_proj': orig_proj_name})
return
if orig_proj_name != new_proj_name:
# Project has new name and in DCNM the name of project cannot be
# modified. It is an invalid update. Do not process the event.
LOG.debug('Update request cannot be processed as name of project'
' is changed: %(proj)s %(orig_name)s %(orig_dci)s to '
'%(new_name)s %(new_dci)s.', (
{'proj': proj_id, 'orig_name': orig_proj_name,
'orig_dci': orig_dci_id, 'new_name': new_proj_name,
'new_dci': new_dci_id}))
return
# Valid update request.
LOG.debug('Changing project DCI id for %(proj)s from %(orig_dci)s to '
'%(new_dci)s.', {'proj': proj_id,
'orig_dci': orig_dci_id,
'new_dci': new_dci_id})
try:
self.dcnm_client.update_project(new_proj_name,
self.cfg.dcnm.
default_partition_name,
dci_id=new_dci_id)
except dexc.DfaClientRequestFailed:
# Failed to update project in DCNM.
# Save the info and mark it as failure and retry it later.
LOG.error("Failed to update project %s on DCNM.",
new_proj_name)
self.update_project_info_cache(proj_id, name=new_proj_name,
dci_id=new_dci_id,
opcode='update',
result=constants.UPDATE_FAIL)
else:
self.update_project_info_cache(proj_id, name=new_proj_name,
dci_id=new_dci_id,
opcode='update')
LOG.debug('Updated project %(proj)s %(name)s.',
{'proj': proj_id, 'name': proj.name}) | [
"def",
"project_update_event",
"(",
"self",
",",
"proj_info",
")",
":",
"LOG",
".",
"debug",
"(",
"\"Processing project_update_event %(proj)s.\"",
",",
"{",
"'proj'",
":",
"proj_info",
"}",
")",
"proj_id",
"=",
"proj_info",
".",
"get",
"(",
"'resource_info'",
")... | Process project update event.
There could be change in project name. DCNM doesn't allow change in
project (a.k.a tenant). This event may be received for the DCI update.
If the change is for DCI, update the DCI portion of the project name
and send the update event to the DCNM. | [
"Process",
"project",
"update",
"event",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/dfa_server.py#L538-L604 | train | 37,754 |
openstack/networking-cisco | networking_cisco/apps/saf/server/dfa_server.py | DfaServer.project_delete_event | def project_delete_event(self, proj_info):
"""Process project delete event."""
LOG.debug("Processing project_delete_event...")
proj_id = proj_info.get('resource_info')
proj_name = self.get_project_name(proj_id)
if proj_name:
try:
self.dcnm_client.delete_project(proj_name,
self.cfg.dcnm.
default_partition_name)
except dexc.DfaClientRequestFailed:
# Failed to delete project in DCNM.
# Save the info and mark it as failure and retry it later.
LOG.error("Failed to create project %s on DCNM.",
proj_name)
self.update_project_info_cache(proj_id, name=proj_name,
opcode='delete',
result=constants.DELETE_FAIL)
else:
self.update_project_info_cache(proj_id, opcode='delete')
LOG.debug('Deleted project:%s', proj_name)
self.project_delete_notif(proj_id, proj_name) | python | def project_delete_event(self, proj_info):
"""Process project delete event."""
LOG.debug("Processing project_delete_event...")
proj_id = proj_info.get('resource_info')
proj_name = self.get_project_name(proj_id)
if proj_name:
try:
self.dcnm_client.delete_project(proj_name,
self.cfg.dcnm.
default_partition_name)
except dexc.DfaClientRequestFailed:
# Failed to delete project in DCNM.
# Save the info and mark it as failure and retry it later.
LOG.error("Failed to create project %s on DCNM.",
proj_name)
self.update_project_info_cache(proj_id, name=proj_name,
opcode='delete',
result=constants.DELETE_FAIL)
else:
self.update_project_info_cache(proj_id, opcode='delete')
LOG.debug('Deleted project:%s', proj_name)
self.project_delete_notif(proj_id, proj_name) | [
"def",
"project_delete_event",
"(",
"self",
",",
"proj_info",
")",
":",
"LOG",
".",
"debug",
"(",
"\"Processing project_delete_event...\"",
")",
"proj_id",
"=",
"proj_info",
".",
"get",
"(",
"'resource_info'",
")",
"proj_name",
"=",
"self",
".",
"get_project_name"... | Process project delete event. | [
"Process",
"project",
"delete",
"event",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/dfa_server.py#L606-L628 | train | 37,755 |
openstack/networking-cisco | networking_cisco/apps/saf/server/dfa_server.py | DfaServer.subnet_create_event | def subnet_create_event(self, subnet_info):
"""Process subnet create event."""
subnet = subnet_info.get('subnet')
if subnet:
self.create_subnet(subnet)
else:
# Check whether request is for subnets.
subnets = subnet_info.get('subnets')
if subnets:
for subnet in subnets:
self.create_subnet(subnet) | python | def subnet_create_event(self, subnet_info):
"""Process subnet create event."""
subnet = subnet_info.get('subnet')
if subnet:
self.create_subnet(subnet)
else:
# Check whether request is for subnets.
subnets = subnet_info.get('subnets')
if subnets:
for subnet in subnets:
self.create_subnet(subnet) | [
"def",
"subnet_create_event",
"(",
"self",
",",
"subnet_info",
")",
":",
"subnet",
"=",
"subnet_info",
".",
"get",
"(",
"'subnet'",
")",
"if",
"subnet",
":",
"self",
".",
"create_subnet",
"(",
"subnet",
")",
"else",
":",
"# Check whether request is for subnets."... | Process subnet create event. | [
"Process",
"subnet",
"create",
"event",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/dfa_server.py#L630-L641 | train | 37,756 |
openstack/networking-cisco | networking_cisco/apps/saf/server/dfa_server.py | DfaServer.create_subnet | def create_subnet(self, snet):
"""Create subnet."""
snet_id = snet.get('id')
# This checks if the source of the subnet creation is FW,
# If yes, this event is ignored.
if self.fw_api.is_subnet_source_fw(snet.get('tenant_id'),
snet.get('cidr')):
LOG.info("Service subnet %s, returning", snet.get('cidr'))
return
if snet_id not in self.subnet:
self.subnet[snet_id] = {}
self.subnet[snet_id].update(snet)
net = self.network.get(self.subnet[snet_id].get('network_id'))
if not net:
LOG.error('Network %(network_id)s does not exist.',
{'network_id': self.subnet[snet_id].get('network_id')})
return
# Check if the network is created by DCNM.
query_net = self.get_network(net.get('id'))
if query_net.result != constants.SUBNET_PENDING:
LOG.info("Subnet exists, returning")
return
if query_net and query_net.source.lower() == 'dcnm':
# The network is created by DCNM.
# No need to process this event.
LOG.info('create_subnet: network %(name)s '
'was created by DCNM. Ignoring processing the '
'event.', {'name': query_net.name})
return
tenant_name = self.get_project_name(snet['tenant_id'])
subnet = utils.Dict2Obj(snet)
dcnm_net = utils.Dict2Obj(net)
if not tenant_name:
LOG.error('Project %(tenant_id)s does not exist.',
{'tenant_id': subnet.tenant_id})
self.update_network_db(dcnm_net.id, constants.CREATE_FAIL)
return
try:
self.dcnm_client.create_network(tenant_name, dcnm_net, subnet,
self.dcnm_dhcp)
self.update_network_db(net.get('id'), constants.RESULT_SUCCESS)
except dexc.DfaClientRequestFailed:
LOG.exception('Failed to create network %(net)s.',
{'net': dcnm_net.name})
# Update network database with failure result.
self.update_network_db(dcnm_net.id, constants.CREATE_FAIL)
self.network_sub_create_notif(snet.get('tenant_id'), tenant_name,
snet.get('cidr')) | python | def create_subnet(self, snet):
"""Create subnet."""
snet_id = snet.get('id')
# This checks if the source of the subnet creation is FW,
# If yes, this event is ignored.
if self.fw_api.is_subnet_source_fw(snet.get('tenant_id'),
snet.get('cidr')):
LOG.info("Service subnet %s, returning", snet.get('cidr'))
return
if snet_id not in self.subnet:
self.subnet[snet_id] = {}
self.subnet[snet_id].update(snet)
net = self.network.get(self.subnet[snet_id].get('network_id'))
if not net:
LOG.error('Network %(network_id)s does not exist.',
{'network_id': self.subnet[snet_id].get('network_id')})
return
# Check if the network is created by DCNM.
query_net = self.get_network(net.get('id'))
if query_net.result != constants.SUBNET_PENDING:
LOG.info("Subnet exists, returning")
return
if query_net and query_net.source.lower() == 'dcnm':
# The network is created by DCNM.
# No need to process this event.
LOG.info('create_subnet: network %(name)s '
'was created by DCNM. Ignoring processing the '
'event.', {'name': query_net.name})
return
tenant_name = self.get_project_name(snet['tenant_id'])
subnet = utils.Dict2Obj(snet)
dcnm_net = utils.Dict2Obj(net)
if not tenant_name:
LOG.error('Project %(tenant_id)s does not exist.',
{'tenant_id': subnet.tenant_id})
self.update_network_db(dcnm_net.id, constants.CREATE_FAIL)
return
try:
self.dcnm_client.create_network(tenant_name, dcnm_net, subnet,
self.dcnm_dhcp)
self.update_network_db(net.get('id'), constants.RESULT_SUCCESS)
except dexc.DfaClientRequestFailed:
LOG.exception('Failed to create network %(net)s.',
{'net': dcnm_net.name})
# Update network database with failure result.
self.update_network_db(dcnm_net.id, constants.CREATE_FAIL)
self.network_sub_create_notif(snet.get('tenant_id'), tenant_name,
snet.get('cidr')) | [
"def",
"create_subnet",
"(",
"self",
",",
"snet",
")",
":",
"snet_id",
"=",
"snet",
".",
"get",
"(",
"'id'",
")",
"# This checks if the source of the subnet creation is FW,",
"# If yes, this event is ignored.",
"if",
"self",
".",
"fw_api",
".",
"is_subnet_source_fw",
... | Create subnet. | [
"Create",
"subnet",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/dfa_server.py#L643-L695 | train | 37,757 |
openstack/networking-cisco | networking_cisco/apps/saf/server/dfa_server.py | DfaServer._get_segmentation_id | def _get_segmentation_id(self, netid, segid, source):
"""Allocate segmentation id. """
return self.seg_drvr.allocate_segmentation_id(netid, seg_id=segid,
source=source) | python | def _get_segmentation_id(self, netid, segid, source):
"""Allocate segmentation id. """
return self.seg_drvr.allocate_segmentation_id(netid, seg_id=segid,
source=source) | [
"def",
"_get_segmentation_id",
"(",
"self",
",",
"netid",
",",
"segid",
",",
"source",
")",
":",
"return",
"self",
".",
"seg_drvr",
".",
"allocate_segmentation_id",
"(",
"netid",
",",
"seg_id",
"=",
"segid",
",",
"source",
"=",
"source",
")"
] | Allocate segmentation id. | [
"Allocate",
"segmentation",
"id",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/dfa_server.py#L697-L701 | train | 37,758 |
openstack/networking-cisco | networking_cisco/apps/saf/server/dfa_server.py | DfaServer.network_delete_event | def network_delete_event(self, network_info):
"""Process network delete event."""
net_id = network_info['network_id']
if net_id not in self.network:
LOG.error('network_delete_event: net_id %s does not exist.',
net_id)
return
segid = self.network[net_id].get('segmentation_id')
tenant_id = self.network[net_id].get('tenant_id')
tenant_name = self.get_project_name(tenant_id)
net = utils.Dict2Obj(self.network[net_id])
if not tenant_name:
LOG.error('Project %(tenant_id)s does not exist.',
{'tenant_id': tenant_id})
self.update_network_db(net.id, constants.DELETE_FAIL)
return
try:
self.dcnm_client.delete_network(tenant_name, net)
# Put back the segmentation id into the pool.
self.seg_drvr.release_segmentation_id(segid)
# Remove entry from database and cache.
self.delete_network_db(net_id)
del self.network[net_id]
snets = [k for k in self.subnet if (
self.subnet[k].get('network_id') == net_id)]
[self.subnet.pop(s) for s in snets]
except dexc.DfaClientRequestFailed:
LOG.error('Failed to create network %(net)s.',
{'net': net.name})
self.update_network_db(net_id, constants.DELETE_FAIL)
# deleting all related VMs
instances = self.get_vms()
instances_related = [k for k in instances if k.network_id == net_id]
for vm in instances_related:
LOG.debug("deleting vm %s because network is deleted", vm.name)
self.delete_vm_function(vm.port_id, vm)
self.network_del_notif(tenant_id, tenant_name, net_id) | python | def network_delete_event(self, network_info):
"""Process network delete event."""
net_id = network_info['network_id']
if net_id not in self.network:
LOG.error('network_delete_event: net_id %s does not exist.',
net_id)
return
segid = self.network[net_id].get('segmentation_id')
tenant_id = self.network[net_id].get('tenant_id')
tenant_name = self.get_project_name(tenant_id)
net = utils.Dict2Obj(self.network[net_id])
if not tenant_name:
LOG.error('Project %(tenant_id)s does not exist.',
{'tenant_id': tenant_id})
self.update_network_db(net.id, constants.DELETE_FAIL)
return
try:
self.dcnm_client.delete_network(tenant_name, net)
# Put back the segmentation id into the pool.
self.seg_drvr.release_segmentation_id(segid)
# Remove entry from database and cache.
self.delete_network_db(net_id)
del self.network[net_id]
snets = [k for k in self.subnet if (
self.subnet[k].get('network_id') == net_id)]
[self.subnet.pop(s) for s in snets]
except dexc.DfaClientRequestFailed:
LOG.error('Failed to create network %(net)s.',
{'net': net.name})
self.update_network_db(net_id, constants.DELETE_FAIL)
# deleting all related VMs
instances = self.get_vms()
instances_related = [k for k in instances if k.network_id == net_id]
for vm in instances_related:
LOG.debug("deleting vm %s because network is deleted", vm.name)
self.delete_vm_function(vm.port_id, vm)
self.network_del_notif(tenant_id, tenant_name, net_id) | [
"def",
"network_delete_event",
"(",
"self",
",",
"network_info",
")",
":",
"net_id",
"=",
"network_info",
"[",
"'network_id'",
"]",
"if",
"net_id",
"not",
"in",
"self",
".",
"network",
":",
"LOG",
".",
"error",
"(",
"'network_delete_event: net_id %s does not exist... | Process network delete event. | [
"Process",
"network",
"delete",
"event",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/dfa_server.py#L809-L849 | train | 37,759 |
openstack/networking-cisco | networking_cisco/apps/saf/server/dfa_server.py | DfaServer.dcnm_network_delete_event | def dcnm_network_delete_event(self, network_info):
"""Process network delete event from DCNM."""
seg_id = network_info.get('segmentation_id')
if not seg_id:
LOG.error('Failed to delete network. Invalid network '
'info %s.', network_info)
query_net = self.get_network_by_segid(seg_id)
if not query_net:
LOG.info('dcnm_network_delete_event: network %(segid)s '
'does not exist.', {'segid': seg_id})
return
if self.fw_api.is_network_source_fw(query_net, query_net.name):
LOG.info("Service network %s, returning", query_net.name)
return
# Send network delete request to neutron
try:
del_net = self.network.pop(query_net.network_id)
self.neutronclient.delete_network(query_net.network_id)
self.delete_network_db(query_net.network_id)
except Exception as exc:
# Failed to delete network.
# Put back the entry to the local cache???
self.network[query_net.network_id] = del_net
LOG.exception('dcnm_network_delete_event: Failed to delete '
'%(network)s. Reason %(err)s.',
{'network': query_net.name, 'err': str(exc)}) | python | def dcnm_network_delete_event(self, network_info):
"""Process network delete event from DCNM."""
seg_id = network_info.get('segmentation_id')
if not seg_id:
LOG.error('Failed to delete network. Invalid network '
'info %s.', network_info)
query_net = self.get_network_by_segid(seg_id)
if not query_net:
LOG.info('dcnm_network_delete_event: network %(segid)s '
'does not exist.', {'segid': seg_id})
return
if self.fw_api.is_network_source_fw(query_net, query_net.name):
LOG.info("Service network %s, returning", query_net.name)
return
# Send network delete request to neutron
try:
del_net = self.network.pop(query_net.network_id)
self.neutronclient.delete_network(query_net.network_id)
self.delete_network_db(query_net.network_id)
except Exception as exc:
# Failed to delete network.
# Put back the entry to the local cache???
self.network[query_net.network_id] = del_net
LOG.exception('dcnm_network_delete_event: Failed to delete '
'%(network)s. Reason %(err)s.',
{'network': query_net.name, 'err': str(exc)}) | [
"def",
"dcnm_network_delete_event",
"(",
"self",
",",
"network_info",
")",
":",
"seg_id",
"=",
"network_info",
".",
"get",
"(",
"'segmentation_id'",
")",
"if",
"not",
"seg_id",
":",
"LOG",
".",
"error",
"(",
"'Failed to delete network. Invalid network '",
"'info %s.... | Process network delete event from DCNM. | [
"Process",
"network",
"delete",
"event",
"from",
"DCNM",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/dfa_server.py#L1005-L1030 | train | 37,760 |
openstack/networking-cisco | networking_cisco/apps/saf/server/dfa_server.py | DfaServer.update_port_ip_address | def update_port_ip_address(self):
"""Find the ip address that assinged to a port via DHCP
The port database will be updated with the ip address.
"""
leases = None
req = dict(ip='0.0.0.0')
instances = self.get_vms_for_this_req(**req)
if instances is None:
return
for vm in instances:
if not leases:
# For the first time finding the leases file.
leases = self._get_ip_leases()
if not leases:
# File does not exist.
return
for line in leases:
if line.startswith('lease') and line.endswith('{\n'):
ip_addr = line.split()[1]
if 'hardware ethernet' in line:
if vm.mac == line.replace(';', '').split()[2]:
LOG.info('Find IP address %(ip)s for %(mac)s',
{'ip': ip_addr, 'mac': vm.mac})
try:
rule_info = dict(ip=ip_addr, mac=vm.mac,
port=vm.port_id,
status='up')
self.neutron_event.update_ip_rule(str(vm.host),
str(rule_info))
except (rpc.MessagingTimeout, rpc.RPCException,
rpc.RemoteError):
LOG.error("RPC error: Failed to update"
"rules.")
else:
params = dict(columns=dict(ip=ip_addr))
self.update_vm_db(vm.port_id, **params)
# Send update to the agent.
vm_info = dict(status=vm.status, vm_mac=vm.mac,
segmentation_id=vm.segmentation_id,
host=vm.host, port_uuid=vm.port_id,
net_uuid=vm.network_id,
oui=dict(ip_addr=ip_addr,
vm_name=vm.name,
vm_uuid=vm.instance_id,
gw_mac=vm.gw_mac,
fwd_mod=vm.fwd_mod,
oui_id='cisco'))
try:
self.neutron_event.send_vm_info(vm.host,
str(vm_info))
except (rpc.MessagingTimeout, rpc.RPCException,
rpc.RemoteError):
LOG.error('Failed to send VM info to '
'agent.') | python | def update_port_ip_address(self):
"""Find the ip address that assinged to a port via DHCP
The port database will be updated with the ip address.
"""
leases = None
req = dict(ip='0.0.0.0')
instances = self.get_vms_for_this_req(**req)
if instances is None:
return
for vm in instances:
if not leases:
# For the first time finding the leases file.
leases = self._get_ip_leases()
if not leases:
# File does not exist.
return
for line in leases:
if line.startswith('lease') and line.endswith('{\n'):
ip_addr = line.split()[1]
if 'hardware ethernet' in line:
if vm.mac == line.replace(';', '').split()[2]:
LOG.info('Find IP address %(ip)s for %(mac)s',
{'ip': ip_addr, 'mac': vm.mac})
try:
rule_info = dict(ip=ip_addr, mac=vm.mac,
port=vm.port_id,
status='up')
self.neutron_event.update_ip_rule(str(vm.host),
str(rule_info))
except (rpc.MessagingTimeout, rpc.RPCException,
rpc.RemoteError):
LOG.error("RPC error: Failed to update"
"rules.")
else:
params = dict(columns=dict(ip=ip_addr))
self.update_vm_db(vm.port_id, **params)
# Send update to the agent.
vm_info = dict(status=vm.status, vm_mac=vm.mac,
segmentation_id=vm.segmentation_id,
host=vm.host, port_uuid=vm.port_id,
net_uuid=vm.network_id,
oui=dict(ip_addr=ip_addr,
vm_name=vm.name,
vm_uuid=vm.instance_id,
gw_mac=vm.gw_mac,
fwd_mod=vm.fwd_mod,
oui_id='cisco'))
try:
self.neutron_event.send_vm_info(vm.host,
str(vm_info))
except (rpc.MessagingTimeout, rpc.RPCException,
rpc.RemoteError):
LOG.error('Failed to send VM info to '
'agent.') | [
"def",
"update_port_ip_address",
"(",
"self",
")",
":",
"leases",
"=",
"None",
"req",
"=",
"dict",
"(",
"ip",
"=",
"'0.0.0.0'",
")",
"instances",
"=",
"self",
".",
"get_vms_for_this_req",
"(",
"*",
"*",
"req",
")",
"if",
"instances",
"is",
"None",
":",
... | Find the ip address that assinged to a port via DHCP
The port database will be updated with the ip address. | [
"Find",
"the",
"ip",
"address",
"that",
"assinged",
"to",
"a",
"port",
"via",
"DHCP"
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/dfa_server.py#L1336-L1393 | train | 37,761 |
openstack/networking-cisco | networking_cisco/apps/saf/server/dfa_server.py | DfaServer.request_vms_info | def request_vms_info(self, payload):
"""Get the VMs from the database and send the info to the agent."""
# This request is received from an agent when it runs for the first
# time and uplink is detected.
agent = payload.get('agent')
LOG.debug('request_vms_info: Getting VMs info for %s', agent)
req = dict(host=payload.get('agent'))
instances = self.get_vms_for_this_req(**req)
vm_info = []
for vm in instances:
vm_info.append(dict(status=vm.status,
vm_mac=vm.mac,
segmentation_id=vm.segmentation_id,
host=vm.host,
port_uuid=vm.port_id,
net_uuid=vm.network_id,
oui=dict(ip_addr=vm.ip,
vm_name=vm.name,
vm_uuid=vm.instance_id,
gw_mac=vm.gw_mac,
fwd_mod=vm.fwd_mod,
oui_id='cisco')))
try:
self.neutron_event.send_vm_info(agent, str(vm_info))
except (rpc.MessagingTimeout, rpc.RPCException, rpc.RemoteError):
LOG.error('Failed to send VM info to agent.') | python | def request_vms_info(self, payload):
"""Get the VMs from the database and send the info to the agent."""
# This request is received from an agent when it runs for the first
# time and uplink is detected.
agent = payload.get('agent')
LOG.debug('request_vms_info: Getting VMs info for %s', agent)
req = dict(host=payload.get('agent'))
instances = self.get_vms_for_this_req(**req)
vm_info = []
for vm in instances:
vm_info.append(dict(status=vm.status,
vm_mac=vm.mac,
segmentation_id=vm.segmentation_id,
host=vm.host,
port_uuid=vm.port_id,
net_uuid=vm.network_id,
oui=dict(ip_addr=vm.ip,
vm_name=vm.name,
vm_uuid=vm.instance_id,
gw_mac=vm.gw_mac,
fwd_mod=vm.fwd_mod,
oui_id='cisco')))
try:
self.neutron_event.send_vm_info(agent, str(vm_info))
except (rpc.MessagingTimeout, rpc.RPCException, rpc.RemoteError):
LOG.error('Failed to send VM info to agent.') | [
"def",
"request_vms_info",
"(",
"self",
",",
"payload",
")",
":",
"# This request is received from an agent when it runs for the first",
"# time and uplink is detected.",
"agent",
"=",
"payload",
".",
"get",
"(",
"'agent'",
")",
"LOG",
".",
"debug",
"(",
"'request_vms_inf... | Get the VMs from the database and send the info to the agent. | [
"Get",
"the",
"VMs",
"from",
"the",
"database",
"and",
"send",
"the",
"info",
"to",
"the",
"agent",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/dfa_server.py#L1465-L1491 | train | 37,762 |
openstack/networking-cisco | networking_cisco/apps/saf/server/dfa_server.py | DfaServer.request_uplink_info | def request_uplink_info(self, payload):
"""Get the uplink from the database and send the info to the agent."""
# This request is received from an agent when it run for the first
# Send the uplink name (physical port name that connectes compute
# node and switch fabric),
agent = payload.get('agent')
config_res = self.get_agent_configurations(agent)
LOG.debug('configurations on %(agent)s is %(cfg)s', (
{'agent': agent, 'cfg': config_res}))
try:
self.neutron_event.send_msg_to_agent(agent,
constants.UPLINK_NAME,
config_res)
except (rpc.MessagingTimeout, rpc.RPCException, rpc.RemoteError):
LOG.error("RPC error: Failed to send uplink name to agent.") | python | def request_uplink_info(self, payload):
"""Get the uplink from the database and send the info to the agent."""
# This request is received from an agent when it run for the first
# Send the uplink name (physical port name that connectes compute
# node and switch fabric),
agent = payload.get('agent')
config_res = self.get_agent_configurations(agent)
LOG.debug('configurations on %(agent)s is %(cfg)s', (
{'agent': agent, 'cfg': config_res}))
try:
self.neutron_event.send_msg_to_agent(agent,
constants.UPLINK_NAME,
config_res)
except (rpc.MessagingTimeout, rpc.RPCException, rpc.RemoteError):
LOG.error("RPC error: Failed to send uplink name to agent.") | [
"def",
"request_uplink_info",
"(",
"self",
",",
"payload",
")",
":",
"# This request is received from an agent when it run for the first",
"# Send the uplink name (physical port name that connectes compute",
"# node and switch fabric),",
"agent",
"=",
"payload",
... | Get the uplink from the database and send the info to the agent. | [
"Get",
"the",
"uplink",
"from",
"the",
"database",
"and",
"send",
"the",
"info",
"to",
"the",
"agent",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/dfa_server.py#L1493-L1508 | train | 37,763 |
openstack/networking-cisco | networking_cisco/apps/saf/server/dfa_server.py | DfaServer.set_static_ip_address | def set_static_ip_address(self, payload):
"""Set static ip address for a VM."""
# This request is received from CLI for setting ip address of an
# instance.
macaddr = payload.get('mac')
ipaddr = payload.get('ip')
# Find the entry associated with the mac in the database.
req = dict(mac=macaddr)
instances = self.get_vms_for_this_req(**req)
for vm in instances:
LOG.info('Updating IP address: %(ip)s %(mac)s.',
{'ip': ipaddr, 'mac': macaddr})
# Send request to update the rule.
try:
rule_info = dict(ip=ipaddr, mac=macaddr,
port=vm.port_id,
status='up')
self.neutron_event.update_ip_rule(str(vm.host),
str(rule_info))
except (rpc.MessagingTimeout, rpc.RPCException,
rpc.RemoteError):
LOG.error("RPC error: Failed to update rules.")
else:
# Update the database.
params = dict(columns=dict(ip=ipaddr))
self.update_vm_db(vm.port_id, **params)
# Send update to the agent.
vm_info = dict(status=vm.status, vm_mac=vm.mac,
segmentation_id=vm.segmentation_id,
host=vm.host, port_uuid=vm.port_id,
net_uuid=vm.network_id,
oui=dict(ip_addr=ipaddr,
vm_name=vm.name,
vm_uuid=vm.instance_id,
gw_mac=vm.gw_mac,
fwd_mod=vm.fwd_mod,
oui_id='cisco'))
try:
self.neutron_event.send_vm_info(vm.host,
str(vm_info))
except (rpc.MessagingTimeout, rpc.RPCException,
rpc.RemoteError):
LOG.error('Failed to send VM info to agent.') | python | def set_static_ip_address(self, payload):
"""Set static ip address for a VM."""
# This request is received from CLI for setting ip address of an
# instance.
macaddr = payload.get('mac')
ipaddr = payload.get('ip')
# Find the entry associated with the mac in the database.
req = dict(mac=macaddr)
instances = self.get_vms_for_this_req(**req)
for vm in instances:
LOG.info('Updating IP address: %(ip)s %(mac)s.',
{'ip': ipaddr, 'mac': macaddr})
# Send request to update the rule.
try:
rule_info = dict(ip=ipaddr, mac=macaddr,
port=vm.port_id,
status='up')
self.neutron_event.update_ip_rule(str(vm.host),
str(rule_info))
except (rpc.MessagingTimeout, rpc.RPCException,
rpc.RemoteError):
LOG.error("RPC error: Failed to update rules.")
else:
# Update the database.
params = dict(columns=dict(ip=ipaddr))
self.update_vm_db(vm.port_id, **params)
# Send update to the agent.
vm_info = dict(status=vm.status, vm_mac=vm.mac,
segmentation_id=vm.segmentation_id,
host=vm.host, port_uuid=vm.port_id,
net_uuid=vm.network_id,
oui=dict(ip_addr=ipaddr,
vm_name=vm.name,
vm_uuid=vm.instance_id,
gw_mac=vm.gw_mac,
fwd_mod=vm.fwd_mod,
oui_id='cisco'))
try:
self.neutron_event.send_vm_info(vm.host,
str(vm_info))
except (rpc.MessagingTimeout, rpc.RPCException,
rpc.RemoteError):
LOG.error('Failed to send VM info to agent.') | [
"def",
"set_static_ip_address",
"(",
"self",
",",
"payload",
")",
":",
"# This request is received from CLI for setting ip address of an",
"# instance.",
"macaddr",
"=",
"payload",
".",
"get",
"(",
"'mac'",
")",
"ipaddr",
"=",
"payload",
".",
"get",
"(",
"'ip'",
")"... | Set static ip address for a VM. | [
"Set",
"static",
"ip",
"address",
"for",
"a",
"VM",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/dfa_server.py#L1510-L1555 | train | 37,764 |
openstack/networking-cisco | networking_cisco/apps/saf/server/dfa_server.py | DfaServer.vm_result_update | def vm_result_update(self, payload):
"""Update the result field in VM database.
This request comes from an agent that needs to update the result
in VM database to success or failure to reflect the operation's result
in the agent.
"""
port_id = payload.get('port_id')
result = payload.get('result')
if port_id and result:
# Update the VM's result field.
params = dict(columns=dict(result=result))
self.update_vm_db(port_id, **params) | python | def vm_result_update(self, payload):
"""Update the result field in VM database.
This request comes from an agent that needs to update the result
in VM database to success or failure to reflect the operation's result
in the agent.
"""
port_id = payload.get('port_id')
result = payload.get('result')
if port_id and result:
# Update the VM's result field.
params = dict(columns=dict(result=result))
self.update_vm_db(port_id, **params) | [
"def",
"vm_result_update",
"(",
"self",
",",
"payload",
")",
":",
"port_id",
"=",
"payload",
".",
"get",
"(",
"'port_id'",
")",
"result",
"=",
"payload",
".",
"get",
"(",
"'result'",
")",
"if",
"port_id",
"and",
"result",
":",
"# Update the VM's result field... | Update the result field in VM database.
This request comes from an agent that needs to update the result
in VM database to success or failure to reflect the operation's result
in the agent. | [
"Update",
"the",
"result",
"field",
"in",
"VM",
"database",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/dfa_server.py#L1557-L1571 | train | 37,765 |
openstack/networking-cisco | networking_cisco/apps/saf/server/dfa_server.py | DfaServer.add_lbaas_port | def add_lbaas_port(self, port_id, lb_id):
"""Give port id, get port info and send vm info to agent.
:param port_id: port id of vip port
:param lb_id: vip id for v1 and lbaas_id for v2
"""
port_info = self.neutronclient.show_port(port_id)
port = port_info.get('port')
if not port:
LOG.error("Can not retrieve port info for port %s" % port_id)
return
LOG.debug("lbaas add port, %s", port)
if not port['binding:host_id']:
LOG.info("No host bind for lbaas port, octavia case")
return
port["device_id"] = lb_id
vm_info = self._make_vm_info(port, 'up', constants.LBAAS_PREFIX)
self.port[port_id] = vm_info
if self.send_vm_info(vm_info):
self.add_vms_db(vm_info, constants.RESULT_SUCCESS)
else:
self.add_vms_db(vm_info, constants.CREATE_FAIL) | python | def add_lbaas_port(self, port_id, lb_id):
"""Give port id, get port info and send vm info to agent.
:param port_id: port id of vip port
:param lb_id: vip id for v1 and lbaas_id for v2
"""
port_info = self.neutronclient.show_port(port_id)
port = port_info.get('port')
if not port:
LOG.error("Can not retrieve port info for port %s" % port_id)
return
LOG.debug("lbaas add port, %s", port)
if not port['binding:host_id']:
LOG.info("No host bind for lbaas port, octavia case")
return
port["device_id"] = lb_id
vm_info = self._make_vm_info(port, 'up', constants.LBAAS_PREFIX)
self.port[port_id] = vm_info
if self.send_vm_info(vm_info):
self.add_vms_db(vm_info, constants.RESULT_SUCCESS)
else:
self.add_vms_db(vm_info, constants.CREATE_FAIL) | [
"def",
"add_lbaas_port",
"(",
"self",
",",
"port_id",
",",
"lb_id",
")",
":",
"port_info",
"=",
"self",
".",
"neutronclient",
".",
"show_port",
"(",
"port_id",
")",
"port",
"=",
"port_info",
".",
"get",
"(",
"'port'",
")",
"if",
"not",
"port",
":",
"LO... | Give port id, get port info and send vm info to agent.
:param port_id: port id of vip port
:param lb_id: vip id for v1 and lbaas_id for v2 | [
"Give",
"port",
"id",
"get",
"port",
"info",
"and",
"send",
"vm",
"info",
"to",
"agent",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/dfa_server.py#L1581-L1603 | train | 37,766 |
openstack/networking-cisco | networking_cisco/apps/saf/server/dfa_server.py | DfaServer.delete_lbaas_port | def delete_lbaas_port(self, lb_id):
"""send vm down event and delete db.
:param lb_id: vip id for v1 and lbaas_id for v2
"""
lb_id = lb_id.replace('-', '')
req = dict(instance_id=lb_id)
instances = self.get_vms_for_this_req(**req)
for vm in instances:
LOG.info("deleting lbaas vm %s " % vm.name)
self.delete_vm_function(vm.port_id, vm) | python | def delete_lbaas_port(self, lb_id):
"""send vm down event and delete db.
:param lb_id: vip id for v1 and lbaas_id for v2
"""
lb_id = lb_id.replace('-', '')
req = dict(instance_id=lb_id)
instances = self.get_vms_for_this_req(**req)
for vm in instances:
LOG.info("deleting lbaas vm %s " % vm.name)
self.delete_vm_function(vm.port_id, vm) | [
"def",
"delete_lbaas_port",
"(",
"self",
",",
"lb_id",
")",
":",
"lb_id",
"=",
"lb_id",
".",
"replace",
"(",
"'-'",
",",
"''",
")",
"req",
"=",
"dict",
"(",
"instance_id",
"=",
"lb_id",
")",
"instances",
"=",
"self",
".",
"get_vms_for_this_req",
"(",
"... | send vm down event and delete db.
:param lb_id: vip id for v1 and lbaas_id for v2 | [
"send",
"vm",
"down",
"event",
"and",
"delete",
"db",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/dfa_server.py#L1605-L1615 | train | 37,767 |
openstack/networking-cisco | networking_cisco/apps/saf/server/dfa_server.py | DfaServer.vip_create_event | def vip_create_event(self, vip_info):
"""Process vip create event."""
vip_data = vip_info.get('vip')
port_id = vip_data.get('port_id')
vip_id = vip_data.get('id')
self.add_lbaas_port(port_id, vip_id) | python | def vip_create_event(self, vip_info):
"""Process vip create event."""
vip_data = vip_info.get('vip')
port_id = vip_data.get('port_id')
vip_id = vip_data.get('id')
self.add_lbaas_port(port_id, vip_id) | [
"def",
"vip_create_event",
"(",
"self",
",",
"vip_info",
")",
":",
"vip_data",
"=",
"vip_info",
".",
"get",
"(",
"'vip'",
")",
"port_id",
"=",
"vip_data",
".",
"get",
"(",
"'port_id'",
")",
"vip_id",
"=",
"vip_data",
".",
"get",
"(",
"'id'",
")",
"self... | Process vip create event. | [
"Process",
"vip",
"create",
"event",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/dfa_server.py#L1617-L1622 | train | 37,768 |
openstack/networking-cisco | networking_cisco/apps/saf/server/dfa_server.py | DfaServer.listener_create_event | def listener_create_event(self, listener_info):
"""Process listener create event.
This is lbaas v2
vif will be plugged into ovs when first
listener is created and unpluged from ovs
when last listener is deleted
"""
listener_data = listener_info.get('listener')
lb_list = listener_data.get('loadbalancers')
for lb in lb_list:
lb_id = lb.get('id')
req = dict(instance_id=(lb_id.replace('-', '')))
instances = self.get_vms_for_this_req(**req)
if not instances:
lb_info = self.neutronclient.show_loadbalancer(lb_id)
if lb_info:
port_id = lb_info["loadbalancer"]["vip_port_id"]
self.add_lbaas_port(port_id, lb_id)
else:
LOG.info("lbaas port for lb %s already added" % lb_id) | python | def listener_create_event(self, listener_info):
"""Process listener create event.
This is lbaas v2
vif will be plugged into ovs when first
listener is created and unpluged from ovs
when last listener is deleted
"""
listener_data = listener_info.get('listener')
lb_list = listener_data.get('loadbalancers')
for lb in lb_list:
lb_id = lb.get('id')
req = dict(instance_id=(lb_id.replace('-', '')))
instances = self.get_vms_for_this_req(**req)
if not instances:
lb_info = self.neutronclient.show_loadbalancer(lb_id)
if lb_info:
port_id = lb_info["loadbalancer"]["vip_port_id"]
self.add_lbaas_port(port_id, lb_id)
else:
LOG.info("lbaas port for lb %s already added" % lb_id) | [
"def",
"listener_create_event",
"(",
"self",
",",
"listener_info",
")",
":",
"listener_data",
"=",
"listener_info",
".",
"get",
"(",
"'listener'",
")",
"lb_list",
"=",
"listener_data",
".",
"get",
"(",
"'loadbalancers'",
")",
"for",
"lb",
"in",
"lb_list",
":",... | Process listener create event.
This is lbaas v2
vif will be plugged into ovs when first
listener is created and unpluged from ovs
when last listener is deleted | [
"Process",
"listener",
"create",
"event",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/dfa_server.py#L1629-L1649 | train | 37,769 |
openstack/networking-cisco | networking_cisco/apps/saf/server/dfa_server.py | DfaServer.listener_delete_event | def listener_delete_event(self, listener_info):
"""Process listener delete event.
This is lbaas v2
vif will be plugged into ovs when first
listener is created and unpluged from ovs
when last listener is deleted.
as the data only contains listener id, we will
scan all loadbalancers from db and delete the vdp
if their admin state is down in that loadbalancer
"""
lb_list = self.neutronclient.list_loadbalancers()
for lb in lb_list.get('loadbalancers'):
if not lb.get("listeners"):
lb_id = lb.get('id')
LOG.info("Deleting lb %s port" % lb_id)
self.delete_lbaas_port(lb_id) | python | def listener_delete_event(self, listener_info):
"""Process listener delete event.
This is lbaas v2
vif will be plugged into ovs when first
listener is created and unpluged from ovs
when last listener is deleted.
as the data only contains listener id, we will
scan all loadbalancers from db and delete the vdp
if their admin state is down in that loadbalancer
"""
lb_list = self.neutronclient.list_loadbalancers()
for lb in lb_list.get('loadbalancers'):
if not lb.get("listeners"):
lb_id = lb.get('id')
LOG.info("Deleting lb %s port" % lb_id)
self.delete_lbaas_port(lb_id) | [
"def",
"listener_delete_event",
"(",
"self",
",",
"listener_info",
")",
":",
"lb_list",
"=",
"self",
".",
"neutronclient",
".",
"list_loadbalancers",
"(",
")",
"for",
"lb",
"in",
"lb_list",
".",
"get",
"(",
"'loadbalancers'",
")",
":",
"if",
"not",
"lb",
"... | Process listener delete event.
This is lbaas v2
vif will be plugged into ovs when first
listener is created and unpluged from ovs
when last listener is deleted.
as the data only contains listener id, we will
scan all loadbalancers from db and delete the vdp
if their admin state is down in that loadbalancer | [
"Process",
"listener",
"delete",
"event",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/dfa_server.py#L1651-L1667 | train | 37,770 |
openstack/networking-cisco | networking_cisco/apps/saf/server/dfa_server.py | DfaServer.pool_create_event | def pool_create_event(self, pool_info):
"""Process pool create event.
Extract pool info and get listener info and call next
listen_create_event
"""
pool_data = pool_info.get('pool')
listeners = pool_data.get('listeners')
for listener in listeners:
l_id = listener.get('id')
l_info = self.neutronclient.show_listener(l_id)
self.listener_create_event(l_info) | python | def pool_create_event(self, pool_info):
"""Process pool create event.
Extract pool info and get listener info and call next
listen_create_event
"""
pool_data = pool_info.get('pool')
listeners = pool_data.get('listeners')
for listener in listeners:
l_id = listener.get('id')
l_info = self.neutronclient.show_listener(l_id)
self.listener_create_event(l_info) | [
"def",
"pool_create_event",
"(",
"self",
",",
"pool_info",
")",
":",
"pool_data",
"=",
"pool_info",
".",
"get",
"(",
"'pool'",
")",
"listeners",
"=",
"pool_data",
".",
"get",
"(",
"'listeners'",
")",
"for",
"listener",
"in",
"listeners",
":",
"l_id",
"=",
... | Process pool create event.
Extract pool info and get listener info and call next
listen_create_event | [
"Process",
"pool",
"create",
"event",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/dfa_server.py#L1669-L1680 | train | 37,771 |
openstack/networking-cisco | networking_cisco/apps/saf/server/dfa_server.py | DfaServer.sync_projects | def sync_projects(self):
"""Sync projects.
This function will retrieve project from keystone
and populate them dfa database and dcnm
"""
p = self.keystone_event._service.projects.list()
for proj in p:
if proj.name in not_create_project_name:
continue
LOG.info("Syncing project %s" % proj.name)
self.project_create_func(proj.id, proj=proj) | python | def sync_projects(self):
"""Sync projects.
This function will retrieve project from keystone
and populate them dfa database and dcnm
"""
p = self.keystone_event._service.projects.list()
for proj in p:
if proj.name in not_create_project_name:
continue
LOG.info("Syncing project %s" % proj.name)
self.project_create_func(proj.id, proj=proj) | [
"def",
"sync_projects",
"(",
"self",
")",
":",
"p",
"=",
"self",
".",
"keystone_event",
".",
"_service",
".",
"projects",
".",
"list",
"(",
")",
"for",
"proj",
"in",
"p",
":",
"if",
"proj",
".",
"name",
"in",
"not_create_project_name",
":",
"continue",
... | Sync projects.
This function will retrieve project from keystone
and populate them dfa database and dcnm | [
"Sync",
"projects",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/dfa_server.py#L1690-L1701 | train | 37,772 |
openstack/networking-cisco | networking_cisco/apps/saf/server/dfa_server.py | DfaServer.sync_networks | def sync_networks(self):
"""sync networks.
It will retrieve networks from neutron and populate
them in dfa database and dcnm
"""
nets = self.neutronclient.list_networks()
for net in nets.get("networks"):
LOG.info("Syncing network %s", net["id"])
self.network_create_func(net)
subnets = self.neutronclient.list_subnets()
for subnet in subnets.get("subnets"):
LOG.info("Syncing subnet %s", subnet["id"])
self.create_subnet(subnet) | python | def sync_networks(self):
"""sync networks.
It will retrieve networks from neutron and populate
them in dfa database and dcnm
"""
nets = self.neutronclient.list_networks()
for net in nets.get("networks"):
LOG.info("Syncing network %s", net["id"])
self.network_create_func(net)
subnets = self.neutronclient.list_subnets()
for subnet in subnets.get("subnets"):
LOG.info("Syncing subnet %s", subnet["id"])
self.create_subnet(subnet) | [
"def",
"sync_networks",
"(",
"self",
")",
":",
"nets",
"=",
"self",
".",
"neutronclient",
".",
"list_networks",
"(",
")",
"for",
"net",
"in",
"nets",
".",
"get",
"(",
"\"networks\"",
")",
":",
"LOG",
".",
"info",
"(",
"\"Syncing network %s\"",
",",
"net"... | sync networks.
It will retrieve networks from neutron and populate
them in dfa database and dcnm | [
"sync",
"networks",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/dfa_server.py#L1703-L1716 | train | 37,773 |
openstack/networking-cisco | networking_cisco/apps/saf/server/dfa_server.py | DfaServer.create_threads | def create_threads(self):
"""Create threads on server."""
# Create thread for neutron notifications.
neutron_thrd = utils.EventProcessingThread('Neutron_Event',
self.neutron_event,
'event_handler',
self._excpq)
self.dfa_threads.append(neutron_thrd)
# Create thread for processing notification events.
qp_thrd = utils.EventProcessingThread('Event_Queue', self,
'process_queue', self._excpq)
self.dfa_threads.append(qp_thrd)
# Create thread for keystone notifications.
keys_thrd = utils.EventProcessingThread('Keystone_Event',
self.keystone_event,
'event_handler', self._excpq)
self.dfa_threads.append(keys_thrd)
# Create thread to process RPC calls.
hb_thrd = utils.EventProcessingThread('RPC_Server', self, 'start_rpc',
self._excpq)
self.dfa_threads.append(hb_thrd)
# Create thread to listen to dcnm network events.
if self.dcnm_event is not None:
dcnmL_thrd = utils.EventProcessingThread('DcnmListener',
self.dcnm_event,
'process_amqp_msgs',
self._excpq)
self.dfa_threads.append(dcnmL_thrd)
# Create periodic task to process failure cases in create/delete
# networks and projects.
fr_thrd = utils.PeriodicTask(interval=constants.FAIL_REC_INTERVAL,
func=self.add_events,
event_queue=self.pqueue,
priority=self.PRI_LOW_START + 10,
excq=self._excpq)
# Start all the threads.
for t in self.dfa_threads:
t.start()
# Run the periodic tasks.
fr_thrd.run() | python | def create_threads(self):
"""Create threads on server."""
# Create thread for neutron notifications.
neutron_thrd = utils.EventProcessingThread('Neutron_Event',
self.neutron_event,
'event_handler',
self._excpq)
self.dfa_threads.append(neutron_thrd)
# Create thread for processing notification events.
qp_thrd = utils.EventProcessingThread('Event_Queue', self,
'process_queue', self._excpq)
self.dfa_threads.append(qp_thrd)
# Create thread for keystone notifications.
keys_thrd = utils.EventProcessingThread('Keystone_Event',
self.keystone_event,
'event_handler', self._excpq)
self.dfa_threads.append(keys_thrd)
# Create thread to process RPC calls.
hb_thrd = utils.EventProcessingThread('RPC_Server', self, 'start_rpc',
self._excpq)
self.dfa_threads.append(hb_thrd)
# Create thread to listen to dcnm network events.
if self.dcnm_event is not None:
dcnmL_thrd = utils.EventProcessingThread('DcnmListener',
self.dcnm_event,
'process_amqp_msgs',
self._excpq)
self.dfa_threads.append(dcnmL_thrd)
# Create periodic task to process failure cases in create/delete
# networks and projects.
fr_thrd = utils.PeriodicTask(interval=constants.FAIL_REC_INTERVAL,
func=self.add_events,
event_queue=self.pqueue,
priority=self.PRI_LOW_START + 10,
excq=self._excpq)
# Start all the threads.
for t in self.dfa_threads:
t.start()
# Run the periodic tasks.
fr_thrd.run() | [
"def",
"create_threads",
"(",
"self",
")",
":",
"# Create thread for neutron notifications.",
"neutron_thrd",
"=",
"utils",
".",
"EventProcessingThread",
"(",
"'Neutron_Event'",
",",
"self",
".",
"neutron_event",
",",
"'event_handler'",
",",
"self",
".",
"_excpq",
")"... | Create threads on server. | [
"Create",
"threads",
"on",
"server",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/dfa_server.py#L1718-L1765 | train | 37,774 |
openstack/networking-cisco | networking_cisco/apps/saf/db/dfa_db_models.py | DfaSegmentTypeDriver._allocate_specified_segment | def _allocate_specified_segment(self, session, seg_id, source):
"""Allocate specified segment.
If segment exists, then try to allocate it and return db object
If segment does not exists, then try to create it and return db object
If allocation/creation failed (duplicates), then return None
"""
try:
with session.begin(subtransactions=True):
alloc = (session.query(self.model).filter_by(
segmentation_id=seg_id).first())
if alloc:
if alloc.allocated:
# Segment already allocated
return
else:
# Segment not allocated
count = (session.query(self.model).
filter_by(allocated=False,
segmentation_id=seg_id).update(
{"allocated": True}))
if count:
return alloc
# Segment to create or already allocated
alloc = self.model(segmentation_id=seg_id,
allocated=True, source=source)
session.add(alloc)
except db_exc.DBDuplicateEntry:
# Segment already allocated (insert failure)
alloc = None
return alloc | python | def _allocate_specified_segment(self, session, seg_id, source):
"""Allocate specified segment.
If segment exists, then try to allocate it and return db object
If segment does not exists, then try to create it and return db object
If allocation/creation failed (duplicates), then return None
"""
try:
with session.begin(subtransactions=True):
alloc = (session.query(self.model).filter_by(
segmentation_id=seg_id).first())
if alloc:
if alloc.allocated:
# Segment already allocated
return
else:
# Segment not allocated
count = (session.query(self.model).
filter_by(allocated=False,
segmentation_id=seg_id).update(
{"allocated": True}))
if count:
return alloc
# Segment to create or already allocated
alloc = self.model(segmentation_id=seg_id,
allocated=True, source=source)
session.add(alloc)
except db_exc.DBDuplicateEntry:
# Segment already allocated (insert failure)
alloc = None
return alloc | [
"def",
"_allocate_specified_segment",
"(",
"self",
",",
"session",
",",
"seg_id",
",",
"source",
")",
":",
"try",
":",
"with",
"session",
".",
"begin",
"(",
"subtransactions",
"=",
"True",
")",
":",
"alloc",
"=",
"(",
"session",
".",
"query",
"(",
"self"... | Allocate specified segment.
If segment exists, then try to allocate it and return db object
If segment does not exists, then try to create it and return db object
If allocation/creation failed (duplicates), then return None | [
"Allocate",
"specified",
"segment",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/db/dfa_db_models.py#L138-L171 | train | 37,775 |
openstack/networking-cisco | networking_cisco/apps/saf/db/dfa_db_models.py | DfaSegmentTypeDriver._allocate_segment | def _allocate_segment(self, session, net_id, source):
"""Allocate segment from pool.
Return allocated db object or None.
"""
with session.begin(subtransactions=True):
hour_lapse = utils.utc_time_lapse(self.seg_timeout)
count = (session.query(self.model).filter(
self.model.delete_time < hour_lapse).update(
{"delete_time": None}))
select = (session.query(self.model).filter_by(allocated=False,
delete_time=None))
# Selected segment can be allocated before update by someone else,
# We retry until update success or DB_MAX_RETRIES retries
for attempt in range(DB_MAX_RETRIES + 1):
alloc = select.first()
if not alloc:
LOG.info("No segment resource available")
# No resource available
return
count = (session.query(self.model).
filter_by(segmentation_id=alloc.segmentation_id,
allocated=False).update({"allocated": True,
"network_id": net_id,
"source": source}))
if count:
return alloc
LOG.error("ERROR: Failed to allocate segment for net %(net)s"
" source %(src)s",
{'net': net_id, 'src': source}) | python | def _allocate_segment(self, session, net_id, source):
"""Allocate segment from pool.
Return allocated db object or None.
"""
with session.begin(subtransactions=True):
hour_lapse = utils.utc_time_lapse(self.seg_timeout)
count = (session.query(self.model).filter(
self.model.delete_time < hour_lapse).update(
{"delete_time": None}))
select = (session.query(self.model).filter_by(allocated=False,
delete_time=None))
# Selected segment can be allocated before update by someone else,
# We retry until update success or DB_MAX_RETRIES retries
for attempt in range(DB_MAX_RETRIES + 1):
alloc = select.first()
if not alloc:
LOG.info("No segment resource available")
# No resource available
return
count = (session.query(self.model).
filter_by(segmentation_id=alloc.segmentation_id,
allocated=False).update({"allocated": True,
"network_id": net_id,
"source": source}))
if count:
return alloc
LOG.error("ERROR: Failed to allocate segment for net %(net)s"
" source %(src)s",
{'net': net_id, 'src': source}) | [
"def",
"_allocate_segment",
"(",
"self",
",",
"session",
",",
"net_id",
",",
"source",
")",
":",
"with",
"session",
".",
"begin",
"(",
"subtransactions",
"=",
"True",
")",
":",
"hour_lapse",
"=",
"utils",
".",
"utc_time_lapse",
"(",
"self",
".",
"seg_timeo... | Allocate segment from pool.
Return allocated db object or None. | [
"Allocate",
"segment",
"from",
"pool",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/db/dfa_db_models.py#L173-L207 | train | 37,776 |
def allocate_subnet(self, subnet_lst, net_id=None):
    """Allocate subnet from pool.

    Return allocated db object or None.

    :param subnet_lst: subnet addresses that must be excluded from the
        allocation (already in use by the caller).
    :param net_id: network id recorded on the claimed subnet row.
    """
    session = db.get_session()
    # Build one SQL conjunction of "subnet_address != sub" clauses so
    # the query skips every excluded address.
    query_str = None
    for sub in subnet_lst:
        sub_que = (self.model.subnet_address != sub)
        if query_str is not None:
            query_str = query_str & sub_que
        else:
            query_str = sub_que
    with session.begin(subtransactions=True):
        select = (session.query(self.model).filter(
            (self.model.allocated == 0) & query_str))
        # Selected segment can be allocated before update by someone else,
        # We retry until update success or DB_MAX_RETRIES retries
        for attempt in range(DB_MAX_RETRIES + 1):
            alloc = select.first()
            if not alloc:
                LOG.info("No subnet resource available")
                # Pool exhausted -> implicit None return.
                return
            # Conditional UPDATE claims the row only if it is still
            # unallocated; count == 0 means a concurrent allocator won
            # the race, so retry with a fresh candidate.
            count = (session.query(self.model).
                     filter_by(subnet_address=alloc.subnet_address,
                               allocated=False).update({"allocated": True,
                                                        "network_id": net_id}))
            if count:
                return alloc.subnet_address
        LOG.error("ERROR: Failed to allocate subnet for net %(net)s",
                  {'net': net_id})
        return None
"""Allocate subnet from pool.
Return allocated db object or None.
"""
session = db.get_session()
query_str = None
for sub in subnet_lst:
sub_que = (self.model.subnet_address != sub)
if query_str is not None:
query_str = query_str & sub_que
else:
query_str = sub_que
with session.begin(subtransactions=True):
select = (session.query(self.model).filter(
(self.model.allocated == 0) & query_str))
# Selected segment can be allocated before update by someone else,
# We retry until update success or DB_MAX_RETRIES retries
for attempt in range(DB_MAX_RETRIES + 1):
alloc = select.first()
if not alloc:
LOG.info("No subnet resource available")
return
count = (session.query(self.model).
filter_by(subnet_address=alloc.subnet_address,
allocated=False).update({"allocated": True,
"network_id": net_id}))
if count:
return alloc.subnet_address
LOG.error("ERROR: Failed to allocate subnet for net %(net)s",
{'net': net_id})
return None | [
"def",
"allocate_subnet",
"(",
"self",
",",
"subnet_lst",
",",
"net_id",
"=",
"None",
")",
":",
"session",
"=",
"db",
".",
"get_session",
"(",
")",
"query_str",
"=",
"None",
"for",
"sub",
"in",
"subnet_lst",
":",
"sub_que",
"=",
"(",
"self",
".",
"mode... | Allocate subnet from pool.
Return allocated db object or None. | [
"Allocate",
"subnet",
"from",
"pool",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/db/dfa_db_models.py#L935-L969 | train | 37,777 |
def add_update_topology_db(self, **params):
    """Add or update an entry to the topology DB.

    Expects params['columns'] to be a dict of DfaTopologyDb column
    values; (host, protocol_interface) acts as the lookup key.  An
    existing row is updated in place, otherwise a new row is inserted.
    Errors are logged, never raised to the caller.
    """
    topo_dict = params.get('columns')
    session = db.get_session()
    host = topo_dict.get('host')
    protocol_interface = topo_dict.get('protocol_interface')
    with session.begin(subtransactions=True):
        try:
            # Check if entry exists.
            # .one() raises NoResultFound / MultipleResultsFound, which
            # drives the insert-vs-update decision below.
            session.query(DfaTopologyDb).filter_by(
                host=host, protocol_interface=protocol_interface).one()
            session.query(DfaTopologyDb).filter_by(
                host=host, protocol_interface=protocol_interface).update(
                topo_dict)
        except orm_exc.NoResultFound:
            # No row yet for this (host, interface): insert one.
            LOG.info("Creating new topology entry for host "
                     "%(host)s on Interface %(intf)s",
                     {'host': host, 'intf': protocol_interface})
            topo_disc = DfaTopologyDb(
                host=host, protocol_interface=protocol_interface,
                phy_interface=topo_dict.get('phy_interface'),
                created=topo_dict.get('created'),
                heartbeat=topo_dict.get('heartbeat'),
                remote_mgmt_addr=topo_dict.get('remote_mgmt_addr'),
                remote_system_name=topo_dict.get('remote_system_name'),
                remote_system_desc=topo_dict.get('remote_system_desc'),
                remote_port_id_mac=topo_dict.get('remote_port_id_mac'),
                remote_chassis_id_mac=topo_dict.get(
                    'remote_chassis_id_mac'),
                remote_port=topo_dict.get('remote_port'),
                remote_evb_cfgd=topo_dict.get('remote_evb_cfgd'),
                remote_evb_mode=topo_dict.get('remote_evb_mode'),
                configurations=topo_dict.get('configurations'))
            session.add(topo_disc)
        except orm_exc.MultipleResultsFound:
            # Key should be unique; log and leave the rows untouched.
            LOG.error("More than one enty found for agent %(host)s."
                      "Interface %(intf)s",
                      {'host': host, 'intf': protocol_interface})
        except Exception as exc:
            LOG.error("Exception in add_update_topology_db %s", exc)
"""Add or update an entry to the topology DB. """
topo_dict = params.get('columns')
session = db.get_session()
host = topo_dict.get('host')
protocol_interface = topo_dict.get('protocol_interface')
with session.begin(subtransactions=True):
try:
# Check if entry exists.
session.query(DfaTopologyDb).filter_by(
host=host, protocol_interface=protocol_interface).one()
session.query(DfaTopologyDb).filter_by(
host=host, protocol_interface=protocol_interface).update(
topo_dict)
except orm_exc.NoResultFound:
LOG.info("Creating new topology entry for host "
"%(host)s on Interface %(intf)s",
{'host': host, 'intf': protocol_interface})
topo_disc = DfaTopologyDb(
host=host, protocol_interface=protocol_interface,
phy_interface=topo_dict.get('phy_interface'),
created=topo_dict.get('created'),
heartbeat=topo_dict.get('heartbeat'),
remote_mgmt_addr=topo_dict.get('remote_mgmt_addr'),
remote_system_name=topo_dict.get('remote_system_name'),
remote_system_desc=topo_dict.get('remote_system_desc'),
remote_port_id_mac=topo_dict.get('remote_port_id_mac'),
remote_chassis_id_mac=topo_dict.get(
'remote_chassis_id_mac'),
remote_port=topo_dict.get('remote_port'),
remote_evb_cfgd=topo_dict.get('remote_evb_cfgd'),
remote_evb_mode=topo_dict.get('remote_evb_mode'),
configurations=topo_dict.get('configurations'))
session.add(topo_disc)
except orm_exc.MultipleResultsFound:
LOG.error("More than one enty found for agent %(host)s."
"Interface %(intf)s",
{'host': host, 'intf': protocol_interface})
except Exception as exc:
LOG.error("Exception in add_update_topology_db %s", exc) | [
"def",
"add_update_topology_db",
"(",
"self",
",",
"*",
"*",
"params",
")",
":",
"topo_dict",
"=",
"params",
".",
"get",
"(",
"'columns'",
")",
"session",
"=",
"db",
".",
"get_session",
"(",
")",
"host",
"=",
"topo_dict",
".",
"get",
"(",
"'host'",
")"... | Add or update an entry to the topology DB. | [
"Add",
"or",
"update",
"an",
"entry",
"to",
"the",
"topology",
"DB",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/db/dfa_db_models.py#L1067-L1106 | train | 37,778 |
def _convert_topo_obj_dict(self, topology_objs):
    """Convert topology DB row objects into a list of plain dicts.

    Each dict carries one key per exported DfaTopologyDb column, with
    the value taken verbatim from the row object.
    """
    columns = ('host', 'protocol_interface', 'phy_interface', 'created',
               'heartbeat', 'remote_mgmt_addr', 'remote_system_name',
               'remote_system_desc', 'remote_port_id_mac',
               'remote_chassis_id_mac', 'remote_port', 'remote_evb_cfgd',
               'remote_evb_mode', 'configurations')
    return [{name: getattr(obj, name) for name in columns}
            for obj in topology_objs]
"""Convert topology object to dict. """
topo_lst = []
for topo_obj in topology_objs:
topo_dct = {
'host': topo_obj.host,
'protocol_interface': topo_obj.protocol_interface,
'phy_interface': topo_obj.phy_interface,
'created': topo_obj.created, 'heartbeat': topo_obj.heartbeat,
'remote_mgmt_addr': topo_obj.remote_mgmt_addr,
'remote_system_name': topo_obj.remote_system_name,
'remote_system_desc': topo_obj.remote_system_desc,
'remote_port_id_mac': topo_obj.remote_port_id_mac,
'remote_chassis_id_mac': topo_obj.remote_chassis_id_mac,
'remote_port': topo_obj.remote_port,
'remote_evb_cfgd': topo_obj.remote_evb_cfgd,
'remote_evb_mode': topo_obj.remote_evb_mode,
'configurations': topo_obj.configurations}
topo_lst.append(topo_dct)
return topo_lst | [
"def",
"_convert_topo_obj_dict",
"(",
"self",
",",
"topology_objs",
")",
":",
"topo_lst",
"=",
"[",
"]",
"for",
"topo_obj",
"in",
"topology_objs",
":",
"topo_dct",
"=",
"{",
"'host'",
":",
"topo_obj",
".",
"host",
",",
"'protocol_interface'",
":",
"topo_obj",
... | Convert topology object to dict. | [
"Convert",
"topology",
"object",
"to",
"dict",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/db/dfa_db_models.py#L1108-L1127 | train | 37,779 |
def query_topology_db(self, dict_convert=False, **req):
    """Query an entry to the topology DB.

    :param dict_convert: when True, convert the matching rows to plain
        dicts via _convert_topo_obj_dict.
    :param req: column filters passed straight to filter_by().
    :returns: list of DfaTopologyDb rows (or dicts), possibly empty.
    """
    session = db.get_session()
    with session.begin(subtransactions=True):
        try:
            # Check if entry exists.
            # NOTE(review): Query.all() returns [] rather than raising
            # NoResultFound, so this handler looks unreachable -- confirm.
            topo_disc = session.query(DfaTopologyDb).filter_by(**req).all()
        except orm_exc.NoResultFound:
            LOG.info("No Topology results found for %s", req)
            return None
        if dict_convert:
            return self._convert_topo_obj_dict(topo_disc)
        return topo_disc
"""Query an entry to the topology DB. """
session = db.get_session()
with session.begin(subtransactions=True):
try:
# Check if entry exists.
topo_disc = session.query(DfaTopologyDb).filter_by(**req).all()
except orm_exc.NoResultFound:
LOG.info("No Topology results found for %s", req)
return None
if dict_convert:
return self._convert_topo_obj_dict(topo_disc)
return topo_disc | [
"def",
"query_topology_db",
"(",
"self",
",",
"dict_convert",
"=",
"False",
",",
"*",
"*",
"req",
")",
":",
"session",
"=",
"db",
".",
"get_session",
"(",
")",
"with",
"session",
".",
"begin",
"(",
"subtransactions",
"=",
"True",
")",
":",
"try",
":",
... | Query an entry to the topology DB. | [
"Query",
"an",
"entry",
"to",
"the",
"topology",
"DB",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/db/dfa_db_models.py#L1129-L1141 | train | 37,780 |
def delete_topology_entry(self, **req):
    """Delete the entries from the topology DB.

    :param req: column filters (passed to filter_by) selecting the
        rows to remove.  Failures are logged, never raised.
    """
    session = db.get_session()
    with session.begin(subtransactions=True):
        try:
            # NOTE(review): Query.all() returns [] rather than raising
            # NoResultFound, so this handler looks unreachable -- confirm.
            rows = session.query(DfaTopologyDb).filter_by(**req).all()
        except orm_exc.NoResultFound:
            LOG.info("No Topology results found for %s", req)
            return
        try:
            # Delete every matching row within the same transaction.
            for row in rows:
                session.delete(row)
        except Exception as exc:
            LOG.error("Exception raised %s", str(exc))
"""Delete the entries from the topology DB. """
session = db.get_session()
with session.begin(subtransactions=True):
try:
rows = session.query(DfaTopologyDb).filter_by(**req).all()
except orm_exc.NoResultFound:
LOG.info("No Topology results found for %s", req)
return
try:
for row in rows:
session.delete(row)
except Exception as exc:
LOG.error("Exception raised %s", str(exc)) | [
"def",
"delete_topology_entry",
"(",
"self",
",",
"*",
"*",
"req",
")",
":",
"session",
"=",
"db",
".",
"get_session",
"(",
")",
"with",
"session",
".",
"begin",
"(",
"subtransactions",
"=",
"True",
")",
":",
"try",
":",
"rows",
"=",
"session",
".",
... | Delete the entries from the topology DB. | [
"Delete",
"the",
"entries",
"from",
"the",
"topology",
"DB",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/db/dfa_db_models.py#L1143-L1156 | train | 37,781 |
def get_lldp_tlv(self, port_name, is_ncb=True, is_nb=False):
    """Fetch the LLDP TLVs received on *port_name* via lldptool.

    Exactly one agent group is queried: nearest-customer-bridge (ncb)
    wins when both flags are set, otherwise nearest-bridge (nb).
    Returns the raw lldptool output, or None when neither group is
    selected (an error is logged in that case).
    """
    if is_ncb:
        return self.run_lldptool(["get-tlv", "-n", "-i", port_name,
                                  "-g", "ncb"])
    if is_nb:
        return self.run_lldptool(["get-tlv", "-n", "-i", port_name,
                                  "-g", "nb"])
    LOG.error("Both NCB and NB are not selected to "
              "query LLDP")
    return None
"""Function to Query LLDP TLV on the interface. """
reply = None
if is_ncb:
reply = self.run_lldptool(["get-tlv", "-n", "-i", port_name,
"-g", "ncb"])
elif is_nb:
reply = self.run_lldptool(["get-tlv", "-n", "-i", port_name,
"-g", "nb"])
else:
LOG.error("Both NCB and NB are not selected to "
"query LLDP")
return reply | [
"def",
"get_lldp_tlv",
"(",
"self",
",",
"port_name",
",",
"is_ncb",
"=",
"True",
",",
"is_nb",
"=",
"False",
")",
":",
"reply",
"=",
"None",
"if",
"is_ncb",
":",
"reply",
"=",
"self",
".",
"run_lldptool",
"(",
"[",
"\"get-tlv\"",
",",
"\"-n\"",
",",
... | Function to Query LLDP TLV on the interface. | [
"Function",
"to",
"Query",
"LLDP",
"TLV",
"on",
"the",
"interface",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/agent/topo_disc/pub_lldp_api.py#L53-L65 | train | 37,782 |
def _check_common_tlv_format(self, tlv_complete_data, tlv_data_pattern,
                             tlv_string):
    """Validate that a TLV section contains the expected data pattern.

    Looks for *tlv_string* in the raw lldptool output, isolates that
    TLV's text (everything up to the next 'TLV' marker) and splits it
    on *tlv_data_pattern*.

    Returns a (found, fields) tuple: (False, None) when the TLV or the
    pattern is absent, otherwise (True, <split fields>) where
    fields[1] holds the text following the pattern.
    """
    if tlv_complete_data is None:
        return False, None
    sections = tlv_complete_data.split(tlv_string)
    if len(sections) < 2:
        return False, None
    tlv_body = sections[1].split('TLV')[0]
    fields = tlv_body.split(tlv_data_pattern)
    if len(fields) < 2:
        return False, None
    return True, fields
tlv_string):
"""Check for the common TLV format. """
if tlv_complete_data is None:
return False, None
tlv_string_split = tlv_complete_data.split(tlv_string)
if len(tlv_string_split) < 2:
return False, None
next_tlv_list = tlv_string_split[1].split('TLV')[0]
tlv_val_set = next_tlv_list.split(tlv_data_pattern)
if len(tlv_val_set) < 2:
return False, None
return True, tlv_val_set | [
"def",
"_check_common_tlv_format",
"(",
"self",
",",
"tlv_complete_data",
",",
"tlv_data_pattern",
",",
"tlv_string",
")",
":",
"if",
"tlv_complete_data",
"is",
"None",
":",
"return",
"False",
",",
"None",
"tlv_string_split",
"=",
"tlv_complete_data",
".",
"split",
... | Check for the common TLV format. | [
"Check",
"for",
"the",
"common",
"TLV",
"format",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/agent/topo_disc/pub_lldp_api.py#L77-L89 | train | 37,783 |
def get_remote_evb_mode(self, tlv_data):
    """Extract the EVB mode advertised in the EVB Configuration TLV.

    Returns the mode string, or None when the TLV or its 'mode:'
    field is missing from *tlv_data*.
    """
    found, fields = self._check_common_tlv_format(
        tlv_data, "mode:", "EVB Configuration TLV")
    if not found:
        return None
    return fields[1].split()[0].strip()
"""Returns the EVB mode in the TLV. """
ret, parsed_val = self._check_common_tlv_format(
tlv_data, "mode:", "EVB Configuration TLV")
if not ret:
return None
mode_val = parsed_val[1].split()[0].strip()
return mode_val | [
"def",
"get_remote_evb_mode",
"(",
"self",
",",
"tlv_data",
")",
":",
"ret",
",",
"parsed_val",
"=",
"self",
".",
"_check_common_tlv_format",
"(",
"tlv_data",
",",
"\"mode:\"",
",",
"\"EVB Configuration TLV\"",
")",
"if",
"not",
"ret",
":",
"return",
"None",
"... | Returns the EVB mode in the TLV. | [
"Returns",
"the",
"EVB",
"mode",
"in",
"the",
"TLV",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/agent/topo_disc/pub_lldp_api.py#L96-L103 | train | 37,784 |
def get_remote_mgmt_addr(self, tlv_data):
    """Extract the peer's management address from the TLV dump.

    Returns the address prefixed with its family ('IPv4:<addr>'), or
    None when the Management Address TLV is absent.
    """
    found, fields = self._check_common_tlv_format(
        tlv_data, "IPv4:", "Management Address TLV")
    if not found:
        return None
    return 'IPv4:' + fields[1].split('\n')[0].strip()
"""Returns Remote Mgmt Addr from the TLV. """
ret, parsed_val = self._check_common_tlv_format(
tlv_data, "IPv4:", "Management Address TLV")
if not ret:
return None
addr_fam = 'IPv4:'
addr = parsed_val[1].split('\n')[0].strip()
return addr_fam + addr | [
"def",
"get_remote_mgmt_addr",
"(",
"self",
",",
"tlv_data",
")",
":",
"ret",
",",
"parsed_val",
"=",
"self",
".",
"_check_common_tlv_format",
"(",
"tlv_data",
",",
"\"IPv4:\"",
",",
"\"Management Address TLV\"",
")",
"if",
"not",
"ret",
":",
"return",
"None",
... | Returns Remote Mgmt Addr from the TLV. | [
"Returns",
"Remote",
"Mgmt",
"Addr",
"from",
"the",
"TLV",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/agent/topo_disc/pub_lldp_api.py#L105-L113 | train | 37,785 |
def get_remote_sys_desc(self, tlv_data):
    """Extract the peer's system description from the TLV dump.

    Returns the stripped description text, or None when the System
    Description TLV is absent.
    """
    found, fields = self._check_common_tlv_format(
        tlv_data, "\n", "System Description TLV")
    if not found:
        return None
    return fields[1].strip()
"""Returns Remote Sys Desc from the TLV. """
ret, parsed_val = self._check_common_tlv_format(
tlv_data, "\n", "System Description TLV")
if not ret:
return None
return parsed_val[1].strip() | [
"def",
"get_remote_sys_desc",
"(",
"self",
",",
"tlv_data",
")",
":",
"ret",
",",
"parsed_val",
"=",
"self",
".",
"_check_common_tlv_format",
"(",
"tlv_data",
",",
"\"\\n\"",
",",
"\"System Description TLV\"",
")",
"if",
"not",
"ret",
":",
"return",
"None",
"r... | Returns Remote Sys Desc from the TLV. | [
"Returns",
"Remote",
"Sys",
"Desc",
"from",
"the",
"TLV",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/agent/topo_disc/pub_lldp_api.py#L115-L121 | train | 37,786 |
def get_remote_sys_name(self, tlv_data):
    """Extract the peer's system name from the TLV dump.

    Returns the stripped name, or None when the System Name TLV is
    absent.
    """
    found, fields = self._check_common_tlv_format(
        tlv_data, "\n", "System Name TLV")
    if not found:
        return None
    return fields[1].strip()
"""Returns Remote Sys Name from the TLV. """
ret, parsed_val = self._check_common_tlv_format(
tlv_data, "\n", "System Name TLV")
if not ret:
return None
return parsed_val[1].strip() | [
"def",
"get_remote_sys_name",
"(",
"self",
",",
"tlv_data",
")",
":",
"ret",
",",
"parsed_val",
"=",
"self",
".",
"_check_common_tlv_format",
"(",
"tlv_data",
",",
"\"\\n\"",
",",
"\"System Name TLV\"",
")",
"if",
"not",
"ret",
":",
"return",
"None",
"return",... | Returns Remote Sys Name from the TLV. | [
"Returns",
"Remote",
"Sys",
"Name",
"from",
"the",
"TLV",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/agent/topo_disc/pub_lldp_api.py#L123-L129 | train | 37,787 |
def get_remote_port(self, tlv_data):
    """Extract the peer's port description from the TLV dump.

    Returns the stripped port text, or None when the Port Description
    TLV is absent.
    """
    found, fields = self._check_common_tlv_format(
        tlv_data, "\n", "Port Description TLV")
    if not found:
        return None
    return fields[1].strip()
"""Returns Remote Port from the TLV. """
ret, parsed_val = self._check_common_tlv_format(
tlv_data, "\n", "Port Description TLV")
if not ret:
return None
return parsed_val[1].strip() | [
"def",
"get_remote_port",
"(",
"self",
",",
"tlv_data",
")",
":",
"ret",
",",
"parsed_val",
"=",
"self",
".",
"_check_common_tlv_format",
"(",
"tlv_data",
",",
"\"\\n\"",
",",
"\"Port Description TLV\"",
")",
"if",
"not",
"ret",
":",
"return",
"None",
"return"... | Returns Remote Port from the TLV. | [
"Returns",
"Remote",
"Port",
"from",
"the",
"TLV",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/agent/topo_disc/pub_lldp_api.py#L131-L137 | train | 37,788 |
def get_remote_chassis_id_mac(self, tlv_data):
    """Extract the peer's chassis-ID MAC from the TLV dump.

    Returns the MAC string, or None when the Chassis ID TLV (or its
    'MAC:' subtype) is absent.
    """
    found, fields = self._check_common_tlv_format(
        tlv_data, "MAC:", "Chassis ID TLV")
    if not found:
        return None
    return fields[1].split('\n')[0].strip()
"""Returns Remote Chassis ID MAC from the TLV. """
ret, parsed_val = self._check_common_tlv_format(
tlv_data, "MAC:", "Chassis ID TLV")
if not ret:
return None
mac = parsed_val[1].split('\n')
return mac[0].strip() | [
"def",
"get_remote_chassis_id_mac",
"(",
"self",
",",
"tlv_data",
")",
":",
"ret",
",",
"parsed_val",
"=",
"self",
".",
"_check_common_tlv_format",
"(",
"tlv_data",
",",
"\"MAC:\"",
",",
"\"Chassis ID TLV\"",
")",
"if",
"not",
"ret",
":",
"return",
"None",
"ma... | Returns Remote Chassis ID MAC from the TLV. | [
"Returns",
"Remote",
"Chassis",
"ID",
"MAC",
"from",
"the",
"TLV",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/agent/topo_disc/pub_lldp_api.py#L139-L146 | train | 37,789 |
def get_remote_port_id_local(self, tlv_data):
    """Extract the locally-assigned port ID from the Port ID TLV.

    Returns the identifier string, or None when the TLV or its
    'Local:' subtype is absent.
    """
    found, fields = self._check_common_tlv_format(
        tlv_data, "Local:", "Port ID TLV")
    if not found:
        return None
    return fields[1].split('\n')[0].strip()
"""Returns Remote Port ID Local from the TLV. """
ret, parsed_val = self._check_common_tlv_format(
tlv_data, "Local:", "Port ID TLV")
if not ret:
return None
local = parsed_val[1].split('\n')
return local[0].strip() | [
"def",
"get_remote_port_id_local",
"(",
"self",
",",
"tlv_data",
")",
":",
"ret",
",",
"parsed_val",
"=",
"self",
".",
"_check_common_tlv_format",
"(",
"tlv_data",
",",
"\"Local:\"",
",",
"\"Port ID TLV\"",
")",
"if",
"not",
"ret",
":",
"return",
"None",
"loca... | Returns Remote Port ID Local from the TLV. | [
"Returns",
"Remote",
"Port",
"ID",
"Local",
"from",
"the",
"TLV",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/agent/topo_disc/pub_lldp_api.py#L157-L164 | train | 37,790 |
def format_interface_name(intf_type, port, ch_grp=0):
    """Build the canonical '<type>:<port>' interface name.

    A non-zero channel-group takes precedence over the other
    arguments and yields a port-channel name instead.

    :param intf_type: interface type, e.g. 'ethernet' or 'port-channel'
    :param port: port identifier, e.g. '1/32' or '1'
    :param ch_grp: optional channel-group number; when non-zero the
        result is 'port-channel:<ch_grp>' regardless of the other args
    :returns: formatted interface name, e.g. 'ethernet:1/32'
    """
    if ch_grp > 0:
        return 'port-channel:{0}'.format(ch_grp)
    return '{0}:{1}'.format(intf_type.lower(), port)
"""Method to format interface name given type, port.
Given interface type, port, and channel-group, this
method formats an interface name. If channel-group is
non-zero, then port-channel is configured.
:param intf_type: Such as 'ethernet' or 'port-channel'
:param port: unique identification -- 1/32 or 1
:ch_grp: If non-zero, ignore other params and format
port-channel<ch_grp>
:returns: the full formatted interface name.
ex: ethernet:1/32, port-channel:1
"""
if ch_grp > 0:
return 'port-channel:%s' % str(ch_grp)
return '%s:%s' % (intf_type.lower(), port) | [
"def",
"format_interface_name",
"(",
"intf_type",
",",
"port",
",",
"ch_grp",
"=",
"0",
")",
":",
"if",
"ch_grp",
">",
"0",
":",
"return",
"'port-channel:%s'",
"%",
"str",
"(",
"ch_grp",
")",
"return",
"'%s:%s'",
"%",
"(",
"intf_type",
".",
"lower",
"(",... | Method to format interface name given type, port.
Given interface type, port, and channel-group, this
method formats an interface name. If channel-group is
non-zero, then port-channel is configured.
:param intf_type: Such as 'ethernet' or 'port-channel'
:param port: unique identification -- 1/32 or 1
:ch_grp: If non-zero, ignore other params and format
port-channel<ch_grp>
:returns: the full formatted interface name.
ex: ethernet:1/32, port-channel:1 | [
"Method",
"to",
"format",
"interface",
"name",
"given",
"type",
"port",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/ml2_drivers/nexus/nexus_helpers.py#L23-L40 | train | 37,791 |
def split_interface_name(interface, ch_grp=0):
    """Split an interface name into its type and port identifier.

    Accepts 'type:port' names, bare 'ethernetX/Y' / 'port-channelN'
    names (embedded spaces tolerated), or a plain port suffix (assumed
    to be ethernet).  A non-zero *ch_grp* overrides the name entirely.

    :param interface: interface name or bare port suffix
    :param ch_grp: optional channel-group number; when non-zero the
        result is ('port-channel', str(ch_grp))
    :returns: (intf_type, port) tuple, e.g. ('ethernet', '1/32')
    """
    name = interface.lower()
    if ch_grp != 0:
        return 'port-channel', str(ch_grp)
    if ':' in name:
        intf_type, port = name.split(':')
        return intf_type, port
    for prefix in ('ethernet', 'port-channel'):
        if name.startswith(prefix):
            # Drop embedded spaces, then take everything after the
            # type keyword as the port suffix.
            return prefix, name.replace(" ", "").partition(prefix)[2]
    # No recognizable type keyword: treat the whole value as an
    # ethernet port suffix.
    return 'ethernet', name
"""Method to split interface type, id from name.
Takes an interface name or just interface suffix
and returns interface type and number separately.
:param interface: interface name or just suffix
:param ch_grp: if non-zero, ignore interface
name and return 'port-channel' grp
:returns: interface type like 'ethernet'
:returns: returns suffix to interface name
"""
interface = interface.lower()
if ch_grp != 0:
intf_type = 'port-channel'
port = str(ch_grp)
elif ':' in interface:
intf_type, port = interface.split(':')
elif interface.startswith('ethernet'):
interface = interface.replace(" ", "")
_, intf_type, port = interface.partition('ethernet')
elif interface.startswith('port-channel'):
interface = interface.replace(" ", "")
_, intf_type, port = interface.partition('port-channel')
else:
intf_type, port = 'ethernet', interface
return intf_type, port | [
"def",
"split_interface_name",
"(",
"interface",
",",
"ch_grp",
"=",
"0",
")",
":",
"interface",
"=",
"interface",
".",
"lower",
"(",
")",
"if",
"ch_grp",
"!=",
"0",
":",
"intf_type",
"=",
"'port-channel'",
"port",
"=",
"str",
"(",
"ch_grp",
")",
"elif",... | Method to split interface type, id from name.
Takes an interface name or just interface suffix
and returns interface type and number separately.
:param interface: interface name or just suffix
:param ch_grp: if non-zero, ignore interface
name and return 'port-channel' grp
:returns: interface type like 'ethernet'
:returns: returns suffix to interface name | [
"Method",
"to",
"split",
"interface",
"type",
"id",
"from",
"name",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/ml2_drivers/nexus/nexus_helpers.py#L43-L71 | train | 37,792 |
def _host_notification(self, context, method, payload, host):
    """Cast *method* with *payload* to the cfg agent running on *host*."""
    LOG.debug('Notify Cisco cfg agent at %(host)s the message '
              '%(method)s', {'host': host, 'method': method})
    self.client.prepare(server=host).cast(context, method, payload=payload)
"""Notify the cfg agent that is handling the hosting device."""
LOG.debug('Notify Cisco cfg agent at %(host)s the message '
'%(method)s', {'host': host, 'method': method})
cctxt = self.client.prepare(server=host)
cctxt.cast(context, method, payload=payload) | [
"def",
"_host_notification",
"(",
"self",
",",
"context",
",",
"method",
",",
"payload",
",",
"host",
")",
":",
"LOG",
".",
"debug",
"(",
"'Notify Cisco cfg agent at %(host)s the message '",
"'%(method)s'",
",",
"{",
"'host'",
":",
"host",
",",
"'method'",
":",
... | Notify the cfg agent that is handling the hosting device. | [
"Notify",
"the",
"cfg",
"agent",
"that",
"is",
"handling",
"the",
"hosting",
"device",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/plugins/cisco/device_manager/rpc/devmgr_rpc_cfgagent_api.py#L39-L44 | train | 37,793 |
def _agent_notification(self, context, method, hosting_devices, operation):
    """Cast *method* to every cfg agent handling the given hosting devices.

    For each hosting device the device-manager plugin supplies the
    admin-state-up cfg agents monitoring it (scheduling one if needed),
    and the notification is cast to each of them.  *operation* is kept
    for interface compatibility; it is not used here.
    """
    admin_context = context if context.is_admin else context.elevated()
    for hosting_device in hosting_devices:
        agents = self._dmplugin.get_cfg_agents_for_hosting_devices(
            admin_context, hosting_device['id'], admin_state_up=True,
            schedule=True)
        for agent in agents:
            LOG.debug('Notify %(agent_type)s at %(topic)s.%(host)s the '
                      'message %(method)s',
                      {'agent_type': agent.agent_type,
                       'topic': agent.topic,
                       'host': agent.host,
                       'method': method})
            self.client.prepare(server=agent.host).cast(context, method)
"""Notify individual Cisco cfg agents."""
admin_context = context.is_admin and context or context.elevated()
for hosting_device in hosting_devices:
agents = self._dmplugin.get_cfg_agents_for_hosting_devices(
admin_context, hosting_device['id'], admin_state_up=True,
schedule=True)
for agent in agents:
LOG.debug('Notify %(agent_type)s at %(topic)s.%(host)s the '
'message %(method)s',
{'agent_type': agent.agent_type,
'topic': agent.topic,
'host': agent.host,
'method': method})
cctxt = self.client.prepare(server=agent.host)
cctxt.cast(context, method) | [
"def",
"_agent_notification",
"(",
"self",
",",
"context",
",",
"method",
",",
"hosting_devices",
",",
"operation",
")",
":",
"admin_context",
"=",
"context",
".",
"is_admin",
"and",
"context",
"or",
"context",
".",
"elevated",
"(",
")",
"for",
"hosting_device... | Notify individual Cisco cfg agents. | [
"Notify",
"individual",
"Cisco",
"cfg",
"agents",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/plugins/cisco/device_manager/rpc/devmgr_rpc_cfgagent_api.py#L46-L61 | train | 37,794 |
def hosting_devices_unassigned_from_cfg_agent(self, context, ids, host):
    """Tell the cfg agent on *host* to stop handling some hosting devices.

    The agent releases responsibility for monitoring and configuring
    the hosting devices whose ids are listed in *ids*.
    """
    payload = {'hosting_device_ids': ids}
    self._host_notification(
        context, 'hosting_devices_unassigned_from_cfg_agent', payload,
        host)
"""Notify cfg agent to no longer handle some hosting devices.
This notification relieves the cfg agent in <host> of responsibility
to monitor and configure hosting devices with id specified in <ids>.
"""
self._host_notification(context,
'hosting_devices_unassigned_from_cfg_agent',
{'hosting_device_ids': ids}, host) | [
"def",
"hosting_devices_unassigned_from_cfg_agent",
"(",
"self",
",",
"context",
",",
"ids",
",",
"host",
")",
":",
"self",
".",
"_host_notification",
"(",
"context",
",",
"'hosting_devices_unassigned_from_cfg_agent'",
",",
"{",
"'hosting_device_ids'",
":",
"ids",
"}"... | Notify cfg agent to no longer handle some hosting devices.
This notification relieves the cfg agent in <host> of responsibility
to monitor and configure hosting devices with id specified in <ids>. | [
"Notify",
"cfg",
"agent",
"to",
"no",
"longer",
"handle",
"some",
"hosting",
"devices",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/plugins/cisco/device_manager/rpc/devmgr_rpc_cfgagent_api.py#L68-L76 | train | 37,795 |
def hosting_devices_assigned_to_cfg_agent(self, context, ids, host):
    """Tell the cfg agent on *host* to start handling some hosting devices.

    The agent takes responsibility for monitoring and configuring the
    hosting devices whose ids are listed in *ids*.
    """
    payload = {'hosting_device_ids': ids}
    self._host_notification(
        context, 'hosting_devices_assigned_to_cfg_agent', payload, host)
"""Notify cfg agent to now handle some hosting devices.
This notification relieves the cfg agent in <host> of responsibility
to monitor and configure hosting devices with id specified in <ids>.
"""
self._host_notification(context,
'hosting_devices_assigned_to_cfg_agent',
{'hosting_device_ids': ids}, host) | [
"def",
"hosting_devices_assigned_to_cfg_agent",
"(",
"self",
",",
"context",
",",
"ids",
",",
"host",
")",
":",
"self",
".",
"_host_notification",
"(",
"context",
",",
"'hosting_devices_assigned_to_cfg_agent'",
",",
"{",
"'hosting_device_ids'",
":",
"ids",
"}",
",",... | Notify cfg agent to now handle some hosting devices.
This notification relieves the cfg agent in <host> of responsibility
to monitor and configure hosting devices with id specified in <ids>. | [
"Notify",
"cfg",
"agent",
"to",
"now",
"handle",
"some",
"hosting",
"devices",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/plugins/cisco/device_manager/rpc/devmgr_rpc_cfgagent_api.py#L78-L86 | train | 37,796 |
def hosting_devices_removed(self, context, hosting_data, deconfigure,
                            host):
    """Tell the cfg agent on *host* that hosting devices were removed.

    *hosting_data* maps each removed hosting device id to the ids of
    the logical resources it was hosting::

        {'hd_id1': {'routers': [id1, id2, ...], 'fw': [id1, ...]},
         'hd_id2': {'routers': [id3, id4, ...], 'fw': [id1, ...]},
         ...}

    When *deconfigure* is True the agent also removes the resources'
    configuration from the hosting devices.  Nothing is sent when
    *hosting_data* is empty.
    """
    if not hosting_data:
        return
    self._host_notification(context, 'hosting_devices_removed',
                            {'hosting_data': hosting_data,
                             'deconfigure': deconfigure}, host)
host):
"""Notify cfg agent that some hosting devices have been removed.
This notification informs the cfg agent in <host> that the
hosting devices in the <hosting_data> dictionary have been removed
from the hosting device pool. The <hosting_data> dictionary also
contains the ids of the affected logical resources for each hosting
devices::
{'hd_id1': {'routers': [id1, id2, ...],
'fw': [id1, ...],
...},
'hd_id2': {'routers': [id3, id4, ...]},
'fw': [id1, ...],
...},
...}
The <deconfigure> argument is True if any configurations for the
logical resources should be removed from the hosting devices
"""
if hosting_data:
self._host_notification(context, 'hosting_devices_removed',
{'hosting_data': hosting_data,
'deconfigure': deconfigure}, host) | [
"def",
"hosting_devices_removed",
"(",
"self",
",",
"context",
",",
"hosting_data",
",",
"deconfigure",
",",
"host",
")",
":",
"if",
"hosting_data",
":",
"self",
".",
"_host_notification",
"(",
"context",
",",
"'hosting_devices_removed'",
",",
"{",
"'hosting_data'... | Notify cfg agent that some hosting devices have been removed.
This notification informs the cfg agent in <host> that the
hosting devices in the <hosting_data> dictionary have been removed
from the hosting device pool. The <hosting_data> dictionary also
contains the ids of the affected logical resources for each hosting
devices::
{'hd_id1': {'routers': [id1, id2, ...],
'fw': [id1, ...],
...},
'hd_id2': {'routers': [id3, id4, ...]},
'fw': [id1, ...],
...},
...}
The <deconfigure> argument is True if any configurations for the
logical resources should be removed from the hosting devices | [
"Notify",
"cfg",
"agent",
"that",
"some",
"hosting",
"devices",
"have",
"been",
"removed",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/plugins/cisco/device_manager/rpc/devmgr_rpc_cfgagent_api.py#L88-L112 | train | 37,797 |
openstack/networking-cisco | networking_cisco/plugins/cisco/device_manager/rpc/devmgr_rpc_cfgagent_api.py | DeviceMgrCfgAgentNotifyAPI.get_hosting_device_configuration | def get_hosting_device_configuration(self, context, id):
"""Fetch configuration of hosting device with id.
The configuration agent should respond with the running config of
the hosting device.
"""
admin_context = context.is_admin and context or context.elevated()
agents = self._dmplugin.get_cfg_agents_for_hosting_devices(
admin_context, [id], admin_state_up=True, schedule=True)
if agents:
cctxt = self.client.prepare(server=agents[0].host)
return cctxt.call(context, 'get_hosting_device_configuration',
payload={'hosting_device_id': id}) | python | def get_hosting_device_configuration(self, context, id):
"""Fetch configuration of hosting device with id.
The configuration agent should respond with the running config of
the hosting device.
"""
admin_context = context.is_admin and context or context.elevated()
agents = self._dmplugin.get_cfg_agents_for_hosting_devices(
admin_context, [id], admin_state_up=True, schedule=True)
if agents:
cctxt = self.client.prepare(server=agents[0].host)
return cctxt.call(context, 'get_hosting_device_configuration',
payload={'hosting_device_id': id}) | [
"def",
"get_hosting_device_configuration",
"(",
"self",
",",
"context",
",",
"id",
")",
":",
"admin_context",
"=",
"context",
".",
"is_admin",
"and",
"context",
"or",
"context",
".",
"elevated",
"(",
")",
"agents",
"=",
"self",
".",
"_dmplugin",
".",
"get_cf... | Fetch configuration of hosting device with id.
The configuration agent should respond with the running config of
the hosting device. | [
"Fetch",
"configuration",
"of",
"hosting",
"device",
"with",
"id",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/plugins/cisco/device_manager/rpc/devmgr_rpc_cfgagent_api.py#L115-L127 | train | 37,798 |
openstack/networking-cisco | networking_cisco/apps/saf/server/services/firewall/native/fw_mgr.py | FwMapAttr.store_policy | def store_policy(self, pol_id, policy):
"""Store the policy.
Policy is maintained as a dictionary of pol ID.
"""
if pol_id not in self.policies:
self.policies[pol_id] = policy
self.policy_cnt += 1 | python | def store_policy(self, pol_id, policy):
"""Store the policy.
Policy is maintained as a dictionary of pol ID.
"""
if pol_id not in self.policies:
self.policies[pol_id] = policy
self.policy_cnt += 1 | [
"def",
"store_policy",
"(",
"self",
",",
"pol_id",
",",
"policy",
")",
":",
"if",
"pol_id",
"not",
"in",
"self",
".",
"policies",
":",
"self",
".",
"policies",
"[",
"pol_id",
"]",
"=",
"policy",
"self",
".",
"policy_cnt",
"+=",
"1"
] | Store the policy.
Policy is maintained as a dictionary of pol ID. | [
"Store",
"the",
"policy",
"."
] | aa58a30aec25b86f9aa5952b0863045975debfa9 | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/services/firewall/native/fw_mgr.py#L106-L113 | train | 37,799 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.