idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
37,700
def is_subnet_present(self, subnet_addr):
    """Return True if a subnet with CIDR *subnet_addr* exists, else False."""
    try:
        reply = self.neutronclient.list_subnets(body={})
        # Any subnet whose CIDR matches makes the answer True.
        return any(entry.get('cidr') == subnet_addr
                   for entry in reply.get('subnets'))
    except Exception as exc:
        LOG.error("Failed to list subnet %(sub)s, Exc %(exc)s",
                  {'sub': subnet_addr, 'exc': str(exc)})
        return False
Returns if a subnet is present .
37,701
def get_all_subnets_cidr(self, no_mask=False):
    """Return the CIDRs of all subnets.

    With *no_mask* set, only the network address (without the prefix
    length) is returned for each subnet.
    """
    cidrs = []
    try:
        reply = self.neutronclient.list_subnets(body={})
        for entry in reply.get('subnets'):
            cidr = entry.get('cidr')
            cidrs.append(cidr.split('/')[0] if no_mask else cidr)
    except Exception as exc:
        LOG.error("Failed to list subnet Exc %s", str(exc))
    return cidrs
Returns all the subnets .
37,702
def get_subnets_for_net(self, net):
    """Return the list of subnets of network *net*, or None on error."""
    try:
        reply = self.neutronclient.list_subnets(network_id=net)
        return reply.get('subnets')
    except Exception as exc:
        LOG.error("Failed to list subnet net %(net)s, Exc: %(exc)s",
                  {'net': net, 'exc': str(exc)})
        return None
Returns the subnets in a network .
37,703
def get_subnet_cidr(self, subnet_id):
    """Return the CIDR of the subnet with the given ID, or None on error."""
    try:
        first = self.neutronclient.list_subnets(
            id=subnet_id).get('subnets')[0]
        return first.get('cidr')
    except Exception as exc:
        LOG.error("Failed to list subnet for ID %(subnet)s, "
                  "exc %(exc)s", {'subnet': subnet_id, 'exc': exc})
        return None
retrieve the CIDR associated with a subnet given its ID .
37,704
def delete_network_subname(self, sub_name):
    """Delete every network whose name contains *sub_name*.

    Use with caution: this matches on a substring of the network name.
    """
    try:
        net_list = self.neutronclient.list_networks(body={})
        # BUG FIX: the reply is a dict; the networks live under the
        # 'networks' key (iterating the dict itself only yields keys,
        # which made this method always fail with AttributeError).
        for net in net_list.get('networks'):
            if net.get('name').find(sub_name) != -1:
                # Network dicts carry their UUID under 'id' (as used by
                # the sibling lookup methods), not 'net_id'.
                self.delete_network_all_subnets(net.get('id'))
    except Exception as exc:
        LOG.error("Failed to get network by subname %(name)s, "
                  "Exc %(exc)s", {'name': sub_name, 'exc': str(exc)})
Delete the network by part of its name; use with caution.
37,705
def get_network_by_name(self, nwk_name):
    """Search for an openstack network by name; returns all matches."""
    matches = []
    try:
        nets = self.neutronclient.list_networks(body={}).get('networks')
        for net in nets:
            if net.get('name') == nwk_name:
                matches.append(net)
    except Exception as exc:
        LOG.error("Failed to get network by name %(name)s, "
                  "Exc %(exc)s", {'name': nwk_name, 'exc': str(exc)})
    return matches
Search for an OpenStack network by name.
37,706
def get_network_by_tenant(self, tenant_id):
    """Return the networks owned by the given tenant."""
    matches = []
    try:
        reply = self.neutronclient.list_networks(body={})
        for net in reply.get('networks'):
            if net.get('tenant_id') == tenant_id:
                matches.append(net)
    except Exception as exc:
        LOG.error("Failed to get network by tenant %(tenant)s, "
                  "Exc %(exc)s", {'tenant': tenant_id, 'exc': str(exc)})
    return matches
Returns the network of a given tenant .
37,707
def get_rtr_by_name(self, rtr_name):
    """Search a router by its name; returns every match."""
    matches = []
    try:
        reply = self.neutronclient.list_routers()
        for rtr in reply.get('routers'):
            if rtr['name'] == rtr_name:
                matches.append(rtr)
    except Exception as exc:
        LOG.error("Failed to get router by name %(name)s, "
                  "Exc %(exc)s", {'name': rtr_name, 'exc': str(exc)})
    return matches
Search a router by its name .
37,708
def create_router(self, name, tenant_id, subnet_lst):
    """Create a openstack router and add the interfaces.

    Returns the new router's id on success.  Returns None if the
    create fails, or if attaching any interface fails (in which case
    the just-created router is deleted again).
    """
    try:
        body = {'router': {'name': name, 'tenant_id': tenant_id,
                           'admin_state_up': True}}
        router = self.neutronclient.create_router(body=body)
        rout_dict = router.get('router')
        rout_id = rout_dict.get('id')
    except Exception as exc:
        LOG.error("Failed to create router with name %(name)s"
                  " Exc %(exc)s", {'name': name, 'exc': str(exc)})
        return None
    ret = self.add_intf_router(rout_id, tenant_id, subnet_lst)
    if not ret:
        # Roll back: remove the partially configured router.
        try:
            ret = self.neutronclient.delete_router(rout_id)
        except Exception as exc:
            LOG.error("Failed to delete router %(name)s, Exc %(exc)s",
                      {'name': name, 'exc': str(exc)})
        return None
    return rout_id
Create a openstack router and add the interfaces .
37,709
def add_intf_router(self, rout_id, tenant_id, subnet_lst):
    """Add the interfaces to a router.

    Returns True when every subnet interface was attached, False on
    the first failure.
    """
    try:
        for subnet_id in subnet_lst:
            intf = self.neutronclient.add_interface_router(
                rout_id, body={'subnet_id': subnet_id})
            intf.get('port_id')
        return True
    except Exception as exc:
        LOG.error("Failed to create router intf ID %(id)s,"
                  " Exc %(exc)s", {'id': rout_id, 'exc': str(exc)})
        return False
Add the interfaces to a router .
37,710
def delete_router(self, name, tenant_id, rout_id, subnet_lst):
    """Delete the openstack router.

    Detaches the interfaces first; returns False as soon as any step
    fails, True otherwise.
    """
    # Keep the interface-removal result bound: the except clause below
    # logs it if the router delete itself blows up.
    ret = self.delete_intf_router(name, tenant_id, rout_id, subnet_lst)
    if not ret:
        return False
    try:
        ret = self.neutronclient.delete_router(rout_id)
    except Exception as exc:
        LOG.error("Failed to delete router %(name)s ret %(ret)s "
                  "Exc %(exc)s",
                  {'name': name, 'ret': str(ret), 'exc': str(exc)})
        return False
    return True
Delete the openstack router .
37,711
def delete_intf_router(self, name, tenant_id, rout_id, subnet_lst):
    """Remove the router interfaces attached for the given subnets.

    Returns True when all interfaces were detached, False otherwise.
    """
    try:
        for subnet_id in subnet_lst:
            intf = self.neutronclient.remove_interface_router(
                rout_id, body={'subnet_id': subnet_id})
            intf.get('id')
        return True
    except Exception as exc:
        LOG.error("Failed to delete router interface %(name)s, "
                  " Exc %(exc)s", {'name': name, 'exc': str(exc)})
        return False
Delete the openstack router and remove the interfaces attached .
37,712
def delete_router_by_name(self, rtr_name, tenant_id):
    """Delete every router whose name equals *rtr_name*.

    Returns True on success, False if the listing or a delete failed.
    """
    try:
        rtr_list = self.neutronclient.list_routers().get('routers')
        for rtr in rtr_list:
            if rtr['name'] == rtr_name:
                self.neutronclient.delete_router(rtr['id'])
        return True
    except Exception as exc:
        LOG.error("Failed to get and delete router by name %(name)s, "
                  "Exc %(exc)s", {'name': rtr_name, 'exc': str(exc)})
        return False
Delete the openstack router and its interfaces given its name .
37,713
def get_rtr_name(self, router_id):
    """Retrieve the router name. Incomplete.

    Returns None (implicitly) when the show call fails.
    """
    try:
        router = self.neutronclient.show_router(router_id, body={})
        return router.get('router').get('name')
    except Exception as exc:
        LOG.error("Failed to show router interface %(id)s "
                  "Exc %(exc)s", {'id': router_id, 'exc': exc})
Retrieve the router name . Incomplete .
37,714
def find_rtr_namespace(self, rout_id):
    """Find the network namespace associated with the router.

    Returns the namespace name, or None when the router id is not
    given, the namespace listing fails, or no match is found.
    """
    if rout_id is None:
        return None
    try:
        ns_list = utils.execute(['ip', 'netns', 'list'],
                                root_helper=self.root_helper)
    except Exception as exc:
        LOG.error("Unable to find the namespace list Exception %s", exc)
        return None
    for ns in ns_list.split():
        # Router namespaces embed the router id in their name.
        if 'router' in ns and rout_id in ns:
            return ns
Find the namespace associated with the router .
37,715
def program_rtr(self, args, rout_id, namespace=None):
    """Execute the command against the namespace.

    Runs *args* inside the router's network namespace via
    'ip netns exec'.  Returns True on success, False when the
    namespace cannot be found or the command fails.
    """
    if namespace is None:
        namespace = self.find_rtr_namespace(rout_id)
    if namespace is None:
        LOG.error("Unable to find namespace for router %s", rout_id)
        return False
    final_args = ['ip', 'netns', 'exec', namespace] + args
    try:
        utils.execute(final_args, root_helper=self.root_helper)
    except Exception as e:
        LOG.error("Unable to execute %(cmd)s. "
                  "Exception: %(exception)s",
                  {'cmd': final_args, 'exception': e})
        return False
    return True
Execute the command against the namespace .
37,716
def program_rtr_default_gw(self, tenant_id, rout_id, gw):
    """Program the default gateway of a router."""
    cmd = ['route', 'add', 'default', 'gw', gw]
    if not self.program_rtr(cmd, rout_id):
        LOG.error("Program router returned error for %s", rout_id)
        return False
    return True
Program the default gateway of a router .
37,717
def get_subnet_nwk_excl(self, tenant_id, excl_list, excl_part=False):
    """Retrieve the subnet CIDRs of a tenant's networks.

    Subnets whose network address (without mask) appears in
    *excl_list* are skipped.  With *excl_part* set, networks whose
    name carries a '::<partition>' suffix are skipped entirely.
    """
    ret_subnet_list = []
    for net in self.get_network_by_tenant(tenant_id):
        if excl_part:
            # 'name::partition' style names belong to a partition; skip.
            if net.get('name').partition('::')[2]:
                continue
        subnet_lst = self.get_subnets_for_net(net.get('id'))
        # BUG FIX: get_subnets_for_net returns None when the listing
        # fails; iterating None raised TypeError here.
        for subnet_elem in subnet_lst or []:
            cidr = subnet_elem.get('cidr')
            if cidr.split('/')[0] not in excl_list:
                ret_subnet_list.append(cidr)
    return ret_subnet_list
Retrieve the subnets of a network .
37,718
def remove_rtr_nwk_next_hop(self, rout_id, next_hop, subnet_lst,
                            excl_list):
    """Remove the next hop for all networks of a tenant.

    Reads the routing table of the router's namespace and deletes
    every non-default route whose destination is in neither
    *subnet_lst* nor *excl_list*.  Returns True on success.
    """
    namespace = self.find_rtr_namespace(rout_id)
    if namespace is None:
        LOG.error("Unable to find namespace for router %s", rout_id)
        return False
    args = ['ip', 'route']
    # program_rtr_return runs the command inside the namespace and
    # returns its output (helper defined elsewhere in this class).
    ret = self.program_rtr_return(args, rout_id, namespace=namespace)
    if ret is None:
        LOG.error("Get routes return None %s", rout_id)
        return False
    routes = ret.split('\n')
    concat_lst = subnet_lst + excl_list
    for rout in routes:
        if len(rout) == 0:
            continue
        # First token of an 'ip route' line is the destination.
        nwk = rout.split()[0]
        if nwk == 'default':
            continue
        nwk_no_mask = nwk.split('/')[0]
        # Keep routes listed either with or without the mask.
        if nwk_no_mask not in concat_lst and nwk not in concat_lst:
            args = ['route', 'del', '-net', nwk, 'gw', next_hop]
            ret = self.program_rtr(args, rout_id, namespace=namespace)
            if not ret:
                LOG.error("Program router returned error for %s",
                          rout_id)
                return False
    return True
Remove the next hop for all networks of a tenant .
37,719
def get_fw(self, fw_id):
    """Return the Firewall given its ID, or None on error."""
    try:
        return self.neutronclient.show_firewall(fw_id)
    except Exception as exc:
        LOG.error("Failed to get firewall list for id %(id)s, "
                  "Exc %(exc)s", {'id': fw_id, 'exc': str(exc)})
        return None
Return the Firewall given its ID .
37,720
def get_fw_rule(self, rule_id):
    """Return the firewall rule given its ID, or None on error."""
    try:
        return self.neutronclient.show_firewall_rule(rule_id)
    except Exception as exc:
        LOG.error("Failed to get firewall rule for id %(id)s "
                  "Exc %(exc)s", {'id': rule_id, 'exc': str(exc)})
        return None
Return the firewall rule given its ID .
37,721
def get_fw_policy(self, policy_id):
    """Return the firewall policy given its ID, or None on error."""
    try:
        return self.neutronclient.show_firewall_policy(policy_id)
    except Exception as exc:
        LOG.error("Failed to get firewall plcy for id %(id)s "
                  "Exc %(exc)s", {'id': policy_id, 'exc': str(exc)})
        return None
Return the firewall policy given its ID .
37,722
def _add_redundancy_routers(self, context, start_index, stop_index,
                            user_visible_router, ports=None,
                            ha_settings_db=None, create_ha_group=True):
    """Creates redundancy routers and their interfaces.

    One hidden redundancy router is created per index in
    [start_index, stop_index), cloned from *user_visible_router*,
    then interfaces for the given ports are added to all of them.
    """
    priority = (DEFAULT_MASTER_PRIORITY +
                (start_index - 1) * PRIORITY_INCREASE_STEP)
    r = copy.deepcopy(user_visible_router)
    # Redundancy routers are hidden from the user, hence no tenant.
    r['tenant_id'] = ''
    name = r['name']
    redundancy_r_ids = []
    for i in range(start_index, stop_index):
        del r['id']
        r.pop('routes', None)
        r[ha.ENABLED] = False
        r['name'] = name + REDUNDANCY_ROUTER_SUFFIX + str(i)
        r[routerrole.ROUTER_ROLE_ATTR] = ROUTER_ROLE_HA_REDUNDANCY
        gw_info = r[EXTERNAL_GW_INFO]
        if gw_info and gw_info['external_fixed_ips']:
            # Drop the fixed IPs so each redundancy router gets its
            # own address on the external network.
            for e_fixed_ip in gw_info['external_fixed_ips']:
                e_fixed_ip.pop('ip_address', None)
        r = self.create_router(context, {'router': r})
        LOG.debug("Created redundancy router %(index)d with router id "
                  "%(r_id)s", {'index': i, 'r_id': r['id']})
        priority += PRIORITY_INCREASE_STEP
        r_b_b = RouterRedundancyBinding(
            redundancy_router_id=r['id'],
            priority=priority,
            user_router_id=user_visible_router['id'])
        context.session.add(r_b_b)
        redundancy_r_ids.append(r['id'])
    for port_db in ports or []:
        port = self._core_plugin._make_port_dict(port_db)
        self._add_redundancy_router_interfaces(
            context, user_visible_router, None, port,
            redundancy_r_ids, ha_settings_db, create_ha_group)
Creates a redundancy router and its interfaces on the specified subnets .
37,723
def _remove_redundancy_routers(self, context, router_ids, ports,
                               delete_ha_groups=False):
    """Deletes all interfaces of the specified redundancy routers
    and then the redundancy routers themselves.
    """
    subnets_info = [{'subnet_id': port['fixed_ips'][0]['subnet_id']}
                    for port in ports]
    for r_id in router_ids:
        for i in range(len(subnets_info)):
            self.remove_router_interface(context, r_id, subnets_info[i])
            LOG.debug("Removed interface on %(s_id)s to redundancy "
                      "router with %(r_id)s",
                      {'s_id': ports[i]['network_id'], 'r_id': r_id})
            # HA groups are shared; delete them only once, while
            # processing the first redundancy router.
            if delete_ha_groups and r_id == router_ids[0]:
                self._delete_ha_group(context, ports[i]['id'])
        self.delete_router(context, r_id)
        LOG.debug("Deleted redundancy router %s", r_id)
Deletes all interfaces of the specified redundancy routers and then the redundancy routers themselves .
37,724
def _update_redundancy_router_interfaces(self, context, router, port,
                                         modified_port_data,
                                         redundancy_router_ids=None,
                                         ha_settings_db=None):
    """To be called when the router interfaces are updated,
    like in the case of change in port admin_state_up status.
    """
    router_id = router['id']
    if ha_settings_db is None:
        ha_settings_db = self._get_ha_settings_by_router_id(context,
                                                            router_id)
    if ha_settings_db is None:
        # Router is not HA-enabled; nothing to propagate.
        return
    e_context = context.elevated()
    rr_ids = self._get_redundancy_router_ids(e_context, router_id)
    port_info_list = self._core_plugin.get_ports(
        e_context,
        filters={'device_id': rr_ids,
                 'network_id': [port['network_id']]},
        fields=['device_id', 'id'])
    # Apply the same change to the matching port on every redundancy
    # router, then to the hidden port itself.
    for port_info in port_info_list:
        self._core_plugin.update_port(e_context, port_info['id'],
                                      modified_port_data)
    self._update_hidden_port(e_context, port['id'], modified_port_data)
To be called when the router interfaces are updated, e.g. when the port admin_state_up status changes.
37,725
def _create_hidden_port(self, context, network_id, device_id, fixed_ips,
                        port_type=DEVICE_OWNER_ROUTER_INTF):
    """Creates port used specially for HA purposes.

    The port is owned by no tenant (empty tenant_id) so it stays
    hidden from regular users.
    """
    port = {'port': {
        'tenant_id': '',
        'network_id': network_id,
        'mac_address': ATTR_NOT_SPECIFIED,
        'fixed_ips': fixed_ips,
        'device_id': device_id,
        'device_owner': port_type,
        'admin_state_up': True,
        'name': ''}}
    # The dns-integration extension requires a dns_name attribute.
    if extensions.is_extension_supported(self._core_plugin,
                                         "dns-integration"):
        port['port'].update(dns_name='')
    core_plugin = bc.get_plugin()
    return core_plugin.create_port(context, port)
Creates port used specially for HA purposes .
37,726
def allocate_fw_dev(self, fw_id):
    """Allocate firewall device.

    Picks the first device with spare quota, records the firewall on
    it and returns (obj_dict, mgmt_ip); (None, None) when all devices
    are full.
    """
    for entry in self.res.values():
        used = entry.get('used')
        if used < entry.get('quota'):
            entry['used'] = used + 1
            entry['fw_id_lst'].append(fw_id)
            return entry.get('obj_dict'), entry.get('mgmt_ip')
    return None, None
Allocate firewall device .
37,727
def populate_fw_dev(self, fw_id, mgmt_ip, new):
    """Populate the class after a restart.

    Re-registers *fw_id* on the device with the given management IP;
    the usage counter is only bumped for new firewalls.  Returns
    (obj_dict, mgmt_ip), or (None, None) when no device matches.
    """
    for entry in self.res.values():
        if entry.get('mgmt_ip') == mgmt_ip:
            if new:
                entry['used'] = entry.get('used') + 1
                entry['fw_id_lst'].append(fw_id)
            return entry.get('obj_dict'), entry.get('mgmt_ip')
    return None, None
Populate the class after a restart .
37,728
def get_fw_dev_map(self, fw_id):
    """Return the object dict and mgmt ip for a firewall.

    Returns (None, None) when the firewall is not registered on any
    device.
    """
    for entry in self.res.values():
        if fw_id in entry.get('fw_id_lst'):
            return entry.get('obj_dict'), entry.get('mgmt_ip')
    return None, None
Return the object dict and mgmt ip for a firewall .
37,729
def deallocate_fw_dev(self, fw_id):
    """Release the firewall resource.

    Decrements the usage counter of the device holding *fw_id* and
    forgets the firewall.
    """
    for entry in self.res.values():
        if fw_id in entry.get('fw_id_lst'):
            entry['used'] = entry['used'] - 1
            entry.get('fw_id_lst').remove(fw_id)
            return
Release the firewall resource .
37,730
def populate_local_sch_cache(self, fw_dict):
    """Populate the local cache from FW DB after restart."""
    for fw_id, fw_data in fw_dict.items():
        mgmt_ip = fw_data.get('fw_mgmt_ip')
        # Only fully created firewalls count as new allocations.
        is_new = fw_data.get('device_status') == 'SUCCESS'
        if mgmt_ip is None:
            continue
        drvr_dict, mgmt_ip = self.sched_obj.populate_fw_dev(fw_id,
                                                            mgmt_ip,
                                                            is_new)
        if drvr_dict is None or mgmt_ip is None:
            LOG.info("Pop cache for FW sch: drvr_dict or mgmt_ip "
                     "is None")
Populate the local cache from FW DB after restart .
37,731
def drvr_initialize(self, cfg):
    """Initialize the driver routines.

    Builds a per-device config dict (credentials and interfaces are
    taken positionally from the configured lists) and hands it to each
    driver's initialize().
    """
    for idx, ip in enumerate(self.obj_dict):
        cfg_dict = {'mgmt_ip_addr': ip}
        if self.user_list is not None:
            cfg_dict['user'] = self.user_list[idx]
        if self.pwd_list is not None:
            cfg_dict['pwd'] = self.pwd_list[idx]
        if self.interface_in_list is not None:
            cfg_dict['interface_in'] = self.interface_in_list[idx]
        if self.interface_out_list is not None:
            cfg_dict['interface_out'] = self.interface_out_list[idx]
        self.obj_dict.get(ip).get('drvr_obj').initialize(cfg_dict)
Initialize the driver routines .
37,732
def populate_event_que(self, que_obj):
    """Hand the event queue object to every driver."""
    for entry in self.obj_dict.values():
        entry.get('drvr_obj').populate_event_que(que_obj)
Populates the event queue object .
37,733
def populate_dcnm_obj(self, dcnm_obj):
    """Hand the DCNM object to every driver."""
    for entry in self.obj_dict.values():
        entry.get('drvr_obj').populate_dcnm_obj(dcnm_obj)
Populates the DCNM object .
37,734
def is_device_virtual(self):
    """Returns if the device is physical or virtual.

    Answers based on the first driver found — presumably all devices
    are of the same kind.  Returns None when no drivers are
    configured.
    """
    for entry in self.obj_dict.values():
        return entry.get('drvr_obj').is_device_virtual()
Returns if the device is physical or virtual .
37,735
def create_fw_device(self, tenant_id, fw_id, data):
    """Creates the Firewall.

    Allocates a device slot, records its mgmt IP in the DB and asks
    the driver to create the firewall; the slot is released again if
    the driver fails.  Returns False when no device is available.
    """
    drvr_dict, mgmt_ip = self.sched_obj.allocate_fw_dev(fw_id)
    if drvr_dict is None or mgmt_ip is None:
        return False
    self.update_fw_db_mgmt_ip(fw_id, mgmt_ip)
    ret = drvr_dict.get('drvr_obj').create_fw(tenant_id, data)
    if not ret:
        # Creation failed; give the device slot back.
        self.sched_obj.deallocate_fw_dev(fw_id)
    return ret
Creates the Firewall .
37,736
def modify_fw_device(self, tenant_id, fw_id, data):
    """Modifies the firewall cfg.

    NOTE(review): get_fw_dev_map returns (None, None) for an unknown
    fw_id, which would make this raise AttributeError — confirm that
    callers only pass firewall IDs that were previously allocated.
    """
    drvr_dict, mgmt_ip = self.sched_obj.get_fw_dev_map(fw_id)
    return drvr_dict.get('drvr_obj').modify_fw(tenant_id, data)
Modifies the firewall cfg .
37,737
def network_create_notif(self, tenant_id, tenant_name, cidr):
    """Notification for Network create; fanned out to every driver."""
    for ip, entry in self.obj_dict.items():
        ret = entry.get('drvr_obj').network_create_notif(tenant_id,
                                                         tenant_name,
                                                         cidr)
        LOG.info("Driver with IP %(ip)s return %(ret)s",
                 {'ip': ip, 'ret': ret})
Notification for Network create .
37,738
def network_delete_notif(self, tenant_id, tenant_name, net_id):
    """Notification for Network delete; fanned out to every driver."""
    for ip, entry in self.obj_dict.items():
        ret = entry.get('drvr_obj').network_delete_notif(tenant_id,
                                                         tenant_name,
                                                         net_id)
        LOG.info("Driver with IP %(ip)s return %(ret)s for network "
                 "delete notification", {'ip': ip, 'ret': ret})
Notification for Network delete .
37,739
def cfg_sync_routers(self, context, host, router_ids=None,
                     hosting_device_ids=None):
    """Sync routers according to filters to a specific Cisco cfg agent.

    Returns the routers scheduled to the given hosting devices; an
    empty list when the l3 plugin does not support the lookup.
    """
    adm_context = bc.context.get_admin_context()
    try:
        routers = (
            self._l3plugin.list_active_sync_routers_on_hosting_devices(
                adm_context, host, router_ids, hosting_device_ids))
    except AttributeError:
        # Plugin without hosting-device support: nothing to sync.
        routers = []
    LOG.debug('Routers returned to Cisco cfg agent@%(agt)s:\n '
              '%(routers)s',
              {'agt': host,
               'routers': jsonutils.dumps(routers, indent=5)})
    return routers
Sync routers according to filters to a specific Cisco cfg agent .
37,740
def update_floatingip_statuses_cfg(self, context, router_id,
                                   fip_statuses):
    """Update operational status for one or several floating IPs.

    Additionally sets to DOWN any floating IP last known on
    *router_id* that is no longer associated with a router.
    """
    with context.session.begin(subtransactions=True):
        for (floatingip_id, status) in six.iteritems(fip_statuses):
            LOG.debug("New status for floating IP %(floatingip_id)s: "
                      "%(status)s", {'floatingip_id': floatingip_id,
                                     'status': status})
            try:
                self._l3plugin.update_floatingip_status(context,
                                                        floatingip_id,
                                                        status)
            except l3_exceptions.FloatingIPNotFound:
                LOG.debug("Floating IP: %s no longer present.",
                          floatingip_id)
        known_router_fips = self._l3plugin.get_floatingips(
            context, {'last_known_router_id': [router_id]})
        # Disable floating IPs no longer bound to any router.
        fips_to_disable = (fip['id'] for fip in known_router_fips
                           if not fip['router_id'])
        for fip_id in fips_to_disable:
            LOG.debug("update_fip_statuses: disable: %s", fip_id)
            self._l3plugin.update_floatingip_status(
                context, fip_id, bc.constants.FLOATINGIP_STATUS_DOWN)
Update operational status for one or several floating IPs .
37,741
def update_port_statuses_cfg(self, context, port_ids, status):
    """Update the operational statuses of a list of router ports.

    Thin delegation to the l3 plugin.
    """
    self._l3plugin.update_router_port_statuses(context, port_ids,
                                               status)
Update the operational statuses of a list of router ports .
37,742
def get_mysql_credentials(cfg_file):
    """Get the credentials and database name from options in config file.

    Parses the 'connection' option of the [dfa_mysql] section, which
    looks like mysql://user:pass@host/db?charset=utf8, and returns
    (user, password, host, db_name, charset).  Exits the process on
    any error.
    """
    try:
        parser = ConfigParser.ConfigParser()
        cfg_fp = open(cfg_file)
        parser.readfp(cfg_fp)
        cfg_fp.close()
    except ConfigParser.NoOptionError:
        cfg_fp.close()
        print('Failed to find mysql connections credentials.')
        sys.exit(1)
    except IOError:
        # BUG FIX: the filename was passed as a second print()
        # argument, which printed a literal '%s'; interpolate it.
        print('ERROR: Cannot open %s.' % cfg_file)
        sys.exit(1)

    value = parser.get('dfa_mysql', 'connection')

    try:
        # Locate the '://', '@', '/' and '?' separators of the URL.
        sobj = re.search(r"(://).*(@).*(/).*(\?)", value)
        indices = [sobj.start(1), sobj.start(2), sobj.start(3),
                   sobj.start(4)]
        cred = value[indices[0] + 3:indices[1]].split(':')
        host = value[indices[1] + 1:indices[2]]
        db_name = value[indices[2] + 1:indices[3]]
        charset = value[indices[3] + 1:].split('=')[1]
        return cred[0], cred[1], host, db_name, charset
    except (ValueError, IndexError, AttributeError):
        print('Failed to find mysql connections credentials.')
        sys.exit(1)
Get the credentials and database name from options in config file .
37,743
def modify_conf(cfgfile, service_name, outfn):
    """Modify config file neutron and keystone to include enabler options.

    Reads *cfgfile*, applies the option list registered for
    *service_name* in the module-level service_options table, and
    writes the result to *outfn*.  Exits the process on error.
    """
    if not cfgfile or not outfn:
        print('ERROR: There is no config file.')
        sys.exit(0)
    options = service_options[service_name]
    with open(cfgfile, 'r') as cf:
        lines = cf.readlines()
    for opt in options:
        op = opt.get('option')
        res = [line for line in lines if line.startswith(op)]
        if len(res) > 1:
            print('ERROR: There are more than one %s option.' % res)
            sys.exit(0)
        if res:
            # Option already present: update its value in place.
            (op, sep, val) = (res[0].strip('\n').replace(
                ' ', '').partition('='))
            new_val = None
            if opt.get('is_list'):
                # List-valued option: append our value when missing.
                if not any(opt.get('value') == value
                           for value in val.split(',')):
                    new_val = ','.join((val, opt.get('value')))
            else:
                if val != opt.get('value'):
                    new_val = opt.get('value')
            if new_val:
                opt_idx = lines.index(res[0])
                lines.pop(opt_idx)
                lines.insert(opt_idx, '='.join((opt.get('option'),
                                                new_val + '\n')))
        else:
            # Option missing: insert it right below its section header.
            try:
                sec_idx = lines.index('[' + opt.get('section') + ']\n')
                lines.insert(sec_idx + 1,
                             '='.join((opt.get('option'),
                                       opt.get('value') + '\n')))
            except ValueError:
                print('Invalid %s section name.' % opt.get('section'))
                sys.exit(0)
    with open(outfn, 'w') as fwp:
        all_lines = ''
        for line in lines:
            all_lines += line
        fwp.write(all_lines)
Modify config file neutron and keystone to include enabler options .
37,744
def get_all_hosting_devices(self, context):
    """Get a list of all hosting devices via RPC."""
    return self.client.prepare().call(context,
                                      'get_all_hosting_devices',
                                      host=self.host)
Get a list of all hosting devices .
37,745
def get_all_hosted_routers(self, context):
    """Make a remote process call to retrieve the sync data for
    routers that have been scheduled to a hosting device.
    """
    return self.client.prepare().call(context,
                                      'cfg_sync_all_hosted_routers',
                                      host=self.host)
Make a remote process call to retrieve the sync data for routers that have been scheduled to a hosting device .
37,746
def get_hardware_router_type_id(self, context):
    """Get the ID for the ASR1k hardware router type via RPC."""
    return self.client.prepare().call(context,
                                      'get_hardware_router_type_id',
                                      host=self.host)
Get the ID for the ASR1k hardware router type .
37,747
def heartbeat(self, context, msg):
    """Process heartbeat message from agents on compute nodes.

    Updates the agent's status and DB record; *msg* is a JSON string
    carrying 'when' and 'agent' fields.
    """
    args = jsonutils.loads(msg)
    when = args.get('when')
    agent = args.get('agent')
    configurations = {'uplink': ''}
    LOG.debug('heartbeat received: %(time)s - %(agent)s',
              ({'time': when, 'agent': agent}))
    if self.obj.neutron_event:
        # Make sure an RPC client towards this agent exists.
        self.obj.neutron_event.create_rpc_client(agent)
    self.obj.update_agent_status(agent, when)
    agent_info = dict(timestamp=utils.utc_time(when), host=agent,
                      config=jsonutils.dumps(configurations))
    self.obj.update_agent_db(agent_info)
Process heartbeat message from agents on compute nodes .
37,748
def request_uplink_info(self, context, agent):
    """Process uplink message from an agent.

    Queues an 'agent.request.uplink' event at low priority; always
    returns 0.
    """
    LOG.debug('request_uplink_info from %(agent)s', {'agent': agent})
    payload = {'agent': agent}
    data = ('agent.request.uplink', payload)
    pri = self.obj.PRI_LOW_START + 1
    self.obj.pqueue.put((pri, time.ctime(), data))
    LOG.debug('Added request uplink info into queue.')
    return 0
Process uplink message from an agent .
37,749
def set_static_ip_address(self, context, msg):
    """Process request for setting rules in iptables.

    Queues a 'cli.static_ip.set' event carrying the requested MAC/IP
    pair; *msg* is a JSON string with 'mac' and 'ip' fields.
    Always returns 0.
    """
    args = jsonutils.loads(msg)
    macaddr = args.get('mac')
    ipaddr = args.get('ip')
    LOG.debug('set_static_ip_address received: %(mac)s %(ip)s',
              ({'mac': macaddr, 'ip': ipaddr}))
    event_type = 'cli.static_ip.set'
    payload = {'mac': macaddr, 'ip': ipaddr}
    timestamp = time.ctime()
    data = (event_type, payload)
    pri = self.obj.PRI_LOW_START
    self.obj.pqueue.put((pri, timestamp, data))
    LOG.debug('Added request to add static ip into queue.')
    return 0
Process request for setting rules in iptables .
37,750
def update_vm_result(self, context, msg):
    """Update VM's result field in the DB.

    Queues an 'agent.vm_result.update' event for the reported port;
    *msg* is a JSON string with 'port_uuid' and 'result' fields.
    Always returns 0.
    """
    args = jsonutils.loads(msg)
    agent = context.get('agent')
    port_id = args.get('port_uuid')
    result = args.get('result')
    LOG.debug('update_vm_result received from %(agent)s: '
              '%(port_id)s %(result)s',
              {'agent': agent, 'port_id': port_id, 'result': result})
    event_type = 'agent.vm_result.update'
    payload = {'port_id': port_id, 'result': result}
    timestamp = time.ctime()
    data = (event_type, payload)
    pri = self.obj.PRI_LOW_START + 10
    self.obj.pqueue.put((pri, timestamp, data))
    LOG.debug('Added request vm result update into queue.')
    return 0
Update a VM's result field in the DB.
37,751
def _setup_rpc(self):
    """Setup RPC server for dfa server."""
    endpoints = RpcCallBacks(self)
    self.server = rpc.DfaRpcServer(self.ser_q, self._host,
                                   self.cfg.dfa_rpc.transport_url,
                                   endpoints,
                                   exchange=constants.DFA_EXCHANGE)
Setup RPC server for dfa server .
37,752
def register_segment_dcnm(self, cfg, seg_id_min, seg_id_max):
    """Register segmentation id pool with DCNM.

    Creates the orchestrator's segment-ID range on DCNM when absent,
    updates it when the configured bounds changed, and exits the
    process when DCNM rejects the request.
    """
    orch_id = cfg.dcnm.orchestrator_id
    try:
        segid_range = self.dcnm_client.get_segmentid_range(orch_id)
        if segid_range is None:
            self.dcnm_client.set_segmentid_range(orch_id, seg_id_min,
                                                 seg_id_max)
        else:
            # DCNM reports the range as "min-max".
            conf_min, _, conf_max = (
                segid_range["segmentIdRanges"].partition("-"))
            if (int(conf_min) != seg_id_min or
                    int(conf_max) != seg_id_max):
                self.dcnm_client.update_segmentid_range(orch_id,
                                                        seg_id_min,
                                                        seg_id_max)
    except dexc.DfaClientRequestFailed as exc:
        LOG.error("Segment ID range could not be created/updated"
                  " on DCNM: %s", exc)
        raise SystemExit(exc)
Register segmentation id pool with DCNM .
37,753
def project_create_func(self, proj_id, proj=None):
    """Create project given project uuid.

    Looks up the project in keystone when *proj* is not supplied,
    derives the DCI id from its name and creates the corresponding
    project/partition on DCNM, updating the local cache with the
    outcome.
    """
    if self.get_project_name(proj_id):
        LOG.info("project %s exists, returning", proj_id)
        return
    if not proj:
        try:
            proj = self.keystone_event._service.projects.get(proj_id)
        except Exception:
            LOG.error("Failed to find project %s.", proj_id)
            return
    proj_name, dci_id = self._get_dci_id_and_proj_name(proj.name)
    if proj_name in reserved_project_name:
        # Disambiguate reserved names with the orchestrator id.
        proj_name = "_".join((proj_name,
                              self.cfg.dcnm.orchestrator_id))
    part_name = self.cfg.dcnm.default_partition_name
    # DCNM limits the 'org:part' identifier to 32 characters.
    if len(':'.join((proj_name, part_name))) > 32:
        LOG.error('Invalid project name length: %s. The length of '
                  'org:part name is greater than 32',
                  len(':'.join((proj_name, part_name))))
        return
    try:
        self.dcnm_client.create_project(self.cfg.dcnm.orchestrator_id,
                                        proj_name, part_name, dci_id,
                                        proj.description)
    except dexc.DfaClientRequestFailed:
        self.update_project_info_cache(proj_id, name=proj_name,
                                       dci_id=dci_id,
                                       result=constants.CREATE_FAIL)
        LOG.error("Failed to create project %s on DCNM.", proj_name)
    else:
        self.update_project_info_cache(proj_id, name=proj_name,
                                       dci_id=dci_id)
        LOG.debug('project %(name)s %(dci)s %(desc)s',
                  ({'name': proj_name, 'dci': dci_id,
                    'desc': proj.description}))
    self.project_create_notif(proj_id, proj_name)
Create project given project uuid
37,754
def project_update_event(self, proj_info):
    """Process project update event.

    Only a change of the project's DCI id is propagated to DCNM; a
    change of the project name itself is rejected.
    """
    LOG.debug("Processing project_update_event %(proj)s.",
              {'proj': proj_info})
    proj_id = proj_info.get('resource_info')
    try:
        proj = self.keystone_event._service.projects.get(proj_id)
    except Exception:
        LOG.error("Failed to find project %s.", proj_id)
        return
    new_proj_name, new_dci_id = self._get_dci_id_and_proj_name(
        proj.name)
    orig_proj_name = self.get_project_name(proj_id)
    orig_dci_id = self.get_dci_id(proj_id)
    # Nothing relevant changed: ignore the event.
    if orig_proj_name == new_proj_name and new_dci_id == orig_dci_id:
        LOG.warning('Project update event for %(proj)s is received '
                    'without changing in the project name: '
                    '%(orig_proj)s. Ignoring the event.',
                    {'proj': proj_id, 'orig_proj': orig_proj_name})
        return
    # Renaming the project is not supported.
    if orig_proj_name != new_proj_name:
        LOG.debug('Update request cannot be processed as name of '
                  'project is changed: %(proj)s %(orig_name)s '
                  '%(orig_dci)s to %(new_name)s %(new_dci)s.',
                  ({'proj': proj_id, 'orig_name': orig_proj_name,
                    'orig_dci': orig_dci_id,
                    'new_name': new_proj_name,
                    'new_dci': new_dci_id}))
        return
    LOG.debug('Changing project DCI id for %(proj)s from %(orig_dci)s '
              'to %(new_dci)s.',
              {'proj': proj_id, 'orig_dci': orig_dci_id,
               'new_dci': new_dci_id})
    try:
        self.dcnm_client.update_project(
            new_proj_name, self.cfg.dcnm.default_partition_name,
            dci_id=new_dci_id)
    except dexc.DfaClientRequestFailed:
        LOG.error("Failed to update project %s on DCNM.",
                  new_proj_name)
        self.update_project_info_cache(proj_id, name=new_proj_name,
                                       dci_id=new_dci_id,
                                       opcode='update',
                                       result=constants.UPDATE_FAIL)
    else:
        self.update_project_info_cache(proj_id, name=new_proj_name,
                                       dci_id=new_dci_id,
                                       opcode='update')
        LOG.debug('Updated project %(proj)s %(name)s.',
                  {'proj': proj_id, 'name': proj.name})
Process project update event .
37,755
def project_delete_event(self, proj_info):
    """Process project delete event.

    Deletes the project's partition on DCNM, updates the local cache
    with the outcome and sends a delete notification.
    """
    LOG.debug("Processing project_delete_event...")
    proj_id = proj_info.get('resource_info')
    proj_name = self.get_project_name(proj_id)
    if proj_name:
        try:
            self.dcnm_client.delete_project(
                proj_name, self.cfg.dcnm.default_partition_name)
        except dexc.DfaClientRequestFailed:
            # BUG FIX: this is the delete path; the message wrongly
            # said 'create'.
            LOG.error("Failed to delete project %s on DCNM.",
                      proj_name)
            self.update_project_info_cache(proj_id, name=proj_name,
                                           opcode='delete',
                                           result=constants.DELETE_FAIL)
        else:
            self.update_project_info_cache(proj_id, opcode='delete')
            LOG.debug('Deleted project:%s', proj_name)
        self.project_delete_notif(proj_id, proj_name)
Process project delete event .
37,756
def subnet_create_event(self, subnet_info):
    """Process a subnet create event.

    The payload carries either a single entry under 'subnet' or a list
    under 'subnets'; each entry is handed to create_subnet().
    """
    single = subnet_info.get('subnet')
    if single:
        self.create_subnet(single)
        return
    for entry in subnet_info.get('subnets') or []:
        self.create_subnet(entry)
Process subnet create event .
37,757
def create_subnet(self, snet):
    """Create a subnet on DCNM for a neutron subnet event.

    Caches the subnet, resolves its network, and asks DCNM to create
    the network once its subnet information is available.  Firewall
    service subnets and DCNM-originated networks are ignored.

    :param snet: neutron subnet dict ('id', 'tenant_id', 'cidr',
                 'network_id', ...).
    """
    snet_id = snet.get('id')
    # Firewall service subnets are managed elsewhere; skip them.
    if self.fw_api.is_subnet_source_fw(snet.get('tenant_id'),
                                       snet.get('cidr')):
        LOG.info("Service subnet %s, returning", snet.get('cidr'))
        return
    if snet_id not in self.subnet:
        self.subnet[snet_id] = {}
    self.subnet[snet_id].update(snet)
    net = self.network.get(self.subnet[snet_id].get('network_id'))
    if not net:
        LOG.error('Network %(network_id)s does not exist.',
                  {'network_id': self.subnet[snet_id].get('network_id')})
        return
    query_net = self.get_network(net.get('id'))
    # NOTE(review): query_net.result is dereferenced before the
    # `if query_net` truthiness test below; if get_network can return
    # None this raises AttributeError — confirm against get_network.
    if query_net.result != constants.SUBNET_PENDING:
        LOG.info("Subnet exists, returning")
        return
    if query_net and query_net.source.lower() == 'dcnm':
        # The network originated from DCNM itself; nothing to push back.
        LOG.info('create_subnet: network %(name)s '
                 'was created by DCNM. Ignoring processing the '
                 'event.', {'name': query_net.name})
        return
    tenant_name = self.get_project_name(snet['tenant_id'])
    subnet = utils.Dict2Obj(snet)
    dcnm_net = utils.Dict2Obj(net)
    if not tenant_name:
        LOG.error('Project %(tenant_id)s does not exist.',
                  {'tenant_id': subnet.tenant_id})
        self.update_network_db(dcnm_net.id, constants.CREATE_FAIL)
        return
    try:
        # DCNM network creation requires both network and subnet info.
        self.dcnm_client.create_network(tenant_name, dcnm_net, subnet,
                                        self.dcnm_dhcp)
        self.update_network_db(net.get('id'), constants.RESULT_SUCCESS)
    except dexc.DfaClientRequestFailed:
        LOG.exception('Failed to create network %(net)s.',
                      {'net': dcnm_net.name})
        self.update_network_db(dcnm_net.id, constants.CREATE_FAIL)
    # The creation notification is sent whether or not DCNM succeeded.
    self.network_sub_create_notif(snet.get('tenant_id'), tenant_name,
                                  snet.get('cidr'))
Create subnet .
37,758
def _get_segmentation_id ( self , netid , segid , source ) : return self . seg_drvr . allocate_segmentation_id ( netid , seg_id = segid , source = source )
Allocate segmentation id .
37,759
def network_delete_event(self, network_info):
    """Process a network delete event.

    Deletes the network on DCNM, releases its segment, purges the
    local caches/DB, deletes any VMs still attached and finally emits
    a delete notification.

    :param network_info: event payload containing 'network_id'.
    """
    net_id = network_info['network_id']
    if net_id not in self.network:
        LOG.error('network_delete_event: net_id %s does not exist.',
                  net_id)
        return
    segid = self.network[net_id].get('segmentation_id')
    tenant_id = self.network[net_id].get('tenant_id')
    tenant_name = self.get_project_name(tenant_id)
    net = utils.Dict2Obj(self.network[net_id])
    if not tenant_name:
        LOG.error('Project %(tenant_id)s does not exist.',
                  {'tenant_id': tenant_id})
        self.update_network_db(net.id, constants.DELETE_FAIL)
        return
    try:
        self.dcnm_client.delete_network(tenant_name, net)
        # Only clean local state after DCNM accepted the delete.
        self.seg_drvr.release_segmentation_id(segid)
        self.delete_network_db(net_id)
        del self.network[net_id]
        snets = [k for k in self.subnet
                 if self.subnet[k].get('network_id') == net_id]
        # Idiom fix: plain loop instead of a side-effect list
        # comprehension.
        for s in snets:
            self.subnet.pop(s)
    except dexc.DfaClientRequestFailed:
        # Bug fix: the original message said "create" in the delete path.
        LOG.error('Failed to delete network %(net)s.', {'net': net.name})
        self.update_network_db(net_id, constants.DELETE_FAIL)
    # Remove VMs that were attached to the deleted network.
    instances = self.get_vms()
    instances_related = [k for k in instances if k.network_id == net_id]
    for vm in instances_related:
        LOG.debug("deleting vm %s because network is deleted", vm.name)
        self.delete_vm_function(vm.port_id, vm)
    self.network_del_notif(tenant_id, tenant_name, net_id)
Process network delete event .
37,760
def dcnm_network_delete_event(self, network_info):
    """Process a network delete event originating from DCNM.

    Looks the network up by segmentation id and deletes it from
    neutron and the local DB; the cache entry is restored if the
    delete fails so a retry remains possible.

    :param network_info: payload containing 'segmentation_id'.
    """
    seg_id = network_info.get('segmentation_id')
    if not seg_id:
        LOG.error('Failed to delete network. Invalid network '
                  'info %s.', network_info)
        # Bug fix: without this return the lookup below ran with
        # seg_id=None.
        return
    query_net = self.get_network_by_segid(seg_id)
    if not query_net:
        LOG.info('dcnm_network_delete_event: network %(segid)s '
                 'does not exist.', {'segid': seg_id})
        return
    if self.fw_api.is_network_source_fw(query_net, query_net.name):
        # Firewall service networks are managed elsewhere.
        LOG.info("Service network %s, returning", query_net.name)
        return
    try:
        del_net = self.network.pop(query_net.network_id)
        self.neutronclient.delete_network(query_net.network_id)
        self.delete_network_db(query_net.network_id)
    except Exception as exc:
        # Restore the cache entry so the delete can be retried.
        self.network[query_net.network_id] = del_net
        LOG.exception('dcnm_network_delete_event: Failed to delete '
                      '%(network)s. Reason %(err)s.',
                      {'network': query_net.name, 'err': str(exc)})
Process network delete event from DCNM .
37,761
def update_port_ip_address(self):
    """Find the IP addresses DHCP assigned to ports with unknown IPs.

    Scans the dnsmasq/dhcpd lease file for each VM whose DB entry still
    has the placeholder address 0.0.0.0, updates its iptables rule on
    the compute host, the VM DB row, and pushes the refreshed VM info
    to the agent.
    """
    leases = None
    req = dict(ip='0.0.0.0')
    instances = self.get_vms_for_this_req(**req)
    if instances is None:
        return
    for vm in instances:
        if not leases:
            # Lazily read the lease file once, only when needed.
            leases = self._get_ip_leases()
            if not leases:
                return
        for line in leases:
            # A lease block starts with "lease <ip> {"; remember the ip.
            if line.startswith('lease') and line.endswith('{\n'):
                ip_addr = line.split()[1]
            # Match this VM's MAC against the block's hardware line.
            if 'hardware ethernet' in line:
                if vm.mac == line.replace(';', '').split()[2]:
                    LOG.info('Find IP address %(ip)s for %(mac)s',
                             {'ip': ip_addr, 'mac': vm.mac})
                    try:
                        rule_info = dict(ip=ip_addr, mac=vm.mac,
                                         port=vm.port_id, status='up')
                        self.neutron_event.update_ip_rule(str(vm.host),
                                                          str(rule_info))
                    except (rpc.MessagingTimeout, rpc.RPCException,
                            rpc.RemoteError):
                        # NOTE(review): adjacent literals concatenate to
                        # "updaterules" (missing space).
                        LOG.error("RPC error: Failed to update"
                                  "rules.")
                    else:
                        params = dict(columns=dict(ip=ip_addr))
                        self.update_vm_db(vm.port_id, **params)
                        vm_info = dict(
                            status=vm.status, vm_mac=vm.mac,
                            segmentation_id=vm.segmentation_id,
                            host=vm.host, port_uuid=vm.port_id,
                            net_uuid=vm.network_id,
                            oui=dict(ip_addr=ip_addr, vm_name=vm.name,
                                     vm_uuid=vm.instance_id,
                                     gw_mac=vm.gw_mac,
                                     fwd_mod=vm.fwd_mod,
                                     oui_id='cisco'))
                        try:
                            self.neutron_event.send_vm_info(vm.host,
                                                            str(vm_info))
                        except (rpc.MessagingTimeout, rpc.RPCException,
                                rpc.RemoteError):
                            LOG.error('Failed to send VM info to '
                                      'agent.')
Find the IP address that was assigned to a port via DHCP
37,762
def request_vms_info(self, payload):
    """Collect the VMs bound to the requesting agent's host and send
    their descriptions back to that agent over RPC.
    """
    agent = payload.get('agent')
    LOG.debug('request_vms_info: Getting VMs info for %s', agent)
    instances = self.get_vms_for_this_req(host=payload.get('agent'))
    vm_info = [
        dict(status=vm.status,
             vm_mac=vm.mac,
             segmentation_id=vm.segmentation_id,
             host=vm.host,
             port_uuid=vm.port_id,
             net_uuid=vm.network_id,
             oui=dict(ip_addr=vm.ip,
                      vm_name=vm.name,
                      vm_uuid=vm.instance_id,
                      gw_mac=vm.gw_mac,
                      fwd_mod=vm.fwd_mod,
                      oui_id='cisco'))
        for vm in instances
    ]
    try:
        self.neutron_event.send_vm_info(agent, str(vm_info))
    except (rpc.MessagingTimeout, rpc.RPCException, rpc.RemoteError):
        LOG.error('Failed to send VM info to agent.')
Get the VMs from the database and send the info to the agent .
37,763
def request_uplink_info(self, payload):
    """Look up the stored configuration for the requesting agent and
    push it back as the uplink-name message.
    """
    host = payload.get('agent')
    cfg = self.get_agent_configurations(host)
    LOG.debug('configurations on %(agent)s is %(cfg)s',
              ({'agent': host, 'cfg': cfg}))
    try:
        self.neutron_event.send_msg_to_agent(host,
                                             constants.UPLINK_NAME,
                                             cfg)
    except (rpc.MessagingTimeout, rpc.RPCException, rpc.RemoteError):
        LOG.error("RPC error: Failed to send uplink name to agent.")
Get the uplink from the database and send the info to the agent .
37,764
def set_static_ip_address(self, payload):
    """Set a static IP address for the VM(s) owning a MAC address.

    Updates the iptables rule on the compute host, the VM DB row, and
    pushes the refreshed VM info to the agent.

    :param payload: dict with 'mac' and 'ip' of the static assignment.
    """
    macaddr = payload.get('mac')
    ipaddr = payload.get('ip')
    req = dict(mac=macaddr)
    instances = self.get_vms_for_this_req(**req)
    for vm in instances:
        LOG.info('Updating IP address: %(ip)s %(mac)s.',
                 {'ip': ipaddr, 'mac': macaddr})
        try:
            # First refresh the host-side rule for the new address.
            rule_info = dict(ip=ipaddr, mac=macaddr,
                             port=vm.port_id, status='up')
            self.neutron_event.update_ip_rule(str(vm.host),
                                              str(rule_info))
        except (rpc.MessagingTimeout, rpc.RPCException,
                rpc.RemoteError):
            LOG.error("RPC error: Failed to update rules.")
        else:
            # Only persist and re-advertise on successful rule update.
            params = dict(columns=dict(ip=ipaddr))
            self.update_vm_db(vm.port_id, **params)
            vm_info = dict(status=vm.status, vm_mac=vm.mac,
                           segmentation_id=vm.segmentation_id,
                           host=vm.host, port_uuid=vm.port_id,
                           net_uuid=vm.network_id,
                           oui=dict(ip_addr=ipaddr, vm_name=vm.name,
                                    vm_uuid=vm.instance_id,
                                    gw_mac=vm.gw_mac,
                                    fwd_mod=vm.fwd_mod,
                                    oui_id='cisco'))
            try:
                self.neutron_event.send_vm_info(vm.host, str(vm_info))
            except (rpc.MessagingTimeout, rpc.RPCException,
                    rpc.RemoteError):
                LOG.error('Failed to send VM info to agent.')
Set static ip address for a VM .
37,765
def vm_result_update(self, payload):
    """Record an agent-reported result for a VM in the database.

    :param payload: dict with 'port_id' and 'result'; ignored if either
                    is missing/empty.
    """
    port_id = payload.get('port_id')
    result = payload.get('result')
    # Both fields must be present; otherwise the event is ignored.
    if not (port_id and result):
        return
    self.update_vm_db(port_id, columns=dict(result=result))
Update the result field in VM database .
37,766
def add_lbaas_port(self, port_id, lb_id):
    """Given a port id, fetch the port info and register the LBaaS VM.

    Sends the constructed VM info to the agent and records the result
    in the VM database.

    :param port_id: neutron port id of the load balancer VIP port.
    :param lb_id: load balancer id, stored as the port's device_id.
    """
    port_info = self.neutronclient.show_port(port_id)
    port = port_info.get('port')
    if not port:
        # Idiom fix: lazy logging arguments instead of % formatting.
        LOG.error("Can not retrieve port info for port %s", port_id)
        return
    LOG.debug("lbaas add port, %s", port)
    if not port['binding:host_id']:
        # Octavia VIP ports carry no host binding; nothing to report.
        LOG.info("No host bind for lbaas port, octavia case")
        return
    port["device_id"] = lb_id
    vm_info = self._make_vm_info(port, 'up', constants.LBAAS_PREFIX)
    self.port[port_id] = vm_info
    if self.send_vm_info(vm_info):
        self.add_vms_db(vm_info, constants.RESULT_SUCCESS)
    else:
        self.add_vms_db(vm_info, constants.CREATE_FAIL)
Give port id get port info and send vm info to agent .
37,767
def delete_lbaas_port(self, lb_id):
    """Send a VM-down event for every VM tied to a load balancer and
    delete the corresponding DB entries.

    :param lb_id: load balancer id (dashes are stripped to match the
                  stored instance_id format).
    """
    lb_id = lb_id.replace('-', '')
    instances = self.get_vms_for_this_req(instance_id=lb_id)
    for vm in instances:
        # Idiom fix: lazy logging arguments instead of % formatting.
        LOG.info("deleting lbaas vm %s ", vm.name)
        self.delete_vm_function(vm.port_id, vm)
Send VM down event and delete the DB entry .
37,768
def vip_create_event(self, vip_info):
    """Process a LBaaS vip-create event by registering its VIP port."""
    vip = vip_info.get('vip')
    self.add_lbaas_port(vip.get('port_id'), vip.get('id'))
Process vip create event .
37,769
def listener_create_event(self, listener_info):
    """Process a LBaaS listener-create event.

    For every load balancer attached to the listener, register its VIP
    port unless a VM entry for the LB already exists.
    """
    listener_data = listener_info.get('listener')
    lb_list = listener_data.get('loadbalancers')
    for lb in lb_list:
        lb_id = lb.get('id')
        # Stored instance ids have no dashes.
        instances = self.get_vms_for_this_req(
            instance_id=lb_id.replace('-', ''))
        if not instances:
            lb_info = self.neutronclient.show_loadbalancer(lb_id)
            if lb_info:
                port_id = lb_info["loadbalancer"]["vip_port_id"]
                self.add_lbaas_port(port_id, lb_id)
        else:
            # Idiom fix: lazy logging arguments instead of % formatting.
            LOG.info("lbaas port for lb %s already added", lb_id)
Process listener create event .
37,770
def listener_delete_event(self, listener_info):
    """Process a LBaaS listener-delete event.

    Any load balancer left without listeners has its VIP port entry
    removed.
    """
    lb_list = self.neutronclient.list_loadbalancers()
    for lb in lb_list.get('loadbalancers'):
        if not lb.get("listeners"):
            lb_id = lb.get('id')
            # Idiom fix: lazy logging arguments instead of % formatting.
            LOG.info("Deleting lb %s port", lb_id)
            self.delete_lbaas_port(lb_id)
Process listener delete event .
37,771
def pool_create_event(self, pool_info):
    """Process a LBaaS pool-create event by replaying listener-create
    handling for each listener attached to the pool.
    """
    pool = pool_info.get('pool')
    for listener in pool.get('listeners'):
        info = self.neutronclient.show_listener(listener.get('id'))
        self.listener_create_event(info)
Process pool create event .
37,772
def sync_projects(self):
    """Sync keystone projects into DCNM.

    Reserved/service project names listed in not_create_project_name
    are skipped.
    """
    p = self.keystone_event._service.projects.list()
    for proj in p:
        if proj.name in not_create_project_name:
            continue
        # Idiom fix: lazy logging arguments instead of % formatting.
        LOG.info("Syncing project %s", proj.name)
        self.project_create_func(proj.id, proj=proj)
Sync projects .
37,773
def sync_networks(self):
    """Sync existing neutron networks and their subnets into DCNM."""
    for net in self.neutronclient.list_networks().get("networks"):
        LOG.info("Syncing network %s", net["id"])
        self.network_create_func(net)
    for subnet in self.neutronclient.list_subnets().get("subnets"):
        LOG.info("Syncing subnet %s", subnet["id"])
        self.create_subnet(subnet)
Sync networks .
37,774
def create_threads(self):
    """Create and start the server's worker threads.

    Spawns event-processing threads for neutron events, the internal
    priority queue, keystone events, the RPC server and (optionally)
    the DCNM AMQP listener, then runs the failure-recovery periodic
    task in the current thread.
    """
    neutron_thrd = utils.EventProcessingThread('Neutron_Event',
                                               self.neutron_event,
                                               'event_handler',
                                               self._excpq)
    self.dfa_threads.append(neutron_thrd)
    qp_thrd = utils.EventProcessingThread('Event_Queue', self,
                                          'process_queue', self._excpq)
    self.dfa_threads.append(qp_thrd)
    keys_thrd = utils.EventProcessingThread('Keystone_Event',
                                            self.keystone_event,
                                            'event_handler', self._excpq)
    self.dfa_threads.append(keys_thrd)
    hb_thrd = utils.EventProcessingThread('RPC_Server', self,
                                          'start_rpc', self._excpq)
    self.dfa_threads.append(hb_thrd)
    # The DCNM listener is optional; only start it when configured.
    if self.dcnm_event is not None:
        dcnmL_thrd = utils.EventProcessingThread('DcnmListener',
                                                 self.dcnm_event,
                                                 'process_amqp_msgs',
                                                 self._excpq)
        self.dfa_threads.append(dcnmL_thrd)
    fr_thrd = utils.PeriodicTask(interval=constants.FAIL_REC_INTERVAL,
                                 func=self.add_events,
                                 event_queue=self.pqueue,
                                 priority=self.PRI_LOW_START + 10,
                                 excq=self._excpq)
    for t in self.dfa_threads:
        t.start()
    # Runs in the calling thread (does not return until stopped).
    fr_thrd.run()
Create threads on server .
37,775
def _allocate_specified_segment(self, session, seg_id, source):
    """Allocate a specific segmentation id.

    Uses a compare-and-set UPDATE so concurrent allocators cannot both
    claim the same row; if no row exists a new allocated one is added.

    :param session: DB session.
    :param seg_id: the exact segmentation id requested.
    :param source: originator tag stored on a newly created row.
    :returns: the allocation row, or None when the id is already taken
              (or lost to a concurrent insert).
    """
    try:
        with session.begin(subtransactions=True):
            alloc = (session.query(self.model).
                     filter_by(segmentation_id=seg_id).first())
            if alloc:
                if alloc.allocated:
                    # Already taken by someone else.
                    return
                else:
                    # Compare-and-set: only wins if still unallocated.
                    count = (session.query(self.model).
                             filter_by(allocated=False,
                                       segmentation_id=seg_id).
                             update({"allocated": True}))
                    if count:
                        return alloc
            # No row (or we lost the race): create one pre-allocated.
            alloc = self.model(segmentation_id=seg_id,
                               allocated=True,
                               source=source)
            session.add(alloc)
    except db_exc.DBDuplicateEntry:
        # A concurrent insert won; report failure.
        alloc = None
    return alloc
Allocate specified segment .
37,776
def _allocate_segment(self, session, net_id, source):
    """Allocate a free segmentation id from the pool.

    Rows deleted longer than the configured timeout ago are first
    recycled (delete_time cleared).  Allocation uses a compare-and-set
    UPDATE retried up to DB_MAX_RETRIES times against concurrent
    allocators.

    :returns: the claimed allocation row, or None when the pool is
              exhausted or every retry lost the race.
    """
    with session.begin(subtransactions=True):
        hour_lapse = utils.utc_time_lapse(self.seg_timeout)
        # Recycle ids whose deletion aged past the timeout window.
        count = (session.query(self.model).
                 filter(self.model.delete_time < hour_lapse).
                 update({"delete_time": None}))
        select = (session.query(self.model).
                  filter_by(allocated=False, delete_time=None))
        for attempt in range(DB_MAX_RETRIES + 1):
            alloc = select.first()
            if not alloc:
                LOG.info("No segment resource available")
                return
            # Compare-and-set: only wins if the row is still free.
            count = (session.query(self.model).
                     filter_by(segmentation_id=alloc.segmentation_id,
                               allocated=False).
                     update({"allocated": True,
                             "network_id": net_id,
                             "source": source}))
            if count:
                return alloc
        LOG.error("ERROR: Failed to allocate segment for net %(net)s"
                  " source %(src)s", {'net': net_id, 'src': source})
Allocate segment from pool .
37,777
def allocate_subnet(self, subnet_lst, net_id=None):
    """Allocate a subnet from the pool, excluding addresses in use.

    Uses a compare-and-set UPDATE retried up to DB_MAX_RETRIES times
    against concurrent allocators.

    :param subnet_lst: iterable of subnet addresses that must NOT be
                       returned (may be empty).
    :param net_id: network id recorded with the allocation.
    :returns: the allocated subnet address, or None on exhaustion.
    """
    session = db.get_session()
    # Build a conjunction excluding every address in subnet_lst.
    query_str = None
    for sub in subnet_lst:
        sub_que = (self.model.subnet_address != sub)
        query_str = sub_que if query_str is None else (query_str & sub_que)
    with session.begin(subtransactions=True):
        cond = (self.model.allocated == 0)
        # Bug fix: with an empty subnet_lst, query_str stayed None and
        # `cond & None` is not a valid SQLAlchemy expression.
        if query_str is not None:
            cond = cond & query_str
        select = (session.query(self.model).filter(cond))
        for attempt in range(DB_MAX_RETRIES + 1):
            alloc = select.first()
            if not alloc:
                LOG.info("No subnet resource available")
                return
            # Compare-and-set: only wins if the row is still free.
            count = (session.query(self.model).
                     filter_by(subnet_address=alloc.subnet_address,
                               allocated=False).
                     update({"allocated": True, "network_id": net_id}))
            if count:
                return alloc.subnet_address
        LOG.error("ERROR: Failed to allocate subnet for net %(net)s",
                  {'net': net_id})
        return None
Allocate subnet from pool .
37,778
def add_update_topology_db(self, **params):
    """Add or update a topology DB entry (upsert keyed on host +
    protocol interface).

    :param params: must contain 'columns', a dict of DfaTopologyDb
                   column values including 'host' and
                   'protocol_interface'.
    """
    topo_dict = params.get('columns')
    session = db.get_session()
    host = topo_dict.get('host')
    protocol_interface = topo_dict.get('protocol_interface')
    with session.begin(subtransactions=True):
        try:
            # one() raises NoResultFound when the row is missing,
            # which routes us to the insert path below.
            session.query(DfaTopologyDb).filter_by(
                host=host,
                protocol_interface=protocol_interface).one()
            session.query(DfaTopologyDb).filter_by(
                host=host,
                protocol_interface=protocol_interface).update(topo_dict)
        except orm_exc.NoResultFound:
            LOG.info("Creating new topology entry for host "
                     "%(host)s on Interface %(intf)s",
                     {'host': host, 'intf': protocol_interface})
            topo_disc = DfaTopologyDb(
                host=host,
                protocol_interface=protocol_interface,
                phy_interface=topo_dict.get('phy_interface'),
                created=topo_dict.get('created'),
                heartbeat=topo_dict.get('heartbeat'),
                remote_mgmt_addr=topo_dict.get('remote_mgmt_addr'),
                remote_system_name=topo_dict.get('remote_system_name'),
                remote_system_desc=topo_dict.get('remote_system_desc'),
                remote_port_id_mac=topo_dict.get('remote_port_id_mac'),
                remote_chassis_id_mac=topo_dict.get(
                    'remote_chassis_id_mac'),
                remote_port=topo_dict.get('remote_port'),
                remote_evb_cfgd=topo_dict.get('remote_evb_cfgd'),
                remote_evb_mode=topo_dict.get('remote_evb_mode'),
                configurations=topo_dict.get('configurations'))
            session.add(topo_disc)
        except orm_exc.MultipleResultsFound:
            # The (host, interface) pair should be unique.
            LOG.error("More than one enty found for agent %(host)s."
                      "Interface %(intf)s",
                      {'host': host, 'intf': protocol_interface})
        except Exception as exc:
            LOG.error("Exception in add_update_topology_db %s", exc)
Add or update an entry to the topology DB .
37,779
def _convert_topo_obj_dict ( self , topology_objs ) : topo_lst = [ ] for topo_obj in topology_objs : topo_dct = { 'host' : topo_obj . host , 'protocol_interface' : topo_obj . protocol_interface , 'phy_interface' : topo_obj . phy_interface , 'created' : topo_obj . created , 'heartbeat' : topo_obj . heartbeat , 'remote_mgmt_addr' : topo_obj . remote_mgmt_addr , 'remote_system_name' : topo_obj . remote_system_name , 'remote_system_desc' : topo_obj . remote_system_desc , 'remote_port_id_mac' : topo_obj . remote_port_id_mac , 'remote_chassis_id_mac' : topo_obj . remote_chassis_id_mac , 'remote_port' : topo_obj . remote_port , 'remote_evb_cfgd' : topo_obj . remote_evb_cfgd , 'remote_evb_mode' : topo_obj . remote_evb_mode , 'configurations' : topo_obj . configurations } topo_lst . append ( topo_dct ) return topo_lst
Convert topology object to dict .
37,780
def query_topology_db(self, dict_convert=False, **req):
    """Query topology DB entries matching the given column filters.

    :param dict_convert: when True, return plain dicts instead of ORM
                         row objects.
    :param req: column=value filters passed to filter_by().
    :returns: list of rows (or dicts), or None when the query raised
              NoResultFound.
    """
    session = db.get_session()
    with session.begin(subtransactions=True):
        try:
            rows = session.query(DfaTopologyDb).filter_by(**req).all()
        except orm_exc.NoResultFound:
            LOG.info("No Topology results found for %s", req)
            return None
        if dict_convert:
            return self._convert_topo_obj_dict(rows)
        return rows
Query an entry to the topology DB .
37,781
def delete_topology_entry(self, **req):
    """Delete topology DB rows matching the given column filters.

    :param req: column=value filters passed to filter_by().
    """
    session = db.get_session()
    with session.begin(subtransactions=True):
        try:
            rows = session.query(DfaTopologyDb).filter_by(**req).all()
        except orm_exc.NoResultFound:
            LOG.info("No Topology results found for %s", req)
            return
        try:
            # Delete each matching row inside the same transaction.
            for row in rows:
                session.delete(row)
        except Exception as exc:
            LOG.error("Exception raised %s", str(exc))
Delete the entries from the topology DB .
37,782
def get_lldp_tlv(self, port_name, is_ncb=True, is_nb=False):
    """Query the LLDP TLVs on an interface via lldptool.

    :param port_name: interface to query.
    :param is_ncb: query the nearest-customer-bridge agent (default).
    :param is_nb: query the nearest-bridge agent instead.
    :returns: lldptool output, or None when neither agent is selected.
    """
    if is_ncb:
        group = "ncb"
    elif is_nb:
        group = "nb"
    else:
        LOG.error("Both NCB and NB are not selected to "
                  "query LLDP")
        return None
    return self.run_lldptool(
        ["get-tlv", "-n", "-i", port_name, "-g", group])
Function to Query LLDP TLV on the interface .
37,783
def _check_common_tlv_format ( self , tlv_complete_data , tlv_data_pattern , tlv_string ) : if tlv_complete_data is None : return False , None tlv_string_split = tlv_complete_data . split ( tlv_string ) if len ( tlv_string_split ) < 2 : return False , None next_tlv_list = tlv_string_split [ 1 ] . split ( 'TLV' ) [ 0 ] tlv_val_set = next_tlv_list . split ( tlv_data_pattern ) if len ( tlv_val_set ) < 2 : return False , None return True , tlv_val_set
Check for the common TLV format .
37,784
def get_remote_evb_mode(self, tlv_data):
    """Return the EVB mode advertised in the EVB Configuration TLV,
    or None when the TLV is absent or malformed."""
    ok, fields = self._check_common_tlv_format(
        tlv_data, "mode:", "EVB Configuration TLV")
    if not ok:
        return None
    return fields[1].split()[0].strip()
Returns the EVB mode in the TLV .
37,785
def get_remote_mgmt_addr(self, tlv_data):
    """Return the peer's management address as 'IPv4:<addr>' from the
    Management Address TLV, or None when absent/malformed."""
    ok, fields = self._check_common_tlv_format(
        tlv_data, "IPv4:", "Management Address TLV")
    if not ok:
        return None
    addr_fam = 'IPv4:'
    return addr_fam + fields[1].split('\n')[0].strip()
Returns Remote Mgmt Addr from the TLV .
37,786
def get_remote_sys_desc(self, tlv_data):
    """Return the peer's System Description TLV value, or None when
    absent/malformed."""
    ok, fields = self._check_common_tlv_format(
        tlv_data, "\n", "System Description TLV")
    return fields[1].strip() if ok else None
Returns Remote Sys Desc from the TLV .
37,787
def get_remote_sys_name(self, tlv_data):
    """Return the peer's System Name TLV value, or None when
    absent/malformed."""
    ok, fields = self._check_common_tlv_format(
        tlv_data, "\n", "System Name TLV")
    return fields[1].strip() if ok else None
Returns Remote Sys Name from the TLV .
37,788
def get_remote_port(self, tlv_data):
    """Return the peer's Port Description TLV value, or None when
    absent/malformed."""
    ok, fields = self._check_common_tlv_format(
        tlv_data, "\n", "Port Description TLV")
    return fields[1].strip() if ok else None
Returns Remote Port from the TLV .
37,789
def get_remote_chassis_id_mac(self, tlv_data):
    """Return the peer chassis MAC from the Chassis ID TLV, or None
    when absent/malformed."""
    ok, fields = self._check_common_tlv_format(
        tlv_data, "MAC:", "Chassis ID TLV")
    if not ok:
        return None
    return fields[1].split('\n')[0].strip()
Returns Remote Chassis ID MAC from the TLV .
37,790
def get_remote_port_id_local(self, tlv_data):
    """Return the locally-assigned Port ID from the Port ID TLV, or
    None when absent/malformed."""
    ok, fields = self._check_common_tlv_format(
        tlv_data, "Local:", "Port ID TLV")
    if not ok:
        return None
    return fields[1].split('\n')[0].strip()
Returns Remote Port ID Local from the TLV .
37,791
def format_interface_name(intf_type, port, ch_grp=0):
    """Build a '<type>:<port>' interface name.

    A positive channel group overrides both parts and yields
    'port-channel:<grp>'.
    """
    if ch_grp > 0:
        return 'port-channel:{0}'.format(ch_grp)
    return '{0}:{1}'.format(intf_type.lower(), port)
Method to format interface name given type port .
37,792
def split_interface_name(interface, ch_grp=0):
    """Split an interface name into a (type, id) tuple.

    Accepts 'type:id', 'ethernet <id>', 'port-channel <id>' or a bare
    id (treated as ethernet).  A non-zero channel group overrides
    everything and yields ('port-channel', '<grp>').
    """
    name = interface.lower()
    if ch_grp != 0:
        return 'port-channel', str(ch_grp)
    if ':' in name:
        kind, ident = name.split(':')
        return kind, ident
    for prefix in ('ethernet', 'port-channel'):
        if name.startswith(prefix):
            compact = name.replace(" ", "")
            _, kind, ident = compact.partition(prefix)
            return kind, ident
    # Bare id: default to ethernet.
    return 'ethernet', name
Method to split interface type id from name .
37,793
def _host_notification(self, context, method, payload, host):
    """Cast an RPC message to the cfg agent handling a hosting device
    on a specific host."""
    LOG.debug('Notify Cisco cfg agent at %(host)s the message '
              '%(method)s', {'host': host, 'method': method})
    self.client.prepare(server=host).cast(context, method,
                                          payload=payload)
Notify the cfg agent that is handling the hosting device .
37,794
def _agent_notification(self, context, method, hosting_devices,
                        operation):
    """Notify every cfg agent scheduled to the given hosting devices."""
    admin_context = context if context.is_admin else context.elevated()
    for hosting_device in hosting_devices:
        agents = self._dmplugin.get_cfg_agents_for_hosting_devices(
            admin_context, hosting_device['id'], admin_state_up=True,
            schedule=True)
        for agent in agents:
            LOG.debug('Notify %(agent_type)s at %(topic)s.%(host)s the '
                      'message %(method)s',
                      {'agent_type': agent.agent_type,
                       'topic': agent.topic,
                       'host': agent.host,
                       'method': method})
            self.client.prepare(server=agent.host).cast(context, method)
Notify individual Cisco cfg agents .
37,795
def hosting_devices_unassigned_from_cfg_agent(self, context, ids, host):
    """Tell the cfg agent on *host* that it no longer handles the
    given hosting devices."""
    payload = {'hosting_device_ids': ids}
    self._host_notification(
        context, 'hosting_devices_unassigned_from_cfg_agent', payload,
        host)
Notify cfg agent to no longer handle some hosting devices .
37,796
def hosting_devices_assigned_to_cfg_agent(self, context, ids, host):
    """Tell the cfg agent on *host* that it now handles the given
    hosting devices."""
    payload = {'hosting_device_ids': ids}
    self._host_notification(
        context, 'hosting_devices_assigned_to_cfg_agent', payload, host)
Notify cfg agent to now handle some hosting devices .
37,797
def hosting_devices_removed(self, context, hosting_data, deconfigure,
                            host):
    """Notify the cfg agent on *host* that hosting devices were
    removed.  No-op when hosting_data is empty."""
    if not hosting_data:
        return
    payload = {'hosting_data': hosting_data, 'deconfigure': deconfigure}
    self._host_notification(context, 'hosting_devices_removed', payload,
                            host)
Notify cfg agent that some hosting devices have been removed .
37,798
def get_hosting_device_configuration(self, context, id):
    """Fetch the configuration of the hosting device with *id* from
    its scheduled cfg agent via a synchronous RPC call.

    :returns: the agent's reply, or None when no agent is scheduled.
    """
    admin_context = context if context.is_admin else context.elevated()
    agents = self._dmplugin.get_cfg_agents_for_hosting_devices(
        admin_context, [id], admin_state_up=True, schedule=True)
    if not agents:
        return None
    cctxt = self.client.prepare(server=agents[0].host)
    return cctxt.call(context, 'get_hosting_device_configuration',
                      payload={'hosting_device_id': id})
Fetch configuration of hosting device with id .
37,799
def store_policy(self, pol_id, policy):
    """Cache a policy by id; a duplicate id is silently ignored."""
    if pol_id in self.policies:
        return
    self.policies[pol_id] = policy
    self.policy_cnt += 1
Store the policy .