idx int64 0 63k | question stringlengths 53 5.28k | target stringlengths 5 805 |
|---|---|---|
37,800 | def store_rule ( self , rule_id , rule ) : if rule_id not in self . rules : self . rules [ rule_id ] = rule self . rule_cnt += 1 | Store the rules . |
37,801 | def delete_rule ( self , rule_id ) : if rule_id not in self . rules : LOG . error ( "No Rule id present for deleting %s" , rule_id ) return del self . rules [ rule_id ] self . rule_cnt -= 1 | Delete the specific Rule from dictionary indexed by rule id . |
37,802 | def rule_update ( self , rule_id , rule ) : if rule_id not in self . rules : LOG . error ( "Rule ID not present %s" , rule_id ) return self . rules [ rule_id ] . update ( rule ) | Update the rule . |
37,803 | def is_fw_present ( self , fw_id ) : if self . fw_id is None or self . fw_id != fw_id : return False else : return True | Returns if firewall index by ID is present in dictionary . |
37,804 | def create_fw ( self , proj_name , pol_id , fw_id , fw_name , fw_type , rtr_id ) : self . tenant_name = proj_name self . fw_id = fw_id self . fw_name = fw_name self . fw_created = True self . active_pol_id = pol_id self . fw_type = fw_type self . router_id = rtr_id | Fills up the local attributes when FW is created . |
37,805 | def delete_fw ( self , fw_id ) : self . fw_id = None self . fw_name = None self . fw_created = False self . active_pol_id = None | Deletes the FW local attributes . |
37,806 | def delete_policy ( self , pol_id ) : if pol_id not in self . policies : LOG . error ( "Invalid policy %s" , pol_id ) return del self . policies [ pol_id ] self . policy_cnt -= 1 | Deletes the policy from the local dictionary . |
37,807 | def is_fw_complete ( self ) : LOG . info ( "In fw_complete needed %(fw_created)s " "%(active_policy_id)s %(is_fw_drvr_created)s " "%(pol_present)s %(fw_type)s" , { 'fw_created' : self . fw_created , 'active_policy_id' : self . active_pol_id , 'is_fw_drvr_created' : self . is_fw_drvr_created ( ) , 'pol_present' : self . active_pol_id in self . policies , 'fw_type' : self . fw_type } ) if self . active_pol_id is not None : LOG . info ( "In Drvr create needed %(len_policy)s %(one_rule)s" , { 'len_policy' : len ( self . policies [ self . active_pol_id ] [ 'rule_dict' ] ) , 'one_rule' : self . one_rule_present ( self . active_pol_id ) } ) return self . fw_created and self . active_pol_id and ( self . is_fw_drvr_created ( ) ) and self . fw_type and ( self . active_pol_id in self . policies ) and ( len ( self . policies [ self . active_pol_id ] [ 'rule_dict' ] ) ) > 0 and ( self . one_rule_present ( self . active_pol_id ) ) | This API returns the completion status of FW . |
37,808 | def one_rule_present ( self , pol_id ) : pol_dict = self . policies [ pol_id ] for rule in pol_dict [ 'rule_dict' ] : if self . is_rule_present ( rule ) : return True return False | Returns if at least one rule is present in the policy . |
37,809 | def get_fw_dict ( self ) : fw_dict = { } if self . fw_id is None : return fw_dict fw_dict = { 'rules' : { } , 'tenant_name' : self . tenant_name , 'tenant_id' : self . tenant_id , 'fw_id' : self . fw_id , 'fw_name' : self . fw_name , 'firewall_policy_id' : self . active_pol_id , 'fw_type' : self . fw_type , 'router_id' : self . router_id } if self . active_pol_id not in self . policies : return fw_dict pol_dict = self . policies [ self . active_pol_id ] for rule in pol_dict [ 'rule_dict' ] : fw_dict [ 'rules' ] [ rule ] = self . rules [ rule ] return fw_dict | This API creates a FW dictionary from the local attributes . |
37,810 | def update_fw_params ( self , rtr_id = - 1 , fw_type = - 1 ) : if rtr_id != - 1 : self . router_id = rtr_id if fw_type != - 1 : self . fw_type = fw_type | Updates the FW parameters . |
37,811 | def populate_cfg_dcnm ( self , cfg , dcnm_obj ) : if not self . fw_init : return self . dcnm_obj = dcnm_obj self . fabric . store_dcnm ( dcnm_obj ) self . populate_dcnm_obj ( dcnm_obj ) | This routine stores the DCNM object . |
37,812 | def populate_event_queue ( self , cfg , que_obj ) : if not self . fw_init : return self . que_obj = que_obj self . populate_event_que ( que_obj ) | This routine is for storing the Event Queue obj . |
37,813 | def network_sub_create_notif ( self , tenant_id , tenant_name , cidr ) : if not self . fw_init : return self . network_create_notif ( tenant_id , tenant_name , cidr ) | Network create notification . |
37,814 | def network_del_notif ( self , tenant_id , tenant_name , net_id ) : if not self . fw_init : return self . network_delete_notif ( tenant_id , tenant_name , net_id ) | Network delete notification . |
37,815 | def project_create_notif ( self , tenant_id , tenant_name ) : if not self . fw_init : return self . os_helper . create_router ( '_' . join ( [ fw_constants . TENANT_EDGE_RTR , tenant_name ] ) , tenant_id , [ ] ) | Tenant Create notification . |
37,816 | def project_delete_notif ( self , tenant_id , tenant_name ) : if not self . fw_init : return rtr_name = '_' . join ( [ fw_constants . TENANT_EDGE_RTR , tenant_name ] ) self . os_helper . delete_router_by_name ( rtr_name , tenant_id ) | Tenant Delete notification . |
37,817 | def _create_fw_fab_dev_te ( self , tenant_id , drvr_name , fw_dict ) : is_fw_virt = self . is_device_virtual ( ) ret = self . fabric . prepare_fabric_fw ( tenant_id , fw_dict , is_fw_virt , fw_constants . RESULT_FW_CREATE_INIT ) if not ret : LOG . error ( "Prepare Fabric failed" ) return else : self . update_fw_db_final_result ( fw_dict . get ( 'fw_id' ) , ( fw_constants . RESULT_FW_CREATE_DONE ) ) ret = self . create_fw_device ( tenant_id , fw_dict . get ( 'fw_id' ) , fw_dict ) if ret : self . fwid_attr [ tenant_id ] . fw_drvr_created ( True ) self . update_fw_db_dev_status ( fw_dict . get ( 'fw_id' ) , 'SUCCESS' ) LOG . info ( "FW device create returned success for tenant %s" , tenant_id ) else : LOG . error ( "FW device create returned failure for tenant %s" , tenant_id ) | Prepares the Fabric and configures the device . |
37,818 | def _create_fw_fab_dev ( self , tenant_id , drvr_name , fw_dict ) : if fw_dict . get ( 'fw_type' ) == fw_constants . FW_TENANT_EDGE : self . _create_fw_fab_dev_te ( tenant_id , drvr_name , fw_dict ) | This routine calls the Tenant Edge routine if FW Type is TE . |
37,819 | def _check_create_fw ( self , tenant_id , drvr_name ) : if self . fwid_attr [ tenant_id ] . is_fw_drvr_create_needed ( ) : fw_dict = self . fwid_attr [ tenant_id ] . get_fw_dict ( ) try : with self . fwid_attr [ tenant_id ] . mutex_lock : ret = self . add_fw_db ( fw_dict . get ( 'fw_id' ) , fw_dict , fw_constants . RESULT_FW_CREATE_INIT ) if not ret : LOG . error ( "Adding FW DB failed for tenant %s" , tenant_id ) return self . _create_fw_fab_dev ( tenant_id , drvr_name , fw_dict ) except Exception as exc : LOG . error ( "Exception raised in create fw %s" , str ( exc ) ) | Creates the Firewall if all conditions are met . |
37,820 | def _check_delete_fw ( self , tenant_id , drvr_name ) : fw_dict = self . fwid_attr [ tenant_id ] . get_fw_dict ( ) ret = False try : with self . fwid_attr [ tenant_id ] . mutex_lock : self . update_fw_db_final_result ( fw_dict . get ( 'fw_id' ) , ( fw_constants . RESULT_FW_DELETE_INIT ) ) ret = self . _delete_fw_fab_dev ( tenant_id , drvr_name , fw_dict ) except Exception as exc : LOG . error ( "Exception raised in delete fw %s" , str ( exc ) ) return ret | Deletes the Firewall if all conditions are met . |
37,821 | def _check_update_fw ( self , tenant_id , drvr_name ) : if self . fwid_attr [ tenant_id ] . is_fw_complete ( ) : fw_dict = self . fwid_attr [ tenant_id ] . get_fw_dict ( ) self . modify_fw_device ( tenant_id , fw_dict . get ( 'fw_id' ) , fw_dict ) | Update the Firewall config by calling the driver . |
37,822 | def _fw_create ( self , drvr_name , data , cache ) : fw = data . get ( 'firewall' ) tenant_id = fw . get ( 'tenant_id' ) fw_name = fw . get ( 'name' ) fw_id = fw . get ( 'id' ) fw_pol_id = fw . get ( 'firewall_policy_id' ) admin_state = fw . get ( 'admin_state_up' ) rtr_id = None if 'router_ids' in fw and len ( fw . get ( 'router_ids' ) ) != 0 : rtr_id = fw . get ( 'router_ids' ) [ 0 ] if not admin_state : LOG . debug ( "Admin state disabled" ) return name = dfa_dbm . DfaDBMixin . get_project_name ( self , tenant_id ) rtr_name = '_' . join ( [ fw_constants . TENANT_EDGE_RTR , name ] ) fw_rtr_name = self . os_helper . get_rtr_name ( rtr_id ) fw_type = None if fw_rtr_name == rtr_name : fw_type = fw_constants . FW_TENANT_EDGE if tenant_id not in self . fwid_attr : self . fwid_attr [ tenant_id ] = FwMapAttr ( tenant_id ) tenant_obj = self . fwid_attr [ tenant_id ] tenant_obj . create_fw ( name , fw_pol_id , fw_id , fw_name , fw_type , rtr_id ) self . tenant_db . store_fw_tenant ( fw_id , tenant_id ) if not cache : self . _check_create_fw ( tenant_id , drvr_name ) if fw_pol_id is not None and not ( tenant_obj . is_policy_present ( fw_pol_id ) ) : pol_data = self . os_helper . get_fw_policy ( fw_pol_id ) if pol_data is not None : self . fw_policy_create ( pol_data , cache = cache ) | Firewall create routine . |
37,823 | def fw_create ( self , data , fw_name = None , cache = False ) : LOG . debug ( "FW create %s" , data ) try : self . _fw_create ( fw_name , data , cache ) except Exception as exc : LOG . error ( "Exception in fw_create %s" , str ( exc ) ) | Top level FW create function . |
37,824 | def _fw_update ( self , drvr_name , data ) : fw = data . get ( 'firewall' ) tenant_id = fw . get ( 'tenant_id' ) if self . fwid_attr [ tenant_id ] . is_fw_complete ( ) or self . fwid_attr [ tenant_id ] . is_fw_drvr_create_needed ( ) : prev_info_complete = True else : prev_info_complete = False tenant_obj = self . fwid_attr [ tenant_id ] if 'router_ids' in fw and len ( fw . get ( 'router_ids' ) ) != 0 : rtr_id = fw . get ( 'router_ids' ) [ 0 ] name = dfa_dbm . DfaDBMixin . get_project_name ( self , tenant_id ) rtr_name = '_' . join ( [ fw_constants . TENANT_EDGE_RTR , name ] ) fw_rtr_name = self . os_helper . get_rtr_name ( rtr_id ) fw_type = None if fw_rtr_name == rtr_name : fw_type = fw_constants . FW_TENANT_EDGE tenant_obj . update_fw_params ( rtr_id , fw_type ) if not prev_info_complete : self . _check_create_fw ( tenant_id , drvr_name ) | Update routine for the Firewall . |
37,825 | def fw_update ( self , data , fw_name = None ) : LOG . debug ( "FW Update %s" , data ) self . _fw_update ( fw_name , data ) | Top level FW update function . |
37,826 | def _fw_delete ( self , drvr_name , data ) : fw_id = data . get ( 'firewall_id' ) tenant_id = self . tenant_db . get_fw_tenant ( fw_id ) if tenant_id not in self . fwid_attr : LOG . error ( "Invalid tenant id for FW delete %s" , tenant_id ) return tenant_obj = self . fwid_attr [ tenant_id ] ret = self . _check_delete_fw ( tenant_id , drvr_name ) if ret : tenant_obj . delete_fw ( fw_id ) self . tenant_db . del_fw_tenant ( fw_id ) | Firewall Delete routine . |
37,827 | def _fw_rule_decode_store ( self , data ) : fw_rule = data . get ( 'firewall_rule' ) rule = { 'protocol' : fw_rule . get ( 'protocol' ) , 'source_ip_address' : fw_rule . get ( 'source_ip_address' ) , 'destination_ip_address' : fw_rule . get ( 'destination_ip_address' ) , 'source_port' : fw_rule . get ( 'source_port' ) , 'destination_port' : fw_rule . get ( 'destination_port' ) , 'action' : fw_rule . get ( 'action' ) , 'enabled' : fw_rule . get ( 'enabled' ) , 'name' : fw_rule . get ( 'name' ) } return rule | Misc function to decode the firewall rule from Openstack . |
37,828 | def _fw_rule_create ( self , drvr_name , data , cache ) : tenant_id = data . get ( 'firewall_rule' ) . get ( 'tenant_id' ) fw_rule = data . get ( 'firewall_rule' ) rule = self . _fw_rule_decode_store ( data ) fw_pol_id = fw_rule . get ( 'firewall_policy_id' ) rule_id = fw_rule . get ( 'id' ) if tenant_id not in self . fwid_attr : self . fwid_attr [ tenant_id ] = FwMapAttr ( tenant_id ) self . fwid_attr [ tenant_id ] . store_rule ( rule_id , rule ) if not cache : self . _check_create_fw ( tenant_id , drvr_name ) self . tenant_db . store_rule_tenant ( rule_id , tenant_id ) if fw_pol_id is not None and not ( self . fwid_attr [ tenant_id ] . is_policy_present ( fw_pol_id ) ) : pol_data = self . os_helper . get_fw_policy ( fw_pol_id ) if pol_data is not None : self . fw_policy_create ( pol_data , cache = cache ) | Firewall Rule create routine . |
37,829 | def fw_rule_create ( self , data , fw_name = None , cache = False ) : LOG . debug ( "FW Rule create %s" , data ) self . _fw_rule_create ( fw_name , data , cache ) | Top level rule creation routine . |
37,830 | def _fw_rule_delete ( self , drvr_name , data ) : rule_id = data . get ( 'firewall_rule_id' ) tenant_id = self . tenant_db . get_rule_tenant ( rule_id ) if tenant_id not in self . fwid_attr : LOG . error ( "Invalid tenant id for FW delete %s" , tenant_id ) return tenant_obj = self . fwid_attr [ tenant_id ] tenant_obj . delete_rule ( rule_id ) self . tenant_db . del_rule_tenant ( rule_id ) | Function that updates its local cache after a rule is deleted . |
37,831 | def fw_rule_delete ( self , data , fw_name = None ) : LOG . debug ( "FW Rule delete %s" , data ) self . _fw_rule_delete ( fw_name , data ) | Top level rule delete function . |
37,832 | def _fw_rule_update ( self , drvr_name , data ) : LOG . debug ( "FW Update %s" , data ) tenant_id = data . get ( 'firewall_rule' ) . get ( 'tenant_id' ) fw_rule = data . get ( 'firewall_rule' ) rule = self . _fw_rule_decode_store ( data ) rule_id = fw_rule . get ( 'id' ) if tenant_id not in self . fwid_attr or not ( self . fwid_attr [ tenant_id ] . is_rule_present ( rule_id ) ) : LOG . error ( "Incorrect update info for tenant %s" , tenant_id ) return self . fwid_attr [ tenant_id ] . rule_update ( rule_id , rule ) self . _check_update_fw ( tenant_id , drvr_name ) | Firewall Rule update routine . |
37,833 | def fw_rule_update ( self , data , fw_name = None ) : LOG . debug ( "FW Update Debug" ) self . _fw_rule_update ( fw_name , data ) | Top level rule update routine . |
37,834 | def _fw_policy_delete ( self , drvr_name , data ) : policy_id = data . get ( 'firewall_policy_id' ) tenant_id = self . tenant_db . get_policy_tenant ( policy_id ) if tenant_id not in self . fwid_attr : LOG . error ( "Invalid tenant id for FW delete %s" , tenant_id ) return tenant_obj = self . fwid_attr [ tenant_id ] tenant_obj . delete_policy ( policy_id ) self . tenant_db . del_policy_tenant ( policy_id ) | Routine to delete the policy from local cache . |
37,835 | def fw_policy_delete ( self , data , fw_name = None ) : LOG . debug ( "FW Policy Debug" ) self . _fw_policy_delete ( fw_name , data ) | Top level policy delete routine . |
37,836 | def _fw_policy_create ( self , drvr_name , data , cache ) : policy = { } fw_policy = data . get ( 'firewall_policy' ) tenant_id = fw_policy . get ( 'tenant_id' ) LOG . info ( "Creating policy for tenant %s" , tenant_id ) policy_id = fw_policy . get ( 'id' ) policy_name = fw_policy . get ( 'name' ) pol_rule_dict = fw_policy . get ( 'firewall_rules' ) if tenant_id not in self . fwid_attr : self . fwid_attr [ tenant_id ] = FwMapAttr ( tenant_id ) policy [ 'name' ] = policy_name policy [ 'rule_dict' ] = pol_rule_dict self . fwid_attr [ tenant_id ] . store_policy ( policy_id , policy ) if not cache : self . _check_create_fw ( tenant_id , drvr_name ) self . tenant_db . store_policy_tenant ( policy_id , tenant_id ) for rule in pol_rule_dict : rule_id = rule if not self . fwid_attr [ tenant_id ] . is_rule_present ( rule_id ) : rule_data = self . os_helper . get_fw_rule ( rule_id ) if rule_data is not None : self . fw_rule_create ( rule_data , cache = cache ) | Firewall Policy create routine . |
37,837 | def fw_policy_create ( self , data , fw_name = None , cache = False ) : LOG . debug ( "FW Policy Debug" ) self . _fw_policy_create ( fw_name , data , cache ) | Top level policy create routine . |
37,838 | def convert_fwdb_event_msg ( self , rule , tenant_id , rule_id , policy_id ) : rule . update ( { 'tenant_id' : tenant_id , 'id' : rule_id , 'firewall_policy_id' : policy_id } ) fw_rule_data = { 'firewall_rule' : rule } return fw_rule_data | Convert the Firewall DB to a event message format . |
37,839 | def convert_fwdb ( self , tenant_id , name , policy_id , fw_id ) : fw_dict = { 'tenant_id' : tenant_id , 'name' : name , 'id' : fw_id , 'firewall_policy_id' : policy_id , 'admin_state_up' : True } fw_data = { 'firewall' : fw_dict } return fw_data | Convert the Firewall DB to a query response . |
37,840 | def populate_local_cache ( self ) : fw_dict = self . get_all_fw_db ( ) LOG . info ( "Populating FW Mgr Local Cache" ) for fw_id in fw_dict : fw_data = fw_dict . get ( fw_id ) tenant_id = fw_data . get ( 'tenant_id' ) rule_dict = fw_data . get ( 'rules' ) . get ( 'rules' ) policy_id = fw_data . get ( 'rules' ) . get ( 'firewall_policy_id' ) for rule in rule_dict : fw_evt_data = self . convert_fwdb_event_msg ( rule_dict . get ( rule ) , tenant_id , rule , policy_id ) LOG . info ( "Populating Rules for tenant %s" , tenant_id ) self . fw_rule_create ( fw_evt_data , cache = True ) fw_os_data = self . os_helper . get_fw ( fw_id ) if fw_os_data is None : fw_os_data = self . convert_fwdb ( tenant_id , fw_data . get ( 'name' ) , policy_id , fw_id ) LOG . info ( "Populating FW for tenant %s" , tenant_id ) self . fw_create ( fw_os_data , cache = True ) if fw_data . get ( 'device_status' ) == 'SUCCESS' : self . fwid_attr [ tenant_id ] . fw_drvr_created ( True ) else : self . fwid_attr [ tenant_id ] . fw_drvr_created ( False ) return fw_dict | This populates the local cache after reading the Database . |
37,841 | def retry_failure_fab_dev_create ( self , tenant_id , fw_data , fw_dict ) : result = fw_data . get ( 'result' ) . split ( '(' ) [ 0 ] is_fw_virt = self . is_device_virtual ( ) if result == fw_constants . RESULT_FW_CREATE_INIT : name = dfa_dbm . DfaDBMixin . get_project_name ( self , tenant_id ) ret = self . fabric . retry_failure ( tenant_id , name , fw_dict , is_fw_virt , result ) if not ret : LOG . error ( "Retry failure returned fail for tenant %s" , tenant_id ) return else : result = fw_constants . RESULT_FW_CREATE_DONE self . update_fw_db_final_result ( fw_dict . get ( 'fw_id' ) , result ) if result == fw_constants . RESULT_FW_CREATE_DONE : if fw_data . get ( 'device_status' ) != 'SUCCESS' : ret = self . create_fw_device ( tenant_id , fw_dict . get ( 'fw_id' ) , fw_dict ) if ret : self . fwid_attr [ tenant_id ] . fw_drvr_created ( True ) self . update_fw_db_dev_status ( fw_dict . get ( 'fw_id' ) , 'SUCCESS' ) LOG . info ( "Retry failue return success for create" " tenant %s" , tenant_id ) | This module calls routine in fabric to retry the failure cases . |
37,842 | def retry_failure_fab_dev_delete ( self , tenant_id , fw_data , fw_dict ) : result = fw_data . get ( 'result' ) . split ( '(' ) [ 0 ] name = dfa_dbm . DfaDBMixin . get_project_name ( self , tenant_id ) fw_dict [ 'tenant_name' ] = name is_fw_virt = self . is_device_virtual ( ) if result == fw_constants . RESULT_FW_DELETE_INIT : if self . fwid_attr [ tenant_id ] . is_fw_drvr_created ( ) : ret = self . delete_fw_device ( tenant_id , fw_dict . get ( 'fw_id' ) , fw_dict ) if ret : self . update_fw_db_dev_status ( fw_dict . get ( 'fw_id' ) , '' ) self . fwid_attr [ tenant_id ] . fw_drvr_created ( False ) LOG . info ( "Retry failue dev return success for delete" " tenant %s" , tenant_id ) else : return name = dfa_dbm . DfaDBMixin . get_project_name ( self , tenant_id ) ret = self . fabric . retry_failure ( tenant_id , name , fw_dict , is_fw_virt , result ) if not ret : LOG . error ( "Retry failure returned fail for tenant %s" , tenant_id ) return result = fw_constants . RESULT_FW_DELETE_DONE self . update_fw_db_final_result ( fw_dict . get ( 'fw_id' ) , result ) self . delete_fw ( fw_dict . get ( 'fw_id' ) ) self . fwid_attr [ tenant_id ] . delete_fw ( fw_dict . get ( 'fw_id' ) ) self . tenant_db . del_fw_tenant ( fw_dict . get ( 'fw_id' ) ) | Retry the failure cases for delete . |
37,843 | def fw_retry_failures_create ( self ) : for tenant_id in self . fwid_attr : try : with self . fwid_attr [ tenant_id ] . mutex_lock : if self . fwid_attr [ tenant_id ] . is_fw_drvr_create_needed ( ) : fw_dict = self . fwid_attr [ tenant_id ] . get_fw_dict ( ) if fw_dict : fw_obj , fw_data = self . get_fw ( fw_dict . get ( 'fw_id' ) ) self . retry_failure_fab_dev_create ( tenant_id , fw_data , fw_dict ) else : LOG . error ( "FW data not found for tenant %s" , tenant_id ) except Exception as exc : LOG . error ( "Exception in retry failure create %s" , str ( exc ) ) | This module is called for retrying the create cases . |
37,844 | def fill_fw_dict_from_db ( self , fw_data ) : rule_dict = fw_data . get ( 'rules' ) . get ( 'rules' ) fw_dict = { 'fw_id' : fw_data . get ( 'fw_id' ) , 'fw_name' : fw_data . get ( 'name' ) , 'firewall_policy_id' : fw_data . get ( 'firewall_policy_id' ) , 'fw_type' : fw_data . get ( 'fw_type' ) , 'router_id' : fw_data . get ( 'router_id' ) , 'rules' : { } } for rule in rule_dict : fw_dict [ 'rules' ] [ rule ] = rule_dict . get ( rule ) return fw_dict | This routine is called to create a local fw_dict with data from DB . |
37,845 | def fw_retry_failures_delete ( self ) : for tenant_id in self . fwid_attr : try : with self . fwid_attr [ tenant_id ] . mutex_lock : fw_data = self . get_fw_by_tenant_id ( tenant_id ) if fw_data is None : LOG . info ( "No FW for tenant %s" , tenant_id ) continue result = fw_data . get ( 'result' ) . split ( '(' ) [ 0 ] if result == fw_constants . RESULT_FW_DELETE_INIT : fw_dict = self . fwid_attr [ tenant_id ] . get_fw_dict ( ) if not fw_dict : fw_dict = self . fill_fw_dict_from_db ( fw_data ) self . retry_failure_fab_dev_delete ( tenant_id , fw_data , fw_dict ) except Exception as exc : LOG . error ( "Exception in retry failure delete %s" , str ( exc ) ) | This routine is called for retrying the delete cases . |
37,846 | def fw_retry_failures ( self ) : if not self . fw_init : return try : self . fw_retry_failures_create ( ) self . fw_retry_failures_delete ( ) except Exception as exc : LOG . error ( "Exception in retry failures %s" , str ( exc ) ) | Top level retry routine called . |
37,847 | def _find_starts ( self , linespec ) : linespec += ".*" start_points = [ ] for item in self . _indent_list : match = re . search ( linespec , item [ 1 ] ) if match : entry = ( item , self . _indent_list . index ( item ) ) start_points . append ( entry ) return start_points | Finds the start points . |
37,848 | def find_children ( self , linespec ) : res = [ ] for parent in self . find_objects ( linespec ) : res . append ( parent . line ) res . extend ( [ child . line for child in parent . children ] ) return res | Find lines and immediate children that match the linespec regex . |
37,849 | def enable_evb ( self ) : if self . is_ncb : self . run_lldptool ( [ "-T" , "-i" , self . port_name , "-g" , "ncb" , "-V" , "evb" , "enableTx=yes" ] ) ret = self . enable_gpid ( ) return ret else : LOG . error ( "EVB cannot be set on NB" ) return False | Function to enable EVB on the interface . |
37,850 | def enable_gpid ( self ) : if self . is_ncb : self . run_lldptool ( [ "-T" , "-i" , self . port_name , "-g" , "ncb" , "-V" , "evb" , "-c" , "evbgpid=yes" ] ) return True else : LOG . error ( "GPID cannot be set on NB" ) return False | Function to enable Group ID on the interface . |
37,851 | def _vdp_refrsh_hndlr ( self ) : LOG . debug ( "Refresh handler" ) try : if not self . vdp_vif_map : LOG . debug ( "vdp_vif_map not created, returning" ) return vdp_vif_map = dict . copy ( self . vdp_vif_map ) oui_vif_map = dict . copy ( self . oui_vif_map ) for key in six . iterkeys ( vdp_vif_map ) : lvdp_dict = vdp_vif_map . get ( key ) loui_dict = oui_vif_map . get ( key ) if not lvdp_dict : return if not loui_dict : oui_id = "" oui_data = "" else : oui_id = loui_dict . get ( 'oui_id' ) oui_data = loui_dict . get ( 'oui_data' ) with self . mutex_lock : if key in self . vdp_vif_map : LOG . debug ( "Sending Refresh for VSI %s" , lvdp_dict ) vdp_vlan , fail_reason = self . send_vdp_assoc ( vsiid = lvdp_dict . get ( 'vsiid' ) , mgrid = lvdp_dict . get ( 'mgrid' ) , typeid = lvdp_dict . get ( 'typeid' ) , typeid_ver = lvdp_dict . get ( 'typeid_ver' ) , vsiid_frmt = lvdp_dict . get ( 'vsiid_frmt' ) , filter_frmt = lvdp_dict . get ( 'filter_frmt' ) , gid = lvdp_dict . get ( 'gid' ) , mac = lvdp_dict . get ( 'mac' ) , vlan = 0 , oui_id = oui_id , oui_data = oui_data , sw_resp = True ) if not utils . is_valid_vlan_tag ( vdp_vlan ) : LOG . error ( "Returned vlan %(vlan)s is invalid." , { 'vlan' : vdp_vlan } ) vdp_vlan = 0 exist_vdp_vlan = lvdp_dict . get ( 'vdp_vlan' ) exist_fail_reason = lvdp_dict . get ( 'fail_reason' ) callback_count = lvdp_dict . get ( 'callback_count' ) if vdp_vlan != exist_vdp_vlan or ( fail_reason != exist_fail_reason or callback_count > vdp_const . CALLBACK_THRESHOLD ) : cb_fn = lvdp_dict . get ( 'vsw_cb_fn' ) cb_data = lvdp_dict . get ( 'vsw_cb_data' ) if cb_fn : cb_fn ( cb_data , vdp_vlan , fail_reason ) lvdp_dict [ 'vdp_vlan' ] = vdp_vlan lvdp_dict [ 'fail_reason' ] = fail_reason lvdp_dict [ 'callback_count' ] = 0 else : lvdp_dict [ 'callback_count' ] += 1 except Exception as e : LOG . error ( "Exception in Refrsh %s" , str ( e ) ) | Periodic refresh of vNIC events to VDP . |
37,852 | def store_oui ( self , port_uuid , oui_type , oui_data ) : self . oui_vif_map [ port_uuid ] = { 'oui_id' : oui_type , 'oui_data' : oui_data } | Function for storing the OUI . |
37,853 | def clear_oui ( self , port_uuid ) : if port_uuid in self . oui_vif_map : del self . oui_vif_map [ port_uuid ] else : LOG . debug ( "OUI does not exist" ) | Clears the OUI specific info . |
37,854 | def gen_cisco_vdp_oui ( self , oui_id , oui_data ) : oui_list = [ ] vm_name = oui_data . get ( 'vm_name' ) if vm_name is not None : oui_str = "oui=%s," % oui_id oui_name_str = oui_str + "vm_name=" + vm_name oui_list . append ( oui_name_str ) ip_addr = oui_data . get ( 'ip_addr' ) if ip_addr is not None : oui_str = "oui=%s," % oui_id ip_addr_str = oui_str + "ipv4_addr=" + ip_addr oui_list . append ( ip_addr_str ) vm_uuid = oui_data . get ( 'vm_uuid' ) if vm_uuid is not None : oui_str = "oui=%s," % oui_id vm_uuid_str = oui_str + "vm_uuid=" + vm_uuid oui_list . append ( vm_uuid_str ) return oui_list | Cisco specific handler for constructing OUI arguments . |
37,855 | def gen_oui_str ( self , oui_list ) : oui_str = [ ] for oui in oui_list : oui_str . append ( '-c' ) oui_str . append ( oui ) return oui_str | Generate the OUI string for vdptool . |
37,856 | def construct_vdp_dict ( self , mode , mgrid , typeid , typeid_ver , vsiid_frmt , vsiid , filter_frmt , gid , mac , vlan , oui_id , oui_data ) : vdp_keyword_str = { } if mgrid is None : mgrid = self . vdp_opts . get ( 'mgrid' ) mgrid_str = "mgrid2=%s" % mgrid if typeid is None : typeid = self . vdp_opts . get ( 'typeid' ) typeid_str = "typeid=%s" % typeid if typeid_ver is None : typeid_ver = self . vdp_opts . get ( 'typeidver' ) typeid_ver_str = "typeidver=%s" % typeid_ver if int ( vsiid_frmt ) == int ( self . vdp_opts . get ( 'vsiidfrmt' ) ) : vsiid_str = "uuid=%s" % vsiid else : LOG . error ( "Unsupported VSIID Format1" ) return vdp_keyword_str if vlan == constants . INVALID_VLAN : vlan = 0 if int ( filter_frmt ) == vdp_const . VDP_FILTER_GIDMACVID : if not mac or gid == 0 : LOG . error ( "Incorrect Filter Format Specified" ) return vdp_keyword_str else : f = "filter=%s-%s-%s" filter_str = f % ( vlan , mac , gid ) elif int ( filter_frmt ) == vdp_const . VDP_FILTER_GIDVID : if gid == 0 : LOG . error ( "NULL GID Specified" ) return vdp_keyword_str else : filter_str = "filter=" + '%d' % vlan + "--" + '%ld' % gid elif int ( filter_frmt ) == vdp_const . VDP_FILTER_MACVID : if not mac : LOG . error ( "NULL MAC Specified" ) return vdp_keyword_str else : filter_str = "filter=" + '%d' % vlan + "-" + mac elif int ( filter_frmt ) == vdp_const . VDP_FILTER_VID : filter_str = "filter=" + '%d' % vlan else : LOG . error ( "Incorrect Filter Format Specified" ) return vdp_keyword_str oui_list = [ ] if oui_id is not None and oui_data is not None : if oui_id is 'cisco' : oui_list = self . gen_cisco_vdp_oui ( oui_id , oui_data ) mode_str = "mode=" + mode vdp_keyword_str = dict ( mode = mode_str , mgrid = mgrid_str , typeid = typeid_str , typeid_ver = typeid_ver_str , vsiid = vsiid_str , filter = filter_str , oui_list = oui_list ) return vdp_keyword_str | Constructs the VDP Message . |
37,857 | def send_vdp_query_msg ( self , mode , mgrid , typeid , typeid_ver , vsiid_frmt , vsiid , filter_frmt , gid , mac , vlan , oui_id , oui_data ) : if not self . is_ncb : LOG . error ( "EVB cannot be set on NB" ) return vdp_key_str = self . construct_vdp_dict ( mode , mgrid , typeid , typeid_ver , vsiid_frmt , vsiid , filter_frmt , gid , mac , vlan , None , None ) if len ( vdp_key_str ) == 0 : LOG . error ( "NULL List" ) return reply = self . run_vdptool ( [ "-t" , "-i" , self . port_name , "-R" , "-V" , mode , "-c" , vdp_key_str [ 'mode' ] , "-c" , vdp_key_str [ 'mgrid' ] , "-c" , vdp_key_str [ 'typeid' ] , "-c" , vdp_key_str [ 'typeid_ver' ] , "-c" , vdp_key_str [ 'vsiid' ] ] ) return reply | Constructs and Sends the VDP Query Message . |
37,858 | def send_vdp_msg ( self , mode , mgrid , typeid , typeid_ver , vsiid_frmt , vsiid , filter_frmt , gid , mac , vlan , oui_id , oui_data , sw_resp ) : if not self . is_ncb : LOG . error ( "EVB cannot be set on NB" ) return vdp_key_str = self . construct_vdp_dict ( mode , mgrid , typeid , typeid_ver , vsiid_frmt , vsiid , filter_frmt , gid , mac , vlan , oui_id , oui_data ) if len ( vdp_key_str ) == 0 : LOG . error ( "NULL List" ) return oui_cmd_str = self . gen_oui_str ( vdp_key_str [ 'oui_list' ] ) if sw_resp : reply = self . run_vdptool ( [ "-T" , "-i" , self . port_name , "-W" , "-V" , mode , "-c" , vdp_key_str [ 'mode' ] , "-c" , vdp_key_str [ 'mgrid' ] , "-c" , vdp_key_str [ 'typeid' ] , "-c" , vdp_key_str [ 'typeid_ver' ] , "-c" , vdp_key_str [ 'vsiid' ] , "-c" , "hints=none" , "-c" , vdp_key_str [ 'filter' ] ] , oui_args = oui_cmd_str ) else : reply = self . run_vdptool ( [ "-T" , "-i" , self . port_name , "-V" , mode , "-c" , vdp_key_str [ 'mode' ] , "-c" , vdp_key_str [ 'mgrid' ] , "-c" , vdp_key_str [ 'typeid' ] , "-c" , vdp_key_str [ 'typeid_ver' ] , "-c" , vdp_key_str [ 'vsiid' ] , "-c" , "hints=none" , "-c" , vdp_key_str [ 'filter' ] ] , oui_args = oui_cmd_str ) return reply | Constructs and Sends the VDP Message . |
37,859 | def crosscheck_query_vsiid_mac ( self , reply , vsiid , mac ) : vsiid_reply = reply . partition ( "uuid" ) [ 2 ] . split ( ) [ 0 ] [ 4 : ] if vsiid != vsiid_reply : fail_reason = vdp_const . vsi_mismatch_failure_reason % ( vsiid , vsiid_reply ) LOG . error ( "%s" , fail_reason ) return False , fail_reason mac_reply = reply . partition ( "filter" ) [ 2 ] . split ( '-' ) [ 1 ] if mac != mac_reply : fail_reason = vdp_const . mac_mismatch_failure_reason % ( mac , mac_reply ) LOG . error ( "%s" , fail_reason ) return False , fail_reason return True , None | Cross Check the reply against the input vsiid mac for get query . |
37,860 | def get_vdp_failure_reason ( self , reply ) : try : fail_reason = reply . partition ( "filter" ) [ 0 ] . replace ( '\t' , '' ) . split ( '\n' ) [ - 2 ] if len ( fail_reason ) == 0 : fail_reason = vdp_const . retrieve_failure_reason % ( reply ) except Exception : fail_reason = vdp_const . retrieve_failure_reason % ( reply ) return fail_reason | Parse the failure reason from VDP . |
37,861 | def check_filter_validity ( self , reply , filter_str ) : try : f_ind = reply . index ( filter_str ) l_ind = reply . rindex ( filter_str ) except Exception : fail_reason = vdp_const . filter_failure_reason % ( reply ) LOG . error ( "%s" , fail_reason ) return False , fail_reason if f_ind != l_ind : fail_reason = vdp_const . multiple_filter_failure_reason % ( reply ) LOG . error ( "%s" , fail_reason ) return False , fail_reason return True , None | Check for the validity of the filter . |
37,862 | def get_vlan_from_associate_reply ( self , reply , vsiid , mac ) : try : verify_flag , fail_reason = self . crosscheck_reply_vsiid_mac ( reply , vsiid , mac ) if not verify_flag : return constants . INVALID_VLAN , fail_reason mode_str = reply . partition ( "mode = " ) [ 2 ] . split ( ) [ 0 ] if mode_str != "assoc" : fail_reason = self . get_vdp_failure_reason ( reply ) return constants . INVALID_VLAN , fail_reason except Exception : fail_reason = vdp_const . mode_failure_reason % ( reply ) LOG . error ( "%s" , fail_reason ) return constants . INVALID_VLAN , fail_reason check_filter , fail_reason = self . check_filter_validity ( reply , "filter = " ) if not check_filter : return constants . INVALID_VLAN , fail_reason try : vlan_val = reply . partition ( "filter = " ) [ 2 ] . split ( '-' ) [ 0 ] vlan = int ( vlan_val ) except ValueError : fail_reason = vdp_const . format_failure_reason % ( reply ) LOG . error ( "%s" , fail_reason ) return constants . INVALID_VLAN , fail_reason return vlan , None | Parse the associate reply from VDP daemon to get the VLAN value . |
37,863 | def check_hints ( self , reply ) : try : f_ind = reply . index ( "hints" ) l_ind = reply . rindex ( "hints" ) except Exception : fail_reason = vdp_const . hints_failure_reason % ( reply ) LOG . error ( "%s" , fail_reason ) return False , fail_reason if f_ind != l_ind : fail_reason = vdp_const . multiple_hints_failure_reason % ( reply ) LOG . error ( "%s" , fail_reason ) return False , fail_reason try : hints_compl = reply . partition ( "hints" ) [ 2 ] hints_val = reply . partition ( "hints" ) [ 2 ] [ 0 : 4 ] len_hints = int ( hints_val ) hints_val = hints_compl [ 4 : 4 + len_hints ] hints = int ( hints_val ) if hints != 0 : fail_reason = vdp_const . nonzero_hints_failure % ( hints ) return False , fail_reason except ValueError : fail_reason = vdp_const . format_failure_reason % ( reply ) LOG . error ( "%s" , fail_reason ) return False , fail_reason return True , None | Parse the hints to check for errors . |
37,864 | def get_vlan_from_query_reply ( self , reply , vsiid , mac ) : hints_ret , fail_reason = self . check_hints ( reply ) if not hints_ret : LOG . error ( "Incorrect hints found %s" , reply ) return constants . INVALID_VLAN , fail_reason check_filter , fail_reason = self . check_filter_validity ( reply , "filter" ) if not check_filter : return constants . INVALID_VLAN , fail_reason try : verify_flag , fail_reason = self . crosscheck_query_vsiid_mac ( reply , vsiid , mac ) if not verify_flag : return constants . INVALID_VLAN , fail_reason filter_val = reply . partition ( "filter" ) [ 2 ] len_fil = len ( filter_val ) vlan_val = filter_val [ 4 : len_fil ] . split ( '-' ) [ 0 ] vlan = int ( vlan_val ) except ValueError : fail_reason = vdp_const . format_failure_reason % ( reply ) LOG . error ( "%s" , fail_reason ) return constants . INVALID_VLAN , fail_reason return vlan , None | Parse the query reply from VDP daemon to get the VLAN value . |
37,865 | def send_vdp_assoc ( self , vsiid = None , mgrid = None , typeid = None , typeid_ver = None , vsiid_frmt = vdp_const . VDP_VSIFRMT_UUID , filter_frmt = vdp_const . VDP_FILTER_GIDMACVID , gid = 0 , mac = "" , vlan = 0 , oui_id = "" , oui_data = "" , sw_resp = False ) : if sw_resp and filter_frmt == vdp_const . VDP_FILTER_GIDMACVID : reply = self . send_vdp_query_msg ( "assoc" , mgrid , typeid , typeid_ver , vsiid_frmt , vsiid , filter_frmt , gid , mac , vlan , oui_id , oui_data ) vlan_resp , fail_reason = self . get_vlan_from_query_reply ( reply , vsiid , mac ) if vlan_resp != constants . INVALID_VLAN : return vlan_resp , fail_reason reply = self . send_vdp_msg ( "assoc" , mgrid , typeid , typeid_ver , vsiid_frmt , vsiid , filter_frmt , gid , mac , vlan , oui_id , oui_data , sw_resp ) if sw_resp : vlan , fail_reason = self . get_vlan_from_associate_reply ( reply , vsiid , mac ) return vlan , fail_reason return None , None | Sends the VDP Associate Message . |
37,866 | def send_vdp_vnic_up ( self , port_uuid = None , vsiid = None , mgrid = None , typeid = None , typeid_ver = None , vsiid_frmt = vdp_const . VDP_VSIFRMT_UUID , filter_frmt = vdp_const . VDP_FILTER_GIDMACVID , gid = 0 , mac = "" , vlan = 0 , oui = None , new_network = False , vsw_cb_fn = None , vsw_cb_data = None ) : if oui is None : oui = { } oui_id = None oui_data = None if 'oui_id' in oui : oui_id = oui [ 'oui_id' ] oui_data = oui reply , fail_reason = self . send_vdp_assoc ( vsiid = vsiid , mgrid = mgrid , typeid = typeid , typeid_ver = typeid_ver , vsiid_frmt = vsiid_frmt , filter_frmt = filter_frmt , gid = gid , mac = mac , vlan = vlan , oui_id = oui_id , oui_data = oui_data , sw_resp = new_network ) self . store_vdp_vsi ( port_uuid , mgrid , typeid , typeid_ver , vsiid_frmt , vsiid , filter_frmt , gid , mac , vlan , new_network , reply , oui_id , oui_data , vsw_cb_fn , vsw_cb_data , fail_reason ) return reply , fail_reason | Interface function to apps called for a vNIC UP . |
37,867 | def send_vdp_vnic_down ( self , port_uuid = None , vsiid = None , mgrid = None , typeid = None , typeid_ver = None , vsiid_frmt = vdp_const . VDP_VSIFRMT_UUID , filter_frmt = vdp_const . VDP_FILTER_GIDMACVID , gid = 0 , mac = "" , vlan = 0 , oui = "" ) : try : with self . mutex_lock : self . send_vdp_deassoc ( vsiid = vsiid , mgrid = mgrid , typeid = typeid , typeid_ver = typeid_ver , vsiid_frmt = vsiid_frmt , filter_frmt = filter_frmt , gid = gid , mac = mac , vlan = vlan ) self . clear_vdp_vsi ( port_uuid ) except Exception as e : LOG . error ( "VNIC Down exception %s" , e ) | Interface function to apps called for a vNIC DOWN . |
37,868 | def run_vdptool ( self , args , oui_args = None ) : if oui_args is None : oui_args = [ ] full_args = [ 'vdptool' ] + args + oui_args try : return utils . execute ( full_args , root_helper = self . root_helper ) except Exception as e : LOG . error ( "Unable to execute %(cmd)s. " "Exception: %(exception)s" , { 'cmd' : full_args , 'exception' : e } ) | Function that runs the vdptool utility . |
37,869 | def get_routers ( self , context , router_ids = None , hd_ids = None ) : cctxt = self . client . prepare ( version = '1.1' ) return cctxt . call ( context , 'cfg_sync_routers' , host = self . host , router_ids = router_ids , hosting_device_ids = hd_ids ) | Make a remote process call to retrieve the sync data for routers . |
37,870 | def update_floatingip_statuses ( self , context , router_id , fip_statuses ) : cctxt = self . client . prepare ( version = '1.1' ) return cctxt . call ( context , 'update_floatingip_statuses_cfg' , router_id = router_id , fip_statuses = fip_statuses ) | Make a remote process call to update operational status for one or several floating IPs . |
37,871 | def send_update_port_statuses ( self , context , port_ids , status ) : cctxt = self . client . prepare ( version = '1.1' ) return cctxt . call ( context , 'update_port_statuses_cfg' , port_ids = port_ids , status = status ) | Call the plugin to update the port status which updates the DB .
37,872 | def router_deleted ( self , context , routers ) : LOG . debug ( 'Got router deleted notification for %s' , routers ) self . _update_removed_routers_cache ( routers ) | Deal with router deletion RPC message . |
37,873 | def routers_updated ( self , context , routers ) : LOG . debug ( 'Got routers updated notification :%s' , routers ) if routers : if isinstance ( routers [ 0 ] , dict ) : routers = [ router [ 'id' ] for router in routers ] self . _update_updated_routers_cache ( routers ) | Deal with routers modification and creation RPC message . |
37,874 | def collect_state ( self , configurations ) : num_ex_gw_ports = 0 num_interfaces = 0 num_floating_ips = 0 router_infos = self . router_info . values ( ) num_routers = len ( router_infos ) num_hd_routers = collections . defaultdict ( int ) for ri in router_infos : ex_gw_port = ri . router . get ( 'gw_port' ) if ex_gw_port : num_ex_gw_ports += 1 num_interfaces += len ( ri . router . get ( bc . constants . INTERFACE_KEY , [ ] ) ) num_floating_ips += len ( ri . router . get ( bc . constants . FLOATINGIP_KEY , [ ] ) ) hd = ri . router [ 'hosting_device' ] if hd : num_hd_routers [ hd [ 'id' ] ] += 1 routers_per_hd = dict ( ( hd_id , { 'routers' : num } ) for hd_id , num in num_hd_routers . items ( ) ) non_responding = self . _dev_status . get_backlogged_hosting_devices ( ) configurations [ 'total routers' ] = num_routers configurations [ 'total ex_gw_ports' ] = num_ex_gw_ports configurations [ 'total interfaces' ] = num_interfaces configurations [ 'total floating_ips' ] = num_floating_ips configurations [ 'hosting_devices' ] = routers_per_hd configurations [ 'non_responding_hosting_devices' ] = non_responding return configurations | Collect state from this helper . |
37,875 | def _fetch_router_info ( self , router_ids = None , device_ids = None , all_routers = False ) : try : if all_routers : LOG . debug ( 'Fetching all routers' ) router_ids = self . plugin_rpc . get_router_ids ( self . context ) routers = self . _fetch_router_chunk_data ( router_ids ) elif router_ids : routers = self . _fetch_router_chunk_data ( router_ids ) elif device_ids : return self . plugin_rpc . get_routers ( self . context , hd_ids = device_ids ) except oslo_messaging . MessagingTimeout : if self . sync_routers_chunk_size > SYNC_ROUTERS_MIN_CHUNK_SIZE : self . sync_routers_chunk_size = max ( int ( round ( self . sync_routers_chunk_size / 2 ) ) , SYNC_ROUTERS_MIN_CHUNK_SIZE ) LOG . warning ( 'Server failed to return info for routers in ' 'required time, decreasing chunk size to: %s' , self . sync_routers_chunk_size ) else : LOG . warning ( 'Server failed to return info for routers in ' 'required time even with min chunk size: %s. ' 'It might be under very high load or just ' 'inoperable' , self . sync_routers_chunk_size ) raise except oslo_messaging . MessagingException : LOG . exception ( "RPC Error in fetching routers from plugin" ) self . fullsync = True raise n_exc . AbortSyncRouters ( ) LOG . debug ( "Periodic_sync_routers_task successfully completed" ) if ( self . sync_routers_chunk_size < cfg . CONF . cfg_agent . max_device_sync_batch_size ) : self . sync_routers_chunk_size = min ( self . sync_routers_chunk_size + SYNC_ROUTERS_MIN_CHUNK_SIZE , cfg . CONF . cfg_agent . max_device_sync_batch_size ) return routers | Fetch router dict from the routing plugin . |
37,876 | def _fetch_router_chunk_data ( self , router_ids = None ) : curr_router = [ ] if len ( router_ids ) > self . sync_routers_chunk_size : for i in range ( 0 , len ( router_ids ) , self . sync_routers_chunk_size ) : routers = self . plugin_rpc . get_routers ( self . context , ( router_ids [ i : i + self . sync_routers_chunk_size ] ) ) LOG . debug ( 'Processing :%r' , routers ) for r in routers : curr_router . append ( r ) else : curr_router = self . plugin_rpc . get_routers ( self . context , router_ids = router_ids ) return curr_router | Fetch router data from the routing plugin in chunks . |
37,877 | def _handle_sync_devices ( self , routers ) : sync_devices_list = list ( self . sync_devices ) LOG . debug ( "Fetching routers on:%s" , sync_devices_list ) fetched_routers = self . _fetch_router_info ( device_ids = sync_devices_list ) if fetched_routers : LOG . debug ( "[sync_devices] Fetched routers :%s" , pp . pformat ( fetched_routers ) ) for router_dict in fetched_routers : self . _del_from_updated_routers_cache ( router_dict [ 'id' ] ) self . _del_from_removed_routers_cache ( router_dict [ 'id' ] ) LOG . debug ( "[sync_devices] invoking " "_router_removed(%s)" , router_dict [ 'id' ] ) self . _router_removed ( router_dict [ 'id' ] , deconfigure = False ) self . _cleanup_invalid_cfg ( fetched_routers ) routers . extend ( fetched_routers ) self . sync_devices . clear ( ) LOG . debug ( "[sync_devices] %s finished" , sync_devices_list ) else : self . sync_devices_attempts += 1 if ( self . sync_devices_attempts >= cfg . CONF . cfg_agent . max_device_sync_attempts ) : LOG . debug ( "Max number [%d / %d ] of sync_devices " "attempted. No further retries will " "be attempted." % ( self . sync_devices_attempts , cfg . CONF . cfg_agent . max_device_sync_attempts ) ) self . sync_devices . clear ( ) self . sync_devices_attempts = 0 else : LOG . debug ( "Fetched routers was blank for sync attempt " "[%d / %d], will attempt resync of %s devices " "again in the next iteration" % ( self . sync_devices_attempts , cfg . CONF . cfg_agent . max_device_sync_attempts , pp . pformat ( self . sync_devices ) ) ) | Handles routers during a device_sync . |
37,878 | def _get_router_ids_from_removed_devices_info ( removed_devices_info ) : removed_router_ids = [ ] for hd_id , resources in removed_devices_info [ 'hosting_data' ] . items ( ) : removed_router_ids += resources . get ( 'routers' , [ ] ) return removed_router_ids | Extract router_ids from the removed devices info dict . |
37,879 | def _sort_resources_per_hosting_device ( resources ) : hosting_devices = { } for key in resources . keys ( ) : for r in resources . get ( key ) or [ ] : if r . get ( 'hosting_device' ) is None : continue hd_id = r [ 'hosting_device' ] [ 'id' ] hosting_devices . setdefault ( hd_id , { } ) hosting_devices [ hd_id ] . setdefault ( key , [ ] ) . append ( r ) return hosting_devices | This function will sort the resources on hosting device . |
37,880 | def _adjust_router_list_for_global_router ( self , routers ) : for r in routers : if r [ ROUTER_ROLE_ATTR ] == c_constants . ROUTER_ROLE_GLOBAL : LOG . debug ( "Global router:%s found. Moved to the end of list " "for processing" , r [ 'id' ] ) routers . remove ( r ) routers . append ( r ) | Pushes Global routers to the end of the router list so that deleting default route occurs before deletion of external nw subintf |
37,881 | def _send_update_port_statuses ( self , port_ids , status ) : if not port_ids : return MAX_PORTS_IN_BATCH = 50 list_chunks_ports = [ port_ids [ i : i + MAX_PORTS_IN_BATCH ] for i in six . moves . range ( 0 , len ( port_ids ) , MAX_PORTS_IN_BATCH ) ] for chunk_ports in list_chunks_ports : self . plugin_rpc . send_update_port_statuses ( self . context , chunk_ports , status ) | Sends update notifications to set the operational status of the list of router ports provided . To make each notification doesn t exceed the RPC length each message contains a maximum of MAX_PORTS_IN_BATCH port ids . |
37,882 | def _process_router ( self , ri ) : try : ex_gw_port = ri . router . get ( 'gw_port' ) ri . ha_info = ri . router . get ( 'ha_info' , None ) gateway_set = ex_gw_port and not ri . ex_gw_port gateway_cleared = not ex_gw_port and ri . ex_gw_port internal_ports = ri . router . get ( bc . constants . INTERFACE_KEY , [ ] ) if gateway_set : self . _add_rid_to_vrf_list ( ri ) new_ports , old_ports , change_details = ( self . _get_internal_port_changes ( ri , internal_ports ) ) list_port_ids_up = [ ] non_global_router_roles = [ None , c_constants . ROUTER_ROLE_HA_REDUNDANCY ] if ri . router [ ROUTER_ROLE_ATTR ] in non_global_router_roles : self . _process_new_ports ( ri , new_ports , ex_gw_port , list_port_ids_up , change_details ) self . _process_old_ports ( ri , old_ports , ex_gw_port , change_details ) else : self . _process_new_ports_global ( ri , new_ports , ex_gw_port , list_port_ids_up ) self . _process_old_ports_global ( ri , old_ports , ex_gw_port ) if gateway_set : self . _process_gateway_set ( ri , ex_gw_port , list_port_ids_up ) elif gateway_cleared : self . _process_gateway_cleared ( ri , ri . ex_gw_port ) self . _send_update_port_statuses ( list_port_ids_up , bc . constants . PORT_STATUS_ACTIVE ) if ex_gw_port : self . _process_router_floating_ips ( ri , ex_gw_port ) global_router_roles = [ c_constants . ROUTER_ROLE_GLOBAL , c_constants . ROUTER_ROLE_LOGICAL_GLOBAL ] if ri . router [ ROUTER_ROLE_ATTR ] not in global_router_roles : self . _enable_disable_ports ( ri , ex_gw_port , internal_ports ) if gateway_cleared : self . _remove_rid_from_vrf_list ( ri ) ri . ex_gw_port = ex_gw_port self . _routes_updated ( ri ) except cfg_exceptions . HAParamsMissingException as e : self . _update_updated_routers_cache ( [ ri . router_id ] ) LOG . warning ( e ) except cfg_exceptions . DriverException as e : with excutils . save_and_reraise_exception ( ) : self . _update_updated_routers_cache ( [ ri . router_id ] ) LOG . error ( e ) | Process a router apply latest configuration and update router_info .
37,883 | def _process_router_floating_ips ( self , ri , ex_gw_port ) : current_fips = ri . router . get ( bc . constants . FLOATINGIP_KEY , [ ] ) current_fip_ids = { fip [ 'id' ] for fip in current_fips } configured_fip_ids = { fip [ 'id' ] for fip in ri . floating_ips } id_to_current_fip_map = { } fips_to_add = [ ] for configured_fip in current_fips : if configured_fip [ 'port_id' ] : id_to_current_fip_map [ configured_fip [ 'id' ] ] = configured_fip if configured_fip [ 'id' ] not in configured_fip_ids : fips_to_add . append ( configured_fip ) fip_ids_to_remove = configured_fip_ids - current_fip_ids LOG . debug ( "fip_ids_to_add: %s" % fips_to_add ) LOG . debug ( "fip_ids_to_remove: %s" % fip_ids_to_remove ) fips_to_remove = [ ] fip_statuses = { } for configured_fip in ri . floating_ips : if configured_fip [ 'id' ] in fip_ids_to_remove : fips_to_remove . append ( configured_fip ) self . _floating_ip_removed ( ri , ri . ex_gw_port , configured_fip [ 'floating_ip_address' ] , configured_fip [ 'fixed_ip_address' ] ) fip_statuses [ configured_fip [ 'id' ] ] = ( bc . constants . FLOATINGIP_STATUS_DOWN ) LOG . debug ( "Add to fip_statuses DOWN id:%s fl_ip:%s fx_ip:%s" , configured_fip [ 'id' ] , configured_fip [ 'floating_ip_address' ] , configured_fip [ 'fixed_ip_address' ] ) else : configured_fixed_ip = configured_fip [ 'fixed_ip_address' ] new_fip = id_to_current_fip_map [ configured_fip [ 'id' ] ] current_fixed_ip = new_fip [ 'fixed_ip_address' ] if ( current_fixed_ip and configured_fixed_ip and current_fixed_ip != configured_fixed_ip ) : floating_ip = configured_fip [ 'floating_ip_address' ] self . _floating_ip_removed ( ri , ri . ex_gw_port , floating_ip , configured_fixed_ip ) fip_statuses [ configured_fip [ 'id' ] ] = ( bc . constants . FLOATINGIP_STATUS_DOWN ) fips_to_remove . append ( configured_fip ) fips_to_add . append ( new_fip ) for configured_fip in fips_to_remove : ri . floating_ips . remove ( configured_fip ) for configured_fip in fips_to_add : self . _floating_ip_added ( ri , ex_gw_port , configured_fip [ 'floating_ip_address' ] , configured_fip [ 'fixed_ip_address' ] ) ri . floating_ips . append ( configured_fip ) fip_statuses [ configured_fip [ 'id' ] ] = ( bc . constants . FLOATINGIP_STATUS_ACTIVE ) LOG . debug ( "Add to fip_statuses ACTIVE id:%s fl_ip:%s fx_ip:%s" , configured_fip [ 'id' ] , configured_fip [ 'floating_ip_address' ] , configured_fip [ 'fixed_ip_address' ] ) if fip_statuses : LOG . debug ( "Sending floatingip_statuses_update: %s" , fip_statuses ) self . plugin_rpc . update_floatingip_statuses ( self . context , ri . router_id , fip_statuses ) | Process a router s floating ips .
37,884 | def _router_added ( self , router_id , router ) : ri = RouterInfo ( router_id , router ) driver = self . driver_manager . set_driver ( router ) if router [ ROUTER_ROLE_ATTR ] in [ c_constants . ROUTER_ROLE_GLOBAL , c_constants . ROUTER_ROLE_LOGICAL_GLOBAL ] : LOG . debug ( "Skipping router_added device processing for %(id)s as " "its role is %(role)s" , { 'id' : router_id , 'role' : router [ ROUTER_ROLE_ATTR ] } ) else : driver . router_added ( ri ) self . router_info [ router_id ] = ri | Operations when a router is added . |
37,885 | def _router_removed ( self , router_id , deconfigure = True ) : ri = self . router_info . get ( router_id ) if ri is None : LOG . warning ( "Info for router %s was not found. " "Skipping router removal." , router_id ) return ri . router [ 'gw_port' ] = None ri . router [ bc . constants . INTERFACE_KEY ] = [ ] ri . router [ bc . constants . FLOATINGIP_KEY ] = [ ] try : hd = ri . router [ 'hosting_device' ] if ( deconfigure and self . _dev_status . is_hosting_device_reachable ( hd ) ) : self . _process_router ( ri ) driver = self . driver_manager . get_driver ( router_id ) driver . router_removed ( ri ) self . driver_manager . remove_driver ( router_id ) del self . router_info [ router_id ] self . _del_from_removed_routers_cache ( router_id ) except cfg_exceptions . DriverException : LOG . warning ( "Router remove for router_id: %s was incomplete. " "Adding the router to removed_routers list" , router_id ) self . _update_removed_routers_cache ( [ router_id ] ) self . _del_from_updated_routers_cache ( router_id ) except ncc_errors . SessionCloseError as e : LOG . exception ( "ncclient Unexpected session close %s" " while attempting to remove router" , e ) if not self . _dev_status . is_hosting_device_reachable ( hd ) : LOG . debug ( "Lost connectivity to Hosting Device %s" % hd [ 'id' ] ) else : self . _update_removed_routers_cache ( [ router_id ] ) LOG . debug ( "Interim connectivity lost to hosting device %s, " "enqueuing router %s in removed_routers set" % pp . pformat ( hd ) , router_id ) | Operations when a router is removed . |
37,886 | def _routes_updated ( self , ri ) : new_routes = ri . router [ 'routes' ] old_routes = ri . routes adds , removes = bc . common_utils . diff_list_of_dict ( old_routes , new_routes ) for route in adds : LOG . debug ( "Added route entry is '%s'" , route ) for del_route in removes : if route [ 'destination' ] == del_route [ 'destination' ] : removes . remove ( del_route ) driver = self . driver_manager . get_driver ( ri . id ) driver . routes_updated ( ri , 'replace' , route ) for route in removes : LOG . debug ( "Removed route entry is '%s'" , route ) driver = self . driver_manager . get_driver ( ri . id ) driver . routes_updated ( ri , 'delete' , route ) ri . routes = new_routes | Update the state of routes in the router . |
37,887 | def subintf_real_ip_check_gw_port ( self , gw_port , ip_addr , netmask ) : if gw_port is not None : found = False for i in range ( len ( gw_port [ 'fixed_ips' ] ) ) : target_ip = gw_port [ 'fixed_ips' ] [ i ] [ 'ip_address' ] if ip_addr == target_ip : found = True break if found is False : LOG . info ( "Subintf real IP is incorrect, deleting" ) return False subnet_id = gw_port [ 'fixed_ips' ] [ i ] [ 'subnet_id' ] subnet = next ( sn for sn in gw_port [ 'subnets' ] if sn [ 'id' ] == subnet_id ) target_net = netaddr . IPNetwork ( subnet [ 'cidr' ] ) if netmask != str ( target_net . netmask ) : LOG . info ( "Subintf has incorrect netmask, deleting" ) return False return True return False | checks running - cfg derived ip_addr and netmask against neutron - db gw_port |
37,888 | def _get_connection ( self ) : try : if self . _ncc_connection and self . _ncc_connection . connected : return self . _ncc_connection else : self . _ncc_connection = manager . connect ( host = self . _host_ip , port = self . _host_ssh_port , username = self . _username , password = self . _password , device_params = { 'name' : "csr" } , timeout = self . _timeout ) if not self . _itfcs_enabled : self . _itfcs_enabled = self . _enable_itfcs ( self . _ncc_connection ) return self . _ncc_connection except Exception as e : conn_params = { 'host' : self . _host_ip , 'port' : self . _host_ssh_port , 'user' : self . _username , 'timeout' : self . _timeout , 'reason' : e . message } raise cfg_exc . ConnectionException ( ** conn_params ) | Make SSH connection to the IOS XE device . |
37,889 | def _get_interfaces ( self ) : ios_cfg = self . _get_running_config ( ) parse = HTParser ( ios_cfg ) itfcs_raw = parse . find_lines ( "^interface GigabitEthernet" ) itfcs = [ raw_if . strip ( ) . split ( ' ' ) [ 1 ] for raw_if in itfcs_raw ] LOG . debug ( "Interfaces on hosting device: %s" , itfcs ) return itfcs | Get a list of interfaces on this hosting device . |
37,890 | def _get_interface_ip ( self , interface_name ) : ios_cfg = self . _get_running_config ( ) parse = HTParser ( ios_cfg ) children = parse . find_children ( "^interface %s" % interface_name ) for line in children : if 'ip address' in line : ip_address = line . strip ( ) . split ( ' ' ) [ 2 ] LOG . debug ( "IP Address:%s" , ip_address ) return ip_address LOG . warning ( "Cannot find interface: %s" , interface_name ) return None | Get the ip address for an interface . |
37,891 | def _interface_exists ( self , interface ) : ios_cfg = self . _get_running_config ( ) parse = HTParser ( ios_cfg ) itfcs_raw = parse . find_lines ( "^interface " + interface ) return len ( itfcs_raw ) > 0 | Check whether interface exists . |
37,892 | def _get_vrfs ( self ) : vrfs = [ ] ios_cfg = self . _get_running_config ( ) parse = HTParser ( ios_cfg ) vrfs_raw = parse . find_lines ( "^vrf definition" ) for line in vrfs_raw : vrf_name = line . strip ( ) . split ( ' ' ) [ 2 ] vrfs . append ( vrf_name ) LOG . info ( "VRFs:%s" , vrfs ) return vrfs | Get the current VRFs configured in the device . |
37,893 | def _get_capabilities ( self ) : conn = self . _get_connection ( ) capabilities = [ ] for c in conn . server_capabilities : capabilities . append ( c ) LOG . debug ( "Server capabilities: %s" , capabilities ) return capabilities | Get the servers NETCONF capabilities . |
37,894 | def _get_running_config ( self , split = True ) : conn = self . _get_connection ( ) config = conn . get_config ( source = "running" ) if config : root = ET . fromstring ( config . _raw ) running_config = root [ 0 ] [ 0 ] if split is True : rgx = re . compile ( "\r*\n+" ) ioscfg = rgx . split ( running_config . text ) else : ioscfg = running_config . text return ioscfg | Get the IOS XE device s current running config . |
37,895 | def _check_acl ( self , acl_no , network , netmask ) : exp_cfg_lines = [ 'ip access-list standard ' + str ( acl_no ) , ' permit ' + str ( network ) + ' ' + str ( netmask ) ] ios_cfg = self . _get_running_config ( ) parse = HTParser ( ios_cfg ) acls_raw = parse . find_children ( exp_cfg_lines [ 0 ] ) if acls_raw : if exp_cfg_lines [ 1 ] in acls_raw : return True LOG . error ( "Mismatch in ACL configuration for %s" , acl_no ) return False LOG . debug ( "%s is not present in config" , acl_no ) return False | Check a ACL config exists in the running config . |
37,896 | def _cfg_exists ( self , cfg_str ) : ios_cfg = self . _get_running_config ( ) parse = HTParser ( ios_cfg ) cfg_raw = parse . find_lines ( "^" + cfg_str ) LOG . debug ( "_cfg_exists(): Found lines %s" , cfg_raw ) return len ( cfg_raw ) > 0 | Check a partial config string exists in the running config . |
37,897 | def _check_response ( self , rpc_obj , snippet_name , conf_str = None ) : LOG . debug ( "RPCReply for %(snippet_name)s is %(rpc_obj)s" , { 'snippet_name' : snippet_name , 'rpc_obj' : rpc_obj . xml } ) xml_str = rpc_obj . xml if "<ok />" in xml_str : LOG . info ( "%s was successfully executed" , snippet_name ) return True e_type = rpc_obj . _root [ 0 ] [ 0 ] . text e_tag = rpc_obj . _root [ 0 ] [ 1 ] . text params = { 'snippet' : snippet_name , 'type' : e_type , 'tag' : e_tag , 'dev_id' : self . hosting_device [ 'id' ] , 'ip' : self . _host_ip , 'confstr' : conf_str } raise cfg_exc . IOSXEConfigException ( ** params ) | This function checks the rpc response object for status . |
37,898 | def _get_instances_for_project ( self , project_id ) : search_opts = { 'marker' : None , 'all_tenants' : True , 'project_id' : project_id } try : servers = self . _novaclnt . servers . list ( True , search_opts ) LOG . debug ( '_get_instances_for_project: servers=%s' , servers ) return servers except nexc . Unauthorized : emsg = ( _LE ( 'Failed to get novaclient:Unauthorised ' 'project_id=%(proj)s user=%(user)s' ) , { 'proj' : self . _project_id , 'user' : self . _user_name } ) LOG . exception ( emsg ) raise nexc . ClientException ( emsg ) except nexc . AuthorizationFailure as err : emsg = ( _LE ( "Failed to get novaclient %s" ) ) LOG . exception ( emsg , err ) raise nexc . ClientException ( emsg % err ) | Return all instances for a given project . |
37,899 | def get_instance_for_uuid ( self , uuid , project_id ) : instance_name = self . _inst_info_cache . get ( ( uuid , project_id ) ) if instance_name : return instance_name instances = self . _get_instances_for_project ( project_id ) for inst in instances : if inst . id . replace ( '-' , '' ) == uuid : LOG . debug ( 'get_instance_for_uuid: name=%s' , inst . name ) instance_name = inst . name self . _inst_info_cache [ ( uuid , project_id ) ] = instance_name return instance_name return instance_name | Return instance name for given uuid of an instance and project . |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.