idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
41,400
def add_router_interface(self, context, router_id, interface_info):
    """Add a subnet of a network to an existing router.

    The interface is created in neutron first, then mirrored to the
    Arista hardware; the neutron change is rolled back on HW failure.
    """
    new_router = super(AristaL3ServicePlugin, self).add_router_interface(
        context, router_id, interface_info)

    core = directory.get_plugin()

    # The interface may be specified either by subnet or by port.
    add_by_port, add_by_sub = self._validate_interface_info(interface_info)
    if add_by_sub:
        subnet = core.get_subnet(context, interface_info['subnet_id'])
    elif add_by_port:
        port = core.get_port(context, interface_info['port_id'])
        subnet_id = port['fixed_ips'][0]['subnet_id']
        subnet = core.get_subnet(context, subnet_id)
    network_id = subnet['network_id']

    # Look up the segmentation id of the network's first segment.
    ml2_db = NetworkContext(self, context, {'id': network_id})
    seg_id = ml2_db.network_segments[0]['segmentation_id']

    router = self.get_router(context, router_id)
    router_info = copy.deepcopy(new_router)
    router_info['seg_id'] = seg_id
    router_info['name'] = router['name']
    router_info['cidr'] = subnet['cidr']
    router_info['gip'] = subnet['gateway_ip']
    router_info['ip_version'] = subnet['ip_version']

    try:
        self.driver.add_router_interface(context, router_info)
        return new_router
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.error(_LE("Error Adding subnet %(subnet)s to "
                          "router %(router_id)s on Arista HW"),
                      {'subnet': subnet, 'router_id': router_id})
            # Undo the neutron-side change so DB and HW stay in sync.
            super(AristaL3ServicePlugin, self).remove_router_interface(
                context, router_id, interface_info)
Add a subnet of a network to an existing router .
41,401
def remove_router_interface(self, context, router_id, interface_info):
    """Remove a subnet of a network from an existing router.

    Removes the interface from neutron, then best-effort removes it
    from the Arista hardware (HW failures are logged, not raised).
    """
    router_to_del = (
        super(AristaL3ServicePlugin, self).remove_router_interface(
            context, router_id, interface_info))
    core = directory.get_plugin()
    subnet = core.get_subnet(context, router_to_del['subnet_id'])
    network_id = subnet['network_id']

    ml2_db = NetworkContext(self, context, {'id': network_id})
    seg_id = ml2_db.network_segments[0]['segmentation_id']

    router = self.get_router(context, router_id)
    router_info = copy.deepcopy(router_to_del)
    router_info['seg_id'] = seg_id
    router_info['name'] = router['name']

    try:
        self.driver.remove_router_interface(context, router_info)
        return router_to_del
    except Exception as exc:
        # BUG FIX: the original format string ended with
        # "HW" "Exception =(exc)s" -- "=(exc)s" is not a valid %-style
        # placeholder, so the exception text was never interpolated.
        LOG.error(_LE("Error removing interface %(interface)s from "
                      "router %(router_id)s on Arista HW. "
                      "Exception: %(exc)s"),
                  {'interface': interface_info,
                   'router_id': router_id,
                   'exc': exc})
Remove a subnet of a network from an existing router .
41,402
def initialize_switch_endpoints(self):
    """Initialize EAPI clients for each configured switch."""
    self._switches = {}
    self._port_group_info = {}
    self._validate_config()
    for entry in cfg.CONF.ml2_arista.switch_info:
        # Each entry is "ip:user:password"; a literal "''" means an
        # empty password. NOTE(review): passwords containing ':' would
        # break this split -- presumably disallowed; confirm.
        ip, user, password = entry.split(":")
        if password == "''":
            password = ''
        self._switches[ip] = api.EAPIClient(
            ip, user, password, verify=False,
            timeout=cfg.CONF.ml2_arista.conn_timeout)
    self._check_dynamic_acl_support()
Initialize endpoints for switch communication
41,403
def _check_dynamic_acl_support ( self ) : cmds = [ 'ip access-list openstack-test dynamic' , 'no ip access-list openstack-test' ] for switch_ip , switch_client in self . _switches . items ( ) : try : self . run_openstack_sg_cmds ( cmds ) except Exception : LOG . error ( "Switch %s does not support dynamic ACLs. SG " "support will not be enabled on this switch." , switch_ip )
Log an error if any switches don't support dynamic ACLs
41,404
def _validate_config(self):
    """Raise AristaConfigError unless at least one switch is configured."""
    if len(cfg.CONF.ml2_arista.get('switch_info')) < 1:
        msg = _('Required option - when "sec_group_support" is enabled, '
                'at least one switch must be specified ')
        LOG.exception(msg)
        raise arista_exc.AristaConfigError(msg=msg)
Ensure at least one switch is configured
41,405
def _update_port_group_info ( self , switches = None ) : if switches is None : switches = self . _switches . keys ( ) for switch_ip in switches : client = self . _switches . get ( switch_ip ) ret = self . _run_eos_cmds ( [ 'show interfaces' ] , client ) if not ret or len ( ret ) == 0 : LOG . warning ( "Unable to retrieve interface info for %s" , switch_ip ) continue intf_info = ret [ 0 ] self . _port_group_info [ switch_ip ] = intf_info . get ( 'interfaces' , { } )
Refresh data on switch interfaces port group membership
41,406
def _get_port_for_acl ( self , port_id , switch ) : all_intf_info = self . _port_group_info . get ( switch , { } ) intf_info = all_intf_info . get ( port_id , { } ) member_info = intf_info . get ( 'interfaceMembership' , '' ) port_group_info = re . search ( 'Member of (?P<port_group>\S+)' , member_info ) if port_group_info : port_id = port_group_info . group ( 'port_group' ) return port_id
Gets interface name for ACLs
41,407
def _supported_rule(protocol, ethertype):
    """Check that the rule is an IPv4 rule of a supported protocol."""
    if not protocol or protocol not in utils.SUPPORTED_SG_PROTOCOLS:
        return False
    return ethertype == n_const.IPv4
Checks that the rule is an IPv4 rule of a supported protocol
41,408
def _format_rule(self, protocol, cidr, min_port, max_port, direction):
    """Return the EOS-formatted ACL rule text for a single SG rule."""
    if cidr is None:
        cidr = 'any'
    # NOTE(review): a direction other than ingress/egress would leave
    # src_ip/dst_ip unbound -- callers apparently pass only these two.
    if direction == n_const.INGRESS_DIRECTION:
        src_ip, dst_ip = cidr, 'any'
    elif direction == n_const.EGRESS_DIRECTION:
        src_ip, dst_ip = 'any', cidr
    if protocol == n_const.PROTO_NAME_ICMP:
        # For ICMP the min/max ports carry the message type and code.
        rule = "permit icmp %s %s" % (src_ip, dst_ip)
        if min_port:
            rule += " %s" % (min_port)
        if max_port:
            rule += " %s" % (max_port)
    else:
        rule = "permit %s %s %s" % (protocol, src_ip, dst_ip)
        if min_port and max_port:
            rule += " range %s %s" % (min_port, max_port)
        elif min_port:
            rule += " eq %s" % min_port
    return rule
Get EOS formatted rule
41,409
def run_cmds_on_all_switches(self, cmds):
    """Run the given command list on every configured switch."""
    for client in self._switches.values():
        self.run_openstack_sg_cmds(cmds, client)
Runs all cmds on all configured switches
41,410
def run_per_switch_cmds(self, switch_cmds):
    """Apply per-switch command lists to their respective switches."""
    for switch_ip, cmds in switch_cmds.items():
        self.run_openstack_sg_cmds(cmds, self._switches.get(switch_ip))
Applies cmds to appropriate switches
41,411
def _get_switches ( self , profile ) : switchports = self . _get_switchports ( profile ) switches = set ( [ switchport [ 0 ] for switchport in switchports ] ) return switches
Get set of switches referenced in a port binding profile
41,412
def get_create_security_group_commands(self, sg_id, sg_rules):
    """Return EOS commands that create the ingress/egress ACLs for a SG."""
    in_rules, eg_rules = self._format_rules_for_eos(sg_rules)
    cmds = []
    for direction, rules in ((n_const.INGRESS_DIRECTION, in_rules),
                             (n_const.EGRESS_DIRECTION, eg_rules)):
        cmds.append("ip access-list %s dynamic"
                    % self._acl_name(sg_id, direction))
        cmds.extend(rules)
        cmds.append("exit")
    return cmds
Commands for creating ACL
41,413
def get_delete_security_group_commands(self, sg_id):
    """Return EOS commands that delete both ACLs for a SG."""
    return ["no ip access-list %s" % self._acl_name(sg_id, direction)
            for direction in (n_const.INGRESS_DIRECTION,
                              n_const.EGRESS_DIRECTION)]
Commands for deleting ACL
41,414
def get_delete_security_group_rule_commands(self, sg_id, sg_rule):
    """Return EOS commands that remove a single rule from a SG's ACLs."""
    return self._get_rule_cmds(sg_id, sg_rule, delete=True)
Commands for removing rule from ACLS
41,415
def get_remove_security_group_commands(self, sg_id, profile):
    """Return EOS commands that unbind a SG's ACLs from interfaces."""
    return self._get_interface_commands(sg_id, profile, delete=True)
Commands for removing ACL from interface
41,416
def _parse_acl_config ( self , acl_config ) : parsed_acls = dict ( ) for acl in acl_config [ 'aclList' ] : parsed_acls [ acl [ 'name' ] ] = set ( ) for rule in acl [ 'sequence' ] : parsed_acls [ acl [ 'name' ] ] . add ( rule [ 'text' ] ) return parsed_acls
Parse configured ACLs and rules
41,417
def _parse_binding_config(self, binding_config):
    """Parse ACL config into a set of (intf, acl, direction) bindings."""
    bindings = set()
    for acl in binding_config['aclList']:
        for direction, key in (
                (a_const.INGRESS_DIRECTION, 'configuredIngressIntfs'),
                (a_const.EGRESS_DIRECTION, 'configuredEgressIntfs')):
            for intf in acl[key]:
                bindings.add((intf['name'], acl['name'], direction))
    return bindings
Parse configured interface -> ACL bindings
41,418
def _get_dynamic_acl_info(self, switch_ip):
    """Fetch and parse dynamic ACLs and their bindings from a switch."""
    cmds = ["enable",
            "show ip access-lists dynamic",
            "show ip access-lists summary dynamic"]
    switch = self._switches.get(switch_ip)
    _, acls, bindings = self._run_eos_cmds(cmds, switch)
    return (self._parse_acl_config(acls),
            self._parse_binding_config(bindings))
Retrieve ACLs, ACL rules, and interface bindings from the switch
41,419
def get_expected_acls(self):
    """Build {acl_name: set(rules)} expected on switches from neutron DB."""
    expected_acls = collections.defaultdict(set)
    for sg in db_lib.get_security_groups():
        in_rules, out_rules = self._format_rules_for_eos(sg['rules'])
        ingress_name = self._acl_name(sg['id'], n_const.INGRESS_DIRECTION)
        egress_name = self._acl_name(sg['id'], n_const.EGRESS_DIRECTION)
        expected_acls[ingress_name].update(in_rules)
        expected_acls[egress_name].update(out_rules)
    return expected_acls
Query the neutron DB for Security Groups and Rules
41,420
def get_expected_bindings(self):
    """Build per-switch sets of expected (intf, acl, direction) bindings."""
    all_expected_bindings = collections.defaultdict(set)
    for sg_binding, port_binding in db_lib.get_baremetal_sg_bindings():
        sg_id = sg_binding['security_group_id']
        try:
            binding_profile = json.loads(port_binding.profile)
        except ValueError:
            # An empty/unparseable profile contributes no bindings.
            binding_profile = {}
        for switch, intf in self._get_switchports(binding_profile):
            all_expected_bindings[switch].add(
                (intf, self._acl_name(sg_id, n_const.INGRESS_DIRECTION),
                 a_const.INGRESS_DIRECTION))
            all_expected_bindings[switch].add(
                (intf, self._acl_name(sg_id, n_const.EGRESS_DIRECTION),
                 a_const.EGRESS_DIRECTION))
    return all_expected_bindings
Query the neutron DB for SG - > switch interface bindings
41,421
def adjust_bindings_for_lag(self, switch_ip, bindings):
    """Rewrite interface names in bindings to their LAGs where present."""
    self._update_port_group_info([switch_ip])
    return {(self._get_port_for_acl(binding[0], switch_ip),) + binding[1:]
            for binding in bindings}
Adjusting interface names for expected bindings where LAGs exist
41,422
def get_sync_acl_cmds(self, switch_acls, expected_acls):
    """Return commands that reconcile switch ACLs with expected ACLs."""
    cmds = []
    # Drop ACLs that exist on the switch but are no longer expected.
    for acl in set(switch_acls) - set(expected_acls):
        cmds.append('no ip access-list %s' % acl)
    # Reconcile rules per expected ACL; skip ACLs already in sync.
    for acl, expected_rules in expected_acls.items():
        current_rules = switch_acls.get(acl, set())
        to_delete = current_rules - expected_rules
        to_add = expected_rules - current_rules
        if acl in switch_acls and not (to_add | to_delete):
            continue
        cmds.append('ip access-list %s dynamic' % acl)
        cmds.extend('no ' + rule for rule in to_delete)
        cmds.extend(to_add)
        cmds.append('exit')
    return cmds
Returns the list of commands required to synchronize switch ACLs
41,423
def get_sync_binding_cmds(self, switch_bindings, expected_bindings):
    """Return commands that reconcile ACL interface bindings."""
    cmds = []
    for intf, acl, direction in switch_bindings - expected_bindings:
        cmds += ['interface %s' % intf,
                 'no ip access-group %s %s' % (acl, direction),
                 'exit']
    for intf, acl, direction in expected_bindings - switch_bindings:
        cmds += ['interface %s' % intf,
                 'ip access-group %s %s' % (acl, direction),
                 'exit']
    return cmds
Returns the list of commands required to synchronize ACL bindings
41,424
def read_price_data(files, name_func=None):
    """Read per-contract CSV price files into one (date, contract) frame.

    Each file is read with its first column parsed as dates; the
    contract name is derived from the filename unless *name_func* is
    supplied.
    """
    if name_func is None:
        def name_func(fname):
            return os.path.split(fname)[1].split(".")[0]
    frames = []
    for fname in files:
        contract = name_func(fname)
        frame = pd.read_csv(fname, index_col=0, parse_dates=True)
        frame.sort_index(inplace=True)
        frame.index = pd.MultiIndex.from_product(
            [frame.index, [contract]], names=["date", "contract"])
        frames.append(frame)
    return pd.concat(frames, axis=0, sort=False).sort_index()
Convenience function for reading in pricing data from csv files
41,425
def flatten(weights):
    """Flatten a weights DataFrame (or dict of them) into long format.

    Raises ValueError for any other input type.
    """
    def _melt(wts_df):
        melted = wts_df.stack().reset_index()
        melted.columns = ["date", "contract", "generic", "weight"]
        return melted

    if isinstance(weights, pd.DataFrame):
        return _melt(weights)
    elif isinstance(weights, dict):
        frames = []
        # Sorted keys give a deterministic row order.
        for key in sorted(weights.keys()):
            frame = _melt(weights[key])
            frame.loc[:, "key"] = key
            frames.append(frame)
        return pd.concat(frames, axis=0).reset_index(drop=True)
    else:
        raise ValueError("weights must be pd.DataFrame or dict")
Flatten weights into a long DataFrame .
41,426
def unflatten(flat_weights):
    """Pivot long-format weights back into weighting-matrix form.

    If *flat_weights* has a "key" column, a dict of matrices keyed by
    root generic is returned; otherwise a single DataFrame.
    """
    def _pivot(flt):
        # One pivot per (date, contract) pair; generics become columns.
        wts = flt.pivot_table(index=["date", "contract"],
                              columns=["generic"], values=["weight"])
        wts.columns = wts.columns.droplevel(0)
        return wts

    # BUG FIX: Index.contains() was deprecated in pandas 0.25 and
    # removed in 1.0; membership is tested with the `in` operator.
    if "key" in flat_weights.columns:
        weights = {}
        for key in flat_weights.loc[:, "key"].unique():
            flt_wts = flat_weights.loc[flat_weights.loc[:, "key"] == key, :]
            weights[key] = _pivot(flt_wts.drop(labels="key", axis=1))
    else:
        weights = _pivot(flat_weights)
    return weights
Pivot weights from long DataFrame into weighting matrix .
41,427
def calc_rets(returns, weights):
    """Calculate continuous return series for futures instruments.

    Generic returns are weighted sums of underlying instrument
    returns, where the weights may vary through time.
    """
    if not isinstance(returns, dict):
        returns = {"": returns}
    if not isinstance(weights, dict):
        weights = {"": weights}

    # Generic (column) names must be globally unique across roots.
    all_generics = []
    for root in weights:
        all_generics.extend(weights[root].columns.tolist())
    if len(set(all_generics)) != len(all_generics):
        raise ValueError("Columns for weights must all be unique")

    _check_indices(returns, weights)

    grets = []
    cols = []
    for root in returns:
        root_wts = weights[root]
        root_rets = returns[root]
        for generic in root_wts.columns:
            gnrc_wts = root_wts.loc[:, generic]
            # Drop zero weights so unweighted contracts cannot
            # contribute NaNs to the weighted sum.
            gnrc_wts = gnrc_wts.loc[gnrc_wts != 0]
            rets = root_rets.loc[gnrc_wts.index]
            group_rets = (rets * gnrc_wts).groupby(level=0)
            grets.append(group_rets.apply(pd.DataFrame.sum, skipna=False))
        cols.extend(root_wts.columns.tolist())
    return pd.concat(grets, axis=1, keys=cols).sort_index(axis=1)
Calculate continuous return series for futures instruments. These consist of weighted underlying instrument returns whose weights can vary over time.
41,428
def calc_trades(current_contracts, desired_holdings, trade_weights, prices,
                multipliers, **kwargs):
    """Calculate tradeable-contract trades to reach desired holdings.

    Differences between current and desired holdings are treated as 0;
    zero trades are dropped from the result.
    """
    if not isinstance(trade_weights, dict):
        trade_weights = {"": trade_weights}

    # Every desired generic must be mappable through the trade weights.
    generics = []
    for key in trade_weights:
        generics.extend(trade_weights[key].columns)
    if not set(desired_holdings.index).issubset(set(generics)):
        raise ValueError(
            "'desired_holdings.index' contains values which "
            "cannot be mapped to tradeables.\n"
            "Received: 'desired_holdings.index'\n {0}\n"
            "Expected in 'trade_weights' set of columns:\n {1}\n"
            .format(sorted(desired_holdings.index), sorted(generics)))

    desired_contracts = []
    for root_key in trade_weights:
        gnrc_weights = trade_weights[root_key]
        subset = gnrc_weights.columns.intersection(desired_holdings.index)
        gnrc_des_hlds = desired_holdings.loc[subset]
        gnrc_weights = gnrc_weights.loc[:, subset]
        # Drop instruments whose weights are all zero for these generics.
        gnrc_weights = gnrc_weights.loc[~(gnrc_weights == 0).all(axis=1)]
        instr_des_hlds = (gnrc_des_hlds * gnrc_weights).sum(axis=1)
        wprices = prices.loc[instr_des_hlds.index]
        desired_contracts.append(
            to_contracts(instr_des_hlds, wprices, multipliers, **kwargs))

    desired_contracts = pd.concat(desired_contracts, axis=0)
    trades = desired_contracts.subtract(current_contracts, fill_value=0)
    trades = trades.loc[trades != 0]
    return trades.sort_index()
Calculate the number of tradeable contracts for rebalancing from a set of current contract holdings to a set of desired generic notional holdings based on prevailing prices and mapping from generics to tradeable instruments . Differences between current holdings and desired holdings are treated as 0 . Zero trades are dropped .
41,429
def to_notional(instruments, prices, multipliers, desired_ccy=None,
                instr_fx=None, fx_rates=None):
    """Convert contract counts of tradeable instruments to notionals.

    Optionally converts to *desired_ccy* via instrument FX mappings.
    """
    return _instr_conv(instruments, prices, multipliers, True,
                       desired_ccy, instr_fx, fx_rates)
Convert number of contracts of tradeable instruments to notional value of tradeable instruments in a desired currency .
41,430
def to_contracts(instruments, prices, multipliers, desired_ccy=None,
                 instr_fx=None, fx_rates=None, rounder=None):
    """Convert notional amounts to integer numbers of contracts.

    *rounder* defaults to pd.Series.round (nearest integer).
    """
    contracts = _instr_conv(instruments, prices, multipliers, False,
                            desired_ccy, instr_fx, fx_rates)
    if rounder is None:
        rounder = pd.Series.round
    return rounder(contracts).astype(int)
Convert notional amount of tradeable instruments to number of instrument contracts rounding to nearest integer number of contracts .
41,431
def get_multiplier(weights, root_generic_multiplier):
    """Determine per-instrument multipliers from generic multipliers.

    *weights* maps tradeable instruments (index) to generics (columns),
    either as one DataFrame or as a dict keyed by root generic;
    *root_generic_multiplier* is a Series of multipliers by root.
    """
    if len(root_generic_multiplier) > 1 and not isinstance(weights, dict):
        raise ValueError("For multiple generic instruments weights must be a "
                         "dictionary")
    mults = []
    intrs = []
    # BUG FIX: Series.iteritems() was removed in pandas 2.0; items()
    # is the supported spelling (available since pandas 0.25 too).
    for ast, multiplier in root_generic_multiplier.items():
        if isinstance(weights, dict):
            weights_ast = weights[ast].index
        else:
            weights_ast = weights.index
        mults.extend(np.repeat(multiplier, len(weights_ast)))
        intrs.extend(weights_ast)
    return pd.Series(mults, intrs).sort_index()
Determine tradeable instrument multiplier based on generic asset multipliers and weights mapping from generics to tradeables .
41,432
def weighted_expiration(weights, contract_dates):
    """Days-to-expiration of generics, weighted by underlying contracts."""
    generic_cols = weights.columns
    wts = weights.reset_index(level=-1)
    expiries = contract_dates.to_dict()
    # The first column after reset_index is the contract identifier.
    wts.loc[:, "expiry"] = wts.iloc[:, 0].apply(lambda c: expiries[c])
    days = (pd.DatetimeIndex(wts.expiry)
            - pd.Series(wts.index, wts.index)).apply(lambda td: td.days)
    wts = wts.loc[:, generic_cols]
    return wts.mul(days, axis=0).groupby(level=0).sum()
Calculate the days to expiration for generic futures weighted by the composition of the underlying tradeable instruments .
41,433
def _valid_baremetal_port(port):
    """Check that a port is baremetal with exactly one security group."""
    if port.get(portbindings.VNIC_TYPE) != portbindings.VNIC_BAREMETAL:
        return False
    sgs = port.get('security_groups', [])
    if len(sgs) == 0:
        return False
    # FIX: reuse the list fetched above instead of performing a second,
    # redundant port.get('security_groups', []) lookup.
    if len(sgs) > 1:
        LOG.warning('SG provisioning failed for %(port)s. Only one '
                    'SG may be applied per port.',
                    {'port': port['id']})
        return False
    return True
Check if port is a baremetal port with exactly one security group
41,434
def synchronize_resources(self):
    """Run one full synchronization pass of neutron resources to CVX."""
    if not self._rpc.sync_start():
        LOG.info("%(pid)s Failed to grab the sync lock",
                 {'pid': os.getpid()})
        greenthread.sleep(1)
        return
    # Flush queued incremental updates before the full pass.
    for resource in self._resources_to_update:
        self.update_neutron_resource(resource)
    self._resources_to_update = list()
    # Deletes run in reverse dependency order, creates in forward order.
    for resource_type in reversed(self.sync_order):
        resource_type.delete_cvx_resources()
    for resource_type in self.sync_order:
        resource_type.create_cvx_resources()
    self._rpc.sync_end()
    if self._synchronizing_uuid:
        LOG.info("%(pid)s Full sync for cvx uuid %(uuid)s complete",
                 {'uuid': self._synchronizing_uuid, 'pid': os.getpid()})
        self._cvx_uuid = self._synchronizing_uuid
        self._synchronizing_uuid = None
Synchronize worker with CVX
41,435
def register(self, resource, event, trigger, **kwargs):
    """Subscribe to trunk and subport events (called in AFTER_INIT)."""
    # NOTE(review): kwargs is forwarded positionally (presumably as the
    # payload argument) -- confirm against the base class signature.
    super(AristaTrunkDriver, self).register(resource, event, trigger,
                                           kwargs)
    subscriptions = (
        (self.subport_create, resources.SUBPORTS, events.AFTER_CREATE),
        (self.subport_delete, resources.SUBPORTS, events.AFTER_DELETE),
        (self.trunk_create, resources.TRUNK, events.AFTER_CREATE),
        (self.trunk_update, resources.TRUNK, events.AFTER_UPDATE),
        (self.trunk_delete, resources.TRUNK, events.AFTER_DELETE),
    )
    for handler, res, evt in subscriptions:
        registry.subscribe(handler, res, evt)
    self.core_plugin = directory.get_plugin()
    LOG.debug("Arista trunk driver initialized.")
Called in the trunk plugin's AFTER_INIT
41,436
def create_router_on_eos(self, router_name, rdm, server):
    """Create a router (VRF) on an Arista HW device."""
    rd = "%s:%s" % (rdm, rdm)
    cmds = [c.format(router_name, rd) for c in self.routerDict['create']]
    if self._mlag_configured:
        # MLAG peers share a virtual router MAC address.
        cmds.extend(c.format(VIRTUAL_ROUTER_MAC)
                    for c in self._additionalRouterCmdsDict['create'])
    self._run_config_cmds(cmds, server)
Creates a router on Arista HW Device .
41,437
def delete_router_from_eos(self, router_name, server):
    """Delete a router (VRF) from an Arista HW device."""
    cmds = [c.format(router_name) for c in self.routerDict['delete']]
    if self._mlag_configured:
        cmds.extend(self._additionalRouterCmdsDict['delete'])
    self._run_config_cmds(cmds, server)
Deletes a router from Arista HW Device .
41,438
def add_interface_to_router(self, segment_id, router_name, gip, router_ip,
                            mask, server):
    """Add an SVI to an existing router on an Arista HW device."""
    if not segment_id:
        segment_id = DEFAULT_VLAN
    # MLAG uses per-switch router IPs; otherwise the gateway IP/mask.
    if self._mlag_configured:
        ip = router_ip
    else:
        ip = gip + '/' + mask
    cmds = [c.format(segment_id, router_name, ip)
            for c in self._interfaceDict['add']]
    if self._mlag_configured:
        cmds.extend(c.format(gip)
                    for c in self._additionalInterfaceCmdsDict['add'])
    self._run_config_cmds(cmds, server)
Adds an interface to existing HW router on Arista HW device .
41,439
def delete_interface_from_router(self, segment_id, router_name, server):
    """Delete an SVI from an existing router on an Arista HW device."""
    if not segment_id:
        segment_id = DEFAULT_VLAN
    cmds = [c.format(segment_id) for c in self._interfaceDict['remove']]
    self._run_config_cmds(cmds, server)
Deletes an interface from existing HW router on Arista HW device .
41,440
def create_router(self, context, router):
    """Create a router on all configured Arista switches.

    With MLAG, a single peer failure is tolerated; a second failure
    (or any failure without MLAG) raises.
    """
    if not router:
        return
    router_name = self._arista_router_name(router['id'], router['name'])
    # Derive a deterministic 16-bit route distinguisher from the name.
    hashed = hashlib.sha256(router_name.encode('utf-8'))
    rdm = str(int(hashed.hexdigest(), 16) % 65536)
    mlag_peer_failed = False
    for server in self._servers:
        try:
            self.create_router_on_eos(router_name, rdm, server)
            mlag_peer_failed = False
        except Exception:
            if self._mlag_configured and not mlag_peer_failed:
                # Tolerate exactly one MLAG peer being down.
                mlag_peer_failed = True
            else:
                msg = (_('Failed to create router %s on EOS') % router_name)
                LOG.exception(msg)
                raise arista_exc.AristaServicePluginRpcError(msg=msg)
Creates a router on Arista Switch .
41,441
def delete_router(self, context, router_id, router):
    """Delete a router from all configured Arista switches.

    With MLAG, a single peer failure is tolerated; a second failure
    (or any failure without MLAG) raises.
    """
    if not router:
        return
    router_name = self._arista_router_name(router_id, router['name'])
    mlag_peer_failed = False
    for server in self._servers:
        try:
            self.delete_router_from_eos(router_name, server)
            mlag_peer_failed = False
        except Exception:
            if self._mlag_configured and not mlag_peer_failed:
                # Tolerate exactly one MLAG peer being down.
                mlag_peer_failed = True
            else:
                # BUG FIX: the message previously said "create" even
                # though this is the delete path.
                msg = (_('Failed to delete router %s on EOS') % router_name)
                LOG.exception(msg)
                raise arista_exc.AristaServicePluginRpcError(msg=msg)
Deletes a router from Arista Switch .
41,442
def add_router_interface(self, context, router_info):
    """Add an interface to a router created on Arista HW device(s)."""
    if not router_info:
        return
    self._select_dicts(router_info['ip_version'])
    cidr = router_info['cidr']
    subnet_mask = cidr.split('/')[1]
    router_name = self._arista_router_name(router_info['id'],
                                           router_info['name'])
    if self._mlag_configured:
        # Each MLAG peer gets its own router IP on the subnet; one
        # peer failure is tolerated.
        mlag_peer_failed = False
        for i, server in enumerate(self._servers):
            router_ip = self._get_router_ip(cidr, i,
                                            router_info['ip_version'])
            try:
                self.add_interface_to_router(router_info['seg_id'],
                                             router_name,
                                             router_info['gip'],
                                             router_ip, subnet_mask,
                                             server)
                mlag_peer_failed = False
            except Exception:
                if not mlag_peer_failed:
                    mlag_peer_failed = True
                else:
                    msg = (_('Failed to add interface to router '
                             '%s on EOS') % router_name)
                    LOG.exception(msg)
                    raise arista_exc.AristaServicePluginRpcError(msg=msg)
    else:
        for server in self._servers:
            self.add_interface_to_router(router_info['seg_id'],
                                         router_name, router_info['gip'],
                                         None, subnet_mask, server)
Adds an interface to a router created on Arista HW router .
41,443
def remove_router_interface(self, context, router_info):
    """Remove a previously configured interface from a router on HW."""
    if not router_info:
        return
    router_name = self._arista_router_name(router_info['id'],
                                           router_info['name'])
    mlag_peer_failed = False
    for server in self._servers:
        try:
            self.delete_interface_from_router(router_info['seg_id'],
                                              router_name, server)
            if self._mlag_configured:
                mlag_peer_failed = False
        except Exception:
            if self._mlag_configured and not mlag_peer_failed:
                # Tolerate exactly one MLAG peer being down.
                mlag_peer_failed = True
            else:
                # BUG FIX: the message previously said "add interface"
                # even though this is the remove path.
                msg = (_('Failed to remove interface from router '
                         '%s on EOS') % router_name)
                LOG.exception(msg)
                raise arista_exc.AristaServicePluginRpcError(msg=msg)
Removes previously configured interface from router on Arista HW .
41,444
def _get_binary_from_ipv4 ( self , ip_addr ) : return struct . unpack ( "!L" , socket . inet_pton ( socket . AF_INET , ip_addr ) ) [ 0 ]
Converts IPv4 address to binary form .
41,445
def _get_binary_from_ipv6 ( self , ip_addr ) : hi , lo = struct . unpack ( "!QQ" , socket . inet_pton ( socket . AF_INET6 , ip_addr ) ) return ( hi << 64 ) | lo
Converts IPv6 address to binary form .
41,446
def _get_ipv4_from_binary ( self , bin_addr ) : return socket . inet_ntop ( socket . AF_INET , struct . pack ( "!L" , bin_addr ) )
Converts binary address to Ipv4 format .
41,447
def _get_ipv6_from_binary ( self , bin_addr ) : hi = bin_addr >> 64 lo = bin_addr & 0xFFFFFFFF return socket . inet_ntop ( socket . AF_INET6 , struct . pack ( "!QQ" , hi , lo ) )
Converts binary address to Ipv6 format .
41,448
def _get_router_ip(self, cidr, ip_count, ip_ver):
    """Generate a per-switch router IP for an MLAG pair on a subnet.

    The address is taken from the top of the subnet's host range,
    offset by the switch index.
    """
    start_ip = MLAG_SWITCHES + ip_count
    network_addr, prefix = cidr.split('/')
    if ip_ver == 4:
        bits = IPV4_BITS
        ip = self._get_binary_from_ipv4(network_addr)
    elif ip_ver == 6:
        bits = IPV6_BITS
        ip = self._get_binary_from_ipv6(network_addr)
    host_bits = bits - int(prefix)
    mask = (pow(2, bits) - 1) << host_bits
    network_addr = ip & mask
    router_ip = network_addr | (pow(2, host_bits) - start_ip)
    if ip_ver == 4:
        return self._get_ipv4_from_binary(router_ip) + '/' + prefix
    else:
        return self._get_ipv6_from_binary(router_ip) + '/' + prefix
For a given IP subnet and IP version type generate IP for router .
41,449
def create_tenant(self, tenant_id):
    """Enqueue a tenant create operation."""
    self.provision_queue.put(
        MechResource(tenant_id, a_const.TENANT_RESOURCE, a_const.CREATE))
Enqueue tenant create
41,450
def delete_tenant_if_removed(self, tenant_id):
    """Enqueue a tenant delete if the tenant is no longer in the DB."""
    if not db_lib.tenant_provisioned(tenant_id):
        self.provision_queue.put(
            MechResource(tenant_id, a_const.TENANT_RESOURCE,
                         a_const.DELETE))
Enqueue tenant delete if it's no longer in the db
41,451
def create_network(self, network):
    """Enqueue a network create operation."""
    self.provision_queue.put(
        MechResource(network['id'], a_const.NETWORK_RESOURCE,
                     a_const.CREATE))
Enqueue network create
41,452
def delete_network(self, network):
    """Enqueue a network delete operation."""
    self.provision_queue.put(
        MechResource(network['id'], a_const.NETWORK_RESOURCE,
                     a_const.DELETE))
Enqueue network delete
41,453
def create_segments(self, segments):
    """Enqueue one create operation per segment."""
    for segment in segments:
        self.provision_queue.put(
            MechResource(segment['id'], a_const.SEGMENT_RESOURCE,
                         a_const.CREATE))
Enqueue segment creates
41,454
def delete_segments(self, segments):
    """Enqueue one delete operation per segment."""
    for segment in segments:
        self.provision_queue.put(
            MechResource(segment['id'], a_const.SEGMENT_RESOURCE,
                         a_const.DELETE))
Enqueue segment deletes
41,455
def get_instance_type(self, port):
    """Determine the resource type from device owner and vnic type.

    Returns None for ports this driver does not manage.
    """
    if port[portbindings.VNIC_TYPE] == portbindings.VNIC_BAREMETAL:
        return a_const.BAREMETAL_RESOURCE
    owner_to_type = {
        n_const.DEVICE_OWNER_DHCP: a_const.DHCP_RESOURCE,
        n_const.DEVICE_OWNER_DVR_INTERFACE: a_const.ROUTER_RESOURCE,
        trunk_consts.TRUNK_SUBPORT_OWNER: a_const.VM_RESOURCE}
    owner = port['device_owner']
    if owner in owner_to_type:
        return owner_to_type[owner]
    if owner.startswith(n_const.DEVICE_OWNER_COMPUTE_PREFIX):
        return a_const.VM_RESOURCE
    return None
Determine the port type based on device owner and vnic type
41,456
def create_instance(self, port):
    """Enqueue an instance create operation for a managed port."""
    instance_type = self.get_instance_type(port)
    if not instance_type:
        return
    self.provision_queue.put(
        MechResource(port['device_id'], instance_type, a_const.CREATE))
Enqueue instance create
41,457
def delete_instance_if_removed(self, port):
    """Enqueue an instance delete if it is no longer in the DB."""
    instance_type = self.get_instance_type(port)
    if not instance_type:
        return
    if not db_lib.instance_provisioned(port['device_id']):
        self.provision_queue.put(
            MechResource(port['device_id'], instance_type,
                         a_const.DELETE))
Enqueue instance delete if it's no longer in the db
41,458
def create_port(self, port):
    """Enqueue a port create operation for a managed port."""
    instance_type = self.get_instance_type(port)
    if not instance_type:
        return
    self.provision_queue.put(
        MechResource(port['id'], instance_type + a_const.PORT_SUFFIX,
                     a_const.CREATE))
Enqueue port create
41,459
def delete_port_if_removed(self, port):
    """Enqueue a port delete if the port is no longer in the DB."""
    instance_type = self.get_instance_type(port)
    if not instance_type:
        return
    if not db_lib.port_provisioned(port['id']):
        self.provision_queue.put(
            MechResource(port['id'], instance_type + a_const.PORT_SUFFIX,
                         a_const.DELETE))
Enqueue port delete
41,460
def _get_binding_keys(self, port, host):
    """Return (port_id, binding) keys from the port binding.

    For ports with local_link_information, the binding is each
    (switch_id, port_id) pair; otherwise it is the host name.
    """
    binding_keys = []
    links = port[portbindings.PROFILE].get('local_link_information', None)
    if links:
        for link in links:
            binding_keys.append(
                (port['id'], (link.get('switch_id'), link.get('port_id'))))
    else:
        binding_keys.append((port['id'], host))
    return binding_keys
Get binding keys from the port binding
41,461
def create_port_binding(self, port, host):
    """Enqueue port binding create operations for a managed port."""
    if not self.get_instance_type(port):
        return
    for pb_key in self._get_binding_keys(port, host):
        self.provision_queue.put(
            MechResource(pb_key, a_const.PORT_BINDING_RESOURCE,
                         a_const.CREATE))
Enqueue port binding create
41,462
def delete_port_binding(self, port, host):
    """Enqueue a port-binding delete for each binding key of the port."""
    if not self.get_instance_type(port):
        return
    for key in self._get_binding_keys(port, host):
        self.provision_queue.put(
            MechResource(key, a_const.PORT_BINDING_RESOURCE,
                         a_const.DELETE))
Enqueue port binding delete
41,463
def create_network_postcommit(self, context):
    """Provision the network, its tenant and its segments on CVX."""
    net = context.current
    log_context("create_network_postcommit: network", net)
    # Create top-down: tenant first, then network, then segments.
    self.create_tenant(net['project_id'])
    self.create_network(net)
    self.create_segments(context.network_segments)
Provision the network on CVX
41,464
def delete_network_postcommit(self, context):
    """Remove the network, its segments and (maybe) its tenant from CVX."""
    net = context.current
    log_context("delete_network_postcommit: network", net)
    # Delete bottom-up: segments, then network, then tenant (if empty).
    self.delete_segments(context.network_segments)
    self.delete_network(net)
    self.delete_tenant_if_removed(net['project_id'])
Delete the network from CVX
41,465
def update_port_postcommit(self, context):
    """Send port updates to CVX.

    Handles device reassignment, port-down transitions (migration
    cleanup) and the normal create/refresh of the full resource chain.
    """
    port = context.current
    orig_port = context.original
    network = context.network.current
    log_context("update_port_postcommit: port", port)
    log_context("update_port_postcommit: orig", orig_port)
    tenant_id = port['project_id']
    # Device changed: tear down resources tied to the old instance first.
    if orig_port and port['device_id'] != orig_port['device_id']:
        self._delete_port_resources(orig_port, context.original_host)
    if context.status == n_const.PORT_STATUS_DOWN:
        # Port went DOWN on its original host (e.g. migration): clean up
        # the old host's resources and release any dynamic segment.
        if (context.original_host and
                context.status != context.original_status):
            self._delete_port_resources(orig_port, context.original_host)
            self._try_to_release_dynamic_segment(context, migration=True)
    else:
        # Port is up: (re)create the full resource chain top-down.
        self.create_tenant(tenant_id)
        self.create_network(network)
        if context.binding_levels:
            segments = [level['bound_segment']
                        for level in context.binding_levels]
            self.create_segments(segments)
        self.create_instance(port)
        self.create_port(port)
        self.create_port_binding(port, context.host)
Send port updates to CVX
41,466
def delete_port_postcommit(self, context):
    """Remove the port's resources from CVX and free dynamic segments."""
    port = context.current
    log_context("delete_port_postcommit: port", port)
    self._delete_port_resources(port, context.host)
    self._try_to_release_dynamic_segment(context)
Delete the port from CVX
41,467
def _bind_baremetal_port(self, context, segment):
    """Complete the binding of a baremetal port on the given segment."""
    port = context.current
    details = {portbindings.VIF_DETAILS_VLAN:
               str(segment[driver_api.SEGMENTATION_ID])}
    context.set_binding(segment[driver_api.ID],
                        portbindings.VIF_TYPE_OTHER, details,
                        n_const.ACTIVE)
    LOG.debug("AristaDriver: bound port info- port ID %(id)s "
              "on network %(network)s",
              {'id': port['id'],
               'network': context.network.current['id']})
    # Trunk parent ports need their subports bound as well.
    if port.get('trunk_details'):
        self.trunk_driver.bind_port(port)
    return True
Bind the baremetal port to the segment
41,468
def _get_physnet(self, context):
    """Find the appropriate physnet for the port's host."""
    if (context.current.get(portbindings.VNIC_TYPE) ==
            portbindings.VNIC_BAREMETAL):
        physnet = self.eapi.get_baremetal_physnet(context)
    else:
        physnet = self.eapi.get_host_physnet(context)
    # Map a switch to its MLAG pair name when one exists.
    return self.mlag_pairs.get(physnet, physnet)
Find the appropriate physnet for the host
41,469
def _bind_fabric(self, context, segment):
    """Allocate a dynamic VLAN segment and continue the binding."""
    port_id = context.current['id']
    physnet = self._get_physnet(context)
    if not physnet:
        LOG.debug("bind_port for port %(port)s: no physical_network "
                  "found", {'port': port_id})
        return False
    dyn_segment = context.allocate_dynamic_segment(
        {'network_id': context.network.current['id'],
         'network_type': n_const.TYPE_VLAN,
         'physical_network': physnet})
    LOG.debug("bind_port for port %(port)s: "
              "current_segment=%(current_seg)s, "
              "next_segment=%(next_seg)s",
              {'port': port_id, 'current_seg': segment,
               'next_seg': dyn_segment})
    context.continue_binding(segment['id'], [dyn_segment])
    return True
Allocate dynamic segments for the port
41,470
def bind_port(self, context):
    """Bind port to a network segment.

    VXLAN fabric segments (no physnet) are bound by allocating a dynamic
    VLAN segment; baremetal ports are bound directly on managed physnets.
    """
    port = context.current
    log_context("bind_port: port", port)
    for segment in context.segments_to_bind:
        physnet = segment.get(driver_api.PHYSICAL_NETWORK)
        segment_type = segment[driver_api.NETWORK_TYPE]
        if not physnet:
            # No physnet: only a VXLAN fabric segment can be bound, and
            # only when this driver manages the fabric.
            if (segment_type == n_const.TYPE_VXLAN and self.manage_fabric):
                if self._bind_fabric(context, segment):
                    continue
        elif (port.get(portbindings.VNIC_TYPE) ==
                portbindings.VNIC_BAREMETAL):
            # Baremetal ports bind on managed physnets (or on any physnet
            # when managed_physnets is not configured).
            if (not self.managed_physnets or
                    physnet in self.managed_physnets):
                if self._bind_baremetal_port(context, segment):
                    continue
        # Reached only when no branch above completed the binding.
        LOG.debug("Arista mech driver unable to bind port %(port)s to "
                  "%(seg_type)s segment on physical_network %(physnet)s",
                  {'port': port.get('id'), 'seg_type': segment_type,
                   'physnet': physnet})
Bind port to a network segment .
41,471
def _try_to_release_dynamic_segment(self, context, migration=False):
    """Release dynamic segment if necessary.

    Frees dynamic segments that this driver allocated during
    hierarchical binding, once no other port binding references them.
    On migration, the original (pre-migration) binding levels are used.
    """
    if migration:
        binding_levels = context.original_binding_levels
    else:
        binding_levels = context.binding_levels
    LOG.debug("_try_release_dynamic_segment: "
              "binding_levels=%(bl)s", {'bl': binding_levels})
    if not binding_levels:
        return
    # Iterate levels 1..n; `prior_level` indexes the level directly
    # above `binding`, i.e. the driver that allocated the segment bound
    # at the lower level.
    for prior_level, binding in enumerate(binding_levels[1:]):
        allocating_driver = binding_levels[prior_level].get(
            driver_api.BOUND_DRIVER)
        if allocating_driver != a_const.MECHANISM_DRV_NAME:
            continue
        bound_segment = binding.get(driver_api.BOUND_SEGMENT, {})
        segment_id = bound_segment.get('id')
        # Only dynamic segments are ours to free, and only when no port
        # binding still uses them.
        if not db_lib.segment_is_dynamic(segment_id):
            continue
        if not db_lib.segment_bound(segment_id):
            context.release_dynamic_segment(segment_id)
            LOG.debug("Released dynamic segment %(seg)s allocated "
                      "by %(drv)s", {'seg': segment_id,
                                     'drv': allocating_driver})
Release dynamic segment if necessary
41,472
def roller(timestamps, contract_dates, get_weights, **kwargs):
    """Calculate weight allocations to tradeable instruments for generic
    futures at a set of timestamps for a given root generic.

    Input validation inside get_weights is performed only for the first
    timestamp, since the contract data does not change between calls.
    """
    ordered_ts = sorted(timestamps)
    contract_dates = contract_dates.sort_values()
    _check_contract_dates(contract_dates)
    first_ts, remaining_ts = ordered_ts[0], ordered_ts[1:]
    all_weights = list(get_weights(first_ts, contract_dates,
                                   validate_inputs=True, **kwargs))
    for ts in remaining_ts:
        all_weights.extend(get_weights(ts, contract_dates,
                                       validate_inputs=False, **kwargs))
    return aggregate_weights(all_weights)
Calculate weight allocations to tradeable instruments for generic futures at a set of timestamps for a given root generic .
41,473
def aggregate_weights(weights, drop_date=False):
    """Transform a list of (generic, contract, weight, date) tuples into
    a pandas.DataFrame of float weights indexed by (date, contract),
    with one column per generic and missing entries filled with 0.
    """
    frame = pd.DataFrame(weights,
                         columns=["generic", "contract", "weight", "date"])
    table = frame.pivot_table(index=['date', 'contract'],
                              columns=['generic'], values='weight',
                              fill_value=0)
    table = table.astype(float).sort_index()
    if drop_date:
        # NOTE(review): levels[-1] yields the unique contract labels;
        # this presumably assumes a single date is present — confirm
        # with callers before relying on multi-date input here.
        table.index = table.index.levels[-1]
    return table
Transforms list of tuples of weights into pandas . DataFrame of weights .
41,474
def synchronize_switch(self, switch_ip, expected_acls, expected_bindings):
    """Update ACL config on one switch to match the expected config."""
    actual_acls, actual_bindings = self._get_dynamic_acl_info(switch_ip)
    expected_bindings = self.adjust_bindings_for_lag(switch_ip,
                                                    expected_bindings)
    cmds = []
    cmds.extend(self.get_sync_acl_cmds(actual_acls, expected_acls))
    cmds.extend(self.get_sync_binding_cmds(actual_bindings,
                                           expected_bindings))
    self.run_openstack_sg_cmds(cmds, self._switches.get(switch_ip))
Update ACL config on a switch to match expected config
41,475
def synchronize(self):
    """Perform sync of the security groups between ML2 and EOS."""
    expected_acls = self.get_expected_acls()
    all_expected_bindings = self.get_expected_bindings()
    for switch_ip in self._switches.keys():
        try:
            self.synchronize_switch(
                switch_ip, expected_acls,
                all_expected_bindings.get(switch_ip, []))
        except Exception:
            # Keep syncing the remaining switches even if one fails.
            LOG.exception("Failed to sync SGs for %(switch)s",
                          {'switch': switch_ip})
Perform sync of the security groups between ML2 and EOS .
41,476
def check_vlan_type_driver_commands(self):
    """Probe EOS for VLAN type driver support ('resource-pool' command).

    Records the working command in self.cli_commands, or an empty list
    when the running EOS does not support it.
    """
    cmd = ['show openstack resource-pool vlan region %s uuid' %
           self.region]
    try:
        self._run_eos_cmds(cmd)
    except arista_exc.AristaRpcError:
        self.cli_commands['resource-pool'] = []
        LOG.warning(
            _LW("'resource-pool' command '%s' is not available on EOS"),
            cmd)
    else:
        self.cli_commands['resource-pool'] = cmd
Checks the validity of CLI commands for Arista's VLAN type driver.
41,477
def get_vlan_assignment_uuid(self):
    """Returns the UUID for the region's VLAN assignment on CVX,
    or None when the probe command is unavailable.
    """
    probe_cmd = self.cli_commands['resource-pool']
    if not probe_cmd:
        return None
    return self._run_eos_cmds(commands=probe_cmd)[0]
Returns the UUID for the region's VLAN assignment on CVX
41,478
def get_vlan_allocation(self):
    """Returns the status of the region's VLAN pool in CVX.

    Returns the region's default vlanPool dict when CVX reports it,
    a dict of empty strings when CVX has no data, or None when the
    CVX version lacks VLAN type driver support (a warning is logged).
    """
    if not self.cli_commands['resource-pool']:
        # Bug fix: the implicit string concatenation was missing a
        # space and logged "...does not supportarista...".
        LOG.warning(_('The version of CVX you are using does not support '
                      'arista VLAN type driver.'))
    else:
        cmd = ['show openstack resource-pools region %s' % self.region]
        command_output = self._run_eos_cmds(cmd)
        if command_output:
            regions = command_output[0]['physicalNetwork']
            if self.region in regions.keys():
                return regions[self.region]['vlanPool']['default']
        # No usable output: report an empty pool rather than None.
        return {'assignedVlans': '',
                'availableVlans': '',
                'allocatedVlans': ''}
Returns the status of the region's VLAN pool in CVX
41,479
def _build_command ( self , cmds , sync = False ) : region_cmd = 'region %s' % self . region if sync : region_cmd = self . cli_commands [ const . CMD_REGION_SYNC ] full_command = [ 'enable' , 'configure' , 'cvx' , 'service openstack' , region_cmd , ] full_command . extend ( cmds ) return full_command
Build the full EOS openstack CLI command.
41,480
def get_baremetal_physnet(self, context):
    """Return the physnet name for a baremetal port, or None.

    Queries EOS for the physical topology and matches the switch_id
    values from the port's binding profile against the known switches.
    Any failure is logged and treated as "not found".
    """
    port = context.current
    host_id = context.host
    cmd = ['show network physical-topology hosts']
    try:
        response = self._run_eos_cmds(cmd)
        binding_profile = port.get(portbindings.PROFILE, {})
        link_info = binding_profile.get('local_link_information', [])
        for link in link_info:
            switch_id = link.get('switch_id')
            for host in response[0]['hosts'].values():
                # 'name' is the switch identifier; 'hostname' is the
                # physnet it belongs to.
                if switch_id == host['name']:
                    physnet = host['hostname']
                    LOG.debug("get_physical_network: Physical Network for "
                              "%(host)s is %(physnet)s",
                              {'host': host_id, 'physnet': physnet})
                    return physnet
        LOG.debug("Physical network not found for %(host)s",
                  {'host': host_id})
    except Exception as exc:
        LOG.error(_LE('command %(cmd)s failed with '
                      '%(exc)s'), {'cmd': cmd, 'exc': exc})
    return None
Returns the physical network name for a baremetal host, or None
41,481
def get_host_physnet(self, context):
    """Return the physnet name for a (non-baremetal) host, or None.

    Queries EOS for topology neighbors and matches the host name
    against them. Any failure is logged and treated as "not found".
    """
    host_id = utils.hostname(context.host)
    cmd = ['show network physical-topology neighbors']
    try:
        response = self._run_eos_cmds(cmd)
        neighbors = response[0]['neighbors']
        for neighbor in neighbors:
            if host_id not in neighbor:
                continue
            physnet = neighbors[neighbor]['toPort'][0]['hostname']
            LOG.debug("get_physical_network: Physical Network for "
                      "%(host)s is %(physnet)s",
                      {'host': host_id, 'physnet': physnet})
            return physnet
        LOG.debug("Physical network not found for %(host)s",
                  {'host': host_id})
    except Exception as exc:
        LOG.error(_LE('command %(cmd)s failed with '
                      '%(exc)s'), {'cmd': cmd, 'exc': exc})
    return None
Returns the physical network name for a host from the physical topology, or None
41,482
def filter_unnecessary_segments(query):
    """Filter out segments that are not needed on CVX."""
    network_model = models_v2.Network
    segment_model = segment_models.NetworkSegment
    return (query
            .join_if_necessary(network_model)
            .join_if_necessary(segment_model)
            .filter(network_model.project_id != '')
            .filter_network_type())
Filter segments are not needed on CVX
41,483
def filter_network_type(query):
    """Keep only segments whose network_type is supported."""
    segment_model = segment_models.NetworkSegment
    return query.filter(
        segment_model.network_type.in_(utils.SUPPORTED_NETWORK_TYPES))
Filter unsupported segment types
41,484
def filter_unbound_ports(query):
    """Drop ports with no host binding, device id or network id."""
    port_model = models_v2.Port
    binding_level_model = ml2_models.PortBindingLevel
    # '!= None' is intentional: SQLAlchemy renders it as IS NOT NULL.
    return (query
            .join_if_necessary(port_model)
            .join_if_necessary(binding_level_model)
            .filter(binding_level_model.host != '',
                    port_model.device_id != None,  # noqa: E711
                    port_model.network_id != None))  # noqa: E711
Filter ports not bound to a host or network
41,485
def filter_by_device_owner(query, device_owners=None):
    """Keep ports whose device_owner is supported and not blacklisted."""
    port_model = models_v2.Port
    owners = device_owners or utils.SUPPORTED_DEVICE_OWNERS
    supported = [port_model.device_owner.ilike('%s%%' % owner)
                 for owner in owners]
    unsupported = [port_model.device_owner.notilike('%s%%' % owner)
                   for owner in utils.UNSUPPORTED_DEVICE_OWNERS]
    return query.filter(and_(*unsupported), or_(*supported))
Filter ports by device_owner
41,486
def filter_by_device_id(query):
    """Filter ports attached to devices we don't care about.

    Excludes any port whose device_id starts with one of the
    unsupported device-id prefixes.
    """
    port_model = models_v2.Port
    # Loop variable renamed from `id` to avoid shadowing the builtin.
    unsupported_device_id_filter = [
        port_model.device_id.notilike('%s%%' % device_id)
        for device_id in utils.UNSUPPORTED_DEVICE_IDS]
    return query.filter(and_(*unsupported_device_id_filter))
Filter ports attached to devices we don t care about
41,487
def filter_unmanaged_physnets(query):
    """Restrict to physnets this driver manages, when any are configured."""
    managed_physnets = cfg.CONF.ml2_arista['managed_physnets']
    if not managed_physnets:
        # Empty config means every physnet is managed; no filtering.
        return query
    segment_model = segment_models.NetworkSegment
    return (query
            .join_if_necessary(segment_model)
            .filter(segment_model.physical_network.in_(managed_physnets)))
Filter ports managed by other ML2 plugins
41,488
def filter_inactive_ports(query):
    """Keep only ports in ACTIVE status."""
    port_model = models_v2.Port
    return query.filter(port_model.status == n_const.PORT_STATUS_ACTIVE)
Filter ports that aren't in active status
41,489
def filter_unnecessary_ports(query, device_owners=None, vnic_type=None,
                             active=True):
    """Apply every port filter that decides relevance on CVX."""
    filtered = (query
                .filter_unbound_ports()
                .filter_by_device_owner(device_owners)
                .filter_by_device_id()
                .filter_unmanaged_physnets())
    if active:
        filtered = filtered.filter_inactive_ports()
    if vnic_type:
        filtered = filtered.filter_by_vnic_type(vnic_type)
    return filtered
Filter out all ports that are not needed on CVX
41,490
def get_networks(network_id=None):
    """Returns list of all networks that may be relevant on CVX."""
    session = db.get_reader_session()
    with session.begin():
        model = models_v2.Network
        query = session.query(model).filter(model.project_id != '')
        if network_id:
            query = query.filter(model.id == network_id)
        return query.all()
Returns list of all networks that may be relevant on CVX
41,491
def get_segments(segment_id=None):
    """Returns list of all network segments that may be relevant on CVX."""
    session = db.get_reader_session()
    with session.begin():
        model = segment_models.NetworkSegment
        query = session.query(model).filter_unnecessary_segments()
        if segment_id:
            query = query.filter(model.id == segment_id)
        return query.all()
Returns list of all network segments that may be relevant on CVX
41,492
def get_instances(device_owners=None, vnic_type=None, instance_id=None):
    """Returns filtered list of all instances in the neutron db.

    One row per device_id, with its port binding outer-joined in.
    """
    session = db.get_reader_session()
    with session.begin():
        port_model = models_v2.Port
        binding_model = ml2_models.PortBinding
        query = (session.query(port_model, binding_model)
                 .outerjoin(binding_model,
                            port_model.id == binding_model.port_id)
                 .distinct(port_model.device_id)
                 .group_by(port_model.device_id)
                 .filter_unnecessary_ports(device_owners, vnic_type))
        if instance_id:
            query = query.filter(port_model.device_id == instance_id)
        return query.all()
Returns filtered list of all instances in the neutron db
41,493
def get_ports(device_owners=None, vnic_type=None, port_id=None,
              active=True):
    """Returns list of all relevant ports in the neutron db."""
    session = db.get_reader_session()
    with session.begin():
        port_model = models_v2.Port
        query = (session.query(port_model)
                 .filter_unnecessary_ports(device_owners, vnic_type,
                                           active))
        if port_id:
            query = query.filter(port_model.id == port_id)
        return query.all()
Returns list of all ports in the neutron db
41,494
def get_port_bindings(binding_key=None):
    """Returns filtered list of port bindings that may be relevant on CVX.

    Collects both regular and distributed (DVR) port bindings, attaches
    their binding levels as `binding.levels`, and optionally restricts
    to one key which is either (port_id, host) or
    (port_id, (switch_id, switch_port)) for baremetal ports.
    """
    session = db.get_reader_session()
    with session.begin():
        binding_level_model = ml2_models.PortBindingLevel
        aliased_blm = aliased(ml2_models.PortBindingLevel)
        port_binding_model = ml2_models.PortBinding
        dist_binding_model = ml2_models.DistributedPortBinding
        # Regular bindings joined to their levels; the aliased model lets
        # the levels be selected alongside the binding row itself.
        bindings = (session.query(port_binding_model, aliased_blm)
                    .join(binding_level_model,
                          and_(port_binding_model.port_id ==
                               binding_level_model.port_id,
                               port_binding_model.host ==
                               binding_level_model.host))
                    .filter_unnecessary_ports()
                    .join(aliased_blm,
                          and_(port_binding_model.port_id ==
                               aliased_blm.port_id,
                               port_binding_model.host ==
                               aliased_blm.host)))
        # Distributed bindings, restricted to ACTIVE status.
        dist_bindings = (session.query(dist_binding_model, aliased_blm)
                         .join(binding_level_model,
                               and_(dist_binding_model.port_id ==
                                    binding_level_model.port_id,
                                    dist_binding_model.host ==
                                    binding_level_model.host))
                         .filter_unnecessary_ports()
                         .filter(dist_binding_model.status ==
                                 n_const.PORT_STATUS_ACTIVE)
                         .join(aliased_blm,
                               and_(dist_binding_model.port_id ==
                                    aliased_blm.port_id,
                                    dist_binding_model.host ==
                                    aliased_blm.host)))
        if binding_key:
            port_id = binding_key[0]
            if type(binding_key[1]) == tuple:
                # Baremetal key: match switch id/port as substrings of
                # the serialized binding profile.
                switch_id = binding_key[1][0]
                switch_port = binding_key[1][1]
                bindings = bindings.filter(and_(
                    port_binding_model.port_id == port_id,
                    port_binding_model.profile.ilike('%%%s%%'
                                                     % switch_id),
                    port_binding_model.profile.ilike('%%%s%%'
                                                     % switch_port)))
                dist_bindings = dist_bindings.filter(and_(
                    dist_binding_model.port_id == port_id,
                    dist_binding_model.profile.ilike('%%%s%%'
                                                     % switch_id),
                    dist_binding_model.profile.ilike('%%%s%%'
                                                     % switch_port)))
            else:
                host_id = binding_key[1]
                bindings = bindings.filter(and_(
                    port_binding_model.port_id == port_id,
                    port_binding_model.host == host_id))
                dist_bindings = dist_bindings.filter(and_(
                    dist_binding_model.port_id == port_id,
                    dist_binding_model.host == host_id))
        # Group selected levels under their binding row and attach them
        # for the caller.
        binding_levels = collections.defaultdict(list)
        for binding, level in bindings.all() + dist_bindings.all():
            binding_levels[binding].append(level)
        bindings_with_levels = list()
        for binding, levels in binding_levels.items():
            binding.levels = levels
            bindings_with_levels.append(binding)
        return bindings_with_levels
Returns filtered list of port bindings that may be relevant on CVX
41,495
def tenant_provisioned(tenant_id):
    """Returns true if any networks or ports exist for a tenant."""
    session = db.get_reader_session()
    with session.begin():
        models = [models_v2.Network, models_v2.Port]
        return any(
            session.query(m).filter(m.tenant_id == tenant_id).count()
            for m in models)
Returns true if any networks or ports exist for a tenant .
41,496
def instance_provisioned(device_id):
    """Returns true if any ports exist for an instance."""
    session = db.get_reader_session()
    with session.begin():
        port_model = models_v2.Port
        count = (session.query(port_model)
                 .filter(port_model.device_id == device_id)
                 .count())
        return bool(count)
Returns true if any ports exist for an instance .
41,497
def port_provisioned(port_id):
    """Returns true if the port still exists."""
    session = db.get_reader_session()
    with session.begin():
        port_model = models_v2.Port
        count = (session.query(port_model)
                 .filter(port_model.id == port_id)
                 .count())
        return bool(count)
Returns true if port still exists .
41,498
def get_parent(port_id):
    """Get a trunk subport's parent port (empty dict when not a subport)."""
    session = db.get_reader_session()
    with session.begin():
        subport_model = trunk_models.SubPort
        trunk_model = trunk_models.Trunk
        subport = (session.query(subport_model)
                   .filter(subport_model.port_id == port_id)
                   .first())
        if not subport:
            return dict()
        trunk = (session.query(trunk_model)
                 .filter(trunk_model.id == subport.trunk_id)
                 .first())
        if not trunk:
            return dict()
        # The trunk's own port is the parent; fetch it regardless of
        # its status.
        return get_ports(port_id=trunk.port.id, active=False)[0]
Get trunk subport's parent port
41,499
def get_port_binding_level(filters):
    """Returns PortBindingLevel rows matching the given filters,
    ordered by level.
    """
    session = db.get_reader_session()
    with session.begin():
        level_model = ml2_models.PortBindingLevel
        query = (session.query(level_model)
                 .filter_by(**filters)
                 .order_by(level_model.level))
        return query.all()
Returns entries from PortBindingLevel based on the specified filters .