repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
75
19.8k
code_tokens
listlengths
20
707
docstring
stringlengths
3
17.3k
docstring_tokens
listlengths
3
222
sha
stringlengths
40
40
url
stringlengths
87
242
partition
stringclasses
1 value
idx
int64
0
252k
openstack/networking-arista
networking_arista/l3Plugin/l3_arista.py
AristaL3ServicePlugin.add_router_interface
def add_router_interface(self, context, router_id, interface_info):
    """Add a subnet of a network to an existing router.

    Creates the interface in the Neutron DB via the parent plugin, then
    programs the corresponding SVI on the Arista hardware.  If the hardware
    programming fails, the DB-side interface is removed again and the
    original exception is re-raised.

    :param context: neutron request context
    :param router_id: UUID of the router receiving the interface
    :param interface_info: dict keyed by 'subnet_id' or 'port_id'
    :returns: the interface info returned by the parent plugin
    """
    new_router = super(AristaL3ServicePlugin, self).add_router_interface(
        context, router_id, interface_info)
    core = directory.get_plugin()

    # Get network info for the subnet that is being added to the router.
    # Check if the interface information is by port-id or subnet-id
    add_by_port, add_by_sub = self._validate_interface_info(interface_info)
    if add_by_sub:
        subnet = core.get_subnet(context, interface_info['subnet_id'])
    elif add_by_port:
        # NOTE(review): only the first fixed IP's subnet is considered here.
        port = core.get_port(context, interface_info['port_id'])
        subnet_id = port['fixed_ips'][0]['subnet_id']
        subnet = core.get_subnet(context, subnet_id)
    network_id = subnet['network_id']

    # To create SVI's in Arista HW, the segmentation Id is required
    # for this network.
    ml2_db = NetworkContext(self, context, {'id': network_id})
    seg_id = ml2_db.network_segments[0]['segmentation_id']

    # Package all the info needed for Hw programming
    router = self.get_router(context, router_id)
    router_info = copy.deepcopy(new_router)
    router_info['seg_id'] = seg_id
    router_info['name'] = router['name']
    router_info['cidr'] = subnet['cidr']
    router_info['gip'] = subnet['gateway_ip']
    router_info['ip_version'] = subnet['ip_version']

    try:
        self.driver.add_router_interface(context, router_info)
        return new_router
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.error(_LE("Error Adding subnet %(subnet)s to "
                          "router %(router_id)s on Arista HW"),
                      {'subnet': subnet, 'router_id': router_id})
            # Roll back the DB change so Neutron and the HW stay in sync.
            super(AristaL3ServicePlugin, self).remove_router_interface(
                context, router_id, interface_info)
python
def add_router_interface(self, context, router_id, interface_info): """Add a subnet of a network to an existing router.""" new_router = super(AristaL3ServicePlugin, self).add_router_interface( context, router_id, interface_info) core = directory.get_plugin() # Get network info for the subnet that is being added to the router. # Check if the interface information is by port-id or subnet-id add_by_port, add_by_sub = self._validate_interface_info(interface_info) if add_by_sub: subnet = core.get_subnet(context, interface_info['subnet_id']) elif add_by_port: port = core.get_port(context, interface_info['port_id']) subnet_id = port['fixed_ips'][0]['subnet_id'] subnet = core.get_subnet(context, subnet_id) network_id = subnet['network_id'] # To create SVI's in Arista HW, the segmentation Id is required # for this network. ml2_db = NetworkContext(self, context, {'id': network_id}) seg_id = ml2_db.network_segments[0]['segmentation_id'] # Package all the info needed for Hw programming router = self.get_router(context, router_id) router_info = copy.deepcopy(new_router) router_info['seg_id'] = seg_id router_info['name'] = router['name'] router_info['cidr'] = subnet['cidr'] router_info['gip'] = subnet['gateway_ip'] router_info['ip_version'] = subnet['ip_version'] try: self.driver.add_router_interface(context, router_info) return new_router except Exception: with excutils.save_and_reraise_exception(): LOG.error(_LE("Error Adding subnet %(subnet)s to " "router %(router_id)s on Arista HW"), {'subnet': subnet, 'router_id': router_id}) super(AristaL3ServicePlugin, self).remove_router_interface( context, router_id, interface_info)
[ "def", "add_router_interface", "(", "self", ",", "context", ",", "router_id", ",", "interface_info", ")", ":", "new_router", "=", "super", "(", "AristaL3ServicePlugin", ",", "self", ")", ".", "add_router_interface", "(", "context", ",", "router_id", ",", "interf...
Add a subnet of a network to an existing router.
[ "Add", "a", "subnet", "of", "a", "network", "to", "an", "existing", "router", "." ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/l3Plugin/l3_arista.py#L287-L331
train
41,400
openstack/networking-arista
networking_arista/l3Plugin/l3_arista.py
AristaL3ServicePlugin.remove_router_interface
def remove_router_interface(self, context, router_id, interface_info):
    """Remove a subnet of a network from an existing router.

    Removes the interface from the Neutron DB via the parent plugin, then
    removes the corresponding SVI from the Arista hardware.  Hardware
    failures are logged but not raised, since the DB change has already
    been committed.

    :param context: neutron request context
    :param router_id: UUID of the router losing the interface
    :param interface_info: dict keyed by 'subnet_id' or 'port_id'
    :returns: info of the removed interface, or None if the hardware
        update failed
    """
    router_to_del = (
        super(AristaL3ServicePlugin, self).remove_router_interface(
            context, router_id, interface_info)
    )

    # Get network information of the subnet that is being removed
    core = directory.get_plugin()
    subnet = core.get_subnet(context, router_to_del['subnet_id'])
    network_id = subnet['network_id']

    # For SVI removal from Arista HW, segmentation ID is needed
    ml2_db = NetworkContext(self, context, {'id': network_id})
    seg_id = ml2_db.network_segments[0]['segmentation_id']

    router = self.get_router(context, router_id)
    router_info = copy.deepcopy(router_to_del)
    router_info['seg_id'] = seg_id
    router_info['name'] = router['name']

    try:
        self.driver.remove_router_interface(context, router_info)
        return router_to_del
    except Exception as exc:
        # Bug fix: the format string previously read "Exception =(exc)s"
        # (missing '%'), so the exception text was never interpolated and
        # the 'exc' mapping key was dead.
        LOG.error(_LE("Error removing interface %(interface)s from "
                      "router %(router_id)s on Arista HW. "
                      "Exception: %(exc)s"),
                  {'interface': interface_info,
                   'router_id': router_id,
                   'exc': exc})
python
def remove_router_interface(self, context, router_id, interface_info): """Remove a subnet of a network from an existing router.""" router_to_del = ( super(AristaL3ServicePlugin, self).remove_router_interface( context, router_id, interface_info) ) # Get network information of the subnet that is being removed core = directory.get_plugin() subnet = core.get_subnet(context, router_to_del['subnet_id']) network_id = subnet['network_id'] # For SVI removal from Arista HW, segmentation ID is needed ml2_db = NetworkContext(self, context, {'id': network_id}) seg_id = ml2_db.network_segments[0]['segmentation_id'] router = self.get_router(context, router_id) router_info = copy.deepcopy(router_to_del) router_info['seg_id'] = seg_id router_info['name'] = router['name'] try: self.driver.remove_router_interface(context, router_info) return router_to_del except Exception as exc: LOG.error(_LE("Error removing interface %(interface)s from " "router %(router_id)s on Arista HW" "Exception =(exc)s"), {'interface': interface_info, 'router_id': router_id, 'exc': exc})
[ "def", "remove_router_interface", "(", "self", ",", "context", ",", "router_id", ",", "interface_info", ")", ":", "router_to_del", "=", "(", "super", "(", "AristaL3ServicePlugin", ",", "self", ")", ".", "remove_router_interface", "(", "context", ",", "router_id", ...
Remove a subnet of a network from an existing router.
[ "Remove", "a", "subnet", "of", "a", "network", "from", "an", "existing", "router", "." ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/l3Plugin/l3_arista.py#L334-L366
train
41,401
openstack/networking-arista
networking_arista/ml2/security_groups/switch_helper.py
AristaSecurityGroupSwitchHelper.initialize_switch_endpoints
def initialize_switch_endpoints(self):
    """Initialize endpoints for switch communication.

    Builds one EAPIClient per entry of the ``switch_info`` config option
    (each entry an ``ip:user:password`` triple) and then verifies dynamic
    ACL support on every configured switch.

    :raises arista_exc.AristaConfigError: if no switch is configured
    """
    self._switches = {}
    self._port_group_info = {}
    self._validate_config()
    for s in cfg.CONF.ml2_arista.switch_info:
        # Split on the first two ':' only so that passwords which
        # themselves contain ':' remain intact (plain split() would raise
        # a ValueError on such entries).
        switch_ip, switch_user, switch_pass = s.split(":", 2)
        # "''" is the configured spelling of an empty password.
        if switch_pass == "''":
            switch_pass = ''
        self._switches[switch_ip] = api.EAPIClient(
            switch_ip,
            switch_user,
            switch_pass,
            verify=False,
            timeout=cfg.CONF.ml2_arista.conn_timeout)
    self._check_dynamic_acl_support()
python
def initialize_switch_endpoints(self): """Initialize endpoints for switch communication""" self._switches = {} self._port_group_info = {} self._validate_config() for s in cfg.CONF.ml2_arista.switch_info: switch_ip, switch_user, switch_pass = s.split(":") if switch_pass == "''": switch_pass = '' self._switches[switch_ip] = api.EAPIClient( switch_ip, switch_user, switch_pass, verify=False, timeout=cfg.CONF.ml2_arista.conn_timeout) self._check_dynamic_acl_support()
[ "def", "initialize_switch_endpoints", "(", "self", ")", ":", "self", ".", "_switches", "=", "{", "}", "self", ".", "_port_group_info", "=", "{", "}", "self", ".", "_validate_config", "(", ")", "for", "s", "in", "cfg", ".", "CONF", ".", "ml2_arista", ".",...
Initialize endpoints for switch communication
[ "Initialize", "endpoints", "for", "switch", "communication" ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/ml2/security_groups/switch_helper.py#L42-L57
train
41,402
openstack/networking-arista
networking_arista/ml2/security_groups/switch_helper.py
AristaSecurityGroupSwitchHelper._check_dynamic_acl_support
def _check_dynamic_acl_support(self): """Log an error if any switches don't support dynamic ACLs""" cmds = ['ip access-list openstack-test dynamic', 'no ip access-list openstack-test'] for switch_ip, switch_client in self._switches.items(): try: self.run_openstack_sg_cmds(cmds) except Exception: LOG.error("Switch %s does not support dynamic ACLs. SG " "support will not be enabled on this switch.", switch_ip)
python
def _check_dynamic_acl_support(self): """Log an error if any switches don't support dynamic ACLs""" cmds = ['ip access-list openstack-test dynamic', 'no ip access-list openstack-test'] for switch_ip, switch_client in self._switches.items(): try: self.run_openstack_sg_cmds(cmds) except Exception: LOG.error("Switch %s does not support dynamic ACLs. SG " "support will not be enabled on this switch.", switch_ip)
[ "def", "_check_dynamic_acl_support", "(", "self", ")", ":", "cmds", "=", "[", "'ip access-list openstack-test dynamic'", ",", "'no ip access-list openstack-test'", "]", "for", "switch_ip", ",", "switch_client", "in", "self", ".", "_switches", ".", "items", "(", ")", ...
Log an error if any switches don't support dynamic ACLs
[ "Log", "an", "error", "if", "any", "switches", "don", "t", "support", "dynamic", "ACLs" ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/ml2/security_groups/switch_helper.py#L59-L69
train
41,403
openstack/networking-arista
networking_arista/ml2/security_groups/switch_helper.py
AristaSecurityGroupSwitchHelper._validate_config
def _validate_config(self):
    """Ensure at least one switch is configured.

    :raises arista_exc.AristaConfigError: when ``switch_info`` is empty
    """
    switch_info = cfg.CONF.ml2_arista.get('switch_info')
    if len(switch_info) >= 1:
        return
    msg = _('Required option - when "sec_group_support" is enabled, '
            'at least one switch must be specified ')
    LOG.exception(msg)
    raise arista_exc.AristaConfigError(msg=msg)
python
def _validate_config(self): """Ensure at least one switch is configured""" if len(cfg.CONF.ml2_arista.get('switch_info')) < 1: msg = _('Required option - when "sec_group_support" is enabled, ' 'at least one switch must be specified ') LOG.exception(msg) raise arista_exc.AristaConfigError(msg=msg)
[ "def", "_validate_config", "(", "self", ")", ":", "if", "len", "(", "cfg", ".", "CONF", ".", "ml2_arista", ".", "get", "(", "'switch_info'", ")", ")", "<", "1", ":", "msg", "=", "_", "(", "'Required option - when \"sec_group_support\" is enabled, '", "'at leas...
Ensure at least one switch is configured
[ "Ensure", "at", "least", "one", "switch", "is", "configured" ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/ml2/security_groups/switch_helper.py#L71-L77
train
41,404
openstack/networking-arista
networking_arista/ml2/security_groups/switch_helper.py
AristaSecurityGroupSwitchHelper._update_port_group_info
def _update_port_group_info(self, switches=None): """Refresh data on switch interfaces' port group membership""" if switches is None: switches = self._switches.keys() for switch_ip in switches: client = self._switches.get(switch_ip) ret = self._run_eos_cmds(['show interfaces'], client) if not ret or len(ret) == 0: LOG.warning("Unable to retrieve interface info for %s", switch_ip) continue intf_info = ret[0] self._port_group_info[switch_ip] = intf_info.get('interfaces', {})
python
def _update_port_group_info(self, switches=None): """Refresh data on switch interfaces' port group membership""" if switches is None: switches = self._switches.keys() for switch_ip in switches: client = self._switches.get(switch_ip) ret = self._run_eos_cmds(['show interfaces'], client) if not ret or len(ret) == 0: LOG.warning("Unable to retrieve interface info for %s", switch_ip) continue intf_info = ret[0] self._port_group_info[switch_ip] = intf_info.get('interfaces', {})
[ "def", "_update_port_group_info", "(", "self", ",", "switches", "=", "None", ")", ":", "if", "switches", "is", "None", ":", "switches", "=", "self", ".", "_switches", ".", "keys", "(", ")", "for", "switch_ip", "in", "switches", ":", "client", "=", "self"...
Refresh data on switch interfaces' port group membership
[ "Refresh", "data", "on", "switch", "interfaces", "port", "group", "membership" ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/ml2/security_groups/switch_helper.py#L146-L158
train
41,405
openstack/networking-arista
networking_arista/ml2/security_groups/switch_helper.py
AristaSecurityGroupSwitchHelper._get_port_for_acl
def _get_port_for_acl(self, port_id, switch): """Gets interface name for ACLs Finds the Port-Channel name if port_id is in a Port-Channel, otherwise ACLs are applied to Ethernet interface. :param port_id: Name of port from ironic db :param server: Server endpoint on the Arista switch to be configured """ all_intf_info = self._port_group_info.get(switch, {}) intf_info = all_intf_info.get(port_id, {}) member_info = intf_info.get('interfaceMembership', '') port_group_info = re.search('Member of (?P<port_group>\S+)', member_info) if port_group_info: port_id = port_group_info.group('port_group') return port_id
python
def _get_port_for_acl(self, port_id, switch): """Gets interface name for ACLs Finds the Port-Channel name if port_id is in a Port-Channel, otherwise ACLs are applied to Ethernet interface. :param port_id: Name of port from ironic db :param server: Server endpoint on the Arista switch to be configured """ all_intf_info = self._port_group_info.get(switch, {}) intf_info = all_intf_info.get(port_id, {}) member_info = intf_info.get('interfaceMembership', '') port_group_info = re.search('Member of (?P<port_group>\S+)', member_info) if port_group_info: port_id = port_group_info.group('port_group') return port_id
[ "def", "_get_port_for_acl", "(", "self", ",", "port_id", ",", "switch", ")", ":", "all_intf_info", "=", "self", ".", "_port_group_info", ".", "get", "(", "switch", ",", "{", "}", ")", "intf_info", "=", "all_intf_info", ".", "get", "(", "port_id", ",", "{...
Gets interface name for ACLs Finds the Port-Channel name if port_id is in a Port-Channel, otherwise ACLs are applied to Ethernet interface. :param port_id: Name of port from ironic db :param server: Server endpoint on the Arista switch to be configured
[ "Gets", "interface", "name", "for", "ACLs" ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/ml2/security_groups/switch_helper.py#L160-L176
train
41,406
openstack/networking-arista
networking_arista/ml2/security_groups/switch_helper.py
AristaSecurityGroupSwitchHelper._supported_rule
def _supported_rule(protocol, ethertype):
    """Checks that the rule is an IPv4 rule of a supported protocol."""
    # A rule qualifies only if it names a supported protocol AND is IPv4.
    has_protocol = bool(protocol) and protocol in utils.SUPPORTED_SG_PROTOCOLS
    return has_protocol and ethertype == n_const.IPv4
python
def _supported_rule(protocol, ethertype): """Checks that the rule is an IPv4 rule of a supported protocol""" if not protocol or protocol not in utils.SUPPORTED_SG_PROTOCOLS: return False if ethertype != n_const.IPv4: return False return True
[ "def", "_supported_rule", "(", "protocol", ",", "ethertype", ")", ":", "if", "not", "protocol", "or", "protocol", "not", "in", "utils", ".", "SUPPORTED_SG_PROTOCOLS", ":", "return", "False", "if", "ethertype", "!=", "n_const", ".", "IPv4", ":", "return", "Fa...
Checks that the rule is an IPv4 rule of a supported protocol
[ "Checks", "that", "the", "rule", "is", "an", "IPv4", "rule", "of", "a", "supported", "protocol" ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/ml2/security_groups/switch_helper.py#L179-L187
train
41,407
openstack/networking-arista
networking_arista/ml2/security_groups/switch_helper.py
AristaSecurityGroupSwitchHelper._format_rule
def _format_rule(self, protocol, cidr, min_port, max_port, direction):
    """Get EOS formatted rule.

    Builds a 'permit ...' ACL line from the rule attributes; the remote
    CIDR lands on the source side for ingress and the destination side
    for egress.
    """
    remote = 'any' if cidr is None else cidr
    if direction == n_const.INGRESS_DIRECTION:
        src_ip, dst_ip = remote, 'any'
    elif direction == n_const.EGRESS_DIRECTION:
        src_ip, dst_ip = 'any', remote

    if protocol == n_const.PROTO_NAME_ICMP:
        # For ICMP the optional ports carry type and code, appended bare.
        pieces = ["permit icmp %s %s" % (src_ip, dst_ip)]
        if min_port:
            pieces.append("%s" % min_port)
        if max_port:
            pieces.append("%s" % max_port)
        rule = " ".join(pieces)
    else:
        rule = "permit %s %s %s" % (protocol, src_ip, dst_ip)
        if min_port:
            if max_port:
                rule += " range %s %s" % (min_port, max_port)
            else:
                rule += " eq %s" % min_port
    return rule
python
def _format_rule(self, protocol, cidr, min_port, max_port, direction): """Get EOS formatted rule""" if cidr is None: cidr = 'any' if direction == n_const.INGRESS_DIRECTION: dst_ip = 'any' src_ip = cidr elif direction == n_const.EGRESS_DIRECTION: dst_ip = cidr src_ip = 'any' if protocol == n_const.PROTO_NAME_ICMP: rule = "permit icmp %s %s" % (src_ip, dst_ip) if min_port: rule += " %s" % (min_port) if max_port: rule += " %s" % (max_port) else: rule = "permit %s %s %s" % (protocol, src_ip, dst_ip) if min_port and max_port: rule += " range %s %s" % (min_port, max_port) elif min_port and not max_port: rule += " eq %s" % min_port return rule
[ "def", "_format_rule", "(", "self", ",", "protocol", ",", "cidr", ",", "min_port", ",", "max_port", ",", "direction", ")", ":", "if", "cidr", "is", "None", ":", "cidr", "=", "'any'", "if", "direction", "==", "n_const", ".", "INGRESS_DIRECTION", ":", "dst...
Get EOS formatted rule
[ "Get", "EOS", "formatted", "rule" ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/ml2/security_groups/switch_helper.py#L189-L213
train
41,408
openstack/networking-arista
networking_arista/ml2/security_groups/switch_helper.py
AristaSecurityGroupCallbackHelper.run_cmds_on_all_switches
def run_cmds_on_all_switches(self, cmds):
    """Runs all cmds on all configured switches.

    Used for ACL and rule creation/deletion, since ACLs and rules must
    exist on every switch.
    """
    for client in self._switches.values():
        self.run_openstack_sg_cmds(cmds, client)
python
def run_cmds_on_all_switches(self, cmds): """Runs all cmds on all configured switches This helper is used for ACL and rule creation/deletion as ACLs and rules must exist on all switches. """ for switch in self._switches.values(): self.run_openstack_sg_cmds(cmds, switch)
[ "def", "run_cmds_on_all_switches", "(", "self", ",", "cmds", ")", ":", "for", "switch", "in", "self", ".", "_switches", ".", "values", "(", ")", ":", "self", ".", "run_openstack_sg_cmds", "(", "cmds", ",", "switch", ")" ]
Runs all cmds on all configured switches This helper is used for ACL and rule creation/deletion as ACLs and rules must exist on all switches.
[ "Runs", "all", "cmds", "on", "all", "configured", "switches" ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/ml2/security_groups/switch_helper.py#L239-L246
train
41,409
openstack/networking-arista
networking_arista/ml2/security_groups/switch_helper.py
AristaSecurityGroupCallbackHelper.run_per_switch_cmds
def run_per_switch_cmds(self, switch_cmds):
    """Applies cmds to appropriate switches.

    Takes a switch-IP -> cmds mapping and runs each cmd list only on its
    own switch.  Used for applying/removing ACLs on interfaces, since that
    config varies from switch to switch.
    """
    for switch_ip, commands in switch_cmds.items():
        client = self._switches.get(switch_ip)
        self.run_openstack_sg_cmds(commands, client)
python
def run_per_switch_cmds(self, switch_cmds): """Applies cmds to appropriate switches This takes in a switch->cmds mapping and runs only the set of cmds specified for a switch on that switch. This helper is used for applying/removing ACLs to/from interfaces as this config will vary from switch to switch. """ for switch_ip, cmds in switch_cmds.items(): switch = self._switches.get(switch_ip) self.run_openstack_sg_cmds(cmds, switch)
[ "def", "run_per_switch_cmds", "(", "self", ",", "switch_cmds", ")", ":", "for", "switch_ip", ",", "cmds", "in", "switch_cmds", ".", "items", "(", ")", ":", "switch", "=", "self", ".", "_switches", ".", "get", "(", "switch_ip", ")", "self", ".", "run_open...
Applies cmds to appropriate switches This takes in a switch->cmds mapping and runs only the set of cmds specified for a switch on that switch. This helper is used for applying/removing ACLs to/from interfaces as this config will vary from switch to switch.
[ "Applies", "cmds", "to", "appropriate", "switches" ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/ml2/security_groups/switch_helper.py#L248-L258
train
41,410
openstack/networking-arista
networking_arista/ml2/security_groups/switch_helper.py
AristaSecurityGroupCallbackHelper._get_switches
def _get_switches(self, profile): """Get set of switches referenced in a port binding profile""" switchports = self._get_switchports(profile) switches = set([switchport[0] for switchport in switchports]) return switches
python
def _get_switches(self, profile): """Get set of switches referenced in a port binding profile""" switchports = self._get_switchports(profile) switches = set([switchport[0] for switchport in switchports]) return switches
[ "def", "_get_switches", "(", "self", ",", "profile", ")", ":", "switchports", "=", "self", ".", "_get_switchports", "(", "profile", ")", "switches", "=", "set", "(", "[", "switchport", "[", "0", "]", "for", "switchport", "in", "switchports", "]", ")", "r...
Get set of switches referenced in a port binding profile
[ "Get", "set", "of", "switches", "referenced", "in", "a", "port", "binding", "profile" ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/ml2/security_groups/switch_helper.py#L260-L264
train
41,411
openstack/networking-arista
networking_arista/ml2/security_groups/switch_helper.py
AristaSecurityGroupCallbackHelper.get_create_security_group_commands
def get_create_security_group_commands(self, sg_id, sg_rules):
    """Commands for creating ACL.

    Emits one dynamic ACL definition per direction (ingress first, then
    egress), each followed by its rules and an 'exit'.
    """
    cmds = []
    in_rules, eg_rules = self._format_rules_for_eos(sg_rules)
    for direction, rules in ((n_const.INGRESS_DIRECTION, in_rules),
                             (n_const.EGRESS_DIRECTION, eg_rules)):
        cmds.append("ip access-list %s dynamic" %
                    self._acl_name(sg_id, direction))
        cmds.extend(rules)
        cmds.append("exit")
    return cmds
python
def get_create_security_group_commands(self, sg_id, sg_rules): """Commands for creating ACL""" cmds = [] in_rules, eg_rules = self._format_rules_for_eos(sg_rules) cmds.append("ip access-list %s dynamic" % self._acl_name(sg_id, n_const.INGRESS_DIRECTION)) for in_rule in in_rules: cmds.append(in_rule) cmds.append("exit") cmds.append("ip access-list %s dynamic" % self._acl_name(sg_id, n_const.EGRESS_DIRECTION)) for eg_rule in eg_rules: cmds.append(eg_rule) cmds.append("exit") return cmds
[ "def", "get_create_security_group_commands", "(", "self", ",", "sg_id", ",", "sg_rules", ")", ":", "cmds", "=", "[", "]", "in_rules", ",", "eg_rules", "=", "self", ".", "_format_rules_for_eos", "(", "sg_rules", ")", "cmds", ".", "append", "(", "\"ip access-lis...
Commands for creating ACL
[ "Commands", "for", "creating", "ACL" ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/ml2/security_groups/switch_helper.py#L266-L280
train
41,412
openstack/networking-arista
networking_arista/ml2/security_groups/switch_helper.py
AristaSecurityGroupCallbackHelper.get_delete_security_group_commands
def get_delete_security_group_commands(self, sg_id):
    """Commands for deleting ACL.

    Removes both the ingress and egress ACLs for the security group.
    """
    return ["no ip access-list %s" % self._acl_name(sg_id, direction)
            for direction in (n_const.INGRESS_DIRECTION,
                              n_const.EGRESS_DIRECTION)]
python
def get_delete_security_group_commands(self, sg_id): """Commands for deleting ACL""" cmds = [] cmds.append("no ip access-list %s" % self._acl_name(sg_id, n_const.INGRESS_DIRECTION)) cmds.append("no ip access-list %s" % self._acl_name(sg_id, n_const.EGRESS_DIRECTION)) return cmds
[ "def", "get_delete_security_group_commands", "(", "self", ",", "sg_id", ")", ":", "cmds", "=", "[", "]", "cmds", ".", "append", "(", "\"no ip access-list %s\"", "%", "self", ".", "_acl_name", "(", "sg_id", ",", "n_const", ".", "INGRESS_DIRECTION", ")", ")", ...
Commands for deleting ACL
[ "Commands", "for", "deleting", "ACL" ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/ml2/security_groups/switch_helper.py#L282-L289
train
41,413
openstack/networking-arista
networking_arista/ml2/security_groups/switch_helper.py
AristaSecurityGroupCallbackHelper.get_delete_security_group_rule_commands
def get_delete_security_group_rule_commands(self, sg_id, sg_rule):
    """Commands for removing a rule from both of the group's ACLs.

    Thin wrapper around ``_get_rule_cmds`` with ``delete=True``.

    :param sg_id: security group UUID
    :param sg_rule: the security group rule to remove
    :returns: list of EOS commands
    """
    return self._get_rule_cmds(sg_id, sg_rule, delete=True)
python
def get_delete_security_group_rule_commands(self, sg_id, sg_rule): """Commands for removing rule from ACLS""" return self._get_rule_cmds(sg_id, sg_rule, delete=True)
[ "def", "get_delete_security_group_rule_commands", "(", "self", ",", "sg_id", ",", "sg_rule", ")", ":", "return", "self", ".", "_get_rule_cmds", "(", "sg_id", ",", "sg_rule", ",", "delete", "=", "True", ")" ]
Commands for removing rule from ACLS
[ "Commands", "for", "removing", "rule", "from", "ACLS" ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/ml2/security_groups/switch_helper.py#L316-L318
train
41,414
openstack/networking-arista
networking_arista/ml2/security_groups/switch_helper.py
AristaSecurityGroupCallbackHelper.get_remove_security_group_commands
def get_remove_security_group_commands(self, sg_id, profile):
    """Commands for removing ACL from interface.

    Thin wrapper around ``_get_interface_commands`` with ``delete=True``.

    :param sg_id: security group UUID
    :param profile: port binding profile identifying the switch interfaces
    :returns: per-switch command mapping
    """
    return self._get_interface_commands(sg_id, profile, delete=True)
python
def get_remove_security_group_commands(self, sg_id, profile): """Commands for removing ACL from interface""" return self._get_interface_commands(sg_id, profile, delete=True)
[ "def", "get_remove_security_group_commands", "(", "self", ",", "sg_id", ",", "profile", ")", ":", "return", "self", ".", "_get_interface_commands", "(", "sg_id", ",", "profile", ",", "delete", "=", "True", ")" ]
Commands for removing ACL from interface
[ "Commands", "for", "removing", "ACL", "from", "interface" ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/ml2/security_groups/switch_helper.py#L347-L349
train
41,415
openstack/networking-arista
networking_arista/ml2/security_groups/switch_helper.py
AristaSecurityGroupSyncHelper._parse_acl_config
def _parse_acl_config(self, acl_config): """Parse configured ACLs and rules ACLs are returned as a dict of rule sets: {<eos_acl1_name>: set([<eos_acl1_rules>]), <eos_acl2_name>: set([<eos_acl2_rules>]), ..., } """ parsed_acls = dict() for acl in acl_config['aclList']: parsed_acls[acl['name']] = set() for rule in acl['sequence']: parsed_acls[acl['name']].add(rule['text']) return parsed_acls
python
def _parse_acl_config(self, acl_config): """Parse configured ACLs and rules ACLs are returned as a dict of rule sets: {<eos_acl1_name>: set([<eos_acl1_rules>]), <eos_acl2_name>: set([<eos_acl2_rules>]), ..., } """ parsed_acls = dict() for acl in acl_config['aclList']: parsed_acls[acl['name']] = set() for rule in acl['sequence']: parsed_acls[acl['name']].add(rule['text']) return parsed_acls
[ "def", "_parse_acl_config", "(", "self", ",", "acl_config", ")", ":", "parsed_acls", "=", "dict", "(", ")", "for", "acl", "in", "acl_config", "[", "'aclList'", "]", ":", "parsed_acls", "[", "acl", "[", "'name'", "]", "]", "=", "set", "(", ")", "for", ...
Parse configured ACLs and rules ACLs are returned as a dict of rule sets: {<eos_acl1_name>: set([<eos_acl1_rules>]), <eos_acl2_name>: set([<eos_acl2_rules>]), ..., }
[ "Parse", "configured", "ACLs", "and", "rules" ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/ml2/security_groups/switch_helper.py#L354-L368
train
41,416
openstack/networking-arista
networking_arista/ml2/security_groups/switch_helper.py
AristaSecurityGroupSyncHelper._parse_binding_config
def _parse_binding_config(self, binding_config):
    """Parse configured interface -> ACL bindings.

    Bindings are returned as a set of (intf, name, direction) tuples:
    set([(intf1, acl_name, direction),
         (intf2, acl_name, direction),
         ...,
    ])
    """
    parsed_bindings = set()
    per_direction = (('configuredIngressIntfs', a_const.INGRESS_DIRECTION),
                     ('configuredEgressIntfs', a_const.EGRESS_DIRECTION))
    for acl in binding_config['aclList']:
        for key, direction in per_direction:
            for intf in acl[key]:
                parsed_bindings.add((intf['name'], acl['name'], direction))
    return parsed_bindings
python
def _parse_binding_config(self, binding_config): """Parse configured interface -> ACL bindings Bindings are returned as a set of (intf, name, direction) tuples: set([(intf1, acl_name, direction), (intf2, acl_name, direction), ..., ]) """ parsed_bindings = set() for acl in binding_config['aclList']: for intf in acl['configuredIngressIntfs']: parsed_bindings.add((intf['name'], acl['name'], a_const.INGRESS_DIRECTION)) for intf in acl['configuredEgressIntfs']: parsed_bindings.add((intf['name'], acl['name'], a_const.EGRESS_DIRECTION)) return parsed_bindings
[ "def", "_parse_binding_config", "(", "self", ",", "binding_config", ")", ":", "parsed_bindings", "=", "set", "(", ")", "for", "acl", "in", "binding_config", "[", "'aclList'", "]", ":", "for", "intf", "in", "acl", "[", "'configuredIngressIntfs'", "]", ":", "p...
Parse configured interface -> ACL bindings Bindings are returned as a set of (intf, name, direction) tuples: set([(intf1, acl_name, direction), (intf2, acl_name, direction), ..., ])
[ "Parse", "configured", "interface", "-", ">", "ACL", "bindings" ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/ml2/security_groups/switch_helper.py#L370-L387
train
41,417
openstack/networking-arista
networking_arista/ml2/security_groups/switch_helper.py
AristaSecurityGroupSyncHelper._get_dynamic_acl_info
def _get_dynamic_acl_info(self, switch_ip): """Retrieve ACLs, ACLs rules and interface bindings from switch""" cmds = ["enable", "show ip access-lists dynamic", "show ip access-lists summary dynamic"] switch = self._switches.get(switch_ip) _, acls, bindings = self._run_eos_cmds(cmds, switch) parsed_acls = self._parse_acl_config(acls) parsed_bindings = self._parse_binding_config(bindings) return parsed_acls, parsed_bindings
python
def _get_dynamic_acl_info(self, switch_ip): """Retrieve ACLs, ACLs rules and interface bindings from switch""" cmds = ["enable", "show ip access-lists dynamic", "show ip access-lists summary dynamic"] switch = self._switches.get(switch_ip) _, acls, bindings = self._run_eos_cmds(cmds, switch) parsed_acls = self._parse_acl_config(acls) parsed_bindings = self._parse_binding_config(bindings) return parsed_acls, parsed_bindings
[ "def", "_get_dynamic_acl_info", "(", "self", ",", "switch_ip", ")", ":", "cmds", "=", "[", "\"enable\"", ",", "\"show ip access-lists dynamic\"", ",", "\"show ip access-lists summary dynamic\"", "]", "switch", "=", "self", ".", "_switches", ".", "get", "(", "switch_...
Retrieve ACLs, ACLs rules and interface bindings from switch
[ "Retrieve", "ACLs", "ACLs", "rules", "and", "interface", "bindings", "from", "switch" ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/ml2/security_groups/switch_helper.py#L389-L401
train
41,418
openstack/networking-arista
networking_arista/ml2/security_groups/switch_helper.py
AristaSecurityGroupSyncHelper.get_expected_acls
def get_expected_acls(self): """Query the neutron DB for Security Groups and Rules Groups and rules are returned as a dict of rule sets: {<eos_acl1_name>: set([<eos_acl1_rules>]), <eos_acl2_name>: set([<eos_acl2_rules>]), ..., } """ security_groups = db_lib.get_security_groups() expected_acls = collections.defaultdict(set) for sg in security_groups: in_rules, out_rules = self._format_rules_for_eos(sg['rules']) ingress_acl_name = self._acl_name(sg['id'], n_const.INGRESS_DIRECTION) egress_acl_name = self._acl_name(sg['id'], n_const.EGRESS_DIRECTION) expected_acls[ingress_acl_name].update(in_rules) expected_acls[egress_acl_name].update(out_rules) return expected_acls
python
def get_expected_acls(self): """Query the neutron DB for Security Groups and Rules Groups and rules are returned as a dict of rule sets: {<eos_acl1_name>: set([<eos_acl1_rules>]), <eos_acl2_name>: set([<eos_acl2_rules>]), ..., } """ security_groups = db_lib.get_security_groups() expected_acls = collections.defaultdict(set) for sg in security_groups: in_rules, out_rules = self._format_rules_for_eos(sg['rules']) ingress_acl_name = self._acl_name(sg['id'], n_const.INGRESS_DIRECTION) egress_acl_name = self._acl_name(sg['id'], n_const.EGRESS_DIRECTION) expected_acls[ingress_acl_name].update(in_rules) expected_acls[egress_acl_name].update(out_rules) return expected_acls
[ "def", "get_expected_acls", "(", "self", ")", ":", "security_groups", "=", "db_lib", ".", "get_security_groups", "(", ")", "expected_acls", "=", "collections", ".", "defaultdict", "(", "set", ")", "for", "sg", "in", "security_groups", ":", "in_rules", ",", "ou...
Query the neutron DB for Security Groups and Rules Groups and rules are returned as a dict of rule sets: {<eos_acl1_name>: set([<eos_acl1_rules>]), <eos_acl2_name>: set([<eos_acl2_rules>]), ..., }
[ "Query", "the", "neutron", "DB", "for", "Security", "Groups", "and", "Rules" ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/ml2/security_groups/switch_helper.py#L403-L423
train
41,419
openstack/networking-arista
networking_arista/ml2/security_groups/switch_helper.py
AristaSecurityGroupSyncHelper.get_expected_bindings
def get_expected_bindings(self): """Query the neutron DB for SG->switch interface bindings Bindings are returned as a dict of bindings for each switch: {<switch1>: set([(intf1, acl_name, direction), (intf2, acl_name, direction)]), <switch2>: set([(intf1, acl_name, direction)]), ..., } """ sg_bindings = db_lib.get_baremetal_sg_bindings() all_expected_bindings = collections.defaultdict(set) for sg_binding, port_binding in sg_bindings: sg_id = sg_binding['security_group_id'] try: binding_profile = json.loads(port_binding.profile) except ValueError: binding_profile = {} switchports = self._get_switchports(binding_profile) for switch, intf in switchports: ingress_name = self._acl_name(sg_id, n_const.INGRESS_DIRECTION) egress_name = self._acl_name(sg_id, n_const.EGRESS_DIRECTION) all_expected_bindings[switch].add( (intf, ingress_name, a_const.INGRESS_DIRECTION)) all_expected_bindings[switch].add( (intf, egress_name, a_const.EGRESS_DIRECTION)) return all_expected_bindings
python
def get_expected_bindings(self): """Query the neutron DB for SG->switch interface bindings Bindings are returned as a dict of bindings for each switch: {<switch1>: set([(intf1, acl_name, direction), (intf2, acl_name, direction)]), <switch2>: set([(intf1, acl_name, direction)]), ..., } """ sg_bindings = db_lib.get_baremetal_sg_bindings() all_expected_bindings = collections.defaultdict(set) for sg_binding, port_binding in sg_bindings: sg_id = sg_binding['security_group_id'] try: binding_profile = json.loads(port_binding.profile) except ValueError: binding_profile = {} switchports = self._get_switchports(binding_profile) for switch, intf in switchports: ingress_name = self._acl_name(sg_id, n_const.INGRESS_DIRECTION) egress_name = self._acl_name(sg_id, n_const.EGRESS_DIRECTION) all_expected_bindings[switch].add( (intf, ingress_name, a_const.INGRESS_DIRECTION)) all_expected_bindings[switch].add( (intf, egress_name, a_const.EGRESS_DIRECTION)) return all_expected_bindings
[ "def", "get_expected_bindings", "(", "self", ")", ":", "sg_bindings", "=", "db_lib", ".", "get_baremetal_sg_bindings", "(", ")", "all_expected_bindings", "=", "collections", ".", "defaultdict", "(", "set", ")", "for", "sg_binding", ",", "port_binding", "in", "sg_b...
Query the neutron DB for SG->switch interface bindings Bindings are returned as a dict of bindings for each switch: {<switch1>: set([(intf1, acl_name, direction), (intf2, acl_name, direction)]), <switch2>: set([(intf1, acl_name, direction)]), ..., }
[ "Query", "the", "neutron", "DB", "for", "SG", "-", ">", "switch", "interface", "bindings" ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/ml2/security_groups/switch_helper.py#L425-L452
train
41,420
openstack/networking-arista
networking_arista/ml2/security_groups/switch_helper.py
AristaSecurityGroupSyncHelper.adjust_bindings_for_lag
def adjust_bindings_for_lag(self, switch_ip, bindings): """Adjusting interface names for expected bindings where LAGs exist""" # Get latest LAG info for switch self._update_port_group_info([switch_ip]) # Update bindings to account for LAG info adjusted_bindings = set() for binding in bindings: adjusted_bindings.add( (self._get_port_for_acl(binding[0], switch_ip),) + binding[1:]) return adjusted_bindings
python
def adjust_bindings_for_lag(self, switch_ip, bindings): """Adjusting interface names for expected bindings where LAGs exist""" # Get latest LAG info for switch self._update_port_group_info([switch_ip]) # Update bindings to account for LAG info adjusted_bindings = set() for binding in bindings: adjusted_bindings.add( (self._get_port_for_acl(binding[0], switch_ip),) + binding[1:]) return adjusted_bindings
[ "def", "adjust_bindings_for_lag", "(", "self", ",", "switch_ip", ",", "bindings", ")", ":", "# Get latest LAG info for switch", "self", ".", "_update_port_group_info", "(", "[", "switch_ip", "]", ")", "# Update bindings to account for LAG info", "adjusted_bindings", "=", ...
Adjusting interface names for expected bindings where LAGs exist
[ "Adjusting", "interface", "names", "for", "expected", "bindings", "where", "LAGs", "exist" ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/ml2/security_groups/switch_helper.py#L454-L465
train
41,421
openstack/networking-arista
networking_arista/ml2/security_groups/switch_helper.py
AristaSecurityGroupSyncHelper.get_sync_acl_cmds
def get_sync_acl_cmds(self, switch_acls, expected_acls): """Returns the list of commands required synchronize switch ACLs 1. Identify unexpected ACLs and delete them 2. Iterate over expected ACLs a. Add missing ACLs + all rules b. Delete unexpected rules c. Add missing rules """ switch_cmds = list() # Delete any stale ACLs acls_to_delete = (set(switch_acls.keys()) - set(expected_acls.keys())) for acl in acls_to_delete: switch_cmds.append('no ip access-list %s' % acl) # Update or create ACLs and rules for acl, expected_rules in expected_acls.items(): switch_rules = switch_acls.get(acl, set()) rules_to_delete = switch_rules - expected_rules rules_to_add = expected_rules - switch_rules # Check if ACL requires create or rule changes if (acl in switch_acls and len(rules_to_add | rules_to_delete) == 0): continue switch_cmds.append('ip access-list %s dynamic' % acl) # Delete any stale rules for rule in rules_to_delete: switch_cmds.append('no ' + rule) # Add any missing rules for rule in rules_to_add: switch_cmds.append(rule) switch_cmds.append('exit') return switch_cmds
python
def get_sync_acl_cmds(self, switch_acls, expected_acls): """Returns the list of commands required synchronize switch ACLs 1. Identify unexpected ACLs and delete them 2. Iterate over expected ACLs a. Add missing ACLs + all rules b. Delete unexpected rules c. Add missing rules """ switch_cmds = list() # Delete any stale ACLs acls_to_delete = (set(switch_acls.keys()) - set(expected_acls.keys())) for acl in acls_to_delete: switch_cmds.append('no ip access-list %s' % acl) # Update or create ACLs and rules for acl, expected_rules in expected_acls.items(): switch_rules = switch_acls.get(acl, set()) rules_to_delete = switch_rules - expected_rules rules_to_add = expected_rules - switch_rules # Check if ACL requires create or rule changes if (acl in switch_acls and len(rules_to_add | rules_to_delete) == 0): continue switch_cmds.append('ip access-list %s dynamic' % acl) # Delete any stale rules for rule in rules_to_delete: switch_cmds.append('no ' + rule) # Add any missing rules for rule in rules_to_add: switch_cmds.append(rule) switch_cmds.append('exit') return switch_cmds
[ "def", "get_sync_acl_cmds", "(", "self", ",", "switch_acls", ",", "expected_acls", ")", ":", "switch_cmds", "=", "list", "(", ")", "# Delete any stale ACLs", "acls_to_delete", "=", "(", "set", "(", "switch_acls", ".", "keys", "(", ")", ")", "-", "set", "(", ...
Returns the list of commands required synchronize switch ACLs 1. Identify unexpected ACLs and delete them 2. Iterate over expected ACLs a. Add missing ACLs + all rules b. Delete unexpected rules c. Add missing rules
[ "Returns", "the", "list", "of", "commands", "required", "synchronize", "switch", "ACLs" ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/ml2/security_groups/switch_helper.py#L467-L500
train
41,422
openstack/networking-arista
networking_arista/ml2/security_groups/switch_helper.py
AristaSecurityGroupSyncHelper.get_sync_binding_cmds
def get_sync_binding_cmds(self, switch_bindings, expected_bindings): """Returns the list of commands required to synchronize ACL bindings 1. Delete any unexpected bindings 2. Add any missing bindings """ switch_cmds = list() # Update any necessary switch interface ACLs bindings_to_delete = switch_bindings - expected_bindings bindings_to_add = expected_bindings - switch_bindings for intf, acl, direction in bindings_to_delete: switch_cmds.extend(['interface %s' % intf, 'no ip access-group %s %s' % (acl, direction), 'exit']) for intf, acl, direction in bindings_to_add: switch_cmds.extend(['interface %s' % intf, 'ip access-group %s %s' % (acl, direction), 'exit']) return switch_cmds
python
def get_sync_binding_cmds(self, switch_bindings, expected_bindings): """Returns the list of commands required to synchronize ACL bindings 1. Delete any unexpected bindings 2. Add any missing bindings """ switch_cmds = list() # Update any necessary switch interface ACLs bindings_to_delete = switch_bindings - expected_bindings bindings_to_add = expected_bindings - switch_bindings for intf, acl, direction in bindings_to_delete: switch_cmds.extend(['interface %s' % intf, 'no ip access-group %s %s' % (acl, direction), 'exit']) for intf, acl, direction in bindings_to_add: switch_cmds.extend(['interface %s' % intf, 'ip access-group %s %s' % (acl, direction), 'exit']) return switch_cmds
[ "def", "get_sync_binding_cmds", "(", "self", ",", "switch_bindings", ",", "expected_bindings", ")", ":", "switch_cmds", "=", "list", "(", ")", "# Update any necessary switch interface ACLs", "bindings_to_delete", "=", "switch_bindings", "-", "expected_bindings", "bindings_t...
Returns the list of commands required to synchronize ACL bindings 1. Delete any unexpected bindings 2. Add any missing bindings
[ "Returns", "the", "list", "of", "commands", "required", "to", "synchronize", "ACL", "bindings" ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/ml2/security_groups/switch_helper.py#L502-L522
train
41,423
matthewgilbert/mapping
mapping/util.py
read_price_data
def read_price_data(files, name_func=None): """ Convenience function for reading in pricing data from csv files Parameters ---------- files: list List of strings refering to csv files to read data in from, first column should be dates name_func: func A function to apply to the file strings to infer the instrument name, used in the second level of the MultiIndex index. Default is the file name excluding the pathname and file ending, e.g. /path/to/file/name.csv -> name Returns ------- A pandas.DataFrame with a pandas.MultiIndex where the top level is pandas.Timestamps and the second level is instrument names. Columns are given by the csv file columns. """ if name_func is None: def name_func(x): return os.path.split(x)[1].split(".")[0] dfs = [] for f in files: name = name_func(f) df = pd.read_csv(f, index_col=0, parse_dates=True) df.sort_index(inplace=True) df.index = pd.MultiIndex.from_product([df.index, [name]], names=["date", "contract"]) dfs.append(df) return pd.concat(dfs, axis=0, sort=False).sort_index()
python
def read_price_data(files, name_func=None): """ Convenience function for reading in pricing data from csv files Parameters ---------- files: list List of strings refering to csv files to read data in from, first column should be dates name_func: func A function to apply to the file strings to infer the instrument name, used in the second level of the MultiIndex index. Default is the file name excluding the pathname and file ending, e.g. /path/to/file/name.csv -> name Returns ------- A pandas.DataFrame with a pandas.MultiIndex where the top level is pandas.Timestamps and the second level is instrument names. Columns are given by the csv file columns. """ if name_func is None: def name_func(x): return os.path.split(x)[1].split(".")[0] dfs = [] for f in files: name = name_func(f) df = pd.read_csv(f, index_col=0, parse_dates=True) df.sort_index(inplace=True) df.index = pd.MultiIndex.from_product([df.index, [name]], names=["date", "contract"]) dfs.append(df) return pd.concat(dfs, axis=0, sort=False).sort_index()
[ "def", "read_price_data", "(", "files", ",", "name_func", "=", "None", ")", ":", "if", "name_func", "is", "None", ":", "def", "name_func", "(", "x", ")", ":", "return", "os", ".", "path", ".", "split", "(", "x", ")", "[", "1", "]", ".", "split", ...
Convenience function for reading in pricing data from csv files Parameters ---------- files: list List of strings refering to csv files to read data in from, first column should be dates name_func: func A function to apply to the file strings to infer the instrument name, used in the second level of the MultiIndex index. Default is the file name excluding the pathname and file ending, e.g. /path/to/file/name.csv -> name Returns ------- A pandas.DataFrame with a pandas.MultiIndex where the top level is pandas.Timestamps and the second level is instrument names. Columns are given by the csv file columns.
[ "Convenience", "function", "for", "reading", "in", "pricing", "data", "from", "csv", "files" ]
24ea21acfe37a0ee273f63a273b5d24ea405e70d
https://github.com/matthewgilbert/mapping/blob/24ea21acfe37a0ee273f63a273b5d24ea405e70d/mapping/util.py#L6-L40
train
41,424
matthewgilbert/mapping
mapping/util.py
flatten
def flatten(weights): """ Flatten weights into a long DataFrame. Parameters ---------- weights: pandas.DataFrame or dict A DataFrame of instrument weights with a MultiIndex where the top level contains pandas. Timestamps and the second level is instrument names. The columns consist of generic names. If dict is given this should be a dict of pandas.DataFrame in the above format, with keys for different root generics, e.g. 'CL' Returns ------- A long DataFrame of weights, where columns are "date", "contract", "generic" and "weight". If a dictionary is passed, DataFrame will contain additional colum "key" containing the key value and be sorted according to this key value. Example ------- >>> import pandas as pd >>> import mapping.util as util >>> vals = [[1, 0], [0, 1], [1, 0], [0, 1]] >>> widx = pd.MultiIndex.from_tuples([(pd.Timestamp('2015-01-03'), 'CLF5'), ... (pd.Timestamp('2015-01-03'), 'CLG5'), ... (pd.Timestamp('2015-01-04'), 'CLG5'), ... (pd.Timestamp('2015-01-04'), 'CLH5')]) >>> weights = pd.DataFrame(vals, index=widx, columns=["CL1", "CL2"]) >>> util.flatten(weights) """ # NOQA if isinstance(weights, pd.DataFrame): wts = weights.stack().reset_index() wts.columns = ["date", "contract", "generic", "weight"] elif isinstance(weights, dict): wts = [] for key in sorted(weights.keys()): wt = weights[key].stack().reset_index() wt.columns = ["date", "contract", "generic", "weight"] wt.loc[:, "key"] = key wts.append(wt) wts = pd.concat(wts, axis=0).reset_index(drop=True) else: raise ValueError("weights must be pd.DataFrame or dict") return wts
python
def flatten(weights): """ Flatten weights into a long DataFrame. Parameters ---------- weights: pandas.DataFrame or dict A DataFrame of instrument weights with a MultiIndex where the top level contains pandas. Timestamps and the second level is instrument names. The columns consist of generic names. If dict is given this should be a dict of pandas.DataFrame in the above format, with keys for different root generics, e.g. 'CL' Returns ------- A long DataFrame of weights, where columns are "date", "contract", "generic" and "weight". If a dictionary is passed, DataFrame will contain additional colum "key" containing the key value and be sorted according to this key value. Example ------- >>> import pandas as pd >>> import mapping.util as util >>> vals = [[1, 0], [0, 1], [1, 0], [0, 1]] >>> widx = pd.MultiIndex.from_tuples([(pd.Timestamp('2015-01-03'), 'CLF5'), ... (pd.Timestamp('2015-01-03'), 'CLG5'), ... (pd.Timestamp('2015-01-04'), 'CLG5'), ... (pd.Timestamp('2015-01-04'), 'CLH5')]) >>> weights = pd.DataFrame(vals, index=widx, columns=["CL1", "CL2"]) >>> util.flatten(weights) """ # NOQA if isinstance(weights, pd.DataFrame): wts = weights.stack().reset_index() wts.columns = ["date", "contract", "generic", "weight"] elif isinstance(weights, dict): wts = [] for key in sorted(weights.keys()): wt = weights[key].stack().reset_index() wt.columns = ["date", "contract", "generic", "weight"] wt.loc[:, "key"] = key wts.append(wt) wts = pd.concat(wts, axis=0).reset_index(drop=True) else: raise ValueError("weights must be pd.DataFrame or dict") return wts
[ "def", "flatten", "(", "weights", ")", ":", "# NOQA", "if", "isinstance", "(", "weights", ",", "pd", ".", "DataFrame", ")", ":", "wts", "=", "weights", ".", "stack", "(", ")", ".", "reset_index", "(", ")", "wts", ".", "columns", "=", "[", "\"date\"",...
Flatten weights into a long DataFrame. Parameters ---------- weights: pandas.DataFrame or dict A DataFrame of instrument weights with a MultiIndex where the top level contains pandas. Timestamps and the second level is instrument names. The columns consist of generic names. If dict is given this should be a dict of pandas.DataFrame in the above format, with keys for different root generics, e.g. 'CL' Returns ------- A long DataFrame of weights, where columns are "date", "contract", "generic" and "weight". If a dictionary is passed, DataFrame will contain additional colum "key" containing the key value and be sorted according to this key value. Example ------- >>> import pandas as pd >>> import mapping.util as util >>> vals = [[1, 0], [0, 1], [1, 0], [0, 1]] >>> widx = pd.MultiIndex.from_tuples([(pd.Timestamp('2015-01-03'), 'CLF5'), ... (pd.Timestamp('2015-01-03'), 'CLG5'), ... (pd.Timestamp('2015-01-04'), 'CLG5'), ... (pd.Timestamp('2015-01-04'), 'CLH5')]) >>> weights = pd.DataFrame(vals, index=widx, columns=["CL1", "CL2"]) >>> util.flatten(weights)
[ "Flatten", "weights", "into", "a", "long", "DataFrame", "." ]
24ea21acfe37a0ee273f63a273b5d24ea405e70d
https://github.com/matthewgilbert/mapping/blob/24ea21acfe37a0ee273f63a273b5d24ea405e70d/mapping/util.py#L43-L89
train
41,425
matthewgilbert/mapping
mapping/util.py
unflatten
def unflatten(flat_weights): """ Pivot weights from long DataFrame into weighting matrix. Parameters ---------- flat_weights: pandas.DataFrame A long DataFrame of weights, where columns are "date", "contract", "generic", "weight" and optionally "key". If "key" column is present a dictionary of unflattened DataFrames is returned with the dictionary keys corresponding to the "key" column and each sub DataFrame containing rows for this key. Returns ------- A DataFrame or dict of DataFrames of instrument weights with a MultiIndex where the top level contains pandas.Timestamps and the second level is instrument names. The columns consist of generic names. If dict is returned the dict keys correspond to the "key" column of the input. Example ------- >>> import pandas as pd >>> from pandas import Timestamp as TS >>> import mapping.util as util >>> long_wts = pd.DataFrame( ... {"date": [TS('2015-01-03')] * 4 + [TS('2015-01-04')] * 4, ... "contract": ['CLF5'] * 2 + ['CLG5'] * 4 + ['CLH5'] * 2, ... "generic": ["CL1", "CL2"] * 4, ... "weight": [1, 0, 0, 1, 1, 0, 0, 1]} ... ).loc[:, ["date", "contract", "generic", "weight"]] >>> util.unflatten(long_wts) See also: calc_rets() """ # NOQA if flat_weights.columns.contains("key"): weights = {} for key in flat_weights.loc[:, "key"].unique(): flt_wts = flat_weights.loc[flat_weights.loc[:, "key"] == key, :] flt_wts = flt_wts.drop(labels="key", axis=1) wts = flt_wts.pivot_table(index=["date", "contract"], columns=["generic"], values=["weight"]) wts.columns = wts.columns.droplevel(0) weights[key] = wts else: weights = flat_weights.pivot_table(index=["date", "contract"], columns=["generic"], values=["weight"]) weights.columns = weights.columns.droplevel(0) return weights
python
def unflatten(flat_weights): """ Pivot weights from long DataFrame into weighting matrix. Parameters ---------- flat_weights: pandas.DataFrame A long DataFrame of weights, where columns are "date", "contract", "generic", "weight" and optionally "key". If "key" column is present a dictionary of unflattened DataFrames is returned with the dictionary keys corresponding to the "key" column and each sub DataFrame containing rows for this key. Returns ------- A DataFrame or dict of DataFrames of instrument weights with a MultiIndex where the top level contains pandas.Timestamps and the second level is instrument names. The columns consist of generic names. If dict is returned the dict keys correspond to the "key" column of the input. Example ------- >>> import pandas as pd >>> from pandas import Timestamp as TS >>> import mapping.util as util >>> long_wts = pd.DataFrame( ... {"date": [TS('2015-01-03')] * 4 + [TS('2015-01-04')] * 4, ... "contract": ['CLF5'] * 2 + ['CLG5'] * 4 + ['CLH5'] * 2, ... "generic": ["CL1", "CL2"] * 4, ... "weight": [1, 0, 0, 1, 1, 0, 0, 1]} ... ).loc[:, ["date", "contract", "generic", "weight"]] >>> util.unflatten(long_wts) See also: calc_rets() """ # NOQA if flat_weights.columns.contains("key"): weights = {} for key in flat_weights.loc[:, "key"].unique(): flt_wts = flat_weights.loc[flat_weights.loc[:, "key"] == key, :] flt_wts = flt_wts.drop(labels="key", axis=1) wts = flt_wts.pivot_table(index=["date", "contract"], columns=["generic"], values=["weight"]) wts.columns = wts.columns.droplevel(0) weights[key] = wts else: weights = flat_weights.pivot_table(index=["date", "contract"], columns=["generic"], values=["weight"]) weights.columns = weights.columns.droplevel(0) return weights
[ "def", "unflatten", "(", "flat_weights", ")", ":", "# NOQA", "if", "flat_weights", ".", "columns", ".", "contains", "(", "\"key\"", ")", ":", "weights", "=", "{", "}", "for", "key", "in", "flat_weights", ".", "loc", "[", ":", ",", "\"key\"", "]", ".", ...
Pivot weights from long DataFrame into weighting matrix. Parameters ---------- flat_weights: pandas.DataFrame A long DataFrame of weights, where columns are "date", "contract", "generic", "weight" and optionally "key". If "key" column is present a dictionary of unflattened DataFrames is returned with the dictionary keys corresponding to the "key" column and each sub DataFrame containing rows for this key. Returns ------- A DataFrame or dict of DataFrames of instrument weights with a MultiIndex where the top level contains pandas.Timestamps and the second level is instrument names. The columns consist of generic names. If dict is returned the dict keys correspond to the "key" column of the input. Example ------- >>> import pandas as pd >>> from pandas import Timestamp as TS >>> import mapping.util as util >>> long_wts = pd.DataFrame( ... {"date": [TS('2015-01-03')] * 4 + [TS('2015-01-04')] * 4, ... "contract": ['CLF5'] * 2 + ['CLG5'] * 4 + ['CLH5'] * 2, ... "generic": ["CL1", "CL2"] * 4, ... "weight": [1, 0, 0, 1, 1, 0, 0, 1]} ... ).loc[:, ["date", "contract", "generic", "weight"]] >>> util.unflatten(long_wts) See also: calc_rets()
[ "Pivot", "weights", "from", "long", "DataFrame", "into", "weighting", "matrix", "." ]
24ea21acfe37a0ee273f63a273b5d24ea405e70d
https://github.com/matthewgilbert/mapping/blob/24ea21acfe37a0ee273f63a273b5d24ea405e70d/mapping/util.py#L92-L143
train
41,426
matthewgilbert/mapping
mapping/util.py
calc_rets
def calc_rets(returns, weights): """ Calculate continuous return series for futures instruments. These consist of weighted underlying instrument returns, who's weights can vary over time. Parameters ---------- returns: pandas.Series or dict A Series of instrument returns with a MultiIndex where the top level is pandas.Timestamps and the second level is instrument names. Values correspond to one period instrument returns. returns should be available for all for all Timestamps and instruments provided in weights. If dict is given this should be a dict of pandas.Series in the above format, with keys which are a subset of the keys given in weights weights: pandas.DataFrame or dict A DataFrame of instrument weights with a MultiIndex where the top level contains pandas.Timestamps and the second level is instrument names. The columns consist of generic names. If dict is given this should be a dict of pandas.DataFrame in the above format, with keys for different root generics, e.g. 'CL' Returns ------- A pandas.DataFrame of continuous returns for generics. The index is pandas.Timestamps and the columns is generic names, corresponding to weights.columns Examples -------- >>> import pandas as pd >>> import mapping.util as util >>> idx = pd.MultiIndex.from_tuples([(pd.Timestamp('2015-01-02'), 'CLF5'), ... (pd.Timestamp('2015-01-03'), 'CLF5'), ... (pd.Timestamp('2015-01-03'), 'CLG5'), ... (pd.Timestamp('2015-01-04'), 'CLF5'), ... (pd.Timestamp('2015-01-04'), 'CLG5'), ... (pd.Timestamp('2015-01-05'), 'CLG5')]) >>> price = pd.Series([45.63, 45.85, 46.13, 46.05, 46.25, 46.20], index=idx) >>> vals = [1, 1/2, 1/2, 1] >>> widx = pd.MultiIndex.from_tuples([(pd.Timestamp('2015-01-03'), 'CLF5'), ... (pd.Timestamp('2015-01-04'), 'CLF5'), ... (pd.Timestamp('2015-01-04'), 'CLG5'), ... 
(pd.Timestamp('2015-01-05'), 'CLG5')]) >>> weights = pd.DataFrame(vals, index=widx, columns=["CL1"]) >>> irets = price.groupby(level=-1).pct_change() >>> util.calc_rets(irets, weights) """ # NOQA if not isinstance(returns, dict): returns = {"": returns} if not isinstance(weights, dict): weights = {"": weights} generic_superset = [] for root in weights: generic_superset.extend(weights[root].columns.tolist()) if len(set(generic_superset)) != len(generic_superset): raise ValueError("Columns for weights must all be unique") _check_indices(returns, weights) grets = [] cols = [] for root in returns: root_wts = weights[root] root_rets = returns[root] for generic in root_wts.columns: gnrc_wts = root_wts.loc[:, generic] # drop generics where weight is 0, this avoids potential KeyError # in later indexing of rets even when ret has weight of 0 gnrc_wts = gnrc_wts.loc[gnrc_wts != 0] rets = root_rets.loc[gnrc_wts.index] # groupby time group_rets = (rets * gnrc_wts).groupby(level=0) grets.append(group_rets.apply(pd.DataFrame.sum, skipna=False)) cols.extend(root_wts.columns.tolist()) rets = pd.concat(grets, axis=1, keys=cols).sort_index(axis=1) return rets
python
def calc_rets(returns, weights): """ Calculate continuous return series for futures instruments. These consist of weighted underlying instrument returns, who's weights can vary over time. Parameters ---------- returns: pandas.Series or dict A Series of instrument returns with a MultiIndex where the top level is pandas.Timestamps and the second level is instrument names. Values correspond to one period instrument returns. returns should be available for all for all Timestamps and instruments provided in weights. If dict is given this should be a dict of pandas.Series in the above format, with keys which are a subset of the keys given in weights weights: pandas.DataFrame or dict A DataFrame of instrument weights with a MultiIndex where the top level contains pandas.Timestamps and the second level is instrument names. The columns consist of generic names. If dict is given this should be a dict of pandas.DataFrame in the above format, with keys for different root generics, e.g. 'CL' Returns ------- A pandas.DataFrame of continuous returns for generics. The index is pandas.Timestamps and the columns is generic names, corresponding to weights.columns Examples -------- >>> import pandas as pd >>> import mapping.util as util >>> idx = pd.MultiIndex.from_tuples([(pd.Timestamp('2015-01-02'), 'CLF5'), ... (pd.Timestamp('2015-01-03'), 'CLF5'), ... (pd.Timestamp('2015-01-03'), 'CLG5'), ... (pd.Timestamp('2015-01-04'), 'CLF5'), ... (pd.Timestamp('2015-01-04'), 'CLG5'), ... (pd.Timestamp('2015-01-05'), 'CLG5')]) >>> price = pd.Series([45.63, 45.85, 46.13, 46.05, 46.25, 46.20], index=idx) >>> vals = [1, 1/2, 1/2, 1] >>> widx = pd.MultiIndex.from_tuples([(pd.Timestamp('2015-01-03'), 'CLF5'), ... (pd.Timestamp('2015-01-04'), 'CLF5'), ... (pd.Timestamp('2015-01-04'), 'CLG5'), ... 
(pd.Timestamp('2015-01-05'), 'CLG5')]) >>> weights = pd.DataFrame(vals, index=widx, columns=["CL1"]) >>> irets = price.groupby(level=-1).pct_change() >>> util.calc_rets(irets, weights) """ # NOQA if not isinstance(returns, dict): returns = {"": returns} if not isinstance(weights, dict): weights = {"": weights} generic_superset = [] for root in weights: generic_superset.extend(weights[root].columns.tolist()) if len(set(generic_superset)) != len(generic_superset): raise ValueError("Columns for weights must all be unique") _check_indices(returns, weights) grets = [] cols = [] for root in returns: root_wts = weights[root] root_rets = returns[root] for generic in root_wts.columns: gnrc_wts = root_wts.loc[:, generic] # drop generics where weight is 0, this avoids potential KeyError # in later indexing of rets even when ret has weight of 0 gnrc_wts = gnrc_wts.loc[gnrc_wts != 0] rets = root_rets.loc[gnrc_wts.index] # groupby time group_rets = (rets * gnrc_wts).groupby(level=0) grets.append(group_rets.apply(pd.DataFrame.sum, skipna=False)) cols.extend(root_wts.columns.tolist()) rets = pd.concat(grets, axis=1, keys=cols).sort_index(axis=1) return rets
[ "def", "calc_rets", "(", "returns", ",", "weights", ")", ":", "# NOQA", "if", "not", "isinstance", "(", "returns", ",", "dict", ")", ":", "returns", "=", "{", "\"\"", ":", "returns", "}", "if", "not", "isinstance", "(", "weights", ",", "dict", ")", "...
Calculate continuous return series for futures instruments. These consist of weighted underlying instrument returns, who's weights can vary over time. Parameters ---------- returns: pandas.Series or dict A Series of instrument returns with a MultiIndex where the top level is pandas.Timestamps and the second level is instrument names. Values correspond to one period instrument returns. returns should be available for all for all Timestamps and instruments provided in weights. If dict is given this should be a dict of pandas.Series in the above format, with keys which are a subset of the keys given in weights weights: pandas.DataFrame or dict A DataFrame of instrument weights with a MultiIndex where the top level contains pandas.Timestamps and the second level is instrument names. The columns consist of generic names. If dict is given this should be a dict of pandas.DataFrame in the above format, with keys for different root generics, e.g. 'CL' Returns ------- A pandas.DataFrame of continuous returns for generics. The index is pandas.Timestamps and the columns is generic names, corresponding to weights.columns Examples -------- >>> import pandas as pd >>> import mapping.util as util >>> idx = pd.MultiIndex.from_tuples([(pd.Timestamp('2015-01-02'), 'CLF5'), ... (pd.Timestamp('2015-01-03'), 'CLF5'), ... (pd.Timestamp('2015-01-03'), 'CLG5'), ... (pd.Timestamp('2015-01-04'), 'CLF5'), ... (pd.Timestamp('2015-01-04'), 'CLG5'), ... (pd.Timestamp('2015-01-05'), 'CLG5')]) >>> price = pd.Series([45.63, 45.85, 46.13, 46.05, 46.25, 46.20], index=idx) >>> vals = [1, 1/2, 1/2, 1] >>> widx = pd.MultiIndex.from_tuples([(pd.Timestamp('2015-01-03'), 'CLF5'), ... (pd.Timestamp('2015-01-04'), 'CLF5'), ... (pd.Timestamp('2015-01-04'), 'CLG5'), ... (pd.Timestamp('2015-01-05'), 'CLG5')]) >>> weights = pd.DataFrame(vals, index=widx, columns=["CL1"]) >>> irets = price.groupby(level=-1).pct_change() >>> util.calc_rets(irets, weights)
[ "Calculate", "continuous", "return", "series", "for", "futures", "instruments", ".", "These", "consist", "of", "weighted", "underlying", "instrument", "returns", "who", "s", "weights", "can", "vary", "over", "time", "." ]
24ea21acfe37a0ee273f63a273b5d24ea405e70d
https://github.com/matthewgilbert/mapping/blob/24ea21acfe37a0ee273f63a273b5d24ea405e70d/mapping/util.py#L146-L225
train
41,427
matthewgilbert/mapping
mapping/util.py
calc_trades
def calc_trades(current_contracts, desired_holdings, trade_weights, prices, multipliers, **kwargs): """ Calculate the number of tradeable contracts for rebalancing from a set of current contract holdings to a set of desired generic notional holdings based on prevailing prices and mapping from generics to tradeable instruments. Differences between current holdings and desired holdings are treated as 0. Zero trades are dropped. Parameters ---------- current_contracts: pandas.Series Series of current number of contracts held for tradeable instruments. Can pass 0 if all holdings are 0. desired_holdings: pandas.Series Series of desired holdings in base notional currency of generics. Index is generic contracts, these should be the same generics as in trade_weights. trade_weights: pandas.DataFrame or dict A pandas.DataFrame of loadings of generic contracts on tradeable instruments **for a given date**. The columns refer to generic contracts and the index is strings representing instrument names. If dict is given keys should be root generic names, e.g. 'CL', and values should be pandas.DataFrames of loadings. The union of all columns should be a superset of the desired_holdings.index prices: pandas.Series Series of instrument prices. Index is instrument name and values are number of contracts. Extra instrument prices will be ignored. multipliers: pandas.Series Series of instrument multipliers. Index is instrument name and values are the multiplier associated with the contract. multipliers.index should be a superset of mapped desired_holdings intruments. kwargs: key word arguments Key word arguments to be passed to to_contracts() Returns ------- A pandas.Series of instrument contract trades, lexigraphically sorted. Example ------- >>> import pandas as pd >>> import mapping.util as util >>> wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]], ... index=["CLX16", "CLZ16", "CLF17"], ... 
columns=["CL1", "CL2"]) >>> desired_holdings = pd.Series([200000, -50000], index=["CL1", "CL2"]) >>> current_contracts = pd.Series([0, 1, 0], ... index=['CLX16', 'CLZ16', 'CLF17']) >>> prices = pd.Series([50.32, 50.41, 50.48], ... index=['CLX16', 'CLZ16', 'CLF17']) >>> multipliers = pd.Series([100, 100, 100], ... index=['CLX16', 'CLZ16', 'CLF17']) >>> trades = util.calc_trades(current_contracts, desired_holdings, wts, ... prices, multipliers) """ if not isinstance(trade_weights, dict): trade_weights = {"": trade_weights} generics = [] for key in trade_weights: generics.extend(trade_weights[key].columns) if not set(desired_holdings.index).issubset(set(generics)): raise ValueError("'desired_holdings.index' contains values which " "cannot be mapped to tradeables.\n" "Received: 'desired_holdings.index'\n {0}\n" "Expected in 'trade_weights' set of columns:\n {1}\n" .format(sorted(desired_holdings.index), sorted(generics))) desired_contracts = [] for root_key in trade_weights: gnrc_weights = trade_weights[root_key] subset = gnrc_weights.columns.intersection(desired_holdings.index) gnrc_des_hlds = desired_holdings.loc[subset] gnrc_weights = gnrc_weights.loc[:, subset] # drop indexes where all non zero weights were in columns dropped above gnrc_weights = gnrc_weights.loc[~(gnrc_weights == 0).all(axis=1)] instr_des_hlds = gnrc_des_hlds * gnrc_weights instr_des_hlds = instr_des_hlds.sum(axis=1) wprices = prices.loc[instr_des_hlds.index] desired_contracts.append(to_contracts(instr_des_hlds, wprices, multipliers, **kwargs)) desired_contracts = pd.concat(desired_contracts, axis=0) trades = desired_contracts.subtract(current_contracts, fill_value=0) trades = trades.loc[trades != 0] trades = trades.sort_index() return trades
python
def calc_trades(current_contracts, desired_holdings, trade_weights, prices, multipliers, **kwargs): """ Calculate the number of tradeable contracts for rebalancing from a set of current contract holdings to a set of desired generic notional holdings based on prevailing prices and mapping from generics to tradeable instruments. Differences between current holdings and desired holdings are treated as 0. Zero trades are dropped. Parameters ---------- current_contracts: pandas.Series Series of current number of contracts held for tradeable instruments. Can pass 0 if all holdings are 0. desired_holdings: pandas.Series Series of desired holdings in base notional currency of generics. Index is generic contracts, these should be the same generics as in trade_weights. trade_weights: pandas.DataFrame or dict A pandas.DataFrame of loadings of generic contracts on tradeable instruments **for a given date**. The columns refer to generic contracts and the index is strings representing instrument names. If dict is given keys should be root generic names, e.g. 'CL', and values should be pandas.DataFrames of loadings. The union of all columns should be a superset of the desired_holdings.index prices: pandas.Series Series of instrument prices. Index is instrument name and values are number of contracts. Extra instrument prices will be ignored. multipliers: pandas.Series Series of instrument multipliers. Index is instrument name and values are the multiplier associated with the contract. multipliers.index should be a superset of mapped desired_holdings intruments. kwargs: key word arguments Key word arguments to be passed to to_contracts() Returns ------- A pandas.Series of instrument contract trades, lexigraphically sorted. Example ------- >>> import pandas as pd >>> import mapping.util as util >>> wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]], ... index=["CLX16", "CLZ16", "CLF17"], ... 
columns=["CL1", "CL2"]) >>> desired_holdings = pd.Series([200000, -50000], index=["CL1", "CL2"]) >>> current_contracts = pd.Series([0, 1, 0], ... index=['CLX16', 'CLZ16', 'CLF17']) >>> prices = pd.Series([50.32, 50.41, 50.48], ... index=['CLX16', 'CLZ16', 'CLF17']) >>> multipliers = pd.Series([100, 100, 100], ... index=['CLX16', 'CLZ16', 'CLF17']) >>> trades = util.calc_trades(current_contracts, desired_holdings, wts, ... prices, multipliers) """ if not isinstance(trade_weights, dict): trade_weights = {"": trade_weights} generics = [] for key in trade_weights: generics.extend(trade_weights[key].columns) if not set(desired_holdings.index).issubset(set(generics)): raise ValueError("'desired_holdings.index' contains values which " "cannot be mapped to tradeables.\n" "Received: 'desired_holdings.index'\n {0}\n" "Expected in 'trade_weights' set of columns:\n {1}\n" .format(sorted(desired_holdings.index), sorted(generics))) desired_contracts = [] for root_key in trade_weights: gnrc_weights = trade_weights[root_key] subset = gnrc_weights.columns.intersection(desired_holdings.index) gnrc_des_hlds = desired_holdings.loc[subset] gnrc_weights = gnrc_weights.loc[:, subset] # drop indexes where all non zero weights were in columns dropped above gnrc_weights = gnrc_weights.loc[~(gnrc_weights == 0).all(axis=1)] instr_des_hlds = gnrc_des_hlds * gnrc_weights instr_des_hlds = instr_des_hlds.sum(axis=1) wprices = prices.loc[instr_des_hlds.index] desired_contracts.append(to_contracts(instr_des_hlds, wprices, multipliers, **kwargs)) desired_contracts = pd.concat(desired_contracts, axis=0) trades = desired_contracts.subtract(current_contracts, fill_value=0) trades = trades.loc[trades != 0] trades = trades.sort_index() return trades
[ "def", "calc_trades", "(", "current_contracts", ",", "desired_holdings", ",", "trade_weights", ",", "prices", ",", "multipliers", ",", "*", "*", "kwargs", ")", ":", "if", "not", "isinstance", "(", "trade_weights", ",", "dict", ")", ":", "trade_weights", "=", ...
Calculate the number of tradeable contracts for rebalancing from a set of current contract holdings to a set of desired generic notional holdings based on prevailing prices and mapping from generics to tradeable instruments. Differences between current holdings and desired holdings are treated as 0. Zero trades are dropped. Parameters ---------- current_contracts: pandas.Series Series of current number of contracts held for tradeable instruments. Can pass 0 if all holdings are 0. desired_holdings: pandas.Series Series of desired holdings in base notional currency of generics. Index is generic contracts, these should be the same generics as in trade_weights. trade_weights: pandas.DataFrame or dict A pandas.DataFrame of loadings of generic contracts on tradeable instruments **for a given date**. The columns refer to generic contracts and the index is strings representing instrument names. If dict is given keys should be root generic names, e.g. 'CL', and values should be pandas.DataFrames of loadings. The union of all columns should be a superset of the desired_holdings.index prices: pandas.Series Series of instrument prices. Index is instrument name and values are number of contracts. Extra instrument prices will be ignored. multipliers: pandas.Series Series of instrument multipliers. Index is instrument name and values are the multiplier associated with the contract. multipliers.index should be a superset of mapped desired_holdings intruments. kwargs: key word arguments Key word arguments to be passed to to_contracts() Returns ------- A pandas.Series of instrument contract trades, lexigraphically sorted. Example ------- >>> import pandas as pd >>> import mapping.util as util >>> wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]], ... index=["CLX16", "CLZ16", "CLF17"], ... columns=["CL1", "CL2"]) >>> desired_holdings = pd.Series([200000, -50000], index=["CL1", "CL2"]) >>> current_contracts = pd.Series([0, 1, 0], ... 
index=['CLX16', 'CLZ16', 'CLF17']) >>> prices = pd.Series([50.32, 50.41, 50.48], ... index=['CLX16', 'CLZ16', 'CLF17']) >>> multipliers = pd.Series([100, 100, 100], ... index=['CLX16', 'CLZ16', 'CLF17']) >>> trades = util.calc_trades(current_contracts, desired_holdings, wts, ... prices, multipliers)
[ "Calculate", "the", "number", "of", "tradeable", "contracts", "for", "rebalancing", "from", "a", "set", "of", "current", "contract", "holdings", "to", "a", "set", "of", "desired", "generic", "notional", "holdings", "based", "on", "prevailing", "prices", "and", ...
24ea21acfe37a0ee273f63a273b5d24ea405e70d
https://github.com/matthewgilbert/mapping/blob/24ea21acfe37a0ee273f63a273b5d24ea405e70d/mapping/util.py#L365-L458
train
41,428
matthewgilbert/mapping
mapping/util.py
to_notional
def to_notional(instruments, prices, multipliers, desired_ccy=None, instr_fx=None, fx_rates=None): """ Convert number of contracts of tradeable instruments to notional value of tradeable instruments in a desired currency. Parameters ---------- instruments: pandas.Series Series of instrument holdings. Index is instrument name and values are number of contracts. prices: pandas.Series Series of instrument prices. Index is instrument name and values are instrument prices. prices.index should be a superset of instruments.index otherwise NaN returned for instruments without prices multipliers: pandas.Series Series of instrument multipliers. Index is instrument name and values are the multiplier associated with the contract. multipliers.index should be a superset of instruments.index desired_ccy: str Three letter string representing desired currency to convert notional values to, e.g. 'USD'. If None is given currency conversion is ignored. instr_fx: pandas.Series Series of instrument fx denominations. Index is instrument name and values are three letter strings representing the currency the instrument is denominated in. instr_fx.index should match prices.index fx_rates: pandas.Series Series of fx rates used for conversion to desired_ccy. Index is strings representing the FX pair, e.g. 'AUDUSD' or 'USDCAD'. Values are the corresponding exchange rates. Returns ------- pandas.Series of notional amounts of instruments with Index of instruments names Example ------- >>> import pandas as pd >>> import mapping.util as util >>> current_contracts = pd.Series([-1, 1], index=['CLX16', 'CLZ16']) >>> prices = pd.Series([50.32, 50.41], index=['CLX16', 'CLZ16']) >>> multipliers = pd.Series([100, 100], index=['CLX16', 'CLZ16']) >>> ntln = util.to_notional(current_contracts, prices, multipliers) """ notionals = _instr_conv(instruments, prices, multipliers, True, desired_ccy, instr_fx, fx_rates) return notionals
python
def to_notional(instruments, prices, multipliers, desired_ccy=None, instr_fx=None, fx_rates=None): """ Convert number of contracts of tradeable instruments to notional value of tradeable instruments in a desired currency. Parameters ---------- instruments: pandas.Series Series of instrument holdings. Index is instrument name and values are number of contracts. prices: pandas.Series Series of instrument prices. Index is instrument name and values are instrument prices. prices.index should be a superset of instruments.index otherwise NaN returned for instruments without prices multipliers: pandas.Series Series of instrument multipliers. Index is instrument name and values are the multiplier associated with the contract. multipliers.index should be a superset of instruments.index desired_ccy: str Three letter string representing desired currency to convert notional values to, e.g. 'USD'. If None is given currency conversion is ignored. instr_fx: pandas.Series Series of instrument fx denominations. Index is instrument name and values are three letter strings representing the currency the instrument is denominated in. instr_fx.index should match prices.index fx_rates: pandas.Series Series of fx rates used for conversion to desired_ccy. Index is strings representing the FX pair, e.g. 'AUDUSD' or 'USDCAD'. Values are the corresponding exchange rates. Returns ------- pandas.Series of notional amounts of instruments with Index of instruments names Example ------- >>> import pandas as pd >>> import mapping.util as util >>> current_contracts = pd.Series([-1, 1], index=['CLX16', 'CLZ16']) >>> prices = pd.Series([50.32, 50.41], index=['CLX16', 'CLZ16']) >>> multipliers = pd.Series([100, 100], index=['CLX16', 'CLZ16']) >>> ntln = util.to_notional(current_contracts, prices, multipliers) """ notionals = _instr_conv(instruments, prices, multipliers, True, desired_ccy, instr_fx, fx_rates) return notionals
[ "def", "to_notional", "(", "instruments", ",", "prices", ",", "multipliers", ",", "desired_ccy", "=", "None", ",", "instr_fx", "=", "None", ",", "fx_rates", "=", "None", ")", ":", "notionals", "=", "_instr_conv", "(", "instruments", ",", "prices", ",", "mu...
Convert number of contracts of tradeable instruments to notional value of tradeable instruments in a desired currency. Parameters ---------- instruments: pandas.Series Series of instrument holdings. Index is instrument name and values are number of contracts. prices: pandas.Series Series of instrument prices. Index is instrument name and values are instrument prices. prices.index should be a superset of instruments.index otherwise NaN returned for instruments without prices multipliers: pandas.Series Series of instrument multipliers. Index is instrument name and values are the multiplier associated with the contract. multipliers.index should be a superset of instruments.index desired_ccy: str Three letter string representing desired currency to convert notional values to, e.g. 'USD'. If None is given currency conversion is ignored. instr_fx: pandas.Series Series of instrument fx denominations. Index is instrument name and values are three letter strings representing the currency the instrument is denominated in. instr_fx.index should match prices.index fx_rates: pandas.Series Series of fx rates used for conversion to desired_ccy. Index is strings representing the FX pair, e.g. 'AUDUSD' or 'USDCAD'. Values are the corresponding exchange rates. Returns ------- pandas.Series of notional amounts of instruments with Index of instruments names Example ------- >>> import pandas as pd >>> import mapping.util as util >>> current_contracts = pd.Series([-1, 1], index=['CLX16', 'CLZ16']) >>> prices = pd.Series([50.32, 50.41], index=['CLX16', 'CLZ16']) >>> multipliers = pd.Series([100, 100], index=['CLX16', 'CLZ16']) >>> ntln = util.to_notional(current_contracts, prices, multipliers)
[ "Convert", "number", "of", "contracts", "of", "tradeable", "instruments", "to", "notional", "value", "of", "tradeable", "instruments", "in", "a", "desired", "currency", "." ]
24ea21acfe37a0ee273f63a273b5d24ea405e70d
https://github.com/matthewgilbert/mapping/blob/24ea21acfe37a0ee273f63a273b5d24ea405e70d/mapping/util.py#L461-L508
train
41,429
matthewgilbert/mapping
mapping/util.py
to_contracts
def to_contracts(instruments, prices, multipliers, desired_ccy=None, instr_fx=None, fx_rates=None, rounder=None): """ Convert notional amount of tradeable instruments to number of instrument contracts, rounding to nearest integer number of contracts. Parameters ---------- instruments: pandas.Series Series of instrument holdings. Index is instrument name and values are notional amount on instrument. prices: pandas.Series Series of instrument prices. Index is instrument name and values are instrument prices. prices.index should be a superset of instruments.index multipliers: pandas.Series Series of instrument multipliers. Index is instrument name and values are the multiplier associated with the contract. multipliers.index should be a superset of instruments.index desired_ccy: str Three letter string representing desired currency to convert notional values to, e.g. 'USD'. If None is given currency conversion is ignored. instr_fx: pandas.Series Series of instrument fx denominations. Index is instrument name and values are three letter strings representing the currency the instrument is denominated in. instr_fx.index should match prices.index fx_rates: pandas.Series Series of fx rates used for conversion to desired_ccy. Index is strings representing the FX pair, e.g. 'AUDUSD' or 'USDCAD'. Values are the corresponding exchange rates. rounder: function Function to round pd.Series contracts to integers, if None default pd.Series.round is used. Returns ------- pandas.Series of contract numbers of instruments with Index of instruments names """ contracts = _instr_conv(instruments, prices, multipliers, False, desired_ccy, instr_fx, fx_rates) if rounder is None: rounder = pd.Series.round contracts = rounder(contracts) contracts = contracts.astype(int) return contracts
python
def to_contracts(instruments, prices, multipliers, desired_ccy=None, instr_fx=None, fx_rates=None, rounder=None): """ Convert notional amount of tradeable instruments to number of instrument contracts, rounding to nearest integer number of contracts. Parameters ---------- instruments: pandas.Series Series of instrument holdings. Index is instrument name and values are notional amount on instrument. prices: pandas.Series Series of instrument prices. Index is instrument name and values are instrument prices. prices.index should be a superset of instruments.index multipliers: pandas.Series Series of instrument multipliers. Index is instrument name and values are the multiplier associated with the contract. multipliers.index should be a superset of instruments.index desired_ccy: str Three letter string representing desired currency to convert notional values to, e.g. 'USD'. If None is given currency conversion is ignored. instr_fx: pandas.Series Series of instrument fx denominations. Index is instrument name and values are three letter strings representing the currency the instrument is denominated in. instr_fx.index should match prices.index fx_rates: pandas.Series Series of fx rates used for conversion to desired_ccy. Index is strings representing the FX pair, e.g. 'AUDUSD' or 'USDCAD'. Values are the corresponding exchange rates. rounder: function Function to round pd.Series contracts to integers, if None default pd.Series.round is used. Returns ------- pandas.Series of contract numbers of instruments with Index of instruments names """ contracts = _instr_conv(instruments, prices, multipliers, False, desired_ccy, instr_fx, fx_rates) if rounder is None: rounder = pd.Series.round contracts = rounder(contracts) contracts = contracts.astype(int) return contracts
[ "def", "to_contracts", "(", "instruments", ",", "prices", ",", "multipliers", ",", "desired_ccy", "=", "None", ",", "instr_fx", "=", "None", ",", "fx_rates", "=", "None", ",", "rounder", "=", "None", ")", ":", "contracts", "=", "_instr_conv", "(", "instrum...
Convert notional amount of tradeable instruments to number of instrument contracts, rounding to nearest integer number of contracts. Parameters ---------- instruments: pandas.Series Series of instrument holdings. Index is instrument name and values are notional amount on instrument. prices: pandas.Series Series of instrument prices. Index is instrument name and values are instrument prices. prices.index should be a superset of instruments.index multipliers: pandas.Series Series of instrument multipliers. Index is instrument name and values are the multiplier associated with the contract. multipliers.index should be a superset of instruments.index desired_ccy: str Three letter string representing desired currency to convert notional values to, e.g. 'USD'. If None is given currency conversion is ignored. instr_fx: pandas.Series Series of instrument fx denominations. Index is instrument name and values are three letter strings representing the currency the instrument is denominated in. instr_fx.index should match prices.index fx_rates: pandas.Series Series of fx rates used for conversion to desired_ccy. Index is strings representing the FX pair, e.g. 'AUDUSD' or 'USDCAD'. Values are the corresponding exchange rates. rounder: function Function to round pd.Series contracts to integers, if None default pd.Series.round is used. Returns ------- pandas.Series of contract numbers of instruments with Index of instruments names
[ "Convert", "notional", "amount", "of", "tradeable", "instruments", "to", "number", "of", "instrument", "contracts", "rounding", "to", "nearest", "integer", "number", "of", "contracts", "." ]
24ea21acfe37a0ee273f63a273b5d24ea405e70d
https://github.com/matthewgilbert/mapping/blob/24ea21acfe37a0ee273f63a273b5d24ea405e70d/mapping/util.py#L511-L557
train
41,430
matthewgilbert/mapping
mapping/util.py
get_multiplier
def get_multiplier(weights, root_generic_multiplier): """ Determine tradeable instrument multiplier based on generic asset multipliers and weights mapping from generics to tradeables. Parameters ---------- weights: pandas.DataFrame or dict A pandas.DataFrame of loadings of generic contracts on tradeable instruments **for a given date**. The columns are integers refering to generic number indexed from 0, e.g. [0, 1], and the index is strings representing instrument names. If dict is given keys should be generic instrument names, e.g. 'CL', and values should be pandas.DataFrames of loadings. The union of all indexes should be a superset of the instruments.index root_generic_multiplier: pandas.Series Series of multipliers for generic instruments lexigraphically sorted. If a dictionary of weights is given, root_generic_multiplier.index should correspond to the weights keys. Returns ------- A pandas.Series of multipliers for tradeable instruments. Examples -------- >>> import pandas as pd >>> import mapping.util as util >>> wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]], ... index=["CLX16", "CLZ16", "CLF17"], ... columns=[0, 1]) >>> ast_mult = pd.Series([1000], index=["CL"]) >>> util.get_multiplier(wts, ast_mult) """ if len(root_generic_multiplier) > 1 and not isinstance(weights, dict): raise ValueError("For multiple generic instruments weights must be a " "dictionary") mults = [] intrs = [] for ast, multiplier in root_generic_multiplier.iteritems(): if isinstance(weights, dict): weights_ast = weights[ast].index else: weights_ast = weights.index mults.extend(np.repeat(multiplier, len(weights_ast))) intrs.extend(weights_ast) imults = pd.Series(mults, intrs) imults = imults.sort_index() return imults
python
def get_multiplier(weights, root_generic_multiplier): """ Determine tradeable instrument multiplier based on generic asset multipliers and weights mapping from generics to tradeables. Parameters ---------- weights: pandas.DataFrame or dict A pandas.DataFrame of loadings of generic contracts on tradeable instruments **for a given date**. The columns are integers refering to generic number indexed from 0, e.g. [0, 1], and the index is strings representing instrument names. If dict is given keys should be generic instrument names, e.g. 'CL', and values should be pandas.DataFrames of loadings. The union of all indexes should be a superset of the instruments.index root_generic_multiplier: pandas.Series Series of multipliers for generic instruments lexigraphically sorted. If a dictionary of weights is given, root_generic_multiplier.index should correspond to the weights keys. Returns ------- A pandas.Series of multipliers for tradeable instruments. Examples -------- >>> import pandas as pd >>> import mapping.util as util >>> wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]], ... index=["CLX16", "CLZ16", "CLF17"], ... columns=[0, 1]) >>> ast_mult = pd.Series([1000], index=["CL"]) >>> util.get_multiplier(wts, ast_mult) """ if len(root_generic_multiplier) > 1 and not isinstance(weights, dict): raise ValueError("For multiple generic instruments weights must be a " "dictionary") mults = [] intrs = [] for ast, multiplier in root_generic_multiplier.iteritems(): if isinstance(weights, dict): weights_ast = weights[ast].index else: weights_ast = weights.index mults.extend(np.repeat(multiplier, len(weights_ast))) intrs.extend(weights_ast) imults = pd.Series(mults, intrs) imults = imults.sort_index() return imults
[ "def", "get_multiplier", "(", "weights", ",", "root_generic_multiplier", ")", ":", "if", "len", "(", "root_generic_multiplier", ")", ">", "1", "and", "not", "isinstance", "(", "weights", ",", "dict", ")", ":", "raise", "ValueError", "(", "\"For multiple generic ...
Determine tradeable instrument multiplier based on generic asset multipliers and weights mapping from generics to tradeables. Parameters ---------- weights: pandas.DataFrame or dict A pandas.DataFrame of loadings of generic contracts on tradeable instruments **for a given date**. The columns are integers refering to generic number indexed from 0, e.g. [0, 1], and the index is strings representing instrument names. If dict is given keys should be generic instrument names, e.g. 'CL', and values should be pandas.DataFrames of loadings. The union of all indexes should be a superset of the instruments.index root_generic_multiplier: pandas.Series Series of multipliers for generic instruments lexigraphically sorted. If a dictionary of weights is given, root_generic_multiplier.index should correspond to the weights keys. Returns ------- A pandas.Series of multipliers for tradeable instruments. Examples -------- >>> import pandas as pd >>> import mapping.util as util >>> wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]], ... index=["CLX16", "CLZ16", "CLF17"], ... columns=[0, 1]) >>> ast_mult = pd.Series([1000], index=["CL"]) >>> util.get_multiplier(wts, ast_mult)
[ "Determine", "tradeable", "instrument", "multiplier", "based", "on", "generic", "asset", "multipliers", "and", "weights", "mapping", "from", "generics", "to", "tradeables", "." ]
24ea21acfe37a0ee273f63a273b5d24ea405e70d
https://github.com/matthewgilbert/mapping/blob/24ea21acfe37a0ee273f63a273b5d24ea405e70d/mapping/util.py#L593-L643
train
41,431
matthewgilbert/mapping
mapping/util.py
weighted_expiration
def weighted_expiration(weights, contract_dates): """ Calculate the days to expiration for generic futures, weighted by the composition of the underlying tradeable instruments. Parameters: ----------- weights: pandas.DataFrame A DataFrame of instrument weights with a MultiIndex where the top level contains pandas.Timestamps and the second level is instrument names. The columns consist of generic names. contract_dates: pandas.Series Series with index of tradeable contract names and pandas.Timestamps representing the last date of the roll as values Returns: -------- A pandas.DataFrame with columns of generic futures and index of dates. Values are the weighted average of days to expiration for the underlying contracts. Examples: --------- >>> import pandas as pd >>> import mapping.util as util >>> vals = [[1, 0, 1/2, 1/2, 0, 1, 0], [0, 1, 0, 1/2, 1/2, 0, 1]] >>> widx = pd.MultiIndex.from_tuples([(pd.Timestamp('2015-01-03'), 'CLF15'), ... (pd.Timestamp('2015-01-03'), 'CLG15'), ... (pd.Timestamp('2015-01-04'), 'CLF15'), ... (pd.Timestamp('2015-01-04'), 'CLG15'), ... (pd.Timestamp('2015-01-04'), 'CLH15'), ... (pd.Timestamp('2015-01-05'), 'CLG15'), ... (pd.Timestamp('2015-01-05'), 'CLH15')]) >>> weights = pd.DataFrame({"CL1": vals[0], "CL2": vals[1]}, index=widx) >>> contract_dates = pd.Series([pd.Timestamp('2015-01-20'), ... pd.Timestamp('2015-02-21'), ... pd.Timestamp('2015-03-20')], ... index=['CLF15', 'CLG15', 'CLH15']) >>> util.weighted_expiration(weights, contract_dates) """ # NOQA cols = weights.columns weights = weights.reset_index(level=-1) expiries = contract_dates.to_dict() weights.loc[:, "expiry"] = weights.iloc[:, 0].apply(lambda x: expiries[x]) diffs = (pd.DatetimeIndex(weights.expiry) - pd.Series(weights.index, weights.index)).apply(lambda x: x.days) weights = weights.loc[:, cols] wexp = weights.mul(diffs, axis=0).groupby(level=0).sum() return wexp
python
def weighted_expiration(weights, contract_dates): """ Calculate the days to expiration for generic futures, weighted by the composition of the underlying tradeable instruments. Parameters: ----------- weights: pandas.DataFrame A DataFrame of instrument weights with a MultiIndex where the top level contains pandas.Timestamps and the second level is instrument names. The columns consist of generic names. contract_dates: pandas.Series Series with index of tradeable contract names and pandas.Timestamps representing the last date of the roll as values Returns: -------- A pandas.DataFrame with columns of generic futures and index of dates. Values are the weighted average of days to expiration for the underlying contracts. Examples: --------- >>> import pandas as pd >>> import mapping.util as util >>> vals = [[1, 0, 1/2, 1/2, 0, 1, 0], [0, 1, 0, 1/2, 1/2, 0, 1]] >>> widx = pd.MultiIndex.from_tuples([(pd.Timestamp('2015-01-03'), 'CLF15'), ... (pd.Timestamp('2015-01-03'), 'CLG15'), ... (pd.Timestamp('2015-01-04'), 'CLF15'), ... (pd.Timestamp('2015-01-04'), 'CLG15'), ... (pd.Timestamp('2015-01-04'), 'CLH15'), ... (pd.Timestamp('2015-01-05'), 'CLG15'), ... (pd.Timestamp('2015-01-05'), 'CLH15')]) >>> weights = pd.DataFrame({"CL1": vals[0], "CL2": vals[1]}, index=widx) >>> contract_dates = pd.Series([pd.Timestamp('2015-01-20'), ... pd.Timestamp('2015-02-21'), ... pd.Timestamp('2015-03-20')], ... index=['CLF15', 'CLG15', 'CLH15']) >>> util.weighted_expiration(weights, contract_dates) """ # NOQA cols = weights.columns weights = weights.reset_index(level=-1) expiries = contract_dates.to_dict() weights.loc[:, "expiry"] = weights.iloc[:, 0].apply(lambda x: expiries[x]) diffs = (pd.DatetimeIndex(weights.expiry) - pd.Series(weights.index, weights.index)).apply(lambda x: x.days) weights = weights.loc[:, cols] wexp = weights.mul(diffs, axis=0).groupby(level=0).sum() return wexp
[ "def", "weighted_expiration", "(", "weights", ",", "contract_dates", ")", ":", "# NOQA", "cols", "=", "weights", ".", "columns", "weights", "=", "weights", ".", "reset_index", "(", "level", "=", "-", "1", ")", "expiries", "=", "contract_dates", ".", "to_dict...
Calculate the days to expiration for generic futures, weighted by the composition of the underlying tradeable instruments. Parameters: ----------- weights: pandas.DataFrame A DataFrame of instrument weights with a MultiIndex where the top level contains pandas.Timestamps and the second level is instrument names. The columns consist of generic names. contract_dates: pandas.Series Series with index of tradeable contract names and pandas.Timestamps representing the last date of the roll as values Returns: -------- A pandas.DataFrame with columns of generic futures and index of dates. Values are the weighted average of days to expiration for the underlying contracts. Examples: --------- >>> import pandas as pd >>> import mapping.util as util >>> vals = [[1, 0, 1/2, 1/2, 0, 1, 0], [0, 1, 0, 1/2, 1/2, 0, 1]] >>> widx = pd.MultiIndex.from_tuples([(pd.Timestamp('2015-01-03'), 'CLF15'), ... (pd.Timestamp('2015-01-03'), 'CLG15'), ... (pd.Timestamp('2015-01-04'), 'CLF15'), ... (pd.Timestamp('2015-01-04'), 'CLG15'), ... (pd.Timestamp('2015-01-04'), 'CLH15'), ... (pd.Timestamp('2015-01-05'), 'CLG15'), ... (pd.Timestamp('2015-01-05'), 'CLH15')]) >>> weights = pd.DataFrame({"CL1": vals[0], "CL2": vals[1]}, index=widx) >>> contract_dates = pd.Series([pd.Timestamp('2015-01-20'), ... pd.Timestamp('2015-02-21'), ... pd.Timestamp('2015-03-20')], ... index=['CLF15', 'CLG15', 'CLH15']) >>> util.weighted_expiration(weights, contract_dates)
[ "Calculate", "the", "days", "to", "expiration", "for", "generic", "futures", "weighted", "by", "the", "composition", "of", "the", "underlying", "tradeable", "instruments", "." ]
24ea21acfe37a0ee273f63a273b5d24ea405e70d
https://github.com/matthewgilbert/mapping/blob/24ea21acfe37a0ee273f63a273b5d24ea405e70d/mapping/util.py#L646-L694
train
41,432
openstack/networking-arista
networking_arista/ml2/security_groups/arista_security_groups.py
AristaSecurityGroupHandler._valid_baremetal_port
def _valid_baremetal_port(port): """Check if port is a baremetal port with exactly one security group""" if port.get(portbindings.VNIC_TYPE) != portbindings.VNIC_BAREMETAL: return False sgs = port.get('security_groups', []) if len(sgs) == 0: # Nothing to do return False if len(port.get('security_groups', [])) > 1: LOG.warning('SG provisioning failed for %(port)s. Only one ' 'SG may be applied per port.', {'port': port['id']}) return False return True
python
def _valid_baremetal_port(port): """Check if port is a baremetal port with exactly one security group""" if port.get(portbindings.VNIC_TYPE) != portbindings.VNIC_BAREMETAL: return False sgs = port.get('security_groups', []) if len(sgs) == 0: # Nothing to do return False if len(port.get('security_groups', [])) > 1: LOG.warning('SG provisioning failed for %(port)s. Only one ' 'SG may be applied per port.', {'port': port['id']}) return False return True
[ "def", "_valid_baremetal_port", "(", "port", ")", ":", "if", "port", ".", "get", "(", "portbindings", ".", "VNIC_TYPE", ")", "!=", "portbindings", ".", "VNIC_BAREMETAL", ":", "return", "False", "sgs", "=", "port", ".", "get", "(", "'security_groups'", ",", ...
Check if port is a baremetal port with exactly one security group
[ "Check", "if", "port", "is", "a", "baremetal", "port", "with", "exactly", "one", "security", "group" ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/ml2/security_groups/arista_security_groups.py#L87-L100
train
41,433
openstack/networking-arista
networking_arista/ml2/arista_sync.py
AristaSyncWorker.synchronize_resources
def synchronize_resources(self): """Synchronize worker with CVX All database queries must occur while the sync lock is held. This tightly couples reads with writes and ensures that an older read does not result in the last write. Eg: Worker 1 reads (P1 created) Worder 2 reads (P1 deleted) Worker 2 writes (Delete P1 from CVX) Worker 1 writes (Create P1 on CVX) By ensuring that all reads occur with the sync lock held, we ensure that Worker 1 completes its writes before Worker2 is allowed to read. A failure to write results in a full resync and purges all reads from memory. It is also important that we compute resources to sync in reverse sync order in order to avoid missing dependencies on creation. Eg: If we query in sync order 1. Query Instances -> I1 isn't there 2. Query Port table -> Port P1 is there, connected to I1 3. We send P1 to CVX without sending I1 -> Error raised But if we query P1 first: 1. Query Ports P1 -> P1 is not there 2. Query Instances -> find I1 3. We create I1, not P1 -> harmless, mech driver creates P1 Missing dependencies on deletion will helpfully result in the dependent resource not being created: 1. Query Ports -> P1 is found 2. Query Instances -> I1 not found 3. Creating P1 fails on CVX """ # Grab the sync lock if not self._rpc.sync_start(): LOG.info("%(pid)s Failed to grab the sync lock", {'pid': os.getpid()}) greenthread.sleep(1) return for resource in self._resources_to_update: self.update_neutron_resource(resource) self._resources_to_update = list() # Sync any necessary resources. 
# We delete in reverse order and create in order to ensure that # dependent resources are deleted before the resources they depend # on and created after them for resource_type in reversed(self.sync_order): resource_type.delete_cvx_resources() for resource_type in self.sync_order: resource_type.create_cvx_resources() # Release the sync lock self._rpc.sync_end() # Update local uuid if this was a full sync if self._synchronizing_uuid: LOG.info("%(pid)s Full sync for cvx uuid %(uuid)s complete", {'uuid': self._synchronizing_uuid, 'pid': os.getpid()}) self._cvx_uuid = self._synchronizing_uuid self._synchronizing_uuid = None
python
def synchronize_resources(self): """Synchronize worker with CVX All database queries must occur while the sync lock is held. This tightly couples reads with writes and ensures that an older read does not result in the last write. Eg: Worker 1 reads (P1 created) Worder 2 reads (P1 deleted) Worker 2 writes (Delete P1 from CVX) Worker 1 writes (Create P1 on CVX) By ensuring that all reads occur with the sync lock held, we ensure that Worker 1 completes its writes before Worker2 is allowed to read. A failure to write results in a full resync and purges all reads from memory. It is also important that we compute resources to sync in reverse sync order in order to avoid missing dependencies on creation. Eg: If we query in sync order 1. Query Instances -> I1 isn't there 2. Query Port table -> Port P1 is there, connected to I1 3. We send P1 to CVX without sending I1 -> Error raised But if we query P1 first: 1. Query Ports P1 -> P1 is not there 2. Query Instances -> find I1 3. We create I1, not P1 -> harmless, mech driver creates P1 Missing dependencies on deletion will helpfully result in the dependent resource not being created: 1. Query Ports -> P1 is found 2. Query Instances -> I1 not found 3. Creating P1 fails on CVX """ # Grab the sync lock if not self._rpc.sync_start(): LOG.info("%(pid)s Failed to grab the sync lock", {'pid': os.getpid()}) greenthread.sleep(1) return for resource in self._resources_to_update: self.update_neutron_resource(resource) self._resources_to_update = list() # Sync any necessary resources. 
# We delete in reverse order and create in order to ensure that # dependent resources are deleted before the resources they depend # on and created after them for resource_type in reversed(self.sync_order): resource_type.delete_cvx_resources() for resource_type in self.sync_order: resource_type.create_cvx_resources() # Release the sync lock self._rpc.sync_end() # Update local uuid if this was a full sync if self._synchronizing_uuid: LOG.info("%(pid)s Full sync for cvx uuid %(uuid)s complete", {'uuid': self._synchronizing_uuid, 'pid': os.getpid()}) self._cvx_uuid = self._synchronizing_uuid self._synchronizing_uuid = None
[ "def", "synchronize_resources", "(", "self", ")", ":", "# Grab the sync lock", "if", "not", "self", ".", "_rpc", ".", "sync_start", "(", ")", ":", "LOG", ".", "info", "(", "\"%(pid)s Failed to grab the sync lock\"", ",", "{", "'pid'", ":", "os", ".", "getpid",...
Synchronize worker with CVX All database queries must occur while the sync lock is held. This tightly couples reads with writes and ensures that an older read does not result in the last write. Eg: Worker 1 reads (P1 created) Worder 2 reads (P1 deleted) Worker 2 writes (Delete P1 from CVX) Worker 1 writes (Create P1 on CVX) By ensuring that all reads occur with the sync lock held, we ensure that Worker 1 completes its writes before Worker2 is allowed to read. A failure to write results in a full resync and purges all reads from memory. It is also important that we compute resources to sync in reverse sync order in order to avoid missing dependencies on creation. Eg: If we query in sync order 1. Query Instances -> I1 isn't there 2. Query Port table -> Port P1 is there, connected to I1 3. We send P1 to CVX without sending I1 -> Error raised But if we query P1 first: 1. Query Ports P1 -> P1 is not there 2. Query Instances -> find I1 3. We create I1, not P1 -> harmless, mech driver creates P1 Missing dependencies on deletion will helpfully result in the dependent resource not being created: 1. Query Ports -> P1 is found 2. Query Instances -> I1 not found 3. Creating P1 fails on CVX
[ "Synchronize", "worker", "with", "CVX" ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/ml2/arista_sync.py#L176-L241
train
41,434
openstack/networking-arista
networking_arista/ml2/arista_trunk.py
AristaTrunkDriver.register
def register(self, resource, event, trigger, **kwargs): """Called in trunk plugin's AFTER_INIT""" super(AristaTrunkDriver, self).register(resource, event, trigger, kwargs) registry.subscribe(self.subport_create, resources.SUBPORTS, events.AFTER_CREATE) registry.subscribe(self.subport_delete, resources.SUBPORTS, events.AFTER_DELETE) registry.subscribe(self.trunk_create, resources.TRUNK, events.AFTER_CREATE) registry.subscribe(self.trunk_update, resources.TRUNK, events.AFTER_UPDATE) registry.subscribe(self.trunk_delete, resources.TRUNK, events.AFTER_DELETE) self.core_plugin = directory.get_plugin() LOG.debug("Arista trunk driver initialized.")
python
def register(self, resource, event, trigger, **kwargs): """Called in trunk plugin's AFTER_INIT""" super(AristaTrunkDriver, self).register(resource, event, trigger, kwargs) registry.subscribe(self.subport_create, resources.SUBPORTS, events.AFTER_CREATE) registry.subscribe(self.subport_delete, resources.SUBPORTS, events.AFTER_DELETE) registry.subscribe(self.trunk_create, resources.TRUNK, events.AFTER_CREATE) registry.subscribe(self.trunk_update, resources.TRUNK, events.AFTER_UPDATE) registry.subscribe(self.trunk_delete, resources.TRUNK, events.AFTER_DELETE) self.core_plugin = directory.get_plugin() LOG.debug("Arista trunk driver initialized.")
[ "def", "register", "(", "self", ",", "resource", ",", "event", ",", "trigger", ",", "*", "*", "kwargs", ")", ":", "super", "(", "AristaTrunkDriver", ",", "self", ")", ".", "register", "(", "resource", ",", "event", ",", "trigger", ",", "kwargs", ")", ...
Called in trunk plugin's AFTER_INIT
[ "Called", "in", "trunk", "plugin", "s", "AFTER_INIT" ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/ml2/arista_trunk.py#L54-L69
train
41,435
openstack/networking-arista
networking_arista/l3Plugin/arista_l3_driver.py
AristaL3Driver.create_router_on_eos
def create_router_on_eos(self, router_name, rdm, server): """Creates a router on Arista HW Device. :param router_name: globally unique identifier for router/VRF :param rdm: A value generated by hashing router name :param server: Server endpoint on the Arista switch to be configured """ cmds = [] rd = "%s:%s" % (rdm, rdm) for c in self.routerDict['create']: cmds.append(c.format(router_name, rd)) if self._mlag_configured: mac = VIRTUAL_ROUTER_MAC for c in self._additionalRouterCmdsDict['create']: cmds.append(c.format(mac)) self._run_config_cmds(cmds, server)
python
def create_router_on_eos(self, router_name, rdm, server): """Creates a router on Arista HW Device. :param router_name: globally unique identifier for router/VRF :param rdm: A value generated by hashing router name :param server: Server endpoint on the Arista switch to be configured """ cmds = [] rd = "%s:%s" % (rdm, rdm) for c in self.routerDict['create']: cmds.append(c.format(router_name, rd)) if self._mlag_configured: mac = VIRTUAL_ROUTER_MAC for c in self._additionalRouterCmdsDict['create']: cmds.append(c.format(mac)) self._run_config_cmds(cmds, server)
[ "def", "create_router_on_eos", "(", "self", ",", "router_name", ",", "rdm", ",", "server", ")", ":", "cmds", "=", "[", "]", "rd", "=", "\"%s:%s\"", "%", "(", "rdm", ",", "rdm", ")", "for", "c", "in", "self", ".", "routerDict", "[", "'create'", "]", ...
Creates a router on Arista HW Device. :param router_name: globally unique identifier for router/VRF :param rdm: A value generated by hashing router name :param server: Server endpoint on the Arista switch to be configured
[ "Creates", "a", "router", "on", "Arista", "HW", "Device", "." ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/l3Plugin/arista_l3_driver.py#L181-L199
train
41,436
openstack/networking-arista
networking_arista/l3Plugin/arista_l3_driver.py
AristaL3Driver.delete_router_from_eos
def delete_router_from_eos(self, router_name, server): """Deletes a router from Arista HW Device. :param router_name: globally unique identifier for router/VRF :param server: Server endpoint on the Arista switch to be configured """ cmds = [] for c in self.routerDict['delete']: cmds.append(c.format(router_name)) if self._mlag_configured: for c in self._additionalRouterCmdsDict['delete']: cmds.append(c) self._run_config_cmds(cmds, server)
python
def delete_router_from_eos(self, router_name, server): """Deletes a router from Arista HW Device. :param router_name: globally unique identifier for router/VRF :param server: Server endpoint on the Arista switch to be configured """ cmds = [] for c in self.routerDict['delete']: cmds.append(c.format(router_name)) if self._mlag_configured: for c in self._additionalRouterCmdsDict['delete']: cmds.append(c) self._run_config_cmds(cmds, server)
[ "def", "delete_router_from_eos", "(", "self", ",", "router_name", ",", "server", ")", ":", "cmds", "=", "[", "]", "for", "c", "in", "self", ".", "routerDict", "[", "'delete'", "]", ":", "cmds", ".", "append", "(", "c", ".", "format", "(", "router_name"...
Deletes a router from Arista HW Device. :param router_name: globally unique identifier for router/VRF :param server: Server endpoint on the Arista switch to be configured
[ "Deletes", "a", "router", "from", "Arista", "HW", "Device", "." ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/l3Plugin/arista_l3_driver.py#L201-L214
train
41,437
openstack/networking-arista
networking_arista/l3Plugin/arista_l3_driver.py
AristaL3Driver.add_interface_to_router
def add_interface_to_router(self, segment_id, router_name, gip, router_ip, mask, server): """Adds an interface to existing HW router on Arista HW device. :param segment_id: VLAN Id associated with interface that is added :param router_name: globally unique identifier for router/VRF :param gip: Gateway IP associated with the subnet :param router_ip: IP address of the router :param mask: subnet mask to be used :param server: Server endpoint on the Arista switch to be configured """ if not segment_id: segment_id = DEFAULT_VLAN cmds = [] for c in self._interfaceDict['add']: if self._mlag_configured: # In VARP config, use router ID else, use gateway IP address. ip = router_ip else: ip = gip + '/' + mask cmds.append(c.format(segment_id, router_name, ip)) if self._mlag_configured: for c in self._additionalInterfaceCmdsDict['add']: cmds.append(c.format(gip)) self._run_config_cmds(cmds, server)
python
def add_interface_to_router(self, segment_id, router_name, gip, router_ip, mask, server): """Adds an interface to existing HW router on Arista HW device. :param segment_id: VLAN Id associated with interface that is added :param router_name: globally unique identifier for router/VRF :param gip: Gateway IP associated with the subnet :param router_ip: IP address of the router :param mask: subnet mask to be used :param server: Server endpoint on the Arista switch to be configured """ if not segment_id: segment_id = DEFAULT_VLAN cmds = [] for c in self._interfaceDict['add']: if self._mlag_configured: # In VARP config, use router ID else, use gateway IP address. ip = router_ip else: ip = gip + '/' + mask cmds.append(c.format(segment_id, router_name, ip)) if self._mlag_configured: for c in self._additionalInterfaceCmdsDict['add']: cmds.append(c.format(gip)) self._run_config_cmds(cmds, server)
[ "def", "add_interface_to_router", "(", "self", ",", "segment_id", ",", "router_name", ",", "gip", ",", "router_ip", ",", "mask", ",", "server", ")", ":", "if", "not", "segment_id", ":", "segment_id", "=", "DEFAULT_VLAN", "cmds", "=", "[", "]", "for", "c", ...
Adds an interface to existing HW router on Arista HW device. :param segment_id: VLAN Id associated with interface that is added :param router_name: globally unique identifier for router/VRF :param gip: Gateway IP associated with the subnet :param router_ip: IP address of the router :param mask: subnet mask to be used :param server: Server endpoint on the Arista switch to be configured
[ "Adds", "an", "interface", "to", "existing", "HW", "router", "on", "Arista", "HW", "device", "." ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/l3Plugin/arista_l3_driver.py#L234-L260
train
41,438
openstack/networking-arista
networking_arista/l3Plugin/arista_l3_driver.py
AristaL3Driver.delete_interface_from_router
def delete_interface_from_router(self, segment_id, router_name, server): """Deletes an interface from existing HW router on Arista HW device. :param segment_id: VLAN Id associated with interface that is added :param router_name: globally unique identifier for router/VRF :param server: Server endpoint on the Arista switch to be configured """ if not segment_id: segment_id = DEFAULT_VLAN cmds = [] for c in self._interfaceDict['remove']: cmds.append(c.format(segment_id)) self._run_config_cmds(cmds, server)
python
def delete_interface_from_router(self, segment_id, router_name, server): """Deletes an interface from existing HW router on Arista HW device. :param segment_id: VLAN Id associated with interface that is added :param router_name: globally unique identifier for router/VRF :param server: Server endpoint on the Arista switch to be configured """ if not segment_id: segment_id = DEFAULT_VLAN cmds = [] for c in self._interfaceDict['remove']: cmds.append(c.format(segment_id)) self._run_config_cmds(cmds, server)
[ "def", "delete_interface_from_router", "(", "self", ",", "segment_id", ",", "router_name", ",", "server", ")", ":", "if", "not", "segment_id", ":", "segment_id", "=", "DEFAULT_VLAN", "cmds", "=", "[", "]", "for", "c", "in", "self", ".", "_interfaceDict", "["...
Deletes an interface from existing HW router on Arista HW device. :param segment_id: VLAN Id associated with interface that is added :param router_name: globally unique identifier for router/VRF :param server: Server endpoint on the Arista switch to be configured
[ "Deletes", "an", "interface", "from", "existing", "HW", "router", "on", "Arista", "HW", "device", "." ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/l3Plugin/arista_l3_driver.py#L262-L276
train
41,439
openstack/networking-arista
networking_arista/l3Plugin/arista_l3_driver.py
AristaL3Driver.create_router
def create_router(self, context, router): """Creates a router on Arista Switch. Deals with multiple configurations - such as Router per VRF, a router in default VRF, Virtual Router in MLAG configurations """ if router: router_name = self._arista_router_name(router['id'], router['name']) hashed = hashlib.sha256(router_name.encode('utf-8')) rdm = str(int(hashed.hexdigest(), 16) % 65536) mlag_peer_failed = False for s in self._servers: try: self.create_router_on_eos(router_name, rdm, s) mlag_peer_failed = False except Exception: if self._mlag_configured and not mlag_peer_failed: # In paied switch, it is OK to fail on one switch mlag_peer_failed = True else: msg = (_('Failed to create router %s on EOS') % router_name) LOG.exception(msg) raise arista_exc.AristaServicePluginRpcError(msg=msg)
python
def create_router(self, context, router): """Creates a router on Arista Switch. Deals with multiple configurations - such as Router per VRF, a router in default VRF, Virtual Router in MLAG configurations """ if router: router_name = self._arista_router_name(router['id'], router['name']) hashed = hashlib.sha256(router_name.encode('utf-8')) rdm = str(int(hashed.hexdigest(), 16) % 65536) mlag_peer_failed = False for s in self._servers: try: self.create_router_on_eos(router_name, rdm, s) mlag_peer_failed = False except Exception: if self._mlag_configured and not mlag_peer_failed: # In paied switch, it is OK to fail on one switch mlag_peer_failed = True else: msg = (_('Failed to create router %s on EOS') % router_name) LOG.exception(msg) raise arista_exc.AristaServicePluginRpcError(msg=msg)
[ "def", "create_router", "(", "self", ",", "context", ",", "router", ")", ":", "if", "router", ":", "router_name", "=", "self", ".", "_arista_router_name", "(", "router", "[", "'id'", "]", ",", "router", "[", "'name'", "]", ")", "hashed", "=", "hashlib", ...
Creates a router on Arista Switch. Deals with multiple configurations - such as Router per VRF, a router in default VRF, Virtual Router in MLAG configurations
[ "Creates", "a", "router", "on", "Arista", "Switch", "." ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/l3Plugin/arista_l3_driver.py#L278-L304
train
41,440
openstack/networking-arista
networking_arista/l3Plugin/arista_l3_driver.py
AristaL3Driver.delete_router
def delete_router(self, context, router_id, router): """Deletes a router from Arista Switch.""" if router: router_name = self._arista_router_name(router_id, router['name']) mlag_peer_failed = False for s in self._servers: try: self.delete_router_from_eos(router_name, s) mlag_peer_failed = False except Exception: if self._mlag_configured and not mlag_peer_failed: # In paied switch, it is OK to fail on one switch mlag_peer_failed = True else: msg = (_('Failed to create router %s on EOS') % router_name) LOG.exception(msg) raise arista_exc.AristaServicePluginRpcError(msg=msg)
python
def delete_router(self, context, router_id, router): """Deletes a router from Arista Switch.""" if router: router_name = self._arista_router_name(router_id, router['name']) mlag_peer_failed = False for s in self._servers: try: self.delete_router_from_eos(router_name, s) mlag_peer_failed = False except Exception: if self._mlag_configured and not mlag_peer_failed: # In paied switch, it is OK to fail on one switch mlag_peer_failed = True else: msg = (_('Failed to create router %s on EOS') % router_name) LOG.exception(msg) raise arista_exc.AristaServicePluginRpcError(msg=msg)
[ "def", "delete_router", "(", "self", ",", "context", ",", "router_id", ",", "router", ")", ":", "if", "router", ":", "router_name", "=", "self", ".", "_arista_router_name", "(", "router_id", ",", "router", "[", "'name'", "]", ")", "mlag_peer_failed", "=", ...
Deletes a router from Arista Switch.
[ "Deletes", "a", "router", "from", "Arista", "Switch", "." ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/l3Plugin/arista_l3_driver.py#L306-L324
train
41,441
openstack/networking-arista
networking_arista/l3Plugin/arista_l3_driver.py
AristaL3Driver.add_router_interface
def add_router_interface(self, context, router_info): """Adds an interface to a router created on Arista HW router. This deals with both IPv6 and IPv4 configurations. """ if router_info: self._select_dicts(router_info['ip_version']) cidr = router_info['cidr'] subnet_mask = cidr.split('/')[1] router_name = self._arista_router_name(router_info['id'], router_info['name']) if self._mlag_configured: # For MLAG, we send a specific IP address as opposed to cidr # For now, we are using x.x.x.253 and x.x.x.254 as virtual IP mlag_peer_failed = False for i, server in enumerate(self._servers): # Get appropriate virtual IP address for this router router_ip = self._get_router_ip(cidr, i, router_info['ip_version']) try: self.add_interface_to_router(router_info['seg_id'], router_name, router_info['gip'], router_ip, subnet_mask, server) mlag_peer_failed = False except Exception: if not mlag_peer_failed: mlag_peer_failed = True else: msg = (_('Failed to add interface to router ' '%s on EOS') % router_name) LOG.exception(msg) raise arista_exc.AristaServicePluginRpcError( msg=msg) else: for s in self._servers: self.add_interface_to_router(router_info['seg_id'], router_name, router_info['gip'], None, subnet_mask, s)
python
def add_router_interface(self, context, router_info): """Adds an interface to a router created on Arista HW router. This deals with both IPv6 and IPv4 configurations. """ if router_info: self._select_dicts(router_info['ip_version']) cidr = router_info['cidr'] subnet_mask = cidr.split('/')[1] router_name = self._arista_router_name(router_info['id'], router_info['name']) if self._mlag_configured: # For MLAG, we send a specific IP address as opposed to cidr # For now, we are using x.x.x.253 and x.x.x.254 as virtual IP mlag_peer_failed = False for i, server in enumerate(self._servers): # Get appropriate virtual IP address for this router router_ip = self._get_router_ip(cidr, i, router_info['ip_version']) try: self.add_interface_to_router(router_info['seg_id'], router_name, router_info['gip'], router_ip, subnet_mask, server) mlag_peer_failed = False except Exception: if not mlag_peer_failed: mlag_peer_failed = True else: msg = (_('Failed to add interface to router ' '%s on EOS') % router_name) LOG.exception(msg) raise arista_exc.AristaServicePluginRpcError( msg=msg) else: for s in self._servers: self.add_interface_to_router(router_info['seg_id'], router_name, router_info['gip'], None, subnet_mask, s)
[ "def", "add_router_interface", "(", "self", ",", "context", ",", "router_info", ")", ":", "if", "router_info", ":", "self", ".", "_select_dicts", "(", "router_info", "[", "'ip_version'", "]", ")", "cidr", "=", "router_info", "[", "'cidr'", "]", "subnet_mask", ...
Adds an interface to a router created on Arista HW router. This deals with both IPv6 and IPv4 configurations.
[ "Adds", "an", "interface", "to", "a", "router", "created", "on", "Arista", "HW", "router", "." ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/l3Plugin/arista_l3_driver.py#L333-L374
train
41,442
openstack/networking-arista
networking_arista/l3Plugin/arista_l3_driver.py
AristaL3Driver.remove_router_interface
def remove_router_interface(self, context, router_info): """Removes previously configured interface from router on Arista HW. This deals with both IPv6 and IPv4 configurations. """ if router_info: router_name = self._arista_router_name(router_info['id'], router_info['name']) mlag_peer_failed = False for s in self._servers: try: self.delete_interface_from_router(router_info['seg_id'], router_name, s) if self._mlag_configured: mlag_peer_failed = False except Exception: if self._mlag_configured and not mlag_peer_failed: mlag_peer_failed = True else: msg = (_('Failed to add interface to router ' '%s on EOS') % router_name) LOG.exception(msg) raise arista_exc.AristaServicePluginRpcError(msg=msg)
python
def remove_router_interface(self, context, router_info): """Removes previously configured interface from router on Arista HW. This deals with both IPv6 and IPv4 configurations. """ if router_info: router_name = self._arista_router_name(router_info['id'], router_info['name']) mlag_peer_failed = False for s in self._servers: try: self.delete_interface_from_router(router_info['seg_id'], router_name, s) if self._mlag_configured: mlag_peer_failed = False except Exception: if self._mlag_configured and not mlag_peer_failed: mlag_peer_failed = True else: msg = (_('Failed to add interface to router ' '%s on EOS') % router_name) LOG.exception(msg) raise arista_exc.AristaServicePluginRpcError(msg=msg)
[ "def", "remove_router_interface", "(", "self", ",", "context", ",", "router_info", ")", ":", "if", "router_info", ":", "router_name", "=", "self", ".", "_arista_router_name", "(", "router_info", "[", "'id'", "]", ",", "router_info", "[", "'name'", "]", ")", ...
Removes previously configured interface from router on Arista HW. This deals with both IPv6 and IPv4 configurations.
[ "Removes", "previously", "configured", "interface", "from", "router", "on", "Arista", "HW", "." ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/l3Plugin/arista_l3_driver.py#L376-L398
train
41,443
openstack/networking-arista
networking_arista/l3Plugin/arista_l3_driver.py
AristaL3Driver._get_binary_from_ipv4
def _get_binary_from_ipv4(self, ip_addr): """Converts IPv4 address to binary form.""" return struct.unpack("!L", socket.inet_pton(socket.AF_INET, ip_addr))[0]
python
def _get_binary_from_ipv4(self, ip_addr): """Converts IPv4 address to binary form.""" return struct.unpack("!L", socket.inet_pton(socket.AF_INET, ip_addr))[0]
[ "def", "_get_binary_from_ipv4", "(", "self", ",", "ip_addr", ")", ":", "return", "struct", ".", "unpack", "(", "\"!L\"", ",", "socket", ".", "inet_pton", "(", "socket", ".", "AF_INET", ",", "ip_addr", ")", ")", "[", "0", "]" ]
Converts IPv4 address to binary form.
[ "Converts", "IPv4", "address", "to", "binary", "form", "." ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/l3Plugin/arista_l3_driver.py#L439-L443
train
41,444
openstack/networking-arista
networking_arista/l3Plugin/arista_l3_driver.py
AristaL3Driver._get_binary_from_ipv6
def _get_binary_from_ipv6(self, ip_addr): """Converts IPv6 address to binary form.""" hi, lo = struct.unpack("!QQ", socket.inet_pton(socket.AF_INET6, ip_addr)) return (hi << 64) | lo
python
def _get_binary_from_ipv6(self, ip_addr): """Converts IPv6 address to binary form.""" hi, lo = struct.unpack("!QQ", socket.inet_pton(socket.AF_INET6, ip_addr)) return (hi << 64) | lo
[ "def", "_get_binary_from_ipv6", "(", "self", ",", "ip_addr", ")", ":", "hi", ",", "lo", "=", "struct", ".", "unpack", "(", "\"!QQ\"", ",", "socket", ".", "inet_pton", "(", "socket", ".", "AF_INET6", ",", "ip_addr", ")", ")", "return", "(", "hi", "<<", ...
Converts IPv6 address to binary form.
[ "Converts", "IPv6", "address", "to", "binary", "form", "." ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/l3Plugin/arista_l3_driver.py#L445-L450
train
41,445
openstack/networking-arista
networking_arista/l3Plugin/arista_l3_driver.py
AristaL3Driver._get_ipv4_from_binary
def _get_ipv4_from_binary(self, bin_addr): """Converts binary address to Ipv4 format.""" return socket.inet_ntop(socket.AF_INET, struct.pack("!L", bin_addr))
python
def _get_ipv4_from_binary(self, bin_addr): """Converts binary address to Ipv4 format.""" return socket.inet_ntop(socket.AF_INET, struct.pack("!L", bin_addr))
[ "def", "_get_ipv4_from_binary", "(", "self", ",", "bin_addr", ")", ":", "return", "socket", ".", "inet_ntop", "(", "socket", ".", "AF_INET", ",", "struct", ".", "pack", "(", "\"!L\"", ",", "bin_addr", ")", ")" ]
Converts binary address to Ipv4 format.
[ "Converts", "binary", "address", "to", "Ipv4", "format", "." ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/l3Plugin/arista_l3_driver.py#L452-L455
train
41,446
openstack/networking-arista
networking_arista/l3Plugin/arista_l3_driver.py
AristaL3Driver._get_ipv6_from_binary
def _get_ipv6_from_binary(self, bin_addr): """Converts binary address to Ipv6 format.""" hi = bin_addr >> 64 lo = bin_addr & 0xFFFFFFFF return socket.inet_ntop(socket.AF_INET6, struct.pack("!QQ", hi, lo))
python
def _get_ipv6_from_binary(self, bin_addr): """Converts binary address to Ipv6 format.""" hi = bin_addr >> 64 lo = bin_addr & 0xFFFFFFFF return socket.inet_ntop(socket.AF_INET6, struct.pack("!QQ", hi, lo))
[ "def", "_get_ipv6_from_binary", "(", "self", ",", "bin_addr", ")", ":", "hi", "=", "bin_addr", ">>", "64", "lo", "=", "bin_addr", "&", "0xFFFFFFFF", "return", "socket", ".", "inet_ntop", "(", "socket", ".", "AF_INET6", ",", "struct", ".", "pack", "(", "\...
Converts binary address to Ipv6 format.
[ "Converts", "binary", "address", "to", "Ipv6", "format", "." ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/l3Plugin/arista_l3_driver.py#L457-L462
train
41,447
openstack/networking-arista
networking_arista/l3Plugin/arista_l3_driver.py
AristaL3Driver._get_router_ip
def _get_router_ip(self, cidr, ip_count, ip_ver): """For a given IP subnet and IP version type, generate IP for router. This method takes the network address (cidr) and selects an IP address that should be assigned to virtual router running on multiple switches. It uses upper addresses in a subnet address as IP for the router. Each instace of the router, on each switch, requires uniqe IP address. For example in IPv4 case, on a 255 subnet, it will pick X.X.X.254 as first addess, X.X.X.253 for next, and so on. """ start_ip = MLAG_SWITCHES + ip_count network_addr, prefix = cidr.split('/') if ip_ver == 4: bits = IPV4_BITS ip = self._get_binary_from_ipv4(network_addr) elif ip_ver == 6: bits = IPV6_BITS ip = self._get_binary_from_ipv6(network_addr) mask = (pow(2, bits) - 1) << (bits - int(prefix)) network_addr = ip & mask router_ip = pow(2, bits - int(prefix)) - start_ip router_ip = network_addr | router_ip if ip_ver == 4: return self._get_ipv4_from_binary(router_ip) + '/' + prefix else: return self._get_ipv6_from_binary(router_ip) + '/' + prefix
python
def _get_router_ip(self, cidr, ip_count, ip_ver): """For a given IP subnet and IP version type, generate IP for router. This method takes the network address (cidr) and selects an IP address that should be assigned to virtual router running on multiple switches. It uses upper addresses in a subnet address as IP for the router. Each instace of the router, on each switch, requires uniqe IP address. For example in IPv4 case, on a 255 subnet, it will pick X.X.X.254 as first addess, X.X.X.253 for next, and so on. """ start_ip = MLAG_SWITCHES + ip_count network_addr, prefix = cidr.split('/') if ip_ver == 4: bits = IPV4_BITS ip = self._get_binary_from_ipv4(network_addr) elif ip_ver == 6: bits = IPV6_BITS ip = self._get_binary_from_ipv6(network_addr) mask = (pow(2, bits) - 1) << (bits - int(prefix)) network_addr = ip & mask router_ip = pow(2, bits - int(prefix)) - start_ip router_ip = network_addr | router_ip if ip_ver == 4: return self._get_ipv4_from_binary(router_ip) + '/' + prefix else: return self._get_ipv6_from_binary(router_ip) + '/' + prefix
[ "def", "_get_router_ip", "(", "self", ",", "cidr", ",", "ip_count", ",", "ip_ver", ")", ":", "start_ip", "=", "MLAG_SWITCHES", "+", "ip_count", "network_addr", ",", "prefix", "=", "cidr", ".", "split", "(", "'/'", ")", "if", "ip_ver", "==", "4", ":", "...
For a given IP subnet and IP version type, generate IP for router. This method takes the network address (cidr) and selects an IP address that should be assigned to virtual router running on multiple switches. It uses upper addresses in a subnet address as IP for the router. Each instace of the router, on each switch, requires uniqe IP address. For example in IPv4 case, on a 255 subnet, it will pick X.X.X.254 as first addess, X.X.X.253 for next, and so on.
[ "For", "a", "given", "IP", "subnet", "and", "IP", "version", "type", "generate", "IP", "for", "router", "." ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/l3Plugin/arista_l3_driver.py#L464-L494
train
41,448
openstack/networking-arista
networking_arista/ml2/mechanism_arista.py
AristaDriver.create_tenant
def create_tenant(self, tenant_id): """Enqueue tenant create""" t_res = MechResource(tenant_id, a_const.TENANT_RESOURCE, a_const.CREATE) self.provision_queue.put(t_res)
python
def create_tenant(self, tenant_id): """Enqueue tenant create""" t_res = MechResource(tenant_id, a_const.TENANT_RESOURCE, a_const.CREATE) self.provision_queue.put(t_res)
[ "def", "create_tenant", "(", "self", ",", "tenant_id", ")", ":", "t_res", "=", "MechResource", "(", "tenant_id", ",", "a_const", ".", "TENANT_RESOURCE", ",", "a_const", ".", "CREATE", ")", "self", ".", "provision_queue", ".", "put", "(", "t_res", ")" ]
Enqueue tenant create
[ "Enqueue", "tenant", "create" ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/ml2/mechanism_arista.py#L78-L82
train
41,449
openstack/networking-arista
networking_arista/ml2/mechanism_arista.py
AristaDriver.delete_tenant_if_removed
def delete_tenant_if_removed(self, tenant_id): """Enqueue tenant delete if it's no longer in the db""" if not db_lib.tenant_provisioned(tenant_id): t_res = MechResource(tenant_id, a_const.TENANT_RESOURCE, a_const.DELETE) self.provision_queue.put(t_res)
python
def delete_tenant_if_removed(self, tenant_id): """Enqueue tenant delete if it's no longer in the db""" if not db_lib.tenant_provisioned(tenant_id): t_res = MechResource(tenant_id, a_const.TENANT_RESOURCE, a_const.DELETE) self.provision_queue.put(t_res)
[ "def", "delete_tenant_if_removed", "(", "self", ",", "tenant_id", ")", ":", "if", "not", "db_lib", ".", "tenant_provisioned", "(", "tenant_id", ")", ":", "t_res", "=", "MechResource", "(", "tenant_id", ",", "a_const", ".", "TENANT_RESOURCE", ",", "a_const", "....
Enqueue tenant delete if it's no longer in the db
[ "Enqueue", "tenant", "delete", "if", "it", "s", "no", "longer", "in", "the", "db" ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/ml2/mechanism_arista.py#L84-L89
train
41,450
openstack/networking-arista
networking_arista/ml2/mechanism_arista.py
AristaDriver.create_network
def create_network(self, network): """Enqueue network create""" n_res = MechResource(network['id'], a_const.NETWORK_RESOURCE, a_const.CREATE) self.provision_queue.put(n_res)
python
def create_network(self, network): """Enqueue network create""" n_res = MechResource(network['id'], a_const.NETWORK_RESOURCE, a_const.CREATE) self.provision_queue.put(n_res)
[ "def", "create_network", "(", "self", ",", "network", ")", ":", "n_res", "=", "MechResource", "(", "network", "[", "'id'", "]", ",", "a_const", ".", "NETWORK_RESOURCE", ",", "a_const", ".", "CREATE", ")", "self", ".", "provision_queue", ".", "put", "(", ...
Enqueue network create
[ "Enqueue", "network", "create" ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/ml2/mechanism_arista.py#L91-L95
train
41,451
openstack/networking-arista
networking_arista/ml2/mechanism_arista.py
AristaDriver.delete_network
def delete_network(self, network): """Enqueue network delete""" n_res = MechResource(network['id'], a_const.NETWORK_RESOURCE, a_const.DELETE) self.provision_queue.put(n_res)
python
def delete_network(self, network): """Enqueue network delete""" n_res = MechResource(network['id'], a_const.NETWORK_RESOURCE, a_const.DELETE) self.provision_queue.put(n_res)
[ "def", "delete_network", "(", "self", ",", "network", ")", ":", "n_res", "=", "MechResource", "(", "network", "[", "'id'", "]", ",", "a_const", ".", "NETWORK_RESOURCE", ",", "a_const", ".", "DELETE", ")", "self", ".", "provision_queue", ".", "put", "(", ...
Enqueue network delete
[ "Enqueue", "network", "delete" ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/ml2/mechanism_arista.py#L97-L101
train
41,452
openstack/networking-arista
networking_arista/ml2/mechanism_arista.py
AristaDriver.create_segments
def create_segments(self, segments): """Enqueue segment creates""" for segment in segments: s_res = MechResource(segment['id'], a_const.SEGMENT_RESOURCE, a_const.CREATE) self.provision_queue.put(s_res)
python
def create_segments(self, segments): """Enqueue segment creates""" for segment in segments: s_res = MechResource(segment['id'], a_const.SEGMENT_RESOURCE, a_const.CREATE) self.provision_queue.put(s_res)
[ "def", "create_segments", "(", "self", ",", "segments", ")", ":", "for", "segment", "in", "segments", ":", "s_res", "=", "MechResource", "(", "segment", "[", "'id'", "]", ",", "a_const", ".", "SEGMENT_RESOURCE", ",", "a_const", ".", "CREATE", ")", "self", ...
Enqueue segment creates
[ "Enqueue", "segment", "creates" ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/ml2/mechanism_arista.py#L103-L108
train
41,453
openstack/networking-arista
networking_arista/ml2/mechanism_arista.py
AristaDriver.delete_segments
def delete_segments(self, segments): """Enqueue segment deletes""" for segment in segments: s_res = MechResource(segment['id'], a_const.SEGMENT_RESOURCE, a_const.DELETE) self.provision_queue.put(s_res)
python
def delete_segments(self, segments): """Enqueue segment deletes""" for segment in segments: s_res = MechResource(segment['id'], a_const.SEGMENT_RESOURCE, a_const.DELETE) self.provision_queue.put(s_res)
[ "def", "delete_segments", "(", "self", ",", "segments", ")", ":", "for", "segment", "in", "segments", ":", "s_res", "=", "MechResource", "(", "segment", "[", "'id'", "]", ",", "a_const", ".", "SEGMENT_RESOURCE", ",", "a_const", ".", "DELETE", ")", "self", ...
Enqueue segment deletes
[ "Enqueue", "segment", "deletes" ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/ml2/mechanism_arista.py#L110-L115
train
41,454
openstack/networking-arista
networking_arista/ml2/mechanism_arista.py
AristaDriver.get_instance_type
def get_instance_type(self, port): """Determine the port type based on device owner and vnic type""" if port[portbindings.VNIC_TYPE] == portbindings.VNIC_BAREMETAL: return a_const.BAREMETAL_RESOURCE owner_to_type = { n_const.DEVICE_OWNER_DHCP: a_const.DHCP_RESOURCE, n_const.DEVICE_OWNER_DVR_INTERFACE: a_const.ROUTER_RESOURCE, trunk_consts.TRUNK_SUBPORT_OWNER: a_const.VM_RESOURCE} if port['device_owner'] in owner_to_type.keys(): return owner_to_type[port['device_owner']] elif port['device_owner'].startswith( n_const.DEVICE_OWNER_COMPUTE_PREFIX): return a_const.VM_RESOURCE return None
python
def get_instance_type(self, port): """Determine the port type based on device owner and vnic type""" if port[portbindings.VNIC_TYPE] == portbindings.VNIC_BAREMETAL: return a_const.BAREMETAL_RESOURCE owner_to_type = { n_const.DEVICE_OWNER_DHCP: a_const.DHCP_RESOURCE, n_const.DEVICE_OWNER_DVR_INTERFACE: a_const.ROUTER_RESOURCE, trunk_consts.TRUNK_SUBPORT_OWNER: a_const.VM_RESOURCE} if port['device_owner'] in owner_to_type.keys(): return owner_to_type[port['device_owner']] elif port['device_owner'].startswith( n_const.DEVICE_OWNER_COMPUTE_PREFIX): return a_const.VM_RESOURCE return None
[ "def", "get_instance_type", "(", "self", ",", "port", ")", ":", "if", "port", "[", "portbindings", ".", "VNIC_TYPE", "]", "==", "portbindings", ".", "VNIC_BAREMETAL", ":", "return", "a_const", ".", "BAREMETAL_RESOURCE", "owner_to_type", "=", "{", "n_const", "....
Determine the port type based on device owner and vnic type
[ "Determine", "the", "port", "type", "based", "on", "device", "owner", "and", "vnic", "type" ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/ml2/mechanism_arista.py#L117-L130
train
41,455
openstack/networking-arista
networking_arista/ml2/mechanism_arista.py
AristaDriver.create_instance
def create_instance(self, port): """Enqueue instance create""" instance_type = self.get_instance_type(port) if not instance_type: return i_res = MechResource(port['device_id'], instance_type, a_const.CREATE) self.provision_queue.put(i_res)
python
def create_instance(self, port): """Enqueue instance create""" instance_type = self.get_instance_type(port) if not instance_type: return i_res = MechResource(port['device_id'], instance_type, a_const.CREATE) self.provision_queue.put(i_res)
[ "def", "create_instance", "(", "self", ",", "port", ")", ":", "instance_type", "=", "self", ".", "get_instance_type", "(", "port", ")", "if", "not", "instance_type", ":", "return", "i_res", "=", "MechResource", "(", "port", "[", "'device_id'", "]", ",", "i...
Enqueue instance create
[ "Enqueue", "instance", "create" ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/ml2/mechanism_arista.py#L132-L138
train
41,456
openstack/networking-arista
networking_arista/ml2/mechanism_arista.py
AristaDriver.delete_instance_if_removed
def delete_instance_if_removed(self, port): """Enqueue instance delete if it's no longer in the db""" instance_type = self.get_instance_type(port) if not instance_type: return if not db_lib.instance_provisioned(port['device_id']): i_res = MechResource(port['device_id'], instance_type, a_const.DELETE) self.provision_queue.put(i_res)
python
def delete_instance_if_removed(self, port): """Enqueue instance delete if it's no longer in the db""" instance_type = self.get_instance_type(port) if not instance_type: return if not db_lib.instance_provisioned(port['device_id']): i_res = MechResource(port['device_id'], instance_type, a_const.DELETE) self.provision_queue.put(i_res)
[ "def", "delete_instance_if_removed", "(", "self", ",", "port", ")", ":", "instance_type", "=", "self", ".", "get_instance_type", "(", "port", ")", "if", "not", "instance_type", ":", "return", "if", "not", "db_lib", ".", "instance_provisioned", "(", "port", "["...
Enqueue instance delete if it's no longer in the db
[ "Enqueue", "instance", "delete", "if", "it", "s", "no", "longer", "in", "the", "db" ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/ml2/mechanism_arista.py#L140-L148
train
41,457
openstack/networking-arista
networking_arista/ml2/mechanism_arista.py
AristaDriver.create_port
def create_port(self, port): """Enqueue port create""" instance_type = self.get_instance_type(port) if not instance_type: return port_type = instance_type + a_const.PORT_SUFFIX p_res = MechResource(port['id'], port_type, a_const.CREATE) self.provision_queue.put(p_res)
python
def create_port(self, port): """Enqueue port create""" instance_type = self.get_instance_type(port) if not instance_type: return port_type = instance_type + a_const.PORT_SUFFIX p_res = MechResource(port['id'], port_type, a_const.CREATE) self.provision_queue.put(p_res)
[ "def", "create_port", "(", "self", ",", "port", ")", ":", "instance_type", "=", "self", ".", "get_instance_type", "(", "port", ")", "if", "not", "instance_type", ":", "return", "port_type", "=", "instance_type", "+", "a_const", ".", "PORT_SUFFIX", "p_res", "...
Enqueue port create
[ "Enqueue", "port", "create" ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/ml2/mechanism_arista.py#L150-L157
train
41,458
openstack/networking-arista
networking_arista/ml2/mechanism_arista.py
AristaDriver.delete_port_if_removed
def delete_port_if_removed(self, port): """Enqueue port delete""" instance_type = self.get_instance_type(port) if not instance_type: return port_type = instance_type + a_const.PORT_SUFFIX if not db_lib.port_provisioned(port['id']): p_res = MechResource(port['id'], port_type, a_const.DELETE) self.provision_queue.put(p_res)
python
def delete_port_if_removed(self, port): """Enqueue port delete""" instance_type = self.get_instance_type(port) if not instance_type: return port_type = instance_type + a_const.PORT_SUFFIX if not db_lib.port_provisioned(port['id']): p_res = MechResource(port['id'], port_type, a_const.DELETE) self.provision_queue.put(p_res)
[ "def", "delete_port_if_removed", "(", "self", ",", "port", ")", ":", "instance_type", "=", "self", ".", "get_instance_type", "(", "port", ")", "if", "not", "instance_type", ":", "return", "port_type", "=", "instance_type", "+", "a_const", ".", "PORT_SUFFIX", "...
Enqueue port delete
[ "Enqueue", "port", "delete" ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/ml2/mechanism_arista.py#L159-L167
train
41,459
openstack/networking-arista
networking_arista/ml2/mechanism_arista.py
AristaDriver._get_binding_keys
def _get_binding_keys(self, port, host): """Get binding keys from the port binding""" binding_keys = list() switch_binding = port[portbindings.PROFILE].get( 'local_link_information', None) if switch_binding: for binding in switch_binding: switch_id = binding.get('switch_id') port_id = binding.get('port_id') binding_keys.append((port['id'], (switch_id, port_id))) else: binding_keys.append((port['id'], host)) return binding_keys
python
def _get_binding_keys(self, port, host): """Get binding keys from the port binding""" binding_keys = list() switch_binding = port[portbindings.PROFILE].get( 'local_link_information', None) if switch_binding: for binding in switch_binding: switch_id = binding.get('switch_id') port_id = binding.get('port_id') binding_keys.append((port['id'], (switch_id, port_id))) else: binding_keys.append((port['id'], host)) return binding_keys
[ "def", "_get_binding_keys", "(", "self", ",", "port", ",", "host", ")", ":", "binding_keys", "=", "list", "(", ")", "switch_binding", "=", "port", "[", "portbindings", ".", "PROFILE", "]", ".", "get", "(", "'local_link_information'", ",", "None", ")", "if"...
Get binding keys from the port binding
[ "Get", "binding", "keys", "from", "the", "port", "binding" ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/ml2/mechanism_arista.py#L169-L181
train
41,460
openstack/networking-arista
networking_arista/ml2/mechanism_arista.py
AristaDriver.create_port_binding
def create_port_binding(self, port, host): """Enqueue port binding create""" if not self.get_instance_type(port): return for pb_key in self._get_binding_keys(port, host): pb_res = MechResource(pb_key, a_const.PORT_BINDING_RESOURCE, a_const.CREATE) self.provision_queue.put(pb_res)
python
def create_port_binding(self, port, host): """Enqueue port binding create""" if not self.get_instance_type(port): return for pb_key in self._get_binding_keys(port, host): pb_res = MechResource(pb_key, a_const.PORT_BINDING_RESOURCE, a_const.CREATE) self.provision_queue.put(pb_res)
[ "def", "create_port_binding", "(", "self", ",", "port", ",", "host", ")", ":", "if", "not", "self", ".", "get_instance_type", "(", "port", ")", ":", "return", "for", "pb_key", "in", "self", ".", "_get_binding_keys", "(", "port", ",", "host", ")", ":", ...
Enqueue port binding create
[ "Enqueue", "port", "binding", "create" ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/ml2/mechanism_arista.py#L183-L190
train
41,461
openstack/networking-arista
networking_arista/ml2/mechanism_arista.py
AristaDriver.delete_port_binding
def delete_port_binding(self, port, host): """Enqueue port binding delete""" if not self.get_instance_type(port): return for pb_key in self._get_binding_keys(port, host): pb_res = MechResource(pb_key, a_const.PORT_BINDING_RESOURCE, a_const.DELETE) self.provision_queue.put(pb_res)
python
def delete_port_binding(self, port, host): """Enqueue port binding delete""" if not self.get_instance_type(port): return for pb_key in self._get_binding_keys(port, host): pb_res = MechResource(pb_key, a_const.PORT_BINDING_RESOURCE, a_const.DELETE) self.provision_queue.put(pb_res)
[ "def", "delete_port_binding", "(", "self", ",", "port", ",", "host", ")", ":", "if", "not", "self", ".", "get_instance_type", "(", "port", ")", ":", "return", "for", "pb_key", "in", "self", ".", "_get_binding_keys", "(", "port", ",", "host", ")", ":", ...
Enqueue port binding delete
[ "Enqueue", "port", "binding", "delete" ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/ml2/mechanism_arista.py#L192-L199
train
41,462
openstack/networking-arista
networking_arista/ml2/mechanism_arista.py
AristaDriver.create_network_postcommit
def create_network_postcommit(self, context): """Provision the network on CVX""" network = context.current log_context("create_network_postcommit: network", network) segments = context.network_segments tenant_id = network['project_id'] self.create_tenant(tenant_id) self.create_network(network) self.create_segments(segments)
python
def create_network_postcommit(self, context): """Provision the network on CVX""" network = context.current log_context("create_network_postcommit: network", network) segments = context.network_segments tenant_id = network['project_id'] self.create_tenant(tenant_id) self.create_network(network) self.create_segments(segments)
[ "def", "create_network_postcommit", "(", "self", ",", "context", ")", ":", "network", "=", "context", ".", "current", "log_context", "(", "\"create_network_postcommit: network\"", ",", "network", ")", "segments", "=", "context", ".", "network_segments", "tenant_id", ...
Provision the network on CVX
[ "Provision", "the", "network", "on", "CVX" ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/ml2/mechanism_arista.py#L201-L211
train
41,463
openstack/networking-arista
networking_arista/ml2/mechanism_arista.py
AristaDriver.delete_network_postcommit
def delete_network_postcommit(self, context): """Delete the network from CVX""" network = context.current log_context("delete_network_postcommit: network", network) segments = context.network_segments tenant_id = network['project_id'] self.delete_segments(segments) self.delete_network(network) self.delete_tenant_if_removed(tenant_id)
python
def delete_network_postcommit(self, context): """Delete the network from CVX""" network = context.current log_context("delete_network_postcommit: network", network) segments = context.network_segments tenant_id = network['project_id'] self.delete_segments(segments) self.delete_network(network) self.delete_tenant_if_removed(tenant_id)
[ "def", "delete_network_postcommit", "(", "self", ",", "context", ")", ":", "network", "=", "context", ".", "current", "log_context", "(", "\"delete_network_postcommit: network\"", ",", "network", ")", "segments", "=", "context", ".", "network_segments", "tenant_id", ...
Delete the network from CVX
[ "Delete", "the", "network", "from", "CVX" ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/ml2/mechanism_arista.py#L233-L243
train
41,464
openstack/networking-arista
networking_arista/ml2/mechanism_arista.py
AristaDriver.update_port_postcommit
def update_port_postcommit(self, context): """Send port updates to CVX This method is also responsible for the initial creation of ports as we wait until after a port is bound to send the port data to CVX """ port = context.current orig_port = context.original network = context.network.current log_context("update_port_postcommit: port", port) log_context("update_port_postcommit: orig", orig_port) tenant_id = port['project_id'] # Device id can change without a port going DOWN, but the new device # id may not be supported if orig_port and port['device_id'] != orig_port['device_id']: self._delete_port_resources(orig_port, context.original_host) if context.status == n_const.PORT_STATUS_DOWN: if (context.original_host and context.status != context.original_status): self._delete_port_resources(orig_port, context.original_host) self._try_to_release_dynamic_segment(context, migration=True) else: self.create_tenant(tenant_id) self.create_network(network) if context.binding_levels: segments = [ level['bound_segment'] for level in context.binding_levels] self.create_segments(segments) self.create_instance(port) self.create_port(port) self.create_port_binding(port, context.host)
python
def update_port_postcommit(self, context): """Send port updates to CVX This method is also responsible for the initial creation of ports as we wait until after a port is bound to send the port data to CVX """ port = context.current orig_port = context.original network = context.network.current log_context("update_port_postcommit: port", port) log_context("update_port_postcommit: orig", orig_port) tenant_id = port['project_id'] # Device id can change without a port going DOWN, but the new device # id may not be supported if orig_port and port['device_id'] != orig_port['device_id']: self._delete_port_resources(orig_port, context.original_host) if context.status == n_const.PORT_STATUS_DOWN: if (context.original_host and context.status != context.original_status): self._delete_port_resources(orig_port, context.original_host) self._try_to_release_dynamic_segment(context, migration=True) else: self.create_tenant(tenant_id) self.create_network(network) if context.binding_levels: segments = [ level['bound_segment'] for level in context.binding_levels] self.create_segments(segments) self.create_instance(port) self.create_port(port) self.create_port_binding(port, context.host)
[ "def", "update_port_postcommit", "(", "self", ",", "context", ")", ":", "port", "=", "context", ".", "current", "orig_port", "=", "context", ".", "original", "network", "=", "context", ".", "network", ".", "current", "log_context", "(", "\"update_port_postcommit...
Send port updates to CVX This method is also responsible for the initial creation of ports as we wait until after a port is bound to send the port data to CVX
[ "Send", "port", "updates", "to", "CVX" ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/ml2/mechanism_arista.py#L253-L287
train
41,465
openstack/networking-arista
networking_arista/ml2/mechanism_arista.py
AristaDriver.delete_port_postcommit
def delete_port_postcommit(self, context): """Delete the port from CVX""" port = context.current log_context("delete_port_postcommit: port", port) self._delete_port_resources(port, context.host) self._try_to_release_dynamic_segment(context)
python
def delete_port_postcommit(self, context): """Delete the port from CVX""" port = context.current log_context("delete_port_postcommit: port", port) self._delete_port_resources(port, context.host) self._try_to_release_dynamic_segment(context)
[ "def", "delete_port_postcommit", "(", "self", ",", "context", ")", ":", "port", "=", "context", ".", "current", "log_context", "(", "\"delete_port_postcommit: port\"", ",", "port", ")", "self", ".", "_delete_port_resources", "(", "port", ",", "context", ".", "ho...
Delete the port from CVX
[ "Delete", "the", "port", "from", "CVX" ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/ml2/mechanism_arista.py#L289-L296
train
41,466
openstack/networking-arista
networking_arista/ml2/mechanism_arista.py
AristaDriver._bind_baremetal_port
def _bind_baremetal_port(self, context, segment): """Bind the baremetal port to the segment""" port = context.current vif_details = { portbindings.VIF_DETAILS_VLAN: str( segment[driver_api.SEGMENTATION_ID]) } context.set_binding(segment[driver_api.ID], portbindings.VIF_TYPE_OTHER, vif_details, n_const.ACTIVE) LOG.debug("AristaDriver: bound port info- port ID %(id)s " "on network %(network)s", {'id': port['id'], 'network': context.network.current['id']}) if port.get('trunk_details'): self.trunk_driver.bind_port(port) return True
python
def _bind_baremetal_port(self, context, segment): """Bind the baremetal port to the segment""" port = context.current vif_details = { portbindings.VIF_DETAILS_VLAN: str( segment[driver_api.SEGMENTATION_ID]) } context.set_binding(segment[driver_api.ID], portbindings.VIF_TYPE_OTHER, vif_details, n_const.ACTIVE) LOG.debug("AristaDriver: bound port info- port ID %(id)s " "on network %(network)s", {'id': port['id'], 'network': context.network.current['id']}) if port.get('trunk_details'): self.trunk_driver.bind_port(port) return True
[ "def", "_bind_baremetal_port", "(", "self", ",", "context", ",", "segment", ")", ":", "port", "=", "context", ".", "current", "vif_details", "=", "{", "portbindings", ".", "VIF_DETAILS_VLAN", ":", "str", "(", "segment", "[", "driver_api", ".", "SEGMENTATION_ID...
Bind the baremetal port to the segment
[ "Bind", "the", "baremetal", "port", "to", "the", "segment" ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/ml2/mechanism_arista.py#L298-L315
train
41,467
openstack/networking-arista
networking_arista/ml2/mechanism_arista.py
AristaDriver._get_physnet
def _get_physnet(self, context): """Find the appropriate physnet for the host - Baremetal ports' physnet is determined by looking at the local_link_information contained in the binding profile - Other ports' physnet is determined by looking for the host in the topology """ port = context.current physnet = None if (port.get(portbindings.VNIC_TYPE) == portbindings.VNIC_BAREMETAL): physnet = self.eapi.get_baremetal_physnet(context) else: physnet = self.eapi.get_host_physnet(context) # If the switch is part of an mlag pair, the physnet is called # peer1_peer2 physnet = self.mlag_pairs.get(physnet, physnet) return physnet
python
def _get_physnet(self, context): """Find the appropriate physnet for the host - Baremetal ports' physnet is determined by looking at the local_link_information contained in the binding profile - Other ports' physnet is determined by looking for the host in the topology """ port = context.current physnet = None if (port.get(portbindings.VNIC_TYPE) == portbindings.VNIC_BAREMETAL): physnet = self.eapi.get_baremetal_physnet(context) else: physnet = self.eapi.get_host_physnet(context) # If the switch is part of an mlag pair, the physnet is called # peer1_peer2 physnet = self.mlag_pairs.get(physnet, physnet) return physnet
[ "def", "_get_physnet", "(", "self", ",", "context", ")", ":", "port", "=", "context", ".", "current", "physnet", "=", "None", "if", "(", "port", ".", "get", "(", "portbindings", ".", "VNIC_TYPE", ")", "==", "portbindings", ".", "VNIC_BAREMETAL", ")", ":"...
Find the appropriate physnet for the host - Baremetal ports' physnet is determined by looking at the local_link_information contained in the binding profile - Other ports' physnet is determined by looking for the host in the topology
[ "Find", "the", "appropriate", "physnet", "for", "the", "host" ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/ml2/mechanism_arista.py#L317-L334
train
41,468
openstack/networking-arista
networking_arista/ml2/mechanism_arista.py
AristaDriver._bind_fabric
def _bind_fabric(self, context, segment): """Allocate dynamic segments for the port Segment physnets are based on the switch to which the host is connected. """ port_id = context.current['id'] physnet = self._get_physnet(context) if not physnet: LOG.debug("bind_port for port %(port)s: no physical_network " "found", {'port': port_id}) return False next_segment = context.allocate_dynamic_segment( {'network_id': context.network.current['id'], 'network_type': n_const.TYPE_VLAN, 'physical_network': physnet}) LOG.debug("bind_port for port %(port)s: " "current_segment=%(current_seg)s, " "next_segment=%(next_seg)s", {'port': port_id, 'current_seg': segment, 'next_seg': next_segment}) context.continue_binding(segment['id'], [next_segment]) return True
python
def _bind_fabric(self, context, segment): """Allocate dynamic segments for the port Segment physnets are based on the switch to which the host is connected. """ port_id = context.current['id'] physnet = self._get_physnet(context) if not physnet: LOG.debug("bind_port for port %(port)s: no physical_network " "found", {'port': port_id}) return False next_segment = context.allocate_dynamic_segment( {'network_id': context.network.current['id'], 'network_type': n_const.TYPE_VLAN, 'physical_network': physnet}) LOG.debug("bind_port for port %(port)s: " "current_segment=%(current_seg)s, " "next_segment=%(next_seg)s", {'port': port_id, 'current_seg': segment, 'next_seg': next_segment}) context.continue_binding(segment['id'], [next_segment]) return True
[ "def", "_bind_fabric", "(", "self", ",", "context", ",", "segment", ")", ":", "port_id", "=", "context", ".", "current", "[", "'id'", "]", "physnet", "=", "self", ".", "_get_physnet", "(", "context", ")", "if", "not", "physnet", ":", "LOG", ".", "debug...
Allocate dynamic segments for the port Segment physnets are based on the switch to which the host is connected.
[ "Allocate", "dynamic", "segments", "for", "the", "port" ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/ml2/mechanism_arista.py#L336-L359
train
41,469
openstack/networking-arista
networking_arista/ml2/mechanism_arista.py
AristaDriver.bind_port
def bind_port(self, context): """Bind port to a network segment. Provisioning request to Arista Hardware to plug a host into appropriate network is done when the port is created this simply tells the ML2 Plugin that we are binding the port """ port = context.current log_context("bind_port: port", port) for segment in context.segments_to_bind: physnet = segment.get(driver_api.PHYSICAL_NETWORK) segment_type = segment[driver_api.NETWORK_TYPE] if not physnet: if (segment_type == n_const.TYPE_VXLAN and self.manage_fabric): if self._bind_fabric(context, segment): continue elif (port.get(portbindings.VNIC_TYPE) == portbindings.VNIC_BAREMETAL): if (not self.managed_physnets or physnet in self.managed_physnets): if self._bind_baremetal_port(context, segment): continue LOG.debug("Arista mech driver unable to bind port %(port)s to " "%(seg_type)s segment on physical_network %(physnet)s", {'port': port.get('id'), 'seg_type': segment_type, 'physnet': physnet})
python
def bind_port(self, context): """Bind port to a network segment. Provisioning request to Arista Hardware to plug a host into appropriate network is done when the port is created this simply tells the ML2 Plugin that we are binding the port """ port = context.current log_context("bind_port: port", port) for segment in context.segments_to_bind: physnet = segment.get(driver_api.PHYSICAL_NETWORK) segment_type = segment[driver_api.NETWORK_TYPE] if not physnet: if (segment_type == n_const.TYPE_VXLAN and self.manage_fabric): if self._bind_fabric(context, segment): continue elif (port.get(portbindings.VNIC_TYPE) == portbindings.VNIC_BAREMETAL): if (not self.managed_physnets or physnet in self.managed_physnets): if self._bind_baremetal_port(context, segment): continue LOG.debug("Arista mech driver unable to bind port %(port)s to " "%(seg_type)s segment on physical_network %(physnet)s", {'port': port.get('id'), 'seg_type': segment_type, 'physnet': physnet})
[ "def", "bind_port", "(", "self", ",", "context", ")", ":", "port", "=", "context", ".", "current", "log_context", "(", "\"bind_port: port\"", ",", "port", ")", "for", "segment", "in", "context", ".", "segments_to_bind", ":", "physnet", "=", "segment", ".", ...
Bind port to a network segment. Provisioning request to Arista Hardware to plug a host into appropriate network is done when the port is created this simply tells the ML2 Plugin that we are binding the port
[ "Bind", "port", "to", "a", "network", "segment", "." ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/ml2/mechanism_arista.py#L361-L388
train
41,470
openstack/networking-arista
networking_arista/ml2/mechanism_arista.py
AristaDriver._try_to_release_dynamic_segment
def _try_to_release_dynamic_segment(self, context, migration=False): """Release dynamic segment if necessary If this port was the last port using a segment and the segment was allocated by this driver, it should be released """ if migration: binding_levels = context.original_binding_levels else: binding_levels = context.binding_levels LOG.debug("_try_release_dynamic_segment: " "binding_levels=%(bl)s", {'bl': binding_levels}) if not binding_levels: return for prior_level, binding in enumerate(binding_levels[1:]): allocating_driver = binding_levels[prior_level].get( driver_api.BOUND_DRIVER) if allocating_driver != a_const.MECHANISM_DRV_NAME: continue bound_segment = binding.get(driver_api.BOUND_SEGMENT, {}) segment_id = bound_segment.get('id') if not db_lib.segment_is_dynamic(segment_id): continue if not db_lib.segment_bound(segment_id): context.release_dynamic_segment(segment_id) LOG.debug("Released dynamic segment %(seg)s allocated " "by %(drv)s", {'seg': segment_id, 'drv': allocating_driver})
python
def _try_to_release_dynamic_segment(self, context, migration=False): """Release dynamic segment if necessary If this port was the last port using a segment and the segment was allocated by this driver, it should be released """ if migration: binding_levels = context.original_binding_levels else: binding_levels = context.binding_levels LOG.debug("_try_release_dynamic_segment: " "binding_levels=%(bl)s", {'bl': binding_levels}) if not binding_levels: return for prior_level, binding in enumerate(binding_levels[1:]): allocating_driver = binding_levels[prior_level].get( driver_api.BOUND_DRIVER) if allocating_driver != a_const.MECHANISM_DRV_NAME: continue bound_segment = binding.get(driver_api.BOUND_SEGMENT, {}) segment_id = bound_segment.get('id') if not db_lib.segment_is_dynamic(segment_id): continue if not db_lib.segment_bound(segment_id): context.release_dynamic_segment(segment_id) LOG.debug("Released dynamic segment %(seg)s allocated " "by %(drv)s", {'seg': segment_id, 'drv': allocating_driver})
[ "def", "_try_to_release_dynamic_segment", "(", "self", ",", "context", ",", "migration", "=", "False", ")", ":", "if", "migration", ":", "binding_levels", "=", "context", ".", "original_binding_levels", "else", ":", "binding_levels", "=", "context", ".", "binding_...
Release dynamic segment if necessary If this port was the last port using a segment and the segment was allocated by this driver, it should be released
[ "Release", "dynamic", "segment", "if", "necessary" ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/ml2/mechanism_arista.py#L390-L418
train
41,471
matthewgilbert/mapping
mapping/mappings.py
roller
def roller(timestamps, contract_dates, get_weights, **kwargs): """ Calculate weight allocations to tradeable instruments for generic futures at a set of timestamps for a given root generic. Paramters --------- timestamps: iterable Sorted iterable of of pandas.Timestamps to calculate weights for contract_dates: pandas.Series Series with index of tradeable contract names and pandas.Timestamps representing the last date of the roll as values, sorted by values. Index must be unique and values must be strictly monotonic. get_weights: function A function which takes in a timestamp, contract_dates, validate_inputs and **kwargs. Returns a list of tuples consisting of the generic instrument name, the tradeable contract as a string, the weight on this contract as a float and the date as a pandas.Timestamp. kwargs: keyword arguments Arguements to pass to get_weights Return ------ A pandas.DataFrame with columns representing generics and a MultiIndex of date and contract. Values represent weights on tradeables for each generic. Examples -------- >>> import pandas as pd >>> import mapping.mappings as mappings >>> cols = pd.MultiIndex.from_product([["CL1", "CL2"], ['front', 'back']]) >>> idx = [-2, -1, 0] >>> trans = pd.DataFrame([[1.0, 0.0, 1.0, 0.0], [0.5, 0.5, 0.5, 0.5], ... [0.0, 1.0, 0.0, 1.0]], index=idx, columns=cols) >>> contract_dates = pd.Series([pd.Timestamp('2016-10-20'), ... pd.Timestamp('2016-11-21'), ... pd.Timestamp('2016-12-20')], ... index=['CLX16', 'CLZ16', 'CLF17']) >>> ts = pd.DatetimeIndex([pd.Timestamp('2016-10-18'), ... pd.Timestamp('2016-10-19'), ... pd.Timestamp('2016-10-19')]) >>> wts = mappings.roller(ts, contract_dates, mappings.static_transition, ... 
transition=trans) """ timestamps = sorted(timestamps) contract_dates = contract_dates.sort_values() _check_contract_dates(contract_dates) weights = [] # for loop speedup only validate inputs the first function call to # get_weights() validate_inputs = True ts = timestamps[0] weights.extend(get_weights(ts, contract_dates, validate_inputs=validate_inputs, **kwargs)) validate_inputs = False for ts in timestamps[1:]: weights.extend(get_weights(ts, contract_dates, validate_inputs=validate_inputs, **kwargs)) weights = aggregate_weights(weights) return weights
python
def roller(timestamps, contract_dates, get_weights, **kwargs): """ Calculate weight allocations to tradeable instruments for generic futures at a set of timestamps for a given root generic. Paramters --------- timestamps: iterable Sorted iterable of of pandas.Timestamps to calculate weights for contract_dates: pandas.Series Series with index of tradeable contract names and pandas.Timestamps representing the last date of the roll as values, sorted by values. Index must be unique and values must be strictly monotonic. get_weights: function A function which takes in a timestamp, contract_dates, validate_inputs and **kwargs. Returns a list of tuples consisting of the generic instrument name, the tradeable contract as a string, the weight on this contract as a float and the date as a pandas.Timestamp. kwargs: keyword arguments Arguements to pass to get_weights Return ------ A pandas.DataFrame with columns representing generics and a MultiIndex of date and contract. Values represent weights on tradeables for each generic. Examples -------- >>> import pandas as pd >>> import mapping.mappings as mappings >>> cols = pd.MultiIndex.from_product([["CL1", "CL2"], ['front', 'back']]) >>> idx = [-2, -1, 0] >>> trans = pd.DataFrame([[1.0, 0.0, 1.0, 0.0], [0.5, 0.5, 0.5, 0.5], ... [0.0, 1.0, 0.0, 1.0]], index=idx, columns=cols) >>> contract_dates = pd.Series([pd.Timestamp('2016-10-20'), ... pd.Timestamp('2016-11-21'), ... pd.Timestamp('2016-12-20')], ... index=['CLX16', 'CLZ16', 'CLF17']) >>> ts = pd.DatetimeIndex([pd.Timestamp('2016-10-18'), ... pd.Timestamp('2016-10-19'), ... pd.Timestamp('2016-10-19')]) >>> wts = mappings.roller(ts, contract_dates, mappings.static_transition, ... 
transition=trans) """ timestamps = sorted(timestamps) contract_dates = contract_dates.sort_values() _check_contract_dates(contract_dates) weights = [] # for loop speedup only validate inputs the first function call to # get_weights() validate_inputs = True ts = timestamps[0] weights.extend(get_weights(ts, contract_dates, validate_inputs=validate_inputs, **kwargs)) validate_inputs = False for ts in timestamps[1:]: weights.extend(get_weights(ts, contract_dates, validate_inputs=validate_inputs, **kwargs)) weights = aggregate_weights(weights) return weights
[ "def", "roller", "(", "timestamps", ",", "contract_dates", ",", "get_weights", ",", "*", "*", "kwargs", ")", ":", "timestamps", "=", "sorted", "(", "timestamps", ")", "contract_dates", "=", "contract_dates", ".", "sort_values", "(", ")", "_check_contract_dates",...
Calculate weight allocations to tradeable instruments for generic futures at a set of timestamps for a given root generic. Paramters --------- timestamps: iterable Sorted iterable of of pandas.Timestamps to calculate weights for contract_dates: pandas.Series Series with index of tradeable contract names and pandas.Timestamps representing the last date of the roll as values, sorted by values. Index must be unique and values must be strictly monotonic. get_weights: function A function which takes in a timestamp, contract_dates, validate_inputs and **kwargs. Returns a list of tuples consisting of the generic instrument name, the tradeable contract as a string, the weight on this contract as a float and the date as a pandas.Timestamp. kwargs: keyword arguments Arguements to pass to get_weights Return ------ A pandas.DataFrame with columns representing generics and a MultiIndex of date and contract. Values represent weights on tradeables for each generic. Examples -------- >>> import pandas as pd >>> import mapping.mappings as mappings >>> cols = pd.MultiIndex.from_product([["CL1", "CL2"], ['front', 'back']]) >>> idx = [-2, -1, 0] >>> trans = pd.DataFrame([[1.0, 0.0, 1.0, 0.0], [0.5, 0.5, 0.5, 0.5], ... [0.0, 1.0, 0.0, 1.0]], index=idx, columns=cols) >>> contract_dates = pd.Series([pd.Timestamp('2016-10-20'), ... pd.Timestamp('2016-11-21'), ... pd.Timestamp('2016-12-20')], ... index=['CLX16', 'CLZ16', 'CLF17']) >>> ts = pd.DatetimeIndex([pd.Timestamp('2016-10-18'), ... pd.Timestamp('2016-10-19'), ... pd.Timestamp('2016-10-19')]) >>> wts = mappings.roller(ts, contract_dates, mappings.static_transition, ... transition=trans)
[ "Calculate", "weight", "allocations", "to", "tradeable", "instruments", "for", "generic", "futures", "at", "a", "set", "of", "timestamps", "for", "a", "given", "root", "generic", "." ]
24ea21acfe37a0ee273f63a273b5d24ea405e70d
https://github.com/matthewgilbert/mapping/blob/24ea21acfe37a0ee273f63a273b5d24ea405e70d/mapping/mappings.py#L81-L141
train
41,472
matthewgilbert/mapping
mapping/mappings.py
aggregate_weights
def aggregate_weights(weights, drop_date=False): """ Transforms list of tuples of weights into pandas.DataFrame of weights. Parameters: ----------- weights: list A list of tuples consisting of the generic instrument name, the tradeable contract as a string, the weight on this contract as a float and the date as a pandas.Timestamp. drop_date: boolean Whether to drop the date from the multiIndex Returns ------- A pandas.DataFrame of loadings of generic contracts on tradeable instruments for a given date. The columns are generic instrument names and the index is strings representing instrument names. """ dwts = pd.DataFrame(weights, columns=["generic", "contract", "weight", "date"]) dwts = dwts.pivot_table(index=['date', 'contract'], columns=['generic'], values='weight', fill_value=0) dwts = dwts.astype(float) dwts = dwts.sort_index() if drop_date: dwts.index = dwts.index.levels[-1] return dwts
python
def aggregate_weights(weights, drop_date=False): """ Transforms list of tuples of weights into pandas.DataFrame of weights. Parameters: ----------- weights: list A list of tuples consisting of the generic instrument name, the tradeable contract as a string, the weight on this contract as a float and the date as a pandas.Timestamp. drop_date: boolean Whether to drop the date from the multiIndex Returns ------- A pandas.DataFrame of loadings of generic contracts on tradeable instruments for a given date. The columns are generic instrument names and the index is strings representing instrument names. """ dwts = pd.DataFrame(weights, columns=["generic", "contract", "weight", "date"]) dwts = dwts.pivot_table(index=['date', 'contract'], columns=['generic'], values='weight', fill_value=0) dwts = dwts.astype(float) dwts = dwts.sort_index() if drop_date: dwts.index = dwts.index.levels[-1] return dwts
[ "def", "aggregate_weights", "(", "weights", ",", "drop_date", "=", "False", ")", ":", "dwts", "=", "pd", ".", "DataFrame", "(", "weights", ",", "columns", "=", "[", "\"generic\"", ",", "\"contract\"", ",", "\"weight\"", ",", "\"date\"", "]", ")", "dwts", ...
Transforms list of tuples of weights into pandas.DataFrame of weights. Parameters: ----------- weights: list A list of tuples consisting of the generic instrument name, the tradeable contract as a string, the weight on this contract as a float and the date as a pandas.Timestamp. drop_date: boolean Whether to drop the date from the multiIndex Returns ------- A pandas.DataFrame of loadings of generic contracts on tradeable instruments for a given date. The columns are generic instrument names and the index is strings representing instrument names.
[ "Transforms", "list", "of", "tuples", "of", "weights", "into", "pandas", ".", "DataFrame", "of", "weights", "." ]
24ea21acfe37a0ee273f63a273b5d24ea405e70d
https://github.com/matthewgilbert/mapping/blob/24ea21acfe37a0ee273f63a273b5d24ea405e70d/mapping/mappings.py#L144-L171
train
41,473
openstack/networking-arista
networking_arista/ml2/security_groups/security_group_sync.py
AristaSecurityGroupSyncWorker.synchronize_switch
def synchronize_switch(self, switch_ip, expected_acls, expected_bindings): """Update ACL config on a switch to match expected config This is done as follows: 1. Get switch ACL config using show commands 2. Update expected bindings based on switch LAGs 3. Get commands to synchronize switch ACLs 4. Get commands to synchronize switch ACL bindings 5. Run sync commands on switch """ # Get ACL rules and interface mappings from the switch switch_acls, switch_bindings = self._get_dynamic_acl_info(switch_ip) # Adjust expected bindings for switch LAG config expected_bindings = self.adjust_bindings_for_lag(switch_ip, expected_bindings) # Get synchronization commands switch_cmds = list() switch_cmds.extend( self.get_sync_acl_cmds(switch_acls, expected_acls)) switch_cmds.extend( self.get_sync_binding_cmds(switch_bindings, expected_bindings)) # Update switch config self.run_openstack_sg_cmds(switch_cmds, self._switches.get(switch_ip))
python
def synchronize_switch(self, switch_ip, expected_acls, expected_bindings): """Update ACL config on a switch to match expected config This is done as follows: 1. Get switch ACL config using show commands 2. Update expected bindings based on switch LAGs 3. Get commands to synchronize switch ACLs 4. Get commands to synchronize switch ACL bindings 5. Run sync commands on switch """ # Get ACL rules and interface mappings from the switch switch_acls, switch_bindings = self._get_dynamic_acl_info(switch_ip) # Adjust expected bindings for switch LAG config expected_bindings = self.adjust_bindings_for_lag(switch_ip, expected_bindings) # Get synchronization commands switch_cmds = list() switch_cmds.extend( self.get_sync_acl_cmds(switch_acls, expected_acls)) switch_cmds.extend( self.get_sync_binding_cmds(switch_bindings, expected_bindings)) # Update switch config self.run_openstack_sg_cmds(switch_cmds, self._switches.get(switch_ip))
[ "def", "synchronize_switch", "(", "self", ",", "switch_ip", ",", "expected_acls", ",", "expected_bindings", ")", ":", "# Get ACL rules and interface mappings from the switch", "switch_acls", ",", "switch_bindings", "=", "self", ".", "_get_dynamic_acl_info", "(", "switch_ip"...
Update ACL config on a switch to match expected config This is done as follows: 1. Get switch ACL config using show commands 2. Update expected bindings based on switch LAGs 3. Get commands to synchronize switch ACLs 4. Get commands to synchronize switch ACL bindings 5. Run sync commands on switch
[ "Update", "ACL", "config", "on", "a", "switch", "to", "match", "expected", "config" ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/ml2/security_groups/security_group_sync.py#L63-L88
train
41,474
openstack/networking-arista
networking_arista/ml2/security_groups/security_group_sync.py
AristaSecurityGroupSyncWorker.synchronize
def synchronize(self): """Perform sync of the security groups between ML2 and EOS.""" # Get expected ACLs and rules expected_acls = self.get_expected_acls() # Get expected interface to ACL mappings all_expected_bindings = self.get_expected_bindings() # Check that config is correct on every registered switch for switch_ip in self._switches.keys(): expected_bindings = all_expected_bindings.get(switch_ip, []) try: self.synchronize_switch(switch_ip, expected_acls, expected_bindings) except Exception: LOG.exception("Failed to sync SGs for %(switch)s", {'switch': switch_ip})
python
def synchronize(self): """Perform sync of the security groups between ML2 and EOS.""" # Get expected ACLs and rules expected_acls = self.get_expected_acls() # Get expected interface to ACL mappings all_expected_bindings = self.get_expected_bindings() # Check that config is correct on every registered switch for switch_ip in self._switches.keys(): expected_bindings = all_expected_bindings.get(switch_ip, []) try: self.synchronize_switch(switch_ip, expected_acls, expected_bindings) except Exception: LOG.exception("Failed to sync SGs for %(switch)s", {'switch': switch_ip})
[ "def", "synchronize", "(", "self", ")", ":", "# Get expected ACLs and rules", "expected_acls", "=", "self", ".", "get_expected_acls", "(", ")", "# Get expected interface to ACL mappings", "all_expected_bindings", "=", "self", ".", "get_expected_bindings", "(", ")", "# Che...
Perform sync of the security groups between ML2 and EOS.
[ "Perform", "sync", "of", "the", "security", "groups", "between", "ML2", "and", "EOS", "." ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/ml2/security_groups/security_group_sync.py#L90-L107
train
41,475
openstack/networking-arista
networking_arista/ml2/rpc/arista_eapi.py
AristaRPCWrapperEapi.check_vlan_type_driver_commands
def check_vlan_type_driver_commands(self): """Checks the validity of CLI commands for Arista's VLAN type driver. This method tries to execute the commands used exclusively by the arista_vlan type driver and stores the commands if they succeed. """ cmd = ['show openstack resource-pool vlan region %s uuid' % self.region] try: self._run_eos_cmds(cmd) self.cli_commands['resource-pool'] = cmd except arista_exc.AristaRpcError: self.cli_commands['resource-pool'] = [] LOG.warning( _LW("'resource-pool' command '%s' is not available on EOS"), cmd)
python
def check_vlan_type_driver_commands(self): """Checks the validity of CLI commands for Arista's VLAN type driver. This method tries to execute the commands used exclusively by the arista_vlan type driver and stores the commands if they succeed. """ cmd = ['show openstack resource-pool vlan region %s uuid' % self.region] try: self._run_eos_cmds(cmd) self.cli_commands['resource-pool'] = cmd except arista_exc.AristaRpcError: self.cli_commands['resource-pool'] = [] LOG.warning( _LW("'resource-pool' command '%s' is not available on EOS"), cmd)
[ "def", "check_vlan_type_driver_commands", "(", "self", ")", ":", "cmd", "=", "[", "'show openstack resource-pool vlan region %s uuid'", "%", "self", ".", "region", "]", "try", ":", "self", ".", "_run_eos_cmds", "(", "cmd", ")", "self", ".", "cli_commands", "[", ...
Checks the validity of CLI commands for Arista's VLAN type driver. This method tries to execute the commands used exclusively by the arista_vlan type driver and stores the commands if they succeed.
[ "Checks", "the", "validity", "of", "CLI", "commands", "for", "Arista", "s", "VLAN", "type", "driver", "." ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/ml2/rpc/arista_eapi.py#L125-L140
train
41,476
openstack/networking-arista
networking_arista/ml2/rpc/arista_eapi.py
AristaRPCWrapperEapi.get_vlan_assignment_uuid
def get_vlan_assignment_uuid(self): """Returns the UUID for the region's vlan assignment on CVX :returns: string containing the region's vlan assignment UUID """ vlan_uuid_cmd = self.cli_commands['resource-pool'] if vlan_uuid_cmd: return self._run_eos_cmds(commands=vlan_uuid_cmd)[0] return None
python
def get_vlan_assignment_uuid(self): """Returns the UUID for the region's vlan assignment on CVX :returns: string containing the region's vlan assignment UUID """ vlan_uuid_cmd = self.cli_commands['resource-pool'] if vlan_uuid_cmd: return self._run_eos_cmds(commands=vlan_uuid_cmd)[0] return None
[ "def", "get_vlan_assignment_uuid", "(", "self", ")", ":", "vlan_uuid_cmd", "=", "self", ".", "cli_commands", "[", "'resource-pool'", "]", "if", "vlan_uuid_cmd", ":", "return", "self", ".", "_run_eos_cmds", "(", "commands", "=", "vlan_uuid_cmd", ")", "[", "0", ...
Returns the UUID for the region's vlan assignment on CVX :returns: string containing the region's vlan assignment UUID
[ "Returns", "the", "UUID", "for", "the", "region", "s", "vlan", "assignment", "on", "CVX" ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/ml2/rpc/arista_eapi.py#L142-L150
train
41,477
openstack/networking-arista
networking_arista/ml2/rpc/arista_eapi.py
AristaRPCWrapperEapi.get_vlan_allocation
def get_vlan_allocation(self): """Returns the status of the region's VLAN pool in CVX :returns: dictionary containg the assigned, allocated and available VLANs for the region """ if not self.cli_commands['resource-pool']: LOG.warning(_('The version of CVX you are using does not support' 'arista VLAN type driver.')) else: cmd = ['show openstack resource-pools region %s' % self.region] command_output = self._run_eos_cmds(cmd) if command_output: regions = command_output[0]['physicalNetwork'] if self.region in regions.keys(): return regions[self.region]['vlanPool']['default'] return {'assignedVlans': '', 'availableVlans': '', 'allocatedVlans': ''}
python
def get_vlan_allocation(self): """Returns the status of the region's VLAN pool in CVX :returns: dictionary containg the assigned, allocated and available VLANs for the region """ if not self.cli_commands['resource-pool']: LOG.warning(_('The version of CVX you are using does not support' 'arista VLAN type driver.')) else: cmd = ['show openstack resource-pools region %s' % self.region] command_output = self._run_eos_cmds(cmd) if command_output: regions = command_output[0]['physicalNetwork'] if self.region in regions.keys(): return regions[self.region]['vlanPool']['default'] return {'assignedVlans': '', 'availableVlans': '', 'allocatedVlans': ''}
[ "def", "get_vlan_allocation", "(", "self", ")", ":", "if", "not", "self", ".", "cli_commands", "[", "'resource-pool'", "]", ":", "LOG", ".", "warning", "(", "_", "(", "'The version of CVX you are using does not support'", "'arista VLAN type driver.'", ")", ")", "els...
Returns the status of the region's VLAN pool in CVX :returns: dictionary containg the assigned, allocated and available VLANs for the region
[ "Returns", "the", "status", "of", "the", "region", "s", "VLAN", "pool", "in", "CVX" ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/ml2/rpc/arista_eapi.py#L152-L170
train
41,478
openstack/networking-arista
networking_arista/ml2/rpc/arista_eapi.py
AristaRPCWrapperEapi._build_command
def _build_command(self, cmds, sync=False): """Build full EOS's openstack CLI command. Helper method to add commands to enter and exit from openstack CLI modes. :param cmds: The openstack CLI commands that need to be executed in the openstack config mode. :param sync: This flags indicates that the region is being synced. """ region_cmd = 'region %s' % self.region if sync: region_cmd = self.cli_commands[const.CMD_REGION_SYNC] full_command = [ 'enable', 'configure', 'cvx', 'service openstack', region_cmd, ] full_command.extend(cmds) return full_command
python
def _build_command(self, cmds, sync=False): """Build full EOS's openstack CLI command. Helper method to add commands to enter and exit from openstack CLI modes. :param cmds: The openstack CLI commands that need to be executed in the openstack config mode. :param sync: This flags indicates that the region is being synced. """ region_cmd = 'region %s' % self.region if sync: region_cmd = self.cli_commands[const.CMD_REGION_SYNC] full_command = [ 'enable', 'configure', 'cvx', 'service openstack', region_cmd, ] full_command.extend(cmds) return full_command
[ "def", "_build_command", "(", "self", ",", "cmds", ",", "sync", "=", "False", ")", ":", "region_cmd", "=", "'region %s'", "%", "self", ".", "region", "if", "sync", ":", "region_cmd", "=", "self", ".", "cli_commands", "[", "const", ".", "CMD_REGION_SYNC", ...
Build full EOS's openstack CLI command. Helper method to add commands to enter and exit from openstack CLI modes. :param cmds: The openstack CLI commands that need to be executed in the openstack config mode. :param sync: This flags indicates that the region is being synced.
[ "Build", "full", "EOS", "s", "openstack", "CLI", "command", "." ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/ml2/rpc/arista_eapi.py#L215-L238
train
41,479
openstack/networking-arista
networking_arista/ml2/rpc/arista_eapi.py
AristaRPCWrapperEapi.get_baremetal_physnet
def get_baremetal_physnet(self, context): """Returns dictionary which contains mac to hostname mapping""" port = context.current host_id = context.host cmd = ['show network physical-topology hosts'] try: response = self._run_eos_cmds(cmd) binding_profile = port.get(portbindings.PROFILE, {}) link_info = binding_profile.get('local_link_information', []) for link in link_info: switch_id = link.get('switch_id') for host in response[0]['hosts'].values(): if switch_id == host['name']: physnet = host['hostname'] LOG.debug("get_physical_network: Physical Network for " "%(host)s is %(physnet)s", {'host': host_id, 'physnet': physnet}) return physnet LOG.debug("Physical network not found for %(host)s", {'host': host_id}) except Exception as exc: LOG.error(_LE('command %(cmd)s failed with ' '%(exc)s'), {'cmd': cmd, 'exc': exc}) return None
python
def get_baremetal_physnet(self, context): """Returns dictionary which contains mac to hostname mapping""" port = context.current host_id = context.host cmd = ['show network physical-topology hosts'] try: response = self._run_eos_cmds(cmd) binding_profile = port.get(portbindings.PROFILE, {}) link_info = binding_profile.get('local_link_information', []) for link in link_info: switch_id = link.get('switch_id') for host in response[0]['hosts'].values(): if switch_id == host['name']: physnet = host['hostname'] LOG.debug("get_physical_network: Physical Network for " "%(host)s is %(physnet)s", {'host': host_id, 'physnet': physnet}) return physnet LOG.debug("Physical network not found for %(host)s", {'host': host_id}) except Exception as exc: LOG.error(_LE('command %(cmd)s failed with ' '%(exc)s'), {'cmd': cmd, 'exc': exc}) return None
[ "def", "get_baremetal_physnet", "(", "self", ",", "context", ")", ":", "port", "=", "context", ".", "current", "host_id", "=", "context", ".", "host", "cmd", "=", "[", "'show network physical-topology hosts'", "]", "try", ":", "response", "=", "self", ".", "...
Returns dictionary which contains mac to hostname mapping
[ "Returns", "dictionary", "which", "contains", "mac", "to", "hostname", "mapping" ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/ml2/rpc/arista_eapi.py#L288-L311
train
41,480
openstack/networking-arista
networking_arista/ml2/rpc/arista_eapi.py
AristaRPCWrapperEapi.get_host_physnet
def get_host_physnet(self, context): """Returns dictionary which contains physical topology information for a given host_id """ host_id = utils.hostname(context.host) cmd = ['show network physical-topology neighbors'] try: response = self._run_eos_cmds(cmd) # Get response for 'show network physical-topology neighbors' # command neighbors = response[0]['neighbors'] for neighbor in neighbors: if host_id in neighbor: physnet = neighbors[neighbor]['toPort'][0]['hostname'] LOG.debug("get_physical_network: Physical Network for " "%(host)s is %(physnet)s", {'host': host_id, 'physnet': physnet}) return physnet LOG.debug("Physical network not found for %(host)s", {'host': host_id}) except Exception as exc: LOG.error(_LE('command %(cmd)s failed with ' '%(exc)s'), {'cmd': cmd, 'exc': exc}) return None
python
def get_host_physnet(self, context): """Returns dictionary which contains physical topology information for a given host_id """ host_id = utils.hostname(context.host) cmd = ['show network physical-topology neighbors'] try: response = self._run_eos_cmds(cmd) # Get response for 'show network physical-topology neighbors' # command neighbors = response[0]['neighbors'] for neighbor in neighbors: if host_id in neighbor: physnet = neighbors[neighbor]['toPort'][0]['hostname'] LOG.debug("get_physical_network: Physical Network for " "%(host)s is %(physnet)s", {'host': host_id, 'physnet': physnet}) return physnet LOG.debug("Physical network not found for %(host)s", {'host': host_id}) except Exception as exc: LOG.error(_LE('command %(cmd)s failed with ' '%(exc)s'), {'cmd': cmd, 'exc': exc}) return None
[ "def", "get_host_physnet", "(", "self", ",", "context", ")", ":", "host_id", "=", "utils", ".", "hostname", "(", "context", ".", "host", ")", "cmd", "=", "[", "'show network physical-topology neighbors'", "]", "try", ":", "response", "=", "self", ".", "_run_...
Returns dictionary which contains physical topology information for a given host_id
[ "Returns", "dictionary", "which", "contains", "physical", "topology", "information" ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/ml2/rpc/arista_eapi.py#L313-L337
train
41,481
openstack/networking-arista
networking_arista/common/db_lib.py
filter_unnecessary_segments
def filter_unnecessary_segments(query): """Filter segments are not needed on CVX""" segment_model = segment_models.NetworkSegment network_model = models_v2.Network query = (query .join_if_necessary(network_model) .join_if_necessary(segment_model) .filter(network_model.project_id != '') .filter_network_type()) return query
python
def filter_unnecessary_segments(query): """Filter segments are not needed on CVX""" segment_model = segment_models.NetworkSegment network_model = models_v2.Network query = (query .join_if_necessary(network_model) .join_if_necessary(segment_model) .filter(network_model.project_id != '') .filter_network_type()) return query
[ "def", "filter_unnecessary_segments", "(", "query", ")", ":", "segment_model", "=", "segment_models", ".", "NetworkSegment", "network_model", "=", "models_v2", ".", "Network", "query", "=", "(", "query", ".", "join_if_necessary", "(", "network_model", ")", ".", "j...
Filter segments are not needed on CVX
[ "Filter", "segments", "are", "not", "needed", "on", "CVX" ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/common/db_lib.py#L59-L68
train
41,482
openstack/networking-arista
networking_arista/common/db_lib.py
filter_network_type
def filter_network_type(query): """Filter unsupported segment types""" segment_model = segment_models.NetworkSegment query = (query .filter( segment_model.network_type.in_( utils.SUPPORTED_NETWORK_TYPES))) return query
python
def filter_network_type(query): """Filter unsupported segment types""" segment_model = segment_models.NetworkSegment query = (query .filter( segment_model.network_type.in_( utils.SUPPORTED_NETWORK_TYPES))) return query
[ "def", "filter_network_type", "(", "query", ")", ":", "segment_model", "=", "segment_models", ".", "NetworkSegment", "query", "=", "(", "query", ".", "filter", "(", "segment_model", ".", "network_type", ".", "in_", "(", "utils", ".", "SUPPORTED_NETWORK_TYPES", "...
Filter unsupported segment types
[ "Filter", "unsupported", "segment", "types" ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/common/db_lib.py#L71-L78
train
41,483
openstack/networking-arista
networking_arista/common/db_lib.py
filter_unbound_ports
def filter_unbound_ports(query): """Filter ports not bound to a host or network""" # hack for pep8 E711: comparison to None should be # 'if cond is not None' none = None port_model = models_v2.Port binding_level_model = ml2_models.PortBindingLevel query = (query .join_if_necessary(port_model) .join_if_necessary(binding_level_model) .filter( binding_level_model.host != '', port_model.device_id != none, port_model.network_id != none)) return query
python
def filter_unbound_ports(query): """Filter ports not bound to a host or network""" # hack for pep8 E711: comparison to None should be # 'if cond is not None' none = None port_model = models_v2.Port binding_level_model = ml2_models.PortBindingLevel query = (query .join_if_necessary(port_model) .join_if_necessary(binding_level_model) .filter( binding_level_model.host != '', port_model.device_id != none, port_model.network_id != none)) return query
[ "def", "filter_unbound_ports", "(", "query", ")", ":", "# hack for pep8 E711: comparison to None should be", "# 'if cond is not None'", "none", "=", "None", "port_model", "=", "models_v2", ".", "Port", "binding_level_model", "=", "ml2_models", ".", "PortBindingLevel", "quer...
Filter ports not bound to a host or network
[ "Filter", "ports", "not", "bound", "to", "a", "host", "or", "network" ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/common/db_lib.py#L81-L95
train
41,484
openstack/networking-arista
networking_arista/common/db_lib.py
filter_by_device_owner
def filter_by_device_owner(query, device_owners=None): """Filter ports by device_owner Either filter using specified device_owner or using the list of all device_owners supported and unsupported by the arista ML2 plugin """ port_model = models_v2.Port if not device_owners: device_owners = utils.SUPPORTED_DEVICE_OWNERS supported_device_owner_filter = [ port_model.device_owner.ilike('%s%%' % owner) for owner in device_owners] unsupported_device_owner_filter = [ port_model.device_owner.notilike('%s%%' % owner) for owner in utils.UNSUPPORTED_DEVICE_OWNERS] query = (query .filter( and_(*unsupported_device_owner_filter), or_(*supported_device_owner_filter))) return query
python
def filter_by_device_owner(query, device_owners=None): """Filter ports by device_owner Either filter using specified device_owner or using the list of all device_owners supported and unsupported by the arista ML2 plugin """ port_model = models_v2.Port if not device_owners: device_owners = utils.SUPPORTED_DEVICE_OWNERS supported_device_owner_filter = [ port_model.device_owner.ilike('%s%%' % owner) for owner in device_owners] unsupported_device_owner_filter = [ port_model.device_owner.notilike('%s%%' % owner) for owner in utils.UNSUPPORTED_DEVICE_OWNERS] query = (query .filter( and_(*unsupported_device_owner_filter), or_(*supported_device_owner_filter))) return query
[ "def", "filter_by_device_owner", "(", "query", ",", "device_owners", "=", "None", ")", ":", "port_model", "=", "models_v2", ".", "Port", "if", "not", "device_owners", ":", "device_owners", "=", "utils", ".", "SUPPORTED_DEVICE_OWNERS", "supported_device_owner_filter", ...
Filter ports by device_owner Either filter using specified device_owner or using the list of all device_owners supported and unsupported by the arista ML2 plugin
[ "Filter", "ports", "by", "device_owner" ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/common/db_lib.py#L98-L117
train
41,485
openstack/networking-arista
networking_arista/common/db_lib.py
filter_by_device_id
def filter_by_device_id(query): """Filter ports attached to devices we don't care about Currently used to filter DHCP_RESERVED ports """ port_model = models_v2.Port unsupported_device_id_filter = [ port_model.device_id.notilike('%s%%' % id) for id in utils.UNSUPPORTED_DEVICE_IDS] query = (query .filter(and_(*unsupported_device_id_filter))) return query
python
def filter_by_device_id(query): """Filter ports attached to devices we don't care about Currently used to filter DHCP_RESERVED ports """ port_model = models_v2.Port unsupported_device_id_filter = [ port_model.device_id.notilike('%s%%' % id) for id in utils.UNSUPPORTED_DEVICE_IDS] query = (query .filter(and_(*unsupported_device_id_filter))) return query
[ "def", "filter_by_device_id", "(", "query", ")", ":", "port_model", "=", "models_v2", ".", "Port", "unsupported_device_id_filter", "=", "[", "port_model", ".", "device_id", ".", "notilike", "(", "'%s%%'", "%", "id", ")", "for", "id", "in", "utils", ".", "UNS...
Filter ports attached to devices we don't care about Currently used to filter DHCP_RESERVED ports
[ "Filter", "ports", "attached", "to", "devices", "we", "don", "t", "care", "about" ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/common/db_lib.py#L120-L131
train
41,486
openstack/networking-arista
networking_arista/common/db_lib.py
filter_unmanaged_physnets
def filter_unmanaged_physnets(query): """Filter ports managed by other ML2 plugins """ config = cfg.CONF.ml2_arista managed_physnets = config['managed_physnets'] # Filter out ports bound to segments on physnets that we're not # managing segment_model = segment_models.NetworkSegment if managed_physnets: query = (query .join_if_necessary(segment_model) .filter(segment_model.physical_network.in_( managed_physnets))) return query
python
def filter_unmanaged_physnets(query): """Filter ports managed by other ML2 plugins """ config = cfg.CONF.ml2_arista managed_physnets = config['managed_physnets'] # Filter out ports bound to segments on physnets that we're not # managing segment_model = segment_models.NetworkSegment if managed_physnets: query = (query .join_if_necessary(segment_model) .filter(segment_model.physical_network.in_( managed_physnets))) return query
[ "def", "filter_unmanaged_physnets", "(", "query", ")", ":", "config", "=", "cfg", ".", "CONF", ".", "ml2_arista", "managed_physnets", "=", "config", "[", "'managed_physnets'", "]", "# Filter out ports bound to segments on physnets that we're not", "# managing", "segment_mod...
Filter ports managed by other ML2 plugins
[ "Filter", "ports", "managed", "by", "other", "ML2", "plugins" ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/common/db_lib.py#L152-L165
train
41,487
openstack/networking-arista
networking_arista/common/db_lib.py
filter_inactive_ports
def filter_inactive_ports(query): """Filter ports that aren't in active status """ port_model = models_v2.Port query = (query .filter(port_model.status == n_const.PORT_STATUS_ACTIVE)) return query
python
def filter_inactive_ports(query): """Filter ports that aren't in active status """ port_model = models_v2.Port query = (query .filter(port_model.status == n_const.PORT_STATUS_ACTIVE)) return query
[ "def", "filter_inactive_ports", "(", "query", ")", ":", "port_model", "=", "models_v2", ".", "Port", "query", "=", "(", "query", ".", "filter", "(", "port_model", ".", "status", "==", "n_const", ".", "PORT_STATUS_ACTIVE", ")", ")", "return", "query" ]
Filter ports that aren't in active status
[ "Filter", "ports", "that", "aren", "t", "in", "active", "status" ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/common/db_lib.py#L168-L173
train
41,488
openstack/networking-arista
networking_arista/common/db_lib.py
filter_unnecessary_ports
def filter_unnecessary_ports(query, device_owners=None, vnic_type=None, active=True): """Filter out all ports are not needed on CVX """ query = (query .filter_unbound_ports() .filter_by_device_owner(device_owners) .filter_by_device_id() .filter_unmanaged_physnets()) if active: query = query.filter_inactive_ports() if vnic_type: query = query.filter_by_vnic_type(vnic_type) return query
python
def filter_unnecessary_ports(query, device_owners=None, vnic_type=None, active=True): """Filter out all ports are not needed on CVX """ query = (query .filter_unbound_ports() .filter_by_device_owner(device_owners) .filter_by_device_id() .filter_unmanaged_physnets()) if active: query = query.filter_inactive_ports() if vnic_type: query = query.filter_by_vnic_type(vnic_type) return query
[ "def", "filter_unnecessary_ports", "(", "query", ",", "device_owners", "=", "None", ",", "vnic_type", "=", "None", ",", "active", "=", "True", ")", ":", "query", "=", "(", "query", ".", "filter_unbound_ports", "(", ")", ".", "filter_by_device_owner", "(", "d...
Filter out all ports are not needed on CVX
[ "Filter", "out", "all", "ports", "are", "not", "needed", "on", "CVX" ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/common/db_lib.py#L176-L188
train
41,489
openstack/networking-arista
networking_arista/common/db_lib.py
get_networks
def get_networks(network_id=None): """Returns list of all networks that may be relevant on CVX""" session = db.get_reader_session() with session.begin(): model = models_v2.Network networks = session.query(model).filter(model.project_id != '') if network_id: networks = networks.filter(model.id == network_id) return networks.all()
python
def get_networks(network_id=None): """Returns list of all networks that may be relevant on CVX""" session = db.get_reader_session() with session.begin(): model = models_v2.Network networks = session.query(model).filter(model.project_id != '') if network_id: networks = networks.filter(model.id == network_id) return networks.all()
[ "def", "get_networks", "(", "network_id", "=", "None", ")", ":", "session", "=", "db", ".", "get_reader_session", "(", ")", "with", "session", ".", "begin", "(", ")", ":", "model", "=", "models_v2", ".", "Network", "networks", "=", "session", ".", "query...
Returns list of all networks that may be relevant on CVX
[ "Returns", "list", "of", "all", "networks", "that", "may", "be", "relevant", "on", "CVX" ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/common/db_lib.py#L219-L227
train
41,490
openstack/networking-arista
networking_arista/common/db_lib.py
get_segments
def get_segments(segment_id=None): """Returns list of all network segments that may be relevant on CVX""" session = db.get_reader_session() with session.begin(): model = segment_models.NetworkSegment segments = session.query(model).filter_unnecessary_segments() if segment_id: segments = segments.filter(model.id == segment_id) return segments.all()
python
def get_segments(segment_id=None): """Returns list of all network segments that may be relevant on CVX""" session = db.get_reader_session() with session.begin(): model = segment_models.NetworkSegment segments = session.query(model).filter_unnecessary_segments() if segment_id: segments = segments.filter(model.id == segment_id) return segments.all()
[ "def", "get_segments", "(", "segment_id", "=", "None", ")", ":", "session", "=", "db", ".", "get_reader_session", "(", ")", "with", "session", ".", "begin", "(", ")", ":", "model", "=", "segment_models", ".", "NetworkSegment", "segments", "=", "session", "...
Returns list of all network segments that may be relevant on CVX
[ "Returns", "list", "of", "all", "network", "segments", "that", "may", "be", "relevant", "on", "CVX" ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/common/db_lib.py#L230-L238
train
41,491
openstack/networking-arista
networking_arista/common/db_lib.py
get_instances
def get_instances(device_owners=None, vnic_type=None, instance_id=None): """Returns filtered list of all instances in the neutron db""" session = db.get_reader_session() with session.begin(): port_model = models_v2.Port binding_model = ml2_models.PortBinding instances = (session .query(port_model, binding_model) .outerjoin( binding_model, port_model.id == binding_model.port_id) .distinct(port_model.device_id) .group_by(port_model.device_id) .filter_unnecessary_ports(device_owners, vnic_type)) if instance_id: instances = instances.filter(port_model.device_id == instance_id) return instances.all()
python
def get_instances(device_owners=None, vnic_type=None, instance_id=None): """Returns filtered list of all instances in the neutron db""" session = db.get_reader_session() with session.begin(): port_model = models_v2.Port binding_model = ml2_models.PortBinding instances = (session .query(port_model, binding_model) .outerjoin( binding_model, port_model.id == binding_model.port_id) .distinct(port_model.device_id) .group_by(port_model.device_id) .filter_unnecessary_ports(device_owners, vnic_type)) if instance_id: instances = instances.filter(port_model.device_id == instance_id) return instances.all()
[ "def", "get_instances", "(", "device_owners", "=", "None", ",", "vnic_type", "=", "None", ",", "instance_id", "=", "None", ")", ":", "session", "=", "db", ".", "get_reader_session", "(", ")", "with", "session", ".", "begin", "(", ")", ":", "port_model", ...
Returns filtered list of all instances in the neutron db
[ "Returns", "filtered", "list", "of", "all", "instances", "in", "the", "neutron", "db" ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/common/db_lib.py#L241-L258
train
41,492
openstack/networking-arista
networking_arista/common/db_lib.py
get_ports
def get_ports(device_owners=None, vnic_type=None, port_id=None, active=True): """Returns list of all ports in neutron the db""" session = db.get_reader_session() with session.begin(): port_model = models_v2.Port ports = (session .query(port_model) .filter_unnecessary_ports(device_owners, vnic_type, active)) if port_id: ports = ports.filter(port_model.id == port_id) return ports.all()
python
def get_ports(device_owners=None, vnic_type=None, port_id=None, active=True): """Returns list of all ports in neutron the db""" session = db.get_reader_session() with session.begin(): port_model = models_v2.Port ports = (session .query(port_model) .filter_unnecessary_ports(device_owners, vnic_type, active)) if port_id: ports = ports.filter(port_model.id == port_id) return ports.all()
[ "def", "get_ports", "(", "device_owners", "=", "None", ",", "vnic_type", "=", "None", ",", "port_id", "=", "None", ",", "active", "=", "True", ")", ":", "session", "=", "db", ".", "get_reader_session", "(", ")", "with", "session", ".", "begin", "(", ")...
Returns list of all ports in neutron the db
[ "Returns", "list", "of", "all", "ports", "in", "neutron", "the", "db" ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/common/db_lib.py#L286-L296
train
41,493
openstack/networking-arista
networking_arista/common/db_lib.py
get_port_bindings
def get_port_bindings(binding_key=None): """Returns filtered list of port bindings that may be relevant on CVX This query is a little complex as we need all binding levels for any binding that has a single managed physnet, but we need to filter bindings that have no managed physnets. In order to achieve this, we join to the binding_level_model once to filter bindings with no managed levels, then a second time to get all levels for the remaining bindings. The loop at the end is a convenience to associate levels with bindings as a list. This would ideally be done through the use of an orm.relation, but due to some sqlalchemy limitations imposed to make OVO work, we can't add relations to existing models. """ session = db.get_reader_session() with session.begin(): binding_level_model = ml2_models.PortBindingLevel aliased_blm = aliased(ml2_models.PortBindingLevel) port_binding_model = ml2_models.PortBinding dist_binding_model = ml2_models.DistributedPortBinding bindings = (session.query(port_binding_model, aliased_blm) .join(binding_level_model, and_( port_binding_model.port_id == binding_level_model.port_id, port_binding_model.host == binding_level_model.host)) .filter_unnecessary_ports() .join(aliased_blm, and_(port_binding_model.port_id == aliased_blm.port_id, port_binding_model.host == aliased_blm.host))) dist_bindings = (session.query(dist_binding_model, aliased_blm) .join( binding_level_model, and_(dist_binding_model.port_id == binding_level_model.port_id, dist_binding_model.host == binding_level_model.host)) .filter_unnecessary_ports() .filter(dist_binding_model.status == n_const.PORT_STATUS_ACTIVE) .join(aliased_blm, and_(dist_binding_model.port_id == aliased_blm.port_id, dist_binding_model.host == aliased_blm.host))) if binding_key: port_id = binding_key[0] if type(binding_key[1]) == tuple: switch_id = binding_key[1][0] switch_port = binding_key[1][1] bindings = bindings.filter(and_( port_binding_model.port_id == port_id, 
port_binding_model.profile.ilike('%%%s%%' % switch_id), port_binding_model.profile.ilike('%%%s%%' % switch_port))) dist_bindings = dist_bindings.filter(and_( dist_binding_model.port_id == port_id, dist_binding_model.profile.ilike('%%%s%%' % switch_id), dist_binding_model.profile.ilike('%%%s%%' % switch_port))) else: host_id = binding_key[1] bindings = bindings.filter(and_( port_binding_model.port_id == port_id, port_binding_model.host == host_id)) dist_bindings = dist_bindings.filter(and_( dist_binding_model.port_id == port_id, dist_binding_model.host == host_id)) binding_levels = collections.defaultdict(list) for binding, level in bindings.all() + dist_bindings.all(): binding_levels[binding].append(level) bindings_with_levels = list() for binding, levels in binding_levels.items(): binding.levels = levels bindings_with_levels.append(binding) return bindings_with_levels
python
def get_port_bindings(binding_key=None): """Returns filtered list of port bindings that may be relevant on CVX This query is a little complex as we need all binding levels for any binding that has a single managed physnet, but we need to filter bindings that have no managed physnets. In order to achieve this, we join to the binding_level_model once to filter bindings with no managed levels, then a second time to get all levels for the remaining bindings. The loop at the end is a convenience to associate levels with bindings as a list. This would ideally be done through the use of an orm.relation, but due to some sqlalchemy limitations imposed to make OVO work, we can't add relations to existing models. """ session = db.get_reader_session() with session.begin(): binding_level_model = ml2_models.PortBindingLevel aliased_blm = aliased(ml2_models.PortBindingLevel) port_binding_model = ml2_models.PortBinding dist_binding_model = ml2_models.DistributedPortBinding bindings = (session.query(port_binding_model, aliased_blm) .join(binding_level_model, and_( port_binding_model.port_id == binding_level_model.port_id, port_binding_model.host == binding_level_model.host)) .filter_unnecessary_ports() .join(aliased_blm, and_(port_binding_model.port_id == aliased_blm.port_id, port_binding_model.host == aliased_blm.host))) dist_bindings = (session.query(dist_binding_model, aliased_blm) .join( binding_level_model, and_(dist_binding_model.port_id == binding_level_model.port_id, dist_binding_model.host == binding_level_model.host)) .filter_unnecessary_ports() .filter(dist_binding_model.status == n_const.PORT_STATUS_ACTIVE) .join(aliased_blm, and_(dist_binding_model.port_id == aliased_blm.port_id, dist_binding_model.host == aliased_blm.host))) if binding_key: port_id = binding_key[0] if type(binding_key[1]) == tuple: switch_id = binding_key[1][0] switch_port = binding_key[1][1] bindings = bindings.filter(and_( port_binding_model.port_id == port_id, 
port_binding_model.profile.ilike('%%%s%%' % switch_id), port_binding_model.profile.ilike('%%%s%%' % switch_port))) dist_bindings = dist_bindings.filter(and_( dist_binding_model.port_id == port_id, dist_binding_model.profile.ilike('%%%s%%' % switch_id), dist_binding_model.profile.ilike('%%%s%%' % switch_port))) else: host_id = binding_key[1] bindings = bindings.filter(and_( port_binding_model.port_id == port_id, port_binding_model.host == host_id)) dist_bindings = dist_bindings.filter(and_( dist_binding_model.port_id == port_id, dist_binding_model.host == host_id)) binding_levels = collections.defaultdict(list) for binding, level in bindings.all() + dist_bindings.all(): binding_levels[binding].append(level) bindings_with_levels = list() for binding, levels in binding_levels.items(): binding.levels = levels bindings_with_levels.append(binding) return bindings_with_levels
[ "def", "get_port_bindings", "(", "binding_key", "=", "None", ")", ":", "session", "=", "db", ".", "get_reader_session", "(", ")", "with", "session", ".", "begin", "(", ")", ":", "binding_level_model", "=", "ml2_models", ".", "PortBindingLevel", "aliased_blm", ...
Returns filtered list of port bindings that may be relevant on CVX This query is a little complex as we need all binding levels for any binding that has a single managed physnet, but we need to filter bindings that have no managed physnets. In order to achieve this, we join to the binding_level_model once to filter bindings with no managed levels, then a second time to get all levels for the remaining bindings. The loop at the end is a convenience to associate levels with bindings as a list. This would ideally be done through the use of an orm.relation, but due to some sqlalchemy limitations imposed to make OVO work, we can't add relations to existing models.
[ "Returns", "filtered", "list", "of", "port", "bindings", "that", "may", "be", "relevant", "on", "CVX" ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/common/db_lib.py#L323-L399
train
41,494
openstack/networking-arista
networking_arista/common/db_lib.py
tenant_provisioned
def tenant_provisioned(tenant_id): """Returns true if any networks or ports exist for a tenant.""" session = db.get_reader_session() with session.begin(): res = any( session.query(m).filter(m.tenant_id == tenant_id).count() for m in [models_v2.Network, models_v2.Port] ) return res
python
def tenant_provisioned(tenant_id): """Returns true if any networks or ports exist for a tenant.""" session = db.get_reader_session() with session.begin(): res = any( session.query(m).filter(m.tenant_id == tenant_id).count() for m in [models_v2.Network, models_v2.Port] ) return res
[ "def", "tenant_provisioned", "(", "tenant_id", ")", ":", "session", "=", "db", ".", "get_reader_session", "(", ")", "with", "session", ".", "begin", "(", ")", ":", "res", "=", "any", "(", "session", ".", "query", "(", "m", ")", ".", "filter", "(", "m...
Returns true if any networks or ports exist for a tenant.
[ "Returns", "true", "if", "any", "networks", "or", "ports", "exist", "for", "a", "tenant", "." ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/common/db_lib.py#L438-L446
train
41,495
openstack/networking-arista
networking_arista/common/db_lib.py
instance_provisioned
def instance_provisioned(device_id): """Returns true if any ports exist for an instance.""" session = db.get_reader_session() with session.begin(): port_model = models_v2.Port res = bool(session.query(port_model) .filter(port_model.device_id == device_id).count()) return res
python
def instance_provisioned(device_id): """Returns true if any ports exist for an instance.""" session = db.get_reader_session() with session.begin(): port_model = models_v2.Port res = bool(session.query(port_model) .filter(port_model.device_id == device_id).count()) return res
[ "def", "instance_provisioned", "(", "device_id", ")", ":", "session", "=", "db", ".", "get_reader_session", "(", ")", "with", "session", ".", "begin", "(", ")", ":", "port_model", "=", "models_v2", ".", "Port", "res", "=", "bool", "(", "session", ".", "q...
Returns true if any ports exist for an instance.
[ "Returns", "true", "if", "any", "ports", "exist", "for", "an", "instance", "." ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/common/db_lib.py#L449-L456
train
41,496
openstack/networking-arista
networking_arista/common/db_lib.py
port_provisioned
def port_provisioned(port_id): """Returns true if port still exists.""" session = db.get_reader_session() with session.begin(): port_model = models_v2.Port res = bool(session.query(port_model) .filter(port_model.id == port_id).count()) return res
python
def port_provisioned(port_id): """Returns true if port still exists.""" session = db.get_reader_session() with session.begin(): port_model = models_v2.Port res = bool(session.query(port_model) .filter(port_model.id == port_id).count()) return res
[ "def", "port_provisioned", "(", "port_id", ")", ":", "session", "=", "db", ".", "get_reader_session", "(", ")", "with", "session", ".", "begin", "(", ")", ":", "port_model", "=", "models_v2", ".", "Port", "res", "=", "bool", "(", "session", ".", "query",...
Returns true if port still exists.
[ "Returns", "true", "if", "port", "still", "exists", "." ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/common/db_lib.py#L459-L466
train
41,497
openstack/networking-arista
networking_arista/common/db_lib.py
get_parent
def get_parent(port_id): """Get trunk subport's parent port""" session = db.get_reader_session() res = dict() with session.begin(): subport_model = trunk_models.SubPort trunk_model = trunk_models.Trunk subport = (session.query(subport_model). filter(subport_model.port_id == port_id).first()) if subport: trunk = (session.query(trunk_model). filter(trunk_model.id == subport.trunk_id).first()) if trunk: trunk_port_id = trunk.port.id res = get_ports(port_id=trunk_port_id, active=False)[0] return res
python
def get_parent(port_id): """Get trunk subport's parent port""" session = db.get_reader_session() res = dict() with session.begin(): subport_model = trunk_models.SubPort trunk_model = trunk_models.Trunk subport = (session.query(subport_model). filter(subport_model.port_id == port_id).first()) if subport: trunk = (session.query(trunk_model). filter(trunk_model.id == subport.trunk_id).first()) if trunk: trunk_port_id = trunk.port.id res = get_ports(port_id=trunk_port_id, active=False)[0] return res
[ "def", "get_parent", "(", "port_id", ")", ":", "session", "=", "db", ".", "get_reader_session", "(", ")", "res", "=", "dict", "(", ")", "with", "session", ".", "begin", "(", ")", ":", "subport_model", "=", "trunk_models", ".", "SubPort", "trunk_model", "...
Get trunk subport's parent port
[ "Get", "trunk", "subport", "s", "parent", "port" ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/common/db_lib.py#L469-L484
train
41,498
openstack/networking-arista
networking_arista/common/db_lib.py
get_port_binding_level
def get_port_binding_level(filters): """Returns entries from PortBindingLevel based on the specified filters.""" session = db.get_reader_session() with session.begin(): return (session.query(ml2_models.PortBindingLevel). filter_by(**filters). order_by(ml2_models.PortBindingLevel.level). all())
python
def get_port_binding_level(filters): """Returns entries from PortBindingLevel based on the specified filters.""" session = db.get_reader_session() with session.begin(): return (session.query(ml2_models.PortBindingLevel). filter_by(**filters). order_by(ml2_models.PortBindingLevel.level). all())
[ "def", "get_port_binding_level", "(", "filters", ")", ":", "session", "=", "db", ".", "get_reader_session", "(", ")", "with", "session", ".", "begin", "(", ")", ":", "return", "(", "session", ".", "query", "(", "ml2_models", ".", "PortBindingLevel", ")", "...
Returns entries from PortBindingLevel based on the specified filters.
[ "Returns", "entries", "from", "PortBindingLevel", "based", "on", "the", "specified", "filters", "." ]
07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/common/db_lib.py#L487-L494
train
41,499