_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q260600
delete_locks
validation
def delete_locks(context, network_ids, addresses):
    """Deletes locks for each IP address that is no longer null-routed."""
    to_unlock = _find_addresses_to_be_unlocked(context, network_ids,
                                               addresses)
    LOG.info("Deleting %s lock holders on IPAddress with ids: %s",
             len(to_unlock), [addr.id for addr in to_unlock])
    for addr in to_unlock:
        holder = None
        try:
            holder = db_api.lock_holder_find(
                context, lock_id=addr.lock_id, name=LOCK_NAME,
                scope=db_api.ONE)
            if holder:
                db_api.lock_holder_delete(context, addr, holder)
        except Exception:
            # best-effort: keep unlocking the remaining addresses
            LOG.exception("Failed to delete lock holder %s", holder)
            continue
    context.session.flush()
python
{ "resource": "" }
q260601
create_locks
validation
def create_locks(context, network_ids, addresses):
    """Creates locks for each IP address that is null-routed.

    The function creates the IP address if it is not present in the
    database.
    """
    for address in addresses:
        model = None
        try:
            model = _find_or_create_address(context, network_ids, address)
            holder = None
            if model.lock_id:
                holder = db_api.lock_holder_find(
                    context, lock_id=model.lock_id, name=LOCK_NAME,
                    scope=db_api.ONE)
            if not holder:
                LOG.info("Creating lock holder on IPAddress %s with id %s",
                         model.address_readable, model.id)
                db_api.lock_holder_create(
                    context, model, name=LOCK_NAME, type="ip_address")
        except Exception:
            # best-effort: keep locking the remaining addresses
            LOG.exception("Failed to create lock holder on IPAddress %s",
                          model)
            continue
    context.session.flush()
python
{ "resource": "" }
q260602
IronicDriver.select_ipam_strategy
validation
def select_ipam_strategy(self, network_id, network_strategy, **kwargs):
    """Return relevant IPAM strategy name.

    :param network_id: neutron network id.
    :param network_strategy: default strategy for the network.

    NOTE(morgabra) This feels like a hack but I can't think of a better
    idea. The root problem is we can now attach ports to networks with
    a different backend driver/ipam strategy than the network specifies.

    We handle the backend driver part with allowing network_plugin to
    be specified for port objects. This works pretty well because nova
    or whatever knows when we are hooking up an Ironic node so it can
    pass along that key during port_create().

    IPAM is a little trickier, especially in Ironic's case, because we
    *must* use a specific IPAM for provider networks. There isn't really
    much of an option other than involve the backend driver when
    selecting the IPAM strategy.
    """
    # NOTE: lazy %-args so the message is only formatted when INFO is on.
    LOG.info("Selecting IPAM strategy for network_id:%s "
             "network_strategy:%s", network_id, network_strategy)

    net_type = "tenant"
    if STRATEGY.is_provider_network(network_id):
        net_type = "provider"

    strategy = self._ipam_strategies.get(net_type, {})
    default = strategy.get("default")
    overrides = strategy.get("overrides", {})

    # If we override a particular strategy explicitly, we use it.
    if network_strategy in overrides:
        LOG.info("Selected overridden IPAM strategy: %s",
                 overrides[network_strategy])
        return overrides[network_strategy]

    # Otherwise, we are free to use an explicit default.
    if default:
        LOG.info("Selected default IPAM strategy for tenant "
                 "network: %s", default)
        return default

    # Fallback to the network-specified IPAM strategy
    LOG.info("Selected network strategy for tenant "
             "network: %s", network_strategy)
    return network_strategy
python
{ "resource": "" }
q260603
IronicDriver._get_base_network_info
validation
def _get_base_network_info(self, context, network_id, base_net_driver):
    """Return a dict of extra network information.

    :param context: neutron request context.
    :param network_id: neutron network id.
    :param base_net_driver: network driver associated with network_id.
    :raises IronicException: Any unexpected data fetching failures will
        be logged and IronicException raised.

    This driver can attach to networks managed by other drivers. We may
    need some information from these drivers, or otherwise inform
    downstream about the type of network we are attaching to. We can
    make these decisions here.
    """
    driver_name = base_net_driver.get_name()
    net_info = {"network_type": driver_name}
    LOG.debug('_get_base_network_info: %s %s', driver_name, network_id)

    # If the driver is NVP, we need to look up the lswitch id we should
    # be attaching to.
    if driver_name == 'NVP':
        LOG.debug('looking up lswitch ids for network %s', network_id)
        lswitch_ids = base_net_driver.get_lswitch_ids_for_network(
            context, network_id)

        if not lswitch_ids or len(lswitch_ids) > 1:
            # BUG FIX: the lookup may return None, and len(None) raised a
            # TypeError while building the error message; report 0 instead.
            count = len(lswitch_ids) if lswitch_ids else 0
            msg = ('lswitch id lookup failed, %s ids found.' % count)
            LOG.error(msg)
            raise IronicException(msg)

        lswitch_id = lswitch_ids.pop()
        LOG.info('found lswitch for network %s: %s',
                 network_id, lswitch_id)
        net_info['lswitch_id'] = lswitch_id

    LOG.debug('_get_base_network_info finished: %s %s %s',
              driver_name, network_id, net_info)
    return net_info
python
{ "resource": "" }
q260604
IronicDriver.create_port
validation
def create_port(self, context, network_id, port_id, **kwargs):
    """Create a port.

    :param context: neutron api request context.
    :param network_id: neutron network id.
    :param port_id: neutron port id.
    :param kwargs:
        required keys - device_id: neutron port device_id (instance_id)
                        instance_node_id: nova hypervisor host id
                        mac_address: neutron port mac address
                        base_net_driver: the base network driver
        optional keys - addresses: list of allocated IPAddress models
                        security_groups: list of associated security groups
    :raises IronicException: If the client is unable to create the
        downstream port for any reason, the exception will be logged
        and IronicException raised.
    """
    LOG.info("create_port %s %s %s" % (context.tenant_id, network_id,
                                       port_id))

    # sanity check
    if not kwargs.get('base_net_driver'):
        raise IronicException(msg='base_net_driver required.')
    base_net_driver = kwargs['base_net_driver']

    if not kwargs.get('device_id'):
        raise IronicException(msg='device_id required.')
    device_id = kwargs['device_id']

    if not kwargs.get('instance_node_id'):
        raise IronicException(msg='instance_node_id required.')
    instance_node_id = kwargs['instance_node_id']

    if not kwargs.get('mac_address'):
        raise IronicException(msg='mac_address is required.')
    mac_address = str(netaddr.EUI(kwargs["mac_address"]["address"]))
    mac_address = mac_address.replace('-', ':')

    # TODO(morgabra): Change this when we enable security groups.
    if kwargs.get('security_groups'):
        msg = 'ironic driver does not support security group operations.'
        raise IronicException(msg=msg)

    # unroll the given address models into a fixed_ips list we can
    # pass downstream
    fixed_ips = []
    # BUG FIX: 'addresses' may be absent; the original wrapped None into
    # [None] and handed it to _make_fixed_ip_dict. Default to no fixed ips.
    addresses = kwargs.get('addresses') or []
    if not isinstance(addresses, list):
        addresses = [addresses]
    for address in addresses:
        fixed_ips.append(self._make_fixed_ip_dict(context, address))

    body = {
        "id": port_id,
        "network_id": network_id,
        "device_id": device_id,
        "device_owner": kwargs.get('device_owner', ''),
        "tenant_id": context.tenant_id or "quark",
        "roles": context.roles,
        "mac_address": mac_address,
        "fixed_ips": fixed_ips,
        "switch:hardware_id": instance_node_id,
        "dynamic_network": not STRATEGY.is_provider_network(network_id)
    }

    net_info = self._get_base_network_info(
        context, network_id, base_net_driver)
    body.update(net_info)

    try:
        LOG.info("creating downstream port: %s" % (body))
        port = self._create_port(context, body)
        LOG.info("created downstream port: %s" % (port))
        return {"uuid": port['port']['id'],
                "vlan_id": port['port']['vlan_id']}
    except Exception as e:
        msg = "failed to create downstream port. Exception: %s" % (e)
        raise IronicException(msg=msg)
python
{ "resource": "" }
q260605
IronicDriver.update_port
validation
def update_port(self, context, port_id, **kwargs):
    """Update a port.

    :param context: neutron api request context.
    :param port_id: neutron port id.
    :param kwargs: optional kwargs.
    :raises IronicException: If the client is unable to update the
        downstream port for any reason, the exception will be logged
        and IronicException raised.

    TODO(morgabra) It does not really make sense in the context of Ironic
    to allow updating ports. fixed_ips and mac_address are burned in the
    configdrive on the host, and we otherwise cannot migrate a port
    between instances. Eventually we will need to support security
    groups, but for now it's a no-op on port data changes, and we need
    to rely on the API/Nova to not allow updating data on active ports.
    """
    LOG.info("update_port %s %s" % (context.tenant_id, port_id))

    # TODO(morgabra): Change this when we enable security groups.
    if kwargs.get("security_groups"):
        msg = 'ironic driver does not support security group operations.'
        raise IronicException(msg=msg)

    # No-op on all other port data; see the TODO above.
    return {"uuid": port_id}
python
{ "resource": "" }
q260606
IronicDriver.diag_port
validation
def diag_port(self, context, port_id, **kwargs):
    """Diagnose a port.

    :param context: neutron api request context.
    :param port_id: neutron port id.
    :param kwargs: optional kwargs.
    :raises IronicException: If the client is unable to fetch the
        downstream port for any reason, the exception will be logged
        and IronicException raised.
    """
    LOG.info("diag_port %s" % port_id)
    try:
        downstream = self._client.show_port(port_id)
    except Exception as err:
        msg = "failed fetching downstream port: %s" % (str(err))
        LOG.exception(msg)
        raise IronicException(msg=msg)
    return {"downstream_port": downstream}
python
{ "resource": "" }
q260607
Tag.set
validation
def set(self, model, value):
    """Set tag on model object."""
    self.validate(value)
    # drop any previous tags of this type before appending the new one
    self._pop(model)
    model.tags.append(self.serialize(value))
python
{ "resource": "" }
q260608
Tag.get
validation
def get(self, model):
    """Get a matching valid tag off the model."""
    for candidate in model.tags:
        if not self.is_tag(candidate):
            continue
        value = self.deserialize(candidate)
        try:
            self.validate(value)
        except TagValidationError:
            # invalid payload; keep scanning for a valid one
            continue
        return value
    return None
python
{ "resource": "" }
q260609
Tag._pop
validation
def _pop(self, model): """Pop all matching tags off the model and return them.""" tags = [] # collect any exsiting tags with matching prefix for tag in model.tags: if self.is_tag(tag): tags.append(tag) # remove collected tags from model if tags: for tag in tags: model.tags.remove(tag) return tags
python
{ "resource": "" }
q260610
Tag.pop
validation
def pop(self, model):
    """Pop all matching tags off the port, return a valid one."""
    for tag in self._pop(model):
        value = self.deserialize(tag)
        try:
            self.validate(value)
        except TagValidationError:
            # skip invalid payloads; fall through to the next candidate
            continue
        return value
python
{ "resource": "" }
q260611
Tag.has_tag
validation
def has_tag(self, model):
    """Does the given port have this tag?"""
    return any(self.is_tag(tag) for tag in model.tags)
python
{ "resource": "" }
q260612
VlanTag.validate
validation
def validate(self, value):
    """Validates a VLAN ID.

    :param value: The VLAN ID to validate against.
    :raises TagValidationError: Raised if the VLAN ID is invalid.
    """
    try:
        vlan_id_int = int(value)
        # BUG FIX: explicit range check instead of assert statements,
        # which are stripped under `python -O` and would silently accept
        # out-of-range VLAN IDs.
        if not (self.MIN_VLAN_ID <= vlan_id_int <= self.MAX_VLAN_ID):
            raise ValueError("vlan_id out of range")
    except Exception:
        msg = ("Invalid vlan_id. Got '%(vlan_id)s'. "
               "vlan_id should be an integer between %(min)d and %(max)d "
               "inclusive." % {'vlan_id': value,
                               'min': self.MIN_VLAN_ID,
                               'max': self.MAX_VLAN_ID})
        raise TagValidationError(value, msg)
    return True
python
{ "resource": "" }
q260613
TagRegistry.get_all
validation
def get_all(self, model):
    """Get all known tags from a model.

    Returns a dict of {<tag_name>:<tag_value>}.
    """
    found = {}
    for name, tag in self.tags.items():
        if any(tag.is_tag(mtag) for mtag in model.tags):
            found[name] = tag.get(model)
    return found
python
{ "resource": "" }
q260614
TagRegistry.set_all
validation
def set_all(self, model, **tags):
    """Validate and set all known tags on a port."""
    for name, tag in self.tags.items():
        if name not in tags:
            continue
        value = tags.pop(name)
        if not value:
            # falsy values are ignored, not cleared
            continue
        try:
            tag.set(model, value)
        except TagValidationError as e:
            raise n_exc.BadRequest(
                resource="tags", msg="%s" % (e.message))
python
{ "resource": "" }
q260615
SecurityGroupsClient.serialize_rules
validation
def serialize_rules(self, rules):
    """Creates a payload for the redis server."""
    # TODO(mdietz): If/when we support other rule types, this comment
    # will have to be revised.
    # Action and direction are static, for now. The implementation may
    # support 'deny' and 'egress' respectively in the future. We allow
    # the direction to be set to something else, technically, but current
    # plugin level call actually raises. It's supported here for unit
    # test purposes at this time
    serialized = []
    for rule in rules:
        direction = rule["direction"]
        src_net = ''
        dst_net = ''
        prefix = rule.get("remote_ip_prefix")
        if prefix:
            if direction == "ingress":
                src_net = self._convert_remote_network(prefix)
            else:
                if (Capabilities.EGRESS not in
                        CONF.QUARK.environment_capabilities):
                    raise q_exc.EgressSecurityGroupRulesNotEnabled()
                dst_net = self._convert_remote_network(prefix)

        # NOTE(mdietz): this will expand as we add more protocols
        protocol_map = protocols.PROTOCOL_MAP[rule["ethertype"]]
        if rule["protocol"] == protocol_map["icmp"]:
            extra_fields = {"icmp type": rule["port_range_min"],
                            "icmp code": rule["port_range_max"]}
        else:
            extra_fields = {"port start": rule["port_range_min"],
                            "port end": rule["port_range_max"]}

        payload = {"ethertype": rule["ethertype"],
                   "protocol": rule["protocol"],
                   "source network": src_net,
                   "destination network": dst_net,
                   "action": "allow",
                   "direction": direction}
        payload.update(extra_fields)
        serialized.append(payload)
    return serialized
python
{ "resource": "" }
q260616
SecurityGroupsClient.serialize_groups
validation
def serialize_groups(self, groups):
    """Creates a payload for the redis server

    The rule schema is the following:

    REDIS KEY - port_device_id.port_mac_address/sg
    REDIS VALUE - A JSON dump of the following:

    port_mac_address must be lower-cased and stripped of non-alphanumeric
    characters

    {"id": "<arbitrary uuid>",
     "rules": [
       {"ethertype": <hexademical integer>,
        "protocol": <integer>,
        "port start": <integer>,  # optional
        "port end": <integer>,    # optional
        "icmp type": <integer>,   # optional
        "icmp code": <integer>,   # optional
        "source network": <string>,
        "destination network": <string>,
        "action": <string>,
        "direction": <string>},
      ],
      "security groups ack": <boolean>
    }

    Example:
    {"id": "004c6369-9f3d-4d33-b8f5-9416bf3567dd",
     "rules": [
       {"ethertype": 0x800,
        "protocol": "tcp",
        "port start": 1000,
        "port end": 1999,
        "source network": "10.10.10.0/24",
        "destination network": "",
        "action": "allow",
        "direction": "ingress"},
      ],
      "security groups ack": "true"
    }

    port start/end and icmp type/code are mutually exclusive pairs.
    """
    # flatten every group's serialized rules into one list
    return [rule
            for group in groups
            for rule in self.serialize_rules(group.rules)]
python
{ "resource": "" }
q260617
SecurityGroupsClient.apply_rules
validation
def apply_rules(self, device_id, mac_address, rules):
    """Writes a series of security group rules to a redis server."""
    LOG.info("Applying security group rules for device %s with MAC %s"
             % (device_id, mac_address))

    payload = {SECURITY_GROUP_RULE_KEY: rules}
    key = self.vif_key(device_id, mac_address)
    # TODO(mdietz): Pipeline these. Requires some rewriting
    self.set_field(key, SECURITY_GROUP_HASH_ATTR, payload)
    # new rules start unacknowledged
    self.set_field_raw(key, SECURITY_GROUP_ACK, False)
python
{ "resource": "" }
q260618
SecurityGroupsClient.get_security_group_states
validation
def get_security_group_states(self, interfaces):
    """Gets security groups for interfaces from Redis

    Returns a dictionary of xapi.VIFs with values of the current
    acknowledged status in Redis.

    States not explicitly handled:
    * ack key, no rules - This is the same as just tagging the VIF,
      the instance will be inaccessible
    * rules key, no ack - Nothing will happen, the VIF will not
      be tagged.
    """
    LOG.debug("Getting security groups from Redis for {0}".format(
        interfaces))
    interfaces = tuple(interfaces)
    vif_keys = [self.vif_key(vif.device_id, vif.mac_address)
                for vif in interfaces]

    # Retrieve all fields associated with this key, which should be
    # 'security groups ack' and 'security group rules'.
    sec_grp_all = self.get_fields_all(vif_keys)

    ret = {}
    # Associate the vif with the fields in a dictionary
    for vif, group in zip(interfaces, sec_grp_all):
        if not group:
            continue
        ret[vif] = {SECURITY_GROUP_ACK: None,
                    SECURITY_GROUP_HASH_ATTR: []}
        ack_value = group[SECURITY_GROUP_ACK].lower()
        rules_value = group[SECURITY_GROUP_HASH_ATTR]
        if rules_value:
            parsed = json.loads(rules_value)
            ret[vif][SECURITY_GROUP_HASH_ATTR] = parsed["rules"]
        if "true" in ack_value:
            ret[vif][SECURITY_GROUP_ACK] = True
        elif "false" in ack_value:
            ret[vif][SECURITY_GROUP_ACK] = False
        else:
            # unparseable ack: drop the vif from the result entirely
            ret.pop(vif, None)
            LOG.debug("Skipping bad ack value %s" % ack_value)
    return ret
python
{ "resource": "" }
q260619
SecurityGroupsClient.update_group_states_for_vifs
validation
def update_group_states_for_vifs(self, vifs, ack):
    """Updates security groups by setting the ack field"""
    keys = [self.vif_key(vif.device_id, vif.mac_address) for vif in vifs]
    self.set_fields(keys, SECURITY_GROUP_ACK, ack)
python
{ "resource": "" }
q260620
run_migrations_offline
validation
def run_migrations_offline():
    """Run migrations in 'offline' mode.

    This configures the context with just a URL and not an Engine,
    though an Engine is acceptable here as well. By skipping the Engine
    creation we don't even need a DBAPI to be available.

    Calls to context.execute() here emit the given string to the
    script output.
    """
    context.configure(url=neutron_config.database.connection)
    with context.begin_transaction():
        context.run_migrations()
python
{ "resource": "" }
q260621
run_migrations_online
validation
def run_migrations_online():
    """Run migrations in 'online' mode.

    In this scenario we need to create an Engine and associate a
    connection with the context.
    """
    engine = create_engine(neutron_config.database.connection,
                           poolclass=pool.NullPool)
    conn = engine.connect()
    context.configure(connection=conn,
                      target_metadata=target_metadata)
    try:
        with context.begin_transaction():
            context.run_migrations()
    finally:
        # always release the connection, even when migrations fail
        conn.close()
python
{ "resource": "" }
q260622
do_notify
validation
def do_notify(context, event_type, payload):
    """Generic Notifier.

    Parameters:
        - `context`: session context
        - `event_type`: the event type to report, i.e. ip.usage
        - `payload`: dict containing the payload to send
    """
    LOG.debug('IP_BILL: notifying {}'.format(payload))
    n_rpc.get_notifier('network').info(context, event_type, payload)
python
{ "resource": "" }
q260623
notify
validation
def notify(context, event_type, ipaddress, send_usage=False,
           *args, **kwargs):
    """Method to send notifications.

    We must send USAGE when a public IPv4 address is deallocated or a
    FLIP is associated.

    Parameters:
        - `context`: the context for notifier
        - `event_type`: the event type for IP allocate, deallocate,
          associate, disassociate
        - `ipaddress`: the ipaddress object to notify about

    Returns:
        nothing

    Notes: this may live in the billing module
    """
    # Table of per-event config switches; an event whose switch is off is
    # silently skipped.
    enabled_by_config = {
        IP_ADD: CONF.QUARK.notify_ip_add,
        IP_DEL: CONF.QUARK.notify_ip_delete,
        IP_ASSOC: CONF.QUARK.notify_flip_associate,
        IP_DISASSOC: CONF.QUARK.notify_flip_disassociate,
        IP_EXISTS: CONF.QUARK.notify_ip_exists,
    }
    if event_type in enabled_by_config and not enabled_by_config[event_type]:
        LOG.debug('IP_BILL: notification {} is disabled by config'.
                  format(event_type))
        return

    # Do not send notifications when we are undoing due to an error
    if kwargs.get('rollback'):
        LOG.debug('IP_BILL: not sending notification because we are in undo')
        return

    # ip.add needs the allocated_at time.
    # All other events need the current time.
    ts = ipaddress.allocated_at if event_type == IP_ADD else _now()
    payload = build_payload(ipaddress, event_type, event_time=ts)

    # Send the notification with the payload
    do_notify(context, event_type, payload)

    # When we deallocate an IP or associate a FLIP we must send
    # a usage message to billing.
    # In other words when we supply end_time we must send USAGE to billing
    # immediately.
    # Our billing period is 24 hrs. If the address was allocated after
    # midnight send the start_time as is. If the address was allocated
    # yesterday, then send midnight as the start_time.
    # Note: if allocated_at is empty we assume today's midnight.
    if send_usage:
        allocated = ipaddress.allocated_at
        if allocated is not None and allocated >= _midnight_today():
            start_time = allocated
        else:
            start_time = _midnight_today()
        usage_payload = build_payload(ipaddress, IP_EXISTS,
                                      start_time=start_time, end_time=ts)
        do_notify(context, IP_EXISTS, usage_payload)
python
{ "resource": "" }
q260624
build_payload
validation
def build_payload(ipaddress, event_type, event_time=None,
                  start_time=None, end_time=None):
    """Method builds a payload out of the passed arguments.

    Parameters:
        `ipaddress`: the models.IPAddress object
        `event_type`: USAGE,CREATE,DELETE,SUSPEND,or UNSUSPEND
        `start_time`: startTime for cloudfeeds
        `end_time`: endTime for cloudfeeds

    Returns a dictionary suitable to notify billing.
    Message types mapping to cloud feeds for references:
        ip.exists       - USAGE
        ip.add          - CREATE
        ip.delete       - DELETE
        ip.associate    - UP
        ip.disassociate - DOWN
    Refer to: http://rax.io/cf-api for more details.
    """
    # This is the common part of all message types
    payload = {
        'event_type': unicode(event_type),
        'tenant_id': unicode(ipaddress.used_by_tenant_id),
        'ip_address': unicode(ipaddress.address_readable),
        'ip_version': int(ipaddress.version),
        'ip_type': unicode(ipaddress.address_type),
        'id': unicode(ipaddress.id)
    }

    # Depending on the message type add the appropriate fields
    if event_type == IP_EXISTS:
        if start_time is None or end_time is None:
            raise ValueError('IP_BILL: {} start_time/end_time cannot be empty'
                             .format(event_type))
        payload.update({
            'startTime': unicode(convert_timestamp(start_time)),
            'endTime': unicode(convert_timestamp(end_time))
        })
    elif event_type in [IP_ADD, IP_DEL, IP_ASSOC, IP_DISASSOC]:
        if event_time is None:
            raise ValueError('IP_BILL: {}: event_time cannot be NULL'
                             .format(event_type))
        payload.update({
            'eventTime': unicode(convert_timestamp(event_time)),
            'subnet_id': unicode(ipaddress.subnet_id),
            'network_id': unicode(ipaddress.network_id),
            # boolean expression directly instead of a ternary
            'public': ipaddress.network_id == PUBLIC_NETWORK_ID,
        })
    else:
        raise ValueError('IP_BILL: bad event_type: {}'.format(event_type))

    return payload
python
{ "resource": "" }
q260625
build_full_day_ips
validation
def build_full_day_ips(query, period_start, period_end):
    """Method to build an IP list for the case 1

    when the IP was allocated before the period start
    and is still allocated after the period end.
    This method only looks at public IPv4 addresses.
    """
    # Filter out only IPv4 that have not been deallocated
    # BUG FIX: the original used Python identity tests on column objects
    # ("used_by_tenant_id is not None" and "_deallocated is False"), which
    # evaluate to constants in Python and never reach SQL; use SQL
    # expressions instead so the filters actually apply.
    ip_list = query.\
        filter(models.IPAddress.version == 4).\
        filter(models.IPAddress.network_id == PUBLIC_NETWORK_ID).\
        filter(models.IPAddress.used_by_tenant_id != null()).\
        filter(models.IPAddress.allocated_at != null()).\
        filter(models.IPAddress.allocated_at < period_start).\
        filter(or_(models.IPAddress._deallocated == 0,
                   models.IPAddress.deallocated_at == null(),
                   models.IPAddress.deallocated_at >= period_end)).all()
    return ip_list
python
{ "resource": "" }
q260626
calc_periods
validation
def calc_periods(hour=0, minute=0): """Returns a tuple of start_period and end_period. Assumes that the period is 24-hrs. Parameters: - `hour`: the hour from 0 to 23 when the period ends - `minute`: the minute from 0 to 59 when the period ends This method will calculate the end of the period as the closest hour/minute going backwards. It will also calculate the start of the period as the passed hour/minute but 24 hrs ago. Example, if we pass 0, 0 - we will get the events from 0:00 midnight of the day before yesterday until today's midnight. If we pass 2,0 - we will get the start time as 2am of the previous morning till 2am of today's morning. By default it's midnight. """ # Calculate the time intervals in a usable form period_end = datetime.datetime.utcnow().replace(hour=hour, minute=minute, second=0, microsecond=0) period_start = period_end - datetime.timedelta(days=1) # period end should be slightly before the midnight. # hence, we subtract a second # this will force period_end to store something like: # datetime.datetime(2016, 5, 19, 23, 59, 59, 999999) # instead of: # datetime.datetime(2016, 5, 20, 0, 0, 0, 0) period_end -= datetime.timedelta(seconds=1) return (period_start, period_end)
python
{ "resource": "" }
q260627
_make_job_dict
validation
def _make_job_dict(job): """Creates the view for a job while calculating progress. Since a root job does not have a transaction id (TID) it will return its id as the TID. """ body = {"id": job.get('id'), "action": job.get('action'), "completed": job.get('completed'), "tenant_id": job.get('tenant_id'), "created_at": job.get('created_at'), "transaction_id": job.get('transaction_id'), "parent_id": job.get('parent_id', None)} if not body['transaction_id']: body['transaction_id'] = job.get('id') completed = 0 for sub in job.subtransactions: if sub.get('completed'): completed += 1 pct = 100 if job.get('completed') else 0 if len(job.subtransactions) > 0: pct = float(completed) / len(job.subtransactions) * 100.0 body['transaction_percent'] = int(pct) body['completed_subtransactions'] = completed body['subtransactions'] = len(job.subtransactions) return body
python
{ "resource": "" }
q260628
get_mac_address_range
validation
def get_mac_address_range(context, id, fields=None):
    """Retrieve a mac_address_range.

    : param context: neutron api request context
    : param id: UUID representing the network to fetch.
    : param fields: a list of strings that are valid keys in a
        network dictionary as listed in the RESOURCE_ATTRIBUTE_MAP
        object in neutron/api/v2/attributes.py. Only these fields
        will be returned.
    """
    LOG.info("get_mac_address_range %s for tenant %s fields %s" %
             (id, context.tenant_id, fields))

    # admin-only resource
    if not context.is_admin:
        raise n_exc.NotAuthorized()

    mac_range = db_api.mac_address_range_find(
        context, id=id, scope=db_api.ONE)
    if not mac_range:
        raise q_exc.MacAddressRangeNotFound(mac_address_range_id=id)
    return v._make_mac_range_dict(mac_range)
python
{ "resource": "" }
q260629
delete_mac_address_range
validation
def delete_mac_address_range(context, id):
    """Delete a mac_address_range.

    : param context: neutron api request context
    : param id: UUID representing the mac_address_range to delete.
    """
    LOG.info("delete_mac_address_range %s for tenant %s" %
             (id, context.tenant_id))

    # admin-only resource
    if not context.is_admin:
        raise n_exc.NotAuthorized()

    with context.session.begin():
        mac_range = db_api.mac_address_range_find(context, id=id,
                                                  scope=db_api.ONE)
        if not mac_range:
            raise q_exc.MacAddressRangeNotFound(mac_address_range_id=id)
        _delete_mac_address_range(context, mac_range)
python
{ "resource": "" }
q260630
delete_segment_allocation_range
validation
def delete_segment_allocation_range(context, sa_id):
    """Delete a segment_allocation_range.

    : param context: neutron api request context
    : param sa_id: UUID representing the segment_allocation_range
        to delete.
    """
    LOG.info("delete_segment_allocation_range %s for tenant %s" %
             (sa_id, context.tenant_id))

    # admin-only resource
    if not context.is_admin:
        raise n_exc.NotAuthorized()

    with context.session.begin():
        sa_range = db_api.segment_allocation_range_find(
            context, id=sa_id, scope=db_api.ONE)
        if not sa_range:
            raise q_exc.SegmentAllocationRangeNotFound(
                segment_allocation_range_id=sa_id)
        _delete_segment_allocation_range(context, sa_range)
python
{ "resource": "" }
q260631
filter_factory
validation
def filter_factory(global_conf, **local_conf):
    """Returns a WSGI filter app for use with paste.deploy."""
    # merge paste's global config with this filter's local overrides
    conf = global_conf.copy()
    conf.update(local_conf)

    def wrapper(app):
        return ResponseAsyncIdAdder(app, conf)

    return wrapper
python
{ "resource": "" }
q260632
get_used_ips
validation
def get_used_ips(session, **kwargs):
    """Returns dictionary with keys segment_id and value used IPs count.

    Used IP address count is determined by:
    - allocated IPs
    - deallocated IPs whose `deallocated_at` is within the `reuse_after`
    window compared to the present time, excluding IPs that are accounted for
    in the current IP policy (because IP policy is mutable and deallocated IPs
    are not checked nor deleted on IP policy creation, thus deallocated IPs
    that don't fit the current IP policy can exist in the neutron database).
    """
    LOG.debug("Getting used IPs...")
    with session.begin():
        # count addresses per segment
        query = session.query(
            models.Subnet.segment_id,
            func.count(models.IPAddress.address))
        query = query.group_by(models.Subnet.segment_id)
        query = _filter(query, **kwargs)

        # addresses deallocated after this instant are still "in use"
        reuse_window = timeutils.utcnow() - datetime.timedelta(
            seconds=cfg.CONF.QUARK.ipam_reuse_after)
        # NOTE(asadoughi): This is an outer join instead of a regular join
        # to include subnets with zero IP addresses in the database.
        query = query.outerjoin(
            models.IPAddress,
            and_(models.Subnet.id == models.IPAddress.subnet_id,
                 or_(not_(models.IPAddress.lock_id.is_(None)),
                     models.IPAddress._deallocated.is_(None),
                     models.IPAddress._deallocated == 0,
                     models.IPAddress.deallocated_at > reuse_window)))
        # join IP policy CIDRs so policy-excluded deallocated IPs can be
        # filtered out below
        query = query.outerjoin(
            models.IPPolicyCIDR,
            and_(
                models.Subnet.ip_policy_id ==
                models.IPPolicyCIDR.ip_policy_id,
                models.IPAddress.address >= models.IPPolicyCIDR.first_ip,
                models.IPAddress.address <= models.IPPolicyCIDR.last_ip))
        # NOTE(asadoughi): (address is allocated) OR
        # (address is deallocated and not inside subnet's IP policy)
        query = query.filter(or_(
            models.IPAddress._deallocated.is_(None),
            models.IPAddress._deallocated == 0,
            models.IPPolicyCIDR.id.is_(None)))

        ret = ((segment_id, address_count)
               for segment_id, address_count in query.all())
        return dict(ret)
python
{ "resource": "" }
q260633
get_unused_ips
validation
def get_unused_ips(session, used_ips_counts, **kwargs):
    """Returns dictionary with key segment_id, and value unused IPs count.

    Unused IP address count is determined by:
    - adding subnet's cidr's size
    - subtracting IP policy exclusions on subnet
    - subtracting used ips per segment
    """
    LOG.debug("Getting unused IPs...")

    with session.begin():
        query = session.query(
            models.Subnet.segment_id, models.Subnet)
        query = _filter(query, **kwargs)
        query = query.group_by(models.Subnet.segment_id, models.Subnet.id)

        unused = defaultdict(int)
        for segment_id, subnet in query.all():
            cidr_size = netaddr.IPNetwork(subnet._cidr).size
            policy = subnet["ip_policy"] or {"size": 0}
            unused[segment_id] += cidr_size - policy["size"]

        for segment_id, used in used_ips_counts.items():
            unused[segment_id] -= used

        return unused
python
{ "resource": "" }
q260634
XapiClient.get_interfaces
validation
def get_interfaces(self):
    """Returns a set of VIFs from `get_instances` return value."""
    LOG.debug("Getting interfaces from Xapi")

    with self.sessioned() as session:
        instances = self.get_instances(session)
        vif_recs = session.xenapi.VIF.get_all_records()

        interfaces = set()
        for vif_ref, rec in vif_recs.iteritems():
            # only keep VIFs whose VM we know about
            vm = instances.get(rec["VM"])
            if not vm:
                continue
            interfaces.add(VIF(vm.uuid, rec, vif_ref))
        return interfaces
python
{ "resource": "" }
q260635
XapiClient.update_interfaces
validation
def update_interfaces(self, added_sg, updated_sg, removed_sg):
    """Handles changes to interfaces' security groups

    Calls refresh_interfaces on argument VIFs. Set security groups on
    added_sg's VIFs. Unsets security groups on removed_sg's VIFs.
    """
    if not any((added_sg, updated_sg, removed_sg)):
        return

    with self.sessioned() as session:
        self._set_security_groups(session, added_sg)
        self._unset_security_groups(session, removed_sg)
        combined = added_sg + updated_sg + removed_sg
        self._refresh_interfaces(session, combined)
python
{ "resource": "" }
q260636
update_network
validation
def update_network(context, id, network):
    """Update values of a network.

    : param context: neutron api request context
    : param id: UUID representing the network to update.
    : param network: dictionary with keys indicating fields to update.
        valid keys are those that have a value of True for 'allow_put'
        as listed in the RESOURCE_ATTRIBUTE_MAP object in
        neutron/api/v2/attributes.py.
    :raises NetworkNotFound: if no network with the given id exists.
    """
    LOG.info("update_network %s for tenant %s" % (id, context.tenant_id))
    with context.session.begin():
        net = db_api.network_find(context, id=id, scope=db_api.ONE)
        if not net:
            raise n_exc.NetworkNotFound(net_id=id)
        net_dict = network["network"]
        # network_plugin is never updatable; ipam_strategy is admin-only.
        utils.pop_param(net_dict, "network_plugin")
        if not context.is_admin and "ipam_strategy" in net_dict:
            utils.pop_param(net_dict, "ipam_strategy")
        net = db_api.network_update(context, net, **net_dict)
    return v._make_network_dict(net)
python
{ "resource": "" }
q260637
get_network
validation
def get_network(context, id, fields=None):
    """Retrieve a network.

    : param context: neutron api request context
    : param id: UUID representing the network to fetch.
    : param fields: a list of strings that are valid keys in a network
        dictionary as listed in the RESOURCE_ATTRIBUTE_MAP object in
        neutron/api/v2/attributes.py. Only these fields will be returned.
    :raises NetworkNotFound: if no network with the given id exists.
    """
    LOG.info("get_network %s for tenant %s fields %s" %
             (id, context.tenant_id, fields))
    network = db_api.network_find(context=context, limit=None, sorts=['id'],
                                  marker=None, page_reverse=False, id=id,
                                  join_subnets=True, scope=db_api.ONE)
    if not network:
        raise n_exc.NetworkNotFound(net_id=id)
    return v._make_network_dict(network, fields=fields)
python
{ "resource": "" }
q260638
get_networks
validation
def get_networks(context, limit=None, sorts=['id'], marker=None,
                 page_reverse=False, filters=None, fields=None):
    """Retrieve a list of networks.

    The contents of the list depends on the identity of the user making
    the request (as indicated by the context) as well as any filters.

    : param context: neutron api request context
    : param filters: a dictionary with keys that are valid keys for a
        network as listed in the RESOURCE_ATTRIBUTE_MAP object in
        neutron/api/v2/attributes.py. Values in this dictiontary are an
        iterable containing values that will be used for an exact match
        comparison for that value. Each result returned by this function
        will have matched one of the values for each key in filters.
    : param fields: a list of strings that are valid keys in a network
        dictionary as listed in the RESOURCE_ATTRIBUTE_MAP object in
        neutron/api/v2/attributes.py. Only these fields will be returned.

    NOTE(review): ``sorts`` has a mutable default list; it is only read
    here and passed through, so this is harmless as written.
    """
    LOG.info("get_networks for tenant %s with filters %s, fields %s" %
             (context.tenant_id, filters, fields))
    filters = filters or {}
    nets = db_api.network_find(context, limit, sorts, marker, page_reverse,
                               join_subnets=True, **filters) or []
    nets = [v._make_network_dict(net, fields=fields) for net in nets]
    return nets
python
{ "resource": "" }
q260639
get_networks_count
validation
def get_networks_count(context, filters=None):
    """Return the number of networks.

    The result depends on the identity of the user making the request
    (as indicated by the context) as well as any filters.

    : param context: neutron api request context
    : param filters: a dictionary with keys that are valid keys for a
        network as listed in the RESOURCE_ATTRIBUTE_MAP object in
        neutron/api/v2/attributes.py. Values in this dictiontary are an
        iterable containing values that will be used for an exact match
        comparison for that value. Each result returned by this function
        will have matched one of the values for each key in filters.

    NOTE: this method is optional, as it was not part of the originally
    defined plugin API.

    NOTE(review): ``filters`` is logged but not passed to
    network_count_all(), so the count is unfiltered — confirm intended.
    """
    LOG.info("get_networks_count for tenant %s filters %s" %
             (context.tenant_id, filters))
    return db_api.network_count_all(context)
python
{ "resource": "" }
q260640
delete_network
validation
def delete_network(context, id):
    """Delete a network.

    : param context: neutron api request context
    : param id: UUID representing the network to delete.
    :raises NetworkNotFound: if the network does not exist.
    :raises NotAuthorized: if a non-admin tries to delete a provider net.
    :raises NetworkInUse: if ports are still attached to the network.
    """
    LOG.info("delete_network %s for tenant %s" % (id, context.tenant_id))
    with context.session.begin():
        net = db_api.network_find(context=context, limit=None, sorts=['id'],
                                  marker=None, page_reverse=False, id=id,
                                  scope=db_api.ONE)
        if not net:
            raise n_exc.NetworkNotFound(net_id=id)
        # Only admins may delete provider networks.
        if not context.is_admin:
            if STRATEGY.is_provider_network(net.id):
                raise n_exc.NotAuthorized(net_id=id)
        if net.ports:
            raise n_exc.NetworkInUse(net_id=id)
        # Tear down backend state first, then subnets, then the DB row.
        net_driver = registry.DRIVER_REGISTRY.get_driver(
            net["network_plugin"])
        net_driver.delete_network(context, id)
        for subnet in net["subnets"]:
            subnets._delete_subnet(context, subnet)
        db_api.network_delete(context, net)
python
{ "resource": "" }
q260641
make_case2
validation
def make_case2(context):
    """This is a helper method for testing.

    When run with the current context, it will create a case 2 entries
    in the database. See top of file for what case 2 is.

    :param context: neutron api request context providing the DB session.
    """
    # Import at function top rather than mid-body (original imported
    # random between statements).
    import random

    query = context.session.query(models.IPAddress)
    period_start, period_end = billing.calc_periods()
    ip_list = billing.build_full_day_ips(query, period_start, period_end)
    # Pick a random full-day IP and back-date its allocation by one day
    # so it becomes a partial-day ("case 2") entry.
    address = random.choice(ip_list)
    address.allocated_at = (datetime.datetime.utcnow() -
                            datetime.timedelta(days=1))
    context.session.add(address)
    context.session.flush()
python
{ "resource": "" }
q260642
main
validation
def main(notify, hour, minute): """Runs billing report. Optionally sends notifications to billing""" # Read the config file and get the admin context config_opts = ['--config-file', '/etc/neutron/neutron.conf'] config.init(config_opts) # Have to load the billing module _after_ config is parsed so # that we get the right network strategy network_strategy.STRATEGY.load() billing.PUBLIC_NETWORK_ID = network_strategy.STRATEGY.get_public_net_id() config.setup_logging() context = neutron_context.get_admin_context() # A query to get all IPAddress objects from the db query = context.session.query(models.IPAddress) (period_start, period_end) = billing.calc_periods(hour, minute) full_day_ips = billing.build_full_day_ips(query, period_start, period_end) partial_day_ips = billing.build_partial_day_ips(query, period_start, period_end) if notify: # '==================== Full Day =============================' for ipaddress in full_day_ips: click.echo('start: {}, end: {}'.format(period_start, period_end)) payload = billing.build_payload(ipaddress, billing.IP_EXISTS, start_time=period_start, end_time=period_end) billing.do_notify(context, billing.IP_EXISTS, payload) # '==================== Part Day =============================' for ipaddress in partial_day_ips: click.echo('start: {}, end: {}'.format(period_start, period_end)) payload = billing.build_payload(ipaddress, billing.IP_EXISTS, start_time=ipaddress.allocated_at, end_time=period_end) billing.do_notify(context, billing.IP_EXISTS, payload) else: click.echo('Case 1 ({}):\n'.format(len(full_day_ips))) for ipaddress in full_day_ips: pp(billing.build_payload(ipaddress, billing.IP_EXISTS, start_time=period_start, end_time=period_end)) click.echo('\n===============================================\n') click.echo('Case 2 ({}):\n'.format(len(partial_day_ips))) for ipaddress in partial_day_ips: pp(billing.build_payload(ipaddress, billing.IP_EXISTS, start_time=ipaddress.allocated_at, end_time=period_end))
python
{ "resource": "" }
q260643
QuarkAsyncPluginBase.start_rpc_listeners
validation
def start_rpc_listeners(self):
    """Configure all listeners here"""
    self._setup_rpc()
    if self.endpoints:
        self.conn = n_rpc.create_connection()
        self.conn.create_consumer(self.topic, self.endpoints, fanout=False)
        return self.conn.consume_in_threads()
    # No endpoints registered: nothing to consume.
    return []
python
{ "resource": "" }
q260644
QuarkAsyncPluginBase.context
validation
def context(self):
    """Provides an admin context for workers."""
    # Lazily build the admin context once, then reuse the cached value.
    if self._context:
        return self._context
    self._context = context.get_admin_context()
    return self._context
python
{ "resource": "" }
q260645
QuarkSGAsyncProcessCallback.update_sg
validation
def update_sg(self, context, sg, rule_id, action):
    """Begins the async update process.

    :param context: neutron api request context.
    :param sg: security group id whose rule changed.
    :param rule_id: id of the changed rule (recorded on the job).
    :param action: human-readable action string for the job description.
    :returns: {"job_id": <id>} on success, None if the group does not
        exist or the RPC cast to the producer times out.
    """
    db_sg = db_api.security_group_find(context, id=sg, scope=db_api.ONE)
    if not db_sg:
        return None
    with context.session.begin():
        job_body = dict(action="%s sg rule %s" % (action, rule_id),
                        resource_id=rule_id,
                        tenant_id=db_sg['tenant_id'])
        job_body = dict(job=job_body)
        # Parent job is created elevated so workers can always update it.
        job = job_api.create_job(context.elevated(), job_body)
        rpc_client = QuarkSGAsyncProducerClient()
        try:
            rpc_client.populate_subtasks(context, sg, job['id'])
        except om_exc.MessagingTimeout:
            LOG.error("Failed to create subtasks. Rabbit running?")
            return None
        return {"job_id": job['id']}
python
{ "resource": "" }
q260646
QuarkSGProducerCallback.populate_subtasks
validation
def populate_subtasks(self, context, sg, parent_job_id):
    """Produces a list of ports to be updated async.

    Creates one child job per associated port and casts an update_port
    RPC for each.

    :returns: None if the group is missing or an RPC cast times out;
        {"ports": 0} if the group has no associated ports.
    """
    db_sg = db_api.security_group_find(context, id=sg, scope=db_api.ONE)
    if not db_sg:
        return None
    ports = db_api.sg_gather_associated_ports(context, db_sg)
    if len(ports) == 0:
        return {"ports": 0}
    for port in ports:
        job_body = dict(action="update port %s" % port['id'],
                        tenant_id=db_sg['tenant_id'],
                        resource_id=port['id'],
                        parent_id=parent_job_id)
        job_body = dict(job=job_body)
        job = job_api.create_job(context.elevated(), job_body)
        rpc_consumer = QuarkSGAsyncConsumerClient()
        try:
            rpc_consumer.update_port(context, port['id'], job['id'])
        except om_exc.MessagingTimeout:
            # TODO(roaet): Not too sure what can be done here other than
            # updating the job as a failure?
            LOG.error("Failed to update port. Rabbit running?")
            return None
python
{ "resource": "" }
q260647
QuarkSGConsumerCallback.update_ports_for_sg
validation
def update_ports_for_sg(self, context, portid, jobid):
    """Updates the ports through redis.

    Retries the backend driver update a fixed number of times, then
    marks the job completed (with a failure status string if all
    retries were exhausted).
    """
    port = db_api.port_find(context, id=portid, scope=db_api.ONE)
    if not port:
        LOG.warning("Port not found")
        return
    net_driver = port_api._get_net_driver(port.network, port=port)
    base_net_driver = port_api._get_net_driver(port.network)
    sg_list = [sg for sg in port.security_groups]

    success = False
    error = None
    retries = 3
    retry_delay = 2
    # NOTE: xrange and reading `error` after the except block are
    # Python 2 semantics (py3 deletes the except-bound name).
    for retry in xrange(retries):
        try:
            net_driver.update_port(context, port_id=port["backend_key"],
                                   mac_address=port["mac_address"],
                                   device_id=port["device_id"],
                                   base_net_driver=base_net_driver,
                                   security_groups=sg_list)
            success = True
            error = None
            break
        except Exception as error:
            LOG.warning("Could not connect to redis, but retrying soon")
            time.sleep(retry_delay)
    status_str = ""
    if not success:
        status_str = "Port %s update failed after %d tries. Error: %s" % (
            portid, retries, error)
    update_body = dict(completed=True, status=status_str)
    update_body = dict(job=update_body)
    job_api.update_job(context.elevated(), jobid, update_body)
python
{ "resource": "" }
q260648
sg_gather_associated_ports
validation
def sg_gather_associated_ports(context, group):
    """Gather all ports associated to security group.

    Returns:
    * list, or None
    """
    # No group at all -> None; group without ports -> empty list.
    if not group:
        return None
    ports = getattr(group, "ports", None)
    return ports if ports else []
python
{ "resource": "" }
q260649
security_group_rule_update
validation
def security_group_rule_update(context, rule, **kwargs):
    '''Updates a security group rule.

    NOTE(alexm) this is non-standard functionality.
    '''
    # Apply the field changes, then attach the row to the session so the
    # update is persisted on flush.
    rule.update(kwargs)
    context.session.add(rule)
    return rule
python
{ "resource": "" }
q260650
segment_allocation_find
validation
def segment_allocation_find(context, lock_mode=False, **filters):
    """Query for segment allocations.

    :param context: request context providing the DB session.
    :param lock_mode: when True, apply SELECT ... FOR UPDATE locking.
    :param filters: column filters; the special key
        'segment_allocation_range_ids' restricts results to those ranges.
    :returns: an un-executed SQLAlchemy Query.
    """
    range_ids = filters.pop("segment_allocation_range_ids", None)

    query = context.session.query(models.SegmentAllocation)
    if lock_mode:
        query = query.with_lockmode("update")

    query = query.filter_by(**filters)

    # Optionally filter by given list of range ids.
    if range_ids:
        # BUG FIX: Query.filter() returns a *new* Query; the original code
        # discarded the result, so the range filter was never applied.
        query = query.filter(
            models.SegmentAllocation.segment_allocation_range_id.in_(
                range_ids))
    return query
python
{ "resource": "" }
q260651
NikoHomeControlConnection.send
validation
def send(self, s):
    """
    Sends the given command to Niko Home Control and returns the output
    of the system.

    Aliases: write, put, sendall, send_all

    :param s: command string; encoded to bytes before transmission.
    :returns: the response read back from the socket via self.read().
    """
    # BUG FIX: socket.send() may transmit only part of the buffer;
    # sendall() retries until the whole payload is sent.
    self._socket.sendall(s.encode())
    return self.read()
python
{ "resource": "" }
q260652
if_
validation
def if_(*args):
    """Implements the 'if' operator with support for multiple elseif-s."""
    # Arguments arrive as (cond, value, cond, value, ..., [default]).
    for condition, value in zip(args[::2], args[1::2]):
        if condition:
            return value
    # An odd argument count means a trailing default value.
    return args[-1] if len(args) % 2 else None
python
{ "resource": "" }
q260653
soft_equals
validation
def soft_equals(a, b):
    """Implements the '==' operator, which does type JS-style coertion."""
    # If either side is a string, compare string representations.
    if any(isinstance(operand, str) for operand in (a, b)):
        return str(a) == str(b)
    # If either side is a bool, compare truthiness.
    if any(isinstance(operand, bool) for operand in (a, b)):
        return bool(a) is bool(b)
    return a == b
python
{ "resource": "" }
q260654
less
validation
def less(a, b, *args):
    """Implements the '<' operator with JS-style type coertion.

    Extra positional args chain the comparison: less(1, 2, 3) == 1<2<3.
    Non-numeric operands that cannot be coerced compare as False (NaN
    semantics).
    """
    types = set([type(a), type(b)])
    if float in types or int in types:
        try:
            a, b = float(a), float(b)
        # BUG FIX: float("abc") raises ValueError, which the original
        # only-TypeError handler let escape; both now mean "NaN" -> False.
        except (TypeError, ValueError):
            return False
    return a < b and (not args or less(b, *args))
python
{ "resource": "" }
q260655
less_or_equal
validation
def less_or_equal(a, b, *args):
    """Implements the '<=' operator with JS-style type coertion."""
    # a <= b fails -> whole chain fails.
    if not (less(a, b) or soft_equals(a, b)):
        return False
    # Otherwise continue the chain across any remaining operands.
    return not args or less_or_equal(b, *args)
python
{ "resource": "" }
q260656
minus
validation
def minus(*args):
    """Also, converts either to ints or to floats."""
    first = to_numeric(args[0])
    # Single operand means unary negation.
    if len(args) == 1:
        return -first
    return first - to_numeric(args[1])
python
{ "resource": "" }
q260657
merge
validation
def merge(*args):
    """Implements the 'merge' operator for merging lists."""
    merged = []
    for item in args:
        # Flatten exactly one level of lists/tuples; scalars (including
        # strings) pass through unchanged.
        if isinstance(item, (list, tuple)):
            merged.extend(item)
        else:
            merged.append(item)
    return merged
python
{ "resource": "" }
q260658
get_var
validation
def get_var(data, var_name, not_found=None): """Gets variable value from data dictionary.""" try: for key in str(var_name).split('.'): try: data = data[key] except TypeError: data = data[int(key)] except (KeyError, TypeError, ValueError): return not_found else: return data
python
{ "resource": "" }
q260659
missing
validation
def missing(data, *args):
    """Implements the missing operator for finding missing variables."""
    not_found = object()
    # A single list argument is itself the list of variable names.
    if args and isinstance(args[0], list):
        args = args[0]
    return [name for name in args
            if get_var(data, name, not_found) is not_found]
python
{ "resource": "" }
q260660
missing_some
validation
def missing_some(data, min_required, args):
    """Implements the missing_some operator for finding missing variables."""
    if min_required < 1:
        return []
    not_found = object()
    present = 0
    absent = []
    for name in args:
        if get_var(data, name, not_found) is not_found:
            absent.append(name)
        else:
            present += 1
            # Enough variables are present: nothing counts as missing.
            if present >= min_required:
                return []
    return absent
python
{ "resource": "" }
q260661
jsonLogic
validation
def jsonLogic(tests, data=None):
    """Executes the json-logic with given data."""
    # Primitives (including None) evaluate to themselves.
    if tests is None or not isinstance(tests, dict):
        return tests

    data = data or {}
    operator = list(tests.keys())[0]
    values = tests[operator]

    # Unary sugar: {"var": "x"} is shorthand for {"var": ["x"]}.
    if not isinstance(values, (list, tuple)):
        values = [values]

    # Evaluate arguments depth-first.
    values = [jsonLogic(val, data) for val in values]

    # Data-access operators need the raw data dict, not just the values.
    if operator == 'var':
        return get_var(data, *values)
    if operator == 'missing':
        return missing(data, *values)
    if operator == 'missing_some':
        return missing_some(data, *values)

    if operator not in operations:
        raise ValueError("Unrecognized operation %s" % operator)
    return operations[operator](*values)
python
{ "resource": "" }
q260662
PyIndenterMode.indent
validation
def indent(self):
    """ Performs an indentation

    If tab_always_indent is off, defers to the base class behaviour.
    Otherwise a selection is indented as a block, and without a
    selection indentation is inserted at the cursor position.
    """
    if not self.tab_always_indent:
        super(PyIndenterMode, self).indent()
    else:
        cursor = self.editor.textCursor()
        assert isinstance(cursor, QtGui.QTextCursor)
        if cursor.hasSelection():
            self.indent_selection(cursor)
        else:
            # simply insert indentation at the cursor position
            tab_len = self.editor.tab_length
            # beginEditBlock/endEditBlock group the insert into a single
            # undo step.
            cursor.beginEditBlock()
            if self.editor.use_spaces_instead_of_tabs:
                cursor.insertText(tab_len * " ")
            else:
                cursor.insertText('\t')
            cursor.endEditBlock()
            self.editor.setTextCursor(cursor)
python
{ "resource": "" }
q260663
PyIndenterMode.unindent
validation
def unindent(self):
    """ Performs an un-indentation """
    # Without tab_always_indent, the base class handles everything.
    if not self.tab_always_indent:
        super(PyIndenterMode, self).unindent()
        return
    cursor = self.editor.textCursor()
    # With no selection, operate on the whole current line.
    if not cursor.hasSelection():
        cursor.select(cursor.LineUnderCursor)
    self.unindent_selection(cursor)
python
{ "resource": "" }
q260664
PyAutoIndentMode._handle_indent_between_paren
validation
def _handle_indent_between_paren(self, column, line, parent_impl, tc):
    """
    Handle indent between symbols such as parenthesis, braces,...

    :param column: cursor column used to locate the paren pair.
    :param line: current line text (unused directly here).
    :param parent_impl: (pre, post) indentation strings computed by the
        base implementation; refined and returned by this method.
    :param tc: QTextCursor positioned at the break point.
    :returns: refined (pre, post) tuple.
    """
    pre, post = parent_impl
    next_char = self._get_next_char(tc)
    prev_char = self._get_prev_char(tc)
    prev_open = prev_char in ['[', '(', '{']
    next_close = next_char in [']', ')', '}']
    (open_line, open_symbol_col), (close_line, close_col) = \
        self._get_paren_pos(tc, column)
    open_line_txt = self._helper.line_text(open_line)
    open_line_indent = len(open_line_txt) - len(open_line_txt.lstrip())
    # Choose the hanging indent relative to the opening symbol's line.
    if prev_open:
        post = (open_line_indent + self.editor.tab_length) * ' '
    elif next_close and prev_char != ',':
        post = open_line_indent * ' '
    elif tc.block().blockNumber() == open_line:
        post = open_symbol_col * ' '

    # adapt indent if cursor on closing line and next line have same
    # indent -> PEP8 compliance
    if close_line and close_col:
        txt = self._helper.line_text(close_line)
        bn = tc.block().blockNumber()
        flg = bn == close_line
        next_indent = self._helper.line_indent(bn + 1) * ' '
        if flg and txt.strip().endswith(':') and next_indent == post:
            # | look at how the previous line ( ``':'):`` ) was
            # over-indented, this is actually what we are trying to
            # achieve here
            post += self.editor.tab_length * ' '

    # breaking string
    # Peek left of a quote so is_comment_or_string sees the string body.
    if next_char in ['"', "'"]:
        tc.movePosition(tc.Left)
    is_string = self._helper.is_comment_or_string(tc, formats=['string'])
    if next_char in ['"', "'"]:
        tc.movePosition(tc.Right)
    if is_string:
        # Walk back to the opening quote and repeat it on both sides of
        # the break so the string stays syntactically valid.
        trav = QTextCursor(tc)
        while self._helper.is_comment_or_string(
                trav, formats=['string']):
            trav.movePosition(trav.Left)
        trav.movePosition(trav.Right)
        symbol = '%s' % self._get_next_char(trav)
        pre += symbol
        post += symbol
    return pre, post
python
{ "resource": "" }
q260665
PyAutoIndentMode._at_block_start
validation
def _at_block_start(tc, line): """ Improve QTextCursor.atBlockStart to ignore spaces """ if tc.atBlockStart(): return True column = tc.columnNumber() indentation = len(line) - len(line.lstrip()) return column <= indentation
python
{ "resource": "" }
q260666
PyConsole.update_terminal_colors
validation
def update_terminal_colors(self):
    """
    Update terminal color scheme based on the pygments color scheme colors
    """
    # Mirror the highlighter's pygments scheme onto the terminal.
    scheme = self.syntax_highlighter.color_scheme
    normal_fg = scheme.formats['normal'].foreground().color()
    self.color_scheme = self.create_color_scheme(
        background=scheme.background,
        foreground=normal_fg)
python
{ "resource": "" }
q260667
PyInteractiveConsole.mouseMoveEvent
validation
def mouseMoveEvent(self, e):
    """
    Extends mouseMoveEvent to display a pointing hand cursor when the
    mouse cursor is over a file location
    """
    super(PyInteractiveConsole, self).mouseMoveEvent(e)
    cursor = self.cursorForPosition(e.pos())
    assert isinstance(cursor, QtGui.QTextCursor)
    p = cursor.positionInBlock()
    # Block user data (set elsewhere) marks the span of a file location
    # within the block.
    usd = cursor.block().userData()
    if usd and usd.start_pos_in_block <= p <= usd.end_pos_in_block:
        # Only set the override cursor once; repeated sets would stack.
        if QtWidgets.QApplication.overrideCursor() is None:
            QtWidgets.QApplication.setOverrideCursor(
                QtGui.QCursor(QtCore.Qt.PointingHandCursor))
    else:
        # Left the clickable span: restore the normal cursor.
        if QtWidgets.QApplication.overrideCursor() is not None:
            QtWidgets.QApplication.restoreOverrideCursor()
python
{ "resource": "" }
q260668
PyInteractiveConsole.mousePressEvent
validation
def mousePressEvent(self, e):
    """
    Emits open_file_requested if the press event occured over
    a file location string.
    """
    super(PyInteractiveConsole, self).mousePressEvent(e)
    # Only left clicks navigate to a file location.
    if e.button() != QtCore.Qt.LeftButton:
        return
    cursor = self.cursorForPosition(e.pos())
    position = cursor.positionInBlock()
    usd = cursor.block().userData()
    if usd and usd.start_pos_in_block <= position <= usd.end_pos_in_block:
        self.open_file_requested.emit(usd.filename, usd.line)
python
{ "resource": "" }
q260669
MainWindow.setup_actions
validation
def setup_actions(self):
    """ Connects slots to signals

    Pure signal wiring: file actions, tab-widget notifications, run
    actions and the interactive console's process lifecycle.
    """
    self.actionOpen.triggered.connect(self.on_open)
    self.actionNew.triggered.connect(self.on_new)
    self.actionSave.triggered.connect(self.on_save)
    self.actionSave_as.triggered.connect(self.on_save_as)
    self.actionQuit.triggered.connect(
        QtWidgets.QApplication.instance().quit)
    self.tabWidget.current_changed.connect(self.on_current_tab_changed)
    self.tabWidget.last_tab_closed.connect(self.on_last_tab_closed)
    self.actionAbout.triggered.connect(self.on_about)
    self.actionRun.triggered.connect(self.on_run)
    # Re-enable run actions when the launched script terminates.
    self.interactiveConsole.process_finished.connect(
        self.on_process_finished)
    self.actionConfigure_run.triggered.connect(self.on_configure_run)
python
{ "resource": "" }
q260670
MainWindow.setup_editor
validation
def setup_editor(self, editor):
    """
    Setup the python editor, run the server and connect a few signals.

    :param editor: editor to setup.
    """
    editor.cursorPositionChanged.connect(self.on_cursor_pos_changed)
    try:
        m = editor.modes.get(modes.GoToAssignmentsMode)
    except KeyError:
        # Mode not installed on this editor; nothing to connect.
        pass
    else:
        assert isinstance(m, modes.GoToAssignmentsMode)
        # Open a new tab when a goto target lives in another document.
        m.out_of_doc.connect(self.on_goto_out_of_doc)
python
{ "resource": "" }
q260671
MainWindow.open_file
validation
def open_file(self, path, line=None):
    """
    Creates a new GenericCodeEdit, opens the requested file and adds it
    to the tab widget.

    :param path: Path of the file to open
    :param line: optional line to move the cursor to after opening.
    :return The opened editor if open succeeded.
    """
    editor = None
    if path:
        interpreter, pyserver, args = self._get_backend_parameters()
        editor = self.tabWidget.open_document(
            path, None, interpreter=interpreter, server_script=pyserver,
            args=args)
        if editor:
            self.setup_editor(editor)
        # Track the file in the recent-files menu either way.
        self.recent_files_manager.open_file(path)
        self.menu_recents.update_actions()
    if line is not None:
        TextHelper(self.tabWidget.current_widget()).goto_line(line)
    return editor
python
{ "resource": "" }
q260672
MainWindow.on_new
validation
def on_new(self):
    """
    Add a new empty code editor to the tab widget
    """
    interpreter, pyserver, args = self._get_backend_parameters()
    editor = self.tabWidget.create_new_document(
        extension='.py', interpreter=interpreter, server_script=pyserver,
        args=args)
    self.setup_editor(editor)
    # A brand-new unsaved buffer has no path, so it cannot be run yet.
    self.actionRun.setDisabled(True)
    self.actionConfigure_run.setDisabled(True)
python
{ "resource": "" }
q260673
MainWindow.on_open
validation
def on_open(self):
    """
    Shows an open file dialog and open the file if the dialog was
    accepted.
    """
    filename, _selected_filter = QtWidgets.QFileDialog.getOpenFileName(
        self, 'Open')
    # Dialog cancelled -> empty filename -> nothing to do.
    if not filename:
        return
    self.open_file(filename)
    self.actionRun.setEnabled(True)
    self.actionConfigure_run.setEnabled(True)
python
{ "resource": "" }
q260674
MainWindow.on_save_as
validation
def on_save_as(self):
    """
    Save the current editor document as.
    """
    current_path = self.tabWidget.current_widget().file.path
    # Start the dialog in the current file's directory when there is one.
    initial_dir = os.path.dirname(current_path) if current_path else ''
    filename, _selected_filter = QtWidgets.QFileDialog.getSaveFileName(
        self, 'Save', initial_dir)
    if not filename:
        return
    self.tabWidget.save_current(filename)
    self.recent_files_manager.open_file(filename)
    self.menu_recents.update_actions()
    self.actionRun.setEnabled(True)
    self.actionConfigure_run.setEnabled(True)
    self._update_status_bar(self.tabWidget.current_widget())
python
{ "resource": "" }
q260675
MainWindow.setup_mnu_style
validation
def setup_mnu_style(self, editor):
    """ setup the style menu for an editor tab

    Builds an exclusive action group of pygments styles, pre-checking
    the editor's current color scheme.
    """
    menu = QtWidgets.QMenu('Styles', self.menuEdit)
    group = QtWidgets.QActionGroup(self)
    self.styles_group = group
    current_style = editor.syntax_highlighter.color_scheme.name
    group.triggered.connect(self.on_style_changed)
    for s in sorted(PYGMENTS_STYLES):
        a = QtWidgets.QAction(menu)
        a.setText(s)
        a.setCheckable(True)
        if s == current_style:
            a.setChecked(True)
        group.addAction(a)
        menu.addAction(a)
    self.menuEdit.addMenu(menu)
python
{ "resource": "" }
q260676
MainWindow.on_current_tab_changed
validation
def on_current_tab_changed(self):
    """
    Update action states when the current tab changed.
    """
    self.menuEdit.clear()
    self.menuModes.clear()
    self.menuPanels.clear()
    editor = self.tabWidget.current_widget()
    has_editor = editor is not None
    # Everything editor-related toggles together.
    for widget in (self.menuEdit, self.menuModes, self.menuPanels,
                   self.actionSave, self.actionSave_as,
                   self.actionConfigure_run, self.actionRun):
        widget.setEnabled(has_editor)
    if has_editor:
        self.setup_mnu_edit(editor)
        self.setup_mnu_modes(editor)
        self.setup_mnu_panels(editor)
    self.widgetOutline.set_editor(editor)
    self._update_status_bar(editor)
python
{ "resource": "" }
q260677
MainWindow.on_run
validation
def on_run(self):
    """
    Run the current current script

    Launches the configured interpreter on the current file (with its
    saved run arguments) inside the interactive console, working
    directory set to the script's directory.
    """
    filename = self.tabWidget.current_widget().file.path
    wd = os.path.dirname(filename)
    args = Settings().get_run_config_for_file(filename)
    self.interactiveConsole.start_process(
        Settings().interpreter, args=[filename] + args, cwd=wd)
    self.dockWidget.show()
    # Disable run actions until the process finishes.
    self.actionRun.setEnabled(False)
    self.actionConfigure_run.setEnabled(False)
python
{ "resource": "" }
q260678
MainWindow.on_goto_out_of_doc
validation
def on_goto_out_of_doc(self, assignment):
    """
    Open the a new tab when goto goes out of the current document.

    :param assignment: Destination
    """
    editor = self.open_file(assignment.module_path)
    # open_file returns None when the open failed; nothing to jump to.
    if not editor:
        return
    TextHelper(editor).goto_line(assignment.line, assignment.column)
python
{ "resource": "" }
q260679
calltips
validation
def calltips(request_data):
    """
    Worker that returns a list of calltips.

    A calltips is a tuple made of the following parts:
      - module_name: name of the module of the function invoked
      - call_name: name of the function that is being called
      - params: the list of parameter names.
      - index: index of the current parameter
      - bracket_start

    :returns tuple(module_name, call_name, params)
    """
    code = request_data['code']
    # jedi lines are 1-based; the editor sends 0-based lines.
    line = request_data['line'] + 1
    column = request_data['column']
    path = request_data['path']
    # encoding = request_data['encoding']
    encoding = 'utf-8'
    # use jedi to get call signatures
    script = jedi.Script(code, line, column, path, encoding)
    signatures = script.call_signatures()
    # NOTE: only the FIRST signature is returned — the loop returns on
    # its first iteration (see todo below).
    for sig in signatures:
        results = (str(sig.module_name), str(sig.name),
                   [p.description for p in sig.params], sig.index,
                   sig.bracket_start, column)
        # todo: add support for multiple signatures, for that we need a custom
        # widget for showing calltips.
        return results
    return []
python
{ "resource": "" }
q260680
goto_assignments
validation
def goto_assignments(request_data):
    """
    Go to assignements worker.

    :param request_data: dict with 'code', 'line' (0-based), 'column'
        and 'path' keys.
    :returns: list of (module_path, line or None, column, full_name)
        tuples; empty list when jedi finds nothing.
    """
    code = request_data['code']
    # jedi lines are 1-based; the editor sends 0-based lines.
    line = request_data['line'] + 1
    column = request_data['column']
    path = request_data['path']
    # encoding = request_data['encoding']
    encoding = 'utf-8'
    script = jedi.Script(code, line, column, path, encoding)
    try:
        definitions = script.goto_assignments()
    except jedi.NotFoundError:
        # FIX: return [] (like the other jedi workers, e.g. quick_doc)
        # instead of implicitly returning None on failure.
        return []
    return [(d.module_path, d.line - 1 if d.line else None, d.column,
             d.full_name) for d in definitions]
python
{ "resource": "" }
q260681
defined_names
validation
def defined_names(request_data):
    """
    Returns the list of defined names for the document.

    Imports are excluded from the outline; results are serialized via
    to_dict() for transport.
    """
    # FIX: removed a dead `global _old_definitions` declaration — the
    # name was neither read nor assigned in this function.
    ret_val = []
    path = request_data['path']
    toplvl_definitions = jedi.names(
        request_data['code'], path, 'utf-8')
    for d in toplvl_definitions:
        definition = _extract_def(d, path)
        # Imports are not part of the document's own outline.
        if d.type != 'import':
            ret_val.append(definition)
    ret_val = [d.to_dict() for d in ret_val]
    return ret_val
python
{ "resource": "" }
q260682
quick_doc
validation
def quick_doc(request_data):
    """
    Worker that returns the documentation of the symbol under cursor.

    :returns: list of docstrings (one per definition found), or an
        empty list when jedi cannot resolve the symbol.
    """
    code = request_data['code']
    # jedi lines are 1-based; the editor sends 0-based lines.
    line = request_data['line'] + 1
    column = request_data['column']
    path = request_data['path']
    # encoding = 'utf-8'
    encoding = 'utf-8'
    script = jedi.Script(code, line, column, path, encoding)
    try:
        definitions = script.goto_definitions()
    except jedi.NotFoundError:
        return []
    else:
        ret_val = [d.docstring() for d in definitions]
        return ret_val
python
{ "resource": "" }
q260683
run_pep8
validation
def run_pep8(request_data):
    """
    Worker that run the pep8 tool on the current editor text.

    :returns a list of tuples (msg, msg_type, line_number)
    """
    import pycodestyle
    from pyqode.python.backend.pep8utils import CustomChecker
    WARNING = 1
    code = request_data['code']
    path = request_data['path']
    max_line_length = request_data['max_line_length']
    ignore_rules = request_data['ignore_rules']
    # NOTE(review): += extends the caller's list in place if it is a
    # list — confirm this mutation is acceptable.
    ignore_rules += ['W291', 'W292', 'W293', 'W391']
    pycodestyle.MAX_LINE_LENGTH = max_line_length
    # setup our custom style guide with our custom checker which returns a
    # list of strings instread of spitting the results at stdout
    pep8style = pycodestyle.StyleGuide(parse_argv=False, config_file='',
                                       checker_class=CustomChecker)
    try:
        results = pep8style.input_file(path, lines=code.splitlines(True))
    except Exception:
        _logger().exception('Failed to run PEP8 analysis with data=%r'
                            % request_data)
        return []
    else:
        messages = []
        # `code` is rebound here from the source text to the PEP8 code;
        # the original source string is no longer needed at this point.
        for line_number, offset, code, text, doc in results:
            if code in ignore_rules:
                continue
            messages.append(('[PEP8] %s: %s' % (code, text), WARNING,
                             line_number - 1))
        return messages
python
{ "resource": "" }
q260684
icon_from_typename
validation
def icon_from_typename(name, icon_type):
    """
    Returns the icon resource filename that corresponds to the given
    typename.

    :param name: name of the completion. Use to make the distinction
        between public and private completions (using the count of
        starting '_')
    :pram typename: the typename reported by jedi
    :returns: The associate icon resource filename or None.
    """
    ICONS = {
        'CLASS': ICON_CLASS,
        'IMPORT': ICON_NAMESPACE,
        'STATEMENT': ICON_VAR,
        'FORFLOW': ICON_VAR,
        'FORSTMT': ICON_VAR,
        'WITHSTMT': ICON_VAR,
        'GLOBALSTMT': ICON_VAR,
        'MODULE': ICON_NAMESPACE,
        'KEYWORD': ICON_KEYWORD,
        'PARAM': ICON_VAR,
        'ARRAY': ICON_VAR,
        'INSTANCEELEMENT': ICON_VAR,
        'INSTANCE': ICON_VAR,
        'PARAM-PRIV': ICON_VAR,
        'PARAM-PROT': ICON_VAR,
        'FUNCTION': ICON_FUNC,
        'DEF': ICON_FUNC,
        'FUNCTION-PRIV': ICON_FUNC_PRIVATE,
        'FUNCTION-PROT': ICON_FUNC_PROTECTED
    }
    ret_val = None
    icon_type = icon_type.upper()
    # jedi 0.8 introduced NamedPart class, which have a string instead of
    # being one
    if hasattr(name, "string"):
        name = name.string
    # Normalize statement-like types to PARAM before visibility checks.
    if icon_type == "FORFLOW" or icon_type == "STATEMENT":
        icon_type = "PARAM"
    # Leading underscores select the private/protected icon variants.
    if icon_type == "PARAM" or icon_type == "FUNCTION":
        if name.startswith("__"):
            icon_type += "-PRIV"
        elif name.startswith("_"):
            icon_type += "-PROT"
    if icon_type in ICONS:
        ret_val = ICONS[icon_type]
    elif icon_type:
        _logger().warning("Unimplemented completion icon_type: %s",
                          icon_type)
    return ret_val
python
{ "resource": "" }
q260685
JediCompletionProvider.complete
validation
def complete(code, line, column, path, encoding, prefix):
    """
    Completes python code using `jedi`_.

    :param line: 0-based line (converted to jedi's 1-based convention).
    :returns: a list of completion dicts with 'name', 'icon' and
        'tooltip' keys.
    """
    ret_val = []
    try:
        script = jedi.Script(code, line + 1, column, path, encoding)
        completions = script.completions()
        # FIX: removed a stray debug print of the completion list that
        # was left in this production code path.
    except jedi.NotFoundError:
        completions = []
    for completion in completions:
        ret_val.append({
            'name': completion.name,
            'icon': icon_from_typename(
                completion.name, completion.type),
            'tooltip': completion.description})
    return ret_val
python
{ "resource": "" }
q260686
make_python_patterns
validation
def make_python_patterns(additional_keywords=None, additional_builtins=None):
    """Strongly inspired from idlelib.ColorDelegator.make_pat

    Build the combined highlighting regex for Python sources.

    :param additional_keywords: extra keyword strings to highlight, or None.
    :param additional_builtins: extra builtin names to highlight, or None.
    :returns: a single alternation regex with one named group per token kind.

    Note: ``any`` below is this module's helper that wraps a list of patterns
    in a named group, not the builtin.
    """
    # Avoid mutable default arguments; None is the conventional sentinel.
    additional_keywords = additional_keywords or []
    additional_builtins = additional_builtins or []
    kw = r"\b" + any("keyword", kwlist + additional_keywords) + r"\b"
    kw_namespace = r"\b" + any("namespace", kw_namespace_list) + r"\b"
    word_operators = r"\b" + any("operator_word", wordop_list) + r"\b"
    builtinlist = [str(name) for name in dir(builtins)
                   if not name.startswith('_')] + additional_builtins
    # None/True/False are highlighted as keywords, not builtins.
    for v in ['None', 'True', 'False']:
        builtinlist.remove(v)
    builtin = r"([^.'\"\\#]\b|^)" + any("builtin", builtinlist) + r"\b"
    builtin_fct = any("builtin_fct", [r'_{2}[a-zA-Z_]*_{2}'])
    comment = any("comment", [r"#[^\n]*"])
    instance = any("instance", [r"\bself\b", r"\bcls\b"])
    decorator = any('decorator', [r'@\w*', r'.setter'])
    number = any("number",
                 [r"\b[+-]?[0-9]+[lLjJ]?\b",
                  r"\b[+-]?0[xX][0-9A-Fa-f]+[lL]?\b",
                  r"\b[+-]?0[oO][0-7]+[lL]?\b",
                  r"\b[+-]?0[bB][01]+[lL]?\b",
                  r"\b[+-]?[0-9]+(?:\.[0-9]+)?(?:[eE][+-]?[0-9]+)?[jJ]?\b"])
    # Complete and unfinished (uf_*) string forms, single/double/triple-quoted.
    sqstring = r"(\b[rRuU])?'[^'\\\n]*(\\.[^'\\\n]*)*'?"
    dqstring = r'(\b[rRuU])?"[^"\\\n]*(\\.[^"\\\n]*)*"?'
    uf_sqstring = r"(\b[rRuU])?'[^'\\\n]*(\\.[^'\\\n]*)*(\\)$(?!')$"
    uf_dqstring = r'(\b[rRuU])?"[^"\\\n]*(\\.[^"\\\n]*)*(\\)$(?!")$'
    sq3string = r"(\b[rRuU])?'''[^'\\]*((\\.|'(?!''))[^'\\]*)*(''')?"
    dq3string = r'(\b[rRuU])?"""[^"\\]*((\\.|"(?!""))[^"\\]*)*(""")?'
    uf_sq3string = r"(\b[rRuU])?'''[^'\\]*((\\.|'(?!''))[^'\\]*)*(\\)?(?!''')$"
    uf_dq3string = r'(\b[rRuU])?"""[^"\\]*((\\.|"(?!""))[^"\\]*)*(\\)?(?!""")$'
    string = any("string", [sq3string, dq3string, sqstring, dqstring])
    ufstring1 = any("uf_sqstring", [uf_sqstring])
    ufstring2 = any("uf_dqstring", [uf_dqstring])
    ufstring3 = any("uf_sq3string", [uf_sq3string])
    ufstring4 = any("uf_dq3string", [uf_dq3string])
    return "|".join([instance, decorator, kw, kw_namespace, builtin,
                     word_operators, builtin_fct, comment,
                     ufstring1, ufstring2, ufstring3, ufstring4, string,
                     number, any("SYNC", [r"\n"])])
python
{ "resource": "" }
q260687
GoToAssignmentsMode._check_word_cursor
validation
def _check_word_cursor(self, tc=None):
    """
    Request a go to assignment.

    :param tc: Text cursor which contains the text that we must look for
        its assignment. Can be None to go to the text that is under the
        text cursor.
    :type tc: QtGui.QTextCursor
    """
    editor = self.editor
    # Fall back to the word under the caret when no cursor was supplied.
    cursor = tc or TextHelper(editor).word_under_cursor()
    request_data = dict(
        code=editor.toPlainText(),
        line=cursor.blockNumber(),
        column=cursor.columnNumber(),
        path=editor.file.path,
        encoding=editor.file.encoding,
    )
    try:
        editor.backend.send_request(
            workers.goto_assignments,
            request_data,
            on_receive=self._on_results_available,
        )
    except NotRunning:
        # Backend process not started yet; silently skip the request.
        pass
python
{ "resource": "" }
q260688
GoToAssignmentsMode._unique
validation
def _unique(self, seq): """ Not performant but works. """ # order preserving checked = [] for e in seq: present = False for c in checked: if str(c) == str(e): present = True break if not present: checked.append(e) return checked
python
{ "resource": "" }
q260689
read_bgen
validation
def read_bgen(filepath, metafile_filepath=None, samples_filepath=None, verbose=True):
    r""" Read a given BGEN file.

    Parameters
    ----------
    filepath : str
        A bgen file path.
    metafile_filepath : str, optional
        If ``None``, it will try to read the ``filepath + ".metadata"`` file.
        If this is not possible, it will create one. It tries to create one at
        ``filepath + ".metadata"``. If that is also no possible, it tries to
        create one at a temporary folder.
    samples_filepath : str, optional
        A sample file in `gen format <https://goo.gl/bCzo7m>`_. If
        ``samples_filepath`` is provided, sample ids are read from this file.
        Otherwise, it reads from the bgen file itself if possible. Defaults
        to ``None``.
    verbose : bool, optional
        ``True`` to show progress; ``False`` otherwise. Defaults to ``True``.

    Returns
    -------
    variants : :class:`dask.dataFrame.DataFrame`
        Variant position, chromosomes, rsids, etc.
    samples : :class:`pandas.Series`
        Sample identifications.
    genotype : list
        List of genotypes.

    Examples
    --------
    .. doctest::

        >>> from bgen_reader import example_files, read_bgen
        >>>
        >>> with example_files("haplotypes.bgen") as filepath:
        ...     bgen = read_bgen(filepath, verbose=False)
        ...     variants = bgen["variants"]
        ...     samples = bgen["samples"]
        ...
        ...     v = variants.loc[0].compute()
        ...     g = bgen["genotype"][0].compute()
        ...     print(v)
        ...     print(samples)
        ...     print(g["probs"][0])
             id rsid chrom  pos  nalleles allele_ids  vaddr
        0  SNP1  RS1     1    1         2        A,G    102
        0    sample_0
        1    sample_1
        2    sample_2
        3    sample_3
        Name: id, dtype: object
        [1. 0. 1. 0.]
    """
    # Fail fast on a missing/unreadable input file before any real work.
    assert_file_exist(filepath)
    assert_file_readable(filepath)

    # Resolve where the metadata cache should live (next to the bgen file
    # when possible, otherwise a temporary folder).
    metafile_filepath = _get_valid_metafile_filepath(filepath, metafile_filepath)
    if not os.path.exists(metafile_filepath):
        if verbose:
            print(
                f"We will create the metafile `{metafile_filepath}`. This file will "
                "speed up further\nreads and only need to be created once. So, please, "
                "bear with me."
            )
        # One-off index build; subsequent reads reuse this metafile.
        create_metafile(filepath, metafile_filepath, verbose)

    samples = get_samples(filepath, samples_filepath, verbose)
    variants = map_metadata(filepath, metafile_filepath)
    genotype = map_genotype(filepath, metafile_filepath, verbose)
    return dict(variants=variants, samples=samples, genotype=genotype)
python
{ "resource": "" }
q260690
create_metafile
validation
def create_metafile(bgen_filepath, metafile_filepath, verbose=True):
    r"""Create variants metadata file.

    Variants metadata file helps speed up subsequent reads of the associated
    bgen file.

    Parameters
    ----------
    bgen_filepath : str
        Bgen file path.
    metafile_filepath : str
        Metafile file path.
    verbose : bool
        ``True`` to show progress; ``False`` otherwise.

    Raises
    ------
    ValueError
        If the metafile already exists.
    RuntimeError
        If the underlying C library fails to create or close the metafile.
    """
    # The C layer expects an integer flag and byte strings.
    verbose = 1 if verbose else 0
    bgen_filepath = make_sure_bytes(bgen_filepath)
    metafile_filepath = make_sure_bytes(metafile_filepath)

    assert_file_exist(bgen_filepath)
    assert_file_readable(bgen_filepath)

    # Refuse to clobber an existing metafile.
    if exists(metafile_filepath):
        raise ValueError(f"The file {metafile_filepath} already exists.")

    with bgen_file(bgen_filepath) as bgen:
        nparts = _estimate_best_npartitions(lib.bgen_nvariants(bgen))
        metafile = lib.bgen_create_metafile(bgen, metafile_filepath, nparts, verbose)
        if metafile == ffi.NULL:
            raise RuntimeError(f"Error while creating metafile: {metafile_filepath}.")
        if lib.bgen_close_metafile(metafile) != 0:
            raise RuntimeError(f"Error while closing metafile: {metafile_filepath}.")
python
{ "resource": "" }
q260691
CheckLiteral.match
validation
def match(self, subsetLines, offsetOfSubset, fileName):
    """
    Search through lines for match.

    Raise an Exception if fail to match

    If match is succesful return the position the match was found
    """
    for lineIndex, line in enumerate(subsetLines):
        column = line.find(self.literal)
        if column == -1:
            continue
        # Translate the subset-relative index back to a file position.
        truePosition = lineIndex + offsetOfSubset
        _logger.debug('Found match on line {}, col {}'.format(
            str(truePosition + 1), column))
        _logger.debug('Line is {}'.format(line))
        self.matchLocation = CheckFileParser.FileLocation(
            fileName, truePosition + 1)
        return truePosition
    # No match found anywhere in the subset: record failure and raise.
    self.failed = True
    raise DirectiveException(self)
python
{ "resource": "" }
q260692
CheckNot.match
validation
def match(self, subsetLines, offsetOfSubset, fileName):
    """
    Search through lines for match.

    Raise an Exception if a match

    :param subsetLines: lines of the file being scanned.
    :param offsetOfSubset: index of the first subset line within the file.
    :param fileName: name of the file being scanned (for diagnostics).
    """
    for (offset, l) in enumerate(subsetLines):
        for t in self.regex:
            m = t.Regex.search(l)
            # ``is not None`` is the idiomatic identity test for a regex
            # match object (was ``m != None``).
            if m is not None:
                truePosition = offset + offsetOfSubset
                _logger.debug('Found match on line {}'.format(str(truePosition + 1)))
                _logger.debug('Line is {}'.format(l))
                self.failed = True
                self.matchLocation = CheckFileParser.FileLocation(fileName, truePosition + 1)
                raise DirectiveException(self)
python
{ "resource": "" }
q260693
isA
validation
def isA(instance, typeList):
    """
    Return true if ``instance`` is an instance of any the Directive types
    in ``typeList``
    """
    return any(isinstance(instance, directiveType) for directiveType in typeList)
python
{ "resource": "" }
q260694
_touch
validation
def _touch(fname, mode=0o666, dir_fd=None, **kwargs): """ Touch a file. Credits to <https://stackoverflow.com/a/1160227>. """ flags = os.O_CREAT | os.O_APPEND with os.fdopen(os.open(fname, flags=flags, mode=mode, dir_fd=dir_fd)) as f: os.utime( f.fileno() if os.utime in os.supports_fd else fname, dir_fd=None if os.supports_fd else dir_fd, **kwargs, )
python
{ "resource": "" }
q260695
allele_frequency
validation
def allele_frequency(expec):
    r""" Compute allele frequency from its expectation.

    Parameters
    ----------
    expec : array_like
        Allele expectations encoded as a samples-by-alleles matrix.

    Returns
    -------
    :class:`numpy.ndarray`
        Allele frequencies encoded as a variants-by-alleles matrix.

    Raises
    ------
    ValueError
        If ``expec`` is not a two-dimensional matrix.
    """
    matrix = asarray(expec, float)
    if matrix.ndim != 2:
        raise ValueError("Expectation matrix must be bi-dimensional.")
    # Sum over the samples axis, then normalise by the trailing dimension.
    ploidy = matrix.shape[-1]
    return matrix.sum(axis=-2) / ploidy
python
{ "resource": "" }
q260696
compute_dosage
validation
def compute_dosage(expec, alt=None):
    r""" Compute dosage from allele expectation.

    Parameters
    ----------
    expec : array_like
        Allele expectations encoded as a samples-by-alleles matrix.
    alt : array_like, optional
        Alternative allele index. If ``None``, the last allele column of
        ``expec`` is used as the alternative. Defaults to ``None``.

    Returns
    -------
    :class:`numpy.ndarray`
        Dosage encoded as an array of size equal to the number of samples.

    Examples
    --------
    .. code-block:: python

        >>> from bgen_reader import allele_expectation, compute_dosage
        >>> from bgen_reader import example_files, read_bgen
        >>>
        >>> example = example_files("example.32bits.bgen")
        >>> bgen = read_bgen(example.filepath, verbose=False)
        >>>
        >>> e = allele_expectation(bgen, 3)
        >>> d = compute_dosage(e, alt=1)
        >>> print(d[:5])
        [1.96185308 0.00982666 0.01745552 1.00347899 1.01153563]
        >>>
        >>> example.close()
    """
    if alt is None:
        # By convention the last column holds the alternative allele.
        return expec[..., -1]
    try:
        # Fast path for numpy/xarray-like objects supporting 2-D indexing;
        # preserves the input's array type (e.g. an xarray DataArray).
        return expec[:, alt]
    except (NotImplementedError, TypeError):
        # Fallback: plain nested sequences raise TypeError on tuple
        # indexing, and some lazy-array backends raise NotImplementedError
        # for fancy indexing. Materialize both operands and retry.
        alt = asarray(alt, int)
        return asarray(expec, float)[:, alt]
python
{ "resource": "" }
q260697
allele_expectation
validation
def allele_expectation(bgen, variant_idx):
    r""" Allele expectation.

    Compute the expectation of each allele from the genotype probabilities.

    Parameters
    ----------
    bgen : bgen_file
        Bgen file handler, as returned by :func:`read_bgen` (a dict with
        ``"genotype"`` and ``"variants"`` entries).
    variant_idx : int
        Variant index.

    Returns
    -------
    :class:`numpy.ndarray`
        Samples-by-alleles matrix of allele expectations.

    Raises
    ------
    ValueError
        If the requested variant holds phased genotypes.

    Note
    ----
    This function supports unphased genotypes only.

    Examples
    --------
    .. doctest::

        >>> from bgen_reader import allele_expectation, example_files, read_bgen
        >>>
        >>> example = example_files("example.32bits.bgen")
        >>> bgen = read_bgen(example.filepath, verbose=False)
        >>>
        >>> variants = bgen["variants"]
        >>> variant = variants[variants["rsid"] == "RSID_6"].compute()
        >>> variant_idx = variant.index.item()
        >>> e = allele_expectation(bgen, variant_idx)
        >>> print(e[4])  # sample_005, alleles A and G
        [1.01086423 0.98913577]
        >>>
        >>> example.close()
    """
    # Materialize the genotype record (probs, ploidy, phased flag) for the
    # requested variant.
    geno = bgen["genotype"][variant_idx].compute()
    if geno["phased"]:
        # Fixed typo in the original message ("is define for").
        raise ValueError("Allele expectation is defined for unphased genotypes only.")
    nalleles = bgen["variants"].loc[variant_idx, "nalleles"].compute().item()
    genotypes = get_genotypes(geno["ploidy"], nalleles)
    expec = []
    for i in range(len(genotypes)):
        # Allele counts per possible genotype, weighted by the sample's
        # genotype probabilities.
        count = asarray(genotypes_to_allele_counts(genotypes[i]), float)
        n = count.shape[0]
        expec.append((count.T * geno["probs"][i, :n]).sum(1))
    return stack(expec, axis=0)
python
{ "resource": "" }
q260698
Windows.find_libname
validation
def find_libname(self, name):
    """Try to infer the correct library name."""
    # Candidate filenames, probed in this order inside every library dir.
    candidates = [pattern.format(name)
                  for pattern in ("{}.lib", "lib{}.lib", "{}lib.lib")]
    for directory in self.get_library_dirs():
        for candidate in candidates:
            if exists(join(directory, candidate)):
                # Strip the ".lib" suffix to return the bare library name.
                return candidate[:-4]
    raise ValueError("Could not find the {} library.".format(name))
python
{ "resource": "" }
q260699
SimilarityDistance.fit
validation
def fit(self, X, y=None): """Fit distance-based AD. Parameters ---------- X : array-like or sparse matrix, shape (n_samples, n_features) The input samples. Use ``dtype=np.float32`` for maximum efficiency. Returns ------- self : object Returns self. """ # Check data X = check_array(X) self.tree = BallTree(X, leaf_size=self.leaf_size, metric=self.metric) dist_train = self.tree.query(X, k=2)[0] if self.threshold == 'auto': self.threshold_value = 0.5 * sqrt(var(dist_train[:, 1])) + mean(dist_train[:, 1]) elif self.threshold == 'cv': if y is None: raise ValueError("Y must be specified to find the optimal threshold.") y = check_array(y, accept_sparse='csc', ensure_2d=False, dtype=None) self.threshold_value = 0 score = 0 Y_pred, Y_true, AD = [], [], [] cv = KFold(n_splits=5, random_state=1, shuffle=True) for train_index, test_index in cv.split(X): x_train = safe_indexing(X, train_index) x_test = safe_indexing(X, test_index) y_train = safe_indexing(y, train_index) y_test = safe_indexing(y, test_index) data_test = safe_indexing(dist_train[:, 1], test_index) if self.reg_model is None: reg_model = RandomForestRegressor(n_estimators=500, random_state=1).fit(x_train, y_train) else: reg_model = clone(self.reg_model).fit(x_train, y_train) Y_pred.append(reg_model.predict(x_test)) Y_true.append(y_test) AD.append(data_test) AD_ = unique(hstack(AD)) for z in AD_: AD_new = hstack(AD) <= z if self.score == 'ba_ad': val = balanced_accuracy_score_with_ad(Y_true=hstack(Y_true), Y_pred=hstack(Y_pred), AD=AD_new) elif self.score == 'rmse_ad': val = rmse_score_with_ad(Y_true=hstack(Y_true), Y_pred=hstack(Y_pred), AD=AD_new) if val >= score: score = val self.threshold_value = z else: self.threshold_value = self.threshold return self
python
{ "resource": "" }