# Copyright 2014 Google Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Shared implementation of connections to API servers.""" import json from pkg_resources import get_distribution import six from six.moves.urllib.parse import urlencode # pylint: disable=F0401 import httplib2 from gcloud.exceptions import make_exception API_BASE_URL = 'https://www.googleapis.com' """The base of the API call URL.""" class Connection(object): """A generic connection to Google Cloud Platform. Subclasses should understand only the basic types in method arguments, however they should be capable of returning advanced types. If no value is passed in for ``http``, a :class:`httplib2.Http` object will be created and authorized with the ``credentials``. If not, the ``credentials`` and ``http`` need not be related. Subclasses may seek to use the private key from ``credentials`` to sign data. A custom (non-``httplib2``) HTTP object must have a ``request`` method which accepts the following arguments: * ``uri`` * ``method`` * ``body`` * ``headers`` In addition, ``redirections`` and ``connection_type`` may be used. Without the use of ``credentials.authorize(http)``, a custom ``http`` object will also need to be able to add a bearer token to API requests and handle token refresh on 401 errors. :type credentials: :class:`oauth2client.client.OAuth2Credentials` or :class:`NoneType` :param credentials: The OAuth2 Credentials to use for this connection. :type http: :class:`httplib2.Http` or class that defines ``request()``. :param http: An optional HTTP object to make requests. """ USER_AGENT = "gcloud-python/{0}".format(get_distribution('gcloud').version) """The user agent for gcloud-python requests.""" SCOPE = None """The scopes required for authenticating with a service. Needs to be set by subclasses. """ def __init__(self, credentials=None, http=None): self._http = http self._credentials = self._create_scoped_credentials( credentials, self.SCOPE) @property def credentials(self): """Getter for current credentials. :rtype: :class:`oauth2client.client.OAuth2Credentials` or :class:`NoneType` :returns: The credentials object associated with this connection. """ return self._credentials @property def http(self): """A getter for the HTTP transport used in talking to the API. :rtype: :class:`httplib2.Http` :returns: A Http object used to transport data. """ if self._http is None: self._http = httplib2.Http() if self._credentials: self._http = self._credentials.authorize(self._http) return self._http @staticmethod def _create_scoped_credentials(credentials, scope): """Create a scoped set of credentials if it is required. :type credentials: :class:`oauth2client.client.OAuth2Credentials` or :class:`NoneType` :param credentials: The OAuth2 Credentials to add a scope to. :type scope: list of URLs :param scope: the effective service auth scopes for the connection. :rtype: :class:`oauth2client.client.OAuth2Credentials` or :class:`NoneType` :returns: A new credentials object that has a scope added (if needed). 
""" if credentials and credentials.create_scoped_required(): credentials = credentials.create_scoped(scope) return credentials class JSONConnection(Connection): """A connection to a Google JSON-based API. These APIs are discovery based. For reference: https://developers.google.com/discovery/ This defines :meth:`Connection.api_request` for making a generic JSON API request and API requests are created elsewhere. The class constants * ``API_BASE_URL`` * ``API_VERSION`` * ``API_URL_TEMPLATE`` must be updated by subclasses. """ API_BASE_URL = None """The base of the API call URL.""" API_VERSION = None """The version of the API, used in building the API call's URL.""" API_URL_TEMPLATE = None """A template for the URL of a particular API call.""" @classmethod def build_api_url(cls, path, query_params=None, api_base_url=None, api_version=None): """Construct an API url given a few components, some optional. Typically, you shouldn't need to use this method. :type path: string :param path: The path to the resource (ie, ``'/b/bucket-name'``). :type query_params: dict :param query_params: A dictionary of keys and values to insert into the query string of the URL. :type api_base_url: string :param api_base_url: The base URL for the API endpoint. Typically you won't have to provide this. :type api_version: string :param api_version: The version of the API to call. Typically you shouldn't provide this and instead use the default for the library. :rtype: string :returns: The URL assembled from the pieces provided. """ api_base_url = api_base_url or cls.API_BASE_URL url = cls.API_URL_TEMPLATE.format( api_base_url=(api_base_url or cls.API_BASE_URL), api_version=(api_version or cls.API_VERSION), path=path) query_params = query_params or {} if query_params: url += '?' + urlencode(query_params) return url def _make_request(self, method, url, data=None, content_type=None, headers=None, target_object=None): """A low level method to send a request to the API. Typically, you shouldn't need to use this method. :type method: string :param method: The HTTP method to use in the request. :type url: string :param url: The URL to send the request to. :type data: string :param data: The data to send as the body of the request. :type content_type: string :param content_type: The proper MIME type of the data provided. :type headers: dict :param headers: A dictionary of HTTP headers to send with the request. :type target_object: object or :class:`NoneType` :param target_object: Argument to be used by library callers. This can allow custom behavior, for example, to defer an HTTP request and complete initialization of the object at a later time. :rtype: tuple of ``response`` (a dictionary of sorts) and ``content`` (a string). :returns: The HTTP response object and the content of the response, returned by :meth:`_do_request`. """ headers = headers or {} headers['Accept-Encoding'] = 'gzip' if data: content_length = len(str(data)) else: content_length = 0 headers['Content-Length'] = content_length if content_type: headers['Content-Type'] = content_type headers['User-Agent'] = self.USER_AGENT return self._do_request(method, url, headers, data, target_object) def _do_request(self, method, url, headers, data, target_object): # pylint: disable=unused-argument """Low-level helper: perform the actual API request over HTTP. Allows batch context managers to override and defer a request. :type method: string :param method: The HTTP method to use in the request. :type url: string :param url: The URL to send the request to. 
:type headers: dict :param headers: A dictionary of HTTP headers to send with the request. :type data: string :param data: The data to send as the body of the request. :type target_object: object or :class:`NoneType` :param target_object: Unused ``target_object`` here but may be used by a superclass. :rtype: tuple of ``response`` (a dictionary of sorts) and ``content`` (a string). :returns: The HTTP response object and the content of the response. """ return self.http.request(uri=url, method=method, headers=headers, body=data) def api_request(self, method, path, query_params=None, data=None, content_type=None, api_base_url=None, api_version=None, expect_json=True, _target_object=None): """Make a request over the HTTP transport to the API. You shouldn't need to use this method, but if you plan to interact with the API using these primitives, this is the correct one to use. :type method: string :param method: The HTTP method name (ie, ``GET``, ``POST``, etc). Required. :type path: string :param path: The path to the resource (ie, ``'/b/bucket-name'``). Required. :type query_params: dict :param query_params: A dictionary of keys and values to insert into the query string of the URL. Default is empty dict. :type data: string :param data: The data to send as the body of the request. Default is the empty string. :type content_type: string :param content_type: The proper MIME type of the data provided. Default is None. :type api_base_url: string :param api_base_url: The base URL for the API endpoint. Typically you won't have to provide this. Default is the standard API base URL. :type api_version: string :param api_version: The version of the API to call. Typically you shouldn't provide this and instead use the default for the library. Default is the latest API version supported by gcloud-python. :type expect_json: boolean :param expect_json: If True, this method will try to parse the response as JSON and raise an exception if that cannot be done. Default is True. :type _target_object: object or :class:`NoneType` :param _target_object: Protected argument to be used by library callers. This can allow custom behavior, for example, to defer an HTTP request and complete initialization of the object at a later time. :raises: Exception if the response code is not 200 OK. """ url = self.build_api_url(path=path, query_params=query_params, api_base_url=api_base_url, api_version=api_version) # Making the executive decision that any dictionary # data will be sent properly as JSON. if data and isinstance(data, dict): data = json.dumps(data) content_type = 'application/json' response, content = self._make_request( method=method, url=url, data=data, content_type=content_type, target_object=_target_object) if not 200 <= response.status < 300: raise make_exception(response, content, error_info=method + ' ' + url) string_or_bytes = (six.binary_type, six.text_type) if content and expect_json and isinstance(content, string_or_bytes): content_type = response.get('content-type', '') if not content_type.startswith('application/json'): raise TypeError('Expected JSON, got %s' % content_type) if isinstance(content, six.binary_type): content = content.decode('utf-8') return json.loads(content) return content
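# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hypothetical JSONConnection subclass showing how the class
# constants described above (API_BASE_URL, API_VERSION, API_URL_TEMPLATE,
# SCOPE) fit together. The service name, version, scope, and URL template
# below are invented for illustration only.
class ExampleServiceConnection(JSONConnection):
    API_BASE_URL = 'https://www.googleapis.com'
    API_VERSION = 'v1'
    API_URL_TEMPLATE = '{api_base_url}/exampleservice/{api_version}{path}'
    SCOPE = ('https://www.googleapis.com/auth/cloud-platform',)

# With no credentials passed, the connection is unauthenticated; real callers
# would pass an oauth2client credentials object instead.
connection = ExampleServiceConnection()
url = ExampleServiceConnection.build_api_url(
    '/b/bucket-name', query_params={'fields': 'items'})
# -> 'https://www.googleapis.com/exampleservice/v1/b/bucket-name?fields=items'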
#!/usr/bin/env python # # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # This file contains helper methods used in creating a release. import re import sys from subprocess import Popen, PIPE try: from jira.client import JIRA from jira.exceptions import JIRAError except ImportError: print "This tool requires the jira-python library" print "Install using 'sudo pip install jira'" sys.exit(-1) try: from github import Github from github import GithubException except ImportError: print "This tool requires the PyGithub library" print "Install using 'sudo pip install PyGithub'" sys.exit(-1) try: import unidecode except ImportError: print "This tool requires the unidecode library to decode obscure github usernames" print "Install using 'sudo pip install unidecode'" sys.exit(-1) # Contributors list file name contributors_file_name = "contributors.txt" # Prompt the user to answer yes or no until they do so def yesOrNoPrompt(msg): response = raw_input("%s [y/n]: " % msg) while response != "y" and response != "n": return yesOrNoPrompt(msg) return response == "y" # Utility functions run git commands (written with Git 1.8.5) def run_cmd(cmd): return Popen(cmd, stdout=PIPE).communicate()[0] def run_cmd_error(cmd): return Popen(cmd, stdout=PIPE, stderr=PIPE).communicate()[1] def get_date(commit_hash): return run_cmd(["git", "show", "--quiet", "--pretty=format:%cd", commit_hash]) def tag_exists(tag): stderr = run_cmd_error(["git", "show", tag]) return "error" not in stderr # A type-safe representation of a commit class Commit: def __init__(self, _hash, author, title, pr_number = None): self._hash = _hash self.author = author self.title = title self.pr_number = pr_number def get_hash(self): return self._hash def get_author(self): return self.author def get_title(self): return self.title def get_pr_number(self): return self.pr_number def __str__(self): closes_pr = "(Closes #%s)" % self.pr_number if self.pr_number else "" return "%s %s %s %s" % (self._hash, self.author, self.title, closes_pr) # Return all commits that belong to the specified tag. # # Under the hood, this runs a `git log` on that tag and parses the fields # from the command output to construct a list of Commit objects. Note that # because certain fields reside in the commit description and cannot be parsed # through the Github API itself, we need to do some intelligent regex parsing # to extract those fields. # # This is written using Git 1.8.5. 
def get_commits(tag): commit_start_marker = "|=== COMMIT START MARKER ===|" commit_end_marker = "|=== COMMIT END MARKER ===|" field_end_marker = "|=== COMMIT FIELD END MARKER ===|" log_format =\ commit_start_marker + "%h" +\ field_end_marker + "%an" +\ field_end_marker + "%s" +\ commit_end_marker + "%b" output = run_cmd(["git", "log", "--quiet", "--pretty=format:" + log_format, tag]) commits = [] raw_commits = [c for c in output.split(commit_start_marker) if c] for commit in raw_commits: if commit.count(commit_end_marker) != 1: print "Commit end marker not found in commit: " for line in commit.split("\n"): print line sys.exit(1) # Separate commit digest from the body # From the digest we extract the hash, author and the title # From the body, we extract the PR number and the github username [commit_digest, commit_body] = commit.split(commit_end_marker) if commit_digest.count(field_end_marker) != 2: sys.exit("Unexpected format in commit: %s" % commit_digest) [_hash, author, title] = commit_digest.split(field_end_marker) # The PR number and github username is in the commit message # itself and cannot be accessed through any Github API pr_number = None match = re.search("Closes #([0-9]+) from ([^/\\s]+)/", commit_body) if match: [pr_number, github_username] = match.groups() # If the author name is not valid, use the github # username so we can translate it properly later if not is_valid_author(author): author = github_username # Guard against special characters author = unidecode.unidecode(unicode(author, "UTF-8")).strip() commit = Commit(_hash, author, title, pr_number) commits.append(commit) return commits # Maintain a mapping for translating issue types to contributions in the release notes # This serves an additional function of warning the user against unknown issue types # Note: This list is partially derived from this link: # https://issues.apache.org/jira/plugins/servlet/project-config/SPARK/issuetypes # Keep these in lower case known_issue_types = { "bug": "bug fixes", "build": "build fixes", "dependency upgrade": "build fixes", "improvement": "improvements", "new feature": "new features", "documentation": "documentation", "test": "test", "task": "improvement", "sub-task": "improvement" } # Maintain a mapping for translating component names when creating the release notes # This serves an additional function of warning the user against unknown components # Note: This list is largely derived from this link: # https://issues.apache.org/jira/plugins/servlet/project-config/SPARK/components CORE_COMPONENT = "Core" known_components = { "block manager": CORE_COMPONENT, "build": CORE_COMPONENT, "deploy": CORE_COMPONENT, "documentation": CORE_COMPONENT, "ec2": "EC2", "examples": CORE_COMPONENT, "graphx": "GraphX", "input/output": CORE_COMPONENT, "java api": "Java API", "mesos": "Mesos", "ml": "MLlib", "mllib": "MLlib", "project infra": "Project Infra", "pyspark": "PySpark", "shuffle": "Shuffle", "spark core": CORE_COMPONENT, "spark shell": CORE_COMPONENT, "sql": "SQL", "streaming": "Streaming", "web ui": "Web UI", "windows": "Windows", "yarn": "YARN" } # Translate issue types using a format appropriate for writing contributions # If an unknown issue type is encountered, warn the user def translate_issue_type(issue_type, issue_id, warnings): issue_type = issue_type.lower() if issue_type in known_issue_types: return known_issue_types[issue_type] else: warnings.append("Unknown issue type \"%s\" (see %s)" % (issue_type, issue_id)) return issue_type # Translate component names using a format 
appropriate for writing contributions # If an unknown component is encountered, warn the user def translate_component(component, commit_hash, warnings): component = component.lower() if component in known_components: return known_components[component] else: warnings.append("Unknown component \"%s\" (see %s)" % (component, commit_hash)) return component # Parse components in the commit message # The returned components are already filtered and translated def find_components(commit, commit_hash): components = re.findall("\[\w*\]", commit.lower()) components = [translate_component(c, commit_hash)\ for c in components if c in known_components] return components # Join a list of strings in a human-readable manner # e.g. ["Juice"] -> "Juice" # e.g. ["Juice", "baby"] -> "Juice and baby" # e.g. ["Juice", "baby", "moon"] -> "Juice, baby, and moon" def nice_join(str_list): str_list = list(str_list) # sometimes it's a set if not str_list: return "" elif len(str_list) == 1: return next(iter(str_list)) elif len(str_list) == 2: return " and ".join(str_list) else: return ", ".join(str_list[:-1]) + ", and " + str_list[-1] # Return the full name of the specified user on Github # If the user doesn't exist, return None def get_github_name(author, github_client): if github_client: try: return github_client.get_user(author).name except GithubException as e: # If this is not a "not found" exception if e.status != 404: raise e return None # Return the full name of the specified user on JIRA # If the user doesn't exist, return None def get_jira_name(author, jira_client): if jira_client: try: return jira_client.user(author).displayName except JIRAError as e: # If this is not a "not found" exception if e.status_code != 404: raise e return None # Return whether the given name is in the form <First Name><space><Last Name> def is_valid_author(author): if not author: return False return " " in author and not re.findall("[0-9]", author) # Capitalize the first letter of each word in the given author name def capitalize_author(author): if not author: return None words = author.split(" ") words = [w[0].capitalize() + w[1:] for w in words if w] return " ".join(words)
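# --- Illustrative usage sketch (not part of the original script) ---
# Shows how the helpers above compose in a hypothetical driver (Python 2,
# matching the script). The tag name is made up; a real run would take it
# from the command line or the release branch.
if __name__ == "__main__":
    release_tag = "v1.1.0"
    if not tag_exists(release_tag):
        sys.exit("Tag %s does not exist" % release_tag)
    commits = get_commits(release_tag)
    print "Found %d commits in %s" % (len(commits), release_tag)
    for commit in commits[:5]:
        print " - %s by %s" % (commit.get_title(),
                               capitalize_author(commit.get_author()))
    # nice_join renders component lists as readable English
    print nice_join(["Core", "SQL", "Streaming"])  # "Core, SQL, and Streaming"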
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*- # ex: set expandtab softtabstop=4 shiftwidth=4: # # Copyright (C) 2008,2009,2010,2011,2012,2013 Contributor # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Classes and Tables relating to network interfaces""" #Note: any changes to this class definition and it's constraints #should always be reviewed by DHCP Eng. from datetime import datetime from collections import deque import re from sqlalchemy import (Column, Integer, DateTime, Sequence, String, Boolean, ForeignKey, UniqueConstraint, CheckConstraint, Index) from sqlalchemy.orm import (relation, backref, validates, object_session, deferred) from sqlalchemy.orm.collections import attribute_mapped_collection from sqlalchemy.sql import desc, case, and_, or_ from aquilon.exceptions_ import InternalError from aquilon.aqdb.column_types import AqMac, AqStr from aquilon.aqdb.model import Base, HardwareEntity, ObservedMac, Model from aquilon.aqdb.model.vlan import MAX_VLANS _TN = "interface" _ABV = "iface" class Interface(Base): """ Interface: Representation of network interfaces for our network This table stores collections of machines, names, mac addresses, types, and a bootable flag to aid our DHCP and machine configuration. """ __tablename__ = _TN # Any extra fields the subclass needs over the generic interface parameters extra_fields = [] # Name syntax restrictions name_check = None # Allows setting model/vendor model_allowed = False # The Natural (and composite) pk is HW_ENT_ID/NAME. # But is it the "correct" pk in this case???. The surrogate key is here # only because it's easier to have a single target FK in the address # association object. It might actually be doable to use the natural key if # we try it. The upside: less clutter, meaningful keys. Downside: # It's also extra work we may not enjoy, it means rewriting the table # since we'd blow away its PK. id = Column(Integer, Sequence('%s_seq' % _TN), primary_key=True) name = Column(AqStr(32), nullable=False) # like e0, hme1, etc. mac = Column(AqMac(17), nullable=True) model_id = Column(Integer, ForeignKey('model.id', name='%s_model_fk' % _ABV), nullable=False) # PXE boot control. Does not affect how the OS configures the interface. # FIXME: move to PublicInterface bootable = Column(Boolean(name="%s_bootable_ck" % _ABV), nullable=False, default=False) default_route = Column(Boolean(name="%s_default_route_ck" % _ABV), nullable=False, default=False) interface_type = Column(AqStr(32), nullable=False) hardware_entity_id = Column(Integer, ForeignKey('hardware_entity.id', name='%s_hw_ent_fk' % _ABV, ondelete='CASCADE'), nullable=False) # The FK is deferrable to make it easier to copy the DB between different # backends. The broker itself does not make use of deferred constraints. 
master_id = Column(Integer, ForeignKey('interface.id', name='%s_master_fk' % _ABV, ondelete='CASCADE', deferrable=True, initially='IMMEDIATE'), nullable=True) # FIXME: move to PublicInterface port_group = Column(AqStr(32), nullable=True) creation_date = deferred(Column(DateTime, default=datetime.now, nullable=False)) # Most of the update_* commands need to load the comments due to # snapshot_hw(), so it is not worth deferring it comments = Column('comments', String(255), nullable=True) hardware_entity = relation(HardwareEntity, lazy=False, innerjoin=True, backref=backref('interfaces', cascade='all, delete-orphan')) model = relation(Model, innerjoin=True) master = relation('Interface', uselist=False, remote_side=id, primaryjoin=master_id == id, backref=backref('slaves')) # Order matters here, utils/constraints.py checks for endswith("NOT NULL") __table_args__ = (UniqueConstraint(mac, name='%s_mac_addr_uk' % _ABV), UniqueConstraint(hardware_entity_id, name, name='%s_hw_name_uk' % _ABV), Index('%s_model_idx' % _ABV, model_id), Index('%s_master_idx' % _ABV, master_id)) __mapper_args__ = {'polymorphic_on': interface_type} # Interfaces also have the property 'assignments' which is defined in # address_assignment.py def __format__(self, format_spec): instance = "{0.name} of {1:l}".format(self, self.hardware_entity) return self.format_helper(format_spec, instance) @validates('mac') def _validate_mac(self, key, value): # Due to how decorators work, we have to do a level of indirection to # make polymorphism work return self.validate_mac(key, value) def validate_mac(self, key, value): if self.bootable and not value: raise ValueError("Bootable interfaces require a MAC address.") return value @validates('name') def validate_name(self, key, value): if self.__class__.name_check and \ not self.__class__.name_check.match(value): raise ValueError("Illegal %s interface name '%s'." % (self.interface_type, value)) return value @validates('master') def validate_master(self, key, value): if value is not None and not isinstance(value, BondingInterface) and \ not isinstance(value, BridgeInterface): raise ValueError("The master must be a bonding or bridge interface.") if self.vlans: raise ValueError("{0} can not be bound as long as it has " "VLANs.".format(self)) if self.assignments: raise ValueError("{0} cannot be enslaved as long as it holds " "addresses.".format(self)) return value @property def last_observation(self): session = object_session(self) q = session.query(ObservedMac) q = q.filter_by(mac_address=self.mac) # Group the results into 'any port number but zero' and 'port 0'. # This prioritizes any port over the uplink port. # Saying that port 0 is an uplink port isn't very elegant, also # with real port names it's not even true. 
q = q.order_by(desc(case([(ObservedMac.port == "0", 0)], else_=1))) q = q.order_by(desc(ObservedMac.last_seen)) return q.first() def __init__(self, **kw): """ Overload the Base initializer to prevent null MAC addresses where the interface is bootable or is of type 'management' """ super(Interface, self).__init__(**kw) self.validate_mac("mac", self.mac) def __repr__(self): msg = "<{0} {1} of {2}, MAC={3}>".format(self._get_class_label(), self.name, self.hardware_entity, self.mac) return msg def all_slaves(self): queue = deque(self.slaves) slaves = [] while queue: iface = queue.popleft() slaves.append(iface) queue.extend(iface.slaves) return slaves class PublicInterface(Interface): """ Normal machine interfaces """ _class_label = "Public Interface" __mapper_args__ = {'polymorphic_identity': 'public'} extra_fields = ['bootable', 'port_group'] name_check = re.compile(r"^[a-z]+\d+[a-z]?$") model_allowed = True class ManagementInterface(Interface): """ Management board interfaces """ _class_label = "Management Interface" __mapper_args__ = {'polymorphic_identity': 'management'} name_check = re.compile(r"^[a-z]+\d*$") def validate_mac(self, key, value): if not value: raise ValueError("Management interfaces require a MAC address.") return value class OnboardInterface(Interface): """ Switch/chassis interfaces """ _class_label = "On-board Admin Interface" __mapper_args__ = {'polymorphic_identity': 'oa'} # There are interface names like "gigabitethernet0/1", so no name checks for # now. class VlanInterface(Interface): """ 802.1q VLAN interfaces """ _class_label = "VLAN Interface" extra_fields = ['vlan_id', 'parent'] name_check = re.compile(r"^[a-z]+\d*\.[1-9]\d*$") # The FK is deferrable to make it easier to copy the DB between different # backends. The broker itself does not make use of deferred constraints. parent_id = Column(Integer, ForeignKey(Interface.id, name='iface_vlan_parent_fk', ondelete='CASCADE', deferrable=True, initially='IMMEDIATE')) vlan_id = Column(Integer) parent = relation(Interface, uselist=False, remote_side=Interface.id, primaryjoin=parent_id == Interface.id, backref=backref('vlans', collection_class=attribute_mapped_collection('vlan_id'))) __mapper_args__ = {'polymorphic_identity': 'vlan'} # Order matters here, utils/constraints.py checks for endswith("NOT NULL") __extra_table_args__ = (CheckConstraint(or_(and_(parent_id != None, vlan_id > 0, vlan_id < MAX_VLANS), Interface.interface_type != "vlan"), name="%s_vlan_ck" % _ABV), UniqueConstraint(parent_id, vlan_id, name="%s_parent_vlan_uk" % _ABV)) @validates('vlan_id') def validate_vlan_id(self, key, value): if not isinstance(value, int) or value <= 0 or value >= MAX_VLANS: raise ValueError("Illegal VLAN ID %s: it must be greater than " "0 and smaller than %s." 
% (value, MAX_VLANS)) return value def validate_mac(self, key, value): if value is not None: raise ValueError("VLAN interfaces can not have a distinct MAC address.") return value def __init__(self, parent=None, vlan_id=None, **kwargs): if not parent: raise InternalError("VLAN interfaces need a parent.") if isinstance(parent, VlanInterface): raise ValueError("Stacking of VLAN interfaces is not allowed.") self.validate_vlan_id('vlan_id', vlan_id) super(VlanInterface, self).__init__(parent=parent, vlan_id=vlan_id, **kwargs) class BondingInterface(Interface): """ Channel bonding interfaces """ _class_label = "Bonding Interface" __mapper_args__ = {'polymorphic_identity': 'bonding'} # Linux: ncm-networks wans "bond.*", but Netapp is more relaxed name_check = re.compile(r'^[a-z]+\d+$') class BridgeInterface(Interface): """ Level 2 bridge interfaces """ _class_label = "Bridge Interface" __mapper_args__ = {'polymorphic_identity': 'bridge'} # Bridges on Linux could have any random name, but the templates also # enforce this naming name_check = re.compile(r'^br\d+$') def validate_mac(self, key, value): if value is not None: raise ValueError("Bridge interfaces can not have a distinct MAC address.") return value class LoopbackInterface(Interface): """ Virtual loopback interface, primarily for switches """ _class_label = "Loopback Interface" __mapper_args__ = {'polymorphic_identity': 'loopback'} def validate_mac(self, key, value): if value is not None: raise ValueError("Loopback interfaces cannot have a MAC address.") return value interface = Interface.__table__ # pylint: disable=C0103 interface.info['unique_fields'] = ['name', 'hardware_entity'] interface.info['extra_search_fields'] = ['mac']
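# --- Illustrative usage sketch (not part of the original module) ---
# Demonstrates the polymorphic Interface classes and their validators.
# "machine" and "nic_model" are assumed to be an existing HardwareEntity and
# Model already loaded in a SQLAlchemy session; names and the MAC address are
# examples only.
eth0 = PublicInterface(name="eth0", mac="00:1a:2b:3c:4d:5e", bootable=True,
                       hardware_entity=machine, model=nic_model)
# VLAN interfaces must be stacked on a non-VLAN parent and carry an in-range ID
eth0_v100 = VlanInterface(name="eth0.100", parent=eth0, vlan_id=100,
                          hardware_entity=machine, model=nic_model)
# The validators reject inconsistent data; for example, a bootable interface
# without a MAC address raises ValueError("Bootable interfaces require a MAC
# address.")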
''' Android target, based on python-for-android project ''' import sys if sys.platform == 'win32': raise NotImplementedError('Windows platform not yet working for Android') from platform import uname WSL = 'microsoft' in uname()[2].lower() ANDROID_API = '27' ANDROID_MINAPI = '21' APACHE_ANT_VERSION = '1.9.4' # This constant should *not* be updated, it is used only in the case # that python-for-android cannot provide a recommendation, which in # turn only happens if the python-for-android is old and probably # doesn't support any newer NDK. DEFAULT_ANDROID_NDK_VERSION = '17c' import traceback import os import io import re import ast from pipes import quote from sys import platform, executable from buildozer import BuildozerException, USE_COLOR from buildozer.target import Target from os import environ from os.path import exists, join, realpath, expanduser, basename, relpath from platform import architecture from shutil import copyfile, rmtree from glob import glob from time import sleep from buildozer.libs.version import parse from distutils.version import LooseVersion # buildozer.spec tokens that used to exist but are now ignored DEPRECATED_TOKENS = (('app', 'android.sdk'), ) # Default SDK tag to download. This is not a configurable option # because it doesn't seem to matter much, it is normally correct to # download once then update all the components as buildozer already # does. DEFAULT_SDK_TAG = '6514223' DEFAULT_ARCH = 'armeabi-v7a' MSG_P4A_RECOMMENDED_NDK_ERROR = ( "WARNING: Unable to find recommended Android NDK for current " "installation of python-for-android, defaulting to the default " "version r{android_ndk}".format(android_ndk=DEFAULT_ANDROID_NDK_VERSION) ) class TargetAndroid(Target): targetname = 'android' p4a_directory_name = "python-for-android" p4a_fork = 'kivy' p4a_branch = 'master' p4a_apk_cmd = "apk --debug --bootstrap=" p4a_recommended_ndk_version = None extra_p4a_args = '' def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._arch = self.buildozer.config.getdefault( 'app', 'android.arch', DEFAULT_ARCH) self._build_dir = join( self.buildozer.platform_dir, 'build-{}'.format(self._arch)) executable = sys.executable or 'python' self._p4a_cmd = '{} -m pythonforandroid.toolchain '.format(executable) self._p4a_bootstrap = self.buildozer.config.getdefault( 'app', 'p4a.bootstrap', 'sdl2') self.p4a_apk_cmd += self._p4a_bootstrap color = 'always' if USE_COLOR else 'never' self.extra_p4a_args = ' --color={} --storage-dir="{}"'.format( color, self._build_dir) # minapi should match ndk-api, so can use the same default if # nothing is specified ndk_api = self.buildozer.config.getdefault( 'app', 'android.ndk_api', self.android_minapi) self.extra_p4a_args += ' --ndk-api={}'.format(ndk_api) hook = self.buildozer.config.getdefault("app", "p4a.hook", None) if hook is not None: self.extra_p4a_args += ' --hook={}'.format(realpath(expanduser(hook))) port = self.buildozer.config.getdefault('app', 'p4a.port', None) if port is not None: self.extra_p4a_args += ' --port={}'.format(port) setup_py = self.buildozer.config.getdefault('app', 'p4a.setup_py', False) if setup_py: self.extra_p4a_args += ' --use-setup-py' else: self.extra_p4a_args += ' --ignore-setup-py' self.warn_on_deprecated_tokens() def warn_on_deprecated_tokens(self): for section, token in DEPRECATED_TOKENS: value = self.buildozer.config.getdefault(section, token, None) if value is not None: error = ('WARNING: Config token {} {} is deprecated and ignored, ' 'but you set value {}').format(section, token, value) 
self.buildozer.error(error) def _p4a(self, cmd, **kwargs): kwargs.setdefault('cwd', self.p4a_dir) return self.buildozer.cmd(self._p4a_cmd + cmd + self.extra_p4a_args, **kwargs) @property def p4a_dir(self): """The directory where python-for-android is/will be installed.""" # Default p4a dir p4a_dir = join(self.buildozer.platform_dir, self.p4a_directory_name) # Possibly overriden by user setting system_p4a_dir = self.buildozer.config.getdefault('app', 'p4a.source_dir') if system_p4a_dir: p4a_dir = expanduser(system_p4a_dir) return p4a_dir @property def p4a_recommended_android_ndk(self): """ Return the p4a's recommended android's NDK version, depending on the p4a version used for our buildozer build. In case that we don't find it, we will return the buildozer's recommended one, defined by global variable `DEFAULT_ANDROID_NDK_VERSION`. """ # make sure to read p4a version only the first time if self.p4a_recommended_ndk_version is not None: return self.p4a_recommended_ndk_version # check p4a's recommendation file, and in case that exists find the # recommended android's NDK version, otherwise return buildozer's one ndk_version = DEFAULT_ANDROID_NDK_VERSION rec_file = join(self.p4a_dir, "pythonforandroid", "recommendations.py") if not os.path.isfile(rec_file): self.buildozer.error(MSG_P4A_RECOMMENDED_NDK_ERROR) return ndk_version for line in open(rec_file, "r"): if line.startswith("RECOMMENDED_NDK_VERSION ="): ndk_version = line.replace( "RECOMMENDED_NDK_VERSION =", "") # clean version of unwanted characters for i in {"'", '"', "\n", " "}: ndk_version = ndk_version.replace(i, "") self.buildozer.info( "Recommended android's NDK version by p4a is: {}".format( ndk_version ) ) self.p4a_recommended_ndk_version = ndk_version break return ndk_version def _sdkmanager(self, *args, **kwargs): """Call the sdkmanager in our Android SDK with the given arguments.""" # Use the android-sdk dir as cwd by default android_sdk_dir = self.android_sdk_dir kwargs['cwd'] = kwargs.get('cwd', android_sdk_dir) sdkmanager_path = self.sdkmanager_path sdk_root = f"--sdk_root={android_sdk_dir}" command = f"{sdkmanager_path} {sdk_root} " + ' '.join(args) return_child = kwargs.pop('return_child', False) if return_child: return self.buildozer.cmd_expect(command, **kwargs) else: kwargs['get_stdout'] = kwargs.get('get_stdout', True) return self.buildozer.cmd(command, **kwargs) @property def android_ndk_version(self): return self.buildozer.config.getdefault('app', 'android.ndk', self.p4a_recommended_android_ndk) @property def android_api(self): return self.buildozer.config.getdefault('app', 'android.api', ANDROID_API) @property def android_minapi(self): return self.buildozer.config.getdefault('app', 'android.minapi', ANDROID_MINAPI) @property def android_sdk_dir(self): directory = expanduser(self.buildozer.config.getdefault( 'app', 'android.sdk_path', '')) if directory: return realpath(directory) return join(self.buildozer.global_platform_dir, 'android-sdk') @property def android_ndk_dir(self): directory = expanduser(self.buildozer.config.getdefault( 'app', 'android.ndk_path', '')) if directory: return realpath(directory) version = self.buildozer.config.getdefault('app', 'android.ndk', self.android_ndk_version) return join(self.buildozer.global_platform_dir, 'android-ndk-r{0}'.format(version)) @property def apache_ant_dir(self): directory = expanduser(self.buildozer.config.getdefault( 'app', 'android.ant_path', '')) if directory: return realpath(directory) version = self.buildozer.config.getdefault('app', 'android.ant', 
APACHE_ANT_VERSION) return join(self.buildozer.global_platform_dir, 'apache-ant-{0}'.format(version)) @property def sdkmanager_path(self): sdkmanager_path = join( self.android_sdk_dir, 'tools', 'bin', 'sdkmanager') if not os.path.isfile(sdkmanager_path): raise BuildozerException( ('sdkmanager path "{}" does not exist, sdkmanager is not' 'installed'.format(sdkmanager_path))) return sdkmanager_path def check_requirements(self): if platform in ('win32', 'cygwin'): try: self._set_win32_java_home() except: traceback.print_exc() self.adb_cmd = join(self.android_sdk_dir, 'platform-tools', 'adb.exe') self.javac_cmd = self._locate_java('javac.exe') self.keytool_cmd = self._locate_java('keytool.exe') # darwin, linux else: self.adb_cmd = join(self.android_sdk_dir, 'platform-tools', 'adb') self.javac_cmd = self._locate_java('javac') self.keytool_cmd = self._locate_java('keytool') # Check for C header <zlib.h>. _, _, returncode_dpkg = self.buildozer.cmd('dpkg --version', break_on_error=False) is_debian_like = (returncode_dpkg == 0) if is_debian_like and \ not self.buildozer.file_exists('/usr/include/zlib.h'): raise BuildozerException( 'zlib headers must be installed, ' 'run: sudo apt-get install zlib1g-dev') # Adb arguments: adb_args = self.buildozer.config.getdefault( "app", "android.adb_args", None) if adb_args is not None: self.adb_cmd += ' ' + adb_args # Need to add internally installed ant to path for external tools # like adb to use path = [join(self.apache_ant_dir, 'bin')] if 'PATH' in self.buildozer.environ: path.append(self.buildozer.environ['PATH']) else: path.append(os.environ['PATH']) self.buildozer.environ['PATH'] = ':'.join(path) checkbin = self.buildozer.checkbin checkbin('Git (git)', 'git') checkbin('Cython (cython)', 'cython') checkbin('Java compiler (javac)', self.javac_cmd) checkbin('Java keytool (keytool)', self.keytool_cmd) def check_configuration_tokens(self): errors = [] # check the permission available_permissions = self._get_available_permissions() if available_permissions: permissions = self.buildozer.config.getlist( 'app', 'android.permissions', []) for permission in permissions: # no check on full named permission # like com.google.android.providers.gsf.permission.READ_GSERVICES if '.' 
in permission: continue permission = permission.upper() if permission not in available_permissions: errors.append( '[app] "android.permission" contain an unknown' ' permission {0}'.format(permission)) super().check_configuration_tokens(errors) def _get_available_permissions(self): key = 'android:available_permissions' key_sdk = 'android:available_permissions_sdk' current_platform_tools = self._android_get_installed_platform_tools_version() refresh_permissions = False sdk = self.buildozer.state.get(key_sdk, None) if not sdk or sdk != current_platform_tools: refresh_permissions = True if key not in self.buildozer.state: refresh_permissions = True if not refresh_permissions: return self.buildozer.state[key] try: self.buildozer.debug( 'Read available permissions from api-versions.xml') import xml.etree.ElementTree as ET fn = join(self.android_sdk_dir, 'platform-tools', 'api', 'api-versions.xml') with io.open(fn, encoding='utf-8') as fd: doc = ET.fromstring(fd.read()) fields = doc.findall( './/class[@name="android/Manifest$permission"]/field[@name]') available_permissions = [x.attrib['name'] for x in fields] self.buildozer.state[key] = available_permissions self.buildozer.state[key_sdk] = current_platform_tools return available_permissions except: return None def _set_win32_java_home(self): if 'JAVA_HOME' in self.buildozer.environ: return import _winreg with _winreg.OpenKey( _winreg.HKEY_LOCAL_MACHINE, r"SOFTWARE\JavaSoft\Java Development Kit") as jdk: # @UndefinedVariable current_version, _type = _winreg.QueryValueEx( jdk, "CurrentVersion") # @UndefinedVariable with _winreg.OpenKey(jdk, current_version) as cv: # @UndefinedVariable java_home, _type = _winreg.QueryValueEx( cv, "JavaHome") # @UndefinedVariable self.buildozer.environ['JAVA_HOME'] = java_home def _locate_java(self, s): '''If JAVA_HOME is in the environ, return $JAVA_HOME/bin/s. Otherwise, return s. 
''' if 'JAVA_HOME' in self.buildozer.environ: return join(self.buildozer.environ['JAVA_HOME'], 'bin', s) else: return s def _install_apache_ant(self): ant_dir = self.apache_ant_dir if self.buildozer.file_exists(ant_dir): self.buildozer.info('Apache ANT found at {0}'.format(ant_dir)) return ant_dir if not os.path.exists(ant_dir): os.makedirs(ant_dir) self.buildozer.info('Android ANT is missing, downloading') archive = 'apache-ant-{0}-bin.tar.gz'.format(APACHE_ANT_VERSION) url = 'https://archive.apache.org/dist/ant/binaries/' self.buildozer.download(url, archive, cwd=ant_dir) self.buildozer.file_extract(archive, cwd=ant_dir) self.buildozer.info('Apache ANT installation done.') return ant_dir def _install_android_sdk(self): sdk_dir = self.android_sdk_dir if self.buildozer.file_exists(sdk_dir): self.buildozer.info('Android SDK found at {0}'.format(sdk_dir)) return sdk_dir self.buildozer.info('Android SDK is missing, downloading') if platform in ('win32', 'cygwin'): archive = 'commandlinetools-win-{}_latest.zip'.format(DEFAULT_SDK_TAG) elif platform in ('darwin', ): archive = 'commandlinetools-mac-{}_latest.zip'.format(DEFAULT_SDK_TAG) elif platform.startswith('linux'): archive = 'commandlinetools-linux-{}_latest.zip'.format(DEFAULT_SDK_TAG) else: raise SystemError('Unsupported platform: {0}'.format(platform)) if not os.path.exists(sdk_dir): os.makedirs(sdk_dir) url = 'https://dl.google.com/android/repository/' self.buildozer.download(url, archive, cwd=sdk_dir) self.buildozer.info('Unpacking Android SDK') self.buildozer.file_extract(archive, cwd=sdk_dir) self.buildozer.info('Android SDK tools base installation done.') return sdk_dir def _install_android_ndk(self): ndk_dir = self.android_ndk_dir if self.buildozer.file_exists(ndk_dir): self.buildozer.info('Android NDK found at {0}'.format(ndk_dir)) return ndk_dir import re _version = int(re.search(r'(\d+)', self.android_ndk_version).group(1)) self.buildozer.info('Android NDK is missing, downloading') # Welcome to the NDK URL hell! # a list of all NDK URLs up to level 14 can be found here: # https://gist.github.com/roscopecoltran/43861414fbf341adac3b6fa05e7fad08 # it seems that from level 11 on the naming schema is consistent # from 10e on the URLs can be looked up at # https://developer.android.com/ndk/downloads/older_releases is_darwin = platform == 'darwin' is_linux = platform.startswith('linux') if platform in ('win32', 'cygwin'): # Checking of 32/64 bits at Windows from: https://stackoverflow.com/a/1405971/798575 import struct archive = 'android-ndk-r{0}-windows-{1}.zip' is_64 = (8 * struct.calcsize("P") == 64) elif is_darwin or is_linux: _platform = 'linux' if is_linux else 'darwin' if self.android_ndk_version in ['10c', '10d', '10e']: ext = 'bin' elif _version <= 10: ext = 'tar.bz2' else: ext = 'zip' archive = 'android-ndk-r{0}-' + _platform + '-{1}.' 
+ ext is_64 = (os.uname()[4] == 'x86_64') else: raise SystemError('Unsupported platform: {}'.format(platform)) architecture = 'x86_64' if is_64 else 'x86' unpacked = 'android-ndk-r{0}' archive = archive.format(self.android_ndk_version, architecture) unpacked = unpacked.format(self.android_ndk_version) if _version >= 11: url = 'https://dl.google.com/android/repository/' else: url = 'https://dl.google.com/android/ndk/' self.buildozer.download(url, archive, cwd=self.buildozer.global_platform_dir) self.buildozer.info('Unpacking Android NDK') self.buildozer.file_extract(archive, cwd=self.buildozer.global_platform_dir) self.buildozer.file_rename(unpacked, ndk_dir, cwd=self.buildozer.global_platform_dir) self.buildozer.info('Android NDK installation done.') return ndk_dir def _android_list_build_tools_versions(self): available_packages = self._sdkmanager('--list') lines = available_packages[0].split('\n') build_tools_versions = [] for line in lines: if not line.strip().startswith('build-tools;'): continue package_name = line.strip().split(' ')[0] assert package_name.count(';') == 1, ( 'could not parse package "{}"'.format(package_name)) version = package_name.split(';')[1] build_tools_versions.append(parse(version)) return build_tools_versions def _android_get_installed_platform_tools_version(self): """ Crudely parse out the installed platform-tools version """ platform_tools_dir = os.path.join( self.android_sdk_dir, 'platform-tools') if not os.path.exists(platform_tools_dir): return None data_file = os.path.join(platform_tools_dir, 'source.properties') if not os.path.exists(data_file): return None with open(data_file, 'r') as fileh: lines = fileh.readlines() for line in lines: if line.startswith('Pkg.Revision='): break else: self.buildozer.error('Read {} but found no Pkg.Revision'.format(data_file)) # Don't actually exit, in case the build env is # okay. Something else will fault if it's important. return None revision = line.split('=')[1].strip() return revision def _android_update_sdk(self, *sdkmanager_commands): """Update the tools and package-tools if possible""" auto_accept_license = self.buildozer.config.getbooldefault( 'app', 'android.accept_sdk_license', False) kwargs = {} if auto_accept_license: # `SIGPIPE` is not being reported somehow, but `EPIPE` is. 
# This leads to a stderr "Broken pipe" message which is harmless, # but doesn't look good on terminal, hence redirecting to /dev/null yes_command = 'yes 2>/dev/null' android_sdk_dir = self.android_sdk_dir sdkmanager_path = self.sdkmanager_path sdk_root = f"--sdk_root={android_sdk_dir}" command = f"{yes_command} | {sdkmanager_path} {sdk_root} --licenses" self.buildozer.cmd(command, cwd=self.android_sdk_dir) else: kwargs['show_output'] = True self._sdkmanager(*sdkmanager_commands, **kwargs) def _read_version_subdir(self, *args): versions = [] if not os.path.exists(join(*args)): self.buildozer.debug('build-tools folder not found {}'.format(join( *args))) return parse("0") for v in os.listdir(join(*args)): try: versions.append(parse(v)) except: pass if not versions: self.buildozer.error( 'Unable to find the latest version for {}'.format(join(*args))) return parse("0") return max(versions) def _find_latest_package(self, packages, key): package_versions = [] for p in packages: if not p.startswith(key): continue version_string = p.split(key)[-1] version = parse(version_string) package_versions.append(version) if not package_versions: return return max(package_versions) def _install_android_packages(self): # if any of theses value change into the buildozer.spec, retry the # update cache_key = 'android:sdk_installation' cache_value = [ self.android_api, self.android_minapi, self.android_ndk_version, self.android_sdk_dir, self.android_ndk_dir ] if self.buildozer.state.get(cache_key, None) == cache_value: return True # 1. update the platform-tools package if needed skip_upd = self.buildozer.config.getbooldefault( 'app', 'android.skip_update', False) if not skip_upd: self.buildozer.info('Installing/updating SDK platform tools if necessary') # just calling sdkmanager with the items will install them if necessary self._android_update_sdk('platform-tools') self._android_update_sdk('--update') else: self.buildozer.info('Skipping Android SDK update due to spec file setting') self.buildozer.info('Note: this also prevents installing missing ' 'SDK components') # 2. install the latest build tool self.buildozer.info('Updating SDK build tools if necessary') installed_v_build_tools = self._read_version_subdir(self.android_sdk_dir, 'build-tools') available_v_build_tools = self._android_list_build_tools_versions() if not available_v_build_tools: self.buildozer.error('Did not find any build tools available to download') latest_v_build_tools = sorted(available_v_build_tools)[-1] if latest_v_build_tools > installed_v_build_tools: if not skip_upd: self._android_update_sdk( '"build-tools;{}"'.format(latest_v_build_tools)) installed_v_build_tools = latest_v_build_tools else: self.buildozer.info( 'Skipping update to build tools {} due to spec setting'.format( latest_v_build_tools)) # 2. check aidl can be run self._check_aidl(installed_v_build_tools) # 3. 
finally, install the android for the current api self.buildozer.info('Downloading platform api target if necessary') android_platform = join(self.android_sdk_dir, 'platforms', 'android-{}'.format(self.android_api)) if not self.buildozer.file_exists(android_platform): if not skip_upd: self._sdkmanager('"platforms;android-{}"'.format(self.android_api)) else: self.buildozer.info( 'Skipping install API {} platform tools due to spec setting'.format( self.android_api)) self.buildozer.info('Android packages installation done.') self.buildozer.state[cache_key] = cache_value self.buildozer.state.sync() def _check_aidl(self, v_build_tools): self.buildozer.debug('Check that aidl can be executed') v_build_tools = self._read_version_subdir(self.android_sdk_dir, 'build-tools') aidl_cmd = join(self.android_sdk_dir, 'build-tools', str(v_build_tools), 'aidl') self.buildozer.checkbin('Aidl', aidl_cmd) _, _, returncode = self.buildozer.cmd(aidl_cmd, break_on_error=False, show_output=False) if returncode != 1: self.buildozer.error('Aidl cannot be executed') if architecture()[0] == '64bit': self.buildozer.error('') self.buildozer.error( 'You might have missed to install 32bits libs') self.buildozer.error( 'Check https://buildozer.readthedocs.org/en/latest/installation.html') self.buildozer.error('') else: self.buildozer.error('') self.buildozer.error( 'In case of a bug report, please add a full log with log_level = 2') self.buildozer.error('') raise BuildozerException() def install_platform(self): self._install_p4a() self._install_apache_ant() self._install_android_sdk() self._install_android_ndk() self._install_android_packages() # ultimate configuration check. # some of our configuration cannot be check without platform. self.check_configuration_tokens() self.buildozer.environ.update({ 'PACKAGES_PATH': self.buildozer.global_packages_dir, 'ANDROIDSDK': self.android_sdk_dir, 'ANDROIDNDK': self.android_ndk_dir, 'ANDROIDAPI': self.android_api, 'ANDROIDMINAPI': self.android_minapi, }) def _install_p4a(self): cmd = self.buildozer.cmd p4a_fork = self.buildozer.config.getdefault( 'app', 'p4a.fork', self.p4a_fork ) p4a_url = self.buildozer.config.getdefault( 'app', 'p4a.url', f'https://github.com/{p4a_fork}/python-for-android.git' ) p4a_branch = self.buildozer.config.getdefault( 'app', 'p4a.branch', self.p4a_branch ) p4a_dir = self.p4a_dir system_p4a_dir = self.buildozer.config.getdefault('app', 'p4a.source_dir') if system_p4a_dir: # Don't install anything, just check that the dir does exist if not self.buildozer.file_exists(p4a_dir): self.buildozer.error( 'Path for p4a.source_dir does not exist') self.buildozer.error('') raise BuildozerException() else: # check that url/branch has not been changed if self.buildozer.file_exists(p4a_dir): cur_url = cmd( 'git config --get remote.origin.url', get_stdout=True, cwd=p4a_dir, )[0].strip() cur_branch = cmd( 'git branch -vv', get_stdout=True, cwd=p4a_dir )[0].split()[1] if any([cur_url != p4a_url, cur_branch != p4a_branch]): self.buildozer.info( f"Detected old url/branch ({cur_url}/{cur_branch}), deleting..." 
) rmtree(p4a_dir) if not self.buildozer.file_exists(p4a_dir): cmd( ( 'git clone -b {p4a_branch} --single-branch ' '{p4a_url} {p4a_dir}' ).format( p4a_branch=p4a_branch, p4a_url=p4a_url, p4a_dir=self.p4a_directory_name, ), cwd=self.buildozer.platform_dir, ) elif self.platform_update: cmd('git clean -dxf', cwd=p4a_dir) current_branch = cmd('git rev-parse --abbrev-ref HEAD', get_stdout=True, cwd=p4a_dir)[0].strip() if current_branch == p4a_branch: cmd('git pull', cwd=p4a_dir) else: cmd('git fetch --tags origin {0}:{0}'.format(p4a_branch), cwd=p4a_dir) cmd('git checkout {}'.format(p4a_branch), cwd=p4a_dir) # also install dependencies (currently, only setup.py knows about it) # let's extract them. try: with open(join(self.p4a_dir, "setup.py")) as fd: setup = fd.read() deps = re.findall(r"^\s*install_reqs = (\[[^\]]*\])", setup, re.DOTALL | re.MULTILINE)[0] deps = ast.literal_eval(deps) except IOError: self.buildozer.error('Failed to read python-for-android setup.py at {}'.format( join(self.p4a_dir, 'setup.py'))) sys.exit(1) pip_deps = [] for dep in deps: pip_deps.append("'{}'".format(dep)) # in virtualenv or conda env options = "--user" if "VIRTUAL_ENV" in os.environ or "CONDA_PREFIX" in os.environ: options = "" cmd('{} -m pip install -q {} {}'.format(executable, options, " ".join(pip_deps))) def compile_platform(self): app_requirements = self.buildozer.config.getlist( 'app', 'requirements', '') dist_name = self.buildozer.config.get('app', 'package.name') local_recipes = self.get_local_recipes_dir() requirements = ','.join(app_requirements) options = [] source_dirs = { 'P4A_{}_DIR'.format(name[20:]): realpath(expanduser(value)) for name, value in self.buildozer.config.items('app') if name.startswith('requirements.source.') } if source_dirs: self.buildozer.environ.update(source_dirs) self.buildozer.info('Using custom source dirs:\n {}'.format( '\n '.join(['{} = {}'.format(k, v) for k, v in source_dirs.items()]))) if self.buildozer.config.getbooldefault('app', 'android.copy_libs', True): options.append("--copy-libs") # support for recipes in a local directory within the project if local_recipes: options.append('--local-recipes') options.append(local_recipes) self._p4a( ("create --dist_name={} --bootstrap={} --requirements={} " "--arch {} {}").format( dist_name, self._p4a_bootstrap, requirements, self._arch, " ".join(options)), get_stdout=True)[0] def get_available_packages(self): return True def get_dist_dir(self, dist_name, arch): """Find the dist dir with the given name and target arch, if one already exists, otherwise return a new dist_dir name. """ expected_dist_name = generate_dist_folder_name(dist_name, arch_names=[arch]) # If the expected dist name does exist, simply use that expected_dist_dir = join(self._build_dir, 'dists', expected_dist_name) if exists(expected_dist_dir): return expected_dist_dir # For backwards compatibility, check if a directory without # the arch exists. If so, this is probably the target dist. 
old_dist_dir = join(self._build_dir, 'dists', dist_name) if exists(old_dist_dir): return old_dist_dir # If no directory has been found yet, our dist probably # doesn't exist yet, so use the expected name return expected_dist_dir def get_local_recipes_dir(self): local_recipes = self.buildozer.config.getdefault('app', 'p4a.local_recipes') return realpath(expanduser(local_recipes)) if local_recipes else None def execute_build_package(self, build_cmd): # wrapper from previous old_toolchain to new toolchain dist_name = self.buildozer.config.get('app', 'package.name') local_recipes = self.get_local_recipes_dir() cmd = [self.p4a_apk_cmd, "--dist_name", dist_name] for args in build_cmd: option, values = args[0], args[1:] if option == "debug": continue elif option == "release": cmd.append("--release") if self.check_p4a_sign_env(True): cmd.append("--sign") continue if option == "--window": cmd.append("--window") elif option == "--sdk": cmd.append("--android_api") cmd.extend(values) else: cmd.extend(args) # support for presplash background color presplash_color = self.buildozer.config.getdefault('app', 'android.presplash_color', None) if presplash_color: cmd.append('--presplash-color') cmd.append("'{}'".format(presplash_color)) # support for services services = self.buildozer.config.getlist('app', 'services', []) for service in services: cmd.append("--service") cmd.append(service) # support for copy-libs if self.buildozer.config.getbooldefault('app', 'android.copy_libs', True): cmd.append("--copy-libs") # support for recipes in a local directory within the project if local_recipes: cmd.append('--local-recipes') cmd.append(local_recipes) # support for blacklist/whitelist filename whitelist_src = self.buildozer.config.getdefault('app', 'android.whitelist_src', None) blacklist_src = self.buildozer.config.getdefault('app', 'android.blacklist_src', None) if whitelist_src: cmd.append('--whitelist') cmd.append(realpath(expanduser(whitelist_src))) if blacklist_src: cmd.append('--blacklist') cmd.append(realpath(expanduser(blacklist_src))) # support for aars aars = self.buildozer.config.getlist('app', 'android.add_aars', []) for aar in aars: cmd.append('--add-aar') cmd.append(realpath(expanduser(aar))) # support for uses-lib uses_library = self.buildozer.config.getlist( 'app', 'android.uses_library', '') for lib in uses_library: cmd.append('--uses-library={}'.format(lib)) # support for gradle dependencies gradle_dependencies = self.buildozer.config.getlist('app', 'android.gradle_dependencies', []) for gradle_dependency in gradle_dependencies: cmd.append('--depend') cmd.append(gradle_dependency) # support for manifestPlaceholders manifest_placeholders = self.buildozer.config.getdefault('app', 'android.manifest_placeholders', None) if manifest_placeholders: cmd.append('--manifest-placeholders') cmd.append("{}".format(manifest_placeholders)) # support disabling of compilation compile_py = self.buildozer.config.getdefault('app', 'android.no-compile-pyo', None) if compile_py: cmd.append('--no-compile-pyo') cmd.append('--arch') cmd.append(self._arch) cmd = " ".join(cmd) self._p4a(cmd) def get_release_mode(self): if self.check_p4a_sign_env(): return "release" return "release-unsigned" def check_p4a_sign_env(self, error=False): keys = ["KEYALIAS", "KEYSTORE_PASSWD", "KEYSTORE", "KEYALIAS_PASSWD"] check = True for key in keys: key = "P4A_RELEASE_{}".format(key) if key not in os.environ: if error: self.buildozer.error( ("Asking for release but {} is missing" "--sign will not be passed").format(key)) check = False return 
check def cmd_run(self, *args): entrypoint = self.buildozer.config.getdefault( 'app', 'android.entrypoint') if not entrypoint: self.buildozer.config.set('app', 'android.entrypoint', 'org.kivy.android.PythonActivity') super().cmd_run(*args) entrypoint = self.buildozer.config.getdefault( 'app', 'android.entrypoint', 'org.kivy.android.PythonActivity') package = self._get_package() # push on the device for serial in self.serials: self.buildozer.environ['ANDROID_SERIAL'] = serial self.buildozer.info('Run on {}'.format(serial)) self.buildozer.cmd( '{adb} shell am start -n {package}/{entry} -a {entry}'.format( adb=self.adb_cmd, package=package, entry=entrypoint), cwd=self.buildozer.global_platform_dir) self.buildozer.environ.pop('ANDROID_SERIAL', None) while True: if self._get_pid(): break sleep(.1) self.buildozer.info('Waiting for application to start.') self.buildozer.info('Application started.') def cmd_p4a(self, *args): ''' Run p4a commands. Args must come after --, or use --alias to make an alias ''' self.check_requirements() self.install_platform() args = args[0] if args and args[0] == '--alias': print('To set up p4a in this shell session, execute:') print(' alias p4a=$(buildozer {} p4a --alias 2>&1 >/dev/null)' .format(self.targetname)) sys.stderr.write('PYTHONPATH={} {}\n'.format(self.p4a_dir, self._p4a_cmd)) else: self._p4a(' '.join(args) if args else '') def cmd_clean(self, *args): ''' Clean the build and distribution ''' self._p4a("clean_builds") self._p4a("clean_dists") def _get_package(self): config = self.buildozer.config package_domain = config.getdefault('app', 'package.domain', '') package = config.get('app', 'package.name') if package_domain: package = package_domain + '.' + package return package.lower() def _generate_whitelist(self, dist_dir): p4a_whitelist = self.buildozer.config.getlist( 'app', 'android.whitelist') or [] whitelist_fn = join(dist_dir, 'whitelist.txt') with open(whitelist_fn, 'w') as fd: for wl in p4a_whitelist: fd.write(wl + '\n') def build_package(self): dist_name = self.buildozer.config.get('app', 'package.name') arch = self.buildozer.config.getdefault('app', 'android.arch', DEFAULT_ARCH) dist_dir = self.get_dist_dir(dist_name, arch) config = self.buildozer.config package = self._get_package() version = self.buildozer.get_version() # add extra libs/armeabi files in dist/default/libs/armeabi # (same for armeabi-v7a, arm64-v8a, x86, mips) for config_key, lib_dir in ( ('android.add_libs_armeabi', 'armeabi'), ('android.add_libs_armeabi_v7a', 'armeabi-v7a'), ('android.add_libs_arm64_v8a', 'arm64-v8a'), ('android.add_libs_x86', 'x86'), ('android.add_libs_mips', 'mips')): patterns = config.getlist('app', config_key, []) if not patterns: continue if self._arch != lib_dir: continue self.buildozer.debug('Search and copy libs for {}'.format(lib_dir)) for fn in self.buildozer.file_matches(patterns): self.buildozer.file_copy( join(self.buildozer.root_dir, fn), join(dist_dir, 'libs', lib_dir, basename(fn))) # update the project.properties libraries references self._update_libraries_references(dist_dir) # add src files self._add_java_src(dist_dir) # generate the whitelist if needed self._generate_whitelist(dist_dir) # build the app build_cmd = [ ("--name", quote(config.get('app', 'title'))), ("--version", version), ("--package", package), ("--minsdk", config.getdefault('app', 'android.minapi', self.android_minapi)), ("--ndk-api", config.getdefault('app', 'android.minapi', self.android_minapi)), ] is_private_storage = config.getbooldefault( 'app', 
'android.private_storage', True) if is_private_storage: build_cmd += [("--private", self.buildozer.app_dir)] else: build_cmd += [("--dir", self.buildozer.app_dir)] # add permissions permissions = config.getlist('app', 'android.permissions', []) for permission in permissions: # force the latest component to be uppercase permission = permission.split('.') permission[-1] = permission[-1].upper() permission = '.'.join(permission) build_cmd += [("--permission", permission)] # add features features = config.getlist('app', 'android.features', []) for feature in features: build_cmd += [("--feature", feature)] # android.entrypoint entrypoint = config.getdefault('app', 'android.entrypoint', 'org.kivy.android.PythonActivity') build_cmd += [('--android-entrypoint', entrypoint)] # android.apptheme apptheme = config.getdefault('app', 'android.apptheme', '@android:style/Theme.NoTitleBar') build_cmd += [('--android-apptheme', apptheme)] # android.compile_options compile_options = config.getlist('app', 'android.add_compile_options', []) for option in compile_options: build_cmd += [('--add-compile-option', option)] # android.add_gradle_repositories repos = config.getlist('app', 'android.add_gradle_repositories', []) for repo in repos: build_cmd += [('--add-gradle-repository', repo)] # android packaging options pkgoptions = config.getlist('app', 'android.add_packaging_options', []) for pkgoption in pkgoptions: build_cmd += [('--add-packaging-option', pkgoption)] # meta-data meta_datas = config.getlistvalues('app', 'android.meta_data', []) for meta in meta_datas: key, value = meta.split('=', 1) meta = '{}={}'.format(key.strip(), value.strip()) build_cmd += [("--meta-data", meta)] # add extra Java jar files add_jars = config.getlist('app', 'android.add_jars', []) for pattern in add_jars: pattern = join(self.buildozer.root_dir, pattern) matches = glob(expanduser(pattern.strip())) if matches: for jar in matches: build_cmd += [("--add-jar", jar)] else: raise SystemError('Failed to find jar file: {}'.format( pattern)) # add Java activity add_activities = config.getlist('app', 'android.add_activities', []) for activity in add_activities: build_cmd += [("--add-activity", activity)] # add presplash, lottie animation or static presplash = config.getdefault('app', 'android.presplash_lottie', '') if presplash: build_cmd += [("--presplash-lottie", join(self.buildozer.root_dir, presplash))] else: presplash = config.getdefault('app', 'presplash.filename', '') if presplash: build_cmd += [("--presplash", join(self.buildozer.root_dir, presplash))] # add icon icon = config.getdefault('app', 'icon.filename', '') if icon: build_cmd += [("--icon", join(self.buildozer.root_dir, icon))] icon_fg = config.getdefault('app', 'icon.adaptive_foreground.filename', '') icon_bg = config.getdefault('app', 'icon.adaptive_background.filename', '') if icon_fg and icon_bg: build_cmd += [("--icon-fg", join(self.buildozer.root_dir, icon_fg))] build_cmd += [("--icon-bg", join(self.buildozer.root_dir, icon_bg))] # OUYA Console support ouya_category = config.getdefault('app', 'android.ouya.category', '').upper() if ouya_category: if ouya_category not in ('GAME', 'APP'): raise SystemError( 'Invalid android.ouya.category: "{}" must be one of GAME or APP'.format( ouya_category)) # add icon ouya_icon = config.getdefault('app', 'android.ouya.icon.filename', '') build_cmd += [("--ouya-category", ouya_category)] build_cmd += [("--ouya-icon", join(self.buildozer.root_dir, ouya_icon))] if config.getdefault('app', 'p4a.bootstrap', 'sdl2') != 'service_only': # 
add orientation orientation = config.getdefault('app', 'orientation', 'landscape') if orientation == 'all': orientation = 'sensor' build_cmd += [("--orientation", orientation)] # fullscreen ? fullscreen = config.getbooldefault('app', 'fullscreen', True) if not fullscreen: build_cmd += [("--window", )] # wakelock ? wakelock = config.getbooldefault('app', 'android.wakelock', False) if wakelock: build_cmd += [("--wakelock", )] # AndroidX ? enable_androidx = config.getbooldefault('app', 'android.enable_androidx', False) if enable_androidx: build_cmd += [("--enable-androidx", )] # intent filters intent_filters = config.getdefault( 'app', 'android.manifest.intent_filters', '') if intent_filters: build_cmd += [("--intent-filters", join(self.buildozer.root_dir, intent_filters))] # activity launch mode launch_mode = config.getdefault( 'app', 'android.manifest.launch_mode', '') if launch_mode: build_cmd += [("--activity-launch-mode", launch_mode)] # numeric version numeric_version = config.getdefault('app', 'android.numeric_version') if numeric_version: build_cmd += [("--numeric-version", numeric_version)] # android.allow_backup allow_backup = config.getbooldefault('app', 'android.allow_backup', True) if not allow_backup: build_cmd += [('--allow-backup', 'false')] # android.backup_rules backup_rules = config.getdefault('app', 'android.backup_rules', '') if backup_rules: build_cmd += [("--backup-rules", join(self.buildozer.root_dir, backup_rules))] # build only in debug right now. if self.build_mode == 'debug': build_cmd += [("debug", )] mode = 'debug' mode_sign = mode else: build_cmd += [("release", )] mode_sign = "release" mode = self.get_release_mode() self.execute_build_package(build_cmd) try: self.buildozer.hook("android_pre_build_apk") self.execute_build_package(build_cmd) self.buildozer.hook("android_post_build_apk") except: # maybe the hook fail because the apk is not pass build_tools_versions = os.listdir(join(self.android_sdk_dir, "build-tools")) build_tools_versions = sorted(build_tools_versions, key=LooseVersion) build_tools_version = build_tools_versions[-1] gradle_files = ["build.gradle", "gradle", "gradlew"] is_gradle_build = build_tools_version >= "25.0" and any( (exists(join(dist_dir, x)) for x in gradle_files)) packagename = config.get('app', 'package.name') if is_gradle_build: # on gradle build, the apk use the package name, and have no version packagename_src = basename(dist_dir) # gradle specifically uses the folder name apk = u'{packagename}-{mode}.apk'.format( packagename=packagename_src, mode=mode) apk_dir = join(dist_dir, "build", "outputs", "apk", mode_sign) else: # on ant, the apk use the title, and have version bl = u'\'" ,' apptitle = config.get('app', 'title') if hasattr(apptitle, 'decode'): apptitle = apptitle.decode('utf-8') apktitle = ''.join([x for x in apptitle if x not in bl]) apk = u'{title}-{version}-{mode}.apk'.format( title=apktitle, version=version, mode=mode) apk_dir = join(dist_dir, "bin") apk_dest = u'{packagename}-{version}-{arch}-{mode}.apk'.format( packagename=packagename, mode=mode, version=version, arch=self._arch) # copy to our place copyfile(join(apk_dir, apk), join(self.buildozer.bin_dir, apk_dest)) self.buildozer.info('Android packaging done!') self.buildozer.info( u'APK {0} available in the bin directory'.format(apk_dest)) self.buildozer.state['android:latestapk'] = apk_dest self.buildozer.state['android:latestmode'] = self.build_mode def _update_libraries_references(self, dist_dir): # ensure the project.properties exist project_fn = 
join(dist_dir, 'project.properties') if not self.buildozer.file_exists(project_fn): content = [ 'target=android-{}\n'.format(self.android_api), 'APP_PLATFORM={}\n'.format(self.android_minapi)] else: with io.open(project_fn, encoding='utf-8') as fd: content = fd.readlines() # extract library reference references = [] for line in content[:]: if not line.startswith('android.library.reference.'): continue content.remove(line) # convert our references to relative path app_references = self.buildozer.config.getlist( 'app', 'android.library_references', []) source_dir = realpath(expanduser(self.buildozer.config.getdefault( 'app', 'source.dir', '.'))) for cref in app_references: # get the full path of the current reference ref = realpath(join(source_dir, cref)) if not self.buildozer.file_exists(ref): self.buildozer.error( 'Invalid library reference (path not found): {}'.format( cref)) sys.exit(1) # get a relative path from the project file ref = relpath(ref, realpath(expanduser(dist_dir))) # ensure the reference exists references.append(ref) # recreate the project.properties with io.open(project_fn, 'w', encoding='utf-8') as fd: try: fd.writelines((line.decode('utf-8') for line in content)) except: fd.writelines(content) if content and not content[-1].endswith(u'\n'): fd.write(u'\n') for index, ref in enumerate(references): fd.write(u'android.library.reference.{}={}\n'.format(index + 1, ref)) self.buildozer.debug('project.properties updated') def _add_java_src(self, dist_dir): java_src = self.buildozer.config.getlist('app', 'android.add_src', []) gradle_files = ["build.gradle", "gradle", "gradlew"] is_gradle_build = any(( exists(join(dist_dir, x)) for x in gradle_files)) if is_gradle_build: src_dir = join(dist_dir, "src", "main", "java") self.buildozer.info( "Gradle project detected, copy files {}".format(src_dir)) else: src_dir = join(dist_dir, 'src') self.buildozer.info( "Ant project detected, copy files in {}".format(src_dir)) for pattern in java_src: for fn in glob(expanduser(pattern.strip())): last_component = basename(fn) self.buildozer.file_copytree(fn, join(src_dir, last_component)) @property def serials(self): if hasattr(self, '_serials'): return self._serials serial = environ.get('ANDROID_SERIAL') if serial: return serial.split(',') lines = self.buildozer.cmd('{} devices'.format(self.adb_cmd), get_stdout=True)[0].splitlines() serials = [] for serial in lines: if not serial: continue if serial.startswith('*') or serial.startswith('List '): continue serials.append(serial.split()[0]) self._serials = serials return serials def cmd_adb(self, *args): ''' Run adb from the Android SDK. Args must come after --, or use --alias to make an alias ''' self.check_requirements() self.install_platform() args = args[0] if args and args[0] == '--alias': print('To set up ADB in this shell session, execute:') print(' alias adb=$(buildozer {} adb --alias 2>&1 >/dev/null)' .format(self.targetname)) sys.stderr.write(self.adb_cmd + '\n') else: self.buildozer.cmd(' '.join([self.adb_cmd] + args)) def cmd_deploy(self, *args): super().cmd_deploy(*args) state = self.buildozer.state if 'android:latestapk' not in state: self.buildozer.error('No APK built yet. Run "debug" first.') if state.get('android:latestmode', '') != 'debug': self.buildozer.error('Only debug APK are supported for deploy') # search the APK in the bin dir apk = state['android:latestapk'] full_apk = join(self.buildozer.bin_dir, apk) if not self.buildozer.file_exists(full_apk): self.buildozer.error( 'Unable to found the latest APK. 
Please run "debug" again.') # push on the device for serial in self.serials: self.buildozer.environ['ANDROID_SERIAL'] = serial self.buildozer.info('Deploy on {}'.format(serial)) self.buildozer.cmd('{0} install -r "{1}"'.format( self.adb_cmd, full_apk), cwd=self.buildozer.global_platform_dir) self.buildozer.environ.pop('ANDROID_SERIAL', None) self.buildozer.info('Application pushed.') def _get_pid(self): pid, *_ = self.buildozer.cmd( f'{self.adb_cmd} shell pidof {self._get_package()}', get_stdout=True, show_output=False, break_on_error=False, quiet=True, ) if pid: return pid.strip() return False def cmd_logcat(self, *args): '''Show the log from the device ''' self.check_requirements() serial = self.serials[0:] if not serial: return filters = self.buildozer.config.getrawdefault( "app", "android.logcat_filters", "", section_sep=":", split_char=" ") filters = " ".join(filters) self.buildozer.environ['ANDROID_SERIAL'] = serial[0] extra_args = [] pid = None if self.buildozer.config.getdefault('app', 'android.logcat_pid_only'): pid = self._get_pid() if pid: extra_args.extend(('--pid', pid)) self.buildozer.cmd( f"{self.adb_cmd} logcat {filters} {' '.join(extra_args)}", cwd=self.buildozer.global_platform_dir, show_output=True, run_condition=self._get_pid if pid else None, break_on_error=False, ) self.buildozer.info(f"{self._get_package()} terminated") self.buildozer.environ.pop('ANDROID_SERIAL', None) def get_target(buildozer): buildozer.targetname = "android" return TargetAndroid(buildozer) def generate_dist_folder_name(base_dist_name, arch_names=None): """Generate the distribution folder name to use, based on a combination of the input arguments. WARNING: This function is copied from python-for-android. It would be preferable to have a proper interface, either importing the p4a code or having a p4a dist dir query option. Parameters ---------- base_dist_name : str The core distribution identifier string arch_names : list of str The architecture compile targets """ if arch_names is None: arch_names = ["no_arch_specified"] return '{}__{}'.format( base_dist_name, '_'.join(arch_names) )
"""Search implementation using a cell database.""" from base64 import b64decode from collections import defaultdict import math import numpy from sqlalchemy import select from ichnaea.api.locate.constants import ( CELL_MIN_ACCURACY, CELL_MAX_ACCURACY, CELLAREA_MIN_ACCURACY, CELLAREA_MAX_ACCURACY, ) from ichnaea.api.locate.result import ( Position, PositionResultList, Region, RegionResultList, ) from ichnaea.api.locate.score import area_score, station_score from geocalc import distance from ichnaea.geocode import GEOCODER from ichnaea.models import ( area_id, decode_cellarea, decode_cellid, encode_cellarea, encode_cellid, CellArea, CellShard, station_blocked, ) from ichnaea.models.constants import MIN_CELL_SIGNAL from ichnaea import util NETWORK_DTYPE = numpy.dtype( [ ("lat", numpy.double), ("lon", numpy.double), ("radius", numpy.double), ("age", numpy.int32), ("signalStrength", numpy.int32), ("score", numpy.double), ("id_b64", "S16"), ("seen_today", numpy.bool), ] ) def cluster_cells(cells, lookups, min_age=0): """ Cluster cells by area. """ now = util.utcnow() today = now.date() # Create a dict of cell ids mapped to their age and signal strength. obs_data = {} for lookup in lookups: obs_data[decode_cellid(lookup.cellid)] = ( max(abs(lookup.age or min_age), 1000), lookup.signalStrength or MIN_CELL_SIGNAL[lookup.radioType], ) areas = defaultdict(list) for cell in cells: areas[area_id(cell)].append(cell) clusters = [] for area_cells in areas.values(): clusters.append( numpy.array( [ ( cell.lat, cell.lon, cell.radius, obs_data[cell.cellid][0], obs_data[cell.cellid][1], station_score(cell, now), encode_cellid(*cell.cellid, codec="base64"), bool(cell.last_seen is not None and cell.last_seen >= today), ) for cell in area_cells ], dtype=NETWORK_DTYPE, ) ) return clusters def cluster_areas(areas, lookups, min_age=0): """ Cluster areas, treat each area as its own cluster. """ now = util.utcnow() today = now.date() # Create a dict of area ids mapped to their age and signal strength. obs_data = {} for lookup in lookups: obs_data[decode_cellarea(lookup.areaid)] = ( max(abs(lookup.age or min_age), 1000), lookup.signalStrength or MIN_CELL_SIGNAL[lookup.radioType], ) clusters = [] for area in areas: clusters.append( numpy.array( [ ( area.lat, area.lon, area.radius, obs_data[area.areaid][0], obs_data[area.areaid][1], area_score(area, now), encode_cellarea(*area.areaid, codec="base64"), bool(area.last_seen is not None and area.last_seen >= today), ) ], dtype=NETWORK_DTYPE, ) ) return clusters def aggregate_cell_position(networks, min_accuracy, max_accuracy): """ Calculate the aggregate position of the user inside the given cluster of networks. Return the position, an accuracy estimate and a combined score. The accuracy is bounded by the min_accuracy and max_accuracy. """ if len(networks) == 1: lat = networks[0]["lat"] lon = networks[0]["lon"] radius = min(max(networks[0]["radius"], min_accuracy), max_accuracy) score = networks[0]["score"] return (float(lat), float(lon), float(radius), float(score)) points = numpy.array( [(net["lat"], net["lon"]) for net in networks], dtype=numpy.double ) weights = numpy.array( [ net["score"] * min(math.sqrt(2000.0 / net["age"]), 1.0) / math.pow(net["signalStrength"], 2) for net in networks ], dtype=numpy.double, ) lat, lon = numpy.average(points, axis=0, weights=weights) score = networks["score"].sum() # Guess the accuracy as the 95th percentile of the distances # from the lat/lon to the positions of all networks. 
distances = numpy.array( [distance(lat, lon, net["lat"], net["lon"]) for net in networks], dtype=numpy.double, ) accuracy = min(max(numpy.percentile(distances, 95), min_accuracy), max_accuracy) return (float(lat), float(lon), float(accuracy), float(score)) def query_cells(query, lookups, model, raven_client): # Given a location query and a list of lookup instances, query the # database and return a list of model objects. cellids = [lookup.cellid for lookup in lookups] if not cellids: return [] # load all fields used in score calculation and those we # need for the position load_fields = ( "cellid", "lat", "lon", "radius", "region", "samples", "created", "modified", "last_seen", "block_last", "block_count", ) result = [] today = util.utcnow().date() try: shards = defaultdict(list) for lookup in lookups: shards[model.shard_model(lookup.radioType)].append(lookup.cellid) for shard, shard_cellids in shards.items(): columns = shard.__table__.c fields = [getattr(columns, f) for f in load_fields] rows = ( query.session.execute( select(fields) .where(columns.lat.isnot(None)) .where(columns.lon.isnot(None)) .where(columns.cellid.in_(shard_cellids)) ) ).fetchall() result.extend([row for row in rows if not station_blocked(row, today)]) except Exception: raven_client.captureException() return result def query_areas(query, lookups, model, raven_client): areaids = [lookup.areaid for lookup in lookups] if not areaids: return [] # load all fields used in score calculation and those we # need for the position or region load_fields = ( "areaid", "lat", "lon", "radius", "region", "num_cells", "created", "modified", "last_seen", ) try: columns = model.__table__.c fields = [getattr(columns, f) for f in load_fields] rows = ( query.session.execute( select(fields) .where(columns.lat.isnot(None)) .where(columns.lon.isnot(None)) .where(columns.areaid.in_(areaids)) ) ).fetchall() return rows except Exception: raven_client.captureException() return [] class CellPositionMixin(object): """ A CellPositionMixin implements a position search using the cell models. """ cell_model = CellShard area_model = CellArea result_list = PositionResultList result_type = Position def should_search_cell(self, query, results): if not (query.cell or query.cell_area): return False return True def search_cell(self, query): results = self.result_list() if query.cell: cells = query_cells(query, query.cell, self.cell_model, self.raven_client) if cells: for cluster in cluster_cells(cells, query.cell): lat, lon, accuracy, score = aggregate_cell_position( cluster, CELL_MIN_ACCURACY, CELL_MAX_ACCURACY ) used_networks = [ ("cell", b64decode(id_b64), bool(seen_today)) for id_b64, seen_today in cluster[["id_b64", "seen_today"]] ] results.add( self.result_type( lat=lat, lon=lon, accuracy=accuracy, score=score, used_networks=used_networks, ) ) if len(results): return results if query.cell_area: areas = query_areas( query, query.cell_area, self.area_model, self.raven_client ) if areas: for cluster in cluster_areas(areas, query.cell_area): lat, lon, accuracy, score = aggregate_cell_position( cluster, CELLAREA_MIN_ACCURACY, CELLAREA_MAX_ACCURACY ) used_networks = [ ("area", b64decode(id_b64), bool(seen_today)) for id_b64, seen_today in cluster[["id_b64", "seen_today"]] ] results.add( self.result_type( lat=lat, lon=lon, accuracy=accuracy, score=score, fallback="lacf", used_networks=used_networks, ) ) return results class CellRegionMixin(object): """ A CellRegionMixin implements a region search using the cell models. 
""" area_model = CellArea result_list = RegionResultList result_type = Region def should_search_cell(self, query, results): if not (query.cell or query.cell_area): return False return True def search_cell(self, query): results = self.result_list() now = util.utcnow() ambiguous_cells = [] regions = [] for cell in list(query.cell) + list(query.cell_area): code = cell.mobileCountryCode mcc_regions = GEOCODER.regions_for_mcc(code, metadata=True) # Divide score by number of possible regions for the mcc score = 1.0 / (len(mcc_regions) or 1.0) for mcc_region in mcc_regions: regions.append((mcc_region, score)) if len(mcc_regions) > 1: ambiguous_cells.append(cell) # Group by region code grouped_regions = {} for region, score in regions: code = region.code if code not in grouped_regions: grouped_regions[code] = [region, score] else: # Sum up scores of multiple matches grouped_regions[code][1] += score if ambiguous_cells: # Only do a database query if the mcc is ambiguous. # Use the area models for area and cell entries, # as we are only interested in the region here, # which won't differ between individual cells inside and area. areas = query_areas( query, ambiguous_cells, self.area_model, self.raven_client ) for area in areas: code = area.region if code and code in grouped_regions: grouped_regions[code][1] += area_score(area, now) for region, score in grouped_regions.values(): results.add( self.result_type( region_code=region.code, region_name=region.name, accuracy=region.radius, score=score, ) ) return results
#!/usr/bin/env python import argparse import atexit import logging import sys from datetime import datetime from functools import partial, reduce from itertools import groupby from pprint import pformat from operator import itemgetter from docker import APIClient from humanfriendly import format_size DEFAULT_DOCKER_BASE_URL = 'unix://var/run/docker.sock' HELP_DOCKER_BASE_URL = ( 'Refers to the protocol+hostname+port where the ' 'Docker server is hosted. Defaults to %s') % DEFAULT_DOCKER_BASE_URL DEFAULT_DOCKER_API_VERSION = 'auto' HELP_DOCKER_API_VERSION = ( 'The version of the API the client will use. ' 'Defaults to use the API version provided by the server') DEFAULT_DOCKER_HTTP_TIMEOUT = 5 HELP_DOCKER_HTTP_TIMEOUT = ( 'The HTTP request timeout, in seconds. ' 'Defaults to %d secs') % DEFAULT_DOCKER_HTTP_TIMEOUT DEFAULT_IMAGES_TO_KEEP = 2 HELP_IMAGES_TO_KEEP = ( 'How many docker images to keep. ' 'Defaults to %d images') % DEFAULT_IMAGES_TO_KEEP HELP_KEEP_NONE_IMAGES = 'Keep <none> images' HELP_NOOP = 'Do nothing' HELP_VERBOSE = 'Print images to delete' def _exit(): logging.shutdown() def is_debug_on(): return logging.getLogger().getEffectiveLevel() == logging.DEBUG def debug_var(debug, name, var): if debug: logging.debug('Var %s has: %s' % (name, pformat(var))) def setup_parser(parser): parser.add_argument('--debug', help='debug mode', action='store_true') parser.add_argument( '--base-url', help=HELP_DOCKER_BASE_URL, default=DEFAULT_DOCKER_BASE_URL) parser.add_argument( '--api-version', help=HELP_DOCKER_API_VERSION, default=DEFAULT_DOCKER_API_VERSION) parser.add_argument( '--http-timeout', help=HELP_DOCKER_HTTP_TIMEOUT, default=DEFAULT_DOCKER_HTTP_TIMEOUT, type=int) parser.add_argument( '--images-to-keep', help=HELP_IMAGES_TO_KEEP, default=DEFAULT_IMAGES_TO_KEEP, type=int) parser.add_argument( '--keep-none-images', help=HELP_KEEP_NONE_IMAGES, action='store_true') parser.add_argument('--noop', help=HELP_NOOP, action='store_true') parser.add_argument('--verbose', help=HELP_VERBOSE, action='store_true') return parser def validate_args(args): checks = [ (lambda args: args.http_timeout < 0, 'HTTP timeout should be 0 or bigger\n'), (lambda args: args.images_to_keep < 0, 'Images to keep should be 0 or bigger\n')] if [sys.stderr.write(msg) for checker, msg in checks if checker(args)]: sys.exit(1) def split_by_none(images_lists, image): if not isinstance(images_lists, tuple): raise TypeError('First argument should be a tuple') non_none_images, none_images = images_lists if image[u'RepoTags'] is None or u'<none>:<none>' in image[u'RepoTags']: none_images.append(image) else: non_none_images.append(image) return (non_none_images, none_images) def split_images(images): return reduce(split_by_none, images, ([], [])) def remove_keys_from_dict(keys, dict_): return {k: v for k, v in dict_.items() if k not in keys} def add_image_to_grp_images(grp_images, image): repos = sorted([e.split(':')[0] for e in image[u'RepoTags']]) for repo, _ in groupby(repos): if repo in grp_images: grp_images[repo].append(image) else: grp_images[repo] = [image] return grp_images def group_by_repo(images): return reduce(add_image_to_grp_images, images, {}) def reverse_sort_images_created(images): return sorted(images, key=itemgetter(u'Created'), reverse=True) def sort_images_in_repos(repos): return {k: reverse_sort_images_created(v) for k, v in repos.items()} def beautify_image(image): new_image = remove_keys_from_dict( [u'RepoDigests', u'ParentId', u'Labels'], image) new_image[u'Created'] = datetime.fromtimestamp( 
        image[u'Created']).isoformat(' ')
    new_image[u'Size'] = format_size(image[u'Size'])
    new_image[u'VirtualSize'] = format_size(image[u'VirtualSize'])
    return new_image


def print_images_to_delete(images):
    print('Images to delete')
    print(pformat([beautify_image(image) for image in images]))


def remove_docker_image(client, image, verbose):
    try:
        if verbose:
            print("Removing {}".format(image[u'Id']))
        if image[u'RepoTags'] is None or u'<none>:<none>' in image[u'RepoTags']:
            client.remove_image(image[u'Id'])
        else:
            [client.remove_image(tag) for tag in image[u'RepoTags']]
    except Exception as e:
        if verbose:
            print(e)


def delete_images(client, images, verbose):
    [remove_docker_image(client, image, verbose) for image in images]


def get_images_to_delete(none_images, repos, num_images_to_keep, keep_nones):
    images_to_delete = []
    if not keep_nones:
        images_to_delete.extend(none_images)
    [images_to_delete.extend(repo_images[num_images_to_keep:])
     for repo_images in repos.values()
     if len(repo_images) > num_images_to_keep]
    return images_to_delete


def _build_docker_client(args):
    if _is_osx_platform():
        return _macosx_docker_client(args)
    else:
        return _default_docker_client(args)


def _macosx_docker_client(args):
    from docker.utils import kwargs_from_env
    kwargs = kwargs_from_env()
    # Read http://docker-py.readthedocs.org/en/latest/boot2docker/
    kwargs['tls'].assert_hostname = False
    kwargs['version'] = args.api_version
    kwargs['timeout'] = args.http_timeout
    return APIClient(**kwargs)


def _default_docker_client(args):
    return APIClient(base_url=args.base_url,
                     version=args.api_version,
                     timeout=args.http_timeout)


def _is_osx_platform():
    from sys import platform as _platform
    return "darwin" in _platform


def main():
    atexit.register(_exit)
    parser = setup_parser(argparse.ArgumentParser(
        description='Clean old docker images'))
    args = parser.parse_args()
    if args.debug:
        logging.basicConfig(level=logging.DEBUG)
    debug = partial(debug_var, debug=is_debug_on())
    debug(name='args', var=args)
    validate_args(args)
    client = _build_docker_client(args)
    images = client.images(all=True)
    debug(name='images', var=images)
    non_none_images, none_images = split_images(images)
    debug(name='non_none_images', var=non_none_images)
    debug(name='none_images', var=none_images)
    repos = sort_images_in_repos(group_by_repo(non_none_images))
    debug(name='repos', var=repos)
    images_to_delete = get_images_to_delete(
        none_images, repos, args.images_to_keep, args.keep_none_images)
    debug(name='images_to_delete', var=images_to_delete)
    if args.verbose:
        print_images_to_delete(images_to_delete)
    if args.noop:
        sys.exit(0)
    delete_images(client, images_to_delete, args.verbose)


if __name__ == '__main__':
    main()
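

# --- Illustrative sketch, not part of the cleanup script above ---
# Demonstrates how images are split into tagged/untagged groups and then
# grouped per repository; the image dicts below are fabricated and only
# carry the keys these helpers actually read.
def _example_grouping():  # pragma: no cover
    images = [
        {u'RepoTags': [u'web:1', u'web:2'], u'Created': 10},
        {u'RepoTags': [u'db:1'], u'Created': 20},
        {u'RepoTags': [u'<none>:<none>'], u'Created': 30},
    ]
    tagged, untagged = split_images(images)
    assert len(untagged) == 1
    repos = sort_images_in_repos(group_by_repo(tagged))
    # One entry per repository name, images sorted newest first.
    assert sorted(repos) == [u'db', u'web']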
# coding=utf-8 # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). from __future__ import (absolute_import, division, generators, nested_scopes, print_function, unicode_literals, with_statement) import codecs import os import re from pkg_resources import resource_string from pygments.formatters.html import HtmlFormatter from pygments.styles import get_all_styles from pants.backend.docgen.targets.doc import Page from pants.base.build_environment import get_buildroot from pants.base.exceptions import TaskError from pants.base.generator import Generator from pants.base.workunit import WorkUnitLabel from pants.binaries import binary_util from pants.build_graph.address import Address from pants.task.task import Task from pants.util.dirutil import safe_mkdir def util(): """Indirection function so we can lazy-import our utils. It's an expensive import that invokes re.compile a lot (via markdown and pygments), so we don't want to incur that cost unless we must. """ from pants.backend.docgen.tasks import markdown_to_html_utils return markdown_to_html_utils class MarkdownToHtml(Task): """Generate HTML from Markdown docs.""" @classmethod def register_options(cls, register): register('--code-style', choices=list(get_all_styles()), default='friendly', fingerprint=True, help='Use this stylesheet for code highlights.') register('--open', type=bool, help='Open the generated documents in a browser.') register('--fragment', type=bool, fingerprint=True, help='Generate a fragment of html to embed in a page.') register('--ignore-failure', type=bool, fingerprint=True, help='Do not consider rendering errors to be build errors.') @classmethod def product_types(cls): return ['markdown_html', 'wiki_html'] def __init__(self, *args, **kwargs): super(MarkdownToHtml, self).__init__(*args, **kwargs) self._templates_dir = os.path.join('templates', 'markdown') self.open = self.get_options().open self.fragment = self.get_options().fragment self.code_style = self.get_options().code_style def execute(self): # TODO(John Sirois): consider adding change detection outdir = os.path.join(self.get_options().pants_distdir, 'markdown') css_path = os.path.join(outdir, 'css', 'codehighlight.css') css = util().emit_codehighlight_css(css_path, self.code_style) if css: self.context.log.info('Emitted {}'.format(css)) def is_page(target): return isinstance(target, Page) roots = set() interior_nodes = set() if self.open: dependencies_by_page = self.context.dependents(on_predicate=is_page, from_predicate=is_page) roots.update(dependencies_by_page.keys()) for dependencies in dependencies_by_page.values(): interior_nodes.update(dependencies) roots.difference_update(dependencies) for page in self.context.targets(is_page): # There are no in or out edges so we need to show show this isolated page. 
if not page.dependencies and page not in interior_nodes: roots.add(page) with self.context.new_workunit(name='render', labels=[WorkUnitLabel.MULTITOOL]): plaingenmap = self.context.products.get('markdown_html') wikigenmap = self.context.products.get('wiki_html') show = [] for page in self.context.targets(is_page): def process_page(key, outdir, url_builder, genmap, fragment=False): if page.format == 'rst': with self.context.new_workunit(name='rst') as workunit: html_path = self.process_rst( workunit, page, os.path.join(outdir, util().page_to_html_path(page)), os.path.join(page.payload.sources.rel_path, page.source), self.fragment or fragment, ) else: with self.context.new_workunit(name='md'): html_path = self.process_md( os.path.join(outdir, util().page_to_html_path(page)), os.path.join(page.payload.sources.rel_path, page.source), self.fragment or fragment, url_builder, css=css, ) self.context.log.info('Processed {} to {}'.format(page.source, html_path)) relpath = os.path.relpath(html_path, outdir) genmap.add(key, outdir, [relpath]) return html_path def url_builder(linked_page): dest = util().page_to_html_path(linked_page) src_dir = os.path.dirname(util().page_to_html_path(page)) return linked_page.name, os.path.relpath(dest, src_dir) page_path = os.path.join(outdir, 'html') html = process_page(page, page_path, url_builder, plaingenmap) if css and not self.fragment: plaingenmap.add(page, self.workdir, list(css_path)) if self.open and page in roots: show.append(html) if page.provides: for wiki in page.provides: basedir = os.path.join(self.workdir, str(hash(wiki))) process_page((wiki, page), basedir, wiki.wiki.url_builder, wikigenmap, fragment=True) if show: binary_util.ui_open(*show) PANTS_LINK = re.compile(r'''pants\(['"]([^)]+)['"]\)(#.*)?''') def process_md(self, output_path, source, fragmented, url_builder, css=None): def parse_url(spec): match = self.PANTS_LINK.match(spec) if match: address = Address.parse(match.group(1), relative_to=get_buildroot()) page = self.context.build_graph.get_target(address) anchor = match.group(2) or '' if not page: raise TaskError('Invalid markdown link to pants target: "{}" when processing {}. 
' 'Is your page missing a dependency on this target?'.format( match.group(1), source)) alias, url = url_builder(page) return alias, url + anchor else: return spec, spec def build_url(label): components = label.split('|', 1) if len(components) == 1: return parse_url(label.strip()) else: alias, link = components _, url = parse_url(link.strip()) return alias, url wikilinks = util().WikilinksExtension(build_url) safe_mkdir(os.path.dirname(output_path)) with codecs.open(output_path, 'w', 'utf-8') as output: source_path = os.path.join(get_buildroot(), source) with codecs.open(source_path, 'r', 'utf-8') as source_stream: md_html = util().markdown.markdown( source_stream.read(), extensions=['codehilite(guess_lang=False)', 'extra', 'tables', 'toc', wikilinks, util().IncludeExcerptExtension(source_path)], ) if fragmented: style_css = (HtmlFormatter(style=self.code_style)).get_style_defs('.codehilite') template = resource_string(__name__, os.path.join(self._templates_dir, 'fragment.mustache')) generator = Generator(template, style_css=style_css, md_html=md_html) generator.write(output) else: style_link = os.path.relpath(css, os.path.dirname(output_path)) template = resource_string(__name__, os.path.join(self._templates_dir, 'page.mustache')) generator = Generator(template, style_link=style_link, md_html=md_html) generator.write(output) return output.name def process_rst(self, workunit, page, output_path, source, fragmented): source_path = os.path.join(get_buildroot(), source) with codecs.open(source_path, 'r', 'utf-8') as source_stream: rst_html, returncode = util().rst_to_html(source_stream.read(), stderr=workunit.output('stderr')) if returncode != 0: message = '{} rendered with errors.'.format(source_path) if self.get_options().ignore_failure: self.context.log.warn(message) else: raise TaskError(message, exit_code=returncode, failed_targets=[page]) template_path = os.path.join(self._templates_dir, 'fragment.mustache' if fragmented else 'page.mustache') template = resource_string(__name__, template_path) generator = Generator(template, md_html=rst_html) safe_mkdir(os.path.dirname(output_path)) with codecs.open(output_path, 'w', 'utf-8') as output: generator.write(output) return output.name
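

# --- Illustrative sketch, not part of the task above ---
# Shows what the PANTS_LINK pattern extracts from a wiki-style pants link;
# the target spec 'src/docs:readme' is hypothetical.
def _example_pants_link():  # pragma: no cover
  match = MarkdownToHtml.PANTS_LINK.match("pants('src/docs:readme')#intro")
  assert match.group(1) == 'src/docs:readme'
  assert match.group(2) == '#intro'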
from __future__ import unicode_literals import pytest import time import redis from redis.exceptions import ConnectionError from redis._compat import basestring, unichr from .conftest import _get_client from .conftest import skip_if_server_version_lt def wait_for_message(pubsub, timeout=0.1, ignore_subscribe_messages=False): now = time.time() timeout = now + timeout while now < timeout: message = pubsub.get_message( ignore_subscribe_messages=ignore_subscribe_messages) if message is not None: return message time.sleep(0.01) now = time.time() return None def make_message(type, channel, data, pattern=None): return { 'type': type, 'pattern': pattern and pattern.encode('utf-8') or None, 'channel': channel and channel.encode('utf-8') or None, 'data': data.encode('utf-8') if isinstance(data, basestring) else data } def make_subscribe_test_data(pubsub, type): if type == 'channel': return { 'p': pubsub, 'sub_type': 'subscribe', 'unsub_type': 'unsubscribe', 'sub_func': pubsub.subscribe, 'unsub_func': pubsub.unsubscribe, 'keys': ['foo', 'bar', 'uni' + unichr(4456) + 'code'] } elif type == 'pattern': return { 'p': pubsub, 'sub_type': 'psubscribe', 'unsub_type': 'punsubscribe', 'sub_func': pubsub.psubscribe, 'unsub_func': pubsub.punsubscribe, 'keys': ['f*', 'b*', 'uni' + unichr(4456) + '*'] } assert False, 'invalid subscribe type: %s' % type class TestPubSubSubscribeUnsubscribe(object): def _test_subscribe_unsubscribe(self, p, sub_type, unsub_type, sub_func, unsub_func, keys): for key in keys: assert sub_func(key) is None # should be a message for each channel/pattern we just subscribed to for i, key in enumerate(keys): assert wait_for_message(p) == make_message(sub_type, key, i + 1) for key in keys: assert unsub_func(key) is None # should be a message for each channel/pattern we just unsubscribed # from for i, key in enumerate(keys): i = len(keys) - 1 - i assert wait_for_message(p) == make_message(unsub_type, key, i) def test_channel_subscribe_unsubscribe(self, r): kwargs = make_subscribe_test_data(r.pubsub(), 'channel') self._test_subscribe_unsubscribe(**kwargs) def test_pattern_subscribe_unsubscribe(self, r): kwargs = make_subscribe_test_data(r.pubsub(), 'pattern') self._test_subscribe_unsubscribe(**kwargs) def _test_resubscribe_on_reconnection(self, p, sub_type, unsub_type, sub_func, unsub_func, keys): for key in keys: assert sub_func(key) is None # should be a message for each channel/pattern we just subscribed to for i, key in enumerate(keys): assert wait_for_message(p) == make_message(sub_type, key, i + 1) # manually disconnect p.connection.disconnect() # calling get_message again reconnects and resubscribes # note, we may not re-subscribe to channels in exactly the same order # so we have to do some extra checks to make sure we got them all messages = [] for i in range(len(keys)): messages.append(wait_for_message(p)) unique_channels = set() assert len(messages) == len(keys) for i, message in enumerate(messages): assert message['type'] == sub_type assert message['data'] == i + 1 assert isinstance(message['channel'], bytes) channel = message['channel'].decode('utf-8') unique_channels.add(channel) assert len(unique_channels) == len(keys) for channel in unique_channels: assert channel in keys def test_resubscribe_to_channels_on_reconnection(self, r): kwargs = make_subscribe_test_data(r.pubsub(), 'channel') self._test_resubscribe_on_reconnection(**kwargs) def test_resubscribe_to_patterns_on_reconnection(self, r): kwargs = make_subscribe_test_data(r.pubsub(), 'pattern') 
self._test_resubscribe_on_reconnection(**kwargs) def _test_subscribed_property(self, p, sub_type, unsub_type, sub_func, unsub_func, keys): assert p.subscribed is False sub_func(keys[0]) # we're now subscribed even though we haven't processed the # reply from the server just yet assert p.subscribed is True assert wait_for_message(p) == make_message(sub_type, keys[0], 1) # we're still subscribed assert p.subscribed is True # unsubscribe from all channels unsub_func() # we're still technically subscribed until we process the # response messages from the server assert p.subscribed is True assert wait_for_message(p) == make_message(unsub_type, keys[0], 0) # now we're no longer subscribed as no more messages can be delivered # to any channels we were listening to assert p.subscribed is False # subscribing again flips the flag back sub_func(keys[0]) assert p.subscribed is True assert wait_for_message(p) == make_message(sub_type, keys[0], 1) # unsubscribe again unsub_func() assert p.subscribed is True # subscribe to another channel before reading the unsubscribe response sub_func(keys[1]) assert p.subscribed is True # read the unsubscribe for key1 assert wait_for_message(p) == make_message(unsub_type, keys[0], 0) # we're still subscribed to key2, so subscribed should still be True assert p.subscribed is True # read the key2 subscribe message assert wait_for_message(p) == make_message(sub_type, keys[1], 1) unsub_func() # haven't read the message yet, so we're still subscribed assert p.subscribed is True assert wait_for_message(p) == make_message(unsub_type, keys[1], 0) # now we're finally unsubscribed assert p.subscribed is False def test_subscribe_property_with_channels(self, r): kwargs = make_subscribe_test_data(r.pubsub(), 'channel') self._test_subscribed_property(**kwargs) def test_subscribe_property_with_patterns(self, r): kwargs = make_subscribe_test_data(r.pubsub(), 'pattern') self._test_subscribed_property(**kwargs) def test_ignore_all_subscribe_messages(self, r): p = r.pubsub(ignore_subscribe_messages=True) checks = ( (p.subscribe, 'foo'), (p.unsubscribe, 'foo'), (p.psubscribe, 'f*'), (p.punsubscribe, 'f*'), ) assert p.subscribed is False for func, channel in checks: assert func(channel) is None assert p.subscribed is True assert wait_for_message(p) is None assert p.subscribed is False def test_ignore_individual_subscribe_messages(self, r): p = r.pubsub() checks = ( (p.subscribe, 'foo'), (p.unsubscribe, 'foo'), (p.psubscribe, 'f*'), (p.punsubscribe, 'f*'), ) assert p.subscribed is False for func, channel in checks: assert func(channel) is None assert p.subscribed is True message = wait_for_message(p, ignore_subscribe_messages=True) assert message is None assert p.subscribed is False def test_sub_unsub_resub_channels(self, r): kwargs = make_subscribe_test_data(r.pubsub(), 'channel') self._test_sub_unsub_resub(**kwargs) def test_sub_unsub_resub_patterns(self, r): kwargs = make_subscribe_test_data(r.pubsub(), 'pattern') self._test_sub_unsub_resub(**kwargs) def _test_sub_unsub_resub(self, p, sub_type, unsub_type, sub_func, unsub_func, keys): # https://github.com/andymccurdy/redis-py/issues/764 key = keys[0] sub_func(key) unsub_func(key) sub_func(key) assert p.subscribed is True assert wait_for_message(p) == make_message(sub_type, key, 1) assert wait_for_message(p) == make_message(unsub_type, key, 0) assert wait_for_message(p) == make_message(sub_type, key, 1) assert p.subscribed is True def test_sub_unsub_all_resub_channels(self, r): kwargs = make_subscribe_test_data(r.pubsub(), 'channel') 
self._test_sub_unsub_all_resub(**kwargs) def test_sub_unsub_all_resub_patterns(self, r): kwargs = make_subscribe_test_data(r.pubsub(), 'pattern') self._test_sub_unsub_all_resub(**kwargs) def _test_sub_unsub_all_resub(self, p, sub_type, unsub_type, sub_func, unsub_func, keys): # https://github.com/andymccurdy/redis-py/issues/764 key = keys[0] sub_func(key) unsub_func() sub_func(key) assert p.subscribed is True assert wait_for_message(p) == make_message(sub_type, key, 1) assert wait_for_message(p) == make_message(unsub_type, key, 0) assert wait_for_message(p) == make_message(sub_type, key, 1) assert p.subscribed is True class TestPubSubMessages(object): def setup_method(self, method): self.message = None def message_handler(self, message): self.message = message def test_published_message_to_channel(self, r): p = r.pubsub() p.subscribe('foo') assert wait_for_message(p) == make_message('subscribe', 'foo', 1) assert r.publish('foo', 'test message') == 1 message = wait_for_message(p) assert isinstance(message, dict) assert message == make_message('message', 'foo', 'test message') def test_published_message_to_pattern(self, r): p = r.pubsub() p.subscribe('foo') p.psubscribe('f*') assert wait_for_message(p) == make_message('subscribe', 'foo', 1) assert wait_for_message(p) == make_message('psubscribe', 'f*', 2) # 1 to pattern, 1 to channel assert r.publish('foo', 'test message') == 2 message1 = wait_for_message(p) message2 = wait_for_message(p) assert isinstance(message1, dict) assert isinstance(message2, dict) expected = [ make_message('message', 'foo', 'test message'), make_message('pmessage', 'foo', 'test message', pattern='f*') ] assert message1 in expected assert message2 in expected assert message1 != message2 def test_channel_message_handler(self, r): p = r.pubsub(ignore_subscribe_messages=True) p.subscribe(foo=self.message_handler) assert wait_for_message(p) is None assert r.publish('foo', 'test message') == 1 assert wait_for_message(p) is None assert self.message == make_message('message', 'foo', 'test message') def test_pattern_message_handler(self, r): p = r.pubsub(ignore_subscribe_messages=True) p.psubscribe(**{'f*': self.message_handler}) assert wait_for_message(p) is None assert r.publish('foo', 'test message') == 1 assert wait_for_message(p) is None assert self.message == make_message('pmessage', 'foo', 'test message', pattern='f*') def test_unicode_channel_message_handler(self, r): p = r.pubsub(ignore_subscribe_messages=True) channel = 'uni' + unichr(4456) + 'code' channels = {channel: self.message_handler} p.subscribe(**channels) assert wait_for_message(p) is None assert r.publish(channel, 'test message') == 1 assert wait_for_message(p) is None assert self.message == make_message('message', channel, 'test message') def test_unicode_pattern_message_handler(self, r): p = r.pubsub(ignore_subscribe_messages=True) pattern = 'uni' + unichr(4456) + '*' channel = 'uni' + unichr(4456) + 'code' p.psubscribe(**{pattern: self.message_handler}) assert wait_for_message(p) is None assert r.publish(channel, 'test message') == 1 assert wait_for_message(p) is None assert self.message == make_message('pmessage', channel, 'test message', pattern=pattern) def test_get_message_without_subscribe(self, r): p = r.pubsub() with pytest.raises(RuntimeError) as info: p.get_message() expect = ('connection not set: ' 'did you forget to call subscribe() or psubscribe()?') assert expect in info.exconly() class TestPubSubAutoDecoding(object): "These tests only validate that we get unicode values back" channel = 
'uni' + unichr(4456) + 'code' pattern = 'uni' + unichr(4456) + '*' data = 'abc' + unichr(4458) + '123' def make_message(self, type, channel, data, pattern=None): return { 'type': type, 'channel': channel, 'pattern': pattern, 'data': data } def setup_method(self, method): self.message = None def message_handler(self, message): self.message = message @pytest.fixture() def r(self, request): return _get_client(redis.Redis, request=request, decode_responses=True) def test_channel_subscribe_unsubscribe(self, r): p = r.pubsub() p.subscribe(self.channel) assert wait_for_message(p) == self.make_message('subscribe', self.channel, 1) p.unsubscribe(self.channel) assert wait_for_message(p) == self.make_message('unsubscribe', self.channel, 0) def test_pattern_subscribe_unsubscribe(self, r): p = r.pubsub() p.psubscribe(self.pattern) assert wait_for_message(p) == self.make_message('psubscribe', self.pattern, 1) p.punsubscribe(self.pattern) assert wait_for_message(p) == self.make_message('punsubscribe', self.pattern, 0) def test_channel_publish(self, r): p = r.pubsub() p.subscribe(self.channel) assert wait_for_message(p) == self.make_message('subscribe', self.channel, 1) r.publish(self.channel, self.data) assert wait_for_message(p) == self.make_message('message', self.channel, self.data) def test_pattern_publish(self, r): p = r.pubsub() p.psubscribe(self.pattern) assert wait_for_message(p) == self.make_message('psubscribe', self.pattern, 1) r.publish(self.channel, self.data) assert wait_for_message(p) == self.make_message('pmessage', self.channel, self.data, pattern=self.pattern) def test_channel_message_handler(self, r): p = r.pubsub(ignore_subscribe_messages=True) p.subscribe(**{self.channel: self.message_handler}) assert wait_for_message(p) is None r.publish(self.channel, self.data) assert wait_for_message(p) is None assert self.message == self.make_message('message', self.channel, self.data) # test that we reconnected to the correct channel self.message = None p.connection.disconnect() assert wait_for_message(p) is None # should reconnect new_data = self.data + 'new data' r.publish(self.channel, new_data) assert wait_for_message(p) is None assert self.message == self.make_message('message', self.channel, new_data) def test_pattern_message_handler(self, r): p = r.pubsub(ignore_subscribe_messages=True) p.psubscribe(**{self.pattern: self.message_handler}) assert wait_for_message(p) is None r.publish(self.channel, self.data) assert wait_for_message(p) is None assert self.message == self.make_message('pmessage', self.channel, self.data, pattern=self.pattern) # test that we reconnected to the correct pattern self.message = None p.connection.disconnect() assert wait_for_message(p) is None # should reconnect new_data = self.data + 'new data' r.publish(self.channel, new_data) assert wait_for_message(p) is None assert self.message == self.make_message('pmessage', self.channel, new_data, pattern=self.pattern) def test_context_manager(self, r): with r.pubsub() as pubsub: pubsub.subscribe('foo') assert pubsub.connection is not None assert pubsub.connection is None assert pubsub.channels == {} assert pubsub.patterns == {} class TestPubSubRedisDown(object): def test_channel_subscribe(self, r): r = redis.Redis(host='localhost', port=6390) p = r.pubsub() with pytest.raises(ConnectionError): p.subscribe('foo') class TestPubSubSubcommands(object): @skip_if_server_version_lt('2.8.0') def test_pubsub_channels(self, r): p = r.pubsub() p.subscribe('foo', 'bar', 'baz', 'quux') for i in range(4): assert 
wait_for_message(p)['type'] == 'subscribe' expected = [b'bar', b'baz', b'foo', b'quux'] assert all([channel in r.pubsub_channels() for channel in expected]) @skip_if_server_version_lt('2.8.0') def test_pubsub_numsub(self, r): p1 = r.pubsub() p1.subscribe('foo', 'bar', 'baz') for i in range(3): assert wait_for_message(p1)['type'] == 'subscribe' p2 = r.pubsub() p2.subscribe('bar', 'baz') for i in range(2): assert wait_for_message(p2)['type'] == 'subscribe' p3 = r.pubsub() p3.subscribe('baz') assert wait_for_message(p3)['type'] == 'subscribe' channels = [(b'foo', 1), (b'bar', 2), (b'baz', 3)] assert channels == r.pubsub_numsub('foo', 'bar', 'baz') @skip_if_server_version_lt('2.8.0') def test_pubsub_numpat(self, r): p = r.pubsub() p.psubscribe('*oo', '*ar', 'b*z') for i in range(3): assert wait_for_message(p)['type'] == 'psubscribe' assert r.pubsub_numpat() == 3 class TestPubSubPings(object): @skip_if_server_version_lt('3.0.0') def test_send_pubsub_ping(self, r): p = r.pubsub(ignore_subscribe_messages=True) p.subscribe('foo') p.ping() assert wait_for_message(p) == make_message(type='pong', channel=None, data='', pattern=None) @skip_if_server_version_lt('3.0.0') def test_send_pubsub_ping_message(self, r): p = r.pubsub(ignore_subscribe_messages=True) p.subscribe('foo') p.ping(message='hello world') assert wait_for_message(p) == make_message(type='pong', channel=None, data='hello world', pattern=None) class TestPubSubConnectionKilled(object): @skip_if_server_version_lt('3.0.0') def test_connection_error_raised_when_connection_dies(self, r): p = r.pubsub() p.subscribe('foo') assert wait_for_message(p) == make_message('subscribe', 'foo', 1) for client in r.client_list(): if client['cmd'] == 'subscribe': r.client_kill_filter(_id=client['id']) with pytest.raises(ConnectionError): wait_for_message(p) class TestPubSubTimeouts(object): def test_get_message_with_timeout_returns_none(self, r): p = r.pubsub() p.subscribe('foo') assert wait_for_message(p) == make_message('subscribe', 'foo', 1) assert p.get_message(timeout=0.01) is None
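

# --- Illustrative sketch, not part of the test suite above ---
# A minimal, self-contained example of driving a pubsub connection with the
# wait_for_message() and make_message() helpers defined at the top of this
# module; it assumes a Redis server reachable on localhost:6379.
def _example_pubsub_roundtrip():  # pragma: no cover
    r = redis.Redis()
    p = r.pubsub()
    p.subscribe('example-channel')
    # The first message acknowledges the subscription.
    assert wait_for_message(p) == make_message('subscribe', 'example-channel', 1)
    r.publish('example-channel', 'hello')
    assert wait_for_message(p) == make_message('message', 'example-channel', 'hello')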
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- import uuid from msrest.pipeline import ClientRawResponse from msrestazure.azure_exceptions import CloudError from msrest.exceptions import DeserializationError from msrestazure.azure_operation import AzureOperationPoller from .. import models class VirtualNetworkPeeringsOperations(object): """VirtualNetworkPeeringsOperations operations. :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An objec model deserializer. :ivar api_version: Client API version. Constant value: "2016-09-01". """ models = models def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer self.api_version = "2016-09-01" self.config = config def _delete_initial( self, resource_group_name, virtual_network_name, virtual_network_peering_name, custom_headers=None, raw=False, **operation_config): # Construct URL url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/virtualNetworkPeerings/{virtualNetworkPeeringName}' path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'), 'virtualNetworkPeeringName': self._serialize.url("virtual_network_peering_name", virtual_network_peering_name, 'str'), 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str') } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') # Construct headers header_parameters = {} header_parameters['Content-Type'] = 'application/json; charset=utf-8' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request request = self._client.delete(url, query_parameters) response = self._client.send(request, header_parameters, stream=False, **operation_config) if response.status_code not in [200, 202, 204]: exp = CloudError(response) exp.request_id = response.headers.get('x-ms-request-id') raise exp if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response def delete( self, resource_group_name, virtual_network_name, virtual_network_peering_name, custom_headers=None, raw=False, **operation_config): """Deletes the specified virtual network peering. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param virtual_network_name: The name of the virtual network. 
:type virtual_network_name: str :param virtual_network_peering_name: The name of the virtual network peering. :type virtual_network_peering_name: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :return: An instance of AzureOperationPoller that returns None or ClientRawResponse if raw=true :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or ~msrest.pipeline.ClientRawResponse :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>` """ raw_result = self._delete_initial( resource_group_name=resource_group_name, virtual_network_name=virtual_network_name, virtual_network_peering_name=virtual_network_peering_name, custom_headers=custom_headers, raw=True, **operation_config ) if raw: return raw_result # Construct and send request def long_running_send(): return raw_result.response def get_long_running_status(status_link, headers=None): request = self._client.get(status_link) if headers: request.headers.update(headers) header_parameters = {} header_parameters['x-ms-client-request-id'] = raw_result.response.request.headers['x-ms-client-request-id'] return self._client.send( request, header_parameters, stream=False, **operation_config) def get_long_running_output(response): if response.status_code not in [200, 202, 204]: exp = CloudError(response) exp.request_id = response.headers.get('x-ms-request-id') raise exp if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response long_running_operation_timeout = operation_config.get( 'long_running_operation_timeout', self.config.long_running_operation_timeout) return AzureOperationPoller( long_running_send, get_long_running_output, get_long_running_status, long_running_operation_timeout) def get( self, resource_group_name, virtual_network_name, virtual_network_peering_name, custom_headers=None, raw=False, **operation_config): """Gets the specified virtual network peering. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param virtual_network_name: The name of the virtual network. :type virtual_network_name: str :param virtual_network_peering_name: The name of the virtual network peering. :type virtual_network_peering_name: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. 
:return: VirtualNetworkPeering or ClientRawResponse if raw=true :rtype: ~azure.mgmt.network.v2016_09_01.models.VirtualNetworkPeering or ~msrest.pipeline.ClientRawResponse :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>` """ # Construct URL url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/virtualNetworkPeerings/{virtualNetworkPeeringName}' path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'), 'virtualNetworkPeeringName': self._serialize.url("virtual_network_peering_name", virtual_network_peering_name, 'str'), 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str') } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') # Construct headers header_parameters = {} header_parameters['Content-Type'] = 'application/json; charset=utf-8' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request request = self._client.get(url, query_parameters) response = self._client.send(request, header_parameters, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) exp.request_id = response.headers.get('x-ms-request-id') raise exp deserialized = None if response.status_code == 200: deserialized = self._deserialize('VirtualNetworkPeering', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized def _create_or_update_initial( self, resource_group_name, virtual_network_name, virtual_network_peering_name, virtual_network_peering_parameters, custom_headers=None, raw=False, **operation_config): # Construct URL url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/virtualNetworkPeerings/{virtualNetworkPeeringName}' path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'), 'virtualNetworkPeeringName': self._serialize.url("virtual_network_peering_name", virtual_network_peering_name, 'str'), 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str') } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') # Construct headers header_parameters = {} header_parameters['Content-Type'] = 'application/json; charset=utf-8' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = 
self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct body body_content = self._serialize.body(virtual_network_peering_parameters, 'VirtualNetworkPeering') # Construct and send request request = self._client.put(url, query_parameters) response = self._client.send( request, header_parameters, body_content, stream=False, **operation_config) if response.status_code not in [200, 201]: exp = CloudError(response) exp.request_id = response.headers.get('x-ms-request-id') raise exp deserialized = None if response.status_code == 200: deserialized = self._deserialize('VirtualNetworkPeering', response) if response.status_code == 201: deserialized = self._deserialize('VirtualNetworkPeering', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized def create_or_update( self, resource_group_name, virtual_network_name, virtual_network_peering_name, virtual_network_peering_parameters, custom_headers=None, raw=False, **operation_config): """Creates or updates a peering in the specified virtual network. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param virtual_network_name: The name of the virtual network. :type virtual_network_name: str :param virtual_network_peering_name: The name of the peering. :type virtual_network_peering_name: str :param virtual_network_peering_parameters: Parameters supplied to the create or update virtual network peering operation. :type virtual_network_peering_parameters: ~azure.mgmt.network.v2016_09_01.models.VirtualNetworkPeering :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :return: An instance of AzureOperationPoller that returns VirtualNetworkPeering or ClientRawResponse if raw=true :rtype: ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2016_09_01.models.VirtualNetworkPeering] or ~msrest.pipeline.ClientRawResponse :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>` """ raw_result = self._create_or_update_initial( resource_group_name=resource_group_name, virtual_network_name=virtual_network_name, virtual_network_peering_name=virtual_network_peering_name, virtual_network_peering_parameters=virtual_network_peering_parameters, custom_headers=custom_headers, raw=True, **operation_config ) if raw: return raw_result # Construct and send request def long_running_send(): return raw_result.response def get_long_running_status(status_link, headers=None): request = self._client.get(status_link) if headers: request.headers.update(headers) header_parameters = {} header_parameters['x-ms-client-request-id'] = raw_result.response.request.headers['x-ms-client-request-id'] return self._client.send( request, header_parameters, stream=False, **operation_config) def get_long_running_output(response): if response.status_code not in [200, 201]: exp = CloudError(response) exp.request_id = response.headers.get('x-ms-request-id') raise exp deserialized = self._deserialize('VirtualNetworkPeering', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized long_running_operation_timeout = operation_config.get( 'long_running_operation_timeout', self.config.long_running_operation_timeout) return AzureOperationPoller( long_running_send, get_long_running_output, get_long_running_status, long_running_operation_timeout) def list( 
self, resource_group_name, virtual_network_name, custom_headers=None, raw=False, **operation_config): """Gets all virtual network peerings in a virtual network. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param virtual_network_name: The name of the virtual network. :type virtual_network_name: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. :return: An iterator like instance of VirtualNetworkPeering :rtype: ~azure.mgmt.network.v2016_09_01.models.VirtualNetworkPeeringPaged[~azure.mgmt.network.v2016_09_01.models.VirtualNetworkPeering] :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>` """ def internal_paging(next_link=None, raw=False): if not next_link: # Construct URL url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/virtualNetworkPeerings' path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'), 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str') } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') else: url = next_link query_parameters = {} # Construct headers header_parameters = {} header_parameters['Content-Type'] = 'application/json; charset=utf-8' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request request = self._client.get(url, query_parameters) response = self._client.send( request, header_parameters, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) exp.request_id = response.headers.get('x-ms-request-id') raise exp return response # Deserialize response deserialized = models.VirtualNetworkPeeringPaged(internal_paging, self._deserialize.dependencies) if raw: header_dict = {} client_raw_response = models.VirtualNetworkPeeringPaged(internal_paging, self._deserialize.dependencies, header_dict) return client_raw_response return deserialized
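
# --- Added usage sketch (not generated code) ---
# A hedged example of how VirtualNetworkPeeringsOperations is normally reached
# through NetworkManagementClient in the msrestazure-era azure-mgmt-network
# package, and how the AzureOperationPoller returned by create_or_update is
# consumed. The credentials, resource names, subscription id and remote VNet
# resource id are placeholders.
from azure.common.credentials import ServicePrincipalCredentials
from azure.mgmt.network import NetworkManagementClient
from azure.mgmt.network.v2016_09_01.models import (SubResource,
                                                   VirtualNetworkPeering)

credentials = ServicePrincipalCredentials(
    client_id='<app-id>', secret='<app-secret>', tenant='<tenant-id>')
client = NetworkManagementClient(credentials, '<subscription-id>')

peering_params = VirtualNetworkPeering(
    remote_virtual_network=SubResource(id='<resource-id-of-vnet-b>'),
    allow_virtual_network_access=True)

poller = client.virtual_network_peerings.create_or_update(
    resource_group_name='my-rg',
    virtual_network_name='vnet-a',
    virtual_network_peering_name='peer-a-to-b',
    virtual_network_peering_parameters=peering_params)
peering = poller.result()  # blocks until the long-running operation finishes
print(peering.peering_state)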
#!/usr/bin/python # -*- coding: utf-8 -*- import queue import multiprocessing import magic import logging import dbApi import signal import traceback import os.path import random random.seed() import tqdm import scanner.runState import UniversalArchiveInterface as uar import scanner.hashFile as hasher IMAGE_EXTS = ("bmp", "eps", "gif", "im", "jpeg", "jpg", "msp", "pcx", "png", "ppm", "spider", "tiff", "webp", "xbm") ARCH_EXTS = ("zip", "rar", "cbz", "cbr", "7z", "cb7") class HashEngine(object): def __init__(self, inputQueue, outputQueue, threads=2, pHash=True, integrity=True): self.log = logging.getLogger("Main.HashEngine") self.tlog = logging.getLogger("Main.HashEngineThread") self.hashWorkers = threads self.inQ = inputQueue self.outQ = outputQueue self.archIntegrity = integrity self.runStateMgr = multiprocessing.Manager() self.manNamespace = self.runStateMgr.Namespace() self.dbApi = self.getDbConnection() def getDbConnection(self): ''' Intended to be overridden in unit-tests ''' return dbApi.DbApi() def runThreads(self): self.manNamespace.stopOnEmpty = False self.manNamespace.run = True args = (self.inQ, self.outQ, self.manNamespace, self.archIntegrity) self.pool = multiprocessing.pool.Pool(processes=self.hashWorkers, initializer=createHashThread, initargs=args) def close(self): self.log.info("Closing threadpool") self.manNamespace.run = False self.pool.terminate() def haltEarly(self): self.manNamespace.run = False def gracefulShutdown(self): self.manNamespace.stopOnEmpty = True self.pool.close() self.pool.join() def cleanPathCache(self, fqPathBase): self.log.info("Querying for all files on specified path.") itemsCursor = self.dbApi.getUniqueOnBasePath(fqPathBase) items = [] retItems = 0 for item in tqdm.tqdm(itemsCursor): retItems += 1 items.append(item[0]) if not scanner.runState.run: print("Breaking due to exit flag") return self.log.info("Looking for files in the DB that are not on disk anymore.") self.log.info("Recieved items = %d", retItems) self.log.info("total unique items = %s", len(items)) for itemPath in tqdm.tqdm(items, desc="Exist check"): if not os.path.exists(itemPath): self.log.info("Item %s does not exist. Should delete from DB", itemPath) self.dbApi.deleteBasePath(itemPath) try: if not scanner.runState.run: print("Breaking due to exit flag") return except BrokenPipeError: self.log.error("Runstate thread exited? Halting") return self.outQ.put("clean") def createHashThread(inQueue, outQueue, runMgr, integrity): # Make all the thread-pool threads ignore SIGINT, so they won't freak out on CTRL+C signal.signal(signal.SIGINT, signal.SIG_IGN) runner = HashThread(inQueue, outQueue, runMgr, integrity) runner.run() class HashThread(object): loggerPath = "Main.HashEngine" def __init__(self, inputQueue, outputQueue, runMgr, integrity=True): # If we're running as a multiprocessing thread, inject that into # the logger path threadName = multiprocessing.current_process().name if threadName: self.tlog = logging.getLogger("%s.%s" % (self.loggerPath, threadName)) else: self.tlog = logging.getLogger(self.loggerPath) self.runMgr = runMgr self.inQ = inputQueue self.outQ = outputQueue self.archIntegrity = integrity self.dbApi = self.getDbConnection() def getDbConnection(self): ''' Intended to be overridden in unit-tests ''' return dbApi.DbApi() def putProgQueue(self, value): if self.outQ: self.outQ.put(value) def run(self): try: while self.runMgr.run: try: filePath, fileName = self.inQ.get(timeout=0.5) # self.tlog.info("Scan task! 
%s", filePath) self.processFile(filePath) except queue.Empty: if self.runMgr.stopOnEmpty: self.tlog.info("Hashing thread out of tasks. Exiting.") break # self.tlog.info("HashThread loopin! stopOnEmpty = %s, run = %s", self.runMgr.stopOnEmpty, self.runMgr.run) except FileNotFoundError: print("Multiprocessing manager shut down?") except BrokenPipeError: print("Multiprocessing manager shut down?") self.tlog.info("Scanner exiting.") self.inQ.close() self.outQ.close() self.inQ.join_thread() self.outQ.join_thread() def scanArchive(self, archPath, archData): # print("Scanning archive", archPath) archIterator = uar.ArchiveReader(archPath, fileContents=archData) fnames = [item[0] for item in archIterator] fset = set(fnames) if len(fnames) != len(fset): print(fnames) print(fset) raise ValueError("Wat?") self.dbApi.begin() try: for fName, fp in archIterator: fCont = fp.read() fName, hexHash, pHash, imX, imY = hasher.hashFile(archPath, fName, fCont) insertArgs = { "fsPath" :archPath, "internalPath" :fName, "itemHash" :hexHash, "pHash" :pHash, "imgX" :imX, "imgY" :imY } self.dbApi.insertIntoDb(**insertArgs) self.putProgQueue("processed") if not scanner.runState.run: break except: print(archPath) self.dbApi.rollback() raise self.dbApi.commit() archIterator.close() def processImageFile(self, wholePath, dbFilePath): scan = True have = self.dbApi.getItem(fspath=dbFilePath, wantCols = ['phash', 'imgx', 'imgy']) if have and all(have): scan = False # print("Have hashes - ", dummy_itemHash, pHash, dHash) if scan: with open(wholePath, "rb") as fp: fCont = fp.read() try: fName, hexHash, pHash, imX, imY = hasher.hashFile(wholePath, "", fCont) insertArgs = { "fsPath" :wholePath, "internalPath" :fName, # fname == '' in this case "itemHash" :hexHash, "pHash" :pHash, "imgX" :imX, "imgY" :imY } # insert or update data row self.dbApi.upsert(**insertArgs) self.outQ.put("processed") except (IndexError, UnboundLocalError): self.tlog.error("Error while processing fileN") self.tlog.error("%s", wholePath) self.tlog.error("%s", traceback.format_exc()) # self.log.info("Scanned bare image %s, %s, %s", fileN, pHash, dHash) else: self.outQ.put("skipped") def hashBareFile(self, wholePath, dbPath, doPhash=True): with open(wholePath, "rb") as fp: fCont = fp.read() fName, hexHash, pHash, imX, imY = hasher.hashFile(wholePath, "", fCont) insertArgs = { "fsPath" :wholePath, "internalPath" :fName, # fname == '' in this case "itemHash" :hexHash, "pHash" :pHash, "imgX" :imX, "imgY" :imY } self.dbApi.insertIntoDb(**insertArgs) self.putProgQueue("processed") def getFileMd5(self, wholePath): with open(wholePath, "rb") as fp: fCont = fp.read() hexHash = hasher.getMd5Hash(fCont) return hexHash, fCont def processArchive(self, wholePath): fType = "none" fCont = None archRow = self.dbApi.getItemsOnBasePathInternalPath(wholePath, "") contHashes = self.dbApi.getItemsOnBasePath(wholePath) haveImInfo = [(bool(item['imgx']) and bool(item['imgy'])) for item in contHashes if item['pHash']] if not all(haveImInfo): self.tlog.info("Missing image size information for archive %s. Rescanning.", wholePath) self.dbApi.deleteBasePath(wholePath) elif len(archRow) > 1: # print("archRow", archRow) raise ValueError("Multiple hashes for a single file? Wat?") elif archRow and archRow[0]['itemhash']: if not self.archIntegrity: self.putProgQueue("skipped") return item = archRow.pop() curHash, fCont = self.getFileMd5(wholePath) if curHash == item['itemhash']: # print("Skipped", wholePath) self.putProgQueue("hash_match") return else: self.tlog.warn("Archive %s has changed! 
Rehashing!", wholePath) self.dbApi.deleteBasePath(wholePath) else: if archRow: self.tlog.info("Missing whole archive hash! Rescanning!") self.dbApi.deleteBasePath(wholePath) curHash, fCont = self.getFileMd5(wholePath) insertArgs = { "fsPath" :wholePath, "internalPath" :'', "itemHash" :curHash } self.dbApi.insertIntoDb(**insertArgs) self.putProgQueue("processed") # TODO: Use `fCont` to prevent having to read each file twice. try: fType = magic.from_file(wholePath, mime=True) if not isinstance(fType, str): fType = fType.decode("ascii") # So some versions of libmagic return application/CDFV2-corrupt for some # thumbs.db, while some return application/CDFV2 for the /same/ file. # In any event, I've never seen a application/CDFV2 file that wasn't # one of the garbage application/CDFV2 file, so just pretend # the corrupt ones aren't corrupt, since we don't wany any of them # anyways. if fType == "application/CDFV2-corrupt": fType = 'application/CDFV2' except magic.MagicException: self.tlog.error("REALLY Corrupt Archive! ") self.tlog.error("%s", wholePath) self.tlog.error("%s", traceback.format_exc()) fType = "none" except IOError: self.tlog.error("Something happened to the file before processing (did it get moved?)! ") self.tlog.error("%s", wholePath) self.tlog.error("%s", traceback.format_exc()) fType = "none" if fType == 'application/zip' or \ fType == 'application/x-rar' or \ fType == 'application/x-7z-compressed': # self.tlog.info("Scanning into archive - %s - %s", fileN, wholePath) try: self.scanArchive(wholePath, fCont) except KeyboardInterrupt: raise except: self.tlog.error("Archive is damaged, corrupt, or not actually an archive: %s", wholePath) self.tlog.error("Error Traceback:") self.tlog.error(traceback.format_exc()) # print("wat?") # print("Archive scan complete") return def processFile(self, wholePath): if wholePath.startswith("/content"): raise ValueError("Wat?") # print("path", wholePath) if wholePath.lower().endswith(ARCH_EXTS): self.processArchive(wholePath) else: # Get list of all hashes for items on wholePath extantItems = self.dbApi.getItemsOnBasePath(wholePath) haveFileHashList = [item['itemhash'] != "" for item in extantItems] haveAllStatList = [(bool(item['imgx']) and bool(item['imgy'])) for item in extantItems if item['pHash']] # Only rescan if we don't have hashes for all the items in the archive (no idea how that would happen), # or we have no items for the archive if all(haveFileHashList) and len(extantItems) and all(haveAllStatList): self.putProgQueue("skipped") return elif wholePath.lower().endswith(IMAGE_EXTS): # It looks like an image. self.processImageFile(wholePath, wholePath) # self.tlog.info("Skipping Image = %s", wholePath) # Rehash the overall archive if we don't have a hash-value for the archive with no internalpath. elif not any([item['itemhash'] and not item['internalPath'] for item in extantItems]): # print("File", wholePath) try: self.hashBareFile(wholePath, wholePath) except IOError: self.tlog.error("Something happened to the file before processing (did it get moved?)! ") self.tlog.error("%s", wholePath) self.tlog.error("%s", traceback.format_exc()) except (IndexError, UnboundLocalError): self.tlog.error("Error while processing wholePath") self.tlog.error("%s", wholePath) self.tlog.error("%s", traceback.format_exc()) else: self.putProgQueue("skipped") # self.tlog.info("Skipping file = %s", wholePath)
import json import os from pathlib import Path import subprocess import sys import signal import pytest import requests import ray from ray import serve from ray.tests.conftest import tmp_working_dir # noqa: F401, E501 from ray._private.test_utils import wait_for_condition from ray.dashboard.optional_utils import RAY_INTERNAL_DASHBOARD_NAMESPACE from ray.serve.scripts import process_args_and_kwargs def ping_endpoint(endpoint: str, params: str = ""): try: return requests.get(f"http://localhost:8000/{endpoint}{params}").text except requests.exceptions.ConnectionError: return "connection error" @pytest.fixture def ray_start_stop(): subprocess.check_output(["ray", "start", "--head"]) yield subprocess.check_output(["ray", "stop", "--force"]) class TestProcessArgsAndKwargs: def test_valid_args_and_kwargs(self): args_and_kwargs = ( "argval1", "argval2", "--kwarg1", "kwval1", "--kwarg2", "kwval2", ) args, kwargs = process_args_and_kwargs(args_and_kwargs) assert args == ["argval1", "argval2"] assert kwargs == {"kwarg1": "kwval1", "kwarg2": "kwval2"} def test_mixed_args_and_kwargs(self): args_and_kwargs = ( "argval1", "--kwarg1", "kwval1", "argval2", "--kwarg2", "kwval2", ) with pytest.raises(ValueError): process_args_and_kwargs(args_and_kwargs) def test_mixed_kwargs(self): args_and_kwargs = ( "argval1", "argval2", "--kwarg1==kw==val1", "--kwarg2", "kwval2", "--kwarg3", "=kwval=3", "--kwarg4=", "--kwarg5", "kwval5", ) args, kwargs = process_args_and_kwargs(args_and_kwargs) assert args == ["argval1", "argval2"] assert kwargs == { "kwarg1": "=kw==val1", "kwarg2": "kwval2", "kwarg3": "=kwval=3", "kwarg4": "", "kwarg5": "kwval5", } def test_empty_kwarg(self): args_and_kwargs = ( "argval1", "--kwarg1", "--kwarg2", "kwval2", ) with pytest.raises(ValueError): process_args_and_kwargs(args_and_kwargs) args_and_kwargs = ("--empty_kwarg_only",) with pytest.raises(ValueError): process_args_and_kwargs(args_and_kwargs) def test_empty_equals_kwarg(self): args_and_kwargs = ( "argval1", "--kwarg1=--hello", "--kwarg2=", ) args, kwargs = process_args_and_kwargs(args_and_kwargs) assert args == ["argval1"] assert kwargs == { "kwarg1": "--hello", "kwarg2": "", } args_and_kwargs = ("--empty_kwarg_only=",) args, kwargs = process_args_and_kwargs(args_and_kwargs) assert args == [] assert kwargs == {"empty_kwarg_only": ""} def test_only_args(self): args_and_kwargs = ("argval1", "argval2", "argval3") args, kwargs = process_args_and_kwargs(args_and_kwargs) assert args == ["argval1", "argval2", "argval3"] assert kwargs == {} args_and_kwargs = ("single_arg",) args, kwargs = process_args_and_kwargs(args_and_kwargs) assert args == ["single_arg"] assert kwargs == {} def test_only_kwargs(self): args_and_kwargs = ( "--kwarg1", "kwval1", "--kwarg2", "kwval2", "--kwarg3", "kwval3", ) args, kwargs = process_args_and_kwargs(args_and_kwargs) assert args == [] assert kwargs == {"kwarg1": "kwval1", "kwarg2": "kwval2", "kwarg3": "kwval3"} args_and_kwargs = ( "--single_kwarg", "single_kwval", ) args, kwargs = process_args_and_kwargs(args_and_kwargs) assert args == [] assert kwargs == {"single_kwarg": "single_kwval"} def test_empty_args_and_kwargs(self): for empty_val in [None, ()]: args, kwargs = process_args_and_kwargs(empty_val) assert args == [] assert kwargs == {} def test_start_shutdown(ray_start_stop): with pytest.raises(subprocess.CalledProcessError): subprocess.check_output(["serve", "shutdown"]) subprocess.check_output(["serve", "start"]) subprocess.check_output(["serve", "shutdown"]) def 
test_start_shutdown_in_namespace(ray_start_stop): with pytest.raises(subprocess.CalledProcessError): subprocess.check_output(["serve", "-n", "test", "shutdown"]) subprocess.check_output(["serve", "-n", "test", "start"]) subprocess.check_output(["serve", "-n", "test", "shutdown"]) class A: def __init__(self, value, increment=1): self.value = value self.increment = increment self.decrement = 0 self.multiplier = int(os.environ["SERVE_TEST_MULTIPLIER"]) p = Path("hello") assert p.exists() with open(p) as f: assert f.read() == "world" def reconfigure(self, config): self.decrement = config["decrement"] def __call__(self, inp): return (self.value + self.increment - self.decrement) * self.multiplier @serve.deployment class DecoratedA(A): pass @pytest.mark.parametrize("class_name", ["A", "DecoratedA"]) def test_create_deployment(ray_start_stop, tmp_working_dir, class_name): # noqa: F811 subprocess.check_output(["serve", "start"]) subprocess.check_output( [ "serve", "--runtime-env-json", json.dumps( { "working_dir": tmp_working_dir, } ), "create-deployment", f"ray.serve.tests.test_cli.{class_name}", "--options-json", json.dumps( { "name": "B", "init_args": [42], "init_kwargs": {"increment": 10}, "num_replicas": 2, "user_config": {"decrement": 5}, "ray_actor_options": { "runtime_env": { "env_vars": { "SERVE_TEST_MULTIPLIER": "2", }, } }, } ), ] ) resp = requests.get("http://127.0.0.1:8000/B") resp.raise_for_status() assert resp.text == "94", resp.text @pytest.mark.skipif(sys.platform == "win32", reason="File path incorrect on Windows.") def test_deploy(ray_start_stop): # Deploys some valid config files and checks that the deployments work # Initialize serve in test to enable calling serve.list_deployments() ray.init(address="auto", namespace=RAY_INTERNAL_DASHBOARD_NAMESPACE) serve.start(detached=True) # Create absolute file names to YAML config files three_deployments = os.path.join( os.path.dirname(__file__), "test_config_files", "three_deployments.yaml" ) two_deployments = os.path.join( os.path.dirname(__file__), "test_config_files", "two_deployments.yaml" ) deny_deployment = os.path.join( os.path.dirname(__file__), "test_config_files", "deny_access.yaml" ) # Dictionary mapping test config file names to expected deployment names # and configurations. These should match the values specified in the YAML # files. configs = { three_deployments: { "shallow": { "num_replicas": 1, "response": "Hello shallow world!", }, "deep": { "num_replicas": 1, "response": "Hello deep world!", }, "one": { "num_replicas": 2, "response": "2", }, }, two_deployments: { "shallow": { "num_replicas": 3, "response": "Hello shallow world!", }, "one": { "num_replicas": 2, "response": "2", }, }, } request_url = "http://localhost:8000/" success_message_fragment = b"Sent deploy request successfully!" 
# Check idempotence: for _ in range(2): for config_file_name, expected_deployments in configs.items(): deploy_response = subprocess.check_output( ["serve", "deploy", config_file_name] ) assert success_message_fragment in deploy_response for name, deployment_config in expected_deployments.items(): wait_for_condition( lambda: ( requests.get(f"{request_url}{name}").text == deployment_config["response"] ), timeout=15, ) running_deployments = serve.list_deployments() # Check that running deployment names match expected deployment names assert set(running_deployments.keys()) == expected_deployments.keys() for name, deployment in running_deployments.items(): assert ( deployment.num_replicas == expected_deployments[name]["num_replicas"] ) # Deploy a deployment without HTTP access deploy_response = subprocess.check_output(["serve", "deploy", deny_deployment]) assert success_message_fragment in deploy_response wait_for_condition( lambda: requests.get(f"{request_url}shallow").status_code == 404, timeout=15 ) assert ( ray.get(serve.get_deployment("shallow").get_handle().remote()) == "Hello shallow world!" ) ray.shutdown() @pytest.mark.skipif(sys.platform == "win32", reason="File path incorrect on Windows.") def test_info(ray_start_stop): # Deploys valid config file and checks that serve info returns correct # response config_file_name = os.path.join( os.path.dirname(__file__), "test_config_files", "two_deployments.yaml" ) success_message_fragment = b"Sent deploy request successfully!" deploy_response = subprocess.check_output(["serve", "deploy", config_file_name]) assert success_message_fragment in deploy_response info_response = subprocess.check_output(["serve", "info"]).decode("utf-8") info = json.loads(info_response) assert "deployments" in info assert len(info["deployments"]) == 2 # Validate non-default information about shallow deployment shallow_info = None for deployment_info in info["deployments"]: if deployment_info["name"] == "shallow": shallow_info = deployment_info assert shallow_info is not None assert shallow_info["import_path"] == "test_env.shallow_import.ShallowClass" assert shallow_info["num_replicas"] == 3 assert shallow_info["route_prefix"] == "/shallow" assert ( "https://github.com/shrekris-anyscale/test_deploy_group/archive/HEAD.zip" in shallow_info["ray_actor_options"]["runtime_env"]["py_modules"] ) assert ( "https://github.com/shrekris-anyscale/test_module/archive/HEAD.zip" in shallow_info["ray_actor_options"]["runtime_env"]["py_modules"] ) # Validate non-default information about one deployment one_info = None for deployment_info in info["deployments"]: if deployment_info["name"] == "one": one_info = deployment_info assert one_info is not None assert one_info["import_path"] == "test_module.test.one" assert one_info["num_replicas"] == 2 assert one_info["route_prefix"] == "/one" assert ( "https://github.com/shrekris-anyscale/test_deploy_group/archive/HEAD.zip" in one_info["ray_actor_options"]["runtime_env"]["py_modules"] ) assert ( "https://github.com/shrekris-anyscale/test_module/archive/HEAD.zip" in one_info["ray_actor_options"]["runtime_env"]["py_modules"] ) @pytest.mark.skipif(sys.platform == "win32", reason="File path incorrect on Windows.") def test_status(ray_start_stop): # Deploys a config file and checks its status config_file_name = os.path.join( os.path.dirname(__file__), "test_config_files", "three_deployments.yaml" ) subprocess.check_output(["serve", "deploy", config_file_name]) status_response = subprocess.check_output(["serve", "status"]) statuses = 
json.loads(status_response)["statuses"] expected_deployments = {"shallow", "deep", "one"} for status in statuses: expected_deployments.remove(status["name"]) assert status["status"] in {"HEALTHY", "UPDATING"} assert "message" in status assert len(expected_deployments) == 0 @pytest.mark.skipif(sys.platform == "win32", reason="File path incorrect on Windows.") def test_delete(ray_start_stop): # Deploys a config file and deletes it def get_num_deployments(): info_response = subprocess.check_output(["serve", "info"]) info = json.loads(info_response) return len(info["deployments"]) config_file_name = os.path.join( os.path.dirname(__file__), "test_config_files", "two_deployments.yaml" ) # Check idempotence for _ in range(2): subprocess.check_output(["serve", "deploy", config_file_name]) wait_for_condition(lambda: get_num_deployments() == 2, timeout=35) subprocess.check_output(["serve", "delete", "-y"]) wait_for_condition(lambda: get_num_deployments() == 0, timeout=35) def parrot(request): return request.query_params["sound"] @pytest.mark.skipif(sys.platform == "win32", reason="File path incorrect on Windows.") def test_run_basic(ray_start_stop): # Deploys valid config file and import path via serve run # Deploy via config file config_file_name = os.path.join( os.path.dirname(__file__), "test_config_files", "two_deployments.yaml" ) p = subprocess.Popen(["serve", "run", config_file_name]) wait_for_condition(lambda: ping_endpoint("one") == "2", timeout=10) wait_for_condition( lambda: ping_endpoint("shallow") == "Hello shallow world!", timeout=10 ) p.send_signal(signal.SIGINT) # Equivalent to ctrl-C p.wait() assert ping_endpoint("one") == "connection error" assert ping_endpoint("shallow") == "connection error" # Deploy via import path p = subprocess.Popen(["serve", "run", "ray.serve.tests.test_cli.parrot"]) wait_for_condition( lambda: ping_endpoint("parrot", params="?sound=squawk") == "squawk", timeout=10 ) p.send_signal(signal.SIGINT) # Equivalent to ctrl-C p.wait() assert ping_endpoint("parrot", params="?sound=squawk") == "connection error" class Macaw: def __init__(self, color, name="Mulligan", surname=None): self.color = color self.name = name self.surname = surname def __call__(self): if self.surname is not None: return f"{self.name} {self.surname} is {self.color}!" else: return f"{self.name} is {self.color}!" 
@pytest.mark.skipif(sys.platform == "win32", reason="File path incorrect on Windows.") def test_run_init_args_kwargs(ray_start_stop): # Tests serve run with specified args and kwargs # Deploy via import path p = subprocess.Popen( [ "serve", "run", "ray.serve.tests.test_cli.Macaw", "--", "green", "--name", "Molly", ] ) wait_for_condition(lambda: ping_endpoint("Macaw") == "Molly is green!", timeout=10) p.send_signal(signal.SIGINT) p.wait() assert ping_endpoint("Macaw") == "connection error" # Mix and match keyword notation p = subprocess.Popen( [ "serve", "run", "ray.serve.tests.test_cli.Macaw", "--", "green", "--name", "Molly", "--surname==./u=6y", ] ) wait_for_condition( lambda: ping_endpoint("Macaw") == "Molly =./u=6y is green!", timeout=10 ) p.send_signal(signal.SIGINT) p.wait() assert ping_endpoint("Macaw") == "connection error" # Args/kwargs with config file config_file_name = os.path.join( os.path.dirname(__file__), "test_config_files", "macaw.yaml" ) with pytest.raises(subprocess.CalledProcessError): subprocess.check_output( ["serve", "run", config_file_name, "--", "green", "--name", "Molly"] ) @pytest.mark.skipif(sys.platform == "win32", reason="File path incorrect on Windows.") def test_run_simultaneous(ray_start_stop): # Test that two serve run processes can run simultaneously p1 = subprocess.Popen(["serve", "run", "ray.serve.tests.test_cli.parrot"]) wait_for_condition( lambda: ping_endpoint("parrot", params="?sound=squawk") == "squawk", timeout=10 ) p2 = subprocess.Popen( [ "serve", "run", "ray.serve.tests.test_cli.Macaw", "--", "green", "--name=Molly", "--surname=Malarkey", ] ) wait_for_condition( lambda: ping_endpoint("parrot", params="?sound=squawk") == "squawk", timeout=10 ) wait_for_condition( lambda: ping_endpoint("Macaw") == "Molly Malarkey is green!", timeout=10 ) # Macaw should still be available after parrot is torn down p1.send_signal(signal.SIGINT) p1.wait() assert "Path '/parrot' not found" in ping_endpoint("parrot") assert ping_endpoint("Macaw") == "Molly Malarkey is green!" # Serve should shut down after all deployments are torn down p2.send_signal(signal.SIGINT) p2.wait() assert ping_endpoint("parrot") == "connection error" assert ping_endpoint("Macaw") == "connection error" if __name__ == "__main__": sys.exit(pytest.main(["-v", "-s", __file__]))
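
# --- Added illustration (not part of the test suite) ---
# A short sketch of the argument-parsing contract exercised by
# TestProcessArgsAndKwargs above: positional values must all come first, and
# anything starting with "--" is treated as a keyword, given either as
# "--key value" pairs or as "--key=value" (the first "=" splits key from
# value, so later "=" characters stay in the value).
from ray.serve.scripts import process_args_and_kwargs

args, kwargs = process_args_and_kwargs(
    ("green", "--name", "Molly", "--surname=Malarkey"))
assert args == ["green"]
assert kwargs == {"name": "Molly", "surname": "Malarkey"}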
''' Poll torrent sites checking for number of torrent copies for a select film ''' import time import re import requests import pandas as pd from bs4 import BeautifulSoup from src.my_aws import S3 KEY_OMDB = 'OMDB_API.csv' KEY_OMDB_TOR = 'OMDB_Torrents.csv' BUCKET = 'movie-torrents' class TorrentCount(): ''' Poll popular torrent sites Kickass Torrents, Pirate Bay, Torrentz, and Torrentz Verified collecting the number of torrent copies available for a given film ''' def __init__(self): ''' Connect to AWS S3 bucket, pull down pandas dataframe of data, and create list of movie tuples upon instantiation ''' self.s3_connect = S3() self.omdb_data = self.get_data_s3() self.movies_tup = self.make_movie_tuple() def get_data_s3(self): ''' Get pandas dataframe from S3 Args: none Returns: pd.dataframe: Dataframe containing moive data sans torrent counts ''' omdb_data = self.s3_connect.get_data(KEY_OMDB, BUCKET) omdb_data['Year'] = pd.DatetimeIndex(omdb_data['Released']).year omdb_data = omdb_data.dropna(subset=['Year']) omdb_data['Year'] = omdb_data['Year'].apply( lambda year: str(int(year))) return omdb_data def make_movie_tuple(self): ''' Create a list of tuples containing IMDB id, title, and release year to use when polling through torrent sites Args: none Returns: list[tuples]: Movie tuples in the form (imdbID, year, title) ''' movies_tup = [(imdb_id, title, year) for imdb_id, title, year in zip(self.omdb_data['imdbID'], self.omdb_data['Title'], self.omdb_data['Year'])] return movies_tup @classmethod def kat_crawl(cls, imdb_id): ''' Class method to crawl Kickass Torrents website to get number of torrent copies available for given movie IMDB id Args: imdb_id (str): Unique id for film from IMDB website Returns: str: Number of torrent copies found on Kickass Torrents website for given IMDB id ''' address = 'https://kat.cr/usearch/category:movies%20imdb:{0}/'.format( imdb_id[2:]) web_req = requests.get(address) if web_req.status_code != 200: return 'Fail' soup = BeautifulSoup(web_req.text, 'lxml') html_title = soup.div.h2.span if not html_title: return 'Fail' title_strip = re.sub( r'(<span> results )([0-9*]\D[0-9*]*)( from )', '', str(html_title)) torrent_count = re.sub(r'(</span>)', '', title_strip) return torrent_count @classmethod def pirate_crawl(cls, imdb_id): ''' Class method to crawl Pirate Bay website to get number of torrent copies available for given movie IMDB id Args: imdb_id (str): Unique id for film from IMDB website Returns: str: Number of torrent copies found on Pirate Bay site for given IMDB id ''' address = 'https://thepiratebay.org/search/{0}/'.format(imdb_id) web_req = requests.get(address) if web_req.status_code != 200: return 'Fail' soup = BeautifulSoup(web_req.text, 'lxml') html_title = soup.body.h2 if not html_title: return 'Fail' title_strip = re.search(r'(?<=approx )([^ found>]+)', str(html_title)) torrent_count = title_strip.group(0) return torrent_count @classmethod def torrentz_crawl(cls, site, title, year): ''' Class method to crawl both the Torrentz website as well as the Torrentz Verified website to get number of torrent copies available for given movie title and release year Args: title (str): Title of movie to search on Torrentz site year (str): Year of movie to search on Torrentz site Returns: str: Number of torrent copies found on Torrentz site for given title and year ''' # Handle both regular site and verified site if site.contains('Ver'): address = 'http://www.torrentz.eu/verified?f={0}+{1}'.format( title, year) else: address = 
'http://www.torrentz.eu/search?f={0}+{1}'.format( title, year) web_req = requests.get(address) if web_req.status_code != 200: return 'Fail' soup = BeautifulSoup(web_req.text, 'lxml') html_title = soup.h2 if not html_title: return 'Fail' title_strip = re.search( r'(?<=none">)([^ torrents>]+)', str(html_title)) torrent_count = title_strip.group(0) return torrent_count def poll_torrent_counts(self): ''' Loop through movies tuple and poll torrent sites for number of torrent copies found before uploading final results to AWS S3 Args: none Returns: none ''' for imdb_id, title, year in self.movies_tup: time.sleep(1) omdb = self.omdb_data omdb.loc[omdb['imdbID'] == imdb_id, 'Kat_Count'] = self.kat_crawl(imdb_id) omdb.loc[omdb['imdbID'] == imdb_id, 'Pirate_Count'] = self.pirate_crawl(imdb_id) omdb.loc[omdb['imdbID'] == imdb_id, 'Torrentz_Count'] = \ self.torrentz_crawl('Reg', title, year) omdb.loc[omdb['imdbID'] == imdb_id, 'Torrentz_Ver_Count'] = \ self.torrentz_crawl('Ver', title, year) self.omdb_data = omdb self.s3_connect.put_data(omdb, KEY_OMDB_TOR, BUCKET) if __name__ == '__main__': TOR_COUNT = TorrentCount()
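
# --- Added usage sketch ---
# A hedged demo helper (not in the original module): the site-specific
# classmethods take an IMDB id (e.g. 'tt0133093') and return the torrent
# count as a string, or 'Fail' if the request or parse did not succeed.
# Requires network access; the full pipeline (poll_torrent_counts) also needs
# AWS credentials for the S3 helper in src.my_aws.
def demo_single_film(imdb_id='tt0133093'):
    return {
        'kat': TorrentCount.kat_crawl(imdb_id),
        'pirate_bay': TorrentCount.pirate_crawl(imdb_id),
    }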
# Copyright 2017 Balazs Nemeth # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at: # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys, os, getopt, subprocess, copy import numpy as np from collections import OrderedDict import matplotlib.pyplot as plt import math # runs when mapping repo is cloned individually, and NFFG lib is in a # sibling directory. WARNING: cicular import is not avioded by design. import site site.addsitedir('../..') from nffg_lib.nffg import NFFG helpmsg=""" Decompresses the NFFG-s given in command line, sorts them base on test level, and calculates the average and deviation of link/node resources for all resource types. Prints them in ascending order of test levels in CSV format. Removes the uncompressed NFFG after it is finished with its processing. -h Print this help message. -l <<NFFG location>> Location of the *.nffg files. Also used to save figures in a folder under this path. --prefix=<<string>> File names should look like --suffix=<<string>> "prefix<<NUM>>suffix<<optional variable string>>.nffg" --single_nffg=<<path>> If given, a single uncompressed NFFG is processed. --hist=<<aggregation size>> If given, draws the histogram for the utilization of resource components aggregating the values by the given size. WARN: histogram drawing doesn't work! --add_hist_values If set, non-zero values are written above the bars on the histogram. --hist_format=<<pdf|png|...>> The format of the saved histograms. PNG by default --starting_lvl=i Start the analyzation only after the given NFFG number. --one Exit after one NFFG processing --cdf_format=<<pdf|png|...>> The format of the saved CDF, PNG by default. --cdf Produces images of Cumulative Distribution Function in the format specified by --cdf_formatoption for each resource type. --print_avgs Print the average resource utilizations for all processed NFFG-s in CSV format. --print_devs Print the deviations of resource utilizations for all processed NFFG-s in CSV format. The --cdf option must be set too. --print_cdf_data=<<res|mem...>> Print the data used to plot the CDF in CSV format for the given resource types. The --cdf option must be set too! --no_cdf_interpolation If set, CDF is delignated in a step function manner, instead of linear interpolation between points. --print_minmax Print the minimal and maximal utilization of all resource types of the processed NFFG-s. --print_objective_value Print the objective function value of each NFFG. --plot_aspect=<<float>> Ratio of x/y axis. """ print_avgs = False print_devs = False print_cdf_data = False draw_cdf = False draw_hist = False print_minmax = False reskeys = ['cpu', 'mem', 'storage', 'bandwidth'] hist_aggr_size = 0.05 def increment_util_counter(d, u, aggr_size): # coudl be binary search... 
prev_aggr = aggr_size for aggr in d: if aggr > u: d[prev_aggr] += 1 return prev_aggr = aggr def autolabel(rects, ax): # attach some text labels for rect in rects: height = rect.get_height() if height > 0.0: ax.text(rect.get_x() + rect.get_width()/2., 1.05*height, '%.2f' % height, ha='center', va='bottom') def gather_and_print_cdf_hist_data (nffg, empty_hist, empty_cdf, nffg_num=0): # calculate avg. res utils by resource types. avgs = {} cnts = {} mins = {} maxs = {} if draw_hist: hist = copy.deepcopy(empty_hist) if draw_cdf: cdf = copy.deepcopy(empty_cdf) for noderes in reskeys: avgs[noderes] = 0.0 cnts[noderes] = 0 for i in nffg.infras: # only count nodes which had these resources initially if i.resources[noderes] > 1e-10: util = float(i.resources[noderes] - i.availres[noderes]) / \ i.resources[noderes] avgs[noderes] += util cnts[noderes] += 1 # maintain max/min struct if noderes in mins: if mins[noderes] > util: mins[noderes] = util else: mins[noderes] = util if noderes in maxs: if maxs[noderes] < util: maxs[noderes] = util else: maxs[noderes] = util if draw_hist: increment_util_counter(hist[noderes], util, hist_aggr_size) if draw_cdf: cdf[noderes].append(util) avgs[noderes] /= cnts[noderes] avg_linkutil = 0.0 linkcnt = 0 for l in nffg.links: if l.type == 'STATIC': link_util = float(l.bandwidth - l.availbandwidth) / l.bandwidth avg_linkutil += link_util linkcnt += 1 # maintain max/min struct if 'link_bw' in mins: if mins['link_bw'] > link_util: mins['link_bw'] = link_util else: mins['link_bw'] = link_util if 'link_bw' in maxs: if maxs['link_bw'] < link_util: maxs['link_bw'] = link_util else: maxs['link_bw'] = link_util if draw_hist: increment_util_counter(hist['link_bw'], link_util, hist_aggr_size) if draw_cdf: cdf['link_bw'].append(link_util) avg_linkutil /= linkcnt if print_avgs: to_print = [nffg_num, avg_linkutil] to_print.extend([avgs[res] for res in reskeys]) print ",".join(map(str, to_print)) if print_devs: avgs['link_bw'] = avg_linkutil devs = {} for res in cdf: devs[res] = math.sqrt(sum([(avgs[res] - u) ** 2 for u in cdf[res]]) / \ (len(cdf[res]) - 1)) to_print = [nffg_num] to_print.extend([devs[res] for res in cdf]) print ",".join(map(str, to_print)) if print_minmax: to_print = [nffg_num] for res in reskeys + ['link_bw']: to_print.append(mins[res]) to_print.append(maxs[res]) print ",".join(map(str, to_print)) return hist, cdf def draw_histogram (hist, add_hist_values, hist_format, plot_aspect, loc_of_nffgs, prefix, suffix, variable_str="", nffg_num=0): # normalize the histogram to [0,1], so the resource types could be # plotted # on the same bar chart for res in hist: sum_util_cnt = sum( [hist[res][util_range] for util_range in hist[res]]) for util_range in hist[res]: hist[res][util_range] = float(hist[res][util_range]) / sum_util_cnt # plot the histograms. 
fig, ax = plt.subplots() ax.set_ylim((0.00, 1.10)) range_seq = np.array( [float("%.4f" % (aggr / hist_aggr_size)) for aggr in \ np.arange(hist_aggr_size, 1.0, hist_aggr_size)]) range_seq = np.append(range_seq, [1.0 / hist_aggr_size]) width = range_seq[-1] / (len(hist) + 2) / len(range_seq) colors = iter(['r', 'g', 'b', 'c', 'y']) i = 0 rects = [] for res in hist: rect = ax.bar((range_seq - 1) * (len(hist) + 2) * width + \ (i + 4.5) * width, [hist[res][util_range] for util_range in hist[res]], width, color=next(colors)) rects.append((res, rect)) i += 1 if add_hist_values: autolabel(rect, ax) ax.set_ylabel("Ratio of network element counts to total count") ax.set_xlabel("Resource utilization intervals [%]") ax.set_xticks(range_seq * (len(hist) + 2) * width) ax.set_xticklabels( [str(int(100 * util_range)) for util_range in hist['cpu']]) ax.set_aspect(plot_aspect) ax.legend([r[0] for r in zip(*rects)[1]], zip(*rects)[0], ncol=5, loc='upper left', fontsize=8, bbox_to_anchor=(0, 1)) plt.savefig('%s/hists/%s%s%s%s.%s' % (loc_of_nffgs, prefix, nffg_num, suffix, variable_str, hist_format), bbox_inches='tight') plt.close(fig) def draw_cummulative_distribution_function (cdf, res_cdf_to_print, no_cdf_interpolation, cdf_format, plot_aspect, loc_of_nffgs, prefix, suffix, variable_str="", nffg_num=0): # sort util values incrementing in each resource type for res in cdf: cdf[res] = sorted(cdf[res]) fig, ax = plt.subplots() ax.set_xlim((-0.05, 1.05)) ax.set_ylim((-0.05, 1.19)) colors = iter(['r', 'g', 'b', 'c', 'y']) styles = iter( [[8, 4, 2, 4, 2, 4], [4, 2], [8, 4, 4, 2], [8, 4, 2, 4], []]) markers = iter(['o', 'v', '+', 's', '']) for res in cdf: last_point = (0, 0) vertical_step = 1.0 / len(cdf[res]) rescolor = next(colors) resline = next(styles) resmarker = next(markers) reslab = res if print_cdf_data and res == res_cdf_to_print: cdf_plot_data = [last_point] for point in zip(cdf[res], np.append( np.arange(vertical_step, 1.0, vertical_step), [1.0])): if no_cdf_interpolation: plt.plot((last_point[0], point[0]), (last_point[1], last_point[1]), color=rescolor, lw=1.5, label=reslab, dashes=resline, marker=resmarker) plt.plot((point[0], point[0]), (last_point[1], point[1]), color=rescolor, lw=1.5, dashes=resline, marker=resmarker) else: plt.plot((last_point[0], point[0]), (last_point[1], point[1]), color=rescolor, lw=1.5, dashes=resline, label=reslab, marker=resmarker) reslab = None if print_cdf_data and res == res_cdf_to_print: cdf_plot_data.append(point) last_point = point plt.plot((last_point[0], 1.0), (last_point[1], 1.0), color=rescolor, lw=1.5, dashes=resline, label=reslab, marker=resmarker) if print_cdf_data and res == res_cdf_to_print: cdf_plot_data.append((1.0, 1.0)) print nffg_num, ",", ",".join( map(lambda t: "(%.6f; %.6f)" % (t[0], t[1]), cdf_plot_data)) ax.set_ylabel("CDF") ax.set_xlabel("Resource utilization [%]") ax.set_aspect(plot_aspect) ax.set_xticks([float(i) / 100 for i in xrange(0, 101, 20)]) ax.set_xticklabels([str(i) for i in xrange(0, 101, 20)]) ax.legend(bbox_to_anchor=(0, 1), loc='upper left', ncol=5, fontsize=12, columnspacing=0.9) plt.savefig('%s/cdfs/%s%s%s%s.%s' % (loc_of_nffgs, prefix, nffg_num, suffix, variable_str, cdf_format), bbox_inches='tight') plt.close(fig) def calc_objective_value (nffg): max_edge_cost = 0.0 edge_cost = 0.0 # find sum of used bandwidth and its max possible value for link in nffg.links: if link.type == NFFG.TYPE_LINK_STATIC: max_edge_cost += link.bandwidth edge_cost += link.bandwidth - link.availbandwidth # find the node with lowest utilization, 
balancing means maximizing # the lowest utilization node_resources_to_balance = ["cpu", "mem"] load_balance_component = 1.0 for res in node_resources_to_balance: for node in nffg.infras: if node.resources[res] > 1e-30: node_load = float(node.resources[res] - node.availres[res]) / \ node.resources[res] if node_load < load_balance_component: load_balance_component = node_load return edge_cost/max_edge_cost, load_balance_component def main(argv): global print_avgs, print_devs, print_minmax, draw_hist, draw_cdf try: opts, args = getopt.getopt(argv, "hl:", ["hist=", "add_hist_values", "hist_format=", "starting_lvl=", "one", "cdf_format=", "cdf", "print_devs", "print_avgs", "print_cdf_data=", "print_minmax", "no_cdf_interpolation", "plot_aspect=", "single_nffg=", "prefix=", "suffix=", "print_objective_value"]) except getopt.GetoptError as goe: print helpmsg raise loc_of_nffgs = "" add_hist_values = False hist_format = "png" starting_lvl = 0 process_only_one = False cdf_format = "png" res_cdf_to_print = None no_cdf_interpolation = True plot_aspect = 1 single_nffg_process = False nffg_prefix = None nffg_suffix = None nffg_name_variable_str = {} print_cdf_data = False print_devs = False print_avgs = False print_minmax = False print_objective_value = False for opt, arg in opts: if opt == "-h": print helpmsg sys.exit() elif opt == "-l": loc_of_nffgs = arg elif opt == "--hist": draw_hist = True hist_aggr_size = float(arg) hist = {} for res in reskeys + ['link_bw']: hist[res] = OrderedDict() for aggr in np.arange(hist_aggr_size, 1.0, hist_aggr_size): hist[res][float("%.4f"%aggr)] = 0 hist[res][1.0] = 0 elif opt == "--add_hist_values": add_hist_values = True elif opt == "--hist_format": hist_format = arg elif opt == "--starting_lvl": starting_lvl=int(arg) elif opt == "--one": process_only_one = True elif opt == "--cdf": draw_cdf = True cdf = {} for res in reskeys + ['link_bw']: cdf[res] = [] elif opt == "--cdf_format": cdf_format = arg elif opt == "--print_devs": print_devs = True elif opt == "--print_avgs": print_avgs = True elif opt == "--print_cdf_data": print_cdf_data = True res_cdf_to_print = arg elif opt == "--no_cdf_interpolation": no_cdf_interpolation = True elif opt == "--print_minmax": print_minmax = True elif opt == "--plot_aspect": plot_aspect = float(arg) elif opt == "--single_nffg": single_nffg_process = True single_nffg_path = arg elif opt == "--prefix": nffg_prefix = arg elif opt == "--suffix": nffg_suffix = arg elif opt == "--print_objective_value": print_objective_value = True if not single_nffg_process and (nffg_suffix is None or nffg_prefix is None): raise Exception("If --single_process is not specified a location and " "--prefix and --suffix must be specified!") # clean previously created plots os.system("rm -rf %s/hists %s/cdfs"%(loc_of_nffgs, loc_of_nffgs)) os.system("mkdir %s/hists %s/cdfs"%(loc_of_nffgs, loc_of_nffgs)) if not single_nffg_process: nffg_num_list = [] bashCommand = "ls -x " + loc_of_nffgs process = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE) nffg_files = process.communicate()[0] for filen in nffg_files.replace("\n", " ").split(" "): if nffg_prefix in filen and nffg_suffix in filen: truncted_filen = filen.lstrip(nffg_prefix).rstrip(".nffg").split(nffg_suffix) number_of_nffg = int(truncted_filen[0]) if len(truncted_filen) > 1: nffg_name_variable_str[number_of_nffg] = truncted_filen[1] else: nffg_name_variable_str[number_of_nffg] = "" nffg_num_list.append(number_of_nffg) nffg_num_list = sorted(filter(lambda x: x>=starting_lvl, nffg_num_list)) if 
print_avgs: print "nffg_num, avg(link_bw), ",", ".join(["".join(["avg(",noderes,")"]) \ for noderes in reskeys]) if print_devs: print "nffg_num, ", ", ".join(["".join(["dev(",noderes,")"]) \ for noderes in cdf]) if print_minmax: print "nffg_num, ", ", ".join(["min(%s), max(%s)"%(res, res) for res in \ reskeys + ['link_bw']]) if print_objective_value: print "nffg_num, edge_cost_comp, load_balancing_comp, total" if draw_hist: empty_hist = copy.deepcopy(hist) if draw_cdf: empty_cdf = copy.deepcopy(cdf) if not single_nffg_process: for nffg_num in nffg_num_list: filename = "".join((nffg_prefix, str(nffg_num), nffg_suffix, nffg_name_variable_str[nffg_num], ".nffg")) with open("".join((loc_of_nffgs, "/", filename)), "r") as f: nffg = NFFG.parse(f.read()) nffg.calculate_available_node_res() nffg.calculate_available_link_res([]) hist, cdf = gather_and_print_cdf_hist_data(nffg, empty_hist, empty_cdf, nffg_num) # we can only know the number of CDF points after the first processing. # this number should stay the same for all consequential NFFG-s. if print_cdf_data and nffg_num == nffg_num_list[0]: print ",".join(["nffg_num"] + \ [res_cdf_to_print + "_cdf_point" + str(i) \ for i in range(0, len(cdf[res_cdf_to_print]) + 2)]) if print_objective_value: edge_comp, balancing_comp = calc_objective_value(nffg) print ",".join(map(str, (nffg_num, edge_comp, balancing_comp, edge_comp+balancing_comp))) # NOTE: Draw hist doesn't work but not needed for now. # if draw_hist: # draw_histogram(hist, add_hist_values, hist_format, plot_aspect, # loc_of_nffgs, nffg_prefix, nffg_suffix, # nffg_name_variable_str[nffg_num], nffg_num) if draw_cdf: draw_cummulative_distribution_function(cdf, res_cdf_to_print, no_cdf_interpolation, cdf_format, plot_aspect, loc_of_nffgs, nffg_prefix, nffg_suffix, nffg_name_variable_str[nffg_num], nffg_num) # maybe finish after one iteration if process_only_one: break else: with open(single_nffg_path, "r") as f: nffg = NFFG.parse(f.read()) nffg.calculate_available_node_res() nffg.calculate_available_link_res([]) hist, cdf = gather_and_print_cdf_hist_data(nffg, empty_hist, empty_cdf) # if draw_hist: # draw_histogram(hist, add_hist_values, hist_format, plot_aspect, # loc_of_nffgs, nffg_prefix, nffg_suffix) if draw_cdf: draw_cummulative_distribution_function(cdf, res_cdf_to_print, no_cdf_interpolation, cdf_format, plot_aspect, loc_of_nffgs, nffg_prefix, nffg_suffix) if __name__ == '__main__': main(sys.argv[1:])
# Copyright 2014 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import atexit import logging import os import re import socket import struct import subprocess from telemetry.internal.util import binary_manager from telemetry.core import platform from telemetry.core import util from telemetry.internal import forwarders from telemetry.internal.platform import android_device from devil.android import device_errors from devil.android import device_utils try: from devil.android import forwarder except ImportError: forwarder = None class AndroidForwarderFactory(forwarders.ForwarderFactory): def __init__(self, device, use_rndis): super(AndroidForwarderFactory, self).__init__() self._device = device self._rndis_configurator = None if use_rndis: self._rndis_configurator = AndroidRndisConfigurator(self._device) def Create(self, port_pairs): try: if self._rndis_configurator: return AndroidRndisForwarder(self._device, self._rndis_configurator, port_pairs) return AndroidForwarder(self._device, port_pairs) except Exception: try: logging.warning('Failed to create forwarder. ' 'Currently forwarded connections:') for line in self._device.adb.ForwardList().splitlines(): logging.warning(' %s', line) except Exception: logging.warning('Exception raised while listing forwarded connections.') logging.warning('Device tcp sockets in use:') try: for line in self._device.ReadFile('/proc/net/tcp', as_root=True, force_pull=True).splitlines(): logging.warning(' %s', line) except Exception: logging.warning('Exception raised while listing tcp sockets.') logging.warning('Alive webpagereplay instances:') try: for line in subprocess.check_output(['ps', '-ef']).splitlines(): if 'webpagereplay' in line: logging.warning(' %s', line) except Exception: logging.warning('Exception raised while listing WPR intances.') raise @property def host_ip(self): if self._rndis_configurator: return self._rndis_configurator.host_ip return super(AndroidForwarderFactory, self).host_ip @property def does_forwarder_override_dns(self): return bool(self._rndis_configurator) class AndroidForwarder(forwarders.Forwarder): def __init__(self, device, port_pairs): super(AndroidForwarder, self).__init__(port_pairs) self._device = device forwarder.Forwarder.Map([(p.remote_port, p.local_port) for p in port_pairs if p], self._device) self._port_pairs = forwarders.PortPairs(*[ forwarders.PortPair( p.local_port, forwarder.Forwarder.DevicePortForHostPort(p.local_port)) if p else None for p in port_pairs]) atexit.register(self.Close) # TODO(tonyg): Verify that each port can connect to host. def Close(self): if self._forwarding: for port_pair in self._port_pairs: if port_pair: forwarder.Forwarder.UnmapDevicePort( port_pair.remote_port, self._device) super(AndroidForwarder, self).Close() class AndroidRndisForwarder(forwarders.Forwarder): """Forwards traffic using RNDIS. 
Assumes the device has root access.""" def __init__(self, device, rndis_configurator, port_pairs): super(AndroidRndisForwarder, self).__init__(port_pairs) self._device = device self._rndis_configurator = rndis_configurator self._device_iface = rndis_configurator.device_iface self._host_ip = rndis_configurator.host_ip self._original_dns = None, None, None self._RedirectPorts(port_pairs) if port_pairs.dns: self._OverrideDns() self._OverrideDefaultGateway() # Need to override routing policy again since call to setifdns # sometimes resets policy table self._rndis_configurator.OverrideRoutingPolicy() atexit.register(self.Close) # TODO(tonyg): Verify that each port can connect to host. @property def host_ip(self): return self._host_ip def Close(self): if self._forwarding: self._rndis_configurator.RestoreRoutingPolicy() self._SetDns(*self._original_dns) self._RestoreDefaultGateway() super(AndroidRndisForwarder, self).Close() def _RedirectPorts(self, port_pairs): """Sets the local to remote pair mappings to use for RNDIS.""" # Flush any old nat rules. self._device.RunShellCommand('iptables -F -t nat') for port_pair in port_pairs: if not port_pair or port_pair.local_port == port_pair.remote_port: continue protocol = 'udp' if port_pair.remote_port == 53 else 'tcp' self._device.RunShellCommand( 'iptables -t nat -A OUTPUT -p %s --dport %d' ' -j DNAT --to-destination %s:%d' % (protocol, port_pair.remote_port, self.host_ip, port_pair.local_port)) def _OverrideDns(self): """Overrides DNS on device to point at the host.""" self._original_dns = self._GetCurrentDns() self._SetDns(self._device_iface, self.host_ip, self.host_ip) def _SetDns(self, iface, dns1, dns2): """Overrides device's DNS configuration. Args: iface: name of the network interface to make default dns1, dns2: nameserver IP addresses """ if not iface: return # If there is no route, then nobody cares about DNS. # DNS proxy in older versions of Android is configured via properties. # TODO(szym): run via su -c if necessary. self._device.SetProp('net.dns1', dns1) self._device.SetProp('net.dns2', dns2) dnschange = self._device.GetProp('net.dnschange') if dnschange: self._device.SetProp('net.dnschange', str(int(dnschange) + 1)) # Since commit 8b47b3601f82f299bb8c135af0639b72b67230e6 to frameworks/base # the net.dns1 properties have been replaced with explicit commands for netd self._device.RunShellCommand('netd resolver setifdns %s %s %s' % (iface, dns1, dns2)) # TODO(szym): if we know the package UID, we could setifaceforuidrange self._device.RunShellCommand('netd resolver setdefaultif %s' % iface) def _GetCurrentDns(self): """Returns current gateway, dns1, and dns2.""" routes = self._device.RunShellCommand('cat /proc/net/route')[1:] routes = [route.split() for route in routes] default_routes = [route[0] for route in routes if route[1] == '00000000'] return ( default_routes[0] if default_routes else None, self._device.GetProp('net.dns1'), self._device.GetProp('net.dns2'), ) def _OverrideDefaultGateway(self): """Force traffic to go through RNDIS interface. Override any default gateway route. Without this traffic may go through the wrong interface. This introduces the risk that _RestoreDefaultGateway() is not called (e.g. Telemetry crashes). A power cycle or "adb reboot" is a simple workaround around in that case. 
""" self._device.RunShellCommand('route add default gw %s dev %s' % (self.host_ip, self._device_iface)) def _RestoreDefaultGateway(self): self._device.RunShellCommand('netcfg %s down' % self._device_iface) class AndroidRndisConfigurator(object): """Configures a linux host to connect to an android device via RNDIS. Note that we intentionally leave RNDIS running on the device. This is because the setup is slow and potentially flaky and leaving it running doesn't seem to interfere with any other developer or bot use-cases. """ _RNDIS_DEVICE = '/sys/class/android_usb/android0' _NETWORK_INTERFACES = '/etc/network/interfaces' _INTERFACES_INCLUDE = 'source /etc/network/interfaces.d/*.conf' _TELEMETRY_INTERFACE_FILE = '/etc/network/interfaces.d/telemetry-{}.conf' def __init__(self, device): self._device = device try: self._device.EnableRoot() except device_errors.CommandFailedError: logging.error('RNDIS forwarding requires a rooted device.') raise self._device_ip = None self._host_iface = None self._host_ip = None self.device_iface = None if platform.GetHostPlatform().GetOSName() == 'mac': self._InstallHorndis(platform.GetHostPlatform().GetArchName()) assert self._IsRndisSupported(), 'Device does not support RNDIS.' self._CheckConfigureNetwork() @property def host_ip(self): return self._host_ip def _IsRndisSupported(self): """Checks that the device has RNDIS support in the kernel.""" return self._device.FileExists('%s/f_rndis/device' % self._RNDIS_DEVICE) def _FindDeviceRndisInterface(self): """Returns the name of the RNDIS network interface if present.""" config = self._device.RunShellCommand('ip -o link show') interfaces = [line.split(':')[1].strip() for line in config] candidates = [iface for iface in interfaces if re.match('rndis|usb', iface)] if candidates: candidates.sort() if len(candidates) == 2 and candidates[0].startswith('rndis') and \ candidates[1].startswith('usb'): return candidates[0] assert len(candidates) == 1, 'Found more than one rndis device!' return candidates[0] def _EnumerateHostInterfaces(self): host_platform = platform.GetHostPlatform().GetOSName() if host_platform == 'linux': return subprocess.check_output(['ip', 'addr']).splitlines() if host_platform == 'mac': return subprocess.check_output(['ifconfig']).splitlines() raise NotImplementedError('Platform %s not supported!' % host_platform) def _FindHostRndisInterface(self): """Returns the name of the host-side network interface.""" interface_list = self._EnumerateHostInterfaces() ether_address = self._device.ReadFile( '%s/f_rndis/ethaddr' % self._RNDIS_DEVICE, as_root=True, force_pull=True).strip() interface_name = None for line in interface_list: if not line.startswith((' ', '\t')): interface_name = line.split(':')[-2].strip() elif ether_address in line: return interface_name def _WriteProtectedFile(self, file_path, contents): subprocess.check_call( ['/usr/bin/sudo', 'bash', '-c', 'echo -e "%s" > %s' % (contents, file_path)]) def _LoadInstalledHoRNDIS(self): """Attempt to load HoRNDIS if installed. If kext could not be loaded or if HoRNDIS is not installed, return False. 
""" if not os.path.isdir('/System/Library/Extensions/HoRNDIS.kext'): logging.info('HoRNDIS not present on system.') return False def HoRNDISLoaded(): return 'HoRNDIS' in subprocess.check_output(['kextstat']) if HoRNDISLoaded(): return True logging.info('HoRNDIS installed but not running, trying to load manually.') subprocess.check_call( ['/usr/bin/sudo', 'kextload', '-b', 'com.joshuawise.kexts.HoRNDIS']) return HoRNDISLoaded() def _InstallHorndis(self, arch_name): if self._LoadInstalledHoRNDIS(): logging.info('HoRNDIS kext loaded successfully.') return logging.info('Installing HoRNDIS...') pkg_path = binary_manager.FetchPath('horndis', arch_name, 'mac') subprocess.check_call( ['/usr/bin/sudo', 'installer', '-pkg', pkg_path, '-target', '/']) def _DisableRndis(self): try: self._device.SetProp('sys.usb.config', 'adb') except device_errors.AdbCommandFailedError: # Ignore exception due to USB connection being reset. pass self._device.WaitUntilFullyBooted() def _EnableRndis(self): """Enables the RNDIS network interface.""" script_prefix = '/data/local/tmp/rndis' # This could be accomplished via "svc usb setFunction rndis" but only on # devices which have the "USB tethering" feature. # Also, on some devices, it's necessary to go through "none" function. script = """ trap '' HUP trap '' TERM trap '' PIPE function manual_config() { echo %(functions)s > %(dev)s/functions echo 224 > %(dev)s/bDeviceClass echo 1 > %(dev)s/enable start adbd setprop sys.usb.state %(functions)s } # This function kills adb transport, so it has to be run "detached". function doit() { setprop sys.usb.config none while [ `getprop sys.usb.state` != "none" ]; do sleep 1 done manual_config # For some combinations of devices and host kernels, adb won't work unless the # interface is up, but if we bring it up immediately, it will break adb. #sleep 1 #ifconfig rndis0 192.168.123.2 netmask 255.255.255.0 up echo DONE >> %(prefix)s.log } doit & """ % {'dev': self._RNDIS_DEVICE, 'functions': 'rndis,adb', 'prefix': script_prefix} self._device.WriteFile('%s.sh' % script_prefix, script) # TODO(szym): run via su -c if necessary. self._device.RunShellCommand('rm %s.log' % script_prefix) self._device.RunShellCommand('. %s.sh' % script_prefix) self._device.WaitUntilFullyBooted() result = self._device.ReadFile('%s.log' % script_prefix).splitlines() assert any('DONE' in line for line in result), 'RNDIS script did not run!' def _CheckEnableRndis(self, force): """Enables the RNDIS network interface, retrying if necessary. Args: force: Disable RNDIS first, even if it appears already enabled. 
Returns: device_iface: RNDIS interface name on the device host_iface: corresponding interface name on the host """ for _ in range(3): if not force: device_iface = self._FindDeviceRndisInterface() if device_iface: host_iface = self._FindHostRndisInterface() if host_iface: return device_iface, host_iface self._DisableRndis() self._EnableRndis() force = False raise Exception('Could not enable RNDIS, giving up.') def _Ip2Long(self, addr): return struct.unpack('!L', socket.inet_aton(addr))[0] def _IpPrefix2AddressMask(self, addr): def _Length2Mask(length): return 0xFFFFFFFF & ~((1 << (32 - length)) - 1) addr, masklen = addr.split('/') return self._Ip2Long(addr), _Length2Mask(int(masklen)) def _GetHostAddresses(self, iface): """Returns the IP addresses on host's interfaces, breaking out |iface|.""" interface_list = self._EnumerateHostInterfaces() addresses = [] iface_address = None found_iface = False for line in interface_list: if not line.startswith((' ', '\t')): found_iface = iface in line match = re.search(r'(?<=inet )\S+', line) if match: address = match.group(0) if '/' in address: address = self._IpPrefix2AddressMask(address) else: match = re.search(r'(?<=netmask )\S+', line) address = self._Ip2Long(address), int(match.group(0), 16) if found_iface: assert not iface_address, ( 'Found %s twice when parsing host interfaces.' % iface) iface_address = address else: addresses.append(address) return addresses, iface_address def _GetDeviceAddresses(self, excluded_iface): """Returns the IP addresses on all connected devices. Excludes interface |excluded_iface| on the selected device. """ my_device = str(self._device) addresses = [] for device_serial in android_device.GetDeviceSerials(None): try: device = device_utils.DeviceUtils(device_serial) if device_serial == my_device: excluded = excluded_iface else: excluded = 'no interfaces excluded on other devices' addresses += [line.split()[3] for line in device.RunShellCommand('ip -o -4 addr') if excluded not in line] except device_errors.CommandFailedError: logging.warning('Unable to determine IP addresses for %s', device_serial) return addresses def _ConfigureNetwork(self, device_iface, host_iface): """Configures the |device_iface| to be on the same network as |host_iface|. """ def _Long2Ip(value): return socket.inet_ntoa(struct.pack('!L', value)) def _IsNetworkUnique(network, addresses): return all((addr & mask != network & mask) for addr, mask in addresses) def _NextUnusedAddress(network, netmask, used_addresses): # Excludes '0' and broadcast. 
for suffix in range(1, 0xFFFFFFFF & ~netmask): candidate = network | suffix if candidate not in used_addresses: return candidate def HasHostAddress(): _, host_address = self._GetHostAddresses(host_iface) return bool(host_address) if not HasHostAddress(): if platform.GetHostPlatform().GetOSName() == 'mac': if 'Telemetry' not in subprocess.check_output( ['networksetup', '-listallnetworkservices']): subprocess.check_call( ['/usr/bin/sudo', 'networksetup', '-createnetworkservice', 'Telemetry', host_iface]) subprocess.check_call( ['/usr/bin/sudo', 'networksetup', '-setmanual', 'Telemetry', '192.168.123.1', '255.255.255.0']) elif platform.GetHostPlatform().GetOSName() == 'linux': with open(self._NETWORK_INTERFACES) as f: orig_interfaces = f.read() if self._INTERFACES_INCLUDE not in orig_interfaces: interfaces = '\n'.join([ orig_interfaces, '', '# Added by Telemetry.', self._INTERFACES_INCLUDE]) self._WriteProtectedFile(self._NETWORK_INTERFACES, interfaces) interface_conf_file = self._TELEMETRY_INTERFACE_FILE.format(host_iface) if not os.path.exists(interface_conf_file): interface_conf_dir = os.path.dirname(interface_conf_file) if not os.path.exists(interface_conf_dir): subprocess.call(['/usr/bin/sudo', '/bin/mkdir', interface_conf_dir]) subprocess.call( ['/usr/bin/sudo', '/bin/chmod', '755', interface_conf_dir]) interface_conf = '\n'.join([ '# Added by Telemetry for RNDIS forwarding.', 'allow-hotplug %s' % host_iface, 'iface %s inet static' % host_iface, ' address 192.168.123.1', ' netmask 255.255.255.0', ]) self._WriteProtectedFile(interface_conf_file, interface_conf) subprocess.check_call(['/usr/bin/sudo', 'ifup', host_iface]) logging.info('Waiting for RNDIS connectivity...') util.WaitFor(HasHostAddress, 30) addresses, host_address = self._GetHostAddresses(host_iface) assert host_address, 'Interface %s could not be configured.' % host_iface host_ip, netmask = host_address # pylint: disable=unpacking-non-sequence network = host_ip & netmask if not _IsNetworkUnique(network, addresses): logging.warning( 'The IP address configuration %s of %s is not unique!\n' 'Check your /etc/network/interfaces. If this overlap is intended,\n' 'you might need to use: ip rule add from <device_ip> lookup <table>\n' 'or add the interface to a bridge in order to route to this network.' % (host_address, host_iface) ) # Find unused IP address. used_addresses = [addr for addr, _ in addresses] used_addresses += [self._IpPrefix2AddressMask(addr)[0] for addr in self._GetDeviceAddresses(device_iface)] used_addresses += [host_ip] device_ip = _NextUnusedAddress(network, netmask, used_addresses) assert device_ip, ('The network %s on %s is full.' % (host_address, host_iface)) host_ip = _Long2Ip(host_ip) device_ip = _Long2Ip(device_ip) netmask = _Long2Ip(netmask) # TODO(szym) run via su -c if necessary. self._device.RunShellCommand( 'ifconfig %s %s netmask %s up' % (device_iface, device_ip, netmask)) # Enabling the interface sometimes breaks adb. 
self._device.WaitUntilFullyBooted() self._host_iface = host_iface self._host_ip = host_ip self.device_iface = device_iface self._device_ip = device_ip def _TestConnectivity(self): with open(os.devnull, 'wb') as devnull: return subprocess.call(['ping', '-q', '-c1', '-W1', self._device_ip], stdout=devnull) == 0 def OverrideRoutingPolicy(self): """Override any routing policy that could prevent packets from reaching the rndis interface """ policies = self._device.RunShellCommand('ip rule') if len(policies) > 1 and not 'lookup main' in policies[1]: self._device.RunShellCommand('ip rule add prio 1 from all table main') self._device.RunShellCommand('ip route flush cache') def RestoreRoutingPolicy(self): policies = self._device.RunShellCommand('ip rule') if len(policies) > 1 and re.match("^1:.*lookup main", policies[1]): self._device.RunShellCommand('ip rule del prio 1') self._device.RunShellCommand('ip route flush cache') def _CheckConfigureNetwork(self): """Enables RNDIS and configures it, retrying until we have connectivity.""" force = False for _ in range(3): device_iface, host_iface = self._CheckEnableRndis(force) self._ConfigureNetwork(device_iface, host_iface) self.OverrideRoutingPolicy() # Sometimes the first packet will wake up the connection. for _ in range(3): if self._TestConnectivity(): return force = True self.RestoreRoutingPolicy() raise Exception('No connectivity, giving up.')
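# ---------------------------------------------------------------------------
# Hedged sketch (not part of the telemetry source): standalone versions of the
# IP arithmetic that AndroidRndisConfigurator uses when it picks a device
# address on the host's RNDIS subnet.  The function names here are
# illustrative only.
import socket
import struct

def _ip_to_long(addr):
    # '192.168.123.1' becomes 3232267009
    return struct.unpack('!L', socket.inet_aton(addr))[0]

def _long_to_ip(value):
    return socket.inet_ntoa(struct.pack('!L', value))

def _prefix_to_address_mask(cidr):
    # '192.168.123.1/24' becomes (3232267009, 0xFFFFFF00)
    addr, masklen = cidr.split('/')
    mask = 0xFFFFFFFF & ~((1 << (32 - int(masklen))) - 1)
    return _ip_to_long(addr), mask

# Example, mirroring _NextUnusedAddress() with toy inputs: the first host
# suffix on the subnet that is not already in use becomes the device address.
#   host, mask = _prefix_to_address_mask('192.168.123.1/24')
#   network = host & mask
#   _long_to_ip(network | 2)  returns  '192.168.123.2'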
# This file is part of DroidGraph. # # Copyright (C) 2021, Dario Incalza <dario.incalza at gmail.com> # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse, os, shutil, re, hashlib from subprocess import call from networkx.drawing.nx_pydot import write_dot import networkx as nx from zipfile import ZipFile __author__ = 'Dario Incalza <dario.incalza@gmail.com>' BAKSMALI_PATH = os.getcwd() + "/bin/baksmali.jar" APK_FILE = "" CACHE_PATH = os.getcwd() + "/cache/" DEX_DIR = CACHE_PATH + "dex/" SMALI_DIR = CACHE_PATH + "smali/" GRAPH_PATH = os.getcwd() EXCLUDE_LIST = [] INCLUDE_LIST = [] ''' Check if there is a baksmali tool. ''' def has_baksmali(): return os.path.isfile(BAKSMALI_PATH) ''' Determine if the given regex is valid. ''' def isValidRegEx(regex): try: re.compile(regex) return True except re.error: return False ''' Parse a given file of regular expressions and only add them if they are valid regexes. ''' def parse_configuration_list(file): result = [] with open(file, 'r') as f: for line in f: if line.strip(' \t\n\r').startswith("#"): continue regex = line.strip(' \t\n\r') if not regex: continue if not isValidRegEx(regex): print("CONFIG ERR: Ignoring %s because it is not a valid regex." % regex) else: result.append(regex) return result ''' Parse the arguments and assign global variables that we will be using throughout the tool. ''' def parse_arguments(): parser = argparse.ArgumentParser(description='Create different graphs ' 'from Android DEX bytecode ' 'to get insight in the code ' 'structure.') parser.add_argument('-a', '--apk', type=str, help='APK file to analyze', required=True) parser.add_argument('-d', '--directory', type=str, help="Directory to " "which the graphs " "should be " "printed") parser.add_argument('-i', '--include', type=str, help='Specify a list of ' 'regexes that ' 'define the classes ' 'to be ' 'included in the ' 'scope (OPTIONAL)') parser.add_argument('-x', '--exclude', type=str, help='Specify a list of ' 'regexes that ' 'define the classes ' 'to be ' 'excluded in the ' 'scope (OPTIONAL)') args = parser.parse_args() global APK_FILE APK_FILE = args.apk global GRAPH_PATH if args.directory is not None: GRAPH_PATH = args.directory else: GRAPH_PATH = os.getcwd() global INCLUDE_LIST if args.include is not None: INCLUDE_LIST = parse_configuration_list(args.include) for regex in INCLUDE_LIST: print("[*] CONFIG: Including classes matching %s" % regex) global EXCLUDE_LIST if args.exclude is not None: EXCLUDE_LIST = parse_configuration_list(args.exclude) for regex in EXCLUDE_LIST: print("[*] CONFIG: Excluding classes matching %s" % regex) ''' Sanity check to see if a valid APK file is specified. TODO: implement more specific check to see if it is a valid APK file ''' def check_apk_file(): if APK_FILE == "" or not os.path.isfile(APK_FILE): print("[!!!] ERR: No APK file specified, exiting.") exit(3) ''' Use baksmali to disassemble the .DEX files. 
''' def disassemble_dex_files(): dex_files = get_dex_files(APK_FILE) if len(dex_files) > 1: print("[*] MultiDex APK detected with {} .DEX files".format(len(dex_files))) else: print("[*] Single .DEX file detected") for dexfn in dex_files: print("\t[|]Disassembling {}".format(dexfn)) disassemble_dex_file(dexfilename=dexfn) ''' Disassemble the Dex bytecode of a given APK source, a specified DEX File (e.g., classes.dex) to a given smali cache sink ''' def disassemble_dex_file(dexfilename='classes.dex'): zf = ZipFile(APK_FILE, mode='r') with zf.open(dexfilename) as dexfile: df = dexfile.read() f = open(DEX_DIR + "{}".format(dexfilename), "wb") f.write(df) f.close() dexfile.close() call(["java", "-jar", BAKSMALI_PATH, "d", DEX_DIR + "{}".format(dexfilename), "-o", SMALI_DIR]) ''' Get a list of strings representing the .dex files available in the APK ''' def get_dex_files(apkfile): zf = ZipFile(apkfile, mode='r') file_list = zf.namelist() idx = 2 dexs = [] if 'classes.dex' in file_list: dexs.append("classes.dex") else: print('[!!!] classes.dex not found, invalid APK') exit(-1) while True: if 'classes{}.dex'.format(idx) in file_list: dexs.append('classes{}.dex'.format(idx)) idx = idx + 1 else: break return dexs ''' Clear the cache directory. ''' def clear_cache(): try: shutil.rmtree(CACHE_PATH) os.makedirs(CACHE_PATH) os.makedirs(SMALI_DIR) os.makedirs(DEX_DIR) except OSError: os.makedirs(CACHE_PATH) ''' Extract class name from a smali source line. Every class name is represented as a classdescriptor that starts zith 'L' and ends with ';'. ''' def extract_class_name(class_line): for el in class_line.split(" "): if el.startswith("L") and el.endswith(";"): return el ''' Check if the class_name matches a regex in the include list Note that letter case is ignored ''' def is_included(class_name): if len(INCLUDE_LIST) == 0: return True for regex in INCLUDE_LIST: if re.compile(regex, re.IGNORECASE).match(class_name): return True return False ''' Check if the class_name matches a regex in the exclude list Note that letter case is ignored ''' def is_excluded(class_name): if len(EXCLUDE_LIST) == 0: return False for regex in EXCLUDE_LIST: if re.compile(regex, re.IGNORECASE).match(class_name): return True return False ''' Create the actual hierarchy graph from disassembled DEX bytecode. ''' def create_hierarchy_graph(): print("[*] Generating hierarchy graph") hierarchy_graph = nx.DiGraph() for subdir, dirs, files in os.walk(SMALI_DIR): for file in files: full_path = os.path.join(subdir, file) with open(full_path, 'r') as f: class_name = "" super_class = "" interfaces = [] continue_loop = True for line in f: if line.startswith(".class"): class_line = line.strip( "\n") # extract the class line; always first line class_name = extract_class_name( class_line) # extract the class descriptor if not is_included(class_name) or is_excluded( class_name): continue_loop = False break if line.startswith(".super"): super_class = extract_class_name(line.strip("\n")) if line.startswith(".implements"): interfaces.append(extract_class_name(line.strip("\n"))) if not continue_loop: continue if class_name == "": print("[!!!] ERR: Could not parse class name from " + full_path) elif super_class == "": print("[!!!] 
ERR: Could not parse super class name from " + full_path) else: if not super_class == "Ljava/lang/Object;": hierarchy_graph.add_edge(class_name, super_class, label="C", color="blue") if len(interfaces) > 0: for interface in interfaces: hierarchy_graph.add_edge(class_name, interface, label="I", color="red") write_dot(hierarchy_graph, GRAPH_PATH + "/hierarchy.dot") print("[*] Hierarchy graph is located at {}".format(GRAPH_PATH + "/hierarchy.dot")) ''' Get the SHA-256 hash of a given file ''' def _get_file_hash(file_path): sha256_hash = hashlib.sha256() with open(file_path, "rb") as f: for byte_block in iter(lambda: f.read(4096), b""): sha256_hash.update(byte_block) return sha256_hash.hexdigest() def main(): parse_arguments() check_apk_file() clear_cache() print("[*] DroidGraph - by Dario Incalza <dario.incalza@gmail.com>") print("[*] Using APK {} with hash {}".format(APK_FILE, _get_file_hash(APK_FILE))) disassemble_dex_files() create_hierarchy_graph() if __name__ == "__main__": if not has_baksmali(): print("[!!!] No baksmali.jar found in " + BAKSMALI_PATH) exit(2) main()
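# ---------------------------------------------------------------------------
# Hedged sketch (not part of DroidGraph): how the descriptor extraction and
# the include/exclude regexes above behave on a typical smali header line.
# The sample line and regexes are illustrative only; the helper is never
# called by the tool itself.
import re

def _descriptor_matching_demo():
    class_line = ".class public final Lcom/example/app/MainActivity;"
    # Smali class descriptors start with 'L' and end with ';'.
    descriptor = extract_class_name(class_line)
    assert descriptor == "Lcom/example/app/MainActivity;"
    # An include list like this keeps application classes, while an exclude
    # list drops library packages; matching ignores letter case.
    include = [r"Lcom/example/.*"]
    exclude = [r"Landroid/support/.*"]
    assert any(re.compile(rx, re.IGNORECASE).match(descriptor) for rx in include)
    assert not any(re.compile(rx, re.IGNORECASE).match(descriptor) for rx in exclude)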
# ------------------------------------------------------------------------------ # Copyright (c) 2010-2013, EVEthing team # All rights reserved. # # Redistribution and use in source and binary forms, with or without modification, # are permitted provided that the following conditions are met: # # Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, # INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT # NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY # OF SUCH DAMAGE. # ------------------------------------------------------------------------------ import re from decimal import Decimal, ROUND_UP from datetime import datetime from django.contrib.staticfiles.storage import staticfiles_storage from django.template.defaultfilters import stringfilter from django.conf import settings from django.utils.timesince import timesince from jingo import register @register.filter def tablecols(data, cols): rows = [] row = [] index = 0 for user in data: row.append(user) index = index + 1 if index % cols == 0: rows.append(row) row = [] # Still stuff missing? if len(row) > 0: for i in range(cols - len(row)): row.append([]) rows.append(row) return rows # Put commas in things # http://code.activestate.com/recipes/498181-add-thousands-separator-commas-to-formatted-number/ re_digits_nondigits = re.compile(r'\d+|\D+') @register.filter @stringfilter def commas(value, round_to=0, include_sign=False): try: value = float(value) except ValueError: return None if round_to is not None: value = round(value, round_to) if include_sign: format_str = '{0:+,.%df}' else: format_str = '{0:,.%df}' return (format_str % round_to).format(value) @register.filter def round_price(value): return round(value, 2) @register.filter def round_nodecimal(value): return round(value, 0) @register.filter def slugify(value): import unicodedata """ Converts to lowercase, removes non-word characters (alphanumerics and underscores) and converts spaces to hyphens. Also strips leading and trailing whitespace. 
""" try: value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii') except: pass value = re.sub('[^\w\s-]', '', value).strip().lower() return re.sub('[-\s]+', '-', value) def _commafy(s): r = [] for i, c in enumerate(reversed(s)): if i and (not (i % 3)): r.insert(0, ',') r.insert(0, c) return ''.join(r) @register.filter def duration(s): """Turn a duration in seconds into a human readable string""" m, s = divmod(s, 60) h, m = divmod(m, 60) d, h = divmod(h, 24) parts = [] if d: parts.append('%dd' % (d)) if h: parts.append('%dh' % (h)) if m: parts.append('%dm' % (m)) if s: parts.append('%ds' % (s)) return ' '.join(parts) @register.filter def duration_right(s): m, s = divmod(s, 60) h, m = divmod(m, 60) d, h = divmod(h, 24) parts = [] if d: parts.append('%dd' % (d)) if h or d: parts.append('%02dh' % (h)) if m or h or d: parts.append('%02dm' % (m)) parts.append('%02ds' % (s)) return ' '.join(parts) @register.filter def shortduration(s): """Turn a duration in seconds into a shorter human readable string""" return ' '.join(duration(s).split()[:2]) @register.filter @stringfilter def balance(s): """Do balance colouring (red for negative, green for positive)""" if s == '0': return s elif s.startswith('-'): return '<span class="neg">%s</span>' % (s) else: return '<span class="pos">%s</span>' % (s) @register.filter def balance_class(n): if n < 0: return 'neg' else: return 'pos' roman_list = ['', 'I', 'II', 'III', 'IV', 'V'] @register.filter def roman(num): if isinstance(num, str) or isinstance(num, unicode): return roman_list[int(num)] elif isinstance(num, int) or isinstance(num, long): return roman_list[num] else: return '' MONTHS = [None, 'January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December'] @register.filter def month_name(num): return MONTHS[num] @register.filter def date(d, f): return d.strftime(f) @register.filter def time(d): return d.strftime('%H:%M:%S') @register.filter def timeago(d, tz=True): fmt_str = '%Y-%m-%dT%H:%M:%S' if tz: fmt_str += '+0000' return '' if d is None else d.strftime(fmt_str) # Shorten numbers to a human readable version THOUSAND = 10 ** 3 TEN_THOUSAND = 10 ** 4 MILLION = 10 ** 6 BILLION = 10 ** 9 @register.filter def lvl(value, best=5, better=4, append=""): value_fmt = "<b>%s</b>" if value >= best: cls = 'text-success' elif value >= better: cls = 'text-info' else: cls = 'text-danger' value_fmt = "%s" return '<span class="%s">%s%s</span>' % (cls, value_fmt % value, append) @register.filter def implant_lvl(value): return lvl(value, 5, 3, "%") @register.filter def pct_over(value): value_fmt = "<b>%s</b>" if value < 200: cls = 'txt-success' elif value < 400: cls = 'txt-info' else: cls = 'txt-danger' return '<span class="%s">%s%%</span>' % (cls, value_fmt % value) @register.filter def humanize(value, include_sign=False): if value is None or value == '': return '0' ret = "" if value >= BILLION or value <= -BILLION: v = Decimal(value) / BILLION ret = '%sB' % (v.quantize(Decimal('.01'), rounding=ROUND_UP)) elif value >= MILLION or value <= -MILLION: v = Decimal(value) / MILLION if v >= 10: ret = '%sM' % (v.quantize(Decimal('.1'), rounding=ROUND_UP)) else: ret = '%sM' % (v.quantize(Decimal('.01'), rounding=ROUND_UP)) elif value >= TEN_THOUSAND or value <= -TEN_THOUSAND: v = Decimal(value) / THOUSAND ret = '%sK' % (v.quantize(Decimal('.1'), rounding=ROUND_UP)) elif value >= THOUSAND or value <= -THOUSAND: ret = '%s' % (commas(Decimal(value).quantize(Decimal('1.'), rounding=ROUND_UP))) 
else: if isinstance(value, Decimal): ret = value.quantize(Decimal('.1'), rounding=ROUND_UP) else: ret = value if include_sign: if value > 0: ret = "+" + ret elif value < 0: ret = "-" + ret return ret @register.filter def spanif(value, arg): """Conditionally wrap some text in a span if it matches a condition. Ugh.""" parts = arg.split() if len(parts) != 3: return value n = int(parts[2]) if (parts[1] == '<' and value < n) or (parts[1] == '=' and value == n) or (parts[1] == '>' and value > n): return '<span class="%s">%s</span>' % (parts[0], value) else: return value @register.function def static(path): """Jinja2 filter version of staticfiles. Hopefully.""" return staticfiles_storage.url(path) @register.filter def can_register(user): if (not settings.ALLOW_REGISTRATION) or user.is_authenticated(): return False return True @register.filter def timeuntil(d): return timesince(datetime.utcnow(), d)
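# ---------------------------------------------------------------------------
# Hedged sketch (not part of the EVEthing source): a few worked examples of
# the formatting filters above, so the rounding and suffix rules are explicit.
# Assumes the registration decorators in this module return the wrapped
# callables unchanged, which lets the filters be called directly.
def _filter_examples():
    assert duration(90061) == '1d 1h 1m 1s'       # 1 day + 1 h + 1 min + 1 s
    assert duration_right(61) == '01m 01s'        # zero-padded, seconds always shown
    assert shortduration(90061) == '1d 1h'        # only the two largest units
    assert humanize(1234567890) == '1.24B'        # Decimal ROUND_UP, two places
    assert humanize(12345) == '12.4K'             # ten-thousand range, one place
    assert commas('1234567.891', 2) == '1,234,567.89'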
# Copyright 2012 Twitter # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import csv import datetime from collections import defaultdict import simplejson from django.http import HttpResponse from django.template.response import TemplateResponse from django.shortcuts import get_object_or_404 from django.core.urlresolvers import reverse from django.http import HttpResponseRedirect, Http404 from django.views.decorators.http import require_POST from django_ext.http import JSONResponse from django_ext.utils import datetime_to_timestamp from django.contrib.auth.decorators import login_required from dashboard.models import App, Member, AppKey from ab.models import Experiment, Variation from ab import interface as abdb from ab.forms import ExperimentForm def _get_app_apps(request, app_slug): apps = App.objects.filter(member__user=request.user).order_by('id') app = get_object_or_404(apps, slug__iexact=app_slug) try: Member.objects.get(user=request.user, app=app) except Member.DoesNotExist: return TemplateResponse(request, 'forbidden.html') return app, apps @login_required def experiment_list(request, app_slug=None): app, apps = _get_app_apps(request, app_slug) experiments = Experiment.objects.filter(app=app) context = { 'app': app, 'apps': apps, 'experiments': experiments, } return TemplateResponse(request, 'ab/experiment_list.html', context) @login_required def experiment_detail(request, app_slug=None, experiment_slug=None): app, apps = _get_app_apps(request, app_slug) exp = get_object_or_404(Experiment, app=app, slug__iexact=experiment_slug) variations = Variation.objects.filter(experiment=exp).order_by('num') context = { 'app': app, 'apps': apps, 'experiment': exp, 'variations': variations, 'total_percentage': 0.0 + sum([v.weight for v in variations]), 'baseline_percentage': 1.0 - sum([v.weight for v in variations]), } return TemplateResponse(request, 'ab/experiment_detail.html', context) @login_required def experiment_stats(request, app_slug=None, experiment_id=None): app, apps = _get_app_apps(request, app_slug) exp = get_object_or_404(Experiment, id=int(experiment_id)) confidence = abdb.get_confidence_data(exp.id) graph_data = abdb.get_graphs(exp.id) graphs = {-1: [{'successes': 0, 'trials': 0}]} for variation in Variation.objects.filter(experiment=exp).order_by('num'): graphs[variation.num - 1] = [{'successes': 0, 'trials': 0}] confidence.setdefault(variation.num - 1, [0, 0]) # Convert the series into something nicer and convert datetimes to floats for choice in sorted(graph_data.keys()): graph = [] for dt in sorted(graph_data[choice].keys()): item = graph_data[choice][dt].copy() item['timestamp'] = datetime_to_timestamp(dt) graph.append(item) graphs[choice] = graph return JSONResponse(request, { 'graphs': graphs, 'confidence': confidence, }) @login_required def experiment_csv(request, app_slug=None, experiment_id=None): app, apps = _get_app_apps(request, app_slug) exp = get_object_or_404(Experiment, id=int(experiment_id)) variations = list(Variation.objects.filter(experiment=exp).order_by('num')) graph_data 
= abdb.get_graphs(exp.id) response = HttpResponse(mimetype='text/csv') response['Content-Disposition'] = 'attachment; filename=%s.%s.csv' % ( exp.slug, datetime.date.today() ) writer = csv.writer(response) data = defaultdict(lambda: {}) all_dts = set() all_headers = [] for choice in sorted(graph_data.keys()): choice_name = 'Baseline' if choice == -1 else variations[choice].name success_col_name = choice_name + ' Successes' trial_col_name = choice_name + ' Trials' if success_col_name not in all_headers: all_headers.extend([success_col_name, trial_col_name]) for dt in graph_data[choice].keys(): all_dts.add(dt) data[dt].update({ success_col_name: graph_data[choice][dt]['successes'], trial_col_name: graph_data[choice][dt]['trials'], }) writer.writerow(['Date'] + all_headers) for dt in sorted(all_dts): row = [str(dt)] for header in all_headers: val = data[dt].get(header) row.append(str(val) if val else '0') writer.writerow(row) return response @login_required def experiment_create(request, app_slug=None): app, apps = _get_app_apps(request, app_slug) form = ExperimentForm(request.POST or None, app=app, user=request.user) if form.is_valid(): exp = form.save() next = reverse('ab_experiment_detail', args=[app_slug, exp.slug]) return HttpResponseRedirect(next) context = { 'app': app, 'apps': apps, 'form': form, } return TemplateResponse(request, 'ab/experiment_create.html', context) @login_required def variation_change_name(request, app_slug=None, variation_id=None): app, apps = _get_app_apps(request, app_slug) variation = get_object_or_404(Variation, id=variation_id) if 'name' not in request.REQUEST: raise Http404 variation.name = request.REQUEST['name'] variation.save() return JSONResponse(request, {'status': 'ok'}) @login_required def variation_remove(request, app_slug=None, variation_id=None): app, apps = _get_app_apps(request, app_slug) variation = get_object_or_404(Variation, id=variation_id) experiment = variation.experiment variation.delete() vs = Variation.objects.filter(experiment=experiment).order_by('num') for i, variation in enumerate(vs): if variation.num != i + 1: variation.num = i + 1 variation.save() return JSONResponse(request, {'status': 'ok'}) @login_required def variation_create(request, app_slug=None, experiment_id=None): app, apps = _get_app_apps(request, app_slug) experiment = get_object_or_404(Experiment, id=experiment_id) vs = Variation.objects.filter(experiment=experiment).order_by('num') v = Variation.objects.create( experiment=experiment, weight=0, num=vs.count() + 1, name='Test ' + ('ABCDEFGHIJKLMNOPQRSTUVWXYZ'[vs.count()]) ) return JSONResponse(request, { 'id': v.id, 'name': v.name, 'weight': v.weight, }) @login_required @require_POST def experiment_save_data(request, app_slug=None, experiment_id=None): # TODO: Respond with error stuff instead of doing assertions app, apps = _get_app_apps(request, app_slug) experiment = get_object_or_404(Experiment, id=experiment_id) assert experiment.app_id == app.id items = simplejson.loads(request.raw_post_data) total_weight = 0.0 for item in items: total_weight += item['weight'] if experiment.has_data: simplejson.loads(item['data']) assert total_weight <= 1.0 for item in items: variation = Variation.objects.get(id=item['id']) assert variation.experiment_id == experiment.id Variation.objects.filter(id=item['id']).update( weight=item['weight'], data=item['data'], ) return JSONResponse(request, {'status': 'ok'}) @login_required @require_POST def experiment_delete(request, app_slug=None, experiment_id=None): # TODO: Respond with error 
stuff instead of doing assertions app, apps = _get_app_apps(request, app_slug) experiment = get_object_or_404(Experiment, id=experiment_id) assert experiment.app_id == app.id experiment.delete() return JSONResponse(request, {'status': 'ok'}) @login_required def quickstart(request, app_slug=None): app, apps = _get_app_apps(request, app_slug) app_key = AppKey.objects.filter( app=app, status=AppKey.ACTIVE).order_by('-date_created')[0].key return TemplateResponse(request, 'ab/quickstart.html', { 'apps': apps, 'app': app, 'app_key': app_key, })
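# ---------------------------------------------------------------------------
# Hedged sketch (not part of the dashboard source): the traffic-weight
# bookkeeping that experiment_detail() and experiment_save_data() above rely
# on.  Variation weights are fractions of traffic, the baseline receives
# whatever remains, and payloads whose weights sum past 1.0 are rejected.
# The helper name and numbers are illustrative only.
def _split_traffic(variation_weights):
    total = sum(variation_weights)
    if total > 1.0:
        raise ValueError("variation weights exceed 100% of traffic")
    return {'variations': variation_weights, 'baseline': 1.0 - total}

# _split_traffic([0.25, 0.25]) returns
#   {'variations': [0.25, 0.25], 'baseline': 0.5}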
# Copyright (C) 2016 Google Inc. # Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file> import datetime import json import re import sys import sqlalchemy from flask import request from ggrc.settings import CUSTOM_URL_ROOT from ggrc.utils import benchmarks class GrcEncoder(json.JSONEncoder): """Custom JSON Encoder to handle datetime objects and sets from: `http://stackoverflow.com/questions/12122007/python-json-encoder-to-support-datetime`_ also consider: `http://hg.tryton.org/2.4/trytond/file/ade5432ac476/trytond/protocols/jsonrpc.py#l53`_ """ def default(self, obj): if isinstance(obj, datetime.datetime): return obj.isoformat() elif isinstance(obj, datetime.date): return obj.isoformat() elif isinstance(obj, datetime.timedelta): return (datetime.datetime.min + obj).time().isoformat() elif isinstance(obj, set): return list(obj) else: return super(GrcEncoder, self).default(obj) def as_json(obj, **kwargs): return json.dumps(obj, cls=GrcEncoder, **kwargs) def service_for(obj): module = sys.modules['ggrc.services'] if type(obj) is str or type(obj) is unicode: # noqa model_type = obj else: model_type = obj.__class__.__name__ return getattr(module, model_type, None) def url_for(obj, id=None): service = service_for(obj) if service is None: return None if not hasattr(service, 'url_for'): return None if id is not None: return service.url_for(id=id) return service.url_for(obj) def view_service_for(obj): module = sys.modules['ggrc.views'] if type(obj) is str or type(obj) is unicode: # noqa model_type = obj else: model_type = obj.__class__.__name__ return getattr(module, model_type, None) def view_url_for(obj, id=None): service = view_service_for(obj) if service is None: return None if not hasattr(service, 'url_for'): return None if id is not None: return service.url_for(id=id) return service.url_for(obj) def encoded_dict(in_dict): # http://stackoverflow.com/questions/6480723/urllib-urlencode-doesn't-like-unicode-values-how-about-this-workaround out_dict = {} for k, v in in_dict.iteritems(): if isinstance(v, unicode): # noqa v = v.encode('utf8') elif isinstance(v, str): # Must be encoded in UTF-8 v.decode('utf8') out_dict[k] = v return out_dict def merge_dict(destination, source, path=None): """merges source into destination""" if path is None: path = [] for key in source: if key in destination: if isinstance(destination[key], dict) and isinstance(source[key], dict): merge_dict(destination[key], source[key], path + [str(key)]) elif destination[key] == source[key]: pass # same leaf value else: raise Exception('Conflict at %s' % '.'.join(path + [str(key)])) else: destination[key] = source[key] return destination def merge_dicts(*args): result = {} for arg in args: result = merge_dict(result, arg) return result def get_url_root(): if CUSTOM_URL_ROOT is not None: return CUSTOM_URL_ROOT return request.url_root def get_mapping_rules(): """ Get mappings rules as defined in business_object.js Special cases: Aduit has direct mapping to Program with program_id Section has a direct mapping to Standard/Regulation/Poicy with directive_id """ # these rules are copy pasted from # src/ggrc/assets/javascripts/apps/base_widgets.js line: 9 # WARNING ######################################################## # Manually added Risks and threats to the list from base_widgets # ################################################################## # TODO: Read these rules from different modules and combine them here. 
all_rules = set(['AccessGroup', 'Assessment', 'Audit', 'Clause', 'Contract', 'Control', 'CycleTaskGroupObjectTask', 'DataAsset', 'Facility', 'Issue', 'Market', 'Objective', 'OrgGroup', 'Person', 'Policy', 'Process', 'Product', 'Program', 'Project', 'Regulation', 'Risk', 'Section', 'Standard', 'System', 'Threat', 'Vendor']) business_object_rules = { "AccessGroup": all_rules - set(['AccessGroup']), "Assessment": all_rules - set(['Assessment']), "Audit": all_rules - set(['CycleTaskGroupObjectTask', 'Audit', 'Risk', 'Threat']), "Clause": all_rules - set(['Clause']), "Contract": all_rules - set(['Policy', 'Regulation', 'Contract', 'Standard']), "Control": all_rules, "CycleTaskGroupObjectTask": all_rules - set(['CycleTaskGroupObjectTask', 'Audit']), "DataAsset": all_rules, "Facility": all_rules, "Issue": all_rules, "Market": all_rules, "Objective": all_rules, "OrgGroup": all_rules, "Person": all_rules - set(['Person']), "Policy": all_rules - set(['Policy', 'Regulation', 'Contract', 'Standard']), "Process": all_rules, "Product": all_rules, "Program": all_rules - set(['Program']), "Project": all_rules, "Regulation": all_rules - set(['Policy', 'Regulation', 'Contract', 'Standard']), "Risk": all_rules - set(['Audit', 'Risk']), "Section": all_rules, "Standard": all_rules - set(['Policy', 'Regulation', 'Contract', 'Standard']), "System": all_rules, "Threat": all_rules - set(['Audit', 'Threat']), "Vendor": all_rules, } return business_object_rules def _prefix_camelcase(name, prefix): name = name[:1].lower() + name[1:] return re.sub(r'[A-Z]', lambda pat: prefix + pat.group(0).lower(), name) def underscore_from_camelcase(name): return _prefix_camelcase(name, "_") def title_from_camelcase(name): return _prefix_camelcase(name, " ") def get_fuzzy_date(delta_date): """Get a human readable date string. This function returns a human friendly time delta compared to today. Args: delta_date (date): Date that we want to show to the user. Returns: string: A human readable representation date delta. Examples: >>> get_fuzzy_date(datetime.date.today() + datetime.timedelta(2)) 'in 2 days' >>> get_fuzzy_date(datetime.date.today()) 'today' >>> get_fuzzy_date(datetime.date.today() + datetime.timedelta(-1)) '1 day ago' """ if not delta_date: return "" if isinstance(delta_date, datetime.datetime): delta_date = delta_date.date() delta = delta_date - datetime.date.today() if delta.days < 0: days = abs(delta.days) return "{} day{} ago".format(days, "s" if days > 1 else "") if delta.days == 0: return "today" # TODO: use format_timedelta from babel package. return "in {} day{}".format(delta.days, "s" if delta.days > 1 else "") def get_digest_date_statement(delta_date, word, is_change_tense=False): """Get statement created from word in appropriate tense and readable date. This function returns phrase created using concatenation of a word in appropriate tense with human readable date value. Args: delta_date (date): Date that we want to show to the user word (str): With which date should be concatenated is_change_tense: Flag which shows is tense of the word should be changed Returns: string: A human readable statement, created from word and date concatenation """ fuzzy_date = get_fuzzy_date(delta_date) word_end = '' if is_change_tense: if "in" in fuzzy_date: word_end = 's' else: word_end = 'ed' return '{}{} {}'.format(word, word_end, fuzzy_date) # pylint: disable=too-few-public-methods # because this is a small context manager class QueryCounter(object): """Context manager for counting sqlalchemy database queries. 
Usage: with QueryCounter() as counter: query_count = counter.get """ def __init__(self): self.queries = [] def after_cursor_execute(*args): self.queries.append(args[2]) self.listener = after_cursor_execute def __enter__(self): sqlalchemy.event.listen(sqlalchemy.engine.Engine, "after_cursor_execute", self.listener) return self def __exit__(self, *_): sqlalchemy.event.remove(sqlalchemy.engine.Engine, "after_cursor_execute", self.listener) @property def get(self): return len(self.queries) benchmark = benchmarks.get_benchmark() with_nop = benchmarks.WithNop def convert_date_format(date, format_from, format_to): """Convert string date format from one to another.""" return datetime.datetime.strptime(date, format_from).strftime(format_to)
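# ---------------------------------------------------------------------------
# Hedged sketch (not part of the ggrc source): how merge_dict()/merge_dicts()
# above behave on nested input.  Identical leaves are tolerated, conflicting
# leaves raise an Exception.  The sample dictionaries are illustrative only.
def _merge_dicts_example():
  base = {"a": {"x": 1}, "b": 2}
  extra = {"a": {"y": 3}, "b": 2}            # "b" matches, so no conflict
  assert merge_dicts(base, extra) == {"a": {"x": 1, "y": 3}, "b": 2}
  try:
    merge_dicts({"a": 1}, {"a": 2})          # differing leaf raises
  except Exception:
    pass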
from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH class IndicesClient(NamespacedClient): @query_params('format', 'prefer_local') def analyze(self, index=None, body=None, params=None): """ Perform the analysis process on a text and return the tokens breakdown of the text. `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-analyze.html>`_ :arg index: The name of the index to scope the operation :arg body: Define analyzer/tokenizer parameters and the text on which the analysis should be performed :arg format: Format of the output, default 'detailed', valid choices are: 'detailed', 'text' :arg prefer_local: With `true`, specify that a local shard should be used if available, with `false`, use a random shard (default: true) """ return self.transport.perform_request('GET', _make_path(index, '_analyze'), params=params, body=body) @query_params('allow_no_indices', 'expand_wildcards', 'ignore_unavailable') def refresh(self, index=None, params=None): """ Explicitly refresh one or more index, making all operations performed since the last refresh available for search. `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-refresh.html>`_ :arg index: A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices :arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) :arg expand_wildcards: Whether to expand wildcard expression to concrete indices that are open, closed or both., default 'open', valid choices are: 'open', 'closed', 'none', 'all' :arg ignore_unavailable: Whether specified concrete indices should be ignored when unavailable (missing or closed) """ return self.transport.perform_request('POST', _make_path(index, '_refresh'), params=params) @query_params('allow_no_indices', 'expand_wildcards', 'force', 'ignore_unavailable', 'wait_if_ongoing') def flush(self, index=None, params=None): """ Explicitly flush one or more indices. `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-flush.html>`_ :arg index: A comma-separated list of index names; use `_all` or empty string for all indices :arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) :arg expand_wildcards: Whether to expand wildcard expression to concrete indices that are open, closed or both., default 'open', valid choices are: 'open', 'closed', 'none', 'all' :arg force: Whether a flush should be forced even if it is not necessarily needed ie. if no changes will be committed to the index. This is useful if transaction log IDs should be incremented even if no uncommitted changes are present. (This setting can be considered as internal) :arg ignore_unavailable: Whether specified concrete indices should be ignored when unavailable (missing or closed) :arg wait_if_ongoing: If set to true the flush operation will block until the flush can be executed if another flush operation is already executing. The default is true. If set to false the flush will be skipped iff if another flush operation is already running. """ return self.transport.perform_request('POST', _make_path(index, '_flush'), params=params) @query_params('master_timeout', 'timeout', 'update_all_types', 'wait_for_active_shards') def create(self, index, body=None, params=None): """ Create an index in Elasticsearch. 
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-create-index.html>`_ :arg index: The name of the index :arg body: The configuration for the index (`settings` and `mappings`) :arg master_timeout: Specify timeout for connection to master :arg timeout: Explicit operation timeout :arg update_all_types: Whether to update the mapping for all fields with the same name across all types or not :arg wait_for_active_shards: Set the number of active shards to wait for before the operation returns. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'index'.") return self.transport.perform_request('PUT', _make_path(index), params=params, body=body) @query_params('allow_no_indices', 'expand_wildcards', 'flat_settings', 'ignore_unavailable', 'include_defaults', 'local') def get(self, index, feature=None, params=None): """ The get index API allows to retrieve information about one or more indexes. `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-index.html>`_ :arg index: A comma-separated list of index names :arg allow_no_indices: Ignore if a wildcard expression resolves to no concrete indices (default: false) :arg expand_wildcards: Whether wildcard expressions should get expanded to open or closed indices (default: open), default 'open', valid choices are: 'open', 'closed', 'none', 'all' :arg flat_settings: Return settings in flat format (default: false) :arg ignore_unavailable: Ignore unavailable indexes (default: false) :arg include_defaults: Whether to return all default setting for each of the indices., default False :arg local: Return local information, do not retrieve the state from master node (default: false) """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'index'.") return self.transport.perform_request('GET', _make_path(index, feature), params=params) @query_params('allow_no_indices', 'expand_wildcards', 'ignore_unavailable', 'master_timeout', 'timeout') def open(self, index, params=None): """ Open a closed index to make it available for search. `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-open-close.html>`_ :arg index: The name of the index :arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) :arg expand_wildcards: Whether to expand wildcard expression to concrete indices that are open, closed or both., default 'closed', valid choices are: 'open', 'closed', 'none', 'all' :arg ignore_unavailable: Whether specified concrete indices should be ignored when unavailable (missing or closed) :arg master_timeout: Specify timeout for connection to master :arg timeout: Explicit operation timeout """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'index'.") return self.transport.perform_request('POST', _make_path(index, '_open'), params=params) @query_params('allow_no_indices', 'expand_wildcards', 'ignore_unavailable', 'master_timeout', 'timeout') def close(self, index, params=None): """ Close an index to remove it's overhead from the cluster. Closed index is blocked for read/write operations. `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-open-close.html>`_ :arg index: The name of the index :arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. 
(This includes `_all` string or when no indices have been specified) :arg expand_wildcards: Whether to expand wildcard expression to concrete indices that are open, closed or both., default 'open', valid choices are: 'open', 'closed', 'none', 'all' :arg ignore_unavailable: Whether specified concrete indices should be ignored when unavailable (missing or closed) :arg master_timeout: Specify timeout for connection to master :arg timeout: Explicit operation timeout """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'index'.") return self.transport.perform_request('POST', _make_path(index, '_close'), params=params) @query_params('allow_no_indices', 'expand_wildcards', 'ignore_unavailable', 'master_timeout', 'timeout') def delete(self, index, params=None): """ Delete an index in Elasticsearch `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-delete-index.html>`_ :arg index: A comma-separated list of indices to delete; use `_all` or `*` string to delete all indices :arg allow_no_indices: Ignore if a wildcard expression resolves to no concrete indices (default: false) :arg expand_wildcards: Whether wildcard expressions should get expanded to open or closed indices (default: open), default 'open', valid choices are: 'open', 'closed', 'none', 'all' :arg ignore_unavailable: Ignore unavailable indexes (default: false) :arg master_timeout: Specify timeout for connection to master :arg timeout: Explicit operation timeout """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'index'.") return self.transport.perform_request('DELETE', _make_path(index), params=params) @query_params('allow_no_indices', 'expand_wildcards', 'flat_settings', 'ignore_unavailable', 'include_defaults', 'local') def exists(self, index, params=None): """ Return a boolean indicating whether given index exists. `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-exists.html>`_ :arg index: A comma-separated list of index names :arg allow_no_indices: Ignore if a wildcard expression resolves to no concrete indices (default: false) :arg expand_wildcards: Whether wildcard expressions should get expanded to open or closed indices (default: open), default 'open', valid choices are: 'open', 'closed', 'none', 'all' :arg flat_settings: Return settings in flat format (default: false) :arg ignore_unavailable: Ignore unavailable indexes (default: false) :arg include_defaults: Whether to return all default setting for each of the indices., default False :arg local: Return local information, do not retrieve the state from master node (default: false) """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'index'.") return self.transport.perform_request('HEAD', _make_path(index), params=params) @query_params('allow_no_indices', 'expand_wildcards', 'ignore_unavailable', 'local') def exists_type(self, index, doc_type, params=None): """ Check if a type/types exists in an index/indices. `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-types-exists.html>`_ :arg index: A comma-separated list of index names; use `_all` to check the types across all indices :arg doc_type: A comma-separated list of document types to check :arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. 
(This includes `_all` string or when no indices have been specified) :arg expand_wildcards: Whether to expand wildcard expression to concrete indices that are open, closed or both., default 'open', valid choices are: 'open', 'closed', 'none', 'all' :arg ignore_unavailable: Whether specified concrete indices should be ignored when unavailable (missing or closed) :arg local: Return local information, do not retrieve the state from master node (default: false) """ for param in (index, doc_type): if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") return self.transport.perform_request('HEAD', _make_path(index, '_mapping', doc_type), params=params) @query_params('allow_no_indices', 'expand_wildcards', 'ignore_unavailable', 'master_timeout', 'timeout', 'update_all_types') def put_mapping(self, doc_type, body, index=None, params=None): """ Register specific mapping definition for a specific type. `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-put-mapping.html>`_ :arg doc_type: The name of the document type :arg body: The mapping definition :arg index: A comma-separated list of index names the mapping should be added to (supports wildcards); use `_all` or omit to add the mapping on all indices. :arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) :arg expand_wildcards: Whether to expand wildcard expression to concrete indices that are open, closed or both., default 'open', valid choices are: 'open', 'closed', 'none', 'all' :arg ignore_unavailable: Whether specified concrete indices should be ignored when unavailable (missing or closed) :arg master_timeout: Specify timeout for connection to master :arg timeout: Explicit operation timeout :arg update_all_types: Whether to update the mapping for all fields with the same name across all types or not """ for param in (doc_type, body): if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") return self.transport.perform_request('PUT', _make_path(index, '_mapping', doc_type), params=params, body=body) @query_params('allow_no_indices', 'expand_wildcards', 'ignore_unavailable', 'local') def get_mapping(self, index=None, doc_type=None, params=None): """ Retrieve mapping definition of index or index/type. `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-mapping.html>`_ :arg index: A comma-separated list of index names :arg doc_type: A comma-separated list of document types :arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) :arg expand_wildcards: Whether to expand wildcard expression to concrete indices that are open, closed or both., default 'open', valid choices are: 'open', 'closed', 'none', 'all' :arg ignore_unavailable: Whether specified concrete indices should be ignored when unavailable (missing or closed) :arg local: Return local information, do not retrieve the state from master node (default: false) """ return self.transport.perform_request('GET', _make_path(index, '_mapping', doc_type), params=params) @query_params('allow_no_indices', 'expand_wildcards', 'ignore_unavailable', 'include_defaults', 'local') def get_field_mapping(self, fields, index=None, doc_type=None, params=None): """ Retrieve mapping definition of a specific field. 
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-field-mapping.html>`_ :arg fields: A comma-separated list of fields :arg index: A comma-separated list of index names :arg doc_type: A comma-separated list of document types :arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) :arg expand_wildcards: Whether to expand wildcard expression to concrete indices that are open, closed or both., default 'open', valid choices are: 'open', 'closed', 'none', 'all' :arg ignore_unavailable: Whether specified concrete indices should be ignored when unavailable (missing or closed) :arg include_defaults: Whether the default mapping values should be returned as well :arg local: Return local information, do not retrieve the state from master node (default: false) """ if fields in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'fields'.") return self.transport.perform_request('GET', _make_path(index, '_mapping', doc_type, 'field', fields), params=params) @query_params('master_timeout', 'timeout') def put_alias(self, index, name, body=None, params=None): """ Create an alias for a specific index/indices. `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html>`_ :arg index: A comma-separated list of index names the alias should point to (supports wildcards); use `_all` to perform the operation on all indices. :arg name: The name of the alias to be created or updated :arg body: The settings for the alias, such as `routing` or `filter` :arg master_timeout: Specify timeout for connection to master :arg timeout: Explicit timeout for the operation """ for param in (index, name): if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") return self.transport.perform_request('PUT', _make_path(index, '_alias', name), params=params, body=body) @query_params('allow_no_indices', 'expand_wildcards', 'ignore_unavailable', 'local') def exists_alias(self, index=None, name=None, params=None): """ Return a boolean indicating whether given alias exists. `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html>`_ :arg index: A comma-separated list of index names to filter aliases :arg name: A comma-separated list of alias names to return :arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) :arg expand_wildcards: Whether to expand wildcard expression to concrete indices that are open, closed or both., default 'all', valid choices are: 'open', 'closed', 'none', 'all' :arg ignore_unavailable: Whether specified concrete indices should be ignored when unavailable (missing or closed) :arg local: Return local information, do not retrieve the state from master node (default: false) """ return self.transport.perform_request('HEAD', _make_path(index, '_alias', name), params=params) @query_params('allow_no_indices', 'expand_wildcards', 'ignore_unavailable', 'local') def get_alias(self, index=None, name=None, params=None): """ Retrieve a specified alias. 
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html>`_ :arg index: A comma-separated list of index names to filter aliases :arg name: A comma-separated list of alias names to return :arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) :arg expand_wildcards: Whether to expand wildcard expression to concrete indices that are open, closed or both., default 'all', valid choices are: 'open', 'closed', 'none', 'all' :arg ignore_unavailable: Whether specified concrete indices should be ignored when unavailable (missing or closed) :arg local: Return local information, do not retrieve the state from master node (default: false) """ return self.transport.perform_request('GET', _make_path(index, '_alias', name), params=params) @query_params('master_timeout', 'timeout') def update_aliases(self, body, params=None): """ Update specified aliases. `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html>`_ :arg body: The definition of `actions` to perform :arg master_timeout: Specify timeout for connection to master :arg timeout: Request timeout """ if body in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'body'.") return self.transport.perform_request('POST', '/_aliases', params=params, body=body) @query_params('master_timeout', 'timeout') def delete_alias(self, index, name, params=None): """ Delete specific alias. `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html>`_ :arg index: A comma-separated list of index names (supports wildcards); use `_all` for all indices :arg name: A comma-separated list of aliases to delete (supports wildcards); use `_all` to delete all aliases for the specified indices. :arg master_timeout: Specify timeout for connection to master :arg timeout: Explicit timeout for the operation """ for param in (index, name): if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") return self.transport.perform_request('DELETE', _make_path(index, '_alias', name), params=params) @query_params('create', 'flat_settings', 'master_timeout', 'order', 'timeout') def put_template(self, name, body, params=None): """ Create an index template that will automatically be applied to new indices created. `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates.html>`_ :arg name: The name of the template :arg body: The template definition :arg create: Whether the index template should only be added if new or can also replace an existing one, default False :arg flat_settings: Return settings in flat format (default: false) :arg master_timeout: Specify timeout for connection to master :arg order: The order for this template when merging multiple matching ones (higher numbers are merged later, overriding the lower numbers) :arg timeout: Explicit operation timeout """ for param in (name, body): if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") return self.transport.perform_request('PUT', _make_path('_template', name), params=params, body=body) @query_params('flat_settings', 'local', 'master_timeout') def exists_template(self, name, params=None): """ Return a boolean indicating whether given template exists. 
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates.html>`_ :arg name: The comma separated names of the index templates :arg flat_settings: Return settings in flat format (default: false) :arg local: Return local information, do not retrieve the state from master node (default: false) :arg master_timeout: Explicit operation timeout for connection to master node """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'name'.") return self.transport.perform_request('HEAD', _make_path('_template', name), params=params) @query_params('flat_settings', 'local', 'master_timeout') def get_template(self, name=None, params=None): """ Retrieve an index template by its name. `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates.html>`_ :arg name: The name of the template :arg flat_settings: Return settings in flat format (default: false) :arg local: Return local information, do not retrieve the state from master node (default: false) :arg master_timeout: Explicit operation timeout for connection to master node """ return self.transport.perform_request('GET', _make_path('_template', name), params=params) @query_params('master_timeout', 'timeout') def delete_template(self, name, params=None): """ Delete an index template by its name. `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates.html>`_ :arg name: The name of the template :arg master_timeout: Specify timeout for connection to master :arg timeout: Explicit operation timeout """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'name'.") return self.transport.perform_request('DELETE', _make_path('_template', name), params=params) @query_params('allow_no_indices', 'expand_wildcards', 'flat_settings', 'ignore_unavailable', 'include_defaults', 'local') def get_settings(self, index=None, name=None, params=None): """ Retrieve settings for one or more (or all) indices. `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-settings.html>`_ :arg index: A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices :arg name: The name of the settings that should be included :arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) :arg expand_wildcards: Whether to expand wildcard expression to concrete indices that are open, closed or both., default ['open', 'closed'], valid choices are: 'open', 'closed', 'none', 'all' :arg flat_settings: Return settings in flat format (default: false) :arg ignore_unavailable: Whether specified concrete indices should be ignored when unavailable (missing or closed) :arg include_defaults: Whether to return all default setting for each of the indices., default False :arg local: Return local information, do not retrieve the state from master node (default: false) """ return self.transport.perform_request('GET', _make_path(index, '_settings', name), params=params) @query_params('allow_no_indices', 'expand_wildcards', 'flat_settings', 'ignore_unavailable', 'master_timeout', 'preserve_existing') def put_settings(self, body, index=None, params=None): """ Change specific index level settings in real time. 
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-update-settings.html>`_ :arg body: The index settings to be updated :arg index: A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices :arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) :arg expand_wildcards: Whether to expand wildcard expression to concrete indices that are open, closed or both., default 'open', valid choices are: 'open', 'closed', 'none', 'all' :arg flat_settings: Return settings in flat format (default: false) :arg ignore_unavailable: Whether specified concrete indices should be ignored when unavailable (missing or closed) :arg master_timeout: Specify timeout for connection to master :arg preserve_existing: Whether to update existing settings. If set to `true` existing settings on an index remain unchanged, the default is `false` """ if body in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'body'.") return self.transport.perform_request('PUT', _make_path(index, '_settings'), params=params, body=body) @query_params('completion_fields', 'fielddata_fields', 'fields', 'groups', 'include_segment_file_sizes', 'level', 'types') def stats(self, index=None, metric=None, params=None): """ Retrieve statistics on different operations happening on an index. `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-stats.html>`_ :arg index: A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices :arg metric: Limit the information returned the specific metrics. :arg completion_fields: A comma-separated list of fields for `fielddata` and `suggest` index metric (supports wildcards) :arg fielddata_fields: A comma-separated list of fields for `fielddata` index metric (supports wildcards) :arg fields: A comma-separated list of fields for `fielddata` and `completion` index metric (supports wildcards) :arg groups: A comma-separated list of search groups for `search` index metric :arg include_segment_file_sizes: Whether to report the aggregated disk usage of each one of the Lucene index files (only applies if segment stats are requested), default False :arg level: Return stats aggregated at cluster, index or shard level, default 'indices', valid choices are: 'cluster', 'indices', 'shards' :arg types: A comma-separated list of document types for the `indexing` index metric """ return self.transport.perform_request('GET', _make_path(index, '_stats', metric), params=params) @query_params('allow_no_indices', 'expand_wildcards', 'ignore_unavailable', 'operation_threading', 'verbose') def segments(self, index=None, params=None): """ Provide low level segments information that a Lucene index (shard level) is built with. `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-segments.html>`_ :arg index: A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices :arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. 
            (This includes `_all` string or when no indices have been specified)
        :arg expand_wildcards: Whether to expand wildcard expression to concrete
            indices that are open, closed or both., default 'open', valid
            choices are: 'open', 'closed', 'none', 'all'
        :arg ignore_unavailable: Whether specified concrete indices should be
            ignored when unavailable (missing or closed)
        :arg operation_threading: TODO: ?
        :arg verbose: Includes detailed memory usage by Lucene., default False
        """
        return self.transport.perform_request('GET', _make_path(index,
            '_segments'), params=params)

    @query_params('all_shards', 'allow_no_indices', 'analyze_wildcard',
        'analyzer', 'default_operator', 'df', 'expand_wildcards', 'explain',
        'ignore_unavailable', 'lenient', 'operation_threading', 'q', 'rewrite')
    def validate_query(self, index=None, doc_type=None, body=None, params=None):
        """
        Validate a potentially expensive query without executing it.
        `<http://www.elastic.co/guide/en/elasticsearch/reference/current/search-validate.html>`_

        :arg index: A comma-separated list of index names to restrict the
            operation; use `_all` or empty string to perform the operation on
            all indices
        :arg doc_type: A comma-separated list of document types to restrict the
            operation; leave empty to perform the operation on all types
        :arg body: The query definition specified with the Query DSL
        :arg all_shards: Execute validation on all shards instead of one random
            shard per index
        :arg allow_no_indices: Whether to ignore if a wildcard indices
            expression resolves into no concrete indices. (This includes `_all`
            string or when no indices have been specified)
        :arg analyze_wildcard: Specify whether wildcard and prefix queries
            should be analyzed (default: false)
        :arg analyzer: The analyzer to use for the query string
        :arg default_operator: The default operator for query string query (AND
            or OR), default 'OR', valid choices are: 'AND', 'OR'
        :arg df: The field to use as default where no field prefix is given in
            the query string
        :arg expand_wildcards: Whether to expand wildcard expression to concrete
            indices that are open, closed or both., default 'open', valid
            choices are: 'open', 'closed', 'none', 'all'
        :arg explain: Return detailed information about the error
        :arg ignore_unavailable: Whether specified concrete indices should be
            ignored when unavailable (missing or closed)
        :arg lenient: Specify whether format-based query failures (such as
            providing text to a numeric field) should be ignored
        :arg operation_threading: TODO: ?
        :arg q: Query in the Lucene query string syntax
        :arg rewrite: Provide a more detailed explanation showing the actual
            Lucene query that will be executed.
        """
        return self.transport.perform_request('GET', _make_path(index,
            doc_type, '_validate', 'query'), params=params, body=body)

    @query_params('allow_no_indices', 'expand_wildcards', 'field_data',
        'fielddata', 'fields', 'ignore_unavailable', 'query', 'recycler',
        'request')
    def clear_cache(self, index=None, params=None):
        """
        Clear either all caches or specific caches associated with one or more
        indices.
        `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-clearcache.html>`_

        :arg index: A comma-separated list of index names to limit the operation
        :arg allow_no_indices: Whether to ignore if a wildcard indices
            expression resolves into no concrete indices.
(This includes `_all` string or when no indices have been specified) :arg expand_wildcards: Whether to expand wildcard expression to concrete indices that are open, closed or both., default 'open', valid choices are: 'open', 'closed', 'none', 'all' :arg field_data: Clear field data :arg fielddata: Clear field data :arg fields: A comma-separated list of fields to clear when using the `field_data` parameter (default: all) :arg ignore_unavailable: Whether specified concrete indices should be ignored when unavailable (missing or closed) :arg query: Clear query caches :arg recycler: Clear the recycler cache :arg request: Clear request cache :arg request_cache: Clear request cache """ return self.transport.perform_request('POST', _make_path(index, '_cache', 'clear'), params=params) @query_params('active_only', 'detailed') def recovery(self, index=None, params=None): """ The indices recovery API provides insight into on-going shard recoveries. Recovery status may be reported for specific indices, or cluster-wide. `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-recovery.html>`_ :arg index: A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices :arg active_only: Display only those recoveries that are currently on- going, default False :arg detailed: Whether to display detailed information about shard recovery, default False """ return self.transport.perform_request('GET', _make_path(index, '_recovery'), params=params) @query_params('allow_no_indices', 'expand_wildcards', 'ignore_unavailable', 'only_ancient_segments', 'wait_for_completion') def upgrade(self, index=None, params=None): """ Upgrade one or more indices to the latest format through an API. `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-upgrade.html>`_ :arg index: A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices :arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) :arg expand_wildcards: Whether to expand wildcard expression to concrete indices that are open, closed or both., default 'open', valid choices are: 'open', 'closed', 'none', 'all' :arg ignore_unavailable: Whether specified concrete indices should be ignored when unavailable (missing or closed) :arg only_ancient_segments: If true, only ancient (an older Lucene major release) segments will be upgraded :arg wait_for_completion: Specify whether the request should block until the all segments are upgraded (default: false) """ return self.transport.perform_request('POST', _make_path(index, '_upgrade'), params=params) @query_params('allow_no_indices', 'expand_wildcards', 'ignore_unavailable') def get_upgrade(self, index=None, params=None): """ Monitor how much of one or more index is upgraded. `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-upgrade.html>`_ :arg index: A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices :arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. 
(This includes `_all` string or when no indices have been specified) :arg expand_wildcards: Whether to expand wildcard expression to concrete indices that are open, closed or both., default 'open', valid choices are: 'open', 'closed', 'none', 'all' :arg ignore_unavailable: Whether specified concrete indices should be ignored when unavailable (missing or closed) """ return self.transport.perform_request('GET', _make_path(index, '_upgrade'), params=params) @query_params('allow_no_indices', 'expand_wildcards', 'ignore_unavailable') def flush_synced(self, index=None, params=None): """ Perform a normal flush, then add a generated unique marker (sync_id) to all shards. `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-synced-flush.html>`_ :arg index: A comma-separated list of index names; use `_all` or empty string for all indices :arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) :arg expand_wildcards: Whether to expand wildcard expression to concrete indices that are open, closed or both., default 'open', valid choices are: 'open', 'closed', 'none', 'all' :arg ignore_unavailable: Whether specified concrete indices should be ignored when unavailable (missing or closed) """ return self.transport.perform_request('POST', _make_path(index, '_flush', 'synced'), params=params) @query_params('allow_no_indices', 'expand_wildcards', 'ignore_unavailable', 'operation_threading', 'status') def shard_stores(self, index=None, params=None): """ Provides store information for shard copies of indices. Store information reports on which nodes shard copies exist, the shard copy version, indicating how recent they are, and any exceptions encountered while opening the shard index or from earlier engine failure. `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-shards-stores.html>`_ :arg index: A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices :arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) :arg expand_wildcards: Whether to expand wildcard expression to concrete indices that are open, closed or both., default 'open', valid choices are: 'open', 'closed', 'none', 'all' :arg ignore_unavailable: Whether specified concrete indices should be ignored when unavailable (missing or closed) :arg operation_threading: TODO: ? :arg status: A comma-separated list of statuses used to filter on shards to get store information for, valid choices are: 'green', 'yellow', 'red', 'all' """ return self.transport.perform_request('GET', _make_path(index, '_shard_stores'), params=params) @query_params('allow_no_indices', 'expand_wildcards', 'flush', 'ignore_unavailable', 'max_num_segments', 'only_expunge_deletes', 'operation_threading', 'wait_for_merge') def forcemerge(self, index=None, params=None): """ The force merge API allows to force merging of one or more indices through an API. The merge relates to the number of segments a Lucene index holds within each shard. The force merge operation allows to reduce the number of segments by merging them. This call will block until the merge is complete. If the http connection is lost, the request will continue in the background, and any new requests will block until the previous force merge is complete. 
        `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-forcemerge.html>`_

        :arg index: A comma-separated list of index names; use `_all` or empty
            string to perform the operation on all indices
        :arg allow_no_indices: Whether to ignore if a wildcard indices
            expression resolves into no concrete indices. (This includes `_all`
            string or when no indices have been specified)
        :arg expand_wildcards: Whether to expand wildcard expression to concrete
            indices that are open, closed or both., default 'open', valid
            choices are: 'open', 'closed', 'none', 'all'
        :arg flush: Specify whether the index should be flushed after performing
            the operation (default: true)
        :arg ignore_unavailable: Whether specified concrete indices should be
            ignored when unavailable (missing or closed)
        :arg max_num_segments: The number of segments the index should be merged
            into (default: dynamic)
        :arg only_expunge_deletes: Specify whether the operation should only
            expunge deleted documents
        :arg operation_threading: TODO: ?
        :arg wait_for_merge: Specify whether the request should block until the
            merge process is finished (default: true)
        """
        return self.transport.perform_request('POST', _make_path(index,
            '_forcemerge'), params=params)

    @query_params('master_timeout', 'timeout', 'wait_for_active_shards')
    def shrink(self, index, target, body=None, params=None):
        """
        The shrink index API allows you to shrink an existing index into a new
        index with fewer primary shards. The number of primary shards in the
        target index must be a factor of the shards in the source index. For
        example, an index with 8 primary shards can be shrunk into 4, 2 or 1
        primary shards, and an index with 15 primary shards can be shrunk into
        5, 3 or 1. If the number of shards in the index is a prime number it
        can only be shrunk into a single primary shard. Before shrinking, a
        (primary or replica) copy of every shard in the index must be present
        on the same node.
        `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-shrink-index.html>`_

        :arg index: The name of the source index to shrink
        :arg target: The name of the target index to shrink into
        :arg body: The configuration for the target index (`settings` and
            `aliases`)
        :arg master_timeout: Specify timeout for connection to master
        :arg timeout: Explicit operation timeout
        :arg wait_for_active_shards: Set the number of active shards to wait
            for on the shrunken index before the operation returns.
        """
        for param in (index, target):
            if param in SKIP_IN_PATH:
                raise ValueError("Empty value passed for a required argument.")
        return self.transport.perform_request('PUT', _make_path(index,
            '_shrink', target), params=params, body=body)

    @query_params('dry_run', 'master_timeout', 'timeout',
        'wait_for_active_shards')
    def rollover(self, alias, new_index=None, body=None, params=None):
        """
        The rollover index API rolls an alias over to a new index when the
        existing index is considered to be too large or too old. The API
        accepts a single alias name and a list of conditions. The alias must
        point to a single index only. If the index satisfies the specified
        conditions, a new index is created and the alias is switched to point
        to the new index.
        `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-rollover-index.html>`_

        :arg alias: The name of the alias to rollover
        :arg new_index: The name of the rollover index
        :arg body: The conditions that need to be met for executing rollover
        :arg dry_run: If set to true the rollover action will only be validated
            but not actually performed even if a condition matches. The default
            is false
        :arg master_timeout: Specify timeout for connection to master
        :arg timeout: Explicit operation timeout
        :arg wait_for_active_shards: Set the number of active shards to wait
            for on the newly created rollover index before the operation
            returns.
        """
        if alias in SKIP_IN_PATH:
            raise ValueError("Empty value passed for a required argument 'alias'.")
        return self.transport.perform_request('POST', _make_path(alias,
            '_rollover', new_index), params=params, body=body)
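
# --- Illustrative usage sketch (assumed; not part of the client above) ------
# A hedged example of driving the IndicesClient methods documented above via
# the high-level elasticsearch-py client.  The host URL, the index and alias
# names, and the rollover conditions are invented for this sketch; only the
# method signatures (create, put_alias, rollover) come from the code above.
def _example_rollover_usage():
    from elasticsearch import Elasticsearch  # local import, illustration only

    es = Elasticsearch(["http://localhost:9200"])

    # Create a write index and point an alias at it.
    es.indices.create(index="logs-000001",
                      body={"settings": {"number_of_shards": 1}})
    es.indices.put_alias(index="logs-000001", name="logs-write")

    # Roll the alias over to a fresh index once it holds "too many" documents.
    return es.indices.rollover(alias="logs-write",
                               body={"conditions": {"max_docs": 1000000}})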
"""Tests for the WLED light platform.""" import json from unittest.mock import MagicMock import pytest from wled import Device as WLEDDevice, WLEDConnectionError, WLEDError from homeassistant.components.light import ( ATTR_BRIGHTNESS, ATTR_EFFECT, ATTR_HS_COLOR, ATTR_RGB_COLOR, ATTR_RGBW_COLOR, ATTR_TRANSITION, DOMAIN as LIGHT_DOMAIN, ) from homeassistant.components.wled.const import ( ATTR_INTENSITY, ATTR_PALETTE, ATTR_PRESET, ATTR_REVERSE, ATTR_SPEED, CONF_KEEP_MASTER_LIGHT, DOMAIN, SCAN_INTERVAL, SERVICE_EFFECT, SERVICE_PRESET, ) from homeassistant.const import ( ATTR_ENTITY_ID, ATTR_ICON, SERVICE_TURN_OFF, SERVICE_TURN_ON, STATE_OFF, STATE_ON, STATE_UNAVAILABLE, ) from homeassistant.core import HomeAssistant from homeassistant.helpers import entity_registry as er import homeassistant.util.dt as dt_util from tests.common import MockConfigEntry, async_fire_time_changed, load_fixture async def test_rgb_light_state( hass: HomeAssistant, init_integration: MockConfigEntry ) -> None: """Test the creation and values of the WLED lights.""" entity_registry = er.async_get(hass) # First segment of the strip state = hass.states.get("light.wled_rgb_light") assert state assert state.attributes.get(ATTR_BRIGHTNESS) == 127 assert state.attributes.get(ATTR_EFFECT) == "Solid" assert state.attributes.get(ATTR_HS_COLOR) == (37.412, 100.0) assert state.attributes.get(ATTR_ICON) == "mdi:led-strip-variant" assert state.attributes.get(ATTR_INTENSITY) == 128 assert state.attributes.get(ATTR_PALETTE) == "Default" assert state.attributes.get(ATTR_PRESET) is None assert state.attributes.get(ATTR_REVERSE) is False assert state.attributes.get(ATTR_SPEED) == 32 assert state.state == STATE_ON entry = entity_registry.async_get("light.wled_rgb_light") assert entry assert entry.unique_id == "aabbccddeeff_0" # Second segment of the strip state = hass.states.get("light.wled_rgb_light_segment_1") assert state assert state.attributes.get(ATTR_BRIGHTNESS) == 127 assert state.attributes.get(ATTR_EFFECT) == "Blink" assert state.attributes.get(ATTR_HS_COLOR) == (148.941, 100.0) assert state.attributes.get(ATTR_ICON) == "mdi:led-strip-variant" assert state.attributes.get(ATTR_INTENSITY) == 64 assert state.attributes.get(ATTR_PALETTE) == "Random Cycle" assert state.attributes.get(ATTR_PRESET) is None assert state.attributes.get(ATTR_REVERSE) is False assert state.attributes.get(ATTR_SPEED) == 16 assert state.state == STATE_ON entry = entity_registry.async_get("light.wled_rgb_light_segment_1") assert entry assert entry.unique_id == "aabbccddeeff_1" # Test master control of the lightstrip state = hass.states.get("light.wled_rgb_light_master") assert state assert state.attributes.get(ATTR_BRIGHTNESS) == 127 assert state.state == STATE_ON entry = entity_registry.async_get("light.wled_rgb_light_master") assert entry assert entry.unique_id == "aabbccddeeff" async def test_segment_change_state( hass: HomeAssistant, init_integration: MockConfigEntry, mock_wled: MagicMock, ) -> None: """Test the change of state of the WLED segments.""" await hass.services.async_call( LIGHT_DOMAIN, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: "light.wled_rgb_light", ATTR_TRANSITION: 5}, blocking=True, ) await hass.async_block_till_done() assert mock_wled.segment.call_count == 1 mock_wled.segment.assert_called_with( on=False, segment_id=0, transition=50, ) await hass.services.async_call( LIGHT_DOMAIN, SERVICE_TURN_ON, { ATTR_BRIGHTNESS: 42, ATTR_EFFECT: "Chase", ATTR_ENTITY_ID: "light.wled_rgb_light", ATTR_RGB_COLOR: [255, 0, 0], ATTR_TRANSITION: 5, }, blocking=True, 
) await hass.async_block_till_done() assert mock_wled.segment.call_count == 2 mock_wled.segment.assert_called_with( brightness=42, color_primary=(255, 0, 0), effect="Chase", on=True, segment_id=0, transition=50, ) async def test_master_change_state( hass: HomeAssistant, init_integration: MockConfigEntry, mock_wled: MagicMock, ) -> None: """Test the change of state of the WLED master light control.""" await hass.services.async_call( LIGHT_DOMAIN, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: "light.wled_rgb_light_master", ATTR_TRANSITION: 5}, blocking=True, ) await hass.async_block_till_done() assert mock_wled.master.call_count == 1 mock_wled.master.assert_called_with( on=False, transition=50, ) await hass.services.async_call( LIGHT_DOMAIN, SERVICE_TURN_ON, { ATTR_BRIGHTNESS: 42, ATTR_ENTITY_ID: "light.wled_rgb_light_master", ATTR_TRANSITION: 5, }, blocking=True, ) await hass.async_block_till_done() assert mock_wled.master.call_count == 2 mock_wled.master.assert_called_with( brightness=42, on=True, transition=50, ) await hass.services.async_call( LIGHT_DOMAIN, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: "light.wled_rgb_light_master", ATTR_TRANSITION: 5}, blocking=True, ) await hass.async_block_till_done() assert mock_wled.master.call_count == 3 mock_wled.master.assert_called_with( on=False, transition=50, ) await hass.services.async_call( LIGHT_DOMAIN, SERVICE_TURN_ON, { ATTR_BRIGHTNESS: 42, ATTR_ENTITY_ID: "light.wled_rgb_light_master", ATTR_TRANSITION: 5, }, blocking=True, ) await hass.async_block_till_done() assert mock_wled.master.call_count == 4 mock_wled.master.assert_called_with( brightness=42, on=True, transition=50, ) @pytest.mark.parametrize("mock_wled", ["wled/rgb_single_segment.json"], indirect=True) async def test_dynamically_handle_segments( hass: HomeAssistant, init_integration: MockConfigEntry, mock_wled: MagicMock, ) -> None: """Test if a new/deleted segment is dynamically added/removed.""" master = hass.states.get("light.wled_rgb_light_master") segment0 = hass.states.get("light.wled_rgb_light") segment1 = hass.states.get("light.wled_rgb_light_segment_1") assert segment0 assert segment0.state == STATE_ON assert not master assert not segment1 return_value = mock_wled.update.return_value mock_wled.update.return_value = WLEDDevice( json.loads(load_fixture("wled/rgb.json")) ) async_fire_time_changed(hass, dt_util.utcnow() + SCAN_INTERVAL) await hass.async_block_till_done() master = hass.states.get("light.wled_rgb_light_master") segment0 = hass.states.get("light.wled_rgb_light") segment1 = hass.states.get("light.wled_rgb_light_segment_1") assert master assert master.state == STATE_ON assert segment0 assert segment0.state == STATE_ON assert segment1 assert segment1.state == STATE_ON # Test adding if segment shows up again, including the master entity mock_wled.update.return_value = return_value async_fire_time_changed(hass, dt_util.utcnow() + SCAN_INTERVAL) await hass.async_block_till_done() master = hass.states.get("light.wled_rgb_light_master") segment0 = hass.states.get("light.wled_rgb_light") segment1 = hass.states.get("light.wled_rgb_light_segment_1") assert master assert master.state == STATE_UNAVAILABLE assert segment0 assert segment0.state == STATE_ON assert segment1 assert segment1.state == STATE_UNAVAILABLE @pytest.mark.parametrize("mock_wled", ["wled/rgb_single_segment.json"], indirect=True) async def test_single_segment_behavior( hass: HomeAssistant, init_integration: MockConfigEntry, mock_wled: MagicMock, ) -> None: """Test the behavior of the integration with a single segment.""" 
device = mock_wled.update.return_value assert not hass.states.get("light.wled_rgb_light_master") state = hass.states.get("light.wled_rgb_light") assert state assert state.state == STATE_ON # Test segment brightness takes master into account device.state.brightness = 100 device.state.segments[0].brightness = 255 async_fire_time_changed(hass, dt_util.utcnow() + SCAN_INTERVAL) await hass.async_block_till_done() state = hass.states.get("light.wled_rgb_light") assert state assert state.attributes.get(ATTR_BRIGHTNESS) == 100 # Test segment is off when master is off device.state.on = False async_fire_time_changed(hass, dt_util.utcnow() + SCAN_INTERVAL) await hass.async_block_till_done() state = hass.states.get("light.wled_rgb_light") assert state assert state.state == STATE_OFF # Test master is turned off when turning off a single segment await hass.services.async_call( LIGHT_DOMAIN, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: "light.wled_rgb_light", ATTR_TRANSITION: 5}, blocking=True, ) await hass.async_block_till_done() assert mock_wled.master.call_count == 1 mock_wled.master.assert_called_with( on=False, transition=50, ) # Test master is turned on when turning on a single segment, and segment # brightness is set to 255. await hass.services.async_call( LIGHT_DOMAIN, SERVICE_TURN_ON, { ATTR_ENTITY_ID: "light.wled_rgb_light", ATTR_TRANSITION: 5, ATTR_BRIGHTNESS: 42, }, blocking=True, ) await hass.async_block_till_done() assert mock_wled.segment.call_count == 1 assert mock_wled.master.call_count == 2 mock_wled.segment.assert_called_with(on=True, segment_id=0, brightness=255) mock_wled.master.assert_called_with(on=True, transition=50, brightness=42) async def test_light_error( hass: HomeAssistant, init_integration: MockConfigEntry, mock_wled: MagicMock, caplog: pytest.LogCaptureFixture, ) -> None: """Test error handling of the WLED lights.""" mock_wled.segment.side_effect = WLEDError await hass.services.async_call( LIGHT_DOMAIN, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: "light.wled_rgb_light"}, blocking=True, ) await hass.async_block_till_done() state = hass.states.get("light.wled_rgb_light") assert state assert state.state == STATE_ON assert "Invalid response from API" in caplog.text assert mock_wled.segment.call_count == 1 mock_wled.segment.assert_called_with(on=False, segment_id=0, transition=None) async def test_light_connection_error( hass: HomeAssistant, init_integration: MockConfigEntry, mock_wled: MagicMock, caplog: pytest.LogCaptureFixture, ) -> None: """Test error handling of the WLED switches.""" mock_wled.segment.side_effect = WLEDConnectionError await hass.services.async_call( LIGHT_DOMAIN, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: "light.wled_rgb_light"}, blocking=True, ) await hass.async_block_till_done() state = hass.states.get("light.wled_rgb_light") assert state assert state.state == STATE_UNAVAILABLE assert "Error communicating with API" in caplog.text assert mock_wled.segment.call_count == 1 mock_wled.segment.assert_called_with(on=False, segment_id=0, transition=None) @pytest.mark.parametrize("mock_wled", ["wled/rgbw.json"], indirect=True) async def test_rgbw_light( hass: HomeAssistant, init_integration: MockConfigEntry, mock_wled: MagicMock ) -> None: """Test RGBW support for WLED.""" state = hass.states.get("light.wled_rgbw_light") assert state assert state.state == STATE_ON assert state.attributes.get(ATTR_RGBW_COLOR) == (255, 0, 0, 139) await hass.services.async_call( LIGHT_DOMAIN, SERVICE_TURN_ON, { ATTR_ENTITY_ID: "light.wled_rgbw_light", ATTR_RGBW_COLOR: (255, 255, 255, 255), }, blocking=True, 
) await hass.async_block_till_done() assert mock_wled.segment.call_count == 1 mock_wled.segment.assert_called_with( color_primary=(255, 255, 255, 255), on=True, segment_id=0, ) async def test_effect_service( hass: HomeAssistant, init_integration: MockConfigEntry, mock_wled: MagicMock ) -> None: """Test the effect service of a WLED light.""" await hass.services.async_call( DOMAIN, SERVICE_EFFECT, { ATTR_EFFECT: "Rainbow", ATTR_ENTITY_ID: "light.wled_rgb_light", ATTR_INTENSITY: 200, ATTR_PALETTE: "Tiamat", ATTR_REVERSE: True, ATTR_SPEED: 100, }, blocking=True, ) await hass.async_block_till_done() assert mock_wled.segment.call_count == 1 mock_wled.segment.assert_called_with( effect="Rainbow", intensity=200, palette="Tiamat", reverse=True, segment_id=0, speed=100, ) await hass.services.async_call( DOMAIN, SERVICE_EFFECT, {ATTR_ENTITY_ID: "light.wled_rgb_light", ATTR_EFFECT: 9}, blocking=True, ) await hass.async_block_till_done() assert mock_wled.segment.call_count == 2 mock_wled.segment.assert_called_with( segment_id=0, effect=9, intensity=None, palette=None, reverse=None, speed=None, ) await hass.services.async_call( DOMAIN, SERVICE_EFFECT, { ATTR_ENTITY_ID: "light.wled_rgb_light", ATTR_INTENSITY: 200, ATTR_REVERSE: True, ATTR_SPEED: 100, }, blocking=True, ) await hass.async_block_till_done() assert mock_wled.segment.call_count == 3 mock_wled.segment.assert_called_with( intensity=200, reverse=True, segment_id=0, speed=100, effect=None, palette=None, ) await hass.services.async_call( DOMAIN, SERVICE_EFFECT, { ATTR_EFFECT: "Rainbow", ATTR_ENTITY_ID: "light.wled_rgb_light", ATTR_PALETTE: "Tiamat", ATTR_REVERSE: True, ATTR_SPEED: 100, }, blocking=True, ) await hass.async_block_till_done() assert mock_wled.segment.call_count == 4 mock_wled.segment.assert_called_with( effect="Rainbow", palette="Tiamat", reverse=True, segment_id=0, speed=100, intensity=None, ) await hass.services.async_call( DOMAIN, SERVICE_EFFECT, { ATTR_EFFECT: "Rainbow", ATTR_ENTITY_ID: "light.wled_rgb_light", ATTR_INTENSITY: 200, ATTR_SPEED: 100, }, blocking=True, ) await hass.async_block_till_done() assert mock_wled.segment.call_count == 5 mock_wled.segment.assert_called_with( effect="Rainbow", intensity=200, segment_id=0, speed=100, palette=None, reverse=None, ) await hass.services.async_call( DOMAIN, SERVICE_EFFECT, { ATTR_EFFECT: "Rainbow", ATTR_ENTITY_ID: "light.wled_rgb_light", ATTR_INTENSITY: 200, ATTR_REVERSE: True, }, blocking=True, ) await hass.async_block_till_done() assert mock_wled.segment.call_count == 6 mock_wled.segment.assert_called_with( effect="Rainbow", intensity=200, reverse=True, segment_id=0, palette=None, speed=None, ) async def test_effect_service_error( hass: HomeAssistant, init_integration: MockConfigEntry, mock_wled: MagicMock, caplog: pytest.LogCaptureFixture, ) -> None: """Test error handling of the WLED effect service.""" mock_wled.segment.side_effect = WLEDError await hass.services.async_call( DOMAIN, SERVICE_EFFECT, {ATTR_ENTITY_ID: "light.wled_rgb_light", ATTR_EFFECT: 9}, blocking=True, ) await hass.async_block_till_done() state = hass.states.get("light.wled_rgb_light") assert state assert state.state == STATE_ON assert "Invalid response from API" in caplog.text assert mock_wled.segment.call_count == 1 mock_wled.segment.assert_called_with( effect=9, segment_id=0, intensity=None, palette=None, reverse=None, speed=None ) async def test_preset_service( hass: HomeAssistant, init_integration: MockConfigEntry, mock_wled: MagicMock, caplog: pytest.LogCaptureFixture, ) -> None: """Test the preset 
service of a WLED light.""" await hass.services.async_call( DOMAIN, SERVICE_PRESET, { ATTR_ENTITY_ID: "light.wled_rgb_light", ATTR_PRESET: 1, }, blocking=True, ) await hass.async_block_till_done() assert mock_wled.preset.call_count == 1 mock_wled.preset.assert_called_with(preset=1) await hass.services.async_call( DOMAIN, SERVICE_PRESET, { ATTR_ENTITY_ID: "light.wled_rgb_light_master", ATTR_PRESET: 2, }, blocking=True, ) await hass.async_block_till_done() assert mock_wled.preset.call_count == 2 mock_wled.preset.assert_called_with(preset=2) assert "The 'wled.preset' service is deprecated" in caplog.text async def test_preset_service_error( hass: HomeAssistant, init_integration: MockConfigEntry, mock_wled: MagicMock, caplog: pytest.LogCaptureFixture, ) -> None: """Test error handling of the WLED preset service.""" mock_wled.preset.side_effect = WLEDError await hass.services.async_call( DOMAIN, SERVICE_PRESET, {ATTR_ENTITY_ID: "light.wled_rgb_light", ATTR_PRESET: 1}, blocking=True, ) await hass.async_block_till_done() state = hass.states.get("light.wled_rgb_light") assert state assert state.state == STATE_ON assert "Invalid response from API" in caplog.text assert mock_wled.preset.call_count == 1 mock_wled.preset.assert_called_with(preset=1) @pytest.mark.parametrize("mock_wled", ["wled/rgb_single_segment.json"], indirect=True) async def test_single_segment_with_keep_master_light( hass: HomeAssistant, init_integration: MockConfigEntry, mock_wled: MagicMock, ) -> None: """Test the behavior of the integration with a single segment.""" assert not hass.states.get("light.wled_rgb_light_master") hass.config_entries.async_update_entry( init_integration, options={CONF_KEEP_MASTER_LIGHT: True} ) await hass.async_block_till_done() state = hass.states.get("light.wled_rgb_light_master") assert state assert state.state == STATE_ON
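
# --- Illustrative fixture sketch (assumed; not the integration's conftest) --
# The tests above rely on `mock_wled` and `init_integration` fixtures defined
# elsewhere.  The helper below is only a hedged guess at how a minimal mocked
# WLED client could feed different JSON fixture files through the indirect
# parametrization used above; the real conftest also patches the integration's
# WLED client, which is omitted here.  It is named `mock_wled_sketch` so it
# does not shadow the real fixture.
@pytest.fixture
def mock_wled_sketch(request) -> MagicMock:
    """Return a mocked WLED client whose update() yields a parsed Device."""
    fixture = getattr(request, "param", "wled/rgb.json")
    client = MagicMock()
    client.update.return_value = WLEDDevice(json.loads(load_fixture(fixture)))
    return client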
#!/usr/bin/env python import copy import unittest from nose.tools import * import networkx from test_graph import TestGraph class TestDiGraph(TestGraph): def setUp(self): self.Graph=networkx.DiGraph # build K3 ed1,ed2,ed3,ed4,ed5,ed6 = ({},{},{},{},{},{}) self.k3adj={0: {1: ed1, 2: ed2}, 1: {0: ed3, 2: ed4}, 2: {0: ed5, 1:ed6}} self.k3edges=[(0, 1), (0, 2), (1, 2)] self.k3nodes=[0, 1, 2] self.K3=self.Graph() self.K3.adj = self.K3.succ = self.K3.edge = self.k3adj self.K3.pred={0: {1: ed3, 2: ed5}, 1: {0: ed1, 2: ed6}, 2: {0: ed2, 1:ed4}} ed1,ed2 = ({},{}) self.P3=self.Graph() self.P3.adj={0: {1: ed1}, 1: {2: ed2}, 2: {}} self.P3.succ=self.P3.adj self.P3.pred={0: {}, 1: {0: ed1}, 2: {1: ed2}} self.K3.node={} self.K3.node[0]={} self.K3.node[1]={} self.K3.node[2]={} def test_data_input(self): G=self.Graph(data={1:[2],2:[1]}, name="test") assert_equal(G.name,"test") assert_equal(sorted(G.adj.items()),[(1, {2: {}}), (2, {1: {}})]) assert_equal(sorted(G.succ.items()),[(1, {2: {}}), (2, {1: {}})]) assert_equal(sorted(G.pred.items()),[(1, {2: {}}), (2, {1: {}})]) def test_add_edge(self): G=self.Graph() G.add_edge(0,1) assert_equal(G.adj,{0: {1: {}}, 1: {}}) assert_equal(G.succ,{0: {1: {}}, 1: {}}) assert_equal(G.pred,{0: {}, 1: {0:{}}}) G=self.Graph() G.add_edge(*(0,1)) assert_equal(G.adj,{0: {1: {}}, 1: {}}) assert_equal(G.succ,{0: {1: {}}, 1: {}}) assert_equal(G.pred,{0: {}, 1: {0:{}}}) def test_add_edges_from(self): G=self.Graph() G.add_edges_from([(0,1),(0,2,{'data':3})],data=2) assert_equal(G.adj,{0: {1: {'data':2}, 2: {'data':3}}, 1: {}, 2: {}}) assert_equal(G.succ,{0: {1: {'data':2}, 2: {'data':3}}, 1: {}, 2: {}}) assert_equal(G.pred,{0: {}, 1: {0: {'data':2}}, 2: {0: {'data':3}}}) assert_raises(networkx.NetworkXError, G.add_edges_from,[(0,)]) # too few in tuple assert_raises(networkx.NetworkXError, G.add_edges_from,[(0,1,2,3)]) # too many in tuple assert_raises(TypeError, G.add_edges_from,[0]) # not a tuple def test_remove_edge(self): G=self.K3 G.remove_edge(0,1) assert_equal(G.succ,{0:{2:{}},1:{0:{},2:{}},2:{0:{},1:{}}}) assert_equal(G.pred,{0:{1:{}, 2:{}}, 1:{2:{}}, 2:{0:{},1:{}}}) assert_raises((KeyError,networkx.NetworkXError), G.remove_edge,-1,0) def test_remove_edges_from(self): G=self.K3 G.remove_edges_from([(0,1)]) assert_equal(G.succ,{0:{2:{}},1:{0:{},2:{}},2:{0:{},1:{}}}) assert_equal(G.pred,{0:{1:{}, 2:{}}, 1:{2:{}}, 2:{0:{},1: {}}}) G.remove_edges_from([(0,0)]) # silent fail def test_has_successor(self): G=self.K3 assert_equal(G.has_successor(0,1),True) assert_equal(G.has_successor(0,-1),False) def test_successors(self): G=self.K3 assert_equal(sorted(G.successors(0)),[1,2]) assert_raises((KeyError,networkx.NetworkXError), G.successors,-1) def test_successors_iter(self): G=self.K3 assert_equal(sorted(G.successors_iter(0)),[1,2]) assert_raises((KeyError,networkx.NetworkXError), G.successors_iter,-1) def test_has_predecessor(self): G=self.K3 assert_equal(G.has_predecessor(0,1),True) assert_equal(G.has_predecessor(0,-1),False) def test_predecessors(self): G=self.K3 assert_equal(sorted(G.predecessors(0)),[1,2]) assert_raises((KeyError,networkx.NetworkXError), G.predecessors,-1) def test_predecessors_iter(self): G=self.K3 assert_equal(sorted(G.predecessors_iter(0)),[1,2]) assert_raises((KeyError,networkx.NetworkXError), G.predecessors_iter,-1) def test_edges(self): G=self.K3 assert_equal(sorted(G.edges()),[(0,1),(0,2),(1,0),(1,2),(2,0),(2,1)]) assert_equal(sorted(G.edges(0)),[(0,1),(0,2)]) assert_raises((KeyError,networkx.NetworkXError), G.edges,-1) def test_edges_iter(self): 
G=self.K3 assert_equal(sorted(G.edges_iter()), [(0,1),(0,2),(1,0),(1,2),(2,0),(2,1)]) assert_equal(sorted(G.edges_iter(0)),[(0,1),(0,2)]) def test_edges_data(self): G=self.K3 assert_equal(sorted(G.edges(data=True)), [(0,1,{}),(0,2,{}),(1,0,{}),(1,2,{}),(2,0,{}),(2,1,{})]) assert_equal(sorted(G.edges(0,data=True)),[(0,1,{}),(0,2,{})]) assert_raises((KeyError,networkx.NetworkXError), G.neighbors,-1) def test_out_edges(self): G=self.K3 assert_equal(sorted(G.out_edges()), [(0,1),(0,2),(1,0),(1,2),(2,0),(2,1)]) assert_equal(sorted(G.out_edges(0)),[(0,1),(0,2)]) assert_raises((KeyError,networkx.NetworkXError), G.out_edges,-1) def test_out_edges_iter(self): G=self.K3 assert_equal(sorted(G.out_edges_iter()), [(0,1),(0,2),(1,0),(1,2),(2,0),(2,1)]) assert_equal(sorted(G.edges_iter(0)),[(0,1),(0,2)]) def test_out_edges_dir(self): G=self.P3 assert_equal(sorted(G.out_edges()),[(0, 1), (1, 2)]) assert_equal(sorted(G.out_edges(0)),[(0, 1)]) assert_equal(sorted(G.out_edges(2)),[]) def test_out_edges_iter_dir(self): G=self.P3 assert_equal(sorted(G.out_edges_iter()),[(0, 1), (1, 2)]) assert_equal(sorted(G.out_edges_iter(0)),[(0, 1)]) assert_equal(sorted(G.out_edges_iter(2)),[]) def test_in_edges_dir(self): G=self.P3 assert_equal(sorted(G.in_edges()),[(0, 1), (1, 2)]) assert_equal(sorted(G.in_edges(0)),[]) assert_equal(sorted(G.in_edges(2)),[(1,2)]) def test_in_edges_iter_dir(self): G=self.P3 assert_equal(sorted(G.in_edges_iter()),[(0, 1), (1, 2)]) assert_equal(sorted(G.in_edges_iter(0)),[]) assert_equal(sorted(G.in_edges_iter(2)),[(1,2)]) def test_degree(self): G=self.K3 assert_equal(G.degree(),[4,4,4]) assert_equal(G.degree(with_labels=True),{0:4,1:4,2:4}) assert_equal(G.degree(0),4) assert_equal(G.degree(0,with_labels=True),{0:4}) assert_raises((KeyError,networkx.NetworkXError), G.degree,-1) def test_degree_iter(self): G=self.K3 assert_equal(list(G.degree_iter()),[(0,4),(1,4),(2,4)]) assert_equal(dict(G.degree_iter()),{0:4,1:4,2:4}) assert_equal(list(G.degree_iter(0)),[(0,4)]) def test_in_degree(self): G=self.K3 assert_equal(G.in_degree(),[2,2,2]) assert_equal(G.in_degree(with_labels=True),{0:2,1:2,2:2}) assert_equal(G.in_degree(0),2) assert_equal(G.in_degree(0,with_labels=True),{0:2}) assert_raises((KeyError,networkx.NetworkXError), G.in_degree,-1) def test_in_degree_iter(self): G=self.K3 assert_equal(list(G.in_degree_iter()),[(0,2),(1,2),(2,2)]) assert_equal(dict(G.in_degree_iter()),{0:2,1:2,2:2}) assert_equal(list(G.in_degree_iter(0)),[(0,2)]) def test_out_degree(self): G=self.K3 assert_equal(G.out_degree(),[2,2,2]) assert_equal(G.out_degree(with_labels=True),{0:2,1:2,2:2}) assert_equal(G.out_degree(0),2) assert_equal(G.out_degree(0,with_labels=True),{0:2}) assert_raises((KeyError,networkx.NetworkXError), G.out_degree,-1) def test_out_degree_iter(self): G=self.K3 assert_equal(list(G.out_degree_iter()),[(0,2),(1,2),(2,2)]) assert_equal(dict(G.out_degree_iter()),{0:2,1:2,2:2}) assert_equal(list(G.out_degree_iter(0)),[(0,2)]) def test_size(self): G=self.K3 assert_equal(G.size(),6) assert_equal(G.number_of_edges(),6)
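
# --- Illustrative sketch (not part of the test suite) -----------------------
# The fixtures above build K3 and P3 by assigning adj/succ/pred dictionaries
# directly, which exercises the internal representation.  For comparison, the
# same digraphs built through the public (NetworkX 1.x era) API; `networkx` is
# already imported at the top of this module.
def _public_api_example():
    K3 = networkx.DiGraph()
    K3.add_edges_from((u, v) for u in range(3) for v in range(3) if u != v)

    P3 = networkx.DiGraph()
    P3.add_edges_from([(0, 1), (1, 2)])

    assert sorted(K3.edges()) == [(0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)]
    assert sorted(P3.edges()) == [(0, 1), (1, 2)]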
#!/usr/bin/python2.7 # -*- coding: utf-8 -*- # vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Interface for the Smartbits SAI method. """ import sys, os, string from UserList import UserList from UserDict import UserDict from UserString import UserString from pycopia import ipv4 from pycopia.smartbits.UserInt import UserInt from pycopia.smartbits.UserLong import UserLong from pycopia.smartbits.UserFloat import UserFloat from pycopia.smartbits import SMBAddress def IF(test, trueval, falseval): if test: return trueval else: return falseval class SAIError(Exception): pass # basic types of SAI objects follow. These are the objects that are # referred to in the data (*.txt) files. class Boolean(UserInt): def __str__(self): if self.data: return "yes" else: return "no" yes = on = Boolean(1) no = off = Boolean(0) String = UserString Integer = UserInt Float = UserFloat class MACAddress(UserLong): def __init__(self, data=0L, increment=0, bitoffset=0, length=6): self.length = length # octets self.increment = increment self.bitoffset = bitoffset if type(data) is type(""): self.data = self._handle_str(data) elif isinstance(data, MACAddress): self.data = data.data self.increment = data.increment self.bitoffset = data.bitoffset self.length = data.length else: self.data = long(data) def __str__(self): cv = [] val = self.data for i in xrange(self.length): cv.insert(0, hex((val >> (i * 8)) & 0xff)[2:-1]) return "%s%s%s" % (".".join(cv), IF(self.increment, IF(self.increment>1,"+%d"%self.increment ,"+"), ""), IF(self.bitoffset, "|%d" % self.bitoffset, "")) def __repr__(self): return "%s(%s)" % (self.__class__.__name__, hex(self.data)) def set_increment(self, increment): self.increment = increment def _handle_str(self, data): rv = 0L octets = data.split(".") assert len(octets) == self.length count = self.length - 1 octets = map(lambda oc: long(eval("0x%s" % oc)), octets) for octet in octets: rv += octet << (count * 8) count -= 1 return rv class IPAddress(ipv4.IPv4): def __init__(self, addr, increment=0, bitoffset=0): ipv4.IPv4.__init__(self, addr) # use slash notation feature for mask if isinstance(addr, IPAddress): self.__dict__["increment"] = addr.increment self.__dict__["bitoffset"] = addr.bitoffset else: self.__dict__["increment"] = increment self.__dict__["bitoffset"] = bitoffset def __str__(self): return "%s%s%s" % (ipv4.itodq(self._address), IF(self.increment, IF(self.increment>1,"+%d" % self.increment, "+"), ""), IF(self.bitoffset, "|%s" % self.bitoffset, "") ) def set_increment(self, increment): self.increment = increment class HexInteger(UserInt): def __init__(self, value): if type(value) is type(""): self.data = eval("0x%s" % value) else: self.data = int(value) def __str__(self): return hex(self.data)[2:] class Endpoint: def __init__(self, start, end=None): self.start = start self.end = end def __str__(self): if self.end is None: return str(self.start) else: return "%s:%s" % (self.start, self.end) def __repr__(self): return "Endpoint(%r, %r)" % (self.start, self.end) 
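
# --- Illustrative usage (assumed; not part of the SAI module) ---------------
# A small sketch of how the value types defined above render themselves in the
# textual form consumed by the SAI interpreter.  The addresses and increments
# are made up for the example, and the printed forms noted in the comments are
# approximate.
def _sai_types_example():
    mac = MACAddress("00.00.00.00.00.01", increment=1)
    src = Endpoint(IPAddress("10.0.0.1", increment=1))
    dst = Endpoint(IPAddress("10.0.1.1"), IPAddress("10.0.1.10"))
    # roughly: ("0.0.0.0.0.1+", "10.0.0.1+", "10.0.1.1:10.0.1.10")
    return str(mac), str(src), str(dst)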
class Flow: def __init__(self, endpoint1, endpoint2=None, name=""): if isinstance(endpoint1, self.__class__): self.endpoint1 = endpoint1.endpoint1 self.endpoint2 = endpoint1.endpoint2 self.name = endpoint1.name else: self.endpoint1 = endpoint1 self.endpoint2 = endpoint2 self.name = name def __str__(self): return "%s->%s" % (self.endpoint1, self.endpoint2) def __repr__(self): return "Flow(%r, %r)" % (self.endpoint1, self.endpoint2) def set_name(self, name): self.name = name class MPLSLabel: """ MPLS Label (user-specified) in the form: Label:Experimental:TTL Disposition of fields: Label User-specified 20-bit label. Experimental User-specified 3-bit experimental value. TTL User-specified 8-bis value. All fields are interpreted as hexadecimal. The label sub-field of the first label will be overwritten by the value returned by the DUT, but other labels (if any) will contain the user-specified value in the label sub-field. """ def __init__(self, label, experimental=0, ttl=64): if isinstance(label, self.__class__): self.label = label.label self.experimental = label.experimental self.ttl = label.ttl else: self.label = label & 0xfffff self.experimental = experimental & 0x3 self.ttl = ttl & 0xff def __str__(self): return "%s:%s:%s" % (hex(self.label)[2:], hex(self.experimental)[2:], hex(self.ttl)[2:]) def __repr__(self): return "%s(%x, %x, %x)" % (self.__class__.__name__, self.label, self.experimental, self.ttl) class PayloadProt(UserInt): def __str__(self): if self: return "p%d" % (self.data) else: return "" class VariantField(UserInt): # implemented as a bitmap _flags = {"none":0, "i": 0x1, "m": 0x2, "p":0x4, "I":0x8, "M":0x10, "P":0x20} def __init__(self, data=0): if type(data) is type(""): if data == "none": self.data = 0 else: self.data = 0 self.set(data) else: UserInt.__init__(self, data) def __str__(self): s = [] if self.data: for name, bit in VariantField._flags.items(): if self.data & bit: s.append(name) return string.join(s, "") else: return "none" def set(self, flags): for flag in flags: self.data |= VariantField._flags[flag] def setall(self): for bit in VariantField._flags.values(): self.data |= bit def reset(self, flags): for flag in flags: self.data &= ~VariantField._flags[flag] def clear(self): self.data = 0 def get_attributes(self): return VariantField._flags.keys() class TCPFlag(HexInteger): pass class INETPort(UserInt): def __init__(self, data=9, increment=0): UserInt.__init__(self, data) if isinstance(data, INETPort): self.increment = data.increment else: self.increment = increment def __str__(self): return "%s%s" % (self.data, IF(self.increment, "+", "")) class _ArrayList(UserList): def __init__(self, data=None): if type(data) is type(""): # allow initialization of list with single string. self.data = [data] else: UserList.__init__(self, data) def __str__(self): # it seems SAI interpreter cannot handle lines that are to long. # so this will break up the lines with a backslash continuation # feature. 
str_list = map(str, self.data) # approx 20 items per line lines = [] for sect in range(int(len(str_list) / 20)+1): lines.append(string.join(str_list[sect*20:sect*20+20], " ")) return string.join(lines, " \\\n") def __repr__(self): return "%s(%r)" % (self.__class__.__name__, self.data) class FlowList(_ArrayList): pass class Buckets(_ArrayList): pass class FrameSizes(_ArrayList): pass class GroupList(_ArrayList): pass class Strings(_ArrayList): pass class FrameRange: def __init__(self, minimum=64, maximum=1500, increment=10): if isinstance(minimum, FrameRange): self.minimum = minimum.minimum self.maximum = minimum.maximum self.increment = minimum.increment else: self.minimum = minimum self.maximum = maximum self.increment = increment def __str__(self): return "%d:%d:%d" % (self.minimum, self.maximum, self.increment) class ReportFlags: """ Manage ReportAFGRTS parameter object. """ _flags = {"A":0, "F":1, "G":2, "R":3, "T":4, "S":5} # index numbers def __init__(self, data=None): self.clear() if type(data) is type(""): self.data = string.split(data, ":") assert len(self.data) == 6 def __str__(self): return string.join(self.data, ":") def set(self, flags): for flag in flags: self.data[ReportFlags._flags[flag]] = "1" def reset(self, flags): for flag in flags: self.data[ReportFlags._flags[flag]] = "0" def clear(self): self.data = ["1","1","1","0","0","0"] class SnapSCType: def __init__(self, starttime=2000, count=10): if type(starttime) is type(""): ss, cs = tuple(string.split(starttime, ":")) self.starttime = int(ss) self.count = int(cs) elif isinstance(starttime, SnapSCType): self.starttime = starttime.starttime self.count = starttime.count else: self.starttime = starttime self.count = count def __str__(self): return "%d:%d" % (self.starttime, self.count) class thruputRSBType: def __init__(self, range=0, step=0, binary=1): if type(range) is type(""): self.range, self.step, self.binary = tuple(map(int, string.split(range, ":"))) elif isinstance(range, thruputRSBType): self.range = range.range self.step = range.step self.binary = range.binary else: self.range = range self.step = step self.binary = binary def __str__(self): return "%d:%d:%d" % (self.range, self.step, self.binary) def __repr__(self): return "%s(%d, %d, %d)" % (self.__class__.__name__, self.range, self.step, self.binary) class thruputSMMRType: def __init__(self, start=1, min=0, max=100, resolution=1): if type(start) is type(""): self.start, self.min, self.max, self.resolution = tuple(map(int, string.split(start, ":"))) elif isinstance(start, thruputSMMRType): self.start = start.start self.min = start.min self.max = start.max self.resolution = start.resolution else: self.start = start self.min = min self.max = max self.resolution = resolution def __str__(self): return "%d:%d:%d:%d" % (self.start, self.min, self.max, self.resolution) class stepwisePLFTType: def __init__(self, ports=1, learning=1, flows=1, transmit=1): if type(ports) is type(""): self.ports, self.learning, self.flows, self.transmit = tuple(map(int, string.split(start, ":"))) elif isinstance(ports, stepwisePLFTType): self.ports = ports.ports self.learning = ports.learning self.flows = ports.flows self.transmit = ports.transmit else: self.ports = ports self.learning = learning self.flows = flows self.transmit = transmit def __str__(self): return "%d:%d:%d:%d" % (self.ports, self.learning, self.flows, self.transmit) ### end of SAI types ### Utility objects # A ParameterTable holds SAI table data. It is used internally by the # section objects. 
Lines starting with a '#' are comments. Each # non-comment line must contain four tab-delimited fields. Each field is # evaluated by the Python interpreter to convert them to python objects. # Those objects must exists in the namespace where this is run, or errors # will occur when reading in the file. # Field 1: name of attribute. This name becomes the first paramter given # to most O.append() methods. It selects the proper row data. # Field 2: description. A quoted string that describes the attribute. # useful for online help. # Field 3: default value. If a value other than None is given here it will # be used in the generated SAI file if it is not overridden by # explicitly setting it. # Field 4: An object that defines the permissible values for this # attribute. It may be a tuple containing a set of permissible # values. class ParameterTable: def __init__(self, fname): self.data = [] # to preserve order self.table = {} # the data files should be kept in the same directory as this # module. They will be automatically found then. FILE = os.path.join(os.path.dirname(__file__), fname) fp = open(FILE, "r") for line in fp.readlines(): if len(line) < 2 or line[0] == "#": continue name, desc, defval, types = tuple(string.split(line.strip(), "\t")) tr = TableRow(name, eval(desc), eval(defval), eval(types)) self.data.append(tr) self.table[name] = tr fp.close() def get(self, key, default=None): return self.table.get(key, default) def __getitem__(self, key): return self.table[key] def keys(self): return map(lambda o: o.name, self.data) def items(self): return self.table.items() # represents a row of data in the data files. The four fields are # represented here. class TableRow: def __init__(self, name, description, default, types): self.name = name self.description = description self.default = default self.types = types def __str__(self): return "%s\t%s\t%s\t%s" % (self.name, self.description, self.default, self.types) def __repr__(self): return "%r\t%r\t%r\t%r" % (self.name, self.description, self.default, self.types) # global function to verify parameter values def _check_value(types, val): typeoftypes = type(types) if typeoftypes is type(()): # recursively check objects in a tuple # if any pass, check is good (not efficient) cv = map(lambda ty, v=val: _check_value(ty, v), types) if yes in cv: return yes else: return no # if val in types: # return yes # else: # return no if typeoftypes is type(''): if val == types: return yes else: return no if typeoftypes is type(TableEntry): # class type try: testval = types(val) except: return no else: return yes # XXX check other values return yes # A TableEntry represents a particular kind of paramter line in the SAI # file. it allows setting the column values by name (named arguments), and # verifies that the supplied values make sense for that type as defined in # the data files. Printing the object emits the SAI parameter line. class TableEntry: def __init__(self, paramtype, table, **kwargs): self.paramtype = paramtype self.table = table self.values = {} self.set(**kwargs) # if a value has been supplied for a parameter, use that. If not, and # there is a default value defined in that paramter table, use that. # if the value is None, do not emit it. 
def __str__(self): s = [self.paramtype] for key in self.table.keys(): val = self.values.get(key, None) if val is None: val = self.table[key].default if val is not None: s.append(str(val)) return string.join(s, " ") def set(self, **kwargs): for key, val in kwargs.items(): te = self.table.get(key) if te is None: raise SAIError, "Invalid paramter name given: "+ key if _check_value(te.types, val): if type(te.types) is type(TableEntry): # class type, cast to class object self.values[key] = te.types(val) else: self.values[key] = val else: raise SAIError, "invalid value for %s (should be %r)" % (key, te.types) def get_value(self, name): rv = self.values.get(name, None) if rv is None: te = self.table.get(name) if te is None: raise ValueError, "no entry by the name %s." % (name) return te.default else: return rv class PortEntry(TableEntry): pass class EndPointEntry(TableEntry): pass class IPFlowEntry(TableEntry): pass class IPFlowEntry(TableEntry): pass class TCPFlowEntry(TableEntry): pass class UDPFlowEntry(TableEntry): pass class VIPFlowEntry(TableEntry): pass class VUDPFlowEntry(TableEntry): pass class VTCPFlowEntry(TableEntry): pass class LIPFlowEntry(TableEntry): pass class LUDPFlowEntry(TableEntry): pass class GroupEntry(TableEntry): pass # headings = """ # # %wireRate Step Speed Duplex AutoNg BurstSize AddressResolution # # H:S:P W St Sp D AutoNg B A IPaddress Gateway Netmask Name #""" #### Main API # The document sections. Most of them can inherit from the generic Section # class. those sections that have a list of Entry lines of arbitrary # length are objects of this type. Some sections are special, they contain # only keyword/value pairs (such as the testdefaults section). Those types # of sections have the keyword mapped to a section attribute. class Section(UserList): """ Abstract base class for most SAI sections. """ def __init__(self): UserList.__init__(self) self.tables = {} # _table_names entries relate a paramter line name to a table and # its Entry object name. The tables are defined in external text # files. for param, tabledata in self.__class__._table_names.items(): tablename, rowclass = tabledata self.tables[param] = (ParameterTable(tablename), rowclass) def __str__(self): s = map(lambda d: "\t%s\n" % (d), self.data) return self.__class__._header + "".join(s) + "\n" def add(self, entry_type, **kwargs): try: table, entryclass = self.tables[entry_type] self.data.append(entryclass(entry_type, table, **kwargs)) except IndexError: raise SAIError, "invalid parameter type. should be one of %r." % (self.__class__._table_names.keys(),) append = add def get_attributes(self): rv = [] for td in self.tables.values(): rv.extend(td[0].keys()) return rv class testdefaultsSection: """ TestDefaultsSection([parameters]) creates a 'testdefaults' SAI section. It will only emit an option line if you set a value different from the default. You can set the attribute by setting the instances attribute. 
ts = TestDefaultsSection() ts.sizewcrc = yes print ts or ts = TestDefaultsSection(sizewcrc = yes) sai.append(ts) print sai """ _header = "testdefaults\n" def __init__(self, **kwargs): self.__dict__["table"] = ParameterTable("table_testdefaults.txt") self.__dict__["values"] = kwargs def __str__(self): s = [] # only emit testdefaults if some parameter is different from the default for key, value in self.values.items(): if self.table[key].default != value: s.append("\t%s %s\n" % (key, value)) if s: return testdefaultsSection._header + string.join(s) + "\n" else: return "\n" def __setattr__(self, name, val): te = self.table.get(name, None) if te is None: raise AttributeError, "invalid testdefaults attribute" self.values[name] = val def get_attributes(self): return self.table.keys() class portsSection(Section): _header = "ports\n" _table_names = { "eth": ("table_ports_eth.txt", PortEntry), "pos": ("table_ports_pos.txt", PortEntry), } class resolve_endpointsSection(Section): _header = "resolve_endpoints\n" _table_names = {"node": ("table_resolve_endpoints.txt", EndPointEntry)} class defineflowsSection(Section): _header = "defineflows\n" _table_names = { # ethernet or POS "IP": ("table_flows_IP.txt", IPFlowEntry), "TCP": ("table_flows_TCP.txt", TCPFlowEntry), "UDP": ("table_flows_UDP.txt", UDPFlowEntry), "SIP": ("table_flows_IP.txt", IPFlowEntry), # this was in an example, but not the docs! # VLAN tagged "VIP": ("table_flows_VIP.txt", VIPFlowEntry), "VUDP": ("table_flows_VUDP.txt", VUDPFlowEntry), "VTCP": ("table_flows_VTCP.txt", VTCPFlowEntry), # MPLS labeled "LIP": ("table_flows_LIP.txt", LIPFlowEntry), "LUDP": ("table_flows_LUDP.txt", LUDPFlowEntry), } class definegroupsSection(Section): _header = "definegroups\n" _table_names = {"group": ("table_definegroups.txt", GroupEntry)} class flowtestSection: _serial = 1 def __init__(self, **kwargs): self.__dict__["table"] = ParameterTable("table_flowtest.txt") self.__dict__["values"] = {} self.__dict__["serial"] = flowtestSection._serial self.set(**kwargs) flowtestSection._serial += 1 def __str__(self): s = ["flowtest %d\n" % (self.serial)] valuenames = self.values.keys() for key in valuenames: s.append("\t%s %s\n" % (key, self.values[key])) for key, te in self.table.items(): if te.default is not None and key not in valuenames: s.append("\t%s %s\n" % (key, te.default)) return string.join(s) + "\n" def __setattr__(self, key, val): te = self.table.get(key, None) if te is None: raise AttributeError, "invalid parameter name" if _check_value(te.types, val): if type(te.types) is type(flowtestSection): # class type, cast to class object self.values[key] = te.types(val) else: self.values[key] = val else: raise SAIError, "invalid value for %s (should be %r)" % (key, te.types) def __getattr__(self, key): rv = self.values.get(key, None) if rv is None: rv = self.table.get(key, None) if rv is None: raise AttributeError, "AttributeError: no attribute '%s'" % (key) return rv.default return rv def set(self, **kwargs): for key, val in kwargs.items(): setattr(self, key, val) def get_attributes(self): return self.table.keys() def get_type(self, name): return self.table[name].types ### The top level container object. class SAI: """ SAI() The top-level SAI document. Instantiate this, then add section objects to it with the append() method. Use any method to "stringify" this object to produce the document. 
""" def __init__(self, application="smartflow 1.30"): self.data = [] self.application = application def __str__(self): s = ["sai %s\n\n" % (self.application)] s.extend(map(str, self.data)) return string.join(s, "") def append(self, sect): self.data.append(sect) add = append def insert(self, index, val): self.data.insert(index, val)
# Copyright 2014 OneConvergence, Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # @author: Kedar Kulkarni, One Convergence, Inc. """Implementation of OneConvergence Neutron Plugin.""" from oslo.config import cfg from neutron.agent import securitygroups_rpc as sg_rpc from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api from neutron.common import constants as q_const from neutron.common import exceptions as nexception from neutron.common import rpc as q_rpc from neutron.common import topics from neutron.db import agents_db from neutron.db import agentschedulers_db from neutron.db import db_base_plugin_v2 from neutron.db import dhcp_rpc_base from neutron.db import external_net_db from neutron.db import extraroute_db from neutron.db import l3_agentschedulers_db from neutron.db import l3_gwmode_db from neutron.db import l3_rpc_base from neutron.db import portbindings_base from neutron.db import quota_db # noqa from neutron.db import securitygroups_rpc_base as sg_db_rpc from neutron.extensions import portbindings from neutron.openstack.common import excutils from neutron.openstack.common import importutils from neutron.openstack.common import log as logging from neutron.openstack.common import rpc from neutron.plugins.common import constants as svc_constants import neutron.plugins.oneconvergence.lib.config # noqa import neutron.plugins.oneconvergence.lib.exception as nvsdexception import neutron.plugins.oneconvergence.lib.nvsd_db as nvsd_db from neutron.plugins.oneconvergence.lib import nvsdlib as nvsd_lib LOG = logging.getLogger(__name__) IPv6 = 6 class NVSDPluginRpcCallbacks(dhcp_rpc_base.DhcpRpcCallbackMixin, l3_rpc_base.L3RpcCallbackMixin, sg_db_rpc.SecurityGroupServerRpcCallbackMixin): RPC_API_VERSION = '1.1' def create_rpc_dispatcher(self): """Get the rpc dispatcher for this manager.""" return q_rpc.PluginRpcDispatcher([self, agents_db.AgentExtRpcCallback()]) @staticmethod def get_port_from_device(device): port = nvsd_db.get_port_from_device(device) if port: port['device'] = device return port class NVSDPluginV2AgentNotifierApi(rpc.proxy.RpcProxy, sg_rpc.SecurityGroupAgentRpcApiMixin): BASE_RPC_API_VERSION = '1.0' def __init__(self, topic): super(NVSDPluginV2AgentNotifierApi, self).__init__( topic=topic, default_version=self.BASE_RPC_API_VERSION) self.topic_port_update = topics.get_topic_name(topic, topics.PORT, topics.UPDATE) def port_update(self, context, port): self.fanout_cast(context, self.make_msg('port_update', port=port, topic=self.topic_port_update)) class OneConvergencePluginV2(db_base_plugin_v2.NeutronDbPluginV2, extraroute_db.ExtraRoute_db_mixin, l3_agentschedulers_db.L3AgentSchedulerDbMixin, agentschedulers_db.DhcpAgentSchedulerDbMixin, external_net_db.External_net_db_mixin, l3_gwmode_db.L3_NAT_db_mixin, portbindings_base.PortBindingBaseMixin, sg_db_rpc.SecurityGroupServerRpcMixin): """L2 Virtual Network Plugin. 
OneConvergencePluginV2 is a Neutron plugin that provides L2 Virtual Network functionality. """ __native_bulk_support = True __native_pagination_support = True __native_sorting_support = True _supported_extension_aliases = ['agent', 'binding', 'dhcp_agent_scheduler', 'ext-gw-mode', 'external-net', 'extraroute', 'l3_agent_scheduler', 'quotas', 'router', 'security-group' ] @property def supported_extension_aliases(self): if not hasattr(self, '_aliases'): aliases = self._supported_extension_aliases[:] sg_rpc.disable_security_group_extension_by_config(aliases) self._aliases = aliases return self._aliases def __init__(self): super(OneConvergencePluginV2, self).__init__() self.oneconvergence_init() self.base_binding_dict = { portbindings.VIF_TYPE: portbindings.VIF_TYPE_OVS, portbindings.VIF_DETAILS: { portbindings.CAP_PORT_FILTER: 'security-group' in self.supported_extension_aliases}} portbindings_base.register_port_dict_function() self.setup_rpc() self.network_scheduler = importutils.import_object( cfg.CONF.network_scheduler_driver) self.router_scheduler = importutils.import_object( cfg.CONF.router_scheduler_driver) def oneconvergence_init(self): """Initialize the connections and set the log levels for the plugin.""" self.nvsdlib = nvsd_lib.NVSDApi() self.nvsdlib.set_connection() def setup_rpc(self): # RPC support self.service_topics = {svc_constants.CORE: topics.PLUGIN, svc_constants.L3_ROUTER_NAT: topics.L3PLUGIN} self.conn = rpc.create_connection(new=True) self.notifier = NVSDPluginV2AgentNotifierApi(topics.AGENT) self.agent_notifiers[q_const.AGENT_TYPE_DHCP] = ( dhcp_rpc_agent_api.DhcpAgentNotifyAPI() ) self.agent_notifiers[q_const.AGENT_TYPE_L3] = ( l3_rpc_agent_api.L3AgentNotify ) self.callbacks = NVSDPluginRpcCallbacks() self.dispatcher = self.callbacks.create_rpc_dispatcher() for svc_topic in self.service_topics.values(): self.conn.create_consumer(svc_topic, self.dispatcher, fanout=False) # Consume from all consumers in a thread self.conn.consume_in_thread() def create_network(self, context, network): tenant_id = self._get_tenant_id_for_create( context, network['network']) self._ensure_default_security_group(context, tenant_id) net = self.nvsdlib.create_network(network['network']) network['network']['id'] = net['id'] try: neutron_net = super(OneConvergencePluginV2, self).create_network(context, network) #following call checks whether the network is external or not and #if it is external then adds this network to externalnetworks #table of neutron db self._process_l3_create(context, neutron_net, network['network']) except nvsdexception.NVSDAPIException: with excutils.save_and_reraise_exception(): self.nvsdlib.delete_network(net) return neutron_net def update_network(self, context, net_id, network): with context.session.begin(subtransactions=True): neutron_net = super(OneConvergencePluginV2, self).update_network(context, net_id, network) self.nvsdlib.update_network(neutron_net, network['network']) # updates neutron database e.g. externalnetworks table. 
self._process_l3_update(context, neutron_net, network['network']) return neutron_net def delete_network(self, context, net_id): with context.session.begin(subtransactions=True): network = self._get_network(context, net_id) #get all the subnets under the network to delete them subnets = self._get_subnets_by_network(context, net_id) super(OneConvergencePluginV2, self).delete_network(context, net_id) self.nvsdlib.delete_network(network, subnets) def create_subnet(self, context, subnet): if subnet['subnet']['ip_version'] == IPv6: raise nexception.InvalidInput( error_message="NVSDPlugin doesn't support IPv6.") neutron_subnet = super(OneConvergencePluginV2, self).create_subnet(context, subnet) try: self.nvsdlib.create_subnet(neutron_subnet) except nvsdexception.NVSDAPIException: with excutils.save_and_reraise_exception(): #Log the message and delete the subnet from the neutron super(OneConvergencePluginV2, self).delete_subnet(context, neutron_subnet['id']) LOG.error(_("Failed to create subnet, " "deleting it from neutron")) return neutron_subnet def delete_subnet(self, context, subnet_id): neutron_subnet = self._get_subnet(context, subnet_id) with context.session.begin(subtransactions=True): super(OneConvergencePluginV2, self).delete_subnet(context, subnet_id) self.nvsdlib.delete_subnet(neutron_subnet) def update_subnet(self, context, subnet_id, subnet): with context.session.begin(subtransactions=True): neutron_subnet = super(OneConvergencePluginV2, self).update_subnet(context, subnet_id, subnet) self.nvsdlib.update_subnet(neutron_subnet, subnet) return neutron_subnet def create_port(self, context, port): self._ensure_default_security_group_on_port(context, port) sgids = self._get_security_groups_on_port(context, port) network = {} network_id = port['port']['network_id'] with context.session.begin(subtransactions=True): # Invoke the Neutron API for creating port neutron_port = super(OneConvergencePluginV2, self).create_port(context, port) self._process_portbindings_create_and_update(context, port['port'], neutron_port) self._process_port_create_security_group(context, neutron_port, sgids) if port['port']['device_owner'] in ('network:router_gateway', 'network:floatingip'): # for l3 requests, tenant_id will be None/'' network = self._get_network(context, network_id) tenant_id = network['tenant_id'] else: tenant_id = port['port']['tenant_id'] port_id = neutron_port['id'] try: self.nvsdlib.create_port(tenant_id, neutron_port) except nvsdexception.NVSDAPIException: with excutils.save_and_reraise_exception(): LOG.error(_("Deleting newly created " "neutron port %s"), port_id) super(OneConvergencePluginV2, self).delete_port(context, port_id) self.notify_security_groups_member_updated(context, neutron_port) return neutron_port def update_port(self, context, port_id, port): with context.session.begin(subtransactions=True): old_port = super(OneConvergencePluginV2, self).get_port(context, port_id) neutron_port = super(OneConvergencePluginV2, self).update_port(context, port_id, port) if neutron_port['tenant_id'] == '': network = self._get_network(context, neutron_port['network_id']) tenant_id = network['tenant_id'] else: tenant_id = neutron_port['tenant_id'] self.nvsdlib.update_port(tenant_id, neutron_port, port['port']) self._process_portbindings_create_and_update(context, port['port'], neutron_port) need_port_update_notify = self.update_security_group_on_port( context, port_id, port, old_port, neutron_port) if need_port_update_notify: self.notifier.port_update(context, neutron_port) return neutron_port def 
delete_port(self, context, port_id, l3_port_check=True): if l3_port_check: self.prevent_l3_port_deletion(context, port_id) with context.session.begin(subtransactions=True): neutron_port = super(OneConvergencePluginV2, self).get_port(context, port_id) self._delete_port_security_group_bindings(context, port_id) self.disassociate_floatingips(context, port_id) super(OneConvergencePluginV2, self).delete_port(context, port_id) network = self._get_network(context, neutron_port['network_id']) neutron_port['tenant_id'] = network['tenant_id'] self.nvsdlib.delete_port(port_id, neutron_port) self.notify_security_groups_member_updated(context, neutron_port) def create_floatingip(self, context, floatingip): neutron_floatingip = super(OneConvergencePluginV2, self).create_floatingip(context, floatingip) try: self.nvsdlib.create_floatingip(neutron_floatingip) except nvsdexception.NVSDAPIException: with excutils.save_and_reraise_exception(): LOG.error(_("Failed to create floatingip")) super(OneConvergencePluginV2, self).delete_floatingip(context, neutron_floatingip['id']) return neutron_floatingip def update_floatingip(self, context, fip_id, floatingip): with context.session.begin(subtransactions=True): neutron_floatingip = super(OneConvergencePluginV2, self).update_floatingip(context, fip_id, floatingip) self.nvsdlib.update_floatingip(neutron_floatingip, floatingip) return neutron_floatingip def delete_floatingip(self, context, floating_ip_id): with context.session.begin(subtransactions=True): floating_ip = self._get_floatingip(context, floating_ip_id) super(OneConvergencePluginV2, self).delete_floatingip(context, floating_ip_id) self.nvsdlib.delete_floatingip(floating_ip) def create_router(self, context, router): neutron_router = super(OneConvergencePluginV2, self).create_router(context, router) try: self.nvsdlib.create_router(neutron_router) except nvsdexception.NVSDAPIException: with excutils.save_and_reraise_exception(): LOG.error(_("Failed to create router")) super(OneConvergencePluginV2, self).delete_router(context, neutron_router['id']) return neutron_router def update_router(self, context, router_id, router): with context.session.begin(subtransactions=True): neutron_router = super(OneConvergencePluginV2, self).update_router(context, router_id, router) self.nvsdlib.update_router(neutron_router) return neutron_router def delete_router(self, context, router_id): tenant_id = self._get_router(context, router_id)['tenant_id'] with context.session.begin(subtransactions=True): super(OneConvergencePluginV2, self).delete_router(context, router_id) self.nvsdlib.delete_router(tenant_id, router_id)
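# --- Illustrative sketch (not part of the original plugin) ---
# Most of the create_* methods above follow one pattern: create the resource
# on one side (usually the Neutron DB), mirror it on the NVSD controller, and
# undo the first step if the controller call raises NVSDAPIException
# (create_network reverses the order).  A generic version of that pattern,
# using placeholder callables (db_create/backend_create/db_delete are not
# real plugin methods), could look like this:
def _create_with_rollback(db_create, backend_create, db_delete):
    obj = db_create()
    try:
        backend_create(obj)
    except nvsdexception.NVSDAPIException:
        with excutils.save_and_reraise_exception():
            LOG.error(_("Backend create failed, rolling back %s"), obj['id'])
            db_delete(obj['id'])
    return obj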
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import unittest import os import glob from pyspark.sql import SparkSession from pyspark_llap.sql import HiveWarehouseBuilder from pyspark_llap.sql.session import CreateTableBuilder, HiveWarehouseSessionImpl TEST_USER = "userX" TEST_PASSWORD = "passwordX" TEST_HS2_URL = "jdbc:hive2://nohost:10084" TEST_DBCP2_CONF = "defaultQueryTimeout=100" TEST_EXEC_RESULTS_MAX = 12345 TEST_DEFAULT_DB = "default12345" root = os.path.abspath( os.path.join(os.path.dirname(os.path.realpath(__file__)), "../../../target")) basepath = glob.glob("%s/scala-*" % root) assert len(basepath) > 0, "Build the package first. ./target/scala-* directory was not found." basepath = basepath[0] jarpath = glob.glob("%s/hive-warehouse-connector-assembly-*" % basepath) assert len(jarpath) == 1, \ "Multiple assemply jars were detected in ./target/scala-* directory or not found %s. " \ "Please clean up and build again." % jarpath jarpath = jarpath[0] testjarpath = glob.glob("%s/hive-warehouse-connector*tests.jar" % basepath) assert len(testjarpath) == 1, \ "Multiple test:package jars were detected in ./target/scala-* directory or not found %s. " \ "Please clean up and build again." % testjarpath testjarpath = testjarpath[0] class HiveWarehouseBuilderTest(unittest.TestCase): @classmethod def setUpClass(cls): cls.spark = SparkSession.builder \ .master("local[4]") \ .appName(cls.__name__) \ .config( "spark.driver.extraClassPath", "%s:%s" % (jarpath, testjarpath)) \ .config( "spark.executor.extraClassPath", "%s:%s" % (jarpath, testjarpath)) \ .getOrCreate() try: cls.spark._jvm.com.hortonworks.spark.sql.hive.llap.MockConnection() except: cls.tearDownClass() raise Exception("PySpark LLAP tests are dependent on mock classes defined in test" "codes. 
These should be compiled together, for example, by " "'sbt test:package'.") @classmethod def tearDownClass(cls): cls.spark.stop() conf_pairs = { u'spark.datasource.hive.warehouse.password': TEST_PASSWORD, u'spark.datasource.hive.warehouse.dbcp2.conf': TEST_DBCP2_CONF, u'spark.datasource.hive.warehouse.default.db': TEST_DEFAULT_DB, u'spark.datasource.hive.warehouse.user.name': TEST_USER, u'spark.datasource.hive.warehouse.exec.results.max': str(TEST_EXEC_RESULTS_MAX), } def test_all_builder_config(self): session = self.spark jstate = HiveWarehouseBuilder \ .session(session) \ .userPassword(TEST_USER, TEST_PASSWORD) \ .hs2url(TEST_HS2_URL) \ .dbcp2Conf(TEST_DBCP2_CONF) \ .maxExecResults(TEST_EXEC_RESULTS_MAX) \ .defaultDB(TEST_DEFAULT_DB) \ ._jhwbuilder.sessionStateForTest() hive = session._jvm.com.hortonworks.spark.sql.hive.llap.MockHiveWarehouseSessionImpl(jstate) self.assertEqual(dict(hive.sessionState().getProps()), HiveWarehouseBuilderTest.conf_pairs) def test_all_config(self): session = self.spark for key, value in HiveWarehouseBuilderTest.conf_pairs.items(): session.conf.set(key, value) for key in HiveWarehouseBuilderTest.conf_pairs.keys(): # conf().getOption(key).get(), is used in 'MockHiveWarehouseSessionImpl'. self.assertEqual( str(session._jsparkSession.conf().getOption(key).get()), str(HiveWarehouseBuilderTest.conf_pairs[key])) def test_session_build(self): session = self.spark self.assertTrue(HiveWarehouseBuilder.session(session).build() is not None) def test_new_entry_point(self): import pyspark_llap session = self.spark HIVESERVER2_JDBC_URL = "spark.sql.hive.hiveserver2.jdbc.url" session.conf.set(HIVESERVER2_JDBC_URL, "test") hive = pyspark_llap.HiveWarehouseSession.session(session) \ .userPassword(TEST_USER, TEST_PASSWORD) \ .dbcp2Conf(TEST_DBCP2_CONF) \ .maxExecResults(TEST_EXEC_RESULTS_MAX) \ .defaultDB(TEST_DEFAULT_DB).build() self.assertEqual(hive.session(), session) class HiveWarehouseSessionHiveQlTest(unittest.TestCase): @classmethod def setUpClass(cls): cls.spark = SparkSession.builder \ .master("local[4]") \ .appName(cls.__name__) \ .config( "spark.driver.extraClassPath", "%s:%s" % (jarpath, testjarpath)) \ .config( "spark.executor.extraClassPath", "%s:%s" % (jarpath, testjarpath)) \ .getOrCreate() try: cls.spark._jvm.com.hortonworks.spark.sql.hive.llap.MockConnection() except: cls.tearDownClass() raise Exception("PySpark LLAP tests are dependent on mock classes defined in test" "codes. 
These should be compiled together, for example, by " "'sbt test:package'.") session = cls.spark jstate = HiveWarehouseBuilder \ .session(session) \ .userPassword(TEST_USER, TEST_PASSWORD) \ .hs2url(TEST_HS2_URL) \ .dbcp2Conf(TEST_DBCP2_CONF) \ .maxExecResults(TEST_EXEC_RESULTS_MAX) \ .defaultDB(TEST_DEFAULT_DB) \ ._jhwbuilder.sessionStateForTest() cls.hive = HiveWarehouseSessionImpl( session, session._jvm.com.hortonworks.spark.sql.hive.llap.MockHiveWarehouseSessionImpl(jstate)) cls.mockExecuteResultSize = session._jvm.com.hortonworks.spark.sql.hive.llap \ .MockHiveWarehouseSessionImpl.testFixture().getData().size() cls.RESULT_SIZE = session._jvm.com.hortonworks.spark.sql.hive.llap \ .MockHiveWarehouseDataReader.RESULT_SIZE @classmethod def tearDownClass(cls): cls.spark.stop() def test_execute_query(self): self.assertEqual(self.hive.executeQuery("SELECT * FROM t1").count(), self.RESULT_SIZE) def test_execute_update(self): self.assertEqual(self.hive.executeUpdate("SELECT * FROM t1"), True) def test_set_database(self): self.hive.setDatabase(TEST_DEFAULT_DB) def test_describe_table(self): self.assertEqual(self.hive.describeTable("testTable").count(), self.mockExecuteResultSize) def test_create_database(self): self.hive.createDatabase(TEST_DEFAULT_DB, False) self.hive.createDatabase(TEST_DEFAULT_DB, True) def test_show_table(self): self.assertEqual(self.hive.showTables().count(), self.mockExecuteResultSize) def test_create_table(self): CreateTableBuilder(self.spark, self.hive.createTable("TestTable")._jtablebuilder) \ .ifNotExists() \ .column("id", "int") \ .column("val", "string") \ .partition("id", "int") \ .clusterBy(100, "val") \ .prop("key", "value") \ .create() if __name__ == "__main__": unittest.main()
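# --- Illustrative sketch (not part of the original tests) ---
# Against a live HiveServer2 the same builder chain applies; the URL,
# credentials and table name below are placeholders, not values from the
# tests above.
def _example_live_session():
    from pyspark.sql import SparkSession
    from pyspark_llap.sql import HiveWarehouseBuilder

    spark = SparkSession.builder.appName("hwc-example").getOrCreate()
    hive = HiveWarehouseBuilder.session(spark) \
        .userPassword("user", "password") \
        .hs2url("jdbc:hive2://hs2-host:10000") \
        .build()
    # executeQuery returns a DataFrame-like result; the tests above only
    # call .count() on it.
    return hive.executeQuery("SELECT * FROM t1")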
""" Unified interfaces to root finding algorithms. Functions --------- - root : find a root of a vector function. """ from __future__ import division, print_function, absolute_import __all__ = ['root'] import numpy as np from scipy._lib.six import callable from warnings import warn from .optimize import MemoizeJac, OptimizeResult, _check_unknown_options from .minpack import _root_hybr, leastsq from ._spectral import _root_df_sane from . import nonlin def root(fun, x0, args=(), method='hybr', jac=None, tol=None, callback=None, options=None): """ Find a root of a vector function. Parameters ---------- fun : callable A vector function to find a root of. x0 : ndarray Initial guess. args : tuple, optional Extra arguments passed to the objective function and its Jacobian. method : str, optional Type of solver. Should be one of - 'hybr' :ref:`(see here) <optimize.root-hybr>` - 'lm' :ref:`(see here) <optimize.root-lm>` - 'broyden1' :ref:`(see here) <optimize.root-broyden1>` - 'broyden2' :ref:`(see here) <optimize.root-broyden2>` - 'anderson' :ref:`(see here) <optimize.root-anderson>` - 'linearmixing' :ref:`(see here) <optimize.root-linearmixing>` - 'diagbroyden' :ref:`(see here) <optimize.root-diagbroyden>` - 'excitingmixing' :ref:`(see here) <optimize.root-excitingmixing>` - 'krylov' :ref:`(see here) <optimize.root-krylov>` - 'df-sane' :ref:`(see here) <optimize.root-dfsane>` jac : bool or callable, optional If `jac` is a Boolean and is True, `fun` is assumed to return the value of Jacobian along with the objective function. If False, the Jacobian will be estimated numerically. `jac` can also be a callable returning the Jacobian of `fun`. In this case, it must accept the same arguments as `fun`. tol : float, optional Tolerance for termination. For detailed control, use solver-specific options. callback : function, optional Optional callback function. It is called on every iteration as ``callback(x, f)`` where `x` is the current solution and `f` the corresponding residual. For all methods but 'hybr' and 'lm'. options : dict, optional A dictionary of solver options. E.g. `xtol` or `maxiter`, see :obj:`show_options()` for details. Returns ------- sol : OptimizeResult The solution represented as a ``OptimizeResult`` object. Important attributes are: ``x`` the solution array, ``success`` a Boolean flag indicating if the algorithm exited successfully and ``message`` which describes the cause of the termination. See `OptimizeResult` for a description of other attributes. See also -------- show_options : Additional options accepted by the solvers Notes ----- This section describes the available solvers that can be selected by the 'method' parameter. The default method is *hybr*. Method *hybr* uses a modification of the Powell hybrid method as implemented in MINPACK [1]_. Method *lm* solves the system of nonlinear equations in a least squares sense using a modification of the Levenberg-Marquardt algorithm as implemented in MINPACK [1]_. Method *df-sane* is a derivative-free spectral method. [3]_ Methods *broyden1*, *broyden2*, *anderson*, *linearmixing*, *diagbroyden*, *excitingmixing*, *krylov* are inexact Newton methods, with backtracking or full line searches [2]_. Each method corresponds to a particular Jacobian approximations. See `nonlin` for details. - Method *broyden1* uses Broyden's first Jacobian approximation, it is known as Broyden's good method. - Method *broyden2* uses Broyden's second Jacobian approximation, it is known as Broyden's bad method. 
- Method *anderson* uses (extended) Anderson mixing. - Method *Krylov* uses Krylov approximation for inverse Jacobian. It is suitable for large-scale problem. - Method *diagbroyden* uses diagonal Broyden Jacobian approximation. - Method *linearmixing* uses a scalar Jacobian approximation. - Method *excitingmixing* uses a tuned diagonal Jacobian approximation. .. warning:: The algorithms implemented for methods *diagbroyden*, *linearmixing* and *excitingmixing* may be useful for specific problems, but whether they will work may depend strongly on the problem. .. versionadded:: 0.11.0 References ---------- .. [1] More, Jorge J., Burton S. Garbow, and Kenneth E. Hillstrom. 1980. User Guide for MINPACK-1. .. [2] C. T. Kelley. 1995. Iterative Methods for Linear and Nonlinear Equations. Society for Industrial and Applied Mathematics. <https://archive.siam.org/books/kelley/fr16/> .. [3] W. La Cruz, J.M. Martinez, M. Raydan. Math. Comp. 75, 1429 (2006). Examples -------- The following functions define a system of nonlinear equations and its jacobian. >>> def fun(x): ... return [x[0] + 0.5 * (x[0] - x[1])**3 - 1.0, ... 0.5 * (x[1] - x[0])**3 + x[1]] >>> def jac(x): ... return np.array([[1 + 1.5 * (x[0] - x[1])**2, ... -1.5 * (x[0] - x[1])**2], ... [-1.5 * (x[1] - x[0])**2, ... 1 + 1.5 * (x[1] - x[0])**2]]) A solution can be obtained as follows. >>> from scipy import optimize >>> sol = optimize.root(fun, [0, 0], jac=jac, method='hybr') >>> sol.x array([ 0.8411639, 0.1588361]) """ if not isinstance(args, tuple): args = (args,) meth = method.lower() if options is None: options = {} if callback is not None and meth in ('hybr', 'lm'): warn('Method %s does not accept callback.' % method, RuntimeWarning) # fun also returns the jacobian if not callable(jac) and meth in ('hybr', 'lm'): if bool(jac): fun = MemoizeJac(fun) jac = fun.derivative else: jac = None # set default tolerances if tol is not None: options = dict(options) if meth in ('hybr', 'lm'): options.setdefault('xtol', tol) elif meth in ('df-sane',): options.setdefault('ftol', tol) elif meth in ('broyden1', 'broyden2', 'anderson', 'linearmixing', 'diagbroyden', 'excitingmixing', 'krylov'): options.setdefault('xtol', tol) options.setdefault('xatol', np.inf) options.setdefault('ftol', np.inf) options.setdefault('fatol', np.inf) if meth == 'hybr': sol = _root_hybr(fun, x0, args=args, jac=jac, **options) elif meth == 'lm': sol = _root_leastsq(fun, x0, args=args, jac=jac, **options) elif meth == 'df-sane': _warn_jac_unused(jac, method) sol = _root_df_sane(fun, x0, args=args, callback=callback, **options) elif meth in ('broyden1', 'broyden2', 'anderson', 'linearmixing', 'diagbroyden', 'excitingmixing', 'krylov'): _warn_jac_unused(jac, method) sol = _root_nonlin_solve(fun, x0, args=args, jac=jac, _method=meth, _callback=callback, **options) else: raise ValueError('Unknown solver %s' % method) return sol def _warn_jac_unused(jac, method): if jac is not None: warn('Method %s does not use the jacobian (jac).' % (method,), RuntimeWarning) def _root_leastsq(func, x0, args=(), jac=None, col_deriv=0, xtol=1.49012e-08, ftol=1.49012e-08, gtol=0.0, maxiter=0, eps=0.0, factor=100, diag=None, **unknown_options): """ Solve for least squares with Levenberg-Marquardt Options ------- col_deriv : bool non-zero to specify that the Jacobian function computes derivatives down the columns (faster, because there is no transpose operation). ftol : float Relative error desired in the sum of squares. xtol : float Relative error desired in the approximate solution. 
gtol : float Orthogonality desired between the function vector and the columns of the Jacobian. maxiter : int The maximum number of calls to the function. If zero, then 100*(N+1) is the maximum where N is the number of elements in x0. epsfcn : float A suitable step length for the forward-difference approximation of the Jacobian (for Dfun=None). If epsfcn is less than the machine precision, it is assumed that the relative errors in the functions are of the order of the machine precision. factor : float A parameter determining the initial step bound (``factor * || diag * x||``). Should be in interval ``(0.1, 100)``. diag : sequence N positive entries that serve as a scale factors for the variables. """ _check_unknown_options(unknown_options) x, cov_x, info, msg, ier = leastsq(func, x0, args=args, Dfun=jac, full_output=True, col_deriv=col_deriv, xtol=xtol, ftol=ftol, gtol=gtol, maxfev=maxiter, epsfcn=eps, factor=factor, diag=diag) sol = OptimizeResult(x=x, message=msg, status=ier, success=ier in (1, 2, 3, 4), cov_x=cov_x, fun=info.pop('fvec')) sol.update(info) return sol def _root_nonlin_solve(func, x0, args=(), jac=None, _callback=None, _method=None, nit=None, disp=False, maxiter=None, ftol=None, fatol=None, xtol=None, xatol=None, tol_norm=None, line_search='armijo', jac_options=None, **unknown_options): _check_unknown_options(unknown_options) f_tol = fatol f_rtol = ftol x_tol = xatol x_rtol = xtol verbose = disp if jac_options is None: jac_options = dict() jacobian = {'broyden1': nonlin.BroydenFirst, 'broyden2': nonlin.BroydenSecond, 'anderson': nonlin.Anderson, 'linearmixing': nonlin.LinearMixing, 'diagbroyden': nonlin.DiagBroyden, 'excitingmixing': nonlin.ExcitingMixing, 'krylov': nonlin.KrylovJacobian }[_method] if args: if jac: def f(x): return func(x, *args)[0] else: def f(x): return func(x, *args) else: f = func x, info = nonlin.nonlin_solve(f, x0, jacobian=jacobian(**jac_options), iter=nit, verbose=verbose, maxiter=maxiter, f_tol=f_tol, f_rtol=f_rtol, x_tol=x_tol, x_rtol=x_rtol, tol_norm=tol_norm, line_search=line_search, callback=_callback, full_output=True, raise_exception=False) sol = OptimizeResult(x=x) sol.update(info) return sol def _root_broyden1_doc(): """ Options ------- nit : int, optional Number of iterations to make. If omitted (default), make as many as required to meet tolerances. disp : bool, optional Print status to stdout on every iteration. maxiter : int, optional Maximum number of iterations to make. If more are needed to meet convergence, `NoConvergence` is raised. ftol : float, optional Relative tolerance for the residual. If omitted, not used. fatol : float, optional Absolute tolerance (in max-norm) for the residual. If omitted, default is 6e-6. xtol : float, optional Relative minimum step size. If omitted, not used. xatol : float, optional Absolute minimum step size, as determined from the Jacobian approximation. If the step size is smaller than this, optimization is terminated as successful. If omitted, not used. tol_norm : function(vector) -> scalar, optional Norm to use in convergence check. Default is the maximum norm. line_search : {None, 'armijo' (default), 'wolfe'}, optional Which type of a line search to use to determine the step size in the direction given by the Jacobian approximation. Defaults to 'armijo'. jac_options : dict, optional Options for the respective Jacobian approximation. alpha : float, optional Initial guess for the Jacobian is (-1/alpha). 
reduction_method : str or tuple, optional Method used in ensuring that the rank of the Broyden matrix stays low. Can either be a string giving the name of the method, or a tuple of the form ``(method, param1, param2, ...)`` that gives the name of the method and values for additional parameters. Methods available: - ``restart`` Drop all matrix columns. Has no extra parameters. - ``simple`` Drop oldest matrix column. Has no extra parameters. - ``svd`` Keep only the most significant SVD components. Extra parameters: - ``to_retain`` Number of SVD components to retain when rank reduction is done. Default is ``max_rank - 2``. max_rank : int, optional Maximum rank for the Broyden matrix. Default is infinity (ie., no rank reduction). """ pass def _root_broyden2_doc(): """ Options ------- nit : int, optional Number of iterations to make. If omitted (default), make as many as required to meet tolerances. disp : bool, optional Print status to stdout on every iteration. maxiter : int, optional Maximum number of iterations to make. If more are needed to meet convergence, `NoConvergence` is raised. ftol : float, optional Relative tolerance for the residual. If omitted, not used. fatol : float, optional Absolute tolerance (in max-norm) for the residual. If omitted, default is 6e-6. xtol : float, optional Relative minimum step size. If omitted, not used. xatol : float, optional Absolute minimum step size, as determined from the Jacobian approximation. If the step size is smaller than this, optimization is terminated as successful. If omitted, not used. tol_norm : function(vector) -> scalar, optional Norm to use in convergence check. Default is the maximum norm. line_search : {None, 'armijo' (default), 'wolfe'}, optional Which type of a line search to use to determine the step size in the direction given by the Jacobian approximation. Defaults to 'armijo'. jac_options : dict, optional Options for the respective Jacobian approximation. alpha : float, optional Initial guess for the Jacobian is (-1/alpha). reduction_method : str or tuple, optional Method used in ensuring that the rank of the Broyden matrix stays low. Can either be a string giving the name of the method, or a tuple of the form ``(method, param1, param2, ...)`` that gives the name of the method and values for additional parameters. Methods available: - ``restart`` Drop all matrix columns. Has no extra parameters. - ``simple`` Drop oldest matrix column. Has no extra parameters. - ``svd`` Keep only the most significant SVD components. Extra parameters: - ``to_retain`` Number of SVD components to retain when rank reduction is done. Default is ``max_rank - 2``. max_rank : int, optional Maximum rank for the Broyden matrix. Default is infinity (ie., no rank reduction). """ pass def _root_anderson_doc(): """ Options ------- nit : int, optional Number of iterations to make. If omitted (default), make as many as required to meet tolerances. disp : bool, optional Print status to stdout on every iteration. maxiter : int, optional Maximum number of iterations to make. If more are needed to meet convergence, `NoConvergence` is raised. ftol : float, optional Relative tolerance for the residual. If omitted, not used. fatol : float, optional Absolute tolerance (in max-norm) for the residual. If omitted, default is 6e-6. xtol : float, optional Relative minimum step size. If omitted, not used. xatol : float, optional Absolute minimum step size, as determined from the Jacobian approximation. 
If the step size is smaller than this, optimization is terminated as successful. If omitted, not used. tol_norm : function(vector) -> scalar, optional Norm to use in convergence check. Default is the maximum norm. line_search : {None, 'armijo' (default), 'wolfe'}, optional Which type of a line search to use to determine the step size in the direction given by the Jacobian approximation. Defaults to 'armijo'. jac_options : dict, optional Options for the respective Jacobian approximation. alpha : float, optional Initial guess for the Jacobian is (-1/alpha). M : float, optional Number of previous vectors to retain. Defaults to 5. w0 : float, optional Regularization parameter for numerical stability. Compared to unity, good values of the order of 0.01. """ pass def _root_linearmixing_doc(): """ Options ------- nit : int, optional Number of iterations to make. If omitted (default), make as many as required to meet tolerances. disp : bool, optional Print status to stdout on every iteration. maxiter : int, optional Maximum number of iterations to make. If more are needed to meet convergence, ``NoConvergence`` is raised. ftol : float, optional Relative tolerance for the residual. If omitted, not used. fatol : float, optional Absolute tolerance (in max-norm) for the residual. If omitted, default is 6e-6. xtol : float, optional Relative minimum step size. If omitted, not used. xatol : float, optional Absolute minimum step size, as determined from the Jacobian approximation. If the step size is smaller than this, optimization is terminated as successful. If omitted, not used. tol_norm : function(vector) -> scalar, optional Norm to use in convergence check. Default is the maximum norm. line_search : {None, 'armijo' (default), 'wolfe'}, optional Which type of a line search to use to determine the step size in the direction given by the Jacobian approximation. Defaults to 'armijo'. jac_options : dict, optional Options for the respective Jacobian approximation. alpha : float, optional initial guess for the jacobian is (-1/alpha). """ pass def _root_diagbroyden_doc(): """ Options ------- nit : int, optional Number of iterations to make. If omitted (default), make as many as required to meet tolerances. disp : bool, optional Print status to stdout on every iteration. maxiter : int, optional Maximum number of iterations to make. If more are needed to meet convergence, `NoConvergence` is raised. ftol : float, optional Relative tolerance for the residual. If omitted, not used. fatol : float, optional Absolute tolerance (in max-norm) for the residual. If omitted, default is 6e-6. xtol : float, optional Relative minimum step size. If omitted, not used. xatol : float, optional Absolute minimum step size, as determined from the Jacobian approximation. If the step size is smaller than this, optimization is terminated as successful. If omitted, not used. tol_norm : function(vector) -> scalar, optional Norm to use in convergence check. Default is the maximum norm. line_search : {None, 'armijo' (default), 'wolfe'}, optional Which type of a line search to use to determine the step size in the direction given by the Jacobian approximation. Defaults to 'armijo'. jac_options : dict, optional Options for the respective Jacobian approximation. alpha : float, optional initial guess for the jacobian is (-1/alpha). """ pass def _root_excitingmixing_doc(): """ Options ------- nit : int, optional Number of iterations to make. If omitted (default), make as many as required to meet tolerances. 
disp : bool, optional Print status to stdout on every iteration. maxiter : int, optional Maximum number of iterations to make. If more are needed to meet convergence, `NoConvergence` is raised. ftol : float, optional Relative tolerance for the residual. If omitted, not used. fatol : float, optional Absolute tolerance (in max-norm) for the residual. If omitted, default is 6e-6. xtol : float, optional Relative minimum step size. If omitted, not used. xatol : float, optional Absolute minimum step size, as determined from the Jacobian approximation. If the step size is smaller than this, optimization is terminated as successful. If omitted, not used. tol_norm : function(vector) -> scalar, optional Norm to use in convergence check. Default is the maximum norm. line_search : {None, 'armijo' (default), 'wolfe'}, optional Which type of a line search to use to determine the step size in the direction given by the Jacobian approximation. Defaults to 'armijo'. jac_options : dict, optional Options for the respective Jacobian approximation. alpha : float, optional Initial Jacobian approximation is (-1/alpha). alphamax : float, optional The entries of the diagonal Jacobian are kept in the range ``[alpha, alphamax]``. """ pass def _root_krylov_doc(): """ Options ------- nit : int, optional Number of iterations to make. If omitted (default), make as many as required to meet tolerances. disp : bool, optional Print status to stdout on every iteration. maxiter : int, optional Maximum number of iterations to make. If more are needed to meet convergence, `NoConvergence` is raised. ftol : float, optional Relative tolerance for the residual. If omitted, not used. fatol : float, optional Absolute tolerance (in max-norm) for the residual. If omitted, default is 6e-6. xtol : float, optional Relative minimum step size. If omitted, not used. xatol : float, optional Absolute minimum step size, as determined from the Jacobian approximation. If the step size is smaller than this, optimization is terminated as successful. If omitted, not used. tol_norm : function(vector) -> scalar, optional Norm to use in convergence check. Default is the maximum norm. line_search : {None, 'armijo' (default), 'wolfe'}, optional Which type of a line search to use to determine the step size in the direction given by the Jacobian approximation. Defaults to 'armijo'. jac_options : dict, optional Options for the respective Jacobian approximation. rdiff : float, optional Relative step size to use in numerical differentiation. method : {'lgmres', 'gmres', 'bicgstab', 'cgs', 'minres'} or function Krylov method to use to approximate the Jacobian. Can be a string, or a function implementing the same interface as the iterative solvers in `scipy.sparse.linalg`. The default is `scipy.sparse.linalg.lgmres`. inner_M : LinearOperator or InverseJacobian Preconditioner for the inner Krylov iteration. Note that you can use also inverse Jacobians as (adaptive) preconditioners. For example, >>> jac = BroydenFirst() >>> kjac = KrylovJacobian(inner_M=jac.inverse). If the preconditioner has a method named 'update', it will be called as ``update(x, f)`` after each nonlinear step, with ``x`` giving the current point, and ``f`` the current function value. inner_tol, inner_maxiter, ... Parameters to pass on to the "inner" Krylov solver. See `scipy.sparse.linalg.gmres` for details. outer_k : int, optional Size of the subspace kept across LGMRES nonlinear iterations. See `scipy.sparse.linalg.lgmres` for details. """ pass
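# --- Illustrative usage sketch (not part of the original module) ---
# The quasi-Newton solvers documented above take their tuning knobs through
# ``options``; ``jac_options`` is forwarded to the Jacobian approximation.
# The values below are illustrative only; ``root`` returns an OptimizeResult
# whether or not the iteration converges, since the nonlin path is invoked
# with raise_exception=False.
if __name__ == "__main__":
    from scipy import optimize

    def _fun(x):
        return [x[0] + 0.5 * (x[0] - x[1])**3 - 1.0,
                0.5 * (x[1] - x[0])**3 + x[1]]

    res = optimize.root(_fun, [0, 0], method='broyden1',
                        options={'fatol': 1e-10, 'maxiter': 100,
                                 'jac_options': {'alpha': 0.5}})
    print(res.x, res.success)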
# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Cache driver that uses xattr file tags and requires a filesystem that has atimes set. Assumptions =========== 1. Cache data directory exists on a filesytem that updates atime on reads ('noatime' should NOT be set) 2. Cache data directory exists on a filesystem that supports xattrs. This is optional, but highly recommended since it allows us to present ops with useful information pertaining to the cache, like human readable filenames and statistics. 3. `glance-prune` is scheduled to run as a periodic job via cron. This is needed to run the LRU prune strategy to keep the cache size within the limits set by the config file. Cache Directory Notes ===================== The image cache data directory contains the main cache path, where the active cache entries and subdirectories for handling partial downloads and errored-out cache images. The layout looks like: $image_cache_dir/ entry1 entry2 ... incomplete/ invalid/ queue/ """ from __future__ import absolute_import from contextlib import contextmanager import errno import os import stat import time from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils import xattr from glance.common import exception from glance.common import utils from glance import i18n from glance.image_cache.drivers import base LOG = logging.getLogger(__name__) _ = i18n._ _LE = i18n._LE _LI = i18n._LI _LW = i18n._LW CONF = cfg.CONF class Driver(base.Driver): """ Cache driver that uses xattr file tags and requires a filesystem that has atimes set. """ def configure(self): """ Configure the driver to use the stored configuration options Any store that needs special configuration should implement this method. If the store was not able to successfully configure itself, it should raise `exception.BadDriverConfiguration` """ # Here we set up the various file-based image cache paths # that we need in order to find the files in different states # of cache management. self.set_paths() # We do a quick attempt to write a user xattr to a temporary file # to check that the filesystem is even enabled to support xattrs image_cache_dir = self.base_dir fake_image_filepath = os.path.join(image_cache_dir, 'checkme') with open(fake_image_filepath, 'wb') as fake_file: fake_file.write("XXX") fake_file.flush() try: set_xattr(fake_image_filepath, 'hits', '1') except IOError as e: if e.errno == errno.EOPNOTSUPP: msg = (_("The device housing the image cache directory " "%(image_cache_dir)s does not support xattr. It is" " likely you need to edit your fstab and add the " "user_xattr option to the appropriate line for the" " device housing the cache directory.") % {'image_cache_dir': image_cache_dir}) LOG.error(msg) raise exception.BadDriverConfiguration(driver_name="xattr", reason=msg) else: # Cleanup after ourselves... 
if os.path.exists(fake_image_filepath): os.unlink(fake_image_filepath) def get_cache_size(self): """ Returns the total size in bytes of the image cache. """ sizes = [] for path in get_all_regular_files(self.base_dir): file_info = os.stat(path) sizes.append(file_info[stat.ST_SIZE]) return sum(sizes) def get_hit_count(self, image_id): """ Return the number of hits that an image has. :param image_id: Opaque image identifier """ if not self.is_cached(image_id): return 0 path = self.get_image_filepath(image_id) return int(get_xattr(path, 'hits', default=0)) def get_cached_images(self): """ Returns a list of records about cached images. """ LOG.debug("Gathering cached image entries.") entries = [] for path in get_all_regular_files(self.base_dir): image_id = os.path.basename(path) entry = {} entry['image_id'] = image_id file_info = os.stat(path) entry['last_modified'] = file_info[stat.ST_MTIME] entry['last_accessed'] = file_info[stat.ST_ATIME] entry['size'] = file_info[stat.ST_SIZE] entry['hits'] = self.get_hit_count(image_id) entries.append(entry) entries.sort() # Order by ID return entries def is_cached(self, image_id): """ Returns True if the image with the supplied ID has its image file cached. :param image_id: Image ID """ return os.path.exists(self.get_image_filepath(image_id)) def is_cacheable(self, image_id): """ Returns True if the image with the supplied ID can have its image file cached, False otherwise. :param image_id: Image ID """ # Make sure we're not already cached or caching the image return not (self.is_cached(image_id) or self.is_being_cached(image_id)) def is_being_cached(self, image_id): """ Returns True if the image with supplied id is currently in the process of having its image file cached. :param image_id: Image ID """ path = self.get_image_filepath(image_id, 'incomplete') return os.path.exists(path) def is_queued(self, image_id): """ Returns True if the image identifier is in our cache queue. """ path = self.get_image_filepath(image_id, 'queue') return os.path.exists(path) def delete_all_cached_images(self): """ Removes all cached image files and any attributes about the images """ deleted = 0 for path in get_all_regular_files(self.base_dir): delete_cached_file(path) deleted += 1 return deleted def delete_cached_image(self, image_id): """ Removes a specific cached image file and any attributes about the image :param image_id: Image ID """ path = self.get_image_filepath(image_id) delete_cached_file(path) def delete_all_queued_images(self): """ Removes all queued image files and any attributes about the images """ files = [f for f in get_all_regular_files(self.queue_dir)] for file in files: os.unlink(file) return len(files) def delete_queued_image(self, image_id): """ Removes a specific queued image file and any attributes about the image :param image_id: Image ID """ path = self.get_image_filepath(image_id, 'queue') if os.path.exists(path): os.unlink(path) def get_least_recently_accessed(self): """ Return a tuple containing the image_id and size of the least recently accessed cached file, or None if no cached files. """ stats = [] for path in get_all_regular_files(self.base_dir): file_info = os.stat(path) stats.append((file_info[stat.ST_ATIME], # access time file_info[stat.ST_SIZE], # size in bytes path)) # absolute path if not stats: return None stats.sort() return os.path.basename(stats[0][2]), stats[0][1] @contextmanager def open_for_write(self, image_id): """ Open a file for writing the image file for an image with supplied identifier. 
:param image_id: Image ID """ incomplete_path = self.get_image_filepath(image_id, 'incomplete') def set_attr(key, value): set_xattr(incomplete_path, key, value) def commit(): set_attr('hits', 0) final_path = self.get_image_filepath(image_id) LOG.debug("Fetch finished, moving " "'%(incomplete_path)s' to '%(final_path)s'", dict(incomplete_path=incomplete_path, final_path=final_path)) os.rename(incomplete_path, final_path) # Make sure that we "pop" the image from the queue... if self.is_queued(image_id): LOG.debug("Removing image '%s' from queue after " "caching it." % image_id) os.unlink(self.get_image_filepath(image_id, 'queue')) def rollback(e): set_attr('error', utils.exception_to_str(e)) invalid_path = self.get_image_filepath(image_id, 'invalid') LOG.debug("Fetch of cache file failed (%(e)s), rolling back by " "moving '%(incomplete_path)s' to " "'%(invalid_path)s'" % {'e': utils.exception_to_str(e), 'incomplete_path': incomplete_path, 'invalid_path': invalid_path}) os.rename(incomplete_path, invalid_path) try: with open(incomplete_path, 'wb') as cache_file: yield cache_file except Exception as e: with excutils.save_and_reraise_exception(): rollback(e) else: commit() finally: # if the generator filling the cache file neither raises an # exception, nor completes fetching all data, neither rollback # nor commit will have been called, so the incomplete file # will persist - in that case remove it as it is unusable # example: ^c from client fetch if os.path.exists(incomplete_path): rollback('incomplete fetch') @contextmanager def open_for_read(self, image_id): """ Open and yield file for reading the image file for an image with supplied identifier. :param image_id: Image ID """ path = self.get_image_filepath(image_id) with open(path, 'rb') as cache_file: yield cache_file path = self.get_image_filepath(image_id) inc_xattr(path, 'hits', 1) def queue_image(self, image_id): """ This adds a image to be cache to the queue. If the image already exists in the queue or has already been cached, we return False, True otherwise :param image_id: Image ID """ if self.is_cached(image_id): msg = _LI("Not queueing image '%s'. Already cached.") % image_id LOG.info(msg) return False if self.is_being_cached(image_id): msg = _LI("Not queueing image '%s'. Already being " "written to cache") % image_id LOG.info(msg) return False if self.is_queued(image_id): msg = _LI("Not queueing image '%s'. Already queued.") % image_id LOG.info(msg) return False path = self.get_image_filepath(image_id, 'queue') LOG.debug("Queueing image '%s'.", image_id) # Touch the file to add it to the queue with open(path, "w"): pass return True def get_queued_images(self): """ Returns a list of image IDs that are in the queue. The list should be sorted by the time the image ID was inserted into the queue. 
""" files = [f for f in get_all_regular_files(self.queue_dir)] items = [] for path in files: mtime = os.path.getmtime(path) items.append((mtime, os.path.basename(path))) items.sort() return [image_id for (modtime, image_id) in items] def _reap_old_files(self, dirpath, entry_type, grace=None): now = time.time() reaped = 0 for path in get_all_regular_files(dirpath): mtime = os.path.getmtime(path) age = now - mtime if not grace: LOG.debug("No grace period, reaping '%(path)s'" " immediately", {'path': path}) delete_cached_file(path) reaped += 1 elif age > grace: LOG.debug("Cache entry '%(path)s' exceeds grace period, " "(%(age)i s > %(grace)i s)", {'path': path, 'age': age, 'grace': grace}) delete_cached_file(path) reaped += 1 LOG.info(_LI("Reaped %(reaped)s %(entry_type)s cache entries"), {'reaped': reaped, 'entry_type': entry_type}) return reaped def reap_invalid(self, grace=None): """Remove any invalid cache entries :param grace: Number of seconds to keep an invalid entry around for debugging purposes. If None, then delete immediately. """ return self._reap_old_files(self.invalid_dir, 'invalid', grace=grace) def reap_stalled(self, grace=None): """Remove any stalled cache entries :param grace: Number of seconds to keep an invalid entry around for debugging purposes. If None, then delete immediately. """ return self._reap_old_files(self.incomplete_dir, 'stalled', grace=grace) def clean(self, stall_time=None): """ Delete any image files in the invalid directory and any files in the incomplete directory that are older than a configurable amount of time. """ self.reap_invalid() if stall_time is None: stall_time = CONF.image_cache_stall_time self.reap_stalled(stall_time) def get_all_regular_files(basepath): for fname in os.listdir(basepath): path = os.path.join(basepath, fname) if os.path.isfile(path): yield path def delete_cached_file(path): if os.path.exists(path): LOG.debug("Deleting image cache file '%s'" % path) os.unlink(path) else: LOG.warn(_LW("Cached image file '%s' doesn't exist, unable to" " delete") % path) def _make_namespaced_xattr_key(key, namespace='user'): """ Create a fully-qualified xattr-key by including the intended namespace. Namespacing differs among OSes[1]: FreeBSD: user, system Linux: user, system, trusted, security MacOS X: not needed Mac OS X won't break if we include a namespace qualifier, so, for simplicity, we always include it. -- [1] http://en.wikipedia.org/wiki/Extended_file_attributes """ namespaced_key = ".".join([namespace, key]) return namespaced_key def get_xattr(path, key, **kwargs): """Return the value for a particular xattr If the key doesn't not exist, or xattrs aren't supported by the file system then a KeyError will be raised, that is, unless you specify a default using kwargs. """ namespaced_key = _make_namespaced_xattr_key(key) try: return xattr.getxattr(path, namespaced_key) except IOError: if 'default' in kwargs: return kwargs['default'] else: raise def set_xattr(path, key, value): """Set the value of a specified xattr. If xattrs aren't supported by the file-system, we skip setting the value. """ namespaced_key = _make_namespaced_xattr_key(key) xattr.setxattr(path, namespaced_key, str(value)) def inc_xattr(path, key, n=1): """ Increment the value of an xattr (assuming it is an integer). BEWARE, this code *does* have a RACE CONDITION, since the read/update/write sequence is not atomic. 
    Since the use-case for this function is collecting stats -- not
    critical -- the benefits of simple, lock-free code outweigh the
    possibility of an occasional hit not being counted.
    """
    count = int(get_xattr(path, key))
    count += n
    set_xattr(path, key, str(count))
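
# A minimal sketch of the namespaced-xattr pattern implemented by the helpers
# above (set_xattr / get_xattr / inc_xattr), exercising the `xattr` library
# directly. The path is illustrative, and the snippet assumes a filesystem
# mounted with user_xattr support; on anything else setxattr raises IOError
# with EOPNOTSUPP, which is exactly what Driver.configure() checks for.
import xattr

_demo_path = '/tmp/image-cache-xattr-demo'
open(_demo_path, 'w').close()
try:
    xattr.setxattr(_demo_path, 'user.hits', '0')          # seed the counter
    _hits = int(xattr.getxattr(_demo_path, 'user.hits'))
    # Non-atomic read/increment/write, mirroring inc_xattr() above.
    xattr.setxattr(_demo_path, 'user.hits', str(_hits + 1))
except IOError:
    pass  # xattrs unsupported on this filesystem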
import json from django.core import mail from olympia import amo from olympia.abuse.models import AbuseReport from olympia.amo.tests import ( APITestClient, TestCase, addon_factory, reverse_ns, user_factory) class AddonAbuseViewSetTestBase(object): client_class = APITestClient def setUp(self): self.url = reverse_ns('abusereportaddon-list') def check_reporter(self, report): raise NotImplementedError def check_report(self, report, text): assert unicode(report) == text assert report.ip_address == '123.45.67.89' assert mail.outbox[0].subject == text self.check_reporter(report) def test_report_addon_by_id(self): addon = addon_factory() response = self.client.post( self.url, data={'addon': unicode(addon.id), 'message': 'abuse!'}, REMOTE_ADDR='123.45.67.89') assert response.status_code == 201 assert AbuseReport.objects.filter(addon_id=addon.id).exists() report = AbuseReport.objects.get(addon_id=addon.id) self.check_report(report, u'[Extension] Abuse Report for %s' % addon.name) def test_report_addon_by_slug(self): addon = addon_factory() response = self.client.post( self.url, data={'addon': addon.slug, 'message': 'abuse!'}, REMOTE_ADDR='123.45.67.89') assert response.status_code == 201 assert AbuseReport.objects.filter(addon_id=addon.id).exists() report = AbuseReport.objects.get(addon_id=addon.id) self.check_report(report, u'[Extension] Abuse Report for %s' % addon.name) def test_report_addon_by_guid(self): addon = addon_factory(guid='@badman') response = self.client.post( self.url, data={'addon': addon.guid, 'message': 'abuse!'}, REMOTE_ADDR='123.45.67.89') assert response.status_code == 201 assert AbuseReport.objects.filter(addon_id=addon.id).exists() report = AbuseReport.objects.get(addon_id=addon.id) self.check_report(report, u'[Extension] Abuse Report for %s' % addon.name) def test_report_addon_guid_not_on_amo(self): guid = '@mysteryman' response = self.client.post( self.url, data={'addon': guid, 'message': 'abuse!'}, REMOTE_ADDR='123.45.67.89') assert response.status_code == 201 assert AbuseReport.objects.filter(guid=guid).exists() report = AbuseReport.objects.get(guid=guid) self.check_report(report, u'[Addon] Abuse Report for %s' % guid) def test_report_addon_invalid_identifier(self): response = self.client.post( self.url, data={'addon': 'randomnotguid', 'message': 'abuse!'}) assert response.status_code == 404 def test_addon_not_public(self): addon = addon_factory(status=amo.STATUS_NULL) response = self.client.post( self.url, data={'addon': unicode(addon.id), 'message': 'abuse!'}, REMOTE_ADDR='123.45.67.89') assert response.status_code == 201 assert AbuseReport.objects.filter(addon_id=addon.id).exists() report = AbuseReport.objects.get(addon_id=addon.id) self.check_report(report, u'[Extension] Abuse Report for %s' % addon.name) def test_no_addon_fails(self): response = self.client.post( self.url, data={'message': 'abuse!'}) assert response.status_code == 400 assert json.loads(response.content) == { 'detail': 'Need an addon parameter'} def test_message_required_empty(self): addon = addon_factory() response = self.client.post( self.url, data={'addon': unicode(addon.id), 'message': ''}) assert response.status_code == 400 assert json.loads(response.content) == { 'detail': 'Abuse reports need a message'} def test_message_required_missing(self): addon = addon_factory() response = self.client.post( self.url, data={'addon': unicode(addon.id)}) assert response.status_code == 400 assert json.loads(response.content) == { 'detail': 'Abuse reports need a message'} def test_throttle(self): addon = 
addon_factory() for x in xrange(20): response = self.client.post( self.url, data={'addon': unicode(addon.id), 'message': 'abuse!'}, REMOTE_ADDR='123.45.67.89') assert response.status_code == 201, x response = self.client.post( self.url, data={'addon': unicode(addon.id), 'message': 'abuse!'}, REMOTE_ADDR='123.45.67.89') assert response.status_code == 429 class TestAddonAbuseViewSetLoggedOut(AddonAbuseViewSetTestBase, TestCase): def check_reporter(self, report): assert not report.reporter class TestAddonAbuseViewSetLoggedIn(AddonAbuseViewSetTestBase, TestCase): def setUp(self): super(TestAddonAbuseViewSetLoggedIn, self).setUp() self.user = user_factory() self.client.login_api(self.user) def check_reporter(self, report): assert report.reporter == self.user class UserAbuseViewSetTestBase(object): client_class = APITestClient def setUp(self): self.url = reverse_ns('abusereportuser-list') def check_reporter(self, report): raise NotImplementedError def check_report(self, report, text): assert unicode(report) == text assert report.ip_address == '123.45.67.89' assert mail.outbox[0].subject == text self.check_reporter(report) def test_report_user_id(self): user = user_factory() response = self.client.post( self.url, data={'user': unicode(user.id), 'message': 'abuse!'}, REMOTE_ADDR='123.45.67.89') assert response.status_code == 201 assert AbuseReport.objects.filter(user_id=user.id).exists() report = AbuseReport.objects.get(user_id=user.id) self.check_report(report, u'[User] Abuse Report for %s' % user.name) def test_report_user_username(self): user = user_factory() response = self.client.post( self.url, data={'user': unicode(user.username), 'message': 'abuse!'}, REMOTE_ADDR='123.45.67.89') assert response.status_code == 201 assert AbuseReport.objects.filter(user_id=user.id).exists() report = AbuseReport.objects.get(user_id=user.id) self.check_report(report, u'[User] Abuse Report for %s' % user.name) def test_no_user_fails(self): response = self.client.post( self.url, data={'message': 'abuse!'}) assert response.status_code == 400 assert json.loads(response.content) == { 'detail': 'Need a user parameter'} def test_message_required_empty(self): user = user_factory() response = self.client.post( self.url, data={'user': unicode(user.username), 'message': ''}) assert response.status_code == 400 assert json.loads(response.content) == { 'detail': 'Abuse reports need a message'} def test_message_required_missing(self): user = user_factory() response = self.client.post( self.url, data={'user': unicode(user.username)}) assert response.status_code == 400 assert json.loads(response.content) == { 'detail': 'Abuse reports need a message'} def test_throttle(self): user = user_factory() for x in xrange(20): response = self.client.post( self.url, data={'user': unicode(user.username), 'message': 'abuse!'}, REMOTE_ADDR='123.45.67.89') assert response.status_code == 201, x response = self.client.post( self.url, data={'user': unicode(user.username), 'message': 'abuse!'}, REMOTE_ADDR='123.45.67.89') assert response.status_code == 429 class TestUserAbuseViewSetLoggedOut(UserAbuseViewSetTestBase, TestCase): def check_reporter(self, report): assert not report.reporter class TestUserAbuseViewSetLoggedIn(UserAbuseViewSetTestBase, TestCase): def setUp(self): super(TestUserAbuseViewSetLoggedIn, self).setUp() self.user = user_factory() self.client.login_api(self.user) def check_reporter(self, report): assert report.reporter == self.user
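
# The two test_throttle() tests above only assert observable behaviour: the
# first 20 posts succeed and the 21st returns HTTP 429. A minimal sketch of a
# Django REST Framework rate limit that would produce that behaviour -- the
# class name, scope and rate are assumptions for illustration, not olympia's
# actual throttle implementation:
from rest_framework.throttling import SimpleRateThrottle


class AbuseReportThrottle(SimpleRateThrottle):
    scope = 'abuse'
    rate = '20/min'  # hypothetical rate matching what the tests exercise

    def get_cache_key(self, request, view):
        # Key on the authenticated user when available, otherwise on the
        # client IP (REMOTE_ADDR, which the tests above set explicitly).
        if request.user and request.user.is_authenticated:
            ident = request.user.pk
        else:
            ident = self.get_ident(request)
        return self.cache_format % {'scope': self.scope, 'ident': ident}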
# -*- coding: utf-8 -*- # Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import proto # type: ignore from google.cloud.monitoring_v3.types import common from google.cloud.monitoring_v3.types import mutation_record as gm_mutation_record from google.protobuf import duration_pb2 # type: ignore from google.protobuf import wrappers_pb2 # type: ignore from google.rpc import status_pb2 # type: ignore __protobuf__ = proto.module(package="google.monitoring.v3", manifest={"AlertPolicy",},) class AlertPolicy(proto.Message): r"""A description of the conditions under which some aspect of your system is considered to be "unhealthy" and the ways to notify people or services about this state. For an overview of alert policies, see `Introduction to Alerting <https://cloud.google.com/monitoring/alerts/>`__. Attributes: name (str): Required if the policy exists. The resource name for this policy. The format is: :: projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] ``[ALERT_POLICY_ID]`` is assigned by Stackdriver Monitoring when the policy is created. When calling the [alertPolicies.create][google.monitoring.v3.AlertPolicyService.CreateAlertPolicy] method, do not include the ``name`` field in the alerting policy passed as part of the request. display_name (str): A short name or phrase used to identify the policy in dashboards, notifications, and incidents. To avoid confusion, don't use the same display name for multiple policies in the same project. The name is limited to 512 Unicode characters. documentation (google.cloud.monitoring_v3.types.AlertPolicy.Documentation): Documentation that is included with notifications and incidents related to this policy. Best practice is for the documentation to include information to help responders understand, mitigate, escalate, and correct the underlying problems detected by the alerting policy. Notification channels that have limited capacity might not show this documentation. user_labels (Sequence[google.cloud.monitoring_v3.types.AlertPolicy.UserLabelsEntry]): User-supplied key/value data to be used for organizing and identifying the ``AlertPolicy`` objects. The field can contain up to 64 entries. Each key and value is limited to 63 Unicode characters or 128 bytes, whichever is smaller. Labels and values can contain only lowercase letters, numerals, underscores, and dashes. Keys must begin with a letter. conditions (Sequence[google.cloud.monitoring_v3.types.AlertPolicy.Condition]): A list of conditions for the policy. The conditions are combined by AND or OR according to the ``combiner`` field. If the combined conditions evaluate to true, then an incident is created. A policy can have from one to six conditions. If ``condition_time_series_query_language`` is present, it must be the only ``condition``. combiner (google.cloud.monitoring_v3.types.AlertPolicy.ConditionCombinerType): How to combine the results of multiple conditions to determine if an incident should be opened. 
If ``condition_time_series_query_language`` is present, this must be ``COMBINE_UNSPECIFIED``. enabled (google.protobuf.wrappers_pb2.BoolValue): Whether or not the policy is enabled. On write, the default interpretation if unset is that the policy is enabled. On read, clients should not make any assumption about the state if it has not been populated. The field should always be populated on List and Get operations, unless a field projection has been specified that strips it out. validity (google.rpc.status_pb2.Status): Read-only description of how the alert policy is invalid. OK if the alert policy is valid. If not OK, the alert policy will not generate incidents. notification_channels (Sequence[str]): Identifies the notification channels to which notifications should be sent when incidents are opened or closed or when new violations occur on an already opened incident. Each element of this array corresponds to the ``name`` field in each of the [``NotificationChannel``][google.monitoring.v3.NotificationChannel] objects that are returned from the [``ListNotificationChannels``] [google.monitoring.v3.NotificationChannelService.ListNotificationChannels] method. The format of the entries in this field is: :: projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID] creation_record (google.cloud.monitoring_v3.types.MutationRecord): A read-only record of the creation of the alerting policy. If provided in a call to create or update, this field will be ignored. mutation_record (google.cloud.monitoring_v3.types.MutationRecord): A read-only record of the most recent change to the alerting policy. If provided in a call to create or update, this field will be ignored. alert_strategy (google.cloud.monitoring_v3.types.AlertPolicy.AlertStrategy): Control over how this alert policy's notification channels are notified. """ class ConditionCombinerType(proto.Enum): r"""Operators for combining conditions.""" COMBINE_UNSPECIFIED = 0 AND = 1 OR = 2 AND_WITH_MATCHING_RESOURCE = 3 class Documentation(proto.Message): r"""A content string and a MIME type that describes the content string's format. Attributes: content (str): The text of the documentation, interpreted according to ``mime_type``. The content may not exceed 8,192 Unicode characters and may not exceed more than 10,240 bytes when encoded in UTF-8 format, whichever is smaller. mime_type (str): The format of the ``content`` field. Presently, only the value ``"text/markdown"`` is supported. See `Markdown <https://en.wikipedia.org/wiki/Markdown>`__ for more information. """ content = proto.Field(proto.STRING, number=1,) mime_type = proto.Field(proto.STRING, number=2,) class Condition(proto.Message): r"""A condition is a true/false test that determines when an alerting policy should open an incident. If a condition evaluates to true, it signifies that something is wrong. This message has `oneof`_ fields (mutually exclusive fields). For each oneof, at most one member field can be set at the same time. Setting any member of the oneof automatically clears all other members. .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields Attributes: name (str): Required if the condition exists. The unique resource name for this condition. Its format is: :: projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[POLICY_ID]/conditions/[CONDITION_ID] ``[CONDITION_ID]`` is assigned by Stackdriver Monitoring when the condition is created as part of a new or updated alerting policy. 
When calling the [alertPolicies.create][google.monitoring.v3.AlertPolicyService.CreateAlertPolicy] method, do not include the ``name`` field in the conditions of the requested alerting policy. Stackdriver Monitoring creates the condition identifiers and includes them in the new policy. When calling the [alertPolicies.update][google.monitoring.v3.AlertPolicyService.UpdateAlertPolicy] method to update a policy, including a condition ``name`` causes the existing condition to be updated. Conditions without names are added to the updated policy. Existing conditions are deleted if they are not updated. Best practice is to preserve ``[CONDITION_ID]`` if you make only small changes, such as those to condition thresholds, durations, or trigger values. Otherwise, treat the change as a new condition and let the existing condition be deleted. display_name (str): A short name or phrase used to identify the condition in dashboards, notifications, and incidents. To avoid confusion, don't use the same display name for multiple conditions in the same policy. condition_threshold (google.cloud.monitoring_v3.types.AlertPolicy.Condition.MetricThreshold): A condition that compares a time series against a threshold. This field is a member of `oneof`_ ``condition``. condition_absent (google.cloud.monitoring_v3.types.AlertPolicy.Condition.MetricAbsence): A condition that checks that a time series continues to receive new data points. This field is a member of `oneof`_ ``condition``. condition_matched_log (google.cloud.monitoring_v3.types.AlertPolicy.Condition.LogMatch): A condition that checks for log messages matching given constraints. If set, no other conditions can be present. This field is a member of `oneof`_ ``condition``. condition_monitoring_query_language (google.cloud.monitoring_v3.types.AlertPolicy.Condition.MonitoringQueryLanguageCondition): A condition that uses the Monitoring Query Language to define alerts. This field is a member of `oneof`_ ``condition``. """ class Trigger(proto.Message): r"""Specifies how many time series must fail a predicate to trigger a condition. If not specified, then a ``{count: 1}`` trigger is used. This message has `oneof`_ fields (mutually exclusive fields). For each oneof, at most one member field can be set at the same time. Setting any member of the oneof automatically clears all other members. .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields Attributes: count (int): The absolute number of time series that must fail the predicate for the condition to be triggered. This field is a member of `oneof`_ ``type``. percent (float): The percentage of time series that must fail the predicate for the condition to be triggered. This field is a member of `oneof`_ ``type``. """ count = proto.Field(proto.INT32, number=1, oneof="type",) percent = proto.Field(proto.DOUBLE, number=2, oneof="type",) class MetricThreshold(proto.Message): r"""A condition type that compares a collection of time series against a threshold. Attributes: filter (str): Required. A `filter <https://cloud.google.com/monitoring/api/v3/filters>`__ that identifies which time series should be compared with the threshold. The filter is similar to the one that is specified in the ```ListTimeSeries`` request <https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list>`__ (that call is useful to verify the time series that will be retrieved / processed). The filter must specify the metric type and the resource type. 
Optionally, it can specify resource labels and metric labels. This field must not exceed 2048 Unicode characters in length. aggregations (Sequence[google.cloud.monitoring_v3.types.Aggregation]): Specifies the alignment of data points in individual time series as well as how to combine the retrieved time series together (such as when aggregating multiple streams on each resource to a single stream for each resource or when aggregating streams across all members of a group of resources). Multiple aggregations are applied in the order specified. This field is similar to the one in the ```ListTimeSeries`` request <https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list>`__. It is advisable to use the ``ListTimeSeries`` method when debugging this field. denominator_filter (str): A `filter <https://cloud.google.com/monitoring/api/v3/filters>`__ that identifies a time series that should be used as the denominator of a ratio that will be compared with the threshold. If a ``denominator_filter`` is specified, the time series specified by the ``filter`` field will be used as the numerator. The filter must specify the metric type and optionally may contain restrictions on resource type, resource labels, and metric labels. This field may not exceed 2048 Unicode characters in length. denominator_aggregations (Sequence[google.cloud.monitoring_v3.types.Aggregation]): Specifies the alignment of data points in individual time series selected by ``denominatorFilter`` as well as how to combine the retrieved time series together (such as when aggregating multiple streams on each resource to a single stream for each resource or when aggregating streams across all members of a group of resources). When computing ratios, the ``aggregations`` and ``denominator_aggregations`` fields must use the same alignment period and produce time series that have the same periodicity and labels. comparison (google.cloud.monitoring_v3.types.ComparisonType): The comparison to apply between the time series (indicated by ``filter`` and ``aggregation``) and the threshold (indicated by ``threshold_value``). The comparison is applied on each time series, with the time series on the left-hand side and the threshold on the right-hand side. Only ``COMPARISON_LT`` and ``COMPARISON_GT`` are supported currently. threshold_value (float): A value against which to compare the time series. duration (google.protobuf.duration_pb2.Duration): The amount of time that a time series must violate the threshold to be considered failing. Currently, only values that are a multiple of a minute--e.g., 0, 60, 120, or 300 seconds--are supported. If an invalid value is given, an error will be returned. When choosing a duration, it is useful to keep in mind the frequency of the underlying time series data (which may also be affected by any alignments specified in the ``aggregations`` field); a good duration is long enough so that a single outlier does not generate spurious alerts, but short enough that unhealthy states are detected and alerted on quickly. trigger (google.cloud.monitoring_v3.types.AlertPolicy.Condition.Trigger): The number/percent of time series for which the comparison must hold in order for the condition to trigger. If unspecified, then the condition will trigger if the comparison is true for any of the time series that have been identified by ``filter`` and ``aggregations``, or by the ratio, if ``denominator_filter`` and ``denominator_aggregations`` are specified. 
""" filter = proto.Field(proto.STRING, number=2,) aggregations = proto.RepeatedField( proto.MESSAGE, number=8, message=common.Aggregation, ) denominator_filter = proto.Field(proto.STRING, number=9,) denominator_aggregations = proto.RepeatedField( proto.MESSAGE, number=10, message=common.Aggregation, ) comparison = proto.Field(proto.ENUM, number=4, enum=common.ComparisonType,) threshold_value = proto.Field(proto.DOUBLE, number=5,) duration = proto.Field( proto.MESSAGE, number=6, message=duration_pb2.Duration, ) trigger = proto.Field( proto.MESSAGE, number=7, message="AlertPolicy.Condition.Trigger", ) class MetricAbsence(proto.Message): r"""A condition type that checks that monitored resources are reporting data. The configuration defines a metric and a set of monitored resources. The predicate is considered in violation when a time series for the specified metric of a monitored resource does not include any data in the specified ``duration``. Attributes: filter (str): Required. A `filter <https://cloud.google.com/monitoring/api/v3/filters>`__ that identifies which time series should be compared with the threshold. The filter is similar to the one that is specified in the ```ListTimeSeries`` request <https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list>`__ (that call is useful to verify the time series that will be retrieved / processed). The filter must specify the metric type and the resource type. Optionally, it can specify resource labels and metric labels. This field must not exceed 2048 Unicode characters in length. aggregations (Sequence[google.cloud.monitoring_v3.types.Aggregation]): Specifies the alignment of data points in individual time series as well as how to combine the retrieved time series together (such as when aggregating multiple streams on each resource to a single stream for each resource or when aggregating streams across all members of a group of resources). Multiple aggregations are applied in the order specified. This field is similar to the one in the ```ListTimeSeries`` request <https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list>`__. It is advisable to use the ``ListTimeSeries`` method when debugging this field. duration (google.protobuf.duration_pb2.Duration): The amount of time that a time series must fail to report new data to be considered failing. The minimum value of this field is 120 seconds. Larger values that are a multiple of a minute--for example, 240 or 300 seconds--are supported. If an invalid value is given, an error will be returned. The ``Duration.nanos`` field is ignored. trigger (google.cloud.monitoring_v3.types.AlertPolicy.Condition.Trigger): The number/percent of time series for which the comparison must hold in order for the condition to trigger. If unspecified, then the condition will trigger if the comparison is true for any of the time series that have been identified by ``filter`` and ``aggregations``. """ filter = proto.Field(proto.STRING, number=1,) aggregations = proto.RepeatedField( proto.MESSAGE, number=5, message=common.Aggregation, ) duration = proto.Field( proto.MESSAGE, number=2, message=duration_pb2.Duration, ) trigger = proto.Field( proto.MESSAGE, number=3, message="AlertPolicy.Condition.Trigger", ) class LogMatch(proto.Message): r"""A condition type that checks whether a log message in the `scoping project <https://cloud.google.com/monitoring/api/v3#project_name>`__ satisfies the given filter. Logs from other projects in the metrics scope are not evaluated. 
Attributes: filter (str): Required. A logs-based filter. See `Advanced Logs Queries <https://cloud.google.com/logging/docs/view/advanced-queries>`__ for how this filter should be constructed. label_extractors (Sequence[google.cloud.monitoring_v3.types.AlertPolicy.Condition.LogMatch.LabelExtractorsEntry]): Optional. A map from a label key to an extractor expression, which is used to extract the value for this label key. Each entry in this map is a specification for how data should be extracted from log entries that match ``filter``. Each combination of extracted values is treated as a separate rule for the purposes of triggering notifications. Label keys and corresponding values can be used in notifications generated by this condition. Please see `the documentation on logs-based metric ``valueExtractor``\ s <https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.metrics#LogMetric.FIELDS.value_extractor>`__ for syntax and examples. """ filter = proto.Field(proto.STRING, number=1,) label_extractors = proto.MapField(proto.STRING, proto.STRING, number=2,) class MonitoringQueryLanguageCondition(proto.Message): r"""A condition type that allows alert policies to be defined using `Monitoring Query Language <https://cloud.google.com/monitoring/mql>`__. Attributes: query (str): `Monitoring Query Language <https://cloud.google.com/monitoring/mql>`__ query that outputs a boolean stream. duration (google.protobuf.duration_pb2.Duration): The amount of time that a time series must violate the threshold to be considered failing. Currently, only values that are a multiple of a minute--e.g., 0, 60, 120, or 300 seconds--are supported. If an invalid value is given, an error will be returned. When choosing a duration, it is useful to keep in mind the frequency of the underlying time series data (which may also be affected by any alignments specified in the ``aggregations`` field); a good duration is long enough so that a single outlier does not generate spurious alerts, but short enough that unhealthy states are detected and alerted on quickly. trigger (google.cloud.monitoring_v3.types.AlertPolicy.Condition.Trigger): The number/percent of time series for which the comparison must hold in order for the condition to trigger. If unspecified, then the condition will trigger if the comparison is true for any of the time series that have been identified by ``filter`` and ``aggregations``, or by the ratio, if ``denominator_filter`` and ``denominator_aggregations`` are specified. """ query = proto.Field(proto.STRING, number=1,) duration = proto.Field( proto.MESSAGE, number=2, message=duration_pb2.Duration, ) trigger = proto.Field( proto.MESSAGE, number=3, message="AlertPolicy.Condition.Trigger", ) name = proto.Field(proto.STRING, number=12,) display_name = proto.Field(proto.STRING, number=6,) condition_threshold = proto.Field( proto.MESSAGE, number=1, oneof="condition", message="AlertPolicy.Condition.MetricThreshold", ) condition_absent = proto.Field( proto.MESSAGE, number=2, oneof="condition", message="AlertPolicy.Condition.MetricAbsence", ) condition_matched_log = proto.Field( proto.MESSAGE, number=20, oneof="condition", message="AlertPolicy.Condition.LogMatch", ) condition_monitoring_query_language = proto.Field( proto.MESSAGE, number=19, oneof="condition", message="AlertPolicy.Condition.MonitoringQueryLanguageCondition", ) class AlertStrategy(proto.Message): r"""Control over how the notification channels in ``notification_channels`` are notified when this alert fires. 
Attributes: notification_rate_limit (google.cloud.monitoring_v3.types.AlertPolicy.AlertStrategy.NotificationRateLimit): Required for alert policies with a ``LogMatch`` condition. This limit is not implemented for alert policies that are not log-based. auto_close (google.protobuf.duration_pb2.Duration): If an alert policy that was active has no data for this long, any open incidents will close """ class NotificationRateLimit(proto.Message): r"""Control over the rate of notifications sent to this alert policy's notification channels. Attributes: period (google.protobuf.duration_pb2.Duration): Not more than one notification per ``period``. """ period = proto.Field( proto.MESSAGE, number=1, message=duration_pb2.Duration, ) notification_rate_limit = proto.Field( proto.MESSAGE, number=1, message="AlertPolicy.AlertStrategy.NotificationRateLimit", ) auto_close = proto.Field( proto.MESSAGE, number=3, message=duration_pb2.Duration, ) name = proto.Field(proto.STRING, number=1,) display_name = proto.Field(proto.STRING, number=2,) documentation = proto.Field(proto.MESSAGE, number=13, message=Documentation,) user_labels = proto.MapField(proto.STRING, proto.STRING, number=16,) conditions = proto.RepeatedField(proto.MESSAGE, number=12, message=Condition,) combiner = proto.Field(proto.ENUM, number=6, enum=ConditionCombinerType,) enabled = proto.Field(proto.MESSAGE, number=17, message=wrappers_pb2.BoolValue,) validity = proto.Field(proto.MESSAGE, number=18, message=status_pb2.Status,) notification_channels = proto.RepeatedField(proto.STRING, number=14,) creation_record = proto.Field( proto.MESSAGE, number=10, message=gm_mutation_record.MutationRecord, ) mutation_record = proto.Field( proto.MESSAGE, number=11, message=gm_mutation_record.MutationRecord, ) alert_strategy = proto.Field(proto.MESSAGE, number=21, message=AlertStrategy,) __all__ = tuple(sorted(__protobuf__.manifest))
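
# A minimal sketch of constructing one of these messages from client code,
# assuming the installed google-cloud-monitoring package exposes these types
# at the monitoring_v3 top level. The metric filter, threshold, and duration
# are illustrative values only, not defaults from this module.
from google.cloud import monitoring_v3
from google.protobuf import duration_pb2 as _duration_pb2

example_policy = monitoring_v3.AlertPolicy(
    display_name="High CPU utilization",
    combiner=monitoring_v3.AlertPolicy.ConditionCombinerType.OR,
    conditions=[
        monitoring_v3.AlertPolicy.Condition(
            display_name="CPU > 80% for 5 minutes",
            condition_threshold=monitoring_v3.AlertPolicy.Condition.MetricThreshold(
                filter='metric.type = "compute.googleapis.com/instance/cpu/utilization"',
                comparison=monitoring_v3.ComparisonType.COMPARISON_GT,
                threshold_value=0.8,
                duration=_duration_pb2.Duration(seconds=300),
                trigger=monitoring_v3.AlertPolicy.Condition.Trigger(count=1),
            ),
        ),
    ],
)
# An AlertPolicyServiceClient would then create it with something like:
#   client.create_alert_policy(name="projects/<PROJECT_ID>",
#                              alert_policy=example_policy)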
from sqlparse import parse, tokens class SQLParser(object): """class used to parse sql statements into a data structure which can be used to execute the statement""" def parse_statement(self, statement): """converts a statement in string form to tokens and passes off to the parser""" def parse_tkns(tkns): """parse tokens into datastructure used to execute statement""" fns = {} joins = [] aliases = {} cases = {} ops = {} self.case_num = 0 nested_queries = {} literals = {} # some helpers for determining a token's attributes when it isn't # completely straight forward is_identifier = lambda token: token._get_repr_name() == 'Identifier' is_case = lambda token: token._get_repr_name() == 'Case' is_function = lambda token: token._get_repr_name() == 'Function' is_comparison = lambda token: token._get_repr_name() == 'Comparison' is_operator = lambda token: token.ttype == tokens.Operator \ or token.ttype == tokens.Wildcard def strip_tkns(tkns, punctuation=None): """convenience function to remove whitespace tokens and comments from list, optionally also remove punctuation""" if punctuation is None: return [token for token in tkns if not token.is_whitespace() and token._get_repr_name() != 'Comment'] return [token for token in tkns if not token.is_whitespace() and token._get_repr_name() != 'Comment' and token.ttype != tokens.Token.Punctuation] def get_fns(tkns): """get a dictionary of all functions in statement, needed for order of operations with grouping and case statements""" for tkn in tkns: if tkn._get_repr_name() == 'Function': col, fn = sql_function(tkn) _fns = fns.get(col, []) if fn not in _fns: _fns.append(fn) fns[col] = _fns elif tkn.is_group(): get_fns(tkn.tokens) def col_identifier(token): if token.ttype in tokens.Literal: literals[token.value] = token.value return token.value, None tkns = token.tokens # strip whitespace and punctuation tkns = strip_tkns(tkns) if len(tkns) == 1: identifier = tkns[0].value if tkns[0].ttype in tokens.Literal: literals[identifier] = identifier # handle issue of ambigous column names through aliasing # for now, may be able to find a more efficient way in future aliases[identifier] = None, None return identifier, None # find the index of 'AS' in tkns as_idx = next((i for i, t in enumerate(tkns) if t.value.upper() == 'AS'), None) if as_idx is None: op_idx = next((i for i, t in enumerate(tkns) if is_operator(t)), None) if op_idx is None: return tkns[0].value + '.' + tkns[-1].value, None return operation(tkns) as_name = tkns[as_idx+1].value tkns = tkns[:as_idx] if len(tkns) == 1: # handle aliasing if tkns[0].ttype in tokens.Literal: literals[as_name] = tkns[0].value return as_name, None elif is_case(tkns[0]): return parse_case(tkns[0].tokens, as_name=as_name) elif is_identifier(tkns[0]): aliases[as_name] = col_identifier(tkns[0]), None elif is_function(tkns[0]): col, fn = sql_function(tkns[0]) aliases[as_name] = col, fn return as_name, None op_idx = next((i for i, t in enumerate(tkns) if is_operator(t)), None) if op_idx is None: # handle aliasing for special case where parser doesn't group # identifier properly aliases[as_name] = tkns[0].value + "." 
+ tkns[-1].value, None return as_name, None return operation(tkns, as_name) def sql_function(token): tkns = token.tokens fn, parens = tkns col = parens.tokens[1] fn = fn.value.lower() col = col_identifier(col)[0] return col, fn def identifier_list(token): """used to parse sql identifiers into actual table/column groupings""" if is_identifier(token): return col_identifier(token) if is_function(token): return [sql_function(token)] if is_case(token): return parse_case(token) tkns = token.tokens if len(tkns) == 1: if is_function(tkns[0]): return sql_function(tkns[0]) return col_identifier(token) proc = [] # filter whitespace and punctuation for tkn in tkns: if is_identifier(tkn): proc.append(col_identifier(tkn)) elif is_case(tkn): proc.append(parse_case(tkn.tokens)) elif is_function(tkn): col, fn = sql_function(tkn) proc.append((col, fn)) elif not tkn.is_whitespace() \ and tkn.ttype != tokens.Punctuation: proc.append(col_identifier(tkn)) return proc def operation(tkns, as_name=None): """perform arithmetic operations""" # identifiers used in comparision, needed to work around issue # #83 of numexpr identifiers = {} if len(tkns) == 1: return col_identifier(tkns[0]) # get indicies in tkns where operators are op_indices = [i for i, t in enumerate(tkns) if is_operator(t)] # get operators operators = [t.value for t in tkns if is_operator(t)] # group other tokens around operators ids = [tkns[:op_indices[0]]] ids += [tkns[i1+1:i2] for i1, i2 in zip(op_indices[:-1], op_indices[1:])] ids += [tkns[op_indices[-1]+1:]] def get_id(_id): if len(_id) > 1: return ''.join([t.value for t in _id]), None token = _id[0] if token._get_repr_name() == 'Parenthesis': # TODO: instead of just leveraging parsing, # pass parenthesis into numexpr for performance # gains return operation(token.tokens[1:-1]) if token._get_repr_name() == 'Integer': return token.value, None if is_function(token): return sql_function(token) elif token.is_group(): return col_identifier(token) ids = map(get_id, ids) cols = [(x if y is None else (x+'_'+y)).replace('.', '_') for x, y in ids] for _id, col in zip(ids, cols): try: # only add non-numbers to column identifiers dict float(col) except: identifiers[col] = _id expr = reduce(lambda x, (y, z): x+' '+y+' '+z, zip(operators, cols[1:]), cols[0]) # give auto-genereted name if no alias specified if as_name is None: as_name = ''.join(cols) op = {'as_name': as_name, 'expr': (expr, identifiers)} _ops = ops.get(curr_sect, []) _ops.append(op) ops[curr_sect] = _ops return as_name, None def comparison(comps, operators=None): # identifiers used in comparision, needed to work around issue #83 identifiers = {} # need a counter for number of comparisons for variable names def comp_str(comp): comp_map = { '=': '==', '<>': '!=', } comp = strip_tkns(comp) assert len(comp) == 3 col, comp, val = comp comp = comp_map.get(comp.value, comp.value) if is_function(col): col, fn = sql_function(col) col_str = (col+'_'+fn).replace('.', '_') identifiers[col_str] = col, fn elif col.is_group(): col = col_identifier(col)[0] col_str = col.replace('.', '_') identifiers[col_str] = col, None if val.is_group(): val = col_identifier(val)[0] identifiers[val.replace('.', '_')] = val, None else: val = val.value val_str = val.replace('.', '_') return """({col} {comp} {val})""".format(col=col_str, comp=comp, val=val_str) comp = comps[0] ev_str = comp_str(comp) if operators is not None: for comp, op in zip(comps[1:], operators): # build string to eventually evaluate if op == 'AND': ev_str += " & " + comp_str(comp) elif op == 'OR': ev_str 
+= " | " + comp_str(comp) return ev_str, identifiers def parse_case(tkns, as_name=None): def get_stmt(token): if is_function(token): return sql_function(token) else: return col_identifier(token) # give auto-genereted name if no alias specified if as_name is None: self.case_num += 1 as_name = 'case' + str(self.case_num) case = {'as_name': as_name, 'stmts': []} # remove whitespace from tokens tkns = strip_tkns(tkns) # need to parse backwards for proper order of operations for i, token in reversed(list(enumerate(tkns))): # stop at CASE as we are looping in reverse so will # be starting at END if token.ttype == tokens.Keyword.CASE: break elif tkns[i-1].value == 'ELSE': case['else_stmt'] = get_stmt(token) elif tkns[i-1].value == 'THEN': stmt = get_stmt(token) elif is_comparison(token): if token.is_group(): cond = comparison([token.tokens]) else: cond = comparison([[tkns[i-1], token, tkns[i+1]]]) case['stmts'].append((cond, stmt)) _cases = cases.get(curr_sect, []) _cases.append(case) cases[curr_sect] = _cases return as_name, None def parse_select(tkns): identifiers = [] tkns = strip_tkns(tkns) for i, token in enumerate(tkns): if token.ttype is tokens.Wildcard: return elif is_identifier(token): identifiers = [col_identifier(token)] elif token.is_group(): identifiers = identifier_list(token) return identifiers def parse_into(tkns): for token in tkns: if is_identifier(token): return token.value def tbl_identifier(tkns): """returns identifier as tuple of tablename, identifier""" if len(tkns) == 1: return (tkns[0].value,) * 2 return tkns[0].value, tkns[-1].value def parse_from(tkns): how = None for i, token in enumerate(tkns): if token._get_repr_name() == 'Parenthesis': table, identifier = tbl_identifier(tkns[i+1].tokens) table = '###temp_' + table nested = parse_tkns(token.tokens[1:-1]) nested_queries[table] = nested # remove next token from list as it is already processed del tkns[i+1] elif token.is_group(): table, identifier = tbl_identifier(token.tokens) elif 'JOIN' in token.value: how = token.value.split()[0].lower() break if how is not None: parse_join(tkns[i+1:], how) return table, identifier def parse_join(tkns, how): for i, token in enumerate(tkns): if 'JOIN' in token.value: how_new = token.value.split()[0].lower() parse_join(tkns[i+1:], how_new) break elif token._get_repr_name() == 'Parenthesis': right, right_identifier = tbl_identifier(tkns[i+1].tokens) right = '###temp_' + right nested = parse_tkns(token.tokens[1:-1]) nested_queries[right] = nested # remove next token from list as it is already processed del tkns[i+1] elif is_comparison(token): left_on = col_identifier(token.tokens[0])[0] right_on = col_identifier(token.tokens[-1])[0] elif token.is_group(): right, right_identifier = tbl_identifier(token.tokens) joins.append((right, how, left_on, right_on, right_identifier)) def parse_where(tkns): # list of boolean indices to apply to current value comps = [token.tokens for token in tkns if is_comparison(token)] operators = [token.value for token in tkns if token.value in ('AND', 'OR')] return comparison(comps, operators) def parse_group(tkns): for tkn in tkns: if tkn.is_group(): group_by = zip(*identifier_list(tkn))[0] return group_by def parse_order(tkns): for token in tkns: if token.is_group(): identifiers = identifier_list(token) return identifiers sections = {'SELECT': parse_select, 'INTO': parse_into, 'FROM': parse_from, 'WHERE': parse_where, 'GROUP': parse_group, 'ORDER': parse_order} # remove whitespace from tokens tkns = strip_tkns(tkns) _parsed = {} for i, token in 
enumerate(tkns): if i == 0: start = 0 curr_sect = token.value.upper() continue if token._get_repr_name().upper() == 'WHERE': _parsed[curr_sect] = sections[curr_sect](tkns[start:i]) # start next category of statement curr_sect = 'WHERE' _parsed['WHERE'] = sections['WHERE'](token.tokens) continue if token.value.upper() in sections.keys() \ and token.ttype in tokens.Keyword: if curr_sect != 'WHERE': _parsed[curr_sect] = sections[curr_sect](tkns[start:i]) # start next category of statement start = i curr_sect = token.value.upper() # add in last section if curr_sect != 'WHERE': _parsed[curr_sect] = sections[curr_sect](tkns[start:]) get_fns(tkns) _parsed['FUNCTIONS'] = fns _parsed['JOINS'] = joins _parsed['ALIASES'] = aliases _parsed['CASES'] = cases _parsed['NESTED_QUERIES'] = nested_queries _parsed['OPS'] = ops _parsed['LITERALS'] = literals return _parsed tkns = parse(statement)[0].tokens return parse_tkns(tkns)
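
# A minimal usage sketch of SQLParser.parse_statement(); the statement below
# is illustrative. The returned dict is keyed by statement section ('SELECT',
# 'FROM', 'WHERE', ...) plus the derived entries ('FUNCTIONS', 'JOINS',
# 'ALIASES', 'CASES', 'NESTED_QUERIES', 'OPS', 'LITERALS') built by
# parse_tkns() above. Note the module targets Python 2 (it relies on the
# built-in `reduce` and on tuple-unpacking lambdas).
_parser = SQLParser()
_parsed = _parser.parse_statement(
    "SELECT t.region, SUM(t.amount) AS total "
    "FROM sales t "
    "WHERE t.amount > 0 "
    "GROUP BY t.region")
# _parsed['FROM'] identifies the table and its alias, _parsed['FUNCTIONS']
# records the SUM() aggregation, and _parsed['GROUP'] lists the grouping
# columns.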
# Copyright (c) 2013 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # # Copyright (C) 2013 Association of Universities for Research in Astronomy # (AURA) # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # # 3. The name of AURA and its representatives may not be used to # endorse or promote products derived from this software without # specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY AURA ``AS IS'' AND ANY EXPRESS OR IMPLIED # WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL AURA BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS import os import re import sys import tempfile import textwrap import fixtures import mock import pkg_resources import six import testscenarios from testtools import matchers from pbr import git from pbr import packaging from pbr.tests import base class TestRepo(fixtures.Fixture): """A git repo for testing with. Use of TempHomeDir with this fixture is strongly recommended as due to the lack of config --local in older gits, it will write to the users global configuration without TempHomeDir. """ def __init__(self, basedir): super(TestRepo, self).__init__() self._basedir = basedir def setUp(self): super(TestRepo, self).setUp() base._run_cmd(['git', 'init', '.'], self._basedir) base._config_git() base._run_cmd(['git', 'add', '.'], self._basedir) def commit(self, message_content='test commit'): files = len(os.listdir(self._basedir)) path = self._basedir + '/%d' % files open(path, 'wt').close() base._run_cmd(['git', 'add', path], self._basedir) base._run_cmd(['git', 'commit', '-m', message_content], self._basedir) def uncommit(self): base._run_cmd(['git', 'reset', '--hard', 'HEAD^'], self._basedir) def tag(self, version): base._run_cmd( ['git', 'tag', '-sm', 'test tag', version], self._basedir) class GPGKeyFixture(fixtures.Fixture): """Creates a GPG key for testing. It's recommended that this be used in concert with a unique home directory. 
""" def setUp(self): super(GPGKeyFixture, self).setUp() tempdir = self.useFixture(fixtures.TempDir()) gnupg_version_re = re.compile('^gpg\s.*\s([\d+])\.([\d+])\.([\d+])') gnupg_version = base._run_cmd(['gpg', '--version'], tempdir.path) for line in gnupg_version[0].split('\n'): gnupg_version = gnupg_version_re.match(line) if gnupg_version: gnupg_version = (int(gnupg_version.group(1)), int(gnupg_version.group(2)), int(gnupg_version.group(3))) break else: if gnupg_version is None: gnupg_version = (0, 0, 0) config_file = tempdir.path + '/key-config' f = open(config_file, 'wt') try: if gnupg_version[0] == 2 and gnupg_version[1] >= 1: f.write(""" %no-protection %transient-key """) f.write(""" %no-ask-passphrase Key-Type: RSA Name-Real: Example Key Name-Comment: N/A Name-Email: example@example.com Expire-Date: 2d Preferences: (setpref) %commit """) finally: f.close() # Note that --quick-random (--debug-quick-random in GnuPG 2.x) # does not have a corresponding preferences file setting and # must be passed explicitly on the command line instead if gnupg_version[0] == 1: gnupg_random = '--quick-random' elif gnupg_version[0] >= 2: gnupg_random = '--debug-quick-random' else: gnupg_random = '' base._run_cmd( ['gpg', '--gen-key', '--batch', gnupg_random, config_file], tempdir.path) class TestPackagingInGitRepoWithCommit(base.BaseTestCase): scenarios = [ ('preversioned', dict(preversioned=True)), ('postversioned', dict(preversioned=False)), ] def setUp(self): super(TestPackagingInGitRepoWithCommit, self).setUp() repo = self.useFixture(TestRepo(self.package_dir)) repo.commit() self.run_setup('sdist', allow_fail=False) def test_authors(self): # One commit, something should be in the authors list with open(os.path.join(self.package_dir, 'AUTHORS'), 'r') as f: body = f.read() self.assertNotEqual(body, '') def test_changelog(self): with open(os.path.join(self.package_dir, 'ChangeLog'), 'r') as f: body = f.read() # One commit, something should be in the ChangeLog list self.assertNotEqual(body, '') def test_manifest_exclude_honoured(self): with open(os.path.join( self.package_dir, 'pbr_testpackage.egg-info/SOURCES.txt'), 'r') as f: body = f.read() self.assertThat( body, matchers.Not(matchers.Contains('pbr_testpackage/extra.py'))) self.assertThat(body, matchers.Contains('pbr_testpackage/__init__.py')) class TestPackagingInGitRepoWithoutCommit(base.BaseTestCase): def setUp(self): super(TestPackagingInGitRepoWithoutCommit, self).setUp() self.useFixture(TestRepo(self.package_dir)) self.run_setup('sdist', allow_fail=False) def test_authors(self): # No commits, no authors in list with open(os.path.join(self.package_dir, 'AUTHORS'), 'r') as f: body = f.read() self.assertEqual(body, '\n') def test_changelog(self): # No commits, nothing should be in the ChangeLog list with open(os.path.join(self.package_dir, 'ChangeLog'), 'r') as f: body = f.read() self.assertEqual(body, 'CHANGES\n=======\n\n') class TestPackagingInPlainDirectory(base.BaseTestCase): def setUp(self): super(TestPackagingInPlainDirectory, self).setUp() self.run_setup('sdist', allow_fail=False) def test_authors(self): # Not a git repo, no AUTHORS file created filename = os.path.join(self.package_dir, 'AUTHORS') self.assertFalse(os.path.exists(filename)) def test_changelog(self): # Not a git repo, no ChangeLog created filename = os.path.join(self.package_dir, 'ChangeLog') self.assertFalse(os.path.exists(filename)) class TestPresenceOfGit(base.BaseTestCase): def testGitIsInstalled(self): with mock.patch.object(git, '_run_shell_command') as _command: 
_command.return_value = 'git version 1.8.4.1' self.assertEqual(True, git._git_is_installed()) def testGitIsNotInstalled(self): with mock.patch.object(git, '_run_shell_command') as _command: _command.side_effect = OSError self.assertEqual(False, git._git_is_installed()) class TestNestedRequirements(base.BaseTestCase): def test_nested_requirement(self): tempdir = tempfile.mkdtemp() requirements = os.path.join(tempdir, 'requirements.txt') nested = os.path.join(tempdir, 'nested.txt') with open(requirements, 'w') as f: f.write('-r ' + nested) with open(nested, 'w') as f: f.write('pbr') result = packaging.parse_requirements([requirements]) self.assertEqual(result, ['pbr']) class TestVersions(base.BaseTestCase): scenarios = [ ('preversioned', dict(preversioned=True)), ('postversioned', dict(preversioned=False)), ] def setUp(self): super(TestVersions, self).setUp() self.repo = self.useFixture(TestRepo(self.package_dir)) self.useFixture(GPGKeyFixture()) self.useFixture(base.DiveDir(self.package_dir)) def test_capitalized_headers(self): self.repo.commit() self.repo.tag('1.2.3') self.repo.commit('Sem-Ver: api-break') version = packaging._get_version_from_git() self.assertThat(version, matchers.StartsWith('2.0.0.dev1')) def test_capitalized_headers_partial(self): self.repo.commit() self.repo.tag('1.2.3') self.repo.commit('Sem-ver: api-break') version = packaging._get_version_from_git() self.assertThat(version, matchers.StartsWith('2.0.0.dev1')) def test_tagged_version_has_tag_version(self): self.repo.commit() self.repo.tag('1.2.3') version = packaging._get_version_from_git('1.2.3') self.assertEqual('1.2.3', version) def test_untagged_version_has_dev_version_postversion(self): self.repo.commit() self.repo.tag('1.2.3') self.repo.commit() version = packaging._get_version_from_git() self.assertThat(version, matchers.StartsWith('1.2.4.dev1')) def test_untagged_pre_release_has_pre_dev_version_postversion(self): self.repo.commit() self.repo.tag('1.2.3.0a1') self.repo.commit() version = packaging._get_version_from_git() self.assertThat(version, matchers.StartsWith('1.2.3.0a2.dev1')) def test_untagged_version_minor_bump(self): self.repo.commit() self.repo.tag('1.2.3') self.repo.commit('sem-ver: deprecation') version = packaging._get_version_from_git() self.assertThat(version, matchers.StartsWith('1.3.0.dev1')) def test_untagged_version_major_bump(self): self.repo.commit() self.repo.tag('1.2.3') self.repo.commit('sem-ver: api-break') version = packaging._get_version_from_git() self.assertThat(version, matchers.StartsWith('2.0.0.dev1')) def test_untagged_version_has_dev_version_preversion(self): self.repo.commit() self.repo.tag('1.2.3') self.repo.commit() version = packaging._get_version_from_git('1.2.5') self.assertThat(version, matchers.StartsWith('1.2.5.dev1')) def test_untagged_version_after_pre_has_dev_version_preversion(self): self.repo.commit() self.repo.tag('1.2.3.0a1') self.repo.commit() version = packaging._get_version_from_git('1.2.5') self.assertThat(version, matchers.StartsWith('1.2.5.dev1')) def test_untagged_version_after_rc_has_dev_version_preversion(self): self.repo.commit() self.repo.tag('1.2.3.0a1') self.repo.commit() version = packaging._get_version_from_git('1.2.3') self.assertThat(version, matchers.StartsWith('1.2.3.0a2.dev1')) def test_preversion_too_low_simple(self): # That is, the target version is either already released or not high # enough for the semver requirements given api breaks etc. 
self.repo.commit() self.repo.tag('1.2.3') self.repo.commit() # Note that we can't target 1.2.3 anymore - with 1.2.3 released we # need to be working on 1.2.4. err = self.assertRaises( ValueError, packaging._get_version_from_git, '1.2.3') self.assertThat(err.args[0], matchers.StartsWith('git history')) def test_preversion_too_low_semver_headers(self): # That is, the target version is either already released or not high # enough for the semver requirements given api breaks etc. self.repo.commit() self.repo.tag('1.2.3') self.repo.commit('sem-ver: feature') # Note that we can't target 1.2.4, the feature header means we need # to be working on 1.3.0 or above. err = self.assertRaises( ValueError, packaging._get_version_from_git, '1.2.4') self.assertThat(err.args[0], matchers.StartsWith('git history')) def test_get_kwargs_corner_cases(self): # No tags: git_dir = self.repo._basedir + '/.git' get_kwargs = lambda tag: packaging._get_increment_kwargs(git_dir, tag) def _check_combinations(tag): self.repo.commit() self.assertEqual(dict(), get_kwargs(tag)) self.repo.commit('sem-ver: bugfix') self.assertEqual(dict(), get_kwargs(tag)) self.repo.commit('sem-ver: feature') self.assertEqual(dict(minor=True), get_kwargs(tag)) self.repo.uncommit() self.repo.commit('sem-ver: deprecation') self.assertEqual(dict(minor=True), get_kwargs(tag)) self.repo.uncommit() self.repo.commit('sem-ver: api-break') self.assertEqual(dict(major=True), get_kwargs(tag)) self.repo.commit('sem-ver: deprecation') self.assertEqual(dict(major=True, minor=True), get_kwargs(tag)) _check_combinations('') self.repo.tag('1.2.3') _check_combinations('1.2.3') def test_invalid_tag_ignored(self): # Fix for bug 1356784 - we treated any tag as a version, not just those # that are valid versions. self.repo.commit() self.repo.tag('1') self.repo.commit() # when the tree is tagged and its wrong: self.repo.tag('badver') version = packaging._get_version_from_git() self.assertThat(version, matchers.StartsWith('1.0.1.dev1')) # When the tree isn't tagged, we also fall through. self.repo.commit() version = packaging._get_version_from_git() self.assertThat(version, matchers.StartsWith('1.0.1.dev2')) # We don't fall through x.y versions self.repo.commit() self.repo.tag('1.2') self.repo.commit() self.repo.tag('badver2') version = packaging._get_version_from_git() self.assertThat(version, matchers.StartsWith('1.2.1.dev1')) # Or x.y.z versions self.repo.commit() self.repo.tag('1.2.3') self.repo.commit() self.repo.tag('badver3') version = packaging._get_version_from_git() self.assertThat(version, matchers.StartsWith('1.2.4.dev1')) # Or alpha/beta/pre versions self.repo.commit() self.repo.tag('1.2.4.0a1') self.repo.commit() self.repo.tag('badver4') version = packaging._get_version_from_git() self.assertThat(version, matchers.StartsWith('1.2.4.0a2.dev1')) # Non-release related tags are ignored. self.repo.commit() self.repo.tag('2') self.repo.commit() self.repo.tag('non-release-tag/2014.12.16-1') version = packaging._get_version_from_git() self.assertThat(version, matchers.StartsWith('2.0.1.dev1')) def test_valid_tag_honoured(self): # Fix for bug 1370608 - we converted any target into a 'dev version' # even if there was a distance of 0 - indicating that we were on the # tag itself. 
self.repo.commit() self.repo.tag('1.3.0.0a1') version = packaging._get_version_from_git() self.assertEqual('1.3.0.0a1', version) class TestRequirementParsing(base.BaseTestCase): def test_requirement_parsing(self): tempdir = self.useFixture(fixtures.TempDir()).path requirements = os.path.join(tempdir, 'requirements.txt') with open(requirements, 'wt') as f: f.write(textwrap.dedent(six.u("""\ bar quux<1.0; python_version=='2.6' """))) setup_cfg = os.path.join(tempdir, 'setup.cfg') with open(setup_cfg, 'wt') as f: f.write(textwrap.dedent(six.u("""\ [metadata] name = test_reqparse [extras] test = foo baz>3.2 :python_version=='2.7' """))) # pkg_resources.split_sections uses None as the title of an # anonymous section instead of the empty string. Weird. expected_requirements = { None: ['bar'], ":(python_version=='2.6')": ['quux<1.0'], "test:(python_version=='2.7')": ['baz>3.2'], "test": ['foo'] } setup_py = os.path.join(tempdir, 'setup.py') with open(setup_py, 'wt') as f: f.write(textwrap.dedent(six.u("""\ #!/usr/bin/env python import setuptools setuptools.setup( setup_requires=['pbr'], pbr=True, ) """))) self._run_cmd(sys.executable, (setup_py, 'egg_info'), allow_fail=False, cwd=tempdir) egg_info = os.path.join(tempdir, 'test_reqparse.egg-info') requires_txt = os.path.join(egg_info, 'requires.txt') with open(requires_txt, 'rt') as requires: generated_requirements = dict( pkg_resources.split_sections(requires)) self.assertEqual(expected_requirements, generated_requirements) def load_tests(loader, in_tests, pattern): return testscenarios.load_tests_apply_scenarios(loader, in_tests, pattern)
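

# Illustrative sketch (not pbr's implementation): the Sem-Ver commit
# footers exercised by TestVersions and test_get_kwargs_corner_cases above
# map onto version-increment keyword arguments roughly as follows. Header
# matching is case-insensitive, which is why both the 'Sem-Ver:' and
# 'sem-ver:' spellings are tested.
def _semver_increment_sketch(commit_messages):
    """Return the strongest version bump implied by a list of messages."""
    kwargs = {}
    for message in commit_messages:
        lowered = message.lower()
        if 'sem-ver: api-break' in lowered:
            kwargs['major'] = True
        if 'sem-ver: feature' in lowered or 'sem-ver: deprecation' in lowered:
            kwargs['minor'] = True
    return kwargs


if __name__ == '__main__':
    # Mirrors the expectations asserted in test_get_kwargs_corner_cases.
    assert _semver_increment_sketch(['sem-ver: bugfix']) == {}
    assert _semver_increment_sketch(['sem-ver: feature']) == {'minor': True}
    assert _semver_increment_sketch(
        ['sem-ver: api-break', 'sem-ver: deprecation']
    ) == {'major': True, 'minor': True}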
# -*- coding: utf-8 -*- from __future__ import absolute_import # So yaml can be imported import re import logging import sys import copy import collections import exceptions import traceback import pprint import json import StringIO import salt.fileclient import salt.client import salt.renderers.pyobjects import salt.utils.odict import salt.utils import salt.template try: import salt.utils.serializers.yaml as yaml_serializer except ImportError: import salt.serializers.yaml as yaml_serializer from salt.exceptions import SaltRenderError from salt.utils.yamlloader import SaltYamlSafeLoader import voluptuous from voluptuous import (MultipleInvalid, Invalid, Required, Any, Coerce, # All, # Msg, # Lower ) log = logging.getLogger(__name__) class Schema(object): ''' Checks schema of objects. Will raise a RenderError if schema is not correct Will coerce data where applicable as well as return defaults where no values were supplied ''' @classmethod def coerce_to_list(cls, type, msg=None): """ Convert a value into a list. """ # pylint: disable=W0622, C0103 def f(v): if isinstance(v, type): return [v] else: raise Invalid(msg or ('expected %s' % type.__name__)) return f @classmethod def schema(cls, data, schema, index): schema = voluptuous.Schema(schema) try: return schema(data) except MultipleInvalid as error: raise RenderError(str(error), error.path[-1], index=index) @classmethod def pillars(cls, data=None, index=None): ''' $pillars declaration needs to be before any other yaml or python $pillars: auto: True|(False) disabled: - <state_id> - <state_id> enabled: - <state_id> - <state_id>: <pillar_id> aliases: - <state_id>.<state_name>: None|<path> The `$pillars` declaration contains instructions on how to deal with merging pillar data into the state file. auto: True: Default. Will attempt to automatically merge pillar data for all state_id's. A pillar_id with the same name as the state_id will need exist with the same yaml structure: pillar_id: pillar_name: - values - values False: No automatic attempt will be made to merge pillar data. enabled/disabled: Over-rides auto declaration. If auto is set to True/all any state_id's contained in `disabled` will not be merged, unless over-ridden by a state_data `__pillar__` directive. `disabled` has no meaning if `auto` is False If auto is set to False any state_id's contained in `enabled` will attempt to be merged with pillar data, unless over-ridden by a state_data `__pillar__` directive. `enabled` also allows a map to specific a pillar_id that is not the same as the state_id. Specify a mapped pillar_id like this: - state_id: pillar_id - http: apache aliases: Aliases allow shorter paths to the pillar data. Normally, the pillar structure must be the same as state structure in able to automatically merge data. In some cases this may not be desired so an alias may be set on a per state basis. Setting `path` to `None` will use root (base) of pillar_id. Typical structure: state_id: pillar_id: state_name: pillar_name: - state_data1 ---> - pillar_data1 - state_data2 ---> - pillar_data2 Aliased: state_id.state_name: None state_id.state_name: pillar_id: - state_data1 ---> - pillar_data1 - state_data2 ---> - pillar_data2 __pillar__ declaration: A `__pillar__` declaration can be set in any state and will over ride `auto`, `disabled` and `enabled` declarations. 
state_id: state_name: __pillar__: True|False|<string> True: Will attempt to merge pillar data string: string value of the pillar_id to use (map) False: Will not attempt to merge pillar data __alias__ declaration: An `__alias__` declaration can be set to change the path to pillar_data. Only the path needs to be set since state_id and state_path can be obtained. state_id: state_name: __alias__: None|<path> ''' if not index: index = {} if not data: data = {} enabled = [str, {str: str}] schema = { Required('auto', default=False): Any(Coerce(bool)), Required('disabled', default=[]): Any([str], cls.coerce_to_list(str)), Required('enabled', default=[]): Any(enabled, cls.coerce_to_list(str)), Required('aliases', default=[]): [{str: Any(str, None)}], } # Repack short dictionary list as a dictionary data = cls.schema(data, schema, index) data['aliases'] = salt.utils.repack_dictlist(data['aliases']) return data class RenderError(SaltRenderError): ''' Used when the YamlScript renderer needs to raise an explicit error. If an index object are passed, get_context will be invoked to get the location of the error. ''' header = '-- ERROR IN YAMLSCRIPT TEMPLATE ------------------' header2 = '-- DEBUGGING TRACE INFO --------------------------' _marker = object debug = True def __init__(self, error, value=_marker, index=None, mode=None): line_num = None buf = '' marker = ' <======================' trace = None if mode == 'exec': error, line_num, buf = self.exec_error(error, value, index) elif index: error, line_num, buf = self.index_error(error, value, index) else: error = self.basic_error(error, value) if line_num is not None: marker = ' <======[LINE {0}]======'.format(line_num) SaltRenderError.__init__(self, error, line_num, buf, marker, trace) @classmethod def exec_error(cls, error, value, index): msg, line_num, buf = cls.index_error(error, value, index) if isinstance(error, exceptions.SyntaxError): lineno = error[1][1] elif isinstance(sys.exc_info()[1], exceptions.SyntaxError): lineno = sys.exc_info()[1][1][1] else: lineno = 0 for frame in traceback.extract_tb(sys.exc_info()[2]): fname, lineno, fn_, text = frame # pylint: disable=W0612 line_num = lineno + index['key_start_line'] + 1 + index.get('key_start_line_offset', 0) return msg, line_num, buf @classmethod def index_error(cls, error, value, index): line_num = None if value is None: value = 'None|null' node_name = index['key'] template = index['template'] sls = index['sls'] start = index.get('key_start_index', 0) start_line = index.get('key_start_line', 0) end = index.get('value_end_index', len(template)) snippet = template[start:end] # XXX: why only splice [0]? 
if isinstance(value, list): value = value[0] # TODO: pattern may still need some work pdict = { 'prefix': '' if not node_name.startswith('$') else '\$', 'node_name': node_name if not node_name.startswith('$') else node_name[1:], 'value_or_eol': '\\b{0}'.format(value) if value != cls._marker else '$', } pattern = r'({0[prefix]}\b{0[node_name]}).+?({0[value_or_eol]})'.format(pdict) # Ignore case to be able to match True/true, False/false pattern = re.compile(pattern, re.MULTILINE | re.DOTALL | re.IGNORECASE) for match in re.finditer(pattern, snippet): line_num = snippet.count("\n", 0, match.end()) + 1 break if line_num: line_num = line_num + start_line msg = cls.text_trace() msg += '\n{0}\n{1}{2}\n{3}{4}\n{5}{6}{7}{8}{9}\n'.format( cls.header, 'SLS FILE: ', sls or 'UNKNOWN', 'NODE : ', node_name, 'VALUE : ' if value != cls._marker else '', value if value != cls._marker else '', '\n' if value != cls._marker else '', 'ERROR : ', error, ) return msg, line_num, template @classmethod def basic_error(cls, error, value): header = '{0}\n-- NO POSITIONAL INFO AVAILABLE --'.format(cls.header) msg = cls.text_trace() msg += '\n{0}\n{1}{2}{3}{4}{5}\n'.format( header, 'value: ' if value != cls._marker else '', value if value != cls._marker else '', '\n' if value != cls._marker else '', 'description of error: ', error, ) return msg @classmethod def text_trace(cls): if not cls.debug: return '' try: fname, lineno, fn_, text = traceback.extract_tb(sys.exc_info()[2])[0] except IndexError: return '' msg = '\n{0}\n{1}{2}\n{3}{4}\n{5}{6}\n{7}{8}\n'.format( cls.header2, 'FILENAME: ', fname, 'LINE NUM: ', lineno, 'FUNCTION: ', fn_, 'LINE TEXT: ', text ) return msg class YSOrderedDict(salt.utils.odict.OrderedDict): ''' Extend OrderedDict so we can store positional information for debugging ''' __index__ = None def __init__(self, *args, **kwds): ''' args[0]: Initialize with arg[0] dictionary args[1]: (list) - Contains a list of string names to include when creating the dictionary so a listing containing ['users']['user']['file'] would only copy the 'tree' values from arg[0] leaving a dictionary with the following structure: {user: {user: {file: <everything under file>}}} args[1]: (YSOrderedDict) - will be used only to set the __index__ parameters ''' self.__index__ = {} if len(args) > 2: raise TypeError('expected at most 2 arguments, got %d' % len(args)) elif len(args) == 2 and not (isinstance(args[1], list) or isinstance(args[1], YSOrderedDict)): raise TypeError('expected a list or YSOrderedDict as second argument, got %d' % type(args[1])) super(YSOrderedDict, self).__init__() # create dictionary from sections provided # skips **kwds if len(args) == 2 and isinstance(args[1], list): if not args[1]: self.update(args[0], **kwds) return other = args[0] sections = args[1] for i, section in enumerate(sections): info = other.__index__.get(section, {}) if isinstance(other, YSOrderedDict) else {} other = other[section] if i == len(sections) - 1: self.setdefault(sections[i], other) self.__index__[sections[i]] = info else: self.setdefault(sections[i], YSOrderedDict()) self.__index__[sections[i]] = info self = self[section] else: self.update(*args, **kwds) def update(self, *args, **kwds): if args: super(YSOrderedDict, self).update(args[0], **kwds) else: super(YSOrderedDict, self).update(**kwds) if len(args) == 1 and isinstance(args[0], YSOrderedDict): for key in args[0].__index__.keys(): self.__index__[key] = args[0].__index__[key] elif len(args) == 2 and isinstance(args[1], YSOrderedDict): for key in self.keys(): if key in 
args[1].__index__.keys(): self.__index__[key] = args[1].__index__[key] def setdefault(self, key, default=None, info=None): # pylint: disable=W0221 if info: if isinstance(info, YSOrderedDict) and key in info.__index__.keys(): self.__index__[key] = info.__index__[key] return super(YSOrderedDict, self).setdefault(key, default) def update_at(self, other, index=0): 'updates the dictionary at index position' if hasattr(other, 'viewitems'): other = other.viewitems() ins = [(k if k not in self else self[k], v) for k, v in other] if ins: left = self.items()[0:index] right = self.items()[index:] self.clear() self.update(left) self.update(ins) self.update(right) def insert(self, item, index=0): 'insert a single item at index position' replace = self.items() replace.insert(index, item) self.clear() self.update(replace) def insert_before(self, key, item): 'insert a single item before key name' self.insert(self.keys().index(key), item) def insert_after(self, key, item): 'insert a single item after key name' self.insert(self.keys().index(key) + 1, item) def rename(self, key, new_key): 'rename a key. keeps position in ordereddict' replace = [(new_key if k == key else k, v) for k, v in self.iteritems()] self.clear() self.update(replace) if key in self.__index__.keys(): self.__index__[new_key] = self.__index__.pop(key) @classmethod def convert(cls, dict_): 'converts any dictionary and nested dictionaries to a YSOrderedDict' for key, value in dict_.iteritems(): if isinstance(value, collections.Mapping) and not isinstance(value, YSOrderedDict): dict_[key] = cls.convert(dict_[key]) elif isinstance(value, list): for element in value: if isinstance(element, collections.Mapping) and not isinstance(element, YSOrderedDict): value[value.index(element)] = cls.convert(element) return YSOrderedDict(dict_) class YamlScriptSafeLoader(SaltYamlSafeLoader, object): ''' create a custom YAML loader that uses the custom constructor. The default salt loader will not allow duplicate key, which can exist within yamlscript for yamlscript $commands so those keys are caught and renamed by appending '# <position>' where <position> is len(mapping). 
''' BAD_CHARS = {'none': None, 'true': True, 'false': False} sls = '' template = '' def convert_bad_chars(self, value): ''' yaml pillar conversion can convert values such as None to a string and we don't want that, so we fix it here ''' if isinstance(value, str): if value.strip().lower() in self.BAD_CHARS: return self.BAD_CHARS[value.strip().lower()] return value def __init__(self, stream, dictclass=YSOrderedDict): self.template = stream salt.utils.yamlloader.SaltYamlSafeLoader.__init__(self, stream, dictclass) def construct_mapping(self, node, deep=False): ''' Build the mapping for YAML ''' if not isinstance(node, salt.utils.yamlloader.MappingNode): raise salt.utils.yamlloader.ConstructorError( None, None, 'expected a mapping node, but found {0}'.format(node.id), node.start_mark) self.flatten_mapping(node) mapping = self.dictclass() for key_node, value_node in node.value: key = self.construct_object(key_node, deep=deep) try: hash(key) except TypeError: err = ('While constructing a mapping {0} found unacceptable ' 'key {1}').format(node.start_mark, key_node.start_mark) raise salt.utils.yamlloader.ConstructorError(err) value = self.construct_object(value_node, deep=deep) value = self.convert_bad_chars(value) if key in mapping: if is_script_node(key): key = ''.join('{0} #{1}'.format(key, len(mapping))) # We want to know about it if key is still not unique enough if key in mapping: raise salt.utils.yamlloader.ConstructorError('Conflicting ID "{0}"'.format(key)) # Store positional information for debugging index = {'key': key, 'key_start_line': key_node.start_mark.line, 'key_start_index': key_node.start_mark.index, 'key_end_line': key_node.end_mark.line, 'key_end_index': key_node.end_mark.index, 'value_start_line': value_node.start_mark.line, 'value_start_index': value_node.start_mark.index, 'value_end_line': value_node.end_mark.line, 'value_end_index': value_node.end_mark.index, 'sls': self.sls, 'template': self.template, } mapping.__index__[key] = index # pylint: disable=E1103 mapping[key] = value return mapping def debug(*args): ''' Pretty print debug messages if debugging enabled ''' # So we can debug our output if needed _debug = False if _debug: print 'debug(): type({0})'.format(type(args)) if isinstance(args, tuple) and len(args) == 1: args = args[0] if _debug: print 'debug(): type({0})'.format(type(args)) if isinstance(args, DataWrapper): args = args._data # pylint: disable=E1103, W0212 if isinstance(args, salt.utils.odict.OrderedDict): if _debug: print 'debug(): Format as OrderedDict (json)' print json.dumps(args, indent=2) elif isinstance(args, dict) or isinstance(args, list): if _debug: print 'debug(): Format as dict (pprint)' pprint.pprint(args) else: if _debug: print 'debug(): Format as plain (print)' print args class DataWrapper(object): ''' Wrap an existing dict, or create a new one, and access with either dot notation or key lookup. The attribute _data is reserved and stores the underlying dictionary. When using the += operator with default=True, the empty nested dict is replaced with the operand, effectively creating a default dictionary of mixed types. 
d({}) Existing dict to wrap, an empty dict is created by default default({}) Create an e default value instead of raising a KeyError example: >>>dw = DataWrapper({'pp':3}) >>>dw.a.b += 2 >>>dw.a.b += 2 >>>dw.a['c'] += 'Hello' >>>dw.a['c'] += ' World' >>>dw.a.d >>>print dw._data {'a': {'c': 'Hello World', 'b': 4, 'd': {}}, 'pp': 3} ''' __marker = object() def __init__(self, d=None, default=__marker): if not d: d = {} supr = super(DataWrapper, self) supr.__setattr__('_data', d) supr.__setattr__('__default', default) supr.__setattr__('__empty', self.__Empty__()) def __getattr__(self, name): try: value = self._data[name] except KeyError: if not super(DataWrapper, self).__getattribute__('__default'): value = super(DataWrapper, self).__getattribute__('__default') self._data[name] = value elif super(DataWrapper, self).__getattribute__('__default') == self.__marker: return super(DataWrapper, self).__getattribute__('__empty') else: raise # If value is a dictionary; wrap it if hasattr(value, 'items'): default = super(DataWrapper, self).__getattribute__('__default') return DataWrapper(value, default=default) return value def __setattr__(self, name, value): self._data[name] = value def __getitem__(self, key): try: value = self._data[key] except KeyError: if not super(DataWrapper, self).__getattribute__('__default'): value = super(DataWrapper, self).__getattribute__('__default') self._data[key] = value elif super(DataWrapper, self).__getattribute__('__default') == self.__marker: return super(DataWrapper, self).__getattribute__('__empty') else: raise # If value is a dictionary; wrap it if hasattr(value, 'items'): default = super(DataWrapper, self).__getattribute__('__default') return DataWrapper(value, default=default) return value def __setitem__(self, key, value): self._data[key] = value def __iadd__(self, other): if self._data: raise TypeError("A Nested dict will only be replaced if it's empty") else: return other def __str__(self): return self._data.__str__() def __repr__(self): return self._data.__repr__() @staticmethod class __Empty__(object): # pylint: disable=C0103 def __len__(self): return 0 def __getitem__(self, key): return self def __getattr__(self, key): return self def __call__(self): return (None, None) def get(self, key, failobj=None): # pylint: disable=W0613 return self def __contains__(self, key): return self def __str__(self): return 'EMPTY - NoneType' def __repr__(self, _repr_running=None): return 'EMPTY - NoneType' def update(target, source, create=False, allowed=None): ''' Updates the values of a nested dictionary of varying depth without over- writing the targets root nodes. Original code example from: http://stackoverflow.com/questions/3232943/update-value-of-a-nested-dictionary-of-varying-depth target Target dictionary to update source Source dictionary that will be used to update `target` create if True then new keys can be created during update, otherwise they will be tossed if they do not exist in `target`. 
allowed list of allowed keys that can be created even if create is False ''' if not allowed: allowed = [] for key, value in source.items(): if isinstance(value, collections.Mapping): if key in target.keys() or create or key in allowed: replace = update(target.get(key, {}), value, create=create) target[key] = replace else: if key in target.keys() or create or key in allowed: target[key] = source[key] return target def is_script_node(key): ''' Returns the token stripped of the leading '$' and spaces if found Returns None if key starts with a '$' but is not valid Returns False otherwise ''' valid_keys = ['$python', '$for ', '$if ', '$elif ', '$else', '$with', '$include', '$extend', '$pillars', '$test_file', '$defaults', '$comment', '$import'] for k in valid_keys: if key.startswith(k): return k.strip()[1:] if key.startswith('$'): return None return False _marker = object def set_alias(data, pillars, state_id, state_name): ''' If a pillar exists, it will set as __pillar_data__ and returned. Aliases can be used to assist locatinf the pillar This Allows shorter path names to values when looking up pillar values. set_aliases does not check if pillar_data should be used or not; that is up to the calling function to provide those checks (set_pillar does) ''' pillar = data.get('__pillar__', state_id) pillar_data = data.get('__pillar_data__', {}) if pillar is not False and not pillar_data: pillar_data = __pillar__.get(pillar, {}) # Over-ride pillar preferences and allow since no checks are done here if pillar is False: pillar = state_id alias = data.get('__alias__', _marker) if alias == _marker: if state_name: pillar = '{0}.{1}'.format(pillar, state_name) alias = pillars['aliases'].get(pillar, _marker) # alias not found; automatically see if we can find one if alias == _marker: # pillar is not nested; use found pillar # pillar_id.state_name key if pillar_data.get(pillar, _marker) != _marker: alias = pillar # state_name key elif state_name and pillar_data.get(state_name, _marker) != _marker: alias = state_name else: # Just can't find a suitable match to be able to provide # pillar data in a reliable manner alias = False data['__alias__'] = alias # If the pillar_key is set to null/None we want the root if alias is False: pillar_data = {} elif alias is not None: pillar_data = pillar_data.get(alias, {}) data['__pillar_data__'] = pillar_data # XXX: Template return pillar_data def set_pillar_data(data, pillars, state_id, state_name): ''' Don't allow pillar merging for templates that are not yamlscript since UNLESS the __pillar__ key has already been set or pillars.auto has been set to 'all' Any yamlscript that has already been generated will already have a __pillar__ declaration, so we wont need to worry about rules set within ''' def is_true(key): for value in pillars[key]: if isinstance(value, str) and state_id == value: return True elif isinstance(value, dict) and state_id in value.keys(): return value[state_id] # Will need the mapped value return False pillar = data.get('__pillar__', None) if pillar is False: return {} elif pillar: return set_alias(data, pillars, state_id, state_name) # Check to see if we should even search for pillar data auto = pillars['auto'] disabled = is_true('disabled') enabled = is_true('enabled') if auto and disabled: pillar = False elif not auto and enabled: pillar = enabled # XXX: Added for Template elif auto: pillar = state_id else: pillar = False data['__pillar__'] = pillar if pillar is False: return {} return set_alias(data, pillars, state_id, state_name) def 
compile_state_data( data, state_id=None, state_name=None, set_defaults=False, attach_defaults=False, pillars=None, saltenv=None): ''' Takes salt structured data like {id}{state[.function]}[{function value list}] and converts it to {id}{state}{key:values} Returns an YSOrderedDict ordered dictionary ''' if state_name is not None and '.' in state_name: state_name = state_name.split('.')[0] # Lets not mess with original if state_id is not None: if state_id not in data.keys(): raise KeyError data = copy.deepcopy(YSOrderedDict(data, [state_id])) else: data = copy.deepcopy(YSOrderedDict(data)) valid_keys = [ 'name', 'names', '__id__', '__fun__', '__argspec__', '__pillar__', '__alias__', '__pillar_data__', '__data__', '__env__', ] # be sure all the data is YSOrderedDict type data = YSOrderedDict.convert(data) high = YSOrderedDict() # Split apart any state.function combinations for state_id, states in data.items(): for key in states: if not isinstance(states[key], list): continue if '.' in key: comps = key.split('.') states.rename(key, comps[0]) states[comps[0]].append(comps[1]) key = comps[0] if state_name is not None and state_name != key: continue high.setdefault(state_id, YSOrderedDict(), data) high[state_id].setdefault(key, YSOrderedDict(), states) # Find function and add it as __fun__: for value in states[key]: if isinstance(value, str): high[state_id][key]['__fun__'] = value elif isinstance(value, dict): high[state_id][key].update(value) else: continue state_values = high[state_id][key] state_func_name = '{0}.{1}'.format(key, state_values['__fun__']) if saltenv: state_values['__env__'] = saltenv if set_defaults or attach_defaults: function = __states__[state_func_name] args, kwargs = salt.utils.arg_lookup(function).values() if set_defaults: for k in args: state_values.setdefault(k, None) for k, val in kwargs.items(): state_values.setdefault(k, val) if attach_defaults: valid_keys.extend(args) valid_keys.extend(kwargs.keys()) state_values['__argspec__'] = valid_keys set_pillar_data(state_values, pillars, state_id, state_name) return high class Deserialize(object): test_data = YSOrderedDict() script_data = YSOrderedDict() templates = YSOrderedDict() state_list = [] pillars = {} script_node = None index = None def __init__(self, template, saltenv='base', sls='', defaults=False, **kwargs): self.template = template self.saltenv = saltenv self.sls = sls self.sls_type = kwargs.get('sls_type', None) self.kwargs = kwargs self.defaults = defaults # Use __opts__ from config module otherwise roots file client will get # stuck using the 'file_roots' for pillars if any pillars are using # yamlscript. 
config module contains unmodified 'file_roots' opts = __opts__ if self.sls_type in 'pillar': opts = sys.modules['salt.loaded.int.module.config'].__opts__ self.client = salt.fileclient.get_file_client(opts) if isinstance(template, dict): self.state_file_content = YSOrderedDict.convert(template) self.pillars = Schema.pillars() self.pillars['auto'] = False else: # Set sls name in YamlScriptSafeLoader so it can be included when # creating __index__ self.pillars = Schema.pillars() template.seek(0) self.template = template.read() template.seek(0) YamlScriptSafeLoader.template = self.template YamlScriptSafeLoader.sls = self.sls self.state_file_content = self.deserialize_yamlscript_file(template) def get_saltenv(self, sls, saltenv): try: sls, saltenv = sls.split('@') except ValueError: pass return sls, saltenv def get_state_dest(self, sls, saltenv=None): state_data = self.client.get_state(sls, saltenv or self.saltenv) dest = state_data.get('dest', False) if not dest: raise RenderError('No such file or directory', sls, index=self.index) return dest def get_state_source(self, sls, saltenv=None): state_data = self.client.get_state(sls, saltenv or self.saltenv) source = state_data.get('source', False) if not source: raise RenderError('No such file or directory', sls, index=self.index) return source def get_salt_file(self, salt_file, saltenv=None): try: state_file = self.client.cache_file(salt_file, saltenv or self.saltenv) with open(state_file) as file_: return file_.read() except IOError, error: raise RenderError(error, salt_file, self.index) @staticmethod def deserialize_yamlscript_file(template): template.seek(0) return yaml_serializer.deserialize(template.read(), **{'Loader': YamlScriptSafeLoader}) def deserialize_salt_file(self, template): return yaml_serializer.deserialize(self.get_salt_file(template)) def deserialize_salt_files(self, templates): state_file_content = [] if isinstance(templates, str): templates = [templates] for data in templates: data = self.deserialize_salt_file(data) state_file_content.append(data) return state_file_content def generate(self, state_file_content, script_data, saltenv=''): ''' ''' for key_node, value_node in state_file_content.items(): # Make positional debugging info available if hasattr(state_file_content, '__index__'): self.index = state_file_content.__index__.get(key_node, {}) else: self.index = {} # script_node returns None if it had a '$' prefix, but invalid token script_node = is_script_node(key_node) if script_node is None: raise RenderError('Invalid Yamlscript token', index=self.index) if script_node: self.script_node = script_node key_node = key_node[1:] # Strip '$' token command = YSOrderedDict( {'__yamlscript__': {'type': script_node, 'statement': ''.join(key_node.split("#")[0]).strip(), 'index': self.index } } ) # $defaults if script_node == 'defaults': self.defaults = value_node # $python elif script_node in ['python']: command['__yamlscript__']['statement'] = value_node script_data[key_node] = command script_data.__index__['$' + key_node] = self.index # $pillar elif script_node == 'pillars': self.pillars = Schema.pillars(value_node, self.index) # $test_file elif script_node == 'test_file': for data in self.deserialize_salt_files(value_node): data = data.get('local', data) self.test_data.update(data) # $with elif script_node == 'with' and isinstance(value_node.values()[0], list): # 'with' node withing YAML syntax only id_ = key_node.split(' ', 1)[1] new_key_node = YSOrderedDict({id_: YSOrderedDict()}) new_key_node.__index__[id_] = 
state_file_content.__index__['$' + key_node] new_key_node[id_].update(YSOrderedDict({value_node.keys()[0]: value_node.values()[0]}, value_node)) # Just leave the nested items in value_node so we can attach them later value_node.pop(value_node.keys()[0]) # create command object, then attach the nested content script_data[key_node] = self.generate(new_key_node, command) content = self.generate(value_node, YSOrderedDict()) script_data[key_node]['__yamlscript__']['content'] = content script_data.__index__['$' + key_node] = self.index # $import elif script_node == 'import': if isinstance(value_node, str): value_node = [value_node] for sls in value_node: source = self.get_state_source(sls) data = self.generate( self.deserialize_yamlscript_file(StringIO.StringIO(self.get_salt_file(source))), YSOrderedDict() ) # Don't allow duplicate keys or values could be over-written for key_data, value_data in data.items(): if '$' + key_data in state_file_content.keys(): key_data += source script_data[key_data] = value_data # $include elif script_node == 'include': if isinstance(value_node, str): value_node = [value_node] for sls in value_node: # Allow include from another env ($include vim@base) sls, saltenv = self.get_saltenv(sls, self.saltenv) dest = self.get_state_dest(sls, saltenv) kwargs = copy.deepcopy(self.kwargs) kwargs['env'] = saltenv try: state = salt.template.compile_template(dest, renderers=kwargs.pop('renderers'), default=__opts__['renderer'], saltenv=saltenv, sls=sls, **kwargs ) except SaltRenderError: raise # If a yamlscript sls file was included, it cached the # Deserialize instance, so we use it if isinstance(Cache.get(sls), Deserialize): deserialize = Cache.get(sls) self.test_data.update(deserialize.test_data) Cache.pop(sls) else: deserialize = Deserialize(state, saltenv=saltenv, sls=sls, **kwargs ) deserialize.generate(deserialize.state_file_content, YSOrderedDict(), saltenv=saltenv) script_data.update(deserialize.script_data) self.state_list.extend(deserialize.state_list) # $for, $if, $elif, $else, $with elif script_node in ['for', 'if', 'elif', 'else', 'with']: script_data[key_node] = self.generate(value_node, command) script_data.__index__['$' + key_node] = self.index # $comment elif script_node in ['comment']: pass else: raise RenderError('Yamlscript token not implemented', value_node, index=self.index) continue # sls '- include' - convert to a yamlscript $include and parse it elif key_node == 'include': state_file_content.rename(key_node, '${0}'.format(key_node)) key_node = '${0}'.format(key_node) self.generate(YSOrderedDict(state_file_content, [key_node]), script_data) continue # Deal with pillars if self.sls_type == 'pillar': # TODO: No positional info available! 
script_data.setdefault(key_node, YSOrderedDict(), state_file_content) script_data[key_node].update(YSOrderedDict(pillar=value_node)) continue # Deal with templates elif self.sls_type == 'template': script_data.setdefault(key_node, YSOrderedDict(), state_file_content) script_data[key_node].update(YSOrderedDict(template=value_node)) # Set alias to None if not already set to ensure pillar data can be found if self.pillars['aliases'].get(key_node, None) is None: self.pillars['aliases'][key_node + '.template'] = None # Retreive any related pillar data for template pd = set_pillar_data(script_data[key_node], self.pillars, key_node, 'template') # Replace data with pillar data if pd: script_data[key_node]['template'] = pd self.state_list.append((key_node, 'template')) # XXX: Is this needed continue # Only deal with one item at a time elif isinstance(value_node, dict) and len(value_node) > 1: for nested_script_data in value_node.keys(): # pylint: disable=E1103 if is_script_node(nested_script_data): self.generate( YSOrderedDict( {nested_script_data: YSOrderedDict( {key_node: state_file_content[key_node][nested_script_data]}, state_file_content ) }, state_file_content[key_node] ), script_data ) # XXX: Why break? we not completeing other nested data ### break else: self.generate(YSOrderedDict(state_file_content, [key_node, nested_script_data]), script_data) continue # Allow empty states like cmd.run elif isinstance(value_node, str): value_node = YSOrderedDict({value_node: []}) state_file_content[key_node] = value_node elif not isinstance(value_node, dict): raise RenderError('Not implemented', index=self.index) state_name = value_node.keys()[0] # pylint: disable=E1103 if '.' in state_name: state_name = state_name.split('.')[0] high = compile_state_data( YSOrderedDict(state_file_content, [key_node]), state_id=key_node, state_name=state_name, set_defaults=self.defaults, attach_defaults=True, pillars=self.pillars, saltenv=saltenv ) script_data.setdefault(key_node, YSOrderedDict(), state_file_content) script_data[key_node].update(YSOrderedDict(high[key_node], [state_name])) self.state_list.append((key_node, state_name)) self.script_data = script_data return script_data class Cache(object): ''' Cache is used to cache copies of Deserialize instances when an 'include' statement is provided in the template since we will want to be able to use the already rendered data if available for tests and not have to deserialize the script data again. ''' cache = {} @classmethod def __init__(cls, context): 'context is a global varable used to hold the cache' if 'yamlscript_cache' not in context.keys(): context['yamlscript_cache'] = {} cls.cache = context['yamlscript_cache'] @classmethod def all(cls): 'returns the complete cache dictionary' return cls.cache @classmethod def get(cls, sls): 'get a cached item from cache' return cls.cache.get(sls, None) @classmethod def set(cls, sls, value): 'set an item to be cached' cls.cache[sls] = value @classmethod def pop(cls, sls): 'remove an item from the cache' cls.cache.pop(sls, None) def test(salt_data, test_data, sls=''): ''' Runs a test to confirm state values provided in test file matched the generated salt_data values exactly as well as confirming the id's of the yaml represented states. 
:param OrderedDict salt_data: pyobjects generated state data :param YSOrderedDict test_data: De-serialized test data :return: number of errors in test1 and test2 :rtype: int, int The best way to run tests is to call the state you are running directly like: `salt-call --local --out=yaml state.show_sls users` test_data can be included in the state file and defined as a list like: .. code-block:: yaml $test_file: - salt://users/tests.mel - salt://users/tests.bobby And the test_data file should look something like this: .. code-block:: yaml local: mel_shadow_group: group: - addusers: null - delusers: null - gid: null - members: null - name: shadow - system: false - present mel_sudo_group: ... The test runs two ways, first confirming all expected salt_data is with test_data, then the other way around testing if there are extra values within the salt_data that are not in the test_data. ''' # Ignore these keys since they may differ on each run ignore = ['order', '__sls__', '__env__'] # Compile salt_data into a valid yamlscript structure salt_data = compile_state_data(salt_data) # Deserialize test state file if isinstance(test_data, str): test_data = yaml_serializer.deserialize(test_data) # Compile test_data into a valid yamlscript structure and allow test_data to # contain the parent 'local' key as shown 'state.show_sls users' or not test_data = compile_state_data(test_data.get('local', test_data)) def compare(test_data, data, text=None, mismatch='mismatch', key_error='key_error'): ''' Function that compares test_data to salt_data and generates a list of errors that will be displayed :param YSOrderedDict test_data: used as the base to test from :param YSOrderedDict data: data is compared to see if it matches test_data :param dict text: dictionary of all default text messages to use :param str mismatch: key to use to identify a data mismatch that refers to a message in text :param str key_error: key to use to identify a data key_error that refers to a message in text :return: a list of error messages :rtype: list ''' if not text: text = {} errors = [] for state_id, states in test_data.items(): for state_name, state_values in states.items(): for key, value in state_values.items(): if key in ignore: continue try: result_vars = dict(state_id=state_id, state_name=state_name, key=key, value=value) data_value = data[state_id][state_name][key] result_vars.update(data_value=data_value) if value != data_value: # MISMATCH if result_vars['value'] == '': result_vars['value'] = "\'\'" if result_vars['data_value'] == '': result_vars['data_value'] = "\'\'" errors.append(text[mismatch].format(result_vars)) except KeyError: # KEY ERROR if result_vars['value'] == '': result_vars['value'] = "\'\'" errors.append(text[key_error].format(result_vars)) return errors sls = ' ({0})'.format(sls) if sls else sls text = { 'mismatch': '<> MISMATCH: {{{0[state_id]}}}:{{{0[state_name]}}}:{{{0[key]}}} is {0[data_value]} but should ' 'be: {0[value]}', 'key_error': '-- MISSING : {{{0[state_id]}}}:{{{0[state_name]}}} does not contain key {{{0[key]}: {0[value]}}}', 'key_error2': '++ EXTRA : {{{0[state_id]}}}:{{{0[state_name]}}}:{{{0[key]}: {0[value]}}} not in test_data', 'test_data': 'TEST RESULTS - THE FOLLOWING IS MISSING FROM RENDERED STATE FILE', 'ruler': '========================================================================================', 'salt_data': 'TEST RESULTS - THE FOLLOWING IS PRESENT IN GENERATED STATE FILE, BUT NOT IN TEST FILE', 'end': 'TEST RESULTS - END 
=====================================================================', 'blank': '', 'no_error_test': 'Yamlscript Renderer{0}: No errors for test_data'.format(sls), 'no_error_salt': 'Yamlscript Renderer{0}: No errors for salt_data'.format(sls) } test1 = compare(test_data, salt_data, text=text) if test1: print text['test_data'] print text['ruler'] for message in test1: print message print text['blank'] else: print text['no_error_test'] test2 = compare(salt_data, test_data, text=text, mismatch='', key_error='key_error2') if test2: print text['salt_data'] print text['ruler'] for message in test2: print message print text['blank'] else: print text['no_error_salt'] return len(test1), len(test2)
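

# Minimal, self-contained illustration of the merge semantics implemented
# by update() above: with create=False, keys that do not already exist in
# the target are dropped unless listed in `allowed`, and nested mappings
# are merged rather than replaced wholesale. This is only a sketch for
# clarity, not a substitute for the module's own function (which also
# handles Salt's ordered-dict types).
def _merge_sketch(target, source, create=False, allowed=()):
    for key, value in source.items():
        if isinstance(value, dict):
            if key in target or create or key in allowed:
                target[key] = _merge_sketch(target.get(key, {}), value,
                                            create=create)
        elif key in target or create or key in allowed:
            target[key] = value
    return target


# Example: pillar data fills in keys the state already declares, but
# cannot invent new keys unless create=True or the key is allowed.
_merged = _merge_sketch(
    {'user': {'shell': '/bin/sh', 'home': '/home/mel'}},
    {'user': {'shell': '/bin/bash', 'uid': 1000}, 'extra': True},
)
assert _merged == {'user': {'shell': '/bin/bash', 'home': '/home/mel'}}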
# -*- coding: utf-8 -*- # from __future__ import print_function import six from . import color as mycol from . import path as mypath def get_legend_label_(line): '''Check if line is in legend ''' label = line.get_label() try: ax = line.axes leg = ax.get_legend() return label in [l.get_label() for l in leg.get_lines()] except AttributeError: return None def draw_line2d(data, obj): '''Returns the PGFPlots code for an Line2D environment. ''' content = [] addplot_options = [] # If line is of length 0, do nothing. Otherwise, an empty \addplot table # will be created, which will be interpreted as an external data source # in either the file '' or '.tex'. Instead, render nothing. # pylint: disable=len-as-condition if len(obj.get_xdata()) == 0: return data, [] # get the linewidth (in pt) line_width = _mpl_linewidth2pgfp_linewidth(data, obj.get_linewidth()) if line_width: addplot_options.append(line_width) # get line color color = obj.get_color() data, line_xcolor, _ = mycol.mpl_color2xcolor(data, color) addplot_options.append(line_xcolor) alpha = obj.get_alpha() if alpha is not None: addplot_options.append('opacity=%r' % alpha) show_line, linestyle = _mpl_linestyle2pgfp_linestyle(obj.get_linestyle()) if show_line and linestyle: addplot_options.append(linestyle) marker_face_color = obj.get_markerfacecolor() marker_edge_color = obj.get_markeredgecolor() data, marker, extra_mark_options = \ _mpl_marker2pgfp_marker(data, obj.get_marker(), marker_face_color) if marker: addplot_options.append('mark=' + marker) mark_size = obj.get_markersize() if mark_size: # setting half size because pgfplots counts the radius/half-width pgf_size = int(0.5 * mark_size) # make sure we didn't round off to zero by accident if pgf_size == 0 and mark_size != 0: pgf_size = 1 addplot_options.append('mark size=%d' % pgf_size) mark_every = obj.get_markevery() if mark_every: addplot_options.append('mark repeat=%d' % mark_every) mark_options = ['solid'] if extra_mark_options: mark_options.append(extra_mark_options) if marker_face_color is None or \ (isinstance(marker_face_color, six.string_types) and marker_face_color == 'none'): mark_options.append('fill opacity=0') else: data, face_xcolor, _ = mycol.mpl_color2xcolor( data, marker_face_color ) if face_xcolor != line_xcolor: mark_options.append('fill=' + face_xcolor) face_and_edge_have_equal_color = \ marker_edge_color == marker_face_color # Sometimes, the colors are given as arrays. Collapse them into a # single boolean. try: face_and_edge_have_equal_color = \ all(face_and_edge_have_equal_color) except TypeError: pass if not face_and_edge_have_equal_color: data, draw_xcolor, _ = mycol.mpl_color2xcolor( data, marker_edge_color ) if draw_xcolor != line_xcolor: mark_options.append('draw=' + draw_xcolor) addplot_options.append('mark options={%s}' % ','.join(mark_options)) if marker and not show_line: addplot_options.append('only marks') # Check if a line is not in a legend and forget it if so, # fixes bug #167: if not get_legend_label_(obj): addplot_options.append("forget plot") # process options content.append('\\addplot ') if addplot_options: options = ', '.join(addplot_options) content.append('[' + options + ']\n') content.append('table {%\n') # nschloe, Oct 2, 2015: # The transform call yields warnings and it is unclear why. Perhaps # the input data is not suitable? Anyhow, this should not happen. # Comment out for now. 
# xdata, ydata = _transform_to_data_coordinates(obj, *obj.get_data()) xdata, ydata = obj.get_data() try: has_mask = ydata.mask.any() except AttributeError: has_mask = 0 if has_mask: # matplotlib jumps at masked images, while PGFPlots by default # interpolates. Hence, if we have a masked plot, make sure that # PGFPlots jumps as well. data['extra axis options'].add('unbounded coords=jump') for (x, y, is_masked) in zip(xdata, ydata, ydata.mask): if is_masked: content.append('%.15g nan\n' % x) else: content.append('%.15g %.15g\n' % (x, y)) else: for (x, y) in zip(xdata, ydata): content.append('%.15g %.15g\n' % (x, y)) content.append('};\n') return data, content def draw_linecollection(data, obj): '''Returns Pgfplots code for a number of patch objects. ''' content = [] edgecolors = obj.get_edgecolors() linestyles = obj.get_linestyles() linewidths = obj.get_linewidths() paths = obj.get_paths() for i, path in enumerate(paths): if i < len(edgecolors): color = edgecolors[i] else: color = edgecolors[0] if i < len(linestyles): style = linestyles[i] else: style = linestyles[0] if i < len(linewidths): width = linewidths[i] else: width = linewidths[0] data, options = mypath.get_draw_options(data, color, None) width = _mpl_linewidth2pgfp_linewidth(data, width) if width: options.append(width) # linestyle is a string or dash tuple. Legal string values are # solid|dashed|dashdot|dotted. The dash tuple is (offset, onoffseq) # where onoffseq is an even length tuple of on and off ink in points. # # solid: [(None, None), (None, None), ..., (None, None)] # dashed: (0, (6.0, 6.0)) # dotted: (0, (1.0, 3.0)) # dashdot: (0, (3.0, 5.0, 1.0, 5.0)) if style[0] is not None: assert isinstance(style, tuple) if len(style[1]) == 2: linestyle = 'dash pattern=on %dpt off %dpt' % \ (int(style[1][0]), int(style[1][1])) else: assert len(style[1]) == 4 linestyle = 'dash pattern=on %dpt off %dpt on %dpt off %dpt' \ % (int(style[1][0]), int(style[1][1]), int(style[1][2]), int(style[1][3])) options.append(linestyle) # TODO what about masks? data, cont = mypath.draw_path( data, path, draw_options=options, simplify=False ) content.append(cont) return data, content TIKZ_LINEWIDTHS = { 0.1: 'ultra thin', 0.2: 'very thin', 0.4: 'thin', 0.6: 'semithick', 0.8: 'thick', 1.2: 'very thick', 1.6: 'ultra thick' } def _mpl_linewidth2pgfp_linewidth(data, line_width): if data['strict']: # Takes the matplotlib linewidths, and just translate them # into PGFPlots. try: return TIKZ_LINEWIDTHS[line_width] except KeyError: # explicit line width return 'line width=%spt' % line_width else: # The following is an alternative approach to line widths. # The default line width in matplotlib is 1.0pt, in PGFPlots 0.4pt # ('thin'). # Match the two defaults, and scale for the rest. 
scaled_line_width = line_width / 1.0 # scale by default line width literals = { 0.25: 'ultra thin', 0.5: 'very thin', 1.0: None, # default, 'thin' 1.5: 'semithick', 2: 'thick', 3: 'very thick', 4: 'ultra thick', } try: out = literals[scaled_line_width] except KeyError: # explicit line width out = 'line width=%rpt' % (0.4 * line_width) return out # for matplotlib markers, see: http://matplotlib.org/api/markers_api.html _MP_MARKER2PGF_MARKER = { '.': '*', # point 'o': 'o', # circle '+': '+', # plus 'x': 'x', # x 'None': None, ' ': None, '': None } # the following markers are only available with PGF's plotmarks library _MP_MARKER2PLOTMARKS = { 'v': ('triangle', 'rotate=180'), # triangle down '1': ('triangle', 'rotate=180'), '^': ('triangle', None), # triangle up '2': ('triangle', None), '<': ('triangle', 'rotate=270'), # triangle left '3': ('triangle', 'rotate=270'), '>': ('triangle', 'rotate=90'), # triangle right '4': ('triangle', 'rotate=90'), 's': ('square', None), 'p': ('pentagon', None), '*': ('asterisk', None), 'h': ('star', None), # hexagon 1 'H': ('star', None), # hexagon 2 'd': ('diamond', None), # diamond 'D': ('diamond', None), # thin diamond '|': ('|', None), # vertical line '_': ('-', None) # horizontal line } def _mpl_marker2pgfp_marker(data, mpl_marker, marker_face_color): '''Translates a marker style of matplotlib to the corresponding style in PGFPlots. ''' # try default list try: pgfplots_marker = _MP_MARKER2PGF_MARKER[mpl_marker] if (marker_face_color is not None) and pgfplots_marker == 'o': pgfplots_marker = '*' data['tikz libs'].add('plotmarks') marker_options = None return (data, pgfplots_marker, marker_options) except KeyError: pass # try plotmarks list try: data['tikz libs'].add('plotmarks') pgfplots_marker, marker_options = _MP_MARKER2PLOTMARKS[mpl_marker] if marker_face_color is not None and \ marker_face_color.lower() != 'none' and \ pgfplots_marker not in ['|', '-']: pgfplots_marker += '*' return (data, pgfplots_marker, marker_options) except KeyError: pass # There's no equivalent for the pixel marker in Pgfplots. if mpl_marker == ',': print('Unsupported marker '','' (pixel).') return data, None, None _MPLLINESTYLE_2_PGFPLOTSLINESTYLE = { '': None, 'None': None, 'none': None, # happens when using plt.boxplot() '-': None, ':': 'dotted', '--': 'dashed', '-.': 'dash pattern=on 1pt off 3pt on 3pt off 3pt' } def _mpl_linestyle2pgfp_linestyle(line_style): '''Translates a line style of matplotlib to the corresponding style in PGFPlots. ''' show_line = (line_style != 'None') style = _MPLLINESTYLE_2_PGFPLOTSLINESTYLE[line_style] return show_line, style # def _transform_to_data_coordinates(obj, xdata, ydata): # '''The coordinates might not be in data coordinates, but could be partly # in axes coordinates. For example, the matplotlib command # axes.axvline(2) # will have the y coordinates set to 0 and 1, not to the limits. Therefore, # a two-stage transform has to be applied: # 1. first transforming to display coordinates, then # 2. from display to data. # In case of problems (non-invertible, or whatever), print a warning and # continue anyways. 
# ''' # try: # import matplotlib.transforms # points = numpy.array(zip(xdata, ydata)) # transform = matplotlib.transforms.composite_transform_factory( # obj.get_transform(), # obj.axes.transData.inverted() # ) # points_data = transform.transform(points) # xdata, ydata = zip(*points_data) # except Exception as e: # print(xdata, ydata) # print(('Problem during transformation:\n' + # ' %s\n' + # 'Continuing with original data.') # % e # ) # return (xdata, ydata)
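

# Usage sketch of the helpers defined above (illustrative only): in the
# non-strict mode, line widths that match one of the literal TikZ
# thicknesses are emitted as keywords, and anything else falls back to an
# explicit 'line width=...pt' scaled against the PGFPlots default of
# 0.4pt. The `data` dict with a 'strict' key mirrors how the helpers are
# called elsewhere in this module.
if __name__ == '__main__':
    _data = {'strict': False}
    print(_mpl_linewidth2pgfp_linewidth(_data, 2.0))  # 'thick'
    print(_mpl_linewidth2pgfp_linewidth(_data, 1.0))  # None (default width)
    print(_mpl_linewidth2pgfp_linewidth(_data, 2.5))  # 'line width=1.0pt'
    print(_mpl_linestyle2pgfp_linestyle('--'))        # (True, 'dashed')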
from __future__ import print_function from collections import namedtuple import numpy as np import tensorflow as tf from model import LSTMPolicy, MetaPolicy import six.moves.queue as queue import scipy.signal import threading import distutils.version use_tf12_api = distutils.version.LooseVersion(tf.VERSION) >= distutils.version.LooseVersion('0.12.0') import cv2 def discount(x, gamma): return scipy.signal.lfilter([1], [1, -gamma], x[::-1], axis=0)[::-1] class A3C(object): def __init__(self, env, task, visualise, test=False): """ An implementation of the A3C algorithm that is reasonably well-tuned for the VNC environments. Below, we will have a modest amount of complexity due to the way TensorFlow handles data parallelism. But overall, we'll define the model, specify its inputs, and describe how the policy gradients step should be computed. """ self.env = env self.task = task self.meta_action_size = 32 worker_device = "/job:worker/task:{}/cpu:0".format(task) if test: worker_device = "/job:eval/task:{}/cpu:0".format(task) with tf.device(tf.train.replica_device_setter(1, worker_device=worker_device)): with tf.variable_scope("global"): self.network = LSTMPolicy(env.observation_space.shape, env.action_space.n, self.meta_action_size) self.global_step = tf.get_variable("global_step", [], tf.int32, initializer=tf.constant_initializer(0, dtype=tf.int32), trainable=False) self.meta_network = MetaPolicy(env.observation_space.shape, self.meta_action_size) with tf.device(worker_device): with tf.variable_scope("local"): self.local_network = pi = LSTMPolicy(env.observation_space.shape, env.action_space.n, self.meta_action_size) self.local_meta_network = meta_pi = MetaPolicy(env.observation_space.shape, self.meta_action_size) pi.global_step = self.global_step self.ac = tf.placeholder(tf.float32, [None, env.action_space.n], name="ac") self.adv = tf.placeholder(tf.float32, [None], name="adv") self.r = tf.placeholder(tf.float32, [None], name="r") log_prob_tf = tf.nn.log_softmax(pi.logits) prob_tf = tf.nn.softmax(pi.logits) # the "policy gradients" loss: its derivative is precisely the policy gradient # notice that self.ac is a placeholder that is provided externally. # adv will contain the advantages, as calculated in process_rollout pi_loss = - tf.reduce_sum(tf.reduce_sum(log_prob_tf * self.ac, [1]) * self.adv) # loss of value function vf_loss = 0.5 * tf.reduce_sum(tf.square(pi.vf - self.r)) entropy = - tf.reduce_sum(prob_tf * log_prob_tf) bs = tf.to_float(tf.shape(pi.x)[0]) self.loss = pi_loss + 0.5 * vf_loss - entropy * 0.01 self.visualise = visualise grads = tf.gradients(self.loss, pi.var_list) actor_summary = [ tf.summary.scalar("model/policy_loss", pi_loss / bs), tf.summary.scalar("model/value_loss", vf_loss / bs), tf.summary.scalar("model/entropy", entropy / bs), tf.summary.image("model/state", pi.x), tf.summary.scalar("model/grad_global_norm", tf.global_norm(grads)), tf.summary.scalar("model/var_global_norm", tf.global_norm(pi.var_list)) ] self.summary_op = tf.summary.merge(actor_summary) grads, _ = tf.clip_by_global_norm(grads, 40.0) # This is sync ops which copy weights from shared space to the local. 
self.sync = tf.group( *( [ v1.assign(v2) for v1, v2 in zip(pi.var_list, self.network.var_list)] )) grads_and_vars = list(zip(grads, self.network.var_list)) inc_step = self.global_step.assign_add(tf.shape(pi.x)[0]) # each worker has a different set of adam optimizer parameters opt = tf.train.AdamOptimizer(1e-4) self.train_op = tf.group(opt.apply_gradients(grads_and_vars), inc_step) self.summary_writer = None self.local_steps = 0 ################################### ########## META CONTROLLER ######## ################################### self.meta_ac = tf.placeholder(tf.float32, [None, self.meta_action_size], name="meta_ac") self.meta_adv = tf.placeholder(tf.float32, [None], name="meta_adv") self.meta_r = tf.placeholder(tf.float32, [None], name="meta_r") meta_log_prob_tf = tf.nn.log_softmax(meta_pi.logits) meta_prob_tf = tf.nn.softmax(meta_pi.logits) meta_pi_loss = - tf.reduce_sum(tf.reduce_sum(meta_log_prob_tf * self.meta_ac, [1]) * self.meta_adv) meta_vf_loss = 0.5 * tf.reduce_sum(tf.square(meta_pi.vf - self.meta_r)) # entropy meta_entropy = - tf.reduce_sum(meta_prob_tf * meta_log_prob_tf) meta_bs = tf.to_float(tf.shape(meta_pi.x)[0]) self.meta_loss = meta_pi_loss + 0.5 * meta_vf_loss - meta_entropy * 0.01 meta_grads = tf.gradients(self.meta_loss, meta_pi.var_list) meta_grads, _ = tf.clip_by_global_norm(meta_grads, 40.0) self.meta_sync = tf.group( *( [ v1.assign(v2) for v1, v2 in zip(meta_pi.var_list, self.meta_network.var_list)] )) meta_grads_and_vars = list(zip(meta_grads, self.meta_network.var_list)) meta_opt = tf.train.AdamOptimizer(1e-4) self.meta_train_op = meta_opt.apply_gradients(meta_grads_and_vars) meta_summary = [ tf.summary.scalar("meta_model/policy_loss", meta_pi_loss / meta_bs), tf.summary.scalar("meta_model/value_loss", meta_vf_loss / meta_bs), tf.summary.scalar("meta_model/entropy", meta_entropy / meta_bs), tf.summary.scalar("meta_model/grad_global_norm", tf.global_norm(meta_grads)), tf.summary.scalar("meta_model/var_global_norm", tf.global_norm(meta_pi.var_list)) ] self.meta_summary_op = tf.summary.merge(meta_summary) self.beta = 0.75 def start(self, sess, summary_writer): self.summary_writer = summary_writer # Initialise Actor # Initialise last_state and last_features self.last_state = self.env.reset() self.last_features = self.local_network.get_initial_features() self.last_action = np.zeros(self.env.action_space.n) self.last_reward = [0] self.length = 0 self.rewards = 0 self.ex_rewards = 0 self.in_rewards = 0 # Initialise Meta controller self.last_meta_state = self.env.reset() self.last_meta_features = self.local_meta_network.get_initial_features() self.last_meta_action = np.zeros(self.meta_action_size) self.last_meta_reward = [0] # self.last_conv_feature = np.zeros(self.meta_action_size) def process(self, sess): """ Everytime process is called. The meta_network get sync. The actor_process is run for 20 times. The meta_network calculate gradient and update """ sess.run(self.meta_sync) terminal_end = False # TODO: tune this too num_local_steps = 20 env = self.env policy = self.local_meta_network states = [] actions = [] rewards = [] values = [] r = 0.0 terminal= False features= [] prev_actions = [] prev_rewards = [] for _local_step in range(num_local_steps): fetched = policy.act(self.last_meta_state, self.last_meta_features[0], self.last_meta_features[1], self.last_meta_action, self.last_meta_reward) action, value_, features_ = fetched[0], fetched[1], fetched[2:] reward = 0 # run actors several times # TODO: tune this ... 2? 
maybe for _ in range(5): state, reward_, terminal, info = self.actor_process(sess, action) reward += reward_ if terminal: break # collect experience states += [self.last_meta_state] actions += [action] rewards += [reward] values += [value_] features += [self.last_meta_features] prev_actions += [self.last_meta_action] prev_rewards += [self.last_meta_reward] # update state self.last_meta_state = state self.last_meta_features = features_ self.last_meta_action = action self.last_meta_reward = [reward] if terminal: self.last_meta_features = policy.get_initial_features() break if not terminal: r = policy.value(self.last_meta_state, self.last_meta_features[0], self.last_meta_features[1], self.last_meta_action, self.last_meta_reward) # Process rollout gamma = 0.99 lambda_ = 1.0 batch_si = np.asarray(states) batch_a = np.asarray(actions) rewards_plus_v = np.asarray(rewards + [r]) rewards = np.asarray(rewards) vpred_t = np.asarray(values + [r]) batch_r = discount(rewards_plus_v, gamma)[:-1] delta_t = rewards + gamma * vpred_t[1:] - vpred_t[:-1] # this formula for the advantage comes "Generalized Advantage Estimation": # https://arxiv.org/abs/1506.02438 batch_adv = discount(delta_t, gamma * lambda_) batch_prev_a = np.asarray(prev_actions) batch_prev_r = np.asarray(prev_rewards) features = features[0] # Gradient Calculation fetches = [self.meta_summary_op, self.meta_train_op, self.global_step] feed_dict = { self.local_meta_network.x: batch_si, self.meta_ac: batch_a, self.meta_adv: batch_adv, self.meta_r: batch_r, self.local_meta_network.state_in[0]: features[0], self.local_meta_network.state_in[1]: features[1], self.local_meta_network.prev_action: batch_prev_a, self.local_meta_network.prev_reward: batch_prev_r } fetched = sess.run(fetches, feed_dict=feed_dict) if self.task == 0: self.summary_writer.add_summary(tf.Summary.FromString(fetched[0]), fetched[-1]) self.summary_writer.flush() def actor_process(self, sess, meta_action): """ Every time actor_process is called. The network get sync. The environment is run for 20 steps or until termination. The worker calculates gradients and then one update to the shared weight is made. (one local step = one update =< 20 env steps ) (global step is the number of frames) """ sess.run(self.sync) # copy weights from shared to local # Environment run for 20 steps or less terminal_end = False num_local_steps = 20 env = self.env policy = self.local_network states = [] actions = [] rewards = [] values = [] r = 0.0 terminal= False features= [] prev_actions = [] prev_rewards = [] extrinsic_rewards = [] # select patch 1 in 36. 
each patch is 14x14 # idx = 6*x + y where x:[0,5], y[0:5], idx:[0,35] # x = idx // 6 idx = meta_action.argmax() #pos_x = idx // 6 #pos_y = idx - 6*pos_x #goal_patch = np.zeros([84, 84, 3]) #if idx != 37: # goal_patch[ 14 * pos_x: 14 * (pos_x + 1) + 1, 14*pos_y: 14*(pos_y+1) +1 ] = 1 for _local_step in range(num_local_steps): # Take a step fetched = policy.act(self.last_state, self.last_features[0], self.last_features[1], self.last_action, self.last_reward, meta_action) action, value_, features_ = fetched[0], fetched[1], fetched[2:] # argmax to convert from one-hot state, reward, terminal, info = env.step(action.argmax()) # clip reward reward = min(1, max(-1, reward)) # Intrinsic reward # Pixel control #pixel_changes = (state - self.last_state)**2 # mean square error normalized by all pixel_changes #intrinsic_reward = 0.05 * np.sum( pixel_changes * goal_patch ) / np.sum( pixel_changes + 1e-5) # Feature control [selectivity (Bengio et al., 2017)] conv_feature = policy.get_conv_feature(state)[0][0] sel = np.abs(conv_feature[idx] - self.last_conv_feature[idx]) sel = sel / ( np.sum( np.abs(conv_feature - self.last_conv_feature) ) + 1e-5) self.last_conv_feature = conv_feature intrinsic_reward = 0.05 * sel # record extrinsic reward extrinsic_rewards += [reward] self.ex_rewards += reward self.in_rewards += intrinsic_reward # Apply intrinsic reward beta = self.beta reward = beta * reward + (1.0 - beta) * intrinsic_reward if self.visualise: vis = state - 0.5 * state * goal_patch + 0.5 * goal_patch vis = cv2.resize(vis, (500,500)) cv2.imshow('img', vis) cv2.waitKey(10) # collect the experience states += [self.last_state] actions += [action] rewards += [reward] values += [value_] features += [self.last_features] prev_actions += [self.last_action] prev_rewards += [self.last_reward] self.length += 1 self.rewards += reward self.last_state = state self.last_features = features_ self.last_action = action self.last_reward = [reward] if info: summary = tf.Summary() for k, v in info.items(): summary.value.add(tag=k, simple_value=float(v)) self.summary_writer.add_summary(summary, policy.global_step.eval()) self.summary_writer.flush() timestep_limit = env.spec.tags.get('wrapper_config.TimeLimit.max_episode_steps') if terminal or self.length >= timestep_limit: terminal_end = True if self.length >= timestep_limit or not env.metadata.get('semantics.autoreset'): self.last_state = env.reset() self.last_features = policy.get_initial_features() print("Episode finished. Sum of rewards: %d. 
Length: %d" % (self.rewards, self.length)) summary = tf.Summary() summary.value.add(tag='global/episode_shaped_reward', simple_value=self.rewards) summary.value.add(tag='global/shaped_reward_per_time', simple_value=self.rewards/self.length) summary.value.add(tag='global/episode_extrinsic_reward', simple_value=self.ex_rewards) summary.value.add(tag='global/episode_intrinsic_reward', simple_value=self.in_rewards) self.summary_writer.add_summary(summary, policy.global_step.eval()) self.summary_writer.flush() self.length = 0 self.rewards = 0 self.ex_rewards = 0 self.in_rewards = 0 break if not terminal_end: r = policy.value(self.last_state, self.last_features[0], self.last_features[1], self.last_action, self.last_reward, meta_action) # Process rollout gamma = 0.99 lambda_ = 1.0 batch_si = np.asarray(states) batch_a = np.asarray(actions) rewards_plus_v = np.asarray(rewards + [r]) rewards = np.asarray(rewards) vpred_t = np.asarray(values + [r]) batch_r = discount(rewards_plus_v, gamma)[:-1] delta_t = rewards + gamma * vpred_t[1:] - vpred_t[:-1] # this formula for the advantage comes "Generalized Advantage Estimation": # https://arxiv.org/abs/1506.02438 batch_adv = discount(delta_t, gamma * lambda_) batch_prev_a = np.asarray(prev_actions) batch_prev_r = np.asarray(prev_rewards) features = features[0] # only use first feature into dynamic rnn # Batch meta action batch_meta_ac = np.repeat([meta_action], len(batch_si), axis=0) # Gradient Calculation should_compute_summary = self.task == 0 and self.local_steps % 11 == 0 if should_compute_summary: fetches = [self.summary_op, self.train_op, self.global_step] else: fetches = [self.train_op, self.global_step] feed_dict = { self.local_network.x: batch_si, self.ac: batch_a, self.adv: batch_adv, self.r: batch_r, self.local_network.state_in[0]: features[0], self.local_network.state_in[1]: features[1], self.local_network.prev_action: batch_prev_a, self.local_network.prev_reward: batch_prev_r, self.local_network.meta_action: batch_meta_ac } fetched = sess.run(fetches, feed_dict=feed_dict) if should_compute_summary: self.summary_writer.add_summary(tf.Summary.FromString(fetched[0]), fetched[-1]) self.summary_writer.flush() self.local_steps += 1 # discount extrinsic reward for the meta controller #gamma = 0.99 # early rewards are better? #discount_filter = np.array([gamma**i for i in range(len(extrinsic_rewards))]) #extrinsic_reward = np.sum(discount_filter * extrinsic_rewards) return self.last_state, np.sum(extrinsic_rewards), terminal_end, None def evaluate(self,sess): global_step = sess.run(self.global_step) sess.run(self.meta_sync) sess.run(self.sync) meta_policy = self.local_meta_network policy = self.local_network env = self.env rewards_stat = [] length_stat = [] # average over 100 episode? 
for episode in range(100): terminal = False last_state = env.reset() last_meta_state = last_state last_features = policy.get_initial_features() last_meta_features = meta_policy.get_initial_features() last_meta_action = np.zeros(self.meta_action_size) last_meta_reward = [0] last_action = np.zeros(self.env.action_space.n) last_reward = [0] rewards = 0 length = 0 last_conv_feature = np.zeros(self.meta_action_size) while not terminal: fetched = meta_policy.act(last_meta_state, last_meta_features[0], last_meta_features[1], last_meta_action, last_meta_reward) meta_action, meta_value_, meta_features_ = fetched[0], fetched[1], fetched[2:] meta_reward = 0 idx = meta_action.argmax() for _ in range(20*5): fetched = policy.act(last_state, last_features[0], last_features[1], last_action, last_reward, meta_action) action, value_, features_ = fetched[0], fetched[1], fetched[2:] state, reward, terminal, info = env.step(action.argmax()) if self.visualise: vis = cv2.resize(state , (500,500)) cv2.imshow('img', vis) cv2.waitKey(10) env_reward = reward # clip reward reward = min(1, max(-1, reward)) # Feature control [selectivity (Bengio et al., 2017)] conv_feature = policy.get_conv_feature(state)[0][0] sel = np.abs(conv_feature[idx] - last_conv_feature[idx]) sel = sel / ( np.sum( np.abs(conv_feature - last_conv_feature) ) + 1e-5) last_conv_feature = conv_feature intrinsic_reward = 0.05 * sel # Apply intrinsic reward beta = self.beta shaped_reward = beta * reward + (1.0 - beta) * intrinsic_reward length += 1 rewards += env_reward last_state = state last_features = features_ last_action = action last_reward = [shaped_reward] meta_reward += reward timestep_limit = env.spec.tags.get('wrapper_config.TimeLimit.max_episode_steps') if terminal or length >= timestep_limit: terminal = True break last_meta_state = last_state last_meta_features = meta_features_ last_meta_action = meta_action last_meta_reward = [meta_reward] if terminal: break rewards_stat.append(rewards) length_stat.append(length) summary = tf.Summary() summary.value.add(tag='Eval/Average_Reward', simple_value=np.mean(rewards_stat)) summary.value.add(tag='Eval/SD_Reward', simple_value=np.std(rewards_stat)) summary.value.add(tag='Eval/Average_Lenght', simple_value=np.mean(length_stat)) self.summary_writer.add_summary(summary, global_step) self.summary_writer.flush()
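# A small self-contained sketch (separate from the classes above) of the rollout
# post-processing that both process() and actor_process() perform: discounted
# returns computed with scipy.signal.lfilter and advantages from Generalized
# Advantage Estimation (https://arxiv.org/abs/1506.02438). The reward and value
# numbers below are made up; gamma and lambda_ match the values used above.
import numpy as np
import scipy.signal

def discount(x, gamma):
    # y[t] = x[t] + gamma * y[t + 1], computed by filtering the reversed sequence
    return scipy.signal.lfilter([1], [1, -gamma], x[::-1], axis=0)[::-1]

gamma, lambda_ = 0.99, 1.0
rewards = np.array([0.0, 1.0, 0.0, 1.0])
values = np.array([0.5, 0.6, 0.4, 0.7])
bootstrap_value = 0.3  # value estimate for the state following the last step

rewards_plus_v = np.append(rewards, bootstrap_value)
vpred_t = np.append(values, bootstrap_value)

batch_r = discount(rewards_plus_v, gamma)[:-1]          # discounted returns
delta_t = rewards + gamma * vpred_t[1:] - vpred_t[:-1]  # TD residuals
batch_adv = discount(delta_t, gamma * lambda_)          # GAE advantages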
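# A second standalone sketch: the "feature control" intrinsic reward used in
# actor_process() and evaluate(). The meta action selects one convolutional
# feature (idx); the worker is rewarded for how selectively that feature changes
# relative to all features (the selectivity measure attributed above to
# Bengio et al., 2017), and the shaped reward mixes extrinsic and intrinsic
# parts with beta. The feature vectors here are random placeholders standing in
# for policy.get_conv_feature() outputs.
import numpy as np

rng = np.random.RandomState(1)
conv_feature = rng.rand(32)        # current conv features, one per meta action
last_conv_feature = rng.rand(32)   # conv features from the previous step
idx = 7                            # feature index chosen by the meta controller
beta = 0.75
extrinsic_reward = 1.0

sel = np.abs(conv_feature[idx] - last_conv_feature[idx])
sel /= np.sum(np.abs(conv_feature - last_conv_feature)) + 1e-5
intrinsic_reward = 0.05 * sel
shaped_reward = beta * extrinsic_reward + (1.0 - beta) * intrinsic_reward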
# Copyright 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Generates C++ source files from a mojom.Module.""" import mojom.generate.generator as generator import mojom.generate.module as mojom import mojom.generate.pack as pack from mojom.generate.template_expander import UseJinja _kind_to_cpp_type = { mojom.BOOL: "bool", mojom.INT8: "int8_t", mojom.UINT8: "uint8_t", mojom.INT16: "int16_t", mojom.UINT16: "uint16_t", mojom.INT32: "int32_t", mojom.UINT32: "uint32_t", mojom.FLOAT: "float", mojom.HANDLE: "mojo::Handle", mojom.DCPIPE: "mojo::DataPipeConsumerHandle", mojom.DPPIPE: "mojo::DataPipeProducerHandle", mojom.MSGPIPE: "mojo::MessagePipeHandle", mojom.SHAREDBUFFER: "mojo::SharedBufferHandle", mojom.NULLABLE_HANDLE: "mojo::Handle", mojom.NULLABLE_DCPIPE: "mojo::DataPipeConsumerHandle", mojom.NULLABLE_DPPIPE: "mojo::DataPipeProducerHandle", mojom.NULLABLE_MSGPIPE: "mojo::MessagePipeHandle", mojom.NULLABLE_SHAREDBUFFER: "mojo::SharedBufferHandle", mojom.INT64: "int64_t", mojom.UINT64: "uint64_t", mojom.DOUBLE: "double", } _kind_to_cpp_literal_suffix = { mojom.UINT8: "U", mojom.UINT16: "U", mojom.UINT32: "U", mojom.FLOAT: "f", mojom.UINT64: "ULL", } def ConstantValue(constant): return ExpressionToText(constant.value, kind=constant.kind) def DefaultValue(field): if field.default: if mojom.IsStructKind(field.kind): assert field.default == "default" return "%s::New()" % GetNameForKind(field.kind) return ExpressionToText(field.default, kind=field.kind) return "" def NamespaceToArray(namespace): return namespace.split(".") if namespace else [] def GetNameForKind(kind, internal = False): parts = [] if kind.imported_from: parts.extend(NamespaceToArray(kind.imported_from["namespace"])) if internal: parts.append("internal") if kind.parent_kind: parts.append(kind.parent_kind.name) parts.append(kind.name) return "::".join(parts) def GetCppType(kind): if mojom.IsArrayKind(kind): return "mojo::internal::Array_Data<%s>*" % GetCppType(kind.kind) if mojom.IsMapKind(kind): return "mojo::internal::Map_Data<%s, %s>*" % ( GetCppType(kind.key_kind), GetCppType(kind.value_kind)) if mojom.IsStructKind(kind): return "%s_Data*" % GetNameForKind(kind, internal=True) if mojom.IsUnionKind(kind): return "%s_Data" % GetNameForKind(kind, internal=True) if mojom.IsInterfaceKind(kind): return "mojo::internal::Interface_Data" if mojom.IsInterfaceRequestKind(kind): return "mojo::MessagePipeHandle" if mojom.IsEnumKind(kind): return "int32_t" if mojom.IsStringKind(kind): return "mojo::internal::String_Data*" return _kind_to_cpp_type[kind] def GetCppPodType(kind): if mojom.IsStringKind(kind): return "char*" return _kind_to_cpp_type[kind] def GetCppArrayArgWrapperType(kind): if mojom.IsEnumKind(kind): return GetNameForKind(kind) if mojom.IsStructKind(kind) or mojom.IsUnionKind(kind): return "%sPtr" % GetNameForKind(kind) if mojom.IsArrayKind(kind): return "mojo::Array<%s> " % GetCppArrayArgWrapperType(kind.kind) if mojom.IsMapKind(kind): return "mojo::Map<%s, %s> " % (GetCppArrayArgWrapperType(kind.key_kind), GetCppArrayArgWrapperType(kind.value_kind)) if mojom.IsInterfaceKind(kind): raise Exception("Arrays of interfaces not yet supported!") if mojom.IsInterfaceRequestKind(kind): raise Exception("Arrays of interface requests not yet supported!") if mojom.IsStringKind(kind): return "mojo::String" if mojom.IsGenericHandleKind(kind): return "mojo::ScopedHandle" if mojom.IsDataPipeConsumerKind(kind): return 
"mojo::ScopedDataPipeConsumerHandle" if mojom.IsDataPipeProducerKind(kind): return "mojo::ScopedDataPipeProducerHandle" if mojom.IsMessagePipeKind(kind): return "mojo::ScopedMessagePipeHandle" if mojom.IsSharedBufferKind(kind): return "mojo::ScopedSharedBufferHandle" return _kind_to_cpp_type[kind] def GetCppResultWrapperType(kind): if mojom.IsEnumKind(kind): return GetNameForKind(kind) if mojom.IsStructKind(kind) or mojom.IsUnionKind(kind): return "%sPtr" % GetNameForKind(kind) if mojom.IsArrayKind(kind): return "mojo::Array<%s>" % GetCppArrayArgWrapperType(kind.kind) if mojom.IsMapKind(kind): return "mojo::Map<%s, %s>" % (GetCppArrayArgWrapperType(kind.key_kind), GetCppArrayArgWrapperType(kind.value_kind)) if mojom.IsInterfaceKind(kind): return "%sPtr" % GetNameForKind(kind) if mojom.IsInterfaceRequestKind(kind): return "mojo::InterfaceRequest<%s>" % GetNameForKind(kind.kind) if mojom.IsStringKind(kind): return "mojo::String" if mojom.IsGenericHandleKind(kind): return "mojo::ScopedHandle" if mojom.IsDataPipeConsumerKind(kind): return "mojo::ScopedDataPipeConsumerHandle" if mojom.IsDataPipeProducerKind(kind): return "mojo::ScopedDataPipeProducerHandle" if mojom.IsMessagePipeKind(kind): return "mojo::ScopedMessagePipeHandle" if mojom.IsSharedBufferKind(kind): return "mojo::ScopedSharedBufferHandle" return _kind_to_cpp_type[kind] def GetCppWrapperType(kind): if mojom.IsEnumKind(kind): return GetNameForKind(kind) if mojom.IsStructKind(kind) or mojom.IsUnionKind(kind): return "%sPtr" % GetNameForKind(kind) if mojom.IsArrayKind(kind): return "mojo::Array<%s>" % GetCppArrayArgWrapperType(kind.kind) if mojom.IsMapKind(kind): return "mojo::Map<%s, %s>" % (GetCppArrayArgWrapperType(kind.key_kind), GetCppArrayArgWrapperType(kind.value_kind)) if mojom.IsInterfaceKind(kind): return "%sPtr" % GetNameForKind(kind) if mojom.IsInterfaceRequestKind(kind): raise Exception("InterfaceRequest fields not supported!") if mojom.IsStringKind(kind): return "mojo::String" if mojom.IsGenericHandleKind(kind): return "mojo::ScopedHandle" if mojom.IsDataPipeConsumerKind(kind): return "mojo::ScopedDataPipeConsumerHandle" if mojom.IsDataPipeProducerKind(kind): return "mojo::ScopedDataPipeProducerHandle" if mojom.IsMessagePipeKind(kind): return "mojo::ScopedMessagePipeHandle" if mojom.IsSharedBufferKind(kind): return "mojo::ScopedSharedBufferHandle" return _kind_to_cpp_type[kind] def GetCppConstWrapperType(kind): if mojom.IsStructKind(kind) or mojom.IsUnionKind(kind): return "%sPtr" % GetNameForKind(kind) if mojom.IsArrayKind(kind): return "mojo::Array<%s>" % GetCppArrayArgWrapperType(kind.kind) if mojom.IsMapKind(kind): return "mojo::Map<%s, %s>" % (GetCppArrayArgWrapperType(kind.key_kind), GetCppArrayArgWrapperType(kind.value_kind)) if mojom.IsInterfaceKind(kind): return "%sPtr" % GetNameForKind(kind) if mojom.IsInterfaceRequestKind(kind): return "mojo::InterfaceRequest<%s>" % GetNameForKind(kind.kind) if mojom.IsEnumKind(kind): return GetNameForKind(kind) if mojom.IsStringKind(kind): return "const mojo::String&" if mojom.IsGenericHandleKind(kind): return "mojo::ScopedHandle" if mojom.IsDataPipeConsumerKind(kind): return "mojo::ScopedDataPipeConsumerHandle" if mojom.IsDataPipeProducerKind(kind): return "mojo::ScopedDataPipeProducerHandle" if mojom.IsMessagePipeKind(kind): return "mojo::ScopedMessagePipeHandle" if mojom.IsSharedBufferKind(kind): return "mojo::ScopedSharedBufferHandle" if not kind in _kind_to_cpp_type: print "missing:", kind.spec return _kind_to_cpp_type[kind] def GetCppFieldType(kind): if 
mojom.IsStructKind(kind): return ("mojo::internal::StructPointer<%s_Data>" % GetNameForKind(kind, internal=True)) if mojom.IsUnionKind(kind): return "%s_Data" % GetNameForKind(kind, internal=True) if mojom.IsArrayKind(kind): return "mojo::internal::ArrayPointer<%s>" % GetCppType(kind.kind) if mojom.IsMapKind(kind): return ("mojo::internal::StructPointer<mojo::internal::Map_Data<%s, %s>>" % (GetCppType(kind.key_kind), GetCppType(kind.value_kind))) if mojom.IsInterfaceKind(kind): return "mojo::internal::Interface_Data" if mojom.IsInterfaceRequestKind(kind): return "mojo::MessagePipeHandle" if mojom.IsEnumKind(kind): return GetNameForKind(kind) if mojom.IsStringKind(kind): return "mojo::internal::StringPointer" return _kind_to_cpp_type[kind] def GetCppUnionFieldType(kind): if mojom.IsAnyHandleKind(kind): return "MojoHandle" if mojom.IsInterfaceKind(kind): return "uint64_t" if mojom.IsEnumKind(kind): return "int32_t" if mojom.IsUnionKind(kind): return ("mojo::internal::UnionPointer<%s_Data>" % GetNameForKind(kind, internal=True)) return GetCppFieldType(kind) def GetUnionGetterReturnType(kind): if (mojom.IsStructKind(kind) or mojom.IsUnionKind(kind) or mojom.IsArrayKind(kind) or mojom.IsMapKind(kind) or mojom.IsAnyHandleKind(kind) or mojom.IsInterfaceKind(kind)): return "%s&" % GetCppWrapperType(kind) return GetCppResultWrapperType(kind) def TranslateConstants(token, kind): if isinstance(token, mojom.NamedValue): # Both variable and enum constants are constructed like: # Namespace::Struct::CONSTANT_NAME # For enums, CONSTANT_NAME is ENUM_NAME_ENUM_VALUE. name = [] if token.imported_from: name.extend(NamespaceToArray(token.namespace)) if token.parent_kind: name.append(token.parent_kind.name) if isinstance(token, mojom.EnumValue): name.append( "%s_%s" % (generator.CamelCaseToAllCaps(token.enum.name), token.name)) else: name.append(token.name) return "::".join(name) if isinstance(token, mojom.BuiltinValue): if token.value == "double.INFINITY" or token.value == "float.INFINITY": return "INFINITY"; if token.value == "double.NEGATIVE_INFINITY" or \ token.value == "float.NEGATIVE_INFINITY": return "-INFINITY"; if token.value == "double.NAN" or token.value == "float.NAN": return "NAN"; if (kind is not None and mojom.IsFloatKind(kind)): return token if token.isdigit() else token + "f"; # Per C++11, 2.14.2, the type of an integer literal is the first of the # corresponding list in Table 6 in which its value can be represented. In this # case, the list for decimal constants with no suffix is: # int, long int, long long int # The standard considers a program ill-formed if it contains an integer # literal that cannot be represented by any of the allowed types. # # As it turns out, MSVC doesn't bother trying to fall back to long long int, # so the integral constant -2147483648 causes it grief: it decides to # represent 2147483648 as an unsigned integer, and then warns that the unary # minus operator doesn't make sense on unsigned types. Doh! if kind == mojom.INT32 and token == "-2147483648": return "(-%d - 1) /* %s */" % ( 2**31 - 1, "Workaround for MSVC bug; see https://crbug.com/445618") return "%s%s" % (token, _kind_to_cpp_literal_suffix.get(kind, "")) def ExpressionToText(value, kind=None): return TranslateConstants(value, kind) def ShouldInlineStruct(struct): # TODO(darin): Base this on the size of the wrapper class. 
if len(struct.fields) > 4: return False for field in struct.fields: if mojom.IsMoveOnlyKind(field.kind): return False return True def ShouldInlineUnion(union): return not any(mojom.IsMoveOnlyKind(field.kind) for field in union.fields) def GetArrayValidateParamsCtorArgs(kind): if mojom.IsStringKind(kind): expected_num_elements = 0 element_is_nullable = False element_validate_params = "nullptr" elif mojom.IsMapKind(kind): expected_num_elements = 0 element_is_nullable = mojom.IsNullableKind(kind.value_kind) element_validate_params = GetNewArrayValidateParams(kind.value_kind) else: expected_num_elements = generator.ExpectedArraySize(kind) or 0 element_is_nullable = mojom.IsNullableKind(kind.kind) element_validate_params = GetNewArrayValidateParams(kind.kind) return "%d, %s, %s" % (expected_num_elements, "true" if element_is_nullable else "false", element_validate_params) def GetNewArrayValidateParams(kind): if (not mojom.IsArrayKind(kind) and not mojom.IsMapKind(kind) and not mojom.IsStringKind(kind)): return "nullptr" return "new mojo::internal::ArrayValidateParams(%s)" % ( GetArrayValidateParamsCtorArgs(kind)) def GetMapValidateParamsCtorArgs(value_kind): # Unlike GetArrayValidateParams, we are given the wrapped kind, instead of # the raw array kind. So we wrap the return value of GetArrayValidateParams. element_is_nullable = mojom.IsNullableKind(value_kind) return "0, %s, %s" % ("true" if element_is_nullable else "false", GetNewArrayValidateParams(value_kind)) class Generator(generator.Generator): cpp_filters = { "constant_value": ConstantValue, "cpp_const_wrapper_type": GetCppConstWrapperType, "cpp_field_type": GetCppFieldType, "cpp_union_field_type": GetCppUnionFieldType, "cpp_pod_type": GetCppPodType, "cpp_result_type": GetCppResultWrapperType, "cpp_type": GetCppType, "cpp_union_getter_return_type": GetUnionGetterReturnType, "cpp_wrapper_type": GetCppWrapperType, "default_value": DefaultValue, "expression_to_text": ExpressionToText, "get_array_validate_params_ctor_args": GetArrayValidateParamsCtorArgs, "get_map_validate_params_ctor_args": GetMapValidateParamsCtorArgs, "get_name_for_kind": GetNameForKind, "get_pad": pack.GetPad, "has_callbacks": mojom.HasCallbacks, "should_inline": ShouldInlineStruct, "should_inline_union": ShouldInlineUnion, "is_array_kind": mojom.IsArrayKind, "is_cloneable_kind": mojom.IsCloneableKind, "is_enum_kind": mojom.IsEnumKind, "is_integral_kind": mojom.IsIntegralKind, "is_move_only_kind": mojom.IsMoveOnlyKind, "is_any_handle_kind": mojom.IsAnyHandleKind, "is_interface_kind": mojom.IsInterfaceKind, "is_interface_request_kind": mojom.IsInterfaceRequestKind, "is_map_kind": mojom.IsMapKind, "is_nullable_kind": mojom.IsNullableKind, "is_object_kind": mojom.IsObjectKind, "is_string_kind": mojom.IsStringKind, "is_struct_kind": mojom.IsStructKind, "is_union_kind": mojom.IsUnionKind, "struct_size": lambda ps: ps.GetTotalSize() + _HEADER_SIZE, "stylize_method": generator.StudlyCapsToCamel, "to_all_caps": generator.CamelCaseToAllCaps, "under_to_camel": generator.UnderToCamel, } def GetJinjaExports(self): return { "module": self.module, "namespace": self.module.namespace, "namespaces_as_array": NamespaceToArray(self.module.namespace), "imports": self.module.imports, "kinds": self.module.kinds, "enums": self.module.enums, "structs": self.GetStructs(), "unions": self.GetUnions(), "interfaces": self.GetInterfaces(), } @UseJinja("cpp_templates/module.h.tmpl", filters=cpp_filters) def GenerateModuleHeader(self): return self.GetJinjaExports() 
  @UseJinja("cpp_templates/module-internal.h.tmpl", filters=cpp_filters)
  def GenerateModuleInternalHeader(self):
    return self.GetJinjaExports()

  @UseJinja("cpp_templates/module.cc.tmpl", filters=cpp_filters)
  def GenerateModuleSource(self):
    return self.GetJinjaExports()

  def GenerateFiles(self, args):
    self.Write(self.GenerateModuleHeader(),
               self.MatchMojomFilePath("%s.h" % self.module.name))
    self.Write(self.GenerateModuleInternalHeader(),
               self.MatchMojomFilePath("%s-internal.h" % self.module.name))
    self.Write(self.GenerateModuleSource(),
               self.MatchMojomFilePath("%s.cc" % self.module.name))
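# A tiny illustration, independent of the mojom modules imported above, of the
# INT32 minimum workaround that TranslateConstants() documents: C++ parses
# -2147483648 as unary minus applied to 2147483648, which MSVC turns into an
# unsigned literal, so the generator emits an equivalent expression instead.
# The helper below is hypothetical and mimics only that one branch.
def _int32_min_literal(token):
    if token == "-2147483648":
        return "(-%d - 1) /* %s */" % (
            2**31 - 1, "Workaround for MSVC bug; see https://crbug.com/445618")
    return token

assert _int32_min_literal("-2147483648").startswith("(-2147483647 - 1)")
assert _int32_min_literal("42") == "42"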
import numpy as np from lasso_utils import * def warm_start_lasso_shooting_solver_strong_rules(X, y, tol, lam_grid_size, lam_min, lam_max=9999, apply_sequential_strong_rules=True): """ Parameters: @X {2D numpy array} matrix of predictor variables @y {numpy array} continuous response variable @tol {float} converge tolerance (threshold) @lam_grid_size {int} number of regularization parameters to try @lam_min {float} min val of regularization parameters to try @lam_max {optional, float} max val of regularization parameters to try, by default we compute the max val s.t. all coefficients in beta=0 @apply_sequential_strong_rules {optional, boolean} flag whether or not to use the sequential strong rules to exclude variables from the optimization Return: @output {2D numpy array} array where in each row, the index 0 element is the lambda value used, the index 1 element is the number of iterations to convergence, and the rest of the elements in the row are coefficient estimates in the beta vector. This function implements the shooting algorithm which uses coordinate descent in order to solve the lasso problem. It also makes use of warm starts and considers a grid of lambda values with log spacing. This function can also implement the sequential strong rules to discard predictors. """ # get the number of cols in X, i.e., the number of predictors n_col = X.shape[1] # transpose X XT = np.transpose(X) # if the default arg is chosen, use lam_max, otherwise use user-specified max val if lam_max == 9999: # take infinity norm lam_max = 2 * np.max(np.abs(np.dot(XT,y))) # make a grid of lam with specified number of points and log spacing lam_grid = 10.**np.linspace(np.log10(lam_max), np.log10(lam_min), lam_grid_size) # initialize the array to store the output # col0=lam, col1=#iter, col2-coln=beta coeffs output = np.zeros([len(lam_grid), n_col+2]) # fill the 1st col with lambdas for i in range(0,len(lam_grid)): output[i,0] = lam_grid[i] # initialize beta_curr beta_curr = np.zeros(n_col) # loop over all values of the regularization parameter in the grid # use "l" as the index since we already have an inner for loop on j for l in range(0,len(lam_grid)): # initialize the iteration count k = 0 # set the convergence flag to false converged = False # initialize beta_prev to whatever the last solution was # this is the WARM START PART beta_prev = np.copy(beta_curr) # make a list of all possible predictor indices to consider in the optimization # remove indices for those that are ruled out by the basic strong rules vars_to_consider = range(0,n_col) if apply_sequential_strong_rules and l>=1: # compute the vector of quantities to check and compare against the difference in lambda values check = np.abs(np.dot(XT,y-np.dot(X,output[l-1,range(2,len(output[0,:]))]))) # current lam val and that from prev iteration lam_l = output[l,0] lam_l_1 = output[l-1,0] # if we should not consider the variable in the optimzation, remove it from # the list of indices and set it = 0 for j in range(0,len(check)): if check[j] < (2.*lam_l - lam_l_1): vars_to_consider.remove(j) beta_curr[j] = 0 # continue to loop and update parameter values until the convergence # threshold is reached while converged==False: # update the iteration count k = k + 1 # get the beta vector from the previous iteration and store it before # any of its elements are modified beta_old = np.copy(beta_prev) for j in vars_to_consider: #for j in range(0,n_col): # del/del_w_j RSS(w) = a_j*w_j - c_j a_j = 2 * np.dot(X[:, j],X[:, j]) # proportional to corr between jth variable 
and residual # excluding the jth variable c_j = 2 * np.dot(X[:, j], (y - np.dot(X, beta_prev) + beta_prev[j]*X[:, j])) # apply soft thresholding pos_part = np.abs(c_j/a_j) - (lam_grid[l]/a_j) if pos_part <= 0: pos_part = 0 # update the jth coefficient of the parameter vector beta_curr[j] = np.sign(c_j/a_j) * pos_part # VERY IMPORTANT: NEED TO UPDATE BETA_PREV WITH THE NEW jth COEFF beta_prev[j] = np.copy(beta_curr[j]) # check to see if the 2-norm squared for beta_cur-beta_prev # is sufficiently small, if so we have converged, if not # do another iteration two_norm = np.linalg.norm(beta_prev-beta_old)**2 # check convergence if (two_norm <= tol): converged = True # insert the param vector and # of iter into the output matrix output[l, 1] = k for i in range(0, n_col): output[l, i+2] = beta_curr[i] return output def lasso_shooting_solver_strong_rules(X, y, lam, tol, verbose=False, apply_basic_strong_rule=False): """ Parameters: @X {2D numpy array} matrix of predictor variables @y {numpy array} continuous response variable @lam {float} regularization parameter @tol {float} converge tolerance (threshold) @verbose {optional, boolean} flag stating whether or not to print info on each iteration to the console @apply_basic_strong_rule {optional, boolean} flag whether or not to use the basic strong rule to exclude variables from the optimization Return: @beta_curr {1D numpy array} vector regression coefficients This function implements the shooting algorithm which uses coordinate descent in order to solve the lasso problem. It also can apply the basic strong rule to remove predictors if desired. """ # get the number of cols in X, i.e., the number of predictors n_col = X.shape[1] # initialize the parameter vectors from the previous iteration and the one # that results from the current iteration, respectively. beta_prev = np.zeros(n_col) beta_curr = np.zeros(n_col) # calc lam_max for use in strong rules XT = np.transpose(X) lam_max = np.max(np.abs(np.dot(XT,y))) # make a list of all possible predictor indices to consider in the optimization # remove indices for those that are ruled out by the basic strong rules vars_to_consider = range(0,n_col) if apply_basic_strong_rule: # calculate the vector of quantities compared to the difference in lambda values check = np.abs(np.dot(XT,y)) # loop over all values in the aforementioned vector and compare them to the # difference in lambda values, one by one. if the criteria is met, remove the # predictor from the set we consider for j in range(0,len(check)): if check[j] < 2*lam - lam_max: # if we remove the variable to consider, its coeff val will remain to # that at which it was initialized, i.e., 0 vars_to_consider.remove(j) if verbose: print "Variable at index %d ruled out by basic strong rule!" 
% (j) # initialize the iteration count k = 0 # set the convergence flag to false converged = False # continue to loop and update parameter values until the convergence # threshold is reached while converged==False: # update the iteration count k = k + 1 # get the beta vector from the previous iteration and store it before # any of its elements are modified beta_old = np.copy(beta_prev) for j in vars_to_consider: #for j in range(0,n_col): # del/del_w_j RSS(w) = a_j*w_j - c_j a_j = 2 * np.dot(X[:, j],X[:, j]) # proportional to corr between jth variable and residual # excluding the jth variable c_j = 2 * np.dot(X[:, j], (y - np.dot(X, beta_prev) + beta_prev[j]*X[:, j])) # apply soft thresholding pos_part = np.abs(c_j/a_j) - (lam/a_j) if pos_part <= 0: pos_part = 0 # update the jth coefficient of the parameter vector beta_curr[j] = np.sign(c_j/a_j) * pos_part # VERY IMPORTANT: NEED TO UPDATE BETA_PREV WITH THE NEW jth COEFF beta_prev[j] = np.copy(beta_curr[j]) # check to see if the 2-norm squared for beta_cur-beta_prev # is sufficiently small, if so we have converged, if not # do another iteration two_norm = np.linalg.norm(beta_prev-beta_old)**2 # compute the value of the lasso loss function for the current iteration loss_curr = lasso_loss(X, y, beta_curr, lam, True) # if verbose output is desired, print info on the current iteration if verbose: print "Iter %d done, l2^2=%.03f, Loss=%.03f" % (k, two_norm, loss_curr) # check convergence if (two_norm <= tol): converged = True # return the parameter vector return beta_curr
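# A compact, self-contained sketch of the coordinate-descent ("shooting") update
# that both solvers above implement: for each coordinate j, form a_j and c_j
# from the partial residual and apply soft thresholding. The synthetic data and
# the helper name below are illustrative only and not part of lasso_utils.
import numpy as np

def soft_threshold_update(X, y, beta, lam, j):
    # del/del_w_j RSS(w) = a_j*w_j - c_j, as in the solvers above
    a_j = 2.0 * np.dot(X[:, j], X[:, j])
    c_j = 2.0 * np.dot(X[:, j], y - np.dot(X, beta) + beta[j] * X[:, j])
    pos_part = max(abs(c_j / a_j) - lam / a_j, 0.0)
    return np.sign(c_j / a_j) * pos_part

rng = np.random.RandomState(0)
X = rng.randn(50, 5)
beta_true = np.array([2.0, 0.0, -1.5, 0.0, 0.0])
y = np.dot(X, beta_true) + 0.1 * rng.randn(50)

beta = np.zeros(5)
for _sweep in range(100):          # fixed number of full coordinate sweeps
    for j in range(5):
        beta[j] = soft_threshold_update(X, y, beta, lam=1.0, j=j)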
""" Base class for Level Editor You should write your own LevelEditor class inheriting this. Refer LevelEditor.py for example. """ from direct.showbase.DirectObject import * from direct.directtools.DirectUtil import * from direct.gui.DirectGui import * from CurveEditor import * from FileMgr import * from ActionMgr import * from MayaConverter import * class LevelEditorBase(DirectObject): """ Base Class for Panda3D LevelEditor """ def __init__(self): #loadPrcFileData('startup', 'window-type none') self.currentFile = None self.fNeedToSave = False self.actionEvents = [] #self.objectMgr = ObjectMgr(self) self.curveEditor = CurveEditor(self) self.fileMgr = FileMgr(self) self.actionMgr = ActionMgr() self.fMoveCamera = False self.NPParent = render # define your own config file in inherited class self.settingsFile = None # you can show/hide specific properties by using propertiesMask and this mode self.BASE_MODE = BitMask32.bit(0) self.CREATE_CURVE_MODE = BitMask32.bit(2) self.EDIT_CURVE_MODE = BitMask32.bit(3) self.ANIM_MODE = BitMask32.bit(4) self.GRAPH_EDITOR = False self.mode = self.BASE_MODE self.preMode = None def initialize(self): """ You should call this in your __init__ method of inherited LevelEditor class """ # specifiy what obj can be 'selected' as objects base.direct.selected.addTag('OBJRoot') self.actionEvents.extend([ # Node path events ('DIRECT-select', self.select), ('DIRECT-delete', self.handleDelete), ('DIRECT-preDeselectAll', self.deselectAll), ('DIRECT_deselectAll', self.deselectAllCB), ('preRemoveNodePath', self.removeNodePathHook), ('DIRECT_deselectedNodePath', self.deselectAllCB), ('DIRECT_selectedNodePath_fMulti_fTag_fLEPane', self.selectedNodePathHook), ('DIRECT_deselectAll', self.deselectAll), ('LE-Undo', self.actionMgr.undo), ('LE-Redo', self.actionMgr.redo), ('LE-Duplicate', self.objectMgr.duplicateSelected), ('DIRECT_manipulateObjectCleanup', self.cleanUpManipulating), ('LE-MakeLive', self.objectMgr.makeSelectedLive), ('LE-NewScene', self.ui.onNew), ('LE-SaveScene', self.ui.onSave), ('LE-OpenScene', self.ui.onOpen), ('LE-Quit', self.ui.quit), ('DIRECT-mouse1', self.handleMouse1), ('DIRECT-mouse1Up', self.handleMouse1Up), ('DIRECT-mouse2', self.handleMouse2), ('DIRECT-mouse2Up', self.handleMouse2Up), ('DIRECT-mouse3', self.handleMouse3), ('DIRECT-mouse3Up', self.handleMouse3Up), ('DIRECT-toggleWidgetVis', self.toggleWidget), ]) # Add all the action events for event in self.actionEvents: if len(event) == 3: self.accept(event[0], event[1], event[2]) else: self.accept(event[0], event[1]) # editor state text display such as edit mode self.statusReadout = OnscreenText( pos = (-1.2, 0.9), bg=Vec4(1,1,1,1), scale = 0.05, align = TextNode.ALeft, mayChange = 1, font = TextNode.getDefaultFont()) self.statusReadout.setText("") # Make sure readout is never lit or drawn in wireframe useDirectRenderStyle(self.statusReadout) self.statusReadout.reparentTo(hidden) self.statusLines = [] taskMgr.doMethodLater(5, self.updateStatusReadoutTimeouts, 'updateStatus') self.loadSettings() self.reset() def setTitleWithFilename(self, filename=""): title = self.ui.appname if filename != "": filenameshort = os.path.basename(filename) title = title + " (%s)"%filenameshort self.ui.SetLabel(title) def removeNodePathHook(self, nodePath): if nodePath is None: return base.direct.deselect(nodePath) self.objectMgr.removeObjectByNodePath(nodePath) if base.direct.selected.last is not None and nodePath == base.direct.selected.last: # if base.direct.selected.last is refering to this # removed obj, clear the 
reference if (hasattr(__builtins__,'last')): __builtins__.last = None else: __builtins__['last'] = None base.direct.selected.last = None def toggleWidget(self): if self.objectMgr.currNodePath: obj = self.objectMgr.findObjectByNodePath(self.objectMgr.currNodePath) if obj and not obj[OG.OBJ_DEF].movable: return base.direct.toggleWidgetVis() def handleMouse1(self, modifiers): if base.direct.fAlt or modifiers == 4: self.fMoveCamera = True return if self.mode == self.CREATE_CURVE_MODE : self.curveEditor.createCurve() def handleMouse1Up(self): self.fMoveCamera = False def handleMouse2(self, modifiers): if base.direct.fAlt or modifiers == 4: self.fMoveCamera = True return def handleMouse2Up(self): self.fMoveCamera = False def handleMouse3(self, modifiers): if base.direct.fAlt or modifiers == 4: self.fMoveCamera = True return self.ui.onRightDown() def handleMouse3Up(self): self.fMoveCamera = False def handleDelete(self): oldSelectedNPs = base.direct.selected.getSelectedAsList() oldUIDs = [] for oldNP in oldSelectedNPs: obj = self.objectMgr.findObjectByNodePath(oldNP) if obj: oldUIDs.append(obj[OG.OBJ_UID]) action = ActionDeleteObj(self) self.actionMgr.push(action) action() for uid in oldUIDs: self.ui.sceneGraphUI.delete(uid) ## reply = wx.MessageBox("Do you want to delete selected?", "Delete?", ## wx.YES_NO | wx.ICON_QUESTION) ## if reply == wx.YES: ## base.direct.removeAllSelected() ## else: ## # need to reset COA ## dnp = base.direct.selected.last ## # Update camera controls coa to this point ## # Coa2Camera = Coa2Dnp * Dnp2Camera ## mCoa2Camera = dnp.mCoa2Dnp * dnp.getMat(base.direct.camera) ## row = mCoa2Camera.getRow(3) ## coa = Vec3(row[0], row[1], row[2]) ## base.direct.cameraControl.updateCoa(coa) def cleanUpManipulating(self, selectedNPs): for np in selectedNPs: obj = self.objectMgr.findObjectByNodePath(np) if obj: action = ActionTransformObj(self, obj[OG.OBJ_UID], Mat4(np.getMat())) self.actionMgr.push(action) action() def select(self, nodePath, fMultiSelect=0, fSelectTag=1, fResetAncestry=1, fLEPane=0, fUndo=1): if fUndo: # Select tagged object if present if fSelectTag: for tag in base.direct.selected.tagList: if nodePath.hasNetTag(tag): nodePath = nodePath.findNetTag(tag) break action = ActionSelectObj(self, nodePath, fMultiSelect) self.actionMgr.push(action) action() else: base.direct.selectCB(nodePath, fMultiSelect, fSelectTag, fResetAncestry, fLEPane, fUndo) def selectedNodePathHook(self, nodePath, fMultiSelect = 0, fSelectTag = 1, fLEPane = 0): # handle unpickable nodepath if nodePath.getName() in base.direct.iRay.unpickable: base.direct.deselect(nodePath) return if fMultiSelect == 0 and fLEPane == 0: oldSelectedNPs = base.direct.selected.getSelectedAsList() for oldNP in oldSelectedNPs: obj = self.objectMgr.findObjectByNodePath(oldNP) if obj: self.ui.sceneGraphUI.deSelect(obj[OG.OBJ_UID]) self.objectMgr.selectObject(nodePath, fLEPane) self.ui.buildContextMenu(nodePath) if self.mode == self.EDIT_CURVE_MODE: taskMgr.add(self.curveEditor.editCurve, "modify") self.curveEditor.accept("DIRECT-enter", self.curveEditor.onBaseMode) def deselectAll(self, np=None): if len(base.direct.selected.getSelectedAsList()) ==0: return action = ActionDeselectAll(self) self.actionMgr.push(action) action() def deselectAllCB(self, dnp=None): self.objectMgr.deselectAll() def reset(self): if self.fNeedToSave: reply = wx.MessageBox("Do you want to save current scene?", "Save?", wx.YES_NO | wx.ICON_QUESTION) if reply == wx.YES: result = self.ui.onSave() if result == False: return base.direct.deselectAll() 
base.direct.selected.last = None self.ui.reset() self.objectMgr.reset() self.animMgr.reset() self.actionMgr.reset() self.ui.perspView.camera.setPos(-19, -19, 19) self.ui.perspView.camera.lookAt(Point3(0, 0, 0)) self.ui.leftView.camera.setPos(600, 0, 0) self.ui.frontView.camera.setPos(0, -600, 0) self.ui.topView.camera.setPos(0, 0, 600) self.resetOrthoCam(self.ui.topView) self.resetOrthoCam(self.ui.frontView) self.resetOrthoCam(self.ui.leftView) self.fNeedToSave = False self.setTitleWithFilename() def resetOrthoCam(self, view): base.direct.drList[base.camList.index(NodePath(view.camNode))].orthoFactor = 0.1 x = view.ClientSize.GetWidth() * 0.1 y = view.ClientSize.GetHeight() * 0.1 view.camLens.setFilmSize(x, y) def save(self): self.ui.SetCursor(wx.StockCursor(wx.CURSOR_WAIT)) if self.currentFile: self.fileMgr.saveToFile(self.currentFile) self.ui.SetCursor(wx.StockCursor(wx.CURSOR_ARROW)) def saveAs(self, fileName): self.ui.SetCursor(wx.StockCursor(wx.CURSOR_WAIT)) self.fileMgr.saveToFile(fileName) self.currentFile = fileName self.ui.SetCursor(wx.StockCursor(wx.CURSOR_ARROW)) def load(self, fileName): self.ui.SetCursor(wx.StockCursor(wx.CURSOR_WAIT)) self.reset() self.fileMgr.loadFromFile(fileName) self.currentFile = fileName self.ui.SetCursor(wx.StockCursor(wx.CURSOR_ARROW)) def saveSettings(self): if self.settingsFile is None: return try: f = open(self.settingsFile, 'w') f.write('gridSize\n%f\n'%self.ui.perspView.grid.gridSize) f.write('gridSpacing\n%f\n'%self.ui.perspView.grid.gridSpacing) f.write('hotKey\n%s\n'%base.direct.hotKeyMap) f.close() except: pass def loadSettings(self): if self.settingsFile is None: return self.ui.SetCursor(wx.StockCursor(wx.CURSOR_WAIT)) try: f = open(self.settingsFile, 'r') configLines = f.readlines() f.close() gridSize = 100.0 gridSpacing = 5.0 for i in range(0, len(configLines)): line = configLines[i] i = i + 1 if line.startswith('gridSize'): gridSize = float(configLines[i]) elif line.startswith('gridSpacing'): gridSpacing = float(configLines[i]) elif line.startswith('hotKey'): customHotKeyMap = eval(configLines[i]) customHotKeyDict = {} for hotKey in customHotKeyMap.keys(): desc = customHotKeyMap[hotKey] customHotKeyDict[desc[1]] = hotKey overriddenKeys = [] for key in base.direct.hotKeyMap.keys(): desc = base.direct.hotKeyMap[key] if desc[1] in customHotKeyDict.keys(): overriddenKeys.append(key) for key in overriddenKeys: del base.direct.hotKeyMap[key] base.direct.hotKeyMap.update(customHotKeyMap) self.ui.updateGrids(gridSize, gridSpacing) self.ui.updateMenu() except: pass self.ui.SetCursor(wx.StockCursor(wx.CURSOR_ARROW)) def convertMaya(self, modelname, callBack, obj=None, isAnim=False): if obj and isAnim: mayaConverter = MayaConverter(self.ui, self, modelname, callBack, obj, isAnim) else: reply = wx.MessageBox("Is it an animation file?", "Animation?", wx.YES_NO | wx.ICON_QUESTION) if reply == wx.YES: mayaConverter = MayaConverter(self.ui, self, modelname, callBack, None, True) else: mayaConverter = MayaConverter(self.ui, self, modelname, callBack, None, False) mayaConverter.Show() def convertFromMaya(self, modelname, callBack): mayaConverter = MayaConverter(self.ui, self, modelname, callBack, None, False) mayaConverter.Show() def exportToMaya(self, mayaFileName): exportRootNP = render self.exportToMayaCB(mayaFileName, exportRootNP) def exportToMayaCB(self, mayaFileName, exportRootNP): bamFileName = mayaFileName + ".bam" if base.direct.selected.last: obj = self.objectMgr.findObjectByNodePath(base.direct.selected.last) if obj: exportRootNP = 
obj[OG.OBJ_NP] exportRootNP.writeBamFile(bamFileName) mayaConverter = MayaConverter(self.ui, self, mayaFileName, None, None, False, FROM_BAM_TO_MAYA) mayaConverter.Show() def updateStatusReadout(self, status, color=None): if status: # add new status line, first check to see if it already exists alreadyExists = False for currLine in self.statusLines: if (status == currLine[1]): alreadyExists = True break if (alreadyExists == False): time = globalClock.getRealTime() + 15 self.statusLines.append([time,status,color]) # update display of new status lines self.statusReadout.reparentTo(aspect2d) statusText = "" lastColor = None for currLine in self.statusLines: statusText += currLine[1] + '\n' lastColor = currLine[2] self.statusReadout.setText(statusText) if (lastColor): self.statusReadout.textNode.setCardColor( lastColor[0], lastColor[1], lastColor[2], lastColor[3]) self.statusReadout.textNode.setCardAsMargin(0.1, 0.1, 0.1, 0.1) else: self.statusReadout.textNode.setCardColor(1,1,1,1) self.statusReadout.textNode.setCardAsMargin(0.1, 0.1, 0.1, 0.1) def updateStatusReadoutTimeouts(self,task=None): removalList = [] for currLine in self.statusLines: if (globalClock.getRealTime() >= currLine[0]): removalList.append(currLine) for currRemoval in removalList: self.statusLines.remove(currRemoval) self.updateStatusReadout(None) # perform doMethodLater again after delay # This crashes when CTRL-C'ing, so this is a cheap hack. #return 2 from direct.task import Task return Task.again def propMeetsReq(self, typeName, parentNP): if self.ui.parentToSelectedMenuItem.IsChecked(): if base.direct.selected.last: parent = base.le.objectMgr.findObjectByNodePath(base.direct.selected.last) if parent: parentNP[0] = parent[OG.OBJ_NP] else: parentNP[0] = None return True
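# A small standalone sketch of the settings-file round trip that saveSettings()
# and loadSettings() above rely on: alternating key and value lines for
# gridSize, gridSpacing and the hot-key map. The sample values and the hot-key
# entry are made up for illustration; no Panda3D or wx objects are needed here.
sample = ("gridSize\n100.000000\n"
          "gridSpacing\n5.000000\n"
          "hotKey\n{'z': ('Undo', 'LE-Undo')}\n")

lines = sample.splitlines()
settings = {}
for i, line in enumerate(lines):
    if line.startswith('gridSize'):
        settings['gridSize'] = float(lines[i + 1])
    elif line.startswith('gridSpacing'):
        settings['gridSpacing'] = float(lines[i + 1])
    elif line.startswith('hotKey'):
        # loadSettings() eval()s this line in the same way
        settings['hotKeyMap'] = eval(lines[i + 1])

assert settings['gridSize'] == 100.0 and settings['gridSpacing'] == 5.0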
# -*- coding: utf-8 -*- from __future__ import unicode_literals import string from importlib import import_module import warnings from django.conf import settings from django.contrib.auth import get_user_model from django.contrib.auth.models import AnonymousUser from django.contrib.auth.signals import user_logged_in, user_logged_out from django.core.exceptions import ObjectDoesNotExist from django.db import models, DEFAULT_DB_ALIAS from django.db.models.fields import FieldDoesNotExist from django.dispatch import receiver from django.utils import timezone from django.utils.encoding import python_2_unicode_compatible from django.utils.functional import SimpleLazyObject from django.utils.translation import ugettext_lazy as _, ugettext_noop from django.utils.six import with_metaclass from shop import deferred from shop.models.fields import JSONField from shop.signals import customer_recognized from .related import ChoiceEnum SessionStore = import_module(settings.SESSION_ENGINE).SessionStore() class CustomerState(ChoiceEnum): UNRECOGNIZED = 0 ugettext_noop("CustomerState.Unrecognized") GUEST = 1 ugettext_noop("CustomerState.Guest") REGISTERED = 2 ugettext_noop("CustomerState.Registered") class CustomerStateField(models.PositiveSmallIntegerField): description = _("Customer recognition state") def __init__(self, *args, **kwargs): kwargs.update(choices=CustomerState.choices()) kwargs.setdefault('default', CustomerState.UNRECOGNIZED) super(CustomerStateField, self).__init__(*args, **kwargs) def deconstruct(self): name, path, args, kwargs = super(CustomerStateField, self).deconstruct() del kwargs['choices'] if kwargs['default'] is CustomerState.UNRECOGNIZED: del kwargs['default'] elif isinstance(kwargs['default'], CustomerState): kwargs['default'] = kwargs['default'].value return name, path, args, kwargs def from_db_value(self, value, expression, connection, context): return CustomerState(value) def get_prep_value(self, state): return state.value def to_python(self, state): return CustomerState(state) class CustomerQuerySet(models.QuerySet): def _filter_or_exclude(self, negate, *args, **kwargs): """ Emulate filter queries on a Customer using attributes from the User object. Example: Customer.objects.filter(last_name__icontains='simpson') will return a queryset with customers whose last name contains "simpson". """ opts = self.model._meta lookup_kwargs = {} for key, lookup in kwargs.items(): try: field_name = key[:key.index('__')] except ValueError: field_name = key if field_name == 'pk': field_name = opts.pk.name try: opts.get_field(field_name) if isinstance(lookup, get_user_model()): lookup.pk # force lazy object to resolve lookup_kwargs[key] = lookup except FieldDoesNotExist as fdne: try: get_user_model()._meta.get_field(field_name) lookup_kwargs['user__' + key] = lookup except FieldDoesNotExist: raise fdne except Exception as othex: raise othex result = super(CustomerQuerySet, self)._filter_or_exclude(negate, *args, **lookup_kwargs) return result class CustomerManager(models.Manager): """ Manager for the Customer database model. This manager can also cope with customers, which have an entity in the database but otherwise are considered as anonymous. The username of these so called unrecognized customers is a compact version of the session key. 
""" BASE64_ALPHABET = string.digits + string.ascii_uppercase + string.ascii_lowercase + '.@' REVERSE_ALPHABET = dict((c, i) for i, c in enumerate(BASE64_ALPHABET)) BASE36_ALPHABET = string.digits + string.ascii_lowercase _queryset_class = CustomerQuerySet @classmethod def encode_session_key(cls, session_key): """ Session keys have base 36 and length 32. Since the field ``username`` accepts only up to 30 characters, the session key is converted to a base 64 representation, resulting in a length of approximately 28. """ return cls._encode(int(session_key[:32], 36), cls.BASE64_ALPHABET) @classmethod def decode_session_key(cls, compact_session_key): """ Decode a compact session key back to its original length and base. """ base_length = len(cls.BASE64_ALPHABET) n = 0 for c in compact_session_key: n = n * base_length + cls.REVERSE_ALPHABET[c] return cls._encode(n, cls.BASE36_ALPHABET).zfill(32) @classmethod def _encode(cls, n, base_alphabet): base_length = len(base_alphabet) s = [] while True: n, r = divmod(n, base_length) s.append(base_alphabet[r]) if n == 0: break return ''.join(reversed(s)) def get_queryset(self): """ Whenever we fetch from the Customer table, inner join with the User table to reduce the number of presumed future queries to the database. """ qs = self._queryset_class(self.model, using=self._db).select_related('user') return qs def create(self, *args, **kwargs): if 'user' in kwargs and kwargs['user'].is_authenticated(): kwargs.setdefault('recognized', CustomerState.REGISTERED) customer = super(CustomerManager, self).create(*args, **kwargs) return customer def _get_visiting_user(self, session_key): """ Since the Customer has a 1:1 relation with the User object, look for an entity of a User object. As its ``username`` (which must be unique), use the given session key. """ username = self.encode_session_key(session_key) try: user = get_user_model().objects.get(username=username) except get_user_model().DoesNotExist: user = AnonymousUser() return user def get_from_request(self, request): """ Return an Customer object for the current User object. """ if request.user.is_anonymous() and request.session.session_key: # the visitor is determined through the session key user = self._get_visiting_user(request.session.session_key) else: user = request.user try: if user.customer: return user.customer except AttributeError: pass if request.user.is_authenticated(): customer, created = self.get_or_create(user=user) if created: # `user` has been created by another app than shop customer.recognize_as_registered(request) else: customer = VisitingCustomer() return customer def get_or_create_from_request(self, request): if request.user.is_authenticated(): user = request.user recognized = CustomerState.REGISTERED else: if not request.session.session_key: request.session.cycle_key() assert request.session.session_key username = self.encode_session_key(request.session.session_key) # create or get a previously created inactive intermediate user, # which later can declare himself as guest, or register as a valid Django user try: user = get_user_model().objects.get(username=username) except get_user_model().DoesNotExist: user = get_user_model().objects.create_user(username) user.is_active = False user.save() recognized = CustomerState.UNRECOGNIZED customer, created = self.get_or_create(user=user, recognized=recognized) return customer @python_2_unicode_compatible class BaseCustomer(with_metaclass(deferred.ForeignKeyBuilder, models.Model)): """ Base class for shop customers. 
Customer is a profile model that extends the django User model if a customer is authenticated. On checkout, a User object is created for anonymous customers also (with unusable password). """ user = models.OneToOneField(settings.AUTH_USER_MODEL, primary_key=True) recognized = CustomerStateField(_("Recognized as"), help_text=_("Designates the state the customer is recognized as.")) last_access = models.DateTimeField(_("Last accessed"), default=timezone.now) extra = JSONField(editable=False, verbose_name=_("Extra information about this customer")) objects = CustomerManager() class Meta: abstract = True def __str__(self): return self.get_username() def get_username(self): return self.user.get_username() def get_full_name(self): return self.user.get_full_name() @property def first_name(self): return self.user.first_name @first_name.setter def first_name(self, value): self.user.first_name = value @property def last_name(self): return self.user.last_name @last_name.setter def last_name(self, value): self.user.last_name = value @property def email(self): return self.user.email @email.setter def email(self, value): self.user.email = value @property def date_joined(self): return self.user.date_joined @property def last_login(self): return self.user.last_login @property def groups(self): return self.user.groups def is_anonymous(self): return self.recognized in (CustomerState.UNRECOGNIZED, CustomerState.GUEST) def is_authenticated(self): return self.recognized is CustomerState.REGISTERED def is_recognized(self): """ Return True if the customer is associated with a User account. Unrecognized customers have accessed the shop, but did not register an account nor declared themselves as guests. """ return self.recognized is not CustomerState.UNRECOGNIZED def is_guest(self): """ Return true if the customer isn't associated with valid User account, but declared himself as a guest, leaving their email address. """ return self.recognized is CustomerState.GUEST def recognize_as_guest(self, request=None, commit=True): """ Recognize the current customer as guest customer. """ if self.recognized != CustomerState.GUEST: self.recognized = CustomerState.GUEST if commit: self.save(update_fields=['recognized']) customer_recognized.send(sender=self.__class__, customer=self, request=request) def is_registered(self): """ Return true if the customer has registered himself. """ return self.recognized is CustomerState.REGISTERED def recognize_as_registered(self, request=None, commit=True): """ Recognize the current customer as registered customer. """ if self.recognized != CustomerState.REGISTERED: self.recognized = CustomerState.REGISTERED if commit: self.save(update_fields=['recognized']) customer_recognized.send(sender=self.__class__, customer=self, request=request) def is_visitor(self): """ Always False for instantiated Customer objects. """ return False def is_expired(self): """ Return True if the session of an unrecognized customer expired or is not decodable. Registered customers never expire. Guest customers only expire, if they failed fulfilling the purchase. """ if self.recognized is CustomerState.UNRECOGNIZED: try: session_key = CustomerManager.decode_session_key(self.user.username) return not SessionStore.exists(session_key) except KeyError: msg = "Unable to decode username '{}' as session key" warnings.warn(msg.format(self.user.username)) return True return False def get_or_assign_number(self): """ Hook to get or to assign the customers number. It is invoked, every time an Order object is created. 
Using a customer number, which is different from the primary key is useful for merchants, wishing to assign sequential numbers only to customers which actually bought something. Otherwise the customer number (primary key) is increased whenever a site visitor puts something into the cart. If he never proceeds to checkout, that entity expires and may be deleted at any time in the future. """ return self.get_number() def get_number(self): """ Hook to get the customer's number. Customers haven't purchased anything may return None. """ return str(self.user_id) def save(self, **kwargs): if 'update_fields' not in kwargs: self.user.save(using=kwargs.get('using', DEFAULT_DB_ALIAS)) super(BaseCustomer, self).save(**kwargs) def delete(self, *args, **kwargs): if self.user.is_active and self.recognized is CustomerState.UNRECOGNIZED: # invalid state of customer, keep the referred User super(BaseCustomer, self).delete(*args, **kwargs) else: # also delete self through cascading self.user.delete(*args, **kwargs) CustomerModel = deferred.MaterializedModel(BaseCustomer) class VisitingCustomer(object): """ This dummy object is used for customers which just visit the site. Whenever a VisitingCustomer adds something to the cart, this object is replaced against a real Customer object. """ user = AnonymousUser() def __str__(self): return 'Visitor' @property def email(self): return '' @email.setter def email(self, value): pass def is_anonymous(self): return True def is_authenticated(self): return False def is_recognized(self): return False def is_guest(self): return False def is_registered(self): return False def is_visitor(self): return True def save(self, **kwargs): pass @receiver(user_logged_in) def handle_customer_login(sender, **kwargs): """ Update request.customer to an authenticated Customer """ try: kwargs['request'].customer = kwargs['request'].user.customer except (AttributeError, ObjectDoesNotExist): kwargs['request'].customer = SimpleLazyObject(lambda: CustomerModel.objects.get_from_request(kwargs['request'])) @receiver(user_logged_out) def handle_customer_logout(sender, **kwargs): """ Update request.customer to a visiting Customer """ # defer assignment to anonymous customer, since the session_key is not yet rotated kwargs['request'].customer = SimpleLazyObject(lambda: CustomerModel.objects.get_from_request(kwargs['request']))
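# A minimal standalone sketch of the session-key compaction performed by
# CustomerManager above: a 32-character base-36 session key is re-encoded with
# the 64-character alphabet (digits, upper- and lowercase letters, '.' and '@'),
# shortening it to roughly 28 characters so it fits the 30-character username
# field.  The example key below is hypothetical; the real methods live on the
# manager and are not replaced by this sketch.
import string

_B36 = string.digits + string.ascii_lowercase
_B64 = string.digits + string.ascii_uppercase + string.ascii_lowercase + '.@'
_REV = dict((c, i) for i, c in enumerate(_B64))


def _encode_example(n, alphabet):
    base, digits = len(alphabet), []
    while True:
        n, r = divmod(n, base)
        digits.append(alphabet[r])
        if n == 0:
            break
    return ''.join(reversed(digits))


def _compact_session_key(session_key):
    return _encode_example(int(session_key[:32], 36), _B64)


def _expand_session_key(compact_key):
    n = 0
    for c in compact_key:
        n = n * len(_B64) + _REV[c]
    return _encode_example(n, _B36).zfill(32)


_key = '0123456789abcdefghijklmnopqrstuv'  # hypothetical 32-char base-36 key
assert len(_compact_session_key(_key)) <= 30
assert _expand_session_key(_compact_session_key(_key)) == _key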
""" Check metadata in a THREDDS catalog is consistent with the DRS. Things to check: 1. All DRS components set as properties and validate with drslib 2. drs_id is consistent with properties 3. version is a date 4. dataset urlPath is consistent with the DRS directory structure. 5. Checksums are present and the right format (NOT 'MD5:...') 6. tracking_id is present 7. Check product assignement is right. Currently implemented: 1-3 """ import sys import os import re from lxml import etree as ET from drslib.drs import CmipDRS from drslib.cmip5 import make_translator import urlparse from optparse import OptionParser import logging log = logging.getLogger(__name__) THREDDS_NS = 'http://www.unidata.ucar.edu/namespaces/thredds/InvCatalog/v1.0' XLINK_NS = 'http://www.w3.org/1999/xlink' usage = """%prog [options] thredds ... thredds: A thredds url or file path of a dataset published by esgpublish.\ """ trans = make_translator('') drs_prop_map = {'dataset_version': 'version', 'project': 'activity', 'experiment': 'experiment', 'product': 'product', 'model': 'model', 'time_frequency': 'frequency', 'realm': 'realm', 'cmor_table': 'table', 'ensemble': 'ensemble', 'institute': 'institute' } class InvalidThreddsException(Exception): """ An exception raised to indicate failure of a ThreddsCheck """ pass class CheckNotPossible(Exception): """ Raised to indicate a check doesn't have enough information to continue. """ pass class ThreddsCheck(object): """ Base class of all checks, defining the interface. """ def __init__(self, environ=None): """ environ is a dictionary shared accross all checks allowing checks to share information. """ self.environ = environ def check(self, etree): """ Check the THREDDS catalogue represented by an ElementTree object. """ pass def run_checks(etree, checks, environ=None): """ Run a sequence of checks on a THREDDS catalogue as an ElementTree. InvalidThreddsExceptions are converted to log messages. """ if environ is None: environ = {} for CheckClass in checks: check = CheckClass(environ) try: check.check(etree) except InvalidThreddsException, e: log.error(e) except CheckNotPossible: log.warn('Check %s aborted' % CheckClass.__name__) else: log.info('Check %s succeeded' % CheckClass.__name__) return environ class DRSIdCheck(ThreddsCheck): """ Check drs_id is present and consistent with dataset_id. """ def check(self, etree): dataset = get_dataset(etree) drs_id = get_property(dataset, 'drs_id') dataset_id = get_property(dataset, 'dataset_id') # Check 2 ids are consistent if drs_id != dataset_id: raise InvalidThreddsException("dataset_id != drs_id for dataset %s" % dataset.get('ID')) self.environ['dataset_id'] = dataset_id self.environ['drs_id'] = drs_id class DRSPropCheck(ThreddsCheck): """ Check all drs components are defined as properties. Creates a drs attribute in the environment if successful. 
""" def check(self, etree): dataset = get_dataset(etree) props = {} for prop_name in drs_prop_map: prop = get_property(dataset, prop_name) if prop_name == 'dataset_version': prop = int(prop) elif prop_name == 'ensemble': #!TODO: refactor this to share code with drslib.translate mo = re.match(r'(?:r(\d+))?(?:i(\d+))?(?:p(\d+))?', prop) if not mo: raise InvalidThreddsException('Unrecognised ensemble syntax %s' % prop) (r, i, p) = mo.groups() prop = tuple(x and int(x) for x in (r, i, p)) props[drs_prop_map[prop_name]] = prop drs = CmipDRS(**props) # If present in environ check against drs_id if 'drs_id' in self.environ: if drs.to_dataset_id() != self.environ['drs_id']: raise InvalidThreddsException("drs properties inconsistent with drs_id for dataset %s" % dataset.get('ID')) self.environ['drs'] = drs class ValidDRSCheck(ThreddsCheck): """ Check the drs object in the environment is valid. """ def check(self, etree): if 'drs' not in self.environ: raise CheckNotPossible drs = self.environ['drs'] try: path = trans.drs_to_path(drs) except: raise InvalidThreddsException("drs %s fails to validate" % drs) class ValidDateCheck(ThreddsCheck): """ Check date versioning. """ def check(self, etree): if not 'drs' in self.environ: raise CheckNotPossible drs = self.environ['drs'] if not drs.version > 20100101: raise InvalidThreddsException("The version of dataset doesn't look like a date: %s" % drs) # # Utility functions # def get_dataset(etree): # There should be only 1 top-level dataset element datasets = etree.findall('{%s}dataset' % THREDDS_NS) if len(datasets) != 1: raise InvalidThreddsException("More than one top-level dataset") return datasets[0] def get_property(dataset, name): prop = dataset.find('{%s}property[@name="%s"]' % (THREDDS_NS, name)) if prop is None: raise InvalidThreddsException("Property %s not found in dataset %s" % (name, dataset.get('ID'))) return prop.get('value') def read_master_catalog(catalog_url): """ Read master catalogue and generate dataset catalogue ElementTree objects. """ cat_etree = ET.parse(catalog_url) scheme, netloc, path, query, fragment = urlparse.urlsplit(catalog_url) base_url = urlparse.urlunsplit((scheme, netloc, os.path.dirname(path)+'/', None, None)) for catalog_ref in cat_etree.findall('{%s}catalogRef' % THREDDS_NS): ds_url = catalog_ref.get('{%s}href' % XLINK_NS) abs_ds_url = urlparse.urljoin(base_url, ds_url) yield abs_ds_url def main(argv=sys.argv): logging.basicConfig(level=logging.ERROR) checks = [DRSIdCheck, DRSPropCheck, ValidDRSCheck, ValidDateCheck] op = OptionParser(usage) op.add_option('-c', '--catalog', action='store', help="Scan root THREDDS catalog CATALOG for catalogRef " "elements and check each referenced catalog") opts, args = op.parse_args(argv[1:]) xmls = args if opts.catalog: log.info('Discovering catalogs from master catalog %s' % opts.catalog) xmls += list(read_master_catalog(opts.catalog)) if not xmls: op.print_help() for xml in xmls: log.info('Checking %s' % xml) etree = ET.parse(xml) run_checks(etree, checks) if __name__ == '__main__': main()
import logging import numpy as np import pandas as pd import numexpr as ne from astropy import units as u, constants as const from tardis.plasma.properties.base import ProcessingPlasmaProperty from tardis.plasma.properties.util import macro_atom logger = logging.getLogger(__name__) __all__ = ['StimulatedEmissionFactor', 'TauSobolev', 'BetaSobolev', 'TransitionProbabilities', 'LTEJBlues'] class StimulatedEmissionFactor(ProcessingPlasmaProperty): """ Attributes ---------- stimulated_emission_factor : Numpy Array, dtype float Indexed by lines, columns as zones. """ outputs = ('stimulated_emission_factor',) latex_formula = ('1-\\dfrac{g_{lower}n_{upper}}{g_{upper}n_{lower}}',) def __init__(self, plasma_parent=None, nlte_species=None): super(StimulatedEmissionFactor, self).__init__(plasma_parent) self._g_upper = None self._g_lower = None try: self.nlte_species = self.plasma_parent.nlte_species except: self.nlte_species = nlte_species def get_g_lower(self, g, lines_lower_level_index): if self._g_lower is None: g_lower = np.array(g.ix[lines_lower_level_index], dtype=np.float64) self._g_lower = g_lower[np.newaxis].T return self._g_lower def get_g_upper(self, g, lines_upper_level_index): if self._g_upper is None: g_upper = np.array(g.ix[lines_upper_level_index], dtype=np.float64) self._g_upper = g_upper[np.newaxis].T return self._g_upper def get_metastable_upper(self, metastability, lines_upper_level_index): if getattr(self, '_meta_stable_upper', None) is None: self._meta_stable_upper = metastability.values[ lines_upper_level_index][np.newaxis].T return self._meta_stable_upper def calculate(self, g, level_number_density, lines_lower_level_index, lines_upper_level_index, metastability, lines): n_lower = level_number_density.values.take(lines_lower_level_index, axis=0, mode='raise') n_upper = level_number_density.values.take(lines_upper_level_index, axis=0, mode='raise') g_lower = self.get_g_lower(g, lines_lower_level_index) g_upper = self.get_g_upper(g, lines_upper_level_index) meta_stable_upper = self.get_metastable_upper(metastability, lines_upper_level_index) stimulated_emission_factor = ne.evaluate('1 - ((g_lower * n_upper) / ' '(g_upper * n_lower))') stimulated_emission_factor[n_lower == 0.0] = 0.0 stimulated_emission_factor[np.isneginf(stimulated_emission_factor)]\ = 0.0 stimulated_emission_factor[meta_stable_upper & (stimulated_emission_factor < 0)] = 0.0 if self.nlte_species: nlte_lines_mask = \ np.zeros(stimulated_emission_factor.shape[0]).astype(bool) for species in self.nlte_species: nlte_lines_mask |= (lines.atomic_number == species[0]) & \ (lines.ion_number == species[1]) stimulated_emission_factor[(stimulated_emission_factor < 0) & nlte_lines_mask[np.newaxis].T] = 0.0 return stimulated_emission_factor class TauSobolev(ProcessingPlasmaProperty): """ Attributes ---------- tau_sobolev : Pandas DataFrame, dtype float Sobolev optical depth for each line. Indexed by line. Columns as zones. 
""" outputs = ('tau_sobolevs',) latex_name = ('\\tau_{\\textrm{sobolev}}',) latex_formula = ('\\dfrac{\\pi e^{2}}{m_{e} c}f_{lu}\\lambda t_{exp}\ n_{lower} \\Big(1-\\dfrac{g_{lower}n_{upper}}{g_{upper}n_{lower}}\\Big)',) def __init__(self, plasma_parent): super(TauSobolev, self).__init__(plasma_parent) self.sobolev_coefficient = (((np.pi * const.e.gauss ** 2) / (const.m_e.cgs * const.c.cgs)) * u.cm * u.s / u.cm**3).to(1).value def calculate(self, lines, level_number_density, lines_lower_level_index, time_explosion, stimulated_emission_factor, j_blues, f_lu, wavelength_cm): f_lu = f_lu.values[np.newaxis].T wavelength = wavelength_cm.values[np.newaxis].T n_lower = level_number_density.values.take(lines_lower_level_index, axis=0, mode='raise') tau_sobolevs = (self.sobolev_coefficient * f_lu * wavelength * time_explosion * n_lower * stimulated_emission_factor) return pd.DataFrame(tau_sobolevs, index=lines.index, columns=np.array(level_number_density.columns)) class BetaSobolev(ProcessingPlasmaProperty): """ Attributes ---------- beta_sobolev : Numpy Array, dtype float """ outputs = ('beta_sobolev',) latex_name = ('\\beta_{\\textrm{sobolev}}',) def calculate(self, tau_sobolevs): if getattr(self, 'beta_sobolev', None) is None: beta_sobolev = np.zeros_like(tau_sobolevs.values) else: beta_sobolev = self.beta_sobolev macro_atom.calculate_beta_sobolev( tau_sobolevs.values.ravel(), beta_sobolev.ravel()) return beta_sobolev class TransitionProbabilities(ProcessingPlasmaProperty): """ Attributes ---------- transition_probabilities : Pandas DataFrame, dtype float """ outputs = ('transition_probabilities',) def __init__(self, plasma_parent): super(TransitionProbabilities, self).__init__(plasma_parent) self.initialize = True def calculate(self, atomic_data, beta_sobolev, j_blues, stimulated_emission_factor, tau_sobolevs): #I wonder why? # Not sure who wrote this but the answer is that when the plasma is # first initialised (before the first iteration, without temperature # values etc.) there are no j_blues values so this just prevents # an error. Aoife. 
if len(j_blues) == 0: return None macro_atom_data = self._get_macro_atom_data(atomic_data) if self.initialize: self.initialize_macro_atom_transition_type_filters(atomic_data, macro_atom_data) self.transition_probability_coef = ( self._get_transition_probability_coefs(macro_atom_data)) self.initialize = False transition_probabilities = self._calculate_transition_probability(macro_atom_data, beta_sobolev, j_blues, stimulated_emission_factor) transition_probabilities = pd.DataFrame(transition_probabilities, index=macro_atom_data.transition_line_id, columns=tau_sobolevs.columns) return transition_probabilities def _calculate_transition_probability(self, macro_atom_data, beta_sobolev, j_blues, stimulated_emission_factor): transition_probabilities = np.empty((self.transition_probability_coef.shape[0], beta_sobolev.shape[1])) #trans_old = self.calculate_transition_probabilities(macro_atom_data, beta_sobolev, j_blues, stimulated_emission_factor) transition_type = macro_atom_data.transition_type.values lines_idx = macro_atom_data.lines_idx.values tpos = macro_atom_data.transition_probability.values #optimized_calculate_transition_probabilities(tpos, beta_sobolev, j_blues, stimulated_emission_factor, transition_type, lines_idx, self.block_references, transition_probabilities) macro_atom.calculate_transition_probabilities(tpos, beta_sobolev, j_blues, stimulated_emission_factor, transition_type, lines_idx, self.block_references, transition_probabilities) return transition_probabilities def calculate_transition_probabilities(self, macro_atom_data, beta_sobolev, j_blues, stimulated_emission_factor): transition_probabilities = self.prepare_transition_probabilities(macro_atom_data, beta_sobolev, j_blues, stimulated_emission_factor) return transition_probabilities def initialize_macro_atom_transition_type_filters(self, atomic_data, macro_atom_data): self.transition_up_filter = (macro_atom_data.transition_type.values == 1) self.transition_up_line_filter = macro_atom_data.lines_idx.values[ self.transition_up_filter] self.block_references = np.hstack(( atomic_data.macro_atom_references.block_references, len(macro_atom_data))) @staticmethod def _get_transition_probability_coefs(macro_atom_data): return macro_atom_data.transition_probability.values[np.newaxis].T def prepare_transition_probabilities(self, macro_atom_data, beta_sobolev, j_blues, stimulated_emission_factor): current_beta_sobolev = beta_sobolev.take( macro_atom_data.lines_idx.values, axis=0, mode='raise') transition_probabilities = self.transition_probability_coef * current_beta_sobolev j_blues = j_blues.take(self.transition_up_line_filter, axis=0, mode='raise') macro_stimulated_emission = stimulated_emission_factor.take( self.transition_up_line_filter, axis=0, mode='raise') transition_probabilities[self.transition_up_filter] *= (j_blues * macro_stimulated_emission) return transition_probabilities def _normalize_transition_probabilities(self, transition_probabilities): macro_atom.normalize_transition_probabilities( transition_probabilities, self.block_references) def _new_normalize_transition_probabilities(self, transition_probabilites): for i, start_id in enumerate(self.block_references[:-1]): end_id = self.block_references[i + 1] block = transition_probabilites[start_id:end_id] transition_probabilites[start_id:end_id] *= 1 / ne.evaluate( 'sum(block, 0)') @staticmethod def _get_macro_atom_data(atomic_data): try: return atomic_data.macro_atom_data except: return atomic_data.macro_atom_data_all class LTEJBlues(ProcessingPlasmaProperty): ''' 
Attributes ---------- lte_j_blues : Pandas DataFrame, dtype float J_blue values as calculated in LTE. ''' outputs = ('lte_j_blues',) latex_name = ('J^{b}_{lu(LTE)}') @staticmethod def calculate(lines, nu, beta_rad): beta_rad = pd.Series(beta_rad) nu = pd.Series(nu) h = const.h.cgs.value c = const.c.cgs.value df = pd.DataFrame(1, index=nu.index, columns=beta_rad.index) df = df.mul(nu, axis='index') * beta_rad exponential = (np.exp(h * df) - 1)**(-1) remainder = (2 * (h * nu.values ** 3) / (c ** 2)) j_blues = exponential.mul(remainder, axis=0) return pd.DataFrame(j_blues, index=lines.index, columns=beta_rad.index)
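# Pure-numpy sketch of the quantity BetaSobolev obtains from the compiled
# helper macro_atom.calculate_beta_sobolev: the standard Sobolev escape
# probability beta = (1 - exp(-tau)) / tau, with the small-tau limit handled
# explicitly.  Illustration only -- the plasma module uses the compiled
# routine above for speed.
def _beta_sobolev_reference(tau_sobolevs):
    tau = np.asarray(tau_sobolevs, dtype=np.float64)
    beta = np.ones_like(tau)           # beta -> 1 as tau -> 0
    mask = tau > 1e-10
    beta[mask] = (1.0 - np.exp(-tau[mask])) / tau[mask]
    return beta

# _beta_sobolev_reference([1e-3, 1.0, 10.0]) ~= [0.9995, 0.6321, 0.1000]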
import json import nibabel import os import shutil import tempfile import io import zipfile from django.core.files.uploadedfile import SimpleUploadedFile from django.core.urlresolvers import reverse from django.test import TestCase, Client, override_settings, RequestFactory from uuid import uuid4 from neurovault.apps.statmaps.models import Collection, User, Image, Atlas from neurovault.apps.statmaps.utils import detect_4D, split_4D_to_3D from neurovault.apps.statmaps.views import delete_collection, download_collection from neurovault.settings import PRIVATE_MEDIA_ROOT from .utils import clearDB, save_statmap_form class CollectionSharingTest(TestCase): def setUp(self): self.user = {} self.client = {} for role in ['owner','contrib','someguy']: self.user[role] = User.objects.create_user('%s_%s' % (role, self.uniqid()), None,'pwd') self.user[role].save() self.client[role] = Client() self.client[role].login(username=self.user[role].username, password='pwd') self.coll = Collection( owner=self.user['owner'], name="Test %s" % self.uniqid() ) self.coll.save() self.coll.contributors.add(self.user['contrib']) self.coll.save() def uniqid(self): return str(uuid4())[:8] @override_settings(CRISPY_FAIL_SILENTLY=False) def testCollectionSharing(self): #view_url = self.coll.get_absolute_url() edit_url = reverse('edit_collection',kwargs={'cid': self.coll.pk}) resp = {} for role in ['owner','contrib','someguy']: resp[role] = self.client[role].get(edit_url, follow=True) """ assert that owner and contributor can edit the collection, and that some guy cannot: """ self.assertEqual(resp['owner'].status_code,200) self.assertEqual(resp['contrib'].status_code,200) self.assertEqual(resp['someguy'].status_code,403) """ assert that only the owner can view/edit contributors: """ self.assertTrue('contributor' in resp['owner'].content.lower()) self.assertFalse('contributor' in resp['contrib'].content.lower()) class DeleteCollectionsTest(TestCase): def setUp(self): self.factory = RequestFactory() self.test_path = os.path.abspath(os.path.dirname(__file__)) self.user = User.objects.create(username='neurovault') self.client = Client() self.client.login(username=self.user) self.Collection1 = Collection(name='Collection1',owner=self.user) self.Collection1.save() self.unorderedAtlas = Atlas(name='unorderedAtlas', description='',collection=self.Collection1) self.unorderedAtlas.file = SimpleUploadedFile('VentralFrontal_thr75_summaryimage_2mm.nii.gz', file(os.path.join(self.test_path,'test_data/api/VentralFrontal_thr75_summaryimage_2mm.nii.gz')).read()) self.unorderedAtlas.label_description_file = SimpleUploadedFile('test_VentralFrontal_thr75_summaryimage_2mm.xml', file(os.path.join(self.test_path,'test_data/api/unordered_VentralFrontal_thr75_summaryimage_2mm.xml')).read()) self.unorderedAtlas.save() self.Collection2 = Collection(name='Collection2',owner=self.user) self.Collection2.save() self.orderedAtlas = Atlas(name='orderedAtlas', collection=self.Collection2, label_description_file='VentralFrontal_thr75_summaryimage_2mm.xml') self.orderedAtlas.file = SimpleUploadedFile('VentralFrontal_thr75_summaryimage_2mm.nii.gz', file(os.path.join(self.test_path,'test_data/api/VentralFrontal_thr75_summaryimage_2mm.nii.gz')).read()) self.orderedAtlas.label_description_file = SimpleUploadedFile('test_VentralFrontal_thr75_summaryimage_2mm.xml', file(os.path.join(self.test_path,'test_data/api/VentralFrontal_thr75_summaryimage_2mm.xml')).read()) self.orderedAtlas.save() def tearDown(self): clearDB() def testDeleteCollection(self): 
self.client.login(username=self.user) pk1 = self.Collection1.pk pk2 = self.Collection2.pk request = self.factory.get('/collections/%s/delete' %pk1) request.user = self.user delete_collection(request, str(pk1)) imageDir = os.path.join(PRIVATE_MEDIA_ROOT, 'images') dirList = os.listdir(imageDir) print dirList self.assertIn(str(self.Collection2.pk), dirList) self.assertNotIn(str(self.Collection1.pk), dirList) class Afni4DTest(TestCase): def setUp(self): self.tmpdir = tempfile.mkdtemp() app_path = os.path.abspath(os.path.dirname(__file__)) self.afni_file = os.path.join(app_path,'test_data/TTatlas.nii.gz') self.nii_file = os.path.abspath(os.path.join(app_path,'../static/anatomical/MNI152.nii.gz')) def tearDown(self): shutil.rmtree(self.tmpdir) clearDB() """ TTatlas is the example 4D file that ships with afni, has two sub-bricks: vagrant@localhost$ 3dinfo TTatlas.nii.gz ++ 3dinfo: AFNI version=AFNI_2011_12_21_1014 (Nov 22 2014) [64-bit] <<... snip ..>> Number of values stored at each pixel = 2 -- At sub-brick #0 'uu3[0]' datum type is byte: 0 to 77 keywords = uu3+tlrc[0] ; TTatlas+tlrc[0] ; uu3+tlrc[0] -- At sub-brick #1 'uu5[0]' datum type is byte: 0 to 151 keywords = uu5+tlrc[0] ; TTatlas+tlrc[1] ; uu5+tlrc[0] """ def testAfni4DSlicing(self): test_afni = detect_4D(nibabel.load(self.afni_file)) test_non_afni = detect_4D(nibabel.load(self.nii_file)) bricks = split_4D_to_3D(nibabel.load(self.afni_file),tmp_dir=self.tmpdir) # check detection of 4D is correct self.assertTrue(test_afni) self.assertFalse(test_non_afni) # check for 2 sub bricks self.assertEquals(len(bricks),2) # check that brick labels match afni 3dinfo binary output self.assertEquals(bricks[0][0],'uu3[0]') self.assertEquals(bricks[1][0],'uu5[0]') # check that sliced niftis exist at output location self.assertTrue(os.path.exists(bricks[0][1])) self.assertTrue(os.path.exists(bricks[1][1])) class CollectionMetaDataTest(TestCase): def setUp(self): base_username = 'owner' password = 'pwd' test_path = os.path.abspath(os.path.dirname(__file__)) self.user = User.objects.create_user( "%s_%s" % (base_username, self.uniqid()), None, password ) self.user.save() self.client = Client() self.client.login(username=self.user.username, password=password) self.coll = Collection(owner=self.user, name="Test %s" % self.uniqid()) self.coll.save() def test_data_path(filename): return os.path.join(test_path, 'test_data/statmaps/%s' % filename) self.image1 = save_statmap_form( image_path=test_data_path('motor_lips.nii.gz'), collection=self.coll ) self.image2 = save_statmap_form( image_path=test_data_path('beta_0001.nii.gz'), collection=self.coll ) def tearDown(self): clearDB() def uniqid(self): return str(uuid4())[:8] def test_post_metadata(self): cognitive_paradigms = ('Early Social and Communication Scales', 'Cambridge Gambling Task') test_data = [ ['Filename', 'Subject ID', 'Sex', 'modality', 'cognitive_paradigm_cogatlas'], ['motor_lips.nii.gz', '12', '1', 'fMRI-BOLD', cognitive_paradigms[0]], ['beta_0001.nii.gz', '13', '2', 'fMRI-BOLD', cognitive_paradigms[1]] ] url = reverse('edit_metadata', kwargs={'collection_cid': self.coll.pk}) resp = self.client.post(url, data=json.dumps(test_data), content_type='application/json; charset=utf-8') self.assertEqual(resp.status_code, 200) image1 = Image.objects.get(id=self.image1.id) self.assertEqual(image1.data, {'Sex': '1', 'Subject ID': '12'}) self.assertEqual(image1.modality, 'fMRI-BOLD') self.assertEqual(image1.cognitive_paradigm_cogatlas.name, cognitive_paradigms[0]) image2 = Image.objects.get(id=self.image2.id) 
self.assertEqual(image2.cognitive_paradigm_cogatlas.name, cognitive_paradigms[1]) def test_empty_string_value_in_fixed_numeric_field(self): test_data = [ ['Filename', 'Subject ID', 'number_of_subjects'], ['motor_lips.nii.gz', '12', ''], ['beta_0001.nii.gz', '13', None] ] url = reverse('edit_metadata', kwargs={'collection_cid': self.coll.pk}) resp = self.client.post(url, data=json.dumps(test_data), content_type='application/json; charset=utf-8') self.assertEqual(resp.status_code, 200) image1 = Image.objects.get(id=self.image1.id) self.assertIsNone(image1.number_of_subjects) image2 = Image.objects.get(id=self.image2.id) self.assertIsNone(image2.number_of_subjects) def test_metadata_for_files_missing_in_the_collection(self): test_data = [ ['Filename', 'Subject ID', 'Sex'], ['motor_lips.nii.gz', '12', '1'], ['beta_0001.nii.gz', '13', '2'], ['file3.nii.gz', '14', '3'] ] url = reverse('edit_metadata', kwargs={'collection_cid': self.coll.pk}) resp = self.client.post(url, data=json.dumps(test_data), content_type='application/json; charset=utf-8') self.assertEqual(resp.status_code, 400) resp_json = json.loads(resp.content) self.assertEqual(resp_json['message'], 'File is not found in the collection: file3.nii.gz') def test_incorrect_value_in_fixed_basic_field(self): test_data = [ ['Filename', 'Subject ID', 'Sex', 'modality', 'cognitive_paradigm_cogatlas'], ['motor_lips.nii.gz', '12', '1', 'fMRI-BOLD', 'Cambridge Gambling Task'], ['beta_0001.nii.gz', '13', '2', '-*NOT-EXISTING-MOD*-', 'Cambridge Gambling Task'] ] url = reverse('edit_metadata', kwargs={'collection_cid': self.coll.pk}) resp = self.client.post(url, data=json.dumps(test_data), content_type='application/json; charset=utf-8') self.assertEqual(resp.status_code, 400) resp_json = json.loads(resp.content) self.assertEqual(resp_json['messages'], {'beta_0001.nii.gz': [{ 'Modality & acquisition type': [ "Value '-*NOT-EXISTING-MOD*-' is not a valid choice." ] }]}) def test_incorrect_value_in_fixed_foreign_field(self): test_data = [ ["Filename", "Subject ID", "Sex", "modality", "cognitive_paradigm_cogatlas"], ["motor_lips.nii.gz", "12", "1", "fMRI-BOLD", '-*NOT-EXISTING-PARADIGM*-'], ["beta_0001.nii.gz", "13", "2", "fMRI-BOLD", 'Cambridge Gambling Task'] ] url = reverse('edit_metadata', kwargs={'collection_cid': self.coll.pk}) resp = self.client.post(url, data=json.dumps(test_data), content_type='application/json; charset=utf-8') self.assertEqual(resp.status_code, 400) resp_json = json.loads(resp.content) self.assertEqual(resp_json['messages'], {'motor_lips.nii.gz': [{ 'Cognitive atlas paradigm': [ "Value '-*NOT-EXISTING-PARADIGM*-' is not a valid choice." 
] }]}) class DownloadCollectionsTest(TestCase): def setUp(self): self.factory = RequestFactory() self.test_path = os.path.abspath(os.path.dirname(__file__)) self.user = User.objects.create(username='neurovault') self.client = Client() self.client.login(username=self.user) self.Collection1 = Collection(name='Collection1',owner=self.user) self.Collection1.save() self.unorderedAtlas = Atlas(name='unorderedAtlas', description='',collection=self.Collection1) self.unorderedAtlas.file = SimpleUploadedFile('VentralFrontal_thr75_summaryimage_2mm.nii.gz', file(os.path.join(self.test_path,'test_data/api/VentralFrontal_thr75_summaryimage_2mm.nii.gz')).read()) self.unorderedAtlas.label_description_file = SimpleUploadedFile('test_VentralFrontal_thr75_summaryimage_2mm.xml', file(os.path.join(self.test_path,'test_data/api/unordered_VentralFrontal_thr75_summaryimage_2mm.xml')).read()) self.unorderedAtlas.save() def tearDown(self): clearDB() self.user.delete() def testDownloadCollection(self): self.client.login(username=self.user) pk1 = self.Collection1.pk request = self.factory.get('/collections/%s/download' %pk1) request.user = self.user response = download_collection(request, str(pk1)) self.assertTrue(response.streaming_content) self.assertEqual(response.status_code, 200) zf = zipfile.ZipFile(io.BytesIO(''.join(response.streaming_content))) self.assertEqual(len(zf.filelist), 1) # 1 Atlas self.assertIsNone(zf.testzip()) self.assertIn("Collection1/VentralFrontal_thr75_summaryimage_2mm.nii.gz", zf.namelist())
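# The edit_metadata endpoint exercised above expects a JSON body shaped like a
# spreadsheet: a header row followed by one row per image file.  A small
# hypothetical helper, shown only to illustrate that payload shape, building it
# from per-file dicts:
def build_metadata_payload(columns, rows):
    """columns: ordered header names; rows: dicts keyed by column name."""
    payload = [list(columns)]
    for row in rows:
        payload.append([row.get(col, '') for col in columns])
    return payload

# Example:
#   columns = ['Filename', 'Subject ID', 'Sex']
#   rows = [{'Filename': 'motor_lips.nii.gz', 'Subject ID': '12', 'Sex': '1'}]
#   json.dumps(build_metadata_payload(columns, rows)) produces the same shape
#   as the test_data lists posted in the tests above.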
import histogram.hdf as hh, numpy as np import os, sys # hide the warnings on divide by zero etc np.seterr(divide='ignore', invalid='ignore') def getDOS(sample_nxs, mt_nxs=None, mt_fraction=0.9, const_bg_fraction=0., Emin=-100, Emax=100, dE=1., Qmin=0, Qmax=15., dQ=0.1, T=300, Ecutoff=50., elastic_E_cutoff=(-20., 7), M=50.94, C_ms=0.3, Ei=116.446, initdos=None, update_strategy_weights=None, workdir='work', iqe_h5="iqe.h5", maxiter=10): """Compute DOS from direct-geometry powder neutron scattering spectrum by performing multiphonon and multiple-scattering corrections. Inorder to monitor messages, this function returns an iterator. Please call it with an evaluation of an iteration. For example: >>> output = list(getDOS(...)) Parameters ---------- sample_nxs : str Sample Nexus file mt_nxs : str Empty can Nexus file mt_fraction : float 0<=mt_fraction<=1. Amount of empty can data to be subtracted from sample data const_bg_fraction : float Constant background fraction Emin, Emax, dE : floats Energy transfer axis setting Qmin, Qmax, dQ : floats Momentum transfer axis setting T : float Temperature (Kelvin) Ecutoff : float Maximum phonon energy elastic_E_cutoff: 2-tuple of floats cutoff for elastic peak (meV) M : float Average atomic mass (u) C_ms: float MS = C_ms * MP Ei : float Incident energy (meV) initdos : histogram initial guess of DOS update_strategy_weights : floats Weights for the update strategies (force continuity, area conservation). Useful only if multiple Ei. work : str Work directory iqe_h5 : str A name of the file to hold the reduced data. If this file already exits, in the work directory, with the correct parameters the it is loaded rather than re reduced. maxiter: int Max iteration """ for msg in reduce2iqe(sample_nxs, Emin, Emax, dE, Qmin, Qmax, dQ, mt_nxs, iqe_h5, workdir): yield msg iqe_h5, mtiqe_h5, Qaxis, Eaxis = msg iqehist = hh.load(iqe_h5) if const_bg_fraction: ave = np.nanmean(iqehist.I) iqehist.I -= ave*const_bg_fraction if mt_nxs is not None: iqehist -= hh.load(mtiqe_h5) * (mt_fraction, 0) I = iqehist.I if (I<0).sum() > I.size * 0.005: import warnings warnings.warn("After MT subtraction, some intensities are negative. Please check your MT data and mt_fraction value") # to DOS # interpolate data from .sqe import interp # probably don't need this line newiqe = interp(iqehist, newE = np.arange(*Eaxis)) # save interpolated data hh.dump(newiqe, 'iqe-interped.h5') # init dos if initdos: initdos = hh.load(initdos) # create processing engine from .backward import sqe2dos iterdos = sqe2dos.sqe2dos( newiqe, T=T, Ecutoff=Ecutoff, elastic_E_cutoff=elastic_E_cutoff, M=M, C_ms=C_ms, Ei=Ei, initdos=initdos, update_strategy_weights=update_strategy_weights, workdir=workdir, MAX_ITERATION=maxiter) doslist = [] yield "Iterative computation of DOS..." for i, dos in enumerate(iterdos): yield "Finished round #%s" % (i+1,) continue yield "Done" return def reduce2iqe(sample_nxs, Emin, Emax, dE, Qmin, Qmax, dQ, mt_nxs=None, iqe_h5='iqe.h5', workdir='work'): """Reduce sample and (optionally) empty can nxs files and generate I(Q,E) histograms. Inorder to monitor messages, this function returns an iterator. 
Please call it using this form: >>> for msg in reduce2iqe(...): print msg Parameters ---------- sample_nxs : str Sample Nexus file Emin : float Energy tranfer axis minimum Emax : float Energy tranfer axis maximum dE : float Energy tranfer axis step size Qmin : float Momentum tranfer axis minimum Qmax : float Momentum tranfer axis maximum dQ : float Momentum tranfer axis step size mt_nxs : str Empty can Nexus file iqe_h5: str output histogram filename of reduced I(Q,E) workdir: str path to working directory """ # prepare paths if not os.path.exists(workdir): os.makedirs(workdir) if not os.path.isabs(iqe_h5): iqe_h5 = os.path.abspath(os.path.join(workdir, iqe_h5)) # reduce Eaxis = _normalize_axis_setting(Emin, Emax, dE) Eaxis = _checkEaxis(*Eaxis) Qaxis = _normalize_axis_setting(Qmin, Qmax, dQ) yield "Converting sample data to powder I(Q,E)..." raw2iqe(sample_nxs, iqe_h5, Eaxis, Qaxis, type='sample') if mt_nxs is not None: _tomtpath = lambda p: os.path.join( os.path.dirname(p), 'mt-'+os.path.basename(p)) mtiqe_h5 = _tomtpath(iqe_h5) yield "Converting MT data to powder I(Q,E)..." raw2iqe(mt_nxs, mtiqe_h5, Eaxis, Qaxis, type='MT') else: mtiqe_h5=None yield "Results: sample IQE, MT IQE, Qaxis, Eaxis" yield iqe_h5, mtiqe_h5, Qaxis, Eaxis def _checkEaxis(Emin, Emax, dE): saved = Emin, Emax, dE centers = np.arange(Emin, Emax, dE) if np.isclose(centers, 0.).any(): return saved import warnings Emin = int(Emin/dE) * dE Emax = int(Emax/dE) * dE new = Emin, Emax, dE warnings.warn( "Zero has to be one of the ticks in the energy axis.\n" "Energy axis modified from %s to %s \n" % (saved, new) ) return new def _normalize_axis_setting(min, max, delta): # try to deal with numerical error nsteps = round(1.*(max-min)/delta) if abs(max - (min+nsteps*delta)) < 1e-5: max = max + delta/1.e4 return min, max, delta def _md5(s): import hashlib return hashlib.md5(s).hexdigest() def raw2iqe(eventnxs, iqe_h5, Eaxis, Qaxis, type): """Read and reduce a raw nxs file. If the reduced file already exists it will read the existing file rather than recreate it. Parameters ---------- eventnxs : str The raw data file iqe_h5 : str The filename to create from the raw If this file already exits with the correct parameters, it is simply read. 
Eaxis : tpl A tuple containing Emin, Emax, Edelta Qaxis : tpl A tuple containing Qmin, Qmax, Qdelta type : str """ # if iqe_h5 exists and the parameters do not match, # we need to remove the old result parameters_fn = os.path.join(os.path.dirname(iqe_h5), 'raw2iqe-%s.params' % type) parameters_text = 'nxs=%s\nEaxis=%s\nQxis=%s\n' % (eventnxs, Eaxis, Qaxis) remove_cache = False if os.path.exists(iqe_h5): if os.path.exists(parameters_fn): with open(parameters_fn) as stream: saved = stream.read() if saved != parameters_text: remove_cache = True else: remove_cache = True if remove_cache: os.remove(iqe_h5) # from .redutils import reduce Emin, Emax, dE = Eaxis Emin-=dE/2; Emax-=dE/2 # mantid algo use bin boundaries Qmin, Qmax, dQ = Qaxis Qmin-=dQ/2; Qmax-=dQ/2 # reduce if not os.path.exists(iqe_h5): qaxis = Qmin, dQ, Qmax eaxis = Emin, dE, Emax if sys.version_info < (3,0) and isinstance(eventnxs, unicode): eventnxs = eventnxs.encode() if sys.version_info < (3,0) and isinstance(iqe_h5, unicode): iqe_h5 = iqe_h5.encode() reduce(eventnxs, qaxis, iqe_h5, eaxis=eaxis, tof2E='guess', ibnorm='ByCurrent') else: import warnings msg = "Reusing old reduction result from %s" % iqe_h5 warnings.warn(msg) # fix energy axis if necessary _fixEaxis(iqe_h5, Eaxis) # save parameters with open(parameters_fn, 'wt') as stream: stream.write(parameters_text) return def _fixEaxis(iqe_h5_path, Eaxis): """when iqe is obtained from a nxs or nxspe file where tof axis is already converted to E, the reduced data may not have the Eaxis as desired. this method fixes it by interpolation """ h = hh.load(iqe_h5_path) eaxis = h.axes()[1] centers = eaxis.binCenters() emin, emax, de = Eaxis centers1 = np.arange(emin, emax, de) if centers.size == centers1.size and np.allclose(centers, centers1): return # save a copy of the original histogram import shutil shutil.copyfile(iqe_h5_path, iqe_h5_path+'.bkup-wrongEaxis') from .sqe import interp h1 = interp(h, centers1) hh.dump(h1, iqe_h5_path) return from .ui.getdos0 import notebookUI
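# Example driver (the Nexus file names are hypothetical) showing how the
# generators above are meant to be consumed: iterating drives the reduction and
# the iterative DOS computation while progress messages are reported.
def _run_getdos_example():
    for msg in getDOS(
            sample_nxs='ARCS_sample.nxs',   # hypothetical sample Nexus file
            mt_nxs='ARCS_mt.nxs',           # hypothetical empty-can Nexus file
            Emin=-100., Emax=100., dE=1.,
            Qmin=0., Qmax=15., dQ=0.1,
            T=300., Ei=116.446,
            workdir='work'):
        print(msg)

# Equivalently, as noted in the getDOS docstring, the whole computation can be
# forced with
#   output = list(getDOS(...))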
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Extremely random forest graph builder. go/brain-tree.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import math import random import tensorflow as tf from tensorflow.contrib.tensor_forest.python.ops import inference_ops from tensorflow.contrib.tensor_forest.python.ops import training_ops # If tree[i][0] equals this value, then i is a leaf node. LEAF_NODE = -1 # A convenience class for holding random forest hyperparameters. # # To just get some good default parameters, use: # hparams = ForestHParams(num_classes=2, num_features=40).fill() # # Note that num_classes can not be inferred and so must always be specified. # Also, either num_splits_to_consider or num_features should be set. # # To override specific values, pass them to the constructor: # hparams = ForestHParams(num_classes=5, num_trees=10, num_features=5).fill() # # TODO(thomaswc): Inherit from tf.HParams when that is publicly available. class ForestHParams(object): """A base class for holding hyperparameters and calculating good defaults.""" def __init__(self, num_trees=100, max_nodes=10000, bagging_fraction=1.0, max_depth=0, num_splits_to_consider=0, feature_bagging_fraction=1.0, max_fertile_nodes=0, split_after_samples=250, valid_leaf_threshold=1, **kwargs): self.num_trees = num_trees self.max_nodes = max_nodes self.bagging_fraction = bagging_fraction self.feature_bagging_fraction = feature_bagging_fraction self.max_depth = max_depth self.num_splits_to_consider = num_splits_to_consider self.max_fertile_nodes = max_fertile_nodes self.split_after_samples = split_after_samples self.valid_leaf_threshold = valid_leaf_threshold for name, value in kwargs.items(): setattr(self, name, value) def values(self): return self.__dict__ def fill(self): """Intelligently sets any non-specific parameters.""" # Fail fast if num_classes or num_features isn't set. _ = getattr(self, 'num_classes') _ = getattr(self, 'num_features') self.training_library_base_dir = getattr( self, 'training_library_base_dir', '') self.inference_library_base_dir = getattr( self, 'inference_library_base_dir', '') self.bagged_num_features = int(self.feature_bagging_fraction * self.num_features) self.bagged_features = None if self.feature_bagging_fraction < 1.0: self.bagged_features = [random.sample( range(self.num_features), self.bagged_num_features) for _ in range(self.num_trees)] self.regression = getattr(self, 'regression', False) # Num_outputs is the actual number of outputs (a single prediction for # classification, a N-dimenensional point for regression). self.num_outputs = self.num_classes if self.regression else 1 # Add an extra column to classes for storing counts, which is needed for # regression and avoids having to recompute sums for classification. 
self.num_output_columns = self.num_classes + 1 # Allow each tree to be unbalanced by up to a factor of 2. self.max_depth = (self.max_depth or int(2 * math.ceil(math.log(self.max_nodes, 2)))) # The Random Forest literature recommends sqrt(# features) for # classification problems, and p/3 for regression problems. # TODO(thomaswc): Consider capping this for large number of features. self.num_splits_to_consider = ( self.num_splits_to_consider or max(10, int(math.ceil(math.sqrt(self.num_features))))) # max_fertile_nodes doesn't effect performance, only training speed. # We therefore set it primarily based upon space considerations. # Each fertile node takes up num_splits_to_consider times as much # as space as a non-fertile node. We want the fertile nodes to in # total only take up as much space as the non-fertile nodes, so num_fertile = int(math.ceil(self.max_nodes / self.num_splits_to_consider)) # But always use at least 1000 accumulate slots. num_fertile = max(num_fertile, 1000) self.max_fertile_nodes = self.max_fertile_nodes or num_fertile # But it also never needs to be larger than the number of leaves, # which is max_nodes / 2. self.max_fertile_nodes = min(self.max_fertile_nodes, int(math.ceil(self.max_nodes / 2.0))) # We have num_splits_to_consider slots to fill, and we want to spend # approximately split_after_samples samples initializing them. num_split_initializiations_per_input = max(1, int(math.floor( self.num_splits_to_consider / self.split_after_samples))) self.split_initializations_per_input = getattr( self, 'split_initializations_per_input', num_split_initializiations_per_input) # If base_random_seed is 0, the current time will be used to seed the # random number generators for each tree. If non-zero, the i-th tree # will be seeded with base_random_seed + i. self.base_random_seed = getattr(self, 'base_random_seed', 0) return self # A simple container to hold the training variables for a single tree. class TreeTrainingVariables(object): """Stores tf.Variables for training a single random tree. Uses tf.get_variable to get tree-specific names so that this can be used with a tf.learn-style implementation (one that trains a model, saves it, then relies on restoring that model to evaluate). 
""" def __init__(self, params, tree_num, training): self.tree = tf.get_variable( name=self.get_tree_name('tree', tree_num), dtype=tf.int32, initializer=tf.constant( [[-1, -1]] + [[-2, -1]] * (params.max_nodes - 1))) self.tree_thresholds = tf.get_variable( name=self.get_tree_name('tree_thresholds', tree_num), shape=[params.max_nodes], initializer=tf.constant_initializer(-1.0)) self.tree_depths = tf.get_variable( name=self.get_tree_name('tree_depths', tree_num), shape=[params.max_nodes], dtype=tf.int32, initializer=tf.constant_initializer(1)) self.end_of_tree = tf.get_variable( name=self.get_tree_name('end_of_tree', tree_num), dtype=tf.int32, initializer=tf.constant([1])) if training: self.non_fertile_leaves = tf.get_variable( name=self.get_tree_name('non_fertile_leaves', tree_num), dtype=tf.int32, initializer=tf.constant([0])) self.non_fertile_leaf_scores = tf.get_variable( name=self.get_tree_name('non_fertile_leaf_scores', tree_num), initializer=tf.constant([1.0])) self.node_to_accumulator_map = tf.get_variable( name=self.get_tree_name('node_to_accumulator_map', tree_num), shape=[params.max_nodes], dtype=tf.int32, initializer=tf.constant_initializer(-1)) self.candidate_split_features = tf.get_variable( name=self.get_tree_name('candidate_split_features', tree_num), shape=[params.max_fertile_nodes, params.num_splits_to_consider], dtype=tf.int32, initializer=tf.constant_initializer(-1)) self.candidate_split_thresholds = tf.get_variable( name=self.get_tree_name('candidate_split_thresholds', tree_num), shape=[params.max_fertile_nodes, params.num_splits_to_consider], initializer=tf.constant_initializer(0.0)) # Statistics shared by classification and regression. self.node_sums = tf.get_variable( name=self.get_tree_name('node_sums', tree_num), shape=[params.max_nodes, params.num_output_columns], initializer=tf.constant_initializer(0.0)) if training: self.candidate_split_sums = tf.get_variable( name=self.get_tree_name('candidate_split_sums', tree_num), shape=[params.max_fertile_nodes, params.num_splits_to_consider, params.num_output_columns], initializer=tf.constant_initializer(0.0)) self.accumulator_sums = tf.get_variable( name=self.get_tree_name('accumulator_sums', tree_num), shape=[params.max_fertile_nodes, params.num_output_columns], initializer=tf.constant_initializer(-1.0)) # Regression also tracks second order stats. 
if params.regression: self.node_squares = tf.get_variable( name=self.get_tree_name('node_squares', tree_num), shape=[params.max_nodes, params.num_output_columns], initializer=tf.constant_initializer(0.0)) self.candidate_split_squares = tf.get_variable( name=self.get_tree_name('candidate_split_squares', tree_num), shape=[params.max_fertile_nodes, params.num_splits_to_consider, params.num_output_columns], initializer=tf.constant_initializer(0.0)) self.accumulator_squares = tf.get_variable( name=self.get_tree_name('accumulator_squares', tree_num), shape=[params.max_fertile_nodes, params.num_output_columns], initializer=tf.constant_initializer(-1.0)) else: self.node_squares = tf.constant( 0.0, name=self.get_tree_name('node_squares', tree_num)) self.candidate_split_squares = tf.constant( 0.0, name=self.get_tree_name('candidate_split_squares', tree_num)) self.accumulator_squares = tf.constant( 0.0, name=self.get_tree_name('accumulator_squares', tree_num)) def get_tree_name(self, name, num): return '{0}-{1}'.format(name, num) class ForestStats(object): def __init__(self, tree_stats, params): """A simple container for stats about a forest.""" self.tree_stats = tree_stats self.params = params def get_average(self, thing): val = 0.0 for i in range(self.params.num_trees): val += getattr(self.tree_stats[i], thing) return val / self.params.num_trees class TreeStats(object): def __init__(self, num_nodes, num_leaves): self.num_nodes = num_nodes self.num_leaves = num_leaves class ForestTrainingVariables(object): """A container for a forests training data, consisting of multiple trees. Instantiates a TreeTrainingVariables object for each tree. We override the __getitem__ and __setitem__ function so that usage looks like this: forest_variables = ForestTrainingVariables(params) ... forest_variables.tree ... """ def __init__(self, params, device_assigner, training=True, tree_variable_class=TreeTrainingVariables): self.variables = [] for i in range(params.num_trees): with tf.device(device_assigner.get_device(i)): self.variables.append(tree_variable_class(params, i, training)) def __setitem__(self, t, val): self.variables[t] = val def __getitem__(self, t): return self.variables[t] class RandomForestDeviceAssigner(object): """A device assigner that uses the default device. Write subclasses that implement get_device for control over how trees get assigned to devices. This assumes that whole trees are assigned to a device. 
""" def __init__(self): self.cached = None def get_device(self, unused_tree_num): if not self.cached: dummy = tf.constant(0) self.cached = dummy.device return self.cached class RandomForestGraphs(object): """Builds TF graphs for random forest training and inference.""" def __init__(self, params, device_assigner=None, variables=None, tree_graphs=None, t_ops=training_ops, i_ops=inference_ops): self.params = params self.device_assigner = device_assigner or RandomForestDeviceAssigner() tf.logging.info('Constructing forest with params = ') tf.logging.info(self.params.__dict__) self.variables = variables or ForestTrainingVariables( self.params, device_assigner=self.device_assigner) tree_graph_class = tree_graphs or RandomTreeGraphs self.trees = [ tree_graph_class( self.variables[i], self.params, t_ops.Load(self.params.training_library_base_dir), i_ops.Load(self.params.inference_library_base_dir), i) for i in range(self.params.num_trees)] def _bag_features(self, tree_num, input_data): split_data = tf.split(1, self.params.num_features, input_data) return tf.concat(1, [split_data[ind] for ind in self.params.bagged_features[tree_num]]) def training_graph(self, input_data, input_labels): """Constructs a TF graph for training a random forest. Args: input_data: A tensor or placeholder for input data. input_labels: A tensor or placeholder for labels associated with input_data. Returns: The last op in the random forest training graph. """ tree_graphs = [] for i in range(self.params.num_trees): with tf.device(self.device_assigner.get_device(i)): seed = self.params.base_random_seed if seed != 0: seed += i # If using bagging, randomly select some of the input. tree_data = input_data tree_labels = input_labels if self.params.bagging_fraction < 1.0: # TODO(thomaswc): This does sampling without replacment. Consider # also allowing sampling with replacement as an option. batch_size = tf.slice(tf.shape(input_data), [0], [1]) r = tf.random_uniform(batch_size, seed=seed) mask = tf.less(r, tf.ones_like(r) * self.params.bagging_fraction) gather_indices = tf.squeeze(tf.where(mask), squeeze_dims=[1]) # TODO(thomaswc): Calculate out-of-bag data and labels, and store # them for use in calculating statistics later. tree_data = tf.gather(input_data, gather_indices) tree_labels = tf.gather(input_labels, gather_indices) if self.params.bagged_features: tree_data = self._bag_features(i, tree_data) tree_graphs.append( self.trees[i].training_graph(tree_data, tree_labels, seed)) return tf.group(*tree_graphs) def inference_graph(self, input_data): """Constructs a TF graph for evaluating a random forest. Args: input_data: A tensor or placeholder for input data. Returns: The last op in the random forest inference graph. """ probabilities = [] for i in range(self.params.num_trees): with tf.device(self.device_assigner.get_device(i)): tree_data = input_data if self.params.bagged_features: tree_data = self._bag_features(i, input_data) probabilities.append(self.trees[i].inference_graph(tree_data)) with tf.device(self.device_assigner.get_device(0)): all_predict = tf.pack(probabilities) return tf.reduce_sum(all_predict, 0) / self.params.num_trees def average_size(self): """Constructs a TF graph for evaluating the average size of a forest. Returns: The average number of nodes over the trees. 
""" sizes = [] for i in range(self.params.num_trees): with tf.device(self.device_assigner.get_device(i)): sizes.append(self.trees[i].size()) return tf.reduce_mean(tf.pack(sizes)) def average_impurity(self): """Constructs a TF graph for evaluating the leaf impurity of a forest. Returns: The last op in the graph. """ impurities = [] for i in range(self.params.num_trees): with tf.device(self.device_assigner.get_device(i)): impurities.append(self.trees[i].average_impurity()) return tf.reduce_mean(tf.pack(impurities)) def get_stats(self, session): tree_stats = [] for i in range(self.params.num_trees): with tf.device(self.device_assigner.get_device(i)): tree_stats.append(self.trees[i].get_stats(session)) return ForestStats(tree_stats, self.params) class RandomTreeGraphs(object): """Builds TF graphs for random tree training and inference.""" def __init__(self, variables, params, t_ops, i_ops, tree_num): self.training_ops = t_ops self.inference_ops = i_ops self.variables = variables self.params = params self.tree_num = tree_num def _gini(self, class_counts): """Calculate the Gini impurity. If c(i) denotes the i-th class count and c = sum_i c(i) then score = 1 - sum_i ( c(i) / c )^2 Args: class_counts: A 2-D tensor of per-class counts, usually a slice or gather from variables.node_sums. Returns: A 1-D tensor of the Gini impurities for each row in the input. """ smoothed = 1.0 + tf.slice(class_counts, [0, 1], [-1, -1]) sums = tf.reduce_sum(smoothed, 1) sum_squares = tf.reduce_sum(tf.square(smoothed), 1) return 1.0 - sum_squares / (sums * sums) def _weighted_gini(self, class_counts): """Our split score is the Gini impurity times the number of examples. If c(i) denotes the i-th class count and c = sum_i c(i) then score = c * (1 - sum_i ( c(i) / c )^2 ) = c - sum_i c(i)^2 / c Args: class_counts: A 2-D tensor of per-class counts, usually a slice or gather from variables.node_sums. Returns: A 1-D tensor of the Gini impurities for each row in the input. """ smoothed = 1.0 + tf.slice(class_counts, [0, 1], [-1, -1]) sums = tf.reduce_sum(smoothed, 1) sum_squares = tf.reduce_sum(tf.square(smoothed), 1) return sums - sum_squares / sums def _variance(self, sums, squares): """Calculate the variance for each row of the input tensors. Variance is V = E[x^2] - (E[x])^2. Args: sums: A tensor containing output sums, usually a slice from variables.node_sums. Should contain the number of examples seen in index 0 so we can calculate expected value. squares: Same as sums, but sums of squares. Returns: A 1-D tensor of the variances for each row in the input. """ total_count = tf.slice(sums, [0, 0], [-1, 1]) e_x = sums / total_count e_x2 = squares / total_count return tf.reduce_sum(e_x2 - tf.square(e_x), 1) def training_graph(self, input_data, input_labels, random_seed): """Constructs a TF graph for training a random tree. Args: input_data: A tensor or placeholder for input data. input_labels: A tensor or placeholder for labels associated with input_data. random_seed: The random number generator seed to use for this tree. 0 means use the current time as the seed. Returns: The last op in the random tree training graph. """ # Count extremely random stats. 
(node_sums, node_squares, splits_indices, splits_sums, splits_squares, totals_indices, totals_sums, totals_squares, input_leaves) = ( self.training_ops.count_extremely_random_stats( input_data, input_labels, self.variables.tree, self.variables.tree_thresholds, self.variables.node_to_accumulator_map, self.variables.candidate_split_features, self.variables.candidate_split_thresholds, num_classes=self.params.num_output_columns, regression=self.params.regression)) node_update_ops = [] node_update_ops.append( tf.assign_add(self.variables.node_sums, node_sums)) splits_update_ops = [] splits_update_ops.append(self.training_ops.scatter_add_ndim( self.variables.candidate_split_sums, splits_indices, splits_sums)) splits_update_ops.append(self.training_ops.scatter_add_ndim( self.variables.accumulator_sums, totals_indices, totals_sums)) if self.params.regression: node_update_ops.append(tf.assign_add(self.variables.node_squares, node_squares)) splits_update_ops.append(self.training_ops.scatter_add_ndim( self.variables.candidate_split_squares, splits_indices, splits_squares)) splits_update_ops.append(self.training_ops.scatter_add_ndim( self.variables.accumulator_squares, totals_indices, totals_squares)) # Sample inputs. update_indices, feature_updates, threshold_updates = ( self.training_ops.sample_inputs( input_data, self.variables.node_to_accumulator_map, input_leaves, self.variables.candidate_split_features, self.variables.candidate_split_thresholds, split_initializations_per_input=( self.params.split_initializations_per_input), split_sampling_random_seed=random_seed)) update_features_op = tf.scatter_update( self.variables.candidate_split_features, update_indices, feature_updates) update_thresholds_op = tf.scatter_update( self.variables.candidate_split_thresholds, update_indices, threshold_updates) # Calculate finished nodes. with tf.control_dependencies(splits_update_ops): children = tf.squeeze(tf.slice(self.variables.tree, [0, 0], [-1, 1]), squeeze_dims=[1]) is_leaf = tf.equal(LEAF_NODE, children) leaves = tf.to_int32(tf.squeeze(tf.where(is_leaf), squeeze_dims=[1])) finished = self.training_ops.finished_nodes( leaves, self.variables.node_to_accumulator_map, self.variables.accumulator_sums, num_split_after_samples=self.params.split_after_samples) # Update leaf scores. # TODO(gilberth): Optimize this. It currently calculates counts for # every non-fertile leaf. with tf.control_dependencies(node_update_ops): def dont_update_leaf_scores(): return self.variables.non_fertile_leaf_scores def update_leaf_scores_regression(): sums = tf.gather(self.variables.node_sums, self.variables.non_fertile_leaves) squares = tf.gather(self.variables.node_squares, self.variables.non_fertile_leaves) new_scores = self._variance(sums, squares) return tf.assign(self.variables.non_fertile_leaf_scores, new_scores) def update_leaf_scores_classification(): counts = tf.gather(self.variables.node_sums, self.variables.non_fertile_leaves) new_scores = self._weighted_gini(counts) return tf.assign(self.variables.non_fertile_leaf_scores, new_scores) # Because we can't have tf.self.variables of size 0, we have to put in a # garbage value of -1 in there. Here we check for that so we don't # try to index into node_per_class_weights in a tf.gather with a negative # number. update_nonfertile_leaves_scores_op = tf.cond( tf.less(self.variables.non_fertile_leaves[0], 0), dont_update_leaf_scores, update_leaf_scores_regression if self.params.regression else update_leaf_scores_classification) # Calculate best splits. 
with tf.control_dependencies(splits_update_ops): split_indices = self.training_ops.best_splits( finished, self.variables.node_to_accumulator_map, self.variables.candidate_split_sums, self.variables.candidate_split_squares, self.variables.accumulator_sums, self.variables.accumulator_squares, regression=self.params.regression) # Grow tree. with tf.control_dependencies([update_features_op, update_thresholds_op]): (tree_update_indices, tree_children_updates, tree_threshold_updates, tree_depth_updates, new_eot) = ( self.training_ops.grow_tree( self.variables.end_of_tree, self.variables.tree_depths, self.variables.node_to_accumulator_map, finished, split_indices, self.variables.candidate_split_features, self.variables.candidate_split_thresholds)) tree_update_op = tf.scatter_update( self.variables.tree, tree_update_indices, tree_children_updates) threhsolds_update_op = tf.scatter_update( self.variables.tree_thresholds, tree_update_indices, tree_threshold_updates) depth_update_op = tf.scatter_update( self.variables.tree_depths, tree_update_indices, tree_depth_updates) # Update fertile slots. with tf.control_dependencies([update_nonfertile_leaves_scores_op, depth_update_op]): (node_map_updates, accumulators_cleared, accumulators_allocated, new_nonfertile_leaves, new_nonfertile_leaves_scores) = ( self.training_ops.update_fertile_slots( finished, self.variables.non_fertile_leaves, self.variables.non_fertile_leaf_scores, self.variables.end_of_tree, self.variables.tree_depths, self.variables.accumulator_sums, self.variables.node_to_accumulator_map, max_depth=self.params.max_depth, regression=self.params.regression)) # Ensure end_of_tree doesn't get updated until UpdateFertileSlots has # used it to calculate new leaves. gated_new_eot, = tf.tuple([new_eot], control_inputs=[new_nonfertile_leaves]) eot_update_op = tf.assign(self.variables.end_of_tree, gated_new_eot) updates = [] updates.append(eot_update_op) updates.append(tree_update_op) updates.append(threhsolds_update_op) updates.append(tf.assign( self.variables.non_fertile_leaves, new_nonfertile_leaves, validate_shape=False)) updates.append(tf.assign( self.variables.non_fertile_leaf_scores, new_nonfertile_leaves_scores, validate_shape=False)) updates.append(tf.scatter_update( self.variables.node_to_accumulator_map, tf.squeeze(tf.slice(node_map_updates, [0, 0], [1, -1]), squeeze_dims=[0]), tf.squeeze(tf.slice(node_map_updates, [1, 0], [1, -1]), squeeze_dims=[0]))) cleared_and_allocated_accumulators = tf.concat( 0, [accumulators_cleared, accumulators_allocated]) # Calculate values to put into scatter update for candidate counts. # Candidate split counts are always reset back to 0 for both cleared # and allocated accumulators. This means some accumulators might be doubly # reset to 0 if the were released and not allocated, then later allocated. split_values = tf.tile( tf.expand_dims(tf.expand_dims( tf.zeros_like(cleared_and_allocated_accumulators, dtype=tf.float32), 1), 2), [1, self.params.num_splits_to_consider, self.params.num_output_columns]) updates.append(tf.scatter_update( self.variables.candidate_split_sums, cleared_and_allocated_accumulators, split_values)) if self.params.regression: updates.append(tf.scatter_update( self.variables.candidate_split_squares, cleared_and_allocated_accumulators, split_values)) # Calculate values to put into scatter update for total counts. 
total_cleared = tf.tile( tf.expand_dims( tf.neg(tf.ones_like(accumulators_cleared, dtype=tf.float32)), 1), [1, self.params.num_output_columns]) total_reset = tf.tile( tf.expand_dims( tf.zeros_like(accumulators_allocated, dtype=tf.float32), 1), [1, self.params.num_output_columns]) accumulator_updates = tf.concat(0, [total_cleared, total_reset]) updates.append(tf.scatter_update( self.variables.accumulator_sums, cleared_and_allocated_accumulators, accumulator_updates)) if self.params.regression: updates.append(tf.scatter_update( self.variables.accumulator_squares, cleared_and_allocated_accumulators, accumulator_updates)) # Calculate values to put into scatter update for candidate splits. split_features_updates = tf.tile( tf.expand_dims( tf.neg(tf.ones_like(cleared_and_allocated_accumulators)), 1), [1, self.params.num_splits_to_consider]) updates.append(tf.scatter_update( self.variables.candidate_split_features, cleared_and_allocated_accumulators, split_features_updates)) return tf.group(*updates) def inference_graph(self, input_data): """Constructs a TF graph for evaluating a random tree. Args: input_data: A tensor or placeholder for input data. Returns: The last op in the random tree inference graph. """ return self.inference_ops.tree_predictions( input_data, self.variables.tree, self.variables.tree_thresholds, self.variables.node_sums, valid_leaf_threshold=self.params.valid_leaf_threshold) def average_impurity(self): """Constructs a TF graph for evaluating the average leaf impurity of a tree. If in regression mode, this is the leaf variance. If in classification mode, this is the gini impurity. Returns: The last op in the graph. """ children = tf.squeeze(tf.slice(self.variables.tree, [0, 0], [-1, 1]), squeeze_dims=[1]) is_leaf = tf.equal(LEAF_NODE, children) leaves = tf.to_int32(tf.squeeze(tf.where(is_leaf), squeeze_dims=[1])) counts = tf.gather(self.variables.node_sums, leaves) impurity = self._weighted_gini(counts) return tf.reduce_sum(impurity) / tf.reduce_sum(counts + 1.0) def size(self): """Constructs a TF graph for evaluating the current number of nodes. Returns: The current number of nodes in the tree. """ return self.variables.end_of_tree - 1 def get_stats(self, session): num_nodes = self.variables.end_of_tree.eval(session=session) - 1 num_leaves = tf.where( tf.equal(tf.squeeze(tf.slice(self.variables.tree, [0, 0], [-1, 1])), LEAF_NODE)).eval(session=session).shape[0] return TreeStats(num_nodes, num_leaves)
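# Editor's sketch (illustrative, not used by the class above): average_impurity()
# reduces a per-leaf weighted Gini score over all leaf nodes. The standard
# weighted Gini for class counts c with total n is n * (1 - sum((c/n)^2)), i.e.
# n - sum(c^2) / n; the exact smoothing inside the real _weighted_gini() op is
# an assumption here. Only the final reduction mirrors the graph code:
# sum(per-leaf score) / sum(counts + 1).
import numpy as np  # noqa: E402


def weighted_gini_sketch(counts):
  """Per-leaf weighted Gini for a [num_leaves, num_classes] count array."""
  counts = np.asarray(counts, dtype=np.float64)
  totals = counts.sum(axis=1)
  return totals - (counts ** 2).sum(axis=1) / np.maximum(totals, 1e-12)

# Example: counts = np.array([[10., 0.], [5., 5.]]) gives scores [0., 5.]; the
# average impurity would then be weighted_gini_sketch(counts).sum() /
# (counts + 1.0).sum(), matching the reduction in average_impurity().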
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """Document class and its helper functions. This module also provides a deafult root-document object. """ import os import tempfile import shutil from functools import partial from types import ModuleType from typing import Any, Callable, Optional, Union import weakref from wdom import server from wdom.element import Element, Attr, HTMLElement, getElementsBy from wdom.element import getElementsByClassName, getElementsByTagName from wdom.element import querySelector, querySelectorAll from wdom.event import Event, EventTarget, WebEventTarget from wdom.node import Node, DocumentType, Text, RawHtml, Comment, ParentNode from wdom.node import DocumentFragment, NodeList from wdom.options import config from wdom.tag import Tag from wdom.tag import Html, Head, Body, Meta, Link, Title, Script from wdom.web_node import WdomElement from wdom.window import Window def getElementById(id: str) -> Optional[Node]: """Get element with ``id``.""" elm = Element._elements_with_id.get(id) return elm def getElementByWdomId(id: str) -> Optional[WebEventTarget]: """Get element with ``wdom_id``.""" if not id: return None elif id == 'document': return get_document() elif id == 'window': return get_document().defaultView elm = WdomElement._elements_with_wdom_id.get(id) return elm def _cleanup(path: str) -> None: """Cleanup temporary directory.""" if os.path.isdir(path): shutil.rmtree(path) def create_element(tag: str, name: str = None, base: type = None, attr: dict = None) -> Node: """Create element with a tag of ``name``. :arg str name: html tag. :arg type base: Base class of the created element (defatlt: ``WdomElement``) :arg dict attr: Attributes (key-value pairs dict) of the new element. """ from wdom.web_node import WdomElement from wdom.tag import Tag from wdom.window import customElements if attr is None: attr = {} if name: base_class = customElements.get((name, tag)) else: base_class = customElements.get((tag, None)) if base_class is None: attr['_registered'] = False base_class = base or WdomElement if issubclass(base_class, Tag): return base_class(**attr) return base_class(tag, **attr) def _find_tag(elm: Node, tag: str) -> Optional[Node]: _tag = tag.lower() for child in elm.childNodes: if child.nodeType == Element.nodeType and child.localName == _tag: return child return None class Document(Node, ParentNode, EventTarget): """Base class for Document node.""" nodeType = Node.DOCUMENT_NODE nodeName = '#document' def __init__(self, *, doctype: str = 'html', default_class: type = HTMLElement, **kwargs: Any) -> None: """Create new Document node. :arg str doctype: Document type of this document. :arg type default_class: Default class created by :py:meth:`createElement` method. 
""" super().__init__() self.__window = Window(self) self._default_class = default_class self.__doctype = DocumentType(doctype, parent=self) self.__html = Html(parent=self) self.__head = Head(parent=self.documentElement) self.__body = Body(parent=self.documentElement) @property def defaultView(self) -> Window: """Return :class:`Window` class of this document.""" return self.__window @property def doctype(self) -> DocumentType: """Return DocumentType element of this document.""" return self.__doctype @property def documentElement(self) -> Element: """Return <html> element of this document.""" return self.__html @property def head(self) -> Element: """Return <head> element of this document.""" return self.__head def _find_charset_node(self) -> Optional[Element]: for child in self.head: if child.localName == 'meta' and child.hasAttribute('charset'): return child return None @property def characterSet(self) -> str: """Get/Set charset of this document.""" charset = self._find_charset_node() if charset: return charset.getAttribute('charset') # type: ignore return '' @characterSet.setter def characterSet(self, charset: str) -> None: """Set character set of this document.""" charset_node = self._find_charset_node() or Meta(parent=self.head) charset_node.setAttribute('charset', charset) @property def body(self) -> Element: """Return <body> element of this document.""" return self.__body @property def title(self) -> str: """Get/Set title string of this document.""" title_element = _find_tag(self.head, 'title') if title_element: return title_element.textContent return '' @title.setter def title(self, new_title: str) -> None: _title = _find_tag(self.head, 'title') title_element = _title or Title(parent=self.head) title_element.textContent = new_title def getElementsBy(self, cond: Callable[[Element], bool]) -> NodeList: """Get elements in this document which matches condition.""" return getElementsBy(self, cond) def getElementsByTagName(self, tag: str) -> NodeList: """Get elements with tag name in this document.""" return getElementsByTagName(self, tag) def getElementsByClassName(self, class_name: str) -> NodeList: """Get elements with class name in this document.""" return getElementsByClassName(self, class_name) def getElementById(self, id: str) -> Optional[Node]: """Get element by ``id``. If this document does not have the element with the id, return None. 
""" elm = getElementById(id) if elm and elm.ownerDocument is self: return elm return None def createDocumentFragment(self) -> DocumentFragment: """Create empty document fragment.""" return DocumentFragment() def createTextNode(self, text: str) -> Text: """Create text node with ``text``.""" return Text(text) def createComment(self, comment: str) -> Comment: """Create comment node with ``comment``.""" return Comment(comment) def createElement(self, tag: str) -> Node: """Create new element whose tag name is ``tag``.""" return create_element(tag, base=self._default_class) def createEvent(self, event: str) -> Event: """Create Event object with ``event`` type.""" return Event(event) def createAttribute(self, name: str) -> Attr: """Create Attribute object with ``name``.""" return Attr(name) def querySelector(self, selectors: str) -> Node: """Not Implemented.""" return querySelector(self, selectors) def querySelectorAll(self, selectors: str) -> NodeList: """Not Implemented.""" return querySelectorAll(self, selectors) class WdomDocument(Document, WebEventTarget): """Main document class for WDOM applications.""" @property def wdom_id(self) -> str: # noqa: D102 return 'document' @property def connected(self) -> bool: # noqa: D102 return server.is_connected() @property def tempdir(self) -> str: """Return temporary directory used by this document.""" return self.__tempdir def __init__(self, *, doctype: str = 'html', title: str = 'W-DOM', charset: str = 'utf-8', default_class: type = WdomElement, autoreload: bool = None, reload_wait: float =None, **kwargs: Any) -> None: """Create new document object for WDOM application. .. caution:: Don't create new document from :class:`WdomDocument` class constructor. Use :func:`get_new_document` function instead. :arg str doctype: doctype of the document (default: html). :arg str title: title of the document. :arg str charset: charset of the document. :arg type default_class: Set default Node class of the document. This class is used when make node by :py:meth:`createElement()` :arg bool autoreload: Enable/Disable autoreload (default: False). :arg float reload_wait: How long (seconds) wait to reload. This parameter is only used when autoreload is enabled. """ self.__tempdir = _tempdir = tempfile.mkdtemp() self._finalizer = weakref.finalize(self, # type: ignore partial(_cleanup, _tempdir)) self._autoreload = autoreload self._reload_wait = reload_wait super().__init__(doctype=doctype, default_class=default_class) self.characterSet = charset self.title = title self.script = Script(parent=self.body) self._autoreload_script = Script(parent=self.head) self.addEventListener('mount', self._on_mount) def _set_autoreload(self) -> None: self._autoreload_script.textContent = '' if self._autoreload is None: autoreload = (config.autoreload or config.debug) else: autoreload = self._autoreload if autoreload: ar_script = [] ar_script.append('var WDOM_AUTORELOAD = true') if self._reload_wait is not None: ar_script.append('var WDOM_RELOAD_WAIT = {}'.format( self._reload_wait)) self._autoreload_script.textContent = '\n{}\n'.format( '\n'.join(ar_script)) def getElementByWdomId(self, id: Union[str]) -> Optional[WebEventTarget]: """Get an element node with ``wdom_id``. If this document does not have the element with the id, return None. 
""" elm = getElementByWdomId(id) if elm and elm.ownerDocument is self: return elm return None def add_jsfile(self, src: str) -> None: """Add JS file to load at this document's bottom of the body.""" self.body.appendChild(Script(src=src)) def add_jsfile_head(self, src: str) -> None: """Add JS file to load at this document's header.""" self.head.appendChild(Script(src=src)) def add_cssfile(self, src: str) -> None: """Add CSS file to load at this document's header.""" self.head.appendChild(Link(rel='stylesheet', href=src)) def add_header(self, header: str) -> None: """Insert header tag staring at this document's header. :arg str header: tag to insert <head> ~ </head> area. """ self.head.appendChild(RawHtml(header)) def register_theme(self, theme: ModuleType) -> None: """Set theme for this docuemnt. This method sets theme's js/css files and headers on this document. :arg ModuleType theme: a module which has ``js_files``, ``css_files``, ``headers``, and ``extended_classes``. see ``wdom.themes`` directory actual theme module structures. """ if not hasattr(theme, 'css_files'): raise ValueError('theme module must include `css_files`.') for css in getattr(theme, 'css_files', []): self.add_cssfile(css) for js in getattr(theme, 'js_files', []): self.add_jsfile(js) for header in getattr(theme, 'headers', []): self.add_header(header) for cls in getattr(theme, 'extended_classes', []): self.defaultView.customElements.define(cls) def build(self) -> str: """Return HTML representation of this document.""" self._set_autoreload() return ''.join(child.html for child in self.childNodes) def get_new_document( # noqa: C901 include_wdom_js: bool = True, include_skeleton: bool = False, include_normalizecss: bool = False, autoreload: bool = None, reload_wait: float = None, log_level: Union[int, str] = None, log_prefix: str = None, log_console: bool = False, ws_url: str = None, message_wait: float = None, document_factory: Callable[..., Document] = WdomDocument, **kwargs: Any) -> Document: """Create new :class:`Document` object with options. :arg bool include_wdom_js: Include wdom.js file. Usually should be True. :arg bool include_skeleton: Include skelton.css. :arg bool include_normalizecss: Include normalize.css. :arg bool autoreload: Enable autoreload flag. This flag overwrites ``--debug`` flag, which automatically enables autoreload. :arg float reload_wait: Seconds to wait until reload when autoreload is enabled. :arg str log_level: Log level string, chosen from DEBUG, INFO, WARN, ERROR. Integer values are also acceptable like ``logging.INFO``. By default use ``wdom.config.options.log_level``, which default is ``INFO``. :arg str log_prefix: Prefix of log outputs. :arg bool log_console: Flag to show wdom log on browser console. :arg str ws_url: URL string to the ws url. Default: ``ws://localhost:8888/wdom_ws``. :arg float message_wait: Duration (seconds) to send WS messages. :arg Callable document_factory: Factory function/class to create Document object. 
:rtype: Document """ document = document_factory( autoreload=autoreload, reload_wait=reload_wait, **kwargs ) if log_level is None: log_level = config.logging if message_wait is None: message_wait = config.message_wait log_script = [] log_script.append('var WDOM_MESSAGE_WAIT = {}'.format(message_wait)) if isinstance(log_level, str): log_script.append('var WDOM_LOG_LEVEL = \'{}\''.format(log_level)) elif isinstance(log_level, int): log_script.append('var WDOM_LOG_LEVEL = {}'.format(log_level)) if log_prefix: log_script.append('var WDOM_LOG_PREFIX = \'{}\''.format(log_prefix)) if log_console: log_script.append('var WDOM_LOG_CONSOLE = true') if log_script: _s = Script(parent=document.head) _s.textContent = '\n{}\n'.format('\n'.join(log_script)) if ws_url: _s = Script(parent=document.head) _s.textContent = '\nvar WDOM_WS_URL = \'{}\'\n'.format(ws_url) if include_wdom_js: document.add_jsfile_head('_static/js/wdom.js') return document def get_document() -> Document: """Get current root document object. :rtype: Document """ return rootDocument def set_document(new_document: Document) -> None: """Set a new document as a current root document. :param Document new_document: New root document. """ global rootDocument rootDocument = new_document def set_app(app: Tag) -> None: """Set ``Tag`` as applicaion to the current root document. Equivalent to ``get_document().body.prepend(app)``. """ document = get_document() document.body.prepend(app) rootDocument = get_new_document()
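# Illustrative usage sketch (editor's addition). `H1` is assumed to be one of
# the standard tag classes exported by wdom.tag; everything else uses only the
# functions defined above. Guarded so importing this module stays free of extra
# side effects beyond creating rootDocument.
if __name__ == '__main__':
    from wdom.tag import H1

    set_app(H1('Hello, WDOM'))        # same as get_document().body.prepend(app)
    print(get_document().build())     # doctype + <html> tree serialized to HTML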
from __future__ import unicode_literals import mock import unittest from nose.tools import * # noqa from tests.factories import ProjectFactory, NodeFactory, CommentFactory from website.addons.osfstorage.tests import factories from website.addons.osfstorage.tests.utils import StorageTestCase import datetime from modularodm import exceptions as modm_errors from website.files import models from website.addons.osfstorage import utils from website.addons.osfstorage import settings from website.files.exceptions import FileNodeCheckedOutError class TestOsfstorageFileNode(StorageTestCase): def test_root_node_exists(self): assert_true(self.node_settings.root_node is not None) def test_root_node_has_no_parent(self): assert_true(self.node_settings.root_node.parent is None) def test_node_reference(self): assert_equal(self.project, self.node_settings.root_node.node) # def test_get_folder(self): # file = models.OsfStorageFileNode(name='MOAR PYLONS', is_file=True, node=self.node) # folder = models.OsfStorageFileNode(name='MOAR PYLONS', is_file=False, node=self.node) # _id = folder._id # file.save() # folder.save() # assert_equal(folder, models.OsfStorageFileNode.get_folder(_id, self.node_settings)) # def test_get_file(self): # file = models.OsfStorageFileNode(name='MOAR PYLONS', is_file=True, node=self.node) # folder = models.OsfStorageFileNode(name='MOAR PYLONS', is_file=False, node=self.node) # file.save() # folder.save() # _id = file._id # assert_equal(file, models.OsfStorageFileNode.get_file(_id, self.node_settings)) def test_serialize(self): file = models.OsfStorageFile(name='MOAR PYLONS', node=self.node_settings.owner) assert_equals(file.serialize(), { u'id': file._id, u'path': file.path, u'name': 'MOAR PYLONS', u'kind': 'file', u'version': 0, u'downloads': 0, u'size': None, u'modified': None, u'contentType': None, u'checkout': None, u'md5': None, u'sha256': None, }) version = file.create_version( self.user, { 'service': 'cloud', settings.WATERBUTLER_RESOURCE: 'osf', 'object': '06d80e', }, { 'size': 1234, 'contentType': 'text/plain' }) assert_equals(file.serialize(), { 'id': file._id, 'path': file.path, 'name': 'MOAR PYLONS', 'kind': 'file', 'version': 1, 'downloads': 0, 'size': 1234, 'modified': None, 'contentType': 'text/plain', 'checkout': None, 'md5': None, 'sha256': None, }) date = datetime.datetime.now() version.update_metadata({ 'modified': date.isoformat() }) assert_equals(file.serialize(), { 'id': file._id, 'path': file.path, 'name': 'MOAR PYLONS', 'kind': 'file', 'version': 1, 'downloads': 0, 'size': 1234, 'modified': date.isoformat(), 'contentType': 'text/plain', 'checkout': None, 'md5': None, 'sha256': None, }) def test_get_child_by_name(self): child = self.node_settings.get_root().append_file('Test') assert_equal(child, self.node_settings.get_root().find_child_by_name('Test')) def test_root_node_path(self): assert_equal(self.node_settings.get_root().name, '') def test_folder_path(self): path = '/{}/'.format(self.node_settings.root_node._id) assert_equal(self.node_settings.get_root().path, path) def test_file_path(self): file = models.OsfStorageFileNode(name='MOAR PYLONS', is_file=True, node=self.node) file.save() assert_equal(file.name, 'MOAR PYLONS') assert_equal(file.path, '/{}'.format(file._id)) def test_append_folder(self): child = self.node_settings.get_root().append_folder('Test') children = self.node_settings.get_root().children assert_equal(child.kind, 'folder') assert_equal([child], list(children)) def test_append_file(self): child = 
self.node_settings.get_root().append_file('Test') children = self.node_settings.get_root().children assert_equal(child.kind, 'file') assert_equal([child], list(children)) def test_append_to_file(self): child = self.node_settings.get_root().append_file('Test') with assert_raises(AttributeError): child.append_file('Cant') def test_children(self): assert_equals([ self.node_settings.get_root().append_file('Foo{}Bar'.format(x)) for x in xrange(100) ], list(self.node_settings.get_root().children)) def test_download_count_file_defaults(self): child = self.node_settings.get_root().append_file('Test') assert_equals(child.get_download_count(), 0) @mock.patch('framework.analytics.session') def test_download_count_file(self, mock_session): mock_session.data = {} child = self.node_settings.get_root().append_file('Test') utils.update_analytics(self.project, child._id, 0) utils.update_analytics(self.project, child._id, 1) utils.update_analytics(self.project, child._id, 2) assert_equals(child.get_download_count(), 3) assert_equals(child.get_download_count(0), 1) assert_equals(child.get_download_count(1), 1) assert_equals(child.get_download_count(2), 1) @unittest.skip def test_create_version(self): pass @unittest.skip def test_update_version_metadata(self): pass def test_delete_folder(self): parent = self.node_settings.get_root().append_folder('Test') kids = [] for x in range(10): kid = parent.append_file(str(x)) kid.save() kids.append(kid) count = models.OsfStorageFileNode.find().count() tcount = models.TrashedFileNode.find().count() parent.delete() assert_is(models.OsfStorageFileNode.load(parent._id), None) assert_equals(count - 11, models.OsfStorageFileNode.find().count()) assert_equals(tcount + 11, models.TrashedFileNode.find().count()) for kid in kids: assert_is( models.OsfStorageFileNode.load(kid._id), None ) def test_delete_file(self): child = self.node_settings.get_root().append_file('Test') child.delete() assert_is(models.OsfStorageFileNode.load(child._id), None) trashed = models.TrashedFileNode.load(child._id) child_storage = child.to_storage() trashed_storage = trashed.to_storage() trashed_storage['parent'] = trashed_storage['parent'][0] child_storage['materialized_path'] = child.materialized_path trashed_storage.pop('deleted_by') trashed_storage.pop('deleted_on') assert_equal(child_storage.pop('path'), '') assert_equal(trashed_storage.pop('path'), '/' + child._id) assert_equal(trashed_storage, child_storage) def test_materialized_path(self): child = self.node_settings.get_root().append_file('Test') assert_equals('/Test', child.materialized_path) def test_materialized_path_folder(self): child = self.node_settings.get_root().append_folder('Test') assert_equals('/Test/', child.materialized_path) def test_materialized_path_nested(self): child = self.node_settings.get_root().append_folder('Cloud').append_file('Carp') assert_equals('/Cloud/Carp', child.materialized_path) def test_copy(self): to_copy = self.node_settings.get_root().append_file('Carp') copy_to = self.node_settings.get_root().append_folder('Cloud') copied = to_copy.copy_under(copy_to) assert_not_equal(copied, to_copy) assert_equal(copied.parent, copy_to) assert_equal(to_copy.parent, self.node_settings.get_root()) def test_move_nested(self): new_project = ProjectFactory() other_node_settings = new_project.get_addon('osfstorage') move_to = other_node_settings.get_root().append_folder('Cloud') to_move = self.node_settings.get_root().append_folder('Carp') child = to_move.append_file('A dee um') moved = to_move.move_under(move_to) 
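        # `child` still holds a stale reference to the original project; reload
        # it so the node assertions below see the post-move state.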
child.reload() assert_equal(moved, to_move) assert_equal(new_project, to_move.node) assert_equal(new_project, move_to.node) assert_equal(new_project, child.node) def test_copy_rename(self): to_copy = self.node_settings.get_root().append_file('Carp') copy_to = self.node_settings.get_root().append_folder('Cloud') copied = to_copy.copy_under(copy_to, name='But') assert_equal(copied.name, 'But') assert_not_equal(copied, to_copy) assert_equal(to_copy.name, 'Carp') assert_equal(copied.parent, copy_to) assert_equal(to_copy.parent, self.node_settings.get_root()) def test_move(self): to_move = self.node_settings.get_root().append_file('Carp') move_to = self.node_settings.get_root().append_folder('Cloud') moved = to_move.move_under(move_to) assert_equal(to_move, moved) assert_equal(moved.parent, move_to) def test_move_and_rename(self): to_move = self.node_settings.get_root().append_file('Carp') move_to = self.node_settings.get_root().append_folder('Cloud') moved = to_move.move_under(move_to, name='Tuna') assert_equal(to_move, moved) assert_equal(to_move.name, 'Tuna') assert_equal(moved.parent, move_to) @unittest.skip def test_move_folder(self): pass @unittest.skip def test_move_folder_and_rename(self): pass @unittest.skip def test_rename_folder(self): pass @unittest.skip def test_rename_file(self): pass @unittest.skip def test_move_across_nodes(self): pass @unittest.skip def test_move_folder_across_nodes(self): pass @unittest.skip def test_copy_across_nodes(self): pass @unittest.skip def test_copy_folder_across_nodes(self): pass class TestNodeSettingsModel(StorageTestCase): def test_fields(self): assert_true(self.node_settings._id) assert_is(self.node_settings.has_auth, True) assert_is(self.node_settings.complete, True) def test_after_fork_copies_versions(self): num_versions = 5 path = 'jazz/dreamers-ball.mp3' record = self.node_settings.get_root().append_file(path) for _ in range(num_versions): version = factories.FileVersionFactory() record.versions.append(version) record.save() fork = self.project.fork_node(self.auth_obj) fork_node_settings = fork.get_addon('osfstorage') fork_node_settings.reload() cloned_record = fork_node_settings.get_root().find_child_by_name(path) assert_equal(cloned_record.versions, record.versions) assert_true(fork_node_settings.root_node) class TestOsfStorageFileVersion(StorageTestCase): def setUp(self): super(TestOsfStorageFileVersion, self).setUp() self.user = factories.AuthUserFactory() self.mock_date = datetime.datetime(1991, 10, 31) def test_fields(self): version = factories.FileVersionFactory( size=1024, content_type='application/json', date_modified=datetime.datetime.now(), ) retrieved = models.FileVersion.load(version._id) assert_true(retrieved.creator) assert_true(retrieved.location) assert_true(retrieved.size) assert_is(retrieved.identifier, 0) assert_true(retrieved.content_type) assert_true(retrieved.date_modified) def test_is_duplicate_true(self): version1 = factories.FileVersionFactory() version2 = factories.FileVersionFactory() assert_true(version1.is_duplicate(version2)) assert_true(version2.is_duplicate(version1)) def test_is_duplicate_false(self): version1 = factories.FileVersionFactory( location={ 'service': 'cloud', settings.WATERBUTLER_RESOURCE: 'osf', 'object': 'd077f2', }, ) version2 = factories.FileVersionFactory( location={ 'service': 'cloud', settings.WATERBUTLER_RESOURCE: 'osf', 'object': '06d80e', }, ) assert_false(version1.is_duplicate(version2)) assert_false(version2.is_duplicate(version1)) def test_validate_location(self): version = 
factories.FileVersionFactory.build(location={}) with assert_raises(modm_errors.ValidationValueError): version.save() version.location = { 'service': 'cloud', settings.WATERBUTLER_RESOURCE: 'osf', 'object': 'object', } version.save() def test_update_metadata(self): version = factories.FileVersionFactory() version.update_metadata({'archive': 'glacier', 'size': 123, 'modified': 'Mon, 16 Feb 2015 18:45:34 GMT'}) version.reload() assert_in('archive', version.metadata) assert_equal(version.metadata['archive'], 'glacier') def test_matching_archive(self): version = factories.FileVersionFactory( location={ 'service': 'cloud', settings.WATERBUTLER_RESOURCE: 'osf', 'object': 'd077f2', }, metadata={'sha256': 'existing'} ) factories.FileVersionFactory( location={ 'service': 'cloud', settings.WATERBUTLER_RESOURCE: 'osf', 'object': '06d80e', }, metadata={ 'sha256': 'existing', 'vault': 'the cloud', 'archive': 'erchiv' } ) assert_is(version._find_matching_archive(), True) assert_is_not(version.archive, None) assert_equal(version.metadata['vault'], 'the cloud') assert_equal(version.metadata['archive'], 'erchiv') def test_archive_exits(self): node_addon = self.project.get_addon('osfstorage') fnode = node_addon.get_root().append_file('MyCoolTestFile') version = fnode.create_version( self.user, { 'service': 'cloud', settings.WATERBUTLER_RESOURCE: 'osf', 'object': '06d80e', }, { 'sha256': 'existing', 'vault': 'the cloud', 'archive': 'erchiv' }) assert_equal(version.archive, 'erchiv') version2 = fnode.create_version( self.user, { 'service': 'cloud', settings.WATERBUTLER_RESOURCE: 'osf', 'object': '07d80a', }, { 'sha256': 'existing', }) assert_equal(version2.archive, 'erchiv') def test_no_matching_archive(self): models.FileVersion.remove() assert_is(False, factories.FileVersionFactory( location={ 'service': 'cloud', settings.WATERBUTLER_RESOURCE: 'osf', 'object': 'd077f2', }, metadata={'sha256': 'existing'} )._find_matching_archive()) class TestOsfStorageCheckout(StorageTestCase): def setUp(self): super(TestOsfStorageCheckout, self).setUp() self.user = factories.AuthUserFactory() self.node = ProjectFactory(creator=self.user) self.osfstorage = self.node.get_addon('osfstorage') self.root_node = self.osfstorage.get_root() self.file = self.root_node.append_file('3005') def test_checkout_logs(self): non_admin = factories.AuthUserFactory() self.node.add_contributor(non_admin, permissions=['read', 'write']) self.node.save() self.file.check_in_or_out(non_admin, non_admin, save=True) self.file.reload() self.node.reload() assert_equal(self.file.checkout, non_admin) assert_equal(self.node.logs[-1].action, 'checked_out') assert_equal(self.node.logs[-1].user, non_admin) self.file.check_in_or_out(self.user, None, save=True) self.file.reload() self.node.reload() assert_equal(self.file.checkout, None) assert_equal(self.node.logs[-1].action, 'checked_in') assert_equal(self.node.logs[-1].user, self.user) self.file.check_in_or_out(self.user, self.user, save=True) self.file.reload() self.node.reload() assert_equal(self.file.checkout, self.user) assert_equal(self.node.logs[-1].action, 'checked_out') assert_equal(self.node.logs[-1].user, self.user) with assert_raises(FileNodeCheckedOutError): self.file.check_in_or_out(non_admin, None, save=True) with assert_raises(FileNodeCheckedOutError): self.file.check_in_or_out(non_admin, non_admin, save=True) def test_delete_checked_out_file(self): self.file.check_in_or_out(self.user, self.user, save=True) self.file.reload() assert_equal(self.file.checkout, self.user) with 
assert_raises(FileNodeCheckedOutError): self.file.delete() def test_delete_folder_with_checked_out_file(self): folder = self.root_node.append_folder('folder') self.file.move_under(folder) self.file.check_in_or_out(self.user, self.user, save=True) self.file.reload() assert_equal(self.file.checkout, self.user) with assert_raises(FileNodeCheckedOutError): folder.delete() def test_move_checked_out_file(self): self.file.check_in_or_out(self.user, self.user, save=True) self.file.reload() assert_equal(self.file.checkout, self.user) folder = self.root_node.append_folder('folder') with assert_raises(FileNodeCheckedOutError): self.file.move_under(folder) def test_checked_out_merge(self): user = factories.AuthUserFactory() node = ProjectFactory(creator=user) osfstorage = node.get_addon('osfstorage') root_node = osfstorage.get_root() file = root_node.append_file('test_file') user_merge_target = factories.AuthUserFactory() file.check_in_or_out(user, user, save=True) file.reload() assert_equal(file.checkout, user) user_merge_target.merge_user(user) file.reload() assert_equal(user_merge_target, file.checkout) def test_remove_contributor_with_checked_file(self): user = factories.AuthUserFactory() self.node.contributors.append(user) self.node.add_permission(user, 'admin') self.node.visible_contributor_ids.append(user._id) self.node.save() self.file.check_in_or_out(self.user, self.user, save=True) self.file.reload() assert_equal(self.file.checkout, self.user) self.file.node.remove_contributors([self.user], save=True) self.file.reload() assert_equal(self.file.checkout, None)
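# Editor's note: taken together, the checkout tests above encode the following
# invariants (summarized here, nothing new is asserted): while `checkout` is set
# on a file, delete and move_under raise FileNodeCheckedOutError, even for the
# user holding the checkout and also when deleting a parent folder;
# check_in_or_out controls who may check the file back in; and merging the
# checkout holder into another user transfers the checkout to the merge target.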
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import inspect import random import tempfile import unittest from typing import List, Tuple import numpy as np import transformers from huggingface_hub import delete_repo, login from requests.exceptions import HTTPError from transformers import BertConfig, is_flax_available, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import PASS, USER, CaptureLogger, is_pt_flax_cross_test, is_staging_test, require_flax from transformers.utils import logging if is_flax_available(): import os import jax import jax.numpy as jnp from flax.core.frozen_dict import unfreeze from flax.traverse_util import flatten_dict, unflatten_dict from transformers import ( FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, FLAX_MODEL_MAPPING, FlaxAutoModel, FlaxAutoModelForSequenceClassification, FlaxBertModel, ) from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12" # assumed parallelism: 8 if is_torch_available(): import torch def _config_zero_init(config): configs_no_init = copy.deepcopy(config) for key in configs_no_init.__dict__.keys(): if "_range" in key or "_std" in key or "initializer_factor" in key: setattr(configs_no_init, key, 1e-10) return configs_no_init def ids_tensor(shape, vocab_size, rng=None): """Creates a random int32 tensor of the shape within the vocab size.""" if rng is None: rng = random.Random() total_dims = 1 for dim in shape: total_dims *= dim values = [] for _ in range(total_dims): values.append(rng.randint(0, vocab_size - 1)) output = np.array(values, dtype=jnp.int32).reshape(shape) return output def floats_tensor(shape, scale=1.0, rng=None, name=None): """Creates a random float32 tensor""" if rng is None: rng = random.Random() total_dims = 1 for dim in shape: total_dims *= dim values = [] for _ in range(total_dims): values.append(rng.random() * scale) return np.array(values, dtype=jnp.float32).reshape(shape) def random_attention_mask(shape, rng=None): attn_mask = ids_tensor(shape, vocab_size=2, rng=rng) # make sure that at least one token is attended to for each batch attn_mask[:, -1] = 1 return attn_mask @require_flax class FlaxModelTesterMixin: model_tester = None all_model_classes = () test_mismatched_shapes = True is_encoder_decoder = False test_head_masking = False def _prepare_for_class(self, inputs_dict, model_class): inputs_dict = copy.deepcopy(inputs_dict) # hack for now until we have AutoModel classes if "ForMultipleChoice" in model_class.__name__: inputs_dict = { k: jnp.broadcast_to(v[:, None], (v.shape[0], self.model_tester.num_choices, v.shape[-1])) if isinstance(v, (jnp.ndarray, np.ndarray)) else v for k, v in inputs_dict.items() } return inputs_dict def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float): diff = np.abs((a - b)).max() 
self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).") def test_model_outputs_equivalence(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}): tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs) dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple() def recursive_check(tuple_object, dict_object): if isinstance(tuple_object, (List, Tuple)): for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object): recursive_check(tuple_iterable_value, dict_iterable_value) elif tuple_object is None: return else: self.assert_almost_equals(jnp.nan_to_num(tuple_object), jnp.nan_to_num(dict_object), 1e-5) recursive_check(tuple_output, dict_output) for model_class in self.all_model_classes: model = model_class(config) tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs) tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True}) @is_pt_flax_cross_test def test_equivalence_pt_to_flax(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): # prepare inputs prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class pt_model_class_name = model_class.__name__[4:] # Skip the "Flax" at the beginning pt_model_class = getattr(transformers, pt_model_class_name) pt_model = pt_model_class(config).eval() # Flax models don't use the `use_cache` option and cache is not returned as a default. # So we disable `use_cache` here for PyTorch model. 
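                # With use_cache=True the PyTorch outputs would also include
                # past_key_values, so the output-length comparison below would
                # fail against the Flax outputs.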
pt_model.config.use_cache = False fx_model = model_class(config, dtype=jnp.float32) fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model) fx_model.params = fx_state with torch.no_grad(): pt_outputs = pt_model(**pt_inputs).to_tuple() fx_outputs = fx_model(**prepared_inputs_dict).to_tuple() self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch") for fx_output, pt_output in zip(fx_outputs, pt_outputs): self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True) fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple() self.assertEqual( len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch" ) for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs): self.assert_almost_equals(fx_output_loaded, pt_output.numpy(), 4e-2) @is_pt_flax_cross_test def test_equivalence_flax_to_pt(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): # prepare inputs prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class pt_model_class_name = model_class.__name__[4:] # Skip the "Flax" at the beginning pt_model_class = getattr(transformers, pt_model_class_name) pt_model = pt_model_class(config).eval() # Flax models don't use the `use_cache` option and cache is not returned as a default. # So we disable `use_cache` here for PyTorch model. pt_model.config.use_cache = False fx_model = model_class(config, dtype=jnp.float32) pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params) # make sure weights are tied in PyTorch pt_model.tie_weights() with torch.no_grad(): pt_outputs = pt_model(**pt_inputs).to_tuple() fx_outputs = fx_model(**prepared_inputs_dict).to_tuple() self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch") for fx_output, pt_output in zip(fx_outputs, pt_outputs): self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(tmpdirname) pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True) with torch.no_grad(): pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple() self.assertEqual( len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch" ) for fx_output, pt_output in zip(fx_outputs, pt_outputs_loaded): self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2) def test_from_pretrained_save_pretrained(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): model = model_class(config) prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) outputs = model(**prepared_inputs_dict).to_tuple() # verify that normal save_pretrained works as expected with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model_loaded = model_class.from_pretrained(tmpdirname) outputs_loaded = model_loaded(**prepared_inputs_dict).to_tuple() for output_loaded, output in zip(outputs_loaded, outputs): self.assert_almost_equals(output_loaded, output, 1e-3) # 
verify that save_pretrained for distributed training # with `params=params` works as expected with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname, params=model.params) model_loaded = model_class.from_pretrained(tmpdirname) outputs_loaded = model_loaded(**prepared_inputs_dict).to_tuple() for output_loaded, output in zip(outputs_loaded, outputs): self.assert_almost_equals(output_loaded, output, 1e-3) def test_save_load_from_base(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = FLAX_MODEL_MAPPING[config.__class__] for model_class in self.all_model_classes: if model_class == base_class: continue model = base_class(config) base_params = flatten_dict(unfreeze(model.params)) # check that all base model weights are loaded correctly with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) head_model = model_class.from_pretrained(tmpdirname) base_param_from_head = flatten_dict(unfreeze(head_model.params[head_model.base_model_prefix])) for key in base_param_from_head.keys(): max_diff = (base_params[key] - base_param_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") def test_save_load_to_base(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = FLAX_MODEL_MAPPING[config.__class__] for model_class in self.all_model_classes: if model_class == base_class: continue model = model_class(config) base_params_from_head = flatten_dict(unfreeze(model.params[model.base_model_prefix])) # check that all base model weights are loaded correctly with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) base_model = base_class.from_pretrained(tmpdirname) base_params = flatten_dict(unfreeze(base_model.params)) for key in base_params_from_head.keys(): max_diff = (base_params[key] - base_params_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") @is_pt_flax_cross_test def test_save_load_from_base_pt(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = FLAX_MODEL_MAPPING[config.__class__] for model_class in self.all_model_classes: if model_class == base_class: continue model = base_class(config) base_params = flatten_dict(unfreeze(model.params)) # convert Flax model to PyTorch model pt_model_class = getattr(transformers, base_class.__name__[4:]) # Skip the "Flax" at the beginning pt_model = pt_model_class(config).eval() pt_model = load_flax_weights_in_pytorch_model(pt_model, model.params) # check that all base model weights are loaded correctly with tempfile.TemporaryDirectory() as tmpdirname: # save pt model pt_model.save_pretrained(tmpdirname) head_model = model_class.from_pretrained(tmpdirname, from_pt=True) base_param_from_head = flatten_dict(unfreeze(head_model.params[head_model.base_model_prefix])) for key in base_param_from_head.keys(): max_diff = (base_params[key] - base_param_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") @is_pt_flax_cross_test def test_save_load_to_base_pt(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = FLAX_MODEL_MAPPING[config.__class__] for model_class in self.all_model_classes: if model_class == base_class: continue model = model_class(config) base_params_from_head = flatten_dict(unfreeze(model.params[model.base_model_prefix])) # convert Flax model to PyTorch model pt_model_class = getattr(transformers, model_class.__name__[4:]) 
# Skip the "Flax" at the beginning pt_model = pt_model_class(config).eval() pt_model = load_flax_weights_in_pytorch_model(pt_model, model.params) # check that all base model weights are loaded correctly with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) base_model = base_class.from_pretrained(tmpdirname, from_pt=True) base_params = flatten_dict(unfreeze(base_model.params)) for key in base_params_from_head.keys(): max_diff = (base_params[key] - base_params_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") @is_pt_flax_cross_test def test_save_load_bf16_to_base_pt(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = FLAX_MODEL_MAPPING[config.__class__] for model_class in self.all_model_classes: if model_class == base_class: continue model = model_class(config) model.params = model.to_bf16(model.params) base_params_from_head = flatten_dict(unfreeze(model.params[model.base_model_prefix])) # convert Flax model to PyTorch model pt_model_class = getattr(transformers, model_class.__name__[4:]) # Skip the "Flax" at the beginning pt_model = pt_model_class(config).eval() pt_model = load_flax_weights_in_pytorch_model(pt_model, model.params) # check that all base model weights are loaded correctly with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) base_model = base_class.from_pretrained(tmpdirname, from_pt=True) base_params = flatten_dict(unfreeze(base_model.params)) for key in base_params_from_head.keys(): max_diff = (base_params[key] - base_params_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") def test_jit_compilation(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) model = model_class(config) @jax.jit def model_jitted(input_ids, attention_mask=None, **kwargs): return model(input_ids=input_ids, attention_mask=attention_mask, **kwargs) with self.subTest("JIT Enabled"): jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple() with self.subTest("JIT Disabled"): with jax.disable_jit(): outputs = model_jitted(**prepared_inputs_dict).to_tuple() self.assertEqual(len(outputs), len(jitted_outputs)) for jitted_output, output in zip(jitted_outputs, outputs): self.assertEqual(jitted_output.shape, output.shape) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.__call__) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] if model.config.is_encoder_decoder: expected_arg_names = [ "input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", ] self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) else: expected_arg_names = ["input_ids", "attention_mask"] self.assertListEqual(arg_names[:2], expected_arg_names) def test_naming_convention(self): for model_class in self.all_model_classes: model_class_name = model_class.__name__ module_class_name = ( model_class_name[:-5] + "Module" if model_class_name[-5:] == "Model" else model_class_name + "Module" ) bert_modeling_flax_module = __import__(model_class.__module__, fromlist=[module_class_name]) module_cls = 
getattr(bert_modeling_flax_module, module_class_name) self.assertIsNotNone(module_cls) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) if hasattr(self.model_tester, "encoder_seq_length"): seq_length = self.model_tester.encoder_seq_length else: seq_length = self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size], ) if config.is_encoder_decoder: hidden_states = outputs.decoder_hidden_states self.assertIsInstance(hidden_states, (list, tuple)) self.assertEqual(len(hidden_states), expected_num_layers) seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) self.assertListEqual( list(hidden_states[0].shape[-2:]), [decoder_seq_length, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True seq_length = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_length) encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_length) decoder_key_length = getattr(self.model_tester, "decoder_key_length", decoder_seq_length) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) out_len = len(outputs) if self.is_encoder_decoder: correct_outlen = 5 # Question Answering model returns start_logits and end_logits if model_class in get_values(FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING): correct_outlen += 1 # start_logits and end_logits instead of only 1 output self.assertEqual(out_len, correct_outlen) # decoder attentions decoder_attentions = outputs.decoder_attentions self.assertIsInstance(decoder_attentions, 
(list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length], ) # cross attentions cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, (list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, decoder_seq_length, encoder_key_length, ], ) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if hasattr(self.model_tester, "num_hidden_states_types"): added_hidden_states = self.model_tester.num_hidden_states_types elif self.is_encoder_decoder: added_hidden_states = 2 else: added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) def test_load_with_mismatched_shapes(self): if not self.test_mismatched_shapes: return config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if model_class not in get_values(FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING): continue with self.subTest(msg=f"Testing {model_class}"): with tempfile.TemporaryDirectory() as tmp_dir: model = model_class(config) model.save_pretrained(tmp_dir) # Fails when we don't set ignore_mismatched_sizes=True with self.assertRaises(ValueError): new_model = FlaxAutoModelForSequenceClassification.from_pretrained(tmp_dir, num_labels=42) with self.assertRaises(ValueError): new_model_without_prefix = FlaxAutoModel.from_pretrained(tmp_dir, vocab_size=10) logger = logging.get_logger("transformers.modeling_flax_utils") with CaptureLogger(logger) as cl: new_model = FlaxAutoModelForSequenceClassification.from_pretrained( tmp_dir, num_labels=42, ignore_mismatched_sizes=True ) self.assertIn("the shapes did not match", cl.out) logits = new_model(**inputs_dict)["logits"] self.assertEqual(logits.shape[1], 42) with CaptureLogger(logger) as cl: new_model_without_prefix = FlaxAutoModel.from_pretrained( tmp_dir, vocab_size=10, ignore_mismatched_sizes=True ) self.assertIn("the shapes did not match", cl.out) input_ids = ids_tensor((2, 8), 10) if self.is_encoder_decoder: new_model_without_prefix(input_ids, decoder_input_ids=input_ids) else: new_model_without_prefix(input_ids) def test_default_params_dtype(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # check if all params are still in float32 when dtype of computation is half-precision model = model_class(config, dtype=jnp.float16) types = jax.tree_map(lambda x: x.dtype, model.params) types = flatten_dict(types) for name, type_ in types.items(): self.assertEquals(type_, jnp.float32, msg=f"param {name} is not initialized in fp32.") def test_to_bf16(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) # cast all params to bf16 params = 
model.to_bf16(model.params) types = flatten_dict(jax.tree_map(lambda x: x.dtype, params)) # test if all params are in bf16 for name, type_ in types.items(): self.assertEqual(type_, jnp.bfloat16, msg=f"param {name} is not in bf16.") # test masking flat_params = flatten_dict(params) key = random.choice(list(flat_params.keys())) # choose a random param mask = {path: path != key for path in flat_params} # don't cast the key mask = unflatten_dict(mask) params = model.to_bf16(model.params, mask) types = flatten_dict(jax.tree_map(lambda x: x.dtype, params)) # test if all params are in bf16 except key for name, type_ in types.items(): if name == key: self.assertEqual(type_, jnp.float32, msg=f"param {name} should be in fp32.") else: self.assertEqual(type_, jnp.bfloat16, msg=f"param {name} is not in bf16.") def test_to_fp16(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) # cast all params to fp16 params = model.to_fp16(model.params) types = flatten_dict(jax.tree_map(lambda x: x.dtype, params)) # test if all params are in fp16 for name, type_ in types.items(): self.assertEqual(type_, jnp.float16, msg=f"param {name} is not in fp16.") # test masking flat_params = flatten_dict(params) key = random.choice(list(flat_params.keys())) # choose a random param mask = {path: path != key for path in flat_params} # don't cast the key mask = unflatten_dict(mask) params = model.to_fp16(model.params, mask) types = flatten_dict(jax.tree_map(lambda x: x.dtype, params)) # test if all params are in fp16 except key for name, type_ in types.items(): if name == key: self.assertEqual(type_, jnp.float32, msg=f"param {name} should be in fp32.") else: self.assertEqual(type_, jnp.float16, msg=f"param {name} is not in fp16.") def test_to_fp32(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) # cast all params to fp16 and back to fp32 params = model.to_fp16(model.params) params = model.to_fp32(params) # test if all params are in fp32 types = flatten_dict(jax.tree_map(lambda x: x.dtype, params)) for name, type_ in types.items(): self.assertEqual(type_, jnp.float32, msg=f"param {name} is not in fp32.") # test masking flat_params = flatten_dict(params) key = random.choice(list(flat_params.keys())) # choose a random param mask = {path: path != key for path in flat_params} # don't cast the key mask = unflatten_dict(mask) # cast to fp16 and back to fp32 with mask params = model.to_fp16(model.params) params = model.to_fp32(params, mask) # test if all params are in fp32 except key types = flatten_dict(jax.tree_map(lambda x: x.dtype, params)) for name, type_ in types.items(): if name == key: self.assertEqual(type_, jnp.float16, msg=f"param {name} should be in fp16.") else: self.assertEqual(type_, jnp.float32, msg=f"param {name} is not in fp32.") def test_save_load_in_fp16(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) # convert weights to fp16 and save params = model.to_fp16(model.params) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname, params=params) # load the weights again and check if they are still in fp16 model = model_class.from_pretrained(tmpdirname) types = flatten_dict(jax.tree_map(lambda x: x.dtype, model.params)) for name, type_ in types.items(): self.assertEqual(type_, jnp.float16, msg=f"param {name} 
is not in fp16.") def test_save_load_in_bf16(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) # convert weights to bf16 and save params = model.to_bf16(model.params) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname, params=params) # load the weights again and check if they are still in fp16 model = model_class.from_pretrained(tmpdirname) types = flatten_dict(jax.tree_map(lambda x: x.dtype, model.params)) for name, type_ in types.items(): self.assertEqual(type_, jnp.bfloat16, msg=f"param {name} is not in bf16.") def test_model_main_input_name(self): for model_class in self.all_model_classes: model_signature = inspect.signature(getattr(model_class, "__call__")) # The main input is the name of the argument after `self` observed_main_input_name = list(model_signature.parameters.keys())[1] self.assertEqual(model_class.main_input_name, observed_main_input_name) def test_headmasking(self): if not self.test_head_masking: return config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True def _prepare_layer_head_mask(i, attention_heads, num_hidden_layers): if i == 0: return np.concatenate([np.zeros(1, dtype=jnp.int32), np.ones(attention_heads - 1, dtype=jnp.int32)]) if i == num_hidden_layers - 1: return np.concatenate([np.zeros(attention_heads - 1, dtype=jnp.int32), np.ones(1, dtype=jnp.int32)]) return np.ones(attention_heads, dtype=jnp.int32) for model_class in self.all_model_classes: model = model_class(config) inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False inputs = self._prepare_for_class(inputs_dict, model_class).copy() # Prepare head mask inputs["head_mask"] = np.stack( [ _prepare_layer_head_mask(i, config.num_attention_heads, config.num_hidden_layers) for i in range(config.num_hidden_layers) ] ) outputs = model(**inputs) def _check_attentions_validity(attentions): # Remove NaN for t in attentions: # Check we don't have more than 25% nans (arbitrary) self.assertLess(np.isnan(t).sum(), t.size / 4) attentions = [np.where(np.isnan(t), 0.0, t) for t in attentions] self.assertAlmostEqual(attentions[0][..., 0, :, :].sum(), 0.0) self.assertNotEqual(attentions[0][..., -1, :, :].sum(), 0.0) if len(attentions) > 2: # encoder-decodere models have only 2 layers in each modules self.assertNotEqual(attentions[1][..., 0, :, :].sum(), 0.0) self.assertAlmostEqual(attentions[-1][..., -2, :, :].sum(), 0.0) self.assertNotEqual(attentions[-1][..., -1, :, :].sum(), 0.0) if model.config.is_encoder_decoder: raise NotImplementedError("The test has not been implemented for encoder-decoder models yet.") else: _check_attentions_validity(outputs.attentions) @require_flax @is_staging_test class FlaxModelPushToHubTester(unittest.TestCase): @classmethod def setUpClass(cls): cls._token = login(username=USER, password=PASS) @classmethod def tearDownClass(cls): try: delete_repo(token=cls._token, name="test-model-flax") except HTTPError: pass try: delete_repo(token=cls._token, name="test-model-flax-org", organization="valid_org") except HTTPError: pass def test_push_to_hub(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) model = FlaxBertModel(config) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained( os.path.join(tmp_dir, "test-model-flax"), push_to_hub=True, use_auth_token=self._token ) new_model = 
FlaxBertModel.from_pretrained(f"{USER}/test-model-flax") base_params = flatten_dict(unfreeze(model.params)) new_params = flatten_dict(unfreeze(new_model.params)) for key in base_params.keys(): max_diff = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") def test_push_to_hub_in_organization(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) model = FlaxBertModel(config) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained( os.path.join(tmp_dir, "test-model-flax-org"), push_to_hub=True, use_auth_token=self._token, organization="valid_org", ) new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org") base_params = flatten_dict(unfreeze(model.params)) new_params = flatten_dict(unfreeze(new_model.params)) for key in base_params.keys(): max_diff = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
from itertools import chain from typing import NamedTuple, Dict, Sequence, List, Set, Type, Union, TYPE_CHECKING, Tuple from lightbus.exceptions import TransportNotFound, TransportsNotInstalled from lightbus.transports.pool import TransportPool from lightbus.utilities.importing import load_entrypoint_classes empty = NamedTuple("Empty") if TYPE_CHECKING: # pylint: disable=unused-import,cyclic-import from lightbus.config import Config from lightbus.transports import ( RpcTransport, ResultTransport, EventTransport, SchemaTransport, Transport, ) EventTransportPoolType = TransportPool["EventTransport"] RpcTransportPoolType = TransportPool["RpcTransport"] ResultTransportPoolType = TransportPool["ResultTransport"] SchemaTransportPoolType = TransportPool["SchemaTransport"] AnyTransportPoolType = Union[ EventTransportPoolType, RpcTransportPoolType, ResultTransportPoolType, SchemaTransportPoolType ] class TransportRegistry: """ Manages access to transports It is possible for different APIs within lightbus to use different transports. This registry handles the logic of loading the transports for a given configuration. Thereafter, it provides access to these transports based on a given API. The 'default' API is a special case as it is fallback transport for any APIs that do not have their own specific transports configured. """ class _RegistryEntry(NamedTuple): rpc: RpcTransportPoolType = None result: ResultTransportPoolType = None event: EventTransportPoolType = None schema_transport: TransportPool = None def __init__(self): self._registry: Dict[str, TransportRegistry._RegistryEntry] = {} def load_config(self, config: "Config") -> "TransportRegistry": # For every configured API... for api_name, api_config in config.apis().items(): # ...and for each type of transport... for transport_type in ("event", "rpc", "result"): # ...get the transport config... transport_selector = getattr(api_config, f"{transport_type}_transport") transport_config = self._get_transport_config(transport_selector) # ... and use it to create the transport. 
if transport_config: transport_name, transport_config = transport_config transport_class = get_transport(type_=transport_type, name=transport_name) self._set_transport( api_name, transport_class, transport_type, transport_config, config ) # Schema transport transport_config = self._get_transport_config(config.bus().schema.transport) if transport_config: transport_name, transport_config = transport_config transport_class = get_transport(type_="schema", name=transport_name) self.schema_transport = self._instantiate_transport_pool( transport_class, transport_config, config ) return self def _get_transport_config(self, transport_selector): if transport_selector: for transport_name in transport_selector._fields: transport_config = getattr(transport_selector, transport_name) if transport_config is not None: return transport_name, transport_config def _instantiate_transport_pool( self, transport_class: Type["Transport"], transport_config: NamedTuple, config: "Config" ): transport_pool = TransportPool( transport_class=transport_class, transport_config=transport_config, config=config ) return transport_pool def _set_transport( self, api_name: str, transport_class: Type["Transport"], transport_type: str, transport_config: NamedTuple, config: "Config", ): """Set the transport pool for a specific API""" from lightbus.transports import Transport assert issubclass( transport_class, Transport ), f"Must be a subclass for Transport, was {transport_class}" self._registry.setdefault(api_name, self._RegistryEntry()) transport_pool = self._instantiate_transport_pool(transport_class, transport_config, config) self._registry[api_name] = self._registry[api_name]._replace( **{transport_type: transport_pool} ) def _get_transport_pool( self, api_name: str, transport_type: str, default=empty ) -> AnyTransportPoolType: # Get the registry entry for this API (if any) registry_entry = self._registry.get(api_name) api_transport = None # If we have a registry entry for this API, then get the transport for it if registry_entry: api_transport = getattr(registry_entry, transport_type) # Otherwise get the transport for the default API (which is always our fallback) # (but don't bother if they have explicity asked for the default_api, as if they # have then we've already failed to get that in the previous step) if not api_transport and api_name != "default": try: api_transport = self._get_transport_pool("default", transport_type) except TransportNotFound: pass # If we STILL don't have a transport then show a sensible error if not api_transport and default == empty: raise TransportNotFound( f"No {transport_type} transport found for API '{api_name}'. Neither was a default " f"API transport found. Either specify a {transport_type} transport for this specific API, " f"or specify a default {transport_type} transport. In most cases setting a default transport " f"is the best course of action." 
) else: return api_transport def _get_transport_pools( self, api_names: Sequence[str], transport_type: str ) -> Dict[AnyTransportPoolType, List[str]]: apis_by_transport: Dict[AnyTransportPoolType, List[str]] = {} for api_name in api_names: transport = self._get_transport_pool(api_name, transport_type) apis_by_transport.setdefault(transport, []) apis_by_transport[transport].append(api_name) return apis_by_transport def _has_transport(self, api_name: str, transport_type: str) -> bool: try: self._get_transport_pool(api_name, transport_type) except TransportNotFound: return False else: return True def set_rpc_transport( self, api_name: str, transport_class: Type["RpcTransport"], transport_config: NamedTuple, config: "Config", ): self._set_transport(api_name, transport_class, "rpc", transport_config, config) def set_result_transport( self, api_name: str, transport_class: Type["ResultTransport"], transport_config: NamedTuple, config: "Config", ): self._set_transport(api_name, transport_class, "result", transport_config, config) def set_event_transport( self, api_name: str, transport_class: Type["EventTransport"], transport_config: NamedTuple, config: "Config", ): self._set_transport(api_name, transport_class, "event", transport_config, config) def set_schema_transport( self, transport_class: Type["SchemaTransport"], transport_config: NamedTuple, config: "Config", ): self.schema_transport = self._instantiate_transport_pool( transport_class, transport_config, config ) def get_rpc_transport(self, api_name: str, default=empty) -> RpcTransportPoolType: return self._get_transport_pool(api_name, "rpc", default=default) def get_result_transport(self, api_name: str, default=empty) -> ResultTransportPoolType: return self._get_transport_pool(api_name, "result", default=default) def get_event_transport(self, api_name: str, default=empty) -> EventTransportPoolType: return self._get_transport_pool(api_name, "event", default=default) def get_all_rpc_transports(self) -> Set[RpcTransportPoolType]: return {t.rpc for t in self._registry.values() if t.rpc} def get_all_result_transports(self) -> Set[ResultTransportPoolType]: return {t.result for t in self._registry.values() if t.result} def get_all_event_transports(self) -> Set[EventTransportPoolType]: return {t.event for t in self._registry.values() if t.event} def get_schema_transport(self, default=empty) -> SchemaTransportPoolType: if self.schema_transport or default != empty: return self.schema_transport or default else: # TODO: Link to docs raise TransportNotFound( "No schema transport is configured for this bus. Check your schema transport " "configuration is setup correctly (config section: bus.schema.transport)." 
) def has_rpc_transport(self, api_name: str) -> bool: return self._has_transport(api_name, "rpc") def has_result_transport(self, api_name: str) -> bool: return self._has_transport(api_name, "result") def has_event_transport(self, api_name: str) -> bool: return self._has_transport(api_name, "event") def has_schema_transport(self) -> bool: return bool(self.schema_transport) def get_rpc_transports(self, api_names: Sequence[str]) -> Dict[RpcTransportPoolType, List[str]]: """Get a mapping of transports to lists of APIs This is useful when multiple APIs can be served by a single transport """ return self._get_transport_pools(api_names, "rpc") def get_event_transports( self, api_names: Sequence[str] ) -> Dict[EventTransportPoolType, List[str]]: """Get a mapping of transports to lists of APIs This is useful when multiple APIs can be served by a single transport """ return self._get_transport_pools(api_names, "event") def get_all_transports(self) -> Set[AnyTransportPoolType]: """Get a set of all transports irrespective of type""" all_transports = chain(*[entry._asdict().values() for entry in self._registry.values()]) return set([t for t in all_transports if t is not None]) def get_available_transports(type_): loaded = load_entrypoint_classes(f"lightbus_{type_}_transports") if not loaded: raise TransportsNotInstalled( f"No {type_} transports are available, which means lightbus has not been " f"installed correctly. This is likely because you are working on Lightbus itself. " f"In which case, within your local lightbus repo you should run " f"something like 'pip install .' or 'python setup.py develop'.\n\n" f"This will install the entrypoints (defined in setup.py) which point Lightbus " f"to its bundled transports." ) return {name: class_ for module_name, name, class_ in loaded} def get_transport(type_, name): for name_, class_ in get_available_transports(type_).items(): if name == name_: return class_ raise TransportNotFound( f"No '{type_}' transport found named '{name}'. Check the transport is installed and " f"has the relevant entrypoints set up in its setup.py file. Or perhaps " f"you have a typo in your config file." ) def get_transport_name(cls: Type[AnyTransportPoolType]): for type_ in ("rpc", "result", "event"): for *_, name, class_ in load_entrypoint_classes(f"lightbus_{type_}_transports"): if cls == class_: return name raise TransportNotFound( f"Transport class {cls.__module__}.{cls.__name__} is not specified in any entrypoint." )
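# --- Hedged illustration (not part of the registry module above) ---
# TransportRegistry keeps one _RegistryEntry per API and falls back to the
# "default" API's entry when a specific API has no transport configured. A
# minimal standalone sketch of that lookup pattern, with plain strings standing
# in for the TransportPool instances the real registry stores:
from typing import Dict, NamedTuple, Optional

class _Entry(NamedTuple):
    rpc: Optional[str] = None
    event: Optional[str] = None

_example_registry: Dict[str, _Entry] = {
    "default": _Entry(rpc="redis_rpc", event="redis_event"),
    "support.cases": _Entry(event="debug_event"),  # rpc falls back to "default"
}

def _lookup(api_name: str, transport_type: str) -> Optional[str]:
    entry = _example_registry.get(api_name)
    found = getattr(entry, transport_type) if entry else None
    if not found and api_name != "default":
        found = _lookup("default", transport_type)
    return found

assert _lookup("support.cases", "event") == "debug_event"
assert _lookup("support.cases", "rpc") == "redis_rpc"  # falls back to the default API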
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import _add_pooling from ast import literal_eval def _get_input_output_name(net, node, index=0): name = node['name'] inputs = node['inputs'] if index == 'all': input_name = [_get_node_name(net, inputs[idx][0]) for idx in range(len(inputs))] elif type(index) == int: input_name = _get_node_name(net, inputs[0][0]) else: input_name = [_get_node_name(net, inputs[idx][0]) for idx in index] return input_name, name def _get_node_name(net, node_id): return net['nodes'][node_id]['name'] def _get_node_shape(net, node_id): return net['nodes'][node_id]['shape'] # TODO These operators still need to be converted (listing in order of priority): # High priority: # mxnet.symbol.repeat -> builder.add_repeat to flatten and repeat the NDArray sequence # mxnet.symbol.Crop -> builder.add_crop to crop image along spacial dimensions # mxnet.symbol.Pad -> builder.add_padding putting 0's on height and width for tensor # Low Priority: # depthwise seperable convolution support through groups in builder.add_convolution # add_optional -> for all RNNs defining what goes in and out (to define beam search or if input is streaming) # mx.symbol.Embedding -> add_embedding takes indicies, word ids from dict that is outside coreml or # in pipeline only if we have text mapping to indicies # FusedRNNCell -> add_bidirlstm # add_unilstm -> reverse_input param true as second and concat on outputs # Do vanilla (0.9 mxnet) lstm, gru, vanilla_rnn def convert_reshape(net, node, module, builder): """Converts a reshape layer from mxnet to coreml. This doesn't currently handle the deprecated parameters for the reshape layer. Parameters ---------- network: net An mxnet network object. layer: node Node to convert. module: module A module for MXNet builder: NeuralNetworkBuilder A neural network builder object. """ input_name, output_name = _get_input_output_name(net, node) name = node['name'] target_shape = node['shape'] if any(item <= 0 for item in target_shape): raise NotImplementedError('Special dimensional values less than or equal to 0 are not supported yet.' 'Feel free to file an issue here: https://github.com/dmlc/mxnet/issues.') if 'reverse' in node and node['reverse'] == 'True': raise NotImplementedError('"reverse" parameter is not supported by yet.' 'Feel free to file an issue here: https://github.com/dmlc/mxnet/issues.') mode = 0 # CHANNEL_FIRST builder.add_reshape(name, input_name, output_name, target_shape, mode) def convert_transpose(net, node, module, builder): """Convert a transpose layer from mxnet to coreml. Parameters ---------- network: net A mxnet network object. layer: node Node to convert. module: module An module for MXNet builder: NeuralNetworkBuilder A neural network builder object. 
""" input_name, output_name = _get_input_output_name(net, node) name = node['name'] param = node['attr'] axes = literal_eval(param['axes']) builder.add_permute(name, axes, input_name, output_name) def convert_flatten(net, node, module, builder): """Convert a flatten layer from mxnet to coreml. Parameters ---------- network: net A mxnet network object. layer: node Node to convert. module: module An module for MXNet builder: NeuralNetworkBuilder A neural network builder object. """ input_name, output_name = _get_input_output_name(net, node) name = node['name'] mode = 0 # CHANNEL_FIRST builder.add_flatten(name, mode, input_name, output_name) def convert_softmax(net, node, module, builder): """Convert a softmax layer from mxnet to coreml. Parameters ---------- network: net A mxnet network object. layer: node Node to convert. module: module An module for MXNet builder: NeuralNetworkBuilder A neural network builder object. """ input_name, output_name = _get_input_output_name(net, node) name = node['name'] builder.add_softmax(name=name, input_name=input_name, output_name=output_name) def convert_activation(net, node, module, builder): """Convert an activation layer from mxnet to coreml. Parameters ---------- network: net A mxnet network object. layer: node Node to convert. module: module An module for MXNet builder: NeuralNetworkBuilder A neural network builder object. """ input_name, output_name = _get_input_output_name(net, node) name = node['name'] mx_non_linearity = node['attr']['act_type'] #TODO add SCALED_TANH, SOFTPLUS, SOFTSIGN, SIGMOID_HARD, LEAKYRELU, PRELU, ELU, PARAMETRICSOFTPLUS, THRESHOLDEDRELU, LINEAR if mx_non_linearity == 'relu': non_linearity = 'RELU' elif mx_non_linearity == 'tanh': non_linearity = 'TANH' elif mx_non_linearity == 'sigmoid': non_linearity = 'SIGMOID' else: raise TypeError('Unknown activation type %s' % mx_non_linearity) builder.add_activation(name = name, non_linearity = non_linearity, input_name = input_name, output_name = output_name) def convert_elementwise_add(net, node, module, builder): """Convert an elementwise add layer from mxnet to coreml. Parameters ---------- network: net A mxnet network object. layer: node Node to convert. module: module An module for MXNet builder: NeuralNetworkBuilder A neural network builder object. """ input_names, output_name = _get_input_output_name(net, node, [0, 1]) name = node['name'] builder.add_elementwise(name, input_names, output_name, 'ADD') def convert_dense(net, node, module, builder): """Convert a dense layer from mxnet to coreml. Parameters ---------- network: net A mxnet network object. layer: node Node to convert. module: module An module for MXNet builder: NeuralNetworkBuilder A neural network builder object. """ input_name, output_name = _get_input_output_name(net, node) has_bias = True name = node['name'] inputs = node['inputs'] args, _ = module.get_params() W = args[_get_node_name(net, inputs[1][0])].asnumpy() if has_bias: Wb = args[_get_node_name(net, inputs[2][0])].asnumpy() else: Wb = None nC, nB = W.shape builder.add_inner_product( name=name, W=W, b=Wb, input_channels=nB, output_channels=nC, has_bias=has_bias, input_name=input_name, output_name=output_name ) def convert_convolution(net, node, module, builder): """Convert a convolution layer from mxnet to coreml. Parameters ---------- network: net A mxnet network object. layer: node Node to convert. module: module An module for MXNet builder: NeuralNetworkBuilder A neural network builder object. 
""" input_name, output_name = _get_input_output_name(net, node) name = node['name'] param = node['attr'] inputs = node['inputs'] args, _ = module.get_params() if 'no_bias' in param.keys(): has_bias = not literal_eval(param['no_bias']) else: has_bias = True if literal_eval(param['pad']) != (0, 0): pad = literal_eval(param['pad']) builder.add_padding( name=name+"_pad", left=pad[1], right=pad[1], top=pad[0], bottom=pad[0], value=0, input_name=input_name, output_name=name+"_pad_output") input_name = name+"_pad_output" border_mode = "valid" n_filters = int(param['num_filter']) W = args[_get_node_name(net, inputs[1][0])].asnumpy() if has_bias: Wb = args[_get_node_name(net, inputs[2][0])].asnumpy() else: Wb = None channels = W.shape[1] stride_height, stride_width = literal_eval(param['stride']) kernel_height, kernel_width = literal_eval(param['kernel']) W = W.transpose((2, 3, 1, 0)) builder.add_convolution( name=name, kernel_channels=channels, output_channels=n_filters, height=kernel_height, width=kernel_width, stride_height=stride_height, stride_width=stride_width, border_mode=border_mode, groups=1, W=W, b=Wb, has_bias=has_bias, is_deconv=False, output_shape=None, input_name=input_name, output_name=output_name) def convert_pooling(net, node, module, builder): """Convert a pooling layer from mxnet to coreml. Parameters ---------- network: net A mxnet network object. layer: node Node to convert. module: module An module for MXNet builder: NeuralNetworkBuilder A neural network builder object. """ input_name, output_name = _get_input_output_name(net, node) name = node['name'] param = node['attr'] layer_type_mx = param['pool_type'] if layer_type_mx == 'max': layer_type = 'MAX' elif layer_type_mx == 'avg': layer_type = 'AVERAGE' else: raise TypeError("Pooling type %s not supported" % layer_type_mx) # Add padding if there is any if literal_eval(param['pad']) != (0, 0): pad = literal_eval(param['pad']) builder.add_padding( name=name+"_pad", left=pad[1], right=pad[1], top=pad[0], bottom=pad[0], value=0, input_name=input_name, output_name=name+"_pad_output") input_name = name+"_pad_output" stride_height, stride_width = literal_eval(param['stride']) kernel_width, kernel_height = literal_eval(param['kernel']) type_map = {'valid': 'VALID', 'full': 'INCLUDE_LAST_PIXEL'} padding_type = param['pooling_convention'] if 'pooling_convention' in param else 'valid' if padding_type not in type_map: raise KeyError("%s type is not supported in this converter. It is a Github issue.") padding_type = type_map[padding_type] if 'global_pool' in param.keys(): is_global = literal_eval(param['global_pool']) else: is_global = False # For reasons why we are not using the standard builder but having our own implementation, # see the function documentation. _add_pooling.add_pooling_with_padding_types( builder=builder, name=name, height=kernel_height, width=kernel_width, stride_height=stride_height, stride_width=stride_width, layer_type=layer_type, padding_type=padding_type, exclude_pad_area=False, is_global=is_global, input_name=input_name, output_name=output_name ) def convert_batchnorm(net, node, module, builder): """Convert a transpose layer from mxnet to coreml. Parameters ---------- network: net A mxnet network object. layer: node Node to convert. module: module An module for MXNet builder: NeuralNetworkBuilder A neural network builder object. """ input_name, output_name = _get_input_output_name(net, node) name = node['name'] inputs = node['inputs'] eps = 1e-3 # Default value of eps for MXNet. 
use_global_stats = False # Default value of use_global_stats for MXNet. if 'attr' in node: if 'eps' in node['attr']: eps = literal_eval(node['attr']['eps']) args, aux = module.get_params() gamma = args[_get_node_name(net, inputs[1][0])].asnumpy() beta = args[_get_node_name(net, inputs[2][0])].asnumpy() mean = aux[_get_node_name(net, inputs[3][0])].asnumpy() variance = aux[_get_node_name(net, inputs[4][0])].asnumpy() nb_channels = gamma.shape[0] builder.add_batchnorm( name=name, channels=nb_channels, gamma=gamma, beta=beta, mean=mean, variance=variance, input_name=input_name, output_name=output_name, epsilon=eps) def convert_concat(net, node, module, builder): """Convert concat layer from mxnet to coreml. Parameters ---------- network: net A mxnet network object. layer: node Node to convert. module: module An module for MXNet builder: NeuralNetworkBuilder A neural network builder object. """ # Get input and output names input_names, output_name = _get_input_output_name(net, node, 'all') name = node['name'] mode = 'CONCAT' builder.add_elementwise(name = name, input_names = input_names, output_name = output_name, mode = mode) def convert_deconvolution(net, node, module, builder): """Convert a deconvolution layer from mxnet to coreml. Parameters ---------- network: net A mxnet network object. layer: node Node to convert. module: module An module for MXNet builder: NeuralNetworkBuilder A neural network builder object. """ input_name, output_name = _get_input_output_name(net, node) name = node['name'] param = node['attr'] inputs = node['inputs'] args, _ = module.get_params() if 'no_bias' in param.keys(): has_bias = not literal_eval(param['no_bias']) else: has_bias = False border_mode = "valid" n_filters = int(param['num_filter']) output_shape = None if 'target_shape' in param: target_shape = literal_eval(param['target_shape']) output_shape = (int(target_shape[0]), int(target_shape[1])) W = args[_get_node_name(net, inputs[1][0])].asnumpy() if has_bias: Wb = args[_get_node_name(net, inputs[2][0])].asnumpy() else: Wb = None channels = W.shape[0] stride_height, stride_width = literal_eval(param['stride']) kernel_height, kernel_width = literal_eval(param['kernel']) W = W.transpose((2, 3, 0, 1)) use_crop = False if literal_eval(param['pad']) != (0, 0) and output_shape is None: use_crop = True builder.add_convolution( name=name, kernel_channels=channels, output_channels=n_filters, height=kernel_height, width=kernel_width, stride_height=stride_height, stride_width=stride_width, border_mode=border_mode, groups=1, W=W, b=Wb, has_bias=has_bias, is_deconv=True, output_shape=output_shape, input_name=input_name, output_name=output_name+'before_pad' if use_crop else output_name ) if use_crop: pad = literal_eval(param['pad']) builder.add_crop( name=name+"_pad", left=pad[1], right=pad[1], top=pad[0], bottom=pad[0], offset=0, input_names=[output_name+'before_pad'], output_name=output_name )
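# --- Hedged illustration (not part of the converter above) ---
# The converters above repeatedly parse MXNet's string-encoded layer attributes
# (e.g. pad="(1, 1)", stride="(2, 2)", no_bias="True") with ast.literal_eval.
# A small standalone sketch of that pattern, using made-up attribute values:
from ast import literal_eval

attrs = {'pad': '(1, 1)', 'stride': '(2, 2)', 'kernel': '(3, 3)', 'no_bias': 'False'}

pad = literal_eval(attrs['pad'])               # -> the tuple (1, 1)
stride_height, stride_width = literal_eval(attrs['stride'])
has_bias = not literal_eval(attrs['no_bias'])  # 'False' -> False, so a bias is present

assert pad == (1, 1)
assert (stride_height, stride_width) == (2, 2)
assert has_bias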
# Copyright (c) 2017 Yingxin Cheng # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from abc import ABCMeta from abc import abstractproperty from collections import defaultdict from functools import total_ordering from ... import reserved_vars as rv from ...datasource import Line from ...datasource import LineStateBase from ...datasource import Thread from ...graph.token import Step from ..exc import StateError @total_ordering class IntervalBase(object): __metaclass__ = ABCMeta def __init__(self): self._from_pace = None self._to_pace = None self.order = 0 ### from pace @property def from_pace(self): return self._from_pace @from_pace.setter def from_pace(self, val): assert isinstance(val, Pace) self._from_pace = val @property def from_time(self): if self._from_pace: return self._from_pace.time raise RuntimeError("Interval %s has no from_pace" % self.int_name) @property def from_seconds(self): if self._from_pace: return self._from_pace.seconds raise RuntimeError("Interval %s has no from_pace" % self.int_name) @property def from_edgename(self): if self._from_pace: return self._from_pace.edgename raise RuntimeError("Interval %s has no from_pace" % self.int_name) @property def from_keyword(self): if self._from_pace: return self._from_pace.keyword raise RuntimeError("Interval %s has no from_pace" % self.int_name) ### to pace @property def to_pace(self): return self._to_pace @to_pace.setter def to_pace(self, val): if val is None: assert self._to_pace is None else: assert isinstance(val, Pace) self._to_pace = val @property def to_time(self): if self._to_pace: return self._to_pace.time raise RuntimeError("Interval %s has no to_pace" % self.int_name) @property def to_seconds(self): if self._to_pace: return self._to_pace.seconds raise RuntimeError("Interval %s has no to_pace" % self.int_name) @property def to_edgename(self): if self._to_pace: return self._to_pace.edgename raise RuntimeError("Interval %s has no to_pace" % self.int_name) @property def to_keyword(self): if self._to_pace: return self._to_pace.keyword raise RuntimeError("Interval %s has no to_pace" % self.int_name) @property def is_violated(self): return self.from_seconds > self.to_seconds ### abstracts @abstractproperty def int_name(self): return None @abstractproperty def requestins(self): return None ### properties @property def is_interval(self): return self.from_pace and self.to_pace @property def path(self): ret = "" if self._from_pace: ret += "%s[" % self.from_edgename else: ret += "|[" ret += str(self.int_name) if self._to_pace: ret += "]%s" % self.to_edgename else: ret += "]|" return ret @property def lapse(self): if self.is_interval: return self.to_seconds - self.from_seconds raise RuntimeError("Interval %s has no from/to pace" % self.int_name) @property def request(self): return self.requestins.request @property def request_type(self): return self.requestins.request_type ### prints def __repr_intlabels__(self): labels = "" if self._from_pace and self._to_pace: if self.from_seconds > self.to_seconds: labels += "#" elif not self.from_pace and not 
self.to_pace: labels += "X" return labels def __repr__(self): ret = "<%s#%s: %s " % ( self.__class__.__name__, self.int_name, self.__repr_intlabels__()) if self._from_pace: ret += "%.3f,%s`%s`->" % (self.from_seconds, self.from_edgename, self.from_keyword) ret += "." if self._to_pace: ret += "->%.3f,%s`%s`" % (self.to_seconds, self.to_edgename, self.to_keyword) ret += " >" return ret def __repr_from__(self): ret = "[%s %s" % (self.int_name, self.__repr_intlabels__()) if self._from_pace: context = ",".join(str(k)+"="+str(v) for k,v in self._from_pace.line_context.items()) if context: context = " " + context ret += "<-(%.3f,%s`%s`%s)]" % ( self.from_seconds, self.from_edgename, self.from_keyword, context) else: ret += "]" return ret def __repr_to__(self): ret = "[%s %s" % (self.int_name, self.__repr_intlabels__()) if self._to_pace: context = ",".join(str(k)+"="+str(v) for k,v in self._to_pace.line_context.items()) if context: context = " " + context ret += "->(%.3f,%s`%s`%s)]" % ( self.to_seconds, self.to_edgename, self.to_keyword, context) else: ret += "]" return ret __eq__ = lambda self, other:\ self.from_pace == other.from_pace and\ self.to_pace == other.to_pace __lt__ = lambda self, other:\ (self.from_seconds, self.to_seconds) <\ (other.from_seconds, other.to_seconds) def __hash__(self): return id(self) class RequestinsBase(IntervalBase): __metaclass__ = ABCMeta def __init__(self, request): self._request = None setattr(self, "request", request) @property def request(self): return self._request @request.setter def request(self, val): if val is not None: assert isinstance(val, str) assert self._request is None self._request = val @property def requestins(self): return self @property def int_name(self): return self.request class ThreadinsBase(IntervalBase): __metaclass__ = ABCMeta def __init__(self, thread_obj): super(ThreadinsBase, self).__init__() self.thread_obj = thread_obj self.thread_vars = {} self.thread_vars_dup = defaultdict(set) self._request = None self._requestins = None @property def request(self): return self._request @request.setter def request(self, val): assert isinstance(val, str) if self._request: assert self._request == val else: self._request = val @property def requestins(self): return self._requestins @requestins.setter def requestins(self, val): assert isinstance(val, RequestinsBase) assert self._requestins is None self._requestins = val ### thread_obj @property def thread(self): return self.thread_obj.thread @property def target(self): return self.thread_obj.target @property def component(self): return self.thread_obj.component @property def host(self): return self.thread_obj.host @property def target_obj(self): return self.thread_obj.target_obj @property def thread_name(self): return self.thread_obj.name @property def int_name(self): return self.thread_name def __hash__(self): return id(self) def _process_vars(self, line_obj): assert isinstance(line_obj, Line) for key in line_obj.keys: if key in ("keyword", "time", "seconds"): continue new_val = line_obj[key] if key in ("component", "target", "host", "thread"): val = getattr(self, key) if val != line_obj[key]: raise StateError("(ThreadInstance) parse error: " "variable %s mismatch: %s is not %s!" % (key, val, new_val)) else: pass elif key == "request": if new_val is None: pass elif self.request is None: self.request = new_val elif self.request != new_val: raise StateError("(ThreadInstance) parse error: " "request mismatch: %s is not %s!" 
% (self.request, new_val)) else: pass else: if key in self.thread_vars_dup: self.thread_vars_dup[key].add(new_val) else: val = self.thread_vars.get(key) if val is None: self.thread_vars[key] = new_val elif val != new_val: self.thread_vars_dup[key].add(val) self.thread_vars_dup[key].add(new_val) self.thread_vars.pop(key) else: pass class ActivityBase(IntervalBase): __metaclass__ = ABCMeta _act_type = object() _act_lim_back = None _act_lim_forth = None def __init__(self, from_pace, to_pace, aname): assert isinstance(aname, str) super(ActivityBase, self).__init__() if from_pace: self.from_pace = from_pace if to_pace: self.to_pace = to_pace self.is_main = False self.activity_name = aname @property def int_name(self): return self.activity_name def __repr_intlabels__(self): marks = super(ActivityBase, self).__repr_intlabels__() if self.is_main: marks += "!" return marks @total_ordering class Pace(LineStateBase, object): """ Pace is relative to transition. """ def __init__(self, line_obj, step, threadins): assert isinstance(line_obj, Line) assert isinstance(step, Step) assert isinstance(threadins, ThreadinsBase) assert line_obj.thread_obj is threadins.thread_obj self.line_obj = line_obj self.step = step self.threadins = threadins self.prv_activity_bytype = defaultdict(list) self.nxt_activity_bytype = defaultdict(list) self.is_main = False self.prv_main_activity = None self.nxt_main_activity = None ### step @property def path_step(self): return self.step.path @property def edgename(self): return self.step.edgename @property def joinable(self): return self.step.joinable ### threadins @property def requestins(self): return self.threadins.requestins @property def thread_obj(self): return self.threadins.thread_obj @property def target_obj(self): return self.threadins.target_obj ### LineState @property def line_keys(self): return self.line_obj.keys_ @property def line_context(self): ret = {} for k in self.line_keys: ret[k] = self[k] return ret @property def refresh_vars(self): return self.step.refresh_vars # TODO: bug here @property def _ls_state(self): # if self.is_thread_start and self.is_thread_end: # return "*" # elif self.is_thread_start: # return "+" # elif self.is_thread_end: # return "-" # else: if self.prv_main_activity or self.nxt_main_activity: return "!" else: return "|" @property def _ls_request(self): return self.request @property def _ls_path(self): return self.path_step # total ordering __eq__ = lambda self, other: self.seconds == other.seconds __lt__ = lambda self, other: self.seconds < other.seconds def __getattribute__(self, item): assert isinstance(item, str) if item in rv.ALL_VARS: ret = getattr(self.line_obj, item) if ret is None and item == rv.REQUEST: ret = getattr(self.threadins, "request") return ret else: return super(Pace, self).__getattribute__(item) def __getitem__(self, item): assert isinstance(item, str) if item in rv.ALL_VARS: return getattr(self, item) elif item in self.line_obj: return self.line_obj[item] elif item in self.threadins.thread_vars: return self.threadins.thread_vars[item] elif item in self.threadins.thread_vars_dup: raise StateError("(Pace) got multiple %s: %s" % (item, self.threadins.thread_vars_dup[item])) else: raise StateError("(Pace) key %s does not exist!" 
% item) def __repr_marks__(self): mark_str = "" for type_, acts in self.prv_activity_bytype.items(): mark_str += ", prv_"+type_+"(" mark_str += ",".join(act.__repr_from__() for act in acts) mark_str += ")" for type_, acts in self.nxt_activity_bytype.items(): mark_str += ", nxt_"+type_+"(" mark_str += ",".join(act.__repr_to__() for act in acts) mark_str += ")" return mark_str def __repr__(self): return "<P %.3f %s [%s %s] %s, `%s`, %s%s>" % ( self.seconds, self.path_step, self.target, self.thread, self.request, self.keyword, self.line_context, self.__repr_marks__()) def __repr_thread__(self): return "%.3f %s, `%s`, %s%s" % ( self.seconds, self.path_step, self.keyword, self.line_context, self.__repr_marks__()) def __hash__(self): return id(self) def append_nxt(self, activity, template=None): assert isinstance(activity, ActivityBase) if not template: template = activity.__class__ assert issubclass(template, ActivityBase) act_type = template._act_type lim = template._act_lim_forth assert not act_type is ActivityBase._act_type assert isinstance(act_type, str) assert isinstance(lim, bool) if lim: assert not self.nxt_activity_bytype[act_type] self.nxt_activity_bytype[act_type].append(activity) def append_prv(self, activity, template=None): assert isinstance(activity, ActivityBase) if not template: template = activity.__class__ assert issubclass(template, ActivityBase) act_type = template._act_type lim = template._act_lim_back assert not act_type is ActivityBase._act_type assert isinstance(act_type, str) assert isinstance(lim, bool) if lim: assert not self.prv_activity_bytype[act_type] self.prv_activity_bytype[act_type].append(activity) def replace_nxt(self, act, newact): assert isinstance(act, ActivityBase) assert isinstance(newact, ActivityBase) act_type = act._act_type index = 0 for act_ in self.nxt_activity_bytype[act_type]: if act_ is act: self.nxt_activity_bytype[act_type][index] = newact return index += 1 raise RuntimeError("Cannot find nxt_act %r in pace" % act) def replace_prv(self, act, newact): assert isinstance(act, ActivityBase) assert isinstance(newact, ActivityBase) act_type = act._act_type index = 0 for act_ in self.prv_activity_bytype[act_type]: if act_ is act: self.prv_activity_bytype[act_type][index] = newact return index += 1 raise RuntimeError("Cannot find prv_act %r in pace" % act) def get_prv(self, act_type): assert not act_type is ActivityBase._act_type return self.prv_activity_bytype[act_type] def get_nxt(self, act_type): assert not act_type is ActivityBase._act_type return self.nxt_activity_bytype[act_type]
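# --- Hedged illustration (not part of the module above) ---
# IntervalBase and Pace each define only __eq__ and __lt__ and rely on
# functools.total_ordering to derive <=, >, and >=. A minimal standalone sketch
# of the same pattern, ordering objects by a `seconds` timestamp as Pace does:
from functools import total_ordering

@total_ordering
class _Stamp(object):
    def __init__(self, seconds):
        self.seconds = seconds

    __eq__ = lambda self, other: self.seconds == other.seconds
    __lt__ = lambda self, other: self.seconds < other.seconds

assert _Stamp(1.0) < _Stamp(2.5)
assert _Stamp(2.5) >= _Stamp(2.5)  # derived by total_ordering from __eq__/__lt__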
#!/usr/bin/python ############################################## ###Python template ###Author: Elizabeth Lee ###Date: 3/17/14 ###Purpose: visualize results of time-based epidemic simulations with varying T values vy age #### pairs with age_time_T-age.py ###Import data: ###Command Line: python age_time_T-age_viz.py ############################################## ####### notes ####### ### codebook of age class codes # '1' - Toddlers: 0-2 # '2' - Preschool: 3-4 # '3' - Children: 5-18 # '4' - Adults: 19-64 # '5' - Seniors: 65+ (community) # '6' - Elders: 65+ (nursing home) # There are only 94 "elders" in the Vancouver network, and they all reside in one nursing home, so they can be combined with the seniors for analysis purposes (all_elderly). # T_critical = 0.0565868 ### packages/modules ### import matplotlib.pyplot as plt import numpy as np from collections import defaultdict import zipfile from time import clock ## local modules ## import percolations as perc import pretty_print as pp ### plotting settings ### colorvec = ['black', 'red', 'orange', 'gold', 'green', 'blue', 'cyan', 'darkviolet', 'hotpink', 'brown', 'indigo'] ### simulation parameters ### numsims = 800 # number of simulations size_epi = 515 # threshold value that designates an epidemic in the network (5% of network) # gamma = probability of recovery at each time step # on avg, assume 5 days till recovery gamma = 1/float(5) # 5 days recovery here T = 0.0643 # total epidemic size = 20% # T = 0.075 # total epidemic size = 30% # T = beta / (beta + gamma) # when T = 0.0643 and gamma = 1/5, b = 0.0137 # when T = 0.075 and gamma = 1/5, b = 0.0162 b = (-T * gamma)/(T - 1) # define different child transmissibility multipliers # Cauchemez 2004 cites that household risk when there is a child infected vs when there is an adult infected is 1.85 times greater (0.48/0.26) m1, m2 = 1, 2 Tmult_list = np.linspace(m1, m2, num=11, endpoint=True) ### data structures ### # d_node_age[nodenumber] = ageclass d_node_age = {} ### ziparchive to read and write results ### zipname = '/home/elee/Dropbox/Elizabeth_Bansal_Lab/Age_Based_Simulations/Results/adultT-age_time_%ssims_beta%.3f_Tmult%.1f-%.1f_vax0.zip' %(numsims, b, m1, m2) ############################################# # age data processing graph_ages = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/Age_Based_Simulations/Data/urban_ages_Sarah.csv') # node number and age class for line in graph_ages: new_line = line.split() for line in new_line: node, age = line.split(',') d_node_age[node] = age # node-ageclass dictionary # define network size N = len(d_node_age) # create binary lists to indicate children and adults ch = [1 if d_node_age[str(node)] == '3' else 0 for node in xrange(1, int(N) + 1)] ad = [1 if d_node_age[str(node)] == '4' else 0 for node in xrange(1, int(N) + 1)] ############################################## # data processing - convert tstep info into dictionaries # declare dictionaries # dict_epiincid[(s, simnumber, 'T', 'C' or 'A')] = [T, C or A incid at tstep 0, T, C or A incid at tstep 1...], where incidence is simply number of new cases (raw) # dict_epiAR[(s, simnumber, 'T', 'C' or 'A')] = [T, C or A attack rate at tstep 0, T, C or A attack rate at tstep 1...], where attack rate is number of new cases per population size # dict_epiOR[(s, simnumber)] = [OR at tstep0, OR at tstep1...] 
# dict_epiOR_filt[(s, simnum)] = [OR for each time step for epidemics only where OR is nan when we want to exclude the time point due to small infected numbers] # dict_epiresults[(s, simnumber)] = (episize, c_episize, a_episize) d_epiincid, d_epiOR, d_epiresults, d_epiAR, d_epiOR_filt = defaultdict(list), defaultdict(list), {}, defaultdict(list), defaultdict(list) for m in Tmult_list: processing = clock() # reference filenames in zipfolder Itstep_file = 'Results/Itstep_adultT-age_time_%ssims_beta%.3f_Tmult%.1f_vax0.txt' %(numsims, b, m) Rtstep_file = 'Results/Rtstep_adultT-age_time_%ssims_beta%.3f_Tmult%.1f_vax0.txt' %(numsims, b, m) # recreate epidata from zip archive d_epiincid, d_epiOR, d_epiresults, d_epiAR, d_epiOR_filt = perc.recreate_epidata(Itstep_file, Rtstep_file, zipname, m, size_epi, ch, ad, d_epiincid, d_epiOR, d_epiresults, d_epiAR, d_epiOR_filt) print m, "processed", clock() - processing # grab unique list of child T mult values that produced at least one epidemic Tmult_epi = list(set([key[0] for key in d_epiincid])) ############################################## ### plot OR by time for each Tmult value ### # each epidemic sim is one line for m in Tmult_epi: pl_ls = [key for key in d_epiOR if key[0] == m] for key in pl_ls: plt.plot(xrange(len(d_epiOR[key])), d_epiOR[key], marker = 'None', color = 'grey') plt.plot(xrange(250), [1] * 250, marker = 'None', color = 'red', linewidth = 2) plt.xlabel('time step, adult T multiplier: ' + str(m)) plt.ylabel('OR, child:adult') plt.ylim([0, 30]) plt.xlim([-1, 200]) figname = 'Figures/epiOR_adultT-age_time_%ssims_beta%.3f_Tmult%.1f_vax0.png' %(numsims, b, m) plt.savefig(figname) plt.close() pp.compress_to_ziparchive(zipname, figname) # plt.show() ############################################## ### plot filtered OR by time for each suscep value ### # each sim is one line for m in Tmult_epi: pl_ls = [key for key in d_epiOR_filt if key[0] == m] for key in pl_ls: plt.plot(xrange(len(d_epiOR_filt[key])), d_epiOR_filt[key], marker = 'None', color = 'grey') plt.plot(xrange(250), [1] * 250, marker = 'None', color = 'red', linewidth = 2) plt.xlabel('sim time step, adult T multiplier: ' + str(m) + ', 5-95% cum infections') plt.ylabel('OR, child:adult') plt.ylim([0, 30]) plt.xlim([-1, 200]) figname = 'Figures/epiORfilt_adultT-age_time_%ssims_beta%.3f_Tmult%.1f_vax0.png' %(numsims, b, m) plt.savefig(figname) plt.close() pp.compress_to_ziparchive(zipname, figname) # plt.show() ############################################## ### plot filtered OR by time for all m values ### # each sim is one line, each Tmult is a diff color on one plot for m in Tmult_epi: pl_ls = [key for key in d_epiOR_filt if key[0] == m] colvec = colorvec.pop() for key in pl_ls: plt.plot(xrange(len(d_epiOR_filt[key])), d_epiOR_filt[key], marker = 'None', color = colvec) plt.plot(xrange(250), [1] * 250, marker = 'None', color = 'red', linewidth = 2) plt.xlabel('time step, all m values, 5-95% cum infections') plt.ylabel('filtered OR, child:adult') plt.ylim([0, 30]) plt.xlim([-1, 200]) figname = 'Figures/epiORfilt_adultT-age_time_%ssims_beta%.3f_allTmult_vax0.png' %(numsims, b) plt.savefig(figname) plt.close() pp.compress_to_ziparchive(zipname, figname) # plt.show() ############################################## ### plot incidence by time for each m value ### # each sim is one line for m in Tmult_epi: pl_ls = [key for key in d_epiincid if key[0] == m and key[2] == 'T'] for key in pl_ls: plt.plot(xrange(len(d_epiincid[key])), d_epiincid[key], marker = 'None', color = 'grey') 
plt.xlabel('time step, adult T multiplier: ' + str(m)) plt.ylabel('number of new cases') plt.xlim([-1, 200]) figname = 'Figures/epiincid_adultT-age_time_%ssims_beta%.3f_Tmult%.1f_vax0.png' %(numsims, b, m) plt.savefig(figname) plt.close() pp.compress_to_ziparchive(zipname, figname) # plt.show() ############################################## ### plot mean epidemic size by adult T multiplier ### d_episize = defaultdict(list) for m in Tmult_epi: d_episize[m] = [sum(d_epiincid[key]) for key in d_epiincid if key[0] == m and key[2] == 'T'] plt.errorbar(Tmult_epi, [np.mean(d_episize[m]) for m in Tmult_epi], yerr = [np.std(d_episize[m]) for m in Tmult_epi], marker = 'o', color = 'black', linestyle = 'None') plt.xlim([1.0, 2.0]) plt.xlabel('adult T multiplier') plt.ylabel('epidemic size') figname = 'Figures/episize_adultT-age_time_%ssims_beta%.3f_vax0.png' %(numsims, b) plt.savefig(figname) plt.close() pp.compress_to_ziparchive(zipname, figname) # plt.show()
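# --- Hedged illustration (not part of the script above) ---
# Quick check of the transmissibility arithmetic used above: with
# T = beta / (beta + gamma), solving for beta gives beta = T * gamma / (1 - T),
# which matches the script's b = (-T * gamma) / (T - 1).
gamma_check = 1 / 5.
for T_check in (0.0643, 0.075):
    beta_check = T_check * gamma_check / (1 - T_check)
    assert abs(beta_check - (-T_check * gamma_check) / (T_check - 1)) < 1e-12
    print('T = %.4f -> beta = %.4f' % (T_check, beta_check))  # ~0.0137 and ~0.0162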
# Copyright (c) 2012 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import mock from oslo_config import cfg from neutron.agent.linux import ip_lib from neutron.agent.linux import utils from neutron.common import constants from neutron.common import exceptions from neutron.plugins.common import constants as p_const from neutron.plugins.linuxbridge.agent import linuxbridge_neutron_agent from neutron.plugins.linuxbridge.common import constants as lconst from neutron.tests import base LOCAL_IP = '192.168.0.33' DEVICE_1 = 'tapabcdef01-12' class FakeIpLinkCommand(object): def set_up(self): pass class FakeIpDevice(object): def __init__(self): self.link = FakeIpLinkCommand() class TestLinuxBridge(base.BaseTestCase): def setUp(self): super(TestLinuxBridge, self).setUp() interface_mappings = {'physnet1': 'eth1'} with mock.patch.object(linuxbridge_neutron_agent.LinuxBridgeManager, 'get_interface_by_ip', return_value=None): self.linux_bridge = linuxbridge_neutron_agent.LinuxBridgeManager( interface_mappings) def test_ensure_physical_in_bridge_invalid(self): result = self.linux_bridge.ensure_physical_in_bridge('network_id', p_const.TYPE_VLAN, 'physnetx', 7) self.assertFalse(result) def test_ensure_physical_in_bridge_flat(self): with mock.patch.object(self.linux_bridge, 'ensure_flat_bridge') as flat_bridge_func: self.linux_bridge.ensure_physical_in_bridge( 'network_id', p_const.TYPE_FLAT, 'physnet1', None) self.assertTrue(flat_bridge_func.called) def test_ensure_physical_in_bridge_vlan(self): with mock.patch.object(self.linux_bridge, 'ensure_vlan_bridge') as vlan_bridge_func: self.linux_bridge.ensure_physical_in_bridge( 'network_id', p_const.TYPE_VLAN, 'physnet1', 7) self.assertTrue(vlan_bridge_func.called) def test_ensure_physical_in_bridge_vxlan(self): self.linux_bridge.vxlan_mode = lconst.VXLAN_UCAST with mock.patch.object(self.linux_bridge, 'ensure_vxlan_bridge') as vxlan_bridge_func: self.linux_bridge.ensure_physical_in_bridge( 'network_id', 'vxlan', 'physnet1', 7) self.assertTrue(vxlan_bridge_func.called) class TestLinuxBridgeAgent(base.BaseTestCase): def setUp(self): super(TestLinuxBridgeAgent, self).setUp() # disable setting up periodic state reporting cfg.CONF.set_override('report_interval', 0, 'AGENT') cfg.CONF.set_default('firewall_driver', 'neutron.agent.firewall.NoopFirewallDriver', group='SECURITYGROUP') cfg.CONF.set_default('quitting_rpc_timeout', 10, 'AGENT') self.get_devices_p = mock.patch.object(ip_lib.IPWrapper, 'get_devices') self.get_devices = self.get_devices_p.start() self.get_devices.return_value = [ip_lib.IPDevice('eth77')] self.get_mac_p = mock.patch('neutron.agent.linux.utils.' 
'get_interface_mac') self.get_mac = self.get_mac_p.start() self.get_mac.return_value = '00:00:00:00:00:01' with mock.patch.object(linuxbridge_neutron_agent.LinuxBridgeManager, 'get_interface_by_ip', return_value=None): self.agent = linuxbridge_neutron_agent.LinuxBridgeNeutronAgentRPC( {}, 0, cfg.CONF.AGENT.quitting_rpc_timeout) with mock.patch.object(self.agent, "daemon_loop"): self.agent.start() def test_treat_devices_removed_with_existed_device(self): agent = self.agent devices = [DEVICE_1] with mock.patch.object(agent.plugin_rpc, "update_device_down") as fn_udd,\ mock.patch.object(agent.sg_agent, "remove_devices_filter") as fn_rdf: fn_udd.return_value = {'device': DEVICE_1, 'exists': True} with mock.patch.object(linuxbridge_neutron_agent.LOG, 'info') as log: resync = agent.treat_devices_removed(devices) self.assertEqual(2, log.call_count) self.assertFalse(resync) self.assertTrue(fn_udd.called) self.assertTrue(fn_rdf.called) def test_treat_devices_removed_with_not_existed_device(self): agent = self.agent devices = [DEVICE_1] with mock.patch.object(agent.plugin_rpc, "update_device_down") as fn_udd,\ mock.patch.object(agent.sg_agent, "remove_devices_filter") as fn_rdf: fn_udd.return_value = {'device': DEVICE_1, 'exists': False} with mock.patch.object(linuxbridge_neutron_agent.LOG, 'debug') as log: resync = agent.treat_devices_removed(devices) self.assertEqual(1, log.call_count) self.assertFalse(resync) self.assertTrue(fn_udd.called) self.assertTrue(fn_rdf.called) def test_treat_devices_removed_failed(self): agent = self.agent devices = [DEVICE_1] with mock.patch.object(agent.plugin_rpc, "update_device_down") as fn_udd,\ mock.patch.object(agent.sg_agent, "remove_devices_filter") as fn_rdf: fn_udd.side_effect = Exception() with mock.patch.object(linuxbridge_neutron_agent.LOG, 'debug') as log: resync = agent.treat_devices_removed(devices) self.assertEqual(2, log.call_count) self.assertTrue(resync) self.assertTrue(fn_udd.called) self.assertTrue(fn_rdf.called) def _test_scan_devices(self, previous, updated, fake_current, expected, sync): self.agent.br_mgr = mock.Mock() self.agent.br_mgr.get_tap_devices.return_value = fake_current self.agent.updated_devices = updated results = self.agent.scan_devices(previous, sync) self.assertEqual(expected, results) def test_scan_devices_no_changes(self): previous = {'current': set([1, 2]), 'updated': set(), 'added': set(), 'removed': set()} fake_current = set([1, 2]) updated = set() expected = {'current': set([1, 2]), 'updated': set(), 'added': set(), 'removed': set()} self._test_scan_devices(previous, updated, fake_current, expected, sync=False) def test_scan_devices_added_removed(self): previous = {'current': set([1, 2]), 'updated': set(), 'added': set(), 'removed': set()} fake_current = set([2, 3]) updated = set() expected = {'current': set([2, 3]), 'updated': set(), 'added': set([3]), 'removed': set([1])} self._test_scan_devices(previous, updated, fake_current, expected, sync=False) def test_scan_devices_removed_retried_on_sync(self): previous = {'current': set([2, 3]), 'updated': set(), 'added': set(), 'removed': set([1])} fake_current = set([2, 3]) updated = set() expected = {'current': set([2, 3]), 'updated': set(), 'added': set([2, 3]), 'removed': set([1])} self._test_scan_devices(previous, updated, fake_current, expected, sync=True) def test_scan_devices_vanished_removed_on_sync(self): previous = {'current': set([2, 3]), 'updated': set(), 'added': set(), 'removed': set([1])} # Device 2 disappeared. 
fake_current = set([3]) updated = set() # Device 1 should be retried. expected = {'current': set([3]), 'updated': set(), 'added': set([3]), 'removed': set([1, 2])} self._test_scan_devices(previous, updated, fake_current, expected, sync=True) def test_scan_devices_updated(self): previous = {'current': set([1, 2]), 'updated': set(), 'added': set(), 'removed': set()} fake_current = set([1, 2]) updated = set([1]) expected = {'current': set([1, 2]), 'updated': set([1]), 'added': set(), 'removed': set()} self._test_scan_devices(previous, updated, fake_current, expected, sync=False) def test_scan_devices_updated_non_existing(self): previous = {'current': set([1, 2]), 'updated': set(), 'added': set(), 'removed': set()} fake_current = set([1, 2]) updated = set([3]) expected = {'current': set([1, 2]), 'updated': set(), 'added': set(), 'removed': set()} self._test_scan_devices(previous, updated, fake_current, expected, sync=False) def test_scan_devices_updated_on_sync(self): previous = {'current': set([1, 2]), 'updated': set([1]), 'added': set(), 'removed': set()} fake_current = set([1, 2]) updated = set([2]) expected = {'current': set([1, 2]), 'updated': set([1, 2]), 'added': set([1, 2]), 'removed': set()} self._test_scan_devices(previous, updated, fake_current, expected, sync=True) def test_process_network_devices(self): agent = self.agent device_info = {'current': set(), 'added': set(['tap3', 'tap4']), 'updated': set(['tap2', 'tap3']), 'removed': set(['tap1'])} agent.sg_agent.prepare_devices_filter = mock.Mock() agent.sg_agent.refresh_firewall = mock.Mock() agent.treat_devices_added_updated = mock.Mock(return_value=False) agent.treat_devices_removed = mock.Mock(return_value=False) agent.process_network_devices(device_info) agent.sg_agent.prepare_devices_filter.assert_called_with( set(['tap3', 'tap4'])) self.assertTrue(agent.sg_agent.refresh_firewall.called) agent.treat_devices_added_updated.assert_called_with(set(['tap2', 'tap3', 'tap4'])) agent.treat_devices_removed.assert_called_with(set(['tap1'])) def test_treat_devices_added_updated_admin_state_up_true(self): agent = self.agent mock_details = {'device': 'dev123', 'port_id': 'port123', 'network_id': 'net123', 'admin_state_up': True, 'network_type': 'vlan', 'segmentation_id': 100, 'physical_network': 'physnet1'} agent.plugin_rpc = mock.Mock() agent.plugin_rpc.get_devices_details_list.return_value = [mock_details] agent.br_mgr = mock.Mock() agent.br_mgr.add_interface.return_value = True resync_needed = agent.treat_devices_added_updated(set(['tap1'])) self.assertFalse(resync_needed) agent.br_mgr.add_interface.assert_called_with('net123', 'vlan', 'physnet1', 100, 'port123') self.assertTrue(agent.plugin_rpc.update_device_up.called) def test_treat_devices_added_updated_admin_state_up_false(self): agent = self.agent mock_details = {'device': 'dev123', 'port_id': 'port123', 'network_id': 'net123', 'admin_state_up': False, 'network_type': 'vlan', 'segmentation_id': 100, 'physical_network': 'physnet1'} agent.plugin_rpc = mock.Mock() agent.plugin_rpc.get_devices_details_list.return_value = [mock_details] agent.remove_port_binding = mock.Mock() resync_needed = agent.treat_devices_added_updated(set(['tap1'])) self.assertFalse(resync_needed) agent.remove_port_binding.assert_called_with('net123', 'port123') self.assertFalse(agent.plugin_rpc.update_device_up.called) def test_set_rpc_timeout(self): self.agent.stop() for rpc_client in (self.agent.plugin_rpc.client, self.agent.sg_plugin_rpc.client, self.agent.state_rpc.client): 
self.assertEqual(cfg.CONF.AGENT.quitting_rpc_timeout, rpc_client.timeout) def test_set_rpc_timeout_no_value(self): self.agent.quitting_rpc_timeout = None with mock.patch.object(self.agent, 'set_rpc_timeout') as mock_set_rpc: self.agent.stop() self.assertFalse(mock_set_rpc.called) class TestLinuxBridgeManager(base.BaseTestCase): def setUp(self): super(TestLinuxBridgeManager, self).setUp() self.interface_mappings = {'physnet1': 'eth1'} with mock.patch.object(linuxbridge_neutron_agent.LinuxBridgeManager, 'get_interface_by_ip', return_value=None): self.lbm = linuxbridge_neutron_agent.LinuxBridgeManager( self.interface_mappings) def test_interface_exists_on_bridge(self): with mock.patch.object(os, 'listdir') as listdir_fn: listdir_fn.return_value = ["abc"] self.assertTrue( self.lbm.interface_exists_on_bridge("br-int", "abc") ) self.assertFalse( self.lbm.interface_exists_on_bridge("br-int", "abd") ) def test_get_bridge_name(self): nw_id = "123456789101112" self.assertEqual(self.lbm.get_bridge_name(nw_id), "brq" + nw_id[0:11]) nw_id = "" self.assertEqual(self.lbm.get_bridge_name(nw_id), "brq") def test_get_subinterface_name(self): self.assertEqual(self.lbm.get_subinterface_name("eth0", "0"), "eth0.0") self.assertEqual(self.lbm.get_subinterface_name("eth0", ""), "eth0.") def test_get_tap_device_name(self): if_id = "123456789101112" self.assertEqual(self.lbm.get_tap_device_name(if_id), constants.TAP_DEVICE_PREFIX + if_id[0:11]) if_id = "" self.assertEqual(self.lbm.get_tap_device_name(if_id), constants.TAP_DEVICE_PREFIX) def test_get_vxlan_device_name(self): vn_id = p_const.MAX_VXLAN_VNI self.assertEqual(self.lbm.get_vxlan_device_name(vn_id), "vxlan-" + str(vn_id)) self.assertIsNone(self.lbm.get_vxlan_device_name(vn_id + 1)) def test_get_all_neutron_bridges(self): br_list = ["br-int", "brq1", "brq2", "br-ex"] with mock.patch.object(os, 'listdir') as listdir_fn: listdir_fn.return_value = br_list self.assertEqual(self.lbm.get_all_neutron_bridges(), br_list[1:3]) self.assertTrue(listdir_fn.called) def test_get_interfaces_on_bridge(self): with mock.patch.object(utils, 'execute'),\ mock.patch.object(os, 'listdir') as listdir_fn,\ mock.patch.object(ip_lib, 'device_exists', return_value=True): listdir_fn.return_value = ["qbr1"] self.assertEqual(self.lbm.get_interfaces_on_bridge("br0"), ["qbr1"]) def test_get_interfaces_on_bridge_not_existing(self): with mock.patch.object(ip_lib, 'device_exists', return_value=False): self.assertEqual([], self.lbm.get_interfaces_on_bridge("br0")) def test_get_tap_devices_count(self): with mock.patch.object(os, 'listdir') as listdir_fn: listdir_fn.return_value = ['tap2101', 'eth0.100', 'vxlan-1000'] self.assertEqual(self.lbm.get_tap_devices_count('br0'), 1) listdir_fn.side_effect = OSError() self.assertEqual(self.lbm.get_tap_devices_count('br0'), 0) def test_get_interface_by_ip(self): with mock.patch.object(ip_lib.IPWrapper, 'get_devices') as get_dev_fn,\ mock.patch.object(ip_lib.IpAddrCommand, 'list') as ip_list_fn: device = mock.Mock() device.name = 'dev_name' get_dev_fn.return_value = [device] ip_list_fn.returnvalue = mock.Mock() self.assertEqual(self.lbm.get_interface_by_ip(LOCAL_IP), 'dev_name') def test_get_bridge_for_tap_device(self): with mock.patch.object(self.lbm, "get_all_neutron_bridges") as get_all_qbr_fn,\ mock.patch.object(self.lbm, "get_interfaces_on_bridge") as get_if_fn: get_all_qbr_fn.return_value = ["br-int", "br-ex"] get_if_fn.return_value = ["tap1", "tap2", "tap3"] self.assertEqual(self.lbm.get_bridge_for_tap_device("tap1"), "br-int") 
self.assertIsNone(self.lbm.get_bridge_for_tap_device("tap4")) def test_is_device_on_bridge(self): self.assertTrue(not self.lbm.is_device_on_bridge("")) with mock.patch.object(os.path, 'exists') as exists_fn: exists_fn.return_value = True self.assertTrue(self.lbm.is_device_on_bridge("tap1")) exists_fn.assert_called_with( "/sys/class/net/tap1/brport" ) def test_get_interface_details(self): with mock.patch.object(ip_lib.IpAddrCommand, 'list') as list_fn,\ mock.patch.object(ip_lib.IpRouteCommand, 'get_gateway') as getgw_fn: gwdict = dict(gateway='1.1.1.1') getgw_fn.return_value = gwdict ipdict = dict(cidr='1.1.1.1/24', broadcast='1.1.1.255', scope='global', ip_version=4, dynamic=False) list_fn.return_value = ipdict ret = self.lbm.get_interface_details("eth0") self.assertTrue(list_fn.called) self.assertTrue(getgw_fn.called) self.assertEqual(ret, (ipdict, gwdict)) def test_ensure_flat_bridge(self): with mock.patch.object(ip_lib.IpAddrCommand, 'list') as list_fn,\ mock.patch.object(ip_lib.IpRouteCommand, 'get_gateway') as getgw_fn: gwdict = dict(gateway='1.1.1.1') getgw_fn.return_value = gwdict ipdict = dict(cidr='1.1.1.1/24', broadcast='1.1.1.255', scope='global', ip_version=4, dynamic=False) list_fn.return_value = ipdict with mock.patch.object(self.lbm, 'ensure_bridge') as ens: self.assertEqual( self.lbm.ensure_flat_bridge("123", "eth0"), "eth0" ) self.assertTrue(list_fn.called) self.assertTrue(getgw_fn.called) ens.assert_called_once_with("brq123", "eth0", ipdict, gwdict) def test_ensure_vlan_bridge(self): with mock.patch.object(self.lbm, 'ensure_vlan') as ens_vl_fn,\ mock.patch.object(self.lbm, 'ensure_bridge') as ens,\ mock.patch.object(self.lbm, 'get_interface_details') as get_int_det_fn: ens_vl_fn.return_value = "eth0.1" get_int_det_fn.return_value = (None, None) self.assertEqual(self.lbm.ensure_vlan_bridge("123", "eth0", "1"), "eth0.1") ens.assert_called_with("brq123", "eth0.1", None, None) get_int_det_fn.return_value = ("ips", "gateway") self.assertEqual(self.lbm.ensure_vlan_bridge("123", "eth0", "1"), "eth0.1") ens.assert_called_with("brq123", "eth0.1", "ips", "gateway") def test_ensure_local_bridge(self): with mock.patch.object(self.lbm, 'ensure_bridge') as ens_fn: self.lbm.ensure_local_bridge("54321") ens_fn.assert_called_once_with("brq54321") def test_ensure_vlan(self): with mock.patch.object(ip_lib, 'device_exists') as de_fn: de_fn.return_value = True self.assertEqual(self.lbm.ensure_vlan("eth0", "1"), "eth0.1") de_fn.return_value = False with mock.patch.object(utils, 'execute') as exec_fn: exec_fn.return_value = False self.assertEqual(self.lbm.ensure_vlan("eth0", "1"), "eth0.1") # FIXME(kevinbenton): validate the params to the exec_fn calls self.assertEqual(exec_fn.call_count, 2) exec_fn.return_value = True self.assertIsNone(self.lbm.ensure_vlan("eth0", "1")) self.assertEqual(exec_fn.call_count, 3) def test_ensure_vxlan(self): seg_id = "12345678" self.lbm.local_int = 'eth0' self.lbm.vxlan_mode = lconst.VXLAN_MCAST with mock.patch.object(ip_lib, 'device_exists') as de_fn: de_fn.return_value = True self.assertEqual(self.lbm.ensure_vxlan(seg_id), "vxlan-" + seg_id) de_fn.return_value = False with mock.patch.object(self.lbm.ip, 'add_vxlan') as add_vxlan_fn: add_vxlan_fn.return_value = FakeIpDevice() self.assertEqual(self.lbm.ensure_vxlan(seg_id), "vxlan-" + seg_id) add_vxlan_fn.assert_called_with("vxlan-" + seg_id, seg_id, group="224.0.0.1", dev=self.lbm.local_int) cfg.CONF.set_override('l2_population', 'True', 'VXLAN') self.assertEqual(self.lbm.ensure_vxlan(seg_id), "vxlan-" + seg_id) 
add_vxlan_fn.assert_called_with("vxlan-" + seg_id, seg_id, group="224.0.0.1", dev=self.lbm.local_int, proxy=True) def test_update_interface_ip_details(self): gwdict = dict(gateway='1.1.1.1', metric=50) ipdict = dict(cidr='1.1.1.1/24', broadcast='1.1.1.255', scope='global', ip_version=4, dynamic=False) with mock.patch.object(ip_lib.IpAddrCommand, 'add') as add_fn,\ mock.patch.object(ip_lib.IpAddrCommand, 'delete') as del_fn: self.lbm.update_interface_ip_details("br0", "eth0", [ipdict], None) self.assertTrue(add_fn.called) self.assertTrue(del_fn.called) with mock.patch.object(ip_lib.IpRouteCommand, 'add_gateway') as addgw_fn,\ mock.patch.object(ip_lib.IpRouteCommand, 'delete_gateway') as delgw_fn: self.lbm.update_interface_ip_details("br0", "eth0", None, gwdict) self.assertTrue(addgw_fn.called) self.assertTrue(delgw_fn.called) def test_bridge_exists_and_ensure_up(self): ip_lib_mock = mock.Mock() with mock.patch.object(ip_lib, 'IPDevice', return_value=ip_lib_mock): # device exists self.assertTrue(self.lbm._bridge_exists_and_ensure_up("br0")) self.assertTrue(ip_lib_mock.link.set_up.called) # device doesn't exists ip_lib_mock.link.set_up.side_effect = RuntimeError self.assertFalse(self.lbm._bridge_exists_and_ensure_up("br0")) def test_ensure_bridge(self): with mock.patch.object(self.lbm, '_bridge_exists_and_ensure_up') as de_fn,\ mock.patch.object(utils, 'execute') as exec_fn,\ mock.patch.object(self.lbm, 'update_interface_ip_details') as upd_fn,\ mock.patch.object(self.lbm, 'interface_exists_on_bridge') as ie_fn,\ mock.patch.object(self.lbm, 'is_device_on_bridge'),\ mock.patch.object(self.lbm, 'get_bridge_for_tap_device') as get_if_br_fn: de_fn.return_value = False exec_fn.return_value = False self.assertEqual(self.lbm.ensure_bridge("br0", None), "br0") ie_fn.return_Value = False self.lbm.ensure_bridge("br0", "eth0") upd_fn.assert_called_with("br0", "eth0", None, None) ie_fn.assert_called_with("br0", "eth0") self.lbm.ensure_bridge("br0", "eth0", "ips", "gateway") upd_fn.assert_called_with("br0", "eth0", "ips", "gateway") ie_fn.assert_called_with("br0", "eth0") exec_fn.side_effect = Exception() de_fn.return_value = True self.lbm.ensure_bridge("br0", "eth0") ie_fn.assert_called_with("br0", "eth0") exec_fn.reset_mock() exec_fn.side_effect = None de_fn.return_value = True ie_fn.return_value = False get_if_br_fn.return_value = "br1" self.lbm.ensure_bridge("br0", "eth0") expected = [ mock.call(['brctl', 'delif', 'br1', 'eth0'], run_as_root=True), mock.call(['brctl', 'addif', 'br0', 'eth0'], run_as_root=True), ] exec_fn.assert_has_calls(expected) def test_ensure_physical_in_bridge(self): self.assertFalse( self.lbm.ensure_physical_in_bridge("123", p_const.TYPE_VLAN, "phys", "1") ) with mock.patch.object(self.lbm, "ensure_flat_bridge") as flbr_fn: self.assertTrue( self.lbm.ensure_physical_in_bridge("123", p_const.TYPE_FLAT, "physnet1", None) ) self.assertTrue(flbr_fn.called) with mock.patch.object(self.lbm, "ensure_vlan_bridge") as vlbr_fn: self.assertTrue( self.lbm.ensure_physical_in_bridge("123", p_const.TYPE_VLAN, "physnet1", "1") ) self.assertTrue(vlbr_fn.called) with mock.patch.object(self.lbm, "ensure_vxlan_bridge") as vlbr_fn: self.lbm.vxlan_mode = lconst.VXLAN_MCAST self.assertTrue( self.lbm.ensure_physical_in_bridge("123", p_const.TYPE_VXLAN, "physnet1", "1") ) self.assertTrue(vlbr_fn.called) def test_add_tap_interface(self): with mock.patch.object(ip_lib, "device_exists") as de_fn: de_fn.return_value = False self.assertFalse( self.lbm.add_tap_interface("123", p_const.TYPE_VLAN, "physnet1", 
"1", "tap1") ) de_fn.return_value = True with mock.patch.object(self.lbm, "ensure_local_bridge") as en_fn,\ mock.patch.object(utils, "execute") as exec_fn,\ mock.patch.object(self.lbm, "get_bridge_for_tap_device") as get_br: exec_fn.return_value = False get_br.return_value = True self.assertTrue(self.lbm.add_tap_interface("123", p_const.TYPE_LOCAL, "physnet1", None, "tap1")) en_fn.assert_called_with("123") get_br.return_value = False exec_fn.return_value = True self.assertFalse(self.lbm.add_tap_interface("123", p_const.TYPE_LOCAL, "physnet1", None, "tap1")) with mock.patch.object(self.lbm, "ensure_physical_in_bridge") as ens_fn,\ mock.patch.object(self.lbm, "ensure_tap_mtu") as en_mtu_fn,\ mock.patch.object(self.lbm, "get_bridge_for_tap_device") as get_br: ens_fn.return_value = False self.assertFalse(self.lbm.add_tap_interface("123", p_const.TYPE_VLAN, "physnet1", "1", "tap1")) ens_fn.return_value = "eth0.1" get_br.return_value = "brq123" self.lbm.add_tap_interface("123", p_const.TYPE_VLAN, "physnet1", "1", "tap1") en_mtu_fn.assert_called_once_with("tap1", "eth0.1") def test_add_interface(self): with mock.patch.object(self.lbm, "add_tap_interface") as add_tap: self.lbm.add_interface("123", p_const.TYPE_VLAN, "physnet-1", "1", "234") add_tap.assert_called_with("123", p_const.TYPE_VLAN, "physnet-1", "1", "tap234") def test_delete_vlan_bridge(self): with mock.patch.object(ip_lib, "device_exists") as de_fn,\ mock.patch.object(self.lbm, "get_interfaces_on_bridge") as getif_fn,\ mock.patch.object(self.lbm, "remove_interface"),\ mock.patch.object(self.lbm, "get_interface_details") as if_det_fn,\ mock.patch.object(self.lbm, "update_interface_ip_details") as updif_fn,\ mock.patch.object(self.lbm, "delete_vxlan") as del_vxlan,\ mock.patch.object(utils, "execute") as exec_fn: de_fn.return_value = False self.lbm.delete_vlan_bridge("br0") self.assertFalse(getif_fn.called) de_fn.return_value = True getif_fn.return_value = ["eth0", "eth1", "vxlan-1002"] if_det_fn.return_value = ("ips", "gateway") exec_fn.return_value = False self.lbm.delete_vlan_bridge("br0") updif_fn.assert_called_with("eth1", "br0", "ips", "gateway") del_vxlan.assert_called_with("vxlan-1002") def test_delete_vlan_bridge_with_ip(self): with mock.patch.object(ip_lib, "device_exists") as de_fn,\ mock.patch.object(self.lbm, "get_interfaces_on_bridge") as getif_fn,\ mock.patch.object(self.lbm, "remove_interface"),\ mock.patch.object(self.lbm, "get_interface_details") as if_det_fn,\ mock.patch.object(self.lbm, "update_interface_ip_details") as updif_fn,\ mock.patch.object(self.lbm, "delete_vlan") as del_vlan,\ mock.patch.object(utils, "execute") as exec_fn: de_fn.return_value = True getif_fn.return_value = ["eth0", "eth1.1"] if_det_fn.return_value = ("ips", "gateway") exec_fn.return_value = False self.lbm.delete_vlan_bridge("br0") updif_fn.assert_called_with("eth1.1", "br0", "ips", "gateway") self.assertFalse(del_vlan.called) def test_delete_vlan_bridge_no_ip(self): with mock.patch.object(ip_lib, "device_exists") as de_fn,\ mock.patch.object(self.lbm, "get_interfaces_on_bridge") as getif_fn,\ mock.patch.object(self.lbm, "remove_interface"),\ mock.patch.object(self.lbm, "get_interface_details") as if_det_fn,\ mock.patch.object(self.lbm, "update_interface_ip_details") as updif_fn,\ mock.patch.object(self.lbm, "delete_vlan") as del_vlan,\ mock.patch.object(utils, "execute") as exec_fn: de_fn.return_value = True getif_fn.return_value = ["eth0", "eth1.1"] exec_fn.return_value = False if_det_fn.return_value = ([], None) 
self.lbm.delete_vlan_bridge("br0") del_vlan.assert_called_with("eth1.1") self.assertFalse(updif_fn.called) def test_delete_vxlan_bridge_no_int_mappings(self): interface_mappings = {} with mock.patch.object(linuxbridge_neutron_agent.LinuxBridgeManager, 'get_interface_by_ip', return_value=None): lbm = linuxbridge_neutron_agent.LinuxBridgeManager( interface_mappings) with mock.patch.object(ip_lib, "device_exists") as de_fn,\ mock.patch.object(lbm, "get_interfaces_on_bridge") as getif_fn,\ mock.patch.object(lbm, "remove_interface"),\ mock.patch.object(lbm, "delete_vxlan") as del_vxlan,\ mock.patch.object(utils, "execute") as exec_fn: de_fn.return_value = False lbm.delete_vlan_bridge("br0") self.assertFalse(getif_fn.called) de_fn.return_value = True getif_fn.return_value = ["vxlan-1002"] exec_fn.return_value = False lbm.delete_vlan_bridge("br0") del_vxlan.assert_called_with("vxlan-1002") def test_remove_empty_bridges(self): self.lbm.network_map = {'net1': mock.Mock(), 'net2': mock.Mock()} def tap_count_side_effect(*args): return 0 if args[0] == 'brqnet1' else 1 with mock.patch.object(self.lbm, "delete_vlan_bridge") as del_br_fn,\ mock.patch.object(self.lbm, "get_tap_devices_count", side_effect=tap_count_side_effect): self.lbm.remove_empty_bridges() del_br_fn.assert_called_once_with('brqnet1') def test_remove_interface(self): with mock.patch.object(ip_lib, "device_exists") as de_fn,\ mock.patch.object(self.lbm, "is_device_on_bridge") as isdev_fn,\ mock.patch.object(utils, "execute") as exec_fn: de_fn.return_value = False self.assertFalse(self.lbm.remove_interface("br0", "eth0")) self.assertFalse(isdev_fn.called) de_fn.return_value = True isdev_fn.return_value = False self.assertTrue(self.lbm.remove_interface("br0", "eth0")) isdev_fn.return_value = True exec_fn.return_value = True self.assertFalse(self.lbm.remove_interface("br0", "eth0")) exec_fn.return_value = False self.assertTrue(self.lbm.remove_interface("br0", "eth0")) def test_delete_vlan(self): with mock.patch.object(ip_lib, "device_exists") as de_fn,\ mock.patch.object(utils, "execute") as exec_fn: de_fn.return_value = False self.lbm.delete_vlan("eth1.1") self.assertFalse(exec_fn.called) de_fn.return_value = True exec_fn.return_value = False self.lbm.delete_vlan("eth1.1") self.assertTrue(exec_fn.called) def _check_vxlan_support(self, expected, vxlan_ucast_supported, vxlan_mcast_supported): with mock.patch.object(self.lbm, 'vxlan_ucast_supported', return_value=vxlan_ucast_supported),\ mock.patch.object(self.lbm, 'vxlan_mcast_supported', return_value=vxlan_mcast_supported): if expected == lconst.VXLAN_NONE: self.assertRaises(exceptions.VxlanNetworkUnsupported, self.lbm.check_vxlan_support) self.assertEqual(expected, self.lbm.vxlan_mode) else: self.lbm.check_vxlan_support() self.assertEqual(expected, self.lbm.vxlan_mode) def test_check_vxlan_support(self): self._check_vxlan_support(expected=lconst.VXLAN_UCAST, vxlan_ucast_supported=True, vxlan_mcast_supported=True) self._check_vxlan_support(expected=lconst.VXLAN_MCAST, vxlan_ucast_supported=False, vxlan_mcast_supported=True) self._check_vxlan_support(expected=lconst.VXLAN_NONE, vxlan_ucast_supported=False, vxlan_mcast_supported=False) self._check_vxlan_support(expected=lconst.VXLAN_NONE, vxlan_ucast_supported=False, vxlan_mcast_supported=False) def _check_vxlan_ucast_supported( self, expected, l2_population, iproute_arg_supported, fdb_append): cfg.CONF.set_override('l2_population', l2_population, 'VXLAN') with mock.patch.object(ip_lib, 'device_exists', return_value=False),\ 
mock.patch.object(self.lbm, 'delete_vxlan', return_value=None),\ mock.patch.object(self.lbm, 'ensure_vxlan', return_value=None),\ mock.patch.object( utils, 'execute', side_effect=None if fdb_append else RuntimeError()),\ mock.patch.object(ip_lib, 'iproute_arg_supported', return_value=iproute_arg_supported): self.assertEqual(expected, self.lbm.vxlan_ucast_supported()) def test_vxlan_ucast_supported(self): self._check_vxlan_ucast_supported( expected=False, l2_population=False, iproute_arg_supported=True, fdb_append=True) self._check_vxlan_ucast_supported( expected=False, l2_population=True, iproute_arg_supported=False, fdb_append=True) self._check_vxlan_ucast_supported( expected=False, l2_population=True, iproute_arg_supported=True, fdb_append=False) self._check_vxlan_ucast_supported( expected=True, l2_population=True, iproute_arg_supported=True, fdb_append=True) def _check_vxlan_mcast_supported( self, expected, vxlan_group, iproute_arg_supported): cfg.CONF.set_override('vxlan_group', vxlan_group, 'VXLAN') with mock.patch.object( ip_lib, 'iproute_arg_supported', return_value=iproute_arg_supported): self.assertEqual(expected, self.lbm.vxlan_mcast_supported()) def test_vxlan_mcast_supported(self): self._check_vxlan_mcast_supported( expected=False, vxlan_group='', iproute_arg_supported=True) self._check_vxlan_mcast_supported( expected=False, vxlan_group='224.0.0.1', iproute_arg_supported=False) self._check_vxlan_mcast_supported( expected=True, vxlan_group='224.0.0.1', iproute_arg_supported=True) class TestLinuxBridgeRpcCallbacks(base.BaseTestCase): def setUp(self): cfg.CONF.set_override('local_ip', LOCAL_IP, 'VXLAN') super(TestLinuxBridgeRpcCallbacks, self).setUp() class FakeLBAgent(object): def __init__(self): self.agent_id = 1 with mock.patch.object( linuxbridge_neutron_agent.LinuxBridgeManager, 'get_interface_by_ip', return_value=None): self.br_mgr = (linuxbridge_neutron_agent. 
LinuxBridgeManager({'physnet1': 'eth1'})) self.br_mgr.vxlan_mode = lconst.VXLAN_UCAST segment = mock.Mock() segment.network_type = 'vxlan' segment.segmentation_id = 1 self.br_mgr.network_map['net_id'] = segment self.lb_rpc = linuxbridge_neutron_agent.LinuxBridgeRpcCallbacks( object(), FakeLBAgent(), object() ) def test_network_delete(self): with mock.patch.object(self.lb_rpc.agent.br_mgr, "get_bridge_name") as get_br_fn,\ mock.patch.object(self.lb_rpc.agent.br_mgr, "delete_vlan_bridge") as del_fn: get_br_fn.return_value = "br0" self.lb_rpc.network_delete("anycontext", network_id="123") get_br_fn.assert_called_with("123") del_fn.assert_called_with("br0") def test_fdb_add(self): fdb_entries = {'net_id': {'ports': {'agent_ip': [constants.FLOODING_ENTRY, ['port_mac', 'port_ip']]}, 'network_type': 'vxlan', 'segment_id': 1}} with mock.patch.object(utils, 'execute', return_value='') as execute_fn: self.lb_rpc.fdb_add(None, fdb_entries) expected = [ mock.call(['bridge', 'fdb', 'show', 'dev', 'vxlan-1'], run_as_root=True), mock.call(['bridge', 'fdb', 'add', constants.FLOODING_ENTRY[0], 'dev', 'vxlan-1', 'dst', 'agent_ip'], run_as_root=True, check_exit_code=False), mock.call(['ip', 'neigh', 'replace', 'port_ip', 'lladdr', 'port_mac', 'dev', 'vxlan-1', 'nud', 'permanent'], run_as_root=True, check_exit_code=False), mock.call(['bridge', 'fdb', 'replace', 'port_mac', 'dev', 'vxlan-1', 'dst', 'agent_ip'], run_as_root=True, check_exit_code=False), ] execute_fn.assert_has_calls(expected) def test_fdb_ignore(self): fdb_entries = {'net_id': {'ports': {LOCAL_IP: [constants.FLOODING_ENTRY, ['port_mac', 'port_ip']]}, 'network_type': 'vxlan', 'segment_id': 1}} with mock.patch.object(utils, 'execute', return_value='') as execute_fn: self.lb_rpc.fdb_add(None, fdb_entries) self.lb_rpc.fdb_remove(None, fdb_entries) self.assertFalse(execute_fn.called) fdb_entries = {'other_net_id': {'ports': {'192.168.0.67': [constants.FLOODING_ENTRY, ['port_mac', 'port_ip']]}, 'network_type': 'vxlan', 'segment_id': 1}} with mock.patch.object(utils, 'execute', return_value='') as execute_fn: self.lb_rpc.fdb_add(None, fdb_entries) self.lb_rpc.fdb_remove(None, fdb_entries) self.assertFalse(execute_fn.called) def test_fdb_remove(self): fdb_entries = {'net_id': {'ports': {'agent_ip': [constants.FLOODING_ENTRY, ['port_mac', 'port_ip']]}, 'network_type': 'vxlan', 'segment_id': 1}} with mock.patch.object(utils, 'execute', return_value='') as execute_fn: self.lb_rpc.fdb_remove(None, fdb_entries) expected = [ mock.call(['bridge', 'fdb', 'del', constants.FLOODING_ENTRY[0], 'dev', 'vxlan-1', 'dst', 'agent_ip'], run_as_root=True, check_exit_code=False), mock.call(['ip', 'neigh', 'del', 'port_ip', 'lladdr', 'port_mac', 'dev', 'vxlan-1'], run_as_root=True, check_exit_code=False), mock.call(['bridge', 'fdb', 'del', 'port_mac', 'dev', 'vxlan-1', 'dst', 'agent_ip'], run_as_root=True, check_exit_code=False), ] execute_fn.assert_has_calls(expected) def test_fdb_update_chg_ip(self): fdb_entries = {'chg_ip': {'net_id': {'agent_ip': {'before': [['port_mac', 'port_ip_1']], 'after': [['port_mac', 'port_ip_2']]}}}} with mock.patch.object(utils, 'execute', return_value='') as execute_fn: self.lb_rpc.fdb_update(None, fdb_entries) expected = [ mock.call(['ip', 'neigh', 'replace', 'port_ip_2', 'lladdr', 'port_mac', 'dev', 'vxlan-1', 'nud', 'permanent'], run_as_root=True, check_exit_code=False), mock.call(['ip', 'neigh', 'del', 'port_ip_1', 'lladdr', 'port_mac', 'dev', 'vxlan-1'], run_as_root=True, check_exit_code=False) ] execute_fn.assert_has_calls(expected) 
    def test_fdb_update_chg_ip_empty_lists(self):
        fdb_entries = {'chg_ip': {'net_id': {'agent_ip': {}}}}
        self.lb_rpc.fdb_update(None, fdb_entries)
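# The scan_devices tests earlier in this module pin down the contract for
# turning the previous scan result, the set of tap devices currently present,
# and the queued updates into 'added'/'removed'/'updated'/'current' sets, both
# on a normal pass and on a full resync.  The helper below is only an
# illustrative re-statement of that contract as standalone set arithmetic; the
# function and argument names are invented here and this is not the agent's
# actual implementation, merely logic consistent with the expected results
# asserted above.
def scan_devices_contract(previous, registered_updates, current, sync):
    if sync:
        # On a resync every present device is reported as added again,
        # previously failed removals are retried together with devices that
        # have since vanished, and pending updates from the last pass are kept.
        added = set(current)
        removed = (previous['current'] - current) | previous['removed']
        updated = (previous['updated'] | registered_updates) & current
    else:
        added = current - previous['current']
        removed = previous['current'] - current
        # Updates are only reported for devices that still exist.
        updated = registered_updates & current
    return {'current': set(current), 'added': added,
            'removed': removed, 'updated': updated}
# For example, scan_devices_contract({'current': {1, 2}, 'updated': set(),
# 'added': set(), 'removed': set()}, set(), {2, 3}, sync=False) yields
# added={3} and removed={1}, matching test_scan_devices_added_removed.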
''' Created on 10 dec. 2013 @author: TuRz4m ''' import ConfigParser import logging import sys from api.APIUtils import BetaSerieAPI, BadLoginException, SickBeardAPI import os.path logger = logging.getLogger(__name__) logging.getLogger(__name__).setLevel(logging.INFO) logging.getLogger(__name__).addHandler(logging.StreamHandler()) logging.getLogger(__name__).addHandler(logging.FileHandler("logs/BetaBeard.log")) configFile = "BetaBeard.ini" configDbFile = "BetaBeard.db" param = {} paramDb = {} """ Load the config file & fill all the var. """ def checkConfig(config): try: global param param['login'] = config.get("BetaSeries", "login") param['password'] = config.get("BetaSeries", "password") param['sburl'] = config.get("SickBeard", "url") if (config.getboolean("SickBeard", "https")): param['scheme'] = "https" param['apikey'] = config.get("SickBeard", "apikey") param['location'] = config.get("SickBeard", "location") if (param['location'] == ""): param['location'] = None param['lang'] = config.get("SickBeard", "lang") if (param['lang'] == ""): param['lang'] = None param['flatten_folder'] = config.get("SickBeard", "flatten_folder") if (param['flatten_folder'] == ""): param['flatten_folder'] = None param['status'] = config.get("SickBeard", "status") if (param['status'] == ""): param['status'] = None param['initial'] = config.get("SickBeard", "initial") if (param['initial'] == ""): param['initial'] = None param['archive'] = config.get("SickBeard", "archive") if (param['archive'] == ""): param['archive'] = None param['fullUpdate'] = config.getboolean("BetaBeard", "fullUpdate") param['checkTimeLine'] = config.getboolean("BetaBeard", "checkTimeLine") param['demoMode'] = config.getboolean("BetaBeard", "demoMode") except ConfigParser.NoOptionError as ex: logger.error("[BetaBeard] Error in config file : %s", ex) return False except ConfigParser.NoSectionError as ex: logger.error("[BetaBeard] Error in config file : %s", ex) return False return True def loadDb(configToLoad): global paramDb if (os.path.exists(configDbFile)): configToLoad.read(configDbFile) try: paramDb['last_event_id'] = configToLoad.get("BetaBeard", "last_event_id") if (paramDb['last_event_id'] == ""): paramDb['last_event_id'] = None except ConfigParser.NoOptionError: logger.debug("[BetaBeard] Config file Tech not found. Use default.") paramDb['last_event_id'] = None except ConfigParser.NoSectionError: logger.debug("[BetaBeard] Config file Tech not found. Use default.") configToLoad.add_section("BetaBeard") paramDb['last_event_id'] = None """ Update the BetaBeard-tech.ini """ def updateDb(configToSave): logger.debug("[BetaBeard] Update file %s", configDbFile) cfgfile = open(configDbFile,'w') configToSave.write(cfgfile) cfgfile.close() logger.debug("[BetaBeard] File %s updated.", configDbFile) if __name__ == '__main__': # First of all, we need to reed the BetaBeard.ini config file. 
config = ConfigParser.SafeConfigParser() configDb = ConfigParser.SafeConfigParser() if (os.path.exists(configFile) == False): logger.error("[BetaBeard] Config file %s not found.", configFile) sys.exit(0) config.read(configFile) loadDb(configDb) if checkConfig(config) == False: sys.exit(0) # ----------- Init BetaSeries ----------- # try: beta = BetaSerieAPI(param['login'], param['password']) except BadLoginException as ex: logger.error("[BetaBeard] can't log into BetaSeries.com : %s", ex.value) sys.exit(0) logger.info("[BetaBeard] Login successfull.") # ----------- Init SickBeard ----------- # sickBeard = SickBeardAPI(param['sburl'], param['scheme'], param['apikey']) # ----------- Test SickBeard ----------- # if (sickBeard.ping() == False): logger.error("[BetaBeard] Can't ping SickBeard on url : %s://%s with apikey = %s",param['scheme'], param['sburl'], param['apikey']) sys.exit(0) logger.info("[BetaBeard] Ping SickBeard successfull.") # ----------- If fullUpdate, we retrieve all the current show and add them to sickbear.----------- # if paramDb['last_event_id'] == None: logger.debug("[BetaBeard] last_index_id is None") if param['fullUpdate'] == True: shows = beta.show_list(); logger.debug("[BetaBeard] shows : %s", shows) logger.info("[BetaBeard] Start processing shows.") for show in shows: logger.info("[BetaBeard] Add show in SickBeard : %s (%s)", show[1], show[0]) if (param['demoMode'] == False): success,message = sickBeard.add_show(show[0], param['location'], param['lang'], param['flatten_folder'], param['status'], param['initial'], param['archive']) if (success == False): logger.error("[BetaBeard] Can't add show %s (%s) to sickbeard : %s", show[1], show[0], message) # ----------- retrieve last event processed in betaseries----------- # param['last_event_id'], emptyList = beta.timeline_since(None) elif param['checkTimeLine']: logger.info("[BetaBeard] Start processing timeline.") param['last_event_id'], events = beta.timeline_since(paramDb['last_event_id']) logger.debug("[BetaBeard] Processing timeline : %s", events) if (events != None): for event in events: logger.debug("[BetaBeard] Event : %s", event) # - ADD SERIE - # if (event['type'] == 'add_serie'): betaid = str(event['ref_id']); tvdbid, title = beta.shows_tvdbid(betaid) logger.info("[BetaBeard] Add Show to sickbeard : %s (%s)", title, tvdbid) if (param['demoMode'] == False): success,message = sickBeard.add_show(tvdbid, param['location'], param['lang'], param['flatten_folder'], param['status'], param['initial'], param['archive']) if (success == False): logger.error("[BetaBeard] Can't add show %s (%s) to sickbeard : %s.", title, tvdbid, message) # - DELETE SERIE - # elif (event['type'] == 'del_serie'): betaid = str(event['ref_id']); tvdbid, title = beta.shows_tvdbid(betaid) logger.info("[BetaBeard] Delete Show from sickbeard : %s (%s)", title, tvdbid) if (param['demoMode'] == False): success, message = sickBeard.del_show(tvdbid) if (success == False): logger.error("[BetaBeard] Can't delete show %s (%s) from sickbeard : %s.", title, tvdbid, message) # - PAUSE SERIE - # elif (event['type'] == 'archive'): betaid = str(event['ref_id']); tvdbid, title = beta.shows_tvdbid(betaid) logger.info("[BetaBeard] Archive Show on sickbeard : %s (%s)", title, tvdbid) if (param['demoMode'] == False): success, message = sickBeard.pause_show(tvdbid, 1) if (success == False): logger.error("[BetaBeard] Can't pause show %s (%s) on sickbeard : %s.", title, tvdbid, message) # - UNPAUSE SERIE - # elif (event['type'] == 'unarchive'): betaid = 
str(event['ref_id']); tvdbid, title = beta.shows_tvdbid(betaid) logger.info("[BetaBeard] UnArchive Show on sickbeard : %s (%s)", title, tvdbid) if (param['demoMode'] == False): success, message = sickBeard.pause_show(tvdbid, 0) if (success == False): logger.error("[BetaBeard] Can't unpause show %s (%s) on sickbeard : %s.", title, tvdbid, message) logger.info("[BetaBeard] Timeline processing done.") # ----------- Update Last_event_id in config file.----------- # if (param['last_event_id'] != None): logger.debug("[BetaBeard] update config with last_event_id=%s", param['last_event_id']) configDb.set("BetaBeard", "last_event_id", str(param['last_event_id'])); updateDb(configDb); else: logger.debug("[BetaBeard] Can't update config file because last_event_id is null")
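# A minimal sketch (assuming the same Python 2 / ConfigParser environment as
# the script above) that writes a starter BetaBeard.ini containing every
# option checkConfig() reads.  All values below are placeholders; note that
# checkConfig() only sets param['scheme'] when the https flag is true, so this
# template enables https.
import ConfigParser


def write_starter_config(path="BetaBeard.ini"):
    starter = ConfigParser.SafeConfigParser()
    starter.add_section("BetaSeries")
    starter.set("BetaSeries", "login", "your-betaseries-login")
    starter.set("BetaSeries", "password", "your-betaseries-password")
    starter.add_section("SickBeard")
    starter.set("SickBeard", "url", "localhost:8081")
    starter.set("SickBeard", "https", "true")
    starter.set("SickBeard", "apikey", "your-sickbeard-api-key")
    # Optional settings: checkConfig() maps empty strings to None (defaults).
    for optional in ("location", "lang", "flatten_folder",
                     "status", "initial", "archive"):
        starter.set("SickBeard", optional, "")
    starter.add_section("BetaBeard")
    starter.set("BetaBeard", "fullUpdate", "true")
    starter.set("BetaBeard", "checkTimeLine", "true")
    # demoMode=true keeps the script from actually modifying SickBeard.
    starter.set("BetaBeard", "demoMode", "true")
    with open(path, "w") as cfgfile:
        starter.write(cfgfile)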
#!/usr/bin/env python2.7 # Copyright 2015, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Generates the appropriate build.json data for all the end2end tests.""" import yaml import collections import hashlib FixtureOptions = collections.namedtuple( 'FixtureOptions', 'fullstack includes_proxy dns_resolver secure platforms ci_mac tracing exclude_configs') default_unsecure_fixture_options = FixtureOptions( True, False, True, False, ['windows', 'linux', 'mac', 'posix'], True, False, []) socketpair_unsecure_fixture_options = default_unsecure_fixture_options._replace(fullstack=False, dns_resolver=False) default_secure_fixture_options = default_unsecure_fixture_options._replace(secure=True) uds_fixture_options = default_unsecure_fixture_options._replace(dns_resolver=False, platforms=['linux', 'mac', 'posix']) fd_unsecure_fixture_options = default_unsecure_fixture_options._replace( dns_resolver=False, fullstack=False, platforms=['linux', 'mac', 'posix']) # maps fixture name to whether it requires the security library END2END_FIXTURES = { 'h2_compress': default_unsecure_fixture_options, 'h2_census': default_unsecure_fixture_options, 'h2_fakesec': default_secure_fixture_options._replace(ci_mac=False), 'h2_fd': fd_unsecure_fixture_options, 'h2_full': default_unsecure_fixture_options, 'h2_full+pipe': default_unsecure_fixture_options._replace( platforms=['linux']), 'h2_full+trace': default_unsecure_fixture_options._replace(tracing=True), 'h2_loadreporting': default_unsecure_fixture_options, 'h2_oauth2': default_secure_fixture_options._replace(ci_mac=False), 'h2_proxy': default_unsecure_fixture_options._replace(includes_proxy=True, ci_mac=False), 'h2_sockpair_1byte': socketpair_unsecure_fixture_options._replace( ci_mac=False, exclude_configs=['msan']), 'h2_sockpair': socketpair_unsecure_fixture_options._replace(ci_mac=False), 'h2_sockpair+trace': socketpair_unsecure_fixture_options._replace( ci_mac=False, tracing=True), 'h2_ssl': default_secure_fixture_options, 'h2_ssl_cert': default_secure_fixture_options, 'h2_ssl_proxy': 
default_secure_fixture_options._replace(includes_proxy=True, ci_mac=False), 'h2_uds': uds_fixture_options, } TestOptions = collections.namedtuple( 'TestOptions', 'needs_fullstack needs_dns proxyable secure traceable cpu_cost') default_test_options = TestOptions(False, False, True, False, True, 1.0) connectivity_test_options = default_test_options._replace(needs_fullstack=True) LOWCPU = 0.1 # maps test names to options END2END_TESTS = { 'bad_hostname': default_test_options, 'binary_metadata': default_test_options, 'call_creds': default_test_options._replace(secure=True), 'cancel_after_accept': default_test_options._replace(cpu_cost=LOWCPU), 'cancel_after_client_done': default_test_options, 'cancel_after_invoke': default_test_options._replace(cpu_cost=LOWCPU), 'cancel_before_invoke': default_test_options._replace(cpu_cost=LOWCPU), 'cancel_in_a_vacuum': default_test_options._replace(cpu_cost=LOWCPU), 'cancel_with_status': default_test_options._replace(cpu_cost=LOWCPU), 'compressed_payload': default_test_options._replace(proxyable=False), 'connectivity': connectivity_test_options._replace(proxyable=False, cpu_cost=LOWCPU), 'default_host': default_test_options._replace(needs_fullstack=True, needs_dns=True), 'disappearing_server': connectivity_test_options, 'empty_batch': default_test_options, 'filter_causes_close': default_test_options, 'graceful_server_shutdown': default_test_options._replace(cpu_cost=LOWCPU), 'hpack_size': default_test_options._replace(proxyable=False, traceable=False), 'high_initial_seqno': default_test_options, 'idempotent_request': default_test_options, 'invoke_large_request': default_test_options, 'large_metadata': default_test_options, 'max_concurrent_streams': default_test_options._replace(proxyable=False), 'max_message_length': default_test_options, 'negative_deadline': default_test_options, 'network_status_change': default_test_options, 'no_op': default_test_options, 'payload': default_test_options, 'ping_pong_streaming': default_test_options, 'ping': connectivity_test_options._replace(proxyable=False), 'registered_call': default_test_options, 'request_with_flags': default_test_options._replace( proxyable=False, cpu_cost=LOWCPU), 'request_with_payload': default_test_options, 'server_finishes_request': default_test_options, 'shutdown_finishes_calls': default_test_options, 'shutdown_finishes_tags': default_test_options, 'simple_delayed_request': connectivity_test_options, 'simple_metadata': default_test_options, 'simple_request': default_test_options, 'streaming_error_response': default_test_options, 'trailing_metadata': default_test_options, } def compatible(f, t): if END2END_TESTS[t].needs_fullstack: if not END2END_FIXTURES[f].fullstack: return False if END2END_TESTS[t].needs_dns: if not END2END_FIXTURES[f].dns_resolver: return False if not END2END_TESTS[t].proxyable: if END2END_FIXTURES[f].includes_proxy: return False if not END2END_TESTS[t].traceable: if END2END_FIXTURES[f].tracing: return False return True def without(l, e): l = l[:] l.remove(e) return l def main(): sec_deps = [ 'grpc_test_util', 'grpc', 'gpr_test_util', 'gpr' ] unsec_deps = [ 'grpc_test_util_unsecure', 'grpc_unsecure', 'gpr_test_util', 'gpr' ] json = { '#': 'generated with test/end2end/gen_build_json.py', 'libs': [ { 'name': 'end2end_tests', 'build': 'private', 'language': 'c', 'secure': True, 'src': ['test/core/end2end/end2end_tests.c'] + [ 'test/core/end2end/tests/%s.c' % t for t in sorted(END2END_TESTS.keys())], 'headers': ['test/core/end2end/tests/cancel_test_helpers.h', 
'test/core/end2end/end2end_tests.h'], 'deps': sec_deps, 'vs_proj_dir': 'test/end2end/tests', } ] + [ { 'name': 'end2end_nosec_tests', 'build': 'private', 'language': 'c', 'secure': False, 'src': ['test/core/end2end/end2end_nosec_tests.c'] + [ 'test/core/end2end/tests/%s.c' % t for t in sorted(END2END_TESTS.keys()) if not END2END_TESTS[t].secure], 'headers': ['test/core/end2end/tests/cancel_test_helpers.h', 'test/core/end2end/end2end_tests.h'], 'deps': unsec_deps, 'vs_proj_dir': 'test/end2end/tests', } ], 'targets': [ { 'name': '%s_test' % f, 'build': 'test', 'language': 'c', 'run': False, 'src': ['test/core/end2end/fixtures/%s.c' % f], 'platforms': END2END_FIXTURES[f].platforms, 'ci_platforms': (END2END_FIXTURES[f].platforms if END2END_FIXTURES[f].ci_mac else without( END2END_FIXTURES[f].platforms, 'mac')), 'deps': [ 'end2end_tests' ] + sec_deps, 'vs_proj_dir': 'test/end2end/fixtures', } for f in sorted(END2END_FIXTURES.keys()) ] + [ { 'name': '%s_nosec_test' % f, 'build': 'test', 'language': 'c', 'secure': 'no', 'src': ['test/core/end2end/fixtures/%s.c' % f], 'run': False, 'platforms': END2END_FIXTURES[f].platforms, 'ci_platforms': (END2END_FIXTURES[f].platforms if END2END_FIXTURES[f].ci_mac else without( END2END_FIXTURES[f].platforms, 'mac')), 'deps': [ 'end2end_nosec_tests' ] + unsec_deps, 'vs_proj_dir': 'test/end2end/fixtures', } for f in sorted(END2END_FIXTURES.keys()) if not END2END_FIXTURES[f].secure ], 'tests': [ { 'name': '%s_test' % f, 'args': [t], 'exclude_configs': [], 'platforms': END2END_FIXTURES[f].platforms, 'ci_platforms': (END2END_FIXTURES[f].platforms if END2END_FIXTURES[f].ci_mac else without( END2END_FIXTURES[f].platforms, 'mac')), 'flaky': False, 'language': 'c', 'cpu_cost': END2END_TESTS[t].cpu_cost, } for f in sorted(END2END_FIXTURES.keys()) for t in sorted(END2END_TESTS.keys()) if compatible(f, t) ] + [ { 'name': '%s_nosec_test' % f, 'args': [t], 'exclude_configs': END2END_FIXTURES[f].exclude_configs, 'platforms': END2END_FIXTURES[f].platforms, 'ci_platforms': (END2END_FIXTURES[f].platforms if END2END_FIXTURES[f].ci_mac else without( END2END_FIXTURES[f].platforms, 'mac')), 'flaky': False, 'language': 'c', 'cpu_cost': END2END_TESTS[t].cpu_cost, } for f in sorted(END2END_FIXTURES.keys()) if not END2END_FIXTURES[f].secure for t in sorted(END2END_TESTS.keys()) if compatible(f, t) and not END2END_TESTS[t].secure ], 'core_end2end_tests': dict( (t, END2END_TESTS[t].secure) for t in END2END_TESTS.keys() ) } print yaml.dump(json) if __name__ == '__main__': main()
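# A small illustrative helper (not part of the generator) showing how
# compatible() prunes the fixture/test cross product, assuming the tables
# above are in scope.  For example, ('h2_proxy', 'compressed_payload') is
# excluded because the test is not proxyable and the fixture includes a
# proxy, while ('h2_sockpair', 'connectivity') is excluded because the test
# needs a full stack that the socketpair fixture does not provide.
def incompatible_pairs():
    return sorted((f, t)
                  for f in END2END_FIXTURES
                  for t in END2END_TESTS
                  if not compatible(f, t))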
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright: (c) 2018, F5 Networks Inc. # GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type DOCUMENTATION = r''' --- module: bigip_monitor_dns short_description: Manage DNS monitors on a BIG-IP description: - Manages DNS health monitors on a BIG-IP. version_added: "1.0.0" options: name: description: - Specifies the name of the monitor. type: str required: True parent: description: - The parent template of this monitor template. Once this value has been set, it cannot be changed. By default, this value is the C(dns) parent on the C(Common) partition. type: str default: /Common/dns description: description: - The description of the monitor. type: str interval: description: - The interval specifying how frequently the monitor instance of this template will run. - This value B(must) be less than the C(timeout) value. - When creating a new monitor, if this parameter is not provided, the default C(5) will be used. type: int up_interval: description: - Specifies the interval for the system to use to perform the health check when a resource is up. - When C(0), specifies the system uses the interval specified in C(interval) to check the health of the resource. - When any other number, enables you to specify a different interval to use when checking the health of a resource that is up. - When creating a new monitor, if this parameter is not provided, the default C(0) will be used. type: int timeout: description: - The number of seconds in which the node or service must respond to the monitor request. - If the target responds within the set time period, it is considered up. - If the target does not respond within the set time period, it is considered down. - You can change this to any number, however, it should be 3 times the interval number of seconds plus 1 second. - If this parameter is not provided when creating a new monitor, the default value will be C(16). type: int transparent: description: - Specifies whether the monitor operates in transparent mode. - Monitors in transparent mode can monitor pool members through firewalls. - When creating a new monitor, if this parameter is not provided, the default value will be C(no). type: bool reverse: description: - Specifies whether the monitor operates in reverse mode. - When the monitor is in reverse mode, a successful receive string match marks the monitored object down instead of up. You can use the this mode only if you configure the C(receive) option. - This parameter is not compatible with the C(time_until_up) parameter. If C(time_until_up) is specified, it must be C(0). Or, if it already exists, it must be C(0). type: bool receive: description: - Specifies the IP address the monitor uses from the resource record sections of the DNS response. - The IP address should be specified in the dotted-decimal notation or IPv6 notation. type: str time_until_up: description: - Specifies the amount of time in seconds after the first successful response before a node will be marked up. - A value of 0 will cause a node to be marked up immediately after a valid response is received from the node. - If this parameter is not provided when creating a new monitor, the default value will be C(0). type: int manual_resume: description: - Specifies whether the system automatically changes the status of a resource to B(enabled) at the next successful monitor check. 
- If C(yes), you must manually re-enable the resource before the system can use it for load balancing connections. - When creating a new monitor, if this parameter is not specified, the default value is C(no). - When C(yes), specifies you must manually re-enable the resource after an unsuccessful monitor check. - When C(no), specifies the system automatically changes the status of a resource to B(enabled) at the next successful monitor check. type: bool ip: description: - IP address part of the IP/port definition. - If this parameter is not provided when creating a new monitor, the default value will be C(*). type: str port: description: - Port address part of the IP/port definition. - If this parameter is not provided when creating a new monitor, the default value will be C(*). - Note that if specifying an IP address, you must use a value between 1 and 65535. type: str query_name: description: - Specifies a query name for the monitor to use in a DNS query. type: str query_type: description: - Specifies the type of DNS query the monitor sends. - When creating a new monitor, if this parameter is not specified, the default value is C(a). - When C(a), specifies the monitor will send a DNS query of type A. - When C(aaaa), specifies the monitor will send a DNS query of type AAAA. type: str choices: - a - aaaa answer_section_contains: description: - Specifies the type of DNS query the monitor sends. - When creating a new monitor, if this value is not specified, the default value is C(query-type). - When C(query-type), specifies that the response should contain at least one answer of which the resource record type matches the query type. - When C(any-type), specifies the DNS message should contain at least one answer. - When C(anything), specifies an empty answer is enough to mark the status of the node up. type: str choices: - any-type - anything - query-type accept_rcode: description: - Specifies the RCODE required in the response for an up status. - When creating a new monitor, if this parameter is not specified, the default value is C(no-error). - When C(no-error), specifies the status of the node will be marked up if the received DNS message has no error. - When C(anything), specifies the status of the node will be marked up irrespective of the RCODE in the DNS message received. - If this parameter is set to C(anything), it will disregard the C(receive) string, and nullify it if the monitor is being updated. type: str choices: - no-error - anything adaptive: description: - Specifies whether adaptive response time monitoring is enabled for this monitor. - When C(yes), the monitor determines the state of a service based on how divergent from the mean latency a monitor probe for that service is allowed to be. Also, values for the C(allowed_divergence), C(adaptive_limit), and and C(sampling_timespan) will be enforced. - When C(disabled), the monitor determines the state of a service based on the C(interval), C(up_interval), C(time_until_up), and C(timeout) monitor settings. type: bool allowed_divergence_type: description: - When specifying a new monitor, if C(adaptive) is C(yes), the default is C(relative). - When C(absolute), the number of milliseconds the latency of a monitor probe can exceed the mean latency of a monitor probe for the service being probed. In typical cases, if the monitor detects three probes in a row that miss the latency value you set, the pool member or node is marked down. 
- When C(relative), the percentage of deviation the latency of a monitor probe can exceed the mean latency of a monitor probe for the service being probed. type: str choices: - relative - absolute allowed_divergence_value: description: - When specifying a new monitor, if C(adaptive) is C(yes), and C(type) is C(relative), the default is C(25) percent. type: int adaptive_limit: description: - Specifies the absolute number of milliseconds that may not be exceeded by a monitor probe, regardless of C(allowed_divergence) setting, for a probe to be considered successful. - This value applies regardless of the value of the C(allowed_divergence) setting. - While this value can be configured when C(adaptive) is C(no), it will not take effect on the system until C(adaptive) is C(yes). type: int sampling_timespan: description: - Specifies the length, in seconds, of the probe history window the system uses to calculate the mean latency and standard deviation of a monitor probe. - While this value can be configured when C(adaptive) is C(no), it will not take effect on the system until C(adaptive) is C(yes). type: int partition: description: - Device partition to manage resources on. type: str default: Common state: description: - When C(present), ensures the monitor exists. - When C(absent), ensures the monitor is removed. type: str choices: - present - absent default: present extends_documentation_fragment: f5networks.f5_modules.f5 author: - Tim Rupp (@caphrim007) - Wojciech Wypior (@wojtek0806) ''' EXAMPLES = r''' - name: Create a DNS monitor bigip_monitor_dns: name: DNS-UDP-V6 interval: 2 query_name: localhost query_type: aaaa up_interval: 5 adaptive: no state: present provider: user: admin password: secret server: lb.mydomain.com delegate_to: localhost ''' RETURN = r''' parent: description: New parent template of the monitor. returned: changed type: str sample: http ip: description: The new IP of IP/port definition. returned: changed type: str sample: 10.12.13.14 interval: description: The new interval in which to run the monitor check. returned: changed type: int sample: 2 timeout: description: The new timeout in which the remote system must respond to the monitor. returned: changed type: int sample: 10 time_until_up: description: The new time in which to mark a system as up after first successful response. returned: changed type: int sample: 2 adaptive: description: Whether adaptive is enabled or not. returned: changed type: bool sample: yes accept_rcode: description: RCODE required in the response for an up status. returned: changed type: str sample: no-error allowed_divergence_type: description: Type of divergence used for adaptive response time monitoring. returned: changed type: str sample: absolute allowed_divergence_value: description: - Value of the type of divergence used for adaptive response time monitoring. - May be C(percent) or C(ms) depending on whether C(relative) or C(absolute). returned: changed type: int sample: 25 description: description: The description of the monitor. returned: changed type: str sample: Important Monitor adaptive_limit: description: Absolute number of milliseconds that may not be exceeded by a monitor probe. returned: changed type: int sample: 200 sampling_timespan: description: Absolute number of milliseconds that may not be exceeded by a monitor probe. returned: changed type: int sample: 200 answer_section_contains: description: Type of DNS query that the monitor sends. 
returned: changed type: str sample: query-type manual_resume: description: - Whether the system automatically changes the status of a resource to enabled at the next successful monitor check. returned: changed type: str sample: query-type up_interval: description: Interval for the system to use to perform the health check when a resource is up. returned: changed type: int sample: 0 query_name: description: Query name for the monitor to use in a DNS query. returned: changed type: str sample: foo query_type: description: Type of DNS query the monitor sends. Either C(a) or C(aaaa). returned: changed type: str sample: aaaa receive: description: IP address the monitor uses from the resource record sections of the DNS response. returned: changed type: str sample: 2.3.2.4 reverse: description: Whether the monitor operates in reverse mode. returned: changed type: bool sample: yes port: description: - Alias port or service for the monitor to check, on behalf of the pools or pool members with which the monitor is associated. returned: changed type: str sample: 80 transparent: description: Whether the monitor operates in transparent mode. returned: changed type: bool sample: no ''' from datetime import datetime from ansible.module_utils.basic import ( AnsibleModule, env_fallback ) from ..module_utils.bigip import F5RestClient from ..module_utils.common import ( F5ModuleError, AnsibleF5Parameters, transform_name, f5_argument_spec, flatten_boolean, fq_name ) from ..module_utils.compare import cmp_str_with_none from ..module_utils.ipaddress import ( is_valid_ip, validate_ip_v6_address, validate_ip_address ) from ..module_utils.icontrol import tmos_version from ..module_utils.teem import send_teem class Parameters(AnsibleF5Parameters): api_map = { 'acceptRcode': 'accept_rcode', 'adaptiveDivergenceType': 'allowed_divergence_type', 'adaptiveDivergenceValue': 'allowed_divergence_value', 'adaptiveLimit': 'adaptive_limit', 'adaptiveSamplingTimespan': 'sampling_timespan', 'answerContains': 'answer_section_contains', 'manualResume': 'manual_resume', 'timeUntilUp': 'time_until_up', 'upInterval': 'up_interval', 'qname': 'query_name', 'qtype': 'query_type', 'recv': 'receive', 'defaultsFrom': 'parent', } api_attributes = [ 'adaptive', 'acceptRcode', 'adaptiveDivergenceType', 'adaptiveDivergenceValue', 'adaptiveLimit', 'adaptiveSamplingTimespan', 'answerContains', 'defaultsFrom', 'description', 'destination', 'interval', 'manualResume', 'qname', 'qtype', 'recv', 'reverse', 'timeout', 'timeUntilUp', 'transparent', 'upInterval', 'destination', ] returnables = [ 'adaptive', 'accept_rcode', 'allowed_divergence_type', 'allowed_divergence_value', 'description', 'adaptive_limit', 'sampling_timespan', 'answer_section_contains', 'manual_resume', 'time_until_up', 'up_interval', 'query_name', 'query_type', 'receive', 'reverse', 'timeout', 'interval', 'transparent', 'parent', 'ip', 'port', ] updatables = [ 'adaptive', 'accept_rcode', 'allowed_divergence_type', 'allowed_divergence_value', 'adaptive_limit', 'sampling_timespan', 'answer_section_contains', 'description', 'manual_resume', 'time_until_up', 'up_interval', 'query_name', 'query_type', 'receive', 'reverse', 'timeout', 'transparent', 'parent', 'destination', 'interval', ] @property def type(self): return 'dns' @property def destination(self): if self.ip is None and self.port is None: return None destination = '{0}:{1}'.format(self.ip, self.port) return destination @destination.setter def destination(self, value): ip, port = value.split(':') self._values['ip'] = ip 
self._values['port'] = port @property def interval(self): if self._values['interval'] is None: return None # Per BZ617284, the BIG-IP UI does not raise a warning about this. # So I do if 1 > int(self._values['interval']) > 86400: raise F5ModuleError( "Interval value must be between 1 and 86400" ) return int(self._values['interval']) @property def timeout(self): if self._values['timeout'] is None: return None return int(self._values['timeout']) @property def ip(self): if self._values['ip'] is None: return None if self._values['ip'] in ['*', '0.0.0.0']: return '*' elif is_valid_ip(self._values['ip']): return self._values['ip'] else: raise F5ModuleError( "The provided 'ip' parameter is not an IP address." ) @property def receive(self): if self._values['receive'] is None: return None if self._values['receive'] == '': return '' if is_valid_ip(self._values['receive']): return self._values['receive'] raise F5ModuleError( "The specified 'receive' parameter must be either an IPv4 or v6 address." ) @property def port(self): if self._values['port'] is None: return None elif self._values['port'] == '*': return '*' return int(self._values['port']) @property def time_until_up(self): if self._values['time_until_up'] is None: return None return int(self._values['time_until_up']) @property def parent(self): if self._values['parent'] is None: return None result = fq_name(self.partition, self._values['parent']) return result class ApiParameters(Parameters): @property def description(self): if self._values['description'] in [None, 'none']: return None return self._values['description'] class ModuleParameters(Parameters): @property def description(self): if self._values['description'] is None: return None elif self._values['description'] in ['none', '']: return '' return self._values['description'] @property def manual_resume(self): if self._values['manual_resume'] is None: return None elif self._values['manual_resume'] is True: return 'enabled' return 'disabled' @property def reverse(self): if self._values['reverse'] is None: return None elif self._values['reverse'] is True: return 'enabled' return 'disabled' @property def transparent(self): if self._values['transparent'] is None: return None elif self._values['transparent'] is True: return 'enabled' return 'disabled' @property def adaptive(self): if self._values['adaptive'] is None: return None elif self._values['adaptive'] is True: return 'enabled' return 'disabled' class Changes(Parameters): def to_return(self): result = {} try: for returnable in self.returnables: result[returnable] = getattr(self, returnable) result = self._filter_params(result) except Exception: raise return result class UsableChanges(Changes): pass class ReportableChanges(Changes): @property def manual_resume(self): return flatten_boolean(self._values['manual_resume']) @property def reverse(self): return flatten_boolean(self._values['reverse']) @property def transparent(self): return flatten_boolean(self._values['transparent']) @property def adaptive(self): return flatten_boolean(self._values['adaptive']) class Difference(object): def __init__(self, want, have=None): self.want = want self.have = have def compare(self, param): try: result = getattr(self, param) return result except AttributeError: return self.__default(param) @property def parent(self): if self.want.parent != self.have.parent: raise F5ModuleError( "The parent monitor cannot be changed" ) @property def destination(self): if self.want.ip is None and self.want.port is None: return None if self.want.port is None: 
self.want.update({'port': self.have.port}) if self.want.ip is None: self.want.update({'ip': self.have.ip}) if self.want.port in [None, '*'] and self.want.ip != '*': raise F5ModuleError( "Specifying an IP address requires that a port number be specified" ) if self.want.destination != self.have.destination: return self.want.destination @property def interval(self): if self.want.timeout is not None and self.want.interval is not None: if self.want.interval >= self.want.timeout: raise F5ModuleError( "Parameter 'interval' must be less than 'timeout'." ) elif self.want.timeout is not None: if self.have.interval >= self.want.timeout: raise F5ModuleError( "Parameter 'interval' must be less than 'timeout'." ) elif self.want.interval is not None: if self.want.interval >= self.have.timeout: raise F5ModuleError( "Parameter 'interval' must be less than 'timeout'." ) if self.want.interval != self.have.interval: return self.want.interval def __default(self, param): attr1 = getattr(self.want, param) try: attr2 = getattr(self.have, param) if attr1 != attr2: return attr1 except AttributeError: return attr1 @property def description(self): return cmp_str_with_none(self.want.description, self.have.description) class ModuleManager(object): def __init__(self, *args, **kwargs): self.module = kwargs.get('module', None) self.client = F5RestClient(**self.module.params) self.want = ModuleParameters(params=self.module.params) self.have = ApiParameters() self.changes = UsableChanges() def _set_changed_options(self): changed = {} for key in Parameters.returnables: if getattr(self.want, key) is not None: changed[key] = getattr(self.want, key) if changed: self.changes = UsableChanges(params=changed) def _update_changed_options(self): diff = Difference(self.want, self.have) updatables = Parameters.updatables changed = dict() for k in updatables: change = diff.compare(k) if change is None: continue else: if isinstance(change, dict): changed.update(change) else: changed[k] = change if changed: self.changes = UsableChanges(params=changed) return True return False def should_update(self): result = self._update_changed_options() if result: return True return False def exec_module(self): start = datetime.now().isoformat() version = tmos_version(self.client) changed = False result = dict() state = self.want.state if state == "present": changed = self.present() elif state == "absent": changed = self.absent() reportable = ReportableChanges(params=self.changes.to_return()) changes = reportable.to_return() result.update(**changes) result.update(dict(changed=changed)) self._announce_deprecations(result) send_teem(start, self.module, version) return result def _announce_deprecations(self, result): warnings = result.pop('__warnings', []) for warning in warnings: self.client.module.deprecate( msg=warning['msg'], version=warning['version'] ) def present(self): if self.exists(): return self.update() else: return self.create() def exists(self): uri = "https://{0}:{1}/mgmt/tm/ltm/monitor/dns/{2}".format( self.client.provider['server'], self.client.provider['server_port'], transform_name(self.want.partition, self.want.name) ) resp = self.client.api.get(uri) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if resp.status == 404 or 'code' in response and response['code'] == 404: return False if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]: return True errors = [401, 403, 409, 500, 501, 502, 503, 504] if resp.status in errors or 'code' in response and response['code'] in 
errors: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) def _address_type_matches_query_type(self, type, validator): if self.want.query_type == type and self.have.query_type == type: if self.want.receive is not None and validator(self.want.receive): return True if self.have.receive is not None and validator(self.have.receive): return True def update(self): self.have = self.read_current_from_device() if not self.should_update(): return False if self.want.reverse == 'enabled': if not self.want.receive and not self.have.receive: raise F5ModuleError( "A 'receive' string must be specified when setting 'reverse'." ) if self.want.time_until_up != 0 and self.have.time_until_up != 0: raise F5ModuleError( "Monitors with the 'reverse' attribute are not currently compatible with 'time_until_up'." ) if self._address_type_matches_query_type('a', validate_ip_v6_address): raise F5ModuleError( "Monitor has a IPv6 address. Only a 'query_type' of 'aaaa' is supported for IPv6." ) elif self._address_type_matches_query_type('aaaa', validate_ip_address): raise F5ModuleError( "Monitor has a IPv4 address. Only a 'query_type' of 'a' is supported for IPv4." ) if self.want.accept_rcode == 'anything': if self.want.receive is not None and is_valid_ip(self.want.receive) and self.have.receive is not None: raise F5ModuleError( "No 'receive' string may be specified, or exist, when 'accept_rcode' is 'anything'." ) elif self.want.receive is None and self.have.receive is not None: self.want.update({'receive': ''}) if self.module.check_mode: return True self.update_on_device() return True def remove(self): if self.module.check_mode: return True self.remove_from_device() if self.exists(): raise F5ModuleError("Failed to delete the resource.") return True def create(self): self._set_changed_options() if self.want.reverse == 'enabled': if self.want.time_until_up != 0: raise F5ModuleError( "Monitors with the 'reverse' attribute are not currently compatible with 'time_until_up'." ) if not self.want.receive: raise F5ModuleError( "A 'receive' string must be specified when setting 'reverse'." ) if self.want.receive is not None and validate_ip_v6_address(self.want.receive) and self.want.query_type == 'a': raise F5ModuleError( "Monitor has a IPv6 address. Only a 'query_type' of 'aaaa' is supported for IPv6." ) elif (self.want.receive is not None and validate_ip_address(self.want.receive) and self.want.query_type == 'aaaa'): raise F5ModuleError( "Monitor has a IPv4 address. Only a 'query_type' of 'a' is supported for IPv4." ) if self.want.accept_rcode == 'anything': if self.want.receive is not None and is_valid_ip(self.want.receive): raise F5ModuleError( "No 'receive' string may be specified, or exist, when 'accept_rcode' is 'anything'." ) elif self.want.receive is None: self.want.update({'receive': ''}) if self.want.query_name is None: raise F5ModuleError( "'query_name' is required when creating a new DNS monitor." 
) if self.module.check_mode: return True self.create_on_device() return True def create_on_device(self): params = self.changes.api_params() params['name'] = self.want.name params['partition'] = self.want.partition uri = "https://{0}:{1}/mgmt/tm/ltm/monitor/dns/".format( self.client.provider['server'], self.client.provider['server_port'] ) resp = self.client.api.post(uri, json=params) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]: return True raise F5ModuleError(resp.content) def update_on_device(self): params = self.changes.api_params() uri = "https://{0}:{1}/mgmt/tm/ltm/monitor/dns/{2}".format( self.client.provider['server'], self.client.provider['server_port'], transform_name(self.want.partition, self.want.name) ) resp = self.client.api.patch(uri, json=params) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]: return True raise F5ModuleError(resp.content) def absent(self): if self.exists(): return self.remove() return False def remove_from_device(self): uri = "https://{0}:{1}/mgmt/tm/ltm/monitor/dns/{2}".format( self.client.provider['server'], self.client.provider['server_port'], transform_name(self.want.partition, self.want.name) ) resp = self.client.api.delete(uri) if resp.status == 200: return True def read_current_from_device(self): uri = "https://{0}:{1}/mgmt/tm/ltm/monitor/dns/{2}".format( self.client.provider['server'], self.client.provider['server_port'], transform_name(self.want.partition, self.want.name) ) resp = self.client.api.get(uri) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]: return ApiParameters(params=response) raise F5ModuleError(resp.content) class ArgumentSpec(object): def __init__(self): self.supports_check_mode = True argument_spec = dict( name=dict(required=True), parent=dict(default='/Common/dns'), receive=dict(), ip=dict(), description=dict(), port=dict(), interval=dict(type='int'), timeout=dict(type='int'), manual_resume=dict(type='bool'), reverse=dict(type='bool'), transparent=dict(type='bool'), time_until_up=dict(type='int'), up_interval=dict(type='int'), accept_rcode=dict(choices=['no-error', 'anything']), adaptive=dict(type='bool'), sampling_timespan=dict(type='int'), adaptive_limit=dict(type='int'), answer_section_contains=dict( choices=['any-type', 'anything', 'query-type'] ), query_name=dict(), query_type=dict(choices=['a', 'aaaa']), allowed_divergence_type=dict(choices=['relative', 'absolute']), allowed_divergence_value=dict(type='int'), state=dict( default='present', choices=['present', 'absent'] ), partition=dict( default='Common', fallback=(env_fallback, ['F5_PARTITION']) ) ) self.argument_spec = {} self.argument_spec.update(f5_argument_spec) self.argument_spec.update(argument_spec) def main(): spec = ArgumentSpec() module = AnsibleModule( argument_spec=spec.argument_spec, supports_check_mode=spec.supports_check_mode, ) try: mm = ModuleManager(module=module) results = mm.exec_module() module.exit_json(**results) except F5ModuleError as ex: module.fail_json(msg=str(ex)) if __name__ == '__main__': main()
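# Illustrative sketch (not part of the original module). The Parameters and
# Difference classes above fold 'ip' and 'port' into a single 'destination'
# string, and their error messages describe two timing rules: 'interval' must
# lie within 1..86400 and must be smaller than 'timeout'. The standalone
# helpers below restate those rules in isolation so they are easy to verify;
# the helper names are hypothetical and are not used by the module itself.


def build_destination(ip, port):
    """Join ip and port the way the monitor's 'destination' property does."""
    if ip is None and port is None:
        return None
    return '{0}:{1}'.format(ip, port)


def check_monitor_timing(interval, timeout):
    """Mirror the interval/timeout checks described by the module's error messages."""
    if interval is not None and not 1 <= interval <= 86400:
        raise ValueError("Interval value must be between 1 and 86400")
    if interval is not None and timeout is not None and interval >= timeout:
        raise ValueError("Parameter 'interval' must be less than 'timeout'.")


# Example: build_destination('2.3.2.4', 53) returns '2.3.2.4:53', while
# check_monitor_timing(interval=20, timeout=16) raises ValueError.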
import threading from PyQt4.Qt import (QDialog, QInputDialog, QLineEdit, QTextEdit, QVBoxLayout, QLabel, SIGNAL) import PyQt4.QtCore as QtCore from electrum_vtc.i18n import _ from electrum_vtc.qt.util import * from electrum_vtc.util import print_msg import os, hashlib, websocket, threading, logging, json, copy from electrum_vtc_gui.qt.qrcodewidget import QRCodeWidget, QRDialog from btchip.btchip import * DEBUG = False helpTxt = [_("Your Ledger Wallet wants tell you a one-time PIN code.<br><br>" \ "For best security you should unplug your device, open a text editor on another computer, " \ "put your cursor into it, and plug your device into that computer. " \ "It will output a summary of the transaction being signed and a one-time PIN.<br><br>" \ "Verify the transaction summary and type the PIN code here.<br><br>" \ "Before pressing enter, plug the device back into this computer.<br>" ), _("Verify the address below.<br>Type the character from your security card corresponding to the <u><b>BOLD</b></u> character."), _("Waiting for authentication on your mobile phone"), _("Transaction accepted by mobile phone. Waiting for confirmation."), _("Click Pair button to begin pairing a mobile phone."), _("Scan this QR code with your LedgerWallet phone app to pair it with this Ledger device.<br>" "To complete pairing you will need your security card to answer a challenge." ) ] class LedgerAuthDialog(QDialog): def __init__(self, handler, data): '''Ask user for 2nd factor authentication. Support text, security card and paired mobile methods. Use last method from settings, but support new pairing and downgrade. ''' QDialog.__init__(self, handler.top_level_window()) self.handler = handler self.txdata = data self.idxs = self.txdata['keycardData'] if self.txdata['confirmationType'] > 1 else '' self.setMinimumWidth(600) self.setWindowTitle(_("Ledger Wallet Authentication")) self.cfg = copy.deepcopy(self.handler.win.wallet.get_keystore().cfg) self.dongle = self.handler.win.wallet.get_keystore().get_client().dongle self.ws = None self.pin = '' self.devmode = self.getDevice2FAMode() if self.devmode == 0x11 or self.txdata['confirmationType'] == 1: self.cfg['mode'] = 0 vbox = QVBoxLayout() self.setLayout(vbox) def on_change_mode(idx): if idx < 2 and self.ws: self.ws.stop() self.ws = None self.cfg['mode'] = 0 if self.devmode == 0x11 else idx if idx > 0 else 1 if self.cfg['mode'] > 1 and self.cfg['pair'] and not self.ws: self.req_validation() if self.cfg['mode'] > 0: self.handler.win.wallet.get_keystore().cfg = self.cfg self.handler.win.wallet.save_keystore() self.update_dlg() def add_pairing(): self.do_pairing() def return_pin(): self.pin = self.pintxt.text() if self.txdata['confirmationType'] == 1 else self.cardtxt.text() if self.cfg['mode'] == 1: self.pin = ''.join(chr(int(str(i),16)) for i in self.pin) self.accept() self.modebox = QWidget() modelayout = QHBoxLayout() self.modebox.setLayout(modelayout) modelayout.addWidget(QLabel(_("Method:"))) self.modes = QComboBox() modelayout.addWidget(self.modes, 2) self.addPair = QPushButton(_("Pair")) self.addPair.setMaximumWidth(60) modelayout.addWidget(self.addPair) modelayout.addStretch(1) self.modebox.setMaximumHeight(50) vbox.addWidget(self.modebox) self.populate_modes() self.modes.currentIndexChanged.connect(on_change_mode) self.addPair.clicked.connect(add_pairing) self.helpmsg = QTextEdit() self.helpmsg.setStyleSheet("QTextEdit { background-color: lightgray; }") self.helpmsg.setReadOnly(True) vbox.addWidget(self.helpmsg) self.pinbox = QWidget() pinlayout = 
QHBoxLayout() self.pinbox.setLayout(pinlayout) self.pintxt = QLineEdit() self.pintxt.setEchoMode(2) self.pintxt.setMaxLength(4) self.pintxt.returnPressed.connect(return_pin) pinlayout.addWidget(QLabel(_("Enter PIN:"))) pinlayout.addWidget(self.pintxt) pinlayout.addWidget(QLabel(_("NOT DEVICE PIN - see above"))) pinlayout.addStretch(1) self.pinbox.setVisible(self.cfg['mode'] == 0) vbox.addWidget(self.pinbox) self.cardbox = QWidget() card = QVBoxLayout() self.cardbox.setLayout(card) self.addrtext = QTextEdit() self.addrtext.setStyleSheet("QTextEdit { color:blue; background-color:lightgray; padding:15px 10px; border:none; font-size:20pt; }") self.addrtext.setReadOnly(True) self.addrtext.setMaximumHeight(120) card.addWidget(self.addrtext) def pin_changed(s): if len(s) < len(self.idxs): i = self.idxs[len(s)] addr = self.txdata['address'] addr = addr[:i] + '<u><b>' + addr[i:i+1] + '</u></b>' + addr[i+1:] self.addrtext.setHtml(str(addr)) else: self.addrtext.setHtml(_("Press Enter")) pin_changed('') cardpin = QHBoxLayout() cardpin.addWidget(QLabel(_("Enter PIN:"))) self.cardtxt = QLineEdit() self.cardtxt.setEchoMode(2) self.cardtxt.setMaxLength(len(self.idxs)) self.cardtxt.textChanged.connect(pin_changed) self.cardtxt.returnPressed.connect(return_pin) cardpin.addWidget(self.cardtxt) cardpin.addWidget(QLabel(_("NOT DEVICE PIN - see above"))) cardpin.addStretch(1) card.addLayout(cardpin) self.cardbox.setVisible(self.cfg['mode'] == 1) vbox.addWidget(self.cardbox) self.pairbox = QWidget() pairlayout = QVBoxLayout() self.pairbox.setLayout(pairlayout) pairhelp = QTextEdit(helpTxt[5]) pairhelp.setStyleSheet("QTextEdit { background-color: lightgray; }") pairhelp.setReadOnly(True) pairlayout.addWidget(pairhelp, 1) self.pairqr = QRCodeWidget() pairlayout.addWidget(self.pairqr, 4) self.pairbox.setVisible(False) vbox.addWidget(self.pairbox) self.update_dlg() if self.cfg['mode'] > 1 and not self.ws: self.req_validation() def populate_modes(self): self.modes.blockSignals(True) self.modes.clear() self.modes.addItem(_("Summary Text PIN (requires dongle replugging)") if self.txdata['confirmationType'] == 1 else _("Summary Text PIN is Disabled")) if self.txdata['confirmationType'] > 1: self.modes.addItem(_("Security Card Challenge")) if not self.cfg['pair']: self.modes.addItem(_("Mobile - Not paired")) else: self.modes.addItem(_("Mobile - %s") % self.cfg['pair'][1]) self.modes.blockSignals(False) def update_dlg(self): self.modes.setCurrentIndex(self.cfg['mode']) self.modebox.setVisible(True) self.addPair.setText(_("Pair") if not self.cfg['pair'] else _("Re-Pair")) self.addPair.setVisible(self.txdata['confirmationType'] > 2) self.helpmsg.setText(helpTxt[self.cfg['mode'] if self.cfg['mode'] < 2 else 2 if self.cfg['pair'] else 4]) self.helpmsg.setMinimumHeight(180 if self.txdata['confirmationType'] == 1 else 100) self.pairbox.setVisible(False) self.helpmsg.setVisible(True) self.pinbox.setVisible(self.cfg['mode'] == 0) self.cardbox.setVisible(self.cfg['mode'] == 1) self.pintxt.setFocus(True) if self.cfg['mode'] == 0 else self.cardtxt.setFocus(True) self.setMaximumHeight(200) def do_pairing(self): rng = os.urandom(16) pairID = rng.encode('hex') + hashlib.sha256(rng).digest()[0].encode('hex') self.pairqr.setData(pairID) self.modebox.setVisible(False) self.helpmsg.setVisible(False) self.pinbox.setVisible(False) self.cardbox.setVisible(False) self.pairbox.setVisible(True) self.pairqr.setMinimumSize(300,300) if self.ws: self.ws.stop() self.ws = LedgerWebSocket(self, pairID) self.ws.pairing_done.connect(self.pairing_done) 
self.ws.start() def pairing_done(self, data): if data is not None: self.cfg['pair'] = [ data['pairid'], data['name'], data['platform'] ] self.cfg['mode'] = 2 self.handler.win.wallet.get_keystore().cfg = self.cfg self.handler.win.wallet.save_keystore() self.pin = 'paired' self.accept() def req_validation(self): if self.cfg['pair'] and 'secureScreenData' in self.txdata: if self.ws: self.ws.stop() self.ws = LedgerWebSocket(self, self.cfg['pair'][0], self.txdata) self.ws.req_updated.connect(self.req_updated) self.ws.start() def req_updated(self, pin): if pin == 'accepted': self.helpmsg.setText(helpTxt[3]) else: self.pin = str(pin) self.accept() def getDevice2FAMode(self): apdu = [0xe0, 0x24, 0x01, 0x00, 0x00, 0x01] # get 2fa mode try: mode = self.dongle.exchange( bytearray(apdu) ) return mode except BTChipException, e: debug_msg('Device getMode Failed') return 0x11 def closeEvent(self, evnt): debug_msg("CLOSE - Stop WS") if self.ws: self.ws.stop() if self.pairbox.isVisible(): evnt.ignore() self.update_dlg() class LedgerWebSocket(QThread): pairing_done = pyqtSignal(object) req_updated = pyqtSignal(str) def __init__(self, dlg, pairID, txdata=None): QThread.__init__(self) self.stopping = False self.pairID = pairID self.txreq = '{"type":"request","second_factor_data":"' + str(txdata['secureScreenData']).encode('hex') + '"}' if txdata else None self.dlg = dlg self.dongle = self.dlg.dongle self.data = None #websocket.enableTrace(True) logging.basicConfig(level=logging.INFO) self.ws = websocket.WebSocketApp('wss://ws.ledgerwallet.com/2fa/channels', on_message = self.on_message, on_error = self.on_error, on_close = self.on_close, on_open = self.on_open) def run(self): while not self.stopping: self.ws.run_forever() def stop(self): debug_msg("WS: Stopping") self.stopping = True self.ws.close() def on_message(self, ws, msg): data = json.loads(msg) if data['type'] == 'identify': debug_msg('Identify') apdu = [0xe0, 0x12, 0x01, 0x00, 0x41] # init pairing apdu.extend(data['public_key'].decode('hex')) try: challenge = self.dongle.exchange( bytearray(apdu) ) ws.send( '{"type":"challenge","data":"%s" }' % str(challenge).encode('hex') ) self.data = data except BTChipException, e: debug_msg('Identify Failed') if data['type'] == 'challenge': debug_msg('Challenge') apdu = [0xe0, 0x12, 0x02, 0x00, 0x10] # confirm pairing apdu.extend(data['data'].decode('hex')) try: self.dongle.exchange( bytearray(apdu) ) debug_msg('Pairing Successful') ws.send( '{"type":"pairing","is_successful":"true"}' ) self.data['pairid'] = self.pairID self.pairing_done.emit(self.data) except BTChipException, e: debug_msg('Pairing Failed') ws.send( '{"type":"pairing","is_successful":"false"}' ) self.pairing_done.emit(None) ws.send( '{"type":"disconnect"}' ) self.stopping = True ws.close() if data['type'] == 'accept': debug_msg('Accepted') self.req_updated.emit('accepted') if data['type'] == 'response': debug_msg('Responded', data) self.req_updated.emit(str(data['pin']) if data['is_accepted'] else '') self.txreq = None self.stopping = True ws.close() if data['type'] == 'repeat': debug_msg('Repeat') if self.txreq: ws.send( self.txreq ) debug_msg("Req Sent", self.txreq) if data['type'] == 'connect': debug_msg('Connected') if self.txreq: ws.send( self.txreq ) debug_msg("Req Sent", self.txreq) if data['type'] == 'disconnect': debug_msg('Disconnected') ws.close() def on_error(self, ws, error): message = getattr(error, 'strerror', '') if not message: message = getattr(error, 'message', '') debug_msg("WS: %s" % message) def on_close(self, ws): 
debug_msg("WS: ### socket closed ###") def on_open(self, ws): debug_msg("WS: ### socket open ###") debug_msg("Joining with pairing ID", self.pairID) ws.send( '{"type":"join","room":"%s"}' % self.pairID ) ws.send( '{"type":"repeat"}' ) if self.txreq: ws.send( self.txreq ) debug_msg("Req Sent", self.txreq) def debug_msg(*args): if DEBUG: print_msg(*args)
import copy from PySide import QtGui, QtCore from ..fe import FE from ..widget_factory import EditorFactory from ..base_editor import BaseValueEditor from ..core.undo_redo_manager import UndoRedoManager from ..core.value_controller import ElementController class ArrayEditor(BaseValueEditor): def __init__(self, valueController, parent=None): super(ArrayEditor, self).__init__(valueController, parent=parent) self.setSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.MinimumExpanding) self._enableAddElements = True #valueController.getOption('enableAddElements', valueController.getOption('enableAddRemoveElements', True)) self._enableRemoveElements = True #valueController.getOption('enableRemoveElements', valueController.getOption('enableAddRemoveElements', True)) self._addElementButtonLabel = 'add' #valueController.getOption('addElementButtonLabel', 'add') self._removeElementButtonLabel = 'remove' #valueController.getOption('removeElementButtonLabel', 'remove') self._displayGroupBox = False #self._valueController.getOption('displayArrayLimit', True) self._displayIndex = False #self._valueController.getOption('displayArrayLimit', True) self._displayArrayLimit = False #self._valueController.getOption('displayArrayLimit', True) self._displayNumElements = False #self._valueController.getOption('displayNumElements', True) self._arrayLimit = 3 #self._valueController.getOption('arrayLimit', 3) self._dataType = valueController.getDataType() self._valueArray = self._invokeGetter() self.determineElementType() vbox = QtGui.QVBoxLayout() if self._displayArrayLimit or self._displayNumElements: topToolbar = QtGui.QWidget(self) topToolbarLayout = QtGui.QHBoxLayout() topToolbar.setLayout(topToolbarLayout) vbox.addWidget(topToolbar, 0) if self._displayNumElements: topToolbarLayout.addWidget(QtGui.QLabel('Num Elements:'+str(len(self._valueArray)), self)) if self._displayArrayLimit: # display a widget to enable setting the maximum number of displayed elements. 
label = QtGui.QLabel('Max Displayed elements:', self) # label.setSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Preferred) topToolbarLayout.addWidget(label, 0) spinBox = QtGui.QSpinBox(self) spinBox.setMinimum(0) spinBox.setMaximum(100) # spinBox.setSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Fixed) spinBox.setValue(self._arrayLimit) def setArrayLimit(value): self._arrayLimit = value self.rebuild() spinBox.valueChanged.connect(setArrayLimit) topToolbarLayout.addWidget(spinBox, 0) topToolbarLayout.addStretch(1) self._grid = QtGui.QGridLayout() self._grid.setContentsMargins(0, 0, 0, 0) widget = QtGui.QWidget(self) widget.setLayout(self._grid) vbox.addWidget(widget) if self._displayGroupBox: groupBox = QtGui.QGroupBox(self._valueController.getDataType()) groupBox.setLayout(vbox) groupBox.setSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding) groupBoxLayout = QtGui.QVBoxLayout() groupBoxLayout.addWidget(groupBox, 0) self.setLayout(groupBoxLayout) else: self.setLayout(vbox) self.build() if self._elementValueType == 'String': self.setAcceptDrops(self.isEditable()) def dragEnterEvent(self, event): if event.mimeData().hasText(): event.accept() def dropEvent(self, event): if event.mimeData().hasText(): undoManager = UndoRedoManager.getInstance() undoManager.openBracket("Add element to :" + self.getName()) self.addElement() self._valueArray[len(self._valueArray) - 1] = event.mimeData().text() self._setValueToController() undoManager.closeBracket() self.rebuild() event.acceptProposedAction() def determineElementType(self): # Determine the element value type from the value type of the array. if self._dataType.endswith('Array'): self._elementValueType = self._dataType[:len(self._dataType)-5] self._constSizeArray = False else: openBraceIdx = self._dataType.find('[') closeBraceIdx = self._dataType.find(']') keyType = '' self._constSizeArray = False if closeBraceIdx > openBraceIdx+1: try: keyType = self._dataType[openBraceIdx+1:closeBraceIdx] int(keyType) self._constSizeArray = True except: raise Exception("Value type is not an array:'" + self._dataType + "'") self._elementValueType = self._dataType.replace('['+keyType+']', '', 1) def addElement(self): index = len(self._valueArray) newArray = FE.getInstance().rtVal(self._dataType) newArray.resize(index + 1) for i in range(0, len(newArray)-1): newArray[i] = self._valueArray[i] try: # If the element type is an object, then we should create it here.
newValue = FE.getInstance().rtVal(self._elementValueType) newArray[index] = newValue except: pass self._valueArray = newArray def removeElement(self, index): newArray = FE.getInstance().rtVal(self._dataType) newArray.resize(len(self._valueArray) - 1) for i in range(0, index): newArray[i] = self._valueArray[i] for i in range(index, len(newArray)): newArray[i] = self._valueArray[i+1] self._valueArray = newArray def build(self): self._editors = [] for i in range(0, len(self._valueArray)): if self._displayArrayLimit and i == self._arrayLimit: break self.constructAndAddElementEditor(i) if self.isEditable() and self._enableAddElements: if not self._displayArrayLimit or self._displayArrayLimit and len(self._valueArray) < self._arrayLimit and not self._constSizeArray: self.addElementButton = QtGui.QPushButton(self._addElementButtonLabel, self) def addElement(): undoManager = UndoRedoManager.getInstance() undoManager.openBracket("Add element to :" + self.getName()) self.addElement() self._setValueToController() undoManager.closeBracket() self.rebuild() self.addElementButton.clicked.connect(addElement) if self._displayIndex: self._grid.addWidget(self.addElementButton, len(self._valueArray), 1, 1, 2) else: self._grid.addWidget(self.addElementButton, len(self._valueArray), 0, 1, 2) def constructElementEditor(self, index): elementController = ElementController(index, self._elementValueType, self._valueArray, self.isEditable()) def elementChanged(value): self._setValueToController() elementController.valueChanged.connect(elementChanged) return EditorFactory.constructEditor(elementController, parent=self) def constructAndAddElementEditor(self, index): elementEditor = self.constructElementEditor(index) row = index column = 0 if self._displayIndex: self._grid.addWidget(QtGui.QLabel(str(index), self), row, column, QtCore.Qt.AlignRight | QtCore.Qt.AlignTop) column += 1 if elementEditor is not None: self._grid.addWidget(elementEditor, row, column) column += 1 if self.isEditable() and self._enableRemoveElements and not self._constSizeArray: self.removeElementButton = QtGui.QPushButton(self._removeElementButtonLabel, self) def removeElement(): undoManager = UndoRedoManager.getInstance() undoManager.openBracket("Remove element from :" + self.getName()) self.removeElement(index) self._setValueToController() undoManager.closeBracket() self.rebuild() self.removeElementButton.clicked.connect(removeElement) self._grid.addWidget(self.removeElementButton, row, column) if elementEditor is not None: self._editors.append(elementEditor) def rebuild(self): """ Rebuild the sub-widgets because the number of elements in the array changed.""" while self._grid.count(): self._grid.takeAt(0).widget().deleteLater() self.build() def getEditorValue(self): return self._valueArray def setEditorValue(self, valueArray): self._valueArray = valueArray if not len(self._valueArray) == len(self._editors): self.rebuild() else: for i in range(len(valueArray)): if i < len(self._editors): self._editors[i].setEditorValue(valueArray[i]) def getColumnSpan(self): """Returns the number of columns in the layout grid this widget takes up. 
Wide widgets can return values greater than 1 to modify their alignment relative to the label.""" return 2 @classmethod def canDisplay(cls, valueController): dataType = valueController.getDataType() if dataType.endswith('Array'): return True openBraceIdx = dataType.find('[') closeBraceIdx = dataType.find(']') if closeBraceIdx == openBraceIdx+1: return True if closeBraceIdx > openBraceIdx+1: try: keyType = dataType[openBraceIdx+1:closeBraceIdx] constInt = int(keyType) return True except: return False return False EditorFactory.registerEditorClass(ArrayEditor)
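# Illustrative sketch (not part of the original editor). determineElementType()
# and canDisplay() above recognise two array spellings in the data type string:
# a 'FooArray' suffix (variable-size array) and a 'Foo[N]' suffix with an
# integer N (fixed-size array); anything else, such as a string-keyed
# dictionary type, is rejected. The standalone helper below restates that
# parsing without the FE/rtVal machinery; the function name is ours and is not
# used by the editor.


def parse_array_type(data_type):
    """Return (element_type, fixed_size_or_None), or None if not an array type."""
    if data_type.endswith('Array'):
        return data_type[:-len('Array')], None            # e.g. 'ScalarArray'
    open_idx = data_type.find('[')
    close_idx = data_type.find(']')
    if open_idx != -1 and close_idx == open_idx + 1:
        return data_type.replace('[]', '', 1), None       # e.g. 'Scalar[]'
    if open_idx != -1 and close_idx > open_idx + 1:
        key = data_type[open_idx + 1:close_idx]
        if key.isdigit():                                  # e.g. 'Vec3[4]'
            return data_type.replace('[' + key + ']', '', 1), int(key)
    return None                                            # e.g. 'Scalar[String]'


# parse_array_type('ScalarArray')    -> ('Scalar', None)
# parse_array_type('Vec3[4]')        -> ('Vec3', 4)
# parse_array_type('Scalar[String]') -> None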
# Copyright (c) 2014 The Johns Hopkins University/Applied Physics Laboratory # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from six import string_types from testtools import TestCase from kmip.core.enums import Tags from kmip.core.enums import Types from kmip.core.enums import OpaqueDataType from kmip.core.utils import BytearrayStream import kmip.core.errors as errors from kmip.core.errors import ErrorStrings from kmip.core.primitives import Base from kmip.core.primitives import Integer from kmip.core.primitives import LongInteger from kmip.core.primitives import BigInteger from kmip.core.primitives import Enumeration from kmip.core.primitives import TextString from kmip.core.primitives import ByteString class TestBase(TestCase): def setUp(self): super(TestBase, self).setUp() self.stream = BytearrayStream() self.bad_init = 'Bad Base initialization: attribute {0} missing' self.bad_write = ErrorStrings.BAD_EXP_RECV.format('Base.{0}', 'write', '{1}', '{2}') self.bad_encoding = ErrorStrings.BAD_ENCODING.format('Base.{0}', 'write') self.bad_match = ErrorStrings.BAD_EXP_RECV.format('Base.{0}', 'comparison', '{1}', '{2}') def tearDown(self): super(TestBase, self).tearDown() def test_is_oversized(self): base = Base() # Check no exception thrown base.is_oversized(self.stream) def test_is_oversized_error(self): self.stream.write(b'\x00') base = Base() self.assertRaises(errors.StreamNotEmptyError, base.is_oversized, self.stream) def test_read_tag(self): encoding = (b'\x42\x00\x00') base = Base() self.stream = BytearrayStream(encoding) # Check no exception thrown base.read_tag(self.stream) def test_read_tag_invalid(self): encoding = (b'\x42\x00\x01') base = Base() self.stream = BytearrayStream(encoding) self.assertRaises(errors.ReadValueError, base.read_tag, self.stream) def test_read_type(self): self.stream.write(b'\x00') base = Base() # Check no exception thrown base.read_type(self.stream) def test_read_type_error(self): self.stream.write(b'\x01') base = Base() self.assertRaises(errors.ReadValueError, base.read_type, self.stream) def test_read_type_underflow(self): base = Base() self.assertRaises(errors.ReadValueError, base.read_type, self.stream) def test_read_type_overflow(self): self.stream.write(b'\x00\x00') base = Base() # Check no exception thrown base.read_type(self.stream) def test_read_length(self): self.stream.write(b'\x00\x00\x00\x04') base = Base() base.length = 4 # Check no exception thrown base.read_length(self.stream) def test_read_length_underflow(self): self.stream.write(b'\x00') base = Base() base.length = 4 self.assertRaises(errors.ReadValueError, base.read_length, self.stream) def test_read_length_overflow(self): self.stream.write(b'\x00\x00\x00\x04\x00') base = Base() base.length = 4 # Check no exception thrown base.read_length(self.stream) def test_read_value(self): base = Base() self.assertRaises(NotImplementedError, base.read_value, self.stream) def test_read(self): self.stream.write(b'\x42\x00\x00\x00\x00\x00\x00\x04') base = Base() base.length = 4 # Check no 
exception thrown base.read(self.stream) def test_write_tag(self): encoding = (b'\x42\x00\x00') base = Base() base.write_tag(self.stream) result = self.stream.read() len_exp = len(encoding) len_rcv = len(result) self.assertEqual(len_exp, len_rcv, self.bad_write.format('tag', '{0} bytes'.format(len_exp), '{0} bytes'.format(len_rcv))) self.assertEqual(encoding, result, self.bad_encoding.format('tag')) def test_write_type(self): encoding = b'\x00' base = Base() base.write_type(self.stream) result = self.stream.read() len_exp = len(encoding) len_rcv = len(result) self.assertEqual(len_exp, len_rcv, self.bad_write.format('type', '{0} bytes'.format(len_exp), '{0} bytes'.format(len_rcv))) self.assertEqual(encoding, result, self.bad_encoding.format('type')) def test_write_type_invalid(self): base = Base() base.type = '' self.assertRaises(TypeError, base.write_type, self.stream) def test_write_length(self): encoding = b'\x00\x00\x00\x04' base = Base() base.length = 4 base.write_length(self.stream) result = self.stream.read() len_exp = len(encoding) len_rcv = len(result) self.assertEqual(len_exp, len_rcv, self.bad_write.format('length', '{0} bytes'.format(len_exp), '{0} bytes'.format(len_rcv))) self.assertEqual(encoding, result, self.bad_encoding.format('length')) def test_write_length_invalid(self): base = Base() base.length = '' self.assertRaises(TypeError, base.write_length, self.stream) def test_write_length_overflow(self): self.skip('No easy way to test with a number requiring more than ' '2 ** 0xffffffff bytes for representation. Test preserved ' 'for completeness.') def test_write_value(self): base = Base() self.assertRaises(NotImplementedError, base.write_value, self.stream) def test_write(self): encoding = b'\x42\x00\x00\x00\x00\x00\x00\x04' base = Base() base.length = 4 base.write(self.stream) result = self.stream.read() len_exp = len(encoding) len_rcv = len(result) self.assertEqual(len_exp, len_rcv, self.bad_write.format('type/length', '{0} bytes'.format(len_exp), '{0} bytes'.format(len_rcv))) self.assertEqual(encoding, result, self.bad_encoding.format('type/length')) def test_is_tag_next(self): encoding = (b'\x42\x00\x00') base = Base() self.stream = BytearrayStream(encoding) self.assertTrue(Base.is_tag_next(base.tag, self.stream), self.bad_match.format('tag', 'match', 'mismatch')) def test_is_tag_next_invalid(self): encoding = (b'\x42\x00\x01') base = Base() self.stream = BytearrayStream(encoding) self.assertFalse(Base.is_tag_next(base.tag, self.stream), self.bad_match.format('tag', 'mismatch', 'match')) class TestInteger(TestCase): def setUp(self): super(TestInteger, self).setUp() self.stream = BytearrayStream() self.max_byte_int = 4294967295 self.max_int = 2147483647 self.bad_value = ('Bad Integer.{0} after init: expected {1}, ' 'received {2}') self.bad_write = ('Bad Integer write: expected {0} bytes, ' 'received {1} bytes') self.bad_encoding = 'Bad Integer write: encoding mismatch' self.bad_read = ('Bad Integer.value read: expected {0}, received {1}') def tearDown(self): super(TestInteger, self).tearDown() def test_init(self): i = Integer(0) self.assertEqual(0, i.value, self.bad_value.format('value', 0, i.value)) self.assertEqual(i.LENGTH, i.length, self.bad_value.format('length', i.LENGTH, i.length)) self.assertEqual(i.LENGTH, i.padding_length, self.bad_value.format('padding_length', i.LENGTH, i.padding_length)) def test_init_unset(self): i = Integer() self.assertEqual(0, i.value, self.bad_value.format('value', 0, i.value)) self.assertEqual(i.LENGTH, i.length, 
self.bad_value.format('length', i.LENGTH, i.length)) self.assertEqual(i.LENGTH, i.padding_length, self.bad_value.format('padding_length', i.LENGTH, i.padding_length)) def test_validate_on_valid(self): i = Integer() i.value = 0 # Check no exception thrown i.validate() def test_validate_on_valid_unset(self): i = Integer() # Check no exception thrown i.validate() def test_validate_on_invalid_type(self): """ Test that a TypeError is thrown on input of invalid type (e.g., str). """ self.assertRaises(TypeError, Integer, 'invalid') def test_validate_on_invalid_value_too_big(self): """ Test that a ValueError is thrown on input that is too large. """ self.assertRaises(ValueError, Integer, Integer.MAX + 1) def test_validate_on_invalid_value_too_small(self): """ Test that a ValueError is thrown on input that is too small. """ self.assertRaises(ValueError, Integer, Integer.MIN - 1) def test_read_value(self): encoding = (b'\x00\x00\x00\x01\x00\x00\x00\x00') self.stream = BytearrayStream(encoding) i = Integer() i.read_value(self.stream) self.assertEqual(1, i.value, self.bad_read.format(1, i.value)) def test_read_value_zero(self): encoding = (b'\x00\x00\x00\x00\x00\x00\x00\x00') self.stream = BytearrayStream(encoding) i = Integer() i.read_value(self.stream) self.assertEqual(0, i.value, self.bad_read.format(0, i.value)) def test_read_value_max_positive(self): encoding = (b'\x7f\xff\xff\xff\x00\x00\x00\x00') self.stream = BytearrayStream(encoding) i = Integer() i.read_value(self.stream) self.assertEqual(self.max_int, i.value, self.bad_read.format(1, i.value)) def test_read_value_min_negative(self): encoding = (b'\xff\xff\xff\xff\x00\x00\x00\x00') self.stream = BytearrayStream(encoding) i = Integer() i.read_value(self.stream) self.assertEqual(-1, i.value, self.bad_read.format(1, i.value)) def test_read(self): encoding = (b'\x42\x00\x00\x02\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00' b'\x00\x00') self.stream = BytearrayStream(encoding) i = Integer() i.read(self.stream) self.assertEqual(1, i.value, self.bad_read.format(1, i.value)) def test_read_on_invalid_length(self): encoding = (b'\x42\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' b'\x00\x00') self.stream = BytearrayStream(encoding) i = Integer() self.assertRaises(errors.ReadValueError, i.read, self.stream) def test_read_on_invalid_padding(self): encoding = (b'\x42\x00\x00\x02\x00\x00\x00\x04\x00\x00\x00\x00\xff\xff' b'\xff\xff') self.stream = BytearrayStream(encoding) i = Integer() self.assertRaises(errors.ReadValueError, i.read, self.stream) def test_write_value(self): encoding = (b'\x00\x00\x00\x01\x00\x00\x00\x00') i = Integer(1) i.write_value(self.stream) result = self.stream.read() len_exp = len(encoding) len_rcv = len(result) self.assertEqual(len_exp, len_rcv, self.bad_write.format(len_exp, len_rcv)) self.assertEqual(encoding, result, self.bad_encoding) def test_write_value_zero(self): encoding = (b'\x00\x00\x00\x00\x00\x00\x00\x00') i = Integer(0) i.write_value(self.stream) result = self.stream.read() len_exp = len(encoding) len_rcv = len(result) self.assertEqual(len_exp, len_rcv, self.bad_write.format(len_exp, len_rcv)) self.assertEqual(encoding, result, self.bad_encoding) def test_write_value_max_positive(self): encoding = (b'\x7f\xff\xff\xff\x00\x00\x00\x00') i = Integer(self.max_int) i.write_value(self.stream) result = self.stream.read() len_exp = len(encoding) len_rcv = len(result) self.assertEqual(len_exp, len_rcv, self.bad_write.format(len_exp, len_rcv)) self.assertEqual(encoding, result, self.bad_encoding) def 
test_write_value_min_negative(self): encoding = (b'\xff\xff\xff\xff\x00\x00\x00\x00') i = Integer(-1) i.write_value(self.stream) result = self.stream.read() len_exp = len(encoding) len_rcv = len(result) self.assertEqual(len_exp, len_rcv, self.bad_write.format(len_exp, len_rcv)) self.assertEqual(encoding, result, self.bad_encoding) def test_write(self): encoding = (b'\x42\x00\x00\x02\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00' b'\x00\x00') i = Integer(1) i.write(self.stream) result = self.stream.read() len_exp = len(encoding) len_rcv = len(result) self.assertEqual(len_exp, len_rcv, self.bad_write.format(len_exp, len_rcv)) self.assertEqual(encoding, result, self.bad_encoding) class TestLongInteger(TestCase): def setUp(self): super(TestLongInteger, self).setUp() self.stream = BytearrayStream() self.max_byte_long = 18446744073709551615 self.max_long = 9223372036854775807 self.bad_value = ('Bad LongInteger.{0} after init: expected {1}, ' 'received {2}') self.bad_write = ('Bad LongInteger write: expected {0} bytes, ' 'received {1} bytes') self.bad_encoding = 'Bad LongInteger write: encoding mismatch' self.bad_read = ('Bad LongInteger.value read: expected {0}, received ' '{1}') def tearDown(self): super(TestLongInteger, self).tearDown() def test_init(self): i = LongInteger(0) self.assertEqual(0, i.value, self.bad_value.format('value', 0, i.value)) self.assertEqual(i.LENGTH, i.length, self.bad_value.format('length', i.LENGTH, i.length)) def test_init_unset(self): i = LongInteger() self.assertEqual(None, i.value, self.bad_value.format('value', None, i.value)) self.assertEqual(i.LENGTH, i.length, self.bad_value.format('length', i.LENGTH, i.length)) def test_validate_on_valid(self): i = LongInteger() i.value = 0 # Check no exception thrown i.validate() def test_validate_on_valid_long(self): i = LongInteger() i.value = self.max_long + 1 # Check no exception thrown i.validate() def test_validate_on_valid_unset(self): i = LongInteger() # Check no exception thrown i.validate() def test_validate_on_invalid_type(self): i = LongInteger() i.value = 'test' self.assertRaises(errors.StateTypeError, i.validate) def test_validate_on_invalid_value(self): self.assertRaises(errors.StateOverflowError, LongInteger, self.max_byte_long + 1) def test_read_value(self): encoding = (b'\x00\x00\x00\x00\x00\x00\x00\x01') self.stream = BytearrayStream(encoding) i = LongInteger() i.read_value(self.stream) self.assertEqual(1, i.value, self.bad_read.format(1, i.value)) def test_read_value_zero(self): encoding = (b'\x00\x00\x00\x00\x00\x00\x00\x00') self.stream = BytearrayStream(encoding) i = LongInteger() i.read_value(self.stream) self.assertEqual(0, i.value, self.bad_read.format(0, i.value)) def test_read_value_max_positive(self): encoding = (b'\x7f\xff\xff\xff\xff\xff\xff\xff') self.stream = BytearrayStream(encoding) i = LongInteger() i.read_value(self.stream) self.assertEqual(self.max_long, i.value, self.bad_read.format(1, i.value)) def test_read_value_min_negative(self): encoding = (b'\xff\xff\xff\xff\xff\xff\xff\xff') self.stream = BytearrayStream(encoding) i = LongInteger() i.read_value(self.stream) self.assertEqual(-1, i.value, self.bad_read.format(1, i.value)) def test_read(self): encoding = (b'\x42\x00\x00\x03\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00' b'\x00\x01') self.stream = BytearrayStream(encoding) i = LongInteger() i.read(self.stream) self.assertEqual(1, i.value, self.bad_read.format(1, i.value)) def test_read_on_invalid_length(self): encoding = (b'\x42\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' b'\x00\x00') 
self.stream = BytearrayStream(encoding) i = LongInteger() self.assertRaises(errors.ReadValueError, i.read, self.stream) def test_write_value(self): encoding = (b'\x00\x00\x00\x00\x00\x00\x00\x01') i = LongInteger(1) i.write_value(self.stream) result = self.stream.read() len_exp = len(encoding) len_rcv = len(result) self.assertEqual(len_exp, len_rcv, self.bad_write.format(len_exp, len_rcv)) self.assertEqual(encoding, result, self.bad_encoding) def test_write_value_zero(self): encoding = (b'\x00\x00\x00\x00\x00\x00\x00\x00') i = LongInteger(0) i.write_value(self.stream) result = self.stream.read() len_exp = len(encoding) len_rcv = len(result) self.assertEqual(len_exp, len_rcv, self.bad_write.format(len_exp, len_rcv)) self.assertEqual(encoding, result, self.bad_encoding) def test_write_value_max_positive(self): encoding = (b'\x7f\xff\xff\xff\xff\xff\xff\xff') i = LongInteger(self.max_long) i.write_value(self.stream) result = self.stream.read() len_exp = len(encoding) len_rcv = len(result) self.assertEqual(len_exp, len_rcv, self.bad_write.format(len_exp, len_rcv)) self.assertEqual(encoding, result, self.bad_encoding) def test_write_value_min_negative(self): encoding = (b'\xff\xff\xff\xff\xff\xff\xff\xff') i = LongInteger(-1) i.write_value(self.stream) result = self.stream.read() len_exp = len(encoding) len_rcv = len(result) self.assertEqual(len_exp, len_rcv, self.bad_write.format(len_exp, len_rcv)) self.assertEqual(encoding, result, self.bad_encoding) def test_write(self): encoding = (b'\x42\x00\x00\x03\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00' b'\x00\x01') i = LongInteger(1) i.write(self.stream) result = self.stream.read() len_exp = len(encoding) len_rcv = len(result) self.assertEqual(len_exp, len_rcv, self.bad_write.format(len_exp, len_rcv)) self.assertEqual(encoding, result, self.bad_encoding) class TestBigInteger(TestCase): def setUp(self): super(TestBigInteger, self).setUp() self.stream = BytearrayStream() self.max_byte_long = 18446744073709551615 self.max_long = 9223372036854775807 self.bad_value = ('Bad BigInteger.{0} after init: expected {1}, ' 'received {2}') self.bad_write = ('Bad BigInteger write: expected {0} bytes, ' 'received {1} bytes') self.bad_encoding = 'Bad BigInteger write: encoding mismatch' self.bad_read = ('Bad BigInteger.value read: expected {0}, ' 'received {1}') def tearDown(self): super(TestBigInteger, self).tearDown() def test_big_integer(self): self.skip('BigInteger implementation incomplete') i = BigInteger(0) self.assertEqual(0, i.value, self.bad_value.format('value', 0, i.value)) self.assertEqual(1, i.length, self.bad_value.format('length', 1, i.length)) self.assertEqual(i.BLOCK_SIZE - 1, i.padding_length, self.bad_value.format('padding_length', i.BLOCK_SIZE - 1, i.padding_length)) def test_big_integer_unset(self): self.skip('BigInteger implementation incomplete') i = BigInteger() self.assertEqual(None, i.value, self.bad_value.format('value', None, i.value)) self.assertEqual(None, i.length, self.bad_value.format('length', None, i.length)) self.assertEqual(None, i.padding_length, self.bad_value.format('padding_length', None, i.padding_length)) def test_validate_on_valid(self): self.skip('BigInteger implementation incomplete') i = BigInteger() i.value = 0 i.length = i.BLOCK_SIZE i.padding_length = 0 # Check no exception thrown i.validate() def test_validate_on_valid_long(self): self.skip('BigInteger implementation incomplete') i = BigInteger() i.value = self.max_long + 1 i.length = i.BLOCK_SIZE i.padding_length = 0 # Check no exception thrown i.validate() def 
test_validate_on_valid_unset(self): self.skip('BigInteger implementation incomplete') i = BigInteger() # Check no exception thrown i.validate() def test_validate_on_invalid_type(self): self.skip('BigInteger implementation incomplete') i = BigInteger() i.value = 'test' self.assertRaises(errors.StateTypeError, i.validate) def test_write(self): self.skip('BigInteger implementation incomplete') encoding = (b'\x42\x00\x01\x04\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00' b'\x00\x01') i = BigInteger(1) i.TAG = Tags.ACTIVATION_DATE i.write(self.stream) result = self.stream.read() len_exp = len(encoding) len_rcv = len(result) self.assertEqual(len_exp, len_rcv, self.bad_write.format(len_exp, len_rcv)) self.assertEqual(encoding, result, self.bad_encoding) def test_write_zero(self): self.skip('BigInteger implementation incomplete') encoding = (b'\x42\x00\x01\x04\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00' b'\x00\x00') i = BigInteger(0) i.TAG = Tags.ACTIVATION_DATE i.write(self.stream) result = self.stream.read() len_exp = len(encoding) len_rcv = len(result) self.assertEqual(len_exp, len_rcv, self.bad_write.format(len_exp, len_rcv)) self.assertEqual(encoding, result, self.bad_encoding) def test_write_max_positive_value(self): self.skip('BigInteger implementation incomplete') encoding = (b'\x42\x00\x01\x04\x00\x00\x00\x08\x7f\xff\xff\xff\xff\xff' b'\xff\xff') i = BigInteger(self.max_long) i.TAG = Tags.ACTIVATION_DATE i.write(self.stream) result = self.stream.read() len_exp = len(encoding) len_rcv = len(result) self.assertEqual(len_exp, len_rcv, self.bad_write.format(len_exp, len_rcv)) self.assertEqual(encoding, result, self.bad_encoding) def test_write_min_negative_value(self): self.skip('BigInteger implementation incomplete') encoding = (b'\x42\x00\x01\x04\x00\x00\x00\x08\xff\xff\xff\xff\xff\xff' b'\xff\xff') i = BigInteger(-1) i.TAG = Tags.ACTIVATION_DATE i.write(self.stream) result = self.stream.read() len_exp = len(encoding) len_rcv = len(result) self.assertEqual(len_exp, len_rcv, self.bad_write.format(len_exp, len_rcv)) self.assertEqual(encoding, result, self.bad_encoding) def test_read(self): self.skip('BigInteger implementation incomplete') encoding = (b'\x42\x00\x01\x04\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00' b'\x00\x01') self.stream = BytearrayStream(encoding) i = BigInteger() i.TAG = Tags.ACTIVATION_DATE i.read(self.stream) self.assertEqual(1, i.value, self.bad_read.format(1, i.value)) def test_read_zero(self): self.skip('BigInteger implementation incomplete') encoding = (b'\x42\x00\x01\x04\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00' b'\x00\x00') self.stream = BytearrayStream(encoding) i = BigInteger() i.TAG = Tags.ACTIVATION_DATE i.read(self.stream) self.assertEqual(0, i.value, self.bad_read.format(0, i.value)) def test_read_max_positive_value(self): self.skip('BigInteger implementation incomplete') encoding = (b'\x42\x00\x01\x04\x00\x00\x00\x08\x7f\xff\xff\xff\xff\xff' b'\xff\xff') self.stream = BytearrayStream(encoding) i = BigInteger() i.TAG = Tags.ACTIVATION_DATE i.read(self.stream) self.assertEqual(self.max_long, i.value, self.bad_read.format(1, i.value)) def test_read_min_negative_value(self): self.skip('BigInteger implementation incomplete') encoding = (b'\x42\x00\x01\x04\x00\x00\x00\x08\xff\xff\xff\xff\xff\xff' b'\xff\xff') self.stream = BytearrayStream(encoding) i = BigInteger() i.TAG = Tags.ACTIVATION_DATE i.read(self.stream) self.assertEqual(-1, i.value, self.bad_read.format(1, i.value)) def test_read_on_invalid_length(self): self.skip('BigInteger implementation incomplete') encoding = 
(b'\x42\x00\x01\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' b'\x00\x00') self.stream = BytearrayStream(encoding) i = BigInteger() i.TAG = Tags.ACTIVATION_DATE self.assertRaises(errors.InvalidLengthError, i.read, self.stream) class TestEnumeration(TestCase): def setUp(self): super(TestEnumeration, self).setUp() self.stream = BytearrayStream() Enumeration.ENUM_TYPE = Types self.bad_type = ErrorStrings.BAD_EXP_RECV.format('Enumeration.{0}', 'type', '{1}', '{2}') self.bad_value = ErrorStrings.BAD_EXP_RECV.format('Enumeration.{0}', 'value', '{1}', '{2}') self.bad_write = ErrorStrings.BAD_EXP_RECV.format('Enumeration', 'write', '{0} bytes', '{1} bytes') self.bad_encoding = ErrorStrings.BAD_ENCODING.format('Enumeration', 'write') def tearDown(self): super(TestEnumeration, self).tearDown() def test_init(self): e = Enumeration(Types.DEFAULT) self.assertIsInstance(e.enum, Types, self.bad_type.format('enum', Types, type(e.enum))) self.assertEqual(Types.DEFAULT, e.enum, self.bad_value.format('enum', Types.DEFAULT, e.enum)) default = Types.DEFAULT self.assertEqual(default.value, e.value, self.bad_value.format('value', default.value, e.value)) def test_init_unset(self): e = Enumeration() self.assertEqual(None, e.enum, self.bad_value.format('enum', None, e.enum)) self.assertEqual(0, e.value, self.bad_value.format('value', 0, e.value)) def test_validate_on_valid(self): e = Enumeration() e.enum = Types.DEFAULT # Check no exception thrown e.validate() def test_validate_on_valid_unset(self): e = Enumeration() # Check no exception thrown e.validate() def test_validate_on_invalid_type(self): e = Enumeration() e.enum = 0 self.assertRaises(TypeError, e.validate) def test_read(self): encoding = (b'\x42\x00\x00\x05\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00' b'\x00\x00') self.stream = BytearrayStream(encoding) e = Enumeration() e.read(self.stream) self.assertIsInstance(e.enum, Types, self.bad_type.format('enum', Types, type(e.enum))) self.assertEqual(Types.DEFAULT, e.enum, self.bad_value.format('enum', Types.DEFAULT, type(e.enum))) default = Types.DEFAULT self.assertEqual(default.value, e.value, self.bad_value.format('value', default.value, e.value)) def test_write(self): encoding = (b'\x42\x00\x00\x05\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00' b'\x00\x00') e = Enumeration(Types.DEFAULT) e.write(self.stream) result = self.stream.read() len_exp = len(encoding) len_rcv = len(result) self.assertEqual(len_exp, len_rcv, self.bad_write.format(len_exp, len_rcv)) self.assertEqual(encoding, result, self.bad_encoding) def test_write_unsigned(self): """ Test that a large enumeration value is written correctly as an unsigned integer. 
""" encoding = (b'\x42\x00\x00\x05\x00\x00\x00\x04\x80\x00\x00\x00\x00\x00' b'\x00\x00') e = Enumeration(OpaqueDataType.NONE) e.write(self.stream) result = self.stream.read() self.assertEqual(len(encoding), len(result)) self.assertEqual(encoding, result) class TestTextString(TestCase): def setUp(self): super(TestTextString, self).setUp() self.stream = BytearrayStream() self.bad_type = ErrorStrings.BAD_EXP_RECV.format('TextString.{0}', 'type', '{1}', '{2}') self.bad_value = ErrorStrings.BAD_EXP_RECV.format('TextString.{0}', 'value', '{1}', '{2}') self.bad_read = ErrorStrings.BAD_EXP_RECV.format('TextString.{0}', '', '{1}', '{2}') self.bad_write = ErrorStrings.BAD_EXP_RECV.format('TextString.{0}', 'write', '{1}', '{2}') self.bad_encoding = ErrorStrings.BAD_ENCODING.format('TextString', '') self.bad_length = ErrorStrings.BAD_EXP_RECV.format('TextString', 'length', '{0} bytes', '{1} bytes') def tearDown(self): super(TestTextString, self).tearDown() def test_init(self): value = 'Hello World' ts = TextString(value) self.assertIsInstance(ts.value, str, self.bad_type.format('value', str, type(ts.value))) self.assertEqual(value, ts.value, self.bad_value.format('value', value, ts.value)) def test_init_unset(self): text_string = TextString() expected = string_types observed = text_string.value msg = "expected {0}, observed {1}".format(expected, observed) self.assertIsInstance(observed, expected, msg) expected = '' msg = "expected {0}, observed {1}".format(expected, observed) self.assertEqual(expected, observed, msg) def test_validate_on_valid(self): ts = TextString() ts.value = 'Hello World' # Check no exception thrown. ts.validate() def test_validate_on_valid_unset(self): ts = TextString() # Check no exception thrown. ts.validate() def test_validate_on_invalid_type(self): ts = TextString() ts.value = 0 self.assertRaises(TypeError, ts.validate) def test_read_value(self): encoding = (b'\x48\x65\x6C\x6C\x6F\x20\x57\x6F\x72\x6C\x64\x00\x00\x00' b'\x00\x00') self.stream = BytearrayStream(encoding) ts = TextString() ts.length = 0x0B ts.read_value(self.stream) expected = 'Hello World' self.assertEqual(expected, ts.value, self.bad_read.format('value', expected, ts.value)) def test_read_value_no_padding(self): encoding = (b'\x48\x65\x6C\x6C\x6F\x20\x57\x6F') self.stream = BytearrayStream(encoding) ts = TextString() ts.length = 0x08 ts.read_value(self.stream) expected = 'Hello Wo' self.assertEqual(expected, ts.value, self.bad_read.format('value', expected, ts.value)) def test_read_value_max_padding(self): encoding = (b'\x48\x00\x00\x00\x00\x00\x00\x00') self.stream = BytearrayStream(encoding) ts = TextString() ts.length = 0x01 ts.read_value(self.stream) expected = 'H' self.assertEqual(expected, ts.value, self.bad_read.format('value', expected, ts.value)) def test_read(self): encoding = (b'\x42\x00\x00\x07\x00\x00\x00\x0B\x48\x65\x6C\x6C\x6F\x20' b'\x57\x6F\x72\x6C\x64\x00\x00\x00\x00\x00') self.stream = BytearrayStream(encoding) ts = TextString() ts.read(self.stream) expected = 'Hello World' self.assertEqual(expected, ts.value, self.bad_read.format('value', expected, ts.value)) def test_read_on_invalid_padding(self): encoding = (b'\x42\x00\x00\x07\x00\x00\x00\x0B\x48\x65\x6C\x6C\x6F\x20' b'\x57\x6F\x72\x6C\x64\xff\xff\xff\xff\xff') self.stream = BytearrayStream(encoding) ts = TextString() self.assertRaises(errors.ReadValueError, ts.read, self.stream) def test_write_value(self): encoding = (b'\x48\x65\x6C\x6C\x6F\x20\x57\x6F\x72\x6C\x64\x00\x00\x00' b'\x00\x00') self.stream = BytearrayStream() value = 'Hello 
World' ts = TextString(value) ts.write_value(self.stream) result = self.stream.read() len_exp = len(encoding) len_rcv = len(result) self.assertEqual(len_exp, len_rcv, self.bad_length.format(len_exp, len_rcv)) self.assertEqual(encoding, result, self.bad_encoding) def test_write_value_no_padding(self): encoding = (b'\x48\x65\x6C\x6C\x6F\x20\x57\x6F') self.stream = BytearrayStream() value = 'Hello Wo' ts = TextString(value) ts.write_value(self.stream) result = self.stream.read() len_exp = len(encoding) len_rcv = len(result) self.assertEqual(len_exp, len_rcv, self.bad_length.format(len_exp, len_rcv)) self.assertEqual(encoding, result, self.bad_encoding) def test_write_value_max_padding(self): encoding = (b'\x48\x00\x00\x00\x00\x00\x00\x00') self.stream = BytearrayStream() value = 'H' ts = TextString(value) ts.write_value(self.stream) result = self.stream.read() len_exp = len(encoding) len_rcv = len(result) self.assertEqual(len_exp, len_rcv, self.bad_length.format(len_exp, len_rcv)) self.assertEqual(encoding, result, self.bad_encoding) def test_write(self): encoding = (b'\x42\x00\x00\x07\x00\x00\x00\x0B\x48\x65\x6C\x6C\x6F\x20' b'\x57\x6F\x72\x6C\x64\x00\x00\x00\x00\x00') self.stream = BytearrayStream() value = 'Hello World' ts = TextString(value) ts.write(self.stream) result = self.stream.read() len_exp = len(encoding) len_rcv = len(result) self.assertEqual(len_exp, len_rcv, self.bad_length.format(len_exp, len_rcv)) self.assertEqual(encoding, result, self.bad_encoding) class TestByteString(TestCase): def setUp(self): super(TestByteString, self).setUp() self.stream = BytearrayStream() self.bad_type = ErrorStrings.BAD_EXP_RECV.format('ByteString.{0}', 'type', '{1}', '{2}') self.bad_value = ErrorStrings.BAD_EXP_RECV.format('ByteString.{0}', 'value', '{1}', '{2}') self.bad_read = ErrorStrings.BAD_EXP_RECV.format('ByteString.{0}', '', '{1}', '{2}') self.bad_write = ErrorStrings.BAD_EXP_RECV.format('ByteString.{0}', 'write', '{1}', '{2}') self.bad_encoding = ErrorStrings.BAD_ENCODING.format('ByteString', '') self.bad_length = ErrorStrings.BAD_EXP_RECV.format('ByteString', 'length', '{0} bytes', '{1} bytes') def tearDown(self): super(TestByteString, self).tearDown() def test_init(self): value = b'\x01\x02\x03' bs = ByteString(value) self.assertIsInstance(bs.value, bytes, self.bad_type.format('value', bytes, type(bs.value))) self.assertEqual(value, bs.value, self.bad_value.format('value', value, bs.value)) def test_init_unset(self): bs = ByteString() self.assertIsInstance(bs.value, bytes, self.bad_type.format('value', type(None), type(bs.value))) self.assertEqual(bytes(), bs.value, self.bad_value.format('value', None, bs.value)) def test_validate_on_valid(self): bs = ByteString() bs.value = b'\x00' # Check no exception thrown. bs.validate() def test_validate_on_valid_unset(self): bs = ByteString() # Check no exception thrown. 
bs.validate() def test_validate_on_invalid_type(self): bs = ByteString() bs.value = 0 self.assertRaises(TypeError, bs.validate) def test_read_value(self): encoding = b'\x01\x02\x03\x00\x00\x00\x00\x00' self.stream = BytearrayStream(encoding) bs = ByteString() bs.length = 0x03 bs.read_value(self.stream) expected = b'\x01\x02\x03' self.assertEqual(expected, bs.value, self.bad_read.format('value', expected, bs.value)) def test_read_value_no_padding(self): encoding = b'\x01\x02\x03\x04\x05\x06\x07\x08' self.stream = BytearrayStream(encoding) bs = ByteString() bs.length = 0x08 bs.read_value(self.stream) expected = b'\x01\x02\x03\x04\x05\x06\x07\x08' self.assertEqual(expected, bs.value, self.bad_read.format('value', expected, bs.value)) def test_read_value_max_padding(self): encoding = b'\x01\x00\x00\x00\x00\x00\x00\x00' self.stream = BytearrayStream(encoding) bs = ByteString() bs.length = 0x01 bs.read_value(self.stream) expected = b'\x01' self.assertEqual(expected, bs.value, self.bad_read.format('value', expected, bs.value)) def test_read_value_zero(self): encoding = b'\x00\x00\x00\x00\x00\x00\x00\x00' self.stream = BytearrayStream(encoding) bs = ByteString() bs.length = 0x01 bs.read_value(self.stream) expected = b'\x00' self.assertEqual(expected, bs.value, self.bad_read.format('value', expected, bs.value)) def test_read(self): encoding = (b'\x42\x00\x00\x08\x00\x00\x00\x03\x01\x02\x03\x00\x00\x00' b'\x00\x00') self.stream = BytearrayStream(encoding) bs = ByteString() bs.read(self.stream) expected = b'\x01\x02\x03' self.assertEqual(expected, bs.value, self.bad_read.format('value', expected, bs.value)) def test_read_on_invalid_padding(self): encoding = (b'\x42\x00\x00\x08\x00\x00\x00\x03\x01\x02\x03\xff\xff\xff' b'\xff\xff') self.stream = BytearrayStream(encoding) bs = ByteString() self.assertRaises(errors.ReadValueError, bs.read, self.stream) def test_write_value(self): encoding = b'\x01\x02\x03\x00\x00\x00\x00\x00' self.stream = BytearrayStream() value = b'\x01\x02\x03' bs = ByteString(value) bs.write_value(self.stream) result = self.stream.read() len_exp = len(encoding) len_rcv = len(result) self.assertEqual(len_exp, len_rcv, self.bad_length.format(len_exp, len_rcv)) self.assertEqual(encoding, result, self.bad_encoding) def test_write_value_no_padding(self): encoding = b'\x01\x02\x03\x04\x05\x06\x07\x08' self.stream = BytearrayStream() value = b'\x01\x02\x03\x04\x05\x06\x07\x08' bs = ByteString(value) bs.write_value(self.stream) result = self.stream.read() len_exp = len(encoding) len_rcv = len(result) self.assertEqual(len_exp, len_rcv, self.bad_length.format(len_exp, len_rcv)) self.assertEqual(encoding, result, self.bad_encoding) def test_write_value_max_padding(self): encoding = b'\x01\x00\x00\x00\x00\x00\x00\x00' self.stream = BytearrayStream() value = b'\x01' bs = ByteString(value) bs.write_value(self.stream) result = self.stream.read() len_exp = len(encoding) len_rcv = len(result) self.assertEqual(len_exp, len_rcv, self.bad_length.format(len_exp, len_rcv)) self.assertEqual(encoding, result, self.bad_encoding) def test_write_value_zero(self): encoding = b'\x00\x00\x00\x00\x00\x00\x00\x00' self.stream = BytearrayStream() value = b'\x00' bs = ByteString(value) bs.write_value(self.stream) result = self.stream.read() len_exp = len(encoding) len_rcv = len(result) self.assertEqual(len_exp, len_rcv, self.bad_length.format(len_exp, len_rcv)) self.assertEqual(encoding, result, self.bad_encoding) def test_write(self): encoding = (b'\x42\x00\x00\x08\x00\x00\x00\x03\x01\x02\x03\x00\x00\x00' 
b'\x00\x00') self.stream = BytearrayStream() value = b'\x01\x02\x03' bs = ByteString(value) bs.write(self.stream) result = self.stream.read() len_exp = len(encoding) len_rcv = len(result) self.assertEqual(len_exp, len_rcv, self.bad_length.format(len_exp, len_rcv)) self.assertEqual(encoding, result, self.bad_encoding) class TestDateTime(TestCase): def setUp(self): super(TestDateTime, self).setUp() self.stream = BytearrayStream() def tearDown(self): super(TestDateTime, self).tearDown() def test_init(self): self.skip('') def test_init_unset(self): self.skip('') def test_validate_on_valid(self): self.skip('') def test_validate_on_valid_unset(self): self.skip('') def test_validate_on_invalid_type(self): self.skip('') def test_read(self): self.skip('') def test_write(self): self.skip('') class TestInterval(TestCase): def setUp(self): super(TestInterval, self).setUp() self.stream = BytearrayStream() def tearDown(self): super(TestInterval, self).tearDown() def test_init(self): self.skip('') def test_init_unset(self): self.skip('') def test_validate_on_valid(self): self.skip('') def test_validate_on_valid_unset(self): self.skip('') def test_validate_on_invalid_type(self): self.skip('') def test_read(self): self.skip('') def test_write(self): self.skip('')
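# -----------------------------------------------------------------------------
# Illustrative sketch, not part of the test suite above: the TextString and
# ByteString encodings asserted in these tests follow the KMIP TTLV rule that
# a primitive value is padded with zero bytes up to the next multiple of 8.
# The helper below reproduces that padding arithmetic on plain byte strings;
# the name `pad_to_block` is hypothetical and exists only for this example.
def pad_to_block(value, block=8):
    """Pad a byte string with zero bytes so its length is a multiple of `block`."""
    remainder = len(value) % block
    if remainder == 0:
        return value
    return value + b'\x00' * (block - remainder)

# 'Hello World' is 11 bytes, so 5 padding bytes are expected, matching the
# 16-byte value encoding asserted in TestTextString.test_write_value above.
assert pad_to_block(b'Hello World') == b'Hello World\x00\x00\x00\x00\x00'
# An 8-byte value needs no padding, as in test_write_value_no_padding.
assert pad_to_block(b'\x01\x02\x03\x04\x05\x06\x07\x08') == b'\x01\x02\x03\x04\x05\x06\x07\x08'
# -----------------------------------------------------------------------------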
import numpy as np import scipy.optimize as spopt import cvxopt as cvx from cvxopt import solvers from scipy.special import digamma, gammaln, polygamma import time, math, pdb # suppress optimizer output solvers.options['show_progress'] = False solvers.options['maxiters'] = 40 np.random.seed(10) # defining some constants EPS = np.finfo(np.double).tiny MAX = np.finfo(np.double).max # defining some simple functions logistic = lambda x: 1./(1+np.exp(x)) insum = lambda x,axes: np.apply_over_axes(np.sum,x,axes) def outsum(arr): """Summation over the first axis, without changing length of shape. Arguments arr : array Returns thesum : array .. note:: This implementation is much faster than `numpy.sum`. """ thesum = sum([a for a in arr]) shape = [1] shape.extend(list(thesum.shape)) thesum = thesum.reshape(tuple(shape)) return thesum def nplog(x): """Compute the natural logarithm, handling very small floats appropriately. """ try: x[x<EPS] = EPS except TypeError: x = max([x,EPS]) return np.log(x) class Data: """ A data structure to store a multiscale representation of chromatin accessibility read counts across `N` genomic windows of length `L` in `R` replicates. Arguments reads : array """ def __init__(self, reads=None): if reads is None: self.N = 0 self.L = 0 self.R = 0 self.J = 0 self.value = dict() self.total = dict() else: self.N, self.L, self.R = reads.shape self.J = math.frexp(self.L)[1]-1 self.value = dict() self.total = dict() self.transform(reads) def transform(self, profile): """Transform a vector of read counts or parameter values into a multiscale representation. .. note:: See msCentipede manual for more details. """ for j in xrange(self.J): size = self.L/(2**(j+1)) self.total[j] = np.array([profile[:,k*size:(k+2)*size,:].sum(1) for k in xrange(0,2**(j+1),2)]).T self.value[j] = np.array([profile[:,k*size:(k+1)*size,:].sum(1) for k in xrange(0,2**(j+1),2)]).T def inverse_transform(self): """Transform a multiscale representation of the data or parameters, into vector representation. """ if self.data: profile = np.array([val for k in xrange(2**self.J) \ for val in [self.value[self.J-1][k][0],self.value[self.J-1][k][1]-self.value[self.J-1][k][0]]]) else: profile = np.array([1]) for j in xrange(self.J): profile = np.array([p for val in profile for p in [val,val]]) vals = np.array([i for v in self.value[j] for i in [v,1-v]]) profile = vals*profile return profile def copy(self): """ Create a copy of the class instance """ newcopy = Data() newcopy.J = self.J newcopy.N = self.N newcopy.L = self.L newcopy.R = self.R for j in xrange(self.J): newcopy.value[j] = self.value[j] newcopy.total[j] = self.total[j] return newcopy class Zeta(): """ Inference class to store and update (E-step) the posterior probability that a transcription factor is bound to a motif instance. 
Arguments data : Data totalreads : array """ def __init__(self, data, totalreads, infer=False): self.N = data.N self.total = totalreads if infer: self.prior_log_odds = np.zeros((self.N,1), dtype=float) self.footprint_log_likelihood_ratio = np.zeros((self.N,1), dtype=float) self.total_log_likelihood_ratio = np.zeros((self.N,1), dtype=float) self.posterior_log_odds = np.zeros((self.N,1), dtype=float) else: self.estim = np.zeros((self.N, 2),dtype=float) order = np.argsort(self.total.sum(1)) indices = order[:self.N/2] self.estim[indices,1:] = -MAX indices = order[self.N/2:] self.estim[indices,1:] = MAX self.estim = np.exp(self.estim - np.max(self.estim,1).reshape(self.N,1)) self.estim = self.estim / insum(self.estim,[1]) def update(self, data, scores, pi, tau, alpha, beta, omega, \ pi_null, tau_null, model): footprint_logodds = np.zeros((self.N,1),dtype=float) lhoodA, lhoodB = compute_footprint_likelihood(data, pi, tau, pi_null, tau_null, model) for j in xrange(data.J): footprint_logodds += insum(lhoodA.value[j] - lhoodB.value[j],[1]) prior_logodds = insum(beta.estim * scores, [1]) negbin_logodds = insum(gammaln(self.total + alpha.estim.T[1]) \ - gammaln(self.total + alpha.estim.T[0]) \ + gammaln(alpha.estim.T[0]) - gammaln(alpha.estim.T[1]) \ + alpha.estim.T[1] * nplog(omega.estim.T[1]) - alpha.estim.T[0] * nplog(omega.estim.T[0]) \ + self.total * (nplog(1 - omega.estim.T[1]) - nplog(1 - omega.estim.T[0])),[1]) self.estim[:,1:] = prior_logodds + footprint_logodds + negbin_logodds self.estim[:,0] = 0. self.estim[self.estim==np.inf] = MAX self.estim = np.exp(self.estim-np.max(self.estim,1).reshape(self.N,1)) self.estim = self.estim/insum(self.estim,[1]) def infer(self, data, scores, pi, tau, alpha, beta, omega, \ pi_null, tau_null, model): lhoodA, lhoodB = compute_footprint_likelihood(data, pi, tau, pi_null, tau_null, model) for j in xrange(data.J): self.footprint_log_likelihood_ratio += insum(lhoodA.value[j] - lhoodB.value[j],[1]) self.footprint_log_likelihood_ratio = self.footprint_log_likelihood_ratio / np.log(10) self.prior_log_odds = insum(beta.estim * scores, [1]) / np.log(10) self.total_log_likelihood_ratio = insum(gammaln(self.total + alpha.estim.T[1]) \ - gammaln(self.total + alpha.estim.T[0]) \ + gammaln(alpha.estim.T[0]) - gammaln(alpha.estim.T[1]) \ + alpha.estim.T[1] * nplog(omega.estim.T[1]) - alpha.estim.T[0] * nplog(omega.estim.T[0]) \ + self.total * (nplog(1 - omega.estim.T[1]) - nplog(1 - omega.estim.T[0])),[1]) self.total_log_likelihood_ratio = self.total_log_likelihood_ratio / np.log(10) self.posterior_log_odds = self.prior_log_odds \ + self.footprint_log_likelihood_ratio \ + self.total_log_likelihood_ratio class Pi(Data): """ Class to store and update (M-step) the parameter `p` in the msCentipede model. It is also used for the parameter `p_o` in the msCentipede-flexbg model. Arguments J : int number of scales """ def __init__(self, J): Data.__init__(self) self.J = J for j in xrange(self.J): self.value[j] = np.empty((2**j,), dtype='float') def update(self, data, zeta, tau): """Update the estimates of parameter `p` (and `p_o`) in the model. """ def function(x, kwargs): """Computes part of the likelihood function that has terms containing `pi`. 
""" data = kwargs['data'] zeta = kwargs['zeta'] tau = kwargs['tau'] j = kwargs['j'] func = np.zeros(data.value[j][0].shape, dtype=float) for r in xrange(data.R): func += gammaln(data.value[j][r] + tau.estim[j] * x) \ + gammaln(data.total[j][r] - data.value[j][r] + tau.estim[j] * (1-x)) \ - gammaln(tau.estim[j] * x) - gammaln(tau.estim[j] * (1-x)) f = -1. * np.sum(zeta.estim[:,1] * np.sum(func,1)) return f def gradient(x, kwargs): """Computes gradient of the likelihood function with respect to `pi`. """ data = kwargs['data'] zeta = kwargs['zeta'] tau = kwargs['tau'] j = kwargs['j'] df = np.zeros(data.value[j][0].shape, dtype=float) for r in xrange(data.R): df += digamma(data.value[j][r] + tau.estim[j] * x) \ - digamma(data.total[j][r] - data.value[j][r] + tau.estim[j] * (1-x)) \ - digamma(tau.estim[j] * x) + digamma(tau.estim[j] * (1-x)) Df = -1. * tau.estim[j] * np.sum(zeta.estim[:,1:] * df,0) return Df def hessian(x, kwargs): """Computes hessian of the likelihood function with respect to `pi`. """ data = kwargs['data'] zeta = kwargs['zeta'] tau = kwargs['tau'] j = kwargs['j'] hf = np.zeros(data.value[j][0].shape, dtype=float) for r in xrange(data.R): hf += polygamma(1, data.value[j][r] + tau.estim[j] * x) \ + polygamma(1, data.total[j][r] - data.value[j][r] + tau.estim[j] * (1-x)) \ - polygamma(1, tau.estim[j] * x) - polygamma(1, tau.estim[j] * (1-x)) hess = -1. * tau.estim[j]**2 * np.sum(zeta.estim[:,1:] * hf,0) Hf = np.diag(hess) return Hf for j in xrange(self.J): # initialize optimization variable xo = self.value[j] X = xo.size # set constraints for optimization variable if tau.estim[j]<2: xmin = 0.5*np.ones((X,1), dtype='float') xmax = 0.5*np.ones((X,1), dtype='float') else: xmin = 1./tau.estim[j]*np.ones((X,1), dtype='float') xmax = (1-1./tau.estim[j])*np.ones((X,1), dtype='float') G = np.vstack((np.diag(-1*np.ones((X,), dtype='float')), \ np.diag(np.ones((X,), dtype='float')))) h = np.vstack((-1*xmin,xmax)) args = dict([('G',G),('h',h),('data',data),('zeta',zeta),('tau',tau),('j',j)]) # call optimizer optimized = False while not optimized: try: self.value[j] = optimizer(xo, function, gradient, hessian, args) optimized = True except ValueError: dx = xmax-xmin xo[dx>0] = xmin + np.random.rand(X,1)/dx xo[dx==0] = xmin if np.isnan(self.value[j]).any(): print "Nan in Pi" raise ValueError if np.isinf(self.value[j]).any(): print "Inf in Pi" raise ValueError def avoid_edges(self): for j in xrange(self.J): self.value[j][self.value[j]<1e-10] = 1e-10 self.value[j][self.value[j]>1-1e-10] = 1-1e-10 class Tau(): """ Class to store and update (M-step) the parameter `tau` in the msCentipede model. It is also used for the parameter `tau_o` in the msCentipede-flexbg model. Arguments J : int number of scales """ def __init__(self, J): self.J = J self.estim = np.empty((self.J,), dtype='float') def update(self, data, zeta, pi): """Update the estimates of parameter `tau` (and `tau_o`) in the model. """ def function(x, kwargs): """Computes part of the likelihood function that has terms containing `tau`. """ data = kwargs['data'] zeta = kwargs['zeta'] pi = kwargs['pi'] j = kwargs['j'] func = np.zeros(zeta.estim[:,1].shape, dtype=float) # loop over replicates for r in xrange(data.R): F = gammaln(data.value[j][r] + pi.value[j] * x) \ + gammaln(data.total[j][r] - data.value[j][r] + (1 - pi.value[j]) * x) \ - gammaln(data.total[j][r] + x) + gammaln(x) \ - gammaln(pi.value[j] * x) - gammaln((1 - pi.value[j]) * x) func += np.sum(F, 1) F = -1. 
* np.sum(zeta.estim[:,1] * func) return F def gradient(x, kwargs): """Computes gradient of the likelihood function with respect to `tau`. """ data = kwargs['data'] zeta = kwargs['zeta'] pi = kwargs['pi'] j = kwargs['j'] # loop over replicates Df = np.empty((1,), dtype='float') df = np.zeros(zeta.estim[:,1].shape, dtype=float) for r in xrange(data.R): f = pi.value[j] * digamma(data.value[j][r] + pi.value[j] * x) \ + (1 - pi.value[j]) * digamma(data.total[j][r] - data.value[j][r] + (1 - pi.value[j]) * x) \ - digamma(data.total[j][r] + x) + digamma(x) \ - pi.value[j] * digamma(pi.value[j] * x) - (1 - pi.value[j]) * digamma((1 - pi.value[j]) * x) df += np.sum(f, 1) Df[0] = -1 * np.sum(zeta.estim[:,1] * df) return Df def hessian(x, kwargs): """Computes hessian of the likelihood function with respect to `tau`. """ data = kwargs['data'] zeta = kwargs['zeta'] pi = kwargs['pi'] j = kwargs['j'] # loop over replicates hess = np.empty((1,), dtype='float') hf = np.zeros(zeta.estim[:,1].shape, dtype=float) for r in xrange(data.R): f = pi.value[j]**2 * polygamma(1, data.value[j][r] + pi.value[j] * x) \ + (1 - pi.value[j])**2 * polygamma(1, data.total[j][r] - data.value[j][r] + (1 - pi.value[j]) * x) \ - polygamma(1, data.total[j][r] + x) + polygamma(1, x) \ - pi.value[j]**2 * polygamma(1, pi.value[j] * x) \ - (1 - pi.value[j])**2 * polygamma(1, (1 - pi.value[j]) * x) hf += np.sum(f, 1) hess[0] = -1 * np.sum(zeta.estim[:,1] * hf) Hf = np.diag(hess) return Hf for j in xrange(self.J): # initialize optimization variables xo = self.estim[j:j+1] # set constraints for optimization variables G = np.diag(-1 * np.ones((1,), dtype=float)) minj = 1./min([pi.value[j].min(), (1-pi.value[j]).min()]) xmin = np.array(minj).reshape(1,1) h = -1*xmin args = dict([('j',j),('G',G),('h',h),('data',data),('zeta',zeta),('pi',pi)]) # call optimizer optimized = False while not optimized: try: x_final = optimizer(xo, function, gradient, hessian, args) optimized = True except ValueError as err: xo = xmin.ravel()+100*np.random.rand() bounds = [(minj, None)] solution = spopt.fmin_l_bfgs_b(function, xo, fprime=gradient, \ args=(args,), bounds=bounds) x_final = solution[0] optimized = True self.estim[j:j+1] = x_final if np.isnan(self.estim).any(): print "Nan in Tau" raise ValueError if np.isinf(self.estim).any(): print "Inf in Tau" raise ValueError class Alpha(): """ Class to store and update (M-step) the parameter `alpha` in negative binomial part of the msCentipede model. There is a separate parameter for bound and unbound states, for each replicate. Arguments R : int number of replicate measurements """ def __init__(self, R): self.R = R self.estim = np.random.rand(self.R,2)*10 def update(self, zeta, omega): """Update the estimates of parameter `alpha` in the model. """ def function(x, kwargs): """Computes part of the likelihood function that has terms containing `alpha`. """ zeta = kwargs['zeta'] omega = kwargs['omega'] constant = kwargs['constant'] zetaestim = kwargs['zetaestim'] func = np.array([outsum(gammaln(zeta.total[:,r:r+1] + x[2*r:2*r+2]) * zeta.estim) \ - gammaln(x[2*r:2*r+2]) * zetaestim[0] + constant[r] * x[2*r:2*r+2] \ for r in xrange(omega.R)]) f = -1.*func.sum() return f def gradient(x, kwargs): """Computes gradient of the likelihood function with respect to `omega`. 
""" zeta = kwargs['zeta'] omega = kwargs['omega'] zetaestim = kwargs['zetaestim'] constant = kwargs['constant'] df = [] for r in xrange(omega.R): df.append(outsum(digamma(zeta.total[:,r:r+1] + x[2*r:2*r+2]) * zeta.estim)[0] \ - digamma(x[2*r:2*r+2]) * zetaestim[0] + constant[r]) Df = -1. * np.hstack(df) return Df def hessian(x, kwargs): """Computes hessian of the likelihood function with respect to `omega`. """ zeta = kwargs['zeta'] omega = kwargs['omega'] zetaestim = kwargs['zetaestim'] constant = kwargs['constant'] hess = [] for r in xrange(omega.R): hess.append(outsum(polygamma(1, zeta.total[:,r:r+1] + x[2*r:2*r+2]) * zeta.estim)[0] \ - polygamma(1, x[2*r:2*r+2]) * zetaestim[0]) Hf = -1. * np.diag(np.hstack(hess)) return Hf constant = [nplog(omega.estim[r]) * outsum(zeta.estim)[0] for r in xrange(self.R)] zetaestim = outsum(zeta.estim) # initialize optimization variables xo = self.estim.ravel() # set constraints for optimization variables G = np.diag(-1 * np.ones(xo.shape, dtype=float)) h = np.zeros((xo.size,1), dtype=float) args = dict([('G',G),('h',h),('omega',omega),('zeta',zeta),('constant',constant),('zetaestim',zetaestim)]) # call optimizer x_final = optimizer(xo, function, gradient, hessian, args) self.estim = x_final.reshape(self.estim.shape) if np.isnan(self.estim).any(): print "Nan in Alpha" raise ValueError if np.isinf(self.estim).any(): print "Inf in Alpha" raise ValueError class Omega(): """ Class to store and update (M-step) the parameter `omega` in negative binomial part of the msCentipede model. There is a separate parameter for bound and unbound states, for each replicate. Arguments R : int number of replicate measurements """ def __init__(self, R): self.R = R self.estim = np.random.rand(self.R,2) self.estim[:,1] = self.estim[:,1]/100 def update(self, zeta, alpha): """Update the estimates of parameter `omega` in the model. """ numerator = outsum(zeta.estim)[0] * alpha.estim denominator = np.array([outsum(zeta.estim * (estim + zeta.total[:,r:r+1]))[0] \ for r,estim in enumerate(alpha.estim)]) self.estim = numerator / denominator if np.isnan(self.estim).any(): print "Nan in Omega" raise ValueError if np.isinf(self.estim).any(): print "Inf in Omega" raise ValueError class Beta(): """ Class to store and update (M-step) the parameter `beta` in the logistic function in the prior of the msCentipede model. Arguments scores : array an array of scores for each motif instance. these could include PWM score, conservation score, a measure of various histone modifications, outputs from other algorithms, etc. """ def __init__(self, scores): self.S = scores.shape[1] self.estim = np.random.rand(self.S) def update(self, scores, zeta): """Update the estimates of parameter `beta` in the model. """ def function(x, kwargs): """Computes part of the likelihood function that has terms containing `beta`. """ scores = kwargs['scores'] zeta = kwargs['zeta'] arg = insum(x * scores,[1]) func = arg * zeta.estim[:,1:] - nplog(1 + np.exp(arg)) f = -1. * func.sum() return f def gradient(x, kwargs): """Computes gradient of the likelihood function with respect to `beta`. """ scores = kwargs['scores'] zeta = kwargs['zeta'] arg = insum(x * scores,[1]) Df = -1 * np.sum(scores * (zeta.estim[:,1:] - logistic(-arg)),0) return Df def hessian(x, kwargs): """Computes hessian of the likelihood function with respect to `beta`. 
""" scores = kwargs['scores'] zeta = kwargs['zeta'] arg = insum(x * scores,[1]) larg = scores * logistic(arg) * logistic(-arg) Hf = np.dot(scores.T, larg) return Hf xo = self.estim.copy() args = dict([('scores',scores),('zeta',zeta)]) self.estim = optimizer(xo, function, gradient, hessian, args) if np.isnan(self.estim).any(): print "Nan in Beta" raise ValueError if np.isinf(self.estim).any(): print "Inf in Beta" raise ValueError def optimizer(xo, function, gradient, hessian, kwargs): """Calls the appropriate nonlinear convex optimization solver in the package `cvxopt` to find optimal values for the relevant parameters, given subroutines that evaluate a function, its gradient, and hessian, this subroutine Arguments function : function object evaluates the function at the specified parameter values gradient : function object evaluates the gradient of the function hessian : function object evaluates the hessian of the function """ def F(x=None, z=None): """A subroutine that the cvxopt package can call to get values of the function, gradient and hessian during optimization. """ if x is None: return 0, cvx.matrix(x_init) xx = np.array(x).ravel().astype(np.float64) # compute likelihood function f = function(xx, kwargs) if np.isnan(f) or np.isinf(f): f = np.array([np.finfo('float32').max]).astype('float') else: f = np.array([f]).astype('float') # compute gradient Df = gradient(xx, kwargs) if np.isnan(Df).any() or np.isinf(Df).any(): Df = -1 * np.finfo('float32').max * np.ones((1,xx.size), dtype=float) else: Df = Df.reshape(1,xx.size) if z is None: return cvx.matrix(f), cvx.matrix(Df) # compute hessian hess = hessian(xx, kwargs) Hf = z[0] * hess return cvx.matrix(f), cvx.matrix(Df), cvx.matrix(Hf) # warm start for the optimization V = xo.size x_init = xo.reshape(V,1) # call the optimization subroutine in cvxopt if kwargs.has_key('G'): # call a constrained nonlinear solver solution = solvers.cp(F, G=cvx.matrix(kwargs['G']), h=cvx.matrix(kwargs['h'])) else: # call an unconstrained nonlinear solver solution = solvers.cp(F) x_final = np.array(solution['x']).ravel() return x_final def compute_footprint_likelihood(data, pi, tau, pi_null, tau_null, model): """Evaluates the likelihood function for the footprint part of the bound model and background model. 
Arguments data : Data transformed read count data pi : Pi estimate of mean footprint parameters at bound sites tau : Tau estimate of footprint heterogeneity at bound sites pi_null : Pi estimate of mean cleavage pattern at unbound sites tau_null : Tau or None estimate of cleavage heterogeneity at unbound sites model : string {msCentipede, msCentipede-flexbgmean, msCentipede-flexbg} """ lhood_bound = Data() lhood_unbound = Data() for j in xrange(data.J): value = outsum(data.value[j])[0] total = outsum(data.total[j])[0] lhood_bound.value[j] = outsum([gammaln(data.value[j][r] + pi.value[j] * tau.estim[j]) \ + gammaln(data.total[j][r] - data.value[j][r] + (1 - pi.value[j]) * tau.estim[j]) \ - gammaln(data.total[j][r] + tau.estim[j]) + gammaln(tau.estim[j]) \ - gammaln(pi.value[j] * tau.estim[j]) - gammaln((1 - pi.value[j]) * tau.estim[j]) \ for r in xrange(data.R)])[0] if model in ['msCentipede','msCentipede_flexbgmean']: lhood_unbound.value[j] = value * nplog(pi_null.value[j]) \ + (total - value) * nplog(1 - pi_null.value[j]) elif model=='msCentipede_flexbg': lhood_unbound.value[j] = outsum([gammaln(data.value[j][r] + pi_null.value[j] * tau_null.estim[j]) \ + gammaln(data.total[j][r] - data.value[j][r] + (1 - pi_null.value[j]) * tau_null.estim[j]) \ - gammaln(data.total[j][r] + tau_null.estim[j]) + gammaln(tau_null.estim[j]) \ - gammaln(pi_null.value[j] * tau_null.estim[j]) - gammaln((1 - pi_null.value[j]) * tau_null.estim[j]) \ for r in xrange(data.R)])[0] return lhood_bound, lhood_unbound def likelihood(data, scores, zeta, pi, tau, \ alpha, beta, omega, pi_null, tau_null, model): """Evaluates the likelihood function of the full model, given estimates of model parameters. Arguments data : Data transformed read count data scores : array an array of scores for each motif instance. these could include PWM score, conservation score, a measure of various histone modifications, outputs from other algorithms, etc. zeta : zeta expected value of factor binding state for each site. 
pi : Pi estimate of mean footprint parameters at bound sites tau : Tau estimate of footprint heterogeneity at bound sites alpha : Alpha estimate of negative binomial parameters for each replicate beta : Beta weights for various scores in the logistic function omega : Omega estimate of negative binomial parameters for each replicate pi_null : Pi estimate of mean cleavage pattern at unbound sites tau_null : Tau or None estimate of cleavage heterogeneity at unbound sites model : string {msCentipede, msCentipede-flexbgmean, msCentipede-flexbg} """ apriori = insum(beta.estim * scores,[1]) lhoodA, lhoodB = compute_footprint_likelihood(data, pi, tau, pi_null, tau_null, model) footprint = np.zeros((data.N,1),dtype=float) for j in xrange(data.J): footprint += insum(lhoodA.value[j],[1]) P_1 = footprint + insum(gammaln(zeta.total + alpha.estim[:,1]) - gammaln(alpha.estim[:,1]) \ + alpha.estim[:,1] * nplog(omega.estim[:,1]) + zeta.total * nplog(1 - omega.estim[:,1]), [1]) P_1[P_1==np.inf] = MAX P_1[P_1==-np.inf] = -MAX null = np.zeros((data.N,1), dtype=float) for j in xrange(data.J): null += insum(lhoodB.value[j],[1]) P_0 = null + insum(gammaln(zeta.total + alpha.estim[:,0]) - gammaln(alpha.estim[:,0]) \ + alpha.estim[:,0] * nplog(omega.estim[:,0]) + zeta.total * nplog(1 - omega.estim[:,0]), [1]) P_0[P_0==np.inf] = MAX P_0[P_0==-np.inf] = -MAX L = P_0 * zeta.estim[:,:1] + insum(P_1 * zeta.estim[:,1:],[1]) + apriori * (1 - zeta.estim[:,:1]) \ - nplog(1 + np.exp(apriori)) - insum(zeta.estim * nplog(zeta.estim),[1]) L = L.sum() / data.N if np.isnan(L): print "Nan in LogLike" return -np.inf if np.isinf(L): print "Inf in LogLike" return -np.inf return L def EM(data, scores, zeta, pi, tau, alpha, beta, omega, pi_null, tau_null, model): """This subroutine updates all model parameters once and computes an estimate of the posterior probability of binding. Arguments data : Data transformed read count data scores : array an array of scores for each motif instance. these could include PWM score, conservation score, a measure of various histone modifications, outputs from other algorithms, etc. zeta : zeta expected value of factor binding state for each site. 
pi : Pi estimate of mean footprint parameters at bound sites tau : Tau estimate of footprint heterogeneity at bound sites alpha : Alpha estimate of negative binomial parameters for each replicate beta : Beta weights for various scores in the logistic function omega : Omega estimate of negative binomial parameters for each replicate pi_null : Pi estimate of mean cleavage pattern at unbound sites tau_null : Tau or None estimate of cleavage heterogeneity at unbound sites model : string {msCentipede, msCentipede-flexbgmean, msCentipede-flexbg} """ # update binding posteriors zeta.update(data, scores, pi, tau, \ alpha, beta, omega, pi_null, tau_null, model) # update multi-scale parameters starttime = time.time() pi.update(data, zeta, tau) print "p_jk update in %.3f secs"%(time.time()-starttime) starttime = time.time() tau.update(data, zeta, pi) print "tau update in %.3f secs"%(time.time()-starttime) # update negative binomial parameters starttime = time.time() omega.update(zeta, alpha) print "omega update in %.3f secs"%(time.time()-starttime) starttime = time.time() alpha.update(zeta, omega) print "alpha update in %.3f secs"%(time.time()-starttime) # update prior parameters starttime = time.time() beta.update(scores, zeta) print "beta update in %.3f secs"%(time.time()-starttime) def square_EM(data, scores, zeta, pi, tau, alpha, beta, omega, pi_null, tau_null, model): """Accelerated update of model parameters and posterior probability of binding. Arguments data : Data transformed read count data scores : array an array of scores for each motif instance. these could include PWM score, conservation score, a measure of various histone modifications, outputs from other algorithms, etc. zeta : zeta expected value of factor binding state for each site. pi : Pi estimate of mean footprint parameters at bound sites tau : Tau estimate of footprint heterogeneity at bound sites alpha : Alpha estimate of negative binomial parameters for each replicate beta : Beta weights for various scores in the logistic function omega : Omega estimate of negative binomial parameters for each replicate pi_null : Pi estimate of mean cleavage pattern at unbound sites tau_null : Tau or None estimate of cleavage heterogeneity at unbound sites model : string {msCentipede, msCentipede-flexbgmean, msCentipede-flexbg} """ parameters = [pi, tau, alpha, omega] oldvar = [] for parameter in parameters: try: oldvar.append(parameter.estim.copy()) except AttributeError: oldvar.append(np.hstack([parameter.value[j].copy() for j in xrange(parameter.J)])) oldvars = [oldvar] # take two update steps for step in [0,1]: EM(data, scores, zeta, pi, tau, alpha, beta, omega, pi_null, tau_null, model) oldvar = [] for parameter in parameters: try: oldvar.append(parameter.estim.copy()) except AttributeError: oldvar.append(np.hstack([parameter.value[j].copy() for j in xrange(parameter.J)])) oldvars.append(oldvar) R = [oldvars[1][j]-oldvars[0][j] for j in xrange(len(parameters))] V = [oldvars[2][j]-oldvars[1][j]-R[j] for j in xrange(len(parameters))] a = -1.*np.sqrt(np.sum([(r*r).sum() for r in R]) / np.sum([(v*v).sum() for v in V])) if a>-1: a = -1. # given two update steps, compute an optimal step that achieves # a better likelihood than the best of the two steps. 
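    # This implements a SQUAREM-style accelerated EM step: R and V above are
    # the first and second differences of the parameter estimates across the
    # two EM updates, and the step length `a` is capped at -1 (a = -1 simply
    # reproduces the second, un-accelerated EM estimate).  The extrapolation
    # below is theta_new = (1+a)^2*theta_0 - 2a(1+a)*theta_1 + a^2*theta_2,
    # and `a` is repeatedly moved halfway back towards -1 whenever the
    # extrapolated parameters violate their positivity or [0,1] constraints.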
a_ok = False while not a_ok: invalid = np.zeros((0,), dtype='bool') for parameter,varA,varB,varC in zip(parameters,oldvars[0],oldvars[1],oldvars[2]): try: parameter.estim = (1+a)**2*varA - 2*a*(1+a)*varB + a**2*varC # ensure constraints on variables are satisfied invalid = np.hstack((invalid,(parameter.estim<=0).ravel())) except AttributeError: newparam = (1+a)**2*varA - 2*a*(1+a)*varB + a**2*varC # ensure constraints on variables are satisfied invalid = np.hstack((invalid, np.logical_or(newparam<0, newparam>1))) parameter.value = dict([(j,newparam[2**j-1:2**(j+1)-1]) \ for j in xrange(parameter.J)]) if np.any(invalid): a = (a-1)/2. if np.abs(a+1)<1e-4: a = -1. else: a_ok = True EM(data, scores, zeta, pi, tau, alpha, beta, omega, pi_null, tau_null, model) def estimate_optimal_model(reads, totalreads, scores, background, model, restarts, mintol): """Learn the model parameters by running an EM algorithm till convergence. Return the optimal parameter estimates from a number of EM results starting from random restarts. Arguments reads : array array of read counts at each base in a genomic window, across motif instances and several measurement replicates. totalreads : array array of total read counts in a genomic window, across motif instances and several measurement replicates. the size of the genomic window can be different for `reads` and `totalreads`. scores : array an array of scores for each motif instance. these could include PWM score, conservation score, a measure of various histone modifications, outputs from other algorithms, etc. background : array a uniform, normalized array for a uniform background model. when sequencing reads from genomic DNA are available, this is an array of read counts at each base in a genomic window, across motif instances. 
model : string {msCentipede, msCentipede-flexbgmean, msCentipede-flexbg} restarts : int number of independent runs of model learning mintol : float convergence criterion """ # transform data into multiscale representation data = Data(reads) data_null = Data(background) scores = np.hstack((np.ones((data.N,1), dtype=float), scores)) del reads # set background model pi_null = Pi(data_null.J) for j in xrange(pi_null.J): pi_null.value[j] = np.sum(np.sum(data_null.value[j],0),0) / np.sum(np.sum(data_null.total[j],0),0).astype('float') tau_null = Tau(data_null.J) tau_null = None if model=='msCentipede_flexbg': tau_null = Tau(data_null.J) zeta_null = Zeta(data_null, background.sum(1)) zeta_null.estim[:,1] = 1 zeta_null.estim[:,0] = 0 # iterative update of background model; # evaluate convergence based on change in estimated # background overdispersion change = np.inf while change>1e-2: change = tau_null.estim.copy() tau_null.update(data_null, zeta_null, pi_null) pi_null.update(data_null, zeta_null, tau_null) change = np.abs(change-tau_null.estim).sum() / tau_null.J maxLoglike = -np.inf restart = 0 err = 1 runlog = ['Number of sites = %d'%data.N] while restart<restarts: try: totaltime = time.time() print "Restart %d ..."%(restart+1) # initialize multi-scale model parameters pi = Pi(data.J) tau = Tau(data.J) # initialize negative binomial parameters alpha = Alpha(data.R) omega = Omega(data.R) # initialize prior parameters beta = Beta(scores) # initialize posterior over latent variables zeta = Zeta(data, totalreads) for j in xrange(pi.J): pi.value[j] = np.sum(data.value[j][0] * zeta.estim[:,1:],0) \ / np.sum(data.total[j][0] * zeta.estim[:,1:],0).astype('float') mask = pi.value[j]>0 pi.value[j][~mask] = pi.value[j][mask].min() mask = pi.value[j]<1 pi.value[j][~mask] = pi.value[j][mask].max() minj = 1./min([pi.value[j].min(), (1-pi.value[j]).min()]) if minj<2: minj = 2. tau.estim[j] = minj+10*np.random.rand() # initial log likelihood of the model Loglike = likelihood(data, scores, zeta, pi, tau, \ alpha, beta, omega, pi_null, tau_null, model) print Loglike tol = np.inf iter = 0 while np.abs(tol)>mintol: itertime = time.time() EM(data, scores, zeta, pi, tau, \ alpha, beta, omega, pi_null, tau_null, model) newLoglike = likelihood(data, scores, zeta, pi, tau, \ alpha, beta, omega, pi_null, tau_null, model) tol = newLoglike - Loglike Loglike = newLoglike print "Iteration %d: log likelihood = %.7f, change in log likelihood = %.7f, iteration time = %.3f secs"%(iter+1, Loglike, tol, time.time()-itertime) iter += 1 totaltime = (time.time()-totaltime)/60. # test if mean cleavage rate at bound sites is greater than at # unbound sites, for each replicate; avoids local optima issues. negbinmeans = alpha.estim * (1-omega.estim)/omega.estim if np.any(negbinmeans[:,0]<negbinmeans[:,1]): restart += 1 log = "%d. Log likelihood (per site) = %.3f (Completed in %.3f minutes)"%(restart,Loglike,totaltime) runlog.append(log) # choose these parameter estimates, if the likelihood is greater. if Loglike>maxLoglike: maxLoglikeres = Loglike if model in ['msCentipede','msCentipede_flexbgmean']: footprint_model = (pi, tau, pi_null) elif model=='msCentipede_flexbg': footprint_model = (pi, tau, pi_null, tau_null) count_model = (alpha, omega) prior = beta except ValueError: print "encountered an invalid value" if err<5: print "re-initializing learning for Restart %d ... %d"%(restart,err) err += 1 else: print "Error in learning model parameters. 
Please ensure the inputs are all valid" sys.exit(1) return footprint_model, count_model, prior, runlog def infer_binding_posterior(reads, totalreads, scores, background, footprint, negbinparams, prior, model): """Infer posterior probability of factor binding, given optimal model parameters. Arguments reads : array array of read counts at each base in a genomic window, across motif instances and several measurement replicates. totalreads : array array of total read counts in a genomic window, across motif instances and several measurement replicates. the size of the genomic window can be different for `reads` and `totalreads`. scores : array an array of scores for each motif instance. these could include PWM score, conservation score, a measure of various histone modifications, outputs from other algorithms, etc. background : array a uniform, normalized array for a uniform background model. when sequencing reads from genomic DNA are available, this is an array of read counts at each base in a genomic window, across motif instances. footprint : tuple (Pi, Tau) instances estimate of footprint model parameters negbinparams : tuple (Alpha, Omega) instances estimate of negative binomial model parameters prior : Beta estimate of weights in logistic function in the prior model : string {msCentipede, msCentipede-flexbgmean, msCentipede-flexbg} """ (N,L,R) = reads.shape data = Data(reads) data_null = Data(background) scores = np.hstack((np.ones((data.N,1), dtype=float), scores)) del reads # negative binomial parameters alpha = negbinparams[0] omega = negbinparams[1] # weights in logistic function in the prior beta = prior # multiscale parameters pi = footprint[0] tau = footprint[1] # setting background model pi_null = footprint[2] for j in xrange(pi_null.J): pi_null.value[j] = np.sum(np.sum(data_null.value[j],0),0) \ / np.sum(np.sum(data_null.total[j],0),0).astype('float') tau_null = None if model=='msCentipede_flexbg': tau_null = footprint[3] if data_null.N>1000: zeta_null = Zeta(data_null, background.sum(1)) zeta_null.estim[:,1] = 1 zeta_null.estim[:,0] = 0 # iterative update of background model, when # accounting for overdispersion change = np.inf while change>1e-1: change = tau_null.estim.copy() pi_null.update(data_null, zeta_null, tau_null) tau_null.update(data_null, zeta_null, pi_null) change = np.abs(change-tau_null.estim).sum() zeta = Zeta(data, totalreads, infer=True) zeta.infer(data, scores, pi, tau, alpha, beta, omega, \ pi_null, tau_null, model) return zeta.posterior_log_odds, \ zeta.prior_log_odds, zeta.footprint_log_likelihood_ratio, \ zeta.total_log_likelihood_ratio
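# -----------------------------------------------------------------------------
# Illustrative sketch, independent of the module above: Data.transform stores,
# at each scale j, the read counts in the left half of every window of size
# L / 2**j ("value") together with the counts in the whole window ("total").
# The standalone function below reproduces that multiscale split for a single
# 1-D count profile; `multiscale_split` is a hypothetical name used only here,
# not part of the msCentipede code.
import numpy as np

def multiscale_split(profile):
    """Return {scale: (left_half_counts, window_totals)} for a 1-D profile
    whose length is a power of two."""
    L = profile.shape[0]
    J = int(np.log2(L))
    out = {}
    for j in range(J):
        half = L // 2 ** (j + 1)
        halves = profile.reshape(2 ** (j + 1), half).sum(axis=1)
        left = halves[0::2]                    # counts in the left half of each window
        total = halves[0::2] + halves[1::2]    # counts in the full window
        out[j] = (left, total)
    return out

# Example: an 8-base profile is summarised at 3 scales; the coarsest total is
# just the overall read count of the window.
counts = np.array([3, 1, 0, 2, 4, 0, 1, 1])
scales = multiscale_split(counts)
assert scales[0][1][0] == counts.sum()
# -----------------------------------------------------------------------------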
#!/usr/bin/env python # -*- coding: utf-8 -*- #...for the logging. import logging as lg #...for copying. from copy import deepcopy #...for the MATH. import numpy as np #...for the data values. from datavals import * #...for the pixels. from pixel import * #...for the linearity calculations. from helpers import getLinearity, countEdgePixels class Kluster: """ Wrapper class for klusters. @param [in] rows The number of rows in the originating frame. @param [in] cols The number of columns in the originating frame. @param [in] ismc Is the cluster from a Monte Carlo simulation? """ def __init__(self, rows, cols, ismc): """ Constructor. """ lg.debug(" Instantiating a Kluster object.") ## Debug flag (deprecated). self.dbg = True ## The number of rows in the frame. self.__frame_rows = rows ## The number of columns in the frame. self.__frame_cols = cols ## The total counts in the cluster. self.total_counts = 0 ## A list of the XY positions of the pixels used in clustering. ## (Note: we cannot use a dictionary for clustering because ## the list changes during iteration.) self.pixel_xy_list = [] ## A dictionary of the pixels {X:C} (populated after clustering). self.__pixel_dict = {} ## Multiline JSON entry for the pixel. self.pixel_string = "" # # Spatial properties. # ## The minimum x value. self.__xmin = None ## The maximum x value. self.__xmax = None ## The minimum y value. self.__ymin = None ## The maximum y value. self.__ymax = None ## The cluster width [pixels]. self.__width = None ## The cluster height [pixels]. self.__height = None ## The unweighted cluster x position [pixels]. self.__x_uw = None ## The unweighted cluster y position [pixels]. self.__y_uw = None # Unweighted (u subsctript) properties. ## The cluster radius (unweighted). self.r_u = -1.0 ## The cluster's spatial density (unweighted). self.spatial_density_u = -1.0 # Counts-related properties # ## Total counts. self.__total_counts = None ## The maximum count value. self.__count_max = None # Linearity-related properties. # ## Line of best fit - gradient (m). self.__lin_m = None ## Line of best fit - y intercept (c). self.__lin_c = None ## Line of best fit - sum of the residuals (Sum{R}). self.__lin_sumR = None ## Linearity. self.__linearity = None # Energy. ## Total energy [keV]. self.__energy_total = None ## Max. energy [keV]. self.__energy_max = None # Edge pixel information. ## The number of edge pixels. self.__n_edge = None ## The fraction of inner pixels in the cluster. self.__inner_pixels_frac = None ## The fraction of outer pixels in the cluster. self.__outer_pixels_frac = None ## Is the cluster from Monte Carlo simulation? self.__is_mc = ismc ## Is the cluster on the edge of the frames? 
self.__is_edge_kluster = None def __lt__(self, other): return self.getNumberOfPixels() < other.getNumberOfPixels() def get_pixel_xy_list(self): return self.pixel_xy_list def get_pixel_dict(self): return self.__pixel_dict def insert(self, pixel_xy, pixel): self.pixel_xy_list.append(pixel_xy) self.total_counts += pixel.getC() def contains_pixel(self, pixel_xy): return pixel_xy in self.pixel_xy_list def getNumberOfPixels(self): return len(self.pixel_xy_list) def getTotalCounts(self): return self.total_counts def getWidth(self): if self.__width is None: raise IOError("UNPROCESSED_KLUSTER") return self.__width def getHeight(self): if self.__height is None: raise IOError("UNPROCESSED_KLUSTER") return self.__height def getXMin(self): if self.__xmin is None: raise IOError("UNPROCESSED_KLUSTER") return self.__xmin def getXMax(self): if self.__xmax is None: raise IOError("UNPROCESSED_KLUSTER") return self.__xmax def getYMin(self): if self.__ymin is None: raise IOError("UNPROCESSED_KLUSTER") return self.__ymin def getYMax(self): if self.__ymax is None: raise IOError("UNPROCESSED_KLUSTER") return self.__ymax def getXUW(self): return self.__x_uw def getYUW(self): return self.__y_uw def getRadiusUW(self): return self.__r_uw def getDensityUW(self): return self.__rho_uw def getMaxCountValue(self): return self.__count_max def getLineOfBestFitValues(self): return self.__lin_m, self.__lin_c, self.__lin_sumR def getLinearity(self): return self.__linearity def getTotalEnergy(self): return self.__energy_total def getMaxEnergy(self): return self.__energy_max def getNumberOfEdgePixels(self): return self.__n_edge def getInnerPixelFraction(self): return self.__inner_pixels_frac def getOuterPixelFraction(self): return self.__outer_pixels_frac def isEdgeCluster(self): return self.__is_edge_kluster def isMC(self): return self.__is_mc def isGamma(self): """ Is the cluster a gamma candidate? """ npix = self.getNumberOfPixels() rad = self.getRadiusUW() return npix == 1 or npix == 2 or (npix==3 and rad<TRIPIXEL_RADIUS) or (npix==4 and rad<TETRAPIXEL_RADIUS) def process(self, pixels): # # Note that the pixels are stored in and obtained from the KlusterFinder. # Start the string for the pixel JSON self.pixels_string = "pixels = [\n" xs = [] ys = [] cs = [] # Loop over the pixels found in the clustering process. #for bxy in self.pixel_xy_list: for X in self.pixel_xy_list: x = float(pixels[X].get_x()) y = float(pixels[X].get_y()) c = float(pixels[X].getC()) xs.append(x) ys.append(y) cs.append(c) # Add the pixel to the pixel JSON text. self.pixels_string += " " + pixels[X].pixel_entry() # Add to the cluster's own pixel dictionary. self.__pixel_dict[X] = c # End the string for the pixel JSON self.pixels_string += "]" self.__xmin = min(xs) self.__xmax = max(xs) self.__ymin = min(ys) self.__ymax = max(ys) self.__width = self.__xmax - self.__xmin + 1 self.__height = self.__ymax - self.__ymin + 1 ## The unweighted cluster x position [pixels]. self.__x_uw = np.mean(xs) ## The unweighted cluster y position [pixels]. self.__y_uw = np.mean(ys) # Calculate the cluster radius #------------------------------ # Firstly, we calculate the distance between each pixel and the centre. r_i = [np.sqrt( (float(X%256) - self.__x_uw)*(float(X%256) - self.__x_uw) \ + (float(X/256) - self.__y_uw)*(float(X/256) - self.__y_uw) ) \ for X in self.__pixel_dict.keys()] # Then we find the maximum of these distances. This is the cluster radius. 
self.__r_uw = max(r_i) # Find the spatial density if self.__r_uw > 0.0: self.__rho_uw = float(len(self.pixel_xy_list))/(self.__r_uw * self.__r_uw * np.pi) else: self.__rho_uw = 0.0 # Cluster counts #---------------- ## The total counts in the cluster. self.__total_counts = self.getTotalCounts() ## The maximum count value in the cluster. self.__count_max = max(cs) # Linearity information #----------------------- self.__lin_m, self.__lin_c, self.__lin_sumR, self.__linearity = getLinearity(self.__pixel_dict) # Edge pixel information. self.__n_edge = countEdgePixels(self.__pixel_dict, self.__frame_rows, self.__frame_cols) self.__outer_pixels_frac = float(self.__n_edge)/float(len(self.__pixel_dict)) self.__inner_pixels_frac = 1.0 - self.__outer_pixels_frac if 0 in xs or 0 in ys or 255 in xs or 255 in ys: self.__is_edge_kluster = True else: self.__is_edge_kluster = False # TMP self.__energy_total = 0.0 self.__energy_max = 0.0 lg.debug("*") lg.debug("* NEW CLUSTER:") lg.debug("*") lg.debug(self.pixels_string) lg.debug("*") lg.debug("* Cluster properties:") lg.debug("*") lg.debug("* x_min = %5d" % self.getXMin()) lg.debug("* x_max = %5d" % self.getXMax()) lg.debug("* y_min = %5d" % self.getYMin()) lg.debug("* y_max = %5d" % self.getYMax()) lg.debug("*") lg.debug("* Width = %5d" % (self.getWidth())) lg.debug("* Height = %5d" % (self.getHeight())) lg.debug("*") lg.debug("* Size (N_h) = %5d" % (self.getNumberOfPixels())) lg.debug("*") lg.debug("* Total Counts = %5d" % (self.getTotalCounts())) lg.debug("* Max. Count Value = %5d" % (self.getMaxCountValue())) lg.debug("*") lg.debug("* UNWEIGHTED:") lg.debug("* Cluster (x, y) = (%6.2f, %6.2f)" % (self.__x_uw, self.__y_uw)) lg.debug("* Cluster radius = %10.5f" % self.__r_uw) lg.debug("* Cluster spatial density \\rho = %10.5f" % self.__rho_uw) lg.debug("*") lg.debug("* Line of best fit: %f * x %+f" % (self.__lin_m, self.__lin_c)) lg.debug("* Sum of residuals \Sum{R} = %f" % (self.__lin_sumR)) lg.debug("* Linearity = %f" % (self.__linearity)) lg.debug("*") lg.debug("* Number of edge pixels = %5d" % (self.__n_edge)) lg.debug("*") def getKlusterPropertiesJson(self): m, c, sumR = self.getLineOfBestFitValues() p = {\ "size" : self.getNumberOfPixels(), \ "xmin" : self.getXMin(), \ "xmax" : self.getXMax(), \ "ymin" : self.getYMin(), \ "ymax" : self.getYMax(), \ "width" : self.getWidth(), \ "height" : self.getHeight(), \ "x_uw" : self.getXUW(), \ "y_uw" : self.getYUW(), \ "radius_uw" : self.getRadiusUW(), \ "density_uw" : self.getDensityUW(), \ "totalcounts" : self.getTotalCounts(), \ "maxcounts" : self.getMaxCountValue(), \ "lin_m" : m, \ "lin_c" : c, \ "lin_sumofres" : sumR, \ "lin_linearity" : self.getLinearity(), \ "n_edgepixels" : self.getNumberOfEdgePixels(), \ "edgefrac" : self.getOuterPixelFraction(), \ "innerfrac" : self.getInnerPixelFraction(), \ "ismc" : self.isMC(), \ "isedgekluster" : self.isEdgeCluster(), \ #"totalenergy" :, \ #"maxenergy" :, \ #"frameid" :\ } return p def getPixelMap(self): return self.__pixel_dict class KlusterFinder: """ Finds Klusters (blobs) in Timepix frames. @author Son Hoang (principle author). @author T. Whyntie (editor). """ dir_x = [-1, -1, 0, 1, 1, 1, 0, -1] dir_y = [ 0, 1, 1, 1, 0, -1, -1, -1] def __init__(self, data, r, c, ismc=False, maskdict={}): """ Constructor. @param [in] data A dictionary of pixel data - {X:C}. @param [in] r The number of rows in the originating frame. @param [in] c The number of columns in the originating frame. @param [in] ismc Is the cluster from simulated data? 
@param [in] maskdict A dictionary of masked pixels. """ lg.debug(""); lg.debug(" Instantiating a cluster finder object."); lg.debug("") self.dbg = False # True - for debugging print statements. ## A map of the pixels in a dictionary: {X:Pixel}. self.pixels = {} # map of {XY: Pixel} ## A list of the clusters found. self.blob_list = [] ## The number of pixel rows in the data frame. self.rows = r ## The number of pixel columns in the data frame. self.cols = c ## Are we looking at simulated data? self.__is_mc = ismc ## The dictionary of pixels of the frame. self.__pixel_map = deepcopy(data) # Remove the masked pixels from the data. if maskdict is not None: for X in maskdict: if X in self.__pixel_map.keys(): del self.__pixel_map[X] #print "DEBUG: Data supplied has %6d pixels." % \ # (len(data)) # # An arbitrary check on number of pixels in the frame. # if data.size() >= 10000 then return # # Loop over the data supplied to the KlusterFinder. # * Puts all of the data into the pixel map; # * Assigns neighbouring pixels where it find them. for xy, c in self.__pixel_map.iteritems(): x = xy % self.cols; y = xy / self.cols self.pixels[xy] = Pixel(x,y,c,-1, self.rows, self.cols) # Loop over the eight possible directions. for direction in range(8): ny = y + self.dir_y[direction] # Next row. nx = x + self.dir_x[direction] # Next column. nxy = ny * self.cols + nx # The next xy value. # If the next row or column is on an edge, skip it. if ny<0 or ny>=self.rows or nx<0 or nx>=self.cols: continue if nxy in self.pixels: #print "DEBUG: *-----* Found neighbour in self.pixels!" #print "DEBUG: * \\--* xy = %d" % (nxy) self.pixels[ xy].set_neighbour( direction, nxy) self.pixels[nxy].set_neighbour((direction+4)%8, xy) # Print out all pixels and neighbours if self.dbg: ## puts "DEBUG:" print "DEBUG: Looping over pixels in the @pixel_map:" print "DEBUG:---------------------------------------" for xy, p in self.pixels.iteritems(): print "DEBUG: * Pixel at (%10d) -> (%3d, %3d) = %3d" % \ (xy, p.get_x(),p.get_y(),p.get_c()) for direction, n in p.get_neighbours().iteritems(): print "DEBUG: *---> Neighbour in direction % 1d with ID=%s" % \ (direction, n) print "DEBUG:" print "DEBUG:" # Now loop over the pixels in the KlusterFinder's pixel_map # in order to create the blobs. #print "DEBUG: Creating the Klusters!" #print "DEBUG:" #print "DEBUG: Looping over the pixels in the @pixel_map:" #print "DEBUG:-------------------------------------------" for xy, p in self.pixels.iteritems(): #print "DEBUG: * Pixel at (%3d, %3d) = %3d, mask = %3d" % \ # (p.get_x(),p.get_y(),p.get_c(),p.get_mask()) # Start a new blob if the pixel hasn't been blobed yet. if p.get_mask() == -1: blob = Kluster(self.rows, self.cols, self.__is_mc) p.set_mask(0) #print "DEBUG: Mask set to %3d" % (p.get_mask()) blob.insert(xy, p) # Loop over the list of pixels in the blob. for bxy in blob.get_pixel_xy_list(): #pb = self.pixels[bxy] for direction in range(8): if direction in self.pixels[bxy].get_neighbours(): #print "DEBUG: *---> Pixel found in direction %1d" % (direction) nxy = self.pixels[bxy].get_neighbour(direction) self.pixels[bxy].set_mask(self.pixels[bxy].get_mask() + pow(2, direction)) #print "DEBUG: *---> Setting mask to %3d" % \ # (self.pixels[bxy].get_mask() + 2.0 ** direction) # If the Pixel isn't already in the Kluster, add it. if not blob.contains_pixel(nxy): blob.insert(nxy, self.pixels[bxy]) # end of Pixel presence check. # end of Pixel neighbour in direction existence check. # end of loop over the directions. 
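                # Note that blob.get_pixel_xy_list() grows while it is being
                # iterated over, so the loop above effectively performs a
                # flood fill: every newly inserted pixel is visited in turn
                # and its neighbours examined, and the finished blob contains
                # the whole 8-connected component around the seed pixel.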
self.insert(blob) ## end of check on the Pixel mask (as to whether or not blobed). # end of loop over the Pixels #print "DEBUG:-------------------------------------------" if self.dbg: print "DEBUG:" print "DEBUG: Looping over the found blobs:" print "DEBUG:------------------------------" for b in self.blob_list: print "DEBUG: * %3d pixel(s) found." % (len(b.get_pixel_xy_list())) # end of loop over blobs print "DEBUG:------------------------------" ## The number of gamma candidates. self.__n_gammas = 0 ## The number of monopixel candidates. self.__n_g1 = 0 ## The number of bipixel candidates. self.__n_g2 = 0 ## The number of tripixel candidates. self.__n_g3 = 0 ## The number of tetrapixel candidates. self.__n_g4 = 0 # Calculate the blob properties for b in self.blob_list: b.process(self.pixels) # Count the gamma candidates - we won't store these so we need to # know the numbers. if b.getNumberOfPixels() == 1: self.__n_g1 += 1 # Bipixel gamma. elif b.getNumberOfPixels() == 2: self.__n_g2 += 1 # Tripixel... elif b.getNumberOfPixels() == 3: # Tripixel gamma. if b.r_u < TRIPIXEL_RADIUS: #0.75 self.__n_g3 += 1 # Tetrapixel... elif b.getNumberOfPixels() == 4: # Tetrapixel gamma. if b.r_u < TETRAPIXEL_RADIUS: #0.71 self.__n_g4 += 1 self.__n_gammas = self.__n_g1 + self.__n_g2 + self.__n_g3 + self.__n_g4 # Sort the cluster list by cluster size. self.blob_list.sort(reverse=True) def insert(self, blob): self.blob_list.append(blob) def getNumberOfKlusters(self): return len(self.blob_list) def getListOfKlusters(self): return self.blob_list def getNumberOfGammas(self): return self.__n_gammas def getNumberOfMonopixels(self): return self.__n_g1 def getNumberOfBipixels(self): return self.__n_g2 def getNumberOfTripixelGammas(self): return self.__n_g3 def getNumberOfTetrapixelGammas(self): return self.__n_g4
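# -----------------------------------------------------------------------------
# Illustrative sketch, separate from the classes above: Kluster.process
# defines the unweighted cluster radius as the largest distance from any hit
# pixel to the unweighted centroid, and the spatial density as
# N_pixels / (pi * r^2).  The helper below reproduces that calculation for a
# plain list of (x, y) hits; `radius_and_density` is a hypothetical name used
# only for this example.
import numpy as np

def radius_and_density(xs, ys):
    """Return the unweighted radius and spatial density of a pixel cluster."""
    xs, ys = np.asarray(xs, dtype=float), np.asarray(ys, dtype=float)
    x_uw, y_uw = xs.mean(), ys.mean()
    r = np.sqrt((xs - x_uw) ** 2 + (ys - y_uw) ** 2).max()
    rho = len(xs) / (np.pi * r * r) if r > 0.0 else 0.0
    return r, rho

# A three-pixel L-shaped cluster: the radius comes out at roughly 0.745,
# just under the 0.75 tripixel threshold noted in the comments above, so it
# would be treated as a tripixel gamma candidate.
r, rho = radius_and_density([10, 11, 10], [20, 20, 21])
assert r < 0.75
# -----------------------------------------------------------------------------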
#!/usr/bin/env python # Copyright (c) 2011-2021, wradlib developers. # Distributed under the MIT License. See LICENSE.txt for more info. from dataclasses import dataclass import numpy as np import pytest from wradlib import comp, georef, io, ipol, util from . import requires_data, requires_gdal @pytest.fixture def comp_data(): @dataclass(init=False, repr=False, eq=False) class Data: filename = "dx/raa00-dx_10908-0806021655-fbg---bin.gz" dx_file = util.get_wradlib_data_file(filename) data, metadata = io.read_dx(dx_file) radar_location = (8.005, 47.8744, 1517) elevation = 0.5 # in degree azimuths = np.arange(0, 360) # in degrees ranges = np.arange(0, 128000.0, 1000.0) # in meters polargrid = np.meshgrid(ranges, azimuths) coords, rad = georef.spherical_to_xyz( polargrid[0], polargrid[1], elevation, radar_location ) x = coords[..., 0] y = coords[..., 1] yield Data class TestCompose: @requires_data @requires_gdal def test_extract_circle(self, comp_data): x = comp_data.x y = comp_data.y xgrid = np.linspace(x.min(), x.mean(), 100) ygrid = np.linspace(y.min(), y.mean(), 100) grid_xy = np.meshgrid(xgrid, ygrid) grid_xy = np.vstack((grid_xy[0].ravel(), grid_xy[1].ravel())).transpose() comp.extract_circle(np.array([x.mean(), y.mean()]), 128000.0, grid_xy) @requires_data @requires_gdal def test_togrid(self, comp_data): x = comp_data.x y = comp_data.y data = comp_data.data xgrid = np.linspace(x.min(), x.mean(), 100) ygrid = np.linspace(y.min(), y.mean(), 100) grid_xy = np.meshgrid(xgrid, ygrid) grid_xy = np.vstack((grid_xy[0].ravel(), grid_xy[1].ravel())).transpose() xy = np.concatenate([x.ravel()[:, None], y.ravel()[:, None]], axis=1) comp.togrid( xy, grid_xy, 128000.0, np.array([x.mean(), y.mean()]), data.ravel(), ipol.Nearest, ) def test_compose(self): g1 = np.array( [ np.nan, np.nan, 10.0, np.nan, np.nan, np.nan, 10.0, 10.0, 10.0, np.nan, 10.0, 10.0, 10.0, 10.0, np.nan, np.nan, 10.0, 10.0, 10.0, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, ] ) g2 = np.array( [ np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, 11.0, 11.0, 11.0, np.nan, np.nan, 11.0, 11.0, 11.0, 11.0, np.nan, 11.0, 11.0, 11.0, np.nan, np.nan, np.nan, 11.0, np.nan, np.nan, ] ) q1 = np.array( [ np.nan, np.nan, 3.47408756e09, np.nan, np.nan, np.nan, 8.75744493e08, 8.75744493e08, 1.55045236e09, np.nan, 3.47408756e09, 8.75744493e08, 5.98145272e04, 1.55045236e09, np.nan, np.nan, 1.55045236e09, 1.55045236e09, 1.55045236e09, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, ] ) q2 = np.array( [ np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, 1.55045236e09, 1.55045236e09, 1.55045236e09, np.nan, np.nan, 1.55045236e09, 5.98145272e04, 8.75744493e08, 3.47408756e09, np.nan, 1.55045236e09, 8.75744493e08, 8.75744493e08, np.nan, np.nan, np.nan, 3.47408756e09, np.nan, np.nan, ] ) composite = comp.compose_weighted( [g1, g2], [1.0 / (q1 + 0.001), 1.0 / (q2 + 0.001)] ) composite1 = comp.compose_ko([g1, g2], [1.0 / (q1 + 0.001), 1.0 / (q2 + 0.001)]) res = np.array( [ np.nan, np.nan, 10.0, np.nan, np.nan, np.nan, 10.3609536, 10.3609536, 10.5, np.nan, 10.0, 10.3609536, 10.5, 10.6390464, 11.0, np.nan, 10.5, 10.6390464, 10.6390464, np.nan, np.nan, np.nan, 11.0, np.nan, np.nan, ] ) res1 = np.array( [ np.nan, np.nan, 10.0, np.nan, np.nan, np.nan, 10.0, 10.0, 10.0, np.nan, 10.0, 10.0, 10.0, 11.0, 11.0, np.nan, 10.0, 11.0, 11.0, np.nan, np.nan, np.nan, 11.0, np.nan, np.nan, ] ) np.testing.assert_allclose(composite, res) np.testing.assert_allclose(composite1, res1)
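# -----------------------------------------------------------------------------
# Illustrative sketch, not part of the test module above: the expected arrays
# in test_compose are consistent with a quality-weighted mean, i.e. at every
# grid cell
#     c = sum_i(w_i * g_i) / sum_i(w_i),  with  w_i = 1 / (q_i + 0.001),
# and cells that are NaN in one input falling back to the other input.  The
# lines below reproduce that arithmetic by hand for two of the cells and
# recover the corresponding values in `res` (10.3609536 and 10.5).
import numpy as np

w1, w2 = 1.0 / (8.75744493e08 + 0.001), 1.0 / (1.55045236e09 + 0.001)
assert np.isclose((10.0 * w1 + 11.0 * w2) / (w1 + w2), 10.3609536)

# Equal quality values give equal weights, so the composite is the midpoint.
w1, w2 = 1.0 / (1.55045236e09 + 0.001), 1.0 / (1.55045236e09 + 0.001)
assert np.isclose((10.0 * w1 + 11.0 * w2) / (w1 + w2), 10.5)
# -----------------------------------------------------------------------------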
# -*- coding: utf-8 -*- # # This file is part of Invenio. # Copyright (C) 2016-2018 CERN. # # Invenio is free software; you can redistribute it and/or modify it # under the terms of the MIT License; see LICENSE file for more details. """Test case for CERN oauth remote app_rest.""" from flask import g, session, url_for from flask_principal import AnonymousIdentity, Identity, RoleNeed, UserNeed from flask_security import login_user, logout_user from flask_security.utils import hash_password from helpers import check_response_redirect_url_args, get_state, \ mock_remote_get, mock_response from six.moves.urllib_parse import parse_qs, urlparse from invenio_oauthclient.contrib.cern import OAUTHCLIENT_CERN_SESSION_KEY, \ account_info_rest, disconnect_rest_handler, fetch_extra_data, \ fetch_groups, get_dict_from_response def test_fetch_groups(app_rest, example_cern): """Test group extraction.""" example_response, example_token, _ = example_cern res = get_dict_from_response(example_response) # Override hidden group configuration import re app_rest.config['OAUTHCLIENT_CERN_HIDDEN_GROUPS'] = ('hidden_group',) app_rest.config['OAUTHCLIENT_CERN_HIDDEN_GROUPS_RE'] = ( re.compile(r'Group[1-3]'), ) # Check that groups were hidden as required groups = fetch_groups(res['Group']) assert all(group in groups for group in ('Group{}'.format(i) for i in range(4, 6))) def test_fetch_extra_data(app_rest, example_cern): """Test extra data extraction.""" example_response, example_token, _ = example_cern res = get_dict_from_response(example_response) # Check that groups were hidden as required extra_data = fetch_extra_data(res) assert 'person_id' in extra_data assert extra_data['person_id'] == "234567" assert 'identity_class' in extra_data assert extra_data['identity_class'] == "CERN Registered" assert 'department' in extra_data assert extra_data['department'] == "IT/CDA" def test_fetch_extra_data_fields_missing(app_rest, example_cern): """Test extra data extraction when fields are missing.""" example_response, example_token, _ = example_cern res = get_dict_from_response(example_response) del res['PersonID'] del res['IdentityClass'] del res['Department'] # Check that groups were hidden as required extra_data = fetch_extra_data(res) assert 'person_id' in extra_data assert extra_data['person_id'] is None assert 'identity_class' in extra_data assert extra_data['identity_class'] is None assert 'department' in extra_data assert extra_data['department'] is None def test_account_info(app_rest, example_cern): """Test account info extraction.""" client = app_rest.test_client() ioc = app_rest.extensions['oauthlib.client'] # Ensure remote apps have been loaded (due to before first request) client.get(url_for('invenio_oauthclient.rest_login', remote_app='cern')) example_response, _, example_account_info = example_cern mock_remote_get(ioc, 'cern', example_response) assert account_info_rest( ioc.remote_apps['cern'], None) == example_account_info assert g.oauth_logged_in_with_remote == ioc.remote_apps['cern'] def test_account_setup(app_rest, example_cern, models_fixture): """Test account setup after login.""" with app_rest.test_client() as c: ioc = app_rest.extensions['oauthlib.client'] # Ensure remote apps have been loaded (due to before first request) resp = c.get(url_for( 'invenio_oauthclient.rest_login', remote_app='cern')) assert resp.status_code == 302 example_response, example_token, example_account_info = example_cern mock_response(app_rest.extensions['oauthlib.client'], 'cern', example_token) mock_remote_get(ioc, 'cern', 
example_response) resp = c.get(url_for( 'invenio_oauthclient.rest_authorized', remote_app='cern', code='test', state=get_state('cern'))) assert resp.status_code == 302 expected_url_args = { "message": "Successfully authorized.", "code": 200, } check_response_redirect_url_args(resp, expected_url_args) assert len(g.identity.provides) == 7 datastore = app_rest.extensions['invenio-accounts'].datastore user = datastore.find_user(email='test.account@cern.ch') user.password = hash_password("1234") assert user with app_rest.test_request_context(): resp = disconnect_rest_handler(ioc.remote_apps['cern']) assert resp.status_code >= 300 # simulate login (account_info fetch) g.oauth_logged_in_with_remote = ioc.remote_apps['cern'] login_user(user) assert isinstance(g.identity, Identity) assert g.identity.provides == set([ UserNeed(4), UserNeed('test.account@cern.ch'), RoleNeed('Group1@cern.ch'), RoleNeed('Group2@cern.ch'), RoleNeed('Group3@cern.ch'), RoleNeed('Group4@cern.ch'), RoleNeed('Group5@cern.ch'), ]) logout_user() assert isinstance(g.identity, AnonymousIdentity) # NOTE: Wrong role, g.identity.provides = {Need(['id', 4])} read more # https://github.com/inveniosoftware/invenio-access/blob/e28e76d5361a29202b94d498f1968454c24c5c80/tests/test_loaders.py#L47 assert len(g.identity.provides) == 1 assert "cern_resource" not in session assert OAUTHCLIENT_CERN_SESSION_KEY not in session # Login again to test the disconnect handler g.oauth_logged_in_with_remote = ioc.remote_apps['cern'] login_user(user) assert isinstance(g.identity, Identity) assert len(g.identity.provides) == 7 disconnect_rest_handler(ioc.remote_apps['cern']) def test_login(app_rest): """Test CERN login.""" client = app_rest.test_client() resp = client.get( url_for('invenio_oauthclient.rest_login', remote_app='cern', next='/someurl/') ) assert resp.status_code == 302 params = parse_qs(urlparse(resp.location).query) assert params['response_type'], ['code'] assert params['scope'] == ['Name Email Bio Groups'] assert params['redirect_uri'] assert params['client_id'] assert params['state'] def test_authorized_reject(app_rest): """Test a rejected request.""" with app_rest.test_client() as c: c.get(url_for('invenio_oauthclient.rest_login', remote_app='cern')) resp = c.get( url_for('invenio_oauthclient.rest_authorized', remote_app='cern', error='access_denied', error_description='User denied access', state=get_state('cern'))) assert resp.status_code in (301, 302) expected_url_args = { "message": "You rejected the authentication request.", "code": 400, } check_response_redirect_url_args(resp, expected_url_args) def test_account_info_not_allowed_account(app_rest, example_cern): """Test account info extraction.""" client = app_rest.test_client() app_rest.config['OAUTHCLIENT_CERN_ALLOWED_IDENTITY_CLASSES'] = [ 'another cern type' ] ioc = app_rest.extensions['oauthlib.client'] # Ensure remote apps have been loaded (due to before first request) client.get(url_for('invenio_oauthclient.rest_login', remote_app='cern')) example_response, _, example_account_info = example_cern mock_remote_get(ioc, 'cern', example_response) resp = account_info_rest(ioc.remote_apps['cern'], None) assert resp.status_code == 302 expected_url_args = { "message": "CERN account not allowed.", "code": 400, } check_response_redirect_url_args(resp, expected_url_args)
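# Illustrative sketch (assumption): the behaviour checked by
# test_fetch_extra_data_fields_missing above can be obtained with plain
# dict.get() lookups, so missing response fields simply map to None. This is
# a stand-in for invenio_oauthclient.contrib.cern.fetch_extra_data, not its
# actual implementation.
def fetch_extra_data_sketch(resource):
    return {
        'person_id': resource.get('PersonID'),
        'identity_class': resource.get('IdentityClass'),
        'department': resource.get('Department'),
    }


# A response missing all three keys yields None for every entry, which is
# exactly what the test asserts.
assert fetch_extra_data_sketch({}) == {
    'person_id': None,
    'identity_class': None,
    'department': None,
}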
r"""Puiseux Series :mod:`abelfunctions.puiseux` =========================================== Tools for computing Puiseux series. A necessary component for computing integral bases and with Riemann surfaces. Classes ------- .. autosummary:: PuiseuxTSeries PuiseuxXSeries Functions --------- .. autosummary:: puiseux newton_iteration newton_iteration_step References ---------- .. [Duval] D. Duval, "Rational puiseux expansions", Compositio Mathematica, vol. 70, no. 2, pp. 119-154, 1989. .. [Poteaux] A. Poteaux, M. Rybowicz, "Towards a Symbolic-Numeric Method to Compute Puiseux Series: The Modular Part", preprint Examples -------- Contents -------- """ import numpy import sympy from abelfunctions.puiseux_series_ring import PuiseuxSeriesRing from sage.all import xgcd from sage.functions.log import log from sage.functions.other import ceil from sage.rings.big_oh import O from sage.rings.infinity import infinity from sage.rings.laurent_series_ring import LaurentSeriesRing from sage.rings.qqbar import QQbar from sage.rings.rational_field import QQ from sympy import Point, Segment def newton_polygon_exceptional(H): r"""Computes the exceptional Newton polygon of `H`.""" R = H.parent() x,y = R.gens() d = H(0,y).degree(y) return [[(0,0),(d,0)]] def newton_polygon(H, additional_points=[]): r"""Computes the Newton polygon of `H`. It's assumed that the first generator of `H` here is the "dependent variable". For example, if `H = H(x,y)` and we are aiming to compute a `y`-covering of the complex `x`-sphere then each monomial of `H` is of the form .. math:: a_{ij} x^j y^i. Parameters ---------- H : bivariate polynomial Returns ------- list Returns a list where each element is a list, representing a side of the polygon, which in turn contains tuples representing the points on the side. Note ---- This is written using Sympy's convex hull algorithm for legacy purposes. It can certainly be rewritten to use Sage's Polytope but do so *very carefully*! There are a number of subtle things going on here due to the fact that boundary points are ignored. """ # because of the way sympy.convex_hull computes the convex hull we # need to remove all points of the form (0,j) and (i,0) where j > j0 # and i > i0, the points on the axes closest to the origin R = H.parent() x, y = R.gens() monomials = H.monomials() points = [(monom.degree(y), monom.degree(x)) for monom in monomials] support = [Point(pt) for pt in points] + additional_points i0 = min(P.x for P in support if P.y == 0) j0 = min(P.y for P in support if P.x == 0) support = [P for P in support if P.x <= i0 and P.y <= j0] convex_hull = sympy.convex_hull(*support) # special treatment when the hull is just a point or a segment if isinstance(convex_hull, Point): P = (convex_hull.x, convex_hull.y) return [[P]] elif isinstance(convex_hull, Segment): P = convex_hull.p1 convex_hull = generalized_polygon_side(convex_hull) support.remove(P) support.append(convex_hull.p1) sides = [convex_hull] else: # recursive call with generalized point if a generalized newton # polygon is needed. sides = convex_hull.sides first_side = generalized_polygon_side(sides[0]) if first_side != sides[0]: P = first_side.p1 return newton_polygon(H, additional_points=[P]) # convert the sides to lists of points polygon = [] for side in sides: polygon_side = [P for P in support if P in side] polygon_side = sorted(map(lambda P: (int(P.x),int(P.y)), polygon_side)) polygon.append(polygon_side) # stop the moment we hit the i-axis. 
despite the filtration at # the start of this function we need this condition to prevent # returning to the starting point of the newton polygon. # # (See test_puiseux.TestNewtonPolygon.test_multiple) if side.p2.y == 0: break return polygon def generalized_polygon_side(side): r"""Returns the generalization of a side on the Newton polygon. A generalized Newton polygon is one where every side has slope no less than -1. Parameters ---------- side : sympy.Segment Returns ------- side """ if side.slope < -1: p1,p2 = side.points p1y = p2.x + p2.y side = Segment((0,p1y),p2) return side def bezout(q,m): r"""Returns :math:`u,v` such that :math:`uq+mv=1`. Parameters ---------- q,m : integer Two coprime integers with :math:`q > 0`. Returns ------- tuple of integers """ if q == 1: return (1,0) g,u,v = xgcd(q,-m) return (u,v) def transform_newton_polynomial(H, q, m, l, xi): r"""Recenters a Newton polynomial at a given singular term. Given the Puiseux data :math:`x=\mu x^q, y=x^m(\beta+y)` this function returns the polynomial .. math:: \tilde{H} = H(\xi^v x^q, x^m(\xi^u+y)) / x^l. where :math:`uq+mv=1`. Parameters ---------- H : polynomial in `x` and `y` q, m, l, xi : constants See above for the definitions of these parameters. Returns ------- polynomial """ R = H.parent() x,y = R.gens() u,v = bezout(q,m) newx = (xi**v)*(x**q) newy = (x**m)*(xi**u + y) newH = H(newx,newy) # divide by x**l R = newH.parent() x,y = R.gens() exponents, coefficients = zip(*(newH.dict().items())) exponents = [(e[0] - l, e[1]) for e in exponents] newH = R(dict(zip(exponents, coefficients))) return newH def newton_data(H, exceptional=False): r"""Determines the "newton data" associated with each side of the polygon. For each side :math:`\Delta` of the Newton polygon of `H` we associate the data :math:`(q,m,l,`phi)` where .. math:: \Delta: qj + mi = l \\ \phi_{\Delta}(t) = \sum_{(i,j) \in \Delta} a_{ij} t^{(i-i_0)/q} Here, :math:`a_ij x^j y_i` is a term in the polynomial :math:`H` and :math:`i_0` is the smallest value of :math:`i` belonging to the polygon side :math:`\Delta`. Parameters ---------- H : sympy.Poly Polynomial in `x` and `y`. Returns ------- list A list of the tuples :math:`(q,m,l,\phi)`. """ R = H.parent() x,y = R.gens() if exceptional: newton = newton_polygon_exceptional(H) else: newton = newton_polygon(H) # special case when the newton polygon is a single point if len(newton[0]) == 1: return [] # for each side determine the corresponding newton data: side slope # information and corresponding side characteristic polynomial, phi result = [] for side in newton: i0, j0 = side[0] i1, j1 = side[1] slope = QQ(j1 - j0) / QQ(i1 - i0) q = slope.denom() m = -slope.numer() l = min(q*j0 + m*i0, q*j1 + m*i1) phi = sum(H.coefficient({y:i, x:j})*x**((i - i0)//int(q)) for i, j in side) phi = phi.univariate_polynomial() result.append((q, m, l, phi)) return result def newton_iteration(G, n): r"""Returns a truncated series `y = y(x)` satisfying .. math:: G(x,y(x)) \equiv 0 \bmod{x^r} where $r = \ceil{\log_2{n}}$. Based on the algorithm in [XXX]. Parameters ---------- G, x, y : polynomial A polynomial in `x` and `y`. n : int Requested degree of the series expansion. Notes ----- This algorithm returns the series up to order :math:`2^r > n`. Any choice of order below :math:`2^r` will return the same series. """ R = G.parent() x,y = R.gens() if n < 0: raise ValueError('Number of terms must be positive. 
(n=%d'%n) elif n == 0: return R(0) phi = G phiprime = phi.derivative(y) try: pi = R(x).polynomial(x) gi = R(0) si = R(phiprime(x,gi)).polynomial(x).inverse_mod(pi) except NotImplementedError: raise ValueError('Newton iteration for computing regular part of ' 'Puiseux expansion failed. Curve is most likely ' 'not regular at center.') r = ceil(log(n,2)) for i in range(r): gi,si,pi = newton_iteration_step(phi,phiprime,gi,si,pi) return R(gi) def newton_iteration_step(phi, phiprime, g, s, p): r"""Perform a single step of the newton iteration algorithm. Parameters ---------- phi, phiprime : sympy.Poly Equation and its `y`-derivative. g, s : sympy.Poly Current solution and inverse (conjugate) modulo `p`. p : sympy.Poly The current modulus. That is, `g` is the Taylor series solution to `phi(t,g) = 0` modulo `p`. x,y : sympy.Symbol Dependent and independent variables, respectively. Returns ------- gnext,snext,pnext """ R = phi.parent() x,y = R.gens() g = R(g).univariate_polynomial() s = R(s).univariate_polynomial() p = R(p).univariate_polynomial() pnext = p**2 gnext = g - phi(x,g).univariate_polynomial()*s gnext = gnext % pnext snext = 2*s - phiprime(x,gnext).univariate_polynomial()*s**2 snext = snext % pnext gnext = R(gnext) snext = R(snext) pnext = R(pnext) return gnext,snext,pnext def puiseux_rational(H, recurse=False): r"""Puiseux data for the curve :math:`H` above :math:`(x,y)=(0,0)`. Given a polynomial :math:`H = H(x,y)` :func:`puiseux_rational` returns the singular parts of all of the Puiseux series centered at :math:`x=0, y=0`. Parameters ---------- H : polynomial A plane curve in `x` and `y`. recurse : boolean (Default: `True`) A flag used internally to keep track of which term in the singular expansion is being computed. Returns ------- list of `(G,P,Q)` List of tuples where `P` and `Q` are the x- and y-parts of the Puiseux series, respectively, and `G` is a polynomial used in :func:`newton_iteration` to generate additional terms in the y-series. """ R = H.parent() x,y = R.gens() # when recurse is true, return if the leading order of H(0,y) is y if recurse: IH = H(0,y).polynomial(y).ord() if IH == 1: return [(H,x,y)] # for each newton polygon side branch out a new puiseux series data = newton_data(H, exceptional=(not recurse)) singular_terms = [] for q,m,l,phi in data: u,v = bezout(q,m) for psi,k in phi.squarefree_decomposition(): roots = psi.roots(ring=QQbar, multiplicities=False) map(lambda x: x.exactify(), roots) for xi in roots: Hprime = transform_newton_polynomial(H, q, m, l, xi) next_terms = puiseux_rational(Hprime, recurse=True) for (G,P,Q) in next_terms: singular_term = (G, xi**v*P**q, P**m*(xi**u + Q)) singular_terms.append(singular_term) return singular_terms def almost_monicize(f): r"""Transform `f` to an "almost monic" polynomial. Perform a sequence of substitutions of the form .. math:: f \mapsto x^d f(x,y/x) such that :math:`l(0) \neq 0` where :math:`l=l(x)` is the leading order coefficient of :math:`f`. Parameters ---------- f,x,y : sympy.Expr An algebraic curve in `x` and `y`. Returns ------- g, transform A new, almost monic polynomial `g` and a polynomial `transform` such that `y -> y/transform`. """ R = f.parent() x,y = R.gens() transform = R(1) monic = False while not monic: if f.polynomial(y).leading_coefficient()(0) == 0: # the denominator is always of the form x**d. Sage, however, has # trouble reducing the expression to simplest terms. 
the following # is a manual version r = f(x,y/x) n = r.numerator().polynomial(x) d = r.denominator().degree(x) shift = min(n.exponents() + [d]) n = n.shift(-shift) f = R(n(x,y)) # XXX numerator evaluation is important! transform *= x else: monic = True return f, transform def puiseux(f, alpha, beta=None, order=None, parametric=True): r"""Singular parts of the Puiseux series above :math:`x=\alpha`. Parameters ---------- f : polynomial A plane algebraic curve in `x` and `y`. alpha : complex The x-point over which to compute the Puiseux series of `f`. t : variable Variable used in the Puiseux series expansions. beta : complex (Optional) The y-point at which to compute the Puiseux series. order : int (Default: `None`) If provided, returns Puiseux series expansions up the the specified order. Returns ------- list of PuiseuxTSeries """ R = f.parent() x,y = R.gens() # recenter the curve at x=alpha if alpha in [infinity,'oo']: alpha = infinity d = f.degree(x) F = f(1/x,y)*x**d n,d = F.numerator(), F.denominator() falpha,_ = n.polynomial(x).quo_rem(d.univariate_polynomial()) falpha = falpha(x).numerator() else: falpha = f(x+alpha,y) # determine the points on the curve lying above x=alpha R = falpha.parent() x,y = R.gens() g, transform = almost_monicize(falpha) galpha = R(g(0,y)).univariate_polynomial() betas = galpha.roots(ring=QQbar, multiplicities=False) # filter for requested value of beta. raise error if not found if not beta is None: betas = [b for b in betas if b == beta] if not betas: raise ValueError('The point ({0}, {1}) is not on the ' 'curve {2}.'.format(alpha, beta, f)) # for each (alpha, beta) determine the corresponding singular parts of the # Puiseux series expansions. note that there may be multiple, distinct # places above the same point. singular_parts = [] for beta in betas: H = g(x,y+beta) singular_part_ab = puiseux_rational(H) # recenter the result back to (alpha, beta) from (0,0) for G,P,Q in singular_part_ab: Q += beta Q = Q/transform.univariate_polynomial()(P) if alpha == infinity: P = 1/P else: P += alpha # append to list of singular data singular_parts.append((G,P,Q)) # instantiate PuiseuxTSeries from the singular data series = [PuiseuxTSeries(f, alpha, singular_data, order=order) for singular_data in singular_parts] return series class PuiseuxTSeries(object): r"""A Puiseux t-series about some place :math:`(\alpha, \beta) \in X`. A parametric Puiseux series :math:`P(t)` centered at :math:`(x,y) = (\alpha, \beta)` is given in terms of a pair of functions .. math:: x(t) = \alpha + \lambda t^e, \\ y(t) = \sum_{h=0}^\infty \alpha_h t^{n_h}, where :math:`x(0) = \alpha, y(0) = \beta`. The primary reference for the notation and computational method of these Puiseux series is D. Duval. Attributes ---------- f, x, y : polynomial x0 : complex The x-center of the Puiseux series expansion. ramification_index : rational The ramification index :math:`e`. terms : list A list of exponent-coefficient pairs representing the y-series. order : int The order of the Puiseux series expansion. 
Methods ------- xseries extend eval_x eval_y """ @property def xdata(self): return (self.center, self.xcoefficient, self.ramification_index) @xdata.setter def xdata(self, value): self.center, self.xcoefficient, self.ramification_index = value @property def is_symbolic(self): return self._is_symbolic @property def is_numerical(self): return not self._is_symbolic @property def terms(self): terms = self.ypart.laurent_polynomial().dict().items() # note that the following greatly affects singularities() and Int() if not terms: terms = [(0,0)] return terms @property def xdatan(self): if self.is_numerical: return self.xdata else: return (numpy.complex(self.center), numpy.complex(self.xcoefficient), numpy.int(self.ramification_index)) @property def order(self): return self._singular_order + self._regular_order @property def nterms(self): """Returns the number of non-zero computed terms. Parameters ---------- None Returns ------- int """ terms = self.ypart.laurent_polynomial().dict().items() return len(terms) def __init__(self, f, x0, singular_data, order=None): r"""Initialize a PuiseuxTSeries using a set of :math:`\pi = \{\tau\}` data. Parameters ---------- f, x, y : polynomial A plane algebraic curve. x0 : complex The x-center of the Puiseux series expansion. singular_data : list The output of :func:`singular`. t : variable The variable in which the Puiseux t series is represented. """ R = f.parent() x,y = R.gens() extension_polynomial, xpart, ypart = singular_data L = LaurentSeriesRing(ypart.base_ring(), 't') t = L.gen() self.f = f self.t = t self._xpart = xpart self._ypart = ypart # store x-part attributes. handle the centered at infinity case self.x0 = x0 if x0 == infinity: x0 = QQ(0) self.center = x0 # extract and store information about the x-part of the puiseux series xpart = xpart(t,0) xpartshift = xpart - x0 ramification_index, xcoefficient = xpartshift.laurent_polynomial().dict().popitem() self.xcoefficient = xcoefficient self.ramification_index = QQ(ramification_index).numerator() self.xpart = xpart # extract and store information about the y-part of the puiseux series self.ypart = L(ypart(t,0)) self._initialize_extension(extension_polynomial) # determine the initial order. See the order property val = L(ypart(t,O(t))).prec() self._singular_order = 0 if val == infinity else val self._regular_order = self._p.degree(x) # extend to have at least two elements self.extend(nterms=1) # the curve, x-part, and terms output by puiseux make the puiseux # series unique. any mutability only adds terms self.__parent = self.ypart.parent() self._hash = hash((self.f, self.xpart, self.ypart)) def parent(self): return self.__parent def _initialize_extension(self, extension_polynomial): r"""Set up regular part extension machinery. RootOfs in expressions are not preserved under this transformation. (that is, actual algebraic representations are calculated.) each RootOf is temporarily replaced by a dummy variable Parameters ---------- extension_polynomial, x, y : polynomial Returns ------- None : None Internally sets hidden regular extension attributes. 
""" R = extension_polynomial.parent() x,y = R.gens() # store attributes _phi = extension_polynomial _p = R(x) _g = R(0) self._phi = _phi self._phiprime = _phi.derivative(y) self._p = _p self._g = _g # compute inverse of phi'(g) modulo x and store _g = _g.univariate_polynomial() _p = _p.univariate_polynomial() ppg = self._phiprime.subs({y:_g}).univariate_polynomial() _s = ppg.inverse_mod(_p) self._s = _s def __repr__(self): """Print the x- and y-parts of the Puiseux series.""" s = '(' s += str(self.xpart) s += ', ' s += str(self.ypart) s += ' + O(%s^%s))'%(self.t,self.order) return s def __hash__(self): return self._hash def __eq__(self, other): r"""Check equality. A `PuiseuxTSeries` is uniquely identified by the curve it's defined on, its center, x-part terms, and the singular terms of the y-part. Parameters ---------- other : PuiseuxTSeries Returns ------- boolean """ if isinstance(other, PuiseuxTSeries): if self._hash == other._hash: return True return False def xseries(self, all_conjugates=True): r"""Returns the corresponding x-series. Parameters ---------- all_conjugates : bool (default: True) If ``True``, returns all conjugates x-representations of this Puiseux t-series. If ``False``, only returns one representative. Returns ------- list List of PuiseuxXSeries representations of this PuiseuxTSeries. """ # obtain relevant rings: # o R = parent ring of curve # o L = parent ring of T-series # o S = temporary polynomial ring over base ring of T-series # o P = Puiseux series ring L = self.ypart.parent() t = L.gen() S = L.base_ring()['z'] z = S.gen() R = self.f.parent() x,y = R.gens() P = PuiseuxSeriesRing(L.base_ring(), str(x)) x = P.gen() # given x = alpha + lambda*t^e solve for t. this involves finding an # e-th root of either (1/lambda) or of lambda, depending on e's sign ## (A sign on a ramification index ? hm) e = self.ramification_index abse = abs(e) lamb = S(self.xcoefficient) order = self.order if e > 0: phi = lamb*z**e - 1 else: phi = z**abse - lamb mu = phi.roots(QQbar, multiplicities=False)[0] if all_conjugates: zeta_e=QQbar.zeta(abse) conjugates = [mu*zeta_e**k for k in range(abse)] else: conjugates = [mu] map(lambda x: x.exactify(), conjugates) # determine the resulting x-series xseries = [] for c in conjugates: t = self.ypart.parent().gen() fconj = self.ypart(c*t) p = P(fconj(x**(QQ(1)/e))) p = p.add_bigoh(QQ(order+1)/abse) xseries.append(p) return xseries def add_term(self, order=None): r"""Extend the y-series terms in-place using Newton iteration. The modular Newtion iteration algorithm in :func:`newton_iteration` efficiently computes the series up to order :math:`t^{2^n}` where :math:`2^n` is the smallest power of two greater than the current order. """ g,s,p = newton_iteration_step( self._phi, self._phiprime, self._g, self._s, self._p) self._g = g self._s = s self._p = p # operation below: yseries = ypart(y=g)(y=0) t = self.t L = self.ypart.parent() g = g.univariate_polynomial()(t) self.ypart = L(self._ypart(t,g)) self._regular_order = self._p.degree() def extend(self, order=None, nterms=None): r"""Extends the series in place. Computes additional terms in the Puiseux series up to the specified `order` or with `nterms` number of non-zero terms. If neither `degree` nor `nterms` are provided then the next non-zero term will be added to this t-series. Remember that :meth:`add_term` updates `self.order` in-place. Parameters ---------- order : int, optional The desired degree to extend the series to. 
nterms : int, optional The desired number of non-zero terms to extend the series to. Returns ------- None """ # order takes precedence if order: while self.order < order: self.add_term() elif nterms: while self.nterms < nterms: self.add_term() else: # if neither order or nterms is given, just call add_term self.add_term() def extend_to_t(self, t, curve_tol=1e-8): r"""Extend the series to accurately determine the y-values at `t`. Add terms to the t-series until the the regular place :math:`(x(t), y(t))` is within a particular tolerance of the curve that the Puiseux series is approximating. Parameters ---------- t : complex eps : double curve_tol : double The tolerance for the corresponding point to lie on the curve. Returns ------- none The PuiseuxTSeries is modified in-place. Note ---- This doesn't work well in the infinite case. (Puiseux series centered at x=oo.) """ num_iter = 0 max_iter = 16 while num_iter < max_iter: xt = self.eval_x(t) yt = self.eval_y(t) n,a = max(self.terms) curve_error = abs(self.f(xt,yt)) if (curve_error < curve_tol): break else: self.add_term() num_iter += 1 def extend_to_x(self, x, curve_tol=1e-8): r"""Extend the series to accurately determine the y-values at `x`. Add terms to the t-series until the the regular place :math:`(x, y)` is within a particular tolerance of the curve that the Puiseux series is approximating. Parameters ---------- x : complex curve_tol : double The tolerance for the corresponding point to lie on the curve. Returns ------- none The PuiseuxTSeries is modified in-place. """ # simply convert to t and pass to extend. choose any conjugate since # the convergence rates between each conjugate is equal center, xcoefficient, ramification_index = self.xdata t = numpy.power((x-center)/xcoefficient, 1.0/ramification_index) self.extend_to_t(t, curve_tol=curve_tol) def eval_x(self, t): r"""Evaluate the x-part of the Puiseux series at `t`. Parameters ---------- t : sympy.Expr or complex Returns ------- val = complex """ try: center, xcoefficient, ramification_index = self.xdata val = center + xcoefficient*t**ramification_index except ZeroDivisionError: val = infinity return val def eval_dxdt(self, t): r"""Evaluate the derivative of the x-part of the Puiseux series at 't'. Parameters ---------- t : complex Returns ------- val : complex """ try: center, xcoefficient, ramification_index = self.xdata val = xcoefficient*ramification_index*t**(ramification_index-1) except ZeroDivisionError: val = infinity return val def eval_y(self, t, order=None): r"""Evaluate of the y-part of the Puiseux series at `t`. The y-part can be evaluated up to a certain order or with a certain number of terms. Parameters ---------- t : complex nterms : int, optional If provided, only evaluates using `nterms` in the y-part of the series. If set to zero, will evaluate the principal part of the series: the terms in the series which distinguishes places with the same x-projection. order : int, optional If provided, only evaluates up to `order`. Returns ------- complex Notes ----- This can be sped up using a Holder-like fast exponent evaluation trick. """ if order: self.extend(order=order) # set which terms will be used for evaluation if order is not None and order >= 0: terms = [(n,alpha) for n,alpha in self.terms if n < order] else: terms = self.terms try: val = sum(alpha*t**n for n,alpha in terms) except ZeroDivisionError: val = infinity return val
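# Illustrative sketch (pure Python, independent of the classes above): the
# arithmetic behind eval_x/eval_y for a parametric Puiseux series
# x(t) = alpha + lambda*t**e, y(t) = sum(a_h * t**n_h). The example series
# x(t) = t**2, y(t) = t parametrizes the curve f(x, y) = y**2 - x near
# (0, 0), so f(x(t), y(t)) vanishes identically.
def eval_x_sketch(t, center=0.0, xcoefficient=1.0, ramification_index=2):
    return center + xcoefficient * t**ramification_index


def eval_y_sketch(t, terms=((1, 1.0),)):
    # terms is a sequence of (exponent, coefficient) pairs, in the same
    # spirit as PuiseuxTSeries.terms
    return sum(alpha * t**n for n, alpha in terms)


t = 0.3
xt = eval_x_sketch(t)
yt = eval_y_sketch(t)
assert abs(yt**2 - xt) < 1e-12  # the point (x(t), y(t)) lies on the curve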
from rpython.jit.backend.arm import registers as r from rpython.jit.backend.arm import codebuilder from rpython.jit.backend.arm import conditions from rpython.jit.backend.arm import instructions from rpython.jit.backend.arm.test.support import requires_arm_as from rpython.jit.backend.arm.test.support import get_as_version from rpython.jit.backend.arm.test.support import define_test from rpython.jit.backend.arm.test.support import gen_test_function from gen import assemble import py requires_arm_as() class CodeBuilder(codebuilder.InstrBuilder): def __init__(self, arch_version=7): self.arch_version = arch_version self.buffer = [] def writechar(self, char): self.buffer.append(char) def hexdump(self): return ''.join(self.buffer) class ASMTest(object): def assert_equal(self, asm): assert self.cb.hexdump() == assemble(asm) class TestInstrCodeBuilder(ASMTest): def setup_method(self, ffuu_method): self.cb = CodeBuilder() def test_ldr(self): self.cb.LDR_ri(r.r0.value, r.r1.value) self.assert_equal('LDR r0, [r1]') def test_ldr_neg(self): self.cb.LDR_ri(r.r3.value, r.fp.value, -16) self.assert_equal('LDR r3, [fp, #-16]') def test_add_ri(self): self.cb.ADD_ri(r.r0.value, r.r1.value, 1) self.assert_equal('ADD r0, r1, #1') def test_mov_rr(self): self.cb.MOV_rr(r.r7.value, r.r12.value) self.assert_equal('MOV r7, r12') def test_mov_ri(self): self.cb.MOV_ri(r.r9.value, 123) self.assert_equal('MOV r9, #123') def test_mov_ri2(self): self.cb.MOV_ri(r.r9.value, 255) self.assert_equal('MOV r9, #255') def test_mov_ri_max(self): self.cb.MOV_ri(r.r9.value, 0xFF) self.assert_equal('MOV r9, #255') def test_str_ri(self): self.cb.STR_ri(r.r9.value, r.r14.value) self.assert_equal('STR r9, [r14]') def test_str_ri_offset(self): self.cb.STR_ri(r.r9.value, r.r14.value, 23) self.assert_equal('STR r9, [r14, #23]') def test_str_ri_offset(self): self.cb.STR_ri(r.r9.value, r.r14.value, -20) self.assert_equal('STR r9, [r14, #-20]') def test_asr_ri(self): self.cb.ASR_ri(r.r7.value, r.r5.value, 24) self.assert_equal('ASR r7, r5, #24') def test_orr_rr_no_shift(self): self.cb.ORR_rr(r.r0.value, r.r7.value, r.r12.value) self.assert_equal('ORR r0, r7, r12') def test_orr_rr_lsl_8(self): self.cb.ORR_rr(r.r0.value, r.r7.value, r.r12.value, 8) self.assert_equal('ORR r0, r7, r12, lsl #8') def test_push_one_reg(self): if get_as_version() < (2, 23): py.test.xfail("GNU as before version 2.23 generates encoding A1 for " "pushing only one register") self.cb.PUSH([r.r1.value]) self.assert_equal('PUSH {r1}') def test_push_multiple(self): self.cb.PUSH([reg.value for reg in [r.r1, r.r3, r.r6, r.r8, r.pc]]) self.assert_equal('PUSH {r1, r3, r6, r8, pc}') def test_push_multiple2(self): self.cb.PUSH([reg.value for reg in [r.fp, r.ip, r.lr, r.pc]]) self.assert_equal('PUSH {fp, ip, lr, pc}') def test_vpush_one_reg(self): self.cb.VPUSH([r.d3.value]) self.assert_equal('VPUSH {d3}') def test_vpush_one_reg2(self): self.cb.VPUSH([r.d12.value]) self.assert_equal('VPUSH {d12}') def test_vpush_multiple(self): self.cb.VPUSH([reg.value for reg in [r.d11, r.d12, r.d13, r.d14, r.d15]]) self.assert_equal('VPUSH {D11, D12, D13, D14, D15}') def test_sub_ri(self): self.cb.SUB_ri(r.r2.value, r.r4.value, 123) self.assert_equal('SUB r2, r4, #123') def test_sub_ri2(self): self.cb.SUB_ri(r.r3.value, r.r7.value, 0xFF) self.assert_equal('SUB r3, r7, #255') def test_cmp_ri(self): self.cb.CMP_ri(r.r3.value, 123) self.assert_equal('CMP r3, #123') def test_mcr(self): self.cb.MCR(15, 0, r.r1.value, 7, 10,0) self.assert_equal('MCR P15, 0, r1, c7, c10, 0') def 
test_push_eq_stmdb(self): # XXX check other conditions in STMDB self.cb.PUSH([reg.value for reg in r.caller_resp], cond=conditions.AL) self.assert_equal('STMDB SP!, {r0, r1, r2, r3}') def test_push(self): self.cb.PUSH([reg.value for reg in r.caller_resp], cond=conditions.AL) self.assert_equal('PUSH {r0, r1, r2, r3}') def test_push_raises_sp(self): assert py.test.raises(AssertionError, 'self.cb.PUSH([r.sp.value])') def test_stm(self): self.cb.STM(r.fp.value, [reg.value for reg in r.caller_resp], cond=conditions.AL) self.assert_equal('STM fp, {r0, r1, r2, r3}') def test_ldm(self): self.cb.LDM(r.fp.value, [reg.value for reg in r.caller_resp], cond=conditions.AL) self.assert_equal('LDM fp, {r0, r1, r2, r3}') def test_vstm(self): self.cb.VSTM(r.fp.value, [reg.value for reg in r.caller_vfp_resp], cond=conditions.AL) self.assert_equal('VSTM fp, {d0, d1, d2, d3, d4, d5, d6, d7}') def test_vldm(self): self.cb.VLDM(r.fp.value, [reg.value for reg in r.caller_vfp_resp], cond=conditions.AL) self.assert_equal('VLDM fp, {d0, d1, d2, d3, d4, d5, d6, d7}') def test_pop(self): self.cb.POP([reg.value for reg in r.caller_resp], cond=conditions.AL) self.assert_equal('POP {r0, r1, r2, r3}') def test_pop_eq_ldm(self): # XXX check other conditions in LDM self.cb.POP([reg.value for reg in r.caller_resp], cond=conditions.AL) self.assert_equal('LDM SP!, {r0, r1, r2, r3}') def test_double_add(self): self.cb.VADD(r.d1.value, r.d2.value, r.d3.value, conditions.LE) self.assert_equal("VADDLE.F64 D1, D2, D3") def test_double_sub(self): self.cb.VSUB(r.d1.value, r.d2.value, r.d3.value, conditions.GT) self.assert_equal("VSUBGT.F64 D1, D2, D3") def test_vstr_offset(self): assert py.test.raises(AssertionError, 'self.cb.VSTR(r.d1, r.r4, 3)') def test_vmrs(self): self.cb.VMRS(conditions.AL) self.assert_equal("vmrs APSR_nzcv, fpscr") def test_movw(self): self.cb.MOVW_ri(r.r3.value, 0xFFFF, conditions.NE) self.assert_equal("MOVWNE r3, #65535") def test_movt(self): self.cb.MOVT_ri(r.r3.value, 0xFFFF, conditions.NE) self.assert_equal("MOVTNE r3, #65535") def test_ldrex(self): self.cb.LDREX(r.r10.value, r.r11.value) self.assert_equal('LDREX r10, [r11]') def test_strex(self): self.cb.STREX(r.r9.value, r.r1.value, r.r14.value, conditions.NE) self.assert_equal('STREXNE r9, r1, [r14]') def test_dmb(self): self.cb.DMB() self.assert_equal('DMB') def test_fmdrr(self): self.cb.FMDRR(r.d11.value, r.r9.value, r.r14.value) self.assert_equal('FMDRR d11, r9, r14') def test_fmrrd(self): self.cb.FMRRD(r.r9.value, r.r14.value, r.d11.value) self.assert_equal('FMRRD r9, r14, d11') def test_size_of_gen_load_int(): for v, n in [(5, 4), (6, 4), (7, 2)]: c = CodeBuilder(v) assert c.get_max_size_of_gen_load_int() == n class TestInstrCodeBuilderForGeneratedInstr(ASMTest): def setup_method(self, ffuu_method): self.cb = CodeBuilder() def gen_test_float_load_store_func(name, table): tests = [] for c,v in [('EQ', conditions.EQ), ('LE', conditions.LE), ('AL', conditions.AL)]: for reg in range(15): for creg in range(2): asm = 'd%d, [r%d]' % (creg, reg) tests.append((asm, (creg, reg))) asm = 'd%d, [r%d, #16]' % (creg, reg) tests.append((asm, (creg, reg, 16))) return tests def gen_test_float64_data_proc_instructions_func(name, table): tests = [] for c,v in [('EQ', conditions.EQ), ('LE', conditions.LE), ('AL', conditions.AL)]: for reg in range(15): if 'result' in table and not table['result']: asm = 'd%d, d2' % reg tests.append((asm, (reg, r.d2.value), {}, '.F64')) elif 'base' in table and not table['base']: asm = 'd%d, d2' % reg tests.append((asm, (reg, 
r.d2.value), {}, '.F64')) else: asm = 'd%d, d1, d2' % reg tests.append((asm, (reg, r.d1.value, r.d2.value), {}, '.F64')) return tests def gen_test_data_proc_imm_func(name, table): if table['result'] and table['base']: def f(self): func = getattr(self.cb, name) func(r.r3.value, r.r7.value, 23) self.assert_equal('%s r3, r7, #23' % name[:name.index('_')]) py.test.raises(ValueError, 'func(r.r3.value, r.r7.value, -12)') return [f] else: return [('r3, #23', [r.r3.value, 23])] def gen_test_load_store_func(name, table): if table['imm']: return [('r3, [r7, #23]', [r.r3.value, r.r7.value, 23]), ('r3, [r7, #-23]', [r.r3.value, r.r7.value, -23]) ] else: return [('r3, [r7, r12]', [r.r3.value, r.r7.value, r.r12.value])] def gen_test_extra_load_store_func(name, table): if name[-4] == 'D': if name[-2:] == 'rr': return [('r4, [r8, r12]', [r.r4.value, r.r5.value, r.r8.value, r.r12.value])] else: return [('r4, [r8, #223]', [r.r4.value, r.r5.value, r.r8.value, 223])] else: if name[-2:] == 'rr': return [('r4, [r5, r12]', [r.r4.value, r.r5.value, r.r12.value])] else: return [('r4, [r5, #223]', [r.r4.value, r.r5.value, 223])] return f def gen_test_multiply_func(name, table): if 'acc' in table and table['acc']: if 'update_flags' in table and table['update_flags']: return [ ('r3, r7, r12, r13', (r.r3.value, r.r7.value, r.r12.value, r.r13.value)), ('r3, r7, r12, r13', (r.r3.value, r.r7.value, r.r12.value, r.r13.value), {'s':1}, 'S') ] else: return [('r3, r7, r12, r13', (r.r3.value, r.r7.value, r.r12.value, r.r13.value))] elif 'long' in table and table['long']: return [('r3, r13, r7, r12', (r.r3.value, r.r13.value, r.r7.value, r.r12.value))] else: return [('r3, r7, r12', (r.r3.value, r.r7.value, r.r12.value))] def gen_test_data_proc_reg_shift_reg_func(name, table): if name[-2:] == 'rr': return [('r3, r7, r12', [r.r3.value, r.r7.value, r.r12.value])] else: result = 'result' not in table or table['result'] if result: return [('r3, r7, r8, ASR r11', [r.r3.value, r.r7.value, r.r8.value, r.r11.value], {'shifttype':0x2})] else: return [('r3, r7, ASR r11', [r.r3.value, r.r7.value, r.r11.value], {'shifttype':0x2})] def gen_test_data_proc_func(name, table): op_name = name[:name.index('_')] if name[-2:] == 'ri': return [('r3, r7, #12', (r.r3.value, r.r7.value, 12)), ('r3, r7, #12', (r.r3.value, r.r7.value, 12), {'s':1}, 'S')] elif table['base'] and table['result']: return [('r3, r7, r12', (r.r3.value, r.r7.value, r.r12.value)), ('r3, r7, r12', (r.r3.value, r.r7.value, r.r12.value), {'s':1}, 'S')] else: return [('r3, r7', [r.r3.value, r.r7.value])] def gen_test_supervisor_and_coproc_func(name, table): def f(self): py.test.skip('not used at the moment') return [f] def gen_test_branch_func(name, table): def f(self): py.test.skip('not used at the moment') return [f] def gen_test_block_data_func(name, table): tests = [] for c,v in [('EQ', conditions.EQ), ('LE', conditions.LE), ('AL', conditions.AL)]: for regs in range(16): asm = 'r3, {%s}' % ','.join(['r%d' % i for i in range(regs+1)]) tests.append((asm, (r.r3.value, range(regs+1)))) return tests def gen_test_simd_instructions_3regs_func(name, table): op_name = name[:name.index('_')] return [('d1, d2, d3', (r.d1.value, r.d2.value, r.d3.value), {}, '.i64')] def build_tests(): cls = TestInstrCodeBuilderForGeneratedInstr test_name = 'test_generated_%s' ins = [k for k in instructions.__dict__.keys() if not k.startswith('__')] for name in ins: try: func = globals()['gen_test_%s_func' % name] except KeyError: print 'No test generator for %s instructions' % name continue for key, value 
in getattr(instructions, name).iteritems(): for test_case in func(key, value): define_test(cls, key, test_case, name) build_tests()
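# Illustrative sketch (assumption): build_tests() above relies on a helper,
# define_test, that turns (asm, args) test cases into methods attached to the
# test class so py.test discovers each instruction/operand combination as its
# own test. The sketch below shows that general metaprogramming pattern with
# hypothetical names; it is not the actual
# rpython.jit.backend.arm.test.support implementation.
def attach_generated_test(cls, instr_name, asm, args, index):
    def test(self):
        builder = getattr(self.cb, instr_name)
        builder(*args)
        self.assert_equal('%s %s' % (instr_name.split('_')[0], asm))
    test.__name__ = 'test_generated_%s_%d' % (instr_name, index)
    setattr(cls, test.__name__, test)


# Usage (hypothetical): one generated method per case.
# for i, (asm, args) in enumerate(cases):
#     attach_generated_test(TestInstrCodeBuilderForGeneratedInstr,
#                           'ADD_ri', asm, args, i)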
# -*- coding: utf-8 -*- from __future__ import absolute_import import os import six import sys import lupa from ipykernel.kernelapp import IPKernelApp from ipykernel.eventloops import loop_qt5 from jupyter_client.kernelspec import install_kernel_spec from twisted.internet import defer import splash from splash.lua import get_version, get_main_sandboxed, get_main from splash.browser_tab import BrowserTab from splash.lua_runtime import SplashLuaRuntime from splash.qtrender_lua import ( Splash, MainCoroutineRunner, StoredExceptions, Extras ) from splash.qtutils import init_qt_app from splash.render_options import RenderOptions from splash import defaults from splash.kernel.kernelbase import Kernel from splash.utils import BinaryCapsule from splash.kernel.completer import Completer from splash.kernel.inspections import Inspector from splash.kernel.errors import error_repr import splash.server as server def install(user=True): """ Install IPython kernel specification """ name = 'splash-py2' if six.PY2 else 'splash-py3' folder = os.path.join(os.path.dirname(__file__), 'kernels', name) install_kernel_spec(folder, kernel_name="splash", user=user, replace=True) def init_browser(network_manager_factory): # TODO: support the same command-line options as HTTP server. # from splash.server import start_logging # class opts(object): # logfile = "./kernel.log" # start_logging(opts) proxy_factory = None # TODO data = {} data['uid'] = id(data) tab = BrowserTab( network_manager=network_manager_factory(), splash_proxy_factory=proxy_factory, verbosity=2, # TODO render_options=RenderOptions(data, defaults.MAX_TIMEOUT), # TODO: timeout visible=True, ) return tab class DeferredSplashRunner(object): def __init__(self, lua, splash, sandboxed, log=None, render_options=None): self.lua = lua self.splash = splash self.sandboxed = sandboxed if log is None: self.log = self.splash.tab.logger.log else: self.log = log self.runner = MainCoroutineRunner( lua=self.lua, log=self.log, splash=splash, sandboxed=self.sandboxed, ) def run(self, main_coro): """ Run main_coro Lua coroutine, passing it a Splash instance as an argument. Return a Deferred. 
""" d = defer.Deferred() def return_result(result): d.callback(result) def return_error(err): d.errback(err) self.runner.start( main_coro=main_coro, return_result=return_result, return_error=return_error, ) return d class SplashKernel(Kernel): implementation = 'Splash' implementation_version = splash.__version__ language = 'Lua' language_version = get_version() language_info = { 'name': 'Splash', 'mimetype': 'application/x-lua', 'display_name': 'Splash', 'language': 'lua', 'codemirror_mode': { "name": "text/x-lua", }, 'file_extension': '.lua', 'pygments_lexer': 'lua', 'version': get_version(), } banner = "Splash kernel - write browser automation scripts interactively" help_links = [ { 'text': "Splash Tutorial", 'url': 'http://splash.readthedocs.org/en/latest/scripting-tutorial.html' }, { 'text': "Splash Reference", 'url': 'http://splash.readthedocs.org/en/latest/scripting-ref.html' }, { 'text': "Programming in Lua", 'url': 'http://www.lua.org/pil/contents.html' }, { 'text': "Lua 5.2 Manual", 'url': 'http://www.lua.org/manual/5.2/' }, ] sandboxed = False def __init__(self, **kwargs): super(SplashKernel, self).__init__(**kwargs) self.tab = init_browser(SplashKernel.network_manager_factory) self.lua = SplashLuaRuntime(self.sandboxed, lua_package_path="", lua_sandbox_allowed_modules=()) self.exceptions = StoredExceptions() self.splash = Splash( lua=self.lua, exceptions=self.exceptions, tab=self.tab ) self.lua.add_to_globals("splash", self.splash.get_wrapped()) self.extras = Extras(self.lua, self.exceptions) self.extras.inject_to_globals() self.runner = DeferredSplashRunner(self.lua, self.splash, self.sandboxed) #, self.log_msg) self.completer = Completer(self.lua) self.inspector = Inspector(self.lua) # # try: # sys.stdout.write = self._print # sys.stderr.write = self._print # except: # pass # Can't change stdout def send_execute_reply(self, stream, ident, parent, md, reply_content): def done(result): reply, result, ct = result if result: data = { 'text/plain': result if isinstance(result, six.text_type) else str(result), } if isinstance(result, BinaryCapsule): if result.content_type in {'image/png', 'image/jpeg'}: data[result.content_type] = result.as_b64() self._publish_execute_result(parent, data, {}, self.execution_count) super(SplashKernel, self).send_execute_reply(stream, ident, parent, md, reply) assert isinstance(reply_content, defer.Deferred) reply_content.addCallback(done) def do_execute(self, code, silent, store_history=True, user_expressions=None, allow_stdin=False): def success(res): result, content_type, headers, status_code = res reply = { 'status': 'ok', 'execution_count': self.execution_count, 'payload': [], 'user_expressions': {}, } return reply, result, content_type or 'text/plain' def error(failure): text = "<unknown error>" try: failure.raiseException() except Exception as e: text = error_repr(e) reply = { 'status': 'error', 'execution_count': self.execution_count, 'ename': '', 'evalue': text, 'traceback': [] } return reply, text, 'text/plain' try: try: # XXX: this ugly formatting is important for exception # line numbers to be displayed properly! 
lua_source = 'local repr = require("repr"); function main(splash) return repr(%s) end' % code main_coro = self._get_main(lua_source) except lupa.LuaSyntaxError: try: lines = code.splitlines(False) lua_source = '''local repr = require("repr"); function main(splash) %s return repr(%s) end ''' % ("\n".join(lines[:-1]), lines[-1]) main_coro = self._get_main(lua_source) except lupa.LuaSyntaxError: lua_source = "function main(splash) %s end" % code main_coro = self._get_main(lua_source) except (lupa.LuaSyntaxError, lupa.LuaError) as e: d = defer.Deferred() d.addCallbacks(success, error) d.errback(e) return d except Exception: d = defer.Deferred() d.addCallbacks(success, error) d.errback() return d d = self.runner.run(main_coro) d.addCallbacks(success, error) return d def do_complete(self, code, cursor_pos): return self.completer.complete(code, cursor_pos) def do_inspect(self, code, cursor_pos, detail_level=0): return self.inspector.help(code, cursor_pos, detail_level) def _publish_execute_result(self, parent, data, metadata, execution_count): msg = { u'data': data, u'metadata': metadata, u'execution_count': execution_count } self.session.send(self.iopub_socket, u'execute_result', msg, parent=parent, ident=self._topic('execute_result') ) def log_msg(self, text, min_level=2): self._print(text + "\n") def _print(self, message): stream_content = {'name': 'stdout', 'text': message, 'metadata': dict()} self.log.debug('Write: %s' % message) self.send_response(self.iopub_socket, 'stream', stream_content) def _get_main(self, lua_source): if self.sandboxed: main, env = get_main_sandboxed(self.lua, lua_source) else: main, env = get_main(self.lua, lua_source) return self.lua.create_coroutine(main) def server_factory(network_manager_factory, verbosity, **kwargs): init_qt_app(verbose=verbosity >= 5) SplashKernel.network_manager_factory = network_manager_factory kernel = IPKernelApp.instance(kernel_class=SplashKernel) kernel.initialize() kernel.kernel.eventloop = loop_qt5 kernel.start() def start(): splash_args = os.environ.get('SPLASH_ARGS', '').split() server.main(jupyter=True, argv=splash_args, server_factory=server_factory)
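# Illustrative sketch (assumes Twisted is available, as it is for Splash):
# the callback wiring used by DeferredSplashRunner.run and
# SplashKernel.do_execute in miniature. A Deferred is handed out first, the
# producer later fires it via callback()/errback(), and the consumer reacts
# through addCallbacks(). The runner body here is a stand-in; a real runner
# would start a Lua coroutine and report back when it finishes.
from twisted.internet import defer


def run_sketch():
    d = defer.Deferred()

    def return_result(result):
        d.callback(result)

    def return_error(err):
        d.errback(err)

    # Report success immediately instead of running a coroutine.
    return_result('ok')
    return d


def on_success(result):
    print('success: %s' % result)


def on_error(failure):
    print('error: %s' % failure)


d = run_sketch()
d.addCallbacks(on_success, on_error)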
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Linear Estimators.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import math import re import six from tensorflow.contrib import layers from tensorflow.contrib.framework import deprecated from tensorflow.contrib.framework import deprecated_arg_values from tensorflow.contrib.framework.python.ops import variables as contrib_variables from tensorflow.contrib.learn.python.learn import evaluable from tensorflow.contrib.learn.python.learn import trainable from tensorflow.contrib.learn.python.learn.estimators import estimator from tensorflow.contrib.learn.python.learn.estimators import head as head_lib from tensorflow.contrib.learn.python.learn.estimators import prediction_key from tensorflow.contrib.learn.python.learn.utils import export from tensorflow.contrib.linear_optimizer.python import sdca_optimizer from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import clip_ops from tensorflow.python.ops import gradients from tensorflow.python.ops import partitioned_variables from tensorflow.python.ops import variable_scope from tensorflow.python.platform import tf_logging as logging from tensorflow.python.training import session_run_hook from tensorflow.python.training import training as train # The default learning rate of 0.2 is a historical artifact of the initial # implementation, but seems a reasonable choice. _LEARNING_RATE = 0.2 def _get_optimizer(spec): if isinstance(spec, six.string_types): return layers.OPTIMIZER_CLS_NAMES[spec]( learning_rate=_LEARNING_RATE) elif callable(spec): return spec() return spec # TODO(ispir): Remove this function by fixing '_infer_model' with single outputs # and as_iteable case. def _as_iterable(preds, output): for pred in preds: yield pred[output] def _add_bias_column(feature_columns, columns_to_tensors, bias_variable, labels, columns_to_variables): # TODO(b/31008490): Move definition to a common constants place. bias_column_name = "tf_virtual_bias_column" if any(col.name is bias_column_name for col in feature_columns): raise ValueError("%s is a reserved column name." % bias_column_name) bias_column = layers.real_valued_column(bias_column_name) columns_to_tensors[bias_column] = array_ops.ones_like(labels, dtype=dtypes.float32) columns_to_variables[bias_column] = [bias_variable] def _linear_model_fn(features, labels, mode, params): """A model_fn for linear models that use a gradient-based optimizer. Args: features: `Tensor` or dict of `Tensor` (depends on data passed to `fit`). labels: `Tensor` of shape [batch_size, 1] or [batch_size] labels of dtype `int32` or `int64` in the range `[0, n_classes)`. mode: Defines whether this is training, evaluation or prediction. See `ModeKeys`. params: A dict of hyperparameters. 
The following hyperparameters are expected: * head: A `Head` instance. * feature_columns: An iterable containing all the feature columns used by the model. * optimizer: string, `Optimizer` object, or callable that defines the optimizer to use for training. * gradient_clip_norm: A float > 0. If provided, gradients are clipped to their global norm with this clipping ratio. * num_ps_replicas: The number of parameter server replicas. * joint_weights: If True, the weights for all columns will be stored in a single (possibly partitioned) variable. It's more efficient, but it's incompatible with SDCAOptimizer, and requires all feature columns are sparse and use the 'sum' combiner. Returns: An `estimator.ModelFnOps` instance. Raises: ValueError: If mode is not any of the `ModeKeys`. """ head = params["head"] feature_columns = params["feature_columns"] optimizer = params["optimizer"] gradient_clip_norm = params.get("gradient_clip_norm", None) num_ps_replicas = params.get("num_ps_replicas", 0) joint_weights = params.get("joint_weights", False) if not isinstance(features, dict): features = {"": features} parent_scope = "linear" partitioner = partitioned_variables.min_max_variable_partitioner( max_partitions=num_ps_replicas, min_slice_size=64 << 20) with variable_scope.variable_scope( parent_scope, values=features.values(), partitioner=partitioner) as scope: if joint_weights: logits, _, _ = ( layers.joint_weighted_sum_from_feature_columns( columns_to_tensors=features, feature_columns=feature_columns, num_outputs=head.logits_dimension, weight_collections=[parent_scope], scope=scope)) else: logits, _, _ = ( layers.weighted_sum_from_feature_columns( columns_to_tensors=features, feature_columns=feature_columns, num_outputs=head.logits_dimension, weight_collections=[parent_scope], scope=scope)) def _train_op_fn(loss): global_step = contrib_variables.get_global_step() my_vars = ops.get_collection("linear") grads = gradients.gradients(loss, my_vars) if gradient_clip_norm: grads, _ = clip_ops.clip_by_global_norm(grads, gradient_clip_norm) return (optimizer.apply_gradients( zip(grads, my_vars), global_step=global_step)) return head.head_ops(features, labels, mode, _train_op_fn, logits) def sdca_model_fn(features, labels, mode, params): """A model_fn for linear models that use the SDCA optimizer. Args: features: A dict of `Tensor` keyed by column name. labels: `Tensor` of shape [batch_size, 1] or [batch_size] labels of dtype `int32` or `int64` in the range `[0, n_classes)`. mode: Defines whether this is training, evaluation or prediction. See `ModeKeys`. params: A dict of hyperparameters. The following hyperparameters are expected: * head: A `Head` instance. Type must be one of `_BinarySvmHead`, `_RegressionHead` or `_MultiClassHead`. * feature_columns: An iterable containing all the feature columns used by the model. * optimizer: An `SDCAOptimizer` instance. * weight_column_name: A string defining the weight feature column, or None if there are no weights. * update_weights_hook: A `SessionRunHook` object or None. Used to update model weights. Returns: An `estimator.ModelFnOps` instance. Raises: ValueError: If `optimizer` is not an `SDCAOptimizer` instance. ValueError: If the type of head is neither `_BinarySvmHead`, nor `_RegressionHead` nor `_MultiClassHead`. ValueError: If mode is not any of the `ModeKeys`. 
""" head = params["head"] feature_columns = params["feature_columns"] optimizer = params["optimizer"] weight_column_name = params["weight_column_name"] update_weights_hook = params.get("update_weights_hook", None) if not isinstance(optimizer, sdca_optimizer.SDCAOptimizer): raise ValueError("Optimizer must be of type SDCAOptimizer") if isinstance(head, head_lib._BinarySvmHead): # pylint: disable=protected-access loss_type = "hinge_loss" elif isinstance(head, head_lib._MultiClassHead): # pylint: disable=protected-access loss_type = "logistic_loss" elif isinstance(head, head_lib._RegressionHead): # pylint: disable=protected-access loss_type = "squared_loss" else: return ValueError("Unsupported head type: {}".format(head)) parent_scope = "linear" with variable_scope.variable_op_scope( features.values(), parent_scope) as scope: logits, columns_to_variables, bias = ( layers.weighted_sum_from_feature_columns( columns_to_tensors=features, feature_columns=feature_columns, num_outputs=1, scope=scope)) _add_bias_column(feature_columns, features, bias, labels, columns_to_variables) def _train_op_fn(unused_loss): global_step = contrib_variables.get_global_step() sdca_model, train_op = optimizer.get_train_step(columns_to_variables, weight_column_name, loss_type, features, labels, global_step) if update_weights_hook is not None: update_weights_hook.set_parameters(sdca_model, train_op) return train_op return head.head_ops(features, labels, mode, _train_op_fn, logits) # Ensures consistency with LinearComposableModel. def _get_default_optimizer(feature_columns): learning_rate = min(_LEARNING_RATE, 1.0 / math.sqrt(len(feature_columns))) return train.FtrlOptimizer(learning_rate=learning_rate) class _SdcaUpdateWeightsHook(session_run_hook.SessionRunHook): """SessionRunHook to update and shrink SDCA model weights.""" def __init__(self): pass def set_parameters(self, sdca_model, train_op): self._sdca_model = sdca_model self._train_op = train_op def begin(self): """Construct the update_weights op. The op is implicitly added to the default graph. """ self._update_op = self._sdca_model.update_weights(self._train_op) def before_run(self, run_context): """Return the update_weights op so that it is executed during this run.""" return session_run_hook.SessionRunArgs(self._update_op) class LinearClassifier(evaluable.Evaluable, trainable.Trainable): """Linear classifier model. Train a linear model to classify instances into one of multiple possible classes. When number of possible classes is 2, this is binary classification. Example: ```python sparse_column_a = sparse_column_with_hash_bucket(...) sparse_column_b = sparse_column_with_hash_bucket(...) sparse_feature_a_x_sparse_feature_b = crossed_column(...) # Estimator using the default optimizer. estimator = LinearClassifier( feature_columns=[sparse_column_a, sparse_feature_a_x_sparse_feature_b]) # Or estimator using the FTRL optimizer with regularization. estimator = LinearClassifier( feature_columns=[sparse_column_a, sparse_feature_a_x_sparse_feature_b], optimizer=tf.train.FtrlOptimizer( learning_rate=0.1, l1_regularization_strength=0.001 )) # Or estimator using the SDCAOptimizer. estimator = LinearClassifier( feature_columns=[sparse_column_a, sparse_feature_a_x_sparse_feature_b], optimizer=tf.contrib.linear_optimizer.SDCAOptimizer( example_id_column='example_id', num_loss_partitions=..., symmetric_l2_regularization=2.0 )) # Input builders def input_fn_train: # returns x, y ... def input_fn_eval: # returns x, y ... 
estimator.fit(input_fn=input_fn_train) estimator.evaluate(input_fn=input_fn_eval) estimator.predict(x=x) ``` Input of `fit` and `evaluate` should have following features, otherwise there will be a `KeyError`: * if `weight_column_name` is not `None`, a feature with `key=weight_column_name` whose value is a `Tensor`. * for each `column` in `feature_columns`: - if `column` is a `SparseColumn`, a feature with `key=column.name` whose `value` is a `SparseTensor`. - if `column` is a `WeightedSparseColumn`, two features: the first with `key` the id column name, the second with `key` the weight column name. Both features' `value` must be a `SparseTensor`. - if `column` is a `RealValuedColumn`, a feature with `key=column.name` whose `value` is a `Tensor`. """ def __init__(self, # _joint_weight pylint: disable=invalid-name feature_columns, model_dir=None, n_classes=2, weight_column_name=None, optimizer=None, gradient_clip_norm=None, enable_centered_bias=False, _joint_weight=False, config=None, feature_engineering_fn=None): """Construct a `LinearClassifier` estimator object. Args: feature_columns: An iterable containing all the feature columns used by the model. All items in the set should be instances of classes derived from `FeatureColumn`. model_dir: Directory to save model parameters, graph and etc. This can also be used to load checkpoints from the directory into a estimator to continue training a previously saved model. n_classes: number of label classes. Default is binary classification. weight_column_name: A string defining feature column name representing weights. It is used to down weight or boost examples during training. It will be multiplied by the loss of the example. optimizer: The optimizer used to train the model. If specified, it should be either an instance of `tf.Optimizer` or the SDCAOptimizer. If `None`, the Ftrl optimizer will be used. gradient_clip_norm: A `float` > 0. If provided, gradients are clipped to their global norm with this clipping ratio. See `tf.clip_by_global_norm` for more details. enable_centered_bias: A bool. If True, estimator will learn a centered bias variable for each class. Rest of the model structure learns the residual after centered bias. _joint_weight: If True, the weights for all columns will be stored in a single (possibly partitioned) variable. It's more efficient, but it's incompatible with SDCAOptimizer, and requires all feature columns are sparse and use the 'sum' combiner. config: `RunConfig` object to configure the runtime settings. feature_engineering_fn: Feature engineering function. Takes features and labels which are the output of `input_fn` and returns features and labels which will be fed into the model. Returns: A `LinearClassifier` estimator. Raises: ValueError: if n_classes < 2. """ # TODO(zoy): Give an unsupported error if enable_centered_bias is # requested for SDCA once its default changes to False. 
self._feature_columns = feature_columns assert self._feature_columns self._optimizer = _get_default_optimizer(feature_columns) if optimizer: self._optimizer = _get_optimizer(optimizer) chief_hook = None if (isinstance(optimizer, sdca_optimizer.SDCAOptimizer) and enable_centered_bias): enable_centered_bias = False logging.warning("centered_bias is not supported with SDCA, " "please disable it explicitly.") head = head_lib._multi_class_head( # pylint: disable=protected-access n_classes, weight_column_name=weight_column_name, enable_centered_bias=enable_centered_bias) params = { "head": head, "feature_columns": feature_columns, "optimizer": self._optimizer, } if isinstance(optimizer, sdca_optimizer.SDCAOptimizer): assert not _joint_weight, ("_joint_weight is incompatible with the" " SDCAOptimizer") assert n_classes == 2, "SDCA only applies to binary classification." model_fn = sdca_model_fn # The model_fn passes the model parameters to the chief_hook. We then use # the hook to update weights and shrink step only on the chief. chief_hook = _SdcaUpdateWeightsHook() params.update({ "weight_column_name": weight_column_name, "update_weights_hook": chief_hook, }) else: model_fn = _linear_model_fn params.update({ "gradient_clip_norm": gradient_clip_norm, "num_ps_replicas": config.num_ps_replicas if config else 0, "joint_weights": _joint_weight, }) self._estimator = estimator.Estimator( model_fn=model_fn, model_dir=model_dir, config=config, params=params, feature_engineering_fn=feature_engineering_fn) self._additional_run_hook = (chief_hook if self._estimator.config.is_chief else None) def get_estimator(self): return self._estimator def fit(self, x=None, y=None, input_fn=None, steps=None, batch_size=None, monitors=None, max_steps=None): """See trainable.Trainable.""" # TODO(roumposg): Remove when deprecated monitors are removed. 
if monitors is None: monitors = [] deprecated_monitors = [ m for m in monitors if not isinstance(m, session_run_hook.SessionRunHook) ] for monitor in deprecated_monitors: monitor.set_estimator(self) monitor._lock_estimator() # pylint: disable=protected-access if self._additional_run_hook: monitors.append(self._additional_run_hook) result = self._estimator.fit(x=x, y=y, input_fn=input_fn, steps=steps, batch_size=batch_size, monitors=monitors, max_steps=max_steps) for monitor in deprecated_monitors: monitor._unlock_estimator() # pylint: disable=protected-access return result def evaluate(self, x=None, y=None, input_fn=None, feed_fn=None, batch_size=None, steps=None, metrics=None, name=None): """See evaluable.Evaluable.""" return self._estimator.evaluate(x=x, y=y, input_fn=input_fn, feed_fn=feed_fn, batch_size=batch_size, steps=steps, metrics=metrics, name=name) @deprecated_arg_values( estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS, as_iterable=False) def predict(self, x=None, input_fn=None, batch_size=None, as_iterable=True): """Runs inference to determine the predicted class.""" key = prediction_key.PredictionKey.CLASSES preds = self._estimator.predict( x=x, input_fn=input_fn, batch_size=batch_size, outputs=[key], as_iterable=as_iterable) if as_iterable: return _as_iterable(preds, output=key) return preds[key] @deprecated_arg_values( estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS, as_iterable=False) def predict_proba(self, x=None, input_fn=None, batch_size=None, outputs=None, as_iterable=True): """Runs inference to determine the class probability predictions.""" key = prediction_key.PredictionKey.PROBABILITIES preds = self._estimator.predict( x=x, input_fn=input_fn, batch_size=batch_size, outputs=[key], as_iterable=as_iterable) if as_iterable: return _as_iterable(preds, output=key) return preds[key] def get_variable_names(self): return self._estimator.get_variable_names() def get_variable_value(self, name): return self._estimator.get_variable_value(name) def export(self, export_dir, input_fn=None, input_feature_key=None, use_deprecated_input_fn=True, signature_fn=None, default_batch_size=1, exports_to_keep=None): """See BaseEstimator.export.""" def default_input_fn(unused_estimator, examples): return layers.parse_feature_columns_from_examples( examples, self._feature_columns) return self._estimator.export( export_dir=export_dir, input_fn=input_fn or default_input_fn, input_feature_key=input_feature_key, use_deprecated_input_fn=use_deprecated_input_fn, signature_fn=(signature_fn or export.classification_signature_fn_with_prob), prediction_key=prediction_key.PredictionKey.PROBABILITIES, default_batch_size=default_batch_size, exports_to_keep=exports_to_keep) @property @deprecated("2016-10-30", "This method will be removed after the deprecation date. " "To inspect variables, use get_variable_names() and " "get_variable_value().") def weights_(self): values = {} optimizer_regex = r".*/"+self._optimizer.get_name() + r"(_\d)?$" for name in self.get_variable_names(): if (name.startswith("linear/") and name != "linear/bias_weight" and not re.match(optimizer_regex, name)): values[name] = self.get_variable_value(name) if len(values) == 1: return values[list(values.keys())[0]] return values @property @deprecated("2016-10-30", "This method will be removed after the deprecation date. 
" "To inspect variables, use get_variable_names() and " "get_variable_value().") def bias_(self): return self.get_variable_value("linear/bias_weight") @property def config(self): return self._estimator.config @property def model_dir(self): return self._estimator.model_dir class LinearRegressor(evaluable.Evaluable, trainable.Trainable): """Linear regressor model. Train a linear regression model to predict label value given observation of feature values. Example: ```python sparse_column_a = sparse_column_with_hash_bucket(...) sparse_column_b = sparse_column_with_hash_bucket(...) sparse_feature_a_x_sparse_feature_b = crossed_column(...) estimator = LinearRegressor( feature_columns=[sparse_column_a, sparse_feature_a_x_sparse_feature_b]) # Input builders def input_fn_train: # returns x, y ... def input_fn_eval: # returns x, y ... estimator.fit(input_fn=input_fn_train) estimator.evaluate(input_fn=input_fn_eval) estimator.predict(x=x) ``` Input of `fit` and `evaluate` should have following features, otherwise there will be a KeyError: * if `weight_column_name` is not `None`: key=weight_column_name, value=a `Tensor` * for column in `feature_columns`: - if isinstance(column, `SparseColumn`): key=column.name, value=a `SparseTensor` - if isinstance(column, `WeightedSparseColumn`): {key=id column name, value=a `SparseTensor`, key=weight column name, value=a `SparseTensor`} - if isinstance(column, `RealValuedColumn`): key=column.name, value=a `Tensor` """ def __init__(self, # _joint_weights: pylint: disable=invalid-name feature_columns, model_dir=None, weight_column_name=None, optimizer=None, gradient_clip_norm=None, enable_centered_bias=False, label_dimension=1, _joint_weights=False, config=None, feature_engineering_fn=None): """Construct a `LinearRegressor` estimator object. Args: feature_columns: An iterable containing all the feature columns used by the model. All items in the set should be instances of classes derived from `FeatureColumn`. model_dir: Directory to save model parameters, graph, etc. This can also be used to load checkpoints from the directory into a estimator to continue training a previously saved model. weight_column_name: A string defining feature column name representing weights. It is used to down weight or boost examples during training. It will be multiplied by the loss of the example. optimizer: An instance of `tf.Optimizer` used to train the model. If `None`, will use an Ftrl optimizer. gradient_clip_norm: A `float` > 0. If provided, gradients are clipped to their global norm with this clipping ratio. See `tf.clip_by_global_norm` for more details. enable_centered_bias: A bool. If True, estimator will learn a centered bias variable for each class. Rest of the model structure learns the residual after centered bias. label_dimension: dimension of the label for multilabels. _joint_weights: If True use a single (possibly partitioned) variable to store the weights. It's faster, but requires all feature columns are sparse and have the 'sum' combiner. Incompatible with SDCAOptimizer. config: `RunConfig` object to configure the runtime settings. feature_engineering_fn: Feature engineering function. Takes features and labels which are the output of `input_fn` and returns features and labels which will be fed into the model. Returns: A `LinearRegressor` estimator. 
""" self._feature_columns = feature_columns assert self._feature_columns self._optimizer = _get_default_optimizer(feature_columns) if optimizer: self._optimizer = _get_optimizer(optimizer) chief_hook = None if (isinstance(optimizer, sdca_optimizer.SDCAOptimizer) and enable_centered_bias): enable_centered_bias = False logging.warning("centered_bias is not supported with SDCA, " "please disable it explicitly.") head = head_lib._regression_head( # pylint: disable=protected-access weight_column_name=weight_column_name, label_dimension=label_dimension, enable_centered_bias=enable_centered_bias) params = { "head": head, "feature_columns": feature_columns, "optimizer": self._optimizer, } if isinstance(optimizer, sdca_optimizer.SDCAOptimizer): assert label_dimension == 1, "SDCA only applies for label_dimension=1." assert not _joint_weights, ("_joint_weights is incompatible with" " SDCAOptimizer.") model_fn = sdca_model_fn # The model_fn passes the model parameters to the chief_hook. We then use # the hook to update weights and shrink step only on the chief. chief_hook = _SdcaUpdateWeightsHook() params.update({ "weight_column_name": weight_column_name, "update_weights_hook": chief_hook, }) else: model_fn = _linear_model_fn params.update({ "gradient_clip_norm": gradient_clip_norm, "num_ps_replicas": config.num_ps_replicas if config else 0, "joint_weights": _joint_weights, }) self._estimator = estimator.Estimator( model_fn=model_fn, model_dir=model_dir, config=config, params=params, feature_engineering_fn=feature_engineering_fn) self._additional_run_hook = (chief_hook if self._estimator.config.is_chief else None) def fit(self, x=None, y=None, input_fn=None, steps=None, batch_size=None, monitors=None, max_steps=None): """See trainable.Trainable.""" # TODO(roumposg): Remove when deprecated monitors are removed. 
if monitors is None: monitors = [] deprecated_monitors = [ m for m in monitors if not isinstance(m, session_run_hook.SessionRunHook) ] for monitor in deprecated_monitors: monitor.set_estimator(self) monitor._lock_estimator() # pylint: disable=protected-access if self._additional_run_hook: monitors.append(self._additional_run_hook) result = self._estimator.fit(x=x, y=y, input_fn=input_fn, steps=steps, batch_size=batch_size, monitors=monitors, max_steps=max_steps) for monitor in deprecated_monitors: monitor._unlock_estimator() # pylint: disable=protected-access return result def evaluate(self, x=None, y=None, input_fn=None, feed_fn=None, batch_size=None, steps=None, metrics=None, name=None): """See evaluable.Evaluable.""" return self._estimator.evaluate(x=x, y=y, input_fn=input_fn, feed_fn=feed_fn, batch_size=batch_size, steps=steps, metrics=metrics, name=name) @deprecated_arg_values( estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS, as_iterable=False) def predict(self, x=None, input_fn=None, batch_size=None, as_iterable=True): """Runs inference to determine the predicted class.""" key = prediction_key.PredictionKey.SCORES preds = self._estimator.predict( x=x, input_fn=input_fn, batch_size=batch_size, outputs=[key], as_iterable=as_iterable) if as_iterable: return _as_iterable(preds, output=key) return preds[key] def get_variable_names(self): return self._estimator.get_variable_names() def get_variable_value(self, name): return self._estimator.get_variable_value(name) def export(self, export_dir, input_fn=None, input_feature_key=None, use_deprecated_input_fn=True, signature_fn=None, default_batch_size=1, exports_to_keep=None): """See BaseEstimator.export.""" def default_input_fn(unused_estimator, examples): return layers.parse_feature_columns_from_examples( examples, self._feature_columns) return self._estimator.export( export_dir=export_dir, input_fn=input_fn or default_input_fn, input_feature_key=input_feature_key, use_deprecated_input_fn=use_deprecated_input_fn, signature_fn=(signature_fn or export.regression_signature_fn), prediction_key=prediction_key.PredictionKey.SCORES, default_batch_size=default_batch_size, exports_to_keep=exports_to_keep) @property @deprecated("2016-10-30", "This method will be removed after the deprecation date. " "To inspect variables, use get_variable_names() and " "get_variable_value().") def weights_(self): values = {} optimizer_regex = r".*/"+self._optimizer.get_name() + r"(_\d)?$" for name in self.get_variable_names(): if (name.startswith("linear/") and name != "linear/bias_weight" and not re.match(optimizer_regex, name)): values[name] = self.get_variable_value(name) if len(values) == 1: return values[list(values.keys())[0]] return values @property @deprecated("2016-10-30", "This method will be removed after the deprecation date. " "To inspect variables, use get_variable_names() and " "get_variable_value().") def bias_(self): return self.get_variable_value("linear/bias_weight") @property def config(self): return self._estimator.config @property def model_dir(self): return self._estimator.model_dir
from OpenSSL import SSL, crypto, rand from socket import socket, AF_INET, SOCK_STREAM from threading import Thread from select import select from log import Logger class Worker(Thread): counter = 0 READ_LENGTH = 2048 def __init__(self, group=None, target=None, name=None, args=(), kwargs={}): super(Worker, self).__init__(group, name, args, kwargs) self.id = Worker.counter Worker.counter += 1 self.cliSock = kwargs['socket'] self.cliSock.setblocking(0) self.servSock = None self.sockL = [self.cliSock] self.log = kwargs['log'] self.key = kwargs['key'] self.ca = kwargs['ca'] self.cliIn = '' self.cliOut = '' self.servIn = '' self.servOut = '' self.tlsEstablished = False self.__stop = False def run(self): self.log.send(__name__, Logger.WARNING, 'Worker(%d) thread started' % (self.id)) while(1): if(self.__stop): break self.__send() if(not self.servSock): self.__servConnect() else: if(not self.tlsEstablished): self.__tlsTransition() else: self.__proxify() self.__recv() self.__clean() self.log.send(__name__, Logger.WARNING, 'Worker(%d) thread stopped' % (self.id)) def stop(self): self.__stop = True def isStopped(self): return self.__stop def __send(self): _, wList, _ = select([], self.sockL, [], 0.5) if(self.cliSock in wList > 0): if(len(self.cliOut) != 0): try: self.cliSock.send(self.cliOut) except Exception as e: self.log.send(__name__, Logger.ERROR, ('Worker(%d) fail to send %d bytes to ' 'client, killing myself!') % (self.id, len(self.cliOut))) self.stop() return self.log.send(__name__, Logger.INFO, ('Worker(%d) %d bytes sent ' 'to client') % (self.id, len(self.cliOut))) self.cliOut = '' if(self.servSock in wList): if(len(self.servOut) != 0): try: self.servSock.send(self.servOut) except Exception as e: self.log.send(__name__, Logger.ERROR, ('Worker(%d) fail to send %d bytes ' 'to server, killing myself!') % (self.id, len(self.servOut))) self.stop() return self.log.send(__name__, Logger.INFO, 'Worker(%d) %d bytes sent to ' 'server' % (self.id, len(self.servOut))) self.servOut = '' def __recv(self): rList, _, _ = select(self.sockL, [], [], 0.5) if(rList): if(self.cliSock in rList): rcv = None while(rcv == None): try: rcv = self.cliSock.recv(Worker.READ_LENGTH) if(len(rcv) == 0): raise SSL.ZeroReturnError except SSL.ZeroReturnError: self.log.send(__name__, Logger.ERROR, ('Worker(%d) client disconnected, ' 'killing myself!') % (self.id)) self.stop() return except SSL.WantReadError: pass except Exception as e: self.log.send(__name__, Logger.ERROR, 'Worker(%d) %s' % (self.id, str(e))) self.stop() return self.log.send(__name__,Logger.INFO, ('Worker(%d) %d bytes received ' 'from client') % (self.id, len(rcv))) self.cliIn += rcv if(self.servSock in rList): rcv = None while(rcv == None): try: rcv = self.servSock.recv(Worker.READ_LENGTH) if(len(rcv) == 0): raise SSL.ZeroReturnError except SSL.ZeroReturnError: self.log.send(__name__, Logger.ERROR, ('Worker(%d) server disconnected,' ' killing myself!') % (self.id)) self.stop() return except SSL.WantReadError: pass except Exception as e: self.log.send(__name__, Logger.ERROR, 'Worker(%d) %s' % (self.id, str(e))) self.stop() return self.log.send(__name__, Logger.INFO, ('Worker(%d) %d bytes received ' 'from server') % (self.id, len(rcv))) self.servIn += rcv def __servConnect(self): servInfos = self.__parseHTTPHeader() if(not servInfos): return self.servSock = socket(AF_INET, SOCK_STREAM) try: self.servSock.connect(servInfos) except Exception as e: self.log.send(__name__, Logger.ERROR, ('Work(%d) Connection with %s:%d ' 'failed, killing myself!') % (self.id, 
servInfos[0], servInfos[1])) self.stop() return self.log.send(__name__, Logger.INFO, ('Worker(%d) Connection with ' '%s:%d established') % (self.id, servInfos[0], servInfos[1])) self.servSock.setblocking(0) self.sockL.append(self.servSock) self.cliOut = '200\r\n\r\n' def __parseHTTPHeader(self): header = '' i = self.cliIn.find('\r\n\r\n') if(i != -1): header = self.cliIn[:i + 4] self.cliIn = self.cliIn[i + 4:] try: tmp = header.split('\n')[0].split(' ') if(tmp[0] != 'CONNECT'): raise Exception host, port = tmp[1].split(':') port = int(port) except Exception as e: self.log.send(__name__, Logger.ERROR, ('Worker(%d) Invalid CONNECT HTTP ' 'header received from client') % (self.id)) return None self.log.send(__name__, Logger.INFO, ('Worker(%d) client requested ' 'to connect to %s:%d') % (self.id, host, port)) return (host, port) return None def __tlsTransition(self): ctx = SSL.Context(SSL.TLSv1_2_METHOD) self.servSock.setblocking(1) self.servSock = SSL.Connection(ctx, self.servSock) self.servSock.set_connect_state() self.servSock.do_handshake() self.servSock.setblocking(0) self.sockL[1] = self.servSock mitmCert = self.servSock.get_peer_certificate() mitmCert.set_pubkey(self.key) tmp = rand.bytes(100) serial = 0 for x in tmp: serial += ord(x) mitmCert.set_serial_number(serial) self.log.send(__name__, Logger.DEBUG, 'Worker(%d) cert serial = %d' % (self.id, serial)) mitmCert = self.ca.signCert(mitmCert) ctx.use_certificate(mitmCert) ctx.use_privatekey(self.key) self.cliSock.setblocking(1) self.cliSock = SSL.Connection(ctx, self.cliSock) self.cliSock.set_accept_state() self.cliSock.do_handshake() self.cliSock.setblocking(0) self.sockL[0] = self.cliSock self.tlsEstablished = True def __proxify(self): self.servOut = self.cliIn self.cliOut = self.servIn self.cliIn = '' self.servIn = '' def __clean(self): self.cliSock.close() if(self.servSock): self.servSock.close()
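# ---------------------------------------------------------------------------
# A minimal accept-loop sketch showing one way to drive Worker. The listen
# address is an assumption; `log` is expected to expose send() like the Logger
# used above, `key` a pyOpenSSL PKey and `ca` an object providing signCert(),
# matching what Worker consumes. How those objects are built is out of scope.
# ---------------------------------------------------------------------------
def serve(log, key, ca, host='127.0.0.1', port=8080):
    lstSock = socket(AF_INET, SOCK_STREAM)
    lstSock.bind((host, port))
    lstSock.listen(5)
    workers = []
    try:
        while True:
            cliSock, _ = lstSock.accept()
            worker = Worker(kwargs={'socket': cliSock, 'log': log,
                                    'key': key, 'ca': ca})
            worker.start()
            workers.append(worker)
    finally:
        for worker in workers:
            worker.stop()
        lstSock.close()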
from proteus import Domain, Context #from proteus.mprans import SpatialTools as st from proteus import Gauges as ga from proteus import WaveTools as wt from math import * import numpy as np opts=Context.Options([ # predefined test cases ("water_level", 0.425, "Height of free surface above bottom"), # Choose waterLevels=[0.425, 0.463] # waves ('waveType', 'Fenton', 'Wavetype for regular waves, Linear or Fenton'), ("wave_period", 0.5, "Period of the waves"), # Choose periods=[0.5, 0.8, 1.1, 1.5, 2.8, 3.9, 4.0] ("wave_height", 0.025, "Height of the waves"), # # Choose for d=0.425-->[0.025, 0.075, 0.125, 0.234]. Choose for d=0.463-->[0.025, 0.075, 0.125, 0.254]. ('wavelength', 0.4, 'Wavelength only if Fenton is activated'), # Choose for d=0.425-->[0.4, 1.0, 1.8, 2.9]. Choose for d=0.463-->[0.4, 1.0, 1.8, 2.9, 3.0, 5.7, 5.9, 8.8, 9.4]. ('Ycoeff', [0.19167938 , 0.01943414 , 0.00299676 , 0.00055096 , 0.00011165 , 0.00002413 , 0.00000571 , 0.00000251], 'Ycoeff only if Fenton is activated'), ('Bcoeff', [0.19063009 , 0.00072851 , 0.00002905 , 0.00000131 , 0.00000006 , 0.00000000 , 0.00000000 , 0.00000000], 'Bcoeff only if Fenton is activated'), # Geometry of the tank - left lower boundary at (0.,0.,0.) ("Ls", 2.0, "Distance of the front toe of the structure end from generation zone in wavelengths"), ("Lend", 2.0, "Distance of the back toe of the structure end from absorption zone in wavelengths"), ("Lgen", 1., "Length of generation zone in wavelegths"), ("Labs", 2., "Length of absorption zone in wavelegths"), ("h", 1.0, "Height of domain in meters"), # breakwater ("hs", 0.075, "Height of the breakwater"), ("slope", 1./2., "Slope of the breakwater"), ('porosity', 0.4, "Porosity of the medium"), ('d50', None, "Mean diameter of the medium"), ('d15', 0.038, "15% grading curve diameter of the medium"), # caisson ("caisson", True, "Switch on/off caisson"), ('dimx', 0.44, 'X-dimension of the caisson'), # Choose dimx=[0.44] ('dimy', 0.4, 'Y-dimension of the caisson'), ("rotation", not True, "Initial position for free oscillation"), ("friction", not True, "Switch on/off friction module"), ("m_static", 0.0, "Static friction factor between caisson and rubble mound"), ("m_dynamic", 0.0, "Dynamic friction factor between caisson and rubble mound"), # numerical options ("refinement_level", 200. 
,"he=walength/refinement_level"), ("cfl", 0.9 ,"Target cfl"), ("freezeLevelSet", True, "No motion to the levelset"), ("useVF", 0.0, "For density and viscosity smoothing"), ('movingDomain', not True, "Moving domain and mesh option"), ('conservativeFlux', not True,'Fix post-processing velocity bug for porous interface'), ]) # ----- DOMAIN ----- # domain = Domain.PlanarStraightLineGraphDomain() # ----- WAVE CONDITIONS ----- # period=opts.wave_period waterLevel=opts.water_level waveDir=np.array([1, 0., 0.]) mwl=waterLevel #coordinate of the initial mean level of water surface waveHeight=opts.wave_height inflowHeightMean=waterLevel inflowVelocityMean =np.array([0.,0.,0.]) windVelocity = np.array([0.,0.,0.]) # ----- Phisical constants ----- # rho_0=998.2 nu_0 =1.004e-6 rho_1=1.205 nu_1 =1.500e-5 sigma_01=0.0 g =np.array([0.,-9.8,0.]) gAbs=sqrt(sum(g**2)) # ----- WAVE input ----- # if opts.waveType=='Linear': waveinput = wt.MonochromaticWaves(period=period, waveHeight=waveHeight, mwl=mwl, depth=waterLevel, g=g, waveDir=waveDir, wavelength=None, # if wave is linear I can use None waveType=opts.waveType) if opts.waveType=='Fenton': waveinput = wt.MonochromaticWaves(period=period, waveHeight=waveHeight, mwl=mwl, depth=waterLevel, g=g, waveDir=waveDir, wavelength=opts.wavelength, # if wave is linear I can use None waveType=opts.waveType, Ycoeff=opts.Ycoeff, Bcoeff=opts.Bcoeff, ) #---------Domain Dimension nd = 2 wl = waveinput.wavelength #################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################### # ----- SHAPES ----- # #################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################### L_leftSpo = opts.Lgen*wl L_rightSpo = opts.Labs*wl hs=opts.hs slope=opts.slope #-Caisson dimx=opts.dimx dimy=opts.dimy b=dimx #-Tank x1=L_leftSpo x2=x1+opts.Ls*wl x3=x2+(hs/slope) xc1=x3+0.13 xc2=xc1+b yc1=yc2=hs x4=xc2+0.13 x5=x4+(hs/slope) x6=x5+opts.Lend*wl x7=x6+L_rightSpo tank_dim = [x7, opts.h] boundaryOrientations = {'bottom': [0., -1.,0.], 'right': [1., 0.,0.], 'top': [0., 1.,0.], 'left': [-1., 0.,0.], 'sponge': None, 'moving_sponge': None, } boundaryTags = {'bottom': 1, 'right': 2, 'top': 3, 'left': 4, 'sponge': 5, 'moving_sponge': 6, } ############################################################################################################################################################################################################## # Caisson ############################################################################################################################################################################################################ if opts.caisson: dimx=dimx dimy=dimy dim=(dimx,dimy) coords=[xc1+b/2., 
hs+dimy/2.] # For bodyDimensions and barycenter VCG=dim[1]/2. # For barycenter width=1.0 # The 3rd dimension density=100000 #kg/m3 volume=dimx*dimy*width mass=density*volume It=(dimx**2.+dimy**2.)/12. caisson2D = st.Rectangle(domain, dim=dim, coords=coords) caisson2D.vertices[0][0]=xc1 caisson2D.vertices[0][1]=yc1 caisson2D.vertices[1][0]=xc2 caisson2D.vertices[1][1]=yc2 free_x=(1.0, 1.0, 1.0) # Translational DOFs free_r=(0.0, 0.0, 1.0) # Rotational DOFs m_static=opts.m_static # Static friction m_dynamic=opts.m_dynamic # Dynamic friction if opts.movingDomain==True: free_x=(1.0, 1.0, 0.0) # Translational DOFs free_r=(0.0, 0.0, 1.0) # Rotational DOFs caisson2D.setMass(mass) caisson2D.setConstraints(free_x=free_x, free_r=free_r) caisson2D.setFriction(friction=opts.friction, m_static=m_static, m_dynamic=m_dynamic) if opts.rotation==True: # Initial position for free oscillation caisson2D.rotate(rotation) caisson2D.It= It/caisson2D.mass/width caisson2D.setRecordValues(all_values=True) caisson2D.setRigidBody() ############################################################################################################################################################################################################## # Tank ######################################################################################################################################################################################################### if opts.caisson==False: vertices=[[0.0, 0.0],#0 [x1, 0.0],#1 [x2, 0.0],#2 [x3, hs ],#3 [x4, hs ],#4 [x5, 0.0],#5 [x6, 0.0],#6 [x7, 0.0],#7 [x7, tank_dim[1]],#8 [x6, tank_dim[1]],#9 [x1, tank_dim[1]],#10 [0.0, tank_dim[1]],#11 ] vertexFlags=np.array([1, 1, 1, 1, 1, 1, 1, 1, 3, 3, 3, 3, ]) segments=[[0,1], [1,2], [2,3], [3,4], [4,5], [5,6], [6,7], [7,8], [8,9], [9,10], [10,11], [11,0], [1,10], [6,9], [2,5], ] segmentFlags=np.array([1, 1, 5, 5, 5, 1, 1, 2, 3, 3, 3, 4, 5, 5, 1, ]) else: vertices=[[0.0, 0.0],#0 [x1, 0.0],#1 [x2, 0.0],#2 [x3, hs ],#3 [x4, hs ],#4 [x5, 0.0],#5 [x6, 0.0],#6 [x7, 0.0],#7 [x7, tank_dim[1]],#8 [x6, tank_dim[1]],#9 [x1, tank_dim[1]],#10 [0.0, tank_dim[1]],#11 [xc1, yc1],#12 [xc2, yc2],#13 ] vertexFlags=np.array([1, 1, 1, 5, 5, 1, 1, 1, 3, 3, 3, 3, 6, 6, ]) segments=[[0,1], [1,2], [2,3], [4,5], [5,6], [6,7], [7,8], [8,9], [9,10], [10,11], [11,0], [1,10], [6,9], [2,5], [3,12], [13,4], ] segmentFlags=np.array([1, 1, 5, 5, 1, 1, 2, 3, 3, 3, 4, 5, 5, 1, 6, 6, ]) regions = [ [ 0.90*x1 , 0.10*tank_dim[1] ], [ 0.90*x2 , 0.10*tank_dim[1] ], [ xc1 , 0.50*hs ], [ 0.95*x7 , 0.95*tank_dim[1] ] ] regionFlags=np.array([1, 2, 3, 4]) tank = st.CustomShape(domain, vertices=vertices, vertexFlags=vertexFlags, segments=segments, segmentFlags=segmentFlags, regions=regions, regionFlags=regionFlags, boundaryTags=boundaryTags, boundaryOrientations=boundaryOrientations) ################################################################################################################################################################################################################## # POROUS MEDIA ################################################################################################################################################################################################################## porosity=opts.porosity voidFrac=1.0-porosity d50=opts.d50 if d50==None: d15=opts.d15 else: d15=d50/1.2 term1=3.12*(10**-3.) term2=(gAbs/(nu_0**2.))**(2./3.) term3=(d15**2.) Alpha1=1684+term1*term2*term3 #Shih #Alpha1=150 #Ergun #Alpha1=360 #Engelund term1=-5.10*(10**-3.) 
term2=(gAbs/(nu_0**2.))**(1./3.) term3=(d15) Beta1=1.72+1.57*exp(term1*term2*term3) #Shih #Beta1=1.75 #Ergun #Beta1=3.6 #Engelund #Alpha=Alpha1*nu_0*(voidFrac**3)/((porosity**2)*(d15**2)) #Engelund Alpha=Alpha1*nu_0*(voidFrac**2)/((porosity**3)*(d15**2)) #Ergun Beta=Beta1*voidFrac/((porosity**3)*d15) #Proteus scale in viscosity, so i need to divide alpha and beta by nu_0 dragAlpha=(porosity**2)*Alpha/nu_0 dragBeta=0.0#(porosity**3)*Beta/nu_0 ############################################################################################################################################################################################################################################################################################################################################################################################# # ----- BOUNDARY CONDITIONS ----- # ############################################################################################################################################################################################################################################################################################################################################################################################# if opts.caisson: for bc in caisson2D.BC_list: bc.setFreeSlip() tank.BC.top.setOpenAir() tank.BC.left.setUnsteadyTwoPhaseVelocityInlet(wave=waveinput, vert_axis=1, windSpeed=windVelocity) #tank.BC.bottom.setFreeSlip() tank.BC.bottom.setNoSlip() #tank.BC.right.setFreeSlip() tank.BC.right.setNoSlip() tank.BC.sponge.setNonMaterial() tank.BC.moving_sponge.setNonMaterial() if opts.movingDomain==True: for tb in [tank.BC.right, tank.BC.left, tank.BC.top, tank.BC.bottom, tank.BC.sponge]: tb.hx_dirichlet= lambda x, t: 0.0 tb.hy_dirichlet= lambda x, t: 0.0 tb.hz_dirichlet= lambda x, t: 0.0 tb.u_stress=None tb.v_stress=None tb.w_stress=None ms=tank.BC.moving_sponge ms.hx_dirichlet= None ms.hy_dirichlet= None ms.hz_dirichlet= lambda x, t: 0.0 ms.u_stress=None ms.v_stress=None ms.w_stress=None ######################################################################################################################################################################################################################################################################################################################################################## # ----- GENERATION ZONE & ABSORPTION ZONE ----- # ######################################################################################################################################################################################################################################################################################################################################################## tank.setGenerationZones(flags=1, epsFact_solid=float(L_leftSpo/2.), orientation=[1., 0.], center=(float(L_leftSpo/2.), 0., 0.), waves=waveinput, windSpeed=windVelocity, ) tank.setPorousZones(flags=3, epsFact_solid=float((x5-x2)/2.), dragAlpha=dragAlpha, dragBeta=dragBeta, porosity=porosity, ) tank.setAbsorptionZones(flags=4, epsFact_solid=float(L_rightSpo/2.), orientation=[-1., 0.], center=(float(x7-L_rightSpo/2.), 0., 0.), ) ############################################################################################################################################################################ # ----- Output Gauges ----- # 
############################################################################################################################################################################ T = 20.*period PG=[] LG=[] LG1=[] LG2=[] LG3=[] LG4=[] #-------------Pressure and vof gauges for WaveHeight--------------------------------# z_probes = waterLevel*0.5 PG=[(0.0,z_probes,0.), (xc1-0.73,z_probes,0.), (xc1+0.53,z_probes,0.), (xc1+1.03,z_probes,0.), (xc1+1.23,z_probes,0.)] pressureGauges=ga.PointGauges(gauges=((('p',),PG), ), activeTime = (0., T), sampleRate=0., fileName='pressureProbes.csv') VG=[((0.0,0.,0.),(0.0,tank_dim[1],0.)), ((xc1-0.73,0.,0.),(xc1-0.73,tank_dim[1],0.)), ((xc1+0.53,0.,0.),(xc1+0.53,tank_dim[1],0.)), ((xc1+1.03,0.,0.),(xc1+1.03,tank_dim[1],0.)), ((xc1+1.23,0.,0.),(xc1+1.23,tank_dim[1],0.))] VG=tuple(map(tuple,VG)) fields=(('vof',)) vof_probes=ga.LineIntegralGauges(gauges=((fields, VG), ), activeTime = (0., T), sampleRate=0., fileName='waveProbes.csv') if opts.caisson: xc1=caisson2D.vertices[0][0]-1*(10**-5) #to avoid floating point error yc1=caisson2D.vertices[0][1]-1*(10**-5) #to avoid floating point error xc2=caisson2D.vertices[1][0]+1*(10**-5) #to avoid floating point error yc2=caisson2D.vertices[1][1]-1*(10**-5) #to avoid floating point error xc3=caisson2D.vertices[2][0]+1*(10**-5) #to avoid floating point error yc3=caisson2D.vertices[2][1]+1*(10**-5) #to avoid floating point error xc4=caisson2D.vertices[3][0]-1*(10**-5) #to avoid floating point error yc4=caisson2D.vertices[3][1]+1*(10**-5) #to avoid floating point error #-------------Wave overtopping-----------------------------------------------# dx=0.01 probes=np.linspace(yc1, tank_dim[1], (tank_dim[1]-yc1)/dx+1) for i in probes: LG.append((xc1,i,0.),) overtoppingGauges=ga.PointGauges(gauges=((('u',), LG),), activeTime = (0., T), sampleRate=0., fileName='overtoppingVelGauges.csv') vofGauges=ga.PointGauges(gauges=(((('vof'),), LG),), activeTime = (0., T), sampleRate=0., fileName='overtoppingVofGauges.csv') #-------------Wave loading---------------------------------------------------# probes1=np.linspace(xc1, xc2, (xc2-xc1)/dx+1) for i in probes1: LG1.append((i,yc1,0.),) probes2=np.linspace(yc2, yc3, (yc3-yc2)/dx+1) for i in probes2: LG2.append((xc2,i,0.),) probes3=np.linspace(xc4, xc3, (xc3-xc4)/dx+1) for i in probes3: LG3.append((i,yc3,0.),) probes4=np.linspace(yc1, yc4, (yc4-yc1)/dx+1) for i in probes4: LG4.append((xc4,i,0.),) loadingsGauges=ga.PointGauges(gauges=((('p',), LG1), (('p',), LG2), (('p',), LG3), (('p',), LG4),), activeTime = (0., T), sampleRate=0., fileName='loadingGauges.csv') domain.auxiliaryVariables += [pressureGauges,vof_probes,overtoppingGauges,vofGauges, loadingsGauges, ] else: domain.auxiliaryVariables += [pressureGauges, vof_probes, ] ###################################################################################################################################################################################################################### # Numerical Options and other parameters # ###################################################################################################################################################################################################################### he = waveinput.wavelength/opts.refinement_level domain.MeshOptions.he = he from math import * from proteus import MeshTools, AuxiliaryVariables import numpy import proteus.MeshTools from proteus import Domain from proteus.Profiling import logEvent from proteus.default_n import * from 
proteus.ctransportCoefficients import smoothedHeaviside from proteus.ctransportCoefficients import smoothedHeaviside_integral st.assembleDomain(domain) #---------------------------------------------------- # Time stepping and velocity #---------------------------------------------------- weak_bc_penalty_constant = 10.0/nu_0 #100 dt_fixed = 0.1 dt_init = min(0.1*dt_fixed,0.001) T = T nDTout= int(round(T/dt_fixed)) runCFL = opts.cfl #---------------------------------------------------- # Discretization -- input options #---------------------------------------------------- checkMass=False applyCorrection=True applyRedistancing=True freezeLevelSet=opts.freezeLevelSet useOnlyVF = False # if TRUE proteus uses only these modules --> twp_navier_stokes_p + twp_navier_stokes_n # vof_p + vof_n movingDomain=opts.movingDomain useRANS = 0 # 0 -- None # 1 -- K-Epsilon # 2 -- K-Omega, 1998 # 3 -- K-Omega, 1988 genMesh=True # By DEFAULT on the other files.py --> fullNewtonFlag = True # multilevelNonlinearSolver & levelNonlinearSolver == NonlinearSolvers.Newton useOldPETSc=False # if TRUE --> multilevelLinearSolver & levelLinearSolver == LinearSolvers.PETSc # if FALSE --> multilevelLinearSolver & levelLinearSolver == LinearSolvers.KSP_petsc4py useSuperlu = False #if TRUE --> multilevelLinearSolver & levelLinearSolver == LinearSolvers.LU spaceOrder = 1 useHex = False # used for discretization, if 1.0 --> CubeGaussQuadrature # ELSE --> SimplexGaussQuadrature useRBLES = 0.0 # multiplied with subGridError useMetrics = 1.0 # if 1.0 --> use of user's parameters as (ns_shockCapturingFactor, ns_lag_shockCapturing, ecc ...) useVF = opts.useVF # used in the smoothing functions as (1.0-useVF)*smoothedHeaviside(eps_rho,phi) + useVF*fmin(1.0,fmax(0.0,vf)) # Input checks if spaceOrder not in [1,2]: print "INVALID: spaceOrder" + spaceOrder sys.exit() if useRBLES not in [0.0, 1.0]: print "INVALID: useRBLES" + useRBLES sys.exit() if useMetrics not in [0.0, 1.0]: print "INVALID: useMetrics" sys.exit() # Discretization nd = 2 if spaceOrder == 1: hFactor=1.0 if useHex: basis=C0_AffineLinearOnCubeWithNodalBasis elementQuadrature = CubeGaussQuadrature(nd,3) elementBoundaryQuadrature = CubeGaussQuadrature(nd-1,3) else: basis=C0_AffineLinearOnSimplexWithNodalBasis elementQuadrature = SimplexGaussQuadrature(nd,3) elementBoundaryQuadrature = SimplexGaussQuadrature(nd-1,3) #elementBoundaryQuadrature = SimplexLobattoQuadrature(nd-1,1) elif spaceOrder == 2: hFactor=0.5 if useHex: basis=C0_AffineLagrangeOnCubeWithNodalBasis elementQuadrature = CubeGaussQuadrature(nd,4) elementBoundaryQuadrature = CubeGaussQuadrature(nd-1,4) else: basis=C0_AffineQuadraticOnSimplexWithNodalBasis elementQuadrature = SimplexGaussQuadrature(nd,4) elementBoundaryQuadrature = SimplexGaussQuadrature(nd-1,4) # Numerical parameters ns_forceStrongDirichlet = False backgroundDiffusionFactor=0.01 if useMetrics: ns_shockCapturingFactor = 0.5 # magnifies numerical viscosity in NS (smoothening velocity fields) ns_lag_shockCapturing = True # lagging numerical viscosity speedsup Newton but destabilzes the solution ns_lag_subgridError = True # less nonlinear but less stable ls_shockCapturingFactor = 0.5 # numerical diffusion of level set (smoothening phi) ls_lag_shockCapturing = True # less nonlinear but less stable ls_sc_uref = 1.0 # reference gradient in numerical solution (higher=more diffusion) ls_sc_beta = 1.5 # 1 is fully nonlinear, 2 is linear vof_shockCapturingFactor = 0.5 # numerical diffusion of level set (smoothening volume of fraction) vof_lag_shockCapturing = 
True # less nonlinear but less stable
    vof_sc_uref = 1.0
    vof_sc_beta = 1.5
    rd_shockCapturingFactor = 0.5
    rd_lag_shockCapturing = False
    epsFact_density = 3.0 # control width of water/air transition zone
    epsFact_viscosity = epsFact_curvature = epsFact_vof = epsFact_consrv_heaviside = epsFact_consrv_dirac = epsFact_density
    epsFact_redistance = 0.33
    epsFact_consrv_diffusion = 1.0 # affects smoothing diffusion in mass conservation
    redist_Newton = True
    kappa_shockCapturingFactor = 0.5
    kappa_lag_shockCapturing = True # False
    kappa_sc_uref = 1.0
    kappa_sc_beta = 1.5
    dissipation_shockCapturingFactor = 0.5
    dissipation_lag_shockCapturing = True # False
    dissipation_sc_uref = 1.0
    dissipation_sc_beta = 1.5
else:
    ns_shockCapturingFactor = 0.9
    ns_lag_shockCapturing = True
    ns_lag_subgridError = True
    ls_shockCapturingFactor = 0.9
    ls_lag_shockCapturing = True
    ls_sc_uref = 1.0
    ls_sc_beta = 1.0
    vof_shockCapturingFactor = 0.9
    vof_lag_shockCapturing = True
    vof_sc_uref = 1.0
    vof_sc_beta = 1.0
    rd_shockCapturingFactor = 0.9
    rd_lag_shockCapturing = False
    epsFact_density = 1.5
    epsFact_viscosity = epsFact_curvature = epsFact_vof = epsFact_consrv_heaviside = epsFact_consrv_dirac = epsFact_density
    epsFact_redistance = 0.33
    epsFact_consrv_diffusion = 10.0
    redist_Newton = False
    kappa_shockCapturingFactor = 0.9
    kappa_lag_shockCapturing = True#False
    kappa_sc_uref = 1.0
    kappa_sc_beta = 1.0
    dissipation_shockCapturingFactor = 0.9
    dissipation_lag_shockCapturing = True#False
    dissipation_sc_uref = 1.0
    dissipation_sc_beta = 1.0

ns_nl_atol_res = max(1.0e-12,0.001*domain.MeshOptions.he**2)
vof_nl_atol_res = max(1.0e-12,0.001*domain.MeshOptions.he**2)
ls_nl_atol_res = max(1.0e-12,0.001*domain.MeshOptions.he**2)
mcorr_nl_atol_res = max(1.0e-12,0.0001*domain.MeshOptions.he**2)
rd_nl_atol_res = max(1.0e-12,0.01*domain.MeshOptions.he)
kappa_nl_atol_res = max(1.0e-12,0.001*domain.MeshOptions.he**2)
dissipation_nl_atol_res = max(1.0e-12,0.001*domain.MeshOptions.he**2)
mesh_nl_atol_res = max(1.0e-12,0.001*domain.MeshOptions.he**2)

#turbulence
ns_closure=0 #1-classic smagorinsky, 2-dynamic smagorinsky, 3 -- k-epsilon, 4 -- k-omega
if useRANS == 1:
    ns_closure = 3
elif useRANS >= 2:
    ns_closure = 4

# Initial condition
waterLine_x = 2*tank_dim[0]
waterLine_z = waterLevel

def waveHeight(x,t):
    waterDepth = waveinput.eta(x, t) + waveinput.mwl
    return waterDepth

def wavePhi(x,t):
    return x[nd-1] - waveHeight(x,t)

def waveVF(x,t):
    return smoothedHeaviside(epsFact_consrv_heaviside*he,wavePhi(x,t))

def signedDistance(x):
    phi_x = x[0]-waterLine_x
    phi_z = x[nd-1]-waterLine_z
    if phi_x < 0.0:
        if phi_z < 0.0:
            return max(phi_x,phi_z)
        else:
            return phi_z
    else:
        if phi_z < 0.0:
            return phi_x
        else:
            return sqrt(phi_x**2 + phi_z**2)
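# ---------------------------------------------------------------------------
# A small sanity-check sketch for the initial condition defined above; it only
# runs when this file is executed directly, not when Proteus imports it. The
# probe points are arbitrary.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    for xp in (0.5*x1, x3, 0.95*x7):
        for zp in (0.5*waterLevel, waterLevel, 0.9*tank_dim[1]):
            # Negative below the free surface, positive above it.
            print xp, zp, signedDistance((xp, zp))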
# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ # All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. """ Some unit tests for S3 Key """ from tests.unit import unittest import time import StringIO from boto.s3.connection import S3Connection from boto.s3.key import Key from boto.exception import S3ResponseError class S3KeyTest (unittest.TestCase): s3 = True def setUp(self): self.conn = S3Connection() self.bucket_name = 'keytest-%d' % int(time.time()) self.bucket = self.conn.create_bucket(self.bucket_name) def tearDown(self): for key in self.bucket: key.delete() self.bucket.delete() def test_set_contents_from_file_dataloss(self): # Create an empty stringio and write to it. content = "abcde" sfp = StringIO.StringIO() sfp.write(content) # Try set_contents_from_file() without rewinding sfp k = self.bucket.new_key("k") try: k.set_contents_from_file(sfp) self.fail("forgot to rewind so should fail.") except AttributeError: pass # call with rewind and check if we wrote 5 bytes k.set_contents_from_file(sfp, rewind=True) self.assertEqual(k.size, 5) # check actual contents by getting it. kn = self.bucket.new_key("k") ks = kn.get_contents_as_string() self.assertEqual(ks, content) # finally, try with a 0 length string sfp = StringIO.StringIO() k = self.bucket.new_key("k") k.set_contents_from_file(sfp) self.assertEqual(k.size, 0) # check actual contents by getting it. kn = self.bucket.new_key("k") ks = kn.get_contents_as_string() self.assertEqual(ks, "") def test_set_contents_as_file(self): content="01234567890123456789" sfp = StringIO.StringIO(content) # fp is set at 0 for just opened (for read) files. # set_contents should write full content to key. k = self.bucket.new_key("k") k.set_contents_from_file(sfp) self.assertEqual(k.size, 20) kn = self.bucket.new_key("k") ks = kn.get_contents_as_string() self.assertEqual(ks, content) # set fp to 5 and set contents. this should # set "567890123456789" to the key sfp.seek(5) k = self.bucket.new_key("k") k.set_contents_from_file(sfp) self.assertEqual(k.size, 15) kn = self.bucket.new_key("k") ks = kn.get_contents_as_string() self.assertEqual(ks, content[5:]) # set fp to 5 and only set 5 bytes. this should # write the value "56789" to the key. 
sfp.seek(5) k = self.bucket.new_key("k") k.set_contents_from_file(sfp, size=5) self.assertEqual(k.size, 5) self.assertEqual(sfp.tell(), 10) kn = self.bucket.new_key("k") ks = kn.get_contents_as_string() self.assertEqual(ks, content[5:10]) def test_set_contents_with_md5(self): content="01234567890123456789" sfp = StringIO.StringIO(content) # fp is set at 0 for just opened (for read) files. # set_contents should write full content to key. k = self.bucket.new_key("k") good_md5 = k.compute_md5(sfp) k.set_contents_from_file(sfp, md5=good_md5) kn = self.bucket.new_key("k") ks = kn.get_contents_as_string() self.assertEqual(ks, content) # set fp to 5 and only set 5 bytes. this should # write the value "56789" to the key. sfp.seek(5) k = self.bucket.new_key("k") good_md5 = k.compute_md5(sfp, size=5) k.set_contents_from_file(sfp, size=5, md5=good_md5) self.assertEqual(sfp.tell(), 10) kn = self.bucket.new_key("k") ks = kn.get_contents_as_string() self.assertEqual(ks, content[5:10]) # let's try a wrong md5 by just altering it. k = self.bucket.new_key("k") sfp.seek(0) hexdig, base64 = k.compute_md5(sfp) bad_md5 = (hexdig, base64[3:]) try: k.set_contents_from_file(sfp, md5=bad_md5) self.fail("should fail with bad md5") except S3ResponseError: pass def test_get_contents_with_md5(self): content="01234567890123456789" sfp = StringIO.StringIO(content) k = self.bucket.new_key("k") k.set_contents_from_file(sfp) kn = self.bucket.new_key("k") s = kn.get_contents_as_string() self.assertEqual(kn.md5, k.md5) self.assertEqual(s, content) def test_file_callback(self): def callback(wrote, total): self.my_cb_cnt += 1 self.assertNotEqual(wrote, self.my_cb_last, "called twice with same value") self.my_cb_last = wrote # Zero bytes written => 1 call self.my_cb_cnt = 0 self.my_cb_last = None k = self.bucket.new_key("k") k.BufferSize = 2 sfp = StringIO.StringIO("") k.set_contents_from_file(sfp, cb=callback, num_cb=10) self.assertEqual(self.my_cb_cnt, 1) self.assertEqual(self.my_cb_last, 0) sfp.close() # Read back zero bytes => 1 call self.my_cb_cnt = 0 self.my_cb_last = None s = k.get_contents_as_string(cb=callback) self.assertEqual(self.my_cb_cnt, 1) self.assertEqual(self.my_cb_last, 0) content="01234567890123456789" sfp = StringIO.StringIO(content) # expect 2 calls due start/finish self.my_cb_cnt = 0 self.my_cb_last = None k = self.bucket.new_key("k") k.set_contents_from_file(sfp, cb=callback, num_cb=10) self.assertEqual(self.my_cb_cnt, 2) self.assertEqual(self.my_cb_last, 20) # Read back all bytes => 2 calls self.my_cb_cnt = 0 self.my_cb_last = None s = k.get_contents_as_string(cb=callback) self.assertEqual(self.my_cb_cnt, 2) self.assertEqual(self.my_cb_last, 20) self.assertEqual(s, content) # rewind sfp and try upload again. 
-1 should call # for every read/write so that should make 11 when bs=2 sfp.seek(0) self.my_cb_cnt = 0 self.my_cb_last = None k = self.bucket.new_key("k") k.BufferSize = 2 k.set_contents_from_file(sfp, cb=callback, num_cb=-1) self.assertEqual(self.my_cb_cnt, 11) self.assertEqual(self.my_cb_last, 20) # Read back all bytes => 11 calls self.my_cb_cnt = 0 self.my_cb_last = None s = k.get_contents_as_string(cb=callback, num_cb=-1) self.assertEqual(self.my_cb_cnt, 11) self.assertEqual(self.my_cb_last, 20) self.assertEqual(s, content) # no more than 1 times => 2 times # last time always 20 bytes sfp.seek(0) self.my_cb_cnt = 0 self.my_cb_last = None k = self.bucket.new_key("k") k.BufferSize = 2 k.set_contents_from_file(sfp, cb=callback, num_cb=1) self.assertTrue(self.my_cb_cnt <= 2) self.assertEqual(self.my_cb_last, 20) # no more than 1 times => 2 times self.my_cb_cnt = 0 self.my_cb_last = None s = k.get_contents_as_string(cb=callback, num_cb=1) self.assertTrue(self.my_cb_cnt <= 2) self.assertEqual(self.my_cb_last, 20) self.assertEqual(s, content) # no more than 2 times # last time always 20 bytes sfp.seek(0) self.my_cb_cnt = 0 self.my_cb_last = None k = self.bucket.new_key("k") k.BufferSize = 2 k.set_contents_from_file(sfp, cb=callback, num_cb=2) self.assertTrue(self.my_cb_cnt <= 2) self.assertEqual(self.my_cb_last, 20) # no more than 2 times self.my_cb_cnt = 0 self.my_cb_last = None s = k.get_contents_as_string(cb=callback, num_cb=2) self.assertTrue(self.my_cb_cnt <= 2) self.assertEqual(self.my_cb_last, 20) self.assertEqual(s, content) # no more than 3 times # last time always 20 bytes sfp.seek(0) self.my_cb_cnt = 0 self.my_cb_last = None k = self.bucket.new_key("k") k.BufferSize = 2 k.set_contents_from_file(sfp, cb=callback, num_cb=3) self.assertTrue(self.my_cb_cnt <= 3) self.assertEqual(self.my_cb_last, 20) # no more than 3 times self.my_cb_cnt = 0 self.my_cb_last = None s = k.get_contents_as_string(cb=callback, num_cb=3) self.assertTrue(self.my_cb_cnt <= 3) self.assertEqual(self.my_cb_last, 20) self.assertEqual(s, content) # no more than 4 times # last time always 20 bytes sfp.seek(0) self.my_cb_cnt = 0 self.my_cb_last = None k = self.bucket.new_key("k") k.BufferSize = 2 k.set_contents_from_file(sfp, cb=callback, num_cb=4) self.assertTrue(self.my_cb_cnt <= 4) self.assertEqual(self.my_cb_last, 20) # no more than 4 times self.my_cb_cnt = 0 self.my_cb_last = None s = k.get_contents_as_string(cb=callback, num_cb=4) self.assertTrue(self.my_cb_cnt <= 4) self.assertEqual(self.my_cb_last, 20) self.assertEqual(s, content) # no more than 6 times # last time always 20 bytes sfp.seek(0) self.my_cb_cnt = 0 self.my_cb_last = None k = self.bucket.new_key("k") k.BufferSize = 2 k.set_contents_from_file(sfp, cb=callback, num_cb=6) self.assertTrue(self.my_cb_cnt <= 6) self.assertEqual(self.my_cb_last, 20) # no more than 6 times self.my_cb_cnt = 0 self.my_cb_last = None s = k.get_contents_as_string(cb=callback, num_cb=6) self.assertTrue(self.my_cb_cnt <= 6) self.assertEqual(self.my_cb_last, 20) self.assertEqual(s, content) # no more than 10 times # last time always 20 bytes sfp.seek(0) self.my_cb_cnt = 0 self.my_cb_last = None k = self.bucket.new_key("k") k.BufferSize = 2 k.set_contents_from_file(sfp, cb=callback, num_cb=10) self.assertTrue(self.my_cb_cnt <= 10) self.assertEqual(self.my_cb_last, 20) # no more than 10 times self.my_cb_cnt = 0 self.my_cb_last = None s = k.get_contents_as_string(cb=callback, num_cb=10) self.assertTrue(self.my_cb_cnt <= 10) self.assertEqual(self.my_cb_last, 20) self.assertEqual(s, 
content) # no more than 1000 times # last time always 20 bytes sfp.seek(0) self.my_cb_cnt = 0 self.my_cb_last = None k = self.bucket.new_key("k") k.BufferSize = 2 k.set_contents_from_file(sfp, cb=callback, num_cb=1000) self.assertTrue(self.my_cb_cnt <= 1000) self.assertEqual(self.my_cb_last, 20) # no more than 1000 times self.my_cb_cnt = 0 self.my_cb_last = None s = k.get_contents_as_string(cb=callback, num_cb=1000) self.assertTrue(self.my_cb_cnt <= 1000) self.assertEqual(self.my_cb_last, 20) self.assertEqual(s, content) def test_website_redirects(self): self.bucket.configure_website('index.html') key = self.bucket.new_key('redirect-key') self.assertTrue(key.set_redirect('http://www.amazon.com/')) self.assertEqual(key.get_redirect(), 'http://www.amazon.com/') self.assertTrue(key.set_redirect('http://aws.amazon.com/')) self.assertEqual(key.get_redirect(), 'http://aws.amazon.com/') def test_website_redirect_none_configured(self): key = self.bucket.new_key('redirect-key') key.set_contents_from_string('') self.assertEqual(key.get_redirect(), None) def test_website_redirect_with_bad_value(self): self.bucket.configure_website('index.html') key = self.bucket.new_key('redirect-key') with self.assertRaises(key.provider.storage_response_error): # Must start with a / or http key.set_redirect('ftp://ftp.example.org') with self.assertRaises(key.provider.storage_response_error): # Must start with a / or http key.set_redirect('')
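# A minimal non-test sketch of the rewind behaviour exercised above. The
# bucket name is an assumption; credentials come from the usual boto
# configuration.
def upload_buffer_example():
    conn = S3Connection()
    bucket = conn.create_bucket('keytest-example')
    sfp = StringIO.StringIO()
    sfp.write("abcde")
    key = bucket.new_key("k")
    # Without rewind=True the file pointer is left at EOF (the tests above
    # expect an AttributeError); rewind=True seeks back to 0 before reading.
    key.set_contents_from_file(sfp, rewind=True)
    return key.get_contents_as_string()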
# Licensed to the Software Freedom Conservancy (SFC) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The SFC licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import pytest import unittest from selenium.common.exceptions import NoSuchElementException from selenium.common.exceptions import WebDriverException class FormHandlingTests(unittest.TestCase): def testShouldClickOnSubmitInputElements(self): self._loadPage("formPage") self.driver.find_element_by_id("submitButton").click() self.driver.implicitly_wait(5) self.assertEqual(self.driver.title, "We Arrive Here") def testClickingOnUnclickableElementsDoesNothing(self): self._loadPage("formPage") self.driver.find_element_by_xpath("//body").click() def testShouldBeAbleToClickImageButtons(self): self._loadPage("formPage") self.driver.find_element_by_id("imageButton").click() self.driver.implicitly_wait(5) self.assertEqual(self.driver.title, "We Arrive Here") def testShouldBeAbleToSubmitForms(self): self._loadPage("formPage") self.driver.find_element_by_name("login").submit() self.driver.implicitly_wait(5) self.assertEqual(self.driver.title, "We Arrive Here") def testShouldSubmitAFormWhenAnyInputElementWithinThatFormIsSubmitted(self): self._loadPage("formPage") self.driver.find_element_by_id("checky").submit() self.driver.implicitly_wait(5) self.assertEqual(self.driver.title, "We Arrive Here") def testShouldSubmitAFormWhenAnyElementWihinThatFormIsSubmitted(self): self._loadPage("formPage") self.driver.find_element_by_xpath("//form/p").submit() self.driver.implicitly_wait(5) self.assertEqual(self.driver.title, "We Arrive Here") def testShouldNotBeAbleToSubmitAFormThatDoesNotExist(self): self._loadPage("formPage") try: self.driver.find_element_by_name("there is no spoon").submit() self.fail("Expected NoSuchElementException to have been thrown") except NoSuchElementException as e: pass except Exception as e: self.fail("Expected NoSuchElementException but got " + str(e)) def testShouldBeAbleToEnterTextIntoATextAreaBySettingItsValue(self): self._loadPage("javascriptPage") textarea = self.driver.find_element_by_id("keyUpArea") cheesey = "Brie and cheddar" textarea.send_keys(cheesey) self.assertEqual(textarea.get_attribute("value"), cheesey) def testShouldEnterDataIntoFormFields(self): self._loadPage("xhtmlTest") element = self.driver.find_element_by_xpath("//form[@name='someForm']/input[@id='username']") originalValue = element.get_attribute("value") self.assertEqual(originalValue, "change") element.clear() element.send_keys("some text") element = self.driver.find_element_by_xpath("//form[@name='someForm']/input[@id='username']") newFormValue = element.get_attribute("value") self.assertEqual(newFormValue, "some text") def testShouldBeAbleToSelectACheckBox(self): self._loadPage("formPage") checkbox = self.driver.find_element_by_id("checky") self.assertEqual(checkbox.is_selected(), False) checkbox.click() 
self.assertEqual(checkbox.is_selected(), True) checkbox.click() self.assertEqual(checkbox.is_selected(), False) def testShouldToggleTheCheckedStateOfACheckbox(self): self._loadPage("formPage") checkbox = self.driver.find_element_by_id("checky") self.assertEqual(checkbox.is_selected(), False) checkbox.click() self.assertEqual(checkbox.is_selected(), True) checkbox.click() self.assertEqual(checkbox.is_selected(), False) def testTogglingACheckboxShouldReturnItsCurrentState(self): self._loadPage("formPage") checkbox = self.driver.find_element_by_id("checky") self.assertEqual(checkbox.is_selected(), False) checkbox.click() self.assertEqual(checkbox.is_selected(), True) checkbox.click() self.assertEqual(checkbox.is_selected(), False) def testShouldBeAbleToSelectARadioButton(self): self._loadPage("formPage") radioButton = self.driver.find_element_by_id("peas") self.assertEqual(radioButton.is_selected(), False) radioButton.click() self.assertEqual(radioButton.is_selected(), True) def testShouldBeAbleToSelectARadioButtonByClickingOnIt(self): self._loadPage("formPage") radioButton = self.driver.find_element_by_id("peas") self.assertEqual(radioButton.is_selected(), False) radioButton.click() self.assertEqual(radioButton.is_selected(), True) def testShouldReturnStateOfRadioButtonsBeforeInteration(self): self._loadPage("formPage") radioButton = self.driver.find_element_by_id("cheese_and_peas") self.assertEqual(radioButton.is_selected(), True) radioButton = self.driver.find_element_by_id("cheese") self.assertEqual(radioButton.is_selected(), False) # [ExpectedException(typeof(NotImplementedException))] # def testShouldThrowAnExceptionWhenTogglingTheStateOfARadioButton(self): # self._loadPage("formPage") # radioButton = self.driver.find_element_by_id("cheese")) # radioButton.click() # [IgnoreBrowser(Browser.IE, "IE allows toggling of an option not in a multiselect")] # [ExpectedException(typeof(NotImplementedException))] # def testTogglingAnOptionShouldThrowAnExceptionIfTheOptionIsNotInAMultiSelect(self): # self._loadPage("formPage") # select = self.driver.find_element_by_name("selectomatic")) # option = select.find_elements_by_tag_name("option"))[0] # option.click() def testTogglingAnOptionShouldToggleOptionsInAMultiSelect(self): if self.driver.capabilities['browserName'] == 'chrome' and int(self.driver.capabilities['version'].split('.')[0]) < 16: pytest.skip("deselecting preselected values only works on chrome >= 16") self._loadPage("formPage") select = self.driver.find_element_by_name("multi") option = select.find_elements_by_tag_name("option")[0] selected = option.is_selected() option.click() self.assertFalse(selected == option.is_selected()) option.click() self.assertTrue(selected == option.is_selected()) def testShouldThrowAnExceptionWhenSelectingAnUnselectableElement(self): self._loadPage("formPage") element = self.driver.find_element_by_xpath("//title") try: element.click() self.fail("Expected WebDriverException to have been thrown") except WebDriverException as e: pass except Exception as e: self.fail("Expected WebDriverException but got " + str(type(e))) def testSendingKeyboardEventsShouldAppendTextInInputs(self): self._loadPage("formPage") element = self.driver.find_element_by_id("working") element.send_keys("Some") value = element.get_attribute("value") self.assertEqual(value, "Some") element.send_keys(" text") value = element.get_attribute("value") self.assertEqual(value, "Some text") def testShouldBeAbleToClearTextFromInputElements(self): self._loadPage("formPage") element = 
self.driver.find_element_by_id("working") element.send_keys("Some text") value = element.get_attribute("value") self.assertTrue(len(value) > 0) element.clear() value = element.get_attribute("value") self.assertEqual(len(value), 0) def testEmptyTextBoxesShouldReturnAnEmptyStringNotNull(self): self._loadPage("formPage") emptyTextBox = self.driver.find_element_by_id("working") self.assertEqual(emptyTextBox.get_attribute("value"), "") emptyTextArea = self.driver.find_element_by_id("emptyTextArea") self.assertEqual(emptyTextArea.get_attribute("value"), "") def testShouldBeAbleToClearTextFromTextAreas(self): self._loadPage("formPage") element = self.driver.find_element_by_id("withText") element.send_keys("Some text") value = element.get_attribute("value") self.assertTrue(len(value) > 0) element.clear() value = element.get_attribute("value") self.assertEqual(len(value), 0) def testRadioShouldNotBeSelectedAfterSelectingSibling(self): self._loadPage("formPage") cheese = self.driver.find_element_by_id("cheese") peas = self.driver.find_element_by_id("peas") cheese.click() self.assertEqual(True, cheese.is_selected()) self.assertEqual(False, peas.is_selected()) peas.click() self.assertEqual(False, cheese.is_selected()) self.assertEqual(True, peas.is_selected()) def _pageURL(self, name): return self.webserver.where_is(name + '.html') def _loadSimplePage(self): self._loadPage("simpleTest") def _loadPage(self, name): self.driver.get(self._pageURL(name))
# Copyright 2014 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import collections import itertools import logging import os import posixpath import subprocess import shutil import time from devil import base_error from devil.android import crash_handler from devil.android import device_errors from devil.android import device_temp_file from devil.android import logcat_monitor from devil.android import ports from devil.android.sdk import version_codes from devil.utils import reraiser_thread from incremental_install import installer from pylib import constants from pylib.base import base_test_result from pylib.gtest import gtest_test_instance from pylib.local import local_test_server_spawner from pylib.local.device import local_device_environment from pylib.local.device import local_device_test_run from pylib.utils import google_storage_helper from pylib.utils import logdog_helper from py_trace_event import trace_event from py_utils import contextlib_ext from py_utils import tempfile_ext import tombstones _MAX_INLINE_FLAGS_LENGTH = 50 # Arbitrarily chosen. _EXTRA_COMMAND_LINE_FILE = ( 'org.chromium.native_test.NativeTest.CommandLineFile') _EXTRA_COMMAND_LINE_FLAGS = ( 'org.chromium.native_test.NativeTest.CommandLineFlags') _EXTRA_COVERAGE_DEVICE_FILE = ( 'org.chromium.native_test.NativeTest.CoverageDeviceFile') _EXTRA_STDOUT_FILE = ( 'org.chromium.native_test.NativeTestInstrumentationTestRunner' '.StdoutFile') _EXTRA_TEST = ( 'org.chromium.native_test.NativeTestInstrumentationTestRunner' '.Test') _EXTRA_TEST_LIST = ( 'org.chromium.native_test.NativeTestInstrumentationTestRunner' '.TestList') _MAX_SHARD_SIZE = 256 _SECONDS_TO_NANOS = int(1e9) # The amount of time a test executable may run before it gets killed. _TEST_TIMEOUT_SECONDS = 30*60 # Tests that use SpawnedTestServer must run the LocalTestServerSpawner on the # host machine. # TODO(jbudorick): Move this up to the test instance if the net test server is # handled outside of the APK for the remote_device environment. _SUITE_REQUIRES_TEST_SERVER_SPAWNER = [ 'components_browsertests', 'content_unittests', 'content_browsertests', 'net_unittests', 'services_unittests', 'unit_tests' ] # No-op context manager. If we used Python 3, we could change this to # contextlib.ExitStack() class _NullContextManager(object): def __enter__(self): pass def __exit__(self, *args): pass def _GenerateSequentialFileNames(filename): """Infinite generator of names: 'name.ext', 'name_1.ext', 'name_2.ext', ...""" yield filename base, ext = os.path.splitext(filename) for i in itertools.count(1): yield '%s_%d%s' % (base, i, ext) def _ExtractTestsFromFilter(gtest_filter): """Returns the list of tests specified by the given filter. Returns: None if the device should be queried for the test list instead. """ # Empty means all tests, - means exclude filter. if not gtest_filter or '-' in gtest_filter: return None patterns = gtest_filter.split(':') # For a single pattern, allow it even if it has a wildcard so long as the # wildcard comes at the end and there is at least one . to prove the scope is # not too large. # This heuristic is not necessarily faster, but normally is. if len(patterns) == 1 and patterns[0].endswith('*'): no_suffix = patterns[0].rstrip('*') if '*' not in no_suffix and '.' 
in no_suffix: return patterns if '*' in gtest_filter: return None return patterns def _PullCoverageFiles(device, device_coverage_dir, output_dir): """Pulls coverage files on device to host directory. Args: device: The working device. device_coverage_dir: The directory to store coverage data on device. output_dir: The output directory on host. """ try: if not os.path.exists(output_dir): os.makedirs(output_dir) device.PullFile(device_coverage_dir, output_dir) if not os.listdir(os.path.join(output_dir, 'profraw')): logging.warning('No coverage data was generated for this run') except (OSError, base_error.BaseError) as e: logging.warning('Failed to handle coverage data after tests: %s', e) finally: device.RemovePath(device_coverage_dir, force=True, recursive=True) def _GetDeviceCoverageDir(device): """Gets the directory to generate coverage data on device. Args: device: The working device. Returns: The directory path on the device. """ return posixpath.join(device.GetExternalStoragePath(), 'chrome', 'test', 'coverage', 'profraw') def _GetLLVMProfilePath(device_coverage_dir, suite, coverage_index): """Gets 'LLVM_PROFILE_FILE' environment variable path. Dumping data to ONLY 1 file may cause warning and data overwrite in browsertests, so that pattern "%2m" is used to expand to 2 raw profiles at runtime. Args: device_coverage_dir: The directory to generate data on device. suite: Test suite name. coverage_index: The incremental index for this test suite. Returns: The path pattern for environment variable 'LLVM_PROFILE_FILE'. """ return posixpath.join(device_coverage_dir, '_'.join([suite, str(coverage_index), '%2m.profraw'])) class _ApkDelegate(object): def __init__(self, test_instance, tool): self._activity = test_instance.activity self._apk_helper = test_instance.apk_helper self._test_apk_incremental_install_json = ( test_instance.test_apk_incremental_install_json) self._package = test_instance.package self._runner = test_instance.runner self._permissions = test_instance.permissions self._suite = test_instance.suite self._component = '%s/%s' % (self._package, self._runner) self._extras = test_instance.extras self._wait_for_java_debugger = test_instance.wait_for_java_debugger self._tool = tool self._coverage_dir = test_instance.coverage_dir self._coverage_index = 0 def GetTestDataRoot(self, device): # pylint: disable=no-self-use return posixpath.join(device.GetExternalStoragePath(), 'chromium_tests_root') def Install(self, device): if self._test_apk_incremental_install_json: installer.Install(device, self._test_apk_incremental_install_json, apk=self._apk_helper, permissions=self._permissions) else: device.Install( self._apk_helper, allow_downgrade=True, reinstall=True, permissions=self._permissions) def ResultsDirectory(self, device): return device.GetApplicationDataDirectory(self._package) def Run(self, test, device, flags=None, **kwargs): extras = dict(self._extras) device_api = device.build_version_sdk if self._coverage_dir and device_api >= version_codes.LOLLIPOP: device_coverage_dir = _GetDeviceCoverageDir(device) extras[_EXTRA_COVERAGE_DEVICE_FILE] = _GetLLVMProfilePath( device_coverage_dir, self._suite, self._coverage_index) self._coverage_index += 1 if ('timeout' in kwargs and gtest_test_instance.EXTRA_SHARD_NANO_TIMEOUT not in extras): # Make sure the instrumentation doesn't kill the test before the # scripts do. The provided timeout value is in seconds, but the # instrumentation deals with nanoseconds because that's how Android # handles time. 
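      # For example, a 30 minute (1800 s) timeout is forwarded to the
      # instrumentation extra as 1800 * 10**9 nanoseconds.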
extras[gtest_test_instance.EXTRA_SHARD_NANO_TIMEOUT] = int( kwargs['timeout'] * _SECONDS_TO_NANOS) # pylint: disable=redefined-variable-type command_line_file = _NullContextManager() if flags: if len(flags) > _MAX_INLINE_FLAGS_LENGTH: command_line_file = device_temp_file.DeviceTempFile(device.adb) device.WriteFile(command_line_file.name, '_ %s' % flags) extras[_EXTRA_COMMAND_LINE_FILE] = command_line_file.name else: extras[_EXTRA_COMMAND_LINE_FLAGS] = flags test_list_file = _NullContextManager() if test: if len(test) > 1: test_list_file = device_temp_file.DeviceTempFile(device.adb) device.WriteFile(test_list_file.name, '\n'.join(test)) extras[_EXTRA_TEST_LIST] = test_list_file.name else: extras[_EXTRA_TEST] = test[0] # pylint: enable=redefined-variable-type stdout_file = device_temp_file.DeviceTempFile( device.adb, dir=device.GetExternalStoragePath(), suffix='.gtest_out') extras[_EXTRA_STDOUT_FILE] = stdout_file.name if self._wait_for_java_debugger: cmd = ['am', 'set-debug-app', '-w', self._package] device.RunShellCommand(cmd, check_return=True) logging.warning('*' * 80) logging.warning('Waiting for debugger to attach to process: %s', self._package) logging.warning('*' * 80) with command_line_file, test_list_file, stdout_file: try: device.StartInstrumentation( self._component, extras=extras, raw=False, **kwargs) except device_errors.CommandFailedError: logging.exception('gtest shard failed.') except device_errors.CommandTimeoutError: logging.exception('gtest shard timed out.') except device_errors.DeviceUnreachableError: logging.exception('gtest shard device unreachable.') except Exception: device.ForceStop(self._package) raise finally: if self._coverage_dir and device_api >= version_codes.LOLLIPOP: _PullCoverageFiles( device, device_coverage_dir, os.path.join(self._coverage_dir, str(self._coverage_index))) return device.ReadFile(stdout_file.name).splitlines() def PullAppFiles(self, device, files, directory): device_dir = device.GetApplicationDataDirectory(self._package) host_dir = os.path.join(directory, str(device)) for f in files: device_file = posixpath.join(device_dir, f) host_file = os.path.join(host_dir, *f.split(posixpath.sep)) for host_file in _GenerateSequentialFileNames(host_file): if not os.path.exists(host_file): break device.PullFile(device_file, host_file) def Clear(self, device): device.ClearApplicationState(self._package, permissions=self._permissions) class _ExeDelegate(object): def __init__(self, tr, test_instance, tool): self._host_dist_dir = test_instance.exe_dist_dir self._exe_file_name = os.path.basename( test_instance.exe_dist_dir)[:-len('__dist')] self._device_dist_dir = posixpath.join( constants.TEST_EXECUTABLE_DIR, os.path.basename(test_instance.exe_dist_dir)) self._test_run = tr self._tool = tool self._suite = test_instance.suite self._coverage_dir = test_instance.coverage_dir self._coverage_index = 0 def GetTestDataRoot(self, device): # pylint: disable=no-self-use # pylint: disable=unused-argument return posixpath.join(constants.TEST_EXECUTABLE_DIR, 'chromium_tests_root') def Install(self, device): # TODO(jbudorick): Look into merging this with normal data deps pushing if # executables become supported on nonlocal environments. 
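    # devil's PushChangedFiles compares host and device checksums, so only
    # files that actually differ are re-pushed; delete_device_stale=True below
    # also prunes device files that no longer exist in the host dist dir.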
device.PushChangedFiles([(self._host_dist_dir, self._device_dist_dir)], delete_device_stale=True) def ResultsDirectory(self, device): # pylint: disable=no-self-use # pylint: disable=unused-argument return constants.TEST_EXECUTABLE_DIR def Run(self, test, device, flags=None, **kwargs): tool = self._test_run.GetTool(device).GetTestWrapper() if tool: cmd = [tool] else: cmd = [] cmd.append(posixpath.join(self._device_dist_dir, self._exe_file_name)) if test: cmd.append('--gtest_filter=%s' % ':'.join(test)) if flags: # TODO(agrieve): This won't work if multiple flags are passed. cmd.append(flags) cwd = constants.TEST_EXECUTABLE_DIR env = { 'LD_LIBRARY_PATH': self._device_dist_dir } if self._coverage_dir: device_coverage_dir = _GetDeviceCoverageDir(device) env['LLVM_PROFILE_FILE'] = _GetLLVMProfilePath( device_coverage_dir, self._suite, self._coverage_index) self._coverage_index += 1 if self._tool != 'asan': env['UBSAN_OPTIONS'] = constants.UBSAN_OPTIONS try: gcov_strip_depth = os.environ['NATIVE_COVERAGE_DEPTH_STRIP'] external = device.GetExternalStoragePath() env['GCOV_PREFIX'] = '%s/gcov' % external env['GCOV_PREFIX_STRIP'] = gcov_strip_depth except (device_errors.CommandFailedError, KeyError): pass # Executable tests return a nonzero exit code on test failure, which is # fine from the test runner's perspective; thus check_return=False. output = device.RunShellCommand( cmd, cwd=cwd, env=env, check_return=False, large_output=True, **kwargs) if self._coverage_dir: _PullCoverageFiles( device, device_coverage_dir, os.path.join(self._coverage_dir, str(self._coverage_index))) return output def PullAppFiles(self, device, files, directory): pass def Clear(self, device): device.KillAll(self._exe_file_name, blocking=True, timeout=30, quiet=True) class LocalDeviceGtestRun(local_device_test_run.LocalDeviceTestRun): def __init__(self, env, test_instance): assert isinstance(env, local_device_environment.LocalDeviceEnvironment) assert isinstance(test_instance, gtest_test_instance.GtestTestInstance) super(LocalDeviceGtestRun, self).__init__(env, test_instance) # pylint: disable=redefined-variable-type if self._test_instance.apk: self._delegate = _ApkDelegate(self._test_instance, env.tool) elif self._test_instance.exe_dist_dir: self._delegate = _ExeDelegate(self, self._test_instance, self._env.tool) if self._test_instance.isolated_script_test_perf_output: self._test_perf_output_filenames = _GenerateSequentialFileNames( self._test_instance.isolated_script_test_perf_output) else: self._test_perf_output_filenames = itertools.repeat(None) # pylint: enable=redefined-variable-type self._crashes = set() self._servers = collections.defaultdict(list) #override def TestPackage(self): return self._test_instance.suite #override def SetUp(self): @local_device_environment.handle_shard_failures_with( on_failure=self._env.BlacklistDevice) @trace_event.traced def individual_device_set_up(device, host_device_tuples): def install_apk(dev): # Install test APK. self._delegate.Install(dev) def push_test_data(dev): # Push data dependencies. device_root = self._delegate.GetTestDataRoot(dev) host_device_tuples_substituted = [ (h, local_device_test_run.SubstituteDeviceRoot(d, device_root)) for h, d in host_device_tuples] local_device_environment.place_nomedia_on_device(dev, device_root) dev.PushChangedFiles( host_device_tuples_substituted, delete_device_stale=True, # Some gtest suites, e.g. unit_tests, have data dependencies that # can take longer than the default timeout to push. See # crbug.com/791632 for context. 
timeout=600) if not host_device_tuples: dev.RemovePath(device_root, force=True, recursive=True, rename=True) dev.RunShellCommand(['mkdir', '-p', device_root], check_return=True) def init_tool_and_start_servers(dev): tool = self.GetTool(dev) tool.CopyFiles(dev) tool.SetupEnvironment() try: # See https://crbug.com/1030827. # This is a hack that may break in the future. We're relying on the # fact that adb doesn't use ipv6 for it's server, and so doesn't # listen on ipv6, but ssh remote forwarding does. 5037 is the port # number adb uses for its server. if "[::1]:5037" in subprocess.check_output( "ss -o state listening 'sport = 5037'", shell=True): logging.error( 'Test Server cannot be started with a remote-forwarded adb ' 'server. Continuing anyways, but some tests may fail.') return except subprocess.CalledProcessError: pass self._servers[str(dev)] = [] if self.TestPackage() in _SUITE_REQUIRES_TEST_SERVER_SPAWNER: self._servers[str(dev)].append( local_test_server_spawner.LocalTestServerSpawner( ports.AllocateTestServerPort(), dev, tool)) for s in self._servers[str(dev)]: s.SetUp() def bind_crash_handler(step, dev): return lambda: crash_handler.RetryOnSystemCrash(step, dev) steps = [ bind_crash_handler(s, device) for s in (install_apk, push_test_data, init_tool_and_start_servers)] if self._env.concurrent_adb: reraiser_thread.RunAsync(steps) else: for step in steps: step() self._env.parallel_devices.pMap( individual_device_set_up, self._test_instance.GetDataDependencies()) #override def _ShouldShard(self): return True #override def _CreateShards(self, tests): # _crashes are tests that might crash and make the tests in the same shard # following the crashed testcase not run. # Thus we need to create separate shards for each crashed testcase, # so that other tests can be run. device_count = len(self._env.devices) shards = [] # Add shards with only one suspect testcase. shards += [[crash] for crash in self._crashes if crash in tests] # Delete suspect testcase from tests. tests = [test for test in tests if not test in self._crashes] for i in xrange(0, device_count): unbounded_shard = tests[i::device_count] shards += [unbounded_shard[j:j+_MAX_SHARD_SIZE] for j in xrange(0, len(unbounded_shard), _MAX_SHARD_SIZE)] return shards #override def _GetTests(self): if self._test_instance.extract_test_list_from_filter: # When the exact list of tests to run is given via command-line (e.g. when # locally iterating on a specific test), skip querying the device (which # takes ~3 seconds). tests = _ExtractTestsFromFilter(self._test_instance.gtest_filter) if tests: return tests # Even when there's only one device, it still makes sense to retrieve the # test list so that tests can be split up and run in batches rather than all # at once (since test output is not streamed). @local_device_environment.handle_shard_failures_with( on_failure=self._env.BlacklistDevice) def list_tests(dev): timeout = 30 retries = 1 if self._test_instance.wait_for_java_debugger: timeout = None flags = [ f for f in self._test_instance.flags if f not in ['--wait-for-debugger', '--wait-for-java-debugger'] ] flags.append('--gtest_list_tests') # TODO(crbug.com/726880): Remove retries when no longer necessary. for i in range(0, retries+1): logging.info('flags:') for f in flags: logging.info(' %s', f) raw_test_list = crash_handler.RetryOnSystemCrash( lambda d: self._delegate.Run( None, d, flags=' '.join(flags), timeout=timeout), device=dev) tests = gtest_test_instance.ParseGTestListTests(raw_test_list) if not tests: logging.info('No tests found. 
Output:') for l in raw_test_list: logging.info(' %s', l) logging.info('Logcat:') for line in dev.adb.Logcat(dump=True): logging.info(line) dev.adb.Logcat(clear=True) if i < retries: logging.info('Retrying...') else: break return tests # Query all devices in case one fails. test_lists = self._env.parallel_devices.pMap(list_tests).pGet(None) # If all devices failed to list tests, raise an exception. # Check that tl is not None and is not empty. if all(not tl for tl in test_lists): raise device_errors.CommandFailedError( 'Failed to list tests on any device') tests = list(sorted(set().union(*[set(tl) for tl in test_lists if tl]))) tests = self._test_instance.FilterTests(tests) tests = self._ApplyExternalSharding( tests, self._test_instance.external_shard_index, self._test_instance.total_external_shards) return tests def _UploadTestArtifacts(self, device, test_artifacts_dir): # TODO(jbudorick): Reconcile this with the output manager once # https://codereview.chromium.org/2933993002/ lands. if test_artifacts_dir: with tempfile_ext.NamedTemporaryDirectory() as test_artifacts_host_dir: device.PullFile(test_artifacts_dir.name, test_artifacts_host_dir) with tempfile_ext.NamedTemporaryDirectory() as temp_zip_dir: zip_base_name = os.path.join(temp_zip_dir, 'test_artifacts') test_artifacts_zip = shutil.make_archive( zip_base_name, 'zip', test_artifacts_host_dir) link = google_storage_helper.upload( google_storage_helper.unique_name( 'test_artifacts', device=device), test_artifacts_zip, bucket='%s/test_artifacts' % ( self._test_instance.gs_test_artifacts_bucket)) logging.info('Uploading test artifacts to %s.', link) return link return None #override def _RunTest(self, device, test): # Run the test. timeout = (self._test_instance.shard_timeout * self.GetTool(device).GetTimeoutScale()) if self._test_instance.wait_for_java_debugger: timeout = None if self._test_instance.store_tombstones: tombstones.ClearAllTombstones(device) test_perf_output_filename = next(self._test_perf_output_filenames) with device_temp_file.DeviceTempFile( adb=device.adb, dir=self._delegate.ResultsDirectory(device), suffix='.xml') as device_tmp_results_file: with contextlib_ext.Optional( device_temp_file.NamedDeviceTemporaryDirectory( adb=device.adb, dir='/sdcard/'), self._test_instance.gs_test_artifacts_bucket) as test_artifacts_dir: with (contextlib_ext.Optional( device_temp_file.DeviceTempFile( adb=device.adb, dir=self._delegate.ResultsDirectory(device)), test_perf_output_filename)) as isolated_script_test_perf_output: flags = list(self._test_instance.flags) if self._test_instance.enable_xml_result_parsing: flags.append('--gtest_output=xml:%s' % device_tmp_results_file.name) if self._test_instance.gs_test_artifacts_bucket: flags.append('--test_artifacts_dir=%s' % test_artifacts_dir.name) if test_perf_output_filename: flags.append('--isolated_script_test_perf_output=%s' % isolated_script_test_perf_output.name) logging.info('flags:') for f in flags: logging.info(' %s', f) stream_name = 'logcat_%s_%s_%s' % ( hash(tuple(test)), time.strftime('%Y%m%dT%H%M%S-UTC', time.gmtime()), device.serial) with self._env.output_manager.ArchivedTempfile( stream_name, 'logcat') as logcat_file: with logcat_monitor.LogcatMonitor( device.adb, filter_specs=local_device_environment.LOGCAT_FILTERS, output_file=logcat_file.name, check_error=False) as logmon: with contextlib_ext.Optional( trace_event.trace(str(test)), self._env.trace_output): output = self._delegate.Run( test, device, flags=' '.join(flags), timeout=timeout, retries=0) logmon.Close() if 
logcat_file.Link(): logging.info('Logcat saved to %s', logcat_file.Link()) if self._test_instance.enable_xml_result_parsing: try: gtest_xml = device.ReadFile( device_tmp_results_file.name, as_root=True) except device_errors.CommandFailedError as e: logging.warning( 'Failed to pull gtest results XML file %s: %s', device_tmp_results_file.name, str(e)) gtest_xml = None if test_perf_output_filename: try: device.PullFile(isolated_script_test_perf_output.name, test_perf_output_filename) except device_errors.CommandFailedError as e: logging.warning( 'Failed to pull chartjson results %s: %s', isolated_script_test_perf_output.name, str(e)) test_artifacts_url = self._UploadTestArtifacts(device, test_artifacts_dir) for s in self._servers[str(device)]: s.Reset() if self._test_instance.app_files: self._delegate.PullAppFiles(device, self._test_instance.app_files, self._test_instance.app_file_dir) if not self._env.skip_clear_data: self._delegate.Clear(device) for l in output: logging.info(l) # Parse the output. # TODO(jbudorick): Transition test scripts away from parsing stdout. if self._test_instance.enable_xml_result_parsing: results = gtest_test_instance.ParseGTestXML(gtest_xml) else: results = gtest_test_instance.ParseGTestOutput( output, self._test_instance.symbolizer, device.product_cpu_abi) tombstones_url = None for r in results: if logcat_file: r.SetLink('logcat', logcat_file.Link()) if self._test_instance.gs_test_artifacts_bucket: r.SetLink('test_artifacts', test_artifacts_url) if r.GetType() == base_test_result.ResultType.CRASH: self._crashes.add(r.GetName()) if self._test_instance.store_tombstones: if not tombstones_url: resolved_tombstones = tombstones.ResolveTombstones( device, resolve_all_tombstones=True, include_stack_symbols=False, wipe_tombstones=True) stream_name = 'tombstones_%s_%s' % ( time.strftime('%Y%m%dT%H%M%S', time.localtime()), device.serial) tombstones_url = logdog_helper.text( stream_name, '\n'.join(resolved_tombstones)) r.SetLink('tombstones', tombstones_url) tests_stripped_disabled_prefix = set() for t in test: tests_stripped_disabled_prefix.add( gtest_test_instance.TestNameWithoutDisabledPrefix(t)) not_run_tests = tests_stripped_disabled_prefix.difference( set(r.GetName() for r in results)) return results, list(not_run_tests) if results else None #override def TearDown(self): # By default, teardown will invoke ADB. When receiving SIGTERM due to a # timeout, there's a high probability that ADB is non-responsive. In these # cases, sending an ADB command will potentially take a long time to time # out. Before this happens, the process will be hard-killed for not # responding to SIGTERM fast enough. if self._received_sigterm: return @local_device_environment.handle_shard_failures @trace_event.traced def individual_device_tear_down(dev): for s in self._servers.get(str(dev), []): s.TearDown() tool = self.GetTool(dev) tool.CleanUpEnvironment() self._env.parallel_devices.pMap(individual_device_tear_down)
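

# Editor's sketch (standalone illustration, not used by the runner above):
# _CreateShards assigns tests to shards by first slicing the test list
# round-robin across the available devices and then capping each slice at
# _MAX_SHARD_SIZE, while known-crashy tests are isolated into their own
# single-test shards. The same splitting arithmetic in isolation:
def _example_round_robin_shards(tests, device_count,
                                max_shard_size=_MAX_SHARD_SIZE):
  """Returns the shards the round-robin split would produce."""
  shards = []
  for i in range(device_count):
    per_device = tests[i::device_count]
    shards.extend(per_device[j:j + max_shard_size]
                  for j in range(0, len(per_device), max_shard_size))
  return shards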
# Copyright (c) 2019-2022 Manfred Moitzi # License: MIT License import pytest import math from ezdxf.math import Vec3, Matrix44, arc_angle_span_deg from ezdxf.entities.arc import Arc from ezdxf.lldxf.const import DXF12, DXF2000 from ezdxf.lldxf.tagwriter import TagCollector, basic_tags_from_text from ezdxf.path import make_path TEST_CLASS = Arc TEST_TYPE = "ARC" ENTITY_R12 = """0 ARC 5 0 8 0 10 0.0 20 0.0 30 0.0 40 1.0 50 0 51 360 """ ENTITY_R2000 = """0 ARC 5 0 330 0 100 AcDbEntity 8 0 100 AcDbCircle 10 0.0 20 0.0 30 0.0 40 1.0 100 AcDbArc 50 0 51 360 """ @pytest.fixture(params=[ENTITY_R12, ENTITY_R2000]) def entity(request): return TEST_CLASS.from_text(request.param) def test_registered(): from ezdxf.entities.factory import ENTITY_CLASSES assert TEST_TYPE in ENTITY_CLASSES def test_default_init(): entity = TEST_CLASS() assert entity.dxftype() == TEST_TYPE assert entity.dxf.handle is None assert entity.dxf.owner is None def test_default_new(): entity = TEST_CLASS.new( handle="ABBA", owner="0", dxfattribs={ "color": "7", "center": (1, 2, 3), "radius": 2.5, "start_angle": 30, "end_angle": 290, }, ) assert entity.dxf.layer == "0" assert entity.dxf.color == 7 assert entity.dxf.linetype == "BYLAYER" assert entity.dxf.center == (1, 2, 3) assert entity.dxf.center.x == 1, "is not Vec3 compatible" assert entity.dxf.center.y == 2, "is not Vec3 compatible" assert entity.dxf.center.z == 3, "is not Vec3 compatible" assert entity.dxf.radius == 2.5 assert entity.dxf.start_angle == 30 assert entity.dxf.end_angle == 290 # can set DXF R2007 value entity.dxf.shadow_mode = 1 assert entity.dxf.shadow_mode == 1 assert entity.dxf.extrusion == (0.0, 0.0, 1.0) assert entity.dxf.hasattr("extrusion") is False, "just the default value" def test_get_start_and_end_vertices_with_ocs(): arc = TEST_CLASS.new( handle="ABBA", owner="0", dxfattribs={ "center": (1, 2, 3), "radius": 2.5, "start_angle": 90, "end_angle": 180, "extrusion": (0, 0, -1), }, ) # convenient properties assert arc.start_point.isclose(Vec3(-1, 4.5, -3), abs_tol=1e-6) assert arc.end_point.isclose(Vec3(1.5, 2, -3), abs_tol=1e-6) # more efficient method: start, end = list(arc.vertices([arc.dxf.start_angle, arc.dxf.end_angle])) assert start.isclose(Vec3(-1, 4.5, -3), abs_tol=1e-6) assert end.isclose(Vec3(1.5, 2, -3), abs_tol=1e-6) def test_load_from_text(entity): assert entity.dxf.layer == "0" assert entity.dxf.color == 256, "default color is 256 (by layer)" assert entity.dxf.center == (0, 0, 0) assert entity.dxf.radius == 1 assert entity.dxf.start_angle == 0 assert entity.dxf.end_angle == 360 @pytest.mark.parametrize( "txt,ver", [(ENTITY_R2000, DXF2000), (ENTITY_R12, DXF12)] ) def test_write_dxf(txt, ver): expected = basic_tags_from_text(txt) arc = TEST_CLASS.from_text(txt) collector = TagCollector(dxfversion=ver, optional=True) arc.export_dxf(collector) assert collector.tags == expected collector2 = TagCollector(dxfversion=ver, optional=False) arc.export_dxf(collector2) assert collector.has_all_tags(collector2) def test_angles(): arc = Arc.new(dxfattribs={"radius": 1, "start_angle": 30, "end_angle": 60}) assert tuple(arc.angles(2)) == (30, 60) assert tuple(arc.angles(3)) == (30, 45, 60) arc.dxf.start_angle = 180 arc.dxf.end_angle = 0 assert tuple(arc.angles(2)) == (180, 0) assert tuple(arc.angles(3)) == (180, 270, 0) arc.dxf.start_angle = -90 arc.dxf.end_angle = -180 assert tuple(arc.angles(2)) == (270, 180) assert tuple(arc.angles(4)) == (270, 0, 90, 180) def test_arc_default_ocs(): arc = Arc.new( dxfattribs={ "center": (2, 3, 4), "thickness": 2, 
"start_angle": 30, "end_angle": 60, } ) # 1. rotation - 2. scaling - 3. translation m = Matrix44.chain(Matrix44.scale(2, 2, 3), Matrix44.translate(1, 1, 1)) # default extrusion is (0, 0, 1), therefore scale(2, 2, ..) is a uniform scaling in the xy-play of the OCS arc.transform(m) assert arc.dxf.center == (5, 7, 13) assert arc.dxf.extrusion == (0, 0, 1) assert arc.dxf.thickness == 6 assert math.isclose(arc.dxf.start_angle, 30, abs_tol=1e-9) assert math.isclose(arc.dxf.end_angle, 60, abs_tol=1e-9) arc.transform(Matrix44.z_rotate(math.radians(30))) assert math.isclose(arc.dxf.start_angle, 60, abs_tol=1e-9) assert math.isclose(arc.dxf.end_angle, 90, abs_tol=1e-9) # See also ConstructionArc(): test suite 645 - test_flattening() @pytest.mark.parametrize( "r, s, e, sagitta, count", [ (1, 0, 180, 0.10, 5), (0, 0, 360, 0.10, 0), # radius 0 works but yields nothing (-1, 0, 180, 0.35, 3), # negative radius same as positive radius (1, 270, 90, 0.10, 5), # start angle > end angle ], ) def test_arc_flattening(r, s, e, sagitta, count): arc = Arc.new( dxfattribs={ "radius": r, "start_angle": s, "end_angle": e, } ) points = list(arc.flattening(sagitta)) assert len(points) == count def test_arc_flattening_returns_Vec3(): arc = Arc.new( dxfattribs={ "radius": 1, "start_angle": 0, "end_angle": 180, } ) points = list(arc.flattening(0.1)) assert isinstance(points[0], Vec3), "must return Vec3() instances" def test_360_deg_arc_transformation(): arc = Arc.new( dxfattribs={ "radius": 1, "start_angle": 0, "end_angle": 360, } ) count1 = len(list(make_path(arc).flattening(0.01))) arc.transform(Matrix44.translate(1, 0, 0)) count2 = len(list(make_path(arc).flattening(0.01))) assert count1 == count2 arc.transform(Matrix44.z_rotate(math.pi / 2)) p = make_path(arc) count3 = len(list(p.flattening(0.01))) assert count1 == count3 @pytest.mark.parametrize("angle", [30, 180, 360]) @pytest.mark.parametrize( "reflexion", [(-1, 1, 1), (1, -1, 1), (1, 1, -1)], ids=["x", "y", "z"] ) def test_30_deg_arc_reflexion(reflexion, angle): arc = Arc.new( dxfattribs={ "radius": 1, "start_angle": 0, "end_angle": angle, } ) x, y, z = reflexion arc.transform(Matrix44.scale(x, y, z)) assert arc_angle_span_deg( arc.dxf.start_angle, arc.dxf.end_angle ) == pytest.approx(angle) MALFORMED_ARC = """0 ARC 5 0 62 7 330 0 6 LT_EZDXF 8 LY_EZDXF 100 AcDbCircle 10 1.0 20 2.0 30 3.0 100 AcDbEntity 40 2.0 50 30 51 330 """ def test_load_malformed_circle(): arc = Arc.from_text(MALFORMED_ARC) assert arc.dxf.layer == "LY_EZDXF" assert arc.dxf.linetype == "LT_EZDXF" assert arc.dxf.color == 7 assert arc.dxf.center.isclose((1, 2, 3)) assert arc.dxf.radius == 2.0 assert arc.dxf.start_angle == 30.0 assert arc.dxf.end_angle == 330.0
from datetime import datetime, timedelta from typing import List import warnings from dateutil.relativedelta import FR, MO, SA, SU, TH, TU, WE # noqa import numpy as np from pandas.errors import PerformanceWarning from pandas import DateOffset, Series, Timestamp, date_range from pandas.tseries.offsets import Day, Easter def next_monday(dt): """ If holiday falls on Saturday, use following Monday instead; if holiday falls on Sunday, use Monday instead """ if dt.weekday() == 5: return dt + timedelta(2) elif dt.weekday() == 6: return dt + timedelta(1) return dt def next_monday_or_tuesday(dt): """ For second holiday of two adjacent ones! If holiday falls on Saturday, use following Monday instead; if holiday falls on Sunday or Monday, use following Tuesday instead (because Monday is already taken by adjacent holiday on the day before) """ dow = dt.weekday() if dow == 5 or dow == 6: return dt + timedelta(2) elif dow == 0: return dt + timedelta(1) return dt def previous_friday(dt): """ If holiday falls on Saturday or Sunday, use previous Friday instead. """ if dt.weekday() == 5: return dt - timedelta(1) elif dt.weekday() == 6: return dt - timedelta(2) return dt def sunday_to_monday(dt): """ If holiday falls on Sunday, use day thereafter (Monday) instead. """ if dt.weekday() == 6: return dt + timedelta(1) return dt def weekend_to_monday(dt): """ If holiday falls on Sunday or Saturday, use day thereafter (Monday) instead. Needed for holidays such as Christmas observation in Europe """ if dt.weekday() == 6: return dt + timedelta(1) elif dt.weekday() == 5: return dt + timedelta(2) return dt def nearest_workday(dt): """ If holiday falls on Saturday, use day before (Friday) instead; if holiday falls on Sunday, use day thereafter (Monday) instead. """ if dt.weekday() == 5: return dt - timedelta(1) elif dt.weekday() == 6: return dt + timedelta(1) return dt def next_workday(dt): """ returns next weekday used for observances """ dt += timedelta(days=1) while dt.weekday() > 4: # Mon-Fri are 0-4 dt += timedelta(days=1) return dt def previous_workday(dt): """ returns previous weekday used for observances """ dt -= timedelta(days=1) while dt.weekday() > 4: # Mon-Fri are 0-4 dt -= timedelta(days=1) return dt def before_nearest_workday(dt): """ returns previous workday after nearest workday """ return previous_workday(nearest_workday(dt)) def after_nearest_workday(dt): """ returns next workday after nearest workday needed for Boxing day or multiple holidays in a series """ return next_workday(nearest_workday(dt)) class Holiday: """ Class that defines a holiday with start/end dates and rules for observance. 
""" def __init__( self, name, year=None, month=None, day=None, offset=None, observance=None, start_date=None, end_date=None, days_of_week=None, ): """ Parameters ---------- name : str Name of the holiday , defaults to class name offset : array of pandas.tseries.offsets or class from pandas.tseries.offsets computes offset from date observance: function computes when holiday is given a pandas Timestamp days_of_week: provide a tuple of days e.g (0,1,2,3,) for Monday Through Thursday Monday=0,..,Sunday=6 Examples -------- >>> from pandas.tseries.holiday import Holiday, nearest_workday >>> from dateutil.relativedelta import MO >>> USMemorialDay = Holiday('Memorial Day', month=5, day=31, offset=pd.DateOffset(weekday=MO(-1))) >>> USLaborDay = Holiday('Labor Day', month=9, day=1, offset=pd.DateOffset(weekday=MO(1))) >>> July3rd = Holiday('July 3rd', month=7, day=3,) >>> NewYears = Holiday('New Years Day', month=1, day=1, observance=nearest_workday), >>> July3rd = Holiday('July 3rd', month=7, day=3, days_of_week=(0, 1, 2, 3)) """ if offset is not None and observance is not None: raise NotImplementedError("Cannot use both offset and observance.") self.name = name self.year = year self.month = month self.day = day self.offset = offset self.start_date = ( Timestamp(start_date) if start_date is not None else start_date ) self.end_date = Timestamp(end_date) if end_date is not None else end_date self.observance = observance assert days_of_week is None or type(days_of_week) == tuple self.days_of_week = days_of_week def __repr__(self): info = "" if self.year is not None: info += "year={year}, ".format(year=self.year) info += "month={mon}, day={day}, ".format(mon=self.month, day=self.day) if self.offset is not None: info += "offset={offset}".format(offset=self.offset) if self.observance is not None: info += "observance={obs}".format(obs=self.observance) repr = "Holiday: {name} ({info})".format(name=self.name, info=info) return repr def dates(self, start_date, end_date, return_name=False): """ Calculate holidays observed between start date and end date Parameters ---------- start_date : starting date, datetime-like, optional end_date : ending date, datetime-like, optional return_name : bool, optional, default=False If True, return a series that has dates and holiday names. False will only return dates. """ start_date = Timestamp(start_date) end_date = Timestamp(end_date) filter_start_date = start_date filter_end_date = end_date if self.year is not None: dt = Timestamp(datetime(self.year, self.month, self.day)) if return_name: return Series(self.name, index=[dt]) else: return [dt] dates = self._reference_dates(start_date, end_date) holiday_dates = self._apply_rule(dates) if self.days_of_week is not None: holiday_dates = holiday_dates[ np.in1d(holiday_dates.dayofweek, self.days_of_week) ] if self.start_date is not None: filter_start_date = max( self.start_date.tz_localize(filter_start_date.tz), filter_start_date ) if self.end_date is not None: filter_end_date = min( self.end_date.tz_localize(filter_end_date.tz), filter_end_date ) holiday_dates = holiday_dates[ (holiday_dates >= filter_start_date) & (holiday_dates <= filter_end_date) ] if return_name: return Series(self.name, index=holiday_dates) return holiday_dates def _reference_dates(self, start_date, end_date): """ Get reference dates for the holiday. Return reference dates for the holiday also returning the year prior to the start_date and year following the end_date. 
This ensures that any offsets to be applied will yield the holidays within the passed in dates. """ if self.start_date is not None: start_date = self.start_date.tz_localize(start_date.tz) if self.end_date is not None: end_date = self.end_date.tz_localize(start_date.tz) year_offset = DateOffset(years=1) reference_start_date = Timestamp( datetime(start_date.year - 1, self.month, self.day) ) reference_end_date = Timestamp( datetime(end_date.year + 1, self.month, self.day) ) # Don't process unnecessary holidays dates = date_range( start=reference_start_date, end=reference_end_date, freq=year_offset, tz=start_date.tz, ) return dates def _apply_rule(self, dates): """ Apply the given offset/observance to a DatetimeIndex of dates. Parameters ---------- dates : DatetimeIndex Dates to apply the given offset/observance rule Returns ------- Dates with rules applied """ if self.observance is not None: return dates.map(lambda d: self.observance(d)) if self.offset is not None: if not isinstance(self.offset, list): offsets = [self.offset] else: offsets = self.offset for offset in offsets: # if we are adding a non-vectorized value # ignore the PerformanceWarnings: with warnings.catch_warnings(): warnings.simplefilter("ignore", PerformanceWarning) dates += offset return dates holiday_calendars = {} def register(cls): try: name = cls.name except AttributeError: name = cls.__name__ holiday_calendars[name] = cls def get_calendar(name): """ Return an instance of a calendar based on its name. Parameters ---------- name : str Calendar name to return an instance of """ return holiday_calendars[name]() class HolidayCalendarMetaClass(type): def __new__(cls, clsname, bases, attrs): calendar_class = super().__new__(cls, clsname, bases, attrs) register(calendar_class) return calendar_class class AbstractHolidayCalendar(metaclass=HolidayCalendarMetaClass): """ Abstract interface to create holidays following certain rules. """ rules = [] # type: List[Holiday] start_date = Timestamp(datetime(1970, 1, 1)) end_date = Timestamp(datetime(2030, 12, 31)) _cache = None def __init__(self, name=None, rules=None): """ Initializes holiday object with a given set a rules. Normally classes just have the rules defined within them. Parameters ---------- name : str Name of the holiday calendar, defaults to class name rules : array of Holiday objects A set of rules used to create the holidays. """ super().__init__() if name is None: name = self.__class__.__name__ self.name = name if rules is not None: self.rules = rules def rule_from_name(self, name): for rule in self.rules: if rule.name == name: return rule return None def holidays(self, start=None, end=None, return_name=False): """ Returns a curve with holidays between start_date and end_date Parameters ---------- start : starting date, datetime-like, optional end : ending date, datetime-like, optional return_name : bool, optional If True, return a series that has dates and holiday names. False will only return a DatetimeIndex of dates. 
Returns ------- DatetimeIndex of holidays """ if self.rules is None: raise Exception( "Holiday Calendar {name} does not have any " "rules specified".format(name=self.name) ) if start is None: start = AbstractHolidayCalendar.start_date if end is None: end = AbstractHolidayCalendar.end_date start = Timestamp(start) end = Timestamp(end) holidays = None # If we don't have a cache or the dates are outside the prior cache, we # get them again if self._cache is None or start < self._cache[0] or end > self._cache[1]: for rule in self.rules: rule_holidays = rule.dates(start, end, return_name=True) if holidays is None: holidays = rule_holidays else: holidays = holidays.append(rule_holidays) self._cache = (start, end, holidays.sort_index()) holidays = self._cache[2] holidays = holidays[start:end] if return_name: return holidays else: return holidays.index @staticmethod def merge_class(base, other): """ Merge holiday calendars together. The base calendar will take precedence to other. The merge will be done based on each holiday's name. Parameters ---------- base : AbstractHolidayCalendar instance/subclass or array of Holiday objects other : AbstractHolidayCalendar instance/subclass or array of Holiday objects """ try: other = other.rules except AttributeError: pass if not isinstance(other, list): other = [other] other_holidays = {holiday.name: holiday for holiday in other} try: base = base.rules except AttributeError: pass if not isinstance(base, list): base = [base] base_holidays = {holiday.name: holiday for holiday in base} other_holidays.update(base_holidays) return list(other_holidays.values()) def merge(self, other, inplace=False): """ Merge holiday calendars together. The caller's class rules take precedence. The merge will be done based on each holiday's name. Parameters ---------- other : holiday calendar inplace : bool (default=False) If True set rule_table to holidays, else return array of Holidays """ holidays = self.merge_class(self, other) if inplace: self.rules = holidays else: return holidays USMemorialDay = Holiday( "Memorial Day", month=5, day=31, offset=DateOffset(weekday=MO(-1)) ) USLaborDay = Holiday("Labor Day", month=9, day=1, offset=DateOffset(weekday=MO(1))) USColumbusDay = Holiday( "Columbus Day", month=10, day=1, offset=DateOffset(weekday=MO(2)) ) USThanksgivingDay = Holiday( "Thanksgiving", month=11, day=1, offset=DateOffset(weekday=TH(4)) ) USMartinLutherKingJr = Holiday( "Martin Luther King Jr. 
Day", start_date=datetime(1986, 1, 1), month=1, day=1, offset=DateOffset(weekday=MO(3)), ) USPresidentsDay = Holiday( "Presidents Day", month=2, day=1, offset=DateOffset(weekday=MO(3)) ) GoodFriday = Holiday("Good Friday", month=1, day=1, offset=[Easter(), Day(-2)]) EasterMonday = Holiday("Easter Monday", month=1, day=1, offset=[Easter(), Day(1)]) class USFederalHolidayCalendar(AbstractHolidayCalendar): """ US Federal Government Holiday Calendar based on rules specified by: https://www.opm.gov/policy-data-oversight/ snow-dismissal-procedures/federal-holidays/ """ rules = [ Holiday("New Years Day", month=1, day=1, observance=nearest_workday), USMartinLutherKingJr, USPresidentsDay, USMemorialDay, Holiday("July 4th", month=7, day=4, observance=nearest_workday), USLaborDay, USColumbusDay, Holiday("Veterans Day", month=11, day=11, observance=nearest_workday), USThanksgivingDay, Holiday("Christmas", month=12, day=25, observance=nearest_workday), ] def HolidayCalendarFactory(name, base, other, base_class=AbstractHolidayCalendar): rules = AbstractHolidayCalendar.merge_class(base, other) calendar_class = type(name, (base_class,), {"rules": rules, "name": name}) return calendar_class
# -*- coding: utf-8 -*- from django.conf import settings # from mapcache.settings import MAPSERVER_URL import os import mapscript MAPA_DEFAULT_SRS = 3857 MAPA_DEFAULT_SIZE = (110, 150) MAPA_DEFAULT_EXTENT = (-20037508.3427892480, -20037508.3427892480, 20037508.3427892480, 20037508.3427892480) # 3857 whole world MAPA_DEFAULT_IMAGECOLOR = '#C6E2F2' # debe ser formato Hexa MAPA_FONTSET_FILENAME = os.path.join(settings.MAPAS_PATH, 'fonts.txt') MAPA_SYMBOLSET_FILENAME = os.path.join(settings.MAPAS_PATH, 'symbols.txt') MAPA_SIMBOLOGIA_GRIB_TMP_FILENAME = os.path.join(settings.MAPAS_PATH, 'simbologia_grib_tmp.txt') MAPA_SIMBOLOGIA_GRIB_WIND_FILENAME = os.path.join(settings.MAPAS_PATH, 'simbologia_grib_wind.txt') MAPA_SIMBOLOGIA_GRIB_APCP_FILENAME = os.path.join(settings.MAPAS_PATH, 'simbologia_grib_apcp.txt') MAPA_SIMBOLOGIA_GRIB_PRMSL_FILENAME = os.path.join(settings.MAPAS_PATH, 'simbologia_grib_prmsl.txt') MAPA_SIMBOLOGIA_GRIB_RH_FILENAME = os.path.join(settings.MAPAS_PATH, 'simbologia_grib_rh.txt') MAPA_SIMBOLOGIA_GRIB_DEFAULT_FILENAME = os.path.join(settings.MAPAS_PATH, 'simbologia_grib_default.txt') MAPA_DATA_PATH = '../data' MAPA_ERRORFILE = os.path.join(settings.MAPAS_PATH, 'map-error.log') class MS_LAYER_TYPE: MS_LAYER_POINT, MS_LAYER_LINE, MS_LAYER_POLYGON, MS_LAYER_RASTER, MS_LAYER_ANNOTATION, MS_LAYER_QUERY, MS_LAYER_CIRCLE, MS_LAYER_TILEINDEX, MS_LAYER_CHART = range(9) def __agregar_simbologia_basica__(layer): class1 = mapscript.classObj(layer) class1.name = 'Default' style = mapscript.styleObj(class1) if layer.type == mapscript.MS_LAYER_POINT: style.symbolname = 'circle' style.size = 8 style.minsize = 8 style.maxsize = 10 style.maxwidth = 2 style.outlinecolor.setRGB(30, 30, 30) style.color.setRGB(31, 120, 180) elif layer.type == mapscript.MS_LAYER_POLYGON: style.outlinecolor.setRGB(126, 109, 83) style.color.setRGB(210, 182, 138) elif layer.type == mapscript.MS_LAYER_LINE: style.color.setRGB(76, 38, 0) style.width = 4 style.minwidth = 4 style.maxwidth = 6 style2 = mapscript.styleObj(class1) style2.color.setRGB(255, 206, 128) style2.width = 2 style2.minwidth = 2 style2.maxwidth = 4 elif layer.type == mapscript.MS_LAYER_RASTER: layer.offsite = mapscript.colorObj(0, 0, 0) def __agregar_simbologia_grib__(layer, band_info): # band_info es de la forma ("4", {'rango:'[1,2], 'elemento': 'TMP', 'descripcion': 'Temp'}) if band_info == '': bands = '' band_type = '' minimo = maximo = None else: bands, info = band_info band_type = info['elemento'] minimo = info['rango'][0] maximo = info['rango'][1] if band_type == 'TMP': archivo = MAPA_SIMBOLOGIA_GRIB_TMP_FILENAME elif band_type == 'WIND': archivo = MAPA_SIMBOLOGIA_GRIB_WIND_FILENAME elif band_type == 'APCP': archivo = MAPA_SIMBOLOGIA_GRIB_APCP_FILENAME elif band_type == 'PRMSL': archivo = MAPA_SIMBOLOGIA_GRIB_PRMSL_FILENAME elif band_type == 'RH': archivo = MAPA_SIMBOLOGIA_GRIB_RH_FILENAME else: archivo = MAPA_SIMBOLOGIA_GRIB_DEFAULT_FILENAME with open(archivo, 'r') as definicion_grib: res = layer.updateFromString(definicion_grib.read()) if res == mapscript.MS_FAILURE: print "Error: couldn't set layer grib symbology for {} band".format(band_type) return False # manejamos el caso de viento como un caso particular pues incluir un symbol en el archivo externo genera un Segmentation Fault de Mapscript if band_type == 'WIND': class0 = layer.getClass(0) style0 = class0.getStyle(0) style0.symbolname = 'arrow' if archivo == MAPA_SIMBOLOGIA_GRIB_DEFAULT_FILENAME and minimo and maximo: class0 = layer.getClass(0) style0 = class0.getStyle(0) 
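        # With the generic GRIB symbology, narrow the first style's DATARANGE to
        # the band's actual min/max so the class covers the real value range and
        # the legend label (set just below) reflects it.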
style0.updateFromString('STYLE\n DATARANGE {} {}\n END'.format(minimo, maximo)) class0.name = '{} < {} < {}'.format(minimo, 'valor', maximo) if bands != '': # deberia venir siempre lleno layer.addProcessing('BANDS={}'.format(bands)) return True def create_mapfile(data, save=True): mapa = mapscript.mapObj() mapa.name = 'mapa_' + unicode(data['idMapa']) try: if data['imageColor']['type'] == 'hex': mapa.imagecolor.setHex(data['imageColor']['color']) else: mapa.imagecolor.setRGB(*(data['imageColor']['color'])) except: mapa.imagecolor.setHex(MAPA_DEFAULT_IMAGECOLOR) mapa.setSymbolSet(MAPA_SYMBOLSET_FILENAME) mapa.setFontSet(MAPA_FONTSET_FILENAME) mapa.shapepath = MAPA_DATA_PATH mapa.outputformat.transparent = True # if data['srs']!='': print "mapserver: Seteando proyeccion para el mapa: %s" % data['srs'] mapa.setProjection(data['srs']) # else: # print "mapserver: Seteando proyeccion para el mapa: epsg:%s"%data['srid'] # mapa.setProjection('epsg:%s' % (data['srid'])) if data['srid'] == '4326': mapa.units = mapscript.MS_DD else: mapa.units = mapscript.MS_METERS mapa.legend.updateFromString('LEGEND\n KEYSIZE 20 10\n KEYSPACING 5 5\n LABEL\n SIZE 10\n OFFSET 0 0\n SHADOWSIZE 1 1\n TYPE TRUETYPE\n FONT "Swz721lc"\nEND # LABEL\n STATUS OFF\nEND # LEGEND\n\n') # primero seteamos extent, luego size. sino hay un comportamiento extranio y el extent no se respeta, quizas para igualar relaciones de aspecto entre ambos try: mapa.setExtent(*(data['mapFullExtent'])) # si tiene un extent overrideado except: if data['mapType'] in ('user', 'public_layers'): # en estos casos no los calculamos mapa.setExtent(*(MAPA_DEFAULT_EXTENT)) else: try: mapa.setExtent(*(data['mapBoundingBox'])) except: mapa.setExtent(*(MAPA_DEFAULT_EXTENT)) try: mapa.setSize(*(data['mapSize'])) except: mapa.setSize(*(MAPA_DEFAULT_SIZE)) output_geojson = mapscript.outputFormatObj('OGR/GEOJSON', 'GeoJson') output_geojson.setMimetype('application/json; subtype=geojson') output_geojson.setOption('STORAGE', 'stream') output_geojson.setOption('FORM', 'SIMPLE') mapa.appendOutputFormat(output_geojson) output_shapefile = mapscript.outputFormatObj('OGR/ESRI Shapefile', 'ShapeZip') output_shapefile.setMimetype('application/shapefile') output_shapefile.setOption('STORAGE', 'filesystem') output_shapefile.setOption('FORM', 'zip') output_shapefile.setOption('FILENAME', data['fileName'] + '.zip') mapa.appendOutputFormat(output_shapefile) output_csv = mapscript.outputFormatObj('OGR/CSV', 'CSV') output_csv.setMimetype('text/csv') # output_csv.setOption('LCO:GEOMETRY', 'AS_WKT') output_csv.setOption('STORAGE', 'filesystem') output_csv.setOption('FORM', 'simple') output_csv.setOption('FILENAME', data['fileName'] + '.csv') mapa.appendOutputFormat(output_csv) mapa.setConfigOption('MS_ERRORFILE', MAPA_ERRORFILE) mapa.setConfigOption('PROJ_LIB', settings.PROJ_LIB) mapa.setConfigOption('MS_OPENLAYERS_JS_URL', settings.MS_OPENLAYERS_JS_URL) mapa.legend.template = 'templates/legend.html' # TODO: general o solo WMS? # mapa.web.validation.set('TEMPLATE', '[a-z/.]+') # TODO: general o solo WMS? mapa.web.template = 'templates/mapa-interactivo.html' # TODO: general o solo WMS? 
mapa.web.imagepath = settings.MAP_WEB_IMAGEPATH mapa.web.imageurl = settings.MAP_WEB_IMAGEURL try: for k, v in data['metadata'].iteritems(): mapa.setMetaData(k, v) except: pass # No metadata # try: for layer_def in data['layers']: # print(layer_def) # La capa tiene Time Index (WMS-T) if layer_def.get('timeIndexData', None) is not None: mapa.insertLayer(create_tile_index_layer(layer_def)) mapa.insertLayer(create_ms_layer(layer_def)) # except Exception, e: # print "Failed to insert layers on mapfile: %s"%str(e) if save: mapa.save(os.path.join(settings.MAPAS_PATH, data['idMapa'] + '.map')) print "......mapa guardado %s" % (data['idMapa'] + '.map') return mapa def create_tile_index_layer(data): layer = mapscript.layerObj() layer.name = 'time_idx_'+data['layerName'] layer.type = mapscript.MS_LAYER_POLYGON layer.data = data['timeIndexData'] layer.connection = data['layerConnection'] layer.connectiontype = mapscript.MS_POSTGIS layer.setMetaData('wms_title', 'TIME INDEX') layer.setMetaData('wms_extent', data['layerExtent']) layer.setMetaData('wms_timeextent', data['timeExtent']) layer.setMetaData('wms_timedefault', data['timeDefault']) layer.setMetaData('wms_timeitem', data['timeItem']) # Esta capa es auxiliar, no debe figurar en WMS layer.setMetaData('wms_enable_request', '!GetCapabilities !GetMap') return layer def create_ms_layer(data): layer = mapscript.layerObj() layer.name = data['layerName'] layer.status = mapscript.MS_ON layer.group = 'default' # siempre layer.template = 'blank.html' # print('create_ms_layer - connectionType: %s'%ata['connectionType']) if data['connectionType'] == 'RASTER': layer.type = mapscript.MS_LAYER_RASTER if data.get('timeIndexData', None) is not None: layer.tileitem = data['tileItem'] layer.tileindex = 'time_idx_'+data['layerName'] layer.setMetaData('wms_timeextent', data['timeExtent']) layer.setMetaData('wms_timedefault', data['timeDefault']) layer.setMetaData('wms_timeitem', data['timeItem']) layer.setMetaData('wms_enable_request', '*') else: layer.data = data['layerData'] if data['proj4'] != '': layer.setProjection(data['proj4']) else: layer.setProjection('epsg:%s' % (unicode(data['srid']))) if data['driver'] == 'GRIB': # data['rasterBandInfo'] es de la forma ("4", {'rango:'[1,2], 'elemento': 'TMP', 'descripcion': 'Temp'}) __agregar_simbologia_grib__(layer, data['rasterBandInfo']) else: __agregar_simbologia_basica__(layer) if data['layerDefinitionOverride'] != '': layer.updateFromString(data['layerDefinitionOverride']) elif data['connectionType'] == 'WMS': layer.type = mapscript.MS_LAYER_RASTER layer.connectiontype = mapscript.MS_WMS layer.connection = data['layerConnection'] layer.setMetaData('wms_srs', 'epsg:3857') layer.setMetaData('wms_name', data['layerName']) layer.setMetaData('wms_server_version', '1.1.1') layer.setMetaData('wms_format', 'image/png') if data.get('sldUrl', None) is not None and data['sldUrl'] != '': # print("sldUrl: %s"%data['sldUrl']) layer.setMetaData('wms_sld_url', data['sldUrl']) elif data['connectionType'] == 'POSTGIS': layer.type = eval('mapscript.MS_LAYER_' + data['layerType']) layer.addProcessing('LABEL_NO_CLIP=ON') layer.addProcessing('CLOSE_CONNECTION=DEFER') layer.connectiontype = mapscript.MS_POSTGIS layer.connection = data['layerConnection'] srid = data['srid'] layer.data = data['layerData'] # proj='epsg:%s'%(unicode(srid)) if srid!=None else self.capa.dame_projection layer.setProjection('epsg:%s' % (unicode(srid))) layer.setMetaData('ows_title', data['layerTitle']) layer.setMetaData('gml_types', 'auto') # 
        # layer.setMetaData('ows_srs', '%s epsg:4326' % (proj))  # this field is filled in by the map
        layer.setMetaData('gml_include_items', 'all')  # left like this for now; the specific fields are added on top
        layer.setMetaData('gml_featureid', 'gid')
        layer.setMetaData('wms_enable_request', '*')
        layer.setMetaData('wfs_enable_request', '*')
        if data.get('timeItem', None) is not None and data['timeItem'] != '':
            layer.setMetaData('wms_timeitem', data['timeItem'])
            layer.setMetaData('wms_timeextent', data['timeExtent'])
            layer.setMetaData('wms_timedefault', data['timeDefault'])
        if len(data['metadataIncludeItems']) > 0:
            layer.setMetaData('gml_include_items', ','.join(data['metadataIncludeItems']))
        for alias in data['metadataAliases']:
            layer.setMetaData('gml_%s_alias' % (alias[0]), alias[1])
        if data['layerDefinitionOverride'] != '':
            try:
                layer.updateFromString(data['layerDefinitionOverride'])
            except:
                pass
        else:
            __agregar_simbologia_basica__(layer)
    try:
        for k, v in data['metadata'].iteritems():
            layer.setMetaData(k, v)
    except:
        pass  # No metadata
    return layer


def get_wms_url(map_id):
    return '%s?map=%s.map' % (
        settings.MAPSERVER_URL,  # mapserver CGI url
        os.path.join(settings.MAPAS_PATH, map_id)  # absolute mapfile path
    )


def get_wms_request_url(map_id, layers, srs, width, height, extent, sld_url=''):
    wms_req_url = '%s&LAYERS=%s&SRS=epsg:%s&MAP_RESOLUTION=96&SERVICE=WMS&FORMAT=image/png&REQUEST=GetMap&HEIGHT=%d&FORMAT_OPTIONS=dpi:96&WIDTH=%d&VERSION=1.1.1&BBOX=%s&STYLES=&TRANSPARENT=TRUE&DPI=96'
    url = wms_req_url % (
        get_wms_url(map_id),
        layers,
        srs,
        height,
        width,
        extent
    )
    if sld_url != '':
        url += '&sld=' + sld_url
    # print "get_wms_request_url: %s" % url
    return url


def get_legend_graphic_url(map_id, layer_name, sld_url=''):
    legend_url = '%s&SERVICE=WMS&VERSION=1.3.0&SLD_VERSION=1.1.0&REQUEST=GetLegendGraphic&FORMAT=image/png&LAYER=%s&STYLE='
    url = legend_url % (
        get_wms_url(map_id),
        layer_name
    )
    if sld_url and sld_url != '':
        url += '&sld=' + sld_url
    return url


def get_map_browser_url(map_id):
    return '%s&mode=browse&layers=all' % (
        get_wms_url(map_id)
    )


def get_featureinfo_url(map_id, bbox, width, height, query_layers, i, j):
    req_url = '%s&SERVICE=WMS&VERSION=1.3.0&REQUEST=GetFeatureInfo&BBOX=%s&CRS=epsg:3857&WIDTH=%s&HEIGHT=%s&LAYERS=default&STYLES=&FORMAT=image/png&QUERY_LAYERS=%s&INFO_FORMAT=application/vnd.ogc.gml&I=%s&J=%s'
    return req_url % (
        get_wms_url(map_id),
        bbox,
        width,
        height,
        query_layers,
        i,
        j
    )


def get_feature_url(map_id, typename, outputformat):
    req_url = '%s&SERVICE=WFS&VERSION=1.0.0&REQUEST=getfeature&TYPENAME=%s&outputformat=%s'
    return req_url % (
        get_wms_url(map_id),
        typename,
        outputformat
    )


def draw_map_to_file(map_id, output_file):
    print 'mapserver.draw_map_to_file(%s, %s)' % (map_id, output_file)
    try:
        mapa = mapscript.mapObj('%s.map' % os.path.join(settings.MAPAS_PATH, map_id))
        mapa.draw().save(output_file)
        return True
    except Exception:
        return False
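
# --- Illustrative usage sketch --------------------------------------------------
# A minimal example of how the URL helpers above compose; they only do string
# formatting around get_wms_url(), so no running MapServer is needed to try them.
# The map id, layer name and extent used here are hypothetical values.
if __name__ == '__main__':
    demo_map_id = 'mapa_42'  # hypothetical map id
    print get_wms_url(demo_map_id)
    print get_wms_request_url(demo_map_id, layers='default', srs='3857',
                              width=800, height=600,
                              extent='-20037508,-20037508,20037508,20037508')
    print get_legend_graphic_url(demo_map_id, 'default')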
import pytest, requests from kubernetes.client.rest import ApiException from suite.resources_utils import wait_before_test, replace_configmap_from_yaml from suite.custom_resources_utils import ( read_custom_resource, ) from suite.vs_vsr_resources_utils import ( delete_virtual_server, create_virtual_server_from_yaml, patch_virtual_server_from_yaml, patch_v_s_route_from_yaml, ) from suite.policy_resources_utils import ( create_policy_from_yaml, delete_policy, read_policy, ) from settings import TEST_DATA, DEPLOYMENTS std_cm_src = f"{DEPLOYMENTS}/common/nginx-config.yaml" test_cm_src = f"{TEST_DATA}/access-control/configmap/nginx-config.yaml" std_vs_src = f"{TEST_DATA}/virtual-server-route/standard/virtual-server.yaml" deny_pol_src = f"{TEST_DATA}/access-control/policies/access-control-policy-deny.yaml" allow_pol_src = f"{TEST_DATA}/access-control/policies/access-control-policy-allow.yaml" invalid_pol_src = f"{TEST_DATA}/access-control/policies/access-control-policy-invalid.yaml" deny_vsr_src = f"{TEST_DATA}/access-control/route-subroute/virtual-server-route-deny-subroute.yaml" allow_vsr_src = ( f"{TEST_DATA}/access-control/route-subroute/virtual-server-route-allow-subroute.yaml" ) override_vsr_src = ( f"{TEST_DATA}/access-control/route-subroute/virtual-server-route-override-subroute.yaml" ) invalid_vsr_src = ( f"{TEST_DATA}/access-control/route-subroute/virtual-server-route-invalid-subroute.yaml" ) vs_spec_vsr_override_src = ( f"{TEST_DATA}/access-control/route-subroute/virtual-server-vsr-spec-override.yaml" ) vs_route_vsr_override_src = ( f"{TEST_DATA}/access-control/route-subroute/virtual-server-vsr-route-override.yaml" ) @pytest.fixture(scope="class") def config_setup(request, kube_apis, ingress_controller_prerequisites) -> None: """ Replace configmap to add "set-real-ip-from" :param request: pytest fixture :param kube_apis: client apis :param ingress_controller_prerequisites: IC pre-requisites """ print(f"------------- Replace ConfigMap --------------") replace_configmap_from_yaml( kube_apis.v1, ingress_controller_prerequisites.config_map["metadata"]["name"], ingress_controller_prerequisites.namespace, test_cm_src, ) def fin(): print(f"------------- Restore ConfigMap --------------") replace_configmap_from_yaml( kube_apis.v1, ingress_controller_prerequisites.config_map["metadata"]["name"], ingress_controller_prerequisites.namespace, std_cm_src, ) request.addfinalizer(fin) @pytest.mark.policies @pytest.mark.parametrize( "crd_ingress_controller, v_s_route_setup", [ ( { "type": "complete", "extra_args": [f"-enable-custom-resources", f"-enable-leader-election=false"], }, {"example": "virtual-server-route"}, ) ], indirect=True, ) class TestAccessControlPoliciesVsr: def restore_default_vsr(self, kube_apis, v_s_route_setup) -> None: """ Function to revert vsr deployments to valid state """ patch_src_m = f"{TEST_DATA}/virtual-server-route/route-multiple.yaml" patch_v_s_route_from_yaml( kube_apis.custom_objects, v_s_route_setup.route_m.name, patch_src_m, v_s_route_setup.route_m.namespace, ) wait_before_test() def test_deny_policy_vsr( self, kube_apis, crd_ingress_controller, v_s_route_app_setup, test_namespace, config_setup, v_s_route_setup, ): """ Test if ip (10.0.0.1) block-listing is working (policy specified in vsr subroute): default(no policy) -> deny """ req_url = f"http://{v_s_route_setup.public_endpoint.public_ip}:{v_s_route_setup.public_endpoint.port}" resp = requests.get( f"{req_url}{v_s_route_setup.route_m.paths[0]}", headers={"host": v_s_route_setup.vs_host, "X-Real-IP": "10.0.0.1"}, 
) print(f"Response: {resp.status_code}\n{resp.text}") assert resp.status_code == 200 print(f"Create deny policy") pol_name = create_policy_from_yaml( kube_apis.custom_objects, deny_pol_src, v_s_route_setup.route_m.namespace ) patch_v_s_route_from_yaml( kube_apis.custom_objects, v_s_route_setup.route_m.name, deny_vsr_src, v_s_route_setup.route_m.namespace, ) wait_before_test() policy_info = read_custom_resource( kube_apis.custom_objects, v_s_route_setup.route_m.namespace, "policies", pol_name ) print(f"\nUse IP listed in deny block: 10.0.0.1") resp1 = requests.get( f"{req_url}{v_s_route_setup.route_m.paths[0]}", headers={"host": v_s_route_setup.vs_host, "X-Real-IP": "10.0.0.1"}, ) print(f"Response: {resp1.status_code}\n{resp1.text}") print(f"\nUse IP not listed in deny block: 10.0.0.2") resp2 = requests.get( f"{req_url}{v_s_route_setup.route_m.paths[0]}", headers={"host": v_s_route_setup.vs_host, "X-Real-IP": "10.0.0.2"}, ) print(f"Response: {resp2.status_code}\n{resp2.text}") delete_policy(kube_apis.custom_objects, pol_name, v_s_route_setup.route_m.namespace) self.restore_default_vsr(kube_apis, v_s_route_setup) assert ( policy_info["status"] and policy_info["status"]["reason"] == "AddedOrUpdated" and policy_info["status"]["state"] == "Valid" ) assert ( resp1.status_code == 403 and "403 Forbidden" in resp1.text and resp2.status_code == 200 and "Server address:" in resp2.text ) def test_allow_policy_vsr( self, kube_apis, crd_ingress_controller, v_s_route_app_setup, test_namespace, config_setup, v_s_route_setup, ): """ Test if ip (10.0.0.1) block-listing is working (policy specified in vsr subroute): default(no policy) -> deny """ req_url = f"http://{v_s_route_setup.public_endpoint.public_ip}:{v_s_route_setup.public_endpoint.port}" resp = requests.get( f"{req_url}{v_s_route_setup.route_m.paths[0]}", headers={"host": v_s_route_setup.vs_host, "X-Real-IP": "10.0.0.1"}, ) print(f"Response: {resp.status_code}\n{resp.text}") assert resp.status_code == 200 print(f"Create allow policy") pol_name = create_policy_from_yaml( kube_apis.custom_objects, allow_pol_src, v_s_route_setup.route_m.namespace ) patch_v_s_route_from_yaml( kube_apis.custom_objects, v_s_route_setup.route_m.name, allow_vsr_src, v_s_route_setup.route_m.namespace, ) wait_before_test() policy_info = read_custom_resource( kube_apis.custom_objects, v_s_route_setup.route_m.namespace, "policies", pol_name ) print(f"\nUse IP listed in deny block: 10.0.0.1") resp1 = requests.get( f"{req_url}{v_s_route_setup.route_m.paths[0]}", headers={"host": v_s_route_setup.vs_host, "X-Real-IP": "10.0.0.1"}, ) print(f"Response: {resp1.status_code}\n{resp1.text}") print(f"\nUse IP not listed in deny block: 10.0.0.2") resp2 = requests.get( f"{req_url}{v_s_route_setup.route_m.paths[0]}", headers={"host": v_s_route_setup.vs_host, "X-Real-IP": "10.0.0.2"}, ) print(f"Response: {resp2.status_code}\n{resp2.text}") delete_policy(kube_apis.custom_objects, pol_name, v_s_route_setup.route_m.namespace) self.restore_default_vsr(kube_apis, v_s_route_setup) assert ( policy_info["status"] and policy_info["status"]["reason"] == "AddedOrUpdated" and policy_info["status"]["state"] == "Valid" ) assert ( resp2.status_code == 403 and "403 Forbidden" in resp2.text and resp1.status_code == 200 and "Server address:" in resp1.text ) def test_override_policy_vsr( self, kube_apis, crd_ingress_controller, v_s_route_app_setup, test_namespace, config_setup, v_s_route_setup, ): """ Test if ip (10.0.0.1) allow-listing overrides block-listing (policy specified in vsr subroute) """ req_url = 
f"http://{v_s_route_setup.public_endpoint.public_ip}:{v_s_route_setup.public_endpoint.port}" resp = requests.get( f"{req_url}{v_s_route_setup.route_m.paths[0]}", headers={"host": v_s_route_setup.vs_host, "X-Real-IP": "10.0.0.1"}, ) print(f"Response: {resp.status_code}\n{resp.text}") assert resp.status_code == 200 print(f"Create deny policy") deny_pol_name = create_policy_from_yaml( kube_apis.custom_objects, deny_pol_src, v_s_route_setup.route_m.namespace ) print(f"Create allow policy") allow_pol_name = create_policy_from_yaml( kube_apis.custom_objects, allow_pol_src, v_s_route_setup.route_m.namespace ) patch_v_s_route_from_yaml( kube_apis.custom_objects, v_s_route_setup.route_m.name, override_vsr_src, v_s_route_setup.route_m.namespace, ) wait_before_test() print(f"\nUse IP listed in deny block: 10.0.0.1") resp1 = requests.get( f"{req_url}{v_s_route_setup.route_m.paths[0]}", headers={"host": v_s_route_setup.vs_host, "X-Real-IP": "10.0.0.1"}, ) print(f"Response: {resp1.status_code}\n{resp1.text}") print(f"\nUse IP not listed in deny block: 10.0.0.2") resp2 = requests.get( f"{req_url}{v_s_route_setup.route_m.paths[0]}", headers={"host": v_s_route_setup.vs_host, "X-Real-IP": "10.0.0.2"}, ) print(f"Response: {resp2.status_code}\n{resp2.text}") delete_policy(kube_apis.custom_objects, deny_pol_name, v_s_route_setup.route_m.namespace) delete_policy(kube_apis.custom_objects, allow_pol_name, v_s_route_setup.route_m.namespace) self.restore_default_vsr(kube_apis, v_s_route_setup) assert ( resp2.status_code == 403 and "403 Forbidden" in resp2.text and resp1.status_code == 200 and "Server address:" in resp1.text ) def test_invalid_policy_vsr( self, kube_apis, crd_ingress_controller, v_s_route_app_setup, test_namespace, config_setup, v_s_route_setup, ): """ Test if applying invalid-policy results in 500. 
""" req_url = f"http://{v_s_route_setup.public_endpoint.public_ip}:{v_s_route_setup.public_endpoint.port}" resp = requests.get( f"{req_url}{v_s_route_setup.route_m.paths[0]}", headers={"host": v_s_route_setup.vs_host, "X-Real-IP": "10.0.0.1"}, ) print(f"Response: {resp.status_code}\n{resp.text}") assert resp.status_code == 200 print(f"Create invalid policy") pol_name = create_policy_from_yaml( kube_apis.custom_objects, invalid_pol_src, v_s_route_setup.route_m.namespace ) patch_v_s_route_from_yaml( kube_apis.custom_objects, v_s_route_setup.route_m.name, invalid_vsr_src, v_s_route_setup.route_m.namespace, ) wait_before_test() policy_info = read_custom_resource( kube_apis.custom_objects, v_s_route_setup.route_m.namespace, "policies", pol_name ) print(f"\nUse IP listed in deny block: 10.0.0.1") resp = requests.get( f"{req_url}{v_s_route_setup.route_m.paths[0]}", headers={"host": v_s_route_setup.vs_host, "X-Real-IP": "10.0.0.1"}, ) print(f"Response: {resp.status_code}\n{resp.text}") vsr_info = read_custom_resource( kube_apis.custom_objects, v_s_route_setup.route_m.namespace, "virtualserverroutes", v_s_route_setup.route_m.name, ) delete_policy(kube_apis.custom_objects, pol_name, v_s_route_setup.route_m.namespace) self.restore_default_vsr(kube_apis, v_s_route_setup) assert ( policy_info["status"] and policy_info["status"]["reason"] == "Rejected" and policy_info["status"]["state"] == "Invalid" ) assert resp.status_code == 500 and "500 Internal Server Error" in resp.text assert ( vsr_info["status"]["state"] == "Warning" and vsr_info["status"]["reason"] == "AddedOrUpdatedWithWarning" ) @pytest.mark.parametrize("src", [vs_spec_vsr_override_src, vs_route_vsr_override_src]) def test_overide_vs_vsr( self, kube_apis, crd_ingress_controller, v_s_route_app_setup, test_namespace, config_setup, v_s_route_setup, src, ): """ Test if vsr subroute policy overrides vs spec policy and vsr subroute policy overrides vs route policy """ req_url = f"http://{v_s_route_setup.public_endpoint.public_ip}:{v_s_route_setup.public_endpoint.port}" print(f"Create deny policy") deny_pol_name = create_policy_from_yaml( kube_apis.custom_objects, deny_pol_src, v_s_route_setup.route_m.namespace ) print(f"Create allow policy") allow_pol_name = create_policy_from_yaml( kube_apis.custom_objects, allow_pol_src, v_s_route_setup.route_m.namespace ) patch_v_s_route_from_yaml( kube_apis.custom_objects, v_s_route_setup.route_m.name, allow_vsr_src, v_s_route_setup.route_m.namespace, ) # patch vs with blocking policy patch_virtual_server_from_yaml( kube_apis.custom_objects, v_s_route_setup.vs_name, src, v_s_route_setup.namespace ) wait_before_test() print(f"\nUse IP listed in deny block: 10.0.0.1") resp = requests.get( f"{req_url}{v_s_route_setup.route_m.paths[0]}", headers={"host": v_s_route_setup.vs_host, "X-Real-IP": "10.0.0.1"}, ) print(f"Response: {resp.status_code}\n{resp.text}") delete_policy(kube_apis.custom_objects, deny_pol_name, v_s_route_setup.route_m.namespace) delete_policy(kube_apis.custom_objects, allow_pol_name, v_s_route_setup.route_m.namespace) self.restore_default_vsr(kube_apis, v_s_route_setup) patch_virtual_server_from_yaml( kube_apis.custom_objects, v_s_route_setup.vs_name, std_vs_src, v_s_route_setup.namespace ) wait_before_test() assert resp.status_code == 200 and "Server address:" in resp.text
# coding=utf-8 # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). from __future__ import (absolute_import, division, generators, nested_scopes, print_function, unicode_literals, with_statement) from textwrap import dedent from pants.backend.core.register import build_file_aliases as register_core from pants.backend.jvm.register import build_file_aliases as register_jvm from pants.backend.project_info.tasks.depmap import Depmap from pants.backend.python.register import build_file_aliases as register_python from pants_test.tasks.task_test_base import ConsoleTaskTestBase class BaseDepmapTest(ConsoleTaskTestBase): @classmethod def task_type(cls): return Depmap class DepmapTest(BaseDepmapTest): @property def alias_groups(self): return register_core().merge(register_jvm()).merge(register_python()) def setUp(self): super(DepmapTest, self).setUp() def add_to_build_file(path, name, type, deps=(), **kwargs): self.add_to_build_file(path, dedent(""" {type}(name='{name}', dependencies=[{deps}], {extra} ) """.format( type=type, name=name, deps=','.join("pants('{0}')".format(dep) for dep in list(deps)), extra=('' if not kwargs else ', '.join('{0}={1}'.format(k, v) for k, v in kwargs.items())) ))) def create_python_binary_target(path, name, entry_point, type, deps=()): self.add_to_build_file(path, dedent(""" {type}(name='{name}', entry_point='{entry_point}', dependencies=[{deps}] ) """.format( type=type, entry_point=entry_point, name=name, deps=','.join("pants('{0}')".format(dep) for dep in list(deps))) )) def create_jvm_app(path, name, type, binary, deps=()): self.add_to_build_file(path, dedent(""" {type}(name='{name}', dependencies=[pants('{binary}')], bundles={deps} ) """.format( type=type, name=name, binary=binary, deps=deps) )) add_to_build_file('common/a', 'a', 'target') add_to_build_file('common/b', 'b', 'target') self.add_to_build_file('common/c', dedent(""" java_library(name='c', sources=[], ) """)) add_to_build_file('common/d', 'd', 'python_library') create_python_binary_target('common/e', 'e', 'common.e.entry', 'python_binary') add_to_build_file('common/f', 'f', 'jvm_binary') add_to_build_file('common/g', 'g', 'jvm_binary', deps=['common/f:f']) self.create_dir('common/h') self.create_file('common/h/common.f') create_jvm_app('common/h', 'h', 'jvm_app', 'common/f:f', "[bundle(fileset='common.f')]") self.create_dir('common/i') self.create_file('common/i/common.g') create_jvm_app('common/i', 'i', 'jvm_app', 'common/g:g', "[bundle(fileset='common.g')]") add_to_build_file('overlaps', 'one', 'jvm_binary', deps=['common/h', 'common/i']) self.add_to_build_file('overlaps', dedent(""" java_library(name='two', dependencies=[pants('overlaps:one')], sources=[], ) """)) self.add_to_build_file('resources/a', dedent(""" resources( name='a_resources', sources=['a.resource'] ) """)) self.add_to_build_file('src/java/a', dedent(""" java_library( name='a_java', resources=[pants('resources/a:a_resources')] ) """)) self.add_to_build_file('src/java/a', dedent(""" target( name='a_dep', dependencies=[pants(':a_java')] ) """)) self.add_to_build_file('src/java/b', dedent(""" java_library( name='b_java', dependencies=[':b_dep'] ) target( name='b_dep', dependencies=[':b_lib'] ) java_library( name='b_lib', sources=[], ) """)) self.add_to_build_file('src/java/c', dedent(''' jar_library( name='c_jar_lib', jars=[ jar(org='org.pantsbuild.test', name='c_test', rev='1.0'), jar(org='org.pantsbuild.test', name='d_test', rev=''), ] ) ''')) # It makes no 
sense whatsoever to have a java_library that depends # on a Python library, but we want to ensure that depmap handles # cases like this anyway because there might be other cases which # do make sense (e.g. things that generate generic resources) self.add_to_build_file('src/java/java_depends_on_python', dedent(""" java_library( name='java_depends_on_python', dependencies=['common/d:d'] ) """)) def test_java_depends_on_python(self): self.assert_console_output_ordered( 'internal-src.java.java_depends_on_python.java_depends_on_python', ' internal-common.d.d', targets=[self.target('src/java/java_depends_on_python')] ) def test_empty(self): self.assert_console_output_ordered( 'internal-common.a.a', targets=[self.target('common/a')] ) self.assert_console_output_ordered( 'internal-common.b.b', targets=[self.target('common/b')], ) def test_java_library(self): self.assert_console_output_ordered( 'internal-common.c.c', targets=[self.target('common/c')] ) def test_python_library(self): self.assert_console_output_ordered( 'internal-common.d.d', targets=[self.target('common/d')] ) def test_python_binary(self): self.assert_console_output_ordered( 'internal-common.e.e', targets=[self.target('common/e')] ) def test_jvm_binary1(self): self.assert_console_output_ordered( 'internal-common.f.f', targets=[self.target('common/f')] ) def test_jvm_binary2(self): self.assert_console_output_ordered( 'internal-common.g.g', ' internal-common.f.f', targets=[self.target('common/g')] ) def test_jvm_app1(self): self.assert_console_output_ordered( 'internal-common.h.h', ' internal-common.f.f', targets=[self.target('common/h')] ) def test_jvm_app2(self): self.assert_console_output_ordered( 'internal-common.i.i', ' internal-common.g.g', ' internal-common.f.f', targets=[self.target('common/i')] ) def test_overlaps_one(self): self.assert_console_output_ordered( 'internal-overlaps.one', ' internal-common.h.h', ' internal-common.f.f', ' internal-common.i.i', ' internal-common.g.g', ' *internal-common.f.f', targets=[self.target('overlaps:one')] ) def test_overlaps_two(self): self.assert_console_output_ordered( 'internal-overlaps.two', ' internal-overlaps.one', ' internal-common.h.h', ' internal-common.f.f', ' internal-common.i.i', ' internal-common.g.g', ' *internal-common.f.f', targets=[self.target('overlaps:two')] ) def test_overlaps_two_minimal(self): self.assert_console_output_ordered( 'internal-overlaps.two', ' internal-overlaps.one', ' internal-common.h.h', ' internal-common.f.f', ' internal-common.i.i', ' internal-common.g.g', targets=[self.target('overlaps:two')], options={'minimal': True} ) def test_multi(self): self.assert_console_output_ordered( 'internal-common.g.g', ' internal-common.f.f', 'internal-common.h.h', ' internal-common.f.f', 'internal-common.i.i', ' internal-common.g.g', ' internal-common.f.f', targets=[self.target('common/g'), self.target('common/h'), self.target('common/i')] ) def test_path_to(self): self.assert_console_output_ordered( 'internal-overlaps.two', ' internal-overlaps.one', ' internal-common.i.i', ' internal-common.g.g', targets=[self.target('overlaps:two')], options={'path_to': 'internal-common.g.g'}, ) def test_resources(self): self.assert_console_output_ordered( 'internal-src.java.a.a_java', ' internal-resources.a.a_resources', targets=[self.target('src/java/a:a_java')] ) def test_resources_dep(self): self.assert_console_output_ordered( 'internal-src.java.a.a_dep', ' internal-src.java.a.a_java', ' internal-resources.a.a_resources', targets=[self.target('src/java/a:a_dep')] ) def 
test_intermediate_dep(self): self.assert_console_output_ordered( 'internal-src.java.b.b_java', ' internal-src.java.b.b_dep', ' internal-src.java.b.b_lib', targets=[self.target('src/java/b:b_java')] ) def test_graph(self): self.assert_console_output_ordered( 'digraph "common.h.h" {', ' node [shape=rectangle, colorscheme=set312;];', ' rankdir=LR;', ' "internal-common.h.h" [style=filled, fillcolor=1];', ' "internal-common.f.f" [style=filled, fillcolor=2];', ' "internal-common.h.h" -> "internal-common.f.f";', '}', targets=[self.target('common/h')], options={'graph': True} ) def test_graph_show_types(self): self.assert_console_output_ordered( 'digraph "common.h.h" {', ' node [shape=rectangle, colorscheme=set312;];', ' rankdir=LR;', ' "internal-common.h.h\\nJvmApp" [style=filled, fillcolor=1];', ' "internal-common.f.f\\nJvmBinary" [style=filled, fillcolor=2];', ' "internal-common.h.h\\nJvmApp" -> "internal-common.f.f\\nJvmBinary";', '}', targets=[self.target('common/h')], options={'graph': True, 'show_types': True} ) def test_tree(self): self.assert_console_output_ordered( '--internal-overlaps.two', ' |--internal-overlaps.one', ' | |--internal-common.h.h', ' | | |--internal-common.f.f', ' | |--internal-common.i.i', ' | | |--internal-common.g.g', ' | | | |--*internal-common.f.f', targets=[self.target('overlaps:two')], options={'tree': True} ) def test_jar_library_external(self): self.assert_console_output_ordered( 'org.pantsbuild.test-c_test-1.0', 'org.pantsbuild.test-d_test', targets=[self.target('src/java/c:c_jar_lib')], options={'external_only': True} )
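
# Illustrative note (a sketch of what the setUp helpers render, not an additional
# test): add_to_build_file('common/g', 'g', 'jvm_binary', deps=['common/f:f'])
# writes, modulo whitespace, the following stanza into common/g/BUILD:
#
#   jvm_binary(name='g',
#     dependencies=[pants('common/f:f')],
#   )
#
# which is why the expected depmap output for common/g nests
# ' internal-common.f.f' under 'internal-common.g.g'.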
import errno import json import os import random import string import socket from concurrent.futures import ThreadPoolExecutor from collections import deque, namedtuple from datetime import datetime, timedelta from tornado import gen from tornado import ioloop from tornado.log import app_log from tornado.httpclient import HTTPRequest, HTTPError, AsyncHTTPClient from tornado.httputil import url_concat import pytz import re import dockworker AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient") def sample_with_replacement(a, size=12): '''Get a random path. If Python had sampling with replacement built in, I would use that. The other alternative is numpy.random.choice, but numpy is overkill for this tiny bit of random pathing.''' return "".join([random.choice(a) for x in range(size)]) def new_user(): return sample_with_replacement(string.ascii_letters + string.digits) PooledContainer = namedtuple('PooledContainer', ['id', 'path']) class EmptyPoolError(Exception): '''Exception raised when a container is requested from an empty pool.''' pass class SpawnPool(): '''Manage a pool of precreated Docker containers.''' def __init__(self, proxy_endpoint, proxy_token, spawner, container_config, capacity, max_age, pool_name, static_files=None, static_dump_path=os.path.join(os.path.dirname(__file__), "static")): '''Create a new, empty spawn pool, with nothing preallocated.''' self.spawner = spawner self.container_config = container_config self.capacity = capacity self.max_age = max_age self.pool_name = pool_name self.container_name_pattern = re.compile('tmp\.([^.]+)\.(.+)\Z') self.proxy_endpoint = proxy_endpoint self.proxy_token = proxy_token self.available = deque() self.static_files = static_files self.static_dump_path = static_dump_path self._heart_beating = False def acquire(self): '''Acquire a preallocated container and returns its user path. An EmptyPoolError is raised if no containers are ready.''' if not self.available: raise EmptyPoolError() return self.available.pop() @gen.coroutine def adhoc(self, user): '''Launch a container with a fixed path by taking the place of an existing container from the pool.''' to_release = self.acquire() app_log.debug("Discarding container [%s] to create an ad-hoc replacement.", to_release) yield self.release(to_release, False) launched = yield self._launch_container(user=user, enpool=False) raise gen.Return(launched) @gen.coroutine def release(self, container, replace_if_room=True): '''Shut down a container and delete its proxy entry. Destroy the container in an orderly fashion. If requested and capacity is remaining, create a new one to take its place.''' try: app_log.info("Releasing container [%s].", container) yield [ self.spawner.shutdown_notebook_server(container.id), self._proxy_remove(container.path) ] app_log.debug("Container [%s] has been released.", container) except Exception as e: app_log.error("Unable to release container [%s]: %s", container, e) return if replace_if_room: running = yield self.spawner.list_notebook_servers(self.container_name_pattern, all=False) if len(running) + 1 <= self.capacity: app_log.debug("Launching a replacement container.") yield self._launch_container() else: app_log.info("Declining to launch a new container because [%i] containers are" + " already running, and the capacity is [%i].", len(running), self.capacity) @gen.coroutine def heartbeat(self): '''Examine the pool for any missing, stopped, or idle containers, and replace them. A container is considered "used" if it isn't still present in the pool. 
If no max_age is specified, an hour is used.''' if self._heart_beating: app_log.debug("Previous heartbeat is still active. Skipping this one.") return try: self._heart_beating = True app_log.debug("Heartbeat begun. Measuring current state.") diagnosis = Diagnosis(self.max_age, self.spawner, self.container_name_pattern, self.proxy_endpoint, self.proxy_token) yield diagnosis.observe() tasks = [] for id in diagnosis.stopped_container_ids: app_log.debug("Removing stopped container [%s].", id) tasks.append(self.spawner.shutdown_notebook_server(id, alive=False)) for path, id in diagnosis.zombie_routes: app_log.debug("Removing zombie route [%s].", path) tasks.append(self._proxy_remove(path)) unpooled_stale_routes = [(path, id) for path, id in diagnosis.stale_routes if id not in self._pooled_ids()] for path, id in unpooled_stale_routes: app_log.debug("Replacing stale route [%s] and container [%s].", path, id) container = PooledContainer(path=path, id=id) tasks.append(self.release(container, replace_if_room=True)) # Normalize the container count to its initial capacity by scheduling deletions if we're # over or scheduling launches if we're under. current = len(diagnosis.living_container_ids) under = xrange(current, self.capacity) over = xrange(self.capacity, current) if under: app_log.debug("Launching [%i] new containers to populate the pool.", len(under)) for i in under: tasks.append(self._launch_container()) if over: app_log.debug("Removing [%i] containers to diminish the pool.", len(over)) for i in over: try: pooled = self.acquire() app_log.debug("Releasing container [%s] to shrink the pool.", pooled.id) tasks.append(self.release(pooled, False)) except EmptyPoolError: app_log.warning("Unable to shrink: pool is diminished, all containers in use.") break yield tasks # Summarize any actions taken to the log. def summarize(message, list): if list: app_log.info(message, len(list)) summarize("Removed [%i] stopped containers.", diagnosis.stopped_container_ids) summarize("Removed [%i] zombie routes.", diagnosis.zombie_routes) summarize("Replaced [%i] stale containers.", unpooled_stale_routes) summarize("Launched [%i] new containers.", under) summarize("Removed [%i] excess containers from the pool.", over) app_log.debug("Heartbeat complete. The pool now includes [%i] containers.", len(self.available)) finally: self._heart_beating = False @gen.coroutine def _launch_container(self, user=None, enpool=True): '''Launch a new notebook server in a fresh container, register it with the proxy, and add it to the pool.''' if user is None: user = new_user() path = "user/" + user # This must match self.container_name_pattern or Bad Things will happen. # You don't want Bad Things to happen, do you? container_name = 'tmp.{}.{}'.format(self.pool_name, user) if not self.container_name_pattern.match(container_name): raise Exception("[{}] does not match [{}]!".format(container_name, self.container_name_pattern.pattern)) app_log.debug("Launching new notebook server [%s] at path [%s].", container_name, path) create_result = yield self.spawner.create_notebook_server(base_path=path, container_name=container_name, container_config=self.container_config) container_id, host_ip, host_port = create_result app_log.debug("Created notebook server [%s] for path [%s] at [%s:%s]", container_name, path, host_ip, host_port) # Wait for the server to launch within the container before adding it to the pool or # serving it to a user. 
yield self._wait_for_server(host_ip, host_port, path) http_client = AsyncHTTPClient() headers = {"Authorization": "token {}".format(self.proxy_token)} proxy_endpoint = "{}/api/routes/{}".format(self.proxy_endpoint, path) body = json.dumps({ "target": "http://{}:{}".format(host_ip, host_port), "container_id": container_id, }) app_log.debug("Proxying path [%s] to port [%s].", path, host_port) req = HTTPRequest(proxy_endpoint, method="POST", headers=headers, body=body) try: yield http_client.fetch(req) app_log.info("Proxied path [%s] to port [%s].", path, host_port) except HTTPError as e: app_log.error("Failed to create proxy route to [%s]: %s", path, e) container = PooledContainer(id=container_id, path=path) if enpool: app_log.info("Adding container [%s] to the pool.", container) self.available.append(container) raise gen.Return(container) @gen.coroutine def _wait_for_server(self, ip, port, path, timeout=10, wait_time=0.2): '''Wait for a server to show up within a newly launched container.''' app_log.info("Waiting for a container to launch at [%s:%s].", ip, port) loop = ioloop.IOLoop.current() tic = loop.time() # Docker starts listening on a socket before the container is fully launched. Wait for that, # first. while loop.time() - tic < timeout: try: socket.create_connection((ip, port)) except socket.error as e: app_log.warn("Socket error on boot: %s", e) if e.errno != errno.ECONNREFUSED: app_log.warn("Error attempting to connect to [%s:%i]: %s", ip, port, e) yield gen.Task(loop.add_timeout, loop.time() + wait_time) else: break # Fudge factor of IPython notebook bootup. # TODO: Implement a webhook in IPython proper to call out when the # notebook server is booted. yield gen.Task(loop.add_timeout, loop.time() + .5) # Now, make sure that we can reach the Notebook server. http_client = AsyncHTTPClient() req = HTTPRequest("http://{}:{}/{}".format(ip, port, path)) while loop.time() - tic < timeout: try: yield http_client.fetch(req) except HTTPError as http_error: code = http_error.code app_log.info("Booting server at [%s], getting HTTP status [%s]", path, code) yield gen.Task(loop.add_timeout, loop.time() + wait_time) else: break app_log.info("Server [%s] at address [%s:%s] has booted! Have at it.", path, ip, port) def _pooled_ids(self): '''Build a set of container IDs that are currently waiting in the pool.''' return set(container.id for container in self.available) @gen.coroutine def _proxy_remove(self, path): '''Remove a path from the proxy.''' url = "{}/api/routes/{}".format(self.proxy_endpoint, path.lstrip('/')) headers = {"Authorization": "token {}".format(self.proxy_token)} req = HTTPRequest(url, method="DELETE", headers=headers) http_client = AsyncHTTPClient() try: yield http_client.fetch(req) except HTTPError as e: app_log.error("Failed to delete route [%s]: %s", path, e) @gen.coroutine def copy_static(self): if(self.static_files is None): raise Exception("static_files must be set in order to dump them") container = self.available[0] app_log.info("Extracting static files from container {}".format(container.id)) tarball = yield self.spawner.copy_files(container.id, self.static_files) tar = open(os.path.join(self.static_dump_path, "static.tar"), "wb") tar.write(tarball.data) tar.close() app_log.debug("Static files extracted") class Diagnosis(): '''Collect and organize information to self-heal a SpawnPool. Measure the current state of Docker and the proxy routes and scan for anomalies so the pool can correct them. 
This includes zombie containers, containers that are running but not routed in the proxy, proxy routes that exist without a corresponding container, or other strange conditions.''' def __init__(self, cull_time, spawner, name_pattern, proxy_endpoint, proxy_token): self.spawner = spawner self.name_pattern = name_pattern self.proxy_endpoint = proxy_endpoint self.proxy_token = proxy_token self.cull_time = cull_time @gen.coroutine def observe(self): '''Collect Ground Truth of what's actually running from Docker and the proxy.''' results = yield { "docker": self.spawner.list_notebook_servers(self.name_pattern, all=True), "proxy": self._proxy_routes() } self.container_ids = set() self.living_container_ids = [] self.stopped_container_ids = [] self.zombie_container_ids = [] self.routes = set() self.live_routes = [] self.stale_routes = [] self.zombie_routes = [] # Sort Docker results into living and dead containers. for container in results["docker"]: id = container['Id'] self.container_ids.add(id) if container['Status'].startswith('Up'): self.living_container_ids.append(id) else: self.stopped_container_ids.append(id) cutoff = datetime.utcnow() - self.cull_time # Sort proxy routes into living, stale, and zombie routes. living_set = set(self.living_container_ids) for path, route in results["proxy"].items(): last_activity_s = route.get('last_activity', None) container_id = route.get('container_id', None) if container_id: result = (path, container_id) if container_id in living_set: try: last_activity = datetime.strptime(last_activity_s, '%Y-%m-%dT%H:%M:%S.%fZ') self.routes.add(result) if last_activity >= cutoff: self.live_routes.append(result) else: self.stale_routes.append(result) except ValueError as e: app_log.warning("Ignoring a proxy route with an unparsable activity date: %s", e) else: # The container doesn't correspond to a living container. self.zombie_routes.append(result) @gen.coroutine def _proxy_routes(self): routes = [] url = "{}/api/routes".format(self.proxy_endpoint) headers = {"Authorization": "token {}".format(self.proxy_token)} req = HTTPRequest(url, method="GET", headers=headers) http_client = AsyncHTTPClient() try: resp = yield http_client.fetch(req) results = json.loads(resp.body.decode('utf8', 'replace')) raise gen.Return(results) except HTTPError as e: app_log.error("Unable to list existing proxy entries: %s", e) raise gen.Return({})
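
# Illustrative wiring (a sketch with hypothetical endpoint, token and interval
# values, not part of the pool implementation): SpawnPool hands out precreated
# containers via acquire(), retires them via release(), and relies on heartbeat()
# being run periodically to replace stopped or stale containers and keep the pool
# at its configured capacity.
def example_wiring(spawner, container_config):
    pool = SpawnPool(proxy_endpoint="http://127.0.0.1:8001",
                     proxy_token="secret",
                     spawner=spawner,
                     container_config=container_config,
                     capacity=4,
                     max_age=timedelta(hours=1),
                     pool_name="demo")
    # Run the self-healing heartbeat every 30 seconds on the current IOLoop.
    ioloop.PeriodicCallback(pool.heartbeat, 30 * 1000).start()
    return pool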
import math import pynq import pytest import importlib import os import numpy as np from .mock_devices import MockMemoryMappedDevice ZYNQ_SLCR_OFFSET = 0xF8000000 ZU_LPD_OFFSET = 0xFF5E0000 ZU_FPD_OFFSET = 0xFD1A0000 class FakeUname: def __init__(self, machine): self.machine = machine def be_zynq(): return FakeUname(pynq.ps.ZYNQ_ARCH) def be_zu(): return FakeUname(pynq.ps.ZU_ARCH) def be_other(): return FakeUname('Invalid Arch') # Entries of the form PLL_OFFSET, CLK_REG ZYNQ_READ_PLLS = { 'arm_pll': [0x100, 0x0030_0520], 'ddr_pll': [0x104, 0x0030_0530], 'io0_pll': [0x108, 0x0030_0500], 'io1_pll': [0x108, 0x0030_0510], } ZYNQ_READ_CLKS = { 'fclk0_mhz': 0x170, 'fclk1_mhz': 0x180, 'fclk2_mhz': 0x190, 'fclk3_mhz': 0x1A0, } ZYNQ_CPU_PLLS = { 'arm0_pll': [0x100, 0x0200], 'arm1_pll': [0x100, 0x0210], 'ddr_pll': [0x104, 0x0220], 'io_pll': [0x108, 0x0230], } ZU_READ_PLLS = { 'iopll': (1, 0x20, 0x0050300), 'rpll': (1, 0x30, 0x0050302), 'dpll': (0, 0x2C, 0x0050303), } ZU_READ_CLKS = { 'fclk0_mhz': 0xc0, 'fclk1_mhz': 0xc4, 'fclk2_mhz': 0xc8, 'fclk3_mhz': 0xcC, } ZU_CPU_PLLS = { 'apll': (0x20, 0x200), 'dpll': (0x2C, 0x202), 'vpll': (0x38, 0x203), } @pytest.fixture def setup_zynq(monkeypatch): old_arch = pynq.ps.CPU_ARCH monkeypatch.setattr(os, 'uname', be_zynq) device = MockMemoryMappedDevice('zynq_clocks') pynq.Device.active_device = device slcr_buffer = device.mmap(ZYNQ_SLCR_OFFSET, 0x200) slcr_array = np.frombuffer(slcr_buffer, dtype='u4') new_ps = importlib.reload(pynq.ps) Clocks = new_ps.Clocks yield Clocks, slcr_array pynq.Device.active_device = None pynq.ps.CPU_ARCH = old_arch @pytest.fixture def setup_zu(monkeypatch): old_arch = pynq.ps.CPU_ARCH monkeypatch.setattr(os, 'uname', be_zu) device = MockMemoryMappedDevice('zu_clocks') pynq.Device.active_device = device lpd_buffer = device.mmap(ZU_LPD_OFFSET, 0x100) lpd_array = np.frombuffer(lpd_buffer, dtype='u4') fpd_buffer = device.mmap(ZU_FPD_OFFSET, 0x100) fpd_array = np.frombuffer(fpd_buffer, dtype='u4') new_ps = importlib.reload(pynq.ps) Clocks = new_ps.Clocks yield Clocks, lpd_array, fpd_array pynq.Device.active_device = None pynq.ps.CPU_ARCH = old_arch def split_clk_reg(reg_val): source = (reg_val >> 4) & 0x3 div0 = (reg_val >> 8) & 0x3F div1 = (reg_val >> 20) & 0x3F return source, div0, div1 def split_zu_reg(reg_val): source = (reg_val) & 0x07 div0 = (reg_val >> 8) & 0x3F div1 = (reg_val >> 16) & 0x3F return source, div0, div1 @pytest.mark.parametrize('pll_name', ZYNQ_READ_PLLS.keys()) @pytest.mark.parametrize('clk_name', ZYNQ_READ_CLKS.keys()) def test_zynq_pl(setup_zynq, pll_name, clk_name): pll_reg, clk_val = ZYNQ_READ_PLLS[pll_name] clk_reg = ZYNQ_READ_CLKS[clk_name] Clocks, slcr_array = setup_zynq slcr_array[pll_reg >> 2] = 30 << 12 # FBDIV of 30 (1500 MHz) slcr_array[clk_reg >> 2] = clk_val assert getattr(Clocks, clk_name) == 100 setattr(Clocks, clk_name, 50) source, div0, div1 = split_clk_reg(slcr_array[clk_reg >> 2]) assert div0 * div1 == 30 expected_source, _, _ = split_clk_reg(clk_val) assert expected_source == source @pytest.mark.parametrize('pll_name', ZU_READ_PLLS.keys()) @pytest.mark.parametrize('clk_name', ZU_READ_CLKS.keys()) def test_zu_pl(setup_zu, pll_name, clk_name): pll_region, pll_reg, clk_val = ZU_READ_PLLS[pll_name] clk_reg = ZU_READ_CLKS[clk_name] Clocks, lpd_array, fpd_array = setup_zu if pll_region: lpd_array[pll_reg >> 2] = 45 << 8 # FBDIV of 45 (1500 MHz) else: fpd_array[pll_reg >> 2] = 45 << 8 lpd_array[clk_reg >> 2] = clk_val assert math.isclose(getattr(Clocks, clk_name), 99.999) if pll_name == 'iopll': 
setattr(Clocks, clk_name, 50) source, div0, div1 = split_zu_reg(lpd_array[clk_reg >> 2]) assert div0 * div1 == 30 assert source == 0 @pytest.mark.parametrize('pll_name', ZU_READ_PLLS.keys()) @pytest.mark.parametrize('clk_name', ZU_READ_CLKS.keys()) def test_zu_pl_div2(setup_zu, pll_name, clk_name): pll_region, pll_reg, clk_val = ZU_READ_PLLS[pll_name] clk_reg = ZU_READ_CLKS[clk_name] Clocks, lpd_array, fpd_array = setup_zu if pll_region: lpd_array[pll_reg >> 2] = 0x10000 | (45 << 8) # FBDIV of 45 (1500 MHz) else: fpd_array[pll_reg >> 2] = 0x10000 | (45 << 8) lpd_array[clk_reg >> 2] = clk_val assert math.isclose(getattr(Clocks, clk_name), 49.9995) if pll_name == 'iopll': setattr(Clocks, clk_name, 25) source, div0, div1 = split_zu_reg(lpd_array[clk_reg >> 2]) assert div0 * div1 == 30 assert source == 0 def test_invalid_clkidx(setup_zynq): Clocks, slcr_array = setup_zynq with pytest.raises(ValueError): Clocks.get_pl_clk(-1) with pytest.raises(ValueError): Clocks.get_pl_clk(4) with pytest.raises(ValueError): Clocks.set_pl_clk(-1) with pytest.raises(ValueError): Clocks.set_pl_clk(4) def test_zynq_nearest(setup_zynq): pll_reg, clk_val = ZYNQ_READ_PLLS['arm_pll'] clk_reg = ZYNQ_READ_CLKS['fclk0_mhz'] Clocks, slcr_array = setup_zynq slcr_array[pll_reg >> 2] = 30 << 12 # FBDIV of 30 (1500 MHz) slcr_array[clk_reg >> 2] = clk_val with pytest.warns(UserWarning): Clocks.fclk0_mhz = 440 source, div0, div1 = split_clk_reg(slcr_array[clk_reg >> 2]) assert div0 * div1 == 3 def test_divider0(setup_zynq): pll_reg, clk_val = ZYNQ_READ_PLLS['arm_pll'] clk_reg = ZYNQ_READ_CLKS['fclk0_mhz'] Clocks, slcr_array = setup_zynq slcr_array[pll_reg >> 2] = 30 << 12 # FBDIV of 30 (1500 MHz) slcr_array[clk_reg >> 2] = clk_val Clocks.set_pl_clk(0, div0=5, clk_mhz=50) source, div0, div1 = split_clk_reg(slcr_array[clk_reg >> 2]) assert div0 == 5 assert div1 == 6 def test_divider1(setup_zynq): pll_reg, clk_val = ZYNQ_READ_PLLS['arm_pll'] clk_reg = ZYNQ_READ_CLKS['fclk0_mhz'] Clocks, slcr_array = setup_zynq slcr_array[pll_reg >> 2] = 30 << 12 # FBDIV of 30 (1500 MHz) slcr_array[clk_reg >> 2] = clk_val Clocks.set_pl_clk(0, div1=5, clk_mhz=50) source, div0, div1 = split_clk_reg(slcr_array[clk_reg >> 2]) assert div0 == 6 assert div1 == 5 def test_divider01(setup_zynq): pll_reg, clk_val = ZYNQ_READ_PLLS['arm_pll'] clk_reg = ZYNQ_READ_CLKS['fclk0_mhz'] Clocks, slcr_array = setup_zynq slcr_array[pll_reg >> 2] = 30 << 12 # FBDIV of 30 (1500 MHz) slcr_array[clk_reg >> 2] = clk_val Clocks.set_pl_clk(0, div0=3, div1=5, clk_mhz=50) source, div0, div1 = split_clk_reg(slcr_array[clk_reg >> 2]) assert div0 == 3 assert div1 == 5 def test_div_underflow(setup_zynq): pll_reg, clk_val = ZYNQ_READ_PLLS['arm_pll'] clk_reg = ZYNQ_READ_CLKS['fclk0_mhz'] Clocks, slcr_array = setup_zynq slcr_array[pll_reg >> 2] = 30 << 12 # FBDIV of 30 (1500 MHz) slcr_array[clk_reg >> 2] = clk_val with pytest.raises(ValueError): Clocks.set_pl_clk(0, div0=-1, div1=10) with pytest.raises(ValueError): Clocks.set_pl_clk(0, div0=10, div1=0) with pytest.raises(ValueError): Clocks.set_pl_clk(0, div0=64, div1=10) with pytest.raises(ValueError): Clocks.set_pl_clk(0, div0=10, div1=64) @pytest.mark.parametrize('pll_name', ZU_READ_PLLS.keys()) def test_zu_invalid_source(setup_zu, pll_name): pll_region, pll_reg, clk_val = ZU_READ_PLLS[pll_name] clk_reg = ZU_READ_CLKS['fclk0_mhz'] Clocks, lpd_array, fpd_array = setup_zu if pll_region: lpd_array[pll_reg >> 2] = 0x10000 | (45 << 8) | (4 << 20) else: fpd_array[pll_reg >> 2] = 0x10000 | (45 << 8) | (4 << 20) lpd_array[clk_reg >> 2] = 
clk_val with pytest.raises(ValueError): Clocks.fclk0_mhz @pytest.mark.parametrize('pll_name', ZU_CPU_PLLS.keys()) def test_zu_cpu(setup_zu, pll_name): pll_reg, clk_val = ZU_CPU_PLLS[pll_name] Clocks, lpd_array, fpd_array = setup_zu fpd_array[pll_reg >> 2] = 45 << 8 fpd_array[0x60 >> 2] = clk_val assert Clocks.cpu_mhz == 749.9925 @pytest.mark.parametrize('pll_name', ZYNQ_CPU_PLLS.keys()) def test_zynq_cpu(setup_zynq, pll_name): pll_reg, clk_val = ZYNQ_CPU_PLLS[pll_name] Clocks, slcr_array = setup_zynq slcr_array[pll_reg >> 2] = 30 << 12 # FBDIV of 30 (1500 MHz) slcr_array[0x120 >> 2] = clk_val assert Clocks.cpu_mhz == 750 def test_invalid_arch(monkeypatch): old_arch = pynq.ps.CPU_ARCH monkeypatch.setattr(os, 'uname', be_other) new_ps = importlib.reload(pynq.ps) Clocks = new_ps.Clocks with pytest.raises(RuntimeError): Clocks.fclk0_mhz def test_delayed_mmio(monkeypatch): old_arch = pynq.ps.CPU_ARCH try: monkeypatch.setattr(os, 'uname', be_zynq) device = MockMemoryMappedDevice('zynq_clocks') pynq.Device.active_device = device new_ps = importlib.reload(pynq.ps) Clocks = new_ps.Clocks assert len(device.regions) == 0 finally: pynq.Device.active_device = None pynq.ps.CPU_ARCH = old_arch
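
# Worked decode (a sketch, not an additional test case): the canned register value
# used for the Zynq PL clock tests above unpacks as shown here, assuming the 50 MHz
# reference implied by the "FBDIV of 30 (1500 MHz)" comments in the tests.
def _demo_decode():
    reg = 0x0030_0520                      # clk_val of the 'arm_pll' entry above
    source, div0, div1 = split_clk_reg(reg)
    assert (source, div0, div1) == (2, 5, 3)
    pll_mhz = 50 * 30                      # reference * FBDIV written by the fixtures
    assert pll_mhz / (div0 * div1) == 100  # the 100 MHz asserted in test_zynq_pl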
# Copyright (C) 2017 by Kevin L. Mitchell <klmitch@mit.edu> # # Licensed under the Apache License, Version 2.0 (the "License"); you # may not use this file except in compliance with the License. You may # obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. See the License for the specific language governing # permissions and limitations under the License. import abc import collections import six from state_analyzer import common from state_analyzer import transitions as trans @six.add_metaclass(abc.ABCMeta) class State(object): """ Abstract base class for all states. States must provide a ``name`` property, giving a string name for the state; a ``data`` property, which provides data for serialization to YAML (and which may raise ``state_analyzer.common.Synthetic`` for synthetic states); and a ``dot`` property, which provides the representation of the state which is acceptable as node input for the GraphViz package. They must also provide a ``_state_key()`` class method, which will receive a dictionary of arguments--which may be modified--and must return a hashable object which will be used as a dictionary key. They must also provide a ``_state_init()`` instance method, which will receive keyword arguments (as modified by ``_state_key()``), and must initialize the state instance. Note that ``State`` subclasses MUST NOT have an ``__init__()`` method. """ # Storage for all states _statedb = {} @classmethod def states(cls, state_class=None): """ Obtain a list of states of a given type, sorted by the state name. :param state_class: The class of states to return. If not provided, defaults to the class the method is called on. This uses an ``isinstance()`` test, so this argument can be a tuple or a superclass. :returns: A list of the desired states, in order by the state's name. :rtype: ``list`` of ``State`` """ # Return a sorted list of the states, sorting by name return sorted( (st for st in cls._statedb.values() if isinstance(st, state_class or cls)), key=lambda x: x.name, ) def __new__(cls, **kwargs): """ Obtain a ``State`` instance. States are singleton classes: there can only be one state of a given description. This method searches a database of states for states matching the description contained in ``kwargs``, creating and initializing a new one if needed. :param **kwargs: Keyword arguments describing the state. :returns: A corresponding ``State`` instance matching the description. """ # Obtain a hashable key for described state key = cls._state_key(kwargs) # Is it in the state database? if key not in cls._statedb: # Create a new one obj = super(State, cls).__new__(cls) # Initialize the object obj.key = key obj.transitions = [] obj._state_init(**kwargs) # Save it cls._statedb[key] = obj # Return the state return cls._statedb[key] @abc.abstractmethod def _state_key(cls, kwargs): """ Given a dictionary describing a state, compute and return an object suitable for use as a dictionary key. :param dict kwargs: Dictionary of keyword arguments passed to the constructor. This may be modified for the benefit of a possible later call to ``_state_init()``. :returns: A hashable object suitable for use as a dictionary key. """ pass # pragma: no cover @abc.abstractmethod def _state_init(self, **kwargs): """ Initialize a ``State`` instance. 
Subclasses MUST implement this method in lieu of an ``__init__()`` method, which MUST NOT be implemented (as ``__init__()`` could be called on an already initialized instance). :param **kwargs: Keyword arguments passed to the constructor, as optionally modified by ``_state_key()``. """ pass # pragma: no cover @abc.abstractproperty def name(self): """ A textual name for the state. """ pass # pragma: no cover @abc.abstractproperty def data(self): """ A dictionary suitable for serialization to an input file. It must be possible to use this dictionary as keyword arguments for a ``State`` constructor. :raises state_analyzer.common.Synthetic: May be raised if the state is a synthetic state that does not need to be serialized to a YAML representation. """ pass # pragma: no cover @abc.abstractproperty def dot(self): """ A string which provides the representation of the state which is acceptable as node input for the GraphViz package. """ pass # pragma: no cover class StartState(State): """ Represents a "start" state. Start states precede Humboldt states and provide an anonymous arrow pointing in to the corresponding Humboldt start state. """ @classmethod def _state_key(cls, kwargs): """ Given a dictionary describing a state, compute and return an object suitable for use as a dictionary key. :param dict kwargs: Dictionary of keyword arguments passed to the constructor. This may be modified for the benefit of a possible later call to ``_state_init()``. :returns: A hashable object suitable for use as a dictionary key. """ return id(kwargs['target']) def _state_init(self, target, group, send=False): """ Initialize a ``State`` instance. Subclasses MUST implement this method in lieu of an ``__init__()`` method, which MUST NOT be implemented (as ``__init__()`` could be called on an already initialized instance). :param target: The target of the transition from the start state. This will be a ``HumboldtState`` marked as a start state. :type target: ``HumboldtState`` :param str group: The start state group. :param bool send: Whether the starting connection state should be communicated to the other end of the connection. Defaults to ``False``. """ self.target = target self.group = group self.transitions.append(trans.StartTransition(self, target, send)) @common.CachedProperty def name(self): """ A textual name for the state. """ return 'start%03d' % self.target.seq @property def data(self): """ A dictionary suitable for serialization to an input file. It must be possible to use this dictionary as keyword arguments for a ``State`` constructor. :raises state_analyzer.common.Synthetic: May be raised if the state is a synthetic state that does not need to be serialized to a YAML representation. """ raise common.Synthetic() @common.CachedProperty def dot(self): """ A string which provides the representation of the state which is acceptable as node input for the GraphViz package. """ return '"%s" [shape=none,label=""];' % self.name class HumboldtState(State): """ Represents a state that Humboldt may be in. These states are primarily composed of a set of state flags and a status code, as well as a connection mode which is never communicated to the other end of a connection. Additional properties include flags indicating whether the state is a start state (``start``) or an acceptance state (``accept``). Acceptance states indicate states where the connection is fully authenticated and secured. 
""" # Used to assign sequential indices to states _cnt = 0 _cnt_used = set() @classmethod def _state_key(cls, kwargs): """ Given a dictionary describing a state, compute and return an object suitable for use as a dictionary key. This implementation canonicalizes the ``flags`` keyword argument to a ``frozenset``, as it is a component of the hashable key. :param dict kwargs: Dictionary of keyword arguments passed to the constructor. This may be modified for the benefit of a possible later call to ``_state_init()``. :returns: A hashable object suitable for use as a dictionary key. """ # Canonicalize flags kwargs['flags'] = set(kwargs['flags']) # Return a hashable key return (kwargs['mode'], frozenset(kwargs['flags']), kwargs['status']) def _state_init(self, mode, flags, status, **kwargs): """ Initialize a ``State`` instance. Subclasses MUST implement this method in lieu of an ``__init__()`` method, which MUST NOT be implemented (as ``__init__()`` could be called on an already initialized instance). :param str mode: The connection mode. :param frozenset flags: The connection flags. :param str status: The connection status. :param **kwargs: Additional keyword arguments passed to the constructor, as optionally modified by ``_state_key()``. These are generally keyword arguments intended for the ``configure()`` method. """ self.mode = mode self.flags = flags self.status = status self.start = False self.start_send = False self.accept = False def configure(self, start=None, start_send=None, accept=None, transitions=None, seq=None, **kwargs): """ Used to configure a state after initialization. This ensures that state references within transitions cannot alter certain canonical properties, such as whether the state is a start state, while still ensuring that forward references to a state work. :param str start: The start state group. If not provided, the state is considered not to be a start state. :param bool start_send: Whether the starting state should be sent to the other end of the connection when the state is entered. If not provided, the default (``False``) is preserved. :param bool accept: Whether the state is an accepting state. If not provided, the default (``False``) is preserved. :param transitions: A list of transitions. Each element of the list must be a dictionary acceptable for use as keyword arguments to the ``state_analyzer.transitions.HumboldtTransition`` constructor. :type transitions: ``list`` of ``dict`` :param int seq: An integer sequence number to assign to the state. If not provided, one will be automatically assigned based on a counter. :param **kwargs: Additional keyword arguments passed to the method. These are generally keyword arguments intended for the constructor. """ # Assign a sequence number if seq is not None and seq not in self._cnt_used: self.seq = seq self._cnt_used.add(seq) else: # Use an automatically assigned sequence number seq = self.seq # Set the start and accept flags if start is not None: self.start = start if start_send is not None: self.start_send = start_send if accept is not None: self.accept = accept # Set up the transitions if transitions: for tr_data in transitions: self.transitions.append( trans.HumboldtTransition(self, **tr_data) ) # Set up the start state as well if self.start: StartState(target=self, group=self.start, send=self.start_send) # Clear the caches del self.data del self.dot @common.CachedProperty def seq(self): """ Obtain the state's sequence number. 
Sequence numbers are lazy binding to ensure that the sequence number corresponds to the list index. """ # Look for a free sequence number while self.__class__._cnt in self._cnt_used: self.__class__._cnt += 1 # Assign the next available sequence number seq = self.__class__._cnt self._cnt_used.add(seq) # Increment for the next sequence assignment self.__class__._cnt += 1 return seq @common.CachedProperty def name(self): """ A textual name for the state. """ return 'state%03d' % self.seq @common.CachedProperty def basic_data(self): """ A dictionary suitable for serialization to an input file. This must contain only the basic data about the state. This is extended by the ``data`` property, but used directly by the ``state_analyzer.transitions.HumboldtTransition.data`` property. """ return collections.OrderedDict([ ('seq', self.seq), ('mode', self.mode), ('flags', self.flags), ('status', self.status), ]) @common.CachedProperty def data(self): """ A dictionary suitable for serialization to an input file. It must be possible to use this dictionary as keyword arguments for a ``State`` constructor. :raises state_analyzer.common.Synthetic: May be raised if the state is a synthetic state that does not need to be serialized to a YAML representation. """ data = self.basic_data.copy() if self.start: data['start'] = self.start if self.start_send: data['start_send'] = self.start_send if self.accept: data['accept'] = self.accept if self.transitions: data['transitions'] = [] for tr in self.transitions: try: data['transitions'].append(tr.data) except common.Synthetic: pass return data @common.CachedProperty def dot(self): """ A string which provides the representation of the state which is acceptable as node input for the GraphViz package. """ return ( '"%(name)s" [style=filled,label=<' '<table bgcolor="white">' '<tr>' '<td align="center" colspan="2" bgcolor="black">' '<font color="white"><b>%(seq)d</b></font>' '</td>' '</tr>' '<tr>' '<td align="right"><b>Mode</b></td>' '<td align="left">%(mode)s</td>' '</tr>' '<tr>' '<td align="right"><b>Flags</b></td>' '<td align="left">%(flags)s</td>' '</tr>' '<tr>' '<td align="right"><b>Status</b></td>' '<td align="left">%(status)s</td>' '</tr>' '</table>' '>%(peripheries)s];' ) % { 'seq': self.seq, 'name': self.name, 'mode': self.mode, 'flags': ', '.join(sorted(self.flags)), 'status': self.status, 'peripheries': ',peripheries=2' if self.accept else '', }
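
# Illustrative sketch (hypothetical mode/flags/status values, not part of the
# analyzer): because State.__new__ keys instances by _state_key(), constructing
# the same description twice returns the same object.
def _demo_singleton():
    a = HumboldtState(mode='client', flags=['tls'], status='OK')
    b = HumboldtState(mode='client', flags=['tls'], status='OK')
    assert a is b                         # same key -> same singleton instance
    assert a.name == 'state%03d' % a.seq  # names derive from the lazy sequence number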
#!/usr/bin/env python3 # -+-coding: utf-8 -+- """ """ #-------------------------------------------- # Authors: Frank Boers <f.boers@fz-juelich.de> # #-------------------------------------------- # Date: 20.01.20 #-------------------------------------------- # License: BSD (3-clause) #-------------------------------------------- # Updates #-------------------------------------------- #import copy import os,os.path as op import warnings import time,datetime import numpy as np from distutils.dir_util import mkpath import matplotlib.pyplot as plt import mne from mne.preprocessing import find_ecg_events, find_eog_events from jumeg.base.jumeg_base import jumeg_base as jb from jumeg.base.jumeg_base import JUMEG_SLOTS from jumeg.base.jumeg_base_config import JuMEG_CONFIG as jCFG from jumeg.base import jumeg_logger logger = jumeg_logger.get_logger() __version__= "2020.04.22.001" class ARTEFACT_EVENTS(JUMEG_SLOTS): """ artefact event dict: ch_name: str or list of str event_id: int or list of int tmin: float tmin is s tmax: float tmax in s Example: -------- ecg: ch_name: "ECG" event_id: 999 tmin: -0.4 tmax: 0.4 eog: ch_name: ['EOG ver','EOG hor'] event_id: [997,998] tmin: -0.4 tmax: 0.4 import mne,logging from mne.preprocessing import find_ecg_events, find_eog_events logger = logging.getLogger("jumeg") #--- find ECG ECG = ARTEFACT_EVENTS(raw=raw,ch_name="ECG",event_id=999,tmin=-0.4,tmax=0.4,_call = find_ecg_events) ECG.find_events(raw=raw,**config.get("ecg")) EOG.GetInfo(debug=True) #--- find EOG EOG = ARTEFACT_EVENTS(raw=raw,ch_name=['EOG ver','EOG hor'],event_id=[997,998],tmin=-0.4,tmax=0.4, _call = find_eog_events) EOG.find_events(raw=raw,**config.get("eog")) EOG.GetInfo(debug=True) """ __slots__ = ["raw","ch_name","set_annotations","event_id","events","tmin","tmax","verbose","debug","_call"] def __init__(self,**kwargs): super().__init__(**kwargs) self.init(**kwargs) #--- default for set annotations = True self.set_annotations = kwargs.get("set_annotations",True) def find_events(self,**kwargs): """ raw: ch_name: set_annotations: event_id: tmin: tmax: verbose: debug: parameter for mne.preprocessing find_ecg_events, find_eog_events event_id=999, ch_name=None, tstart=0.0, l_freq=5, h_freq=35, qrs_threshold='auto', filter_length='10s', return_ecg=False, reject_by_annotation=None, verbose=None events={ ch_name: { events: <onsets> or <mne.events>, # [onsets,offsets,id] pulse: <event counts>} :return: if set annotations raw.annotations """ self.update(**kwargs) self.events = dict() if isinstance(self.ch_name,(list)): channels = [ *self.ch_name ] evt_id = [ *self.event_id ] else: channels = [ self.ch_name ] evt_id = [ self.event_id ] while len(channels): ch_name = channels.pop() event_id = evt_id.pop() if ch_name not in self.raw.info['ch_names']: continue self.events[ch_name]= dict() self.events[ch_name]["index"] = self.raw.ch_names.index(ch_name) res = self._call(self.raw,event_id,ch_name=ch_name,verbose=self.verbose) if isinstance(res[1],(np.ndarray)): self.events[ch_name]["events"] = res self.events[ch_name]["pulse"] = self.events[ch_name]["events"].shape[0] else: self.events[ch_name]["events"] = res[0] self.events[ch_name]["pulse"] = res[2] if self.set_annotations: return self.update_annotations() return None def update_annotations(self,save=False): """ update raw.annotattions with artefact events e.g.: ECG,EOG save: save raw with annotations Talse return annotations """ evt_annot = None try: raw_annot = self.raw.annotations #.copy() #logger.info("Input Annotations in RAW obj:\n -> 
{}".format(raw_annot)) orig_time = raw_annot.orig_time except: raw_annot = None orig_time = None #self.raw.info.get("meas_date",None) #self.raw.times[0]) #--- store event info into raw.anotations # time_format = '%Y-%m-%d %H:%M:%S.%f' for k in self.events.keys(): msg = ["update raw.annotations: {}".format(k)] onset = self.events[k]['events'][:,0] / self.raw.info["sfreq"] #onset -= self.tmin duration = np.ones( onset.shape[0] ) / self.raw.info["sfreq"] # one line in raw.plot evt_annot = mne.Annotations(onset=onset.tolist(), duration=duration.tolist(), description=k, # [condition for x in range(evt["events"].shape[0])], orig_time=orig_time) if raw_annot: msg.append(" --> found mne.annotations in RAW:\n -> {}".format(raw_annot)) #--- clear old annotations kidx = np.where( raw_annot.description == k)[0] # get index if kidx.any(): msg.append(" -> delete existing annotation {} counts: {}".format(k, kidx.shape[0]) ) raw_annot.delete(kidx) self.raw.set_annotations( raw_annot + evt_annot) raw_annot = self.raw.annotations else: self.raw.set_annotations(evt_annot) raw_annot = self.raw.annotations if save: f = jb.get_raw_filename(self.raw) fanato = f.replace( "-raw.fif","-anato.csv") self.raw.annotations.save( fanato ) msg.append("storing mne.annotations in RAW obj:\n -> {}".format(self.raw.annotations)) logger.info("\n".join(msg)) return self.raw.annotations def GetInfo(self,debug=False): if debug: self.debug=True if not isinstance(self.events,(dict)): logger.warning( "!!! --> Artefact Events: not events found !!!" ) return msg = [" --> Artefact Events:"] for k in self.events.keys(): msg.extend( [" --> {}".format(k)," -> pulse: {}".format( self.events[k]["pulse"] ) ] ) if self.debug: msg.append( " -> events:\n{}".format( self.events[k]["events"] ) ) msg.append( "-"*25) logger.info( "\n".join(msg) ) class CalcSignal(JUMEG_SLOTS): def __init__(self,**kwargs): super().__init__(**kwargs) self.init(**kwargs) def calc_rms(self,data,average=None,rmsmean=None): ''' Calculate the rms value of the signal. Ported from Dr. J. Dammers IDL code. ''' # check input sz = np.shape(data) nchan = np.size(sz) # calc RMS if nchan == 1: ntsl = sz[0] return np.sqrt(np.sum(data ** 2) / ntsl) elif nchan == 2: ntsl = sz[1] powe = data ** 2 if average: return np.sqrt(np.sum(np.sum(powe,1) / nchan) / ntsl) return np.sqrt(sum(powe,2) / ntsl) return -1 def calc_performance(self,evoked_raw,evoked_clean): ''' Gives a measure of the performance of the artifact reduction. Percentage value returned as output. ''' diff = evoked_raw.data - evoked_clean.data # ?? rms_diff = self.calc_rms(diff,average=1) rms_meg = self.calc_rms(evoked_raw.data,average=1) arp = (rms_diff / rms_meg) * 100.0 return np.round(arp) def _calc_signal(self,raw,events,event_id=None,tmin=None,tmax=None,picks=None): """ calc signal from raw -> get epochs -> average :param raw: :param events : mne.events :param event_id: :param tmin: :param tmax: :param picks: :return: signal, min/max-range, times """ signal = None range = None times = None if not isinstance(picks,(list,np.ndarray)): picks = jb.picks.meg_nobads(raw) #--- RAW mk epochs + average ep = mne.Epochs(raw,events,event_id=event_id,tmin=tmin,tmax=tmax,picks=picks) if len(picks) > 1: avg = ep.average() times = avg.times data = avg._data range = [data.min(axis=0),data.max(axis=0)] signal = np.average(data,axis=0).flatten() else: # ref channel e.g. 
ECG, EOG as np.array signal = np.average(ep.get_data(),axis=0).flatten() range = [signal.min(),signal.max()] return signal,range,times def _calc_avg(self,raw,events,event_id=None,tmin=None,tmax=None,picks=None): """ calc signal from raw -> get epochs -> average :param raw: :param events : mne.events :param event_id: :param tmin: :param tmax: :param picks: :return: signal, min/max-range, times """ signal = None range = None times = None if not isinstance(picks,(list,np.ndarray)): picks = jb.picks.meg_nobads(raw) #--- RAW mk epochs + average ep = mne.Epochs(raw,events,event_id=event_id,tmin=tmin,tmax=tmax,picks=picks) if len(picks) > 1: avg = ep.average() times = avg.times data = avg._data signal = data.T else: # ref channel e.g. ECG, EOG as np.array signal = np.average(ep.get_data(),axis=0).flatten() range = [signal.min(),signal.max()] return signal,range,times def _calc_gfp(self,raw,events,event_id=None,tmin=None,tmax=None,picks=None): """ calc signal from raw -> get epochs -> average :param raw: :param events : mne.events :param event_id: :param tmin: :param tmax: :param picks: :return: signal, min/max-range, times """ signal = None range = None times = None if not isinstance(picks,(list,np.ndarray)): picks = jb.picks.meg_nobads(raw) #--- RAW mk epochs + average ep = mne.Epochs(raw,events,event_id=event_id,tmin=tmin,tmax=tmax,picks=picks) if len(picks) > 1: avg = ep.average() times = avg.times signal = np.sum(avg._data ** 2,axis=0) else: # ref channel e.g. ECG, EOG as np.array signal = np.average(ep.get_data(),axis=0).flatten() range = [signal.min(),signal.max()] return signal,range,times def _calc_ylimits(self,ranges=None,factor=1.0,offset=0.1): """ ranges: list of min max np.arrays :param factor: :param offset: e.g 0.1 => +-10% :return: min,max """ r = np.concatenate(ranges) min = r.min() * factor max = r.max() * factor # return min - (abs(min) * offset), max + (abs(max) * offset) return min - offset, max + offset def _calc_data(self,raw,raw_clean,evt,event_id=999,tmin=-0.4,tmax=0.4,picks=None,type="avg"): """ :param raw: :param raw_clean: :param evt: events from annotation :param event_id: :param tmin: :param tmax: :param picks: :param type: avg,gfp,signal :return: sig_raw,sig_clean,range,t """ if type == "gfp": #--- RAW mk epochs + average sig_raw,range_raw,t = self._calc_gfp(raw,evt,event_id=event_id,tmin=tmin,tmax=tmax,picks=picks) #--- RAW clean mk epochs + average sig_cln,range_cln,_ = self._calc_gfp(raw_clean,evt,event_id=event_id,tmin=tmin,tmax=tmax,picks=picks) if type == "avg": #--- RAW mk epochs + average sig_raw,range_raw,t = self._calc_avg(raw,evt,event_id=event_id,tmin=tmin,tmax=tmax,picks=picks) #--- RAW clean mk epochs + average sig_cln,range_cln,_ = self._calc_avg(raw_clean,evt,event_id=event_id,tmin=tmin,tmax=tmax,picks=picks) else: #--- RAW mk epochs + average sig_raw,range_raw,t = self._calc_signal(raw,evt,event_id=event_id,tmin=tmin,tmax=tmax,picks=picks) #--- RAW clean mk epochs + average sig_cln,range_cln,_ = self._calc_signal(raw_clean,evt,event_id=event_id,tmin=tmin,tmax=tmax,picks=picks) range = [range_raw,range_cln] return sig_raw,sig_cln,range,t class JuMEG_ICA_PERFORMANCE_PLOT(CalcSignal): __slots__ = ["raw","plot_path","raw_clean","ch_name","event_id","picks","tmin","tmax","title","colors","alpha","grid","show", "scale","offset","fontsize","_n_cols","n_rows","idx","plot_ypos","_figure","figsize","type","dpi","orientation","fout", "plot_extention","verbose","set_title","text","save_as_png","save_as_fig"] """ plotting ica performance plots as png """ def 
__init__(self,**kwargs): super().__init__() self.init(**kwargs) self.type = "avg" # type = "avg" # gfp,avg,sig self.n_rows = 2 self.n_cols = 1 self.idx = 1 self.set_title = True self.tmin = -0.4 self.tmax = 0.4 self.alpha = 0.33 self.offset = 0.15 self.fontsize = 12 #self.figsize = (11.69,8.27) self.figsize = (16.0,9.0) self.dpi = 300 self.orientation = 'portrait' #"landscape" self.plot_extention = ".png" self.grid = True self.show = False self.save = True self.colors = ["black","yellow","red","magenta","green"] self.scale = { "raw":{ "factor":10.0 ** 15,"unit":"fT" },"ref":{ "factor":10.0 ** 3,"unit":"mV" } } self._update_from_kwargs(**kwargs) def init_plot_parameter(self): #--- A4 landscape plt.rc('figure',figsize=self.figsize,autolayout=True) plt.rcParams.update({ 'font.size':self.fontsize }) plt.subplots_adjust(left=0.1,right=0.95,bottom=0.05,top=0.95,hspace=0.35) plt.rcParams['savefig.facecolor'] = "0.9" @property def figure(self): return self._figure def _plot(self,ax,t,data,ylabel,color,range=None,range_color="cyan",alpha=0.3): ax.plot(t,data,color=color) if range: ax.fill_between(t,range[0],y2=range[1],color=range_color,alpha=alpha) ax.set_xlabel("[s]") ax.set_xlim(t[0],t[-1]) ax.set_ylabel(ylabel) ax.grid(True) def clear(self): try: if self.figure: plt.close('all') self._figure = None except: pass #s = super() #if hasattr(s, "clear"): # s.clear() def plot(self,**kwargs): #raw=None,raw_clean=None,ch_name="ECG",event_id=999,picks=None,tmin=-0.4,tmax=0.4,title=None, # colors=["black","yellow","red","magenta","green"],alpha=0.33,grid=True,show=False, # scale={"raw":{"factor":10.0**15,"unit":"fT"},"ref":{"factor":10.0**3,"unit":"mV"}}, # offset=0.1,fontsize=12): """ :param raw: :param raw_clean: :param ch_name: :param event_id: :param picks: :param tmin: :param tmax: :param colors: :param alpha: :param grid: :param show: :param scale: {"raw":{"factor":10**12,"unit":"pT"},"ref":{"factor":10**3,"unit":"mV"}}, :param offset: 0.1 :return: """ self._update_from_kwargs(**kwargs) logger.info("RAW annotations: {}".format(self.raw.annotations)) #--- get epochs calc avgs + ref annotat = mne.events_from_annotations(self.raw,event_id={ self.ch_name:self.event_id },use_rounding=True,chunk_duration=None) if not annotat: logger.error("!!! 
ERROR No MNE Annotations found: {}\n".format(jb.get_raw_filename(self.raw))) return None evt = annotat[0] counts = evt.shape[0] sig_raw,sig_clean,range,t = self._calc_data(self.raw,self.raw_clean,evt,event_id=self.event_id,tmin=self.tmin,tmax=self.tmax,picks=self.picks) #--- ref channel e.g.: ECG sig_ref,_,_ = self._calc_signal(self.raw,evt,event_id=self.event_id,tmin=self.tmin,tmax=self.tmax,picks=jb.picks.labels2picks(self.raw,self.ch_name)) if not self.figure: self.init_plot_parameter() self._figure = plt.figure() #self.figure.suptitle(os.path.basename(jb.get_raw_filename(self.raw)),fontsize=12) #--- subplot(nrows,ncols,idx) ax1 = plt.subplot(self.n_rows,self.n_cols,self.idx) #--- sig raw scl = self.scale.get("raw") ylim = self._calc_ylimits(ranges=range,factor=scl.get("factor"),offset=self.offset) self._plot(ax1,t,sig_raw * scl.get("factor"),scl.get("unit"),"black") #--- sig clean ax2 = plt.subplot(self.n_rows,self.n_cols,self.idx + self.n_cols) self._plot(ax2,t,sig_clean * scl.get("factor"),scl.get("unit"),"black") #--- scl = self.scale.get("ref") ax3 = ax1.twinx() # instantiate a second axes that shares the same x-axis color = 'tab:blue' self._plot(ax3,t,sig_ref * scl.get("factor"),scl.get("unit"),"red") ax3.tick_params(axis='y',labelcolor=color) ax3.legend([self.ch_name +" cnts {}".format(counts)], loc=2,prop={'size':8}) ax4 = ax2.twinx() # instantiate a second axes that shares the same x-axis color = 'tab:blue' self._plot(ax4,t,sig_ref * scl.get("factor"),scl.get("unit"),"green") ax4.tick_params(axis='y',labelcolor=color) ax4.legend(["Clean "+self.ch_name + " cnts {}".format(counts)],loc=2,prop={ 'size':8 }) ax1.set_ylim(ylim[0],ylim[1]) ax2.set_ylim(ylim[0],ylim[1]) #if self.save: # self.save_figure() if self.show: plt.ion() self.figure.tight_layout() plt.show() return self.figure def save_figure(self,**kwargs): """ :param kwargs: :return: fig """ self._update_from_kwargs(**kwargs) self.figure.tight_layout() #plt.tight_layout(pad=0.4,w_pad=0.5,h_pad=1.0) if self.fout: fout = self.fout if not fout.endswith(self.plot_extention): fout += self.plot_extention else: fout = "test"+self.plot_extention if self.plot_path: if jb.isPath(self.plot_path,mkdir=True): fout = os.path.basename(fout) fout = os.path.join(self.plot_path,fout) if self.set_title: txt = os.path.basename(fout).rsplit(".",1)[0] if self.text: txt+= " " +self.text self.figure.suptitle(txt,fontsize=10,y=0.02,x=0.05,ha="left") elif self.text: self.figure.suptitle(self.text,fontsize=10,y=0.02,x=0.05,ha="left") #--- save img if self.save: self.figure.savefig(fout,dpi=self.dpi,orientation=self.orientation) logger.info("done saving plot: " +fout) self.fout=fout class JuMEG_ICA_PERFORMANCE(JUMEG_SLOTS): """ find ecg,eog artifacts in raw ->use jumeg or mne make mne.anotations prefromance check init array of figs : overview, n chops for ECg,EOG performance for each chop : avg epochs => ECG plot raw, raw_cleaned evoked ,ECG signal, performance => EOG plot raw, raw_cleaned evoked ,EOG signal, performance plot performance to mne.report jIP = JuMEG_ICA_PERFORMANCE(raw=raw,raw_clean=raw_ar) """ # raw=raw,path=path,fname=raw_fname,config=CFG.GetDataDict("ica") __slots__ = ["raw","path","fname","config","n_figs","_EOG","_ECG","_PLOT","picks","use_jumeg","ecg","eog","verbose"] def __init__(self,**kwargs): super().__init__(**kwargs) self._init(**kwargs) self._ECG = ARTEFACT_EVENTS(raw=self.raw,ch_name="ECG",event_id=999,tmin=-0.4,tmax=0.4,_call=find_ecg_events) self._EOG = ARTEFACT_EVENTS(raw=self.raw,ch_name=['EOG ver','EOG 
hor'],event_id=[997,998],tmin=-0.4,tmax=0.4, _call=find_eog_events) self._PLOT = JuMEG_ICA_PERFORMANCE_PLOT(**kwargs) @property def ECG(self): return self._ECG @property def EOG(self): return self._EOG @property def Plot(self): return self._PLOT def _mklists(self,obj,channels=list(),event_ids=list()): """ :param obj: :param channels: :param event_ids: :return: """ if isinstance(obj.ch_name,(list)): channels.extend(obj.ch_name) event_ids.extend(obj.event_id) else: channels.append(obj.ch_name) event_ids.append(obj.event_id) return channels,event_ids def plot(self,**kwargs): """ plotting all ref channels e.g. ECG,EOGver,EOGhor |EOG |EOGver|EOGhor| ------------------- |clean|clean |clean| :param kwargs: :return: """ self._update_from_kwargs(**kwargs) self._PLOT._update_from_kwargs(**kwargs) #--- init performance plot self.Plot.idx = 1 idx = 1 ch_names = [] ids = [] self._mklists(self.ECG,channels=ch_names,event_ids=ids) self._mklists(self.EOG,channels=ch_names,event_ids=ids) #self.EOG.GetInfo(debug=True) self.Plot.n_cols = len(ch_names) self.Plot.n_rows = 2 for obj in [self.ECG,self.EOG]: ch_names = [] ids = [] self._mklists(obj,channels=ch_names,event_ids=ids) for i in range(len(ch_names)): self.Plot.plot(ch_name=ch_names[i],event_id=ids[i],picks=self.picks,tmin=obj.tmin,tmax=obj.tmax) idx += 1 self.Plot.idx = idx #self.Plot.figure.show() self.Plot.save_figure(save=True) self.Plot.clear() return self.Plot.fout def test1(): #--- init/update logger jumeg_logger.setup_script_logging(logger=logger) raw = None stage = "$JUMEG_PATH_LOCAL_DATA/exp/MEG94T/mne" fcfg = os.path.join(stage,"meg94t_config01.yaml") fpath = "206720/MEG94T0T2/130820_1335/1/" path = os.path.join(stage,fpath) raw_fname = "206720_MEG94T0T2_130820_1335_1_c,rfDC,meeg,nr,bcc,int-raw.fif" logger.info("JuMEG Pipeline ICA Performance ICA mne-version: {}".format(mne.__version__)) f = os.path.join(path,raw_fname) raw,raw_fname = jb.get_raw_obj(f,raw=None) raw_path = os.path.dirname(raw_fname) #--- get picks from raw picks = jb.picks.meg_nobads(raw) #--- CFG = jCFG() CFG.update(config=fcfg) config = CFG.GetDataDict("ica") #-- ICAPerformance = JuMEG_ICA_PERFORMANCE(raw=raw,path=path,fname=raw_fname,) #--- find ECG ICAPerformance.ECG.find_events(raw=raw,**config.get("ecg")) ICAPerformance.ECG.GetInfo(debug=True) #--- find EOG ICAPerformance.EOG.find_events(raw=raw,**config.get("eog")) ICAPerformance.EOG.GetInfo(debug=True) #--- # raw.plot(block=True) #--- save raw #fout=f.replace("-raw.fif","test-raw.fif") #jb.update_and_save_raw(raw,f,f) def test2(): #--- init/update logger jumeg_logger.setup_script_logging(logger=logger) raw = None stage = "$JUMEG_PATH_LOCAL_DATA/exp/MEG94T/mne" fcfg = os.path.join(stage,"meg94t_config01.yaml") fpath = "206720/MEG94T0T2/130820_1335/1/" path = os.path.join(stage,fpath) #fraw = "206720_MEG94T0T2_130820_1335_1_c,rfDC,meeg,nr,bcc,int,000516-000645-raw.fif" #fraw_ar = "206720_MEG94T0T2_130820_1335_1_c,rfDC,meeg,nr,bcc,int,000516-000645,ar-raw.fif" fraw = "206720_MEG94T0T2_130820_1335_1_c,rfDC,meeg,nr,bcc,int,fibp0.10-45.0-raw.fif" fraw_ar = "206720_MEG94T0T2_130820_1335_1_c,rfDC,meeg,nr,bcc,int,fibp0.10-45.0,ar-raw.fif" logger.info("JuMEG Pipeline ICA Performance ICA mne-version: {}".format(mne.__version__)) #--- f = os.path.join(path,fraw) raw,raw_fname = jb.get_raw_obj(f,raw=None) raw_path = os.path.dirname(raw_fname) picks = jb.picks.meg_nobads(raw) #--- f = os.path.join(path,fraw_ar) raw_ar,raw_ar_fname = jb.get_raw_obj(f,raw=None) #--- read config CFG = jCFG() CFG.update(config=fcfg) config = 
CFG.GetDataDict("ica") # jIP = JuMEG_ICA_PERFORMANCE(raw=raw,raw_clean=raw_ar,picks=picks) #jIP.report() fout = raw_fname.rsplit("-",1)[0] + "-ar" jIP.plot(verbose=True,fout=fout) if __name__ == "__main__": # test1() test2()
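

#--- Hedged usage sketch (not part of the pipeline above): mirrors the
#    ARTEFACT_EVENTS docstring and test1(). The file name and the channel
#    names ("ECG", "EOG ver", "EOG hor") are assumptions and must match the
#    channels actually present in the recording that is loaded.
def _example_find_artefacts(fname):
    raw, _ = jb.get_raw_obj(fname, raw=None)

    #--- ECG: events are stored in ECG.events and written to raw.annotations
    ECG = ARTEFACT_EVENTS(raw=raw, ch_name="ECG", event_id=999,
                          tmin=-0.4, tmax=0.4, _call=find_ecg_events)
    ECG.find_events(raw=raw)
    ECG.GetInfo(debug=True)

    #--- EOG: two channels, two event ids
    EOG = ARTEFACT_EVENTS(raw=raw, ch_name=['EOG ver', 'EOG hor'],
                          event_id=[997, 998], tmin=-0.4, tmax=0.4,
                          _call=find_eog_events)
    EOG.find_events(raw=raw)
    EOG.GetInfo(debug=True)

    return raw.annotations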
from __future__ import with_statement import sys import socket import Live from _Framework.SubjectSlot import subject_slot from SimpleDeviceComponent import SimpleDeviceComponent from GUtil import debug_out, register_sender from _Generic import GenericScript from _Generic.SpecialMixerComponent import SpecialMixerComponent from _Framework import Task from _Framework.ControlSurface import ControlSurface from _Framework.InputControlElement import * from _Framework.ButtonElement import ButtonElement, ON_VALUE, OFF_VALUE from _Framework.TransportComponent import TransportComponent from _Framework.EncoderElement import EncoderElement import MidiRemoteScript ''' Created on 13.10.2013 @author: Eric Ahrens ''' DEVICE_ROLE_DAW = 'KOMPLETE_KONTROL_DAW' DEVICE_ROLE_MIDI_KEYBOARD = 'KOMPLETE_KONTROL_MIDI_KEYBOARD' IMPLICIT_ARM_IS_ARM_MODE = False GLOBAL_CHANNEL = 0 BUTTON_STATE_OFF = 0 BUTTON_STATE_ON = 127 BUTTON_PRESSED = 1 BUTTON_RELEASED = 0 SID_FIRST = 0 SID_NAV_LEFT = 20 SID_NAV_RIGHT = 21 SID_TRANSPORT_LOOP = 86 SID_TRANSPORT_REWIND = 91 SID_TRANSPORT_FAST_FORWARD = 92 SID_TRANSPORT_STOP = 93 SID_TRANSPORT_PLAY = 94 SID_TRANSPORT_RECORD = 95 SID_LAST = 112 transport_control_switch_ids = { SID_TRANSPORT_LOOP: 'LOOP', SID_TRANSPORT_REWIND: 'REWIND', SID_TRANSPORT_FAST_FORWARD: 'FAST_FORWARD', SID_TRANSPORT_STOP: 'STOP', SID_TRANSPORT_PLAY: 'PLAY', SID_TRANSPORT_RECORD: 'RECORD', } PARAM_PREFIX = 'NIKB' PLUGIN_PREFIX = 'Komplete Kontrol' PLUGIN_CLASS_NAME_VST = 'PluginDevice' PLUGIN_CLASS_NAME_AU = 'AuPluginDevice' ENCODER_CCS = (range(22, 30)) MASTER_VOLUME_CC = 30 VOLUME_CCS = tuple(range(31, 38)) ABSOLUTE_MAP_MODE = Live.MidiMap.MapMode.absolute def log(message): sys.stderr.write("LOG: " + str(message).encode("utf-8")) def device_info(device): if device: debug_out(" # " + str(device.name) + " Class: " + str(device.class_name) + " DisplayName: " + str(device.class_display_name) + " Type: " + str(device.type)) def vindexof(alist, element): index = 0 for ele in alist: if ele == element: return index index = index + 1 return None def arm_smart(song, track=None): if not track: track = song.view.selected_track if track and track.can_be_armed and not track.arm: tracks = song.tracks # Determine if selected track requires a MIDI source use_midi_src = False if '[M]' in track.name: use_midi_src = True for songtrack in tracks: # Detect and arm the MIDI source track if use_midi_src and '[MIDISRC]' in songtrack.name: songtrack.arm = True elif songtrack != track and songtrack and songtrack.can_be_armed and songtrack.arm: songtrack.arm = False track.arm = True # ------------------------------------------------------------------------------------------- # TrackElement class TrackElement: allow_activate_track = False def __init__(self, index, track, receiver, *a, **k): self.index = index self.track = track if receiver.device_role == DEVICE_ROLE_DAW: if track.can_be_armed: debug_out("Track can be armed: %s, %s" % (track.name, str(track))) self.allow_activate_track = True track.add_arm_listener(self._changed_arming) track.add_implicit_arm_listener(self._changed_implicit_arming) else: debug_out("Track cannot be armed: %s, %s" % (track.name, str(track))) track.add_devices_listener(self._changed_devices) self.receiver = receiver def _changed_implicit_arming(self): debug_out("_changed_implicit_arming called on: %s, %s" % (self.track.name, str(self.track))) self._handle_track_armed() def _changed_arming(self): debug_out(" _changed_arming() called") self._handle_track_armed() def _handle_track_armed(self): if not 
self.track.arm and not self.track.implicit_arm: self.receiver.deactivate_track(self.index, self.track) elif self.allow_activate_track and \ (self.track.arm or (IMPLICIT_ARM_IS_ARM_MODE and (self.track.arm or self.track.implicit_arm))): self.receiver.control_track(self.index, self.track) def _changed_devices(self): if self.allow_activate_track: self.receiver.devices_changed(self.index, self.track) def release(self): if self.track and self.track.can_be_armed: self.track.remove_arm_listener(self._changed_arming) self.track.remove_implicit_arm_listener( self._changed_implicit_arming) if self.track: self.track.remove_devices_listener(self._changed_devices) self.receiver = None self.track = None # ------------------------------------------------------------------------------------------- # FocusControl class FocusControl(ControlSurface): controlled_track = None def __init__(self, c_instance, device_role): super(FocusControl, self).__init__(c_instance) self.song().add_is_playing_listener(self.__update_play_button_led) self.device_role = device_role register_sender(self) # For Debug Output only debug_out(str(dir(self))) self._active = False self._tracks = [] self.rewind_button_down = False self.forward_button_down = False with self.component_guard(): self._set_suppress_rebuild_requests(True) self._suppress_send_midi = True device = SimpleDeviceComponent( device_selection_follows_track_selection=True) device.name = 'Device_Component' self.set_up_encoders(device) self.set_up_mixer_component( VOLUME_CCS, (), { 'NUMSENDS': 0, 'MASTERVOLUME': MASTER_VOLUME_CC, 'NOTOGGLE': 0 }, GLOBAL_CHANNEL, ABSOLUTE_MAP_MODE) self.set_device_component(device) self._on_selected_track_changed() self.set_up_controls() self.request_rebuild_midi_map() self._set_suppress_rebuild_requests(False) self._active = True self._suppress_send_midi = False if self.device_role == DEVICE_ROLE_DAW: # Transport controls - instantiate a TransportComponent. The default behavior when setting transport buttons # works well for most buttons, but not the stop button (since default for stop button is never lighting up) # so that one is handled manually. 
self.transport = TransportComponent() # ButtonElement(is_momentary, msg_type, channel, identifier) self.transport.set_play_button(ButtonElement( False, MIDI_NOTE_TYPE, 0, SID_TRANSPORT_PLAY)) self.transport.set_record_button(ButtonElement( False, MIDI_NOTE_TYPE, 0, SID_TRANSPORT_RECORD)) self.transport.set_seek_buttons(ButtonElement(True, MIDI_NOTE_TYPE, 0, SID_TRANSPORT_FAST_FORWARD), ButtonElement( True, MIDI_NOTE_TYPE, 0, SID_TRANSPORT_REWIND)) self.transport.set_loop_button(ButtonElement( False, MIDI_NOTE_TYPE, 0, SID_TRANSPORT_LOOP)) # self.transport.set_overdub_button(ButtonElement( # False, MIDI_NOTE_TYPE, 0, SID_TRANSPORT_LOOP)) self._assign_tracks() ctrack = self.get_controlled_track() if ctrack: track = ctrack[0] instr = ctrack[1] self.controlled_track = track index = list(self.song().tracks).index(track) self.update_status_midi(index, track, instr, 1) self.refresh_state() def refresh_state(self): self.__update_play_button_led() def receive_midi(self, midi_bytes): midi_status = midi_bytes[0] & 240 debug_out("receive_midi() called: %s (note_on=%s, note_off=%s)" % ( str(midi_status), MIDI_NOTE_ON_STATUS, MIDI_NOTE_OFF_STATUS)) if midi_status == MIDI_NOTE_ON_STATUS or midi_status == MIDI_NOTE_OFF_STATUS: note = midi_bytes[1] value = BUTTON_PRESSED if midi_bytes[2] > 0 else BUTTON_RELEASED debug_out("midi note received: note=%s, value=%s" % (note, value)) if note in transport_control_switch_ids: if self.device_role == DEVICE_ROLE_DAW: debug_out("transport received: note=%s, value=%s, transport=%s" % ( note, value, transport_control_switch_ids[note])) self.handle_transport_switch_ids(note, value) else: debug_out("transport ignored: note=%s, value=%s, transport=%s" % ( note, value, transport_control_switch_ids[note])) return super(FocusControl, self).receive_midi(midi_bytes) def handle_transport_switch_ids(self, switch_id, value): #debug_out("handle_transport_switch_ids() called. 
switch_id: " + str(switch_id) + " value: " + str(value)) if switch_id == SID_TRANSPORT_REWIND: if value == BUTTON_PRESSED: self.rewind_button_down = True elif value == BUTTON_RELEASED: self.rewind_button_down = False self.__update_forward_rewind_leds() elif switch_id == SID_TRANSPORT_FAST_FORWARD: if value == BUTTON_PRESSED: self.forward_button_down = True elif value == BUTTON_RELEASED: self.forward_button_down = False self.__update_forward_rewind_leds() elif self.device_role == DEVICE_ROLE_DAW and switch_id == SID_TRANSPORT_STOP and value == BUTTON_PRESSED: self.__stop_song() def __stop_song(self): self.song().stop_playing() self.__update_play_button_led() def __update_play_button_led(self): #debug_out("__update_play_button_led is called: is_playing: " + str(self.song().is_playing)) if self.song().is_playing: self._send_midi( (MIDI_NOTE_ON_STATUS, SID_TRANSPORT_PLAY, BUTTON_STATE_ON)) self._send_midi( (MIDI_NOTE_ON_STATUS, SID_TRANSPORT_STOP, BUTTON_STATE_OFF)) else: self._send_midi( (MIDI_NOTE_ON_STATUS, SID_TRANSPORT_PLAY, BUTTON_STATE_OFF)) self._send_midi( (MIDI_NOTE_ON_STATUS, SID_TRANSPORT_STOP, BUTTON_STATE_ON)) def __update_forward_rewind_leds(self): if self.forward_button_down: self._send_midi( (MIDI_NOTE_ON_STATUS, SID_TRANSPORT_FAST_FORWARD, BUTTON_STATE_ON)) else: self._send_midi( (MIDI_NOTE_ON_STATUS, SID_TRANSPORT_FAST_FORWARD, BUTTON_STATE_OFF)) if self.rewind_button_down: self._send_midi( (MIDI_NOTE_ON_STATUS, SID_TRANSPORT_REWIND, BUTTON_STATE_ON)) else: self._send_midi( (MIDI_NOTE_ON_STATUS, SID_TRANSPORT_REWIND, BUTTON_STATE_OFF)) def set_up_controls(self): is_momentary = True self.left_button = ButtonElement( is_momentary, MIDI_CC_TYPE, 0, SID_NAV_LEFT) self.right_button = ButtonElement( is_momentary, MIDI_CC_TYPE, 0, SID_NAV_RIGHT) self._do_left.subject = self.left_button self._do_right.subject = self.right_button if self.device_role == DEVICE_ROLE_DAW: self.stop_button = ButtonElement( False, MIDI_NOTE_TYPE, 0, SID_TRANSPORT_STOP) self._do_stop.subject = self.stop_button def set_up_encoders(self, device): parameter_encoders = [] encoder_ccs = ENCODER_CCS for cc in encoder_ccs: channel = GLOBAL_CHANNEL if cc in range(128) and channel in range(16): encoder = EncoderElement( MIDI_CC_TYPE, channel, cc, ABSOLUTE_MAP_MODE) encoder.name = 'Device_Parameter_' + \ str(list(encoder_ccs).index(cc)) + '_Control' parameter_encoders.append(encoder) log('Encoder: %s, CC: %s, channel: %s' % (encoder.name, cc, channel)) if len(parameter_encoders) > 0: device.set_parameter_controls(tuple(parameter_encoders)) log('Initialized %s encoders' % len(parameter_encoders)) def set_up_mixer_component(self, volume_controls, trackarm_controls, mixer_options, global_channel, volume_map_mode): if volume_controls != None and trackarm_controls != None: num_strips = max(len(volume_controls), len(trackarm_controls)) send_info = [] momentary_buttons = False mixer = SpecialMixerComponent(num_strips) mixer.name = 'Mixer' mixer.master_strip().name = 'Master_Channel_Strip' mixer.selected_strip().name = 'Selected_Channel_Strip' if mixer_options != None: if 'MASTERVOLUME' in mixer_options.keys() and mixer_options['MASTERVOLUME'] in range(128): encoder = EncoderElement( MIDI_CC_TYPE, global_channel, mixer_options['MASTERVOLUME'], volume_map_mode) encoder.name = 'Master_Volume_Control' mixer.master_strip().set_volume_control(encoder) if 'NUMSENDS' in mixer_options.keys() and mixer_options['NUMSENDS'] > 0: for send in range(mixer_options['NUMSENDS']): key = 'SEND' + str(send + 1) if not key in 
mixer_options.keys(): raise AssertionError None.append(mixer_options[key]) momentary_buttons = 'NOTOGGLE' in mixer_options.keys() next_bank_button = None prev_bank_button = None if 'NEXTBANK' in mixer_options.keys() and mixer_options['NEXTBANK'] in range(128): next_bank_button = ButtonElement( momentary_buttons, MIDI_CC_TYPE, global_channel, mixer_options['NEXTBANK']) next_bank_button.name = 'Mixer_Next_Bank_Button' if 'PREVBANK' in mixer_options.keys() and mixer_options['PREVBANK'] in range(128): prev_bank_button = ButtonElement( momentary_buttons, MIDI_CC_TYPE, global_channel, mixer_options['PREVBANK']) prev_bank_button.name = 'Mixer_Previous_Bank_Button' mixer.set_bank_buttons(next_bank_button, prev_bank_button) for track in range(num_strips): strip = mixer.channel_strip(track) strip.name = 'Channel_Strip_' + str(track) if track in range(len(volume_controls)): channel = global_channel cc = volume_controls[track] if isinstance(volume_controls[track], (tuple, list)): cc = volume_controls[track][0] if volume_controls[track][1] in range(16): channel = volume_controls[track][1] if cc in range(128) and channel in range(16): encoder = EncoderElement( MIDI_CC_TYPE, channel, cc, volume_map_mode) encoder.name = str(track) + '_Volume_Control' strip.set_volume_control(encoder) if track in range(len(trackarm_controls)) and trackarm_controls[track] in range(128): button = ButtonElement( momentary_buttons, MIDI_CC_TYPE, global_channel, trackarm_controls[track]) button.name = str(track) + '_Arm_Button' strip.set_arm_button(button) send_controls = [] for send in send_info: encoder = None if track in range(len(send)): channel = global_channel cc = send[track] if isinstance(send[track], (tuple, list)): cc = send[track][0] if send[track][1] in range(16): channel = send[track][1] if cc in range(128) and channel in range(16): encoder = EncoderElement( MIDI_CC_TYPE, channel, cc, volume_map_mode) encoder.name = str( track) + '_Send_' + str(list(send_info).index(send)) + '_Control' send_controls.append(encoder) strip.set_send_controls(tuple(send_controls)) @subject_slot('value') def _do_stop(self): self.__stop_song() @subject_slot('value') def _do_left(self, value): assert value in range(128) if value != 0: log('Navigating to selection left') self.navigate_midi_track(-1) @subject_slot('value') def _do_right(self, value): assert value in range(128) if value != 0: log('Navigating to selection right') self.navigate_midi_track(1) ''' Selects next available Track. Values for direction are -1 going left and -1 for going left. ''' def navigate_midi_track(self, direction): song = self.song() tracks = song.tracks seltrack = song.view.selected_track index = vindexof(tracks, seltrack) # If the new selection isn't yet armed, arm that before # moving if not seltrack.arm: arm_smart(song, song.view.selected_track) return # Replace with get_next_midi_track to select next available midi track # left or right nxttrack = self.get_next_track(direction, index, tracks) if nxttrack: song.view.selected_track = nxttrack arm_smart(song, nxttrack) else: arm_smart(song, seltrack) ''' Selects next available armable Track. Values for direction are -1 going left and -1 for going left. ''' def get_next_track(self, direction, index, tracks): pos = index if pos == None: pos = len(tracks) pos = pos + direction while pos >= 0 and pos < len(tracks): track = tracks[pos] if track.can_be_armed: return track pos = pos + direction return None ''' Selects next available MIDI Track. Values for direction are -1 going left and -1 for going left. 
''' def get_next_midi_track(self, direction, index, tracks): pos = index if pos == None: pos = len(tracks) pos = pos + direction while pos >= 0 and pos < len(tracks): track = tracks[pos] # Looking for a special Device if track.can_be_armed and track.has_midi_input: return track pos = pos + direction return None ''' Returns tuple (track, (device [,Instance No])) ''' def get_controlled_track(self): armed_tracks = [] tracks = self.song().tracks for track in tracks: if track.can_be_armed and (track.arm or track.implicit_arm): armed_tracks.append(track) # if len(armed_tracks) == 1: # return (armed_tracks[0], # self.find_instrument_list(armed_tracks[0].devices)) if len(armed_tracks) > 0: instr = self.find_instrument_ni(armed_tracks) if instr: return instr return self.find_instrument_any(armed_tracks) return None def find_instrument_ni(self, tracks): for track in tracks: instr = self.find_instrument_list(track.devices) if instr and instr[1] is not None: return (track, instr) return None def find_instrument_any(self, tracks): for track in tracks: instr = self.find_instrument_list(track.devices) if instr: return (track, instr) return None def _assign_tracks(self): tracks = self.song().tracks for track in self._tracks: track.release() self._tracks = [] for index in range(len(tracks)): self._tracks.append(TrackElement(index, tracks[index], self)) def control_track(self, index, track): if self.controlled_track != track: self.controlled_track = track instr = self.find_instrument_list(track.devices) debug_out("CONTROL_TRACK(): " + track.name + " " + str(instr)) if track.implicit_arm and not track.arm: debug_out("going to arm implicit_armed track") run_task = Task.run( lambda: self.activate_track(index, track, instr)) task_seq = Task.sequence(Task.delay(1), run_task) self._tasks.add(task_seq) else: debug_out("Not re-activating controlled track %s" % track.name) def activate_track(self, index, track, instr): is_ni = instr is not None and instr[1] is not None debug_out("ACTIVATE_TRACK(): %s %s (%s)" % (track.name, str(instr), "NI" if is_ni else "non-NI")) track.arm = True self.update_status_midi(index, track, instr, 1) def deactivate_track(self, index, track): debug_out("DEACTIVATE_TRACK called: %s" % track.name) # instr = self.find_instrument_list(track.devices) # self.update_status_midi(index, track, instr, 0) if self.controlled_track and self.controlled_track == track: self.controlled_track = None debug_out("Releasing controlled Track") # # Reactivate another track is there were multiple ones armed # ctrack = self.get_controlled_track() # if ctrack: # debug_out("Activating other armed track: %s" % track.name) # track = ctrack[0] # instr = ctrack[1] # self.controlled_track = track # index = list(self.song().tracks).index(track) # self.update_status_midi(index, track, instr, 1) def devices_changed(self, index, track): debug_out(" DEVICES_CHANGED() Track " + str(index) + " " + track.name) instr = self.find_instrument_list(track.devices) self.update_status_midi(index, track, instr, 1) def _on_track_list_changed(self): # This is called whenever the tracks are re-ordered, which we don't really need, # therefore i commented out self.update_status_midi() below. 
-kurt super(FocusControl, self)._on_track_list_changed() self._assign_tracks() ctrack = self.get_controlled_track() if ctrack: track = ctrack[0] instr = ctrack[1] debug_out("_ON_TRACK_LIST_CHANGED() called " + str(instr)) if track != self.controlled_track: self.controlled_track = track index = list(self.song().tracks).index(track) debug_out( "_ON_TRACK_LIST_CHANGED: current track is not controlled_track") #self.update_status_midi(index, track, instr, 1) elif self.controlled_track: # No Armed Track with Instrument debug_out(" No More Controlled Track") self.controlled_track = None def _on_selected_track_changed(self): super(FocusControl, self)._on_selected_track_changed() self.set_controlled_track(self.song().view.selected_track) # Block below was commented out because focus only follows track # arming, not selection. -kurt # self._on_devices_changed.subject = self.song().view.selected_track # track = self.song().view.selected_track # debug_out(" Changed Selected Track " + track.name) # if track.can_be_armed and track.arm: # self.controlled_track = track # instr = self.find_instrument_list(track.devices) # index = list(self.song().tracks).index(track) # self.update_status_midi(index, track, instr, 1) def broadcast(self): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) if not s: debug_out(" Could Not open Socket ") else: try: s.connect(('localhost', 60090)) s.sendall('Hello, world') s.close() except: debug_out(" No Server ") @subject_slot('devices') def _on_devices_changed(self): #debug_out(" > Changed Device on selected Track ") self.scan_devices() def find_instrument_list(self, devicelist): for device in devicelist: instr = self.find_instrument(device) if instr: return instr return None def find_in_chain(self, chain): devices_instr_pairs = [] for device in chain.devices: instr = self.find_instrument(device) if instr: debug_out("Found instrument. device=%s, instr=%s, chain=%s" % ( device, instr, chain)) devices_instr_pairs.append((device, instr)) for (device, instr) in devices_instr_pairs: if self.device_is_ni(device): return (device, instr) if len(devices_instr_pairs) > 0: return devices_instr_pairs[0] return (None, None) def device_is_ni(self, device): return (device.class_name == PLUGIN_CLASS_NAME_VST or device.class_name == PLUGIN_CLASS_NAME_AU) and (device.class_display_name.startswith(PLUGIN_PREFIX)) def find_instrument(self, device): debug_out("find_instrument() called. type=%s, name=%s, class_name=%s, class_display_name=%s, parameters=%s" % ( device.type, device.name, device.class_name, device.class_display_name, ','.join([p.name for p in device.parameters]))) if device.type == 1: debug_out("find_instrument() found device type 1") if device.can_have_chains: chains = device.chains device_instr_pairs = [] for chain in chains: (device, instr) = self.find_in_chain(chain) if instr: if self.device_is_ni(device): return self.find_instrument(device) device_instr_pairs.append((device, instr)) if len(device_instr_pairs) > 0: return self.find_instrument(device_instr_pairs[0][0]) elif self.device_is_ni(device): device_params = device.parameters debug_out("find_instrument() found NI device") if device_params and len(device_params) > 1: pn = device_params[1].name debug_out("device_params[1].name=%s" % (pn,)) pnLen = len(pn) if pn.startswith(PARAM_PREFIX): #debug_out("pn[1] starts with " + PARAM_PREFIX + " and str(pn[4:pnLen]) = " + str(pn[4:pnLen])) return (str(device.class_display_name), str(pn[4:pnLen])) else: debug_out( "insufficient device parameters. 
device attrs=%s" % str(dir(device))) return (device.class_display_name, None) return None def scan_chain(self, chain): for device in chain.devices: self.scan_device(device) def scan_device(self, device): # if device.type == 1: # debug_out("SNDDEV " + device.name + " <" + device.class_name + "> " + device.class_display_name + " (" + str(device.type) +")") if device.class_name == 'PluginDevice' and device.class_display_name == 'FocusTester1': parms = device.parameters # if parms and len(parms)>1: # debug_out("# Focus Device " + parms[1].name) elif device.can_have_chains: chains = device.chains for chain in chains: self.scan_chain(chain) def update_status_midi(self, index, track, instrument, value): #debug_out("UPDATE_STATUS(): track: "+track.name+" instr: "+str(instrument)+" value: "+str(value)) msgsysex = [240, 0, 0, 102, 20, 18, 0] tr_name = track.name for c in tr_name: msgsysex.append(ord(c)) msgsysex.append(25) ind_str = str(index) for c in ind_str: msgsysex.append(ord(c)) if instrument != None: msgsysex.append(25) for c in instrument[0]: msgsysex.append(ord(c)) if instrument[1] != None: msgsysex.append(25) for c in instrument[1]: msgsysex.append(ord(c)) # msgsysex.append(25) # msgsysex.append(value) msgsysex.append(247) self._send_midi(tuple(msgsysex)) def send_to_display(self, text, grid=0): if(len(text) > 28): text = text[:27] msgsysex = [240, 0, 0, 102, 23, 18, min(grid, 3) * 28] filled = text.ljust(28) # 27 Characters for c in filled: msgsysex.append(ord(c)) msgsysex.append(247) self._send_midi(tuple(msgsysex)) def scan_devices(self): song = self.song() for track in song.tracks: #debug_out(" Scan Track : " + str(track.name)) for device in track.devices: self.scan_device(device) def disconnect(self): self._active = False self._suppress_send_midi = True self.song().remove_is_playing_listener(self.__update_play_button_led) super(FocusControl, self).disconnect() return None
"""utils.py Convenience functions to build applications that use pyfsa. """ __license__ = "Apache License, Version 2.0" __author__ = "Roland Kwitt, Kitware Inc., University of Salzburg 2013" __email__ = "E-Mail: roland.kwitt@kitware.com" __status__ = "Development" import os import sys import logging import numpy as np from optparse import OptionParser # Define the log levels LOGGING_LEVELS = { 'critical': logging.CRITICAL, 'error': logging.ERROR, 'warning': logging.WARNING, 'info': logging.INFO, 'debug': logging.DEBUG} def _radii_callback(option,opt,value,parser): radii = [] for e in value.split(','): radii.append(int(e)) parser.values.radii = radii def _globalLabelFile_callback(option,opt,value,parser): print value if value is None: parser.values.globalLabelFile = None if not os.path.exists(value): raise Exception("Global label file %s does not exist!" % value) parser.values.globalLabelFile = value def _classInfo_callback(option,opt,value,parser): if not os.path.exists(value): raise Exception("Class info file %s does not exist!" % value) parser.values.classInfo = value def _groupInfo_callback(option,opt,value,parser): if not os.path.exists(value): raise Exception("Group info file %s does not exist!" % value) parser.values.groupInfo = value def _logLevel_callback(option,opt,value,parser): if not value in LOGGING_LEVELS: raise Exception("Level : %s not supported!" % value) parser.values.logLevel = value def _graphList_callback(option,opt,value,parser): if not os.path.exists(value): raise Exception("Graph list file %s does not exist!" % value) parser.values.graphList = value def _baseDir_callback(options,opt,value,parser): if not os.path.exists(value): raise Exception("Directory %s does not exist!" % value) parser.values.baseDir = value def setup_cli_parsing(): """Setup CLI parser for common CLI arguments. Returns ------- parser : OptionParser object """ parser = OptionParser() parser.add_option("", "--labelPrefix", help="prefix of the label files.") parser.add_option("", "--skip", default=0, type="int", help="Skip N header entries.") parser.add_option("", "--omitDegenerate", action="store_true", default=False, help="Omit single vertex subgraphs.") parser.add_option("", "--writeAs", default="/tmp/data", help="feature file base name.") parser.add_option("", "--seed", type="int", help="Seed for random number generator.") parser.add_option("", "--normalize", action="store_true", default=False, help="Enable feature normalization (mean/var).") parser.add_option("", "--cvRuns", default=5, type="int", help="number of cross-validations to run.") parser.add_option("", "--recompute", action="store_true", default=False, help="Force feature recomputation.") parser.add_option("", "--graphList", type="string", action="callback", callback=_graphList_callback, help="input file with graph adj. file names.") parser.add_option("", "--baseDir", type="string", action="callback", callback=_baseDir_callback, help="base directory of graph adj. 
file names.") parser.add_option("", "--logLevel", type="string", default="debug", action="callback", callback=_logLevel_callback, help="set logging level.") parser.add_option("", "--classInfo", type="string", action='callback', callback=_classInfo_callback, help="class information file.") parser.add_option("", "--groupInfo", type="string", action='callback', callback=_groupInfo_callback, help="group information file.") parser.add_option("", "--logTo", help="Specify logging file.") parser.add_option("", "--globalLabelFile", type="string", action="callback", callback=_globalLabelFile_callback, help="global label file for all graphs.") parser.add_option("", "--radii", type="string", action='callback', callback=_radii_callback, help="list of neighborhood radi(i), e.g., 1,2,3") return parser def setup_logging(options): """Configure logger. Defines the logging format. """ log_format = '%(asctime)s [%(funcName)s] %(levelname)s: %(message)s' logging.basicConfig(level=LOGGING_LEVELS.get(options.logLevel, logging.NOTSET), filename=options.logTo, format=log_format, datefmt='%Y-%m-%d %H:%M:%S') def remap_labels(labels): """Map labels in {c_1,...,c_C} to {0,...,C-1}. Parameters ---------- labels : numpy array, shape (L,) Input labels (could also be 'chars') Returns ------- labels : numpy array, shape (L,) Labels in {0,...C-1} where C is the number of unique labels. """ for i,l in enumerate(np.unique(labels)): labels[labels == l] = i return labels def read_graph_file_list(options): """Create a list of graph file names. Parameters ---------- options : object returned by parse_args() of OptionParser CLI options. Returns ------- graph_files : list, len=N List of N graph file names, contained in the file that was specified as the value to '--graphList'. In case '--labelPrefix' is provided, the value to that option is used as a prefix to each graph file name. """ base = options.baseDir if not base is None: return [os.path.join(base, l.strip()) for l in open(options.graphList)] return [l.split() for l in open(options.graphList)] def read_label_file_list(options, graph_file_list): """Create a list of vertex label files (one for each graph). Paramters --------- options : object returned by parse_args() of OptionParser CLI options. graph_file_list : list List of N graph files. Returns ------- label_files : list len=N List of label files (graph file name + label prefix). """ if options.labelPrefix is None: raise Exception("No label prefix given!") N = len(graph_file_list) label_files = [None for i in range(0, N)] for i in range(0, N): label_files[i] = "%s.%s" % (graph_file_list[i], options.labelPrefix) return label_files def read_class_info(options): """Read class label information from file. The class label file needs to follow the convention that there is only one label per line, e.g., 0 1 0 1 ... Parameters ---------- options : object returned by parse_args() of OptionParser CLI options. Returns ------- class_labels : list of 'int', len=N List of labels (one label per graph). """ return [int(l.strip()) for l in open(options.classInfo)] def read_group_info(options): """Read grouping info from file (in the form of attribute indicators). The format of the group info file is a binary N x G matrix, where a non-zero entry at the j-th entry of the n-th row signifies that this the j-th attribute is present. Example ------- 0 1 0 0 0 0 0 1 0 1 0 1 Parameters ---------- options : object returned by parse_args() of OptionParser CLI options. 
Returns ------- attributes : numpy matrix, shape (N, G) Binary attribute indicator matrix. """ grp_info = np.asmatrix(np.genfromtxt(options.groupInfo, dtype="int")) if grp_info.shape[0] == 1: grp_info = grp_info.transpose() return grp_info def show_summary(scores): """Print a classification report to stdout. Parameters ---------- scores : numpy array, shape (K,) Classifier scores. """ logger = logging.getLogger() logger.info("Avg(scores) : %.2f" % (np.mean(scores)*100)) logger.info("Std(scores) : %.2f" % (np.std(scores)*100))
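

# Hypothetical driver sketch (not part of pyfsa itself) showing how the helpers
# above are typically combined. The caller is expected to pass CLI arguments
# such as --graphList, --baseDir, --labelPrefix and --classInfo; the scores are
# a placeholder for whatever classifier is run on the extracted features.
def example_main(argv):
    parser = setup_cli_parsing()
    (options, args) = parser.parse_args(argv)
    setup_logging(options)

    graph_files = read_graph_file_list(options)
    label_files = read_label_file_list(options, graph_files)
    class_labels = read_class_info(options)

    # ... compute graph features and run cross-validation here ...
    scores = np.array([0.80, 0.85, 0.90])  # placeholder scores
    show_summary(scores)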
import direct from pandac.PandaModules import HttpRequest from direct.directnotify.DirectNotifyGlobal import directNotify from direct.task.TaskManagerGlobal import taskMgr from direct.task import Task from LandingPage import LandingPage from direct.showbase import ElementTree as ET notify = directNotify.newCategory('WebRequestDispatcher') class WebRequest(object): """ Pointer to a single web request (maps to an open HTTP socket). An instance of this class maps to a single client waiting for a response. connection is an instance of libdirect.HttpRequest """ def __init__(self,connection): self.connection = connection def getURI(self): return self.connection.GetRequestURL() def getRequestType(self): return self.connection.GetRequestType() def dictFromGET(self): result = {} for pair in self.connection.GetRequestOptionString().split('&'): arg = pair.split('=',1) if len(arg) > 1: result[arg[0]] = arg[1] return result def respondHTTP(self,status,body): status = str(status) msg = u"HTTP/1.0 %s\r\nContent-Type: text/html\r\n\r\n%s" % (status,body) self.connection.SendThisResponse(encodedUtf8(msg)) def respond(self,body): self.respondHTTP("200 OK",body) def respondXML(self,body): msg = u"HTTP/1.0 200 OK\r\nContent-Type: text/xml\r\n\r\n%s" % body self.connection.SendThisResponse(encodedUtf8(msg)) def respondCustom(self,contentType,body): msg = "HTTP/1.0 200 OK\r\nContent-Type: %s" % contentType if contentType in ["text/css",]: msg += "\nCache-Control: max-age=313977290\nExpires: Tue, 02 May 2017 04:08:44 GMT\n" msg += "\r\n\r\n%s" % (body) self.connection.SendThisResponse(msg) def timeout(self): resp = "<html><body>Error 504: Request timed out</body></html>\r\n" self.respondHTTP("504 Gateway Timeout",resp) def getSourceAddress(self): return self.connection.GetSourceAddress() # -------------------------------------------------------------------------------- class SkinningReplyTo: def __init__(self, replyTo, dispatcher, uri, doSkin): self._replyTo = replyTo self._dispatcher = dispatcher self._uri = uri self._doSkin = doSkin self._headTag = ET.Element('head') self._bodyTag = ET.Element('body') def respondHTTP(self,status,body): if self._doSkin: body = self._dispatcher.landingPage.skin( body, self._uri, headTag=self._headTag, bodyTag=self._bodyTag) self._replyTo.respondHTTP(status, body) def respond(self, response): self.respondHTTP("200 OK", response) # provides access to head and body tags of landing page def getHeadTag(self): return self._headTag def getBodyTag(self): return self._bodyTag def __getattr__(self, attrName): if attrName in self.__dict__: return self.__dict__[attrName] if hasattr(self.__class__, attrName): return getattr(self.__class__, attrName) # pass-through to replyTo object which this object is a proxy to return getattr(self._replyTo, attrName) class WebRequestDispatcher(object): """ Request dispatcher for HTTP requests. Contains registration and dispatching functionality. Single-state class--multiple instances share all data. This is because we're wrapping a singleton webserver. How to use: w = WebRequestDispatcher() w.listenOnPort(8888) def test(replyTo,**kw): print 'test got called with these options: %s' % str(kw) replyTo.respond('<html><body>Thank you for the yummy arguments: %s' % str(kw)) w.registerGETHandler('test',test) while 1: w.poll() Browse to http://localhost:8888/test?foo=bar and see the result! 
""" _shared_state = {} listenPort = None uriToHandler = {} requestTimeout = 10.0 notify = notify def __new__(self, *a, **kw): obj = object.__new__(self, *a, **kw) obj.__dict__ = self._shared_state return obj def __init__(self, wantLandingPage = True): self.enableLandingPage(wantLandingPage) def listenOnPort(self,listenPort): """ Start the web server listening if it isn't already. Singleton server, so ignore multiple listen requests. """ if self.listenPort is None: self.listenPort = listenPort HttpRequest.HttpManagerInitialize(listenPort) self.notify.info("Listening on port %d" % listenPort) else: self.notify.warning("Already listening on port %d. Ignoring request to listen on port %d." % (self.listenPort,listenPort)) def invalidURI(self,replyTo,**kw): resp = "<html><body>Error 404</body></html>\r\n" replyTo.respondHTTP("404 Not Found",resp) self.notify.warning("%s - %s - 404" % (replyTo.getSourceAddress(),replyTo.getURI())) # access to head and body tags of landing page # only for 'returnsResponse' mode def getHeadTag(self): return self._headTag def getBodyTag(self): return self._bodyTag def handleGET(self,req): """ Parse and dispatch a single GET request. Expects to receive a WebRequest object. """ assert req.getRequestType() == "GET" self.landingPage.incrementQuickStat("Pages Served") uri = req.getURI() args = req.dictFromGET() callable,returnsResponse,autoSkin = self.uriToHandler.get(uri, [self.invalidURI,False,False]) if callable != self.invalidURI: self.notify.info("%s - %s - %s - 200" % (req.getSourceAddress(), uri, args)) if returnsResponse: result = apply(callable,(),args) if autoSkin: self._headTag = ET.Element('head') self._bodyTag = ET.Element('body') req.respond(self.landingPage.skin(result,uri, headTag=self._headTag, bodyTag=self._bodyTag)) del self._bodyTag del self._headTag else: req.respond(result) else: args["replyTo"] = SkinningReplyTo(req, self, uri, autoSkin) apply(callable,(),args) def poll(self): """ Pump the web server, handle any incoming requests. This function should be called regularly, about 2-4 calls/sec for current applications is a good number. """ request = HttpRequest.HttpManagerGetARequest() while request is not None: wreq = WebRequest(request) if wreq.getRequestType() == "GET": self.handleGET(wreq) else: self.notify.warning("Ignoring a non-GET request from %s: %s" % (request.GetSourceAddress(),request.GetRawRequest())) self.invalidURI(wreq) request = HttpRequest.HttpManagerGetARequest() def registerGETHandler(self,uri,handler,returnsResponse=False, autoSkin=False): """ Call this function to register a handler function to be called in response to a query to the given URI. GET options are translated into **kw arguments. Handler function should accept **kw in order to handle arbitrary queries. If returnsResponse is False, the request is left open after handler returns--handler or tasks it creates are responsible for fulfilling the query now or in the future. Argument replyTo (a WebRequest) is guaranteed to be passed to the handler, and replyTo.respond must be called with an HTML response string to fulfill the query and close the socket. If returnsResponse is True, WebRequestDispatcher expects the handler to return its response string, and we will route the response and close the socket ourselves. No replyTo argument is provided to the handler in this case. """ if uri[0] != "/": uri = "/" + uri if self.uriToHandler.get(uri,None) is None: self.notify.info("Registered handler %s for URI %s." 
% (handler,uri)) self.uriToHandler[uri] = [handler, returnsResponse, autoSkin] else: self.notify.warning("Attempting to register a duplicate handler for URI %s. Ignoring." % uri) def unregisterGETHandler(self,uri): if uri[0] != "/": uri = "/" + uri self.uriToHandler.pop(uri,None) # -- Poll task wrappers -- def pollHTTPTask(self,task): self.poll() return Task.again def startCheckingIncomingHTTP(self, interval=0.3): taskMgr.remove('pollHTTPTask') taskMgr.doMethodLater(interval,self.pollHTTPTask,'pollHTTPTask') def stopCheckingIncomingHTTP(self): taskMgr.remove('pollHTTPTask') # -- Landing page convenience functions -- def enableLandingPage(self, enable): if enable: if "landingPage" not in self.__dict__: self.landingPage = LandingPage() self.registerGETHandler("/", self._main, returnsResponse = True, autoSkin = True) self.registerGETHandler("/services", self._services, returnsResponse = True, autoSkin = True) self.registerGETHandler("/default.css", self._stylesheet) self.registerGETHandler("/favicon.ico", self._favicon) self.landingPage.addTab("Main", "/") self.landingPage.addTab("Services", "/services") else: self.landingPage = None self.unregisterGETHandler("/") self.unregisterGETHandler("/services") def _main(self): return self.landingPage.getMainPage() def _services(self): return self.landingPage.getServicesPage(self.uriToHandler) def _stylesheet(self,**kw): replyTo = kw.get("replyTo",None) assert replyTo is not None body = self.landingPage.getStyleSheet() replyTo.respondCustom("text/css",body) def _favicon(self,**kw): replyTo = kw.get("replyTo",None) assert replyTo is not None body = self.landingPage.getFavIcon() replyTo.respondCustom("image/x-icon",body)
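# --------------------------------------------------------------------------------
# Usage sketch (not part of the original module): contrasts the two handler
# registration modes described in registerGETHandler's docstring. This assumes a
# Panda3D environment where libdirect's HttpRequest, the LandingPage helper and
# the encodedUtf8 utility (used by WebRequest.respondHTTP but not imported here)
# are available; the port number and handler names below are illustrative only.
if __name__ == '__main__':
    w = WebRequestDispatcher()
    w.listenOnPort(8888)

    # returnsResponse=True: the handler returns its HTML string and the
    # dispatcher routes the response and closes the socket itself.
    def status(**kw):
        return '<html><body>args: %s</body></html>' % (kw,)
    w.registerGETHandler('status', status, returnsResponse=True)

    # returnsResponse=False (default): the handler is passed replyTo (a
    # WebRequest proxy) and must call replyTo.respond() itself, now or later.
    def deferred(replyTo=None, **kw):
        replyTo.respond('<html><body>deferred reply</body></html>')
    w.registerGETHandler('deferred', deferred)

    while True:
        w.poll()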
# Copyright (c) 2015 Ambroz Bizjak # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import print_function import json def _kwarg_maybe (name, kwargs, default=None): if name in kwargs: res = kwargs[name] del kwargs[name] else: res = default return res def _kwarg (name, kwargs): assert name in kwargs res = kwargs[name] del kwargs[name] return res def _merge_dicts (*dicts): res = dict() for d in dicts: assert type(d) is dict for (key, value) in d.items(): if key not in res: res[key] = value else: assert type(value) is dict assert type(res[key]) is dict res[key] = _merge_dicts(res[key], value) return res def _json_type_of (x): if type(x) is dict: return 'object' if type(x) is list: return 'array' if type(x) is str: return 'string' if type(x) is int or type(x) is long: return 'integer' if type(x) is float: return 'number' raise TypeError('wrong type') class ConfigBase (object): def __init__ (self, **kwargs): self.title = _kwarg_maybe('title', kwargs) self.title_key = _kwarg_maybe('title_key', kwargs) self.title_expr = _kwarg_maybe('title_expr', kwargs) self.collapsable = _kwarg_maybe('collapsable', kwargs, False) self.collapsed_initially = _kwarg_maybe('collapsed_initially', kwargs, True) self.ident = _kwarg_maybe('ident', kwargs) self.enum = _kwarg_maybe('enum', kwargs) self.no_header = _kwarg_maybe('no_header', kwargs, False) self.processing_order = _kwarg_maybe('processing_order', kwargs) self.default = _kwarg_maybe('default', kwargs) self.kwargs = kwargs def json_schema (self): if self.title_key is not None: headerTemplate = 'return vars.self[{}]'.format(json.dumps(self.title_key)) elif self.title_expr is not None: headerTemplate = 'return ({})'.format(self.title_expr) else: headerTemplate = None return _merge_dicts( ({ 'title': self.title } if self.title is not None else {}), ({ 'headerTemplate': headerTemplate } if headerTemplate is not None else {}), ({ 'id': self.ident } if self.ident is not None else {}), ({ 'options': { 'disable_collapse': True } } if not self.collapsable else {}), ({ 'options': { 'collapsed': True } } if self.collapsable and self.collapsed_initially else {}), ({ 'options': { 'no_header': True } } if self.no_header else {}), ({ 'processingOrder': self.processing_order } if self.processing_order is not None else {}), ({ 'enum': self.enum } if self.enum is 
not None else {}), ({ 'default': self.default } if self.default is not None else {}), self._json_extra() ) def _json_new_properties (self, container_id): return {} class String (ConfigBase): def _json_extra (self): return { 'type': 'string' } class Integer (ConfigBase): def _json_extra (self): return { 'type': 'integer' } class Float (ConfigBase): def _json_extra (self): return { 'type': 'number' } class Boolean (ConfigBase): def __init__ (self, **kwargs): self.false_title = _kwarg_maybe('false_title', kwargs, 'No') self.true_title = _kwarg_maybe('true_title', kwargs, 'Yes') self.first_value = _kwarg_maybe('first_value', kwargs, False) ConfigBase.__init__(self, **kwargs) def _json_extra (self): return { 'type': 'boolean', 'enum': self._order([False, True]), 'options': { 'enum_titles': self._order([self.false_title, self.true_title]) } } def _order (self, x): return list(reversed(x)) if self.first_value else x class Compound (ConfigBase): def __init__ (self, name, **kwargs): self.name = name self.attrs = _kwarg('attrs', kwargs) if 'title' not in kwargs: kwargs['title'] = name ConfigBase.__init__(self, **kwargs) def _json_extra (self): return { 'type': 'object', 'additionalProperties': False, 'properties': _merge_dicts( { '_compoundName': { 'constantValue': self.name, 'options': { 'hidden': True }, } }, *( [ { param.kwargs['key']: _merge_dicts( param.json_schema(), { 'propertyOrder' : i } ) } for (i, param) in enumerate(self.attrs) ] + [ param._json_new_properties(self.ident) for (i, param) in enumerate(self.attrs) ] ) ) } class Array (ConfigBase): def __init__ (self, **kwargs): self.elem = _kwarg('elem', kwargs) self.table = _kwarg_maybe('table', kwargs, False) self.copy_name_key = _kwarg_maybe('copy_name_key', kwargs) self.copy_name_suffix = _kwarg_maybe('copy_name_suffix', kwargs, ' (copy)') ConfigBase.__init__(self, **kwargs) def _json_extra (self): return _merge_dicts( { 'type': 'array', 'items': self.elem.json_schema() }, ({ 'format': 'table' } if self.table else {}), ({ 'copyTemplate': 'return ce_copyhelper(vars.rows,vars.row,{},{});'.format(json.dumps(self.copy_name_key), json.dumps(self.copy_name_suffix)) } if self.copy_name_key is not None else {}), ) class OneOf (ConfigBase): def __init__ (self, **kwargs): self.choices = _kwarg('choices', kwargs) ConfigBase.__init__(self, **kwargs) def _json_extra (self): return { 'oneOf': [choice.json_schema() for choice in self.choices], 'selectKey': '_compoundName', } class Constant (ConfigBase): def __init__ (self, **kwargs): self.value = _kwarg('value', kwargs) ConfigBase.__init__(self, **kwargs) def _json_extra (self): return { 'constantValue': self.value, 'options': { 'hidden': True }, } class Reference (ConfigBase): def __init__ (self, **kwargs): self.ref_array = _kwarg('ref_array', kwargs) self.ref_id_key = _kwarg('ref_id_key', kwargs) self.ref_name_key = _kwarg('ref_name_key', kwargs) self.deref_key = _kwarg_maybe('deref_key', kwargs) ConfigBase.__init__(self, **kwargs) def _json_extra (self): return { 'type': 'string', 'watch': { 'watch_array': self.ref_array['base'] }, 'enumSource': { 'sourceTemplate': 'return {};'.format(self._array_expr()), 'title': 'return vars.item[{}];'.format(json.dumps(self.ref_name_key)), 'value': 'return vars.item[{}];'.format(json.dumps(self.ref_id_key)), }, } def _json_new_properties (self, container_id): assert container_id is not None return ({ self.deref_key: { 'watch': { 'watch_array': self.ref_array['base'], 'watch_id': '{}.{}'.format(container_id, self.kwargs['key']) }, 'valueTemplate': 'return 
ce_deref({},{},vars.watch_id);'.format(self._array_expr(), json.dumps(self.ref_id_key)), 'excludeFromFinalValue': True } } if self.deref_key is not None else {}) def _array_expr (self): return 'ce_refarr(vars,["watch_array"{}])'.format(''.join(',{}'.format(json.dumps(attr)) for attr in self.ref_array['descend']))
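# Usage sketch (not part of the original module): builds a small schema from the
# classes above and prints the resulting JSON Schema. The 'key' kwarg is how
# Compound names each property (it is read from param.kwargs in
# Compound._json_extra); all field names and defaults below are illustrative.
if __name__ == '__main__':
    example = Compound('server', title='Server', attrs=[
        String(key='host', title='Host', default='127.0.0.1'),
        Integer(key='port', title='Port', default=8080),
        Boolean(key='verbose', title='Verbose logging'),
    ])
    print(json.dumps(example.json_schema(), indent=2, sort_keys=True))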
# -*- coding: utf-8 -*- # Copyright 2017 IBM RESEARCH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= """ Quantum Tomography Module Description: This module contains functions for performing quantum state and quantum process tomography. This includes: - Functions for generating a set of circuits in a QuantumProgram to extract tomographically complete sets of measurement data. - Functions for generating a tomography data set from the QuantumProgram results after the circuits have been executed on a backend. - Functions for reconstructing a quantum state, or quantum process (Choi-matrix) from tomography data sets. Reconstruction Methods: Currently implemented reconstruction methods are - Linear inversion by weighted least-squares fitting. - Fast maximum likelihood reconstruction using ref [1]. References: [1] J Smolin, JM Gambetta, G Smith, Phys. Rev. Lett. 108, 070502 (2012). Open access: arXiv:1106.5458 [quant-ph]. """ import numpy as np from functools import reduce from re import match from itertools import product from qiskit.tools.qi.qi import vectorize, devectorize, outer ############################################################### # Tomography circuit generation ############################################################### def build_state_tomography_circuits(Q_program, name, qubits, qreg, creg, silent=False): """ Add state tomography measurement circuits to a QuantumProgram. The quantum program must contain a circuit 'name', which is treated as a state preparation circuit. This function then appends the circuit with a tomographically overcomplete set of measurements in the Pauli basis for each qubit to be measured. For n-qubit tomography this result in 3 ** n measurement circuits being added to the quantum program. Args: Q_program (QuantumProgram): A quantum program to store the circuits. name (string): The name of the base circuit to be appended. qubits (list[int]): a list of the qubit indexes of qreg to be measured. qreg (QuantumRegister): the quantum register containing qubits to be measured. creg (ClassicalRegister): the classical register containing bits to store measurement outcomes. silent (bool, optional): hide verbose output. Returns: A list of names of the added quantum state tomography circuits. Example: ['circ_measX0', 'circ_measY0', 'circ_measZ0'] """ labels = __add_meas_circuits(Q_program, name, qubits, qreg, creg) if not silent: print('>> created state tomography circuits for "%s"' % name) return labels def build_process_tomography_circuits(Q_program, name, qubits, qreg, creg, silent=False): """ Add process tomography measurement circuits to a QuantumProgram. The quantum program must contain a circuit 'name', which is the circuit that will be reconstructed via tomographic measurements. This function then prepends and appends the circuit with a tomographically overcomplete set of preparations and measurements in the Pauli basis for each qubit to be measured. 
For n-qubit process tomography this result in (6 ** n) * (3 ** n) circuits being added to the quantum program: - 3 ** n measurements in the Pauli X, Y, Z bases. - 6 ** n preparations in the +1 and -1 eigenstates of X, Y, Z. Args: Q_program (QuantumProgram): A quantum program to store the circuits. name (string): The name of the base circuit to be appended. qubits (list[int]): a list of the qubit indexes of qreg to be measured. qreg (QuantumRegister): the quantum register containing qubits to be measured. creg (ClassicalRegister): the classical register containing bits to store measurement outcomes. silent (bool, optional): hide verbose output. Returns: A list of names of the added quantum process tomography circuits. Example: ['circ_prepXp0_measX0', 'circ_prepXp0_measY0', 'circ_prepXp0_measZ0', 'circ_prepXm0_measX0', 'circ_prepXm0_measY0', 'circ_prepXm0_measZ0', 'circ_prepYp0_measX0', 'circ_prepYp0_measY0', 'circ_prepYp0_measZ0', 'circ_prepYm0_measX0', 'circ_prepYm0_measY0', 'circ_prepYm0_measZ0', 'circ_prepZp0_measX0', 'circ_prepZp0_measY0', 'circ_prepZp0_measZ0', 'circ_prepZm0_measX0', 'circ_prepZm0_measY0', 'circ_prepZm0_measZ0'] """ # add preparation circuits preps = __add_prep_circuits(Q_program, name, qubits, qreg, creg) # add measurement circuits for each prep circuit labels = [] for circ in preps: labels += __add_meas_circuits(Q_program, circ, qubits, qreg, creg) # delete temp prep output del Q_program._QuantumProgram__quantum_program[circ] if not silent: print('>> created process tomography circuits for "%s"' % name) return labels def __tomo_dicts(qubits, basis=None, states=False): """Helper function. Build a dictionary assigning a basis element to a qubit. Args: qubit (int): the qubit to add tomos (list[dict]): list of tomo_dicts to add to basis (list[str], optional): basis to use. If not specified the default is ['X', 'Y', 'Z'] Returns: A new list of tomo_dict """ if isinstance(qubits, int): qubits = [qubits] if basis is None: basis = __DEFAULT_BASIS if states: ns = len(list(basis.values())[0]) lst = [(b, s) for b in basis.keys() for s in range(ns)] else: lst = basis.keys() return [dict(zip(qubits, b)) for b in product(lst, repeat=len(qubits))] def __add_meas_circuits(Q_program, name, qubits, qreg, creg): """ Add measurement circuits to a quantum program. See: build_state_tomography_circuits. build_process_tomography_circuits. """ orig = Q_program.get_circuit(name) labels = [] for dic in __tomo_dicts(qubits): # Construct meas circuit name label = '_meas' for qubit, op in dic.items(): label += op + str(qubit) circuit = Q_program.create_circuit(label, [qreg], [creg]) # add gates to circuit for qubit, op in dic.items(): circuit.barrier(qreg[qubit]) if op == "X": circuit.u2(0., np.pi, qreg[qubit]) # H elif op == "Y": circuit.u2(0., 0.5 * np.pi, qreg[qubit]) # H.S^* circuit.measure(qreg[qubit], creg[qubit]) # add circuit to QuantumProgram Q_program.add_circuit(name+label, orig + circuit) # add label to output labels.append(name+label) # delete temp circuit del Q_program._QuantumProgram__quantum_program[label] return labels def __add_prep_gates(circuit, qreg, qubit, op): """ Add state preparation gates to a circuit. 
""" p, s = op if p == "X": if s == 1: circuit.u2(np.pi, np.pi, qreg[qubit]) # H.X else: circuit.u2(0., np.pi, qreg[qubit]) # H if p == "Y": if s == 1: circuit.u2(-0.5 * np.pi, np.pi, qreg[qubit]) # S.H.X else: circuit.u2(0.5 * np.pi, np.pi, qreg[qubit]) # S.H if p == "Z" and s == 1: circuit.u3(np.pi, 0., np.pi, qreg[qubit]) # X def __add_prep_circuits(Q_program, name, qubits, qreg, creg): """ Add preparation circuits to a quantum program. See: build_process_tomography_circuits. """ orig = Q_program.get_circuit(name) labels = [] state = {0: 'p', 1: 'm'} for dic in __tomo_dicts(qubits, states=True): # make circuit label label = '_prep' for qubit, op in dic.items(): label += op[0] + state[op[1]] + str(qubit) # create circuit and add gates circuit = Q_program.create_circuit(label, [qreg], [creg]) for qubit, op in dic.items(): __add_prep_gates(circuit, qreg, qubit, op) # add circuit to QuantumProgram Q_program.add_circuit(name + label, circuit + orig) # add label to output labels += [name+label] # delete temp circuit del Q_program._QuantumProgram__quantum_program[label] return labels ############################################################### # Tomography circuit labels ############################################################### def __tomo_labels(name, qubits, basis=None, states=False): """Helper function. """ labels = [] state = {0: 'p', 1: 'm'} for dic in __tomo_dicts(qubits, states=states): label = '' if states: for qubit, op in dic.items(): label += op[0] + state[op[1]] + str(qubit) else: for qubit, op in dic.items(): label += op[0] + str(qubit) labels.append(name+label) return labels def state_tomography_circuit_names(name, qubits): """ Return a list of state tomography circuit names. This list is the same as that returned by the build_state_tomography_circuits function. Args: name (string): the name of the original state preparation circuit. qubits: (list[int]): the qubits being measured. Returns: A list of circuit names. """ return __tomo_labels(name + '_meas', qubits) def process_tomography_circuit_names(name, qubits): """ Return a list of process tomography circuit names. This list is the same as that returned by the build_process_tomography_circuits function. Args: name (string): the name of the original circuit to be reconstructed. qubits: (list[int]): the qubits being measured. Returns: A list of circuit names. """ preps = __tomo_labels(name + '_prep', qubits, states=True) return reduce(lambda acc, c: acc + __tomo_labels(c + '_meas', qubits), preps, []) ############################################################### # Preformatting count data ############################################################### def __counts_keys(n): """Generate outcome bitstrings for n-qubits. Args: n (int): the number of qubits. Returns: A list of bitstrings ordered as follows: Example: n=2 returns ['00', '01', '10', '11']. """ return [bin(j)[2:].zfill(n) for j in range(2 ** n)] def marginal_counts(counts, meas_qubits): """ Compute the marginal counts for a subset of measured qubits. Args: counts (dict{str:int}): the counts returned from a backend. meas_qubits (list[int]): the qubits to return the marginal counts distribution for. Returns: A counts dict for the meas_qubits.abs Example: if counts = {'00': 10, '01': 5} marginal_counts(counts, [0]) returns {'0': 15, '1': 0}. marginal_counts(counts, [0]) returns {'0': 10, '1': 5}. 
""" # Extract total number of qubits from count keys nq = len(list(counts.keys())[0]) # keys for measured qubits only qs = sorted(meas_qubits, reverse=True) meas_keys = __counts_keys(len(qs)) # get regex match strings for suming outcomes of other qubits rgx = [reduce(lambda x, y: (key[qs.index(y)] if y in qs else '\\d') + x, range(nq), '') for key in meas_keys] # build the return list meas_counts = [] for m in rgx: c = 0 for key, val in counts.items(): if match(m, key): c += val meas_counts.append(c) # return as counts dict on measured qubits only return dict(zip(meas_keys, meas_counts)) ############################################################### # Tomography preparation and measurement bases ############################################################### # Default Pauli basis # This corresponds to measurements in the X, Y, Z basis where # Outcomes 0,1 are the +1,-1 eigenstates respectively. # State preparation is also done in the +1 and -1 eigenstates. __DEFAULT_BASIS = {'X': [np.array([[0.5, 0.5], [0.5, 0.5]]), np.array([[0.5, -0.5], [-0.5, 0.5]])], 'Y': [np.array([[0.5, -0.5j], [0.5j, 0.5]]), np.array([[0.5, 0.5j], [-0.5j, 0.5]])], 'Z': [np.array([[1, 0], [0, 0]]), np.array([[0, 0], [0, 1]])]} def __get_meas_basis_ops(tup, basis): """ Return a n-qubit projector for a given measurement. """ # reverse tuple so least significant qubit is to the right return reduce(lambda acc, b: [np.kron(a, j) for a in acc for j in basis[b]], reversed(tup), [1]) def __meas_basis(n, basis): """ Return an ordered list of n-qubit measurment projectors. """ return [dict(zip(__counts_keys(n), __get_meas_basis_ops(key, basis))) for key in product(basis.keys(), repeat=n)] def __get_prep_basis_op(dic, basis): """ Return an n-qubit projector for a given prepration. """ keys = sorted(dic.keys()) # order qubits [0,1,...] tups = [dic[k] for k in keys] return reduce(lambda acc, b: np.kron(basis[b[0]][b[1]], acc), tups, [1]) def __prep_basis(n, basis): """ Return an ordered list of n-qubit preparation projectors. """ # use same function as prep circuits to get order right ordered = __tomo_dicts(range(n), states=True) return [__get_prep_basis_op(dic, basis) for dic in ordered] def state_tomography_data(Q_result, name, meas_qubits, basis=None): """ Return a list of state tomography measurement outcomes. Args: Q_result (Result): Results from execution of a state tomography circuits on a backend. name (string): The name of the base state preparation circuit. meas_qubits (list[int]): a list of the qubit indexes measured. basis (basis dict, optional): the basis used for measurement. Default is the Pauli basis. Returns: A list of dicts for the outcome of each state tomography measurement circuit. The keys of the dictionary are { 'counts': dict('str': int), <the marginal counts for measured qubits>, 'shots': int, <total number of shots for measurement circuit> 'meas_basis': dict('str': np.array) <the projector for the measurement outcomes> } """ if basis is None: basis = __DEFAULT_BASIS labels = state_tomography_circuit_names(name, meas_qubits) counts = [marginal_counts(Q_result.get_counts(circ), meas_qubits) for circ in labels] shots = [sum(c.values()) for c in counts] meas_basis = __meas_basis(len(meas_qubits), basis) ret = [{'counts': i, 'meas_basis': j, 'shots': k} for i, j, k in zip(counts, meas_basis, shots)] return ret def process_tomography_data(Q_result, name, meas_qubits, basis=None): """ Return a list of process tomography measurement outcomes. 
Args: Q_result (Result): Results from execution of a process tomography circuits on a backend. name (string): The name of the circuit being reconstructed. meas_qubits (list[int]): a list of the qubit indexes measured. basis (basis dict, optional): the basis used for measurement. Default is the Pauli basis. Returns: A list of dicts for the outcome of each process tomography measurement circuit. The keys of the dictionary are { 'counts': dict('str': int), <the marginal counts for measured qubits>, 'shots': int, <total number of shots for measurement circuit> 'meas_basis': dict('str': np.array), <the projector for the measurement outcomes> 'prep_basis': np.array, <the projector for the prepared input state> } """ if basis is None: basis = __DEFAULT_BASIS n = len(meas_qubits) labels = process_tomography_circuit_names(name, meas_qubits) counts = [marginal_counts(Q_result.get_counts(circ), meas_qubits) for circ in labels] shots = [sum(c.values()) for c in counts] meas_basis = __meas_basis(n, basis) prep_basis = __prep_basis(n, basis) ret = [{'meas_basis': meas, 'prep_basis': prep} for prep in prep_basis for meas in meas_basis] for dic, cts, sts in zip(ret, counts, shots): dic['counts'] = cts dic['shots'] = sts return ret ############################################################### # Tomographic Reconstruction functions. ############################################################### def __tomo_basis_matrix(meas_basis): """Return a matrix of vectorized measurement operators. Args: meas_basis(list(array_like)): measurement operators [M_j]. Returns: The operators S = sum_j |j><M_j|. """ n = len(meas_basis) d = meas_basis[0].size S = np.array([vectorize(m).conj() for m in meas_basis]) return S.reshape(n, d) def __tomo_linear_inv(freqs, ops, weights=None, trace=None): """ Reconstruct a matrix through linear inversion. Args: freqs (list[float]): list of observed frequences. ops (list[np.array]): list of corresponding projectors. weights (list[float] or array_like, optional): weights to be used for weighted fitting. trace (float, optional): trace of returned operator. Returns: A numpy array of the reconstructed operator. """ # get weights matrix if weights is not None: W = np.array(weights) if W.ndim == 1: W = np.diag(W) # Get basis S matrix S = np.array([vectorize(m).conj() for m in ops]).reshape(len(ops), ops[0].size) if weights is not None: S = np.dot(W, S) # W.S # get frequencies vec v = np.array(freqs) # |f> if weights is not None: v = np.dot(W, freqs) # W.|f> Sdg = S.T.conj() # S^*.W^* inv = np.linalg.pinv(np.dot(Sdg, S)) # (S^*.W^*.W.S)^-1 # linear inversion of freqs ret = devectorize(np.dot(inv, np.dot(Sdg, v))) # renormalize to input trace value if trace is not None: ret = trace * ret / np.trace(ret) return ret def __leastsq_fit(data, weights=None, trace=None, beta=None): """ Reconstruct a state from unconstrained least-squares fitting. Args: data (list[dict]): state or process tomography data. weights (list or array, optional): weights to use for least squares fitting. The default is standard deviation from a binomial distribution. trace (float, optional): trace of returned operator. The default is 1. beta (float >=0, optional): hedge parameter for computing frequencies from zero-count data. The default value is 0.50922. Returns: A numpy array of the reconstructed operator. """ if trace is None: trace = 1. 
# default to unit trace ks = data[0]['counts'].keys() K = len(ks) # Get counts and shots ns = np.array([dat['counts'][k] for dat in data for k in ks]) shots = np.array([dat['shots'] for dat in data for k in ks]) # convert to freqs using beta to hedge against zero counts if beta is None: beta = 0.50922 freqs = (ns + beta) / (shots + K * beta) # Use standard least squares fitting weights if weights is None: weights = np.sqrt(shots / (freqs * (1 - freqs))) # Get measurement basis ops if 'prep_basis' in data[0]: # process tomography fit ops = [np.kron(dat['prep_basis'].T, dat['meas_basis'][k]) for dat in data for k in ks] else: # state tomography fit ops = [dat['meas_basis'][k] for dat in data for k in ks] return __tomo_linear_inv(freqs, ops, weights, trace=trace) def __wizard(rho, epsilon=None): """ Returns the nearest postitive semidefinite operator to an operator. This method is based on reference [1]. It constrains positivity by setting negative eigenvalues to zero and rescaling the positive eigenvalues. Args: rho (array_like): the input operator. epsilon(float >=0, optional): threshold for truncating small eigenvalues values to zero. Returns: A positive semidefinite numpy array. """ if epsilon is None: epsilon = 0. # default value dim = len(rho) rho_wizard = np.zeros([dim, dim]) v, w = np.linalg.eigh(rho) # v eigenvecrors v[0] < v[1] <... for j in range(dim): if v[j] < epsilon: tmp = v[j] v[j] = 0. # redistribute loop x = 0. for k in range(j + 1, dim): x += tmp / (dim-(j+1)) v[k] = v[k] + tmp / (dim - (j+1)) for j in range(dim): rho_wizard = rho_wizard + v[j] * outer(w[:, j]) return rho_wizard def __get_option(opt, options): """ Return an optional value or None if not found. """ if options is not None: if opt in options: return options[opt] return None def fit_tomography_data(data, method=None, options=None): """ Reconstruct a density matrix or process-matrix from tomography data. If the input data is state_tomography_data the returned operator will be a density matrix. If the input data is process_tomography_data the returned operator will be a Choi-matrix in the column-vectorization convention. Args: data (dict): process tomography measurement data. method (str, optional): the fitting method to use. Available methods: - 'wizard' (default) - 'leastsq' options (dict, optional): additional options for fitting method. Returns: The fitted operator. Available methods: - 'wizard' (Default): The returned operator will be constrained to be positive-semidefinite. Options: - 'trace': the trace of the returned operator. The default value is 1. - 'beta': hedging parameter for computing frequencies from zero-count data. The default value is 0.50922. - 'epsilon: threshold for truncating small eigenvalues to zero. The default value is 0 - 'leastsq': Fitting without postive-semidefinite constraint. Options: - 'trace': Same as for 'wizard' method. - 'beta': Same as for 'wizard' method. 
""" if method is None: method = 'wizard' # set default method if method in ['wizard', 'leastsq']: # get options trace = __get_option('trace', options) beta = __get_option('beta', options) # fit state rho = __leastsq_fit(data, trace=trace, beta=beta) if method == 'wizard': # Use wizard method to constrain positivity epsilon = __get_option('epsilon', options) rho = __wizard(rho, epsilon=epsilon) return rho else: print('error: method unknown reconstruction method "%s"' % method) ############################################################### # Wigner function tomography ############################################################### def build_wigner_circuits(Q_program, name, phis, thetas, qubits, qreg, creg, silent=False): """Create the circuits to rotate to points in phase space Args: Q_program (QuantumProgram): A quantum program to store the circuits. name (string): The name of the base circuit to be appended. phis (np.matrix[[complex]]): thetas (np.matrix[[complex]]): qubits (list[int]): a list of the qubit indexes of qreg to be measured. qreg (QuantumRegister): the quantum register containing qubits to be measured. creg (ClassicalRegister): the classical register containing bits to store measurement outcomes. silent (bool, optional): hide verbose output. Returns: A list of names of the added wigner function circuits. """ orig = Q_program.get_circuit(name) labels = [] points = len(phis[0]) for point in range(points): label = '_wigner_phase_point' label += str(point) circuit = Q_program.create_circuit(label, [qreg], [creg]) c_index = 0 for qubit in range(len(qubits)): circuit.u3(thetas[qubit][point], 0, phis[qubit][point],qreg[qubits[qubit]]) circuit.measure(qreg[qubits[qubit]],creg[qubits[qubit]]) Q_program.add_circuit(name+label, orig+circuit) labels.append(name+label) if not silent: print('>> created Wigner function circuits for "%s"' % name) return labels def wigner_data(Q_result, name, meas_qubits, labels, shots=None): """Get the value of the Wigner function from measurement results. Args: Q_result (Result): Results from execution of a state tomography circuits on a backend. name (string): The name of the base state preparation circuit. meas_qubits (list[int]): a list of the qubit indexes measured. labels : a list of names of the circuits Returns: The values of the Wigner function at measured points in phase space """ num = len(meas_qubits) dim = 2**num P = [0.5+0.5*np.sqrt(3),0.5-0.5*np.sqrt(3)] parity = 1 for i in range(num): parity = np.kron(parity,P) W = [0]*len(labels) wpt = 0 counts = [marginal_counts(Q_result.get_counts(circ), meas_qubits) for circ in labels] for entry in counts: x =[0]*dim for i in range(dim): if bin(i)[2:].zfill(num) in entry: x[i] = float(entry[bin(i)[2:].zfill(num)]) if shots is None: shots = np.sum(x) for i in range(dim): W[wpt] = W[wpt]+(x[i]/shots)*parity[i] wpt += 1 return W
"""Tests for selector_events.py""" import errno import socket import unittest from unittest import mock try: import ssl except ImportError: ssl = None import asyncio from asyncio import selectors from asyncio import test_utils from asyncio.selector_events import BaseSelectorEventLoop from asyncio.selector_events import _SelectorTransport from asyncio.selector_events import _SelectorSslTransport from asyncio.selector_events import _SelectorSocketTransport from asyncio.selector_events import _SelectorDatagramTransport MOCK_ANY = mock.ANY class TestBaseSelectorEventLoop(BaseSelectorEventLoop): def close(self): # Don't call the close() method of the parent class, because the # selector is mocked self._closed = True def _make_self_pipe(self): self._ssock = mock.Mock() self._csock = mock.Mock() self._internal_fds += 1 def list_to_buffer(l=()): return bytearray().join(l) def close_transport(transport): # Don't call transport.close() because the event loop and the selector # are mocked if transport._sock is None: return transport._sock.close() transport._sock = None class BaseSelectorEventLoopTests(test_utils.TestCase): def setUp(self): self.selector = mock.Mock() self.selector.select.return_value = [] self.loop = TestBaseSelectorEventLoop(self.selector) self.set_event_loop(self.loop) def test_make_socket_transport(self): m = mock.Mock() self.loop.add_reader = mock.Mock() self.loop.add_reader._is_coroutine = False transport = self.loop._make_socket_transport(m, asyncio.Protocol()) self.assertIsInstance(transport, _SelectorSocketTransport) # Calling repr() must not fail when the event loop is closed self.loop.close() repr(transport) close_transport(transport) @unittest.skipIf(ssl is None, 'No ssl module') def test_make_ssl_transport(self): m = mock.Mock() self.loop._add_reader = mock.Mock() self.loop._add_reader._is_coroutine = False self.loop._add_writer = mock.Mock() self.loop._remove_reader = mock.Mock() self.loop._remove_writer = mock.Mock() waiter = asyncio.Future(loop=self.loop) with test_utils.disable_logger(): transport = self.loop._make_ssl_transport( m, asyncio.Protocol(), m, waiter) # execute the handshake while the logger is disabled # to ignore SSL handshake failure test_utils.run_briefly(self.loop) # Sanity check class_name = transport.__class__.__name__ self.assertIn("ssl", class_name.lower()) self.assertIn("transport", class_name.lower()) transport.close() # execute pending callbacks to close the socket transport test_utils.run_briefly(self.loop) @mock.patch('asyncio.selector_events.ssl', None) @mock.patch('asyncio.sslproto.ssl', None) def test_make_ssl_transport_without_ssl_error(self): m = mock.Mock() self.loop.add_reader = mock.Mock() self.loop.add_writer = mock.Mock() self.loop.remove_reader = mock.Mock() self.loop.remove_writer = mock.Mock() with self.assertRaises(RuntimeError): self.loop._make_ssl_transport(m, m, m, m) def test_close(self): class EventLoop(BaseSelectorEventLoop): def _make_self_pipe(self): self._ssock = mock.Mock() self._csock = mock.Mock() self._internal_fds += 1 self.loop = EventLoop(self.selector) self.set_event_loop(self.loop) ssock = self.loop._ssock ssock.fileno.return_value = 7 csock = self.loop._csock csock.fileno.return_value = 1 remove_reader = self.loop._remove_reader = mock.Mock() self.loop._selector.close() self.loop._selector = selector = mock.Mock() self.assertFalse(self.loop.is_closed()) self.loop.close() self.assertTrue(self.loop.is_closed()) self.assertIsNone(self.loop._selector) self.assertIsNone(self.loop._csock) 
self.assertIsNone(self.loop._ssock) selector.close.assert_called_with() ssock.close.assert_called_with() csock.close.assert_called_with() remove_reader.assert_called_with(7) # it should be possible to call close() more than once self.loop.close() self.loop.close() # operation blocked when the loop is closed f = asyncio.Future(loop=self.loop) self.assertRaises(RuntimeError, self.loop.run_forever) self.assertRaises(RuntimeError, self.loop.run_until_complete, f) fd = 0 def callback(): pass self.assertRaises(RuntimeError, self.loop.add_reader, fd, callback) self.assertRaises(RuntimeError, self.loop.add_writer, fd, callback) def test_close_no_selector(self): self.loop.remove_reader = mock.Mock() self.loop._selector.close() self.loop._selector = None self.loop.close() self.assertIsNone(self.loop._selector) def test_socketpair(self): self.assertRaises(NotImplementedError, self.loop._socketpair) def test_read_from_self_tryagain(self): self.loop._ssock.recv.side_effect = BlockingIOError self.assertIsNone(self.loop._read_from_self()) def test_read_from_self_exception(self): self.loop._ssock.recv.side_effect = OSError self.assertRaises(OSError, self.loop._read_from_self) def test_write_to_self_tryagain(self): self.loop._csock.send.side_effect = BlockingIOError with test_utils.disable_logger(): self.assertIsNone(self.loop._write_to_self()) def test_write_to_self_exception(self): # _write_to_self() swallows OSError self.loop._csock.send.side_effect = RuntimeError() self.assertRaises(RuntimeError, self.loop._write_to_self) def test_sock_recv(self): sock = test_utils.mock_nonblocking_socket() self.loop._sock_recv = mock.Mock() f = self.loop.sock_recv(sock, 1024) self.assertIsInstance(f, asyncio.Future) self.loop._sock_recv.assert_called_with(f, False, sock, 1024) def test__sock_recv_canceled_fut(self): sock = mock.Mock() f = asyncio.Future(loop=self.loop) f.cancel() self.loop._sock_recv(f, False, sock, 1024) self.assertFalse(sock.recv.called) def test__sock_recv_unregister(self): sock = mock.Mock() sock.fileno.return_value = 10 f = asyncio.Future(loop=self.loop) f.cancel() self.loop.remove_reader = mock.Mock() self.loop._sock_recv(f, True, sock, 1024) self.assertEqual((10,), self.loop.remove_reader.call_args[0]) def test__sock_recv_tryagain(self): f = asyncio.Future(loop=self.loop) sock = mock.Mock() sock.fileno.return_value = 10 sock.recv.side_effect = BlockingIOError self.loop.add_reader = mock.Mock() self.loop._sock_recv(f, False, sock, 1024) self.assertEqual((10, self.loop._sock_recv, f, True, sock, 1024), self.loop.add_reader.call_args[0]) def test__sock_recv_exception(self): f = asyncio.Future(loop=self.loop) sock = mock.Mock() sock.fileno.return_value = 10 err = sock.recv.side_effect = OSError() self.loop._sock_recv(f, False, sock, 1024) self.assertIs(err, f.exception()) def test_sock_sendall(self): sock = test_utils.mock_nonblocking_socket() self.loop._sock_sendall = mock.Mock() f = self.loop.sock_sendall(sock, b'data') self.assertIsInstance(f, asyncio.Future) self.assertEqual( (f, False, sock, b'data'), self.loop._sock_sendall.call_args[0]) def test_sock_sendall_nodata(self): sock = test_utils.mock_nonblocking_socket() self.loop._sock_sendall = mock.Mock() f = self.loop.sock_sendall(sock, b'') self.assertIsInstance(f, asyncio.Future) self.assertTrue(f.done()) self.assertIsNone(f.result()) self.assertFalse(self.loop._sock_sendall.called) def test__sock_sendall_canceled_fut(self): sock = mock.Mock() f = asyncio.Future(loop=self.loop) f.cancel() self.loop._sock_sendall(f, False, sock, b'data') 
self.assertFalse(sock.send.called) def test__sock_sendall_unregister(self): sock = mock.Mock() sock.fileno.return_value = 10 f = asyncio.Future(loop=self.loop) f.cancel() self.loop.remove_writer = mock.Mock() self.loop._sock_sendall(f, True, sock, b'data') self.assertEqual((10,), self.loop.remove_writer.call_args[0]) def test__sock_sendall_tryagain(self): f = asyncio.Future(loop=self.loop) sock = mock.Mock() sock.fileno.return_value = 10 sock.send.side_effect = BlockingIOError self.loop.add_writer = mock.Mock() self.loop._sock_sendall(f, False, sock, b'data') self.assertEqual( (10, self.loop._sock_sendall, f, True, sock, b'data'), self.loop.add_writer.call_args[0]) def test__sock_sendall_interrupted(self): f = asyncio.Future(loop=self.loop) sock = mock.Mock() sock.fileno.return_value = 10 sock.send.side_effect = InterruptedError self.loop.add_writer = mock.Mock() self.loop._sock_sendall(f, False, sock, b'data') self.assertEqual( (10, self.loop._sock_sendall, f, True, sock, b'data'), self.loop.add_writer.call_args[0]) def test__sock_sendall_exception(self): f = asyncio.Future(loop=self.loop) sock = mock.Mock() sock.fileno.return_value = 10 err = sock.send.side_effect = OSError() self.loop._sock_sendall(f, False, sock, b'data') self.assertIs(f.exception(), err) def test__sock_sendall(self): sock = mock.Mock() f = asyncio.Future(loop=self.loop) sock.fileno.return_value = 10 sock.send.return_value = 4 self.loop._sock_sendall(f, False, sock, b'data') self.assertTrue(f.done()) self.assertIsNone(f.result()) def test__sock_sendall_partial(self): sock = mock.Mock() f = asyncio.Future(loop=self.loop) sock.fileno.return_value = 10 sock.send.return_value = 2 self.loop.add_writer = mock.Mock() self.loop._sock_sendall(f, False, sock, b'data') self.assertFalse(f.done()) self.assertEqual( (10, self.loop._sock_sendall, f, True, sock, b'ta'), self.loop.add_writer.call_args[0]) def test__sock_sendall_none(self): sock = mock.Mock() f = asyncio.Future(loop=self.loop) sock.fileno.return_value = 10 sock.send.return_value = 0 self.loop.add_writer = mock.Mock() self.loop._sock_sendall(f, False, sock, b'data') self.assertFalse(f.done()) self.assertEqual( (10, self.loop._sock_sendall, f, True, sock, b'data'), self.loop.add_writer.call_args[0]) def test_sock_connect_timeout(self): # asyncio issue #205: sock_connect() must unregister the socket on # timeout error # prepare mocks self.loop.add_writer = mock.Mock() self.loop.remove_writer = mock.Mock() sock = test_utils.mock_nonblocking_socket() sock.connect.side_effect = BlockingIOError # first call to sock_connect() registers the socket fut = self.loop.create_task( self.loop.sock_connect(sock, ('127.0.0.1', 80))) self.loop._run_once() self.assertTrue(sock.connect.called) self.assertTrue(self.loop.add_writer.called) # on timeout, the socket must be unregistered sock.connect.reset_mock() fut.cancel() with self.assertRaises(asyncio.CancelledError): self.loop.run_until_complete(fut) self.assertTrue(self.loop.remove_writer.called) @mock.patch('socket.getaddrinfo') def test_sock_connect_resolve_using_socket_params(self, m_gai): addr = ('need-resolution.com', 8080) sock = test_utils.mock_nonblocking_socket() m_gai.side_effect = (None, None, None, None, ('127.0.0.1', 0)) m_gai._is_coroutine = False con = self.loop.create_task(self.loop.sock_connect(sock, addr)) while not m_gai.called: self.loop._run_once() m_gai.assert_called_with( addr[0], addr[1], sock.family, sock.type, sock.proto, 0) con.cancel() with self.assertRaises(asyncio.CancelledError): 
self.loop.run_until_complete(con) def test__sock_connect(self): f = asyncio.Future(loop=self.loop) sock = mock.Mock() sock.fileno.return_value = 10 resolved = self.loop.create_future() resolved.set_result([(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP, '', ('127.0.0.1', 8080))]) self.loop._sock_connect(f, sock, resolved) self.assertTrue(f.done()) self.assertIsNone(f.result()) self.assertTrue(sock.connect.called) def test__sock_connect_cb_cancelled_fut(self): sock = mock.Mock() self.loop.remove_writer = mock.Mock() f = asyncio.Future(loop=self.loop) f.cancel() self.loop._sock_connect_cb(f, sock, ('127.0.0.1', 8080)) self.assertFalse(sock.getsockopt.called) def test__sock_connect_writer(self): # check that the fd is registered and then unregistered self.loop._process_events = mock.Mock() self.loop.add_writer = mock.Mock() self.loop.remove_writer = mock.Mock() sock = mock.Mock() sock.fileno.return_value = 10 sock.connect.side_effect = BlockingIOError sock.getsockopt.return_value = 0 address = ('127.0.0.1', 8080) resolved = self.loop.create_future() resolved.set_result([(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP, '', address)]) f = asyncio.Future(loop=self.loop) self.loop._sock_connect(f, sock, resolved) self.loop._run_once() self.assertTrue(self.loop.add_writer.called) self.assertEqual(10, self.loop.add_writer.call_args[0][0]) self.loop._sock_connect_cb(f, sock, address) # need to run the event loop to execute _sock_connect_done() callback self.loop.run_until_complete(f) self.assertEqual((10,), self.loop.remove_writer.call_args[0]) def test__sock_connect_cb_tryagain(self): f = asyncio.Future(loop=self.loop) sock = mock.Mock() sock.fileno.return_value = 10 sock.getsockopt.return_value = errno.EAGAIN # check that the exception is handled self.loop._sock_connect_cb(f, sock, ('127.0.0.1', 8080)) def test__sock_connect_cb_exception(self): f = asyncio.Future(loop=self.loop) sock = mock.Mock() sock.fileno.return_value = 10 sock.getsockopt.return_value = errno.ENOTCONN self.loop.remove_writer = mock.Mock() self.loop._sock_connect_cb(f, sock, ('127.0.0.1', 8080)) self.assertIsInstance(f.exception(), OSError) def test_sock_accept(self): sock = test_utils.mock_nonblocking_socket() self.loop._sock_accept = mock.Mock() f = self.loop.sock_accept(sock) self.assertIsInstance(f, asyncio.Future) self.assertEqual( (f, False, sock), self.loop._sock_accept.call_args[0]) def test__sock_accept(self): f = asyncio.Future(loop=self.loop) conn = mock.Mock() sock = mock.Mock() sock.fileno.return_value = 10 sock.accept.return_value = conn, ('127.0.0.1', 1000) self.loop._sock_accept(f, False, sock) self.assertTrue(f.done()) self.assertEqual((conn, ('127.0.0.1', 1000)), f.result()) self.assertEqual((False,), conn.setblocking.call_args[0]) def test__sock_accept_canceled_fut(self): sock = mock.Mock() f = asyncio.Future(loop=self.loop) f.cancel() self.loop._sock_accept(f, False, sock) self.assertFalse(sock.accept.called) def test__sock_accept_unregister(self): sock = mock.Mock() sock.fileno.return_value = 10 f = asyncio.Future(loop=self.loop) f.cancel() self.loop.remove_reader = mock.Mock() self.loop._sock_accept(f, True, sock) self.assertEqual((10,), self.loop.remove_reader.call_args[0]) def test__sock_accept_tryagain(self): f = asyncio.Future(loop=self.loop) sock = mock.Mock() sock.fileno.return_value = 10 sock.accept.side_effect = BlockingIOError self.loop.add_reader = mock.Mock() self.loop._sock_accept(f, False, sock) self.assertEqual( (10, self.loop._sock_accept, f, True, sock), 
self.loop.add_reader.call_args[0]) def test__sock_accept_exception(self): f = asyncio.Future(loop=self.loop) sock = mock.Mock() sock.fileno.return_value = 10 err = sock.accept.side_effect = OSError() self.loop._sock_accept(f, False, sock) self.assertIs(err, f.exception()) def test_add_reader(self): self.loop._selector.get_key.side_effect = KeyError cb = lambda: True self.loop.add_reader(1, cb) self.assertTrue(self.loop._selector.register.called) fd, mask, (r, w) = self.loop._selector.register.call_args[0] self.assertEqual(1, fd) self.assertEqual(selectors.EVENT_READ, mask) self.assertEqual(cb, r._callback) self.assertIsNone(w) def test_add_reader_existing(self): reader = mock.Mock() writer = mock.Mock() self.loop._selector.get_key.return_value = selectors.SelectorKey( 1, 1, selectors.EVENT_WRITE, (reader, writer)) cb = lambda: True self.loop.add_reader(1, cb) self.assertTrue(reader.cancel.called) self.assertFalse(self.loop._selector.register.called) self.assertTrue(self.loop._selector.modify.called) fd, mask, (r, w) = self.loop._selector.modify.call_args[0] self.assertEqual(1, fd) self.assertEqual(selectors.EVENT_WRITE | selectors.EVENT_READ, mask) self.assertEqual(cb, r._callback) self.assertEqual(writer, w) def test_add_reader_existing_writer(self): writer = mock.Mock() self.loop._selector.get_key.return_value = selectors.SelectorKey( 1, 1, selectors.EVENT_WRITE, (None, writer)) cb = lambda: True self.loop.add_reader(1, cb) self.assertFalse(self.loop._selector.register.called) self.assertTrue(self.loop._selector.modify.called) fd, mask, (r, w) = self.loop._selector.modify.call_args[0] self.assertEqual(1, fd) self.assertEqual(selectors.EVENT_WRITE | selectors.EVENT_READ, mask) self.assertEqual(cb, r._callback) self.assertEqual(writer, w) def test_remove_reader(self): self.loop._selector.get_key.return_value = selectors.SelectorKey( 1, 1, selectors.EVENT_READ, (None, None)) self.assertFalse(self.loop.remove_reader(1)) self.assertTrue(self.loop._selector.unregister.called) def test_remove_reader_read_write(self): reader = mock.Mock() writer = mock.Mock() self.loop._selector.get_key.return_value = selectors.SelectorKey( 1, 1, selectors.EVENT_READ | selectors.EVENT_WRITE, (reader, writer)) self.assertTrue( self.loop.remove_reader(1)) self.assertFalse(self.loop._selector.unregister.called) self.assertEqual( (1, selectors.EVENT_WRITE, (None, writer)), self.loop._selector.modify.call_args[0]) def test_remove_reader_unknown(self): self.loop._selector.get_key.side_effect = KeyError self.assertFalse( self.loop.remove_reader(1)) def test_add_writer(self): self.loop._selector.get_key.side_effect = KeyError cb = lambda: True self.loop.add_writer(1, cb) self.assertTrue(self.loop._selector.register.called) fd, mask, (r, w) = self.loop._selector.register.call_args[0] self.assertEqual(1, fd) self.assertEqual(selectors.EVENT_WRITE, mask) self.assertIsNone(r) self.assertEqual(cb, w._callback) def test_add_writer_existing(self): reader = mock.Mock() writer = mock.Mock() self.loop._selector.get_key.return_value = selectors.SelectorKey( 1, 1, selectors.EVENT_READ, (reader, writer)) cb = lambda: True self.loop.add_writer(1, cb) self.assertTrue(writer.cancel.called) self.assertFalse(self.loop._selector.register.called) self.assertTrue(self.loop._selector.modify.called) fd, mask, (r, w) = self.loop._selector.modify.call_args[0] self.assertEqual(1, fd) self.assertEqual(selectors.EVENT_WRITE | selectors.EVENT_READ, mask) self.assertEqual(reader, r) self.assertEqual(cb, w._callback) def test_remove_writer(self): 
self.loop._selector.get_key.return_value = selectors.SelectorKey( 1, 1, selectors.EVENT_WRITE, (None, None)) self.assertFalse(self.loop.remove_writer(1)) self.assertTrue(self.loop._selector.unregister.called) def test_remove_writer_read_write(self): reader = mock.Mock() writer = mock.Mock() self.loop._selector.get_key.return_value = selectors.SelectorKey( 1, 1, selectors.EVENT_READ | selectors.EVENT_WRITE, (reader, writer)) self.assertTrue( self.loop.remove_writer(1)) self.assertFalse(self.loop._selector.unregister.called) self.assertEqual( (1, selectors.EVENT_READ, (reader, None)), self.loop._selector.modify.call_args[0]) def test_remove_writer_unknown(self): self.loop._selector.get_key.side_effect = KeyError self.assertFalse( self.loop.remove_writer(1)) def test_process_events_read(self): reader = mock.Mock() reader._cancelled = False self.loop._add_callback = mock.Mock() self.loop._process_events( [(selectors.SelectorKey( 1, 1, selectors.EVENT_READ, (reader, None)), selectors.EVENT_READ)]) self.assertTrue(self.loop._add_callback.called) self.loop._add_callback.assert_called_with(reader) def test_process_events_read_cancelled(self): reader = mock.Mock() reader.cancelled = True self.loop._remove_reader = mock.Mock() self.loop._process_events( [(selectors.SelectorKey( 1, 1, selectors.EVENT_READ, (reader, None)), selectors.EVENT_READ)]) self.loop._remove_reader.assert_called_with(1) def test_process_events_write(self): writer = mock.Mock() writer._cancelled = False self.loop._add_callback = mock.Mock() self.loop._process_events( [(selectors.SelectorKey(1, 1, selectors.EVENT_WRITE, (None, writer)), selectors.EVENT_WRITE)]) self.loop._add_callback.assert_called_with(writer) def test_process_events_write_cancelled(self): writer = mock.Mock() writer.cancelled = True self.loop._remove_writer = mock.Mock() self.loop._process_events( [(selectors.SelectorKey(1, 1, selectors.EVENT_WRITE, (None, writer)), selectors.EVENT_WRITE)]) self.loop._remove_writer.assert_called_with(1) def test_accept_connection_multiple(self): sock = mock.Mock() sock.accept.return_value = (mock.Mock(), mock.Mock()) backlog = 100 # Mock the coroutine generation for a connection to prevent # warnings related to un-awaited coroutines. 
mock_obj = mock.patch.object with mock_obj(self.loop, '_accept_connection2') as accept2_mock: accept2_mock.return_value = None with mock_obj(self.loop, 'create_task') as task_mock: task_mock.return_value = None self.loop._accept_connection(mock.Mock(), sock, backlog=backlog) self.assertEqual(sock.accept.call_count, backlog) class SelectorTransportTests(test_utils.TestCase): def setUp(self): self.loop = self.new_test_loop() self.protocol = test_utils.make_test_protocol(asyncio.Protocol) self.sock = mock.Mock(socket.socket) self.sock.fileno.return_value = 7 def create_transport(self): transport = _SelectorTransport(self.loop, self.sock, self.protocol, None) self.addCleanup(close_transport, transport) return transport def test_ctor(self): tr = self.create_transport() self.assertIs(tr._loop, self.loop) self.assertIs(tr._sock, self.sock) self.assertIs(tr._sock_fd, 7) def test_abort(self): tr = self.create_transport() tr._force_close = mock.Mock() tr.abort() tr._force_close.assert_called_with(None) def test_close(self): tr = self.create_transport() tr.close() self.assertTrue(tr.is_closing()) self.assertEqual(1, self.loop.remove_reader_count[7]) self.protocol.connection_lost(None) self.assertEqual(tr._conn_lost, 1) tr.close() self.assertEqual(tr._conn_lost, 1) self.assertEqual(1, self.loop.remove_reader_count[7]) def test_close_write_buffer(self): tr = self.create_transport() tr._buffer.extend(b'data') tr.close() self.assertFalse(self.loop.readers) test_utils.run_briefly(self.loop) self.assertFalse(self.protocol.connection_lost.called) def test_force_close(self): tr = self.create_transport() tr._buffer.extend(b'1') self.loop._add_reader(7, mock.sentinel) self.loop._add_writer(7, mock.sentinel) tr._force_close(None) self.assertTrue(tr.is_closing()) self.assertEqual(tr._buffer, list_to_buffer()) self.assertFalse(self.loop.readers) self.assertFalse(self.loop.writers) # second close should not remove reader tr._force_close(None) self.assertFalse(self.loop.readers) self.assertEqual(1, self.loop.remove_reader_count[7]) @mock.patch('asyncio.log.logger.error') def test_fatal_error(self, m_exc): exc = OSError() tr = self.create_transport() tr._force_close = mock.Mock() tr._fatal_error(exc) m_exc.assert_called_with( test_utils.MockPattern( 'Fatal error on transport\nprotocol:.*\ntransport:.*'), exc_info=(OSError, MOCK_ANY, MOCK_ANY)) tr._force_close.assert_called_with(exc) def test_connection_lost(self): exc = OSError() tr = self.create_transport() self.assertIsNotNone(tr._protocol) self.assertIsNotNone(tr._loop) tr._call_connection_lost(exc) self.protocol.connection_lost.assert_called_with(exc) self.sock.close.assert_called_with() self.assertIsNone(tr._sock) self.assertIsNone(tr._protocol) self.assertIsNone(tr._loop) class SelectorSocketTransportTests(test_utils.TestCase): def setUp(self): self.loop = self.new_test_loop() self.protocol = test_utils.make_test_protocol(asyncio.Protocol) self.sock = mock.Mock(socket.socket) self.sock_fd = self.sock.fileno.return_value = 7 def socket_transport(self, waiter=None): transport = _SelectorSocketTransport(self.loop, self.sock, self.protocol, waiter=waiter) self.addCleanup(close_transport, transport) return transport def test_ctor(self): waiter = asyncio.Future(loop=self.loop) tr = self.socket_transport(waiter=waiter) self.loop.run_until_complete(waiter) self.loop.assert_reader(7, tr._read_ready) test_utils.run_briefly(self.loop) self.protocol.connection_made.assert_called_with(tr) def test_ctor_with_waiter(self): waiter = asyncio.Future(loop=self.loop) 
self.socket_transport(waiter=waiter) self.loop.run_until_complete(waiter) self.assertIsNone(waiter.result()) def test_pause_resume_reading(self): tr = self.socket_transport() test_utils.run_briefly(self.loop) self.assertFalse(tr._paused) self.loop.assert_reader(7, tr._read_ready) tr.pause_reading() self.assertTrue(tr._paused) self.assertFalse(7 in self.loop.readers) tr.resume_reading() self.assertFalse(tr._paused) self.loop.assert_reader(7, tr._read_ready) with self.assertRaises(RuntimeError): tr.resume_reading() def test_read_ready(self): transport = self.socket_transport() self.sock.recv.return_value = b'data' transport._read_ready() self.protocol.data_received.assert_called_with(b'data') def test_read_ready_eof(self): transport = self.socket_transport() transport.close = mock.Mock() self.sock.recv.return_value = b'' transport._read_ready() self.protocol.eof_received.assert_called_with() transport.close.assert_called_with() def test_read_ready_eof_keep_open(self): transport = self.socket_transport() transport.close = mock.Mock() self.sock.recv.return_value = b'' self.protocol.eof_received.return_value = True transport._read_ready() self.protocol.eof_received.assert_called_with() self.assertFalse(transport.close.called) @mock.patch('logging.exception') def test_read_ready_tryagain(self, m_exc): self.sock.recv.side_effect = BlockingIOError transport = self.socket_transport() transport._fatal_error = mock.Mock() transport._read_ready() self.assertFalse(transport._fatal_error.called) @mock.patch('logging.exception') def test_read_ready_tryagain_interrupted(self, m_exc): self.sock.recv.side_effect = InterruptedError transport = self.socket_transport() transport._fatal_error = mock.Mock() transport._read_ready() self.assertFalse(transport._fatal_error.called) @mock.patch('logging.exception') def test_read_ready_conn_reset(self, m_exc): err = self.sock.recv.side_effect = ConnectionResetError() transport = self.socket_transport() transport._force_close = mock.Mock() with test_utils.disable_logger(): transport._read_ready() transport._force_close.assert_called_with(err) @mock.patch('logging.exception') def test_read_ready_err(self, m_exc): err = self.sock.recv.side_effect = OSError() transport = self.socket_transport() transport._fatal_error = mock.Mock() transport._read_ready() transport._fatal_error.assert_called_with( err, 'Fatal read error on socket transport') def test_write(self): data = b'data' self.sock.send.return_value = len(data) transport = self.socket_transport() transport.write(data) self.sock.send.assert_called_with(data) def test_write_bytearray(self): data = bytearray(b'data') self.sock.send.return_value = len(data) transport = self.socket_transport() transport.write(data) self.sock.send.assert_called_with(data) self.assertEqual(data, bytearray(b'data')) # Hasn't been mutated. 
def test_write_memoryview(self): data = memoryview(b'data') self.sock.send.return_value = len(data) transport = self.socket_transport() transport.write(data) self.sock.send.assert_called_with(data) def test_write_no_data(self): transport = self.socket_transport() transport._buffer.extend(b'data') transport.write(b'') self.assertFalse(self.sock.send.called) self.assertEqual(list_to_buffer([b'data']), transport._buffer) def test_write_buffer(self): transport = self.socket_transport() transport._buffer.extend(b'data1') transport.write(b'data2') self.assertFalse(self.sock.send.called) self.assertEqual(list_to_buffer([b'data1', b'data2']), transport._buffer) def test_write_partial(self): data = b'data' self.sock.send.return_value = 2 transport = self.socket_transport() transport.write(data) self.loop.assert_writer(7, transport._write_ready) self.assertEqual(list_to_buffer([b'ta']), transport._buffer) def test_write_partial_bytearray(self): data = bytearray(b'data') self.sock.send.return_value = 2 transport = self.socket_transport() transport.write(data) self.loop.assert_writer(7, transport._write_ready) self.assertEqual(list_to_buffer([b'ta']), transport._buffer) self.assertEqual(data, bytearray(b'data')) # Hasn't been mutated. def test_write_partial_memoryview(self): data = memoryview(b'data') self.sock.send.return_value = 2 transport = self.socket_transport() transport.write(data) self.loop.assert_writer(7, transport._write_ready) self.assertEqual(list_to_buffer([b'ta']), transport._buffer) def test_write_partial_none(self): data = b'data' self.sock.send.return_value = 0 self.sock.fileno.return_value = 7 transport = self.socket_transport() transport.write(data) self.loop.assert_writer(7, transport._write_ready) self.assertEqual(list_to_buffer([b'data']), transport._buffer) def test_write_tryagain(self): self.sock.send.side_effect = BlockingIOError data = b'data' transport = self.socket_transport() transport.write(data) self.loop.assert_writer(7, transport._write_ready) self.assertEqual(list_to_buffer([b'data']), transport._buffer) @mock.patch('asyncio.selector_events.logger') def test_write_exception(self, m_log): err = self.sock.send.side_effect = OSError() data = b'data' transport = self.socket_transport() transport._fatal_error = mock.Mock() transport.write(data) transport._fatal_error.assert_called_with( err, 'Fatal write error on socket transport') transport._conn_lost = 1 self.sock.reset_mock() transport.write(data) self.assertFalse(self.sock.send.called) self.assertEqual(transport._conn_lost, 2) transport.write(data) transport.write(data) transport.write(data) transport.write(data) m_log.warning.assert_called_with('socket.send() raised exception.') def test_write_str(self): transport = self.socket_transport() self.assertRaises(TypeError, transport.write, 'str') def test_write_closing(self): transport = self.socket_transport() transport.close() self.assertEqual(transport._conn_lost, 1) transport.write(b'data') self.assertEqual(transport._conn_lost, 2) def test_write_ready(self): data = b'data' self.sock.send.return_value = len(data) transport = self.socket_transport() transport._buffer.extend(data) self.loop._add_writer(7, transport._write_ready) transport._write_ready() self.assertTrue(self.sock.send.called) self.assertFalse(self.loop.writers) def test_write_ready_closing(self): data = b'data' self.sock.send.return_value = len(data) transport = self.socket_transport() transport._closing = True transport._buffer.extend(data) self.loop._add_writer(7, transport._write_ready) 
transport._write_ready() self.assertTrue(self.sock.send.called) self.assertFalse(self.loop.writers) self.sock.close.assert_called_with() self.protocol.connection_lost.assert_called_with(None) def test_write_ready_no_data(self): transport = self.socket_transport() # This is an internal error. self.assertRaises(AssertionError, transport._write_ready) def test_write_ready_partial(self): data = b'data' self.sock.send.return_value = 2 transport = self.socket_transport() transport._buffer.extend(data) self.loop._add_writer(7, transport._write_ready) transport._write_ready() self.loop.assert_writer(7, transport._write_ready) self.assertEqual(list_to_buffer([b'ta']), transport._buffer) def test_write_ready_partial_none(self): data = b'data' self.sock.send.return_value = 0 transport = self.socket_transport() transport._buffer.extend(data) self.loop._add_writer(7, transport._write_ready) transport._write_ready() self.loop.assert_writer(7, transport._write_ready) self.assertEqual(list_to_buffer([b'data']), transport._buffer) def test_write_ready_tryagain(self): self.sock.send.side_effect = BlockingIOError transport = self.socket_transport() transport._buffer = list_to_buffer([b'data1', b'data2']) self.loop._add_writer(7, transport._write_ready) transport._write_ready() self.loop.assert_writer(7, transport._write_ready) self.assertEqual(list_to_buffer([b'data1data2']), transport._buffer) def test_write_ready_exception(self): err = self.sock.send.side_effect = OSError() transport = self.socket_transport() transport._fatal_error = mock.Mock() transport._buffer.extend(b'data') transport._write_ready() transport._fatal_error.assert_called_with( err, 'Fatal write error on socket transport') def test_write_eof(self): tr = self.socket_transport() self.assertTrue(tr.can_write_eof()) tr.write_eof() self.sock.shutdown.assert_called_with(socket.SHUT_WR) tr.write_eof() self.assertEqual(self.sock.shutdown.call_count, 1) tr.close() def test_write_eof_buffer(self): tr = self.socket_transport() self.sock.send.side_effect = BlockingIOError tr.write(b'data') tr.write_eof() self.assertEqual(tr._buffer, list_to_buffer([b'data'])) self.assertTrue(tr._eof) self.assertFalse(self.sock.shutdown.called) self.sock.send.side_effect = lambda _: 4 tr._write_ready() self.assertTrue(self.sock.send.called) self.sock.shutdown.assert_called_with(socket.SHUT_WR) tr.close() @mock.patch('asyncio.base_events.logger') def test_transport_close_remove_writer(self, m_log): remove_writer = self.loop._remove_writer = mock.Mock() transport = self.socket_transport() transport.close() remove_writer.assert_called_with(self.sock_fd) @unittest.skipIf(ssl is None, 'No ssl module') class SelectorSslTransportTests(test_utils.TestCase): def setUp(self): self.loop = self.new_test_loop() self.protocol = test_utils.make_test_protocol(asyncio.Protocol) self.sock = mock.Mock(socket.socket) self.sock.fileno.return_value = 7 self.sslsock = mock.Mock() self.sslsock.fileno.return_value = 1 self.sslcontext = mock.Mock() self.sslcontext.wrap_socket.return_value = self.sslsock def ssl_transport(self, waiter=None, server_hostname=None): transport = _SelectorSslTransport(self.loop, self.sock, self.protocol, self.sslcontext, waiter=waiter, server_hostname=server_hostname) self.addCleanup(close_transport, transport) return transport def _make_one(self, create_waiter=None): transport = self.ssl_transport() self.sock.reset_mock() self.sslsock.reset_mock() self.sslcontext.reset_mock() self.loop.reset_counters() return transport def test_on_handshake(self): waiter = 
asyncio.Future(loop=self.loop) tr = self.ssl_transport(waiter=waiter) self.assertTrue(self.sslsock.do_handshake.called) self.loop.assert_reader(1, tr._read_ready) test_utils.run_briefly(self.loop) self.assertIsNone(waiter.result()) def test_on_handshake_reader_retry(self): self.loop.set_debug(False) self.sslsock.do_handshake.side_effect = ssl.SSLWantReadError transport = self.ssl_transport() self.loop.assert_reader(1, transport._on_handshake, None) def test_on_handshake_writer_retry(self): self.loop.set_debug(False) self.sslsock.do_handshake.side_effect = ssl.SSLWantWriteError transport = self.ssl_transport() self.loop.assert_writer(1, transport._on_handshake, None) def test_on_handshake_exc(self): exc = ValueError() self.sslsock.do_handshake.side_effect = exc with test_utils.disable_logger(): waiter = asyncio.Future(loop=self.loop) self.ssl_transport(waiter=waiter) self.assertTrue(waiter.done()) self.assertIs(exc, waiter.exception()) self.assertTrue(self.sslsock.close.called) def test_on_handshake_base_exc(self): waiter = asyncio.Future(loop=self.loop) transport = self.ssl_transport(waiter=waiter) exc = BaseException() self.sslsock.do_handshake.side_effect = exc with test_utils.disable_logger(): self.assertRaises(BaseException, transport._on_handshake, 0) self.assertTrue(self.sslsock.close.called) self.assertTrue(waiter.done()) self.assertIs(exc, waiter.exception()) def test_cancel_handshake(self): # Python issue #23197: cancelling a handshake must not raise an # exception or log an error, even if the handshake failed waiter = asyncio.Future(loop=self.loop) transport = self.ssl_transport(waiter=waiter) waiter.cancel() exc = ValueError() self.sslsock.do_handshake.side_effect = exc with test_utils.disable_logger(): transport._on_handshake(0) transport.close() test_utils.run_briefly(self.loop) def test_pause_resume_reading(self): tr = self._make_one() self.assertFalse(tr._paused) self.loop.assert_reader(1, tr._read_ready) tr.pause_reading() self.assertTrue(tr._paused) self.assertFalse(1 in self.loop.readers) tr.resume_reading() self.assertFalse(tr._paused) self.loop.assert_reader(1, tr._read_ready) with self.assertRaises(RuntimeError): tr.resume_reading() def test_write(self): transport = self._make_one() transport.write(b'data') self.assertEqual(list_to_buffer([b'data']), transport._buffer) def test_write_bytearray(self): transport = self._make_one() data = bytearray(b'data') transport.write(data) self.assertEqual(list_to_buffer([b'data']), transport._buffer) self.assertEqual(data, bytearray(b'data')) # Hasn't been mutated. self.assertIsNot(data, transport._buffer) # Hasn't been incorporated. 
def test_write_memoryview(self): transport = self._make_one() data = memoryview(b'data') transport.write(data) self.assertEqual(list_to_buffer([b'data']), transport._buffer) def test_write_no_data(self): transport = self._make_one() transport._buffer.extend(b'data') transport.write(b'') self.assertEqual(list_to_buffer([b'data']), transport._buffer) def test_write_str(self): transport = self._make_one() self.assertRaises(TypeError, transport.write, 'str') def test_write_closing(self): transport = self._make_one() transport.close() self.assertEqual(transport._conn_lost, 1) transport.write(b'data') self.assertEqual(transport._conn_lost, 2) @mock.patch('asyncio.selector_events.logger') def test_write_exception(self, m_log): transport = self._make_one() transport._conn_lost = 1 transport.write(b'data') self.assertEqual(transport._buffer, list_to_buffer()) transport.write(b'data') transport.write(b'data') transport.write(b'data') transport.write(b'data') m_log.warning.assert_called_with('socket.send() raised exception.') def test_read_ready_recv(self): self.sslsock.recv.return_value = b'data' transport = self._make_one() transport._read_ready() self.assertTrue(self.sslsock.recv.called) self.assertEqual((b'data',), self.protocol.data_received.call_args[0]) def test_read_ready_write_wants_read(self): self.loop._add_writer = mock.Mock() self.sslsock.recv.side_effect = BlockingIOError transport = self._make_one() transport._write_wants_read = True transport._write_ready = mock.Mock() transport._buffer.extend(b'data') transport._read_ready() self.assertFalse(transport._write_wants_read) transport._write_ready.assert_called_with() self.loop._add_writer.assert_called_with( transport._sock_fd, transport._write_ready) def test_read_ready_recv_eof(self): self.sslsock.recv.return_value = b'' transport = self._make_one() transport.close = mock.Mock() transport._read_ready() transport.close.assert_called_with() self.protocol.eof_received.assert_called_with() def test_read_ready_recv_conn_reset(self): err = self.sslsock.recv.side_effect = ConnectionResetError() transport = self._make_one() transport._force_close = mock.Mock() with test_utils.disable_logger(): transport._read_ready() transport._force_close.assert_called_with(err) def test_read_ready_recv_retry(self): self.sslsock.recv.side_effect = ssl.SSLWantReadError transport = self._make_one() transport._read_ready() self.assertTrue(self.sslsock.recv.called) self.assertFalse(self.protocol.data_received.called) self.sslsock.recv.side_effect = BlockingIOError transport._read_ready() self.assertFalse(self.protocol.data_received.called) self.sslsock.recv.side_effect = InterruptedError transport._read_ready() self.assertFalse(self.protocol.data_received.called) def test_read_ready_recv_write(self): self.loop._remove_reader = mock.Mock() self.loop._add_writer = mock.Mock() self.sslsock.recv.side_effect = ssl.SSLWantWriteError transport = self._make_one() transport._read_ready() self.assertFalse(self.protocol.data_received.called) self.assertTrue(transport._read_wants_write) self.loop._remove_reader.assert_called_with(transport._sock_fd) self.loop._add_writer.assert_called_with( transport._sock_fd, transport._write_ready) def test_read_ready_recv_exc(self): err = self.sslsock.recv.side_effect = OSError() transport = self._make_one() transport._fatal_error = mock.Mock() transport._read_ready() transport._fatal_error.assert_called_with( err, 'Fatal read error on SSL transport') def test_write_ready_send(self): self.sslsock.send.return_value = 4 transport = 
self._make_one() transport._buffer = list_to_buffer([b'data']) transport._write_ready() self.assertEqual(list_to_buffer(), transport._buffer) self.assertTrue(self.sslsock.send.called) def test_write_ready_send_none(self): self.sslsock.send.return_value = 0 transport = self._make_one() transport._buffer = list_to_buffer([b'data1', b'data2']) transport._write_ready() self.assertTrue(self.sslsock.send.called) self.assertEqual(list_to_buffer([b'data1data2']), transport._buffer) def test_write_ready_send_partial(self): self.sslsock.send.return_value = 2 transport = self._make_one() transport._buffer = list_to_buffer([b'data1', b'data2']) transport._write_ready() self.assertTrue(self.sslsock.send.called) self.assertEqual(list_to_buffer([b'ta1data2']), transport._buffer) def test_write_ready_send_closing_partial(self): self.sslsock.send.return_value = 2 transport = self._make_one() transport._buffer = list_to_buffer([b'data1', b'data2']) transport._write_ready() self.assertTrue(self.sslsock.send.called) self.assertFalse(self.sslsock.close.called) def test_write_ready_send_closing(self): self.sslsock.send.return_value = 4 transport = self._make_one() transport._buffer = list_to_buffer([b'data']) transport.close() transport._write_ready() self.protocol.connection_lost.assert_called_with(None) def test_write_ready_send_closing_empty_buffer(self): self.sslsock.send.return_value = 4 call_soon = self.loop.call_soon = mock.Mock() transport = self._make_one() transport._buffer = list_to_buffer() transport.close() transport._write_ready() call_soon.assert_called_with(transport._call_connection_lost, None) def test_write_ready_send_retry(self): transport = self._make_one() transport._buffer = list_to_buffer([b'data']) self.sslsock.send.side_effect = ssl.SSLWantWriteError transport._write_ready() self.assertEqual(list_to_buffer([b'data']), transport._buffer) self.sslsock.send.side_effect = BlockingIOError() transport._write_ready() self.assertEqual(list_to_buffer([b'data']), transport._buffer) def test_write_ready_send_read(self): transport = self._make_one() transport._buffer = list_to_buffer([b'data']) self.loop._remove_writer = mock.Mock() self.sslsock.send.side_effect = ssl.SSLWantReadError transport._write_ready() self.assertFalse(self.protocol.data_received.called) self.assertTrue(transport._write_wants_read) self.loop._remove_writer.assert_called_with(transport._sock_fd) def test_write_ready_send_exc(self): err = self.sslsock.send.side_effect = OSError() transport = self._make_one() transport._buffer = list_to_buffer([b'data']) transport._fatal_error = mock.Mock() transport._write_ready() transport._fatal_error.assert_called_with( err, 'Fatal write error on SSL transport') self.assertEqual(list_to_buffer(), transport._buffer) def test_write_ready_read_wants_write(self): self.loop._add_reader = mock.Mock() self.sslsock.send.side_effect = BlockingIOError transport = self._make_one() transport._read_wants_write = True transport._read_ready = mock.Mock() transport._write_ready() self.assertFalse(transport._read_wants_write) transport._read_ready.assert_called_with() self.loop._add_reader.assert_called_with( transport._sock_fd, transport._read_ready) def test_write_eof(self): tr = self._make_one() self.assertFalse(tr.can_write_eof()) self.assertRaises(NotImplementedError, tr.write_eof) def check_close(self): tr = self._make_one() tr.close() self.assertTrue(tr.is_closing()) self.assertEqual(1, self.loop.remove_reader_count[1]) self.assertEqual(tr._conn_lost, 1) tr.close() self.assertEqual(tr._conn_lost, 1) 
self.assertEqual(1, self.loop.remove_reader_count[1]) test_utils.run_briefly(self.loop) def test_close(self): self.check_close() self.assertTrue(self.protocol.connection_made.called) self.assertTrue(self.protocol.connection_lost.called) def test_close_not_connected(self): self.sslsock.do_handshake.side_effect = ssl.SSLWantReadError self.check_close() self.assertFalse(self.protocol.connection_made.called) self.assertFalse(self.protocol.connection_lost.called) @unittest.skipIf(ssl is None, 'No SSL support') def test_server_hostname(self): self.ssl_transport(server_hostname='localhost') self.sslcontext.wrap_socket.assert_called_with( self.sock, do_handshake_on_connect=False, server_side=False, server_hostname='localhost') class SelectorSslWithoutSslTransportTests(unittest.TestCase): @mock.patch('asyncio.selector_events.ssl', None) def test_ssl_transport_requires_ssl_module(self): Mock = mock.Mock with self.assertRaises(RuntimeError): _SelectorSslTransport(Mock(), Mock(), Mock(), Mock()) class SelectorDatagramTransportTests(test_utils.TestCase): def setUp(self): self.loop = self.new_test_loop() self.protocol = test_utils.make_test_protocol(asyncio.DatagramProtocol) self.sock = mock.Mock(spec_set=socket.socket) self.sock.fileno.return_value = 7 def datagram_transport(self, address=None): transport = _SelectorDatagramTransport(self.loop, self.sock, self.protocol, address=address) self.addCleanup(close_transport, transport) return transport def test_read_ready(self): transport = self.datagram_transport() self.sock.recvfrom.return_value = (b'data', ('0.0.0.0', 1234)) transport._read_ready() self.protocol.datagram_received.assert_called_with( b'data', ('0.0.0.0', 1234)) def test_read_ready_tryagain(self): transport = self.datagram_transport() self.sock.recvfrom.side_effect = BlockingIOError transport._fatal_error = mock.Mock() transport._read_ready() self.assertFalse(transport._fatal_error.called) def test_read_ready_err(self): transport = self.datagram_transport() err = self.sock.recvfrom.side_effect = RuntimeError() transport._fatal_error = mock.Mock() transport._read_ready() transport._fatal_error.assert_called_with( err, 'Fatal read error on datagram transport') def test_read_ready_oserr(self): transport = self.datagram_transport() err = self.sock.recvfrom.side_effect = OSError() transport._fatal_error = mock.Mock() transport._read_ready() self.assertFalse(transport._fatal_error.called) self.protocol.error_received.assert_called_with(err) def test_sendto(self): data = b'data' transport = self.datagram_transport() transport.sendto(data, ('0.0.0.0', 1234)) self.assertTrue(self.sock.sendto.called) self.assertEqual( self.sock.sendto.call_args[0], (data, ('0.0.0.0', 1234))) def test_sendto_bytearray(self): data = bytearray(b'data') transport = self.datagram_transport() transport.sendto(data, ('0.0.0.0', 1234)) self.assertTrue(self.sock.sendto.called) self.assertEqual( self.sock.sendto.call_args[0], (data, ('0.0.0.0', 1234))) def test_sendto_memoryview(self): data = memoryview(b'data') transport = self.datagram_transport() transport.sendto(data, ('0.0.0.0', 1234)) self.assertTrue(self.sock.sendto.called) self.assertEqual( self.sock.sendto.call_args[0], (data, ('0.0.0.0', 1234))) def test_sendto_no_data(self): transport = self.datagram_transport() transport._buffer.append((b'data', ('0.0.0.0', 12345))) transport.sendto(b'', ()) self.assertFalse(self.sock.sendto.called) self.assertEqual( [(b'data', ('0.0.0.0', 12345))], list(transport._buffer)) def test_sendto_buffer(self): transport = 
self.datagram_transport() transport._buffer.append((b'data1', ('0.0.0.0', 12345))) transport.sendto(b'data2', ('0.0.0.0', 12345)) self.assertFalse(self.sock.sendto.called) self.assertEqual( [(b'data1', ('0.0.0.0', 12345)), (b'data2', ('0.0.0.0', 12345))], list(transport._buffer)) def test_sendto_buffer_bytearray(self): data2 = bytearray(b'data2') transport = self.datagram_transport() transport._buffer.append((b'data1', ('0.0.0.0', 12345))) transport.sendto(data2, ('0.0.0.0', 12345)) self.assertFalse(self.sock.sendto.called) self.assertEqual( [(b'data1', ('0.0.0.0', 12345)), (b'data2', ('0.0.0.0', 12345))], list(transport._buffer)) self.assertIsInstance(transport._buffer[1][0], bytes) def test_sendto_buffer_memoryview(self): data2 = memoryview(b'data2') transport = self.datagram_transport() transport._buffer.append((b'data1', ('0.0.0.0', 12345))) transport.sendto(data2, ('0.0.0.0', 12345)) self.assertFalse(self.sock.sendto.called) self.assertEqual( [(b'data1', ('0.0.0.0', 12345)), (b'data2', ('0.0.0.0', 12345))], list(transport._buffer)) self.assertIsInstance(transport._buffer[1][0], bytes) def test_sendto_tryagain(self): data = b'data' self.sock.sendto.side_effect = BlockingIOError transport = self.datagram_transport() transport.sendto(data, ('0.0.0.0', 12345)) self.loop.assert_writer(7, transport._sendto_ready) self.assertEqual( [(b'data', ('0.0.0.0', 12345))], list(transport._buffer)) @mock.patch('asyncio.selector_events.logger') def test_sendto_exception(self, m_log): data = b'data' err = self.sock.sendto.side_effect = RuntimeError() transport = self.datagram_transport() transport._fatal_error = mock.Mock() transport.sendto(data, ()) self.assertTrue(transport._fatal_error.called) transport._fatal_error.assert_called_with( err, 'Fatal write error on datagram transport') transport._conn_lost = 1 transport._address = ('123',) transport.sendto(data) transport.sendto(data) transport.sendto(data) transport.sendto(data) transport.sendto(data) m_log.warning.assert_called_with('socket.send() raised exception.') def test_sendto_error_received(self): data = b'data' self.sock.sendto.side_effect = ConnectionRefusedError transport = self.datagram_transport() transport._fatal_error = mock.Mock() transport.sendto(data, ()) self.assertEqual(transport._conn_lost, 0) self.assertFalse(transport._fatal_error.called) def test_sendto_error_received_connected(self): data = b'data' self.sock.send.side_effect = ConnectionRefusedError transport = self.datagram_transport(address=('0.0.0.0', 1)) transport._fatal_error = mock.Mock() transport.sendto(data) self.assertFalse(transport._fatal_error.called) self.assertTrue(self.protocol.error_received.called) def test_sendto_str(self): transport = self.datagram_transport() self.assertRaises(TypeError, transport.sendto, 'str', ()) def test_sendto_connected_addr(self): transport = self.datagram_transport(address=('0.0.0.0', 1)) self.assertRaises( ValueError, transport.sendto, b'str', ('0.0.0.0', 2)) def test_sendto_closing(self): transport = self.datagram_transport(address=(1,)) transport.close() self.assertEqual(transport._conn_lost, 1) transport.sendto(b'data', (1,)) self.assertEqual(transport._conn_lost, 2) def test_sendto_ready(self): data = b'data' self.sock.sendto.return_value = len(data) transport = self.datagram_transport() transport._buffer.append((data, ('0.0.0.0', 12345))) self.loop._add_writer(7, transport._sendto_ready) transport._sendto_ready() self.assertTrue(self.sock.sendto.called) self.assertEqual( self.sock.sendto.call_args[0], (data, ('0.0.0.0', 
12345))) self.assertFalse(self.loop.writers) def test_sendto_ready_closing(self): data = b'data' self.sock.send.return_value = len(data) transport = self.datagram_transport() transport._closing = True transport._buffer.append((data, ())) self.loop._add_writer(7, transport._sendto_ready) transport._sendto_ready() self.sock.sendto.assert_called_with(data, ()) self.assertFalse(self.loop.writers) self.sock.close.assert_called_with() self.protocol.connection_lost.assert_called_with(None) def test_sendto_ready_no_data(self): transport = self.datagram_transport() self.loop._add_writer(7, transport._sendto_ready) transport._sendto_ready() self.assertFalse(self.sock.sendto.called) self.assertFalse(self.loop.writers) def test_sendto_ready_tryagain(self): self.sock.sendto.side_effect = BlockingIOError transport = self.datagram_transport() transport._buffer.extend([(b'data1', ()), (b'data2', ())]) self.loop._add_writer(7, transport._sendto_ready) transport._sendto_ready() self.loop.assert_writer(7, transport._sendto_ready) self.assertEqual( [(b'data1', ()), (b'data2', ())], list(transport._buffer)) def test_sendto_ready_exception(self): err = self.sock.sendto.side_effect = RuntimeError() transport = self.datagram_transport() transport._fatal_error = mock.Mock() transport._buffer.append((b'data', ())) transport._sendto_ready() transport._fatal_error.assert_called_with( err, 'Fatal write error on datagram transport') def test_sendto_ready_error_received(self): self.sock.sendto.side_effect = ConnectionRefusedError transport = self.datagram_transport() transport._fatal_error = mock.Mock() transport._buffer.append((b'data', ())) transport._sendto_ready() self.assertFalse(transport._fatal_error.called) def test_sendto_ready_error_received_connection(self): self.sock.send.side_effect = ConnectionRefusedError transport = self.datagram_transport(address=('0.0.0.0', 1)) transport._fatal_error = mock.Mock() transport._buffer.append((b'data', ())) transport._sendto_ready() self.assertFalse(transport._fatal_error.called) self.assertTrue(self.protocol.error_received.called) @mock.patch('asyncio.base_events.logger.error') def test_fatal_error_connected(self, m_exc): transport = self.datagram_transport(address=('0.0.0.0', 1)) err = ConnectionRefusedError() transport._fatal_error(err) self.assertFalse(self.protocol.error_received.called) m_exc.assert_called_with( test_utils.MockPattern( 'Fatal error on transport\nprotocol:.*\ntransport:.*'), exc_info=(ConnectionRefusedError, MOCK_ANY, MOCK_ANY)) if __name__ == '__main__': unittest.main()
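# Note: the tests above use two module-level helpers that are defined earlier
# in this test file (outside this excerpt): list_to_buffer() and
# close_transport(). A minimal sketch of the behaviour they are assumed to
# have -- joining byte chunks into a single bytearray buffer, and tearing down
# a transport whose event loop and socket are mocks -- is:
#
#     def list_to_buffer(l=()):
#         return bytearray().join(l)
#
#     def close_transport(transport):
#         # the event loop and socket are mocked, so bypass transport.close()
#         if transport._sock is None:
#             return
#         transport._sock.close()
#         transport._sock = None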
""" :codeauthor: Jorge Schrauwen <info@blackdot.be> """ import salt.modules.smartos_imgadm as imgadm from salt.modules.smartos_imgadm import _parse_image_meta from tests.support.mixins import LoaderModuleMockMixin from tests.support.unit import TestCase image_orphan = { "manifest": {"uuid": "07f360fd-12d5-e624-a279-eb8a15b630f6"}, "zpool": "zones", "cloneNames": [], "clones": 0, } image_native = { "manifest": { "v": 2, "uuid": "9d91e334-3bcf-11e8-bb0b-e7b49eb01e38", "owner": "00000000-0000-0000-0000-000000000000", "name": "pkgbuild", "version": "18.1.0", "state": "active", "disabled": False, "public": True, "published_at": "2018-04-09T08:25:52Z", "type": "zone-dataset", "os": "smartos", "files": [ { "sha1": "5efaf95b7f226eb09c7d5e6c3734f8aa654b811d", "size": 465411979, "compression": "gzip", } ], "description": "A SmartOS image pre-configured for building pkgsrc packages.", "homepage": "https://docs.joyent.com/images/smartos/pkgbuild", "urn": "sdc:sdc:pkgbuild:18.1.0", "requirements": { "min_platform": {"7.0": "20141030T081701Z"}, "networks": [{"name": "net0", "description": "public"}], }, "tags": {"role": "os", "group": "pkgbuild"}, }, "zpool": "zones", "source": "https://images.joyent.com", "cloneNames": ["zones/dda70f61-70fe-65e7-cf70-d878d69442d4"], "clones": 1, } image_lx = { "manifest": { "v": 2, "uuid": "05140a7e-279f-11e6-aedf-47d4f69d2887", "owner": "00000000-0000-0000-0000-000000000000", "name": "ubuntu-16.04", "version": "20160601", "state": "active", "disabled": False, "public": True, "published_at": "2016-06-01T02:17:41Z", "type": "lx-dataset", "os": "linux", "files": [ { "sha1": "d342f137c5ccef0702ec479acb63c196cf81b38a", "size": 134969110, "compression": "gzip", } ], "description": ( "Container-native Ubuntu 16.04 64-bit image. Built to run on containers" " with bare metal speed, while offering all the services of a typical unix" " host." ), "homepage": "https://docs.joyent.com/images/container-native-linux", "requirements": { "networks": [{"name": "net0", "description": "public"}], "min_platform": {"7.0": "20160225T122859Z"}, "brand": "lx", }, "tags": {"role": "os", "kernel_version": "4.3.0"}, }, "zpool": "zones", "source": "https://images.joyent.com", "cloneNames": ["zones/e4c1f6b5-4429-e6c2-ae2a-d6aa58bdeebb"], "clones": 1, } image_zvol = { "manifest": { "v": 2, "uuid": "ac99517a-72ac-44c0-90e6-c7ce3d944a0a", "owner": "00000000-0000-0000-0000-000000000000", "name": "ubuntu-certified-18.04", "version": "20180808", "state": "active", "disabled": False, "public": True, "published_at": "2018-10-11T12:45:24.804Z", "type": "zvol", "os": "linux", "files": [ { "sha1": "9f7704969507bd97e160a8f42a3631487644e457", "size": 372276887, "compression": "gzip", } ], "description": ( "Ubuntu 18.04 LTS (20180808 64-bit). Certified Ubuntu Server Cloud Image" " from Canonical. For kvm and bhyve." 
), "homepage": "https://docs.joyent.com/images/linux/ubuntu-certified", "requirements": { "min_platform": {"7.0": "20150929T232348Z"}, "networks": [{"name": "net0", "description": "public"}], "ssh_key": True, }, "nic_driver": "virtio", "disk_driver": "virtio", "cpu_type": "host", "image_size": 10240, "tags": {"default_user": "ubuntu", "role": "os"}, }, "zpool": "zones", "source": "https://images.joyent.com", "cloneNames": [], "clones": 0, } image_docker = { "manifest": { "v": 2, "uuid": "4a3db8cb-0e94-ae23-588c-ee7934088927", "owner": "00000000-0000-0000-0000-000000000000", "name": "docker-layer", "version": "62487cf6a7f6", "disabled": False, "public": True, "published_at": "2019-03-23T01:32:25.320Z", "type": "docker", "os": "linux", "description": '/bin/sh -c #(nop) CMD ["/bin/bash" "/opt/start.sh" "-bash"]', "tags": { "docker:repo": "busybox42/zimbra-docker-centos", "docker:id": "sha256:62487cf6a7f698af4edc20707e14b1b3bba13b98bea3375f05af04859a30b222", "docker:architecture": "amd64", "docker:tag:latest": True, "docker:config": { "Cmd": ["/bin/bash", "/opt/start.sh", "-bash"], "Entrypoint": None, "Env": [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ], "WorkingDir": "", }, }, "origin": "2f0c529b-7bab-28d1-ff34-bdc9281b7a4b", }, "zpool": "zones", "source": "https://docker.io", "cloneNames": [], "clones": 0, } class ImgadmTestCase(TestCase, LoaderModuleMockMixin): """ TestCase for salt.modules.smartos_imgadm module """ def setup_loader_modules(self): return {imgadm: {}} def test_parse_image_meta_orphan(self): """ Test the internal _parse_image_meta methode Feed it an 'orphan' image as we get it from from imgadm list -j """ ret = {"Error": "This looks like an orphaned image, image payload was invalid."} self.assertEqual(_parse_image_meta(image_orphan, True), ret) def test_parse_image_meta_native(self): """ Test the internal _parse_image_meta methode Feed it an 'native' image as we get it from from imgadm list -j """ ret = { "description": ( "A SmartOS image pre-configured for building pkgsrc packages." ), "name": "pkgbuild", "os": "smartos", "published": "2018-04-09T08:25:52Z", "source": "https://images.joyent.com", "version": "18.1.0", } self.assertEqual(_parse_image_meta(image_native, True), ret) def test_parse_image_meta_lx(self): """ Test the internal _parse_image_meta methode Feed it an 'lx' image as we get it from from imgadm list -j """ ret = { "description": ( "Container-native Ubuntu 16.04 64-bit image. Built to run on " "containers with bare metal speed, while offering all the " "services of a typical unix host." ), "name": "ubuntu-16.04", "os": "linux", "published": "2016-06-01T02:17:41Z", "source": "https://images.joyent.com", "version": "20160601", } self.assertEqual(_parse_image_meta(image_lx, True), ret) def test_parse_image_meta_zvol(self): """ Test the internal _parse_image_meta methode Feed it an 'zvol' image as we get it from from imgadm list -j """ ret = { "description": ( "Ubuntu 18.04 LTS (20180808 64-bit). Certified Ubuntu Server " "Cloud Image from Canonical. For kvm and bhyve." 
), "name": "ubuntu-certified-18.04", "os": "linux", "published": "2018-10-11T12:45:24.804Z", "source": "https://images.joyent.com", "version": "20180808", } self.assertEqual(_parse_image_meta(image_zvol, True), ret) def test_parse_image_meta_docker(self): """ Test the internal _parse_image_meta methode Feed it an 'docker' image as we get it from from imgadm list -j """ ret = { "description": ( "Docker image imported from " "busybox42/zimbra-docker-centos:latest on " "2019-03-23T01:32:25.320Z." ), "name": "busybox42/zimbra-docker-centos:latest", "os": "linux", "published": "2019-03-23T01:32:25.320Z", "source": "https://docker.io", "version": "62487cf6a7f6", } self.assertEqual(_parse_image_meta(image_docker, True), ret)
# Copyright 2013 dotCloud inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import re import shlex import struct import requests import requests.exceptions import six import docker.auth as auth import docker.unixconn as unixconn import docker.utils as utils if not six.PY3: import websocket DEFAULT_TIMEOUT_SECONDS = 60 class APIError(requests.exceptions.HTTPError): def __init__(self, message, response, explanation=None): super(APIError, self).__init__(message, response=response) self.explanation = explanation if self.explanation is None and response.content: self.explanation = response.content.strip() def __str__(self): message = super(APIError, self).__str__() if self.is_client_error(): message = '%s Client Error: %s' % ( self.response.status_code, self.response.reason) elif self.is_server_error(): message = '%s Server Error: %s' % ( self.response.status_code, self.response.reason) if self.explanation: message = '%s ("%s")' % (message, self.explanation) return message def is_client_error(self): return 400 <= self.response.status_code < 500 def is_server_error(self): return 500 <= self.response.status_code < 600 class Client(requests.Session): def __init__(self, base_url="unix://var/run/docker.sock", version="1.6", timeout=DEFAULT_TIMEOUT_SECONDS): super(Client, self).__init__() if base_url.startswith('unix:///'): base_url = base_url.replace('unix:/', 'unix:') self.base_url = base_url self._version = version self._timeout = timeout self.mount('unix://', unixconn.UnixAdapter(base_url, timeout)) try: self._cfg = auth.load_config() except Exception: pass def _set_request_timeout(self, kwargs): """Prepare the kwargs for an HTTP request by inserting the timeout parameter, if not already present.""" kwargs.setdefault('timeout', self._timeout) return kwargs def _post(self, url, **kwargs): return self.post(url, **self._set_request_timeout(kwargs)) def _get(self, url, **kwargs): return self.get(url, **self._set_request_timeout(kwargs)) def _delete(self, url, **kwargs): return self.delete(url, **self._set_request_timeout(kwargs)) def _url(self, path): return '{0}/v{1}{2}'.format(self.base_url, self._version, path) def _raise_for_status(self, response, explanation=None): """Raises stored :class:`APIError`, if one occurred.""" try: response.raise_for_status() except requests.exceptions.HTTPError as e: raise APIError(e, response, explanation=explanation) def _stream_result(self, response): self._raise_for_status(response) for line in response.iter_lines(chunk_size=1): # filter out keep-alive new lines if line: yield line + '\n' def _stream_result_socket(self, response): self._raise_for_status(response) return response.raw._fp.fp._sock def _result(self, response, json=False): self._raise_for_status(response) if json: return response.json() return response.text def _container_config(self, image, command, hostname=None, user=None, detach=False, stdin_open=False, tty=False, mem_limit=0, ports=None, environment=None, dns=None, volumes=None, volumes_from=None, privileged=False): if isinstance(command, 
six.string_types): command = shlex.split(str(command)) if isinstance(environment, dict): environment = [ '{0}={1}'.format(k, v) for k, v in environment.items() ] if ports and isinstance(ports, list): exposed_ports = {} for port_definition in ports: port = port_definition proto = None if isinstance(port_definition, tuple): if len(port_definition) == 2: proto = port_definition[1] port = port_definition[0] exposed_ports['{0}{1}'.format( port, '/' + proto if proto else '' )] = {} ports = exposed_ports if volumes and isinstance(volumes, list): volumes_dict = {} for vol in volumes: volumes_dict[vol] = {} volumes = volumes_dict attach_stdin = False attach_stdout = False attach_stderr = False if not detach: attach_stdout = True attach_stderr = True if stdin_open: attach_stdin = True return { 'Hostname': hostname, 'ExposedPorts': ports, 'User': user, 'Tty': tty, 'OpenStdin': stdin_open, 'Memory': mem_limit, 'AttachStdin': attach_stdin, 'AttachStdout': attach_stdout, 'AttachStderr': attach_stderr, 'Env': environment, 'Cmd': command, 'Dns': dns, 'Image': image, 'Volumes': volumes, 'VolumesFrom': volumes_from, 'Privileged': privileged, } def _post_json(self, url, data, **kwargs): # Go <1.1 can't unserialize null to a string # so we do this disgusting thing here. data2 = {} if data is not None: for k, v in six.iteritems(data): if v is not None: data2[k] = v if 'headers' not in kwargs: kwargs['headers'] = {} kwargs['headers']['Content-Type'] = 'application/json' return self._post(url, data=json.dumps(data2), **kwargs) def _attach_params(self, override=None): return override or { 'stdout': 1, 'stderr': 1, 'stream': 1 } def _attach_websocket(self, container, params=None): if six.PY3: raise NotImplementedError("This method is not currently supported " "under python 3") url = self._url("/containers/{0}/attach/ws".format(container)) req = requests.Request("POST", url, params=self._attach_params(params)) full_url = req.prepare().url full_url = full_url.replace("http://", "ws://", 1) full_url = full_url.replace("https://", "wss://", 1) return self._create_websocket_connection(full_url) def _create_websocket_connection(self, url): return websocket.create_connection(url) def _stream_helper(self, response): socket = self._stream_result_socket(response).makefile() while True: size = int(socket.readline(), 16) if size <= 0: break data = socket.readline() if not data: break yield data def attach(self, container): socket = self.attach_socket(container) while True: chunk = socket.recv(4096) if chunk: yield chunk else: break def attach_socket(self, container, params=None, ws=False): if params is None: params = { 'stdout': 1, 'stderr': 1, 'stream': 1 } if ws: return self._attach_websocket(container, params) if isinstance(container, dict): container = container.get('Id') u = self._url("/containers/{0}/attach".format(container)) return self._stream_result_socket(self.post( u, None, params=self._attach_params(params), stream=True)) def build(self, path=None, tag=None, quiet=False, fileobj=None, nocache=False, rm=False, stream=False): remote = context = headers = None if path is None and fileobj is None: raise Exception("Either path or fileobj needs to be provided.") if fileobj is not None: context = utils.mkbuildcontext(fileobj) elif path.startswith(('http://', 'https://', 'git://', 'github.com/')): remote = path else: context = utils.tar(path) u = self._url('/build') params = { 't': tag, 'remote': remote, 'q': quiet, 'nocache': nocache, 'rm': rm } if context is not None: headers = {'Content-Type': 'application/tar'} 
response = self._post( u, data=context, params=params, headers=headers, stream=stream ) if context is not None: context.close() if stream: return self._stream_result(response) else: output = self._result(response) srch = r'Successfully built ([0-9a-f]+)' match = re.search(srch, output) if not match: return None, output return match.group(1), output def commit(self, container, repository=None, tag=None, message=None, author=None, conf=None): params = { 'container': container, 'repo': repository, 'tag': tag, 'comment': message, 'author': author } u = self._url("/commit") return self._result(self._post_json(u, data=conf, params=params), json=True) def containers(self, quiet=False, all=False, trunc=True, latest=False, since=None, before=None, limit=-1): params = { 'limit': 1 if latest else limit, 'all': 1 if all else 0, 'trunc_cmd': 1 if trunc else 0, 'since': since, 'before': before } u = self._url("/containers/json") res = self._result(self._get(u, params=params), True) if quiet: return [{'Id': x['Id']} for x in res] return res def copy(self, container, resource): res = self._post_json( self._url("/containers/{0}/copy".format(container)), data={"Resource": resource}, stream=True ) self._raise_for_status(res) return res.raw def create_container(self, image, command=None, hostname=None, user=None, detach=False, stdin_open=False, tty=False, mem_limit=0, ports=None, environment=None, dns=None, volumes=None, volumes_from=None, privileged=False, name=None): config = self._container_config( image, command, hostname, user, detach, stdin_open, tty, mem_limit, ports, environment, dns, volumes, volumes_from, privileged ) return self.create_container_from_config(config, name) def create_container_from_config(self, config, name=None): u = self._url("/containers/create") params = { 'name': name } res = self._post_json(u, data=config, params=params) return self._result(res, True) def diff(self, container): if isinstance(container, dict): container = container.get('Id') return self._result(self._get(self._url("/containers/{0}/changes". format(container))), True) def events(self): u = self._url("/events") socket = self._stream_result_socket(self.get(u, stream=True)) while True: chunk = socket.recv(4096) if chunk: # Messages come in the format of length, data, newline. 
length, data = chunk.split("\n", 1) length = int(length, 16) if length > len(data): data += socket.recv(length - len(data)) yield json.loads(data) else: break def export(self, container): if isinstance(container, dict): container = container.get('Id') res = self._get(self._url("/containers/{0}/export".format(container)), stream=True) self._raise_for_status(res) return res.raw def history(self, image): res = self._get(self._url("/images/{0}/history".format(image))) self._raise_for_status(res) return self._result(res) def images(self, name=None, quiet=False, all=False, viz=False): if viz: return self._result(self._get(self._url("images/viz"))) params = { 'filter': name, 'only_ids': 1 if quiet else 0, 'all': 1 if all else 0, } res = self._result(self._get(self._url("/images/json"), params=params), True) if quiet: return [x['Id'] for x in res] return res def import_image(self, src, data=None, repository=None, tag=None): u = self._url("/images/create") params = { 'repo': repository, 'tag': tag } try: # XXX: this is ways not optimal but the only way # for now to import tarballs through the API fic = open(src) data = fic.read() fic.close() src = "-" except IOError: # file does not exists or not a file (URL) data = None if isinstance(src, six.string_types): params['fromSrc'] = src return self._result(self._post(u, data=data, params=params)) return self._result(self._post(u, data=src, params=params)) def info(self): return self._result(self._get(self._url("/info")), True) def insert(self, image, url, path): api_url = self._url("/images/" + image + "/insert") params = { 'url': url, 'path': path } return self._result(self._post(api_url, params=params)) def inspect_container(self, container): if isinstance(container, dict): container = container.get('Id') return self._result( self._get(self._url("/containers/{0}/json".format(container))), True) def inspect_image(self, image_id): return self._result( self._get(self._url("/images/{0}/json".format(image_id))), True ) def kill(self, container, signal=None): if isinstance(container, dict): container = container.get('Id') url = self._url("/containers/{0}/kill".format(container)) params = {} if signal is not None: params['signal'] = signal res = self._post(url, params=params) self._raise_for_status(res) def login(self, username, password=None, email=None, registry=None): url = self._url("/auth") if registry is None: registry = auth.INDEX_URL if getattr(self, '_cfg', None) is None: self._cfg = auth.load_config() authcfg = auth.resolve_authconfig(self._cfg, registry) if 'username' in authcfg and authcfg['username'] == username: return authcfg req_data = { 'username': username, 'password': password, 'email': email } res = self._result(self._post_json(url, data=req_data), True) if res['Status'] == 'Login Succeeded': self._cfg['Configs'][registry] = req_data return res def logs(self, container): if isinstance(container, dict): container = container.get('Id') params = { 'logs': 1, 'stdout': 1, 'stderr': 1 } u = self._url("/containers/{0}/attach".format(container)) if utils.compare_version('1.6', self._version) < 0: return self._result(self._post(u, params=params)) res = '' response = self._result(self._post(u, params=params)) walker = 0 while walker < len(response): header = response[walker:walker+8] walker += 8 # we don't care about the type of stream since we want both # stdout and stderr length = struct.unpack(">L", header[4:].encode())[0] res += response[walker:walker+length] walker += length return res def port(self, container, private_port): if 
isinstance(container, dict): container = container.get('Id') res = self._get(self._url("/containers/{0}/json".format(container))) self._raise_for_status(res) json_ = res.json() s_port = str(private_port)+'/tcp' f_port = None f_port = json_['NetworkSettings']['Ports'][s_port][0]['HostPort'] #elif s_port in json_['NetworkSettings']['PortMapping']['Udp']: # f_port = json_['NetworkSettings']['PortMapping']['Udp'][s_port] #elif s_port in json_['NetworkSettings']['PortMapping']['Tcp']: # f_port = json_['NetworkSettings']['PortMapping']['Tcp'][s_port] return f_port def pull(self, repository, tag=None, stream=False): registry, repo_name = auth.resolve_repository_name(repository) if repo_name.count(":") == 1: repository, tag = repository.rsplit(":", 1) params = { 'tag': tag, 'fromImage': repository } headers = {} if utils.compare_version('1.5', self._version) >= 0: if getattr(self, '_cfg', None) is None: self._cfg = auth.load_config() authcfg = auth.resolve_authconfig(self._cfg, registry) # do not fail if no atuhentication exists # for this specific registry as we can have a readonly pull if authcfg: headers['X-Registry-Auth'] = auth.encode_header(authcfg) u = self._url("/images/create") response = self._post(u, params=params, headers=headers, stream=stream, timeout=None) if stream: return self._stream_helper(response) else: return self._result(response) def push(self, repository, stream=False): registry, repo_name = auth.resolve_repository_name(repository) u = self._url("/images/{0}/push".format(repository)) headers = {} if getattr(self, '_cfg', None) is None: self._cfg = auth.load_config() authcfg = auth.resolve_authconfig(self._cfg, registry) if utils.compare_version('1.5', self._version) >= 0: # do not fail if no atuhentication exists # for this specific registry as we can have an anon push if authcfg: headers['X-Registry-Auth'] = auth.encode_header(authcfg) if stream: return self._stream_helper( self._post_json(u, None, headers=headers, stream=True)) else: return self._result( self._post_json(u, None, headers=headers, stream=False)) if stream: return self._stream_helper( self._post_json(u, authcfg, stream=True)) else: return self._result(self._post_json(u, authcfg, stream=False)) def remove_container(self, container, v=False, link=False): if isinstance(container, dict): container = container.get('Id') params = {'v': v, 'link': link} res = self._delete(self._url("/containers/" + container), params=params) self._raise_for_status(res) def remove_image(self, image): res = self._delete(self._url("/images/" + image)) self._raise_for_status(res) def restart(self, container, timeout=10): if isinstance(container, dict): container = container.get('Id') params = {'t': timeout} url = self._url("/containers/{0}/restart".format(container)) res = self._post(url, params=params) self._raise_for_status(res) def search(self, term): return self._result(self._get(self._url("/images/search"), params={'term': term}), True) def start(self, container, binds=None, port_bindings=None, lxc_conf=None, publish_all_ports=False, links=None): if isinstance(container, dict): container = container.get('Id') if isinstance(lxc_conf, dict): formatted = [] for k, v in six.iteritems(lxc_conf): formatted.append({'Key': k, 'Value': str(v)}) lxc_conf = formatted start_config = { 'LxcConf': lxc_conf } if binds: bind_pairs = [ '{0}:{1}'.format(host, dest) for host, dest in binds.items() ] start_config['Binds'] = bind_pairs if port_bindings: start_config['PortBindings'] = utils.convert_port_bindings( port_bindings ) 
start_config['PublishAllPorts'] = publish_all_ports if links: formatted_links = [ '{0}:{1}'.format(k, v) for k, v in sorted(six.iteritems(links)) ] start_config['Links'] = formatted_links url = self._url("/containers/{0}/start".format(container)) res = self._post_json(url, data=start_config) self._raise_for_status(res) def stop(self, container, timeout=10): if isinstance(container, dict): container = container.get('Id') params = {'t': timeout} url = self._url("/containers/{0}/stop".format(container)) res = self._post(url, params=params, timeout=max(timeout, self._timeout)) self._raise_for_status(res) def tag(self, image, repository, tag=None, force=False): params = { 'tag': tag, 'repo': repository, 'force': 1 if force else 0 } url = self._url("/images/{0}/tag".format(image)) res = self._post(url, params=params) self._raise_for_status(res) return res.status_code == 201 def top(self, container): u = self._url("/containers/{0}/top".format(container)) return self._result(self._get(u), True) def version(self): return self._result(self._get(self._url("/version")), True) def wait(self, container): if isinstance(container, dict): container = container.get('Id') url = self._url("/containers/{0}/wait".format(container)) res = self._post(url, timeout=None) self._raise_for_status(res) json_ = res.json() if 'StatusCode' in json_: return json_['StatusCode'] return -1
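# Illustrative usage sketch (not part of the original client): a typical
# create/start/wait/logs round trip built only from methods defined above.
# The base_url, image name, and command used here are assumptions made for
# the example, not values mandated by the API.
if __name__ == '__main__':
    cli = Client(base_url='unix://var/run/docker.sock')
    print(cli.version())
    # create a container from an image and run a one-shot command
    container = cli.create_container('busybox', 'echo hello world')
    cli.start(container)
    # block until the container exits, then fetch its output
    exit_code = cli.wait(container)
    print('exit status: %s' % exit_code)
    print(cli.logs(container))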
# -*- coding: utf-8 -*- from __future__ import division, print_function from collections import OrderedDict, defaultdict from contextlib import contextmanager import six import re import os from pandas.api.types import is_scalar, is_integer import numpy as np import pandas as pd import h5py def partition(start, stop, step): """Partition an integer interval into equally-sized subintervals. Like builtin :py:func:`range`, but yields pairs of end points. Examples -------- >>> for lo, hi in partition(0, 9, 2): print(lo, hi) 0 2 2 4 4 6 6 8 8 9 """ return ((i, min(i + step, stop)) for i in range(start, stop, step)) def parse_cooler_uri(s): """ Parse a Cooler URI string e.g. /path/to/mycoolers.cool::/path/to/cooler """ parts = s.split("::") if len(parts) == 1: file_path, group_path = parts[0], "/" elif len(parts) == 2: file_path, group_path = parts if not group_path.startswith("/"): group_path = "/" + group_path else: raise ValueError("Invalid Cooler URI string") return file_path, group_path def atoi(s): return int(s.replace(",", "")) def parse_humanized(s): _NUMERIC_RE = re.compile("([0-9,.]+)") _, value, unit = _NUMERIC_RE.split(s.replace(",", "")) if not len(unit): return int(value) value = float(value) unit = unit.upper().strip() if unit in ("K", "KB"): value *= 1000 elif unit in ("M", "MB"): value *= 1000000 elif unit in ("G", "GB"): value *= 1000000000 else: raise ValueError("Unknown unit '{}'".format(unit)) return int(value) def parse_region_string(s): """ Parse a UCSC-style genomic region string into a triple. Parameters ---------- s : str UCSC-style string, e.g. "chr5:10,100,000-30,000,000". Ensembl and FASTA style sequence names are allowed. End coordinate must be greater than or equal to start. Returns ------- (str, int or None, int or None) """ def _tokenize(s): token_spec = [ ("HYPHEN", r"-"), ("COORD", r"[0-9,]+(\.[0-9]*)?(?:[a-z]+)?"), ("OTHER", r".+"), ] tok_regex = r"\s*" + r"|\s*".join(r"(?P<%s>%s)" % pair for pair in token_spec) tok_regex = re.compile(tok_regex, re.IGNORECASE) for match in tok_regex.finditer(s): typ = match.lastgroup yield typ, match.group(typ) def _check_token(typ, token, expected): if typ is None: raise ValueError("Expected {} token missing".format(" or ".join(expected))) else: if typ not in expected: raise ValueError('Unexpected token "{}"'.format(token)) def _expect(tokens): typ, token = next(tokens, (None, None)) _check_token(typ, token, ["COORD"]) start = parse_humanized(token) typ, token = next(tokens, (None, None)) _check_token(typ, token, ["HYPHEN"]) typ, token = next(tokens, (None, None)) if typ is None: return start, None _check_token(typ, token, ["COORD"]) end = parse_humanized(token) if end < start: raise ValueError("End coordinate less than start") return start, end parts = s.split(":") chrom = parts[0].strip() if not len(chrom): raise ValueError("Chromosome name cannot be empty") if len(parts) < 2: return (chrom, None, None) start, end = _expect(_tokenize(parts[1])) return (chrom, start, end) def parse_region(reg, chromsizes=None): """ Genomic regions are represented as half-open intervals (0-based starts, 1-based ends) along the length coordinate of a contig/scaffold/chromosome. Parameters ---------- reg : str or tuple UCSC-style genomic region string, or Triple (chrom, start, end), where ``start`` or ``end`` may be ``None``. chromsizes : mapping, optional Lookup table of scaffold lengths to check against ``chrom`` and the ``end`` coordinate. Required if ``end`` is not supplied. 
Returns ------- A well-formed genomic region triple (str, int, int) """ if isinstance(reg, six.string_types): chrom, start, end = parse_region_string(reg) else: chrom, start, end = reg start = int(start) if start is not None else start end = int(end) if end is not None else end try: clen = chromsizes[chrom] if chromsizes is not None else None except KeyError: raise ValueError("Unknown sequence label: {}".format(chrom)) start = 0 if start is None else start if end is None: if clen is None: # TODO --- remove? raise ValueError("Cannot determine end coordinate.") end = clen if end < start: raise ValueError("End cannot be less than start") if start < 0 or (clen is not None and end > clen): raise ValueError("Genomic region out of bounds: [{}, {})".format(start, end)) return chrom, start, end def natsort_key(s, _NS_REGEX=re.compile(r"(\d+)", re.U)): return tuple([int(x) if x.isdigit() else x for x in _NS_REGEX.split(s) if x]) def natsorted(iterable): return sorted(iterable, key=natsort_key) def argnatsort(array): array = np.asarray(array) if not len(array): return np.array([], dtype=int) cols = tuple(zip(*(natsort_key(x) for x in array))) return np.lexsort(cols[::-1]) def read_chromsizes( filepath_or, name_patterns=(r"^chr[0-9]+$", r"^chr[XY]$", r"^chrM$"), all_names=False, **kwargs ): """ Parse a ``<db>.chrom.sizes`` or ``<db>.chromInfo.txt`` file from the UCSC database, where ``db`` is a genome assembly name. Parameters ---------- filepath_or : str or file-like Path or url to text file, or buffer. name_patterns : sequence, optional Sequence of regular expressions to capture desired sequence names. Each corresponding set of records will be sorted in natural order. all_names : bool, optional Whether to return all contigs listed in the file. Default is ``False``. Returns ------- :py:class:`pandas.Series` Series of integer bp lengths indexed by sequence name. References ---------- * `UCSC assembly terminology <http://genome.ucsc.edu/FAQ/FAQdownloads.html#download9>`_ * `GRC assembly terminology <https://www.ncbi.nlm.nih.gov/grc/help/definitions>`_ """ if isinstance(filepath_or, six.string_types) and filepath_or.endswith(".gz"): kwargs.setdefault("compression", "gzip") chromtable = pd.read_csv( filepath_or, sep="\t", usecols=[0, 1], names=["name", "length"], dtype={"name": str}, **kwargs ) if not all_names: parts = [] for pattern in name_patterns: part = chromtable[chromtable["name"].str.contains(pattern)] part = part.iloc[argnatsort(part["name"])] parts.append(part) chromtable = pd.concat(parts, axis=0) chromtable.index = chromtable["name"].values return chromtable["length"] def fetch_chromsizes(db, **kwargs): """ Download chromosome sizes from UCSC as a :py:class:`pandas.Series`, indexed by chromosome label. """ return read_chromsizes( "http://hgdownload.cse.ucsc.edu/goldenPath/{}/database/chromInfo.txt.gz".format( db ), **kwargs ) def load_fasta(names, *filepaths): """ Load lazy FASTA records from one or multiple files without reading them into memory. Parameters ---------- names : sequence of str Names of sequence records in FASTA file or files. filepaths : str Paths to one or more FASTA files to gather records from. 
Returns ------- OrderedDict of sequence name -> sequence record """ import pyfaidx if len(filepaths) == 0: raise ValueError("Need at least one file") if len(filepaths) == 1: fa = pyfaidx.Fasta(filepaths[0], as_raw=True) else: fa = {} for filepath in filepaths: fa.update(pyfaidx.Fasta(filepath, as_raw=True).records) records = OrderedDict((chrom, fa[chrom]) for chrom in names) return records def binnify(chromsizes, binsize): """ Divide a genome into evenly sized bins. Parameters ---------- chromsizes : Series pandas Series indexed by chromosome name with chromosome lengths in bp. binsize : int size of bins in bp Returns ------- bins : :py:class:`pandas.DataFrame` Dataframe with columns: ``chrom``, ``start``, ``end``. """ def _each(chrom): clen = chromsizes[chrom] n_bins = int(np.ceil(clen / binsize)) binedges = np.arange(0, (n_bins + 1)) * binsize binedges[-1] = clen return pd.DataFrame( {"chrom": [chrom] * n_bins, "start": binedges[:-1], "end": binedges[1:]}, columns=["chrom", "start", "end"], ) bintable = pd.concat(map(_each, chromsizes.keys()), axis=0, ignore_index=True) bintable["chrom"] = pd.Categorical( bintable["chrom"], categories=list(chromsizes.index), ordered=True ) return bintable make_bintable = binnify def digest(fasta_records, enzyme): """ Divide a genome into restriction fragments. Parameters ---------- fasta_records : OrderedDict Dictionary of chromosome names to sequence records. enzyme: str Name of restriction enzyme (e.g., 'DpnII'). Returns ------- frags : :py:class:`pandas.DataFrame` Dataframe with columns: ``chrom``, ``start``, ``end``. """ try: import Bio.Restriction as biorst import Bio.Seq as bioseq except ImportError: raise ImportError("Biopython is required to find restriction fragments.") # http://biopython.org/DIST/docs/cookbook/Restriction.html#mozTocId447698 chroms = fasta_records.keys() try: cut_finder = getattr(biorst, enzyme).search except AttributeError: raise ValueError("Unknown enzyme name: {}".format(enzyme)) def _each(chrom): seq = bioseq.Seq(str(fasta_records[chrom])) cuts = np.r_[0, np.array(cut_finder(seq)) + 1, len(seq)].astype(int) n_frags = len(cuts) - 1 frags = pd.DataFrame( {"chrom": [chrom] * n_frags, "start": cuts[:-1], "end": cuts[1:]}, columns=["chrom", "start", "end"], ) return frags return pd.concat(map(_each, chroms), axis=0, ignore_index=True) def get_binsize(bins): """ Infer bin size from a bin DataFrame. Assumes that the last bin of each contig is allowed to differ in size from the rest. Returns ------- int or None if bins are non-uniform """ sizes = set() for chrom, group in bins.groupby("chrom"): sizes.update((group["end"] - group["start"]).iloc[:-1].unique()) if len(sizes) > 1: return None if len(sizes) == 1: return next(iter(sizes)) else: return None def get_chromsizes(bins): """ Infer chromsizes Series from a bin DataFrame. Assumes that the last bin of each contig is allowed to differ in size from the rest. Returns ------- int or None if bins are non-uniform """ chromtable = ( bins.drop_duplicates(["chrom"], keep="last")[["chrom", "end"]] .reset_index(drop=True) .rename(columns={"chrom": "name", "end": "length"}) ) chroms, lengths = list(chromtable["name"]), list(chromtable["length"]) return pd.Series(index=chroms, data=lengths) def bedslice(grouped, chromsizes, region): """ Range query on a BED-like dataframe with non-overlapping intervals. 
""" chrom, start, end = parse_region(region, chromsizes) result = grouped.get_group(chrom) if start > 0 or end < chromsizes[chrom]: lo = result["end"].values.searchsorted(start, side="right") hi = lo + result["start"].values[lo:].searchsorted(end, side="left") result = result.iloc[lo:hi] return result def asarray_or_dataset(x): return x if isinstance(x, h5py.Dataset) else np.asarray(x) def rlencode(array, chunksize=None): """ Run length encoding. Based on http://stackoverflow.com/a/32681075, which is based on the rle function from R. Parameters ---------- x : 1D array_like Input array to encode dropna: bool, optional Drop all runs of NaNs. Returns ------- start positions, run lengths, run values """ where = np.flatnonzero array = asarray_or_dataset(array) n = len(array) if n == 0: return ( np.array([], dtype=int), np.array([], dtype=int), np.array([], dtype=array.dtype), ) if chunksize is None: chunksize = n starts, values = [], [] last_val = np.nan for i in range(0, n, chunksize): x = array[i : i + chunksize] locs = where(x[1:] != x[:-1]) + 1 if x[0] != last_val: locs = np.r_[0, locs] starts.append(i + locs) values.append(x[locs]) last_val = x[-1] starts = np.concatenate(starts) lengths = np.diff(np.r_[starts, n]) values = np.concatenate(values) return starts, lengths, values def cmd_exists(cmd): return any( os.access(os.path.join(path, cmd), os.X_OK) for path in os.environ["PATH"].split(os.pathsep) ) def mad(data, axis=None): return np.median(np.abs(data - np.median(data, axis)), axis) @contextmanager def open_hdf5(fp, mode="r", *args, **kwargs): """ Context manager like ``h5py.File`` but accepts already open HDF5 file handles which do not get closed on teardown. Parameters ---------- fp : str or ``h5py.File`` object If an open file object is provided, it passes through unchanged, provided that the requested mode is compatible. If a filepath is passed, the context manager will close the file on tear down. mode : str * r Readonly, file must exist * r+ Read/write, file must exist * a Read/write if exists, create otherwise * w Truncate if exists, create otherwise * w- or x Fail if exists, create otherwise """ if isinstance(fp, six.string_types): own_fh = True fh = h5py.File(fp, mode, *args, **kwargs) else: own_fh = False if mode == "r" and fp.file.mode == "r+": # warnings.warn("File object provided is writeable but intent is read-only") pass elif mode in ("r+", "a") and fp.file.mode == "r": raise ValueError("File object provided is not writeable") elif mode == "w": raise ValueError("Cannot truncate open file") elif mode in ("w-", "x"): raise ValueError("File exists") fh = fp try: yield fh finally: if own_fh: fh.close() class closing_hdf5(h5py.Group): def __init__(self, grp): super(closing_hdf5, self).__init__(grp.id) def __enter__(self): return self def __exit__(self, *exc_info): return self.file.close() def close(self): self.file.close() def attrs_to_jsonable(attrs): out = dict(attrs) for k, v in attrs.items(): try: out[k] = v.item() except ValueError: out[k] = v.tolist() except AttributeError: out[k] = v return out def infer_meta(x, index=None): # pragma: no cover """ Extracted and modified from dask/dataframe/utils.py : make_meta (BSD licensed) Create an empty pandas object containing the desired metadata. Parameters ---------- x : dict, tuple, list, pd.Series, pd.DataFrame, pd.Index, dtype, scalar To create a DataFrame, provide a `dict` mapping of `{name: dtype}`, or an iterable of `(name, dtype)` tuples. To create a `Series`, provide a tuple of `(name, dtype)`. 
If a pandas object, names, dtypes, and index should match the desired output. If a dtype or scalar, a scalar of the same dtype is returned. index : pd.Index, optional Any pandas index to use in the metadata. If none provided, a `RangeIndex` will be used. Examples -------- >>> make_meta([('a', 'i8'), ('b', 'O')]) Empty DataFrame Columns: [a, b] Index: [] >>> make_meta(('a', 'f8')) Series([], Name: a, dtype: float64) >>> make_meta('i8') 1 """ _simple_fake_mapping = { "b": np.bool_(True), "V": np.void(b" "), "M": np.datetime64("1970-01-01"), "m": np.timedelta64(1), "S": np.str_("foo"), "a": np.str_("foo"), "U": np.unicode_("foo"), "O": "foo", } UNKNOWN_CATEGORIES = "__UNKNOWN_CATEGORIES__" def _scalar_from_dtype(dtype): if dtype.kind in ("i", "f", "u"): return dtype.type(1) elif dtype.kind == "c": return dtype.type(complex(1, 0)) elif dtype.kind in _simple_fake_mapping: o = _simple_fake_mapping[dtype.kind] return o.astype(dtype) if dtype.kind in ("m", "M") else o else: raise TypeError("Can't handle dtype: {0}".format(dtype)) def _nonempty_scalar(x): if isinstance(x, (pd.Timestamp, pd.Timedelta, pd.Period)): return x elif np.isscalar(x): dtype = x.dtype if hasattr(x, "dtype") else np.dtype(type(x)) return _scalar_from_dtype(dtype) else: raise TypeError( "Can't handle meta of type " "'{0}'".format(type(x).__name__) ) def _empty_series(name, dtype, index=None): if isinstance(dtype, str) and dtype == "category": return pd.Series( pd.Categorical([UNKNOWN_CATEGORIES]), name=name, index=index ).iloc[:0] return pd.Series([], dtype=dtype, name=name, index=index) if hasattr(x, "_meta"): return x._meta if isinstance(x, (pd.Series, pd.DataFrame)): return x.iloc[0:0] elif isinstance(x, pd.Index): return x[0:0] index = index if index is None else index[0:0] if isinstance(x, dict): return pd.DataFrame( {c: _empty_series(c, d, index=index) for (c, d) in x.items()}, index=index ) if isinstance(x, tuple) and len(x) == 2: return _empty_series(x[0], x[1], index=index) elif isinstance(x, (list, tuple)): if not all(isinstance(i, tuple) and len(i) == 2 for i in x): raise ValueError( "Expected iterable of tuples of (name, dtype), " "got {0}".format(x) ) return pd.DataFrame( {c: _empty_series(c, d, index=index) for (c, d) in x}, columns=[c for c, d in x], index=index, ) elif not hasattr(x, "dtype") and x is not None: # could be a string, a dtype object, or a python type. Skip `None`, # because it is implictly converted to `dtype('f8')`, which we don't # want here. try: dtype = np.dtype(x) return _scalar_from_dtype(dtype) except: # noqa # Continue on to next check pass if is_scalar(x): return _nonempty_scalar(x) raise TypeError("Don't know how to create metadata from {0}".format(x)) def get_meta( columns, dtype=None, index_columns=None, index_names=None, default_dtype=np.object ): # pragma: no cover """ Extracted and modified from pandas/io/parsers.py : _get_empty_meta (BSD licensed). """ columns = list(columns) # Convert `dtype` to a defaultdict of some kind. # This will enable us to write `dtype[col_name]` # without worrying about KeyError issues later on. if not isinstance(dtype, dict): # if dtype == None, default will be default_dtype. dtype = defaultdict(lambda: dtype or default_dtype) else: # Save a copy of the dictionary. _dtype = dtype.copy() dtype = defaultdict(lambda: default_dtype) # Convert column indexes to column names. 
for k, v in six.iteritems(_dtype): col = columns[k] if is_integer(k) else k dtype[col] = v if index_columns is None or index_columns is False: index = pd.Index([]) else: data = [pd.Series([], dtype=dtype[name]) for name in index_names] if len(data) == 1: index = pd.Index(data[0], name=index_names[0]) else: index = pd.MultiIndex.from_arrays(data, names=index_names) index_columns.sort() for i, n in enumerate(index_columns): columns.pop(n - i) col_dict = {col_name: pd.Series([], dtype=dtype[col_name]) for col_name in columns} return pd.DataFrame(col_dict, columns=columns, index=index) def check_bins(bins, chromsizes): is_cat = pd.api.types.is_categorical(bins["chrom"]) bins = bins.copy() if not is_cat: bins["chrom"] = pd.Categorical( bins.chrom, categories=list(chromsizes.index), ordered=True ) else: assert (bins["chrom"].cat.categories == chromsizes.index).all() return bins def balanced_partition(gs, n_chunk_max, file_contigs, loadings=None): # n_bins = len(gs.bins) grouped = gs._bins_grouped chrom_nbins = grouped.size() if loadings is None: loadings = chrom_nbins chrmax = loadings.idxmax() loadings = loadings / loadings.loc[chrmax] const = chrom_nbins.loc[chrmax] / n_chunk_max granges = [] for chrom, group in grouped: if chrom not in file_contigs: continue clen = gs.chromsizes[chrom] step = int(np.ceil(const / loadings.loc[chrom])) anchors = group.start.values[::step] if anchors[-1] != clen: anchors = np.r_[anchors, clen] granges.extend( (chrom, start, end) for start, end in zip(anchors[:-1], anchors[1:]) ) return granges class GenomeSegmentation(object): def __init__(self, chromsizes, bins): bins = check_bins(bins, chromsizes) self._bins_grouped = bins.groupby("chrom", sort=False) nbins_per_chrom = self._bins_grouped.size().values self.chromsizes = chromsizes self.binsize = get_binsize(bins) self.contigs = list(chromsizes.keys()) self.bins = bins self.idmap = pd.Series(index=chromsizes.keys(), data=range(len(chromsizes))) self.chrom_binoffset = np.r_[0, np.cumsum(nbins_per_chrom)] self.chrom_abspos = np.r_[0, np.cumsum(chromsizes.values)] self.start_abspos = ( self.chrom_abspos[bins["chrom"].cat.codes] + bins["start"].values ) def fetch(self, region): chrom, start, end = parse_region(region, self.chromsizes) result = self._bins_grouped.get_group(chrom) if start > 0 or end < self.chromsizes[chrom]: lo = result["end"].values.searchsorted(start, side="right") hi = lo + result["start"].values[lo:].searchsorted(end, side="left") result = result.iloc[lo:hi] return result def buffered(chunks, size=10000000): """ Take an incoming iterator of small data frame chunks and buffer them into an outgoing iterator of larger chunks. Parameters ---------- chunks : iterator of :py:class:`pandas.DataFrame` Each chunk should have the same column names. size : int Minimum length of output chunks. Yields ------ Larger outgoing :py:class:`pandas.DataFrame` chunks made from concatenating the incoming ones. """ buf = [] n = 0 for chunk in chunks: n += len(chunk) buf.append(chunk) if n > size: yield pd.concat(buf, axis=0) buf = [] n = 0 if len(buf): yield pd.concat(buf, axis=0)
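# The following is a minimal usage sketch (not part of the original module)
# showing how the helpers above fit together: building a bin table with
# binnify(), parsing a UCSC-style region string with parse_region(), and
# range-querying the bins through GenomeSegmentation.fetch(). The chromosome
# names and sizes are invented for illustration only.
if __name__ == "__main__":
    example_chromsizes = pd.Series({"chr1": 1000, "chr2": 600})
    # Fixed-size bins; the last bin of each chromosome is clipped to its length.
    example_bins = binnify(example_chromsizes, binsize=250)
    # Region string parsed and bounds-checked against the chromosome sizes.
    chrom, start, end = parse_region("chr1:100-900", example_chromsizes)
    # Group the bin table by chromosome and fetch the bins overlapping the region.
    gs = GenomeSegmentation(example_chromsizes, example_bins)
    print(gs.fetch((chrom, start, end)))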
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

# Project tree widget
from QOpenScienceFramework.widgets.projecttree import ProjectTree
# Utility classes and functions
from QOpenScienceFramework.util import *
# Python 2 and 3 compatibility settings
from QOpenScienceFramework.compat import *
from QOpenScienceFramework import dirname
# Required QT classes
from qtpy import QtGui, QtCore, QtWidgets
import pprint
# For better time functions
import arrow
# For presenting numbers in human readable formats
import humanize
# Fileinspector for determining filetypes
import fileinspector
# OSF connection interface
import QOpenScienceFramework.connection as osf
# QtAwesome icon fonts for spinners
import qtawesome as qta
import os
import re
import sys
import json
import warnings
import logging

logger = logging.getLogger()

pp = pprint.PrettyPrinter(indent=2)

osf_logo_path = os.path.join(dirname, 'img/cos-white2.png')
osf_blacklogo_path = os.path.join(dirname, 'img/cos-black.png')


# Dummy function later to be replaced for translation
def _(s):
    return s


class OSFExplorer(QtWidgets.QWidget):
    """ An explorer of the current user's OSF account """

    # Size of preview icon in properties pane
    preview_size = QtCore.QSize(150, 150)
    button_icon_size = QtCore.QSize(20, 20)
    # Formatting of date displays
    timeformat = 'YYYY-MM-DD HH:mm'
    datedisplay = '{} ({})'
    # The maximum size an image may have to be downloaded for preview
    preview_size_limit = 1024**2/2.0
    # Signal that is sent if image preview should be aborted
    abort_preview = QtCore.Signal()
    """ PyQt signal emitted when an image preview is to be aborted. """

    def __init__(self, manager, tree_widget=None, locale='en_us'):
        """ Constructor

        Can be passed a reference to an already existing ProjectTree if
        desired, otherwise it creates a new instance of this object.

        Parameters
        ----------
        manager : manager.ConnectionManager
            The object taking care of all the communication with the OSF
        tree_widget : ProjectTree (default: None)
            An already existing ProjectTree instance to use; if None, a new
            instance is created
        locale : string (default: 'en_us')
            The language in which the time information should be presented.
            Should consist of lowercase characters only (e.g. nl_nl)
        """
        # Call parent's constructor
        super(OSFExplorer, self).__init__()
        self.manager = manager

        self.setWindowTitle(_("OSF Explorer"))
        self.resize(800, 500)
        # Set Window icon
        if not os.path.isfile(osf_blacklogo_path):
            raise IOError("OSF logo not found at expected path: {}".format(
                osf_blacklogo_path))
        osf_icon = QtGui.QIcon(osf_blacklogo_path)
        self.setWindowIcon(osf_icon)

        self.__config = {}

        # Set up the title widget (so much code for a simple header with image...)
        self.title_widget = QtWidgets.QWidget(self)
        self.title_widget.setLayout(QtWidgets.QHBoxLayout(self))
        title_logo = QtWidgets.QLabel(self)
        title_logo.setPixmap(osf_icon.pixmap(QtCore.QSize(32, 32)))
        title_label = QtWidgets.QLabel("<h1>Open Science Framework</h1>", self)
        self.title_widget.layout().addWidget(title_logo)
        self.title_widget.layout().addWidget(title_label)
        self.title_widget.layout().addStretch(1)
        self.title_widget.setContentsMargins(0, 0, 0, 0)
        self.title_widget.layout().setContentsMargins(0, 0, 0, 0)
        self.title_widget.setSizePolicy(
            QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)

        # globally accessible items
        self.locale = locale
        # ProjectTree widget. Can be passed as a reference to this object.
if tree_widget is None: # Create a new ProjectTree instance self.tree = ProjectTree(manager) else: # Check if passed reference is a ProjectTree instance if type(tree_widget) != ProjectTree: raise TypeError("Passed tree_widget should be a 'ProjectTree' " "instance.") else: # assign passed reference of ProjectTree to this instance self.tree = tree_widget self.tree.setSortingEnabled(True) self.tree.sortItems(0, QtCore.Qt.AscendingOrder) self.tree.contextMenuEvent = self.__show_tree_context_menu # File properties overview properties_pane = self.__create_properties_pane() # The section in which the file icon or the image preview is presented preview_area = QtWidgets.QVBoxLayout() # Space for image self.image_space = QtWidgets.QLabel() self.image_space.setAlignment(QtCore.Qt.AlignCenter) self.image_space.resizeEvent = self.__resizeImagePreview # This holds the image preview in binary format. Everytime the img preview # needs to be rescaled, it is done with this variable as the img source self.current_img_preview = None # The progress bar depicting the download state of the image preview self.img_preview_progress_bar = QtWidgets.QProgressBar() self.img_preview_progress_bar.setAlignment(QtCore.Qt.AlignCenter) self.img_preview_progress_bar.hide() preview_area.addWidget(self.image_space) preview_area.addWidget(self.img_preview_progress_bar) # Create layouts # The box layout holding all elements self.main_layout = QtWidgets.QVBoxLayout(self) # Grid layout for the info consisting of an image space and the # properties grid info_grid = QtWidgets.QVBoxLayout() info_grid.setSpacing(10) info_grid.addLayout(preview_area) info_grid.addLayout(properties_pane) # The widget to hold the infogrid self.info_frame = QtWidgets.QWidget() self.info_frame.setLayout(info_grid) self.info_frame.setVisible(False) filterPanel = QtWidgets.QWidget(self) filterPanel.setLayout(QtWidgets.QHBoxLayout()) filterLabel = QtWidgets.QLabel('Filter:') self.filterField = QtWidgets.QLineEdit(self) self.filterField.setPlaceholderText(_('Search projects by their name')) self.filterField.textChanged.connect(self.__slot_filterChanged) filterPanel.layout().addWidget(filterLabel) filterPanel.layout().addWidget(self.filterField) filterPanel.layout().setContentsMargins(0, 0, 0, 0) # The widget to hold the filter textfield and the tree treePanel = QtWidgets.QWidget(self) treePanel.setLayout(QtWidgets.QVBoxLayout()) treePanel.layout().addWidget(filterPanel) treePanel.layout().addWidget(self.tree) # Combine tree and info frame with a splitter in the middle splitter = QtWidgets.QSplitter(QtCore.Qt.Horizontal) splitter.addWidget(treePanel) splitter.addWidget(self.info_frame) # Create buttons at the bottom self.buttonbar = self.__create_buttonbar() # Add splitter to extra parent widget to allow overlay self.login_required_overlay = QtWidgets.QLabel( _(u"Log in to the OSF to use this module")) self.login_required_overlay.setStyleSheet( """ font-size: 20px; background: rgba(250, 250, 250, 0.75); """) self.login_required_overlay.setAlignment( QtCore.Qt.AlignCenter | QtCore.Qt.AlignVCenter) # Content pane with tree and properties view # Also has overlay showing login required message when use is logged # out content_panel = QtWidgets.QWidget(self) content_layout = QtWidgets.QGridLayout() content_layout.setContentsMargins(0, 0, 0, 0) content_panel.setLayout(content_layout) content_layout.addWidget(splitter, 1, 1) content_layout.addWidget(self.login_required_overlay, 1, 1) # Add to layout self.main_layout.addWidget(self.title_widget) 
self.main_layout.addWidget(content_panel) self.main_layout.addWidget(self.buttonbar) self.main_layout.setContentsMargins(12, 12, 12, 12) self.setLayout(self.main_layout) # Event connections self.tree.currentItemChanged.connect(self.__slot_currentItemChanged) self.tree.itemSelectionChanged.connect( self.__slot_itemSelectionChanged) self.tree.refreshFinished.connect(self.__tree_refresh_finished) # Private functions def __resizeImagePreview(self, event): """ Resize the image preview (if there is any) after a resize event """ if not self.current_img_preview is None: # Calculate new height, but let the minimum be determined by # the y coordinate of preview_size new_height = max(event.size().height()-20, self.preview_size.height()) pm = self.current_img_preview.scaledToHeight(new_height) self.image_space.setPixmap(pm) def __create_buttonbar(self): """ Creates the button bar at the bottom of the explorer """ # General buttonbar widget buttonbar = QtWidgets.QWidget() buttonbar_hbox = QtWidgets.QHBoxLayout(buttonbar) buttonbar.setLayout(buttonbar_hbox) # Refresh button - always visible self.refresh_icon = qta.icon('fa.refresh', color='green') self.refresh_button = QtWidgets.QPushButton( self.refresh_icon, _('Refresh')) self.refresh_icon_spinning = qta.icon( 'fa.refresh', color='green', animation=qta.Spin(self.refresh_button)) self.refresh_button.setIconSize(self.button_icon_size) self.refresh_button.clicked.connect(self.__clicked_refresh_tree) self.refresh_button.setToolTip(_(u"Refresh")) self.refresh_button.setDisabled(True) # Other buttons, depend on config settings of OSF explorer self.new_folder_icon = QtGui.QIcon.fromTheme( 'folder-new', qta.icon('ei.folder-sign') ) self.new_folder_button = QtWidgets.QPushButton( self.new_folder_icon, _('New folder')) self.new_folder_button.setIconSize(self.button_icon_size) self.new_folder_button.clicked.connect(self.__clicked_new_folder) self.new_folder_button.setToolTip(_(u"Create a new folder at the currently" " selected location")) self.new_folder_button.setDisabled(True) self.delete_icon = QtGui.QIcon.fromTheme( 'edit-delete', qta.icon('fa.trash') ) self.delete_button = QtWidgets.QPushButton( self.delete_icon, _('Delete')) self.delete_button.setIconSize(self.button_icon_size) self.delete_button.clicked.connect(self.__clicked_delete) self.delete_button.setToolTip(_(u"Delete the currently selected file or " "folder")) self.delete_button.setDisabled(True) self.download_icon = QtGui.QIcon.fromTheme( 'go-down', qta.icon('fa.cloud-download') ) self.download_button = QtWidgets.QPushButton(self.download_icon, _('Download')) self.download_button.setIconSize(self.button_icon_size) self.download_button.clicked.connect(self._clicked_download_file) self.download_button.setToolTip( _(u"Download the currently selected file")) self.download_button.setDisabled(True) self.upload_icon = QtGui.QIcon.fromTheme( 'go-up', qta.icon('fa.cloud-upload') ) self.upload_button = QtWidgets.QPushButton(self.upload_icon, _('Upload')) self.upload_button.clicked.connect(self.__clicked_upload_file) self.upload_button.setIconSize(self.button_icon_size) self.upload_button.setToolTip(_(u"Upload a file to the currently selected" " folder")) self.upload_button.setDisabled(True) # Set up the general button bar layouts buttonbar_hbox.addWidget(self.refresh_button) buttonbar_hbox.addStretch(1) # Add default buttons to default widget buttonbar_hbox.addWidget(self.new_folder_button) buttonbar_hbox.addWidget(self.delete_button) buttonbar_hbox.addWidget(self.download_button) 
buttonbar_hbox.addWidget(self.upload_button) # Make sure the button bar is vertically as small as possible. buttonbar.setSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed) # Store the above buttons (except refresh) into a variable which later # can be used to customize button set configurations self.buttonsets = { 'default': [] } self.buttonsets['default'].append(self.new_folder_button) self.buttonsets['default'].append(self.delete_button) self.buttonsets['default'].append(self.upload_button) self.buttonsets['default'].append(self.download_button) buttonbar.layout().setContentsMargins(0, 0, 0, 0) return buttonbar def __create_properties_pane(self): """ Creates the panel showing the selected item's properties on the right. """ # Box to show the properties of the selected item properties_pane = QtWidgets.QFormLayout() properties_pane.setFormAlignment( QtCore.Qt.AlignLeft | QtCore.Qt.AlignLeft) properties_pane.setLabelAlignment(QtCore.Qt.AlignRight) properties_pane.setContentsMargins(15, 11, 15, 40) labelStyle = 'font-weight: bold' self.common_fields = ['Name', 'Type'] self.file_fields = ['Size', 'Created', 'Modified', 'Online'] self.properties = {} for field in self.common_fields + self.file_fields: label = QtWidgets.QLabel(_(field)) label.setStyleSheet(labelStyle) if field == "Online": # Initialize label with some HTML to trigger the rich text mode value = QtWidgets.QLabel('<a></a>') value.setOpenExternalLinks(True) else: value = QElidedLabel('') value.setWindowFlags(QtCore.Qt.Dialog) self.properties[field] = (label, value) properties_pane.addRow(label, value) # Make sure the fields specific for files are shown for row in self.file_fields: for field in self.properties[row]: field.hide() return properties_pane # Public functions def create_context_menu(self, item): """ Creates a context menu for the currently selected TreeWidgetItem. Menu contents differ depending on if the selected item is a file or a folder, and if the folder is the root of a repo or a subfolder thereof. 
""" data = item.data(0, QtCore.Qt.UserRole) # Don't make context menu for a project if data['type'] == 'nodes': return None user_has_write_permissions = False try: user_has_write_permissions = "write" in \ data["attributes"]["current_user_permissions"] except AttributeError as e: raise osf.OSFInvalidResponse('Could not retrieve permission info: ' '{}'.format(e)) if data['type'] == 'files': kind = data["attributes"]["kind"] # Check if the current item is a repository (which is represented as a # normal folder) parent_data = item.parent().data(0, QtCore.Qt.UserRole) if parent_data['type'] == 'nodes': item_is_repo = True else: item_is_repo = False menu = QtWidgets.QMenu(self.tree) # Actions only allowed on files if kind == "file": menu.addAction(self.download_icon, _(u"Download file"), self._clicked_download_file) # Actions only allowed on folders if kind == "folder": upload_action = menu.addAction(self.upload_icon, _(u"Upload file to folder"), self.__clicked_upload_file) newfolder_action = menu.addAction(self.new_folder_icon, _(u"Create new folder"), self.__clicked_new_folder) menu.addAction(self.refresh_icon, _(u"Refresh contents"), self.__clicked_partial_refresh) if not user_has_write_permissions: upload_action.setDisabled(True) newfolder_action.setDisabled(True) # Only allow deletion of files and subfolders of repos if kind == "file" or not item_is_repo: delete_action = menu.addAction(self.delete_icon, _(u"Delete"), self.__clicked_delete) if not user_has_write_permissions: delete_action.setDisabled(True) return menu def add_buttonset(self, title, buttons): """ Adds a set of buttons that can be referenced by 'title'. With set_buttonset(title) the buttons can be switched to this set. Parameters ---------- title : str The label of the buttonset buttons : list A list of objects that inherit from QWidgets.QAbstractButton and which should be included in the buttonset designated by *title* Raises ------ TypeError If an item in the buttons list is not an instance of QAbstractButton. """ # Check if the passed parameters are valid. This function only takes a list # (even if the set consists of a single button) if not isinstance(buttons, list): raise TypeError('"buttons" should be a list with QtWidgets.QAbstractButton' ' that belong to the set') # Check if all items in the list are a QtWidgets.QPushButton for bttn in buttons: if not isinstance(bttn, QtWidgets.QAbstractButton): raise TypeError('All items in the buttons list should ' ' inherit from QtWidgets.QAbstractButton') bttn.setVisible(False) self.buttonbar.layout().addWidget(bttn) self.buttonsets[title] = buttons def show_buttonset(self, title): """ Sets the buttonset to show and hides all others. Parameters ---------- title : str The label of the buttonset that should be shown. To show the default buttonset, pass 'default'. Raises ------ KeyError If there is no buttonset known by that label. """ if not title in self.buttonsets: raise KeyError('Buttonset "{}" could not be found.'.format(title)) # First hide all items for bttnset in self.buttonsets.values(): for bttn in bttnset: bttn.setVisible(False) # Then show only the buttons of the specified buttonset for bttn in self.buttonsets[title]: bttn.setVisible(True) def set_file_properties(self, data): """ Fills the contents of the properties panel for files. Makes sure the extra fields concerning files are shown. Parameters ---------- attributes : dict A dictionary containing the information retrieved from the OSF, stored at the data/attributes path of the json response. 
""" # Get required properties attributes = data['attributes'] name = attributes.get("name", "Unspecified") filesize = attributes.get("size", "Unspecified") created = attributes.get("date_created", "Unspecified") modified = attributes.get("date_modified", "Unspecified") if check_if_opensesame_file(name): filetype = "OpenSesame experiment" else: # Use fileinspector to determine filetype filetype = fileinspector.determine_type(name) # If filetype could not be determined, the response is False if not filetype is None: self.properties["Type"][1].setText(filetype) if fileinspector.determine_category(filetype) == "image": # Download and display image if it is not too big. if not filesize is None and filesize <= self.preview_size_limit: self.img_preview_progress_bar.setValue(0) self.img_preview_progress_bar.show() self.manager.get( data["links"]["download"], self.__set_image_preview, downloadProgress=self.__prev_dl_progress, errorCallback=self.__img_preview_error, abortSignal=self.abort_preview ) else: filetype = "file" # If filesize is None, default to the value 'Unspecified' if filesize is None: filesize = "Unspecified" # If filesize is a number do some reformatting of the data to make it # look nicer for us humans if filesize != "Unspecified" and isinstance(filesize, int): filesize = humanize.naturalsize(filesize) # Format created time if created != "Unspecified": cArrow = arrow.get(created).to('local') created = self.datedisplay.format( cArrow.format(self.timeformat), cArrow.humanize(locale=self.locale) ) # Format modified time if modified != "Unspecified": mArrow = arrow.get(modified).to('local') modified = self.datedisplay.format( mArrow.format(self.timeformat), mArrow.humanize(locale=self.locale) ) # Set properties in the panel. self.properties["Name"][1].setText(name) self.properties["Type"][1].setText(filetype) self.properties["Size"][1].setText(filesize) self.properties["Created"][1].setText(created) self.properties["Modified"][1].setText(modified) # Make sure the fields specific for files are visible for row in self.file_fields: for field in self.properties[row]: field.show() # Get the link to the file on the website of OSF. # Sadly, this is URL is not always available for all files, so hide the # row if the GUID is not provided. guid = data["attributes"]["guid"] if guid is None: self.properties["Online"][0].hide() self.properties["Online"][1].hide() else: web_url = u"{}/{}".format(osf.settings['website_url'], guid) a = u"<a href=\"{0}\">{0}</a>".format(web_url) # Set the URL in the field self.properties["Online"][1].setText(a) # Show the row self.properties["Online"][0].show() self.properties["Online"][1].show() def set_folder_properties(self, data): """ Fills the contents of the properties pane for folders. Make sure the fields only concerning files are hidden. Parameters ---------- attributes : dict A dictionary containing the information retrieved from the OSF, stored at the data/attributes path of the json response """ attributes = data['attributes'] # A node (i.e. 
a project) has title and category fields if "title" in attributes and "category" in attributes: self.properties["Name"][1].setText(attributes["title"]) if attributes["public"]: level = "Public" else: level = "Private" access_level = "" if not "write" in attributes["current_user_permissions"]: access_level = " (read only)" self.properties["Type"][1].setText(level + " " + attributes["category"] + access_level) elif "name" in attributes and "kind" in attributes: self.properties["Name"][1].setText(attributes["name"]) self.properties["Type"][1].setText(attributes["kind"]) else: raise osf.OSFInvalidResponse("Invalid structure for folder property" " received") # Make sure the fields specific for files are shown for row in self.file_fields: for field in self.properties[row]: field.hide() # Just to be sure (even though it's useless as these fields are hidden) # clear the contents of the fields below self.properties["Size"][1].setText('') self.properties["Created"][1].setText('') self.properties["Modified"][1].setText('') def set_config(self, config): """ Function that sets the current config. The OSF explorer can be configured to show specific button sets at the bottom (e.g. show other buttons than the default download, upload, etc.) and to hide items in the tree by setting a filter. To only show items with a .txt extension, one can set the filter by passing the dict: :: config = {'filter':'.txt'} Multiple filetypes can be filtered by passing a list of extensions: :: config = {'filter':['.txt','.py']} To clear a previously set filter, set its value to None :: config = {'filter': None} If you have created extra button sets by using the `add_buttonset` function, you can specify which buttonset should be shown by adding a 'buttonset' entry to the config dict, which contains the name of the buttonset to show :: config = {'buttonset': 'my_buttonset'} to switch back to the default buttonset, pass 'default' as the value :: config = {'buttonset': 'default'} .. note :: Calling this function is equal to setting the config variable directly by using OSFExplorer.config = <config dict> Parameters ---------- config : dict The dictionary containing new configuration parameters. It can contain directives to set a filter (with the filter key) and/or which buttonset to show (with the buttonset key) """ self.config = config @property def config(self): """ The current configuration of the project explorer. Contains information about the current filter that is set for the project tree and the buttonset that is shown. """ return self.__config @config.setter def config(self, value): """ Sets the current config for the project explorer. The config dict can contain two entries. - filter : a list of file extensions which should only be shown in the \ tree - buttonset : the buttonset to show, if one has added custom buttonsets. \ The default buttonset is designated by 'default' """ if not isinstance(value, dict): raise TypeError('config should be a dict with options') self.__config.update(value) cfg = self.__config.copy() # Get the current filter filt = cfg.pop('filter', None) # Get the current buttonset buttonset = cfg.pop('buttonset', 'default') self.tree.filter = filt self.show_buttonset(buttonset) if len(cfg): logger.warning("Unknown options: {}".format(cfg.keys())) # PyQT slots def __show_tree_context_menu(self, e): """ Shows the context menu when a tree item is right clicked. 
""" item = self.tree.itemAt(e.pos()) if item is None: return context_menu = self.create_context_menu(item) if not context_menu is None: context_menu.popup(e.globalPos()) def __slot_filterChanged(self, contents): self.config = {"filter": contents} def __slot_currentItemChanged(self, item, col): """ Handles the QTreeWidget currentItemChanged event. """ # If selection changed to no item, do nothing if item is None: return # Reset the image preview contents self.current_img_preview = None self.img_preview_progress_bar.hide() # Abort previous preview operation (if any) self.abort_preview.emit() data = item.data(0, QtCore.Qt.UserRole) user_has_write_permissions = "write" in \ data["attributes"]["current_user_permissions"] access=None if data['type'] == 'nodes': name = data["attributes"]["title"] kind = data["attributes"]["category"] if not user_has_write_permissions: access = "readonly" elif data["attributes"]["public"]: access = "public" else: access = "private" if data['type'] == 'files': name = data["attributes"]["name"] kind = data["attributes"]["kind"] pm = self.tree.get_icon(kind, name, access).pixmap(self.preview_size) self.image_space.setPixmap(pm) if kind == "file": self.set_file_properties(data) self.download_button.setDisabled(False) self.upload_button.setDisabled(True) self.new_folder_button.setDisabled(True) if user_has_write_permissions: self.delete_button.setDisabled(False) else: self.delete_button.setDisabled(True) elif kind == "folder": self.set_folder_properties(data) if user_has_write_permissions: self.new_folder_button.setDisabled(False) self.upload_button.setDisabled(False) else: self.new_folder_button.setDisabled(True) self.upload_button.setDisabled(True) self.download_button.setDisabled(True) # Check if the parent node is a project # If so the current 'folder' must be a storage provider (e.g. dropbox) # which should not be allowed to be deleted. parent_data = item.parent().data(0, QtCore.Qt.UserRole) if parent_data['type'] == 'nodes' or not user_has_write_permissions: self.delete_button.setDisabled(True) else: self.delete_button.setDisabled(False) else: self.set_folder_properties(data) self.new_folder_button.setDisabled(True) self.download_button.setDisabled(True) self.upload_button.setDisabled(True) self.delete_button.setDisabled(True) nodeStatus = item.data(1, QtCore.Qt.UserRole) if (data['type'] == 'nodes' or data['attributes']['kind'] == 'folder') \ and not nodeStatus['fetched']: self.tree.refresh_children_of_node(item) def __slot_itemSelectionChanged(self): selected = self.tree.selectedItems() items_selected = bool(selected) # If there are selected items, show the properties pane if items_selected and not self.info_frame.isVisible(): self.info_frame.setVisible(True) self.info_frame.resize(300, 500) return if not items_selected and self.info_frame.isVisible(): # Reset the image preview contents self.current_img_preview = None self.info_frame.setVisible(False) self.download_button.setDisabled(True) self.upload_button.setDisabled(True) self.delete_button.setDisabled(True) self.refresh_button.setDisabled(True) return def __clicked_refresh_tree(self): """ Refresh the tree contents and animate the refresh button while this process is in progress. """ # Don't do anything if the refresh button is disabled. This probably # means a refresh operation is in progress, and activating another one # during this is asking for trouble. 
if self.refresh_button.isEnabled() == False: return self.refresh_button.setDisabled(True) self.refresh_button.setIcon(self.refresh_icon_spinning) self.tree.refresh_contents() def __clicked_partial_refresh(self): selected_item = self.tree.currentItem() # Don't do anything if the refresh button is disabled. This probably # means a refresh operation is in progress, and activating another one # during this is asking for trouble. if self.refresh_button.isEnabled() == False: return self.refresh_button.setDisabled(True) self.refresh_button.setIcon(self.refresh_icon_spinning) self.tree.refresh_children_of_node(selected_item) def _clicked_download_file(self): """ Action to be performed when download button is clicked. Downloads the selected file to the user specified location. """ selected_item = self.tree.currentItem() data = selected_item.data(0, QtCore.Qt.UserRole) download_url = data['links']['download'] filename = data['attributes']['name'] # See if a previous folder was set, and if not, try to set # the user's home folder as a starting folder if not hasattr(self, 'last_dl_destination_folder'): self.last_dl_destination_folder = safe_decode( os.path.expanduser(safe_str("~")), enc=sys.getfilesystemencoding()) destination = QtWidgets.QFileDialog.getSaveFileName(self, _("Save file as"), os.path.join( self.last_dl_destination_folder, filename), ) # PyQt5 returns a tuple, because it actually performs the function of # PyQt4's getSaveFileNameAndFilter() function if isinstance(destination, tuple): destination = destination[0] if destination: # Remember this folder for later when this dialog has to be presented again self.last_dl_destination_folder = os.path.split(destination)[0] # Configure progress dialog (only if filesize is known) if data['attributes']['size']: progress_dialog_data = { "filename": filename, "filesize": data['attributes']['size'] } else: progress_dialog_data = None # Download the file self.manager.download_file( download_url, destination, progressDialog=progress_dialog_data, finishedCallback=self.__download_finished ) def __clicked_delete(self): """ Handles a click on the delete button. Deletes the selected file or folder. """ selected_item = self.tree.currentItem() data = selected_item.data(0, QtCore.Qt.UserRole) reply = QtWidgets.QMessageBox.question( self, _("Please confirm"), _("Are you sure you want to delete '") + data['attributes']['name'] + "'?", QtWidgets.QMessageBox.No | QtWidgets.QMessageBox.Yes ) if reply == QtWidgets.QMessageBox.Yes: delete_url = data['links']['delete'] self.manager.delete(delete_url, self.__item_deleted, selected_item) def __clicked_upload_file(self): """ Handles a click on the upload button. Prepares for upload of a file to the currently selected folder. 
""" selected_item = self.tree.currentItem() data = selected_item.data(0, QtCore.Qt.UserRole) upload_url = data['links']['upload'] # See if a previous folder was set, and if not, try to set # the user's home folder as a starting folder if not hasattr(self, 'last_open_destination_folder'): self.last_open_destination_folder = safe_decode( os.path.expanduser(safe_str("~")), enc=sys.getfilesystemencoding()) file_to_upload = QtWidgets.QFileDialog.getOpenFileName( self, _("Select file for upload"), os.path.join( self.last_open_destination_folder), ) # PyQt5 returns a tuple, because it actually performs the function of # PyQt4's getSaveFileNameAndFilter() function if isinstance(file_to_upload, tuple): file_to_upload = file_to_upload[0] if file_to_upload: # Get the filename folder, filename = os.path.split(file_to_upload) # Remember the containing folder for later self.last_open_destination_folder = folder # ... and the convert to QFile file_to_upload = QtCore.QFile(file_to_upload) # Check if file is already present and get its index if so index_if_present = self.tree.find_item(selected_item, 0, filename) # If index_is_present is None, the file is probably new if index_if_present is None: # add required query parameters upload_url += '?kind=file&name={}'.format(filename) # If index_is_present is a number, it means the file is present # and that file needs to be updated. else: reply = QtWidgets.QMessageBox.question( self, _("Please confirm"), _("The selected folder already contains this file. Are you " "sure you want to overwrite it?"), QtWidgets.QMessageBox.No | QtWidgets.QMessageBox.Yes ) if reply == QtWidgets.QMessageBox.No: return logger.info( "File {} exists and will be updated".format(filename)) old_item = selected_item.child(index_if_present) # Get data stored in item old_item_data = old_item.data(0, QtCore.Qt.UserRole) # Get file specific update utrl upload_url = old_item_data['links']['upload'] upload_url += '?kind=file' progress_dialog_data = { "filename": file_to_upload.fileName(), "filesize": file_to_upload.size() } self.manager.upload_file( upload_url, file_to_upload, progressDialog=progress_dialog_data, finishedCallback=self._upload_finished, selectedTreeItem=selected_item, updateIndex=index_if_present ) def __clicked_new_folder(self): """ Creates a new folder in the selected folder on OSF """ selected_item = self.tree.currentItem() data = selected_item.data(0, QtCore.Qt.UserRole) # Get new folder link from data new_folder_url = data['links']['new_folder'] new_folder_name, ok = QtWidgets.QInputDialog.getText( self, _(u'Create new folder'), _(u'Please enter the folder name:') ) new_folder_name = safe_decode(new_folder_name) if not ok or not len(new_folder_name): return # Remove illegal filesystem characters (mainly for Windows) new_folder_name = u"".join( i for i in new_folder_name if i not in r'\/:*?"<>|') # Check again if not len(new_folder_name): return new_folder_url += "&name={}".format(new_folder_name) self.manager.put( new_folder_url, self._upload_finished, selectedTreeItem=selected_item ) def __download_finished(self, reply, *args, **kwargs): self.manager.success_message.emit( 'Download finished', 'Your download completed successfully') def _upload_finished(self, reply, *args, **kwargs): """ Callback for reply() object after an upload is finished """ # See if upload action was triggered by interaction on a tree item selectedTreeItem = kwargs.get('selectedTreeItem') # The new item data should be returned in the reply new_item_data = json.loads(safe_decode(reply.readAll().data())) 
# new_item_data is only reliable for osfstorage for now, so simply # refresh the whole tree if data is from another provider. if not selectedTreeItem: self.__upload_refresh_tree(*args, **kwargs) else: # See if object is still alive (could be deleted after user has had # to reauthenticate) try: selectedTreeItem.parent() except RuntimeError: # if not, simple refresh the whole tree self.__upload_refresh_tree(*args, **kwargs) return try: provider = new_item_data['data']['attributes']['provider'] except KeyError as e: raise osf.OSFInvalidResponse( u'Could not parse provider from OSF response: {}'.format(e)) # OSF storage is easy. Just take the newly returned path if provider == 'osfstorage': info_url = osf.api_call('file_info', new_item_data['data']['attributes']['path']) # All other repo's are a bit more difficult... else: # Don't even bother for folders and simply refresh the tree. # OSF does not provide possibility to get folder information (in # contrast to folder contents) for newly created folders in external # repositories if new_item_data['data']['attributes']['kind'] == 'folder': kwargs['entry_node'] = selectedTreeItem self.__upload_refresh_tree(*args, **kwargs) return # If kind is a file, try to add it to the tree incrementally # (thus without refreshing the whole tree). At the moment, this # only works well for osfstorage... try: project_id = new_item_data['data']['attributes']['resource'] temp_id = new_item_data['data']['id'] except KeyError as e: raise osf.OSFInvalidResponse( u'Could not parse provider from OSF response: {}'.format(e)) # Create an url for this file with which the complete information # set can be retrieved info_url = osf.api_call('repo_files', project_id, temp_id) # The repo_files api call adds a trailing slash, but this is invalid # when requesting information about files. Remove it if present. if info_url[-1] == u"/": info_url = info_url[:-1] # Refresh info for the new file as the returned representation # is incomplete self.manager.get( info_url, self.__upload_refresh_item, selectedTreeItem, *args, **kwargs ) def __upload_refresh_tree(self, *args, **kwargs): """ Called by _upload_finished() if the whole tree needs to be refreshed """ # If an entry node is specified, only refresh the children of that node, # otherwise, refresh entire tree entry_node = kwargs.pop('entry_node', None) if entry_node is None: self.__clicked_refresh_tree() else: self.refresh_button.setDisabled(True) self.refresh_button.setIcon(self.refresh_icon_spinning) self.tree.refresh_children_of_node(entry_node) after_upload_cb = kwargs.pop('afterUploadCallback', None) if callable(after_upload_cb): after_upload_cb(*args, **kwargs) def __upload_refresh_item(self, reply, parent_item, *args, **kwargs): """ Called by __upload_finished, if it is possible to add the new item at the correct position in the tree, without refreshing the whole tree. 
""" item = json.loads(safe_decode(reply.readAll().data())) # Remove old item first, before adding new one updateIndex = kwargs.get('updateIndex') if not updateIndex is None: parent_item.takeChild(updateIndex) # Add the item as a new item to the tree new_item, kind = self.tree.add_item(parent_item, item['data']) # Set new item as currently selected item self.tree.setCurrentItem(new_item) # Store item in kwargs so callback functions can use it kwargs['new_item'] = new_item # Perform the afterUploadCallback if it has been specified after_upload_cb = kwargs.pop('afterUploadCallback', None) if callable(after_upload_cb): after_upload_cb(*args, **kwargs) def __item_deleted(self, reply, item): """ Callback for when an item has been successfully deleted from the OSF. Removes the item from the tree. """ # See if object is still alive (could be deleted after user has had # to reauthenticate) try: item.parent().removeChild(item) except RuntimeError as e: warnings.warn("Deleting item failed: {}".format(e)) def __tree_refresh_finished(self): """ Slot for the event fired when the tree refresh is finished """ self.refresh_button.setIcon(self.refresh_icon) self.refresh_button.setDisabled(False) def handle_login(self): """ Callback function for a login event is detected. """ self.login_required_overlay.setVisible(False) self.refresh_button.setDisabled(True) def handle_logout(self): """ Callback function for when a logout event is detected. """ self.image_space.setPixmap(QtGui.QPixmap()) for label, value in self.properties.values(): value.setText("") self.refresh_button.setDisabled(True) self.login_required_overlay.setVisible(True) def closeEvent(self, event): """ Reimplementation of closeEvent. Makes sure the login window also closes if the explorer closes. """ super(OSFExplorer, self).closeEvent(event) self.manager.browser.close() # --- Other callback functions def __set_image_preview(self, img_content): """ Callback for set_file_properties(). Sets the preview of an image in the properties panel. """ # Create a pixmap from the just received data self.current_img_preview = QtGui.QPixmap() self.current_img_preview.loadFromData(img_content.readAll()) # Scale to preview area hight pixmap = self.current_img_preview.scaledToHeight( self.image_space.height()) # Hide progress bar self.img_preview_progress_bar.hide() # Show image preview self.image_space.setPixmap(pixmap) # Reset variable holding preview reply object def __prev_dl_progress(self, received, total): """ Callback for set_file_properties() """ # If total is 0, this is probably a redirect to the image location in # cloud storage. Do nothing in this case if total == 0: return # Convert to percentage progress = 100*received/total self.img_preview_progress_bar.setValue(progress) def __img_preview_error(self, reply, *args, **kwargs): """ Callback for set_file_properties() """ self.img_preview_progress_bar.hide()
from __future__ import division, absolute_import, print_function import pytest import numpy as np import numpy.ma as ma from numpy.ma.mrecords import MaskedRecords from numpy.ma.testutils import assert_equal from numpy.testing import assert_, assert_raises from numpy.lib.recfunctions import ( drop_fields, rename_fields, get_fieldstructure, recursive_fill_fields, find_duplicates, merge_arrays, append_fields, stack_arrays, join_by, repack_fields) get_names = np.lib.recfunctions.get_names get_names_flat = np.lib.recfunctions.get_names_flat zip_descr = np.lib.recfunctions.zip_descr class TestRecFunctions(object): # Misc tests def setup(self): x = np.array([1, 2, ]) y = np.array([10, 20, 30]) z = np.array([('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)]) w = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) self.data = (w, x, y, z) def test_zip_descr(self): # Test zip_descr (w, x, y, z) = self.data # Std array test = zip_descr((x, x), flatten=True) assert_equal(test, np.dtype([('', int), ('', int)])) test = zip_descr((x, x), flatten=False) assert_equal(test, np.dtype([('', int), ('', int)])) # Std & flexible-dtype test = zip_descr((x, z), flatten=True) assert_equal(test, np.dtype([('', int), ('A', '|S3'), ('B', float)])) test = zip_descr((x, z), flatten=False) assert_equal(test, np.dtype([('', int), ('', [('A', '|S3'), ('B', float)])])) # Standard & nested dtype test = zip_descr((x, w), flatten=True) assert_equal(test, np.dtype([('', int), ('a', int), ('ba', float), ('bb', int)])) test = zip_descr((x, w), flatten=False) assert_equal(test, np.dtype([('', int), ('', [('a', int), ('b', [('ba', float), ('bb', int)])])])) def test_drop_fields(self): # Test drop_fields a = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) # A basic field test = drop_fields(a, 'a') control = np.array([((2, 3.0),), ((5, 6.0),)], dtype=[('b', [('ba', float), ('bb', int)])]) assert_equal(test, control) # Another basic field (but nesting two fields) test = drop_fields(a, 'b') control = np.array([(1,), (4,)], dtype=[('a', int)]) assert_equal(test, control) # A nested sub-field test = drop_fields(a, ['ba', ]) control = np.array([(1, (3.0,)), (4, (6.0,))], dtype=[('a', int), ('b', [('bb', int)])]) assert_equal(test, control) # All the nested sub-field from a field: zap that field test = drop_fields(a, ['ba', 'bb']) control = np.array([(1,), (4,)], dtype=[('a', int)]) assert_equal(test, control) test = drop_fields(a, ['a', 'b']) assert_(test is None) def test_rename_fields(self): # Test rename fields a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))], dtype=[('a', int), ('b', [('ba', float), ('bb', (float, 2))])]) test = rename_fields(a, {'a': 'A', 'bb': 'BB'}) newdtype = [('A', int), ('b', [('ba', float), ('BB', (float, 2))])] control = a.view(newdtype) assert_equal(test.dtype, newdtype) assert_equal(test, control) def test_get_names(self): # Test get_names ndtype = np.dtype([('A', '|S3'), ('B', float)]) test = get_names(ndtype) assert_equal(test, ('A', 'B')) ndtype = np.dtype([('a', int), ('b', [('ba', float), ('bb', int)])]) test = get_names(ndtype) assert_equal(test, ('a', ('b', ('ba', 'bb')))) def test_get_names_flat(self): # Test get_names_flat ndtype = np.dtype([('A', '|S3'), ('B', float)]) test = get_names_flat(ndtype) assert_equal(test, ('A', 'B')) ndtype = np.dtype([('a', int), ('b', [('ba', float), ('bb', int)])]) test = get_names_flat(ndtype) assert_equal(test, ('a', 'b', 'ba', 'bb')) def 
test_get_fieldstructure(self): # Test get_fieldstructure # No nested fields ndtype = np.dtype([('A', '|S3'), ('B', float)]) test = get_fieldstructure(ndtype) assert_equal(test, {'A': [], 'B': []}) # One 1-nested field ndtype = np.dtype([('A', int), ('B', [('BA', float), ('BB', '|S1')])]) test = get_fieldstructure(ndtype) assert_equal(test, {'A': [], 'B': [], 'BA': ['B', ], 'BB': ['B']}) # One 2-nested fields ndtype = np.dtype([('A', int), ('B', [('BA', int), ('BB', [('BBA', int), ('BBB', int)])])]) test = get_fieldstructure(ndtype) control = {'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'], 'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']} assert_equal(test, control) def test_find_duplicates(self): # Test find_duplicates a = ma.array([(2, (2., 'B')), (1, (2., 'B')), (2, (2., 'B')), (1, (1., 'B')), (2, (2., 'B')), (2, (2., 'C'))], mask=[(0, (0, 0)), (0, (0, 0)), (0, (0, 0)), (0, (0, 0)), (1, (0, 0)), (0, (1, 0))], dtype=[('A', int), ('B', [('BA', float), ('BB', '|S1')])]) test = find_duplicates(a, ignoremask=False, return_index=True) control = [0, 2] assert_equal(sorted(test[-1]), control) assert_equal(test[0], a[test[-1]]) test = find_duplicates(a, key='A', return_index=True) control = [0, 1, 2, 3, 5] assert_equal(sorted(test[-1]), control) assert_equal(test[0], a[test[-1]]) test = find_duplicates(a, key='B', return_index=True) control = [0, 1, 2, 4] assert_equal(sorted(test[-1]), control) assert_equal(test[0], a[test[-1]]) test = find_duplicates(a, key='BA', return_index=True) control = [0, 1, 2, 4] assert_equal(sorted(test[-1]), control) assert_equal(test[0], a[test[-1]]) test = find_duplicates(a, key='BB', return_index=True) control = [0, 1, 2, 3, 4] assert_equal(sorted(test[-1]), control) assert_equal(test[0], a[test[-1]]) def test_find_duplicates_ignoremask(self): # Test the ignoremask option of find_duplicates ndtype = [('a', int)] a = ma.array([1, 1, 1, 2, 2, 3, 3], mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype) test = find_duplicates(a, ignoremask=True, return_index=True) control = [0, 1, 3, 4] assert_equal(sorted(test[-1]), control) assert_equal(test[0], a[test[-1]]) test = find_duplicates(a, ignoremask=False, return_index=True) control = [0, 1, 2, 3, 4, 6] assert_equal(sorted(test[-1]), control) assert_equal(test[0], a[test[-1]]) def test_repack_fields(self): dt = np.dtype('u1,f4,i8', align=True) a = np.zeros(2, dtype=dt) assert_equal(repack_fields(dt), np.dtype('u1,f4,i8')) assert_equal(repack_fields(a).itemsize, 13) assert_equal(repack_fields(repack_fields(dt), align=True), dt) # make sure type is preserved dt = np.dtype((np.record, dt)) assert_(repack_fields(dt).type is np.record) class TestRecursiveFillFields(object): # Test recursive_fill_fields. 
def test_simple_flexible(self): # Test recursive_fill_fields on flexible-array a = np.array([(1, 10.), (2, 20.)], dtype=[('A', int), ('B', float)]) b = np.zeros((3,), dtype=a.dtype) test = recursive_fill_fields(a, b) control = np.array([(1, 10.), (2, 20.), (0, 0.)], dtype=[('A', int), ('B', float)]) assert_equal(test, control) def test_masked_flexible(self): # Test recursive_fill_fields on masked flexible-array a = ma.array([(1, 10.), (2, 20.)], mask=[(0, 1), (1, 0)], dtype=[('A', int), ('B', float)]) b = ma.zeros((3,), dtype=a.dtype) test = recursive_fill_fields(a, b) control = ma.array([(1, 10.), (2, 20.), (0, 0.)], mask=[(0, 1), (1, 0), (0, 0)], dtype=[('A', int), ('B', float)]) assert_equal(test, control) class TestMergeArrays(object): # Test merge_arrays def setup(self): x = np.array([1, 2, ]) y = np.array([10, 20, 30]) z = np.array( [('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)]) w = np.array( [(1, (2, 3.0)), (4, (5, 6.0))], dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) self.data = (w, x, y, z) def test_solo(self): # Test merge_arrays on a single array. (_, x, _, z) = self.data test = merge_arrays(x) control = np.array([(1,), (2,)], dtype=[('f0', int)]) assert_equal(test, control) test = merge_arrays((x,)) assert_equal(test, control) test = merge_arrays(z, flatten=False) assert_equal(test, z) test = merge_arrays(z, flatten=True) assert_equal(test, z) def test_solo_w_flatten(self): # Test merge_arrays on a single array w & w/o flattening w = self.data[0] test = merge_arrays(w, flatten=False) assert_equal(test, w) test = merge_arrays(w, flatten=True) control = np.array([(1, 2, 3.0), (4, 5, 6.0)], dtype=[('a', int), ('ba', float), ('bb', int)]) assert_equal(test, control) def test_standard(self): # Test standard & standard # Test merge arrays (_, x, y, _) = self.data test = merge_arrays((x, y), usemask=False) control = np.array([(1, 10), (2, 20), (-1, 30)], dtype=[('f0', int), ('f1', int)]) assert_equal(test, control) test = merge_arrays((x, y), usemask=True) control = ma.array([(1, 10), (2, 20), (-1, 30)], mask=[(0, 0), (0, 0), (1, 0)], dtype=[('f0', int), ('f1', int)]) assert_equal(test, control) assert_equal(test.mask, control.mask) def test_flatten(self): # Test standard & flexible (_, x, _, z) = self.data test = merge_arrays((x, z), flatten=True) control = np.array([(1, 'A', 1.), (2, 'B', 2.)], dtype=[('f0', int), ('A', '|S3'), ('B', float)]) assert_equal(test, control) test = merge_arrays((x, z), flatten=False) control = np.array([(1, ('A', 1.)), (2, ('B', 2.))], dtype=[('f0', int), ('f1', [('A', '|S3'), ('B', float)])]) assert_equal(test, control) def test_flatten_wflexible(self): # Test flatten standard & nested (w, x, _, _) = self.data test = merge_arrays((x, w), flatten=True) control = np.array([(1, 1, 2, 3.0), (2, 4, 5, 6.0)], dtype=[('f0', int), ('a', int), ('ba', float), ('bb', int)]) assert_equal(test, control) test = merge_arrays((x, w), flatten=False) controldtype = [('f0', int), ('f1', [('a', int), ('b', [('ba', float), ('bb', int)])])] control = np.array([(1., (1, (2, 3.0))), (2, (4, (5, 6.0)))], dtype=controldtype) assert_equal(test, control) def test_wmasked_arrays(self): # Test merge_arrays masked arrays (_, x, _, _) = self.data mx = ma.array([1, 2, 3], mask=[1, 0, 0]) test = merge_arrays((x, mx), usemask=True) control = ma.array([(1, 1), (2, 2), (-1, 3)], mask=[(0, 1), (0, 0), (1, 0)], dtype=[('f0', int), ('f1', int)]) assert_equal(test, control) test = merge_arrays((x, mx), usemask=True, asrecarray=True) assert_equal(test, control) 
assert_(isinstance(test, MaskedRecords)) def test_w_singlefield(self): # Test single field test = merge_arrays((np.array([1, 2]).view([('a', int)]), np.array([10., 20., 30.])),) control = ma.array([(1, 10.), (2, 20.), (-1, 30.)], mask=[(0, 0), (0, 0), (1, 0)], dtype=[('a', int), ('f1', float)]) assert_equal(test, control) def test_w_shorter_flex(self): # Test merge_arrays w/ a shorter flexndarray. z = self.data[-1] # Fixme, this test looks incomplete and broken #test = merge_arrays((z, np.array([10, 20, 30]).view([('C', int)]))) #control = np.array([('A', 1., 10), ('B', 2., 20), ('-1', -1, 20)], # dtype=[('A', '|S3'), ('B', float), ('C', int)]) #assert_equal(test, control) # Hack to avoid pyflakes warnings about unused variables merge_arrays((z, np.array([10, 20, 30]).view([('C', int)]))) np.array([('A', 1., 10), ('B', 2., 20), ('-1', -1, 20)], dtype=[('A', '|S3'), ('B', float), ('C', int)]) def test_singlerecord(self): (_, x, y, z) = self.data test = merge_arrays((x[0], y[0], z[0]), usemask=False) control = np.array([(1, 10, ('A', 1))], dtype=[('f0', int), ('f1', int), ('f2', [('A', '|S3'), ('B', float)])]) assert_equal(test, control) class TestAppendFields(object): # Test append_fields def setup(self): x = np.array([1, 2, ]) y = np.array([10, 20, 30]) z = np.array( [('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)]) w = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) self.data = (w, x, y, z) def test_append_single(self): # Test simple case (_, x, _, _) = self.data test = append_fields(x, 'A', data=[10, 20, 30]) control = ma.array([(1, 10), (2, 20), (-1, 30)], mask=[(0, 0), (0, 0), (1, 0)], dtype=[('f0', int), ('A', int)],) assert_equal(test, control) def test_append_double(self): # Test simple case (_, x, _, _) = self.data test = append_fields(x, ('A', 'B'), data=[[10, 20, 30], [100, 200]]) control = ma.array([(1, 10, 100), (2, 20, 200), (-1, 30, -1)], mask=[(0, 0, 0), (0, 0, 0), (1, 0, 1)], dtype=[('f0', int), ('A', int), ('B', int)],) assert_equal(test, control) def test_append_on_flex(self): # Test append_fields on flexible type arrays z = self.data[-1] test = append_fields(z, 'C', data=[10, 20, 30]) control = ma.array([('A', 1., 10), ('B', 2., 20), (-1, -1., 30)], mask=[(0, 0, 0), (0, 0, 0), (1, 1, 0)], dtype=[('A', '|S3'), ('B', float), ('C', int)],) assert_equal(test, control) def test_append_on_nested(self): # Test append_fields on nested fields w = self.data[0] test = append_fields(w, 'C', data=[10, 20, 30]) control = ma.array([(1, (2, 3.0), 10), (4, (5, 6.0), 20), (-1, (-1, -1.), 30)], mask=[( 0, (0, 0), 0), (0, (0, 0), 0), (1, (1, 1), 0)], dtype=[('a', int), ('b', [('ba', float), ('bb', int)]), ('C', int)],) assert_equal(test, control) class TestStackArrays(object): # Test stack_arrays def setup(self): x = np.array([1, 2, ]) y = np.array([10, 20, 30]) z = np.array( [('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)]) w = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) self.data = (w, x, y, z) def test_solo(self): # Test stack_arrays on single arrays (_, x, _, _) = self.data test = stack_arrays((x,)) assert_equal(test, x) assert_(test is x) test = stack_arrays(x) assert_equal(test, x) assert_(test is x) def test_unnamed_fields(self): # Tests combinations of arrays w/o named fields (_, x, y, _) = self.data test = stack_arrays((x, x), usemask=False) control = np.array([1, 2, 1, 2]) assert_equal(test, control) test = stack_arrays((x, y), usemask=False) control = 
np.array([1, 2, 10, 20, 30]) assert_equal(test, control) test = stack_arrays((y, x), usemask=False) control = np.array([10, 20, 30, 1, 2]) assert_equal(test, control) def test_unnamed_and_named_fields(self): # Test combination of arrays w/ & w/o named fields (_, x, _, z) = self.data test = stack_arrays((x, z)) control = ma.array([(1, -1, -1), (2, -1, -1), (-1, 'A', 1), (-1, 'B', 2)], mask=[(0, 1, 1), (0, 1, 1), (1, 0, 0), (1, 0, 0)], dtype=[('f0', int), ('A', '|S3'), ('B', float)]) assert_equal(test, control) assert_equal(test.mask, control.mask) test = stack_arrays((z, x)) control = ma.array([('A', 1, -1), ('B', 2, -1), (-1, -1, 1), (-1, -1, 2), ], mask=[(0, 0, 1), (0, 0, 1), (1, 1, 0), (1, 1, 0)], dtype=[('A', '|S3'), ('B', float), ('f2', int)]) assert_equal(test, control) assert_equal(test.mask, control.mask) test = stack_arrays((z, z, x)) control = ma.array([('A', 1, -1), ('B', 2, -1), ('A', 1, -1), ('B', 2, -1), (-1, -1, 1), (-1, -1, 2), ], mask=[(0, 0, 1), (0, 0, 1), (0, 0, 1), (0, 0, 1), (1, 1, 0), (1, 1, 0)], dtype=[('A', '|S3'), ('B', float), ('f2', int)]) assert_equal(test, control) def test_matching_named_fields(self): # Test combination of arrays w/ matching field names (_, x, _, z) = self.data zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)], dtype=[('A', '|S3'), ('B', float), ('C', float)]) test = stack_arrays((z, zz)) control = ma.array([('A', 1, -1), ('B', 2, -1), ( 'a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)], dtype=[('A', '|S3'), ('B', float), ('C', float)], mask=[(0, 0, 1), (0, 0, 1), (0, 0, 0), (0, 0, 0), (0, 0, 0)]) assert_equal(test, control) assert_equal(test.mask, control.mask) test = stack_arrays((z, zz, x)) ndtype = [('A', '|S3'), ('B', float), ('C', float), ('f3', int)] control = ma.array([('A', 1, -1, -1), ('B', 2, -1, -1), ('a', 10., 100., -1), ('b', 20., 200., -1), ('c', 30., 300., -1), (-1, -1, -1, 1), (-1, -1, -1, 2)], dtype=ndtype, mask=[(0, 0, 1, 1), (0, 0, 1, 1), (0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 1), (1, 1, 1, 0), (1, 1, 1, 0)]) assert_equal(test, control) assert_equal(test.mask, control.mask) def test_defaults(self): # Test defaults: no exception raised if keys of defaults are not fields. 
(_, _, _, z) = self.data zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)], dtype=[('A', '|S3'), ('B', float), ('C', float)]) defaults = {'A': '???', 'B': -999., 'C': -9999., 'D': -99999.} test = stack_arrays((z, zz), defaults=defaults) control = ma.array([('A', 1, -9999.), ('B', 2, -9999.), ( 'a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)], dtype=[('A', '|S3'), ('B', float), ('C', float)], mask=[(0, 0, 1), (0, 0, 1), (0, 0, 0), (0, 0, 0), (0, 0, 0)]) assert_equal(test, control) assert_equal(test.data, control.data) assert_equal(test.mask, control.mask) def test_autoconversion(self): # Tests autoconversion adtype = [('A', int), ('B', bool), ('C', float)] a = ma.array([(1, 2, 3)], mask=[(0, 1, 0)], dtype=adtype) bdtype = [('A', int), ('B', float), ('C', float)] b = ma.array([(4, 5, 6)], dtype=bdtype) control = ma.array([(1, 2, 3), (4, 5, 6)], mask=[(0, 1, 0), (0, 0, 0)], dtype=bdtype) test = stack_arrays((a, b), autoconvert=True) assert_equal(test, control) assert_equal(test.mask, control.mask) with assert_raises(TypeError): stack_arrays((a, b), autoconvert=False) def test_checktitles(self): # Test using titles in the field names adtype = [(('a', 'A'), int), (('b', 'B'), bool), (('c', 'C'), float)] a = ma.array([(1, 2, 3)], mask=[(0, 1, 0)], dtype=adtype) bdtype = [(('a', 'A'), int), (('b', 'B'), bool), (('c', 'C'), float)] b = ma.array([(4, 5, 6)], dtype=bdtype) test = stack_arrays((a, b)) control = ma.array([(1, 2, 3), (4, 5, 6)], mask=[(0, 1, 0), (0, 0, 0)], dtype=bdtype) assert_equal(test, control) assert_equal(test.mask, control.mask) def test_subdtype(self): z = np.array([ ('A', 1), ('B', 2) ], dtype=[('A', '|S3'), ('B', float, (1,))]) zz = np.array([ ('a', [10.], 100.), ('b', [20.], 200.), ('c', [30.], 300.) ], dtype=[('A', '|S3'), ('B', float, (1,)), ('C', float)]) res = stack_arrays((z, zz)) expected = ma.array( data=[ (b'A', [1.0], 0), (b'B', [2.0], 0), (b'a', [10.0], 100.0), (b'b', [20.0], 200.0), (b'c', [30.0], 300.0)], mask=[ (False, [False], True), (False, [False], True), (False, [False], False), (False, [False], False), (False, [False], False) ], dtype=zz.dtype ) assert_equal(res.dtype, expected.dtype) assert_equal(res, expected) assert_equal(res.mask, expected.mask) class TestJoinBy(object): def setup(self): self.a = np.array(list(zip(np.arange(10), np.arange(50, 60), np.arange(100, 110))), dtype=[('a', int), ('b', int), ('c', int)]) self.b = np.array(list(zip(np.arange(5, 15), np.arange(65, 75), np.arange(100, 110))), dtype=[('a', int), ('b', int), ('d', int)]) def test_inner_join(self): # Basic test of join_by a, b = self.a, self.b test = join_by('a', a, b, jointype='inner') control = np.array([(5, 55, 65, 105, 100), (6, 56, 66, 106, 101), (7, 57, 67, 107, 102), (8, 58, 68, 108, 103), (9, 59, 69, 109, 104)], dtype=[('a', int), ('b1', int), ('b2', int), ('c', int), ('d', int)]) assert_equal(test, control) def test_join(self): a, b = self.a, self.b # Fixme, this test is broken #test = join_by(('a', 'b'), a, b) #control = np.array([(5, 55, 105, 100), (6, 56, 106, 101), # (7, 57, 107, 102), (8, 58, 108, 103), # (9, 59, 109, 104)], # dtype=[('a', int), ('b', int), # ('c', int), ('d', int)]) #assert_equal(test, control) # Hack to avoid pyflakes unused variable warnings join_by(('a', 'b'), a, b) np.array([(5, 55, 105, 100), (6, 56, 106, 101), (7, 57, 107, 102), (8, 58, 108, 103), (9, 59, 109, 104)], dtype=[('a', int), ('b', int), ('c', int), ('d', int)]) def test_join_subdtype(self): # tests the bug in https://stackoverflow.com/q/44769632/102441 from 
numpy.lib import recfunctions as rfn foo = np.array([(1,)], dtype=[('key', int)]) bar = np.array([(1, np.array([1,2,3]))], dtype=[('key', int), ('value', 'uint16', 3)]) res = join_by('key', foo, bar) assert_equal(res, bar.view(ma.MaskedArray)) def test_outer_join(self): a, b = self.a, self.b test = join_by(('a', 'b'), a, b, 'outer') control = ma.array([(0, 50, 100, -1), (1, 51, 101, -1), (2, 52, 102, -1), (3, 53, 103, -1), (4, 54, 104, -1), (5, 55, 105, -1), (5, 65, -1, 100), (6, 56, 106, -1), (6, 66, -1, 101), (7, 57, 107, -1), (7, 67, -1, 102), (8, 58, 108, -1), (8, 68, -1, 103), (9, 59, 109, -1), (9, 69, -1, 104), (10, 70, -1, 105), (11, 71, -1, 106), (12, 72, -1, 107), (13, 73, -1, 108), (14, 74, -1, 109)], mask=[(0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 1, 0), (0, 0, 0, 1), (0, 0, 1, 0), (0, 0, 0, 1), (0, 0, 1, 0), (0, 0, 0, 1), (0, 0, 1, 0), (0, 0, 0, 1), (0, 0, 1, 0), (0, 0, 1, 0), (0, 0, 1, 0), (0, 0, 1, 0), (0, 0, 1, 0), (0, 0, 1, 0)], dtype=[('a', int), ('b', int), ('c', int), ('d', int)]) assert_equal(test, control) def test_leftouter_join(self): a, b = self.a, self.b test = join_by(('a', 'b'), a, b, 'leftouter') control = ma.array([(0, 50, 100, -1), (1, 51, 101, -1), (2, 52, 102, -1), (3, 53, 103, -1), (4, 54, 104, -1), (5, 55, 105, -1), (6, 56, 106, -1), (7, 57, 107, -1), (8, 58, 108, -1), (9, 59, 109, -1)], mask=[(0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 1)], dtype=[('a', int), ('b', int), ('c', int), ('d', int)]) assert_equal(test, control) def test_different_field_order(self): # gh-8940 a = np.zeros(3, dtype=[('a', 'i4'), ('b', 'f4'), ('c', 'u1')]) b = np.ones(3, dtype=[('c', 'u1'), ('b', 'f4'), ('a', 'i4')]) # this should not give a FutureWarning: j = join_by(['c', 'b'], a, b, jointype='inner', usemask=False) assert_equal(j.dtype.names, ['b', 'c', 'a1', 'a2']) def test_duplicate_keys(self): a = np.zeros(3, dtype=[('a', 'i4'), ('b', 'f4'), ('c', 'u1')]) b = np.ones(3, dtype=[('c', 'u1'), ('b', 'f4'), ('a', 'i4')]) assert_raises(ValueError, join_by, ['a', 'b', 'b'], a, b) @pytest.mark.xfail(reason="See comment at gh-9343") def test_same_name_different_dtypes_key(self): a_dtype = np.dtype([('key', 'S5'), ('value', '<f4')]) b_dtype = np.dtype([('key', 'S10'), ('value', '<f4')]) expected_dtype = np.dtype([ ('key', 'S10'), ('value1', '<f4'), ('value2', '<f4')]) a = np.array([('Sarah', 8.0), ('John', 6.0)], dtype=a_dtype) b = np.array([('Sarah', 10.0), ('John', 7.0)], dtype=b_dtype) res = join_by('key', a, b) assert_equal(res.dtype, expected_dtype) def test_same_name_different_dtypes(self): # gh-9338 a_dtype = np.dtype([('key', 'S10'), ('value', '<f4')]) b_dtype = np.dtype([('key', 'S10'), ('value', '<f8')]) expected_dtype = np.dtype([ ('key', '|S10'), ('value1', '<f4'), ('value2', '<f8')]) a = np.array([('Sarah', 8.0), ('John', 6.0)], dtype=a_dtype) b = np.array([('Sarah', 10.0), ('John', 7.0)], dtype=b_dtype) res = join_by('key', a, b) assert_equal(res.dtype, expected_dtype) def test_subarray_key(self): a_dtype = np.dtype([('pos', int, 3), ('f', '<f4')]) a = np.array([([1, 1, 1], np.pi), ([1, 2, 3], 0.0)], dtype=a_dtype) b_dtype = np.dtype([('pos', int, 3), ('g', '<f4')]) b = np.array([([1, 1, 1], 3), ([3, 2, 1], 0.0)], dtype=b_dtype) expected_dtype = np.dtype([('pos', int, 3), ('f', '<f4'), ('g', '<f4')]) expected = np.array([([1, 1, 1], np.pi, 3)], dtype=expected_dtype) res = join_by('pos', a, b) assert_equal(res.dtype, expected_dtype) 
assert_equal(res, expected)

    def test_padded_dtype(self):
        dt = np.dtype('i1,f4', align=True)
        dt.names = ('k', 'v')
        assert_equal(len(dt.descr), 3)  # padding field is inserted

        a = np.array([(1, 3), (3, 2)], dt)
        b = np.array([(1, 1), (2, 2)], dt)
        res = join_by('k', a, b)

        # no padding fields remain
        expected_dtype = np.dtype([
            ('k', 'i1'), ('v1', 'f4'), ('v2', 'f4')])
        assert_equal(res.dtype, expected_dtype)


class TestJoinBy2(object):
    @classmethod
    def setup(cls):
        cls.a = np.array(list(zip(np.arange(10), np.arange(50, 60),
                                  np.arange(100, 110))),
                         dtype=[('a', int), ('b', int), ('c', int)])
        cls.b = np.array(list(zip(np.arange(10), np.arange(65, 75),
                                  np.arange(100, 110))),
                         dtype=[('a', int), ('b', int), ('d', int)])

    def test_no_r1postfix(self):
        # Basic test of join_by no_r1postfix
        a, b = self.a, self.b
        test = join_by(
            'a', a, b, r1postfix='', r2postfix='2', jointype='inner')
        control = np.array([(0, 50, 65, 100, 100), (1, 51, 66, 101, 101),
                            (2, 52, 67, 102, 102), (3, 53, 68, 103, 103),
                            (4, 54, 69, 104, 104), (5, 55, 70, 105, 105),
                            (6, 56, 71, 106, 106), (7, 57, 72, 107, 107),
                            (8, 58, 73, 108, 108), (9, 59, 74, 109, 109)],
                           dtype=[('a', int), ('b', int), ('b2', int),
                                  ('c', int), ('d', int)])
        assert_equal(test, control)

    def test_no_postfix(self):
        assert_raises(ValueError, join_by, 'a', self.a, self.b,
                      r1postfix='', r2postfix='')

    def test_no_r2postfix(self):
        # Basic test of join_by no_r2postfix
        a, b = self.a, self.b
        test = join_by(
            'a', a, b, r1postfix='1', r2postfix='', jointype='inner')
        control = np.array([(0, 50, 65, 100, 100), (1, 51, 66, 101, 101),
                            (2, 52, 67, 102, 102), (3, 53, 68, 103, 103),
                            (4, 54, 69, 104, 104), (5, 55, 70, 105, 105),
                            (6, 56, 71, 106, 106), (7, 57, 72, 107, 107),
                            (8, 58, 73, 108, 108), (9, 59, 74, 109, 109)],
                           dtype=[('a', int), ('b1', int), ('b', int),
                                  ('c', int), ('d', int)])
        assert_equal(test, control)

    def test_two_keys_two_vars(self):
        a = np.array(list(zip(np.tile([10, 11], 5),
                              np.repeat(np.arange(5), 2),
                              np.arange(50, 60), np.arange(10, 20))),
                     dtype=[('k', int), ('a', int), ('b', int), ('c', int)])
        b = np.array(list(zip(np.tile([10, 11], 5),
                              np.repeat(np.arange(5), 2),
                              np.arange(65, 75), np.arange(0, 10))),
                     dtype=[('k', int), ('a', int), ('b', int), ('c', int)])

        control = np.array([(10, 0, 50, 65, 10, 0), (11, 0, 51, 66, 11, 1),
                            (10, 1, 52, 67, 12, 2), (11, 1, 53, 68, 13, 3),
                            (10, 2, 54, 69, 14, 4), (11, 2, 55, 70, 15, 5),
                            (10, 3, 56, 71, 16, 6), (11, 3, 57, 72, 17, 7),
                            (10, 4, 58, 73, 18, 8), (11, 4, 59, 74, 19, 9)],
                           dtype=[('k', int), ('a', int), ('b1', int),
                                  ('b2', int), ('c1', int), ('c2', int)])
        test = join_by(
            ['a', 'k'], a, b, r1postfix='1', r2postfix='2', jointype='inner')
        assert_equal(test.dtype, control.dtype)
        assert_equal(test, control)


class TestAppendFieldsObj(object):
    """
    Test append_fields with arrays containing objects
    """
    # https://github.com/numpy/numpy/issues/2346

    def setup(self):
        from datetime import date
        self.data = dict(obj=date(2000, 1, 1))

    def test_append_to_objects(self):
        "Test append_fields when the base array contains objects"
        obj = self.data['obj']
        x = np.array([(obj, 1.), (obj, 2.)],
                     dtype=[('A', object), ('B', float)])
        y = np.array([10, 20], dtype=int)
        test = append_fields(x, 'C', data=y, usemask=False)
        control = np.array([(obj, 1.0, 10), (obj, 2.0, 20)],
                           dtype=[('A', object), ('B', float), ('C', int)])
        assert_equal(test, control)
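# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test suite above): a minimal, runnable
# example of the two recfunctions behaviours exercised by TestJoinBy and
# test_repack_fields. The field names and values are made up for the
# demonstration; np, join_by and repack_fields are the module-level imports
# at the top of this file.
if __name__ == '__main__':
    left = np.array([(0, 50.), (1, 51.), (2, 52.)],
                    dtype=[('key', int), ('x', float)])
    right = np.array([(1, 61.), (2, 62.), (3, 63.)],
                     dtype=[('key', int), ('y', float)])

    # An inner join keeps only the keys present in both arrays (1 and 2 here),
    # with the key field placed first in the result dtype.
    joined = join_by('key', left, right, jointype='inner')
    print(joined.dtype.names)  # ('key', 'x', 'y')

    # repack_fields removes the padding bytes of an aligned structured dtype;
    # on a typical platform the itemsize drops from 16 to 1 + 4 + 8 = 13.
    aligned = np.dtype('u1,f4,i8', align=True)
    print(aligned.itemsize, repack_fields(aligned).itemsize)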
import asyncio import importlib.util import json import logging import os.path import queue import sys import multiprocessing from typing import Dict, List, Optional, Union import click import websockets import netdumplings from netdumplings._shared import ( configure_logging, ND_CLOSE_MSGS, HUB_HOST, HUB_IN_PORT, ) from netdumplings.console._shared import CLICK_CONTEXT_SETTINGS def network_sniffer( kitchen_name: str, interface: str, chefs: Union[List[str], bool], chef_modules: List[str], valid_chefs: Dict, sniffer_filter: str, chef_poke_interval: int, dumpling_queue: multiprocessing.Queue, ): """ The main network sniffing management function, responsible for: * Instantiating a dumpling kitchen (which does the actual sniffing) and providing it with a queue to put chef-created dumplings on * Instantiating the dumpling chefs and registering them with the kitchen * Running the kitchen This function is intended to be invoked as a Python process. A dumpling chef will only be instantiated if its ``assignable_to_kitchen`` class attribute is ``True``. :param kitchen_name: Name of the sniffer kitchen. :param interface: Network interface to sniff (``all`` sniffs all interfaces). :param chefs: List of chefs to send packets to. Used for display only. :param chef_modules: List of Python module names in which to find chefs. Used for display only. :param valid_chefs: Dict of module+chef combinations we plan on importing. :param sniffer_filter: PCAP-compliant sniffer filter. :param chef_poke_interval: Interval (in secs) to poke chefs. :param dumpling_queue: Queue to pass to the kitchen to put dumplings on. """ configure_logging() log = logging.getLogger('netdumplings.console.sniff') log.info("{0}: Starting network sniffer process".format(kitchen_name)) log.info("{0}: Interface: {1}".format(kitchen_name, interface)) log.info("{0}: Requested chefs: {1}".format(kitchen_name, "all" if chefs is True else ", ".join(chefs))) log.info("{0}: Chef modules: {1}".format( kitchen_name, ", ".join(chef_modules))) log.info("{0}: Filter: {1}".format(kitchen_name, "<all packets>" if not sniffer_filter else sniffer_filter)) log.info("{0}: Chef poke interval (secs): {1}".format( kitchen_name, chef_poke_interval)) sniffer_kitchen = netdumplings.DumplingKitchen( dumpling_queue=dumpling_queue, name=kitchen_name, interface=interface, sniffer_filter=sniffer_filter, chef_poke_interval=chef_poke_interval, ) # Instantiate all the valid DumplingChef classes and register them with # the kitchen. for chef_module in valid_chefs: chef_class_names = valid_chefs[chef_module] if os.path.isfile(chef_module): spec = importlib.util.spec_from_file_location('chefs', chef_module) mod = importlib.util.module_from_spec(spec) spec.loader.exec_module(mod) else: # TODO: Investigate replacing __import__ with # importlib.import_module mod = __import__(chef_module, fromlist=chef_class_names) for chef_class_name in chef_class_names: log.info("{0}: Registering {1}.{2} with kitchen".format( kitchen_name, chef_module, chef_class_name)) klass = getattr(mod, chef_class_name) klass(kitchen=sniffer_kitchen) sniffer_kitchen.run() async def send_dumplings_from_queue_to_hub( kitchen_name: str, hub: str, dumpling_queue: multiprocessing.Queue, kitchen_info: dict, log: logging.Logger, ): """ Grabs dumplings from the dumpling queue and sends them to ``nd-hub``. :param kitchen_name: The name of the kitchen. :param hub: The address where ``nd-hub`` is receiving dumplings. :param dumpling_queue: Queue to grab dumplings from. 
:param kitchen_info: Dict describing the kitchen. :param log: Logger. """ hub_ws = 'ws://{0}'.format(hub) log.info("{0}: Connecting to the dumpling hub at {1}".format( kitchen_name, hub_ws) ) try: websocket = await websockets.client.connect(hub_ws) except OSError as e: log.error( "{0}: There was a problem with the dumpling hub connection. " "Is nd-hub available?".format(kitchen_name)) log.error("{0}: {1}".format(kitchen_name, e)) return try: # Register our kitchen information with the dumpling hub. await websocket.send(json.dumps(kitchen_info)) # Send dumplings to the hub when they come in from the chefs. while True: # This is a bit hacky. We have a multiprocessing queue to get from, # but we're running in a coroutine. The get() blocks, which I think # is inferfering with websockets' ability to manage its heartbeat # with the hub. This only seems to affect Windows. The workaround # implemented here is to put a 1-second timeout on the queue get, # ignore empty gets, and await asyncio.sleep() which appears to # allow the run loop to continue (presumably allowing the keepalives # to work). try: dumpling = dumpling_queue.get(timeout=1) await websocket.send(dumpling) except queue.Empty: pass await asyncio.sleep(0) except asyncio.CancelledError: log.warning( "{0}: Connection to dumpling hub cancelled; closing...".format( kitchen_name)) try: await websocket.close(*ND_CLOSE_MSGS['conn_cancelled']) except websockets.exceptions.InvalidState: pass except websockets.exceptions.ConnectionClosed as e: log.warning("{0}: Lost connection to dumpling hub: {1}".format( kitchen_name, e)) except OSError as e: log.exception( "{0}: Error talking to dumpling hub: {1}".format(kitchen_name, e) ) def dumpling_emitter( kitchen_name: str, hub: str, dumpling_queue: multiprocessing.Queue, kitchen_info: Dict, ): """ Starts an async event loop to manage funneling dumplings from the queue to the dumpling hub. This function is intended to be invoked as a Python process. :param kitchen_name: The name of the kitchen that the dumplings will be coming from. :param hub: The address where ``nd-hub`` is receiving dumplings. :param dumpling_queue: Queue to get dumplings from. :param kitchen_info: Information on the kitchen. """ configure_logging() log = logging.getLogger('netdumplings.console.sniff') log.info("{0}: Starting dumpling emitter process".format(kitchen_name)) try: asyncio.run( send_dumplings_from_queue_to_hub( kitchen_name, hub, dumpling_queue, kitchen_info, log ) ) except KeyboardInterrupt: log.warning(f"Keyboard interrupt detected") def list_chefs(chef_modules: Optional[List[str]] = None): """ Lists all the chef classes (subclassed from :class:`DumplingChef`) found in the given list of ``chef_modules``. :param chef_modules: Python module names to look for chefs in. """ chef_info = netdumplings.DumplingKitchen.get_chefs_in_modules(chef_modules) print() for chef_module in sorted(chef_info): print("{0}".format(chef_module)) import_error = chef_info[chef_module]['import_error'] if not import_error: for chef_class in chef_info[chef_module]['chef_classes']: print(" {0}".format(chef_class)) else: print(" error importing module: {0}".format(import_error)) print() def get_valid_chefs( kitchen_name: str, chef_modules: List[str], chefs_requested: Union[List[str], bool], log: logging.Logger, ) -> Dict: """ Retrieves the names of all valid DumplingChef subclasses for later instantiation. Valid chefs are all the classes in ``chef_modules`` which subclass DumplingChef and are included in our list of ``chefs_requested``. 
They also need to have their ``assignable_to_kitchen`` attribute set to True. :param kitchen_name: Kitchen name (for logging purposes). :param chef_modules: List of modules to look for chefs in. :param chefs_requested: List of requested chef names (True means all chefs are requested). :param log: Logger to log to. :return: Dict of valid DumpingChef subclasses. Keys are the Python module names and the values are a list of valid chef class names in each module. """ valid_chefs = {} chef_info = netdumplings.DumplingKitchen.get_chefs_in_modules(chef_modules) # TODO: chefs_seen could be a set. chefs_seen = [] # Find all the valid chefs. for chef_module in chef_info: import_error = chef_info[chef_module]['import_error'] if import_error: log.error('Problem with {}: {}'.format(chef_module, import_error)) continue chef_class_names = chef_info[chef_module]['chef_classes'] is_py_file = chef_info[chef_module]['is_py_file'] if is_py_file: spec = importlib.util.spec_from_file_location('chefs', chef_module) mod = importlib.util.module_from_spec(spec) spec.loader.exec_module(mod) else: # TODO: Investigate replacing __import__ with # importlib.import_module mod = __import__(chef_module, fromlist=chef_class_names) for chef_class_name in chef_class_names: chefs_seen.append(chef_class_name) klass = getattr(mod, chef_class_name) if not klass.assignable_to_kitchen: log.warning("{0}: Chef {1} is marked as unassignable".format( kitchen_name, chef_class_name)) continue # A chefs_requested value of True means all chefs. if chefs_requested is True or chef_class_name in chefs_requested: try: valid_chefs[chef_module].append(chef_class_name) except KeyError: valid_chefs[chef_module] = [chef_class_name] # Warn about any requested chefs which were not found. if chefs_requested is not True: for chef_not_found in [chef for chef in chefs_requested if chef not in chefs_seen]: log.warning("{0}: Chef {1} not found".format( kitchen_name, chef_not_found)) return valid_chefs # ----------------------------------------------------------------------------- @click.command( context_settings=CLICK_CONTEXT_SETTINGS, ) @click.option( '--kitchen-name', '-n', help='Dumpling kitchen name to assign to the sniffer', metavar='KITCHEN_NAME', default='default_kitchen', show_default=True, ) @click.option( '--hub', '-h', help='Address where nd-hub is receiving dumplings.', metavar='HOST:PORT', default='{}:{}'.format(HUB_HOST, HUB_IN_PORT), show_default=True, ) @click.option( '--interface', '-i', help='Network interface to sniff.', metavar='INTERFACE', default='all', show_default=True, ) @click.option( '--filter', '-f', 'pkt_filter', help='PCAP-style sniffer packet filter.', metavar='PCAP_FILTER', default='tcp or udp or arp', show_default=True, ) @click.option( '--chef-module', '-m', help='Python module containing chef implementations. Can be module.name ' 'or /path/to/file.py. Multiple can be specified.', metavar='PYTHON_MODULE', default=['netdumplings.dumplingchefs'], show_default=True, multiple=True, ) @click.option( '--chef', '-c', help='Chef (as found in a --chef-module) to deliver packets to. Multiple ' 'can be specified. 
Default is to send packets to all chefs.', metavar='CHEF_NAME', multiple=True, ) @click.option( '--chef-list', '-l', help='List all available chefs (as found in the given --chef-module ' 'Python modules) and exit.', is_flag=True, default=False, ) @click.option( '--poke-interval', '-p', help='Interval (in seconds) to poke chefs instructing them to send their ' 'interval dumplings.', metavar='SECONDS', type=click.FLOAT, default=5.0, show_default=True, ) @click.version_option(version=netdumplings.__version__) def sniff_cli(kitchen_name, hub, interface, pkt_filter, chef_module, chef, chef_list, poke_interval): """ A dumpling sniffer kitchen. Sniffs network packets matching the given PCAP-style filter and sends them to chefs for processing into dumplings. Dumplings are then sent to nd-hub for distribution to the dumpling eaters. This tool likely needs to be run as root, or as an Administrator user. """ # NOTE: Since the --chef-module and --chef flags can be specified multiple # times, the associated 'chef_module' and 'chef' parameters are tuples of # zero or more modules/chefs respectively. chef = True if len(chef) == 0 else chef # Display the chef list and exit if that's all the user wanted. if chef_list: list_chefs(chef_module) sys.exit(0) # now do the following: # # * Create a queue for a network-sniffing kitchen process to put dumplings # onto # * Start a kitchen process, which will be putting dumplings onto the queue # * Start a dumpling emitter process which takes dumplings from the queue # and sends them to nd-hub over a websocket configure_logging() logger = logging.getLogger('netdumplings.console.sniff') logger.info("Initializing sniffer...") # A queue for passing dumplings from the sniffer kitchen to the # dumpling-emitter process. dumpling_emitter_queue = multiprocessing.Queue() # Determine what chefs we'll be sending packets to. valid_chefs = get_valid_chefs(kitchen_name, chef_module, chef, logger) if not valid_chefs: logger.error('{}: No valid chefs found. Not starting sniffer.'.format( kitchen_name )) sys.exit(1) # Generate list of module.class names for all the seemingly-valid chefs # we'll be instantiating. This is for use in the status dumplings. valid_chef_list = [] for chef_module_name in sorted(valid_chefs.keys()): for chef_class_name in sorted(valid_chefs[chef_module_name]): valid_chef_list.append('{}.{}'.format( chef_module_name, chef_class_name) ) # Start the sniffer kitchen and dumpling-emitter processes. sniffer_process = multiprocessing.Process( target=network_sniffer, args=( kitchen_name, interface, chef, chef_module, valid_chefs, pkt_filter, poke_interval, dumpling_emitter_queue, ) ) kitchen_info = { 'kitchen_name': kitchen_name, 'interface': interface, 'filter': pkt_filter, 'chefs': valid_chef_list, 'poke_interval': poke_interval, } dumpling_emitter_process = multiprocessing.Process( target=dumpling_emitter, args=(kitchen_name, hub, dumpling_emitter_queue, kitchen_info), daemon=True, ) sniffer_process.start() dumpling_emitter_process.start() try: while True: sniffer_process.join(0.5) dumpling_emitter_process.join(0.5) if not sniffer_process.is_alive(): logger.error( f"{kitchen_name}: Network sniffer process died; exiting." ) break if not dumpling_emitter_process.is_alive(): logger.error( f"{kitchen_name}: Dumpling emitter process died; exiting." ) break except KeyboardInterrupt: logger.warning(f"{kitchen_name}: Caught keyboard interrupt; exiting.") for process in multiprocessing.active_children(): process.terminate() if __name__ == '__main__': sniff_cli()
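# ---------------------------------------------------------------------------
# Example invocation (illustrative only). It assumes the package exposes
# sniff_cli as a console script named `nd-sniff`; that entry point is not
# defined in this module, so treat the command name as an assumption. The
# flags map one-to-one onto the @click.option declarations above:
#
#   nd-sniff --kitchen-name eth0_kitchen \
#            --interface eth0 \
#            --filter 'tcp or udp or arp' \
#            --chef-module netdumplings.dumplingchefs
#
# Pass --chef-list to print the chefs found in the given --chef-module
# modules and exit without starting the sniffer or emitter processes.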
# -*- coding: utf-8 -*- from __future__ import unicode_literals, division, print_function, absolute_import try: import urlparse from urllib import unquote, quote except ImportError: import urllib.parse as urlparse from urllib.parse import unquote, quote import re import os __version__ = '0.1.15' class ParseResult(object): """ hold the results of a parsed dsn this is very similar to urlparse.ParseResult tuple http://docs.python.org/2/library/urlparse.html#results-of-urlparse-and-urlsplit it exposes the following attributes -- scheme schemes -- if your scheme has +'s in it, then this will contain a list of schemes split by + path paths -- the path segment split by /, so "/foo/bar" would be ["foo", "bar"] host -- same as hostname (I just like host better) hostname hostloc -- host:port username password netloc query -- a dict of the query string query_str -- the raw query string port fragment anchor -- same as fragment, just an alternative name """ @classmethod def verify(cls, dsn): if not re.match(r"^\S+://\S+", dsn): raise ValueError("{dsn} is invalid, only full dsn urls (scheme://host...) allowed".format(dsn=dsn)) @classmethod def parse_scheme(cls, dsn): first_colon = dsn.find(':') scheme = dsn[0:first_colon] dsn = dsn[first_colon+1:] return scheme, dsn @classmethod def parse_credentials(cls, dsn): # so urlparse doesn't support passwords with special characters /+. So # I'm going to parse out the username:password with a more lenient # parser, the problem is something like "example.com:1000/@" will now # fail but I think it's probably far more common for a dsn to have a # username/password at the beginning than not have one but have a port # and @ symbol in the path username = password = None m = re.match(r"^//([^:]*):([^@]*)@", dsn) if m: username = m.group(1) password = m.group(2) dsn = "//{}".format(dsn[m.end():]) return username, password, dsn @classmethod def parse_query(cls, url): # parse the query into options options = {} if url.query: for k, kv in urlparse.parse_qs(url.query, True, True).items(): if len(kv) > 1: options[k] = kv else: options[k] = kv[0] return options @classmethod def parse(cls, dsn, **defaults): cls.verify(dsn) scheme, dsn_url = cls.parse_scheme(dsn) username, password, dsn_url = cls.parse_credentials(dsn_url) url = urlparse.urlparse(dsn_url) username = url.username or username password = url.password or password hostname = url.hostname path = url.path if url.netloc == ":memory:": # the special :memory: signifier is used in SQLite to define a fully in # memory database, I think it makes sense to support it since dsnparse is all # about making it easy to parse *any* dsn path = url.netloc hostname = None port = None else: # compensate for relative path if url.hostname == "." 
or url.hostname == "..": path = "".join([hostname, path]) hostname = None port = url.port if hostname is not None: hostname = unquote(hostname) options = cls.parse_query(url) ret = { "dsn": dsn, "scheme": scheme, "hostname": hostname, "path": path, "port": port, "username": username, "password": password, } ret = cls.merge(ret, url, defaults, options) return ret @classmethod def merge(cls, ret, url, defaults, options): ret.update(dict( params=url.params, query=options, fragment=url.fragment, query_str=url.query, )) for k, v in defaults.items(): if not ret.get(k, None): ret[k] = v for k in list(options.keys()): if k in ret: if ret[k] is None: ret[k] = options.pop(k) else: raise ValueError("{} specified in query string and dsn".format(k)) for ret_k, options_k in [("hostname", "host")]: if options_k in options: if ret[ret_k] is None: ret[ret_k] = options.pop(options_k) else: raise ValueError("{} specified in query string and dsn".format(options_k)) return ret def __init__(self, dsn, **defaults): kwargs = self.parse(dsn, **defaults) for k, v in kwargs.items(): setattr(self, k, v) self.configure() def configure(self): """designed to be overridden in a child class""" pass def __iter__(self): mapping = ['scheme', 'netloc', 'path', 'params', 'query', 'fragment'] for k in mapping: yield getattr(self, k, '') def __getitem__(self, index): index = int(index) mapping = { 0: 'scheme', 1: 'netloc', 2: 'path', 3: 'params', 4: 'query', 5: 'fragment', } return getattr(self, mapping[index], '') @property def schemes(self): """the scheme, split by plus signs""" return self.scheme.split('+') @property def netloc(self): """return username:password@hostname:port""" s = '' prefix = '' if self.username: s += self.username prefix = '@' if self.password: s += ":{password}".format(password=self.password) prefix = '@' s += "{prefix}{hostloc}".format(prefix=prefix, hostloc=self.hostloc) return s @property def paths(self): """the path attribute split by /""" return list(filter(None, self.path.split('/'))) @property def host(self): """the hostname, but I like host better""" return self.hostname @property def user(self): """alias for username to match psycopg2""" return self.username @property def secret(self): """alias for password to match postgres dsn https://www.postgresql.org/docs/9.2/static/libpq-connect.html#LIBPQ-CONNSTRING """ return self.password @property def hostloc(self): """return host:port""" hostloc = quote(self.hostname, safe="") #hostloc = self.hostname if self.port: hostloc = '{hostloc}:{port}'.format(hostloc=hostloc, port=self.port) return hostloc @property def anchor(self): """alternative name for the fragment""" return self.fragment @property def database(self): # sqlite uses database in its connect method https://docs.python.org/3.6/library/sqlite3.html if self.hostname is None: database = self.path else: # we have a host, which means the dsn is in the form: hostname/database most # likely, so let's get rid of the slashes when setting the db database = self.path.strip("/") return database # psycopg2 uses dbname: http://initd.org/psycopg/docs/module.html#psycopg2.connect dbname = database def setdefault(self, key, val): """ set a default value for key this is different than dict's setdefault because it will set default either if the key doesn't exist, or if the value at the key evaluates to False, so an empty string or a None value will also be updated :param key: string, the attribute to update :param val: mixed, the attributes new value if key has a current value that evaluates to False """ if not 
getattr(self, key, None): setattr(self, key, val) def geturl(self): """return the dsn back into url form""" return urlparse.urlunparse(( self.scheme, self.netloc, self.path, self.params, self.query_str, self.fragment, )) def parse_environ(name, parse_class=ParseResult, **defaults): """ same as parse() but you pass in an environment variable name that will be used to fetch the dsn :param name: string, the environment variable name that contains the dsn to parse :param parse_class: ParseResult, the class that will be used to hold parsed values :param **defaults: dict, any values you want to have defaults for if they aren't in the dsn :returns: ParseResult() tuple """ return parse(os.environ[name], parse_class, **defaults) def parse_environs(name, parse_class=ParseResult, **defaults): """ same as parse_environ() but will also check name_1, name_2, ..., name_N and return all the found dsn strings from the environment this will look for name, and name_N (where N is 1 through infinity) in the environment, if it finds them, it will assume they are dsn urls and will parse them. The num checks (eg PROM_DSN_1, PROM_DSN_2) go in order, so you can't do PROM_DSN_1, PROM_DSN_3, because it will fail on _2 and move on, so make sure your num dsns are in order (eg, 1, 2, 3, ...) example -- export DSN_1=some.Interface://host:port/dbname#i1 export DSN_2=some.Interface://host2:port/dbname2#i2 $ python >>> import dsnparse >>> print dsnparse.parse_environs('DSN') # prints list with 2 parsed dsn objects :param dsn_env_name: string, the name of the environment variables, _1, ... will be appended :param parse_class: ParseResult, the class that will be used to hold parsed values :returns: list all the found dsn strings in the environment with the given name prefix """ ret = [] if name in os.environ: ret.append(parse_environ(name, parse_class, **defaults)) # now try importing _1 -> _N dsns increment_name = lambda name, num: '{name}_{num}'.format(name=name, num=num) dsn_num = 0 if increment_name(name, 0) in os.environ else 1 dsn_env_num_name = increment_name(name, dsn_num) if dsn_env_num_name in os.environ: try: while True: ret.append(parse_environ(dsn_env_num_name, parse_class, **defaults)) dsn_num += 1 dsn_env_num_name = increment_name(name, dsn_num) except KeyError: pass return ret def parse(dsn, parse_class=ParseResult, **defaults): """ parse a dsn to parts similar to parseurl :param dsn: string, the dsn to parse :param parse_class: ParseResult, the class that will be used to hold parsed values :param **defaults: dict, any values you want to have defaults for if they aren't in the dsn :returns: ParseResult() tuple-like instance """ r = parse_class(dsn, **defaults) return r
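# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the library). The dsn below is a
# made-up example; every attribute accessed is defined on ParseResult above.
if __name__ == '__main__':
    r = parse('postgres://user:secret@example.com:5432/mydb?sslmode=require#frag')
    print(r.scheme)    # 'postgres'
    print(r.schemes)   # ['postgres']
    print(r.username)  # 'user'   (r.user is an alias)
    print(r.password)  # 'secret' (r.secret is an alias)
    print(r.host)      # 'example.com'
    print(r.hostloc)   # 'example.com:5432'
    print(r.paths)     # ['mydb']
    print(r.database)  # 'mydb'   (path with the leading slash stripped)
    print(r.query)     # {'sslmode': 'require'}
    print(r.fragment)  # 'frag'   (r.anchor is an alias)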
# Copyright 2015 PerfKitBenchmarker Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Contributed by: Zi Shen Lim. """Runs SciMark2. Original documentation & code: http://math.nist.gov/scimark2/ SciMark2 is a Java (and C) benchmark for scientific and numerical computing. It measures several computational kernels and reports a composite score in approximate Mflops (Millions of floating point operations per second). """ import logging import re from perfkitbenchmarker import configs from perfkitbenchmarker import errors from perfkitbenchmarker import regex_util from perfkitbenchmarker import sample from perfkitbenchmarker.linux_packages import scimark2 BENCHMARK_NAME = 'scimark2' BENCHMARK_CONFIG = """ scimark2: description: Runs SciMark2 vm_groups: default: vm_spec: *default_single_core """ def GetConfig(user_config): return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME) def CheckPrerequisites(benchmark_config): pass def Prepare(benchmark_spec): """Install SciMark2 on the target vm. Args: benchmark_spec: The benchmark specification. Contains all data that is required to run the benchmark. """ vms = benchmark_spec.vms vm = vms[0] logging.info('Preparing SciMark2 on %s', vm) vm.Install('scimark2') def Run(benchmark_spec): """Run SciMark2 on the target vm. Args: benchmark_spec: The benchmark specification. Contains all data that is required to run the benchmark. Returns: A list of sample.Sample objects. """ vms = benchmark_spec.vms vm = vms[0] logging.info('Running SciMark2 on %s', vm) samples = [] # Run the Java and C benchmarks twice each, once with defaults and # once with the "-large" flag to use a larger working set size. # # Since the default output is not very parsing-friendly, print an # extra header to identify the tests. This must match # RESULT_START_REGEX as used below. cmds = [ '(echo ";;; Java small"; cd {0} && java -cp {1} {2})'.format( scimark2.PATH, scimark2.JAVA_JAR, scimark2.JAVA_MAIN), '(echo ";;; C small"; cd {0} && ./scimark2)'.format( scimark2.C_SRC), '(echo ";;; Java large"; cd {0} && java -cp {1} {2} -large)'.format( scimark2.PATH, scimark2.JAVA_JAR, scimark2.JAVA_MAIN), '(echo ";;; C large"; cd {0} && ./scimark2 -large)'.format( scimark2.C_SRC), ] for cmd in cmds: stdout, _ = vm.RemoteCommand(cmd, should_log=True) samples.extend(ParseResults(stdout)) return samples def Cleanup(unused_benchmark_spec): pass def ParseResults(results): """Result parser for SciMark2. Sample Results (C version): ** ** ** SciMark2 Numeric Benchmark, see http://math.nist.gov/scimark ** ** for details. (Results can be submitted to pozo@nist.gov) ** ** ** Using 2.00 seconds min time per kenel. Composite Score: 1596.04 FFT Mflops: 1568.64 (N=1024) SOR Mflops: 1039.98 (100 x 100) MonteCarlo: Mflops: 497.64 Sparse matmult Mflops: 1974.39 (N=1000, nz=5000) LU Mflops: 2899.56 (M=100, N=100) (Yes, "kenel" is part of the original output.) 
Sample Results (Java version): SciMark 2.0a Composite Score: 1731.4467627163242 FFT (1024): 996.9938397943672 SOR (100x100): 1333.5328291027124 Monte Carlo : 724.5221517116782 Sparse matmult (N=1000, nz=5000): 1488.18620413327 LU (100x100): 4113.998788839592 java.vendor: Oracle Corporation java.version: 1.7.0_75 os.arch: amd64 os.name: Linux os.version: 3.16.0-25-generic Args: results: SciMark2 result. Returns: A list of sample.Sample objects. """ result_start_regex = re.compile(r""" ^ ;;; \s+ (\S+) #1: Language ("C" or "Java") \s+ (\S+) #2: Size ("small" or "large") """, re.VERBOSE | re.MULTILINE) score_regex = re.compile(r""" ^ (Composite \s+ Score) : \s+ (\d+ \. \d+) """, re.VERBOSE | re.MULTILINE) result_regex_c = re.compile(r""" ^ ( .+? ) \s+ #1: Test name Mflops: \s+ ( \d+ \. \d+ ) #2: Test score ( \s+ \( .+? \) )? #3: Optional test details """, re.VERBOSE | re.MULTILINE) result_regex_java = re.compile(r""" ^ ( .+? ) #1: Test name : \s+ ( \d+ \. \d+ ) #2: Test score """, re.VERBOSE | re.MULTILINE) platform_regex = re.compile(r""" ^ ( \w+ \. \w+ ) #1: Property name : \s+ ( .* ) #2: Property value """, re.VERBOSE | re.MULTILINE) def FindBenchStart(results, start_index=0): m = result_start_regex.search(results, start_index) if m is None: return -1, None, None return m.start(), m.group(1), m.group(2) def ExtractPlatform(result, benchmark_language): """Retrieves platform data from the result string.""" metadata = {} meta_start = None if benchmark_language == 'C': pass elif benchmark_language == 'Java': for m in platform_regex.finditer(result): if meta_start is None: meta_start = m.start() metadata[m.group(1)] = m.group(2) return metadata, meta_start def ExtractScore(result): m = score_regex.search(result) if m is None: raise errors.Benchmarks.RunError('scimark2: Cannot find score in output.') label = m.group(1) score = float(m.group(2)) return score, label, m.end() def ExtractResults(result, benchmark_language): """Retrieves data points from the result string.""" datapoints = [] if benchmark_language == 'C': for groups in regex_util.ExtractAllMatches(result_regex_c, result): metric = '{0} {1}'.format(groups[0].strip(), groups[2].strip()) metric = metric.strip().strip(':') # Extra ':' in 'MonteCarlo:'. value = float(groups[1]) datapoints.append((metric, value)) elif benchmark_language == 'Java': for groups in regex_util.ExtractAllMatches(result_regex_java, result): datapoints.append((groups[0].strip(), float(groups[1]))) return datapoints # Find start positions for all the test results. tests = [] test_start_pos = 0 while True: start_index, benchmark_language, benchmark_size = FindBenchStart( results, test_start_pos) if start_index == -1: break tests.append((start_index, benchmark_language, benchmark_size)) test_start_pos = start_index + 1 # Now loop over individual tests collecting samples. samples = [] for test_num, test_details in enumerate(tests): start_index, benchmark_language, benchmark_size = test_details # Get end index - either start of next test, or None for the last test. end_index = None if test_num + 1 < len(tests): end_index = tests[test_num + 1][0] result = results[start_index:end_index] metadata = {'benchmark_language': benchmark_language, 'benchmark_size': benchmark_size} # Assume that the result consists of overall score followed by # specific scores and then platform metadata. # Get the metadata first since we need that to annotate samples. 
platform_metadata, meta_start = ExtractPlatform(result, benchmark_language) metadata.update(platform_metadata) # Get the overall score. score, label, score_end = ExtractScore(result) samples.append(sample.Sample(label, score, 'Mflops', metadata)) # For the specific scores, only look at the part of the string # bounded by score_end and meta_start to avoid adding extraneous # items. The overall score and platform data would match the # result regex. datapoints = ExtractResults( result[score_end:meta_start], benchmark_language) for metric, value in datapoints: samples.append(sample.Sample(metric, value, 'Mflops', metadata)) return samples
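# ---------------------------------------------------------------------------
# Parsing sketch (illustrative only; not invoked by the benchmark). The text
# below is a trimmed version of the sample C output quoted in the
# ParseResults docstring, prefixed with the ";;;" header that Run() echoes
# before each benchmark command so that ParseResults can recognise it.
_SAMPLE_OUTPUT = """
;;; C small
Composite Score:         1596.04
FFT             Mflops:  1568.64    (N=1024)
SOR             Mflops:  1039.98    (100 x 100)
MonteCarlo:     Mflops:   497.64
Sparse matmult  Mflops:  1974.39    (N=1000, nz=5000)
LU              Mflops:  2899.56    (M=100, N=100)
"""

if __name__ == '__main__':
    # Expect one 'Composite Score' sample plus one sample per kernel, all with
    # unit 'Mflops' and metadata
    # {'benchmark_language': 'C', 'benchmark_size': 'small'}.
    for parsed_sample in ParseResults(_SAMPLE_OUTPUT):
        print(parsed_sample)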
# -*- coding: utf-8 -*- """ Created on Tue Apr 01 17:47:38 2014 @author: xumiao """ import monk.roles.administrator as mra import monk.roles.worker as mrw import monk.core.api as monkapi import pymongo as pm import logging from monk.math.flexible_vector import FlexibleVector from random import sample import pickle import numpy as np import matplotlib.pyplot as plt import math logging.basicConfig(format='[%(asctime)s][%(name)-12s][%(levelname)-8s] : %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p', level=logging.ERROR) adminBroker = mra.AdminBroker() workerBroker = mrw.WorkerBroker() turtleName = 'mouthOpenTurtle2' pandaName = 'mouthOpen2' users = {} trainData = {} # the ObjectID of the selected data in DB testData = {} UoI = {} fracTrain = 0.5 def rebalance_users(): for i in range(4): usersi = [user for user in users if users[user] == i] for user in usersi[len(usersi)/2:]: users[user] = i + 4 def prepareData(): global trainData global testData originalData = retrieveData() splitData(originalData) destfile = open("trainData", 'w') # save trainData _id pickle.dump(trainData, destfile) destfile.close() destfile = open("testData", 'w') # save testData _id pickle.dump(testData, destfile) destfile.close() def loadPreparedData(file1, file2 = None): global trainData global testData destfile = open(file1, 'r') # load trainData _id trainData = pickle.load(destfile) destfile.close() if(file2): destfile = open(file2, 'r') # load testData _id testData = pickle.load(destfile) destfile.close() def add_users(): global users adminBroker.add_user() mcl = pm.MongoClient('10.137.172.201:27017') coll = mcl.DataSet['PMLExpression'] for ent in coll.find(None, {'_id':True, 'userId':True}, timeout=False): follower = ent['userId'] adminBroker.add_user(follower, ) if follower not in users: encodedMessage = simplejson.dumps({'turtleName':turtleName, 'user':'monk', 'follower':follower, 'operation':'add_user'}) print producer.send(follower, encodedMessage) userColl = mcl.DataSet['PMLUsers'] if users: for userId, partitionId in users.iteritems(): u = userColl.find_one({'userId':userId}, {'userId':userId}, timeout=False) if not u: userColl.insert({'userId':userId, 'partitionId':partitionId}); #userColl.insert([{'userId':userId, 'partitionId':partitionId} for userId, partitionId in users.iteritems()]) def add_data(): global users global trainData checkUserPartitionMapping() mcl = pm.MongoClient('10.137.172.201:27017') kafka = KafkaClient(kafkaHost, timeout=None) producer = UserProducer(kafka, kafkaTopic, users, partitions, async=False, req_acks=UserProducer.ACK_AFTER_LOCAL_WRITE, ack_timeout=200) coll = mcl.DataSet['PMLExpression'] for ent in coll.find(None, {'_id':True, 'userId':True}, timeout=False): entity = str(ent['_id']) user = ent['userId'] if ent['_id'] in trainData[user]: encodedMessage = simplejson.dumps({'turtleName':turtleName, 'user':user, 'entity':entity, 'operation':'add_data'}) print producer.send(user, encodedMessage) for user, partitionId in users.iteritems(): encodedMessage = simplejson.dumps({'turtleName':turtleName, 'user':user, 'operation':'save_turtle'}) print producer.send(user, encodedMessage) mcl.close() def train(numIters): global users checkUserPartitionMapping() kafka = KafkaClient(kafkaHost, timeout=None) producer = UserProducer(kafka, kafkaTopic, users, partitions, async=False, req_acks=UserProducer.ACK_AFTER_LOCAL_WRITE, ack_timeout=200) for i in range(numIters): for user, partitionId in users.iteritems(): if user == '' or user == 'monk': continue encodedMessage = 
simplejson.dumps({'turtleName':turtleName, 'user':user, 'operation':'train'}) print i, producer.send(user, encodedMessage) producer.stop(1) kafka.close() def test(isPersonalized): global users global testData checkUserPartitionMapping() mcl = pm.MongoClient('10.137.172.201:27017') kafka = KafkaClient(kafkaHost, timeout=None) producer = UserProducer(kafka, kafkaTopic, users, partitions, async=False, req_acks=UserProducer.ACK_AFTER_LOCAL_WRITE, ack_timeout=200) for user, partitionId in users.iteritems(): if user != u'': for dataID in testData[user]: entity = str(dataID) encodedMessage = simplejson.dumps({'turtleName':turtleName, 'user':user, 'entity':entity, 'isPersonalized':isPersonalized, 'operation':'test_data'}) print producer.send(user, encodedMessage) mcl.close() def centralizedTest(isPersonalized): global users global testData checkUserPartitionMapping() mcl = pm.MongoClient('10.137.172.201:27017') coll = mcl.DataSet['PMLExpression'] MONKModelPandaStore = mcl.MONKModel['PandaStore'] monkpa = MONKModelPandaStore.find_one({'creator': 'monk', 'name': pandaName}, {'_id':True, 'weights':True, 'z':True}, timeout=False) z = FlexibleVector(generic=monkpa['z']) resGTs = {} for user in testData.keys(): if user == '': continue pa = MONKModelPandaStore.find_one({'creator': user, 'name': pandaName}, {'_id':True, 'weights':True, 'z':True}, timeout=False) if pa == None: continue if isPersonalized == True: wei = FlexibleVector(generic=pa['weights']) else: wei = z resGT = [] for ent in coll.find({'_id': {'$in':testData[user]}}, {'_features':True, 'labels':True}, timeout=False): fea = FlexibleVector(generic=ent['_features']) if not len(ent['labels']) == 0: resGT.append((float(wei.dot(fea)), 1.0)) else: resGT.append((float(wei.dot(fea)), 0.0)) resGTs[user] = resGT del wei mcl.close() return resGTs def evaluate(resGTs, curvefile=None): global users checkUserPartitionMapping() overallResGT = [] thres = {} precisions = {} recalls = {} FPrates = {} totalTestSamples = {} for user in resGTs.keys(): overallResGT = overallResGT + resGTs[user] thre, precision, recall, FPrate, totalTestSample = buildMetric(resGTs[user]) thres[user] = thre precisions[user] = precision recalls[user] = recall FPrates[user] = FPrate totalTestSamples[user] = totalTestSample buildMetric(overallResGT, curvefile) #plotCurveFromFile(curvefile) plotUserCurve(thres, precisions, recalls, FPrates) plotCombinedUserCurve(totalTestSamples, recalls, FPrates, False) def offsetCommit(): global users checkUserPartitionMapping() kafkaClient = KafkaClient(kafkaHost, timeout=None) producer = KeyedProducer(kafkaClient, async=False, req_acks=UserProducer.ACK_AFTER_LOCAL_WRITE, ack_timeout=200) for partition in partitions: encodedMessage = simplejson.dumps({'turtleName':turtleName, 'user':'', 'operation':'offsetCommit'}) print producer.send(kafkaTopic, partition, encodedMessage) producer.stop(1) kafkaClient.close() def reset(): global users checkUserPartitionMapping() kafka = KafkaClient(kafkaHost, timeout=None) producer = UserProducer(kafka, kafkaTopic, users, partitions, async=False, req_acks=UserProducer.ACK_AFTER_LOCAL_WRITE, ack_timeout=200) for user, partitionId in users.iteritems(): encodedMessage = simplejson.dumps({'turtleName':turtleName, 'user':user, 'operation':'reset'}) print producer.send(user, encodedMessage) # users['monk'] = 8 # encodedMessage = simplejson.dumps({'turtleName':turtleName, # 'user':'monk', # 'operation':'reset'}) # print producer.send('monk', encodedMessage) producer.stop(1) kafka.close() def reset_all_data(): global users 
checkUserPartitionMapping() kafka = KafkaClient(kafkaHost, timeout=None) producer = UserProducer(kafka, kafkaTopic, users, partitions, async=False, req_acks=UserProducer.ACK_AFTER_LOCAL_WRITE, ack_timeout=200) for user, partitionId in users.iteritems(): encodedMessage = simplejson.dumps({'turtleName':turtleName, 'user':user, 'operation':'reset_all_data'}) print producer.send(user, encodedMessage) users['monk'] = 8 encodedMessage = simplejson.dumps({'turtleName':turtleName, 'user':'monk', 'operation':'reset_all_data'}) print producer.send('monk', encodedMessage) producer.stop(1) kafka.close() def set_mantis_parameter(para, value): global users checkUserPartitionMapping() kafka = KafkaClient(kafkaHost, timeout=None) producer = UserProducer(kafka, kafkaTopic, users, partitions, async=False, req_acks=UserProducer.ACK_AFTER_LOCAL_WRITE, ack_timeout=200) for user, partitionId in users.iteritems(): # if not partitionId == 4: # continue encodedMessage = simplejson.dumps({'turtleName':turtleName, 'user':user, 'operation':'set_mantis_parameter', 'para':para, 'value':value}) print producer.send(user, encodedMessage) producer.stop(1) kafka.close() def changeParameters(): global users checkUserPartitionMapping() mcl = pm.MongoClient('10.137.172.201:27017') MONKModelTurtleStore = mcl.MONKModel['TurtleStore'] MONKModelPandaStore = mcl.MONKModel['PandaStore'] MONKModelMantisStore = mcl.MONKModel['MantisStore'] #MONKModelPandaStore.update({'creator': 'monk2', 'name': pandaName}, {'$set':{'z':[]}}, timeout=False) #{'name':{$exists: true}} for user, partitionId in users.iteritems(): #MONKModelTurtleStore.update({'creator': user, 'name': turtleName}, {'$set':{'leader':'monk'}}, timeout=False) MONKModelMantisStore.update({'creator': user, 'name': pandaName}, {'$set':{'gamma':1}}, timeout=False) mcl.close() #========================================== Data Preparation ====================================== def retrieveData(): global UoI mcl = pm.MongoClient('10.137.172.201:27017') coll = mcl.DataSet['PMLExpression'] originalData = {} for user in UoI.keys(): originalData[user] = {'0':[], '1':[]} #for ent in coll.find({'userId': {'$in': UoI.keys()}}, {'_id':True, 'userId':True, 'labels':True}, timeout=False): for ent in coll.find({}, {'_id':True, 'userId':True, 'labels':True}, timeout=False): userId = ent['userId'] if not userId in originalData: #if len(originalData.keys()) >= 8: # control the number of total users #break originalData[userId] = {'0':[], '1':[]} UoI[userId] = 0 # if (stop_add_data(userId)): # continue # UoI[userId] += 1 if not len(ent['labels']) == 0: originalData[userId]['1'].append(ent['_id']) # in the format of ObjectId else: originalData[userId]['0'].append(ent['_id']) return originalData def splitData(originalData): global trainData global testData global fracTrain for user in originalData.keys(): trainData[user] = [] testData[user] = [] numOfPosData = len(originalData[user]['1']) numOfNegData = len(originalData[user]['0']) pos = range(numOfPosData) neg = range(numOfNegData) selectedPos, selectedNeg = stratifiedSelection(pos, neg, fracTrain) for i in range(numOfPosData): if i in selectedPos: trainData[user].append(originalData[user]['1'][i]) else: testData[user].append(originalData[user]['1'][i]) for i in range(numOfNegData): if i in selectedNeg: trainData[user].append(originalData[user]['0'][i]) else: testData[user].append(originalData[user]['0'][i]) def stratifiedSelection(posindex, negindex, fracTrain): num = int(len(posindex)*fracTrain) # if len(posindex) > 0: # num = 1 # else: # num = 0 
selectPosIndex = sample(posindex, num) num = int(len(negindex)*fracTrain) # if len(negindex) > 0: # num = 1 # else: # num = 0 selectNegIndex = sample(negindex, num) return selectPosIndex, selectNegIndex def checkUserPartitionMapping(): global users mcl = pm.MongoClient('10.137.172.201:27017') if not users: userColl = mcl.DataSet['PMLUsers'] for u in userColl.find(None, {'userId':True, 'partitionId':True}, timeout=False): users[u['userId']] = u['partitionId'] mcl.close() def buildMetric(resGT, curvefile = None): totalP = 0.0 totalN = 0.0 for i in reversed(range(len(resGT))): # remove the wrong values if resGT[i][0] > 100000: del resGT[i] elif resGT[i][0] < -100000: del resGT[i] else: if resGT[i][1] > 0: totalP += 1 else: totalN += 1 resGT.sort() logging.debug("totalP = {0}".format(totalP)) logging.debug("totalN = {0}".format(totalN)) totalTestSamples = totalP + totalN totalFP = totalN totalFN = 0.0 totalTP = totalP totalTN = 0.0 numberOfCurve = 500 minVal = float(resGT[0][0]) maxVal = float(resGT[-1][0]) thre = np.linspace(minVal, maxVal, numberOfCurve) precisions = [] recalls = [] FPrates = [] k = 0 for i in xrange(numberOfCurve): while(float(resGT[k][0]) < thre[i]): if(float(resGT[k][1]) > 0): totalFN = totalFN + 1 else: totalTN = totalTN + 1 k = k + 1 totalFP = totalN - totalTN totalTP = totalP - totalFN if(totalTP+totalFP == 0): precision = 1 else: precision = float(totalTP) / float((totalTP+totalFP)) if(totalP == 0): recall = 0 else: recall = float(totalTP) / float(totalP) if(totalN == 0): FPrate = 0 else: FPrate = float(totalFP) / float(totalN) precisions.append(precision) recalls.append(recall) FPrates.append(FPrate) if curvefile != None: fCurve = open(curvefile, 'w') fCurve.write('threshold\tPrecision\tRecall\tFPrate\n') for i in range(len(thre)): o = '{0:.8f}\t{1:.8f}\t{2:.8f}\t{3:.8f}'.format(thre[i], precisions[i], recalls[i], FPrates[i]) fCurve.write(o + '\n') fCurve.close() return thre, precisions, recalls, FPrates, totalTestSamples def plot(groupTH, groupTP, groupFP, groupPrecision): font = {'family' : 'serif', 'color' : 'darkred', 'weight' : 'normal', 'size' : 16 } #lineType = ['g--', 'r-', 'k-.', 'b.'] # ['g', 'r-', 'k-', 'b-'] #leg = ['consensus Mouth Open model', 'personalized Mouth Open model','consensus Mouth Open model', 'personalized Mouth Open model'] # ['0', '0.1', '0.25', '0.4'] ### plot PR curve fig = plt.figure() fig.patch.set_facecolor('white') plt.title('P-R curve', fontdict=font) plt.xlabel('recall', fontdict=font) plt.xticks(np.linspace(0, 1, 11)) plt.yticks(np.linspace(0, 1, 11)) plt.ylabel('precision', fontdict=font) plt.grid(True) for i in range(len(groupTP)): plt.plot(groupTP[i], groupPrecision[i], linewidth=3, markersize = 10) #plt.legend(leg, loc = 7) ### plot ROC curve fig = plt.figure() fig.patch.set_facecolor('white') plt.title('ROC curve', fontdict=font) plt.xlabel('FP rate', fontdict=font) plt.xticks(np.linspace(0, 1, 11)) plt.yticks(np.linspace(0, 1, 11)) plt.ylabel('TP rate (recall)', fontdict=font) plt.grid(True) for i in range(len(groupTP)): plt.plot(groupFP[i], groupTP[i], linewidth=3, markersize = 10) #plt.legend(leg, loc = 7) def plotUserCurve(thre, precisions, recalls, FPrates): groupTH = [] groupTP = [] groupFP = [] groupPrecision = [] for user in thre.keys(): th = thre[user] precision = precisions[user] recall = recalls[user] fpRate = FPrates[user] groupTH.append(th) groupTP.append(recall) groupFP.append(fpRate) groupPrecision.append(precision) #print '{0}\t{1}\t{2}\t{3}'.format(float(th[-1]), float(precision[-1]), float(recall[-1]), 
float(fpRate[-1])) plot(groupTH, groupTP, groupFP, groupPrecision) def plotCombinedUserCurve(totalTestSamples, recalls, FPrates, weighted): combinedTPmean = [] combinedTPstd = [] combinedFP = [] numberOfCurvePoint = 500 falsePositiveSet = np.linspace(0.0, 1.0, numberOfCurvePoint) validUsers = [] # remove the users who only have positive or negative test sameples for user in totalTestSamples.keys(): if recalls[user][0] != 0 and FPrates[user][-1] != 1 : validUsers.append(user) print "number of valid user: {0}".format(len(validUsers)) weights = {} weightSum = 0.0 for user in validUsers: weights[user] = totalTestSamples[user] weightSum += totalTestSamples[user] if weighted: for user in validUsers: weights[user] = weights[user] / weightSum else: for user in validUsers: weights[user] = 1.0 / len(validUsers) #fig = plt.figure() #plt.plot(range(len(weights.values())), weights.values()) for fp in falsePositiveSet: mean = 0.0 std = 0.0 weightSum = 0.0 #TP = [] for user in validUsers: tp = interpolateTP(fp, FPrates[user], recalls[user]) #TP.append(tp) mean += weights[user] * tp std += weights[user] * tp * tp weightSum += weights[user] std = math.sqrt(max(0, std /weightSum - mean * mean)) combinedTPstd.append(std) combinedTPmean.append(mean) combinedFP.append(fp) font = {'family' : 'serif', 'color' : 'darkred', 'weight' : 'normal', 'size' : 16 } fig = plt.figure() fig.patch.set_facecolor('white') plt.title('Combined ROC curve', fontdict=font) plt.xlabel('FP rate', fontdict=font) plt.xticks(np.linspace(0, 1, 11)) plt.yticks(np.linspace(0, 1, 11)) plt.ylabel('TP rate (recall)', fontdict=font) plt.grid(True) plt.errorbar(combinedFP, combinedTPmean, yerr=combinedTPstd) def interpolateTP(fp, FPrates, recalls): # values in FPrates and recalls are in decreasing order if fp <= FPrates[-1]: return 0 if fp >= FPrates[0]: return 1.0 for i in range(len(FPrates)): if fp <= FPrates[i] and fp >= FPrates[i+1]: if FPrates[i+1] == FPrates[i]: return recalls[i] else: delta = (recalls[i+1] - recalls[i]) * (fp - FPrates[i]) / (FPrates[i+1] - FPrates[i]) return recalls[i] + delta def plotCurveFromFile(fileNames): groupTH = [] groupTP = [] groupFP = [] groupPrecision = [] for fileName in fileNames: f = file(fileName, 'r') th = [] precision = [] recall = [] fpRate = [] i = 0 f.readline() strs = f.readline().split('\t') while (len(strs) != 0 and strs[0] != ''): i+=1 if(i== 500): print "cool" th.append(float(strs[0])) precision.append(float(strs[1])) recall.append(float(strs[2])) fpRate.append(float(strs[3])) strs = f.readline().split('\t') groupTH.append(th) groupTP.append(recall) groupFP.append(fpRate) groupPrecision.append(precision) f.close() #print '{0}\t{1}\t{2}\t{3}'.format(float(th[-1]), float(precision[-1]), float(recall[-1]), float(fpRate[-1])) plot(groupTH, groupTP, groupFP, groupPrecision) def normalize_data(): mcl = pm.MongoClient('10.137.172.201:27017') coll = mcl.DataSet['PMLExpression'] collBackup = mcl.DataSet['PMLExpressionBackup'] dimension = 4275 minVal = [1000000000.0] * dimension maxVal = [-1000000000.0] * dimension for ent in collBackup.find(None, {'_id':True, '_features':True}, timeout=False): feature = ent['_features'] for i in range(len(feature)): if feature[i][1] < minVal[i]: minVal[i] = feature[i][1] if feature[i][1] > maxVal[i]: maxVal[i] = feature[i][1] for ent in collBackup.find(None, {'_id':True, '_features':True}, timeout=False): feature = ent['_features'] dataId = ent['_id'] for i in range(len(feature)): if maxVal[i] == minVal[i]: feature[i][1] = 0.0 else: feature[i][1] = 2.0 * (feature[i][1] 
- minVal[i]) / (maxVal[i] - minVal[i]) - 1.0 coll.update({'_id': dataId}, {'$set':{'_features':feature}}, timeout=False) mcl.close() #reset() if __name__=='__main__': #normalize_data() #reset() #prepareData() loadPreparedData("trainData", "testData") # ## print "add_users" ## add_users() # print "add_data" # add_data() print "train" train(1) # print "test" # isPersonalized = True # resGTs = centralizedTest(isPersonalized) # destfile = open("resGTs_personalized", 'w') # save result and gt # pickle.dump(resGTs, destfile) # destfile.close() # print "evaluate" # file = open("resGTs_personalized", 'r') # resGTs_personalized = pickle.load(file) # file.close() # evaluate(resGTs_personalized, "acc.curve")
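# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original experiment script): a minimal,
# self-contained version of the threshold sweep performed by buildMetric()
# above, without the MongoDB/Kafka plumbing or the in-place outlier cleanup.
# It assumes resGT is a list of (score, label) pairs such as those produced
# by centralizedTest(); the helper name sweep_thresholds and the sample
# values in the usage comment are illustrative only.
import numpy as np


def sweep_thresholds(resGT, number_of_points=500):
    """Return thresholds, precisions, recalls and FP rates for (score, label) pairs."""
    scores = np.array([score for score, _ in resGT], dtype=float)
    labels = np.array([label for _, label in resGT], dtype=float)
    total_p = float((labels > 0).sum())
    total_n = float(len(labels)) - total_p
    thresholds = np.linspace(scores.min(), scores.max(), number_of_points)
    precisions, recalls, fp_rates = [], [], []
    for t in thresholds:
        predicted_pos = scores >= t                       # classify by threshold
        tp = float((predicted_pos & (labels > 0)).sum())
        fp = float((predicted_pos & (labels <= 0)).sum())
        precisions.append(tp / (tp + fp) if (tp + fp) else 1.0)
        recalls.append(tp / total_p if total_p else 0.0)
        fp_rates.append(fp / total_n if total_n else 0.0)
    return thresholds, precisions, recalls, fp_rates


# Usage (illustrative data): two well-scored positives, two low-scored negatives.
# thre, prec, rec, fpr = sweep_thresholds([(0.9, 1.0), (0.8, 1.0), (0.2, 0.0), (0.1, 0.0)])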
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from typing import Dict, List, Optional, Union from azure.core.exceptions import HttpResponseError import msrest.serialization from ._azure_quantum_management_client_enums import * class ErrorAdditionalInfo(msrest.serialization.Model): """The resource management error additional info. Variables are only populated by the server, and will be ignored when sending a request. :ivar type: The additional info type. :vartype type: str :ivar info: The additional info. :vartype info: any """ _validation = { 'type': {'readonly': True}, 'info': {'readonly': True}, } _attribute_map = { 'type': {'key': 'type', 'type': 'str'}, 'info': {'key': 'info', 'type': 'object'}, } def __init__( self, **kwargs ): super(ErrorAdditionalInfo, self).__init__(**kwargs) self.type = None self.info = None class ErrorDetail(msrest.serialization.Model): """The error detail. Variables are only populated by the server, and will be ignored when sending a request. :ivar code: The error code. :vartype code: str :ivar message: The error message. :vartype message: str :ivar target: The error target. :vartype target: str :ivar details: The error details. :vartype details: list[~azure.mgmt.quantum.models.ErrorDetail] :ivar additional_info: The error additional info. :vartype additional_info: list[~azure.mgmt.quantum.models.ErrorAdditionalInfo] """ _validation = { 'code': {'readonly': True}, 'message': {'readonly': True}, 'target': {'readonly': True}, 'details': {'readonly': True}, 'additional_info': {'readonly': True}, } _attribute_map = { 'code': {'key': 'code', 'type': 'str'}, 'message': {'key': 'message', 'type': 'str'}, 'target': {'key': 'target', 'type': 'str'}, 'details': {'key': 'details', 'type': '[ErrorDetail]'}, 'additional_info': {'key': 'additionalInfo', 'type': '[ErrorAdditionalInfo]'}, } def __init__( self, **kwargs ): super(ErrorDetail, self).__init__(**kwargs) self.code = None self.message = None self.target = None self.details = None self.additional_info = None class ErrorResponse(msrest.serialization.Model): """Common error response for all Azure Resource Manager APIs to return error details for failed operations. (This also follows the OData error response format.). :param error: The error object. :type error: ~azure.mgmt.quantum.models.ErrorDetail """ _attribute_map = { 'error': {'key': 'error', 'type': 'ErrorDetail'}, } def __init__( self, *, error: Optional["ErrorDetail"] = None, **kwargs ): super(ErrorResponse, self).__init__(**kwargs) self.error = error class OfferingsListResult(msrest.serialization.Model): """The response of a list Providers operation. :param value: Result of a list Providers operation. :type value: list[~azure.mgmt.quantum.models.ProviderDescription] :param next_link: Link to the next set of results. Not empty if Value contains incomplete list of Providers. 
:type next_link: str """ _attribute_map = { 'value': {'key': 'value', 'type': '[ProviderDescription]'}, 'next_link': {'key': 'nextLink', 'type': 'str'}, } def __init__( self, *, value: Optional[List["ProviderDescription"]] = None, next_link: Optional[str] = None, **kwargs ): super(OfferingsListResult, self).__init__(**kwargs) self.value = value self.next_link = next_link class Operation(msrest.serialization.Model): """Operation provided by provider. :param name: Name of the operation. :type name: str :param is_data_action: Indicates whether the operation is a data action. :type is_data_action: bool :param display: Properties of the operation. :type display: ~azure.mgmt.quantum.models.OperationDisplay """ _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'is_data_action': {'key': 'isDataAction', 'type': 'bool'}, 'display': {'key': 'display', 'type': 'OperationDisplay'}, } def __init__( self, *, name: Optional[str] = None, is_data_action: Optional[bool] = None, display: Optional["OperationDisplay"] = None, **kwargs ): super(Operation, self).__init__(**kwargs) self.name = name self.is_data_action = is_data_action self.display = display class OperationDisplay(msrest.serialization.Model): """Properties of the operation. :param provider: Provider name. :type provider: str :param resource: Resource name. :type resource: str :param operation: Operation name. :type operation: str :param description: Description of the operation. :type description: str """ _attribute_map = { 'provider': {'key': 'provider', 'type': 'str'}, 'resource': {'key': 'resource', 'type': 'str'}, 'operation': {'key': 'operation', 'type': 'str'}, 'description': {'key': 'description', 'type': 'str'}, } def __init__( self, *, provider: Optional[str] = None, resource: Optional[str] = None, operation: Optional[str] = None, description: Optional[str] = None, **kwargs ): super(OperationDisplay, self).__init__(**kwargs) self.provider = provider self.resource = resource self.operation = operation self.description = description class OperationsList(msrest.serialization.Model): """Lists the operations available. All required parameters must be populated in order to send to Azure. :param next_link: Url to follow for getting next page of operations. :type next_link: str :param value: Required. Array of operations. :type value: list[~azure.mgmt.quantum.models.Operation] """ _validation = { 'value': {'required': True}, } _attribute_map = { 'next_link': {'key': 'nextLink', 'type': 'str'}, 'value': {'key': 'value', 'type': '[Operation]'}, } def __init__( self, *, value: List["Operation"], next_link: Optional[str] = None, **kwargs ): super(OperationsList, self).__init__(**kwargs) self.next_link = next_link self.value = value class PricingDetail(msrest.serialization.Model): """Detailed pricing information for an sku. :param id: Unique id for this pricing information. :type id: str :param value: The unit cost of this sku. :type value: str """ _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'value': {'key': 'value', 'type': 'str'}, } def __init__( self, *, id: Optional[str] = None, value: Optional[str] = None, **kwargs ): super(PricingDetail, self).__init__(**kwargs) self.id = id self.value = value class PricingDimension(msrest.serialization.Model): """Information about pricing dimension. :param id: Unique id of this pricing dimension. :type id: str :param name: The display name of this pricing dimension. 
:type name: str """ _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, } def __init__( self, *, id: Optional[str] = None, name: Optional[str] = None, **kwargs ): super(PricingDimension, self).__init__(**kwargs) self.id = id self.name = name class Provider(msrest.serialization.Model): """Information about a Provider. A Provider is an entity that offers Targets to run Azure Quantum Jobs. :param provider_id: Unique id of this provider. :type provider_id: str :param provider_sku: The sku associated with pricing information for this provider. :type provider_sku: str :param instance_uri: A Uri identifying the specific instance of this provider. :type instance_uri: str :param application_name: The provider's marketplace application display name. :type application_name: str :param provisioning_state: Provisioning status field. Possible values include: "Succeeded", "Launching", "Updating", "Deleting", "Deleted", "Failed". :type provisioning_state: str or ~azure.mgmt.quantum.models.Status :param resource_usage_id: Id to track resource usage for the provider. :type resource_usage_id: str """ _attribute_map = { 'provider_id': {'key': 'providerId', 'type': 'str'}, 'provider_sku': {'key': 'providerSku', 'type': 'str'}, 'instance_uri': {'key': 'instanceUri', 'type': 'str'}, 'application_name': {'key': 'applicationName', 'type': 'str'}, 'provisioning_state': {'key': 'provisioningState', 'type': 'str'}, 'resource_usage_id': {'key': 'resourceUsageId', 'type': 'str'}, } def __init__( self, *, provider_id: Optional[str] = None, provider_sku: Optional[str] = None, instance_uri: Optional[str] = None, application_name: Optional[str] = None, provisioning_state: Optional[Union[str, "Status"]] = None, resource_usage_id: Optional[str] = None, **kwargs ): super(Provider, self).__init__(**kwargs) self.provider_id = provider_id self.provider_sku = provider_sku self.instance_uri = instance_uri self.application_name = application_name self.provisioning_state = provisioning_state self.resource_usage_id = resource_usage_id class ProviderDescription(msrest.serialization.Model): """Information about an offering. A provider offering is an entity that offers Targets to run Azure Quantum Jobs. Variables are only populated by the server, and will be ignored when sending a request. :param id: Unique provider's id. :type id: str :ivar name: Provider's display name. :vartype name: str :param properties: A list of provider-specific properties. :type properties: ~azure.mgmt.quantum.models.ProviderProperties """ _validation = { 'name': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'properties': {'key': 'properties', 'type': 'ProviderProperties'}, } def __init__( self, *, id: Optional[str] = None, properties: Optional["ProviderProperties"] = None, **kwargs ): super(ProviderDescription, self).__init__(**kwargs) self.id = id self.name = None self.properties = properties class ProviderProperties(msrest.serialization.Model): """Provider properties. Variables are only populated by the server, and will be ignored when sending a request. :ivar description: A description about this provider. :vartype description: str :ivar provider_type: Provider type. :vartype provider_type: str :ivar company: Company name. :vartype company: str :ivar default_endpoint: Provider's default endpoint. :vartype default_endpoint: str :param aad: Azure Active Directory info. 
:type aad: ~azure.mgmt.quantum.models.ProviderPropertiesAad :param managed_application: Provider's Managed-Application info. :type managed_application: ~azure.mgmt.quantum.models.ProviderPropertiesManagedApplication :param targets: The list of targets available from this provider. :type targets: list[~azure.mgmt.quantum.models.TargetDescription] :param skus: The list of skus available from this provider. :type skus: list[~azure.mgmt.quantum.models.SkuDescription] :param quota_dimensions: The list of quota dimensions from the provider. :type quota_dimensions: list[~azure.mgmt.quantum.models.QuotaDimension] :param pricing_dimensions: The list of pricing dimensions from the provider. :type pricing_dimensions: list[~azure.mgmt.quantum.models.PricingDimension] """ _validation = { 'description': {'readonly': True}, 'provider_type': {'readonly': True}, 'company': {'readonly': True}, 'default_endpoint': {'readonly': True}, } _attribute_map = { 'description': {'key': 'description', 'type': 'str'}, 'provider_type': {'key': 'providerType', 'type': 'str'}, 'company': {'key': 'company', 'type': 'str'}, 'default_endpoint': {'key': 'defaultEndpoint', 'type': 'str'}, 'aad': {'key': 'aad', 'type': 'ProviderPropertiesAad'}, 'managed_application': {'key': 'managedApplication', 'type': 'ProviderPropertiesManagedApplication'}, 'targets': {'key': 'targets', 'type': '[TargetDescription]'}, 'skus': {'key': 'skus', 'type': '[SkuDescription]'}, 'quota_dimensions': {'key': 'quotaDimensions', 'type': '[QuotaDimension]'}, 'pricing_dimensions': {'key': 'pricingDimensions', 'type': '[PricingDimension]'}, } def __init__( self, *, aad: Optional["ProviderPropertiesAad"] = None, managed_application: Optional["ProviderPropertiesManagedApplication"] = None, targets: Optional[List["TargetDescription"]] = None, skus: Optional[List["SkuDescription"]] = None, quota_dimensions: Optional[List["QuotaDimension"]] = None, pricing_dimensions: Optional[List["PricingDimension"]] = None, **kwargs ): super(ProviderProperties, self).__init__(**kwargs) self.description = None self.provider_type = None self.company = None self.default_endpoint = None self.aad = aad self.managed_application = managed_application self.targets = targets self.skus = skus self.quota_dimensions = quota_dimensions self.pricing_dimensions = pricing_dimensions class ProviderPropertiesAad(msrest.serialization.Model): """Azure Active Directory info. Variables are only populated by the server, and will be ignored when sending a request. :ivar application_id: Provider's application id. :vartype application_id: str :ivar tenant_id: Provider's tenant id. :vartype tenant_id: str """ _validation = { 'application_id': {'readonly': True}, 'tenant_id': {'readonly': True}, } _attribute_map = { 'application_id': {'key': 'applicationId', 'type': 'str'}, 'tenant_id': {'key': 'tenantId', 'type': 'str'}, } def __init__( self, **kwargs ): super(ProviderPropertiesAad, self).__init__(**kwargs) self.application_id = None self.tenant_id = None class ProviderPropertiesManagedApplication(msrest.serialization.Model): """Provider's Managed-Application info. Variables are only populated by the server, and will be ignored when sending a request. :ivar publisher_id: Provider's publisher id. :vartype publisher_id: str :ivar offer_id: Provider's offer id. 
:vartype offer_id: str """ _validation = { 'publisher_id': {'readonly': True}, 'offer_id': {'readonly': True}, } _attribute_map = { 'publisher_id': {'key': 'publisherId', 'type': 'str'}, 'offer_id': {'key': 'offerId', 'type': 'str'}, } def __init__( self, **kwargs ): super(ProviderPropertiesManagedApplication, self).__init__(**kwargs) self.publisher_id = None self.offer_id = None class Resource(msrest.serialization.Model): """Common fields that are returned in the response for all Azure Resource Manager resources. Variables are only populated by the server, and will be ignored when sending a request. :ivar id: Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. :vartype id: str :ivar name: The name of the resource. :vartype name: str :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts". :vartype type: str """ _validation = { 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, } def __init__( self, **kwargs ): super(Resource, self).__init__(**kwargs) self.id = None self.name = None self.type = None class TrackedResource(Resource): """The resource model definition for an Azure Resource Manager tracked top level resource which has 'tags' and a 'location'. Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. :ivar id: Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. :vartype id: str :ivar name: The name of the resource. :vartype name: str :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts". :vartype type: str :param tags: A set of tags. Resource tags. :type tags: dict[str, str] :param location: Required. The geo-location where the resource lives. :type location: str """ _validation = { 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, 'location': {'required': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'tags': {'key': 'tags', 'type': '{str}'}, 'location': {'key': 'location', 'type': 'str'}, } def __init__( self, *, location: str, tags: Optional[Dict[str, str]] = None, **kwargs ): super(TrackedResource, self).__init__(**kwargs) self.tags = tags self.location = location class QuantumWorkspace(TrackedResource): """The resource proxy definition object for quantum workspace. Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. :ivar id: Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. :vartype id: str :ivar name: The name of the resource. :vartype name: str :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts". :vartype type: str :param tags: A set of tags. Resource tags. 
:type tags: dict[str, str] :param location: Required. The geo-location where the resource lives. :type location: str :param identity: Managed Identity information. :type identity: ~azure.mgmt.quantum.models.QuantumWorkspaceIdentity :param providers: List of Providers selected for this Workspace. :type providers: list[~azure.mgmt.quantum.models.Provider] :ivar usable: Whether the current workspace is ready to accept Jobs. Possible values include: "Yes", "No", "Partial". :vartype usable: str or ~azure.mgmt.quantum.models.UsableStatus :ivar provisioning_state: Provisioning status field. Possible values include: "Succeeded", "ProviderLaunching", "ProviderUpdating", "ProviderDeleting", "ProviderProvisioning", "Failed". :vartype provisioning_state: str or ~azure.mgmt.quantum.models.ProvisioningStatus :param storage_account: ARM Resource Id of the storage account associated with this workspace. :type storage_account: str :ivar endpoint_uri: The URI of the workspace endpoint. :vartype endpoint_uri: str """ _validation = { 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, 'location': {'required': True}, 'usable': {'readonly': True}, 'provisioning_state': {'readonly': True}, 'endpoint_uri': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'tags': {'key': 'tags', 'type': '{str}'}, 'location': {'key': 'location', 'type': 'str'}, 'identity': {'key': 'identity', 'type': 'QuantumWorkspaceIdentity'}, 'providers': {'key': 'properties.providers', 'type': '[Provider]'}, 'usable': {'key': 'properties.usable', 'type': 'str'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, 'storage_account': {'key': 'properties.storageAccount', 'type': 'str'}, 'endpoint_uri': {'key': 'properties.endpointUri', 'type': 'str'}, } def __init__( self, *, location: str, tags: Optional[Dict[str, str]] = None, identity: Optional["QuantumWorkspaceIdentity"] = None, providers: Optional[List["Provider"]] = None, storage_account: Optional[str] = None, **kwargs ): super(QuantumWorkspace, self).__init__(tags=tags, location=location, **kwargs) self.identity = identity self.providers = providers self.usable = None self.provisioning_state = None self.storage_account = storage_account self.endpoint_uri = None class QuantumWorkspaceIdentity(msrest.serialization.Model): """Managed Identity information. Variables are only populated by the server, and will be ignored when sending a request. :ivar principal_id: The principal ID of resource identity. :vartype principal_id: str :ivar tenant_id: The tenant ID of resource. :vartype tenant_id: str :param type: The identity type. Possible values include: "SystemAssigned", "None". :type type: str or ~azure.mgmt.quantum.models.ResourceIdentityType """ _validation = { 'principal_id': {'readonly': True}, 'tenant_id': {'readonly': True}, } _attribute_map = { 'principal_id': {'key': 'principalId', 'type': 'str'}, 'tenant_id': {'key': 'tenantId', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, } def __init__( self, *, type: Optional[Union[str, "ResourceIdentityType"]] = None, **kwargs ): super(QuantumWorkspaceIdentity, self).__init__(**kwargs) self.principal_id = None self.tenant_id = None self.type = type class QuotaDimension(msrest.serialization.Model): """Information about a specific quota dimension. :param id: Unique id of this dimension. :type id: str :param scope: The scope of this quota dimension. 
:type scope: str :param period: The reset period of this quota dimension. :type period: str :param quota: The max limit of this dimension. :type quota: float :param name: The display name of this quota dimension. :type name: str :param description: A description about this quota dimension. :type description: str :param unit: The standard unit of measurement used for this quota dimension. :type unit: str :param unit_plural: The standard unit of measurement used for this quota dimension in plural form. :type unit_plural: str """ _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'scope': {'key': 'scope', 'type': 'str'}, 'period': {'key': 'period', 'type': 'str'}, 'quota': {'key': 'quota', 'type': 'float'}, 'name': {'key': 'name', 'type': 'str'}, 'description': {'key': 'description', 'type': 'str'}, 'unit': {'key': 'unit', 'type': 'str'}, 'unit_plural': {'key': 'unitPlural', 'type': 'str'}, } def __init__( self, *, id: Optional[str] = None, scope: Optional[str] = None, period: Optional[str] = None, quota: Optional[float] = None, name: Optional[str] = None, description: Optional[str] = None, unit: Optional[str] = None, unit_plural: Optional[str] = None, **kwargs ): super(QuotaDimension, self).__init__(**kwargs) self.id = id self.scope = scope self.period = period self.quota = quota self.name = name self.description = description self.unit = unit self.unit_plural = unit_plural class SkuDescription(msrest.serialization.Model): """Information about a specific sku. :param id: Unique sku id. :type id: str :param name: Display name of this sku. :type name: str :param description: Description about this sku. :type description: str :param targets: The list of targets available for this sku. :type targets: list[str] :param quota_dimensions: The list of quota dimensions for this sku. :type quota_dimensions: list[~azure.mgmt.quantum.models.QuotaDimension] :param pricing_details: The list of pricing details for the sku. :type pricing_details: list[~azure.mgmt.quantum.models.PricingDetail] """ _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'description': {'key': 'description', 'type': 'str'}, 'targets': {'key': 'targets', 'type': '[str]'}, 'quota_dimensions': {'key': 'quotaDimensions', 'type': '[QuotaDimension]'}, 'pricing_details': {'key': 'pricingDetails', 'type': '[PricingDetail]'}, } def __init__( self, *, id: Optional[str] = None, name: Optional[str] = None, description: Optional[str] = None, targets: Optional[List[str]] = None, quota_dimensions: Optional[List["QuotaDimension"]] = None, pricing_details: Optional[List["PricingDetail"]] = None, **kwargs ): super(SkuDescription, self).__init__(**kwargs) self.id = id self.name = name self.description = description self.targets = targets self.quota_dimensions = quota_dimensions self.pricing_details = pricing_details class TagsObject(msrest.serialization.Model): """Tags object for patch operations. :param tags: A set of tags. Resource tags. :type tags: dict[str, str] """ _attribute_map = { 'tags': {'key': 'tags', 'type': '{str}'}, } def __init__( self, *, tags: Optional[Dict[str, str]] = None, **kwargs ): super(TagsObject, self).__init__(**kwargs) self.tags = tags class TargetDescription(msrest.serialization.Model): """Information about a Target. A target is the component that can process a specific type of Job. :param id: Unique target id. :type id: str :param name: Display name of this target. :type name: str :param description: A description about this target. 
:type description: str :param accepted_data_formats: List of data formats accepted by this target. :type accepted_data_formats: list[str] :param accepted_content_encodings: List of content encodings accepted by this target. :type accepted_content_encodings: list[str] """ _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'description': {'key': 'description', 'type': 'str'}, 'accepted_data_formats': {'key': 'acceptedDataFormats', 'type': '[str]'}, 'accepted_content_encodings': {'key': 'acceptedContentEncodings', 'type': '[str]'}, } def __init__( self, *, id: Optional[str] = None, name: Optional[str] = None, description: Optional[str] = None, accepted_data_formats: Optional[List[str]] = None, accepted_content_encodings: Optional[List[str]] = None, **kwargs ): super(TargetDescription, self).__init__(**kwargs) self.id = id self.name = name self.description = description self.accepted_data_formats = accepted_data_formats self.accepted_content_encodings = accepted_content_encodings class WorkspaceListResult(msrest.serialization.Model): """The response of a list Workspaces operation. :param value: Result of a list Workspaces operation. :type value: list[~azure.mgmt.quantum.models.QuantumWorkspace] :param next_link: Link to the next set of results. Not empty if Value contains incomplete list of Workspaces. :type next_link: str """ _attribute_map = { 'value': {'key': 'value', 'type': '[QuantumWorkspace]'}, 'next_link': {'key': 'nextLink', 'type': 'str'}, } def __init__( self, *, value: Optional[List["QuantumWorkspace"]] = None, next_link: Optional[str] = None, **kwargs ): super(WorkspaceListResult, self).__init__(**kwargs) self.value = value self.next_link = next_link
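# --------------------------------------------------------------------------
# Hedged usage sketch (not generated code): shows how the models defined
# above are typically constructed on the client side. It assumes the
# azure-mgmt-quantum package is installed so the classes are importable from
# azure.mgmt.quantum.models, and that msrest models expose as_dict(); the
# location, tags, provider ids and storage account path are illustrative only.
if __name__ == "__main__":
    from azure.mgmt.quantum.models import (
        Provider,
        QuantumWorkspace,
        QuantumWorkspaceIdentity,
    )

    workspace = QuantumWorkspace(
        location="westus",  # required by TrackedResource
        tags={"environment": "dev"},
        identity=QuantumWorkspaceIdentity(type="SystemAssigned"),
        providers=[Provider(provider_id="example-provider", provider_sku="example-sku")],
        storage_account=(
            "/subscriptions/00000000-0000-0000-0000-000000000000"
            "/resourceGroups/example-rg/providers/Microsoft.Storage"
            "/storageAccounts/examplestorage"
        ),
    )

    # Read-only fields (id, name, usable, provisioning_state, endpoint_uri)
    # stay None until the server populates them in a response.
    payload = workspace.as_dict()
    print(payload["location"], payload["tags"])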
"""Functions for plotting and comparing isotherms.""" import math import typing as t from collections import abc from itertools import cycle import matplotlib as mpl import matplotlib.pyplot as plt from cycler import cycler from pygaps import logger from pygaps.graphing.labels import label_lgd from pygaps.graphing.labels import label_units_dict from pygaps.graphing.mpl_styles import BASE_STYLE from pygaps.graphing.mpl_styles import ISO_MARKERS from pygaps.graphing.mpl_styles import ISO_STYLE from pygaps.graphing.mpl_styles import Y1_COLORS from pygaps.graphing.mpl_styles import Y2_COLORS from pygaps.utilities.exceptions import GraphingError from pygaps.utilities.exceptions import ParameterError #: list of branch types _BRANCH_TYPES = { "ads": (True, False), "des": (False, True), "all": (True, True), } @mpl.rc_context(BASE_STYLE) @mpl.rc_context(ISO_STYLE) def plot_iso( isotherms, ax=None, x_data: str = 'pressure', y1_data: str = 'loading', y2_data: str = None, branch: str = "all", x_range: t.Tuple[float, float] = (None, None), y1_range: t.Tuple[float, float] = (None, None), y2_range: t.Tuple[float, float] = (None, None), x_points: t.Iterable[float] = None, y1_points: t.Iterable[float] = None, material_basis: str = None, material_unit: str = None, loading_basis: str = None, loading_unit: str = None, pressure_mode: str = None, pressure_unit: str = None, logx: bool = False, logy1: bool = False, logy2: bool = False, color: t.Union[bool, str, t.Iterable[str]] = True, marker: t.Union[bool, str, t.Iterable[str]] = True, y1_line_style: dict = None, y2_line_style: dict = None, lgd_keys: list = None, lgd_pos: str = 'best', save_path: str = None, ): """ Plot the isotherm(s) provided on a single graph. Parameters ---------- isotherms : PointIsotherms or list of Pointisotherms An isotherm or iterable of isotherms to be plotted. ax : matplotlib axes object, default None The axes object where to plot the graph if a new figure is not desired. x_data : str Key of data to plot on the x axis. Defaults to 'pressure'. y1_data : tuple Key of data to plot on the left y axis. Defaults to 'loading'. y2_data : tuple Key of data to plot on the right y axis. Defaults to None. branch : str Which branch to display, adsorption ('ads'), desorption ('des'), or both ('all'). x_range : tuple Range for data on the x axis. eg: (0, 1). Is applied to each isotherm, in the unit/mode/basis requested. y1_range : tuple Range for data on the regular y axis. eg: (0, 1). Is applied to each isotherm, in the unit/mode/basis requested. y2_range : tuple Range for data on the secondary y axis. eg: (0, 1). Is applied to each isotherm, in the unit/mode/basis requested. x_points : tuple Specific points of pressure where to evaluate an isotherm. Assumes x=pressure. y1_points : tuple Specific points of loading where to evaluate an isotherm. Assumes y1=loading. material_basis : str, optional Whether the adsorption is read in terms of either 'per volume' or 'per mass'. material_unit : str, optional Unit of loading, otherwise first isotherm value is used. loading_basis : str, optional Loading basis, otherwise first isotherm value is used. loading_unit : str, optional Unit of loading, otherwise first isotherm value is used. pressure_mode : str, optional The pressure mode, either absolute pressures or relative in the form of p/p0, otherwise first isotherm value is used. pressure_unit : str, optional Unit of pressure, otherwise first isotherm value is used. logx : bool Whether the graph x axis should be logarithmic. 
logy1 : bool Whether the graph y1 axis should be logarithmic. logy2 : bool Whether the graph y2 axis should be logarithmic. color : bool, int, list, optional If a boolean, the option controls if the graph is coloured or grayscale. Grayscale graphs are usually preferred for publications or print media. Otherwise, give a list of matplotlib colours or a number of colours to repeat in the cycle. marker : bool, int, list, optional Whether markers should be used to denote isotherm points. If an int, it will be the number of markers used. Otherwise, give a list of matplotlib markers or a number of markers to repeat in the cycle. y1_line_style : dict A dictionary that will be passed into the matplotlib plot() function. Applicable for left axis. y2_line_style : dict A dictionary that will be passed into the matplotlib plot() function. Applicable for right axis. lgd_keys : iterable The components of the isotherm which are displayed on the legend. For example pass ['material', 'adsorbate'] to have the legend labels display only these two components. Works with any isotherm properties and with 'branch' and 'key', the isotherm branch and the y-axis key respectively. Defaults to 'material' and 'adsorbate'. lgd_pos : [None, Matplotlib legend classifier, 'out bottom', 'out top', 'out left', out right] Specify to have the legend position outside the figure (out...) or inside the plot area itself (as determined by Matplotlib). Defaults to 'best'. save_path : str, optional Whether to save the graph or not. If a path is provided, then that is where the graph will be saved. Returns ------- axes : matplotlib.axes.Axes or numpy.ndarray of them """ ####################################### # # Initial checks # Make iterable if not already if not isinstance(isotherms, abc.Iterable): isotherms = (isotherms, ) else: isotherms = list(isotherms) # Check for plot validity if None in [x_data, y1_data]: raise ParameterError( "Specify a plot type to graph" " e.g. x_data=\'loading\', y1_data=\'pressure\'" ) # Check if required keys are present in isotherms if any(x_data not in _get_keys(isotherm) for isotherm in isotherms): raise GraphingError(f"One of the isotherms supplied does not have {x_data} data.") if any(y1_data not in _get_keys(isotherm) for isotherm in isotherms): raise GraphingError(f"One of the isotherms supplied does not have {y1_data} data.") if y2_data: if all(y2_data not in _get_keys(isotherm) for isotherm in isotherms): raise GraphingError(f"None of the isotherms supplied have {y2_data} data") if any(y2_data not in _get_keys(isotherm) for isotherm in isotherms): logger.warning(f"Some isotherms do not have {y2_data} data") # Store which branches will be displayed if not branch: raise ParameterError("Specify a branch to display" " e.g. branch=\'ads\'") if branch not in _BRANCH_TYPES: raise GraphingError( "The supplied branch type is not valid." 
f"Viable types are {_BRANCH_TYPES}" ) ads, des = _BRANCH_TYPES[branch] # Ensure iterable y1_line_style = y1_line_style if y1_line_style else {} y2_line_style = y2_line_style if y2_line_style else {} lgd_keys = lgd_keys if lgd_keys else [] # Pack other parameters data_params = dict( x_data=x_data, y1_data=y1_data, y2_data=y2_data, x_points=x_points, y1_points=y1_points, ) unit_params = dict( pressure_mode=pressure_mode if pressure_mode else isotherms[0].pressure_mode, pressure_unit=pressure_unit if pressure_unit else isotherms[0].pressure_unit, loading_basis=loading_basis if loading_basis else isotherms[0].loading_basis, loading_unit=loading_unit if loading_unit else isotherms[0].loading_unit, material_basis=material_basis if material_basis else isotherms[0].material_basis, material_unit=material_unit if material_unit else isotherms[0].material_unit, ) range_params = dict( x_range=x_range, y1_range=y1_range, y2_range=y2_range, x_points=x_points, y1_points=y1_points ) log_params = dict( logx=logx, logy1=logy1, logy2=logy2, ) ####################################### # # Settings and graph generation # # Generate or assign the figure and the axes if ax: ax1 = ax fig = ax1.get_figure() else: fig = plt.figure() ax1 = fig.add_subplot(111) # Create second axes object, populate it if required ax2 = ax1.twinx() if y2_data else None # Get a cycling style for the graph # # Color styling y1_colors = _get_colors(color, Y1_COLORS) y2_colors = _get_colors(color, Y2_COLORS) y1_color_cy = cycler('color', y1_colors) y2_color_cy = cycler('color', y2_colors) # # Marker styling markers = _get_markers(marker) y1_marker_cy = cycler('marker', markers) y2_marker_cy = cycler('marker', markers[::-1]) # # Combine cycles cycle_compose = True if marker else False pc_y1 = _cycle_compose(y1_marker_cy, y1_color_cy, cycle_compose) pc_y2 = _cycle_compose(y2_marker_cy, y2_color_cy, cycle_compose) # Labels ax1.set_xlabel(label_units_dict(x_data, unit_params)) ax1.set_ylabel(label_units_dict(y1_data, unit_params)) if y2_data: ax2.set_ylabel(label_units_dict(y2_data, unit_params)) ##################################### # # Actual plotting # # Plot the data for isotherm in isotherms: # Line styles for the current isotherm y1_ls = next(pc_y1) y2_ls = next(pc_y2) y1_ls.update(y1_line_style) y2_ls.update(y2_line_style) # If there's an adsorption branch, plot it if ads and isotherm.has_branch('ads'): # Points x1_p, y1_p, x2_p, y2_p = _get_data( isotherm, 'ads', data_params=data_params, unit_params=unit_params, range_params=range_params, ) # Plot line 1 y1_lbl = label_lgd(isotherm, lgd_keys, 'ads', y1_data) ax1.plot(x1_p, y1_p, label=y1_lbl, **y1_ls) # Plot line 2 if y2_data and y2_p is not None: y2_lbl = label_lgd(isotherm, lgd_keys, 'ads', y2_data) ax2.plot(x2_p, y2_p, label=y2_lbl, **y2_ls) # Switch to desorption linestyle (dotted, white marker) y1_ls['markerfacecolor'] = 'white' y1_ls['linestyle'] = '--' y2_ls['markerfacecolor'] = 'white' # If there's a desorption branch, plot it if des and isotherm.has_branch('des'): # Points x1_p, y1_p, x2_p, y2_p = _get_data( isotherm, 'des', data_params=data_params, unit_params=unit_params, range_params=range_params, ) # Plot line 1 if branch == 'all' and 'branch' not in lgd_keys: y1_lbl = '' else: y1_lbl = label_lgd(isotherm, lgd_keys, 'des', y1_data) ax1.plot(x1_p, y1_p, label=y1_lbl, **y1_ls) # Plot line 2 if y2_data and y2_p is not None: if branch == 'all' and 'branch' not in lgd_keys: y2_lbl = '' else: y2_lbl = label_lgd(isotherm, lgd_keys, 'des', y2_data) ax2.plot(x2_p, y2_p, label=y2_lbl, 
**y2_ls) ##################################### # # Final settings _final_styling( fig, ax1, ax2, log_params, range_params, lgd_pos, save_path, ) if ax2: return (ax1, ax2) return ax1 def _get_keys(iso): return ['loading', 'pressure'] + iso.other_keys def _get_colors(color, palette): if color: if isinstance(color, bool): return palette if isinstance(color, int): ncol = len(palette) if color > len(palette) else color return palette[:ncol] if isinstance(color, abc.Iterable): return color raise ParameterError("Unknown ``color`` parameter type.") return ['black', 'grey', 'silver'] def _get_markers(marker): if marker: if isinstance(marker, bool): return ISO_MARKERS if isinstance(marker, int): nmark = len(ISO_MARKERS) if marker > len(ISO_MARKERS) else marker return ISO_MARKERS[:nmark] if isinstance(marker, abc.Iterable): return marker raise ParameterError("Unknown ``marker`` parameter type.") return [] def _cycle_compose(cy_1, cy_2, cycle_compose): if cycle_compose: return cycle(cy_1 * cy_2) l_1 = len(cy_1) l_2 = len(cy_2) if l_1 == 0: return cycle(cy_2) if l_2 == 0: return cycle(cy_1) if l_1 > l_2: return cycle(cy_1 + (cy_2 * math.ceil(l_1 / l_2))[:l_1]) return cycle(cy_2 + (cy_1 * math.ceil(l_2 / l_1))[:l_2]) def _get_data( isotherm, branch, data_params, unit_params, range_params, ): """Plot the y1 data and y2 data of each branch.""" if data_params['x_points'] is None and data_params['y1_points'] is None: # Data X x1_p = _get_data_column( isotherm=isotherm, data_name=data_params['x_data'], branch=branch, unit_params=unit_params, data_range=range_params['x_range'], ) # Data line 1 y1_p = _get_data_column( isotherm=isotherm, data_name=data_params['y1_data'], branch=branch, unit_params=unit_params, data_range=range_params['y1_range'], ) x1_p, y1_p = x1_p.align(y1_p, join='inner') # Data line 2 x2_p = None y2_p = None if data_params['y2_data'] and data_params['y2_data'] in _get_keys(isotherm): y2_p = _get_data_column( isotherm, data_name=data_params['y2_data'], branch=branch, unit_params=unit_params, data_range=range_params['y2_range'], ) x2_p, y2_p = x1_p.align(y2_p, join='inner') else: if data_params['x_points'] is not None: x1_p = data_params['x_points'] y1_p = _get_data_column( isotherm=isotherm, data_name=data_params['y1_data'], branch=branch, unit_params=unit_params, data_range=range_params['y1_range'], data_points=data_params['x_points'], ) elif data_params['y1_points'] is not None: x1_p = _get_data_column( isotherm=isotherm, data_name=data_params['x_data'], branch=branch, unit_params=unit_params, data_range=range_params['x_range'], data_points=data_params['y1_points'], ) y1_p = data_params['y1_points'] x2_p = None y2_p = None return x1_p, y1_p, x2_p, y2_p def _get_data_column( isotherm, data_name, branch, unit_params, data_range=None, data_points=None, ): """Get different data from an isotherm.""" caller_dict = {'branch': branch} if data_name == 'pressure': caller_dict['pressure_mode'] = unit_params['pressure_mode'] caller_dict['pressure_unit'] = unit_params['pressure_unit'] if data_points is not None: return isotherm.pressure_at(data_points, **caller_dict) return isotherm.pressure(limits=data_range, indexed=True, **caller_dict) if data_name == 'loading': caller_dict['loading_basis'] = unit_params['loading_basis'] caller_dict['loading_unit'] = unit_params['loading_unit'] caller_dict['material_basis'] = unit_params['material_basis'] caller_dict['material_unit'] = unit_params['material_unit'] if data_points is not None: return isotherm.loading_at(data_points, **caller_dict) return 
isotherm.loading(limits=data_range, indexed=True, **caller_dict) return isotherm.other_data(data_name, limits=data_range, indexed=True, **caller_dict) def _final_styling( fig, ax1, ax2, log_params, range_params, lgd_pos, save_path, ): """Axes scales and limits, legend and graph saving.""" # Convert the axes into logarithmic if required if log_params['logx']: ax1.set_xscale('log') if log_params['logy1']: ax1.set_yscale('log') if ax2 and log_params['logy2']: ax2.set_yscale('log') # Axes range settings ax1.set_xlim(range_params['x_range']) ax1.set_ylim(range_params['y1_range']) if ax2: ax2.set_ylim(range_params['y2_range']) # Add the legend bbox_extra_artists = [] if lgd_pos is not None: # Get handles and combine them lines, labels = ax1.get_legend_handles_labels() if ax2: lines2, labels2 = ax2.get_legend_handles_labels() lines = lines + lines2 labels = labels + labels2 # Add the option for a large figure legend if lgd_pos in ['out left', 'out right', 'out bottom', 'out top']: lgd_style = {'bbox_transform': fig.transFigure} if lgd_pos == 'out top': lgd_style['bbox_to_anchor'] = (0.5, 1) lgd_style['loc'] = 'lower center' lgd_style['ncol'] = 2 elif lgd_pos == 'out bottom': lgd_style['bbox_to_anchor'] = (0.5, 0) lgd_style['loc'] = 'upper center' lgd_style['ncol'] = 2 elif lgd_pos == 'out right': lgd_style = {} lgd_style['bbox_to_anchor'] = (1, 0.5) lgd_style['loc'] = 'center left' elif lgd_pos == 'out left': lgd_style = {} lgd_style['bbox_to_anchor'] = (0, 0.5) lgd_style['loc'] = 'center right' lgd = fig.legend(lines, labels, **lgd_style) else: lgd = ax1.legend(lines, labels, loc=lgd_pos) bbox_extra_artists.append(lgd) # Fix size of graphs fig.tight_layout() # Save if desired if save_path: fig.savefig( save_path, bbox_extra_artists=bbox_extra_artists, bbox_inches='tight', dpi=300, )
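# --------------------------------------------------------------------------
# Hedged usage sketch (not part of the module): a minimal call of plot_iso()
# defined above. It assumes pygaps is installed and that a PointIsotherm can
# be built directly from pressure/loading lists; the numbers, material name
# and adsorbate are illustrative only.
if __name__ == "__main__":
    import pygaps

    example_isotherm = pygaps.PointIsotherm(
        pressure=[0.1, 0.3, 0.5, 0.7, 0.9],
        loading=[1.0, 2.2, 3.1, 3.6, 3.9],
        material="example material",   # hypothetical sample name
        adsorbate="nitrogen",
        temperature=77,
    )

    # Single isotherm: pressure on x, loading on y1, legend labelled by material.
    plot_iso(example_isotherm, branch="ads", lgd_keys=["material"])
    plt.show()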
"""Tests for Philips Hue config flow.""" import asyncio from unittest.mock import Mock, patch from aiohue.discovery import URL_NUPNP from aiohue.errors import LinkButtonNotPressed import pytest import voluptuous as vol from homeassistant import config_entries from homeassistant.components import ssdp, zeroconf from homeassistant.components.hue import config_flow, const from homeassistant.components.hue.errors import CannotConnect from homeassistant.helpers import device_registry as dr from tests.common import MockConfigEntry @pytest.fixture(name="hue_setup", autouse=True) def hue_setup_fixture(): """Mock hue entry setup.""" with patch("homeassistant.components.hue.async_setup_entry", return_value=True): yield def get_discovered_bridge(bridge_id="aabbccddeeff", host="1.2.3.4", supports_v2=False): """Return a mocked Discovered Bridge.""" return Mock(host=host, id=bridge_id, supports_v2=supports_v2) def create_mock_api_discovery(aioclient_mock, bridges): """Patch aiohttp responses with fake data for bridge discovery.""" aioclient_mock.get( URL_NUPNP, json=[{"internalipaddress": host, "id": id} for (host, id) in bridges], ) for (host, bridge_id) in bridges: aioclient_mock.get( f"http://{host}/api/config", json={"bridgeid": bridge_id}, ) # mock v2 support if v2 found in id aioclient_mock.get( f"https://{host}/clip/v2/resources", status=403 if "v2" in bridge_id else 404, ) async def test_flow_works(hass): """Test config flow .""" disc_bridge = get_discovered_bridge(supports_v2=True) with patch( "homeassistant.components.hue.config_flow.discover_nupnp", return_value=[disc_bridge], ): result = await hass.config_entries.flow.async_init( const.DOMAIN, context={"source": config_entries.SOURCE_USER} ) assert result["type"] == "form" assert result["step_id"] == "init" result = await hass.config_entries.flow.async_configure( result["flow_id"], user_input={"id": disc_bridge.id} ) assert result["type"] == "form" assert result["step_id"] == "link" flow = next( flow for flow in hass.config_entries.flow.async_progress() if flow["flow_id"] == result["flow_id"] ) assert flow["context"]["unique_id"] == "aabbccddeeff" with patch.object(config_flow, "create_app_key", return_value="123456789"): result = await hass.config_entries.flow.async_configure( result["flow_id"], user_input={} ) assert result["type"] == "create_entry" assert result["title"] == "Hue Bridge aabbccddeeff" assert result["data"] == { "host": "1.2.3.4", "api_key": "123456789", "api_version": 2, } async def test_manual_flow_works(hass): """Test config flow discovers only already configured bridges.""" disc_bridge = get_discovered_bridge(bridge_id="id-1234", host="2.2.2.2") MockConfigEntry( domain="hue", source=config_entries.SOURCE_IGNORE, unique_id="bla" ).add_to_hass(hass) with patch( "homeassistant.components.hue.config_flow.discover_nupnp", return_value=[disc_bridge], ): result = await hass.config_entries.flow.async_init( const.DOMAIN, context={"source": config_entries.SOURCE_USER} ) assert result["type"] == "form" assert result["step_id"] == "init" result = await hass.config_entries.flow.async_configure( result["flow_id"], user_input={"id": "manual"} ) assert result["type"] == "form" assert result["step_id"] == "manual" with patch.object(config_flow, "discover_bridge", return_value=disc_bridge): result = await hass.config_entries.flow.async_configure( result["flow_id"], {"host": "2.2.2.2"} ) assert result["type"] == "form" assert result["step_id"] == "link" with patch.object(config_flow, "create_app_key", return_value="123456789"), patch( 
"homeassistant.components.hue.async_unload_entry", return_value=True ): result = await hass.config_entries.flow.async_configure(result["flow_id"], {}) assert result["type"] == "create_entry" assert result["title"] == f"Hue Bridge {disc_bridge.id}" assert result["data"] == { "host": "2.2.2.2", "api_key": "123456789", "api_version": 1, } entries = hass.config_entries.async_entries("hue") assert len(entries) == 2 entry = entries[-1] assert entry.unique_id == "id-1234" async def test_manual_flow_bridge_exist(hass): """Test config flow aborts on already configured bridges.""" MockConfigEntry( domain="hue", unique_id="id-1234", data={"host": "2.2.2.2"} ).add_to_hass(hass) with patch( "homeassistant.components.hue.config_flow.discover_nupnp", return_value=[], ): result = await hass.config_entries.flow.async_init( const.DOMAIN, context={"source": config_entries.SOURCE_USER} ) assert result["type"] == "form" assert result["step_id"] == "manual" result = await hass.config_entries.flow.async_configure( result["flow_id"], {"host": "2.2.2.2"} ) assert result["type"] == "abort" assert result["reason"] == "already_configured" async def test_manual_flow_no_discovered_bridges(hass, aioclient_mock): """Test config flow discovers no bridges.""" create_mock_api_discovery(aioclient_mock, []) result = await hass.config_entries.flow.async_init( const.DOMAIN, context={"source": config_entries.SOURCE_USER} ) assert result["type"] == "form" assert result["step_id"] == "manual" async def test_flow_all_discovered_bridges_exist(hass, aioclient_mock): """Test config flow discovers only already configured bridges.""" mock_host = "1.2.3.4" mock_id = "bla" create_mock_api_discovery(aioclient_mock, [(mock_host, mock_id)]) MockConfigEntry( domain="hue", unique_id=mock_id, data={"host": mock_host} ).add_to_hass(hass) result = await hass.config_entries.flow.async_init( const.DOMAIN, context={"source": config_entries.SOURCE_USER} ) assert result["type"] == "form" assert result["step_id"] == "manual" async def test_flow_bridges_discovered(hass, aioclient_mock): """Test config flow discovers two bridges.""" # Add ignored config entry. Should still show up as option. 
MockConfigEntry( domain="hue", source=config_entries.SOURCE_IGNORE, unique_id="bla" ).add_to_hass(hass) create_mock_api_discovery( aioclient_mock, [("1.2.3.4", "bla"), ("5.6.7.8", "beer_v2")] ) result = await hass.config_entries.flow.async_init( const.DOMAIN, context={"source": config_entries.SOURCE_USER} ) assert result["type"] == "form" assert result["step_id"] == "init" with pytest.raises(vol.Invalid): assert result["data_schema"]({"id": "not-discovered"}) result["data_schema"]({"id": "bla"}) result["data_schema"]({"id": "beer_v2"}) result["data_schema"]({"id": "manual"}) async def test_flow_two_bridges_discovered_one_new(hass, aioclient_mock): """Test config flow discovers two bridges.""" create_mock_api_discovery(aioclient_mock, [("1.2.3.4", "bla"), ("5.6.7.8", "beer")]) MockConfigEntry( domain="hue", unique_id="bla", data={"host": "1.2.3.4"} ).add_to_hass(hass) result = await hass.config_entries.flow.async_init( const.DOMAIN, context={"source": config_entries.SOURCE_USER} ) assert result["type"] == "form" assert result["step_id"] == "init" assert result["data_schema"]({"id": "beer"}) assert result["data_schema"]({"id": "manual"}) with pytest.raises(vol.error.MultipleInvalid): assert not result["data_schema"]({"id": "bla"}) async def test_flow_timeout_discovery(hass): """Test config flow .""" with patch( "homeassistant.components.hue.config_flow.discover_nupnp", side_effect=asyncio.TimeoutError, ): result = await hass.config_entries.flow.async_init( const.DOMAIN, context={"source": config_entries.SOURCE_USER} ) assert result["type"] == "abort" assert result["reason"] == "discover_timeout" async def test_flow_link_unknown_error(hass): """Test if a unknown error happened during the linking processes.""" disc_bridge = get_discovered_bridge() with patch( "homeassistant.components.hue.config_flow.discover_nupnp", return_value=[disc_bridge], ): result = await hass.config_entries.flow.async_init( const.DOMAIN, context={"source": config_entries.SOURCE_USER} ) with patch.object(config_flow, "create_app_key", side_effect=Exception): result = await hass.config_entries.flow.async_configure( result["flow_id"], user_input={"id": disc_bridge.id} ) result = await hass.config_entries.flow.async_configure( result["flow_id"], user_input={} ) assert result["type"] == "form" assert result["step_id"] == "link" assert result["errors"] == {"base": "linking"} async def test_flow_link_button_not_pressed(hass): """Test config flow .""" disc_bridge = get_discovered_bridge() with patch( "homeassistant.components.hue.config_flow.discover_nupnp", return_value=[disc_bridge], ): result = await hass.config_entries.flow.async_init( const.DOMAIN, context={"source": config_entries.SOURCE_USER} ) with patch.object(config_flow, "create_app_key", side_effect=LinkButtonNotPressed): result = await hass.config_entries.flow.async_configure( result["flow_id"], user_input={"id": disc_bridge.id} ) result = await hass.config_entries.flow.async_configure( result["flow_id"], user_input={} ) assert result["type"] == "form" assert result["step_id"] == "link" assert result["errors"] == {"base": "register_failed"} async def test_flow_link_cannot_connect(hass): """Test config flow .""" disc_bridge = get_discovered_bridge() with patch( "homeassistant.components.hue.config_flow.discover_nupnp", return_value=[disc_bridge], ): result = await hass.config_entries.flow.async_init( const.DOMAIN, context={"source": config_entries.SOURCE_USER} ) with patch.object(config_flow, "create_app_key", side_effect=CannotConnect): result = await 
hass.config_entries.flow.async_configure( result["flow_id"], user_input={"id": disc_bridge.id} ) result = await hass.config_entries.flow.async_configure( result["flow_id"], user_input={} ) assert result["type"] == "abort" assert result["reason"] == "cannot_connect" @pytest.mark.parametrize("mf_url", config_flow.HUE_MANUFACTURERURL) async def test_bridge_ssdp(hass, mf_url, aioclient_mock): """Test a bridge being discovered.""" create_mock_api_discovery(aioclient_mock, [("0.0.0.0", "1234")]) result = await hass.config_entries.flow.async_init( const.DOMAIN, context={"source": config_entries.SOURCE_SSDP}, data=ssdp.SsdpServiceInfo( ssdp_usn="mock_usn", ssdp_st="mock_st", ssdp_location="http://0.0.0.0/", upnp={ ssdp.ATTR_UPNP_MANUFACTURER_URL: mf_url, ssdp.ATTR_UPNP_SERIAL: "1234", }, ), ) assert result["type"] == "form" assert result["step_id"] == "link" async def test_bridge_ssdp_discover_other_bridge(hass): """Test that discovery ignores other bridges.""" result = await hass.config_entries.flow.async_init( const.DOMAIN, context={"source": config_entries.SOURCE_SSDP}, data=ssdp.SsdpServiceInfo( ssdp_usn="mock_usn", ssdp_st="mock_st", upnp={ssdp.ATTR_UPNP_MANUFACTURER_URL: "http://www.notphilips.com"}, ), ) assert result["type"] == "abort" assert result["reason"] == "not_hue_bridge" async def test_bridge_ssdp_emulated_hue(hass): """Test if discovery info is from an emulated hue instance.""" result = await hass.config_entries.flow.async_init( const.DOMAIN, context={"source": config_entries.SOURCE_SSDP}, data=ssdp.SsdpServiceInfo( ssdp_usn="mock_usn", ssdp_st="mock_st", ssdp_location="http://0.0.0.0/", upnp={ ssdp.ATTR_UPNP_FRIENDLY_NAME: "Home Assistant Bridge", ssdp.ATTR_UPNP_MANUFACTURER_URL: config_flow.HUE_MANUFACTURERURL[0], ssdp.ATTR_UPNP_SERIAL: "1234", }, ), ) assert result["type"] == "abort" assert result["reason"] == "not_hue_bridge" async def test_bridge_ssdp_missing_location(hass): """Test if discovery info is missing a location attribute.""" result = await hass.config_entries.flow.async_init( const.DOMAIN, context={"source": config_entries.SOURCE_SSDP}, data=ssdp.SsdpServiceInfo( ssdp_usn="mock_usn", ssdp_st="mock_st", upnp={ ssdp.ATTR_UPNP_MANUFACTURER_URL: config_flow.HUE_MANUFACTURERURL[0], ssdp.ATTR_UPNP_SERIAL: "1234", }, ), ) assert result["type"] == "abort" assert result["reason"] == "not_hue_bridge" async def test_bridge_ssdp_missing_serial(hass): """Test if discovery info is a serial attribute.""" result = await hass.config_entries.flow.async_init( const.DOMAIN, context={"source": config_entries.SOURCE_SSDP}, data=ssdp.SsdpServiceInfo( ssdp_usn="mock_usn", ssdp_st="mock_st", ssdp_location="http://0.0.0.0/", upnp={ ssdp.ATTR_UPNP_MANUFACTURER_URL: config_flow.HUE_MANUFACTURERURL[0], }, ), ) assert result["type"] == "abort" assert result["reason"] == "not_hue_bridge" async def test_bridge_ssdp_invalid_location(hass): """Test if discovery info is a serial attribute.""" result = await hass.config_entries.flow.async_init( const.DOMAIN, context={"source": config_entries.SOURCE_SSDP}, data=ssdp.SsdpServiceInfo( ssdp_usn="mock_usn", ssdp_st="mock_st", ssdp_location="http:///", upnp={ ssdp.ATTR_UPNP_MANUFACTURER_URL: config_flow.HUE_MANUFACTURERURL[0], ssdp.ATTR_UPNP_SERIAL: "1234", }, ), ) assert result["type"] == "abort" assert result["reason"] == "not_hue_bridge" async def test_bridge_ssdp_espalexa(hass): """Test if discovery info is from an Espalexa based device.""" result = await hass.config_entries.flow.async_init( const.DOMAIN, context={"source": config_entries.SOURCE_SSDP}, 
data=ssdp.SsdpServiceInfo( ssdp_usn="mock_usn", ssdp_st="mock_st", ssdp_location="http://0.0.0.0/", upnp={ ssdp.ATTR_UPNP_FRIENDLY_NAME: "Espalexa (0.0.0.0)", ssdp.ATTR_UPNP_MANUFACTURER_URL: config_flow.HUE_MANUFACTURERURL[0], ssdp.ATTR_UPNP_SERIAL: "1234", }, ), ) assert result["type"] == "abort" assert result["reason"] == "not_hue_bridge" async def test_bridge_ssdp_already_configured(hass, aioclient_mock): """Test if a discovered bridge has already been configured.""" create_mock_api_discovery(aioclient_mock, [("0.0.0.0", "1234")]) MockConfigEntry( domain="hue", unique_id="1234", data={"host": "0.0.0.0"} ).add_to_hass(hass) result = await hass.config_entries.flow.async_init( const.DOMAIN, context={"source": config_entries.SOURCE_SSDP}, data=ssdp.SsdpServiceInfo( ssdp_usn="mock_usn", ssdp_st="mock_st", ssdp_location="http://0.0.0.0/", upnp={ ssdp.ATTR_UPNP_MANUFACTURER_URL: config_flow.HUE_MANUFACTURERURL[0], ssdp.ATTR_UPNP_SERIAL: "1234", }, ), ) assert result["type"] == "abort" assert result["reason"] == "already_configured" async def test_import_with_no_config(hass, aioclient_mock): """Test importing a host without an existing config file.""" create_mock_api_discovery(aioclient_mock, [("0.0.0.0", "1234")]) result = await hass.config_entries.flow.async_init( const.DOMAIN, context={"source": config_entries.SOURCE_IMPORT}, data={"host": "0.0.0.0"}, ) assert result["type"] == "form" assert result["step_id"] == "link" async def test_creating_entry_removes_entries_for_same_host_or_bridge( hass, aioclient_mock ): """Test that we clean up entries for same host and bridge. An IP can only hold a single bridge and a single bridge can only be accessible via a single IP. So when we create a new entry, we'll remove all existing entries that either have same IP or same bridge_id. 
""" create_mock_api_discovery(aioclient_mock, [("2.2.2.2", "id-1234")]) orig_entry = MockConfigEntry( domain="hue", data={"host": "0.0.0.0", "api_key": "123456789"}, unique_id="id-1234", ) orig_entry.add_to_hass(hass) MockConfigEntry( domain="hue", data={"host": "1.2.3.4", "api_key": "123456789"}, unique_id="id-5678", ).add_to_hass(hass) assert len(hass.config_entries.async_entries("hue")) == 2 result = await hass.config_entries.flow.async_init( "hue", data={"host": "2.2.2.2"}, context={"source": config_entries.SOURCE_IMPORT}, ) assert result["type"] == "form" assert result["step_id"] == "link" with patch( "homeassistant.components.hue.config_flow.create_app_key", return_value="123456789", ), patch("homeassistant.components.hue.async_unload_entry", return_value=True): result = await hass.config_entries.flow.async_configure(result["flow_id"], {}) assert result["type"] == "create_entry" assert result["title"] == "Hue Bridge id-1234" assert result["data"] == { "host": "2.2.2.2", "api_key": "123456789", "api_version": 1, } entries = hass.config_entries.async_entries("hue") assert len(entries) == 2 new_entry = entries[-1] assert orig_entry.entry_id != new_entry.entry_id assert new_entry.unique_id == "id-1234" async def test_bridge_homekit(hass, aioclient_mock): """Test a bridge being discovered via HomeKit.""" create_mock_api_discovery(aioclient_mock, [("0.0.0.0", "bla")]) result = await hass.config_entries.flow.async_init( const.DOMAIN, context={"source": config_entries.SOURCE_HOMEKIT}, data=zeroconf.ZeroconfServiceInfo( host="0.0.0.0", addresses=["0.0.0.0"], hostname="mock_hostname", name="mock_name", port=None, properties={zeroconf.ATTR_PROPERTIES_ID: "aa:bb:cc:dd:ee:ff"}, type="mock_type", ), ) assert result["type"] == "form" assert result["step_id"] == "link" flow = next( flow for flow in hass.config_entries.flow.async_progress() if flow["flow_id"] == result["flow_id"] ) assert flow["context"]["unique_id"] == config_entries.DEFAULT_DISCOVERY_UNIQUE_ID async def test_bridge_import_already_configured(hass): """Test if a import flow aborts if host is already configured.""" MockConfigEntry( domain="hue", unique_id="aabbccddeeff", data={"host": "0.0.0.0"} ).add_to_hass(hass) result = await hass.config_entries.flow.async_init( const.DOMAIN, context={"source": config_entries.SOURCE_IMPORT}, data={"host": "0.0.0.0", "properties": {"id": "aa:bb:cc:dd:ee:ff"}}, ) assert result["type"] == "abort" assert result["reason"] == "already_configured" async def test_bridge_homekit_already_configured(hass, aioclient_mock): """Test if a HomeKit discovered bridge has already been configured.""" create_mock_api_discovery(aioclient_mock, [("0.0.0.0", "aabbccddeeff")]) MockConfigEntry( domain="hue", unique_id="aabbccddeeff", data={"host": "0.0.0.0"} ).add_to_hass(hass) result = await hass.config_entries.flow.async_init( const.DOMAIN, context={"source": config_entries.SOURCE_HOMEKIT}, data=zeroconf.ZeroconfServiceInfo( host="0.0.0.0", addresses=["0.0.0.0"], hostname="mock_hostname", name="mock_name", port=None, properties={zeroconf.ATTR_PROPERTIES_ID: "aa:bb:cc:dd:ee:ff"}, type="mock_type", ), ) assert result["type"] == "abort" assert result["reason"] == "already_configured" async def test_ssdp_discovery_update_configuration(hass, aioclient_mock): """Test if a discovered bridge is configured and updated with new host.""" create_mock_api_discovery(aioclient_mock, [("1.1.1.1", "aabbccddeeff")]) entry = MockConfigEntry( domain="hue", unique_id="aabbccddeeff", data={"host": "0.0.0.0"} ) entry.add_to_hass(hass) result = 
await hass.config_entries.flow.async_init( const.DOMAIN, context={"source": config_entries.SOURCE_SSDP}, data=ssdp.SsdpServiceInfo( ssdp_usn="mock_usn", ssdp_st="mock_st", ssdp_location="http://1.1.1.1/", upnp={ ssdp.ATTR_UPNP_MANUFACTURER_URL: config_flow.HUE_MANUFACTURERURL[0], ssdp.ATTR_UPNP_SERIAL: "aabbccddeeff", }, ), ) assert result["type"] == "abort" assert result["reason"] == "already_configured" assert entry.data["host"] == "1.1.1.1" async def test_options_flow_v1(hass): """Test options config flow for a V1 bridge.""" entry = MockConfigEntry( domain="hue", unique_id="aabbccddeeff", data={"host": "0.0.0.0"}, ) entry.add_to_hass(hass) result = await hass.config_entries.options.async_init(entry.entry_id) assert result["type"] == "form" assert result["step_id"] == "init" schema = result["data_schema"].schema assert ( _get_schema_default(schema, const.CONF_ALLOW_HUE_GROUPS) == const.DEFAULT_ALLOW_HUE_GROUPS ) assert ( _get_schema_default(schema, const.CONF_ALLOW_UNREACHABLE) == const.DEFAULT_ALLOW_UNREACHABLE ) result = await hass.config_entries.options.async_configure( result["flow_id"], user_input={ const.CONF_ALLOW_HUE_GROUPS: True, const.CONF_ALLOW_UNREACHABLE: True, }, ) assert result["type"] == "create_entry" assert result["data"] == { const.CONF_ALLOW_HUE_GROUPS: True, const.CONF_ALLOW_UNREACHABLE: True, } def _get_schema_default(schema, key_name): """Iterate schema to find a key.""" for schema_key in schema: if schema_key == key_name: return schema_key.default() raise KeyError(f"{key_name} not found in schema") async def test_options_flow_v2(hass): """Test options config flow for a V2 bridge.""" entry = MockConfigEntry( domain="hue", unique_id="aabbccddeeff", data={"host": "0.0.0.0", "api_version": 2}, ) entry.add_to_hass(hass) dev_reg = dr.async_get(hass) mock_dev_id = "aabbccddee" dev_reg.async_get_or_create( config_entry_id=entry.entry_id, identifiers={(const.DOMAIN, mock_dev_id)} ) result = await hass.config_entries.options.async_init(entry.entry_id) assert result["type"] == "form" assert result["step_id"] == "init" schema = result["data_schema"].schema assert _get_schema_default(schema, const.CONF_IGNORE_AVAILABILITY) == [] result = await hass.config_entries.options.async_configure( result["flow_id"], user_input={const.CONF_IGNORE_AVAILABILITY: [mock_dev_id]}, ) assert result["type"] == "create_entry" assert result["data"] == { const.CONF_IGNORE_AVAILABILITY: [mock_dev_id], } async def test_bridge_zeroconf(hass, aioclient_mock): """Test a bridge being discovered.""" create_mock_api_discovery(aioclient_mock, [("192.168.1.217", "ecb5fafffeabcabc")]) result = await hass.config_entries.flow.async_init( const.DOMAIN, context={"source": config_entries.SOURCE_ZEROCONF}, data=zeroconf.ZeroconfServiceInfo( host="192.168.1.217", addresses=["192.168.1.217"], port=443, hostname="Philips-hue.local", type="_hue._tcp.local.", name="Philips Hue - ABCABC._hue._tcp.local.", properties={ "_raw": {"bridgeid": b"ecb5fafffeabcabc", "modelid": b"BSB002"}, "bridgeid": "ecb5fafffeabcabc", "modelid": "BSB002", }, ), ) assert result["type"] == "form" assert result["step_id"] == "link" async def test_bridge_zeroconf_already_exists(hass, aioclient_mock): """Test a bridge being discovered by zeroconf already exists.""" create_mock_api_discovery( aioclient_mock, [("0.0.0.0", "ecb5faabcabc"), ("192.168.1.217", "ecb5faabcabc")] ) entry = MockConfigEntry( domain="hue", source=config_entries.SOURCE_SSDP, data={"host": "0.0.0.0"}, unique_id="ecb5faabcabc", ) entry.add_to_hass(hass) result = await 
hass.config_entries.flow.async_init( const.DOMAIN, context={"source": config_entries.SOURCE_ZEROCONF}, data=zeroconf.ZeroconfServiceInfo( host="192.168.1.217", addresses=["192.168.1.217"], port=443, hostname="Philips-hue.local", type="_hue._tcp.local.", name="Philips Hue - ABCABC._hue._tcp.local.", properties={ "_raw": {"bridgeid": b"ecb5faabcabc", "modelid": b"BSB002"}, "bridgeid": "ecb5faabcabc", "modelid": "BSB002", }, ), ) assert result["type"] == "abort" assert result["reason"] == "already_configured" assert entry.data["host"] == "192.168.1.217"
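# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the Hue integration or its test suite:
# test_creating_entry_removes_entries_for_same_host_or_bridge above relies on
# the rule that one IP maps to one bridge and one bridge maps to one IP, so a
# newly created entry displaces any existing entry sharing either value.  The
# names ``Entry`` and ``upsert_entry`` are hypothetical and exist only to
# model that dedup rule in isolation.

from dataclasses import dataclass
from typing import List


@dataclass
class Entry:
    """Stand-in for a stored config entry (hypothetical, for illustration)."""

    host: str
    bridge_id: str


def upsert_entry(entries: List[Entry], new: Entry) -> List[Entry]:
    """Drop entries sharing a host *or* bridge id with ``new``, then add it."""
    kept = [
        entry
        for entry in entries
        if entry.host != new.host and entry.bridge_id != new.bridge_id
    ]
    kept.append(new)
    return kept


if __name__ == "__main__":
    existing = [Entry("0.0.0.0", "id-1234"), Entry("1.2.3.4", "id-5678")]
    updated = upsert_entry(existing, Entry("2.2.2.2", "id-1234"))
    # The "id-1234" bridge moved to the new host; "id-5678" is untouched.
    assert [(e.host, e.bridge_id) for e in updated] == [
        ("1.2.3.4", "id-5678"),
        ("2.2.2.2", "id-1234"),
    ]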
#!/usr/bin/env python # Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Makes sure that files include headers from allowed directories. Checks DEPS files in the source tree for rules, and applies those rules to "#include" commands in source files. Any source file including something not permitted by the DEPS files will fail. The format of the deps file: First you have the normal module-level deps. These are the ones used by gclient. An example would be: deps = { "base":"http://foo.bar/trunk/base" } DEPS files not in the top-level of a module won't need this. Then you have any additional include rules. You can add (using "+") or subtract (using "-") from the previously specified rules (including module-level deps). include_rules = { # Code should be able to use base (it's specified in the module-level # deps above), but nothing in "base/evil" because it's evil. "-base/evil", # But this one subdirectory of evil is OK. "+base/evil/not", # And it can include files from this other directory even though there is # no deps rule for it. "+tools/crime_fighter" } DEPS files may be placed anywhere in the tree. Each one applies to all subdirectories, where there may be more DEPS files that provide additions or subtractions for their own sub-trees. There is an implicit rule for the current directory (where the DEPS file lives) and all of its subdirectories. This prevents you from having to explicitly allow the current directory everywhere. This implicit rule is applied first, so you can modify or remove it using the normal include rules. The rules are processed in order. This means you can explicitly allow a higher directory and then take away permissions from sub-parts, or the reverse. Note that all directory separators must be slashes (Unix-style) and not backslashes. All directories should be relative to the source root and use only lowercase. """ import os import optparse import pipes import re import sys import copy # Variable name used in the DEPS file to add or subtract include files from # the module-level deps. INCLUDE_RULES_VAR_NAME = "include_rules" # Optionally present in the DEPS file to list subdirectories which should not # be checked. This allows us to skip third party code, for example. SKIP_SUBDIRS_VAR_NAME = "skip_child_includes" # The maximum number of non-include lines we can see before giving up. MAX_UNINTERESTING_LINES = 50 # The maximum line length, this is to be efficient in the case of very long # lines (which can't be #includes). MAX_LINE_LENGTH = 128 # Set to true for more output. This is set by the command line options. VERBOSE = False # This regular expression will be used to extract filenames from include # statements. EXTRACT_INCLUDE_PATH = re.compile('[ \t]*#[ \t]*(?:include|import)[ \t]+"(.*)"') # In lowercase, using forward slashes as directory separators, ending in a # forward slash. Set by the command line options. BASE_DIRECTORY = "" # The directories which contain the sources managed by git. GIT_SOURCE_DIRECTORY = set() # Specifies a single rule for an include, which can be either allow or disallow. class Rule(object): def __init__(self, allow, dir, source): self._allow = allow self._dir = dir self._source = source def __str__(self): if (self._allow): return '"+%s" from %s.' % (self._dir, self._source) return '"-%s" from %s.' 
% (self._dir, self._source) def ParentOrMatch(self, other): """Returns true if the input string is an exact match or is a parent of the current rule. For example, the input "foo" would match "foo/bar".""" return self._dir == other or self._dir.startswith(other + "/") def ChildOrMatch(self, other): """Returns true if the input string would be covered by this rule. For example, the input "foo/bar" would match the rule "foo".""" return self._dir == other or other.startswith(self._dir + "/") def ParseRuleString(rule_string, source): """Returns a tuple of a boolean indicating whether the directory is an allow rule, and a string holding the directory name. """ if len(rule_string) < 1: raise Exception('The rule string "%s" is too short\nin %s' % (rule_string, source)) if rule_string[0] == "+": return (True, rule_string[1:]) if rule_string[0] == "-": return (False, rule_string[1:]) raise Exception('The rule string "%s" does not begin with a "+" or a "-"' % rule_string) class Rules: def __init__(self): """Initializes the current rules with an empty rule list.""" self._rules = [] def __str__(self): ret = "Rules = [\n" ret += "\n".join([" %s" % x for x in self._rules]) ret += "]\n" return ret def AddRule(self, rule_string, source): """Adds a rule for the given rule string. Args: rule_string: The include_rule string read from the DEPS file to apply. source: A string representing the location of that string (filename, etc.) so that we can give meaningful errors. """ (add_rule, rule_dir) = ParseRuleString(rule_string, source) # Remove any existing rules or sub-rules that apply. For example, if we're # passed "foo", we should remove "foo", "foo/bar", but not "foobar". self._rules = [x for x in self._rules if not x.ParentOrMatch(rule_dir)] self._rules.insert(0, Rule(add_rule, rule_dir, source)) def DirAllowed(self, allowed_dir): """Returns a tuple (success, message), where success indicates if the given directory is allowed given the current set of rules, and the message tells why if the comparison failed.""" for rule in self._rules: if rule.ChildOrMatch(allowed_dir): # This rule applies. if rule._allow: return (True, "") return (False, rule.__str__()) # No rules apply, fail. return (False, "no rule applying") def ApplyRules(existing_rules, includes, cur_dir): """Applies the given include rules, returning the new rules. Args: existing_rules: A set of existing rules that will be combined. include: The list of rules from the "include_rules" section of DEPS. cur_dir: The current directory. We will create an implicit rule that allows inclusion from this directory. Returns: A new set of rules combining the existing_rules with the other arguments. """ rules = copy.copy(existing_rules) # First apply the implicit "allow" rule for the current directory. if cur_dir.lower().startswith(BASE_DIRECTORY): relative_dir = cur_dir[len(BASE_DIRECTORY) + 1:] # Normalize path separators to slashes. relative_dir = relative_dir.replace("\\", "/") source = relative_dir if len(source) == 0: source = "top level" # Make the help string a little more meaningful. rules.AddRule("+" + relative_dir, "Default rule for " + source) else: raise Exception("Internal error: base directory is not at the beginning" + " for\n %s and base dir\n %s" % (cur_dir, BASE_DIRECTORY)) # Last, apply the additional explicit rules. 
for (index, rule_str) in enumerate(includes): if not len(relative_dir): rule_description = "the top level include_rules" else: rule_description = relative_dir + "'s include_rules" rules.AddRule(rule_str, rule_description) return rules def ApplyDirectoryRules(existing_rules, dir_name): """Combines rules from the existing rules and the new directory. Any directory can contain a DEPS file. Toplevel DEPS files can contain module dependencies which are used by gclient. We use these, along with additional include rules and implicit rules for the given directory, to come up with a combined set of rules to apply for the directory. Args: existing_rules: The rules for the parent directory. We'll add-on to these. dir_name: The directory name that the deps file may live in (if it exists). This will also be used to generate the implicit rules. Returns: A tuple containing: (1) the combined set of rules to apply to the sub-tree, and (2) a list of all subdirectories that should NOT be checked, as specified in the DEPS file (if any). """ # Check for a .svn directory in this directory or check this directory is # contained in git source direcotries. This will tell us if it's a source # directory and should be checked. if not (os.path.exists(os.path.join(dir_name, ".svn")) or (dir_name.lower() in GIT_SOURCE_DIRECTORY)): return (None, []) # Check the DEPS file in this directory. if VERBOSE: print "Applying rules from", dir_name def FromImpl(unused, unused2): pass # NOP function so "From" doesn't fail. def FileImpl(unused): pass # NOP function so "File" doesn't fail. class _VarImpl: def __init__(self, local_scope): self._local_scope = local_scope def Lookup(self, var_name): """Implements the Var syntax.""" if var_name in self._local_scope.get("vars", {}): return self._local_scope["vars"][var_name] raise Error("Var is not defined: %s" % var_name) local_scope = {} global_scope = { "File": FileImpl, "From": FromImpl, "Var": _VarImpl(local_scope).Lookup, } deps_file = os.path.join(dir_name, "DEPS") if os.path.isfile(deps_file): execfile(deps_file, global_scope, local_scope) elif VERBOSE: print " No deps file found in", dir_name # Even if a DEPS file does not exist we still invoke ApplyRules # to apply the implicit "allow" rule for the current directory include_rules = local_scope.get(INCLUDE_RULES_VAR_NAME, []) skip_subdirs = local_scope.get(SKIP_SUBDIRS_VAR_NAME, []) return (ApplyRules(existing_rules, include_rules, dir_name), skip_subdirs) def ShouldCheckFile(file_name): """Returns True if the given file is a type we want to check.""" checked_extensions = [ '.h', '.cc', '.m', '.mm', ] basename, extension = os.path.splitext(file_name) return extension in checked_extensions def CheckLine(rules, line): """Checks the given file with the given rule set. Returns a tuple (is_include, illegal_description). If the line is an #include directive the first value will be True. If it is also an illegal include, the second value will be a string describing the error. Otherwise, it will be None.""" found_item = EXTRACT_INCLUDE_PATH.match(line) if not found_item: return False, None # Not a match include_path = found_item.group(1) # Fix up backslashes in case somebody accidentally used them. include_path.replace("\\", "/") if include_path.find("/") < 0: # Don't fail when no directory is specified. We may want to be more # strict about this in the future. 
if VERBOSE: print " WARNING: directory specified with no path: " + include_path return True, None (allowed, why_failed) = rules.DirAllowed(include_path) if not allowed: if VERBOSE: retval = "\nFor " + rules.__str__() else: retval = "" return True, retval + ('Illegal include: "%s"\n Because of %s' % (include_path, why_failed)) return True, None def CheckFile(rules, file_name): """Checks the given file with the given rule set. Args: rules: The set of rules that apply to files in this directory. file_name: The source file to check. Returns: Either a string describing the error if there was one, or None if the file checked out OK. """ if VERBOSE: print "Checking: " + file_name ret_val = "" # We'll collect the error messages in here last_include = 0 try: cur_file = open(file_name, "r") in_if0 = 0 for line_num in xrange(sys.maxint): if line_num - last_include > MAX_UNINTERESTING_LINES: break cur_line = cur_file.readline(MAX_LINE_LENGTH) if cur_line == "": break cur_line = cur_line.strip() # Check to see if we're at / inside a #if 0 block if cur_line == '#if 0': in_if0 += 1 continue if in_if0 > 0: if cur_line.startswith('#if'): in_if0 += 1 elif cur_line == '#endif': in_if0 -= 1 continue is_include, line_status = CheckLine(rules, cur_line) if is_include: last_include = line_num if line_status is not None: if len(line_status) > 0: # Add newline to separate messages. line_status += "\n" ret_val += line_status cur_file.close() except IOError: if VERBOSE: print "Unable to open file: " + file_name cur_file.close() # Map empty string to None for easier checking. if len(ret_val) == 0: return None return ret_val def CheckDirectory(parent_rules, dir_name): (rules, skip_subdirs) = ApplyDirectoryRules(parent_rules, dir_name) if rules == None: return True # Collect a list of all files and directories to check. files_to_check = [] dirs_to_check = [] success = True contents = os.listdir(dir_name) for cur in contents: if cur in skip_subdirs: continue # Don't check children that DEPS has asked us to skip. full_name = os.path.join(dir_name, cur) if os.path.isdir(full_name): dirs_to_check.append(full_name) elif ShouldCheckFile(full_name): files_to_check.append(full_name) # First check all files in this directory. for cur in files_to_check: file_status = CheckFile(rules, cur) if file_status != None: print "ERROR in " + cur + "\n" + file_status success = False # Next recurse into the subdirectories. for cur in dirs_to_check: if not CheckDirectory(rules, cur): success = False return success def GetGitSourceDirectory(root): """Returns a set of the directories to be checked. Args: root: The repository root where .git directory exists. Returns: A set of directories which contain sources managed by git. """ git_source_directory = set() popen_out = os.popen("cd %s && git ls-files --full-name ." % pipes.quote(root)) for line in popen_out.readlines(): dir_name = os.path.join(root, os.path.dirname(line)) # Add the directory as well as all the parent directories. while dir_name != root: git_source_directory.add(dir_name) dir_name = os.path.dirname(dir_name) git_source_directory.add(root) return git_source_directory def PrintUsage(): print """Usage: python checkdeps.py [--root <root>] [tocheck] --root Specifies the repository root. This defaults to "../../.." relative to the script file. This will be correct given the normal location of the script in "<root>/tools/checkdeps". tocheck Specifies the directory, relative to root, to check. This defaults to "." so it checks everything. 
Only one level deep is currently supported, so you can say "chrome" but not "chrome/browser". Examples: python checkdeps.py python checkdeps.py --root c:\\source chrome""" def checkdeps(options, args): global VERBOSE if options.verbose: VERBOSE = True # Optional base directory of the repository. global BASE_DIRECTORY if not options.base_directory: BASE_DIRECTORY = os.path.abspath( os.path.join(os.path.abspath(os.path.dirname(__file__)), "../..")) else: BASE_DIRECTORY = os.path.abspath(options.base_directory) # Figure out which directory we have to check. if len(args) == 0: # No directory to check specified, use the repository root. start_dir = BASE_DIRECTORY elif len(args) == 1: # Directory specified. Start here. It's supposed to be relative to the # base directory. start_dir = os.path.abspath(os.path.join(BASE_DIRECTORY, args[0])) else: # More than one argument, we don't handle this. PrintUsage() return 1 print "Using base directory:", BASE_DIRECTORY print "Checking:", start_dir base_rules = Rules() # The base directory should be lower case from here on since it will be used # for substring matching on the includes, and we compile on case-insensitive # systems. Plus, we always use slashes here since the include parsing code # will also normalize to slashes. BASE_DIRECTORY = BASE_DIRECTORY.lower() BASE_DIRECTORY = BASE_DIRECTORY.replace("\\", "/") start_dir = start_dir.replace("\\", "/") if os.path.exists(os.path.join(BASE_DIRECTORY, ".git")): global GIT_SOURCE_DIRECTORY GIT_SOURCE_DIRECTORY = GetGitSourceDirectory(BASE_DIRECTORY) success = CheckDirectory(base_rules, start_dir) if not success: print "\nFAILED\n" return 1 print "\nSUCCESS\n" return 0 def main(): option_parser = optparse.OptionParser() option_parser.add_option("", "--root", default="", dest="base_directory", help='Specifies the repository root. This defaults ' 'to "../../.." relative to the script file, which ' 'will normally be the repository root.') option_parser.add_option("-v", "--verbose", action="store_true", default=False, help="Print debug logging") options, args = option_parser.parse_args() return checkdeps(options, args) if '__main__' == __name__: sys.exit(main())
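# -----------------------------------------------------------------------------
# Illustrative sketch, not part of checkdeps itself: how the Rules class above
# evaluates the include_rules strings described in the module docstring.  The
# helper name _DemoRules and the directory names are made up for the example;
# the function is never called by the script, it only documents the semantics.

def _DemoRules():
  rules = Rules()
  rules.AddRule("+base", "example DEPS")           # allow includes from base/
  rules.AddRule("-base/evil", "example DEPS")      # ...but nothing under base/evil/
  rules.AddRule("+base/evil/not", "example DEPS")  # ...except this subtree

  assert rules.DirAllowed("base/strings")[0]
  assert not rules.DirAllowed("base/evil/plan")[0]
  assert rules.DirAllowed("base/evil/not/really")[0]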
# coding: utf-8 from __future__ import unicode_literals import hashlib import hmac import re import time import uuid from .common import InfoExtractor from ..compat import ( compat_HTTPError, compat_str, ) from ..utils import ( determine_ext, ExtractorError, int_or_none, str_or_none, try_get, url_or_none, ) class HotStarBaseIE(InfoExtractor): _AKAMAI_ENCRYPTION_KEY = b'\x05\xfc\x1a\x01\xca\xc9\x4b\xc4\x12\xfc\x53\x12\x07\x75\xf9\xee' def _call_api_impl(self, path, video_id, query): st = int(time.time()) exp = st + 6000 auth = 'st=%d~exp=%d~acl=/*' % (st, exp) auth += '~hmac=' + hmac.new(self._AKAMAI_ENCRYPTION_KEY, auth.encode(), hashlib.sha256).hexdigest() response = self._download_json( 'https://api.hotstar.com/' + path, video_id, headers={ 'hotstarauth': auth, 'x-country-code': 'IN', 'x-platform-code': 'JIO', }, query=query) if response['statusCode'] != 'OK': raise ExtractorError( response['body']['message'], expected=True) return response['body']['results'] def _call_api(self, path, video_id, query_name='contentId'): return self._call_api_impl(path, video_id, { query_name: video_id, 'tas': 10000, }) def _call_api_v2(self, path, video_id): return self._call_api_impl( '%s/in/contents/%s' % (path, video_id), video_id, { 'desiredConfig': 'encryption:plain;ladder:phone,tv;package:hls,dash', 'client': 'mweb', 'clientVersion': '6.18.0', 'deviceId': compat_str(uuid.uuid4()), 'osName': 'Windows', 'osVersion': '10', }) class HotStarIE(HotStarBaseIE): IE_NAME = 'hotstar' _VALID_URL = r'https?://(?:www\.)?hotstar\.com/(?:.+?[/-])?(?P<id>\d{10})' _TESTS = [{ # contentData 'url': 'https://www.hotstar.com/can-you-not-spread-rumours/1000076273', 'info_dict': { 'id': '1000076273', 'ext': 'mp4', 'title': 'Can You Not Spread Rumours?', 'description': 'md5:c957d8868e9bc793ccb813691cc4c434', 'timestamp': 1447248600, 'upload_date': '20151111', 'duration': 381, }, 'params': { # m3u8 download 'skip_download': True, } }, { # contentDetail 'url': 'https://www.hotstar.com/movies/radha-gopalam/1000057157', 'only_matching': True, }, { 'url': 'http://www.hotstar.com/sports/cricket/rajitha-sizzles-on-debut-with-329/2001477583', 'only_matching': True, }, { 'url': 'http://www.hotstar.com/1000000515', 'only_matching': True, }, { # only available via api v2 'url': 'https://www.hotstar.com/tv/ek-bhram-sarvagun-sampanna/s-2116/janhvi-targets-suman/1000234847', 'only_matching': True, }] _GEO_BYPASS = False def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) app_state = self._parse_json(self._search_regex( r'<script>window\.APP_STATE\s*=\s*({.+?})</script>', webpage, 'app state'), video_id) video_data = {} getters = list( lambda x, k=k: x['initialState']['content%s' % k]['content'] for k in ('Data', 'Detail') ) for v in app_state.values(): content = try_get(v, getters, dict) if content and content.get('contentId') == video_id: video_data = content break title = video_data['title'] if video_data.get('drmProtected'): raise ExtractorError('This video is DRM protected.', expected=True) formats = [] geo_restricted = False playback_sets = self._call_api_v2('h/v2/play', video_id)['playBackSets'] for playback_set in playback_sets: if not isinstance(playback_set, dict): continue format_url = url_or_none(playback_set.get('playbackUrl')) if not format_url: continue format_url = re.sub( r'(?<=//staragvod)(\d)', r'web\1', format_url) tags = str_or_none(playback_set.get('tagsCombination')) or '' if tags and 'encryption:plain' not in tags: continue ext = determine_ext(format_url) 
try: if 'package:hls' in tags or ext == 'm3u8': formats.extend(self._extract_m3u8_formats( format_url, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls')) elif 'package:dash' in tags or ext == 'mpd': formats.extend(self._extract_mpd_formats( format_url, video_id, mpd_id='dash')) elif ext == 'f4m': # produce broken files pass else: formats.append({ 'url': format_url, 'width': int_or_none(playback_set.get('width')), 'height': int_or_none(playback_set.get('height')), }) except ExtractorError as e: if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403: geo_restricted = True continue if not formats and geo_restricted: self.raise_geo_restricted(countries=['IN']) self._sort_formats(formats) return { 'id': video_id, 'title': title, 'description': video_data.get('description'), 'duration': int_or_none(video_data.get('duration')), 'timestamp': int_or_none(video_data.get('broadcastDate') or video_data.get('startDate')), 'formats': formats, 'channel': video_data.get('channelName'), 'channel_id': video_data.get('channelId'), 'series': video_data.get('showName'), 'season': video_data.get('seasonName'), 'season_number': int_or_none(video_data.get('seasonNo')), 'season_id': video_data.get('seasonId'), 'episode': title, 'episode_number': int_or_none(video_data.get('episodeNo')), } class HotStarPlaylistIE(HotStarBaseIE): IE_NAME = 'hotstar:playlist' _VALID_URL = r'https?://(?:www\.)?hotstar\.com/tv/[^/]+/s-\w+/list/[^/]+/t-(?P<id>\w+)' _TESTS = [{ 'url': 'https://www.hotstar.com/tv/savdhaan-india/s-26/list/popular-clips/t-3_2_26', 'info_dict': { 'id': '3_2_26', }, 'playlist_mincount': 20, }, { 'url': 'https://www.hotstar.com/tv/savdhaan-india/s-26/list/extras/t-2480', 'only_matching': True, }] def _real_extract(self, url): playlist_id = self._match_id(url) collection = self._call_api('o/v1/tray/find', playlist_id, 'uqId') entries = [ self.url_result( 'https://www.hotstar.com/%s' % video['contentId'], ie=HotStarIE.ie_key(), video_id=video['contentId']) for video in collection['assets']['items'] if video.get('contentId')] return self.playlist_result(entries, playlist_id)
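# ----------------------------------------------------------------------------
# Illustrative sketch, not part of the extractor: how the ``hotstarauth``
# token assembled in HotStarBaseIE._call_api_impl above is put together.  It
# reuses the hashlib/hmac/time imports at the top of this module; the helper
# name and the all-zero key below are placeholders, not the real Akamai key.

def build_hotstar_auth(secret_key, validity_seconds=6000):
    """Return an Akamai-style token: st=<now>~exp=<now+n>~acl=/*~hmac=<sig>."""
    st = int(time.time())
    exp = st + validity_seconds
    auth = 'st=%d~exp=%d~acl=/*' % (st, exp)
    signature = hmac.new(secret_key, auth.encode(), hashlib.sha256).hexdigest()
    return auth + '~hmac=' + signature


if __name__ == '__main__':
    token = build_hotstar_auth(b'\x00' * 16)
    assert token.startswith('st=') and '~hmac=' in token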
""" This module converts requested URLs to callback view functions. RegexURLResolver is the main class here. Its resolve() method takes a URL (as a string) and returns a ResolverMatch object which provides access to all attributes of the resolved URL match. """ from __future__ import unicode_literals import functools import re import warnings from importlib import import_module from threading import local from django.core.exceptions import ImproperlyConfigured, ViewDoesNotExist from django.http import Http404 from django.utils import lru_cache, six from django.utils.datastructures import MultiValueDict from django.utils.deprecation import RemovedInDjango110Warning from django.utils.encoding import force_str, force_text, iri_to_uri from django.utils.functional import cached_property, lazy from django.utils.http import RFC3986_SUBDELIMS, urlquote from django.utils.module_loading import module_has_submodule from django.utils.regex_helper import normalize from django.utils.six.moves.urllib.parse import urlsplit, urlunsplit from django.utils.translation import get_language, override # SCRIPT_NAME prefixes for each thread are stored here. If there's no entry for # the current thread (which is the only one we ever access), it is assumed to # be empty. _prefixes = local() # Overridden URLconfs for each thread are stored here. _urlconfs = local() class ResolverMatch(object): def __init__(self, func, args, kwargs, url_name=None, app_names=None, namespaces=None): self.func = func self.args = args self.kwargs = kwargs self.url_name = url_name # If a URLRegexResolver doesn't have a namespace or app_name, it passes # in an empty value. self.app_names = [x for x in app_names if x] if app_names else [] self.app_name = ':'.join(self.app_names) if namespaces: self.namespaces = [x for x in namespaces if x] else: self.namespaces = [] self.namespace = ':'.join(self.namespaces) if not hasattr(func, '__name__'): # A class-based view self._func_path = '.'.join([func.__class__.__module__, func.__class__.__name__]) else: # A function-based view self._func_path = '.'.join([func.__module__, func.__name__]) view_path = url_name or self._func_path self.view_name = ':'.join(self.namespaces + [view_path]) def __getitem__(self, index): return (self.func, self.args, self.kwargs)[index] def __repr__(self): return "ResolverMatch(func=%s, args=%s, kwargs=%s, url_name=%s, app_names=%s, namespaces=%s)" % ( self._func_path, self.args, self.kwargs, self.url_name, self.app_names, self.namespaces) class Resolver404(Http404): pass class NoReverseMatch(Exception): pass @lru_cache.lru_cache(maxsize=None) def get_callable(lookup_view, can_fail=False): """ Return a callable corresponding to lookup_view. This function is used by both resolve() and reverse(), so can_fail allows the caller to choose between returning the input as is and raising an exception when the input string can't be interpreted as an import path. If lookup_view is already a callable, return it. If lookup_view is a string import path that can be resolved to a callable, import that callable and return it. If lookup_view is some other kind of string and can_fail is True, the string is returned as is. If can_fail is False, an exception is raised (either ImportError or ViewDoesNotExist). """ if callable(lookup_view): return lookup_view if not isinstance(lookup_view, six.string_types): raise ViewDoesNotExist( "'%s' is not a callable or a dot-notation path" % lookup_view ) mod_name, func_name = get_mod_func(lookup_view) if not func_name: # No '.' 
in lookup_view if can_fail: return lookup_view else: raise ImportError( "Could not import '%s'. The path must be fully qualified." % lookup_view) try: mod = import_module(mod_name) except ImportError: if can_fail: return lookup_view else: parentmod, submod = get_mod_func(mod_name) if submod and not module_has_submodule(import_module(parentmod), submod): raise ViewDoesNotExist( "Could not import '%s'. Parent module %s does not exist." % (lookup_view, mod_name)) else: raise else: try: view_func = getattr(mod, func_name) except AttributeError: if can_fail: return lookup_view else: raise ViewDoesNotExist( "Could not import '%s'. View does not exist in module %s." % (lookup_view, mod_name)) else: if not callable(view_func): # For backwards compatibility this is raised regardless of can_fail raise ViewDoesNotExist( "Could not import '%s.%s'. View is not callable." % (mod_name, func_name)) return view_func @lru_cache.lru_cache(maxsize=None) def get_resolver(urlconf=None): if urlconf is None: from django.conf import settings urlconf = settings.ROOT_URLCONF return RegexURLResolver(r'^/', urlconf) @lru_cache.lru_cache(maxsize=None) def get_ns_resolver(ns_pattern, resolver): # Build a namespaced resolver for the given parent urlconf pattern. # This makes it possible to have captured parameters in the parent # urlconf pattern. ns_resolver = RegexURLResolver(ns_pattern, resolver.url_patterns) return RegexURLResolver(r'^/', [ns_resolver]) def get_mod_func(callback): # Converts 'django.views.news.stories.story_detail' to # ['django.views.news.stories', 'story_detail'] try: dot = callback.rindex('.') except ValueError: return callback, '' return callback[:dot], callback[dot + 1:] class LocaleRegexProvider(object): """ A mixin to provide a default regex property which can vary by active language. """ def __init__(self, regex): # regex is either a string representing a regular expression, or a # translatable string (using ugettext_lazy) representing a regular # expression. self._regex = regex self._regex_dict = {} @property def regex(self): """ Returns a compiled regular expression, depending upon the activated language-code. """ language_code = get_language() if language_code not in self._regex_dict: if isinstance(self._regex, six.string_types): regex = self._regex else: regex = force_text(self._regex) try: compiled_regex = re.compile(regex, re.UNICODE) except re.error as e: raise ImproperlyConfigured( '"%s" is not a valid regular expression: %s' % (regex, six.text_type(e))) self._regex_dict[language_code] = compiled_regex return self._regex_dict[language_code] class RegexURLPattern(LocaleRegexProvider): def __init__(self, regex, callback, default_args=None, name=None): LocaleRegexProvider.__init__(self, regex) # callback is either a string like 'foo.views.news.stories.story_detail' # which represents the path to a module and a view function name, or a # callable object (view). if callable(callback): self._callback = callback else: self._callback = None self._callback_str = callback self.default_args = default_args or {} self.name = name def __repr__(self): return force_str('<%s %s %s>' % (self.__class__.__name__, self.name, self.regex.pattern)) def add_prefix(self, prefix): """ Adds the prefix string to a string-based callback. """ if not prefix or not hasattr(self, '_callback_str'): return self._callback_str = prefix + '.' + self._callback_str def resolve(self, path): match = self.regex.search(path) if match: # If there are any named groups, use those as kwargs, ignoring # non-named groups. 
Otherwise, pass all non-named arguments as # positional arguments. kwargs = match.groupdict() if kwargs: args = () else: args = match.groups() # In both cases, pass any extra_kwargs as **kwargs. kwargs.update(self.default_args) return ResolverMatch(self.callback, args, kwargs, self.name) @property def callback(self): if self._callback is not None: return self._callback self._callback = get_callable(self._callback_str) return self._callback class RegexURLResolver(LocaleRegexProvider): def __init__(self, regex, urlconf_name, default_kwargs=None, app_name=None, namespace=None): LocaleRegexProvider.__init__(self, regex) # urlconf_name is the dotted Python path to the module defining # urlpatterns. It may also be an object with an urlpatterns attribute # or urlpatterns itself. self.urlconf_name = urlconf_name self.callback = None self.default_kwargs = default_kwargs or {} self.namespace = namespace self.app_name = app_name self._reverse_dict = {} self._namespace_dict = {} self._app_dict = {} # set of dotted paths to all functions and classes that are used in # urlpatterns self._callback_strs = set() self._populated = False def __repr__(self): if isinstance(self.urlconf_name, list) and len(self.urlconf_name): # Don't bother to output the whole list, it can be huge urlconf_repr = '<%s list>' % self.urlconf_name[0].__class__.__name__ else: urlconf_repr = repr(self.urlconf_name) return str('<%s %s (%s:%s) %s>') % ( self.__class__.__name__, urlconf_repr, self.app_name, self.namespace, self.regex.pattern) def _populate(self): lookups = MultiValueDict() namespaces = {} apps = {} language_code = get_language() for pattern in reversed(self.url_patterns): if hasattr(pattern, '_callback_str'): self._callback_strs.add(pattern._callback_str) elif hasattr(pattern, '_callback'): callback = pattern._callback if isinstance(callback, functools.partial): callback = callback.func if not hasattr(callback, '__name__'): lookup_str = callback.__module__ + "." + callback.__class__.__name__ else: lookup_str = callback.__module__ + "." 
+ callback.__name__ self._callback_strs.add(lookup_str) p_pattern = pattern.regex.pattern if p_pattern.startswith('^'): p_pattern = p_pattern[1:] if isinstance(pattern, RegexURLResolver): if pattern.namespace: namespaces[pattern.namespace] = (p_pattern, pattern) if pattern.app_name: apps.setdefault(pattern.app_name, []).append(pattern.namespace) else: parent_pat = pattern.regex.pattern for name in pattern.reverse_dict: for matches, pat, defaults in pattern.reverse_dict.getlist(name): new_matches = normalize(parent_pat + pat) lookups.appendlist( name, ( new_matches, p_pattern + pat, dict(defaults, **pattern.default_kwargs), ) ) for namespace, (prefix, sub_pattern) in pattern.namespace_dict.items(): namespaces[namespace] = (p_pattern + prefix, sub_pattern) for app_name, namespace_list in pattern.app_dict.items(): apps.setdefault(app_name, []).extend(namespace_list) self._callback_strs.update(pattern._callback_strs) else: bits = normalize(p_pattern) lookups.appendlist(pattern.callback, (bits, p_pattern, pattern.default_args)) if pattern.name is not None: lookups.appendlist(pattern.name, (bits, p_pattern, pattern.default_args)) self._reverse_dict[language_code] = lookups self._namespace_dict[language_code] = namespaces self._app_dict[language_code] = apps self._populated = True @property def reverse_dict(self): language_code = get_language() if language_code not in self._reverse_dict: self._populate() return self._reverse_dict[language_code] @property def namespace_dict(self): language_code = get_language() if language_code not in self._namespace_dict: self._populate() return self._namespace_dict[language_code] @property def app_dict(self): language_code = get_language() if language_code not in self._app_dict: self._populate() return self._app_dict[language_code] def _is_callback(self, name): if not self._populated: self._populate() return name in self._callback_strs def resolve(self, path): path = force_text(path) # path may be a reverse_lazy object tried = [] match = self.regex.search(path) if match: new_path = path[match.end():] for pattern in self.url_patterns: try: sub_match = pattern.resolve(new_path) except Resolver404 as e: sub_tried = e.args[0].get('tried') if sub_tried is not None: tried.extend([pattern] + t for t in sub_tried) else: tried.append([pattern]) else: if sub_match: # Merge captured arguments in match with submatch sub_match_dict = dict(match.groupdict(), **self.default_kwargs) sub_match_dict.update(sub_match.kwargs) # If there are *any* named groups, ignore all non-named groups. # Otherwise, pass all non-named arguments as positional arguments. sub_match_args = sub_match.args if not sub_match_dict: sub_match_args = match.groups() + sub_match.args return ResolverMatch( sub_match.func, sub_match_args, sub_match_dict, sub_match.url_name, [self.app_name] + sub_match.app_names, [self.namespace] + sub_match.namespaces ) tried.append([pattern]) raise Resolver404({'tried': tried, 'path': new_path}) raise Resolver404({'path': path}) @cached_property def urlconf_module(self): if isinstance(self.urlconf_name, six.string_types): return import_module(self.urlconf_name) else: return self.urlconf_name @cached_property def url_patterns(self): # urlconf_module might be a valid set of patterns, so we default to it patterns = getattr(self.urlconf_module, "urlpatterns", self.urlconf_module) try: iter(patterns) except TypeError: msg = ( "The included urlconf '{name}' does not appear to have any " "patterns in it. 
If you see valid patterns in the file then "
                "the issue is probably caused by a circular import."
            )
            raise ImproperlyConfigured(msg.format(name=self.urlconf_name))
        return patterns

    def resolve_error_handler(self, view_type):
        callback = getattr(self.urlconf_module, 'handler%s' % view_type, None)
        if not callback:
            # No handler specified in file; use default
            # Lazy import, since django.urls imports this file
            from django.conf import urls
            callback = getattr(urls, 'handler%s' % view_type)
        return get_callable(callback), {}

    def reverse(self, lookup_view, *args, **kwargs):
        return self._reverse_with_prefix(lookup_view, '', *args, **kwargs)

    def _reverse_with_prefix(self, lookup_view, _prefix, *args, **kwargs):
        if args and kwargs:
            raise ValueError("Don't mix *args and **kwargs in call to reverse()!")
        text_args = [force_text(v) for v in args]
        text_kwargs = {k: force_text(v) for (k, v) in kwargs.items()}

        if not self._populated:
            self._populate()

        original_lookup = lookup_view
        try:
            if self._is_callback(lookup_view):
                lookup_view = get_callable(lookup_view, True)
        except (ImportError, AttributeError) as e:
            raise NoReverseMatch("Error importing '%s': %s." % (lookup_view, e))
        else:
            if not callable(original_lookup) and callable(lookup_view):
                warnings.warn(
                    'Reversing by dotted path is deprecated (%s).' % original_lookup,
                    RemovedInDjango110Warning, stacklevel=3
                )
        possibilities = self.reverse_dict.getlist(lookup_view)

        for possibility, pattern, defaults in possibilities:
            for result, params in possibility:
                if args:
                    if len(args) != len(params):
                        continue
                    candidate_subs = dict(zip(params, text_args))
                else:
                    if (set(kwargs.keys()) | set(defaults.keys()) !=
                            set(params) | set(defaults.keys())):
                        continue
                    matches = True
                    for k, v in defaults.items():
                        if kwargs.get(k, v) != v:
                            matches = False
                            break
                    if not matches:
                        continue
                    candidate_subs = text_kwargs
                # WSGI provides decoded URLs, without %xx escapes, and the URL
                # resolver operates on such URLs. First substitute arguments
                # without quoting to build a decoded URL and look for a match.
                # Then, if we have a match, redo the substitution with quoted
                # arguments in order to return a properly encoded URL.
                candidate_pat = _prefix.replace('%', '%%') + result
                if re.search('^%s%s' % (re.escape(_prefix), pattern),
                             candidate_pat % candidate_subs, re.UNICODE):
                    # safe characters from `pchar` definition of RFC 3986
                    url = urlquote(candidate_pat % candidate_subs,
                                   safe=RFC3986_SUBDELIMS + str('/~:@'))
                    # Don't allow construction of scheme relative urls.
                    if url.startswith('//'):
                        url = '/%%2F%s' % url[2:]
                    return url
        # lookup_view can be a URL label, a dotted path, or a callable. Any of
        # these can be passed in at the top, but callables are not friendly in
        # error messages.
        m = getattr(lookup_view, '__module__', None)
        n = getattr(lookup_view, '__name__', None)
        if m is not None and n is not None:
            lookup_view_s = "%s.%s" % (m, n)
        else:
            lookup_view_s = lookup_view

        patterns = [pattern for (possibility, pattern, defaults) in possibilities]
        raise NoReverseMatch("Reverse for '%s' with arguments '%s' and keyword "
                             "arguments '%s' not found. %d pattern(s) tried: %s" %
                             (lookup_view_s, args, kwargs, len(patterns), patterns))


class LocaleRegexURLResolver(RegexURLResolver):
    """
    A URL resolver that always matches the active language code as URL prefix.

    Rather than taking a regex argument, we just override the ``regex``
    function to always return the active language-code as regex.
    """
    def __init__(self, urlconf_name, default_kwargs=None, app_name=None, namespace=None):
        super(LocaleRegexURLResolver, self).__init__(
            None, urlconf_name, default_kwargs, app_name, namespace)

    @property
    def regex(self):
        language_code = get_language()
        if language_code not in self._regex_dict:
            regex_compiled = re.compile('^%s/' % language_code, re.UNICODE)
            self._regex_dict[language_code] = regex_compiled
        return self._regex_dict[language_code]


def resolve(path, urlconf=None):
    if urlconf is None:
        urlconf = get_urlconf()
    return get_resolver(urlconf).resolve(path)


def reverse(viewname, urlconf=None, args=None, kwargs=None, current_app=None):
    if urlconf is None:
        urlconf = get_urlconf()
    resolver = get_resolver(urlconf)
    args = args or []
    kwargs = kwargs or {}

    prefix = get_script_prefix()

    if not isinstance(viewname, six.string_types):
        view = viewname
    else:
        parts = viewname.split(':')
        parts.reverse()
        view = parts[0]
        path = parts[1:]

        if current_app:
            current_path = current_app.split(':')
            current_path.reverse()
        else:
            current_path = None

        resolved_path = []
        ns_pattern = ''
        while path:
            ns = path.pop()
            current_ns = current_path.pop() if current_path else None

            # Look up the name to see if it could be an app identifier.
            try:
                app_list = resolver.app_dict[ns]
                # Yes! Path part matches an app in the current Resolver.
                if current_ns and current_ns in app_list:
                    # If we are reversing for a particular app,
                    # use that namespace.
                    ns = current_ns
                elif ns not in app_list:
                    # The name isn't shared by one of the instances
                    # (i.e., the default) so just pick the first instance
                    # as the default.
                    ns = app_list[0]
            except KeyError:
                pass

            if ns != current_ns:
                current_path = None

            try:
                extra, resolver = resolver.namespace_dict[ns]
                resolved_path.append(ns)
                ns_pattern = ns_pattern + extra
            except KeyError as key:
                if resolved_path:
                    raise NoReverseMatch(
                        "%s is not a registered namespace inside '%s'" %
                        (key, ':'.join(resolved_path)))
                else:
                    raise NoReverseMatch("%s is not a registered namespace" % key)
        if ns_pattern:
            resolver = get_ns_resolver(ns_pattern, resolver)

    return force_text(iri_to_uri(resolver._reverse_with_prefix(view, prefix, *args, **kwargs)))

reverse_lazy = lazy(reverse, six.text_type)


def clear_url_caches():
    get_callable.cache_clear()
    get_resolver.cache_clear()
    get_ns_resolver.cache_clear()


def set_script_prefix(prefix):
    """
    Sets the script prefix for the current thread.
    """
    if not prefix.endswith('/'):
        prefix += '/'
    _prefixes.value = prefix


def get_script_prefix():
    """
    Returns the currently active script prefix. Useful for client code that
    wishes to construct its own URLs manually (although accessing the request
    instance is normally going to be a lot cleaner).
    """
    return getattr(_prefixes, "value", '/')


def clear_script_prefix():
    """
    Unsets the script prefix for the current thread.
    """
    try:
        del _prefixes.value
    except AttributeError:
        pass


def set_urlconf(urlconf_name):
    """
    Sets the URLconf for the current thread (overriding the default one in
    settings). Set to None to revert back to the default.
    """
    if urlconf_name:
        _urlconfs.value = urlconf_name
    else:
        if hasattr(_urlconfs, "value"):
            del _urlconfs.value


def get_urlconf(default=None):
    """
    Returns the root URLconf to use for the current thread if it has been
    changed from the default one.
    """
    return getattr(_urlconfs, "value", default)


def is_valid_path(path, urlconf=None):
    """
    Returns True if the given path resolves against the default URL resolver,
    False otherwise. This is a convenience method to make working with
    "is this a match?" cases easier, avoiding unnecessarily indented
    try...except blocks.
    """
    try:
        resolve(path, urlconf)
        return True
    except Resolver404:
        return False


def translate_url(url, lang_code):
    """
    Given a URL (absolute or relative), try to get its translated version in
    the `lang_code` language (either by i18n_patterns or by translated regex).
    Return the original URL if no translated version is found.
    """
    parsed = urlsplit(url)
    try:
        match = resolve(parsed.path)
    except Resolver404:
        pass
    else:
        to_be_reversed = "%s:%s" % (match.namespace, match.url_name) if match.namespace else match.url_name
        with override(lang_code):
            try:
                url = reverse(to_be_reversed, args=match.args, kwargs=match.kwargs)
            except NoReverseMatch:
                pass
            else:
                url = urlunsplit((parsed.scheme, parsed.netloc, url, parsed.query, parsed.fragment))
    return url
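
# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module). The namespace,
# URL name and language code below are hypothetical and assume a project
# URLconf and configured settings that define them; the helper only shows how
# reverse() and translate_url() defined above fit together and is never called
# here.
def _example_reverse_usage():
    # Build a URL from a namespaced URL name and keyword arguments.
    detail_url = reverse('news:detail', kwargs={'pk': 3})

    # Re-resolve that URL and reverse it again under another language prefix,
    # falling back to the original URL when no translated match exists.
    return translate_url(detail_url, 'fr')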
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# Copyright (c) 2012 NTT DOCOMO, INC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Utilities and helper functions."""

import contextlib
import datetime
import errno
import hashlib
import os
import re
import shutil
import tempfile

import jinja2
from oslo_concurrency import processutils
from oslo_log import log as logging
from oslo_utils import netutils
from oslo_utils import timeutils
import paramiko
import pytz
import six

from ironic.common import exception
from ironic.common.i18n import _, _LE, _LW
from ironic.conf import CONF

LOG = logging.getLogger(__name__)

warn_deprecated_extra_vif_port_id = False


def _get_root_helper():
    # NOTE(jlvillal): This function has been moved to ironic-lib and is
    # planned to be deleted here. If you need to modify this function,
    # please also make the same modification in ironic-lib.
    return 'sudo ironic-rootwrap %s' % CONF.rootwrap_config


def execute(*cmd, **kwargs):
    """Convenience wrapper around oslo's execute() method.

    :param cmd: Passed to processutils.execute.
    :param use_standard_locale: True | False. Defaults to False. If set to
                                True, execute command with standard locale
                                added to environment variables.
    :returns: (stdout, stderr) from process execution
    :raises: UnknownArgumentError
    :raises: ProcessExecutionError
    """
    use_standard_locale = kwargs.pop('use_standard_locale', False)
    if use_standard_locale:
        env = kwargs.pop('env_variables', os.environ.copy())
        env['LC_ALL'] = 'C'
        kwargs['env_variables'] = env
    if kwargs.get('run_as_root') and 'root_helper' not in kwargs:
        kwargs['root_helper'] = _get_root_helper()
    result = processutils.execute(*cmd, **kwargs)
    LOG.debug('Execution completed, command line is "%s"',
              ' '.join(map(str, cmd)))
    LOG.debug('Command stdout is: "%s"', result[0])
    LOG.debug('Command stderr is: "%s"', result[1])
    return result


def ssh_connect(connection):
    """Method to connect to a remote system using the SSH protocol.

    :param connection: a dict of connection parameters.
    :returns: paramiko.SSHClient -- an active ssh connection.
    :raises: SSHConnectFailed
    """
    try:
        ssh = paramiko.SSHClient()
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        key_contents = connection.get('key_contents')
        if key_contents:
            data = six.StringIO(key_contents)
            if "BEGIN RSA PRIVATE" in key_contents:
                pkey = paramiko.RSAKey.from_private_key(data)
            elif "BEGIN DSA PRIVATE" in key_contents:
                pkey = paramiko.DSSKey.from_private_key(data)
            else:
                # Can't include the key contents - secure material.
                raise ValueError(_("Invalid private key"))
        else:
            pkey = None
        ssh.connect(connection.get('host'),
                    username=connection.get('username'),
                    password=connection.get('password'),
                    port=connection.get('port', 22),
                    pkey=pkey,
                    key_filename=connection.get('key_filename'),
                    timeout=connection.get('timeout', 10))

        # send TCP keepalive packets every 20 seconds
        ssh.get_transport().set_keepalive(20)
    except Exception as e:
        LOG.debug("SSH connect failed: %s", e)
        raise exception.SSHConnectFailed(host=connection.get('host'))

    return ssh


def is_valid_datapath_id(datapath_id):
    """Verify the format of an OpenFlow datapath_id.

    Check if a datapath_id is valid and contains 16 hexadecimal digits.
    Datapath ID format: the lower 48-bits are for a MAC address, while
    the upper 16-bits are implementer-defined.

    :param datapath_id: OpenFlow datapath_id to be validated.
    :returns: True if valid. False if not.
    """
    m = "^[0-9a-f]{16}$"
    return (isinstance(datapath_id, six.string_types) and
            re.match(m, datapath_id.lower()))


_is_valid_logical_name_re = re.compile(r'^[A-Z0-9-._~]+$', re.I)

# old is_hostname_safe() regex, retained for backwards compat
_is_hostname_safe_re = re.compile(r"""^
[a-z0-9]([a-z0-9\-]{0,61}[a-z0-9])?  # host
(\.[a-z0-9\-_]{0,62}[a-z0-9])*       # domain
\.?                                  # trailing dot
$""", re.X)


def is_valid_logical_name(hostname):
    """Determine if a logical name is valid.

    The logical name may only consist of RFC3986 unreserved
    characters, to wit:

        ALPHA / DIGIT / "-" / "." / "_" / "~"
    """
    if not isinstance(hostname, six.string_types) or len(hostname) > 255:
        return False

    return _is_valid_logical_name_re.match(hostname) is not None


def is_hostname_safe(hostname):
    """Old check for valid logical node names.

    Retained for compatibility with REST API < 1.10.

    Nominally, checks that the supplied hostname conforms to:
        * http://en.wikipedia.org/wiki/Hostname
        * http://tools.ietf.org/html/rfc952
        * http://tools.ietf.org/html/rfc1123

    In practice, this check has several shortcomings and errors that
    are more thoroughly documented in bug #1468508.

    :param hostname: The hostname to be validated.
    :returns: True if valid. False if not.
    """
    if not isinstance(hostname, six.string_types) or len(hostname) > 255:
        return False

    return _is_hostname_safe_re.match(hostname) is not None


def is_valid_no_proxy(no_proxy):
    """Check no_proxy validity.

    Check if no_proxy value that will be written to environment variable by
    ironic-python-agent is valid.

    :param no_proxy: the value that requires validity check. Expected to be a
        comma-separated list of host names, IP addresses and domain names
        (with optional :port).
    :returns: True if no_proxy is valid, False otherwise.
    """
    if not isinstance(no_proxy, six.string_types):
        return False
    hostname_re = re.compile(r'(?!-)[A-Z\d-]{1,63}(?<!-)$', re.IGNORECASE)
    for hostname in no_proxy.split(','):
        hostname = hostname.strip().split(':')[0]
        if not hostname:
            continue
        max_length = 253
        if hostname.startswith('.'):
            # It is allowed to specify a dot in the beginning of the value to
            # indicate that it is a domain name, which means there will be at
            # least one additional character in full hostname. *. is also
            # possible but may be not supported by some clients, so is not
            # considered valid here.
            hostname = hostname[1:]
            max_length = 251

        if len(hostname) > max_length:
            return False

        if not all(hostname_re.match(part) for part in hostname.split('.')):
            return False

    return True


def validate_and_normalize_mac(address):
    """Validate a MAC address and return normalized form.

    Checks whether the supplied MAC address is formally correct and
    normalizes it to all lower case.

    :param address: MAC address to be validated and normalized.
    :returns: Normalized and validated MAC address.
    :raises: InvalidMAC If the MAC address is not valid.
    """
    if not netutils.is_valid_mac(address):
        raise exception.InvalidMAC(mac=address)
    return address.lower()


def validate_and_normalize_datapath_id(datapath_id):
    """Validate an OpenFlow datapath_id and return normalized form.

    Checks whether the supplied OpenFlow datapath_id is formally correct and
    normalizes it to all lower case.

    :param datapath_id: OpenFlow datapath_id to be validated and normalized.
    :returns: Normalized and validated OpenFlow datapath_id.
    :raises: InvalidDatapathID If an OpenFlow datapath_id is not valid.
    """
    if not is_valid_datapath_id(datapath_id):
        raise exception.InvalidDatapathID(datapath_id=datapath_id)
    return datapath_id.lower()


def _get_hash_object(hash_algo_name):
    """Create a hash object based on given algorithm.

    :param hash_algo_name: name of the hashing algorithm.
    :raises: InvalidParameterValue, on unsupported or invalid input.
    :returns: a hash object based on the given named algorithm.
    """
    algorithms = (hashlib.algorithms_guaranteed if six.PY3
                  else hashlib.algorithms)
    if hash_algo_name not in algorithms:
        msg = (_("Unsupported/Invalid hash name '%s' provided.")
               % hash_algo_name)
        LOG.error(msg)
        raise exception.InvalidParameterValue(msg)

    return getattr(hashlib, hash_algo_name)()


def hash_file(file_like_object, hash_algo='md5'):
    """Generate a hash for the contents of a file.

    It returns a hash of the file object as a string of double length,
    containing only hexadecimal digits. It supports all the algorithms
    hashlib does.

    :param file_like_object: file-like object whose hash is to be calculated.
    :param hash_algo: name of the hashing strategy, default being 'md5'.
    :raises: InvalidParameterValue, on unsupported or invalid input.
    :returns: a condensed digest of the bytes of contents.
    """
    checksum = _get_hash_object(hash_algo)
    for chunk in iter(lambda: file_like_object.read(32768), b''):
        checksum.update(chunk)
    return checksum.hexdigest()


def file_has_content(path, content, hash_algo='md5'):
    """Checks that content of the file is the same as provided reference.

    :param path: path to file
    :param content: reference content to check against
    :param hash_algo: hashing algo from hashlib to use, default is 'md5'
    :returns: True if the hash of reference content is the same as
        the hash of file's content, False otherwise
    """
    with open(path, 'rb') as existing:
        file_hash_hex = hash_file(existing, hash_algo=hash_algo)
    ref_hash = _get_hash_object(hash_algo)
    ref_hash.update(content)
    return file_hash_hex == ref_hash.hexdigest()


@contextlib.contextmanager
def tempdir(**kwargs):
    tempfile.tempdir = CONF.tempdir
    tmpdir = tempfile.mkdtemp(**kwargs)
    try:
        yield tmpdir
    finally:
        try:
            shutil.rmtree(tmpdir)
        except OSError as e:
            LOG.error(_LE('Could not remove tmpdir: %s'), e)


def rmtree_without_raise(path):
    try:
        if os.path.isdir(path):
            shutil.rmtree(path)
    except OSError as e:
        LOG.warning(_LW("Failed to remove dir %(path)s, error: %(e)s"),
                    {'path': path, 'e': e})


def write_to_file(path, contents):
    with open(path, 'w') as f:
        f.write(contents)


def create_link_without_raise(source, link):
    try:
        os.symlink(source, link)
    except OSError as e:
        if e.errno == errno.EEXIST:
            return
        else:
            LOG.warning(
                _LW("Failed to create symlink from %(source)s to %(link)s"
                    ", error: %(e)s"),
                {'source': source, 'link': link, 'e': e})


def safe_rstrip(value, chars=None):
    """Removes trailing characters from a string if that does not make it
    empty.

    :param value: A string value that will be stripped.
    :param chars: Characters to remove.
    :return: Stripped value.
    """
    if not isinstance(value, six.string_types):
        LOG.warning(_LW("Failed to remove trailing character. Returning "
                        "original object. Supplied object is not a string: "
                        "%s."), value)
        return value

    return value.rstrip(chars) or value


def mount(src, dest, *args):
    """Mounts a device/image file on specified location.

    :param src: the path to the source file for mounting
    :param dest: the path where it needs to be mounted.
    :param args: a tuple containing the arguments to be
        passed to mount command.
    :raises: processutils.ProcessExecutionError if it failed
        to run the process.
    """
    args = ('mount', ) + args + (src, dest)
    execute(*args, run_as_root=True, check_exit_code=[0])


def umount(loc, *args):
    """Umounts a mounted location.

    :param loc: the path to be unmounted.
    :param args: a tuple containing the arguments to be
        passed to the umount command.
    :raises: processutils.ProcessExecutionError if it failed
        to run the process.
    """
    args = ('umount', ) + args + (loc, )
    execute(*args, run_as_root=True, check_exit_code=[0])


def check_dir(directory_to_check=None, required_space=1):
    """Check a directory is usable.

    This function can be used by drivers to check that directories
    they need to write to are usable. This should be called from the
    drivers init function. This function checks that the directory
    exists and then calls check_dir_writable and check_dir_free_space.
    If directory_to_check is not provided the default is to use the
    temp directory.

    :param directory_to_check: the directory to check.
    :param required_space: amount of space to check for in MiB.
    :raises: PathNotFound if directory cannot be found
    :raises: DirectoryNotWritable if user is unable to write to the
             directory
    :raises: InsufficientDiskSpace if free space is < required space
    """
    # check if directory_to_check is passed in, if not set to tempdir
    if directory_to_check is None:
        directory_to_check = CONF.tempdir

    LOG.debug("checking directory: %s", directory_to_check)

    if not os.path.exists(directory_to_check):
        raise exception.PathNotFound(dir=directory_to_check)

    _check_dir_writable(directory_to_check)
    _check_dir_free_space(directory_to_check, required_space)


def _check_dir_writable(chk_dir):
    """Check that the chk_dir is able to be written to.

    :param chk_dir: Directory to check
    :raises: DirectoryNotWritable if user is unable to write to the
             directory
    """
    is_writable = os.access(chk_dir, os.W_OK)
    if not is_writable:
        raise exception.DirectoryNotWritable(dir=chk_dir)


def _check_dir_free_space(chk_dir, required_space=1):
    """Check that directory has some free space.

    :param chk_dir: Directory to check
    :param required_space: amount of space to check for in MiB.
    :raises: InsufficientDiskSpace if free space is < required space
    """
    # check that we have some free space
    stat = os.statvfs(chk_dir)
    # get dir free space in MiB.
    free_space = float(stat.f_bsize * stat.f_bavail) / 1024 / 1024
    # check for at least required_space MiB free
    if free_space < required_space:
        raise exception.InsufficientDiskSpace(path=chk_dir,
                                              required=required_space,
                                              actual=free_space)


def get_updated_capabilities(current_capabilities, new_capabilities):
    """Returns an updated capability string.

    This method updates the original (or current) capabilities with the new
    capabilities. The original capabilities would typically be from a node's
    properties['capabilities']. From new_capabilities, any new capabilities
    are added, and existing capabilities may have their values updated. This
    updated capabilities string is returned.

    :param current_capabilities: Current capability string
    :param new_capabilities: the dictionary of capabilities to be updated.
    :returns: An updated capability string with new_capabilities.
    :raises: ValueError, if current_capabilities is malformed or
        if new_capabilities is not a dictionary
    """
    if not isinstance(new_capabilities, dict):
        raise ValueError(
            _("Cannot update capabilities. The new capabilities should be in "
              "a dictionary. Provided value is %s") % new_capabilities)

    cap_dict = {}
    if current_capabilities:
        try:
            cap_dict = dict(x.split(':', 1)
                            for x in current_capabilities.split(','))
        except ValueError:
            # Capabilities can be filled by operator. ValueError can
            # occur in malformed capabilities like:
            # properties/capabilities='boot_mode:bios,boot_option'.
            raise ValueError(
                _("Invalid capabilities string '%s'.") % current_capabilities)

    cap_dict.update(new_capabilities)
    return ','.join('%(key)s:%(value)s' % {'key': key, 'value': value}
                    for key, value in cap_dict.items())


def is_regex_string_in_file(path, string):
    with open(path, 'r') as inf:
        return any(re.search(string, line) for line in inf.readlines())


def unix_file_modification_datetime(file_name):
    return timeutils.normalize_time(
        # normalize time to be UTC without timezone
        datetime.datetime.fromtimestamp(
            # fromtimestamp will return local time by default, make it UTC
            os.path.getmtime(file_name), tz=pytz.utc
        )
    )


def validate_network_port(port, port_name="Port"):
    """Validates the given port.

    :param port: TCP/UDP port.
    :param port_name: Name of the port.
    :returns: An integer port number.
    :raises: InvalidParameterValue, if the port is invalid.
    """
    try:
        port = int(port)
    except ValueError:
        raise exception.InvalidParameterValue(_(
            '%(port_name)s "%(port)s" is not a valid integer.') %
            {'port_name': port_name, 'port': port})
    if port < 1 or port > 65535:
        raise exception.InvalidParameterValue(_(
            '%(port_name)s "%(port)s" is out of range. Valid port '
            'numbers must be between 1 and 65535.') %
            {'port_name': port_name, 'port': port})
    return port


def render_template(template, params, is_file=True):
    """Renders Jinja2 template file with given parameters.

    :param template: full path to the Jinja2 template file
    :param params: dictionary with parameters to use when rendering
    :param is_file: whether template is file or string with template itself
    :returns: the rendered template as a string
    """
    if is_file:
        tmpl_path, tmpl_name = os.path.split(template)
        loader = jinja2.FileSystemLoader(tmpl_path)
    else:
        tmpl_name = 'template'
        loader = jinja2.DictLoader({tmpl_name: template})
    env = jinja2.Environment(loader=loader)
    tmpl = env.get_template(tmpl_name)
    return tmpl.render(params)


def warn_about_deprecated_extra_vif_port_id():
    global warn_deprecated_extra_vif_port_id
    if not warn_deprecated_extra_vif_port_id:
        warn_deprecated_extra_vif_port_id = True
        LOG.warning(_LW("Attaching VIF to a port/portgroup via "
                        "extra['vif_port_id'] is deprecated and will not "
                        "be supported in the Pike release. API endpoint "
                        "v1/nodes/<node>/vifs should be used instead."))