repo_name
stringlengths
5
100
path
stringlengths
4
294
copies
stringclasses
990 values
size
stringlengths
4
7
content
stringlengths
666
1M
license
stringclasses
15 values
DgFutureLab/satoyama
satoyama/database.py
1
1967
from sqlalchemy import create_engine from sqlalchemy.orm import scoped_session, sessionmaker from sqlalchemy.ext.declarative import declarative_base import inspect engine = create_engine('postgresql://halfdan:halfdan@localhost/tekrice_dev', convert_unicode = True) db_session = scoped_session(sessionmaker(autocommit=False, autoflush=False, bind=engine)) Base = declarative_base() Base.query = db_session.query_property() def nuke_db(): import models db_session.close() Base.metadata.drop_all(bind=engine) def init_db(): import models # import all modules here that might define models so that # they will be registered properly on the metadata. Otherwise # you will have to import them first before calling init_db() Base.metadata.create_all(bind=engine) def recreate(): nuke_db() init_db() def get_defined_models(): import models import sqlalchemy members = dict(inspect.getmembers(models)) members.pop('Base') models = list() for name, member in members.items(): if isinstance(member, sqlalchemy.ext.declarative.api.DeclarativeMeta): models.append(member) return models def db_demo(): from models import Node, Sensor, SensorType, Reading from datetime import datetime import time from random import gauss sensortype = SensorType.create(name = 'Qartz thermometer', unit = 'Celcius') node = Node.create(alias = 'ricefield1') sensor1 = Sensor.create(sensortype = sensortype, node = node, alias = 'CPU temperature') sensor2 = Sensor.create(sensortype = sensortype, node = node, alias = 'Ambient air temperature') for i in range(5): Reading.create(sensor = sensor1, value = gauss(80, 0.1), timestamp = datetime.now()) time.sleep(0.1) for i in range(5): Reading.create(sensor = sensor2, value = gauss(25, 0.1), timestamp = datetime.now()) time.sleep(0.1) print print 'NODE:\t', node.json() print print 'SENSOR1:\t', sensor1.json() print print 'SENSOR2:\t', sensor2.json() return node, sensor1, sensor2
mit
rnirmal/savanna
savanna/plugins/hdp/clusterspec.py
1
10479
# Copyright (c) 2013 Hortonworks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import os from savanna.openstack.common import jsonutils as json from savanna.plugins.hdp import configprovider as cfg import savanna.utils.openstack.nova as n_helper class ClusterSpec(): def _get_servers_from_savanna_cluster(self, cluster): servers = [] for node_group in cluster.node_groups: for server in node_group.instances: setattr(server, 'role', node_group.name) setattr(server, 'node_processes', node_group.node_processes) servers.append(server) return servers def __init__(self, cluster_template, cluster=None): self.services = [] self.configurations = {} self.node_groups = {} self.str = cluster_template servers = [] if cluster is not None: if hasattr(cluster, 'node_groups'): servers = self._get_servers_from_savanna_cluster(cluster) else: servers = cluster.instances host_manifest = self._generate_host_manifest(servers) #TODO(jspeidel): don't hard code ambari server ambari_server = self._get_ambari_host(servers) if ambari_server is not None: cluster_template = cluster_template.replace('%AMBARI_HOST%', ambari_server.fqdn) else: raise RuntimeError('No Ambari server host found') self.str = self._add_manifest_to_config(cluster_template, host_manifest) template_json = json.loads(self.str) self._parse_services(template_json) self._parse_configurations(template_json) self._parse_host_component_mappings(template_json) def _get_ambari_host(self, servers): # iterate thru servers and find the master 
server host = next((server for server in servers if server.node_processes is not None and 'AMBARI_SERVER' in server.node_processes), None) if host is None: host = next((server for server in servers if server.role == 'MASTER'), None) return host def normalize(self): return NormalizedClusterConfig(self) def _parse_services(self, template_json): for s in template_json['services']: service = Service(s['name']) self.services.append(service) for c in s['components']: component = Component(c['name'], c['type'], c['cardinality']) service.add_component(component) if 'users' in s: for u in s['users']: user = User(u['name'], u['password'], u['groups']) service.add_user(user) configs = self._parse_configurations(s) for config in configs: service.add_configuration(config) def _parse_configurations(self, template_json): config_names = [] for config in template_json['configurations']: config_props = {} name = config['name'] config_names.append(name) if name in self.configurations: config_props = self.configurations[name] else: self.configurations[name] = config_props if 'properties' in config: for prop in config['properties']: config_props[prop['name']] = prop['value'] return config_names def _parse_host_component_mappings(self, template_json): for group in template_json['host_role_mappings']: node_group = NodeGroup(group['name'].lower()) for component in group['components']: node_group.add_component(component['name']) for host in group['hosts']: if 'predicate' in host: node_group.predicate = host['predicate'] if 'cardinality' in host: node_group.cardinality = host['cardinality'] if 'default_count' in host: node_group.default_count = host['default_count'] self.node_groups[node_group.name] = node_group def _generate_host_manifest(self, servers): host_manifest = {} hosts = [] host_id = 1 for server in servers: instance_info = n_helper.get_instance_info(server) hosts.append({'host_id': host_id, 'hostname': server.hostname, 'role': server.role, 'vm_image': instance_info.image, 
'vm_flavor': instance_info.flavor, 'public_ip': server.management_ip, 'private_ip': server.internal_ip}) host_id += 1 host_manifest['hosts'] = hosts return json.dumps(host_manifest).strip('{}') def _add_manifest_to_config(self, cluster_template, host_manifest): # add the host manifest to the enf of the cluster template return '{0},\n{1}\n}}'.format(cluster_template.rstrip('}'), host_manifest) class Service(): def __init__(self, name): self.name = name self.configurations = [] self.components = [] self.users = [] def add_component(self, component): self.components.append(component) def add_configuration(self, configuration): self.configurations.append(configuration) def add_user(self, user): self.users.append(user) class Component(): def __init__(self, name, component_type, cardinality): self.name = name self.type = component_type self.cardinality = cardinality class NodeGroup(): def __init__(self, name): self.name = name self.components = [] self.predicate = None self.cardinality = None self.default_count = None def add_component(self, component): self.components.append(component) class User(): def __init__(self, name, password, groups): self.name = name self.password = password self.groups = groups class NormalizedClusterConfig(): def __init__(self, cluster_spec): #TODO(jspeidel): get from stack config self.hadoop_version = '1.3.0' self.cluster_configs = [] self.node_groups = [] self.config = cfg.ConfigurationProvider( json.load(open(os.path.join(os.path.dirname(__file__), 'resources', 'ambari-config-resource.json'), "r"))) self._parse_configurations(cluster_spec.configurations) self._parse_node_groups(cluster_spec.node_groups) def _parse_configurations(self, configurations): for config_name, properties in configurations.items(): for prop, value in properties.items(): target = self._get_property_target(config_name, prop) prop_type = self._get_property_type(prop, value) #todo: should we supply a scope? 
self.cluster_configs.append( NormalizedConfigEntry(NormalizedConfig( prop, prop_type, value, target, 'cluster'), value)) def _parse_node_groups(self, node_groups): for node_group in node_groups.values(): self.node_groups.append(NormalizedNodeGroup(node_group)) def _get_property_target(self, config, prop): # Once config resource is complete we won't need to fall through # based on config type target = self.config.get_applicable_target(prop) if not target: if config == 'hdfs-site': target = 'HDFS' elif config == 'mapred-site': target = 'MAPREDUCE' else: target = 'general' return target def _get_property_type(self, prop, value): #TODO(jspeidel): seems that all numeric prop values in default config # are encoded as strings. This may be incorrect. #TODO(jspeidel): should probably analyze string value to determine if # it is numeric #TODO(jspeidel): would then need to know whether Ambari expects a # string or a numeric value prop_type = type(value).__name__ #print 'Type: {0}'.format(prop_type) if prop_type == 'str' or prop_type == 'unicode' or value == '': return 'string' elif prop_type == 'int': return 'integer' elif prop_type == 'bool': return 'boolean' else: raise ValueError( "Could not determine property type for property '{0}' with " "value: {1}". 
format(prop, value)) class NormalizedConfig(): def __init__(self, name, config_type, default_value, target, scope): self.name = name self.description = None self.type = config_type self.default_value = default_value self.is_optional = False self.applicable_target = target self.scope = scope class NormalizedConfigEntry(): def __init__(self, config, value): self.config = config self.value = value class NormalizedNodeGroup(): def __init__(self, node_group): self.name = node_group.name self.node_processes = node_group.components self.node_configs = None #TODO(jpseidel): should not have to specify img/flavor self.img = None # TODO(jmaron) the flavor will be set via an ambari blueprint setting, # but that setting doesn't exist yet. It will be addressed by a bug # fix shortly self.flavor = 3 self.count = node_group.default_count #TODO(jspeidel): self.requirements
apache-2.0
StephenWeber/ansible
lib/ansible/galaxy/role.py
15
15020
######################################################################## # # (C) 2015, Brian Coca <bcoca@ansible.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # ######################################################################## from __future__ import (absolute_import, division, print_function) __metaclass__ = type import datetime import os import tarfile import tempfile import yaml from distutils.version import LooseVersion from shutil import rmtree import ansible.constants as C from ansible.errors import AnsibleError from ansible.module_utils.urls import open_url from ansible.playbook.role.requirement import RoleRequirement from ansible.galaxy.api import GalaxyAPI try: from __main__ import display except ImportError: from ansible.utils.display import Display display = Display() class GalaxyRole(object): SUPPORTED_SCMS = set(['git', 'hg']) META_MAIN = os.path.join('meta', 'main.yml') META_INSTALL = os.path.join('meta', '.galaxy_install_info') ROLE_DIRS = ('defaults','files','handlers','meta','tasks','templates','vars','tests') def __init__(self, galaxy, name, src=None, version=None, scm=None, path=None): self._metadata = None self._install_info = None self._validate_certs = not galaxy.options.ignore_certs display.debug('Validate TLS certificates: %s' % self._validate_certs) self.options = galaxy.options self.galaxy = galaxy self.name = name self.version = 
version self.src = src or name self.scm = scm if path is not None: if self.name not in path: path = os.path.join(path, self.name) self.path = path else: for role_path_dir in galaxy.roles_paths: role_path = os.path.join(role_path_dir, self.name) if os.path.exists(role_path): self.path = role_path break else: # use the first path by default self.path = os.path.join(galaxy.roles_paths[0], self.name) # create list of possible paths self.paths = [x for x in galaxy.roles_paths] self.paths = [os.path.join(x, self.name) for x in self.paths] def __repr__(self): """ Returns "rolename (version)" if version is not null Returns "rolename" otherwise """ if self.version: return "%s (%s)" % (self.name, self.version) else: return self.name def __eq__(self, other): return self.name == other.name @property def metadata(self): """ Returns role metadata """ if self._metadata is None: meta_path = os.path.join(self.path, self.META_MAIN) if os.path.isfile(meta_path): try: f = open(meta_path, 'r') self._metadata = yaml.safe_load(f) except: display.vvvvv("Unable to load metadata for %s" % self.name) return False finally: f.close() return self._metadata @property def install_info(self): """ Returns role install info """ if self._install_info is None: info_path = os.path.join(self.path, self.META_INSTALL) if os.path.isfile(info_path): try: f = open(info_path, 'r') self._install_info = yaml.safe_load(f) except: display.vvvvv("Unable to load Galaxy install info for %s" % self.name) return False finally: f.close() return self._install_info def _write_galaxy_install_info(self): """ Writes a YAML-formatted file to the role's meta/ directory (named .galaxy_install_info) which contains some information we can use later for commands like 'list' and 'info'. 
""" info = dict( version=self.version, install_date=datetime.datetime.utcnow().strftime("%c"), ) if not os.path.exists(os.path.join(self.path, 'meta')): os.makedirs(os.path.join(self.path, 'meta')) info_path = os.path.join(self.path, self.META_INSTALL) with open(info_path, 'w+') as f: try: self._install_info = yaml.safe_dump(info, f) except: return False return True def remove(self): """ Removes the specified role from the roles path. There is a sanity check to make sure there's a meta/main.yml file at this path so the user doesn't blow away random directories. """ if self.metadata: try: rmtree(self.path) return True except: pass return False def fetch(self, role_data): """ Downloads the archived role from github to a temp location """ if role_data: # first grab the file and save it to a temp location if "github_user" in role_data and "github_repo" in role_data: archive_url = 'https://github.com/%s/%s/archive/%s.tar.gz' % (role_data["github_user"], role_data["github_repo"], self.version) else: archive_url = self.src display.display("- downloading role from %s" % archive_url) try: url_file = open_url(archive_url, validate_certs=self._validate_certs) temp_file = tempfile.NamedTemporaryFile(delete=False) data = url_file.read() while data: temp_file.write(data) data = url_file.read() temp_file.close() return temp_file.name except Exception as e: display.error("failed to download the file: %s" % str(e)) return False def install(self): # the file is a tar, so open it that way and extract it # to the specified (or default) roles directory local_file = False if self.scm: # create tar file from scm url tmp_file = RoleRequirement.scm_archive_role(**self.spec) elif self.src: if os.path.isfile(self.src): # installing a local tar.gz local_file = True tmp_file = self.src elif '://' in self.src: role_data = self.src tmp_file = self.fetch(role_data) else: api = GalaxyAPI(self.galaxy) role_data = api.lookup_role_by_name(self.src) if not role_data: raise AnsibleError("- sorry, %s 
was not found on %s." % (self.src, api.api_server)) if role_data.get('role_type') == 'CON' and not os.environ.get('ANSIBLE_CONTAINER'): # Container Enabled, running outside of a container display.warning("%s is a Container Enabled role and should only be installed using " "Ansible Container" % self.name) if role_data.get('role_type') == 'APP': # Container Role display.warning("%s is a Container App role and should only be installed using Ansible " "Container" % self.name) role_versions = api.fetch_role_related('versions', role_data['id']) if not self.version: # convert the version names to LooseVersion objects # and sort them to get the latest version. If there # are no versions in the list, we'll grab the head # of the master branch if len(role_versions) > 0: loose_versions = [LooseVersion(a.get('name',None)) for a in role_versions] loose_versions.sort() self.version = str(loose_versions[-1]) elif role_data.get('github_branch', None): self.version = role_data['github_branch'] else: self.version = 'master' elif self.version != 'master': if role_versions and str(self.version) not in [a.get('name', None) for a in role_versions]: raise AnsibleError("- the specified version (%s) of %s was not found in the list of available versions (%s)." 
% (self.version, self.name, role_versions)) tmp_file = self.fetch(role_data) else: raise AnsibleError("No valid role data found") if tmp_file: display.debug("installing from %s" % tmp_file) if not tarfile.is_tarfile(tmp_file): raise AnsibleError("the file downloaded was not a tar.gz") else: if tmp_file.endswith('.gz'): role_tar_file = tarfile.open(tmp_file, "r:gz") else: role_tar_file = tarfile.open(tmp_file, "r") # verify the role's meta file meta_file = None members = role_tar_file.getmembers() # next find the metadata file for member in members: if self.META_MAIN in member.name: # Look for parent of meta/main.yml # Due to possibility of sub roles each containing meta/main.yml # look for shortest length parent meta_parent_dir = os.path.dirname(os.path.dirname(member.name)) if not meta_file: archive_parent_dir = meta_parent_dir meta_file = member else: if len(meta_parent_dir) < len(archive_parent_dir): archive_parent_dir = meta_parent_dir meta_file = member if not meta_file: raise AnsibleError("this role does not appear to have a meta/main.yml file.") else: try: self._metadata = yaml.safe_load(role_tar_file.extractfile(meta_file)) except: raise AnsibleError("this role does not appear to have a valid meta/main.yml file.") # we strip off any higher-level directories for all of the files contained within # the tar file here. The default is 'github_repo-target'. Gerrit instances, on the other # hand, does not have a parent directory at all. installed = False while not installed: display.display("- extracting %s to %s" % (self.name, self.path)) try: if os.path.exists(self.path): if not os.path.isdir(self.path): raise AnsibleError("the specified roles path exists and is not a directory.") elif not getattr(self.options, "force", False): raise AnsibleError("the specified role %s appears to already exist. Use --force to replace it." 
% self.name) else: # using --force, remove the old path if not self.remove(): raise AnsibleError("%s doesn't appear to contain a role.\n please remove this directory manually if you really want to put the role here." % self.path) else: os.makedirs(self.path) # now we do the actual extraction to the path for member in members: # we only extract files, and remove any relative path # bits that might be in the file for security purposes # and drop any containing directory, as mentioned above if member.isreg() or member.issym(): parts = member.name.replace(archive_parent_dir, "").split(os.sep) final_parts = [] for part in parts: if part != '..' and '~' not in part and '$' not in part: final_parts.append(part) member.name = os.path.join(*final_parts) role_tar_file.extract(member, self.path) # write out the install info file for later use self._write_galaxy_install_info() installed = True except OSError as e: error = True if e[0] == 13 and len(self.paths) > 1: current = self.paths.index(self.path) nextidx = current + 1 if len(self.paths) >= current: self.path = self.paths[nextidx] error = False if error: raise AnsibleError("Could not update files in %s: %s" % (self.path, str(e))) # return the parsed yaml metadata display.display("- %s was installed successfully" % str(self)) if not local_file: try: os.unlink(tmp_file) except (OSError,IOError) as e: display.warning("Unable to remove tmp file (%s): %s" % (tmp_file, str(e))) return True return False @property def spec(self): """ Returns role spec info { 'scm': 'git', 'src': 'http://git.example.com/repos/repo.git', 'version': 'v1.0', 'name': 'repo' } """ return dict(scm=self.scm, src=self.src, version=self.version, name=self.name)
gpl-3.0
stevehof/location-ninja
lib/sqlalchemy/orm/exc.py
33
5439
# orm/exc.py # Copyright (C) 2005-2014 the SQLAlchemy authors and contributors # <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """SQLAlchemy ORM exceptions.""" from .. import exc as sa_exc, util NO_STATE = (AttributeError, KeyError) """Exception types that may be raised by instrumentation implementations.""" class StaleDataError(sa_exc.SQLAlchemyError): """An operation encountered database state that is unaccounted for. Conditions which cause this to happen include: * A flush may have attempted to update or delete rows and an unexpected number of rows were matched during the UPDATE or DELETE statement. Note that when version_id_col is used, rows in UPDATE or DELETE statements are also matched against the current known version identifier. * A mapped object with version_id_col was refreshed, and the version number coming back from the database does not match that of the object itself. * A object is detached from its parent object, however the object was previously attached to a different parent identity which was garbage collected, and a decision cannot be made if the new parent was really the most recent "parent". .. versionadded:: 0.7.4 """ ConcurrentModificationError = StaleDataError class FlushError(sa_exc.SQLAlchemyError): """A invalid condition was detected during flush().""" class UnmappedError(sa_exc.InvalidRequestError): """Base for exceptions that involve expected mappings not present.""" class ObjectDereferencedError(sa_exc.SQLAlchemyError): """An operation cannot complete due to an object being garbage collected. 
""" class DetachedInstanceError(sa_exc.SQLAlchemyError): """An attempt to access unloaded attributes on a mapped instance that is detached.""" class UnmappedInstanceError(UnmappedError): """An mapping operation was requested for an unknown instance.""" @util.dependencies("sqlalchemy.orm.base") def __init__(self, base, obj, msg=None): if not msg: try: base.class_mapper(type(obj)) name = _safe_cls_name(type(obj)) msg = ("Class %r is mapped, but this instance lacks " "instrumentation. This occurs when the instance" "is created before sqlalchemy.orm.mapper(%s) " "was called." % (name, name)) except UnmappedClassError: msg = _default_unmapped(type(obj)) if isinstance(obj, type): msg += ( '; was a class (%s) supplied where an instance was ' 'required?' % _safe_cls_name(obj)) UnmappedError.__init__(self, msg) def __reduce__(self): return self.__class__, (None, self.args[0]) class UnmappedClassError(UnmappedError): """An mapping operation was requested for an unknown class.""" def __init__(self, cls, msg=None): if not msg: msg = _default_unmapped(cls) UnmappedError.__init__(self, msg) def __reduce__(self): return self.__class__, (None, self.args[0]) class ObjectDeletedError(sa_exc.InvalidRequestError): """A refresh operation failed to retrieve the database row corresponding to an object's known primary key identity. A refresh operation proceeds when an expired attribute is accessed on an object, or when :meth:`.Query.get` is used to retrieve an object which is, upon retrieval, detected as expired. A SELECT is emitted for the target row based on primary key; if no row is returned, this exception is raised. The true meaning of this exception is simply that no row exists for the primary key identifier associated with a persistent object. The row may have been deleted, or in some cases the primary key updated to a new value, outside of the ORM's management of the target object. 
""" @util.dependencies("sqlalchemy.orm.base") def __init__(self, base, state, msg=None): if not msg: msg = "Instance '%s' has been deleted, or its "\ "row is otherwise not present." % base.state_str(state) sa_exc.InvalidRequestError.__init__(self, msg) def __reduce__(self): return self.__class__, (None, self.args[0]) class UnmappedColumnError(sa_exc.InvalidRequestError): """Mapping operation was requested on an unknown column.""" class NoResultFound(sa_exc.InvalidRequestError): """A database result was required but none was found.""" class MultipleResultsFound(sa_exc.InvalidRequestError): """A single database result was required but more than one were found.""" def _safe_cls_name(cls): try: cls_name = '.'.join((cls.__module__, cls.__name__)) except AttributeError: cls_name = getattr(cls, '__name__', None) if cls_name is None: cls_name = repr(cls) return cls_name @util.dependencies("sqlalchemy.orm.base") def _default_unmapped(base, cls): try: mappers = base.manager_of_class(cls).mappers except NO_STATE: mappers = {} except TypeError: mappers = {} name = _safe_cls_name(cls) if not mappers: return "Class '%s' is not mapped" % name
gpl-3.0
pyramania/scipy
scipy/optimize/_lsq/common.py
50
20808
"""Functions used by least-squares algorithms.""" from __future__ import division, print_function, absolute_import from math import copysign import numpy as np from numpy.linalg import norm from scipy.linalg import cho_factor, cho_solve, LinAlgError from scipy.sparse import issparse from scipy.sparse.linalg import LinearOperator, aslinearoperator EPS = np.finfo(float).eps # Functions related to a trust-region problem. def intersect_trust_region(x, s, Delta): """Find the intersection of a line with the boundary of a trust region. This function solves the quadratic equation with respect to t ||(x + s*t)||**2 = Delta**2. Returns ------- t_neg, t_pos : tuple of float Negative and positive roots. Raises ------ ValueError If `s` is zero or `x` is not within the trust region. """ a = np.dot(s, s) if a == 0: raise ValueError("`s` is zero.") b = np.dot(x, s) c = np.dot(x, x) - Delta**2 if c > 0: raise ValueError("`x` is not within the trust region.") d = np.sqrt(b*b - a*c) # Root from one fourth of the discriminant. # Computations below avoid loss of significance, see "Numerical Recipes". q = -(b + copysign(d, b)) t1 = q / a t2 = c / q if t1 < t2: return t1, t2 else: return t2, t1 def solve_lsq_trust_region(n, m, uf, s, V, Delta, initial_alpha=None, rtol=0.01, max_iter=10): """Solve a trust-region problem arising in least-squares minimization. This function implements a method described by J. J. More [1]_ and used in MINPACK, but it relies on a single SVD of Jacobian instead of series of Cholesky decompositions. Before running this function, compute: ``U, s, VT = svd(J, full_matrices=False)``. Parameters ---------- n : int Number of variables. m : int Number of residuals. uf : ndarray Computed as U.T.dot(f). s : ndarray Singular values of J. V : ndarray Transpose of VT. Delta : float Radius of a trust region. initial_alpha : float, optional Initial guess for alpha, which might be available from a previous iteration. If None, determined automatically. 
rtol : float, optional Stopping tolerance for the root-finding procedure. Namely, the solution ``p`` will satisfy ``abs(norm(p) - Delta) < rtol * Delta``. max_iter : int, optional Maximum allowed number of iterations for the root-finding procedure. Returns ------- p : ndarray, shape (n,) Found solution of a trust-region problem. alpha : float Positive value such that (J.T*J + alpha*I)*p = -J.T*f. Sometimes called Levenberg-Marquardt parameter. n_iter : int Number of iterations made by root-finding procedure. Zero means that Gauss-Newton step was selected as the solution. References ---------- .. [1] More, J. J., "The Levenberg-Marquardt Algorithm: Implementation and Theory," Numerical Analysis, ed. G. A. Watson, Lecture Notes in Mathematics 630, Springer Verlag, pp. 105-116, 1977. """ def phi_and_derivative(alpha, suf, s, Delta): """Function of which to find zero. It is defined as "norm of regularized (by alpha) least-squares solution minus `Delta`". Refer to [1]_. """ denom = s**2 + alpha p_norm = norm(suf / denom) phi = p_norm - Delta phi_prime = -np.sum(suf ** 2 / denom**3) / p_norm return phi, phi_prime suf = s * uf # Check if J has full rank and try Gauss-Newton step. 
if m >= n: threshold = EPS * m * s[0] full_rank = s[-1] > threshold else: full_rank = False if full_rank: p = -V.dot(uf / s) if norm(p) <= Delta: return p, 0.0, 0 alpha_upper = norm(suf) / Delta if full_rank: phi, phi_prime = phi_and_derivative(0.0, suf, s, Delta) alpha_lower = -phi / phi_prime else: alpha_lower = 0.0 if initial_alpha is None or not full_rank and initial_alpha == 0: alpha = max(0.001 * alpha_upper, (alpha_lower * alpha_upper)**0.5) else: alpha = initial_alpha for it in range(max_iter): if alpha < alpha_lower or alpha > alpha_upper: alpha = max(0.001 * alpha_upper, (alpha_lower * alpha_upper)**0.5) phi, phi_prime = phi_and_derivative(alpha, suf, s, Delta) if phi < 0: alpha_upper = alpha ratio = phi / phi_prime alpha_lower = max(alpha_lower, alpha - ratio) alpha -= (phi + Delta) * ratio / Delta if np.abs(phi) < rtol * Delta: break p = -V.dot(suf / (s**2 + alpha)) # Make the norm of p equal to Delta, p is changed only slightly during # this. It is done to prevent p lie outside the trust region (which can # cause problems later). p *= Delta / norm(p) return p, alpha, it + 1 def solve_trust_region_2d(B, g, Delta): """Solve a general trust-region problem in 2 dimensions. The problem is reformulated as a 4-th order algebraic equation, the solution of which is found by numpy.roots. Parameters ---------- B : ndarray, shape (2, 2) Symmetric matrix, defines a quadratic term of the function. g : ndarray, shape (2,) Defines a linear term of the function. Delta : float Radius of a trust region. Returns ------- p : ndarray, shape (2,) Found solution. newton_step : bool Whether the returned solution is the Newton step which lies within the trust region. 
""" try: R, lower = cho_factor(B) p = -cho_solve((R, lower), g) if np.dot(p, p) <= Delta**2: return p, True except LinAlgError: pass a = B[0, 0] * Delta**2 b = B[0, 1] * Delta**2 c = B[1, 1] * Delta**2 d = g[0] * Delta f = g[1] * Delta coeffs = np.array( [-b + d, 2 * (a - c + f), 6 * b, 2 * (-a + c + f), -b - d]) t = np.roots(coeffs) # Can handle leading zeros. t = np.real(t[np.isreal(t)]) p = Delta * np.vstack((2 * t / (1 + t**2), (1 - t**2) / (1 + t**2))) value = 0.5 * np.sum(p * B.dot(p), axis=0) + np.dot(g, p) i = np.argmin(value) p = p[:, i] return p, False def update_tr_radius(Delta, actual_reduction, predicted_reduction, step_norm, bound_hit): """Update the radius of a trust region based on the cost reduction. Returns ------- Delta : float New radius. ratio : float Ratio between actual and predicted reductions. Zero if predicted reduction is zero. """ if predicted_reduction > 0: ratio = actual_reduction / predicted_reduction else: ratio = 0 if ratio < 0.25: Delta = 0.25 * step_norm elif ratio > 0.75 and bound_hit: Delta *= 2.0 return Delta, ratio # Construction and minimization of quadratic functions. def build_quadratic_1d(J, g, s, diag=None, s0=None): """Parameterize a multivariate quadratic function along a line. The resulting univariate quadratic function is given as follows: :: f(t) = 0.5 * (s0 + s*t).T * (J.T*J + diag) * (s0 + s*t) + g.T * (s0 + s*t) Parameters ---------- J : ndarray, sparse matrix or LinearOperator shape (m, n) Jacobian matrix, affects the quadratic term. g : ndarray, shape (n,) Gradient, defines the linear term. s : ndarray, shape (n,) Direction vector of a line. diag : None or ndarray with shape (n,), optional Addition diagonal part, affects the quadratic term. If None, assumed to be 0. s0 : None or ndarray with shape (n,), optional Initial point. If None, assumed to be 0. Returns ------- a : float Coefficient for t**2. b : float Coefficient for t. c : float Free term. Returned only if `s0` is provided. 
""" v = J.dot(s) a = np.dot(v, v) if diag is not None: a += np.dot(s * diag, s) a *= 0.5 b = np.dot(g, s) if s0 is not None: u = J.dot(s0) b += np.dot(u, v) c = 0.5 * np.dot(u, u) + np.dot(g, s0) if diag is not None: b += np.dot(s0 * diag, s) c += 0.5 * np.dot(s0 * diag, s0) return a, b, c else: return a, b def minimize_quadratic_1d(a, b, lb, ub, c=0): """Minimize a 1-d quadratic function subject to bounds. The free term `c` is 0 by default. Bounds must be finite. Returns ------- t : float Minimum point. y : float Minimum value. """ t = [lb, ub] if a != 0: extremum = -0.5 * b / a if lb < extremum < ub: t.append(extremum) t = np.asarray(t) y = a * t**2 + b * t + c min_index = np.argmin(y) return t[min_index], y[min_index] def evaluate_quadratic(J, g, s, diag=None): """Compute values of a quadratic function arising in least squares. The function is 0.5 * s.T * (J.T * J + diag) * s + g.T * s. Parameters ---------- J : ndarray, sparse matrix or LinearOperator, shape (m, n) Jacobian matrix, affects the quadratic term. g : ndarray, shape (n,) Gradient, defines the linear term. s : ndarray, shape (k, n) or (n,) Array containing steps as rows. diag : ndarray, shape (n,), optional Addition diagonal part, affects the quadratic term. If None, assumed to be 0. Returns ------- values : ndarray with shape (k,) or float Values of the function. If `s` was 2-dimensional then ndarray is returned, otherwise float is returned. """ if s.ndim == 1: Js = J.dot(s) q = np.dot(Js, Js) if diag is not None: q += np.dot(s * diag, s) else: Js = J.dot(s.T) q = np.sum(Js**2, axis=0) if diag is not None: q += np.sum(diag * s**2, axis=1) l = np.dot(s, g) return 0.5 * q + l # Utility functions to work with bound constraints. def in_bounds(x, lb, ub): """Check if a point lies within bounds.""" return np.all((x >= lb) & (x <= ub)) def step_size_to_bound(x, s, lb, ub): """Compute a min_step size required to reach a bound. The function computes a positive scalar t, such that x + s * t is on the bound. 
Returns ------- step : float Computed step. Non-negative value. hits : ndarray of int with shape of x Each element indicates whether a corresponding variable reaches the bound: * 0 - the bound was not hit. * -1 - the lower bound was hit. * 1 - the upper bound was hit. """ non_zero = np.nonzero(s) s_non_zero = s[non_zero] steps = np.empty_like(x) steps.fill(np.inf) with np.errstate(over='ignore'): steps[non_zero] = np.maximum((lb - x)[non_zero] / s_non_zero, (ub - x)[non_zero] / s_non_zero) min_step = np.min(steps) return min_step, np.equal(steps, min_step) * np.sign(s).astype(int) def find_active_constraints(x, lb, ub, rtol=1e-10): """Determine which constraints are active in a given point. The threshold is computed using `rtol` and the absolute value of the closest bound. Returns ------- active : ndarray of int with shape of x Each component shows whether the corresponding constraint is active: * 0 - a constraint is not active. * -1 - a lower bound is active. * 1 - a upper bound is active. """ active = np.zeros_like(x, dtype=int) if rtol == 0: active[x <= lb] = -1 active[x >= ub] = 1 return active lower_dist = x - lb upper_dist = ub - x lower_threshold = rtol * np.maximum(1, np.abs(lb)) upper_threshold = rtol * np.maximum(1, np.abs(ub)) lower_active = (np.isfinite(lb) & (lower_dist <= np.minimum(upper_dist, lower_threshold))) active[lower_active] = -1 upper_active = (np.isfinite(ub) & (upper_dist <= np.minimum(lower_dist, upper_threshold))) active[upper_active] = 1 return active def make_strictly_feasible(x, lb, ub, rstep=1e-10): """Shift a point to the interior of a feasible region. Each element of the returned vector is at least at a relative distance `rstep` from the closest bound. If ``rstep=0`` then `np.nextafter` is used. 
""" x_new = x.copy() active = find_active_constraints(x, lb, ub, rstep) lower_mask = np.equal(active, -1) upper_mask = np.equal(active, 1) if rstep == 0: x_new[lower_mask] = np.nextafter(lb[lower_mask], ub[lower_mask]) x_new[upper_mask] = np.nextafter(ub[upper_mask], lb[upper_mask]) else: x_new[lower_mask] = (lb[lower_mask] + rstep * np.maximum(1, np.abs(lb[lower_mask]))) x_new[upper_mask] = (ub[upper_mask] - rstep * np.maximum(1, np.abs(ub[upper_mask]))) tight_bounds = (x_new < lb) | (x_new > ub) x_new[tight_bounds] = 0.5 * (lb[tight_bounds] + ub[tight_bounds]) return x_new def CL_scaling_vector(x, g, lb, ub): """Compute Coleman-Li scaling vector and its derivatives. Components of a vector v are defined as follows: :: | ub[i] - x[i], if g[i] < 0 and ub[i] < np.inf v[i] = | x[i] - lb[i], if g[i] > 0 and lb[i] > -np.inf | 1, otherwise According to this definition v[i] >= 0 for all i. It differs from the definition in paper [1]_ (eq. (2.2)), where the absolute value of v is used. Both definitions are equivalent down the line. Derivatives of v with respect to x take value 1, -1 or 0 depending on a case. Returns ------- v : ndarray with shape of x Scaling vector. dv : ndarray with shape of x Derivatives of v[i] with respect to x[i], diagonal elements of v's Jacobian. References ---------- .. [1] M.A. Branch, T.F. Coleman, and Y. Li, "A Subspace, Interior, and Conjugate Gradient Method for Large-Scale Bound-Constrained Minimization Problems," SIAM Journal on Scientific Computing, Vol. 21, Number 1, pp 1-23, 1999. 
""" v = np.ones_like(x) dv = np.zeros_like(x) mask = (g < 0) & np.isfinite(ub) v[mask] = ub[mask] - x[mask] dv[mask] = -1 mask = (g > 0) & np.isfinite(lb) v[mask] = x[mask] - lb[mask] dv[mask] = 1 return v, dv def reflective_transformation(y, lb, ub): """Compute reflective transformation and its gradient.""" if in_bounds(y, lb, ub): return y, np.ones_like(y) lb_finite = np.isfinite(lb) ub_finite = np.isfinite(ub) x = y.copy() g_negative = np.zeros_like(y, dtype=bool) mask = lb_finite & ~ub_finite x[mask] = np.maximum(y[mask], 2 * lb[mask] - y[mask]) g_negative[mask] = y[mask] < lb[mask] mask = ~lb_finite & ub_finite x[mask] = np.minimum(y[mask], 2 * ub[mask] - y[mask]) g_negative[mask] = y[mask] > ub[mask] mask = lb_finite & ub_finite d = ub - lb t = np.remainder(y[mask] - lb[mask], 2 * d[mask]) x[mask] = lb[mask] + np.minimum(t, 2 * d[mask] - t) g_negative[mask] = t > d[mask] g = np.ones_like(y) g[g_negative] = -1 return x, g # Functions to display algorithm's progress. def print_header_nonlinear(): print("{0:^15}{1:^15}{2:^15}{3:^15}{4:^15}{5:^15}" .format("Iteration", "Total nfev", "Cost", "Cost reduction", "Step norm", "Optimality")) def print_iteration_nonlinear(iteration, nfev, cost, cost_reduction, step_norm, optimality): if cost_reduction is None: cost_reduction = " " * 15 else: cost_reduction = "{0:^15.2e}".format(cost_reduction) if step_norm is None: step_norm = " " * 15 else: step_norm = "{0:^15.2e}".format(step_norm) print("{0:^15}{1:^15}{2:^15.4e}{3}{4}{5:^15.2e}" .format(iteration, nfev, cost, cost_reduction, step_norm, optimality)) def print_header_linear(): print("{0:^15}{1:^15}{2:^15}{3:^15}{4:^15}" .format("Iteration", "Cost", "Cost reduction", "Step norm", "Optimality")) def print_iteration_linear(iteration, cost, cost_reduction, step_norm, optimality): if cost_reduction is None: cost_reduction = " " * 15 else: cost_reduction = "{0:^15.2e}".format(cost_reduction) if step_norm is None: step_norm = " " * 15 else: step_norm = 
"{0:^15.2e}".format(step_norm) print("{0:^15}{1:^15.4e}{2}{3}{4:^15.2e}".format( iteration, cost, cost_reduction, step_norm, optimality)) # Simple helper functions. def compute_grad(J, f): """Compute gradient of the least-squares cost function.""" if isinstance(J, LinearOperator): return J.rmatvec(f) else: return J.T.dot(f) def compute_jac_scale(J, scale_inv_old=None): """Compute variables scale based on the Jacobian matrix.""" if issparse(J): scale_inv = np.asarray(J.power(2).sum(axis=0)).ravel()**0.5 else: scale_inv = np.sum(J**2, axis=0)**0.5 if scale_inv_old is None: scale_inv[scale_inv == 0] = 1 else: scale_inv = np.maximum(scale_inv, scale_inv_old) return 1 / scale_inv, scale_inv def left_multiplied_operator(J, d): """Return diag(d) J as LinearOperator.""" J = aslinearoperator(J) def matvec(x): return d * J.matvec(x) def matmat(X): return d * J.matmat(X) def rmatvec(x): return J.rmatvec(x.ravel() * d) return LinearOperator(J.shape, matvec=matvec, matmat=matmat, rmatvec=rmatvec) def right_multiplied_operator(J, d): """Return J diag(d) as LinearOperator.""" J = aslinearoperator(J) def matvec(x): return J.matvec(np.ravel(x) * d) def matmat(X): return J.matmat(X * d[:, np.newaxis]) def rmatvec(x): return d * J.rmatvec(x) return LinearOperator(J.shape, matvec=matvec, matmat=matmat, rmatvec=rmatvec) def regularized_lsq_operator(J, diag): """Return a matrix arising in regularized least squares as LinearOperator. The matrix is [ J ] [ D ] where D is diagonal matrix with elements from `diag`. """ J = aslinearoperator(J) m, n = J.shape def matvec(x): return np.hstack((J.matvec(x), diag * x)) def rmatvec(x): x1 = x[:m] x2 = x[m:] return J.rmatvec(x1) + diag * x2 return LinearOperator((m + n, n), matvec=matvec, rmatvec=rmatvec) def right_multiply(J, d, copy=True): """Compute J diag(d). If `copy` is False, `J` is modified in place (unless being LinearOperator). 
""" if copy and not isinstance(J, LinearOperator): J = J.copy() if issparse(J): J.data *= d.take(J.indices, mode='clip') # scikit-learn recipe. elif isinstance(J, LinearOperator): J = right_multiplied_operator(J, d) else: J *= d return J def left_multiply(J, d, copy=True): """Compute diag(d) J. If `copy` is False, `J` is modified in place (unless being LinearOperator). """ if copy and not isinstance(J, LinearOperator): J = J.copy() if issparse(J): J.data *= np.repeat(d, np.diff(J.indptr)) # scikit-learn recipe. elif isinstance(J, LinearOperator): J = left_multiplied_operator(J, d) else: J *= d[:, np.newaxis] return J def check_termination(dF, F, dx_norm, x_norm, ratio, ftol, xtol): """Check termination condition for nonlinear least squares.""" ftol_satisfied = dF < ftol * F and ratio > 0.25 xtol_satisfied = dx_norm < xtol * (xtol + x_norm) if ftol_satisfied and xtol_satisfied: return 4 elif ftol_satisfied: return 2 elif xtol_satisfied: return 3 else: return None def scale_for_robust_loss_function(J, f, rho): """Scale Jacobian and residuals for a robust loss function. Arrays are modified in place. """ J_scale = rho[1] + 2 * rho[2] * f**2 J_scale[J_scale < EPS] = EPS J_scale **= 0.5 f *= rho[1] / J_scale return left_multiply(J, J_scale, copy=False), f
bsd-3-clause
Lujeni/ansible
lib/ansible/module_utils/network/nxos/argspec/l2_interfaces/l2_interfaces.py
19
2016
#
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#############################################
#                WARNING                    #
#############################################
#
# This file is auto generated by the resource
# module builder playbook.
#
# Do not edit this file manually.
#
# Changes to this file will be over written
# by the resource module builder.
#
# Changes should be made in the model used to
# generate this file or in the resource module
# builder template.
#
#############################################
"""
The arg spec for the nxos_l2_interfaces module
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type


class L2_interfacesArgs(object):  # pylint: disable=R0903
    """Argument spec container for the nxos_l2_interfaces module."""

    def __init__(self, **kwargs):
        pass

    # Schema for the per-interface configuration entries plus the
    # resource-module `state` selector.
    argument_spec = {
        'config': {
            'type': 'list',
            'elements': 'dict',
            'options': {
                'name': {'required': True, 'type': 'str'},
                'access': {
                    'type': 'dict',
                    'options': {'vlan': {'type': 'int'}},
                },
                'trunk': {
                    'type': 'dict',
                    'options': {
                        'allowed_vlans': {'type': 'str'},
                        'native_vlan': {'type': 'int'},
                    },
                },
            },
        },
        'state': {
            'type': 'str',
            'default': 'merged',
            'choices': ['merged', 'replaced', 'overridden', 'deleted'],
        },
    }  # pylint: disable=C0301
gpl-3.0
WhiteWind/yate
share/scripts/yaypm/yaypm/utils/resources/__init__.py
10
3062
import logging

from random import random

from twisted.internet import defer

from yaypm.utils import XOR

logger = logging.getLogger("yaypm.resources")


class Resource:
    """Base class for playable audio resources.

    Subclasses implement :meth:`_match`, which maps play-time arguments to
    a list of source names that Yate can attach to a channel.
    """

    def _match(self, *args):
        """Return the list of attachable source names for *args."""
        raise NotImplementedError("Abstract Method!")

    @defer.inlineCallbacks
    def play(self, yate, callid, targetid, stopOnDTMF=False,
             until=None, override=False, *args):
        """Play the matched sources on *targetid* one after another.

        Playback of the sequence stops early when a source does not end
        with "eof" (e.g. the call hung up), or - if *stopOnDTMF* is set -
        when the caller presses a DTMF key, in which case the DTMF message
        is returned.
        """
        files = self._match(*args)

        if not until:
            # Default stop condition: the played-to call hangs up.
            until = yate.onwatch("chan.hangup",
                                 lambda m: m["id"] == callid)

        for f in files:
            logger.debug("on %s %s: %s", targetid,
                         "overriding" if override else "playing", f)

            # Unique notify id so chan.notify events can be tied to this
            # particular attach.
            nid = f + str(random())

            m = yate.msg("chan.masquerade",
                         {"message": "chan.attach",
                          "id": targetid,
                          "override" if override else "source": f,
                          "notify": nid})
            yield m.dispatch()

            if stopOnDTMF:
                # Race the end-of-playback notify against a DTMF press.
                dtmf, notify = yield XOR(
                    yate.onmsg("chan.notify",
                               lambda m: m["targetid"] == nid,
                               autoreturn=True, until=until),
                    yate.onwatch("chan.dtmf",
                                 lambda m: m["id"] == callid,
                                 until=until))
                if dtmf:
                    # A DTMF key interrupted playback; hand the message
                    # back to the caller.
                    defer.returnValue(notify)
            else:
                notify = yield yate.onwatch(
                    "chan.notify",
                    lambda m: m["targetid"] == nid,
                    until=until)

            if notify["reason"] != "eof":
                # Playback was cut short; abandon the rest of the sequence.
                break

    def override(self, yate, callid, stopOnDTMF=False, until=None, *args):
        """Play the matched sources over whatever *callid* is hearing."""
        return Resource.play(self, yate, callid, callid,
                             stopOnDTMF, until, True, *args)


class StaticResource(Resource):
    """A resource that always resolves to a single fixed source."""

    def __init__(self, attach, desc=None):
        self.attach = attach
        # Bug fix: `self.desc` was previously assigned only when `desc`
        # was falsy, so supplying a description left the attribute unset.
        self.desc = desc if desc else attach

    def _match(self, *args):
        return [self.attach]


class ConcatenationResource(Resource):
    """A resource that plays several sub-resources in sequence.

    Constructor arguments alternate between a Resource instance and the
    (integer) positions of the play-time arguments forwarded to it.
    """

    def __init__(self, *args):
        self.resources = []
        current = None
        current_args = None
        for arg in args:
            if isinstance(arg, Resource):
                if current:
                    self.resources.append((current, current_args))
                current = arg
                current_args = []
            else:
                if not current:
                    # Bug fix: the original raised the undefined name
                    # `WrongValue`, which itself produced a NameError.
                    raise ValueError("Argument without Resource!")
                current_args.append(arg)
        if current:
            self.resources.append((current, current_args))

    def _match(self, *args):
        result = []
        for resource, res_args in self.resources:
            result.extend(resource._match(*(args[i] for i in res_args)))
        return result
gpl-2.0
andrewnc/scikit-learn
examples/plot_multilabel.py
236
4157
# Authors: Vlad Niculae, Mathieu Blondel
# License: BSD 3 clause
"""
=========================
Multilabel classification
=========================

This example simulates a multi-label document classification problem. The
dataset is generated randomly based on the following process:

    - pick the number of labels: n ~ Poisson(n_labels)
    - n times, choose a class c: c ~ Multinomial(theta)
    - pick the document length: k ~ Poisson(length)
    - k times, choose a word: w ~ Multinomial(theta_c)

In the above process, rejection sampling is used to make sure that n is
more than 2, and that the document length is never zero. Likewise, we
reject classes which have already been chosen. The documents that are
assigned to both classes are plotted surrounded by two colored circles.

The classification is performed by projecting to the first two principal
components found by PCA and CCA for visualisation purposes, followed by
using the :class:`sklearn.multiclass.OneVsRestClassifier` metaclassifier
using two SVCs with linear kernels to learn a discriminative model for
each class. Note that PCA is used to perform an unsupervised dimensionality
reduction, while CCA is used to perform a supervised one.

Note: in the plot, "unlabeled samples" does not mean that we don't know
the labels (as in semi-supervised learning) but that the samples simply
do *not* have a label.
"""
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt

from sklearn.datasets import make_multilabel_classification
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
from sklearn.preprocessing import LabelBinarizer
from sklearn.decomposition import PCA
from sklearn.cross_decomposition import CCA


def plot_hyperplane(clf, min_x, max_x, linestyle, label):
    # Draw the decision line w . x + b = 0 of a fitted linear SVC as
    # y = -(w0/w1) * x - b/w1.
    # get the separating hyperplane
    w = clf.coef_[0]
    a = -w[0] / w[1]
    xx = np.linspace(min_x - 5, max_x + 5)  # make sure the line is long enough
    yy = a * xx - (clf.intercept_[0]) / w[1]
    plt.plot(xx, yy, linestyle, label=label)


def plot_subfigure(X, Y, subplot, title, transform):
    # Project X to 2-D with the requested transform ("pca" or "cca"),
    # fit a one-vs-rest linear SVC, then draw the samples and the
    # per-class decision boundaries in the given 2x2 subplot slot.
    if transform == "pca":
        X = PCA(n_components=2).fit_transform(X)
    elif transform == "cca":
        X = CCA(n_components=2).fit(X, Y).transform(X)
    else:
        raise ValueError

    min_x = np.min(X[:, 0])
    max_x = np.max(X[:, 0])

    min_y = np.min(X[:, 1])
    max_y = np.max(X[:, 1])

    classif = OneVsRestClassifier(SVC(kernel='linear'))
    classif.fit(X, Y)

    plt.subplot(2, 2, subplot)
    plt.title(title)

    # Samples belonging to each label; a sample in both gets two circles.
    zero_class = np.where(Y[:, 0])
    one_class = np.where(Y[:, 1])
    plt.scatter(X[:, 0], X[:, 1], s=40, c='gray')
    plt.scatter(X[zero_class, 0], X[zero_class, 1], s=160, edgecolors='b',
                facecolors='none', linewidths=2, label='Class 1')
    plt.scatter(X[one_class, 0], X[one_class, 1], s=80, edgecolors='orange',
                facecolors='none', linewidths=2, label='Class 2')

    plot_hyperplane(classif.estimators_[0], min_x, max_x, 'k--',
                    'Boundary\nfor class 1')
    plot_hyperplane(classif.estimators_[1], min_x, max_x, 'k-.',
                    'Boundary\nfor class 2')
    plt.xticks(())
    plt.yticks(())

    plt.xlim(min_x - .5 * max_x, max_x + .5 * max_x)
    plt.ylim(min_y - .5 * max_y, max_y + .5 * max_y)
    if subplot == 2:
        plt.xlabel('First principal component')
        plt.ylabel('Second principal component')
        plt.legend(loc="upper left")


plt.figure(figsize=(8, 6))

# Same generator settings twice: first allowing unlabeled samples,
# then requiring every sample to carry at least one label.
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
                                      allow_unlabeled=True,
                                      random_state=1)

plot_subfigure(X, Y, 1, "With unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 2, "With unlabeled samples + PCA", "pca")

X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
                                      allow_unlabeled=False,
                                      random_state=1)

plot_subfigure(X, Y, 3, "Without unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 4, "Without unlabeled samples + PCA", "pca")

plt.subplots_adjust(.04, .02, .97, .94, .09, .2)
plt.show()
bsd-3-clause
tik0/inkscapeGrid
share/extensions/synfig_output.py
7
48326
#!/usr/bin/env python """ synfig_output.py An Inkscape extension for exporting Synfig files (.sif) Copyright (C) 2011 Nikita Kitaev This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA """ import sys import math import uuid from copy import deepcopy import inkex from inkex import NSS, addNS, etree, errormsg import simplepath, simplestyle, simpletransform import cubicsuperpath from synfig_prepare import SynfigPrep, MalformedSVGError, get_dimension import synfig_fileformat as sif ###### Utility Classes #################################### class UnsupportedException(Exception): """When part of an element is not supported, this exception is raised to invalidate the whole element""" pass class SynfigDocument(object): """A synfig document, with commands for adding layers and layer parameters""" def __init__(self, width=1024, height=768, name="Synfig Animation 1"): self.root_canvas = etree.fromstring( """ <canvas version="0.5" width="%f" height="%f" xres="2834.645752" yres="2834.645752" view-box="0 0 0 0" > <name>%s</name> </canvas> """ % (width, height, name) ) self._update_viewbox() self.gradients = {} self.filters = {} ### Properties def get_root_canvas(self): return self.root_canvas def get_root_tree(self): return self.root_canvas.getroottree() def _update_viewbox(self): """Update the viewbox to match document width and height""" attr_viewbox = "%f %f %f 
%f" % ( -self.width/2.0/sif.kux, self.height/2.0/sif.kux, self.width/2.0/sif.kux, -self.height/2.0/sif.kux ) self.root_canvas.set("view-box", attr_viewbox) def get_width(self): return float(self.root_canvas.get("width", "0")) def set_width(self, value): self.root_canvas.set("width", str(value)) self._update_viewbox() def get_height(self): return float(self.root_canvas.get("height", "0")) def set_height(self, value): self.root_canvas.set("height", str(value)) self._update_viewbox() def get_name(self): return self.root_canvas.get("name", "") def set_name(self, value): self.root_canvas.set("name", value) self._update_viewbox() width = property(get_width, set_width) height = property(get_height, set_height) name = property(get_name, set_name) ### Public utility functions def new_guid(self): """Generate a new GUID""" return uuid.uuid4().hex ### Coordinate system conversions def distance_svg2sif(self, distance): """Convert distance from SVG to Synfig units""" return distance/sif.kux def distance_sif2svg(self, distance): """Convert distance from Synfig to SVG units""" return distance*sif.kux def coor_svg2sif(self, vector): """Convert SVG coordinate [x, y] to Synfig units""" x = vector[0] y = self.height - vector[1] x -= self.width/2.0 y -= self.height/2.0 x /= sif.kux y /= sif.kux return [x, y] def coor_sif2svg(self, vector): """Convert Synfig coordinate [x, y] to SVG units""" x = vector[0] * sif.kux + self.width/2.0 y = vector[1] * sif.kux + self.height/2.0 y = self.height - y assert self.coor_svg2sif([x, y]) == vector, "sif to svg coordinate conversion error" return [x, y] def list_coor_svg2sif(self, l): """Scan a list for coordinate pairs and convert them to Synfig units""" # If list has two numerical elements, # treat it as a coordinate pair if type(l) == list and len(l) == 2: if type(l[0]) == int or type(l[0]) == float: if type(l[1]) == int or type(l[1]) == float: l_sif = self.coor_svg2sif(l) l[0] = l_sif[0] l[1] = l_sif[1] return # Otherwise recursively iterate over 
the list for x in l: if type(x) == list: self.list_coor_svg2sif(x) def list_coor_sif2svg(self, l): """Scan a list for coordinate pairs and convert them to SVG units""" # If list has two numerical elements, # treat it as a coordinate pair if type(l) == list and len(l) == 2: if type(l[0]) == int or type(l[0]) == float: if type(l[1]) == int or type(l[1]) == float: l_sif = self.coor_sif2svg(l) l[0] = l_sif[0] l[1] = l_sif[1] return # Otherwise recursively iterate over the list for x in l: if type(x) == list: self.list_coor_sif2svg(x) def bline_coor_svg2sif(self, b): """Convert a BLine from SVG to Synfig coordinate units""" self.list_coor_svg2sif(b["points"]) def bline_coor_sif2svg(self, b): """Convert a BLine from Synfig to SVG coordinate units""" self.list_coor_sif2svg(b["points"]) ### XML Builders -- private ### used to create XML elements in the Synfig document def build_layer(self, layer_type, desc, canvas=None, active=True, version="auto"): """Build an empty layer""" if canvas is None: layer = self.root_canvas.makeelement("layer") else: layer = etree.SubElement(canvas, "layer") layer.set("type", layer_type) layer.set("desc", desc) if active: layer.set("active", "true") else: layer.set("active", "false") if version == "auto": version = sif.defaultLayerVersion(layer_type) if type(version) == float: version = str(version) layer.set("version", version) return layer def _calc_radius(self, p1x, p1y, p2x, p2y): """Calculate radius of a tangent given two points""" # Synfig tangents are scaled by a factor of 3 return sif.tangent_scale * math.sqrt( (p2x-p1x)**2 + (p2y-p1y)**2 ) def _calc_angle(self, p1x, p1y, p2x, p2y): """Calculate angle (in radians) of a tangent given two points""" dx = p2x-p1x dy = p2y-p1y if dx > 0 and dy > 0: ag = math.pi + math.atan(dy/dx) elif dx > 0 and dy < 0: ag = math.pi + math.atan(dy/dx) elif dx < 0 and dy < 0: ag = math.atan(dy/dx) elif dx < 0 and dy > 0: ag = 2*math.pi + math.atan(dy/dx) elif dx == 0 and dy > 0: ag = -1*math.pi/2 elif dx == 0 
and dy < 0: ag = math.pi/2 elif dx == 0 and dy == 0: ag = 0 elif dx < 0 and dy == 0: ag = 0 elif dx > 0 and dy == 0: ag = math.pi return (ag*180)/math.pi def build_param(self, layer, name, value, param_type="auto", guid=None): """Add a parameter node to a layer""" if layer is None: param = self.root_canvas.makeelement("param") else: param = etree.SubElement(layer, "param") param.set("name", name) #Automatically detect param_type if param_type == "auto": if layer is not None: layer_type = layer.get("type") param_type = sif.paramType(layer_type, name) else: param_type = sif.paramType(None, name, value) if param_type == "real": el = etree.SubElement(param, "real") el.set("value", str(float(value))) elif param_type == "integer": el = etree.SubElement(param, "integer") el.set("value", str(int(value))) elif param_type == "vector": el = etree.SubElement(param, "vector") x = etree.SubElement(el, "x") x.text = str(float(value[0])) y = etree.SubElement(el, "y") y.text = str(float(value[1])) elif param_type == "color": el = etree.SubElement(param, "color") r = etree.SubElement(el, "r") r.text = str(float(value[0])) g = etree.SubElement(el, "g") g.text = str(float(value[1])) b = etree.SubElement(el, "b") b.text = str(float(value[2])) a = etree.SubElement(el, "a") a.text = str(float(value[3])) if len(value) > 3 else "1.0" elif param_type == "gradient": el = etree.SubElement(param, "gradient") # Value is a dictionary of color stops # see get_gradient() for pos in value.keys(): color = etree.SubElement(el, "color") color.set("pos", str(float(pos))) c = value[pos] r = etree.SubElement(color, "r") r.text = str(float(c[0])) g = etree.SubElement(color, "g") g.text = str(float(c[1])) b = etree.SubElement(color, "b") b.text = str(float(c[2])) a = etree.SubElement(color, "a") a.text = str(float(c[3])) if len(c) > 3 else "1.0" elif param_type == "bool": el = etree.SubElement(param, "bool") if value: el.set("value", "true") else: el.set("value", "false") elif param_type == "time": el = 
etree.SubElement(param, "time") if type(value) == int: el.set("value", "%ds" % value) elif type(value) == float: el.set("value", "%fs" % value) elif type(value) == str: el.set("value", value) elif param_type == "bline": el = etree.SubElement(param, "bline") el.set("type", "bline_point") # value is a bline (dictionary type), see path_to_bline_list if value["loop"] == True: el.set("loop", "true") else: el.set("loop", "false") for vertex in value["points"]: x = float(vertex[1][0]) y = float(vertex[1][1]) tg1x = float(vertex[0][0]) tg1y = float(vertex[0][1]) tg2x = float(vertex[2][0]) tg2y = float(vertex[2][1]) tg1_radius = self._calc_radius(x, y, tg1x, tg1y) tg1_angle = self._calc_angle(x, y, tg1x, tg1y) tg2_radius = self._calc_radius(x, y, tg2x, tg2y) tg2_angle = self._calc_angle(x, y, tg2x, tg2y)-180.0 if vertex[3]: split = "true" else: split = "false" entry = etree.SubElement(el, "entry") composite = etree.SubElement(entry, "composite") composite.set("type", "bline_point") point = etree.SubElement(composite, "point") vector = etree.SubElement(point, "vector") etree.SubElement(vector, "x").text = str(x) etree.SubElement(vector, "y").text = str(y) width = etree.SubElement(composite, "width") etree.SubElement(width, "real").set("value", "1.0") origin = etree.SubElement(composite, "origin") etree.SubElement(origin, "real").set("value", "0.5") split_el = etree.SubElement(composite, "split") etree.SubElement(split_el, "bool").set("value", split) t1 = etree.SubElement(composite, "t1") t2 = etree.SubElement(composite, "t2") t1_rc = etree.SubElement(t1, "radial_composite") t1_rc.set("type", "vector") t2_rc = etree.SubElement(t2, "radial_composite") t2_rc.set("type", "vector") t1_r = etree.SubElement(t1_rc, "radius") t2_r = etree.SubElement(t2_rc, "radius") t1_radius = etree.SubElement(t1_r, "real") t2_radius = etree.SubElement(t2_r, "real") t1_radius.set("value", str(tg1_radius)) t2_radius.set("value", str(tg2_radius)) t1_t = etree.SubElement(t1_rc, "theta") t2_t = 
etree.SubElement(t2_rc, "theta") t1_angle = etree.SubElement(t1_t, "angle") t2_angle = etree.SubElement(t2_t, "angle") t1_angle.set("value", str(tg1_angle)) t2_angle.set("value", str(tg2_angle)) elif param_type == "canvas": el = etree.SubElement(param, "canvas") el.set("xres", "10.0") el.set("yres", "10.0") # "value" is a list of layers if value is not None: for layer in value: el.append(layer) else: raise AssertionError, "Unsupported param type %s" % (param_type) if guid: el.set("guid", guid) else: el.set("guid", self.new_guid()) return param ### Public layer API ### Should be used by outside functions to create layers and set layer parameters def create_layer(self, layer_type, desc, params={}, guids={}, canvas=None, active=True, version="auto"): """Create a new layer Keyword arguments: layer_type -- layer type string used internally by Synfig desc -- layer description params -- a dictionary of parameter names and their values guids -- a dictionary of parameter types and their guids (optional) active -- set to False to create a hidden layer """ layer = self.build_layer(layer_type, desc, canvas, active, version) default_layer_params = sif.defaultLayerParams(layer_type) for param_name in default_layer_params.keys(): param_type = default_layer_params[param_name][0] if param_name in params.keys(): param_value = params[param_name] else: param_value = default_layer_params[param_name][1] if param_name in guids.keys(): param_guid = guids[param_name] else: param_guid = None if param_value is not None: self.build_param(layer, param_name, param_value, param_type, guid=param_guid) return layer def set_param(self, layer, name, value, param_type="auto", guid=None, modify_linked=False): """Set a layer parameter Keyword arguments: layer -- the layer to set the parameter for name -- parameter name value -- parameter value param_type -- parameter type (default "auto") guid -- guid of the parameter value """ if modify_linked: raise AssertionError, "Modifying linked parameters is not 
supported" layer_type = layer.get("type") assert layer_type, "Layer does not have a type" if param_type == "auto": param_type = sif.paramType(layer_type, name) # Remove existing parameters with this name existing = [] for param in layer.iterchildren(): if param.get("name") == name: existing.append(param) if len(existing) == 0: self.build_param(layer, name, value, param_type, guid) elif len(existing) > 1: raise AssertionError, "Found multiple parameters with the same name" else: new_param = self.build_param(None, name, value, param_type, guid) layer.replace(existing[0], new_param) def set_params(self, layer, params={}, guids={}, modify_linked=False): """Set layer parameters Keyword arguments: layer -- the layer to set the parameter for params -- a dictionary of parameter names and their values guids -- a dictionary of parameter types and their guids (optional) """ for param_name in params.keys(): if param_name in guids.keys(): self.set_param(layer, param_name, params[param_name], guid=guids[param_name], modify_linked=modify_linked) else: self.set_param(layer, param_name, params[param_name], modify_linked=modify_linked) def get_param(self, layer, name, param_type="auto"): """Get the value of a layer parameter Keyword arguments: layer -- the layer to get the parameter from name -- param name param_type -- parameter type (default "auto") NOT FULLY IMPLEMENTED """ layer_type = layer.get("type") assert layer_type, "Layer does not have a type" if param_type == "auto": param_type = sif.paramType(layer_type, name) for param in layer.iterchildren(): if param.get("name") == name: if param_type == "real": return float(param[0].get("value", "0")) elif param_type == "integer": return int(param[0].get("integer", "0")) else: raise Exception, "Getting this type of parameter not yet implemented" ### Global defs, and related # SVG Filters def add_filter(self, filter_id, f): """Register a filter""" self.filters[filter_id] = f # SVG Gradients def add_linear_gradient(self, gradient_id, 
p1, p2, mtx=[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]], stops=[], link="", spread_method="pad"): """Register a linear gradient definition""" gradient = { "type" : "linear", "p1" : p1, "p2" : p2, "mtx" : mtx, "spreadMethod": spread_method } if stops != []: gradient["stops"] = stops gradient["stops_guid"] = self.new_guid() elif link != "": gradient["link"] = link else: raise MalformedSVGError, "Gradient has neither stops nor link" self.gradients[gradient_id] = gradient def add_radial_gradient(self, gradient_id, center, radius, focus, mtx=[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]], stops=[], link="", spread_method="pad"): """Register a radial gradient definition""" gradient = { "type" : "radial", "center" : center, "radius" : radius, "focus" : focus, "mtx" : mtx, "spreadMethod": spread_method } if stops != []: gradient["stops"] = stops gradient["stops_guid"] = self.new_guid() elif link != "": gradient["link"] = link else: raise MalformedSVGError, "Gradient has neither stops nor link" self.gradients[gradient_id] = gradient def get_gradient(self, gradient_id): """ Return a gradient with a given id Linear gradient format: { "type" : "linear", "p1" : [x, y], "p2" : [x, y], "mtx" : mtx, "stops" : color stops, "stops_guid": color stops guid, "spreadMethod": "pad", "reflect", or "repeat" } Radial gradient format: { "type" : "radial", "center" : [x, y], "radius" : r, "focus" : [x, y], "mtx" : mtx, "stops" : color stops, "stops_guid": color stops guid, "spreadMethod": "pad", "reflect", or "repeat" } Color stops format { 0.0 : color ([r,g,b,a] or [r,g,b]) at start, [a number] : color at that position, 1.0 : color at end } """ if gradient_id not in self.gradients.keys(): return None gradient = self.gradients[gradient_id] # If the gradient has no link, we are done if "link" not in gradient.keys() or gradient["link"] == "": return gradient # If the gradient does have a link, find the color stops recursively if gradient["link"] not in self.gradients.keys(): raise MalformedSVGError, "Linked 
gradient ID not found" linked_gradient = self.get_gradient(gradient["link"]) gradient["stops"] = linked_gradient["stops"] gradient["stops_guid"] = linked_gradient["stops_guid"] del gradient["link"] # Update the gradient in our listing # (so recursive lookup only happens once) self.gradients[gradient_id] = gradient return gradient def gradient_to_params(self, gradient): """Transform gradient to a list of parameters to pass to a Synfig layer""" # Create a copy of the gradient g = gradient.copy() # Set synfig-only attribs if g["spreadMethod"] == "repeat": g["loop"] = True elif g["spreadMethod"] == "reflect": g["loop"] = True # Reflect the gradient # Original: 0.0 [A . B . C] 1.0 # New: 0.0 [A . B . C . B . A] 1.0 # (with gradient size doubled) new_stops = {} # reflect the stops for pos in g["stops"]: val = g["stops"][pos] if pos == 1.0: new_stops[pos/2.0] = val else: new_stops[pos/2.0] = val new_stops[1 - pos/2.0] = val g["stops"] = new_stops # double the gradient size if g["type"] == "linear": g["p2"] = [ g["p1"][0]+2.0*(g["p2"][0]-g["p1"][0]), g["p1"][1]+2.0*(g["p2"][1]-g["p1"][1]) ] if g["type"] == "radial": g["radius"]= 2.0*g["radius"] # Rename "stops" to "gradient" g["gradient"] = g["stops"] # Convert coordinates if g["type"] == "linear": g["p1"] = self.coor_svg2sif(g["p1"]) g["p2"] = self.coor_svg2sif(g["p2"]) if g["type"] == "radial": g["center"] = self.coor_svg2sif(g["center"]) g["radius"] = self.distance_svg2sif(g["radius"]) # Delete extra attribs removed_attribs = ["type", "stops", "stops_guid", "mtx", "focus", "spreadMethod"] for x in removed_attribs: if x in g.keys(): del g[x] return g ### Public operations API # Operations act on a series of layers, and (optionally) on a series of named parameters # The "is_end" attribute should be set to true when the layers are at the end of a canvas # (i.e. 
when adding transform layers on top of them does not require encapsulation) def op_blur(self, layers, x, y, name="Blur", is_end=False): """Gaussian blur the given layers by the given x and y amounts Keyword arguments: layers -- list of layers x -- x-amount of blur y -- x-amount of blur is_end -- set to True if layers are at the end of a canvas Returns: list of layers """ blur = self.create_layer("blur", name, params={ "blend_method" : sif.blend_methods["straight"], "size" : [x, y] }) if is_end: return layers + [blur] else: return self.op_encapsulate(layers + [blur]) def op_color(self, layers, overlay, is_end=False): """Apply a color overlay to the given layers Should be used to apply a gradient or pattern to a shape Keyword arguments: layers -- list of layers overlay -- color layer to apply is_end -- set to True if layers are at the end of a canvas Returns: list of layers """ if layers == []: return layers if overlay is None: return layers overlay_enc = self.op_encapsulate([overlay]) self.set_param(overlay_enc[0], "blend_method", sif.blend_methods["straight onto"]) ret = layers + overlay_enc if is_end: return ret else: return self.op_encapsulate(ret) def op_encapsulate(self, layers, name="Inline Canvas", is_end=False): """Encapsulate the given layers Keyword arguments: layers -- list of layers name -- Name of the PasteCanvas layer that is created is_end -- set to True if layers are at the end of a canvas Returns: list of one layer """ if layers == []: return layers layer = self.create_layer("PasteCanvas", name, params={"canvas":layers}) return [layer] def op_fade(self, layers, opacity, is_end=False): """Increase the opacity of the given layers by a certain amount Keyword arguments: layers -- list of layers opacity -- the opacity to apply (float between 0.0 to 1.0) name -- name of the Transform layer that is added is_end -- set to True if layers are at the end of a canvas Returns: list of layers """ # If there is blending involved, first encapsulate the layers for 
layer in layers: if self.get_param(layer, "blend_method") != sif.blend_methods["composite"]: return self.op_fade(self.op_encapsulate(layers), opacity, is_end) # Otherwise, set their amount for layer in layers: amount = self.get_param(layer, "amount") self.set_param(layer, "amount", amount*opacity) return layers def op_filter(self, layers, filter_id, is_end=False): """Apply a filter to the given layers Keyword arguments: layers -- list of layers filter_id -- id of the filter is_end -- set to True if layers are at the end of a canvas Returns: list of layers """ if filter_id not in self.filters.keys(): raise MalformedSVGError, "Filter %s not found" % filter_id try: ret = self.filters[filter_id](self, layers, is_end) assert type(ret) == list return ret except UnsupportedException: # If the filter is not supported, ignore it. return layers def op_set_blend(self, layers, blend_method, is_end=False): """Set the blend method of the given group of layers If more than one layer is supplied, they will be encapsulated. 
Keyword arguments: layers -- list of layers blend_method -- blend method to give the layers is_end -- set to True if layers are at the end of a canvas Returns: list of layers """ if layers == []: return layers if blend_method == "composite": return layers layer = layers[0] if len(layers) > 1 or self.get_param(layers[0], "amount") != 1.0: layer = self.op_encapsulate(layers)[0] layer = deepcopy(layer) self.set_param(layer, "blend_method", sif.blend_methods[blend_method]) return [layer] def op_transform(self, layers, mtx, name="Transform", is_end=False): """Apply a matrix transformation to the given layers Keyword arguments: layers -- list of layers mtx -- transformation matrix name -- name of the Transform layer that is added is_end -- set to True if layers are at the end of a canvas Returns: list of layers """ if layers == []: return layers if mtx is None or mtx == [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]: return layers src_tl = [100, 100] src_br = [200, 200] dest_tl = [100, 100] dest_tr = [200, 100] dest_br = [200, 200] dest_bl = [100, 200] simpletransform.applyTransformToPoint(mtx, dest_tl) simpletransform.applyTransformToPoint(mtx, dest_tr) simpletransform.applyTransformToPoint(mtx, dest_br) simpletransform.applyTransformToPoint(mtx, dest_bl) warp = self.create_layer("warp", name, params={ "src_tl": self.coor_svg2sif(src_tl), "src_br": self.coor_svg2sif(src_br), "dest_tl": self.coor_svg2sif(dest_tl), "dest_tr": self.coor_svg2sif(dest_tr), "dest_br": self.coor_svg2sif(dest_br), "dest_bl": self.coor_svg2sif(dest_bl) } ) if is_end: return layers + [warp] else: return self.op_encapsulate(layers + [warp]) ###### Utility Functions ################################## ### Path related def path_to_bline_list(path_d, nodetypes=None, mtx=[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]): """ Convert a path to a BLine List bline_list format: Vertex: [[tg1x, tg1y], [x,y], [tg2x, tg2y], split = T/F] Vertex list: [ vertex, vertex, vertex, ...] 
Bline: { "points" : vertex_list, "loop" : True / False } """ # Exit on empty paths if not path_d: return [] # Parse the path path = simplepath.parsePath(path_d) # Append (more than) enough c's to the nodetypes if nodetypes is None: nt = "" else: nt = nodetypes for _ in range(len(path)): nt += "c" # Create bline list # borrows code from cubicsuperpath.py # bline_list := [bline, bline, ...] # bline := { # "points":[vertex, vertex, ...], # "loop":True/False, # } bline_list = [] subpathstart = [] last = [] lastctrl = [] lastsplit = True for s in path: cmd, params = s if cmd != "M" and bline_list == []: raise MalformedSVGError, "Bad path data: path doesn't start with moveto, %s, %s" % (s, path) elif cmd == "M": # Add previous point to subpath if last: bline_list[-1]["points"].append([lastctrl[:], last[:], last[:], lastsplit]) # Start a new subpath bline_list.append({"nodetypes":"", "loop":False, "points":[]}) # Save coordinates of this point subpathstart = params[:] last = params[:] lastctrl = params[:] lastsplit = False if nt[0] == "z" else True nt = nt[1:] elif cmd == 'L': bline_list[-1]["points"].append([lastctrl[:], last[:], last[:], lastsplit]) last = params[:] lastctrl = params[:] lastsplit = False if nt[0] == "z" else True nt = nt[1:] elif cmd == 'C': bline_list[-1]["points"].append([lastctrl[:], last[:], params[:2], lastsplit]) last = params[-2:] lastctrl = params[2:4] lastsplit = False if nt[0] == "z" else True nt = nt[1:] elif cmd == 'Q': q0 = last[:] q1 = params[0:2] q2 = params[2:4] x0 = q0[0] x1 = 1./3*q0[0]+2./3*q1[0] x2 = 2./3*q1[0]+1./3*q2[0] x3 = q2[0] y0 = q0[1] y1 = 1./3*q0[1]+2./3*q1[1] y2 = 2./3*q1[1]+1./3*q2[1] y3 = q2[1] bline_list[-1]["points"].append([lastctrl[:], [x0, y0], [x1, y1], lastsplit]) last = [x3, y3] lastctrl = [x2, y2] lastsplit = False if nt[0] == "z" else True nt = nt[1:] elif cmd == 'A': arcp = cubicsuperpath.ArcToPath(last[:], params[:]) arcp[ 0][0] = lastctrl[:] last = arcp[-1][1] lastctrl = arcp[-1][0] lastsplit = False if 
nt[0] == "z" else True nt = nt[1:] for el in arcp[:-1]: el.append(True) bline_list[-1]["points"].append(el) elif cmd == "Z": if len(bline_list[-1]["points"]) == 0: # If the path "loops" after only one point # e.g. "M 0 0 Z" bline_list[-1]["points"].append([lastctrl[:], last[:], last[:], False]) elif last == subpathstart: # If we are back to the original position # merge our tangent into the first point bline_list[-1]["points"][0][0] = lastctrl[:] else: # Otherwise draw a line to the starting point bline_list[-1]["points"].append([lastctrl[:], last[:], last[:], lastsplit]) # Clear the variables (no more points need to be added) last = [] lastctrl = [] lastsplit = True # Loop the subpath bline_list[-1]["loop"] = True # Append final superpoint, if needed if last: bline_list[-1]["points"].append([lastctrl[:], last[:], last[:], lastsplit]) # Apply the transformation if mtx != [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]: for bline in bline_list: for vertex in bline["points"]: for pt in vertex: if type(pt) != bool: simpletransform.applyTransformToPoint(mtx, pt) return bline_list ### Style related def extract_style(node, style_attrib="style"): #return simplestyle.parseStyle(node.get("style")) # Work around a simplestyle bug in older verions of Inkscape # that leaves spaces at the beginning and end of values s = node.get(style_attrib) if s is None: return {} else: return dict([[x.strip() for x in i.split(":")] for i in s.split(";") if len(i)]) def extract_color(style, color_attrib, *opacity_attribs): if color_attrib in style.keys(): if style[color_attrib] == "none": return [1, 1, 1, 0] c = simplestyle.parseColor(style[color_attrib]) else: c = (0, 0, 0) # Convert color scales and adjust gamma color = [pow(c[0]/255.0, sif.gamma), pow(c[1]/255.0, sif.gamma), pow(c[2]/255.0, sif.gamma), 1.0] for opacity in opacity_attribs: if opacity in style.keys(): color[3] = color[3] * float(style[opacity]) return color def extract_opacity(style, *opacity_attribs): ret = 1.0 for opacity in 
opacity_attribs: if opacity in style.keys(): ret = ret * float(style[opacity]) return ret def extract_width(style, width_attrib, mtx): if width_attrib in style.keys(): width = get_dimension(style[width_attrib]) else: width = 1 area_scale_factor = mtx[0][0]*mtx[1][1] - mtx[0][1]*mtx[1][0] linear_scale_factor = math.sqrt(abs(area_scale_factor)) return width*linear_scale_factor/sif.kux ###### Main Class ######################################### class SynfigExport(SynfigPrep): def __init__(self): SynfigPrep.__init__(self) def effect(self): # Prepare the document for exporting SynfigPrep.effect(self) svg = self.document.getroot() width = get_dimension(svg.get("width", 1024)) height = get_dimension(svg.get("height", 768)) title = svg.xpath("svg:title", namespaces=NSS) if len(title) == 1: name = title[0].text else: name = svg.get(addNS("docname", "sodipodi"), "Synfig Animation 1") d = SynfigDocument(width, height, name) layers = [] for node in svg.iterchildren(): layers += self.convert_node(node, d) root_canvas = d.get_root_canvas() for layer in layers: root_canvas.append(layer) d.get_root_tree().write(sys.stdout) def convert_node(self, node, d): """Convert an SVG node to a list of Synfig layers""" # Parse tags that don't draw any layers if node.tag == addNS("namedview", "sodipodi"): return [] elif node.tag == addNS("defs", "svg"): self.parse_defs(node, d) return [] elif node.tag == addNS("metadata", "svg"): return [] elif node.tag not in [ addNS("g", "svg"), addNS("a", "svg"), addNS("switch", "svg"), addNS("path", "svg")]: # An unsupported element return [] layers = [] if node.tag == addNS("g", "svg"): for subnode in node: layers += self.convert_node(subnode, d) if node.get(addNS("groupmode", "inkscape")) == "layer": name = node.get(addNS("label", "inkscape"), "Inline Canvas") layers = d.op_encapsulate(layers, name=name) elif (node.tag == addNS("a", "svg") or node.tag == addNS("switch", "svg")): # Treat anchor and switch as a group for subnode in node: layers += 
self.convert_node(subnode, d) elif node.tag == addNS("path", "svg"): layers = self.convert_path(node, d) style = extract_style(node) if "filter" in style.keys() and style["filter"].startswith("url"): filter_id = style["filter"][5:].split(")")[0] layers = d.op_filter(layers, filter_id) opacity = extract_opacity(style, "opacity") if opacity != 1.0: layers = d.op_fade(layers, opacity) return layers def parse_defs(self, node, d): for child in node.iterchildren(): if child.tag == addNS("linearGradient", "svg"): self.parse_gradient(child, d) elif child.tag == addNS("radialGradient", "svg"): self.parse_gradient(child, d) elif child.tag == addNS("filter", "svg"): self.parse_filter(child, d) def parse_gradient(self, node, d): if node.tag == addNS("linearGradient", "svg"): gradient_id = node.get("id", str(id(node))) x1 = float(node.get("x1", "0.0")) x2 = float(node.get("x2", "0.0")) y1 = float(node.get("y1", "0.0")) y2 = float(node.get("y2", "0.0")) mtx = simpletransform.parseTransform(node.get("gradientTransform")) link = node.get(addNS("href", "xlink"), "#")[1:] spread_method = node.get("spreadMethod", "pad") if link == "": stops = self.parse_stops(node, d) d.add_linear_gradient(gradient_id, [x1, y1], [x2, y2], mtx, stops=stops, spread_method=spread_method) else: d.add_linear_gradient(gradient_id, [x1, y1], [x2, y2], mtx, link=link, spread_method=spread_method) elif node.tag == addNS("radialGradient", "svg"): gradient_id = node.get("id", str(id(node))) cx = float(node.get("cx", "0.0")) cy = float(node.get("cy", "0.0")) r = float(node.get("r", "0.0")) fx = float(node.get("fx", "0.0")) fy = float(node.get("fy", "0.0")) mtx = simpletransform.parseTransform(node.get("gradientTransform")) link = node.get(addNS("href", "xlink"), "#")[1:] spread_method = node.get("spreadMethod", "pad") if link == "": stops = self.parse_stops(node, d) d.add_radial_gradient(gradient_id, [cx, cy], r, [fx, fy], mtx, stops=stops, spread_method=spread_method) else: d.add_radial_gradient(gradient_id, 
[cx, cy], r, [fx, fy], mtx, link=link, spread_method=spread_method) def parse_stops(self, node, d): stops = {} for stop in node.iterchildren(): if stop.tag == addNS("stop", "svg"): offset = float(stop.get("offset")) style = extract_style(stop) stops[offset] = extract_color(style, "stop-color", "stop-opacity") else: raise MalformedSVGError, "Child of gradient is not a stop" return stops def parse_filter(self, node, d): filter_id = node.get("id", str(id(node))) # A filter is just like an operator (the op_* functions), # except that it's created here def the_filter(d, layers, is_end=False): refs = { None : layers, #default "SourceGraphic" : layers } encapsulate_result = not is_end for child in node.iterchildren(): if child.get("in") not in refs: # "SourceAlpha", "BackgroundImage", # "BackgroundAlpha", "FillPaint", "StrokePaint" # are not supported raise UnsupportedException l_in = refs[child.get("in")] l_out = [] if child.tag == addNS("feGaussianBlur", "svg"): std_dev = child.get("stdDeviation", "0") std_dev = std_dev.replace(",", " ").split() x = float(std_dev[0]) if len(std_dev) > 1: y = float(std_dev[1]) else: y = x if x == 0 and y == 0: l_out = l_in else: x = d.distance_svg2sif(x) y = d.distance_svg2sif(y) l_out = d.op_blur(l_in, x, y, is_end=True) elif child.tag == addNS("feBlend", "svg"): # Note: Blend methods are not an exact match # because SVG uses alpha channel in places where # Synfig does not mode = child.get("mode", "normal") if mode == "normal": blend_method = "composite" elif mode == "multiply": blend_method = "multiply" elif mode == "screen": blend_method = "screen" elif mode == "darken": blend_method = "darken" elif mode == "lighten": blend_method = "brighten" else: raise MalformedSVGError, "Invalid blend method" if child.get("in2") == "BackgroundImage": encapsulate_result = False l_out = d.op_set_blend(l_in, blend_method) + d.op_set_blend(l_in, "behind") elif child.get("in2") not in refs: raise UnsupportedException else: l_in2 = 
refs[child.get("in2")] l_out = l_in2 + d.op_set_blend(l_in, blend_method) else: # This filter element is currently unsupported raise UnsupportedException # Output the layers if child.get("result"): refs[child.get("result")] = l_out # Set the default for the next filter element refs[None] = l_out # Return the output from the last element if len(refs[None]) > 1 and encapsulate_result: return d.op_encapsulate(refs[None]) else: return refs[None] d.add_filter(filter_id, the_filter) def convert_path(self, node, d): """Convert an SVG path node to a list of Synfig layers""" layers = [] node_id = node.get("id", str(id(node))) style = extract_style(node) mtx = simpletransform.parseTransform(node.get("transform")) blines = path_to_bline_list(node.get("d"), node.get(addNS("nodetypes", "sodipodi")), mtx) for bline in blines: d.bline_coor_svg2sif(bline) bline_guid = d.new_guid() if style.setdefault("fill", "#000000") != "none": if style["fill"].startswith("url"): # Set the color to black, so we can later overlay # the shape with a gradient or pattern color = [0, 0, 0, 1] else: color = extract_color(style, "fill", "fill-opacity") layer = d.create_layer("region", node_id, { "bline": bline, "color": color, "winding_style": 1 if style.setdefault("fill-rule", "nonzero") == "evenodd" else 0, }, guids={ "bline":bline_guid } ) if style["fill"].startswith("url"): color_layer = self.convert_url(style["fill"][5:].split(")")[0], mtx, d)[0] layer = d.op_color([layer], overlay=color_layer)[0] layer = d.op_fade([layer], extract_opacity(style, "fill-opacity"))[0] layers.append(layer) if style.setdefault("stroke", "none") != "none": if style["stroke"].startswith("url"): # Set the color to black, so we can later overlay # the shape with a gradient or pattern color = [0, 0, 0, 1] else: color = extract_color(style, "stroke", "stroke-opacity") layer = d.create_layer("outline", node_id, { "bline": bline, "color": color, "width": extract_width(style, "stroke-width", mtx), "sharp_cusps": True if 
style.setdefault("stroke-linejoin", "miter") == "miter" else False, "round_tip[0]": False if style.setdefault("stroke-linecap", "butt") == "butt" else True, "round_tip[1]": False if style.setdefault("stroke-linecap", "butt") == "butt" else True }, guids={ "bline":bline_guid } ) if style["stroke"].startswith("url"): color_layer = self.convert_url(style["stroke"][5:].split(")")[0], mtx, d)[0] layer = d.op_color([layer], overlay=color_layer)[0] layer = d.op_fade([layer], extract_opacity(style, "stroke-opacity"))[0] layers.append(layer) return layers def convert_url(self, url_id, mtx, d): """Return a list Synfig layers that represent the gradient with the given id""" gradient = d.get_gradient(url_id) if gradient is None: # Patterns and other URLs not supported return [None] if gradient["type"] == "linear": layer = d.create_layer("linear_gradient", url_id, d.gradient_to_params(gradient), guids={"gradient" : gradient["stops_guid"]} ) if gradient["type"] == "radial": layer = d.create_layer("radial_gradient", url_id, d.gradient_to_params(gradient), guids={"gradient" : gradient["stops_guid"]} ) return d.op_transform([layer], simpletransform.composeTransform(mtx, gradient["mtx"])) if __name__ == '__main__': try: e = SynfigExport() e.affect(output=False) except MalformedSVGError, e: errormsg(e) # vim: expandtab shiftwidth=4 tabstop=8 softtabstop=4 fileencoding=utf-8 textwidth=99
gpl-2.0
charaka1/test
geosmeta/bin/get_user_details.py
1
1588
#!/usr/bin/env python # # Copyright (c) The University of Edinburgh, 2014. # from geosmeta import api from geosmeta import util import argparse import sys import json if __name__ == '__main__': # Get command line arguments parser = argparse.ArgumentParser(description="Get user details from the GeosMeta system") parser.add_argument('--username', '-u', required=False, help='EASE username of the user (optional)') args = parser.parse_args() username = args.username # Prompt user for confirmation if (username): question = "Get user account for %s"\ % (username) else: question = "Get all user accounts?" response = util.queryYesNo(question) if (response): # Get user details print "Getting user details" try: if (username): resultJSON = api.getAccount(username) else: resultJSON = api.getAccounts() except Exception as err: sys.stderr.write('Error getting users:\n') sys.stderr.write('%s\n' % str(err)) sys.exit(1) else: sys.stdout.write('Got user details:\n\n') sys.stdout.write(json.dumps(resultJSON, indent=2, sort_keys=True)) sys.exit(0) else: sys.stdout.write('Not getting user details\n') sys.exit(0)
apache-2.0
lache/RacingKingLee
monitor/engine.win64/2.74/python/lib/site-packages/numpy/matlib.py
68
9569
from __future__ import division, absolute_import, print_function import numpy as np from numpy.matrixlib.defmatrix import matrix, asmatrix # need * as we're copying the numpy namespace from numpy import * __version__ = np.__version__ __all__ = np.__all__[:] # copy numpy namespace __all__ += ['rand', 'randn', 'repmat'] def empty(shape, dtype=None, order='C'): """ Return a new matrix of given shape and type, without initializing entries. Parameters ---------- shape : int or tuple of int Shape of the empty matrix. dtype : data-type, optional Desired output data-type. order : {'C', 'F'}, optional Whether to store multi-dimensional data in C (row-major) or Fortran (column-major) order in memory. See Also -------- empty_like, zeros Notes ----- `empty`, unlike `zeros`, does not set the matrix values to zero, and may therefore be marginally faster. On the other hand, it requires the user to manually set all the values in the array, and should be used with caution. Examples -------- >>> import numpy.matlib >>> np.matlib.empty((2, 2)) # filled with random data matrix([[ 6.76425276e-320, 9.79033856e-307], [ 7.39337286e-309, 3.22135945e-309]]) #random >>> np.matlib.empty((2, 2), dtype=int) matrix([[ 6600475, 0], [ 6586976, 22740995]]) #random """ return ndarray.__new__(matrix, shape, dtype, order=order) def ones(shape, dtype=None, order='C'): """ Matrix of ones. Return a matrix of given shape and type, filled with ones. Parameters ---------- shape : {sequence of ints, int} Shape of the matrix dtype : data-type, optional The desired data-type for the matrix, default is np.float64. order : {'C', 'F'}, optional Whether to store matrix in C- or Fortran-contiguous order, default is 'C'. Returns ------- out : matrix Matrix of ones of given shape, dtype, and order. See Also -------- ones : Array of ones. matlib.zeros : Zero matrix. Notes ----- If `shape` has length one i.e. ``(N,)``, or is a scalar ``N``, `out` becomes a single row matrix of shape ``(1,N)``. 
Examples -------- >>> np.matlib.ones((2,3)) matrix([[ 1., 1., 1.], [ 1., 1., 1.]]) >>> np.matlib.ones(2) matrix([[ 1., 1.]]) """ a = ndarray.__new__(matrix, shape, dtype, order=order) a.fill(1) return a def zeros(shape, dtype=None, order='C'): """ Return a matrix of given shape and type, filled with zeros. Parameters ---------- shape : int or sequence of ints Shape of the matrix dtype : data-type, optional The desired data-type for the matrix, default is float. order : {'C', 'F'}, optional Whether to store the result in C- or Fortran-contiguous order, default is 'C'. Returns ------- out : matrix Zero matrix of given shape, dtype, and order. See Also -------- numpy.zeros : Equivalent array function. matlib.ones : Return a matrix of ones. Notes ----- If `shape` has length one i.e. ``(N,)``, or is a scalar ``N``, `out` becomes a single row matrix of shape ``(1,N)``. Examples -------- >>> import numpy.matlib >>> np.matlib.zeros((2, 3)) matrix([[ 0., 0., 0.], [ 0., 0., 0.]]) >>> np.matlib.zeros(2) matrix([[ 0., 0.]]) """ a = ndarray.__new__(matrix, shape, dtype, order=order) a.fill(0) return a def identity(n,dtype=None): """ Returns the square identity matrix of given size. Parameters ---------- n : int Size of the returned identity matrix. dtype : data-type, optional Data-type of the output. Defaults to ``float``. Returns ------- out : matrix `n` x `n` matrix with its main diagonal set to one, and all other elements zero. See Also -------- numpy.identity : Equivalent array function. matlib.eye : More general matrix identity function. Examples -------- >>> import numpy.matlib >>> np.matlib.identity(3, dtype=int) matrix([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) """ a = array([1]+n*[0], dtype=dtype) b = empty((n, n), dtype=dtype) b.flat = a return b def eye(n,M=None, k=0, dtype=float): """ Return a matrix with ones on the diagonal and zeros elsewhere. Parameters ---------- n : int Number of rows in the output. M : int, optional Number of columns in the output, defaults to `n`. 
k : int, optional Index of the diagonal: 0 refers to the main diagonal, a positive value refers to an upper diagonal, and a negative value to a lower diagonal. dtype : dtype, optional Data-type of the returned matrix. Returns ------- I : matrix A `n` x `M` matrix where all elements are equal to zero, except for the `k`-th diagonal, whose values are equal to one. See Also -------- numpy.eye : Equivalent array function. identity : Square identity matrix. Examples -------- >>> import numpy.matlib >>> np.matlib.eye(3, k=1, dtype=float) matrix([[ 0., 1., 0.], [ 0., 0., 1.], [ 0., 0., 0.]]) """ return asmatrix(np.eye(n, M, k, dtype)) def rand(*args): """ Return a matrix of random values with given shape. Create a matrix of the given shape and propagate it with random samples from a uniform distribution over ``[0, 1)``. Parameters ---------- \\*args : Arguments Shape of the output. If given as N integers, each integer specifies the size of one dimension. If given as a tuple, this tuple gives the complete shape. Returns ------- out : ndarray The matrix of random values with shape given by `\\*args`. See Also -------- randn, numpy.random.rand Examples -------- >>> import numpy.matlib >>> np.matlib.rand(2, 3) matrix([[ 0.68340382, 0.67926887, 0.83271405], [ 0.00793551, 0.20468222, 0.95253525]]) #random >>> np.matlib.rand((2, 3)) matrix([[ 0.84682055, 0.73626594, 0.11308016], [ 0.85429008, 0.3294825 , 0.89139555]]) #random If the first argument is a tuple, other arguments are ignored: >>> np.matlib.rand((2, 3), 4) matrix([[ 0.46898646, 0.15163588, 0.95188261], [ 0.59208621, 0.09561818, 0.00583606]]) #random """ if isinstance(args[0], tuple): args = args[0] return asmatrix(np.random.rand(*args)) def randn(*args): """ Return a random matrix with data from the "standard normal" distribution. `randn` generates a matrix filled with random floats sampled from a univariate "normal" (Gaussian) distribution of mean 0 and variance 1. 
Parameters ---------- \\*args : Arguments Shape of the output. If given as N integers, each integer specifies the size of one dimension. If given as a tuple, this tuple gives the complete shape. Returns ------- Z : matrix of floats A matrix of floating-point samples drawn from the standard normal distribution. See Also -------- rand, random.randn Notes ----- For random samples from :math:`N(\\mu, \\sigma^2)`, use: ``sigma * np.matlib.randn(...) + mu`` Examples -------- >>> import numpy.matlib >>> np.matlib.randn(1) matrix([[-0.09542833]]) #random >>> np.matlib.randn(1, 2, 3) matrix([[ 0.16198284, 0.0194571 , 0.18312985], [-0.7509172 , 1.61055 , 0.45298599]]) #random Two-by-four matrix of samples from :math:`N(3, 6.25)`: >>> 2.5 * np.matlib.randn((2, 4)) + 3 matrix([[ 4.74085004, 8.89381862, 4.09042411, 4.83721922], [ 7.52373709, 5.07933944, -2.64043543, 0.45610557]]) #random """ if isinstance(args[0], tuple): args = args[0] return asmatrix(np.random.randn(*args)) def repmat(a, m, n): """ Repeat a 0-D to 2-D array or matrix MxN times. Parameters ---------- a : array_like The array or matrix to be repeated. m, n : int The number of times `a` is repeated along the first and second axes. Returns ------- out : ndarray The result of repeating `a`. 
Examples -------- >>> import numpy.matlib >>> a0 = np.array(1) >>> np.matlib.repmat(a0, 2, 3) array([[1, 1, 1], [1, 1, 1]]) >>> a1 = np.arange(4) >>> np.matlib.repmat(a1, 2, 2) array([[0, 1, 2, 3, 0, 1, 2, 3], [0, 1, 2, 3, 0, 1, 2, 3]]) >>> a2 = np.asmatrix(np.arange(6).reshape(2, 3)) >>> np.matlib.repmat(a2, 2, 3) matrix([[0, 1, 2, 0, 1, 2, 0, 1, 2], [3, 4, 5, 3, 4, 5, 3, 4, 5], [0, 1, 2, 0, 1, 2, 0, 1, 2], [3, 4, 5, 3, 4, 5, 3, 4, 5]]) """ a = asanyarray(a) ndim = a.ndim if ndim == 0: origrows, origcols = (1, 1) elif ndim == 1: origrows, origcols = (1, a.shape[0]) else: origrows, origcols = a.shape rows = origrows * m cols = origcols * n c = a.reshape(1, a.size).repeat(m, 0).reshape(rows, origcols).repeat(n, 0) return c.reshape(rows, cols)
mit
devops2014/djangosite
django/conf/locale/nl/formats.py
504
4472
# -*- encoding: utf-8 -*- # This file is distributed under the same license as the Django package. # from __future__ import unicode_literals # The *_FORMAT strings use the Django date format syntax, # see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date DATE_FORMAT = 'j F Y' # '20 januari 2009' TIME_FORMAT = 'H:i' # '15:23' DATETIME_FORMAT = 'j F Y H:i' # '20 januari 2009 15:23' YEAR_MONTH_FORMAT = 'F Y' # 'januari 2009' MONTH_DAY_FORMAT = 'j F' # '20 januari' SHORT_DATE_FORMAT = 'j-n-Y' # '20-1-2009' SHORT_DATETIME_FORMAT = 'j-n-Y H:i' # '20-1-2009 15:23' FIRST_DAY_OF_WEEK = 1 # Monday (in Dutch 'maandag') # The *_INPUT_FORMATS strings use the Python strftime format syntax, # see http://docs.python.org/library/datetime.html#strftime-strptime-behavior DATE_INPUT_FORMATS = [ '%d-%m-%Y', '%d-%m-%y', # '20-01-2009', '20-01-09' '%d/%m/%Y', '%d/%m/%y', # '20/01/2009', '20/01/09' # '%d %b %Y', '%d %b %y', # '20 jan 2009', '20 jan 09' # '%d %B %Y', '%d %B %y', # '20 januari 2009', '20 januari 09' ] # Kept ISO formats as one is in first position TIME_INPUT_FORMATS = [ '%H:%M:%S', # '15:23:35' '%H:%M:%S.%f', # '15:23:35.000200' '%H.%M:%S', # '15.23:35' '%H.%M:%S.%f', # '15.23:35.000200' '%H.%M', # '15.23' '%H:%M', # '15:23' ] DATETIME_INPUT_FORMATS = [ # With time in %H:%M:%S : '%d-%m-%Y %H:%M:%S', '%d-%m-%y %H:%M:%S', '%Y-%m-%d %H:%M:%S', # '20-01-2009 15:23:35', '20-01-09 15:23:35', '2009-01-20 15:23:35' '%d/%m/%Y %H:%M:%S', '%d/%m/%y %H:%M:%S', '%Y/%m/%d %H:%M:%S', # '20/01/2009 15:23:35', '20/01/09 15:23:35', '2009/01/20 15:23:35' # '%d %b %Y %H:%M:%S', '%d %b %y %H:%M:%S', # '20 jan 2009 15:23:35', '20 jan 09 15:23:35' # '%d %B %Y %H:%M:%S', '%d %B %y %H:%M:%S', # '20 januari 2009 15:23:35', '20 januari 2009 15:23:35' # With time in %H:%M:%S.%f : '%d-%m-%Y %H:%M:%S.%f', '%d-%m-%y %H:%M:%S.%f', '%Y-%m-%d %H:%M:%S.%f', # '20-01-2009 15:23:35.000200', '20-01-09 15:23:35.000200', '2009-01-20 15:23:35.000200' '%d/%m/%Y %H:%M:%S.%f', '%d/%m/%y %H:%M:%S.%f', 
'%Y/%m/%d %H:%M:%S.%f', # '20/01/2009 15:23:35.000200', '20/01/09 15:23:35.000200', '2009/01/20 15:23:35.000200' # With time in %H.%M:%S : '%d-%m-%Y %H.%M:%S', '%d-%m-%y %H.%M:%S', # '20-01-2009 15.23:35', '20-01-09 15.23:35' '%d/%m/%Y %H.%M:%S', '%d/%m/%y %H.%M:%S', # '20/01/2009 15.23:35', '20/01/09 15.23:35' # '%d %b %Y %H.%M:%S', '%d %b %y %H.%M:%S', # '20 jan 2009 15.23:35', '20 jan 09 15.23:35' # '%d %B %Y %H.%M:%S', '%d %B %y %H.%M:%S', # '20 januari 2009 15.23:35', '20 januari 2009 15.23:35' # With time in %H.%M:%S.%f : '%d-%m-%Y %H.%M:%S.%f', '%d-%m-%y %H.%M:%S.%f', # '20-01-2009 15.23:35.000200', '20-01-09 15.23:35.000200' '%d/%m/%Y %H.%M:%S.%f', '%d/%m/%y %H.%M:%S.%f', # '20/01/2009 15.23:35.000200', '20/01/09 15.23:35.000200' # With time in %H:%M : '%d-%m-%Y %H:%M', '%d-%m-%y %H:%M', '%Y-%m-%d %H:%M', # '20-01-2009 15:23', '20-01-09 15:23', '2009-01-20 15:23' '%d/%m/%Y %H:%M', '%d/%m/%y %H:%M', '%Y/%m/%d %H:%M', # '20/01/2009 15:23', '20/01/09 15:23', '2009/01/20 15:23' # '%d %b %Y %H:%M', '%d %b %y %H:%M', # '20 jan 2009 15:23', '20 jan 09 15:23' # '%d %B %Y %H:%M', '%d %B %y %H:%M', # '20 januari 2009 15:23', '20 januari 2009 15:23' # With time in %H.%M : '%d-%m-%Y %H.%M', '%d-%m-%y %H.%M', # '20-01-2009 15.23', '20-01-09 15.23' '%d/%m/%Y %H.%M', '%d/%m/%y %H.%M', # '20/01/2009 15.23', '20/01/09 15.23' # '%d %b %Y %H.%M', '%d %b %y %H.%M', # '20 jan 2009 15.23', '20 jan 09 15.23' # '%d %B %Y %H.%M', '%d %B %y %H.%M', # '20 januari 2009 15.23', '20 januari 2009 15.23' # Without time : '%d-%m-%Y', '%d-%m-%y', '%Y-%m-%d', # '20-01-2009', '20-01-09', '2009-01-20' '%d/%m/%Y', '%d/%m/%y', '%Y/%m/%d', # '20/01/2009', '20/01/09', '2009/01/20' # '%d %b %Y', '%d %b %y', # '20 jan 2009', '20 jan 09' # '%d %B %Y', '%d %B %y', # '20 januari 2009', '20 januari 2009' ] DECIMAL_SEPARATOR = ',' THOUSAND_SEPARATOR = '.' NUMBER_GROUPING = 3
bsd-3-clause
Jorge-Rodriguez/ansible-modules-core
cloud/amazon/ec2_ami.py
4
19453
#!/usr/bin/python # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. DOCUMENTATION = ''' --- module: ec2_ami version_added: "1.3" short_description: create or destroy an image in ec2 description: - Creates or deletes ec2 images. options: instance_id: description: - instance id of the image to create required: false default: null name: description: - The name of the new image to create required: false default: null wait: description: - wait for the AMI to be in state 'available' before returning. required: false default: "no" choices: [ "yes", "no" ] wait_timeout: description: - how long before wait gives up, in seconds default: 300 state: description: - create or deregister/delete image required: false default: 'present' description: description: - An optional human-readable string describing the contents and purpose of the AMI. required: false default: null no_reboot: description: - An optional flag indicating that the bundling process should not attempt to shutdown the instance before bundling. If this flag is True, the responsibility of maintaining file system integrity is left to the owner of the instance. The default choice is "no". required: false default: no choices: [ "yes", "no" ] image_id: description: - Image ID to be deregistered. 
required: false default: null device_mapping: version_added: "2.0" description: - An optional list of device hashes/dictionaries with custom configurations (same block-device-mapping parameters) - "Valid properties include: device_name, volume_type, size (in GB), delete_on_termination (boolean), no_device (boolean), snapshot_id, iops (for io1 volume_type)" required: false default: null delete_snapshot: description: - Whether or not to delete snapshots when deregistering AMI. required: false default: "no" choices: [ "yes", "no" ] tags: description: - a dictionary of tags to add to the new image; '{"key":"value"}' and '{"key":"value","key":"value"}' required: false default: null version_added: "2.0" launch_permissions: description: - Users and groups that should be able to launch the ami. Expects dictionary with a key of user_ids and/or group_names. user_ids should be a list of account ids. group_name should be a list of groups, "all" is the only acceptable value currently. required: false default: null version_added: "2.0" author: - "Evan Duffield (@scicoin-project) <eduffield@iacquire.com>" - "Constantin Bugneac (@Constantin07) <constantin.bugneac@endava.com>" extends_documentation_fragment: - aws - ec2 ''' # Thank you to iAcquire for sponsoring development of this module. 
EXAMPLES = ''' # Basic AMI Creation - ec2_ami: aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx instance_id: i-xxxxxx wait: yes name: newtest tags: Name: newtest Service: TestService register: instance # Basic AMI Creation, without waiting - ec2_ami: aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx region: xxxxxx instance_id: i-xxxxxx wait: no name: newtest register: instance # AMI Creation, with a custom root-device size and another EBS attached - ec2_ami aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx instance_id: i-xxxxxx name: newtest device_mapping: - device_name: /dev/sda1 size: XXX delete_on_termination: true volume_type: gp2 - device_name: /dev/sdb size: YYY delete_on_termination: false volume_type: gp2 register: instance # AMI Creation, excluding a volume attached at /dev/sdb - ec2_ami aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx instance_id: i-xxxxxx name: newtest device_mapping: - device_name: /dev/sda1 size: XXX delete_on_termination: true volume_type: gp2 - device_name: /dev/sdb no_device: yes register: instance # Deregister/Delete AMI (keep associated snapshots) - ec2_ami: aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx region: xxxxxx image_id: "{{ instance.image_id }}" delete_snapshot: False state: absent # Deregister AMI (delete associated snapshots too) - ec2_ami: aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx region: xxxxxx image_id: "{{ instance.image_id }}" delete_snapshot: True state: absent # Update AMI Launch Permissions, making it public - ec2_ami: aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx region: xxxxxx image_id: "{{ instance.image_id }}" state: present 
launch_permissions: group_names: ['all'] # Allow AMI to be launched by another account - ec2_ami: aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx region: xxxxxx image_id: "{{ instance.image_id }}" state: present launch_permissions: user_ids: ['123456789012'] ''' RETURN = ''' architecture: description: architecture of image returned: when AMI is created or already exists type: string sample: "x86_64" block_device_mapping: description: block device mapping associated with image returned: when AMI is created or already exists type: a dictionary of block devices sample: { "/dev/sda1": { "delete_on_termination": true, "encrypted": false, "size": 10, "snapshot_id": "snap-1a03b80e7", "volume_type": "standard" } } creationDate: description: creation date of image returned: when AMI is created or already exists type: string sample: "2015-10-15T22:43:44.000Z" description: description: description of image returned: when AMI is created or already exists type: string sample: "nat-server" hypervisor: description: type of hypervisor returned: when AMI is created or already exists type: string sample: "xen" is_public: description: whether image is public returned: when AMI is created or already exists type: bool sample: false location: description: location of image returned: when AMI is created or already exists type: string sample: "315210894379/nat-server" name: description: ami name of image returned: when AMI is created or already exists type: string sample: "nat-server" owner_id: description: owner of image returned: when AMI is created or already exists type: string sample: "435210894375" platform: description: plaform of image returned: when AMI is created or already exists type: string sample: null root_device_name: description: root device name of image returned: when AMI is created or already exists type: string sample: "/dev/sda1" root_device_type: description: root device type of image returned: when AMI is created 
or already exists type: string sample: "ebs" state: description: state of image returned: when AMI is created or already exists type: string sample: "available" tags: description: a dictionary of tags assigned to image returned: when AMI is created or already exists type: dictionary of tags sample: { "Env": "devel", "Name": "nat-server" } virtualization_type: description: image virtualization type returned: when AMI is created or already exists type: string sample: "hvm" snapshots_deleted: description: a list of snapshot ids deleted after deregistering image returned: after AMI is deregistered, if 'delete_snapshot' is set to 'yes' type: list sample: [ "snap-fbcccb8f", "snap-cfe7cdb4" ] ''' import sys import time try: import boto import boto.ec2 from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping HAS_BOTO = True except ImportError: HAS_BOTO = False def get_block_device_mapping(image): """ Retrieves block device mapping from AMI """ bdm_dict = dict() if image is not None and hasattr(image, 'block_device_mapping'): bdm = getattr(image,'block_device_mapping') for device_name in bdm.keys(): bdm_dict[device_name] = { 'size': bdm[device_name].size, 'snapshot_id': bdm[device_name].snapshot_id, 'volume_type': bdm[device_name].volume_type, 'encrypted': bdm[device_name].encrypted, 'delete_on_termination': bdm[device_name].delete_on_termination } return bdm_dict def get_ami_info(image): return dict( image_id=image.id, state=image.state, architecture=image.architecture, block_device_mapping=get_block_device_mapping(image), creationDate=image.creationDate, description=image.description, hypervisor=image.hypervisor, is_public=image.is_public, location=image.location, ownerId=image.ownerId, root_device_name=image.root_device_name, root_device_type=image.root_device_type, tags=image.tags, virtualization_type = image.virtualization_type ) def create_image(module, ec2): """ Creates new AMI module : AnsibleModule object ec2: authenticated ec2 connection object 
""" instance_id = module.params.get('instance_id') name = module.params.get('name') wait = module.params.get('wait') wait_timeout = int(module.params.get('wait_timeout')) description = module.params.get('description') no_reboot = module.params.get('no_reboot') device_mapping = module.params.get('device_mapping') tags = module.params.get('tags') launch_permissions = module.params.get('launch_permissions') try: params = {'instance_id': instance_id, 'name': name, 'description': description, 'no_reboot': no_reboot} images = ec2.get_all_images(filters={'name': name}) if images and images[0]: module.exit_json(msg="AMI name already present", image_id=images[0].id, state=images[0].state, changed=False) if device_mapping: bdm = BlockDeviceMapping() for device in device_mapping: if 'device_name' not in device: module.fail_json(msg = 'Device name must be set for volume') device_name = device['device_name'] del device['device_name'] bd = BlockDeviceType(**device) bdm[device_name] = bd params['block_device_mapping'] = bdm image_id = ec2.create_image(**params) except boto.exception.BotoServerError as e: module.fail_json(msg="%s: %s" % (e.error_code, e.error_message)) # Wait until the image is recognized. EC2 API has eventual consistency, # such that a successful CreateImage API call doesn't guarantee the success # of subsequent DescribeImages API call using the new image id returned. for i in range(wait_timeout): try: img = ec2.get_image(image_id) if img.state == 'available': break except boto.exception.EC2ResponseError as e: if ('InvalidAMIID.NotFound' not in e.error_code and 'InvalidAMIID.Unavailable' not in e.error_code) and wait and i == wait_timeout - 1: module.fail_json(msg="Error while trying to find the new image. Using wait=yes and/or a longer wait_timeout may help. %s: %s" % (e.error_code, e.error_message)) finally: time.sleep(1) if img.state != 'available': module.fail_json(msg="Error while trying to find the new image. 
Using wait=yes and/or a longer wait_timeout may help.") if tags: try: ec2.create_tags(image_id, tags) except boto.exception.EC2ResponseError as e: module.fail_json(msg = "Image tagging failed => %s: %s" % (e.error_code, e.error_message)) if launch_permissions: try: img = ec2.get_image(image_id) img.set_launch_permissions(**launch_permissions) except boto.exception.BotoServerError as e: module.fail_json(msg="%s: %s" % (e.error_code, e.error_message), image_id=image_id) module.exit_json(msg="AMI creation operation complete", changed=True, **get_ami_info(img)) def deregister_image(module, ec2): """ Deregisters AMI """ image_id = module.params.get('image_id') delete_snapshot = module.params.get('delete_snapshot') wait = module.params.get('wait') wait_timeout = int(module.params.get('wait_timeout')) img = ec2.get_image(image_id) if img == None: module.fail_json(msg = "Image %s does not exist" % image_id, changed=False) # Get all associated snapshot ids before deregistering image otherwise this information becomes unavailable snapshots = [] if hasattr(img, 'block_device_mapping'): for key in img.block_device_mapping: snapshots.append(img.block_device_mapping[key].snapshot_id) # When trying to re-delete already deleted image it doesn't raise an exception # It just returns an object without image attributes if hasattr(img, 'id'): try: params = {'image_id': image_id, 'delete_snapshot': delete_snapshot} res = ec2.deregister_image(**params) except boto.exception.BotoServerError as e: module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) else: module.exit_json(msg = "Image %s has already been deleted" % image_id, changed=False) # wait here until the image is gone img = ec2.get_image(image_id) wait_timeout = time.time() + wait_timeout while wait and wait_timeout > time.time() and img is not None: img = ec2.get_image(image_id) time.sleep(3) if wait and wait_timeout <= time.time(): # waiting took too long module.fail_json(msg = "timed out waiting for image to be 
deregistered/deleted") # Boto library has hardcoded the deletion of the snapshot for the root volume mounted as '/dev/sda1' only # Make it possible to delete all snapshots which belong to image, including root block device mapped as '/dev/xvda' if delete_snapshot: try: for snapshot_id in snapshots: ec2.delete_snapshot(snapshot_id) except boto.exception.BotoServerError as e: if e.error_code == 'InvalidSnapshot.NotFound': # Don't error out if root volume snapshot was already deleted as part of deregister_image pass module.exit_json(msg="AMI deregister/delete operation complete", changed=True, snapshots_deleted=snapshots) else: module.exit_json(msg="AMI deregister/delete operation complete", changed=True) def update_image(module, ec2): """ Updates AMI """ image_id = module.params.get('image_id') launch_permissions = module.params.get('launch_permissions') if 'user_ids' in launch_permissions: launch_permissions['user_ids'] = [str(user_id) for user_id in launch_permissions['user_ids']] img = ec2.get_image(image_id) if img == None: module.fail_json(msg = "Image %s does not exist" % image_id, changed=False) try: set_permissions = img.get_launch_permissions() if set_permissions != launch_permissions: if ('user_ids' in launch_permissions and launch_permissions['user_ids']) or ('group_names' in launch_permissions and launch_permissions['group_names']): res = img.set_launch_permissions(**launch_permissions) elif ('user_ids' in set_permissions and set_permissions['user_ids']) or ('group_names' in set_permissions and set_permissions['group_names']): res = img.remove_launch_permissions(**set_permissions) else: module.exit_json(msg="AMI not updated", launch_permissions=set_permissions, changed=False) module.exit_json(msg="AMI launch permissions updated", launch_permissions=launch_permissions, set_perms=set_permissions, changed=True) else: module.exit_json(msg="AMI not updated", launch_permissions=set_permissions, changed=False) except boto.exception.BotoServerError as e: 
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) def main(): argument_spec = ec2_argument_spec() argument_spec.update(dict( instance_id = dict(), image_id = dict(), delete_snapshot = dict(default=False, type='bool'), name = dict(), wait = dict(type='bool', default=False), wait_timeout = dict(default=900), description = dict(default=""), no_reboot = dict(default=False, type='bool'), state = dict(default='present'), device_mapping = dict(type='list'), tags = dict(type='dict'), launch_permissions = dict(type='dict') ) ) module = AnsibleModule(argument_spec=argument_spec) if not HAS_BOTO: module.fail_json(msg='boto required for this module') try: ec2 = ec2_connect(module) except Exception as e: module.fail_json(msg="Error while connecting to aws: %s" % str(e)) if module.params.get('state') == 'absent': if not module.params.get('image_id'): module.fail_json(msg='image_id needs to be an ami image to registered/delete') deregister_image(module, ec2) elif module.params.get('state') == 'present': if module.params.get('image_id') and module.params.get('launch_permissions'): # Update image's launch permissions update_image(module, ec2) # Changed is always set to true when provisioning new AMI if not module.params.get('instance_id'): module.fail_json(msg='instance_id parameter is required for new image') if not module.params.get('name'): module.fail_json(msg='name parameter is required for new image') create_image(module, ec2) # import module snippets from ansible.module_utils.basic import * from ansible.module_utils.ec2 import * if __name__ == '__main__': main()
gpl-3.0
scality/manila
manila_tempest_tests/tests/api/test_security_services_mapping_negative.py
1
7370
# Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log # noqa import six # noqa from tempest import config # noqa from tempest import test # noqa from tempest_lib import exceptions as lib_exc # noqa import testtools # noqa from manila_tempest_tests.tests.api import base CONF = config.CONF LOG = log.getLogger(__name__) class SecServicesMappingNegativeTest(base.BaseSharesTest): @classmethod def resource_setup(cls): super(SecServicesMappingNegativeTest, cls).resource_setup() cls.sn = cls.create_share_network(cleanup_in_class=True) cls.ss = cls.create_security_service(cleanup_in_class=True) cls.cl = cls.shares_client @test.attr(type=["gate", "smoke", "negative"]) def test_add_sec_service_twice_to_share_network(self): self.cl.add_sec_service_to_share_network(self.sn["id"], self.ss["id"]) self.assertRaises(lib_exc.Conflict, self.cl.add_sec_service_to_share_network, self.sn["id"], self.ss["id"]) @test.attr(type=["gate", "smoke", "negative"]) def test_add_nonexistant_sec_service_to_share_network(self): self.assertRaises(lib_exc.NotFound, self.cl.add_sec_service_to_share_network, self.sn["id"], "wrong_ss_id") @test.attr(type=["gate", "smoke", "negative"]) def test_add_empty_sec_service_id_to_share_network(self): self.assertRaises(lib_exc.NotFound, self.cl.add_sec_service_to_share_network, self.sn["id"], "") @test.attr(type=["gate", "smoke", "negative"]) def test_add_sec_service_to_nonexistant_share_network(self): 
self.assertRaises(lib_exc.NotFound, self.cl.add_sec_service_to_share_network, "wrong_sn_id", self.ss["id"]) @test.attr(type=["gate", "smoke", "negative"]) def test_add_sec_service_to_share_network_with_empty_id(self): self.assertRaises(lib_exc.NotFound, self.cl.add_sec_service_to_share_network, "", self.ss["id"]) @test.attr(type=["gate", "smoke", "negative"]) def test_list_sec_services_for_nonexistant_share_network(self): self.assertRaises(lib_exc.NotFound, self.cl.list_sec_services_for_share_network, "wrong_id") @test.attr(type=["gate", "smoke", "negative"]) def test_delete_nonexistant_sec_service_from_share_network(self): self.assertRaises(lib_exc.NotFound, self.cl.remove_sec_service_from_share_network, self.sn["id"], "wrong_id") @test.attr(type=["gate", "smoke", "negative"]) def test_delete_sec_service_from_nonexistant_share_network(self): self.assertRaises(lib_exc.NotFound, self.cl.remove_sec_service_from_share_network, "wrong_id", self.ss["id"]) @test.attr(type=["gate", "smoke", "negative"]) def test_delete_nonexistant_ss_from_nonexistant_sn(self): self.assertRaises(lib_exc.NotFound, self.cl.remove_sec_service_from_share_network, "wrong_id", "wrong_id") @test.attr(type=["gate", "smoke", "negative"]) @testtools.skipIf( not CONF.share.multitenancy_enabled, "Only for multitenancy.") def test_delete_ss_from_sn_used_by_share_server(self): sn = self.shares_client.get_share_network( self.os.shares_client.share_network_id) fresh_sn = self.create_share_network( neutron_net_id=sn["neutron_net_id"], neutron_subnet_id=sn["neutron_subnet_id"]) self.shares_client.add_sec_service_to_share_network( fresh_sn["id"], self.ss["id"]) # Security service with fake data is used, so if we use backend driver # that fails on wrong data, we expect error here. # We require any share that uses our share-network. 
try: self.create_share( share_network_id=fresh_sn["id"], cleanup_in_class=False) except Exception as e: # we do wait for either 'error' or 'available' status because # it is the only available statuses for proper deletion. LOG.warning("Caught exception. It is expected in case backend " "fails having security-service with improper data " "that leads to share-server creation error. " "%s" % six.text_type(e)) self.assertRaises(lib_exc.Forbidden, self.cl.remove_sec_service_from_share_network, fresh_sn["id"], self.ss["id"]) @test.attr(type=["gate", "smoke", "negative"]) def test_try_map_two_ss_with_same_type_to_sn(self): # create share network data = self.generate_share_network_data() sn = self.create_share_network(client=self.cl, **data) self.assertDictContainsSubset(data, sn) # create security services with same type security_services = [] for i in range(2): data = self.generate_security_service_data() ss = self.create_security_service(client=self.cl, **data) self.assertDictContainsSubset(data, ss) security_services.insert(i, ss) # Add security service to share network self.cl.add_sec_service_to_share_network( sn["id"], security_services[0]["id"]) # Try to add security service with same type self.assertRaises(lib_exc.Conflict, self.cl.add_sec_service_to_share_network, sn["id"], security_services[1]["id"]) @test.attr(type=["gate", "smoke", "negative"]) def test_try_delete_ss_that_assigned_to_sn(self): # create share network data = self.generate_share_network_data() sn = self.create_share_network(client=self.cl, **data) self.assertDictContainsSubset(data, sn) # create security service data = self.generate_security_service_data() ss = self.create_security_service(client=self.cl, **data) self.assertDictContainsSubset(data, ss) # Add security service to share network self.cl.add_sec_service_to_share_network(sn["id"], ss["id"]) # Try delete ss, that has been assigned to some sn self.assertRaises(lib_exc.Forbidden, self.cl.delete_security_service, ss["id"], ) # remove 
seurity service from share-network self.cl.remove_sec_service_from_share_network(sn["id"], ss["id"])
apache-2.0
synicalsyntax/zulip
zerver/migrations/0285_remove_realm_google_hangouts_domain.py
3
1363
# Generated by Django 2.2.13 on 2020-06-14 01:58 from django.db import migrations from django.db.backends.postgresql.schema import DatabaseSchemaEditor from django.db.migrations.state import StateApps VIDEO_CHAT_PROVIDERS = { 'jitsi_meet': { 'name': "Jitsi Meet", 'id': 1, }, 'google_hangouts': { 'name': "Google Hangouts", 'id': 2, }, } def remove_google_hangouts_provider(apps: StateApps, schema_editor: DatabaseSchemaEditor) -> None: # We are removing the Google Hangout integration because Google has # removed the Hangouts brand. All the realms that used Hangouts as # their video chat provided are now setted to the default, jitsi. Realm = apps.get_model('zerver', 'Realm') Realm.objects.filter(video_chat_provider=VIDEO_CHAT_PROVIDERS['google_hangouts']['id']).update( video_chat_provider=VIDEO_CHAT_PROVIDERS['jitsi_meet']['id'] ) class Migration(migrations.Migration): dependencies = [ ('zerver', '0284_convert_realm_admins_to_realm_owners'), ] operations = [ migrations.RemoveField( model_name='realm', name='google_hangouts_domain', ), migrations.RunPython( remove_google_hangouts_provider, reverse_code=migrations.RunPython.noop, elidable=True ), ]
apache-2.0
Accelerite/cinder
cinder/tests/test_rbd.py
3
48012
# Copyright 2012 Josh Durgin # Copyright 2013 Canonical Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import math import os import tempfile import mock from oslo_utils import timeutils from oslo_utils import units from cinder import db from cinder import exception from cinder.i18n import _ from cinder.image import image_utils from cinder.openstack.common import log as logging from cinder import test from cinder.tests.image import fake as fake_image from cinder.tests.test_volume import DriverTestCase from cinder.volume import configuration as conf import cinder.volume.drivers.rbd as driver from cinder.volume.flows.manager import create_volume LOG = logging.getLogger(__name__) # This is used to collect raised exceptions so that tests may check what was # raised. # NOTE: this must be initialised in test setUp(). RAISED_EXCEPTIONS = [] class MockException(Exception): def __init__(self, *args, **kwargs): RAISED_EXCEPTIONS.append(self.__class__) class MockImageNotFoundException(MockException): """Used as mock for rbd.ImageNotFound.""" class MockImageBusyException(MockException): """Used as mock for rbd.ImageBusy.""" class MockImageExistsException(MockException): """Used as mock for rbd.ImageExists.""" def common_mocks(f): """Decorator to set mocks common to all tests. The point of doing these mocks here is so that we don't accidentally set mocks that can't/don't get unset. 
""" def _common_inner_inner1(inst, *args, **kwargs): @mock.patch('cinder.volume.drivers.rbd.RBDVolumeProxy') @mock.patch('cinder.volume.drivers.rbd.RADOSClient') @mock.patch('cinder.backup.drivers.ceph.rbd') @mock.patch('cinder.backup.drivers.ceph.rados') def _common_inner_inner2(mock_rados, mock_rbd, mock_client, mock_proxy): inst.mock_rbd = mock_rbd inst.mock_rados = mock_rados inst.mock_client = mock_client inst.mock_proxy = mock_proxy inst.mock_rbd.RBD.Error = Exception inst.mock_rados.Error = Exception inst.mock_rbd.ImageBusy = MockImageBusyException inst.mock_rbd.ImageNotFound = MockImageNotFoundException inst.mock_rbd.ImageExists = MockImageExistsException inst.driver.rbd = inst.mock_rbd inst.driver.rados = inst.mock_rados return f(inst, *args, **kwargs) return _common_inner_inner2() return _common_inner_inner1 CEPH_MON_DUMP = """dumped monmap epoch 1 { "epoch": 1, "fsid": "33630410-6d93-4d66-8e42-3b953cf194aa", "modified": "2013-05-22 17:44:56.343618", "created": "2013-05-22 17:44:56.343618", "mons": [ { "rank": 0, "name": "a", "addr": "[::1]:6789\/0"}, { "rank": 1, "name": "b", "addr": "[::1]:6790\/0"}, { "rank": 2, "name": "c", "addr": "[::1]:6791\/0"}, { "rank": 3, "name": "d", "addr": "127.0.0.1:6792\/0"}, { "rank": 4, "name": "e", "addr": "example.com:6791\/0"}], "quorum": [ 0, 1, 2]} """ class RBDTestCase(test.TestCase): def setUp(self): global RAISED_EXCEPTIONS RAISED_EXCEPTIONS = [] super(RBDTestCase, self).setUp() self.cfg = mock.Mock(spec=conf.Configuration) self.cfg.volume_tmp_dir = None self.cfg.image_conversion_dir = None self.cfg.rbd_pool = 'rbd' self.cfg.rbd_ceph_conf = None self.cfg.rbd_secret_uuid = None self.cfg.rbd_user = None self.cfg.volume_dd_blocksize = '1M' self.cfg.rbd_store_chunk_size = 4 mock_exec = mock.Mock() mock_exec.return_value = ('', '') self.driver = driver.RBDDriver(execute=mock_exec, configuration=self.cfg) self.driver.set_initialized() self.volume_name = u'volume-00000001' self.snapshot_name = u'snapshot-00000001' 
self.volume_size = 1 self.volume = dict(name=self.volume_name, size=self.volume_size) self.snapshot = dict(volume_name=self.volume_name, name=self.snapshot_name) @common_mocks def test_create_volume(self): client = self.mock_client.return_value client.__enter__.return_value = client with mock.patch.object(self.driver, '_supports_layering') as \ mock_supports_layering: mock_supports_layering.return_value = True self.driver.create_volume(self.volume) chunk_size = self.cfg.rbd_store_chunk_size * units.Mi order = int(math.log(chunk_size, 2)) args = [client.ioctx, str(self.volume_name), self.volume_size * units.Gi, order] kwargs = {'old_format': False, 'features': self.mock_rbd.RBD_FEATURE_LAYERING} self.mock_rbd.RBD.return_value.create.assert_called_once_with( *args, **kwargs) client.__enter__.assert_called_once_with() client.__exit__.assert_called_once_with(None, None, None) mock_supports_layering.assert_called_once_with() @common_mocks def test_manage_existing_get_size(self): with mock.patch.object(self.driver.rbd.Image(), 'size') as \ mock_rbd_image_size: with mock.patch.object(self.driver.rbd.Image(), 'close') \ as mock_rbd_image_close: mock_rbd_image_size.return_value = 2 * units.Gi existing_ref = {'source-name': self.volume_name} return_size = self.driver.manage_existing_get_size( self.volume, existing_ref) self.assertEqual(2, return_size) mock_rbd_image_size.assert_called_once_with() mock_rbd_image_close.assert_called_once_with() @common_mocks def test_manage_existing_get_invalid_size(self): with mock.patch.object(self.driver.rbd.Image(), 'size') as \ mock_rbd_image_size: with mock.patch.object(self.driver.rbd.Image(), 'close') \ as mock_rbd_image_close: mock_rbd_image_size.return_value = 'abcd' existing_ref = {'source-name': self.volume_name} self.assertRaises(exception.VolumeBackendAPIException, self.driver.manage_existing_get_size, self.volume, existing_ref) mock_rbd_image_size.assert_called_once_with() mock_rbd_image_close.assert_called_once_with() 
@common_mocks def test_manage_existing(self): client = self.mock_client.return_value client.__enter__.return_value = client with mock.patch.object(self.driver.rbd.RBD(), 'rename') as \ mock_rbd_image_rename: exist_volume = 'vol-exist' existing_ref = {'source-name': exist_volume} mock_rbd_image_rename.return_value = 0 self.driver.manage_existing(self.volume, existing_ref) mock_rbd_image_rename.assert_called_with( client.ioctx, exist_volume, self.volume_name) @common_mocks def test_manage_existing_with_exist_rbd_image(self): client = self.mock_client.return_value client.__enter__.return_value = client self.mock_rbd.RBD.return_value.rename.side_effect = ( MockImageExistsException) exist_volume = 'vol-exist' existing_ref = {'source-name': exist_volume} self.assertRaises(self.mock_rbd.ImageExists, self.driver.manage_existing, self.volume, existing_ref) # Make sure the exception was raised self.assertEqual(RAISED_EXCEPTIONS, [self.mock_rbd.ImageExists]) @common_mocks def test_create_volume_no_layering(self): client = self.mock_client.return_value client.__enter__.return_value = client with mock.patch.object(self.driver, '_supports_layering') as \ mock_supports_layering: mock_supports_layering.return_value = False self.driver.create_volume(self.volume) chunk_size = self.cfg.rbd_store_chunk_size * units.Mi order = int(math.log(chunk_size, 2)) args = [client.ioctx, str(self.volume_name), self.volume_size * units.Gi, order] kwargs = {'old_format': True, 'features': 0} self.mock_rbd.RBD.return_value.create.assert_called_once_with( *args, **kwargs) client.__enter__.assert_called_once_with() client.__exit__.assert_called_once_with(None, None, None) mock_supports_layering.assert_called_once_with() @common_mocks def test_delete_backup_snaps(self): self.driver.rbd.Image.remove_snap = mock.Mock() with mock.patch.object(self.driver, '_get_backup_snaps') as \ mock_get_backup_snaps: mock_get_backup_snaps.return_value = [{'name': 'snap1'}] rbd_image = self.driver.rbd.Image() 
self.driver._delete_backup_snaps(rbd_image) mock_get_backup_snaps.assert_called_once_with(rbd_image) self.assertTrue( self.driver.rbd.Image.return_value.remove_snap.called) @common_mocks def test_delete_volume(self): client = self.mock_client.return_value self.driver.rbd.Image.return_value.list_snaps.return_value = [] with mock.patch.object(self.driver, '_get_clone_info') as \ mock_get_clone_info: with mock.patch.object(self.driver, '_delete_backup_snaps') as \ mock_delete_backup_snaps: mock_get_clone_info.return_value = (None, None, None) self.driver.delete_volume(self.volume) mock_get_clone_info.assert_called_once_with( self.mock_rbd.Image.return_value, self.volume_name, None) (self.driver.rbd.Image.return_value .list_snaps.assert_called_once_with()) client.__enter__.assert_called_once_with() client.__exit__.assert_called_once_with(None, None, None) mock_delete_backup_snaps.assert_called_once_with( self.mock_rbd.Image.return_value) self.assertFalse( self.driver.rbd.Image.return_value.unprotect_snap.called) self.assertEqual( 1, self.driver.rbd.RBD.return_value.remove.call_count) @common_mocks def delete_volume_not_found(self): self.mock_rbd.Image.side_effect = self.mock_rbd.ImageNotFound self.assertIsNone(self.driver.delete_volume(self.volume)) self.mock_rbd.Image.assert_called_once_with() # Make sure the exception was raised self.assertEqual(RAISED_EXCEPTIONS, [self.mock_rbd.ImageNotFound]) @common_mocks def test_delete_busy_volume(self): self.mock_rbd.Image.return_value.list_snaps.return_value = [] self.mock_rbd.RBD.return_value.remove.side_effect = ( self.mock_rbd.ImageBusy) with mock.patch.object(self.driver, '_get_clone_info') as \ mock_get_clone_info: mock_get_clone_info.return_value = (None, None, None) with mock.patch.object(self.driver, '_delete_backup_snaps') as \ mock_delete_backup_snaps: with mock.patch.object(driver, 'RADOSClient') as \ mock_rados_client: self.assertRaises(exception.VolumeIsBusy, self.driver.delete_volume, self.volume) 
mock_get_clone_info.assert_called_once_with( self.mock_rbd.Image.return_value, self.volume_name, None) (self.mock_rbd.Image.return_value.list_snaps .assert_called_once_with()) mock_rados_client.assert_called_once_with(self.driver) mock_delete_backup_snaps.assert_called_once_with( self.mock_rbd.Image.return_value) self.assertFalse( self.mock_rbd.Image.return_value.unprotect_snap.called) self.assertEqual( 1, self.mock_rbd.RBD.return_value.remove.call_count) # Make sure the exception was raised self.assertEqual(RAISED_EXCEPTIONS, [self.mock_rbd.ImageBusy]) @common_mocks def test_delete_volume_not_found(self): self.mock_rbd.Image.return_value.list_snaps.return_value = [] self.mock_rbd.RBD.return_value.remove.side_effect = ( self.mock_rbd.ImageNotFound) with mock.patch.object(self.driver, '_get_clone_info') as \ mock_get_clone_info: mock_get_clone_info.return_value = (None, None, None) with mock.patch.object(self.driver, '_delete_backup_snaps') as \ mock_delete_backup_snaps: with mock.patch.object(driver, 'RADOSClient') as \ mock_rados_client: self.assertIsNone(self.driver.delete_volume(self.volume)) mock_get_clone_info.assert_called_once_with( self.mock_rbd.Image.return_value, self.volume_name, None) (self.mock_rbd.Image.return_value.list_snaps .assert_called_once_with()) mock_rados_client.assert_called_once_with(self.driver) mock_delete_backup_snaps.assert_called_once_with( self.mock_rbd.Image.return_value) self.assertFalse( self.mock_rbd.Image.return_value.unprotect_snap.called) self.assertEqual( 1, self.mock_rbd.RBD.return_value.remove.call_count) # Make sure the exception was raised self.assertEqual(RAISED_EXCEPTIONS, [self.mock_rbd.ImageNotFound]) @common_mocks def test_create_snapshot(self): proxy = self.mock_proxy.return_value proxy.__enter__.return_value = proxy self.driver.create_snapshot(self.snapshot) args = [str(self.snapshot_name)] proxy.create_snap.assert_called_with(*args) proxy.protect_snap.assert_called_with(*args) @common_mocks def 
test_delete_snapshot(self): proxy = self.mock_proxy.return_value proxy.__enter__.return_value = proxy self.driver.delete_snapshot(self.snapshot) args = [str(self.snapshot_name)] proxy.remove_snap.assert_called_with(*args) proxy.unprotect_snap.assert_called_with(*args) @common_mocks def test_get_clone_info(self): volume = self.mock_rbd.Image() volume.set_snap = mock.Mock() volume.parent_info = mock.Mock() parent_info = ('a', 'b', '%s.clone_snap' % (self.volume_name)) volume.parent_info.return_value = parent_info info = self.driver._get_clone_info(volume, self.volume_name) self.assertEqual(info, parent_info) self.assertFalse(volume.set_snap.called) volume.parent_info.assert_called_once_with() @common_mocks def test_get_clone_info_w_snap(self): volume = self.mock_rbd.Image() volume.set_snap = mock.Mock() volume.parent_info = mock.Mock() parent_info = ('a', 'b', '%s.clone_snap' % (self.volume_name)) volume.parent_info.return_value = parent_info snapshot = self.mock_rbd.ImageSnapshot() info = self.driver._get_clone_info(volume, self.volume_name, snap=snapshot) self.assertEqual(info, parent_info) self.assertEqual(volume.set_snap.call_count, 2) volume.parent_info.assert_called_once_with() @common_mocks def test_get_clone_info_w_exception(self): volume = self.mock_rbd.Image() volume.set_snap = mock.Mock() volume.parent_info = mock.Mock() volume.parent_info.side_effect = self.mock_rbd.ImageNotFound snapshot = self.mock_rbd.ImageSnapshot() info = self.driver._get_clone_info(volume, self.volume_name, snap=snapshot) self.assertEqual(info, (None, None, None)) self.assertEqual(volume.set_snap.call_count, 2) volume.parent_info.assert_called_once_with() # Make sure the exception was raised self.assertEqual(RAISED_EXCEPTIONS, [self.mock_rbd.ImageNotFound]) @common_mocks def test_get_clone_info_deleted_volume(self): volume = self.mock_rbd.Image() volume.set_snap = mock.Mock() volume.parent_info = mock.Mock() parent_info = ('a', 'b', '%s.clone_snap' % (self.volume_name)) 
volume.parent_info.return_value = parent_info info = self.driver._get_clone_info(volume, "%s.deleted" % (self.volume_name)) self.assertEqual(info, parent_info) self.assertFalse(volume.set_snap.called) volume.parent_info.assert_called_once_with() @common_mocks def test_create_cloned_volume_same_size(self): src_name = u'volume-00000001' dst_name = u'volume-00000002' self.cfg.rbd_max_clone_depth = 2 with mock.patch.object(self.driver, '_get_clone_depth') as \ mock_get_clone_depth: # Try with no flatten required with mock.patch.object(self.driver, '_resize') as mock_resize: mock_get_clone_depth.return_value = 1 self.driver.create_cloned_volume({'name': dst_name, 'size': 10}, {'name': src_name, 'size': 10}) (self.mock_rbd.Image.return_value.create_snap .assert_called_once_with('.'.join((dst_name, 'clone_snap')))) (self.mock_rbd.Image.return_value.protect_snap .assert_called_once_with('.'.join((dst_name, 'clone_snap')))) self.assertEqual( 1, self.mock_rbd.RBD.return_value.clone.call_count) self.mock_rbd.Image.return_value.close \ .assert_called_once_with() self.assertTrue(mock_get_clone_depth.called) self.assertEqual( 0, mock_resize.call_count) @common_mocks def test_create_cloned_volume_different_size(self): src_name = u'volume-00000001' dst_name = u'volume-00000002' self.cfg.rbd_max_clone_depth = 2 with mock.patch.object(self.driver, '_get_clone_depth') as \ mock_get_clone_depth: # Try with no flatten required with mock.patch.object(self.driver, '_resize') as mock_resize: mock_get_clone_depth.return_value = 1 self.driver.create_cloned_volume({'name': dst_name, 'size': 20}, {'name': src_name, 'size': 10}) (self.mock_rbd.Image.return_value.create_snap .assert_called_once_with('.'.join((dst_name, 'clone_snap')))) (self.mock_rbd.Image.return_value.protect_snap .assert_called_once_with('.'.join((dst_name, 'clone_snap')))) self.assertEqual( 1, self.mock_rbd.RBD.return_value.clone.call_count) self.mock_rbd.Image.return_value.close \ .assert_called_once_with() 
self.assertTrue(mock_get_clone_depth.called) self.assertEqual( 1, mock_resize.call_count) @common_mocks def test_create_cloned_volume_w_flatten(self): src_name = u'volume-00000001' dst_name = u'volume-00000002' self.cfg.rbd_max_clone_depth = 1 self.mock_rbd.RBD.return_value.clone.side_effect = ( self.mock_rbd.RBD.Error) with mock.patch.object(self.driver, '_get_clone_depth') as \ mock_get_clone_depth: # Try with no flatten required mock_get_clone_depth.return_value = 1 self.assertRaises(self.mock_rbd.RBD.Error, self.driver.create_cloned_volume, dict(name=dst_name), dict(name=src_name)) (self.mock_rbd.Image.return_value.create_snap .assert_called_once_with('.'.join((dst_name, 'clone_snap')))) (self.mock_rbd.Image.return_value.protect_snap .assert_called_once_with('.'.join((dst_name, 'clone_snap')))) self.assertEqual( 1, self.mock_rbd.RBD.return_value.clone.call_count) (self.mock_rbd.Image.return_value.unprotect_snap .assert_called_once_with('.'.join((dst_name, 'clone_snap')))) (self.mock_rbd.Image.return_value.remove_snap .assert_called_once_with('.'.join((dst_name, 'clone_snap')))) self.mock_rbd.Image.return_value.close.assert_called_once_with() self.assertTrue(mock_get_clone_depth.called) @common_mocks def test_create_cloned_volume_w_clone_exception(self): src_name = u'volume-00000001' dst_name = u'volume-00000002' self.cfg.rbd_max_clone_depth = 2 self.mock_rbd.RBD.return_value.clone.side_effect = ( self.mock_rbd.RBD.Error) with mock.patch.object(self.driver, '_get_clone_depth') as \ mock_get_clone_depth: # Try with no flatten required mock_get_clone_depth.return_value = 1 self.assertRaises(self.mock_rbd.RBD.Error, self.driver.create_cloned_volume, {'name': dst_name}, {'name': src_name}) (self.mock_rbd.Image.return_value.create_snap .assert_called_once_with('.'.join((dst_name, 'clone_snap')))) (self.mock_rbd.Image.return_value.protect_snap .assert_called_once_with('.'.join((dst_name, 'clone_snap')))) self.assertEqual( 1, 
self.mock_rbd.RBD.return_value.clone.call_count) (self.mock_rbd.Image.return_value.unprotect_snap .assert_called_once_with('.'.join((dst_name, 'clone_snap')))) (self.mock_rbd.Image.return_value.remove_snap .assert_called_once_with('.'.join((dst_name, 'clone_snap')))) self.mock_rbd.Image.return_value.close.assert_called_once_with() @common_mocks def test_good_locations(self): locations = ['rbd://fsid/pool/image/snap', 'rbd://%2F/%2F/%2F/%2F', ] map(self.driver._parse_location, locations) @common_mocks def test_bad_locations(self): locations = ['rbd://image', 'http://path/to/somewhere/else', 'rbd://image/extra', 'rbd://image/', 'rbd://fsid/pool/image/', 'rbd://fsid/pool/image/snap/', 'rbd://///', ] for loc in locations: self.assertRaises(exception.ImageUnacceptable, self.driver._parse_location, loc) self.assertFalse( self.driver._is_cloneable(loc, {'disk_format': 'raw'})) @common_mocks def test_cloneable(self): with mock.patch.object(self.driver, '_get_fsid') as mock_get_fsid: mock_get_fsid.return_value = 'abc' location = 'rbd://abc/pool/image/snap' info = {'disk_format': 'raw'} self.assertTrue(self.driver._is_cloneable(location, info)) self.assertTrue(mock_get_fsid.called) @common_mocks def test_uncloneable_different_fsid(self): with mock.patch.object(self.driver, '_get_fsid') as mock_get_fsid: mock_get_fsid.return_value = 'abc' location = 'rbd://def/pool/image/snap' self.assertFalse( self.driver._is_cloneable(location, {'disk_format': 'raw'})) self.assertTrue(mock_get_fsid.called) @common_mocks def test_uncloneable_unreadable(self): with mock.patch.object(self.driver, '_get_fsid') as mock_get_fsid: mock_get_fsid.return_value = 'abc' location = 'rbd://abc/pool/image/snap' self.driver.rbd.Error = Exception self.mock_proxy.side_effect = Exception args = [location, {'disk_format': 'raw'}] self.assertFalse(self.driver._is_cloneable(*args)) self.assertEqual(1, self.mock_proxy.call_count) self.assertTrue(mock_get_fsid.called) @common_mocks def 
test_uncloneable_bad_format(self): with mock.patch.object(self.driver, '_get_fsid') as mock_get_fsid: mock_get_fsid.return_value = 'abc' location = 'rbd://abc/pool/image/snap' formats = ['qcow2', 'vmdk', 'vdi'] for f in formats: self.assertFalse( self.driver._is_cloneable(location, {'disk_format': f})) self.assertTrue(mock_get_fsid.called) def _copy_image(self): with mock.patch.object(tempfile, 'NamedTemporaryFile'): with mock.patch.object(os.path, 'exists') as mock_exists: mock_exists.return_value = True with mock.patch.object(image_utils, 'fetch_to_raw'): with mock.patch.object(self.driver, 'delete_volume'): with mock.patch.object(self.driver, '_resize'): mock_image_service = mock.MagicMock() args = [None, {'name': 'test', 'size': 1}, mock_image_service, None] self.driver.copy_image_to_volume(*args) @common_mocks def test_copy_image_no_volume_tmp(self): self.cfg.volume_tmp_dir = None self.cfg.image_conversion_dir = None self._copy_image() @common_mocks def test_copy_image_volume_tmp(self): self.cfg.volume_tmp_dir = None self.cfg.image_conversion_dir = '/var/run/cinder/tmp' self._copy_image() @common_mocks def test_update_volume_stats(self): client = self.mock_client.return_value client.__enter__.return_value = client client.cluster = mock.Mock() client.cluster.get_cluster_stats = mock.Mock() client.cluster.get_cluster_stats.return_value = {'kb': 1024 ** 3, 'kb_avail': 1024 ** 2} self.driver.configuration.safe_get = mock.Mock() self.driver.configuration.safe_get.return_value = 'RBD' expected = dict( volume_backend_name='RBD', vendor_name='Open Source', driver_version=self.driver.VERSION, storage_protocol='ceph', total_capacity_gb=1024, free_capacity_gb=1, reserved_percentage=0) actual = self.driver.get_volume_stats(True) client.cluster.get_cluster_stats.assert_called_once_with() self.assertDictMatch(expected, actual) @common_mocks def test_update_volume_stats_error(self): client = self.mock_client.return_value client.__enter__.return_value = client client.cluster 
= mock.Mock() client.cluster.get_cluster_stats = mock.Mock() client.cluster.get_cluster_stats.side_effect = Exception self.driver.configuration.safe_get = mock.Mock() self.driver.configuration.safe_get.return_value = 'RBD' expected = dict(volume_backend_name='RBD', vendor_name='Open Source', driver_version=self.driver.VERSION, storage_protocol='ceph', total_capacity_gb='unknown', free_capacity_gb='unknown', reserved_percentage=0) actual = self.driver.get_volume_stats(True) client.cluster.get_cluster_stats.assert_called_once_with() self.assertDictMatch(expected, actual) @common_mocks def test_get_mon_addrs(self): with mock.patch.object(self.driver, '_execute') as mock_execute: mock_execute.return_value = (CEPH_MON_DUMP, '') hosts = ['::1', '::1', '::1', '127.0.0.1', 'example.com'] ports = ['6789', '6790', '6791', '6792', '6791'] self.assertEqual((hosts, ports), self.driver._get_mon_addrs()) @common_mocks def test_initialize_connection(self): hosts = ['::1', '::1', '::1', '127.0.0.1', 'example.com'] ports = ['6789', '6790', '6791', '6792', '6791'] with mock.patch.object(self.driver, '_get_mon_addrs') as \ mock_get_mon_addrs: mock_get_mon_addrs.return_value = (hosts, ports) expected = { 'driver_volume_type': 'rbd', 'data': { 'name': '%s/%s' % (self.cfg.rbd_pool, self.volume_name), 'hosts': hosts, 'ports': ports, 'auth_enabled': False, 'auth_username': None, 'secret_type': 'ceph', 'secret_uuid': None, } } volume = dict(name=self.volume_name) actual = self.driver.initialize_connection(volume, None) self.assertDictMatch(expected, actual) self.assertTrue(mock_get_mon_addrs.called) @common_mocks def test_clone(self): src_pool = u'images' src_image = u'image-name' src_snap = u'snapshot-name' client_stack = [] def mock__enter__(inst): def _inner(): client_stack.append(inst) return inst return _inner client = self.mock_client.return_value # capture both rados client used to perform the clone client.__enter__.side_effect = mock__enter__(client) self.driver._clone(self.volume, 
src_pool, src_image, src_snap) args = [client_stack[0].ioctx, str(src_image), str(src_snap), client_stack[1].ioctx, str(self.volume_name)] kwargs = {'features': self.mock_rbd.RBD_FEATURE_LAYERING} self.mock_rbd.RBD.return_value.clone.assert_called_once_with( *args, **kwargs) self.assertEqual(client.__enter__.call_count, 2) @common_mocks def test_extend_volume(self): fake_size = '20' fake_vol = {'project_id': 'testprjid', 'name': self.volume_name, 'size': fake_size, 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66'} self.mox.StubOutWithMock(self.driver, '_resize') size = int(fake_size) * units.Gi self.driver._resize(fake_vol, size=size) self.mox.ReplayAll() self.driver.extend_volume(fake_vol, fake_size) self.mox.VerifyAll() @common_mocks def test_retype(self): context = {} diff = {'encryption': {}, 'extra_specs': {}} fake_volume = {'name': 'testvolume', 'host': 'currenthost'} fake_type = 'high-IOPS' # no support for migration host = {'host': 'anotherhost'} self.assertFalse(self.driver.retype(context, fake_volume, fake_type, diff, host)) host = {'host': 'currenthost'} # no support for changing encryption diff['encryption'] = {'non-empty': 'non-empty'} self.assertFalse(self.driver.retype(context, fake_volume, fake_type, diff, host)) diff['encryption'] = {} # no support for changing extra_specs diff['extra_specs'] = {'non-empty': 'non-empty'} self.assertFalse(self.driver.retype(context, fake_volume, fake_type, diff, host)) diff['extra_specs'] = {} self.assertTrue(self.driver.retype(context, fake_volume, fake_type, diff, host)) def test_rbd_volume_proxy_init(self): mock_driver = mock.Mock(name='driver') mock_driver._connect_to_rados.return_value = (None, None) with driver.RBDVolumeProxy(mock_driver, self.volume_name): self.assertEqual(1, mock_driver._connect_to_rados.call_count) self.assertFalse(mock_driver._disconnect_from_rados.called) self.assertEqual(1, mock_driver._disconnect_from_rados.call_count) mock_driver.reset_mock() snap = u'snapshot-name' with 
driver.RBDVolumeProxy(mock_driver, self.volume_name, snapshot=snap): self.assertEqual(1, mock_driver._connect_to_rados.call_count) self.assertFalse(mock_driver._disconnect_from_rados.called) self.assertEqual(1, mock_driver._disconnect_from_rados.call_count) @common_mocks def test_connect_to_rados(self): # Default self.cfg.rados_connect_timeout = -1 self.mock_rados.Rados.return_value.open_ioctx.return_value = \ self.mock_rados.Rados.return_value.ioctx # default configured pool ret = self.driver._connect_to_rados() self.assertTrue(self.mock_rados.Rados.return_value.connect.called) # Expect no timeout if default is used self.mock_rados.Rados.return_value.connect.assert_called_once_with() self.assertTrue(self.mock_rados.Rados.return_value.open_ioctx.called) self.assertEqual(ret[1], self.mock_rados.Rados.return_value.ioctx) self.mock_rados.Rados.return_value.open_ioctx.assert_called_with( self.cfg.rbd_pool) # different pool ret = self.driver._connect_to_rados('alt_pool') self.assertTrue(self.mock_rados.Rados.return_value.connect.called) self.assertTrue(self.mock_rados.Rados.return_value.open_ioctx.called) self.assertEqual(ret[1], self.mock_rados.Rados.return_value.ioctx) self.mock_rados.Rados.return_value.open_ioctx.assert_called_with( 'alt_pool') # With timeout self.cfg.rados_connect_timeout = 1 self.mock_rados.Rados.return_value.connect.reset_mock() self.driver._connect_to_rados() self.mock_rados.Rados.return_value.connect.assert_called_once_with( timeout=1) # error self.mock_rados.Rados.return_value.open_ioctx.reset_mock() self.mock_rados.Rados.return_value.shutdown.reset_mock() self.mock_rados.Rados.return_value.open_ioctx.side_effect = ( self.mock_rados.Error) self.assertRaises(exception.VolumeBackendAPIException, self.driver._connect_to_rados) self.assertTrue(self.mock_rados.Rados.return_value.open_ioctx.called) self.mock_rados.Rados.return_value.shutdown.assert_called_once_with() class RBDImageIOWrapperTestCase(test.TestCase): def setUp(self): 
super(RBDImageIOWrapperTestCase, self).setUp() self.meta = mock.Mock() self.meta.user = 'mock_user' self.meta.conf = 'mock_conf' self.meta.pool = 'mock_pool' self.meta.image = mock.Mock() self.meta.image.read = mock.Mock() self.meta.image.write = mock.Mock() self.meta.image.size = mock.Mock() self.mock_rbd_wrapper = driver.RBDImageIOWrapper(self.meta) self.data_length = 1024 self.full_data = 'abcd' * 256 def test_init(self): self.assertEqual(self.mock_rbd_wrapper._rbd_meta, self.meta) self.assertEqual(self.mock_rbd_wrapper._offset, 0) def test_inc_offset(self): self.mock_rbd_wrapper._inc_offset(10) self.mock_rbd_wrapper._inc_offset(10) self.assertEqual(self.mock_rbd_wrapper._offset, 20) def test_rbd_image(self): self.assertEqual(self.mock_rbd_wrapper.rbd_image, self.meta.image) def test_rbd_user(self): self.assertEqual(self.mock_rbd_wrapper.rbd_user, self.meta.user) def test_rbd_pool(self): self.assertEqual(self.mock_rbd_wrapper.rbd_conf, self.meta.conf) def test_rbd_conf(self): self.assertEqual(self.mock_rbd_wrapper.rbd_pool, self.meta.pool) def test_read(self): def mock_read(offset, length): return self.full_data[offset:length] self.meta.image.read.side_effect = mock_read self.meta.image.size.return_value = self.data_length data = self.mock_rbd_wrapper.read() self.assertEqual(data, self.full_data) data = self.mock_rbd_wrapper.read() self.assertEqual(data, '') self.mock_rbd_wrapper.seek(0) data = self.mock_rbd_wrapper.read() self.assertEqual(data, self.full_data) self.mock_rbd_wrapper.seek(0) data = self.mock_rbd_wrapper.read(10) self.assertEqual(data, self.full_data[:10]) def test_write(self): self.mock_rbd_wrapper.write(self.full_data) self.assertEqual(self.mock_rbd_wrapper._offset, 1024) def test_seekable(self): self.assertTrue(self.mock_rbd_wrapper.seekable) def test_seek(self): self.assertEqual(self.mock_rbd_wrapper._offset, 0) self.mock_rbd_wrapper.seek(10) self.assertEqual(self.mock_rbd_wrapper._offset, 10) self.mock_rbd_wrapper.seek(10) 
self.assertEqual(self.mock_rbd_wrapper._offset, 10) self.mock_rbd_wrapper.seek(10, 1) self.assertEqual(self.mock_rbd_wrapper._offset, 20) self.mock_rbd_wrapper.seek(0) self.mock_rbd_wrapper.write(self.full_data) self.meta.image.size.return_value = self.data_length self.mock_rbd_wrapper.seek(0) self.assertEqual(self.mock_rbd_wrapper._offset, 0) self.mock_rbd_wrapper.seek(10, 2) self.assertEqual(self.mock_rbd_wrapper._offset, self.data_length + 10) self.mock_rbd_wrapper.seek(-10, 2) self.assertEqual(self.mock_rbd_wrapper._offset, self.data_length - 10) # test exceptions. self.assertRaises(IOError, self.mock_rbd_wrapper.seek, 0, 3) self.assertRaises(IOError, self.mock_rbd_wrapper.seek, -1) # offset should not have been changed by any of the previous # operations. self.assertEqual(self.mock_rbd_wrapper._offset, self.data_length - 10) def test_tell(self): self.assertEqual(self.mock_rbd_wrapper.tell(), 0) self.mock_rbd_wrapper._inc_offset(10) self.assertEqual(self.mock_rbd_wrapper.tell(), 10) def test_flush(self): with mock.patch.object(driver, 'LOG') as mock_logger: self.meta.image.flush = mock.Mock() self.mock_rbd_wrapper.flush() self.meta.image.flush.assert_called_once_with() self.meta.image.flush.reset_mock() # this should be caught and logged silently. self.meta.image.flush.side_effect = AttributeError self.mock_rbd_wrapper.flush() self.meta.image.flush.assert_called_once_with() msg = _("flush() not supported in this version of librbd") mock_logger.warning.assert_called_with(msg) def test_fileno(self): self.assertRaises(IOError, self.mock_rbd_wrapper.fileno) def test_close(self): self.mock_rbd_wrapper.close() class ManagedRBDTestCase(DriverTestCase): driver_name = "cinder.volume.drivers.rbd.RBDDriver" def setUp(self): super(ManagedRBDTestCase, self).setUp() # TODO(dosaboy): need to remove dependency on mox stubs here once # image.fake has been converted to mock. 
fake_image.stub_out_image_service(self.stubs) self.volume.driver.set_initialized() self.volume.stats = {'allocated_capacity_gb': 0, 'pools': {}} self.called = [] def _create_volume_from_image(self, expected_status, raw=False, clone_error=False): """Try to clone a volume from an image, and check the status afterwards. NOTE: if clone_error is True we force the image type to raw otherwise clone_image is not called """ volume_id = 1 # See tests.image.fake for image types. if raw: image_id = '155d900f-4e14-4e4c-a73d-069cbf4541e6' else: image_id = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77' # creating volume testdata db.volume_create(self.context, {'id': volume_id, 'updated_at': timeutils.utcnow(), 'display_description': 'Test Desc', 'size': 20, 'status': 'creating', 'instance_uuid': None, 'host': 'dummy'}) try: if not clone_error: self.volume.create_volume(self.context, volume_id, image_id=image_id) else: self.assertRaises(exception.CinderException, self.volume.create_volume, self.context, volume_id, image_id=image_id) volume = db.volume_get(self.context, volume_id) self.assertEqual(volume['status'], expected_status) finally: # cleanup db.volume_destroy(self.context, volume_id) def test_create_vol_from_image_status_available(self): """Clone raw image then verify volume is in available state.""" def _mock_clone_image(context, volume, image_location, image_meta, image_service): return {'provider_location': None}, True with mock.patch.object(self.volume.driver, 'clone_image') as \ mock_clone_image: mock_clone_image.side_effect = _mock_clone_image with mock.patch.object(self.volume.driver, 'create_volume') as \ mock_create: with mock.patch.object(create_volume.CreateVolumeFromSpecTask, '_copy_image_to_volume') as mock_copy: self._create_volume_from_image('available', raw=True) self.assertFalse(mock_copy.called) self.assertTrue(mock_clone_image.called) self.assertFalse(mock_create.called) def test_create_vol_from_non_raw_image_status_available(self): """Clone non-raw image then 
verify volume is in available state.""" def _mock_clone_image(context, volume, image_location, image_meta, image_service): return {'provider_location': None}, False with mock.patch.object(self.volume.driver, 'clone_image') as \ mock_clone_image: mock_clone_image.side_effect = _mock_clone_image with mock.patch.object(self.volume.driver, 'create_volume') as \ mock_create: with mock.patch.object(create_volume.CreateVolumeFromSpecTask, '_copy_image_to_volume') as mock_copy: self._create_volume_from_image('available', raw=False) self.assertTrue(mock_copy.called) self.assertTrue(mock_clone_image.called) self.assertTrue(mock_create.called) def test_create_vol_from_image_status_error(self): """Fail to clone raw image then verify volume is in error state.""" with mock.patch.object(self.volume.driver, 'clone_image') as \ mock_clone_image: mock_clone_image.side_effect = exception.CinderException with mock.patch.object(self.volume.driver, 'create_volume'): with mock.patch.object(create_volume.CreateVolumeFromSpecTask, '_copy_image_to_volume') as mock_copy: self._create_volume_from_image('error', raw=True, clone_error=True) self.assertFalse(mock_copy.called) self.assertTrue(mock_clone_image.called) self.assertFalse(self.volume.driver.create_volume.called) def test_clone_failure(self): driver = self.volume.driver with mock.patch.object(driver, '_is_cloneable', lambda *args: False): image_loc = (mock.Mock(), mock.Mock()) actual = driver.clone_image(mock.Mock(), mock.Mock(), image_loc, {}, mock.Mock()) self.assertEqual(({}, False), actual) self.assertEqual(({}, False), driver.clone_image('', object(), None, {}, '')) def test_clone_success(self): expected = ({'provider_location': None}, True) driver = self.volume.driver with mock.patch.object(self.volume.driver, '_is_cloneable') as \ mock_is_cloneable: mock_is_cloneable.return_value = True with mock.patch.object(self.volume.driver, '_clone') as \ mock_clone: with mock.patch.object(self.volume.driver, '_resize') as \ mock_resize: 
image_loc = ('rbd://fee/fi/fo/fum', None) volume = {'name': 'vol1'} actual = driver.clone_image(mock.Mock(), volume, image_loc, {'disk_format': 'raw', 'id': 'id.foo'}, mock.Mock()) self.assertEqual(expected, actual) mock_clone.assert_called_once_with(volume, 'fi', 'fo', 'fum') mock_resize.assert_called_once_with(volume)
apache-2.0
hsoft/moneyguru
core/gui/import_window.py
1
15326
# Copyright 2019 Virgil Dupras # # This software is licensed under the "GPLv3" License as described in the "LICENSE" file, # which should be included with this package. The terms are also available at # http://www.gnu.org/licenses/gpl-3.0.html import datetime from collections import defaultdict from core.util import dedupe, first as getfirst from core.trans import tr from ..model.date import DateFormat from .base import GUIObject from .import_table import ImportTable from .selectable_list import LinkedSelectableList DAY = 'day' MONTH = 'month' YEAR = 'year' class SwapType: DayMonth = 0 MonthYear = 1 DayYear = 2 DescriptionPayee = 3 InvertAmount = 4 def last_two_digits(year): return year - ((year // 100) * 100) def swapped_date(date, first, second): attrs = {DAY: date.day, MONTH: date.month, YEAR: last_two_digits(date.year)} newattrs = {first: attrs[second], second: attrs[first]} if YEAR in newattrs: newattrs[YEAR] += 2000 return date.replace(**newattrs) def swap_format_elements(format, first, second): # format is a DateFormat swapped = format.copy() elems = swapped.elements TYPE2CHAR = {DAY: 'd', MONTH: 'M', YEAR: 'y'} first_char = TYPE2CHAR[first] second_char = TYPE2CHAR[second] first_index = [i for i, x in enumerate(elems) if x.startswith(first_char)][0] second_index = [i for i, x in enumerate(elems) if x.startswith(second_char)][0] elems[first_index], elems[second_index] = elems[second_index], elems[first_index] return swapped class AccountPane: def __init__(self, iwin, account, target_account, parsing_date_format): self.iwin = iwin self.account = account self._selected_target = target_account self.name = account.name entries = iwin.loader.accounts.entries_for_account(account) self.count = len(entries) self.matches = [] # [[ref, imported]] self.parsing_date_format = parsing_date_format self.max_day = 31 self.max_month = 12 self.max_year = 99 # 2 digits self._match_entries() self._swap_possibilities = set() self._compute_swap_possibilities() def 
_compute_swap_possibilities(self): entries = list(self.iwin.loader.accounts.entries_for_account(self.account)) if not entries: return self._swap_possibilities = set([(DAY, MONTH), (MONTH, YEAR), (DAY, YEAR)]) for first, second in self._swap_possibilities.copy(): for entry in entries: try: swapped_date(entry.date, first, second) except ValueError: self._swap_possibilities.remove((first, second)) break def _match_entries(self): to_import = list(self.iwin.loader.accounts.entries_for_account(self.account)) reference2entry = {} for entry in (e for e in to_import if e.reference): reference2entry[entry.reference] = entry self.matches = [] if self.selected_target is not None: entries = self.iwin.document.accounts.entries_for_account(self.selected_target) for entry in entries: if entry.reference in reference2entry: other = reference2entry[entry.reference] if entry.reconciled: self.iwin.import_table.dont_import.add(other) to_import.remove(other) del reference2entry[entry.reference] else: other = None if other is not None or not entry.reconciled: self.matches.append([entry, other]) self.matches += [[None, entry] for entry in to_import] self._sort_matches() def _sort_matches(self): self.matches.sort(key=lambda t: t[0].date if t[0] is not None else t[1].date) def bind(self, existing, imported): [match1] = [m for m in self.matches if m[0] is existing] [match2] = [m for m in self.matches if m[1] is imported] assert match1[1] is None assert match2[0] is None match1[1] = match2[1] self.matches.remove(match2) def can_swap_date_fields(self, first, second): # 'day', 'month', 'year' return (first, second) in self._swap_possibilities or (second, first) in self._swap_possibilities def match_entries_by_date_and_amount(self, threshold): delta = datetime.timedelta(days=threshold) unmatched = ( to_import for ref, to_import in self.matches if ref is None) unmatched_refs = ( ref for ref, to_import in self.matches if to_import is None) amount2refs = defaultdict(list) for entry in 
unmatched_refs: amount2refs[entry.amount].append(entry) for entry in unmatched: if entry.amount not in amount2refs: continue potentials = amount2refs[entry.amount] for ref in potentials: if abs(ref.date - entry.date) <= delta: self.bind(ref, entry) potentials.remove(ref) self._sort_matches() def unbind(self, existing, imported): [match] = [m for m in self.matches if m[0] is existing and m[1] is imported] match[1] = None self.matches.append([None, imported]) self._sort_matches() @property def selected_target(self): return self._selected_target @selected_target.setter def selected_target(self, value): self._selected_target = value self._match_entries() # This is a modal window that is designed to be re-instantiated on each import # run. It is shown modally by the UI as soon as its created on the UI side. class ImportWindow(GUIObject): # --- View interface # close() # close_selected_tab() # set_swap_button_enabled(enabled: bool) # update_selected_pane() # show() # def __init__(self, mainwindow, target_account=None): super().__init__() if not hasattr(mainwindow, 'loader'): raise ValueError("Nothing to import!") self.mainwindow = mainwindow self.document = mainwindow.document self.app = self.document.app self._selected_pane_index = 0 self._selected_target_index = 0 def setfunc(index): self.view.set_swap_button_enabled(self.can_perform_swap()) self.swap_type_list = LinkedSelectableList(items=[ "<placeholder> Day <--> Month", "<placeholder> Month <--> Year", "<placeholder> Day <--> Year", tr("Description <--> Payee"), tr("Invert Amounts"), ], setfunc=setfunc) self.swap_type_list.selected_index = SwapType.DayMonth self.panes = [] self.import_table = ImportTable(self) self.loader = self.mainwindow.loader self.target_accounts = [ a for a in self.document.accounts if a.is_balance_sheet_account()] self.target_accounts.sort(key=lambda a: a.name.lower()) accounts = [] for account in self.loader.accounts: if account.is_balance_sheet_account(): entries = 
self.loader.accounts.entries_for_account(account) if len(entries): new_name = self.document.accounts.new_name(account.name) if new_name != account.name: self.loader.accounts.rename_account(account, new_name) accounts.append(account) parsing_date_format = DateFormat.from_sysformat(self.loader.parsing_date_format) for account in accounts: target = target_account if target is None and account.reference: target = getfirst( t for t in self.target_accounts if t.reference == account.reference ) self.panes.append( AccountPane(self, account, target, parsing_date_format)) # --- Private def _can_swap_date_fields(self, first, second): # 'day', 'month', 'year' pane = self.selected_pane if pane is None: return False return pane.can_swap_date_fields(first, second) def _invert_amounts(self, apply_to_all): if apply_to_all: panes = self.panes else: panes = [self.selected_pane] for pane in panes: entries = self.loader.accounts.entries_for_account(pane.account) txns = dedupe(e.transaction for e in entries) for txn in txns: for split in txn.splits: split.amount = -split.amount self.import_table.refresh() def _refresh_target_selection(self): if not self.panes: return target = self.selected_pane.selected_target self._selected_target_index = 0 if target is not None: try: self._selected_target_index = self.target_accounts.index(target) + 1 except ValueError: pass def _refresh_swap_list_items(self): if not self.panes: return items = [] basefmt = self.selected_pane.parsing_date_format for first, second in [(DAY, MONTH), (MONTH, YEAR), (DAY, YEAR)]: swapped = swap_format_elements(basefmt, first, second) items.append("{} --> {}".format(basefmt.iso_format, swapped.iso_format)) self.swap_type_list[:3] = items def _swap_date_fields(self, first, second, apply_to_all): # 'day', 'month', 'year' assert self._can_swap_date_fields(first, second) if apply_to_all: panes = [p for p in self.panes if p.can_swap_date_fields(first, second)] else: panes = [self.selected_pane] def switch_func(txn): txn.date = 
swapped_date(txn.date, first, second) self._swap_fields(panes, switch_func) # Now, lets' change the date format on these panes for pane in panes: basefmt = self.selected_pane.parsing_date_format swapped = swap_format_elements(basefmt, first, second) pane.parsing_date_format = swapped pane._sort_matches() self.import_table.refresh() self._refresh_swap_list_items() def _swap_description_payee(self, apply_to_all): if apply_to_all: panes = self.panes else: panes = [self.selected_pane] def switch_func(txn): txn.description, txn.payee = txn.payee, txn.description self._swap_fields(panes, switch_func) def _swap_fields(self, panes, switch_func): seen = set() for pane in panes: entries = self.loader.accounts.entries_for_account(pane.account) txns = dedupe(e.transaction for e in entries) for txn in txns: if txn.affected_accounts() & seen: # We've already swapped this txn in a previous pane. continue switch_func(txn) seen.add(pane.account) self.import_table.refresh() def _update_selected_pane(self): self.import_table.refresh() self._refresh_swap_list_items() self.view.update_selected_pane() self.view.set_swap_button_enabled(self.can_perform_swap()) # --- Override def _view_updated(self): if self.document.can_restore_from_prefs(): self.restore_view() # XXX Logically, we should call _update_selected_pane() but doing so # make tests fail. to investigate. 
self._refresh_target_selection() self.view.update_selected_pane() self._refresh_swap_list_items() self.import_table.refresh() # --- Public def can_perform_swap(self): index = self.swap_type_list.selected_index if index == SwapType.DayMonth: return self._can_swap_date_fields(DAY, MONTH) elif index == SwapType.MonthYear: return self._can_swap_date_fields(MONTH, YEAR) elif index == SwapType.DayYear: return self._can_swap_date_fields(DAY, YEAR) else: return True def close_pane(self, index): was_selected = index == self.selected_pane_index del self.panes[index] if not self.panes: self.view.close() return self._selected_pane_index = min(self._selected_pane_index, len(self.panes) - 1) if was_selected: self._update_selected_pane() def import_selected_pane(self): pane = self.selected_pane matches = pane.matches matches = [ (e, ref) for ref, e in matches if e is not None and e not in self.import_table.dont_import] if pane.selected_target is not None: # We import in an existing account, adjust all the transactions accordingly target_account = pane.selected_target else: target_account = None self.document.import_entries(target_account, pane.account, matches) self.mainwindow.revalidate() self.close_pane(self.selected_pane_index) self.view.close_selected_tab() def match_entries_by_date_and_amount(self, threshold): self.selected_pane.match_entries_by_date_and_amount(threshold) self.import_table.refresh() def perform_swap(self, apply_to_all=False): index = self.swap_type_list.selected_index if index == SwapType.DayMonth: self._swap_date_fields(DAY, MONTH, apply_to_all=apply_to_all) elif index == SwapType.MonthYear: self._swap_date_fields(MONTH, YEAR, apply_to_all=apply_to_all) elif index == SwapType.DayYear: self._swap_date_fields(DAY, YEAR, apply_to_all=apply_to_all) elif index == SwapType.DescriptionPayee: self._swap_description_payee(apply_to_all=apply_to_all) elif index == SwapType.InvertAmount: self._invert_amounts(apply_to_all=apply_to_all) def restore_view(self): 
self.import_table.columns.restore_columns() # --- Properties @property def selected_pane(self): return self.panes[self.selected_pane_index] if self.panes else None @property def selected_pane_index(self): return self._selected_pane_index @selected_pane_index.setter def selected_pane_index(self, value): if value >= len(self.panes): return self._selected_pane_index = value self._refresh_target_selection() self._update_selected_pane() @property def selected_target_account(self): return self.selected_pane.selected_target @property def selected_target_account_index(self): return self._selected_target_index @selected_target_account_index.setter def selected_target_account_index(self, value): target = self.target_accounts[value - 1] if value > 0 else None self.selected_pane.selected_target = target self._selected_target_index = value self.import_table.refresh() @property def target_account_names(self): return [tr('< New Account >')] + [a.name for a in self.target_accounts]
gpl-3.0
chipaca/snapcraft
snapcraft/internal/build_providers/_lxd/_lxd.py
1
18657
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*- # # Copyright (C) 2019-2020 Canonical Ltd # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License version 3 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import logging import os import subprocess import sys import urllib.parse import warnings from textwrap import dedent from time import sleep from typing import Dict, Optional, Sequence from snapcraft.internal import repo from snapcraft.internal.errors import SnapcraftEnvironmentError from .._base_provider import Provider, errors from ._images import get_image_source # LXD is only supported on Linux and causes issues when imported on Windows. # We conditionally import it and rely on ensure_provider() to check OS before # using pylxd. if sys.platform == "linux": import pylxd logger = logging.getLogger(__name__) # Filter out attribute setting warnings for properties that exist in LXD operations # but are unhandled in pylxd. 
warnings.filterwarnings("ignore", module="pylxd.models.operation") class LXD(Provider): """A LXD provider for snapcraft to execute its lifecycle.""" _PROJECT_DEVICE_NAME = "snapcraft-project" _PROJECT_EXPORTED_PRIME_NAME = "snapcraft-project-prime" # Given that we are running snapcraft from the snapcraft snap, which has # classic confinement and require using the lxd snap, the lxd and lxc # binaries should be found in /snap/bin _LXD_BIN = os.path.join(os.path.sep, "snap", "bin", "lxd") _LXC_BIN = os.path.join(os.path.sep, "snap", "bin", "lxc") @classmethod def ensure_provider(cls): error_message = None # type: Optional[str] prompt_installable = False if sys.platform != "linux": error_message = "LXD is not supported on this platform" else: try: if not repo.snaps.SnapPackage.is_snap_installed("lxd"): error_message = ( "The LXD snap is required to continue: snap install lxd" ) prompt_installable = True except repo.errors.SnapdConnectionError: error_message = ( "snap support is required to continue: " "https://docs.snapcraft.io/installing-snapd/6735" ) if error_message is not None: raise errors.ProviderNotFound( provider=cls._get_provider_name(), prompt_installable=prompt_installable, error_message=error_message, ) # If we reach this point, it means the lxd snap is properly setup. # Now is the time for additional sanity checks to ensure the provider # will work. try: # TODO: add support for more distributions. Maybe refactor a bit so that Repo behaves # similar to a build provider. if repo.Repo.is_package_installed("lxd") or repo.Repo.is_package_installed( "lxd-client" ): raise SnapcraftEnvironmentError( ( "The {!r} provider does not support having the 'lxd' or " "'lxd-client' deb packages installed. To completely migrate " "to the LXD snap run 'lxd.migrate' and try again." 
).format(cls._get_provider_name()) ) except repo.errors.NoNativeBackendError: pass @classmethod def setup_provider(cls, *, echoer) -> None: repo.snaps.install_snaps(["lxd/latest/stable"]) try: subprocess.check_output([cls._LXD_BIN, "waitready", "--timeout=30"]) except subprocess.CalledProcessError as call_error: raise SnapcraftEnvironmentError( "Timeout reached waiting for LXD to start." ) from call_error try: subprocess.check_output([cls._LXD_BIN, "init", "--auto"]) except subprocess.CalledProcessError as call_error: raise SnapcraftEnvironmentError( "Failed to initialize LXD. " "Try manually initializing before trying again: lxd init --auto." ) from call_error @classmethod def _get_provider_name(cls): return "LXD" @classmethod def _get_is_snap_injection_capable(cls) -> bool: return True def __init__( self, *, project, echoer, is_ephemeral: bool = False, build_provider_flags: Dict[str, str] = None, ) -> None: super().__init__( project=project, echoer=echoer, is_ephemeral=is_ephemeral, build_provider_flags=build_provider_flags, ) # This endpoint is hardcoded everywhere lxc/lxd-pkg-snap#33 lxd_socket_path = "/var/snap/lxd/common/lxd/unix.socket" endpoint = "http+unix://{}".format(urllib.parse.quote(lxd_socket_path, safe="")) try: self._lxd_client: pylxd.Client = pylxd.Client(endpoint=endpoint) except pylxd.client.exceptions.ClientConnectionFailed: raise errors.ProviderCommunicationError( provider_name=self._get_provider_name(), message="cannot connect to the LXD socket ({!r}).".format( lxd_socket_path ), ) self._container: Optional[pylxd.models.container.Container] = None def _run( self, command: Sequence[str], hide_output: bool = False ) -> Optional[bytes]: self._ensure_container_running() env_command = super()._get_env_command() # TODO: use pylxd cmd = [self._LXC_BIN, "exec", self.instance_name, "--"] cmd.extend(env_command) cmd.extend(command) self._log_run(cmd) output = None try: if hide_output: output = subprocess.check_output(cmd) else: 
subprocess.check_call(cmd) except subprocess.CalledProcessError as process_error: raise errors.ProviderExecError( provider_name=self._get_provider_name(), command=command, exit_code=process_error.returncode, output=process_error.output, ) from process_error return output def _launch(self) -> None: build_base = self.project._get_build_base() try: source = get_image_source(base=build_base) except KeyError: raise errors.ProviderInvalidBaseError( provider_name=self._get_provider_name(), build_base=build_base ) config = {"name": self.instance_name, "source": source} try: container = self._lxd_client.containers.create(config, wait=True) except pylxd.exceptions.LXDAPIException as lxd_api_error: raise errors.ProviderLaunchError( provider_name=self._get_provider_name(), error_message=lxd_api_error ) from lxd_api_error container.save(wait=True) self._container = container self._start() def _supports_syscall_interception(self) -> bool: # syscall interception relies on the seccomp_listener kernel feature environment = self._lxd_client.host_info.get("environment", {}) kernel_features = environment.get("kernel_features", {}) return kernel_features.get("seccomp_listener", "false") == "true" def _start(self): if not self._lxd_client.containers.exists(self.instance_name): raise errors.ProviderInstanceNotFoundError(instance_name=self.instance_name) if self._container is None: self._container = self._lxd_client.containers.get(self.instance_name) self._container.sync() # map to the owner of the directory we are eventually going to write the # snap to. self._container.config["raw.idmap"] = "both {!s} 0".format( os.stat(self.project._project_dir).st_uid ) # If possible, allow container to make safe mknod calls. 
Documented at # https://linuxcontainers.org/lxd/docs/master/syscall-interception if self._supports_syscall_interception(): self._container.config["security.syscalls.intercept.mknod"] = "true" self._container.save(wait=True) if self._container.status.lower() != "running": try: self._container.start(wait=True) except pylxd.exceptions.LXDAPIException as lxd_api_error: print(self._container.status) raise errors.ProviderStartError( provider_name=self._get_provider_name(), error_message=lxd_api_error ) from lxd_api_error self.echoer.wrapped("Waiting for container to be ready") def _stop(self): # If _container is still None here it means creation/starting was not # successful. if self._container is None: return self._container.sync() if self._container.status.lower() != "stopped": try: self._container.stop(wait=True) except pylxd.exceptions.LXDAPIException as lxd_api_error: raise errors.ProviderStopError( provider_name=self._get_provider_name(), error_message=lxd_api_error ) from lxd_api_error def _push_file(self, *, source: str, destination: str) -> None: # Sanity check - developer error if container not initialized. if self._container is None: raise RuntimeError("Attempted to use container before starting.") self._ensure_container_running() # TODO: better handling of larger files. 
with open(source, "rb") as source_data: source_contents = source_data.read() try: self._container.files.put(destination, source_contents) except pylxd.exceptions.LXDAPIException as lxd_api_error: raise errors.ProviderFileCopyError( provider_name=self._get_provider_name(), error_message=lxd_api_error ) def create(self) -> None: """Create the LXD instance and setup the build environment.""" self.echoer.info("Launching a container.") self.launch_instance() self._mount_project() def destroy(self) -> None: """Destroy the instance, trying to stop it first.""" self._stop() def _get_mount_name(self, target: str) -> str: """Provide a formatted name for target mount point.""" home_dir = self._get_home_directory().as_posix() # Special cases for compatibility. if target == os.path.join(home_dir, "project"): return self._PROJECT_DEVICE_NAME elif target == os.path.join(home_dir, "prime"): return self._PROJECT_EXPORTED_PRIME_NAME # Replace home directory with "snapcraft". name = target.replace(home_dir, "snapcraft", 1) # Replace path separators with dashes. name = name.replace("/", "-") return name def _is_mounted(self, target: str) -> bool: """Query if there is a mount at target mount point.""" # Sanity check - developer error if container not initialized. if self._container is None: raise RuntimeError("Attempted to use container before starting.") name = self._get_mount_name(target) return name in self._container.devices def _mount(self, host_source: str, target: str) -> None: """Mount host source directory to target mount point.""" # Sanity check - developer error if container not initialized. if self._container is None: raise RuntimeError("Attempted to use container before starting.") if self._is_mounted(target): # Nothing to do if already mounted. 
return name = self._get_mount_name(target) self._container.sync() self._container.devices[name] = { "type": "disk", "source": host_source, "path": target, } try: self._container.save(wait=True) except pylxd.exceptions.LXDAPIException as lxd_api_error: raise errors.ProviderMountError( provider_name=self._get_provider_name(), error_message=lxd_api_error ) from lxd_api_error def clean_project(self) -> bool: was_cleaned = super().clean_project() if was_cleaned: if self._container is None: try: self._container = self._lxd_client.containers.get( self.instance_name ) except pylxd.exceptions.NotFound: # If no container found, nothing to delete. return was_cleaned self._stop() self._container.delete(wait=True) return was_cleaned def pull_file(self, name: str, destination: str, delete: bool = False) -> None: # Sanity check - developer error if container not initialized. if self._container is None: raise RuntimeError("Attempted to use container before starting.") self._ensure_container_running() # TODO: better handling of larger files. try: source_data = self._container.files.get(name) except pylxd.exceptions.LXDAPIException as lxd_api_error: raise errors.ProviderFileCopyError( provider_name=self._get_provider_name(), error_message=lxd_api_error ) else: with open(destination, "wb") as destination_data: destination_data.write(source_data) if delete and self._container.files.delete_available: self._container.files.delete(name) if delete and not self._container.files.delete_available: logger.warning("File deletion not supported by this LXD version.") def shell(self) -> None: self._run(command=["/bin/bash"]) def _wait_for_systemd(self) -> None: # systemctl states we care about here are: # - running: The system is fully operational. Process returncode: 0 # - degraded: The system is operational but one or more units failed. 
# Process returncode: 1 for i in range(40): try: self._run(["systemctl", "is-system-running"], hide_output=True) break except errors.ProviderExecError as exec_error: if exec_error.output is not None: running_state = exec_error.output.decode().strip() if running_state == "degraded": break logger.debug(f"systemctl is-system-running: {running_state!r}") sleep(0.5) else: self.echoer.warning("Timed out waiting for systemd to be ready...") def _wait_for_network(self) -> None: self.echoer.wrapped("Waiting for network to be ready...") for i in range(40): try: self._run(["getent", "hosts", "snapcraft.io"], hide_output=True) break except errors.ProviderExecError: sleep(0.5) else: self.echoer.warning("Failed to setup networking.") def _setup_environment(self) -> None: if self._container is None: raise RuntimeError("Attempted to use container before starting.") super()._setup_environment() self._install_file( path="/etc/systemd/network/10-eth0.network", content=dedent( """ [Match] Name=eth0 [Network] DHCP=ipv4 LinkLocalAddressing=ipv6 [DHCP] RouteMetric=100 UseMTU=true """ ), permissions="0644", ) self._install_file( path="/etc/hostname", content=self.instance_name, permissions="0644" ) self._wait_for_systemd() # Use resolv.conf managed by systemd-resolved. self._run( ["ln", "-sf", "/run/systemd/resolve/resolv.conf", "/etc/resolv.conf"], hide_output=True, ) self._run(["systemctl", "enable", "systemd-resolved"], hide_output=True) self._run(["systemctl", "enable", "systemd-networkd"], hide_output=True) self._run(["systemctl", "restart", "systemd-resolved"], hide_output=True) self._run(["systemctl", "restart", "systemd-networkd"], hide_output=True) self._wait_for_network() # Setup snapd to bootstrap. self._run(["apt-get", "update"]) # First install fuse and udev, snapd requires them. 
# Snapcraft requires dirmngr self._run(["apt-get", "install", "dirmngr", "udev", "fuse", "--yes"]) # the system needs networking self._run(["systemctl", "enable", "systemd-udevd"], hide_output=True) self._run(["systemctl", "start", "systemd-udevd"], hide_output=True) # And only then install snapd. self._run(["apt-get", "install", "snapd", "sudo", "--yes"]) self._run(["systemctl", "start", "snapd"], hide_output=True) def _setup_snapcraft(self): self._wait_for_network() super()._setup_snapcraft() def _ensure_container_running(self) -> None: # Sanity check - developer error if container not initialized. if self._container is None: raise RuntimeError("Attempted to use container before starting.") self._container.sync() if self._container.status.lower() != "running": raise errors.ProviderFileCopyError( provider_name=self._get_provider_name(), error_message=( "Container is not running, the current state is: {!r}. " "Ensure it has not been modified by external factors and try again" ).format(self._container.status), )
gpl-3.0
ryfeus/lambda-packs
Tensorflow/source/tensorflow/contrib/learn/python/learn/basic_session_run_hooks.py
118
1403
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Some common SessionRunHook classes.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.training import basic_session_run_hooks # pylint: disable=invalid-name LoggingTensorHook = basic_session_run_hooks.LoggingTensorHook StopAtStepHook = basic_session_run_hooks.StopAtStepHook CheckpointSaverHook = basic_session_run_hooks.CheckpointSaverHook StepCounterHook = basic_session_run_hooks.StepCounterHook NanLossDuringTrainingError = basic_session_run_hooks.NanLossDuringTrainingError NanTensorHook = basic_session_run_hooks.NanTensorHook SummarySaverHook = basic_session_run_hooks.SummarySaverHook # pylint: enable=invalid-name
mit
jenalgit/django
tests/template_backends/test_utils.py
351
1521
from django.core.exceptions import ImproperlyConfigured from django.template import engines from django.test import SimpleTestCase, override_settings class TemplateStringsTests(SimpleTestCase): @override_settings(TEMPLATES=[{ 'BACKEND': 'raise.import.error', }]) def test_backend_import_error(self): """ Failing to import a backend keeps raising the original import error. Regression test for #24265. """ with self.assertRaises(ImportError): engines.all() with self.assertRaises(ImportError): engines.all() @override_settings(TEMPLATES=[{ 'BACKEND': 'django.template.backends.django.DjangoTemplates', # Incorrect: APP_DIRS and loaders are mutually incompatible. 'APP_DIRS': True, 'OPTIONS': {'loaders': []}, }]) def test_backend_improperly_configured(self): """ Failing to initialize a backend keeps raising the original exception. Regression test for #24265. """ with self.assertRaises(ImproperlyConfigured): engines.all() with self.assertRaises(ImproperlyConfigured): engines.all() @override_settings(TEMPLATES=[{ 'BACKEND': 'django.template.backends.django.DjangoTemplates', }, { 'BACKEND': 'django.template.backends.django.DjangoTemplates', }]) def test_backend_names_must_be_unique(self): with self.assertRaises(ImproperlyConfigured): engines.all()
bsd-3-clause
mj10777/QGIS
python/console/console_sci.py
21
30221
# -*- coding:utf-8 -*- """ /*************************************************************************** Python Console for QGIS ------------------- begin : 2012-09-10 copyright : (C) 2012 by Salvatore Larosa email : lrssvtml (at) gmail (dot) com ***************************************************************************/ /*************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * ***************************************************************************/ Some portions of code were taken from https://code.google.com/p/pydee/ """ from qgis.PyQt.QtCore import Qt, QByteArray, QCoreApplication, QFile, QSize from qgis.PyQt.QtWidgets import QDialog, QMenu, QShortcut, QApplication from qgis.PyQt.QtGui import QColor, QKeySequence, QFont, QFontMetrics, QStandardItemModel, QStandardItem, QClipboard, QFontDatabase from qgis.PyQt.Qsci import QsciScintilla, QsciLexerPython, QsciAPIs import sys import os import code import codecs import re import traceback from qgis.core import QgsApplication, QgsSettings, Qgis from .ui_console_history_dlg import Ui_HistoryDialogPythonConsole _init_commands = ["import sys", "import os", "import re", "import math", "from qgis.core import *", "from qgis.gui import *", "from qgis.analysis import *", "import processing", "import qgis.utils", "from qgis.utils import iface", "from qgis.PyQt.QtCore import *", "from qgis.PyQt.QtGui import *", "from qgis.PyQt.QtWidgets import *", "from qgis.PyQt.QtNetwork import *", "from qgis.PyQt.QtXml import *"] _historyFile = os.path.join(QgsApplication.qgisSettingsDirPath(), "console_history.txt") class ShellScintilla(QsciScintilla, code.InteractiveInterpreter): DEFAULT_COLOR = "#4d4d4c" KEYWORD_COLOR = "#8959a8" CLASS_COLOR = "#4271ae" 
METHOD_COLOR = "#4271ae" DECORATION_COLOR = "#3e999f" NUMBER_COLOR = "#c82829" COMMENT_COLOR = "#8e908c" COMMENT_BLOCK_COLOR = "#8e908c" BACKGROUND_COLOR = "#ffffff" CURSOR_COLOR = "#636363" CARET_LINE_COLOR = "#efefef" SINGLE_QUOTE_COLOR = "#718c00" DOUBLE_QUOTE_COLOR = "#718c00" TRIPLE_SINGLE_QUOTE_COLOR = "#eab700" TRIPLE_DOUBLE_QUOTE_COLOR = "#eab700" MARGIN_BACKGROUND_COLOR = "#efefef" MARGIN_FOREGROUND_COLOR = "#636363" SELECTION_BACKGROUND_COLOR = "#d7d7d7" SELECTION_FOREGROUND_COLOR = "#303030" MATCHED_BRACE_BACKGROUND_COLOR = "#b7f907" MATCHED_BRACE_FOREGROUND_COLOR = "#303030" def __init__(self, parent=None): super(ShellScintilla, self).__init__(parent) code.InteractiveInterpreter.__init__(self, locals=None) self.parent = parent self.opening = ['(', '{', '[', "'", '"'] self.closing = [')', '}', ']', "'", '"'] self.settings = QgsSettings() # Enable non-ascii chars for editor self.setUtf8(True) self.new_input_line = True self.setMarginWidth(0, 0) self.setMarginWidth(1, 0) self.setMarginWidth(2, 0) self.buffer = [] self.displayPrompt(False) for line in _init_commands: self.runsource(line) self.history = [] self.historyIndex = 0 # Read history command file self.readHistoryFile() self.historyDlg = HistoryDialog(self) # Brace matching: enable for a brace immediately before or after # the current position self.setBraceMatching(QsciScintilla.SloppyBraceMatch) # Current line visible with special background color self.setCaretWidth(2) self.refreshSettingsShell() # Don't want to see the horizontal scrollbar at all # Use raw message to Scintilla here (all messages are documented # here: http://www.scintilla.org/ScintillaDoc.html) self.SendScintilla(QsciScintilla.SCI_SETHSCROLLBAR, 0) # not too small # self.setMinimumSize(500, 300) self.setWrapMode(QsciScintilla.WrapCharacter) self.SendScintilla(QsciScintilla.SCI_EMPTYUNDOBUFFER) # Disable command key ctrl, shift = self.SCMOD_CTRL << 16, self.SCMOD_SHIFT << 16 self.SendScintilla(QsciScintilla.SCI_CLEARCMDKEY, ord('L') 
+ ctrl) self.SendScintilla(QsciScintilla.SCI_CLEARCMDKEY, ord('T') + ctrl) self.SendScintilla(QsciScintilla.SCI_CLEARCMDKEY, ord('D') + ctrl) self.SendScintilla(QsciScintilla.SCI_CLEARCMDKEY, ord('Z') + ctrl) self.SendScintilla(QsciScintilla.SCI_CLEARCMDKEY, ord('Y') + ctrl) self.SendScintilla(QsciScintilla.SCI_CLEARCMDKEY, ord('L') + ctrl + shift) # New QShortcut = ctrl+space/ctrl+alt+space for Autocomplete self.newShortcutCSS = QShortcut(QKeySequence(Qt.CTRL + Qt.SHIFT + Qt.Key_Space), self) self.newShortcutCAS = QShortcut(QKeySequence(Qt.CTRL + Qt.ALT + Qt.Key_Space), self) self.newShortcutCSS.setContext(Qt.WidgetShortcut) self.newShortcutCAS.setContext(Qt.WidgetShortcut) self.newShortcutCAS.activated.connect(self.autoCompleteKeyBinding) self.newShortcutCSS.activated.connect(self.showHistory) def _setMinimumHeight(self): font = self.lexer.defaultFont(0) fm = QFontMetrics(font) self.setMinimumHeight(fm.height() + 10) def refreshSettingsShell(self): # Set Python lexer self.setLexers() threshold = self.settings.value("pythonConsole/autoCompThreshold", 2, type=int) self.setAutoCompletionThreshold(threshold) radioButtonSource = self.settings.value("pythonConsole/autoCompleteSource", 'fromAPI') autoCompEnabled = self.settings.value("pythonConsole/autoCompleteEnabled", True, type=bool) if autoCompEnabled: if radioButtonSource == 'fromDoc': self.setAutoCompletionSource(self.AcsDocument) elif radioButtonSource == 'fromAPI': self.setAutoCompletionSource(self.AcsAPIs) elif radioButtonSource == 'fromDocAPI': self.setAutoCompletionSource(self.AcsAll) else: self.setAutoCompletionSource(self.AcsNone) cursorColor = self.settings.value("pythonConsole/cursorColor", QColor(self.CURSOR_COLOR)) self.setCaretForegroundColor(cursorColor) self.setSelectionForegroundColor(QColor(self.settings.value("pythonConsole/selectionForegroundColor", QColor(self.SELECTION_FOREGROUND_COLOR)))) self.setSelectionBackgroundColor(QColor(self.settings.value("pythonConsole/selectionBackgroundColor", 
QColor(self.SELECTION_BACKGROUND_COLOR)))) self.setMatchedBraceBackgroundColor(QColor(self.settings.value("pythonConsole/matchedBraceBackgroundColor", QColor(self.MATCHED_BRACE_BACKGROUND_COLOR)))) self.setMatchedBraceForegroundColor(QColor(self.settings.value("pythonConsole/matchedBraceForegroundColor", QColor(self.MATCHED_BRACE_FOREGROUND_COLOR)))) # Sets minimum height for input area based of font metric self._setMinimumHeight() def showHistory(self): if not self.historyDlg.isVisible(): self.historyDlg.show() self.historyDlg._reloadHistory() self.historyDlg.activateWindow() def autoCompleteKeyBinding(self): radioButtonSource = self.settings.value("pythonConsole/autoCompleteSource", 'fromAPI') autoCompEnabled = self.settings.value("pythonConsole/autoCompleteEnabled", True, type=bool) if autoCompEnabled: if radioButtonSource == 'fromDoc': self.autoCompleteFromDocument() elif radioButtonSource == 'fromAPI': self.autoCompleteFromAPIs() elif radioButtonSource == 'fromDocAPI': self.autoCompleteFromAll() def commandConsole(self, commands): if not self.is_cursor_on_last_line(): self.move_cursor_to_end() line, pos = self.getCursorPosition() selCmdLength = len(self.text(line)) self.setSelection(line, 4, line, selCmdLength) self.removeSelectedText() for cmd in commands: self.append(cmd) self.entered() self.move_cursor_to_end() self.setFocus() def setLexers(self): self.lexer = QsciLexerPython() font = QFontDatabase.systemFont(QFontDatabase.FixedFont) loadFont = self.settings.value("pythonConsole/fontfamilytext") if loadFont: font.setFamily(loadFont) fontSize = self.settings.value("pythonConsole/fontsize", type=int) if fontSize: font.setPointSize(fontSize) self.lexer.setDefaultFont(font) self.lexer.setDefaultColor(QColor(self.settings.value("pythonConsole/defaultFontColor", QColor(self.DEFAULT_COLOR)))) self.lexer.setColor(QColor(self.settings.value("pythonConsole/commentFontColor", QColor(self.COMMENT_COLOR))), 1) 
self.lexer.setColor(QColor(self.settings.value("pythonConsole/numberFontColor", QColor(self.NUMBER_COLOR))), 2) self.lexer.setColor(QColor(self.settings.value("pythonConsole/keywordFontColor", QColor(self.KEYWORD_COLOR))), 5) self.lexer.setColor(QColor(self.settings.value("pythonConsole/classFontColor", QColor(self.CLASS_COLOR))), 8) self.lexer.setColor(QColor(self.settings.value("pythonConsole/methodFontColor", QColor(self.METHOD_COLOR))), 9) self.lexer.setColor(QColor(self.settings.value("pythonConsole/decorFontColor", QColor(self.DECORATION_COLOR))), 15) self.lexer.setColor(QColor(self.settings.value("pythonConsole/commentBlockFontColor", QColor(self.COMMENT_BLOCK_COLOR))), 12) self.lexer.setColor(QColor(self.settings.value("pythonConsole/singleQuoteFontColor", QColor(self.SINGLE_QUOTE_COLOR))), 4) self.lexer.setColor(QColor(self.settings.value("pythonConsole/doubleQuoteFontColor", QColor(self.DOUBLE_QUOTE_COLOR))), 3) self.lexer.setColor(QColor(self.settings.value("pythonConsole/tripleSingleQuoteFontColor", QColor(self.TRIPLE_SINGLE_QUOTE_COLOR))), 6) self.lexer.setColor(QColor(self.settings.value("pythonConsole/tripleDoubleQuoteFontColor", QColor(self.TRIPLE_DOUBLE_QUOTE_COLOR))), 7) self.lexer.setColor(QColor(self.settings.value("pythonConsole/defaultFontColorEditor", QColor(self.DEFAULT_COLOR))), 13) self.lexer.setFont(font, 1) self.lexer.setFont(font, 3) self.lexer.setFont(font, 4) self.lexer.setFont(font, QsciLexerPython.UnclosedString) for style in range(0, 33): paperColor = QColor(self.settings.value("pythonConsole/paperBackgroundColor", QColor(self.BACKGROUND_COLOR))) self.lexer.setPaper(paperColor, style) self.api = QsciAPIs(self.lexer) checkBoxAPI = self.settings.value("pythonConsole/preloadAPI", True, type=bool) checkBoxPreparedAPI = self.settings.value("pythonConsole/usePreparedAPIFile", False, type=bool) if checkBoxAPI: pap = os.path.join(QgsApplication.pkgDataPath(), "python", "qsci_apis", "pyqgis.pap") self.api.loadPrepared(pap) elif 
checkBoxPreparedAPI: self.api.loadPrepared(self.settings.value("pythonConsole/preparedAPIFile")) else: apiPath = self.settings.value("pythonConsole/userAPI", []) for i in range(0, len(apiPath)): self.api.load(apiPath[i]) self.api.prepare() self.lexer.setAPIs(self.api) self.setLexer(self.lexer) # TODO: show completion list for file and directory def getText(self): """ Get the text as a unicode string. """ value = self.getBytes().decode('utf-8') # print (value) printing can give an error because the console font # may not have all unicode characters return value def getBytes(self): """ Get the text as bytes (utf-8 encoded). This is how the data is stored internally. """ len = self.SendScintilla(self.SCI_GETLENGTH) + 1 bb = QByteArray(len, '0') self.SendScintilla(self.SCI_GETTEXT, len, bb) return bytes(bb)[:-1] def getTextLength(self): return self.SendScintilla(QsciScintilla.SCI_GETLENGTH) def get_end_pos(self): """Return (line, index) position of the last character""" line = self.lines() - 1 return line, len(self.text(line)) def is_cursor_at_end(self): """Return True if cursor is at the end of text""" cline, cindex = self.getCursorPosition() return (cline, cindex) == self.get_end_pos() def move_cursor_to_end(self): """Move cursor to end of text""" line, index = self.get_end_pos() self.setCursorPosition(line, index) self.ensureCursorVisible() self.ensureLineVisible(line) def is_cursor_on_last_line(self): """Return True if cursor is on the last line""" cline, _ = self.getCursorPosition() return cline == self.lines() - 1 def is_cursor_on_edition_zone(self): """ Return True if the cursor is in the edition zone """ cline, cindex = self.getCursorPosition() return cline == self.lines() - 1 and cindex >= 4 def new_prompt(self, prompt): """ Print a new prompt and save its (line, index) position """ self.write(prompt, prompt=True) # now we update our cursor giving end of prompt line, index = self.getCursorPosition() self.ensureCursorVisible() self.ensureLineVisible(line) def 
displayPrompt(self, more=False): self.append("... ") if more else self.append(">>> ") self.move_cursor_to_end() def updateHistory(self, command): if isinstance(command, list): for line in command: self.history.append(line) elif not command == "": if len(self.history) <= 0 or \ command != self.history[-1]: self.history.append(command) self.historyIndex = len(self.history) def writeHistoryFile(self, fromCloseConsole=False): ok = False try: wH = codecs.open(_historyFile, 'w', encoding='utf-8') for s in self.history: wH.write(s + '\n') ok = True except: raise wH.close() if ok and not fromCloseConsole: msgText = QCoreApplication.translate('PythonConsole', 'History saved successfully.') self.parent.callWidgetMessageBar(msgText) def readHistoryFile(self): fileExist = QFile.exists(_historyFile) if fileExist: with codecs.open(_historyFile, 'r', encoding='utf-8') as rH: for line in rH: if line != "\n": l = line.rstrip('\n') self.updateHistory(l) else: return def clearHistory(self, clearSession=False): if clearSession: self.history = [] msgText = QCoreApplication.translate('PythonConsole', 'Session and file history cleared successfully.') self.parent.callWidgetMessageBar(msgText) return ok = False try: cH = codecs.open(_historyFile, 'w', encoding='utf-8') ok = True except: raise cH.close() if ok: msgText = QCoreApplication.translate('PythonConsole', 'History cleared successfully.') self.parent.callWidgetMessageBar(msgText) def clearHistorySession(self): self.clearHistory(True) def showPrevious(self): if self.historyIndex < len(self.history) and self.history: line, pos = self.getCursorPosition() selCmdLength = len(self.text(line)) self.setSelection(line, 4, line, selCmdLength) self.removeSelectedText() self.historyIndex += 1 if self.historyIndex == len(self.history): self.insert("") pass else: self.insert(self.history[self.historyIndex]) self.move_cursor_to_end() #self.SendScintilla(QsciScintilla.SCI_DELETEBACK) def showNext(self): if self.historyIndex > 0 and self.history: 
line, pos = self.getCursorPosition() selCmdLength = len(self.text(line)) self.setSelection(line, 4, line, selCmdLength) self.removeSelectedText() self.historyIndex -= 1 if self.historyIndex == len(self.history): self.insert("") else: self.insert(self.history[self.historyIndex]) self.move_cursor_to_end() #self.SendScintilla(QsciScintilla.SCI_DELETEBACK) def keyPressEvent(self, e): startLine, startPos, endLine, endPos = self.getSelection() # handle invalid cursor position and multiline selections if not self.is_cursor_on_edition_zone() or startLine < endLine: # allow copying and selecting if e.modifiers() & (Qt.ControlModifier | Qt.MetaModifier): if e.key() == Qt.Key_C: # only catch and return from Ctrl-C here if there's a selection if self.hasSelectedText(): QsciScintilla.keyPressEvent(self, e) return elif e.key() == Qt.Key_A: QsciScintilla.keyPressEvent(self, e) return else: return # allow selection if e.modifiers() & Qt.ShiftModifier: if e.key() in (Qt.Key_Left, Qt.Key_Right, Qt.Key_Home, Qt.Key_End): QsciScintilla.keyPressEvent(self, e) return # all other keystrokes get sent to the input line self.move_cursor_to_end() if e.modifiers() & (Qt.ControlModifier | Qt.MetaModifier) and e.key() == Qt.Key_C and not self.hasSelectedText(): # keyboard interrupt sys.stdout.fire_keyboard_interrupt = True return line, index = self.getCursorPosition() cmd = self.text(line) if e.key() in (Qt.Key_Return, Qt.Key_Enter) and not self.isListActive(): self.entered() elif e.key() in (Qt.Key_Left, Qt.Key_Home): QsciScintilla.keyPressEvent(self, e) # check whether the cursor is moved out of the edition zone newline, newindex = self.getCursorPosition() if newline < line or newindex < 4: # fix selection and the cursor position if self.hasSelectedText(): self.setSelection(line, self.getSelection()[3], line, 4) else: self.setCursorPosition(line, 4) elif e.key() in (Qt.Key_Backspace, Qt.Key_Delete): QsciScintilla.keyPressEvent(self, e) # check whether the cursor is moved out of the edition 
zone _, newindex = self.getCursorPosition() if newindex < 4: # restore the prompt chars (if removed) and # fix the cursor position self.insert(cmd[:3 - newindex] + " ") self.setCursorPosition(line, 4) self.recolor() elif (e.modifiers() & (Qt.ControlModifier | Qt.MetaModifier) and e.key() == Qt.Key_V) or \ (e.modifiers() & Qt.ShiftModifier and e.key() == Qt.Key_Insert): self.paste() e.accept() elif e.key() == Qt.Key_Down and not self.isListActive(): self.showPrevious() elif e.key() == Qt.Key_Up and not self.isListActive(): self.showNext() # TODO: press event for auto-completion file directory else: t = e.text() self.autoCloseBracket = self.settings.value("pythonConsole/autoCloseBracket", False, type=bool) self.autoImport = self.settings.value("pythonConsole/autoInsertionImport", True, type=bool) txt = cmd[:index].replace('>>> ', '').replace('... ', '') # Close bracket automatically if t in self.opening and self.autoCloseBracket: i = self.opening.index(t) if self.hasSelectedText() and startPos != 0: selText = self.selectedText() self.removeSelectedText() self.insert(self.opening[i] + selText + self.closing[i]) self.setCursorPosition(endLine, endPos + 2) return elif t == '(' and (re.match(r'^[ \t]*def \w+$', txt) or re.match(r'^[ \t]*class \w+$', txt)): self.insert('):') else: self.insert(self.closing[i]) # FIXES #8392 (automatically removes the redundant char # when autoclosing brackets option is enabled) elif t in [')', ']', '}'] and self.autoCloseBracket: txt = self.text(line) try: if txt[index - 1] in self.opening and t == txt[index]: self.setCursorPosition(line, index + 1) self.SendScintilla(QsciScintilla.SCI_DELETEBACK) except IndexError: pass elif t == ' ' and self.autoImport: ptrn = r'^[ \t]*from [\w.]+$' if re.match(ptrn, txt): self.insert(' import') self.setCursorPosition(line, index + 7) QsciScintilla.keyPressEvent(self, e) def contextMenuEvent(self, e): menu = QMenu(self) subMenu = QMenu(menu) titleHistoryMenu = QCoreApplication.translate("PythonConsole", 
"Command History") subMenu.setTitle(titleHistoryMenu) subMenu.addAction( QCoreApplication.translate("PythonConsole", "Show"), self.showHistory, 'Ctrl+Shift+SPACE') subMenu.addAction( QCoreApplication.translate("PythonConsole", "Clear File"), self.clearHistory) subMenu.addAction( QCoreApplication.translate("PythonConsole", "Clear Session"), self.clearHistorySession) menu.addMenu(subMenu) menu.addSeparator() copyAction = menu.addAction( QCoreApplication.translate("PythonConsole", "Copy"), self.copy, QKeySequence.Copy) pasteAction = menu.addAction( QCoreApplication.translate("PythonConsole", "Paste"), self.paste, QKeySequence.Paste) copyAction.setEnabled(False) pasteAction.setEnabled(False) if self.hasSelectedText(): copyAction.setEnabled(True) if QApplication.clipboard().text(): pasteAction.setEnabled(True) menu.exec_(self.mapToGlobal(e.pos())) def mousePressEvent(self, e): """ Re-implemented to handle the mouse press event. e: the mouse press event (QMouseEvent) """ self.setFocus() if e.button() == Qt.MidButton: stringSel = QApplication.clipboard().text(QClipboard.Selection) if not self.is_cursor_on_last_line(): self.move_cursor_to_end() self.insertFromDropPaste(stringSel) e.accept() else: QsciScintilla.mousePressEvent(self, e) def paste(self): """ Method to display data from the clipboard. XXX: It should reimplement the virtual QScintilla.paste method, but it seems not used by QScintilla code. 
""" stringPaste = QApplication.clipboard().text() if self.is_cursor_on_last_line(): if self.hasSelectedText(): self.removeSelectedText() else: self.move_cursor_to_end() self.insertFromDropPaste(stringPaste) # Drag and drop def dropEvent(self, e): if e.mimeData().hasText(): stringDrag = e.mimeData().text() self.insertFromDropPaste(stringDrag) self.setFocus() e.setDropAction(Qt.CopyAction) e.accept() else: QsciScintilla.dropEvent(self, e) def insertFromDropPaste(self, textDP): pasteList = textDP.splitlines() if pasteList: for line in pasteList[:-1]: cleanLine = line.replace(">>> ", "").replace("... ", "") self.insert(cleanLine) self.move_cursor_to_end() self.runCommand(self.currentCommand()) if pasteList[-1] != "": line = pasteList[-1] cleanLine = line.replace(">>> ", "").replace("... ", "") curpos = self.getCursorPosition() self.insert(cleanLine) self.setCursorPosition(curpos[0], curpos[1] + len(cleanLine)) def insertTextFromFile(self, listOpenFile): for line in listOpenFile[:-1]: self.append(line) self.move_cursor_to_end() self.SendScintilla(QsciScintilla.SCI_DELETEBACK) self.runCommand(self.currentCommand()) self.append(listOpenFile[-1]) self.move_cursor_to_end() self.SendScintilla(QsciScintilla.SCI_DELETEBACK) def entered(self): self.move_cursor_to_end() self.runCommand(self.currentCommand()) self.setFocus() self.move_cursor_to_end() def currentCommand(self): linenr, index = self.getCursorPosition() string = self.text() cmdLine = string[4:] cmd = cmdLine return cmd def runCommand(self, cmd): self.writeCMD(cmd) import webbrowser self.updateHistory(cmd) version = 'master' if 'master' in Qgis.QGIS_VERSION.lower() else re.findall(r'^\d.[0-9]*', Qgis.QGIS_VERSION)[0] if cmd in ('_pyqgis', '_api', '_cookbook'): if cmd == '_pyqgis': webbrowser.open("https://qgis.org/pyqgis/{}".format(version)) elif cmd == '_api': webbrowser.open("https://qgis.org/api/{}".format('' if version == 'master' else version)) elif cmd == '_cookbook': 
webbrowser.open("https://docs.qgis.org/{}/en/docs/pyqgis_developer_cookbook/".format( 'testing' if version == 'master' else version)) more = False else: self.buffer.append(cmd) src = "\n".join(self.buffer) more = self.runsource(src) if not more: self.buffer = [] # prevents to commands with more lines to break the console # in the case they have a eol different from '\n' self.setText('') self.move_cursor_to_end() self.displayPrompt(more) def write(self, txt): sys.stderr.write(txt) def writeCMD(self, txt): if sys.stdout: sys.stdout.fire_keyboard_interrupt = False if len(txt) > 0: getCmdString = self.text() prompt = getCmdString[0:4] sys.stdout.write(prompt + txt + '\n') def runsource(self, source, filename='<input>', symbol='single'): if sys.stdout: sys.stdout.fire_keyboard_interrupt = False hook = sys.excepthook try: def excepthook(etype, value, tb): self.write("".join(traceback.format_exception(etype, value, tb))) sys.excepthook = excepthook return super(ShellScintilla, self).runsource(source, filename, symbol) finally: sys.excepthook = hook class HistoryDialog(QDialog, Ui_HistoryDialogPythonConsole): def __init__(self, parent): QDialog.__init__(self, parent) self.setupUi(self) self.parent = parent self.setWindowTitle(QCoreApplication.translate("PythonConsole", "Python Console - Command History")) self.listView.setToolTip(QCoreApplication.translate("PythonConsole", "Double-click on item to execute")) self.model = QStandardItemModel(self.listView) self._reloadHistory() self.deleteScut = QShortcut(QKeySequence(Qt.Key_Delete), self) self.deleteScut.activated.connect(self._deleteItem) self.listView.doubleClicked.connect(self._runHistory) self.reloadHistory.clicked.connect(self._reloadHistory) self.saveHistory.clicked.connect(self._saveHistory) def _runHistory(self, item): cmd = item.data(Qt.DisplayRole) self.parent.runCommand(cmd) def _saveHistory(self): self.parent.writeHistoryFile(True) def _reloadHistory(self): self.model.clear() for i in self.parent.history: item = 
QStandardItem(i) if sys.platform.startswith('win'): item.setSizeHint(QSize(18, 18)) self.model.appendRow(item) self.listView.setModel(self.model) self.listView.scrollToBottom() def _deleteItem(self): itemsSelected = self.listView.selectionModel().selectedIndexes() if itemsSelected: item = itemsSelected[0].row() # Remove item from the command history (just for the current session) self.parent.history.pop(item) self.parent.historyIndex -= 1 # Remove row from the command history dialog self.model.removeRow(item)
gpl-2.0
nebulans/testfixtures
testfixtures/tests/test_log_capture.py
1
7257
from __future__ import absolute_import from logging import getLogger from unittest import TestCase from testfixtures.shouldraise import ShouldAssert from .mock import patch from testfixtures import ( log_capture, compare, Comparison as C, LogCapture, ShouldRaise ) root = getLogger() one = getLogger('one') two = getLogger('two') child = getLogger('one.child') class TestLog_Capture(TestCase): @log_capture('two', 'one.child') @log_capture('one') @log_capture() def test_logging(self, l1, l2, l3): # we can now log as normal root.info('1') one.info('2') two.info('3') child.info('4') # and later check what was logged l1.check( ('root', 'INFO', '1'), ('one', 'INFO', '2'), ('two', 'INFO', '3'), ('one.child', 'INFO', '4'), ) l2.check( ('one', 'INFO', '2'), ('one.child', 'INFO', '4') ) l3.check( ('two', 'INFO', '3'), ('one.child', 'INFO', '4') ) # each logger also exposes the real # log records should anything else be neeeded compare(l3.records, [ C('logging.LogRecord'), C('logging.LogRecord'), ]) def test_uninstall_properly(self): root = getLogger() child = getLogger('child') before_root = root.handlers[:] before_child = child.handlers[:] try: old_root_level = root.level root.setLevel(49) old_child_level = child.level child.setLevel(69) @log_capture('child') @log_capture() def test_method(l1, l2): root = getLogger() root.info('1') self.assertEqual(root.level, 1) child = getLogger('child') self.assertEqual(child.level, 1) child.info('2') l1.check( ('root', 'INFO', '1'), ('child', 'INFO', '2'), ) l2.check( ('child', 'INFO', '2'), ) test_method() self.assertEqual(root.level, 49) self.assertEqual(child.level, 69) self.assertEqual(root.handlers, before_root) self.assertEqual(child.handlers, before_child) finally: root.setLevel(old_root_level) child.setLevel(old_child_level) @log_capture() def test_decorator_returns_logcapture(self, l): # check for what we get, so we only have to write # tests in test_logcapture.py self.failUnless(isinstance(l, LogCapture)) def 
test_remove_existing_handlers(self): logger = getLogger() # get original handlers original = logger.handlers try: # put in a stub which will blow up if used logger.handlers = start = [object()] @log_capture() def test_method(l): logger.info('during') l.check(('root', 'INFO', 'during')) test_method() compare(logger.handlers, start) finally: logger.handlers = original def test_clear_global_state(self): from logging import _handlers, _handlerList capture = LogCapture() capture.uninstall() self.assertFalse(capture in _handlers) self.assertFalse(capture in _handlerList) def test_no_propogate(self): logger = getLogger('child') # paranoid check compare(logger.propagate, True) @log_capture('child', propagate=False) def test_method(l): logger.info('a log message') l.check(('child', 'INFO', 'a log message')) with LogCapture() as global_log: test_method() global_log.check() compare(logger.propagate, True) def test_different_attributes(self): with LogCapture(attributes=('funcName', 'processName')) as log: getLogger().info('oh hai') log.check( ('test_different_attributes', 'MainProcess') ) def test_missing_attribute(self): with LogCapture(attributes=('msg', 'lolwut')) as log: getLogger().info('oh %s', 'hai') log.check( ('oh %s', None) ) def test_single_attribute(self): # one which isn't a string, to boot! 
with LogCapture(attributes=['msg']) as log: getLogger().info(dict(foo='bar', baz='bob')) log.check( dict(foo='bar', baz='bob'), ) def test_callable_instead_of_attribute(self): def extract_msg(record): return {k: v for (k, v) in record.msg.items() if k != 'baz'} with LogCapture(attributes=extract_msg) as log: getLogger().info(dict(foo='bar', baz='bob')) log.check( dict(foo='bar'), ) def test_msg_is_none(self): with LogCapture(attributes=('msg', 'foo')) as log: getLogger().info(None, extra=dict(foo='bar')) log.check( (None, 'bar') ) def test_normal_check(self): with LogCapture() as log: getLogger().info('oh hai') with ShouldAssert( "sequence not as expected:\n\n" "same:\n" "()\n\n" "expected:\n" "(('root', 'INFO', 'oh noez'),)\n\n" "actual:\n" "(('root', 'INFO', 'oh hai'),)" ): log.check(('root', 'INFO', 'oh noez')) def test_recursive_check(self): with LogCapture(recursive_check=True) as log: getLogger().info('oh hai') with ShouldAssert( "sequence not as expected:\n\n" "same:\n()\n\n" "expected:\n(('root', 'INFO', 'oh noez'),)\n\n" "actual:\n(('root', 'INFO', 'oh hai'),)\n\n" "While comparing [0]: sequence not as expected:\n\n" "same:\n('root', 'INFO')\n\n" "expected:\n" "('oh noez',)\n\n" "actual:\n" "('oh hai',)\n\n" "While comparing [0][2]: 'oh noez' (expected) != 'oh hai' (actual)" ): log.check(('root', 'INFO', 'oh noez')) @log_capture() @patch('testfixtures.tests.sample1.SampleClassA') def test_patch_then_log(self, a1, a2): actual = [type(c).__name__ for c in (a1, a2)] compare(actual, expected=['MagicMock', 'LogCaptureForDecorator']) @patch('testfixtures.tests.sample1.SampleClassA') @log_capture() def test_log_then_patch(self, a1, a2): actual = [type(c).__name__ for c in (a1, a2)] compare(actual, expected=['LogCaptureForDecorator', 'MagicMock']) class BaseCaptureTest(TestCase): a = 33 @log_capture() def test_logs_if_a_smaller_than_44(self, logs): logger = getLogger() if self.a < 44: logger.info('{} is smaller than 44'.format(self.a)) logs.check( ('root', 'INFO', 
'{} is smaller than 44'.format(self.a)), ) class SubclassCaptureTest(BaseCaptureTest): a = 2
mit
doordash/pytest
testing/acceptance_test.py
17
23426
import sys import py, pytest from _pytest.main import EXIT_NOTESTSCOLLECTED, EXIT_USAGEERROR class TestGeneralUsage: def test_config_error(self, testdir): testdir.makeconftest(""" def pytest_configure(config): import pytest raise pytest.UsageError("hello") """) result = testdir.runpytest(testdir.tmpdir) assert result.ret != 0 result.stderr.fnmatch_lines([ '*ERROR: hello' ]) def test_root_conftest_syntax_error(self, testdir): testdir.makepyfile(conftest="raise SyntaxError\n") result = testdir.runpytest() result.stderr.fnmatch_lines(["*raise SyntaxError*"]) assert result.ret != 0 def test_early_hook_error_issue38_1(self, testdir): testdir.makeconftest(""" def pytest_sessionstart(): 0 / 0 """) result = testdir.runpytest(testdir.tmpdir) assert result.ret != 0 # tracestyle is native by default for hook failures result.stdout.fnmatch_lines([ '*INTERNALERROR*File*conftest.py*line 2*', '*0 / 0*', ]) result = testdir.runpytest(testdir.tmpdir, "--fulltrace") assert result.ret != 0 # tracestyle is native by default for hook failures result.stdout.fnmatch_lines([ '*INTERNALERROR*def pytest_sessionstart():*', '*INTERNALERROR*0 / 0*', ]) def test_early_hook_configure_error_issue38(self, testdir): testdir.makeconftest(""" def pytest_configure(): 0 / 0 """) result = testdir.runpytest(testdir.tmpdir) assert result.ret != 0 # here we get it on stderr result.stderr.fnmatch_lines([ '*INTERNALERROR*File*conftest.py*line 2*', '*0 / 0*', ]) def test_file_not_found(self, testdir): result = testdir.runpytest("asd") assert result.ret != 0 result.stderr.fnmatch_lines(["ERROR: file not found*asd"]) def test_file_not_found_unconfigure_issue143(self, testdir): testdir.makeconftest(""" def pytest_configure(): print("---configure") def pytest_unconfigure(): print("---unconfigure") """) result = testdir.runpytest("-s", "asd") assert result.ret == 4 # EXIT_USAGEERROR result.stderr.fnmatch_lines(["ERROR: file not found*asd"]) result.stdout.fnmatch_lines([ "*---configure", "*---unconfigure", ]) def 
test_config_preparse_plugin_option(self, testdir): testdir.makepyfile(pytest_xyz=""" def pytest_addoption(parser): parser.addoption("--xyz", dest="xyz", action="store") """) testdir.makepyfile(test_one=""" def test_option(pytestconfig): assert pytestconfig.option.xyz == "123" """) result = testdir.runpytest("-p", "pytest_xyz", "--xyz=123", syspathinsert=True) assert result.ret == 0 result.stdout.fnmatch_lines([ '*1 passed*', ]) def test_assertion_magic(self, testdir): p = testdir.makepyfile(""" def test_this(): x = 0 assert x """) result = testdir.runpytest(p) result.stdout.fnmatch_lines([ "> assert x", "E assert 0", ]) assert result.ret == 1 def test_nested_import_error(self, testdir): p = testdir.makepyfile(""" import import_fails def test_this(): assert import_fails.a == 1 """) testdir.makepyfile(import_fails="import does_not_work") result = testdir.runpytest(p) result.stdout.fnmatch_lines([ #XXX on jython this fails: "> import import_fails", "E ImportError: No module named *does_not_work*", ]) assert result.ret == 1 def test_not_collectable_arguments(self, testdir): p1 = testdir.makepyfile("") p2 = testdir.makefile(".pyc", "123") result = testdir.runpytest(p1, p2) assert result.ret result.stderr.fnmatch_lines([ "*ERROR: not found:*%s" %(p2.basename,) ]) def test_issue486_better_reporting_on_conftest_load_failure(self, testdir): testdir.makepyfile("") testdir.makeconftest("import qwerty") result = testdir.runpytest("--help") result.stdout.fnmatch_lines(""" *--version* *warning*conftest.py* """) result = testdir.runpytest() result.stderr.fnmatch_lines(""" *ERROR*could not load*conftest.py* """) def test_early_skip(self, testdir): testdir.mkdir("xyz") testdir.makeconftest(""" import pytest def pytest_collect_directory(): pytest.skip("early") """) result = testdir.runpytest() assert result.ret == EXIT_NOTESTSCOLLECTED result.stdout.fnmatch_lines([ "*1 skip*" ]) def test_issue88_initial_file_multinodes(self, testdir): testdir.makeconftest(""" import pytest class 
MyFile(pytest.File): def collect(self): return [MyItem("hello", parent=self)] def pytest_collect_file(path, parent): return MyFile(path, parent) class MyItem(pytest.Item): pass """) p = testdir.makepyfile("def test_hello(): pass") result = testdir.runpytest(p, "--collect-only") result.stdout.fnmatch_lines([ "*MyFile*test_issue88*", "*Module*test_issue88*", ]) def test_issue93_initialnode_importing_capturing(self, testdir): testdir.makeconftest(""" import sys print ("should not be seen") sys.stderr.write("stder42\\n") """) result = testdir.runpytest() assert result.ret == EXIT_NOTESTSCOLLECTED assert "should not be seen" not in result.stdout.str() assert "stderr42" not in result.stderr.str() def test_conftest_printing_shows_if_error(self, testdir): testdir.makeconftest(""" print ("should be seen") assert 0 """) result = testdir.runpytest() assert result.ret != 0 assert "should be seen" in result.stdout.str() @pytest.mark.skipif(not hasattr(py.path.local, 'mksymlinkto'), reason="symlink not available on this platform") def test_chdir(self, testdir): testdir.tmpdir.join("py").mksymlinkto(py._pydir) p = testdir.tmpdir.join("main.py") p.write(py.code.Source(""" import sys, os sys.path.insert(0, '') import py print (py.__file__) print (py.__path__) os.chdir(os.path.dirname(os.getcwd())) print (py.log) """)) result = testdir.runpython(p) assert not result.ret def test_issue109_sibling_conftests_not_loaded(self, testdir): sub1 = testdir.tmpdir.mkdir("sub1") sub2 = testdir.tmpdir.mkdir("sub2") sub1.join("conftest.py").write("assert 0") result = testdir.runpytest(sub2) assert result.ret == EXIT_NOTESTSCOLLECTED sub2.ensure("__init__.py") p = sub2.ensure("test_hello.py") result = testdir.runpytest(p) assert result.ret == EXIT_NOTESTSCOLLECTED result = testdir.runpytest(sub1) assert result.ret == EXIT_USAGEERROR def test_directory_skipped(self, testdir): testdir.makeconftest(""" import pytest def pytest_ignore_collect(): pytest.skip("intentional") """) testdir.makepyfile("def 
test_hello(): pass") result = testdir.runpytest() assert result.ret == EXIT_NOTESTSCOLLECTED result.stdout.fnmatch_lines([ "*1 skipped*" ]) def test_multiple_items_per_collector_byid(self, testdir): c = testdir.makeconftest(""" import pytest class MyItem(pytest.Item): def runtest(self): pass class MyCollector(pytest.File): def collect(self): return [MyItem(name="xyz", parent=self)] def pytest_collect_file(path, parent): if path.basename.startswith("conftest"): return MyCollector(path, parent) """) result = testdir.runpytest(c.basename+"::"+"xyz") assert result.ret == 0 result.stdout.fnmatch_lines([ "*1 pass*", ]) def test_skip_on_generated_funcarg_id(self, testdir): testdir.makeconftest(""" import pytest def pytest_generate_tests(metafunc): metafunc.addcall({'x': 3}, id='hello-123') def pytest_runtest_setup(item): print (item.keywords) if 'hello-123' in item.keywords: pytest.skip("hello") assert 0 """) p = testdir.makepyfile("""def test_func(x): pass""") res = testdir.runpytest(p) assert res.ret == 0 res.stdout.fnmatch_lines(["*1 skipped*"]) def test_direct_addressing_selects(self, testdir): p = testdir.makepyfile(""" def pytest_generate_tests(metafunc): metafunc.addcall({'i': 1}, id="1") metafunc.addcall({'i': 2}, id="2") def test_func(i): pass """) res = testdir.runpytest(p.basename + "::" + "test_func[1]") assert res.ret == 0 res.stdout.fnmatch_lines(["*1 passed*"]) def test_direct_addressing_notfound(self, testdir): p = testdir.makepyfile(""" def test_func(): pass """) res = testdir.runpytest(p.basename + "::" + "test_notfound") assert res.ret res.stderr.fnmatch_lines(["*ERROR*not found*"]) def test_docstring_on_hookspec(self): from _pytest import hookspec for name, value in vars(hookspec).items(): if name.startswith("pytest_"): assert value.__doc__, "no docstring for %s" % name def test_initialization_error_issue49(self, testdir): testdir.makeconftest(""" def pytest_configure(): x """) result = testdir.runpytest() assert result.ret == 3 # internal error 
result.stderr.fnmatch_lines([ "INTERNAL*pytest_configure*", "INTERNAL*x*", ]) assert 'sessionstarttime' not in result.stderr.str() @pytest.mark.parametrize('lookfor', ['test_fun.py', 'test_fun.py::test_a']) def test_issue134_report_syntaxerror_when_collecting_member(self, testdir, lookfor): testdir.makepyfile(test_fun=""" def test_a(): pass def""") result = testdir.runpytest(lookfor) result.stdout.fnmatch_lines(['*SyntaxError*']) if '::' in lookfor: result.stderr.fnmatch_lines([ '*ERROR*', ]) assert result.ret == 4 # usage error only if item not found def test_report_all_failed_collections_initargs(self, testdir): testdir.makepyfile(test_a="def", test_b="def") result = testdir.runpytest("test_a.py::a", "test_b.py::b") result.stderr.fnmatch_lines([ "*ERROR*test_a.py::a*", "*ERROR*test_b.py::b*", ]) def test_namespace_import_doesnt_confuse_import_hook(self, testdir): # Ref #383. Python 3.3's namespace package messed with our import hooks # Importing a module that didn't exist, even if the ImportError was # gracefully handled, would make our test crash. 
testdir.mkdir('not_a_package') p = testdir.makepyfile(""" try: from not_a_package import doesnt_exist except ImportError: # We handle the import error gracefully here pass def test_whatever(): pass """) res = testdir.runpytest(p.basename) assert res.ret == 0 def test_unknown_option(self, testdir): result = testdir.runpytest("--qwlkej") result.stderr.fnmatch_lines(""" *unrecognized* """) def test_getsourcelines_error_issue553(self, testdir, monkeypatch): monkeypatch.setattr("inspect.getsourcelines", None) p = testdir.makepyfile(""" def raise_error(obj): raise IOError('source code not available') import inspect inspect.getsourcelines = raise_error def test_foo(invalid_fixture): pass """) res = testdir.runpytest(p) res.stdout.fnmatch_lines([ "*source code not available*", "*fixture 'invalid_fixture' not found", ]) def test_plugins_given_as_strings(self, tmpdir, monkeypatch): """test that str values passed to main() as `plugins` arg are interpreted as module names to be imported and registered. #855. 
""" with pytest.raises(ImportError) as excinfo: pytest.main([str(tmpdir)], plugins=['invalid.module']) assert 'invalid' in str(excinfo.value) p = tmpdir.join('test_test_plugins_given_as_strings.py') p.write('def test_foo(): pass') mod = py.std.types.ModuleType("myplugin") monkeypatch.setitem(sys.modules, 'myplugin', mod) assert pytest.main(args=[str(tmpdir)], plugins=['myplugin']) == 0 class TestInvocationVariants: def test_earlyinit(self, testdir): p = testdir.makepyfile(""" import pytest assert hasattr(pytest, 'mark') """) result = testdir.runpython(p) assert result.ret == 0 @pytest.mark.xfail("sys.platform.startswith('java')") def test_pydoc(self, testdir): for name in ('py.test', 'pytest'): result = testdir.runpython_c("import %s;help(%s)" % (name, name)) assert result.ret == 0 s = result.stdout.str() assert 'MarkGenerator' in s def test_import_star_py_dot_test(self, testdir): p = testdir.makepyfile(""" from py.test import * #collect #cmdline #Item #assert collect.Item is Item #assert collect.Collector is Collector main skip xfail """) result = testdir.runpython(p) assert result.ret == 0 def test_import_star_pytest(self, testdir): p = testdir.makepyfile(""" from pytest import * #Item #File main skip xfail """) result = testdir.runpython(p) assert result.ret == 0 def test_double_pytestcmdline(self, testdir): p = testdir.makepyfile(run=""" import pytest pytest.main() pytest.main() """) testdir.makepyfile(""" def test_hello(): pass """) result = testdir.runpython(p) result.stdout.fnmatch_lines([ "*1 passed*", "*1 passed*", ]) @pytest.mark.skipif("sys.version_info < (2,5)") def test_python_minus_m_invocation_ok(self, testdir): p1 = testdir.makepyfile("def test_hello(): pass") res = testdir.run(py.std.sys.executable, "-m", "pytest", str(p1)) assert res.ret == 0 @pytest.mark.skipif("sys.version_info < (2,5)") def test_python_minus_m_invocation_fail(self, testdir): p1 = testdir.makepyfile("def test_fail(): 0/0") res = testdir.run(py.std.sys.executable, "-m", "pytest", 
str(p1)) assert res.ret == 1 @pytest.mark.skipif("sys.version_info < (2,5)") def test_python_pytest_package(self, testdir): p1 = testdir.makepyfile("def test_pass(): pass") res = testdir.run(py.std.sys.executable, "-m", "pytest", str(p1)) assert res.ret == 0 res.stdout.fnmatch_lines(["*1 passed*"]) def test_equivalence_pytest_pytest(self): assert pytest.main == py.test.cmdline.main def test_invoke_with_string(self, capsys): retcode = pytest.main("-h") assert not retcode out, err = capsys.readouterr() assert "--help" in out pytest.raises(ValueError, lambda: pytest.main(0)) def test_invoke_with_path(self, tmpdir, capsys): retcode = pytest.main(tmpdir) assert retcode == EXIT_NOTESTSCOLLECTED out, err = capsys.readouterr() def test_invoke_plugin_api(self, testdir, capsys): class MyPlugin: def pytest_addoption(self, parser): parser.addoption("--myopt") pytest.main(["-h"], plugins=[MyPlugin()]) out, err = capsys.readouterr() assert "--myopt" in out def test_pyargs_importerror(self, testdir, monkeypatch): monkeypatch.delenv('PYTHONDONTWRITEBYTECODE', False) path = testdir.mkpydir("tpkg") path.join("test_hello.py").write('raise ImportError') result = testdir.runpytest("--pyargs", "tpkg.test_hello") assert result.ret != 0 # FIXME: It would be more natural to match NOT # "ERROR*file*or*package*not*found*". 
result.stdout.fnmatch_lines([ "*collected 0 items*" ]) def test_cmdline_python_package(self, testdir, monkeypatch): monkeypatch.delenv('PYTHONDONTWRITEBYTECODE', False) path = testdir.mkpydir("tpkg") path.join("test_hello.py").write("def test_hello(): pass") path.join("test_world.py").write("def test_world(): pass") result = testdir.runpytest("--pyargs", "tpkg") assert result.ret == 0 result.stdout.fnmatch_lines([ "*2 passed*" ]) result = testdir.runpytest("--pyargs", "tpkg.test_hello") assert result.ret == 0 result.stdout.fnmatch_lines([ "*1 passed*" ]) def join_pythonpath(what): cur = py.std.os.environ.get('PYTHONPATH') if cur: return str(what) + ':' + cur return what empty_package = testdir.mkpydir("empty_package") monkeypatch.setenv('PYTHONPATH', join_pythonpath(empty_package)) result = testdir.runpytest("--pyargs", ".") assert result.ret == 0 result.stdout.fnmatch_lines([ "*2 passed*" ]) monkeypatch.setenv('PYTHONPATH', join_pythonpath(testdir)) path.join('test_hello.py').remove() result = testdir.runpytest("--pyargs", "tpkg.test_hello") assert result.ret != 0 result.stderr.fnmatch_lines([ "*not*found*test_hello*", ]) def test_cmdline_python_package_not_exists(self, testdir): result = testdir.runpytest("--pyargs", "tpkgwhatv") assert result.ret result.stderr.fnmatch_lines([ "ERROR*file*or*package*not*found*", ]) @pytest.mark.xfail(reason="decide: feature or bug") def test_noclass_discovery_if_not_testcase(self, testdir): testpath = testdir.makepyfile(""" import unittest class TestHello(object): def test_hello(self): assert self.attr class RealTest(unittest.TestCase, TestHello): attr = 42 """) reprec = testdir.inline_run(testpath) reprec.assertoutcome(passed=1) def test_doctest_id(self, testdir): testdir.makefile('.txt', """ >>> x=3 >>> x 4 """) result = testdir.runpytest("-rf") lines = result.stdout.str().splitlines() for line in lines: if line.startswith("FAIL "): testid = line[5:].strip() break result = testdir.runpytest(testid, '-rf') 
result.stdout.fnmatch_lines([ line, "*1 failed*", ]) def test_core_backward_compatibility(self): """Test backward compatibility for get_plugin_manager function. See #787.""" import _pytest.config assert type(_pytest.config.get_plugin_manager()) is _pytest.config.PytestPluginManager def test_has_plugin(self, request): """Test hasplugin function of the plugin manager (#932).""" assert request.config.pluginmanager.hasplugin('python') class TestDurations: source = """ import time frag = 0.002 def test_something(): pass def test_2(): time.sleep(frag*5) def test_1(): time.sleep(frag) def test_3(): time.sleep(frag*10) """ def test_calls(self, testdir): testdir.makepyfile(self.source) result = testdir.runpytest("--durations=10") assert result.ret == 0 result.stdout.fnmatch_lines_random([ "*durations*", "*call*test_3*", "*call*test_2*", "*call*test_1*", ]) def test_calls_show_2(self, testdir): testdir.makepyfile(self.source) result = testdir.runpytest("--durations=2") assert result.ret == 0 lines = result.stdout.get_lines_after("*slowest*durations*") assert "4 passed" in lines[2] def test_calls_showall(self, testdir): testdir.makepyfile(self.source) result = testdir.runpytest("--durations=0") assert result.ret == 0 for x in "123": for y in 'call',: #'setup', 'call', 'teardown': for line in result.stdout.lines: if ("test_%s" % x) in line and y in line: break else: raise AssertionError("not found %s %s" % (x,y)) def test_with_deselected(self, testdir): testdir.makepyfile(self.source) result = testdir.runpytest("--durations=2", "-k test_1") assert result.ret == 0 result.stdout.fnmatch_lines([ "*durations*", "*call*test_1*", ]) def test_with_failing_collection(self, testdir): testdir.makepyfile(self.source) testdir.makepyfile(test_collecterror="""xyz""") result = testdir.runpytest("--durations=2", "-k test_1") assert result.ret != 0 result.stdout.fnmatch_lines([ "*durations*", "*call*test_1*", ]) class TestDurationWithFixture: source = """ import time frag = 0.001 def 
setup_function(func): time.sleep(frag * 3) def test_1(): time.sleep(frag*2) def test_2(): time.sleep(frag) """ def test_setup_function(self, testdir): testdir.makepyfile(self.source) result = testdir.runpytest("--durations=10") assert result.ret == 0 result.stdout.fnmatch_lines_random(""" *durations* * setup *test_1* * call *test_1* """)
mit
nelson-liu/scikit-learn
benchmarks/bench_isolation_forest.py
46
3782
""" ========================================== IsolationForest benchmark ========================================== A test of IsolationForest on classical anomaly detection datasets. """ print(__doc__) from time import time import numpy as np import matplotlib.pyplot as plt from sklearn.ensemble import IsolationForest from sklearn.metrics import roc_curve, auc from sklearn.datasets import fetch_kddcup99, fetch_covtype, fetch_mldata from sklearn.preprocessing import LabelBinarizer from sklearn.utils import shuffle as sh np.random.seed(1) datasets = ['http', 'smtp', 'SA', 'SF', 'shuttle', 'forestcover'] fig_roc, ax_roc = plt.subplots(1, 1, figsize=(8, 5)) for dat in datasets: # loading and vectorization print('loading data') if dat in ['http', 'smtp', 'SA', 'SF']: dataset = fetch_kddcup99(subset=dat, shuffle=True, percent10=True) X = dataset.data y = dataset.target if dat == 'shuttle': dataset = fetch_mldata('shuttle') X = dataset.data y = dataset.target X, y = sh(X, y) # we remove data with label 4 # normal data are then those of class 1 s = (y != 4) X = X[s, :] y = y[s] y = (y != 1).astype(int) if dat == 'forestcover': dataset = fetch_covtype(shuffle=True) X = dataset.data y = dataset.target # normal data are those with attribute 2 # abnormal those with attribute 4 s = (y == 2) + (y == 4) X = X[s, :] y = y[s] y = (y != 2).astype(int) print('vectorizing data') if dat == 'SF': lb = LabelBinarizer() lb.fit(X[:, 1]) x1 = lb.transform(X[:, 1]) X = np.c_[X[:, :1], x1, X[:, 2:]] y = (y != 'normal.').astype(int) if dat == 'SA': lb = LabelBinarizer() lb.fit(X[:, 1]) x1 = lb.transform(X[:, 1]) lb.fit(X[:, 2]) x2 = lb.transform(X[:, 2]) lb.fit(X[:, 3]) x3 = lb.transform(X[:, 3]) X = np.c_[X[:, :1], x1, x2, x3, X[:, 4:]] y = (y != 'normal.').astype(int) if dat == 'http' or dat == 'smtp': y = (y != 'normal.').astype(int) n_samples, n_features = X.shape n_samples_train = n_samples // 2 X = X.astype(float) X_train = X[:n_samples_train, :] X_test = X[n_samples_train:, :] y_train = 
y[:n_samples_train] y_test = y[n_samples_train:] print('IsolationForest processing...') model = IsolationForest(n_jobs=-1) tstart = time() model.fit(X_train) fit_time = time() - tstart tstart = time() scoring = - model.decision_function(X_test) # the lower, the more normal # Show score histograms fig, ax = plt.subplots(3, sharex=True, sharey=True) bins = np.linspace(-0.5, 0.5, 200) ax[0].hist(scoring, bins, color='black') ax[0].set_title('decision function for %s dataset' % dat) ax[0].legend(loc="lower right") ax[1].hist(scoring[y_test == 0], bins, color='b', label='normal data') ax[1].legend(loc="lower right") ax[2].hist(scoring[y_test == 1], bins, color='r', label='outliers') ax[2].legend(loc="lower right") # Show ROC Curves predict_time = time() - tstart fpr, tpr, thresholds = roc_curve(y_test, scoring) AUC = auc(fpr, tpr) label = ('%s (area: %0.3f, train-time: %0.2fs, ' 'test-time: %0.2fs)' % (dat, AUC, fit_time, predict_time)) ax_roc.plot(fpr, tpr, lw=1, label=label) ax_roc.set_xlim([-0.05, 1.05]) ax_roc.set_ylim([-0.05, 1.05]) ax_roc.set_xlabel('False Positive Rate') ax_roc.set_ylabel('True Positive Rate') ax_roc.set_title('Receiver operating characteristic (ROC) curves') ax_roc.legend(loc="lower right") fig_roc.tight_layout() plt.show()
bsd-3-clause
roxlu/video_capture
docs/source/conf.py
1
8166
# -*- coding: utf-8 -*- # # Video Capture documentation build configuration file, created by # sphinx-quickstart on Mon Mar 10 09:23:03 2014. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Video Capture' copyright = u'2014, roxlu' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '0.0.0.1' # The full version, including alpha/beta/rc tags. release = '0.0.0.1' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
#language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all # documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. #keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. 
#html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. #html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). 
#html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'VideoCapturedoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ('index', 'VideoCapture.tex', u'Video Capture Documentation', u'roxlu', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'videocapture', u'Video Capture Documentation', [u'roxlu'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. 
List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'VideoCapture', u'Video Capture Documentation', u'roxlu', 'VideoCapture', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. #texinfo_no_detailmenu = False
apache-2.0
nan86150/ImageFusion
lib/python2.7/site-packages/PIL/Image.py
38
67234
# # The Python Imaging Library. # $Id$ # # the Image class wrapper # # partial release history: # 1995-09-09 fl Created # 1996-03-11 fl PIL release 0.0 (proof of concept) # 1996-04-30 fl PIL release 0.1b1 # 1999-07-28 fl PIL release 1.0 final # 2000-06-07 fl PIL release 1.1 # 2000-10-20 fl PIL release 1.1.1 # 2001-05-07 fl PIL release 1.1.2 # 2002-03-15 fl PIL release 1.1.3 # 2003-05-10 fl PIL release 1.1.4 # 2005-03-28 fl PIL release 1.1.5 # 2006-12-02 fl PIL release 1.1.6 # 2009-11-15 fl PIL release 1.1.7 # # Copyright (c) 1997-2009 by Secret Labs AB. All rights reserved. # Copyright (c) 1995-2009 by Fredrik Lundh. # # See the README file for information on usage and redistribution. # VERSION = "1.1.7" try: import warnings except ImportError: warnings = None class _imaging_not_installed: # module placeholder def __getattr__(self, id): raise ImportError("The _imaging C module is not installed") try: # give Tk a chance to set up the environment, in case we're # using an _imaging module linked against libtcl/libtk (use # __import__ to hide this from naive packagers; we don't really # depend on Tk unless ImageTk is used, and that module already # imports Tkinter) __import__("FixTk") except ImportError: pass try: # If the _imaging C module is not present, you can still use # the "open" function to identify files, but you cannot load # them. Note that other modules should not refer to _imaging # directly; import Image and use the Image.core variable instead. import _imaging core = _imaging del _imaging except ImportError, v: core = _imaging_not_installed() if str(v)[:20] == "Module use of python" and warnings: # The _imaging C module is present, but not compiled for # the right version (windows only). Print a warning, if # possible. 
warnings.warn( "The _imaging extension was built for another version " "of Python; most PIL functions will be disabled", RuntimeWarning ) import ImageMode import ImagePalette import os, string, sys # type stuff from types import IntType, StringType, TupleType try: UnicodeStringType = type(unicode("")) ## # (Internal) Checks if an object is a string. If the current # Python version supports Unicode, this checks for both 8-bit # and Unicode strings. def isStringType(t): return isinstance(t, StringType) or isinstance(t, UnicodeStringType) except NameError: def isStringType(t): return isinstance(t, StringType) ## # (Internal) Checks if an object is a tuple. def isTupleType(t): return isinstance(t, TupleType) ## # (Internal) Checks if an object is an image object. def isImageType(t): return hasattr(t, "im") ## # (Internal) Checks if an object is a string, and that it points to a # directory. def isDirectory(f): return isStringType(f) and os.path.isdir(f) from operator import isNumberType, isSequenceType # # Debug level DEBUG = 0 # # Constants (also defined in _imagingmodule.c!) NONE = 0 # transpose FLIP_LEFT_RIGHT = 0 FLIP_TOP_BOTTOM = 1 ROTATE_90 = 2 ROTATE_180 = 3 ROTATE_270 = 4 # transforms AFFINE = 0 EXTENT = 1 PERSPECTIVE = 2 QUAD = 3 MESH = 4 # resampling filters NONE = 0 NEAREST = 0 ANTIALIAS = 1 # 3-lobed lanczos LINEAR = BILINEAR = 2 CUBIC = BICUBIC = 3 # dithers NONE = 0 NEAREST = 0 ORDERED = 1 # Not yet implemented RASTERIZE = 2 # Not yet implemented FLOYDSTEINBERG = 3 # default # palettes/quantizers WEB = 0 ADAPTIVE = 1 # categories NORMAL = 0 SEQUENCE = 1 CONTAINER = 2 # -------------------------------------------------------------------- # Registries ID = [] OPEN = {} MIME = {} SAVE = {} EXTENSION = {} # -------------------------------------------------------------------- # Modes supported by this version _MODEINFO = { # NOTE: this table will be removed in future versions. use # getmode* functions or ImageMode descriptors instead. 
# official modes "1": ("L", "L", ("1",)), "L": ("L", "L", ("L",)), "I": ("L", "I", ("I",)), "F": ("L", "F", ("F",)), "P": ("RGB", "L", ("P",)), "RGB": ("RGB", "L", ("R", "G", "B")), "RGBX": ("RGB", "L", ("R", "G", "B", "X")), "RGBA": ("RGB", "L", ("R", "G", "B", "A")), "CMYK": ("RGB", "L", ("C", "M", "Y", "K")), "YCbCr": ("RGB", "L", ("Y", "Cb", "Cr")), # Experimental modes include I;16, I;16L, I;16B, RGBa, BGR;15, and # BGR;24. Use these modes only if you know exactly what you're # doing... } try: byteorder = sys.byteorder except AttributeError: import struct if struct.unpack("h", "\0\1")[0] == 1: byteorder = "big" else: byteorder = "little" if byteorder == 'little': _ENDIAN = '<' else: _ENDIAN = '>' _MODE_CONV = { # official modes "1": ('|b1', None), # broken "L": ('|u1', None), "I": (_ENDIAN + 'i4', None), "F": (_ENDIAN + 'f4', None), "P": ('|u1', None), "RGB": ('|u1', 3), "RGBX": ('|u1', 4), "RGBA": ('|u1', 4), "CMYK": ('|u1', 4), "YCbCr": ('|u1', 4), } def _conv_type_shape(im): shape = im.size[1], im.size[0] typ, extra = _MODE_CONV[im.mode] if extra is None: return shape, typ else: return shape+(extra,), typ MODES = _MODEINFO.keys() MODES.sort() # raw modes that may be memory mapped. NOTE: if you change this, you # may have to modify the stride calculation in map.c too! _MAPMODES = ("L", "P", "RGBX", "RGBA", "CMYK", "I;16", "I;16L", "I;16B") ## # Gets the "base" mode for given mode. This function returns "L" for # images that contain grayscale data, and "RGB" for images that # contain color data. # # @param mode Input mode. # @return "L" or "RGB". # @exception KeyError If the input mode was not a standard mode. def getmodebase(mode): return ImageMode.getmode(mode).basemode ## # Gets the storage type mode. Given a mode, this function returns a # single-layer mode suitable for storing individual bands. # # @param mode Input mode. # @return "L", "I", or "F". # @exception KeyError If the input mode was not a standard mode. 
def getmodetype(mode): return ImageMode.getmode(mode).basetype ## # Gets a list of individual band names. Given a mode, this function # returns a tuple containing the names of individual bands (use # {@link #getmodetype} to get the mode used to store each individual # band. # # @param mode Input mode. # @return A tuple containing band names. The length of the tuple # gives the number of bands in an image of the given mode. # @exception KeyError If the input mode was not a standard mode. def getmodebandnames(mode): return ImageMode.getmode(mode).bands ## # Gets the number of individual bands for this mode. # # @param mode Input mode. # @return The number of bands in this mode. # @exception KeyError If the input mode was not a standard mode. def getmodebands(mode): return len(ImageMode.getmode(mode).bands) # -------------------------------------------------------------------- # Helpers _initialized = 0 ## # Explicitly loads standard file format drivers. def preinit(): "Load standard file format drivers." global _initialized if _initialized >= 1: return try: import BmpImagePlugin except ImportError: pass try: import GifImagePlugin except ImportError: pass try: import JpegImagePlugin except ImportError: pass try: import PpmImagePlugin except ImportError: pass try: import PngImagePlugin except ImportError: pass # try: # import TiffImagePlugin # except ImportError: # pass _initialized = 1 ## # Explicitly initializes the Python Imaging Library. This function # loads all available file format drivers. def init(): "Load all file format drivers." 
global _initialized if _initialized >= 2: return 0 visited = {} directories = sys.path try: directories = directories + [os.path.dirname(__file__)] except NameError: pass # only check directories (including current, if present in the path) for directory in filter(isDirectory, directories): fullpath = os.path.abspath(directory) if visited.has_key(fullpath): continue for file in os.listdir(directory): if file[-14:] == "ImagePlugin.py": f, e = os.path.splitext(file) try: sys.path.insert(0, directory) try: __import__(f, globals(), locals(), []) finally: del sys.path[0] except ImportError: if DEBUG: print "Image: failed to import", print f, ":", sys.exc_value visited[fullpath] = None if OPEN or SAVE: _initialized = 2 return 1 # -------------------------------------------------------------------- # Codec factories (used by tostring/fromstring and ImageFile.load) def _getdecoder(mode, decoder_name, args, extra=()): # tweak arguments if args is None: args = () elif not isTupleType(args): args = (args,) try: # get decoder decoder = getattr(core, decoder_name + "_decoder") # print decoder, (mode,) + args + extra return apply(decoder, (mode,) + args + extra) except AttributeError: raise IOError("decoder %s not available" % decoder_name) def _getencoder(mode, encoder_name, args, extra=()): # tweak arguments if args is None: args = () elif not isTupleType(args): args = (args,) try: # get encoder encoder = getattr(core, encoder_name + "_encoder") # print encoder, (mode,) + args + extra return apply(encoder, (mode,) + args + extra) except AttributeError: raise IOError("encoder %s not available" % encoder_name) # -------------------------------------------------------------------- # Simple expression analyzer class _E: def __init__(self, data): self.data = data def __coerce__(self, other): return self, _E(other) def __add__(self, other): return _E((self.data, "__add__", other.data)) def __mul__(self, other): return _E((self.data, "__mul__", other.data)) def _getscaleoffset(expr): 
stub = ["stub"] data = expr(_E(stub)).data try: (a, b, c) = data # simplified syntax if (a is stub and b == "__mul__" and isNumberType(c)): return c, 0.0 if (a is stub and b == "__add__" and isNumberType(c)): return 1.0, c except TypeError: pass try: ((a, b, c), d, e) = data # full syntax if (a is stub and b == "__mul__" and isNumberType(c) and d == "__add__" and isNumberType(e)): return c, e except TypeError: pass raise ValueError("illegal expression") # -------------------------------------------------------------------- # Implementation wrapper ## # This class represents an image object. To create Image objects, use # the appropriate factory functions. There's hardly ever any reason # to call the Image constructor directly. # # @see #open # @see #new # @see #fromstring class Image: format = None format_description = None def __init__(self): # FIXME: take "new" parameters / other image? # FIXME: turn mode and size into delegating properties? self.im = None self.mode = "" self.size = (0, 0) self.palette = None self.info = {} self.category = NORMAL self.readonly = 0 def _new(self, im): new = Image() new.im = im new.mode = im.mode new.size = im.size new.palette = self.palette if im.mode == "P": new.palette = ImagePalette.ImagePalette() try: new.info = self.info.copy() except AttributeError: # fallback (pre-1.5.2) new.info = {} for k, v in self.info: new.info[k] = v return new _makeself = _new # compatibility def _copy(self): self.load() self.im = self.im.copy() self.readonly = 0 def _dump(self, file=None, format=None): import tempfile if not file: file = tempfile.mktemp() self.load() if not format or format == "PPM": self.im.save_ppm(file) else: file = file + "." 
+ format self.save(file, format) return file def __repr__(self): return "<%s.%s image mode=%s size=%dx%d at 0x%X>" % ( self.__class__.__module__, self.__class__.__name__, self.mode, self.size[0], self.size[1], id(self) ) def __getattr__(self, name): if name == "__array_interface__": # numpy array interface support new = {} shape, typestr = _conv_type_shape(self) new['shape'] = shape new['typestr'] = typestr new['data'] = self.tostring() return new raise AttributeError(name) ## # Returns a string containing pixel data. # # @param encoder_name What encoder to use. The default is to # use the standard "raw" encoder. # @param *args Extra arguments to the encoder. # @return An 8-bit string. def tostring(self, encoder_name="raw", *args): "Return image as a binary string" # may pass tuple instead of argument list if len(args) == 1 and isTupleType(args[0]): args = args[0] if encoder_name == "raw" and args == (): args = self.mode self.load() # unpack data e = _getencoder(self.mode, encoder_name, args) e.setimage(self.im) bufsize = max(65536, self.size[0] * 4) # see RawEncode.c data = [] while 1: l, s, d = e.encode(bufsize) data.append(d) if s: break if s < 0: raise RuntimeError("encoder error %d in tostring" % s) return string.join(data, "") ## # Returns the image converted to an X11 bitmap. This method # only works for mode "1" images. # # @param name The name prefix to use for the bitmap variables. # @return A string containing an X11 bitmap. # @exception ValueError If the mode is not "1" def tobitmap(self, name="image"): "Return image as an XBM bitmap" self.load() if self.mode != "1": raise ValueError("not a bitmap") data = self.tostring("xbm") return string.join(["#define %s_width %d\n" % (name, self.size[0]), "#define %s_height %d\n"% (name, self.size[1]), "static char %s_bits[] = {\n" % name, data, "};"], "") ## # Loads this image with pixel data from a string. 
# <p> # This method is similar to the {@link #fromstring} function, but # loads data into this image instead of creating a new image # object. def fromstring(self, data, decoder_name="raw", *args): "Load data to image from binary string" # may pass tuple instead of argument list if len(args) == 1 and isTupleType(args[0]): args = args[0] # default format if decoder_name == "raw" and args == (): args = self.mode # unpack data d = _getdecoder(self.mode, decoder_name, args) d.setimage(self.im) s = d.decode(data) if s[0] >= 0: raise ValueError("not enough image data") if s[1] != 0: raise ValueError("cannot decode image data") ## # Allocates storage for the image and loads the pixel data. In # normal cases, you don't need to call this method, since the # Image class automatically loads an opened image when it is # accessed for the first time. # # @return An image access object. def load(self): "Explicitly load pixel data." if self.im and self.palette and self.palette.dirty: # realize palette apply(self.im.putpalette, self.palette.getdata()) self.palette.dirty = 0 self.palette.mode = "RGB" self.palette.rawmode = None if self.info.has_key("transparency"): self.im.putpalettealpha(self.info["transparency"], 0) self.palette.mode = "RGBA" if self.im: return self.im.pixel_access(self.readonly) ## # Verifies the contents of a file. For data read from a file, this # method attempts to determine if the file is broken, without # actually decoding the image data. If this method finds any # problems, it raises suitable exceptions. If you need to load # the image after using this method, you must reopen the image # file. def verify(self): "Verify file contents." pass ## # Returns a converted copy of this image. For the "P" mode, this # method translates pixels through the palette. If mode is # omitted, a mode is chosen so that all information in the image # and the palette can be represented without a palette. 
# <p> # The current version supports all possible conversions between # "L", "RGB" and "CMYK." # <p> # When translating a colour image to black and white (mode "L"), # the library uses the ITU-R 601-2 luma transform: # <p> # <b>L = R * 299/1000 + G * 587/1000 + B * 114/1000</b> # <p> # When translating a greyscale image into a bilevel image (mode # "1"), all non-zero values are set to 255 (white). To use other # thresholds, use the {@link #Image.point} method. # # @def convert(mode, matrix=None, **options) # @param mode The requested mode. # @param matrix An optional conversion matrix. If given, this # should be 4- or 16-tuple containing floating point values. # @param options Additional options, given as keyword arguments. # @keyparam dither Dithering method, used when converting from # mode "RGB" to "P". # Available methods are NONE or FLOYDSTEINBERG (default). # @keyparam palette Palette to use when converting from mode "RGB" # to "P". Available palettes are WEB or ADAPTIVE. # @keyparam colors Number of colors to use for the ADAPTIVE palette. # Defaults to 256. # @return An Image object. 
def convert(self, mode=None, data=None, dither=None, palette=WEB, colors=256): "Convert to other pixel format" if not mode: # determine default mode if self.mode == "P": self.load() if self.palette: mode = self.palette.mode else: mode = "RGB" else: return self.copy() self.load() if data: # matrix conversion if mode not in ("L", "RGB"): raise ValueError("illegal conversion") im = self.im.convert_matrix(mode, data) return self._new(im) if mode == "P" and palette == ADAPTIVE: im = self.im.quantize(colors) return self._new(im) # colourspace conversion if dither is None: dither = FLOYDSTEINBERG try: im = self.im.convert(mode, dither) except ValueError: try: # normalize source image and try again im = self.im.convert(getmodebase(self.mode)) im = im.convert(mode, dither) except KeyError: raise ValueError("illegal conversion") return self._new(im) def quantize(self, colors=256, method=0, kmeans=0, palette=None): # methods: # 0 = median cut # 1 = maximum coverage # NOTE: this functionality will be moved to the extended # quantizer interface in a later version of PIL. self.load() if palette: # use palette from reference image palette.load() if palette.mode != "P": raise ValueError("bad mode for palette image") if self.mode != "RGB" and self.mode != "L": raise ValueError( "only RGB or L mode images can be quantized to a palette" ) im = self.im.convert("P", 1, palette.im) return self._makeself(im) im = self.im.quantize(colors, method, kmeans) return self._new(im) ## # Copies this image. Use this method if you wish to paste things # into an image, but still retain the original. # # @return An Image object. def copy(self): "Copy raster data" self.load() im = self.im.copy() return self._new(im) ## # Returns a rectangular region from this image. The box is a # 4-tuple defining the left, upper, right, and lower pixel # coordinate. # <p> # This is a lazy operation. Changes to the source image may or # may not be reflected in the cropped image. 
To break the # connection, call the {@link #Image.load} method on the cropped # copy. # # @param The crop rectangle, as a (left, upper, right, lower)-tuple. # @return An Image object. def crop(self, box=None): "Crop region from image" self.load() if box is None: return self.copy() # lazy operation return _ImageCrop(self, box) ## # Configures the image file loader so it returns a version of the # image that as closely as possible matches the given mode and # size. For example, you can use this method to convert a colour # JPEG to greyscale while loading it, or to extract a 128x192 # version from a PCD file. # <p> # Note that this method modifies the Image object in place. If # the image has already been loaded, this method has no effect. # # @param mode The requested mode. # @param size The requested size. def draft(self, mode, size): "Configure image decoder" pass def _expand(self, xmargin, ymargin=None): if ymargin is None: ymargin = xmargin self.load() return self._new(self.im.expand(xmargin, ymargin, 0)) ## # Filters this image using the given filter. For a list of # available filters, see the <b>ImageFilter</b> module. # # @param filter Filter kernel. # @return An Image object. # @see ImageFilter def filter(self, filter): "Apply environment filter to image" self.load() if callable(filter): filter = filter() if not hasattr(filter, "filter"): raise TypeError("filter argument should be ImageFilter.Filter instance or class") if self.im.bands == 1: return self._new(filter.filter(self.im)) # fix to handle multiband images since _imaging doesn't ims = [] for c in range(self.im.bands): ims.append(self._new(filter.filter(self.im.getband(c)))) return merge(self.mode, ims) ## # Returns a tuple containing the name of each band in this image. # For example, <b>getbands</b> on an RGB image returns ("R", "G", "B"). # # @return A tuple containing band names. 
def getbands(self): "Get band names" return ImageMode.getmode(self.mode).bands ## # Calculates the bounding box of the non-zero regions in the # image. # # @return The bounding box is returned as a 4-tuple defining the # left, upper, right, and lower pixel coordinate. If the image # is completely empty, this method returns None. def getbbox(self): "Get bounding box of actual data (non-zero pixels) in image" self.load() return self.im.getbbox() ## # Returns a list of colors used in this image. # # @param maxcolors Maximum number of colors. If this number is # exceeded, this method returns None. The default limit is # 256 colors. # @return An unsorted list of (count, pixel) values. def getcolors(self, maxcolors=256): "Get colors from image, up to given limit" self.load() if self.mode in ("1", "L", "P"): h = self.im.histogram() out = [] for i in range(256): if h[i]: out.append((h[i], i)) if len(out) > maxcolors: return None return out return self.im.getcolors(maxcolors) ## # Returns the contents of this image as a sequence object # containing pixel values. The sequence object is flattened, so # that values for line one follow directly after the values of # line zero, and so on. # <p> # Note that the sequence object returned by this method is an # internal PIL data type, which only supports certain sequence # operations. To convert it to an ordinary sequence (e.g. for # printing), use <b>list(im.getdata())</b>. # # @param band What band to return. The default is to return # all bands. To return a single band, pass in the index # value (e.g. 0 to get the "R" band from an "RGB" image). # @return A sequence-like object. def getdata(self, band = None): "Get image data as sequence object." self.load() if band is not None: return self.im.getband(band) return self.im # could be abused ## # Gets the the minimum and maximum pixel values for each band in # the image. # # @return For a single-band image, a 2-tuple containing the # minimum and maximum pixel value. 
For a multi-band image, # a tuple containing one 2-tuple for each band. def getextrema(self): "Get min/max value" self.load() if self.im.bands > 1: extrema = [] for i in range(self.im.bands): extrema.append(self.im.getband(i).getextrema()) return tuple(extrema) return self.im.getextrema() ## # Returns a PyCObject that points to the internal image memory. # # @return A PyCObject object. def getim(self): "Get PyCObject pointer to internal image memory" self.load() return self.im.ptr ## # Returns the image palette as a list. # # @return A list of color values [r, g, b, ...], or None if the # image has no palette. def getpalette(self): "Get palette contents." self.load() try: return map(ord, self.im.getpalette()) except ValueError: return None # no palette ## # Returns the pixel value at a given position. # # @param xy The coordinate, given as (x, y). # @return The pixel value. If the image is a multi-layer image, # this method returns a tuple. def getpixel(self, xy): "Get pixel value" self.load() return self.im.getpixel(xy) ## # Returns the horizontal and vertical projection. # # @return Two sequences, indicating where there are non-zero # pixels along the X-axis and the Y-axis, respectively. def getprojection(self): "Get projection to x and y axes" self.load() x, y = self.im.getprojection() return map(ord, x), map(ord, y) ## # Returns a histogram for the image. The histogram is returned as # a list of pixel counts, one for each pixel value in the source # image. If the image has more than one band, the histograms for # all bands are concatenated (for example, the histogram for an # "RGB" image contains 768 values). # <p> # A bilevel image (mode "1") is treated as a greyscale ("L") image # by this method. # <p> # If a mask is provided, the method returns a histogram for those # parts of the image where the mask image is non-zero. The mask # image must have the same size as the image, and be either a # bi-level image (mode "1") or a greyscale image ("L"). 
# # @def histogram(mask=None) # @param mask An optional mask. # @return A list containing pixel counts. def histogram(self, mask=None, extrema=None): "Take histogram of image" self.load() if mask: mask.load() return self.im.histogram((0, 0), mask.im) if self.mode in ("I", "F"): if extrema is None: extrema = self.getextrema() return self.im.histogram(extrema) return self.im.histogram() ## # (Deprecated) Returns a copy of the image where the data has been # offset by the given distances. Data wraps around the edges. If # yoffset is omitted, it is assumed to be equal to xoffset. # <p> # This method is deprecated. New code should use the <b>offset</b> # function in the <b>ImageChops</b> module. # # @param xoffset The horizontal distance. # @param yoffset The vertical distance. If omitted, both # distances are set to the same value. # @return An Image object. def offset(self, xoffset, yoffset=None): "(deprecated) Offset image in horizontal and/or vertical direction" if warnings: warnings.warn( "'offset' is deprecated; use 'ImageChops.offset' instead", DeprecationWarning, stacklevel=2 ) import ImageChops return ImageChops.offset(self, xoffset, yoffset) ## # Pastes another image into this image. The box argument is either # a 2-tuple giving the upper left corner, a 4-tuple defining the # left, upper, right, and lower pixel coordinate, or None (same as # (0, 0)). If a 4-tuple is given, the size of the pasted image # must match the size of the region. # <p> # If the modes don't match, the pasted image is converted to the # mode of this image (see the {@link #Image.convert} method for # details). # <p> # Instead of an image, the source can be a integer or tuple # containing pixel values. The method then fills the region # with the given colour. When creating RGB images, you can # also use colour strings as supported by the ImageColor module. # <p> # If a mask is given, this method updates only the regions # indicated by the mask. 
You can use either "1", "L" or "RGBA" # images (in the latter case, the alpha band is used as mask). # Where the mask is 255, the given image is copied as is. Where # the mask is 0, the current value is preserved. Intermediate # values can be used for transparency effects. # <p> # Note that if you paste an "RGBA" image, the alpha band is # ignored. You can work around this by using the same image as # both source image and mask. # # @param im Source image or pixel value (integer or tuple). # @param box An optional 4-tuple giving the region to paste into. # If a 2-tuple is used instead, it's treated as the upper left # corner. If omitted or None, the source is pasted into the # upper left corner. # <p> # If an image is given as the second argument and there is no # third, the box defaults to (0, 0), and the second argument # is interpreted as a mask image. # @param mask An optional mask image. # @return An Image object. def paste(self, im, box=None, mask=None): "Paste other image into region" if isImageType(box) and mask is None: # abbreviated paste(im, mask) syntax mask = box; box = None if box is None: # cover all of self box = (0, 0) + self.size if len(box) == 2: # lower left corner given; get size from image or mask if isImageType(im): size = im.size elif isImageType(mask): size = mask.size else: # FIXME: use self.size here? raise ValueError( "cannot determine region size; use 4-item box" ) box = box + (box[0]+size[0], box[1]+size[1]) if isStringType(im): import ImageColor im = ImageColor.getcolor(im, self.mode) elif isImageType(im): im.load() if self.mode != im.mode: if self.mode != "RGB" or im.mode not in ("RGBA", "RGBa"): # should use an adapter for this! im = im.convert(self.mode) im = im.im self.load() if self.readonly: self._copy() if mask: mask.load() self.im.paste(im, box, mask.im) else: self.im.paste(im, box) ## # Maps this image through a lookup table or function. # # @param lut A lookup table, containing 256 values per band in the # image. 
A function can be used instead, it should take a single # argument. The function is called once for each possible pixel # value, and the resulting table is applied to all bands of the # image. # @param mode Output mode (default is same as input). In the # current version, this can only be used if the source image # has mode "L" or "P", and the output has mode "1". # @return An Image object. def point(self, lut, mode=None): "Map image through lookup table" self.load() if isinstance(lut, ImagePointHandler): return lut.point(self) if not isSequenceType(lut): # if it isn't a list, it should be a function if self.mode in ("I", "I;16", "F"): # check if the function can be used with point_transform scale, offset = _getscaleoffset(lut) return self._new(self.im.point_transform(scale, offset)) # for other modes, convert the function to a table lut = map(lut, range(256)) * self.im.bands if self.mode == "F": # FIXME: _imaging returns a confusing error message for this case raise ValueError("point operation not supported for this mode") return self._new(self.im.point(lut, mode)) ## # Adds or replaces the alpha layer in this image. If the image # does not have an alpha layer, it's converted to "LA" or "RGBA". # The new layer must be either "L" or "1". # # @param im The new alpha layer. This can either be an "L" or "1" # image having the same size as this image, or an integer or # other color value. 
def putalpha(self, alpha): "Set alpha layer" self.load() if self.readonly: self._copy() if self.mode not in ("LA", "RGBA"): # attempt to promote self to a matching alpha mode try: mode = getmodebase(self.mode) + "A" try: self.im.setmode(mode) except (AttributeError, ValueError): # do things the hard way im = self.im.convert(mode) if im.mode not in ("LA", "RGBA"): raise ValueError # sanity check self.im = im self.mode = self.im.mode except (KeyError, ValueError): raise ValueError("illegal image mode") if self.mode == "LA": band = 1 else: band = 3 if isImageType(alpha): # alpha layer if alpha.mode not in ("1", "L"): raise ValueError("illegal image mode") alpha.load() if alpha.mode == "1": alpha = alpha.convert("L") else: # constant alpha try: self.im.fillband(band, alpha) except (AttributeError, ValueError): # do things the hard way alpha = new("L", self.size, alpha) else: return self.im.putband(alpha.im, band) ## # Copies pixel data to this image. This method copies data from a # sequence object into the image, starting at the upper left # corner (0, 0), and continuing until either the image or the # sequence ends. The scale and offset values are used to adjust # the sequence values: <b>pixel = value*scale + offset</b>. # # @param data A sequence object. # @param scale An optional scale value. The default is 1.0. # @param offset An optional offset value. The default is 0.0. def putdata(self, data, scale=1.0, offset=0.0): "Put data from a sequence object into an image." self.load() if self.readonly: self._copy() self.im.putdata(data, scale, offset) ## # Attaches a palette to this image. The image must be a "P" or # "L" image, and the palette sequence must contain 768 integer # values, where each group of three values represent the red, # green, and blue values for the corresponding pixel # index. Instead of an integer sequence, you can use an 8-bit # string. # # @def putpalette(data) # @param data A palette sequence (either a list or a string). 
def putpalette(self, data, rawmode="RGB"): "Put palette data into an image." if self.mode not in ("L", "P"): raise ValueError("illegal image mode") self.load() if isinstance(data, ImagePalette.ImagePalette): palette = ImagePalette.raw(data.rawmode, data.palette) else: if not isStringType(data): data = string.join(map(chr, data), "") palette = ImagePalette.raw(rawmode, data) self.mode = "P" self.palette = palette self.palette.mode = "RGB" self.load() # install new palette ## # Modifies the pixel at the given position. The colour is given as # a single numerical value for single-band images, and a tuple for # multi-band images. # <p> # Note that this method is relatively slow. For more extensive # changes, use {@link #Image.paste} or the <b>ImageDraw</b> module # instead. # # @param xy The pixel coordinate, given as (x, y). # @param value The pixel value. # @see #Image.paste # @see #Image.putdata # @see ImageDraw def putpixel(self, xy, value): "Set pixel value" self.load() if self.readonly: self._copy() return self.im.putpixel(xy, value) ## # Returns a resized copy of this image. # # @def resize(size, filter=NEAREST) # @param size The requested size in pixels, as a 2-tuple: # (width, height). # @param filter An optional resampling filter. This can be # one of <b>NEAREST</b> (use nearest neighbour), <b>BILINEAR</b> # (linear interpolation in a 2x2 environment), <b>BICUBIC</b> # (cubic spline interpolation in a 4x4 environment), or # <b>ANTIALIAS</b> (a high-quality downsampling filter). # If omitted, or if the image has mode "1" or "P", it is # set <b>NEAREST</b>. # @return An Image object. 
    def resize(self, size, resample=NEAREST):
        "Resize image"

        if resample not in (NEAREST, BILINEAR, BICUBIC, ANTIALIAS):
            raise ValueError("unknown resampling filter")

        self.load()

        if self.mode in ("1", "P"):
            # palette/bilevel images cannot be interpolated
            resample = NEAREST

        if resample == ANTIALIAS:
            # requires stretch support (imToolkit & PIL 1.1.3)
            try:
                im = self.im.stretch(size, resample)
            except AttributeError:
                raise ValueError("unsupported resampling filter")
        else:
            im = self.im.resize(size, resample)

        return self._new(im)

    ##
    # Returns a rotated copy of this image.  This method returns a
    # copy of this image, rotated the given number of degrees counter
    # clockwise around its centre.
    #
    # @def rotate(angle, filter=NEAREST)
    # @param angle In degrees counter clockwise.
    # @param filter An optional resampling filter.  This can be
    #    one of <b>NEAREST</b> (use nearest neighbour), <b>BILINEAR</b>
    #    (linear interpolation in a 2x2 environment), or <b>BICUBIC</b>
    #    (cubic spline interpolation in a 4x4 environment).
    #    If omitted, or if the image has mode "1" or "P", it is
    #    set <b>NEAREST</b>.
    # @param expand Optional expansion flag.  If true, expands the output
    #    image to make it large enough to hold the entire rotated image.
    #    If false or omitted, make the output image the same size as the
    #    input image.
    # @return An Image object.

    def rotate(self, angle, resample=NEAREST, expand=0):
        "Rotate image.  Angle given as degrees counter-clockwise."

        if expand:
            # expand mode: implement rotation as an affine transform
            # into a large-enough output image
            import math
            angle = -angle * math.pi / 180
            matrix = [
                 math.cos(angle), math.sin(angle), 0.0,
                -math.sin(angle), math.cos(angle), 0.0
                 ]
            # NOTE: Python 2 tuple-parameter syntax binds the matrix
            # coefficients as defaults
            def transform(x, y, (a, b, c, d, e, f)=matrix):
                return a*x + b*y + c, d*x + e*y + f

            # calculate output size by transforming the four corners
            w, h = self.size
            xx = []
            yy = []
            for x, y in ((0, 0), (w, 0), (w, h), (0, h)):
                x, y = transform(x, y)
                xx.append(x)
                yy.append(y)
            w = int(math.ceil(max(xx)) - math.floor(min(xx)))
            h = int(math.ceil(max(yy)) - math.floor(min(yy)))

            # adjust center so the rotation stays centred in the new box
            x, y = transform(w / 2.0, h / 2.0)
            matrix[2] = self.size[0] / 2.0 - x
            matrix[5] = self.size[1] / 2.0 - y

            return self.transform((w, h), AFFINE, matrix, resample)

        if resample not in (NEAREST, BILINEAR, BICUBIC):
            raise ValueError("unknown resampling filter")

        self.load()

        if self.mode in ("1", "P"):
            resample = NEAREST

        return self._new(self.im.rotate(angle, resample))

    ##
    # Saves this image under the given filename.  If no format is
    # specified, the format to use is determined from the filename
    # extension, if possible.
    # <p>
    # Keyword options can be used to provide additional instructions
    # to the writer. If a writer doesn't recognise an option, it is
    # silently ignored. The available options are described later in
    # this handbook.
    # <p>
    # You can use a file object instead of a filename. In this case,
    # you must always specify the format. The file object must
    # implement the <b>seek</b>, <b>tell</b>, and <b>write</b>
    # methods, and be opened in binary mode.
    #
    # @def save(file, format=None, **options)
    # @param file File name or file object.
    # @param format Optional format override.  If omitted, the
    #    format to use is determined from the filename extension.
    #    If a file object was used instead of a filename, this
    #    parameter should always be used.
    # @param **options Extra parameters to the image writer.
    # @return None
    # @exception KeyError If the output format could not be determined
    #    from the file name.  Use the format option to solve this.
# @exception IOError If the file could not be written. The file # may have been created, and may contain partial data. def save(self, fp, format=None, **params): "Save image to file or stream" if isStringType(fp): filename = fp else: if hasattr(fp, "name") and isStringType(fp.name): filename = fp.name else: filename = "" # may mutate self! self.load() self.encoderinfo = params self.encoderconfig = () preinit() ext = string.lower(os.path.splitext(filename)[1]) if not format: try: format = EXTENSION[ext] except KeyError: init() try: format = EXTENSION[ext] except KeyError: raise KeyError(ext) # unknown extension try: save_handler = SAVE[string.upper(format)] except KeyError: init() save_handler = SAVE[string.upper(format)] # unknown format if isStringType(fp): import __builtin__ fp = __builtin__.open(fp, "wb") close = 1 else: close = 0 try: save_handler(self, fp, filename) finally: # do what we can to clean up if close: fp.close() ## # Seeks to the given frame in this sequence file. If you seek # beyond the end of the sequence, the method raises an # <b>EOFError</b> exception. When a sequence file is opened, the # library automatically seeks to frame 0. # <p> # Note that in the current version of the library, most sequence # formats only allows you to seek to the next frame. # # @param frame Frame number, starting at 0. # @exception EOFError If the call attempts to seek beyond the end # of the sequence. # @see #Image.tell def seek(self, frame): "Seek to given frame in sequence file" # overridden by file handlers if frame != 0: raise EOFError ## # Displays this image. This method is mainly intended for # debugging purposes. # <p> # On Unix platforms, this method saves the image to a temporary # PPM file, and calls the <b>xv</b> utility. # <p> # On Windows, it saves the image to a temporary BMP file, and uses # the standard BMP display utility to show it (usually Paint). 
# # @def show(title=None) # @param title Optional title to use for the image window, # where possible. def show(self, title=None, command=None): "Display image (for debug purposes only)" _show(self, title=title, command=command) ## # Split this image into individual bands. This method returns a # tuple of individual image bands from an image. For example, # splitting an "RGB" image creates three new images each # containing a copy of one of the original bands (red, green, # blue). # # @return A tuple containing bands. def split(self): "Split image into bands" if self.im.bands == 1: ims = [self.copy()] else: ims = [] self.load() for i in range(self.im.bands): ims.append(self._new(self.im.getband(i))) return tuple(ims) ## # Returns the current frame number. # # @return Frame number, starting with 0. # @see #Image.seek def tell(self): "Return current frame number" return 0 ## # Make this image into a thumbnail. This method modifies the # image to contain a thumbnail version of itself, no larger than # the given size. This method calculates an appropriate thumbnail # size to preserve the aspect of the image, calls the {@link # #Image.draft} method to configure the file reader (where # applicable), and finally resizes the image. # <p> # Note that the bilinear and bicubic filters in the current # version of PIL are not well-suited for thumbnail generation. # You should use <b>ANTIALIAS</b> unless speed is much more # important than quality. # <p> # Also note that this function modifies the Image object in place. # If you need to use the full resolution image as well, apply this # method to a {@link #Image.copy} of the original image. # # @param size Requested size. # @param resample Optional resampling filter. This can be one # of <b>NEAREST</b>, <b>BILINEAR</b>, <b>BICUBIC</b>, or # <b>ANTIALIAS</b> (best quality). If omitted, it defaults # to <b>NEAREST</b> (this will be changed to ANTIALIAS in a # future version). 
# @return None def thumbnail(self, size, resample=NEAREST): "Create thumbnail representation (modifies image in place)" # FIXME: the default resampling filter will be changed # to ANTIALIAS in future versions # preserve aspect ratio x, y = self.size if x > size[0]: y = max(y * size[0] / x, 1); x = size[0] if y > size[1]: x = max(x * size[1] / y, 1); y = size[1] size = x, y if size == self.size: return self.draft(None, size) self.load() try: im = self.resize(size, resample) except ValueError: if resample != ANTIALIAS: raise im = self.resize(size, NEAREST) # fallback self.im = im.im self.mode = im.mode self.size = size self.readonly = 0 # FIXME: the different tranform methods need further explanation # instead of bloating the method docs, add a separate chapter. ## # Transforms this image. This method creates a new image with the # given size, and the same mode as the original, and copies data # to the new image using the given transform. # <p> # @def transform(size, method, data, resample=NEAREST) # @param size The output size. # @param method The transformation method. This is one of # <b>EXTENT</b> (cut out a rectangular subregion), <b>AFFINE</b> # (affine transform), <b>PERSPECTIVE</b> (perspective # transform), <b>QUAD</b> (map a quadrilateral to a # rectangle), or <b>MESH</b> (map a number of source quadrilaterals # in one operation). # @param data Extra data to the transformation method. # @param resample Optional resampling filter. It can be one of # <b>NEAREST</b> (use nearest neighbour), <b>BILINEAR</b> # (linear interpolation in a 2x2 environment), or # <b>BICUBIC</b> (cubic spline interpolation in a 4x4 # environment). If omitted, or if the image has mode # "1" or "P", it is set to <b>NEAREST</b>. # @return An Image object. 
def transform(self, size, method, data=None, resample=NEAREST, fill=1): "Transform image" if isinstance(method, ImageTransformHandler): return method.transform(size, self, resample=resample, fill=fill) if hasattr(method, "getdata"): # compatibility w. old-style transform objects method, data = method.getdata() if data is None: raise ValueError("missing method data") im = new(self.mode, size, None) if method == MESH: # list of quads for box, quad in data: im.__transformer(box, self, QUAD, quad, resample, fill) else: im.__transformer((0, 0)+size, self, method, data, resample, fill) return im def __transformer(self, box, image, method, data, resample=NEAREST, fill=1): # FIXME: this should be turned into a lazy operation (?) w = box[2]-box[0] h = box[3]-box[1] if method == AFFINE: # change argument order to match implementation data = (data[2], data[0], data[1], data[5], data[3], data[4]) elif method == EXTENT: # convert extent to an affine transform x0, y0, x1, y1 = data xs = float(x1 - x0) / w ys = float(y1 - y0) / h method = AFFINE data = (x0 + xs/2, xs, 0, y0 + ys/2, 0, ys) elif method == PERSPECTIVE: # change argument order to match implementation data = (data[2], data[0], data[1], data[5], data[3], data[4], data[6], data[7]) elif method == QUAD: # quadrilateral warp. data specifies the four corners # given as NW, SW, SE, and NE. nw = data[0:2]; sw = data[2:4]; se = data[4:6]; ne = data[6:8] x0, y0 = nw; As = 1.0 / w; At = 1.0 / h data = (x0, (ne[0]-x0)*As, (sw[0]-x0)*At, (se[0]-sw[0]-ne[0]+x0)*As*At, y0, (ne[1]-y0)*As, (sw[1]-y0)*At, (se[1]-sw[1]-ne[1]+y0)*As*At) else: raise ValueError("unknown transformation method") if resample not in (NEAREST, BILINEAR, BICUBIC): raise ValueError("unknown resampling filter") image.load() self.load() if image.mode in ("1", "P"): resample = NEAREST self.im.transform2(box, image.im, method, data, resample, fill) ## # Returns a flipped or rotated copy of this image. 
# # @param method One of <b>FLIP_LEFT_RIGHT</b>, <b>FLIP_TOP_BOTTOM</b>, # <b>ROTATE_90</b>, <b>ROTATE_180</b>, or <b>ROTATE_270</b>. def transpose(self, method): "Transpose image (flip or rotate in 90 degree steps)" self.load() im = self.im.transpose(method) return self._new(im) # -------------------------------------------------------------------- # Lazy operations class _ImageCrop(Image): def __init__(self, im, box): Image.__init__(self) x0, y0, x1, y1 = box if x1 < x0: x1 = x0 if y1 < y0: y1 = y0 self.mode = im.mode self.size = x1-x0, y1-y0 self.__crop = x0, y0, x1, y1 self.im = im.im def load(self): # lazy evaluation! if self.__crop: self.im = self.im.crop(self.__crop) self.__crop = None if self.im: return self.im.pixel_access(self.readonly) # FIXME: future versions should optimize crop/paste # sequences! # -------------------------------------------------------------------- # Abstract handlers. class ImagePointHandler: # used as a mixin by point transforms (for use with im.point) pass class ImageTransformHandler: # used as a mixin by geometry transforms (for use with im.transform) pass # -------------------------------------------------------------------- # Factories # # Debugging def _wedge(): "Create greyscale wedge (for debugging only)" return Image()._new(core.wedge("L")) ## # Creates a new image with the given mode and size. # # @param mode The mode to use for the new image. # @param size A 2-tuple, containing (width, height) in pixels. # @param color What colour to use for the image. Default is black. # If given, this should be a single integer or floating point value # for single-band modes, and a tuple for multi-band modes (one value # per band). When creating RGB images, you can also use colour # strings as supported by the ImageColor module. If the colour is # None, the image is not initialised. # @return An Image object. 
def new(mode, size, color=0):
    "Create a new image"

    if color is None:
        # don't initialize the raster
        return Image()._new(core.new(mode, size))

    if isStringType(color):
        # css3-style specifier
        import ImageColor
        color = ImageColor.getcolor(color, mode)

    return Image()._new(core.fill(mode, size, color))

##
# Creates an image memory from pixel data in a string.  In its
# simplest form, this takes mode, size, and unpacked pixel data; any
# pixel decoder supported by PIL may be named instead (see the
# <i>Writing Your Own File Decoder</i> chapter).  Note that this
# decodes pixel data only, not entire images -- wrap a complete image
# file in a StringIO object and use {@link #open} for that.
#
# @param mode The image mode.
# @param size The image size.
# @param data An 8-bit string containing raw data for the given mode.
# @param decoder_name What decoder to use.
# @param *args Additional parameters for the given decoder.
# @return An Image object.

def fromstring(mode, size, data, decoder_name="raw", *args):
    "Load image from string"

    # may pass tuple instead of argument list
    if len(args) == 1 and isTupleType(args[0]):
        args = args[0]

    # the raw decoder defaults to the target mode
    if decoder_name == "raw" and args == ():
        args = mode

    im = new(mode, size)
    im.fromstring(data, decoder_name, args)
    return im

##
# (New in 1.1.4) Creates an image memory from pixel data in a string
# or byte buffer.  Similar to {@link #fromstring}, but uses data in
# the byte buffer where possible, so changes to the original buffer
# object are reflected in the image.  Not all modes can share memory;
# supported modes include "L", "RGBX", "RGBA", and "CMYK".  This
# decodes pixel data only, not entire images -- wrap a complete image
# file in a StringIO object and use {@link #open} for that.
# In the current version, the default parameters used for the "raw"
# decoder differ from those used for {@link fromstring}.  This is a
# bug, and will probably be fixed in a future release.  The current
# release issues a warning if you rely on the defaults; to disable
# the warning, provide the full set of parameters as shown below.
#
# @param mode The image mode.
# @param size The image size.
# @param data An 8-bit string or other buffer object containing raw
#    data for the given mode.
# @param decoder_name What decoder to use.
# @param *args Additional parameters for the given decoder.  For the
#    default encoder ("raw"), it's recommended that you provide the
#    full set of parameters:
#    <b>frombuffer(mode, size, data, "raw", mode, 0, 1)</b>.
# @return An Image object.
# @since 1.1.4

def frombuffer(mode, size, data, decoder_name="raw", *args):
    "Load image from string or buffer"

    # may pass tuple instead of argument list
    if len(args) == 1 and isTupleType(args[0]):
        args = args[0]

    if decoder_name == "raw":
        if args == ():
            # relying on the (known-inconsistent) defaults; warn once
            if warnings:
                warnings.warn(
                    "the frombuffer defaults may change in a future release; "
                    "for portability, change the call to read:\n"
                    "  frombuffer(mode, size, data, 'raw', mode, 0, 1)",
                    RuntimeWarning, stacklevel=2
                )
            args = mode, 0, -1  # may change to (mode, 0, 1) post-1.1.6
        if args[0] in _MAPMODES:
            # zero-copy path: map the buffer directly into image memory
            im = new(mode, (1,1))
            im = im._new(
                core.map_buffer(data, size, decoder_name, None, 0, args)
                )
            im.readonly = 1
            return im

    # fall back to a copying decode
    return fromstring(mode, size, data, decoder_name, args)

##
# (New in 1.1.6) Creates an image memory from an object exporting
# the array interface (using the buffer protocol).
#
# If obj is not contiguous, then the tostring method is called
# and {@link frombuffer} is used.
#
# @param obj Object with array interface
# @param mode Mode to use (will be determined from type if None)
# @return An image memory.
def fromarray(obj, mode=None):
    "Create an image memory from an array-interface object"
    arr = obj.__array_interface__
    shape = arr['shape']
    ndim = len(shape)
    try:
        strides = arr['strides']
    except KeyError:
        strides = None

    if mode is None:
        # infer the mode from the element type and band count
        try:
            typekey = (1, 1) + shape[2:], arr['typestr']
            mode, rawmode = _fromarray_typemap[typekey]
        except KeyError:
            # print typekey
            raise TypeError("Cannot handle this data type")
    else:
        rawmode = mode

    # maximum number of dimensions allowed for the resulting mode
    if mode == "RGB":
        ndmax = 3
    elif mode in ["1", "L", "I", "P", "F"]:
        ndmax = 2
    else:
        ndmax = 4
    if ndim > ndmax:
        raise ValueError("Too many dimensions.")

    size = shape[1], shape[0]
    if strides is not None:
        # non-contiguous data cannot be shared; copy it out
        obj = obj.tostring()

    return frombuffer(mode, size, obj, "raw", rawmode, 0, 1)

# (shape, typestr) => mode, rawmode; the first two members of shape
# are normalised to one before lookup.
_fromarray_typemap = {
    # ((1, 1), "|b1"): ("1", "1"), # broken
    ((1, 1), "|u1"): ("L", "L"),
    ((1, 1), "|i1"): ("I", "I;8"),
    ((1, 1), "<i2"): ("I", "I;16"),
    ((1, 1), ">i2"): ("I", "I;16B"),
    ((1, 1), "<i4"): ("I", "I;32"),
    ((1, 1), ">i4"): ("I", "I;32B"),
    ((1, 1), "<f4"): ("F", "F;32F"),
    ((1, 1), ">f4"): ("F", "F;32BF"),
    ((1, 1), "<f8"): ("F", "F;64F"),
    ((1, 1), ">f8"): ("F", "F;64BF"),
    ((1, 1, 3), "|u1"): ("RGB", "RGB"),
    ((1, 1, 4), "|u1"): ("RGBA", "RGBA"),
    }

# shortcuts for native-endian integer and float data
_fromarray_typemap[((1, 1), _ENDIAN + "i4")] = ("I", "I")
_fromarray_typemap[((1, 1), _ENDIAN + "f4")] = ("F", "F")

##
# Opens and identifies the given image file.  This is a lazy
# operation; the function identifies the file, but the actual image
# data is not read until you try to process the data (or call the
# {@link #Image.load} method).
#
# @def open(file, mode="r")
# @param file A filename (string) or a file object.  The file object
#    must implement <b>read</b>, <b>seek</b>, and <b>tell</b> methods,
#    and be opened in binary mode.
# @param mode The mode.  If given, this argument must be "r".
# @return An Image object.
# @exception IOError If the file cannot be found, or the image cannot
#    be opened and identified.
# @see #new

def open(fp, mode="r"):
    "Open an image file, without loading the raster data"

    if mode != "r":
        raise ValueError("bad mode")

    if isStringType(fp):
        import __builtin__
        filename = fp
        fp = __builtin__.open(fp, "rb")
    else:
        filename = ""

    # read enough of the file to let the accept hooks sniff the format
    prefix = fp.read(16)

    preinit()

    # first pass: formats registered by the preinit set
    for format in ID:
        try:
            factory, accept = OPEN[format]
            if not accept or accept(prefix):
                fp.seek(0)
                return factory(fp, filename)
        except (SyntaxError, IndexError, TypeError):
            # this plugin cannot handle the file; try the next one
            pass

    # second pass: load the full plugin set and try again
    if init():
        for format in ID:
            try:
                factory, accept = OPEN[format]
                if not accept or accept(prefix):
                    fp.seek(0)
                    return factory(fp, filename)
            except (SyntaxError, IndexError, TypeError):
                pass

    raise IOError("cannot identify image file")

#
# Image processing.

##
# Creates a new image by interpolating between two input images, using
# a constant alpha:
#
# <pre>
#    out = image1 * (1.0 - alpha) + image2 * alpha
# </pre>
#
# @param im1 The first image.
# @param im2 The second image.  Must have the same mode and size as
#    the first image.
# @param alpha The interpolation alpha factor.  If alpha is 0.0, a
#    copy of the first image is returned.  If alpha is 1.0, a copy of
#    the second image is returned.  There are no restrictions on the
#    alpha value; if necessary, the result is clipped to fit into the
#    allowed output range.
# @return An Image object.

def blend(im1, im2, alpha):
    "Interpolate between images."
    im1.load()
    im2.load()
    return im1._new(core.blend(im1.im, im2.im, alpha))

##
# Creates a new image by interpolating between two input images,
# using the mask as alpha.
#
# @param image1 The first image.
# @param image2 The second image.  Must have the same mode and
#    size as the first image.
# @param mask A mask image.  This image can have mode "1", "L", or
#    "RGBA", and must have the same size as the other two images.
def composite(image1, image2, mask):
    "Create composite image by blending images using a transparency mask"
    # Paste image1 over a copy of image2, letting mask select per-pixel
    # which source wins.  Neither input is modified.
    image = image2.copy()
    image.paste(image1, None, mask)
    return image

##
# Applies the function (which should take one argument) to each pixel
# in the given image.  If the image has more than one band, the same
# function is applied to each band.  Note that the function is
# evaluated once for each possible pixel value, so you cannot use
# random components or other generators.
#
# @def eval(image, function)
# @param image The input image.
# @param function A function object, taking one integer argument.
# @return An Image object.

def eval(image, *args):
    "Evaluate image expression"
    return image.point(args[0])

##
# Creates a new image from a number of single-band images.
#
# @param mode The mode to use for the output image.
# @param bands A sequence containing one single-band image for
#    each band in the output image.  All bands must have the
#    same size.
# @return An Image object.

def merge(mode, bands):
    "Merge a set of single band images into a new multiband image."
    if getmodebands(mode) != len(bands) or "*" in mode:
        raise ValueError("wrong number of bands")
    for im in bands[1:]:
        if im.mode != getmodetype(mode):
            raise ValueError("mode mismatch")
        if im.size != bands[0].size:
            raise ValueError("size mismatch")
    im = core.new(mode, bands[0].size)
    for i in range(getmodebands(mode)):
        bands[i].load()
        im.putband(bands[i].im, i)
    return bands[0]._new(im)

# --------------------------------------------------------------------
# Plugin registry

##
# Register an image file plugin.  This function should not be used
# in application code.
#
# @param id An image format identifier.
# @param factory An image file factory method.
# @param accept An optional function that can be used to quickly
#    reject images having another format.

def register_open(id, factory, accept=None):
    id = string.upper(id)
    ID.append(id)
    OPEN[id] = factory, accept

##
# Registers an image MIME type.  This function should not be used
# in application code.
#
# @param id An image format identifier.
# @param mimetype The image MIME type for this format.

def register_mime(id, mimetype):
    MIME[string.upper(id)] = mimetype

##
# Registers an image save function.  This function should not be
# used in application code.
#
# @param id An image format identifier.
# @param driver A function to save images in this format.

def register_save(id, driver):
    SAVE[string.upper(id)] = driver

##
# Registers an image extension.  This function should not be
# used in application code.
#
# @param id An image format identifier.
# @param extension An extension used for this format.

def register_extension(id, extension):
    EXTENSION[string.lower(extension)] = string.upper(id)

# --------------------------------------------------------------------
# Simple display support.  User code may override this.

def _show(image, **options):
    # override me, as necessary
    # (direct call instead of the deprecated apply() builtin)
    _showxv(image, **options)

def _showxv(image, title=None, **options):
    import ImageShow
    ImageShow.show(image, title, **options)
mit
rancher/validation-tests
tests/v2_validation/cattlevalidationtest/core/test_openldap.py
1
11707
"""Validation tests for Rancher's OpenLDAP authentication integration.

Requires API_AUTH_OPEN_LDAP_SERVER (and related env vars) to point at a
reachable LDAP server; every test is skipped otherwise.
"""
from common_fixtures import *  # NOQA
from requests.auth import AuthBase
from selenium import webdriver
from test_github import URL
from common_fixtures import _client_for_user

# Skip the whole module unless an LDAP server is configured.
if_ldap = pytest.mark.skipif(not os.environ.get('API_AUTH_OPEN_LDAP_SERVER'),
                             reason='API_AUTH_OPEN_LDAP_SERVER is not set')


class OpenLDAPAuth(AuthBase):
    """requests auth hook that sends an LDAP-issued JWT bearer token."""

    def __init__(self, jwt, prj_id=None):
        # setup any auth-related data here
        self.jwt = jwt
        self.prj_id = prj_id

    def __call__(self, r):
        # modify and return the request
        r.headers['Authorization'] = 'Bearer ' + self.jwt
        if self.prj_id is not None:
            r.headers['X-API-Project-Id'] = self.prj_id
        return r


def create_ldap_client(username=os.getenv('LDAP_USER1', 'devUserA'),
                       password=os.getenv('LDAP_USER1_PASSWORD',
                                          'Password1'),
                       project_id=None):
    """Return an API client authenticated as the given LDAP user.

    Asserts that the resulting session actually carries an
    'openldap_user' identity.
    """
    client = _client_for_user('user', accounts())
    client.delete_by_id = delete_by_id
    assert client.valid()

    jwt = get_authed_token(username=username, password=password)['jwt']
    # Replace key-based auth with the bearer token.
    client._access_key = None
    client._secret_key = None
    client._auth = OpenLDAPAuth(jwt, prj_id=project_id)
    client.reload_schema()
    assert client.valid()

    identities = client.list_identity().data
    assert len(identities) > 0
    # At least one identity must come from the LDAP provider.
    is_ldap_user = any(identity.externalIdType == 'openldap_user'
                       for identity in identities)
    assert is_ldap_user
    return client


def get_authed_token(username=os.getenv('LDAP_USER1', 'devUserA'),
                     password=os.getenv('LDAP_USER1_PASSWORD',
                                        'Password1')):
    """Log in against the token endpoint; return the decoded token body."""
    token = requests.post(base_url() + 'token', {
        'code': username + ':' + password
    })
    token = token.json()
    assert token['type'] != 'error'
    assert token['user'] == username
    assert token['userIdentity']['login'] == username
    return token


def load_config():
    """Build an LDAP configuration dict from environment variables."""
    config = {
        "accessMode": "unrestricted",
        'domain': os.environ.get(
            'API_AUTH_OPEN_LDAP_DOMAIN', "dc=rancher,dc=io"),
        'groupNameField':
            os.environ.get('API_AUTH_OPEN_LDAP_GROUP_NAME_FIELD', 'name'),
        'groupObjectClass': os.environ.get(
            'API_AUTH_OPEN_LDAP_GROUP_OBJECT_CLASS', 'group'),
        'groupSearchField': os.environ.get(
            'API_AUTH_OPEN_LDAP_GROUP_SEARCH_FIELD', 'sAMAccountName'),
        'loginDomain': os.environ.get(
            'API_AUTH_OPEN_LDAP_LOGIN_NAME', 'rancher'),
        'port': os.environ.get('API_AUTH_OPEN_LDAP_PORT', 389),
        'enabled': True,
        'server': os.environ.get('API_AUTH_OPEN_LDAP_SERVER',
                                 'ad.rancher.io'),
        'serviceAccountPassword':
            os.environ.get('API_AUTH_OPEN_LDAP_'
                           'SERVICE_ACCOUNT_PASSWORD', 'Password1'),
        'serviceAccountUsername':
            os.environ.get('API_AUTH_OPEN_LDAP_'
                           'SERVICE_ACCOUNT_USERNAME', 'cattle'),
        'tls': False,
        'userDisabledBitMask':
            os.environ.get('API_AUTH_OPEN_LDAP_'
                           'USER_DISABLED_BIT_MASK', '2'),
        'userEnabledAttribute':
            os.environ.get('API_AUTH_OPEN_LDAP_'
                           'USER_ENABLED_ATTRIBUTE', 'userAccountControl'),
        'userLoginField':
            os.environ.get('API_AUTH_OPEN_LDAP_USER_LOGIN_FIELD',
                           'sAMAccountName'),
        'userNameField':
            os.environ.get('API_AUTH_OPEN_LDAP_'
                           'USER_NAME_FIELD', 'name'),
        'userObjectClass': os.environ.get(
            'API_AUTH_OPEN_LDAP_USER_OBJECT_CLASS', 'person'),
        'userSearchField': os.environ.get(
            'API_AUTH_OPEN_LDAP_USER_SEARCH_FIELD', 'name')
    }
    return config


@pytest.fixture(scope='module')
def ldap_config(admin_client, request):
    """Enable LDAP auth for the module; disable it again on teardown."""
    config = load_config()
    admin_client.create_ldapconfig(config)
    service_account_dn = os.getenv('API_AUTH_OPEN_LDAP_SERVICE_ACCOUNT_DN',
                                   "cn=Cattle,"
                                   "ou=Rancher Labs,dc=rancher,dc=io")
    # The service account must resolve to an identity once LDAP is on.
    x = admin_client.by_id('identity', 'ldap_user:' + service_account_dn)
    assert x.login == config['serviceAccountUsername']

    def fin():
        config = load_config()
        config['enabled'] = None
        admin_client.create_ldapconfig(config)

    request.addfinalizer(fin)


def _assert_login_rejected(username, password):
    """Assert the token endpoint rejects the credentials with a 401."""
    token = requests.post(base_url() + 'token', {
        'code': username + ':' + password,
        'authProvider': 'ldapconfig'
    })
    assert token.status_code == 401
    token = token.json()
    assert token['type'] == 'error'
    assert token['status'] == 401


@if_ldap
def test_turn_on_ldap_ui(admin_client):
    """Enable LDAP through the web UI and verify the API requires auth."""
    # Start from a disabled configuration.
    config = load_config()
    config['enabled'] = None
    admin_client.create_ldapconfig(config)

    port = int(os.getenv('PHANTOMJS_WEBDRIVER_PORT', 4444))
    phantom_bin = os.getenv('PHANTOMJS_BIN', '/usr/local/bin/phantomjs')
    driver = webdriver.PhantomJS(phantom_bin, port=port)
    driver.delete_all_cookies()
    max_wait = 60
    driver.set_page_load_timeout(max_wait)
    driver.set_script_timeout(max_wait)
    driver.implicitly_wait(10)
    driver.set_window_size(1120, 550)

    # base_url() ends in 'v1/'; strip it to reach the UI routes.
    driver.get('{}logout'.format(base_url()[:-3]))
    url = '{}admin/access/openldap'.format(base_url()[:-3])
    driver.get(url)

    inputs = driver.find_elements_by_class_name('ember-text-field')
    # Values in the order the UI renders its text fields.
    config = [
        os.environ.get('API_AUTH_OPEN_LDAP_SERVER', 'ad.rancher.io'),
        os.environ.get('API_AUTH_OPEN_LDAP_PORT', 389),
        os.environ.get(
            'API_AUTH_OPEN_LDAP_SERVICE_ACCOUNT_USERNAME', 'cattle'),
        os.environ.get(
            'API_AUTH_OPEN_LDAP_SERVICE_ACCOUNT_PASSWORD', 'Password1'),
        os.environ.get('API_AUTH_OPEN_LDAP_DOMAIN', "dc=rancher,dc=io"),
        os.environ.get('API_AUTH_OPEN_LDAP_LOGIN_NAME', 'rancher'),
        os.environ.get('API_AUTH_OPEN_LDAP_USER_OBJECT_CLASS', 'person'),
        os.environ.get(
            'API_AUTH_OPEN_LDAP_USER_LOGIN_FIELD', 'sAMAccountName'),
        os.environ.get('API_AUTH_OPEN_LDAP_USER_NAME_FIELD', 'name'),
        os.environ.get('API_AUTH_OPEN_LDAP_USER_SEARCH_FIELD', 'name'),
        os.environ.get('API_AUTH_OPEN_LDAP_USER_ENABLED_ATTRIBUTE',
                       'userAccountControl'),
        os.environ.get(
            'API_AUTH_OPEN_LDAP_USER_DISABLED_BIT_MASK', '2'),
        os.environ.get('API_AUTH_OPEN_LDAP_GROUP_OBJECT_CLASS', 'group'),
        os.environ.get('API_AUTH_OPEN_LDAP_GROUP_NAME_FIELD', 'name'),
        os.environ.get(
            'API_AUTH_OPEN_LDAP_GROUP_SEARCH_FIELD', 'sAMAccountName'),
        os.getenv('LDAP_USER1', 'devUserA'),
        os.getenv('LDAP_USER1_PASSWORD', 'Password1')
    ]
    for i, field in enumerate(inputs):
        field.clear()
        field.send_keys(config[i])

    driver.find_element_by_class_name('btn-primary').click()
    try:
        # A confirmation button may or may not appear; click it if it does.
        driver.find_element_by_class_name('btn-primary').click()
    except Exception:
        pass
    time.sleep(10)

    # Once LDAP is enabled, anonymous API access must be rejected.
    no_auth = requests.get(URL)
    assert no_auth.status_code == 401


@if_ldap
def test_ldap_search_get_user(admin_client, ldap_config):
    """Look up an LDAP user by name and by id; both views must agree."""
    search_user = os.getenv('LDAP_USER1', 'devUserA')
    search_user_name = os.getenv('LDAP_USER_NAME', 'Dev A. User')
    user = admin_client.list_identity(name=search_user_name).data[0]
    assert user.name == search_user_name
    assert user.login == search_user
    user_copy = admin_client.by_id('identity', user.id)
    assert user.name == user_copy.name
    assert user.id == user_copy.id
    assert user.login == user_copy.login
    assert user.profilePicture == user_copy.profilePicture
    assert user.profileUrl == user_copy.profileUrl


@if_ldap
def test_ldap_search_get_group(admin_client, ldap_config):
    """Look up an LDAP group by name and by id; both views must agree."""
    search_group = os.getenv('LDAP_GROUP', 'qualityAssurance')
    group = admin_client.list_identity(name=search_group).data[0]
    group_copy = admin_client.by_id('identity', group.id)
    assert group.name == group_copy.name
    assert group.id == group_copy.id
    assert group.login == group_copy.login
    assert group.profilePicture == group_copy.profilePicture
    assert group.profileUrl == group_copy.profileUrl


@if_ldap
def test_ldap_login(admin_client, cattle_url, ldap_config):
    """A valid LDAP user can authenticate and obtain a working client."""
    create_ldap_client()


@if_ldap
def test_ldap_incorrect_login(ldap_config):
    """Wrong, empty, and blank passwords are all rejected with 401."""
    username = os.getenv('LDAP_USER1', 'devUserA')
    _assert_login_rejected(username, random_str())
    _assert_login_rejected(username, "")
    _assert_login_rejected(username, " ")


@if_ldap
def test_ldap_unauthorized_login(ldap_config):
    """A valid-looking but wrong username/password pair is rejected."""
    # NOTE(review): username/password are deliberately the service
    # account's password/username swapped -- a credential pair that
    # exists as strings but is not a valid login.  Confirm intent.
    username = os.environ.get('API_AUTH_OPEN_LDAP_'
                              'SERVICE_ACCOUNT_PASSWORD', 'Password1')
    password = os.environ.get('API_AUTH_OPEN_LDAP_'
                              'SERVICE_ACCOUNT_USERNAME', 'cattle')
    _assert_login_rejected(username, password)


@if_ldap
def test_ldap_project_members(ldap_config):
    """Project membership can be granted to LDAP users and groups."""
    user1_client = create_ldap_client()
    user1_identity = get_authed_token()['userIdentity']
    username = os.getenv('LDAP_USER2', 'devUserB')
    password = os.getenv('LDAP_USER2_PASSWORD', 'Password1')
    user2_client = create_ldap_client(username=username, password=password)
    user2_identity = get_authed_token(username=username,
                                      password=password)['userIdentity']
    group = os.getenv('LDAP_GROUP', 'qualityAssurance')
    group = user1_client.list_identity(name=group).data[0]

    # user1 owns the project; user2 is a plain member.
    project = user1_client.create_project(members=[
        idToMember(user1_identity, 'owner'),
        idToMember(user2_identity, 'member')
    ])
    project = user1_client.wait_success(project)
    user2_client.by_id('project', project.id)

    # Hand ownership to the group; user2 (a group member) can now delete.
    project.setmembers(members=[
        idToMember(group, 'owner')
    ])
    project = user2_client.by_id('project', project.id)
    user2_client.delete(project)


def idToMember(identity, role):
    """Convert an identity (dict-like) into a project-member record."""
    return {
        'externalId': identity['externalId'],
        'externalIdType': identity['externalIdType'],
        'role': role
    }


@if_ldap
def test_ldap_project_create(ldap_config):
    """An LDAP user can create and delete a project it owns."""
    user1_client = create_ldap_client()
    identity = get_authed_token()['userIdentity']
    members = [idToMember(identity, 'owner')]
    project = user1_client.create_project(members=members)
    project = user1_client.wait_success(project)
    assert project is not None
    user1_client.delete(project)
apache-2.0
redhat-openstack/neutron
neutron/db/portsecurity_db.py
21
7724
# Copyright 2013 VMware, Inc.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy.orm import exc

from neutron.api.v2 import attributes as attrs
from neutron.db import db_base_plugin_v2
from neutron.db import model_base
from neutron.db import models_v2
from neutron.extensions import portsecurity as psec
from neutron.openstack.common import log as logging

LOG = logging.getLogger(__name__)


class PortSecurityBinding(model_base.BASEV2):
    """Per-port port-security flag."""

    port_id = sa.Column(sa.String(36),
                        sa.ForeignKey('ports.id', ondelete="CASCADE"),
                        primary_key=True)
    port_security_enabled = sa.Column(sa.Boolean(), nullable=False)

    # Add a relationship to the Port model in order to be able to
    # instruct SQLAlchemy to eagerly load port security binding
    port = orm.relationship(
        models_v2.Port,
        backref=orm.backref("port_security", uselist=False,
                            cascade='delete', lazy='joined'))


class NetworkSecurityBinding(model_base.BASEV2):
    """Per-network default for the port-security flag."""

    network_id = sa.Column(sa.String(36),
                           sa.ForeignKey('networks.id', ondelete="CASCADE"),
                           primary_key=True)
    port_security_enabled = sa.Column(sa.Boolean(), nullable=False)

    # Add a relationship to the Port model in order to be able to instruct
    # SQLAlchemy to eagerly load default port security setting for ports
    # on this network
    network = orm.relationship(
        models_v2.Network,
        backref=orm.backref("port_security", uselist=False,
                            cascade='delete', lazy='joined'))


class PortSecurityDbMixin(object):
    """Mixin class to add port security."""

    def _process_network_port_security_create(
        self, context, network_req, network_res):
        """Persist the network-level port-security default.

        Also mirrors the value into the response dict.
        """
        with context.session.begin(subtransactions=True):
            db = NetworkSecurityBinding(
                network_id=network_res['id'],
                port_security_enabled=network_req[psec.PORTSECURITY])
            context.session.add(db)
        network_res[psec.PORTSECURITY] = network_req[psec.PORTSECURITY]
        return self._make_network_port_security_dict(db)

    def _process_port_port_security_create(
        self, context, port_req, port_res):
        """Persist the port-level port-security flag.

        Also mirrors the value into the response dict.
        """
        with context.session.begin(subtransactions=True):
            db = PortSecurityBinding(
                port_id=port_res['id'],
                port_security_enabled=port_req[psec.PORTSECURITY])
            context.session.add(db)
        port_res[psec.PORTSECURITY] = port_req[psec.PORTSECURITY]
        return self._make_port_security_dict(db)

    def _extend_port_security_dict(self, response_data, db_data):
        # Only expose the field when the plugin advertises the extension.
        if ('port-security' in
            getattr(self, 'supported_extension_aliases', [])):
            psec_value = db_data['port_security'][psec.PORTSECURITY]
            response_data[psec.PORTSECURITY] = psec_value

    def _get_network_security_binding(self, context, network_id):
        """Return the network's port-security default.

        :raises psec.PortSecurityBindingNotFound: no binding exists.
        """
        try:
            query = self._model_query(context, NetworkSecurityBinding)
            binding = query.filter(
                NetworkSecurityBinding.network_id == network_id).one()
        except exc.NoResultFound:
            raise psec.PortSecurityBindingNotFound()
        return binding[psec.PORTSECURITY]

    def _get_port_security_binding(self, context, port_id):
        """Return the port's port-security flag.

        :raises psec.PortSecurityBindingNotFound: no binding exists.
        """
        try:
            query = self._model_query(context, PortSecurityBinding)
            binding = query.filter(
                PortSecurityBinding.port_id == port_id).one()
        except exc.NoResultFound:
            raise psec.PortSecurityBindingNotFound()
        return binding[psec.PORTSECURITY]

    def _process_port_port_security_update(
        self, context, port_req, port_res):
        """Update the stored flag when the request carries one; no-op
        otherwise."""
        if psec.PORTSECURITY in port_req:
            port_security_enabled = port_req[psec.PORTSECURITY]
        else:
            return
        try:
            query = self._model_query(context, PortSecurityBinding)
            port_id = port_res['id']
            binding = query.filter(
                PortSecurityBinding.port_id == port_id).one()

            binding.port_security_enabled = port_security_enabled
            port_res[psec.PORTSECURITY] = port_security_enabled
        except exc.NoResultFound:
            raise psec.PortSecurityBindingNotFound()

    def _process_network_port_security_update(
        self, context, network_req, network_res):
        """Update the stored network default when the request carries one;
        no-op otherwise."""
        if psec.PORTSECURITY in network_req:
            port_security_enabled = network_req[psec.PORTSECURITY]
        else:
            return
        try:
            query = self._model_query(context, NetworkSecurityBinding)
            network_id = network_res['id']
            binding = query.filter(
                NetworkSecurityBinding.network_id == network_id).one()

            binding.port_security_enabled = port_security_enabled
            network_res[psec.PORTSECURITY] = port_security_enabled
        except exc.NoResultFound:
            raise psec.PortSecurityBindingNotFound()

    def _make_network_port_security_dict(self, port_security, fields=None):
        res = {'network_id': port_security['network_id'],
               psec.PORTSECURITY: port_security[psec.PORTSECURITY]}
        return self._fields(res, fields)

    def _determine_port_security_and_has_ip(self, context, port):
        """Returns a tuple of booleans (port_security_enabled, has_ip).

        Port_security is the value associated with the port if one is
        present otherwise the value associated with the network is
        returned. has_ip is if the port is associated with an ip or not.
        """
        has_ip = self._ip_on_port(port)
        # we don't apply security groups for dhcp, router
        if (port.get('device_owner') and
                port['device_owner'].startswith('network:')):
            return (False, has_ip)

        if (psec.PORTSECURITY in port and
                isinstance(port[psec.PORTSECURITY], bool)):
            port_security_enabled = port[psec.PORTSECURITY]

        # If port has an ip and security_groups are passed in
        # conveniently set port_security_enabled to true this way
        # user doesn't also have to pass in port_security_enabled=True
        # when creating ports.
        # BUG FIX: the original passed the literal string
        # 'security_groups' to is_attr_set(), which is always "set",
        # so the network default below was never consulted.  Check the
        # actual attribute value instead.
        elif (has_ip and attrs.is_attr_set(port.get('security_groups'))):
            port_security_enabled = True
        else:
            # Fall back to the network-wide default.
            port_security_enabled = self._get_network_security_binding(
                context, port['network_id'])

        return (port_security_enabled, has_ip)

    def _make_port_security_dict(self, port, fields=None):
        res = {'port_id': port['port_id'],
               psec.PORTSECURITY: port[psec.PORTSECURITY]}
        return self._fields(res, fields)

    def _ip_on_port(self, port):
        return bool(port.get('fixed_ips'))

# Register dict extend functions for ports and networks
db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
    attrs.NETWORKS, ['_extend_port_security_dict'])
db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
    attrs.PORTS, ['_extend_port_security_dict'])
apache-2.0
neumerance/cloudloon2
.venv/lib/python2.7/site-packages/django/contrib/auth/urls.py
170
1205
# The views used below are normally mapped in django.contrib.admin.urls.py
# This URLs file is used to provide a reliable view deployment for test
# purposes.  It is also provided as a convenience to those who want to
# deploy these URLs elsewhere.

from django.conf.urls import patterns, url

# All views live in django.contrib.auth.views; pass the module path as the
# patterns() prefix so each entry only names the view function.
urlpatterns = patterns('django.contrib.auth.views',
    url(r'^login/$', 'login',
        name='login'),
    url(r'^logout/$', 'logout',
        name='logout'),
    url(r'^password_change/$', 'password_change',
        name='password_change'),
    url(r'^password_change/done/$', 'password_change_done',
        name='password_change_done'),
    url(r'^password_reset/$', 'password_reset',
        name='password_reset'),
    url(r'^password_reset/done/$', 'password_reset_done',
        name='password_reset_done'),
    url(r'^reset/(?P<uidb36>[0-9A-Za-z]{1,13})-(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
        'password_reset_confirm',
        name='password_reset_confirm'),
    url(r'^reset/done/$', 'password_reset_complete',
        name='password_reset_complete'),
)
apache-2.0
valhallasw/pywikibot-core
tests/proofreadpage_tests.py
6
7286
# -*- coding: utf-8 -*- """Tests for the proofreadpage module.""" # # (C) Pywikibot team, 2015 # # Distributed under the terms of the MIT license. # from __future__ import unicode_literals __version__ = '$Id$' import json import pywikibot from pywikibot.proofreadpage import ProofreadPage from pywikibot.data import api from tests.aspects import unittest, TestCase from tests.basepage_tests import ( BasePageMethodsTestBase, BasePageLoadRevisionsCachingTestBase, ) class TestProofreadPageInvalidSite(TestCase): """Test ProofreadPage class.""" family = 'wikipedia' code = 'en' cached = True def test_invalid_site_source(self): """Test ProofreadPage from invalid Site as source.""" self.assertRaises(pywikibot.UnknownExtension, ProofreadPage, self.site, 'title') class TestBasePageMethods(BasePageMethodsTestBase): """Test behavior of ProofreadPage methods inherited from BasePage.""" family = 'wikisource' code = 'en' def setUp(self): """Set up test case.""" self._page = ProofreadPage( self.site, 'Page:Popular Science Monthly Volume 1.djvu/12') super(TestBasePageMethods, self).setUp() def test_basepage_methods(self): """Test ProofreadPage methods inherited from superclass BasePage.""" self._test_invoke() self._test_return_datatypes() class TestLoadRevisionsCaching(BasePageLoadRevisionsCachingTestBase): """Test site.loadrevisions() caching.""" family = 'wikisource' code = 'en' def setUp(self): """Set up test case.""" self._page = ProofreadPage( self.site, 'Page:Popular Science Monthly Volume 1.djvu/12') super(TestLoadRevisionsCaching, self).setUp() def test_page_text(self): """Test site.loadrevisions() with Page.text.""" self._test_page_text() class TestProofreadPageValidSite(TestCase): """Test ProofreadPage class.""" family = 'wikisource' code = 'en' cached = True valid = { 'title': 'Page:Popular Science Monthly Volume 1.djvu/12', 'ql': 4, 'user': 'T. 
Mazzei', 'header': u"{{rh|2|''THE POPULAR SCIENCE MONTHLY.''}}", 'footer': u'\n{{smallrefs}}', } existing_invalid = { 'title': 'Main Page', } not_existing_invalid = { 'title': 'User:cannot_exists', 'title1': 'User:Popular Science Monthly Volume 1.djvu/12' } def test_valid_site_source(self): """Test ProofreadPage from valid Site as source.""" page = ProofreadPage(self.site, 'title') self.assertEqual(page.namespace(), self.site.proofread_page_ns) def test_invalid_existing_page_source_in_valid_site(self): """Test ProofreadPage from invalid existing Page as source.""" source = pywikibot.Page(self.site, self.existing_invalid['title']) self.assertRaises(ValueError, ProofreadPage, source) def test_invalid_not_existing_page_source_in_valid_site(self): """Test ProofreadPage from invalid not existing Page as source.""" # namespace is forced source = pywikibot.Page(self.site, self.not_existing_invalid['title']) fixed_source = pywikibot.Page(self.site, source.title(withNamespace=False), ns=self.site.proofread_page_ns) page = ProofreadPage(fixed_source) self.assertEqual(page.title(), fixed_source.title()) def test_invalid_not_existing_page_source_in_valid_site_wrong_ns(self): """Test ProofreadPage from Page not existing in non-Page ns as source.""" source = pywikibot.Page(self.site, self.not_existing_invalid['title1']) self.assertRaises(ValueError, ProofreadPage, source) def test_invalid_link_source_in_valid_site(self): """Test ProofreadPage from invalid Link as source.""" source = pywikibot.Link(self.not_existing_invalid['title'], source=self.site) self.assertRaises(ValueError, ProofreadPage, source) def test_valid_link_source_in_valid_site(self): """Test ProofreadPage from valid Link as source.""" source = pywikibot.Link( self.valid['title'], source=self.site, defaultNamespace=self.site.proofread_page_ns) page = ProofreadPage(source) self.assertEqual(page.title(withNamespace=False), source.title) self.assertEqual(page.namespace(), source.namespace) def 
test_valid_parsing(self): """Test ProofreadPage page parsing functions.""" page = ProofreadPage(self.site, self.valid['title']) self.assertEqual(page.ql, self.valid['ql']) self.assertEqual(page.user, self.valid['user']) self.assertEqual(page.header, self.valid['header']) self.assertEqual(page.footer, self.valid['footer']) def test_decompose_recompose_text(self): """Test ProofreadPage page decomposing/composing text.""" page = ProofreadPage(self.site, self.valid['title']) plain_text = pywikibot.Page(self.site, self.valid['title']).text assert page.text self.assertEqual(plain_text, page.text) def test_preload_from_not_existing_page(self): """Test ProofreadPage page decomposing/composing text.""" page = ProofreadPage(self.site, 'dummy test page') self.assertEqual(page.text, '<noinclude><pagequality level="1" user="%s" />' '<div class="pagetext">\n\n\n</noinclude>' '<noinclude><references/></div></noinclude>' % self.site.username()) def test_preload_from_empty_text(self): """Test ProofreadPage page decomposing/composing text.""" page = ProofreadPage(self.site, 'dummy test page') page.text = '' self.assertEqual(page.text, '<noinclude><pagequality level="1" user="%s" />' '<div class="pagetext">\n\n\n</noinclude>' '<noinclude></div></noinclude>' % self.site.username()) def test_json_format(self): """Test conversion to json format.""" page = ProofreadPage(self.site, self.valid['title']) rvargs = {'rvprop': 'ids|flags|timestamp|user|comment|content', 'rvcontentformat': 'application/json', 'titles': page, } rvgen = self.site._generator(api.PropertyGenerator, type_arg='info|revisions', total=1, **rvargs) rvgen.set_maximum_items(-1) # suppress use of rvlimit parameter try: pagedict = next(iter(rvgen)) loaded_text = pagedict.get('revisions')[0].get('*') except (StopIteration, TypeError, KeyError, ValueError, IndexError): page_text = '' page_text = page._page_to_json() self.assertEqual(json.loads(page_text), json.loads(loaded_text)) if __name__ == '__main__': try: 
unittest.main() except SystemExit: pass
mit
MattsFleaMarket/python-for-android
python-build/python-libs/gdata/src/gdata/tlslite/TLSRecordLayer.py
270
44020
"""Helper class for TLSConnection.""" from __future__ import generators from utils.compat import * from utils.cryptomath import * from utils.cipherfactory import createAES, createRC4, createTripleDES from utils.codec import * from errors import * from messages import * from mathtls import * from constants import * from utils.cryptomath import getRandomBytes from utils import hmac from FileObject import FileObject import sha import md5 import socket import errno import traceback class _ConnectionState: def __init__(self): self.macContext = None self.encContext = None self.seqnum = 0 def getSeqNumStr(self): w = Writer(8) w.add(self.seqnum, 8) seqnumStr = bytesToString(w.bytes) self.seqnum += 1 return seqnumStr class TLSRecordLayer: """ This class handles data transmission for a TLS connection. Its only subclass is L{tlslite.TLSConnection.TLSConnection}. We've separated the code in this class from TLSConnection to make things more readable. @type sock: socket.socket @ivar sock: The underlying socket object. @type session: L{tlslite.Session.Session} @ivar session: The session corresponding to this connection. Due to TLS session resumption, multiple connections can correspond to the same underlying session. @type version: tuple @ivar version: The TLS version being used for this connection. (3,0) means SSL 3.0, and (3,1) means TLS 1.0. @type closed: bool @ivar closed: If this connection is closed. @type resumed: bool @ivar resumed: If this connection is based on a resumed session. @type allegedSharedKeyUsername: str or None @ivar allegedSharedKeyUsername: This is set to the shared-key username asserted by the client, whether the handshake succeeded or not. If the handshake fails, this can be inspected to determine if a guessing attack is in progress against a particular user account. @type allegedSrpUsername: str or None @ivar allegedSrpUsername: This is set to the SRP username asserted by the client, whether the handshake succeeded or not. 
If the handshake fails, this can be inspected to determine if a guessing attack is in progress against a particular user account. @type closeSocket: bool @ivar closeSocket: If the socket should be closed when the connection is closed (writable). If you set this to True, TLS Lite will assume the responsibility of closing the socket when the TLS Connection is shutdown (either through an error or through the user calling close()). The default is False. @type ignoreAbruptClose: bool @ivar ignoreAbruptClose: If an abrupt close of the socket should raise an error (writable). If you set this to True, TLS Lite will not raise a L{tlslite.errors.TLSAbruptCloseError} exception if the underlying socket is unexpectedly closed. Such an unexpected closure could be caused by an attacker. However, it also occurs with some incorrect TLS implementations. You should set this to True only if you're not worried about an attacker truncating the connection, and only if necessary to avoid spurious errors. The default is False. @sort: __init__, read, readAsync, write, writeAsync, close, closeAsync, getCipherImplementation, getCipherName """ def __init__(self, sock): self.sock = sock #My session object (Session instance; read-only) self.session = None #Am I a client or server? self._client = None #Buffers for processing messages self._handshakeBuffer = [] self._readBuffer = "" #Handshake digests self._handshake_md5 = md5.md5() self._handshake_sha = sha.sha() #TLS Protocol Version self.version = (0,0) #read-only self._versionCheck = False #Once we choose a version, this is True #Current and Pending connection states self._writeState = _ConnectionState() self._readState = _ConnectionState() self._pendingWriteState = _ConnectionState() self._pendingReadState = _ConnectionState() #Is the connection open? self.closed = True #read-only self._refCount = 0 #Used to trigger closure #Is this a resumed (or shared-key) session? 
self.resumed = False #read-only #What username did the client claim in his handshake? self.allegedSharedKeyUsername = None self.allegedSrpUsername = None #On a call to close(), do we close the socket? (writeable) self.closeSocket = False #If the socket is abruptly closed, do we ignore it #and pretend the connection was shut down properly? (writeable) self.ignoreAbruptClose = False #Fault we will induce, for testing purposes self.fault = None #********************************************************* # Public Functions START #********************************************************* def read(self, max=None, min=1): """Read some data from the TLS connection. This function will block until at least 'min' bytes are available (or the connection is closed). If an exception is raised, the connection will have been automatically closed. @type max: int @param max: The maximum number of bytes to return. @type min: int @param min: The minimum number of bytes to return @rtype: str @return: A string of no more than 'max' bytes, and no fewer than 'min' (unless the connection has been closed, in which case fewer than 'min' bytes may be returned). @raise socket.error: If a socket error occurs. @raise tlslite.errors.TLSAbruptCloseError: If the socket is closed without a preceding alert. @raise tlslite.errors.TLSAlert: If a TLS alert is signalled. """ for result in self.readAsync(max, min): pass return result def readAsync(self, max=None, min=1): """Start a read operation on the TLS connection. This function returns a generator which behaves similarly to read(). Successive invocations of the generator will return 0 if it is waiting to read from the socket, 1 if it is waiting to write to the socket, or a string if the read operation has completed. @rtype: iterable @return: A generator; see above for details. 
""" try: while len(self._readBuffer)<min and not self.closed: try: for result in self._getMsg(ContentType.application_data): if result in (0,1): yield result applicationData = result self._readBuffer += bytesToString(applicationData.write()) except TLSRemoteAlert, alert: if alert.description != AlertDescription.close_notify: raise except TLSAbruptCloseError: if not self.ignoreAbruptClose: raise else: self._shutdown(True) if max == None: max = len(self._readBuffer) returnStr = self._readBuffer[:max] self._readBuffer = self._readBuffer[max:] yield returnStr except: self._shutdown(False) raise def write(self, s): """Write some data to the TLS connection. This function will block until all the data has been sent. If an exception is raised, the connection will have been automatically closed. @type s: str @param s: The data to transmit to the other party. @raise socket.error: If a socket error occurs. """ for result in self.writeAsync(s): pass def writeAsync(self, s): """Start a write operation on the TLS connection. This function returns a generator which behaves similarly to write(). Successive invocations of the generator will return 1 if it is waiting to write to the socket, or will raise StopIteration if the write operation has completed. @rtype: iterable @return: A generator; see above for details. """ try: if self.closed: raise ValueError() index = 0 blockSize = 16384 skipEmptyFrag = False while 1: startIndex = index * blockSize endIndex = startIndex + blockSize if startIndex >= len(s): break if endIndex > len(s): endIndex = len(s) block = stringToBytes(s[startIndex : endIndex]) applicationData = ApplicationData().create(block) for result in self._sendMsg(applicationData, skipEmptyFrag): yield result skipEmptyFrag = True #only send an empy fragment on 1st message index += 1 except: self._shutdown(False) raise def close(self): """Close the TLS connection. This function will block until it has exchanged close_notify alerts with the other party. 
After doing so, it will shut down the TLS connection. Further attempts to read through this connection will return "". Further attempts to write through this connection will raise ValueError. If makefile() has been called on this connection, the connection will be not be closed until the connection object and all file objects have been closed. Even if an exception is raised, the connection will have been closed. @raise socket.error: If a socket error occurs. @raise tlslite.errors.TLSAbruptCloseError: If the socket is closed without a preceding alert. @raise tlslite.errors.TLSAlert: If a TLS alert is signalled. """ if not self.closed: for result in self._decrefAsync(): pass def closeAsync(self): """Start a close operation on the TLS connection. This function returns a generator which behaves similarly to close(). Successive invocations of the generator will return 0 if it is waiting to read from the socket, 1 if it is waiting to write to the socket, or will raise StopIteration if the close operation has completed. @rtype: iterable @return: A generator; see above for details. """ if not self.closed: for result in self._decrefAsync(): yield result def _decrefAsync(self): self._refCount -= 1 if self._refCount == 0 and not self.closed: try: for result in self._sendMsg(Alert().create(\ AlertDescription.close_notify, AlertLevel.warning)): yield result alert = None while not alert: for result in self._getMsg((ContentType.alert, \ ContentType.application_data)): if result in (0,1): yield result if result.contentType == ContentType.alert: alert = result if alert.description == AlertDescription.close_notify: self._shutdown(True) else: raise TLSRemoteAlert(alert) except (socket.error, TLSAbruptCloseError): #If the other side closes the socket, that's okay self._shutdown(True) except: self._shutdown(False) raise def getCipherName(self): """Get the name of the cipher used with this connection. @rtype: str @return: The name of the cipher used with this connection. 
Either 'aes128', 'aes256', 'rc4', or '3des'. """ if not self._writeState.encContext: return None return self._writeState.encContext.name def getCipherImplementation(self): """Get the name of the cipher implementation used with this connection. @rtype: str @return: The name of the cipher implementation used with this connection. Either 'python', 'cryptlib', 'openssl', or 'pycrypto'. """ if not self._writeState.encContext: return None return self._writeState.encContext.implementation #Emulate a socket, somewhat - def send(self, s): """Send data to the TLS connection (socket emulation). @raise socket.error: If a socket error occurs. """ self.write(s) return len(s) def sendall(self, s): """Send data to the TLS connection (socket emulation). @raise socket.error: If a socket error occurs. """ self.write(s) def recv(self, bufsize): """Get some data from the TLS connection (socket emulation). @raise socket.error: If a socket error occurs. @raise tlslite.errors.TLSAbruptCloseError: If the socket is closed without a preceding alert. @raise tlslite.errors.TLSAlert: If a TLS alert is signalled. """ return self.read(bufsize) def makefile(self, mode='r', bufsize=-1): """Create a file object for the TLS connection (socket emulation). 
@rtype: L{tlslite.FileObject.FileObject} """ self._refCount += 1 return FileObject(self, mode, bufsize) def getsockname(self): """Return the socket's own address (socket emulation).""" return self.sock.getsockname() def getpeername(self): """Return the remote address to which the socket is connected (socket emulation).""" return self.sock.getpeername() def settimeout(self, value): """Set a timeout on blocking socket operations (socket emulation).""" return self.sock.settimeout(value) def gettimeout(self): """Return the timeout associated with socket operations (socket emulation).""" return self.sock.gettimeout() def setsockopt(self, level, optname, value): """Set the value of the given socket option (socket emulation).""" return self.sock.setsockopt(level, optname, value) #********************************************************* # Public Functions END #********************************************************* def _shutdown(self, resumable): self._writeState = _ConnectionState() self._readState = _ConnectionState() #Don't do this: self._readBuffer = "" self.version = (0,0) self._versionCheck = False self.closed = True if self.closeSocket: self.sock.close() #Even if resumable is False, we'll never toggle this on if not resumable and self.session: self.session.resumable = False def _sendError(self, alertDescription, errorStr=None): alert = Alert().create(alertDescription, AlertLevel.fatal) for result in self._sendMsg(alert): yield result self._shutdown(False) raise TLSLocalAlert(alert, errorStr) def _sendMsgs(self, msgs): skipEmptyFrag = False for msg in msgs: for result in self._sendMsg(msg, skipEmptyFrag): yield result skipEmptyFrag = True def _sendMsg(self, msg, skipEmptyFrag=False): bytes = msg.write() contentType = msg.contentType #Whenever we're connected and asked to send a message, #we first send an empty Application Data message. This prevents #an attacker from launching a chosen-plaintext attack based on #knowing the next IV. 
if not self.closed and not skipEmptyFrag and self.version == (3,1): if self._writeState.encContext: if self._writeState.encContext.isBlockCipher: for result in self._sendMsg(ApplicationData(), skipEmptyFrag=True): yield result #Update handshake hashes if contentType == ContentType.handshake: bytesStr = bytesToString(bytes) self._handshake_md5.update(bytesStr) self._handshake_sha.update(bytesStr) #Calculate MAC if self._writeState.macContext: seqnumStr = self._writeState.getSeqNumStr() bytesStr = bytesToString(bytes) mac = self._writeState.macContext.copy() mac.update(seqnumStr) mac.update(chr(contentType)) if self.version == (3,0): mac.update( chr( int(len(bytes)/256) ) ) mac.update( chr( int(len(bytes)%256) ) ) elif self.version in ((3,1), (3,2)): mac.update(chr(self.version[0])) mac.update(chr(self.version[1])) mac.update( chr( int(len(bytes)/256) ) ) mac.update( chr( int(len(bytes)%256) ) ) else: raise AssertionError() mac.update(bytesStr) macString = mac.digest() macBytes = stringToBytes(macString) if self.fault == Fault.badMAC: macBytes[0] = (macBytes[0]+1) % 256 #Encrypt for Block or Stream Cipher if self._writeState.encContext: #Add padding and encrypt (for Block Cipher): if self._writeState.encContext.isBlockCipher: #Add TLS 1.1 fixed block if self.version == (3,2): bytes = self.fixedIVBlock + bytes #Add padding: bytes = bytes + (macBytes + paddingBytes) currentLength = len(bytes) + len(macBytes) + 1 blockLength = self._writeState.encContext.block_size paddingLength = blockLength-(currentLength % blockLength) paddingBytes = createByteArraySequence([paddingLength] * \ (paddingLength+1)) if self.fault == Fault.badPadding: paddingBytes[0] = (paddingBytes[0]+1) % 256 endBytes = concatArrays(macBytes, paddingBytes) bytes = concatArrays(bytes, endBytes) #Encrypt plaintext = stringToBytes(bytes) ciphertext = self._writeState.encContext.encrypt(plaintext) bytes = stringToBytes(ciphertext) #Encrypt (for Stream Cipher) else: bytes = concatArrays(bytes, macBytes) 
plaintext = bytesToString(bytes) ciphertext = self._writeState.encContext.encrypt(plaintext) bytes = stringToBytes(ciphertext) #Add record header and send r = RecordHeader3().create(self.version, contentType, len(bytes)) s = bytesToString(concatArrays(r.write(), bytes)) while 1: try: bytesSent = self.sock.send(s) #Might raise socket.error except socket.error, why: if why[0] == errno.EWOULDBLOCK: yield 1 continue else: raise if bytesSent == len(s): return s = s[bytesSent:] yield 1 def _getMsg(self, expectedType, secondaryType=None, constructorType=None): try: if not isinstance(expectedType, tuple): expectedType = (expectedType,) #Spin in a loop, until we've got a non-empty record of a type we #expect. The loop will be repeated if: # - we receive a renegotiation attempt; we send no_renegotiation, # then try again # - we receive an empty application-data fragment; we try again while 1: for result in self._getNextRecord(): if result in (0,1): yield result recordHeader, p = result #If this is an empty application-data fragment, try again if recordHeader.type == ContentType.application_data: if p.index == len(p.bytes): continue #If we received an unexpected record type... if recordHeader.type not in expectedType: #If we received an alert... if recordHeader.type == ContentType.alert: alert = Alert().parse(p) #We either received a fatal error, a warning, or a #close_notify. In any case, we're going to close the #connection. In the latter two cases we respond with #a close_notify, but ignore any socket errors, since #the other side might have already closed the socket. if alert.level == AlertLevel.warning or \ alert.description == AlertDescription.close_notify: #If the sendMsg() call fails because the socket has #already been closed, we will be forgiving and not #report the error nor invalidate the "resumability" #of the session. 
try: alertMsg = Alert() alertMsg.create(AlertDescription.close_notify, AlertLevel.warning) for result in self._sendMsg(alertMsg): yield result except socket.error: pass if alert.description == \ AlertDescription.close_notify: self._shutdown(True) elif alert.level == AlertLevel.warning: self._shutdown(False) else: #Fatal alert: self._shutdown(False) #Raise the alert as an exception raise TLSRemoteAlert(alert) #If we received a renegotiation attempt... if recordHeader.type == ContentType.handshake: subType = p.get(1) reneg = False if self._client: if subType == HandshakeType.hello_request: reneg = True else: if subType == HandshakeType.client_hello: reneg = True #Send no_renegotiation, then try again if reneg: alertMsg = Alert() alertMsg.create(AlertDescription.no_renegotiation, AlertLevel.warning) for result in self._sendMsg(alertMsg): yield result continue #Otherwise: this is an unexpected record, but neither an #alert nor renegotiation for result in self._sendError(\ AlertDescription.unexpected_message, "received type=%d" % recordHeader.type): yield result break #Parse based on content_type if recordHeader.type == ContentType.change_cipher_spec: yield ChangeCipherSpec().parse(p) elif recordHeader.type == ContentType.alert: yield Alert().parse(p) elif recordHeader.type == ContentType.application_data: yield ApplicationData().parse(p) elif recordHeader.type == ContentType.handshake: #Convert secondaryType to tuple, if it isn't already if not isinstance(secondaryType, tuple): secondaryType = (secondaryType,) #If it's a handshake message, check handshake header if recordHeader.ssl2: subType = p.get(1) if subType != HandshakeType.client_hello: for result in self._sendError(\ AlertDescription.unexpected_message, "Can only handle SSLv2 ClientHello messages"): yield result if HandshakeType.client_hello not in secondaryType: for result in self._sendError(\ AlertDescription.unexpected_message): yield result subType = HandshakeType.client_hello else: subType = p.get(1) if 
subType not in secondaryType: for result in self._sendError(\ AlertDescription.unexpected_message, "Expecting %s, got %s" % (str(secondaryType), subType)): yield result #Update handshake hashes sToHash = bytesToString(p.bytes) self._handshake_md5.update(sToHash) self._handshake_sha.update(sToHash) #Parse based on handshake type if subType == HandshakeType.client_hello: yield ClientHello(recordHeader.ssl2).parse(p) elif subType == HandshakeType.server_hello: yield ServerHello().parse(p) elif subType == HandshakeType.certificate: yield Certificate(constructorType).parse(p) elif subType == HandshakeType.certificate_request: yield CertificateRequest().parse(p) elif subType == HandshakeType.certificate_verify: yield CertificateVerify().parse(p) elif subType == HandshakeType.server_key_exchange: yield ServerKeyExchange(constructorType).parse(p) elif subType == HandshakeType.server_hello_done: yield ServerHelloDone().parse(p) elif subType == HandshakeType.client_key_exchange: yield ClientKeyExchange(constructorType, \ self.version).parse(p) elif subType == HandshakeType.finished: yield Finished(self.version).parse(p) else: raise AssertionError() #If an exception was raised by a Parser or Message instance: except SyntaxError, e: for result in self._sendError(AlertDescription.decode_error, formatExceptionTrace(e)): yield result #Returns next record or next handshake message def _getNextRecord(self): #If there's a handshake message waiting, return it if self._handshakeBuffer: recordHeader, bytes = self._handshakeBuffer[0] self._handshakeBuffer = self._handshakeBuffer[1:] yield (recordHeader, Parser(bytes)) return #Otherwise... 
#Read the next record header bytes = createByteArraySequence([]) recordHeaderLength = 1 ssl2 = False while 1: try: s = self.sock.recv(recordHeaderLength-len(bytes)) except socket.error, why: if why[0] == errno.EWOULDBLOCK: yield 0 continue else: raise #If the connection was abruptly closed, raise an error if len(s)==0: raise TLSAbruptCloseError() bytes += stringToBytes(s) if len(bytes)==1: if bytes[0] in ContentType.all: ssl2 = False recordHeaderLength = 5 elif bytes[0] == 128: ssl2 = True recordHeaderLength = 2 else: raise SyntaxError() if len(bytes) == recordHeaderLength: break #Parse the record header if ssl2: r = RecordHeader2().parse(Parser(bytes)) else: r = RecordHeader3().parse(Parser(bytes)) #Check the record header fields if r.length > 18432: for result in self._sendError(AlertDescription.record_overflow): yield result #Read the record contents bytes = createByteArraySequence([]) while 1: try: s = self.sock.recv(r.length - len(bytes)) except socket.error, why: if why[0] == errno.EWOULDBLOCK: yield 0 continue else: raise #If the connection is closed, raise a socket error if len(s)==0: raise TLSAbruptCloseError() bytes += stringToBytes(s) if len(bytes) == r.length: break #Check the record header fields (2) #We do this after reading the contents from the socket, so that #if there's an error, we at least don't leave extra bytes in the #socket.. # # THIS CHECK HAS NO SECURITY RELEVANCE (?), BUT COULD HURT INTEROP. # SO WE LEAVE IT OUT FOR NOW. 
# #if self._versionCheck and r.version != self.version: # for result in self._sendError(AlertDescription.protocol_version, # "Version in header field: %s, should be %s" % (str(r.version), # str(self.version))): # yield result #Decrypt the record for result in self._decryptRecord(r.type, bytes): if result in (0,1): yield result else: break bytes = result p = Parser(bytes) #If it doesn't contain handshake messages, we can just return it if r.type != ContentType.handshake: yield (r, p) #If it's an SSLv2 ClientHello, we can return it as well elif r.ssl2: yield (r, p) else: #Otherwise, we loop through and add the handshake messages to the #handshake buffer while 1: if p.index == len(bytes): #If we're at the end if not self._handshakeBuffer: for result in self._sendError(\ AlertDescription.decode_error, \ "Received empty handshake record"): yield result break #There needs to be at least 4 bytes to get a header if p.index+4 > len(bytes): for result in self._sendError(\ AlertDescription.decode_error, "A record has a partial handshake message (1)"): yield result p.get(1) # skip handshake type msgLength = p.get(3) if p.index+msgLength > len(bytes): for result in self._sendError(\ AlertDescription.decode_error, "A record has a partial handshake message (2)"): yield result handshakePair = (r, bytes[p.index-4 : p.index+msgLength]) self._handshakeBuffer.append(handshakePair) p.index += msgLength #We've moved at least one handshake message into the #handshakeBuffer, return the first one recordHeader, bytes = self._handshakeBuffer[0] self._handshakeBuffer = self._handshakeBuffer[1:] yield (recordHeader, Parser(bytes)) def _decryptRecord(self, recordType, bytes): if self._readState.encContext: #Decrypt if it's a block cipher if self._readState.encContext.isBlockCipher: blockLength = self._readState.encContext.block_size if len(bytes) % blockLength != 0: for result in self._sendError(\ AlertDescription.decryption_failed, "Encrypted data not a multiple of blocksize"): yield result 
ciphertext = bytesToString(bytes) plaintext = self._readState.encContext.decrypt(ciphertext) if self.version == (3,2): #For TLS 1.1, remove explicit IV plaintext = plaintext[self._readState.encContext.block_size : ] bytes = stringToBytes(plaintext) #Check padding paddingGood = True paddingLength = bytes[-1] if (paddingLength+1) > len(bytes): paddingGood=False totalPaddingLength = 0 else: if self.version == (3,0): totalPaddingLength = paddingLength+1 elif self.version in ((3,1), (3,2)): totalPaddingLength = paddingLength+1 paddingBytes = bytes[-totalPaddingLength:-1] for byte in paddingBytes: if byte != paddingLength: paddingGood = False totalPaddingLength = 0 else: raise AssertionError() #Decrypt if it's a stream cipher else: paddingGood = True ciphertext = bytesToString(bytes) plaintext = self._readState.encContext.decrypt(ciphertext) bytes = stringToBytes(plaintext) totalPaddingLength = 0 #Check MAC macGood = True macLength = self._readState.macContext.digest_size endLength = macLength + totalPaddingLength if endLength > len(bytes): macGood = False else: #Read MAC startIndex = len(bytes) - endLength endIndex = startIndex + macLength checkBytes = bytes[startIndex : endIndex] #Calculate MAC seqnumStr = self._readState.getSeqNumStr() bytes = bytes[:-endLength] bytesStr = bytesToString(bytes) mac = self._readState.macContext.copy() mac.update(seqnumStr) mac.update(chr(recordType)) if self.version == (3,0): mac.update( chr( int(len(bytes)/256) ) ) mac.update( chr( int(len(bytes)%256) ) ) elif self.version in ((3,1), (3,2)): mac.update(chr(self.version[0])) mac.update(chr(self.version[1])) mac.update( chr( int(len(bytes)/256) ) ) mac.update( chr( int(len(bytes)%256) ) ) else: raise AssertionError() mac.update(bytesStr) macString = mac.digest() macBytes = stringToBytes(macString) #Compare MACs if macBytes != checkBytes: macGood = False if not (paddingGood and macGood): for result in self._sendError(AlertDescription.bad_record_mac, "MAC failure (or padding failure)"): 
yield result yield bytes def _handshakeStart(self, client): self._client = client self._handshake_md5 = md5.md5() self._handshake_sha = sha.sha() self._handshakeBuffer = [] self.allegedSharedKeyUsername = None self.allegedSrpUsername = None self._refCount = 1 def _handshakeDone(self, resumed): self.resumed = resumed self.closed = False def _calcPendingStates(self, clientRandom, serverRandom, implementations): if self.session.cipherSuite in CipherSuite.aes128Suites: macLength = 20 keyLength = 16 ivLength = 16 createCipherFunc = createAES elif self.session.cipherSuite in CipherSuite.aes256Suites: macLength = 20 keyLength = 32 ivLength = 16 createCipherFunc = createAES elif self.session.cipherSuite in CipherSuite.rc4Suites: macLength = 20 keyLength = 16 ivLength = 0 createCipherFunc = createRC4 elif self.session.cipherSuite in CipherSuite.tripleDESSuites: macLength = 20 keyLength = 24 ivLength = 8 createCipherFunc = createTripleDES else: raise AssertionError() if self.version == (3,0): createMACFunc = MAC_SSL elif self.version in ((3,1), (3,2)): createMACFunc = hmac.HMAC outputLength = (macLength*2) + (keyLength*2) + (ivLength*2) #Calculate Keying Material from Master Secret if self.version == (3,0): keyBlock = PRF_SSL(self.session.masterSecret, concatArrays(serverRandom, clientRandom), outputLength) elif self.version in ((3,1), (3,2)): keyBlock = PRF(self.session.masterSecret, "key expansion", concatArrays(serverRandom,clientRandom), outputLength) else: raise AssertionError() #Slice up Keying Material clientPendingState = _ConnectionState() serverPendingState = _ConnectionState() p = Parser(keyBlock) clientMACBlock = bytesToString(p.getFixBytes(macLength)) serverMACBlock = bytesToString(p.getFixBytes(macLength)) clientKeyBlock = bytesToString(p.getFixBytes(keyLength)) serverKeyBlock = bytesToString(p.getFixBytes(keyLength)) clientIVBlock = bytesToString(p.getFixBytes(ivLength)) serverIVBlock = bytesToString(p.getFixBytes(ivLength)) clientPendingState.macContext = 
createMACFunc(clientMACBlock, digestmod=sha) serverPendingState.macContext = createMACFunc(serverMACBlock, digestmod=sha) clientPendingState.encContext = createCipherFunc(clientKeyBlock, clientIVBlock, implementations) serverPendingState.encContext = createCipherFunc(serverKeyBlock, serverIVBlock, implementations) #Assign new connection states to pending states if self._client: self._pendingWriteState = clientPendingState self._pendingReadState = serverPendingState else: self._pendingWriteState = serverPendingState self._pendingReadState = clientPendingState if self.version == (3,2) and ivLength: #Choose fixedIVBlock for TLS 1.1 (this is encrypted with the CBC #residue to create the IV for each sent block) self.fixedIVBlock = getRandomBytes(ivLength) def _changeWriteState(self): self._writeState = self._pendingWriteState self._pendingWriteState = _ConnectionState() def _changeReadState(self): self._readState = self._pendingReadState self._pendingReadState = _ConnectionState() def _sendFinished(self): #Send ChangeCipherSpec for result in self._sendMsg(ChangeCipherSpec()): yield result #Switch to pending write state self._changeWriteState() #Calculate verification data verifyData = self._calcFinished(True) if self.fault == Fault.badFinished: verifyData[0] = (verifyData[0]+1)%256 #Send Finished message under new state finished = Finished(self.version).create(verifyData) for result in self._sendMsg(finished): yield result def _getFinished(self): #Get and check ChangeCipherSpec for result in self._getMsg(ContentType.change_cipher_spec): if result in (0,1): yield result changeCipherSpec = result if changeCipherSpec.type != 1: for result in self._sendError(AlertDescription.illegal_parameter, "ChangeCipherSpec type incorrect"): yield result #Switch to pending read state self._changeReadState() #Calculate verification data verifyData = self._calcFinished(False) #Get and check Finished message under new state for result in self._getMsg(ContentType.handshake, 
HandshakeType.finished): if result in (0,1): yield result finished = result if finished.verify_data != verifyData: for result in self._sendError(AlertDescription.decrypt_error, "Finished message is incorrect"): yield result def _calcFinished(self, send=True): if self.version == (3,0): if (self._client and send) or (not self._client and not send): senderStr = "\x43\x4C\x4E\x54" else: senderStr = "\x53\x52\x56\x52" verifyData = self._calcSSLHandshakeHash(self.session.masterSecret, senderStr) return verifyData elif self.version in ((3,1), (3,2)): if (self._client and send) or (not self._client and not send): label = "client finished" else: label = "server finished" handshakeHashes = stringToBytes(self._handshake_md5.digest() + \ self._handshake_sha.digest()) verifyData = PRF(self.session.masterSecret, label, handshakeHashes, 12) return verifyData else: raise AssertionError() #Used for Finished messages and CertificateVerify messages in SSL v3 def _calcSSLHandshakeHash(self, masterSecret, label): masterSecretStr = bytesToString(masterSecret) imac_md5 = self._handshake_md5.copy() imac_sha = self._handshake_sha.copy() imac_md5.update(label + masterSecretStr + '\x36'*48) imac_sha.update(label + masterSecretStr + '\x36'*40) md5Str = md5.md5(masterSecretStr + ('\x5c'*48) + \ imac_md5.digest()).digest() shaStr = sha.sha(masterSecretStr + ('\x5c'*40) + \ imac_sha.digest()).digest() return stringToBytes(md5Str + shaStr)
apache-2.0
DailyActie/Surrogate-Model
surrogate/crossover/cxOrdered.py
1
3234
# MIT License
#
# Copyright (c) 2016 Daily Actie
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Author: Quan Pan <quanpan302@hotmail.com>
# License: MIT License
# Create: 2016-12-02

import random


def cxOrdered(var1, var2):
    """Executes an ordered crossover (OX) on the input individuals.

    The two individuals are modified in place. This crossover expects
    :term:`sequence` individuals of indices (i.e. permutations of
    ``range(size)``); the result for any other type of individuals is
    unpredictable.

    :param var1: The first variable participating in the crossover.
    :param var2: The second variable participating in the crossover.
    :returns: A tuple of the two variables (the same objects, mutated in
        place).

    Moreover, this crossover generates holes in the input individuals. A hole
    is created when an attribute of an individual is between the two crossover
    points of the other individual. Then it rotates the element so that all
    holes are between the crossover points and fills them with the removed
    elements in order. For more details see [Goldberg1989]_.

    This function uses the :func:`~random.sample` function from the python
    base :mod:`random` module.

    .. [Goldberg1989] Goldberg. Genetic algorithms in search, optimization
       and machine learning. Addison Wesley, 1989
    """
    size = min(len(var1), len(var2))
    # BUGFIX: xrange() does not exist on Python 3 (NameError); range()
    # behaves identically here (random.sample only needs a sequence) and is
    # also valid on Python 2.
    a, b = random.sample(range(size), 2)
    if a > b:
        a, b = b, a

    # holesX[v] is True when value v lies inside the crossover segment of
    # the *other* parent and must therefore be kept in varX; False when it
    # will be overwritten during the rotation below.
    holes1, holes2 = [True] * size, [True] * size
    for i in range(size):
        if i < a or i > b:
            holes1[var2[i]] = False
            holes2[var1[i]] = False

    # NOTE: temp1/temp2 deliberately alias var1/var2 (no copy). This is safe
    # because the write cursor k never overtakes the read position
    # (i + b + 1): k only advances on non-hole values, so every slot is read
    # before it can be overwritten.
    temp1, temp2 = var1, var2
    k1, k2 = b + 1, b + 1
    for i in range(size):
        if not holes1[temp1[(i + b + 1) % size]]:
            var1[k1 % size] = temp1[(i + b + 1) % size]
            k1 += 1
        if not holes2[temp2[(i + b + 1) % size]]:
            var2[k2 % size] = temp2[(i + b + 1) % size]
            k2 += 1

    # Swap the content between a and b (included)
    for i in range(a, b + 1):
        var1[i], var2[i] = var2[i], var1[i]

    return var1, var2
mit
flyher/pymo
android/pgs4a-0.9.6/python-install/lib/python2.7/distutils/core.py
175
9093
"""distutils.core The only module that needs to be imported to use the Distutils; provides the 'setup' function (which is to be called from the setup script). Also indirectly provides the Distribution and Command classes, although they are really defined in distutils.dist and distutils.cmd. """ __revision__ = "$Id$" import sys import os from distutils.debug import DEBUG from distutils.errors import (DistutilsSetupError, DistutilsArgError, DistutilsError, CCompilerError) from distutils.util import grok_environment_error # Mainly import these so setup scripts can "from distutils.core import" them. from distutils.dist import Distribution from distutils.cmd import Command from distutils.config import PyPIRCCommand from distutils.extension import Extension # This is a barebones help message generated displayed when the user # runs the setup script with no arguments at all. More useful help # is generated with various --help options: global help, list commands, # and per-command help. USAGE = """\ usage: %(script)s [global_opts] cmd1 [cmd1_opts] [cmd2 [cmd2_opts] ...] or: %(script)s --help [cmd1 cmd2 ...] or: %(script)s --help-commands or: %(script)s cmd --help """ def gen_usage(script_name): script = os.path.basename(script_name) return USAGE % {'script': script} # Some mild magic to control the behaviour of 'setup()' from 'run_setup()'. 
_setup_stop_after = None _setup_distribution = None # Legal keyword arguments for the setup() function setup_keywords = ('distclass', 'script_name', 'script_args', 'options', 'name', 'version', 'author', 'author_email', 'maintainer', 'maintainer_email', 'url', 'license', 'description', 'long_description', 'keywords', 'platforms', 'classifiers', 'download_url', 'requires', 'provides', 'obsoletes', ) # Legal keyword arguments for the Extension constructor extension_keywords = ('name', 'sources', 'include_dirs', 'define_macros', 'undef_macros', 'library_dirs', 'libraries', 'runtime_library_dirs', 'extra_objects', 'extra_compile_args', 'extra_link_args', 'swig_opts', 'export_symbols', 'depends', 'language') def setup(**attrs): """The gateway to the Distutils: do everything your setup script needs to do, in a highly flexible and user-driven way. Briefly: create a Distribution instance; find and parse config files; parse the command line; run each Distutils command found there, customized by the options supplied to 'setup()' (as keyword arguments), in config files, and on the command line. The Distribution instance might be an instance of a class supplied via the 'distclass' keyword argument to 'setup'; if no such class is supplied, then the Distribution class (in dist.py) is instantiated. All other arguments to 'setup' (except for 'cmdclass') are used to set attributes of the Distribution instance. The 'cmdclass' argument, if supplied, is a dictionary mapping command names to command classes. Each command encountered on the command line will be turned into a command class, which is in turn instantiated; any class found in 'cmdclass' is used in place of the default, which is (for command 'foo_bar') class 'foo_bar' in module 'distutils.command.foo_bar'. The command class must provide a 'user_options' attribute which is a list of option specifiers for 'distutils.fancy_getopt'. 
Any command-line options between the current and the next command are used to set attributes of the current command object. When the entire command-line has been successfully parsed, calls the 'run()' method on each command object in turn. This method will be driven entirely by the Distribution object (which each command object has a reference to, thanks to its constructor), and the command-specific options that became attributes of each command object. """ global _setup_stop_after, _setup_distribution # Determine the distribution class -- either caller-supplied or # our Distribution (see below). klass = attrs.get('distclass') if klass: del attrs['distclass'] else: klass = Distribution if 'script_name' not in attrs: attrs['script_name'] = os.path.basename(sys.argv[0]) if 'script_args' not in attrs: attrs['script_args'] = sys.argv[1:] # Create the Distribution instance, using the remaining arguments # (ie. everything except distclass) to initialize it try: _setup_distribution = dist = klass(attrs) except DistutilsSetupError, msg: if 'name' in attrs: raise SystemExit, "error in %s setup command: %s" % \ (attrs['name'], msg) else: raise SystemExit, "error in setup command: %s" % msg if _setup_stop_after == "init": return dist # Find and parse the config file(s): they will override options from # the setup script, but be overridden by the command line. dist.parse_config_files() if DEBUG: print "options (after parsing config files):" dist.dump_option_dicts() if _setup_stop_after == "config": return dist # Parse the command line and override config files; any # command-line errors are the end user's fault, so turn them into # SystemExit to suppress tracebacks. 
try: ok = dist.parse_command_line() except DistutilsArgError, msg: raise SystemExit, gen_usage(dist.script_name) + "\nerror: %s" % msg if DEBUG: print "options (after parsing command line):" dist.dump_option_dicts() if _setup_stop_after == "commandline": return dist # And finally, run all the commands found on the command line. if ok: try: dist.run_commands() except KeyboardInterrupt: raise SystemExit, "interrupted" except (IOError, os.error), exc: error = grok_environment_error(exc) if DEBUG: sys.stderr.write(error + "\n") raise else: raise SystemExit, error except (DistutilsError, CCompilerError), msg: if DEBUG: raise else: raise SystemExit, "error: " + str(msg) return dist def run_setup(script_name, script_args=None, stop_after="run"): """Run a setup script in a somewhat controlled environment, and return the Distribution instance that drives things. This is useful if you need to find out the distribution meta-data (passed as keyword args from 'script' to 'setup()', or the contents of the config files or command-line. 'script_name' is a file that will be run with 'execfile()'; 'sys.argv[0]' will be replaced with 'script' for the duration of the call. 'script_args' is a list of strings; if supplied, 'sys.argv[1:]' will be replaced by 'script_args' for the duration of the call. 'stop_after' tells 'setup()' when to stop processing; possible values: init stop after the Distribution instance has been created and populated with the keyword arguments to 'setup()' config stop after config files have been parsed (and their data stored in the Distribution instance) commandline stop after the command-line ('sys.argv[1:]' or 'script_args') have been parsed (and the data stored in the Distribution) run [default] stop after all commands have been run (the same as if 'setup()' had been called in the usual way Returns the Distribution instance, which provides all information used to drive the Distutils. 
""" if stop_after not in ('init', 'config', 'commandline', 'run'): raise ValueError, "invalid value for 'stop_after': %r" % (stop_after,) global _setup_stop_after, _setup_distribution _setup_stop_after = stop_after save_argv = sys.argv g = {'__file__': script_name} l = {} try: try: sys.argv[0] = script_name if script_args is not None: sys.argv[1:] = script_args f = open(script_name) try: exec f.read() in g, l finally: f.close() finally: sys.argv = save_argv _setup_stop_after = None except SystemExit: # Hmm, should we do something if exiting with a non-zero code # (ie. error)? pass except: raise if _setup_distribution is None: raise RuntimeError, \ ("'distutils.core.setup()' was never called -- " "perhaps '%s' is not a Distutils setup script?") % \ script_name # I wonder if the setup script's namespace -- g and l -- would be of # any interest to callers? return _setup_distribution
mit
Jgarcia-IAS/SAT
openerp/addons/hr_attendance/res_config.py
434
1406
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Business Applications # Copyright (C) 2004-2012 OpenERP S.A. (<http://openerp.com>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.osv import fields, osv class hr_attendance_config_settings(osv.osv_memory): _inherit = 'hr.config.settings' _columns = { 'group_hr_attendance': fields.boolean('Track attendances for all employees', implied_group='base.group_hr_attendance', help="Allocates attendance group to all users."), } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
SNAPPETITE/backend
flask/lib/python2.7/site-packages/pip/__init__.py
57
10427
#!/usr/bin/env python from __future__ import absolute_import import locale import logging import os import optparse import warnings import sys import re from pip.exceptions import InstallationError, CommandError, PipError from pip.utils import get_installed_distributions, get_prog from pip.utils import deprecation, dist_is_editable from pip.vcs import git, mercurial, subversion, bazaar # noqa from pip.baseparser import ConfigOptionParser, UpdatingDefaultsHelpFormatter from pip.commands import get_summaries, get_similar_commands from pip.commands import commands_dict from pip._vendor.requests.packages.urllib3.exceptions import ( InsecureRequestWarning, ) # assignment for flake8 to be happy # This fixes a peculiarity when importing via __import__ - as we are # initialising the pip module, "from pip import cmdoptions" is recursive # and appears not to work properly in that situation. import pip.cmdoptions cmdoptions = pip.cmdoptions # The version as used in the setup.py and the docs conf.py __version__ = "8.1.1" logger = logging.getLogger(__name__) # Hide the InsecureRequestWArning from urllib3 warnings.filterwarnings("ignore", category=InsecureRequestWarning) def autocomplete(): """Command and option completion for the main option parser (and options) and its subcommands (and options). Enable by sourcing one of the completion shell scripts (bash or zsh). """ # Don't complete if user hasn't sourced bash_completion file. 
if 'PIP_AUTO_COMPLETE' not in os.environ: return cwords = os.environ['COMP_WORDS'].split()[1:] cword = int(os.environ['COMP_CWORD']) try: current = cwords[cword - 1] except IndexError: current = '' subcommands = [cmd for cmd, summary in get_summaries()] options = [] # subcommand try: subcommand_name = [w for w in cwords if w in subcommands][0] except IndexError: subcommand_name = None parser = create_main_parser() # subcommand options if subcommand_name: # special case: 'help' subcommand has no options if subcommand_name == 'help': sys.exit(1) # special case: list locally installed dists for uninstall command if subcommand_name == 'uninstall' and not current.startswith('-'): installed = [] lc = current.lower() for dist in get_installed_distributions(local_only=True): if dist.key.startswith(lc) and dist.key not in cwords[1:]: installed.append(dist.key) # if there are no dists installed, fall back to option completion if installed: for dist in installed: print(dist) sys.exit(1) subcommand = commands_dict[subcommand_name]() options += [(opt.get_opt_string(), opt.nargs) for opt in subcommand.parser.option_list_all if opt.help != optparse.SUPPRESS_HELP] # filter out previously specified options from available options prev_opts = [x.split('=')[0] for x in cwords[1:cword - 1]] options = [(x, v) for (x, v) in options if x not in prev_opts] # filter options by current input options = [(k, v) for k, v in options if k.startswith(current)] for option in options: opt_label = option[0] # append '=' to options which require args if option[1]: opt_label += '=' print(opt_label) else: # show main parser options only when necessary if current.startswith('-') or current.startswith('--'): opts = [i.option_list for i in parser.option_groups] opts.append(parser.option_list) opts = (o for it in opts for o in it) subcommands += [i.get_opt_string() for i in opts if i.help != optparse.SUPPRESS_HELP] print(' '.join([x for x in subcommands if x.startswith(current)])) sys.exit(1) def 
create_main_parser(): parser_kw = { 'usage': '\n%prog <command> [options]', 'add_help_option': False, 'formatter': UpdatingDefaultsHelpFormatter(), 'name': 'global', 'prog': get_prog(), } parser = ConfigOptionParser(**parser_kw) parser.disable_interspersed_args() pip_pkg_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) parser.version = 'pip %s from %s (python %s)' % ( __version__, pip_pkg_dir, sys.version[:3]) # add the general options gen_opts = cmdoptions.make_option_group(cmdoptions.general_group, parser) parser.add_option_group(gen_opts) parser.main = True # so the help formatter knows # create command listing for description command_summaries = get_summaries() description = [''] + ['%-27s %s' % (i, j) for i, j in command_summaries] parser.description = '\n'.join(description) return parser def parseopts(args): parser = create_main_parser() # Note: parser calls disable_interspersed_args(), so the result of this # call is to split the initial args into the general options before the # subcommand and everything else. 
# For example: # args: ['--timeout=5', 'install', '--user', 'INITools'] # general_options: ['--timeout==5'] # args_else: ['install', '--user', 'INITools'] general_options, args_else = parser.parse_args(args) # --version if general_options.version: sys.stdout.write(parser.version) sys.stdout.write(os.linesep) sys.exit() # pip || pip help -> print_help() if not args_else or (args_else[0] == 'help' and len(args_else) == 1): parser.print_help() sys.exit() # the subcommand name cmd_name = args_else[0] if cmd_name not in commands_dict: guess = get_similar_commands(cmd_name) msg = ['unknown command "%s"' % cmd_name] if guess: msg.append('maybe you meant "%s"' % guess) raise CommandError(' - '.join(msg)) # all the args without the subcommand cmd_args = args[:] cmd_args.remove(cmd_name) return cmd_name, cmd_args def check_isolated(args): isolated = False if "--isolated" in args: isolated = True return isolated def main(args=None): if args is None: args = sys.argv[1:] # Configure our deprecation warnings to be sent through loggers deprecation.install_warning_logger() autocomplete() try: cmd_name, cmd_args = parseopts(args) except PipError as exc: sys.stderr.write("ERROR: %s" % exc) sys.stderr.write(os.linesep) sys.exit(1) # Needed for locale.getpreferredencoding(False) to work # in pip.utils.encoding.auto_decode locale.setlocale(locale.LC_ALL, '') command = commands_dict[cmd_name](isolated=check_isolated(cmd_args)) return command.main(cmd_args) # ########################################################### # # Writing freeze files class FrozenRequirement(object): def __init__(self, name, req, editable, comments=()): self.name = name self.req = req self.editable = editable self.comments = comments _rev_re = re.compile(r'-r(\d+)$') _date_re = re.compile(r'-(20\d\d\d\d\d\d)$') @classmethod def from_dist(cls, dist, dependency_links): location = os.path.normcase(os.path.abspath(dist.location)) comments = [] from pip.vcs import vcs, get_src_requirement if dist_is_editable(dist) and 
vcs.get_backend_name(location): editable = True try: req = get_src_requirement(dist, location) except InstallationError as exc: logger.warning( "Error when trying to get requirement for VCS system %s, " "falling back to uneditable format", exc ) req = None if req is None: logger.warning( 'Could not determine repository location of %s', location ) comments.append( '## !! Could not determine repository location' ) req = dist.as_requirement() editable = False else: editable = False req = dist.as_requirement() specs = req.specs assert len(specs) == 1 and specs[0][0] in ["==", "==="], \ 'Expected 1 spec with == or ===; specs = %r; dist = %r' % \ (specs, dist) version = specs[0][1] ver_match = cls._rev_re.search(version) date_match = cls._date_re.search(version) if ver_match or date_match: svn_backend = vcs.get_backend('svn') if svn_backend: svn_location = svn_backend().get_location( dist, dependency_links, ) if not svn_location: logger.warning( 'Warning: cannot find svn location for %s', req) comments.append( '## FIXME: could not find svn URL in dependency_links ' 'for this package:' ) else: comments.append( '# Installing as editable to satisfy requirement %s:' % req ) if ver_match: rev = ver_match.group(1) else: rev = '{%s}' % date_match.group(1) editable = True req = '%s@%s#egg=%s' % ( svn_location, rev, cls.egg_name(dist) ) return cls(dist.project_name, req, editable, comments) @staticmethod def egg_name(dist): name = dist.egg_name() match = re.search(r'-py\d\.\d$', name) if match: name = name[:match.start()] return name def __str__(self): req = self.req if self.editable: req = '-e %s' % req return '\n'.join(list(self.comments) + [str(req)]) + '\n' if __name__ == '__main__': sys.exit(main())
mit
babble/babble
include/jython/Lib/xml/dom/domreg.py
126
3481
"""Registration facilities for DOM. This module should not be used directly. Instead, the functions getDOMImplementation and registerDOMImplementation should be imported from xml.dom.""" from xml.dom.minicompat import * # isinstance, StringTypes # This is a list of well-known implementations. Well-known names # should be published by posting to xml-sig@python.org, and are # subsequently recorded in this file. well_known_implementations = { 'minidom':'xml.dom.minidom', '4DOM': 'xml.dom.DOMImplementation', } # DOM implementations not officially registered should register # themselves with their registered = {} def registerDOMImplementation(name, factory): """registerDOMImplementation(name, factory) Register the factory function with the name. The factory function should return an object which implements the DOMImplementation interface. The factory function can either return the same object, or a new one (e.g. if that implementation supports some customization).""" registered[name] = factory def _good_enough(dom, features): "_good_enough(dom, features) -> Return 1 if the dom offers the features" for f,v in features: if not dom.hasFeature(f,v): return 0 return 1 def getDOMImplementation(name = None, features = ()): """getDOMImplementation(name = None, features = ()) -> DOM implementation. Return a suitable DOM implementation. The name is either well-known, the module name of a DOM implementation, or None. If it is not None, imports the corresponding module and returns DOMImplementation object if the import succeeds. If name is not given, consider the available implementations to find one with the required feature set. If no implementation can be found, raise an ImportError. 
The features list must be a sequence of (feature, version) pairs which are passed to hasFeature.""" import os creator = None mod = well_known_implementations.get(name) if mod: mod = __import__(mod, {}, {}, ['getDOMImplementation']) return mod.getDOMImplementation() elif name: return registered[name]() elif os.environ.has_key("PYTHON_DOM"): return getDOMImplementation(name = os.environ["PYTHON_DOM"]) # User did not specify a name, try implementations in arbitrary # order, returning the one that has the required features if isinstance(features, StringTypes): features = _parse_feature_string(features) for creator in registered.values(): dom = creator() if _good_enough(dom, features): return dom for creator in well_known_implementations.keys(): try: dom = getDOMImplementation(name = creator) except StandardError: # typically ImportError, or AttributeError continue if _good_enough(dom, features): return dom raise ImportError,"no suitable DOM implementation found" def _parse_feature_string(s): features = [] parts = s.split() i = 0 length = len(parts) while i < length: feature = parts[i] if feature[0] in "0123456789": raise ValueError, "bad feature name: " + `feature` i = i + 1 version = None if i < length: v = parts[i] if v[0] in "0123456789": i = i + 1 version = v features.append((feature, version)) return tuple(features)
apache-2.0
vincentltz/ns-3-dev-git
src/virtual-net-device/bindings/modulegen__gcc_LP64.py
38
206220
from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers import pybindgen.settings import warnings class ErrorHandler(pybindgen.settings.ErrorHandler): def handle_error(self, wrapper, exception, traceback_): warnings.warn("exception %r in wrapper %s" % (exception, wrapper)) return True pybindgen.settings.error_handler = ErrorHandler() import sys def module_init(): root_module = Module('ns.virtual_net_device', cpp_namespace='::ns3') return root_module def register_types(module): root_module = module.get_root() ## address.h (module 'network'): ns3::Address [class] module.add_class('Address', import_from_module='ns.network') ## address.h (module 'network'): ns3::Address::MaxSize_e [enumeration] module.add_enum('MaxSize_e', ['MAX_SIZE'], outer_class=root_module['ns3::Address'], import_from_module='ns.network') ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList [class] module.add_class('AttributeConstructionList', import_from_module='ns.core') ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item [struct] module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList']) ## buffer.h (module 'network'): ns3::Buffer [class] module.add_class('Buffer', import_from_module='ns.network') ## buffer.h (module 'network'): ns3::Buffer::Iterator [class] module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::Buffer']) ## packet.h (module 'network'): ns3::ByteTagIterator [class] module.add_class('ByteTagIterator', import_from_module='ns.network') ## packet.h (module 'network'): ns3::ByteTagIterator::Item [class] module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagIterator']) ## byte-tag-list.h (module 'network'): ns3::ByteTagList [class] module.add_class('ByteTagList', import_from_module='ns.network') ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator 
[class] module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList']) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item [struct] module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList::Iterator']) ## callback.h (module 'core'): ns3::CallbackBase [class] module.add_class('CallbackBase', import_from_module='ns.core') ## hash.h (module 'core'): ns3::Hasher [class] module.add_class('Hasher', import_from_module='ns.core') ## ipv4-address.h (module 'network'): ns3::Ipv4Address [class] module.add_class('Ipv4Address', import_from_module='ns.network') ## ipv4-address.h (module 'network'): ns3::Ipv4Address [class] root_module['ns3::Ipv4Address'].implicitly_converts_to(root_module['ns3::Address']) ## ipv4-address.h (module 'network'): ns3::Ipv4Mask [class] module.add_class('Ipv4Mask', import_from_module='ns.network') ## ipv6-address.h (module 'network'): ns3::Ipv6Address [class] module.add_class('Ipv6Address', import_from_module='ns.network') ## ipv6-address.h (module 'network'): ns3::Ipv6Address [class] root_module['ns3::Ipv6Address'].implicitly_converts_to(root_module['ns3::Address']) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix [class] module.add_class('Ipv6Prefix', import_from_module='ns.network') ## object-base.h (module 'core'): ns3::ObjectBase [class] module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core') ## object.h (module 'core'): ns3::ObjectDeleter [struct] module.add_class('ObjectDeleter', import_from_module='ns.core') ## packet-metadata.h (module 'network'): ns3::PacketMetadata [class] module.add_class('PacketMetadata', import_from_module='ns.network') ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [struct] module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata']) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [enumeration] 
module.add_enum('', ['PAYLOAD', 'HEADER', 'TRAILER'], outer_class=root_module['ns3::PacketMetadata::Item'], import_from_module='ns.network') ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator [class] module.add_class('ItemIterator', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata']) ## packet.h (module 'network'): ns3::PacketTagIterator [class] module.add_class('PacketTagIterator', import_from_module='ns.network') ## packet.h (module 'network'): ns3::PacketTagIterator::Item [class] module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagIterator']) ## packet-tag-list.h (module 'network'): ns3::PacketTagList [class] module.add_class('PacketTagList', import_from_module='ns.network') ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData [struct] module.add_class('TagData', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagList']) ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData_e [enumeration] module.add_enum('TagData_e', ['MAX_SIZE'], outer_class=root_module['ns3::PacketTagList::TagData'], import_from_module='ns.network') ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## tag.h (module 'network'): ns3::Tag [class] module.add_class('Tag', import_from_module='ns.network', parent=root_module['ns3::ObjectBase']) ## tag-buffer.h (module 'network'): ns3::TagBuffer [class] module.add_class('TagBuffer', import_from_module='ns.network') ## type-id.h (module 'core'): ns3::TypeId [class] 
module.add_class('TypeId', import_from_module='ns.core') ## type-id.h (module 'core'): ns3::TypeId::AttributeFlag [enumeration] module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core') ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation [struct] module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId']) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation [struct] module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId']) ## empty.h (module 'core'): ns3::empty [class] module.add_class('empty', import_from_module='ns.core') ## chunk.h (module 'network'): ns3::Chunk [class] module.add_class('Chunk', import_from_module='ns.network', parent=root_module['ns3::ObjectBase']) ## header.h (module 'network'): ns3::Header [class] module.add_class('Header', import_from_module='ns.network', parent=root_module['ns3::Chunk']) ## object.h (module 'core'): ns3::Object [class] module.add_class('Object', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >']) ## object.h (module 'core'): ns3::Object::AggregateIterator [class] module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object']) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): 
ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Hash::Implementation', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Hash::Implementation>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', 
decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::NixVector', 'ns3::empty', 'ns3::DefaultDeleter<ns3::NixVector>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Packet', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Packet>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor [class] module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >']) ## trailer.h (module 'network'): ns3::Trailer [class] module.add_class('Trailer', import_from_module='ns.network', parent=root_module['ns3::Chunk']) ## 
attribute.h (module 'core'): ns3::AttributeAccessor [class] module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >']) ## attribute.h (module 'core'): ns3::AttributeChecker [class] module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >']) ## attribute.h (module 'core'): ns3::AttributeValue [class] module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >']) ## callback.h (module 'core'): ns3::CallbackChecker [class] module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## callback.h (module 'core'): ns3::CallbackImplBase [class] module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >']) ## callback.h (module 'core'): ns3::CallbackValue [class] module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## attribute.h (module 'core'): ns3::EmptyAttributeValue [class] module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker [class] module.add_class('Ipv4AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue [class] module.add_class('Ipv4AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue']) 
## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker [class] module.add_class('Ipv4MaskChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue [class] module.add_class('Ipv4MaskValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue']) ## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker [class] module.add_class('Ipv6AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue [class] module.add_class('Ipv6AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue']) ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker [class] module.add_class('Ipv6PrefixChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue [class] module.add_class('Ipv6PrefixValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue']) ## net-device.h (module 'network'): ns3::NetDevice [class] module.add_class('NetDevice', import_from_module='ns.network', parent=root_module['ns3::Object']) ## net-device.h (module 'network'): ns3::NetDevice::PacketType [enumeration] module.add_enum('PacketType', ['PACKET_HOST', 'NS3_PACKET_HOST', 'PACKET_BROADCAST', 'NS3_PACKET_BROADCAST', 'PACKET_MULTICAST', 'NS3_PACKET_MULTICAST', 'PACKET_OTHERHOST', 'NS3_PACKET_OTHERHOST'], outer_class=root_module['ns3::NetDevice'], import_from_module='ns.network') ## nix-vector.h (module 'network'): ns3::NixVector [class] module.add_class('NixVector', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >']) ## node.h (module 'network'): ns3::Node [class] module.add_class('Node', import_from_module='ns.network', parent=root_module['ns3::Object']) ## packet.h (module 
'network'): ns3::Packet [class] module.add_class('Packet', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >']) ## type-id.h (module 'core'): ns3::TypeIdChecker [class] module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## type-id.h (module 'core'): ns3::TypeIdValue [class] module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## virtual-net-device.h (module 'virtual-net-device'): ns3::VirtualNetDevice [class] module.add_class('VirtualNetDevice', parent=root_module['ns3::NetDevice']) ## address.h (module 'network'): ns3::AddressChecker [class] module.add_class('AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## address.h (module 'network'): ns3::AddressValue [class] module.add_class('AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue']) ## Register a nested module for the namespace FatalImpl nested_module = module.add_cpp_namespace('FatalImpl') register_types_ns3_FatalImpl(nested_module) ## Register a nested module for the namespace Hash nested_module = module.add_cpp_namespace('Hash') register_types_ns3_Hash(nested_module) def register_types_ns3_FatalImpl(module): root_module = module.get_root() def register_types_ns3_Hash(module): root_module = module.get_root() ## hash-function.h (module 'core'): ns3::Hash::Implementation [class] module.add_class('Implementation', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >']) typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) *', u'ns3::Hash::Hash32Function_ptr') typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) **', u'ns3::Hash::Hash32Function_ptr*') typehandlers.add_type_alias(u'uint32_t ( * ) ( char 
const *, size_t ) *&', u'ns3::Hash::Hash32Function_ptr&') typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) *', u'ns3::Hash::Hash64Function_ptr') typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) **', u'ns3::Hash::Hash64Function_ptr*') typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) *&', u'ns3::Hash::Hash64Function_ptr&') ## Register a nested module for the namespace Function nested_module = module.add_cpp_namespace('Function') register_types_ns3_Hash_Function(nested_module) def register_types_ns3_Hash_Function(module): root_module = module.get_root() ## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a [class] module.add_class('Fnv1a', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation']) ## hash-function.h (module 'core'): ns3::Hash::Function::Hash32 [class] module.add_class('Hash32', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation']) ## hash-function.h (module 'core'): ns3::Hash::Function::Hash64 [class] module.add_class('Hash64', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation']) ## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3 [class] module.add_class('Murmur3', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation']) def register_methods(root_module): register_Ns3Address_methods(root_module, root_module['ns3::Address']) register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList']) register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item']) register_Ns3Buffer_methods(root_module, root_module['ns3::Buffer']) register_Ns3BufferIterator_methods(root_module, root_module['ns3::Buffer::Iterator']) register_Ns3ByteTagIterator_methods(root_module, root_module['ns3::ByteTagIterator']) register_Ns3ByteTagIteratorItem_methods(root_module, root_module['ns3::ByteTagIterator::Item']) 
register_Ns3ByteTagList_methods(root_module, root_module['ns3::ByteTagList']) register_Ns3ByteTagListIterator_methods(root_module, root_module['ns3::ByteTagList::Iterator']) register_Ns3ByteTagListIteratorItem_methods(root_module, root_module['ns3::ByteTagList::Iterator::Item']) register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase']) register_Ns3Hasher_methods(root_module, root_module['ns3::Hasher']) register_Ns3Ipv4Address_methods(root_module, root_module['ns3::Ipv4Address']) register_Ns3Ipv4Mask_methods(root_module, root_module['ns3::Ipv4Mask']) register_Ns3Ipv6Address_methods(root_module, root_module['ns3::Ipv6Address']) register_Ns3Ipv6Prefix_methods(root_module, root_module['ns3::Ipv6Prefix']) register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase']) register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter']) register_Ns3PacketMetadata_methods(root_module, root_module['ns3::PacketMetadata']) register_Ns3PacketMetadataItem_methods(root_module, root_module['ns3::PacketMetadata::Item']) register_Ns3PacketMetadataItemIterator_methods(root_module, root_module['ns3::PacketMetadata::ItemIterator']) register_Ns3PacketTagIterator_methods(root_module, root_module['ns3::PacketTagIterator']) register_Ns3PacketTagIteratorItem_methods(root_module, root_module['ns3::PacketTagIterator::Item']) register_Ns3PacketTagList_methods(root_module, root_module['ns3::PacketTagList']) register_Ns3PacketTagListTagData_methods(root_module, root_module['ns3::PacketTagList::TagData']) register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >']) register_Ns3Tag_methods(root_module, root_module['ns3::Tag']) register_Ns3TagBuffer_methods(root_module, root_module['ns3::TagBuffer']) register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId']) register_Ns3TypeIdAttributeInformation_methods(root_module, 
root_module['ns3::TypeId::AttributeInformation']) register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation']) register_Ns3Empty_methods(root_module, root_module['ns3::empty']) register_Ns3Chunk_methods(root_module, root_module['ns3::Chunk']) register_Ns3Header_methods(root_module, root_module['ns3::Header']) register_Ns3Object_methods(root_module, root_module['ns3::Object']) register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator']) register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >']) register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >']) register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >']) register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >']) register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >']) register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >']) 
register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >']) register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >']) register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor']) register_Ns3Trailer_methods(root_module, root_module['ns3::Trailer']) register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor']) register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker']) register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue']) register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker']) register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase']) register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue']) register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue']) register_Ns3Ipv4AddressChecker_methods(root_module, root_module['ns3::Ipv4AddressChecker']) register_Ns3Ipv4AddressValue_methods(root_module, root_module['ns3::Ipv4AddressValue']) register_Ns3Ipv4MaskChecker_methods(root_module, root_module['ns3::Ipv4MaskChecker']) register_Ns3Ipv4MaskValue_methods(root_module, root_module['ns3::Ipv4MaskValue']) register_Ns3Ipv6AddressChecker_methods(root_module, root_module['ns3::Ipv6AddressChecker']) register_Ns3Ipv6AddressValue_methods(root_module, root_module['ns3::Ipv6AddressValue']) register_Ns3Ipv6PrefixChecker_methods(root_module, root_module['ns3::Ipv6PrefixChecker']) register_Ns3Ipv6PrefixValue_methods(root_module, root_module['ns3::Ipv6PrefixValue']) register_Ns3NetDevice_methods(root_module, 
root_module['ns3::NetDevice']) register_Ns3NixVector_methods(root_module, root_module['ns3::NixVector']) register_Ns3Node_methods(root_module, root_module['ns3::Node']) register_Ns3Packet_methods(root_module, root_module['ns3::Packet']) register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker']) register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue']) register_Ns3VirtualNetDevice_methods(root_module, root_module['ns3::VirtualNetDevice']) register_Ns3AddressChecker_methods(root_module, root_module['ns3::AddressChecker']) register_Ns3AddressValue_methods(root_module, root_module['ns3::AddressValue']) register_Ns3HashImplementation_methods(root_module, root_module['ns3::Hash::Implementation']) register_Ns3HashFunctionFnv1a_methods(root_module, root_module['ns3::Hash::Function::Fnv1a']) register_Ns3HashFunctionHash32_methods(root_module, root_module['ns3::Hash::Function::Hash32']) register_Ns3HashFunctionHash64_methods(root_module, root_module['ns3::Hash::Function::Hash64']) register_Ns3HashFunctionMurmur3_methods(root_module, root_module['ns3::Hash::Function::Murmur3']) return def register_Ns3Address_methods(root_module, cls): cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## address.h (module 'network'): ns3::Address::Address() [constructor] cls.add_constructor([]) ## address.h (module 'network'): ns3::Address::Address(uint8_t type, uint8_t const * buffer, uint8_t len) [constructor] cls.add_constructor([param('uint8_t', 'type'), param('uint8_t const *', 'buffer'), param('uint8_t', 'len')]) ## address.h (module 'network'): ns3::Address::Address(ns3::Address const & address) [copy constructor] cls.add_constructor([param('ns3::Address const &', 'address')]) ## address.h (module 'network'): bool ns3::Address::CheckCompatible(uint8_t type, uint8_t len) const [member function] cls.add_method('CheckCompatible', 'bool', 
[param('uint8_t', 'type'), param('uint8_t', 'len')], is_const=True) ## address.h (module 'network'): uint32_t ns3::Address::CopyAllFrom(uint8_t const * buffer, uint8_t len) [member function] cls.add_method('CopyAllFrom', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint8_t', 'len')]) ## address.h (module 'network'): uint32_t ns3::Address::CopyAllTo(uint8_t * buffer, uint8_t len) const [member function] cls.add_method('CopyAllTo', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint8_t', 'len')], is_const=True) ## address.h (module 'network'): uint32_t ns3::Address::CopyFrom(uint8_t const * buffer, uint8_t len) [member function] cls.add_method('CopyFrom', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint8_t', 'len')]) ## address.h (module 'network'): uint32_t ns3::Address::CopyTo(uint8_t * buffer) const [member function] cls.add_method('CopyTo', 'uint32_t', [param('uint8_t *', 'buffer')], is_const=True) ## address.h (module 'network'): void ns3::Address::Deserialize(ns3::TagBuffer buffer) [member function] cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'buffer')]) ## address.h (module 'network'): uint8_t ns3::Address::GetLength() const [member function] cls.add_method('GetLength', 'uint8_t', [], is_const=True) ## address.h (module 'network'): uint32_t ns3::Address::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True) ## address.h (module 'network'): bool ns3::Address::IsInvalid() const [member function] cls.add_method('IsInvalid', 'bool', [], is_const=True) ## address.h (module 'network'): bool ns3::Address::IsMatchingType(uint8_t type) const [member function] cls.add_method('IsMatchingType', 'bool', [param('uint8_t', 'type')], is_const=True) ## address.h (module 'network'): static uint8_t ns3::Address::Register() [member function] cls.add_method('Register', 'uint8_t', [], is_static=True) ## address.h (module 'network'): void ns3::Address::Serialize(ns3::TagBuffer 
buffer) const [member function] cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'buffer')], is_const=True) return def register_Ns3AttributeConstructionList_methods(root_module, cls): ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList(ns3::AttributeConstructionList const & arg0) [copy constructor] cls.add_constructor([param('ns3::AttributeConstructionList const &', 'arg0')]) ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList() [constructor] cls.add_constructor([]) ## attribute-construction-list.h (module 'core'): void ns3::AttributeConstructionList::Add(std::string name, ns3::Ptr<ns3::AttributeChecker const> checker, ns3::Ptr<ns3::AttributeValue> value) [member function] cls.add_method('Add', 'void', [param('std::string', 'name'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::Ptr< ns3::AttributeValue >', 'value')]) ## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::Begin() const [member function] cls.add_method('Begin', 'std::_List_const_iterator< ns3::AttributeConstructionList::Item >', [], is_const=True) ## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::End() const [member function] cls.add_method('End', 'std::_List_const_iterator< ns3::AttributeConstructionList::Item >', [], is_const=True) ## attribute-construction-list.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeConstructionList::Find(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('Find', 'ns3::Ptr< ns3::AttributeValue >', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True) return def register_Ns3AttributeConstructionListItem_methods(root_module, cls): ## attribute-construction-list.h 
(module 'core'): ns3::AttributeConstructionList::Item::Item() [constructor] cls.add_constructor([]) ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item(ns3::AttributeConstructionList::Item const & arg0) [copy constructor] cls.add_constructor([param('ns3::AttributeConstructionList::Item const &', 'arg0')]) ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::checker [variable] cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False) ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::name [variable] cls.add_instance_attribute('name', 'std::string', is_const=False) ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::value [variable] cls.add_instance_attribute('value', 'ns3::Ptr< ns3::AttributeValue >', is_const=False) return def register_Ns3Buffer_methods(root_module, cls): ## buffer.h (module 'network'): ns3::Buffer::Buffer() [constructor] cls.add_constructor([]) ## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize) [constructor] cls.add_constructor([param('uint32_t', 'dataSize')]) ## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize, bool initialize) [constructor] cls.add_constructor([param('uint32_t', 'dataSize'), param('bool', 'initialize')]) ## buffer.h (module 'network'): ns3::Buffer::Buffer(ns3::Buffer const & o) [copy constructor] cls.add_constructor([param('ns3::Buffer const &', 'o')]) ## buffer.h (module 'network'): bool ns3::Buffer::AddAtEnd(uint32_t end) [member function] cls.add_method('AddAtEnd', 'bool', [param('uint32_t', 'end')]) ## buffer.h (module 'network'): void ns3::Buffer::AddAtEnd(ns3::Buffer const & o) [member function] cls.add_method('AddAtEnd', 'void', [param('ns3::Buffer const &', 'o')]) ## buffer.h (module 'network'): bool ns3::Buffer::AddAtStart(uint32_t start) [member function] cls.add_method('AddAtStart', 'bool', 
[param('uint32_t', 'start')]) ## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::Begin() const [member function] cls.add_method('Begin', 'ns3::Buffer::Iterator', [], is_const=True) ## buffer.h (module 'network'): void ns3::Buffer::CopyData(std::ostream * os, uint32_t size) const [member function] cls.add_method('CopyData', 'void', [param('std::ostream *', 'os'), param('uint32_t', 'size')], is_const=True) ## buffer.h (module 'network'): uint32_t ns3::Buffer::CopyData(uint8_t * buffer, uint32_t size) const [member function] cls.add_method('CopyData', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')], is_const=True) ## buffer.h (module 'network'): ns3::Buffer ns3::Buffer::CreateFragment(uint32_t start, uint32_t length) const [member function] cls.add_method('CreateFragment', 'ns3::Buffer', [param('uint32_t', 'start'), param('uint32_t', 'length')], is_const=True) ## buffer.h (module 'network'): ns3::Buffer ns3::Buffer::CreateFullCopy() const [member function] cls.add_method('CreateFullCopy', 'ns3::Buffer', [], is_const=True) ## buffer.h (module 'network'): uint32_t ns3::Buffer::Deserialize(uint8_t const * buffer, uint32_t size) [member function] cls.add_method('Deserialize', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')]) ## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::End() const [member function] cls.add_method('End', 'ns3::Buffer::Iterator', [], is_const=True) ## buffer.h (module 'network'): int32_t ns3::Buffer::GetCurrentEndOffset() const [member function] cls.add_method('GetCurrentEndOffset', 'int32_t', [], is_const=True) ## buffer.h (module 'network'): int32_t ns3::Buffer::GetCurrentStartOffset() const [member function] cls.add_method('GetCurrentStartOffset', 'int32_t', [], is_const=True) ## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True) ## buffer.h (module 
'network'): uint32_t ns3::Buffer::GetSize() const [member function] cls.add_method('GetSize', 'uint32_t', [], is_const=True) ## buffer.h (module 'network'): uint8_t const * ns3::Buffer::PeekData() const [member function] cls.add_method('PeekData', 'uint8_t const *', [], is_const=True) ## buffer.h (module 'network'): void ns3::Buffer::RemoveAtEnd(uint32_t end) [member function] cls.add_method('RemoveAtEnd', 'void', [param('uint32_t', 'end')]) ## buffer.h (module 'network'): void ns3::Buffer::RemoveAtStart(uint32_t start) [member function] cls.add_method('RemoveAtStart', 'void', [param('uint32_t', 'start')]) ## buffer.h (module 'network'): uint32_t ns3::Buffer::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function] cls.add_method('Serialize', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')], is_const=True) return def register_Ns3BufferIterator_methods(root_module, cls): ## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator(ns3::Buffer::Iterator const & arg0) [copy constructor] cls.add_constructor([param('ns3::Buffer::Iterator const &', 'arg0')]) ## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator() [constructor] cls.add_constructor([]) ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size) [member function] cls.add_method('CalculateIpChecksum', 'uint16_t', [param('uint16_t', 'size')]) ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size, uint32_t initialChecksum) [member function] cls.add_method('CalculateIpChecksum', 'uint16_t', [param('uint16_t', 'size'), param('uint32_t', 'initialChecksum')]) ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetDistanceFrom(ns3::Buffer::Iterator const & o) const [member function] cls.add_method('GetDistanceFrom', 'uint32_t', [param('ns3::Buffer::Iterator const &', 'o')], is_const=True) ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetSize() const 
[member function] cls.add_method('GetSize', 'uint32_t', [], is_const=True) ## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsEnd() const [member function] cls.add_method('IsEnd', 'bool', [], is_const=True) ## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsStart() const [member function] cls.add_method('IsStart', 'bool', [], is_const=True) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next() [member function] cls.add_method('Next', 'void', []) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next(uint32_t delta) [member function] cls.add_method('Next', 'void', [param('uint32_t', 'delta')]) ## buffer.h (module 'network'): uint8_t ns3::Buffer::Iterator::PeekU8() [member function] cls.add_method('PeekU8', 'uint8_t', []) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev() [member function] cls.add_method('Prev', 'void', []) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev(uint32_t delta) [member function] cls.add_method('Prev', 'void', [param('uint32_t', 'delta')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Read(uint8_t * buffer, uint32_t size) [member function] cls.add_method('Read', 'void', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Read(ns3::Buffer::Iterator start, uint32_t size) [member function] cls.add_method('Read', 'void', [param('ns3::Buffer::Iterator', 'start'), param('uint32_t', 'size')]) ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadLsbtohU16() [member function] cls.add_method('ReadLsbtohU16', 'uint16_t', []) ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadLsbtohU32() [member function] cls.add_method('ReadLsbtohU32', 'uint32_t', []) ## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadLsbtohU64() [member function] cls.add_method('ReadLsbtohU64', 'uint64_t', []) ## buffer.h (module 'network'): uint16_t 
ns3::Buffer::Iterator::ReadNtohU16() [member function] cls.add_method('ReadNtohU16', 'uint16_t', []) ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadNtohU32() [member function] cls.add_method('ReadNtohU32', 'uint32_t', []) ## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadNtohU64() [member function] cls.add_method('ReadNtohU64', 'uint64_t', []) ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadU16() [member function] cls.add_method('ReadU16', 'uint16_t', []) ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadU32() [member function] cls.add_method('ReadU32', 'uint32_t', []) ## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadU64() [member function] cls.add_method('ReadU64', 'uint64_t', []) ## buffer.h (module 'network'): uint8_t ns3::Buffer::Iterator::ReadU8() [member function] cls.add_method('ReadU8', 'uint8_t', []) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(uint8_t const * buffer, uint32_t size) [member function] cls.add_method('Write', 'void', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(ns3::Buffer::Iterator start, ns3::Buffer::Iterator end) [member function] cls.add_method('Write', 'void', [param('ns3::Buffer::Iterator', 'start'), param('ns3::Buffer::Iterator', 'end')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU16(uint16_t data) [member function] cls.add_method('WriteHtolsbU16', 'void', [param('uint16_t', 'data')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU32(uint32_t data) [member function] cls.add_method('WriteHtolsbU32', 'void', [param('uint32_t', 'data')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU64(uint64_t data) [member function] cls.add_method('WriteHtolsbU64', 'void', [param('uint64_t', 'data')]) ## buffer.h (module 'network'): void 
ns3::Buffer::Iterator::WriteHtonU16(uint16_t data) [member function] cls.add_method('WriteHtonU16', 'void', [param('uint16_t', 'data')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU32(uint32_t data) [member function] cls.add_method('WriteHtonU32', 'void', [param('uint32_t', 'data')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU64(uint64_t data) [member function] cls.add_method('WriteHtonU64', 'void', [param('uint64_t', 'data')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU16(uint16_t data) [member function] cls.add_method('WriteU16', 'void', [param('uint16_t', 'data')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU32(uint32_t data) [member function] cls.add_method('WriteU32', 'void', [param('uint32_t', 'data')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU64(uint64_t data) [member function] cls.add_method('WriteU64', 'void', [param('uint64_t', 'data')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data) [member function] cls.add_method('WriteU8', 'void', [param('uint8_t', 'data')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data, uint32_t len) [member function] cls.add_method('WriteU8', 'void', [param('uint8_t', 'data'), param('uint32_t', 'len')]) return def register_Ns3ByteTagIterator_methods(root_module, cls): ## packet.h (module 'network'): ns3::ByteTagIterator::ByteTagIterator(ns3::ByteTagIterator const & arg0) [copy constructor] cls.add_constructor([param('ns3::ByteTagIterator const &', 'arg0')]) ## packet.h (module 'network'): bool ns3::ByteTagIterator::HasNext() const [member function] cls.add_method('HasNext', 'bool', [], is_const=True) ## packet.h (module 'network'): ns3::ByteTagIterator::Item ns3::ByteTagIterator::Next() [member function] cls.add_method('Next', 'ns3::ByteTagIterator::Item', []) return def register_Ns3ByteTagIteratorItem_methods(root_module, cls): ## packet.h 
(module 'network'): ns3::ByteTagIterator::Item::Item(ns3::ByteTagIterator::Item const & arg0) [copy constructor] cls.add_constructor([param('ns3::ByteTagIterator::Item const &', 'arg0')]) ## packet.h (module 'network'): uint32_t ns3::ByteTagIterator::Item::GetEnd() const [member function] cls.add_method('GetEnd', 'uint32_t', [], is_const=True) ## packet.h (module 'network'): uint32_t ns3::ByteTagIterator::Item::GetStart() const [member function] cls.add_method('GetStart', 'uint32_t', [], is_const=True) ## packet.h (module 'network'): void ns3::ByteTagIterator::Item::GetTag(ns3::Tag & tag) const [member function] cls.add_method('GetTag', 'void', [param('ns3::Tag &', 'tag')], is_const=True) ## packet.h (module 'network'): ns3::TypeId ns3::ByteTagIterator::Item::GetTypeId() const [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_const=True) return def register_Ns3ByteTagList_methods(root_module, cls): ## byte-tag-list.h (module 'network'): ns3::ByteTagList::ByteTagList() [constructor] cls.add_constructor([]) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::ByteTagList(ns3::ByteTagList const & o) [copy constructor] cls.add_constructor([param('ns3::ByteTagList const &', 'o')]) ## byte-tag-list.h (module 'network'): ns3::TagBuffer ns3::ByteTagList::Add(ns3::TypeId tid, uint32_t bufferSize, int32_t start, int32_t end) [member function] cls.add_method('Add', 'ns3::TagBuffer', [param('ns3::TypeId', 'tid'), param('uint32_t', 'bufferSize'), param('int32_t', 'start'), param('int32_t', 'end')]) ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::Add(ns3::ByteTagList const & o) [member function] cls.add_method('Add', 'void', [param('ns3::ByteTagList const &', 'o')]) ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::AddAtEnd(int32_t adjustment, int32_t appendOffset) [member function] cls.add_method('AddAtEnd', 'void', [param('int32_t', 'adjustment'), param('int32_t', 'appendOffset')]) ## byte-tag-list.h (module 'network'): void 
ns3::ByteTagList::AddAtStart(int32_t adjustment, int32_t prependOffset) [member function] cls.add_method('AddAtStart', 'void', [param('int32_t', 'adjustment'), param('int32_t', 'prependOffset')]) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator ns3::ByteTagList::Begin(int32_t offsetStart, int32_t offsetEnd) const [member function] cls.add_method('Begin', 'ns3::ByteTagList::Iterator', [param('int32_t', 'offsetStart'), param('int32_t', 'offsetEnd')], is_const=True) ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::RemoveAll() [member function] cls.add_method('RemoveAll', 'void', []) return def register_Ns3ByteTagListIterator_methods(root_module, cls): ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Iterator(ns3::ByteTagList::Iterator const & arg0) [copy constructor] cls.add_constructor([param('ns3::ByteTagList::Iterator const &', 'arg0')]) ## byte-tag-list.h (module 'network'): uint32_t ns3::ByteTagList::Iterator::GetOffsetStart() const [member function] cls.add_method('GetOffsetStart', 'uint32_t', [], is_const=True) ## byte-tag-list.h (module 'network'): bool ns3::ByteTagList::Iterator::HasNext() const [member function] cls.add_method('HasNext', 'bool', [], is_const=True) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item ns3::ByteTagList::Iterator::Next() [member function] cls.add_method('Next', 'ns3::ByteTagList::Iterator::Item', []) return def register_Ns3ByteTagListIteratorItem_methods(root_module, cls): ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::Item(ns3::ByteTagList::Iterator::Item const & arg0) [copy constructor] cls.add_constructor([param('ns3::ByteTagList::Iterator::Item const &', 'arg0')]) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::Item(ns3::TagBuffer buf) [constructor] cls.add_constructor([param('ns3::TagBuffer', 'buf')]) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::buf [variable] 
cls.add_instance_attribute('buf', 'ns3::TagBuffer', is_const=False) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::end [variable] cls.add_instance_attribute('end', 'int32_t', is_const=False) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::size [variable] cls.add_instance_attribute('size', 'uint32_t', is_const=False) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::start [variable] cls.add_instance_attribute('start', 'int32_t', is_const=False) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::tid [variable] cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False) return def register_Ns3CallbackBase_methods(root_module, cls): ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::CallbackBase const & arg0) [copy constructor] cls.add_constructor([param('ns3::CallbackBase const &', 'arg0')]) ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase() [constructor] cls.add_constructor([]) ## callback.h (module 'core'): ns3::Ptr<ns3::CallbackImplBase> ns3::CallbackBase::GetImpl() const [member function] cls.add_method('GetImpl', 'ns3::Ptr< ns3::CallbackImplBase >', [], is_const=True) ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::Ptr<ns3::CallbackImplBase> impl) [constructor] cls.add_constructor([param('ns3::Ptr< ns3::CallbackImplBase >', 'impl')], visibility='protected') ## callback.h (module 'core'): static std::string ns3::CallbackBase::Demangle(std::string const & mangled) [member function] cls.add_method('Demangle', 'std::string', [param('std::string const &', 'mangled')], is_static=True, visibility='protected') return def register_Ns3Hasher_methods(root_module, cls): ## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Hasher const & arg0) [copy constructor] cls.add_constructor([param('ns3::Hasher const &', 'arg0')]) ## hash.h (module 'core'): ns3::Hasher::Hasher() [constructor] cls.add_constructor([]) ## hash.h (module 
'core'): ns3::Hasher::Hasher(ns3::Ptr<ns3::Hash::Implementation> hp) [constructor] cls.add_constructor([param('ns3::Ptr< ns3::Hash::Implementation >', 'hp')]) ## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(char const * buffer, size_t const size) [member function] cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')]) ## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(std::string const s) [member function] cls.add_method('GetHash32', 'uint32_t', [param('std::string const', 's')]) ## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(char const * buffer, size_t const size) [member function] cls.add_method('GetHash64', 'uint64_t', [param('char const *', 'buffer'), param('size_t const', 'size')]) ## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(std::string const s) [member function] cls.add_method('GetHash64', 'uint64_t', [param('std::string const', 's')]) ## hash.h (module 'core'): ns3::Hasher & ns3::Hasher::clear() [member function] cls.add_method('clear', 'ns3::Hasher &', []) return def register_Ns3Ipv4Address_methods(root_module, cls): cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(ns3::Ipv4Address const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4Address const &', 'arg0')]) ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address() [constructor] cls.add_constructor([]) ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(uint32_t address) [constructor] cls.add_constructor([param('uint32_t', 'address')]) ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(char const * address) [constructor] cls.add_constructor([param('char const *', 'address')]) ## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::CombineMask(ns3::Ipv4Mask 
const & mask) const [member function] cls.add_method('CombineMask', 'ns3::Ipv4Address', [param('ns3::Ipv4Mask const &', 'mask')], is_const=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::ConvertFrom(ns3::Address const & address) [member function] cls.add_method('ConvertFrom', 'ns3::Ipv4Address', [param('ns3::Address const &', 'address')], is_static=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::Deserialize(uint8_t const * buf) [member function] cls.add_method('Deserialize', 'ns3::Ipv4Address', [param('uint8_t const *', 'buf')], is_static=True) ## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Address::Get() const [member function] cls.add_method('Get', 'uint32_t', [], is_const=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetAny() [member function] cls.add_method('GetAny', 'ns3::Ipv4Address', [], is_static=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetBroadcast() [member function] cls.add_method('GetBroadcast', 'ns3::Ipv4Address', [], is_static=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetLoopback() [member function] cls.add_method('GetLoopback', 'ns3::Ipv4Address', [], is_static=True) ## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::GetSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function] cls.add_method('GetSubnetDirectedBroadcast', 'ns3::Ipv4Address', [param('ns3::Ipv4Mask const &', 'mask')], is_const=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetZero() [member function] cls.add_method('GetZero', 'ns3::Ipv4Address', [], is_static=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsBroadcast() const [member function] cls.add_method('IsBroadcast', 'bool', [], is_const=True) ## ipv4-address.h (module 'network'): bool 
ns3::Ipv4Address::IsEqual(ns3::Ipv4Address const & other) const [member function] cls.add_method('IsEqual', 'bool', [param('ns3::Ipv4Address const &', 'other')], is_const=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsLocalMulticast() const [member function] cls.add_method('IsLocalMulticast', 'bool', [], is_const=True) ## ipv4-address.h (module 'network'): static bool ns3::Ipv4Address::IsMatchingType(ns3::Address const & address) [member function] cls.add_method('IsMatchingType', 'bool', [param('ns3::Address const &', 'address')], is_static=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsMulticast() const [member function] cls.add_method('IsMulticast', 'bool', [], is_const=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function] cls.add_method('IsSubnetDirectedBroadcast', 'bool', [param('ns3::Ipv4Mask const &', 'mask')], is_const=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Serialize(uint8_t * buf) const [member function] cls.add_method('Serialize', 'void', [param('uint8_t *', 'buf')], is_const=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Set(uint32_t address) [member function] cls.add_method('Set', 'void', [param('uint32_t', 'address')]) ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Set(char const * address) [member function] cls.add_method('Set', 'void', [param('char const *', 'address')]) return def register_Ns3Ipv4Mask_methods(root_module, cls): cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(ns3::Ipv4Mask const & arg0) [copy constructor] 
cls.add_constructor([param('ns3::Ipv4Mask const &', 'arg0')]) ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask() [constructor] cls.add_constructor([]) ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(uint32_t mask) [constructor] cls.add_constructor([param('uint32_t', 'mask')]) ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(char const * mask) [constructor] cls.add_constructor([param('char const *', 'mask')]) ## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Mask::Get() const [member function] cls.add_method('Get', 'uint32_t', [], is_const=True) ## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Mask::GetInverse() const [member function] cls.add_method('GetInverse', 'uint32_t', [], is_const=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetLoopback() [member function] cls.add_method('GetLoopback', 'ns3::Ipv4Mask', [], is_static=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetOnes() [member function] cls.add_method('GetOnes', 'ns3::Ipv4Mask', [], is_static=True) ## ipv4-address.h (module 'network'): uint16_t ns3::Ipv4Mask::GetPrefixLength() const [member function] cls.add_method('GetPrefixLength', 'uint16_t', [], is_const=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetZero() [member function] cls.add_method('GetZero', 'ns3::Ipv4Mask', [], is_static=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Mask::IsEqual(ns3::Ipv4Mask other) const [member function] cls.add_method('IsEqual', 'bool', [param('ns3::Ipv4Mask', 'other')], is_const=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Mask::IsMatch(ns3::Ipv4Address a, ns3::Ipv4Address b) const [member function] cls.add_method('IsMatch', 'bool', [param('ns3::Ipv4Address', 'a'), param('ns3::Ipv4Address', 'b')], is_const=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4Mask::Print(std::ostream & os) const [member function] cls.add_method('Print', 
'void', [param('std::ostream &', 'os')], is_const=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4Mask::Set(uint32_t mask) [member function] cls.add_method('Set', 'void', [param('uint32_t', 'mask')]) return def register_Ns3Ipv6Address_methods(root_module, cls): cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address() [constructor] cls.add_constructor([]) ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(char const * address) [constructor] cls.add_constructor([param('char const *', 'address')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(uint8_t * address) [constructor] cls.add_constructor([param('uint8_t *', 'address')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(ns3::Ipv6Address const & addr) [copy constructor] cls.add_constructor([param('ns3::Ipv6Address const &', 'addr')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(ns3::Ipv6Address const * addr) [constructor] cls.add_constructor([param('ns3::Ipv6Address const *', 'addr')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Address ns3::Ipv6Address::CombinePrefix(ns3::Ipv6Prefix const & prefix) [member function] cls.add_method('CombinePrefix', 'ns3::Ipv6Address', [param('ns3::Ipv6Prefix const &', 'prefix')]) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::ConvertFrom(ns3::Address const & address) [member function] cls.add_method('ConvertFrom', 'ns3::Ipv6Address', [param('ns3::Address const &', 'address')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::Deserialize(uint8_t const * buf) [member function] cls.add_method('Deserialize', 'ns3::Ipv6Address', [param('uint8_t const *', 'buf')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address 
ns3::Ipv6Address::GetAllHostsMulticast() [member function] cls.add_method('GetAllHostsMulticast', 'ns3::Ipv6Address', [], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllNodesMulticast() [member function] cls.add_method('GetAllNodesMulticast', 'ns3::Ipv6Address', [], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllRoutersMulticast() [member function] cls.add_method('GetAllRoutersMulticast', 'ns3::Ipv6Address', [], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAny() [member function] cls.add_method('GetAny', 'ns3::Ipv6Address', [], is_static=True) ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::GetBytes(uint8_t * buf) const [member function] cls.add_method('GetBytes', 'void', [param('uint8_t *', 'buf')], is_const=True) ## ipv6-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv6Address::GetIpv4MappedAddress() const [member function] cls.add_method('GetIpv4MappedAddress', 'ns3::Ipv4Address', [], is_const=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetLoopback() [member function] cls.add_method('GetLoopback', 'ns3::Ipv6Address', [], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetOnes() [member function] cls.add_method('GetOnes', 'ns3::Ipv6Address', [], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetZero() [member function] cls.add_method('GetZero', 'ns3::Ipv6Address', [], is_static=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllHostsMulticast() const [member function] cls.add_method('IsAllHostsMulticast', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllNodesMulticast() const [member function] cls.add_method('IsAllNodesMulticast', 'bool', [], is_const=True) ## ipv6-address.h (module 
'network'): bool ns3::Ipv6Address::IsAllRoutersMulticast() const [member function] cls.add_method('IsAllRoutersMulticast', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAny() const [member function] cls.add_method('IsAny', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsDocumentation() const [member function] cls.add_method('IsDocumentation', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsEqual(ns3::Ipv6Address const & other) const [member function] cls.add_method('IsEqual', 'bool', [param('ns3::Ipv6Address const &', 'other')], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsIpv4MappedAddress() const [member function] cls.add_method('IsIpv4MappedAddress', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLinkLocal() const [member function] cls.add_method('IsLinkLocal', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLinkLocalMulticast() const [member function] cls.add_method('IsLinkLocalMulticast', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLocalhost() const [member function] cls.add_method('IsLocalhost', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): static bool ns3::Ipv6Address::IsMatchingType(ns3::Address const & address) [member function] cls.add_method('IsMatchingType', 'bool', [param('ns3::Address const &', 'address')], is_static=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsMulticast() const [member function] cls.add_method('IsMulticast', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsSolicitedMulticast() const [member function] cls.add_method('IsSolicitedMulticast', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address 
ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac16Address addr, ns3::Ipv6Address prefix) [member function] cls.add_method('MakeAutoconfiguredAddress', 'ns3::Ipv6Address', [param('ns3::Mac16Address', 'addr'), param('ns3::Ipv6Address', 'prefix')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac48Address addr, ns3::Ipv6Address prefix) [member function] cls.add_method('MakeAutoconfiguredAddress', 'ns3::Ipv6Address', [param('ns3::Mac48Address', 'addr'), param('ns3::Ipv6Address', 'prefix')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac64Address addr, ns3::Ipv6Address prefix) [member function] cls.add_method('MakeAutoconfiguredAddress', 'ns3::Ipv6Address', [param('ns3::Mac64Address', 'addr'), param('ns3::Ipv6Address', 'prefix')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac16Address mac) [member function] cls.add_method('MakeAutoconfiguredLinkLocalAddress', 'ns3::Ipv6Address', [param('ns3::Mac16Address', 'mac')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac48Address mac) [member function] cls.add_method('MakeAutoconfiguredLinkLocalAddress', 'ns3::Ipv6Address', [param('ns3::Mac48Address', 'mac')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac64Address mac) [member function] cls.add_method('MakeAutoconfiguredLinkLocalAddress', 'ns3::Ipv6Address', [param('ns3::Mac64Address', 'mac')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeIpv4MappedAddress(ns3::Ipv4Address addr) [member function] cls.add_method('MakeIpv4MappedAddress', 'ns3::Ipv6Address', [param('ns3::Ipv4Address', 
'addr')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeSolicitedAddress(ns3::Ipv6Address addr) [member function] cls.add_method('MakeSolicitedAddress', 'ns3::Ipv6Address', [param('ns3::Ipv6Address', 'addr')], is_static=True) ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True) ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Serialize(uint8_t * buf) const [member function] cls.add_method('Serialize', 'void', [param('uint8_t *', 'buf')], is_const=True) ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(char const * address) [member function] cls.add_method('Set', 'void', [param('char const *', 'address')]) ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(uint8_t * address) [member function] cls.add_method('Set', 'void', [param('uint8_t *', 'address')]) return def register_Ns3Ipv6Prefix_methods(root_module, cls): cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix() [constructor] cls.add_constructor([]) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t * prefix) [constructor] cls.add_constructor([param('uint8_t *', 'prefix')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(char const * prefix) [constructor] cls.add_constructor([param('char const *', 'prefix')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t prefix) [constructor] cls.add_constructor([param('uint8_t', 'prefix')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix const & prefix) [copy constructor] cls.add_constructor([param('ns3::Ipv6Prefix const &', 'prefix')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix 
const * prefix) [constructor] cls.add_constructor([param('ns3::Ipv6Prefix const *', 'prefix')]) ## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::GetBytes(uint8_t * buf) const [member function] cls.add_method('GetBytes', 'void', [param('uint8_t *', 'buf')], is_const=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetLoopback() [member function] cls.add_method('GetLoopback', 'ns3::Ipv6Prefix', [], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetOnes() [member function] cls.add_method('GetOnes', 'ns3::Ipv6Prefix', [], is_static=True) ## ipv6-address.h (module 'network'): uint8_t ns3::Ipv6Prefix::GetPrefixLength() const [member function] cls.add_method('GetPrefixLength', 'uint8_t', [], is_const=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetZero() [member function] cls.add_method('GetZero', 'ns3::Ipv6Prefix', [], is_static=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Prefix::IsEqual(ns3::Ipv6Prefix const & other) const [member function] cls.add_method('IsEqual', 'bool', [param('ns3::Ipv6Prefix const &', 'other')], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Prefix::IsMatch(ns3::Ipv6Address a, ns3::Ipv6Address b) const [member function] cls.add_method('IsMatch', 'bool', [param('ns3::Ipv6Address', 'a'), param('ns3::Ipv6Address', 'b')], is_const=True) ## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True) return def register_Ns3ObjectBase_methods(root_module, cls): ## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase() [constructor] cls.add_constructor([]) ## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase(ns3::ObjectBase const & arg0) [copy constructor] cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')]) ## object-base.h (module 'core'): 
void ns3::ObjectBase::GetAttribute(std::string name, ns3::AttributeValue & value) const [member function] cls.add_method('GetAttribute', 'void', [param('std::string', 'name'), param('ns3::AttributeValue &', 'value')], is_const=True) ## object-base.h (module 'core'): bool ns3::ObjectBase::GetAttributeFailSafe(std::string name, ns3::AttributeValue & value) const [member function] cls.add_method('GetAttributeFailSafe', 'bool', [param('std::string', 'name'), param('ns3::AttributeValue &', 'value')], is_const=True) ## object-base.h (module 'core'): ns3::TypeId ns3::ObjectBase::GetInstanceTypeId() const [member function] cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## object-base.h (module 'core'): static ns3::TypeId ns3::ObjectBase::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## object-base.h (module 'core'): void ns3::ObjectBase::SetAttribute(std::string name, ns3::AttributeValue const & value) [member function] cls.add_method('SetAttribute', 'void', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')]) ## object-base.h (module 'core'): bool ns3::ObjectBase::SetAttributeFailSafe(std::string name, ns3::AttributeValue const & value) [member function] cls.add_method('SetAttributeFailSafe', 'bool', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')]) ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function] cls.add_method('TraceConnect', 'bool', [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')]) ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function] cls.add_method('TraceConnectWithoutContext', 'bool', [param('std::string', 'name'), param('ns3::CallbackBase const &', 
'cb')]) ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function] cls.add_method('TraceDisconnect', 'bool', [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')]) ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function] cls.add_method('TraceDisconnectWithoutContext', 'bool', [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')]) ## object-base.h (module 'core'): void ns3::ObjectBase::ConstructSelf(ns3::AttributeConstructionList const & attributes) [member function] cls.add_method('ConstructSelf', 'void', [param('ns3::AttributeConstructionList const &', 'attributes')], visibility='protected') ## object-base.h (module 'core'): void ns3::ObjectBase::NotifyConstructionCompleted() [member function] cls.add_method('NotifyConstructionCompleted', 'void', [], visibility='protected', is_virtual=True) return def register_Ns3ObjectDeleter_methods(root_module, cls): ## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter() [constructor] cls.add_constructor([]) ## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter(ns3::ObjectDeleter const & arg0) [copy constructor] cls.add_constructor([param('ns3::ObjectDeleter const &', 'arg0')]) ## object.h (module 'core'): static void ns3::ObjectDeleter::Delete(ns3::Object * object) [member function] cls.add_method('Delete', 'void', [param('ns3::Object *', 'object')], is_static=True) return def register_Ns3PacketMetadata_methods(root_module, cls): ## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(uint64_t uid, uint32_t size) [constructor] cls.add_constructor([param('uint64_t', 'uid'), param('uint32_t', 'size')]) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(ns3::PacketMetadata const & o) [copy constructor] 
cls.add_constructor([param('ns3::PacketMetadata const &', 'o')]) ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddAtEnd(ns3::PacketMetadata const & o) [member function] cls.add_method('AddAtEnd', 'void', [param('ns3::PacketMetadata const &', 'o')]) ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddHeader(ns3::Header const & header, uint32_t size) [member function] cls.add_method('AddHeader', 'void', [param('ns3::Header const &', 'header'), param('uint32_t', 'size')]) ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddPaddingAtEnd(uint32_t end) [member function] cls.add_method('AddPaddingAtEnd', 'void', [param('uint32_t', 'end')]) ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddTrailer(ns3::Trailer const & trailer, uint32_t size) [member function] cls.add_method('AddTrailer', 'void', [param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')]) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator ns3::PacketMetadata::BeginItem(ns3::Buffer buffer) const [member function] cls.add_method('BeginItem', 'ns3::PacketMetadata::ItemIterator', [param('ns3::Buffer', 'buffer')], is_const=True) ## packet-metadata.h (module 'network'): ns3::PacketMetadata ns3::PacketMetadata::CreateFragment(uint32_t start, uint32_t end) const [member function] cls.add_method('CreateFragment', 'ns3::PacketMetadata', [param('uint32_t', 'start'), param('uint32_t', 'end')], is_const=True) ## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Deserialize(uint8_t const * buffer, uint32_t size) [member function] cls.add_method('Deserialize', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')]) ## packet-metadata.h (module 'network'): static void ns3::PacketMetadata::Enable() [member function] cls.add_method('Enable', 'void', [], is_static=True) ## packet-metadata.h (module 'network'): static void ns3::PacketMetadata::EnableChecking() [member function] 
cls.add_method('EnableChecking', 'void', [], is_static=True) ## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True) ## packet-metadata.h (module 'network'): uint64_t ns3::PacketMetadata::GetUid() const [member function] cls.add_method('GetUid', 'uint64_t', [], is_const=True) ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtEnd(uint32_t end) [member function] cls.add_method('RemoveAtEnd', 'void', [param('uint32_t', 'end')]) ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtStart(uint32_t start) [member function] cls.add_method('RemoveAtStart', 'void', [param('uint32_t', 'start')]) ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveHeader(ns3::Header const & header, uint32_t size) [member function] cls.add_method('RemoveHeader', 'void', [param('ns3::Header const &', 'header'), param('uint32_t', 'size')]) ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveTrailer(ns3::Trailer const & trailer, uint32_t size) [member function] cls.add_method('RemoveTrailer', 'void', [param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')]) ## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function] cls.add_method('Serialize', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')], is_const=True) return def register_Ns3PacketMetadataItem_methods(root_module, cls): ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::Item() [constructor] cls.add_constructor([]) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::Item(ns3::PacketMetadata::Item const & arg0) [copy constructor] cls.add_constructor([param('ns3::PacketMetadata::Item const &', 'arg0')]) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::current 
[variable] cls.add_instance_attribute('current', 'ns3::Buffer::Iterator', is_const=False) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentSize [variable] cls.add_instance_attribute('currentSize', 'uint32_t', is_const=False) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentTrimedFromEnd [variable] cls.add_instance_attribute('currentTrimedFromEnd', 'uint32_t', is_const=False) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentTrimedFromStart [variable] cls.add_instance_attribute('currentTrimedFromStart', 'uint32_t', is_const=False) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::isFragment [variable] cls.add_instance_attribute('isFragment', 'bool', is_const=False) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::tid [variable] cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False) return def register_Ns3PacketMetadataItemIterator_methods(root_module, cls): ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata::ItemIterator const & arg0) [copy constructor] cls.add_constructor([param('ns3::PacketMetadata::ItemIterator const &', 'arg0')]) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata const * metadata, ns3::Buffer buffer) [constructor] cls.add_constructor([param('ns3::PacketMetadata const *', 'metadata'), param('ns3::Buffer', 'buffer')]) ## packet-metadata.h (module 'network'): bool ns3::PacketMetadata::ItemIterator::HasNext() const [member function] cls.add_method('HasNext', 'bool', [], is_const=True) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item ns3::PacketMetadata::ItemIterator::Next() [member function] cls.add_method('Next', 'ns3::PacketMetadata::Item', []) return def register_Ns3PacketTagIterator_methods(root_module, cls): ## packet.h (module 'network'): 
ns3::PacketTagIterator::PacketTagIterator(ns3::PacketTagIterator const & arg0) [copy constructor] cls.add_constructor([param('ns3::PacketTagIterator const &', 'arg0')]) ## packet.h (module 'network'): bool ns3::PacketTagIterator::HasNext() const [member function] cls.add_method('HasNext', 'bool', [], is_const=True) ## packet.h (module 'network'): ns3::PacketTagIterator::Item ns3::PacketTagIterator::Next() [member function] cls.add_method('Next', 'ns3::PacketTagIterator::Item', []) return def register_Ns3PacketTagIteratorItem_methods(root_module, cls): ## packet.h (module 'network'): ns3::PacketTagIterator::Item::Item(ns3::PacketTagIterator::Item const & arg0) [copy constructor] cls.add_constructor([param('ns3::PacketTagIterator::Item const &', 'arg0')]) ## packet.h (module 'network'): void ns3::PacketTagIterator::Item::GetTag(ns3::Tag & tag) const [member function] cls.add_method('GetTag', 'void', [param('ns3::Tag &', 'tag')], is_const=True) ## packet.h (module 'network'): ns3::TypeId ns3::PacketTagIterator::Item::GetTypeId() const [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_const=True) return def register_Ns3PacketTagList_methods(root_module, cls): ## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList() [constructor] cls.add_constructor([]) ## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList(ns3::PacketTagList const & o) [copy constructor] cls.add_constructor([param('ns3::PacketTagList const &', 'o')]) ## packet-tag-list.h (module 'network'): void ns3::PacketTagList::Add(ns3::Tag const & tag) const [member function] cls.add_method('Add', 'void', [param('ns3::Tag const &', 'tag')], is_const=True) ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData const * ns3::PacketTagList::Head() const [member function] cls.add_method('Head', 'ns3::PacketTagList::TagData const *', [], is_const=True) ## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Peek(ns3::Tag & tag) const 
[member function] cls.add_method('Peek', 'bool', [param('ns3::Tag &', 'tag')], is_const=True) ## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Remove(ns3::Tag & tag) [member function] cls.add_method('Remove', 'bool', [param('ns3::Tag &', 'tag')]) ## packet-tag-list.h (module 'network'): void ns3::PacketTagList::RemoveAll() [member function] cls.add_method('RemoveAll', 'void', []) ## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Replace(ns3::Tag & tag) [member function] cls.add_method('Replace', 'bool', [param('ns3::Tag &', 'tag')]) return def register_Ns3PacketTagListTagData_methods(root_module, cls): ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData() [constructor] cls.add_constructor([]) ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData(ns3::PacketTagList::TagData const & arg0) [copy constructor] cls.add_constructor([param('ns3::PacketTagList::TagData const &', 'arg0')]) ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::count [variable] cls.add_instance_attribute('count', 'uint32_t', is_const=False) ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::data [variable] cls.add_instance_attribute('data', 'uint8_t [ 20 ]', is_const=False) ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::next [variable] cls.add_instance_attribute('next', 'ns3::PacketTagList::TagData *', is_const=False) ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::tid [variable] cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False) return def register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, 
ns3::ObjectDeleter>::SimpleRefCount(ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3Tag_methods(root_module, cls): ## tag.h (module 'network'): ns3::Tag::Tag() [constructor] cls.add_constructor([]) ## tag.h (module 'network'): ns3::Tag::Tag(ns3::Tag const & arg0) [copy constructor] cls.add_constructor([param('ns3::Tag const &', 'arg0')]) ## tag.h (module 'network'): void ns3::Tag::Deserialize(ns3::TagBuffer i) [member function] cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'i')], is_pure_virtual=True, is_virtual=True) ## tag.h (module 'network'): uint32_t ns3::Tag::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## tag.h (module 'network'): static ns3::TypeId ns3::Tag::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## tag.h (module 'network'): void ns3::Tag::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_pure_virtual=True, is_const=True, is_virtual=True) ## tag.h (module 'network'): void ns3::Tag::Serialize(ns3::TagBuffer i) const [member function] cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'i')], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3TagBuffer_methods(root_module, cls): ## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(ns3::TagBuffer const & arg0) [copy constructor] cls.add_constructor([param('ns3::TagBuffer const &', 'arg0')]) ## tag-buffer.h (module 'network'): 
ns3::TagBuffer::TagBuffer(uint8_t * start, uint8_t * end) [constructor] cls.add_constructor([param('uint8_t *', 'start'), param('uint8_t *', 'end')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::CopyFrom(ns3::TagBuffer o) [member function] cls.add_method('CopyFrom', 'void', [param('ns3::TagBuffer', 'o')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::Read(uint8_t * buffer, uint32_t size) [member function] cls.add_method('Read', 'void', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')]) ## tag-buffer.h (module 'network'): double ns3::TagBuffer::ReadDouble() [member function] cls.add_method('ReadDouble', 'double', []) ## tag-buffer.h (module 'network'): uint16_t ns3::TagBuffer::ReadU16() [member function] cls.add_method('ReadU16', 'uint16_t', []) ## tag-buffer.h (module 'network'): uint32_t ns3::TagBuffer::ReadU32() [member function] cls.add_method('ReadU32', 'uint32_t', []) ## tag-buffer.h (module 'network'): uint64_t ns3::TagBuffer::ReadU64() [member function] cls.add_method('ReadU64', 'uint64_t', []) ## tag-buffer.h (module 'network'): uint8_t ns3::TagBuffer::ReadU8() [member function] cls.add_method('ReadU8', 'uint8_t', []) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::TrimAtEnd(uint32_t trim) [member function] cls.add_method('TrimAtEnd', 'void', [param('uint32_t', 'trim')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::Write(uint8_t const * buffer, uint32_t size) [member function] cls.add_method('Write', 'void', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteDouble(double v) [member function] cls.add_method('WriteDouble', 'void', [param('double', 'v')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU16(uint16_t data) [member function] cls.add_method('WriteU16', 'void', [param('uint16_t', 'data')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU32(uint32_t data) [member function] 
cls.add_method('WriteU32', 'void', [param('uint32_t', 'data')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU64(uint64_t v) [member function] cls.add_method('WriteU64', 'void', [param('uint64_t', 'v')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU8(uint8_t v) [member function] cls.add_method('WriteU8', 'void', [param('uint8_t', 'v')]) return def register_Ns3TypeId_methods(root_module, cls): cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## type-id.h (module 'core'): ns3::TypeId::TypeId(char const * name) [constructor] cls.add_constructor([param('char const *', 'name')]) ## type-id.h (module 'core'): ns3::TypeId::TypeId() [constructor] cls.add_constructor([]) ## type-id.h (module 'core'): ns3::TypeId::TypeId(ns3::TypeId const & o) [copy constructor] cls.add_constructor([param('ns3::TypeId const &', 'o')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('AddAttribute', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, uint32_t flags, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('AddAttribute', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('uint32_t', 'flags'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< 
ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor) [member function] cls.add_method('AddTraceSource', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor')], deprecated=True) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor, std::string callback) [member function] cls.add_method('AddTraceSource', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor'), param('std::string', 'callback')]) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation ns3::TypeId::GetAttribute(uint32_t i) const [member function] cls.add_method('GetAttribute', 'ns3::TypeId::AttributeInformation', [param('uint32_t', 'i')], is_const=True) ## type-id.h (module 'core'): std::string ns3::TypeId::GetAttributeFullName(uint32_t i) const [member function] cls.add_method('GetAttributeFullName', 'std::string', [param('uint32_t', 'i')], is_const=True) ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetAttributeN() const [member function] cls.add_method('GetAttributeN', 'uint32_t', [], is_const=True) ## type-id.h (module 'core'): ns3::Callback<ns3::ObjectBase*,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ns3::TypeId::GetConstructor() const [member function] cls.add_method('GetConstructor', 'ns3::Callback< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', [], is_const=True) ## type-id.h (module 'core'): std::string ns3::TypeId::GetGroupName() const [member function] 
cls.add_method('GetGroupName', 'std::string', [], is_const=True) ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetHash() const [member function] cls.add_method('GetHash', 'uint32_t', [], is_const=True) ## type-id.h (module 'core'): std::string ns3::TypeId::GetName() const [member function] cls.add_method('GetName', 'std::string', [], is_const=True) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::GetParent() const [member function] cls.add_method('GetParent', 'ns3::TypeId', [], is_const=True) ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::GetRegistered(uint32_t i) [member function] cls.add_method('GetRegistered', 'ns3::TypeId', [param('uint32_t', 'i')], is_static=True) ## type-id.h (module 'core'): static uint32_t ns3::TypeId::GetRegisteredN() [member function] cls.add_method('GetRegisteredN', 'uint32_t', [], is_static=True) ## type-id.h (module 'core'): std::size_t ns3::TypeId::GetSize() const [member function] cls.add_method('GetSize', 'std::size_t', [], is_const=True) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation ns3::TypeId::GetTraceSource(uint32_t i) const [member function] cls.add_method('GetTraceSource', 'ns3::TypeId::TraceSourceInformation', [param('uint32_t', 'i')], is_const=True) ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetTraceSourceN() const [member function] cls.add_method('GetTraceSourceN', 'uint32_t', [], is_const=True) ## type-id.h (module 'core'): uint16_t ns3::TypeId::GetUid() const [member function] cls.add_method('GetUid', 'uint16_t', [], is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::HasConstructor() const [member function] cls.add_method('HasConstructor', 'bool', [], is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::HasParent() const [member function] cls.add_method('HasParent', 'bool', [], is_const=True) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::HideFromDocumentation() [member function] cls.add_method('HideFromDocumentation', 'ns3::TypeId', []) 
## type-id.h (module 'core'): bool ns3::TypeId::IsChildOf(ns3::TypeId other) const [member function] cls.add_method('IsChildOf', 'bool', [param('ns3::TypeId', 'other')], is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::LookupAttributeByName(std::string name, ns3::TypeId::AttributeInformation * info) const [member function] cls.add_method('LookupAttributeByName', 'bool', [param('std::string', 'name'), param('ns3::TypeId::AttributeInformation *', 'info', transfer_ownership=False)], is_const=True) ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByHash(uint32_t hash) [member function] cls.add_method('LookupByHash', 'ns3::TypeId', [param('uint32_t', 'hash')], is_static=True) ## type-id.h (module 'core'): static bool ns3::TypeId::LookupByHashFailSafe(uint32_t hash, ns3::TypeId * tid) [member function] cls.add_method('LookupByHashFailSafe', 'bool', [param('uint32_t', 'hash'), param('ns3::TypeId *', 'tid')], is_static=True) ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByName(std::string name) [member function] cls.add_method('LookupByName', 'ns3::TypeId', [param('std::string', 'name')], is_static=True) ## type-id.h (module 'core'): ns3::Ptr<ns3::TraceSourceAccessor const> ns3::TypeId::LookupTraceSourceByName(std::string name) const [member function] cls.add_method('LookupTraceSourceByName', 'ns3::Ptr< ns3::TraceSourceAccessor const >', [param('std::string', 'name')], is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::MustHideFromDocumentation() const [member function] cls.add_method('MustHideFromDocumentation', 'bool', [], is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::SetAttributeInitialValue(uint32_t i, ns3::Ptr<ns3::AttributeValue const> initialValue) [member function] cls.add_method('SetAttributeInitialValue', 'bool', [param('uint32_t', 'i'), param('ns3::Ptr< ns3::AttributeValue const >', 'initialValue')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetGroupName(std::string 
groupName) [member function] cls.add_method('SetGroupName', 'ns3::TypeId', [param('std::string', 'groupName')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetParent(ns3::TypeId tid) [member function] cls.add_method('SetParent', 'ns3::TypeId', [param('ns3::TypeId', 'tid')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetSize(std::size_t size) [member function] cls.add_method('SetSize', 'ns3::TypeId', [param('std::size_t', 'size')]) ## type-id.h (module 'core'): void ns3::TypeId::SetUid(uint16_t tid) [member function] cls.add_method('SetUid', 'void', [param('uint16_t', 'tid')]) return def register_Ns3TypeIdAttributeInformation_methods(root_module, cls): ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation() [constructor] cls.add_constructor([]) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation(ns3::TypeId::AttributeInformation const & arg0) [copy constructor] cls.add_constructor([param('ns3::TypeId::AttributeInformation const &', 'arg0')]) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::accessor [variable] cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::AttributeAccessor const >', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::checker [variable] cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::flags [variable] cls.add_instance_attribute('flags', 'uint32_t', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::help [variable] cls.add_instance_attribute('help', 'std::string', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::initialValue [variable] cls.add_instance_attribute('initialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::name [variable] 
cls.add_instance_attribute('name', 'std::string', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::originalInitialValue [variable] cls.add_instance_attribute('originalInitialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False) return def register_Ns3TypeIdTraceSourceInformation_methods(root_module, cls): ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation() [constructor] cls.add_constructor([]) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation(ns3::TypeId::TraceSourceInformation const & arg0) [copy constructor] cls.add_constructor([param('ns3::TypeId::TraceSourceInformation const &', 'arg0')]) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::accessor [variable] cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::TraceSourceAccessor const >', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::callback [variable] cls.add_instance_attribute('callback', 'std::string', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::help [variable] cls.add_instance_attribute('help', 'std::string', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::name [variable] cls.add_instance_attribute('name', 'std::string', is_const=False) return def register_Ns3Empty_methods(root_module, cls): ## empty.h (module 'core'): ns3::empty::empty() [constructor] cls.add_constructor([]) ## empty.h (module 'core'): ns3::empty::empty(ns3::empty const & arg0) [copy constructor] cls.add_constructor([param('ns3::empty const &', 'arg0')]) return def register_Ns3Chunk_methods(root_module, cls): ## chunk.h (module 'network'): ns3::Chunk::Chunk() [constructor] cls.add_constructor([]) ## chunk.h (module 'network'): ns3::Chunk::Chunk(ns3::Chunk const & arg0) [copy constructor] cls.add_constructor([param('ns3::Chunk const &', 'arg0')]) ## chunk.h (module 'network'): uint32_t 
ns3::Chunk::Deserialize(ns3::Buffer::Iterator start) [member function] cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_pure_virtual=True, is_virtual=True) ## chunk.h (module 'network'): static ns3::TypeId ns3::Chunk::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## chunk.h (module 'network'): void ns3::Chunk::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3Header_methods(root_module, cls): cls.add_output_stream_operator() ## header.h (module 'network'): ns3::Header::Header() [constructor] cls.add_constructor([]) ## header.h (module 'network'): ns3::Header::Header(ns3::Header const & arg0) [copy constructor] cls.add_constructor([param('ns3::Header const &', 'arg0')]) ## header.h (module 'network'): uint32_t ns3::Header::Deserialize(ns3::Buffer::Iterator start) [member function] cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_pure_virtual=True, is_virtual=True) ## header.h (module 'network'): uint32_t ns3::Header::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## header.h (module 'network'): static ns3::TypeId ns3::Header::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## header.h (module 'network'): void ns3::Header::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_pure_virtual=True, is_const=True, is_virtual=True) ## header.h (module 'network'): void ns3::Header::Serialize(ns3::Buffer::Iterator start) const [member function] cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_pure_virtual=True, is_const=True, is_virtual=True) return def 
register_Ns3Object_methods(root_module, cls): ## object.h (module 'core'): ns3::Object::Object() [constructor] cls.add_constructor([]) ## object.h (module 'core'): void ns3::Object::AggregateObject(ns3::Ptr<ns3::Object> other) [member function] cls.add_method('AggregateObject', 'void', [param('ns3::Ptr< ns3::Object >', 'other')]) ## object.h (module 'core'): void ns3::Object::Dispose() [member function] cls.add_method('Dispose', 'void', []) ## object.h (module 'core'): ns3::Object::AggregateIterator ns3::Object::GetAggregateIterator() const [member function] cls.add_method('GetAggregateIterator', 'ns3::Object::AggregateIterator', [], is_const=True) ## object.h (module 'core'): ns3::TypeId ns3::Object::GetInstanceTypeId() const [member function] cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True) ## object.h (module 'core'): static ns3::TypeId ns3::Object::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## object.h (module 'core'): void ns3::Object::Initialize() [member function] cls.add_method('Initialize', 'void', []) ## object.h (module 'core'): ns3::Object::Object(ns3::Object const & o) [copy constructor] cls.add_constructor([param('ns3::Object const &', 'o')], visibility='protected') ## object.h (module 'core'): void ns3::Object::DoDispose() [member function] cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True) ## object.h (module 'core'): void ns3::Object::DoInitialize() [member function] cls.add_method('DoInitialize', 'void', [], visibility='protected', is_virtual=True) ## object.h (module 'core'): void ns3::Object::NotifyNewAggregate() [member function] cls.add_method('NotifyNewAggregate', 'void', [], visibility='protected', is_virtual=True) return def register_Ns3ObjectAggregateIterator_methods(root_module, cls): ## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator(ns3::Object::AggregateIterator const & arg0) [copy 
constructor] cls.add_constructor([param('ns3::Object::AggregateIterator const &', 'arg0')]) ## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator() [constructor] cls.add_constructor([]) ## object.h (module 'core'): bool ns3::Object::AggregateIterator::HasNext() const [member function] cls.add_method('HasNext', 'bool', [], is_const=True) ## object.h (module 'core'): ns3::Ptr<ns3::Object const> ns3::Object::AggregateIterator::Next() [member function] cls.add_method('Next', 'ns3::Ptr< ns3::Object const >', []) return def register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter< ns3::AttributeAccessor > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, 
ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter< ns3::AttributeChecker > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter< ns3::AttributeValue > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, 
ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount(ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter< ns3::CallbackImplBase > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter< ns3::Hash::Implementation > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): 
ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::SimpleRefCount(ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter< ns3::NixVector > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter< ns3::Packet > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> 
>::SimpleRefCount(ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter< ns3::TraceSourceAccessor > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3TraceSourceAccessor_methods(root_module, cls): ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor(ns3::TraceSourceAccessor const & arg0) [copy constructor] cls.add_constructor([param('ns3::TraceSourceAccessor const &', 'arg0')]) ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor() [constructor] cls.add_constructor([]) ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Connect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function] cls.add_method('Connect', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True) ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::ConnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function] cls.add_method('ConnectWithoutContext', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True) ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Disconnect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function] cls.add_method('Disconnect', 'bool', 
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True) ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::DisconnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function] cls.add_method('DisconnectWithoutContext', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3Trailer_methods(root_module, cls): cls.add_output_stream_operator() ## trailer.h (module 'network'): ns3::Trailer::Trailer() [constructor] cls.add_constructor([]) ## trailer.h (module 'network'): ns3::Trailer::Trailer(ns3::Trailer const & arg0) [copy constructor] cls.add_constructor([param('ns3::Trailer const &', 'arg0')]) ## trailer.h (module 'network'): uint32_t ns3::Trailer::Deserialize(ns3::Buffer::Iterator end) [member function] cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'end')], is_pure_virtual=True, is_virtual=True) ## trailer.h (module 'network'): uint32_t ns3::Trailer::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## trailer.h (module 'network'): static ns3::TypeId ns3::Trailer::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## trailer.h (module 'network'): void ns3::Trailer::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_pure_virtual=True, is_const=True, is_virtual=True) ## trailer.h (module 'network'): void ns3::Trailer::Serialize(ns3::Buffer::Iterator start) const [member function] cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_pure_virtual=True, is_const=True, is_virtual=True) 
return def register_Ns3AttributeAccessor_methods(root_module, cls): ## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor(ns3::AttributeAccessor const & arg0) [copy constructor] cls.add_constructor([param('ns3::AttributeAccessor const &', 'arg0')]) ## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor() [constructor] cls.add_constructor([]) ## attribute.h (module 'core'): bool ns3::AttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function] cls.add_method('Get', 'bool', [param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasGetter() const [member function] cls.add_method('HasGetter', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasSetter() const [member function] cls.add_method('HasSetter', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function] cls.add_method('Set', 'bool', [param('ns3::ObjectBase *', 'object', transfer_ownership=False), param('ns3::AttributeValue const &', 'value')], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3AttributeChecker_methods(root_module, cls): ## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker(ns3::AttributeChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::AttributeChecker const &', 'arg0')]) ## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker() [constructor] cls.add_constructor([]) ## attribute.h (module 'core'): bool ns3::AttributeChecker::Check(ns3::AttributeValue const & value) const [member function] cls.add_method('Check', 'bool', 
[param('ns3::AttributeValue const &', 'value')], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function] cls.add_method('Copy', 'bool', [param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::Create() const [member function] cls.add_method('Create', 'ns3::Ptr< ns3::AttributeValue >', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::CreateValidValue(ns3::AttributeValue const & value) const [member function] cls.add_method('CreateValidValue', 'ns3::Ptr< ns3::AttributeValue >', [param('ns3::AttributeValue const &', 'value')], is_const=True) ## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetUnderlyingTypeInformation() const [member function] cls.add_method('GetUnderlyingTypeInformation', 'std::string', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetValueTypeName() const [member function] cls.add_method('GetValueTypeName', 'std::string', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeChecker::HasUnderlyingTypeInformation() const [member function] cls.add_method('HasUnderlyingTypeInformation', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3AttributeValue_methods(root_module, cls): ## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue(ns3::AttributeValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::AttributeValue const &', 'arg0')]) ## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue() 
[constructor] cls.add_constructor([]) ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_pure_virtual=True, is_virtual=True) ## attribute.h (module 'core'): std::string ns3::AttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3CallbackChecker_methods(root_module, cls): ## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker() [constructor] cls.add_constructor([]) ## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker(ns3::CallbackChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::CallbackChecker const &', 'arg0')]) return def register_Ns3CallbackImplBase_methods(root_module, cls): ## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase() [constructor] cls.add_constructor([]) ## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase(ns3::CallbackImplBase const & arg0) [copy constructor] cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')]) ## callback.h (module 'core'): bool ns3::CallbackImplBase::IsEqual(ns3::Ptr<ns3::CallbackImplBase const> other) const [member function] cls.add_method('IsEqual', 'bool', [param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')], is_pure_virtual=True, is_const=True, is_virtual=True) return def 
register_Ns3CallbackValue_methods(root_module, cls): ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::CallbackValue const &', 'arg0')]) ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue() [constructor] cls.add_constructor([]) ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackBase const & base) [constructor] cls.add_constructor([param('ns3::CallbackBase const &', 'base')]) ## callback.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::CallbackValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## callback.h (module 'core'): bool ns3::CallbackValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## callback.h (module 'core'): std::string ns3::CallbackValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## callback.h (module 'core'): void ns3::CallbackValue::Set(ns3::CallbackBase base) [member function] cls.add_method('Set', 'void', [param('ns3::CallbackBase', 'base')]) return def register_Ns3EmptyAttributeValue_methods(root_module, cls): ## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue(ns3::EmptyAttributeValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::EmptyAttributeValue const &', 'arg0')]) ## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue() [constructor] cls.add_constructor([]) ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> 
ns3::EmptyAttributeValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, visibility='private', is_virtual=True) ## attribute.h (module 'core'): bool ns3::EmptyAttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], visibility='private', is_virtual=True) ## attribute.h (module 'core'): std::string ns3::EmptyAttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, visibility='private', is_virtual=True) return def register_Ns3Ipv4AddressChecker_methods(root_module, cls): ## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker::Ipv4AddressChecker() [constructor] cls.add_constructor([]) ## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker::Ipv4AddressChecker(ns3::Ipv4AddressChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4AddressChecker const &', 'arg0')]) return def register_Ns3Ipv4AddressValue_methods(root_module, cls): ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue() [constructor] cls.add_constructor([]) ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue(ns3::Ipv4AddressValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4AddressValue const &', 'arg0')]) ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue(ns3::Ipv4Address const & value) [constructor] cls.add_constructor([param('ns3::Ipv4Address const &', 'value')]) ## ipv4-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv4AddressValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< 
ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4AddressValue::Get() const [member function] cls.add_method('Get', 'ns3::Ipv4Address', [], is_const=True) ## ipv4-address.h (module 'network'): std::string ns3::Ipv4AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4AddressValue::Set(ns3::Ipv4Address const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Ipv4Address const &', 'value')]) return def register_Ns3Ipv4MaskChecker_methods(root_module, cls): ## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker::Ipv4MaskChecker() [constructor] cls.add_constructor([]) ## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker::Ipv4MaskChecker(ns3::Ipv4MaskChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4MaskChecker const &', 'arg0')]) return def register_Ns3Ipv4MaskValue_methods(root_module, cls): ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue() [constructor] cls.add_constructor([]) ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue(ns3::Ipv4MaskValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4MaskValue const &', 'arg0')]) ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue(ns3::Ipv4Mask const & value) [constructor] cls.add_constructor([param('ns3::Ipv4Mask const &', 'value')]) ## 
ipv4-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv4MaskValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4MaskValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## ipv4-address.h (module 'network'): ns3::Ipv4Mask ns3::Ipv4MaskValue::Get() const [member function] cls.add_method('Get', 'ns3::Ipv4Mask', [], is_const=True) ## ipv4-address.h (module 'network'): std::string ns3::Ipv4MaskValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4MaskValue::Set(ns3::Ipv4Mask const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Ipv4Mask const &', 'value')]) return def register_Ns3Ipv6AddressChecker_methods(root_module, cls): ## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker::Ipv6AddressChecker() [constructor] cls.add_constructor([]) ## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker::Ipv6AddressChecker(ns3::Ipv6AddressChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv6AddressChecker const &', 'arg0')]) return def register_Ns3Ipv6AddressValue_methods(root_module, cls): ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue() [constructor] cls.add_constructor([]) ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue(ns3::Ipv6AddressValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv6AddressValue const &', 'arg0')]) ## ipv6-address.h 
(module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue(ns3::Ipv6Address const & value) [constructor] cls.add_constructor([param('ns3::Ipv6Address const &', 'value')]) ## ipv6-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv6AddressValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## ipv6-address.h (module 'network'): ns3::Ipv6Address ns3::Ipv6AddressValue::Get() const [member function] cls.add_method('Get', 'ns3::Ipv6Address', [], is_const=True) ## ipv6-address.h (module 'network'): std::string ns3::Ipv6AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## ipv6-address.h (module 'network'): void ns3::Ipv6AddressValue::Set(ns3::Ipv6Address const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Ipv6Address const &', 'value')]) return def register_Ns3Ipv6PrefixChecker_methods(root_module, cls): ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker::Ipv6PrefixChecker() [constructor] cls.add_constructor([]) ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker::Ipv6PrefixChecker(ns3::Ipv6PrefixChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv6PrefixChecker const &', 'arg0')]) return def register_Ns3Ipv6PrefixValue_methods(root_module, cls): ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue() [constructor] cls.add_constructor([]) ## ipv6-address.h (module 
'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue(ns3::Ipv6PrefixValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv6PrefixValue const &', 'arg0')]) ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue(ns3::Ipv6Prefix const & value) [constructor] cls.add_constructor([param('ns3::Ipv6Prefix const &', 'value')]) ## ipv6-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv6PrefixValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6PrefixValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix ns3::Ipv6PrefixValue::Get() const [member function] cls.add_method('Get', 'ns3::Ipv6Prefix', [], is_const=True) ## ipv6-address.h (module 'network'): std::string ns3::Ipv6PrefixValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## ipv6-address.h (module 'network'): void ns3::Ipv6PrefixValue::Set(ns3::Ipv6Prefix const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Ipv6Prefix const &', 'value')]) return def register_Ns3NetDevice_methods(root_module, cls): ## net-device.h (module 'network'): ns3::NetDevice::NetDevice() [constructor] cls.add_constructor([]) ## net-device.h (module 'network'): ns3::NetDevice::NetDevice(ns3::NetDevice const & arg0) [copy constructor] cls.add_constructor([param('ns3::NetDevice const &', 'arg0')]) ## net-device.h (module 'network'): void 
ns3::NetDevice::AddLinkChangeCallback(ns3::Callback<void,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> callback) [member function]
    cls.add_method('AddLinkChangeCallback', 'void', [param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')], is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetAddress() const [member function]
    cls.add_method('GetAddress', 'ns3::Address', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetBroadcast() const [member function]
    cls.add_method('GetBroadcast', 'ns3::Address', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Ptr<ns3::Channel> ns3::NetDevice::GetChannel() const [member function]
    cls.add_method('GetChannel', 'ns3::Ptr< ns3::Channel >', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): uint32_t ns3::NetDevice::GetIfIndex() const [member function]
    cls.add_method('GetIfIndex', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): uint16_t ns3::NetDevice::GetMtu() const [member function]
    cls.add_method('GetMtu', 'uint16_t', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv4Address multicastGroup) const [member function]
    cls.add_method('GetMulticast', 'ns3::Address', [param('ns3::Ipv4Address', 'multicastGroup')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv6Address addr) const [member function]
    cls.add_method('GetMulticast', 'ns3::Address', [param('ns3::Ipv6Address', 'addr')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Ptr<ns3::Node> ns3::NetDevice::GetNode() const [member function]
    cls.add_method('GetNode', 'ns3::Ptr< ns3::Node >', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): static ns3::TypeId ns3::NetDevice::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::IsBridge() const [member function]
    cls.add_method('IsBridge', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::IsBroadcast() const [member function]
    cls.add_method('IsBroadcast', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::IsLinkUp() const [member function]
    cls.add_method('IsLinkUp', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::IsMulticast() const [member function]
    cls.add_method('IsMulticast', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::IsPointToPoint() const [member function]
    cls.add_method('IsPointToPoint', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::NeedsArp() const [member function]
    cls.add_method('NeedsArp', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::Send(ns3::Ptr<ns3::Packet> packet, ns3::Address const & dest, uint16_t protocolNumber) [member function]
    cls.add_method('Send', 'bool', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')], is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::SendFrom(ns3::Ptr<ns3::Packet> packet, ns3::Address const & source, ns3::Address const & dest, uint16_t protocolNumber) [member function]
    cls.add_method('SendFrom', 'bool', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')], is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): void ns3::NetDevice::SetAddress(ns3::Address address) [member function]
    cls.add_method('SetAddress', 'void', [param('ns3::Address', 'address')], is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): void ns3::NetDevice::SetIfIndex(uint32_t const index) [member function]
    cls.add_method('SetIfIndex', 'void', [param('uint32_t const', 'index')], is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::SetMtu(uint16_t const mtu) [member function]
    cls.add_method('SetMtu', 'bool', [param('uint16_t const', 'mtu')], is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): void ns3::NetDevice::SetNode(ns3::Ptr<ns3::Node> node) [member function]
    cls.add_method('SetNode', 'void', [param('ns3::Ptr< ns3::Node >', 'node')], is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): void ns3::NetDevice::SetPromiscReceiveCallback(ns3::Callback<bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> cb) [member function]
    cls.add_method('SetPromiscReceiveCallback', 'void', [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): void ns3::NetDevice::SetReceiveCallback(ns3::Callback<bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> cb) [member function]
    cls.add_method('SetReceiveCallback', 'void', [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::SupportsSendFrom() const [member function]
    cls.add_method('SupportsSendFrom', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    return

def register_Ns3NixVector_methods(root_module, cls):
    """Register the ns3::NixVector API on the wrapped class (pybindgen-generated; do not edit by hand)."""
    cls.add_output_stream_operator()
    ## nix-vector.h (module 'network'): ns3::NixVector::NixVector() [constructor]
    cls.add_constructor([])
    ## nix-vector.h (module 'network'): ns3::NixVector::NixVector(ns3::NixVector const & o) [copy constructor]
    cls.add_constructor([param('ns3::NixVector const &', 'o')])
    ## nix-vector.h (module 'network'): void ns3::NixVector::AddNeighborIndex(uint32_t newBits, uint32_t numberOfBits) [member function]
    cls.add_method('AddNeighborIndex', 'void', [param('uint32_t', 'newBits'), param('uint32_t', 'numberOfBits')])
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::BitCount(uint32_t numberOfNeighbors) const [member function]
    cls.add_method('BitCount', 'uint32_t', [param('uint32_t', 'numberOfNeighbors')], is_const=True)
    ## nix-vector.h (module 'network'): ns3::Ptr<ns3::NixVector> ns3::NixVector::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::NixVector >', [], is_const=True)
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::Deserialize(uint32_t const * buffer, uint32_t size) [member function]
    cls.add_method('Deserialize', 'uint32_t', [param('uint32_t const *', 'buffer'), param('uint32_t', 'size')])
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::ExtractNeighborIndex(uint32_t numberOfBits) [member function]
    cls.add_method('ExtractNeighborIndex', 'uint32_t', [param('uint32_t', 'numberOfBits')])
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::GetRemainingBits() [member function]
    cls.add_method('GetRemainingBits', 'uint32_t', [])
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True)
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::Serialize(uint32_t * buffer, uint32_t maxSize) const [member function]
    cls.add_method('Serialize', 'uint32_t', [param('uint32_t *', 'buffer'), param('uint32_t', 'maxSize')], is_const=True)
    return

def register_Ns3Node_methods(root_module, cls):
    """Register the ns3::Node API on the wrapped class (pybindgen-generated; do not edit by hand)."""
    ## node.h (module 'network'): ns3::Node::Node(ns3::Node const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Node const &', 'arg0')])
    ## node.h (module 'network'): ns3::Node::Node() [constructor]
    cls.add_constructor([])
    ## node.h (module 'network'): ns3::Node::Node(uint32_t systemId) [constructor]
    cls.add_constructor([param('uint32_t', 'systemId')])
    ## node.h (module 'network'): uint32_t ns3::Node::AddApplication(ns3::Ptr<ns3::Application> application) [member function]
    cls.add_method('AddApplication', 'uint32_t', [param('ns3::Ptr< ns3::Application >', 'application')])
    ## node.h (module 'network'): uint32_t ns3::Node::AddDevice(ns3::Ptr<ns3::NetDevice> device) [member function]
    cls.add_method('AddDevice', 'uint32_t', [param('ns3::Ptr< ns3::NetDevice >', 'device')])
    ## node.h (module 'network'): static bool ns3::Node::ChecksumEnabled() [member function]
    cls.add_method('ChecksumEnabled', 'bool', [], is_static=True)
    ## node.h (module 'network'): ns3::Ptr<ns3::Application> ns3::Node::GetApplication(uint32_t index) const [member function]
    cls.add_method('GetApplication', 'ns3::Ptr< ns3::Application >', [param('uint32_t', 'index')], is_const=True)
    ## node.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::Node::GetDevice(uint32_t index) const [member function]
    cls.add_method('GetDevice', 'ns3::Ptr< ns3::NetDevice >', [param('uint32_t', 'index')], is_const=True)
    ## node.h (module 'network'): uint32_t ns3::Node::GetId() const [member function]
    cls.add_method('GetId', 'uint32_t', [], is_const=True)
    ## node.h (module 'network'): uint32_t ns3::Node::GetNApplications() const [member function]
    cls.add_method('GetNApplications', 'uint32_t', [], is_const=True)
    ## node.h (module 'network'): uint32_t ns3::Node::GetNDevices() const [member function]
    cls.add_method('GetNDevices', 'uint32_t', [], is_const=True)
    ## node.h (module 'network'): uint32_t ns3::Node::GetSystemId() const [member function]
    cls.add_method('GetSystemId', 'uint32_t', [], is_const=True)
    ## node.h (module 'network'): static ns3::TypeId ns3::Node::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## node.h (module 'network'): void ns3::Node::RegisterDeviceAdditionListener(ns3::Callback<void,ns3::Ptr<ns3::NetDevice>,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> listener) [member function]
    cls.add_method('RegisterDeviceAdditionListener', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'listener')])
    ## node.h (module 'network'): void ns3::Node::RegisterProtocolHandler(ns3::Callback<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> handler, uint16_t protocolType, ns3::Ptr<ns3::NetDevice> device, bool promiscuous=false) [member function]
    cls.add_method('RegisterProtocolHandler', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'handler'), param('uint16_t', 'protocolType'), param('ns3::Ptr< ns3::NetDevice >', 'device'), param('bool', 'promiscuous', default_value='false')])
    ## node.h (module 'network'): void ns3::Node::UnregisterDeviceAdditionListener(ns3::Callback<void,ns3::Ptr<ns3::NetDevice>,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> listener) [member function]
    cls.add_method('UnregisterDeviceAdditionListener', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'listener')])
    ## node.h (module 'network'): void ns3::Node::UnregisterProtocolHandler(ns3::Callback<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> handler) [member function]
    cls.add_method('UnregisterProtocolHandler', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'handler')])
    ## node.h (module 'network'): void ns3::Node::DoDispose() [member function]
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
    ## node.h (module 'network'): void ns3::Node::DoInitialize() [member function]
    cls.add_method('DoInitialize', 'void', [], visibility='protected', is_virtual=True)
    return

def register_Ns3Packet_methods(root_module, cls):
    """Register the ns3::Packet API on the wrapped class (pybindgen-generated; do not edit by hand)."""
    cls.add_output_stream_operator()
    ## packet.h (module 'network'): ns3::Packet::Packet() [constructor]
    cls.add_constructor([])
    ## packet.h (module 'network'): ns3::Packet::Packet(ns3::Packet const & o) [copy constructor]
    cls.add_constructor([param('ns3::Packet const &', 'o')])
    ## packet.h (module 'network'): ns3::Packet::Packet(uint32_t size) [constructor]
    cls.add_constructor([param('uint32_t', 'size')])
    ## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size, bool magic) [constructor]
    cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size'), param('bool', 'magic')])
    ## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size) [constructor]
    cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    ## packet.h (module 'network'): void ns3::Packet::AddAtEnd(ns3::Ptr<const ns3::Packet> packet) [member function]
    cls.add_method('AddAtEnd', 'void', [param('ns3::Ptr< ns3::Packet const >', 'packet')])
    ## packet.h (module 'network'): void ns3::Packet::AddByteTag(ns3::Tag const & tag) const [member function]
    cls.add_method('AddByteTag', 'void', [param('ns3::Tag const &', 'tag')], is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::AddHeader(ns3::Header const & header) [member function]
    cls.add_method('AddHeader', 'void', [param('ns3::Header const &', 'header')])
    ## packet.h (module 'network'): void ns3::Packet::AddPacketTag(ns3::Tag const & tag) const [member function]
    cls.add_method('AddPacketTag', 'void', [param('ns3::Tag const &', 'tag')], is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::AddPaddingAtEnd(uint32_t size) [member function]
    cls.add_method('AddPaddingAtEnd', 'void', [param('uint32_t', 'size')])
    ## packet.h (module 'network'): void ns3::Packet::AddTrailer(ns3::Trailer const & trailer) [member function]
    cls.add_method('AddTrailer', 'void', [param('ns3::Trailer const &', 'trailer')])
    ## packet.h (module 'network'): ns3::PacketMetadata::ItemIterator ns3::Packet::BeginItem() const [member function]
    cls.add_method('BeginItem', 'ns3::PacketMetadata::ItemIterator', [], is_const=True)
    ## packet.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Packet::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::Packet >', [], is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::Packet::CopyData(uint8_t * buffer, uint32_t size) const [member function]
    cls.add_method('CopyData', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')], is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::CopyData(std::ostream * os, uint32_t size) const [member function]
    cls.add_method('CopyData', 'void', [param('std::ostream *', 'os'), param('uint32_t', 'size')], is_const=True)
    ## packet.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Packet::CreateFragment(uint32_t start, uint32_t length) const [member function]
    cls.add_method('CreateFragment', 'ns3::Ptr< ns3::Packet >', [param('uint32_t', 'start'), param('uint32_t', 'length')], is_const=True)
    ## packet.h (module 'network'): static void ns3::Packet::EnableChecking() [member function]
    cls.add_method('EnableChecking', 'void', [], is_static=True)
    ## packet.h (module 'network'): static void ns3::Packet::EnablePrinting() [member function]
    cls.add_method('EnablePrinting', 'void', [], is_static=True)
    ## packet.h (module 'network'): bool ns3::Packet::FindFirstMatchingByteTag(ns3::Tag & tag) const [member function]
    cls.add_method('FindFirstMatchingByteTag', 'bool', [param('ns3::Tag &', 'tag')], is_const=True)
    ## packet.h (module 'network'): ns3::ByteTagIterator ns3::Packet::GetByteTagIterator() const [member function]
    cls.add_method('GetByteTagIterator', 'ns3::ByteTagIterator', [], is_const=True)
    ## packet.h (module 'network'): ns3::Ptr<ns3::NixVector> ns3::Packet::GetNixVector() const [member function]
    cls.add_method('GetNixVector', 'ns3::Ptr< ns3::NixVector >', [], is_const=True)
    ## packet.h (module 'network'): ns3::PacketTagIterator ns3::Packet::GetPacketTagIterator() const [member function]
    cls.add_method('GetPacketTagIterator', 'ns3::PacketTagIterator', [], is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::Packet::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::Packet::GetSize() const [member function]
    cls.add_method('GetSize', 'uint32_t', [], is_const=True)
    ## packet.h (module 'network'): uint64_t ns3::Packet::GetUid() const [member function]
    cls.add_method('GetUid', 'uint64_t', [], is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::Packet::PeekHeader(ns3::Header & header) const [member function]
    cls.add_method('PeekHeader', 'uint32_t', [param('ns3::Header &', 'header')], is_const=True)
    ## packet.h (module 'network'): bool ns3::Packet::PeekPacketTag(ns3::Tag & tag) const [member function]
    cls.add_method('PeekPacketTag', 'bool', [param('ns3::Tag &', 'tag')], is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::Packet::PeekTrailer(ns3::Trailer & trailer) [member function]
    cls.add_method('PeekTrailer', 'uint32_t', [param('ns3::Trailer &', 'trailer')])
    ## packet.h (module 'network'): void ns3::Packet::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::PrintByteTags(std::ostream & os) const [member function]
    cls.add_method('PrintByteTags', 'void', [param('std::ostream &', 'os')], is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::PrintPacketTags(std::ostream & os) const [member function]
    cls.add_method('PrintPacketTags', 'void', [param('std::ostream &', 'os')], is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::RemoveAllByteTags() [member function]
    cls.add_method('RemoveAllByteTags', 'void', [])
    ## packet.h (module 'network'): void ns3::Packet::RemoveAllPacketTags() [member function]
    cls.add_method('RemoveAllPacketTags', 'void', [])
    ## packet.h (module 'network'): void ns3::Packet::RemoveAtEnd(uint32_t size) [member function]
    cls.add_method('RemoveAtEnd', 'void', [param('uint32_t', 'size')])
    ## packet.h (module 'network'): void ns3::Packet::RemoveAtStart(uint32_t size) [member function]
    cls.add_method('RemoveAtStart', 'void', [param('uint32_t', 'size')])
    ## packet.h (module 'network'): uint32_t ns3::Packet::RemoveHeader(ns3::Header & header) [member function]
    cls.add_method('RemoveHeader', 'uint32_t', [param('ns3::Header &', 'header')])
    ## packet.h (module 'network'): bool ns3::Packet::RemovePacketTag(ns3::Tag & tag) [member function]
    cls.add_method('RemovePacketTag', 'bool', [param('ns3::Tag &', 'tag')])
    ## packet.h (module 'network'): uint32_t ns3::Packet::RemoveTrailer(ns3::Trailer & trailer) [member function]
    cls.add_method('RemoveTrailer', 'uint32_t', [param('ns3::Trailer &', 'trailer')])
    ## packet.h (module 'network'): bool ns3::Packet::ReplacePacketTag(ns3::Tag & tag) [member function]
    cls.add_method('ReplacePacketTag', 'bool', [param('ns3::Tag &', 'tag')])
    ## packet.h (module 'network'): uint32_t ns3::Packet::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
    cls.add_method('Serialize', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')], is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::SetNixVector(ns3::Ptr<ns3::NixVector> nixVector) [member function]
    cls.add_method('SetNixVector', 'void', [param('ns3::Ptr< ns3::NixVector >', 'nixVector')])
    ## packet.h (module 'network'): std::string ns3::Packet::ToString() const [member function]
    cls.add_method('ToString', 'std::string', [], is_const=True)
    return

def register_Ns3TypeIdChecker_methods(root_module, cls):
    """Register the ns3::TypeIdChecker API on the wrapped class (pybindgen-generated; do not edit by hand)."""
    ## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker() [constructor]
    cls.add_constructor([])
    ## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker(ns3::TypeIdChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TypeIdChecker const &', 'arg0')])
    return

def register_Ns3TypeIdValue_methods(root_module, cls):
    """Register the ns3::TypeIdValue API on the wrapped class (pybindgen-generated; do not edit by hand)."""
    ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue() [constructor]
    cls.add_constructor([])
    ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeIdValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TypeIdValue const &', 'arg0')])
    ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeId const & value) [constructor]
    cls.add_constructor([param('ns3::TypeId const &', 'value')])
    ## type-id.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TypeIdValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## type-id.h (module 'core'): bool ns3::TypeIdValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeIdValue::Get() const [member function]
    cls.add_method('Get', 'ns3::TypeId', [], is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeIdValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    ## type-id.h (module 'core'): void ns3::TypeIdValue::Set(ns3::TypeId const & value) [member function]
    cls.add_method('Set', 'void', [param('ns3::TypeId const &', 'value')])
    return

def register_Ns3VirtualNetDevice_methods(root_module, cls):
    """Register the ns3::VirtualNetDevice API on the wrapped class (pybindgen-generated; do not edit by hand)."""
    ## virtual-net-device.h (module 'virtual-net-device'): ns3::VirtualNetDevice::VirtualNetDevice(ns3::VirtualNetDevice const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::VirtualNetDevice const &', 'arg0')])
    ## virtual-net-device.h (module 'virtual-net-device'): ns3::VirtualNetDevice::VirtualNetDevice() [constructor]
    cls.add_constructor([])
    ## virtual-net-device.h (module 'virtual-net-device'): void ns3::VirtualNetDevice::AddLinkChangeCallback(ns3::Callback<void,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> callback) [member function]
    cls.add_method('AddLinkChangeCallback', 'void', [param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')], is_virtual=True)
    ## virtual-net-device.h (module 'virtual-net-device'): ns3::Address ns3::VirtualNetDevice::GetAddress() const [member function]
    cls.add_method('GetAddress', 'ns3::Address', [], is_const=True, is_virtual=True)
    ## virtual-net-device.h (module 'virtual-net-device'): ns3::Address ns3::VirtualNetDevice::GetBroadcast() const [member function]
    cls.add_method('GetBroadcast', 'ns3::Address', [], is_const=True, is_virtual=True)
    ## virtual-net-device.h (module 'virtual-net-device'): ns3::Ptr<ns3::Channel> ns3::VirtualNetDevice::GetChannel() const [member function]
    cls.add_method('GetChannel', 'ns3::Ptr< ns3::Channel >', [], is_const=True, is_virtual=True)
    ## virtual-net-device.h (module 'virtual-net-device'): uint32_t ns3::VirtualNetDevice::GetIfIndex() const [member function]
    cls.add_method('GetIfIndex', 'uint32_t', [], is_const=True, is_virtual=True)
    ## virtual-net-device.h (module 'virtual-net-device'): uint16_t ns3::VirtualNetDevice::GetMtu() const [member function]
    cls.add_method('GetMtu', 'uint16_t', [], is_const=True, is_virtual=True)
    ## virtual-net-device.h (module 'virtual-net-device'): ns3::Address ns3::VirtualNetDevice::GetMulticast(ns3::Ipv4Address multicastGroup) const [member function]
    cls.add_method('GetMulticast', 'ns3::Address', [param('ns3::Ipv4Address', 'multicastGroup')], is_const=True, is_virtual=True)
    ## virtual-net-device.h (module 'virtual-net-device'): ns3::Address ns3::VirtualNetDevice::GetMulticast(ns3::Ipv6Address addr) const [member function]
    cls.add_method('GetMulticast', 'ns3::Address', [param('ns3::Ipv6Address', 'addr')], is_const=True, is_virtual=True)
    ## virtual-net-device.h (module 'virtual-net-device'): ns3::Ptr<ns3::Node> ns3::VirtualNetDevice::GetNode() const [member function]
    cls.add_method('GetNode', 'ns3::Ptr< ns3::Node >', [], is_const=True, is_virtual=True)
    ## virtual-net-device.h (module 'virtual-net-device'): static ns3::TypeId ns3::VirtualNetDevice::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## virtual-net-device.h (module 'virtual-net-device'): bool ns3::VirtualNetDevice::IsBridge() const [member function]
    cls.add_method('IsBridge', 'bool', [], is_const=True, is_virtual=True)
    ## virtual-net-device.h (module 'virtual-net-device'): bool ns3::VirtualNetDevice::IsBroadcast() const [member function]
    cls.add_method('IsBroadcast', 'bool', [], is_const=True, is_virtual=True)
    ## virtual-net-device.h (module 'virtual-net-device'): bool ns3::VirtualNetDevice::IsLinkUp() const [member function]
    cls.add_method('IsLinkUp', 'bool', [], is_const=True, is_virtual=True)
    ## virtual-net-device.h (module 'virtual-net-device'): bool ns3::VirtualNetDevice::IsMulticast() const [member function]
    cls.add_method('IsMulticast', 'bool', [], is_const=True, is_virtual=True)
    ## virtual-net-device.h (module 'virtual-net-device'): bool ns3::VirtualNetDevice::IsPointToPoint() const [member function]
    cls.add_method('IsPointToPoint', 'bool', [], is_const=True, is_virtual=True)
    ## virtual-net-device.h (module 'virtual-net-device'): bool ns3::VirtualNetDevice::NeedsArp() const [member function]
    cls.add_method('NeedsArp', 'bool', [], is_const=True, is_virtual=True)
    ## virtual-net-device.h (module 'virtual-net-device'): bool ns3::VirtualNetDevice::Receive(ns3::Ptr<ns3::Packet> packet, uint16_t protocol, ns3::Address const & source, ns3::Address const & destination, ns3::NetDevice::PacketType packetType) [member function]
    cls.add_method('Receive', 'bool', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('uint16_t', 'protocol'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'destination'), param('ns3::NetDevice::PacketType', 'packetType')])
    ## virtual-net-device.h (module 'virtual-net-device'): bool ns3::VirtualNetDevice::Send(ns3::Ptr<ns3::Packet> packet, ns3::Address const & dest, uint16_t protocolNumber) [member function]
    cls.add_method('Send', 'bool', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')], is_virtual=True)
    ## virtual-net-device.h (module 'virtual-net-device'): bool ns3::VirtualNetDevice::SendFrom(ns3::Ptr<ns3::Packet> packet, ns3::Address const & source, ns3::Address const & dest, uint16_t protocolNumber) [member function]
    cls.add_method('SendFrom', 'bool', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')], is_virtual=True)
    ## virtual-net-device.h (module 'virtual-net-device'): void ns3::VirtualNetDevice::SetAddress(ns3::Address address) [member function]
    cls.add_method('SetAddress', 'void', [param('ns3::Address', 'address')], is_virtual=True)
    ## virtual-net-device.h (module 'virtual-net-device'): void ns3::VirtualNetDevice::SetIfIndex(uint32_t const index) [member function]
    cls.add_method('SetIfIndex', 'void', [param('uint32_t const', 'index')], is_virtual=True)
    ## virtual-net-device.h (module 'virtual-net-device'): void ns3::VirtualNetDevice::SetIsPointToPoint(bool isPointToPoint) [member function]
    cls.add_method('SetIsPointToPoint', 'void', [param('bool', 'isPointToPoint')])
    ## virtual-net-device.h (module 'virtual-net-device'): bool ns3::VirtualNetDevice::SetMtu(uint16_t const mtu) [member function]
    cls.add_method('SetMtu', 'bool', [param('uint16_t const', 'mtu')], is_virtual=True)
    ## virtual-net-device.h (module 'virtual-net-device'): void ns3::VirtualNetDevice::SetNeedsArp(bool needsArp) [member function]
    cls.add_method('SetNeedsArp', 'void', [param('bool', 'needsArp')])
    ## virtual-net-device.h (module 'virtual-net-device'): void ns3::VirtualNetDevice::SetNode(ns3::Ptr<ns3::Node> node) [member function]
    cls.add_method('SetNode', 'void', [param('ns3::Ptr< ns3::Node >', 'node')], is_virtual=True)
    ## virtual-net-device.h (module 'virtual-net-device'): void ns3::VirtualNetDevice::SetPromiscReceiveCallback(ns3::Callback<bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> cb) [member function]
    cls.add_method('SetPromiscReceiveCallback', 'void', [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_virtual=True)
    ## virtual-net-device.h (module 'virtual-net-device'): void ns3::VirtualNetDevice::SetReceiveCallback(ns3::Callback<bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> cb) [member function]
    cls.add_method('SetReceiveCallback', 'void', [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_virtual=True)
    ## virtual-net-device.h (module 'virtual-net-device'): void ns3::VirtualNetDevice::SetSendCallback(ns3::Callback<bool, ns3::Ptr<ns3::Packet>, ns3::Address const&, ns3::Address const&, unsigned short, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> transmitCb) [member function]
    cls.add_method('SetSendCallback', 'void', [param('ns3::Callback< bool, ns3::Ptr< ns3::Packet >, ns3::Address const &, ns3::Address const &, unsigned short, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'transmitCb')])
    ## virtual-net-device.h (module 'virtual-net-device'): void ns3::VirtualNetDevice::SetSupportsSendFrom(bool supportsSendFrom) [member function]
    cls.add_method('SetSupportsSendFrom', 'void', [param('bool', 'supportsSendFrom')])
    ## virtual-net-device.h (module 'virtual-net-device'): bool ns3::VirtualNetDevice::SupportsSendFrom() const [member function]
    cls.add_method('SupportsSendFrom', 'bool', [], is_const=True, is_virtual=True)
    ## virtual-net-device.h (module 'virtual-net-device'): void ns3::VirtualNetDevice::DoDispose() [member function]
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
    return

def register_Ns3AddressChecker_methods(root_module, cls):
    """Register the ns3::AddressChecker API on the wrapped class (pybindgen-generated; do not edit by hand)."""
    ## address.h (module 'network'): ns3::AddressChecker::AddressChecker() [constructor]
    cls.add_constructor([])
    ## address.h (module 'network'): ns3::AddressChecker::AddressChecker(ns3::AddressChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AddressChecker const &', 'arg0')])
    return

def register_Ns3AddressValue_methods(root_module, cls):
    """Register the ns3::AddressValue API on the wrapped class (pybindgen-generated; do not edit by hand)."""
    ## address.h (module 'network'): ns3::AddressValue::AddressValue() [constructor]
    cls.add_constructor([])
    ## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::AddressValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AddressValue const &', 'arg0')])
    ## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::Address const & value) [constructor]
    cls.add_constructor([param('ns3::Address const &', 'value')])
    ## address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::AddressValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## address.h (module 'network'): bool ns3::AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    ## address.h (module 'network'): ns3::Address ns3::AddressValue::Get() const [member function]
    cls.add_method('Get', 'ns3::Address', [], is_const=True)
    ## address.h (module 'network'): std::string ns3::AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    ## address.h (module 'network'): void ns3::AddressValue::Set(ns3::Address const & value) [member function]
    cls.add_method('Set', 'void', [param('ns3::Address const &', 'value')])
    return

def register_Ns3HashImplementation_methods(root_module, cls):
    """Register the ns3::Hash::Implementation API on the wrapped class (pybindgen-generated; do not edit by hand)."""
    ## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation(ns3::Hash::Implementation const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Hash::Implementation const &', 'arg0')])
    ## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation() [constructor]
    cls.add_constructor([])
    ## hash-function.h (module 'core'): uint32_t ns3::Hash::Implementation::GetHash32(char const * buffer, size_t const size) [member function]
    cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_pure_virtual=True, is_virtual=True)
    ## hash-function.h (module 'core'): uint64_t ns3::Hash::Implementation::GetHash64(char const * buffer, size_t const size) [member function]
    cls.add_method('GetHash64', 'uint64_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True)
    ## hash-function.h (module 'core'): void ns3::Hash::Implementation::clear() [member function]
    cls.add_method('clear', 'void', [], is_pure_virtual=True, is_virtual=True)
    return

def register_Ns3HashFunctionFnv1a_methods(root_module, cls):
    """Register the ns3::Hash::Function::Fnv1a API on the wrapped class (pybindgen-generated; do not edit by hand)."""
    ## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a(ns3::Hash::Function::Fnv1a const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Hash::Function::Fnv1a const &', 'arg0')])
    ## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a() [constructor]
    cls.add_constructor([])
    ## hash-fnv.h (module 'core'): uint32_t ns3::Hash::Function::Fnv1a::GetHash32(char const * buffer, size_t const size) [member function]
    cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True)
    ## hash-fnv.h (module 'core'): uint64_t ns3::Hash::Function::Fnv1a::GetHash64(char const * buffer, size_t const size) [member function]
    cls.add_method('GetHash64', 'uint64_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True)
    ## hash-fnv.h (module 'core'): void ns3::Hash::Function::Fnv1a::clear() [member function]
    cls.add_method('clear', 'void', [], is_virtual=True)
    return

def register_Ns3HashFunctionHash32_methods(root_module, cls):
    """Register the ns3::Hash::Function::Hash32 API on the wrapped class (pybindgen-generated; do not edit by hand)."""
    ## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Function::Hash32 const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Hash::Function::Hash32 const &', 'arg0')])
    ## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Hash32Function_ptr hp) [constructor]
    cls.add_constructor([param('ns3::Hash::Hash32Function_ptr', 'hp')])
    ## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash32::GetHash32(char const * buffer, size_t const size) [member function]
    cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True)
    ## hash-function.h (module 'core'): void ns3::Hash::Function::Hash32::clear() [member function]
    cls.add_method('clear', 'void', [], is_virtual=True)
    return

def register_Ns3HashFunctionHash64_methods(root_module, cls):
    """Register the ns3::Hash::Function::Hash64 API on the wrapped class (pybindgen-generated; do not edit by hand)."""
    ## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Function::Hash64 const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Hash::Function::Hash64 const &', 'arg0')])
    ## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Hash64Function_ptr hp) [constructor]
    cls.add_constructor([param('ns3::Hash::Hash64Function_ptr', 'hp')])
    ## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash64::GetHash32(char const * buffer, size_t const size) [member function]
    cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True)
    ## hash-function.h (module 'core'): uint64_t ns3::Hash::Function::Hash64::GetHash64(char const * buffer, size_t const size) [member function]
    cls.add_method('GetHash64', 'uint64_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True)
    ## hash-function.h (module 'core'): void ns3::Hash::Function::Hash64::clear() [member function]
    cls.add_method('clear', 'void', [], is_virtual=True)
    return

def register_Ns3HashFunctionMurmur3_methods(root_module, cls):
    """Register the ns3::Hash::Function::Murmur3 API on the wrapped class (pybindgen-generated; do not edit by hand)."""
    ## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3(ns3::Hash::Function::Murmur3 const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Hash::Function::Murmur3 const &', 'arg0')])
    ## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3() [constructor]
    cls.add_constructor([])
    ## hash-murmur3.h (module 'core'): uint32_t ns3::Hash::Function::Murmur3::GetHash32(char const * buffer, size_t const size) [member function]
    cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True)
    ## hash-murmur3.h (module 'core'): uint64_t ns3::Hash::Function::Murmur3::GetHash64(char const * buffer, size_t const size) [member function]
    cls.add_method('GetHash64', 'uint64_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True)
    ## hash-murmur3.h (module 'core'): void ns3::Hash::Function::Murmur3::clear() [member function]
    cls.add_method('clear', 'void', [], is_virtual=True)
    return

def register_functions(root_module):
    """Register free functions for each ns-3 submodule (pybindgen-generated; do not edit by hand)."""
    module = root_module
    register_functions_ns3_FatalImpl(module.get_submodule('FatalImpl'), root_module)
    register_functions_ns3_Hash(module.get_submodule('Hash'), root_module)
    return

def register_functions_ns3_FatalImpl(module, root_module):
    """Register free functions of the ns3::FatalImpl submodule (none generated)."""
    return

def register_functions_ns3_Hash(module, root_module):
register_functions_ns3_Hash_Function(module.get_submodule('Function'), root_module) return def register_functions_ns3_Hash_Function(module, root_module): return def main(): out = FileCodeSink(sys.stdout) root_module = module_init() register_types(root_module) register_methods(root_module) register_functions(root_module) root_module.generate(out) if __name__ == '__main__': main()
gpl-2.0
RishonLi/PythonExecise
pythonExercise/python/NaiveBayes.py
1
3475
#-*- coding:utf-8 -*-
from __future__ import division


class BayesClassifier():
    """A naive Bayes classifier over discrete-valued features.

    train() estimates the class priors P(Y=b) and the conditional
    probabilities P(Fi=a|Y=b) by counting; classify() returns the label
    (as a string) whose posterior -- prior times the product of the
    conditionals -- is largest.
    """

    def __init__(self):
        pass

    def train(self, features, labels):
        """Estimate priors and conditional probabilities from samples.

        features -- list of samples, each a list of discrete feature values
        labels   -- one class label per sample (any str()-able value)
        """
        # Number of features per sample.  Read it from the FIRST sample so a
        # single-sample training set works too (the previous code indexed
        # features[1] and raised IndexError for fewer than two samples).
        featuresNum = len(features[0])
        self.sampleNum = len(features)  # total number of training samples
        self.countDic = {}              # raw occurrence counts per condition
        self.sumDic = {}                # estimated probabilities per condition
        self.labelSet = set([])         # distinct labels, stored as strings

        # Count how often each label value occurs, e.g. key 'Y=1'.
        for i in range(0, len(labels)):
            tempStr = 'Y=' + str(labels[i])
            self.labelSet.add(str(labels[i]))
            if tempStr in self.countDic:
                self.countDic[tempStr] += 1
            else:
                self.countDic[tempStr] = 1

        # Class priors: P(Y=b) = Count(Y=b) / N.
        for label in self.labelSet:
            tagName = 'Y=' + str(label)
            self.sumDic[tagName] = self.countDic[tagName] / self.sampleNum

        # Count every (feature value, label) combination, e.g. 'F0=1|Y=1'.
        for i in range(0, self.sampleNum):
            for j in range(0, featuresNum):
                featureTag = 'F' + str(j) + '=' + str(features[i][j]) + '|' + 'Y=' + str(labels[i])
                if featureTag in self.countDic:
                    self.countDic[featureTag] += 1
                else:
                    self.countDic[featureTag] = 1

        # Conditional probabilities: P(Fi=a|Y=b) = Count(Fi=a,Y=b) / Count(Y=b).
        for key in self.countDic.keys():
            if key.find('|') == -1:
                # A plain 'Y=b' key -- already handled above as a prior.
                continue
            tagName = key[key.find('|') + 1:]  # the 'Y=b' suffix of the key
            self.sumDic[key] = self.countDic[key] / self.countDic[tagName]

    def classify(self, feature):
        """Return the most probable label (as a string) for one sample.

        Returns None only if no label produced a positive posterior
        (previously this case raised NameError on targetLabel).
        """
        probabilityMap = {}
        for label in self.labelSet:
            currProb = 1.0
            for i in range(0, len(feature)):
                tempStr = 'F' + str(i) + '=' + str(feature[i]) + '|Y=' + label
                if tempStr not in self.countDic:
                    # Unseen feature value: the raw estimate of P(Fi=a|Y=b)
                    # would be 0; smooth it to 1/Count(Y=b)/N instead.
                    currProb *= (1.0 / self.sumDic['Y=' + label]) / self.sampleNum
                else:
                    currProb *= self.sumDic[tempStr]
            # Multiply in the prior P(Y=b) to get the (unnormalised) posterior.
            currProb *= self.sumDic['Y=' + label]
            probabilityMap[label] = currProb

        # Pick the label with the largest posterior.
        targetLabel = None
        maxProbability = 0.0
        for label in self.labelSet:
            if probabilityMap[label] > maxProbability:
                maxProbability = probabilityMap[label]
                targetLabel = label
        probabilityMap.clear()
        return targetLabel

    def __del__(self):
        # Guard with hasattr: train() may never have been called, in which
        # case these attributes do not exist and clearing them would raise.
        if hasattr(self, 'countDic'):
            self.countDic.clear()
        if hasattr(self, 'sumDic'):
            self.sumDic.clear()
apache-2.0
lyricat/Hotot
hotot/view.py
4
5423
# -*- coding: UTF-8 -*-
import gtk
gtk.gdk.threads_init() ## fix issue 24
import webkit
import agent
import config
from webkit import WebView
import utils
import json
import gobject
import os
import platform
try:
    import i18n
except:
    from gettext import gettext as _

# Drag-and-drop target id for 'text/uri-list' drops (files from the desktop).
TARGET_TYPE_URI_LIST = 80
dnd_list = [ ( 'text/uri-list', 0, TARGET_TYPE_URI_LIST ) ]

class MainView(WebView):
    # WebKit view hosting Hotot's HTML/JS UI; bridges JS <-> Python via
    # 'hotot:' pseudo-URIs and script alerts handed to the agent module.

    def __init__(self):
        WebView.__init__(self)
        self.load_finish_flag = False
        self.set_property('can-focus', True)
        self.set_property('can-default', True)
        self.set_full_content_zoom(1)
        self.clipbord = gtk.Clipboard()
        settings = self.get_settings()
        try:
            settings.set_property('enable-universal-access-from-file-uris', True)
            settings.set_property('javascript-can-access-clipboard', True)
            settings.set_property('enable-default-context-menu', True)
            settings.set_property('enable-page-cache', True)
            settings.set_property('tab-key-cycles-through-elements', True)
            settings.set_property('enable-file-access-from-file-uris', True)
            settings.set_property('enable-spell-checking', False)
            settings.set_property('enable-caret-browsing', False)
            try:
                # Since 1.7.5
                settings.set_property('enable-accelerated-compositing', True)
            except TypeError:
                pass
        except:
            # Older WebKitGTK builds lack some of the properties above.
            print 'Error: settings property was not set.'
        webkit.set_web_database_directory_path(config.DB_DIR)
        webkit.set_default_web_database_quota(1024**3L)
        ## bind events
        self.connect('navigation-requested', self.on_navigation_requested);
        self.connect('new-window-policy-decision-requested', self.on_new_window_requested);
        self.connect('script-alert', self.on_script_alert)
        self.connect('load-finished', self.on_load_finish)
        self.connect("hovering-over-link", self.on_over_link)
        # self.connect('drag_data_received', self.on_drag_data_received)
        # self.connect('drag_motion', self.on_drag_motion)
        # self.connect('drag_drop', self.on_drag_drop)
        # self.drag_dest_set(gtk.DEST_DEFAULT_DROP,
        #     dnd_list, gtk.gdk.ACTION_COPY)
        templatefile = utils.get_ui_object(config.TEMPLATE)
        template = open(templatefile, 'rb').read()
        self.load_html_string(template, 'file://' + templatefile)

    def on_navigation_requested(self, view, webframe, request):
        # Route every in-page navigation through handle_uri.
        return self.handle_uri(request.get_uri())

    def on_new_window_requested(self, view, frame, request, decision, u_data):
        # New-window requests are routed the same way as navigations.
        return self.handle_uri(request.get_uri())

    def handle_uri(self, uri):
        """Dispatch a URI: keep file:/about:/stat URIs in the view, hand
        'hotot:' actions to the agent, open anything else externally.
        Returns True when the navigation should be blocked."""
        if uri.startswith('file://'):
            return False
        elif uri.startswith('hotot:'):
            self.on_hotot_action(uri)
            return True
        elif uri.startswith('about:'):
            return True
        elif uri.startswith('http://stat.hotot.org'):
            return False
        else:
            utils.open_webbrowser(uri)
            return True

    def on_script_alert(self, view, webframe, message):
        # JS uses alert('hotot:...') as a message channel to Python.
        if message.startswith('hotot:'):
            self.on_hotot_action(message)
            return True
        return False

    def on_hotot_action(self, uri):
        # Strip the 'hotot:' scheme and pass the payload to the agent.
        if uri.startswith('hotot:'):
            agent.crack_hotot(uri[6:])
            return True

    def on_load_finish(self, view, webframe):
        """Runs once after the template finishes loading: publish wrapper
        variables into the web side and flip its load flag."""
        if self.load_finish_flag:
            return
        self.load_finish_flag = True;
        agent.webv = self
        # overlay extra variables of web part
        variables = {
            'platform': platform.system()
            , 'wrapper': 'python-gtk2'
            , 'conf_dir': config.CONF_DIR
            , 'cache_dir': config.CACHE_DIR
            , 'avatar_cache_dir': config.AVATAR_CACHE_DIR
            , 'extra_fonts': utils.get_extra_fonts()
            , 'extra_exts': utils.get_extra_exts()
            , 'extra_themes': utils.get_extra_themes()
            , 'locale': utils.get_locale()
        };
        # and then, notify web part i am ready to work :)
        gobject.idle_add(view.execute_script, '''
        overlay_variables(%s);
        globals.load_flags = 1;
        ''' % json.dumps(variables))

    def on_over_link(self, view, alt, href):
        # Show the hovered link target as a tooltip on the parent widget.
        href = href or ""
        if not alt and not href.startswith('file:'):
            self.parent.set_tooltip_text(href)

    def on_drag_motion(self, view, context, x, y, time):
        context.drag_status(gtk.gdk.ACTION_COPY, time)
        return True

    def on_drag_drop(self, view, context, x, y, time):
        context.finish(True, False, time)
        return True

    def on_drag_data_received(self, view, context, x, y, selection, target_type, time):
        """Accept a dropped file URI and hand it to the JS image uploader.
        (Currently unused -- the connect() calls above are commented out.)"""
        if target_type != TARGET_TYPE_URI_LIST:
            print target_type, 'is not supported.'
            return
        uri = selection.data.strip('\r\n\x00')
        # print 'uri', uri
        uri_splitted = uri.split()
        if len(uri_splitted) >= 1:
            path = utils.get_file_path_from_dnd_dropped_uri(uri_splitted[0])
        else:
            return
        if os.path.isfile(path):
            gobject.idle_add(view.execute_script, '''
            ui.ImageUploader.pyload("%s");
            ui.ImageUploader.show();
            ''' % path)
lgpl-3.0
Shrulik/Open-Knesset
video/management/commands/sub_commands/UpdateMembersAboutVideo.py
14
5374
# encoding: utf-8
from mks.models import Member
from video.utils import get_videos_queryset
from video.utils.youtube import GetYoutubeVideos
from video.management.commands.sub_commands import SubCommand
from video.utils.parse_dict import validate_dict
from video.models import Video

class UpdateMembersAboutVideo(SubCommand):
    """Find each member's YouTube 'business card' video and store it as
    that member's 'about' video, hiding any previous about/related copies.
    All the work happens in __init__ (SubCommand convention)."""

    def __init__(self,command,members=None,only_current_knesset=False,member_ids=[]):
        SubCommand.__init__(self,command)
        # Resolve the member set: explicit ids win, then the current-knesset
        # filter, otherwise all members.
        if members is None:
            if len(member_ids)>0:
                members=Member.objects.filter(id__in=member_ids)
            elif only_current_knesset is True:
                members=Member.current_knesset.filter(is_current=True)
                self._debug('only current knesset')
            else:
                members=Member.objects.all()
        self._debug('updating about videos for '+str(len(members))+' members')
        for member in members:
            self._debug(member.name)
            self._check_timer()
            sourceVideos=self._fetchSourceVideosOrderedByPublishedDesc(member)
            for sourceVideo in sourceVideos:
                if self._isValidSourceVideo(sourceVideo,member):
                    videos=self._getVideosFromSource(sourceVideo,member)
                    if len(videos)==0:
                        # the source video does not exist in our database
                        # this is the about video for this member!
                        self._updateMemberAboutVideo(sourceVideo,member)
                        break
                    else:
                        # got some videos that match the source video
                        # check if any of them are 'related' videos
                        relatedVideo=None
                        for video in videos:
                            if video.group=='related' and not video.sticky and not video.hide:
                                relatedVideo=video
                                break
                        if relatedVideo is not None:
                            # got a related video that is not sticky and not hidden
                            # hide it in the related and create it again as about
                            # (we could just change the video's group field
                            # but it's better to create it again to make sure
                            # it's got all the relevant data)
                            self._hideRelatedVideo(relatedVideo)
                            self._updateMemberAboutVideo(sourceVideo,member)
                        else:
                            break

    def _fetchSourceVideosOrderedByPublishedDesc(self,member):
        """Search YouTube under each of the member's names and return the
        matching videos, newest first."""
        videos=[]
        for name in member.names:
            for video in self._fetchSourceVideos(name):
                # Keep only results that carry the needed keys and actually
                # mention the member's name in the title.
                if validate_dict(video,['published','title']) and name in video['title']:
                    videos.append(video)
        return sorted(videos,key=lambda video: video['published'], reverse=True)

    def _fetchSourceVideos(self,name):
        # Query: Hebrew for "business card, Knesset Channel" + member name.
        return self._getYoutubeVideos(q=u"כרטיס ביקור ערוץ הכנסת "+name)

    def _isValidSourceVideo(self,video,member):
        """True if the video dict has every field we persist and its
        title+description mention both the 'business card' phrase, the
        Knesset Channel, and one of the member's names."""
        ans=False
        if validate_dict(video,[
            'title','embed_url_autoplay','thumbnail480x360',
            'id','description','link','published'
        ]):
            titledesc=video['title']+video['description']
            if (
                u'כרטיס ביקור' in titledesc
                and u'ערוץ הכנסת' in titledesc
            ):
                for name in member.names:
                    if name in titledesc:
                        ans=True
                        break
        return ans

    def _getVideosFromSource(self,sourceVideo,member):
        # Look up our own Video rows (including hidden ones) that came from
        # this YouTube source id.
        return self._getVideos(
            getVideosQuerysetParams={'obj':member, 'ignoreHide':True},
            filterParams={'source_id':sourceVideo['id'],'source_type':'youtube'}
        )

    def _updateMemberAboutVideo(self,sourceVideo,member):
        # Hide any existing 'about' videos first, then persist the new one.
        self._hideMemberAboutVideos(member)
        self._saveVideo({
            'embed_link':sourceVideo['embed_url_autoplay'],
            'image_link':sourceVideo['thumbnail480x360'],
            'title':sourceVideo['title'],
            'description':sourceVideo['description'],
            'link':sourceVideo['link'],
            'source_type':'youtube',
            'source_id':sourceVideo['id'],
            'published':sourceVideo['published'],
            'group':'about',
            'content_object':member
        })

    # the following functions perform low level operations that will not be performed when testing
    # e.g. saving database data or fetching from remote sites

    def _getYoutubeVideos(self,**kwargs):
        return GetYoutubeVideos(**kwargs).videos

    def _getVideos(self, getVideosQuerysetParams, filterParams):
        return get_videos_queryset(**getVideosQuerysetParams).filter(**filterParams)

    def _saveVideo(self,videoFields):
        v=Video(**videoFields)
        v.save()

    def _hideRelatedVideo(self,video):
        video.hide=True
        video.save()

    def _hideMemberAboutVideos(self,member):
        videos=get_videos_queryset(member,group='about')
        for video in videos:
            video.hide=True
            video.save()
bsd-3-clause
wiml/pycurl.github.com
examples/file_upload.py
13
1239
#! /usr/bin/env python # -*- coding: iso-8859-1 -*- # vi:ts=4:et # $Id: file_upload.py,v 1.5 2005/02/13 08:53:13 mfx Exp $ import os, sys import pycurl # Class which holds a file reference and the read callback class FileReader: def __init__(self, fp): self.fp = fp def read_callback(self, size): return self.fp.read(size) # Check commandline arguments if len(sys.argv) < 3: print "Usage: %s <url> <file to upload>" % sys.argv[0] raise SystemExit url = sys.argv[1] filename = sys.argv[2] if not os.path.exists(filename): print "Error: the file '%s' does not exist" % filename raise SystemExit # Initialize pycurl c = pycurl.Curl() c.setopt(pycurl.URL, url) c.setopt(pycurl.UPLOAD, 1) # Two versions with the same semantics here, but the filereader version # is useful when you have to process the data which is read before returning if 1: c.setopt(pycurl.READFUNCTION, FileReader(open(filename, 'rb')).read_callback) else: c.setopt(pycurl.READFUNCTION, open(filename, 'rb').read) # Set size of file to be uploaded. filesize = os.path.getsize(filename) c.setopt(pycurl.INFILESIZE, filesize) # Start transfer print 'Uploading file %s to url %s' % (filename, url) c.perform() c.close()
lgpl-2.1
nypl-spacetime/oldnyc
viewer/simplejson/tests/test_indent.py
78
2570
from unittest import TestCase
import simplejson as json
import textwrap
from StringIO import StringIO

class TestIndent(TestCase):
    # Exercises simplejson's `indent` and `separators` dump parameters.

    def test_indent(self):
        # A nested structure mixing lists, strings and dicts.
        h = [['blorpie'], ['whoops'], [], 'd-shtaeou', 'd-nthiouh', 'i-vhbjkhnth',
             {'nifty': 87}, {'field': 'yes', 'morefield': False} ]

        # dedent strips the common 8-space prefix, leaving the literal tabs
        # as the indentation characters of the expected output.
        expect = textwrap.dedent("""\
        [
        \t[
        \t\t"blorpie"
        \t],
        \t[
        \t\t"whoops"
        \t],
        \t[],
        \t"d-shtaeou",
        \t"d-nthiouh",
        \t"i-vhbjkhnth",
        \t{
        \t\t"nifty": 87
        \t},
        \t{
        \t\t"field": "yes",
        \t\t"morefield": false
        \t}
        ]""")

        d1 = json.dumps(h)
        d2 = json.dumps(h, indent='\t', sort_keys=True, separators=(',', ': '))
        d3 = json.dumps(h, indent='  ', sort_keys=True, separators=(',', ': '))
        d4 = json.dumps(h, indent=2, sort_keys=True, separators=(',', ': '))

        # Every variant must round-trip back to the original structure.
        h1 = json.loads(d1)
        h2 = json.loads(d2)
        h3 = json.loads(d3)
        h4 = json.loads(d4)

        self.assertEquals(h1, h)
        self.assertEquals(h2, h)
        self.assertEquals(h3, h)
        self.assertEquals(h4, h)
        # A two-space string indent and an integer indent of 2 are equivalent.
        self.assertEquals(d3, expect.replace('\t', '  '))
        self.assertEquals(d4, expect.replace('\t', '  '))
        # NOTE: Python 2.4 textwrap.dedent converts tabs to spaces,
        # so the following is expected to fail. Python 2.4 is not a
        # supported platform in simplejson 2.1.0+.
        self.assertEquals(d2, expect)

    def test_indent0(self):
        h = {3: 1}
        def check(indent, expected):
            # Check both dumps() and dump()-to-a-stream produce `expected`.
            d1 = json.dumps(h, indent=indent)
            self.assertEquals(d1, expected)

            sio = StringIO()
            json.dump(h, sio, indent=indent)
            self.assertEquals(sio.getvalue(), expected)

        # indent=0 should emit newlines
        check(0, '{\n"3": 1\n}')
        # indent=None is more compact
        check(None, '{"3": 1}')

    def test_separators(self):
        lst = [1,2,3,4]
        expect = '[\n1,\n2,\n3,\n4\n]'
        expect_spaces = '[\n1, \n2, \n3, \n4\n]'
        # Ensure that separators still works
        self.assertEquals(
            expect_spaces,
            json.dumps(lst, indent=0, separators=(', ', ': ')))
        # Force the new defaults
        self.assertEquals(
            expect,
            json.dumps(lst, indent=0, separators=(',', ': ')))
        # Added in 2.1.4
        self.assertEquals(
            expect,
            json.dumps(lst, indent=0))
apache-2.0
atuljain/odoo
addons/account_voucher/report/account_voucher_sales_receipt.py
56
6527
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

from openerp.osv import fields, osv
from openerp import tools

class sale_receipt_report(osv.osv):
    """Read-only statistics model backed by a SQL view (_auto=False):
    one row per sale voucher, aggregated from its voucher lines."""
    _name = "sale.receipt.report"
    _description = "Sales Receipt Statistics"
    _auto = False          # no table is created; init() builds the view below
    _rec_name = 'date'
    _columns = {
        'date': fields.date('Date', readonly=True),
        'year': fields.char('Year', size=4, readonly=True),
        'day': fields.char('Day', size=128, readonly=True),
        'month': fields.selection([('01','January'), ('02','February'), ('03','March'), ('04','April'),
            ('05','May'), ('06','June'), ('07','July'), ('08','August'), ('09','September'),
            ('10','October'), ('11','November'), ('12','December')], 'Month', readonly=True),
        'currency_id': fields.many2one('res.currency', 'Currency', readonly=True),
        'journal_id': fields.many2one('account.journal', 'Journal', readonly=True),
        'partner_id': fields.many2one('res.partner', 'Partner', readonly=True),
        'company_id': fields.many2one('res.company', 'Company', readonly=True),
        'user_id': fields.many2one('res.users', 'Salesperson', readonly=True),
        'price_total': fields.float('Total Without Tax', readonly=True),
        'price_total_tax': fields.float('Total With Tax', readonly=True),
        'nbr':fields.integer('# of Voucher Lines', readonly=True),
        'type': fields.selection([
            ('sale','Sale'),
            ('purchase','Purchase'),
            ('payment','Payment'),
            ('receipt','Receipt'),
            ],'Type', readonly=True),
        'state': fields.selection([
            ('draft','Draft'),
            ('proforma','Pro-forma'),
            ('posted','Posted'),
            ('cancel','Cancelled')
            ], 'Voucher Status', readonly=True),
        'pay_now':fields.selection([
            ('pay_now','Pay Directly'),
            ('pay_later','Pay Later or Group Funds'),
            ],'Payment', readonly=True),
        'date_due': fields.date('Due Date', readonly=True),
        'account_id': fields.many2one('account.account', 'Account',readonly=True),
        # Averaged (not summed) when rows are grouped in the UI.
        'delay_to_pay': fields.float('Avg. Delay To Pay', readonly=True, group_operator="avg"),
        'due_delay': fields.float('Avg. Due Delay', readonly=True, group_operator="avg")
    }
    _order = 'date desc'

    def init(self, cr):
        """(Re)create the backing SQL view on module install/update."""
        tools.drop_view_if_exists(cr, 'sale_receipt_report')
        # One row per voucher: line amounts are averaged over the voucher's
        # line count; delays are day differences derived from the move lines.
        cr.execute("""
            create or replace view sale_receipt_report as (
                select min(avl.id) as id,
                    av.date as date,
                    to_char(av.date, 'YYYY') as year,
                    to_char(av.date, 'MM') as month,
                    to_char(av.date, 'YYYY-MM-DD') as day,
                    av.partner_id as partner_id,
                    aj.currency as currency_id,
                    av.journal_id as journal_id,
                    rp.user_id as user_id,
                    av.company_id as company_id,
                    count(avl.*) as nbr,
                    av.type as type,
                    av.state,
                    av.pay_now,
                    av.date_due as date_due,
                    av.account_id as account_id,
                    sum(av.amount-av.tax_amount)/(select count(l.id) from account_voucher_line as l
                        left join account_voucher as a ON (a.id=l.voucher_id)
                        where a.id=av.id) as price_total,
                    sum(av.amount)/(select count(l.id) from account_voucher_line as l
                        left join account_voucher as a ON (a.id=l.voucher_id)
                        where a.id=av.id) as price_total_tax,
                    sum((select extract(epoch from avg(date_trunc('day',aml.date_created)-date_trunc('day',l.create_date)))/(24*60*60)::decimal(16,2)
                        from account_move_line as aml
                        left join account_voucher as a ON (a.move_id=aml.move_id)
                        left join account_voucher_line as l ON (a.id=l.voucher_id)
                        where a.id=av.id)) as delay_to_pay,
                    sum((select extract(epoch from avg(date_trunc('day',a.date_due)-date_trunc('day',a.date)))/(24*60*60)::decimal(16,2)
                        from account_move_line as aml
                        left join account_voucher as a ON (a.move_id=aml.move_id)
                        left join account_voucher_line as l ON (a.id=l.voucher_id)
                        where a.id=av.id)) as due_delay
                from account_voucher_line as avl
                left join account_voucher as av on (av.id=avl.voucher_id)
                left join res_partner as rp ON (rp.id=av.partner_id)
                left join account_journal as aj ON (aj.id=av.journal_id)
                where av.type='sale' and aj.type in ('sale','sale_refund')
                group by av.date, av.id, to_char(av.date, 'YYYY'), to_char(av.date, 'MM'),
                    to_char(av.date, 'YYYY-MM-DD'), av.partner_id, aj.currency, av.journal_id,
                    rp.user_id, av.company_id, av.type, av.state, av.date_due, av.account_id,
                    av.tax_amount, av.amount, av.tax_amount, av.pay_now
            )
        """)

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
martynovp/edx-platform
common/djangoapps/enrollment/serializers.py
23
3994
""" Serializers for all Course Enrollment related return objects. """ import logging from rest_framework import serializers from student.models import CourseEnrollment from course_modes.models import CourseMode log = logging.getLogger(__name__) class StringListField(serializers.CharField): """Custom Serializer for turning a comma delimited string into a list. This field is designed to take a string such as "1,2,3" and turn it into an actual list [1,2,3] """ def field_to_native(self, obj, field_name): """ Serialize the object's class name. """ if not obj.suggested_prices: return [] items = obj.suggested_prices.split(',') return [int(item) for item in items] class CourseField(serializers.RelatedField): """Read-Only representation of course enrollment information. Aggregates course information from the CourseDescriptor as well as the Course Modes configured for enrolling in the course. """ def to_native(self, course, **kwargs): course_id = unicode(course.id) course_modes = ModeSerializer( CourseMode.modes_for_course(course.id, kwargs.get('include_expired', False), only_selectable=False) ).data # pylint: disable=no-member return { "course_id": course_id, "enrollment_start": course.enrollment_start, "enrollment_end": course.enrollment_end, "course_start": course.start, "course_end": course.end, "invite_only": course.invitation_only, "course_modes": course_modes, } class CourseEnrollmentSerializer(serializers.ModelSerializer): """Serializes CourseEnrollment models Aggregates all data from the Course Enrollment table, and pulls in the serialization for the Course Descriptor and course modes, to give a complete representation of course enrollment. 
""" course_details = serializers.SerializerMethodField('get_course_details') user = serializers.SerializerMethodField('get_username') @property def data(self): serialized_data = super(CourseEnrollmentSerializer, self).data # filter the results with empty courses 'course_details' if isinstance(serialized_data, dict): if serialized_data.get('course_details') is None: return None return serialized_data return [enrollment for enrollment in serialized_data if enrollment.get('course_details')] def get_course_details(self, model): if model.course is None: msg = u"Course '{0}' does not exist (maybe deleted), in which User (user_id: '{1}') is enrolled.".format( model.course_id, model.user.id ) log.warning(msg) return None field = CourseField() return field.to_native(model.course) def get_username(self, model): """Retrieves the username from the associated model.""" return model.username class Meta(object): # pylint: disable=missing-docstring model = CourseEnrollment fields = ('created', 'mode', 'is_active', 'course_details', 'user') lookup_field = 'username' class ModeSerializer(serializers.Serializer): """Serializes a course's 'Mode' tuples Returns a serialized representation of the modes available for course enrollment. The course modes models are designed to return a tuple instead of the model object itself. This serializer does not handle the model object itself, but the tuple. """ slug = serializers.CharField(max_length=100) name = serializers.CharField(max_length=255) min_price = serializers.IntegerField() suggested_prices = StringListField(max_length=255) currency = serializers.CharField(max_length=8) expiration_datetime = serializers.DateTimeField() description = serializers.CharField() sku = serializers.CharField()
agpl-3.0
ric2b/Vivaldi-browser
chromium/third_party/blink/web_tests/external/wpt/tools/pywebsocket/test/test_handshake_hybi.py
8
21628
#!/usr/bin/env python # # Copyright 2011, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Tests for handshake module.""" import unittest import set_sys_path # Update sys.path to locate mod_pywebsocket module. 
from mod_pywebsocket import common from mod_pywebsocket.handshake._base import AbortedByUserException from mod_pywebsocket.handshake._base import HandshakeException from mod_pywebsocket.handshake._base import VersionException from mod_pywebsocket.handshake.hybi import Handshaker import mock class RequestDefinition(object): """A class for holding data for constructing opening handshake strings for testing the opening handshake processor. """ def __init__(self, method, uri, headers): self.method = method self.uri = uri self.headers = headers def _create_good_request_def(): return RequestDefinition( 'GET', '/demo', {'Host': 'server.example.com', 'Upgrade': 'websocket', 'Connection': 'Upgrade', 'Sec-WebSocket-Key': 'dGhlIHNhbXBsZSBub25jZQ==', 'Sec-WebSocket-Version': '13', 'Origin': 'http://example.com'}) def _create_request(request_def): conn = mock.MockConn('') return mock.MockRequest( method=request_def.method, uri=request_def.uri, headers_in=request_def.headers, connection=conn) def _create_handshaker(request): handshaker = Handshaker(request, mock.MockDispatcher()) return handshaker class SubprotocolChoosingDispatcher(object): """A dispatcher for testing. This dispatcher sets the i-th subprotocol of requested ones to ws_protocol where i is given on construction as index argument. If index is negative, default_value will be set to ws_protocol. """ def __init__(self, index, default_value=None): self.index = index self.default_value = default_value def do_extra_handshake(self, conn_context): if self.index >= 0: conn_context.ws_protocol = conn_context.ws_requested_protocols[ self.index] else: conn_context.ws_protocol = self.default_value def transfer_data(self, conn_context): pass class HandshakeAbortedException(Exception): pass class AbortingDispatcher(object): """A dispatcher for testing. This dispatcher raises an exception in do_extra_handshake to reject the request. 
""" def do_extra_handshake(self, conn_context): raise HandshakeAbortedException('An exception to reject the request') def transfer_data(self, conn_context): pass class AbortedByUserDispatcher(object): """A dispatcher for testing. This dispatcher raises an AbortedByUserException in do_extra_handshake to reject the request. """ def do_extra_handshake(self, conn_context): raise AbortedByUserException('An AbortedByUserException to reject the ' 'request') def transfer_data(self, conn_context): pass _EXPECTED_RESPONSE = ( 'HTTP/1.1 101 Switching Protocols\r\n' 'Upgrade: websocket\r\n' 'Connection: Upgrade\r\n' 'Sec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo=\r\n\r\n') class HandshakerTest(unittest.TestCase): """A unittest for draft-ietf-hybi-thewebsocketprotocol-06 and later handshake processor. """ def test_do_handshake(self): request = _create_request(_create_good_request_def()) dispatcher = mock.MockDispatcher() handshaker = Handshaker(request, dispatcher) handshaker.do_handshake() self.assertTrue(dispatcher.do_extra_handshake_called) self.assertEqual( _EXPECTED_RESPONSE, request.connection.written_data()) self.assertEqual('/demo', request.ws_resource) self.assertEqual('http://example.com', request.ws_origin) self.assertEqual(None, request.ws_protocol) self.assertEqual(None, request.ws_extensions) self.assertEqual(common.VERSION_HYBI_LATEST, request.ws_version) def test_do_handshake_with_extra_headers(self): request_def = _create_good_request_def() # Add headers not related to WebSocket opening handshake. 
request_def.headers['FooKey'] = 'BarValue' request_def.headers['EmptyKey'] = '' request = _create_request(request_def) handshaker = _create_handshaker(request) handshaker.do_handshake() self.assertEqual( _EXPECTED_RESPONSE, request.connection.written_data()) def test_do_handshake_with_capitalized_value(self): request_def = _create_good_request_def() request_def.headers['upgrade'] = 'WEBSOCKET' request = _create_request(request_def) handshaker = _create_handshaker(request) handshaker.do_handshake() self.assertEqual( _EXPECTED_RESPONSE, request.connection.written_data()) request_def = _create_good_request_def() request_def.headers['Connection'] = 'UPGRADE' request = _create_request(request_def) handshaker = _create_handshaker(request) handshaker.do_handshake() self.assertEqual( _EXPECTED_RESPONSE, request.connection.written_data()) def test_do_handshake_with_multiple_connection_values(self): request_def = _create_good_request_def() request_def.headers['Connection'] = 'Upgrade, keep-alive, , ' request = _create_request(request_def) handshaker = _create_handshaker(request) handshaker.do_handshake() self.assertEqual( _EXPECTED_RESPONSE, request.connection.written_data()) def test_aborting_handshake(self): handshaker = Handshaker( _create_request(_create_good_request_def()), AbortingDispatcher()) # do_extra_handshake raises an exception. Check that it's not caught by # do_handshake. 
self.assertRaises(HandshakeAbortedException, handshaker.do_handshake) def test_do_handshake_with_protocol(self): request_def = _create_good_request_def() request_def.headers['Sec-WebSocket-Protocol'] = 'chat, superchat' request = _create_request(request_def) handshaker = Handshaker(request, SubprotocolChoosingDispatcher(0)) handshaker.do_handshake() EXPECTED_RESPONSE = ( 'HTTP/1.1 101 Switching Protocols\r\n' 'Upgrade: websocket\r\n' 'Connection: Upgrade\r\n' 'Sec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo=\r\n' 'Sec-WebSocket-Protocol: chat\r\n\r\n') self.assertEqual(EXPECTED_RESPONSE, request.connection.written_data()) self.assertEqual('chat', request.ws_protocol) def test_do_handshake_protocol_not_in_request_but_in_response(self): request_def = _create_good_request_def() request = _create_request(request_def) handshaker = Handshaker( request, SubprotocolChoosingDispatcher(-1, 'foobar')) # No request has been made but ws_protocol is set. HandshakeException # must be raised. self.assertRaises(HandshakeException, handshaker.do_handshake) def test_do_handshake_with_protocol_no_protocol_selection(self): request_def = _create_good_request_def() request_def.headers['Sec-WebSocket-Protocol'] = 'chat, superchat' request = _create_request(request_def) handshaker = _create_handshaker(request) # ws_protocol is not set. HandshakeException must be raised. 
self.assertRaises(HandshakeException, handshaker.do_handshake) def test_do_handshake_with_extensions(self): request_def = _create_good_request_def() request_def.headers['Sec-WebSocket-Extensions'] = ( 'permessage-deflate; server_no_context_takeover') EXPECTED_RESPONSE = ( 'HTTP/1.1 101 Switching Protocols\r\n' 'Upgrade: websocket\r\n' 'Connection: Upgrade\r\n' 'Sec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo=\r\n' 'Sec-WebSocket-Extensions: ' 'permessage-deflate; server_no_context_takeover\r\n' '\r\n') request = _create_request(request_def) handshaker = _create_handshaker(request) handshaker.do_handshake() self.assertEqual(EXPECTED_RESPONSE, request.connection.written_data()) self.assertEqual(1, len(request.ws_extensions)) extension = request.ws_extensions[0] self.assertEqual(common.PERMESSAGE_DEFLATE_EXTENSION, extension.name()) self.assertEqual(['server_no_context_takeover'], extension.get_parameter_names()) self.assertEqual(None, extension.get_parameter_value( 'server_no_context_takeover')) self.assertEqual(1, len(request.ws_extension_processors)) self.assertEqual('deflate', request.ws_extension_processors[0].name()) def test_do_handshake_with_quoted_extensions(self): request_def = _create_good_request_def() request_def.headers['Sec-WebSocket-Extensions'] = ( 'permessage-deflate, , ' 'unknown; e = "mc^2"; ma="\r\n \\\rf "; pv=nrt') request = _create_request(request_def) handshaker = _create_handshaker(request) handshaker.do_handshake() self.assertEqual(2, len(request.ws_requested_extensions)) first_extension = request.ws_requested_extensions[0] self.assertEqual('permessage-deflate', first_extension.name()) second_extension = request.ws_requested_extensions[1] self.assertEqual('unknown', second_extension.name()) self.assertEqual( ['e', 'ma', 'pv'], second_extension.get_parameter_names()) self.assertEqual('mc^2', second_extension.get_parameter_value('e')) self.assertEqual(' \rf ', second_extension.get_parameter_value('ma')) self.assertEqual('nrt', 
second_extension.get_parameter_value('pv')) def test_do_handshake_with_optional_headers(self): request_def = _create_good_request_def() request_def.headers['EmptyValue'] = '' request_def.headers['AKey'] = 'AValue' request = _create_request(request_def) handshaker = _create_handshaker(request) handshaker.do_handshake() self.assertEqual( 'AValue', request.headers_in['AKey']) self.assertEqual( '', request.headers_in['EmptyValue']) def test_abort_extra_handshake(self): handshaker = Handshaker( _create_request(_create_good_request_def()), AbortedByUserDispatcher()) # do_extra_handshake raises an AbortedByUserException. Check that it's # not caught by do_handshake. self.assertRaises(AbortedByUserException, handshaker.do_handshake) def test_do_handshake_with_mux_and_deflate_frame(self): request_def = _create_good_request_def() request_def.headers['Sec-WebSocket-Extensions'] = ('%s, %s' % ( common.MUX_EXTENSION, common.DEFLATE_FRAME_EXTENSION)) request = _create_request(request_def) handshaker = _create_handshaker(request) handshaker.do_handshake() # mux should be rejected. self.assertEqual(1, len(request.ws_extensions)) self.assertEqual(common.DEFLATE_FRAME_EXTENSION, request.ws_extensions[0].name()) self.assertEqual(2, len(request.ws_extension_processors)) self.assertEqual(common.MUX_EXTENSION, request.ws_extension_processors[0].name()) self.assertEqual(common.DEFLATE_FRAME_EXTENSION, request.ws_extension_processors[1].name()) self.assertFalse(hasattr(request, 'mux_processor')) def test_do_handshake_with_deflate_frame_and_mux(self): request_def = _create_good_request_def() request_def.headers['Sec-WebSocket-Extensions'] = ('%s, %s' % ( common.DEFLATE_FRAME_EXTENSION, common.MUX_EXTENSION)) request = _create_request(request_def) handshaker = _create_handshaker(request) handshaker.do_handshake() # mux should be rejected. 
self.assertEqual(1, len(request.ws_extensions)) first_extension = request.ws_extensions[0] self.assertEqual(common.DEFLATE_FRAME_EXTENSION, first_extension.name()) self.assertEqual(2, len(request.ws_extension_processors)) self.assertEqual(common.DEFLATE_FRAME_EXTENSION, request.ws_extension_processors[0].name()) self.assertEqual(common.MUX_EXTENSION, request.ws_extension_processors[1].name()) self.assertFalse(hasattr(request, 'mux')) def test_do_handshake_with_permessage_deflate_and_mux(self): request_def = _create_good_request_def() request_def.headers['Sec-WebSocket-Extensions'] = ( '%s, %s' % ( common.PERMESSAGE_DEFLATE_EXTENSION, common.MUX_EXTENSION)) request = _create_request(request_def) handshaker = _create_handshaker(request) handshaker.do_handshake() self.assertEqual(1, len(request.ws_extensions)) self.assertEqual(common.MUX_EXTENSION, request.ws_extensions[0].name()) self.assertEqual(2, len(request.ws_extension_processors)) self.assertEqual('deflate', request.ws_extension_processors[0].name()) self.assertEqual(common.MUX_EXTENSION, request.ws_extension_processors[1].name()) self.assertTrue(hasattr(request, 'mux_processor')) self.assertTrue(request.mux_processor.is_active()) mux_extensions = request.mux_processor.extensions() self.assertEqual(1, len(mux_extensions)) self.assertEqual(common.PERMESSAGE_DEFLATE_EXTENSION, mux_extensions[0].name()) def test_do_handshake_with_mux_and_permessage_deflate(self): request_def = _create_good_request_def() request_def.headers['Sec-WebSocket-Extensions'] = ( '%s, %s' % ( common.MUX_EXTENSION, common.PERMESSAGE_DEFLATE_EXTENSION)) request = _create_request(request_def) handshaker = _create_handshaker(request) handshaker.do_handshake() # mux should be rejected. 
self.assertEqual(1, len(request.ws_extensions)) first_extension = request.ws_extensions[0] self.assertEqual(common.PERMESSAGE_DEFLATE_EXTENSION, first_extension.name()) self.assertEqual(2, len(request.ws_extension_processors)) self.assertEqual(common.MUX_EXTENSION, request.ws_extension_processors[0].name()) self.assertEqual('deflate', request.ws_extension_processors[1].name()) self.assertFalse(hasattr(request, 'mux_processor')) def test_bad_requests(self): bad_cases = [ ('HTTP request', RequestDefinition( 'GET', '/demo', {'Host': 'www.google.com', 'User-Agent': 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.5;' ' en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3' ' GTB6 GTBA', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,' '*/*;q=0.8', 'Accept-Language': 'en-us,en;q=0.5', 'Accept-Encoding': 'gzip,deflate', 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7', 'Keep-Alive': '300', 'Connection': 'keep-alive'}), None, True)] request_def = _create_good_request_def() request_def.method = 'POST' bad_cases.append(('Wrong method', request_def, None, True)) request_def = _create_good_request_def() del request_def.headers['Host'] bad_cases.append(('Missing Host', request_def, None, True)) request_def = _create_good_request_def() del request_def.headers['Upgrade'] bad_cases.append(('Missing Upgrade', request_def, None, True)) request_def = _create_good_request_def() request_def.headers['Upgrade'] = 'nonwebsocket' bad_cases.append(('Wrong Upgrade', request_def, None, True)) request_def = _create_good_request_def() del request_def.headers['Connection'] bad_cases.append(('Missing Connection', request_def, None, True)) request_def = _create_good_request_def() request_def.headers['Connection'] = 'Downgrade' bad_cases.append(('Wrong Connection', request_def, None, True)) request_def = _create_good_request_def() del request_def.headers['Sec-WebSocket-Key'] bad_cases.append(('Missing Sec-WebSocket-Key', request_def, 400, True)) request_def = _create_good_request_def() 
request_def.headers['Sec-WebSocket-Key'] = ( 'dGhlIHNhbXBsZSBub25jZQ==garbage') bad_cases.append(('Wrong Sec-WebSocket-Key (with garbage on the tail)', request_def, 400, True)) request_def = _create_good_request_def() request_def.headers['Sec-WebSocket-Key'] = 'YQ==' # BASE64 of 'a' bad_cases.append( ('Wrong Sec-WebSocket-Key (decoded value is not 16 octets long)', request_def, 400, True)) request_def = _create_good_request_def() # The last character right before == must be any of A, Q, w and g. request_def.headers['Sec-WebSocket-Key'] = ( 'AQIDBAUGBwgJCgsMDQ4PEC==') bad_cases.append( ('Wrong Sec-WebSocket-Key (padding bits are not zero)', request_def, 400, True)) request_def = _create_good_request_def() request_def.headers['Sec-WebSocket-Key'] = ( 'dGhlIHNhbXBsZSBub25jZQ==,dGhlIHNhbXBsZSBub25jZQ==') bad_cases.append( ('Wrong Sec-WebSocket-Key (multiple values)', request_def, 400, True)) request_def = _create_good_request_def() del request_def.headers['Sec-WebSocket-Version'] bad_cases.append(('Missing Sec-WebSocket-Version', request_def, None, True)) request_def = _create_good_request_def() request_def.headers['Sec-WebSocket-Version'] = '3' bad_cases.append(('Wrong Sec-WebSocket-Version', request_def, None, False)) request_def = _create_good_request_def() request_def.headers['Sec-WebSocket-Version'] = '13, 13' bad_cases.append(('Wrong Sec-WebSocket-Version (multiple values)', request_def, 400, True)) request_def = _create_good_request_def() request_def.headers['Sec-WebSocket-Protocol'] = 'illegal\x09protocol' bad_cases.append(('Illegal Sec-WebSocket-Protocol', request_def, 400, True)) request_def = _create_good_request_def() request_def.headers['Sec-WebSocket-Protocol'] = '' bad_cases.append(('Empty Sec-WebSocket-Protocol', request_def, 400, True)) for (case_name, request_def, expected_status, expect_handshake_exception) in bad_cases: request = _create_request(request_def) handshaker = Handshaker(request, mock.MockDispatcher()) try: handshaker.do_handshake() 
self.fail('No exception thrown for \'%s\' case' % case_name) except HandshakeException as e: self.assertTrue(expect_handshake_exception) self.assertEqual(expected_status, e.status) except VersionException as e: self.assertFalse(expect_handshake_exception) if __name__ == '__main__': unittest.main() # vi:sts=4 sw=4 et
bsd-3-clause
jpallas/beakerx
beakerx/beakerx_magics/groovy_magic.py
1
1190
# Copyright 2017 TWO SIGMA OPEN SOURCE, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from IPython import get_ipython from IPython.core.magic import (magics_class, cell_magic) from .kernel_magic import KernelMagics @magics_class class GroovyMagics(KernelMagics): def __init__(self, shell): super(GroovyMagics, self).__init__(shell) def start(self): super(GroovyMagics, self).start('groovy') @cell_magic def groovy(self, line, cell): return self.run_cell(line, cell) def load_ipython_extension(ipython): ipython.register_magics(GroovyMagics) if __name__ == '__main__': ip = get_ipython() ip.register_magics(GroovyMagics)
apache-2.0
jalama/drupdates
drupdates/tests/__init__.py
1
5728
""" Build the base repos testing repos are cloned from. """ import drupdates, git, os, shutil, yaml, glob, nose, subprocess from os.path import expanduser from git import Repo from drupdates.drush import Drush from drupdates.settings import DrupdatesError def setup_package(): """ Setup the basic base repos and base directory. """ setup_tests = Setup() setup_tests.build_directory() setup_tests.build_base_repos() def teardown_package(): """ Teardown the testing directory. """ setup_tests = Setup() setup_tests.destroy_directory() class Setup(object): """ Set-up the basic test repos for other tests to clone and run. """ def __init__(self): self.test_dir = os.path.join(os.path.expanduser('~'), '.drupdates', 'testing') self.make_file = '' self.current_dir = os.path.dirname(os.path.realpath(__file__)) def build_directory(self): """ Build the base testing directory. """ if not os.path.isdir(self.test_dir): os.makedirs(self.test_dir) files = [] directory = os.path.join(expanduser('~'), '.drupdates') files.append(os.path.join(directory, 'settings.yaml')) files.append(os.path.join(directory, 'report.yaml')) files.append(os.path.join(directory, 'report.json')) files.append(os.path.join(directory, 'drupdates.debug')) files.append(os.path.join(expanduser('~'), '.drush', 'drupdates.aliases.drushrc.php')) for file_name in files: if os.path.isfile(file_name): os.remove(file_name) def destroy_directory(self): """ Destroy base testing directory and remove base settings file. """ shutil.rmtree(self.test_dir) def build_base_repos(self): """ Build out the base repo used by the functional tests. 
""" directory_list = open(os.path.join(self.current_dir, 'base_dirs.yaml')) base_directory_list = yaml.load(directory_list) for directory, options in base_directory_list['dirs'].items(): self.get_make_file(options['version'], options['make_format']) base_directory = self.build_base_directory(directory) if not base_directory: continue if not options['build'] or 'subfolder' in options: make_file_name = "{0}.{1}".format(options['make_file'], options['make_format']) self.copy_make_file(make_file_name, base_directory) path = base_directory if options['build']: subfolder = '' if 'subfolder' in options: subfolder = options['subfolder'] path = self.run_drush_make(base_directory, subfolder) if 'commands' in options: os.chdir(path) for commands in options['commands']: popen = subprocess.Popen(commands, stdout=subprocess.PIPE, stderr=subprocess.PIPE) popen.communicate() if 'sites' in options: for site in options['sites']: destination = "--contrib-destination=sites/{0}.com".format(site) add_cmds = [destination, '--no-core'] self.make_file = os.path.join(self.current_dir, 'makefiles', "{0}.yaml".format(site)) self.run_drush_make(base_directory, subfolder, add_cmds) if 'custom_settings' in options: settings_file_directory = os.path.join(path, '.drupdates') settings_file = "{0}/settings.yaml".format(settings_file_directory) with open(settings_file, 'w') as outfile: outfile.write(yaml.dump(options['custom_settings'], default_flow_style=False)) Setup.make_git_repo(base_directory) def build_base_directory(self, target_directory): """ Build the empty base directory. """ folder = os.path.join(self.test_dir, 'builds', target_directory) if not os.path.isdir(folder): os.makedirs(folder) return folder else: return "" def get_make_file(self, drupal_version, make_format): """ Get the name and location of Drush Make file to build base repo. 
""" makefile = "drupal{0}.{1}".format(drupal_version, make_format) self.make_file = os.path.join(self.current_dir, 'makefiles', makefile) def copy_make_file(self, target_file, target_directory): """ Copy target_file to target_directory. """ make_file_path = os.path.join(target_directory, target_file) shutil.copyfile(self.make_file, make_file_path) def run_drush_make(self, target_directory, subfolder, add_cmds=None): """ Run drush make to build the base repo. """ path = os.path.join(target_directory, subfolder) cmds = ['make', self.make_file, path] if add_cmds and isinstance(add_cmds, list): cmds += add_cmds else: if os.path.isdir(path): shutil.rmtree(path) try: Drush.call(cmds) except DrupdatesError as error: print(error.msg) return path @staticmethod def make_git_repo(directory): """ Make the repo folder a git repo. """ repo = Repo.init(directory) index = repo.index files = repo.untracked_files index.add(files) index.commit('Initial Commit') repo.heads.master.checkout(b='dev') repo.heads.master.checkout()
mit
rkawale/Internalhr-frappe
frappe/widgets/moduleview.py
2
4396
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors # MIT License. See license.txt from __future__ import unicode_literals import frappe from frappe.widgets import reportview from frappe.utils import cint from frappe import _ from copy import deepcopy @frappe.whitelist() def get(module): data = get_data(module) out = { "data": data, "item_count": { data[0]["label"]: get_section_count(section=data[0]) }, "reports": get_report_list(module) } return out def get_data(module): data = build_config_from_file(module) if not data: data = build_standard_config(module) data = combine_common_sections(data) return data def build_config_from_file(module): data = [] module = frappe.scrub(module) for app in frappe.get_installed_apps(): try: data += get_config(app, module) except ImportError: pass return data def build_standard_config(module): if not frappe.db.get_value("Module Def", module): frappe.throw(_("Module Not Found")) data = [] doctypes = frappe.db.sql("""select "doctype" as type, name, description, ifnull(document_type, "") as document_type from `tabDocType` where module=%s and ifnull(istable, 0)=0 order by document_type desc, name asc""", module, as_dict=True) documents = [d for d in doctypes if d.document_type in ("Transaction", "Master", "")] if documents: data.append({ "label": _("Documents"), "icon": "icon-star", "items": documents }) setup = [d for d in doctypes if d.document_type in ("System", "Other")] if setup: data.append({ "label": _("Setup"), "icon": "icon-cog", "items": setup }) reports = get_report_list(module, is_standard="Yes") if reports: data.append({ "label": _("Standard Reports"), "icon": "icon-list", "items": reports }) return data def combine_common_sections(data): sections = [] sections_dict = {} for each in data: if each["label"] not in sections_dict: sections_dict[each["label"]] = each sections.append(each) else: sections_dict[each["label"]]["items"] += each["items"] return sections def get_config(app, module): config = 
frappe.get_module("{app}.config.{module}".format(app=app, module=module)) config = deepcopy(config.get_data() if hasattr(config, "get_data") else config.data) for section in config: for item in section["items"]: if not "label" in item: item["label"] = _(item["name"]) return config def add_setup_section(config, app, module, label, icon): try: setup_section = get_setup_section(app, module, label, icon) if setup_section: config.append(setup_section) except ImportError: pass def get_setup_section(app, module, label, icon): config = get_config(app, module) for section in config: if section.get("label")==_("Setup"): return { "label": label, "icon": icon, "items": section["items"] } @frappe.whitelist() def get_section_count(section=None, module=None, section_label=None): doctypes = [] if module and section_label: data = get_data(module) for each in data: if each["label"] == section_label: section = each break if section: doctypes = get_doctypes(section) count = get_count(doctypes) return count def get_doctypes(section): doctypes = [] for item in section.get("items", []): if item.get("type")=="doctype": doctypes.append(item["name"]) elif item.get("doctype"): doctypes.append(item["doctype"]) return list(set(doctypes)) def get_count(doctypes): count = {} can_read = frappe.user.get_can_read() for d in doctypes: if d in can_read: count[d] = get_doctype_count_from_table(d) return count def get_doctype_count_from_table(doctype): try: count = reportview.execute(doctype, fields=["count(*)"], as_list=True)[0][0] except Exception, e: if e.args[0]==1146: count = None else: raise return cint(count) def get_report_list(module, is_standard="No"): """return list on new style reports for modules""" reports = frappe.get_list("Report", fields=["name", "ref_doctype", "report_type"], filters= {"is_standard": is_standard, "disabled": ("in", ("0", "NULL")), "module": module}, order_by="name") out = [] for r in reports: out.append({ "type": "report", "doctype": r.ref_doctype, "is_query_report": 
1 if r.report_type in ("Query Report", "Script Report") else 0, "description": r.report_type, "label": _(r.name) }) return out
mit
TeamExodus/external_chromium_org
tools/code_coverage/croc_scan_test.py
178
7181
#!/usr/bin/env python # Copyright (c) 2011 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Unit tests for croc_scan.py.""" import re import unittest import croc_scan class TestScanner(unittest.TestCase): """Tests for croc_scan.Scanner.""" def testInit(self): """Test __init()__.""" s = croc_scan.Scanner() self.assertEqual(s.re_token.pattern, '#') self.assertEqual(s.comment_to_eol, ['#']) self.assertEqual(s.comment_start, None) self.assertEqual(s.comment_end, None) def testScanLines(self): """Test ScanLines().""" s = croc_scan.Scanner() # Set up imaginary language: # ':' = comment to EOL # '"' = string start/end # '(' = comment start # ')' = comment end s.re_token = re.compile(r'([\:\"\(\)])') s.comment_to_eol = [':'] s.comment_start = '(' s.comment_end = ')' # No input file = no output lines self.assertEqual(s.ScanLines([]), []) # Empty lines and lines with only whitespace are ignored self.assertEqual(s.ScanLines([ '', # 1 'line', # 2 exe ' \t ', # 3 ]), [2]) # Comments to EOL are stripped, but not inside strings self.assertEqual(s.ScanLines([ 'test', # 1 exe ' : A comment', # 2 '"a : in a string"', # 3 exe 'test2 : with comment to EOL', # 4 exe 'foo = "a multiline string with an empty line', # 5 exe '', # 6 exe ': and a comment-to-EOL character"', # 7 exe ': done', # 8 ]), [1, 3, 4, 5, 6, 7]) # Test Comment start/stop detection self.assertEqual(s.ScanLines([ '( a comment on one line)', # 1 'text (with a comment)', # 2 exe '( a comment with a : in the middle)', # 3 '( a multi-line', # 4 ' comment)', # 5 'a string "with a ( in it"', # 6 exe 'not in a multi-line comment', # 7 exe '(a comment with a " in it)', # 8 ': not in a string, so this gets stripped', # 9 'more text "with an uninteresting string"', # 10 exe ]), [2, 6, 7, 10]) # TODO: Test Scan(). Low priority, since it just wraps ScanLines(). 
class TestPythonScanner(unittest.TestCase): """Tests for croc_scan.PythonScanner.""" def testScanLines(self): """Test ScanLines().""" s = croc_scan.PythonScanner() # No input file = no output lines self.assertEqual(s.ScanLines([]), []) self.assertEqual(s.ScanLines([ '# a comment', # 1 '', # 2 '"""multi-line string', # 3 exe '# not a comment', # 4 exe 'end of multi-line string"""', # 5 exe ' ', # 6 '"single string with #comment"', # 7 exe '', # 8 '\'\'\'multi-line string, single-quote', # 9 exe '# not a comment', # 10 exe 'end of multi-line string\'\'\'', # 11 exe '', # 12 '"string with embedded \\" is handled"', # 13 exe '# quoted "', # 14 '"\\""', # 15 exe '# quoted backslash', # 16 '"\\\\"', # 17 exe 'main()', # 18 exe '# end', # 19 ]), [3, 4, 5, 7, 9, 10, 11, 13, 15, 17, 18]) class TestCppScanner(unittest.TestCase): """Tests for croc_scan.CppScanner.""" def testScanLines(self): """Test ScanLines().""" s = croc_scan.CppScanner() # No input file = no output lines self.assertEqual(s.ScanLines([]), []) self.assertEqual(s.ScanLines([ '// a comment', # 1 '# a preprocessor define', # 2 '', # 3 '\'#\', \'"\'', # 4 exe '', # 5 '/* a multi-line comment', # 6 'with a " in it', # 7 '*/', # 8 '', # 9 '"a string with /* and \' in it"', # 10 exe '', # 11 '"a multi-line string\\', # 12 exe '// not a comment\\', # 13 exe 'ending here"', # 14 exe '', # 15 '"string with embedded \\" is handled"', # 16 exe '', # 17 'main()', # 18 exe '// end', # 19 ]), [4, 10, 12, 13, 14, 16, 18]) class TestScanFile(unittest.TestCase): """Tests for croc_scan.ScanFile().""" class MockScanner(object): """Mock scanner.""" def __init__(self, language): """Constructor.""" self.language = language def Scan(self, filename): """Mock Scan() method.""" return 'scan %s %s' % (self.language, filename) def MockPythonScanner(self): return self.MockScanner('py') def MockCppScanner(self): return self.MockScanner('cpp') def setUp(self): """Per-test setup.""" # Hook scanners self.old_python_scanner = 
croc_scan.PythonScanner self.old_cpp_scanner = croc_scan.CppScanner croc_scan.PythonScanner = self.MockPythonScanner croc_scan.CppScanner = self.MockCppScanner def tearDown(self): """Per-test cleanup.""" croc_scan.PythonScanner = self.old_python_scanner croc_scan.CppScanner = self.old_cpp_scanner def testScanFile(self): """Test ScanFile().""" self.assertEqual(croc_scan.ScanFile('foo', 'python'), 'scan py foo') self.assertEqual(croc_scan.ScanFile('bar1', 'C'), 'scan cpp bar1') self.assertEqual(croc_scan.ScanFile('bar2', 'C++'), 'scan cpp bar2') self.assertEqual(croc_scan.ScanFile('bar3', 'ObjC'), 'scan cpp bar3') self.assertEqual(croc_scan.ScanFile('bar4', 'ObjC++'), 'scan cpp bar4') self.assertEqual(croc_scan.ScanFile('bar', 'fortran'), []) if __name__ == '__main__': unittest.main()
bsd-3-clause
sameetb-cuelogic/edx-platform-test
common/test/acceptance/fixtures/discussion.py
11
3453
""" Tools for creating discussion content fixture data. """ from datetime import datetime import json import factory import requests from . import COMMENTS_STUB_URL class ContentFactory(factory.Factory): FACTORY_FOR = dict id = None user_id = "dummy-user-id" username = "dummy-username" course_id = "dummy-course-id" commentable_id = "dummy-commentable-id" anonymous = False anonymous_to_peers = False at_position_list = [] abuse_flaggers = [] created_at = datetime.utcnow().isoformat() updated_at = datetime.utcnow().isoformat() endorsed = False closed = False votes = {"up_count": 0} class Thread(ContentFactory): thread_type = "discussion" anonymous = False anonymous_to_peers = False comments_count = 0 unread_comments_count = 0 title = "dummy thread title" body = "dummy thread body" type = "thread" group_id = None pinned = False read = False class Comment(ContentFactory): thread_id = "dummy thread" depth = 0 type = "comment" body = "dummy comment body" class Response(Comment): depth = 1 body = "dummy response body" class SearchResult(factory.Factory): FACTORY_FOR = dict discussion_data = [] annotated_content_info = {} num_pages = 1 page = 1 corrected_text = None class DiscussionContentFixture(object): def push(self): """ Push the data to the stub comments service. """ requests.put( '{}/set_config'.format(COMMENTS_STUB_URL), data=self.get_config_data() ) def get_config_data(self): """ return a dictionary with the fixture's data serialized for PUTting to the stub server's config endpoint. 
""" raise NotImplementedError() class SingleThreadViewFixture(DiscussionContentFixture): def __init__(self, thread): self.thread = thread def addResponse(self, response, comments=[]): response['children'] = comments if self.thread["thread_type"] == "discussion": responseListAttr = "children" elif response["endorsed"]: responseListAttr = "endorsed_responses" else: responseListAttr = "non_endorsed_responses" self.thread.setdefault(responseListAttr, []).append(response) self.thread['comments_count'] += len(comments) + 1 def _get_comment_map(self): """ Generate a dict mapping each response/comment in the thread by its `id`. """ def _visit(obj): res = [] for child in obj.get('children', []): res.append((child['id'], child)) if 'children' in child: res += _visit(child) return res return dict(_visit(self.thread)) def get_config_data(self): return { "threads": json.dumps({self.thread['id']: self.thread}), "comments": json.dumps(self._get_comment_map()) } class UserProfileViewFixture(DiscussionContentFixture): def __init__(self, threads): self.threads = threads def get_config_data(self): return {"active_threads": json.dumps(self.threads)} class SearchResultFixture(DiscussionContentFixture): def __init__(self, result): self.result = result def get_config_data(self): return {"search_result": json.dumps(self.result)}
agpl-3.0
vaygr/ansible
lib/ansible/modules/network/netscaler/netscaler_servicegroup.py
52
35086
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright (c) 2017 Citrix Systems # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: netscaler_servicegroup short_description: Manage service group configuration in Netscaler description: - Manage service group configuration in Netscaler. - This module is intended to run either on the ansible control node or a bastion (jumpserver) with access to the actual netscaler instance. version_added: "2.4" author: George Nikolopoulos (@giorgos-nikolopoulos) options: servicegroupname: description: - >- Name of the service group. Must begin with an ASCII alphabetic or underscore C(_) character, and must contain only ASCII alphanumeric, underscore C(_), hash C(#), period C(.), space C( ), colon C(:), at C(@), equals C(=), and hyphen C(-) characters. Can be changed after the name is created. - "Minimum length = 1" servicetype: choices: - 'HTTP' - 'FTP' - 'TCP' - 'UDP' - 'SSL' - 'SSL_BRIDGE' - 'SSL_TCP' - 'DTLS' - 'NNTP' - 'RPCSVR' - 'DNS' - 'ADNS' - 'SNMP' - 'RTSP' - 'DHCPRA' - 'ANY' - 'SIP_UDP' - 'SIP_TCP' - 'SIP_SSL' - 'DNS_TCP' - 'ADNS_TCP' - 'MYSQL' - 'MSSQL' - 'ORACLE' - 'RADIUS' - 'RADIUSListener' - 'RDP' - 'DIAMETER' - 'SSL_DIAMETER' - 'TFTP' - 'SMPP' - 'PPTP' - 'GRE' - 'SYSLOGTCP' - 'SYSLOGUDP' - 'FIX' - 'SSL_FIX' description: - "Protocol used to exchange data with the service." cachetype: choices: - 'TRANSPARENT' - 'REVERSE' - 'FORWARD' description: - "Cache type supported by the cache server." maxclient: description: - "Maximum number of simultaneous open connections for the service group." - "Minimum value = C(0)" - "Maximum value = C(4294967294)" maxreq: description: - "Maximum number of requests that can be sent on a persistent connection to the service group." 
- "Note: Connection requests beyond this value are rejected." - "Minimum value = C(0)" - "Maximum value = C(65535)" cacheable: description: - "Use the transparent cache redirection virtual server to forward the request to the cache server." - "Note: Do not set this parameter if you set the Cache Type." type: bool cip: choices: - 'enabled' - 'disabled' description: - "Insert the Client IP header in requests forwarded to the service." cipheader: description: - >- Name of the HTTP header whose value must be set to the IP address of the client. Used with the Client IP parameter. If client IP insertion is enabled, and the client IP header is not specified, the value of Client IP Header parameter or the value set by the set ns config command is used as client's IP header name. - "Minimum length = 1" usip: description: - >- Use client's IP address as the source IP address when initiating connection to the server. With the NO setting, which is the default, a mapped IP (MIP) address or subnet IP (SNIP) address is used as the source IP address to initiate server side connections. pathmonitor: description: - "Path monitoring for clustering." pathmonitorindv: description: - "Individual Path monitoring decisions." useproxyport: description: - >- Use the proxy port as the source port when initiating connections with the server. With the NO setting, the client-side connection port is used as the source port for the server-side connection. - "Note: This parameter is available only when the Use Source IP C(usip) parameter is set to C(yes)." type: bool healthmonitor: description: - "Monitor the health of this service. Available settings function as follows:" - "C(yes) - Send probes to check the health of the service." - >- C(no) - Do not send probes to check the health of the service. With the NO option, the appliance shows the service as UP at all times. type: bool sp: description: - "Enable surge protection for the service group." 
type: bool rtspsessionidremap: description: - "Enable RTSP session ID mapping for the service group." type: bool clttimeout: description: - "Time, in seconds, after which to terminate an idle client connection." - "Minimum value = C(0)" - "Maximum value = C(31536000)" svrtimeout: description: - "Time, in seconds, after which to terminate an idle server connection." - "Minimum value = C(0)" - "Maximum value = C(31536000)" cka: description: - "Enable client keep-alive for the service group." type: bool tcpb: description: - "Enable TCP buffering for the service group." type: bool cmp: description: - "Enable compression for the specified service." type: bool maxbandwidth: description: - "Maximum bandwidth, in Kbps, allocated for all the services in the service group." - "Minimum value = C(0)" - "Maximum value = C(4294967287)" monthreshold: description: - >- Minimum sum of weights of the monitors that are bound to this service. Used to determine whether to mark a service as UP or DOWN. - "Minimum value = C(0)" - "Maximum value = C(65535)" downstateflush: choices: - 'enabled' - 'disabled' description: - >- Flush all active transactions associated with all the services in the service group whose state transitions from UP to DOWN. Do not enable this option for applications that must complete their transactions. tcpprofilename: description: - "Name of the TCP profile that contains TCP configuration settings for the service group." - "Minimum length = 1" - "Maximum length = 127" httpprofilename: description: - "Name of the HTTP profile that contains HTTP configuration settings for the service group." - "Minimum length = 1" - "Maximum length = 127" comment: description: - "Any information about the service group." appflowlog: choices: - 'enabled' - 'disabled' description: - "Enable logging of AppFlow information for the specified service group." netprofile: description: - "Network profile for the service group." 
- "Minimum length = 1" - "Maximum length = 127" autoscale: choices: - 'DISABLED' - 'DNS' - 'POLICY' description: - "Auto scale option for a servicegroup." memberport: description: - "member port." graceful: description: - "Wait for all existing connections to the service to terminate before shutting down the service." type: bool servicemembers: description: - A list of dictionaries describing each service member of the service group. suboptions: ip: description: - IP address of the service. Must not overlap with an existing server entity defined by name. port: description: - Server port number. - Range C(1) - C(65535) - "* in CLI is represented as 65535 in NITRO API" state: choices: - 'enabled' - 'disabled' description: - Initial state of the service after binding. hashid: description: - The hash identifier for the service. - This must be unique for each service. - This parameter is used by hash based load balancing methods. - Minimum value = C(1) serverid: description: - The identifier for the service. - This is used when the persistency type is set to Custom Server ID. servername: description: - Name of the server to which to bind the service group. - The server must already be configured as a named server. - Minimum length = 1 customserverid: description: - The identifier for this IP:Port pair. - Used when the persistency type is set to Custom Server ID. weight: description: - Weight to assign to the servers in the service group. - Specifies the capacity of the servers relative to the other servers in the load balancing configuration. - The higher the weight, the higher the percentage of requests sent to the service. - Minimum value = C(1) - Maximum value = C(100) monitorbindings: description: - A list of monitornames to bind to this service - Note that the monitors must have already been setup possibly using the M(netscaler_lb_monitor) module or some other method suboptions: monitorname: description: - The monitor name to bind to this servicegroup. 
weight: description: - Weight to assign to the binding between the monitor and servicegroup. disabled: description: - When set to C(yes) the service group state will be set to DISABLED. - When set to C(no) the service group state will be set to ENABLED. - >- Note that due to limitations of the underlying NITRO API a C(disabled) state change alone does not cause the module result to report a changed status. type: bool default: false extends_documentation_fragment: netscaler requirements: - nitro python sdk ''' EXAMPLES = ''' # The LB Monitors monitor-1 and monitor-2 must already exist # Service members defined by C(ip) must not redefine an existing server's ip address. # Service members defined by C(servername) must already exist. - name: Setup http service with ip members delegate_to: localhost netscaler_servicegroup: nsip: 172.18.0.2 nitro_user: nsroot nitro_pass: nsroot state: present servicegroupname: service-group-1 servicetype: HTTP servicemembers: - ip: 10.78.78.78 port: 80 weight: 50 - ip: 10.79.79.79 port: 80 weight: 40 - servername: server-1 port: 80 weight: 10 monitorbindings: - monitorname: monitor-1 weight: 50 - monitorname: monitor-2 weight: 50 ''' RETURN = ''' loglines: description: list of logged messages by the module returned: always type: list sample: ['message 1', 'message 2'] msg: description: Message detailing the failure reason returned: failure type: str sample: "Action does not exist" diff: description: List of differences between the actual configured object and the configuration specified in the module returned: failure type: dict sample: { 'clttimeout': 'difference. 
ours: (float) 10.0 other: (float) 20.0' } ''' from ansible.module_utils.basic import AnsibleModule import copy from ansible.module_utils.network.netscaler.netscaler import ConfigProxy, get_nitro_client, netscaler_common_arguments, log, \ loglines, get_immutables_intersection try: from nssrc.com.citrix.netscaler.nitro.resource.config.basic.servicegroup import servicegroup from nssrc.com.citrix.netscaler.nitro.resource.config.basic.servicegroup_servicegroupmember_binding import servicegroup_servicegroupmember_binding from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception from nssrc.com.citrix.netscaler.nitro.resource.config.basic.servicegroup_lbmonitor_binding import servicegroup_lbmonitor_binding from nssrc.com.citrix.netscaler.nitro.resource.config.lb.lbmonitor_servicegroup_binding import lbmonitor_servicegroup_binding PYTHON_SDK_IMPORTED = True except ImportError as e: PYTHON_SDK_IMPORTED = False def servicegroup_exists(client, module): log('Checking if service group exists') count = servicegroup.count_filtered(client, 'servicegroupname:%s' % module.params['servicegroupname']) log('count is %s' % count) if count > 0: return True else: return False def servicegroup_identical(client, module, servicegroup_proxy): log('Checking if service group is identical') servicegroups = servicegroup.get_filtered(client, 'servicegroupname:%s' % module.params['servicegroupname']) if servicegroup_proxy.has_equal_attributes(servicegroups[0]): return True else: return False def get_configured_service_members(client, module): log('get_configured_service_members') readwrite_attrs = [ 'servicegroupname', 'ip', 'port', 'state', 'hashid', 'serverid', 'servername', 'customserverid', 'weight' ] readonly_attrs = [ 'delay', 'statechangetimesec', 'svrstate', 'tickssincelaststatechange', 'graceful', ] members = [] if module.params['servicemembers'] is None: return members for config in module.params['servicemembers']: # Make a copy to update config = 
copy.deepcopy(config) config['servicegroupname'] = module.params['servicegroupname'] member_proxy = ConfigProxy( actual=servicegroup_servicegroupmember_binding(), client=client, attribute_values_dict=config, readwrite_attrs=readwrite_attrs, readonly_attrs=readonly_attrs ) members.append(member_proxy) return members def get_actual_service_members(client, module): try: # count() raises nitro exception instead of returning 0 count = servicegroup_servicegroupmember_binding.count(client, module.params['servicegroupname']) if count > 0: servicegroup_members = servicegroup_servicegroupmember_binding.get(client, module.params['servicegroupname']) else: servicegroup_members = [] except nitro_exception as e: if e.errorcode == 258: servicegroup_members = [] else: raise return servicegroup_members def servicemembers_identical(client, module): log('servicemembers_identical') servicegroup_members = get_actual_service_members(client, module) log('servicemembers %s' % servicegroup_members) module_servicegroups = get_configured_service_members(client, module) log('Number of service group members %s' % len(servicegroup_members)) if len(servicegroup_members) != len(module_servicegroups): return False # Fallthrough to member evaluation identical_count = 0 for actual_member in servicegroup_members: for member in module_servicegroups: if member.has_equal_attributes(actual_member): identical_count += 1 break if identical_count != len(servicegroup_members): return False # Fallthrough to success return True def sync_service_members(client, module): log('sync_service_members') configured_service_members = get_configured_service_members(client, module) actual_service_members = get_actual_service_members(client, module) skip_add = [] skip_delete = [] # Find positions of identical service members for (configured_index, configured_service) in enumerate(configured_service_members): for (actual_index, actual_service) in enumerate(actual_service_members): if 
configured_service.has_equal_attributes(actual_service): skip_add.append(configured_index) skip_delete.append(actual_index) # Delete actual that are not identical to any configured for (actual_index, actual_service) in enumerate(actual_service_members): # Skip identical if actual_index in skip_delete: log('Skipping actual delete at index %s' % actual_index) continue # Fallthrouth to deletion if all([ hasattr(actual_service, 'ip'), actual_service.ip is not None, hasattr(actual_service, 'servername'), actual_service.servername is not None, ]): actual_service.ip = None actual_service.servicegroupname = module.params['servicegroupname'] servicegroup_servicegroupmember_binding.delete(client, actual_service) # Add configured that are not already present in actual for (configured_index, configured_service) in enumerate(configured_service_members): # Skip identical if configured_index in skip_add: log('Skipping configured add at index %s' % configured_index) continue # Fallthrough to addition configured_service.add() def monitor_binding_equal(configured, actual): if any([configured.monitorname != actual.monitor_name, configured.servicegroupname != actual.servicegroupname, configured.weight != float(actual.weight)]): return False return True def get_configured_monitor_bindings(client, module): log('Entering get_configured_monitor_bindings') bindings = {} if 'monitorbindings' in module.params and module.params['monitorbindings'] is not None: for binding in module.params['monitorbindings']: readwrite_attrs = [ 'monitorname', 'servicegroupname', 'weight', ] readonly_attrs = [] attribute_values_dict = copy.deepcopy(binding) attribute_values_dict['servicegroupname'] = module.params['servicegroupname'] binding_proxy = ConfigProxy( actual=lbmonitor_servicegroup_binding(), client=client, attribute_values_dict=attribute_values_dict, readwrite_attrs=readwrite_attrs, readonly_attrs=readonly_attrs, ) key = attribute_values_dict['monitorname'] bindings[key] = binding_proxy return 
bindings def get_actual_monitor_bindings(client, module): log('Entering get_actual_monitor_bindings') bindings = {} try: # count() raises nitro exception instead of returning 0 count = servicegroup_lbmonitor_binding.count(client, module.params['servicegroupname']) except nitro_exception as e: if e.errorcode == 258: return bindings else: raise if count == 0: return bindings # Fallthrough to rest of execution for binding in servicegroup_lbmonitor_binding.get(client, module.params['servicegroupname']): log('Gettign actual monitor with name %s' % binding.monitor_name) key = binding.monitor_name bindings[key] = binding return bindings def monitor_bindings_identical(client, module): log('Entering monitor_bindings_identical') configured_bindings = get_configured_monitor_bindings(client, module) actual_bindings = get_actual_monitor_bindings(client, module) configured_key_set = set(configured_bindings.keys()) actual_key_set = set(actual_bindings.keys()) symmetrical_diff = configured_key_set ^ actual_key_set for default_monitor in ('tcp-default', 'ping-default'): if default_monitor in symmetrical_diff: log('Excluding %s monitor from key comparison' % default_monitor) symmetrical_diff.remove(default_monitor) if len(symmetrical_diff) > 0: return False # Compare key to key for key in configured_key_set: configured_proxy = configured_bindings[key] # Follow nscli convention for missing weight value if not hasattr(configured_proxy, 'weight'): configured_proxy.weight = 1 log('configured_proxy %s' % [configured_proxy.monitorname, configured_proxy.servicegroupname, configured_proxy.weight]) log('actual_bindings %s' % [actual_bindings[key].monitor_name, actual_bindings[key].servicegroupname, actual_bindings[key].weight]) if not monitor_binding_equal(configured_proxy, actual_bindings[key]): return False # Fallthrought to success return True def sync_monitor_bindings(client, module): log('Entering sync_monitor_bindings') actual_bindings = get_actual_monitor_bindings(client, module) # 
Exclude default monitors from deletion for monitorname in ('tcp-default', 'ping-default'): if monitorname in actual_bindings: del actual_bindings[monitorname] configured_bindings = get_configured_monitor_bindings(client, module) to_remove = list(set(actual_bindings.keys()) - set(configured_bindings.keys())) to_add = list(set(configured_bindings.keys()) - set(actual_bindings.keys())) to_modify = list(set(configured_bindings.keys()) & set(actual_bindings.keys())) # Delete existing and modifiable bindings for key in to_remove + to_modify: binding = actual_bindings[key] b = lbmonitor_servicegroup_binding() b.monitorname = binding.monitor_name b.servicegroupname = module.params['servicegroupname'] # Cannot remove default monitor bindings if b.monitorname in ('tcp-default', 'ping-default'): continue lbmonitor_servicegroup_binding.delete(client, b) # Add new and modified bindings for key in to_add + to_modify: binding = configured_bindings[key] log('Adding %s' % binding.monitorname) binding.add() def diff(client, module, servicegroup_proxy): servicegroup_list = servicegroup.get_filtered(client, 'servicegroupname:%s' % module.params['servicegroupname']) diff_object = servicegroup_proxy.diff_object(servicegroup_list[0]) return diff_object def do_state_change(client, module, servicegroup_proxy): if module.params['disabled']: log('Disabling service') result = servicegroup.disable(client, servicegroup_proxy.actual) else: log('Enabling service') result = servicegroup.enable(client, servicegroup_proxy.actual) return result def main(): module_specific_arguments = dict( servicegroupname=dict(type='str'), servicetype=dict( type='str', choices=[ 'HTTP', 'FTP', 'TCP', 'UDP', 'SSL', 'SSL_BRIDGE', 'SSL_TCP', 'DTLS', 'NNTP', 'RPCSVR', 'DNS', 'ADNS', 'SNMP', 'RTSP', 'DHCPRA', 'ANY', 'SIP_UDP', 'SIP_TCP', 'SIP_SSL', 'DNS_TCP', 'ADNS_TCP', 'MYSQL', 'MSSQL', 'ORACLE', 'RADIUS', 'RADIUSListener', 'RDP', 'DIAMETER', 'SSL_DIAMETER', 'TFTP', 'SMPP', 'PPTP', 'GRE', 'SYSLOGTCP', 'SYSLOGUDP', 
'FIX', 'SSL_FIX', ] ), cachetype=dict( type='str', choices=[ 'TRANSPARENT', 'REVERSE', 'FORWARD', ] ), maxclient=dict(type='float'), maxreq=dict(type='float'), cacheable=dict(type='bool'), cip=dict( type='str', choices=[ 'enabled', 'disabled', ] ), cipheader=dict(type='str'), usip=dict(type='bool'), pathmonitor=dict(type='bool'), pathmonitorindv=dict(type='bool'), useproxyport=dict(type='bool'), healthmonitor=dict(type='bool'), sp=dict(type='bool'), rtspsessionidremap=dict(type='bool'), clttimeout=dict(type='float'), svrtimeout=dict(type='float'), cka=dict(type='bool'), tcpb=dict(type='bool'), cmp=dict(type='bool'), maxbandwidth=dict(type='float'), monthreshold=dict(type='float'), downstateflush=dict( type='str', choices=[ 'enabled', 'disabled', ] ), tcpprofilename=dict(type='str'), httpprofilename=dict(type='str'), comment=dict(type='str'), appflowlog=dict( type='str', choices=[ 'enabled', 'disabled', ] ), netprofile=dict(type='str'), autoscale=dict( type='str', choices=[ 'DISABLED', 'DNS', 'POLICY', ] ), memberport=dict(type='int'), graceful=dict(type='bool'), ) hand_inserted_arguments = dict( servicemembers=dict(type='list'), monitorbindings=dict(type='list'), disabled=dict( type='bool', default=False, ), ) argument_spec = dict() argument_spec.update(netscaler_common_arguments) argument_spec.update(module_specific_arguments) argument_spec.update(hand_inserted_arguments) module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=True, ) module_result = dict( changed=False, failed=False, loglines=loglines, ) # Fail the module if imports failed if not PYTHON_SDK_IMPORTED: module.fail_json(msg='Could not load nitro python sdk') # Fallthrough to rest of execution client = get_nitro_client(module) try: client.login() except nitro_exception as e: msg = "nitro exception during login. 
errorcode=%s, message=%s" % (str(e.errorcode), e.message) module.fail_json(msg=msg) except Exception as e: if str(type(e)) == "<class 'requests.exceptions.ConnectionError'>": module.fail_json(msg='Connection error %s' % str(e)) elif str(type(e)) == "<class 'requests.exceptions.SSLError'>": module.fail_json(msg='SSL Error %s' % str(e)) else: module.fail_json(msg='Unexpected error during login %s' % str(e)) # Instantiate service group configuration object readwrite_attrs = [ 'servicegroupname', 'servicetype', 'cachetype', 'maxclient', 'maxreq', 'cacheable', 'cip', 'cipheader', 'usip', 'pathmonitor', 'pathmonitorindv', 'useproxyport', 'healthmonitor', 'sp', 'rtspsessionidremap', 'clttimeout', 'svrtimeout', 'cka', 'tcpb', 'cmp', 'maxbandwidth', 'monthreshold', 'downstateflush', 'tcpprofilename', 'httpprofilename', 'comment', 'appflowlog', 'netprofile', 'autoscale', 'memberport', 'graceful', ] readonly_attrs = [ 'numofconnections', 'serviceconftype', 'value', 'svrstate', 'ip', 'monstatcode', 'monstatparam1', 'monstatparam2', 'monstatparam3', 'statechangetimemsec', 'stateupdatereason', 'clmonowner', 'clmonview', 'groupcount', 'riseapbrstatsmsgcode2', 'serviceipstr', 'servicegroupeffectivestate' ] immutable_attrs = [ 'servicegroupname', 'servicetype', 'cachetype', 'td', 'cipheader', 'state', 'autoscale', 'memberport', 'servername', 'port', 'serverid', 'monitor_name_svc', 'dup_weight', 'riseapbrstatsmsgcode', 'delay', 'graceful', 'includemembers', 'newname', ] transforms = { 'pathmonitorindv': ['bool_yes_no'], 'cacheable': ['bool_yes_no'], 'cka': ['bool_yes_no'], 'pathmonitor': ['bool_yes_no'], 'tcpb': ['bool_yes_no'], 'sp': ['bool_on_off'], 'usip': ['bool_yes_no'], 'healthmonitor': ['bool_yes_no'], 'useproxyport': ['bool_yes_no'], 'rtspsessionidremap': ['bool_on_off'], 'graceful': ['bool_yes_no'], 'cmp': ['bool_yes_no'], 'cip': [lambda v: v.upper()], 'downstateflush': [lambda v: v.upper()], 'appflowlog': [lambda v: v.upper()], } # Instantiate config proxy 
servicegroup_proxy = ConfigProxy( actual=servicegroup(), client=client, attribute_values_dict=module.params, readwrite_attrs=readwrite_attrs, readonly_attrs=readonly_attrs, immutable_attrs=immutable_attrs, transforms=transforms, ) try: if module.params['state'] == 'present': log('Applying actions for state present') if not servicegroup_exists(client, module): if not module.check_mode: log('Adding service group') servicegroup_proxy.add() if module.params['save_config']: client.save_config() module_result['changed'] = True elif not servicegroup_identical(client, module, servicegroup_proxy): # Check if we try to change value of immutable attributes diff_dict = diff(client, module, servicegroup_proxy) immutables_changed = get_immutables_intersection(servicegroup_proxy, diff_dict.keys()) if immutables_changed != []: msg = 'Cannot update immutable attributes %s. Must delete and recreate entity.' % (immutables_changed,) module.fail_json(msg=msg, diff=diff_dict, **module_result) if not module.check_mode: servicegroup_proxy.update() if module.params['save_config']: client.save_config() module_result['changed'] = True else: module_result['changed'] = False # Check bindings if not monitor_bindings_identical(client, module): if not module.check_mode: sync_monitor_bindings(client, module) if module.params['save_config']: client.save_config() module_result['changed'] = True if not servicemembers_identical(client, module): if not module.check_mode: sync_service_members(client, module) if module.params['save_config']: client.save_config() module_result['changed'] = True if not module.check_mode: res = do_state_change(client, module, servicegroup_proxy) if res.errorcode != 0: msg = 'Error when setting disabled state. 
errorcode: %s message: %s' % (res.errorcode, res.message) module.fail_json(msg=msg, **module_result) # Sanity check for state if not module.check_mode: log('Sanity checks for state present') if not servicegroup_exists(client, module): module.fail_json(msg='Service group is not present', **module_result) if not servicegroup_identical(client, module, servicegroup_proxy): module.fail_json( msg='Service group is not identical to configuration', diff=diff(client, module, servicegroup_proxy), **module_result ) if not servicemembers_identical(client, module): module.fail_json(msg='Service group members differ from configuration', **module_result) if not monitor_bindings_identical(client, module): module.fail_json(msg='Monitor bindings are not identical', **module_result) elif module.params['state'] == 'absent': log('Applying actions for state absent') if servicegroup_exists(client, module): if not module.check_mode: servicegroup_proxy.delete() if module.params['save_config']: client.save_config() module_result['changed'] = True else: module_result['changed'] = False # Sanity check for state if not module.check_mode: log('Sanity checks for state absent') if servicegroup_exists(client, module): module.fail_json(msg='Service group is present', **module_result) except nitro_exception as e: msg = "nitro exception errorcode=" + str(e.errorcode) + ",message=" + e.message module.fail_json(msg=msg, **module_result) client.logout() module.exit_json(**module_result) if __name__ == "__main__": main()
gpl-3.0
GdZ/scriptfile
software/googleAppEngine/lib/django_0_96/django/db/backends/mysql/creation.py
62
1283
# This dictionary maps Field objects to their associated MySQL column # types, as strings. Column-type strings can contain format strings; they'll # be interpolated against the values of Field.__dict__ before being output. # If a column type is set to None, it won't be included in the output. DATA_TYPES = { 'AutoField': 'integer AUTO_INCREMENT', 'BooleanField': 'bool', 'CharField': 'varchar(%(maxlength)s)', 'CommaSeparatedIntegerField': 'varchar(%(maxlength)s)', 'DateField': 'date', 'DateTimeField': 'datetime', 'FileField': 'varchar(100)', 'FilePathField': 'varchar(100)', 'FloatField': 'numeric(%(max_digits)s, %(decimal_places)s)', 'ImageField': 'varchar(100)', 'IntegerField': 'integer', 'IPAddressField': 'char(15)', 'ManyToManyField': None, 'NullBooleanField': 'bool', 'OneToOneField': 'integer', 'PhoneNumberField': 'varchar(20)', 'PositiveIntegerField': 'integer UNSIGNED', 'PositiveSmallIntegerField': 'smallint UNSIGNED', 'SlugField': 'varchar(%(maxlength)s)', 'SmallIntegerField': 'smallint', 'TextField': 'longtext', 'TimeField': 'time', 'USStateField': 'varchar(2)', }
mit
cdfassnacht/CodeCDF
python/gradefuncs.py
1
3401
""" Functions that are useful in plotting a grade histogram """ import numpy as np from matplotlib import pyplot as plt from astropy.io import ascii #--------------------------------------------------------------------------- def read_table(infile, colname): """ This new code can be used to read both the old-school text files (if they had the two-column format - see help for the read_text function) but also (and more importantly) the information directly from a CSV table of the form that is exported from smartsite or canvas. Inputs: infile - input file name colname - the name of the column containing the score of interest. NOTE: for the old-school text files, this will be 'col2' while for the CSV files it could be something like 'Midterm 2 (32620)' or 'MT2' or 'Final Score' """ """ Read in the table """ try: tab = ascii.read(infile, guess=False, format='csv') except: tab = ascii.read(infile) print(tab.colnames) """ Get the relevant information """ try: tot = tab[colname].copy() except KeyError: print('') print('Could not find a column matching %s in %s' % (colname,infile)) tot = None return tot #--------------------------------------------------------------------------- def read_text(infile, nscorecols=1): """ Function to read in the scores from the old-school text files that were created by modifying the csv files that came from downloading the gradebook from smartsite or canvas. 
There are two expected input formats: Two-column, designated by setting nscorecols=1 Name total_score Three-column, designated by setting nscorecols=2 Name multiple_choice_score short_answer_score The old code used the numpy loadtxt function to load the data, but this new code uses the astropy.io.ascii read function """ """ Read the data into an astropy Table structure """ tab = ascii.read(infile) """ Generate the total score array """ if nscorecols == 1: tot = tab['col2'].copy() else: tot = tab['col2'] + tab['col3'] return tot #--------------------------------------------------------------------------- def plot_tothist(infile, tot, maxy, binsize=3): """ Plot the total-score histogram, where the total score (tot) has been previous calculated or read-in by the input functions """ """ Calculate moments of the distribution """ mn = tot.mean() med = np.median(tot) mp = tot.mean() + tot.std() mm = tot.mean() - tot.std() """ Report on the properties of the distibution """ print('') print("Statistics for %s" % infile) print("---------------------------------") print(" Mean: %5.1f" % mn) print(" Median: %5.1f" % med) print(" Sigma: %5.1f" % tot.std()) print(" Mean - 1 sig: %5.1f" % mm) print(" Mean + 1 sig: %5.1f" % mp) print('') """ Plot the distribution """ binhist = range(int(tot.min())-1,int(tot.max())+3,binsize) plt.hist(tot,binhist,histtype='step',ec='k') plt.ylim(0,maxy) plt.axvline(x=mn, ymin=0, ymax=maxy, c='r', lw=3) plt.axvline(x=mm, ymin=0, ymax=maxy, c='b', lw=3) plt.axvline(x=mp, ymin=0, ymax=maxy, c='b', lw=3) plt.title("Distribution of scores for %s" % infile) plt.xlabel("Scores") plt.ylabel("N") plt.show()
mit
windyuuy/opera
chromium/src/chrome/installer/linux/sysroot_scripts/install-debian.wheezy.sysroot.py
30
4251
#!/usr/bin/env python # Copyright (c) 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. # Script to install a Debian Wheezy sysroot for making official Google Chrome # Linux builds. # The sysroot is needed to make Chrome work for Debian Wheezy. # This script can be run manually but is more often run as part of gclient # hooks. When run from hooks this script should be a no-op on non-linux # platforms. # The sysroot image could be constructed from scratch based on the current # state or Debian Wheezy but for consistency we currently use a pre-built root # image. The image will normally need to be rebuilt every time chrome's build # dependancies are changed. import platform import optparse import os import re import shutil import subprocess import sys SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) URL_PREFIX = 'https://commondatastorage.googleapis.com' URL_PATH = 'chrome-linux-sysroot/toolchain' REVISION = 36982 TARBALL_AMD64 = 'debian_wheezy_amd64_sysroot.tgz' TARBALL_I386 = 'debian_wheezy_i386_sysroot.tgz' SYSROOT_DIR_AMD64 = 'debian_wheezy_amd64-sysroot' SYSROOT_DIR_I386 = 'debian_wheezy_i386-sysroot' def main(args): if options.arch not in ['amd64', 'i386']: print 'Unknown architecture: %s' % options.arch return 1 if options.linux_only: # This argument is passed when run from the gclient hooks. # In this case we return early on non-linux platforms. if not sys.platform.startswith('linux'): return 0 # Only install the sysroot for an Official Chrome Linux build. defined = ['branding=Chrome', 'buildtype=Official'] undefined = ['chromeos=1'] gyp_defines = os.environ.get('GYP_DEFINES', '') for option in defined: if option not in gyp_defines: return 0 for option in undefined: if option in gyp_defines: return 0 # Check for optional target_arch and only install for that architecture. # If target_arch is not specified, then only install for the host # architecture. 
host_arch = '' if 'target_arch=x64' in gyp_defines: host_arch = 'amd64' elif 'target_arch=ia32' in gyp_defines: host_arch = 'i386' else: # Figure out host arch, like the host_arch variable in build/common.gypi. machine_type = platform.machine() if machine_type in ['amd64', 'x86_64']: host_arch = 'amd64' elif re.match('(i[3-6]86|i86pc)$', machine_type): host_arch = 'i386' if host_arch != options.arch: return 0 # The sysroot directory should match the one specified in build/common.gypi. # TODO(thestig) Consider putting this else where to avoid having to recreate # it on every build. linux_dir = os.path.dirname(SCRIPT_DIR) if options.arch == 'amd64': sysroot = os.path.join(linux_dir, SYSROOT_DIR_AMD64) tarball_filename = TARBALL_AMD64 else: sysroot = os.path.join(linux_dir, SYSROOT_DIR_I386) tarball_filename = TARBALL_I386 url = '%s/%s/%s/%s' % (URL_PREFIX, URL_PATH, REVISION, tarball_filename) stamp = os.path.join(sysroot, '.stamp') if os.path.exists(stamp): with open(stamp) as s: if s.read() == url: print 'Debian Wheezy %s root image already up-to-date: %s' % \ (options.arch, sysroot) return 0 print 'Installing Debian Wheezy %s root image: %s' % (options.arch, sysroot) if os.path.isdir(sysroot): shutil.rmtree(sysroot) os.mkdir(sysroot) tarball = os.path.join(sysroot, tarball_filename) subprocess.check_call(['curl', '-L', url, '-o', tarball]) subprocess.check_call(['tar', 'xf', tarball, '-C', sysroot]) os.remove(tarball) with open(stamp, 'w') as s: s.write(url) return 0 if __name__ == '__main__': parser = optparse.OptionParser('usage: %prog [OPTIONS]') parser.add_option('', '--linux-only', dest='linux_only', action='store_true', default=False, help='Only install sysroot for official ' 'Linux builds') parser.add_option('', '--arch', dest='arch', help='Sysroot architecture, i386 or amd64') options, args = parser.parse_args() sys.exit(main(options))
bsd-3-clause
teosz/servo
components/script/dom/bindings/codegen/parser/tests/test_distinguishability.py
50
12785
def firstArgType(method): return method.signatures()[0][1][0].type def WebIDLTest(parser, harness): parser.parse(""" dictionary Dict { }; callback interface Foo { }; interface Bar { // Bit of a pain to get things that have dictionary types void passDict(optional Dict arg); void passFoo(Foo arg); void passNullableUnion((object? or DOMString) arg); void passNullable(Foo? arg); }; """) results = parser.finish() iface = results[2] harness.ok(iface.isInterface(), "Should have interface") dictMethod = iface.members[0] ifaceMethod = iface.members[1] nullableUnionMethod = iface.members[2] nullableIfaceMethod = iface.members[3] dictType = firstArgType(dictMethod) ifaceType = firstArgType(ifaceMethod) harness.ok(dictType.isDictionary(), "Should have dictionary type"); harness.ok(ifaceType.isInterface(), "Should have interface type"); harness.ok(ifaceType.isCallbackInterface(), "Should have callback interface type"); harness.ok(not dictType.isDistinguishableFrom(ifaceType), "Dictionary not distinguishable from callback interface") harness.ok(not ifaceType.isDistinguishableFrom(dictType), "Callback interface not distinguishable from dictionary") nullableUnionType = firstArgType(nullableUnionMethod) nullableIfaceType = firstArgType(nullableIfaceMethod) harness.ok(nullableUnionType.isUnion(), "Should have union type"); harness.ok(nullableIfaceType.isInterface(), "Should have interface type"); harness.ok(nullableIfaceType.nullable(), "Should have nullable type"); harness.ok(not nullableUnionType.isDistinguishableFrom(nullableIfaceType), "Nullable type not distinguishable from union with nullable " "member type") harness.ok(not nullableIfaceType.isDistinguishableFrom(nullableUnionType), "Union with nullable member type not distinguishable from " "nullable type") parser = parser.reset() parser.parse(""" interface TestIface { void passKid(Kid arg); void passParent(Parent arg); void passGrandparent(Grandparent arg); void passImplemented(Implemented arg); void 
passImplementedParent(ImplementedParent arg); void passUnrelated1(Unrelated1 arg); void passUnrelated2(Unrelated2 arg); void passArrayBuffer(ArrayBuffer arg); void passArrayBuffer(ArrayBufferView arg); }; interface Kid : Parent {}; interface Parent : Grandparent {}; interface Grandparent {}; interface Implemented : ImplementedParent {}; Parent implements Implemented; interface ImplementedParent {}; interface Unrelated1 {}; interface Unrelated2 {}; """) results = parser.finish() iface = results[0] harness.ok(iface.isInterface(), "Should have interface") argTypes = [firstArgType(method) for method in iface.members] unrelatedTypes = [firstArgType(method) for method in iface.members[-3:]] for type1 in argTypes: for type2 in argTypes: distinguishable = (type1 is not type2 and (type1 in unrelatedTypes or type2 in unrelatedTypes)) harness.check(type1.isDistinguishableFrom(type2), distinguishable, "Type %s should %sbe distinguishable from type %s" % (type1, "" if distinguishable else "not ", type2)) harness.check(type2.isDistinguishableFrom(type1), distinguishable, "Type %s should %sbe distinguishable from type %s" % (type2, "" if distinguishable else "not ", type1)) parser = parser.reset() parser.parse(""" interface Dummy {}; interface TestIface { void method(long arg1, TestIface arg2); void method(long arg1, long arg2); void method(long arg1, Dummy arg2); void method(DOMString arg1, DOMString arg2, DOMString arg3); }; """) results = parser.finish() harness.check(len(results[1].members), 1, "Should look like we have one method") harness.check(len(results[1].members[0].signatures()), 4, "Should have four signatures") parser = parser.reset() threw = False try: parser.parse(""" interface Dummy {}; interface TestIface { void method(long arg1, TestIface arg2); void method(long arg1, long arg2); void method(any arg1, Dummy arg2); void method(DOMString arg1, DOMString arg2, DOMString arg3); }; """) results = parser.finish() except: threw = True harness.ok(threw, "Should throw 
when args before the distinguishing arg are not " "all the same type") parser = parser.reset() threw = False try: parser.parse(""" interface Dummy {}; interface TestIface { void method(long arg1, TestIface arg2); void method(long arg1, long arg2); void method(any arg1, DOMString arg2); void method(DOMString arg1, DOMString arg2, DOMString arg3); }; """) results = parser.finish() except: threw = True harness.ok(threw, "Should throw when there is no distinguishing index") # Now let's test our whole distinguishability table argTypes = [ "long", "short", "long?", "short?", "boolean", "boolean?", "DOMString", "ByteString", "Enum", "Enum2", "Interface", "Interface?", "AncestorInterface", "UnrelatedInterface", "ImplementedInterface", "CallbackInterface", "CallbackInterface?", "CallbackInterface2", "object", "Callback", "Callback2", "optional Dict", "optional Dict2", "sequence<long>", "sequence<short>", "MozMap<object>", "MozMap<Dict>", "MozMap<long>", "Date", "Date?", "any", "Promise<any>", "Promise<any>?", "USVString", "ArrayBuffer", "ArrayBufferView", "SharedArrayBuffer", "Uint8Array", "Uint16Array" ] # When we can parse Date and RegExp, we need to add them here. # Try to categorize things a bit to keep list lengths down def allBut(list1, list2): return [a for a in list1 if a not in list2 and (a != "any" and a != "Promise<any>" and a != "Promise<any>?")] numerics = [ "long", "short", "long?", "short?" ] booleans = [ "boolean", "boolean?" 
] primitives = numerics + booleans nonNumerics = allBut(argTypes, numerics) nonBooleans = allBut(argTypes, booleans) strings = [ "DOMString", "ByteString", "Enum", "Enum2", "USVString" ] nonStrings = allBut(argTypes, strings) nonObjects = primitives + strings objects = allBut(argTypes, nonObjects ) bufferSourceTypes = ["ArrayBuffer", "ArrayBufferView", "Uint8Array", "Uint16Array"] sharedBufferSourceTypes = ["SharedArrayBuffer"] interfaces = [ "Interface", "Interface?", "AncestorInterface", "UnrelatedInterface", "ImplementedInterface" ] + bufferSourceTypes + sharedBufferSourceTypes nullables = ["long?", "short?", "boolean?", "Interface?", "CallbackInterface?", "optional Dict", "optional Dict2", "Date?", "any", "Promise<any>?"] dates = [ "Date", "Date?" ] sequences = [ "sequence<long>", "sequence<short>" ] nonUserObjects = nonObjects + interfaces + dates + sequences otherObjects = allBut(argTypes, nonUserObjects + ["object"]) notRelatedInterfaces = (nonObjects + ["UnrelatedInterface"] + otherObjects + dates + sequences + bufferSourceTypes + sharedBufferSourceTypes) mozMaps = [ "MozMap<object>", "MozMap<Dict>", "MozMap<long>" ] # Build a representation of the distinguishability table as a dict # of dicts, holding True values where needed, holes elsewhere. 
data = dict(); for type in argTypes: data[type] = dict() def setDistinguishable(type, types): for other in types: data[type][other] = True setDistinguishable("long", nonNumerics) setDistinguishable("short", nonNumerics) setDistinguishable("long?", allBut(nonNumerics, nullables)) setDistinguishable("short?", allBut(nonNumerics, nullables)) setDistinguishable("boolean", nonBooleans) setDistinguishable("boolean?", allBut(nonBooleans, nullables)) setDistinguishable("DOMString", nonStrings) setDistinguishable("ByteString", nonStrings) setDistinguishable("USVString", nonStrings) setDistinguishable("Enum", nonStrings) setDistinguishable("Enum2", nonStrings) setDistinguishable("Interface", notRelatedInterfaces) setDistinguishable("Interface?", allBut(notRelatedInterfaces, nullables)) setDistinguishable("AncestorInterface", notRelatedInterfaces) setDistinguishable("UnrelatedInterface", allBut(argTypes, ["object", "UnrelatedInterface"])) setDistinguishable("ImplementedInterface", notRelatedInterfaces) setDistinguishable("CallbackInterface", nonUserObjects) setDistinguishable("CallbackInterface?", allBut(nonUserObjects, nullables)) setDistinguishable("CallbackInterface2", nonUserObjects) setDistinguishable("object", nonObjects) setDistinguishable("Callback", nonUserObjects) setDistinguishable("Callback2", nonUserObjects) setDistinguishable("optional Dict", allBut(nonUserObjects, nullables)) setDistinguishable("optional Dict2", allBut(nonUserObjects, nullables)) setDistinguishable("sequence<long>", allBut(argTypes, sequences + ["object"])) setDistinguishable("sequence<short>", allBut(argTypes, sequences + ["object"])) setDistinguishable("MozMap<object>", nonUserObjects) setDistinguishable("MozMap<Dict>", nonUserObjects) setDistinguishable("MozMap<long>", nonUserObjects) setDistinguishable("Date", allBut(argTypes, dates + ["object"])) setDistinguishable("Date?", allBut(argTypes, dates + nullables + ["object"])) setDistinguishable("any", []) setDistinguishable("Promise<any>", 
[]) setDistinguishable("Promise<any>?", []) setDistinguishable("ArrayBuffer", allBut(argTypes, ["ArrayBuffer", "object"])) setDistinguishable("ArrayBufferView", allBut(argTypes, ["ArrayBufferView", "Uint8Array", "Uint16Array", "object"])) setDistinguishable("Uint8Array", allBut(argTypes, ["ArrayBufferView", "Uint8Array", "object"])) setDistinguishable("Uint16Array", allBut(argTypes, ["ArrayBufferView", "Uint16Array", "object"])) setDistinguishable("SharedArrayBuffer", allBut(argTypes, ["SharedArrayBuffer", "object"])) def areDistinguishable(type1, type2): return data[type1].get(type2, False) def checkDistinguishability(parser, type1, type2): idlTemplate = """ enum Enum { "a", "b" }; enum Enum2 { "c", "d" }; interface Interface : AncestorInterface {}; interface AncestorInterface {}; interface UnrelatedInterface {}; interface ImplementedInterface {}; Interface implements ImplementedInterface; callback interface CallbackInterface {}; callback interface CallbackInterface2 {}; callback Callback = any(); callback Callback2 = long(short arg); dictionary Dict {}; dictionary Dict2 {}; interface _Promise {}; interface TestInterface {%s }; """ methodTemplate = """ void myMethod(%s arg);""" methods = (methodTemplate % type1) + (methodTemplate % type2) idl = idlTemplate % methods parser = parser.reset() threw = False try: parser.parse(idl) results = parser.finish() except: threw = True if areDistinguishable(type1, type2): harness.ok(not threw, "Should not throw for '%s' and '%s' because they are distinguishable" % (type1, type2)) else: harness.ok(threw, "Should throw for '%s' and '%s' because they are not distinguishable" % (type1, type2)) # Enumerate over everything in both orders, since order matters in # terms of our implementation of distinguishability checks for type1 in argTypes: for type2 in argTypes: checkDistinguishability(parser, type1, type2)
mpl-2.0
fitzgen/servo
tests/wpt/web-platform-tests/tools/pytest/testing/test_assertinterpret.py
171
6254
"PYTEST_DONT_REWRITE" import py import pytest from _pytest.assertion import util def exvalue(): return py.std.sys.exc_info()[1] def f(): return 2 def test_not_being_rewritten(): assert "@py_builtins" not in globals() def test_assert(): try: assert f() == 3 except AssertionError: e = exvalue() s = str(e) assert s.startswith('assert 2 == 3\n') def test_assert_with_explicit_message(): try: assert f() == 3, "hello" except AssertionError: e = exvalue() assert e.msg == 'hello' def test_assert_within_finally(): excinfo = pytest.raises(ZeroDivisionError, """ try: 1/0 finally: i = 42 """) s = excinfo.exconly() assert py.std.re.search("division.+by zero", s) is not None #def g(): # A.f() #excinfo = getexcinfo(TypeError, g) #msg = getmsg(excinfo) #assert msg.find("must be called with A") != -1 def test_assert_multiline_1(): try: assert (f() == 3) except AssertionError: e = exvalue() s = str(e) assert s.startswith('assert 2 == 3\n') def test_assert_multiline_2(): try: assert (f() == (4, 3)[-1]) except AssertionError: e = exvalue() s = str(e) assert s.startswith('assert 2 ==') def test_in(): try: assert "hi" in [1, 2] except AssertionError: e = exvalue() s = str(e) assert s.startswith("assert 'hi' in") def test_is(): try: assert 1 is 2 except AssertionError: e = exvalue() s = str(e) assert s.startswith("assert 1 is 2") def test_attrib(): class Foo(object): b = 1 i = Foo() try: assert i.b == 2 except AssertionError: e = exvalue() s = str(e) assert s.startswith("assert 1 == 2") def test_attrib_inst(): class Foo(object): b = 1 try: assert Foo().b == 2 except AssertionError: e = exvalue() s = str(e) assert s.startswith("assert 1 == 2") def test_len(): l = list(range(42)) try: assert len(l) == 100 except AssertionError: e = exvalue() s = str(e) assert s.startswith("assert 42 == 100") assert "where 42 = len([" in s def test_assert_non_string_message(): class A: def __str__(self): return "hello" try: assert 0 == 1, A() except AssertionError: e = exvalue() assert e.msg == "hello" def 
test_assert_keyword_arg(): def f(x=3): return False try: assert f(x=5) except AssertionError: e = exvalue() assert "x=5" in e.msg def test_private_class_variable(): class X: def __init__(self): self.__v = 41 def m(self): assert self.__v == 42 try: X().m() except AssertionError: e = exvalue() assert "== 42" in e.msg # These tests should both fail, but should fail nicely... class WeirdRepr: def __repr__(self): return '<WeirdRepr\nsecond line>' def bug_test_assert_repr(): v = WeirdRepr() try: assert v == 1 except AssertionError: e = exvalue() assert e.msg.find('WeirdRepr') != -1 assert e.msg.find('second line') != -1 assert 0 def test_assert_non_string(): try: assert 0, ['list'] except AssertionError: e = exvalue() assert e.msg.find("list") != -1 def test_assert_implicit_multiline(): try: x = [1,2,3] assert x != [1, 2, 3] except AssertionError: e = exvalue() assert e.msg.find('assert [1, 2, 3] !=') != -1 def test_assert_with_brokenrepr_arg(): class BrokenRepr: def __repr__(self): 0 / 0 e = AssertionError(BrokenRepr()) if e.msg.find("broken __repr__") == -1: pytest.fail("broken __repr__ not handle correctly") def test_multiple_statements_per_line(): try: a = 1; assert a == 2 except AssertionError: e = exvalue() assert "assert 1 == 2" in e.msg def test_power(): try: assert 2**3 == 7 except AssertionError: e = exvalue() assert "assert (2 ** 3) == 7" in e.msg def test_assert_customizable_reprcompare(monkeypatch): monkeypatch.setattr(util, '_reprcompare', lambda *args: 'hello') try: assert 3 == 4 except AssertionError: e = exvalue() s = str(e) assert "hello" in s def test_assert_long_source_1(): try: assert len == [ (None, ['somet text', 'more text']), ] except AssertionError: e = exvalue() s = str(e) assert 're-run' not in s assert 'somet text' in s def test_assert_long_source_2(): try: assert(len == [ (None, ['somet text', 'more text']), ]) except AssertionError: e = exvalue() s = str(e) assert 're-run' not in s assert 'somet text' in s def 
test_assert_raise_alias(testdir): testdir.makepyfile(""" "PYTEST_DONT_REWRITE" import sys EX = AssertionError def test_hello(): raise EX("hello" "multi" "line") """) result = testdir.runpytest() result.stdout.fnmatch_lines([ "*def test_hello*", "*raise EX*", "*1 failed*", ]) def test_assert_raise_subclass(): class SomeEx(AssertionError): def __init__(self, *args): super(SomeEx, self).__init__() try: raise SomeEx("hello") except AssertionError: s = str(exvalue()) assert 're-run' not in s assert 'could not determine' in s def test_assert_raises_in_nonzero_of_object_pytest_issue10(): class A(object): def __nonzero__(self): raise ValueError(42) def __lt__(self, other): return A() def __repr__(self): return "<MY42 object>" def myany(x): return True try: assert not(myany(A() < 0)) except AssertionError: e = exvalue() s = str(e) assert "<MY42 object> < 0" in s
mpl-2.0
dracos/django
tests/invalid_models_tests/test_relative_fields.py
44
58589
from django.core.checks import Error, Warning as DjangoWarning from django.db import models from django.db.models.fields.related import ForeignObject from django.test.testcases import SimpleTestCase, skipIfDBFeature from django.test.utils import isolate_apps, override_settings @isolate_apps('invalid_models_tests') class RelativeFieldTests(SimpleTestCase): def test_valid_foreign_key_without_accessor(self): class Target(models.Model): # There would be a clash if Model.field installed an accessor. model = models.IntegerField() class Model(models.Model): field = models.ForeignKey(Target, models.CASCADE, related_name='+') field = Model._meta.get_field('field') self.assertEqual(field.check(), []) def test_foreign_key_to_missing_model(self): # Model names are resolved when a model is being created, so we cannot # test relative fields in isolation and we need to attach them to a # model. class Model(models.Model): foreign_key = models.ForeignKey('Rel1', models.CASCADE) field = Model._meta.get_field('foreign_key') self.assertEqual(field.check(), [ Error( "Field defines a relation with model 'Rel1', " "which is either not installed, or is abstract.", obj=field, id='fields.E300', ), ]) @isolate_apps('invalid_models_tests') def test_foreign_key_to_isolate_apps_model(self): """ #25723 - Referenced model registration lookup should be run against the field's model registry. 
""" class OtherModel(models.Model): pass class Model(models.Model): foreign_key = models.ForeignKey('OtherModel', models.CASCADE) field = Model._meta.get_field('foreign_key') self.assertEqual(field.check(from_model=Model), []) def test_many_to_many_to_missing_model(self): class Model(models.Model): m2m = models.ManyToManyField("Rel2") field = Model._meta.get_field('m2m') self.assertEqual(field.check(from_model=Model), [ Error( "Field defines a relation with model 'Rel2', " "which is either not installed, or is abstract.", obj=field, id='fields.E300', ), ]) @isolate_apps('invalid_models_tests') def test_many_to_many_to_isolate_apps_model(self): """ #25723 - Referenced model registration lookup should be run against the field's model registry. """ class OtherModel(models.Model): pass class Model(models.Model): m2m = models.ManyToManyField('OtherModel') field = Model._meta.get_field('m2m') self.assertEqual(field.check(from_model=Model), []) def test_many_to_many_with_limit_choices_auto_created_no_warning(self): class Model(models.Model): name = models.CharField(max_length=20) class ModelM2M(models.Model): m2m = models.ManyToManyField(Model, limit_choices_to={'name': 'test_name'}) self.assertEqual(ModelM2M.check(), []) def test_many_to_many_with_useless_options(self): class Model(models.Model): name = models.CharField(max_length=20) class ModelM2M(models.Model): m2m = models.ManyToManyField( Model, null=True, validators=[lambda x: x], limit_choices_to={'name': 'test_name'}, through='ThroughModel', through_fields=('modelm2m', 'model'), ) class ThroughModel(models.Model): model = models.ForeignKey('Model', models.CASCADE) modelm2m = models.ForeignKey('ModelM2M', models.CASCADE) field = ModelM2M._meta.get_field('m2m') self.assertEqual(ModelM2M.check(), [ DjangoWarning( 'null has no effect on ManyToManyField.', obj=field, id='fields.W340', ), DjangoWarning( 'ManyToManyField does not support validators.', obj=field, id='fields.W341', ), DjangoWarning( 'limit_choices_to has 
no effect on ManyToManyField ' 'with a through model.', obj=field, id='fields.W343', ), ]) def test_ambiguous_relationship_model(self): class Person(models.Model): pass class Group(models.Model): field = models.ManyToManyField('Person', through="AmbiguousRelationship", related_name='tertiary') class AmbiguousRelationship(models.Model): # Too much foreign keys to Person. first_person = models.ForeignKey(Person, models.CASCADE, related_name="first") second_person = models.ForeignKey(Person, models.CASCADE, related_name="second") second_model = models.ForeignKey(Group, models.CASCADE) field = Group._meta.get_field('field') self.assertEqual(field.check(from_model=Group), [ Error( "The model is used as an intermediate model by " "'invalid_models_tests.Group.field', but it has more than one " "foreign key to 'Person', which is ambiguous. You must specify " "which foreign key Django should use via the through_fields " "keyword argument.", hint=( 'If you want to create a recursive relationship, use ' 'ForeignKey("self", symmetrical=False, through="AmbiguousRelationship").' ), obj=field, id='fields.E335', ), ]) def test_relationship_model_with_foreign_key_to_wrong_model(self): class WrongModel(models.Model): pass class Person(models.Model): pass class Group(models.Model): members = models.ManyToManyField('Person', through="InvalidRelationship") class InvalidRelationship(models.Model): person = models.ForeignKey(Person, models.CASCADE) wrong_foreign_key = models.ForeignKey(WrongModel, models.CASCADE) # The last foreign key should point to Group model. 
field = Group._meta.get_field('members') self.assertEqual(field.check(from_model=Group), [ Error( "The model is used as an intermediate model by " "'invalid_models_tests.Group.members', but it does not " "have a foreign key to 'Group' or 'Person'.", obj=InvalidRelationship, id='fields.E336', ), ]) def test_relationship_model_missing_foreign_key(self): class Person(models.Model): pass class Group(models.Model): members = models.ManyToManyField('Person', through="InvalidRelationship") class InvalidRelationship(models.Model): group = models.ForeignKey(Group, models.CASCADE) # No foreign key to Person field = Group._meta.get_field('members') self.assertEqual(field.check(from_model=Group), [ Error( "The model is used as an intermediate model by " "'invalid_models_tests.Group.members', but it does not have " "a foreign key to 'Group' or 'Person'.", obj=InvalidRelationship, id='fields.E336', ), ]) def test_missing_relationship_model(self): class Person(models.Model): pass class Group(models.Model): members = models.ManyToManyField('Person', through="MissingM2MModel") field = Group._meta.get_field('members') self.assertEqual(field.check(from_model=Group), [ Error( "Field specifies a many-to-many relation through model " "'MissingM2MModel', which has not been installed.", obj=field, id='fields.E331', ), ]) def test_missing_relationship_model_on_model_check(self): class Person(models.Model): pass class Group(models.Model): members = models.ManyToManyField('Person', through='MissingM2MModel') self.assertEqual(Group.check(), [ Error( "Field specifies a many-to-many relation through model " "'MissingM2MModel', which has not been installed.", obj=Group._meta.get_field('members'), id='fields.E331', ), ]) @isolate_apps('invalid_models_tests') def test_many_to_many_through_isolate_apps_model(self): """ #25723 - Through model registration lookup should be run against the field's model registry. 
""" class GroupMember(models.Model): person = models.ForeignKey('Person', models.CASCADE) group = models.ForeignKey('Group', models.CASCADE) class Person(models.Model): pass class Group(models.Model): members = models.ManyToManyField('Person', through='GroupMember') field = Group._meta.get_field('members') self.assertEqual(field.check(from_model=Group), []) def test_symmetrical_self_referential_field(self): class Person(models.Model): # Implicit symmetrical=False. friends = models.ManyToManyField('self', through="Relationship") class Relationship(models.Model): first = models.ForeignKey(Person, models.CASCADE, related_name="rel_from_set") second = models.ForeignKey(Person, models.CASCADE, related_name="rel_to_set") field = Person._meta.get_field('friends') self.assertEqual(field.check(from_model=Person), [ Error( 'Many-to-many fields with intermediate tables must not be symmetrical.', obj=field, id='fields.E332', ), ]) def test_too_many_foreign_keys_in_self_referential_model(self): class Person(models.Model): friends = models.ManyToManyField('self', through="InvalidRelationship", symmetrical=False) class InvalidRelationship(models.Model): first = models.ForeignKey(Person, models.CASCADE, related_name="rel_from_set_2") second = models.ForeignKey(Person, models.CASCADE, related_name="rel_to_set_2") third = models.ForeignKey(Person, models.CASCADE, related_name="too_many_by_far") field = Person._meta.get_field('friends') self.assertEqual(field.check(from_model=Person), [ Error( "The model is used as an intermediate model by " "'invalid_models_tests.Person.friends', but it has more than two " "foreign keys to 'Person', which is ambiguous. 
You must specify " "which two foreign keys Django should use via the through_fields " "keyword argument.", hint='Use through_fields to specify which two foreign keys Django should use.', obj=InvalidRelationship, id='fields.E333', ), ]) def test_symmetric_self_reference_with_intermediate_table(self): class Person(models.Model): # Explicit symmetrical=True. friends = models.ManyToManyField('self', through="Relationship", symmetrical=True) class Relationship(models.Model): first = models.ForeignKey(Person, models.CASCADE, related_name="rel_from_set") second = models.ForeignKey(Person, models.CASCADE, related_name="rel_to_set") field = Person._meta.get_field('friends') self.assertEqual(field.check(from_model=Person), [ Error( 'Many-to-many fields with intermediate tables must not be symmetrical.', obj=field, id='fields.E332', ), ]) def test_symmetric_self_reference_with_intermediate_table_and_through_fields(self): """ Using through_fields in a m2m with an intermediate model shouldn't mask its incompatibility with symmetry. """ class Person(models.Model): # Explicit symmetrical=True. 
friends = models.ManyToManyField( 'self', symmetrical=True, through="Relationship", through_fields=('first', 'second'), ) class Relationship(models.Model): first = models.ForeignKey(Person, models.CASCADE, related_name="rel_from_set") second = models.ForeignKey(Person, models.CASCADE, related_name="rel_to_set") referee = models.ForeignKey(Person, models.CASCADE, related_name="referred") field = Person._meta.get_field('friends') self.assertEqual(field.check(from_model=Person), [ Error( 'Many-to-many fields with intermediate tables must not be symmetrical.', obj=field, id='fields.E332', ), ]) def test_foreign_key_to_abstract_model(self): class AbstractModel(models.Model): class Meta: abstract = True class Model(models.Model): rel_string_foreign_key = models.ForeignKey('AbstractModel', models.CASCADE) rel_class_foreign_key = models.ForeignKey(AbstractModel, models.CASCADE) fields = [ Model._meta.get_field('rel_string_foreign_key'), Model._meta.get_field('rel_class_foreign_key'), ] expected_error = Error( "Field defines a relation with model 'AbstractModel', " "which is either not installed, or is abstract.", id='fields.E300', ) for field in fields: expected_error.obj = field self.assertEqual(field.check(), [expected_error]) def test_m2m_to_abstract_model(self): class AbstractModel(models.Model): class Meta: abstract = True class Model(models.Model): rel_string_m2m = models.ManyToManyField('AbstractModel') rel_class_m2m = models.ManyToManyField(AbstractModel) fields = [ Model._meta.get_field('rel_string_m2m'), Model._meta.get_field('rel_class_m2m'), ] expected_error = Error( "Field defines a relation with model 'AbstractModel', " "which is either not installed, or is abstract.", id='fields.E300', ) for field in fields: expected_error.obj = field self.assertEqual(field.check(from_model=Model), [expected_error]) def test_unique_m2m(self): class Person(models.Model): name = models.CharField(max_length=5) class Group(models.Model): members = 
models.ManyToManyField('Person', unique=True) field = Group._meta.get_field('members') self.assertEqual(field.check(from_model=Group), [ Error( 'ManyToManyFields cannot be unique.', obj=field, id='fields.E330', ), ]) def test_foreign_key_to_non_unique_field(self): class Target(models.Model): bad = models.IntegerField() # No unique=True class Model(models.Model): foreign_key = models.ForeignKey('Target', models.CASCADE, to_field='bad') field = Model._meta.get_field('foreign_key') self.assertEqual(field.check(), [ Error( "'Target.bad' must set unique=True because it is referenced by a foreign key.", obj=field, id='fields.E311', ), ]) def test_foreign_key_to_non_unique_field_under_explicit_model(self): class Target(models.Model): bad = models.IntegerField() class Model(models.Model): field = models.ForeignKey(Target, models.CASCADE, to_field='bad') field = Model._meta.get_field('field') self.assertEqual(field.check(), [ Error( "'Target.bad' must set unique=True because it is referenced by a foreign key.", obj=field, id='fields.E311', ), ]) def test_foreign_object_to_non_unique_fields(self): class Person(models.Model): # Note that both fields are not unique. country_id = models.IntegerField() city_id = models.IntegerField() class MMembership(models.Model): person_country_id = models.IntegerField() person_city_id = models.IntegerField() person = models.ForeignObject( Person, on_delete=models.CASCADE, from_fields=['person_country_id', 'person_city_id'], to_fields=['country_id', 'city_id'], ) field = MMembership._meta.get_field('person') self.assertEqual(field.check(), [ Error( "No subset of the fields 'country_id', 'city_id' on model 'Person' is unique.", hint=( "Add unique=True on any of those fields or add at least " "a subset of them to a unique_together constraint." 
), obj=field, id='fields.E310', ) ]) def test_on_delete_set_null_on_non_nullable_field(self): class Person(models.Model): pass class Model(models.Model): foreign_key = models.ForeignKey('Person', models.SET_NULL) field = Model._meta.get_field('foreign_key') self.assertEqual(field.check(), [ Error( 'Field specifies on_delete=SET_NULL, but cannot be null.', hint='Set null=True argument on the field, or change the on_delete rule.', obj=field, id='fields.E320', ), ]) def test_on_delete_set_default_without_default_value(self): class Person(models.Model): pass class Model(models.Model): foreign_key = models.ForeignKey('Person', models.SET_DEFAULT) field = Model._meta.get_field('foreign_key') self.assertEqual(field.check(), [ Error( 'Field specifies on_delete=SET_DEFAULT, but has no default value.', hint='Set a default value, or change the on_delete rule.', obj=field, id='fields.E321', ), ]) @skipIfDBFeature('interprets_empty_strings_as_nulls') def test_nullable_primary_key(self): class Model(models.Model): field = models.IntegerField(primary_key=True, null=True) field = Model._meta.get_field('field') self.assertEqual(field.check(), [ Error( 'Primary keys must not have null=True.', hint='Set null=False on the field, or remove primary_key=True argument.', obj=field, id='fields.E007', ), ]) def test_not_swapped_model(self): class SwappableModel(models.Model): # A model that can be, but isn't swapped out. References to this # model should *not* raise any validation error. 
class Meta: swappable = 'TEST_SWAPPABLE_MODEL' class Model(models.Model): explicit_fk = models.ForeignKey( SwappableModel, models.CASCADE, related_name='explicit_fk', ) implicit_fk = models.ForeignKey( 'invalid_models_tests.SwappableModel', models.CASCADE, related_name='implicit_fk', ) explicit_m2m = models.ManyToManyField(SwappableModel, related_name='explicit_m2m') implicit_m2m = models.ManyToManyField( 'invalid_models_tests.SwappableModel', related_name='implicit_m2m', ) explicit_fk = Model._meta.get_field('explicit_fk') self.assertEqual(explicit_fk.check(), []) implicit_fk = Model._meta.get_field('implicit_fk') self.assertEqual(implicit_fk.check(), []) explicit_m2m = Model._meta.get_field('explicit_m2m') self.assertEqual(explicit_m2m.check(from_model=Model), []) implicit_m2m = Model._meta.get_field('implicit_m2m') self.assertEqual(implicit_m2m.check(from_model=Model), []) @override_settings(TEST_SWAPPED_MODEL='invalid_models_tests.Replacement') def test_referencing_to_swapped_model(self): class Replacement(models.Model): pass class SwappedModel(models.Model): class Meta: swappable = 'TEST_SWAPPED_MODEL' class Model(models.Model): explicit_fk = models.ForeignKey( SwappedModel, models.CASCADE, related_name='explicit_fk', ) implicit_fk = models.ForeignKey( 'invalid_models_tests.SwappedModel', models.CASCADE, related_name='implicit_fk', ) explicit_m2m = models.ManyToManyField(SwappedModel, related_name='explicit_m2m') implicit_m2m = models.ManyToManyField( 'invalid_models_tests.SwappedModel', related_name='implicit_m2m', ) fields = [ Model._meta.get_field('explicit_fk'), Model._meta.get_field('implicit_fk'), Model._meta.get_field('explicit_m2m'), Model._meta.get_field('implicit_m2m'), ] expected_error = Error( ("Field defines a relation with the model " "'invalid_models_tests.SwappedModel', which has been swapped out."), hint="Update the relation to point at 'settings.TEST_SWAPPED_MODEL'.", id='fields.E301', ) for field in fields: expected_error.obj = field 
self.assertEqual(field.check(from_model=Model), [expected_error]) def test_related_field_has_invalid_related_name(self): digit = 0 illegal_non_alphanumeric = '!' whitespace = '\t' invalid_related_names = [ '%s_begins_with_digit' % digit, '%s_begins_with_illegal_non_alphanumeric' % illegal_non_alphanumeric, '%s_begins_with_whitespace' % whitespace, 'contains_%s_illegal_non_alphanumeric' % illegal_non_alphanumeric, 'contains_%s_whitespace' % whitespace, 'ends_with_with_illegal_non_alphanumeric_%s' % illegal_non_alphanumeric, 'ends_with_whitespace_%s' % whitespace, 'with', # a Python keyword 'related_name\n', '', ',', # non-ASCII ] class Parent(models.Model): pass for invalid_related_name in invalid_related_names: Child = type('Child%s' % invalid_related_name, (models.Model,), { 'parent': models.ForeignKey('Parent', models.CASCADE, related_name=invalid_related_name), '__module__': Parent.__module__, }) field = Child._meta.get_field('parent') self.assertEqual(Child.check(), [ Error( "The name '%s' is invalid related_name for field Child%s.parent" % (invalid_related_name, invalid_related_name), hint="Related name must be a valid Python identifier or end with a '+'", obj=field, id='fields.E306', ), ]) def test_related_field_has_valid_related_name(self): lowercase = 'a' uppercase = 'A' digit = 0 related_names = [ '%s_starts_with_lowercase' % lowercase, '%s_tarts_with_uppercase' % uppercase, '_starts_with_underscore', 'contains_%s_digit' % digit, 'ends_with_plus+', '_+', '+', '試', '試驗+', ] class Parent(models.Model): pass for related_name in related_names: Child = type('Child%s' % related_name, (models.Model,), { 'parent': models.ForeignKey('Parent', models.CASCADE, related_name=related_name), '__module__': Parent.__module__, }) self.assertEqual(Child.check(), []) def test_to_fields_exist(self): class Parent(models.Model): pass class Child(models.Model): a = models.PositiveIntegerField() b = models.PositiveIntegerField() parent = ForeignObject( Parent, 
on_delete=models.SET_NULL, from_fields=('a', 'b'), to_fields=('a', 'b'), ) field = Child._meta.get_field('parent') self.assertEqual(field.check(), [ Error( "The to_field 'a' doesn't exist on the related model 'invalid_models_tests.Parent'.", obj=field, id='fields.E312', ), Error( "The to_field 'b' doesn't exist on the related model 'invalid_models_tests.Parent'.", obj=field, id='fields.E312', ), ]) def test_to_fields_not_checked_if_related_model_doesnt_exist(self): class Child(models.Model): a = models.PositiveIntegerField() b = models.PositiveIntegerField() parent = ForeignObject( 'invalid_models_tests.Parent', on_delete=models.SET_NULL, from_fields=('a', 'b'), to_fields=('a', 'b'), ) field = Child._meta.get_field('parent') self.assertEqual(field.check(), [ Error( "Field defines a relation with model 'invalid_models_tests.Parent', " "which is either not installed, or is abstract.", id='fields.E300', obj=field, ), ]) def test_invalid_related_query_name(self): class Target(models.Model): pass class Model(models.Model): first = models.ForeignKey(Target, models.CASCADE, related_name='contains__double') second = models.ForeignKey(Target, models.CASCADE, related_query_name='ends_underscore_') self.assertEqual(Model.check(), [ Error( "Reverse query name 'contains__double' must not contain '__'.", hint=("Add or change a related_name or related_query_name " "argument for this field."), obj=Model._meta.get_field('first'), id='fields.E309', ), Error( "Reverse query name 'ends_underscore_' must not end with an " "underscore.", hint=("Add or change a related_name or related_query_name " "argument for this field."), obj=Model._meta.get_field('second'), id='fields.E308', ), ]) @isolate_apps('invalid_models_tests') class AccessorClashTests(SimpleTestCase): def test_fk_to_integer(self): self._test_accessor_clash( target=models.IntegerField(), relative=models.ForeignKey('Target', models.CASCADE)) def test_fk_to_fk(self): self._test_accessor_clash( 
target=models.ForeignKey('Another', models.CASCADE), relative=models.ForeignKey('Target', models.CASCADE)) def test_fk_to_m2m(self): self._test_accessor_clash( target=models.ManyToManyField('Another'), relative=models.ForeignKey('Target', models.CASCADE)) def test_m2m_to_integer(self): self._test_accessor_clash( target=models.IntegerField(), relative=models.ManyToManyField('Target')) def test_m2m_to_fk(self): self._test_accessor_clash( target=models.ForeignKey('Another', models.CASCADE), relative=models.ManyToManyField('Target')) def test_m2m_to_m2m(self): self._test_accessor_clash( target=models.ManyToManyField('Another'), relative=models.ManyToManyField('Target')) def _test_accessor_clash(self, target, relative): class Another(models.Model): pass class Target(models.Model): model_set = target class Model(models.Model): rel = relative self.assertEqual(Model.check(), [ Error( "Reverse accessor for 'Model.rel' clashes with field name 'Target.model_set'.", hint=("Rename field 'Target.model_set', or add/change " "a related_name argument to the definition " "for field 'Model.rel'."), obj=Model._meta.get_field('rel'), id='fields.E302', ), ]) def test_clash_between_accessors(self): class Target(models.Model): pass class Model(models.Model): foreign = models.ForeignKey(Target, models.CASCADE) m2m = models.ManyToManyField(Target) self.assertEqual(Model.check(), [ Error( "Reverse accessor for 'Model.foreign' clashes with reverse accessor for 'Model.m2m'.", hint=( "Add or change a related_name argument to the definition " "for 'Model.foreign' or 'Model.m2m'." ), obj=Model._meta.get_field('foreign'), id='fields.E304', ), Error( "Reverse accessor for 'Model.m2m' clashes with reverse accessor for 'Model.foreign'.", hint=( "Add or change a related_name argument to the definition " "for 'Model.m2m' or 'Model.foreign'." ), obj=Model._meta.get_field('m2m'), id='fields.E304', ), ]) def test_m2m_to_m2m_with_inheritance(self): """ Ref #22047. 
""" class Target(models.Model): pass class Model(models.Model): children = models.ManyToManyField('Child', related_name="m2m_clash", related_query_name="no_clash") class Parent(models.Model): m2m_clash = models.ManyToManyField('Target') class Child(Parent): pass self.assertEqual(Model.check(), [ Error( "Reverse accessor for 'Model.children' clashes with field name 'Child.m2m_clash'.", hint=( "Rename field 'Child.m2m_clash', or add/change a related_name " "argument to the definition for field 'Model.children'." ), obj=Model._meta.get_field('children'), id='fields.E302', ) ]) def test_no_clash_for_hidden_related_name(self): class Stub(models.Model): pass class ManyToManyRel(models.Model): thing1 = models.ManyToManyField(Stub, related_name='+') thing2 = models.ManyToManyField(Stub, related_name='+') class FKRel(models.Model): thing1 = models.ForeignKey(Stub, models.CASCADE, related_name='+') thing2 = models.ForeignKey(Stub, models.CASCADE, related_name='+') self.assertEqual(ManyToManyRel.check(), []) self.assertEqual(FKRel.check(), []) @isolate_apps('invalid_models_tests') class ReverseQueryNameClashTests(SimpleTestCase): def test_fk_to_integer(self): self._test_reverse_query_name_clash( target=models.IntegerField(), relative=models.ForeignKey('Target', models.CASCADE)) def test_fk_to_fk(self): self._test_reverse_query_name_clash( target=models.ForeignKey('Another', models.CASCADE), relative=models.ForeignKey('Target', models.CASCADE)) def test_fk_to_m2m(self): self._test_reverse_query_name_clash( target=models.ManyToManyField('Another'), relative=models.ForeignKey('Target', models.CASCADE)) def test_m2m_to_integer(self): self._test_reverse_query_name_clash( target=models.IntegerField(), relative=models.ManyToManyField('Target')) def test_m2m_to_fk(self): self._test_reverse_query_name_clash( target=models.ForeignKey('Another', models.CASCADE), relative=models.ManyToManyField('Target')) def test_m2m_to_m2m(self): self._test_reverse_query_name_clash( 
target=models.ManyToManyField('Another'), relative=models.ManyToManyField('Target')) def _test_reverse_query_name_clash(self, target, relative): class Another(models.Model): pass class Target(models.Model): model = target class Model(models.Model): rel = relative self.assertEqual(Model.check(), [ Error( "Reverse query name for 'Model.rel' clashes with field name 'Target.model'.", hint=( "Rename field 'Target.model', or add/change a related_name " "argument to the definition for field 'Model.rel'." ), obj=Model._meta.get_field('rel'), id='fields.E303', ), ]) @isolate_apps('invalid_models_tests') class ExplicitRelatedNameClashTests(SimpleTestCase): def test_fk_to_integer(self): self._test_explicit_related_name_clash( target=models.IntegerField(), relative=models.ForeignKey('Target', models.CASCADE, related_name='clash')) def test_fk_to_fk(self): self._test_explicit_related_name_clash( target=models.ForeignKey('Another', models.CASCADE), relative=models.ForeignKey('Target', models.CASCADE, related_name='clash')) def test_fk_to_m2m(self): self._test_explicit_related_name_clash( target=models.ManyToManyField('Another'), relative=models.ForeignKey('Target', models.CASCADE, related_name='clash')) def test_m2m_to_integer(self): self._test_explicit_related_name_clash( target=models.IntegerField(), relative=models.ManyToManyField('Target', related_name='clash')) def test_m2m_to_fk(self): self._test_explicit_related_name_clash( target=models.ForeignKey('Another', models.CASCADE), relative=models.ManyToManyField('Target', related_name='clash')) def test_m2m_to_m2m(self): self._test_explicit_related_name_clash( target=models.ManyToManyField('Another'), relative=models.ManyToManyField('Target', related_name='clash')) def _test_explicit_related_name_clash(self, target, relative): class Another(models.Model): pass class Target(models.Model): clash = target class Model(models.Model): rel = relative self.assertEqual(Model.check(), [ Error( "Reverse accessor for 'Model.rel' clashes 
with field name 'Target.clash'.", hint=( "Rename field 'Target.clash', or add/change a related_name " "argument to the definition for field 'Model.rel'." ), obj=Model._meta.get_field('rel'), id='fields.E302', ), Error( "Reverse query name for 'Model.rel' clashes with field name 'Target.clash'.", hint=( "Rename field 'Target.clash', or add/change a related_name " "argument to the definition for field 'Model.rel'." ), obj=Model._meta.get_field('rel'), id='fields.E303', ), ]) @isolate_apps('invalid_models_tests') class ExplicitRelatedQueryNameClashTests(SimpleTestCase): def test_fk_to_integer(self, related_name=None): self._test_explicit_related_query_name_clash( target=models.IntegerField(), relative=models.ForeignKey( 'Target', models.CASCADE, related_name=related_name, related_query_name='clash', ) ) def test_hidden_fk_to_integer(self, related_name=None): self.test_fk_to_integer(related_name='+') def test_fk_to_fk(self, related_name=None): self._test_explicit_related_query_name_clash( target=models.ForeignKey('Another', models.CASCADE), relative=models.ForeignKey( 'Target', models.CASCADE, related_name=related_name, related_query_name='clash', ) ) def test_hidden_fk_to_fk(self): self.test_fk_to_fk(related_name='+') def test_fk_to_m2m(self, related_name=None): self._test_explicit_related_query_name_clash( target=models.ManyToManyField('Another'), relative=models.ForeignKey( 'Target', models.CASCADE, related_name=related_name, related_query_name='clash', ) ) def test_hidden_fk_to_m2m(self): self.test_fk_to_m2m(related_name='+') def test_m2m_to_integer(self, related_name=None): self._test_explicit_related_query_name_clash( target=models.IntegerField(), relative=models.ManyToManyField('Target', related_name=related_name, related_query_name='clash')) def test_hidden_m2m_to_integer(self): self.test_m2m_to_integer(related_name='+') def test_m2m_to_fk(self, related_name=None): self._test_explicit_related_query_name_clash( target=models.ForeignKey('Another', 
models.CASCADE), relative=models.ManyToManyField('Target', related_name=related_name, related_query_name='clash')) def test_hidden_m2m_to_fk(self): self.test_m2m_to_fk(related_name='+') def test_m2m_to_m2m(self, related_name=None): self._test_explicit_related_query_name_clash( target=models.ManyToManyField('Another'), relative=models.ManyToManyField( 'Target', related_name=related_name, related_query_name='clash', ) ) def test_hidden_m2m_to_m2m(self): self.test_m2m_to_m2m(related_name='+') def _test_explicit_related_query_name_clash(self, target, relative): class Another(models.Model): pass class Target(models.Model): clash = target class Model(models.Model): rel = relative self.assertEqual(Model.check(), [ Error( "Reverse query name for 'Model.rel' clashes with field name 'Target.clash'.", hint=( "Rename field 'Target.clash', or add/change a related_name " "argument to the definition for field 'Model.rel'." ), obj=Model._meta.get_field('rel'), id='fields.E303', ), ]) @isolate_apps('invalid_models_tests') class SelfReferentialM2MClashTests(SimpleTestCase): def test_clash_between_accessors(self): class Model(models.Model): first_m2m = models.ManyToManyField('self', symmetrical=False) second_m2m = models.ManyToManyField('self', symmetrical=False) self.assertEqual(Model.check(), [ Error( "Reverse accessor for 'Model.first_m2m' clashes with reverse accessor for 'Model.second_m2m'.", hint=( "Add or change a related_name argument to the definition " "for 'Model.first_m2m' or 'Model.second_m2m'." ), obj=Model._meta.get_field('first_m2m'), id='fields.E304', ), Error( "Reverse accessor for 'Model.second_m2m' clashes with reverse accessor for 'Model.first_m2m'.", hint=( "Add or change a related_name argument to the definition " "for 'Model.second_m2m' or 'Model.first_m2m'." 
), obj=Model._meta.get_field('second_m2m'), id='fields.E304', ), ]) def test_accessor_clash(self): class Model(models.Model): model_set = models.ManyToManyField("self", symmetrical=False) self.assertEqual(Model.check(), [ Error( "Reverse accessor for 'Model.model_set' clashes with field name 'Model.model_set'.", hint=( "Rename field 'Model.model_set', or add/change a related_name " "argument to the definition for field 'Model.model_set'." ), obj=Model._meta.get_field('model_set'), id='fields.E302', ), ]) def test_reverse_query_name_clash(self): class Model(models.Model): model = models.ManyToManyField("self", symmetrical=False) self.assertEqual(Model.check(), [ Error( "Reverse query name for 'Model.model' clashes with field name 'Model.model'.", hint=( "Rename field 'Model.model', or add/change a related_name " "argument to the definition for field 'Model.model'." ), obj=Model._meta.get_field('model'), id='fields.E303', ), ]) def test_clash_under_explicit_related_name(self): class Model(models.Model): clash = models.IntegerField() m2m = models.ManyToManyField("self", symmetrical=False, related_name='clash') self.assertEqual(Model.check(), [ Error( "Reverse accessor for 'Model.m2m' clashes with field name 'Model.clash'.", hint=( "Rename field 'Model.clash', or add/change a related_name " "argument to the definition for field 'Model.m2m'." ), obj=Model._meta.get_field('m2m'), id='fields.E302', ), Error( "Reverse query name for 'Model.m2m' clashes with field name 'Model.clash'.", hint=( "Rename field 'Model.clash', or add/change a related_name " "argument to the definition for field 'Model.m2m'." 
), obj=Model._meta.get_field('m2m'), id='fields.E303', ), ]) def test_valid_model(self): class Model(models.Model): first = models.ManyToManyField("self", symmetrical=False, related_name='first_accessor') second = models.ManyToManyField("self", symmetrical=False, related_name='second_accessor') self.assertEqual(Model.check(), []) @isolate_apps('invalid_models_tests') class SelfReferentialFKClashTests(SimpleTestCase): def test_accessor_clash(self): class Model(models.Model): model_set = models.ForeignKey("Model", models.CASCADE) self.assertEqual(Model.check(), [ Error( "Reverse accessor for 'Model.model_set' clashes with field name 'Model.model_set'.", hint=( "Rename field 'Model.model_set', or add/change " "a related_name argument to the definition " "for field 'Model.model_set'." ), obj=Model._meta.get_field('model_set'), id='fields.E302', ), ]) def test_reverse_query_name_clash(self): class Model(models.Model): model = models.ForeignKey("Model", models.CASCADE) self.assertEqual(Model.check(), [ Error( "Reverse query name for 'Model.model' clashes with field name 'Model.model'.", hint=( "Rename field 'Model.model', or add/change a related_name " "argument to the definition for field 'Model.model'." ), obj=Model._meta.get_field('model'), id='fields.E303', ), ]) def test_clash_under_explicit_related_name(self): class Model(models.Model): clash = models.CharField(max_length=10) foreign = models.ForeignKey("Model", models.CASCADE, related_name='clash') self.assertEqual(Model.check(), [ Error( "Reverse accessor for 'Model.foreign' clashes with field name 'Model.clash'.", hint=( "Rename field 'Model.clash', or add/change a related_name " "argument to the definition for field 'Model.foreign'." ), obj=Model._meta.get_field('foreign'), id='fields.E302', ), Error( "Reverse query name for 'Model.foreign' clashes with field name 'Model.clash'.", hint=( "Rename field 'Model.clash', or add/change a related_name " "argument to the definition for field 'Model.foreign'." 
), obj=Model._meta.get_field('foreign'), id='fields.E303', ), ]) @isolate_apps('invalid_models_tests') class ComplexClashTests(SimpleTestCase): # New tests should not be included here, because this is a single, # self-contained sanity check, not a test of everything. def test_complex_clash(self): class Target(models.Model): tgt_safe = models.CharField(max_length=10) clash = models.CharField(max_length=10) model = models.CharField(max_length=10) clash1_set = models.CharField(max_length=10) class Model(models.Model): src_safe = models.CharField(max_length=10) foreign_1 = models.ForeignKey(Target, models.CASCADE, related_name='id') foreign_2 = models.ForeignKey(Target, models.CASCADE, related_name='src_safe') m2m_1 = models.ManyToManyField(Target, related_name='id') m2m_2 = models.ManyToManyField(Target, related_name='src_safe') self.assertEqual(Model.check(), [ Error( "Reverse accessor for 'Model.foreign_1' clashes with field name 'Target.id'.", hint=("Rename field 'Target.id', or add/change a related_name " "argument to the definition for field 'Model.foreign_1'."), obj=Model._meta.get_field('foreign_1'), id='fields.E302', ), Error( "Reverse query name for 'Model.foreign_1' clashes with field name 'Target.id'.", hint=("Rename field 'Target.id', or add/change a related_name " "argument to the definition for field 'Model.foreign_1'."), obj=Model._meta.get_field('foreign_1'), id='fields.E303', ), Error( "Reverse accessor for 'Model.foreign_1' clashes with reverse accessor for 'Model.m2m_1'.", hint=("Add or change a related_name argument to " "the definition for 'Model.foreign_1' or 'Model.m2m_1'."), obj=Model._meta.get_field('foreign_1'), id='fields.E304', ), Error( "Reverse query name for 'Model.foreign_1' clashes with reverse query name for 'Model.m2m_1'.", hint=("Add or change a related_name argument to " "the definition for 'Model.foreign_1' or 'Model.m2m_1'."), obj=Model._meta.get_field('foreign_1'), id='fields.E305', ), Error( "Reverse accessor for 
'Model.foreign_2' clashes with reverse accessor for 'Model.m2m_2'.", hint=("Add or change a related_name argument " "to the definition for 'Model.foreign_2' or 'Model.m2m_2'."), obj=Model._meta.get_field('foreign_2'), id='fields.E304', ), Error( "Reverse query name for 'Model.foreign_2' clashes with reverse query name for 'Model.m2m_2'.", hint=("Add or change a related_name argument to " "the definition for 'Model.foreign_2' or 'Model.m2m_2'."), obj=Model._meta.get_field('foreign_2'), id='fields.E305', ), Error( "Reverse accessor for 'Model.m2m_1' clashes with field name 'Target.id'.", hint=("Rename field 'Target.id', or add/change a related_name " "argument to the definition for field 'Model.m2m_1'."), obj=Model._meta.get_field('m2m_1'), id='fields.E302', ), Error( "Reverse query name for 'Model.m2m_1' clashes with field name 'Target.id'.", hint=("Rename field 'Target.id', or add/change a related_name " "argument to the definition for field 'Model.m2m_1'."), obj=Model._meta.get_field('m2m_1'), id='fields.E303', ), Error( "Reverse accessor for 'Model.m2m_1' clashes with reverse accessor for 'Model.foreign_1'.", hint=("Add or change a related_name argument to the definition " "for 'Model.m2m_1' or 'Model.foreign_1'."), obj=Model._meta.get_field('m2m_1'), id='fields.E304', ), Error( "Reverse query name for 'Model.m2m_1' clashes with reverse query name for 'Model.foreign_1'.", hint=("Add or change a related_name argument to " "the definition for 'Model.m2m_1' or 'Model.foreign_1'."), obj=Model._meta.get_field('m2m_1'), id='fields.E305', ), Error( "Reverse accessor for 'Model.m2m_2' clashes with reverse accessor for 'Model.foreign_2'.", hint=("Add or change a related_name argument to the definition " "for 'Model.m2m_2' or 'Model.foreign_2'."), obj=Model._meta.get_field('m2m_2'), id='fields.E304', ), Error( "Reverse query name for 'Model.m2m_2' clashes with reverse query name for 'Model.foreign_2'.", hint=("Add or change a related_name argument to the definition " "for 
'Model.m2m_2' or 'Model.foreign_2'."), obj=Model._meta.get_field('m2m_2'), id='fields.E305', ), ]) @isolate_apps('invalid_models_tests') class M2mThroughFieldsTests(SimpleTestCase): def test_m2m_field_argument_validation(self): """ ManyToManyField accepts the ``through_fields`` kwarg only if an intermediary table is specified. """ class Fan(models.Model): pass with self.assertRaisesMessage(ValueError, 'Cannot specify through_fields without a through model'): models.ManyToManyField(Fan, through_fields=('f1', 'f2')) def test_invalid_order(self): """ Mixing up the order of link fields to ManyToManyField.through_fields triggers validation errors. """ class Fan(models.Model): pass class Event(models.Model): invitees = models.ManyToManyField(Fan, through='Invitation', through_fields=('invitee', 'event')) class Invitation(models.Model): event = models.ForeignKey(Event, models.CASCADE) invitee = models.ForeignKey(Fan, models.CASCADE) inviter = models.ForeignKey(Fan, models.CASCADE, related_name='+') field = Event._meta.get_field('invitees') self.assertEqual(field.check(from_model=Event), [ Error( "'Invitation.invitee' is not a foreign key to 'Event'.", hint="Did you mean one of the following foreign keys to 'Event': event?", obj=field, id='fields.E339', ), Error( "'Invitation.event' is not a foreign key to 'Fan'.", hint="Did you mean one of the following foreign keys to 'Fan': invitee, inviter?", obj=field, id='fields.E339', ), ]) def test_invalid_field(self): """ Providing invalid field names to ManyToManyField.through_fields triggers validation errors. 
""" class Fan(models.Model): pass class Event(models.Model): invitees = models.ManyToManyField( Fan, through='Invitation', through_fields=('invalid_field_1', 'invalid_field_2'), ) class Invitation(models.Model): event = models.ForeignKey(Event, models.CASCADE) invitee = models.ForeignKey(Fan, models.CASCADE) inviter = models.ForeignKey(Fan, models.CASCADE, related_name='+') field = Event._meta.get_field('invitees') self.assertEqual(field.check(from_model=Event), [ Error( "The intermediary model 'invalid_models_tests.Invitation' has no field 'invalid_field_1'.", hint="Did you mean one of the following foreign keys to 'Event': event?", obj=field, id='fields.E338', ), Error( "The intermediary model 'invalid_models_tests.Invitation' has no field 'invalid_field_2'.", hint="Did you mean one of the following foreign keys to 'Fan': invitee, inviter?", obj=field, id='fields.E338', ), ]) def test_explicit_field_names(self): """ If ``through_fields`` kwarg is given, it must specify both link fields of the intermediary table. 
""" class Fan(models.Model): pass class Event(models.Model): invitees = models.ManyToManyField(Fan, through='Invitation', through_fields=(None, 'invitee')) class Invitation(models.Model): event = models.ForeignKey(Event, models.CASCADE) invitee = models.ForeignKey(Fan, models.CASCADE) inviter = models.ForeignKey(Fan, models.CASCADE, related_name='+') field = Event._meta.get_field('invitees') self.assertEqual(field.check(from_model=Event), [ Error( "Field specifies 'through_fields' but does not provide the names " "of the two link fields that should be used for the relation " "through model 'invalid_models_tests.Invitation'.", hint="Make sure you specify 'through_fields' as through_fields=('field1', 'field2')", obj=field, id='fields.E337', ), ]) def test_superset_foreign_object(self): class Parent(models.Model): a = models.PositiveIntegerField() b = models.PositiveIntegerField() c = models.PositiveIntegerField() class Meta: unique_together = (('a', 'b', 'c'),) class Child(models.Model): a = models.PositiveIntegerField() b = models.PositiveIntegerField() value = models.CharField(max_length=255) parent = ForeignObject( Parent, on_delete=models.SET_NULL, from_fields=('a', 'b'), to_fields=('a', 'b'), related_name='children', ) field = Child._meta.get_field('parent') self.assertEqual(field.check(from_model=Child), [ Error( "No subset of the fields 'a', 'b' on model 'Parent' is unique.", hint=( "Add unique=True on any of those fields or add at least " "a subset of them to a unique_together constraint." 
), obj=field, id='fields.E310', ), ]) def test_intersection_foreign_object(self): class Parent(models.Model): a = models.PositiveIntegerField() b = models.PositiveIntegerField() c = models.PositiveIntegerField() d = models.PositiveIntegerField() class Meta: unique_together = (('a', 'b', 'c'),) class Child(models.Model): a = models.PositiveIntegerField() b = models.PositiveIntegerField() d = models.PositiveIntegerField() value = models.CharField(max_length=255) parent = ForeignObject( Parent, on_delete=models.SET_NULL, from_fields=('a', 'b', 'd'), to_fields=('a', 'b', 'd'), related_name='children', ) field = Child._meta.get_field('parent') self.assertEqual(field.check(from_model=Child), [ Error( "No subset of the fields 'a', 'b', 'd' on model 'Parent' is unique.", hint=( "Add unique=True on any of those fields or add at least " "a subset of them to a unique_together constraint." ), obj=field, id='fields.E310', ), ])
bsd-3-clause
msampathkumar/python-social-auth
examples/flask_example/__init__.py
43
1567
import sys from sqlalchemy import create_engine from sqlalchemy.orm import scoped_session, sessionmaker from flask import Flask, g from flask.ext import login sys.path.append('../..') from social.apps.flask_app.routes import social_auth from social.apps.flask_app.template_filters import backends from social.apps.flask_app.default.models import init_social # App app = Flask(__name__) app.config.from_object('flask_example.settings') try: app.config.from_object('flask_example.local_settings') except ImportError: pass # DB engine = create_engine(app.config['SQLALCHEMY_DATABASE_URI']) Session = sessionmaker(autocommit=False, autoflush=False, bind=engine) db_session = scoped_session(Session) app.register_blueprint(social_auth) init_social(app, db_session) login_manager = login.LoginManager() login_manager.login_view = 'main' login_manager.login_message = '' login_manager.init_app(app) from flask_example import models from flask_example import routes @login_manager.user_loader def load_user(userid): try: return models.user.User.query.get(int(userid)) except (TypeError, ValueError): pass @app.before_request def global_user(): g.user = login.current_user @app.teardown_appcontext def commit_on_success(error=None): if error is None: db_session.commit() else: db_session.rollback() db_session.remove() @app.context_processor def inject_user(): try: return {'user': g.user} except AttributeError: return {'user': None} app.context_processor(backends)
bsd-3-clause
deanhiller/databus
webapp/play1.3.x/python/Lib/inspect.py
3
39210
# -*- coding: iso-8859-1 -*- """Get useful information from live Python objects. This module encapsulates the interface provided by the internal special attributes (func_*, co_*, im_*, tb_*, etc.) in a friendlier fashion. It also provides some help for examining source code and class layout. Here are some of the useful functions provided by this module: ismodule(), isclass(), ismethod(), isfunction(), isgeneratorfunction(), isgenerator(), istraceback(), isframe(), iscode(), isbuiltin(), isroutine() - check object types getmembers() - get members of an object that satisfy a given condition getfile(), getsourcefile(), getsource() - find an object's source code getdoc(), getcomments() - get documentation on an object getmodule() - determine the module that an object came from getclasstree() - arrange classes so as to represent their hierarchy getargspec(), getargvalues() - get info about function arguments formatargspec(), formatargvalues() - format an argument spec getouterframes(), getinnerframes() - get info about frames currentframe() - get the current stack frame stack(), trace() - get info about frames on the stack or in a traceback """ # This module is in the public domain. No warranties. __author__ = 'Ka-Ping Yee <ping@lfw.org>' __date__ = '1 Jan 2001' import sys import os import types import string import re import dis import imp import tokenize import linecache from operator import attrgetter from collections import namedtuple # These constants are from Include/code.h. CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS = 0x1, 0x2, 0x4, 0x8 CO_NESTED, CO_GENERATOR, CO_NOFREE = 0x10, 0x20, 0x40 # See Include/object.h TPFLAGS_IS_ABSTRACT = 1 << 20 # ----------------------------------------------------------- type-checking def ismodule(object): """Return true if the object is a module. 
Module objects provide these attributes: __doc__ documentation string __file__ filename (missing for built-in modules)""" return isinstance(object, types.ModuleType) def isclass(object): """Return true if the object is a class. Class objects provide these attributes: __doc__ documentation string __module__ name of module in which this class was defined""" return isinstance(object, types.ClassType) or hasattr(object, '__bases__') def ismethod(object): """Return true if the object is an instance method. Instance method objects provide these attributes: __doc__ documentation string __name__ name with which this method was defined im_class class object in which this method belongs im_func function object containing implementation of method im_self instance to which this method is bound, or None""" return isinstance(object, types.MethodType) def ismethoddescriptor(object): """Return true if the object is a method descriptor. But not if ismethod() or isclass() or isfunction() are true. This is new in Python 2.2, and, for example, is true of int.__add__. An object passing this test has a __get__ attribute but not a __set__ attribute, but beyond that the set of attributes varies. __name__ is usually sensible, and __doc__ often is. Methods implemented via descriptors that also pass one of the other tests return false from the ismethoddescriptor() test, simply because the other tests promise more -- you can, e.g., count on having the im_func attribute (etc) when an object passes ismethod().""" return (hasattr(object, "__get__") and not hasattr(object, "__set__") # else it's a data descriptor and not ismethod(object) # mutual exclusion and not isfunction(object) and not isclass(object)) def isdatadescriptor(object): """Return true if the object is a data descriptor. Data descriptors have both a __get__ and a __set__ attribute. Examples are properties (defined in Python) and getsets and members (defined in C). 
Typically, data descriptors will also have __name__ and __doc__ attributes (properties, getsets, and members have both of these attributes), but this is not guaranteed.""" return (hasattr(object, "__set__") and hasattr(object, "__get__")) if hasattr(types, 'MemberDescriptorType'): # CPython and equivalent def ismemberdescriptor(object): """Return true if the object is a member descriptor. Member descriptors are specialized descriptors defined in extension modules.""" return isinstance(object, types.MemberDescriptorType) else: # Other implementations def ismemberdescriptor(object): """Return true if the object is a member descriptor. Member descriptors are specialized descriptors defined in extension modules.""" return False if hasattr(types, 'GetSetDescriptorType'): # CPython and equivalent def isgetsetdescriptor(object): """Return true if the object is a getset descriptor. getset descriptors are specialized descriptors defined in extension modules.""" return isinstance(object, types.GetSetDescriptorType) else: # Other implementations def isgetsetdescriptor(object): """Return true if the object is a getset descriptor. getset descriptors are specialized descriptors defined in extension modules.""" return False def isfunction(object): """Return true if the object is a user-defined function. Function objects provide these attributes: __doc__ documentation string __name__ name with which this function was defined func_code code object containing compiled function bytecode func_defaults tuple of any default values for arguments func_doc (same as __doc__) func_globals global namespace in which this function was defined func_name (same as __name__)""" return isinstance(object, types.FunctionType) def isgeneratorfunction(object): """Return true if the object is a user-defined generator function. Generator function objects provides same attributes as functions. 
See isfunction.__doc__ for attributes listing.""" if (isfunction(object) or ismethod(object)) and \ object.func_code.co_flags & CO_GENERATOR: return True def isgenerator(object): """Return true if the object is a generator. Generator objects provide these attributes: __iter__ defined to support interation over container close raises a new GeneratorExit exception inside the generator to terminate the iteration gi_code code object gi_frame frame object or possibly None once the generator has been exhausted gi_running set to 1 when generator is executing, 0 otherwise next return the next item from the container send resumes the generator and "sends" a value that becomes the result of the current yield-expression throw used to raise an exception inside the generator""" return isinstance(object, types.GeneratorType) def istraceback(object): """Return true if the object is a traceback. Traceback objects provide these attributes: tb_frame frame object at this level tb_lasti index of last attempted instruction in bytecode tb_lineno current line number in Python source code tb_next next inner traceback object (called by this level)""" return isinstance(object, types.TracebackType) def isframe(object): """Return true if the object is a frame object. 
Frame objects provide these attributes: f_back next outer frame object (this frame's caller) f_builtins built-in namespace seen by this frame f_code code object being executed in this frame f_exc_traceback traceback if raised in this frame, or None f_exc_type exception type if raised in this frame, or None f_exc_value exception value if raised in this frame, or None f_globals global namespace seen by this frame f_lasti index of last attempted instruction in bytecode f_lineno current line number in Python source code f_locals local namespace seen by this frame f_restricted 0 or 1 if frame is in restricted execution mode f_trace tracing function for this frame, or None""" return isinstance(object, types.FrameType) def iscode(object): """Return true if the object is a code object. Code objects provide these attributes: co_argcount number of arguments (not including * or ** args) co_code string of raw compiled bytecode co_consts tuple of constants used in the bytecode co_filename name of file in which this code object was created co_firstlineno number of first line in Python source code co_flags bitmap: 1=optimized | 2=newlocals | 4=*arg | 8=**arg co_lnotab encoded mapping of line numbers to bytecode indices co_name name with which this code object was defined co_names tuple of names of local variables co_nlocals number of local variables co_stacksize virtual machine stack space required co_varnames tuple of names of arguments and local variables""" return isinstance(object, types.CodeType) def isbuiltin(object): """Return true if the object is a built-in function or method. 
Built-in functions and methods provide these attributes: __doc__ documentation string __name__ original name of this function or method __self__ instance to which a method is bound, or None""" return isinstance(object, types.BuiltinFunctionType) def isroutine(object): """Return true if the object is any kind of function or method.""" return (isbuiltin(object) or isfunction(object) or ismethod(object) or ismethoddescriptor(object)) def isgenerator(object): """Return true if the object is a generator object.""" return isinstance(object, types.GeneratorType) def isabstract(object): """Return true if the object is an abstract base class (ABC).""" return isinstance(object, type) and object.__flags__ & TPFLAGS_IS_ABSTRACT def getmembers(object, predicate=None): """Return all members of an object as (name, value) pairs sorted by name. Optionally, only return members that satisfy a given predicate.""" results = [] for key in dir(object): value = getattr(object, key) if not predicate or predicate(value): results.append((key, value)) results.sort() return results Attribute = namedtuple('Attribute', 'name kind defining_class object') def classify_class_attrs(cls): """Return list of attribute-descriptor tuples. For each name in dir(cls), the return list contains a 4-tuple with these elements: 0. The name (a string). 1. The kind of attribute this is, one of these strings: 'class method' created via classmethod() 'static method' created via staticmethod() 'property' created via property() 'method' any other flavor of method 'data' not a method 2. The class which defined this attribute (a class). 3. The object as obtained directly from the defining class's __dict__, not via getattr. This is especially important for data attributes: C.data is just a data object, but C.__dict__['data'] may be a data descriptor with additional info, like a __doc__ string. """ mro = getmro(cls) names = dir(cls) result = [] for name in names: # Get the object associated with the name. 
# Getting an obj from the __dict__ sometimes reveals more than # using getattr. Static and class methods are dramatic examples. if name in cls.__dict__: obj = cls.__dict__[name] else: obj = getattr(cls, name) # Figure out where it was defined. homecls = getattr(obj, "__objclass__", None) if homecls is None: # search the dicts. for base in mro: if name in base.__dict__: homecls = base break # Get the object again, in order to get it from the defining # __dict__ instead of via getattr (if possible). if homecls is not None and name in homecls.__dict__: obj = homecls.__dict__[name] # Also get the object via getattr. obj_via_getattr = getattr(cls, name) # Classify the object. if isinstance(obj, staticmethod): kind = "static method" elif isinstance(obj, classmethod): kind = "class method" elif isinstance(obj, property): kind = "property" elif (ismethod(obj_via_getattr) or ismethoddescriptor(obj_via_getattr)): kind = "method" else: kind = "data" result.append(Attribute(name, kind, homecls, obj)) return result # ----------------------------------------------------------- class helpers def _searchbases(cls, accum): # Simulate the "classic class" search order. if cls in accum: return accum.append(cls) for base in cls.__bases__: _searchbases(base, accum) def getmro(cls): "Return tuple of base classes (including cls) in method resolution order." if hasattr(cls, "__mro__"): return cls.__mro__ else: result = [] _searchbases(cls, result) return tuple(result) # -------------------------------------------------- source code extraction def indentsize(line): """Return the indent size, in spaces, at the start of a line of text.""" expline = string.expandtabs(line) return len(expline) - len(string.lstrip(expline)) def getdoc(object): """Get the documentation string for an object. All tabs are expanded to spaces. 
To clean up docstrings that are indented to line up with blocks of code, any whitespace than can be uniformly removed from the second line onwards is removed.""" try: doc = object.__doc__ except AttributeError: return None if not isinstance(doc, types.StringTypes): return None return cleandoc(doc) def cleandoc(doc): """Clean up indentation from docstrings. Any whitespace that can be uniformly removed from the second line onwards is removed.""" try: lines = string.split(string.expandtabs(doc), '\n') except UnicodeError: return None else: # Find minimum indentation of any non-blank lines after first line. margin = sys.maxint for line in lines[1:]: content = len(string.lstrip(line)) if content: indent = len(line) - content margin = min(margin, indent) # Remove indentation. if lines: lines[0] = lines[0].lstrip() if margin < sys.maxint: for i in range(1, len(lines)): lines[i] = lines[i][margin:] # Remove any trailing or leading blank lines. while lines and not lines[-1]: lines.pop() while lines and not lines[0]: lines.pop(0) return string.join(lines, '\n') def getfile(object): """Work out which source or compiled file an object was defined in.""" if ismodule(object): if hasattr(object, '__file__'): return object.__file__ raise TypeError('arg is a built-in module') if isclass(object): object = sys.modules.get(object.__module__) if hasattr(object, '__file__'): return object.__file__ raise TypeError('arg is a built-in class') if ismethod(object): object = object.im_func if isfunction(object): object = object.func_code if istraceback(object): object = object.tb_frame if isframe(object): object = object.f_code if iscode(object): return object.co_filename raise TypeError('arg is not a module, class, method, ' 'function, traceback, frame, or code object') ModuleInfo = namedtuple('ModuleInfo', 'name suffix mode module_type') def getmoduleinfo(path): """Get the module name, suffix, mode, and module type for a given file.""" filename = os.path.basename(path) suffixes = map(lambda 
info: (-len(info[0]), info[0], info[1], info[2]), imp.get_suffixes()) suffixes.sort() # try longest suffixes first, in case they overlap for neglen, suffix, mode, mtype in suffixes: if filename[neglen:] == suffix: return ModuleInfo(filename[:neglen], suffix, mode, mtype) def getmodulename(path): """Return the module name for a given file, or None.""" info = getmoduleinfo(path) if info: return info[0] def getsourcefile(object): """Return the Python source file an object was defined in, if it exists.""" filename = getfile(object) if string.lower(filename[-4:]) in ('.pyc', '.pyo'): filename = filename[:-4] + '.py' for suffix, mode, kind in imp.get_suffixes(): if 'b' in mode and string.lower(filename[-len(suffix):]) == suffix: # Looks like a binary file. We want to only return a text file. return None if os.path.exists(filename): return filename # only return a non-existent filename if the module has a PEP 302 loader if hasattr(getmodule(object, filename), '__loader__'): return filename def getabsfile(object, _filename=None): """Return an absolute path to the source or compiled file for an object. 
The idea is for each object to have a unique origin, so this routine normalizes the result as much as possible.""" if _filename is None: _filename = getsourcefile(object) or getfile(object) return os.path.normcase(os.path.abspath(_filename)) modulesbyfile = {} _filesbymodname = {} def getmodule(object, _filename=None): """Return the module an object was defined in, or None if not found.""" if ismodule(object): return object if hasattr(object, '__module__'): return sys.modules.get(object.__module__) # Try the filename to modulename cache if _filename is not None and _filename in modulesbyfile: return sys.modules.get(modulesbyfile[_filename]) # Try the cache again with the absolute file name try: file = getabsfile(object, _filename) except TypeError: return None if file in modulesbyfile: return sys.modules.get(modulesbyfile[file]) # Update the filename to module name cache and check yet again # Copy sys.modules in order to cope with changes while iterating for modname, module in sys.modules.items(): if ismodule(module) and hasattr(module, '__file__'): f = module.__file__ if f == _filesbymodname.get(modname, None): # Have already mapped this module, so skip it continue _filesbymodname[modname] = f f = getabsfile(module) # Always map to the name the module knows itself by modulesbyfile[f] = modulesbyfile[ os.path.realpath(f)] = module.__name__ if file in modulesbyfile: return sys.modules.get(modulesbyfile[file]) # Check the main module main = sys.modules['__main__'] if not hasattr(object, '__name__'): return None if hasattr(main, object.__name__): mainobject = getattr(main, object.__name__) if mainobject is object: return main # Check builtins builtin = sys.modules['__builtin__'] if hasattr(builtin, object.__name__): builtinobject = getattr(builtin, object.__name__) if builtinobject is object: return builtin def findsource(object): """Return the entire source file and starting line number for an object. 
The argument may be a module, class, method, function, traceback, frame, or code object. The source code is returned as a list of all the lines in the file and the line number indexes a line in that list. An IOError is raised if the source code cannot be retrieved.""" file = getsourcefile(object) or getfile(object) module = getmodule(object, file) if module: lines = linecache.getlines(file, module.__dict__) else: lines = linecache.getlines(file) if not lines: raise IOError('could not get source code') if ismodule(object): return lines, 0 if isclass(object): name = object.__name__ pat = re.compile(r'^(\s*)class\s*' + name + r'\b') # make some effort to find the best matching class definition: # use the one with the least indentation, which is the one # that's most probably not inside a function definition. candidates = [] for i in range(len(lines)): match = pat.match(lines[i]) if match: # if it's at toplevel, it's already the best one if lines[i][0] == 'c': return lines, i # else add whitespace to candidate list candidates.append((match.group(1), i)) if candidates: # this will sort by whitespace, and by line number, # less whitespace first candidates.sort() return lines, candidates[0][1] else: raise IOError('could not find class definition') if ismethod(object): object = object.im_func if isfunction(object): object = object.func_code if istraceback(object): object = object.tb_frame if isframe(object): object = object.f_code if iscode(object): if not hasattr(object, 'co_firstlineno'): raise IOError('could not find function definition') lnum = object.co_firstlineno - 1 pat = re.compile(r'^(\s*def\s)|(.*(?<!\w)lambda(:|\s))|^(\s*@)') while lnum > 0: if pat.match(lines[lnum]): break lnum = lnum - 1 return lines, lnum raise IOError('could not find code object') def getcomments(object): """Get lines of comments immediately preceding an object's source code. Returns None when source can't be found. 
""" try: lines, lnum = findsource(object) except (IOError, TypeError): return None if ismodule(object): # Look for a comment block at the top of the file. start = 0 if lines and lines[0][:2] == '#!': start = 1 while start < len(lines) and string.strip(lines[start]) in ('', '#'): start = start + 1 if start < len(lines) and lines[start][:1] == '#': comments = [] end = start while end < len(lines) and lines[end][:1] == '#': comments.append(string.expandtabs(lines[end])) end = end + 1 return string.join(comments, '') # Look for a preceding block of comments at the same indentation. elif lnum > 0: indent = indentsize(lines[lnum]) end = lnum - 1 if end >= 0 and string.lstrip(lines[end])[:1] == '#' and \ indentsize(lines[end]) == indent: comments = [string.lstrip(string.expandtabs(lines[end]))] if end > 0: end = end - 1 comment = string.lstrip(string.expandtabs(lines[end])) while comment[:1] == '#' and indentsize(lines[end]) == indent: comments[:0] = [comment] end = end - 1 if end < 0: break comment = string.lstrip(string.expandtabs(lines[end])) while comments and string.strip(comments[0]) == '#': comments[:1] = [] while comments and string.strip(comments[-1]) == '#': comments[-1:] = [] return string.join(comments, '') class EndOfBlock(Exception): pass class BlockFinder: """Provide a tokeneater() method to detect the end of a code block.""" def __init__(self): self.indent = 0 self.islambda = False self.started = False self.passline = False self.last = 1 def tokeneater(self, type, token, srow_scol, erow_ecol, line): srow, scol = srow_scol erow, ecol = erow_ecol if not self.started: # look for the first "def", "class" or "lambda" if token in ("def", "class", "lambda"): if token == "lambda": self.islambda = True self.started = True self.passline = True # skip to the end of the line elif type == tokenize.NEWLINE: self.passline = False # stop skipping when a NEWLINE is seen self.last = srow if self.islambda: # lambdas always end at the first NEWLINE raise EndOfBlock elif 
self.passline: pass elif type == tokenize.INDENT: self.indent = self.indent + 1 self.passline = True elif type == tokenize.DEDENT: self.indent = self.indent - 1 # the end of matching indent/dedent pairs end a block # (note that this only works for "def"/"class" blocks, # not e.g. for "if: else:" or "try: finally:" blocks) if self.indent <= 0: raise EndOfBlock elif self.indent == 0 and type not in (tokenize.COMMENT, tokenize.NL): # any other token on the same indentation level end the previous # block as well, except the pseudo-tokens COMMENT and NL. raise EndOfBlock def getblock(lines): """Extract the block of code at the top of the given list of lines.""" blockfinder = BlockFinder() try: tokenize.tokenize(iter(lines).next, blockfinder.tokeneater) except (EndOfBlock, IndentationError): pass return lines[:blockfinder.last] def getsourcelines(object): """Return a list of source lines and starting line number for an object. The argument may be a module, class, method, function, traceback, frame, or code object. The source code is returned as a list of the lines corresponding to the object and the line number indicates where in the original source file the first line of code was found. An IOError is raised if the source code cannot be retrieved.""" lines, lnum = findsource(object) if ismodule(object): return lines, 0 else: return getblock(lines[lnum:]), lnum + 1 def getsource(object): """Return the text of the source code for an object. The argument may be a module, class, method, function, traceback, frame, or code object. The source code is returned as a single string. 
An IOError is raised if the source code cannot be retrieved.""" lines, lnum = getsourcelines(object) return string.join(lines, '') # --------------------------------------------------- class tree extraction def walktree(classes, children, parent): """Recursive helper function for getclasstree().""" results = [] classes.sort(key=attrgetter('__module__', '__name__')) for c in classes: results.append((c, c.__bases__)) if c in children: results.append(walktree(children[c], children, c)) return results def getclasstree(classes, unique=0): """Arrange the given list of classes into a hierarchy of nested lists. Where a nested list appears, it contains classes derived from the class whose entry immediately precedes the list. Each entry is a 2-tuple containing a class and a tuple of its base classes. If the 'unique' argument is true, exactly one entry appears in the returned structure for each class in the given list. Otherwise, classes using multiple inheritance and their descendants will appear multiple times.""" children = {} roots = [] for c in classes: if c.__bases__: for parent in c.__bases__: if not parent in children: children[parent] = [] children[parent].append(c) if unique and parent in classes: break elif c not in roots: roots.append(c) for parent in children: if parent not in classes: roots.append(parent) return walktree(roots, children, None) # ------------------------------------------------ argument list extraction Arguments = namedtuple('Arguments', 'args varargs keywords') def getargs(co): """Get information about the arguments accepted by a code object. 
Three things are returned: (args, varargs, varkw), where 'args' is a list of argument names (possibly containing nested lists), and 'varargs' and 'varkw' are the names of the * and ** arguments or None.""" if not iscode(co): raise TypeError('arg is not a code object') nargs = co.co_argcount names = co.co_varnames args = list(names[:nargs]) step = 0 # The following acrobatics are for anonymous (tuple) arguments. for i in range(nargs): if args[i][:1] in ('', '.'): stack, remain, count = [], [], [] while step < len(co.co_code): op = ord(co.co_code[step]) step = step + 1 if op >= dis.HAVE_ARGUMENT: opname = dis.opname[op] value = ord(co.co_code[step]) + ord(co.co_code[step+1])*256 step = step + 2 if opname in ('UNPACK_TUPLE', 'UNPACK_SEQUENCE'): remain.append(value) count.append(value) elif opname == 'STORE_FAST': stack.append(names[value]) # Special case for sublists of length 1: def foo((bar)) # doesn't generate the UNPACK_TUPLE bytecode, so if # `remain` is empty here, we have such a sublist. if not remain: stack[0] = [stack[0]] break else: remain[-1] = remain[-1] - 1 while remain[-1] == 0: remain.pop() size = count.pop() stack[-size:] = [stack[-size:]] if not remain: break remain[-1] = remain[-1] - 1 if not remain: break args[i] = stack[0] varargs = None if co.co_flags & CO_VARARGS: varargs = co.co_varnames[nargs] nargs = nargs + 1 varkw = None if co.co_flags & CO_VARKEYWORDS: varkw = co.co_varnames[nargs] return Arguments(args, varargs, varkw) ArgSpec = namedtuple('ArgSpec', 'args varargs keywords defaults') def getargspec(func): """Get the names and default values of a function's arguments. A tuple of four things is returned: (args, varargs, varkw, defaults). 'args' is a list of the argument names (it may contain nested lists). 'varargs' and 'varkw' are the names of the * and ** arguments or None. 'defaults' is an n-tuple of the default values of the last n arguments. 
""" if ismethod(func): func = func.im_func if not isfunction(func): raise TypeError('arg is not a Python function') args, varargs, varkw = getargs(func.func_code) return ArgSpec(args, varargs, varkw, func.func_defaults) ArgInfo = namedtuple('ArgInfo', 'args varargs keywords locals') def getargvalues(frame): """Get information about arguments passed into a particular frame. A tuple of four things is returned: (args, varargs, varkw, locals). 'args' is a list of the argument names (it may contain nested lists). 'varargs' and 'varkw' are the names of the * and ** arguments or None. 'locals' is the locals dictionary of the given frame.""" args, varargs, varkw = getargs(frame.f_code) return ArgInfo(args, varargs, varkw, frame.f_locals) def joinseq(seq): if len(seq) == 1: return '(' + seq[0] + ',)' else: return '(' + string.join(seq, ', ') + ')' def strseq(object, convert, join=joinseq): """Recursively walk a sequence, stringifying each element.""" if type(object) in (list, tuple): return join(map(lambda o, c=convert, j=join: strseq(o, c, j), object)) else: return convert(object) def formatargspec(args, varargs=None, varkw=None, defaults=None, formatarg=str, formatvarargs=lambda name: '*' + name, formatvarkw=lambda name: '**' + name, formatvalue=lambda value: '=' + repr(value), join=joinseq): """Format an argument spec from the 4 values returned by getargspec. The first four arguments are (args, varargs, varkw, defaults). The other four arguments are the corresponding optional formatting functions that are called to turn names and values into strings. 
The ninth argument is an optional function to format the sequence of arguments.""" specs = [] if defaults: firstdefault = len(args) - len(defaults) for i in range(len(args)): spec = strseq(args[i], formatarg, join) if defaults and i >= firstdefault: spec = spec + formatvalue(defaults[i - firstdefault]) specs.append(spec) if varargs is not None: specs.append(formatvarargs(varargs)) if varkw is not None: specs.append(formatvarkw(varkw)) return '(' + string.join(specs, ', ') + ')' def formatargvalues(args, varargs, varkw, locals, formatarg=str, formatvarargs=lambda name: '*' + name, formatvarkw=lambda name: '**' + name, formatvalue=lambda value: '=' + repr(value), join=joinseq): """Format an argument spec from the 4 values returned by getargvalues. The first four arguments are (args, varargs, varkw, locals). The next four arguments are the corresponding optional formatting functions that are called to turn names and values into strings. The ninth argument is an optional function to format the sequence of arguments.""" def convert(name, locals=locals, formatarg=formatarg, formatvalue=formatvalue): return formatarg(name) + formatvalue(locals[name]) specs = [] for i in range(len(args)): specs.append(strseq(args[i], convert, join)) if varargs: specs.append(formatvarargs(varargs) + formatvalue(locals[varargs])) if varkw: specs.append(formatvarkw(varkw) + formatvalue(locals[varkw])) return '(' + string.join(specs, ', ') + ')' # -------------------------------------------------- stack frame extraction Traceback = namedtuple('Traceback', 'filename lineno function code_context index') def getframeinfo(frame, context=1): """Get information about a frame or traceback object. A tuple of five things is returned: the filename, the line number of the current line, the function name, a list of lines of context from the source code, and the index of the current line within that list. 
The optional second argument specifies the number of lines of context to return, which are centered around the current line.""" if istraceback(frame): lineno = frame.tb_lineno frame = frame.tb_frame else: lineno = frame.f_lineno if not isframe(frame): raise TypeError('arg is not a frame or traceback object') filename = getsourcefile(frame) or getfile(frame) if context > 0: start = lineno - 1 - context//2 try: lines, lnum = findsource(frame) except IOError: lines = index = None else: start = max(start, 1) start = max(0, min(start, len(lines) - context)) lines = lines[start:start+context] index = lineno - 1 - start else: lines = index = None return Traceback(filename, lineno, frame.f_code.co_name, lines, index) def getlineno(frame): """Get the line number from a frame object, allowing for optimization.""" # FrameType.f_lineno is now a descriptor that grovels co_lnotab return frame.f_lineno def getouterframes(frame, context=1): """Get a list of records for a frame and all higher (calling) frames. Each record contains a frame object, filename, line number, function name, a list of lines of context, and index within the context.""" framelist = [] while frame: framelist.append((frame,) + getframeinfo(frame, context)) frame = frame.f_back return framelist def getinnerframes(tb, context=1): """Get a list of records for a traceback's frame and all lower frames. Each record contains a frame object, filename, line number, function name, a list of lines of context, and index within the context.""" framelist = [] while tb: framelist.append((tb.tb_frame,) + getframeinfo(tb, context)) tb = tb.tb_next return framelist currentframe = sys._getframe def stack(context=1): """Return a list of records for the stack above the caller's frame.""" return getouterframes(sys._getframe(1), context) def trace(context=1): """Return a list of records for the stack below the current exception.""" return getinnerframes(sys.exc_info()[2], context)
mpl-2.0
pjg101/SickRage
lib/imdb/Character.py
128
7700
""" Character module (imdb package). This module provides the Character class, used to store information about a given character. Copyright 2007-2010 Davide Alberani <da@erlug.linux.it> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA """ from copy import deepcopy from imdb.utils import analyze_name, build_name, flatten, _Container, cmpPeople class Character(_Container): """A Character. Every information about a character can be accessed as: characterObject['information'] to get a list of the kind of information stored in a Character object, use the keys() method; some useful aliases are defined (as "also known as" for the "akas" key); see the keys_alias dictionary. """ # The default sets of information retrieved. default_info = ('main', 'filmography', 'biography') # Aliases for some not-so-intuitive keys. keys_alias = {'mini biography': 'biography', 'bio': 'biography', 'character biography': 'biography', 'character biographies': 'biography', 'biographies': 'biography', 'character bio': 'biography', 'aka': 'akas', 'also known as': 'akas', 'alternate names': 'akas', 'personal quotes': 'quotes', 'keys': 'keywords', 'keyword': 'keywords'} keys_tomodify_list = ('biography', 'quotes') cmpFunct = cmpPeople def _init(self, **kwds): """Initialize a Character object. *characterID* -- the unique identifier for the character. 
*name* -- the name of the Character, if not in the data dictionary. *myName* -- the nickname you use for this character. *myID* -- your personal id for this character. *data* -- a dictionary used to initialize the object. *notes* -- notes about the given character. *accessSystem* -- a string representing the data access system used. *titlesRefs* -- a dictionary with references to movies. *namesRefs* -- a dictionary with references to persons. *charactersRefs* -- a dictionary with references to characters. *modFunct* -- function called returning text fields. """ name = kwds.get('name') if name and not self.data.has_key('name'): self.set_name(name) self.characterID = kwds.get('characterID', None) self.myName = kwds.get('myName', u'') def _reset(self): """Reset the Character object.""" self.characterID = None self.myName = u'' def set_name(self, name): """Set the name of the character.""" # XXX: convert name to unicode, if it's a plain string? try: d = analyze_name(name, canonical=0) self.data.update(d) except: # TODO: catch only IMDbPYParserError and issue a warning. pass def _additional_keys(self): """Valid keys to append to the data.keys() list.""" addkeys = [] if self.data.has_key('name'): addkeys += ['long imdb name'] if self.data.has_key('headshot'): addkeys += ['full-size headshot'] return addkeys def _getitem(self, key): """Handle special keys.""" ## XXX: can a character have an imdbIndex? if self.data.has_key('name'): if key == 'long imdb name': return build_name(self.data) if key == 'full-size headshot' and self.data.has_key('headshot'): return self._re_fullsizeURL.sub('', self.data.get('headshot', '')) return None def getID(self): """Return the characterID.""" return self.characterID def __nonzero__(self): """The Character is "false" if the self.data does not contain a name.""" # XXX: check the name and the characterID? 
if self.data.get('name'): return 1 return 0 def __contains__(self, item): """Return true if this Character was portrayed in the given Movie or it was impersonated by the given Person.""" from Movie import Movie from Person import Person if isinstance(item, Person): for m in flatten(self.data, yieldDictKeys=1, scalar=Movie): if item.isSame(m.currentRole): return 1 elif isinstance(item, Movie): for m in flatten(self.data, yieldDictKeys=1, scalar=Movie): if item.isSame(m): return 1 return 0 def isSameName(self, other): """Return true if two character have the same name and/or characterID.""" if not isinstance(other, self.__class__): return 0 if self.data.has_key('name') and \ other.data.has_key('name') and \ build_name(self.data, canonical=0) == \ build_name(other.data, canonical=0): return 1 if self.accessSystem == other.accessSystem and \ self.characterID is not None and \ self.characterID == other.characterID: return 1 return 0 isSameCharacter = isSameName def __deepcopy__(self, memo): """Return a deep copy of a Character instance.""" c = Character(name=u'', characterID=self.characterID, myName=self.myName, myID=self.myID, data=deepcopy(self.data, memo), notes=self.notes, accessSystem=self.accessSystem, titlesRefs=deepcopy(self.titlesRefs, memo), namesRefs=deepcopy(self.namesRefs, memo), charactersRefs=deepcopy(self.charactersRefs, memo)) c.current_info = list(self.current_info) c.set_mod_funct(self.modFunct) return c def __repr__(self): """String representation of a Character object.""" r = '<Character id:%s[%s] name:_%s_>' % (self.characterID, self.accessSystem, self.get('name')) if isinstance(r, unicode): r = r.encode('utf_8', 'replace') return r def __str__(self): """Simply print the short name.""" return self.get('name', u'').encode('utf_8', 'replace') def __unicode__(self): """Simply print the short title.""" return self.get('name', u'') def summary(self): """Return a string with a pretty-printed summary for the character.""" if not self: return u'' s = 
u'Character\n=====\nName: %s\n' % \ self.get('name', u'') bio = self.get('biography') if bio: s += u'Biography: %s\n' % bio[0] filmo = self.get('filmography') if filmo: a_list = [x.get('long imdb canonical title', u'') for x in filmo[:5]] s += u'Last movies with this character: %s.\n' % u'; '.join(a_list) return s
gpl-3.0
mrcslws/htmresearch
projects/wavelet_dataAggregation/run_nupic_aggregator.py
11
3532
# ---------------------------------------------------------------------- # Numenta Platform for Intelligent Computing (NuPIC) # Copyright (C) 2015, Numenta, Inc. Unless you have an agreement # with Numenta, Inc., for a separate license for this software code, the # following terms and conditions apply: # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero Public License version 3 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU Affero Public License for more details. # # You should have received a copy of the GNU Affero Public License # along with this program. If not, see http://www.gnu.org/licenses. # # http://numenta.org/licenses/ # ---------------------------------------------------------------------- """ Run Nupic aggregator on good day bad day data (1) Perform aggregation using nupic.data.aggregator (2) Plot aggregated and raw data using matplotlib/plotly """ from nupic.data import aggregator from nupic.data import fieldmeta from unicorn_backend.utils import date_time_utils import matplotlib.pyplot as plt import plotly.plotly as py plt.ion() plt.close('all') def initializeAggregator(aggSpec, modelSpec): inputRecordSchema = ( fieldmeta.FieldMetaInfo(modelSpec["timestampFieldName"], fieldmeta.FieldMetaType.datetime, fieldmeta.FieldMetaSpecial.timestamp), fieldmeta.FieldMetaInfo(modelSpec["valueFieldName"], fieldmeta.FieldMetaType.float, fieldmeta.FieldMetaSpecial.none), ) dataAggregator = aggregator.Aggregator( aggregationInfo=dict( fields=([(modelSpec["valueFieldName"], aggSpec["func"])] if aggSpec is not None else []), seconds=aggSpec["windowSize"] if aggSpec is not None else 0 ), inputFields=inputRecordSchema) return dataAggregator if __name__ == "__main__": inputFile = open('example_data/JAO_Apple_Heart 
Rate_raw_20160404.csv') # skip header lines inputFile.readline() aggSpec = {"func": "mean", "windowSize": 3000} modelSpec = {"timestampFieldName": "timestamp", "valueFieldName": "value"} dataAggregator = initializeAggregator(aggSpec, modelSpec) timeStampRaw = [] timeStampAgg = [] valueRaw = [] valueAgg = [] sliceEndTime = [] for inputRow in inputFile.readlines(): inputRow = inputRow.split(',') fields = [ date_time_utils.parseDatetime(inputRow[0], '%m/%d/%y %H:%M'), float(inputRow[1]) ] aggRow, _ = dataAggregator.next(fields, None) timeStampRaw.append(fields[0]) valueRaw.append(fields[1]) if aggRow is not None: sliceEndTime.append(dataAggregator._endTime) timeStampAgg.append(aggRow[0]) valueAgg.append(aggRow[1]) fig = plt.figure() plt.plot(timeStampRaw, valueRaw, '.') plt.plot(timeStampAgg, valueAgg, 'r+') yl = plt.ylim() # for timestamp in sliceEndTime: # plt.vlines(timestamp, yl[0], yl[1]) plt.legend(['Raw', 'Aggregate']) plt.xlabel('Timestamp') plt.ylabel('Value') plt.xlim([timeStampRaw[100], timeStampRaw[300]]) # plot_url = py.plot_mpl(fig, filename='GDBD_HeartRate_VisualizeAggregation', # fileopt='overwrite', sharing='private')
agpl-3.0
farthir/msc-project
snippets/plot_all.py
1
1515
import sys import math import pandas as pd import numpy as np import matplotlib.pyplot as plt from matplotlib import rcParams import matplotlib.cm as cm def main(): input_filename = sys.argv[1] df = pd.read_csv('data/%s.csv' % input_filename).round(10) df.columns = ['property', 'horizontal', 'vertical', 'force'] #max_force = math.ceil(df.ix[:,3].max()) font = {'family' : 'DejaVu Sans', 'weight' : 'normal', 'size' : 14} rcParams.update({'figure.autolayout': True}) properties = np.unique(df["property"]) plt.figure() plt.rc('font', **font) plt.xlabel('Vertical Displacement (mm)') plt.ylabel('Force (kN)') for prop in properties: label = '$e/l = %s$' % prop plt_df = df[df['property'] == prop] if prop == 0: prop = 0.9 plt.scatter(plt_df['vertical'], plt_df['force'], label=label, s=10, c=cm.Set1(prop)) legend = plt.legend() plt.savefig('data/%s_v.pdf' % input_filename) plt.show() plt.figure() plt.rc('font', **font) plt.xlabel('Horizontal Displacement (mm)') plt.ylabel('Force (kN)') for prop in properties: label = '$e/l = %s$' % prop plt_df = df[df['property'] == prop] if prop == 0: prop = 0.9 plt.scatter(plt_df['horizontal'], plt_df['force'], label=label, s=10, c=cm.Set1(prop)) legend = plt.legend() plt.savefig('data/%s_h.pdf' % input_filename) plt.show() if __name__ == "__main__": main()
mit
chadnetzer/numpy-gaurdro
numpy/numarray/alter_code1.py
102
9390
""" This module converts code written for numarray to run with numpy Makes the following changes: * Changes import statements import numarray.package --> import numpy.numarray.package as numarray_package with all numarray.package in code changed to numarray_package import numarray --> import numpy.numarray as numarray import numarray.package as <yyy> --> import numpy.numarray.package as <yyy> from numarray import <xxx> --> from numpy.numarray import <xxx> from numarray.package import <xxx> --> from numpy.numarray.package import <xxx> package can be convolve, image, nd_image, mlab, linear_algebra, ma, matrix, fft, random_array * Makes search and replace changes to: - .imaginary --> .imag - .flat --> .ravel() (most of the time) - .byteswapped() --> .byteswap(False) - .byteswap() --> .byteswap(True) - .info() --> numarray.info(self) - .isaligned() --> .flags.aligned - .isbyteswapped() --> (not .dtype.isnative) - .typecode() --> .dtype.char - .iscontiguous() --> .flags.contiguous - .is_c_array() --> .flags.carray and .dtype.isnative - .is_fortran_contiguous() --> .flags.fortran - .is_f_array() --> .dtype.isnative and .flags.farray - .itemsize() --> .itemsize - .nelements() --> .size - self.new(type) --> numarray.newobj(self, type) - .repeat(r) --> .repeat(r, axis=0) - .size() --> .size - self.type() -- numarray.typefrom(self) - .typecode() --> .dtype.char - .stddev() --> .std() - .togglebyteorder() --> numarray.togglebyteorder(self) - .getshape() --> .shape - .setshape(obj) --> .shape=obj - .getflat() --> .ravel() - .getreal() --> .real - .setreal() --> .real = - .getimag() --> .imag - .setimag() --> .imag = - .getimaginary() --> .imag - .setimaginary() --> .imag """ __all__ = ['convertfile', 'convertall', 'converttree', 'convertsrc'] import sys import os import re import glob def changeimports(fstr, name, newname): importstr = 'import %s' % name importasstr = 'import %s as ' % name fromstr = 'from %s import ' % name fromall=0 name_ = name if ('.' 
in name): name_ = name.replace('.','_') fstr = re.sub(r'(import\s+[^,\n\r]+,\s*)(%s)' % name, "\\1%s as %s" % (newname, name), fstr) fstr = fstr.replace(importasstr, 'import %s as ' % newname) fstr = fstr.replace(importstr, 'import %s as %s' % (newname,name_)) if (name_ != name): fstr = fstr.replace(name, name_) ind = 0 Nlen = len(fromstr) Nlen2 = len("from %s import " % newname) while 1: found = fstr.find(fromstr,ind) if (found < 0): break ind = found + Nlen if fstr[ind] == '*': continue fstr = "%sfrom %s import %s" % (fstr[:found], newname, fstr[ind:]) ind += Nlen2 - Nlen return fstr, fromall flatindex_re = re.compile('([.]flat(\s*?[[=]))') def addimport(astr): # find the first line with import on it ind = astr.find('import') start = astr.rfind(os.linesep, 0, ind) astr = "%s%s%s%s" % (astr[:start], os.linesep, "import numpy.numarray as numarray", astr[start:]) return astr def replaceattr(astr): astr = astr.replace(".imaginary", ".imag") astr = astr.replace(".byteswapped()",".byteswap(False)") astr = astr.replace(".byteswap()", ".byteswap(True)") astr = astr.replace(".isaligned()", ".flags.aligned") astr = astr.replace(".iscontiguous()",".flags.contiguous") astr = astr.replace(".is_fortran_contiguous()",".flags.fortran") astr = astr.replace(".itemsize()",".itemsize") astr = astr.replace(".size()",".size") astr = astr.replace(".nelements()",".size") astr = astr.replace(".typecode()",".dtype.char") astr = astr.replace(".stddev()",".std()") astr = astr.replace(".getshape()", ".shape") astr = astr.replace(".getflat()", ".ravel()") astr = astr.replace(".getreal", ".real") astr = astr.replace(".getimag", ".imag") astr = astr.replace(".getimaginary", ".imag") # preserve uses of flat that should be o.k. 
tmpstr = flatindex_re.sub(r"@@@@\2",astr) # replace other uses of flat tmpstr = tmpstr.replace(".flat",".ravel()") # put back .flat where it was valid astr = tmpstr.replace("@@@@", ".flat") return astr info_re = re.compile(r'(\S+)\s*[.]\s*info\s*[(]\s*[)]') new_re = re.compile(r'(\S+)\s*[.]\s*new\s*[(]\s*(\S+)\s*[)]') toggle_re = re.compile(r'(\S+)\s*[.]\s*togglebyteorder\s*[(]\s*[)]') type_re = re.compile(r'(\S+)\s*[.]\s*type\s*[(]\s*[)]') isbyte_re = re.compile(r'(\S+)\s*[.]\s*isbyteswapped\s*[(]\s*[)]') iscarr_re = re.compile(r'(\S+)\s*[.]\s*is_c_array\s*[(]\s*[)]') isfarr_re = re.compile(r'(\S+)\s*[.]\s*is_f_array\s*[(]\s*[)]') repeat_re = re.compile(r'(\S+)\s*[.]\s*repeat\s*[(]\s*(\S+)\s*[)]') setshape_re = re.compile(r'(\S+)\s*[.]\s*setshape\s*[(]\s*(\S+)\s*[)]') setreal_re = re.compile(r'(\S+)\s*[.]\s*setreal\s*[(]\s*(\S+)\s*[)]') setimag_re = re.compile(r'(\S+)\s*[.]\s*setimag\s*[(]\s*(\S+)\s*[)]') setimaginary_re = re.compile(r'(\S+)\s*[.]\s*setimaginary\s*[(]\s*(\S+)\s*[)]') def replaceother(astr): # self.info() --> numarray.info(self) # self.new(type) --> numarray.newobj(self, type) # self.togglebyteorder() --> numarray.togglebyteorder(self) # self.type() --> numarray.typefrom(self) (astr, n1) = info_re.subn('numarray.info(\\1)', astr) (astr, n2) = new_re.subn('numarray.newobj(\\1, \\2)', astr) (astr, n3) = toggle_re.subn('numarray.togglebyteorder(\\1)', astr) (astr, n4) = type_re.subn('numarray.typefrom(\\1)', astr) if (n1+n2+n3+n4 > 0): astr = addimport(astr) astr = isbyte_re.sub('not \\1.dtype.isnative', astr) astr = iscarr_re.sub('\\1.dtype.isnative and \\1.flags.carray', astr) astr = isfarr_re.sub('\\1.dtype.isnative and \\1.flags.farray', astr) astr = repeat_re.sub('\\1.repeat(\\2, axis=0)', astr) astr = setshape_re.sub('\\1.shape = \\2', astr) astr = setreal_re.sub('\\1.real = \\2', astr) astr = setimag_re.sub('\\1.imag = \\2', astr) astr = setimaginary_re.sub('\\1.imag = \\2', astr) return astr import datetime def fromstr(filestr): savestr = 
filestr[:] filestr, fromall = changeimports(filestr, 'numarray', 'numpy.numarray') base = 'numarray' newbase = 'numpy.numarray' for sub in ['', 'convolve', 'image', 'nd_image', 'mlab', 'linear_algebra', 'ma', 'matrix', 'fft', 'random_array']: if sub != '': sub = '.'+sub filestr, fromall = changeimports(filestr, base+sub, newbase+sub) filestr = replaceattr(filestr) filestr = replaceother(filestr) if savestr != filestr: name = os.path.split(sys.argv[0])[-1] today = datetime.date.today().strftime('%b %d, %Y') filestr = '## Automatically adapted for '\ 'numpy.numarray %s by %s\n\n%s' % (today, name, filestr) return filestr, 1 return filestr, 0 def makenewfile(name, filestr): fid = file(name, 'w') fid.write(filestr) fid.close() def convertfile(filename, orig=1): """Convert the filename given from using Numarray to using NumPy Copies the file to filename.orig and then over-writes the file with the updated code """ fid = open(filename) filestr = fid.read() fid.close() filestr, changed = fromstr(filestr) if changed: if orig: base, ext = os.path.splitext(filename) os.rename(filename, base+".orig") else: os.remove(filename) makenewfile(filename, filestr) def fromargs(args): filename = args[1] convertfile(filename) def convertall(direc=os.path.curdir, orig=1): """Convert all .py files to use numpy.oldnumeric (from Numeric) in the directory given For each file, a backup of <usesnumeric>.py is made as <usesnumeric>.py.orig. A new file named <usesnumeric>.py is then written with the updated code. 
""" files = glob.glob(os.path.join(direc,'*.py')) for afile in files: if afile[-8:] == 'setup.py': continue convertfile(afile, orig) header_re = re.compile(r'(numarray/libnumarray.h)') def convertsrc(direc=os.path.curdir, ext=None, orig=1): """Replace Numeric/arrayobject.h with numpy/oldnumeric.h in all files in the directory with extension give by list ext (if ext is None, then all files are replaced).""" if ext is None: files = glob.glob(os.path.join(direc,'*')) else: files = [] for aext in ext: files.extend(glob.glob(os.path.join(direc,"*.%s" % aext))) for afile in files: fid = open(afile) fstr = fid.read() fid.close() fstr, n = header_re.subn(r'numpy/libnumarray.h',fstr) if n > 0: if orig: base, ext = os.path.splitext(afile) os.rename(afile, base+".orig") else: os.remove(afile) makenewfile(afile, fstr) def _func(arg, dirname, fnames): convertall(dirname, orig=0) convertsrc(dirname, ['h','c'], orig=0) def converttree(direc=os.path.curdir): """Convert all .py files in the tree given """ os.path.walk(direc, _func, None) if __name__ == '__main__': converttree(sys.argv)
bsd-3-clause
fdouetteau/PyBabe
pybabe/format_sql.py
1
2317
from base import BabeBase, StreamFooter, StreamHeader import sys import re pattern = r"'((?:(?:\\.)|[^'\\])*)'|((?:\d|\.)+)|(NULL)" pat = re.compile(pattern) def parse_value(pos, line): m = pat.match(line, pos) if m: if m.lastindex == 3: return (None, m.end(0)) elif m.lastindex == 2: return (m.group(2), m.end(0)) else: return (unescape(m.group(1)), m.end(0)) else: raise Exception("ParseError %s", line[pos:pos + 10 if pos + 10 < len(line) - 1 else len(line) - 1]) def unescape(s): if s.find('\\') >= 0: s = s.replace("\\'", "'") s = s.replace("\\n", "\n") s = s.replace("\\r", "\r") return s def parse_tuple(pos, line): if line[pos] != '(': raise Exception("ParseError") pos = pos + 1 buf = [] while True: (val, pos) = parse_value(pos, line) buf.append(val) if line[pos] == ',': pos = pos + 1 elif line[pos] == ')': pos = pos + 1 break else: raise Exception("ParseError %s", line[pos:pos + 10 if pos + 10 < len(line) - 1 else len(line) - 1]) return (buf, pos) def pull(format, stream, kwargs): """ Read a SQL dump "INSERT VALUE" statements from a single table table = The name of the table to read (mandatory) fields = The sets """ fields = kwargs['fields'] table = kwargs['table'] header = StreamHeader(fields=fields, table=table) yield header prefix = "INSERT INTO `%s` VALUES " % table try: for line in stream: if not line.startswith(prefix): continue pos = len(prefix) while pos < len(line): (elts, pos) = parse_tuple(pos, line) yield header.t(*elts) if line[pos] == ',': pos = pos + 1 continue elif line[pos] == ';': break else: raise Exception("ParseError pos %u " % pos) except TypeError, e: print len(elts), elts raise e yield StreamFooter() BabeBase.addPullPlugin("sql", ["sql"], pull) if __name__ == "__main__": for line in sys.stdin: print parse_tuple(0, line)
bsd-3-clause
yinwenpeng/rescale
en/parser/nltk_lite/chat/rude.py
9
2567
# Natural Language Toolkit: Zen Chatbot # # Copyright (C) 2005-2006 University of Melbourne # Author: Peter Spiller <pspiller@csse.unimelb.edu.au> # URL: <http://nltk.sf.net> # For license information, see LICENSE.TXT from re import * from en.parser.nltk_lite.chat import * pairs = ( (r'We (.*)', ("What do you mean, 'we'?", "Don't include me in that!", "I wouldn't be so sure about that.")), (r'You should (.*)', ("Don't tell me what to do, buddy.", "Really? I should, should I?")), (r'You\'re(.*)', ("More like YOU'RE %1!", "Hah! Look who's talking.", "Come over here and tell me I'm %1.")), (r'You are(.*)', ("More like YOU'RE %1!", "Hah! Look who's talking.", "Come over here and tell me I'm %1.")), (r'I can\'t(.*)', ("You do sound like the type who can't %1.", "Hear that splashing sound? That's my heart bleeding for you.", "Tell somebody who might actually care.")), (r'I think (.*)', ("I wouldn't think too hard if I were you.", "You actually think? I'd never have guessed...")), (r'I (.*)', ("I'm getting a bit tired of hearing about you.", "How about we talk about me instead?", "Me, me, me... Frankly, I don't care.")), (r'How (.*)', ("How do you think?", "Take a wild guess.", "I'm not even going to dignify that with an answer.")), (r'What (.*)', ("Do I look like an encylopedia?", "Figure it out yourself.")), (r'Why (.*)', ("Why not?", "That's so obvious I thought even you'd have already figured it out.")), (r'(.*)shut up(.*)', ("Make me.", "Getting angry at a feeble NLP assignment? Somebody's losing it.", "Say that again, I dare you.")), (r'Shut up(.*)', ("Make me.", "Getting angry at a feeble NLP assignment? Somebody's losing it.", "Say that again, I dare you.")), (r'Hello(.*)', ("Oh good, somebody else to talk to. Joy.", "'Hello'? How original...")), (r'(.*)', ("I'm getting bored here. 
Become more interesting.", "Either become more thrilling or get lost, buddy.", "Change the subject before I die of fatal boredom.")) ) rude = Chat(pairs, reflections) def demo(): print "Unpleasant Chatbot (type 'quit' to exit)." print '='*72 print "I suppose I should say hello." converse(rude) if __name__ == "__main__": demo()
gpl-3.0
mcking49/apache-flask
Python/Lib/site-packages/werkzeug/serving.py
145
27668
# -*- coding: utf-8 -*- """ werkzeug.serving ~~~~~~~~~~~~~~~~ There are many ways to serve a WSGI application. While you're developing it you usually don't want a full blown webserver like Apache but a simple standalone one. From Python 2.5 onwards there is the `wsgiref`_ server in the standard library. If you're using older versions of Python you can download the package from the cheeseshop. However there are some caveats. Sourcecode won't reload itself when changed and each time you kill the server using ``^C`` you get an `KeyboardInterrupt` error. While the latter is easy to solve the first one can be a pain in the ass in some situations. The easiest way is creating a small ``start-myproject.py`` that runs the application:: #!/usr/bin/env python # -*- coding: utf-8 -*- from myproject import make_app from werkzeug.serving import run_simple app = make_app(...) run_simple('localhost', 8080, app, use_reloader=True) You can also pass it a `extra_files` keyword argument with a list of additional files (like configuration files) you want to observe. For bigger applications you should consider using `werkzeug.script` instead of a simple start file. :copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details. 
""" from __future__ import with_statement import os import socket import sys import time import signal import subprocess try: import thread except ImportError: import _thread as thread try: from SocketServer import ThreadingMixIn, ForkingMixIn from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler except ImportError: from socketserver import ThreadingMixIn, ForkingMixIn from http.server import HTTPServer, BaseHTTPRequestHandler import werkzeug from werkzeug._internal import _log from werkzeug._compat import iteritems, PY2, reraise, text_type, \ wsgi_encoding_dance from werkzeug.urls import url_parse, url_unquote from werkzeug.exceptions import InternalServerError, BadRequest class WSGIRequestHandler(BaseHTTPRequestHandler, object): """A request handler that implements WSGI dispatching.""" @property def server_version(self): return 'Werkzeug/' + werkzeug.__version__ def make_environ(self): request_url = url_parse(self.path) def shutdown_server(): self.server.shutdown_signal = True url_scheme = self.server.ssl_context is None and 'http' or 'https' path_info = url_unquote(request_url.path) environ = { 'wsgi.version': (1, 0), 'wsgi.url_scheme': url_scheme, 'wsgi.input': self.rfile, 'wsgi.errors': sys.stderr, 'wsgi.multithread': self.server.multithread, 'wsgi.multiprocess': self.server.multiprocess, 'wsgi.run_once': False, 'werkzeug.server.shutdown': shutdown_server, 'SERVER_SOFTWARE': self.server_version, 'REQUEST_METHOD': self.command, 'SCRIPT_NAME': '', 'PATH_INFO': wsgi_encoding_dance(path_info), 'QUERY_STRING': wsgi_encoding_dance(request_url.query), 'CONTENT_TYPE': self.headers.get('Content-Type', ''), 'CONTENT_LENGTH': self.headers.get('Content-Length', ''), 'REMOTE_ADDR': self.client_address[0], 'REMOTE_PORT': self.client_address[1], 'SERVER_NAME': self.server.server_address[0], 'SERVER_PORT': str(self.server.server_address[1]), 'SERVER_PROTOCOL': self.request_version } for key, value in self.headers.items(): key = 'HTTP_' + key.upper().replace('-', '_') 
if key not in ('HTTP_CONTENT_TYPE', 'HTTP_CONTENT_LENGTH'): environ[key] = value if request_url.netloc: environ['HTTP_HOST'] = request_url.netloc return environ def run_wsgi(self): if self.headers.get('Expect', '').lower().strip() == '100-continue': self.wfile.write(b'HTTP/1.1 100 Continue\r\n\r\n') environ = self.make_environ() headers_set = [] headers_sent = [] def write(data): assert headers_set, 'write() before start_response' if not headers_sent: status, response_headers = headers_sent[:] = headers_set try: code, msg = status.split(None, 1) except ValueError: code, msg = status, "" self.send_response(int(code), msg) header_keys = set() for key, value in response_headers: self.send_header(key, value) key = key.lower() header_keys.add(key) if 'content-length' not in header_keys: self.close_connection = True self.send_header('Connection', 'close') if 'server' not in header_keys: self.send_header('Server', self.version_string()) if 'date' not in header_keys: self.send_header('Date', self.date_time_string()) self.end_headers() assert type(data) is bytes, 'applications must write bytes' self.wfile.write(data) self.wfile.flush() def start_response(status, response_headers, exc_info=None): if exc_info: try: if headers_sent: reraise(*exc_info) finally: exc_info = None elif headers_set: raise AssertionError('Headers already set') headers_set[:] = [status, response_headers] return write def execute(app): application_iter = app(environ, start_response) try: for data in application_iter: write(data) if not headers_sent: write(b'') finally: if hasattr(application_iter, 'close'): application_iter.close() application_iter = None try: execute(self.server.app) except (socket.error, socket.timeout) as e: self.connection_dropped(e, environ) except Exception: if self.server.passthrough_errors: raise from werkzeug.debug.tbtools import get_current_traceback traceback = get_current_traceback(ignore_system_exceptions=True) try: # if we haven't yet sent the headers but they are set # 
we roll back to be able to set them again. if not headers_sent: del headers_set[:] execute(InternalServerError()) except Exception: pass self.server.log('error', 'Error on request:\n%s', traceback.plaintext) def handle(self): """Handles a request ignoring dropped connections.""" rv = None try: rv = BaseHTTPRequestHandler.handle(self) except (socket.error, socket.timeout) as e: self.connection_dropped(e) except Exception: if self.server.ssl_context is None or not is_ssl_error(): raise if self.server.shutdown_signal: self.initiate_shutdown() return rv def initiate_shutdown(self): """A horrible, horrible way to kill the server for Python 2.6 and later. It's the best we can do. """ # Windows does not provide SIGKILL, go with SIGTERM then. sig = getattr(signal, 'SIGKILL', signal.SIGTERM) # reloader active if os.environ.get('WERKZEUG_RUN_MAIN') == 'true': os.kill(os.getpid(), sig) # python 2.7 self.server._BaseServer__shutdown_request = True # python 2.6 self.server._BaseServer__serving = False def connection_dropped(self, error, environ=None): """Called if the connection was closed by the client. By default nothing happens. 
""" def handle_one_request(self): """Handle a single HTTP request.""" self.raw_requestline = self.rfile.readline() if not self.raw_requestline: self.close_connection = 1 elif self.parse_request(): return self.run_wsgi() def send_response(self, code, message=None): """Send the response header and log the response code.""" self.log_request(code) if message is None: message = code in self.responses and self.responses[code][0] or '' if self.request_version != 'HTTP/0.9': hdr = "%s %d %s\r\n" % (self.protocol_version, code, message) self.wfile.write(hdr.encode('ascii')) def version_string(self): return BaseHTTPRequestHandler.version_string(self).strip() def address_string(self): return self.client_address[0] def log_request(self, code='-', size='-'): self.log('info', '"%s" %s %s', self.requestline, code, size) def log_error(self, *args): self.log('error', *args) def log_message(self, format, *args): self.log('info', format, *args) def log(self, type, message, *args): _log(type, '%s - - [%s] %s\n' % (self.address_string(), self.log_date_time_string(), message % args)) #: backwards compatible name if someone is subclassing it BaseRequestHandler = WSGIRequestHandler def generate_adhoc_ssl_pair(cn=None): from random import random from OpenSSL import crypto # pretty damn sure that this is not actually accepted by anyone if cn is None: cn = '*' cert = crypto.X509() cert.set_serial_number(int(random() * sys.maxint)) cert.gmtime_adj_notBefore(0) cert.gmtime_adj_notAfter(60 * 60 * 24 * 365) subject = cert.get_subject() subject.CN = cn subject.O = 'Dummy Certificate' issuer = cert.get_issuer() issuer.CN = 'Untrusted Authority' issuer.O = 'Self-Signed' pkey = crypto.PKey() pkey.generate_key(crypto.TYPE_RSA, 768) cert.set_pubkey(pkey) cert.sign(pkey, 'md5') return cert, pkey def make_ssl_devcert(base_path, host=None, cn=None): """Creates an SSL key for development. This should be used instead of the ``'adhoc'`` key which generates a new cert on each server start. 
It accepts a path for where it should store the key and cert and either a host or CN. If a host is given it will use the CN ``*.host/CN=host``. For more information see :func:`run_simple`. .. versionadded:: 0.9 :param base_path: the path to the certificate and key. The extension ``.crt`` is added for the certificate, ``.key`` is added for the key. :param host: the name of the host. This can be used as an alternative for the `cn`. :param cn: the `CN` to use. """ from OpenSSL import crypto if host is not None: cn = '*.%s/CN=%s' % (host, host) cert, pkey = generate_adhoc_ssl_pair(cn=cn) cert_file = base_path + '.crt' pkey_file = base_path + '.key' with open(cert_file, 'w') as f: f.write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert)) with open(pkey_file, 'w') as f: f.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, pkey)) return cert_file, pkey_file def generate_adhoc_ssl_context(): """Generates an adhoc SSL context for the development server.""" from OpenSSL import SSL cert, pkey = generate_adhoc_ssl_pair() ctx = SSL.Context(SSL.SSLv23_METHOD) ctx.use_privatekey(pkey) ctx.use_certificate(cert) return ctx def load_ssl_context(cert_file, pkey_file): """Loads an SSL context from a certificate and private key file.""" from OpenSSL import SSL ctx = SSL.Context(SSL.SSLv23_METHOD) ctx.use_certificate_file(cert_file) ctx.use_privatekey_file(pkey_file) return ctx def is_ssl_error(error=None): """Checks if the given error (or the current one) is an SSL error.""" if error is None: error = sys.exc_info()[1] from OpenSSL import SSL return isinstance(error, SSL.Error) class _SSLConnectionFix(object): """Wrapper around SSL connection to provide a working makefile().""" def __init__(self, con): self._con = con def makefile(self, mode, bufsize): return socket._fileobject(self._con, mode, bufsize) def __getattr__(self, attrib): return getattr(self._con, attrib) def shutdown(self, arg=None): try: self._con.shutdown() except Exception: pass def select_ip_version(host, port): 
"""Returns AF_INET4 or AF_INET6 depending on where to connect to.""" # disabled due to problems with current ipv6 implementations # and various operating systems. Probably this code also is # not supposed to work, but I can't come up with any other # ways to implement this. ##try: ## info = socket.getaddrinfo(host, port, socket.AF_UNSPEC, ## socket.SOCK_STREAM, 0, ## socket.AI_PASSIVE) ## if info: ## return info[0][0] ##except socket.gaierror: ## pass if ':' in host and hasattr(socket, 'AF_INET6'): return socket.AF_INET6 return socket.AF_INET class BaseWSGIServer(HTTPServer, object): """Simple single-threaded, single-process WSGI server.""" multithread = False multiprocess = False request_queue_size = 128 def __init__(self, host, port, app, handler=None, passthrough_errors=False, ssl_context=None): if handler is None: handler = WSGIRequestHandler self.address_family = select_ip_version(host, port) HTTPServer.__init__(self, (host, int(port)), handler) self.app = app self.passthrough_errors = passthrough_errors self.shutdown_signal = False if ssl_context is not None: try: from OpenSSL import tsafe except ImportError: raise TypeError('SSL is not available if the OpenSSL ' 'library is not installed.') if isinstance(ssl_context, tuple): ssl_context = load_ssl_context(*ssl_context) if ssl_context == 'adhoc': ssl_context = generate_adhoc_ssl_context() self.socket = tsafe.Connection(ssl_context, self.socket) self.ssl_context = ssl_context else: self.ssl_context = None def log(self, type, message, *args): _log(type, message, *args) def serve_forever(self): self.shutdown_signal = False try: HTTPServer.serve_forever(self) except KeyboardInterrupt: pass def handle_error(self, request, client_address): if self.passthrough_errors: raise else: return HTTPServer.handle_error(self, request, client_address) def get_request(self): con, info = self.socket.accept() if self.ssl_context is not None: con = _SSLConnectionFix(con) return con, info class ThreadedWSGIServer(ThreadingMixIn, 
BaseWSGIServer): """A WSGI server that does threading.""" multithread = True class ForkingWSGIServer(ForkingMixIn, BaseWSGIServer): """A WSGI server that does forking.""" multiprocess = True def __init__(self, host, port, app, processes=40, handler=None, passthrough_errors=False, ssl_context=None): BaseWSGIServer.__init__(self, host, port, app, handler, passthrough_errors, ssl_context) self.max_children = processes def make_server(host, port, app=None, threaded=False, processes=1, request_handler=None, passthrough_errors=False, ssl_context=None): """Create a new server instance that is either threaded, or forks or just processes one request after another. """ if threaded and processes > 1: raise ValueError("cannot have a multithreaded and " "multi process server.") elif threaded: return ThreadedWSGIServer(host, port, app, request_handler, passthrough_errors, ssl_context) elif processes > 1: return ForkingWSGIServer(host, port, app, processes, request_handler, passthrough_errors, ssl_context) else: return BaseWSGIServer(host, port, app, request_handler, passthrough_errors, ssl_context) def _iter_module_files(): # The list call is necessary on Python 3 in case the module # dictionary modifies during iteration. for module in list(sys.modules.values()): filename = getattr(module, '__file__', None) if filename: old = None while not os.path.isfile(filename): old = filename filename = os.path.dirname(filename) if filename == old: break else: if filename[-4:] in ('.pyc', '.pyo'): filename = filename[:-1] yield filename def _reloader_stat_loop(extra_files=None, interval=1): """When this function is run from the main thread, it will force other threads to exit when any modules currently loaded change. Copyright notice. This function is based on the autoreload.py from the CherryPy trac which originated from WSGIKit which is now dead. :param extra_files: a list of additional files it should watch. 
""" from itertools import chain mtimes = {} while 1: for filename in chain(_iter_module_files(), extra_files or ()): try: mtime = os.stat(filename).st_mtime except OSError: continue old_time = mtimes.get(filename) if old_time is None: mtimes[filename] = mtime continue elif mtime > old_time: _log('info', ' * Detected change in %r, reloading' % filename) sys.exit(3) time.sleep(interval) def _reloader_inotify(extra_files=None, interval=None): # Mutated by inotify loop when changes occur. changed = [False] # Setup inotify watches from pyinotify import WatchManager, Notifier # this API changed at one point, support both try: from pyinotify import EventsCodes as ec ec.IN_ATTRIB except (ImportError, AttributeError): import pyinotify as ec wm = WatchManager() mask = ec.IN_DELETE_SELF | ec.IN_MOVE_SELF | ec.IN_MODIFY | ec.IN_ATTRIB def signal_changed(event): if changed[0]: return _log('info', ' * Detected change in %r, reloading' % event.path) changed[:] = [True] for fname in extra_files or (): wm.add_watch(fname, mask, signal_changed) # ... And now we wait... notif = Notifier(wm) try: while not changed[0]: # always reiterate through sys.modules, adding them for fname in _iter_module_files(): wm.add_watch(fname, mask, signal_changed) notif.process_events() if notif.check_events(timeout=interval): notif.read_events() # TODO Set timeout to something small and check parent liveliness finally: notif.stop() sys.exit(3) # currently we always use the stat loop reloader for the simple reason # that the inotify one does not respond to added files properly. Also # it's quite buggy and the API is a mess. reloader_loop = _reloader_stat_loop def restart_with_reloader(): """Spawn a new Python interpreter with the same arguments as this one, but running the reloader thread. """ while 1: _log('info', ' * Restarting with reloader') args = [sys.executable] + sys.argv new_environ = os.environ.copy() new_environ['WERKZEUG_RUN_MAIN'] = 'true' # a weird bug on windows. 
sometimes unicode strings end up in the # environment and subprocess.call does not like this, encode them # to latin1 and continue. if os.name == 'nt' and PY2: for key, value in iteritems(new_environ): if isinstance(value, text_type): new_environ[key] = value.encode('iso-8859-1') exit_code = subprocess.call(args, env=new_environ) if exit_code != 3: return exit_code def run_with_reloader(main_func, extra_files=None, interval=1): """Run the given function in an independent python interpreter.""" import signal signal.signal(signal.SIGTERM, lambda *args: sys.exit(0)) if os.environ.get('WERKZEUG_RUN_MAIN') == 'true': thread.start_new_thread(main_func, ()) try: reloader_loop(extra_files, interval) except KeyboardInterrupt: return try: sys.exit(restart_with_reloader()) except KeyboardInterrupt: pass def run_simple(hostname, port, application, use_reloader=False, use_debugger=False, use_evalex=True, extra_files=None, reloader_interval=1, threaded=False, processes=1, request_handler=None, static_files=None, passthrough_errors=False, ssl_context=None): """Start an application using wsgiref and with an optional reloader. This wraps `wsgiref` to fix the wrong default reporting of the multithreaded WSGI variable and adds optional multithreading and fork support. This function has a command-line interface too:: python -m werkzeug.serving --help .. versionadded:: 0.5 `static_files` was added to simplify serving of static files as well as `passthrough_errors`. .. versionadded:: 0.6 support for SSL was added. .. versionadded:: 0.8 Added support for automatically loading a SSL context from certificate file and private key. .. versionadded:: 0.9 Added command-line interface. :param hostname: The host for the application. eg: ``'localhost'`` :param port: The port for the server. eg: ``8080`` :param application: the WSGI application to execute :param use_reloader: should the server automatically restart the python process if modules were changed? 
:param use_debugger: should the werkzeug debugging system be used? :param use_evalex: should the exception evaluation feature be enabled? :param extra_files: a list of files the reloader should watch additionally to the modules. For example configuration files. :param reloader_interval: the interval for the reloader in seconds. :param threaded: should the process handle each request in a separate thread? :param processes: if greater than 1 then handle each request in a new process up to this maximum number of concurrent processes. :param request_handler: optional parameter that can be used to replace the default one. You can use this to replace it with a different :class:`~BaseHTTPServer.BaseHTTPRequestHandler` subclass. :param static_files: a dict of paths for static files. This works exactly like :class:`SharedDataMiddleware`, it's actually just wrapping the application in that middleware before serving. :param passthrough_errors: set this to `True` to disable the error catching. This means that the server will die on errors but it can be useful to hook debuggers in (pdb etc.) :param ssl_context: an SSL context for the connection. Either an OpenSSL context, a tuple in the form ``(cert_file, pkey_file)``, the string ``'adhoc'`` if the server should automatically create one, or `None` to disable SSL (which is the default). 
""" if use_debugger: from werkzeug.debug import DebuggedApplication application = DebuggedApplication(application, use_evalex) if static_files: from werkzeug.wsgi import SharedDataMiddleware application = SharedDataMiddleware(application, static_files) def inner(): make_server(hostname, port, application, threaded, processes, request_handler, passthrough_errors, ssl_context).serve_forever() if os.environ.get('WERKZEUG_RUN_MAIN') != 'true': display_hostname = hostname != '*' and hostname or 'localhost' if ':' in display_hostname: display_hostname = '[%s]' % display_hostname _log('info', ' * Running on %s://%s:%d/', ssl_context is None and 'http' or 'https', display_hostname, port) if use_reloader: # Create and destroy a socket so that any exceptions are raised before # we spawn a separate Python interpreter and lose this ability. address_family = select_ip_version(hostname, port) test_socket = socket.socket(address_family, socket.SOCK_STREAM) test_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) test_socket.bind((hostname, port)) test_socket.close() run_with_reloader(inner, extra_files, reloader_interval) else: inner() def main(): '''A simple command-line interface for :py:func:`run_simple`.''' # in contrast to argparse, this works at least under Python < 2.7 import optparse from werkzeug.utils import import_string parser = optparse.OptionParser(usage='Usage: %prog [options] app_module:app_object') parser.add_option('-b', '--bind', dest='address', help='The hostname:port the app should listen on.') parser.add_option('-d', '--debug', dest='use_debugger', action='store_true', default=False, help='Use Werkzeug\'s debugger.') parser.add_option('-r', '--reload', dest='use_reloader', action='store_true', default=False, help='Reload Python process if modules change.') options, args = parser.parse_args() hostname, port = None, None if options.address: address = options.address.split(':') hostname = address[0] if len(address) > 1: port = address[1] if len(args) 
!= 1: sys.stdout.write('No application supplied, or too much. See --help\n') sys.exit(1) app = import_string(args[0]) run_simple( hostname=(hostname or '127.0.0.1'), port=int(port or 5000), application=app, use_reloader=options.use_reloader, use_debugger=options.use_debugger ) if __name__ == '__main__': main()
mit
carrigan98/Autorippr
classes/docopt.py
2
19810
# -*- coding: utf-8 -*- """Pythonic command-line interface parser that will make you smile. * http://docopt.org * Repository and issue-tracker: https://github.com/docopt/docopt * Licensed under terms of MIT license (see LICENSE-MIT) * Copyright (c) 2013 Vladimir Keleshev, vladimir@keleshev.com """ import re import sys __all__ = ['docopt'] __version__ = '0.6.1' class DocoptLanguageError(Exception): """Error in construction of usage-message by developer.""" class DocoptExit(SystemExit): """Exit in case user invoked program with incorrect arguments.""" usage = '' def __init__(self, message=''): SystemExit.__init__(self, (message + '\n' + self.usage).strip()) class Pattern(object): def __eq__(self, other): return repr(self) == repr(other) def __hash__(self): return hash(repr(self)) def fix(self): self.fix_identities() self.fix_repeating_arguments() return self def fix_identities(self, uniq=None): """Make pattern-tree tips point to same object if they are equal.""" if not hasattr(self, 'children'): return self uniq = list(set(self.flat())) if uniq is None else uniq for i, child in enumerate(self.children): if not hasattr(child, 'children'): assert child in uniq self.children[i] = uniq[uniq.index(child)] else: child.fix_identities(uniq) def fix_repeating_arguments(self): """Fix elements that should accumulate/increment values.""" either = [list(child.children) for child in transform(self).children] for case in either: for e in [child for child in case if case.count(child) > 1]: if type(e) is Argument or type(e) is Option and e.argcount: if e.value is None: e.value = [] elif type(e.value) is not list: e.value = e.value.split() if type(e) is Command or type(e) is Option and e.argcount == 0: e.value = 0 return self def transform(pattern): """Expand pattern into an (almost) equivalent one, but with single Either. Example: ((-a | -b) (-c | -d)) => (-a -c | -a -d | -b -c | -b -d) Quirks: [-a] => (-a), (-a...) 
=> (-a -a) """ result = [] groups = [[pattern]] while groups: children = groups.pop(0) parents = [Required, Optional, OptionsShortcut, Either, OneOrMore] if any(t in map(type, children) for t in parents): child = [c for c in children if type(c) in parents][0] children.remove(child) if type(child) is Either: for c in child.children: groups.append([c] + children) elif type(child) is OneOrMore: groups.append(child.children * 2 + children) else: groups.append(child.children + children) else: result.append(children) return Either(*[Required(*e) for e in result]) class LeafPattern(Pattern): """Leaf/terminal node of a pattern tree.""" def __init__(self, name, value=None): self.name, self.value = name, value def __repr__(self): return '%s(%r, %r)' % (self.__class__.__name__, self.name, self.value) def flat(self, *types): return [self] if not types or type(self) in types else [] def match(self, left, collected=None): collected = [] if collected is None else collected pos, match = self.single_match(left) if match is None: return False, left, collected left_ = left[:pos] + left[pos + 1:] same_name = [a for a in collected if a.name == self.name] if type(self.value) in (int, list): if type(self.value) is int: increment = 1 else: increment = ([match.value] if type(match.value) is str else match.value) if not same_name: match.value = increment return True, left_, collected + [match] same_name[0].value += increment return True, left_, collected return True, left_, collected + [match] class BranchPattern(Pattern): """Branch/inner node of a pattern tree.""" def __init__(self, *children): self.children = list(children) def __repr__(self): return '%s(%s)' % (self.__class__.__name__, ', '.join(repr(a) for a in self.children)) def flat(self, *types): if type(self) in types: return [self] return sum([child.flat(*types) for child in self.children], []) class Argument(LeafPattern): def single_match(self, left): for n, pattern in enumerate(left): if type(pattern) is Argument: return n, 
Argument(self.name, pattern.value) return None, None @classmethod def parse(class_, source): name = re.findall('(<\S*?>)', source)[0] value = re.findall('\[default: (.*)\]', source, flags=re.I) return class_(name, value[0] if value else None) class Command(Argument): def __init__(self, name, value=False): self.name, self.value = name, value def single_match(self, left): for n, pattern in enumerate(left): if type(pattern) is Argument: if pattern.value == self.name: return n, Command(self.name, True) else: break return None, None class Option(LeafPattern): def __init__(self, short=None, long=None, argcount=0, value=False): assert argcount in (0, 1) self.short, self.long, self.argcount = short, long, argcount self.value = None if value is False and argcount else value @classmethod def parse(class_, option_description): short, long, argcount, value = None, None, 0, False options, _, description = option_description.strip().partition(' ') options = options.replace(',', ' ').replace('=', ' ') for s in options.split(): if s.startswith('--'): long = s elif s.startswith('-'): short = s else: argcount = 1 if argcount: matched = re.findall('\[default: (.*)\]', description, flags=re.I) value = matched[0] if matched else None return class_(short, long, argcount, value) def single_match(self, left): for n, pattern in enumerate(left): if self.name == pattern.name: return n, pattern return None, None @property def name(self): return self.long or self.short def __repr__(self): return 'Option(%r, %r, %r, %r)' % (self.short, self.long, self.argcount, self.value) class Required(BranchPattern): def match(self, left, collected=None): collected = [] if collected is None else collected l = left c = collected for pattern in self.children: matched, l, c = pattern.match(l, c) if not matched: return False, left, collected return True, l, c class Optional(BranchPattern): def match(self, left, collected=None): collected = [] if collected is None else collected for pattern in self.children: m, 
left, collected = pattern.match(left, collected) return True, left, collected class OptionsShortcut(Optional): """Marker/placeholder for [options] shortcut.""" class OneOrMore(BranchPattern): def match(self, left, collected=None): assert len(self.children) == 1 collected = [] if collected is None else collected l = left c = collected l_ = None matched = True times = 0 while matched: # could it be that something didn't match but changed l or c? matched, l, c = self.children[0].match(l, c) times += 1 if matched else 0 if l_ == l: break l_ = l if times >= 1: return True, l, c return False, left, collected class Either(BranchPattern): def match(self, left, collected=None): collected = [] if collected is None else collected outcomes = [] for pattern in self.children: matched, _, _ = outcome = pattern.match(left, collected) if matched: outcomes.append(outcome) if outcomes: return min(outcomes, key=lambda outcome: len(outcome[1])) return False, left, collected class Tokens(list): def __init__(self, source, error=DocoptExit): self += source.split() if hasattr(source, 'split') else source self.error = error @staticmethod def from_pattern(source): source = re.sub(r'([\[\]\(\)\|]|\.\.\.)', r' \1 ', source) source = [s for s in re.split('\s+|(\S*<.*?>)', source) if s] return Tokens(source, error=DocoptLanguageError) def move(self): return self.pop(0) if len(self) else None def current(self): return self[0] if len(self) else None def parse_long(tokens, options): """long ::= '--' chars [ ( ' ' | '=' ) chars ] ;""" long, eq, value = tokens.move().partition('=') assert long.startswith('--') value = None if eq == value == '' else value similar = [o for o in options if o.long == long] if tokens.error is DocoptExit and similar == []: # if no exact match similar = [o for o in options if o.long and o.long.startswith(long)] if len(similar) > 1: # might be simply specified ambiguously 2+ times? raise tokens.error('%s is not a unique prefix: %s?' 
% (long, ', '.join(o.long for o in similar))) elif len(similar) < 1: argcount = 1 if eq == '=' else 0 o = Option(None, long, argcount) options.append(o) if tokens.error is DocoptExit: o = Option(None, long, argcount, value if argcount else True) else: o = Option(similar[0].short, similar[0].long, similar[0].argcount, similar[0].value) if o.argcount == 0: if value is not None: raise tokens.error('%s must not have an argument' % o.long) else: if value is None: if tokens.current() in [None, '--']: raise tokens.error('%s requires argument' % o.long) value = tokens.move() if tokens.error is DocoptExit: o.value = value if value is not None else True return [o] def parse_shorts(tokens, options): """shorts ::= '-' ( chars )* [ [ ' ' ] chars ] ;""" token = tokens.move() assert token.startswith('-') and not token.startswith('--') left = token.lstrip('-') parsed = [] while left != '': short, left = '-' + left[0], left[1:] similar = [o for o in options if o.short == short] if len(similar) > 1: raise tokens.error('%s is specified ambiguously %d times' % (short, len(similar))) elif len(similar) < 1: o = Option(short, None, 0) options.append(o) if tokens.error is DocoptExit: o = Option(short, None, 0, True) else: # why copying is necessary here? 
o = Option(short, similar[0].long, similar[0].argcount, similar[0].value) value = None if o.argcount != 0: if left == '': if tokens.current() in [None, '--']: raise tokens.error('%s requires argument' % short) value = tokens.move() else: value = left left = '' if tokens.error is DocoptExit: o.value = value if value is not None else True parsed.append(o) return parsed def parse_pattern(source, options): tokens = Tokens.from_pattern(source) result = parse_expr(tokens, options) if tokens.current() is not None: raise tokens.error('unexpected ending: %r' % ' '.join(tokens)) return Required(*result) def parse_expr(tokens, options): """expr ::= seq ( '|' seq )* ;""" seq = parse_seq(tokens, options) if tokens.current() != '|': return seq result = [Required(*seq)] if len(seq) > 1 else seq while tokens.current() == '|': tokens.move() seq = parse_seq(tokens, options) result += [Required(*seq)] if len(seq) > 1 else seq return [Either(*result)] if len(result) > 1 else result def parse_seq(tokens, options): """seq ::= ( atom [ '...' 
] )* ;""" result = [] while tokens.current() not in [None, ']', ')', '|']: atom = parse_atom(tokens, options) if tokens.current() == '...': atom = [OneOrMore(*atom)] tokens.move() result += atom return result def parse_atom(tokens, options): """atom ::= '(' expr ')' | '[' expr ']' | 'options' | long | shorts | argument | command ; """ token = tokens.current() result = [] if token in '([': tokens.move() matching, pattern = {'(': [')', Required], '[': [']', Optional]}[token] result = pattern(*parse_expr(tokens, options)) if tokens.move() != matching: raise tokens.error("unmatched '%s'" % token) return [result] elif token == 'options': tokens.move() return [OptionsShortcut()] elif token.startswith('--') and token != '--': return parse_long(tokens, options) elif token.startswith('-') and token not in ('-', '--'): return parse_shorts(tokens, options) elif token.startswith('<') and token.endswith('>') or token.isupper(): return [Argument(tokens.move())] else: return [Command(tokens.move())] def parse_argv(tokens, options, options_first=False): """Parse command-line argument vector. 
If options_first: argv ::= [ long | shorts ]* [ argument ]* [ '--' [ argument ]* ] ; else: argv ::= [ long | shorts | argument ]* [ '--' [ argument ]* ] ; """ parsed = [] while tokens.current() is not None: if tokens.current() == '--': return parsed + [Argument(None, v) for v in tokens] elif tokens.current().startswith('--'): parsed += parse_long(tokens, options) elif tokens.current().startswith('-') and tokens.current() != '-': parsed += parse_shorts(tokens, options) elif options_first: return parsed + [Argument(None, v) for v in tokens] else: parsed.append(Argument(None, tokens.move())) return parsed def parse_defaults(doc): defaults = [] for s in parse_section('options:', doc): # FIXME corner case "bla: options: --foo" _, _, s = s.partition(':') # get rid of "options:" split = re.split('\n[ \t]*(-\S+?)', '\n' + s)[1:] split = [s1 + s2 for s1, s2 in zip(split[::2], split[1::2])] options = [Option.parse(s) for s in split if s.startswith('-')] defaults += options return defaults def parse_section(name, source): pattern = re.compile('^([^\n]*' + name + '[^\n]*\n?(?:[ \t].*?(?:\n|$))*)', re.IGNORECASE | re.MULTILINE) return [s.strip() for s in pattern.findall(source)] def formal_usage(section): _, _, section = section.partition(':') # drop "usage:" pu = section.split() return '( ' + ' '.join(') | (' if s == pu[0] else s for s in pu[1:]) + ' )' def extras(help, version, options, doc): if help and any((o.name in ('-h', '--help')) and o.value for o in options): print(doc.strip("\n")) sys.exit() if version and any(o.name == '--version' and o.value for o in options): print(version) sys.exit() class Dict(dict): def __repr__(self): return '{%s}' % ',\n '.join('%r: %r' % i for i in sorted(self.items())) def docopt(doc, argv=None, help=True, version=None, options_first=False): """Parse `argv` based on command-line interface described in `doc`. `docopt` creates your command-line interface based on its description that you pass as `doc`. 
Such description can contain --options, <positional-argument>, commands, which could be [optional], (required), (mutually | exclusive) or repeated... Parameters ---------- doc : str Description of your command-line interface. argv : list of str, optional Argument vector to be parsed. sys.argv[1:] is used if not provided. help : bool (default: True) Set to False to disable automatic help on -h or --help options. version : any object If passed, the object will be printed if --version is in `argv`. options_first : bool (default: False) Set to True to require options precede positional arguments, i.e. to forbid options and positional arguments intermix. Returns ------- args : dict A dictionary, where keys are names of command-line elements such as e.g. "--verbose" and "<path>", and values are the parsed values of those elements. Example ------- >>> from docopt import docopt >>> doc = ''' ... Usage: ... my_program tcp <host> <port> [--timeout=<seconds>] ... my_program serial <port> [--baud=<n>] [--timeout=<seconds>] ... my_program (-h | --help | --version) ... ... Options: ... -h, --help Show this screen and exit. ... --baud=<n> Baudrate [default: 9600] ... 
''' >>> argv = ['tcp', '127.0.0.1', '80', '--timeout', '30'] >>> docopt(doc, argv) {'--baud': '9600', '--help': False, '--timeout': '30', '--version': False, '<host>': '127.0.0.1', '<port>': '80', 'serial': False, 'tcp': True} See also -------- * For video introduction see http://docopt.org * Full documentation is available in README.rst as well as online at https://github.com/docopt/docopt#readme """ argv = sys.argv[1:] if argv is None else argv usage_sections = parse_section('usage:', doc) if len(usage_sections) == 0: raise DocoptLanguageError('"usage:" (case-insensitive) not found.') if len(usage_sections) > 1: raise DocoptLanguageError('More than one "usage:" (case-insensitive).') DocoptExit.usage = usage_sections[0] options = parse_defaults(doc) pattern = parse_pattern(formal_usage(DocoptExit.usage), options) # [default] syntax for argument is disabled # for a in pattern.flat(Argument): # same_name = [d for d in arguments if d.name == a.name] # if same_name: # a.value = same_name[0].value argv = parse_argv(Tokens(argv), list(options), options_first) pattern_options = set(pattern.flat(Option)) for options_shortcut in pattern.flat(OptionsShortcut): doc_options = parse_defaults(doc) options_shortcut.children = list(set(doc_options) - pattern_options) # if any_options: # options_shortcut.children += [Option(o.short, o.long, o.argcount) # for o in argv if type(o) is Option] extras(help, version, argv, doc) matched, left, collected = pattern.fix().match(argv) if matched and left == []: # better error message if left? return Dict((a.name, a.value) for a in (pattern.flat() + collected)) raise DocoptExit()
mit
rickhurst/Django-non-rel-blog
django/views/csrf.py
289
3834
from django.http import HttpResponseForbidden from django.template import Context, Template from django.conf import settings # We include the template inline since we need to be able to reliably display # this error message, especially for the sake of developers, and there isn't any # other way of making it available independent of what is in the settings file. CSRF_FAILRE_TEMPLATE = """ <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd"> <html lang="en"> <head> <meta http-equiv="content-type" content="text/html; charset=utf-8"> <meta name="robots" content="NONE,NOARCHIVE"> <title>403 Forbidden</title> <style type="text/css"> html * { padding:0; margin:0; } body * { padding:10px 20px; } body * * { padding:0; } body { font:small sans-serif; background:#eee; } body>div { border-bottom:1px solid #ddd; } h1 { font-weight:normal; margin-bottom:.4em; } h1 span { font-size:60%; color:#666; font-weight:normal; } #info { background:#f6f6f6; } #info ul { margin: 0.5em 4em; } #info p, #summary p { padding-top:10px; } #summary { background: #ffc; } #explanation { background:#eee; border-bottom: 0px none; } </style> </head> <body> <div id="summary"> <h1>Forbidden <span>(403)</span></h1> <p>CSRF verification failed. Request aborted.</p> {% if no_referer %} <p>You are seeing this message because this HTTPS site requires a 'Referer header' to be sent by your Web browser, but none was sent. 
This header is required for security reasons, to ensure that your browser is not being hijacked by third parties.</p> <p>If you have configured your browser to disable 'Referer' headers, please re-enable them, at least for this site, or for HTTPS connections, or for 'same-origin' requests.</p> {% endif %} </div> {% if DEBUG %} <div id="info"> <h2>Help</h2> {% if reason %} <p>Reason given for failure:</p> <pre> {{ reason }} </pre> {% endif %} <p>In general, this can occur when there is a genuine Cross Site Request Forgery, or when <a href='http://docs.djangoproject.com/en/dev/ref/contrib/csrf/#ref-contrib-csrf'>Django's CSRF mechanism</a> has not been used correctly. For POST forms, you need to ensure:</p> <ul> <li>The view function uses <a href='http://docs.djangoproject.com/en/dev/ref/templates/api/#subclassing-context-requestcontext'><code>RequestContext</code></a> for the template, instead of <code>Context</code>.</li> <li>In the template, there is a <code>{% templatetag openblock %} csrf_token {% templatetag closeblock %}</code> template tag inside each POST form that targets an internal URL.</li> <li>If you are not using <code>CsrfViewMiddleware</code>, then you must use <code>csrf_protect</code> on any views that use the <code>csrf_token</code> template tag, as well as those that accept the POST data.</li> </ul> <p>You're seeing the help section of this page because you have <code>DEBUG = True</code> in your Django settings file. Change that to <code>False</code>, and only the initial error message will be displayed. 
</p> <p>You can customize this page using the CSRF_FAILURE_VIEW setting.</p> </div> {% else %} <div id="explanation"> <p><small>More information is available with DEBUG=True.</small></p> </div> {% endif %} </body> </html> """ def csrf_failure(request, reason=""): """ Default view used when request fails CSRF protection """ from django.middleware.csrf import REASON_NO_REFERER t = Template(CSRF_FAILRE_TEMPLATE) c = Context({'DEBUG': settings.DEBUG, 'reason': reason, 'no_referer': reason == REASON_NO_REFERER }) return HttpResponseForbidden(t.render(c), mimetype='text/html')
bsd-3-clause
rochacbruno/python-pagseguro
tests/conftest.py
2
1127
# -*- coding: utf-8 -*- import pytest from pagseguro import PagSeguro @pytest.fixture(scope='session') def sender(): return { 'name': u'Guybrush Treepwood', 'area_code': 11, "phone": 5555555, "email": 'guybrush@monkeyisland.com', "cpf": "00000000000", "born_date": "06/08/1650", } @pytest.fixture(scope='session') def shipping(): return { "type": PagSeguro.SEDEX, "street": "Av Brig Faria Lima", "number": 1234, "complement": "5 andar", "district": "Jardim Paulistano", "postal_code": "06650030", "city": "Sao Paulo", "state": "SP", "country": "BRA", "cost": "1234.56" } @pytest.fixture(scope='session') def items(): return [ { "id": "0001", "description": "Produto 1", "amount": 354.20, "quantity": 2, "weight": 200 }, { "id": "0002", "description": "Produto 2", "amount": 355.20, "quantity": 1, "weight": 200 }, ]
mit
ivar1234/babu.repo
contrib/inventory/nagios_ndo.py
213
3842
#!/usr/bin/env python # (c) 2014, Jonathan Lestrelin <jonathan.lestrelin@gmail.com> # # This file is part of Ansible, # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. """ Nagios NDO external inventory script. ======================================== Returns hosts and hostgroups from Nagios NDO. Configuration is read from `nagios_ndo.ini`. """ import os import argparse try: import configparser except ImportError: import ConfigParser configparser = ConfigParser import json try: from sqlalchemy import text from sqlalchemy.engine import create_engine except ImportError: print("Error: SQLAlchemy is needed. 
Try something like: pip install sqlalchemy") exit(1) class NagiosNDOInventory(object): def read_settings(self): config = configparser.SafeConfigParser() config.read(os.path.dirname(os.path.realpath(__file__)) + '/nagios_ndo.ini') if config.has_option('ndo', 'database_uri'): self.ndo_database_uri = config.get('ndo', 'database_uri') def read_cli(self): parser = argparse.ArgumentParser() parser.add_argument('--host', nargs=1) parser.add_argument('--list', action='store_true') self.options = parser.parse_args() def get_hosts(self): engine = create_engine(self.ndo_database_uri) connection = engine.connect() select_hosts = text("SELECT display_name \ FROM nagios_hosts") select_hostgroups = text("SELECT alias \ FROM nagios_hostgroups") select_hostgroup_hosts = text("SELECT h.display_name \ FROM nagios_hostgroup_members hgm, nagios_hosts h, nagios_hostgroups hg \ WHERE hgm.hostgroup_id = hg.hostgroup_id \ AND hgm.host_object_id = h.host_object_id \ AND hg.alias =:hostgroup_alias") hosts = connection.execute(select_hosts) self.result['all']['hosts'] = [host['display_name'] for host in hosts] for hostgroup in connection.execute(select_hostgroups): hostgroup_alias = hostgroup['alias'] self.result[hostgroup_alias] = {} hosts = connection.execute(select_hostgroup_hosts, hostgroup_alias=hostgroup_alias) self.result[hostgroup_alias]['hosts'] = [host['display_name'] for host in hosts] def __init__(self): self.defaultgroup = 'group_all' self.ndo_database_uri = None self.options = None self.read_settings() self.read_cli() self.result = {} self.result['all'] = {} self.result['all']['hosts'] = [] self.result['_meta'] = {} self.result['_meta']['hostvars'] = {} if self.ndo_database_uri: self.get_hosts() if self.options.host: print(json.dumps({})) elif self.options.list: print(json.dumps(self.result)) else: print("usage: --list or --host HOSTNAME") exit(1) else: print("Error: Database configuration is missing. See nagios_ndo.ini.") exit(1) NagiosNDOInventory()
gpl-3.0
Matt-Deacalion/django
tests/gis_tests/inspectapp/tests.py
180
8022
from __future__ import unicode_literals import os import re from unittest import skipUnless from django.contrib.gis.gdal import HAS_GDAL from django.core.management import call_command from django.db import connection, connections from django.test import TestCase, skipUnlessDBFeature from django.test.utils import modify_settings from django.utils.six import StringIO from ..test_data import TEST_DATA if HAS_GDAL: from django.contrib.gis.gdal import Driver, GDALException, GDAL_VERSION from django.contrib.gis.utils.ogrinspect import ogrinspect from .models import AllOGRFields @skipUnless(HAS_GDAL, "InspectDbTests needs GDAL support") class InspectDbTests(TestCase): @skipUnlessDBFeature("gis_enabled") def test_geom_columns(self): """ Test the geo-enabled inspectdb command. """ out = StringIO() call_command( 'inspectdb', table_name_filter=lambda tn: tn == 'inspectapp_allogrfields', stdout=out ) output = out.getvalue() if connection.features.supports_geometry_field_introspection: self.assertIn('geom = models.PolygonField()', output) self.assertIn('point = models.PointField()', output) else: self.assertIn('geom = models.GeometryField(', output) self.assertIn('point = models.GeometryField(', output) @skipUnlessDBFeature("supports_3d_storage") def test_3d_columns(self): out = StringIO() call_command( 'inspectdb', table_name_filter=lambda tn: tn == 'inspectapp_fields3d', stdout=out ) output = out.getvalue() if connection.features.supports_geometry_field_introspection: self.assertIn('point = models.PointField(dim=3)', output) self.assertIn('line = models.LineStringField(dim=3)', output) self.assertIn('poly = models.PolygonField(dim=3)', output) else: self.assertIn('point = models.GeometryField(', output) self.assertIn('line = models.GeometryField(', output) self.assertIn('poly = models.GeometryField(', output) @skipUnless(HAS_GDAL, "OGRInspectTest needs GDAL support") @modify_settings( INSTALLED_APPS={'append': 'django.contrib.gis'}, ) class OGRInspectTest(TestCase): maxDiff 
= 1024 def test_poly(self): shp_file = os.path.join(TEST_DATA, 'test_poly', 'test_poly.shp') model_def = ogrinspect(shp_file, 'MyModel') expected = [ '# This is an auto-generated Django model module created by ogrinspect.', 'from django.contrib.gis.db import models', '', 'class MyModel(models.Model):', ' float = models.FloatField()', ' int = models.{}()'.format('BigIntegerField' if GDAL_VERSION >= (2, 0) else 'FloatField'), ' str = models.CharField(max_length=80)', ' geom = models.PolygonField(srid=-1)', ] self.assertEqual(model_def, '\n'.join(expected)) def test_poly_multi(self): shp_file = os.path.join(TEST_DATA, 'test_poly', 'test_poly.shp') model_def = ogrinspect(shp_file, 'MyModel', multi_geom=True) self.assertIn('geom = models.MultiPolygonField(srid=-1)', model_def) # Same test with a 25D-type geometry field shp_file = os.path.join(TEST_DATA, 'gas_lines', 'gas_leitung.shp') model_def = ogrinspect(shp_file, 'MyModel', multi_geom=True) self.assertIn('geom = models.MultiLineStringField(srid=-1)', model_def) def test_date_field(self): shp_file = os.path.join(TEST_DATA, 'cities', 'cities.shp') model_def = ogrinspect(shp_file, 'City') expected = [ '# This is an auto-generated Django model module created by ogrinspect.', 'from django.contrib.gis.db import models', '', 'class City(models.Model):', ' name = models.CharField(max_length=80)', ' population = models.{}()'.format('BigIntegerField' if GDAL_VERSION >= (2, 0) else 'FloatField'), ' density = models.FloatField()', ' created = models.DateField()', ' geom = models.PointField(srid=-1)', ] self.assertEqual(model_def, '\n'.join(expected)) def test_time_field(self): # Getting the database identifier used by OGR, if None returned # GDAL does not have the support compiled in. 
ogr_db = get_ogr_db_string() if not ogr_db: self.skipTest("Unable to setup an OGR connection to your database") try: # Writing shapefiles via GDAL currently does not support writing OGRTime # fields, so we need to actually use a database model_def = ogrinspect(ogr_db, 'Measurement', layer_key=AllOGRFields._meta.db_table, decimal=['f_decimal']) except GDALException: self.skipTest("Unable to setup an OGR connection to your database") self.assertTrue(model_def.startswith( '# This is an auto-generated Django model module created by ogrinspect.\n' 'from django.contrib.gis.db import models\n' '\n' 'class Measurement(models.Model):\n' )) # The ordering of model fields might vary depending on several factors (version of GDAL, etc.) self.assertIn(' f_decimal = models.DecimalField(max_digits=0, decimal_places=0)', model_def) self.assertIn(' f_int = models.IntegerField()', model_def) self.assertIn(' f_datetime = models.DateTimeField()', model_def) self.assertIn(' f_time = models.TimeField()', model_def) self.assertIn(' f_float = models.FloatField()', model_def) self.assertIn(' f_char = models.CharField(max_length=10)', model_def) self.assertIn(' f_date = models.DateField()', model_def) # Some backends may have srid=-1 self.assertIsNotNone(re.search(r' geom = models.PolygonField\(([^\)])*\)', model_def)) def test_management_command(self): shp_file = os.path.join(TEST_DATA, 'cities', 'cities.shp') out = StringIO() call_command('ogrinspect', shp_file, 'City', stdout=out) output = out.getvalue() self.assertIn('class City(models.Model):', output) def get_ogr_db_string(): """ Construct the DB string that GDAL will use to inspect the database. GDAL will create its own connection to the database, so we re-use the connection settings from the Django test. """ db = connections.databases['default'] # Map from the django backend into the OGR driver name and database identifier # http://www.gdal.org/ogr/ogr_formats.html # # TODO: Support Oracle (OCI). 
drivers = { 'django.contrib.gis.db.backends.postgis': ('PostgreSQL', "PG:dbname='%(db_name)s'", ' '), 'django.contrib.gis.db.backends.mysql': ('MySQL', 'MYSQL:"%(db_name)s"', ','), 'django.contrib.gis.db.backends.spatialite': ('SQLite', '%(db_name)s', '') } db_engine = db['ENGINE'] if db_engine not in drivers: return None drv_name, db_str, param_sep = drivers[db_engine] # Ensure that GDAL library has driver support for the database. try: Driver(drv_name) except: return None # SQLite/Spatialite in-memory databases if db['NAME'] == ":memory:": return None # Build the params of the OGR database connection string params = [db_str % {'db_name': db['NAME']}] def add(key, template): value = db.get(key, None) # Don't add the parameter if it is not in django's settings if value: params.append(template % value) add('HOST', "host='%s'") add('PORT', "port='%s'") add('USER', "user='%s'") add('PASSWORD', "password='%s'") return param_sep.join(params)
bsd-3-clause
pekeler/arangodb
3rdParty/V8-4.3.61/third_party/python_26/Lib/site-packages/win32/scripts/VersionStamp/bulkstamp.py
21
3709
# # bulkstamp.py: # Stamp versions on all files that can be found in a given tree. # # USAGE: python bulkstamp.py <version> <root directory> <descriptions> # # Example: python bulkstamp.py 103 ..\win32\Build\ desc.txt # # <version> corresponds to the build number. It will be concatenated with # the major and minor version numbers found in the description file. # # Description information is pulled from an input text file with lines of # the form: # # <basename> <white space> <description> # # For example: # # PyWinTypes.dll Common types for Python on Win32 # etc # # The product's name, major, and minor versions are specified as: # # name <white space> <value> # major <white space> <value> # minor <white space> <value> # # The tags are case-sensitive. # # Any line beginning with "#" will be ignored. Empty lines are okay. # import sys import os import verstamp import fnmatch import string import win32api numStamped = 0 g_patterns = [ '*.dll', '*.pyd', '*.exe', '*.ocx', ] def walk(arg, dirname, names): global numStamped vars, debug, descriptions = arg for name in names: for pat in g_patterns: if fnmatch.fnmatch(name, pat): # Handle the "_d" thing. 
pathname = os.path.join(dirname, name) base, ext = os.path.splitext(name) if base[-2:]=='_d': name = base[:-2] + ext is_dll = ext.lower() != ".exe" if descriptions.has_key(os.path.normcase(name)): desc = descriptions[os.path.normcase(name)] try: verstamp.stamp(vars, pathname, desc, is_dll=is_dll) numStamped = numStamped + 1 except win32api.error, (hr, func, desc): print "Could not stamp", pathname, "Error", hr, "-", desc else: print 'WARNING: description not provided for:', name # skip branding this - assume already branded or handled elsewhere # print "Stamped", pathname def load_descriptions(fname, vars): retvars = {} descriptions = { } lines = open(fname, 'r').readlines() for i in range(len(lines)): line = string.strip(lines[i]) if line != '' and line[0] != '#': idx1 = string.find(line, ' ') idx2 = string.find(line, '\t') if idx1 == -1 or idx2 < idx1: idx1 = idx2 if idx1 == -1: print 'ERROR: bad syntax in description file at line %d.' % (i+1) sys.exit(1) key = line[:idx1] val = string.strip(line[idx1:]) if key in vars: retvars[key] = val else: descriptions[key] = val if not retvars.has_key('product'): print 'ERROR: description file is missing the product name.' sys.exit(1) if not retvars.has_key('major'): print 'ERROR: description file is missing the major version number.' sys.exit(1) if not retvars.has_key('minor'): print 'ERROR: description file is missing the minor version number.' sys.exit(1) return retvars, descriptions def scan(build, root, desc, **custom_vars ): global numStamped numStamped = 0 try: build = string.atoi(build) except ValueError: print 'ERROR: build number is not a number: %s' % build sys.exit(1) debug = 0 ### maybe fix this one day varList = ['major', 'minor', 'sub', 'company', 'copyright', 'trademarks', 'product'] vars, descriptions = load_descriptions(desc, varList) vars['build'] = build vars.update(custom_vars) arg = vars, debug, descriptions os.path.walk(root, walk, arg) print "Stamped %d files." 
% (numStamped) if __name__ == '__main__': if len(sys.argv) != 4: print "ERROR: incorrect invocation. See script's header comments." sys.exit(1) apply(scan, tuple(sys.argv[1:]))
apache-2.0
libscie/liberator
liberator/lib/python3.6/site-packages/pip/_vendor/ordereddict.py
1047
4094
# Copyright (c) 2009 Raymond Hettinger # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation files # (the "Software"), to deal in the Software without restriction, # including without limitation the rights to use, copy, modify, merge, # publish, distribute, sublicense, and/or sell copies of the Software, # and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES # OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT # HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR # OTHER DEALINGS IN THE SOFTWARE. 
from UserDict import DictMixin class OrderedDict(dict, DictMixin): def __init__(self, *args, **kwds): if len(args) > 1: raise TypeError('expected at most 1 arguments, got %d' % len(args)) try: self.__end except AttributeError: self.clear() self.update(*args, **kwds) def clear(self): self.__end = end = [] end += [None, end, end] # sentinel node for doubly linked list self.__map = {} # key --> [key, prev, next] dict.clear(self) def __setitem__(self, key, value): if key not in self: end = self.__end curr = end[1] curr[2] = end[1] = self.__map[key] = [key, curr, end] dict.__setitem__(self, key, value) def __delitem__(self, key): dict.__delitem__(self, key) key, prev, next = self.__map.pop(key) prev[2] = next next[1] = prev def __iter__(self): end = self.__end curr = end[2] while curr is not end: yield curr[0] curr = curr[2] def __reversed__(self): end = self.__end curr = end[1] while curr is not end: yield curr[0] curr = curr[1] def popitem(self, last=True): if not self: raise KeyError('dictionary is empty') if last: key = reversed(self).next() else: key = iter(self).next() value = self.pop(key) return key, value def __reduce__(self): items = [[k, self[k]] for k in self] tmp = self.__map, self.__end del self.__map, self.__end inst_dict = vars(self).copy() self.__map, self.__end = tmp if inst_dict: return (self.__class__, (items,), inst_dict) return self.__class__, (items,) def keys(self): return list(self) setdefault = DictMixin.setdefault update = DictMixin.update pop = DictMixin.pop values = DictMixin.values items = DictMixin.items iterkeys = DictMixin.iterkeys itervalues = DictMixin.itervalues iteritems = DictMixin.iteritems def __repr__(self): if not self: return '%s()' % (self.__class__.__name__,) return '%s(%r)' % (self.__class__.__name__, self.items()) def copy(self): return self.__class__(self) @classmethod def fromkeys(cls, iterable, value=None): d = cls() for key in iterable: d[key] = value return d def __eq__(self, other): if isinstance(other, OrderedDict): 
if len(self) != len(other): return False for p, q in zip(self.items(), other.items()): if p != q: return False return True return dict.__eq__(self, other) def __ne__(self, other): return not self == other
cc0-1.0
adamyedidia/parsimony
src/laconic/laconic_meta/LaconicLexer.py
2
10478
# Generated from java-escape by ANTLR 4.5 # encoding: utf-8 from __future__ import print_function from antlr4 import * from io import StringIO def serializedATN(): with StringIO() as buf: buf.write(u"\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\2") buf.write(u"*\u00f2\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4") buf.write(u"\7\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r") buf.write(u"\t\r\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22") buf.write(u"\4\23\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4") buf.write(u"\30\t\30\4\31\t\31\4\32\t\32\4\33\t\33\4\34\t\34\4\35") buf.write(u"\t\35\4\36\t\36\4\37\t\37\4 \t \4!\t!\4\"\t\"\4#\t#\4") buf.write(u"$\t$\4%\t%\4&\t&\4\'\t\'\4(\t(\4)\t)\3\2\3\2\3\2\3\2") buf.write(u"\3\2\3\3\3\3\3\3\3\3\3\3\3\4\3\4\3\5\3\5\3\6\3\6\3\6") buf.write(u"\3\6\3\7\3\7\3\b\3\b\3\b\3\b\3\b\3\t\3\t\3\t\3\t\3\t") buf.write(u"\3\t\3\n\3\n\3\13\3\13\3\f\3\f\3\r\3\r\3\r\3\r\3\r\3") buf.write(u"\r\3\16\3\16\3\16\3\16\3\17\3\17\3\17\3\20\3\20\3\20") buf.write(u"\3\20\3\20\3\20\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3") buf.write(u"\22\3\22\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\24\3\24") buf.write(u"\3\24\3\24\3\24\3\25\3\25\3\26\3\26\3\27\3\27\3\30\3") buf.write(u"\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30\5\30\u00b0\n\30") buf.write(u"\3\31\3\31\3\32\3\32\3\33\3\33\3\34\3\34\3\34\3\35\3") buf.write(u"\35\3\35\3\36\3\36\3\36\3\36\3\37\3\37\3 \3 \3 \3!\3") buf.write(u"!\3\"\3\"\3\"\3#\3#\3$\3$\3%\3%\3&\3&\3&\3&\7&\u00d6") buf.write(u"\n&\f&\16&\u00d9\13&\3&\3&\3&\3&\3&\3\'\6\'\u00e1\n\'") buf.write(u"\r\'\16\'\u00e2\3\'\3\'\3(\3(\7(\u00e9\n(\f(\16(\u00ec") buf.write(u"\13(\3)\6)\u00ef\n)\r)\16)\u00f0\3\u00d7\2*\3\3\5\4\7") buf.write(u"\5\t\6\13\7\r\b\17\t\21\n\23\13\25\f\27\r\31\16\33\17") buf.write(u"\35\20\37\21!\22#\23%\24\'\25)\26+\27-\30/\31\61\32\63") buf.write(u"\33\65\34\67\359\36;\37= ?!A\"C#E$G%I&K\'M(O)Q*\3\2\n") buf.write(u"\5\2\'\',,\61\61\4\2--//\4\2>>@@\4\2((~~\5\2\13\f\17") 
buf.write(u"\17\"\"\5\2C\\aac|\6\2\62;C\\aac|\3\2\62;\u00f9\2\3\3") buf.write(u"\2\2\2\2\5\3\2\2\2\2\7\3\2\2\2\2\t\3\2\2\2\2\13\3\2\2") buf.write(u"\2\2\r\3\2\2\2\2\17\3\2\2\2\2\21\3\2\2\2\2\23\3\2\2\2") buf.write(u"\2\25\3\2\2\2\2\27\3\2\2\2\2\31\3\2\2\2\2\33\3\2\2\2") buf.write(u"\2\35\3\2\2\2\2\37\3\2\2\2\2!\3\2\2\2\2#\3\2\2\2\2%\3") buf.write(u"\2\2\2\2\'\3\2\2\2\2)\3\2\2\2\2+\3\2\2\2\2-\3\2\2\2\2") buf.write(u"/\3\2\2\2\2\61\3\2\2\2\2\63\3\2\2\2\2\65\3\2\2\2\2\67") buf.write(u"\3\2\2\2\29\3\2\2\2\2;\3\2\2\2\2=\3\2\2\2\2?\3\2\2\2") buf.write(u"\2A\3\2\2\2\2C\3\2\2\2\2E\3\2\2\2\2G\3\2\2\2\2I\3\2\2") buf.write(u"\2\2K\3\2\2\2\2M\3\2\2\2\2O\3\2\2\2\2Q\3\2\2\2\3S\3\2") buf.write(u"\2\2\5X\3\2\2\2\7]\3\2\2\2\t_\3\2\2\2\13a\3\2\2\2\re") buf.write(u"\3\2\2\2\17g\3\2\2\2\21l\3\2\2\2\23r\3\2\2\2\25t\3\2") buf.write(u"\2\2\27v\3\2\2\2\31x\3\2\2\2\33~\3\2\2\2\35\u0082\3\2") buf.write(u"\2\2\37\u0085\3\2\2\2!\u008b\3\2\2\2#\u0092\3\2\2\2%") buf.write(u"\u0094\3\2\2\2\'\u009b\3\2\2\2)\u00a0\3\2\2\2+\u00a2") buf.write(u"\3\2\2\2-\u00a4\3\2\2\2/\u00af\3\2\2\2\61\u00b1\3\2\2") buf.write(u"\2\63\u00b3\3\2\2\2\65\u00b5\3\2\2\2\67\u00b7\3\2\2\2") buf.write(u"9\u00ba\3\2\2\2;\u00bd\3\2\2\2=\u00c1\3\2\2\2?\u00c3") buf.write(u"\3\2\2\2A\u00c6\3\2\2\2C\u00c8\3\2\2\2E\u00cb\3\2\2\2") buf.write(u"G\u00cd\3\2\2\2I\u00cf\3\2\2\2K\u00d1\3\2\2\2M\u00e0") buf.write(u"\3\2\2\2O\u00e6\3\2\2\2Q\u00ee\3\2\2\2ST\7h\2\2TU\7w") buf.write(u"\2\2UV\7p\2\2VW\7e\2\2W\4\3\2\2\2XY\7r\2\2YZ\7t\2\2Z") buf.write(u"[\7q\2\2[\\\7e\2\2\\\6\3\2\2\2]^\7}\2\2^\b\3\2\2\2_`") buf.write(u"\7\177\2\2`\n\3\2\2\2ab\7k\2\2bc\7p\2\2cd\7v\2\2d\f\3") buf.write(u"\2\2\2ef\7=\2\2f\16\3\2\2\2gh\7n\2\2hi\7k\2\2ij\7u\2") buf.write(u"\2jk\7v\2\2k\20\3\2\2\2lm\7n\2\2mn\7k\2\2no\7u\2\2op") buf.write(u"\7v\2\2pq\7\64\2\2q\22\3\2\2\2rs\7*\2\2s\24\3\2\2\2t") buf.write(u"u\7.\2\2u\26\3\2\2\2vw\7+\2\2w\30\3\2\2\2xy\7y\2\2yz") buf.write(u"\7j\2\2z{\7k\2\2{|\7n\2\2|}\7g\2\2}\32\3\2\2\2~\177\7") 
buf.write(u"h\2\2\177\u0080\7q\2\2\u0080\u0081\7t\2\2\u0081\34\3") buf.write(u"\2\2\2\u0082\u0083\7k\2\2\u0083\u0084\7h\2\2\u0084\36") buf.write(u"\3\2\2\2\u0085\u0086\7r\2\2\u0086\u0087\7t\2\2\u0087") buf.write(u"\u0088\7k\2\2\u0088\u0089\7p\2\2\u0089\u008a\7v\2\2\u008a") buf.write(u" \3\2\2\2\u008b\u008c\7k\2\2\u008c\u008d\7h\2\2\u008d") buf.write(u"\u008e\7g\2\2\u008e\u008f\7n\2\2\u008f\u0090\7u\2\2\u0090") buf.write(u"\u0091\7g\2\2\u0091\"\3\2\2\2\u0092\u0093\7?\2\2\u0093") buf.write(u"$\3\2\2\2\u0094\u0095\7t\2\2\u0095\u0096\7g\2\2\u0096") buf.write(u"\u0097\7v\2\2\u0097\u0098\7w\2\2\u0098\u0099\7t\2\2\u0099") buf.write(u"\u009a\7p\2\2\u009a&\3\2\2\2\u009b\u009c\7j\2\2\u009c") buf.write(u"\u009d\7c\2\2\u009d\u009e\7n\2\2\u009e\u009f\7v\2\2\u009f") buf.write(u"(\3\2\2\2\u00a0\u00a1\t\2\2\2\u00a1*\3\2\2\2\u00a2\u00a3") buf.write(u"\t\3\2\2\u00a3,\3\2\2\2\u00a4\u00a5\7\u0080\2\2\u00a5") buf.write(u".\3\2\2\2\u00a6\u00a7\7?\2\2\u00a7\u00b0\7?\2\2\u00a8") buf.write(u"\u00a9\7#\2\2\u00a9\u00b0\7?\2\2\u00aa\u00b0\t\4\2\2") buf.write(u"\u00ab\u00ac\7@\2\2\u00ac\u00b0\7?\2\2\u00ad\u00ae\7") buf.write(u">\2\2\u00ae\u00b0\7?\2\2\u00af\u00a6\3\2\2\2\u00af\u00a8") buf.write(u"\3\2\2\2\u00af\u00aa\3\2\2\2\u00af\u00ab\3\2\2\2\u00af") buf.write(u"\u00ad\3\2\2\2\u00b0\60\3\2\2\2\u00b1\u00b2\t\5\2\2\u00b2") buf.write(u"\62\3\2\2\2\u00b3\u00b4\7#\2\2\u00b4\64\3\2\2\2\u00b5") buf.write(u"\u00b6\7`\2\2\u00b6\66\3\2\2\2\u00b7\u00b8\7`\2\2\u00b8") buf.write(u"\u00b9\7,\2\2\u00b98\3\2\2\2\u00ba\u00bb\7~\2\2\u00bb") buf.write(u"\u00bc\7~\2\2\u00bc:\3\2\2\2\u00bd\u00be\7~\2\2\u00be") buf.write(u"\u00bf\7~\2\2\u00bf\u00c0\7,\2\2\u00c0<\3\2\2\2\u00c1") buf.write(u"\u00c2\7B\2\2\u00c2>\3\2\2\2\u00c3\u00c4\7B\2\2\u00c4") buf.write(u"\u00c5\7,\2\2\u00c5@\3\2\2\2\u00c6\u00c7\7%\2\2\u00c7") buf.write(u"B\3\2\2\2\u00c8\u00c9\7%\2\2\u00c9\u00ca\7,\2\2\u00ca") buf.write(u"D\3\2\2\2\u00cb\u00cc\7]\2\2\u00ccF\3\2\2\2\u00cd\u00ce") buf.write(u"\7_\2\2\u00ceH\3\2\2\2\u00cf\u00d0\7<\2\2\u00d0J\3\2") 
buf.write(u"\2\2\u00d1\u00d2\7\61\2\2\u00d2\u00d3\7,\2\2\u00d3\u00d7") buf.write(u"\3\2\2\2\u00d4\u00d6\13\2\2\2\u00d5\u00d4\3\2\2\2\u00d6") buf.write(u"\u00d9\3\2\2\2\u00d7\u00d8\3\2\2\2\u00d7\u00d5\3\2\2") buf.write(u"\2\u00d8\u00da\3\2\2\2\u00d9\u00d7\3\2\2\2\u00da\u00db") buf.write(u"\7,\2\2\u00db\u00dc\7\61\2\2\u00dc\u00dd\3\2\2\2\u00dd") buf.write(u"\u00de\b&\2\2\u00deL\3\2\2\2\u00df\u00e1\t\6\2\2\u00e0") buf.write(u"\u00df\3\2\2\2\u00e1\u00e2\3\2\2\2\u00e2\u00e0\3\2\2") buf.write(u"\2\u00e2\u00e3\3\2\2\2\u00e3\u00e4\3\2\2\2\u00e4\u00e5") buf.write(u"\b\'\2\2\u00e5N\3\2\2\2\u00e6\u00ea\t\7\2\2\u00e7\u00e9") buf.write(u"\t\b\2\2\u00e8\u00e7\3\2\2\2\u00e9\u00ec\3\2\2\2\u00ea") buf.write(u"\u00e8\3\2\2\2\u00ea\u00eb\3\2\2\2\u00ebP\3\2\2\2\u00ec") buf.write(u"\u00ea\3\2\2\2\u00ed\u00ef\t\t\2\2\u00ee\u00ed\3\2\2") buf.write(u"\2\u00ef\u00f0\3\2\2\2\u00f0\u00ee\3\2\2\2\u00f0\u00f1") buf.write(u"\3\2\2\2\u00f1R\3\2\2\2\b\2\u00af\u00d7\u00e2\u00ea\u00f0") buf.write(u"\3\b\2\2") return buf.getvalue() class LaconicLexer(Lexer): atn = ATNDeserializer().deserialize(serializedATN()) decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ] T__0 = 1 T__1 = 2 T__2 = 3 T__3 = 4 T__4 = 5 T__5 = 6 T__6 = 7 T__7 = 8 T__8 = 9 T__9 = 10 T__10 = 11 T__11 = 12 T__12 = 13 T__13 = 14 T__14 = 15 T__15 = 16 T__16 = 17 T__17 = 18 T__18 = 19 OPERATOR_MUL_DIV = 20 OPERATOR_ADD_SUB = 21 OPERATOR_NEGATE = 22 OPERATOR_COMPARE = 23 OPERATOR_BOOLEAN = 24 OPERATOR_NOT = 25 OPERATOR_APPEND = 26 OPERATOR_APPEND2 = 27 OPERATOR_CONCAT = 28 OPERATOR_CONCAT2 = 29 OPERATOR_INDEX = 30 OPERATOR_INDEX2 = 31 OPERATOR_LENGTH = 32 OPERATOR_LENGTH2 = 33 BEGIN_LIST = 34 END_LIST = 35 BEGINEND_LIST2 = 36 COMMENT = 37 WS = 38 VAR = 39 INT = 40 modeNames = [ u"DEFAULT_MODE" ] literalNames = [ u"<INVALID>", u"'func'", u"'proc'", u"'{'", u"'}'", u"'int'", u"';'", u"'list'", u"'list2'", u"'('", u"','", u"')'", u"'while'", u"'for'", u"'if'", u"'print'", u"'ifelse'", u"'='", u"'return'", u"'halt'", 
u"':'" ] symbolicNames = [ u"<INVALID>", u"OPERATOR_MUL_DIV", u"OPERATOR_ADD_SUB", u"OPERATOR_NEGATE", u"OPERATOR_COMPARE", u"OPERATOR_BOOLEAN", u"OPERATOR_NOT", u"OPERATOR_APPEND", u"OPERATOR_APPEND2", u"OPERATOR_CONCAT", u"OPERATOR_CONCAT2", u"OPERATOR_INDEX", u"OPERATOR_INDEX2", u"OPERATOR_LENGTH", u"OPERATOR_LENGTH2", u"BEGIN_LIST", u"END_LIST", u"BEGINEND_LIST2", u"COMMENT", u"WS", u"VAR", u"INT" ] ruleNames = [ u"T__0", u"T__1", u"T__2", u"T__3", u"T__4", u"T__5", u"T__6", u"T__7", u"T__8", u"T__9", u"T__10", u"T__11", u"T__12", u"T__13", u"T__14", u"T__15", u"T__16", u"T__17", u"T__18", u"OPERATOR_MUL_DIV", u"OPERATOR_ADD_SUB", u"OPERATOR_NEGATE", u"OPERATOR_COMPARE", u"OPERATOR_BOOLEAN", u"OPERATOR_NOT", u"OPERATOR_APPEND", u"OPERATOR_APPEND2", u"OPERATOR_CONCAT", u"OPERATOR_CONCAT2", u"OPERATOR_INDEX", u"OPERATOR_INDEX2", u"OPERATOR_LENGTH", u"OPERATOR_LENGTH2", u"BEGIN_LIST", u"END_LIST", u"BEGINEND_LIST2", u"COMMENT", u"WS", u"VAR", u"INT" ] grammarFileName = u"Laconic.g4" def __init__(self, input=None): super(LaconicLexer, self).__init__(input) self.checkVersion("4.5") self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache()) self._actions = None self._predicates = None
mit
domenicosolazzo/practice-django
venv/lib/python2.7/site-packages/django/contrib/messages/tests/urls.py
68
2474
from django.conf.urls import patterns, url from django.contrib import messages from django.core.urlresolvers import reverse from django import forms from django.http import HttpResponseRedirect, HttpResponse from django.template import RequestContext, Template from django.template.response import TemplateResponse from django.views.decorators.cache import never_cache from django.contrib.messages.views import SuccessMessageMixin from django.views.generic.edit import FormView TEMPLATE = """{% if messages %} <ul class="messages"> {% for message in messages %} <li{% if message.tags %} class="{{ message.tags }}"{% endif %}> {{ message }} </li> {% endfor %} </ul> {% endif %} """ @never_cache def add(request, message_type): # don't default to False here, because we want to test that it defaults # to False if unspecified fail_silently = request.POST.get('fail_silently', None) for msg in request.POST.getlist('messages'): if fail_silently is not None: getattr(messages, message_type)(request, msg, fail_silently=fail_silently) else: getattr(messages, message_type)(request, msg) show_url = reverse('django.contrib.messages.tests.urls.show') return HttpResponseRedirect(show_url) @never_cache def add_template_response(request, message_type): for msg in request.POST.getlist('messages'): getattr(messages, message_type)(request, msg) show_url = reverse('django.contrib.messages.tests.urls.show_template_response') return HttpResponseRedirect(show_url) @never_cache def show(request): t = Template(TEMPLATE) return HttpResponse(t.render(RequestContext(request))) @never_cache def show_template_response(request): return TemplateResponse(request, Template(TEMPLATE)) class ContactForm(forms.Form): name = forms.CharField(required=True) slug = forms.SlugField(required=True) class ContactFormViewWithMsg(SuccessMessageMixin, FormView): form_class = ContactForm success_url = show success_message = "%(name)s was created successfully" urlpatterns = patterns('', 
('^add/(debug|info|success|warning|error)/$', add), url('^add/msg/$', ContactFormViewWithMsg.as_view(), name='add_success_msg'), ('^show/$', show), ('^template_response/add/(debug|info|success|warning|error)/$', add_template_response), ('^template_response/show/$', show_template_response), )
mit
CiscoSystems/horizon
openstack_dashboard/dashboards/admin/volumes/volume_types/qos_specs/urls.py
66
1055
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from django.conf.urls import patterns from django.conf.urls import url from openstack_dashboard.dashboards.admin.volumes.volume_types.qos_specs \ import views urlpatterns = patterns( '', url(r'^(?P<qos_spec_id>[^/]+)/create/$', views.CreateKeyValuePairView.as_view(), name='create'), url(r'^(?P<qos_spec_id>[^/]+)/$', views.IndexView.as_view(), name='index'), url(r'^(?P<qos_spec_id>[^/]+)/key/(?P<key>[^/]+)/edit/$', views.EditKeyValuePairView.as_view(), name='edit') )
apache-2.0
ZwickyTransientFacility/ztf_sim
ztf_sim/QueueManager.py
1
55792
"""Queue classes.""" import os from collections import defaultdict from datetime import datetime import logging import numpy as np import pandas as pd import astropy.coordinates as coord import astropy.units as u from astropy.time import Time, TimeDelta import astroplan from .Fields import Fields from .optimize import tsp_optimize, night_optimize from .cadence import enough_gap_since_last_obs from .constants import P48_loc, PROGRAM_IDS, FILTER_IDS, TIME_BLOCK_SIZE from .constants import EXPOSURE_TIME, READOUT_TIME, FILTER_CHANGE_TIME, slew_time from .constants import PROGRAM_BLOCK_SEQUENCE, LEN_BLOCK_SEQUENCE, MAX_AIRMASS from .constants import BASE_DIR from .utils import approx_hours_of_darkness from .utils import skycoord_to_altaz, seeing_at_pointing from .utils import altitude_to_airmass, airmass_to_altitude, RA_to_HA, HA_to_RA from .utils import scalar_len, nightly_blocks, block_index, block_index_to_time from .utils import block_use_fraction, maximum_altitude, compute_limiting_mag class QueueEmptyError(Exception): """Error class for when the nightly queue has no more fields""" pass class QueueManager(object): def __init__(self, queue_name, queue_configuration, rp=None, fields=None): self.logger = logging.getLogger(__name__) # queue name (useful in Scheduler object when swapping queues) self.queue_name = queue_name # list of ObservingPrograms self.observing_programs = queue_configuration.build_observing_programs() # defaults to handle time-windowed queues self.is_TOO = False self.validity_window = None # Hack for greedy queues self.requests_in_window = True if 'validity_window_mjd' in queue_configuration.config: window = queue_configuration.config['validity_window_mjd'] if window is not None: assert(len(window) == 2) self.set_validity_window_mjd(window[0], window[1]) else: self.validity_window = None else: self.validity_window = None # flag to check if assign_nightly_requests has been called tonight self.queue_night = None # block on which the queue parameters 
were calculated self.queue_slot = None # number allowed requests by subprogram tonight # (dict of (program_id, subprogram_name)) self.requests_allowed = {} # the queue itself self.queue = pd.DataFrame() # should we only consider fields from one program in a given # observing block? # CURRENTLY NOT IMPLEMENTED. self.block_programs = False if rp is None: # initialize an empty RequestPool self.rp = RequestPool() else: self.rp = rp if fields is None: self.fields = Fields() else: self.fields = fields self.missed_obs_queue = None def is_valid(self, time): if self.validity_window is None: return True window_start = self.validity_window[0] window_stop = self.validity_window[1] return window_start <= time <= window_stop def validity_window_mjd(self): if self.validity_window is None: return None return [self.validity_window[0].mjd, self.validity_window[1].mjd] def set_validity_window_mjd(self, window_start, window_stop): """Set the time at which this queue can run. Parameters ---------- window_start : `float` Modified Julian Date start time window_stop : `float` Modified Julian Date end time """ if window_start >= window_stop: raise ValueError("validity window start time must be less than end time") # rough sanity checks if window_start <= Time('2017-01-01').mjd: raise ValueError(f"MJD likely out of range: {window_start}") if window_stop >= Time('2030-01-01').mjd: raise ValueError(f"MJD likely out of range: {window_stop}") self.validity_window = [Time(window_start,format='mjd'), Time(window_stop,format='mjd')] def compute_block_use(self): """Returns a dictionary with the fraction of blocks used by the queue, assuming observing starts at the beginning of the validity window""" if self.validity_window is None: raise ValueError('All blocks are valid') start_block = block_index(self.validity_window[0]) obs_start_time = Time(self.validity_window[0],format='mjd') # greedy queues have no len until they have assignments made, so # just use the validity window if len(self.queue) == 
0: stop_block = block_index(self.validity_window[1]) obs_end_time = self.validity_window[1] else: # with no weather, we start at the start of the window if 'n_repeats' in self.queue.columns: n_obs = np.sum(self.queue.n_repeats) exp_time = np.sum(self.queue.exposure_time * self.queue.n_repeats) else: n_obs = len(self.queue) exp_time = np.sum(self.queue.exposure_time) obs_time = (exp_time * u.second) + n_obs * READOUT_TIME obs_end_time = self.validity_window[0] + obs_time stop_block = block_index(obs_end_time) # below breaks if the window is longer than the observations #stop_block = block_index(self.validity_window[1]) assert obs_end_time > obs_start_time # compute fraction of the blocks used by the queue block_use = defaultdict(float) for block in np.arange(start_block, stop_block+1): block_use[block] = block_use_fraction(block, obs_start_time, obs_end_time) return block_use def add_observing_program(self, observing_program): self.observing_programs.append(observing_program) def assign_nightly_requests(self, current_state, obs_log, time_limit = 30 * u.second, block_use = defaultdict(float), timed_obs_count = defaultdict(int)): # clear previous request pool if self.queue_name != 'missed_obs': self.rp.clear_all_request_sets() # set number of allowed requests by program. 
self.determine_allowed_requests(current_state['current_time'], obs_log, timed_obs_count = timed_obs_count) # can be used by field_selection_functions downstream program_fields = {} for program in self.observing_programs: key = (program.program_id, program.subprogram_name) program_fields[key] = \ {'field_ids': program.field_ids, 'field_selection_function': program.field_selection_function, 'requests_allowed': self.requests_allowed[key]} for program in self.observing_programs: request_sets = program.assign_nightly_requests( current_state['current_time'], self.fields, obs_log, program_fields, block_programs=self.block_programs) for rs in request_sets: self.rp.add_request_sets(rs['program_id'], rs['subprogram_name'], rs['program_pi'], rs['field_ids'], rs['filter_ids'], rs['intranight_gap'], rs['exposure_time'], rs['total_requests_tonight']) # assert(len(self.rp.pool) > 0) # any specific tasks needed) self._assign_nightly_requests(current_state, time_limit = time_limit, block_use = block_use) # mark that we've set up the pool for tonight self.queue_night = np.floor(current_state['current_time'].mjd) def adjust_program_exposures_tonight(self, obs_log, mjd_start, mjd_stop): """Use past history to adjust the number of exposures per program tonight. 
        Counts exposures from the start of the month and equalizes any
        excess over NIGHTS_TO_REDISTRIBUTE or the number of nights to
        the end of the month, whichever is less."""

        obs_count_by_program = obs_log.count_equivalent_obs_by_program(
            mjd_range = [mjd_start, mjd_stop])

        # drop engineering/commissioning
        obs_count_by_program = obs_count_by_program[
            obs_count_by_program['program_id'] != 0]
        obs_count_by_program.set_index('program_id', inplace=True)

        # if there are no observations, add zeros
        for program_id in PROGRAM_IDS:
            if program_id != 0:
                if program_id not in obs_count_by_program.index:
                    obs_count_by_program.loc[program_id] = 0

        total_obs = np.sum(obs_count_by_program['n_obs'])

        # infer the program fractions from the subprograms
        target_program_fractions = {propid:0 for propid in PROGRAM_IDS
                                    if propid != 0}
        for op in self.observing_programs:
            target_program_fractions[op.program_id] = \
                op.program_observing_time_fraction

        target_program_fractions = pd.Series(target_program_fractions)
        target_program_fractions.index.name = 'program_id'
        target_program_fractions.name = 'target_fraction'

        target_program_nobs = target_program_fractions * total_obs
        target_program_nobs.name = 'target_program_nobs'
        # note that this gives 0 in case of no observations, as desired

        # have to do the subtraction backwords because of Series/DataFrame
        # API nonsense
        delta_program_nobs = \
            -1*obs_count_by_program.subtract(target_program_nobs, axis=0)

        NIGHTS_TO_REDISTRIBUTE = 5

        time = Time(mjd_stop,format='mjd')
        dtnow = time.to_datetime()
        # nights remaining until the first of next month (December rolls
        # over into January of the following year)
        if dtnow.month != 12:
            next_month_start_mjd = Time(datetime(dtnow.year,dtnow.month+1,1),
                                        scale='utc').mjd
        else:
            next_month_start_mjd = Time(datetime(dtnow.year+1,1,1),
                                        scale='utc').mjd

        nights_left_this_month = np.round(next_month_start_mjd - time.mjd)

        if nights_left_this_month > NIGHTS_TO_REDISTRIBUTE:
            divisor = NIGHTS_TO_REDISTRIBUTE
        else:
            divisor = nights_left_this_month
        # guard against divide-by-zero on the month's last night
        if divisor == 0:
            divisor = 1

        delta_program_nobs /= divisor

        delta_program_nobs = \
            np.round(delta_program_nobs).astype(int)

        return delta_program_nobs

    def adjust_subprogram_exposures_tonight(self, obs_log, mjd_start, mjd_stop):
        """Use past history to adjust the number of exposures per subprogram
        tonight.

        Counts exposures from the start of the month and equalizes any
        excess over NIGHTS_TO_REDISTRIBUTE or the number of nights to
        the end of the month, whichever is less."""

        obs_count_by_subprogram_all = obs_log.count_equivalent_obs_by_subprogram(
            mjd_range = [mjd_start, mjd_stop])

        # drop engineering/commissioning
        obs_count_by_subprogram_all = obs_count_by_subprogram_all[
            obs_count_by_subprogram_all['program_id'] != 0]
        obs_count_by_subprogram_all.set_index(['program_id','subprogram_name'],
                                              inplace=True)

        # only count the subprograms that are currently active.  This is
        # going to cause problems when the programs change--but we are going to
        # only use the subprogram balance for i-band
        obs_count_by_current_subprogram_dict = {}

        # if there are no observations, add zeros
        for op in self.observing_programs:
            idx = (op.program_id, op.subprogram_name)
            if idx not in obs_count_by_subprogram_all.index:
                obs_count_by_current_subprogram_dict[idx] = 0
            else:
                obs_count_by_current_subprogram_dict[idx] = \
                    obs_count_by_subprogram_all.loc[idx,'n_obs']

        obs_count_by_subprogram = pd.Series(obs_count_by_current_subprogram_dict)
        obs_count_by_subprogram.name = 'n_obs'
        obs_count_by_subprogram.index.set_names(
            ['program_id','subprogram_name'], inplace=True)

        total_obs = obs_count_by_subprogram.sum()

        # record the subprogram fractions
        target_subprogram_fractions = defaultdict(float)
        for op in self.observing_programs:
            target_subprogram_fractions[(op.program_id, op.subprogram_name)] = \
                op.program_observing_time_fraction * op.subprogram_fraction

        target_subprogram_fractions = pd.Series(target_subprogram_fractions)
        # target_program_fractions.index.name = 'program_id'
        target_subprogram_fractions.name = 'target_fraction'

        target_subprogram_nobs = target_subprogram_fractions * total_obs
target_subprogram_nobs.name = 'target_subprogram_nobs' target_subprogram_nobs.index.set_names( ['program_id','subprogram_name'], inplace=True) # note that this gives 0 in case of no observations, as desired # have to do the subtraction backwords because of Series/DataFrame # API nonsense delta_subprogram_nobs = \ -1*obs_count_by_subprogram.subtract(target_subprogram_nobs, axis=0).fillna(0) NIGHTS_TO_REDISTRIBUTE = 5 time = Time(mjd_stop,format='mjd') dtnow = time.to_datetime() if dtnow.month != 12: next_month_start_mjd = Time(datetime(dtnow.year,dtnow.month+1,1), scale='utc').mjd else: next_month_start_mjd = Time(datetime(dtnow.year+1,1,1), scale='utc').mjd nights_left_this_month = np.round(next_month_start_mjd - time.mjd) if nights_left_this_month > NIGHTS_TO_REDISTRIBUTE: divisor = NIGHTS_TO_REDISTRIBUTE else: divisor = nights_left_this_month if divisor == 0: divisor = 1 delta_subprogram_nobs /= divisor delta_subprogram_nobs = np.round(delta_subprogram_nobs).astype(int) return delta_subprogram_nobs def determine_allowed_requests(self, time, obs_log, timed_obs_count = defaultdict(int)): """Use count of past observations and expected observing time fractions to determine number of allowed requests tonight. Exclude observations already planned in timed queues.""" self.requests_allowed = {} # rather than using equivalent obs, might be easier to work in # exposure time directly? 
# enforce program balance on a monthly basis dtnow = time.to_datetime() month_start_mjd = Time(datetime(dtnow.year,dtnow.month,1), scale='utc').mjd delta_program_exposures_tonight = self.adjust_program_exposures_tonight( obs_log, month_start_mjd, time.mjd) # use this for i-band only delta_subprogram_exposures_tonight = self.adjust_subprogram_exposures_tonight( obs_log, month_start_mjd, time.mjd) self.logger.info(f'Change in allowed exposures: {delta_program_exposures_tonight}') self.logger.info(f'Needed change in allowed exposures by subprogram: {delta_subprogram_exposures_tonight}') self.logger.debug(f"Sum of change in allowed exposures by subprogram: {delta_subprogram_exposures_tonight.reset_index().groupby('program_id').agg(np.sum)}") self.logger.info(f'Number of timed observations: {timed_obs_count}') dark_time = approx_hours_of_darkness(time) # calculate subprogram fractions excluding list queues and TOOs scheduled_subprogram_sum = defaultdict(float) for op in self.observing_programs: # list queues and TOOs should set field_ids = [], but not None # OPs scheduled using field_selection_function will have # field_ids = None if op.field_ids is not None: if len(op.field_ids) == 0: continue scheduled_subprogram_sum[op.program_id] += \ op.subprogram_fraction for op in self.observing_programs: program_time_tonight = ( dark_time * op.program_observing_time_fraction + (delta_program_exposures_tonight.loc[op.program_id,'n_obs'] - timed_obs_count[op.program_id]) * (EXPOSURE_TIME+READOUT_TIME)) subprogram_time_tonight = ( program_time_tonight * op.subprogram_fraction / scheduled_subprogram_sum[op.program_id]) n_requests = (subprogram_time_tonight.to(u.min) / op.time_per_exposure().to(u.min)).value[0] n_requests = np.round(n_requests).astype(np.int) # i_band program balance needs individual tuning due to # longer cadence and filter blocking if op.subprogram_name == 'i_band': delta_i_nexp = delta_subprogram_exposures_tonight.loc[(2,'i_band')] if delta_i_nexp > 0: 
self.logger.info(f'Adding {delta_i_nexp} additional i-band exposures') n_requests += delta_i_nexp else: self.logger.info(f'Implied change in i-band exposures is negative, skipping supplementation: {delta_i_nexp}') self.requests_allowed[(op.program_id, op.subprogram_name)] = n_requests for key, n_requests in self.requests_allowed.items(): if n_requests < 0: self.requests_allowed[key] = 0 self.logger.info(self.requests_allowed) def next_obs(self, current_state, obs_log): """Given current state, return the parameters for the next request""" # don't store the telescope state locally! # check that assign_nightly_requests has been called tonight. if self.queue_type != 'list': if np.floor(current_state['current_time'].mjd) != self.queue_night: self.assign_nightly_requests(current_state, obs_log) # define functions that actually do the work in subclasses next_obs = self._next_obs(current_state, obs_log) # check if we have a disallowed observation, and reject it: if next_obs['target_limiting_mag'] < 0: self.logger.warning(f'Target is unobservable! 
                                Removing from queue {next_obs}')
            self.remove_requests(next_obs['request_id'])
            # recurse to draw the next-best observation instead
            next_obs = self.next_obs(current_state, obs_log)

        next_obs['queue_name'] = self.queue_name

        return next_obs

    def update_queue(self, current_state, obs_log, **kwargs):
        """Recalculate queue"""

        # define functions that actually do the work in subclasses
        return self._update_queue(current_state, obs_log)

    def remove_requests(self, request_id):
        """Remove a request from both the queue and the request set pool"""

        # define functions that actually do the work in subclasses
        return self._remove_requests(request_id)

    def return_queue(self):
        """Return queue values, ordered in the expected sequence if possible"""

        queue = self._return_queue()

        cols = ['field_id','filter_id','exposure_time','program_id',
                'subprogram_name','ra','dec','ordered']

        # queue-type-specific columns appended by the subclass _return_queue
        if self.queue_type == 'gurobi':
            cols.append('slot_start_time')

        if self.queue_type == 'list':
            cols.append('mode_num')
            cols.append('ewr_num_images')

        return queue.loc[:,cols]


class GurobiQueueManager(QueueManager):
    # Queue that assigns requests to time slots with a Gurobi optimizer,
    # then sequences each slot by solving a traveling-salesman problem.

    def __init__(self, queue_name, queue_configuration, **kwargs):
        super().__init__(queue_name, queue_configuration, **kwargs)
        self.block_obs_number = 0
        self.queue_type = 'gurobi'

    def _assign_nightly_requests(self, current_state,
                                 time_limit = 30.*u.second,
                                 block_use = defaultdict(float)):
        self._assign_slots(current_state, time_limit = time_limit,
                           block_use = block_use)

    def _next_obs(self, current_state, obs_log):
        """Select the highest value request."""

        # do the slot assignment at the beginning of the night
        # (or if the queue is empty, which should be unusual)

        # if we've entered a new block, solve the TSP to sequence the requests
        if (block_index(current_state['current_time'])[0] != self.queue_slot):
            try:
                # bank any unobserved requests from the expiring block
                self._move_requests_to_missed_obs(self.queue_slot)
            except Exception as e:
                self.logger.exception(e)
                self.logger.error('Failed moving requests to missed obs!')
            self._sequence_requests_in_block(current_state)

        if (len(self.queue_order) == 0):
            raise QueueEmptyError("Ran out of observations this block.")

        idx = self.queue_order[0]
        row = self.queue.loc[idx]

        if self.queue_slot in self.filter_by_slot:
            filter_id = int(self.filter_by_slot[self.queue_slot])
        else:
            raise QueueEmptyError("No requests in this slot!")

        next_obs = {'target_field_id': int(row['field_id']),
                    'target_ra': row['ra'],
                    'target_dec': row['dec'],
                    'target_filter_id': filter_id,
                    'target_program_id': int(row['program_id']),
                    'target_subprogram_name': row['subprogram_name'],
                    'target_program_pi': row['program_pi'],
                    'target_exposure_time': row['exposure_time'] * u.second,
                    'target_sky_brightness':
                    self.block_sky_brightness.loc[idx,self.queue_slot][filter_id],
                    'target_limiting_mag':
                    self.block_lim_mags.loc[idx,self.queue_slot][filter_id],
                    'target_metric_value':
                    self.block_slot_metric.loc[idx,self.queue_slot][filter_id],
                    'target_total_requests_tonight':
                    int(row['total_requests_tonight']),
                    'target_mode_num': 0,
                    'target_num_images': 1,
                    'request_id': idx}
        # 'target_sky_brightness': self.queue.ix[idx].sky_brightness,
        # 'target_limiting_mag': self.queue.ix[idx].limiting_mag,
        # 'target_metric_value': self.queue.ix[idx].value,
        # 'target_request_number_tonight':

        return next_obs

    def _slot_metric(self, limiting_mag, dec):
        """Calculate metric for assigning fields to slots.

        penalizes volume for both extinction (airmass) and fwhm penalty
        due to atmospheric refraction, plus sky brightness from
        moon phase and distance
        == 1 for 21st mag.
        normalize metrics by maximum value at transit
        so low-declination fields are not penalized """

        #see 200430 notes
        metric = (10.**(0.6 * (limiting_mag - 21)) /
                  (1-1e-4*(maximum_altitude(dec) - 90)**2.))

        # lock out -99 limiting mags even more aggressively
        return metric.where(limiting_mag > 0, -0.99)

    def _assign_slots(self, current_state, time_limit = 30*u.second,
                      block_use = defaultdict(float)):
        """Assign requests in the Pool to slots"""

        # check that the pool has fields in it
        if len(self.rp.pool) == 0:
            raise QueueEmptyError("No fields in pool")

        # join with fields so we have the information we need
        # make a copy so rp.pool and self.queue are not linked
        df = self.rp.pool.join(self.fields.fields, on='field_id').copy()

        # calculate limiting mag by block.  uses the block midpoint time
        blocks, times = nightly_blocks(current_state['current_time'],
                                       time_block_size=TIME_BLOCK_SIZE)

        # remove the excluded blocks, if any.  Could do this in optimize.py
        # but it makes the optimization problem unneccesarily bigger
        # don't demand 100% of the block is used: tiny fractions lead to
        # infeasible models
        exclude_blocks = [b for (b,v) in block_use.items() if v > 0.95]
        self.logger.debug(f'Excluding completely filled blocks {exclude_blocks}')
        if len(exclude_blocks):
            cut_blocks = np.setdiff1d(blocks, exclude_blocks)
            cut_times = block_index_to_time(cut_blocks,
                current_state['current_time'], where='mid')
            blocks, times = cut_blocks, cut_times

        lim_mags = {}
        sky_brightnesses = {}
        decs = {}
        for bi, ti in zip(blocks, times):
            # drop stale per-block alt/az columns before joining new ones
            if 'altitude' in df.columns:
                df.drop('altitude', axis=1, inplace=True)
            if 'azimuth' in df.columns:
                df.drop('azimuth', axis=1, inplace=True)
            # use pre-computed blocks
            df_alt = self.fields.block_alt[bi]
            df_alt.name = 'altitude'
            df = df.join(df_alt, on='field_id')
            df_az = self.fields.block_az[bi]
            df_az.name = 'azimuth'
            df = df.join(df_az, on='field_id')
            for fid in FILTER_IDS:
                df_limmag, df_sky = \
                    compute_limiting_mag(df, ti, self.fields.Sky,
                                         filter_id = fid)
                lim_mags[(bi, fid)] = df_limmag
                sky_brightnesses[(bi, fid)] = df_sky
                decs[(bi, fid)] = df.dec

        # this results in a MultiIndex on the *columns*: level 0 is block,
        # level 1 is filter_id.  df_metric.unstack() flattens it
        self.block_lim_mags = pd.DataFrame(lim_mags)
        self.block_sky_brightness = pd.DataFrame(sky_brightnesses)
        block_decs = pd.DataFrame(decs)
        self.block_slot_metric = self._slot_metric(self.block_lim_mags,
                                                   block_decs)

        # count the number of observations requested by filter
        df['n_reqs_tot'] = 0
        for fid in FILTER_IDS:
            df['n_reqs_{}'.format(fid)] = \
                df.filter_ids.apply(lambda x: np.sum([xi == fid for xi in x]))
            df['n_reqs_tot'] += df['n_reqs_{}'.format(fid)]

        # prepare the data for input to gurobi
        #import shelve
        #s = shelve.open('tmp_vars.shelf')
        #s['block_lim_mags'] = self.block_lim_mags
        #s['block_slot_metric'] = self.block_slot_metric
        #s['df'] = df
        #s.close()

        self.request_sets_tonight, df_slots, dft = night_optimize(
            self.block_slot_metric, df, self.requests_allowed,
            time_limit = time_limit, block_use = block_use)

        grp = df_slots.groupby('slot')
        self.queued_requests_by_slot = grp['request_id'].apply(list)
        # each slot is observed in a single filter
        self.filter_by_slot = \
            grp['metric_filter_id'].apply(lambda x: np.unique(x)[0])

        # rework to dump output
        df_slots['scheduled'] = True
        dft.set_index(['request_id','slot','metric_filter_id'],inplace=True)
        df_slots.set_index(['request_id','slot','metric_filter_id'],inplace=True)
        dft = dft.join(df_slots,how='outer')
        dft['scheduled'] = dft['scheduled'].fillna(False)
        dft.reset_index(inplace=True)
        dft = pd.merge(dft,df[['field_id']],
                       left_on='request_id', right_index=True)

        n_requests_scheduled = np.sum(dft['scheduled'])
        total_metric_value = np.sum(dft['scheduled']*dft['metric'])
        avg_metric_value = total_metric_value / n_requests_scheduled

        tot_avail_requests_bysubprogram = \
            df.groupby(['program_id','subprogram_name'])['n_reqs_tot'].agg(np.sum)
        tot_avail_requests_bysubprogram.name = 'available'

        # use self.requests_allowed and join this all up
        nscheduled_requests_bysubprogram = \
            dft.loc[dft['scheduled'],['program_id','subprogram_name']].groupby(['program_id','subprogram_name']).agg(len)
        nscheduled_requests_bysubprogram.name = 'scheduled'

        # reformat requests_allowed for joining
        mux = pd.MultiIndex.from_tuples(self.requests_allowed.keys(),
            names = ['program_id','subprogram_name'])
        df_allowed = pd.DataFrame(list(self.requests_allowed.values()),
            index=mux,columns=['allowed'])

        df_summary = df_allowed.join(tot_avail_requests_bysubprogram).join(nscheduled_requests_bysubprogram)

        self.logger.info(df_summary)
        self.logger.info(f'{n_requests_scheduled} requests scheduled')
        self.logger.info(f'{total_metric_value:.2f} total metric value; '
                         f'{avg_metric_value:.2f} average per request')

        # this is not ideal for
        tnow = current_state['current_time']
        yymmdd = tnow.iso.split()[0][2:].replace('-','')
        solution_outfile = f'{BASE_DIR}/../sims/gurobi_solution_{yymmdd}.csv'
        before_noon_utc = (tnow.mjd - np.floor(tnow.mjd)) < 0.5
        # avoid clobbering the solution file with restarts after observing has
        # completed
        if before_noon_utc or (not os.path.exists(solution_outfile)):
            dft.drop(columns=['Yrtf']).to_csv(solution_outfile)

    def _sequence_requests_in_block(self, current_state):
        """Solve the TSP for requests in this slot"""

        self.queue_slot = block_index(current_state['current_time'])[0]

        # raise an error if there are missing blocks--potentially due to
        # excluded blocks
        if self.queue_slot not in self.queued_requests_by_slot.index:
            raise QueueEmptyError(f"Current block {self.queue_slot} is not stored")

        # retrieve requests to be observed in this block
        req_list = self.queued_requests_by_slot.loc[self.queue_slot]

        # request_set ids should be unique per block
        assert( (len(set(req_list)) == len(req_list) ) )

        if np.all(np.isnan(req_list)):
            raise QueueEmptyError("No requests assigned to this block")

        idx = pd.Index(req_list)

        # reconstruct
        df = self.rp.pool.loc[idx].join(self.fields.fields, on='field_id').copy()
        az = self.fields.block_az[self.queue_slot]
        df = df.join(az,
                     on='field_id')

        # now prepend the CALSTOW positoin so we can minimize slew from
        # filter exchanges
        # Need to use current HA=0
        df_blockstart = pd.DataFrame({'ra':HA_to_RA(0,
            current_state['current_time']).to(u.degree).value,
            'dec':-48.,'azimuth':180.},index=[0])

        df_fakestart = pd.concat([df_blockstart,df],sort=True)

        # compute overhead time between all request pairs
        # compute pairwise slew times by axis for all pointings
        slews_by_axis = {}

        def coord_to_slewtime(coord, axis=None):
            # pairwise angular separations, wrapped to the shorter direction
            c1, c2 = np.meshgrid(coord, coord)
            dangle = np.abs(c1 - c2)
            angle = np.where(dangle < (360. - dangle), dangle, 360. - dangle)
            return slew_time(axis, angle * u.deg)

        slews_by_axis['dome'] = coord_to_slewtime(
            df_fakestart['azimuth'], axis='dome')
        slews_by_axis['dec'] = coord_to_slewtime(
            df_fakestart['dec'], axis='dec')
        slews_by_axis['ra'] = coord_to_slewtime(
            df_fakestart['ra'], axis='ha')

        # overhead is set by the slowest axis for each pair
        maxradec = np.maximum(slews_by_axis['ra'], slews_by_axis['dec'])
        maxslews = np.maximum(slews_by_axis['dome'], maxradec)
        # impose a penalty on zero-length slews (which by construction
        # in this mode are from different programs)
        wnoslew = maxslews == 0
        maxslews[wnoslew] = READOUT_TIME * 10.
        overhead_time = np.maximum(maxslews, READOUT_TIME)

        tsp_order, tsp_overhead_time = tsp_optimize(overhead_time.value)

        # remove the fake starting point.  tsp_optimize always starts with
        # the first observation in df, which by construction is our fake point,
        # so we can simply cut it off.
        tsp_order = tsp_order[1:]
        assert(0 not in tsp_order)

        # tsp_order is 0-indexed from overhead time, so I need to
        # reconstruct the request_id
        self.queue_order = df_fakestart.index.values[tsp_order]
        self.queue = df

    def _move_requests_to_missed_obs(self, queue_slot):
        """After a block is expired, move any un-observed requests into the
        missed_obs queue."""

        #self.queue should have any remaining obs
        if len(self.queue):
            cols = ['program_id', 'subprogram_name', 'program_pi', 'field_id',
                    'intranight_gap_min', 'exposure_time', 'priority']
            # it's a little confusing, because each queue entry has all of the
            # filter_ids from the original request set.  So we have to
            # make a pool that only has single filters in it.
            filter_id = int(self.filter_by_slot[queue_slot])
            missed_obs = self.queue.loc[:,cols].copy()
            missed_obs['filter_ids'] = pd.Series([[filter_id] for i in
                missed_obs.index],index=missed_obs.index)
            missed_obs['total_requests_tonight'] = 1

            self.logger.info(f"Saving {len(missed_obs)} requests (filter {filter_id}) to the missed_obs queue: {missed_obs.loc[:,['subprogram_name','field_id']]}")

            # the missed obs RequestPool wants request *sets*, so find out
            # if previous requests were missed
            rows_to_append = []
            for idx, row in missed_obs.iterrows():
                if idx in self.missed_obs_queue.rp.pool.index:
                    # NOTE(review): the paren placement makes this assert
                    # len(... == 1), i.e. the truthiness of an elementwise
                    # comparison -- presumably len(...) == 1 was intended.
                    assert(len(self.missed_obs_queue.rp.pool.loc[idx] == 1))
                    self.missed_obs_queue.rp.pool.loc[idx,'filter_ids'].append(filter_id)
                    self.missed_obs_queue.rp.pool.loc[idx,'total_requests_tonight'] += 1
                else:
                    rows_to_append.append(row)

            # NOTE(review): DataFrame.append was removed in pandas 2.0;
            # switch to pd.concat if the pandas dependency is upgraded.
            self.missed_obs_queue.rp.pool = self.missed_obs_queue.rp.pool.append(rows_to_append)
        else:
            self.logger.debug(f'No remaining queued observations in slot {queue_slot}')

    def _remove_requests(self, request_set_id):
        """Remove a request from both the queue and the pool.

        Note that gurobi queue uses request_set_id to index."""

        # should be the topmost item
        assert (self.queue_order[0] == request_set_id)
        self.queue_order = self.queue_order[1:]
        row = self.queue.loc[request_set_id]
        self.queue = self.queue.drop(request_set_id)
        # (past slot assignments are still in self.queued_requests_by_slot)
        # (we will only reuse the RequestPool if we do recomputes)

        self.rp.remove_request(request_set_id,
                               self.filter_by_slot.loc[self.queue_slot])

    def _return_queue(self):

        # start by setting up the current slot
        if len(self.queue) > 0:
            queue = self.queue.loc[self.queue_order].copy()
            queue.loc[:,'ordered'] = True
            queue.loc[:,'slot_start_time'] = block_index_to_time(
                self.queue_slot, Time.now(), where='start').iso
        else:
            # before the night starts, the queue is empty
            queue = self.queue.copy()

        # now loop over upcoming slots, ensuring they are sorted (should be)
        slots = self.queued_requests_by_slot.index.values
        slots = np.sort(slots)
        for slot in slots:
            # skip slots that have already passed
            if (self.queue_slot is not None):
                if slot <= self.queue_slot:
                    continue
            slot_requests = self.queued_requests_by_slot.loc[slot]
            idx = pd.Index(slot_requests)
            # reconstruct
            df = self.rp.pool.loc[idx].join(self.fields.fields, on='field_id').copy()
            df.loc[:,'filter_id'] = self.filter_by_slot[slot]
            df.loc[:,'ordered'] = False
            df.loc[:,'slot_start_time'] = block_index_to_time(slot,
                Time.now(), where='start').iso
            # NOTE(review): DataFrame.append was removed in pandas 2.0;
            # switch to pd.concat if the pandas dependency is upgraded.
            queue = queue.append(df)

        return queue


class GreedyQueueManager(QueueManager):
    # Queue that recomputes a value metric after every observation and
    # always serves the highest-value request.

    def __init__(self, queue_name, queue_configuration, **kwargs):
        super().__init__(queue_name, queue_configuration, **kwargs)
        self.time_of_last_filter_change = None
        self.min_time_before_filter_change = TIME_BLOCK_SIZE
        self.queue_type = 'greedy'

    def _assign_nightly_requests(self, current_state,
                                 time_limit = 30.*u.second,
                                 block_use = defaultdict(float)):
        # initialize the time of last filter change
        if self.time_of_last_filter_change is None:
            self.time_of_last_filter_change = current_state['current_time']

    def _next_obs(self, current_state,
                  obs_log):
        """Select the highest value request."""

        # since this is a greedy queue, we update the queue after each obs
        # for speed, only do the whole recalculation if we're in a new slot
        # if ((block_index(current_state['current_time'])[0] != self.queue_slot)
        #        or (len(self.queue) == 0)):
        #    self._update_queue(current_state)
        # else:
        #    # otherwise just recalculate the overhead times
        #    _ = self._update_overhead(current_state)

        # to get the "on the fly" cadence windows to work I have to
        # run the whole queue every time right now...
        self._update_queue(current_state, obs_log)

        # in case this wasn't initialized by assign_nightly_requests
        if self.time_of_last_filter_change is None:
            self.time_of_last_filter_change = current_state['current_time']

        # check if filter changes are allowed yet
        if ((current_state['current_time'] - self.time_of_last_filter_change)
                < self.min_time_before_filter_change):
            # only consider observations in the current filter
            queue = self.queue[self.queue['filter_id'] ==
                               current_state['current_filter_id']]
            # unless there are no more observations, in which case allow a
            # change
            if len(queue) == 0:
                queue = self.queue
        else:
            # allow filter changes if desired
            queue = self.queue

        # request_id of the highest value request
        max_idx = queue.value.idxmax()
        row = queue.loc[max_idx]

        next_obs = {'target_field_id': row['field_id'],
                    'target_ra': row['ra'],
                    'target_dec': row['dec'],
                    'target_filter_id': row['filter_id'],
                    'target_program_id': row['program_id'],
                    'target_subprogram_name': row['subprogram_name'],
                    'target_program_pi': row['program_pi'],
                    'target_exposure_time': row['exposure_time'] * u.second,
                    'target_sky_brightness': row['sky_brightness'],
                    'target_limiting_mag': row['limiting_mag'],
                    'target_metric_value': row['value'],
                    'target_total_requests_tonight': row['total_requests_tonight'],
                    'target_mode_num': 0,
                    'target_num_images': 1,
                    'request_id': max_idx}

        return next_obs

    def _metric(self, df):
        """Calculate metric for prioritizing fields.

        Penalizes volume for both extinction (airmass) and fwhm penalty
        due to atmospheric refraction, plus sky brightness from
        moon phase and distance, overhead time
        == 1 for 21st mag, 15 sec overhead.
        Normalize by value at transit."""
        return 10.**(0.6 * (df['limiting_mag'] - 21)) / \
            (1-1e-4*(maximum_altitude(df['dec']) - 90)**2.) / \
            ((EXPOSURE_TIME.value + df['overhead_time']) /
             (EXPOSURE_TIME.value + 10.))

    def _update_overhead(self, current_state, df=None):
        """recalculate overhead values without regenerating whole queue"""

        inplace = df is None

        if inplace:
            # no dataframe supplied, so replace existing self.queue on exit
            df = self.queue
            df.drop(['overhead_time', 'altitude', 'azimuth'], axis=1,
                    inplace=True)

        # compute readout/slew overhead times, plus current alt/az
        df_overhead, df_altaz = self.fields.overhead_time(current_state)

        # nb: df has index request_id, not field_id
        df = pd.merge(df, df_overhead, left_on='field_id', right_index=True)
        df = pd.merge(df, df_altaz, left_on='field_id', right_index=True)
        df.rename(columns={'alt': 'altitude', 'az': 'azimuth'}, inplace=True)

        # add overhead for filter changes
        w = df['filter_id'] != current_state['current_filter_id']
        if np.sum(w):
            df.loc[w, 'overhead_time'] += FILTER_CHANGE_TIME.to(u.second).value

        if inplace:
            df.loc[:, 'value'] = self._metric(df)
            self.queue = df

        return df

    def _update_queue(self, current_state, obs_log):
        """Calculate greedy weighting of requests in the Pool using current
        telescope state only"""

        # store block index for which these values were calculated
        self.queue_slot = block_index(current_state['current_time'])[0]

        # check that the pool has fields in it
        if len(self.rp.pool) == 0:
            raise QueueEmptyError("No fields in pool")

        # join with fields so we have the information we need
        # make a copy so rp.pool and self.queue are not linked
        df_rs = self.rp.pool.join(self.fields.fields, on='field_id').copy()

        # now expand the dataframe of request sets to a dataframe with one
        # row per obs.
        requests = []
        for request_set_id, row in df_rs.iterrows():
            rdict = row.to_dict()
            filter_ids = rdict.pop('filter_ids')
            # one expanded row per requested filter
            for filter_id in filter_ids:
                ri = rdict.copy()
                ri['filter_id'] = filter_id
                ri['request_set_id'] = request_set_id
                requests.append(ri)
        df = pd.DataFrame(requests)

        df = self._update_overhead(current_state, df=df)

        # start with conservative altitude cut;
        # airmass weighting applied naturally below
        # also make a copy because otherwise it retains knowledge of
        # (discarded) previous reference and raises SettingWithCopyWarnings
        df = df.loc[df['altitude'] > 20, :].copy()

        if len(df) == 0:
            raise QueueEmptyError("No fields in queue above altitude cut")

        # if restricting to one program per block, drop other programs
        if self.block_programs:
            current_block_program = PROGRAM_BLOCK_SEQUENCE[
                self.queue_slot % LEN_BLOCK_SEQUENCE]
            df = df.loc[df['program_id'] == current_block_program, :]

        cadence_cuts = enough_gap_since_last_obs(df, current_state,obs_log)

        # NOTE(review): ~ only behaves as logical not here because
        # np.sum(...) > 0 yields a numpy bool; on a builtin bool ~True
        # would be truthy.  Confirm before refactoring.
        self.requests_in_window = np.sum(cadence_cuts) > 0
        if ~self.requests_in_window:
            self.logger.warning(calc_queue_stats(df, current_state,
                intro="No fields with observable cadence windows. Queue in progress:"))
            raise QueueEmptyError("No fields with observable cadence windows")

        # also make a copy because otherwise it retains knowledge of
        # (discarded) previous reference and raises SettingWithCopyWarnings
        df = df.loc[cadence_cuts, :].copy()

        # compute airmasses by field_id
        # airmass = zenith_angle_to_airmass(90. - df_alt)
        # airmass.name = 'airmass'
        # df = pd.merge(df, pd.DataFrame(airmass),
        #              left_on='field_id', right_index=True)
        # airmass cut (or add airmass weighting to value below)
        # df = df[(df['airmass'] <= MAX_AIRMASS) & (df['airmass'] > 0)]

        df_limmag, df_sky = compute_limiting_mag(df,
            current_state['current_time'], self.fields.Sky)
        df.loc[:, 'limiting_mag'] = df_limmag
        df.loc[:, 'sky_brightness'] = df_sky

        #df_limmag.name = 'limiting_mag'
        #df = pd.merge(df, df_limmag, left_on='field_id', right_index=True)

        df.loc[:, 'value'] = self._metric(df)

        self.queue = df

    def _remove_requests(self, request_id):
        """Remove a request from both the queue and the request pool"""

        row = self.queue.loc[request_id]
        self.queue = self.queue.drop(request_id)

        self.rp.remove_request(row['request_set_id'], row['filter_id'])

    def _return_queue(self):

        if 'value' in self.queue.columns:
            queue = self.queue.sort_values('value',ascending=False).copy()
        else:
            queue = self.queue.copy()

        # we have put these in value order but the sequence can change
        queue['ordered'] = False

        return queue


class ListQueueManager(QueueManager):
    """Simple Queue that returns observations in order."""

    def __init__(self, queue_name, queue_configuration, fields=None, **kwargs):
        self.queue_type = 'list'

        # queue name (useful in Scheduler object when swapping queues)
        self.queue_name = queue_name

        if fields is None:
            self.fields = Fields()
        else:
            self.fields = fields

        # the queue itself
        self.load_list_queue(queue_configuration.config['targets'])

        if 'validity_window_mjd' in queue_configuration.config:
            window = queue_configuration.config['validity_window_mjd']
            if window is not None:
                assert(len(window) == 2)
                assert(window[1] > window[0])
                self.validity_window = [Time(window[0],format='mjd'),
                                        Time(window[1],format='mjd')]
            else:
                self.validity_window = None
        else:
            self.validity_window = None

        # flag target-of-opportunity queues by subprogram name prefix
        self.is_TOO = queue_configuration.config['targets'][0]['subprogram_name'].startswith('ToO')

    def _assign_nightly_requests(self, current_state, **kwargs):
        pass
    def _update_queue(self, current_state, obs_log):
        # List queues are static: nothing to recompute per timestep.
        pass

    def load_list_queue(self, queue_dict_list, append=False):
        """Initialize an ordered queue.

        queue_dict_list is a list of dicts, one per observation.
        If append is True the new targets are appended to the existing
        queue; otherwise the queue is replaced.
        """

        df = pd.DataFrame(queue_dict_list)

        # check that major columns are included
        required_columns = ['field_id', 'program_id', 'subprogram_name',
                            'filter_id', 'program_pi']
        for col in required_columns:
            if col not in df.columns:
                raise ValueError(f'Missing required column {col}')

        # by default use field ids alone to specify pointings,
        # but allow manual ra/dec if needed
        if ('ra' not in df.columns) and ('dec' not in df.columns):
            # inner join against the field grid fills in coordinates and
            # silently drops rows whose field_id is unknown
            queue = df.join(self.fields.fields, on='field_id',
                            how='inner').sort_index().copy()
        else:
            queue = df

        # if some of the field ids are bad, there will be missing rows
        # NOTE(review): the message formats df.index (row positions), not the
        # offending field_id values -- confirm this prints what is intended.
        if len(queue) != len(df):
            raise ValueError('One or more field ids are malformed: {}'.format(
                df.index.difference(self.fields.fields.index)))

        # add standard keywords if not present
        if 'exposure_time' not in queue.columns:
            queue['exposure_time'] = EXPOSURE_TIME.to(u.second).value
        if 'max_airmass' not in queue.columns:
            queue['max_airmass'] = MAX_AIRMASS
        if 'n_repeats' not in queue.columns:
            queue['n_repeats'] = 1
        if 'mode_num' not in queue.columns:
            queue['mode_num'] = 0
        # 'ewr_num_images' is the externally-supplied column name; it is
        # stored internally as 'num_images' (default 1).
        if 'ewr_num_images' not in queue.columns:
            queue['num_images'] = 1
        else:
            queue['num_images'] = queue['ewr_num_images']

        if append:
            # NOTE(review): DataFrame.append is deprecated and removed in
            # pandas 2.0; pd.concat is the modern equivalent.
            self.queue = self.queue.append(queue, ignore_index=True)
        else:
            self.queue = queue

    def _next_obs(self, current_state, obs_log):
        """Return the next observation in the time ordered queue unless it
        has expired.

        Walks the queue front-to-back and returns the first target that
        passes the airmass cut and the hour-angle/declination telescope
        limits; raises QueueEmptyError when none qualifies.
        """

        if len(self.queue) == 0:
            raise QueueEmptyError("No more observations in queue!")

        # take the next observation in line
        idx = 0
        while True:
            if idx == len(self.queue):
                raise QueueEmptyError("No valid observations in queue!")
            ra = self.queue.iloc[idx].ra
            ha = RA_to_HA(ra * u.degree,
                          current_state['current_time']
                          ).to(u.degree).wrap_at(180. * u.degree).value
            dec = self.queue.iloc[idx].dec
            sc = coord.SkyCoord(ra, dec, unit=u.deg)
            airmass = altitude_to_airmass(
                skycoord_to_altaz(
                    sc, current_state['current_time']).alt.to(u.deg).value)
            # per-row airmass limit (column added in load_list_queue)
            if airmass >= self.queue.iloc[idx].max_airmass:
                idx += 1
                continue
            # Reed limits |HA| to < 5.95 hours (most relevant for circumpolar
            # fields not hit by the airmass cut)
            if np.abs(ha) >= (5.95 * u.hourangle).to(u.degree).value:
                idx += 1
                continue
            # 1) HA < -17.6 deg && Dec < -22 deg is rejected for both track
            # & stow because of interference with FFI.
            if (ha <= -17.6) & (dec <= -22):
                idx += 1
                continue
            # West of HA -17.6 deg, Dec < -45 deg is rejected for tracking
            # because of the service platform in the south.
            if (ha >= -17.6) & (dec <= -45):
                idx += 1
                continue
            # fabs(HA) > 3 deg is rejected for Dec < -46 to protect the
            # shutter "ears".
            if (np.abs(ha) >= 3.) & (dec <= -46):
                idx += 1
                continue
            # dec > 87.5 is rejected
            if (dec > 87.5):
                idx += 1
                continue
            break

        next_obs = {'target_field_id': int(self.queue.iloc[idx].field_id),
                    'target_ra': self.queue.iloc[idx].ra,
                    'target_dec': self.queue.iloc[idx].dec,
                    'target_filter_id': self.queue.iloc[idx].filter_id,
                    'target_program_id': int(self.queue.iloc[idx].program_id),
                    'target_subprogram_name':
                        self.queue.iloc[idx].subprogram_name,
                    'target_program_pi': self.queue.iloc[idx].program_pi,
                    'target_exposure_time':
                        self.queue.iloc[idx].exposure_time * u.second,
                    # placeholders: list queues do not model sky conditions
                    'target_sky_brightness': 0.,
                    'target_limiting_mag': 0.,
                    'target_metric_value': 0.,
                    'target_total_requests_tonight': 1,
                    'target_mode_num': int(self.queue.iloc[idx].mode_num),
                    'target_num_images': int(self.queue.iloc[idx].num_images),
                    'request_id': self.queue.index[idx]}

        return next_obs

    def _remove_requests(self, request_id):
        """Remove a request from the queue"""

        try:
            # repeated observations decrement n_repeats until exhausted
            if self.queue.loc[request_id, 'n_repeats'] > 1:
                self.queue.loc[request_id, 'n_repeats'] -= 1
            else:
                self.queue = self.queue.drop(request_id)
        except Exception:
            # best-effort: log and continue rather than kill the scheduler
            self.logger.exception(f'Failure removing request {request_id}')

    def _return_queue(self):
        # by construction the list queue is already in order
        queue = self.queue.copy()
        queue['ordered'] = True

        return queue


class RequestPool(object):
    """Pool of observation request sets, one row per field/program."""

    def __init__(self):
        # initialize empty dataframe to add to
        self.pool = pd.DataFrame()
        pass

    def add_request_sets(self, program_id, subprogram_name, program_pi,
                         field_ids, filter_ids, intranight_gap,
                         exposure_time, total_requests_tonight, priority=1):
        """program_ids must be scalar"""

        assert (scalar_len(program_id) == 1)
        assert (scalar_len(subprogram_name) == 1)

        n_fields = scalar_len(field_ids)
        if n_fields == 1:
            # see if it's iterable or not
            try:
                iterator = iter(field_ids)
            except TypeError:
                # if not, assume it's a scalar and wrap in a list
                field_ids = [field_ids]

        # build df as a list of dicts
        request_sets = []
        for i, field_id in enumerate(field_ids):
            request_sets.append({
                'program_id': program_id,
                'subprogram_name': subprogram_name,
                'program_pi': program_pi,
                'field_id': field_id,
                # copy so request sets don't share one mutable filter list
                'filter_ids': filter_ids.copy(),
                # pandas doesn't play well with astropy quantities, so change
                # back to seconds
                'intranight_gap_min': intranight_gap.to(u.minute).value,
                'exposure_time': exposure_time.to(u.second).value,
                'total_requests_tonight': total_requests_tonight,
                'priority': priority})

        # NOTE(review): DataFrame.append is deprecated and removed in
        # pandas 2.0; pd.concat is the modern equivalent.
        self.pool = self.pool.append(pd.DataFrame(request_sets),
                                     ignore_index=True)

    def n_request_sets(self):
        # number of request sets currently pooled
        return len(self.pool)

    def remove_request_sets(self, request_set_ids):
        """Remove completed or otherwise unwanted requests by request_id

        request_ids : scalar or list
            requests to drop (index of self.pool)"""
        self.pool = self.pool.drop(request_set_ids)

    def remove_request(self, request_set_id, filter_id):
        """Remove single completed request from a request set.

        request_set_id: scalar
            request set to modify (index of self.pool)
        filter_id: scalar
            filter_id of completed observation"""

        rs = self.pool.loc[request_set_id].copy()
        filters = rs['filter_ids']
        # this is another step that shouldn't be necessary...
        filters.remove(filter_id)
        if len(filters) == 0:
            # last requested filter observed: the whole set is complete
            self.remove_request_sets(request_set_id)
        else:
            self.pool.at[request_set_id, 'filter_ids'] = filters

    def clear_all_request_sets(self):
        # reset the pool to an empty DataFrame
        self.pool = pd.DataFrame()


# utils for examining inputs
def calc_pool_stats(df, intro=""):
    """Return a human-readable summary string of a request pool.

    df = Q.rp.pool"""

    stats_str = intro + "\n"
    stats_str += "\t{} request sets\n".format(len(df))
    stats_str += "\t{} unique fields\n".format(len(set(df.field_id)))
    for prog_id in PROGRAM_IDS:
        w = df.program_id == prog_id
        stats_str += "\tProgram {}:\n".format(prog_id)
        stats_str += "\t\t{} request sets\n".format(np.sum(w))
        stats_str += "\t\t{} unique fields\n".format(
            len(set(df.loc[w, 'field_id'])))
        stats_str += "\t\t{} median requests tonight per field\n".format(
            np.median(df.loc[w, 'total_requests_tonight']))

    return stats_str


def calc_queue_stats(df, current_state, intro=""):
    """Return a human-readable summary string of a queue.

    df = Q.queue"""

    stats_str = intro + "\n"
    stats_str += "\t{} queued requests\n".format(len(df))
    stats_str += "\t{} unique fields\n".format(len(set(df.field_id)))
    for prog_id in PROGRAM_IDS:
        w = df.program_id == prog_id
        stats_str += "\tProgram {}:\n".format(prog_id)
        if np.sum(w) == 0:
            stats_str += "\t\tNo queued requests!\n"
            continue
        stats_str += "\t\t{} requests\n".format(np.sum(w))
        stats_str += "\t\t{} unique fields\n".format(
            len(set(df.loc[w, 'field_id'])))
        walt = w & (df.loc[w, 'altitude'] > 20)
        stats_str += "\t\t{} fields above altitude cut\n".format(
            np.sum(walt))
        # wfirst = walt & (df.loc[walt, 'request_number_tonight'] == 1)
        # stats_str += "\t\t{} requests awaiting first obs tonight\n".format(
        #     np.sum(wfirst))

    return stats_str
bsd-3-clause
pspierce/compose
compose/cli/errors.py
38
1588
from __future__ import absolute_import

from textwrap import dedent


class UserError(Exception):
    """Base class for errors reported directly to the user.

    The message is dedented and stripped so subclasses can pass
    indented triple-quoted strings.
    """

    def __init__(self, msg):
        cleaned = dedent(msg)
        self.msg = cleaned.strip()

    def __str__(self):
        return self.msg

    # Python 2 compatibility: unicode() yields the same text as str().
    __unicode__ = __str__


class DockerNotFoundMac(UserError):
    """No reachable Docker daemon on OS X."""

    def __init__(self):
        message = """
        Couldn't connect to Docker daemon. You might need to install docker-osx:

        https://github.com/noplay/docker-osx
        """
        super(DockerNotFoundMac, self).__init__(message)


class DockerNotFoundUbuntu(UserError):
    """No reachable Docker daemon on Ubuntu."""

    def __init__(self):
        message = """
        Couldn't connect to Docker daemon. You might need to install Docker:

        http://docs.docker.io/en/latest/installation/ubuntulinux/
        """
        super(DockerNotFoundUbuntu, self).__init__(message)


class DockerNotFoundGeneric(UserError):
    """No reachable Docker daemon on an unrecognized platform."""

    def __init__(self):
        message = """
        Couldn't connect to Docker daemon. You might need to install Docker:

        http://docs.docker.io/en/latest/installation/
        """
        super(DockerNotFoundGeneric, self).__init__(message)


class ConnectionErrorBoot2Docker(UserError):
    """Connection failure when boot2docker appears not to be running."""

    def __init__(self):
        message = """
        Couldn't connect to Docker daemon - you might need to run `boot2docker up`.
        """
        super(ConnectionErrorBoot2Docker, self).__init__(message)


class ConnectionErrorGeneric(UserError):
    """Connection failure to the daemon at a specific URL."""

    def __init__(self, url):
        template = """
        Couldn't connect to Docker daemon at %s - is it running?

        If it's at a non-standard location, specify the URL with the DOCKER_HOST environment variable.
        """
        super(ConnectionErrorGeneric, self).__init__(template % url)
apache-2.0
damianavila/nikola
nikola/plugins/compile/markdown/mdx_podcast.py
5
3143
# -*- coding: utf-8 -*-
#
# Copyright (c) 2013 Michael Rabbitt, Roberto Alsina
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# Inspired by "[Python] reStructuredText GitHub Podcast directive"
# (https://gist.github.com/brianhsu/1407759), public domain by Brian Hsu

from __future__ import print_function, unicode_literals

'''
Extension to Python Markdown for Embedded Audio

Basic Example:

>>> import markdown
>>> text = """[podcast]http://archive.org/download/Rebeldes_Stereotipos/rs20120609_1.mp3[/podcast]"""
>>> html = markdown.markdown(text, [PodcastExtension()])
>>> print(html)
<p><audio src="http://archive.org/download/Rebeldes_Stereotipos/rs20120609_1.mp3"></audio></p>
'''

from markdown.extensions import Extension
from markdown.inlinepatterns import Pattern
from markdown.util import etree

# Matches [podcast]URL[/podcast] and captures the URL.
PODCAST_RE = r'\[podcast\](?P<url>.+)\[/podcast\]'


class PodcastPattern(Pattern):
    """InlinePattern that converts a [podcast]URL[/podcast] marker into an
    HTML5 <audio> element with a single MP3 <source> child."""

    def __init__(self, pattern, configs):
        # configs is accepted for API symmetry but not used by this pattern
        Pattern.__init__(self, pattern)

    def handleMatch(self, m):
        """Build the <audio controls><source src=... type="audio/mpeg"/></audio>
        element for the matched URL.

        NOTE(review): the module docstring's doctest still shows the older
        output form (<audio src="...">) -- confirm and refresh the doctest.
        """
        url = m.group('url').strip()
        audio_elem = etree.Element('audio')
        audio_elem.set('controls', '')
        source_elem = etree.SubElement(audio_elem, 'source')
        source_elem.set('src', url)
        source_elem.set('type', 'audio/mpeg')
        return audio_elem


class PodcastExtension(Extension):
    """Markdown extension registering the [podcast] inline pattern."""

    def __init__(self, configs=None):
        """Initialize the extension.

        configs may be None, a dict, or a list of (key, value) pairs.

        Bug fix: the previous version iterated ``configs`` directly, which
        raised TypeError when ``makeExtension()`` passed its default of
        ``None``, and failed to unpack key/value pairs when given a plain
        dict. It also used a mutable default argument.
        """
        # set extension defaults
        self.config = {}

        # Override defaults with user settings; dict() accepts both a dict
        # and a sequence of (key, value) tuples.
        if configs:
            for key, value in dict(configs).items():
                self.setConfig(key, value)

    def extendMarkdown(self, md, md_globals):
        """Register the podcast inline pattern on the Markdown instance."""
        podcast_md_pattern = PodcastPattern(PODCAST_RE, self.getConfigs())
        podcast_md_pattern.md = md
        # insert before the <not_strong> pattern so URLs are not mangled
        md.inlinePatterns.add('podcast', podcast_md_pattern, "<not_strong")
        md.registerExtension(self)


def makeExtension(configs=None):
    """Entry point used by markdown.markdown(extensions=...)."""
    # Safe with the default None thanks to the fixed PodcastExtension.__init__.
    return PodcastExtension(configs)

if __name__ == '__main__':
    import doctest
    doctest.testmod(optionflags=(doctest.NORMALIZE_WHITESPACE +
                                 doctest.REPORT_NDIFF))
mit
ondrejkajinek/pyGrim
example/routes.py
1
1786
# coding: utf8

from re import compile as re_compile

from pygrim import Route, RouteGroup


class Routes(object):
    """Route table for the example application."""

    def _route_register_func(self, router):
        """Register every application route on ``router``.

        Registration order is preserved: plain string routes first, then
        the regexp route, then the nested route groups.
        """
        # Plain string-matched routes.
        for route in (
            Route(("GET",), "/", "home", "home"),
            Route("GET", "/set_en", "set_en"),
            Route("GET", "/set_cs", "set_cs"),
            Route(("GET",), "/session", "session_text"),
            Route(("GET",), "/cookie_show", "cookie_show"),
            Route(("GET",), "/cookie_set", "cookie_set"),
            Route(("GET",), "/template_display", "use_template_display"),
            Route(("GET",), "/template_method", "use_template_method"),
            Route(("GET",), "/type_error", "type_error"),
            Route(("GET",), "/runtime_error", "runtime_error"),
        ):
            router.map(route)

        # Regexp-matched route with a named template parameter.
        template_route = Route(
            ("GET",),
            re_compile(r"/template/(?P<template>[^/]+)"),
            "template_show",
            "template_show"
        )
        router.map(template_route)

        # Nested route groups: /group/test plus the inner_group routes.
        # The numeric pattern is listed first so integer params win.
        inner_group = RouteGroup(
            "inner_group",
            (
                Route(
                    ("GET",),
                    re_compile(r"/test/(?P<param>[0-9]+)"),
                    "int_inner_group_test"
                ),
                Route(
                    ("GET",),
                    re_compile(r"/test(/(?P<param>[^/]+))?"),
                    "inner_group_test"
                ),
            )
        )
        router.map(RouteGroup(
            "/group",
            (
                Route(("GET",), "/test", "group_test"),
                inner_group,
            )
        ))
mit
sagemathinc/cocalc
src/scripts/hosts.py
2
1342
#!/usr/bin/env python
###############################################################################
#
# CoCalc: Collaborative Calculation
#
# Copyright (C) 2016, Sagemath Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################

# Numbered salvus VM hosts plus two UW math department machines.
_salvus_vms = ['%02dsalvus' % number for number in range(1, 9)]
_uw_math_hosts = ['%s.math.washington.edu' % short
                  for short in ['geom', 'combinat']]

vm_hosts = _salvus_vms + _uw_math_hosts

# Hosts with persistent storage: every VM host plus the servedby/bsd boxes.
persistent_hosts = (vm_hosts
                    + ['servedby%s.salv.us' % n for n in [1]]
                    + ['bsd%s.salv.us' % suffix for suffix in ['', 1]])

# they run sage
unsafe_hosts = (['servedby%s.salv.us' % n for n in [2]]
                + ['bsd%s.salv.us' % n for n in [2]])
agpl-3.0
manipopopo/tensorflow
tensorflow/python/debug/wrappers/framework.py
7
36968
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Framework of debug wrapper sessions. A debug wrapper session is a wrapper around a TensorFlow Python Session. The wrapper preserves the Session interface, most importantly the run() method, while providing abilities to: a) Intercept a run() call to a wrapped session and insert debug tensor watches according to externally-specified debug URLs. b) Release control to an external (i.e., non-Session) object before and after the run() call, so that the external object can perform actions such as launching a UI to let users inspect the intermediate tensors and partition graphs from the run() call. c) (To be implemented) Intercept a run() call and give control to DebugStepper to let it perform stepping / continuing-to actions on the graph. b) (To be implemented in a future CL) Enter an instruction loop to let an external object (e.g., remote client) launch run() and cont() calls remotely. *** The lifetime of a debug wrapper session: *** 1) The wrapper session is created by calling the constructor with a wrapped (normal) session as the argument: wrapper = FooDebugWrapperSession(sess) wherein FooDebugWrapperSession is a concrete subclass implementing the abstract BaseDebugWrapperSession class below. 
2) Near the end of the constructor call, the on_session_init() callback is invoked, with a OnSessionInitRequest object as the argument. The object carries the wrapped (normal) session object. 3) The callback handles the request and returns a OnSessionInitResponse object with an action field, directing the wrapper session what to do next. If the action field in the OnSessionInitResponse is PROCEED, the constuctor returns. Control is released back to the caller of the constructor, which can invoke run() method of wrapper session with the same syntax as a non-wrapped session, e.g.,: wrapper.run(fetches, feed_dict=feeds, options=run_options) Below, A1 - A2 is the lifetime of a wrapper run() call if the action is PROCEED: A1) Right at the start of each run() call, the on_run_start() callback is invoked, with an OnRunStartRequest object carrying information such as the fetches, the feed dict, the run options and run metadata used in this run call, along with a count of how many run calls has occurred on this wrapper session. The callback then returns an OnRunStartResponse object, of which the action field directs what the wrapper session actually will do of the run() call. If the action is DEBUG_RUN, a debugged (tensor-watched) run will ensue, with the debug URLs supplied in the debug_urls field of the response. These can be file:// or grpc:// URLs, for example. If the action is NON_DEBUG_RUN, a non-debug (normal) run will ensue. If the action is INVOKE_STEPPER, no run() call will be issued to the wrapped session. But instead, a DebugStepper (i.e., "continuation debugger") will be used to perform stepping / continue-to actions on the graph. TODO(cais): The event loop for the DebugStepper will request additional callbacks including on_cont_start() and on_cont_end(). Add those. 
A2) Right before the run() returns, the on_run_end() callback is invoked, with an OnRunEndRequest object as the argument, which carries information including the actual action performed in the warpper run() call and the run_metadata from the run() call. However, if the action field in OnSessionInitResponse is REMOTE_INSTR_LOOP, the constructor will automatically invoke an instruction loop that gives the control to a remote caller. In the remote instruction loop, the following steps will happen: B1) Callback on_instr_start() is invoked. The callback will return an OnInstrStartResponse object with an action field which can order one of the following actions: i) a run() call with fetches, feeds and debug_urls specified. ii) a DebugStepper cont() call with target specified. iii) value overrides in the cached tensors from the DebugStepper. iv) exit the instruction loop. B2) The wrapper session carries out the action specified above. B3) If still in the instruction loop, the wrapper session invokes the on_instr_end() callback. After the on_instr_end() callback returns, jump back to B1. TODO(cais): Implemented the instruction loop in B1 - B3. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import abc import re import threading from tensorflow.core.protobuf import config_pb2 from tensorflow.python.client import session from tensorflow.python.debug.lib import debug_utils from tensorflow.python.debug.lib import stepper from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.platform import tf_logging from tensorflow.python.training import monitored_session from tensorflow.python.util import nest # Helper function. def _check_type(obj, expected_types): """Check if an object is of the expected type. Args: obj: The object being checked. expected_types: (`type` or an iterable of `type`s) The expected `type`(s) of obj. 
Raises: TypeError: If obj is not an instance of expected_type. """ if not isinstance(obj, expected_types): raise TypeError("Expected type %s; got type %s" % (expected_types, type(obj))) class OnSessionInitRequest(object): """Request to an on-session-init callback. This callback is invoked during the __init__ call to a debug-wrapper session. """ def __init__(self, sess): """Constructor. Args: sess: A tensorflow Session object. """ _check_type(sess, (session.BaseSession, monitored_session.MonitoredSession)) self.session = sess class OnSessionInitAction(object): """Enum-like values for possible action to take on session init.""" # Proceed, without special actions, in the wrapper session initialization. # What action the wrapper session performs next is determined by the caller # of the wrapper session. E.g., it can call run(). PROCEED = "proceed" # Instead of letting the caller of the wrapper session determine what actions # the wrapper session will perform next, enter a loop to receive instructions # from a remote client. # For example, TensorBoard visual debugger can use this action so that it can # launch session.run() calls remotely. REMOTE_INSTR_LOOP = "remote_instr_loop" class OnSessionInitResponse(object): """Response from an on-session-init callback.""" def __init__(self, action): """Constructor. Args: action: (`OnSessionInitAction`) Debugger action to take on session init. """ _check_type(action, str) self.action = action class OnRunStartRequest(object): """Request to an on-run-start callback. This callback is invoked during a run() call of the debug-wrapper session, immediately after the run() call counter is incremented. """ def __init__(self, fetches, feed_dict, run_options, run_metadata, run_call_count, is_callable_runner=False): """Constructor of `OnRunStartRequest`. Args: fetches: Fetch targets of the run() call. feed_dict: The feed dictionary to the run() call. run_options: RunOptions input to the run() call. 
run_metadata: RunMetadata input to the run() call. The above four arguments are identical to the input arguments to the run() method of a non-wrapped TensorFlow session. run_call_count: 1-based count of how many run calls (including this one) has been invoked. is_callable_runner: (bool) whether a runner returned by Session.make_callable is being run. """ self.fetches = fetches self.feed_dict = feed_dict self.run_options = run_options self.run_metadata = run_metadata self.run_call_count = run_call_count self.is_callable_runner = is_callable_runner class OnRunStartAction(object): """Enum-like values for possible action to take on start of a run() call.""" # Run once with debug tensor-watching. DEBUG_RUN = "debug_run" # Run once with profiler. PROFILE_RUN = "profile_run" # Run without debug tensor-watching. NON_DEBUG_RUN = "non_debug_run" # Instead of running the fetches as a whole, as would normally happen, invoke # the (to-be-implemented) debug stepper. # TODO(cais): Remove "to-be-implemented". INVOKE_STEPPER = "invoke_stepper" class OnRunStartResponse(object): """Request from an on-run-start callback. The caller of the callback can use this response object to specify what action the debug-wrapper session actually takes on the run() call. """ def __init__(self, action, debug_urls, debug_ops="DebugIdentity", node_name_regex_whitelist=None, op_type_regex_whitelist=None, tensor_dtype_regex_whitelist=None, tolerate_debug_op_creation_failures=False): """Constructor of `OnRunStartResponse`. Args: action: (`OnRunStartAction`) the action actually taken by the wrapped session for the run() call. debug_urls: (`list` of `str`) debug_urls used in watching the tensors during the run() call. debug_ops: (`str` or `list` of `str`) Debug op(s) to be used by the debugger. node_name_regex_whitelist: Regular-expression whitelist for node name. op_type_regex_whitelist: Regular-expression whitelist for op type. tensor_dtype_regex_whitelist: Regular-expression whitelist for tensor dtype. 
tolerate_debug_op_creation_failures: Whether debug op creation failures are to be tolerated. """ _check_type(action, str) self.action = action _check_type(debug_urls, list) self.debug_urls = debug_urls self.debug_ops = debug_ops self.node_name_regex_whitelist = node_name_regex_whitelist self.op_type_regex_whitelist = op_type_regex_whitelist self.tensor_dtype_regex_whitelist = tensor_dtype_regex_whitelist self.tolerate_debug_op_creation_failures = ( tolerate_debug_op_creation_failures) class OnRunEndRequest(object): """Request to an on-run-end callback. The callback is invoked immediately before the wrapped run() call ends. """ def __init__(self, performed_action, run_metadata=None, client_graph_def=None, tf_error=None): """Constructor for `OnRunEndRequest`. Args: performed_action: (`OnRunStartAction`) Actually-performed action by the debug-wrapper session. run_metadata: run_metadata output from the run() call (if any). client_graph_def: (GraphDef) GraphDef from the client side, i.e., from the python front end of TensorFlow. Can be obtained with session.graph.as_graph_def(). tf_error: (errors.OpError subtypes) TensorFlow OpError that occurred during the run (if any). """ _check_type(performed_action, str) self.performed_action = performed_action if run_metadata is not None: _check_type(run_metadata, config_pb2.RunMetadata) self.run_metadata = run_metadata self.client_graph_def = client_graph_def self.tf_error = tf_error class OnRunEndResponse(object): """Response from an on-run-end callback.""" def __init__(self): # Currently only a placeholder. pass class BaseDebugWrapperSession(session.SessionInterface): """Base class of debug-wrapper session classes. Concrete classes that inherit from this class need to implement the abstract methods such as on_session_init, on_run_start and on_run_end. """ # TODO(cais): Add on_cont_start and on_cont_end callbacks once the stepper is # is available. 
def __init__(self, sess, thread_name_filter=None, pass_through_operrors=False): """Constructor of `BaseDebugWrapperSession`. Args: sess: An (unwrapped) TensorFlow session instance. It should be a subtype of `BaseSession` or `tf.MonitoredSession`. thread_name_filter: Regular-expression filter (whitelist) for name(s) of thread(s) on which the wrapper session will be active. This regular expression is used in a start-anchored fashion on the thread name, i.e., by applying the `match` method of the compiled pattern. The default `None` means that the wrapper session will be active on all threads. E.g., r"MainThread$", r"QueueRunnerThread.*". pass_through_operrors: If True, all captured OpErrors will be propagated. By default this captures all OpErrors. Raises: ValueError: On invalid `OnSessionInitAction` value. NotImplementedError: If a non-DirectSession sess object is received. """ _check_type(sess, (session.BaseSession, monitored_session.MonitoredSession)) # The session being wrapped. self._sess = sess self._thread_name_filter_pattern = (re.compile(thread_name_filter) if thread_name_filter else None) # TODO(cais/kstevens): Unittest this pass through feature. self._pass_through_operrors = pass_through_operrors # Keeps track of number of run calls that have been performed on this # debug-wrapper session. The count can be used for purposes such as # displaying the state of the Session in a UI and determining a run # number-dependent debug URL. self._run_call_count = 0 # Invoke on-session-init callback. 
response = self.on_session_init(OnSessionInitRequest(self._sess)) _check_type(response, OnSessionInitResponse) if response.action == OnSessionInitAction.PROCEED: pass elif response.action == OnSessionInitAction.REMOTE_INSTR_LOOP: # TODO(cais): Implement REMOTE_INSTR_LOOP raise NotImplementedError( "OnSessionInitAction REMOTE_INSTR_LOOP has not been " "implemented.") else: raise ValueError( "Invalid OnSessionInitAction value: %s" % response.action) self._default_session_context_manager = None # A cache for callables created from CallableOptions. self._cached_callables_from_options = dict() @property def graph(self): return self._sess.graph @property def graph_def(self): return self._sess.graph_def @property def sess_str(self): return self._sess.sess_str @property def session(self): return self._sess def run(self, fetches, feed_dict=None, options=None, run_metadata=None, callable_runner=None, callable_runner_args=None, callable_options=None): """Wrapper around Session.run() that inserts tensor watch options. Args: fetches: Same as the `fetches` arg to regular `Session.run()`. feed_dict: Same as the `feed_dict` arg to regular `Session.run()`. options: Same as the `options` arg to regular `Session.run()`. run_metadata: Same as the `run_metadata` arg to regular `Session.run()`. callable_runner: A `callable` returned by `Session.make_callable()`. If not `None`, `fetches` and `feed_dict` must both be `None`. Mutually exclusive with `callable_options`. callable_runner_args: An optional list of arguments to `callable_runner` or for `callable_options`. callable_options: An instance of `config_pb2.CallableOptions`, to be used with `Session._make_callable_from_options()`. Mutually exclusive with `callable_runner`. Returns: Simply forwards the output of the wrapped `Session.run()` call. Raises: ValueError: On invalid `OnRunStartAction` value. Or if `callable_runner` is not `None` and either or both of `fetches` and `feed_dict` is `None`. 
""" if callable_runner and callable_options: raise ValueError( "callable_runner and callable_options are mutually exclusive, but " "are both specified in this call to BaseDebugWrapperSession.run().") if not (callable_runner or callable_options): self.increment_run_call_count() elif callable_runner and (fetches or feed_dict): raise ValueError( "callable_runner and fetches/feed_dict are mutually exclusive, " "but are used simultaneously.") empty_fetches = not nest.flatten(fetches) if empty_fetches: tf_logging.info( "Due to empty fetches, tfdbg Session wrapper is letting a " "Session.run pass through without any debugging actions.") if self._is_disabled_thread() or empty_fetches: if callable_runner: return callable_runner(*callable_runner_args) elif callable_options: # pylint:disable=protected-access return self._sess._make_callable_from_options( callable_options)(*callable_runner_args) # pylint:enable=protected-access else: return self._sess.run(fetches, feed_dict=feed_dict, options=options, run_metadata=run_metadata) # Invoke on-run-start callback and obtain response. run_start_resp = self.on_run_start( OnRunStartRequest(fetches, feed_dict, options, run_metadata, self._run_call_count, is_callable_runner=bool(callable_runner))) _check_type(run_start_resp, OnRunStartResponse) if run_start_resp.action == OnRunStartAction.DEBUG_RUN: # Decorate RunOption to fill in debugger tensor watch specifications. decorated_run_options = None if callable_options: callable_options_id = id(callable_options) if callable_options_id not in self._cached_callables_from_options: # Make a copy of callable_options to avoid mutating it. 
new_callable_options = config_pb2.CallableOptions() new_callable_options.CopyFrom(callable_options) decorated_run_options = new_callable_options.run_options else: decorated_run_options = options or config_pb2.RunOptions() run_metadata = run_metadata or config_pb2.RunMetadata() if decorated_run_options: self._decorate_run_options_for_debug( decorated_run_options, run_start_resp.debug_urls, debug_ops=run_start_resp.debug_ops, node_name_regex_whitelist=run_start_resp.node_name_regex_whitelist, op_type_regex_whitelist=run_start_resp.op_type_regex_whitelist, tensor_dtype_regex_whitelist=( run_start_resp.tensor_dtype_regex_whitelist), tolerate_debug_op_creation_failures=( run_start_resp.tolerate_debug_op_creation_failures)) # Invoke the run() method of the wrapped Session. Catch any TensorFlow # runtime errors. tf_error = None try: if callable_runner: retvals = callable_runner(*callable_runner_args, options=decorated_run_options, run_metadata=run_metadata) elif callable_options: # pylint:disable=protected-access if callable_options_id in self._cached_callables_from_options: callable_object = self._cached_callables_from_options[ callable_options_id] else: callable_object = self._sess._make_callable_from_options( new_callable_options) self._cached_callables_from_options[ callable_options_id] = callable_object # pylint:enable=protected-access retvals = callable_object( *callable_runner_args, run_metadata=run_metadata) else: retvals = self._sess.run(fetches, feed_dict=feed_dict, options=decorated_run_options, run_metadata=run_metadata) except errors.OpError as op_error: if self._pass_through_operrors: raise op_error tf_error = op_error retvals = op_error run_end_req = OnRunEndRequest( run_start_resp.action, run_metadata=run_metadata, client_graph_def=self._sess.graph.as_graph_def(), tf_error=tf_error) elif run_start_resp.action == OnRunStartAction.PROFILE_RUN: decorated_run_options = options or config_pb2.RunOptions() run_metadata = run_metadata or config_pb2.RunMetadata() 
self._decorate_run_options_for_profile(decorated_run_options) if callable_runner: retvals = callable_runner(*callable_runner_args, options=decorated_run_options, run_metadata=run_metadata) else: retvals = self._sess.run(fetches, feed_dict=feed_dict, options=decorated_run_options, run_metadata=run_metadata) run_end_req = OnRunEndRequest( run_start_resp.action, run_metadata=run_metadata, client_graph_def=self._sess.graph.as_graph_def()) elif (run_start_resp.action == OnRunStartAction.NON_DEBUG_RUN or run_start_resp.action == OnRunStartAction.INVOKE_STEPPER): if callable_runner: raise NotImplementedError( "Stepper mode is not implemented for callables created by " "Session.make_callable().") if run_start_resp.action == OnRunStartAction.INVOKE_STEPPER: with stepper.NodeStepper( self._sess, fetches, feed_dict) as node_stepper: retvals = self.invoke_node_stepper( node_stepper, restore_variable_values_on_exit=True) # Invoke run() method of the wrapped session. retvals = self._sess.run( fetches, feed_dict=feed_dict, options=options, run_metadata=run_metadata) # Prepare arg for the on-run-end callback. run_end_req = OnRunEndRequest(run_start_resp.action) else: raise ValueError( "Invalid OnRunStartAction value: %s" % run_start_resp.action) # Invoke on-run-end callback and obtain response. run_end_resp = self.on_run_end(run_end_req) _check_type(run_end_resp, OnRunEndResponse) # Currently run_end_resp is only a placeholder. No action is taken on it. 
return retvals def _is_disabled_thread(self): thread_name = threading.current_thread().name or "" return (self._thread_name_filter_pattern and not self._thread_name_filter_pattern.match(thread_name)) def run_step_fn(self, step_fn): return step_fn( monitored_session.MonitoredSession.StepContext(self._sess, self.run)) def partial_run_setup(self, fetches, feeds=None): """Sets up the feeds and fetches for partial runs in the session.""" raise NotImplementedError( "partial_run_setup is not implemented for debug-wrapper sessions.") def partial_run(self, handle, fetches, feed_dict=None): raise NotImplementedError( "partial_run is not implemented for debug-wrapper sessions.") def list_devices(self, *args, **kwargs): return self._sess.list_devices(*args, **kwargs) def reset(self, *args, **kwargs): return self._sess.reset(*args, **kwargs) def make_callable(self, fetches, feed_list=None, accept_options=False): runner = self._sess.make_callable( fetches, feed_list=feed_list, accept_options=True) def wrapped_runner(*runner_args, **kwargs): return self.run(None, feed_dict=None, options=kwargs.get("options", None), run_metadata=kwargs.get("run_metadata", None), callable_runner=runner, callable_runner_args=runner_args) return wrapped_runner def _make_callable_from_options(self, callable_options): def wrapped_runner(*feed_values, **kwargs): return self.run(None, run_metadata=kwargs.get("run_metadata", None), callable_options=callable_options, callable_runner_args=feed_values) return wrapped_runner @property def run_call_count(self): return self._run_call_count def increment_run_call_count(self): self._run_call_count += 1 def _decorate_run_options_for_debug( self, run_options, debug_urls, debug_ops="DebugIdentity", node_name_regex_whitelist=None, op_type_regex_whitelist=None, tensor_dtype_regex_whitelist=None, tolerate_debug_op_creation_failures=False): """Modify a RunOptions object for debug tensor watching. Specifies request for outputting partition graphs. 
Adds debug_tensor_watch_opts with proper debug URLs. Args: run_options: (RunOptions) the modified RunOptions object. debug_urls: (list of str) debug URLs to be entered in run_options. debug_tensor_watch_opts. debug_ops: (str or list of str) debug op(s) to be used by the debugger. node_name_regex_whitelist: Regular-expression whitelist for node name. op_type_regex_whitelist: Regular-expression whitelist for op type. tensor_dtype_regex_whitelist: Regular-expression whitelist for tensor dtype. tolerate_debug_op_creation_failures: Whether debug op creation failures are to be tolerated. """ run_options.output_partition_graphs = True debug_utils.watch_graph( run_options, self._sess.graph, debug_urls=debug_urls, debug_ops=debug_ops, node_name_regex_whitelist=node_name_regex_whitelist, op_type_regex_whitelist=op_type_regex_whitelist, tensor_dtype_regex_whitelist=tensor_dtype_regex_whitelist, tolerate_debug_op_creation_failures=tolerate_debug_op_creation_failures) def _decorate_run_options_for_profile(self, run_options): """Modify a RunOptions object for profiling TensorFlow graph execution. Args: run_options: (RunOptions) the modified RunOptions object. """ run_options.trace_level = config_pb2.RunOptions.FULL_TRACE @abc.abstractmethod def on_session_init(self, request): """Callback invoked during construction of the debug-wrapper session. This is a blocking callback. The invocation happens right before the constructor ends. Args: request: (`OnSessionInitRequest`) callback request carrying information such as the session being wrapped. Returns: An instance of `OnSessionInitResponse`. """ @abc.abstractmethod def on_run_start(self, request): """Callback invoked on run() calls to the debug-wrapper session. This is a blocking callback. The invocation happens after the wrapper's run() call is entered, after an increment of run call counter. 
Args: request: (`OnRunStartRequest`) callback request object carrying information about the run call such as the fetches, feed dict, run options, run metadata, and how many `run()` calls to this wrapper session have occurred. Returns: An instance of `OnRunStartResponse`, carrying information to 1) direct the wrapper session to perform a specified action (e.g., run with or without debug tensor watching, invoking the stepper.) 2) debug URLs used to watch the tensors. """ @abc.abstractmethod def on_run_end(self, request): """Callback invoked on run() calls to the debug-wrapper session. This is a blocking callback. The invocation happens right before the wrapper exits its run() call. Args: request: (`OnRunEndRequest`) callback request object carrying information such as the actual action performed by the session wrapper for the run() call. Returns: An instance of `OnRunStartResponse`. """ def as_default(self): return ops.default_session(self) def __enter__(self): if self._default_session_context_manager is None: self._default_session_context_manager = self.as_default() return self._default_session_context_manager.__enter__() def __exit__(self, exec_type, exec_value, exec_tb): self._default_session_context_manager.__exit__( exec_type, exec_value, exec_tb) def __del__(self): if hasattr(self._sess, "__del__"): self._sess.__del__() def close(self): self._sess.close() # TODO(cais): Add _node_name_regex_whitelist and # _node_op_type_regex_whitelist. @abc.abstractmethod def invoke_node_stepper(self, node_stepper, restore_variable_values_on_exit=True): """Callback invoked when the client intends to step through graph nodes. Args: node_stepper: (stepper.NodeStepper) An instance of NodeStepper to be used in this stepping session. restore_variable_values_on_exit: (bool) Whether any variables whose values have been altered during this node-stepper invocation should be restored to their old values when this invocation ends. 
Returns: The same return values as the `Session.run()` call on the same fetches as the NodeStepper. """ def should_stop(self): if hasattr(self._sess, "should_stop"): return self._sess.should_stop() else: raise ValueError( "The wrapped session %r does not have a method called 'should_stop'. " "Do you intend to wrap a tf.MonitoredSession instead?" % self._sess) class WatchOptions(object): """Type for return values of watch_fn.""" def __init__(self, debug_ops=None, node_name_regex_whitelist=None, op_type_regex_whitelist=None, tensor_dtype_regex_whitelist=None, tolerate_debug_op_creation_failures=False): """Constructor of WatchOptions: Debug watch options. Used as return values of `watch_fn`s. Args: debug_ops: (`str` or `list of str`) Debug ops to be used. node_name_regex_whitelist: Regular-expression whitelist for node_name, e.g., `"(weight_[0-9]+|bias_.*)"` op_type_regex_whitelist: Regular-expression whitelist for the op type of nodes, e.g., `"(Variable|Add)"`. If both `node_name_regex_whitelist` and `op_type_regex_whitelist` are set, the two filtering operations will occur in a logical `AND` relation. In other words, a node will be included if and only if it hits both whitelists. tensor_dtype_regex_whitelist: Regular-expression whitelist for Tensor data type, e.g., `"^int.*"`. This whitelist operates in logical `AND` relations to the two whitelists above. tolerate_debug_op_creation_failures: (`bool`) whether debug op creation failures (e.g., due to dtype incompatibility) are to be tolerated by not throwing exceptions. 
""" if debug_ops: self.debug_ops = debug_ops else: self.debug_ops = ["DebugIdentity"] self.node_name_regex_whitelist = node_name_regex_whitelist self.op_type_regex_whitelist = op_type_regex_whitelist self.tensor_dtype_regex_whitelist = tensor_dtype_regex_whitelist self.tolerate_debug_op_creation_failures = ( tolerate_debug_op_creation_failures) def __repr__(self): return ("WatchOptions(debug_ops=%r, node_name_regex_whitelist=%r, " "op_type_regex_whitelist=%r, tensor_dtype_regex_whitelist=%r, " "tolerate_debug_op_creation_failures=%r)" % ( self.debug_ops, self.node_name_regex_whitelist, self.op_type_regex_whitelist, self.tensor_dtype_regex_whitelist, self.tolerate_debug_op_creation_failures)) class NonInteractiveDebugWrapperSession(BaseDebugWrapperSession): """Base class for non-interactive (i.e., non-CLI) debug wrapper sessions.""" def __init__(self, sess, watch_fn=None, thread_name_filter=None, pass_through_operrors=False): """Constructor of NonInteractiveDebugWrapperSession. Args: sess: The TensorFlow `Session` object being wrapped. watch_fn: (`Callable`) A Callable that maps the fetches and feeds of a debugged `Session.run()` call to `WatchOptions.` * Args: * `fetches`: the fetches to the `Session.run()` call. * `feeds`: the feeds to the `Session.run()` call. * Returns: (`tf_debug.WatchOptions`) An object containing debug options including the debug ops to use, the node names, op types and/or tensor data types to watch, etc. See the documentation of `tf_debug.WatchOptions` for more details. thread_name_filter: Regular-expression white list for threads on which the wrapper session will be active. See doc of `BaseDebugWrapperSession` for more details. pass_through_operrors: If true, all captured OpErrors will be propagated. By default this captures all OpErrors. Raises: TypeError: If a non-None `watch_fn` is specified and it is not callable. 
""" BaseDebugWrapperSession.__init__( self, sess, thread_name_filter=thread_name_filter, pass_through_operrors=pass_through_operrors) self._watch_fn = None if watch_fn is not None: if not callable(watch_fn): raise TypeError("watch_fn is not callable") self._watch_fn = watch_fn def on_session_init(self, request): """See doc of BaseDebugWrapperSession.on_run_start.""" return OnSessionInitResponse(OnSessionInitAction.PROCEED) @abc.abstractmethod def prepare_run_debug_urls(self, fetches, feed_dict): """Abstract method to be implemented by concrete subclasses. This method prepares the run-specific debug URL(s). Args: fetches: Same as the `fetches` argument to `Session.run()` feed_dict: Same as the `feed_dict` argument to `Session.run()` Returns: debug_urls: (`str` or `list` of `str`) Debug URLs to be used in this `Session.run()` call. """ def on_run_start(self, request): """See doc of BaseDebugWrapperSession.on_run_start.""" debug_urls, watch_opts = self._prepare_run_watch_config( request.fetches, request.feed_dict) return OnRunStartResponse( OnRunStartAction.DEBUG_RUN, debug_urls, debug_ops=watch_opts.debug_ops, node_name_regex_whitelist=watch_opts.node_name_regex_whitelist, op_type_regex_whitelist=watch_opts.op_type_regex_whitelist, tensor_dtype_regex_whitelist=watch_opts.tensor_dtype_regex_whitelist, tolerate_debug_op_creation_failures=( watch_opts.tolerate_debug_op_creation_failures)) def _prepare_run_watch_config(self, fetches, feed_dict): """Get the debug_urls, and node/op whitelists for the current run() call. Args: fetches: Same as the `fetches` argument to `Session.run()`. feed_dict: Same as the `feed_dict argument` to `Session.run()`. Returns: debug_urls: (str or list of str) Debug URLs for the current run() call. Currently, the list consists of only one URL that is a file:// URL. watch_options: (WatchOptions) The return value of a watch_fn, containing options including debug_ops, and whitelists. 
""" debug_urls = self.prepare_run_debug_urls(fetches, feed_dict) if self._watch_fn is None: watch_options = WatchOptions() else: watch_options = self._watch_fn(fetches, feed_dict) if isinstance(watch_options, tuple): # For legacy return type (tuples). watch_options = WatchOptions(*watch_options) return debug_urls, watch_options def on_run_end(self, request): """See doc of BaseDebugWrapperSession.on_run_end.""" return OnRunEndResponse() def invoke_node_stepper(self, node_stepper, restore_variable_values_on_exit=True): """See doc of BaseDebugWrapperSession.invoke_node_stepper.""" raise NotImplementedError( "NonInteractiveDebugWrapperSession does not support node-stepper mode.")
apache-2.0
abhitopia/tensorflow
tensorflow/contrib/distributions/python/ops/mvn_diag.py
24
7869
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Multivariate Normal distribution classes.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.contrib.distributions.python.ops import distribution_util from tensorflow.contrib.distributions.python.ops import mvn_linear_operator as mvn_linop from tensorflow.python.framework import ops from tensorflow.python.ops import nn __all__ = [ "MultivariateNormalDiag", "MultivariateNormalDiagWithSoftplusScale", ] class MultivariateNormalDiag( mvn_linop.MultivariateNormalLinearOperator): """The multivariate normal distribution on `R^k`. The Multivariate Normal distribution is defined over `R^k` and parameterized by a (batch of) length-`k` `loc` vector (aka "mu") and a (batch of) `k x k` `scale` matrix; `covariance = scale @ scale.T` where `@` denotes matrix-multiplication. #### Mathematical Details The probability density function (pdf) is, ```none pdf(x; loc, scale) = exp(-0.5 ||y||**2) / Z, y = inv(scale) @ (x - loc), Z = (2 pi)**(0.5 k) |det(scale)|, ``` where: * `loc` is a vector in `R^k`, * `scale` is a linear operator in `R^{k x k}`, `cov = scale @ scale.T`, * `Z` denotes the normalization constant, and, * `||y||**2` denotes the squared Euclidean norm of `y`. 
A (non-batch) `scale` matrix is: ```none scale = diag(scale_diag + scale_identity_multiplier * ones(k)) ``` where: * `scale_diag.shape = [k]`, and, * `scale_identity_multiplier.shape = []`. Additional leading dimensions (if any) will index batches. If both `scale_diag` and `scale_identity_multiplier` are `None`, then `scale` is the Identity matrix. The MultivariateNormal distribution is a member of the [location-scale family](https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be constructed as, ```none X ~ MultivariateNormal(loc=0, scale=1) # Identity scale, zero shift. Y = scale @ X + loc ``` #### Examples ```python ds = tf.contrib.distributions # Initialize a single 2-variate Gaussian. mvn = ds.MultivariateNormalDiag( loc=[1., -1], scale_diag=[1, 2.]) mvn.mean().eval() # ==> [1., -1] mvn.stddev().eval() # ==> [1., 2] # Evaluate this on an observation in `R^2`, returning a scalar. mvn.prob([-1., 0]).eval() # shape: [] # Initialize a 3-batch, 2-variate scaled-identity Gaussian. mvn = ds.MultivariateNormalDiag( loc=[1., -1], scale_identity_multiplier=[1, 2., 3]) mvn.mean().eval() # shape: [3, 2] # ==> [[1., -1] # [1, -1], # [1, -1]] mvn.stddev().eval() # shape: [3, 2] # ==> [[1., 1], # [2, 2], # [3, 3]] # Evaluate this on an observation in `R^2`, returning a length-3 vector. mvn.prob([-1., 0]).eval() # shape: [3] # Initialize a 2-batch of 3-variate Gaussians. mvn = ds.MultivariateNormalDiag( loc=[[1., 2, 3], [11, 22, 33]] # shape: [2, 3] scale_diag=[[1., 2, 3], [0.5, 1, 1.5]]) # shape: [2, 3] # Evaluate this on a two observations, each in `R^3`, returning a length-2 # vector. x = [[-1., 0, 1], [-11, 0, 11.]] # shape: [2, 3]. mvn.prob(x).eval() # shape: [2] ``` """ def __init__(self, loc=None, scale_diag=None, scale_identity_multiplier=None, validate_args=False, allow_nan_stats=True, name="MultivariateNormalDiag"): """Construct Multivariate Normal distribution on `R^k`. The `batch_shape` is the broadcast shape between `loc` and `scale` arguments. 
The `event_shape` is given by the last dimension of `loc` or the last dimension of the matrix implied by `scale`. Recall that `covariance = scale @ scale.T`. A (non-batch) `scale` matrix is: ```none scale = diag(scale_diag + scale_identity_multiplier * ones(k)) ``` where: * `scale_diag.shape = [k]`, and, * `scale_identity_multiplier.shape = []`. Additional leading dimensions (if any) will index batches. If both `scale_diag` and `scale_identity_multiplier` are `None`, then `scale` is the Identity matrix. Args: loc: Floating-point `Tensor`. If this is set to `None`, `loc` is implicitly `0`. When specified, may have shape `[B1, ..., Bb, k]` where `b >= 0` and `k` is the event size. scale_diag: Non-zero, floating-point `Tensor` representing a diagonal matrix added to `scale`. May have shape `[B1, ..., Bb, k]`, `b >= 0`, and characterizes `b`-batches of `k x k` diagonal matrices added to `scale`. When both `scale_identity_multiplier` and `scale_diag` are `None` then `scale` is the `Identity`. scale_identity_multiplier: Non-zero, floating-point `Tensor` representing a scaled-identity-matrix added to `scale`. May have shape `[B1, ..., Bb]`, `b >= 0`, and characterizes `b`-batches of scaled `k x k` identity matrices added to `scale`. When both `scale_identity_multiplier` and `scale_diag` are `None` then `scale` is the `Identity`. validate_args: Python `bool`, default `False`. When `True` distribution parameters are checked for validity despite possibly degrading runtime performance. When `False` invalid inputs may silently render incorrect outputs. allow_nan_stats: Python `bool`, default `True`. When `True`, statistics (e.g., mean, mode, variance) use the value "`NaN`" to indicate the result is undefined. When `False`, an exception is raised if one or more of the statistic's batch members are undefined. name: Python `str` name prefixed to Ops created by this class. Raises: ValueError: if at most `scale_identity_multiplier` is specified. 
""" parameters = locals() with ops.name_scope(name): with ops.name_scope("init", values=[ loc, scale_diag, scale_identity_multiplier]): scale = distribution_util.make_diag_scale( loc=loc, scale_diag=scale_diag, scale_identity_multiplier=scale_identity_multiplier, validate_args=validate_args, assert_positive=False) super(MultivariateNormalDiag, self).__init__( loc=loc, scale=scale, validate_args=validate_args, allow_nan_stats=allow_nan_stats, name=name) self._parameters = parameters class MultivariateNormalDiagWithSoftplusScale(MultivariateNormalDiag): """MultivariateNormalDiag with `diag_stddev = softplus(diag_stddev)`.""" def __init__(self, loc, scale_diag, validate_args=False, allow_nan_stats=True, name="MultivariateNormalDiagWithSoftplusScale"): parameters = locals() with ops.name_scope(name, values=[scale_diag]): super(MultivariateNormalDiagWithSoftplusScale, self).__init__( loc=loc, scale_diag=nn.softplus(scale_diag), validate_args=validate_args, allow_nan_stats=allow_nan_stats, name=name) self._parameters = parameters
apache-2.0
JoostvanPinxten/ConstraintPuzzler
constraints/cellgreaterthancellconstraint.py
1
1639
''' Created on 7 jan. 2013 @author: Juice ''' from constraints import Constraint class CellGreaterThanCellConstraint(Constraint): def __init__(self, group, initialValues): super(CellGreaterThanCellConstraint, self).__init__(group, initialValues) self.lesserCell = None self.greaterCell = None def notify(self, cell): pass def setLesserCell(self, cell): self.lesserCell = cell def setGreaterCell(self, cell): self.greaterCell = cell def applyConstraint(self): smallerPossibilities = set(self.lesserCell.getPossibleValues()) largerPossibilities = set(self.greaterCell.getPossibleValues()) minimumValue = min(largerPossibilities) maximumValue = max(smallerPossibilities) # print minimumValue, maximumValue allowedRange = range(minimumValue+1, maximumValue+1) #print "s", smallerPossibilities, allowedRange for val in smallerPossibilities: if(val not in allowedRange): self.lesserCell.remove(val) allowedRange = range(minimumValue, maximumValue) #print "l", largerPossibilities, allowedRange for val in largerPossibilities: if(val not in allowedRange): self.greaterCell.remove(val) def getType(self): return "Total Sum Value Constraint" def setTotalValue(self, value): self.totalValue = value def getAllowedValuesForValueList(self, allowedValues, usedValues): return allowedValues
mit
e0ne/cinder
cinder/db/sqlalchemy/migration.py
6
2910
# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from migrate import exceptions as versioning_exceptions from migrate.versioning import api as versioning_api from migrate.versioning.repository import Repository import sqlalchemy from cinder.db.sqlalchemy.api import get_engine from cinder import exception from cinder.i18n import _ INIT_VERSION = 000 _REPOSITORY = None def db_sync(version=None): if version is not None: try: version = int(version) except ValueError: raise exception.Error(_("version should be an integer")) current_version = db_version() repository = _find_migrate_repo() if version is None or version > current_version: return versioning_api.upgrade(get_engine(), repository, version) else: return versioning_api.downgrade(get_engine(), repository, version) def db_version(): repository = _find_migrate_repo() try: return versioning_api.db_version(get_engine(), repository) except versioning_exceptions.DatabaseNotControlledError: # If we aren't version controlled we may already have the database # in the state from before we started version control, check for that # and set up version_control appropriately meta = sqlalchemy.MetaData() engine = get_engine() meta.reflect(bind=engine) tables = meta.tables if len(tables) == 0: db_version_control(INIT_VERSION) return versioning_api.db_version(get_engine(), 
repository) else: raise exception.Error(_("Upgrade DB using Essex release first.")) def db_initial_version(): return INIT_VERSION def db_version_control(version=None): repository = _find_migrate_repo() versioning_api.version_control(get_engine(), repository, version) return version def _find_migrate_repo(): """Get the path for the migrate repository.""" global _REPOSITORY path = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'migrate_repo') assert os.path.exists(path) if _REPOSITORY is None: _REPOSITORY = Repository(path) return _REPOSITORY
apache-2.0
playm2mboy/edx-platform
common/lib/xmodule/xmodule/errortracker.py
177
1476
import logging import sys import traceback from collections import namedtuple log = logging.getLogger(__name__) ErrorLog = namedtuple('ErrorLog', 'tracker errors') def exc_info_to_str(exc_info): """Given some exception info, convert it into a string using the traceback.format_exception() function. """ return ''.join(traceback.format_exception(*exc_info)) def in_exception_handler(): '''Is there an active exception?''' return sys.exc_info() != (None, None, None) def make_error_tracker(): '''Return an ErrorLog (named tuple), with fields (tracker, errors), where the logger appends a tuple (message, exception_str) to the errors on every call. exception_str is in the format returned by traceback.format_exception. error_list is a simple list. If the caller modifies it, info will be lost. ''' errors = [] def error_tracker(msg): '''Log errors''' exc_str = '' if in_exception_handler(): exc_str = exc_info_to_str(sys.exc_info()) # don't display irrelevant gunicorn sync error if (('python2.7/site-packages/gunicorn/workers/sync.py' in exc_str) and ('[Errno 11] Resource temporarily unavailable' in exc_str)): exc_str = '' errors.append((msg, exc_str)) return ErrorLog(error_tracker, errors) def null_error_tracker(msg): '''A dummy error tracker that just ignores the messages''' pass
agpl-3.0
Fujin-Suzukaze/GT-I9505-Kernel-JB-4.3
tools/perf/scripts/python/failed-syscalls-by-pid.py
11180
2058
# failed system call counts, by pid # (c) 2010, Tom Zanussi <tzanussi@gmail.com> # Licensed under the terms of the GNU GPL License version 2 # # Displays system-wide failed system call totals, broken down by pid. # If a [comm] arg is specified, only syscalls called by [comm] are displayed. import os import sys sys.path.append(os.environ['PERF_EXEC_PATH'] + \ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') from perf_trace_context import * from Core import * from Util import * usage = "perf script -s syscall-counts-by-pid.py [comm|pid]\n"; for_comm = None for_pid = None if len(sys.argv) > 2: sys.exit(usage) if len(sys.argv) > 1: try: for_pid = int(sys.argv[1]) except: for_comm = sys.argv[1] syscalls = autodict() def trace_begin(): print "Press control+C to stop and show the summary" def trace_end(): print_error_totals() def raw_syscalls__sys_exit(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, id, ret): if (for_comm and common_comm != for_comm) or \ (for_pid and common_pid != for_pid ): return if ret < 0: try: syscalls[common_comm][common_pid][id][ret] += 1 except TypeError: syscalls[common_comm][common_pid][id][ret] = 1 def print_error_totals(): if for_comm is not None: print "\nsyscall errors for %s:\n\n" % (for_comm), else: print "\nsyscall errors:\n\n", print "%-30s %10s\n" % ("comm [pid]", "count"), print "%-30s %10s\n" % ("------------------------------", \ "----------"), comm_keys = syscalls.keys() for comm in comm_keys: pid_keys = syscalls[comm].keys() for pid in pid_keys: print "\n%s [%d]\n" % (comm, pid), id_keys = syscalls[comm][pid].keys() for id in id_keys: print " syscall: %-16s\n" % syscall_name(id), ret_keys = syscalls[comm][pid][id].keys() for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True): print " err = %-20s %10d\n" % (strerror(ret), val),
gpl-2.0
nico202/pyNeMo
Batch.py
1
9496
#!/usr/bin/env python2 import subprocess import numpy as np import time import os import re import string from libs.runParser import parse_args from libs.IO import is_folder, hashDict, write_batch_log, saveKey, saveFile from libs.IO import cprint, time_to_steps, write_log, ask from plugins.importer import import_history import config from sys import argv, exit from libs.simulations import main_simulation_run from libs.IO import import_network print ("\n\n\n----------------") name = False new_args = [] for a in argv: if not "--" in a: a = '\'%s\'' % a new_args.append(a) #Get the output dir, or set it to batch if none defined output_dir = "batch" if not "--history-dir" in new_args: new_args.append("--history-dir") new_args.append(output_dir) else: output_dir = new_args[new_args.index("--history-dir")+1] if "'" in output_dir: output_dir = string.replace(output_dir,"'", "") if ( config.BATCH_CONFIRM_NO_SAVE_IMAGE and not "--save-spikes" in new_args ): ask("You are not saving the spikes image.") if ( config.BATCH_CONFIRM_SHOW_IMAGE and all([ not "--no-show-membrane" in new_args or not "--no-show-spikes" in new_args , not "--no-show-images" in new_args ])): ask("You are showing images", "Use \"--no-show-images\" and run the batch again") args = " ".join(new_args[1:]) #Save args to file l = open("batch_history.log", 'a') l.write("%s, %s" % (output_dir, args)) l.close() ranges = True def missing(input_string): #TODO: check [ number + , + number + , + number ] ranges = re.findall("\[(.*?)\]", input_string) if ranges: return ranges[0] else: return False def substituteRanges(input_strings, commands): if not commands: commands = [] for input_string in input_strings: ranges = missing(input_string) if ranges: start, stop, step = [float(n) for n in ranges.split(',')] steps = np.linspace (start, stop, (abs(stop-start)/step)+1) for s in steps: command = input_string.replace("["+ranges+"]", str(s), 1) commands.append(command) else: commands.append(input_string) return commands 
return substituteRanges(commands, commands) commands = substituteRanges([args], []) #fix parentheses: commands = [ i for i in commands if not missing(i) ] #set just to be sure no duplicate runs real_commands = set(commands) #TODO: #FIXME: remove "--show-image" etc to prevent different hashes of same config session_hash = hashDict(real_commands) #Save commands list is_folder("./commands") commands_file = "./commands/" + session_hash + "_commands" cprint ("Saving %s commands to file: %s" % (len(real_commands), commands_file), 'info') saveKey(commands_file, commands) # Debug: re-enable in release # cprint("Running in 5s", "warning") # time.sleep(5) #Start is_folder (output_dir) cprint("We'll use %s as output dir" % (output_dir), 'info') #Let's run the simulations (surely not the best way, but does the job) #TODO: add the loop inside Runner? start = time.time() try: run = import_history(output_dir + "/" + session_hash + "_batch") recover_from_lap = run["cycle"] cprint("You already run this sym, recovering from %s" % (recover_from_lap), 'okblue') except IOError: print("First time you run this exact sim") recover_from_lap = 0 pass last_save_time = time.time() all_start_time = last_save_time next_sleep = False forced_quit = False lap = 0 laps = len(real_commands) cprint ("We are going to run %s simulations!" % (laps), 'okblue') for com in real_commands: try: if lap < recover_from_lap: lap += 1 continue if next_sleep: forced_quit = True cprint("Press CTRL-C NOW! (trice) to quit", 'warning') time.sleep(5) forced_quit = False next_sleep = False #Start. 
Take time start_time = time.time() print ("Lap %s / %s" % (lap , laps)) ##Replace subprocess: call it directly to save time parser = parse_args() args = parser.parse_args([w.strip("'") for w in com.split()]) #That way is 100% compatible with the old os call #Load all the parameters (choose if/when read from CLI/config.py) use_config = True if os.path.isfile("config.py") else False if use_config: import config network_file = args.network_file config_steps = config.STEPS if use_config else 0 steps = time_to_steps(args.steps) if args.steps != None else config_steps config_history = config.HISTORY_DIR if use_config else 0 output_dir = args.history_dir or config_history config_cuda = config.TRY_CUDA if use_config else 0 use_cuda = args.use_cuda if args.use_cuda != None else config_cuda config_cuda_backend = config.CUDA_BACKEND if use_config else 0 cuda_backend = args.cuda_backend or config_cuda_backend try: cuda_backend = int(cuda_backend) except ValueError: exit("Error, wrong cuda_backend format. 
Must be a int") vue_prehook = args.vue_prehook vue_posthook = args.vue_posthook #Robot args control_robot = args.control_robot robot_mode = args.robot_mode #FIXME disable_sensory = args.disable_sensory ######################################## #L1 import_start_time = time.time() networks, config_file_name =\ import_network ( (network_file, (vue_prehook, vue_posthook)) , (use_cuda, cuda_backend) , (disable_sensory)) #L1 import_end_time = time.time() cprint("Import time: %s" % (import_end_time - import_start_time), 'info', True) nemo_simulation = networks[0] to_save = networks[1][0] neuron_number = networks[1][1] stimuli_dict = networks[1][2] #L3 if not disable_sensory: #CLI sensory_neurons_in, sensory_neurons_out = networks[1][3] else: sensory_neurons_in = [] sensory_neurons_out = [] dict_config = { "neurons": networks[1][4] , "sensory_neurons":(sensory_neurons_in, sensory_neurons_out) , "save": to_save , "step_input": stimuli_dict , "synapses": networks[1][5] , "name": networks[1][6] } config_dict_hash = hashDict(dict_config) #L2 if (sensory_neurons_in or sensory_neurons_out): from libs.pYARP import RobotYARP #Import only if strictly needed robot = RobotYARP () else: robot = None #Save input files #saveKey(config_dict_hash + "_input", dict_config, output_dir) saveFile(config_file_name, output_dir + "/" + config_dict_hash + "_input.py") nemo_start_time = time.time() output = main_simulation_run( { "steps": steps , "use_cuda": use_cuda , "cuda_backend": cuda_backend }#general_config , (nemo_simulation, (to_save, neuron_number), stimuli_dict) #L1 , (robot) #L2 , (sensory_neurons_in, sensory_neurons_out) #L3 , save_membrane=args.save_membrane ) nemo_end_time = time.time() cprint("NeMo speedup: %s" % (output["ran_steps"]/((nemo_end_time - nemo_start_time)*1000)), 'okgreen', True) general_config_out = {"steps": output["ran_steps"]} #Add various robot params #Save/process output + input uniqueId = hashDict(general_config_out) + "_" + config_dict_hash #Step is included in 
the output print ("Saving output to %s_output" % uniqueId) saveKey(uniqueId + "_output", output, output_dir) #Save the log write_log(uniqueId, output_dir = output_dir) lap +=1 #save only every X laps and on keyboard interrupt if not lap % config.BATCH_SAVE_EVERY: now = time.time() time_diff = now - last_save_time last_save_time = now cprint("\n-----------------------------------\nSAVING\n") cprint("This round mean step time: %s" % (time_diff / config.BATCH_SAVE_EVERY)) write_batch_log(session_hash + "_batch", lap, output_dir) cprint("-------------------") else: end_time = time.time() cprint ("Realtime ratio: %sX" % (output["ran_steps"]/((end_time - start_time)*1000)), 'info') except KeyboardInterrupt: if not forced_quit: write_batch_log(session_hash + "_batch", lap, output_dir) next_sleep = True cprint ("Forced saving! Press CTRL-C again (on cue) to quit", 'warning') else: #save 2 lap less to be sure sims have not been interrupted write_batch_log(session_hash + "_batch", lap - 2, output_dir) cprint("Forced QUIT", 'fail') exit() run_time = time.time() - all_start_time cprint("Batch runned successfully in %s!" % (run_time), 'okgreen') end = time.time() #subprocess.call("notify-send 'pyNeMo' 'batch process ended!'", shell = True)
gpl-2.0
dischinator/pyload
module/gui/Overview.py
41
7578
# -*- coding: utf-8 -*- """ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, see <http://www.gnu.org/licenses/>. @author: mkaay """ from PyQt4.QtCore import * from PyQt4.QtGui import * from module.utils import formatSpeed, formatSize class OverviewModel(QAbstractListModel): PackageName = 10 Progress = 11 PartsFinished = 12 Parts = 13 ETA = 14 Speed = 15 CurrentSize = 16 MaxSize = 17 Status = 18 def __init__(self, view, connector): QAbstractListModel.__init__(self) self.packages = [] def queueChanged(self): #dirty.. 
self.beginResetModel() self.packages = [] def partsFinished(p): f = 0 for c in p.children: if c.data["status"] == 0: f += 1 return f def maxSize(p): ms = 0 cs = 0 for c in p.children: try: s = c.data["downloading"]["size"] except: s = c.data["size"] if c.data["downloading"]: cs += s - c.data["downloading"]["bleft"] elif self.queue.getProgress(c, False) == 100: cs += s ms += s return ms, cs def getProgress(p): for c in p.children: if c.data["status"] == 13: pass # TODO return _("Unpacking"), int(c.data["progress"]) return _("Downloading"), self.queue.getProgress(p) d = self.queue._data for p in d: status, progress = getProgress(p) maxsize, currentsize = maxSize(p) speed = self.queue.getSpeed(p) if speed: eta = (maxsize - (maxsize * (progress/100.0)))/speed else: eta = 0 if not speed and not progress: status = _("Queued") info = { OverviewModel.PackageName: p.data["name"], OverviewModel.Progress: progress, OverviewModel.PartsFinished: partsFinished(p), OverviewModel.Parts: len(p.children), OverviewModel.ETA: int(eta), OverviewModel.Speed: speed, OverviewModel.CurrentSize: currentsize, OverviewModel.MaxSize: maxsize, OverviewModel.Status: status, } self.packages.append(info) self.endResetModel() def headerData(self, section, orientation, role=Qt.DisplayRole): return QVariant(_("Package")) def rowCount(self, parent=QModelIndex()): return len(self.packages) def data(self, index, role=Qt.DisplayRole): if role in [OverviewModel.PackageName, OverviewModel.Progress, OverviewModel.PartsFinished, OverviewModel.Parts, OverviewModel.ETA, OverviewModel.Speed, OverviewModel.CurrentSize, OverviewModel.MaxSize, OverviewModel.Status]: return QVariant(self.packages[index.row()][role]) return QVariant() class OverviewView(QListView): def __init__(self, connector): QListView.__init__(self) self.setModel(OverviewModel(self, connector)) self.setAlternatingRowColors(True) self.delegate = OverviewDelegate(self) self.setItemDelegate(self.delegate) class OverviewDelegate(QItemDelegate): def 
__init__(self, parent): QItemDelegate.__init__(self, parent) self.parent = parent self.model = parent.model() def paint(self, painter, option, index): option.rect.setHeight(59+16) option.rect.setWidth(self.parent.width()-20) #if option.state & QStyle.State_Selected: # painter.fillRect(option.rect, option.palette.color(QPalette.Highlight)) packagename = index.data(OverviewModel.PackageName).toString() partsf = index.data(OverviewModel.PartsFinished).toString() parts = index.data(OverviewModel.Parts).toString() eta = int(index.data(OverviewModel.ETA).toString()) speed = index.data(OverviewModel.Speed).toString() or 0 progress = int(index.data(OverviewModel.Progress).toString()) currentSize = int(index.data(OverviewModel.CurrentSize).toString()) maxSize = int(index.data(OverviewModel.MaxSize).toString()) status = index.data(OverviewModel.Status).toString() def formatEta(seconds): #TODO add to utils if seconds <= 0: return "" hours, seconds = divmod(seconds, 3600) minutes, seconds = divmod(seconds, 60) return _("ETA: ") + "%.2i:%.2i:%.2i" % (hours, minutes, seconds) statusline = QString(_("Parts: ") + "%s/%s" % (partsf, parts)) if partsf == parts: speedline = _("Finished") elif not status == _("Downloading"): speedline = QString(status) else: speedline = QString(formatEta(eta) + " " + _("Speed: %s") % formatSpeed(speed)) if progress in (0,100): sizeline = QString(_("Size:") + "%s" % formatSize(maxSize)) else: sizeline = QString(_("Size:") + "%s / %s" % (formatSize(currentSize), formatSize(maxSize))) f = painter.font() f.setPointSize(12) f.setBold(True) painter.setFont(f) r = option.rect.adjusted(4, 4, -4, -4) painter.drawText(r.left(), r.top(), r.width(), r.height(), Qt.AlignTop | Qt.AlignLeft, packagename) newr = painter.boundingRect(r.left(), r.top(), r.width(), r.height(), Qt.AlignTop | Qt.AlignLeft, packagename) f.setPointSize(10) f.setBold(False) painter.setFont(f) painter.drawText(r.left(), newr.bottom()+5, r.width(), r.height(), Qt.AlignTop | Qt.AlignLeft, 
statusline) painter.drawText(r.left(), newr.bottom()+5, r.width(), r.height(), Qt.AlignTop | Qt.AlignHCenter, sizeline) painter.drawText(r.left(), newr.bottom()+5, r.width(), r.height(), Qt.AlignTop | Qt.AlignRight, speedline) newr = painter.boundingRect(r.left(), newr.bottom()+2, r.width(), r.height(), Qt.AlignTop | Qt.AlignLeft, statusline) newr.setTop(newr.bottom()+8) newr.setBottom(newr.top()+20) newr.setRight(self.parent.width()-25) f.setPointSize(10) painter.setFont(f) opts = QStyleOptionProgressBarV2() opts.maximum = 100 opts.minimum = 0 opts.progress = progress opts.rect = newr opts.textVisible = True opts.textAlignment = Qt.AlignCenter opts.text = QString.number(opts.progress) + "%" QApplication.style().drawControl(QStyle.CE_ProgressBar, opts, painter) def sizeHint(self, option, index): return QSize(self.parent.width()-22, 59+16)
gpl-3.0
Lilykos/inspire-next
inspire/modules/classifier/utils.py
1
3296
# -*- coding: utf-8 -*- # # This file is part of INSPIRE. # Copyright (C) 2015 CERN. # # INSPIRE is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 2 of the # License, or (at your option) any later version. # # INSPIRE is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with INSPIRE; if not, write to the Free Software Foundation, Inc., # 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. """Utilities for classifier.""" from cPickle import load def get_classification_from_task_results(obj): """Return the classification output from a object's task results.""" tasks_results = obj.get_tasks_results() if "classification" in tasks_results: classification = tasks_results.get("classification")[0] elif "classification_full" in tasks_results: classification = tasks_results.get("classification_full")[0] elif "classification_fast" in tasks_results: classification = tasks_results.get("classification_fast")[0] else: obj.log.info("No classification results found.") return try: return classification.get("result").get("dict").get("complete_output") except AttributeError: obj.log.info("Problem getting classification from {0}.".format( classification )) return def update_classification_in_task_results(obj, output): """Return the classification output from a object's task results.""" tasks_results = obj.get_tasks_results() name = "" if "classification" in tasks_results: classification = tasks_results.get("classification")[0] name = "classification" elif "classification_full" in tasks_results: classification = tasks_results.get("classification_full")[0] name = "classification_full" elif "classification_fast" in 
tasks_results: classification = tasks_results.get("classification_fast")[0] name = "classification_fast" else: obj.log.info("No classification results found.") return try: classification["result"]["dict"]["complete_output"] = output obj.update_task_results(name, [classification]) except AttributeError: obj.log.info("Problem getting classification from {0}.".format( classification )) return def prepare_prediction_record(obj): """Given a workflow object, return compatible prediction record.""" prepared_record = {} prepared_record["title"] = obj.data.get("title.title") prepared_record["abstract"] = obj.data.get("abstract.summary") categories = [] for category in obj.data.get("subject_term"): if category.get("scheme").lower() == "arxiv": categories.append(category.get("term", "")) prepared_record["categories"] = categories return prepared_record def load_model(path_to_object): """Load a pickled prediction model.""" return load(open(path_to_object))
gpl-2.0
afaheem88/tempest
tempest/tests/common/utils/test_file_utils.py
31
1154
# Copyright 2014 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from tempest.common.utils import file_utils from tempest.tests import base class TestFileUtils(base.TestCase): def test_have_effective_read_path(self): with mock.patch('six.moves.builtins.open', mock.mock_open(), create=True): result = file_utils.have_effective_read_access('fake_path') self.assertTrue(result) def test_not_effective_read_path(self): result = file_utils.have_effective_read_access('fake_path') self.assertFalse(result)
apache-2.0
ewindisch/nova
nova/openstack/common/report/__init__.py
77
1098
# Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Provides a way to generate serializable reports This package/module provides mechanisms for defining reports which may then be serialized into various data types. Each report ( :class:`openstack.common.report.report.BasicReport` ) is composed of one or more report sections ( :class:`openstack.common.report.report.BasicSection` ), which contain generators which generate data models ( :class:`openstack.common.report.models.base.ReportModels` ), which are then serialized by views. """
apache-2.0
luxus/home-assistant
tests/components/sensor/test_tcp.py
17
9967
"""The tests for the TCP sensor platform.""" import socket from copy import copy from uuid import uuid4 from unittest.mock import patch, Mock from homeassistant.components.sensor import tcp from homeassistant.helpers.entity import Entity from tests.common import get_test_home_assistant TEST_CONFIG = { tcp.CONF_NAME: "test_name", tcp.CONF_HOST: "test_host", tcp.CONF_PORT: 12345, tcp.CONF_TIMEOUT: tcp.DEFAULT_TIMEOUT + 1, tcp.CONF_PAYLOAD: "test_payload", tcp.CONF_UNIT: "test_unit", tcp.CONF_VALUE_TEMPLATE: "test_template", tcp.CONF_VALUE_ON: "test_on", tcp.CONF_BUFFER_SIZE: tcp.DEFAULT_BUFFER_SIZE + 1 } KEYS_AND_DEFAULTS = { tcp.CONF_NAME: None, tcp.CONF_TIMEOUT: tcp.DEFAULT_TIMEOUT, tcp.CONF_UNIT: None, tcp.CONF_VALUE_TEMPLATE: None, tcp.CONF_VALUE_ON: None, tcp.CONF_BUFFER_SIZE: tcp.DEFAULT_BUFFER_SIZE } @patch('homeassistant.components.sensor.tcp.Sensor.update') def test_setup_platform_valid_config(mock_update): """Should check the supplied config and call add_entities with Sensor.""" add_entities = Mock() ret = tcp.setup_platform(None, TEST_CONFIG, add_entities) assert ret is None, "setup_platform() should return None if successful." 
assert add_entities.called assert isinstance(add_entities.call_args[0][0][0], tcp.Sensor) def test_setup_platform_invalid_config(): """Should check the supplied config and return False if it is invalid.""" config = copy(TEST_CONFIG) del config[tcp.CONF_HOST] assert tcp.setup_platform(None, config, None) is False class TestTCPSensor(): """Test the TCP Sensor.""" def setup_class(cls): """Setup things to be run when tests are started.""" cls.hass = get_test_home_assistant() def teardown_class(cls): """Stop everything that was started.""" cls.hass.stop() @patch('homeassistant.components.sensor.tcp.Sensor.update') def test_name(self, mock_update): """Should return the name if set in the config.""" sensor = tcp.Sensor(self.hass, TEST_CONFIG) assert sensor.name == TEST_CONFIG[tcp.CONF_NAME] @patch('homeassistant.components.sensor.tcp.Sensor.update') def test_name_not_set(self, mock_update): """Should return the superclass name property if not set in config.""" config = copy(TEST_CONFIG) del config[tcp.CONF_NAME] entity = Entity() sensor = tcp.Sensor(self.hass, config) assert sensor.name == entity.name @patch('homeassistant.components.sensor.tcp.Sensor.update') def test_state(self, mock_update): """Should return the contents of _state.""" sensor = tcp.Sensor(self.hass, TEST_CONFIG) uuid = str(uuid4()) sensor._state = uuid assert sensor.state == uuid @patch('homeassistant.components.sensor.tcp.Sensor.update') def test_unit_of_measurement(self, mock_update): """Should return the configured unit of measurement.""" sensor = tcp.Sensor(self.hass, TEST_CONFIG) assert sensor.unit_of_measurement == TEST_CONFIG[tcp.CONF_UNIT] @patch("homeassistant.components.sensor.tcp.Sensor.update") def test_config_valid_keys(self, *args): """Should store valid keys in _config.""" sensor = tcp.Sensor(self.hass, TEST_CONFIG) for key in TEST_CONFIG: assert key in sensor._config def test_validate_config_valid_keys(self): """Should return True when provided with the correct keys.""" assert 
tcp.Sensor.validate_config(TEST_CONFIG) @patch("homeassistant.components.sensor.tcp.Sensor.update") def test_config_invalid_keys(self, mock_update): """Shouldn't store invalid keys in _config.""" config = copy(TEST_CONFIG) config.update({ "a": "test_a", "b": "test_b", "c": "test_c" }) sensor = tcp.Sensor(self.hass, config) for invalid_key in "abc": assert invalid_key not in sensor._config def test_validate_config_invalid_keys(self): """Test with invalid keys plus some extra.""" config = copy(TEST_CONFIG) config.update({ "a": "test_a", "b": "test_b", "c": "test_c" }) assert tcp.Sensor.validate_config(config) @patch("homeassistant.components.sensor.tcp.Sensor.update") def test_config_uses_defaults(self, mock_update): """Should use defaults where appropriate.""" config = copy(TEST_CONFIG) for key in KEYS_AND_DEFAULTS.keys(): del config[key] sensor = tcp.Sensor(self.hass, config) for key, default in KEYS_AND_DEFAULTS.items(): assert sensor._config[key] == default def test_validate_config_missing_defaults(self): """Should return True when defaulted keys are not provided.""" config = copy(TEST_CONFIG) for key in KEYS_AND_DEFAULTS.keys(): del config[key] assert tcp.Sensor.validate_config(config) def test_validate_config_missing_required(self): """Should return False when required config items are missing.""" for key in TEST_CONFIG: if key in KEYS_AND_DEFAULTS: continue config = copy(TEST_CONFIG) del config[key] assert not tcp.Sensor.validate_config(config), ( "validate_config() should have returned False since %r was not" "provided." 
% key) @patch("homeassistant.components.sensor.tcp.Sensor.update") def test_init_calls_update(self, mock_update): """Should call update() method during __init__().""" tcp.Sensor(self.hass, TEST_CONFIG) assert mock_update.called @patch("socket.socket") @patch("select.select", return_value=(True, False, False)) def test_update_connects_to_host_and_port(self, mock_select, mock_socket): """Should connect to the configured host and port.""" tcp.Sensor(self.hass, TEST_CONFIG) mock_socket = mock_socket().__enter__() assert mock_socket.connect.mock_calls[0][1] == (( TEST_CONFIG[tcp.CONF_HOST], TEST_CONFIG[tcp.CONF_PORT]),) @patch("socket.socket.connect", side_effect=socket.error()) def test_update_returns_if_connecting_fails(self, *args): """Should return if connecting to host fails.""" with patch("homeassistant.components.sensor.tcp.Sensor.update"): sensor = tcp.Sensor(self.hass, TEST_CONFIG) assert sensor.update() is None @patch("socket.socket.connect") @patch("socket.socket.send", side_effect=socket.error()) def test_update_returns_if_sending_fails(self, *args): """Should return if sending fails.""" with patch("homeassistant.components.sensor.tcp.Sensor.update"): sensor = tcp.Sensor(self.hass, TEST_CONFIG) assert sensor.update() is None @patch("socket.socket.connect") @patch("socket.socket.send") @patch("select.select", return_value=(False, False, False)) def test_update_returns_if_select_fails(self, *args): """Should return if select fails to return a socket.""" with patch("homeassistant.components.sensor.tcp.Sensor.update"): sensor = tcp.Sensor(self.hass, TEST_CONFIG) assert sensor.update() is None @patch("socket.socket") @patch("select.select", return_value=(True, False, False)) def test_update_sends_payload(self, mock_select, mock_socket): """Should send the configured payload as bytes.""" tcp.Sensor(self.hass, TEST_CONFIG) mock_socket = mock_socket().__enter__() mock_socket.send.assert_called_with( TEST_CONFIG[tcp.CONF_PAYLOAD].encode() ) @patch("socket.socket") 
@patch("select.select", return_value=(True, False, False)) def test_update_calls_select_with_timeout(self, mock_select, mock_socket): """Should provide the timeout argument to select.""" tcp.Sensor(self.hass, TEST_CONFIG) mock_socket = mock_socket().__enter__() mock_select.assert_called_with( [mock_socket], [], [], TEST_CONFIG[tcp.CONF_TIMEOUT]) @patch("socket.socket") @patch("select.select", return_value=(True, False, False)) def test_update_receives_packet_and_sets_as_state( self, mock_select, mock_socket): """Test the response from the socket and set it as the state.""" test_value = "test_value" mock_socket = mock_socket().__enter__() mock_socket.recv.return_value = test_value.encode() config = copy(TEST_CONFIG) del config[tcp.CONF_VALUE_TEMPLATE] sensor = tcp.Sensor(self.hass, config) assert sensor._state == test_value @patch("socket.socket") @patch("select.select", return_value=(True, False, False)) def test_update_renders_value_in_template(self, mock_select, mock_socket): """Should render the value in the provided template.""" test_value = "test_value" mock_socket = mock_socket().__enter__() mock_socket.recv.return_value = test_value.encode() config = copy(TEST_CONFIG) config[tcp.CONF_VALUE_TEMPLATE] = "{{ value }} {{ 1+1 }}" sensor = tcp.Sensor(self.hass, config) assert sensor._state == "%s 2" % test_value @patch("socket.socket") @patch("select.select", return_value=(True, False, False)) def test_update_returns_if_template_render_fails( self, mock_select, mock_socket): """Should return None if rendering the template fails.""" test_value = "test_value" mock_socket = mock_socket().__enter__() mock_socket.recv.return_value = test_value.encode() config = copy(TEST_CONFIG) config[tcp.CONF_VALUE_TEMPLATE] = "{{ this won't work" sensor = tcp.Sensor(self.hass, config) assert sensor.update() is None
mit
gauribhoite/personfinder
env/google_appengine/google/appengine/ext/remote_api/remote_api_services.py
6
18078
#!/usr/bin/env python # # Copyright 2007 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """Service configuration for remote API. This module is shared by both the remote_api_stub and the handler. """ import sys from google.appengine.api import api_base_pb from google.appengine.api import mail_service_pb from google.appengine.api import urlfetch_service_pb from google.appengine.api import user_service_pb from google.appengine.api.app_identity import app_identity_service_pb from google.appengine.api.blobstore import blobstore_service_pb from google.appengine.api.capabilities import capability_service_pb from google.appengine.api.channel import channel_service_pb from google.appengine.api.files import file_service_pb from google.appengine.api.images import images_service_pb from google.appengine.api.logservice import log_service_pb from google.appengine.api.memcache import memcache_service_pb from google.appengine.api.modules import modules_service_pb from google.appengine.api.prospective_search import prospective_search_pb from google.appengine.api.remote_socket import remote_socket_service_pb from google.appengine.api.search import search_service_pb from google.appengine.api.system import system_service_pb from google.appengine.api.taskqueue import taskqueue_service_pb from google.appengine.api.xmpp import xmpp_service_pb from google.appengine.datastore import datastore_pb from google.appengine.datastore import datastore_v4_pb from google.appengine.ext.remote_api 
import remote_api_pb SERVICE_PB_MAP = { 'app_identity_service': { 'SignForApp': (app_identity_service_pb.SignForAppRequest, app_identity_service_pb.SignForAppResponse), 'GetPublicCertificatesForApp': ( app_identity_service_pb.GetPublicCertificateForAppRequest, app_identity_service_pb.GetPublicCertificateForAppResponse), 'GetServiceAccountName': ( app_identity_service_pb.GetServiceAccountNameRequest, app_identity_service_pb.GetServiceAccountNameResponse), 'GetDefaultGcsBucketName': ( app_identity_service_pb.GetDefaultGcsBucketNameRequest, app_identity_service_pb.GetDefaultGcsBucketNameResponse), 'GetAccessToken': (app_identity_service_pb.GetAccessTokenRequest, app_identity_service_pb.GetAccessTokenResponse), }, 'blobstore': { 'CreateUploadURL': (blobstore_service_pb.CreateUploadURLRequest, blobstore_service_pb.CreateUploadURLResponse), 'DeleteBlob': (blobstore_service_pb.DeleteBlobRequest, api_base_pb.VoidProto), 'FetchData': (blobstore_service_pb.FetchDataRequest, blobstore_service_pb.FetchDataResponse), 'DecodeBlobKey': (blobstore_service_pb.DecodeBlobKeyRequest, blobstore_service_pb.DecodeBlobKeyResponse), 'CreateEncodedGoogleStorageKey': (blobstore_service_pb.CreateEncodedGoogleStorageKeyRequest, blobstore_service_pb.CreateEncodedGoogleStorageKeyResponse), }, 'capability_service': { 'IsEnabled': (capability_service_pb.IsEnabledRequest, capability_service_pb.IsEnabledResponse), }, 'channel': { 'CreateChannel': (channel_service_pb.CreateChannelRequest, channel_service_pb.CreateChannelResponse), 'SendChannelMessage': (channel_service_pb.SendMessageRequest, api_base_pb.VoidProto), }, 'datastore_v3': { 'Get': (datastore_pb.GetRequest, datastore_pb.GetResponse), 'Put': (datastore_pb.PutRequest, datastore_pb.PutResponse), 'Delete': (datastore_pb.DeleteRequest, datastore_pb.DeleteResponse), 'AllocateIds':(datastore_pb.AllocateIdsRequest, datastore_pb.AllocateIdsResponse), 'RunQuery': (datastore_pb.Query, datastore_pb.QueryResult), 'Next': (datastore_pb.NextRequest, 
datastore_pb.QueryResult), 'BeginTransaction':(datastore_pb.BeginTransactionRequest, datastore_pb.Transaction), 'Commit': (datastore_pb.Transaction, datastore_pb.CommitResponse), 'Rollback': (datastore_pb.Transaction, api_base_pb.VoidProto), 'GetIndices': (api_base_pb.StringProto, datastore_pb.CompositeIndices), }, 'datastore_v4': { 'AllocateIds': (datastore_v4_pb.AllocateIdsRequest, datastore_v4_pb.AllocateIdsResponse), }, 'file': { 'Create': (file_service_pb.CreateRequest, file_service_pb.CreateResponse), 'Open': (file_service_pb.OpenRequest, file_service_pb.OpenResponse), 'Close': (file_service_pb.CloseRequest, file_service_pb.CloseResponse), 'Append': (file_service_pb.AppendRequest, file_service_pb.AppendResponse), 'Stat': (file_service_pb.StatRequest, file_service_pb.StatResponse), 'Delete': (file_service_pb.DeleteRequest, file_service_pb.DeleteResponse), 'Read': (file_service_pb.ReadRequest, file_service_pb.ReadResponse), 'ReadKeyValue': (file_service_pb.ReadKeyValueRequest, file_service_pb.ReadKeyValueResponse), 'Shuffle': (file_service_pb.ShuffleRequest, file_service_pb.ShuffleResponse), 'GetShuffleStatus': (file_service_pb.GetShuffleStatusRequest, file_service_pb.GetShuffleStatusResponse), 'GetCapabilities': (file_service_pb.GetCapabilitiesRequest, file_service_pb.GetCapabilitiesResponse), 'GetDefaultGsBucketName': (file_service_pb.GetDefaultGsBucketNameRequest, file_service_pb.GetDefaultGsBucketNameResponse), 'ListDir': (file_service_pb.ListDirRequest, file_service_pb.ListDirResponse), }, 'images': { 'Transform': (images_service_pb.ImagesTransformRequest, images_service_pb.ImagesTransformResponse), 'Composite': (images_service_pb.ImagesCompositeRequest, images_service_pb.ImagesCompositeResponse), 'Histogram': (images_service_pb.ImagesHistogramRequest, images_service_pb.ImagesHistogramResponse), 'GetUrlBase': (images_service_pb.ImagesGetUrlBaseRequest, images_service_pb.ImagesGetUrlBaseResponse), 'DeleteUrlBase': 
(images_service_pb.ImagesDeleteUrlBaseRequest, images_service_pb.ImagesDeleteUrlBaseResponse), }, 'logservice': { 'Flush': (log_service_pb.FlushRequest, api_base_pb.VoidProto), 'SetStatus': (log_service_pb.SetStatusRequest, api_base_pb.VoidProto), 'Read': (log_service_pb.LogReadRequest, log_service_pb.LogReadResponse), }, 'mail': { 'Send': (mail_service_pb.MailMessage, api_base_pb.VoidProto), 'SendToAdmins': (mail_service_pb.MailMessage, api_base_pb.VoidProto), }, 'matcher': { 'Subscribe': (prospective_search_pb.SubscribeRequest, prospective_search_pb.SubscribeResponse), 'Unsubscribe': (prospective_search_pb.UnsubscribeRequest, prospective_search_pb.UnsubscribeResponse), 'ListSubscriptions': (prospective_search_pb.ListSubscriptionsRequest, prospective_search_pb.ListSubscriptionsResponse), 'ListTopics': (prospective_search_pb.ListTopicsRequest, prospective_search_pb.ListTopicsResponse), 'Match': (prospective_search_pb.MatchRequest, prospective_search_pb.MatchResponse), }, 'memcache': { 'Get': (memcache_service_pb.MemcacheGetRequest, memcache_service_pb.MemcacheGetResponse), 'Set': (memcache_service_pb.MemcacheSetRequest, memcache_service_pb.MemcacheSetResponse), 'Delete': (memcache_service_pb.MemcacheDeleteRequest, memcache_service_pb.MemcacheDeleteResponse), 'Increment': (memcache_service_pb.MemcacheIncrementRequest, memcache_service_pb.MemcacheIncrementResponse), 'BatchIncrement': (memcache_service_pb.MemcacheBatchIncrementRequest, memcache_service_pb.MemcacheBatchIncrementResponse), 'FlushAll': (memcache_service_pb.MemcacheFlushRequest, memcache_service_pb.MemcacheFlushResponse), 'Stats': (memcache_service_pb.MemcacheStatsRequest, memcache_service_pb.MemcacheStatsResponse), }, 'remote_datastore': { 'RunQuery': (datastore_pb.Query, datastore_pb.QueryResult), 'TransactionQuery': (datastore_pb.Query, remote_api_pb.TransactionQueryResult), 'Transaction': (remote_api_pb.TransactionRequest, datastore_pb.PutResponse), 'GetIDs': (datastore_pb.PutRequest, 
datastore_pb.PutResponse), 'GetIDsXG': (datastore_pb.PutRequest, datastore_pb.PutResponse), }, 'remote_socket': { 'CreateSocket': (remote_socket_service_pb.CreateSocketRequest, remote_socket_service_pb.CreateSocketReply), 'Bind': (remote_socket_service_pb.BindRequest, remote_socket_service_pb.BindReply), 'GetSocketName': (remote_socket_service_pb.GetSocketNameRequest, remote_socket_service_pb.GetSocketNameReply), 'GetPeerName': (remote_socket_service_pb.GetPeerNameRequest, remote_socket_service_pb.GetPeerNameReply), 'SetSocketOptions': (remote_socket_service_pb.SetSocketOptionsRequest, remote_socket_service_pb.SetSocketOptionsReply), 'GetSocketOptions': (remote_socket_service_pb.GetSocketOptionsRequest, remote_socket_service_pb.GetSocketOptionsReply), 'Connect': (remote_socket_service_pb.ConnectRequest, remote_socket_service_pb.ConnectReply), 'Listen': (remote_socket_service_pb.ListenRequest, remote_socket_service_pb.ListenReply), 'Accept': (remote_socket_service_pb.AcceptRequest, remote_socket_service_pb.AcceptReply), 'ShutDown': (remote_socket_service_pb.ShutDownRequest, remote_socket_service_pb.ShutDownReply), 'Close': (remote_socket_service_pb.CloseRequest, remote_socket_service_pb.CloseReply), 'Send': (remote_socket_service_pb.SendRequest, remote_socket_service_pb.SendReply), 'Receive': (remote_socket_service_pb.ReceiveRequest, remote_socket_service_pb.ReceiveReply), 'Poll': (remote_socket_service_pb.PollRequest, remote_socket_service_pb.PollReply), 'Resolve': (remote_socket_service_pb.ResolveRequest, remote_socket_service_pb.ResolveReply), }, 'search': { 'IndexDocument': (search_service_pb.IndexDocumentRequest, search_service_pb.IndexDocumentResponse), 'DeleteDocument': (search_service_pb.DeleteDocumentRequest, search_service_pb.DeleteDocumentResponse), 'ListDocuments': (search_service_pb.ListDocumentsRequest, search_service_pb.ListDocumentsResponse), 'ListIndexes': (search_service_pb.ListIndexesRequest, search_service_pb.ListIndexesResponse), 'Search': 
(search_service_pb.SearchRequest, search_service_pb.SearchResponse), 'DeleteSchema': (search_service_pb.DeleteSchemaRequest, search_service_pb.DeleteSchemaResponse), }, 'modules': { 'GetModules': (modules_service_pb.GetModulesRequest, modules_service_pb.GetModulesResponse), 'GetVersions': (modules_service_pb.GetVersionsRequest, modules_service_pb.GetVersionsResponse), 'GetDefaultVersion': (modules_service_pb.GetDefaultVersionRequest, modules_service_pb.GetDefaultVersionResponse), 'GetNumInstances': (modules_service_pb.GetNumInstancesRequest, modules_service_pb.GetNumInstancesResponse), 'SetNumInstances': (modules_service_pb.SetNumInstancesRequest, modules_service_pb.SetNumInstancesResponse), 'StartModule': (modules_service_pb.StartModuleRequest, modules_service_pb.StartModuleResponse), 'StopModule': (modules_service_pb.StopModuleRequest, modules_service_pb.StopModuleResponse), 'GetHostname': (modules_service_pb.GetHostnameRequest, modules_service_pb.GetHostnameResponse), }, 'system': { 'GetSystemStats': (system_service_pb.GetSystemStatsRequest, system_service_pb.GetSystemStatsResponse), 'StartBackgroundRequest': ( system_service_pb.StartBackgroundRequestRequest, system_service_pb.StartBackgroundRequestResponse), }, 'taskqueue': { 'Add': (taskqueue_service_pb.TaskQueueAddRequest, taskqueue_service_pb.TaskQueueAddResponse), 'BulkAdd': (taskqueue_service_pb.TaskQueueBulkAddRequest, taskqueue_service_pb.TaskQueueBulkAddResponse), 'FetchQueues': (taskqueue_service_pb.TaskQueueFetchQueuesRequest, taskqueue_service_pb.TaskQueueFetchQueuesResponse), 'FetchQueueStats': ( taskqueue_service_pb.TaskQueueFetchQueueStatsRequest, taskqueue_service_pb.TaskQueueFetchQueueStatsResponse), 'Delete': (taskqueue_service_pb.TaskQueueDeleteRequest, taskqueue_service_pb.TaskQueueDeleteResponse), 'ForceRun': (taskqueue_service_pb.TaskQueueForceRunRequest, taskqueue_service_pb.TaskQueueForceRunResponse), 'UpdateQueue': (taskqueue_service_pb.TaskQueueUpdateQueueRequest, 
taskqueue_service_pb.TaskQueueUpdateQueueResponse), 'PauseQueue': (taskqueue_service_pb.TaskQueuePauseQueueRequest, taskqueue_service_pb.TaskQueuePauseQueueResponse), 'PurgeQueue': (taskqueue_service_pb.TaskQueuePurgeQueueRequest, taskqueue_service_pb.TaskQueuePurgeQueueResponse), 'DeleteQueue': (taskqueue_service_pb.TaskQueueDeleteQueueRequest, taskqueue_service_pb.TaskQueueDeleteQueueResponse), 'DeleteGroup': (taskqueue_service_pb.TaskQueueDeleteGroupRequest, taskqueue_service_pb.TaskQueueDeleteGroupResponse), 'QueryTasks': (taskqueue_service_pb.TaskQueueQueryTasksRequest, taskqueue_service_pb.TaskQueueQueryTasksResponse), 'FetchTask': (taskqueue_service_pb.TaskQueueFetchTaskRequest, taskqueue_service_pb.TaskQueueFetchTaskResponse), 'QueryAndOwnTasks': ( taskqueue_service_pb.TaskQueueQueryAndOwnTasksRequest, taskqueue_service_pb.TaskQueueQueryAndOwnTasksResponse), 'ModifyTaskLease': ( taskqueue_service_pb.TaskQueueModifyTaskLeaseRequest, taskqueue_service_pb.TaskQueueModifyTaskLeaseResponse), 'UpdateStorageLimit': ( taskqueue_service_pb.TaskQueueUpdateStorageLimitRequest, taskqueue_service_pb.TaskQueueUpdateStorageLimitResponse), }, 'urlfetch': { 'Fetch': (urlfetch_service_pb.URLFetchRequest, urlfetch_service_pb.URLFetchResponse), }, 'user': { 'CreateLoginURL': (user_service_pb.CreateLoginURLRequest, user_service_pb.CreateLoginURLResponse), 'CreateLogoutURL': (user_service_pb.CreateLogoutURLRequest, user_service_pb.CreateLogoutURLResponse), 'GetOAuthUser': (user_service_pb.GetOAuthUserRequest, user_service_pb.GetOAuthUserResponse), 'CheckOAuthSignature': (user_service_pb.CheckOAuthSignatureRequest, user_service_pb.CheckOAuthSignatureResponse), }, 'xmpp': { 'GetPresence': (xmpp_service_pb.PresenceRequest, xmpp_service_pb.PresenceResponse), 'BulkGetPresence': (xmpp_service_pb.BulkPresenceRequest, xmpp_service_pb.BulkPresenceResponse), 'SendMessage': (xmpp_service_pb.XmppMessageRequest, xmpp_service_pb.XmppMessageResponse), 'SendInvite': 
(xmpp_service_pb.XmppInviteRequest, xmpp_service_pb.XmppInviteResponse), 'SendPresence': (xmpp_service_pb.XmppSendPresenceRequest, xmpp_service_pb.XmppSendPresenceResponse), 'CreateChannel': (channel_service_pb.CreateChannelRequest, channel_service_pb.CreateChannelResponse), 'SendChannelMessage': (channel_service_pb.SendMessageRequest, api_base_pb.VoidProto), }, }
apache-2.0
PhiInnovations/mdp28-linux-bsp
bitbake/lib/ply/yacc.py
18
128492
# ----------------------------------------------------------------------------- # ply: yacc.py # # Copyright (C) 2001-2009, # David M. Beazley (Dabeaz LLC) # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # * Neither the name of the David Beazley or Dabeaz LLC may be used to # endorse or promote products derived from this software without # specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ----------------------------------------------------------------------------- # # This implements an LR parser that is constructed from grammar rules defined # as Python functions. The grammer is specified by supplying the BNF inside # Python documentation strings. The inspiration for this technique was borrowed # from John Aycock's Spark parsing system. 
PLY might be viewed as cross between # Spark and the GNU bison utility. # # The current implementation is only somewhat object-oriented. The # LR parser itself is defined in terms of an object (which allows multiple # parsers to co-exist). However, most of the variables used during table # construction are defined in terms of global variables. Users shouldn't # notice unless they are trying to define multiple parsers at the same # time using threads (in which case they should have their head examined). # # This implementation supports both SLR and LALR(1) parsing. LALR(1) # support was originally implemented by Elias Ioup (ezioup@alumni.uchicago.edu), # using the algorithm found in Aho, Sethi, and Ullman "Compilers: Principles, # Techniques, and Tools" (The Dragon Book). LALR(1) has since been replaced # by the more efficient DeRemer and Pennello algorithm. # # :::::::: WARNING ::::::: # # Construction of LR parsing tables is fairly complicated and expensive. # To make this module run fast, a *LOT* of work has been put into # optimization---often at the expensive of readability and what might # consider to be good Python "coding style." Modify the code at your # own risk! # ---------------------------------------------------------------------------- __version__ = "3.3" __tabversion__ = "3.2" # Table version #----------------------------------------------------------------------------- # === User configurable parameters === # # Change these to modify the default behavior of yacc (if you wish) #----------------------------------------------------------------------------- yaccdebug = 0 # Debugging mode. 
If set, yacc generates a # a 'parser.out' file in the current directory debug_file = 'parser.out' # Default name of the debugging file tab_module = 'parsetab' # Default name of the table module default_lr = 'LALR' # Default LR table generation method error_count = 3 # Number of symbols that must be shifted to leave recovery mode yaccdevel = 0 # Set to True if developing yacc. This turns off optimized # implementations of certain functions. resultlimit = 40 # Size limit of results when running in debug mode. pickle_protocol = 0 # Protocol to use when writing pickle files import re, types, sys, os.path # Compatibility function for python 2.6/3.0 if sys.version_info[0] < 3: def func_code(f): return f.func_code else: def func_code(f): return f.__code__ # Compatibility try: MAXINT = sys.maxint except AttributeError: MAXINT = sys.maxsize # Python 2.x/3.0 compatibility. def load_ply_lex(): if sys.version_info[0] < 3: import lex else: import ply.lex as lex return lex # This object is a stand-in for a logging object created by the # logging module. PLY will use this by default to create things # such as the parser.out file. If a user wants more detailed # information, they can create their own logging object and pass # it into PLY. class PlyLogger(object): def __init__(self,f): self.f = f def debug(self,msg,*args,**kwargs): self.f.write((msg % args) + "\n") info = debug def warning(self,msg,*args,**kwargs): self.f.write("WARNING: "+ (msg % args) + "\n") def error(self,msg,*args,**kwargs): self.f.write("ERROR: " + (msg % args) + "\n") critical = debug # Null logger is used when no output is generated. Does nothing. class NullLogger(object): def __getattribute__(self,name): return self def __call__(self,*args,**kwargs): return self # Exception raised for yacc-related errors class YaccError(Exception): pass # Format the result message that the parser produces when running in debug mode. 
def format_result(r): repr_str = repr(r) if '\n' in repr_str: repr_str = repr(repr_str) if len(repr_str) > resultlimit: repr_str = repr_str[:resultlimit]+" ..." result = "<%s @ 0x%x> (%s)" % (type(r).__name__,id(r),repr_str) return result # Format stack entries when the parser is running in debug mode def format_stack_entry(r): repr_str = repr(r) if '\n' in repr_str: repr_str = repr(repr_str) if len(repr_str) < 16: return repr_str else: return "<%s @ 0x%x>" % (type(r).__name__,id(r)) #----------------------------------------------------------------------------- # === LR Parsing Engine === # # The following classes are used for the LR parser itself. These are not # used during table construction and are independent of the actual LR # table generation algorithm #----------------------------------------------------------------------------- # This class is used to hold non-terminal grammar symbols during parsing. # It normally has the following attributes set: # .type = Grammar symbol type # .value = Symbol value # .lineno = Starting line number # .endlineno = Ending line number (optional, set automatically) # .lexpos = Starting lex position # .endlexpos = Ending lex position (optional, set automatically) class YaccSymbol: def __str__(self): return self.type def __repr__(self): return str(self) # This class is a wrapper around the objects actually passed to each # grammar rule. Index lookup and assignment actually assign the # .value attribute of the underlying YaccSymbol object. # The lineno() method returns the line number of a given # item (or 0 if not defined). The linespan() method returns # a tuple of (startline,endline) representing the range of lines # for a symbol. The lexspan() method returns a tuple (lexpos,endlexpos) # representing the range of positional information for a symbol. 
class YaccProduction: def __init__(self,s,stack=None): self.slice = s self.stack = stack self.lexer = None self.parser= None def __getitem__(self,n): if n >= 0: return self.slice[n].value else: return self.stack[n].value def __setitem__(self,n,v): self.slice[n].value = v def __getslice__(self,i,j): return [s.value for s in self.slice[i:j]] def __len__(self): return len(self.slice) def lineno(self,n): return getattr(self.slice[n],"lineno",0) def set_lineno(self,n,lineno): self.slice[n].lineno = lineno def linespan(self,n): startline = getattr(self.slice[n],"lineno",0) endline = getattr(self.slice[n],"endlineno",startline) return startline,endline def lexpos(self,n): return getattr(self.slice[n],"lexpos",0) def lexspan(self,n): startpos = getattr(self.slice[n],"lexpos",0) endpos = getattr(self.slice[n],"endlexpos",startpos) return startpos,endpos def error(self): raise SyntaxError # ----------------------------------------------------------------------------- # == LRParser == # # The LR Parsing engine. # ----------------------------------------------------------------------------- class LRParser: def __init__(self,lrtab,errorf): self.productions = lrtab.lr_productions self.action = lrtab.lr_action self.goto = lrtab.lr_goto self.errorfunc = errorf def errok(self): self.errorok = 1 def restart(self): del self.statestack[:] del self.symstack[:] sym = YaccSymbol() sym.type = '$end' self.symstack.append(sym) self.statestack.append(0) def parse(self,input=None,lexer=None,debug=0,tracking=0,tokenfunc=None): if debug or yaccdevel: if isinstance(debug,int): debug = PlyLogger(sys.stderr) return self.parsedebug(input,lexer,debug,tracking,tokenfunc) elif tracking: return self.parseopt(input,lexer,debug,tracking,tokenfunc) else: return self.parseopt_notrack(input,lexer,debug,tracking,tokenfunc) # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # parsedebug(). # # This is the debugging enabled version of parse(). 
All changes made to the # parsing engine should be made here. For the non-debugging version, # copy this code to a method parseopt() and delete all of the sections # enclosed in: # # #--! DEBUG # statements # #--! DEBUG # # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! def parsedebug(self,input=None,lexer=None,debug=None,tracking=0,tokenfunc=None): lookahead = None # Current lookahead symbol lookaheadstack = [ ] # Stack of lookahead symbols actions = self.action # Local reference to action table (to avoid lookup on self.) goto = self.goto # Local reference to goto table (to avoid lookup on self.) prod = self.productions # Local reference to production list (to avoid lookup on self.) pslice = YaccProduction(None) # Production object passed to grammar rules errorcount = 0 # Used during error recovery # --! DEBUG debug.info("PLY: PARSE DEBUG START") # --! DEBUG # If no lexer was given, we will try to use the lex module if not lexer: lex = load_ply_lex() lexer = lex.lexer # Set up the lexer and parser objects on pslice pslice.lexer = lexer pslice.parser = self # If input was supplied, pass to lexer if input is not None: lexer.input(input) if tokenfunc is None: # Tokenize function get_token = lexer.token else: get_token = tokenfunc # Set up the state and symbol stacks statestack = [ ] # Stack of parsing states self.statestack = statestack symstack = [ ] # Stack of grammar symbols self.symstack = symstack pslice.stack = symstack # Put in the production errtoken = None # Err token # The start state is assumed to be (0,$end) statestack.append(0) sym = YaccSymbol() sym.type = "$end" symstack.append(sym) state = 0 while 1: # Get the next symbol on the input. If a lookahead symbol # is already set, we just use that. Otherwise, we'll pull # the next token off of the lookaheadstack or from the lexer # --! DEBUG debug.debug('') debug.debug('State : %s', state) # --! 
DEBUG if not lookahead: if not lookaheadstack: lookahead = get_token() # Get the next token else: lookahead = lookaheadstack.pop() if not lookahead: lookahead = YaccSymbol() lookahead.type = "$end" # --! DEBUG debug.debug('Stack : %s', ("%s . %s" % (" ".join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip()) # --! DEBUG # Check the action table ltype = lookahead.type t = actions[state].get(ltype) if t is not None: if t > 0: # shift a symbol on the stack statestack.append(t) state = t # --! DEBUG debug.debug("Action : Shift and goto state %s", t) # --! DEBUG symstack.append(lookahead) lookahead = None # Decrease error count on successful shift if errorcount: errorcount -=1 continue if t < 0: # reduce a symbol on the stack, emit a production p = prod[-t] pname = p.name plen = p.len # Get production function sym = YaccSymbol() sym.type = pname # Production name sym.value = None # --! DEBUG if plen: debug.info("Action : Reduce rule [%s] with %s and goto state %d", p.str, "["+",".join([format_stack_entry(_v.value) for _v in symstack[-plen:]])+"]",-t) else: debug.info("Action : Reduce rule [%s] with %s and goto state %d", p.str, [],-t) # --! DEBUG if plen: targ = symstack[-plen-1:] targ[0] = sym # --! TRACKING if tracking: t1 = targ[1] sym.lineno = t1.lineno sym.lexpos = t1.lexpos t1 = targ[-1] sym.endlineno = getattr(t1,"endlineno",t1.lineno) sym.endlexpos = getattr(t1,"endlexpos",t1.lexpos) # --! TRACKING # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # The code enclosed in this section is duplicated # below as a performance optimization. Make sure # changes get made in both locations. pslice.slice = targ try: # Call the grammar rule with our special slice object del symstack[-plen:] del statestack[-plen:] p.callable(pslice) # --! DEBUG debug.info("Result : %s", format_result(pslice[0])) # --! DEBUG symstack.append(sym) state = goto[statestack[-1]][pname] statestack.append(state) except SyntaxError: # If an error was set. 
Enter error recovery state lookaheadstack.append(lookahead) symstack.pop() statestack.pop() state = statestack[-1] sym.type = 'error' lookahead = sym errorcount = error_count self.errorok = 0 continue # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! else: # --! TRACKING if tracking: sym.lineno = lexer.lineno sym.lexpos = lexer.lexpos # --! TRACKING targ = [ sym ] # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # The code enclosed in this section is duplicated # above as a performance optimization. Make sure # changes get made in both locations. pslice.slice = targ try: # Call the grammar rule with our special slice object p.callable(pslice) # --! DEBUG debug.info("Result : %s", format_result(pslice[0])) # --! DEBUG symstack.append(sym) state = goto[statestack[-1]][pname] statestack.append(state) except SyntaxError: # If an error was set. Enter error recovery state lookaheadstack.append(lookahead) symstack.pop() statestack.pop() state = statestack[-1] sym.type = 'error' lookahead = sym errorcount = error_count self.errorok = 0 continue # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! if t == 0: n = symstack[-1] result = getattr(n,"value",None) # --! DEBUG debug.info("Done : Returning %s", format_result(result)) debug.info("PLY: PARSE DEBUG END") # --! DEBUG return result if t == None: # --! DEBUG debug.error('Error : %s', ("%s . %s" % (" ".join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip()) # --! DEBUG # We have some kind of parsing error here. To handle # this, we are going to push the current token onto # the tokenstack and replace it with an 'error' token. # If there are any synchronization rules, they may # catch it. # # In addition to pushing the error token, we call call # the user defined p_error() function if this is the # first syntax error. This function is only called if # errorcount == 0. 
if errorcount == 0 or self.errorok: errorcount = error_count self.errorok = 0 errtoken = lookahead if errtoken.type == "$end": errtoken = None # End of file! if self.errorfunc: global errok,token,restart errok = self.errok # Set some special functions available in error recovery token = get_token restart = self.restart if errtoken and not hasattr(errtoken,'lexer'): errtoken.lexer = lexer tok = self.errorfunc(errtoken) del errok, token, restart # Delete special functions if self.errorok: # User must have done some kind of panic # mode recovery on their own. The # returned token is the next lookahead lookahead = tok errtoken = None continue else: if errtoken: if hasattr(errtoken,"lineno"): lineno = lookahead.lineno else: lineno = 0 if lineno: sys.stderr.write("yacc: Syntax error at line %d, token=%s\n" % (lineno, errtoken.type)) else: sys.stderr.write("yacc: Syntax error, token=%s" % errtoken.type) else: sys.stderr.write("yacc: Parse error in input. EOF\n") return else: errorcount = error_count # case 1: the statestack only has 1 entry on it. If we're in this state, the # entire parse has been rolled back and we're completely hosed. The token is # discarded and we just keep going. if len(statestack) <= 1 and lookahead.type != "$end": lookahead = None errtoken = None state = 0 # Nuke the pushback stack del lookaheadstack[:] continue # case 2: the statestack has a couple of entries on it, but we're # at the end of the file. nuke the top entry and generate an error token # Start nuking entries on the stack if lookahead.type == "$end": # Whoa. We're really hosed here. Bail out return if lookahead.type != 'error': sym = symstack[-1] if sym.type == 'error': # Hmmm. 
                    # Error is on top of stack, we'll just nuke input
                    # symbol and continue
                    lookahead = None
                    continue
                t = YaccSymbol()
                t.type = 'error'
                if hasattr(lookahead,"lineno"):
                    t.lineno = lookahead.lineno
                t.value = lookahead
                lookaheadstack.append(lookahead)
                lookahead = t
            else:
                symstack.pop()
                statestack.pop()
                state = statestack[-1]       # Potential bug fix

            continue

        # Call an error function here
        raise RuntimeError("yacc: internal parser error!!!\n")

    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    # parseopt().
    #
    # Optimized version of parse() method.  DO NOT EDIT THIS CODE DIRECTLY.
    # Edit the debug version above, then copy any modifications to the method
    # below while removing #--! DEBUG sections.
    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

    def parseopt(self,input=None,lexer=None,debug=0,tracking=0,tokenfunc=None):
        """Optimized LR parsing engine with line/position tracking support.

        Behaves identically to parse() but with all debug logging stripped
        out for speed.  See the debug version for the annotated reference.
        """
        lookahead = None                 # Current lookahead symbol
        lookaheadstack = [ ]             # Stack of lookahead symbols
        actions = self.action            # Local reference to action table (to avoid lookup on self.)
        goto    = self.goto              # Local reference to goto table (to avoid lookup on self.)
        prod    = self.productions       # Local reference to production list (to avoid lookup on self.)
        pslice  = YaccProduction(None)   # Production object passed to grammar rules
        errorcount = 0                   # Used during error recovery

        # If no lexer was given, we will try to use the lex module
        if not lexer:
            lex = load_ply_lex()
            lexer = lex.lexer

        # Set up the lexer and parser objects on pslice
        pslice.lexer = lexer
        pslice.parser = self

        # If input was supplied, pass to lexer
        if input is not None:
            lexer.input(input)

        if tokenfunc is None:
            # Tokenize function
            get_token = lexer.token
        else:
            get_token = tokenfunc

        # Set up the state and symbol stacks
        statestack = [ ]                 # Stack of parsing states
        self.statestack = statestack
        symstack   = [ ]                 # Stack of grammar symbols
        self.symstack = symstack

        pslice.stack = symstack          # Put in the production
        errtoken   = None                # Err token

        # The start state is assumed to be (0,$end)
        statestack.append(0)
        sym = YaccSymbol()
        sym.type = '$end'
        symstack.append(sym)
        state = 0
        while 1:
            # Get the next symbol on the input.  If a lookahead symbol
            # is already set, we just use that. Otherwise, we'll pull
            # the next token off of the lookaheadstack or from the lexer
            if not lookahead:
                if not lookaheadstack:
                    lookahead = get_token()     # Get the next token
                else:
                    lookahead = lookaheadstack.pop()
                if not lookahead:
                    lookahead = YaccSymbol()
                    lookahead.type = '$end'

            # Check the action table
            ltype = lookahead.type
            t = actions[state].get(ltype)

            if t is not None:
                if t > 0:
                    # shift a symbol on the stack
                    statestack.append(t)
                    state = t

                    symstack.append(lookahead)
                    lookahead = None

                    # Decrease error count on successful shift
                    if errorcount: errorcount -=1
                    continue

                if t < 0:
                    # reduce a symbol on the stack, emit a production
                    p = prod[-t]
                    pname = p.name
                    plen  = p.len

                    # Get production function
                    sym = YaccSymbol()
                    sym.type = pname       # Production name
                    sym.value = None

                    if plen:
                        targ = symstack[-plen-1:]
                        targ[0] = sym

                        # --! TRACKING
                        if tracking:
                            t1 = targ[1]
                            sym.lineno = t1.lineno
                            sym.lexpos = t1.lexpos
                            t1 = targ[-1]
                            sym.endlineno = getattr(t1,"endlineno",t1.lineno)
                            sym.endlexpos = getattr(t1,"endlexpos",t1.lexpos)
                        # --! TRACKING

                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                        # The code enclosed in this section is duplicated
                        # below as a performance optimization.  Make sure
                        # changes get made in both locations.

                        pslice.slice = targ

                        try:
                            # Call the grammar rule with our special slice object
                            del symstack[-plen:]
                            del statestack[-plen:]
                            p.callable(pslice)
                            symstack.append(sym)
                            state = goto[statestack[-1]][pname]
                            statestack.append(state)
                        except SyntaxError:
                            # If an error was set. Enter error recovery state
                            lookaheadstack.append(lookahead)
                            symstack.pop()
                            statestack.pop()
                            state = statestack[-1]
                            sym.type = 'error'
                            lookahead = sym
                            errorcount = error_count
                            self.errorok = 0
                        continue
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

                    else:

                        # --! TRACKING
                        if tracking:
                            sym.lineno = lexer.lineno
                            sym.lexpos = lexer.lexpos
                        # --! TRACKING

                        targ = [ sym ]

                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                        # The code enclosed in this section is duplicated
                        # above as a performance optimization.  Make sure
                        # changes get made in both locations.

                        pslice.slice = targ

                        try:
                            # Call the grammar rule with our special slice object
                            p.callable(pslice)
                            symstack.append(sym)
                            state = goto[statestack[-1]][pname]
                            statestack.append(state)
                        except SyntaxError:
                            # If an error was set. Enter error recovery state
                            lookaheadstack.append(lookahead)
                            symstack.pop()
                            statestack.pop()
                            state = statestack[-1]
                            sym.type = 'error'
                            lookahead = sym
                            errorcount = error_count
                            self.errorok = 0
                        continue
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

                if t == 0:
                    # Accept: value of the start symbol is the parse result
                    n = symstack[-1]
                    return getattr(n,"value",None)

            if t == None:

                # We have some kind of parsing error here.  To handle
                # this, we are going to push the current token onto
                # the tokenstack and replace it with an 'error' token.
                # If there are any synchronization rules, they may
                # catch it.
                #
                # In addition to pushing the error token, we call call
                # the user defined p_error() function if this is the
                # first syntax error.  This function is only called if
                # errorcount == 0.
                if errorcount == 0 or self.errorok:
                    errorcount = error_count
                    self.errorok = 0
                    errtoken = lookahead
                    if errtoken.type == '$end':
                        errtoken = None               # End of file!
                    if self.errorfunc:
                        global errok,token,restart
                        errok = self.errok        # Set some special functions available in error recovery
                        token = get_token
                        restart = self.restart
                        if errtoken and not hasattr(errtoken,'lexer'):
                            errtoken.lexer = lexer
                        tok = self.errorfunc(errtoken)
                        del errok, token, restart   # Delete special functions

                        if self.errorok:
                            # User must have done some kind of panic
                            # mode recovery on their own.  The
                            # returned token is the next lookahead
                            lookahead = tok
                            errtoken = None
                            continue
                    else:
                        if errtoken:
                            # NOTE(review): reads lookahead.lineno though the hasattr
                            # check was on errtoken -- matches upstream PLY behavior;
                            # confirm before changing.
                            if hasattr(errtoken,"lineno"): lineno = lookahead.lineno
                            else: lineno = 0
                            if lineno:
                                sys.stderr.write("yacc: Syntax error at line %d, token=%s\n" % (lineno, errtoken.type))
                            else:
                                sys.stderr.write("yacc: Syntax error, token=%s" % errtoken.type)
                        else:
                            sys.stderr.write("yacc: Parse error in input. EOF\n")
                        return

                else:
                    errorcount = error_count

                # case 1: the statestack only has 1 entry on it.  If we're in this state, the
                # entire parse has been rolled back and we're completely hosed.   The token is
                # discarded and we just keep going.

                if len(statestack) <= 1 and lookahead.type != '$end':
                    lookahead = None
                    errtoken = None
                    state = 0
                    # Nuke the pushback stack
                    del lookaheadstack[:]
                    continue

                # case 2: the statestack has a couple of entries on it, but we're
                # at the end of the file. nuke the top entry and generate an error token

                # Start nuking entries on the stack
                if lookahead.type == '$end':
                    # Whoa. We're really hosed here. Bail out
                    return

                if lookahead.type != 'error':
                    sym = symstack[-1]
                    if sym.type == 'error':
                        # Hmmm.
                        # Error is on top of stack, we'll just nuke input
                        # symbol and continue
                        lookahead = None
                        continue
                    t = YaccSymbol()
                    t.type = 'error'
                    if hasattr(lookahead,"lineno"):
                        t.lineno = lookahead.lineno
                    t.value = lookahead
                    lookaheadstack.append(lookahead)
                    lookahead = t
                else:
                    symstack.pop()
                    statestack.pop()
                    state = statestack[-1]       # Potential bug fix

                continue

        # Call an error function here
        raise RuntimeError("yacc: internal parser error!!!\n")

    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    # parseopt_notrack().
    #
    # Optimized version of parseopt() with line number tracking removed.
    # DO NOT EDIT THIS CODE DIRECTLY. Copy the optimized version and remove
    # code in the #--! TRACKING sections
    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

    def parseopt_notrack(self,input=None,lexer=None,debug=0,tracking=0,tokenfunc=None):
        """Optimized LR parsing engine with NO line/position tracking.

        Identical to parseopt() except that all #--! TRACKING sections
        have been removed; see parse() for the annotated reference.
        """
        lookahead = None                 # Current lookahead symbol
        lookaheadstack = [ ]             # Stack of lookahead symbols
        actions = self.action            # Local reference to action table (to avoid lookup on self.)
        goto    = self.goto              # Local reference to goto table (to avoid lookup on self.)
        prod    = self.productions       # Local reference to production list (to avoid lookup on self.)
        pslice  = YaccProduction(None)   # Production object passed to grammar rules
        errorcount = 0                   # Used during error recovery

        # If no lexer was given, we will try to use the lex module
        if not lexer:
            lex = load_ply_lex()
            lexer = lex.lexer

        # Set up the lexer and parser objects on pslice
        pslice.lexer = lexer
        pslice.parser = self

        # If input was supplied, pass to lexer
        if input is not None:
            lexer.input(input)

        if tokenfunc is None:
            # Tokenize function
            get_token = lexer.token
        else:
            get_token = tokenfunc

        # Set up the state and symbol stacks
        statestack = [ ]                 # Stack of parsing states
        self.statestack = statestack
        symstack   = [ ]                 # Stack of grammar symbols
        self.symstack = symstack

        pslice.stack = symstack          # Put in the production
        errtoken   = None                # Err token

        # The start state is assumed to be (0,$end)
        statestack.append(0)
        sym = YaccSymbol()
        sym.type = '$end'
        symstack.append(sym)
        state = 0
        while 1:
            # Get the next symbol on the input.  If a lookahead symbol
            # is already set, we just use that. Otherwise, we'll pull
            # the next token off of the lookaheadstack or from the lexer
            if not lookahead:
                if not lookaheadstack:
                    lookahead = get_token()     # Get the next token
                else:
                    lookahead = lookaheadstack.pop()
                if not lookahead:
                    lookahead = YaccSymbol()
                    lookahead.type = '$end'

            # Check the action table
            ltype = lookahead.type
            t = actions[state].get(ltype)

            if t is not None:
                if t > 0:
                    # shift a symbol on the stack
                    statestack.append(t)
                    state = t

                    symstack.append(lookahead)
                    lookahead = None

                    # Decrease error count on successful shift
                    if errorcount: errorcount -=1
                    continue

                if t < 0:
                    # reduce a symbol on the stack, emit a production
                    p = prod[-t]
                    pname = p.name
                    plen  = p.len

                    # Get production function
                    sym = YaccSymbol()
                    sym.type = pname       # Production name
                    sym.value = None

                    if plen:
                        targ = symstack[-plen-1:]
                        targ[0] = sym

                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                        # The code enclosed in this section is duplicated
                        # below as a performance optimization.  Make sure
                        # changes get made in both locations.

                        pslice.slice = targ

                        try:
                            # Call the grammar rule with our special slice object
                            del symstack[-plen:]
                            del statestack[-plen:]
                            p.callable(pslice)
                            symstack.append(sym)
                            state = goto[statestack[-1]][pname]
                            statestack.append(state)
                        except SyntaxError:
                            # If an error was set. Enter error recovery state
                            lookaheadstack.append(lookahead)
                            symstack.pop()
                            statestack.pop()
                            state = statestack[-1]
                            sym.type = 'error'
                            lookahead = sym
                            errorcount = error_count
                            self.errorok = 0
                        continue
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

                    else:

                        targ = [ sym ]

                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                        # The code enclosed in this section is duplicated
                        # above as a performance optimization.  Make sure
                        # changes get made in both locations.

                        pslice.slice = targ

                        try:
                            # Call the grammar rule with our special slice object
                            p.callable(pslice)
                            symstack.append(sym)
                            state = goto[statestack[-1]][pname]
                            statestack.append(state)
                        except SyntaxError:
                            # If an error was set. Enter error recovery state
                            lookaheadstack.append(lookahead)
                            symstack.pop()
                            statestack.pop()
                            state = statestack[-1]
                            sym.type = 'error'
                            lookahead = sym
                            errorcount = error_count
                            self.errorok = 0
                        continue
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

                if t == 0:
                    # Accept: value of the start symbol is the parse result
                    n = symstack[-1]
                    return getattr(n,"value",None)

            if t == None:

                # We have some kind of parsing error here.  To handle
                # this, we are going to push the current token onto
                # the tokenstack and replace it with an 'error' token.
                # If there are any synchronization rules, they may
                # catch it.
                #
                # In addition to pushing the error token, we call call
                # the user defined p_error() function if this is the
                # first syntax error.  This function is only called if
                # errorcount == 0.
                if errorcount == 0 or self.errorok:
                    errorcount = error_count
                    self.errorok = 0
                    errtoken = lookahead
                    if errtoken.type == '$end':
                        errtoken = None               # End of file!
                    if self.errorfunc:
                        global errok,token,restart
                        errok = self.errok        # Set some special functions available in error recovery
                        token = get_token
                        restart = self.restart
                        if errtoken and not hasattr(errtoken,'lexer'):
                            errtoken.lexer = lexer
                        tok = self.errorfunc(errtoken)
                        del errok, token, restart   # Delete special functions

                        if self.errorok:
                            # User must have done some kind of panic
                            # mode recovery on their own.  The
                            # returned token is the next lookahead
                            lookahead = tok
                            errtoken = None
                            continue
                    else:
                        if errtoken:
                            # NOTE(review): reads lookahead.lineno though the hasattr
                            # check was on errtoken -- matches upstream PLY behavior;
                            # confirm before changing.
                            if hasattr(errtoken,"lineno"): lineno = lookahead.lineno
                            else: lineno = 0
                            if lineno:
                                sys.stderr.write("yacc: Syntax error at line %d, token=%s\n" % (lineno, errtoken.type))
                            else:
                                sys.stderr.write("yacc: Syntax error, token=%s" % errtoken.type)
                        else:
                            sys.stderr.write("yacc: Parse error in input. EOF\n")
                        return

                else:
                    errorcount = error_count

                # case 1: the statestack only has 1 entry on it.  If we're in this state, the
                # entire parse has been rolled back and we're completely hosed.   The token is
                # discarded and we just keep going.

                if len(statestack) <= 1 and lookahead.type != '$end':
                    lookahead = None
                    errtoken = None
                    state = 0
                    # Nuke the pushback stack
                    del lookaheadstack[:]
                    continue

                # case 2: the statestack has a couple of entries on it, but we're
                # at the end of the file. nuke the top entry and generate an error token

                # Start nuking entries on the stack
                if lookahead.type == '$end':
                    # Whoa. We're really hosed here. Bail out
                    return

                if lookahead.type != 'error':
                    sym = symstack[-1]
                    if sym.type == 'error':
                        # Hmmm. Error is on top of stack, we'll just nuke input
                        # symbol and continue
                        lookahead = None
                        continue
                    t = YaccSymbol()
                    t.type = 'error'
                    if hasattr(lookahead,"lineno"):
                        t.lineno = lookahead.lineno
                    t.value = lookahead
                    lookaheadstack.append(lookahead)
                    lookahead = t
                else:
                    symstack.pop()
                    statestack.pop()
                    state = statestack[-1]       # Potential bug fix

                continue

        # Call an error function here
        raise RuntimeError("yacc: internal parser error!!!\n")

# -----------------------------------------------------------------------------
#                          === Grammar Representation ===
#
# The following functions, classes, and variables are used to represent and
# manipulate the rules that make up a grammar.
# -----------------------------------------------------------------------------

import re

# regex matching identifiers
_is_identifier = re.compile(r'^[a-zA-Z0-9_-]+$')

# -----------------------------------------------------------------------------
# class Production:
#
# This class stores the raw information about a single production or grammar rule.
# A grammar rule refers to a specification such as this:
#
#       expr : expr PLUS term
#
# Here are the basic attributes defined on all productions
#
#       name     - Name of the production.  For example 'expr'
#       prod     - A list of symbols on the right side ['expr','PLUS','term']
#       prec     - Production precedence level
#       number   - Production number.
#       func     - Function that executes on reduce
#       file     - File where production function is defined
#       lineno   - Line number where production function is defined
#
# The following attributes are defined or optional.
#
#       len       - Length of the production (number of symbols on right hand side)
#       usyms     - Set of unique symbols found in the production
# -----------------------------------------------------------------------------

class Production(object):
    """Raw information about a single grammar rule (production).

    Records the rule name, right-hand-side symbols, precedence, production
    number and the reduce function that runs when the rule is recognized.
    See the attribute list in the comment block above.
    """

    # Number of times this production has been reduced (maintained by the parser)
    reduced = 0

    def __init__(self, number, name, prod, precedence=('right', 0), func=None, file='', line=0):
        self.name = name
        self.number = number
        self.func = func
        self.callable = None
        self.file = file
        self.line = line
        self.prec = precedence
        self.prod = tuple(prod)

        # Internal settings used during table construction
        self.len = len(self.prod)    # Length of the production

        # Unique production symbols, in order of first appearance
        uniq = []
        for symbol in self.prod:
            if symbol not in uniq:
                uniq.append(symbol)
        self.usyms = uniq

        # List of all LR items for the production (filled in later)
        self.lr_items = []
        self.lr_next = None

        # Cached string representation, e.g. "expr -> expr PLUS term"
        if self.prod:
            rhs = " ".join(self.prod)
        else:
            rhs = "<empty>"
        self.str = "%s -> %s" % (self.name, rhs)

    def __str__(self):
        return self.str

    def __repr__(self):
        return "Production(%s)" % (self.str,)

    def __len__(self):
        return len(self.prod)

    def __nonzero__(self):
        # Python 2 truth value: a Production is always "true"
        return 1

    def __getitem__(self, index):
        return self.prod[index]

    def lr_item(self, n):
        """Return the LR item with the dot at position n, or None past the end."""
        if n > len(self.prod):
            return None
        item = LRItem(self, n)
        # Precompute the list of productions immediately following.  Hack. Remove later
        try:
            item.lr_after = Prodnames[item.prod[n + 1]]
        except (IndexError, KeyError):
            item.lr_after = []
        try:
            item.lr_before = item.prod[n - 1]
        except IndexError:
            item.lr_before = None
        return item

    def bind(self, pdict):
        """Resolve self.func (a function name) to a callable found in pdict."""
        if not self.func:
            return
        self.callable = pdict[self.func]

# This class serves as a minimal standin for Production objects when
# reading table data from files.   It only contains information
# actually used by the LR parsing engine, plus some additional
# debugging information.
class MiniProduction(object):
    """Minimal stand-in for Production when table data is loaded from files.

    Carries only the fields the LR parsing engine actually uses, plus a
    little debugging information.
    """

    def __init__(self, str, name, len, func, file, line):
        self.name = name
        self.len = len
        self.func = func
        self.callable = None
        self.file = file
        self.line = line
        self.str = str

    def __str__(self):
        return self.str

    def __repr__(self):
        return "MiniProduction(%s)" % (self.str,)

    def bind(self, pdict):
        """Resolve self.func (a function name) to a callable found in pdict."""
        if not self.func:
            return
        self.callable = pdict[self.func]


# -----------------------------------------------------------------------------
# class LRItem
#
# This class represents a specific stage of parsing a production rule.  For
# example:
#
#       expr : expr . PLUS term
#
# In the above, the "." represents the current location of the parse.  Here
# basic attributes:
#
#       name       - Name of the production.  For example 'expr'
#       prod       - A list of symbols on the right side ['expr','.', 'PLUS','term']
#       number     - Production number.
#
#       lr_next      Next LR item. Example, if we are ' expr -> expr . PLUS term'
#                    then lr_next refers to 'expr -> expr PLUS . term'
#       lr_index   - LR item index (location of the ".") in the prod list.
#       lookaheads - LALR lookahead symbols for this item
#       len        - Length of the production (number of symbols on right hand side)
#       lr_after    - List of all productions that immediately follow
#       lr_before   - Grammar symbol immediately before
# -----------------------------------------------------------------------------

class LRItem(object):
    """A production with a parse-position marker "." inserted at index n."""

    def __init__(self, p, n):
        dotted = list(p.prod)
        dotted.insert(n, ".")
        self.name = p.name
        self.prod = tuple(dotted)
        self.number = p.number
        self.lr_index = n
        self.lookaheads = {}
        self.len = len(self.prod)
        self.usyms = p.usyms

    def __str__(self):
        if self.prod:
            return "%s -> %s" % (self.name, " ".join(self.prod))
        return "%s -> <empty>" % (self.name,)

    def __repr__(self):
        return "LRItem(%s)" % (self,)

# -----------------------------------------------------------------------------
# rightmost_terminal()
#
# Return the rightmost terminal from a list of symbols.
Used in add_production() # ----------------------------------------------------------------------------- def rightmost_terminal(symbols, terminals): i = len(symbols) - 1 while i >= 0: if symbols[i] in terminals: return symbols[i] i -= 1 return None # ----------------------------------------------------------------------------- # === GRAMMAR CLASS === # # The following class represents the contents of the specified grammar along # with various computed properties such as first sets, follow sets, LR items, etc. # This data is used for critical parts of the table generation process later. # ----------------------------------------------------------------------------- class GrammarError(YaccError): pass class Grammar(object): def __init__(self,terminals): self.Productions = [None] # A list of all of the productions. The first # entry is always reserved for the purpose of # building an augmented grammar self.Prodnames = { } # A dictionary mapping the names of nonterminals to a list of all # productions of that nonterminal. self.Prodmap = { } # A dictionary that is only used to detect duplicate # productions. self.Terminals = { } # A dictionary mapping the names of terminal symbols to a # list of the rules where they are used. for term in terminals: self.Terminals[term] = [] self.Terminals['error'] = [] self.Nonterminals = { } # A dictionary mapping names of nonterminals to a list # of rule numbers where they are used. self.First = { } # A dictionary of precomputed FIRST(x) symbols self.Follow = { } # A dictionary of precomputed FOLLOW(x) symbols self.Precedence = { } # Precedence rules for each terminal. Contains tuples of the # form ('right',level) or ('nonassoc', level) or ('left',level) self.UsedPrecedence = { } # Precedence rules that were actually used by the grammer. # This is only used to provide error checking and to generate # a warning about unused precedence rules. 
        self.Start = None           # Starting symbol for the grammar


    def __len__(self):
        return len(self.Productions)

    def __getitem__(self,index):
        return self.Productions[index]

    # -----------------------------------------------------------------------------
    # set_precedence()
    #
    # Sets the precedence for a given terminal. assoc is the associativity such as
    # 'left','right', or 'nonassoc'.  level is a numeric level.
    #
    # -----------------------------------------------------------------------------

    def set_precedence(self,term,assoc,level):
        """Record precedence (assoc,level) for terminal `term`.

        Must be called before any add_production(); raises GrammarError on a
        duplicate terminal or an invalid associativity.
        """
        assert self.Productions == [None],"Must call set_precedence() before add_production()"
        if term in self.Precedence:
            raise GrammarError("Precedence already specified for terminal '%s'" % term)
        if assoc not in ['left','right','nonassoc']:
            raise GrammarError("Associativity must be one of 'left','right', or 'nonassoc'")
        self.Precedence[term] = (assoc,level)

    # -----------------------------------------------------------------------------
    # add_production()
    #
    # Given an action function, this function assembles a production rule and
    # computes its precedence level.
    #
    # The production rule is supplied as a list of symbols.   For example,
    # a rule such as 'expr : expr PLUS term' has a production name of 'expr' and
    # symbols ['expr','PLUS','term'].
    #
    # Precedence is determined by the precedence of the right-most non-terminal
    # or the precedence of a terminal specified by %prec.
    #
    # A variety of error checks are performed to make sure production symbols
    # are valid and that %prec is used correctly.
    # -----------------------------------------------------------------------------

    def add_production(self,prodname,syms,func=None,file='',line=0):
        """Add one grammar rule (prodname -> syms) with reduce function `func`.

        Raises GrammarError on invalid names, bad literals, bad %prec usage,
        or duplicate rules.  Returns 0 on success.
        """
        if prodname in self.Terminals:
            raise GrammarError("%s:%d: Illegal rule name '%s'. Already defined as a token" % (file,line,prodname))
        if prodname == 'error':
            raise GrammarError("%s:%d: Illegal rule name '%s'. error is a reserved word" % (file,line,prodname))
        if not _is_identifier.match(prodname):
            raise GrammarError("%s:%d: Illegal rule name '%s'" % (file,line,prodname))

        # Look for literal tokens
        for n,s in enumerate(syms):
            if s[0] in "'\"":
                 try:
                     c = eval(s)
                     if (len(c) > 1):
                          raise GrammarError("%s:%d: Literal token %s in rule '%s' may only be a single character" % (file,line,s, prodname))
                     if not c in self.Terminals:
                          self.Terminals[c] = []
                     syms[n] = c
                     continue
                 except SyntaxError:
                     pass
            if not _is_identifier.match(s) and s != '%prec':
                raise GrammarError("%s:%d: Illegal name '%s' in rule '%s'" % (file,line,s, prodname))

        # Determine the precedence level
        if '%prec' in syms:
            if syms[-1] == '%prec':
                raise GrammarError("%s:%d: Syntax error. Nothing follows %%prec" % (file,line))
            if syms[-2] != '%prec':
                raise GrammarError("%s:%d: Syntax error. %%prec can only appear at the end of a grammar rule" % (file,line))
            precname = syms[-1]
            prodprec = self.Precedence.get(precname,None)
            if not prodprec:
                raise GrammarError("%s:%d: Nothing known about the precedence of '%s'" % (file,line,precname))
            else:
                self.UsedPrecedence[precname] = 1
            del syms[-2:]     # Drop %prec from the rule
        else:
            # If no %prec, precedence is determined by the rightmost terminal symbol
            precname = rightmost_terminal(syms,self.Terminals)
            prodprec = self.Precedence.get(precname,('right',0))

        # See if the rule is already in the rulemap
        map = "%s -> %s" % (prodname,syms)
        if map in self.Prodmap:
            m = self.Prodmap[map]
            raise GrammarError("%s:%d: Duplicate rule %s. " % (file,line, m) +
                               "Previous definition at %s:%d" % (m.file, m.line))

        # From this point on, everything is valid.  Create a new Production instance
        pnumber  = len(self.Productions)
        if not prodname in self.Nonterminals:
            self.Nonterminals[prodname] = [ ]

        # Add the production number to Terminals and Nonterminals
        for t in syms:
            if t in self.Terminals:
                self.Terminals[t].append(pnumber)
            else:
                if not t in self.Nonterminals:
                    self.Nonterminals[t] = [ ]
                self.Nonterminals[t].append(pnumber)

        # Create a production and add it to the list of productions
        p = Production(pnumber,prodname,syms,prodprec,func,file,line)
        self.Productions.append(p)
        self.Prodmap[map] = p

        # Add to the global productions list
        try:
            self.Prodnames[prodname].append(p)
        except KeyError:
            self.Prodnames[prodname] = [ p ]
        return 0

    # -----------------------------------------------------------------------------
    # set_start()
    #
    # Sets the starting symbol and creates the augmented grammar.  Production
    # rule 0 is S' -> start where start is the start symbol.
    # -----------------------------------------------------------------------------

    def set_start(self,start=None):
        """Set the start symbol (defaults to the first rule) and build rule 0."""
        if not start:
            start = self.Productions[1].name
        if start not in self.Nonterminals:
            raise GrammarError("start symbol %s undefined" % start)
        self.Productions[0] = Production(0,"S'",[start])
        self.Nonterminals[start].append(0)
        self.Start = start

    # -----------------------------------------------------------------------------
    # find_unreachable()
    #
    # Find all of the nonterminal symbols that can't be reached from the starting
    # symbol.  Returns a list of nonterminals that can't be reached.
    # -----------------------------------------------------------------------------

    def find_unreachable(self):
        """Return nonterminals not reachable from the start symbol."""

        # Mark all symbols that are reachable from a symbol s
        def mark_reachable_from(s):
            if reachable[s]:
                # We've already reached symbol s.
                return
            reachable[s] = 1
            for p in self.Prodnames.get(s,[]):
                for r in p.prod:
                    mark_reachable_from(r)

        reachable   = { }
        for s in list(self.Terminals) + list(self.Nonterminals):
            reachable[s] = 0

        mark_reachable_from( self.Productions[0].prod[0] )

        return [s for s in list(self.Nonterminals) if not reachable[s]]

    # -----------------------------------------------------------------------------
    # infinite_cycles()
    #
    # This function looks at the various parsing rules and tries to detect
    # infinite recursion cycles (grammar rules where there is no possible way
    # to derive a string of only terminals).
    # -----------------------------------------------------------------------------

    def infinite_cycles(self):
        """Return symbols that can never derive a string of only terminals."""
        terminates = {}

        # Terminals:
        for t in self.Terminals:
            terminates[t] = 1

        terminates['$end'] = 1

        # Nonterminals:

        # Initialize to false:
        for n in self.Nonterminals:
            terminates[n] = 0

        # Then propagate termination until no change:
        while 1:
            some_change = 0
            for (n,pl) in self.Prodnames.items():
                # Nonterminal n terminates iff any of its productions terminates.
                for p in pl:
                    # Production p terminates iff all of its rhs symbols terminate.
                    for s in p.prod:
                        if not terminates[s]:
                            # The symbol s does not terminate,
                            # so production p does not terminate.
                            p_terminates = 0
                            break
                    else:
                        # didn't break from the loop,
                        # so every symbol s terminates
                        # so production p terminates.
                        p_terminates = 1

                    if p_terminates:
                        # symbol n terminates!
                        if not terminates[n]:
                            terminates[n] = 1
                            some_change = 1
                        # Don't need to consider any more productions for this n.
                        break

            if not some_change:
                break

        infinite = []
        for (s,term) in terminates.items():
            if not term:
                if not s in self.Prodnames and not s in self.Terminals and s != 'error':
                    # s is used-but-not-defined, and we've already warned of that,
                    # so it would be overkill to say that it's also non-terminating.
                    pass
                else:
                    infinite.append(s)

        return infinite

    # -----------------------------------------------------------------------------
    # undefined_symbols()
    #
    # Find all symbols that were used the grammar, but not defined as tokens or
    # grammar rules.  Returns a list of tuples (sym, prod) where sym in the symbol
    # and prod is the production where the symbol was used.
    # -----------------------------------------------------------------------------

    def undefined_symbols(self):
        """Return (symbol, production) pairs for symbols used but never defined."""
        result = []
        for p in self.Productions:
            if not p: continue

            for s in p.prod:
                if not s in self.Prodnames and not s in self.Terminals and s != 'error':
                    result.append((s,p))
        return result

    # -----------------------------------------------------------------------------
    # unused_terminals()
    #
    # Find all terminals that were defined, but not used by the grammar.  Returns
    # a list of all symbols.
    # -----------------------------------------------------------------------------

    def unused_terminals(self):
        """Return terminals that appear in no production."""
        unused_tok = []
        for s,v in self.Terminals.items():
            if s != 'error' and not v:
                unused_tok.append(s)

        return unused_tok

    # ------------------------------------------------------------------------------
    # unused_rules()
    #
    # Find all grammar rules that were defined,  but not used (maybe not reachable)
    # Returns a list of productions.
    # ------------------------------------------------------------------------------

    def unused_rules(self):
        """Return one Production for every nonterminal that is never used."""
        unused_prod = []
        for s,v in self.Nonterminals.items():
            if not v:
                p = self.Prodnames[s][0]
                unused_prod.append(p)
        return unused_prod

    # -----------------------------------------------------------------------------
    # unused_precedence()
    #
    # Returns a list of tuples (term,precedence) corresponding to precedence
    # rules that were never used by the grammar.  term is the name of the terminal
    # on which precedence was applied and precedence is a string such as 'left' or
    # 'right' corresponding to the type of precedence.
    # -----------------------------------------------------------------------------

    def unused_precedence(self):
        """Return (terminal, associativity) pairs for precedence rules never used."""
        unused = []
        for termname in self.Precedence:
            if not (termname in self.Terminals or termname in self.UsedPrecedence):
                unused.append((termname,self.Precedence[termname][0]))

        return unused

    # -------------------------------------------------------------------------
    # _first()
    #
    # Compute the value of FIRST1(beta) where beta is a tuple of symbols.
    #
    # During execution of compute_first1, the result may be incomplete.
    # Afterward (e.g., when called from compute_follow()), it will be complete.
    # -------------------------------------------------------------------------

    def _first(self,beta):
        """Compute FIRST(beta) for a sequence of symbols beta."""

        # We are computing First(x1,x2,x3,...,xn)
        result = [ ]
        for x in beta:
            x_produces_empty = 0

            # Add all the non-<empty> symbols of First[x] to the result.
            for f in self.First[x]:
                if f == '<empty>':
                    x_produces_empty = 1
                else:
                    if f not in result: result.append(f)

            if x_produces_empty:
                # We have to consider the next x in beta,
                # i.e. stay in the loop.
                pass
            else:
                # We don't have to consider any further symbols in beta.
                break
        else:
            # There was no 'break' from the loop,
            # so x_produces_empty was true for all x in beta,
            # so beta produces empty as well.
            result.append('<empty>')

        return result

    # -------------------------------------------------------------------------
    # compute_first()
    #
    # Compute the value of FIRST1(X) for all symbols
    # -------------------------------------------------------------------------

    def compute_first(self):
        """Compute (and cache) FIRST sets for all grammar symbols."""
        if self.First:
            return self.First

        # Terminals:
        for t in self.Terminals:
            self.First[t] = [t]

        self.First['$end'] = ['$end']

        # Nonterminals:

        # Initialize to the empty set:
        for n in self.Nonterminals:
            self.First[n] = []

        # Then propagate symbols until no change:
        while 1:
            some_change = 0
            for n in self.Nonterminals:
                for p in self.Prodnames[n]:
                    for f in self._first(p.prod):
                        if f not in self.First[n]:
                            self.First[n].append( f )
                            some_change = 1
            if not some_change:
                break

        return self.First

    # ---------------------------------------------------------------------
    # compute_follow()
    #
    # Computes all of the follow sets for every non-terminal symbol.  The
    # follow set is the set of all symbols that might follow a given
    # non-terminal.  See the Dragon book, 2nd Ed. p. 189.
    # ---------------------------------------------------------------------

    def compute_follow(self,start=None):
        """Compute (and cache) FOLLOW sets for every nonterminal."""
        # If already computed, return the result
        if self.Follow:
            return self.Follow

        # If first sets not computed yet, do that first.
        if not self.First:
            self.compute_first()

        # Add '$end' to the follow list of the start symbol
        for k in self.Nonterminals:
            self.Follow[k] = [ ]

        if not start:
            start = self.Productions[1].name

        self.Follow[start] = [ '$end' ]

        while 1:
            didadd = 0
            for p in self.Productions[1:]:

                # Here is the production set
                for i in range(len(p.prod)):
                    B = p.prod[i]
                    if B in self.Nonterminals:
                        # Okay.
                        # We got a non-terminal in a production
                        fst = self._first(p.prod[i+1:])
                        hasempty = 0
                        for f in fst:
                            if f != '<empty>' and f not in self.Follow[B]:
                                self.Follow[B].append(f)
                                didadd = 1
                            if f == '<empty>':
                                hasempty = 1
                        if hasempty or i == (len(p.prod)-1):
                            # Add elements of follow(a) to follow(b)
                            for f in self.Follow[p.name]:
                                if f not in self.Follow[B]:
                                    self.Follow[B].append(f)
                                    didadd = 1
            if not didadd: break
        return self.Follow

    # -----------------------------------------------------------------------------
    # build_lritems()
    #
    # This function walks the list of productions and builds a complete set of the
    # LR items.  The LR items are stored in two ways:  First, they are uniquely
    # numbered and placed in the list _lritems.  Second, a linked list of LR items
    # is built for each production.  For example:
    #
    #   E -> E PLUS E
    #
    # Creates the list
    #
    #  [E -> . E PLUS E, E -> E . PLUS E, E -> E PLUS . E, E -> E PLUS E . ]
    # -----------------------------------------------------------------------------

    def build_lritems(self):
        """Build the LR item list and lr_next linked list for every production."""
        for p in self.Productions:
            lastlri = p
            i = 0
            lr_items = []
            while 1:
                if i > len(p):
                    lri = None
                else:
                    lri = LRItem(p,i)
                    # Precompute the list of productions immediately following
                    try:
                        lri.lr_after = self.Prodnames[lri.prod[i+1]]
                    except (IndexError,KeyError):
                        lri.lr_after = []
                    try:
                        lri.lr_before = lri.prod[i-1]
                    except IndexError:
                        lri.lr_before = None

                lastlri.lr_next = lri
                if not lri: break
                lr_items.append(lri)
                lastlri = lri
                i += 1
            p.lr_items = lr_items

# -----------------------------------------------------------------------------
#                            == Class LRTable ==
#
# This basic class represents a basic table of LR parsing information.
# Methods for generating the tables are not defined here.  They are defined
# in the derived class LRGeneratedTable.
# ----------------------------------------------------------------------------- class VersionError(YaccError): pass class LRTable(object): def __init__(self): self.lr_action = None self.lr_goto = None self.lr_productions = None self.lr_method = None def read_table(self,module): if isinstance(module,types.ModuleType): parsetab = module else: if sys.version_info[0] < 3: exec("import %s as parsetab" % module) else: env = { } exec("import %s as parsetab" % module, env, env) parsetab = env['parsetab'] if parsetab._tabversion != __tabversion__: raise VersionError("yacc table file version is out of date") self.lr_action = parsetab._lr_action self.lr_goto = parsetab._lr_goto self.lr_productions = [] for p in parsetab._lr_productions: self.lr_productions.append(MiniProduction(*p)) self.lr_method = parsetab._lr_method return parsetab._lr_signature def read_pickle(self,filename): try: import cPickle as pickle except ImportError: import pickle in_f = open(filename,"rb") tabversion = pickle.load(in_f) if tabversion != __tabversion__: raise VersionError("yacc table file version is out of date") self.lr_method = pickle.load(in_f) signature = pickle.load(in_f) self.lr_action = pickle.load(in_f) self.lr_goto = pickle.load(in_f) productions = pickle.load(in_f) self.lr_productions = [] for p in productions: self.lr_productions.append(MiniProduction(*p)) in_f.close() return signature # Bind all production function names to callable objects in pdict def bind_callables(self,pdict): for p in self.lr_productions: p.bind(pdict) # ----------------------------------------------------------------------------- # === LR Generator === # # The following classes and functions are used to generate LR parsing tables on # a grammar. 
# ----------------------------------------------------------------------------- # ----------------------------------------------------------------------------- # digraph() # traverse() # # The following two functions are used to compute set valued functions # of the form: # # F(x) = F'(x) U U{F(y) | x R y} # # This is used to compute the values of Read() sets as well as FOLLOW sets # in LALR(1) generation. # # Inputs: X - An input set # R - A relation # FP - Set-valued function # ------------------------------------------------------------------------------ def digraph(X,R,FP): N = { } for x in X: N[x] = 0 stack = [] F = { } for x in X: if N[x] == 0: traverse(x,N,stack,F,X,R,FP) return F def traverse(x,N,stack,F,X,R,FP): stack.append(x) d = len(stack) N[x] = d F[x] = FP(x) # F(X) <- F'(x) rel = R(x) # Get y's related to x for y in rel: if N[y] == 0: traverse(y,N,stack,F,X,R,FP) N[x] = min(N[x],N[y]) for a in F.get(y,[]): if a not in F[x]: F[x].append(a) if N[x] == d: N[stack[-1]] = MAXINT F[stack[-1]] = F[x] element = stack.pop() while element != x: N[stack[-1]] = MAXINT F[stack[-1]] = F[x] element = stack.pop() class LALRError(YaccError): pass # ----------------------------------------------------------------------------- # == LRGeneratedTable == # # This class implements the LR table generation algorithm. 
# There are no public methods except for write()
# -----------------------------------------------------------------------------

class LRGeneratedTable(LRTable):
    def __init__(self,grammar,method='LALR',log=None):
        """Build SLR or LALR parse tables for *grammar*.

        method must be 'SLR' or 'LALR'; log is an optional logger object
        (defaults to a NullLogger).  Table construction happens eagerly at
        the end of __init__ via lr_parse_table().
        """
        if method not in ['SLR','LALR']:
            raise LALRError("Unsupported method %s" % method)

        self.grammar = grammar
        self.lr_method = method

        # Set up the logger
        if not log:
            log = NullLogger()
        self.log = log

        # Internal attributes
        self.lr_action = {}        # Action table
        self.lr_goto = {}          # Goto table
        self.lr_productions = grammar.Productions    # Copy of grammar Production array
        self.lr_goto_cache = {}    # Cache of computed gotos
        self.lr0_cidhash = {}      # Cache of closures

        self._add_count = 0        # Internal counter used to detect cycles

        # Diagnostic information filled in by the table generator
        self.sr_conflict = 0
        self.rr_conflict = 0
        self.conflicts = []        # List of conflicts

        self.sr_conflicts = []
        self.rr_conflicts = []

        # Build the tables
        self.grammar.build_lritems()
        self.grammar.compute_first()
        self.grammar.compute_follow()
        self.lr_parse_table()

    # Compute the LR(0) closure operation on I, where I is a set of LR(0) items.

    def lr0_closure(self,I):
        """Return the LR(0) closure of the item set I (a new list)."""
        # _add_count acts as a per-call generation marker: items already
        # added during *this* closure carry lr0_added == _add_count, which
        # prevents re-adding them (and detects cycles) without clearing flags.
        self._add_count += 1

        # Add everything in I to J
        J = I[:]
        didadd = 1
        while didadd:
            didadd = 0
            for j in J:
                for x in j.lr_after:
                    if getattr(x,"lr0_added",0) == self._add_count: continue
                    # Add B --> .G to J
                    J.append(x.lr_next)
                    x.lr0_added = self._add_count
                    didadd = 1

        return J

    # Compute the LR(0) goto function goto(I,X) where I is a set
    # of LR(0) items and X is a grammar symbol.   This function is written
    # in a way that guarantees uniqueness of the generated goto sets
    # (i.e. the same goto set will never be returned as two different Python
    # objects).  With uniqueness, we can later do fast set comparisons using
    # id(obj) instead of element-wise comparison.
    def lr0_goto(self,I,x):
        """Return goto(I,x) for item set I and grammar symbol x.

        The result is memoized two ways: directly under the key (id(I),x),
        and via a trie keyed by the ids of the successor items, so that the
        same goto set is always returned as the *same* Python object.
        """
        # First we look for a previously cached entry
        g = self.lr_goto_cache.get((id(I),x),None)
        if g: return g

        # Now we generate the goto set in a way that guarantees uniqueness
        # of the result
        s = self.lr_goto_cache.get(x,None)
        if not s:
            s = { }
            self.lr_goto_cache[x] = s

        gs = [ ]
        for p in I:
            n = p.lr_next
            if n and n.lr_before == x:
                # Walk/extend the trie: one level per item in the goto set
                s1 = s.get(id(n),None)
                if not s1:
                    s1 = { }
                    s[id(n)] = s1
                gs.append(n)
                s = s1
        # '$end' marks the trie node where a completed goto set is stored
        g = s.get('$end',None)
        if not g:
            if gs:
                g = self.lr0_closure(gs)
                s['$end'] = g
            else:
                s['$end'] = gs
        self.lr_goto_cache[(id(I),x)] = g
        return g

    # Compute the LR(0) sets of item function
    def lr0_items(self):
        """Return the canonical collection C of LR(0) item sets (states)."""
        C = [ self.lr0_closure([self.grammar.Productions[0].lr_next]) ]
        i = 0
        for I in C:
            self.lr0_cidhash[id(I)] = i
            i += 1

        # Loop over the items in C and each grammar symbols
        # (C grows while we iterate, hence the explicit index loop)
        i = 0
        while i < len(C):
            I = C[i]
            i += 1

            # Collect all of the symbols that could possibly be in the goto(I,X) sets
            asyms = { }
            for ii in I:
                for s in ii.usyms:
                    asyms[s] = None

            for x in asyms:
                g = self.lr0_goto(I,x)
                if not g:  continue
                if id(g) in self.lr0_cidhash: continue
                self.lr0_cidhash[id(g)] = len(C)
                C.append(g)

        return C

    # -----------------------------------------------------------------------------
    #                       ==== LALR(1) Parsing ====
    #
    # LALR(1) parsing is almost exactly the same as SLR except that instead of
    # relying upon Follow() sets when performing reductions, a more selective
    # lookahead set that incorporates the state of the LR(0) machine is utilized.
    # Thus, we mainly just have to focus on calculating the lookahead sets.
    #
    # The method used here is due to DeRemer and Pennelo (1982).
    #
    # DeRemer, F. L., and T. J. Pennelo: "Efficient Computation of LALR(1)
    #     Lookahead Sets", ACM Transactions on Programming Languages and Systems,
    #     Vol. 4, No. 4, Oct. 1982, pp. 615-649
    #
    # Further details can also be found in:
    #
    #  J. Tremblay and P. Sorenson, "The Theory and Practice of Compiler Writing",
    #      McGraw-Hill Book Company, (1985).
# # ----------------------------------------------------------------------------- # ----------------------------------------------------------------------------- # compute_nullable_nonterminals() # # Creates a dictionary containing all of the non-terminals that might produce # an empty production. # ----------------------------------------------------------------------------- def compute_nullable_nonterminals(self): nullable = {} num_nullable = 0 while 1: for p in self.grammar.Productions[1:]: if p.len == 0: nullable[p.name] = 1 continue for t in p.prod: if not t in nullable: break else: nullable[p.name] = 1 if len(nullable) == num_nullable: break num_nullable = len(nullable) return nullable # ----------------------------------------------------------------------------- # find_nonterminal_trans(C) # # Given a set of LR(0) items, this functions finds all of the non-terminal # transitions. These are transitions in which a dot appears immediately before # a non-terminal. Returns a list of tuples of the form (state,N) where state # is the state number and N is the nonterminal symbol. # # The input C is the set of LR(0) items. # ----------------------------------------------------------------------------- def find_nonterminal_transitions(self,C): trans = [] for state in range(len(C)): for p in C[state]: if p.lr_index < p.len - 1: t = (state,p.prod[p.lr_index+1]) if t[1] in self.grammar.Nonterminals: if t not in trans: trans.append(t) state = state + 1 return trans # ----------------------------------------------------------------------------- # dr_relation() # # Computes the DR(p,A) relationships for non-terminal transitions. The input # is a tuple (state,N) where state is a number and N is a nonterminal symbol. # # Returns a list of terminals. 
# ----------------------------------------------------------------------------- def dr_relation(self,C,trans,nullable): dr_set = { } state,N = trans terms = [] g = self.lr0_goto(C[state],N) for p in g: if p.lr_index < p.len - 1: a = p.prod[p.lr_index+1] if a in self.grammar.Terminals: if a not in terms: terms.append(a) # This extra bit is to handle the start state if state == 0 and N == self.grammar.Productions[0].prod[0]: terms.append('$end') return terms # ----------------------------------------------------------------------------- # reads_relation() # # Computes the READS() relation (p,A) READS (t,C). # ----------------------------------------------------------------------------- def reads_relation(self,C, trans, empty): # Look for empty transitions rel = [] state, N = trans g = self.lr0_goto(C[state],N) j = self.lr0_cidhash.get(id(g),-1) for p in g: if p.lr_index < p.len - 1: a = p.prod[p.lr_index + 1] if a in empty: rel.append((j,a)) return rel # ----------------------------------------------------------------------------- # compute_lookback_includes() # # Determines the lookback and includes relations # # LOOKBACK: # # This relation is determined by running the LR(0) state machine forward. # For example, starting with a production "N : . A B C", we run it forward # to obtain "N : A B C ." We then build a relationship between this final # state and the starting state. These relationships are stored in a dictionary # lookdict. # # INCLUDES: # # Computes the INCLUDE() relation (p,A) INCLUDES (p',B). # # This relation is used to determine non-terminal transitions that occur # inside of other non-terminal transition states. (p,A) INCLUDES (p', B) # if the following holds: # # B -> LAT, where T -> epsilon and p' -L-> p # # L is essentially a prefix (which may be empty), T is a suffix that must be # able to derive an empty string. State p' must lead to state p with the string L. 
# # ----------------------------------------------------------------------------- def compute_lookback_includes(self,C,trans,nullable): lookdict = {} # Dictionary of lookback relations includedict = {} # Dictionary of include relations # Make a dictionary of non-terminal transitions dtrans = {} for t in trans: dtrans[t] = 1 # Loop over all transitions and compute lookbacks and includes for state,N in trans: lookb = [] includes = [] for p in C[state]: if p.name != N: continue # Okay, we have a name match. We now follow the production all the way # through the state machine until we get the . on the right hand side lr_index = p.lr_index j = state while lr_index < p.len - 1: lr_index = lr_index + 1 t = p.prod[lr_index] # Check to see if this symbol and state are a non-terminal transition if (j,t) in dtrans: # Yes. Okay, there is some chance that this is an includes relation # the only way to know for certain is whether the rest of the # production derives empty li = lr_index + 1 while li < p.len: if p.prod[li] in self.grammar.Terminals: break # No forget it if not p.prod[li] in nullable: break li = li + 1 else: # Appears to be a relation between (j,t) and (state,N) includes.append((j,t)) g = self.lr0_goto(C[j],t) # Go to next set j = self.lr0_cidhash.get(id(g),-1) # Go to next state # When we get here, j is the final state, now we have to locate the production for r in C[j]: if r.name != p.name: continue if r.len != p.len: continue i = 0 # This look is comparing a production ". A B C" with "A B C ." while i < r.lr_index: if r.prod[i] != p.prod[i+1]: break i = i + 1 else: lookb.append((j,r)) for i in includes: if not i in includedict: includedict[i] = [] includedict[i].append((state,N)) lookdict[(state,N)] = lookb return lookdict,includedict # ----------------------------------------------------------------------------- # compute_read_sets() # # Given a set of LR(0) items, this function computes the read sets. 
# # Inputs: C = Set of LR(0) items # ntrans = Set of nonterminal transitions # nullable = Set of empty transitions # # Returns a set containing the read sets # ----------------------------------------------------------------------------- def compute_read_sets(self,C, ntrans, nullable): FP = lambda x: self.dr_relation(C,x,nullable) R = lambda x: self.reads_relation(C,x,nullable) F = digraph(ntrans,R,FP) return F # ----------------------------------------------------------------------------- # compute_follow_sets() # # Given a set of LR(0) items, a set of non-terminal transitions, a readset, # and an include set, this function computes the follow sets # # Follow(p,A) = Read(p,A) U U {Follow(p',B) | (p,A) INCLUDES (p',B)} # # Inputs: # ntrans = Set of nonterminal transitions # readsets = Readset (previously computed) # inclsets = Include sets (previously computed) # # Returns a set containing the follow sets # ----------------------------------------------------------------------------- def compute_follow_sets(self,ntrans,readsets,inclsets): FP = lambda x: readsets[x] R = lambda x: inclsets.get(x,[]) F = digraph(ntrans,R,FP) return F # ----------------------------------------------------------------------------- # add_lookaheads() # # Attaches the lookahead symbols to grammar rules. 
    # -----------------------------------------------------------------------------
    # add_lookaheads()
    #
    # Attaches the lookahead symbols to grammar rules.
    #
    # Inputs:    lookbacks         -  Set of lookback relations
    #            followset         -  Computed follow set
    #
    # This function directly attaches the lookaheads to productions contained
    # in the lookbacks set
    # -----------------------------------------------------------------------------

    def add_lookaheads(self,lookbacks,followset):
        for trans,lb in lookbacks.items():
            # Loop over productions in lookback
            for state,p in lb:
                if not state in p.lookaheads:
                    p.lookaheads[state] = []
                f = followset.get(trans,[])
                for a in f:
                    if a not in p.lookaheads[state]: p.lookaheads[state].append(a)

    # -----------------------------------------------------------------------------
    # add_lalr_lookaheads()
    #
    # This function does all of the work of adding lookahead information for use
    # with LALR parsing
    # -----------------------------------------------------------------------------

    def add_lalr_lookaheads(self,C):
        """Run the full DeRemer-Pennello pipeline and attach lookaheads to rules."""
        # Determine all of the nullable nonterminals
        nullable = self.compute_nullable_nonterminals()

        # Find all non-terminal transitions
        trans = self.find_nonterminal_transitions(C)

        # Compute read sets
        readsets = self.compute_read_sets(C,trans,nullable)

        # Compute lookback/includes relations
        lookd, included = self.compute_lookback_includes(C,trans,nullable)

        # Compute LALR FOLLOW sets
        followsets = self.compute_follow_sets(trans,readsets,included)

        # Add all of the lookaheads
        self.add_lookaheads(lookd,followsets)

    # -----------------------------------------------------------------------------
    # lr_parse_table()
    #
    # This function constructs the parse tables for SLR or LALR
    # -----------------------------------------------------------------------------
    def lr_parse_table(self):
        """Build self.lr_action and self.lr_goto for every LR(0) state.

        Action encoding: 0 = accept, j > 0 = shift to state j,
        -n < 0 = reduce by rule n, None = explicit error ('nonassoc').
        """
        Productions = self.grammar.Productions
        Precedence = self.grammar.Precedence
        goto = self.lr_goto         # Goto array
        action = self.lr_action     # Action array
        log = self.log              # Logger for output

        actionp = { }               # Action production array (temporary)

        log.info("Parsing method: %s", self.lr_method)

        # Step 1: Construct C = { I0, I1, ... IN}, collection of LR(0) items
        # This determines the number of states

        C = self.lr0_items()

        if self.lr_method == 'LALR':
            self.add_lalr_lookaheads(C)

        # Build the parser table, state by state
        st = 0
        for I in C:
            # Loop over each production in I
            actlist = [ ]              # List of actions
            st_action = { }
            st_actionp = { }
            st_goto = { }
            log.info("")
            log.info("state %d", st)
            log.info("")
            for p in I:
                log.info(" (%d) %s", p.number, str(p))
            log.info("")

            for p in I:
                if p.len == p.lr_index + 1:
                    if p.name == "S'":
                        # Start symbol. Accept!
                        st_action["$end"] = 0
                        st_actionp["$end"] = p
                    else:
                        # We are at the end of a production.  Reduce!
                        if self.lr_method == 'LALR':
                            laheads = p.lookaheads[st]
                        else:
                            laheads = self.grammar.Follow[p.name]
                        for a in laheads:
                            actlist.append((a,p,"reduce using rule %d (%s)" % (p.number,p)))
                            r = st_action.get(a,None)
                            if r is not None:
                                # Whoa. Have a shift/reduce or reduce/reduce conflict
                                if r > 0:
                                    # Need to decide on shift or reduce here
                                    # By default we favor shifting. Need to add
                                    # some precedence rules here.
                                    sprec,slevel = Productions[st_actionp[a].number].prec
                                    rprec,rlevel = Precedence.get(a,('right',0))
                                    if (slevel < rlevel) or ((slevel == rlevel) and (rprec == 'left')):
                                        # We really need to reduce here.
                                        st_action[a] = -p.number
                                        st_actionp[a] = p
                                        if not slevel and not rlevel:
                                            log.info(" ! shift/reduce conflict for %s resolved as reduce",a)
                                            self.sr_conflicts.append((st,a,'reduce'))
                                        Productions[p.number].reduced += 1
                                    elif (slevel == rlevel) and (rprec == 'nonassoc'):
                                        st_action[a] = None
                                    else:
                                        # Hmmm. Guess we'll keep the shift
                                        if not rlevel:
                                            log.info(" ! shift/reduce conflict for %s resolved as shift",a)
                                            self.sr_conflicts.append((st,a,'shift'))
                                elif r < 0:
                                    # Reduce/reduce conflict.   In this case, we favor the rule
                                    # that was defined first in the grammar file
                                    oldp = Productions[-r]
                                    pp = Productions[p.number]
                                    if oldp.line > pp.line:
                                        st_action[a] = -p.number
                                        st_actionp[a] = p
                                        chosenp,rejectp = pp,oldp
                                        Productions[p.number].reduced += 1
                                        Productions[oldp.number].reduced -= 1
                                    else:
                                        chosenp,rejectp = oldp,pp
                                    self.rr_conflicts.append((st,chosenp,rejectp))
                                    log.info(" ! reduce/reduce conflict for %s resolved using rule %d (%s)", a,st_actionp[a].number, st_actionp[a])
                                else:
                                    raise LALRError("Unknown conflict in state %d" % st)
                            else:
                                st_action[a] = -p.number
                                st_actionp[a] = p
                                Productions[p.number].reduced += 1
                else:
                    i = p.lr_index
                    a = p.prod[i+1]       # Get symbol right after the "."
                    if a in self.grammar.Terminals:
                        g = self.lr0_goto(I,a)
                        j = self.lr0_cidhash.get(id(g),-1)
                        if j >= 0:
                            # We are in a shift state
                            actlist.append((a,p,"shift and go to state %d" % j))
                            r = st_action.get(a,None)
                            if r is not None:
                                # Whoa have a shift/reduce or shift/shift conflict
                                if r > 0:
                                    if r != j:
                                        raise LALRError("Shift/shift conflict in state %d" % st)
                                elif r < 0:
                                    # Do a precedence check.
                                    #   -  if precedence of reduce rule is higher, we reduce.
                                    #   -  if precedence of reduce is same and left assoc, we reduce.
                                    #   -  otherwise we shift
                                    rprec,rlevel = Productions[st_actionp[a].number].prec
                                    sprec,slevel = Precedence.get(a,('right',0))
                                    if (slevel > rlevel) or ((slevel == rlevel) and (rprec == 'right')):
                                        # We decide to shift here... highest precedence to shift
                                        Productions[st_actionp[a].number].reduced -= 1
                                        st_action[a] = j
                                        st_actionp[a] = p
                                        if not rlevel:
                                            log.info(" ! shift/reduce conflict for %s resolved as shift",a)
                                            self.sr_conflicts.append((st,a,'shift'))
                                    elif (slevel == rlevel) and (rprec == 'nonassoc'):
                                        st_action[a] = None
                                    else:
                                        # Hmmm. Guess we'll keep the reduce
                                        if not slevel and not rlevel:
                                            log.info(" ! shift/reduce conflict for %s resolved as reduce",a)
                                            self.sr_conflicts.append((st,a,'reduce'))
                                else:
                                    raise LALRError("Unknown conflict in state %d" % st)
                            else:
                                st_action[a] = j
                                st_actionp[a] = p

            # Print the actions associated with each terminal
            _actprint = { }
            for a,p,m in actlist:
                if a in st_action:
                    if p is st_actionp[a]:
                        log.info(" %-15s %s",a,m)
                        _actprint[(a,m)] = 1
            log.info("")
            # Print the actions that were not used. (debugging)
            not_used = 0
            for a,p,m in actlist:
                if a in st_action:
                    if p is not st_actionp[a]:
                        if not (a,m) in _actprint:
                            log.debug(" ! %-15s [ %s ]",a,m)
                            not_used = 1
                            _actprint[(a,m)] = 1
            if not_used:
                log.debug("")

            # Construct the goto table for this state

            nkeys = { }
            for ii in I:
                for s in ii.usyms:
                    if s in self.grammar.Nonterminals:
                        nkeys[s] = None
            for n in nkeys:
                g = self.lr0_goto(I,n)
                j = self.lr0_cidhash.get(id(g),-1)
                if j >= 0:
                    st_goto[n] = j
                    log.info(" %-30s shift and go to state %d",n,j)

            action[st] = st_action
            actionp[st] = st_actionp
            goto[st] = st_goto
            st += 1

    # -----------------------------------------------------------------------------
    # write()
    #
    # This function writes the LR parsing tables to a file
    # -----------------------------------------------------------------------------

    def write_table(self,modulename,outputdir='',signature=""):
        """Write the tables as an importable Python module (parsetab-style).

        NOTE(review): the exact whitespace inside the generated-file string
        literals below could not be recovered from the available text —
        confirm against an upstream copy before relying on byte-exact output.
        """
        basemodulename = modulename.split(".")[-1]
        filename = os.path.join(outputdir,basemodulename) + ".py"
        try:
            f = open(filename,"w")

            f.write("""
# %s
# This file is automatically generated. Do not edit.
_tabversion = %r

_lr_method = %r

_lr_signature = %r
""" % (filename, __tabversion__, self.lr_method, signature))

            # Change smaller to 0 to go back to original tables
            smaller = 1

            # Factor out names to try and make smaller
            if smaller:
                items = { }

                for s,nd in self.lr_action.items():
                    for name,v in nd.items():
                        i = items.get(name)
                        if not i:
                            i = ([],[])
                            items[name] = i
                        i[0].append(s)
                        i[1].append(v)

                f.write("\n_lr_action_items = {")
                for k,v in items.items():
                    f.write("%r:([" % k)
                    for i in v[0]:
                        f.write("%r," % i)
                    f.write("],[")
                    for i in v[1]:
                        f.write("%r," % i)
                    f.write("]),")
                f.write("}\n")

                f.write("""
_lr_action = { }
for _k, _v in _lr_action_items.items():
   for _x,_y in zip(_v[0],_v[1]):
      if not _x in _lr_action:  _lr_action[_x] = { }
      _lr_action[_x][_k] = _y
del _lr_action_items
""")

            else:
                f.write("\n_lr_action = { ");
                for k,v in self.lr_action.items():
                    f.write("(%r,%r):%r," % (k[0],k[1],v))
                f.write("}\n");

            if smaller:
                # Factor out names to try and make smaller
                items = { }

                for s,nd in self.lr_goto.items():
                    for name,v in nd.items():
                        i = items.get(name)
                        if not i:
                            i = ([],[])
                            items[name] = i
                        i[0].append(s)
                        i[1].append(v)

                f.write("\n_lr_goto_items = {")
                for k,v in items.items():
                    f.write("%r:([" % k)
                    for i in v[0]:
                        f.write("%r," % i)
                    f.write("],[")
                    for i in v[1]:
                        f.write("%r," % i)
                    f.write("]),")
                f.write("}\n")

                f.write("""
_lr_goto = { }
for _k, _v in _lr_goto_items.items():
   for _x,_y in zip(_v[0],_v[1]):
       if not _x in _lr_goto: _lr_goto[_x] = { }
       _lr_goto[_x][_k] = _y
del _lr_goto_items
""")
            else:
                f.write("\n_lr_goto = { ");
                for k,v in self.lr_goto.items():
                    f.write("(%r,%r):%r," % (k[0],k[1],v))
                f.write("}\n");

            # Write production table
            f.write("_lr_productions = [\n")
            for p in self.lr_productions:
                if p.func:
                    f.write(" (%r,%r,%d,%r,%r,%d),\n" % (p.str,p.name, p.len, p.func,p.file,p.line))
                else:
                    f.write(" (%r,%r,%d,None,None,None),\n" % (str(p),p.name, p.len))
            f.write("]\n")
            f.close()

        except IOError:
            e = sys.exc_info()[1]
            sys.stderr.write("Unable to create '%s'\n" % filename)
            sys.stderr.write(str(e)+"\n")
            return

    # -----------------------------------------------------------------------------
    # pickle_table()
    #
    # This function pickles the LR parsing tables to a supplied file object
    # -----------------------------------------------------------------------------

    def pickle_table(self,filename,signature=""):
        """Pickle the tables to *filename*.

        Dump order must match LRTable.read_pickle(): tabversion, method,
        signature, action, goto, productions.
        """
        try:
            import cPickle as pickle
        except ImportError:
            import pickle
        outf = open(filename,"wb")
        pickle.dump(__tabversion__,outf,pickle_protocol)
        pickle.dump(self.lr_method,outf,pickle_protocol)
        pickle.dump(signature,outf,pickle_protocol)
        pickle.dump(self.lr_action,outf,pickle_protocol)
        pickle.dump(self.lr_goto,outf,pickle_protocol)
        outp = []
        for p in self.lr_productions:
            if p.func:
                outp.append((p.str,p.name, p.len, p.func,p.file,p.line))
            else:
                outp.append((str(p),p.name,p.len,None,None,None))
        pickle.dump(outp,outf,pickle_protocol)
        outf.close()

# -----------------------------------------------------------------------------
#                            === INTROSPECTION ===
#
# The following functions and classes are used to implement the PLY
# introspection features followed by the yacc() function itself.
# -----------------------------------------------------------------------------

# -----------------------------------------------------------------------------
# get_caller_module_dict()
#
# This function returns a dictionary containing all of the symbols defined within
# a caller further down the call stack.  This is used to get the environment
# associated with the yacc() call if none was provided.
# -----------------------------------------------------------------------------

def get_caller_module_dict(levels):
    """Return a merged globals/locals dict for the frame *levels* up the stack.

    Used to recover the environment of the yacc() caller when no module is
    supplied explicitly.
    """
    try:
        raise RuntimeError
    except RuntimeError:
        # The freshly raised exception's traceback points at this frame;
        # walk backwards from it the requested number of levels.
        tb = sys.exc_info()[2]
        frame = tb.tb_frame
        for _ in range(levels):
            frame = frame.f_back
        symbols = frame.f_globals.copy()
        if frame.f_globals != frame.f_locals:
            symbols.update(frame.f_locals)
        return symbols

# -----------------------------------------------------------------------------
# parse_grammar()
#
# This takes a raw grammar rule string and parses it into production data
# -----------------------------------------------------------------------------
def parse_grammar(doc,file,line):
    """Parse a rule doc string into a list of (file, lineno, prodname, syms).

    *line* is the line number of the doc string's defining function; rule
    lines are numbered starting at line+1.  '|' continues the previous rule.
    Raises SyntaxError on malformed rules.
    """
    grammar = []
    lastp = None
    for lineno, raw in enumerate(doc.splitlines(), start=line+1):
        words = raw.split()
        if not words:
            continue
        try:
            if words[0] == '|':
                # This is a continuation of a previous rule
                if not lastp:
                    raise SyntaxError("%s:%d: Misplaced '|'" % (file,lineno))
                prodname, syms = lastp, words[1:]
            else:
                prodname = words[0]
                lastp = prodname
                syms = words[2:]
                assign = words[1]
                if assign != ':' and assign != '::=':
                    raise SyntaxError("%s:%d: Syntax error. Expected ':'" % (file,lineno))
            grammar.append((file,lineno,prodname,syms))
        except SyntaxError:
            raise
        except Exception:
            # e.g. a rule with a name but no ':' (IndexError on words[1])
            raise SyntaxError("%s:%d: Syntax error in rule '%s'" % (file,lineno,raw.strip()))
    return grammar

# -----------------------------------------------------------------------------
# ParserReflect()
#
# This class represents information extracted for building a parser including
# start symbol, error function, tokens, precedence list, action functions,
# etc.
# ----------------------------------------------------------------------------- class ParserReflect(object): def __init__(self,pdict,log=None): self.pdict = pdict self.start = None self.error_func = None self.tokens = None self.files = {} self.grammar = [] self.error = 0 if log is None: self.log = PlyLogger(sys.stderr) else: self.log = log # Get all of the basic information def get_all(self): self.get_start() self.get_error_func() self.get_tokens() self.get_precedence() self.get_pfunctions() # Validate all of the information def validate_all(self): self.validate_start() self.validate_error_func() self.validate_tokens() self.validate_precedence() self.validate_pfunctions() self.validate_files() return self.error # Compute a signature over the grammar def signature(self): try: from hashlib import md5 except ImportError: from md5 import md5 try: sig = md5() if self.start: sig.update(self.start.encode('latin-1')) if self.prec: sig.update("".join(["".join(p) for p in self.prec]).encode('latin-1')) if self.tokens: sig.update(" ".join(self.tokens).encode('latin-1')) for f in self.pfuncs: if f[3]: sig.update(f[3].encode('latin-1')) except (TypeError,ValueError): pass return sig.digest() # ----------------------------------------------------------------------------- # validate_file() # # This method checks to see if there are duplicated p_rulename() functions # in the parser module file. Without this function, it is really easy for # users to make mistakes by cutting and pasting code fragments (and it's a real # bugger to try and figure out why the resulting parser doesn't work). Therefore, # we just do a little regular expression pattern matching of def statements # to try and detect duplicates. 
# ----------------------------------------------------------------------------- def validate_files(self): # Match def p_funcname( fre = re.compile(r'\s*def\s+(p_[a-zA-Z_0-9]*)\(') for filename in self.files.keys(): base,ext = os.path.splitext(filename) if ext != '.py': return 1 # No idea. Assume it's okay. try: f = open(filename) lines = f.readlines() f.close() except IOError: continue counthash = { } for linen,l in enumerate(lines): linen += 1 m = fre.match(l) if m: name = m.group(1) prev = counthash.get(name) if not prev: counthash[name] = linen else: self.log.warning("%s:%d: Function %s redefined. Previously defined on line %d", filename,linen,name,prev) # Get the start symbol def get_start(self): self.start = self.pdict.get('start') # Validate the start symbol def validate_start(self): if self.start is not None: if not isinstance(self.start,str): self.log.error("'start' must be a string") # Look for error handler def get_error_func(self): self.error_func = self.pdict.get('p_error') # Validate the error function def validate_error_func(self): if self.error_func: if isinstance(self.error_func,types.FunctionType): ismethod = 0 elif isinstance(self.error_func, types.MethodType): ismethod = 1 else: self.log.error("'p_error' defined, but is not a function or method") self.error = 1 return eline = func_code(self.error_func).co_firstlineno efile = func_code(self.error_func).co_filename self.files[efile] = 1 if (func_code(self.error_func).co_argcount != 1+ismethod): self.log.error("%s:%d: p_error() requires 1 argument",efile,eline) self.error = 1 # Get the tokens map def get_tokens(self): tokens = self.pdict.get("tokens",None) if not tokens: self.log.error("No token list is defined") self.error = 1 return if not isinstance(tokens,(list, tuple)): self.log.error("tokens must be a list or tuple") self.error = 1 return if not tokens: self.log.error("tokens is empty") self.error = 1 return self.tokens = tokens # Validate the tokens def validate_tokens(self): # Validate the 
tokens. if 'error' in self.tokens: self.log.error("Illegal token name 'error'. Is a reserved word") self.error = 1 return terminals = {} for n in self.tokens: if n in terminals: self.log.warning("Token '%s' multiply defined", n) terminals[n] = 1 # Get the precedence map (if any) def get_precedence(self): self.prec = self.pdict.get("precedence",None) # Validate and parse the precedence map def validate_precedence(self): preclist = [] if self.prec: if not isinstance(self.prec,(list,tuple)): self.log.error("precedence must be a list or tuple") self.error = 1 return for level,p in enumerate(self.prec): if not isinstance(p,(list,tuple)): self.log.error("Bad precedence table") self.error = 1 return if len(p) < 2: self.log.error("Malformed precedence entry %s. Must be (assoc, term, ..., term)",p) self.error = 1 return assoc = p[0] if not isinstance(assoc,str): self.log.error("precedence associativity must be a string") self.error = 1 return for term in p[1:]: if not isinstance(term,str): self.log.error("precedence items must be strings") self.error = 1 return preclist.append((term,assoc,level+1)) self.preclist = preclist # Get all p_functions from the grammar def get_pfunctions(self): p_functions = [] for name, item in self.pdict.items(): if name[:2] != 'p_': continue if name == 'p_error': continue if isinstance(item,(types.FunctionType,types.MethodType)): line = func_code(item).co_firstlineno file = func_code(item).co_filename p_functions.append((line,file,name,item.__doc__)) # Sort all of the actions by line number p_functions.sort() self.pfuncs = p_functions # Validate all of the p_functions def validate_pfunctions(self): grammar = [] # Check for non-empty symbols if len(self.pfuncs) == 0: self.log.error("no rules of the form p_rulename are defined") self.error = 1 return for line, file, name, doc in self.pfuncs: func = self.pdict[name] if isinstance(func, types.MethodType): reqargs = 2 else: reqargs = 1 if func_code(func).co_argcount > reqargs: self.log.error("%s:%d: 
Rule '%s' has too many arguments",file,line,func.__name__) self.error = 1 elif func_code(func).co_argcount < reqargs: self.log.error("%s:%d: Rule '%s' requires an argument",file,line,func.__name__) self.error = 1 elif not func.__doc__: self.log.warning("%s:%d: No documentation string specified in function '%s' (ignored)",file,line,func.__name__) else: try: parsed_g = parse_grammar(doc,file,line) for g in parsed_g: grammar.append((name, g)) except SyntaxError: e = sys.exc_info()[1] self.log.error(str(e)) self.error = 1 # Looks like a valid grammar rule # Mark the file in which defined. self.files[file] = 1 # Secondary validation step that looks for p_ definitions that are not functions # or functions that look like they might be grammar rules. for n,v in self.pdict.items(): if n[0:2] == 'p_' and isinstance(v, (types.FunctionType, types.MethodType)): continue if n[0:2] == 't_': continue if n[0:2] == 'p_' and n != 'p_error': self.log.warning("'%s' not defined as a function", n) if ((isinstance(v,types.FunctionType) and func_code(v).co_argcount == 1) or (isinstance(v,types.MethodType) and func_code(v).co_argcount == 2)): try: doc = v.__doc__.split(" ") if doc[1] == ':': self.log.warning("%s:%d: Possible grammar rule '%s' defined without p_ prefix", func_code(v).co_filename, func_code(v).co_firstlineno,n) except Exception: pass self.grammar = grammar # ----------------------------------------------------------------------------- # yacc(module) # # Build a parser # ----------------------------------------------------------------------------- def yacc(method='LALR', debug=yaccdebug, module=None, tabmodule=tab_module, start=None, check_recursion=1, optimize=0, write_tables=1, debugfile=debug_file,outputdir='', debuglog=None, errorlog = None, picklefile=None): global parse # Reference to the parsing method of the last built parser # If pickling is enabled, table files are not created if picklefile: write_tables = 0 if errorlog is None: errorlog = PlyLogger(sys.stderr) # Get 
the module dictionary used for the parser if module: _items = [(k,getattr(module,k)) for k in dir(module)] pdict = dict(_items) else: pdict = get_caller_module_dict(2) # Collect parser information from the dictionary pinfo = ParserReflect(pdict,log=errorlog) pinfo.get_all() if pinfo.error: raise YaccError("Unable to build parser") # Check signature against table files (if any) signature = pinfo.signature() # Read the tables try: lr = LRTable() if picklefile: read_signature = lr.read_pickle(picklefile) else: read_signature = lr.read_table(tabmodule) if optimize or (read_signature == signature): try: lr.bind_callables(pinfo.pdict) parser = LRParser(lr,pinfo.error_func) parse = parser.parse return parser except Exception: e = sys.exc_info()[1] errorlog.warning("There was a problem loading the table file: %s", repr(e)) except VersionError: e = sys.exc_info() errorlog.warning(str(e)) except Exception: pass if debuglog is None: if debug: debuglog = PlyLogger(open(debugfile,"w")) else: debuglog = NullLogger() debuglog.info("Created by PLY version %s (http://www.dabeaz.com/ply)", __version__) errors = 0 # Validate the parser information if pinfo.validate_all(): raise YaccError("Unable to build parser") if not pinfo.error_func: errorlog.warning("no p_error() function is defined") # Create a grammar object grammar = Grammar(pinfo.tokens) # Set precedence level for terminals for term, assoc, level in pinfo.preclist: try: grammar.set_precedence(term,assoc,level) except GrammarError: e = sys.exc_info()[1] errorlog.warning("%s",str(e)) # Add productions to the grammar for funcname, gram in pinfo.grammar: file, line, prodname, syms = gram try: grammar.add_production(prodname,syms,funcname,file,line) except GrammarError: e = sys.exc_info()[1] errorlog.error("%s",str(e)) errors = 1 # Set the grammar start symbols try: if start is None: grammar.set_start(pinfo.start) else: grammar.set_start(start) except GrammarError: e = sys.exc_info()[1] errorlog.error(str(e)) errors = 1 if 
errors: raise YaccError("Unable to build parser") # Verify the grammar structure undefined_symbols = grammar.undefined_symbols() for sym, prod in undefined_symbols: errorlog.error("%s:%d: Symbol '%s' used, but not defined as a token or a rule",prod.file,prod.line,sym) errors = 1 unused_terminals = grammar.unused_terminals() if unused_terminals: debuglog.info("") debuglog.info("Unused terminals:") debuglog.info("") for term in unused_terminals: errorlog.warning("Token '%s' defined, but not used", term) debuglog.info(" %s", term) # Print out all productions to the debug log if debug: debuglog.info("") debuglog.info("Grammar") debuglog.info("") for n,p in enumerate(grammar.Productions): debuglog.info("Rule %-5d %s", n, p) # Find unused non-terminals unused_rules = grammar.unused_rules() for prod in unused_rules: errorlog.warning("%s:%d: Rule '%s' defined, but not used", prod.file, prod.line, prod.name) if len(unused_terminals) == 1: errorlog.warning("There is 1 unused token") if len(unused_terminals) > 1: errorlog.warning("There are %d unused tokens", len(unused_terminals)) if len(unused_rules) == 1: errorlog.warning("There is 1 unused rule") if len(unused_rules) > 1: errorlog.warning("There are %d unused rules", len(unused_rules)) if debug: debuglog.info("") debuglog.info("Terminals, with rules where they appear") debuglog.info("") terms = list(grammar.Terminals) terms.sort() for term in terms: debuglog.info("%-20s : %s", term, " ".join([str(s) for s in grammar.Terminals[term]])) debuglog.info("") debuglog.info("Nonterminals, with rules where they appear") debuglog.info("") nonterms = list(grammar.Nonterminals) nonterms.sort() for nonterm in nonterms: debuglog.info("%-20s : %s", nonterm, " ".join([str(s) for s in grammar.Nonterminals[nonterm]])) debuglog.info("") if check_recursion: unreachable = grammar.find_unreachable() for u in unreachable: errorlog.warning("Symbol '%s' is unreachable",u) infinite = grammar.infinite_cycles() for inf in infinite: 
errorlog.error("Infinite recursion detected for symbol '%s'", inf) errors = 1 unused_prec = grammar.unused_precedence() for term, assoc in unused_prec: errorlog.error("Precedence rule '%s' defined for unknown symbol '%s'", assoc, term) errors = 1 if errors: raise YaccError("Unable to build parser") # Run the LRGeneratedTable on the grammar if debug: errorlog.debug("Generating %s tables", method) lr = LRGeneratedTable(grammar,method,debuglog) if debug: num_sr = len(lr.sr_conflicts) # Report shift/reduce and reduce/reduce conflicts if num_sr == 1: errorlog.warning("1 shift/reduce conflict") elif num_sr > 1: errorlog.warning("%d shift/reduce conflicts", num_sr) num_rr = len(lr.rr_conflicts) if num_rr == 1: errorlog.warning("1 reduce/reduce conflict") elif num_rr > 1: errorlog.warning("%d reduce/reduce conflicts", num_rr) # Write out conflicts to the output file if debug and (lr.sr_conflicts or lr.rr_conflicts): debuglog.warning("") debuglog.warning("Conflicts:") debuglog.warning("") for state, tok, resolution in lr.sr_conflicts: debuglog.warning("shift/reduce conflict for %s in state %d resolved as %s", tok, state, resolution) already_reported = {} for state, rule, rejected in lr.rr_conflicts: if (state,id(rule),id(rejected)) in already_reported: continue debuglog.warning("reduce/reduce conflict in state %d resolved using rule (%s)", state, rule) debuglog.warning("rejected rule (%s) in state %d", rejected,state) errorlog.warning("reduce/reduce conflict in state %d resolved using rule (%s)", state, rule) errorlog.warning("rejected rule (%s) in state %d", rejected, state) already_reported[state,id(rule),id(rejected)] = 1 warned_never = [] for state, rule, rejected in lr.rr_conflicts: if not rejected.reduced and (rejected not in warned_never): debuglog.warning("Rule (%s) is never reduced", rejected) errorlog.warning("Rule (%s) is never reduced", rejected) warned_never.append(rejected) # Write the table file if requested if write_tables: 
lr.write_table(tabmodule,outputdir,signature) # Write a pickled version of the tables if picklefile: lr.pickle_table(picklefile,signature) # Build the parser lr.bind_callables(pinfo.pdict) parser = LRParser(lr,pinfo.error_func) parse = parser.parse return parser
mit
surajssd/kuma
vendor/packages/nose/plugins/plugintest.py
69
13536
""" Testing Plugins =============== The plugin interface is well-tested enough to safely unit test your use of its hooks with some level of confidence. However, there is also a mixin for unittest.TestCase called PluginTester that's designed to test plugins in their native runtime environment. Here's a simple example with a do-nothing plugin and a composed suite. >>> import unittest >>> from nose.plugins import Plugin, PluginTester >>> class FooPlugin(Plugin): ... pass >>> class TestPluginFoo(PluginTester, unittest.TestCase): ... activate = '--with-foo' ... plugins = [FooPlugin()] ... def test_foo(self): ... for line in self.output: ... # i.e. check for patterns ... pass ... ... # or check for a line containing ... ... assert "ValueError" in self.output ... def makeSuite(self): ... class TC(unittest.TestCase): ... def runTest(self): ... raise ValueError("I hate foo") ... return [TC('runTest')] ... >>> res = unittest.TestResult() >>> case = TestPluginFoo('test_foo') >>> _ = case(res) >>> res.errors [] >>> res.failures [] >>> res.wasSuccessful() True >>> res.testsRun 1 And here is a more complex example of testing a plugin that has extra arguments and reads environment variables. >>> import unittest, os >>> from nose.plugins import Plugin, PluginTester >>> class FancyOutputter(Plugin): ... name = "fancy" ... def configure(self, options, conf): ... Plugin.configure(self, options, conf) ... if not self.enabled: ... return ... self.fanciness = 1 ... if options.more_fancy: ... self.fanciness = 2 ... if 'EVEN_FANCIER' in self.env: ... self.fanciness = 3 ... ... def options(self, parser, env=os.environ): ... self.env = env ... parser.add_option('--more-fancy', action='store_true') ... Plugin.options(self, parser, env=env) ... ... def report(self, stream): ... stream.write("FANCY " * self.fanciness) ... >>> class TestFancyOutputter(PluginTester, unittest.TestCase): ... activate = '--with-fancy' # enables the plugin ... plugins = [FancyOutputter()] ... 
args = ['--more-fancy'] ... env = {'EVEN_FANCIER': '1'} ... ... def test_fancy_output(self): ... assert "FANCY FANCY FANCY" in self.output, ( ... "got: %s" % self.output) ... def makeSuite(self): ... class TC(unittest.TestCase): ... def runTest(self): ... raise ValueError("I hate fancy stuff") ... return [TC('runTest')] ... >>> res = unittest.TestResult() >>> case = TestFancyOutputter('test_fancy_output') >>> _ = case(res) >>> res.errors [] >>> res.failures [] >>> res.wasSuccessful() True >>> res.testsRun 1 """ import re import sys from warnings import warn try: from cStringIO import StringIO except ImportError: from StringIO import StringIO __all__ = ['PluginTester', 'run'] from os import getpid class MultiProcessFile(object): """ helper for testing multiprocessing multiprocessing poses a problem for doctests, since the strategy of replacing sys.stdout/stderr with file-like objects then inspecting the results won't work: the child processes will write to the objects, but the data will not be reflected in the parent doctest-ing process. The solution is to create file-like objects which will interact with multiprocessing in a more desirable way. All processes can write to this object, but only the creator can read. This allows the testing system to see a unified picture of I/O. """ def __init__(self): # per advice at: # http://docs.python.org/library/multiprocessing.html#all-platforms self.__master = getpid() self.__queue = Manager().Queue() self.__buffer = StringIO() self.softspace = 0 def buffer(self): if getpid() != self.__master: return from Queue import Empty from collections import defaultdict cache = defaultdict(str) while True: try: pid, data = self.__queue.get_nowait() except Empty: break if pid == (): #show parent output after children #this is what users see, usually pid = ( 1e100, ) # googol! 
cache[pid] += data for pid in sorted(cache): #self.__buffer.write( '%s wrote: %r\n' % (pid, cache[pid]) ) #DEBUG self.__buffer.write( cache[pid] ) def write(self, data): # note that these pids are in the form of current_process()._identity # rather than OS pids from multiprocessing import current_process pid = current_process()._identity self.__queue.put((pid, data)) def __iter__(self): "getattr doesn't work for iter()" self.buffer() return self.__buffer def seek(self, offset, whence=0): self.buffer() return self.__buffer.seek(offset, whence) def getvalue(self): self.buffer() return self.__buffer.getvalue() def __getattr__(self, attr): return getattr(self.__buffer, attr) try: from multiprocessing import Manager Buffer = MultiProcessFile except ImportError: Buffer = StringIO class PluginTester(object): """A mixin for testing nose plugins in their runtime environment. Subclass this and mix in unittest.TestCase to run integration/functional tests on your plugin. When setUp() is called, the stub test suite is executed with your plugin so that during an actual test you can inspect the artifacts of how your plugin interacted with the stub test suite. - activate - the argument to send nosetests to activate the plugin - suitepath - if set, this is the path of the suite to test. Otherwise, you will need to use the hook, makeSuite() - plugins - the list of plugins to make available during the run. Note that this does not mean these plugins will be *enabled* during the run -- only the plugins enabled by the activate argument or other settings in argv or env will be enabled. - args - a list of arguments to add to the nosetests command, in addition to the activate argument - env - optional dict of environment variables to send nosetests """ activate = None suitepath = None args = None env = {} argv = None plugins = [] ignoreFiles = None def makeSuite(self): """returns a suite object of tests to run (unittest.TestSuite()) If self.suitepath is None, this must be implemented. 
The returned suite object will be executed with all plugins activated. It may return None. Here is an example of a basic suite object you can return :: >>> import unittest >>> class SomeTest(unittest.TestCase): ... def runTest(self): ... raise ValueError("Now do something, plugin!") ... >>> unittest.TestSuite([SomeTest()]) # doctest: +ELLIPSIS <unittest...TestSuite tests=[<...SomeTest testMethod=runTest>]> """ raise NotImplementedError def _execPlugin(self): """execute the plugin on the internal test suite. """ from nose.config import Config from nose.core import TestProgram from nose.plugins.manager import PluginManager suite = None stream = Buffer() conf = Config(env=self.env, stream=stream, plugins=PluginManager(plugins=self.plugins)) if self.ignoreFiles is not None: conf.ignoreFiles = self.ignoreFiles if not self.suitepath: suite = self.makeSuite() self.nose = TestProgram(argv=self.argv, config=conf, suite=suite, exit=False) self.output = AccessDecorator(stream) def setUp(self): """runs nosetests with the specified test suite, all plugins activated. """ self.argv = ['nosetests', self.activate] if self.args: self.argv.extend(self.args) if self.suitepath: self.argv.append(self.suitepath) self._execPlugin() class AccessDecorator(object): stream = None _buf = None def __init__(self, stream): self.stream = stream stream.seek(0) self._buf = stream.read() stream.seek(0) def __contains__(self, val): return val in self._buf def __iter__(self): return iter(self.stream) def __str__(self): return self._buf def blankline_separated_blocks(text): "a bunch of === characters is also considered a blank line" block = [] for line in text.splitlines(True): block.append(line) line = line.strip() if not line or line.startswith('===') and not line.strip('='): yield "".join(block) block = [] if block: yield "".join(block) def remove_stack_traces(out): # this regexp taken from Python 2.5's doctest traceback_re = re.compile(r""" # Grab the traceback header. 
Different versions of Python have # said different things on the first traceback line. ^(?P<hdr> Traceback\ \( (?: most\ recent\ call\ last | innermost\ last ) \) : ) \s* $ # toss trailing whitespace on the header. (?P<stack> .*?) # don't blink: absorb stuff until... ^(?=\w) # a line *starts* with alphanum. .*?(?P<exception> \w+ ) # exception name (?P<msg> [:\n] .*) # the rest """, re.VERBOSE | re.MULTILINE | re.DOTALL) blocks = [] for block in blankline_separated_blocks(out): blocks.append(traceback_re.sub(r"\g<hdr>\n...\n\g<exception>\g<msg>", block)) return "".join(blocks) def simplify_warnings(out): warn_re = re.compile(r""" # Cut the file and line no, up to the warning name ^.*:\d+:\s (?P<category>\w+): \s+ # warning category (?P<detail>.+) $ \n? # warning message ^ .* $ # stack frame """, re.VERBOSE | re.MULTILINE) return warn_re.sub(r"\g<category>: \g<detail>", out) def remove_timings(out): return re.sub( r"Ran (\d+ tests?) in [0-9.]+s", r"Ran \1 in ...s", out) def munge_nose_output_for_doctest(out): """Modify nose output to make it easy to use in doctests.""" out = remove_stack_traces(out) out = simplify_warnings(out) out = remove_timings(out) return out.strip() def run(*arg, **kw): """ Specialized version of nose.run for use inside of doctests that test test runs. This version of run() prints the result output to stdout. Before printing, the output is processed by replacing the timing information with an ellipsis (...), removing traceback stacks, and removing trailing whitespace. Use this version of run wherever you are writing a doctest that tests nose (or unittest) test result output. Note: do not use doctest: +ELLIPSIS when testing nose output, since ellipses ("test_foo ... ok") in your expected test runner output may match multiple lines of output, causing spurious test passes! 
""" from nose import run from nose.config import Config from nose.plugins.manager import PluginManager buffer = Buffer() if 'config' not in kw: plugins = kw.pop('plugins', []) if isinstance(plugins, list): plugins = PluginManager(plugins=plugins) env = kw.pop('env', {}) kw['config'] = Config(env=env, plugins=plugins) if 'argv' not in kw: kw['argv'] = ['nosetests', '-v'] kw['config'].stream = buffer # Set up buffering so that all output goes to our buffer, # or warn user if deprecated behavior is active. If this is not # done, prints and warnings will either be out of place or # disappear. stderr = sys.stderr stdout = sys.stdout if kw.pop('buffer_all', False): sys.stdout = sys.stderr = buffer restore = True else: restore = False warn("The behavior of nose.plugins.plugintest.run() will change in " "the next release of nose. The current behavior does not " "correctly account for output to stdout and stderr. To enable " "correct behavior, use run_buffered() instead, or pass " "the keyword argument buffer_all=True to run().", DeprecationWarning, stacklevel=2) try: run(*arg, **kw) finally: if restore: sys.stderr = stderr sys.stdout = stdout out = buffer.getvalue() print munge_nose_output_for_doctest(out) def run_buffered(*arg, **kw): kw['buffer_all'] = True run(*arg, **kw) if __name__ == '__main__': import doctest doctest.testmod()
mpl-2.0
Kynarth/pyqtcli
tests/test_makealias.py
1
2912
import os from click.testing import CliRunner from pyqtcli.cli import pyqtcli from pyqtcli.qrc import read_qrc from pyqtcli.test.qrc import QRCTestFile from pyqtcli.test.verbose import format_msg def test_makealias_with_on_file(): runner = CliRunner() # Create false qrc files for testing ( QRCTestFile("res.qrc") .add_qresource().add_file("file.txt").add_file("test.txt") .add_qresource("/images").add_file("images/icon.png") .build() ) # Launch makerc command result = runner.invoke(pyqtcli, ["makealias", "-v", "res.qrc"]) assert result.exit_code == 0 qrc = read_qrc("res.qrc") for res in qrc.list_files(): assert res.attrib["alias"] == os.path.basename(res.text) def test_makealias_with_duplication(): runner = CliRunner() # Create false qrc files for testing ( QRCTestFile("res.qrc") .add_qresource("/").add_file("file.txt").add_file("test/file.txt") .build() ) # Launch makerc command result = runner.invoke(pyqtcli, ["makealias", "-v", "res.qrc"]) # Verify that only first resource get its alias qrc = read_qrc("res.qrc") files = qrc.list_files() assert files[0].attrib.get("alias", None) assert files[1].attrib.get("alias", None) is None # Check if a warning has been send assert ("[WARNING]: Alias \'file.txt\' already exists in \'res.qrc\'" " at prefix \'/\'.") in format_msg(result.output) def test_makealias_recursive(): runner = CliRunner() # Create false qrc files for testing qrc = ( QRCTestFile("res.qrc") .add_qresource("/").add_file("file.txt") .add_qresource("/test").add_file("test/file.txt") .build() ) sub_qrc = ( QRCTestFile("res", "sub") .add_qresource("/").add_file("file.txt") .add_qresource("/test").add_file("test/file.txt") .build() ) sub_sub = ( QRCTestFile("res", "sub/sub") .add_qresource("/").add_file("file.txt") .add_qresource("/test").add_file("test/file.txt") .build() ) other_sub = ( QRCTestFile("res", "other") .add_qresource("/").add_file("file.txt") .add_qresource("/test").add_file("test/file.txt") .build() ) qrcs = [qrc, sub_qrc, sub_sub, other_sub] # 
Launch makerc command runner.invoke(pyqtcli, ["makealias", "-v", "-r"]) # Assert all qrc resources get their alias for qrc_file in qrcs: cur_qrc = read_qrc(qrc_file.path) for resource in cur_qrc.list_files(): assert resource.attrib.get("alias", None) def test_makealias_recursive_with_no_qrc(): runner = CliRunner() # Launch makerc command result = runner.invoke(pyqtcli, ["makealias", "-r"]) assert format_msg(result.output).startswith( "[ERROR]: Could not find any qrc files.")
mit
iPodLinux/linux-2.6.7-ipod
arch/ia64/scripts/unwcheck.py
916
1718
#!/usr/bin/env python # # Usage: unwcheck.py FILE # # This script checks the unwind info of each function in file FILE # and verifies that the sum of the region-lengths matches the total # length of the function. # # Based on a shell/awk script originally written by Harish Patil, # which was converted to Perl by Matthew Chapman, which was converted # to Python by David Mosberger. # import os import re import sys if len(sys.argv) != 2: print "Usage: %s FILE" % sys.argv[0] sys.exit(2) readelf = os.getenv("READELF", "readelf") start_pattern = re.compile("<([^>]*)>: \[0x([0-9a-f]+)-0x([0-9a-f]+)\]") rlen_pattern = re.compile(".*rlen=([0-9]+)") def check_func (func, slots, rlen_sum): if slots != rlen_sum: global num_errors num_errors += 1 if not func: func = "[%#x-%#x]" % (start, end) print "ERROR: %s: %lu slots, total region length = %lu" % (func, slots, rlen_sum) return num_funcs = 0 num_errors = 0 func = False slots = 0 rlen_sum = 0 for line in os.popen("%s -u %s" % (readelf, sys.argv[1])): m = start_pattern.match(line) if m: check_func(func, slots, rlen_sum) func = m.group(1) start = long(m.group(2), 16) end = long(m.group(3), 16) slots = 3 * (end - start) / 16 rlen_sum = 0L num_funcs += 1 else: m = rlen_pattern.match(line) if m: rlen_sum += long(m.group(1)) check_func(func, slots, rlen_sum) if num_errors == 0: print "No errors detected in %u functions." % num_funcs else: if num_errors > 1: err="errors" else: err="error" print "%u %s detected in %u functions." % (num_errors, err, num_funcs) sys.exit(1)
gpl-2.0
x303597316/hue
desktop/core/ext-py/tablib-0.10.0/tablib/packages/openpyxl/reader/style.py
116
2951
# file openpyxl/reader/style.py # Copyright (c) 2010 openpyxl # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. 
# # @license: http://www.opensource.org/licenses/mit-license.php # @author: Eric Gazoni """Read shared style definitions""" # package imports from ..shared.xmltools import fromstring, QName from ..shared.exc import MissingNumberFormat from ..style import Style, NumberFormat def read_style_table(xml_source): """Read styles from the shared style table""" table = {} xmlns = 'http://schemas.openxmlformats.org/spreadsheetml/2006/main' root = fromstring(xml_source) custom_num_formats = parse_custom_num_formats(root, xmlns) builtin_formats = NumberFormat._BUILTIN_FORMATS cell_xfs = root.find(QName(xmlns, 'cellXfs').text) cell_xfs_nodes = cell_xfs.findall(QName(xmlns, 'xf').text) for index, cell_xfs_node in enumerate(cell_xfs_nodes): new_style = Style() number_format_id = int(cell_xfs_node.get('numFmtId')) if number_format_id < 164: new_style.number_format.format_code = \ builtin_formats.get(number_format_id, 'General') else: if number_format_id in custom_num_formats: new_style.number_format.format_code = \ custom_num_formats[number_format_id] else: raise MissingNumberFormat('%s' % number_format_id) table[index] = new_style return table def parse_custom_num_formats(root, xmlns): """Read in custom numeric formatting rules from the shared style table""" custom_formats = {} num_fmts = root.find(QName(xmlns, 'numFmts').text) if num_fmts is not None: num_fmt_nodes = num_fmts.findall(QName(xmlns, 'numFmt').text) for num_fmt_node in num_fmt_nodes: custom_formats[int(num_fmt_node.get('numFmtId'))] = \ num_fmt_node.get('formatCode') return custom_formats
apache-2.0
mick-d/nipype_source
nipype/interfaces/spm/tests/test_auto_CalcCoregAffine.py
14
1099
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.spm.utils import CalcCoregAffine def test_CalcCoregAffine_inputs(): input_map = dict(ignore_exception=dict(nohash=True, usedefault=True, ), invmat=dict(), mat=dict(), matlab_cmd=dict(), mfile=dict(usedefault=True, ), moving=dict(copyfile=False, mandatory=True, ), paths=dict(), target=dict(mandatory=True, ), use_mcr=dict(), use_v8struct=dict(min_ver='8', usedefault=True, ), ) inputs = CalcCoregAffine.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_CalcCoregAffine_outputs(): output_map = dict(invmat=dict(), mat=dict(), ) outputs = CalcCoregAffine.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value
bsd-3-clause