repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
import requests
from cloudbot import hook
from cloudbot.util import web
# Define some constants
base_url = 'https://maps.googleapis.com/maps/api/'
geocode_api = base_url + 'geocode/json'
# Change this to a ccTLD code (eg. uk, nz) to make results more targeted towards that specific country.
# <https://developers.google.com/maps/documentation/geocoding/#RegionCodes>
bias = None
def check_status(status):
    """Translate a Google geocode API status code into a short error message.

    Returns None for 'OK' (and for any unrecognised status), otherwise a
    human-readable description of the failure.
    """
    messages = {
        'REQUEST_DENIED': 'The geocode API is off in the Google Developers Console.',
        'ZERO_RESULTS': 'No results found.',
        'OVER_QUERY_LIMIT': 'The geocode API quota has run out.',
        'UNKNOWN_ERROR': 'Unknown Error.',
        'INVALID_REQUEST': 'Invalid Request.',
    }
    return messages.get(status)
@hook.on_start
def load_key(bot):
    """Stash the Google Developers Console API key from the bot config."""
    global dev_key
    api_keys = bot.config.get("api_keys", {})
    dev_key = api_keys.get("google_dev_key")
@hook.command("locate", "maps")
def locate(text):
    """<location> -- Finds <location> on Google Maps."""
    # NOTE: cloudbot surfaces this docstring as the command help text, so it
    # stays in the "<arg> -- description" format.
    if not dev_key:
        return "This command requires a Google Developers Console API key."

    # Resolve the query to coordinates via the Geocoding API.
    params = {"address": text, "key": dev_key}
    if bias:
        params['region'] = bias
    data = requests.get(geocode_api, params=params).json()

    error = check_status(data['status'])
    if error:
        return error

    result = data['results'][0]
    location_name = result['formatted_address']
    coords = result['geometry']['location']
    url = "https://google.com/maps/@{},{},16z/data=!3m1!1e3".format(
        coords['lat'], coords['lng'])

    # Drop the generic 'political' tag unless it is the only tag.
    tags = result['types']
    if tags != ['political']:
        tags = [tag for tag in tags if tag != 'political']
    tag_text = ", ".join(tags).replace("_", " ")

    return "\x02{}\x02 - {} ({})".format(location_name, url, tag_text)
# Copyright (c) PagerDuty.
# See LICENSE for details.
from .entity import Entity
from ..errors import (InvalidEndpointOperation, InvalidEndpoint,
InvalidArguments)
class Note(Entity):
    """PagerDuty note entity."""

    @classmethod
    def fetch(*args, **kwargs):
        """Disable this endpoint, not valid v2."""
        raise InvalidEndpoint('Not a valid location on this endpoint')

    def remove(self, *args, **kwargs):
        """Disable this operation, not valid on this endpoint."""
        raise InvalidEndpointOperation(
            'Not a valid operation on this endpoint.'
        )

    # Deleting a note is just as unsupported as fetching one.
    delete = fetch

    @classmethod
    def create(cls, incident=None, endpoint=None, *args, **kwargs):
        """Create a note within the scope of an incident.

        Callers must pass either an incident (id string or Entity) or an
        endpoint that already encodes the incident path.
        """
        if incident is None and endpoint is None:
            raise InvalidArguments(incident, endpoint)
        if endpoint is None:
            if isinstance(incident, Entity):
                incident_id = incident['id']
            else:
                incident_id = incident
            endpoint = 'incidents/{0}/notes'.format(incident_id)
        # otherwise endpoint should contain the incident path too
        return getattr(Entity, 'create').__func__(
            cls,
            endpoint=endpoint,
            *args,
            **kwargs
        )
import re
import rope.base.pynames
from rope.base import pynames, pyobjects, codeanalyze, evaluate, exceptions, utils, worder
class Finder(object):
    """For finding occurrences of a name.

    The constructor takes a `filters` argument: a list of callables taking
    a single `Occurrence`.  For each candidate occurrence the filters are
    consulted in order:

    * `None` means "no opinion"; the next filter is tried.
    * `True` accepts the occurrence as a match.
    * `False` rejects it.
    * If every filter returns `None`, the occurrence is skipped as well.
    """

    def __init__(self, pycore, name, filters=[lambda o: True], docs=False):
        self.pycore = pycore
        self.name = name
        self.docs = docs
        self.filters = filters
        self._textual_finder = _TextualFinder(name, docs=docs)

    def find_occurrences(self, resource=None, pymodule=None):
        """Generate `Occurrence` instances"""
        tools = _OccurrenceToolsCreator(self.pycore, resource=resource,
                                        pymodule=pymodule, docs=self.docs)
        for offset in self._textual_finder.find_offsets(tools.source_code):
            occurrence = Occurrence(tools, offset)
            if self._accepted(occurrence):
                yield occurrence

    def _accepted(self, occurrence):
        # The first filter with an opinion decides; unanimous abstention
        # means rejection.
        for occurrence_filter in self.filters:
            verdict = occurrence_filter(occurrence)
            if verdict is not None:
                return bool(verdict)
        return False
def create_finder(pycore, name, pyname, only_calls=False, imports=True,
                  unsure=None, docs=False, instance=None, in_hierarchy=False):
    """A factory for `Finder`.

    Builds the list of filters from the arguments.  The `instance`
    argument is only needed when implicit interfaces should be considered.
    """
    # Local renamed from `pynames` to avoid shadowing the imported module.
    names = set([pyname])
    filters = []
    if only_calls:
        filters.append(CallsFilter())
    if not imports:
        filters.append(NoImportsFilter())
    # For a parameter, also match the same-named attribute on each object
    # the parameter may hold (implicit interfaces).
    if isinstance(instance, rope.base.pynames.ParameterName):
        for pyobject in instance.get_objects():
            try:
                names.add(pyobject[name])
            except exceptions.AttributeNotFoundError:
                pass
    for candidate in names:
        filters.append(PyNameFilter(candidate))
        if in_hierarchy:
            filters.append(InHierarchyFilter(candidate))
    if unsure:
        filters.append(UnsureFilter(unsure))
    return Finder(pycore, name, filters=filters, docs=docs)
class Occurrence(object):
# One candidate occurrence of the searched name at `offset` inside the
# module wrapped by `tools` (an _OccurrenceToolsCreator).  Queries are
# memoized per instance via @utils.saveit because filters may ask for the
# same fact repeatedly.
def __init__(self, tools, offset):
self.tools = tools
self.offset = offset
self.resource = tools.resource
@utils.saveit
def get_word_range(self):
# (start, end) offsets of the bare word at this occurrence.
return self.tools.word_finder.get_word_range(self.offset)
@utils.saveit
def get_primary_range(self):
# (start, end) offsets of the whole primary expression.
return self.tools.word_finder.get_primary_range(self.offset)
@utils.saveit
def get_pyname(self):
# Resolved pyname at this offset; None when resolution fails.
try:
return self.tools.name_finder.get_pyname_at(self.offset)
except exceptions.BadIdentifierError:
pass
@utils.saveit
def get_primary_and_pyname(self):
# (primary, pyname) pair; None when resolution fails.
try:
return self.tools.name_finder.get_primary_and_pyname_at(self.offset)
except exceptions.BadIdentifierError:
pass
@utils.saveit
def is_in_import_statement(self):
# True when the word sits in a `from ...` or `import ...` statement.
return (self.tools.word_finder.is_from_statement(self.offset) or
self.tools.word_finder.is_import_statement(self.offset))
def is_called(self):
return self.tools.word_finder.is_a_function_being_called(self.offset)
def is_defined(self):
return self.tools.word_finder.is_a_class_or_function_name_in_header(self.offset)
def is_a_fixed_primary(self):
# Definitions and names after `from ... import` have a fixed primary.
return self.tools.word_finder.is_a_class_or_function_name_in_header(self.offset) or \
self.tools.word_finder.is_a_name_after_from_import(self.offset)
def is_written(self):
return self.tools.word_finder.is_assigned_here(self.offset)
def is_unsure(self):
return unsure_pyname(self.get_pyname())
@property
@utils.saveit
def lineno(self):
# Line number of the start of the occurrence's word.
offset = self.get_word_range()[0]
return self.tools.pymodule.lines.get_line_number(offset)
def same_pyname(expected, pyname):
    """Check whether `expected` and `pyname` are the same"""
    if expected is None or pyname is None:
        return False
    if expected == pyname:
        return True
    # Unequal pynames may still refer to the same thing when at least one
    # of them is an import alias: compare what they actually point at.
    imported_types = (pynames.ImportedModule, pynames.ImportedName)
    if type(expected) in imported_types or type(pyname) in imported_types:
        return (expected.get_definition_location() ==
                pyname.get_definition_location() and
                expected.get_object() == pyname.get_object())
    return False
def unsure_pyname(pyname, unbound=True):
    """Return `True` if we don't know what this name references"""
    if pyname is None:
        return True
    # With `unbound` (the default), only names that failed to resolve at
    # all are candidates for being "unsure".
    if unbound and not isinstance(pyname, pynames.UnboundName):
        return False
    if pyname.get_object() == pyobjects.get_unknown():
        return True
    # Falls through (yielding None) for known objects; callers only test
    # truthiness.
class PyNameFilter(object):
    """Accept occurrences that resolve to the given pyname."""

    def __init__(self, pyname):
        self.pyname = pyname

    def __call__(self, occurrence):
        # Returning None (instead of False) lets later filters decide.
        return True if same_pyname(self.pyname, occurrence.get_pyname()) else None
class InHierarchyFilter(object):
"""For finding occurrences of a name"""
# Matches occurrences of a class-level name anywhere in the class
# hierarchy of the given pyname: two occurrences agree when the sets of
# root classes that introduce the name intersect.
def __init__(self, pyname, implementations_only=False):
self.pyname = pyname
self.impl_only = implementations_only
self.pyclass = self._get_containing_class(pyname)
if self.pyclass is not None:
self.name = pyname.get_object().get_name()
self.roots = self._get_root_classes(self.pyclass, self.name)
else:
# Name is not defined in a class; the filter abstains for everything.
self.roots = None
def __call__(self, occurrence):
if self.roots is None:
return
pyclass = self._get_containing_class(occurrence.get_pyname())
if pyclass is not None:
roots = self._get_root_classes(pyclass, self.name)
if self.roots.intersection(roots):
return True
def _get_containing_class(self, pyname):
# Return the class whose scope defines `pyname`, if any.
if isinstance(pyname, pynames.DefinedName):
scope = pyname.get_object().get_scope()
parent = scope.parent
if parent is not None and parent.get_kind() == 'Class':
return parent.pyobject
def _get_root_classes(self, pyclass, name):
# Recursively walk superclasses to find the topmost classes that
# provide `name`; `pyclass` itself is the root when none do.
if self.impl_only and pyclass == self.pyclass:
return set([pyclass])
result = set()
for superclass in pyclass.get_superclasses():
if name in superclass:
result.update(self._get_root_classes(superclass, name))
if not result:
return set([pyclass])
return result
class UnsureFilter(object):
    """Accept occurrences rope is unsure about, subject to a callback."""

    def __init__(self, unsure):
        self.unsure = unsure

    def __call__(self, occurrence):
        # Both the occurrence and the user-supplied callback must agree.
        if not occurrence.is_unsure():
            return None
        if self.unsure(occurrence):
            return True
        return None
class NoImportsFilter(object):
    """Reject occurrences that appear inside import statements."""

    def __call__(self, occurrence):
        # Abstain (None) for everything else.
        if occurrence.is_in_import_statement():
            return False
        return None
class CallsFilter(object):
    """Reject occurrences where the name is not being called."""

    def __call__(self, occurrence):
        # Abstain (None) when the name is called; reject otherwise.
        if occurrence.is_called():
            return None
        return False
class _TextualFinder(object):
def __init__(self, name, docs=False):
self.name = name
self.docs = docs
self.comment_pattern = _TextualFinder.any('comment', [r'#[^\n]*'])
self.string_pattern = _TextualFinder.any(
'string', [codeanalyze.get_string_pattern()])
self.pattern = self._get_occurrence_pattern(self.name)
def find_offsets(self, source):
if not self._fast_file_query(source):
return
if self.docs:
searcher = self._normal_search
else:
searcher = self._re_search
for matched in searcher(source):
yield matched
def _re_search(self, source):
for match in self.pattern.finditer(source):
for key, value in match.groupdict().items():
if value and key == 'occurrence':
yield match.start(key)
def _normal_search(self, source):
current = 0
while True:
try:
found = source.index(self.name, current)
current = found + len(self.name)
if (found == 0 or not self._is_id_char(source[found - 1])) and \
(current == len(source) or not self._is_id_char(source[current])):
yield found
except ValueError:
break
def _is_id_char(self, c):
return c.isalnum() or c == '_'
def _fast_file_query(self, source):
try:
source.index(self.name)
return True
except ValueError:
return False
def _get_source(self, resource, pymodule):
if resource is not None:
return resource.read()
else:
return pymodule.source_code
def _get_occurrence_pattern(self, name):
occurrence_pattern = _TextualFinder.any('occurrence',
['\\b' + name + '\\b'])
pattern = re.compile(occurrence_pattern + '|' + self.comment_pattern +
'|' + self.string_pattern)
return pattern
@staticmethod
def any(name, list_):
return '(?P<%s>' % name + '|'.join(list_) + ')'
class _OccurrenceToolsCreator(object):
# Lazily builds (and caches via @utils.saveit) the helpers needed to
# inspect occurrences: name finder, word finder, source text and module.
# Callers supply a resource and/or a pymodule; each property derives the
# missing pieces from whichever was given.
def __init__(self, pycore, resource=None, pymodule=None, docs=False):
self.pycore = pycore
self.__resource = resource
self.__pymodule = pymodule
self.docs = docs
@property
@utils.saveit
def name_finder(self):
return evaluate.ScopeNameFinder(self.pymodule)
@property
@utils.saveit
def source_code(self):
# Prefer reading the resource; fall back to the module's source.
if self.__resource is not None:
return self.resource.read()
else:
return self.pymodule.source_code
@property
@utils.saveit
def word_finder(self):
return worder.Worder(self.source_code, self.docs)
@property
@utils.saveit
def resource(self):
# May be None when only a resource-less pymodule was supplied.
if self.__resource is not None:
return self.__resource
if self.__pymodule is not None:
return self.__pymodule.resource
@property
@utils.saveit
def pymodule(self):
if self.__pymodule is not None:
return self.__pymodule
return self.pycore.resource_to_pyobject(self.resource)
# pylint: skip-file
# flake8: noqa
class PolicyUserException(Exception):
    """Exception raised for PolicyUser errors."""
class PolicyUserConfig(OpenShiftCLIConfig):
    ''' PolicyUserConfig is a DTO for user related policy. '''

    # Requested resource kind -> the oc object kind we operate on.
    _KIND_MAP = {
        'role': 'rolebinding',
        'cluster-role': 'clusterrolebinding',
        'scc': 'scc',
    }

    def __init__(self, namespace, kubeconfig, policy_options):
        super(PolicyUserConfig, self).__init__(policy_options['name']['value'],
                                               namespace, kubeconfig, policy_options)
        self.kind = self.get_kind()
        self.namespace = namespace

    def get_kind(self):
        ''' return the kind we are working with '''
        # None for unrecognised resource kinds, matching the old behavior.
        return self._KIND_MAP.get(self.config_options['resource_kind']['value'])
# pylint: disable=too-many-return-statements
class PolicyUser(OpenShiftCLI):
''' Class to handle attaching policies to users '''
def __init__(self,
config,
verbose=False):
''' Constructor for PolicyUser '''
super(PolicyUser, self).__init__(config.namespace, config.kubeconfig, verbose)
self.config = config
self.verbose = verbose
# Caches populated lazily by the properties below.
self._rolebinding = None
self._scc = None
self._cluster_role_bindings = None
self._role_bindings = None
@property
def rolebindings(self):
# Lazily fetch and cache the namespace's rolebindings.
if self._role_bindings is None:
results = self._get('rolebindings', None)
if results['returncode'] != 0:
raise OpenShiftCLIError('Could not retrieve rolebindings')
self._role_bindings = results['results'][0]['items']
return self._role_bindings
@property
def clusterrolebindings(self):
# Lazily fetch and cache the cluster-wide rolebindings.
if self._cluster_role_bindings is None:
results = self._get('clusterrolebindings', None)
if results['returncode'] != 0:
raise OpenShiftCLIError('Could not retrieve clusterrolebindings')
self._cluster_role_bindings = results['results'][0]['items']
return self._cluster_role_bindings
@property
def role_binding(self):
''' role_binding property '''
return self._rolebinding
@role_binding.setter
def role_binding(self, binding):
''' setter for role_binding property '''
self._rolebinding = binding
@property
def security_context_constraint(self):
''' security_context_constraint property '''
return self._scc
@security_context_constraint.setter
def security_context_constraint(self, scc):
''' setter for security_context_constraint property '''
self._scc = scc
def get(self):
'''fetch the desired kind
This is only used for scc objects.
The {cluster}rolebindings happen in exists.
'''
resource_name = self.config.config_options['name']['value']
# 'cluster-reader' is pluralized here — presumably the actual scc
# object is named 'cluster-readers'; verify against the cluster.
if resource_name == 'cluster-reader':
resource_name += 's'
return self._get(self.config.kind, resource_name)
def exists_role_binding(self):
''' return whether role_binding exists '''
bindings = None
if self.config.config_options['resource_kind']['value'] == 'cluster-role':
bindings = self.clusterrolebindings
else:
bindings = self.rolebindings
if bindings is None:
return False
for binding in bindings:
# When a specific rolebinding name was requested, skip all others.
if self.config.config_options['rolebinding_name']['value'] is not None and \
binding['metadata']['name'] != self.config.config_options['rolebinding_name']['value']:
continue
# Match on role name and on the user's membership; cache the hit
# in self.role_binding.
if binding['roleRef']['name'] == self.config.config_options['name']['value'] and \
'userNames' in binding and binding['userNames'] is not None and \
self.config.config_options['user']['value'] in binding['userNames']:
self.role_binding = binding
return True
return False
def exists_scc(self):
''' return whether scc exists '''
results = self.get()
if results['returncode'] == 0:
self.security_context_constraint = SecurityContextConstraints(results['results'][0])
if self.security_context_constraint.find_user(self.config.config_options['user']['value']) != None:
return True
return False
# On lookup failure the raw result dict is returned so callers can
# surface the error (see run_ansible).
return results
def exists(self):
'''does the object exist?'''
if self.config.config_options['resource_kind']['value'] == 'cluster-role':
return self.exists_role_binding()
elif self.config.config_options['resource_kind']['value'] == 'role':
return self.exists_role_binding()
elif self.config.config_options['resource_kind']['value'] == 'scc':
return self.exists_scc()
return False
def perform(self):
'''perform action on resource'''
# Builds: oc adm policy <action> <name> <user> [--role-namespace ...]
# [--rolebinding-name ...]
cmd = ['policy',
self.config.config_options['action']['value'],
self.config.config_options['name']['value'],
self.config.config_options['user']['value']]
if self.config.config_options['role_namespace']['value'] is not None:
cmd.extend(['--role-namespace', self.config.config_options['role_namespace']['value']])
if self.config.config_options['rolebinding_name']['value'] is not None:
cmd.extend(['--rolebinding-name', self.config.config_options['rolebinding_name']['value']])
return self.openshift_cmd(cmd, oadm=True)
@staticmethod
def run_ansible(params, check_mode):
'''run the oc_adm_policy_user module'''
state = params['state']
action = None
# state maps to an add/remove policy verb, e.g. add-role-to-user.
if state == 'present':
action = 'add-' + params['resource_kind'] + '-to-user'
else:
action = 'remove-' + params['resource_kind'] + '-from-user'
nconfig = PolicyUserConfig(params['namespace'],
params['kubeconfig'],
{'action': {'value': action, 'include': False},
'user': {'value': params['user'], 'include': False},
'resource_kind': {'value': params['resource_kind'], 'include': False},
'name': {'value': params['resource_name'], 'include': False},
'role_namespace': {'value': params['role_namespace'], 'include': False},
'rolebinding_name': {'value': params['rolebinding_name'], 'include': False},
})
policyuser = PolicyUser(nconfig, params['debug'])
# Run the oc adm policy user related command
########
# Delete
########
if state == 'absent':
if not policyuser.exists():
return {'changed': False, 'state': 'absent'}
if check_mode:
return {'changed': False, 'msg': 'CHECK_MODE: would have performed a delete.'}
api_rval = policyuser.perform()
if api_rval['returncode'] != 0:
return {'msg': api_rval}
return {'changed': True, 'results' : api_rval, state:'absent'}
if state == 'present':
########
# Create
########
results = policyuser.exists()
# exists_scc() may hand back the raw error dict on failure.
if isinstance(results, dict) and 'returncode' in results and results['returncode'] != 0:
return {'msg': results}
if not results:
if check_mode:
return {'changed': False, 'msg': 'CHECK_MODE: would have performed a create.'}
api_rval = policyuser.perform()
if api_rval['returncode'] != 0:
return {'msg': api_rval}
return {'changed': True, 'results': api_rval, state: 'present'}
return {'changed': False, state: 'present'}
return {'failed': True, 'changed': False, 'results': 'Unknown state passed. %s' % state, state: 'unknown'}
from __future__ import unicode_literals
import re
from decimal import Decimal
from django.contrib.gis.db.models import functions
from django.contrib.gis.geos import (
LineString, Point, Polygon, fromstr, geos_version_info,
)
from django.db import connection
from django.test import TestCase, skipUnlessDBFeature
from django.utils import six
from ..utils import mysql, oracle, postgis, spatialite
from .models import City, Country, State, Track
@skipUnlessDBFeature("gis_enabled")
class GISFunctionsTests(TestCase):
"""
Testing functions from django/contrib/gis/db/models/functions.py.
Several tests are taken and adapted from GeoQuerySetTest.
Area/Distance/Length/Perimeter are tested in distapp/tests.
Please keep the tests in function's alphabetic order.
"""
fixtures = ['initial']
def test_asgeojson(self):
# Only PostGIS and SpatiaLite 3.0+ support GeoJSON.
if not connection.ops.geojson:
with self.assertRaises(NotImplementedError):
list(Country.objects.annotate(json=functions.AsGeoJSON('mpoly')))
return
pueblo_json = '{"type":"Point","coordinates":[-104.609252,38.255001]}'
houston_json = (
'{"type":"Point","crs":{"type":"name","properties":'
'{"name":"EPSG:4326"}},"coordinates":[-95.363151,29.763374]}'
)
victoria_json = (
'{"type":"Point","bbox":[-123.30519600,48.46261100,-123.30519600,48.46261100],'
'"coordinates":[-123.305196,48.462611]}'
)
chicago_json = (
'{"type":"Point","crs":{"type":"name","properties":{"name":"EPSG:4326"}},'
'"bbox":[-87.65018,41.85039,-87.65018,41.85039],"coordinates":[-87.65018,41.85039]}'
)
if spatialite:
victoria_json = (
'{"type":"Point","bbox":[-123.305196,48.462611,-123.305196,48.462611],'
'"coordinates":[-123.305196,48.462611]}'
)
# Precision argument should only be an integer
with self.assertRaises(TypeError):
City.objects.annotate(geojson=functions.AsGeoJSON('point', precision='foo'))
# Reference queries and values.
# SELECT ST_AsGeoJson("geoapp_city"."point", 8, 0)
# FROM "geoapp_city" WHERE "geoapp_city"."name" = 'Pueblo';
self.assertEqual(
pueblo_json,
City.objects.annotate(geojson=functions.AsGeoJSON('point')).get(name='Pueblo').geojson
)
# SELECT ST_AsGeoJson("geoapp_city"."point", 8, 2) FROM "geoapp_city"
# WHERE "geoapp_city"."name" = 'Houston';
# This time we want to include the CRS by using the `crs` keyword.
self.assertEqual(
houston_json,
City.objects.annotate(json=functions.AsGeoJSON('point', crs=True)).get(name='Houston').json
)
# SELECT ST_AsGeoJson("geoapp_city"."point", 8, 1) FROM "geoapp_city"
# WHERE "geoapp_city"."name" = 'Houston';
# This time we include the bounding box by using the `bbox` keyword.
self.assertEqual(
victoria_json,
City.objects.annotate(
geojson=functions.AsGeoJSON('point', bbox=True)
).get(name='Victoria').geojson
)
# SELECT ST_AsGeoJson("geoapp_city"."point", 5, 3) FROM "geoapp_city"
# WHERE "geoapp_city"."name" = 'Chicago';
# Finally, we set every available keyword.
self.assertEqual(
chicago_json,
City.objects.annotate(
geojson=functions.AsGeoJSON('point', bbox=True, crs=True, precision=5)
).get(name='Chicago').geojson
)
@skipUnlessDBFeature("has_AsGML_function")
def test_asgml(self):
# Should throw a TypeError when tyring to obtain GML from a
# non-geometry field.
qs = City.objects.all()
with self.assertRaises(TypeError):
qs.annotate(gml=functions.AsGML('name'))
ptown = City.objects.annotate(gml=functions.AsGML('point', precision=9)).get(name='Pueblo')
if oracle:
# No precision parameter for Oracle :-/
gml_regex = re.compile(
r'^<gml:Point srsName="SDO:4326" xmlns:gml="http://www.opengis.net/gml">'
r'<gml:coordinates decimal="\." cs="," ts=" ">-104.60925\d+,38.25500\d+ '
r'</gml:coordinates></gml:Point>'
)
elif spatialite and connection.ops.spatial_version < (3, 0, 0):
# Spatialite before 3.0 has extra colon in SrsName
gml_regex = re.compile(
r'^<gml:Point SrsName="EPSG::4326"><gml:coordinates decimal="\." '
r'cs="," ts=" ">-104.609251\d+,38.255001</gml:coordinates></gml:Point>'
)
else:
gml_regex = re.compile(
r'^<gml:Point srsName="EPSG:4326"><gml:coordinates>'
r'-104\.60925\d+,38\.255001</gml:coordinates></gml:Point>'
)
self.assertTrue(gml_regex.match(ptown.gml))
if postgis:
self.assertIn(
'<gml:pos srsDimension="2">',
City.objects.annotate(gml=functions.AsGML('point', version=3)).get(name='Pueblo').gml
)
@skipUnlessDBFeature("has_AsKML_function")
def test_askml(self):
# Should throw a TypeError when trying to obtain KML from a
# non-geometry field.
with self.assertRaises(TypeError):
City.objects.annotate(kml=functions.AsKML('name'))
# Ensuring the KML is as expected.
ptown = City.objects.annotate(kml=functions.AsKML('point', precision=9)).get(name='Pueblo')
self.assertEqual('<Point><coordinates>-104.609252,38.255001</coordinates></Point>', ptown.kml)
@skipUnlessDBFeature("has_AsSVG_function")
def test_assvg(self):
with self.assertRaises(TypeError):
City.objects.annotate(svg=functions.AsSVG('point', precision='foo'))
# SELECT AsSVG(geoapp_city.point, 0, 8) FROM geoapp_city WHERE name = 'Pueblo';
svg1 = 'cx="-104.609252" cy="-38.255001"'
# Even though relative, only one point so it's practically the same except for
# the 'c' letter prefix on the x,y values.
svg2 = svg1.replace('c', '')
self.assertEqual(svg1, City.objects.annotate(svg=functions.AsSVG('point')).get(name='Pueblo').svg)
self.assertEqual(svg2, City.objects.annotate(svg=functions.AsSVG('point', relative=5)).get(name='Pueblo').svg)
@skipUnlessDBFeature("has_BoundingCircle_function")
def test_bounding_circle(self):
qs = Country.objects.annotate(circle=functions.BoundingCircle('mpoly')).order_by('name')
self.assertAlmostEqual(qs[0].circle.area, 168.89, 2)
self.assertAlmostEqual(qs[1].circle.area, 135.95, 2)
qs = Country.objects.annotate(circle=functions.BoundingCircle('mpoly', num_seg=12)).order_by('name')
self.assertAlmostEqual(qs[0].circle.area, 168.44, 2)
self.assertAlmostEqual(qs[1].circle.area, 135.59, 2)
@skipUnlessDBFeature("has_Centroid_function")
def test_centroid(self):
# Annotated centroid must agree with GEOS's centroid of the same
# polygon, within a backend-dependent tolerance (MySQL coarsest,
# then Oracle).
qs = State.objects.exclude(poly__isnull=True).annotate(centroid=functions.Centroid('poly'))
tol = 1.8 if mysql else (0.1 if oracle else 0.00001)
for state in qs:
self.assertTrue(state.poly.centroid.equals_exact(state.centroid, tol))
@skipUnlessDBFeature("has_Difference_function")
def test_difference(self):
geom = Point(5, 23, srid=4326)
qs = Country.objects.annotate(diff=functions.Difference('mpoly', geom))
# For some reason SpatiaLite does something screwy with the Texas geometry here.
if spatialite:
qs = qs.exclude(name='Texas')
for c in qs:
self.assertEqual(c.mpoly.difference(geom), c.diff)
@skipUnlessDBFeature("has_Difference_function")
def test_difference_mixed_srid(self):
"""Testing with mixed SRID (Country has default 4326)."""
geom = Point(556597.4, 2632018.6, srid=3857) # Spherical mercator
qs = Country.objects.annotate(difference=functions.Difference('mpoly', geom))
# For some reason SpatiaLite does something screwy with the Texas geometry here.
if spatialite:
qs = qs.exclude(name='Texas')
for c in qs:
self.assertEqual(c.mpoly.difference(geom), c.difference)
@skipUnlessDBFeature("has_Envelope_function")
def test_envelope(self):
# Envelope() should annotate each country with a Polygon instance.
countries = Country.objects.annotate(envelope=functions.Envelope('mpoly'))
for country in countries:
self.assertIsInstance(country.envelope, Polygon)
@skipUnlessDBFeature("has_ForceRHR_function")
def test_force_rhr(self):
rings = (
((0, 0), (5, 0), (0, 5), (0, 0)),
((1, 1), (1, 3), (3, 1), (1, 1)),
)
rhr_rings = (
((0, 0), (0, 5), (5, 0), (0, 0)),
((1, 1), (3, 1), (1, 3), (1, 1)),
)
State.objects.create(name='Foo', poly=Polygon(*rings))
st = State.objects.annotate(force_rhr=functions.ForceRHR('poly')).get(name='Foo')
self.assertEqual(rhr_rings, st.force_rhr.coords)
@skipUnlessDBFeature("has_GeoHash_function")
def test_geohash(self):
# Reference query:
# SELECT ST_GeoHash(point) FROM geoapp_city WHERE name='Houston';
# SELECT ST_GeoHash(point, 5) FROM geoapp_city WHERE name='Houston';
ref_hash = '9vk1mfq8jx0c8e0386z6'
h1 = City.objects.annotate(geohash=functions.GeoHash('point')).get(name='Houston')
h2 = City.objects.annotate(geohash=functions.GeoHash('point', precision=5)).get(name='Houston')
self.assertEqual(ref_hash, h1.geohash)
self.assertEqual(ref_hash[:5], h2.geohash)
@skipUnlessDBFeature("has_Intersection_function")
def test_intersection(self):
geom = Point(5, 23, srid=4326)
qs = Country.objects.annotate(inter=functions.Intersection('mpoly', geom))
for c in qs:
if spatialite:
# When the intersection is empty, Spatialite returns None
expected = None
else:
expected = c.mpoly.intersection(geom)
self.assertEqual(c.inter, expected)
@skipUnlessDBFeature("has_MemSize_function")
def test_memsize(self):
ptown = City.objects.annotate(size=functions.MemSize('point')).get(name='Pueblo')
self.assertTrue(20 <= ptown.size <= 40) # Exact value may depend on PostGIS version
@skipUnlessDBFeature("has_NumGeom_function")
def test_num_geom(self):
# Both 'countries' only have two geometries.
for c in Country.objects.annotate(num_geom=functions.NumGeometries('mpoly')):
self.assertEqual(2, c.num_geom)
qs = City.objects.filter(point__isnull=False).annotate(num_geom=functions.NumGeometries('point'))
for city in qs:
# Oracle and PostGIS 2.0+ will return 1 for the number of
# geometries on non-collections, whereas PostGIS < 2.0.0 and MySQL
# will return None.
if (postgis and connection.ops.spatial_version < (2, 0, 0)) or mysql:
self.assertIsNone(city.num_geom)
else:
self.assertEqual(1, city.num_geom)
@skipUnlessDBFeature("has_NumPoint_function")
def test_num_points(self):
coords = [(-95.363151, 29.763374), (-95.448601, 29.713803)]
Track.objects.create(name='Foo', line=LineString(coords))
qs = Track.objects.annotate(num_points=functions.NumPoints('line'))
self.assertEqual(qs.first().num_points, 2)
if spatialite or mysql:
# Spatialite and MySQL can only count points on LineStrings
return
for c in Country.objects.annotate(num_points=functions.NumPoints('mpoly')):
self.assertEqual(c.mpoly.num_points, c.num_points)
if not oracle:
# Oracle cannot count vertices in Point geometries.
for c in City.objects.annotate(num_points=functions.NumPoints('point')):
self.assertEqual(1, c.num_points)
@skipUnlessDBFeature("has_PointOnSurface_function")
def test_point_on_surface(self):
# Reference values.
if oracle:
# SELECT SDO_UTIL.TO_WKTGEOMETRY(SDO_GEOM.SDO_POINTONSURFACE(GEOAPP_COUNTRY.MPOLY, 0.05))
# FROM GEOAPP_COUNTRY;
ref = {'New Zealand': fromstr('POINT (174.616364 -36.100861)', srid=4326),
'Texas': fromstr('POINT (-103.002434 36.500397)', srid=4326),
}
else:
# Using GEOSGeometry to compute the reference point on surface values
# -- since PostGIS also uses GEOS these should be the same.
ref = {'New Zealand': Country.objects.get(name='New Zealand').mpoly.point_on_surface,
'Texas': Country.objects.get(name='Texas').mpoly.point_on_surface
}
qs = Country.objects.annotate(point_on_surface=functions.PointOnSurface('mpoly'))
for country in qs:
tol = 0.00001 # Spatialite might have WKT-translation-related precision issues
self.assertTrue(ref[country.name].equals_exact(country.point_on_surface, tol))
@skipUnlessDBFeature("has_Reverse_function")
def test_reverse_geom(self):
# Reverse() must flip the vertex order of the stored LineString; the
# in-place list reverse builds the expected coordinates.
coords = [(-95.363151, 29.763374), (-95.448601, 29.713803)]
Track.objects.create(name='Foo', line=LineString(coords))
track = Track.objects.annotate(reverse_geom=functions.Reverse('line')).get(name='Foo')
coords.reverse()
self.assertEqual(tuple(coords), track.reverse_geom.coords)
@skipUnlessDBFeature("has_Scale_function")
def test_scale(self):
xfac, yfac = 2, 3
tol = 5 # The low precision tolerance is for SpatiaLite
qs = Country.objects.annotate(scaled=functions.Scale('mpoly', xfac, yfac))
for country in qs:
for p1, p2 in zip(country.mpoly, country.scaled):
for r1, r2 in zip(p1, p2):
for c1, c2 in zip(r1.coords, r2.coords):
self.assertAlmostEqual(c1[0] * xfac, c2[0], tol)
self.assertAlmostEqual(c1[1] * yfac, c2[1], tol)
# Test float/Decimal values
qs = Country.objects.annotate(scaled=functions.Scale('mpoly', 1.5, Decimal('2.5')))
self.assertGreater(qs[0].scaled.area, qs[0].mpoly.area)
@skipUnlessDBFeature("has_SnapToGrid_function")
def test_snap_to_grid(self):
# Let's try and break snap_to_grid() with bad combinations of arguments.
for bad_args in ((), range(3), range(5)):
with self.assertRaises(ValueError):
Country.objects.annotate(snap=functions.SnapToGrid('mpoly', *bad_args))
for bad_args in (('1.0',), (1.0, None), tuple(map(six.text_type, range(4)))):
with self.assertRaises(TypeError):
Country.objects.annotate(snap=functions.SnapToGrid('mpoly', *bad_args))
# Boundary for San Marino, courtesy of Bjorn Sandvik of thematicmapping.org
# from the world borders dataset he provides.
wkt = ('MULTIPOLYGON(((12.41580 43.95795,12.45055 43.97972,12.45389 43.98167,'
'12.46250 43.98472,12.47167 43.98694,12.49278 43.98917,'
'12.50555 43.98861,12.51000 43.98694,12.51028 43.98277,'
'12.51167 43.94333,12.51056 43.93916,12.49639 43.92333,'
'12.49500 43.91472,12.48778 43.90583,12.47444 43.89722,'
'12.46472 43.89555,12.45917 43.89611,12.41639 43.90472,'
'12.41222 43.90610,12.40782 43.91366,12.40389 43.92667,'
'12.40500 43.94833,12.40889 43.95499,12.41580 43.95795)))')
Country.objects.create(name='San Marino', mpoly=fromstr(wkt))
# Because floating-point arithmetic isn't exact, we set a tolerance
# to pass into GEOS `equals_exact`.
tol = 0.000000001
# SELECT AsText(ST_SnapToGrid("geoapp_country"."mpoly", 0.1)) FROM "geoapp_country"
# WHERE "geoapp_country"."name" = 'San Marino';
ref = fromstr('MULTIPOLYGON(((12.4 44,12.5 44,12.5 43.9,12.4 43.9,12.4 44)))')
self.assertTrue(
ref.equals_exact(
Country.objects.annotate(
snap=functions.SnapToGrid('mpoly', 0.1)
).get(name='San Marino').snap,
tol
)
)
# SELECT AsText(ST_SnapToGrid("geoapp_country"."mpoly", 0.05, 0.23)) FROM "geoapp_country"
# WHERE "geoapp_country"."name" = 'San Marino';
ref = fromstr('MULTIPOLYGON(((12.4 43.93,12.45 43.93,12.5 43.93,12.45 43.93,12.4 43.93)))')
self.assertTrue(
ref.equals_exact(
Country.objects.annotate(
snap=functions.SnapToGrid('mpoly', 0.05, 0.23)
).get(name='San Marino').snap,
tol
)
)
# SELECT AsText(ST_SnapToGrid("geoapp_country"."mpoly", 0.5, 0.17, 0.05, 0.23)) FROM "geoapp_country"
# WHERE "geoapp_country"."name" = 'San Marino';
ref = fromstr(
'MULTIPOLYGON(((12.4 43.87,12.45 43.87,12.45 44.1,12.5 44.1,12.5 43.87,12.45 43.87,12.4 43.87)))'
)
self.assertTrue(
ref.equals_exact(
Country.objects.annotate(
snap=functions.SnapToGrid('mpoly', 0.05, 0.23, 0.5, 0.17)
).get(name='San Marino').snap,
tol
)
)
@skipUnlessDBFeature("has_SymDifference_function")
def test_sym_difference(self):
if geos_version_info()['version'] < '3.3.0':
self.skipTest("GEOS >= 3.3 required")
geom = Point(5, 23, srid=4326)
qs = Country.objects.annotate(sym_difference=functions.SymDifference('mpoly', geom))
for country in qs:
# Ordering might differ in collections
self.assertSetEqual(set(g.wkt for g in country.mpoly.sym_difference(geom)),
set(g.wkt for g in country.sym_difference))
@skipUnlessDBFeature("has_Transform_function")
def test_transform(self):
# Pre-transformed points for Houston and Pueblo.
ptown = fromstr('POINT(992363.390841912 481455.395105533)', srid=2774)
prec = 3 # Precision is low due to version variations in PROJ and GDAL.
# Asserting the result of the transform operation with the values in
# the pre-transformed points.
h = City.objects.annotate(pt=functions.Transform('point', ptown.srid)).get(name='Pueblo')
self.assertEqual(2774, h.pt.srid)
self.assertAlmostEqual(ptown.x, h.pt.x, prec)
self.assertAlmostEqual(ptown.y, h.pt.y, prec)
@skipUnlessDBFeature("has_Translate_function")
def test_translate(self):
xfac, yfac = 5, -23
qs = Country.objects.annotate(translated=functions.Translate('mpoly', xfac, yfac))
for c in qs:
for p1, p2 in zip(c.mpoly, c.translated):
for r1, r2 in zip(p1, p2):
for c1, c2 in zip(r1.coords, r2.coords):
# The low precision is for SpatiaLite
self.assertAlmostEqual(c1[0] + xfac, c2[0], 5)
self.assertAlmostEqual(c1[1] + yfac, c2[1], 5)
# Some combined function tests
@skipUnlessDBFeature(
    "has_Difference_function", "has_Intersection_function",
    "has_SymDifference_function", "has_Union_function")
def test_diff_intersection_union(self):
    "Testing the `difference`, `intersection`, `sym_difference`, and `union` GeoQuerySet methods."
    geom = Point(5, 23, srid=4326)
    qs = Country.objects.all().annotate(
        difference=functions.Difference('mpoly', geom),
        sym_difference=functions.SymDifference('mpoly', geom),
        union=functions.Union('mpoly', geom),
    )

    # For some reason SpatiaLite does something screwey with the Texas geometry here.
    # Also, it doesn't like the null intersection.
    if spatialite:
        qs = qs.exclude(name='Texas')
    else:
        qs = qs.annotate(intersection=functions.Intersection('mpoly', geom))
    if oracle:
        # Should be able to execute the queries; however, they won't be the same
        # as GEOS (because Oracle doesn't use GEOS internally like PostGIS or
        # SpatiaLite).
        return
    for c in qs:
        self.assertEqual(c.mpoly.difference(geom), c.difference)
        if not spatialite:
            # Intersection is only annotated on non-SpatiaLite backends (above).
            self.assertEqual(c.mpoly.intersection(geom), c.intersection)
        # Ordering might differ in collections, so compare as WKT sets.
        self.assertSetEqual(set(g.wkt for g in c.mpoly.sym_difference(geom)),
                            set(g.wkt for g in c.sym_difference))
        self.assertSetEqual(set(g.wkt for g in c.mpoly.union(geom)),
                            set(g.wkt for g in c.union))
@skipUnlessDBFeature("has_Union_function")
def test_union(self):
geom = Point(-95.363151, 29.763374, srid=4326)
ptown = City.objects.annotate(union=functions.Union('point', geom)).get(name='Dallas')
tol = 0.00001
# Undefined ordering
expected1 = fromstr('MULTIPOINT(-96.801611 32.782057,-95.363151 29.763374)', srid=4326)
expected2 = fromstr('MULTIPOINT(-95.363151 29.763374,-96.801611 32.782057)', srid=4326)
self.assertTrue(expected1.equals_exact(ptown.union, tol) or expected2.equals_exact(ptown.union, tol))
| bsd-3-clause |
jriehl/numba | numba/cuda/kernels/transpose.py | 2 | 2040 | from numba import cuda
from numba.cuda.cudadrv.driver import driver
from numba import numpy_support as nps
import math
def transpose(a, b=None):
    """Compute the transpose of 'a' and store it into 'b', if given,
    and return it. If 'b' is not given, allocate a new array
    and return that.

    This implements the algorithm documented in
    http://devblogs.nvidia.com/parallelforall/efficient-matrix-transpose-cuda-cc/

    :param a: an `np.ndarray` or a `DeviceNDArrayBase` subclass. If already on
        the device its stream will be used to perform the transpose (and to copy
        `b` to the device if necessary).
    :param b: optional destination array; when omitted, a device array of the
        transposed shape is allocated.
    """
    # prefer `a`'s stream if it has one (host arrays have no stream -> 0)
    stream = getattr(a, 'stream', 0)

    if b is None:
        # BUGFIX: was `if not b:`, which truth-tests the destination array.
        # Truth-testing is ambiguous/invalid for array types, and a valid but
        # "falsy" buffer would have been silently reallocated.
        cols, rows = a.shape
        strides = a.dtype.itemsize * cols, a.dtype.itemsize
        b = cuda.cudadrv.devicearray.DeviceNDArray(
            (rows, cols),
            strides,
            dtype=a.dtype,
            stream=stream)

    dt = nps.from_dtype(a.dtype)

    tpb = driver.get_device().MAX_THREADS_PER_BLOCK
    # we need to factor available threads into x and y axis
    tile_width = int(math.pow(2, math.log(tpb, 2) / 2))
    tile_height = int(tpb / tile_width)

    # +1 column of padding; NOTE(review): the transposed read `tile[tx, ty]`
    # below assumes tile_width == tile_height (true when tpb is a power of 4,
    # e.g. 1024 -> 32x32) -- confirm for other devices.
    tile_shape = (tile_height, tile_width + 1)

    @cuda.jit
    def kernel(input, output):
        # Tile staged in shared memory so the global-memory write is coalesced.
        tile = cuda.shared.array(shape=tile_shape, dtype=dt)

        tx = cuda.threadIdx.x
        ty = cuda.threadIdx.y
        bx = cuda.blockIdx.x * cuda.blockDim.x
        by = cuda.blockIdx.y * cuda.blockDim.y
        x = by + tx
        y = bx + ty

        if by + ty < input.shape[0] and bx + tx < input.shape[1]:
            tile[ty, tx] = input[by + ty, bx + tx]
        cuda.syncthreads()
        if y < output.shape[0] and x < output.shape[1]:
            output[y, x] = tile[tx, ty]

    # one block per tile, plus one for remainders
    blocks = int(b.shape[0] / tile_height + 1), int(b.shape[1] / tile_width + 1)
    # one thread per tile element
    threads = tile_height, tile_width
    kernel[blocks, threads, stream](a, b)

    return b
| bsd-2-clause |
gcodetogit/depot_tools | third_party/boto/services/sonofmmm.py | 98 | 3481 | # Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import boto
from boto.services.service import Service
from boto.services.message import ServiceMessage
import os
import mimetypes
class SonOfMMM(Service):
    """A boto Service that transcodes files from an input S3 bucket to an output
    bucket with ffmpeg, driven by messages on the service's input queue."""

    def __init__(self, config_file=None):
        Service.__init__(self, config_file)
        # Per-instance log file; uploaded to the output bucket on shutdown.
        self.log_file = '%s.log' % self.instance_id
        self.log_path = os.path.join(self.working_dir, self.log_file)
        boto.set_file_logger(self.name, self.log_path)
        # ffmpeg command template; the two '%s' slots are (input, output) paths.
        if self.sd.has_option('ffmpeg_args'):
            self.command = '/usr/local/bin/ffmpeg ' + self.sd.get('ffmpeg_args')
        else:
            self.command = '/usr/local/bin/ffmpeg -y -i %s %s'
        self.output_mimetype = self.sd.get('output_mimetype')
        if self.sd.has_option('output_ext'):
            self.output_ext = self.sd.get('output_ext')
        else:
            # Derive the output extension from the mimetype when not configured.
            self.output_ext = mimetypes.guess_extension(self.output_mimetype)
        self.output_bucket = self.sd.get_obj('output_bucket')
        self.input_bucket = self.sd.get_obj('input_bucket')
        # check to see if there are any messages queue
        # if not, create messages for all files in input_bucket
        m = self.input_queue.read(1)
        if not m:
            self.queue_files()

    def queue_files(self):
        """Enqueue one ServiceMessage per key in the input bucket."""
        boto.log.info('Queueing files from %s' % self.input_bucket.name)
        for key in self.input_bucket:
            boto.log.info('Queueing %s' % key.name)
            m = ServiceMessage()
            if self.output_bucket:
                d = {'OutputBucket' : self.output_bucket.name}
            else:
                d = None
            m.for_key(key, d)
            self.input_queue.write(m)

    def process_file(self, in_file_name, msg):
        """Transcode one file with ffmpeg.

        Returns [(output_path, mimetype)] on success (exit status 0),
        otherwise an empty list.
        """
        base, ext = os.path.splitext(in_file_name)
        out_file_name = os.path.join(self.working_dir,
                                     base+self.output_ext)
        command = self.command % (in_file_name, out_file_name)
        boto.log.info('running:\n%s' % command)
        status = self.run(command)
        if status == 0:
            return [(out_file_name, self.output_mimetype)]
        else:
            return []

    def shutdown(self):
        """Upload the instance log to the output bucket, then delegate to Service."""
        if os.path.isfile(self.log_path):
            if self.output_bucket:
                key = self.output_bucket.new_key(self.log_file)
                key.set_contents_from_filename(self.log_path)
        Service.shutdown(self)
| bsd-3-clause |
piffey/ansible | test/units/plugins/test_plugins.py | 51 | 5435 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.compat.tests import BUILTINS, unittest
from ansible.compat.tests.mock import mock_open, patch, MagicMock
from ansible.plugins.loader import MODULE_CACHE, PATH_CACHE, PLUGIN_PATH_CACHE, PluginLoader
class TestErrors(unittest.TestCase):
    """Tests for ansible.plugins.loader.PluginLoader path and cache handling."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    @patch.object(PluginLoader, '_get_paths')
    def test_print_paths(self, mock_method):
        # print_paths() joins the loader's search paths with os.pathsep.
        mock_method.return_value = ['/path/one', '/path/two', '/path/three']
        pl = PluginLoader('foo', 'foo', '', 'test_plugins')
        paths = pl.print_paths()
        expected_paths = os.pathsep.join(['/path/one', '/path/two', '/path/three'])
        self.assertEqual(paths, expected_paths)

    def test_plugins__get_package_paths_no_package(self):
        # With no package configured, there are no package paths.
        pl = PluginLoader('test', '', 'test', 'test_plugin')
        self.assertEqual(pl._get_package_paths(), [])

    def test_plugins__get_package_paths_with_package(self):
        # the _get_package_paths() call uses __import__ to load a
        # python library, and then uses the __file__ attribute of
        # the result for that to get the library path, so we mock
        # that here and patch the builtin to use our mocked result
        foo = MagicMock()
        bar = MagicMock()
        bam = MagicMock()
        bam.__file__ = '/path/to/my/foo/bar/bam/__init__.py'
        bar.bam = bam
        foo.return_value.bar = bar
        pl = PluginLoader('test', 'foo.bar.bam', 'test', 'test_plugin')
        with patch('{0}.__import__'.format(BUILTINS), foo):
            self.assertEqual(pl._get_package_paths(), ['/path/to/my/foo/bar/bam'])

    def test_plugins__get_paths(self):
        # A pre-populated _paths cache is returned as-is.
        pl = PluginLoader('test', '', 'test', 'test_plugin')
        pl._paths = ['/path/one', '/path/two']
        self.assertEqual(pl._get_paths(), ['/path/one', '/path/two'])

        # NOT YET WORKING
        # def fake_glob(path):
        #     if path == 'test/*':
        #         return ['test/foo', 'test/bar', 'test/bam']
        #     elif path == 'test/*/*'
        # m._paths = None
        # mock_glob = MagicMock()
        # mock_glob.return_value = []
        # with patch('glob.glob', mock_glob):
        #     pass

    def assertPluginLoaderConfigBecomes(self, arg, expected):
        # Helper: build a loader with `arg` as its config and check how the
        # constructor normalized it.
        pl = PluginLoader('test', '', arg, 'test_plugin')
        self.assertEqual(pl.config, expected)

    def test_plugin__init_config_list(self):
        # A list config is kept unchanged.
        config = ['/one', '/two']
        self.assertPluginLoaderConfigBecomes(config, config)

    def test_plugin__init_config_str(self):
        # A bare string config is wrapped into a single-element list.
        self.assertPluginLoaderConfigBecomes('test', ['test'])

    def test_plugin__init_config_none(self):
        # None normalizes to an empty list.
        self.assertPluginLoaderConfigBecomes(None, [])

    def test__load_module_source_no_duplicate_names(self):
        '''
        This test simulates importing 2 plugins with the same name,
        and validating that the import is short-circuited if a file with the
        same name has already been imported
        '''
        fixture_path = os.path.join(os.path.dirname(__file__), 'loader_fixtures')

        pl = PluginLoader('test', '', 'test', 'test_plugin')
        one = pl._load_module_source('import_fixture', os.path.join(fixture_path, 'import_fixture.py'))
        # This line wouldn't even succeed if we didn't short circuit on finding a duplicate name
        two = pl._load_module_source('import_fixture', '/path/to/import_fixture.py')

        self.assertEqual(one, two)

    @patch('ansible.plugins.loader.glob')
    @patch.object(PluginLoader, '_get_paths')
    def test_all_no_duplicate_names(self, gp_mock, glob_mock):
        '''
        This test goes along with ``test__load_module_source_no_duplicate_names``
        and ensures that we ignore duplicate imports on multiple paths
        '''
        fixture_path = os.path.join(os.path.dirname(__file__), 'loader_fixtures')

        gp_mock.return_value = [
            fixture_path,
            '/path/to'
        ]

        # Two paths yield files with the same basename; only the first wins.
        glob_mock.glob.side_effect = [
            [os.path.join(fixture_path, 'import_fixture.py')],
            ['/path/to/import_fixture.py']
        ]

        pl = PluginLoader('test', '', 'test', 'test_plugin')
        # Aside from needing ``list()`` so we can do a len, ``PluginLoader.all`` returns a generator
        # so ``list()`` actually causes ``PluginLoader.all`` to run.
        plugins = list(pl.all())

        self.assertEqual(len(plugins), 1)
        self.assertIn(os.path.join(fixture_path, 'import_fixture.py'), pl._module_cache)
        self.assertNotIn('/path/to/import_fixture.py', pl._module_cache)
| gpl-3.0 |
c0defreak/python-for-android | python3-alpha/python3-src/Lib/test/test_copyreg.py | 173 | 4217 | import copyreg
import unittest
from test import support
from test.pickletester import ExtensionSaver
class C:
    # Simple class used to check that copyreg.pickle() rejects bad arguments.
    pass


class WithoutSlots(object):
    # No __slots__ at all; _slotnames() is expected to return [] (see
    # test_slotnames below).
    pass


class WithWeakref(object):
    # __weakref__ is excluded from _slotnames() results.
    __slots__ = ('__weakref__',)


class WithPrivate(object):
    # Private slot names get name-mangled to '_WithPrivate__spam'.
    __slots__ = ('__spam',)


class WithSingleString(object):
    # __slots__ may be a single bare string naming one slot.
    __slots__ = 'spam'


class WithInherited(WithSingleString):
    # Slots accumulate across the inheritance chain ('spam' + 'eggs').
    __slots__ = ('eggs',)
class CopyRegTestCase(unittest.TestCase):
    """Tests for copyreg's pickle() validation and the extension registries."""

    def test_class(self):
        # pickle() requires a callable reduction function.
        self.assertRaises(TypeError, copyreg.pickle,
                          C, None, None)

    def test_noncallable_reduce(self):
        self.assertRaises(TypeError, copyreg.pickle,
                          type(1), "not a callable")

    def test_noncallable_constructor(self):
        self.assertRaises(TypeError, copyreg.pickle,
                          type(1), int, "not a callable")

    def test_bool(self):
        import copy
        self.assertEqual(True, copy.copy(True))

    def test_extension_registry(self):
        mod, func, code = 'junk1 ', ' junk2', 0xabcd
        # ExtensionSaver restores any pre-existing registration for `code`.
        e = ExtensionSaver(code)
        try:
            # Shouldn't be in registry now.
            self.assertRaises(ValueError, copyreg.remove_extension,
                              mod, func, code)
            copyreg.add_extension(mod, func, code)
            # Should be in the registry.
            self.assertTrue(copyreg._extension_registry[mod, func] == code)
            self.assertTrue(copyreg._inverted_registry[code] == (mod, func))
            # Shouldn't be in the cache.
            self.assertNotIn(code, copyreg._extension_cache)
            # Redundant registration should be OK.
            copyreg.add_extension(mod, func, code)  # shouldn't blow up
            # Conflicting code.
            self.assertRaises(ValueError, copyreg.add_extension,
                              mod, func, code + 1)
            self.assertRaises(ValueError, copyreg.remove_extension,
                              mod, func, code + 1)
            # Conflicting module name.
            self.assertRaises(ValueError, copyreg.add_extension,
                              mod[1:], func, code)
            self.assertRaises(ValueError, copyreg.remove_extension,
                              mod[1:], func, code)
            # Conflicting function name.
            self.assertRaises(ValueError, copyreg.add_extension,
                              mod, func[1:], code)
            self.assertRaises(ValueError, copyreg.remove_extension,
                              mod, func[1:], code)
            # Can't remove one that isn't registered at all.
            if code + 1 not in copyreg._inverted_registry:
                self.assertRaises(ValueError, copyreg.remove_extension,
                                  mod[1:], func[1:], code + 1)
        finally:
            e.restore()

        # Shouldn't be there anymore.
        self.assertNotIn((mod, func), copyreg._extension_registry)
        # The code *may* be in copyreg._extension_registry, though, if
        # we happened to pick on a registered code. So don't check for
        # that.

        # Check valid codes at the limits.
        for code in 1, 0x7fffffff:
            e = ExtensionSaver(code)
            try:
                copyreg.add_extension(mod, func, code)
                copyreg.remove_extension(mod, func, code)
            finally:
                e.restore()

        # Ensure invalid codes blow up.
        for code in -1, 0, 0x80000000:
            self.assertRaises(ValueError, copyreg.add_extension,
                              mod, func, code)

    def test_slotnames(self):
        self.assertEqual(copyreg._slotnames(WithoutSlots), [])
        self.assertEqual(copyreg._slotnames(WithWeakref), [])
        # Private slot names come back mangled.
        expected = ['_WithPrivate__spam']
        self.assertEqual(copyreg._slotnames(WithPrivate), expected)
        self.assertEqual(copyreg._slotnames(WithSingleString), ['spam'])
        # Inherited slots are included; order is not guaranteed, so sort both.
        expected = ['eggs', 'spam']
        expected.sort()
        result = copyreg._slotnames(WithInherited)
        result.sort()
        self.assertEqual(result, expected)
def test_main():
    """Entry point used by CPython's regression-test driver."""
    support.run_unittest(CopyRegTestCase)


if __name__ == "__main__":
    test_main()
| apache-2.0 |
aminert/scikit-learn | sklearn/preprocessing/tests/test_data.py | 113 | 38432 | import warnings
import numpy as np
import numpy.linalg as la
from scipy import sparse
from distutils.version import LooseVersion
from sklearn.utils.testing import assert_almost_equal, clean_warning_registry
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.sparsefuncs import mean_variance_axis
from sklearn.preprocessing.data import _transform_selected
from sklearn.preprocessing.data import Binarizer
from sklearn.preprocessing.data import KernelCenterer
from sklearn.preprocessing.data import Normalizer
from sklearn.preprocessing.data import normalize
from sklearn.preprocessing.data import OneHotEncoder
from sklearn.preprocessing.data import StandardScaler
from sklearn.preprocessing.data import scale
from sklearn.preprocessing.data import MinMaxScaler
from sklearn.preprocessing.data import minmax_scale
from sklearn.preprocessing.data import MaxAbsScaler
from sklearn.preprocessing.data import maxabs_scale
from sklearn.preprocessing.data import RobustScaler
from sklearn.preprocessing.data import robust_scale
from sklearn.preprocessing.data import add_dummy_feature
from sklearn.preprocessing.data import PolynomialFeatures
from sklearn.utils.validation import DataConversionWarning
from sklearn import datasets
iris = datasets.load_iris()
def toarray(a):
    """Return a dense version of *a*: objects exposing ``toarray()`` (sparse
    matrices) are densified, everything else is passed through unchanged."""
    densify = getattr(a, "toarray", None)
    return a if densify is None else densify()
def test_polynomial_features():
    """PolynomialFeatures matches manually-built polynomial expansions."""
    # Test Polynomial Features
    # 1 feature, degree 3: columns are [1, x, x^2, x^3].
    X1 = np.arange(6)[:, np.newaxis]
    P1 = np.hstack([np.ones_like(X1),
                    X1, X1 ** 2, X1 ** 3])
    deg1 = 3

    # 2 features, degree 2: all monomials x1^i * x2^j with i + j <= 2.
    X2 = np.arange(6).reshape((3, 2))
    x1 = X2[:, :1]
    x2 = X2[:, 1:]
    P2 = np.hstack([x1 ** 0 * x2 ** 0,
                    x1 ** 1 * x2 ** 0,
                    x1 ** 0 * x2 ** 1,
                    x1 ** 2 * x2 ** 0,
                    x1 ** 1 * x2 ** 1,
                    x1 ** 0 * x2 ** 2])
    deg2 = 2

    for (deg, X, P) in [(deg1, X1, P1), (deg2, X2, P2)]:
        P_test = PolynomialFeatures(deg, include_bias=True).fit_transform(X)
        assert_array_almost_equal(P_test, P)

        # Without the bias, the leading all-ones column is dropped.
        P_test = PolynomialFeatures(deg, include_bias=False).fit_transform(X)
        assert_array_almost_equal(P_test, P[:, 1:])

    # interaction_only drops pure powers (x^2 columns); note X here is still
    # X2 from the last loop iteration.
    interact = PolynomialFeatures(2, interaction_only=True, include_bias=True)
    X_poly = interact.fit_transform(X)
    assert_array_almost_equal(X_poly, P2[:, [0, 1, 2, 4]])
def test_scaler_1d():
    """StandardScaler on 1-D input: zero mean, unit std, exact inverse."""
    # Test scaling of dataset along single axis
    rng = np.random.RandomState(0)
    X = rng.randn(5)
    X_orig_copy = X.copy()

    scaler = StandardScaler()
    X_scaled = scaler.fit(X).transform(X, copy=False)
    assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
    assert_array_almost_equal(X_scaled.std(axis=0), 1.0)

    # check inverse transform
    X_scaled_back = scaler.inverse_transform(X_scaled)
    assert_array_almost_equal(X_scaled_back, X_orig_copy)

    # Test with 1D list
    X = [0., 1., 2, 0.4, 1.]
    scaler = StandardScaler()
    X_scaled = scaler.fit(X).transform(X, copy=False)
    assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
    assert_array_almost_equal(X_scaled.std(axis=0), 1.0)

    # The scale() function shortcut gives the same result.
    X_scaled = scale(X)
    assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
    assert_array_almost_equal(X_scaled.std(axis=0), 1.0)

    # A constant feature stays unchanged when centering is disabled.
    X = np.ones(5)
    assert_array_equal(scale(X, with_mean=False), X)
def test_standard_scaler_numerical_stability():
    """Test numerical stability of scaling"""
    # np.log(1e-5) is taken because of its floating point representation
    # was empirically found to cause numerical problems with np.mean & np.std.
    x = np.zeros(8, dtype=np.float64) + np.log(1e-5, dtype=np.float64)
    if LooseVersion(np.__version__) >= LooseVersion('1.9'):
        # This does not raise a warning as the number of samples is too low
        # to trigger the problem in recent numpy
        x_scaled = assert_no_warnings(scale, x)
        assert_array_almost_equal(scale(x), np.zeros(8))
    else:
        w = "standard deviation of the data is probably very close to 0"
        x_scaled = assert_warns_message(UserWarning, w, scale, x)
        assert_array_almost_equal(x_scaled, np.zeros(8))

    # with 2 more samples, the std computation run into numerical issues:
    x = np.zeros(10, dtype=np.float64) + np.log(1e-5, dtype=np.float64)
    w = "standard deviation of the data is probably very close to 0"
    x_scaled = assert_warns_message(UserWarning, w, scale, x)
    assert_array_almost_equal(x_scaled, np.zeros(10))

    # Tiny values scale cleanly to zeros without warnings.
    x = np.ones(10, dtype=np.float64) * 1e-100
    x_small_scaled = assert_no_warnings(scale, x)
    assert_array_almost_equal(x_small_scaled, np.zeros(10))

    # Large values can cause (often recoverable) numerical stability issues:
    x_big = np.ones(10, dtype=np.float64) * 1e100
    w = "Dataset may contain too large values"
    x_big_scaled = assert_warns_message(UserWarning, w, scale, x_big)
    assert_array_almost_equal(x_big_scaled, np.zeros(10))
    assert_array_almost_equal(x_big_scaled, x_small_scaled)

    x_big_centered = assert_warns_message(UserWarning, w, scale, x_big,
                                          with_std=False)
    assert_array_almost_equal(x_big_centered, np.zeros(10))
    assert_array_almost_equal(x_big_centered, x_small_scaled)
def test_scaler_2d_arrays():
    """StandardScaler on 2-D arrays: per-axis stats, copy semantics, inverse."""
    # Test scaling of 2d array along first axis
    rng = np.random.RandomState(0)
    X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always of zero

    scaler = StandardScaler()
    X_scaled = scaler.fit(X).transform(X, copy=True)
    assert_false(np.any(np.isnan(X_scaled)))

    assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
    # The zero-variance first column stays at std 0 (not scaled).
    assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
    # Check that X has been copied
    assert_true(X_scaled is not X)

    # check inverse transform
    X_scaled_back = scaler.inverse_transform(X_scaled)
    assert_true(X_scaled_back is not X)
    assert_true(X_scaled_back is not X_scaled)
    assert_array_almost_equal(X_scaled_back, X)

    # axis=1 scales each row instead of each column.
    X_scaled = scale(X, axis=1, with_std=False)
    assert_false(np.any(np.isnan(X_scaled)))
    assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0])
    X_scaled = scale(X, axis=1, with_std=True)
    assert_false(np.any(np.isnan(X_scaled)))
    assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0])
    assert_array_almost_equal(X_scaled.std(axis=1), 4 * [1.0])
    # Check that the data hasn't been modified
    assert_true(X_scaled is not X)

    # copy=False scales in place and returns the same object.
    X_scaled = scaler.fit(X).transform(X, copy=False)
    assert_false(np.any(np.isnan(X_scaled)))
    assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
    assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
    # Check that X has not been copied
    assert_true(X_scaled is X)

    X = rng.randn(4, 5)
    X[:, 0] = 1.0  # first feature is a constant, non zero feature
    scaler = StandardScaler()
    X_scaled = scaler.fit(X).transform(X, copy=True)
    assert_false(np.any(np.isnan(X_scaled)))
    assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
    assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
    # Check that X has not been copied
    assert_true(X_scaled is not X)
def test_min_max_scaler_iris():
    """MinMaxScaler maps each feature into the requested range and inverts exactly.

    BUGFIX: the original asserted `X_trans.min(axis=0) == 0` twice in a row;
    the duplicate assertion is removed.
    """
    X = iris.data
    scaler = MinMaxScaler()
    # default params: feature_range=(0, 1)
    X_trans = scaler.fit_transform(X)
    assert_array_almost_equal(X_trans.min(axis=0), 0)
    assert_array_almost_equal(X_trans.max(axis=0), 1)
    X_trans_inv = scaler.inverse_transform(X_trans)
    assert_array_almost_equal(X, X_trans_inv)

    # not default params: min=1, max=2
    scaler = MinMaxScaler(feature_range=(1, 2))
    X_trans = scaler.fit_transform(X)
    assert_array_almost_equal(X_trans.min(axis=0), 1)
    assert_array_almost_equal(X_trans.max(axis=0), 2)
    X_trans_inv = scaler.inverse_transform(X_trans)
    assert_array_almost_equal(X, X_trans_inv)

    # min=-.5, max=.6
    scaler = MinMaxScaler(feature_range=(-.5, .6))
    X_trans = scaler.fit_transform(X)
    assert_array_almost_equal(X_trans.min(axis=0), -.5)
    assert_array_almost_equal(X_trans.max(axis=0), .6)
    X_trans_inv = scaler.inverse_transform(X_trans)
    assert_array_almost_equal(X, X_trans_inv)

    # raises on invalid range (min >= max)
    scaler = MinMaxScaler(feature_range=(2, 1))
    assert_raises(ValueError, scaler.fit, X)
def test_min_max_scaler_zero_variance_features():
    # Check min max scaler on toy data with zero variance features
    X = [[0., 1., +0.5],
         [0., 1., -0.1],
         [0., 1., +1.1]]

    # Unseen data used to probe how fitted parameters extrapolate.
    X_new = [[+0., 2., 0.5],
             [-1., 1., 0.0],
             [+0., 1., 1.5]]

    # default params
    scaler = MinMaxScaler()
    X_trans = scaler.fit_transform(X)
    # Zero-variance columns collapse to 0; only the third column spreads.
    X_expected_0_1 = [[0., 0., 0.5],
                      [0., 0., 0.0],
                      [0., 0., 1.0]]
    assert_array_almost_equal(X_trans, X_expected_0_1)
    X_trans_inv = scaler.inverse_transform(X_trans)
    assert_array_almost_equal(X, X_trans_inv)

    X_trans_new = scaler.transform(X_new)
    X_expected_0_1_new = [[+0., 1., 0.500],
                          [-1., 0., 0.083],
                          [+0., 0., 1.333]]
    assert_array_almost_equal(X_trans_new, X_expected_0_1_new, decimal=2)

    # not default params
    scaler = MinMaxScaler(feature_range=(1, 2))
    X_trans = scaler.fit_transform(X)
    X_expected_1_2 = [[1., 1., 1.5],
                      [1., 1., 1.0],
                      [1., 1., 2.0]]
    assert_array_almost_equal(X_trans, X_expected_1_2)

    # function interface gives the same results as the estimator
    X_trans = minmax_scale(X)
    assert_array_almost_equal(X_trans, X_expected_0_1)
    X_trans = minmax_scale(X, feature_range=(1, 2))
    assert_array_almost_equal(X_trans, X_expected_1_2)
def test_minmax_scale_axis1():
    """Row-wise (axis=1) min-max scaling maps every sample onto [0, 1]."""
    scaled = minmax_scale(iris.data, axis=1)
    assert_array_almost_equal(scaled.min(axis=1), 0)
    assert_array_almost_equal(scaled.max(axis=1), 1)
def test_min_max_scaler_1d():
    """MinMaxScaler on 1-D input: maps to [0, 1] and inverts exactly."""
    # Test scaling of dataset along single axis
    rng = np.random.RandomState(0)
    X = rng.randn(5)
    X_orig_copy = X.copy()

    scaler = MinMaxScaler()
    X_scaled = scaler.fit(X).transform(X)
    assert_array_almost_equal(X_scaled.min(axis=0), 0.0)
    assert_array_almost_equal(X_scaled.max(axis=0), 1.0)

    # check inverse transform
    X_scaled_back = scaler.inverse_transform(X_scaled)
    assert_array_almost_equal(X_scaled_back, X_orig_copy)

    # Test with 1D list
    X = [0., 1., 2, 0.4, 1.]
    scaler = MinMaxScaler()
    X_scaled = scaler.fit(X).transform(X)
    assert_array_almost_equal(X_scaled.min(axis=0), 0.0)
    assert_array_almost_equal(X_scaled.max(axis=0), 1.0)

    # Constant feature.
    # A zero-variance input must still land inside the target range.
    X = np.zeros(5)
    scaler = MinMaxScaler()
    X_scaled = scaler.fit(X).transform(X)
    assert_greater_equal(X_scaled.min(), 0.)
    assert_less_equal(X_scaled.max(), 1.)
def test_scaler_without_centering():
    """StandardScaler(with_mean=False) agrees between dense, CSR and CSC inputs."""
    rng = np.random.RandomState(42)
    X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always of zero
    X_csr = sparse.csr_matrix(X)
    X_csc = sparse.csc_matrix(X)

    # Centering a sparse matrix would densify it, so it is rejected.
    assert_raises(ValueError, StandardScaler().fit, X_csr)

    # A scaler with both operations disabled is an identity transform.
    null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
    X_null = null_transform.fit_transform(X_csr)
    assert_array_equal(X_null.data, X_csr.data)
    X_orig = null_transform.inverse_transform(X_null)
    assert_array_equal(X_orig.data, X_csr.data)

    scaler = StandardScaler(with_mean=False).fit(X)
    X_scaled = scaler.transform(X, copy=True)
    assert_false(np.any(np.isnan(X_scaled)))

    scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
    X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
    assert_false(np.any(np.isnan(X_csr_scaled.data)))

    scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
    X_csc_scaled = scaler_csr.transform(X_csc, copy=True)
    assert_false(np.any(np.isnan(X_csc_scaled.data)))

    # Dense and sparse fits must learn identical statistics.
    assert_equal(scaler.mean_, scaler_csr.mean_)
    assert_array_almost_equal(scaler.std_, scaler_csr.std_)

    assert_equal(scaler.mean_, scaler_csc.mean_)
    assert_array_almost_equal(scaler.std_, scaler_csc.std_)

    # Without centering, the mean is non-zero but the std is normalized.
    assert_array_almost_equal(
        X_scaled.mean(axis=0), [0., -0.01, 2.24, -0.35, -0.78], 2)
    assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])

    X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
    assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
    assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))

    # Check that X has not been modified (copy)
    assert_true(X_scaled is not X)
    assert_true(X_csr_scaled is not X_csr)

    X_scaled_back = scaler.inverse_transform(X_scaled)
    assert_true(X_scaled_back is not X)
    assert_true(X_scaled_back is not X_scaled)
    assert_array_almost_equal(X_scaled_back, X)

    X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
    assert_true(X_csr_scaled_back is not X_csr)
    assert_true(X_csr_scaled_back is not X_csr_scaled)
    assert_array_almost_equal(X_csr_scaled_back.toarray(), X)

    X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
    assert_true(X_csc_scaled_back is not X_csc)
    assert_true(X_csc_scaled_back is not X_csc_scaled)
    assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_int():
    # test that scaler converts integer input to floating
    # for both sparse and dense matrices
    rng = np.random.RandomState(42)
    X = rng.randint(20, size=(4, 5))
    X[:, 0] = 0  # first feature is always of zero
    X_csr = sparse.csr_matrix(X)
    X_csc = sparse.csc_matrix(X)

    # A scaler with centering and scaling disabled must be a no-op.
    null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
    clean_warning_registry()
    with warnings.catch_warnings(record=True):
        X_null = null_transform.fit_transform(X_csr)
        assert_array_equal(X_null.data, X_csr.data)
        X_orig = null_transform.inverse_transform(X_null)
        assert_array_equal(X_orig.data, X_csr.data)

    clean_warning_registry()
    with warnings.catch_warnings(record=True):
        scaler = StandardScaler(with_mean=False).fit(X)
        X_scaled = scaler.transform(X, copy=True)
        assert_false(np.any(np.isnan(X_scaled)))

    clean_warning_registry()
    with warnings.catch_warnings(record=True):
        scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
        X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
        assert_false(np.any(np.isnan(X_csr_scaled.data)))

    clean_warning_registry()
    with warnings.catch_warnings(record=True):
        scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
        # BUG FIX: this previously transformed with ``scaler_csr``;
        # the CSC input must be transformed by the scaler fitted on it.
        X_csc_scaled = scaler_csc.transform(X_csc, copy=True)
        assert_false(np.any(np.isnan(X_csc_scaled.data)))

    # All three fits must agree on the learned statistics.
    assert_equal(scaler.mean_, scaler_csr.mean_)
    assert_array_almost_equal(scaler.std_, scaler_csr.std_)

    assert_equal(scaler.mean_, scaler_csc.mean_)
    assert_array_almost_equal(scaler.std_, scaler_csc.std_)

    assert_array_almost_equal(
        X_scaled.mean(axis=0),
        [0., 1.109, 1.856, 21., 1.559], 2)
    assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])

    # BUG FIX: ``np.float`` is a removed alias (NumPy >= 1.24); use the
    # concrete dtype.
    X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(
        X_csr_scaled.astype(np.float64), 0)
    assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
    assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))

    # Check that X has not been modified (copy)
    assert_true(X_scaled is not X)
    assert_true(X_csr_scaled is not X_csr)

    X_scaled_back = scaler.inverse_transform(X_scaled)
    assert_true(X_scaled_back is not X)
    assert_true(X_scaled_back is not X_scaled)
    assert_array_almost_equal(X_scaled_back, X)

    X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
    assert_true(X_csr_scaled_back is not X_csr)
    assert_true(X_csr_scaled_back is not X_csr_scaled)
    assert_array_almost_equal(X_csr_scaled_back.toarray(), X)

    # BUG FIX: use the CSC-fitted scaler to invert the CSC result.
    X_csc_scaled_back = scaler_csc.inverse_transform(X_csc_scaled.tocsc())
    assert_true(X_csc_scaled_back is not X_csc)
    assert_true(X_csc_scaled_back is not X_csc_scaled)
    assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_without_copy():
    # Check that StandardScaler.fit does not change input
    rng = np.random.RandomState(42)
    X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always of zero
    X_csr = sparse.csr_matrix(X)

    # Even with copy=False, fit() must only read the data, never write it.
    X_copy = X.copy()
    StandardScaler(copy=False).fit(X)
    assert_array_equal(X, X_copy)

    X_csr_copy = X_csr.copy()
    StandardScaler(with_mean=False, copy=False).fit(X_csr)
    assert_array_equal(X_csr.toarray(), X_csr_copy.toarray())
def test_scale_sparse_with_mean_raise_exception():
    # Centering a sparse matrix would densify it, so every entry point
    # must reject with_mean=True on sparse input.
    rng = np.random.RandomState(42)
    X = rng.randn(4, 5)
    X_csr = sparse.csr_matrix(X)

    # check scaling and fit with direct calls on sparse data
    assert_raises(ValueError, scale, X_csr, with_mean=True)
    assert_raises(ValueError, StandardScaler(with_mean=True).fit, X_csr)

    # check transform and inverse_transform after a fit on a dense array
    scaler = StandardScaler(with_mean=True).fit(X)
    assert_raises(ValueError, scaler.transform, X_csr)

    X_transformed_csr = sparse.csr_matrix(scaler.transform(X))
    assert_raises(ValueError, scaler.inverse_transform, X_transformed_csr)
def test_scale_input_finiteness_validation():
    # Check if non finite inputs raise ValueError
    X = [np.nan, 5, 6, 7, 8]
    assert_raises_regex(ValueError,
                        "Input contains NaN, infinity or a value too large",
                        scale, X)

    # Infinite values must be rejected with the same message as NaN.
    X = [np.inf, 5, 6, 7, 8]
    assert_raises_regex(ValueError,
                        "Input contains NaN, infinity or a value too large",
                        scale, X)
def test_scale_function_without_centering():
    # The scale() helper with with_mean=False must behave the same on
    # dense, CSR and CSC inputs and produce NaN-free output.
    rng = np.random.RandomState(42)
    X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always of zero
    X_csr = sparse.csr_matrix(X)

    X_scaled = scale(X, with_mean=False)
    assert_false(np.any(np.isnan(X_scaled)))

    X_csr_scaled = scale(X_csr, with_mean=False)
    assert_false(np.any(np.isnan(X_csr_scaled.data)))

    # test csc has same outcome
    X_csc_scaled = scale(X_csr.tocsc(), with_mean=False)
    assert_array_almost_equal(X_scaled, X_csc_scaled.toarray())

    # raises value error on axis != 0
    assert_raises(ValueError, scale, X_csr, with_mean=False, axis=1)

    # Without centering the per-column means are unchanged (up to scaling),
    # but every non-constant column gets unit variance.
    assert_array_almost_equal(X_scaled.mean(axis=0),
                              [0., -0.01, 2.24, -0.35, -0.78], 2)
    assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
    # Check that X has not been copied
    assert_true(X_scaled is not X)

    X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
    assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
    assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
def test_robust_scaler_2d_arrays():
    """Test robust scaling of 2d array along first axis"""
    rng = np.random.RandomState(0)
    X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always of zero

    scaler = RobustScaler()
    X_scaled = scaler.fit(X).transform(X)

    # After robust scaling every column has zero median; the constant
    # first column keeps zero spread.
    assert_array_almost_equal(np.median(X_scaled, axis=0), 5 * [0.0])
    assert_array_almost_equal(X_scaled.std(axis=0)[0], 0)
def test_robust_scaler_iris():
    # Fit / transform / inverse_transform round trip on the iris data.
    X = iris.data
    scaler = RobustScaler()
    X_trans = scaler.fit_transform(X)
    assert_array_almost_equal(np.median(X_trans, axis=0), 0)
    X_trans_inv = scaler.inverse_transform(X_trans)
    assert_array_almost_equal(X, X_trans_inv)
    # After scaling, every feature's interquartile range is 1.
    q = np.percentile(X_trans, q=(25, 75), axis=0)
    iqr = q[1] - q[0]
    assert_array_almost_equal(iqr, 1)
def test_robust_scale_axis1():
    # axis=1 scales rows instead of columns: zero median / unit IQR
    # must now hold per sample.
    X = iris.data
    X_trans = robust_scale(X, axis=1)
    assert_array_almost_equal(np.median(X_trans, axis=1), 0)
    q = np.percentile(X_trans, q=(25, 75), axis=1)
    iqr = q[1] - q[0]
    assert_array_almost_equal(iqr, 1)
def test_robust_scaler_zero_variance_features():
    """Check RobustScaler on toy data with zero variance features"""
    X = [[0., 1., +0.5],
         [0., 1., -0.1],
         [0., 1., +1.1]]

    scaler = RobustScaler()
    X_trans = scaler.fit_transform(X)

    # NOTE: for such a small sample size, what we expect in the third column
    # depends HEAVILY on the method used to calculate quantiles. The values
    # here were calculated to fit the quantiles produces by np.percentile
    # using numpy 1.9 Calculating quantiles with
    # scipy.stats.mstats.scoreatquantile or scipy.stats.mstats.mquantiles
    # would yield very different results!
    X_expected = [[0., 0., +0.0],
                  [0., 0., -1.0],
                  [0., 0., +1.0]]
    assert_array_almost_equal(X_trans, X_expected)
    X_trans_inv = scaler.inverse_transform(X_trans)
    assert_array_almost_equal(X, X_trans_inv)

    # make sure new data gets transformed correctly
    X_new = [[+0., 2., 0.5],
             [-1., 1., 0.0],
             [+0., 1., 1.5]]
    X_trans_new = scaler.transform(X_new)
    # Zero-variance columns must pass through unscaled (no division by a
    # zero IQR); only the third column is actually transformed.
    X_expected_new = [[+0., 1., +0.],
                      [-1., 0., -0.83333],
                      [+0., 0., +1.66667]]
    assert_array_almost_equal(X_trans_new, X_expected_new, decimal=3)
def test_maxabs_scaler_zero_variance_features():
    """Check MaxAbsScaler on toy data with zero variance features"""
    X = [[0., 1., +0.5],
         [0., 1., -0.3],
         [0., 1., +1.5],
         [0., 0., +0.0]]

    scaler = MaxAbsScaler()
    X_trans = scaler.fit_transform(X)
    # Each column is divided by its maximum absolute value; the all-zero
    # first column must pass through unchanged (no division by zero).
    X_expected = [[0., 1., 1.0 / 3.0],
                  [0., 1., -0.2],
                  [0., 1., 1.0],
                  [0., 0., 0.0]]
    assert_array_almost_equal(X_trans, X_expected)
    X_trans_inv = scaler.inverse_transform(X_trans)
    assert_array_almost_equal(X, X_trans_inv)

    # make sure new data gets transformed correctly
    X_new = [[+0., 2., 0.5],
             [-1., 1., 0.0],
             [+0., 1., 1.5]]
    X_trans_new = scaler.transform(X_new)
    X_expected_new = [[+0., 2.0, 1.0 / 3.0],
                      [-1., 1.0, 0.0],
                      [+0., 1.0, 1.0]]
    assert_array_almost_equal(X_trans_new, X_expected_new, decimal=2)

    # sparse data
    X_csr = sparse.csr_matrix(X)
    X_trans = scaler.fit_transform(X_csr)
    X_expected = [[0., 1., 1.0 / 3.0],
                  [0., 1., -0.2],
                  [0., 1., 1.0],
                  [0., 0., 0.0]]
    assert_array_almost_equal(X_trans.A, X_expected)
    X_trans_inv = scaler.inverse_transform(X_trans)
    assert_array_almost_equal(X, X_trans_inv.A)
def test_maxabs_scaler_large_negative_value():
    """Check MaxAbsScaler on toy data with a large negative value"""
    X = [[0., 1., +0.5, -1.0],
         [0., 1., -0.3, -0.5],
         [0., 1., -100.0, 0.0],
         [0., 0., +0.0, -2.0]]

    scaler = MaxAbsScaler()
    X_trans = scaler.fit_transform(X)
    # Scaling uses the maximum *absolute* value per column, so the large
    # negative entry (-100) dominates the third column.
    X_expected = [[0., 1., 0.005, -0.5],
                  [0., 1., -0.003, -0.25],
                  [0., 1., -1.0, 0.0],
                  [0., 0., 0.0, -1.0]]
    assert_array_almost_equal(X_trans, X_expected)
def test_warning_scaling_integers():
    # Check warning when scaling integer data
    X = np.array([[1, 2, 0],
                  [0, 0, 0]], dtype=np.uint8)

    # All scalers silently up-cast to float64 and must warn about it.
    w = "Data with input dtype uint8 was converted to float64"

    clean_warning_registry()
    assert_warns_message(DataConversionWarning, w, scale, X)
    assert_warns_message(DataConversionWarning, w, StandardScaler().fit, X)
    assert_warns_message(DataConversionWarning, w, MinMaxScaler().fit, X)
def test_normalizer_l1():
    # After l1 normalization every non-empty row must have unit absolute
    # sum; an all-zero row must stay all-zero.
    rng = np.random.RandomState(0)
    X_dense = rng.randn(4, 5)
    X_sparse_unpruned = sparse.csr_matrix(X_dense)

    # set the row number 3 to zero
    X_dense[3, :] = 0.0

    # set the row number 3 to zero without pruning (can happen in real life)
    indptr_3 = X_sparse_unpruned.indptr[3]
    indptr_4 = X_sparse_unpruned.indptr[4]
    X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0

    # build the pruned variant using the regular constructor
    X_sparse_pruned = sparse.csr_matrix(X_dense)

    # check inputs that support the no-copy optim
    for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
        normalizer = Normalizer(norm='l1', copy=True)
        X_norm = normalizer.transform(X)
        assert_true(X_norm is not X)
        X_norm1 = toarray(X_norm)

        normalizer = Normalizer(norm='l1', copy=False)
        X_norm = normalizer.transform(X)
        assert_true(X_norm is X)
        X_norm2 = toarray(X_norm)

        for X_norm in (X_norm1, X_norm2):
            row_sums = np.abs(X_norm).sum(axis=1)
            for i in range(3):
                assert_almost_equal(row_sums[i], 1.0)
            assert_almost_equal(row_sums[3], 0.0)

    # check input for which copy=False won't prevent a copy
    for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
        X = init(X_dense)
        # BUG FIX: this branch previously normalized with norm='l2' (in an
        # l1 test) and asserted against the stale ``row_sums`` computed in
        # the loop above.  Use 'l1' and re-derive the sums from the fresh
        # output.
        X_norm = Normalizer(norm='l1', copy=False).transform(X)

        assert_true(X_norm is not X)
        assert_true(isinstance(X_norm, sparse.csr_matrix))

        X_norm = toarray(X_norm)
        row_sums = np.abs(X_norm).sum(axis=1)
        for i in range(3):
            assert_almost_equal(row_sums[i], 1.0)
        assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalizer_l2():
    # After l2 normalization every non-empty row has unit Euclidean norm;
    # an all-zero row must stay all-zero.
    rng = np.random.RandomState(0)
    X_dense = rng.randn(4, 5)
    X_sparse_unpruned = sparse.csr_matrix(X_dense)

    # set the row number 3 to zero
    X_dense[3, :] = 0.0

    # set the row number 3 to zero without pruning (can happen in real life)
    indptr_3 = X_sparse_unpruned.indptr[3]
    indptr_4 = X_sparse_unpruned.indptr[4]
    X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0

    # build the pruned variant using the regular constructor
    X_sparse_pruned = sparse.csr_matrix(X_dense)

    # check inputs that support the no-copy optim
    for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
        normalizer = Normalizer(norm='l2', copy=True)
        X_norm1 = normalizer.transform(X)
        assert_true(X_norm1 is not X)
        X_norm1 = toarray(X_norm1)

        normalizer = Normalizer(norm='l2', copy=False)
        X_norm2 = normalizer.transform(X)
        assert_true(X_norm2 is X)
        X_norm2 = toarray(X_norm2)

        for X_norm in (X_norm1, X_norm2):
            for i in range(3):
                assert_almost_equal(la.norm(X_norm[i]), 1.0)
            assert_almost_equal(la.norm(X_norm[3]), 0.0)

    # check input for which copy=False won't prevent a copy
    for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
        X = init(X_dense)
        # NOTE: the chained assignment to ``normalizer`` here is vestigial;
        # only ``X_norm`` is used below.
        X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X)
        # copy=False cannot be honored for non-CSR input: the output is a
        # new CSR matrix.
        assert_true(X_norm is not X)
        assert_true(isinstance(X_norm, sparse.csr_matrix))

        X_norm = toarray(X_norm)
        for i in range(3):
            assert_almost_equal(la.norm(X_norm[i]), 1.0)
        assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalizer_max():
    # After 'max' normalization every non-empty row has a maximum of 1;
    # an all-zero row must stay all-zero.
    rng = np.random.RandomState(0)
    X_dense = rng.randn(4, 5)
    X_sparse_unpruned = sparse.csr_matrix(X_dense)

    # set the row number 3 to zero
    X_dense[3, :] = 0.0

    # set the row number 3 to zero without pruning (can happen in real life)
    indptr_3 = X_sparse_unpruned.indptr[3]
    indptr_4 = X_sparse_unpruned.indptr[4]
    X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0

    # build the pruned variant using the regular constructor
    X_sparse_pruned = sparse.csr_matrix(X_dense)

    # check inputs that support the no-copy optim
    for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
        normalizer = Normalizer(norm='max', copy=True)
        X_norm1 = normalizer.transform(X)
        assert_true(X_norm1 is not X)
        X_norm1 = toarray(X_norm1)

        normalizer = Normalizer(norm='max', copy=False)
        X_norm2 = normalizer.transform(X)
        assert_true(X_norm2 is X)
        X_norm2 = toarray(X_norm2)

        for X_norm in (X_norm1, X_norm2):
            row_maxs = X_norm.max(axis=1)
            for i in range(3):
                assert_almost_equal(row_maxs[i], 1.0)
            assert_almost_equal(row_maxs[3], 0.0)

    # check input for which copy=False won't prevent a copy
    for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
        X = init(X_dense)
        # BUG FIX: this branch previously normalized with norm='l2' (in a
        # 'max' test) and asserted against the stale ``row_maxs`` computed
        # in the loop above.  Use 'max' and recompute the row maxima.
        X_norm = Normalizer(norm='max', copy=False).transform(X)

        assert_true(X_norm is not X)
        assert_true(isinstance(X_norm, sparse.csr_matrix))

        X_norm = toarray(X_norm)
        row_maxs = X_norm.max(axis=1)
        for i in range(3):
            assert_almost_equal(row_maxs[i], 1.0)
        assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalize():
    # Exercise the normalize() function directly; only behaviour not
    # already covered by the Normalizer tests.
    data = np.random.RandomState(37).randn(3, 2)
    # Normalizing the rows of X must match normalizing the columns of X.T.
    by_rows = normalize(data, copy=False)
    by_cols = normalize(data.T, axis=0, copy=False).T
    assert_array_equal(by_rows, by_cols)
    # Invalid axis or norm values must raise.
    assert_raises(ValueError, normalize, [[0]], axis=2)
    assert_raises(ValueError, normalize, [[0]], norm='l3')
def test_binarizer():
    X_ = np.array([[1, 0, 5], [2, 3, -1]])

    for init in (np.array, list, sparse.csr_matrix, sparse.csc_matrix):

        X = init(X_.copy())

        # threshold=2.0: only values strictly greater than 2 map to 1.
        binarizer = Binarizer(threshold=2.0, copy=True)
        X_bin = toarray(binarizer.transform(X))
        assert_equal(np.sum(X_bin == 0), 4)
        assert_equal(np.sum(X_bin == 1), 2)
        X_bin = binarizer.transform(X)
        # Sparseness of the input is preserved.
        assert_equal(sparse.issparse(X), sparse.issparse(X_bin))

        # Default threshold (0.0): positive values map to 1.
        binarizer = Binarizer(copy=True).fit(X)
        X_bin = toarray(binarizer.transform(X))
        assert_true(X_bin is not X)
        assert_equal(np.sum(X_bin == 0), 2)
        assert_equal(np.sum(X_bin == 1), 4)

        binarizer = Binarizer(copy=True)
        X_bin = binarizer.transform(X)
        assert_true(X_bin is not X)
        X_bin = toarray(X_bin)
        assert_equal(np.sum(X_bin == 0), 2)
        assert_equal(np.sum(X_bin == 1), 4)

        # copy=False binarizes in place, except for plain lists which are
        # always converted to an array first.
        binarizer = Binarizer(copy=False)
        X_bin = binarizer.transform(X)
        if init is not list:
            assert_true(X_bin is X)
        X_bin = toarray(X_bin)
        assert_equal(np.sum(X_bin == 0), 2)
        assert_equal(np.sum(X_bin == 1), 4)

    binarizer = Binarizer(threshold=-0.5, copy=True)
    for init in (np.array, list):
        X = init(X_.copy())

        X_bin = toarray(binarizer.transform(X))
        assert_equal(np.sum(X_bin == 0), 1)
        assert_equal(np.sum(X_bin == 1), 5)
        X_bin = binarizer.transform(X)

    # Cannot use threshold < 0 for sparse
    assert_raises(ValueError, binarizer.transform, sparse.csc_matrix(X))
def test_center_kernel():
    # Test that KernelCenterer is equivalent to StandardScaler
    # in feature space: centering the kernel matrix must equal computing
    # the linear kernel of mean-centered features.
    rng = np.random.RandomState(0)
    X_fit = rng.random_sample((5, 4))
    scaler = StandardScaler(with_std=False)
    scaler.fit(X_fit)
    X_fit_centered = scaler.transform(X_fit)
    K_fit = np.dot(X_fit, X_fit.T)

    # center fit time matrix
    centerer = KernelCenterer()
    K_fit_centered = np.dot(X_fit_centered, X_fit_centered.T)
    K_fit_centered2 = centerer.fit_transform(K_fit)
    assert_array_almost_equal(K_fit_centered, K_fit_centered2)

    # center predict time matrix
    X_pred = rng.random_sample((2, 4))
    K_pred = np.dot(X_pred, X_fit.T)
    X_pred_centered = scaler.transform(X_pred)
    K_pred_centered = np.dot(X_pred_centered, X_fit_centered.T)
    K_pred_centered2 = centerer.transform(K_pred)
    assert_array_almost_equal(K_pred_centered, K_pred_centered2)
def test_fit_transform():
    # fit(X).transform(X) and fit_transform(X) must give identical output.
    rng = np.random.RandomState(0)
    data = rng.random_sample((5, 4))
    for transformer in (StandardScaler(), Normalizer(), Binarizer()):
        chained = transformer.fit(data).transform(data)
        combined = transformer.fit_transform(data)
        assert_array_equal(chained, combined)
def test_add_dummy_feature():
    # A leading bias column of ones is prepended to a dense input.
    augmented = add_dummy_feature([[1, 0], [0, 1], [0, 1]])
    assert_array_equal(augmented, [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_coo():
    # COO input: the result stays COO, with the bias column prepended.
    matrix = sparse.coo_matrix([[1, 0], [0, 1], [0, 1]])
    augmented = add_dummy_feature(matrix)
    assert_true(sparse.isspmatrix_coo(augmented), augmented)
    assert_array_equal(augmented.toarray(),
                       [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csc():
    # CSC input: the result stays CSC, with the bias column prepended.
    matrix = sparse.csc_matrix([[1, 0], [0, 1], [0, 1]])
    augmented = add_dummy_feature(matrix)
    assert_true(sparse.isspmatrix_csc(augmented), augmented)
    assert_array_equal(augmented.toarray(),
                       [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csr():
    # CSR input: the result stays CSR, with the bias column prepended.
    matrix = sparse.csr_matrix([[1, 0], [0, 1], [0, 1]])
    augmented = add_dummy_feature(matrix)
    assert_true(sparse.isspmatrix_csr(augmented), augmented)
    assert_array_equal(augmented.toarray(),
                       [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_one_hot_encoder_sparse():
    # Test OneHotEncoder's fit and transform.
    X = [[3, 2, 1], [0, 1, 1]]
    enc = OneHotEncoder()
    # discover max values automatically
    X_trans = enc.fit_transform(X).toarray()
    assert_equal(X_trans.shape, (2, 5))
    # active_features_ keeps only the one-hot columns actually observed
    # during fit.
    assert_array_equal(enc.active_features_,
                       np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
    assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])

    # check outcome
    assert_array_equal(X_trans,
                       [[0., 1., 0., 1., 1.],
                        [1., 0., 1., 0., 1.]])

    # max value given as 3
    enc = OneHotEncoder(n_values=4)
    X_trans = enc.fit_transform(X)
    assert_equal(X_trans.shape, (2, 4 * 3))
    assert_array_equal(enc.feature_indices_, [0, 4, 8, 12])

    # max value given per feature
    enc = OneHotEncoder(n_values=[3, 2, 2])
    X = [[1, 0, 1], [0, 1, 1]]
    X_trans = enc.fit_transform(X)
    assert_equal(X_trans.shape, (2, 3 + 2 + 2))
    assert_array_equal(enc.n_values_, [3, 2, 2])
    # check that testing with larger feature works:
    X = np.array([[2, 0, 1], [0, 1, 1]])
    enc.transform(X)

    # test that an error is raised when out of bounds:
    X_too_large = [[0, 2, 1], [0, 1, 1]]
    assert_raises(ValueError, enc.transform, X_too_large)
    assert_raises(ValueError, OneHotEncoder(n_values=2).fit_transform, X)

    # test that error is raised when wrong number of features
    assert_raises(ValueError, enc.transform, X[:, :-1])
    # test that error is raised when wrong number of features in fit
    # with prespecified n_values
    assert_raises(ValueError, enc.fit, X[:, :-1])
    # test exception on wrong init param
    assert_raises(TypeError, OneHotEncoder(n_values=np.int).fit, X)

    enc = OneHotEncoder()
    # test negative input to fit
    assert_raises(ValueError, enc.fit, [[0], [-1]])

    # test negative input to transform
    enc.fit([[0], [1]])
    assert_raises(ValueError, enc.transform, [[0], [-1]])
def test_one_hot_encoder_dense():
    # check for sparse=False: same encoding as the sparse path, but the
    # output is a plain ndarray.
    X = [[3, 2, 1], [0, 1, 1]]
    enc = OneHotEncoder(sparse=False)
    # discover max values automatically
    X_trans = enc.fit_transform(X)
    assert_equal(X_trans.shape, (2, 5))
    assert_array_equal(enc.active_features_,
                       np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
    assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])

    # check outcome
    assert_array_equal(X_trans,
                       np.array([[0., 1., 0., 1., 1.],
                                 [1., 0., 1., 0., 1.]]))
def _check_transform_selected(X, X_expected, sel):
    # Apply _transform_selected with a Binarizer on both the dense input
    # and its CSR equivalent, and compare against the expected output.
    for container in (X, sparse.csr_matrix(X)):
        result = _transform_selected(container, Binarizer().transform, sel)
        assert_array_equal(toarray(result), X_expected)
def test_transform_selected():
    # The transformer (a Binarizer) is applied only to the selected
    # columns; the remaining columns pass through untouched.
    X = [[3, 2, 1], [0, 1, 1]]

    X_expected = [[1, 2, 1], [0, 1, 1]]
    _check_transform_selected(X, X_expected, [0])
    _check_transform_selected(X, X_expected, [True, False, False])

    X_expected = [[1, 1, 1], [0, 1, 1]]
    _check_transform_selected(X, X_expected, [0, 1, 2])
    _check_transform_selected(X, X_expected, [True, True, True])
    _check_transform_selected(X, X_expected, "all")

    # An empty selection leaves the input unchanged.
    _check_transform_selected(X, X, [])
    _check_transform_selected(X, X, [False, False, False])
def _run_one_hot(X, X2, cat):
    # Fit an encoder on X, then transform both X and X2 with it.
    encoder = OneHotEncoder(categorical_features=cat)
    return encoder.fit_transform(X), encoder.transform(X2)
def _check_one_hot(X, X2, cat, n_features):
    # A boolean mask and the equivalent index array must select the same
    # categorical columns and therefore produce identical encodings.
    ind = np.where(cat)[0]
    # With mask
    A, B = _run_one_hot(X, X2, cat)
    # With indices
    C, D = _run_one_hot(X, X2, ind)
    # Check shape
    assert_equal(A.shape, (2, n_features))
    assert_equal(B.shape, (1, n_features))
    assert_equal(C.shape, (2, n_features))
    assert_equal(D.shape, (1, n_features))
    # Check that mask and indices give the same results
    assert_array_equal(toarray(A), toarray(C))
    assert_array_equal(toarray(B), toarray(D))
def test_one_hot_encoder_categorical_features():
    # Only the masked columns are one-hot encoded; the rest are appended
    # verbatim after the encoded block.
    X = np.array([[3, 2, 1], [0, 1, 1]])
    X2 = np.array([[1, 1, 1]])

    cat = [True, False, False]
    _check_one_hot(X, X2, cat, 4)

    # Edge case: all non-categorical
    cat = [False, False, False]
    _check_one_hot(X, X2, cat, 3)

    # Edge case: all categorical
    cat = [True, True, True]
    _check_one_hot(X, X2, cat, 5)
def test_one_hot_encoder_unknown_transform():
    X = np.array([[0, 2, 1], [1, 0, 3], [1, 0, 2]])
    y = np.array([[4, 1, 1]])

    # Test that one hot encoder raises error for unknown features
    # present during transform.
    oh = OneHotEncoder(handle_unknown='error')
    oh.fit(X)
    assert_raises(ValueError, oh.transform, y)

    # Test the ignore option, ignores unknown features.
    oh = OneHotEncoder(handle_unknown='ignore')
    oh.fit(X)
    # The unseen category (4 in column 0) is encoded as all zeros.
    assert_array_equal(
        oh.transform(y).toarray(),
        np.array([[0., 0., 0., 0., 1., 0., 0.]])
    )

    # Raise error if handle_unknown is neither ignore or error.
    oh = OneHotEncoder(handle_unknown='42')
    oh.fit(X)
    assert_raises(ValueError, oh.transform, y)
| bsd-3-clause |
evamwangi/bc-7-Todo_List | venv/Lib/posixpath.py | 62 | 14088 | """Common operations on Posix pathnames.
Instead of importing this module directly, import os and refer to
this module as os.path. The "os.path" name is an alias for this
module on Posix systems; on other systems (e.g. Mac, Windows),
os.path provides the same operations in a manner specific to that
platform, and is an alias to another module (e.g. macpath, ntpath).
Some of this can actually be useful on non-Posix systems too, e.g.
for manipulation of the pathname component of URLs.
"""
import os
import sys
import stat
import genericpath
import warnings
from genericpath import *
# Python 2 has a distinct ``unicode`` type; alias it so the rest of the
# module can test ``isinstance(path, _unicode)`` uniformly.
try:
    _unicode = unicode
except NameError:
    # If Python is built without Unicode support, the unicode type
    # will not exist. Fake one.
    class _unicode(object):
        pass
# Public names re-exported as the os.path API on POSIX systems.
__all__ = ["normcase","isabs","join","splitdrive","split","splitext",
           "basename","dirname","commonprefix","getsize","getmtime",
           "getatime","getctime","islink","exists","lexists","isdir","isfile",
           "ismount","walk","expanduser","expandvars","normpath","abspath",
           "samefile","sameopenfile","samestat",
           "curdir","pardir","sep","pathsep","defpath","altsep","extsep",
           "devnull","realpath","supports_unicode_filenames","relpath"]

# strings representing various path-related bits and pieces
curdir = '.'
pardir = '..'
extsep = '.'
sep = '/'
pathsep = ':'
defpath = ':/bin:/usr/bin'
altsep = None  # POSIX has a single separator; there is no alternate.
devnull = '/dev/null'
# Normalize the case of a pathname.  Trivial in Posix, string.lower on Mac.
# On MS-DOS this may also turn slashes into backslashes; however, other
# normalizations (such as optimizing '../' away) are not allowed
# (another function should be defined to do that).

def normcase(s):
    """Normalize case of pathname.  Has no effect under Posix"""
    # POSIX filesystems are case-sensitive: return the path unchanged.
    return s
# Return whether a path is absolute.
# Trivial in Posix, harder on the Mac or MS-DOS.

def isabs(s):
    """Test whether a path is absolute"""
    # On POSIX a path is absolute exactly when it begins with a slash.
    return s[:1] == '/'
# Join pathnames.
# Ignore the previous parts if a part is absolute.
# Insert a '/' unless the first part is empty or already ends in '/'.

def join(a, *p):
    """Join two or more pathname components, inserting '/' as needed.
    If any component is an absolute path, all previous path components
    will be discarded.  An empty last part will result in a path that
    ends with a separator."""
    path = a
    for part in p:
        if part.startswith('/'):
            # Absolute component: restart the result from it.
            path = part
        elif not path or path.endswith('/'):
            # Nothing accumulated yet, or a separator is already present.
            path += part
        else:
            path += '/' + part
    return path
# Split a path in head (everything up to the last '/') and tail (the
# rest).  If the path ends in '/', tail will be empty.  If there is no
# '/' in the path, head will be empty.
# Trailing '/'es are stripped from head unless it is the root.

def split(p):
    """Split a pathname.  Returns tuple "(head, tail)" where "tail" is
    everything after the final slash.  Either part may be empty."""
    cut = p.rfind('/') + 1
    head, tail = p[:cut], p[cut:]
    # Drop trailing slashes from head -- but if head consists solely of
    # slashes (it is a root), keep it intact.
    stripped = head.rstrip('/')
    return (stripped or head), tail
# Split a path in root and extension.
# The extension is everything starting at the last dot in the last
# pathname component; the root is everything before that.
# It is always true that root + ext == p.

def splitext(p):
    # Delegate to the shared implementation in genericpath, parameterized
    # with the POSIX separator characters.
    return genericpath._splitext(p, sep, altsep, extsep)
splitext.__doc__ = genericpath._splitext.__doc__
# Split a pathname into a drive specification and the rest of the
# path.  Useful on DOS/Windows/NT; on Unix, the drive is always empty.

def splitdrive(p):
    """Split a pathname into drive and path. On Posix, drive is always
    empty."""
    # POSIX has no drive letters, so the drive part is the empty string.
    return '', p
# Return the tail (basename) part of a path, same as split(path)[1].

def basename(p):
    """Returns the final component of a pathname"""
    # Everything after the last slash (the whole string if there is none).
    return p[p.rfind('/') + 1:]
# Return the head (dirname) part of a path, same as split(path)[0].

def dirname(p):
    """Returns the directory component of a pathname"""
    head = p[:p.rfind('/') + 1]
    # Drop trailing slashes -- but keep head intact when it consists
    # solely of slashes (it is a root).
    stripped = head.rstrip('/')
    return stripped or head
# Is a path a symbolic link?
# This will always return false on systems where os.lstat doesn't exist.

def islink(path):
    """Test whether a path is a symbolic link"""
    try:
        st = os.lstat(path)
    except (os.error, AttributeError):
        # Missing file, permission error, or no lstat on this platform.
        return False
    return stat.S_ISLNK(st.st_mode)
# Being true for dangling symbolic links is also useful.

def lexists(path):
    """Test whether a path exists.  Returns True for broken symbolic links"""
    try:
        # lstat (unlike stat) does not follow symlinks, so a dangling
        # link still counts as existing.
        os.lstat(path)
    except os.error:
        return False
    return True
# Are two filenames really pointing to the same file?

def samefile(f1, f2):
    """Test whether two pathnames reference the same actual file"""
    s1 = os.stat(f1)
    s2 = os.stat(f2)
    # Identity is decided by the (device, inode) pair of each file.
    return samestat(s1, s2)
# Are two open files really referencing the same file?
# (Not necessarily the same file descriptor!)

def sameopenfile(fp1, fp2):
    """Test whether two open file objects reference the same file"""
    # fstat works on file descriptors rather than path names.
    s1 = os.fstat(fp1)
    s2 = os.fstat(fp2)
    return samestat(s1, s2)
# Are two stat buffers (obtained from stat, fstat or lstat)
# describing the same file?

def samestat(s1, s2):
    """Test whether two stat buffers reference the same file"""
    # Two files are the same iff they share both inode and device number.
    return (s1.st_ino, s1.st_dev) == (s2.st_ino, s2.st_dev)
# Is a path a mount point?
# (Does this work for all UNIXes?  Is it even guaranteed to work by Posix?)

def ismount(path):
    """Test whether a path is a mount point"""
    if islink(path):
        # A symlink can never be a mount point
        return False
    try:
        s1 = os.lstat(path)
        s2 = os.lstat(join(path, '..'))
    except os.error:
        return False  # It doesn't exist -- so not a mount point :-)
    # A mount point is detected when '..' lives on a different device,
    # or resolves to the very same i-node (the root of a filesystem).
    dev1 = s1.st_dev
    dev2 = s2.st_dev
    if dev1 != dev2:
        return True     # path/.. on a different device as path
    ino1 = s1.st_ino
    ino2 = s2.st_ino
    if ino1 == ino2:
        return True     # path/.. is the same i-node as path
    return False
# Directory tree walk.
# For each directory under top (including top itself, but excluding
# '.' and '..'), func(arg, dirname, filenames) is called, where
# dirname is the name of the directory and filenames is the list
# of files (and subdirectories etc.) in the directory.
# The func may modify the filenames list, to implement a filter,
# or to impose a different order of visiting.

def walk(top, func, arg):
    """Directory tree walk with callback function.

    For each directory in the directory tree rooted at top (including top
    itself, but excluding '.' and '..'), call func(arg, dirname, fnames).
    dirname is the name of the directory, and fnames a list of the names of
    the files and subdirectories in dirname (excluding '.' and '..').  func
    may modify the fnames list in-place (e.g. via del or slice assignment),
    and walk will only recurse into the subdirectories whose names remain in
    fnames; this can be used to implement a filter, or to impose a specific
    order of visiting.  No semantics are defined for, or required of, arg,
    beyond that arg is always passed to func.  It can be used, e.g., to pass
    a filename pattern, or a mutable object designed to accumulate
    statistics.  Passing None for arg is common."""
    # Deprecated Python-2-only API; os.walk replaces it in Python 3.
    warnings.warnpy3k("In 3.x, os.path.walk is removed in favor of os.walk.",
                      stacklevel=2)
    try:
        names = os.listdir(top)
    except os.error:
        # Unreadable directory: silently skip it (historic behavior).
        return
    func(arg, top, names)
    for name in names:
        name = join(top, name)
        try:
            st = os.lstat(name)
        except os.error:
            continue
        if stat.S_ISDIR(st.st_mode):
            # Recurse only into the entries func left in ``names``.
            walk(name, func, arg)
# Expand paths beginning with '~' or '~user'.
# '~' means $HOME; '~user' means that user's home directory.
# If the path doesn't begin with '~', or if the user or $HOME is unknown,
# the path is returned unchanged (leaving error reporting to whatever
# function is called with the expanded path as argument).
# See also module 'glob' for expansion of *, ? and [...] in pathnames.
# (A function should also be defined to do full *sh-style environment
# variable expansion.)

def expanduser(path):
    """Expand ~ and ~user constructions.  If user or $HOME is unknown,
    do nothing."""
    if not path.startswith('~'):
        return path
    # i: index just past the '~' or '~user' prefix.
    i = path.find('/', 1)
    if i < 0:
        i = len(path)
    if i == 1:
        # Bare '~': prefer $HOME, fall back to the password database.
        if 'HOME' not in os.environ:
            import pwd
            userhome = pwd.getpwuid(os.getuid()).pw_dir
        else:
            userhome = os.environ['HOME']
    else:
        # '~user': look the named user up in the password database.
        import pwd
        try:
            pwent = pwd.getpwnam(path[1:i])
        except KeyError:
            # Unknown user: leave the path untouched.
            return path
        userhome = pwent.pw_dir
    userhome = userhome.rstrip('/')
    # '/' fallback covers the case of userhome == '/' and an empty tail.
    return (userhome + path[i:]) or '/'
# Expand paths containing shell variable substitutions.
# This expands the forms $variable and ${variable} only.
# Non-existent variables are left unchanged.

# Lazily-compiled regexes matching $name / ${name}; the str and unicode
# (Python 2) variants are cached separately.
_varprog = None
_uvarprog = None

def expandvars(path):
    """Expand shell variables of form $var and ${var}.  Unknown variables
    are left unchanged."""
    global _varprog, _uvarprog
    if '$' not in path:
        return path
    # BUG FIX: the str/unicode branches were swapped -- the unicode path
    # used the plain-str pattern and vice versa.  A unicode path must use
    # the re.UNICODE-compiled pattern (matching CPython's posixpath), and
    # its variable names must be encoded before the os.environ lookup.
    if isinstance(path, _unicode):
        if not _uvarprog:
            import re
            _uvarprog = re.compile(_unicode(r'\$(\w+|\{[^}]*\})'), re.UNICODE)
        varprog = _uvarprog
        encoding = sys.getfilesystemencoding()
    else:
        if not _varprog:
            import re
            _varprog = re.compile(r'\$(\w+|\{[^}]*\})')
        varprog = _varprog
        encoding = None
    i = 0
    while True:
        m = varprog.search(path, i)
        if not m:
            break
        i, j = m.span(0)
        name = m.group(1)
        # Strip the braces of the ${name} form.
        if name.startswith('{') and name.endswith('}'):
            name = name[1:-1]
        if encoding:
            name = name.encode(encoding)
        if name in os.environ:
            # Splice the value in and continue scanning after it.
            tail = path[j:]
            value = os.environ[name]
            if encoding:
                value = value.decode(encoding)
            path = path[:i] + value
            i = len(path)
            path += tail
        else:
            # Unknown variable: skip past it, leaving it unexpanded.
            i = j
    return path
# Normalize a path, e.g. A//B, A/./B and A/foo/../B all become A/B.
# It should be understood that this may change the meaning of the path
# if it contains symbolic links!

def normpath(path):
    """Normalize path, eliminating double slashes, etc."""
    # Preserve unicode (if path is unicode)
    slash, dot = (u'/', u'.') if isinstance(path, _unicode) else ('/', '.')
    if path == '':
        return dot
    initial_slashes = path.startswith('/')
    # POSIX allows one or two initial slashes, but treats three or more
    # as single slash.
    if (initial_slashes and
        path.startswith('//') and not path.startswith('///')):
        initial_slashes = 2
    comps = path.split('/')
    new_comps = []
    for comp in comps:
        # Drop empty components ('//') and '.' outright.
        if comp in ('', '.'):
            continue
        # Keep the component when it is not '..', or when there is nothing
        # to pop ('..' at the start of a relative path), or when the
        # previous kept component is itself an unresolvable '..'.
        if (comp != '..' or (not initial_slashes and not new_comps) or
             (new_comps and new_comps[-1] == '..')):
            new_comps.append(comp)
        elif new_comps:
            # '..' cancels the previous real component.
            new_comps.pop()
    comps = new_comps
    path = slash.join(comps)
    if initial_slashes:
        path = slash*initial_slashes + path
    return path or dot
def abspath(path):
    """Return an absolute path."""
    if not path.startswith('/') if False else not isabs(path):
        if isinstance(path, _unicode):
            # os.getcwdu preserves the unicode-ness of the input (Python 2).
            cwd = os.getcwdu()
        else:
            cwd = os.getcwd()
        path = join(cwd, path)
    return normpath(path)
# Return a canonical path (i.e. the absolute location of a file on the
# filesystem).

def realpath(filename):
    """Return the canonical path of the specified filename, eliminating any
    symbolic links encountered in the path."""
    # The 'ok' flag (symlink-loop detection) is deliberately ignored here;
    # a looping path is returned as far as it could be resolved.
    path, ok = _joinrealpath('', filename, {})
    return abspath(path)
# Join two paths, normalizing ang eliminating any symbolic links
# encountered in the second path.
def _joinrealpath(path, rest, seen):
    # Resolve symlinks in 'rest' while appending it component by
    # component to the already-resolved 'path'.  'seen' maps symlink
    # paths to their resolved targets; a value of None marks a symlink
    # whose resolution is still in progress, which is how loops are
    # detected.  Returns (joined_path, ok); ok is False when a symlink
    # loop forced us to return the remainder unresolved.
    if isabs(rest):
        rest = rest[1:]
        path = sep
    while rest:
        name, _, rest = rest.partition(sep)
        if not name or name == curdir:
            # current dir
            continue
        if name == pardir:
            # parent dir
            if path:
                path, name = split(path)
                if name == pardir:
                    # Parent of an unresolvable '..' is another '..'.
                    path = join(path, pardir, pardir)
            else:
                path = pardir
            continue
        newpath = join(path, name)
        if not islink(newpath):
            path = newpath
            continue
        # Resolve the symbolic link
        if newpath in seen:
            # Already seen this path
            path = seen[newpath]
            if path is not None:
                # use cached value
                continue
            # The symlink is not resolved, so we must have a symlink loop.
            # Return already resolved part + rest of the path unchanged.
            return join(newpath, rest), False
        seen[newpath] = None # not resolved symlink
        path, ok = _joinrealpath(path, os.readlink(newpath), seen)
        if not ok:
            return join(path, rest), False
        seen[newpath] = path # resolved symlink
    return path, True
supports_unicode_filenames = (sys.platform == 'darwin')
def relpath(path, start=curdir):
    """Return a relative version of a path"""
    if not path:
        raise ValueError("no path specified")
    # Compare absolute, component-wise forms of both paths.
    start_parts = [p for p in abspath(start).split(sep) if p]
    path_parts = [p for p in abspath(path).split(sep) if p]
    # Number of leading components the two paths share.
    common = len(commonprefix([start_parts, path_parts]))
    # Climb out of the unshared part of 'start', then descend into 'path'.
    rel_parts = [pardir] * (len(start_parts) - common) + path_parts[common:]
    if not rel_parts:
        return curdir
    return join(*rel_parts)
| mit |
dfdx2/django | django/core/management/commands/migrate.py | 16 | 14047 | import time
from collections import OrderedDict
from importlib import import_module
from django.apps import apps
from django.core.checks import Tags, run_checks
from django.core.management.base import BaseCommand, CommandError
from django.core.management.sql import (
emit_post_migrate_signal, emit_pre_migrate_signal,
)
from django.db import DEFAULT_DB_ALIAS, connections, router
from django.db.migrations.autodetector import MigrationAutodetector
from django.db.migrations.executor import MigrationExecutor
from django.db.migrations.loader import AmbiguityError
from django.db.migrations.state import ModelState, ProjectState
from django.utils.module_loading import module_has_submodule
class Command(BaseCommand):
    help = "Updates database schema. Manages both apps with migrations and those without."

    def add_arguments(self, parser):
        """Register the migrate command's CLI options on the parser."""
        parser.add_argument(
            'app_label', nargs='?',
            help='App label of an application to synchronize the state.',
        )
        parser.add_argument(
            'migration_name', nargs='?',
            help='Database state will be brought to the state after that '
                 'migration. Use the name "zero" to unapply all migrations.',
        )
        parser.add_argument(
            '--noinput', '--no-input', action='store_false', dest='interactive',
            help='Tells Django to NOT prompt the user for input of any kind.',
        )
        parser.add_argument(
            '--database', action='store', dest='database',
            default=DEFAULT_DB_ALIAS,
            help='Nominates a database to synchronize. Defaults to the "default" database.',
        )
        parser.add_argument(
            '--fake', action='store_true', dest='fake',
            help='Mark migrations as run without actually running them.',
        )
        parser.add_argument(
            '--fake-initial', action='store_true', dest='fake_initial',
            help='Detect if tables already exist and fake-apply initial migrations if so. Make sure '
                 'that the current database schema matches your initial migration before using this '
                 'flag. Django will only check for an existing table name.',
        )
        parser.add_argument(
            '--run-syncdb', action='store_true', dest='run_syncdb',
            help='Creates tables for apps without migrations.',
        )

    def _run_checks(self, **kwargs):
        # Run database-level system checks in addition to the default ones.
        issues = run_checks(tags=[Tags.database])
        issues.extend(super()._run_checks(**kwargs))
        return issues

    def handle(self, *args, **options):
        """Entry point: work out migration targets, optionally sync
        unmigrated apps, then apply/unapply migrations on the selected
        database, emitting pre/post_migrate signals around the work."""
        self.verbosity = options['verbosity']
        self.interactive = options['interactive']

        # Import the 'management' module within each installed app, to register
        # dispatcher events.
        for app_config in apps.get_app_configs():
            if module_has_submodule(app_config.module, "management"):
                import_module('.management', app_config.name)

        # Get the database we're operating from
        db = options['database']
        connection = connections[db]

        # Hook for backends needing any database preparation
        connection.prepare_database()
        # Work out which apps have migrations and which do not
        executor = MigrationExecutor(connection, self.migration_progress_callback)

        # Raise an error if any migrations are applied before their dependencies.
        executor.loader.check_consistent_history(connection)

        # Before anything else, see if there's conflicting apps and drop out
        # hard if there are any
        conflicts = executor.loader.detect_conflicts()
        if conflicts:
            name_str = "; ".join(
                "%s in %s" % (", ".join(names), app)
                for app, names in conflicts.items()
            )
            raise CommandError(
                "Conflicting migrations detected; multiple leaf nodes in the "
                "migration graph: (%s).\nTo fix them run "
                "'python manage.py makemigrations --merge'" % name_str
            )

        # If they supplied command line arguments, work out what they mean.
        # target_app_labels_only stays True when the user named no specific
        # migration (affects the summary output below).
        target_app_labels_only = True
        if options['app_label'] and options['migration_name']:
            app_label, migration_name = options['app_label'], options['migration_name']
            if app_label not in executor.loader.migrated_apps:
                raise CommandError(
                    "App '%s' does not have migrations." % app_label
                )
            if migration_name == "zero":
                # (app_label, None) means "unapply everything for this app".
                targets = [(app_label, None)]
            else:
                try:
                    migration = executor.loader.get_migration_by_prefix(app_label, migration_name)
                except AmbiguityError:
                    raise CommandError(
                        "More than one migration matches '%s' in app '%s'. "
                        "Please be more specific." %
                        (migration_name, app_label)
                    )
                except KeyError:
                    raise CommandError("Cannot find a migration matching '%s' from app '%s'." % (
                        migration_name, app_label))
                targets = [(app_label, migration.name)]
            target_app_labels_only = False
        elif options['app_label']:
            app_label = options['app_label']
            if app_label not in executor.loader.migrated_apps:
                raise CommandError(
                    "App '%s' does not have migrations." % app_label
                )
            targets = [key for key in executor.loader.graph.leaf_nodes() if key[0] == app_label]
        else:
            targets = executor.loader.graph.leaf_nodes()

        plan = executor.migration_plan(targets)
        run_syncdb = options['run_syncdb'] and executor.loader.unmigrated_apps

        # Print some useful info
        if self.verbosity >= 1:
            self.stdout.write(self.style.MIGRATE_HEADING("Operations to perform:"))
            if run_syncdb:
                self.stdout.write(
                    self.style.MIGRATE_LABEL("  Synchronize unmigrated apps: ") +
                    (", ".join(sorted(executor.loader.unmigrated_apps)))
                )
            if target_app_labels_only:
                self.stdout.write(
                    self.style.MIGRATE_LABEL("  Apply all migrations: ") +
                    (", ".join(sorted(set(a for a, n in targets))) or "(none)")
                )
            else:
                if targets[0][1] is None:
                    self.stdout.write(self.style.MIGRATE_LABEL(
                        "  Unapply all migrations: ") + "%s" % (targets[0][0], )
                    )
                else:
                    self.stdout.write(self.style.MIGRATE_LABEL(
                        "  Target specific migration: ") + "%s, from %s"
                        % (targets[0][1], targets[0][0])
                    )

        # Capture the project state before migrating so signal receivers see
        # historical (pre-migration) models.
        pre_migrate_state = executor._create_project_state(with_applied_migrations=True)
        pre_migrate_apps = pre_migrate_state.apps
        emit_pre_migrate_signal(
            self.verbosity, self.interactive, connection.alias, apps=pre_migrate_apps, plan=plan,
        )

        # Run the syncdb phase.
        if run_syncdb:
            if self.verbosity >= 1:
                self.stdout.write(self.style.MIGRATE_HEADING("Synchronizing apps without migrations:"))
            self.sync_apps(connection, executor.loader.unmigrated_apps)

        # Migrate!
        if self.verbosity >= 1:
            self.stdout.write(self.style.MIGRATE_HEADING("Running migrations:"))
        if not plan:
            if self.verbosity >= 1:
                self.stdout.write("  No migrations to apply.")
                # If there's changes that aren't in migrations yet, tell them how to fix it.
                autodetector = MigrationAutodetector(
                    executor.loader.project_state(),
                    ProjectState.from_apps(apps),
                )
                changes = autodetector.changes(graph=executor.loader.graph)
                if changes:
                    self.stdout.write(self.style.NOTICE(
                        "  Your models have changes that are not yet reflected "
                        "in a migration, and so won't be applied."
                    ))
                    self.stdout.write(self.style.NOTICE(
                        "  Run 'manage.py makemigrations' to make new "
                        "migrations, and then re-run 'manage.py migrate' to "
                        "apply them."
                    ))
            fake = False
            fake_initial = False
        else:
            fake = options['fake']
            fake_initial = options['fake_initial']
        post_migrate_state = executor.migrate(
            targets, plan=plan, state=pre_migrate_state.clone(), fake=fake,
            fake_initial=fake_initial,
        )
        # post_migrate signals have access to all models. Ensure that all models
        # are reloaded in case any are delayed.
        post_migrate_state.clear_delayed_apps_cache()
        post_migrate_apps = post_migrate_state.apps

        # Re-render models of real apps to include relationships now that
        # we've got a final state. This wouldn't be necessary if real apps
        # models were rendered with relationships in the first place.
        with post_migrate_apps.bulk_update():
            model_keys = []
            for model_state in post_migrate_apps.real_models:
                model_key = model_state.app_label, model_state.name_lower
                model_keys.append(model_key)
                post_migrate_apps.unregister_model(*model_key)
        post_migrate_apps.render_multiple([
            ModelState.from_model(apps.get_model(*model)) for model in model_keys
        ])

        # Send the post_migrate signal, so individual apps can do whatever they need
        # to do at this point.
        emit_post_migrate_signal(
            self.verbosity, self.interactive, connection.alias, apps=post_migrate_apps, plan=plan,
        )

    def migration_progress_callback(self, action, migration=None, fake=False):
        """Print progress for each executor action; at verbosity > 1 the
        elapsed wall-clock time for each step is shown too."""
        if self.verbosity >= 1:
            compute_time = self.verbosity > 1
            if action == "apply_start":
                if compute_time:
                    self.start = time.time()
                self.stdout.write("  Applying %s..." % migration, ending="")
                self.stdout.flush()
            elif action == "apply_success":
                elapsed = " (%.3fs)" % (time.time() - self.start) if compute_time else ""
                if fake:
                    self.stdout.write(self.style.SUCCESS(" FAKED" + elapsed))
                else:
                    self.stdout.write(self.style.SUCCESS(" OK" + elapsed))
            elif action == "unapply_start":
                if compute_time:
                    self.start = time.time()
                self.stdout.write("  Unapplying %s..." % migration, ending="")
                self.stdout.flush()
            elif action == "unapply_success":
                elapsed = " (%.3fs)" % (time.time() - self.start) if compute_time else ""
                if fake:
                    self.stdout.write(self.style.SUCCESS(" FAKED" + elapsed))
                else:
                    self.stdout.write(self.style.SUCCESS(" OK" + elapsed))
            elif action == "render_start":
                if compute_time:
                    self.start = time.time()
                self.stdout.write("  Rendering model states...", ending="")
                self.stdout.flush()
            elif action == "render_success":
                elapsed = " (%.3fs)" % (time.time() - self.start) if compute_time else ""
                self.stdout.write(self.style.SUCCESS(" DONE" + elapsed))

    def sync_apps(self, connection, app_labels):
        """Run the old syncdb-style operation on a list of app_labels."""
        with connection.cursor() as cursor:
            tables = connection.introspection.table_names(cursor)

        # Build the manifest of apps and models that are to be synchronized.
        all_models = [
            (
                app_config.label,
                router.get_migratable_models(app_config, connection.alias, include_auto_created=False),
            )
            for app_config in apps.get_app_configs()
            if app_config.models_module is not None and app_config.label in app_labels
        ]

        def model_installed(model):
            # A model needs creating only if neither its table nor the table
            # of its auto-created companion (e.g. M2M through) exists yet.
            opts = model._meta
            converter = connection.introspection.table_name_converter
            return not (
                (converter(opts.db_table) in tables) or
                (opts.auto_created and converter(opts.auto_created._meta.db_table) in tables)
            )

        manifest = OrderedDict(
            (app_name, list(filter(model_installed, model_list)))
            for app_name, model_list in all_models
        )

        # Create the tables for each model
        if self.verbosity >= 1:
            self.stdout.write("  Creating tables...\n")
        with connection.schema_editor() as editor:
            for app_name, model_list in manifest.items():
                for model in model_list:
                    # Never install unmanaged models, etc.
                    if not model._meta.can_migrate(connection):
                        continue
                    if self.verbosity >= 3:
                        self.stdout.write(
                            "    Processing %s.%s model\n" % (app_name, model._meta.object_name)
                        )
                    if self.verbosity >= 1:
                        self.stdout.write("    Creating table %s\n" % model._meta.db_table)
                    editor.create_model(model)

            # Deferred SQL is executed when exiting the editor's context.
            if self.verbosity >= 1:
                self.stdout.write("    Running deferred SQL...\n")
| bsd-3-clause |
ax003d/openerp | openerp/addons/hr_recruitment/wizard/hr_recruitment_employee_hired.py | 53 | 2205 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class hired_employee(osv.osv_memory):
    _name = 'hired.employee'
    _description = 'Create Employee'

    def case_close(self, cr, uid, ids, context=None):
        """Close the applicant cases selected in the context without
        creating employees.

        @param cr: the current row, from the database cursor
        @param uid: the current user's ID for security checks
        @param ids: list of the wizard record IDs
        """
        if context is None:
            context = {}
        applicant_obj = self.pool.get('hr.applicant')
        applicant_obj.case_close(cr, uid, context.get('active_ids', []))
        return {}

    def case_close_with_emp(self, cr, uid, ids, context=None):
        """Close the applicant cases selected in the context and create
        an employee for each of them.

        @param cr: the current row, from the database cursor
        @param uid: the current user's ID for security checks
        @param ids: list of the wizard record IDs
        """
        if context is None:
            context = {}
        applicant_obj = self.pool.get('hr.applicant')
        return applicant_obj.case_close_with_emp(cr, uid, context.get('active_ids', []))

hired_employee()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
cuviper/binutils-gdb | gdb/python/lib/gdb/FrameDecorator.py | 26 | 10392 | # Copyright (C) 2013-2016 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import gdb
# This small code snippet deals with problem of strings in Python 2.x
# and Python 3.x. Python 2.x has str and unicode classes which are
# sub-classes of basestring. In Python 3.x all strings are encoded
# and basestring has been removed.
try:
    # Python 2: basestring exists (common ancestor of str and unicode).
    basestring
except NameError:
    # Python 3: basestring is gone; str is the only string type.
    basestring = str
class FrameDecorator(object):
    """Basic implementation of a Frame Decorator.

    This base frame decorator decorates a frame or another frame
    decorator, and provides convenience methods.  If this object is
    wrapping a frame decorator, defer to that wrapped object's method
    if it has one.  This allows for frame decorators that have
    sub-classed FrameDecorator object, but also wrap other frame
    decorators on the same frame to correctly execute.

    E.g

    If the result of frame filters running means we have one gdb.Frame
    wrapped by multiple frame decorators, all sub-classed from
    FrameDecorator, the resulting hierarchy will be:

    Decorator1
      -- (wraps) Decorator2
        -- (wraps) FrameDecorator
          -- (wraps) gdb.Frame

    In this case we have two frame decorators, both of which are
    sub-classed from FrameDecorator.  If Decorator1 just overrides the
    'function' method, then all of the other methods are carried out
    by the super-class FrameDecorator.  But Decorator2 may have
    overriden other methods, so FrameDecorator will look at the
    'base' parameter and defer to that class's methods.  And so on,
    down the chain."""

    # 'base' can refer to a gdb.Frame or another frame decorator.  In
    # the latter case, the child class will have called the super
    # method and _base will be an object conforming to the Frame Filter
    # class.
    def __init__(self, base):
        self._base = base

    @staticmethod
    def _is_limited_frame(frame):
        """Internal utility to determine if the frame is special or
        limited."""
        sal = frame.find_sal()
        if (not sal.symtab or not sal.symtab.filename
            or frame.type() == gdb.DUMMY_FRAME
            or frame.type() == gdb.SIGTRAMP_FRAME):
            return True
        return False

    def elided(self):
        """Return any elided frames that this class might be
        wrapping, or None."""
        if hasattr(self._base, "elided"):
            return self._base.elided()
        return None

    def function(self):
        """ Return the name of the frame's function or an address of
        the function of the frame.  First determine if this is a
        special frame.  If not, try to determine filename from GDB's
        frame internal function API.  Finally, if a name cannot be
        determined return the address.  If this function returns an
        address, GDB will attempt to determine the function name from
        its internal minimal symbols store (for example, for inferiors
        without debug-info)."""

        # Both gdb.Frame, and FrameDecorator have a method called
        # "function", so determine which object this is.
        if not isinstance(self._base, gdb.Frame):
            if hasattr(self._base, "function"):
                # If it is not a gdb.Frame, and there is already a
                # "function" method, use that.
                return self._base.function()

        frame = self.inferior_frame()

        if frame.type() == gdb.DUMMY_FRAME:
            return "<function called from gdb>"
        elif frame.type() == gdb.SIGTRAMP_FRAME:
            return "<signal handler called>"

        func = frame.function()

        # If we cannot determine the function name, return the
        # address.  If GDB detects an integer value from this function
        # it will attempt to find the function name from minimal
        # symbols via its own internal functions.
        # Fixed: compare against None with 'is', not '=='.
        if func is None:
            pc = frame.pc()
            return pc

        return str(func)

    def address(self):
        """ Return the address of the frame's pc"""
        if hasattr(self._base, "address"):
            return self._base.address()
        frame = self.inferior_frame()
        return frame.pc()

    def filename(self):
        """ Return the filename associated with this frame, detecting
        and returning the appropriate library name is this is a shared
        library."""
        if hasattr(self._base, "filename"):
            return self._base.filename()
        frame = self.inferior_frame()
        sal = frame.find_sal()
        if not sal.symtab or not sal.symtab.filename:
            pc = frame.pc()
            return gdb.solib_name(pc)
        else:
            return sal.symtab.filename

    def frame_args(self):
        """ Return an iterable of frame arguments for this frame, if
        any.  The iterable object contains objects conforming with the
        Symbol/Value interface.  If there are no frame arguments, or
        if this frame is deemed to be a special case, return None."""
        if hasattr(self._base, "frame_args"):
            return self._base.frame_args()
        frame = self.inferior_frame()
        if self._is_limited_frame(frame):
            return None
        args = FrameVars(frame)
        return args.fetch_frame_args()

    def frame_locals(self):
        """ Return an iterable of local variables for this frame, if
        any.  The iterable object contains objects conforming with the
        Symbol/Value interface.  If there are no frame locals, or if
        this frame is deemed to be a special case, return None."""
        if hasattr(self._base, "frame_locals"):
            return self._base.frame_locals()
        frame = self.inferior_frame()
        if self._is_limited_frame(frame):
            return None
        args = FrameVars(frame)
        return args.fetch_frame_locals()

    def line(self):
        """ Return line number information associated with the frame's
        pc.  If symbol table/line information does not exist, or if
        this frame is deemed to be a special case, return None"""
        if hasattr(self._base, "line"):
            return self._base.line()
        frame = self.inferior_frame()
        if self._is_limited_frame(frame):
            return None
        sal = frame.find_sal()
        # Fixed: no parentheses needed around a simple truth test.
        if sal:
            return sal.line
        else:
            return None

    def inferior_frame(self):
        """ Return the gdb.Frame underpinning this frame decorator."""
        # If 'base' is a frame decorator, we want to call its inferior
        # frame method.  If '_base' is a gdb.Frame, just return that.
        if hasattr(self._base, "inferior_frame"):
            return self._base.inferior_frame()
        return self._base
class SymValueWrapper(object):
    """A (symbol, value) pair exposed through the gdb frame-filter
    Symbol/Value interface, used for frame locals and arguments."""

    def __init__(self, symbol, value):
        # Attribute names are kept as in the rest of this module.
        self.sym = symbol
        self.val = value

    def value(self):
        """Return the value associated with this symbol, or None."""
        return self.val

    def symbol(self):
        """Return the symbol (a gdb.Symbol or plain Python text)
        associated with this wrapper, or None."""
        return self.sym
class FrameVars(object):
    """Utility class to fetch and store frame local variables, or
    frame arguments."""

    def __init__(self, frame):
        self.frame = frame
        # Address classes whose symbols are worth presenting to the user.
        self.symbol_class = dict.fromkeys((
            gdb.SYMBOL_LOC_STATIC,
            gdb.SYMBOL_LOC_REGISTER,
            gdb.SYMBOL_LOC_ARG,
            gdb.SYMBOL_LOC_REF_ARG,
            gdb.SYMBOL_LOC_LOCAL,
            gdb.SYMBOL_LOC_REGPARM_ADDR,
            gdb.SYMBOL_LOC_COMPUTED,
        ), True)

    def fetch_b(self, sym):
        """Decide from SYM's address class whether it should be included
        in the iterator.  Only symbols for which this returns True are
        fetched."""
        # Synthetic locals or arguments may be plain strings rather than
        # gdb.Symbol objects; always fetch those.
        if isinstance(sym, basestring):
            return True
        return self.symbol_class.get(sym.addr_class, False)

    def fetch_frame_locals(self):
        """Public utility method to fetch frame local variables for
        the stored frame.  Frame arguments are not fetched.  If there
        are no frame local variables, return an empty list."""
        lvars = []
        try:
            block = self.frame.block()
        except RuntimeError:
            block = None
        # Walk inner blocks outwards, stopping before global/static scope.
        while block is not None:
            if block.is_global or block.is_static:
                break
            lvars.extend(SymValueWrapper(sym, None)
                         for sym in block
                         if not sym.is_argument and self.fetch_b(sym))
            block = block.superblock
        return lvars

    def fetch_frame_args(self):
        """Public utility method to fetch frame arguments for the
        stored frame.  Frame arguments are the only type fetched.  If
        there are no frame argument variables, return an empty list."""
        try:
            block = self.frame.block()
        except RuntimeError:
            block = None
        # Climb to the function-level block, which owns the arguments.
        while block is not None and block.function is None:
            block = block.superblock
        if block is None:
            return []
        return [SymValueWrapper(sym, None) for sym in block if sym.is_argument]
| gpl-2.0 |
danielbair/aeneas | run_all_unit_tests.py | 5 | 5094 | #!/usr/bin/env python
# coding=utf-8
# aeneas is a Python/C library and a set of tools
# to automagically synchronize audio and text (aka forced alignment)
#
# Copyright (C) 2012-2013, Alberto Pettarin (www.albertopettarin.it)
# Copyright (C) 2013-2015, ReadBeyond Srl (www.readbeyond.it)
# Copyright (C) 2015-2017, Alberto Pettarin (www.albertopettarin.it)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Run all unit tests for the aeneas package.
"""
from __future__ import absolute_import
from __future__ import print_function
import glob
import os
import sys
import unittest
__author__ = "Alberto Pettarin"
__email__ = "aeneas@readbeyond.it"
__copyright__ = """
Copyright 2012-2013, Alberto Pettarin (www.albertopettarin.it)
Copyright 2013-2015, ReadBeyond Srl (www.readbeyond.it)
Copyright 2015-2017, Alberto Pettarin (www.albertopettarin.it)
"""
__license__ = "GNU AGPL 3"
__status__ = "Production"
__version__ = "1.7.3"
# Directory (relative to the repository root) scanned for test files.
TEST_DIRECTORY = "aeneas/tests"
# Maps a test type to its (glob pattern, file-name prefix), used both to
# discover test files and to normalize user-supplied file selectors.
MAP = {
    "fast": ("test_*.py", "test_"),
    "bench": ("bench_test_*.py", "bench_test_"),
    "long": ("long_test_*.py", "long_test_"),
    "net": ("net_test_*.py", "net_test_"),
    "tool": ("tool_test_*.py", "tool_test_")
}
class NOPStream(object):
    """A write-only sink handed to the unittest runner: it normally
    discards everything, but echoes messages via print() when verbose."""

    def __init__(self, verbose=False):
        # When True, write() prints messages instead of dropping them.
        self.verbose = verbose

    def flush(self):
        """No-op: nothing is buffered, so there is nothing to flush."""
        pass

    def write(self, msg):
        """Swallow msg, or print it when verbose mode is enabled."""
        if self.verbose:
            print(msg)
def main():
    """ Perform tests """
    # Plain sys.argv parsing: flags may appear anywhere; any argument not
    # starting with "-" is treated as a test-file selector.
    if ("--help" in sys.argv) or ("-h" in sys.argv):
        print("")
        print("Usage: python %s [--bench-tests|--long-tests|--net-tests|--tool-tests] [--sort] [--verbose]" % sys.argv[0])
        print("")
        sys.exit(0)
    sort_tests = ("--sort" in sys.argv) or ("-s" in sys.argv)
    verbose = ("--verbose" in sys.argv) or ("-v" in sys.argv)
    # Exactly one test type runs per invocation; "fast" is the default.
    if ("--bench-tests" in sys.argv) or ("-b" in sys.argv):
        test_type = "bench"
    elif ("--long-tests" in sys.argv) or ("-l" in sys.argv):
        test_type = "long"
    elif ("--net-tests" in sys.argv) or ("-n" in sys.argv):
        test_type = "net"
    elif ("--tool-tests" in sys.argv) or ("-t" in sys.argv):
        test_type = "tool"
    else:
        test_type = "fast"
    pattern, prefix = MAP[test_type]
    all_files = [os.path.basename(f) for f in glob.glob(os.path.join(TEST_DIRECTORY, pattern))]
    cli_files = [arg for arg in sys.argv[1:] if not arg.startswith("-")]
    selected_files = []
    for cli_file in cli_files:
        # Allow users to omit the type prefix and/or the ".py" extension.
        if not cli_file.startswith(prefix):
            cli_file = prefix + cli_file
        if not cli_file.endswith(".py"):
            cli_file += ".py"
        if cli_file in all_files:
            selected_files.append(cli_file)
    if len(selected_files) == 0:
        # No (valid) selectors given: run every discovered test file.
        selected_files = all_files
    if sort_tests:
        selected_files = sorted(selected_files)
    verbosity = 0
    if verbose:
        verbosity = 2
    results = {}
    # Route runner output through the NOP stream so per-test chatter is
    # suppressed unless --verbose was given.
    nop_stream = NOPStream(verbose=verbose)
    for test_file in selected_files:
        print("Running", test_file, "...")
        testsuite = unittest.TestLoader().discover(start_dir=TEST_DIRECTORY, pattern=test_file)
        result = unittest.TextTestRunner(stream=nop_stream, verbosity=verbosity).run(testsuite)
        results[test_file] = {
            "tests": result.testsRun,
            "errors": len(result.errors),
            "failures": len(result.failures)
        }
    total_tests = sum([results[k]["tests"] for k in results])
    total_errors = sum([results[k]["errors"] for k in results])
    total_failures = sum([results[k]["failures"] for k in results])
    print("")
    print("Tests:    ", total_tests)
    print("Errors:   ", total_errors)
    print("Failures: ", total_failures)
    if total_errors > 0:
        print("")
        print("Errors in the following tests:")
        print("\n".join([key for key in results.keys() if results[key]["errors"] > 0]))
        print("")
    if total_failures > 0:
        print("")
        print("Failures in the following tests:")
        print("\n".join([key for key in results.keys() if results[key]["failures"] > 0]))
        print("")
    print("")
    # Exit status reflects the overall outcome for CI consumption.
    if total_errors + total_failures == 0:
        print("[INFO] Tests completed: all passed!")
        print("")
        sys.exit(0)
    else:
        print("[INFO] Tests completed: errors or failures found!")
        print("")
        sys.exit(1)

if __name__ == '__main__':
    main()
| agpl-3.0 |
zarboz/XBMC-PVR-mac | tools/darwin/depends/samba/samba-3.6.6/wintest/test-s3.py | 19 | 9730 | #!/usr/bin/env python
'''automated testing of Samba3 against windows'''
import sys, os
import optparse
import wintest
def set_libpath(t):
    """Point the dynamic linker at the freshly installed Samba libraries."""
    t.putenv("LD_LIBRARY_PATH", "${PREFIX}/lib")
def set_krb5_conf(t):
    """Write a minimal krb5.conf (KDC located via DNS SRV records) and
    point libkrb5 at it through KRB5_CONFIG."""
    t.run_cmd("mkdir -p ${PREFIX}/etc")
    t.write_file("${PREFIX}/etc/krb5.conf",
                 '''[libdefaults]
	dns_lookup_realm = false
	dns_lookup_kdc = true''')
    t.putenv("KRB5_CONFIG", '${PREFIX}/etc/krb5.conf')
def build_s3(t):
    '''build samba3'''
    t.info('Building s3')
    t.chdir('${SOURCETREE}/source3')
    # ccache speeds up repeated full rebuilds between test runs.
    t.putenv('CC', 'ccache gcc')
    t.run_cmd("./autogen.sh")
    t.run_cmd("./configure -C --prefix=${PREFIX} --enable-developer")
    t.run_cmd('make basics')
    t.run_cmd('make -j4')
    # Install into a clean prefix so no stale files leak between runs.
    t.run_cmd('rm -rf ${PREFIX}')
    t.run_cmd('make install')
def start_s3(t):
    """Start the nmbd, winbindd and smbd daemons, killing any leftovers
    first, and wait for the NetBIOS session port to accept connections."""
    t.info('Starting Samba3')
    t.chdir("${PREFIX}")
    # A previous run may have left daemons around; a failed kill is fine.
    t.run_cmd('killall -9 -q samba smbd nmbd winbindd', checkfail=False)
    t.run_cmd("rm -f var/locks/*.pid")
    t.run_cmd(['sbin/nmbd', "-D"])
    t.run_cmd(['sbin/winbindd', "-D"])
    t.run_cmd(['sbin/smbd', "-D"])
    # Port 139 (netbios-ssn) answering means smbd is ready for clients.
    t.port_wait("${INTERFACE_IP}", 139)
def test_wbinfo(t):
    """Exercise winbindd via wbinfo: online status, user and group
    enumeration, name->SID mapping, and password authentication."""
    t.info('Testing wbinfo')
    t.chdir('${PREFIX}')
    t.cmd_contains("bin/wbinfo --version", ["Version 3."])
    t.cmd_contains("bin/wbinfo -p", ["Ping to winbindd succeeded"])
    # The trusted domain may take a moment to come online after the
    # join, so retry until it does.
    t.retry_cmd("bin/wbinfo --online-status",
                ["BUILTIN : online",
                 "${HOSTNAME} : online",
                 "${WIN_DOMAIN} : online"],
                casefold=True)
    t.cmd_contains("bin/wbinfo -u",
                   ["${WIN_DOMAIN}/administrator",
                    "${WIN_DOMAIN}/krbtgt" ],
                   casefold=True)
    t.cmd_contains("bin/wbinfo -g",
                   ["${WIN_DOMAIN}/domain users",
                    "${WIN_DOMAIN}/domain guests",
                    "${WIN_DOMAIN}/domain admins"],
                   casefold=True)
    # Well-known RIDs: 500 = Administrator, 513 = Domain Users.
    t.cmd_contains("bin/wbinfo --name-to-sid administrator",
                   "S-1-5-.*-500 SID_USER .1",
                   regex=True)
    t.cmd_contains("bin/wbinfo --name-to-sid 'domain users'",
                   "S-1-5-.*-513 SID_DOM_GROUP .2",
                   regex=True)
    t.retry_cmd("bin/wbinfo --authenticate=${WIN_DOMAIN}/administrator%${WIN_PASS}",
                ["plaintext password authentication succeeded",
                 "challenge/response password authentication succeeded"])
def test_smbclient(t):
    """Exercise smbclient against the local server: share listing, then
    an interactive dir/mkdir/rmdir session with NTLM and with Kerberos."""
    t.info('Testing smbclient')
    t.chdir('${PREFIX}')
    t.cmd_contains("bin/smbclient --version", ["Version 3."])
    t.cmd_contains('bin/smbclient -L ${INTERFACE_IP} -U%', ["Domain=[${WIN_DOMAIN}]", "test", "IPC$", "Samba 3."],
                   casefold=True)
    child = t.pexpect_spawn('bin/smbclient //${HOSTNAME}.${WIN_REALM}/test -Uroot@${WIN_REALM}%${PASSWORD2}')
    child.expect("smb:")
    child.sendline("dir")
    child.expect("blocks available")
    child.sendline("mkdir testdir")
    child.expect("smb:")
    child.sendline("cd testdir")
    child.expect('testdir')
    child.sendline("cd ..")
    child.sendline("rmdir testdir")
    # Repeat the same session with -k to force Kerberos authentication.
    child = t.pexpect_spawn('bin/smbclient //${HOSTNAME}.${WIN_REALM}/test -Uroot@${WIN_REALM}%${PASSWORD2} -k')
    child.expect("smb:")
    child.sendline("dir")
    child.expect("blocks available")
    child.sendline("mkdir testdir")
    child.expect("smb:")
    child.sendline("cd testdir")
    child.expect('testdir')
    child.sendline("cd ..")
    child.sendline("rmdir testdir")
def create_shares(t):
    """Append a writable [test] share to smb.conf and create its
    backing directory."""
    t.info("Adding test shares")
    t.chdir('${PREFIX}')
    t.write_file("lib/smb.conf", '''
[test]
       path = ${PREFIX}/test
       read only = no
''',
                 mode='a')
    t.run_cmd("mkdir -p test")
def prep_join_as_member(t, vm):
    '''prepare to join a windows domain as a member server'''
    t.setwinvars(vm)
    t.info("Starting VMs for joining ${WIN_VM} as a member using net ads join")
    t.chdir('${PREFIX}')
    # Stop any running Samba daemons; failure to kill is not an error.
    t.run_cmd('killall -9 -q samba smbd nmbd winbindd', checkfail=False)
    # Revert the Windows VM to a known snapshot so each run starts clean.
    t.vm_poweroff("${WIN_VM}", checkfail=False)
    t.vm_restore("${WIN_VM}", "${WIN_SNAPSHOT}")
    # Sync the clock (Kerberos is time-sensitive) and learn the VM's IP.
    child = t.open_telnet("${WIN_HOSTNAME}", "administrator", "${WIN_PASS}", set_time=True)
    t.get_ipconfig(child)
    # Wipe local state so the join starts from scratch.
    t.del_files(["var", "private"])
    t.write_file("lib/smb.conf", '''
[global]
	netbios name = ${HOSTNAME}
	log level = ${DEBUGLEVEL}
	realm = ${WIN_REALM}
	workgroup = ${WIN_DOMAIN}
	security = ADS
	bind interfaces only = yes
	interfaces = ${INTERFACE}
	winbind separator = /
	idmap uid = 1000000-2000000
	idmap gid = 1000000-2000000
	winbind enum users = yes
	winbind enum groups = yes
	max protocol = SMB2
	map hidden = no
	map system = no
	ea support = yes
	panic action = xterm -e gdb --pid %d
''')
def join_as_member(t, vm):
    '''join a windows domain as a member server'''
    t.setwinvars(vm)
    t.info("Joining ${WIN_VM} as a member using net ads join")
    # Wait for the DC's LDAP port and its DNS SRV record before joining.
    t.port_wait("${WIN_IP}", 389)
    t.retry_cmd("host -t SRV _ldap._tcp.${WIN_REALM} ${WIN_IP}", ['has SRV record'] )
    t.cmd_contains("bin/net ads join -Uadministrator%${WIN_PASS}", ["Joined"])
    t.cmd_contains("bin/net ads testjoin", ["Join is OK"])
    # Register our A record with the DC's DNS and confirm it resolves.
    t.cmd_contains("bin/net ads dns register ${HOSTNAME}.${WIN_REALM} -P", ["Successfully registered hostname with DNS"])
    t.cmd_contains("host -t A ${HOSTNAME}.${WIN_REALM}",
                   ['${HOSTNAME}.${WIN_REALM} has address'])
def test_join_as_member(t, vm):
    '''Validate the domain join: create and enable a domain account for
    root, then exercise winbind and smbclient against the member server.

    Fix: dropped the stray C-style semicolon that terminated one of the
    child.expect() calls -- harmless but non-idiomatic Python.
    '''
    t.setwinvars(vm)
    t.info('Testing join as member')
    t.chdir('${PREFIX}')
    # Create a domain account for root and set a known password on it.
    t.run_cmd('bin/net ads user add root -Uadministrator%${WIN_PASS}')
    child = t.pexpect_spawn('bin/net ads password root -Uadministrator%${WIN_PASS}')
    child.expect("Enter new password for root")
    child.sendline("${PASSWORD2}")
    child.expect("Password change for ")
    child.expect(" completed")
    # Clear the account's disabled flag via the net rpc shell so the
    # account can actually be used for the smbclient tests below.
    child = t.pexpect_spawn('bin/net rpc shell -S ${WIN_HOSTNAME}.${WIN_REALM} -Uadministrator%${WIN_PASS}')
    child.expect("net rpc>")
    child.sendline("user edit disabled root no")
    child.expect("Set root's disabled flag")
    test_wbinfo(t)
    test_smbclient(t)
def _start_dc_and_get_creds(t, dc_started):
    '''Ensure the W2K8R2A DC VM is running (promoting it to a DC on first
    use) and return (dc_started, realm, username, password) for the domain.

    Extracted from test_s3(), where this sequence was duplicated verbatim
    for the WINDOWS7 and WINXP sections.
    '''
    if not dc_started:
        t.start_winvm('W2K8R2A')
        t.run_dcpromo_as_first_dc("W2K8R2A", func_level='2008r2')
        dc_started = True
    else:
        # The VM is already up from an earlier step; just load its variables.
        t.setwinvars('W2K8R2A')
    realm = t.getvar("WIN_REALM")
    username = t.getvar("WIN_USER")
    password = t.getvar("WIN_PASS")
    return dc_started, realm, username, password


def test_s3(t):
    '''basic s3 testing'''
    t.setvar("SAMBA_VERSION", "Version 3")
    t.check_prerequesites()
    set_libpath(t)

    # Reset the environment: bind, the VMs and any running samba daemons.
    if not t.skip("configure_bind"):
        t.configure_bind()
    if not t.skip("stop_bind"):
        t.stop_bind()
    if not t.skip("stop_vms"):
        t.stop_vms()
    if not t.skip("build"):
        build_s3(t)
    set_krb5_conf(t)
    if not t.skip("configure_bind2"):
        t.configure_bind()
    if not t.skip("start_bind"):
        t.start_bind()

    dc_started = False

    # Join the s3 server to a 2008r2 domain as a member and test the join.
    if t.have_var('W2K8R2A_VM') and not t.skip("join_w2k8r2"):
        t.start_winvm('W2K8R2A')
        dc_started = True
        prep_join_as_member(t, "W2K8R2A")
        t.run_dcpromo_as_first_dc("W2K8R2A", func_level='2008r2')
        join_as_member(t, "W2K8R2A")
        create_shares(t)
        start_s3(t)
        test_join_as_member(t, "W2K8R2A")

    # Join a Windows 7 client to the domain and run smbclient against it
    # with several authentication variants.
    if t.have_var('WINDOWS7_VM') and t.have_var('W2K8R2A_VM') and not t.skip("join_windows7_2008r2"):
        dc_started, realm, dom_username, dom_password = _start_dc_and_get_creds(t, dc_started)
        dom_realm = realm
        t.start_winvm('WINDOWS7')
        t.test_remote_smbclient("WINDOWS7")
        t.run_winjoin('WINDOWS7', realm, username=dom_username, password=dom_password)
        t.test_remote_smbclient("WINDOWS7", dom_username, dom_password)
        t.test_remote_smbclient('WINDOWS7', dom_username, dom_password, args='--option=clientntlmv2auth=no')
        t.test_remote_smbclient('WINDOWS7', "%s@%s" % (dom_username, dom_realm), dom_password, args="-k")
        t.test_remote_smbclient('WINDOWS7', "%s@%s" % (dom_username, dom_realm), dom_password, args="-k --option=clientusespnegoprincipal=yes")

    # Join a Windows XP client and run the same smbclient checks.
    if t.have_var('WINXP_VM') and t.have_var('W2K8R2A_VM') and not t.skip("join_winxp_2008r2"):
        dc_started, realm, dom_username, dom_password = _start_dc_and_get_creds(t, dc_started)
        dom_realm = realm
        t.start_winvm('WINXP')
        t.run_winjoin('WINXP', realm, username=dom_username, password=dom_password)
        t.test_remote_smbclient('WINXP', dom_username, dom_password)
        t.test_remote_smbclient('WINXP', dom_username, dom_password, args='--option=clientntlmv2auth=no')
        t.test_remote_smbclient('WINXP', "%s@%s" % (dom_username, dom_realm), dom_password, args="-k")
        # BUG FIX: this previously passed "--clientusespnegoprincipal=yes",
        # which is not an smbclient flag; the smb.conf override form
        # "--option=clientusespnegoprincipal=yes" (as used in the WINDOWS7
        # section above) is the correct spelling.
        t.test_remote_smbclient('WINXP', "%s@%s" % (dom_username, dom_realm), dom_password, args="-k --option=clientusespnegoprincipal=yes")

    t.info("S3 test: All OK")
def test_cleanup(t):
'''cleanup after tests'''
t.info("Cleaning up ...")
t.restore_resolv_conf()
if getattr(t, 'bind_child', False):
t.bind_child.kill()
if __name__ == '__main__':
    # Entry point: drive the full Samba3 member-server test sequence.
    t = wintest.wintest()
    t.setup("test-s3.py", "source3")
    try:
        test_s3(t)
    except:
        # Clean up on failure too (unless --nocleanup was given), then
        # re-raise so the failure is still reported to the caller.
        if not t.opts.nocleanup:
            test_cleanup(t)
        raise
    if not t.opts.nocleanup:
        test_cleanup(t)
    t.info("S3 test: All OK")
| gpl-2.0 |
Yukarumya/Yukarum-Redfoxes | media/webrtc/trunk/tools/gyp/test/compiler-override/gyptest-compiler-env-toolchain.py | 100 | 2877 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that the user can override the compiler and linker using
CC/CXX/NM/READELF environment variables.
"""
import TestGyp
import os
import copy
import sys
# Directory containing this test; used to build paths to the my_*.py
# fake-tool scripts passed via CC/CXX/NM/READELF below.
here = os.path.dirname(os.path.abspath(__file__))

if sys.platform == 'win32':
    # cross compiling not supported by ninja on windows
    # and make not supported on windows at all.
    sys.exit(0)

# Clear any existing compiler related env vars so they cannot leak into
# the builds performed by this test.
for key in ['CC', 'CXX', 'LINK', 'CC_host', 'CXX_host', 'LINK_host',
            'NM_target', 'READELF_target']:
    if key in os.environ:
        del os.environ[key]
def CheckCompiler(test, gypfile, check_for, run_gyp):
    """Build *gypfile* and assert that every line in *check_for* appears
    in the build's stdout.  Gyp generation is re-run only when *run_gyp*
    is true."""
    if run_gyp:
        test.run_gyp(gypfile)
    test.build(gypfile)
    output = test.stdout()
    test.must_contain_all_lines(output, check_for)
# This test only exercises the ninja generator.
test = TestGyp.TestGyp(formats=['ninja'])

# Must set the test format to something with a flavor (the part after the '-')
# in order to test the desired behavior. Since we want to run a non-host
# toolchain, we have to set the flavor to something that the ninja generator
# doesn't know about, so it doesn't default to the host-specific tools (e.g.,
# 'otool' on mac to generate the .TOC).
#
# Note that we can't just pass format=['ninja-some_toolchain'] to the
# constructor above, because then this test wouldn't be recognized as a ninja
# format test.
test.formats = ['ninja-my_flavor' if f == 'ninja' else f for f in test.formats]
def TestTargetOverideSharedLib():
    """Verify that CC/CXX/NM/READELF env vars override the target tools
    for a shared-library build, both with the env set and after it has
    been restored (settings should be baked into the generated project)."""
    # The std output from nm and readelf is redirected to files, so we can't
    # expect their output to appear. Instead, check for the files they create to
    # see if they actually ran.
    expected = ['my_cc.py', 'my_cxx.py', 'FOO']

    # Check that CC, CXX, NM, READELF, set target compiler
    env = {'CC': 'python %s/my_cc.py FOO' % here,
           'CXX': 'python %s/my_cxx.py FOO' % here,
           'NM': 'python %s/my_nm.py' % here,
           'READELF': 'python %s/my_readelf.py' % here}

    with TestGyp.LocalEnv(env):
        CheckCompiler(test, 'compiler-shared-lib.gyp', expected, True)
        test.must_contain(test.built_file_path('RAN_MY_NM'), 'RAN_MY_NM')
        test.must_contain(test.built_file_path('RAN_MY_READELF'), 'RAN_MY_READELF')
        # Remove the marker files so the second run below has to recreate them.
        test.unlink(test.built_file_path('RAN_MY_NM'))
        test.unlink(test.built_file_path('RAN_MY_READELF'))

    # Run the same tests once the environ has been restored. The generated
    # projects should have embedded all the settings in the project files so the
    # results should be the same.
    CheckCompiler(test, 'compiler-shared-lib.gyp', expected, False)
    test.must_contain(test.built_file_path('RAN_MY_NM'), 'RAN_MY_NM')
    test.must_contain(test.built_file_path('RAN_MY_READELF'), 'RAN_MY_READELF')
# Run the override check, then report overall success.
TestTargetOverideSharedLib()
test.pass_test()
| mpl-2.0 |
lewisc/spark-tk | python/sparktk/frame/ops/export_to_jdbc.py | 11 | 2944 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
def export_to_jdbc(self, connection_url, table_name):
    """
    Write the current frame to a JDBC table.

    Parameters
    ----------

    :param connection_url: (str) JDBC connection url to the database server,
        of the form "jdbc:{database_type}://{host}/{database_name}".
        Example for postgres: "jdbc:postgresql://localhost/postgres"
        (standard connection string to connect to the default 'postgres'
        database).
    :param table_name: (str) JDBC table name.  A new table with the given
        name is created if it does not already exist.

    Example
    -------

    <skip>
    >>> from sparktk import TkContext
    >>> c=TkContext(sc)

    >>> data = [[1, 0.2, -2, 5], [2, 0.4, -1, 6], [3, 0.6, 0, 7], [4, 0.8, 1, 8]]
    >>> schema = [('a', int), ('b', float),('c', int) ,('d', int)]
    >>> my_frame = tc.frame.create(data, schema)
    <progress>

    >>> my_frame.export_to_jdbc("jdbc:postgresql://localhost/postgres", "demo_test")
    <progress>
    </skip>

    To verify the exported frame in postgres, from a bash shell:

        $ sudo -su postgres psql
        postgres=# \d

    You should see the demo_test table; run
    ``select * from demo_test`` to inspect the frame's rows.

    Notes
    -----

    java.sql.SQLException: No suitable driver found for <jdbcUrl>

    If this error is encountered while running your application, then your JDBC library cannot be found by the node
    running the application. If you're running in Local mode, make sure that you have used the --driver-class-path
    parameter. If a Spark cluster is involved, make sure that each cluster member has a copy of library, and that
    each node of the cluster has been restarted since you modified the spark-defaults.conf file. See this
    [site](https://sparkour.urizone.net/recipes/using-jdbc/).

    Sparktk does not come with any JDBC drivers. A driver compatible with the JDBC data sink must be supplied when
    creating the TkContext instance:

    <skip>
    >>> tc = sparktk.TkContext(pyspark_submit_args='--jars myJDBCDriver.jar')
    </skip>
    """
self._scala.exportToJdbc(connection_url, table_name) | apache-2.0 |
heran7/edx-platform | common/lib/xmodule/xmodule/tests/test_util_open_ended.py | 1 | 47574 | from xmodule.modulestore import Location
from xmodule.modulestore.xml import XMLModuleStore
from xmodule.tests import DATA_DIR, get_test_system
from StringIO import StringIO
# Connection settings for the external open-ended grading service used by
# the tests.  The credentials are placeholders ('incorrect'), so nothing
# configured here can reach a real grading server.
OPEN_ENDED_GRADING_INTERFACE = {
    'url': 'blah/',
    'username': 'incorrect',
    'password': 'incorrect',
    'staff_grading': 'staff_grading',
    'peer_grading': 'peer_grading',
    'grading_controller': 'grading_controller'
}

# Empty S3 credentials used by the S3 upload tests.
S3_INTERFACE = {
    'access_key': "",
    'secret_access_key': "",
    "storage_bucket_name": "",
}
class MockS3Key(object):
    """
    Lightweight stand-in for a boto S3 ``Key``, used by the file upload tests.

    Metadata entries are stored as plain attributes on the key, and uploaded
    file contents are captured in ``self.data``.
    """

    def __init__(self, bucket):
        # The real Key is bound to a bucket; the mock has no use for it.
        pass

    def set_metadata(self, key, value):
        """Record a metadata entry as an attribute on this key."""
        setattr(self, key, value)

    def set_contents_from_file(self, fileobject):
        """Capture the uploaded file's contents for later inspection."""
        self.data = fileobject.read()

    def set_acl(self, acl):
        """Store the ACL as just another piece of metadata."""
        self.set_metadata("acl", acl)

    def generate_url(self, timeout):
        """Return a fixed sample URL; the timeout is ignored by the mock."""
        return "http://www.edx.org/sample_url"
class MockS3Connection(object):
    """Minimal stand-in for boto's S3Connection, used by image upload tests."""

    def __init__(self, access_key, secret_key, **kwargs):
        # The real constructor takes many options; the mock needs none of them.
        pass

    def create_bucket(self, bucket_name, **kwargs):
        """Pretend to create a bucket; always returns the same marker string."""
        return "edX Bucket"
class MockUploadedFile(object):
    """
    Mock an uploaded file for image submission tests.

    Wraps a StringIO holding ``value`` and exposes the ``name``, ``seek``
    and ``read`` interface the tests use.

    Fix: the constructor now rewinds the buffer after writing, so a fresh
    mock is readable from the start (like a real uploaded file).  Previously
    ``read()`` on a new instance returned '' because the stream position was
    left at the end of the written data.
    """

    def __init__(self, name, value):
        """
        name - the filename reported for the upload.
        value - string data to place into the mock file.
        """
        self.mock_file = StringIO()
        self.mock_file.write(value)
        # Rewind so consumers can read the contents without seeking first.
        self.mock_file.seek(0)
        self.name = name

    def seek(self, index):
        """Delegate seeking to the underlying buffer."""
        return self.mock_file.seek(index)

    def read(self):
        """Read the remaining contents of the underlying buffer."""
        return self.mock_file.read()
class MockQueryDict(dict):
    """
    Partial mock of a Django-style query dict for the combinedopenended
    tests.  Only ``getlist`` is emulated, and only the behavior those
    tests actually need.
    """

    def getlist(self, key, default=None):
        """Return the stored value for ``key``; when the key is absent,
        return ``default`` (or an empty list when no default is given)."""
        if key not in self:
            return [] if default is None else default
        return super(MockQueryDict, self).__getitem__(key)
class DummyModulestore(object):
    """
    A mixin that allows test classes to have convenience functions to get a module given a location
    """
    # NOTE(review): this class attribute is named ``get_test_system`` but holds
    # the *result* of calling get_test_system().  get_module_from_location()
    # below reads ``self.test_system``, which is never assigned in this class
    # -- confirm that subclasses set test_system themselves.
    get_test_system = get_test_system()

    def setup_modulestore(self, name):
        # Load a single XML course directory from the shared test DATA_DIR.
        self.modulestore = XMLModuleStore(DATA_DIR, course_dirs=[name])

    def get_course(self, _):
        """Get a test course by directory name. If there's more than one, error."""
        # NOTE(review): despite the docstring, no error is raised when more
        # than one course is loaded -- the first one is returned.
        courses = self.modulestore.get_courses()
        return courses[0]

    def get_module_from_location(self, location, course):
        # Resolve the course, coerce plain location values into a Location,
        # then build the XModule for the descriptor using self.test_system.
        course = self.get_course(course)
        if not isinstance(location, Location):
            location = Location(location)
        descriptor = self.modulestore.get_instance(course.id, location, depth=None)
        return descriptor.xmodule(self.test_system)
# Task state for a module with self assessment then instructor assessment.
TEST_STATE_SA_IN = ["{\"child_created\": false, \"child_attempts\": 2, \"version\": 1, \"child_history\": [{\"answer\": \"However venture pursuit he am mr cordial. Forming musical am hearing studied be luckily. Ourselves for determine attending how led gentleman sincerity. Valley afford uneasy joy she thrown though bed set. In me forming general prudent on country carried. Behaved an or suppose justice. Seemed whence how son rather easily and change missed. Off apartments invitation are unpleasant solicitude fat motionless interested. Hardly suffer wisdom wishes valley as an. As friendship advantages resolution it alteration stimulated he or increasing. \\r<br><br>Now led tedious shy lasting females off. Dashwood marianne in of entrance be on wondered possible building. Wondered sociable he carriage in speedily margaret. Up devonshire of he thoroughly insensible alteration. An mr settling occasion insisted distance ladyship so. Not attention say frankness intention out dashwoods now curiosity. Stronger ecstatic as no judgment daughter speedily thoughts. Worse downs nor might she court did nay forth these. \", \"post_assessment\": \"[3, 3, 2, 2, 2]\", \"score\": 12}, {\"answer\": \"Delightful remarkably mr on announcing themselves entreaties favourable. About to in so terms voice at. Equal an would is found seems of. The particular friendship one sufficient terminated frequently themselves. It more shed went up is roof if loud case. Delay music in lived noise an. Beyond genius really enough passed is up. \\r<br><br>John draw real poor on call my from. May she mrs furnished discourse extremely. Ask doubt noisy shade guest did built her him. Ignorant repeated hastened it do. Consider bachelor he yourself expenses no. Her itself active giving for expect vulgar months. Discovery commanded fat mrs remaining son she principle middleton neglected. Be miss he in post sons held. No tried is defer do money scale rooms. 
\", \"post_assessment\": \"[3, 3, 2, 2, 2]\", \"score\": 12}], \"max_score\": 12, \"child_state\": \"done\"}", "{\"child_created\": false, \"child_attempts\": 0, \"version\": 1, \"child_history\": [{\"answer\": \"However venture pursuit he am mr cordial. Forming musical am hearing studied be luckily. Ourselves for determine attending how led gentleman sincerity. Valley afford uneasy joy she thrown though bed set. In me forming general prudent on country carried. Behaved an or suppose justice. Seemed whence how son rather easily and change missed. Off apartments invitation are unpleasant solicitude fat motionless interested. Hardly suffer wisdom wishes valley as an. As friendship advantages resolution it alteration stimulated he or increasing. \\r<br><br>Now led tedious shy lasting females off. Dashwood marianne in of entrance be on wondered possible building. Wondered sociable he carriage in speedily margaret. Up devonshire of he thoroughly insensible alteration. An mr settling occasion insisted distance ladyship so. Not attention say frankness intention out dashwoods now curiosity. Stronger ecstatic as no judgment daughter speedily thoughts. Worse downs nor might she court did nay forth these. \", \"post_assessment\": \"{\\\"submission_id\\\": 1460, \\\"score\\\": 12, \\\"feedback\\\": \\\"{\\\\\\\"feedback\\\\\\\": \\\\\\\"\\\\\\\"}\\\", \\\"success\\\": true, \\\"grader_id\\\": 5413, \\\"grader_type\\\": \\\"IN\\\", \\\"rubric_scores_complete\\\": true, \\\"rubric_xml\\\": \\\"<rubric><category><description>\\\\nIdeas\\\\n</description><score>3</score><option points='0'>\\\\nDifficult for the reader to discern the main idea. Too brief or too repetitive to establish or maintain a focus.\\\\n</option><option points='1'>\\\\nAttempts a main idea. Sometimes loses focus or ineffectively displays focus.\\\\n</option><option points='2'>\\\\nPresents a unifying theme or main idea, but may include minor tangents. 
Stays somewhat focused on topic and task.\\\\n</option><option points='3'>\\\\nPresents a unifying theme or main idea without going off on tangents. Stays completely focused on topic and task.\\\\n</option></category><category><description>\\\\nContent\\\\n</description><score>3</score><option points='0'>\\\\nIncludes little information with few or no details or unrelated details. Unsuccessful in attempts to explore any facets of the topic.\\\\n</option><option points='1'>\\\\nIncludes little information and few or no details. Explores only one or two facets of the topic.\\\\n</option><option points='2'>\\\\nIncludes sufficient information and supporting details. (Details may not be fully developed; ideas may be listed.) Explores some facets of the topic.\\\\n</option><option points='3'>\\\\nIncludes in-depth information and exceptional supporting details that are fully developed. Explores all facets of the topic.\\\\n</option></category><category><description>\\\\nOrganization\\\\n</description><score>2</score><option points='0'>\\\\nIdeas organized illogically, transitions weak, and response difficult to follow.\\\\n</option><option points='1'>\\\\nAttempts to logically organize ideas. Attempts to progress in an order that enhances meaning, and demonstrates use of transitions.\\\\n</option><option points='2'>\\\\nIdeas organized logically. Progresses in an order that enhances meaning. Includes smooth transitions.\\\\n</option></category><category><description>\\\\nStyle\\\\n</description><score>2</score><option points='0'>\\\\nContains limited vocabulary, with many words used incorrectly. Demonstrates problems with sentence patterns.\\\\n</option><option points='1'>\\\\nContains basic vocabulary, with words that are predictable and common. Contains mostly simple sentences (although there may be an attempt at more varied sentence patterns).\\\\n</option><option points='2'>\\\\nIncludes vocabulary to make explanations detailed and precise. 
Includes varied sentence patterns, including complex sentences.\\\\n</option></category><category><description>\\\\nVoice\\\\n</description><score>2</score><option points='0'>\\\\nDemonstrates language and tone that may be inappropriate to task and reader.\\\\n</option><option points='1'>\\\\nDemonstrates an attempt to adjust language and tone to task and reader.\\\\n</option><option points='2'>\\\\nDemonstrates effective adjustment of language and tone to task and reader.\\\\n</option></category></rubric>\\\"}\", \"score\": 12}, {\"answer\": \"Delightful remarkably mr on announcing themselves entreaties favourable. About to in so terms voice at. Equal an would is found seems of. The particular friendship one sufficient terminated frequently themselves. It more shed went up is roof if loud case. Delay music in lived noise an. Beyond genius really enough passed is up. \\r<br><br>John draw real poor on call my from. May she mrs furnished discourse extremely. Ask doubt noisy shade guest did built her him. Ignorant repeated hastened it do. Consider bachelor he yourself expenses no. Her itself active giving for expect vulgar months. Discovery commanded fat mrs remaining son she principle middleton neglected. Be miss he in post sons held. No tried is defer do money scale rooms. \", \"post_assessment\": \"{\\\"submission_id\\\": 1462, \\\"score\\\": 12, \\\"feedback\\\": \\\"{\\\\\\\"feedback\\\\\\\": \\\\\\\"\\\\\\\"}\\\", \\\"success\\\": true, \\\"grader_id\\\": 5418, \\\"grader_type\\\": \\\"IN\\\", \\\"rubric_scores_complete\\\": true, \\\"rubric_xml\\\": \\\"<rubric><category><description>\\\\nIdeas\\\\n</description><score>3</score><option points='0'>\\\\nDifficult for the reader to discern the main idea. Too brief or too repetitive to establish or maintain a focus.\\\\n</option><option points='1'>\\\\nAttempts a main idea. 
Sometimes loses focus or ineffectively displays focus.\\\\n</option><option points='2'>\\\\nPresents a unifying theme or main idea, but may include minor tangents. Stays somewhat focused on topic and task.\\\\n</option><option points='3'>\\\\nPresents a unifying theme or main idea without going off on tangents. Stays completely focused on topic and task.\\\\n</option></category><category><description>\\\\nContent\\\\n</description><score>3</score><option points='0'>\\\\nIncludes little information with few or no details or unrelated details. Unsuccessful in attempts to explore any facets of the topic.\\\\n</option><option points='1'>\\\\nIncludes little information and few or no details. Explores only one or two facets of the topic.\\\\n</option><option points='2'>\\\\nIncludes sufficient information and supporting details. (Details may not be fully developed; ideas may be listed.) Explores some facets of the topic.\\\\n</option><option points='3'>\\\\nIncludes in-depth information and exceptional supporting details that are fully developed. Explores all facets of the topic.\\\\n</option></category><category><description>\\\\nOrganization\\\\n</description><score>2</score><option points='0'>\\\\nIdeas organized illogically, transitions weak, and response difficult to follow.\\\\n</option><option points='1'>\\\\nAttempts to logically organize ideas. Attempts to progress in an order that enhances meaning, and demonstrates use of transitions.\\\\n</option><option points='2'>\\\\nIdeas organized logically. Progresses in an order that enhances meaning. Includes smooth transitions.\\\\n</option></category><category><description>\\\\nStyle\\\\n</description><score>2</score><option points='0'>\\\\nContains limited vocabulary, with many words used incorrectly. Demonstrates problems with sentence patterns.\\\\n</option><option points='1'>\\\\nContains basic vocabulary, with words that are predictable and common. 
Contains mostly simple sentences (although there may be an attempt at more varied sentence patterns).\\\\n</option><option points='2'>\\\\nIncludes vocabulary to make explanations detailed and precise. Includes varied sentence patterns, including complex sentences.\\\\n</option></category><category><description>\\\\nVoice\\\\n</description><score>2</score><option points='0'>\\\\nDemonstrates language and tone that may be inappropriate to task and reader.\\\\n</option><option points='1'>\\\\nDemonstrates an attempt to adjust language and tone to task and reader.\\\\n</option><option points='2'>\\\\nDemonstrates effective adjustment of language and tone to task and reader.\\\\n</option></category></rubric>\\\"}\", \"score\": 12}], \"max_score\": 12, \"child_state\": \"post_assessment\"}"]
# Mock instance state. Should receive a score of 15.
MOCK_INSTANCE_STATE = r"""{"ready_to_reset": false, "skip_spelling_checks": true, "current_task_number": 1, "weight": 5.0, "graceperiod": "1 day 12 hours 59 minutes 59 seconds", "graded": "True", "task_states": ["{\"child_created\": false, \"child_attempts\": 4, \"version\": 1, \"child_history\": [{\"answer\": \"After 24 hours, remove the samples from the containers and rinse each sample with distilled water.\\r\\nAllow the samples to sit and dry for 30 minutes.\\r\\nDetermine the mass of each sample.\\r\\nThe students\\u2019 data are recorded in the table below.\\r\\n\\r\\nStarting Mass (g)\\tEnding Mass (g)\\tDifference in Mass (g)\\r\\nMarble\\t 9.8\\t 9.4\\t\\u20130.4\\r\\nLimestone\\t10.4\\t 9.1\\t\\u20131.3\\r\\nWood\\t11.2\\t11.2\\t 0.0\\r\\nPlastic\\t 7.2\\t 7.1\\t\\u20130.1\\r\\nAfter reading the\", \"post_assessment\": \"[3]\", \"score\": 3}, {\"answer\": \"To replicate the experiment, the procedure would require more detail. One piece of information that is omitted is the amount of vinegar used in the experiment. It is also important to know what temperature the experiment was kept at during the 24 hours. Finally, the procedure needs to include details about the experiment, for example if the whole sample must be submerged.\", \"post_assessment\": \"[3]\", \"score\": 3}, {\"answer\": \"e the mass of four different samples.\\r\\nPour vinegar in each of four separate, but identical, containers.\\r\\nPlace a sample of one material into one container and label. 
Repeat with remaining samples, placing a single sample into a single container.\\r\\nAfter 24 hours, remove the samples from the containers and rinse each sample with distilled water.\\r\\nAllow the samples to sit and dry for 30 minutes.\\r\\nDetermine the mass of each sample.\\r\\nThe students\\u2019 data are recorded in the table below.\\r\\n\", \"post_assessment\": \"[3]\", \"score\": 3}, {\"answer\": \"\", \"post_assessment\": \"[3]\", \"score\": 3}], \"max_score\": 3, \"child_state\": \"done\"}", "{\"child_created\": false, \"child_attempts\": 0, \"version\": 1, \"child_history\": [{\"answer\": \"The students\\u2019 data are recorded in the table below.\\r\\n\\r\\nStarting Mass (g)\\tEnding Mass (g)\\tDifference in Mass (g)\\r\\nMarble\\t 9.8\\t 9.4\\t\\u20130.4\\r\\nLimestone\\t10.4\\t 9.1\\t\\u20131.3\\r\\nWood\\t11.2\\t11.2\\t 0.0\\r\\nPlastic\\t 7.2\\t 7.1\\t\\u20130.1\\r\\nAfter reading the group\\u2019s procedure, describe what additional information you would need in order to replicate the expe\", \"post_assessment\": \"{\\\"submission_id\\\": 3097, \\\"score\\\": 0, \\\"feedback\\\": \\\"{\\\\\\\"spelling\\\\\\\": \\\\\\\"Spelling: Ok.\\\\\\\", \\\\\\\"grammar\\\\\\\": \\\\\\\"Grammar: More grammar errors than average.\\\\\\\", \\\\\\\"markup-text\\\\\\\": \\\\\\\"the students data are recorded in the <bg>table below . starting mass</bg> g ending mass g difference in mass g marble . . . limestone . . . wood . . . plastic . . . after reading the groups <bg>procedure , describe what additional</bg> information you would need in order to replicate the <bs>expe</bs>\\\\\\\"}\\\", \\\"success\\\": true, \\\"grader_id\\\": 3233, \\\"grader_type\\\": \\\"ML\\\", \\\"rubric_scores_complete\\\": true, \\\"rubric_xml\\\": \\\"<rubric><category><description>Response Quality</description><score>0</score><option points='0'>The response is not a satisfactory answer to the question. 
It either fails to address the question or does so in a limited way, with no evidence of higher-order thinking.</option><option points='1'>The response is a marginal answer to the question. It may contain some elements of a proficient response, but it is inaccurate or incomplete.</option><option points='2'>The response is a proficient answer to the question. It is generally correct, although it may contain minor inaccuracies. There is limited evidence of higher-order thinking.</option><option points='3'>The response is correct, complete, and contains evidence of higher-order thinking.</option></category></rubric>\\\"}\", \"score\": 0}, {\"answer\": \"After 24 hours, remove the samples from the containers and rinse each sample with distilled water.\\r\\nAllow the samples to sit and dry for 30 minutes.\\r\\nDetermine the mass of each sample.\\r\\nThe students\\u2019 data are recorded in the table below.\\r\\n\\r\\nStarting Mass (g)\\tEnding Mass (g)\\tDifference in Mass (g)\\r\\nMarble\\t 9.8\\t 9.4\\t\\u20130.4\\r\\nLimestone\\t10.4\\t 9.1\\t\\u20131.3\\r\\nWood\\t11.2\\t11.2\\t 0.0\\r\\nPlastic\\t 7.2\\t 7.1\\t\\u20130.1\\r\\nAfter reading the\", \"post_assessment\": \"{\\\"submission_id\\\": 3098, \\\"score\\\": 0, \\\"feedback\\\": \\\"{\\\\\\\"spelling\\\\\\\": \\\\\\\"Spelling: Ok.\\\\\\\", \\\\\\\"grammar\\\\\\\": \\\\\\\"Grammar: Ok.\\\\\\\", \\\\\\\"markup-text\\\\\\\": \\\\\\\"after hours , remove the samples from the containers and rinse each sample with distilled water . allow the samples to sit and dry for minutes . determine the mass of each sample . the students data are recorded in the <bg>table below . starting mass</bg> g ending mass g difference in mass g marble . . . limestone . . . wood . . . plastic . . . 
after reading the\\\\\\\"}\\\", \\\"success\\\": true, \\\"grader_id\\\": 3235, \\\"grader_type\\\": \\\"ML\\\", \\\"rubric_scores_complete\\\": true, \\\"rubric_xml\\\": \\\"<rubric><category><description>Response Quality</description><score>0</score><option points='0'>The response is not a satisfactory answer to the question. It either fails to address the question or does so in a limited way, with no evidence of higher-order thinking.</option><option points='1'>The response is a marginal answer to the question. It may contain some elements of a proficient response, but it is inaccurate or incomplete.</option><option points='2'>The response is a proficient answer to the question. It is generally correct, although it may contain minor inaccuracies. There is limited evidence of higher-order thinking.</option><option points='3'>The response is correct, complete, and contains evidence of higher-order thinking.</option></category></rubric>\\\"}\", \"score\": 0}, {\"answer\": \"To replicate the experiment, the procedure would require more detail. One piece of information that is omitted is the amount of vinegar used in the experiment. It is also important to know what temperature the experiment was kept at during the 24 hours. Finally, the procedure needs to include details about the experiment, for example if the whole sample must be submerged.\", \"post_assessment\": \"{\\\"submission_id\\\": 3099, \\\"score\\\": 3, \\\"feedback\\\": \\\"{\\\\\\\"spelling\\\\\\\": \\\\\\\"Spelling: Ok.\\\\\\\", \\\\\\\"grammar\\\\\\\": \\\\\\\"Grammar: Ok.\\\\\\\", \\\\\\\"markup-text\\\\\\\": \\\\\\\"to replicate the experiment , the procedure would require <bg>more detail . one</bg> piece of information <bg>that is omitted is the</bg> amount of vinegar used in the experiment . it is also important to know what temperature the experiment was kept at during the hours . 
finally , the procedure needs to include details about the experiment , for example if the whole sample must be submerged .\\\\\\\"}\\\", \\\"success\\\": true, \\\"grader_id\\\": 3237, \\\"grader_type\\\": \\\"ML\\\", \\\"rubric_scores_complete\\\": true, \\\"rubric_xml\\\": \\\"<rubric><category><description>Response Quality</description><score>3</score><option points='0'>The response is not a satisfactory answer to the question. It either fails to address the question or does so in a limited way, with no evidence of higher-order thinking.</option><option points='1'>The response is a marginal answer to the question. It may contain some elements of a proficient response, but it is inaccurate or incomplete.</option><option points='2'>The response is a proficient answer to the question. It is generally correct, although it may contain minor inaccuracies. There is limited evidence of higher-order thinking.</option><option points='3'>The response is correct, complete, and contains evidence of higher-order thinking.</option></category></rubric>\\\"}\", \"score\": 3}, {\"answer\": \"e the mass of four different samples.\\r\\nPour vinegar in each of four separate, but identical, containers.\\r\\nPlace a sample of one material into one container and label. Repeat with remaining samples, placing a single sample into a single container.\\r\\nAfter 24 hours, remove the samples from the containers and rinse each sample with distilled water.\\r\\nAllow the samples to sit and dry for 30 minutes.\\r\\nDetermine the mass of each sample.\\r\\nThe students\\u2019 data are recorded in the table below.\\r\\n\", \"post_assessment\": \"{\\\"submission_id\\\": 3100, \\\"score\\\": 0, \\\"feedback\\\": \\\"{\\\\\\\"spelling\\\\\\\": \\\\\\\"Spelling: Ok.\\\\\\\", \\\\\\\"grammar\\\\\\\": \\\\\\\"Grammar: Ok.\\\\\\\", \\\\\\\"markup-text\\\\\\\": \\\\\\\"e the mass of four different samples . pour vinegar in <bg>each of four separate</bg> , but identical , containers . 
place a sample of one material into one container and label . repeat with remaining samples , placing a single sample into a single container . after hours , remove the samples from the containers and rinse each sample with distilled water . allow the samples to sit and dry for minutes . determine the mass of each sample . the students data are recorded in the table below . \\\\\\\"}\\\", \\\"success\\\": true, \\\"grader_id\\\": 3239, \\\"grader_type\\\": \\\"ML\\\", \\\"rubric_scores_complete\\\": true, \\\"rubric_xml\\\": \\\"<rubric><category><description>Response Quality</description><score>0</score><option points='0'>The response is not a satisfactory answer to the question. It either fails to address the question or does so in a limited way, with no evidence of higher-order thinking.</option><option points='1'>The response is a marginal answer to the question. It may contain some elements of a proficient response, but it is inaccurate or incomplete.</option><option points='2'>The response is a proficient answer to the question. It is generally correct, although it may contain minor inaccuracies. There is limited evidence of higher-order thinking.</option><option points='3'>The response is correct, complete, and contains evidence of higher-order thinking.</option></category></rubric>\\\"}\", \"score\": 0}, {\"answer\": \"\", \"post_assessment\": \"{\\\"submission_id\\\": 3101, \\\"score\\\": 0, \\\"feedback\\\": \\\"{\\\\\\\"spelling\\\\\\\": \\\\\\\"Spelling: Ok.\\\\\\\", \\\\\\\"grammar\\\\\\\": \\\\\\\"Grammar: Ok.\\\\\\\", \\\\\\\"markup-text\\\\\\\": \\\\\\\"invalid essay .\\\\\\\"}\\\", \\\"success\\\": true, \\\"grader_id\\\": 3241, \\\"grader_type\\\": \\\"ML\\\", \\\"rubric_scores_complete\\\": true, \\\"rubric_xml\\\": \\\"<rubric><category><description>Response Quality</description><score>0</score><option points='0'>The response is not a satisfactory answer to the question. 
It either fails to address the question or does so in a limited way, with no evidence of higher-order thinking.</option><option points='1'>The response is a marginal answer to the question. It may contain some elements of a proficient response, but it is inaccurate or incomplete.</option><option points='2'>The response is a proficient answer to the question. It is generally correct, although it may contain minor inaccuracies. There is limited evidence of higher-order thinking.</option><option points='3'>The response is correct, complete, and contains evidence of higher-order thinking.</option></category></rubric>\\\"}\", \"score\": 0}], \"max_score\": 3, \"child_state\": \"done\"}"], "attempts": "10000", "student_attempts": 0, "due": null, "state": "done", "accept_file_upload": false, "display_name": "Science Question -- Machine Assessed"}"""
# Task state with self assessment only.
TEST_STATE_SA = ["{\"child_created\": false, \"child_attempts\": 1, \"version\": 1, \"child_history\": [{\"answer\": \"Censorship in the Libraries\\r<br>'All of us can think of a book that we hope none of our children or any other children have taken off the shelf. But if I have the right to remove that book from the shelf -- that work I abhor -- then you also have exactly the same right and so does everyone else. And then we have no books left on the shelf for any of us.' --Katherine Paterson, Author\\r<br><br>Write a persuasive essay to a newspaper reflecting your vies on censorship in libraries. Do you believe that certain materials, such as books, music, movies, magazines, etc., should be removed from the shelves if they are found offensive? Support your position with convincing arguments from your own experience, observations, and/or reading.\", \"post_assessment\": \"[3, 3, 2, 2, 2]\", \"score\": 12}], \"max_score\": 12, \"child_state\": \"done\"}", "{\"child_created\": false, \"child_attempts\": 0, \"version\": 1, \"child_history\": [{\"answer\": \"Censorship in the Libraries\\r<br>'All of us can think of a book that we hope none of our children or any other children have taken off the shelf. But if I have the right to remove that book from the shelf -- that work I abhor -- then you also have exactly the same right and so does everyone else. And then we have no books left on the shelf for any of us.' --Katherine Paterson, Author\\r<br><br>Write a persuasive essay to a newspaper reflecting your vies on censorship in libraries. Do you believe that certain materials, such as books, music, movies, magazines, etc., should be removed from the shelves if they are found offensive? 
Support your position with convincing arguments from your own experience, observations, and/or reading.\", \"post_assessment\": \"{\\\"submission_id\\\": 1461, \\\"score\\\": 12, \\\"feedback\\\": \\\"{\\\\\\\"feedback\\\\\\\": \\\\\\\"\\\\\\\"}\\\", \\\"success\\\": true, \\\"grader_id\\\": 5414, \\\"grader_type\\\": \\\"IN\\\", \\\"rubric_scores_complete\\\": true, \\\"rubric_xml\\\": \\\"<rubric><category><description>\\\\nIdeas\\\\n</description><score>3</score><option points='0'>\\\\nDifficult for the reader to discern the main idea. Too brief or too repetitive to establish or maintain a focus.\\\\n</option><option points='1'>\\\\nAttempts a main idea. Sometimes loses focus or ineffectively displays focus.\\\\n</option><option points='2'>\\\\nPresents a unifying theme or main idea, but may include minor tangents. Stays somewhat focused on topic and task.\\\\n</option><option points='3'>\\\\nPresents a unifying theme or main idea without going off on tangents. Stays completely focused on topic and task.\\\\n</option></category><category><description>\\\\nContent\\\\n</description><score>3</score><option points='0'>\\\\nIncludes little information with few or no details or unrelated details. Unsuccessful in attempts to explore any facets of the topic.\\\\n</option><option points='1'>\\\\nIncludes little information and few or no details. Explores only one or two facets of the topic.\\\\n</option><option points='2'>\\\\nIncludes sufficient information and supporting details. (Details may not be fully developed; ideas may be listed.) Explores some facets of the topic.\\\\n</option><option points='3'>\\\\nIncludes in-depth information and exceptional supporting details that are fully developed. 
Explores all facets of the topic.\\\\n</option></category><category><description>\\\\nOrganization\\\\n</description><score>2</score><option points='0'>\\\\nIdeas organized illogically, transitions weak, and response difficult to follow.\\\\n</option><option points='1'>\\\\nAttempts to logically organize ideas. Attempts to progress in an order that enhances meaning, and demonstrates use of transitions.\\\\n</option><option points='2'>\\\\nIdeas organized logically. Progresses in an order that enhances meaning. Includes smooth transitions.\\\\n</option></category><category><description>\\\\nStyle\\\\n</description><score>2</score><option points='0'>\\\\nContains limited vocabulary, with many words used incorrectly. Demonstrates problems with sentence patterns.\\\\n</option><option points='1'>\\\\nContains basic vocabulary, with words that are predictable and common. Contains mostly simple sentences (although there may be an attempt at more varied sentence patterns).\\\\n</option><option points='2'>\\\\nIncludes vocabulary to make explanations detailed and precise. Includes varied sentence patterns, including complex sentences.\\\\n</option></category><category><description>\\\\nVoice\\\\n</description><score>2</score><option points='0'>\\\\nDemonstrates language and tone that may be inappropriate to task and reader.\\\\n</option><option points='1'>\\\\nDemonstrates an attempt to adjust language and tone to task and reader.\\\\n</option><option points='2'>\\\\nDemonstrates effective adjustment of language and tone to task and reader.\\\\n</option></category></rubric>\\\"}\", \"score\": 12}], \"max_score\": 12, \"child_state\": \"post_assessment\"}"]
# Task state with self and then ai assessment.
TEST_STATE_AI = ["{\"child_created\": false, \"child_attempts\": 2, \"version\": 1, \"child_history\": [{\"answer\": \"In libraries, there should not be censorship on materials considering that it's an individual's decision to read what they prefer. There is no appropriate standard on what makes a book offensive to a group, so it should be undetermined as to what makes a book offensive. In a public library, many children, who the books are censored for, are with their parents. Parents should make an independent choice on what they can allow their children to read. Letting society ban a book simply for the use of inappropriate materials is ridiculous. If an author spent time creating a story, it should be appreciated, and should not put on a list of no-nos. If a certain person doesn't like a book's reputation, all they have to do is not read it. Even in school systems, librarians are there to guide kids to read good books. If a child wants to read an inappropriate book, the librarian will most likely discourage him or her not to read it. In my experience, I wanted to read a book that my mother suggested to me, but as I went to the school library it turned out to be a censored book. Some parents believe children should be ignorant about offensive things written in books, but honestly many of the same ideas are exploited to them everyday on television and internet. So trying to shield your child from the bad things may be a great thing, but the efforts are usually failed attempts. It also never occurs to the people censoring the books, that some people can't afford to buy the books they want to read. The libraries, for some, are the main means for getting books. To conclude there is very little reason to ban a book from the shelves. Many of the books banned have important lessons that can be obtained through reading it. 
If a person doesn't like a book, the simplest thing to do is not to pick it up.\", \"post_assessment\": \"[1, 1]\", \"score\": 2}, {\"answer\": \"This is another response\", \"post_assessment\": \"[1, 1]\", \"score\": 2}], \"max_score\": 2, \"child_state\": \"done\"}", "{\"child_created\": false, \"child_attempts\": 0, \"version\": 1, \"child_history\": [{\"answer\": \"In libraries, there should not be censorship on materials considering that it's an individual's decision to read what they prefer. There is no appropriate standard on what makes a book offensive to a group, so it should be undetermined as to what makes a book offensive. In a public library, many children, who the books are censored for, are with their parents. Parents should make an independent choice on what they can allow their children to read. Letting society ban a book simply for the use of inappropriate materials is ridiculous. If an author spent time creating a story, it should be appreciated, and should not put on a list of no-nos. If a certain person doesn't like a book's reputation, all they have to do is not read it. Even in school systems, librarians are there to guide kids to read good books. If a child wants to read an inappropriate book, the librarian will most likely discourage him or her not to read it. In my experience, I wanted to read a book that my mother suggested to me, but as I went to the school library it turned out to be a censored book. Some parents believe children should be ignorant about offensive things written in books, but honestly many of the same ideas are exploited to them everyday on television and internet. So trying to shield your child from the bad things may be a great thing, but the efforts are usually failed attempts. It also never occurs to the people censoring the books, that some people can't afford to buy the books they want to read. The libraries, for some, are the main means for getting books. 
To conclude there is very little reason to ban a book from the shelves. Many of the books banned have important lessons that can be obtained through reading it. If a person doesn't like a book, the simplest thing to do is not to pick it up.\", \"post_assessment\": \"{\\\"submission_id\\\": 6107, \\\"score\\\": 2, \\\"feedback\\\": \\\"{\\\\\\\"feedback\\\\\\\": \\\\\\\"\\\\\\\"}\\\", \\\"success\\\": true, \\\"grader_id\\\": 1898718, \\\"grader_type\\\": \\\"IN\\\", \\\"rubric_scores_complete\\\": true, \\\"rubric_xml\\\": \\\"<rubric><category><description>Writing Applications</description><score>1</score><option points='0'> The essay loses focus, has little information or supporting details, and the organization makes it difficult to follow.</option><option points='1'> The essay presents a mostly unified theme, includes sufficient information to convey the theme, and is generally organized well.</option></category><category><description> Language Conventions </description><score>1</score><option points='0'> The essay demonstrates a reasonable command of proper spelling and grammar. </option><option points='1'> The essay demonstrates superior command of proper spelling and grammar.</option></category></rubric>\\\"}\", \"score\": 2}, {\"answer\": \"This is another response\"}], \"max_score\": 2, \"child_state\": \"assessing\"}"]
# Task state with AI assessment only.
TEST_STATE_AI2 = ["{\"child_created\": false, \"child_attempts\": 0, \"version\": 1, \"child_history\": [{\"answer\": \"This isn't a real essay, and you should give me a zero on it. \", \"post_assessment\": \"{\\\"submission_id\\\": 18446, \\\"score\\\": [0, 1, 0], \\\"feedback\\\": [\\\"{\\\\\\\"feedback\\\\\\\": \\\\\\\"\\\\\\\"}\\\", \\\"{\\\\\\\"feedback\\\\\\\": \\\\\\\"\\\\\\\"}\\\", \\\"{\\\\\\\"feedback\\\\\\\": \\\\\\\"Zero it is! \\\\\\\"}\\\"], \\\"success\\\": true, \\\"grader_id\\\": [1944146, 1943188, 1940991], \\\"grader_type\\\": \\\"PE\\\", \\\"rubric_scores_complete\\\": [true, true, true], \\\"rubric_xml\\\": [\\\"<rubric><category><description>Writing Applications</description><score>0</score><option points='0'> The essay loses focus, has little information or supporting details, and the organization makes it difficult to follow.</option><option points='1'> The essay presents a mostly unified theme, includes sufficient information to convey the theme, and is generally organized well.</option></category><category><description> Language Conventions </description><score>0</score><option points='0'> The essay demonstrates a reasonable command of proper spelling and grammar. </option><option points='1'> The essay demonstrates superior command of proper spelling and grammar.</option></category></rubric>\\\", \\\"<rubric><category><description>Writing Applications</description><score>0</score><option points='0'> The essay loses focus, has little information or supporting details, and the organization makes it difficult to follow.</option><option points='1'> The essay presents a mostly unified theme, includes sufficient information to convey the theme, and is generally organized well.</option></category><category><description> Language Conventions </description><score>1</score><option points='0'> The essay demonstrates a reasonable command of proper spelling and grammar. 
</option><option points='1'> The essay demonstrates superior command of proper spelling and grammar.</option></category></rubric>\\\", \\\"<rubric><category><description>Writing Applications</description><score>0</score><option points='0'> The essay loses focus, has little information or supporting details, and the organization makes it difficult to follow.</option><option points='1'> The essay presents a mostly unified theme, includes sufficient information to convey the theme, and is generally organized well.</option></category><category><description> Language Conventions </description><score>0</score><option points='0'> The essay demonstrates a reasonable command of proper spelling and grammar. </option><option points='1'> The essay demonstrates superior command of proper spelling and grammar.</option></category></rubric>\\\"]}\", \"score\": 0}], \"max_score\": 2, \"child_state\": \"post_assessment\"}"]
# Invalid task state with AI assessment.
TEST_STATE_AI2_INVALID = ["{\"child_created\": false, \"child_attempts\": 0, \"version\": 1, \"child_history\": [{\"answer\": \"This isn't a real essay, and you should give me a zero on it. \", \"post_assessment\": \"{\\\"submission_id\\\": 18446, \\\"score\\\": [0, 1, 0], \\\"feedback\\\": [\\\"{\\\\\\\"feedback\\\\\\\": \\\\\\\"\\\\\\\"}\\\", \\\"{\\\\\\\"feedback\\\\\\\": \\\\\\\"\\\\\\\"}\\\", \\\"{\\\\\\\"feedback\\\\\\\": \\\\\\\"Zero it is! \\\\\\\"}\\\"], \\\"success\\\": true, \\\"grader_id\\\": [1943188, 1940991], \\\"grader_type\\\": \\\"PE\\\", \\\"rubric_scores_complete\\\": [true, true, true], \\\"rubric_xml\\\": [\\\"<rubric><category><description>Writing Applications</description><score>0</score><option points='0'> The essay loses focus, has little information or supporting details, and the organization makes it difficult to follow.</option><option points='1'> The essay presents a mostly unified theme, includes sufficient information to convey the theme, and is generally organized well.</option></category><category><description> Language Conventions </description><score>0</score><option points='0'> The essay demonstrates a reasonable command of proper spelling and grammar. </option><option points='1'> The essay demonstrates superior command of proper spelling and grammar.</option></category></rubric>\\\", \\\"<rubric><category><description>Writing Applications</description><score>0</score><option points='0'> The essay loses focus, has little information or supporting details, and the organization makes it difficult to follow.</option><option points='1'> The essay presents a mostly unified theme, includes sufficient information to convey the theme, and is generally organized well.</option></category><category><description> Language Conventions </description><score>1</score><option points='0'> The essay demonstrates a reasonable command of proper spelling and grammar. 
</option><option points='1'> The essay demonstrates superior command of proper spelling and grammar.</option></category></rubric>\\\", \\\"<rubric><category><description>Writing Applications</description><score>0</score><option points='0'> The essay loses focus, has little information or supporting details, and the organization makes it difficult to follow.</option><option points='1'> The essay presents a mostly unified theme, includes sufficient information to convey the theme, and is generally organized well.</option></category><category><description> Language Conventions </description><score>0</score><option points='0'> The essay demonstrates a reasonable command of proper spelling and grammar. </option><option points='1'> The essay demonstrates superior command of proper spelling and grammar.</option></category></rubric>\\\"]}\", \"score\": 0}], \"max_score\": 2, \"child_state\": \"post_assessment\"}"]
# Self-assessment state.
TEST_STATE_SINGLE = ["{\"child_created\": false, \"child_attempts\": 1, \"version\": 1, \"child_history\": [{\"answer\": \"'All of us can think of a book that we hope none of our children or any other children have taken off the shelf. But if I have the right to remove that book from the shelf -- that work I abhor -- then you also have exactly the same right and so does everyone else. And then we have no books left on the shelf for any of us.' --Katherine Paterson, Author\\r<br><br>Write a persuasive essay to a newspaper reflecting your vies on censorship in libraries. Do you believe that certain materials, such as books, music, movies, magazines, etc., should be removed from the shelves if they are found offensive? Support your position with convincing arguments from your own experience, observations, and/or reading. \", \"post_assessment\": \"[3, 3, 2, 2, 2]\", \"score\": 12}], \"max_score\": 12, \"child_state\": \"done\"}"]
# Peer grading state.
TEST_STATE_PE_SINGLE = ["{\"child_created\": false, \"child_attempts\": 0, \"version\": 1, \"child_history\": [{\"answer\": \"Passage its ten led hearted removal cordial. Preference any astonished unreserved mrs. Prosperous understood middletons in conviction an uncommonly do. Supposing so be resolving breakfast am or perfectly. Is drew am hill from mr. Valley by oh twenty direct me so. Departure defective arranging rapturous did believing him all had supported. Family months lasted simple set nature vulgar him. Picture for attempt joy excited ten carried manners talking how. Suspicion neglected he resolving agreement perceived at an. \\r<br><br>Ye on properly handsome returned throwing am no whatever. In without wishing he of picture no exposed talking minutes. Curiosity continual belonging offending so explained it exquisite. Do remember to followed yourself material mr recurred carriage. High drew west we no or at john. About or given on witty event. Or sociable up material bachelor bringing landlord confined. Busy so many in hung easy find well up. So of exquisite my an explained remainder. Dashwood denoting securing be on perceive my laughing so. \\r<br><br>Ought these are balls place mrs their times add she. Taken no great widow spoke of it small. Genius use except son esteem merely her limits. Sons park by do make on. It do oh cottage offered cottage in written. Especially of dissimilar up attachment themselves by interested boisterous. Linen mrs seems men table. Jennings dashwood to quitting marriage bachelor in. On as conviction in of appearance apartments boisterous. 
\", \"post_assessment\": \"{\\\"submission_id\\\": 1439, \\\"score\\\": [0], \\\"feedback\\\": [\\\"{\\\\\\\"feedback\\\\\\\": \\\\\\\"\\\\\\\"}\\\"], \\\"success\\\": true, \\\"grader_id\\\": [5337], \\\"grader_type\\\": \\\"PE\\\", \\\"rubric_scores_complete\\\": [true], \\\"rubric_xml\\\": [\\\"<rubric><category><description>\\\\nIdeas\\\\n</description><score>0</score><option points='0'>\\\\nDifficult for the reader to discern the main idea. Too brief or too repetitive to establish or maintain a focus.\\\\n</option><option points='1'>\\\\nAttempts a main idea. Sometimes loses focus or ineffectively displays focus.\\\\n</option><option points='2'>\\\\nPresents a unifying theme or main idea, but may include minor tangents. Stays somewhat focused on topic and task.\\\\n</option><option points='3'>\\\\nPresents a unifying theme or main idea without going off on tangents. Stays completely focused on topic and task.\\\\n</option></category><category><description>\\\\nContent\\\\n</description><score>0</score><option points='0'>\\\\nIncludes little information with few or no details or unrelated details. Unsuccessful in attempts to explore any facets of the topic.\\\\n</option><option points='1'>\\\\nIncludes little information and few or no details. Explores only one or two facets of the topic.\\\\n</option><option points='2'>\\\\nIncludes sufficient information and supporting details. (Details may not be fully developed; ideas may be listed.) Explores some facets of the topic.\\\\n</option><option points='3'>\\\\nIncludes in-depth information and exceptional supporting details that are fully developed. Explores all facets of the topic.\\\\n</option></category><category><description>\\\\nOrganization\\\\n</description><score>0</score><option points='0'>\\\\nIdeas organized illogically, transitions weak, and response difficult to follow.\\\\n</option><option points='1'>\\\\nAttempts to logically organize ideas. 
Attempts to progress in an order that enhances meaning, and demonstrates use of transitions.\\\\n</option><option points='2'>\\\\nIdeas organized logically. Progresses in an order that enhances meaning. Includes smooth transitions.\\\\n</option></category><category><description>\\\\nStyle\\\\n</description><score>0</score><option points='0'>\\\\nContains limited vocabulary, with many words used incorrectly. Demonstrates problems with sentence patterns.\\\\n</option><option points='1'>\\\\nContains basic vocabulary, with words that are predictable and common. Contains mostly simple sentences (although there may be an attempt at more varied sentence patterns).\\\\n</option><option points='2'>\\\\nIncludes vocabulary to make explanations detailed and precise. Includes varied sentence patterns, including complex sentences.\\\\n</option></category><category><description>\\\\nVoice\\\\n</description><score>0</score><option points='0'>\\\\nDemonstrates language and tone that may be inappropriate to task and reader.\\\\n</option><option points='1'>\\\\nDemonstrates an attempt to adjust language and tone to task and reader.\\\\n</option><option points='2'>\\\\nDemonstrates effective adjustment of language and tone to task and reader.\\\\n</option></category></rubric>\\\"]}\", \"score\": 0}], \"max_score\": 12, \"child_state\": \"done\"}"] | agpl-3.0 |
mdj2/django | tests/model_inheritance/tests.py | 18 | 12214 | from __future__ import absolute_import, unicode_literals
from operator import attrgetter
from django.core.exceptions import FieldError
from django.db import connection
from django.test import TestCase
from django.test.utils import CaptureQueriesContext
from django.utils import six
from .models import (Chef, CommonInfo, ItalianRestaurant, ParkingLot, Place,
Post, Restaurant, Student, StudentWorker, Supplier, Worker, MixinModel)
class ModelInheritanceTests(TestCase):
    """Exercises abstract-base and multi-table model inheritance."""

    def test_abstract(self):
        # Worker and Student both pick up the 'name' and 'age' fields and the
        # __unicode__() method from the abstract CommonInfo base, exactly as
        # with ordinary Python subclassing, while remaining completely
        # independent tables at the database level.
        w1 = Worker.objects.create(name="Fred", age=35, job="Quarry worker")
        Worker.objects.create(name="Barney", age=34, job="Quarry worker")
        s = Student.objects.create(name="Pebbles", age=5, school_class="1B")
        self.assertEqual(six.text_type(w1), "Worker Fred")
        self.assertEqual(six.text_type(s), "Student Pebbles")

        # Children inherit the parent's Meta (ordering by name) when they do
        # not declare their own.
        self.assertQuerysetEqual(
            Worker.objects.values("name"),
            [{"name": "Barney"}, {"name": "Fred"}],
            lambda o: o
        )

        # Student declares its own Meta, which completely overrides the
        # inherited one, so no name ordering applies there.
        self.assertEqual(Student._meta.ordering, [])

        # The abstract base itself is not a usable model (no manager).
        self.assertRaises(AttributeError, lambda: CommonInfo.objects.all())

        # A StudentWorker which does not exist is both a Student and a Worker
        # which does not exist: DoesNotExist is inherited from every parent.
        self.assertRaises(Student.DoesNotExist,
                          StudentWorker.objects.get, pk=12321321)
        self.assertRaises(Worker.DoesNotExist,
                          StudentWorker.objects.get, pk=12321321)

        # Built "long form" rather than via __init__/create() because of a
        # diamond inheritance bug (#10808).
        sw1 = StudentWorker()
        sw1.name = "Wilma"
        sw1.age = 35
        sw1.save()
        sw2 = StudentWorker()
        sw2.name = "Betty"
        sw2.age = 24
        sw2.save()

        # MultipleObjectsReturned is inherited from the parents as well.
        self.assertRaises(Student.MultipleObjectsReturned,
                          StudentWorker.objects.get, pk__lt=sw2.pk + 100)
        self.assertRaises(Worker.MultipleObjectsReturned,
                          StudentWorker.objects.get, pk__lt=sw2.pk + 100)
def test_multiple_table(self):
    """Multi-table inheritance: every model gets its own table, and child
    models have transparent access to all ancestor fields."""
    post = Post.objects.create(title="Lorem Ipsum")
    # The Post model has distinct accessors for the Comment and Link models.
    post.attached_comment_set.create(content="Save $ on V1agr@", is_spam=True)
    post.attached_link_set.create(
        content="The Web framework for perfections with deadlines.",
        url="http://www.djangoproject.com/"
    )
    # The '%(class)s' placeholder is expanded per subclass; the literal
    # attribute name does not exist on Post.
    self.assertRaises(AttributeError,
                      getattr, post, "attached_%(class)s_set")

    # Place/Restaurant/ItalianRestaurant all exist as independent models,
    # yet subclasses read and write ancestor fields transparently.
    Place.objects.create(name="Master Shakes", address="666 W. Jersey")
    Place.objects.create(name="Ace Harware", address="1013 N. Ashland")
    r = Restaurant.objects.create(
        name="Demon Dogs",
        address="944 W. Fullerton",
        serves_hot_dogs=True,
        serves_pizza=False,
        rating=2
    )
    c = Chef.objects.create(name="Albert")
    ir = ItalianRestaurant.objects.create(
        name="Ristorante Miron",
        address="1234 W. Ash",
        serves_hot_dogs=False,
        serves_pizza=False,
        serves_gnocchi=True,
        rating=4,
        chef=c
    )
    self.assertQuerysetEqual(
        ItalianRestaurant.objects.filter(address="1234 W. Ash"),
        ["Ristorante Miron"],
        attrgetter("name")
    )

    # Parent fields can be updated through the child instance.
    ir.address = "1234 W. Elm"
    ir.save()
    self.assertQuerysetEqual(
        ItalianRestaurant.objects.filter(address="1234 W. Elm"),
        ["Ristorante Miron"],
        attrgetter("name")
    )

    # Restaurant and ItalianRestaurant expose the inherited fields first,
    # then the parent link, then their own fields — in this exact order.
    self.assertEqual(
        [f.name for f in Restaurant._meta.fields],
        ["id", "name", "address", "place_ptr", "rating",
         "serves_hot_dogs", "serves_pizza", "chef"]
    )
    self.assertEqual(
        [f.name for f in ItalianRestaurant._meta.fields],
        ["id", "name", "address", "place_ptr", "rating",
         "serves_hot_dogs", "serves_pizza", "chef",
         "restaurant_ptr", "serves_gnocchi"],
    )
    self.assertEqual(Restaurant._meta.ordering, ["-rating"])

    # Reverse relations belong to their own hierarchy: Place -> supplier
    # works, but Restaurant is not part of the Place-Supplier hierarchy.
    self.assertQuerysetEqual(Place.objects.filter(supplier__name="foo"), [])
    self.assertRaises(FieldError,
                      Restaurant.objects.filter, supplier__name="foo")

    # Parent fields can be used directly in filters on the child model.
    self.assertQuerysetEqual(
        Restaurant.objects.filter(name="Demon Dogs"),
        ["Demon Dogs"],
        attrgetter("name")
    )
    self.assertQuerysetEqual(
        ItalianRestaurant.objects.filter(address="1234 W. Elm"),
        ["Ristorante Miron"],
        attrgetter("name")
    )

    # Filters against the parent model return objects of the parent's type.
    p = Place.objects.get(name="Demon Dogs")
    self.assertIs(type(p), Place)

    # The automatically created OneToOneField lets us walk from a parent
    # down to each child level by the child's lowercased name.
    self.assertEqual(
        p.restaurant, Restaurant.objects.get(name="Demon Dogs")
    )
    self.assertEqual(
        Place.objects.get(name="Ristorante Miron").restaurant.italianrestaurant,
        ItalianRestaurant.objects.get(name="Ristorante Miron")
    )
    self.assertEqual(
        Restaurant.objects.get(name="Ristorante Miron").italianrestaurant,
        ItalianRestaurant.objects.get(name="Ristorante Miron")
    )

    # Demon Dogs is not an Italian restaurant, so the downward link is absent.
    self.assertRaises(ItalianRestaurant.DoesNotExist,
                      lambda: p.restaurant.italianrestaurant)
    # A missing ItalianRestaurant is also a missing Place ...
    self.assertRaises(Place.DoesNotExist,
                      ItalianRestaurant.objects.get, name="The Noodle Void")
    # ... and MultipleObjectsReturned is inherited, too.
    self.assertRaises(Place.MultipleObjectsReturned,
                      Restaurant.objects.get, id__lt=12321)

    # Related objects work just as they normally do.
    s1 = Supplier.objects.create(name="Joe's Chickens", address="123 Sesame St")
    s1.customers = [r, ir]
    s2 = Supplier.objects.create(name="Luigi's Pasta", address="456 Sesame St")
    s2.customers = [ir]

    # This Place is a Supplier, not a Restaurant.
    p = Place.objects.get(name="Joe's Chickens")
    self.assertRaises(Restaurant.DoesNotExist, lambda: p.restaurant)
    self.assertEqual(p.supplier, s1)
    self.assertQuerysetEqual(
        ir.provider.order_by("-name"),
        ["Luigi's Pasta", "Joe's Chickens"],
        attrgetter("name")
    )
    self.assertQuerysetEqual(
        Restaurant.objects.filter(provider__name__contains="Chickens"),
        ["Ristorante Miron", "Demon Dogs"],
        attrgetter("name")
    )
    self.assertQuerysetEqual(
        ItalianRestaurant.objects.filter(provider__name__contains="Chickens"),
        ["Ristorante Miron"],
        attrgetter("name"),
    )

    # ForeignKeys may target any level of the hierarchy.
    ParkingLot.objects.create(
        name="Main St", address="111 Main St", main_site=s1
    )
    ParkingLot.objects.create(
        name="Well Lit", address="124 Sesame St", main_site=ir
    )
    self.assertEqual(
        Restaurant.objects.get(lot__name="Well Lit").name,
        "Ristorante Miron"
    )

    # update() can touch parent and child fields at once (it executes one
    # SQL query per table involved).
    rows = Restaurant.objects.filter(
        serves_hot_dogs=True, name__contains="D"
    ).update(
        name="Demon Puppies", serves_hot_dogs=False
    )
    self.assertEqual(rows, 1)
    r1 = Restaurant.objects.get(pk=r.pk)
    self.assertFalse(r1.serves_hot_dogs)
    self.assertEqual(r1.name, "Demon Puppies")

    # values() also resolves fields from parent models.
    self.assertQuerysetEqual(
        ItalianRestaurant.objects.values("name", "rating"),
        [{"rating": 4, "name": "Ristorante Miron"}],
        lambda o: o
    )

    # select_related() follows parent-level fields like local ones: without
    # it the chef costs an extra query, with it everything is one query.
    self.assertNumQueries(
        2, lambda: ItalianRestaurant.objects.all()[0].chef
    )
    self.assertNumQueries(
        1, lambda: ItalianRestaurant.objects.select_related("chef")[0].chef
    )
def test_mixin_init(self):
    # A plain (non-model) mixin's __init__ runs through the normal MRO and
    # can set extra attributes on the model instance.
    instance = MixinModel()
    self.assertEqual(instance.other_attr, 1)
def test_update_query_counts(self):
    """
    Saving a multi-table child must not generate unnecessary queries.
    Refs #18304.
    """
    chef = Chef.objects.create(name="Albert")
    trattoria = ItalianRestaurant.objects.create(
        name="Ristorante Miron",
        address="1234 W. Ash",
        serves_hot_dogs=False,
        serves_pizza=False,
        serves_gnocchi=True,
        rating=4,
        chef=chef
    )
    # One UPDATE per table in the three-level hierarchy.
    with self.assertNumQueries(3):
        trattoria.save()
def test_update_parent_filtering(self):
    """
    Updating a field of a model subclass doesn't issue an UPDATE
    query constrained by an inner query.
    Refs #10399.
    """
    supplier = Supplier.objects.create(
        name='Central market',
        address='610 some street'
    )
    # Capture the expected (flat) UPDATE in a database-agnostic way.
    with CaptureQueriesContext(connection) as captured_queries:
        Place.objects.filter(pk=supplier.pk).update(name=supplier.name)
    expected_sql = captured_queries[0]['sql']
    # Saving the subclass instance must produce that exact same UPDATE.
    with CaptureQueriesContext(connection) as captured_queries:
        supplier.save(update_fields=('name',))
    for query in captured_queries:
        sql = query['sql']
        if 'UPDATE' in sql:
            self.assertEqual(expected_sql, sql)
| bsd-3-clause |
jchodera/bhmm | bhmm/hmm/generic_sampled_hmm.py | 2 | 9741 |
# This file is part of BHMM (Bayesian Hidden Markov Models).
#
# Copyright (c) 2016 Frank Noe (Freie Universitaet Berlin)
# and John D. Chodera (Memorial Sloan-Kettering Cancer Center, New York)
#
# BHMM is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
from bhmm.hmm.generic_hmm import HMM
from bhmm.util import config
from bhmm.util.statistics import confidence_interval_arr
class SampledHMM(HMM):
    """ Sampled HMM with a representative single point estimate and error estimates

    Parameters
    ----------
    estimated_hmm : :class:`HMM <generic_hmm.HMM>`
        Representative HMM estimate, e.g. a maximum likelihood estimate or mean HMM.
    sampled_hmms : list of :class:`HMM <generic_hmm.HMM>`
        Sampled HMMs
    conf : float, optional, default = 0.95
        confidence interval, e.g. 0.68 for 1 sigma or 0.95 for 2 sigma.

    """

    def __init__(self, estimated_hmm, sampled_hmms, conf=0.95):
        # call superclass constructor with estimated_hmm so this object also
        # behaves like a regular single-point-estimate HMM
        HMM.__init__(self, estimated_hmm.initial_distribution, estimated_hmm.transition_matrix,
                     estimated_hmm.output_model, lag=estimated_hmm.lag)
        # save sampled HMMs to calculate statistical moments
        self._sampled_hmms = sampled_hmms
        self._nsamples = len(sampled_hmms)
        # save confidence level used by all *_conf properties
        self._conf = conf

    def _collect(self, attribute, shape):
        """ Stack `attribute` of every sampled HMM into one array.

        Parameters
        ----------
        attribute : str
            name of the per-sample HMM attribute to gather.
        shape : tuple of int
            shape of that attribute on a single HMM.

        Returns
        -------
        ndarray of shape (nsamples,) + shape
        """
        res = np.empty((self._nsamples,) + shape, dtype=config.dtype)
        for i, sampled in enumerate(self._sampled_hmms):
            res[i] = getattr(sampled, attribute)
        return res

    def set_confidence(self, conf):
        r""" Set confidence interval (e.g. 0.68 for 1 sigma, 0.95 for 2 sigma) """
        self._conf = conf

    @property
    def nsamples(self):
        r""" Number of samples """
        return self._nsamples

    @property
    def sampled_hmms(self):
        r""" List of sampled HMMs """
        return self._sampled_hmms

    @property
    def confidence_interval(self):
        r""" Confidence interval used """
        return self._conf

    @property
    def initial_distribution_samples(self):
        r""" Samples of the initial distribution """
        # NOTE(fix): this previously collected `stationary_distribution`,
        # which contradicts the property name and docstring; for stationary
        # HMMs the two are identical, so this is backward-compatible there.
        return self._collect('initial_distribution', (self.nstates,))

    @property
    def initial_distribution_mean(self):
        r""" The mean of the initial distribution of the hidden states """
        return np.mean(self.initial_distribution_samples, axis=0)

    @property
    def initial_distribution_std(self):
        r""" The standard deviation of the initial distribution of the hidden states """
        return np.std(self.initial_distribution_samples, axis=0)

    @property
    def initial_distribution_conf(self):
        r""" The confidence interval of the initial distribution of the hidden states """
        return confidence_interval_arr(self.initial_distribution_samples, conf=self._conf)

    @property
    def stationary_distribution_samples(self):
        r""" Samples of the stationary distribution """
        if self.is_stationary:
            # for a stationary HMM the initial distribution *is* the
            # stationary distribution
            return self.initial_distribution_samples
        else:
            raise ValueError('HMM is not stationary')

    @property
    def stationary_distribution_mean(self):
        r""" The mean of the stationary distribution of the hidden states """
        return np.mean(self.stationary_distribution_samples, axis=0)

    @property
    def stationary_distribution_std(self):
        r""" The standard deviation of the stationary distribution of the hidden states """
        return np.std(self.stationary_distribution_samples, axis=0)

    @property
    def stationary_distribution_conf(self):
        r""" The confidence interval of the stationary distribution of the hidden states """
        return confidence_interval_arr(self.stationary_distribution_samples, conf=self._conf)

    @property
    def transition_matrix_samples(self):
        r""" Samples of the transition matrix """
        return self._collect('transition_matrix', (self.nstates, self.nstates))

    @property
    def transition_matrix_mean(self):
        r""" The mean of the transition_matrix of the hidden states """
        return np.mean(self.transition_matrix_samples, axis=0)

    @property
    def transition_matrix_std(self):
        r""" The standard deviation of the transition_matrix of the hidden states """
        return np.std(self.transition_matrix_samples, axis=0)

    @property
    def transition_matrix_conf(self):
        r""" The confidence interval of the transition_matrix of the hidden states """
        return confidence_interval_arr(self.transition_matrix_samples, conf=self._conf)

    @property
    def eigenvalues_samples(self):
        r""" Samples of the eigenvalues """
        return self._collect('eigenvalues', (self.nstates,))

    @property
    def eigenvalues_mean(self):
        r""" The mean of the eigenvalues of the hidden states """
        return np.mean(self.eigenvalues_samples, axis=0)

    @property
    def eigenvalues_std(self):
        r""" The standard deviation of the eigenvalues of the hidden states """
        return np.std(self.eigenvalues_samples, axis=0)

    @property
    def eigenvalues_conf(self):
        r""" The confidence interval of the eigenvalues of the hidden states """
        return confidence_interval_arr(self.eigenvalues_samples, conf=self._conf)

    @property
    def eigenvectors_left_samples(self):
        r""" Samples of the left eigenvectors of the hidden transition matrix """
        return self._collect('eigenvectors_left', (self.nstates, self.nstates))

    @property
    def eigenvectors_left_mean(self):
        r""" The mean of the left eigenvectors of the hidden transition matrix """
        return np.mean(self.eigenvectors_left_samples, axis=0)

    @property
    def eigenvectors_left_std(self):
        r""" The standard deviation of the left eigenvectors of the hidden transition matrix """
        return np.std(self.eigenvectors_left_samples, axis=0)

    @property
    def eigenvectors_left_conf(self):
        r""" The confidence interval of the left eigenvectors of the hidden transition matrix """
        return confidence_interval_arr(self.eigenvectors_left_samples, conf=self._conf)

    @property
    def eigenvectors_right_samples(self):
        r""" Samples of the right eigenvectors of the hidden transition matrix """
        return self._collect('eigenvectors_right', (self.nstates, self.nstates))

    @property
    def eigenvectors_right_mean(self):
        r""" The mean of the right eigenvectors of the hidden transition matrix """
        return np.mean(self.eigenvectors_right_samples, axis=0)

    @property
    def eigenvectors_right_std(self):
        r""" The standard deviation of the right eigenvectors of the hidden transition matrix """
        return np.std(self.eigenvectors_right_samples, axis=0)

    @property
    def eigenvectors_right_conf(self):
        r""" The confidence interval of the right eigenvectors of the hidden transition matrix """
        return confidence_interval_arr(self.eigenvectors_right_samples, conf=self._conf)

    @property
    def timescales_samples(self):
        r""" Samples of the timescales """
        # there are nstates-1 relaxation timescales (the stationary process
        # contributes none)
        return self._collect('timescales', (self.nstates - 1,))

    @property
    def timescales_mean(self):
        r""" The mean of the timescales of the hidden states """
        return np.mean(self.timescales_samples, axis=0)

    @property
    def timescales_std(self):
        r""" The standard deviation of the timescales of the hidden states """
        return np.std(self.timescales_samples, axis=0)

    @property
    def timescales_conf(self):
        r""" The confidence interval of the timescales of the hidden states """
        return confidence_interval_arr(self.timescales_samples, conf=self._conf)

    @property
    def lifetimes_samples(self):
        r""" Samples of the state lifetimes """
        return self._collect('lifetimes', (self.nstates,))

    @property
    def lifetimes_mean(self):
        r""" The mean of the lifetimes of the hidden states """
        return np.mean(self.lifetimes_samples, axis=0)

    @property
    def lifetimes_std(self):
        r""" The standard deviation of the lifetimes of the hidden states """
        return np.std(self.lifetimes_samples, axis=0)

    @property
    def lifetimes_conf(self):
        r""" The confidence interval of the lifetimes of the hidden states """
        return confidence_interval_arr(self.lifetimes_samples, conf=self._conf)
| lgpl-3.0 |
shankar1093/Graphics | mrdoob-three.js-1f968fe/utils/exporters/max/annotate/annotate.py | 160 | 3036 | #!/usr/bin/env python
__author__ = 'Andrew Dunai <andrew@dun.ai>'
import sys
import json
import argparse
import re
from collections import namedtuple
try:
from PyQt4 import QtGui
import argparseui
except ImportError:
CAN_GUI = False
else:
CAN_GUI = True
# Matches CLI tokens of the form "name=first..last" (frames are 1-based).
range_regexp = re.compile(r'^([\w\d]+)\=([\d]+)\.\.([\d]+)$')

# A named animation clip: inclusive 0-based start/end frame indices.
Range = namedtuple('Range', ('name', 'start', 'end'))


def parse_range(value):
    """Parse a 'name=first..last' command-line token into a Range.

    Frames are given 1-based on the command line and stored 0-based.
    Raises argparse.ArgumentTypeError on malformed input.
    """
    match = range_regexp.match(value)
    if match is None:
        raise argparse.ArgumentTypeError('Ranges should be in form "name=frame..frame"')
    name, first, last = match.groups()
    return Range(name, int(first) - 1, int(last) - 1)
# Assemble the help epilog: a usage example, an optional GUI hint, and credit.
epilog = 'example:\n %(prog)s -i model.js -o model.new.js idle=1..10 walk=11..20'
if not CAN_GUI:
    epilog += '\npro tip:\n Install PyQt4 and argparseui packages to use GUI ("-u" option).'
epilog += '\nCreated by {}'.format(__author__)

parser = argparse.ArgumentParser(
    description='Split THREE.js model animation into seperate parts.',
    epilog=epilog,
    formatter_class=argparse.RawDescriptionHelpFormatter
)
# The GUI switch is only offered when the optional GUI packages are present.
if CAN_GUI:
    parser.add_argument('-u', '--gui', help='run in GUI', dest='gui', action='store_true')
parser.add_argument('-i', metavar='FILE', help='input file name', required=True, dest='source', type=argparse.FileType('r'))
parser.add_argument('-o', metavar='FILE', help='output file name', required=True, dest='destination', type=argparse.FileType('w'))
parser.add_argument('range', nargs='+', help='range in format "name=frame..frame"', type=parse_range)
def process(parser):
    """Split the model's single animation into the named ranges.

    Reads the THREE.js JSON model from ``args.source``, cuts its
    ``animation`` into one clip per requested range (times rebased to 0),
    and writes the model back to ``args.destination`` with the original
    ``animation`` key replaced by an ``animations`` mapping.

    :param parser: object whose ``parse_args()`` yields ``source``,
        ``destination`` (file objects) and ``range`` (list of Range).
    """
    args = parser.parse_args()
    data = json.loads(args.source.read())

    source_animation = data.get('animation')
    fps = float(source_animation.get('fps'))
    frame_duration = 1.0 / fps

    all_hierarchy = source_animation.get('hierarchy')
    animations = {}

    for r in args.range:
        # Create animation & hierarchy for this range
        hierarchy = []
        clip = {
            'name': r.name,
            'fps': fps,
            'length': (r.end - r.start) * frame_duration,
            'hierarchy': hierarchy
        }

        # Go through each bone animation
        for bone in all_hierarchy:
            # BUG FIX: copy each key before rebasing its time.  The old code
            # patched the shared dicts from ``all_hierarchy`` in place, so a
            # later range filtered against already-rewritten times and could
            # pick up the wrong (or duplicate) keys.
            keys = [
                dict(key) for key in bone['keys']
                if (key['time'] >= r.start * frame_duration) and (key['time'] <= r.end * frame_duration)
            ]

            # Rebase time so every clip starts at 0
            time = 0.0
            for key in keys:
                key['time'] = round(time, 3)
                time += frame_duration

            hierarchy.append({
                'parent': bone['parent'],
                'keys': keys
            })

        animations[r.name] = clip

    del data['animation']
    data['animations'] = animations

    args.destination.write(json.dumps(data))
# Entry point: run through the Qt-based argparse UI when "-u" was given and
# the GUI stack is available; otherwise process straight from the CLI parser.
if '-u' in sys.argv and CAN_GUI:
    app = QtGui.QApplication(sys.argv)
    ui = argparseui.ArgparseUi(parser)
    ui.show()
    app.exec_()
    # Only process when the dialog was accepted ("OK").
    if ui.result() == 1:
        process(ui)
else:
    process(parser)
| mit |
SunguckLee/MariaDB-PageCompression | storage/tokudb/mysql-test/tokudb/t/change_column_int_not_supported.py | 56 | 1592 | #!/usr/bin/env python
import sys
def supported(from_int, from_modifier, to_int, to_modifer):
    """Whether changing an int column is a supported (in-place) ALTER.

    Supported only when the modifier is unchanged and the integer type
    index does not shrink (types may only be expanded).
    """
    return from_modifier == to_modifer and from_int <= to_int
def gen_tests_for_int(from_int, from_modifier, to_int, to_modifer):
    # Emit mysqltest statements that ALTER a column of
    # int_types[from_int] + modifiers[from_modifier] into every other
    # (type, modifier) combination; unsupported combinations are wrapped in
    # an expected-error directive.
    # NOTE: Python 2 print statements -- this generator targets Python 2.
    for to_int in range(len(int_types)):
        for to_modifer in range(len(modifiers)):
            print
            print "CREATE TABLE t (a %s %s);" % (int_types[from_int], modifiers[from_modifier])
            if not supported(from_int, from_modifier, to_int, to_modifer):
                # Server name differs between forks; normalize before matching.
                print "--replace_regex /MariaDB/XYZ/ /MySQL/XYZ/"
                print "--error ER_UNSUPPORTED_EXTENSION"
            print "ALTER TABLE t CHANGE COLUMN a a %s %s;" % (int_types[to_int], modifiers[to_modifer])
            print "DROP TABLE t;"
def gen_tests(int_types, modifiers):
    """Drive gen_tests_for_int from every (type, modifier) starting point."""
    type_indices = range(len(int_types))
    modifier_indices = range(len(modifiers))
    for src_type in type_indices:
        for src_modifier in modifier_indices:
            gen_tests_for_int(src_type, src_modifier, int_types, modifiers)
def main():
    # Emit the generated test's header and TokuDB session setup, then the
    # full matrix of int-column ALTER cases.
    # NOTE(review): the header names "change_int_not_supported.py" while this
    # file is change_column_int_not_supported.py, and "cnverted" is a typo;
    # both strings are emitted verbatim into the generated test, so they are
    # left untouched here.
    print "# this test is generated by change_int_not_supported.py"
    print "# ensure that int types are only expanded and are not cnverted to some other type"
    print "--disable_warnings"
    print "DROP TABLE IF EXISTS t;"
    print "--enable_warnings"
    print "SET SESSION DEFAULT_STORAGE_ENGINE=\"TokuDB\";"
    print "SET SESSION TOKUDB_DISABLE_SLOW_ALTER=1;"
    gen_tests(
        [ "TINYINT", "SMALLINT", "MEDIUMINT", "INT", "BIGINT" ],
        [ "", "NOT NULL", "UNSIGNED", "UNSIGNED NOT NULL" ]
    )
    return 0

# Standalone generator script: runs on import as well (no __main__ guard).
sys.exit(main())
| gpl-2.0 |
camptocamp/odoo | openerp/addons/base/tests/test_views.py | 3 | 28297 | # -*- encoding: utf-8 -*-
from functools import partial
import unittest2
from lxml import etree as ET
from lxml.builder import E
from openerp.tests import common
Field = E.field
class ViewCase(common.TransactionCase):
    """Base TestCase registering a structural equality comparer for lxml
    elements, so ``assertEqual`` on two etree nodes compares tag, trimmed
    text/tail, attributes and children recursively.
    """
    def setUp(self):
        super(ViewCase, self).setUp()
        self.addTypeEqualityFunc(ET._Element, self.assertTreesEqual)

    def assertTreesEqual(self, n1, n2, msg=None):
        self.assertEqual(n1.tag, n2.tag, msg)
        # Whitespace-only text/tail is layout, not content.
        self.assertEqual((n1.text or '').strip(), (n2.text or '').strip(), msg)
        self.assertEqual((n1.tail or '').strip(), (n2.tail or '').strip(), msg)

        # Because lxml uses ordereddicts in which order is important to
        # equality (!?!?!?!), compare as plain dicts.
        self.assertEqual(dict(n1.attrib), dict(n2.attrib), msg)

        # BUG FIX: zip() stops at the shorter sequence, so trees with
        # different child counts used to compare equal whenever the common
        # prefix matched.  Assert the counts explicitly first.
        self.assertEqual(len(n1), len(n2), msg)
        for c1, c2 in zip(n1, n2):
            self.assertTreesEqual(c1, c2, msg)
class TestNodeLocator(common.TransactionCase):
    """
    The node locator returns None when it can not find a node, and the first
    match when it finds something (no jquery-style node sets)
    """
    def setUp(self):
        super(TestNodeLocator, self).setUp()
        # Registry proxy for ir.ui.view; locate_node is the method under test.
        self.Views = self.registry('ir.ui.view')

    def test_no_match_xpath(self):
        """
        xpath simply uses the provided @expr pattern to find a node
        """
        node = self.Views.locate_node(
            E.root(E.foo(), E.bar(), E.baz()),
            E.xpath(expr="//qux"))
        self.assertIsNone(node)

    def test_match_xpath(self):
        bar = E.bar()
        node = self.Views.locate_node(
            E.root(E.foo(), bar, E.baz()),
            E.xpath(expr="//bar"))
        # Identity check: locate_node must return the node itself, not a copy.
        self.assertIs(node, bar)

    def test_no_match_field(self):
        """
        A field spec will match by @name against all fields of the view
        """
        node = self.Views.locate_node(
            E.root(E.foo(), E.bar(), E.baz()),
            Field(name="qux"))
        self.assertIsNone(node)

        node = self.Views.locate_node(
            E.root(Field(name="foo"), Field(name="bar"), Field(name="baz")),
            Field(name="qux"))
        self.assertIsNone(node)

    def test_match_field(self):
        bar = Field(name="bar")
        node = self.Views.locate_node(
            E.root(Field(name="foo"), bar, Field(name="baz")),
            Field(name="bar"))
        self.assertIs(node, bar)

    def test_no_match_other(self):
        """
        Non-xpath non-fields are matched by node name first
        """
        node = self.Views.locate_node(
            E.root(E.foo(), E.bar(), E.baz()),
            E.qux())
        self.assertIsNone(node)

    def test_match_other(self):
        bar = E.bar()
        node = self.Views.locate_node(
            E.root(E.foo(), bar, E.baz()),
            E.bar())
        self.assertIs(bar, node)

    def test_attribute_mismatch(self):
        """
        Non-xpath non-field are filtered by matching attributes on spec and
        matched nodes
        """
        node = self.Views.locate_node(
            E.root(E.foo(attr='1'), E.bar(attr='2'), E.baz(attr='3')),
            E.bar(attr='5'))
        self.assertIsNone(node)

    def test_attribute_filter(self):
        # Only the node whose attributes match the spec is returned, even if
        # several nodes share the tag.
        match = E.bar(attr='2')
        node = self.Views.locate_node(
            E.root(E.bar(attr='1'), match, E.root(E.bar(attr='3'))),
            E.bar(attr='2'))
        self.assertIs(node, match)

    def test_version_mismatch(self):
        """
        A @version on the spec will be matched against the view's version
        """
        node = self.Views.locate_node(
            E.root(E.foo(attr='1'), version='4'),
            E.foo(attr='1', version='3'))
        self.assertIsNone(node)
class TestViewInheritance(ViewCase):
    """Exercises view-inheritance resolution (get_inheriting_views_arch,
    default_view) over a small hand-built tree of ir.ui.view records.
    """
    def arch_for(self, name, view_type='form', parent=None):
        """ Generates a trivial view of the specified ``view_type``.

        The generated view is empty but ``name`` is set as its root's ``@string``.

        If ``parent`` is not falsy, generates an extension view (instead of
        a root view) replacing the parent's ``@string`` by ``name``

        :param str name: ``@string`` value for the view root
        :param str view_type:
        :param bool parent:
        :return: generated arch
        :rtype: str
        """
        if not parent:
            element = E(view_type, string=name)
        else:
            element = E(view_type,
                E.attribute(name, name='string'),
                position='attributes'
            )
        return ET.tostring(element)

    def makeView(self, name, parent=None, arch=None):
        """ Generates a basic ir.ui.view with the provided name, parent and arch.

        If no parent is provided, the view is top-level.

        If no arch is provided, generates one by calling :meth:`~.arch_for`.

        :param str name:
        :param int parent: id of the parent view, if any
        :param str arch:
        :returns: the created view's id.
        :rtype: int
        """
        view_id = self.View.create(self.cr, self.uid, {
            'model': self.model,
            'name': name,
            'arch': arch or self.arch_for(name, parent=parent),
            'inherit_id': parent,
            'priority': 5,  # higher than default views
        })
        self.ids[name] = view_id
        return view_id

    def setUp(self):
        super(TestViewInheritance, self).setUp()

        self.model = 'ir.ui.view.custom'
        self.View = self.registry('ir.ui.view')
        # NOTE(review): _init is toggled off so inheritance resolution
        # behaves as at runtime rather than during module init -- restored
        # in tearDown; presumed intent, confirm against the registry code.
        self._init = self.View.pool._init
        self.View.pool._init = False

        self.ids = {}

        # Form-view tree: A -> (A1 -> (A11 -> A111, A12), A2 -> (A21, A22 -> A221))
        a = self.makeView("A")
        a1 = self.makeView("A1", a)
        a11 = self.makeView("A11", a1)
        self.makeView("A111", a11)
        self.makeView("A12", a1)
        a2 = self.makeView("A2", a)
        self.makeView("A21", a2)
        a22 = self.makeView("A22", a2)
        self.makeView("A221", a22)

        # Tree views: B -> B1, and C with a lower (= winning) priority.
        b = self.makeView('B', arch=self.arch_for("B", 'tree'))
        self.makeView('B1', b, arch=self.arch_for("B1", 'tree', parent=b))
        c = self.makeView('C', arch=self.arch_for("C", 'tree'))
        self.View.write(self.cr, self.uid, c, {'priority': 1})

    def tearDown(self):
        # Restore the registry flag saved in setUp.
        self.View.pool._init = self._init
        super(TestViewInheritance, self).tearDown()

    def test_get_inheriting_views_arch(self):
        # Only direct children are returned, as (arch, id) pairs.
        self.assertEqual(self.View.get_inheriting_views_arch(
            self.cr, self.uid, self.ids['A'], self.model), [
            (self.arch_for('A1', parent=True), self.ids['A1']),
            (self.arch_for('A2', parent=True), self.ids['A2']),
        ])

        self.assertEqual(self.View.get_inheriting_views_arch(
            self.cr, self.uid, self.ids['A21'], self.model),
            [])

        self.assertEqual(self.View.get_inheriting_views_arch(
            self.cr, self.uid, self.ids['A11'], self.model),
            [(self.arch_for('A111', parent=True), self.ids['A111'])])

    def test_default_view(self):
        # Default form view: the only root form view, A.
        default = self.View.default_view(
            self.cr, self.uid, model=self.model, view_type='form')
        self.assertEqual(default, self.ids['A'])

        # Default tree view: C wins over B through its lower priority.
        default_tree = self.View.default_view(
            self.cr, self.uid, model=self.model, view_type='tree')
        self.assertEqual(default_tree, self.ids['C'])

    def test_no_default_view(self):
        # Unknown model or missing view type yields a falsy result.
        self.assertFalse(
            self.View.default_view(
                self.cr, self.uid, model='does.not.exist', view_type='form'))
        self.assertFalse(
            self.View.default_view(
                self.cr, self.uid, model=self.model, view_type='graph'))
class TestApplyInheritanceSpecs(ViewCase):
    """ Applies a sequence of inheritance specification nodes to a base
    architecture. IO state parameters (cr, uid, model, context) are used for
    error reporting

    The base architecture is altered in-place.
    """
    def setUp(self):
        super(TestApplyInheritanceSpecs, self).setUp()
        self.View = self.registry('ir.ui.view')
        # Shared base arch; apply_inheritance_specs mutates it in place.
        self.base_arch = E.form(
            Field(name="target"),
            string="Title")

    def test_replace(self):
        # position="replace" with content swaps the target for that content.
        spec = Field(
            Field(name="replacement"),
            name="target", position="replace")

        self.View.apply_inheritance_specs(self.cr, self.uid,
                                          self.base_arch,
                                          spec, None)

        self.assertEqual(
            self.base_arch,
            E.form(Field(name="replacement"), string="Title"))

    def test_delete(self):
        # position="replace" with no content removes the target outright.
        spec = Field(name="target", position="replace")

        self.View.apply_inheritance_specs(self.cr, self.uid,
                                          self.base_arch,
                                          spec, None)

        self.assertEqual(
            self.base_arch,
            E.form(string="Title"))

    def test_insert_after(self):
        spec = Field(
            Field(name="inserted"),
            name="target", position="after")

        self.View.apply_inheritance_specs(self.cr, self.uid,
                                          self.base_arch,
                                          spec, None)

        self.assertEqual(
            self.base_arch,
            E.form(
                Field(name="target"),
                Field(name="inserted"),
                string="Title"
            ))

    def test_insert_before(self):
        spec = Field(
            Field(name="inserted"),
            name="target", position="before")

        self.View.apply_inheritance_specs(self.cr, self.uid,
                                          self.base_arch,
                                          spec, None)

        self.assertEqual(
            self.base_arch,
            E.form(
                Field(name="inserted"),
                Field(name="target"),
                string="Title"))

    def test_insert_inside(self):
        # With no explicit position, "inside" (append to target) is the default.
        default = Field(Field(name="inserted"), name="target")
        spec = Field(Field(name="inserted 2"), name="target", position='inside')

        self.View.apply_inheritance_specs(self.cr, self.uid,
                                          self.base_arch,
                                          default, None)
        self.View.apply_inheritance_specs(self.cr, self.uid,
                                          self.base_arch,
                                          spec, None)

        self.assertEqual(
            self.base_arch,
            E.form(
                Field(
                    Field(name="inserted"),
                    Field(name="inserted 2"),
                    name="target"),
                string="Title"))

    def test_unpack_data(self):
        # A <data> wrapper is transparent: each child spec is applied in order.
        spec = E.data(
            Field(Field(name="inserted 0"), name="target"),
            Field(Field(name="inserted 1"), name="target"),
            Field(Field(name="inserted 2"), name="target"),
            Field(Field(name="inserted 3"), name="target"),
        )

        self.View.apply_inheritance_specs(self.cr, self.uid,
                                          self.base_arch,
                                          spec, None)

        self.assertEqual(
            self.base_arch,
            E.form(
                Field(
                    Field(name="inserted 0"),
                    Field(name="inserted 1"),
                    Field(name="inserted 2"),
                    Field(name="inserted 3"),
                    name="target"),
                string="Title"))

    def test_invalid_position(self):
        spec = Field(
            Field(name="whoops"),
            name="target", position="serious_series")

        with self.assertRaises(AttributeError):
            self.View.apply_inheritance_specs(self.cr, self.uid,
                                              self.base_arch,
                                              spec, None)

    def test_incorrect_version(self):
        # Version ignored on //field elements, so use something else
        arch = E.form(E.element(foo="42"))
        spec = E.element(
            Field(name="placeholder"),
            foo="42", version="7.0")

        with self.assertRaises(AttributeError):
            self.View.apply_inheritance_specs(self.cr, self.uid,
                                              arch,
                                              spec, None)

    def test_target_not_found(self):
        spec = Field(name="targut")

        with self.assertRaises(AttributeError):
            self.View.apply_inheritance_specs(self.cr, self.uid,
                                              self.base_arch,
                                              spec, None)
class TestApplyInheritedArchs(ViewCase):
    """ Applies a sequence of modificator archs to a base view

    NOTE(review): placeholder -- no test methods implemented yet.
    """
class TestViewCombined(ViewCase):
    """
    Test fallback operations of View.read_combined:

    * defaults mapping
    * ?

    NOTE(review): placeholder -- no test methods implemented yet.
    """
class TestNoModel(ViewCase):
    """Views without an attached model: creation and qweb translation."""

    def test_create_view_nomodel(self):
        # A qweb view may be created with no model; reading it back shows
        # default priority (16) and falsy model/inherit_id.
        View = self.registry('ir.ui.view')
        view_id = View.create(self.cr, self.uid, {
            'name': 'dummy',
            'arch': '<template name="foo"/>',
            'inherit_id': False,
            'type': 'qweb',
        })
        fields = ['name', 'arch', 'type', 'priority', 'inherit_id', 'model']
        [view] = View.read(self.cr, self.uid, [view_id], fields)
        self.assertEqual(view, {
            'id': view_id,
            'name': 'dummy',
            'arch': '<template name="foo"/>',
            'type': 'qweb',
            'priority': 16,
            'inherit_id': False,
            'model': False,
        })

    # Class-level fixture: small page arch; text_para's text is swapped by
    # test_qweb_translation (shared element, kept as a separate reference).
    text_para = E.p("", {'class': 'legalese'})
    arch = E.body(
        E.div(
            E.h1("Title"),
            id="header"),
        E.p("Welcome!"),
        E.div(
            E.hr(),
            text_para,
            id="footer"),
        {'class': "index"},)

    def test_qweb_translation(self):
        """
        Test if translations work correctly without a model
        """
        View = self.registry('ir.ui.view')
        self.registry('res.lang').load_lang(self.cr, self.uid, 'fr_FR')
        orig_text = "Copyright copyrighter"
        translated_text = u"Copyrighter, tous droits réservés"
        self.text_para.text = orig_text
        self.registry('ir.translation').create(self.cr, self.uid, {
            'name': 'website',
            'type': 'view',
            'lang': 'fr_FR',
            'src': orig_text,
            'value': translated_text,
        })
        sarch = View.translate_qweb(self.cr, self.uid, None, self.arch, 'fr_FR')

        # Patch the fixture to the expected translated state and compare
        # trees structurally (ViewCase registers the comparer).
        self.text_para.text = translated_text
        self.assertEqual(sarch, self.arch)
class TestTemplating(ViewCase):
    """Branding distribution over combined qweb views: each arch node is
    annotated with data-oe-* attributes pointing back to its source view.
    """
    def setUp(self):
        import openerp.modules
        super(TestTemplating, self).setUp()
        self._pool = openerp.modules.registry.RegistryManager.get(common.DB)
        self._init = self._pool._init
        # NOTE(review): _init is forced off so view combination/validation
        # runs as at runtime rather than in module-install mode; restored in
        # tearDown.  Presumed intent -- confirm against the registry code.
        self._pool._init = False

    def tearDown(self):
        self._pool._init = self._init
        super(TestTemplating, self).tearDown()

    def test_branding_inherit(self):
        Views = self.registry('ir.ui.view')
        id = Views.create(self.cr, self.uid, {
            'name': "Base view",
            'type': 'qweb',
            'arch': """<root>
                <item order="1"/>
            </root>
            """
        })
        id2 = Views.create(self.cr, self.uid, {
            'name': "Extension",
            'type': 'qweb',
            'inherit_id': id,
            'arch': """<xpath expr="//item" position="before">
                <item order="2"/>
            </xpath>
            """
        })

        arch_string = Views.read_combined(
            self.cr, self.uid, id, fields=['arch'],
            context={'inherit_branding': True})['arch']

        arch = ET.fromstring(arch_string)
        Views.distribute_branding(arch)

        # Each node must be branded with the id and xpath of the view that
        # actually contributed it.
        [initial] = arch.xpath('//item[@order=1]')
        self.assertEqual(
            str(id),
            initial.get('data-oe-id'),
            "initial should come from the root view")
        self.assertEqual(
            '/root[1]/item[1]',
            initial.get('data-oe-xpath'),
            "initial's xpath should be within the root view only")

        [second] = arch.xpath('//item[@order=2]')
        self.assertEqual(
            str(id2),
            second.get('data-oe-id'),
            "second should come from the extension view")

    def test_branding_distribute_inner(self):
        """ Checks that the branding is correctly distributed within a view
        extension
        """
        Views = self.registry('ir.ui.view')
        id = Views.create(self.cr, self.uid, {
            'name': "Base view",
            'type': 'qweb',
            'arch': """<root>
                <item order="1"/>
            </root>"""
        })
        id2 = Views.create(self.cr, self.uid, {
            'name': "Extension",
            'type': 'qweb',
            'inherit_id': id,
            'arch': """<xpath expr="//item" position="before">
                <item order="2">
                    <content t-att-href="foo">bar</content>
                </item>
            </xpath>"""
        })

        arch_string = Views.read_combined(
            self.cr, self.uid, id, fields=['arch'],
            context={'inherit_branding': True})['arch']

        arch = ET.fromstring(arch_string)
        Views.distribute_branding(arch)

        # Inner content of the injected item carries the extension view's
        # branding; the injected item itself records its source view id.
        self.assertEqual(
            arch,
            E.root(
                E.item(
                    E.content("bar", {
                        't-att-href': "foo",
                        'data-oe-model': 'ir.ui.view',
                        'data-oe-id': str(id2),
                        'data-oe-field': 'arch',
                        'data-oe-xpath': '/xpath/item/content[1]',
                    }), {
                        'order': '2',
                        'data-oe-source-id': str(id)
                    }),
                E.item({
                    'order': '1',
                    'data-oe-model': 'ir.ui.view',
                    'data-oe-id': str(id),
                    'data-oe-field': 'arch',
                    'data-oe-xpath': '/root[1]/item[1]'
                })
            )
        )

    def test_esc_no_branding(self):
        # Nodes rendered through t-esc must not be branded (their content is
        # dynamic, not editable arch).
        Views = self.registry('ir.ui.view')
        id = Views.create(self.cr, self.uid, {
            'name': "Base View",
            'type': 'qweb',
            'arch': """<root>
                <item><span t-esc="foo"/></item>
            </root>""",
        })

        arch_string = Views.read_combined(
            self.cr, self.uid, id, fields=['arch'],
            context={'inherit_branding': True})['arch']
        arch = ET.fromstring(arch_string)
        Views.distribute_branding(arch)

        self.assertEqual(arch, E.root(E.item(E.span({'t-esc': "foo"}))))

    def test_ignore_unbrand(self):
        Views = self.registry('ir.ui.view')
        id = Views.create(self.cr, self.uid, {
            'name': "Base view",
            'type': 'qweb',
            'arch': """<root>
                <item order="1" t-ignore="true">
                    <t t-esc="foo"/>
                </item>
            </root>"""
        })
        id2 = Views.create(self.cr, self.uid, {
            'name': "Extension",
            'type': 'qweb',
            'inherit_id': id,
            'arch': """<xpath expr="//item[@order='1']" position="inside">
                <item order="2">
                    <content t-att-href="foo">bar</content>
                </item>
            </xpath>"""
        })

        arch_string = Views.read_combined(
            self.cr, self.uid, id, fields=['arch'],
            context={'inherit_branding': True})['arch']

        arch = ET.fromstring(arch_string)
        Views.distribute_branding(arch)

        self.assertEqual(
            arch,
            E.root(
                E.item(
                    {'t-ignore': 'true', 'order': '1'},
                    E.t({'t-esc': 'foo'}),
                    E.item(
                        {'order': '2', 'data-oe-source-id': str(id)},
                        E.content(
                            {'t-att-href': 'foo'},
                            "bar")
                    )
                )
            ),
            "t-ignore should apply to injected sub-view branding, not just to"
            " the main view's"
        )
class test_views(ViewCase):
    """Miscellaneous view creation, validation and fields_view_get tests."""

    def test_nonexistent_attribute_removal(self):
        # Removing an attribute that does not exist on the target must not
        # blow up view creation.
        Views = self.registry('ir.ui.view')
        Views.create(self.cr, self.uid, {
            'name': 'Test View',
            'model': 'ir.ui.view',
            'inherit_id': self.browse_ref('base.view_view_tree').id,
            'arch': """<?xml version="1.0"?>
                <xpath expr="//field[@name='name']" position="attributes">
                    <attribute name="non_existing_attribute"></attribute>
                </xpath>
            """,
        })

    def _insert_view(self, **kw):
        """Insert a view directly via SQL, bypassing ORM-level validation,
        so the tests can drive _validate_custom_views explicitly.

        :returns: the new row's id
        :rtype: int
        """
        kw.pop('id', None)
        keys = sorted(kw.keys())
        # Quote column names; values are passed as named query parameters.
        fields = ','.join('"%s"' % (k.replace('"', r'\"'),) for k in keys)
        params = ','.join('%%(%s)s' % (k,) for k in keys)

        query = 'INSERT INTO ir_ui_view(%s) VALUES(%s) RETURNING id' % (fields, params)
        self.cr.execute(query, kw)
        return self.cr.fetchone()[0]

    def test_custom_view_validation(self):
        Views = self.registry('ir.ui.view')
        model = 'ir.actions.act_url'

        validate = partial(Views._validate_custom_views, self.cr, self.uid, model)

        # validation of a single view
        vid = self._insert_view(
            name='base view',
            model=model,
            priority=1,
            arch="""<?xml version="1.0"?>
                <tree string="view">
                    <field name="url"/>
                </tree>
            """,
        )
        self.assertTrue(validate())  # single view

        # validation of a inherited view
        self._insert_view(
            name='inherited view',
            model=model,
            priority=1,
            inherit_id=vid,
            arch="""<?xml version="1.0"?>
                <xpath expr="//field[@name='url']" position="before">
                    <field name="name"/>
                </xpath>
            """,
        )
        self.assertTrue(validate())  # inherited view

    def test_view_inheritance(self):
        Views = self.registry('ir.ui.view')

        v1 = Views.create(self.cr, self.uid, {
            'name': "bob",
            'model': 'ir.ui.view',
            'arch': """
                <form string="Base title" version="7.0">
                    <separator string="separator" colspan="4"/>
                    <footer>
                        <button name="action_next" type="object" string="Next button"/>
                        or
                        <button string="Skip" special="cancel" />
                    </footer>
                </form>
            """
        })
        v2 = Views.create(self.cr, self.uid, {
            'name': "edmund",
            'model': 'ir.ui.view',
            'inherit_id': v1,
            'arch': """
                <data>
                    <form position="attributes" version="7.0">
                        <attribute name="string">Replacement title</attribute>
                    </form>
                    <footer position="replace">
                        <footer>
                            <button name="action_next" type="object" string="New button"/>
                        </footer>
                    </footer>
                    <separator string="separator" position="replace">
                        <p>Replacement data</p>
                    </separator>
                </data>
            """
        })
        v3 = Views.create(self.cr, self.uid, {
            'name': 'jake',
            'model': 'ir.ui.view',
            'inherit_id': v1,
            'priority': 17,
            'arch': """
                <footer position="attributes">
                    <attribute name="thing">bob</attribute>
                </footer>
            """
        })

        view = self.registry('ir.ui.view').fields_view_get(
            self.cr, self.uid, v2, view_type='form', context={
                # NOTE(review): restricts the candidate inheriting views
                # applied on top of v2 -- confirm exact semantics of
                # check_view_ids in ir.ui.view.
                'check_view_ids': [v2, v3]
            })
        self.assertEqual(view['type'], 'form')
        # v2 replaces title/separator/footer; v3 (higher priority, applied
        # later) would only retag a footer -- its change is superseded by
        # v2's replacement footer carrying thing="bob" below.
        self.assertEqual(
            ET.fromstring(
                view['arch'],
                parser=ET.XMLParser(remove_blank_text=True)
            ),
            E.form(
                E.p("Replacement data"),
                E.footer(
                    E.button(name="action_next", type="object", string="New button"),
                    thing="bob"
                ),
                string="Replacement title", version="7.0"))

    def test_view_inheritance_divergent_models(self):
        Views = self.registry('ir.ui.view')

        v1 = Views.create(self.cr, self.uid, {
            'name': "bob",
            'model': 'ir.ui.view.custom',
            'arch': """
                <form string="Base title" version="7.0">
                    <separator string="separator" colspan="4"/>
                    <footer>
                        <button name="action_next" type="object" string="Next button"/>
                        or
                        <button string="Skip" special="cancel" />
                    </footer>
                </form>
            """
        })
        v2 = Views.create(self.cr, self.uid, {
            'name': "edmund",
            'model': 'ir.ui.view',
            'inherit_id': v1,
            'arch': """
                <data>
                    <form position="attributes" version="7.0">
                        <attribute name="string">Replacement title</attribute>
                    </form>
                    <footer position="replace">
                        <footer>
                            <button name="action_next" type="object" string="New button"/>
                        </footer>
                    </footer>
                    <separator string="separator" position="replace">
                        <p>Replacement data</p>
                    </separator>
                </data>
            """
        })
        v3 = Views.create(self.cr, self.uid, {
            'name': 'jake',
            'model': 'ir.ui.menu',
            'inherit_id': v1,
            'priority': 17,
            'arch': """
                <footer position="attributes">
                    <attribute name="thing">bob</attribute>
                </footer>
            """
        })

        view = self.registry('ir.ui.view').fields_view_get(
            self.cr, self.uid, v2, view_type='form', context={
                # NOTE(review): see test_view_inheritance; here v3 targets a
                # different model, so its attribute change must NOT apply.
                'check_view_ids': [v2, v3]
            })
        self.assertEqual(view['type'], 'form')
        # Unlike test_view_inheritance, the footer has no thing="bob":
        # v3 belongs to another model and is filtered out.
        self.assertEqual(
            ET.fromstring(
                view['arch'],
                parser=ET.XMLParser(remove_blank_text=True)
            ),
            E.form(
                E.p("Replacement data"),
                E.footer(
                    E.button(name="action_next", type="object", string="New button")),
                string="Replacement title", version="7.0"
            ))
class TestXPathExtentions(common.BaseCase):
    """Tests for the custom ``hasclass()`` XPath extension function."""

    def test_hasclass(self):
        # Three nodes carrying shrinking subsets of the classes.
        tree = E.node(
            E.node({'class': 'foo bar baz'}),
            E.node({'class': 'foo bar'}),
            {'class': "foo"})

        def matches(expr):
            # Number of nodes selected by the given xpath expression.
            return len(tree.xpath(expr))

        self.assertEqual(matches('//node[hasclass("foo")]'), 3)
        self.assertEqual(matches('//node[hasclass("bar")]'), 2)
        self.assertEqual(matches('//node[hasclass("baz")]'), 1)
        self.assertEqual(matches('//node[hasclass("foo")][not(hasclass("bar"))]'), 1)
        self.assertEqual(matches('//node[hasclass("foo", "baz")]'), 1)
| agpl-3.0 |
PyLearner/tp-qemu | qemu/tests/watchdog.py | 7 | 12678 | import re
import time
import logging
from autotest.client.shared import error
from autotest.client.shared import utils
from virttest import utils_misc
from virttest import env_process
from virttest import utils_test
@error.context_aware
def run(test, params, env):
"""
Configure watchdog, crash the guest and check if watchdog_action occurs.
Test Step:
1. see every function step
Params:
:param test: QEMU test object.
:param params: Dictionary with test parameters.
:param env: Dictionary with the test environment.
"""
timeout = int(params.get("login_timeout", '360'))
relogin_timeout = int(params.get("relogin_timeout", '240'))
watchdog_device_type = params.get("watchdog_device_type", "i6300esb")
watchdog_action = params.get("watchdog_action", "reset")
trigger_cmd = params.get("trigger_cmd", "echo c > /dev/watchdog")
# internal function
def _watchdog_device_check(session, watchdog_device):
"""
Check the watchdog device have been found and init successfully. if not
will raise error.
"""
# when using ib700 need modprobe it's driver manually.
if watchdog_device == "ib700":
session.cmd("modprobe ib700wdt")
# when wDT is 6300esb need check pci info
if watchdog_device == "i6300esb":
error.context("checking pci info to ensure have WDT device",
logging.info)
o = session.cmd_output("lspci")
if o:
wdt_pci_info = re.findall(".*6300ESB Watchdog Timer", o)
if not wdt_pci_info:
raise error.TestFail("Can not find watchdog pci")
logging.info("Found watchdog pci device : %s" % wdt_pci_info)
# checking watchdog init info using dmesg
error.context("Checking watchdog init info using dmesg", logging.info)
dmesg_info = params.get("dmesg_info", "(i6300ESB|ib700wdt).*init")
(s, o) = session.cmd_status_output(
"dmesg | grep -i '%s' " % dmesg_info)
if s != 0:
error_msg = "Wactchdog device '%s' initialization failed "
raise error.TestError(error_msg % watchdog_device)
logging.info("Watchdog device '%s' add and init successfully"
% watchdog_device)
logging.debug("Init info : '%s'" % o)
def _trigger_watchdog(session, trigger_cmd=None):
"""
Trigger watchdog action
Params:
@session: guest connect session.
@trigger_cmd: cmd trigger the watchdog
"""
if trigger_cmd is not None:
error.context("Trigger Watchdog action using:'%s'." % trigger_cmd,
logging.info)
session.sendline(trigger_cmd)
def _action_check(session, watchdog_action):
"""
Check whether or not the watchdog action occurred. if the action was
not occurred will raise error.
"""
# when watchdog action is pause, shutdown, reset, poweroff
# the vm session will lost responsive
response_timeout = int(params.get("response_timeout", '240'))
error.context("Check whether or not watchdog action '%s' took effect"
% watchdog_action, logging.info)
if not utils_misc.wait_for(lambda: not session.is_responsive(),
response_timeout, 0, 1):
if watchdog_action == "none" or watchdog_action == "debug":
logging.info("OK, the guest session is responsive still")
else:
txt = "Oops, seems action '%s' took no" % watchdog_action
txt += " effect, guest is still responsive."
raise error.TestFail(txt)
# when action is poweroff or shutdown(without no-shutdown option), the vm
# will dead, and qemu exit.
# The others the vm monitor still responsive, can report the vm status.
if (watchdog_action == "poweroff" or (watchdog_action == "shutdown" and
params.get("disable_shutdown") != "yes")):
if not utils_misc.wait_for(lambda: vm.is_dead(),
response_timeout, 0, 1):
txt = "Oops, seems '%s' action took no effect, " % watchdog_action
txt += "guest is still alive!"
raise error.TestFail(txt)
else:
if watchdog_action == "pause":
f_param = "paused"
elif watchdog_action == "shutdown":
f_param = "shutdown"
else:
f_param = "running"
if not utils_misc.wait_for(
lambda: vm.monitor.verify_status(f_param),
response_timeout, 0, 1):
logging.debug("Monitor status is:%s" % vm.monitor.get_status())
txt = "Oops, seems action '%s' took no effect" % watchdog_action
txt += " , Wrong monitor status!"
raise error.TestFail(txt)
# when the action is reset, need can relogin the guest.
if watchdog_action == "reset":
logging.info("Try to login the guest after reboot")
vm.wait_for_login(timeout=relogin_timeout)
logging.info("Watchdog action '%s' come into effect." %
watchdog_action)
def check_watchdog_support():
"""
check the host qemu-kvm support watchdog device
Test Step:
1. Send qemu command 'qemu -watchdog ?'
2. Check the watchdog type that the host support.
"""
qemu_binary = utils_misc.get_qemu_binary(params)
watchdog_type_check = params.get(
"watchdog_type_check", " -watchdog '?'")
qemu_cmd = qemu_binary + watchdog_type_check
# check the host support watchdog types.
error.context("Checking whether or not the host support WDT '%s'"
% watchdog_device_type, logging.info)
watchdog_device = utils.system_output("%s 2>&1" % qemu_cmd,
retain_output=True)
if watchdog_device:
if re.findall(watchdog_device_type, watchdog_device, re.I):
logging.info("The host support '%s' type watchdog device" %
watchdog_device_type)
else:
logging.info("The host support watchdog device type is: '%s'"
% watchdog_device)
raise error.TestNAError("watdog %s isn't supported!"
% watchdog_device_type)
else:
raise error.TestNAError("No watchdog device supported by the host!")
def guest_boot_with_watchdog():
"""
check the guest can boot with watchdog device
Test Step:
1. Boot guest with watchdog device
2. Check watchdog device have been initialized successfully in guest
"""
_watchdog_device_check(session, watchdog_device_type)
def watchdog_action_test():
"""
Watchdog action test
Test Step:
1. Boot guest with watchdog device
2. Check watchdog device have been initialized successfully in guest
3.Trigger wathchdog action through open /dev/watchdog
4.Ensure watchdog_action take effect.
"""
_watchdog_device_check(session, watchdog_device_type)
_trigger_watchdog(session, trigger_cmd)
_action_check(session, watchdog_action)
def magic_close_support():
"""
Magic close the watchdog action.
Test Step:
1. Boot guest with watchdog device
2. Check watchdog device have been initialized successfully in guest
3. Inside guest, trigger watchdog action"
4. Inside guest, before heartbeat expires, close this action"
5. Wait heartbeat timeout check the watchdog action deactive.
"""
response_timeout = int(params.get("response_timeout", '240'))
magic_cmd = params.get("magic_close_cmd", "echo V > /dev/watchdog")
_watchdog_device_check(session, watchdog_device_type)
_trigger_watchdog(session, trigger_cmd)
# magic close
error.context("Magic close is start", logging.info)
_trigger_watchdog(session, magic_cmd)
if utils_misc.wait_for(lambda: not session.is_responsive(),
response_timeout, 0, 1):
error_msg = "Oops,Watchdog action took effect, magic close FAILED"
raise error.TestFail(error_msg)
logging.info("Magic close took effect.")
def migration_when_wdt_timeout():
"""
Migration when WDT timeout
Test Step:
1. Boot guest with watchdog device
2. Check watchdog device have been initialized successfully in guest
3. Start VM with watchdog device, action reset|pause
4. Inside RHEL guest, trigger watchdog
5. Before WDT timeout, do vm migration
6. After migration, check the watchdog action take effect
"""
mig_timeout = float(params.get("mig_timeout", "3600"))
mig_protocol = params.get("migration_protocol", "tcp")
mig_cancel_delay = int(params.get("mig_cancel") == "yes") * 2
_watchdog_device_check(session, watchdog_device_type)
_trigger_watchdog(session, trigger_cmd)
error.context("Do migration(protocol:%s),Watchdog have been triggered."
% mig_protocol, logging.info)
args = (mig_timeout, mig_protocol, mig_cancel_delay)
migrate_thread = utils.InterruptedThread(vm.migrate, args)
migrate_thread.start()
_action_check(session, watchdog_action)
migrate_thread.join(timeout=mig_timeout)
def hotplug_unplug_watchdog_device():
    """
    Hotplug/unplug watchdog device

    Test Step:
    1. Start VM with "-watchdog-action pause" CLI option
    2. Add WDT via monitor
    3. Trigger watchdog action in guest
    4. Remove WDT device through monitor cmd "device_del"
    5. Resume and relogin the guest, check the device have been removed.
    """
    session = vm.wait_for_login(timeout=timeout)
    # Precondition: no watchdog PCI device may exist before the hotplug.
    o = session.cmd_output("lspci")
    if o:
        wdt_pci_info = re.findall(".*6300ESB Watchdog Timer", o)
        if wdt_pci_info:
            raise error.TestFail("Can find watchdog pci")
    plug_watchdog_device = params.get("plug_watchdog_device", "i6300esb")
    watchdog_device_add = ("device_add driver=%s, id=%s"
                           % (plug_watchdog_device, "watchdog"))
    watchdog_device_del = ("device_del id=%s" % "watchdog")

    error.context("Hotplug watchdog device '%s'" % plug_watchdog_device,
                  logging.info)
    vm.monitor.send_args_cmd(watchdog_device_add)

    # wait watchdog device init
    time.sleep(5)
    _watchdog_device_check(session, plug_watchdog_device)
    _trigger_watchdog(session, trigger_cmd)
    _action_check(session, watchdog_action)

    error.context("Hot unplug watchdog device", logging.info)
    vm.monitor.send_args_cmd(watchdog_device_del)

    error.context("Resume the guest, check the WDT have been removed",
                  logging.info)
    # The 'pause' action froze the guest; resume it before probing lspci.
    vm.resume()
    session = vm.wait_for_login(timeout=timeout)
    o = session.cmd_output("lspci")
    if o:
        wdt_pci_info = re.findall(".*6300ESB Watchdog Timer", o)
        if wdt_pci_info:
            raise error.TestFail("Oops, find watchdog pci, unplug failed")
    logging.info("The WDT remove successfully")
# main procedure
test_type = params.get("test_type")
check_watchdog_support()
error.context("'%s' test starting ... " % test_type, logging.info)
error.context("Boot VM with WDT(Device:'%s', Action:'%s'),and try to login"
% (watchdog_device_type, watchdog_action), logging.info)
params["start_vm"] = "yes"
env_process.preprocess_vm(test, params, env, params.get("main_vm"))
vm = env.get_vm(params["main_vm"])
session = vm.wait_for_login(timeout=timeout)
if params.get("setup_runlevel") == "yes":
error.context("Setup the runlevel for guest", logging.info)
utils_test.qemu.setup_runlevel(params, session)
if (test_type in locals()):
test_running = locals()[test_type]
test_running()
else:
raise error.TestError("Oops test %s doesn't exist, have a check please."
% test_type)
| gpl-2.0 |
lexyan/SickBeard | lib/hachoir_core/compatibility.py | 186 | 4441 | """
Compatibility constants and functions. This module works on Python 1.5 to 2.5.
This module provides:
- True and False constants ;
- any() and all() function ;
- has_yield and has_slice values ;
- isinstance() with Python 2.3 behaviour ;
- reversed() and sorted() function.
True and False constants
========================
Truth constants: True is yes (one) and False is no (zero).
>>> int(True), int(False) # int value
(1, 0)
>>> int(False | True) # and binary operator
1
>>> int(True & False) # or binary operator
0
>>> int(not(True) == False) # not binary operator
1
Warning: on Python smaller than 2.3, True and False are aliases to
number 1 and 0. So "print True" will displays 1 and not True.
any() function
==============
any() returns True if at least one items is True, or False otherwise.
>>> any([False, True])
True
>>> any([True, True])
True
>>> any([False, False])
False
all() function
==============
all() returns True if all items are True, or False otherwise.
This function is just apply binary and operator (&) on all values.
>>> all([True, True])
True
>>> all([False, True])
False
>>> all([False, False])
False
has_yield boolean
=================
has_yield: boolean which indicates if the interpreter supports the yield keyword.
The yield keyword is available since Python 2.0.
has_slice boolean
=================
has_slice: boolean which indicates if the interpreter supports slices with step
argument or not. slice with step is available since Python 2.3.
reversed() and sorted() function
================================
reversed() and sorted() function has been introduced in Python 2.4.
They should return a generator, but in this module the result may be a list.
>>> data = list("cab")
>>> list(sorted(data))
['a', 'b', 'c']
>>> list(reversed("abc"))
['c', 'b', 'a']
"""
import copy
import operator
# --- True and False constants from Python 2.0 ---
# --- Warning: for Python < 2.3, they are aliases for 1 and 0 ---
try:
    # Evaluating the right-hand side raises NameError on interpreters
    # where the True/False names do not exist yet.
    True = True
    False = False
except NameError:
    True = 1
    False = 0
# --- any() from Python 2.5 ---
try:
    from __builtin__ import any
except ImportError:
    def any(items):
        """Return True if at least one element of *items* is truthy."""
        found = False
        for element in items:
            if element:
                found = True
                break
        return found
# --- all() from Python 2.5 ---
try:
    from __builtin__ import all
except ImportError:
    def all(items):
        """Return True only if every element of *items* is truthy.

        Mirrors the Python 2.5 builtin: short-circuits on the first falsy
        element and returns True for an empty iterable.  The previous
        reduce(operator.__and__, items) implementation raised TypeError on
        empty input and produced a bitwise (possibly non-boolean) result
        for integer elements, diverging from the builtin it emulates.
        """
        for item in items:
            if not item:
                return False
        return True
# --- test if interpreter supports yield keyword ---
try:
    # Compile and run a tiny generator: pre-generator interpreters fail
    # the compile() with SyntaxError; the KeyError is raised only if the
    # generator itself misbehaves, which also counts as "no support".
    eval(compile("""
from __future__ import generators
def gen():
    yield 1
    yield 2
if list(gen()) != [1, 2]:
    raise KeyError("42")
""", "<string>", "exec"))
except (KeyError, SyntaxError):
    has_yield = False
else:
    has_yield = True
# --- test if interpreter supports slices (with step argument) ---
try:
    # Extended slicing ("abc"[::-1]) appeared in Python 2.3; older
    # interpreters raise TypeError or SyntaxError here.
    has_slice = eval('"abc"[::-1] == "cba"')
except (TypeError, SyntaxError):
    has_slice = False
# --- isinstance with isinstance Python 2.3 behaviour (arg 2 is a type) ---
try:
if isinstance(1, int):
from __builtin__ import isinstance
except TypeError:
print "Redef isinstance"
def isinstance20(a, typea):
if type(typea) != type(type):
raise TypeError("TypeError: isinstance() arg 2 must be a class, type, or tuple of classes and types")
return type(typea) != typea
isinstance = isinstance20
# --- reversed() from Python 2.4 ---
try:
    from __builtin__ import reversed
except ImportError:
    # if hasYield() == "ok":
    #    code = """
    #def reversed(data):
    #    for index in xrange(len(data)-1, -1, -1):
    #        yield data[index];
    #reversed"""
    #    reversed = eval(compile(code, "<string>", "exec"))
    if has_slice:
        def reversed(data):
            # Fast path: extended slicing reverses a list copy in one step.
            if not isinstance(data, list):
                data = list(data)
            return data[::-1]
    else:
        def reversed(data):
            # Fallback for interpreters without slice step support:
            # copy the items back-to-front by explicit indexing.
            if not isinstance(data, list):
                data = list(data)
            reversed_data = []
            for index in xrange(len(data)-1, -1, -1):
                reversed_data.append(data[index])
            return reversed_data
# --- sorted() from Python 2.4 ---
try:
    from __builtin__ import sorted
except ImportError:
    def sorted(data):
        """Return a sorted copy of *data*, leaving the input untouched."""
        sorted_data = copy.copy(data)
        sorted_data.sort()
        # Bug fix: return the sorted copy; the old code returned the
        # `sorted` function object itself.
        return sorted_data
# Public names re-exported by this compatibility module.
__all__ = ("True", "False",
           "any", "all", "has_yield", "has_slice",
           "isinstance", "reversed", "sorted")
| gpl-3.0 |
odejesush/tensorflow | tensorflow/python/kernel_tests/extract_image_patches_op_test.py | 111 | 3672 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for ExtractImagePatches op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class ExtractImagePatches(test.TestCase):
  """Functional tests for ExtractImagePatches op."""

  def _VerifyValues(self, image, ksizes, strides, rates, padding, patches):
    """Tests input-output pairs for the ExtractImagePatches op.

    Args:
      image: Input tensor with shape: [batch, in_rows, in_cols, depth].
      ksizes: Patch size specified as: [ksize_rows, ksize_cols].
      strides: Output strides, specified as [stride_rows, stride_cols].
      rates: Atrous rates, specified as [rate_rows, rate_cols].
      padding: Padding type.
      patches: Expected output.
    """
    # The op takes 4-element attrs; the batch and depth dimensions are
    # never strided or dilated, so pad with leading/trailing 1s.
    ksizes = [1] + ksizes + [1]
    strides = [1] + strides + [1]
    rates = [1] + rates + [1]
    with self.test_session(use_gpu=True):
      out_tensor = array_ops.extract_image_patches(
          constant_op.constant(image),
          ksizes=ksizes,
          strides=strides,
          rates=rates,
          padding=padding,
          name="im2col")
      self.assertAllClose(patches, out_tensor.eval())

  def testKsize1x1Stride1x1Rate1x1(self):
    """Verifies that for 1x1 kernel the output equals the input."""
    # [2, 3, 4, 5]
    image = np.reshape(range(120), [2, 3, 4, 5])
    # [2, 3, 4, 5]
    patches = np.reshape(range(120), [2, 3, 4, 5])
    for padding in ["VALID", "SAME"]:
      self._VerifyValues(
          image,
          ksizes=[1, 1],
          strides=[1, 1],
          rates=[1, 1],
          padding=padding,
          patches=patches)

  def testKsize1x1Stride2x3Rate1x1(self):
    """Test for 1x1 kernel with strides: output is a subsampled input."""
    # [2, 4, 5, 3]
    image = np.reshape(range(120), [2, 4, 5, 3])
    # [2, 2, 2, 3]
    patches = image[:, ::2, ::3, :]
    for padding in ["VALID", "SAME"]:
      self._VerifyValues(
          image,
          ksizes=[1, 1],
          strides=[2, 3],
          rates=[1, 1],
          padding=padding,
          patches=patches)

  def testKsize2x2Stride1x1Rate1x1Valid(self):
    """Test for 2x2 kernel with VALID padding."""
    # [1, 2, 2, 1]
    image = [[[[1], [2]], [[3], [4]]]]
    # [1, 1, 1, 4]
    patches = [[[[1, 2, 3, 4]]]]
    self._VerifyValues(
        image,
        ksizes=[2, 2],
        strides=[1, 1],
        rates=[1, 1],
        padding="VALID",
        patches=patches)

  def testKsize2x2Stride1x1Rate1x1Same(self):
    """Test for 2x2 kernel with SAME padding."""
    # [1, 2, 2, 1]
    image = [[[[1], [2]], [[3], [4]]]]
    # [1, 2, 2, 4]; positions that fall outside the image are zero-filled.
    patches = [[[[1, 2, 3, 4], [2, 0, 4, 0]], [[3, 4, 0, 0], [4, 0, 0, 0]]]]
    self._VerifyValues(
        image,
        ksizes=[2, 2],
        strides=[1, 1],
        rates=[1, 1],
        padding="SAME",
        patches=patches)
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
chudaol/edx-platform | common/djangoapps/config_models/admin.py | 84 | 7287 | """
Admin site models for managing :class:`.ConfigurationModel` subclasses
"""
from django.forms import models
from django.contrib import admin
from django.contrib.admin import ListFilter
from django.core.cache import get_cache, InvalidCacheBackendError
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404
from django.utils.translation import ugettext_lazy as _
try:
    # Prefer the dedicated 'configuration' cache backend when one is
    # configured; otherwise fall back to Django's default cache.
    cache = get_cache('configuration')  # pylint: disable=invalid-name
except InvalidCacheBackendError:
    from django.core.cache import cache
# pylint: disable=protected-access
class ConfigurationModelAdmin(admin.ModelAdmin):
    """
    :class:`~django.contrib.admin.ModelAdmin` for :class:`.ConfigurationModel` subclasses
    """
    date_hierarchy = 'change_date'

    def get_actions(self, request):
        # Replace the default admin actions with a single 'revert' action.
        return {
            'revert': (ConfigurationModelAdmin.revert, 'revert', _('Revert to the selected configuration'))
        }

    def get_list_display(self, request):
        # Show every model field as a changelist column.
        return self.model._meta.get_all_field_names()

    # Don't allow deletion of configuration
    def has_delete_permission(self, request, obj=None):
        return False

    # Make all fields read-only when editing an object
    def get_readonly_fields(self, request, obj=None):
        if obj:  # editing an existing object
            return self.model._meta.get_all_field_names()
        return self.readonly_fields

    def add_view(self, request, form_url='', extra_context=None):
        # Prepopulate new configuration entries with the value of the current config
        get = request.GET.copy()
        get.update(models.model_to_dict(self.model.current()))
        request.GET = get
        return super(ConfigurationModelAdmin, self).add_view(request, form_url, extra_context)

    # Hide the save buttons in the change view
    def change_view(self, request, object_id, form_url='', extra_context=None):
        extra_context = extra_context or {}
        extra_context['readonly'] = True
        return super(ConfigurationModelAdmin, self).change_view(
            request,
            object_id,
            form_url,
            extra_context=extra_context
        )

    def save_model(self, request, obj, form, change):
        """Record who made the change, then invalidate the cached config."""
        obj.changed_by = request.user
        super(ConfigurationModelAdmin, self).save_model(request, obj, form, change)
        # Drop both the per-key entry and the key-values entry so the next
        # read sees the newly saved configuration row.
        cache.delete(obj.cache_key_name(*(getattr(obj, key_name) for key_name in obj.KEY_FIELDS)))
        cache.delete(obj.key_values_cache_key_name())

    def revert(self, request, queryset):
        """
        Admin action to revert a configuration back to the selected value
        """
        if queryset.count() != 1:
            self.message_user(request, _("Please select a single configuration to revert to."))
            return

        target = queryset[0]
        # Clearing the pk makes save_model insert a fresh row (history is
        # append-only) instead of editing the old one.
        target.id = None
        self.save_model(request, target, None, False)
        self.message_user(request, _("Reverted configuration."))

        return HttpResponseRedirect(
            reverse(
                'admin:{}_{}_change'.format(
                    self.model._meta.app_label,
                    self.model._meta.module_name,
                ),
                args=(target.id,),
            )
        )
class ShowHistoryFilter(ListFilter):
    """
    Admin change view filter to show only the most recent (i.e. the "current") row for each
    unique key value.
    """
    title = _('Status')
    # Query string parameter used to toggle full-history display.
    parameter_name = 'show_history'

    def __init__(self, request, params, model, model_admin):
        super(ShowHistoryFilter, self).__init__(request, params, model, model_admin)
        if self.parameter_name in params:
            # Claim our parameter so the changelist does not interpret it
            # as a model field lookup.
            value = params.pop(self.parameter_name)
            self.used_parameters[self.parameter_name] = value

    def has_output(self):
        """ Should this filter be shown? """
        return True

    def choices(self, cl):
        """ Returns choices ready to be output in the template. """
        show_all = self.used_parameters.get(self.parameter_name) == "1"
        return (
            {
                'display': _('Current Configuration'),
                'selected': not show_all,
                'query_string': cl.get_query_string({}, [self.parameter_name]),
            },
            {
                'display': _('All (Show History)'),
                'selected': show_all,
                'query_string': cl.get_query_string({self.parameter_name: "1"}, []),
            }
        )

    def queryset(self, request, queryset):
        """ Filter the queryset. No-op since it's done by KeyedConfigurationModelAdmin """
        return queryset

    def expected_parameters(self):
        """ List the query string params used by this filter """
        return [self.parameter_name]
class KeyedConfigurationModelAdmin(ConfigurationModelAdmin):
    """
    :class:`~django.contrib.admin.ModelAdmin` for :class:`.ConfigurationModel` subclasses that
    use extra keys (i.e. they have KEY_FIELDS set).
    """
    date_hierarchy = None
    list_filter = (ShowHistoryFilter, )

    def queryset(self, request):
        """
        Annotate the queryset with an 'is_active' property that's true iff that row is the most
        recently added row for that particular set of KEY_FIELDS values.
        Filter the queryset to show only is_active rows by default.
        """
        if request.GET.get(ShowHistoryFilter.parameter_name) == '1':
            queryset = self.model.objects.with_active_flag()
        else:
            # Show only the most recent row for each key.
            queryset = self.model.objects.current_set()
        ordering = self.get_ordering(request)
        if ordering:
            return queryset.order_by(*ordering)
        return queryset

    def get_list_display(self, request):
        """ Add a link to each row for creating a new row using the chosen row as a template """
        return self.model._meta.get_all_field_names() + ['edit_link']

    def add_view(self, request, form_url='', extra_context=None):
        # Prepopulate new configuration entries with the value of the current config, if given:
        if 'source' in request.GET:
            get = request.GET.copy()
            source_id = int(get.pop('source')[0])
            source = get_object_or_404(self.model, pk=source_id)
            get.update(models.model_to_dict(source))
            request.GET = get
        # Call our grandparent's add_view, skipping the parent code
        # because the parent code has a different way to prepopulate new configuration entries
        # with the value of the latest config, which doesn't make sense for keyed models.
        # pylint: disable=bad-super-call
        return super(ConfigurationModelAdmin, self).add_view(request, form_url, extra_context)

    def edit_link(self, inst):
        """ Edit link for the change view """
        if not inst.is_active:
            return u'--'
        update_url = reverse('admin:{}_{}_add'.format(self.model._meta.app_label, self.model._meta.module_name))
        update_url += "?source={}".format(inst.pk)
        return u'<a href="{}">{}</a>'.format(update_url, _('Update'))
    edit_link.allow_tags = True
    edit_link.short_description = _('Update')
| agpl-3.0 |
Kobzol/kaira | ptp/gencpp/checker.py | 10 | 7739 | #
# Copyright (C) 2013 Stanislav Bohm
#
# This file is part of Kaira.
#
# Kaira is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License, or
# (at your option) any later version.
#
# Kaira is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Kaira. If not, see <http://www.gnu.org/licenses/>.
#
import base.tester
import base.utils as utils
import base.paths as paths
from base.net import Declarations
import os.path
import build
from copy import copy
class CheckStatement(base.tester.Check):
    """A single compile-time check: wraps a C++ *expression* in a uniquely
    named function so the external tester can try to compile it and report
    a type error back to the user."""

    def __init__(self, expression, decls=None, return_type="void", source=None):
        self.expression = expression
        # Declarations become the generated function's parameter list.
        self.decls = decls
        self.return_type = return_type
        # Project source element the failure message is attributed to.
        self.source = source

    def write_prologue(self, writer):
        # Emit "<return_type> <unique_name> (<type name, ...>) {".
        if self.decls is not None:
            decls = self.decls.get_list()
        else:
            decls = []
        writer.line("{0} {1} ({2}) {{",
                    self.return_type,
                    self.new_id(),
                    ",".join("{0} {1}".format(t, name) for name, t in decls))

    def write_epilogue(self, writer):
        # "}}" because writer.line() runs str.format on its argument.
        writer.line("}}")

    def write_content(self, writer):
        writer.raw_line(self.expression)

    def throw_exception(self):
        raise utils.PtpException(self.message, self.source)
class TypeChecker:
    """Accumulates, for one C++ type name, which support functions
    (token_name, pack, unpack, octave conversions) the generated code
    requires, and emits the compile-time checks that verify they exist."""

    def __init__(self, name, source, functions):
        self.name = name
        self.sources = set([ source ])
        self.functions = set(functions)

    def update(self, type_checker):
        """Merge requirements collected elsewhere for the same type."""
        assert type_checker.name == self.name
        self.sources.update(type_checker.sources)
        self.functions.update(type_checker.functions)

    def add_checks(self, tester):
        """Register one check for the type itself plus one per required
        support function on *tester*."""
        var = base.tester.new_id()
        # Attribute failures to the earliest source mentioning the type.
        source = min(self.sources)
        check = CheckStatement("{0} *{1};".format(self.name, base.tester.new_id()), source=source)
        check.own_message = "Invalid type '{0}'".format(self.name)
        tester.add(check)

        message = "Function '{0}' not defined for type '{1}'"

        if "token_name" in self.functions:
            decls = Declarations()
            decls.set(var, self.name + " &")
            check = CheckStatement("ca::token_name({0});".format(var),
                                   decls, source=source)
            check.own_message = message.format("token_name", self.name)
            tester.add(check)

        if "pack" in self.functions:
            decls = Declarations()
            decls.set(var, self.name + "&")
            decls.set("packer", "ca::Packer &")
            check = CheckStatement("ca::pack(packer, {0});".format(var),
                                   decls,
                                   source=source)
            check.own_message = message.format("ca::pack", self.name)
            tester.add(check)

        if "unpack" in self.functions:
            decls = Declarations()
            decls.set(var, self.name + "&")
            decls.set("unpacker", "ca::Unpacker &")
            check = CheckStatement("ca::unpack(unpacker, {0});".format(var),
                                   decls,
                                   source=source)
            check.own_message = message.format("ca::unpack", self.name)
            tester.add(check)

        if "from_octave_value" in self.functions:
            decls = Declarations()
            decls.set(var, self.name + "&")
            ovalue = base.tester.new_id()
            decls.set(ovalue, "octave_value&")
            check = CheckStatement("caoctave::from_octave_value({0},{1});".format(var, ovalue),
                                   decls,
                                   source=source)
            check.own_message = message.format("caoctave::from_octave_value", self.name)
            tester.add(check)

        if "to_octave_value" in self.functions:
            decls = Declarations()
            decls.set(var, self.name + "&")
            check = CheckStatement("return caoctave::to_octave_value({0});".format(var),
                                   decls,
                                   "octave_value",
                                   source=source)
            check.own_message = message.format("caoctave::to_octave_value", self.name)
            tester.add(check)
class Checker:
    """Collects type and expression checks for a project and runs them all
    through the external compile tester in one pass."""

    def __init__(self, project):
        self.project = project
        # Maps C++ type name -> TypeChecker with accumulated requirements.
        self.types = {}
        self.checks = []

    def check_type(self, typename, source, functions=()):
        """Require *typename* to exist (and support *functions*)."""
        t = self.types.get(typename)
        if t is None:
            self.types[typename] = TypeChecker(typename, source, functions)
        else:
            self.types[typename].update(TypeChecker(typename, source, functions))

    def check_expression(self, expr, decls, return_type, source, message=None):
        """Require *expr* to be valid and convertible to *return_type*."""
        self._check_expression(expr, decls, source, message)
        check = CheckStatement("return ({0});".format(expr), decls, return_type, source=source)
        if message:
            check.own_message = message
        else:
            check.own_message = "Invalid type of expression"
        self.checks.append(check)

    def check_may_form_vector(self, expr, decls, return_type, source, message=None):
        """Require *expr* to be push_back-able into a *return_type* value."""
        self._check_expression(expr, decls, source, message)
        decls = copy(decls)
        v = base.tester.new_id()
        decls.set(v, return_type)
        check = CheckStatement("{0}.push_back({1});".format(v, expr), decls, source=source)
        if message:
            check.own_message = message
        else:
            check.own_message = "Invalid type of expression"
        self.checks.append(check)

    def _check_expression(self, expr, decls, source, message):
        # Bare validity check (statement form), shared by the public checks.
        check = CheckStatement(expr + ";", decls, source=source)
        if message:
            check.own_message = message
        self.checks.append(check)

    def prepare_writer(self, filename):
        """Create the builder used to write each generated test file."""
        builder = build.Builder(self.project, filename)
        build.write_header(builder)
        if self.project.get_build_with_octave():
            builder.line("#include <caoctave.h>")
        return builder

    def run(self):
        """Write the project header, configure compiler flags, then compile
        all collected checks; raises PtpException on the first failure."""
        builder = build.Builder(self.project,
                                os.path.join("/tmp", self.project.get_name() + ".h"))
        build.write_header_file(builder)
        builder.write_to_file()

        tester = base.tester.Tester()
        tester.prepare_writer = self.prepare_writer
        tester.args = [ "-I", os.path.join(paths.KAIRA_ROOT, paths.CAILIE_INCLUDE_DIR),
                        "-I", self.project.root_directory ]

        if self.project.get_build_with_octave():
            import ptp # To avoid cyclic import
            tester.args += [ "-I", os.path.join(paths.KAIRA_ROOT, paths.CAOCTAVE_INCLUDE_DIR) ]
            tester.args += ptp.get_config("Octave", "INCFLAGS").split()

        if self.project.build_target == "simrun":
            tester.args += [ "-I", os.path.join(paths.KAIRA_ROOT, paths.CASIMRUN_INCLUDE_DIR) ]

        tester.args += self.project.get_build_option("CFLAGS").split()
        # Smoke-compile first: a broken environment should be reported
        # before individual checks are attributed to user sources.
        tester.run()
        if tester.stderr:
            raise utils.PtpException(tester.stderr)

        for t in self.types.values():
            t.add_checks(tester)

        for check in self.checks:
            tester.add(check)

        check = tester.run()
        if check is not None:
            check.throw_exception()
| gpl-3.0 |
rtrigoso/somepolymath | node_modules/pygmentize-bundled/vendor/pygments/build-2.7/pygments/lexers/shell.py | 287 | 15340 | # -*- coding: utf-8 -*-
"""
pygments.lexers.shell
~~~~~~~~~~~~~~~~~~~~~
Lexers for various shells.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import Lexer, RegexLexer, do_insertions, bygroups, include
from pygments.token import Punctuation, \
Text, Comment, Operator, Keyword, Name, String, Number, Generic
from pygments.util import shebang_matches
# Lexers exported by this module.
__all__ = ['BashLexer', 'BashSessionLexer', 'TcshLexer', 'BatchLexer',
           'PowerShellLexer', 'ShellSessionLexer']

# Matches a single input line including its trailing newline.
line_re = re.compile('.*?\n')
class BashLexer(RegexLexer):
    """
    Lexer for (ba|k|)sh shell scripts.

    *New in Pygments 0.6.*
    """

    name = 'Bash'
    aliases = ['bash', 'sh', 'ksh']
    filenames = ['*.sh', '*.ksh', '*.bash', '*.ebuild', '*.eclass',
                 '.bashrc', 'bashrc', '.bash_*', 'bash_*']
    mimetypes = ['application/x-sh', 'application/x-shellscript']

    tokens = {
        'root': [
            include('basic'),
            (r'\$\(\(', Keyword, 'math'),
            (r'\$\(', Keyword, 'paren'),
            (r'\${#?', Keyword, 'curly'),
            (r'`', String.Backtick, 'backticks'),
            include('data'),
        ],
        'basic': [
            # Flow-control keywords.
            (r'\b(if|fi|else|while|do|done|for|then|return|function|case|'
             r'select|continue|until|esac|elif)\s*\b',
             Keyword),
            # Shell builtins; (?!\.) avoids matching e.g. "test.sh".
            (r'\b(alias|bg|bind|break|builtin|caller|cd|command|compgen|'
             r'complete|declare|dirs|disown|echo|enable|eval|exec|exit|'
             r'export|false|fc|fg|getopts|hash|help|history|jobs|kill|let|'
             r'local|logout|popd|printf|pushd|pwd|read|readonly|set|shift|'
             r'shopt|source|suspend|test|time|times|trap|true|type|typeset|'
             r'ulimit|umask|unalias|unset|wait)\s*\b(?!\.)',
             Name.Builtin),
            (r'#.*\n', Comment),
            (r'\\[\w\W]', String.Escape),
            (r'(\b\w+)(\s*)(=)', bygroups(Name.Variable, Text, Operator)),
            (r'[\[\]{}()=]', Operator),
            (r'<<<', Operator),  # here-string
            (r'<<-?\s*(\'?)\\?(\w+)[\w\W]+?\2', String),
            (r'&&|\|\|', Operator),
        ],
        'data': [
            (r'(?s)\$?"(\\\\|\\[0-7]+|\\.|[^"\\])*"', String.Double),
            (r"(?s)\$?'(\\\\|\\[0-7]+|\\.|[^'\\])*'", String.Single),
            (r';', Punctuation),
            (r'&', Punctuation),
            (r'\|', Punctuation),
            (r'\s+', Text),
            (r'[^=\s\[\]{}()$"\'`\\<&|;]+', Text),
            (r'\d+(?= |\Z)', Number),
            (r'\$#?(\w+|.)', Name.Variable),
            (r'<', Text),
        ],
        'curly': [
            (r'}', Keyword, '#pop'),
            (r':-', Keyword),
            (r'[a-zA-Z0-9_]+', Name.Variable),
            (r'[^}:"\'`$]+', Punctuation),
            (r':', Punctuation),
            include('root'),
        ],
        'paren': [
            (r'\)', Keyword, '#pop'),
            include('root'),
        ],
        'math': [
            (r'\)\)', Keyword, '#pop'),
            (r'[-+*/%^|&]|\*\*|\|\|', Operator),
            (r'\d+', Number),
            include('root'),
        ],
        'backticks': [
            (r'`', String.Backtick, '#pop'),
            include('root'),
        ],
    }

    def analyse_text(text):
        # Shebang is a strong signal; a leading "$ " prompt a weak one.
        if shebang_matches(text, r'(ba|z|)sh'):
            return 1
        if text.startswith('$ '):
            return 0.2
class BashSessionLexer(Lexer):
    """
    Lexer for simplistic shell sessions.

    *New in Pygments 1.1.*
    """

    name = 'Bash Session'
    aliases = ['console']
    filenames = ['*.sh-session']
    mimetypes = ['application/x-shell-session']

    def get_tokens_unprocessed(self, text):
        # Delegate command text to a BashLexer; prompt/output lines are
        # tokenized here and commands re-injected via do_insertions().
        bashlexer = BashLexer(**self.options)

        pos = 0
        curcode = ''
        insertions = []

        for match in line_re.finditer(text):
            line = match.group()
            # Prompt detection: optional "(env)" prefix, then user@host or
            # [user@host path] style, ending in one of $, #, %.
            m = re.match(r'^((?:\(\S+\))?(?:|sh\S*?|\w+\S+[@:]\S+(?:\s+\S+)'
                         r'?|\[\S+[@:][^\n]+\].+)[$#%])(.*\n?)' , line)
            if m:
                # To support output lexers (say diff output), the output
                # needs to be broken by prompts whenever the output lexer
                # changes.
                if not insertions:
                    pos = match.start()

                insertions.append((len(curcode),
                                   [(0, Generic.Prompt, m.group(1))]))
                curcode += m.group(2)
            elif line.startswith('>'):
                # Continuation line of a multi-line command.
                insertions.append((len(curcode),
                                   [(0, Generic.Prompt, line[:1])]))
                curcode += line[1:]
            else:
                # Plain output: flush any pending command first.
                if insertions:
                    toks = bashlexer.get_tokens_unprocessed(curcode)
                    for i, t, v in do_insertions(insertions, toks):
                        yield pos+i, t, v
                yield match.start(), Generic.Output, line
                insertions = []
                curcode = ''
        if insertions:
            for i, t, v in do_insertions(insertions,
                                         bashlexer.get_tokens_unprocessed(curcode)):
                yield pos+i, t, v
class ShellSessionLexer(Lexer):
    """
    Lexer for shell sessions that works with different command prompts

    *New in Pygments 1.6.*
    """

    name = 'Shell Session'
    aliases = ['shell-session']
    filenames = ['*.shell-session']
    mimetypes = ['application/x-sh-session']

    def get_tokens_unprocessed(self, text):
        # Same structure as BashSessionLexer, but with a looser prompt
        # pattern (user@host ending in $, # or %) and no '>' continuation.
        bashlexer = BashLexer(**self.options)

        pos = 0
        curcode = ''
        insertions = []

        for match in line_re.finditer(text):
            line = match.group()
            m = re.match(r'^((?:\[?\S+@[^$#%]+)[$#%])(.*\n?)', line)
            if m:
                # To support output lexers (say diff output), the output
                # needs to be broken by prompts whenever the output lexer
                # changes.
                if not insertions:
                    pos = match.start()

                insertions.append((len(curcode),
                                   [(0, Generic.Prompt, m.group(1))]))
                curcode += m.group(2)
            else:
                if insertions:
                    toks = bashlexer.get_tokens_unprocessed(curcode)
                    for i, t, v in do_insertions(insertions, toks):
                        yield pos+i, t, v
                yield match.start(), Generic.Output, line
                insertions = []
                curcode = ''
        if insertions:
            for i, t, v in do_insertions(insertions,
                                         bashlexer.get_tokens_unprocessed(curcode)):
                yield pos+i, t, v
class BatchLexer(RegexLexer):
    """
    Lexer for the DOS/Windows Batch file format.

    *New in Pygments 0.7.*
    """
    name = 'Batchfile'
    aliases = ['bat', 'dosbatch', 'winbatch']
    filenames = ['*.bat', '*.cmd']
    mimetypes = ['application/x-dos-batch']

    flags = re.MULTILINE | re.IGNORECASE

    tokens = {
        'root': [
            # Lines can start with @ to prevent echo
            (r'^\s*@', Punctuation),
            (r'^(\s*)(rem\s.*)$', bygroups(Text, Comment)),
            (r'".*?"', String.Double),
            (r"'.*?'", String.Single),
            # If made more specific, make sure you still allow expansions
            # like %~$VAR:zlt
            (r'%%?[~$:\w]+%?', Name.Variable),
            (r'::.*', Comment), # Technically :: only works at BOL
            (r'(set)(\s+)(\w+)', bygroups(Keyword, Text, Name.Variable)),
            (r'(call)(\s+)(:\w+)', bygroups(Keyword, Text, Name.Label)),
            (r'(goto)(\s+)(\w+)', bygroups(Keyword, Text, Name.Label)),
            (r'\b(set|call|echo|on|off|endlocal|for|do|goto|if|pause|'
             r'setlocal|shift|errorlevel|exist|defined|cmdextversion|'
             r'errorlevel|else|cd|md|del|deltree|cls|choice)\b', Keyword),
            (r'\b(equ|neq|lss|leq|gtr|geq)\b', Operator),
            include('basic'),
            (r'.', Text),
        ],
        'echo': [
            # Escapes only valid within echo args?
            (r'\^\^|\^<|\^>|\^\|', String.Escape),
            (r'\n', Text, '#pop'),
            include('basic'),
            (r'[^\'"^]+', Text),
        ],
        'basic': [
            (r'".*?"', String.Double),
            (r"'.*?'", String.Single),
            (r'`.*?`', String.Backtick),
            (r'-?\d+', Number),
            (r',', Punctuation),
            (r'=', Operator),
            (r'/\S+', Name),
            (r':\w+', Name.Label),
            (r'\w:\w+', Text),
            (r'([<>|])(\s*)(\w+)', bygroups(Punctuation, Text, Name)),
        ],
    }
class TcshLexer(RegexLexer):
    """
    Lexer for tcsh scripts.

    *New in Pygments 0.10.*
    """

    name = 'Tcsh'
    aliases = ['tcsh', 'csh']
    filenames = ['*.tcsh', '*.csh']
    mimetypes = ['application/x-csh']

    tokens = {
        'root': [
            include('basic'),
            (r'\$\(', Keyword, 'paren'),
            (r'\${#?', Keyword, 'curly'),
            (r'`', String.Backtick, 'backticks'),
            include('data'),
        ],
        'basic': [
            # csh/tcsh flow-control keywords.
            (r'\b(if|endif|else|while|then|foreach|case|default|'
             r'continue|goto|breaksw|end|switch|endsw)\s*\b',
             Keyword),
            # tcsh builtin commands.
            (r'\b(alias|alloc|bg|bindkey|break|builtins|bye|caller|cd|chdir|'
             r'complete|dirs|echo|echotc|eval|exec|exit|fg|filetest|getxvers|'
             r'glob|getspath|hashstat|history|hup|inlib|jobs|kill|'
             r'limit|log|login|logout|ls-F|migrate|newgrp|nice|nohup|notify|'
             r'onintr|popd|printenv|pushd|rehash|repeat|rootnode|popd|pushd|'
             r'set|shift|sched|setenv|setpath|settc|setty|setxvers|shift|'
             r'source|stop|suspend|source|suspend|telltc|time|'
             r'umask|unalias|uncomplete|unhash|universe|unlimit|unset|unsetenv|'
             r'ver|wait|warp|watchlog|where|which)\s*\b',
             Name.Builtin),
            (r'#.*\n', Comment),
            (r'\\[\w\W]', String.Escape),
            (r'(\b\w+)(\s*)(=)', bygroups(Name.Variable, Text, Operator)),
            (r'[\[\]{}()=]+', Operator),
            (r'<<\s*(\'?)\\?(\w+)[\w\W]+?\2', String),
        ],
        'data': [
            (r'(?s)"(\\\\|\\[0-7]+|\\.|[^"\\])*"', String.Double),
            (r"(?s)'(\\\\|\\[0-7]+|\\.|[^'\\])*'", String.Single),
            (r'\s+', Text),
            (r'[^=\s\[\]{}()$"\'`\\]+', Text),
            (r'\d+(?= |\Z)', Number),
            (r'\$#?(\w+|.)', Name.Variable),
        ],
        'curly': [
            (r'}', Keyword, '#pop'),
            (r':-', Keyword),
            (r'[a-zA-Z0-9_]+', Name.Variable),
            (r'[^}:"\'`$]+', Punctuation),
            (r':', Punctuation),
            include('root'),
        ],
        'paren': [
            (r'\)', Keyword, '#pop'),
            include('root'),
        ],
        'backticks': [
            (r'`', String.Backtick, '#pop'),
            include('root'),
        ],
    }
class PowerShellLexer(RegexLexer):
    """
    For Windows PowerShell code.
    *New in Pygments 1.5.*
    """
    name = 'PowerShell'
    aliases = ['powershell', 'posh', 'ps1', 'psm1']
    filenames = ['*.ps1','*.psm1']
    mimetypes = ['text/x-powershell']
    flags = re.DOTALL | re.IGNORECASE | re.MULTILINE
    # Language keywords and common attribute/parameter names (lowercased;
    # matching is case-insensitive via re.IGNORECASE above).
    keywords = (
        'while validateset validaterange validatepattern validatelength '
        'validatecount until trap switch return ref process param parameter in '
        'if global: function foreach for finally filter end elseif else '
        'dynamicparam do default continue cmdletbinding break begin alias \\? '
        '% #script #private #local #global mandatory parametersetname position '
        'valuefrompipeline valuefrompipelinebypropertyname '
        'valuefromremainingarguments helpmessage try catch throw').split()
    # Comparison/logical operators written as -op in PowerShell.
    operators = (
        'and as band bnot bor bxor casesensitive ccontains ceq cge cgt cle '
        'clike clt cmatch cne cnotcontains cnotlike cnotmatch contains '
        'creplace eq exact f file ge gt icontains ieq ige igt ile ilike ilt '
        'imatch ine inotcontains inotlike inotmatch ireplace is isnot le like '
        'lt match ne not notcontains notlike notmatch or regex replace '
        'wildcard').split()
    # Approved cmdlet verbs (matched as "verb-noun" below).
    verbs = (
        'write where wait use update unregister undo trace test tee take '
        'suspend stop start split sort skip show set send select scroll resume '
        'restore restart resolve resize reset rename remove register receive '
        'read push pop ping out new move measure limit join invoke import '
        'group get format foreach export expand exit enter enable disconnect '
        'disable debug cxnew copy convertto convertfrom convert connect '
        'complete compare clear checkpoint aggregate add').split()
    # Comment-based help section names (".SYNOPSIS" etc.).
    commenthelp = (
        'component description example externalhelp forwardhelpcategory '
        'forwardhelptargetname functionality inputs link '
        'notes outputs parameter remotehelprunspace role synopsis').split()
    tokens = {
        'root': [
            # we need to count pairs of parentheses for correct highlight
            # of '$(...)' blocks in strings
            (r'\(', Punctuation, 'child'),
            (r'\s+', Text),
            (r'^(\s*#[#\s]*)(\.(?:%s))([^\n]*$)' % '|'.join(commenthelp),
             bygroups(Comment, String.Doc, Comment)),
            (r'#[^\n]*?$', Comment),
            # NOTE(review): the alternation (<|<) is redundant — likely a
            # leftover from HTML-unescaping '(&lt;|<)'; it simply matches '<#'.
            (r'(<|<)#', Comment.Multiline, 'multline'),
            (r'@"\n', String.Heredoc, 'heredoc-double'),
            (r"@'\n.*?\n'@", String.Heredoc),
            # escaped syntax
            (r'`[\'"$@-]', Punctuation),
            (r'"', String.Double, 'string'),
            (r"'([^']|'')*'", String.Single),
            (r'(\$|@@|@)((global|script|private|env):)?[a-z0-9_]+',
             Name.Variable),
            (r'(%s)\b' % '|'.join(keywords), Keyword),
            (r'-(%s)\b' % '|'.join(operators), Operator),
            (r'(%s)-[a-z_][a-z0-9_]*\b' % '|'.join(verbs), Name.Builtin),
            (r'\[[a-z_\[][a-z0-9_. `,\[\]]*\]', Name.Constant), # .net [type]s
            (r'-[a-z_][a-z0-9_]*', Name),
            (r'\w+', Name),
            (r'[.,;@{}\[\]$()=+*/\\&%!~?^`|<>-]|::', Punctuation),
        ],
        # Inside '(' ... ')': recurse so nesting is balanced.
        'child': [
            (r'\)', Punctuation, '#pop'),
            include('root'),
        ],
        # Block comment <# ... #>, with comment-based help keywords inside.
        'multline': [
            (r'[^#&.]+', Comment.Multiline),
            # NOTE(review): (>|>) is redundant like (<|<) above; matches '#>'.
            (r'#(>|>)', Comment.Multiline, '#pop'),
            (r'\.(%s)' % '|'.join(commenthelp), String.Doc),
            (r'[#&.]', Comment.Multiline),
        ],
        # Double-quoted string with backtick escapes and $() interpolation.
        'string': [
            (r"`[0abfnrtv'\"\$]", String.Escape),
            (r'[^$`"]+', String.Double),
            (r'\$\(', Punctuation, 'child'),
            (r'""', String.Double),
            (r'[`$]', String.Double),
            (r'"', String.Double, '#pop'),
        ],
        # @" ... "@ here-string, terminated by '"@' at line start.
        'heredoc-double': [
            (r'\n"@', String.Heredoc, '#pop'),
            (r'\$\(', Punctuation, 'child'),
            # NOTE(review): this pattern requires a literal '"]' suffix and
            # looks suspicious — TODO confirm against upstream Pygments.
            (r'[^@\n]+"]', String.Heredoc),
            (r".", String.Heredoc),
        ]
    }
| mit |
umitproject/openmonitor-aggregator | django/db/__init__.py | 60 | 2444 | from django.conf import settings
from django.core import signals
from django.core.exceptions import ImproperlyConfigured
from django.db.utils import (ConnectionHandler, ConnectionRouter,
load_backend, DEFAULT_DB_ALIAS, DatabaseError, IntegrityError)
__all__ = ('backend', 'connection', 'connections', 'router', 'DatabaseError',
'IntegrityError', 'DEFAULT_DB_ALIAS')
if DEFAULT_DB_ALIAS not in settings.DATABASES:
raise ImproperlyConfigured("You must define a '%s' database" % DEFAULT_DB_ALIAS)
connections = ConnectionHandler(settings.DATABASES)
router = ConnectionRouter(settings.DATABASE_ROUTERS)
# `connection`, `DatabaseError` and `IntegrityError` are convenient aliases
# for backend bits.
# DatabaseWrapper.__init__() takes a dictionary, not a settings module, so
# we manually create the dictionary from the settings, passing only the
# settings that the database backends care about. Note that TIME_ZONE is used
# by the PostgreSQL backends.
# We load all these up for backwards compatibility, you should use
# connections['default'] instead.
class DefaultConnectionProxy(object):
    """
    Proxy for accessing the default DatabaseWrapper object's attributes. If you
    need to access the DatabaseWrapper object itself, use
    connections[DEFAULT_DB_ALIAS] instead.
    """
    # Attribute access is forwarded on every call rather than cached, so the
    # proxy always reflects whatever wrapper the handler currently holds for
    # the default alias.
    def __getattr__(self, item):
        return getattr(connections[DEFAULT_DB_ALIAS], item)
    def __setattr__(self, name, value):
        return setattr(connections[DEFAULT_DB_ALIAS], name, value)
connection = DefaultConnectionProxy()
backend = load_backend(connection.settings_dict['ENGINE'])
# Register an event that closes the database connection
# when a Django request is finished.
def close_connection(**kwargs):
    """Signal handler: close every configured database connection.

    Connected to ``request_finished`` below; ``**kwargs`` absorbs the
    signal's arguments, which are unused here.
    """
    for conn in connections.all():
        conn.close()
signals.request_finished.connect(close_connection)
# Register an event that resets connection.queries
# when a Django request is started.
def reset_queries(**kwargs):
    """Signal handler: clear each connection's query log.

    Connected to ``request_started`` below so per-request query lists
    start empty.
    """
    for conn in connections.all():
        conn.queries = []
signals.request_started.connect(reset_queries)
# Register an event that rolls back the connections
# when a Django request has an exception.
def _rollback_on_exception(**kwargs):
    """Signal handler: best-effort rollback after a request exception.

    Connected to ``got_request_exception`` below. A connection whose own
    rollback fails (e.g. it was never established) is skipped so the
    original exception is not masked.
    """
    # Imported here to avoid a circular import at module load time.
    from django.db import transaction
    # Iterating the handler presumably yields connection aliases, which is
    # what `using=` expects — TODO confirm against ConnectionHandler.
    for conn in connections:
        try:
            transaction.rollback_unless_managed(using=conn)
        except DatabaseError:
            pass
signals.got_request_exception.connect(_rollback_on_exception)
| agpl-3.0 |
mocne/PycharmProjects | test/lanchView.py | 1 | 1219 | from selenium import webdriver
import time,os,sys
stepNum = 1
options = webdriver.ChromeOptions()
options.add_experimental_option("excludeSwitches",["ignore-certificate-errors"])
browser = webdriver.Chrome(chrome_options=options)
browser.get("http://www.cnaidai.com/")
print stepNum,": Come in First_Page"
stepNum += 1
time.sleep(3)
browser.find_element_by_xpath("/html/body/div[12]/div").click()
print stepNum,': Quit_AlarmView'
stepNum += 1
time.sleep(3)
browser.find_element_by_xpath("/html/body/div[2]/div/div[1]/a/img").click()
print stepNum,": Click_logoIMG"
stepNum += 1
time.sleep(3)
browser.find_element_by_id("username").click()
print stepNum,": Click_Register"
stepNum += 1
time.sleep(2)
browser.find_element_by_css_selector("#userName").send_keys("18267175336")
print stepNum,": Input_PhoneNumber"
stepNum += 1
time.sleep(2)
browser.find_element_by_id("valicode").send_keys("1234")
print stepNum,": Input_Auth_Code"
stepNum += 1
time.sleep(2)
browser.find_element_by_class_name("valcode").click()
print stepNum,": Send_PhoneMessage"
stepNum += 1
time.sleep(10)
browser.find_element_by_css_selector("#phoneCode").send_keys("123456")
print stepNum,": Input_Phone_Auth_Code"
stepNum += 1
time.sleep(2)
| mit |
hradec/gaffer | python/GafferImageTest/ImageNodeTest.py | 8 | 3790 | ##########################################################################
#
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import unittest
import threading
import imath
import IECore
import Gaffer
import GafferTest
import GafferImage
import GafferImageTest
class ImageNodeTest( GafferImageTest.ImageTestCase ) :
	def testCacheThreadSafety( self ) :
		# Grade a constant image, then shrink the value-plug cache so it
		# cannot hold both images at once. Concurrent readers then thrash
		# the cache; the test asserts every thread still sees a consistent
		# result and no exceptions escape.
		c = GafferImage.Constant()
		c["format"].setValue( GafferImage.Format( 200, 200, 1.0 ) )
		g = GafferImage.Grade()
		g["in"].setInput( c["out"] )
		g["multiply"].setValue( imath.Color3f( 0.4, 0.5, 0.6 ) )
		gradedImage = GafferImage.ImageAlgo.image( g["out"] )
		# not enough for both images - will cause cache thrashing
		Gaffer.ValuePlug.setCacheMemoryLimit( 2 * g["out"].channelData( "R", imath.V2i( 0 ) ).memoryUsage() )
		images = []
		exceptions = []
		def grader() :
			try :
				images.append( GafferImage.ImageAlgo.image( g["out"] ) )
			except Exception as e :
				exceptions.append( e )
		def processer() :
			try :
				GafferImageTest.processTiles( g["out"] )
			except Exception as e :
				exceptions.append( e )
		graderThreads = []
		for i in range( 0, 10 ) :
			thread = threading.Thread( target = grader )
			graderThreads.append( thread )
			thread.start()
		for thread in graderThreads :
			thread.join()
		# Every concurrently computed image must match the reference.
		for image in images :
			self.assertEqual( image, gradedImage )
		processerThreads = []
		for i in range( 0, 10 ) :
			thread = threading.Thread( target = processer )
			processerThreads.append( thread )
			thread.start()
		for thread in processerThreads :
			thread.join()
		# Re-raise any exception captured on a worker thread (only the
		# first one reached actually propagates).
		for e in exceptions :
			raise e
	def testNodesConstructWithDefaultValues( self ) :
		self.assertNodesConstructWithDefaultValues( GafferImage )
		self.assertNodesConstructWithDefaultValues( GafferImageTest )
	def setUp( self ) :
		GafferTest.TestCase.setUp( self )
		# Remember the global cache limit so testCacheThreadSafety's
		# shrinking of it can be undone in tearDown.
		self.__previousCacheMemoryLimit = Gaffer.ValuePlug.getCacheMemoryLimit()
	def tearDown( self ) :
		GafferTest.TestCase.tearDown( self )
		Gaffer.ValuePlug.setCacheMemoryLimit( self.__previousCacheMemoryLimit )
if __name__ == "__main__":
unittest.main()
| bsd-3-clause |
taniwha-qf/io_object_mu | import_mu/camera.py | 2 | 1774 | # vim:ts=4:et
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
from math import pi, sqrt
import bpy
from mathutils import Quaternion
from ..mu import MuCamera
from .. import properties
def create_camera(mu, muobj, mucamera, name):
    """Build a Blender camera datablock from a Unity MuCamera.

    Returns a ("camera", datablock, rotation) tuple; the quaternion is a
    90-degree rotation about X (presumably mapping Unity's camera axis
    convention onto Blender's — TODO confirm against the importer).
    `mu` and `muobj` are accepted for handler-signature uniformity but
    are unused here.
    """
    camera = bpy.data.cameras.new(name)
    #mucamera.clearFlags
    # orthographic is 0/1, used directly as an index into the type names.
    camera.type = ['PERSP', 'ORTHO'][mucamera.orthographic]
    camera.lens_unit = 'FOV'
    # blender's fov is in radians, unity's in degrees
    camera.angle = mucamera.fov * pi / 180
    camera.clip_start = mucamera.near
    camera.clip_end = mucamera.far
    muprops = camera.mucameraprop
    properties.SetPropMask(muprops.cullingMask, mucamera.cullingMask)
    muprops.backgroundColor = mucamera.backgroundColor
    muprops.depth = mucamera.depth
    if mucamera.clearFlags > 0:
        # clearFlags is 1-based in the Mu data; the items tuple is 0-based.
        flags = mucamera.clearFlags - 1
        muprops.clearFlags = properties.clearflag_items[flags][0]
    return "camera", camera, Quaternion((0.5**0.5, 0.5**0.5, 0, 0))
# Dispatch table consumed by the importer: maps Mu component types to the
# function that converts them into Blender data.
type_handlers = {
    MuCamera: create_camera,
}
| gpl-2.0 |
raychorn/knowu | django/djangononrelsample2/djangoappengine/tests/test_order.py | 28 | 2128 | from django.test import TestCase
from .models import OrderedModel
class OrderTest(TestCase):
    """Exercises default model ordering and explicit order_by() overrides."""

    def create_ordered_model_items(self):
        """Create four OrderedModel rows with scrambled priorities.

        Returns:
            (pks, priorities): both lists in creation order.
        """
        pks = []
        priorities = [5, 2, 9, 1]
        # Primary keys are 1-based; enumerate(start=1) replaces the old,
        # error-prone manual `pk += 1` bookkeeping.
        for pk, priority in enumerate(priorities, start=1):
            model = OrderedModel(pk=pk, priority=priority)
            model.save()
            pks.append(model.pk)
        return pks, priorities

    def test_default_order(self):
        """The default queryset order is priority descending."""
        pks, priorities = self.create_ordered_model_items()
        # assertEqual throughout: assertEquals is a deprecated alias
        # (removed in Python 3.12).
        self.assertEqual(
            [item.priority for item in OrderedModel.objects.all()],
            sorted(priorities, reverse=True))

    def test_override_default_order(self):
        """order_by('priority') overrides the default descending order."""
        pks, priorities = self.create_ordered_model_items()
        self.assertEqual(
            [item.priority for item in
             OrderedModel.objects.all().order_by('priority')],
            sorted(priorities))

    def test_remove_default_order(self):
        """An empty order_by() clears ordering; results come back by pk."""
        pks, priorities = self.create_ordered_model_items()
        self.assertEqual(
            [item.pk for item in OrderedModel.objects.all().order_by()],
            sorted(pks))

    def test_order_with_pk_filter(self):
        """Default ordering survives pk__in / id__in filters and reverse()."""
        pks, priorities = self.create_ordered_model_items()
        self.assertEqual(
            [item.priority for item in
             OrderedModel.objects.filter(pk__in=pks)],
            sorted(priorities, reverse=True))
        # Test with id__in.
        self.assertEqual(
            [item.priority for item in
             OrderedModel.objects.filter(id__in=pks)],
            sorted(priorities, reverse=True))
        # Test reverse.
        self.assertEqual(
            [item.priority for item in
             OrderedModel.objects.filter(pk__in=pks).reverse()],
            sorted(priorities, reverse=False))

    def test_remove_default_order_with_pk_filter(self):
        """With ordering cleared, pk__in results match creation order."""
        pks, priorities = self.create_ordered_model_items()
        self.assertEqual(
            [item.priority for item in
             OrderedModel.objects.filter(pk__in=pks).order_by()],
            priorities)

    # TODO: Test multiple orders.
| lgpl-3.0 |
ssh-odoo/scrapy | tests/test_utils_defer.py | 121 | 3565 | from twisted.trial import unittest
from twisted.internet import reactor, defer
from twisted.python.failure import Failure
from scrapy.utils.defer import mustbe_deferred, process_chain, \
process_chain_both, process_parallel, iter_errback
from six.moves import xrange
class MustbeDeferredTest(unittest.TestCase):
    def test_success_function(self):
        """mustbe_deferred must delay firing until a reactor turn.

        With maybeDeferred the callback would run synchronously and see
        only [1]; here it must run after the test body appends 2.
        """
        steps = []
        def _append(v):
            steps.append(v)
            return steps
        dfd = mustbe_deferred(_append, 1)
        dfd.addCallback(self.assertEqual, [1, 2]) # it is [1] with maybeDeferred
        steps.append(2) # add another value, that should be catched by assertEqual
        return dfd
    def test_unfired_deferred(self):
        """Same delayed-firing check when the wrapped function itself
        returns a Deferred fired on a later reactor iteration."""
        steps = []
        def _append(v):
            steps.append(v)
            dfd = defer.Deferred()
            reactor.callLater(0, dfd.callback, steps)
            return dfd
        dfd = mustbe_deferred(_append, 1)
        dfd.addCallback(self.assertEqual, [1, 2]) # it is [1] with maybeDeferred
        steps.append(2) # add another value, that should be catched by assertEqual
        return dfd
def cb1(value, arg1, arg2):
    """Callback stub: tag its three inputs with a "(cb1 ...)" marker."""
    return "(cb1 {0} {1} {2})".format(value, arg1, arg2)
def cb2(value, arg1, arg2):
    # Like cb1 but returns an already-fired Deferred, so chain helpers must
    # transparently unwrap deferred results.
    return defer.succeed("(cb2 %s %s %s)" % (value, arg1, arg2))
def cb3(value, arg1, arg2):
    """Callback stub: tag its three inputs with a "(cb3 ...)" marker."""
    parts = (value, arg1, arg2)
    return "(cb3 %s %s %s)" % parts
def cb_fail(value, arg1, arg2):
    # Callback stub that always fails, wrapping a TypeError in a twisted
    # Failure so errback paths are exercised.
    return Failure(TypeError())
def eb1(failure, arg1, arg2):
    """Errback stub: record the wrapped exception type plus both args."""
    exc_name = type(failure.value).__name__
    return "(eb1 {0} {1} {2})".format(exc_name, arg1, arg2)
class DeferUtilsTest(unittest.TestCase):
    @defer.inlineCallbacks
    def test_process_chain(self):
        # Each callback wraps the previous result, so the expected string
        # shows cb1 innermost and cb3 outermost.
        x = yield process_chain([cb1, cb2, cb3], 'res', 'v1', 'v2')
        self.assertEqual(x, "(cb3 (cb2 (cb1 res v1 v2) v1 v2) v1 v2)")
        # A failing callback must propagate its exception out of the chain.
        gotexc = False
        try:
            yield process_chain([cb1, cb_fail, cb3], 'res', 'v1', 'v2')
        except TypeError as e:
            gotexc = True
        self.assertTrue(gotexc)
    @defer.inlineCallbacks
    def test_process_chain_both(self):
        # cb_fail's TypeError is handled by eb1, then cb3 continues the chain.
        x = yield process_chain_both([cb_fail, cb2, cb3], [None, eb1, None], 'res', 'v1', 'v2')
        self.assertEqual(x, "(cb3 (eb1 TypeError v1 v2) v1 v2)")
        # Starting with a Failure routes the first step through eb1.
        fail = Failure(ZeroDivisionError())
        x = yield process_chain_both([eb1, cb2, cb3], [eb1, None, None], fail, 'v1', 'v2')
        self.assertEqual(x, "(cb3 (cb2 (eb1 ZeroDivisionError v1 v2) v1 v2) v1 v2)")
    @defer.inlineCallbacks
    def test_process_parallel(self):
        # Parallel application: every callback sees the same input value.
        x = yield process_parallel([cb1, cb2, cb3], 'res', 'v1', 'v2')
        self.assertEqual(x, ['(cb1 res v1 v2)', '(cb2 res v1 v2)', '(cb3 res v1 v2)'])
    def test_process_parallel_failure(self):
        # One failing callback fails the aggregate Deferred.
        d = process_parallel([cb1, cb_fail, cb3], 'res', 'v1', 'v2')
        self.failUnlessFailure(d, TypeError)
        return d
class IterErrbackTest(unittest.TestCase):
    """Tests for scrapy.utils.defer.iter_errback."""

    def test_iter_errback_good(self):
        """A clean iterable is passed through and no errors are collected."""
        def itergood():
            for x in xrange(10):
                yield x
        errors = []
        out = list(iter_errback(itergood(), errors.append))
        self.assertEqual(out, list(range(10)))
        # assertFalse: failIf() is a long-deprecated alias (removed in
        # modern Python versions).
        self.assertFalse(errors)

    def test_iter_errback_bad(self):
        """Iteration stops at the failing item and the Failure is captured."""
        def iterbad():
            for x in xrange(10):
                if x == 5:
                    # Raise mid-iteration; the previous unused `a = 1/0`
                    # binding is dropped.
                    1 / 0
                yield x
        errors = []
        out = list(iter_errback(iterbad(), errors.append))
        self.assertEqual(out, [0, 1, 2, 3, 4])
        self.assertEqual(len(errors), 1)
        self.assertIsInstance(errors[0].value, ZeroDivisionError)
| bsd-3-clause |
hsaputra/tensorflow | tensorflow/contrib/distributions/python/kernel_tests/bijectors/sinh_arcsinh_bijector_test.py | 27 | 7437 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SinhArcsinh Bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
# pylint: disable=g-importing-member
from tensorflow.contrib.distributions.python.ops.bijectors.sinh_arcsinh import SinhArcsinh
from tensorflow.python.ops.distributions.bijector_test_util import assert_bijective_and_finite
from tensorflow.python.ops.distributions.bijector_test_util import assert_scalar_congruency
from tensorflow.python.platform import test
# pylint: enable=g-importing-member
class SinhArcsinhBijectorTest(test.TestCase):
  """Tests correctness of the power transformation."""
  # Forward map under test: y = sinh((arcsinh(x) + skewness) * tailweight).
  def testBijectorVersusNumpyRewriteOfBasicFunctions(self):
    with self.test_session():
      skewness = 0.2
      tailweight = 2.0
      bijector = SinhArcsinh(
          skewness=skewness,
          tailweight=tailweight,
          event_ndims=1,
          validate_args=True)
      self.assertEqual("SinhArcsinh", bijector.name)
      x = np.array([[[-2.01], [2.], [1e-4]]]).astype(np.float32)
      y = np.sinh((np.arcsinh(x) + skewness) * tailweight)
      self.assertAllClose(y, bijector.forward(x).eval())
      self.assertAllClose(x, bijector.inverse(y).eval())
      # Inverse log-det-Jacobian, summed over the event dimension
      # (event_ndims=1), spelled out in numpy.
      self.assertAllClose(
          np.sum(
              np.log(np.cosh(np.arcsinh(y) / tailweight - skewness)) -
              np.log(tailweight) - np.log(np.sqrt(y**2 + 1)),
              axis=-1), bijector.inverse_log_det_jacobian(y).eval())
      self.assertAllClose(
          -bijector.inverse_log_det_jacobian(y).eval(),
          bijector.forward_log_det_jacobian(x).eval(),
          rtol=1e-4,
          atol=0.)
  def testLargerTailWeightPutsMoreWeightInTails(self):
    with self.test_session():
      # Will broadcast together to shape [3, 2].
      x = [-1., 1.]
      tailweight = [[0.5], [1.0], [2.0]]
      bijector = SinhArcsinh(tailweight=tailweight, validate_args=True)
      y = bijector.forward(x).eval()
      # x = -1, 1 should be mapped to points symmetric about 0
      self.assertAllClose(y[:, 0], -1. * y[:, 1])
      # forward(1) should increase as tailweight increases, since higher
      # tailweight should map 1 to a larger number.
      forward_1 = y[:, 1]  # The positive values of y.
      self.assertLess(forward_1[0], forward_1[1])
      self.assertLess(forward_1[1], forward_1[2])
  def testSkew(self):
    with self.test_session():
      # Will broadcast together to shape [3, 2].
      x = [-1., 1.]
      skewness = [[-1.], [0.], [1.]]
      bijector = SinhArcsinh(skewness=skewness, validate_args=True)
      y = bijector.forward(x).eval()
      # For skew < 0, |forward(-1)| > |forward(1)|
      self.assertGreater(np.abs(y[0, 0]), np.abs(y[0, 1]))
      # For skew = 0, |forward(-1)| = |forward(1)|
      self.assertAllClose(np.abs(y[1, 0]), np.abs(y[1, 1]))
      # For skew > 0, |forward(-1)| < |forward(1)|
      self.assertLess(np.abs(y[2, 0]), np.abs(y[2, 1]))
  # assert_scalar_congruency checks the bijector approximately maps
  # uniform mass correctly over [lower_x, upper_x].
  def testScalarCongruencySkewness1Tailweight0p5(self):
    with self.test_session():
      bijector = SinhArcsinh(skewness=1.0, tailweight=0.5, validate_args=True)
      assert_scalar_congruency(bijector, lower_x=-2., upper_x=2.0, rtol=0.05)
  def testScalarCongruencySkewnessNeg1Tailweight1p5(self):
    with self.test_session():
      bijector = SinhArcsinh(skewness=-1.0, tailweight=1.5, validate_args=True)
      assert_scalar_congruency(bijector, lower_x=-2., upper_x=2.0, rtol=0.05)
  def testBijectiveAndFiniteSkewnessNeg1Tailweight0p5(self):
    with self.test_session():
      bijector = SinhArcsinh(skewness=-1., tailweight=0.5, validate_args=True)
      x = np.concatenate((-np.logspace(-2, 10, 1000), [0], np.logspace(
          -2, 10, 1000))).astype(np.float32)
      assert_bijective_and_finite(bijector, x, x, rtol=1e-3)
  def testBijectiveAndFiniteSkewness1Tailweight3(self):
    with self.test_session():
      bijector = SinhArcsinh(skewness=1., tailweight=3., validate_args=True)
      x = np.concatenate((-np.logspace(-2, 5, 1000), [0], np.logspace(
          -2, 5, 1000))).astype(np.float32)
      assert_bijective_and_finite(bijector, x, x, rtol=1e-3)
  def testBijectorEndpoints(self):
    with self.test_session():
      for dtype in (np.float32, np.float64):
        bijector = SinhArcsinh(
            skewness=dtype(0.), tailweight=dtype(1.), validate_args=True)
        bounds = np.array(
            [np.finfo(dtype).min, np.finfo(dtype).max], dtype=dtype)
        # Note that the above bijector is the identity bijector. Hence, the
        # log_det_jacobian will be 0. Because of this we use atol.
        assert_bijective_and_finite(bijector, bounds, bounds, atol=2e-6)
  def testBijectorOverRange(self):
    with self.test_session():
      for dtype in (np.float32, np.float64):
        skewness = np.array([1.2, 5.], dtype=dtype)
        tailweight = np.array([2., 10.], dtype=dtype)
        # The inverse will be defined up to where sinh is valid, which is
        # arcsinh(np.finfo(dtype).max).
        log_boundary = np.log(
            np.sinh(np.arcsinh(np.finfo(dtype).max) / tailweight - skewness))
        x = np.array([
            np.logspace(-2, log_boundary[0], base=np.e, num=1000),
            np.logspace(-2, log_boundary[1], base=np.e, num=1000)
        ], dtype=dtype)
        # Ensure broadcasting works.
        x = np.swapaxes(x, 0, 1)
        y = np.sinh((np.arcsinh(x) + skewness) * tailweight)
        bijector = SinhArcsinh(
            skewness=skewness, tailweight=tailweight, validate_args=True)
        self.assertAllClose(y, bijector.forward(x).eval(), rtol=1e-4, atol=0.)
        self.assertAllClose(x, bijector.inverse(y).eval(), rtol=1e-4, atol=0.)
        # Do the numpy calculation in float128 to avoid inf/nan.
        y_float128 = np.float128(y)
        self.assertAllClose(
            np.log(np.cosh(
                np.arcsinh(y_float128) / tailweight - skewness) / np.sqrt(
                    y_float128**2 + 1)) -
            np.log(tailweight),
            bijector.inverse_log_det_jacobian(y).eval(),
            rtol=1e-4,
            atol=0.)
        self.assertAllClose(
            -bijector.inverse_log_det_jacobian(y).eval(),
            bijector.forward_log_det_jacobian(x).eval(),
            rtol=1e-4,
            atol=0.)
  def testZeroTailweightRaises(self):
    with self.test_session():
      with self.assertRaisesOpError("not positive"):
        SinhArcsinh(tailweight=0., validate_args=True).forward(1.0).eval()
  def testDefaultDtypeIsFloat32(self):
    with self.test_session():
      bijector = SinhArcsinh()
      self.assertEqual(bijector.tailweight.dtype, np.float32)
      self.assertEqual(bijector.skewness.dtype, np.float32)
test.main()
| apache-2.0 |
jedie/pypyjs-standalone | website/js/pypy.js-0.3.0/lib/modules/test/test_genericpath.py | 8 | 10778 | """
Tests common to genericpath, macpath, ntpath and posixpath
"""
import unittest
from test import test_support
import os
import genericpath
import sys
def safe_rmdir(dirname):
try:
os.rmdir(dirname)
except OSError:
pass
class GenericTest(unittest.TestCase):
# The path module to be tested
pathmodule = genericpath
common_attributes = ['commonprefix', 'getsize', 'getatime', 'getctime',
'getmtime', 'exists', 'isdir', 'isfile']
attributes = []
def test_no_argument(self):
for attr in self.common_attributes + self.attributes:
with self.assertRaises(TypeError):
getattr(self.pathmodule, attr)()
raise self.fail("{}.{}() did not raise a TypeError"
.format(self.pathmodule.__name__, attr))
def test_commonprefix(self):
commonprefix = self.pathmodule.commonprefix
self.assertEqual(
commonprefix([]),
""
)
self.assertEqual(
commonprefix(["/home/swenson/spam", "/home/swen/spam"]),
"/home/swen"
)
self.assertEqual(
commonprefix(["/home/swen/spam", "/home/swen/eggs"]),
"/home/swen/"
)
self.assertEqual(
commonprefix(["/home/swen/spam", "/home/swen/spam"]),
"/home/swen/spam"
)
self.assertEqual(
commonprefix(["home:swenson:spam", "home:swen:spam"]),
"home:swen"
)
self.assertEqual(
commonprefix([":home:swen:spam", ":home:swen:eggs"]),
":home:swen:"
)
self.assertEqual(
commonprefix([":home:swen:spam", ":home:swen:spam"]),
":home:swen:spam"
)
testlist = ['', 'abc', 'Xbcd', 'Xb', 'XY', 'abcd',
'aXc', 'abd', 'ab', 'aX', 'abcX']
for s1 in testlist:
for s2 in testlist:
p = commonprefix([s1, s2])
self.assertTrue(s1.startswith(p))
self.assertTrue(s2.startswith(p))
if s1 != s2:
n = len(p)
self.assertNotEqual(s1[n:n+1], s2[n:n+1])
def test_getsize(self):
f = open(test_support.TESTFN, "wb")
try:
f.write("foo")
f.close()
self.assertEqual(self.pathmodule.getsize(test_support.TESTFN), 3)
finally:
if not f.closed:
f.close()
test_support.unlink(test_support.TESTFN)
def test_time(self):
f = open(test_support.TESTFN, "wb")
try:
f.write("foo")
f.close()
f = open(test_support.TESTFN, "ab")
f.write("bar")
f.close()
f = open(test_support.TESTFN, "rb")
d = f.read()
f.close()
self.assertEqual(d, "foobar")
self.assertLessEqual(
self.pathmodule.getctime(test_support.TESTFN),
self.pathmodule.getmtime(test_support.TESTFN)
)
finally:
if not f.closed:
f.close()
test_support.unlink(test_support.TESTFN)
def test_exists(self):
self.assertIs(self.pathmodule.exists(test_support.TESTFN), False)
f = open(test_support.TESTFN, "wb")
try:
f.write("foo")
f.close()
self.assertIs(self.pathmodule.exists(test_support.TESTFN), True)
if not self.pathmodule == genericpath:
self.assertIs(self.pathmodule.lexists(test_support.TESTFN),
True)
finally:
if not f.close():
f.close()
test_support.unlink(test_support.TESTFN)
def test_isdir(self):
self.assertIs(self.pathmodule.isdir(test_support.TESTFN), False)
f = open(test_support.TESTFN, "wb")
try:
f.write("foo")
f.close()
self.assertIs(self.pathmodule.isdir(test_support.TESTFN), False)
os.remove(test_support.TESTFN)
os.mkdir(test_support.TESTFN)
self.assertIs(self.pathmodule.isdir(test_support.TESTFN), True)
os.rmdir(test_support.TESTFN)
finally:
if not f.close():
f.close()
test_support.unlink(test_support.TESTFN)
safe_rmdir(test_support.TESTFN)
def test_isfile(self):
self.assertIs(self.pathmodule.isfile(test_support.TESTFN), False)
f = open(test_support.TESTFN, "wb")
try:
f.write("foo")
f.close()
self.assertIs(self.pathmodule.isfile(test_support.TESTFN), True)
os.remove(test_support.TESTFN)
os.mkdir(test_support.TESTFN)
self.assertIs(self.pathmodule.isfile(test_support.TESTFN), False)
os.rmdir(test_support.TESTFN)
finally:
if not f.close():
f.close()
test_support.unlink(test_support.TESTFN)
safe_rmdir(test_support.TESTFN)
# Following TestCase is not supposed to be run from test_genericpath.
# It is inherited by other test modules (macpath, ntpath, posixpath).
class CommonTest(GenericTest):
    # The path module to be tested
    pathmodule = None
    common_attributes = GenericTest.common_attributes + [
        # Properties
        'curdir', 'pardir', 'extsep', 'sep',
        'pathsep', 'defpath', 'altsep', 'devnull',
        # Methods
        # NOTE(review): 'normpath' is listed twice below — harmless for the
        # no-argument check but likely unintended.
        'normcase', 'splitdrive', 'expandvars', 'normpath', 'abspath',
        'join', 'split', 'splitext', 'isabs', 'basename', 'dirname',
        'lexists', 'islink', 'ismount', 'expanduser', 'normpath', 'realpath',
    ]
    def test_normcase(self):
        # Check that normcase() is idempotent
        p = "FoO/./BaR"
        p = self.pathmodule.normcase(p)
        self.assertEqual(p, self.pathmodule.normcase(p))
    def test_splitdrive(self):
        # splitdrive for non-NT paths
        splitdrive = self.pathmodule.splitdrive
        self.assertEqual(splitdrive("/foo/bar"), ("", "/foo/bar"))
        self.assertEqual(splitdrive("foo:bar"), ("", "foo:bar"))
        self.assertEqual(splitdrive(":foo:bar"), ("", ":foo:bar"))
    def test_expandvars(self):
        if self.pathmodule.__name__ == 'macpath':
            self.skipTest('macpath.expandvars is a stub')
        expandvars = self.pathmodule.expandvars
        # Run with a controlled environment so only the variables set here
        # are visible to expandvars().
        with test_support.EnvironmentVarGuard() as env:
            env.clear()
            env["foo"] = "bar"
            env["{foo"] = "baz1"
            env["{foo}"] = "baz2"
            self.assertEqual(expandvars("foo"), "foo")
            self.assertEqual(expandvars("$foo bar"), "bar bar")
            self.assertEqual(expandvars("${foo}bar"), "barbar")
            self.assertEqual(expandvars("$[foo]bar"), "$[foo]bar")
            self.assertEqual(expandvars("$bar bar"), "$bar bar")
            self.assertEqual(expandvars("$?bar"), "$?bar")
            self.assertEqual(expandvars("$foo}bar"), "bar}bar")
            self.assertEqual(expandvars("${foo"), "${foo")
            self.assertEqual(expandvars("${{foo}}"), "baz1}")
            self.assertEqual(expandvars("$foo$foo"), "barbar")
            self.assertEqual(expandvars("$bar$bar"), "$bar$bar")
    @unittest.skipUnless(test_support.FS_NONASCII, 'need test_support.FS_NONASCII')
    def test_expandvars_nonascii(self):
        if self.pathmodule.__name__ == 'macpath':
            self.skipTest('macpath.expandvars is a stub')
        expandvars = self.pathmodule.expandvars
        def check(value, expected):
            self.assertEqual(expandvars(value), expected)
        # Exercise both byte-string and unicode inputs using a non-ASCII
        # character encodable in the filesystem encoding.
        encoding = sys.getfilesystemencoding()
        with test_support.EnvironmentVarGuard() as env:
            env.clear()
            unonascii = test_support.FS_NONASCII
            snonascii = unonascii.encode(encoding)
            env['spam'] = snonascii
            env[snonascii] = 'ham' + snonascii
            check(snonascii, snonascii)
            check('$spam bar', '%s bar' % snonascii)
            check('${spam}bar', '%sbar' % snonascii)
            check('${%s}bar' % snonascii, 'ham%sbar' % snonascii)
            check('$bar%s bar' % snonascii, '$bar%s bar' % snonascii)
            check('$spam}bar', '%s}bar' % snonascii)
            check(unonascii, unonascii)
            check(u'$spam bar', u'%s bar' % unonascii)
            check(u'${spam}bar', u'%sbar' % unonascii)
            check(u'${%s}bar' % unonascii, u'ham%sbar' % unonascii)
            check(u'$bar%s bar' % unonascii, u'$bar%s bar' % unonascii)
            check(u'$spam}bar', u'%s}bar' % unonascii)
    def test_abspath(self):
        self.assertIn("foo", self.pathmodule.abspath("foo"))
        # Abspath returns bytes when the arg is bytes
        for path in ('', 'foo', 'f\xf2\xf2', '/foo', 'C:\\'):
            self.assertIsInstance(self.pathmodule.abspath(path), str)
    def test_realpath(self):
        self.assertIn("foo", self.pathmodule.realpath("foo"))
    def test_normpath_issue5827(self):
        # Make sure normpath preserves unicode
        for path in (u'', u'.', u'/', u'\\', u'///foo/.//bar//'):
            self.assertIsInstance(self.pathmodule.normpath(path), unicode)
    def test_abspath_issue3426(self):
        # Check that abspath returns unicode when the arg is unicode
        # with both ASCII and non-ASCII cwds.
        abspath = self.pathmodule.abspath
        for path in (u'', u'fuu', u'f\xf9\xf9', u'/fuu', u'U:\\'):
            self.assertIsInstance(abspath(path), unicode)
        unicwd = u'\xe7w\xf0'
        try:
            fsencoding = test_support.TESTFN_ENCODING or "ascii"
            asciival = unicwd.encode(fsencoding)
            if fsencoding == "mbcs":
                # http://bugs.python.org/issue850997
                v = asciival.find('?')
                if v >= 0:
                    raise UnicodeEncodeError(fsencoding, unicwd, v, v, asciival)
        except (AttributeError, UnicodeEncodeError):
            # FS encoding is probably ASCII or windows and codepage is non-Latin1
            pass
        else:
            # Only run the non-ASCII-cwd half when the cwd name is actually
            # encodable with the filesystem encoding.
            with test_support.temp_cwd(unicwd):
                for path in (u'', u'fuu', u'f\xf9\xf9', u'/fuu', u'U:\\'):
                    self.assertIsInstance(abspath(path), unicode)
    @unittest.skipIf(sys.platform == 'darwin',
        "Mac OS X denies the creation of a directory with an invalid utf8 name")
    def test_nonascii_abspath(self):
        # Test non-ASCII, non-UTF8 bytes in the path.
        with test_support.temp_cwd('\xe7w\xf0'):
            self.test_abspath()
def test_main():
    """Module entry point: run the GenericTest suite via test_support."""
    test_support.run_unittest(GenericTest)


# Allow running this test file directly.
if __name__=="__main__":
    test_main()
| mit |
eliasdesousa/indico | indico/core/signals/acl.py | 2 | 3773 | # This file is part of Indico.
# Copyright (C) 2002 - 2017 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from blinker import Namespace
# All ACL-related signals live in a single blinker namespace.
_signals = Namespace()


# Consulted during ProtectionMixin.can_access; contract in the doc text below.
can_access = _signals.signal('can-access', """
Called when `ProtectionMixin.can_access` is used to determine if a
user can access something or not.
The `sender` is the type of the object that's using the mixin. The
actual instance is passed as `obj`. The `user` and `allow_admin`
arguments of `can_access` are passed as kwargs with the same name.
The `authorized` argument is ``None`` when this signal is called at
the beginning of the access check and ``True`` or ``False`` at the end
when regular access rights have already been checked. For expensive
checks (such as anything involving database queries) it is recommended
to skip the check while `authorized` is ``None`` since the regular
access check is likely to be cheaper (due to ACLs being preloaded etc).
If the signal returns ``True`` or ``False``, the access check succeeds
or fails immediately. If multiple subscribers to the signal return
contradictory results, ``False`` wins and access is denied.
""")

# Consulted during ProtectionMixin.can_manage; contract in the doc text below.
can_manage = _signals.signal('can-manage', """
Called when `ProtectionMixin.can_manage` is used to determine if a
user can manage something or not.
The `sender` is the type of the object that's using the mixin. The
actual instance is passed as `obj`. The `user`, `role`, `allow_admin`,
`check_parent` and `explicit_role` arguments of `can_manage` are
passed as kwargs with the same name.
If the signal returns ``True`` or ``False``, the access check succeeds
or fails without any further checks. If multiple subscribers to the
signal return contradictory results, ``False`` wins and access is
denied.
""")

# Emitted whenever a single ACL entry is added, updated or removed.
entry_changed = _signals.signal('entry-changed', """
Called when an ACL entry is changed.
The `sender` is the type of the object that's using the mixin. The
actual instance is passed as `obj`. The `User`, `GroupProxy` or
`EmailPrincipal` is passed as `principal` and `entry` contains the
actual ACL entry (a `PrincipalMixin` instance) or ``None`` in case
the entry was deleted. `is_new` is a boolean indicating whether
the given principal was in the ACL before. If `quiet` is ``True``,
signal handlers should not perform noisy actions such as logging or
sending emails related to the change.
If the ACL uses roles, `old_data` will contain a dictionary of the
previous roles/permissions (see `PrincipalRolesMixin.current_data`).
""")

# Emitted when an object's protection mode is switched.
protection_changed = _signals.signal('protection-changed', """
Called when the protection mode of an object is changed.
The `sender` is the type of the object that's using the mixin. The
actual instance is passed as `obj`. The old protection mode is passed
as `old_mode`, the new mode as `mode`.
""")

# Hooked by plugins to contribute additional ManagementRole subclasses.
get_management_roles = _signals.signal('get-management-roles', """
Expected to return `ManagementRole` subclasses. The `sender` is the
type of the object the roles may be used for. Functions subscribing
to this signal **MUST** check the sender by specifying it using the
first argument of `connect_via()` or by comparing it inside the
function.
""")
| gpl-3.0 |
tensorflow/models | official/vision/beta/modeling/layers/box_sampler.py | 1 | 3406 | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains definitions of box sampler."""
# Import libraries
import tensorflow as tf
from official.vision.beta.ops import sampling_ops
@tf.keras.utils.register_keras_serializable(package='Vision')
class BoxSampler(tf.keras.layers.Layer):
  """Samples a balanced, fixed-size subset of positive and negative boxes."""

  def __init__(self,
               num_samples: int = 512,
               foreground_fraction: float = 0.25,
               **kwargs):
    """Initializes a box sampler.

    Args:
      num_samples: An `int` of the number of sampled boxes per image.
      foreground_fraction: A `float` in [0, 1], what percentage of boxes should
        be sampled from the positive examples.
      **kwargs: Additional keyword arguments passed to Layer.
    """
    self._config_dict = {
        'num_samples': num_samples,
        'foreground_fraction': foreground_fraction,
    }
    super().__init__(**kwargs)

  def call(self, positive_matches: tf.Tensor, negative_matches: tf.Tensor,
           ignored_matches: tf.Tensor):
    """Samples and selects positive and negative instances.

    Args:
      positive_matches: A `bool` tensor of shape of [batch, N] where N is the
        number of instances; `True` marks a positive example.
      negative_matches: A `bool` tensor of shape of [batch, N]; `True` marks a
        negative example.
      ignored_matches: A `bool` tensor of shape of [batch, N]; `True` marks an
        instance that must be ignored.

    Returns:
      A `tf.tensor` of shape of [batch_size, K], storing the indices of the
      sampled examples, where K is `num_samples`.
    """
    # A box is eligible if it matched either way and is not explicitly ignored.
    eligible = tf.logical_and(
        tf.logical_or(positive_matches, negative_matches),
        tf.logical_not(ignored_matches))

    balanced_sampler = sampling_ops.BalancedPositiveNegativeSampler(
        positive_fraction=self._config_dict['foreground_fraction'],
        is_static=True)

    # Subsample each image in the batch independently.
    per_image_indicators = [
        balanced_sampler.subsample(eligible[image_idx],
                                   self._config_dict['num_samples'],
                                   positive_matches[image_idx])
        for image_idx in range(eligible.shape[0])
    ]
    indicator_batch = tf.stack(per_image_indicators)

    # top_k over the 0/1 indicators recovers the indices of sampled examples.
    _, sampled_indices = tf.nn.top_k(
        tf.cast(indicator_batch, dtype=tf.int32),
        k=self._config_dict['num_samples'],
        sorted=True)
    return sampled_indices

  def get_config(self):
    """Returns the layer configuration used for serialization."""
    return self._config_dict

  @classmethod
  def from_config(cls, config):
    """Recreates the layer from a `get_config` dictionary."""
    return cls(**config)
| apache-2.0 |
Teagan42/home-assistant | homeassistant/components/smappee/switch.py | 7 | 2707 | """Support for interacting with Smappee Comport Plugs."""
import logging
from homeassistant.components.switch import SwitchDevice
from . import DATA_SMAPPEE
_LOGGER = logging.getLogger(__name__)
ICON = "mdi:power-plug"
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Smappee Comfort Plugs.

    Remote (cloud) actuators carry a location id; locally discovered
    actuators do not, which SmappeeSwitch uses to pick the right API call.
    """
    smappee = hass.data[DATA_SMAPPEE]

    entities = []
    if smappee.is_remote_active:
        # Iterate the mapping directly instead of calling .keys().
        for location_id in smappee.locations:
            for actuator in smappee.info[location_id].get("actuators"):
                # Only actuators with a non-empty name become entities.
                if actuator.get("name") != "":
                    _LOGGER.debug("Remote actuator %s", actuator)
                    entities.append(
                        SmappeeSwitch(
                            smappee, actuator.get("name"), location_id,
                            actuator.get("id")
                        )
                    )
    elif smappee.is_local_active:
        for device in smappee.local_devices:
            _LOGGER.debug("Local actuator %s", device)
            entities.append(
                SmappeeSwitch(smappee, device.get("value"), None,
                              device.get("key"))
            )
    add_entities(entities)
class SmappeeSwitch(SwitchDevice):
    """Representation of a Smappee Comport Plug."""

    def __init__(self, smappee, name, location_id, switch_id):
        """Initialize a new Smappee Comfort Plug."""
        self._name = name
        self._state = False
        self._smappee = smappee
        self._location_id = location_id
        self._switch_id = switch_id
        # A cloud-discovered plug has a location id; a local one does not.
        self._remoteswitch = location_id is not None

    @property
    def name(self):
        """Return the name of the switch."""
        return self._name

    @property
    def is_on(self):
        """Return true if switch is on."""
        return self._state

    @property
    def icon(self):
        """Icon to use in the frontend."""
        return ICON

    def turn_on(self, **kwargs):
        """Turn on Comport Plug."""
        succeeded = self._smappee.actuator_on(
            self._location_id, self._switch_id, self._remoteswitch
        )
        if succeeded:
            self._state = True

    def turn_off(self, **kwargs):
        """Turn off Comport Plug."""
        succeeded = self._smappee.actuator_off(
            self._location_id, self._switch_id, self._remoteswitch
        )
        if succeeded:
            self._state = False

    @property
    def device_state_attributes(self):
        """Return the state attributes of the device."""
        if not self._remoteswitch:
            return {}
        return {
            "Location Id": self._location_id,
            "Location Name": self._smappee.locations[self._location_id],
            "Switch Id": self._switch_id,
        }
| apache-2.0 |
luotao1/Paddle | python/paddle/fluid/tests/unittests/test_rand_op.py | 2 | 4607 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
from op_test import OpTest
import paddle.fluid.core as core
from paddle import rand
import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard
import paddle
class TestRandOpError(unittest.TestCase):
    """Validate that ``rand`` rejects unsupported input types."""

    def test_errors(self):
        main_prog = Program()
        start_prog = Program()
        with program_guard(main_prog, start_prog):

            def test_Variable():
                # A raw LoDTensor is not a valid shape argument.
                bad_input = fluid.create_lod_tensor(
                    np.zeros((4, 784)), [[1, 1, 1, 1]], fluid.CPUPlace())
                rand(bad_input)

            with self.assertRaises(TypeError):
                test_Variable()

            def test_dtype():
                # Integer dtypes are not supported by rand.
                dim_1 = fluid.layers.fill_constant([1], "int64", 3)
                dim_2 = fluid.layers.fill_constant([1], "int32", 5)
                rand(shape=[dim_1, dim_2], dtype='int32')

            with self.assertRaises(TypeError):
                test_dtype()
class TestRandOp(unittest.TestCase):
    """
    This class test the common usages of randop.
    """

    def run_net(self, use_cuda=False):
        """Build and execute a static graph exercising every supported way
        of specifying the output shape of ``rand``."""
        place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
        exe = fluid.Executor(place)
        train_program = fluid.Program()
        startup_program = fluid.Program()
        with fluid.program_guard(train_program, startup_program):
            result_0 = rand([3, 4])
            result_1 = rand([3, 4], 'float64')
            dim_1 = fluid.layers.fill_constant([1], "int64", 3)
            dim_2 = fluid.layers.fill_constant([1], "int32", 5)
            result_2 = rand(shape=[dim_1, dim_2])
            var_shape = fluid.data(name='var_shape', shape=[2], dtype="int64")
            result_3 = rand(var_shape)
            var_shape_int32 = fluid.data(
                name='var_shape_int32', shape=[2], dtype="int32")
            result_4 = rand(var_shape_int32)
        exe.run(startup_program)

        x1 = np.array([3, 2]).astype('int64')
        x2 = np.array([4, 3]).astype('int32')
        # BUG FIX: fetch_list previously listed result_1 twice and never
        # fetched result_0; fetch each result exactly once.
        exe.run(
            train_program,
            feed={"var_shape": x1,
                  "var_shape_int32": x2},
            fetch_list=[result_0, result_1, result_2, result_3, result_4])

    def test_run(self):
        """Run on CPU always, and on GPU when compiled with CUDA."""
        self.run_net(False)
        if core.is_compiled_with_cuda():
            self.run_net(True)
class TestRandOpForDygraph(unittest.TestCase):
    """
    This class test the common usages of randop.
    """

    def run_net(self, use_cuda=False):
        # Exercise every supported shape-specification style in dygraph mode.
        place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
        with fluid.dygraph.guard(place):
            # Shape as a plain list, with and without an explicit dtype.
            rand([3, 4])
            rand([3, 4], 'float64')
            # Shape as a list of 1-D shape tensors.
            dim_1 = fluid.layers.fill_constant([1], "int64", 3)
            dim_2 = fluid.layers.fill_constant([1], "int32", 5)
            rand(shape=[dim_1, dim_2])
            # Shape as a single tensor variable.
            var_shape = fluid.dygraph.to_variable(np.array([3, 4]))
            rand(var_shape)

    def test_run(self):
        # CPU always; GPU only when this build was compiled with CUDA.
        self.run_net(False)
        if core.is_compiled_with_cuda():
            self.run_net(True)
class TestRandDtype(unittest.TestCase):
    """Verify rand() honours paddle's global default dtype setting."""

    def test_default_dtype(self):
        paddle.disable_static()

        def test_default_fp16():
            # float16 is not a supported default dtype for rand.
            paddle.framework.set_default_dtype('float16')
            paddle.tensor.random.rand([2, 3])

        self.assertRaises(TypeError, test_default_fp16)

        def test_default_fp32():
            paddle.framework.set_default_dtype('float32')
            out = paddle.tensor.random.rand([2, 3])
            self.assertEqual(out.dtype, fluid.core.VarDesc.VarType.FP32)

        def test_default_fp64():
            paddle.framework.set_default_dtype('float64')
            out = paddle.tensor.random.rand([2, 3])
            self.assertEqual(out.dtype, fluid.core.VarDesc.VarType.FP64)

        # Run fp64 first, then fp32, so the default ends up as float32.
        test_default_fp64()
        test_default_fp32()
        paddle.enable_static()
# Allow running this test file directly.
if __name__ == "__main__":
    unittest.main()
| apache-2.0 |
carolFrohlich/nipype | nipype/interfaces/fsl/tests/test_auto_SliceTimer.py | 12 | 1464 | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from ....testing import assert_equal
from ..preprocess import SliceTimer
def test_SliceTimer_inputs():
    """Check SliceTimer's input trait metadata against the expected spec.

    Auto-generated by tools/checkspecs.py; regenerate rather than hand-edit.
    """
    # Expected metadata per input trait, keyed by trait name.
    input_map = dict(args=dict(argstr='%s',
                               ),
                     custom_order=dict(argstr='--ocustom=%s',
                                       ),
                     custom_timings=dict(argstr='--tcustom=%s',
                                         ),
                     environ=dict(nohash=True,
                                  usedefault=True,
                                  ),
                     global_shift=dict(argstr='--tglobal',
                                       ),
                     ignore_exception=dict(nohash=True,
                                           usedefault=True,
                                           ),
                     in_file=dict(argstr='--in=%s',
                                  mandatory=True,
                                  position=0,
                                  ),
                     index_dir=dict(argstr='--down',
                                    ),
                     interleaved=dict(argstr='--odd',
                                      ),
                     out_file=dict(argstr='--out=%s',
                                   genfile=True,
                                   hash_files=False,
                                   ),
                     output_type=dict(),
                     slice_direction=dict(argstr='--direction=%d',
                                          ),
                     terminal_output=dict(nohash=True,
                                          ),
                     time_repetition=dict(argstr='--repeat=%f',
                                          ),
                     )
    inputs = SliceTimer.input_spec()

    # Yield one assertion per (trait, metadata key) pair (nose-style generator).
    for key, metadata in list(input_map.items()):
        for metakey, value in list(metadata.items()):
            yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_SliceTimer_outputs():
    """Check SliceTimer's output trait metadata against the expected spec."""
    output_map = dict(slice_time_corrected_file=dict(),
                      )
    outputs = SliceTimer.output_spec()

    for key, metadata in list(output_map.items()):
        for metakey, value in list(metadata.items()):
            yield assert_equal, getattr(outputs.traits()[key], metakey), value
| bsd-3-clause |
mondwan/py-iems5723 | hw3/1155002613_hw3.py | 1 | 2439 | """
File: 1155002613_hw3.py
Author: Me
Email: 0
Github: 0
Description: Compute betweenness centrality and produce the network graph
"""
import os
import pydot
import networkx as nx
with open(os.path.join('.', 'input.csv'), 'r') as f:
    lines = [l.rstrip('\n') for l in f.readlines()]

# Process graph: build an undirected pydot graph from the adjacency-matrix
# CSV -- row/column headers are node labels, a cell of '1' marks an edge.
graph = pydot.Dot(graph_type='graph')
columns = (lines[0].split(','))[1:]
# enumerate(..., start=1) replaces the O(n) columns.index() lookup and is
# also robust to duplicate column labels.
for col_index, col_node_label in enumerate(columns, start=1):
    rows = lines[1:]
    for row in rows:
        values = row.split(',')
        row_node_label = values[0]
        if values[col_index] == '1':
            edge = pydot.Edge(
                col_node_label,
                row_node_label
            )
            graph.add_edge(edge)

# Calculate nodes' betweenness
nx_graph = nx.from_pydot(graph)
results = nx.edge_betweenness_centrality(nx_graph)

# Sort them by descending order
link_betweenness_pairs = sorted(
    results.items(),
    key=lambda x: x[1],
    reverse=True
)

# Add edge's label and change font's colour according to the specificaiton
final_graph = pydot.Dot(graph_type='graph')
max_val = -1
# enumerate replaces the original O(n) list.index() call per iteration;
# rank 0 holds the maximum since the list is sorted descending.
for rank, (edge, betweenness_val) in enumerate(link_betweenness_pairs):
    # Pen's width should be 5 times of the betweenness_val
    penwidth = betweenness_val * 5
    e = pydot.Edge(edge[0], edge[1])
    e.obj_dict['attributes']['label'] = str(betweenness_val)
    e.obj_dict['attributes']['penwidth'] = str(penwidth)
    # The maximum-betweenness edge (and any ties) is labelled in red,
    # every other edge in blue.
    if rank == 0:
        max_val = betweenness_val
    if betweenness_val == max_val:
        e.obj_dict['attributes']['fontcolor'] = 'red'
    else:
        e.obj_dict['attributes']['fontcolor'] = 'blue'
    # Add the edge to the finalized graph
    final_graph.add_edge(e)

# Output required png and txt
final_graph.write('1155002613.png', prog='neato', format='png')
with open(os.path.join('.', '1155002613.txt'), 'w') as f:
    for (edge, betweenness_val) in link_betweenness_pairs:
        # The edge ('A', '2')'s betweenness is 0.4
        f.write(
            "The edge ('%s', '%s')'s betweenness is %0.1f\n" %
            (edge[0], edge[1], betweenness_val)
        )
| mit |
snakeleon/YouCompleteMe-x86 | third_party/ycmd/ycmd/tests/clang/flags_test.py | 1 | 22341 | # Copyright (C) 2011, 2012 Google Inc.
#
# This file is part of ycmd.
#
# ycmd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ycmd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import * # noqa
import os
from nose.tools import eq_, ok_
from ycmd.completers.cpp import flags
from mock import patch, Mock
from ycmd.tests.test_utils import MacOnly
from ycmd.responses import NoExtraConfDetected
from ycmd.tests.clang import TemporaryClangProject, TemporaryClangTestDir
from hamcrest import assert_that, calling, contains, raises
@patch( 'ycmd.extra_conf_store.ModuleForSourceFile', return_value = Mock() )
def FlagsForFile_FlagsNotReady_test( *args ):
  """An extra-conf result with flags_ready = False yields no flags."""
  fake_flags = {
    'flags': [ ],
    'flags_ready': False
  }

  with patch( 'ycmd.completers.cpp.flags._CallExtraConfFlagsForFile',
              return_value = fake_flags ):
    flags_object = flags.Flags()
    flags_list = flags_object.FlagsForFile( '/foo', False )
    eq_( list( flags_list ), [ ] )
@patch( 'ycmd.extra_conf_store.ModuleForSourceFile', return_value = Mock() )
def FlagsForFile_BadNonUnicodeFlagsAreAlsoRemoved_test( *args ):
  """Byte-string flags (and the unused '-c') are stripped from the result."""
  fake_flags = {
    'flags': [ bytes( b'-c' ), '-c', bytes( b'-foo' ), '-bar' ]
  }

  with patch( 'ycmd.completers.cpp.flags._CallExtraConfFlagsForFile',
              return_value = fake_flags ):
    flags_object = flags.Flags()
    flags_list = flags_object.FlagsForFile( '/foo', False )
    eq_( list( flags_list ), [ '-foo', '-bar' ] )
@patch( 'ycmd.extra_conf_store.ModuleForSourceFile', return_value = Mock() )
def FlagsForFile_FlagsCachedByDefault_test( *args ):
  """Flags are cached per file: a changed extra-conf result is ignored."""
  flags_object = flags.Flags()

  results = { 'flags': [ '-x', 'c' ] }
  with patch( 'ycmd.completers.cpp.flags._CallExtraConfFlagsForFile',
              return_value = results ):
    flags_list = flags_object.FlagsForFile( '/foo', False )
    assert_that( flags_list, contains( '-x', 'c' ) )

  # The extra conf now returns different flags, but the cache must win.
  results[ 'flags' ] = [ '-x', 'c++' ]
  with patch( 'ycmd.completers.cpp.flags._CallExtraConfFlagsForFile',
              return_value = results ):
    flags_list = flags_object.FlagsForFile( '/foo', False )
    assert_that( flags_list, contains( '-x', 'c' ) )
@patch( 'ycmd.extra_conf_store.ModuleForSourceFile', return_value = Mock() )
def FlagsForFile_FlagsNotCachedWhenDoCacheIsFalse_test( *args ):
  """With do_cache = False, a fresh extra-conf result replaces old flags."""
  flags_object = flags.Flags()

  results = {
    'flags': [ '-x', 'c' ],
    'do_cache': False
  }
  with patch( 'ycmd.completers.cpp.flags._CallExtraConfFlagsForFile',
              return_value = results ):
    flags_list = flags_object.FlagsForFile( '/foo', False )
    assert_that( flags_list, contains( '-x', 'c' ) )

  # Because caching is disabled, the new flags must be picked up.
  results[ 'flags' ] = [ '-x', 'c++' ]
  with patch( 'ycmd.completers.cpp.flags._CallExtraConfFlagsForFile',
              return_value = results ):
    flags_list = flags_object.FlagsForFile( '/foo', False )
    assert_that( flags_list, contains( '-x', 'c++' ) )
@patch( 'ycmd.extra_conf_store.ModuleForSourceFile', return_value = Mock() )
def FlagsForFile_FlagsCachedWhenDoCacheIsTrue_test( *args ):
  """With do_cache = True, the first result stays cached for the file."""
  flags_object = flags.Flags()

  results = {
    'flags': [ '-x', 'c' ],
    'do_cache': True
  }
  with patch( 'ycmd.completers.cpp.flags._CallExtraConfFlagsForFile',
              return_value = results ):
    flags_list = flags_object.FlagsForFile( '/foo', False )
    assert_that( flags_list, contains( '-x', 'c' ) )

  # The changed extra-conf result must be ignored in favour of the cache.
  results[ 'flags' ] = [ '-x', 'c++' ]
  with patch( 'ycmd.completers.cpp.flags._CallExtraConfFlagsForFile',
              return_value = results ):
    flags_list = flags_object.FlagsForFile( '/foo', False )
    assert_that( flags_list, contains( '-x', 'c' ) )
def RemoveUnusedFlags_Passthrough_test():
  """Flags with nothing to strip are returned unchanged."""
  eq_( [ '-foo', '-bar' ],
       flags._RemoveUnusedFlags( [ '-foo', '-bar' ], 'file' ) )
def RemoveUnusedFlags_RemoveDashC_test():
  """'-c' is stripped wherever it appears in the flag list."""
  expected = [ '-foo', '-bar' ]
  to_remove = [ '-c' ]
  filename = 'file'

  eq_( expected,
       flags._RemoveUnusedFlags( expected + to_remove, filename ) )

  eq_( expected,
       flags._RemoveUnusedFlags( to_remove + expected, filename ) )

  eq_( expected,
       flags._RemoveUnusedFlags(
         expected[ :1 ] + to_remove + expected[ -1: ], filename ) )
def RemoveUnusedFlags_RemoveColor_test():
  """'--fcolor-diagnostics' is stripped wherever it appears."""
  expected = [ '-foo', '-bar' ]
  to_remove = [ '--fcolor-diagnostics' ]
  filename = 'file'

  eq_( expected,
       flags._RemoveUnusedFlags( expected + to_remove, filename ) )

  eq_( expected,
       flags._RemoveUnusedFlags( to_remove + expected, filename ) )

  eq_( expected,
       flags._RemoveUnusedFlags(
         expected[ :1 ] + to_remove + expected[ -1: ], filename ) )
def RemoveUnusedFlags_RemoveDashO_test():
  """'-o <output>' and its argument are stripped wherever they appear."""
  expected = [ '-foo', '-bar' ]
  to_remove = [ '-o', 'output_name' ]
  filename = 'file'

  eq_( expected,
       flags._RemoveUnusedFlags( expected + to_remove, filename ) )

  eq_( expected,
       flags._RemoveUnusedFlags( to_remove + expected, filename ) )

  eq_( expected,
       flags._RemoveUnusedFlags(
         expected[ :1 ] + to_remove + expected[ -1: ], filename ) )
def RemoveUnusedFlags_RemoveMP_test():
  """'-MP' is stripped wherever it appears."""
  expected = [ '-foo', '-bar' ]
  to_remove = [ '-MP' ]
  filename = 'file'

  eq_( expected,
       flags._RemoveUnusedFlags( expected + to_remove, filename ) )

  eq_( expected,
       flags._RemoveUnusedFlags( to_remove + expected, filename ) )

  eq_( expected,
       flags._RemoveUnusedFlags(
         expected[ :1 ] + to_remove + expected[ -1: ], filename ) )
def RemoveUnusedFlags_RemoveFilename_test():
  """The source filename itself is stripped from the flag list."""
  expected = [ 'foo', '-bar' ]
  to_remove = [ 'file' ]
  filename = 'file'

  eq_( expected,
       flags._RemoveUnusedFlags( expected + to_remove, filename ) )

  eq_( expected,
       flags._RemoveUnusedFlags( expected[ :1 ] + to_remove + expected[ 1: ],
                                 filename ) )

  eq_( expected,
       flags._RemoveUnusedFlags(
         expected[ :1 ] + to_remove + expected[ -1: ], filename ) )
def RemoveUnusedFlags_RemoveFlagWithoutPrecedingDashFlag_test():
  """A bare path not preceded by a dash-flag is stripped."""
  expected = [ 'g++', '-foo', '-x', 'c++', '-bar', 'include_dir' ]
  to_remove = [ 'unrelated_file' ]
  filename = 'file'

  eq_( expected,
       flags._RemoveUnusedFlags( expected + to_remove, filename ) )

  eq_( expected,
       flags._RemoveUnusedFlags( expected[ :1 ] + to_remove + expected[ 1: ],
                                 filename ) )
def RemoveUnusedFlags_Depfiles_test():
  """Dependency-file flags (-MMD/-MT/-MF/--serialize-diagnostics) are stripped."""
  full_flags = [
    '/bin/clang',
    '-x', 'objective-c',
    '-arch', 'armv7',
    '-MMD',
    '-MT', 'dependencies',
    '-MF', 'file',
    '--serialize-diagnostics', 'diagnostics'
  ]

  expected = [
    '/bin/clang',
    '-x', 'objective-c',
    '-arch', 'armv7',
  ]

  assert_that( flags._RemoveUnusedFlags( full_flags, 'test.m' ),
               contains( *expected ) )
def EnableTypoCorrection_Empty_test():
  """-fspell-checking is appended to an empty flag list."""
  eq_( flags._EnableTypoCorrection( [] ), [ '-fspell-checking' ] )
def EnableTypoCorrection_Trivial_test():
  """-fspell-checking is appended after existing unrelated flags."""
  eq_( flags._EnableTypoCorrection( [ '-x', 'c++' ] ),
       [ '-x', 'c++', '-fspell-checking' ] )
def EnableTypoCorrection_Reciprocal_test():
  """An explicit -fno-spell-checking suppresses the added flag."""
  eq_( flags._EnableTypoCorrection( [ '-fno-spell-checking' ] ),
       [ '-fno-spell-checking' ] )
def EnableTypoCorrection_ReciprocalOthers_test():
  """-fno-spell-checking wins even among other flags."""
  eq_( flags._EnableTypoCorrection( [ '-x', 'c++', '-fno-spell-checking' ] ),
       [ '-x', 'c++', '-fno-spell-checking' ] )
def RemoveUnusedFlags_RemoveFilenameWithoutPrecedingInclude_test():
  """Paths directly after an include-style flag are kept (nose generator)."""
  def tester( flag ):
    # The path right after `flag` must survive; the bare /moo/boo must not.
    expected = [ 'clang', flag, '/foo/bar', '-isystem/zoo/goo' ]

    eq_( expected,
         flags._RemoveUnusedFlags( expected + to_remove, filename ) )

    eq_( expected,
         flags._RemoveUnusedFlags( expected[ :1 ] + to_remove + expected[ 1: ],
                                   filename ) )

    eq_( expected + expected[ 1: ],
         flags._RemoveUnusedFlags( expected + to_remove + expected[ 1: ],
                                   filename ) )

  include_flags = [ '-isystem', '-I', '-iquote', '-isysroot', '--sysroot',
                    '-gcc-toolchain', '-include', '-include-pch',
                    '-iframework', '-F', '-imacros' ]
  to_remove = [ '/moo/boo' ]
  filename = 'file'

  # One sub-test per include-style flag.
  for flag in include_flags:
    yield tester, flag
def RemoveXclangFlags_test():
  """-Xclang argument pairs are stripped regardless of their position."""
  expected = [ '-I', '/foo/bar', '-DMACRO=Value' ]
  to_remove = [ '-Xclang', 'load', '-Xclang', 'libplugin.so',
                '-Xclang', '-add-plugin', '-Xclang', 'plugin-name' ]

  eq_( expected,
       flags._RemoveXclangFlags( expected + to_remove ) )

  eq_( expected,
       flags._RemoveXclangFlags( to_remove + expected ) )

  eq_( expected + expected,
       flags._RemoveXclangFlags( expected + to_remove + expected ) )
def AddLanguageFlagWhenAppropriate_Passthrough_test():
  """Flag lists with no recognised compiler are returned unchanged."""
  eq_( [ '-foo', '-bar' ],
       flags._AddLanguageFlagWhenAppropriate( [ '-foo', '-bar' ] ) )
def _AddLanguageFlagWhenAppropriateTester( compiler, language_flag = [] ):
  # Shared driver: checks that `language_flag` is inserted right after the
  # compiler, with any leading wrapper commands (ccache etc.) removed.
  # NOTE(review): the mutable default list is never mutated here, so it is
  # harmless, but a tuple default would be safer.
  to_removes = [
    [],
    [ '/usr/bin/ccache' ],
    [ 'some_command', 'another_command' ]
  ]
  expected = [ '-foo', '-bar' ]

  for to_remove in to_removes:
    eq_( [ compiler ] + language_flag + expected,
         flags._AddLanguageFlagWhenAppropriate( to_remove + [ compiler ] +
                                                expected ) )
def AddLanguageFlagWhenAppropriate_CCompiler_test():
  """C compiler names get no explicit language flag (nose generator)."""
  compilers = [ 'cc', 'gcc', 'clang', '/usr/bin/cc',
                '/some/other/path', 'some_command' ]

  for compiler in compilers:
    yield _AddLanguageFlagWhenAppropriateTester, compiler
def AddLanguageFlagWhenAppropriate_CppCompiler_test():
  """C++ compilers, versioned or not, get '-x c++' added (nose generator)."""
  compilers = [ 'c++', 'g++', 'clang++', '/usr/bin/c++',
                '/some/other/path++', 'some_command++',
                'c++-5', 'g++-5.1', 'clang++-3.7.3', '/usr/bin/c++-5',
                'c++-5.11', 'g++-50.1.49', 'clang++-3.12.3', '/usr/bin/c++-10',
                '/some/other/path++-4.9.3', 'some_command++-5.1',
                '/some/other/path++-4.9.31', 'some_command++-5.10' ]

  for compiler in compilers:
    yield _AddLanguageFlagWhenAppropriateTester, compiler, [ '-x', 'c++' ]
def ExtraClangFlags_test():
  """Exactly one -resource-dir flag exists and points at clang_includes."""
  flags_object = flags.Flags()
  num_found = 0
  for flag in flags_object.extra_clang_flags:
    if flag.startswith( '-resource-dir=' ):
      ok_( flag.endswith( 'clang_includes' ) )
      num_found += 1

  eq_( 1, num_found )
@MacOnly
@patch( 'ycmd.completers.cpp.flags._GetMacClangVersionList',
        return_value = [ '1.0.0', '7.0.1', '7.0.2', '___garbage__' ] )
@patch( 'ycmd.completers.cpp.flags._MacClangIncludeDirExists',
        side_effect = [ False, True, True, True ] )
def Mac_LatestMacClangIncludes_test( *args ):
  """The newest valid clang version with an existing include dir is chosen."""
  eq_( flags._LatestMacClangIncludes(),
       [ '/Applications/Xcode.app/Contents/Developer/Toolchains/'
         'XcodeDefault.xctoolchain/usr/lib/clang/7.0.2/include' ] )
@MacOnly
def Mac_LatestMacClangIncludes_NoSuchDirectory_test():
  """An unreadable toolchain directory yields no include paths."""
  def RaiseOSError( x ):
    raise OSError( x )

  with patch( 'os.listdir', side_effect = RaiseOSError ):
    eq_( flags._LatestMacClangIncludes(), [] )
@MacOnly
def Mac_PathsForAllMacToolchains_test():
  """Both the Xcode and the CommandLineTools toolchain paths are returned."""
  eq_( flags._PathsForAllMacToolchains( 'test' ),
       [ '/Applications/Xcode.app/Contents/Developer/Toolchains/'
         'XcodeDefault.xctoolchain/test',
         '/Library/Developer/CommandLineTools/test' ] )
def CompilationDatabase_NoDatabase_test():
  """With neither a database nor an extra conf, NoExtraConfDetected is raised."""
  with TemporaryClangTestDir() as tmp_dir:
    assert_that(
      calling( flags.Flags().FlagsForFile ).with_args(
        os.path.join( tmp_dir, 'test.cc' ) ),
      raises( NoExtraConfDetected ) )
def CompilationDatabase_FileNotInDatabase_test():
  """An empty database yields empty flags rather than an error."""
  compile_commands = [ ]
  with TemporaryClangTestDir() as tmp_dir:
    with TemporaryClangProject( tmp_dir, compile_commands ):
      eq_(
        flags.Flags().FlagsForFile( os.path.join( tmp_dir, 'test.cc' ) ),
        [] )
def CompilationDatabase_InvalidDatabase_test():
  """A malformed database is treated the same as a missing extra conf."""
  with TemporaryClangTestDir() as tmp_dir:
    with TemporaryClangProject( tmp_dir, 'this is junk' ):
      assert_that(
        calling( flags.Flags().FlagsForFile ).with_args(
          os.path.join( tmp_dir, 'test.cc' ) ),
        raises( NoExtraConfDetected ) )
def CompilationDatabase_UseFlagsFromDatabase_test():
  """Flags for a file in compile_commands.json come from the database,
  with relative include paths made absolute."""
  with TemporaryClangTestDir() as tmp_dir:
    compile_commands = [
      {
        'directory': tmp_dir,
        'command': 'clang++ -x c++ -I. -I/absolute/path -Wall',
        'file': os.path.join( tmp_dir, 'test.cc' ),
      },
    ]

    with TemporaryClangProject( tmp_dir, compile_commands ):
      assert_that(
        flags.Flags().FlagsForFile(
          os.path.join( tmp_dir, 'test.cc' ),
          add_extra_clang_flags = False ),
        contains( 'clang++',
                  '-x',
                  'c++',
                  '-x',
                  'c++',
                  '-I' + os.path.normpath( tmp_dir ),
                  '-I' + os.path.normpath( '/absolute/path' ),
                  '-Wall' ) )
def CompilationDatabase_UseFlagsFromSameDir_test():
  """Files absent from the database inherit cached flags from a sibling
  file in the same directory."""
  with TemporaryClangTestDir() as tmp_dir:
    compile_commands = [
      {
        'directory': tmp_dir,
        'command': 'clang++ -x c++ -Wall',
        'file': os.path.join( tmp_dir, 'test.cc' ),
      },
    ]

    with TemporaryClangProject( tmp_dir, compile_commands ):
      f = flags.Flags()

      # If we now ask for a file _not_ in the DB, we get []
      eq_(
        f.FlagsForFile(
          os.path.join( tmp_dir, 'test1.cc' ),
          add_extra_clang_flags = False ),
        [] )

      # Then, we ask for a file that _is_ in the db. It will cache these flags
      # against the files' directory.
      assert_that(
        f.FlagsForFile(
          os.path.join( tmp_dir, 'test.cc' ),
          add_extra_clang_flags = False ),
        contains( 'clang++',
                  '-x',
                  'c++',
                  '-x',
                  'c++',
                  '-Wall' ) )

      # If we now ask for a file _not_ in the DB, but in the same dir, we should
      # get the same flags
      assert_that(
        f.FlagsForFile(
          os.path.join( tmp_dir, 'test2.cc' ),
          add_extra_clang_flags = False ),
        contains( 'clang++',
                  '-x',
                  'c++',
                  '-x',
                  'c++',
                  '-Wall' ) )
def CompilationDatabase_HeaderFileHeuristic_test():
  """Asking for a header returns the flags of the corresponding source file."""
  with TemporaryClangTestDir() as tmp_dir:
    compile_commands = [
      {
        'directory': tmp_dir,
        'command': 'clang++ -x c++ -Wall',
        'file': os.path.join( tmp_dir, 'test.cc' ),
      },
    ]

    with TemporaryClangProject( tmp_dir, compile_commands ):
      # If we ask for a header file, it returns the equivalent cc file
      assert_that(
        flags.Flags().FlagsForFile(
          os.path.join( tmp_dir, 'test.h' ),
          add_extra_clang_flags = False ),
        contains( 'clang++',
                  '-x',
                  'c++',
                  '-x',
                  'c++',
                  '-Wall' ) )
def CompilationDatabase_HeaderFileHeuristicNotFound_test():
  """A header with no matching source file in the database yields no flags."""
  with TemporaryClangTestDir() as tmp_dir:
    compile_commands = [
      {
        'directory': tmp_dir,
        'command': 'clang++ -x c++ -Wall',
        'file': os.path.join( tmp_dir, 'test.cc' ),
      },
    ]

    with TemporaryClangProject( tmp_dir, compile_commands ):
      # If we ask for a header file, it returns the equivalent cc file (if and
      # only if there are flags for that file)
      eq_(
        flags.Flags().FlagsForFile(
          os.path.join( tmp_dir, 'not_in_the_db.h' ),
          add_extra_clang_flags = False ),
        [] )
def _MakeRelativePathsInFlagsAbsoluteTest( test ):
  # Shared driver: runs _MakeRelativePathsInFlagsAbsolute on one test dict
  # ('flags' in, 'expect' out, optional 'wd' working directory).
  wd = test[ 'wd' ] if 'wd' in test else '/not_test'
  assert_that(
    flags._MakeRelativePathsInFlagsAbsolute( test[ 'flags' ], wd ),
    contains( *test[ 'expect' ] ) )
def MakeRelativePathsInFlagsAbsolute_test():
  # Table-driven: each entry gives the input 'flags', the 'expect'ed output
  # and an optional working directory 'wd'.  Relative paths after
  # -I/-isystem/-iquote/-isysroot (positional or fused) and --sysroot= are
  # made absolute against 'wd'; other --flag=value spellings pass through.
  tests = [
    # Already absolute, positional arguments
    {
      'flags': [ '-isystem', '/test' ],
      'expect': [ '-isystem', os.path.normpath( '/test' ) ],
    },
    {
      'flags': [ '-I', '/test' ],
      'expect': [ '-I', os.path.normpath( '/test' ) ],
    },
    {
      'flags': [ '-iquote', '/test' ],
      'expect': [ '-iquote', os.path.normpath( '/test' ) ],
    },
    {
      'flags': [ '-isysroot', '/test' ],
      'expect': [ '-isysroot', os.path.normpath( '/test' ) ],
    },
    # Already absolute, single arguments
    {
      'flags': [ '-isystem/test' ],
      'expect': [ '-isystem' + os.path.normpath( '/test' ) ],
    },
    {
      'flags': [ '-I/test' ],
      'expect': [ '-I' + os.path.normpath( '/test' ) ],
    },
    {
      'flags': [ '-iquote/test' ],
      'expect': [ '-iquote' + os.path.normpath( '/test' ) ],
    },
    {
      'flags': [ '-isysroot/test' ],
      'expect': [ '-isysroot' + os.path.normpath( '/test' ) ],
    },
    # Already absolute, double-dash arguments (only --sysroot= is rewritten)
    {
      'flags': [ '--isystem=/test' ],
      'expect': [ '--isystem=/test' ],
    },
    {
      'flags': [ '--I=/test' ],
      'expect': [ '--I=/test' ],
    },
    {
      'flags': [ '--iquote=/test' ],
      'expect': [ '--iquote=/test' ],
    },
    {
      'flags': [ '--sysroot=/test' ],
      'expect': [ '--sysroot=' + os.path.normpath( '/test' ) ],
    },
    # Relative, positional arguments
    {
      'flags': [ '-isystem', 'test' ],
      'expect': [ '-isystem', os.path.normpath( '/test/test' ) ],
      'wd': '/test',
    },
    {
      'flags': [ '-I', 'test' ],
      'expect': [ '-I', os.path.normpath( '/test/test' ) ],
      'wd': '/test',
    },
    {
      'flags': [ '-iquote', 'test' ],
      'expect': [ '-iquote', os.path.normpath( '/test/test' ) ],
      'wd': '/test',
    },
    {
      'flags': [ '-isysroot', 'test' ],
      'expect': [ '-isysroot', os.path.normpath( '/test/test' ) ],
      'wd': '/test',
    },
    # Relative, single arguments
    {
      'flags': [ '-isystemtest' ],
      'expect': [ '-isystem' + os.path.normpath( '/test/test' ) ],
      'wd': '/test',
    },
    {
      'flags': [ '-Itest' ],
      'expect': [ '-I' + os.path.normpath( '/test/test' ) ],
      'wd': '/test',
    },
    {
      'flags': [ '-iquotetest' ],
      'expect': [ '-iquote' + os.path.normpath( '/test/test' ) ],
      'wd': '/test',
    },
    {
      'flags': [ '-isysroottest' ],
      'expect': [ '-isysroot' + os.path.normpath( '/test/test' ) ],
      'wd': '/test',
    },
    # Relative, double-dash arguments (original comment said "Already
    # absolute" — these are the relative cases; only --sysroot= is rewritten)
    {
      'flags': [ '--isystem=test' ],
      'expect': [ '--isystem=test' ],
      'wd': '/test',
    },
    {
      'flags': [ '--I=test' ],
      'expect': [ '--I=test' ],
      'wd': '/test',
    },
    {
      'flags': [ '--iquote=test' ],
      'expect': [ '--iquote=test' ],
      'wd': '/test',
    },
    {
      'flags': [ '--sysroot=test' ],
      'expect': [ '--sysroot=' + os.path.normpath( '/test/test' ) ],
      'wd': '/test',
    },
  ]
  # Nose-style generator test: yields one sub-test per table entry.
  for test in tests:
    yield _MakeRelativePathsInFlagsAbsoluteTest, test
def MakeRelativePathsInFlagsAbsolute_IgnoreUnknown_test():
  # Flags that are not path arguments ('ignored', '-ignored',
  # '--ignored=ignored') must be passed through untouched, interleaved with
  # the path flags that do get absolutified.
  tests = [
    {
      'flags': [
        'ignored',
        '-isystem',
        '/test',
        '-ignored',
        '-I',
        '/test',
        '--ignored=ignored'
      ],
      'expect': [
        'ignored',
        '-isystem', os.path.normpath( '/test' ),
        '-ignored',
        '-I', os.path.normpath( '/test' ),
        '--ignored=ignored'
      ]
    },
    {
      'flags': [
        'ignored',
        '-isystem/test',
        '-ignored',
        '-I/test',
        '--ignored=ignored'
      ],
      'expect': [
        'ignored',
        '-isystem' + os.path.normpath( '/test' ),
        '-ignored',
        # Trailing slash is deliberate noise: normpath('/test/') == '/test'.
        '-I' + os.path.normpath( '/test/' ),
        '--ignored=ignored'
      ]
    },
    {
      'flags': [
        'ignored',
        '--isystem=/test',
        '-ignored',
        '--I=/test',
        '--ignored=ignored'
      ],
      'expect': [
        'ignored',
        '--isystem=/test',
        '-ignored',
        '--I=/test',
        '--ignored=ignored'
      ]
    },
    {
      'flags': [
        'ignored',
        '-isystem', 'test',
        '-ignored',
        '-I', 'test',
        '--ignored=ignored'
      ],
      'expect': [
        'ignored',
        '-isystem', os.path.normpath( '/test/test' ),
        '-ignored',
        '-I', os.path.normpath( '/test/test' ),
        '--ignored=ignored'
      ],
      'wd': '/test',
    },
    {
      'flags': [
        'ignored',
        '-isystemtest',
        '-ignored',
        '-Itest',
        '--ignored=ignored'
      ],
      'expect': [
        'ignored',
        '-isystem' + os.path.normpath( '/test/test' ),
        '-ignored',
        '-I' + os.path.normpath( '/test/test' ),
        '--ignored=ignored'
      ],
      'wd': '/test',
    },
    {
      'flags': [
        'ignored',
        '--isystem=test',
        '-ignored',
        '--I=test',
        '--ignored=ignored',
        '--sysroot=test'
      ],
      'expect': [
        'ignored',
        '--isystem=test',
        '-ignored',
        '--I=test',
        '--ignored=ignored',
        # --sysroot= is the only double-dash spelling that is made absolute.
        '--sysroot=' + os.path.normpath( '/test/test' ),
      ],
      'wd': '/test',
    },
  ]
  # Nose-style generator test: yields one sub-test per table entry.
  for test in tests:
    yield _MakeRelativePathsInFlagsAbsoluteTest, test
def MakeRelativePathsInFlagsAbsolute_NoWorkingDir_test():
  # With an empty working directory there is nothing to resolve relative
  # paths against, so the flag list must pass through completely unchanged.
  case = {
    'flags': [ 'list', 'of', 'flags', 'not', 'changed', '-Itest' ],
    'expect': [ 'list', 'of', 'flags', 'not', 'changed', '-Itest' ],
    'wd': '',
  }
  yield _MakeRelativePathsInFlagsAbsoluteTest, case
| gpl-3.0 |
yousafsyed/casperjs | bin/Lib/ctypes/test/test_simplesubclasses.py | 170 | 1289 | import unittest
from ctypes import *
class MyInt(c_int):
    """A c_int subclass that compares equal by value."""
    def __eq__(self, other):
        # BUG FIX: the original returned NotImplementedError (the exception
        # *class*, a truthy object), so any cross-type '==' comparison was
        # wrongly truthy. The rich-comparison protocol requires returning
        # the NotImplemented singleton to delegate to the other operand.
        if type(other) != MyInt:
            return NotImplemented
        return self.value == other.value
class Test(unittest.TestCase):
    """Behavioral tests for simple ctypes subclasses used as types."""

    def test_compare(self):
        # Equality is by value, courtesy of MyInt.__eq__.
        self.assertEqual(MyInt(3), MyInt(3))
        self.assertNotEqual(MyInt(42), MyInt(43))

    def test_ignore_retval(self):
        # With restype None, whatever the callback returns is discarded.
        prototype = CFUNCTYPE(None)
        def callback():
            return (1, "abc", None)
        self.assertEqual(None, prototype(callback)())

    def test_int_callback(self):
        seen = []
        def callback(value):
            seen.append(value)
            return value
        # A MyInt parameter arrives in the callback as a MyInt instance...
        cb = CFUNCTYPE(None, MyInt)(callback)
        self.assertEqual(None, cb(42))
        self.assertEqual(type(seen[-1]), MyInt)
        # ...whereas a plain c_int parameter is converted to a Python int.
        cb = CFUNCTYPE(c_int, c_int)(callback)
        self.assertEqual(42, cb(42))
        self.assertEqual(type(seen[-1]), int)

    def test_int_struct(self):
        class X(Structure):
            _fields_ = [("x", MyInt)]
        # Struct fields typed as MyInt read back as MyInt, not plain int.
        self.assertEqual(X().x, MyInt())
        instance = X()
        instance.x = MyInt(42)
        self.assertEqual(instance.x, MyInt(42))
if __name__ == "__main__":
    # Allow running this test module directly from the command line.
    unittest.main()
| mit |
cfei18/incubator-airflow | airflow/operators/generic_transfer.py | 15 | 3125 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
from airflow.hooks.base_hook import BaseHook
class GenericTransfer(BaseOperator):
    """
    Copies rows from one database connection to another.

    Both connections are resolved through their Airflow hooks: the source
    hook must implement ``get_records`` and the destination hook
    ``insert_rows``. All rows are held in memory between the read and the
    write, so this operator only suits small-ish result sets.

    :param sql: SQL query to execute against the source database. (templated)
    :type sql: str
    :param destination_table: target table. (templated)
    :type destination_table: str
    :param source_conn_id: source connection
    :type source_conn_id: str
    :param destination_conn_id: destination connection
    :type destination_conn_id: str
    :param preoperator: sql statement or list of statements to be
        executed prior to loading the data. (templated)
    :type preoperator: str or list of str
    """

    template_fields = ('sql', 'destination_table', 'preoperator')
    template_ext = ('.sql', '.hql',)
    ui_color = '#b0f07c'

    @apply_defaults
    def __init__(
            self,
            sql,
            destination_table,
            source_conn_id,
            destination_conn_id,
            preoperator=None,
            *args, **kwargs):
        super(GenericTransfer, self).__init__(*args, **kwargs)
        self.sql = sql
        self.destination_table = destination_table
        self.source_conn_id = source_conn_id
        self.destination_conn_id = destination_conn_id
        self.preoperator = preoperator

    def execute(self, context):
        # Read everything from the source first.
        src_hook = BaseHook.get_hook(self.source_conn_id)
        self.log.info("Extracting data from %s", self.source_conn_id)
        self.log.info("Executing: \n %s", self.sql)
        rows = src_hook.get_records(self.sql)

        # Optionally prepare the destination, then bulk-insert.
        dest_hook = BaseHook.get_hook(self.destination_conn_id)
        if self.preoperator:
            self.log.info("Running preoperator")
            self.log.info(self.preoperator)
            dest_hook.run(self.preoperator)

        self.log.info("Inserting rows into %s", self.destination_conn_id)
        dest_hook.insert_rows(table=self.destination_table, rows=rows)
| apache-2.0 |
ZachRiegel/scriptbin | pypyjs/modules/rfc822.py | 99 | 33295 | """RFC 2822 message manipulation.
Note: This is only a very rough sketch of a full RFC-822 parser; in particular
the tokenizing of addresses does not adhere to all the quoting rules.
Note: RFC 2822 is a long awaited update to RFC 822. This module should
conform to RFC 2822, and is thus mis-named (it's not worth renaming it). Some
effort at RFC 2822 updates have been made, but a thorough audit has not been
performed. Consider any RFC 2822 non-conformance to be a bug.
RFC 2822: http://www.faqs.org/rfcs/rfc2822.html
RFC 822 : http://www.faqs.org/rfcs/rfc822.html (obsolete)
Directions for use:
To create a Message object: first open a file, e.g.:
fp = open(file, 'r')
You can use any other legal way of getting an open file object, e.g. use
sys.stdin or call os.popen(). Then pass the open file object to the Message()
constructor:
m = Message(fp)
This class can work with any input object that supports a readline method. If
the input object has seek and tell capability, the rewindbody method will
work; also illegal lines will be pushed back onto the input stream. If the
input object lacks seek but has an `unread' method that can push back a line
of input, Message will use that to push back illegal lines. Thus this class
can be used to parse messages coming from a buffered stream.
The optional `seekable' argument is provided as a workaround for certain stdio
libraries in which tell() discards buffered data before discovering that the
lseek() system call doesn't work. For maximum portability, you should set the
seekable argument to zero to prevent that initial \code{tell} when passing in
an unseekable object such as a file object created from a socket object. If
it is 1 on entry -- which it is by default -- the tell() method of the open
file object is called once; if this raises an exception, seekable is reset to
0. For other nonzero values of seekable, this test is not made.
To get the text of a particular header there are several methods:
str = m.getheader(name)
str = m.getrawheader(name)
where name is the name of the header, e.g. 'Subject'. The difference is that
getheader() strips the leading and trailing whitespace, while getrawheader()
doesn't. Both functions retain embedded whitespace (including newlines)
exactly as they are specified in the header, and leave the case of the text
unchanged.
For addresses and address lists there are functions
realname, mailaddress = m.getaddr(name)
list = m.getaddrlist(name)
where the latter returns a list of (realname, mailaddr) tuples.
There is also a method
time = m.getdate(name)
which parses a Date-like field and returns a time-compatible tuple,
i.e. a tuple such as returned by time.localtime() or accepted by
time.mktime().
See the class definition for lower level access methods.
There are also some utility functions here.
"""
# Cleanup and extensions by Eric S. Raymond <esr@thyrsus.com>
import time
from warnings import warnpy3k
warnpy3k("in 3.x, rfc822 has been removed in favor of the email package",
stacklevel=2)
__all__ = ["Message","AddressList","parsedate","parsedate_tz","mktime_tz"]
_blanklines = ('\r\n', '\n') # Optimization for islast()
class Message:
    """Represents a single RFC 2822-compliant message."""

    def __init__(self, fp, seekable = 1):
        """Initialize the class instance and read the headers."""
        if seekable == 1:
            # Exercise tell() to make sure it works
            # (and then assume seek() works, too)
            try:
                fp.tell()
            except (AttributeError, IOError):
                seekable = 0
        self.fp = fp
        self.seekable = seekable
        self.startofheaders = None
        self.startofbody = None
        #
        if self.seekable:
            try:
                self.startofheaders = self.fp.tell()
            except IOError:
                self.seekable = 0
        #
        self.readheaders()
        #
        if self.seekable:
            try:
                self.startofbody = self.fp.tell()
            except IOError:
                self.seekable = 0

    def rewindbody(self):
        """Rewind the file to the start of the body (if seekable)."""
        if not self.seekable:
            raise IOError, "unseekable file"
        self.fp.seek(self.startofbody)

    def readheaders(self):
        """Read header lines.

        Read header lines up to the entirely blank line that terminates them.
        The (normally blank) line that ends the headers is skipped, but not
        included in the returned list. If a non-header line ends the headers,
        (which is an error), an attempt is made to backspace over it; it is
        never included in the returned list.

        The variable self.status is set to the empty string if all went well,
        otherwise it is an error message. The variable self.headers is a
        completely uninterpreted list of lines contained in the header (so
        printing them will reproduce the header exactly as it appears in the
        file).
        """
        self.dict = {}
        self.unixfrom = ''
        self.headers = lst = []
        self.status = ''
        headerseen = ""
        firstline = 1
        # Pushback strategy: prefer an 'unread' method on fp; otherwise fall
        # back to tell()/seek() when the stream is seekable.
        startofline = unread = tell = None
        if hasattr(self.fp, 'unread'):
            unread = self.fp.unread
        elif self.seekable:
            tell = self.fp.tell
        while 1:
            if tell:
                try:
                    startofline = tell()
                except IOError:
                    startofline = tell = None
                    self.seekable = 0
            line = self.fp.readline()
            if not line:
                self.status = 'EOF in headers'
                break
            # Skip unix From name time lines
            if firstline and line.startswith('From '):
                self.unixfrom = self.unixfrom + line
                continue
            firstline = 0
            if headerseen and line[0] in ' \t':
                # It's a continuation line: append to the raw list and fold
                # it into the dict entry of the last header seen.
                lst.append(line)
                x = (self.dict[headerseen] + "\n " + line.strip())
                self.dict[headerseen] = x.strip()
                continue
            elif self.iscomment(line):
                # It's a comment. Ignore it.
                continue
            elif self.islast(line):
                # Note! No pushback here! The delimiter line gets eaten.
                break
            headerseen = self.isheader(line)
            if headerseen:
                # It's a legal header line, save it.
                lst.append(line)
                self.dict[headerseen] = line[len(headerseen)+1:].strip()
                continue
            else:
                # It's not a header line; throw it back and stop here.
                if not self.dict:
                    self.status = 'No headers'
                else:
                    self.status = 'Non-header line where header expected'
                # Try to undo the read.
                if unread:
                    unread(line)
                elif tell:
                    self.fp.seek(startofline)
                else:
                    self.status = self.status + '; bad seek'
                break

    def isheader(self, line):
        """Determine whether a given line is a legal header.

        This method should return the header name, suitably canonicalized.
        You may override this method in order to use Message parsing on tagged
        data in RFC 2822-like formats with special header formats.
        """
        i = line.find(':')
        if i > 0:
            # Headers are canonicalized to lower case.
            return line[:i].lower()
        return None

    def islast(self, line):
        """Determine whether a line is a legal end of RFC 2822 headers.

        You may override this method if your application wants to bend the
        rules, e.g. to strip trailing whitespace, or to recognize MH template
        separators ('--------'). For convenience (e.g. for code reading from
        sockets) a line consisting of \\r\\n also matches.
        """
        return line in _blanklines

    def iscomment(self, line):
        """Determine whether a line should be skipped entirely.

        You may override this method in order to use Message parsing on tagged
        data in RFC 2822-like formats that support embedded comments or
        free-text data.
        """
        return False

    def getallmatchingheaders(self, name):
        """Find all header lines matching a given header name.

        Look through the list of headers and find all lines matching a given
        header name (and their continuation lines). A list of the lines is
        returned, without interpretation. If the header does not occur, an
        empty list is returned. If the header occurs multiple times, all
        occurrences are returned. Case is not important in the header name.
        """
        name = name.lower() + ':'
        n = len(name)
        lst = []
        hit = 0
        for line in self.headers:
            if line[:n].lower() == name:
                hit = 1
            elif not line[:1].isspace():
                # A non-continuation line ends the current match.
                hit = 0
            if hit:
                lst.append(line)
        return lst

    def getfirstmatchingheader(self, name):
        """Get the first header line matching name.

        This is similar to getallmatchingheaders, but it returns only the
        first matching header (and its continuation lines).
        """
        name = name.lower() + ':'
        n = len(name)
        lst = []
        hit = 0
        for line in self.headers:
            if hit:
                if not line[:1].isspace():
                    break
            elif line[:n].lower() == name:
                hit = 1
            if hit:
                lst.append(line)
        return lst

    def getrawheader(self, name):
        """A higher-level interface to getfirstmatchingheader().

        Return a string containing the literal text of the header but with the
        keyword stripped. All leading, trailing and embedded whitespace is
        kept in the string, however. Return None if the header does not
        occur.
        """
        lst = self.getfirstmatchingheader(name)
        if not lst:
            return None
        lst[0] = lst[0][len(name) + 1:]
        return ''.join(lst)

    def getheader(self, name, default=None):
        """Get the header value for a name.

        This is the normal interface: it returns a stripped version of the
        header value for a given header name, or None if it doesn't exist.
        This uses the dictionary version which finds the *last* such header.
        """
        return self.dict.get(name.lower(), default)
    # Backwards-compatible alias.
    get = getheader

    def getheaders(self, name):
        """Get all values for a header.

        This returns a list of values for headers given more than once; each
        value in the result list is stripped in the same way as the result of
        getheader(). If the header is not given, return an empty list.
        """
        result = []
        current = ''
        have_header = 0
        for s in self.getallmatchingheaders(name):
            if s[0].isspace():
                # Continuation line: fold into the current value.
                if current:
                    current = "%s\n %s" % (current, s.strip())
                else:
                    current = s.strip()
            else:
                if have_header:
                    result.append(current)
                current = s[s.find(":") + 1:].strip()
                have_header = 1
        if have_header:
            result.append(current)
        return result

    def getaddr(self, name):
        """Get a single address from a header, as a tuple.

        An example return value:
        ('Guido van Rossum', 'guido@cwi.nl')
        """
        # New, by Ben Escoto
        alist = self.getaddrlist(name)
        if alist:
            return alist[0]
        else:
            return (None, None)

    def getaddrlist(self, name):
        """Get a list of addresses from a header.

        Retrieves a list of addresses from a header, where each address is a
        tuple as returned by getaddr(). Scans all named headers, so it works
        properly with multiple To: or Cc: headers for example.
        """
        raw = []
        for h in self.getallmatchingheaders(name):
            if h[0] in ' \t':
                raw.append(h)
            else:
                if raw:
                    # Separate values coming from distinct header lines.
                    raw.append(', ')
                i = h.find(':')
                if i > 0:
                    addr = h[i+1:]
                raw.append(addr)
        alladdrs = ''.join(raw)
        a = AddressList(alladdrs)
        return a.addresslist

    def getdate(self, name):
        """Retrieve a date field from a header.

        Retrieves a date field from the named header, returning a tuple
        compatible with time.mktime().
        """
        try:
            data = self[name]
        except KeyError:
            return None
        return parsedate(data)

    def getdate_tz(self, name):
        """Retrieve a date field from a header as a 10-tuple.

        The first 9 elements make up a tuple compatible with time.mktime(),
        and the 10th is the offset of the poster's time zone from GMT/UTC.
        """
        try:
            data = self[name]
        except KeyError:
            return None
        return parsedate_tz(data)

    # Access as a dictionary (only finds *last* header of each type):

    def __len__(self):
        """Get the number of headers in a message."""
        return len(self.dict)

    def __getitem__(self, name):
        """Get a specific header, as from a dictionary."""
        return self.dict[name.lower()]

    def __setitem__(self, name, value):
        """Set the value of a header.

        Note: This is not a perfect inversion of __getitem__, because any
        changed headers get stuck at the end of the raw-headers list rather
        than where the altered header was.
        """
        del self[name] # Won't fail if it doesn't exist
        self.dict[name.lower()] = value
        text = name + ": " + value
        for line in text.split("\n"):
            self.headers.append(line + "\n")

    def __delitem__(self, name):
        """Delete all occurrences of a specific header, if it is present."""
        name = name.lower()
        if not name in self.dict:
            return
        del self.dict[name]
        name = name + ':'
        n = len(name)
        lst = []
        hit = 0
        # Collect the indices of matching lines (and their continuations),
        # then delete them in reverse so indices stay valid.
        for i in range(len(self.headers)):
            line = self.headers[i]
            if line[:n].lower() == name:
                hit = 1
            elif not line[:1].isspace():
                hit = 0
            if hit:
                lst.append(i)
        for i in reversed(lst):
            del self.headers[i]

    def setdefault(self, name, default=""):
        # Like dict.setdefault: return the existing value, or install and
        # return the default, keeping the raw header list in sync.
        lowername = name.lower()
        if lowername in self.dict:
            return self.dict[lowername]
        else:
            text = name + ": " + default
            for line in text.split("\n"):
                self.headers.append(line + "\n")
            self.dict[lowername] = default
            return default

    def has_key(self, name):
        """Determine whether a message contains the named header."""
        return name.lower() in self.dict

    def __contains__(self, name):
        """Determine whether a message contains the named header."""
        return name.lower() in self.dict

    def __iter__(self):
        return iter(self.dict)

    def keys(self):
        """Get all of a message's header field names."""
        return self.dict.keys()

    def values(self):
        """Get all of a message's header field values."""
        return self.dict.values()

    def items(self):
        """Get all of a message's headers.

        Returns a list of name, value tuples.
        """
        return self.dict.items()

    def __str__(self):
        return ''.join(self.headers)
# Utility functions
# -----------------
# XXX Should fix unquote() and quote() to be really conformant.
# XXX The inverses of the parse functions may also be useful.
def unquote(s):
    """Strip one level of surrounding double quotes or angle brackets."""
    if len(s) > 1:
        head, tail = s[0], s[-1]
        if head == '"' and tail == '"':
            # Also undo backslash escaping inside a quoted string.
            return s[1:-1].replace('\\\\', '\\').replace('\\"', '"')
        if head == '<' and tail == '>':
            return s[1:-1]
    return s
def quote(s):
    """Backslash-escape backslashes and double quotes in a string."""
    escaped = s.replace('\\', '\\\\')
    return escaped.replace('"', '\\"')
def parseaddr(address):
    """Parse an address into a (realname, mailaddr) tuple."""
    parsed = AddressList(address).addresslist
    if parsed:
        return parsed[0]
    return (None, None)
class AddrlistClass:
    """Address parser class by Ben Escoto.

    To understand what this class does, it helps to have a copy of
    RFC 2822 in front of you.

    http://www.faqs.org/rfcs/rfc2822.html

    Note: this class interface is deprecated and may be removed in the future.
    Use rfc822.AddressList instead.
    """

    def __init__(self, field):
        """Initialize a new instance.

        `field' is an unparsed address header field, containing one or more
        addresses.
        """
        # Character classes used by the hand-written recursive-descent
        # tokenizer below. self.pos is the single parsing cursor into field.
        self.specials = '()<>@,:;.\"[]'
        self.pos = 0
        self.LWS = ' \t'
        self.CR = '\r\n'
        self.atomends = self.specials + self.LWS + self.CR
        # Note that RFC 2822 now specifies `.' as obs-phrase, meaning that it
        # is obsolete syntax. RFC 2822 requires that we recognize obsolete
        # syntax, so allow dots in phrases.
        self.phraseends = self.atomends.replace('.', '')
        self.field = field
        self.commentlist = []

    def gotonext(self):
        """Parse up to the start of the next address."""
        # Skips whitespace and collects any RFC 2822 comments encountered.
        while self.pos < len(self.field):
            if self.field[self.pos] in self.LWS + '\n\r':
                self.pos = self.pos + 1
            elif self.field[self.pos] == '(':
                self.commentlist.append(self.getcomment())
            else: break

    def getaddrlist(self):
        """Parse all addresses.

        Returns a list containing all of the addresses.
        """
        result = []
        ad = self.getaddress()
        while ad:
            result += ad
            ad = self.getaddress()
        return result

    def getaddress(self):
        """Parse the next address."""
        self.commentlist = []
        self.gotonext()

        # Remember where we are so the addrspec branch can re-parse from
        # the start of this address.
        oldpos = self.pos
        oldcl = self.commentlist
        plist = self.getphraselist()

        self.gotonext()
        returnlist = []

        if self.pos >= len(self.field):
            # Bad email address technically, no domain.
            if plist:
                returnlist = [(' '.join(self.commentlist), plist[0])]

        elif self.field[self.pos] in '.@':
            # email address is just an addrspec
            # this isn't very efficient since we start over
            self.pos = oldpos
            self.commentlist = oldcl
            addrspec = self.getaddrspec()
            returnlist = [(' '.join(self.commentlist), addrspec)]

        elif self.field[self.pos] == ':':
            # address is a group
            returnlist = []

            fieldlen = len(self.field)
            self.pos += 1
            while self.pos < len(self.field):
                self.gotonext()
                if self.pos < fieldlen and self.field[self.pos] == ';':
                    self.pos += 1
                    break
                returnlist = returnlist + self.getaddress()

        elif self.field[self.pos] == '<':
            # Address is a phrase then a route addr
            routeaddr = self.getrouteaddr()

            if self.commentlist:
                returnlist = [(' '.join(plist) + ' (' + \
                         ' '.join(self.commentlist) + ')', routeaddr)]
            else: returnlist = [(' '.join(plist), routeaddr)]

        else:
            if plist:
                returnlist = [(' '.join(self.commentlist), plist[0])]
            elif self.field[self.pos] in self.specials:
                # Skip a stray special character to make progress.
                self.pos += 1

        self.gotonext()
        if self.pos < len(self.field) and self.field[self.pos] == ',':
            self.pos += 1
        return returnlist

    def getrouteaddr(self):
        """Parse a route address (Return-path value).

        This method just skips all the route stuff and returns the addrspec.
        """
        if self.field[self.pos] != '<':
            return

        expectroute = 0
        self.pos += 1
        self.gotonext()
        adlist = ""
        while self.pos < len(self.field):
            if expectroute:
                self.getdomain()
                expectroute = 0
            elif self.field[self.pos] == '>':
                self.pos += 1
                break
            elif self.field[self.pos] == '@':
                self.pos += 1
                expectroute = 1
            elif self.field[self.pos] == ':':
                self.pos += 1
            else:
                adlist = self.getaddrspec()
                self.pos += 1
                break
            self.gotonext()

        return adlist

    def getaddrspec(self):
        """Parse an RFC 2822 addr-spec."""
        aslist = []

        self.gotonext()
        # local-part: atoms, quoted strings and dots up to the '@'.
        while self.pos < len(self.field):
            if self.field[self.pos] == '.':
                aslist.append('.')
                self.pos += 1
            elif self.field[self.pos] == '"':
                aslist.append('"%s"' % self.getquote())
            elif self.field[self.pos] in self.atomends:
                break
            else: aslist.append(self.getatom())

        self.gotonext()
        if self.pos >= len(self.field) or self.field[self.pos] != '@':
            return ''.join(aslist)

        aslist.append('@')
        self.pos += 1
        self.gotonext()
        return ''.join(aslist) + self.getdomain()

    def getdomain(self):
        """Get the complete domain name from an address."""
        sdlist = []
        while self.pos < len(self.field):
            if self.field[self.pos] in self.LWS:
                self.pos += 1
            elif self.field[self.pos] == '(':
                self.commentlist.append(self.getcomment())
            elif self.field[self.pos] == '[':
                sdlist.append(self.getdomainliteral())
            elif self.field[self.pos] == '.':
                self.pos += 1
                sdlist.append('.')
            elif self.field[self.pos] in self.atomends:
                break
            else: sdlist.append(self.getatom())
        return ''.join(sdlist)

    def getdelimited(self, beginchar, endchars, allowcomments = 1):
        """Parse a header fragment delimited by special characters.

        `beginchar' is the start character for the fragment. If self is not
        looking at an instance of `beginchar' then getdelimited returns the
        empty string.

        `endchars' is a sequence of allowable end-delimiting characters.
        Parsing stops when one of these is encountered.

        If `allowcomments' is non-zero, embedded RFC 2822 comments are allowed
        within the parsed fragment.
        """
        if self.field[self.pos] != beginchar:
            return ''

        slist = ['']
        # `quote' flags that the previous char was a backslash, so the next
        # char is taken literally.
        quote = 0
        self.pos += 1
        while self.pos < len(self.field):
            if quote == 1:
                slist.append(self.field[self.pos])
                quote = 0
            elif self.field[self.pos] in endchars:
                self.pos += 1
                break
            elif allowcomments and self.field[self.pos] == '(':
                slist.append(self.getcomment())
                continue # have already advanced pos from getcomment
            elif self.field[self.pos] == '\\':
                quote = 1
            else:
                slist.append(self.field[self.pos])
            self.pos += 1

        return ''.join(slist)

    def getquote(self):
        """Get a quote-delimited fragment from self's field."""
        return self.getdelimited('"', '"\r', 0)

    def getcomment(self):
        """Get a parenthesis-delimited fragment from self's field."""
        return self.getdelimited('(', ')\r', 1)

    def getdomainliteral(self):
        """Parse an RFC 2822 domain-literal."""
        return '[%s]' % self.getdelimited('[', ']\r', 0)

    def getatom(self, atomends=None):
        """Parse an RFC 2822 atom.

        Optional atomends specifies a different set of end token delimiters
        (the default is to use self.atomends). This is used e.g. in
        getphraselist() since phrase endings must not include the `.' (which
        is legal in phrases)."""
        atomlist = ['']
        if atomends is None:
            atomends = self.atomends

        while self.pos < len(self.field):
            if self.field[self.pos] in atomends:
                break
            else: atomlist.append(self.field[self.pos])
            self.pos += 1

        return ''.join(atomlist)

    def getphraselist(self):
        """Parse a sequence of RFC 2822 phrases.

        A phrase is a sequence of words, which are in turn either RFC 2822
        atoms or quoted-strings. Phrases are canonicalized by squeezing all
        runs of continuous whitespace into one space.
        """
        plist = []

        while self.pos < len(self.field):
            if self.field[self.pos] in self.LWS:
                self.pos += 1
            elif self.field[self.pos] == '"':
                plist.append(self.getquote())
            elif self.field[self.pos] == '(':
                self.commentlist.append(self.getcomment())
            elif self.field[self.pos] in self.phraseends:
                break
            else:
                plist.append(self.getatom(self.phraseends))

        return plist
class AddressList(AddrlistClass):
    """A parsed list of RFC 2822 addresses supporting set-like operators."""

    def __init__(self, field):
        AddrlistClass.__init__(self, field)
        self.addresslist = self.getaddrlist() if field else []

    def __len__(self):
        return len(self.addresslist)

    def __str__(self):
        return ", ".join(map(dump_address_pair, self.addresslist))

    def __add__(self, other):
        # Set union: our entries, then other's entries we don't already have.
        result = AddressList(None)
        result.addresslist = self.addresslist[:]
        result.addresslist.extend(
            addr for addr in other.addresslist
            if addr not in self.addresslist)
        return result

    def __iadd__(self, other):
        # Set union, in-place.
        for addr in other.addresslist:
            if addr not in self.addresslist:
                self.addresslist.append(addr)
        return self

    def __sub__(self, other):
        # Set difference.
        result = AddressList(None)
        result.addresslist = [addr for addr in self.addresslist
                              if addr not in other.addresslist]
        return result

    def __isub__(self, other):
        # Set difference, in-place.
        for addr in other.addresslist:
            if addr in self.addresslist:
                self.addresslist.remove(addr)
        return self

    def __getitem__(self, index):
        # Makes indexing, slices, and 'in' work.
        return self.addresslist[index]
def dump_address_pair(pair):
    """Dump a (name, address) pair in a canonicalized form."""
    realname, mailaddr = pair[0], pair[1]
    if realname:
        return '"%s" <%s>' % (realname, mailaddr)
    return mailaddr
# Parse a date field
_monthnames = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul',
'aug', 'sep', 'oct', 'nov', 'dec',
'january', 'february', 'march', 'april', 'may', 'june', 'july',
'august', 'september', 'october', 'november', 'december']
_daynames = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun']
# The timezone table does not include the military time zones defined
# in RFC822, other than Z. According to RFC1123, the description in
# RFC822 gets the signs wrong, so we can't rely on any such time
# zones. RFC1123 recommends that numeric timezone indicators be used
# instead of timezone names.
_timezones = {'UT':0, 'UTC':0, 'GMT':0, 'Z':0,
'AST': -400, 'ADT': -300, # Atlantic (used in Canada)
'EST': -500, 'EDT': -400, # Eastern
'CST': -600, 'CDT': -500, # Central
'MST': -700, 'MDT': -600, # Mountain
'PST': -800, 'PDT': -700 # Pacific
}
def parsedate_tz(data):
    """Convert a date string to a time tuple.

    Accounts for military timezones.
    """
    if not data:
        return None
    data = data.split()
    if data[0][-1] in (',', '.') or data[0].lower() in _daynames:
        # There's a dayname here. Skip it
        del data[0]
    else:
        # no space after the "weekday,"?
        i = data[0].rfind(',')
        if i >= 0:
            data[0] = data[0][i+1:]
    if len(data) == 3: # RFC 850 date, deprecated
        stuff = data[0].split('-')
        if len(stuff) == 3:
            data = stuff + data[1:]
    if len(data) == 4:
        s = data[3]
        i = s.find('+')
        if i > 0:
            # Split a fused "HH:MM:SS+ZZZZ" into time and zone tokens.
            data[3:] = [s[:i], s[i+1:]]
        else:
            data.append('') # Dummy tz
    if len(data) < 5:
        return None
    data = data[:5]
    [dd, mm, yy, tm, tz] = data
    mm = mm.lower()
    if not mm in _monthnames:
        # Day and month may be swapped ("Mar 3" vs "3 Mar").
        dd, mm = mm, dd.lower()
        if not mm in _monthnames:
            return None
    mm = _monthnames.index(mm)+1
    # _monthnames lists abbreviated then full names, so full-name indices
    # (13..24) are folded back into 1..12 here.
    if mm > 12: mm = mm - 12
    if dd[-1] == ',':
        dd = dd[:-1]
    i = yy.find(':')
    if i > 0:
        # Year and time tokens arrived swapped.
        yy, tm = tm, yy
    if yy[-1] == ',':
        yy = yy[:-1]
    if not yy[0].isdigit():
        yy, tz = tz, yy
    if tm[-1] == ',':
        tm = tm[:-1]
    tm = tm.split(':')
    if len(tm) == 2:
        [thh, tmm] = tm
        tss = '0'
    elif len(tm) == 3:
        [thh, tmm, tss] = tm
    else:
        return None
    try:
        yy = int(yy)
        dd = int(dd)
        thh = int(thh)
        tmm = int(tmm)
        tss = int(tss)
    except ValueError:
        return None
    tzoffset = None
    tz = tz.upper()
    if tz in _timezones:
        # _timezones values are in the +-HHMM convention (e.g. EST -> -500).
        tzoffset = _timezones[tz]
    else:
        try:
            tzoffset = int(tz)
        except ValueError:
            pass
    # Convert a timezone offset into seconds ; -0500 -> -18000
    if tzoffset:
        if tzoffset < 0:
            tzsign = -1
            tzoffset = -tzoffset
        else:
            tzsign = 1
        tzoffset = tzsign * ( (tzoffset//100)*3600 + (tzoffset % 100)*60)
    return (yy, mm, dd, thh, tmm, tss, 0, 1, 0, tzoffset)
def parsedate(data):
    """Convert a time string to a 9-element time tuple.

    Same as parsedate_tz() but with the trailing timezone offset dropped;
    returns None when the string cannot be parsed.
    """
    parsed = parsedate_tz(data)
    return None if parsed is None else parsed[:9]
def mktime_tz(data):
    """Turn a 10-tuple as returned by parsedate_tz() into a UTC timestamp.

    data[9] is the zone offset in seconds east of UTC, or None when the
    parsed string carried no usable zone information.
    """
    if data[9] is None:
        # No zone info, so localtime is better assumption than GMT.
        # The trailing -1 lets mktime() decide whether DST applies.
        return time.mktime(data[:8] + (-1,))
    else:
        # BUGFIX: the old `time.mktime(...) - time.timezone` computation
        # interpreted the tuple in the *local* zone with DST forced off,
        # giving results one hour out whenever local DST was in effect.
        # timegm() treats the tuple as UTC directly (this mirrors the fix
        # CPython made in email.utils.mktime_tz).
        import calendar  # local import keeps the module's import surface unchanged
        return calendar.timegm(data) - data[9]
def formatdate(timeval=None):
    """Returns time format preferred for Internet standards.

        Sun, 06 Nov 1994 08:49:37 GMT  ; RFC 822, updated by RFC 1123

    According to RFC 1123, day and month names must always be in English.
    strftime() cannot be used because it honors the locale and could
    generate non-English names; the names are therefore spelled out here.

    timeval -- seconds since the epoch; defaults to the current time.
    """
    if timeval is None:
        timeval = time.time()
    tt = time.gmtime(timeval)
    days = ("Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun")
    months = ("Jan", "Feb", "Mar", "Apr", "May", "Jun",
              "Jul", "Aug", "Sep", "Oct", "Nov", "Dec")
    return "%s, %02d %s %04d %02d:%02d:%02d GMT" % (
        days[tt[6]], tt[2], months[tt[1]-1], tt[0], tt[3], tt[4], tt[5])
# When used as script, run a small test program.
# The first command line argument must be a filename containing one
# message in RFC-822 format.
# NOTE(review): this demo uses Python 2 print statements and the Message
# class defined earlier in this module (not visible in this chunk).
if __name__ == '__main__':
    import sys, os
    file = os.path.join(os.environ['HOME'], 'Mail/inbox/1')
    if sys.argv[1:]: file = sys.argv[1]
    f = open(file, 'r')
    m = Message(f)
    print 'From:', m.getaddr('from')
    print 'To:', m.getaddrlist('to')
    print 'Subject:', m.getheader('subject')
    print 'Date:', m.getheader('date')
    date = m.getdate_tz('date')
    tz = date[-1]
    # NOTE(review): mktime_tz() is called before the `if date:` check, so a
    # missing/unparseable Date header would raise here rather than print None.
    date = time.localtime(mktime_tz(date))
    if date:
        print 'ParsedDate:', time.asctime(date),
        # Re-render the zone offset (seconds) as +HHMM[.SS].
        hhmmss = tz
        hhmm, ss = divmod(hhmmss, 60)
        hh, mm = divmod(hhmm, 60)
        print "%+03d%02d" % (hh, mm),
        if ss: print ".%02d" % ss,
        print
    else:
        print 'ParsedDate:', None
    m.rewindbody()
    n = 0
    while f.readline():
        n += 1
    print 'Lines:', n
    print '-'*70
    print 'len =', len(m)
    if 'Date' in m: print 'Date =', m['Date']
    if 'X-Nonsense' in m: pass
    print 'keys =', m.keys()
    print 'values =', m.values()
    print 'items =', m.items()
| gpl-3.0 |
valentin-krasontovitsch/ansible | test/units/modules/network/nxos/nxos_module.py | 29 | 3594 | # (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
from units.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase
from units.modules.utils import set_module_args as _set_module_args
def set_module_args(args):
    """Ensure a `provider` dict is present, then delegate to the shared helper.

    The provider's transport defaults to the top-level `transport` argument,
    falling back to 'cli'.
    """
    args.setdefault('provider', {'transport': args.get('transport') or 'cli'})
    return _set_module_args(args)
# Directory holding the on-disk fixture files, relative to this test module.
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
# Cache of already-loaded fixtures, keyed by absolute fixture path.
fixture_data = {}
def load_fixture(module_name, name, device=''):
    """Load (and cache) the fixture file *name* for *module_name*.

    The device-specific subdirectory is consulted first, falling back to
    the module directory.  JSON content is parsed into Python objects;
    anything else is returned as raw text.  Results are memoized in the
    module-level ``fixture_data`` dict, keyed by resolved path.
    """
    path = os.path.join(fixture_path, module_name, device, name)
    if not os.path.exists(path):
        path = os.path.join(fixture_path, module_name, name)

    if path in fixture_data:
        return fixture_data[path]

    with open(path) as f:
        data = f.read()

    try:
        data = json.loads(data)
    except ValueError:
        # Not JSON (many fixtures are raw CLI output) -- keep the text.
        # json.loads() raises JSONDecodeError, a ValueError subclass, so the
        # previous blanket `except Exception` was unnecessarily broad.
        pass

    fixture_data[path] = data
    return data
class TestNxosModule(ModuleTestCase):
    """Shared harness for NXOS module tests: runs the module under test and
    checks its exit status, change flag, and generated commands."""

    def execute_module_devices(self, failed=False, changed=False, commands=None, sort=True, defaults=False):
        """Run execute_module() once per device-model fixture subdirectory."""
        module_name = self.module.__name__.rsplit('.', 1)[1]
        local_fixture_path = os.path.join(fixture_path, module_name)

        # Each subdirectory of the module's fixture dir is a device model.
        models = []
        for entry in os.listdir(local_fixture_path):
            if os.path.isdir(os.path.join(local_fixture_path, entry)):
                models.append(entry)
        if not models:
            models = ['']

        return {
            model: self.execute_module(failed, changed, commands, sort, device=model)
            for model in models
        }

    def execute_module(self, failed=False, changed=False, commands=None, sort=True, device=''):
        """Invoke the module once and assert on its result dictionary."""
        self.load_fixtures(commands, device=device)

        if failed:
            result = self.failed()
            self.assertTrue(result['failed'], result)
        else:
            result = self.changed(changed)
            self.assertEqual(result['changed'], changed, result)

        if commands is not None:
            expected = sorted(commands) if sort else commands
            actual = sorted(result['commands']) if sort else result['commands']
            self.assertEqual(expected, actual, result['commands'])

        return result

    def failed(self):
        """Run the module expecting a fail_json exit; return its payload."""
        with self.assertRaises(AnsibleFailJson) as ctx:
            self.module.main()

        outcome = ctx.exception.args[0]
        self.assertTrue(outcome['failed'], outcome)
        return outcome

    def changed(self, changed=False):
        """Run the module expecting an exit_json exit; return its payload."""
        with self.assertRaises(AnsibleExitJson) as ctx:
            self.module.main()

        outcome = ctx.exception.args[0]
        self.assertEqual(outcome['changed'], changed, outcome)
        return outcome

    def load_fixtures(self, commands=None, device=''):
        """Hook for subclasses to stub out device responses; no-op here."""
        pass
| gpl-3.0 |
lesina/Hack70 | env/lib/python3.5/site-packages/django/contrib/gis/utils/layermapping.py | 137 | 27371 | # LayerMapping -- A Django Model/OGR Layer Mapping Utility
"""
The LayerMapping class provides a way to map the contents of OGR
vector files (e.g. SHP files) to Geographic-enabled Django models.
For more information, please consult the GeoDjango documentation:
https://docs.djangoproject.com/en/dev/ref/contrib/gis/layermapping/
"""
import sys
from decimal import Decimal, InvalidOperation as DecimalInvalidOperation
from django.contrib.gis.db.models import GeometryField
from django.contrib.gis.gdal import (
CoordTransform, DataSource, GDALException, OGRGeometry, OGRGeomType,
SpatialReference,
)
from django.contrib.gis.gdal.field import (
OFTDate, OFTDateTime, OFTInteger, OFTInteger64, OFTReal, OFTString,
OFTTime,
)
from django.core.exceptions import FieldDoesNotExist, ObjectDoesNotExist
from django.db import connections, models, router, transaction
from django.utils import six
from django.utils.encoding import force_text
# LayerMapping exceptions.
class LayerMapError(Exception):
    """Base exception for all LayerMapping errors."""
    pass
class InvalidString(LayerMapError):
    """Raised when an OGR string value exceeds the model field's max_length."""
    pass
class InvalidDecimal(LayerMapError):
    """Raised when an OGR value cannot fit the model's DecimalField."""
    pass
class InvalidInteger(LayerMapError):
    """Raised when an OGR value cannot be converted to an integer."""
    pass
class MissingForeignKey(LayerMapError):
    """Raised when no related model matches a ForeignKey mapping lookup."""
    pass
class LayerMapping(object):
    "A class that maps OGR Layers to GeoDjango Models."
    # Acceptable 'base' types for a multi-geometry type.
    MULTI_TYPES = {1: OGRGeomType('MultiPoint'),
                   2: OGRGeomType('MultiLineString'),
                   3: OGRGeomType('MultiPolygon'),
                   OGRGeomType('Point25D').num: OGRGeomType('MultiPoint25D'),
                   OGRGeomType('LineString25D').num: OGRGeomType('MultiLineString25D'),
                   OGRGeomType('Polygon25D').num: OGRGeomType('MultiPolygon25D'),
                   }
    # Acceptable Django field types and corresponding acceptable OGR
    # counterparts.
    FIELD_TYPES = {
        models.AutoField: OFTInteger,
        models.BigAutoField: OFTInteger64,
        models.IntegerField: (OFTInteger, OFTReal, OFTString),
        models.FloatField: (OFTInteger, OFTReal),
        models.DateField: OFTDate,
        models.DateTimeField: OFTDateTime,
        models.EmailField: OFTString,
        models.TimeField: OFTTime,
        models.DecimalField: (OFTInteger, OFTReal),
        models.CharField: OFTString,
        models.SlugField: OFTString,
        models.TextField: OFTString,
        models.URLField: OFTString,
        models.BigIntegerField: (OFTInteger, OFTReal, OFTString),
        models.SmallIntegerField: (OFTInteger, OFTReal, OFTString),
        models.PositiveSmallIntegerField: (OFTInteger, OFTReal, OFTString),
    }

    def __init__(self, model, data, mapping, layer=0,
                 source_srs=None, encoding='utf-8',
                 transaction_mode='commit_on_success',
                 transform=True, unique=None, using=None):
        """
        A LayerMapping object is initialized using the given Model (not an instance),
        a DataSource (or string path to an OGR-supported data file), and a mapping
        dictionary. See the module level docstring for more details and keyword
        argument usage.
        """
        # Getting the DataSource and the associated Layer.
        if isinstance(data, six.string_types):
            self.ds = DataSource(data, encoding=encoding)
        else:
            self.ds = data
        self.layer = self.ds[layer]
        self.using = using if using is not None else router.db_for_write(model)
        self.spatial_backend = connections[self.using].ops
        # Setting the mapping & model attributes.
        self.mapping = mapping
        self.model = model
        # Checking the layer -- initialization of the object will fail if
        # things don't check out before hand.
        self.check_layer()
        # Getting the geometry column associated with the model (an
        # exception will be raised if there is no geometry column).
        if connections[self.using].features.supports_transform:
            self.geo_field = self.geometry_field()
        else:
            # Backend cannot transform geometries, so transformation is
            # disabled regardless of the caller's `transform` keyword.
            transform = False
        # Checking the source spatial reference system, and getting
        # the coordinate transformation object (unless the `transform`
        # keyword is set to False)
        if transform:
            self.source_srs = self.check_srs(source_srs)
            self.transform = self.coord_transform()
        else:
            self.transform = transform
        # Setting the encoding for OFTString fields, if specified.
        if encoding:
            # Making sure the encoding exists, if not a LookupError
            # exception will be thrown.
            from codecs import lookup
            lookup(encoding)
            self.encoding = encoding
        else:
            self.encoding = None
        if unique:
            self.check_unique(unique)
            transaction_mode = 'autocommit'  # Has to be set to autocommit.
            self.unique = unique
        else:
            self.unique = None
        # Setting the transaction decorator with the function in the
        # transaction modes dictionary.
        self.transaction_mode = transaction_mode
        if transaction_mode == 'autocommit':
            self.transaction_decorator = None
        elif transaction_mode == 'commit_on_success':
            self.transaction_decorator = transaction.atomic
        else:
            raise LayerMapError('Unrecognized transaction mode: %s' % transaction_mode)

    # #### Checking routines used during initialization ####
    def check_fid_range(self, fid_range):
        "This checks the `fid_range` keyword."
        if fid_range:
            if isinstance(fid_range, (tuple, list)):
                return slice(*fid_range)
            elif isinstance(fid_range, slice):
                return fid_range
            else:
                raise TypeError
        else:
            return None

    def check_layer(self):
        """
        This checks the Layer metadata, and ensures that it is compatible
        with the mapping information and model. Unlike previous revisions,
        there is no need to increment through each feature in the Layer.
        """
        # The geometry field of the model is set here.
        # TODO: Support more than one geometry field / model. However, this
        # depends on the GDAL Driver in use.
        self.geom_field = False
        self.fields = {}
        # Getting lists of the field names and the field types available in
        # the OGR Layer.
        ogr_fields = self.layer.fields
        ogr_field_types = self.layer.field_types

        # Function for determining if the OGR mapping field is in the Layer.
        def check_ogr_fld(ogr_map_fld):
            try:
                idx = ogr_fields.index(ogr_map_fld)
            except ValueError:
                raise LayerMapError('Given mapping OGR field "%s" not found in OGR Layer.' % ogr_map_fld)
            return idx

        # No need to increment through each feature in the model, simply check
        # the Layer metadata against what was given in the mapping dictionary.
        for field_name, ogr_name in self.mapping.items():
            # Ensuring that a corresponding field exists in the model
            # for the given field name in the mapping.
            try:
                model_field = self.model._meta.get_field(field_name)
            except FieldDoesNotExist:
                raise LayerMapError('Given mapping field "%s" not in given Model fields.' % field_name)

            # Getting the string name for the Django field class (e.g., 'PointField').
            fld_name = model_field.__class__.__name__

            if isinstance(model_field, GeometryField):
                if self.geom_field:
                    raise LayerMapError('LayerMapping does not support more than one GeometryField per model.')

                # Getting the coordinate dimension of the geometry field.
                coord_dim = model_field.dim

                try:
                    if coord_dim == 3:
                        gtype = OGRGeomType(ogr_name + '25D')
                    else:
                        gtype = OGRGeomType(ogr_name)
                except GDALException:
                    raise LayerMapError('Invalid mapping for GeometryField "%s".' % field_name)

                # Making sure that the OGR Layer's Geometry is compatible.
                ltype = self.layer.geom_type
                if not (ltype.name.startswith(gtype.name) or self.make_multi(ltype, model_field)):
                    raise LayerMapError('Invalid mapping geometry; model has %s%s, '
                                        'layer geometry type is %s.' %
                                        (fld_name, '(dim=3)' if coord_dim == 3 else '', ltype))

                # Setting the `geom_field` attribute w/the name of the model field
                # that is a Geometry. Also setting the coordinate dimension
                # attribute.
                self.geom_field = field_name
                self.coord_dim = coord_dim
                fields_val = model_field
            elif isinstance(model_field, models.ForeignKey):
                if isinstance(ogr_name, dict):
                    # Is every given related model mapping field in the Layer?
                    rel_model = model_field.remote_field.model
                    for rel_name, ogr_field in ogr_name.items():
                        idx = check_ogr_fld(ogr_field)
                        try:
                            rel_model._meta.get_field(rel_name)
                        except FieldDoesNotExist:
                            raise LayerMapError('ForeignKey mapping field "%s" not in %s fields.' %
                                                (rel_name, rel_model.__class__.__name__))
                    fields_val = rel_model
                else:
                    raise TypeError('ForeignKey mapping must be of dictionary type.')
            else:
                # Is the model field type supported by LayerMapping?
                if model_field.__class__ not in self.FIELD_TYPES:
                    raise LayerMapError('Django field type "%s" has no OGR mapping (yet).' % fld_name)

                # Is the OGR field in the Layer?
                idx = check_ogr_fld(ogr_name)
                ogr_field = ogr_field_types[idx]

                # Can the OGR field type be mapped to the Django field type?
                if not issubclass(ogr_field, self.FIELD_TYPES[model_field.__class__]):
                    raise LayerMapError('OGR field "%s" (of type %s) cannot be mapped to Django %s.' %
                                        (ogr_field, ogr_field.__name__, fld_name))
                fields_val = model_field

            self.fields[field_name] = fields_val

    def check_srs(self, source_srs):
        "Checks the compatibility of the given spatial reference object."
        if isinstance(source_srs, SpatialReference):
            sr = source_srs
        elif isinstance(source_srs, self.spatial_backend.spatial_ref_sys()):
            sr = source_srs.srs
        elif isinstance(source_srs, (int, six.string_types)):
            sr = SpatialReference(source_srs)
        else:
            # Otherwise just pulling the SpatialReference from the layer
            sr = self.layer.srs
        if not sr:
            raise LayerMapError('No source reference system defined.')
        else:
            return sr

    def check_unique(self, unique):
        "Checks the `unique` keyword parameter -- may be a sequence or string."
        if isinstance(unique, (list, tuple)):
            # List of fields to determine uniqueness with
            for attr in unique:
                if attr not in self.mapping:
                    raise ValueError
        elif isinstance(unique, six.string_types):
            # Only a single field passed in.
            if unique not in self.mapping:
                raise ValueError
        else:
            raise TypeError('Unique keyword argument must be set with a tuple, list, or string.')

    # Keyword argument retrieval routines ####
    def feature_kwargs(self, feat):
        """
        Given an OGR Feature, this will return a dictionary of keyword arguments
        for constructing the mapped model.
        """
        # The keyword arguments for model construction.
        kwargs = {}

        # Incrementing through each model field and OGR field in the
        # dictionary mapping.
        for field_name, ogr_name in self.mapping.items():
            model_field = self.fields[field_name]

            if isinstance(model_field, GeometryField):
                # Verify OGR geometry.
                try:
                    val = self.verify_geom(feat.geom, model_field)
                except GDALException:
                    raise LayerMapError('Could not retrieve geometry from feature.')
            elif isinstance(model_field, models.base.ModelBase):
                # The related _model_, not a field was passed in -- indicating
                # another mapping for the related Model.
                val = self.verify_fk(feat, model_field, ogr_name)
            else:
                # Otherwise, verify OGR Field type.
                val = self.verify_ogr_field(feat[ogr_name], model_field)

            # Setting the keyword arguments for the field name with the
            # value obtained above.
            kwargs[field_name] = val

        return kwargs

    def unique_kwargs(self, kwargs):
        """
        Given the feature keyword arguments (from `feature_kwargs`) this routine
        will construct and return the uniqueness keyword arguments -- a subset
        of the feature kwargs.
        """
        if isinstance(self.unique, six.string_types):
            return {self.unique: kwargs[self.unique]}
        else:
            return {fld: kwargs[fld] for fld in self.unique}

    # #### Verification routines used in constructing model keyword arguments. ####
    def verify_ogr_field(self, ogr_field, model_field):
        """
        Verifies if the OGR Field contents are acceptable to the Django
        model field. If they are, the verified value is returned,
        otherwise the proper exception is raised.
        """
        if (isinstance(ogr_field, OFTString) and
                isinstance(model_field, (models.CharField, models.TextField))):
            if self.encoding:
                # The encoding for OGR data sources may be specified here
                # (e.g., 'cp437' for Census Bureau boundary files).
                val = force_text(ogr_field.value, self.encoding)
            else:
                val = ogr_field.value
            if model_field.max_length and len(val) > model_field.max_length:
                raise InvalidString('%s model field maximum string length is %s, given %s characters.' %
                                    (model_field.name, model_field.max_length, len(val)))
        elif isinstance(ogr_field, OFTReal) and isinstance(model_field, models.DecimalField):
            try:
                # Creating an instance of the Decimal value to use.
                d = Decimal(str(ogr_field.value))
            except DecimalInvalidOperation:
                raise InvalidDecimal('Could not construct decimal from: %s' % ogr_field.value)

            # Getting the decimal value as a tuple.
            dtup = d.as_tuple()
            digits = dtup[1]
            d_idx = dtup[2]  # index where the decimal is

            # Maximum amount of precision, or digits to the left of the decimal.
            max_prec = model_field.max_digits - model_field.decimal_places

            # Getting the digits to the left of the decimal place for the
            # given decimal.
            if d_idx < 0:
                n_prec = len(digits[:d_idx])
            else:
                n_prec = len(digits) + d_idx

            # If we have more than the maximum digits allowed, then throw an
            # InvalidDecimal exception.
            if n_prec > max_prec:
                raise InvalidDecimal(
                    'A DecimalField with max_digits %d, decimal_places %d must '
                    'round to an absolute value less than 10^%d.' %
                    (model_field.max_digits, model_field.decimal_places, max_prec)
                )
            val = d
        elif isinstance(ogr_field, (OFTReal, OFTString)) and isinstance(model_field, models.IntegerField):
            # Attempt to convert any OFTReal and OFTString value to an OFTInteger.
            try:
                val = int(ogr_field.value)
            except ValueError:
                raise InvalidInteger('Could not construct integer from: %s' % ogr_field.value)
        else:
            val = ogr_field.value
        return val

    def verify_fk(self, feat, rel_model, rel_mapping):
        """
        Given an OGR Feature, the related model and its dictionary mapping,
        this routine will retrieve the related model for the ForeignKey
        mapping.
        """
        # TODO: It is expensive to retrieve a model for every record --
        # explore if an efficient mechanism exists for caching related
        # ForeignKey models.

        # Constructing and verifying the related model keyword arguments.
        fk_kwargs = {}
        for field_name, ogr_name in rel_mapping.items():
            fk_kwargs[field_name] = self.verify_ogr_field(feat[ogr_name], rel_model._meta.get_field(field_name))

        # Attempting to retrieve and return the related model.
        try:
            return rel_model.objects.using(self.using).get(**fk_kwargs)
        except ObjectDoesNotExist:
            raise MissingForeignKey(
                'No ForeignKey %s model found with keyword arguments: %s' %
                (rel_model.__name__, fk_kwargs)
            )

    def verify_geom(self, geom, model_field):
        """
        Verifies the geometry -- will construct and return a GeometryCollection
        if necessary (for example if the model field is MultiPolygonField while
        the mapped shapefile only contains Polygons).
        """
        # Downgrade a 3D geom to a 2D one, if necessary.
        if self.coord_dim != geom.coord_dim:
            geom.coord_dim = self.coord_dim

        if self.make_multi(geom.geom_type, model_field):
            # Constructing a multi-geometry type to contain the single geometry
            multi_type = self.MULTI_TYPES[geom.geom_type.num]
            g = OGRGeometry(multi_type)
            g.add(geom)
        else:
            g = geom

        # Transforming the geometry with our Coordinate Transformation object,
        # but only if the class variable `transform` is set w/a CoordTransform
        # object.
        if self.transform:
            g.transform(self.transform)

        # Returning the WKT of the geometry.
        return g.wkt

    # #### Other model methods ####
    def coord_transform(self):
        "Returns the coordinate transformation object."
        SpatialRefSys = self.spatial_backend.spatial_ref_sys()
        try:
            # Getting the target spatial reference system
            target_srs = SpatialRefSys.objects.using(self.using).get(srid=self.geo_field.srid).srs

            # Creating the CoordTransform object
            return CoordTransform(self.source_srs, target_srs)
        except Exception as msg:
            new_msg = 'Could not translate between the data source and model geometry: %s' % msg
            six.reraise(LayerMapError, LayerMapError(new_msg), sys.exc_info()[2])

    def geometry_field(self):
        "Returns the GeometryField instance associated with the geographic column."
        # Use `get_field()` on the model's options so that we
        # get the correct field instance if there's model inheritance.
        opts = self.model._meta
        return opts.get_field(self.geom_field)

    def make_multi(self, geom_type, model_field):
        """
        Given the OGRGeomType for a geometry and its associated GeometryField,
        determine whether the geometry should be turned into a GeometryCollection.
        """
        return (geom_type.num in self.MULTI_TYPES and
                model_field.__class__.__name__ == 'Multi%s' % geom_type.django)

    def save(self, verbose=False, fid_range=False, step=False,
             progress=False, silent=False, stream=sys.stdout, strict=False):
        """
        Saves the contents from the OGR DataSource Layer into the database
        according to the mapping dictionary given at initialization.

        Keyword Parameters:
         verbose:
           If set, information will be printed subsequent to each model save
           executed on the database.

         fid_range:
           May be set with a slice or tuple of (begin, end) feature ID's to map
           from the data source.  In other words, this keyword enables the user
           to selectively import a subset range of features in the geographic
           data source.

         step:
           If set with an integer, transactions will occur at every step
           interval. For example, if step=1000, a commit would occur after
           the 1,000th feature, the 2,000th feature etc.

         progress:
           When this keyword is set, status information will be printed giving
           the number of features processed and successfully saved.  By default,
           progress information will be printed every 1000 features processed,
           however, this default may be overridden by setting this keyword with an
           integer for the desired interval.

         stream:
           Status information will be written to this file handle.  Defaults to
           using `sys.stdout`, but any object with a `write` method is supported.

         silent:
           By default, non-fatal error notifications are printed to stdout, but
           this keyword may be set to disable these notifications.

         strict:
           Execution of the model mapping will cease upon the first error
           encountered.  The default behavior is to attempt to continue.
        """
        # Getting the default Feature ID range.
        default_range = self.check_fid_range(fid_range)

        # Setting the progress interval, if requested.
        if progress:
            if progress is True or not isinstance(progress, int):
                progress_interval = 1000
            else:
                progress_interval = progress

        def _save(feat_range=default_range, num_feat=0, num_saved=0):
            if feat_range:
                layer_iter = self.layer[feat_range]
            else:
                layer_iter = self.layer

            for feat in layer_iter:
                num_feat += 1
                # Getting the keyword arguments
                try:
                    kwargs = self.feature_kwargs(feat)
                except LayerMapError as msg:
                    # Something borked the validation
                    if strict:
                        raise
                    elif not silent:
                        stream.write('Ignoring Feature ID %s because: %s\n' % (feat.fid, msg))
                else:
                    # Constructing the model using the keyword args
                    is_update = False
                    if self.unique:
                        # If we want unique models on a particular field, handle the
                        # geometry appropriately.
                        try:
                            # Getting the keyword arguments and retrieving
                            # the unique model.
                            u_kwargs = self.unique_kwargs(kwargs)
                            m = self.model.objects.using(self.using).get(**u_kwargs)
                            is_update = True

                            # Getting the geometry (in OGR form), creating
                            # one from the kwargs WKT, adding in additional
                            # geometries, and update the attribute with the
                            # just-updated geometry WKT.
                            geom = getattr(m, self.geom_field).ogr
                            new = OGRGeometry(kwargs[self.geom_field])
                            for g in new:
                                geom.add(g)
                            setattr(m, self.geom_field, geom.wkt)
                        except ObjectDoesNotExist:
                            # No unique model exists yet, create.
                            m = self.model(**kwargs)
                    else:
                        m = self.model(**kwargs)

                    try:
                        # Attempting to save.
                        m.save(using=self.using)
                        num_saved += 1
                        if verbose:
                            stream.write('%s: %s\n' % ('Updated' if is_update else 'Saved', m))
                    except Exception as msg:
                        if strict:
                            # Bailing out if the `strict` keyword is set.
                            if not silent:
                                stream.write(
                                    'Failed to save the feature (id: %s) into the '
                                    'model with the keyword arguments:\n' % feat.fid
                                )
                                stream.write('%s\n' % kwargs)
                            raise
                        elif not silent:
                            stream.write('Failed to save %s:\n %s\nContinuing\n' % (kwargs, msg))

                # Printing progress information, if requested.
                if progress and num_feat % progress_interval == 0:
                    stream.write('Processed %d features, saved %d ...\n' % (num_feat, num_saved))

            # Only used for status output purposes -- incremental saving uses the
            # values returned here.
            return num_saved, num_feat

        if self.transaction_decorator is not None:
            _save = self.transaction_decorator(_save)

        nfeat = self.layer.num_feat
        if step and isinstance(step, int) and step < nfeat:
            # Incremental saving is requested at the given interval (step)
            if default_range:
                raise LayerMapError('The `step` keyword may not be used in conjunction with the `fid_range` keyword.')
            beg, num_feat, num_saved = (0, 0, 0)
            indices = range(step, nfeat, step)
            n_i = len(indices)

            for i, end in enumerate(indices):
                # Constructing the slice to use for this step; the last slice is
                # special (e.g, [100:] instead of [90:100]).
                if i + 1 == n_i:
                    step_slice = slice(beg, None)
                else:
                    step_slice = slice(beg, end)

                try:
                    num_feat, num_saved = _save(step_slice, num_feat, num_saved)
                    beg = end
                except Exception:  # Deliberately catch everything
                    stream.write('%s\nFailed to save slice: %s\n' % ('=-' * 20, step_slice))
                    raise
        else:
            # Otherwise, just calling the previously defined _save() function.
            _save()
| gpl-3.0 |
lz1988/django-web2015 | django/dispatch/saferef.py | 218 | 10623 | """
"Safe weakrefs", originally from pyDispatcher.
Provides a way to safely weakref any function, including bound methods (which
aren't handled by the core weakref module).
"""
import traceback
import weakref
def safeRef(target, onDelete = None):
    """Return a *safe* weak reference to a callable target.

    target -- the object to be weakly referenced; a bound method is
        tracked with a BoundMethodWeakref, anything else with a plain
        weakref.ref.
    onDelete -- optional callable invoked (with the reference object as
        its single argument) once the referent has been garbage
        collected.
    """
    bound_self = getattr(target, '__self__', None)
    if bound_self is not None:
        # Bound method: the method object itself is transient, so track
        # the (instance, function) pair via a reusable BoundMethodWeakref.
        assert hasattr(target, '__func__'), """safeRef target %r has __self__, but no __func__, don't know how to create reference""" % (target,)
        return get_bound_method_weakref(target=target, onDelete=onDelete)
    if callable(onDelete):
        return weakref.ref(target, onDelete)
    return weakref.ref(target)
class BoundMethodWeakref(object):
    """'Safe' and reusable weak references to instance methods

    BoundMethodWeakref objects provide a mechanism for
    referencing a bound method without requiring that the
    method object itself (which is normally a transient
    object) is kept alive.  Instead, the BoundMethodWeakref
    object keeps weak references to both the object and the
    function which together define the instance method.

    Attributes:
        key -- the identity key for the reference, calculated
            by the class's calculateKey method applied to the
            target instance method
        deletionMethods -- sequence of callable objects taking
            single argument, a reference to this object which
            will be called when *either* the target object or
            target function is garbage collected (i.e. when
            this object becomes invalid).  These are specified
            as the onDelete parameters of safeRef calls.
        weakSelf -- weak reference to the target object
        weakFunc -- weak reference to the target function

    Class Attributes:
        _allInstances -- class attribute pointing to all live
            BoundMethodWeakref objects indexed by the class's
            calculateKey(target) method applied to the target
            objects.  This weak value dictionary is used to
            short-circuit creation so that multiple references
            to the same (object, function) pair produce the
            same BoundMethodWeakref instance.
    """
    # Registry of live references; weak values so entries vanish when the
    # reference object itself is collected.
    _allInstances = weakref.WeakValueDictionary()

    def __new__( cls, target, onDelete=None, *arguments,**named ):
        """Create new instance or return current instance

        Basically this method of construction allows us to
        short-circuit creation of references to already-
        referenced instance methods.  The key corresponding
        to the target is calculated, and if there is already
        an existing reference, that is returned, with its
        deletionMethods attribute updated.  Otherwise the
        new instance is created and registered in the table
        of already-referenced methods.
        """
        key = cls.calculateKey(target)
        current =cls._allInstances.get(key)
        if current is not None:
            current.deletionMethods.append( onDelete)
            return current
        else:
            # __init__ is called explicitly here because returning an
            # existing instance above must NOT re-run initialization.
            base = super( BoundMethodWeakref, cls).__new__( cls )
            cls._allInstances[key] = base
            base.__init__( target, onDelete, *arguments,**named)
            return base

    def __init__(self, target, onDelete=None):
        """Return a weak-reference-like instance for a bound method

        target -- the instance-method target for the weak
            reference, must have __self__ and __func__ attributes
            and be reconstructable via:
                target.__func__.__get__( target.__self__ )
            which is true of built-in instance methods.
        onDelete -- optional callback which will be called
            when this weak reference ceases to be valid
            (i.e. either the object or the function is garbage
            collected).  Should take a single argument,
            which will be passed a pointer to this object.
        """
        def remove(weak, self=self):
            """Set self.isDead to true when method or instance is destroyed"""
            # Copy-then-clear so callbacks run at most once even if both
            # weakSelf and weakFunc die and trigger remove() twice.
            methods = self.deletionMethods[:]
            del self.deletionMethods[:]
            try:
                del self.__class__._allInstances[ self.key ]
            except KeyError:
                pass
            for function in methods:
                try:
                    if callable( function ):
                        function( self )
                except Exception as e:
                    try:
                        traceback.print_exc()
                    except AttributeError:
                        print('Exception during saferef %s cleanup function %s: %s' % (
                            self, function, e)
                        )
        self.deletionMethods = [onDelete]
        self.key = self.calculateKey( target )
        self.weakSelf = weakref.ref(target.__self__, remove)
        self.weakFunc = weakref.ref(target.__func__, remove)
        self.selfName = str(target.__self__)
        self.funcName = str(target.__func__.__name__)

    def calculateKey( cls, target ):
        """Calculate the reference key for this reference

        Currently this is a two-tuple of the id()'s of the
        target object and the target function respectively.
        """
        return (id(target.__self__),id(target.__func__))
    calculateKey = classmethod( calculateKey )

    def __str__(self):
        """Give a friendly representation of the object"""
        return """%s( %s.%s )"""%(
            self.__class__.__name__,
            self.selfName,
            self.funcName,
        )

    __repr__ = __str__

    def __hash__(self):
        return hash(self.key)

    def __bool__( self ):
        """Whether we are still a valid reference"""
        return self() is not None

    def __nonzero__(self):  # Python 2 compatibility
        return type(self).__bool__(self)

    def __eq__(self, other):
        """Compare with another reference"""
        if not isinstance(other, self.__class__):
            return self.__class__ == type(other)
        return self.key == other.key

    def __call__(self):
        """Return a strong reference to the bound method

        If the target cannot be retrieved, then will
        return None, otherwise returns a bound instance
        method for our object and function.

        Note:
            You may call this method any number of times,
            as it does not invalidate the reference.
        """
        target = self.weakSelf()
        if target is not None:
            function = self.weakFunc()
            if function is not None:
                # Re-bind the function to the instance via the descriptor
                # protocol, reconstructing the transient bound method.
                return function.__get__(target)
        return None
class BoundNonDescriptorMethodWeakref(BoundMethodWeakref):
    """A specialized BoundMethodWeakref, for platforms where instance methods
    are not descriptors.

    It assumes that the function name and the target attribute name are the
    same, instead of assuming that the function is a descriptor. This approach
    is equally fast, but not 100% reliable because functions can be stored on
    an attribute named differently than the function's name, such as in:
        class A: pass
        def foo(self): return "foo"
        A.bar = foo
    But this shouldn't be a common use case. So, on platforms where methods
    aren't descriptors (such as Jython) this implementation has the advantage
    of working in the most cases.
    """
    def __init__(self, target, onDelete=None):
        """Return a weak-reference-like instance for a bound method

        target -- the instance-method target for the weak
            reference, must have __self__ and __func__ attributes
            and be reconstructable via:
                target.__func__.__get__( target.__self__ )
            which is true of built-in instance methods.
        onDelete -- optional callback which will be called
            when this weak reference ceases to be valid
            (i.e. either the object or the function is garbage
            collected). Should take a single argument,
            which will be passed a pointer to this object.
        """
        # Sanity check: the rebuilding strategy below relies on the method
        # being reachable as an attribute of the same name on the instance.
        assert getattr(target.__self__, target.__name__) == target, \
               ("method %s isn't available as the attribute %s of %s" %
                (target, target.__name__, target.__self__))
        super(BoundNonDescriptorMethodWeakref, self).__init__(target, onDelete)
    def __call__(self):
        """Return a strong reference to the bound method

        If the target cannot be retrieved, then will
        return None, otherwise returns a bound instance
        method for our object and function.

        Note:
            You may call this method any number of times,
            as it does not invalidate the reference.
        """
        target = self.weakSelf()
        if target is not None:
            function = self.weakFunc()
            if function is not None:
                # Using partial() would be another option, but it erases the
                # "signature" of the function. That is, after a function is
                # curried, the inspect module can't be used to determine how
                # many arguments the function expects, nor what keyword
                # arguments it supports, and pydispatcher needs this
                # information.
                return getattr(target, function.__name__)
        return None
def get_bound_method_weakref(target, onDelete):
    """Instantiate the appropriate BoundMethodWeakref implementation.

    Chooses the descriptor-based implementation when the target method
    supports ``__get__``, and the attribute-lookup fallback otherwise
    (e.g. on platforms such as Jython where methods are not descriptors).
    """
    if not hasattr(target, '__get__'):
        # Methods are not descriptors here; use the name-lookup fallback.
        return BoundNonDescriptorMethodWeakref(target=target, onDelete=onDelete)
    # Normal CPython path: instance methods are descriptors.
    return BoundMethodWeakref(target=target, onDelete=onDelete)
| bsd-2-clause |
alheinecke/tensorflow-xsmm | tensorflow/tools/quantization/quantize_graph_test.py | 28 | 42167 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests the graph quantization script.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import numpy as np
from tensorflow.core.framework import graph_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops as ops_lib
from tensorflow.python.platform import flags as flags_lib
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
from tensorflow.tools.quantization import quantize_graph
flags = flags_lib
FLAGS = flags.FLAGS
def run_graph_def(graph_def, input_map, outputs):
  """Imports graph_def into a fresh graph and evaluates the given outputs.

  Args:
    graph_def: GraphDef proto to execute.
    input_map: feed_dict mapping tensor names to values.
    outputs: list of tensor names to fetch.

  Returns:
    List of fetched output values.
  """
  g = ops_lib.Graph()
  with g.as_default():
    importer.import_graph_def(graph_def, input_map={}, name="")
  # Inputs are supplied through feed_dict rather than the import-time
  # input_map, so placeholders stay in the imported graph.
  with session.Session(graph=g) as sess:
    return sess.run(outputs, feed_dict=input_map)
def test_mat_mul(m, n, k, a, b):
  """Builds an (m,k) x (k,n) float MatMul graph and checks its quantization."""
  float_graph_def = graph_pb2.GraphDef()
  lhs_node = quantize_graph.create_constant_node(
      "a_constant", value=a, dtype=dtypes.float32, shape=[m, k])
  rhs_node = quantize_graph.create_constant_node(
      "b_constant", value=b, dtype=dtypes.float32, shape=[k, n])
  matmul_node = quantize_graph.create_node("MatMul", "mat_mul",
                                           ["a_constant", "b_constant"])
  quantize_graph.set_attr_dtype(matmul_node, "T", dtypes.float32)
  quantize_graph.set_attr_bool(matmul_node, "transpose_a", False)
  quantize_graph.set_attr_bool(matmul_node, "transpose_b", False)
  # Node order matches the incremental extends of the original builder.
  float_graph_def.node.extend([lhs_node, rhs_node, matmul_node])
  test_graph(float_graph_def, {}, ["mat_mul"])
def test_conv(depth, image_width, image_height, image_batch_count, filter_size,
              filter_count, stride, padding, input_values, filter_values):
  """Builds a float Conv2D graph and checks its quantized rewrite."""
  float_graph_def = graph_pb2.GraphDef()
  image_node = quantize_graph.create_constant_node(
      "input_constant",
      value=input_values,
      dtype=dtypes.float32,
      shape=[image_batch_count, image_height, image_width, depth])
  kernel_node = quantize_graph.create_constant_node(
      "filter_constant",
      value=filter_values,
      dtype=dtypes.float32,
      shape=[filter_size, filter_size, depth, filter_count])
  conv_node = quantize_graph.create_node(
      "Conv2D", "conv", ["input_constant", "filter_constant"])
  quantize_graph.set_attr_dtype(conv_node, "T", dtypes.float32)
  quantize_graph.set_attr_int_list(conv_node, "strides", [1, stride, stride, 1])
  quantize_graph.set_attr_string(conv_node, "padding", padding)
  float_graph_def.node.extend([image_node, kernel_node, conv_node])
  test_graph(float_graph_def, {}, ["conv"])
def are_tensors_near(a, b, tolerance):
  """Tests whether two tensors are nearly identical.

  This is a specialized comparison function designed to help debug problems
  with quantization. It prints out information about the differences between
  tensors on failure, paying special attention to possible biases by looking
  at the mean and absolute average errors.

  Args:
    a: First comparison tensor.
    b: Second comparison tensor.
    tolerance: Float value indicating how large an error between values is ok.

  Returns:
    Boolean indicating whether the two inputs were close enough.
  """
  flat_a = a.flatten()
  flat_b = b.flatten()
  if len(flat_a) != len(flat_b):
    print("Tensors are different sizes: " + str(len(flat_a)) + " vs " + str(
        len(flat_b)))
    return False
  value_count = len(flat_a)
  # Vectorized comparison: the original per-element Python loop did O(n)
  # interpreter work for what numpy computes in C; results are identical.
  differences = flat_a - flat_b
  abs_differences = np.abs(differences)
  how_many_different = int(np.count_nonzero(abs_differences > tolerance))
  if how_many_different == 0:
    return True
  mean_difference = np.sum(differences) / value_count
  mean_abs_difference = np.sum(abs_differences) / value_count
  proportion_different = (how_many_different * 1.0) / value_count
  print("Tensors have {0} different values ({1}%), with mean difference"
        " {2} and mean absolute difference {3}".format(
            how_many_different, proportion_different * 100, mean_difference,
            mean_abs_difference))
  return False
def get_top_value(input_values):
  """Returns (index, value) of the largest element of input_values.

  Args:
    input_values: Array-like with a .flatten() method (e.g. a numpy array).

  Returns:
    Tuple of (flat index of the maximum value, the maximum value itself),
    or (None, None) for an empty input.
  """
  max_value = None
  max_index = None
  for index, value in enumerate(input_values.flatten()):
    # Bug fix: this previously read `value > max`, comparing against the
    # `max` builtin function (a TypeError on Python 3) instead of the
    # running maximum `max_value`.
    if max_value is None or value > max_value:
      max_value = value
      max_index = index
  return max_index, max_value
def test_graph(float_graph_def, input_map, output_names, log_graph=False):
  """Runs the float graph through the rewriter and tests the results.

  Rewrites the graph in both "eightbit" and "weights_rounded" modes and
  requires every rewritten output to be within 1.0 of the float graph's
  output for the same feeds.

  Args:
    float_graph_def: GraphDef of the unquantized graph under test.
    input_map: feed_dict of tensor-name -> value pairs.
    output_names: node names whose ":0" outputs are compared.
    log_graph: if True, log the eightbit-rewritten GraphDef.
  """
  float_results = run_graph_def(
      float_graph_def, input_map,
      [output_name + ":0" for output_name in output_names])
  # TODO(petewarden): round test is currently failing because there is no
  # RoundToSteps op available.
  # round_rewriter = quantize_graph.GraphRewriter(float_graph_def, "round")
  # round_graph_def = round_rewriter.rewrite(output_name)
  # round_results = run_graph_def(round_graph_def, input_map,
  #                               [output_name + ":0"])
  # assert are_tensors_near(expected, round_results[0], 1.0)
  #
  # TODO(petewarden): Add test for "quantize" mode.
  eightbit_rewriter = quantize_graph.GraphRewriter(
      float_graph_def, "eightbit", quantized_input_range=None)
  eightbit_graph_def = eightbit_rewriter.rewrite(output_names)
  eightbit_results = run_graph_def(
      eightbit_graph_def, input_map,
      [output_name + ":0" for output_name in output_names])
  # Quantization error must stay within one unit of the float result.
  for expected, result in zip(float_results, eightbit_results):
    assert are_tensors_near(expected, result, 1.0)
  if log_graph:
    tf_logging.info("8bit:\n%s", str(eightbit_graph_def))
  # Test the weights_rounded mode. This uses the default bit_depth.
  weights_rounded_rewriter = quantize_graph.GraphRewriter(
      float_graph_def, "weights_rounded", quantized_input_range=None)
  weights_rounded_graph_def = weights_rounded_rewriter.rewrite(output_names)
  weights_rounded_results = run_graph_def(
      weights_rounded_graph_def, input_map,
      [output_name + ":0" for output_name in output_names])
  for expected, result in zip(float_results, weights_rounded_results):
    assert are_tensors_near(expected, result, 1.0)
class QuantizeGraphTest(test.TestCase):
def test_negative_const_problem(self):
shape_constant_name = "shape_constant"
shape_constant = quantize_graph.create_constant_node(
shape_constant_name, value=-0.8, dtype=dtypes.float32, shape=[1])
quantization_result = quantize_graph.quantize_weight_eightbit(
shape_constant, b"MIN_COMBINED")
self.assertEqual(4, len(quantization_result))
def test_odd_padding_problem(self):
"""Tests one error case we ran into in a real graph."""
test_conv(1, 4, 4, 1, 3, 1, 2, b"SAME",
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
[1, 2, 3, 4, 5, 6, 7, 8, 9])
def test_mat_mul_tiny(self):
# These tests are added to test the generate case where
# min(matrix) == max(matrix), which used to cause problems.
test_mat_mul(1, 1, 1, [2], [3])
test_mat_mul(1, 2, 1, [1], [2, 3])
test_mat_mul(1, 1, 2, [1, 1], [1, 1])
test_mat_mul(1, 1, 2, [0, 0], [1, 1])
# The general case.
test_mat_mul(1, 1, 2, [1, 2], [1, 2])
def test_mat_mul_small(self):
test_mat_mul(2, 4, 3, [1, 2, 3, 4, 5, 6],
[7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18])
def test_conv(self):
test_conv(1, 4, 3, 1, 3, 1, 1, b"SAME",
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
[1, 4, 7, 2, 5, 8, 3, 6, 9])
  def test_reshape(self):
    """Tests that MatMul->Reshape->MatMul avoids extra quantize/dequantize."""
    def make_matmul(name, a, b):
      # Helper: float32 MatMul node (no transposes) consuming nodes a and b.
      n = quantize_graph.create_node("MatMul", name, [a.name, b.name])
      quantize_graph.set_attr_dtype(n, "T", dtypes.float32)
      quantize_graph.set_attr_bool(n, "transpose_a", False)
      quantize_graph.set_attr_bool(n, "transpose_b", False)
      return n
    # matmul_1 = input*weight_1
    input_node = quantize_graph.create_constant_node(
        "input", value=[0, 1, 2, 3], dtype=dtypes.float32, shape=[4, 1])
    weight_1_node = quantize_graph.create_constant_node(
        "weight_1",
        value=[.5, .6, .7, .8, .9],
        dtype=dtypes.float32,
        shape=[1, 5])
    matmul_1_node = make_matmul("matmul_1", input_node, weight_1_node)
    # Reshape 4x5 to 10x2.
    new_shape_node = quantize_graph.create_constant_node(
        "new_shape_node", value=[10, 2], dtype=dtypes.int32, shape=[2])
    reshape_node = quantize_graph.create_node(
        "Reshape", "reshape", [matmul_1_node.name, new_shape_node.name])
    quantize_graph.set_attr_dtype(reshape_node, "T", dtypes.float32)
    # matmul_2_node = reshape*weight_2
    weight_2_node = quantize_graph.create_constant_node(
        "weight_2", value=[1.5, 2.5], dtype=dtypes.float32, shape=[2, 1])
    matmul_2_node = make_matmul("matmul_2", reshape_node, weight_2_node)
    g = graph_pb2.GraphDef()
    g.node.extend([
        input_node, weight_1_node, matmul_1_node, new_shape_node, reshape_node,
        weight_2_node, matmul_2_node
    ])
    # Test the graph
    test_graph(g, {}, ["matmul_2"])
    # Verify there is only one Quantize and one Requantize op.
    eightbit_rewriter = quantize_graph.GraphRewriter(
        g, "eightbit", quantized_input_range=None)
    eightbit_graph_def = eightbit_rewriter.rewrite(["matmul_2"])
    ops = [node.op for node in eightbit_graph_def.node]
    # No quantize since all inputs are const and can be quantized up-front.
    self.assertEqual(0, ops.count("QuantizeV2") + ops.count("Quantize"))
    self.assertEqual(1, ops.count("QuantizedReshape"))
    # One dequantize at the end.
    self.assertEqual(1, ops.count("Dequantize"))
  def test_quantize_array(self):
    """Exercises quantize_array on invalid, degenerate, and normal inputs."""
    # Test invalid parameters (empty array, or 0 buckets).
    self.assertRaises(ValueError, quantize_graph.quantize_array, np.array([]),
                      2)
    self.assertRaises(ValueError, quantize_graph.quantize_array,
                      np.array([1, 2]), 0)
    # Test input array of length 1.
    arr = np.array([1])
    qarr = quantize_graph.quantize_array(arr, 1)
    self.assertEqual(arr, qarr)
    qarr = quantize_graph.quantize_array(arr, 2)
    self.assertEqual(arr, qarr)
    # Test input array with all elements equal.
    arr = np.array([1, 1, 1])
    qarr = quantize_graph.quantize_array(arr, 10)
    self.assertTrue((np.array([1, 1, 1]) == qarr).all())
    # Test "normal" input arrays.
    arr = np.array([0, 0.3, 0.6, 1])
    qarr = quantize_graph.quantize_array(arr, 1)
    self.assertTrue((np.array([0.5, 0.5, 0.5, 0.5]) == qarr).all())
    qarr = quantize_graph.quantize_array(arr, 2)
    self.assertTrue((np.array([0.25, 0.25, 0.75, 0.75]) == qarr).all())
    # Quantization should be applied element-wise regardless of shape.
    qarr = quantize_graph.quantize_array(arr.reshape((2, 2)), 2)
    self.assertTrue((np.array([[0.25, 0.25], [0.75, 0.75]]) == qarr).all())
def test_non_float_concat(self):
concat_dim = quantize_graph.create_constant_node(
"concat_dim", value=0, dtype=dtypes.int32, shape=[])
a = quantize_graph.create_constant_node(
"a",
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.int32,
shape=[2, 2, 3])
b = quantize_graph.create_constant_node(
"b",
value=[13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24],
dtype=dtypes.int32,
shape=[2, 2, 3])
concat = quantize_graph.create_node("Concat", "concat",
[concat_dim.name, a.name, b.name])
quantize_graph.set_attr_int(concat, "N", 2)
quantize_graph.set_attr_dtype(concat, "T", dtypes.int32)
g = graph_pb2.GraphDef()
g.node.extend([concat_dim, a, b, concat])
test_graph(g, {}, [concat.name])
def test_non_float_reshape(self):
a = quantize_graph.create_constant_node(
"a",
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.int32,
shape=[2, 2, 3])
shape = quantize_graph.create_constant_node(
"shape", value=[12], dtype=dtypes.int32, shape=[1])
reshape = quantize_graph.create_node("Reshape", "reshape",
[a.name, shape.name])
quantize_graph.set_attr_dtype(reshape, "T", dtypes.int32)
g = graph_pb2.GraphDef()
g.node.extend([a, shape, reshape])
test_graph(g, {}, [reshape.name])
def test_concat(self):
shape_constant_name = "shape_constant"
a_constant_name = "a_constant"
b_constant_name = "b_constant"
concat_name = "concat"
float_graph_def = graph_pb2.GraphDef()
shape_constant = quantize_graph.create_constant_node(
shape_constant_name, value=0, dtype=dtypes.int32, shape=[])
float_graph_def.node.extend([shape_constant])
a_constant = quantize_graph.create_constant_node(
a_constant_name,
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.float32,
shape=[2, 2, 3])
float_graph_def.node.extend([a_constant])
b_constant = quantize_graph.create_constant_node(
b_constant_name,
value=[13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24],
dtype=dtypes.float32,
shape=[2, 2, 3])
float_graph_def.node.extend([b_constant])
concat_node = quantize_graph.create_node(
"Concat", concat_name,
[shape_constant_name, a_constant_name, b_constant_name])
quantize_graph.set_attr_int(concat_node, "N", 2)
quantize_graph.set_attr_dtype(concat_node, "T", dtypes.float32)
float_graph_def.node.extend([concat_node])
test_graph(float_graph_def, {}, [concat_name])
# Verify the concat is quantized.
eightbit_rewriter = quantize_graph.GraphRewriter(
float_graph_def, "eightbit", quantized_input_range=None)
eightbit_graph_def = eightbit_rewriter.rewrite([concat_name])
ops = [node.op for node in eightbit_graph_def.node]
self.assertEqual(1, ops.count("QuantizedConcat"))
  def test_multiple_outputs(self):
    """Split feeding a Concat exercises nodes with multiple output ports."""
    input_constant_name = "input_constant"
    split_constant_name = "split_constant"
    split_name = "split"
    concat_constant_name = "concat_constant"
    concat_name = "concat"
    float_graph_def = graph_pb2.GraphDef()
    input_constant = quantize_graph.create_constant_node(
        input_constant_name,
        value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
        dtype=dtypes.float32,
        shape=[2, 6])
    float_graph_def.node.extend([input_constant])
    # Split along axis 1, producing two [2, 3] halves.
    split_constant = quantize_graph.create_constant_node(
        split_constant_name, value=1, dtype=dtypes.int32, shape=[])
    float_graph_def.node.extend([split_constant])
    split_node = quantize_graph.create_node(
        "Split", split_name, [split_constant_name, input_constant_name])
    quantize_graph.set_attr_int(split_node, "num_split", 2)
    quantize_graph.set_attr_dtype(split_node, "T", dtypes.float32)
    float_graph_def.node.extend([split_node])
    concat_constant = quantize_graph.create_constant_node(
        concat_constant_name, value=1, dtype=dtypes.int32, shape=[])
    float_graph_def.node.extend([concat_constant])
    # Re-concatenate both output ports (":0" and ":1") of the split.
    concat_node = quantize_graph.create_node(
        "Concat", concat_name,
        [concat_constant_name, split_name + ":0", split_name + ":1"])
    quantize_graph.set_attr_int(concat_node, "N", 2)
    quantize_graph.set_attr_dtype(concat_node, "T", dtypes.float32)
    float_graph_def.node.extend([concat_node])
    test_graph(float_graph_def, {}, [concat_name])
def test_node_name_from_input(self):
self.assertEqual("SomeName",
quantize_graph.node_name_from_input("^SomeName:2"))
def test_unique_node_name_from_input(self):
self.assertEqual("__hat__SomeName__port__2",
quantize_graph.unique_node_name_from_input("^SomeName:2"))
def test_identity(self):
input_constant_name = "input_constant"
identity_name = "identity"
float_graph_def = graph_pb2.GraphDef()
input_constant = quantize_graph.create_constant_node(
input_constant_name,
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.float32,
shape=[2, 6])
float_graph_def.node.extend([input_constant])
identity_node = quantize_graph.create_node("Identity", identity_name,
[input_constant_name])
quantize_graph.set_attr_dtype(identity_node, "T", dtypes.float32)
float_graph_def.node.extend([identity_node])
mul_name = "mul"
mul_node = quantize_graph.create_node("Mul", mul_name,
[identity_name, identity_name])
quantize_graph.set_attr_dtype(mul_node, "T", dtypes.float32)
float_graph_def.node.extend([mul_node])
test_graph(float_graph_def, {}, [mul_name])
  def test_keep_control_edges(self):
    """Control edges must survive CheckNumerics/Identity stripping.

    Builds a graph whose Identity nodes carry both data and control
    inputs, removes training nodes, and checks that the control edge to
    the NoOp is preserved while the CheckNumerics nodes are dropped.
    """
    no_op_name = "no_op"
    a_constant_name = "a_constant"
    b_constant_name = "b_constant"
    a_check_name = "a_check"
    b_check_name = "b_check"
    a_identity_name = "a_identity"
    b_identity_name = "b_identity"
    add_name = "add"
    graph_def = graph_pb2.GraphDef()
    no_op = quantize_graph.create_node("NoOp", no_op_name, [])
    graph_def.node.extend([no_op])
    a_constant = quantize_graph.create_constant_node(
        a_constant_name, value=1, dtype=dtypes.float32, shape=[])
    graph_def.node.extend([a_constant])
    a_check_node = quantize_graph.create_node("CheckNumerics", a_check_name,
                                              [a_constant_name])
    graph_def.node.extend([a_check_node])
    # "^name" inputs are control edges, not data edges.
    a_identity_node = quantize_graph.create_node(
        "Identity", a_identity_name,
        [a_constant_name, "^" + a_check_name, "^" + no_op_name])
    graph_def.node.extend([a_identity_node])
    b_constant = quantize_graph.create_constant_node(
        b_constant_name, value=1, dtype=dtypes.float32, shape=[])
    graph_def.node.extend([b_constant])
    b_check_node = quantize_graph.create_node("CheckNumerics", b_check_name,
                                              [b_constant_name])
    graph_def.node.extend([b_check_node])
    b_identity_node = quantize_graph.create_node(
        "Identity", b_identity_name, [b_constant_name, "^" + b_check_name])
    graph_def.node.extend([b_identity_node])
    add_node = quantize_graph.create_node("Add", add_name,
                                          [a_identity_name, b_identity_name])
    quantize_graph.set_attr_dtype(add_node, "T", dtypes.float32)
    graph_def.node.extend([add_node])
    # Expected result: checks removed, a's control edge to the NoOp kept
    # (so its Identity survives), b's Identity folded away entirely.
    expected_output = graph_pb2.GraphDef()
    no_op = quantize_graph.create_node("NoOp", no_op_name, [])
    expected_output.node.extend([no_op])
    a_constant = quantize_graph.create_constant_node(
        a_constant_name, value=1, dtype=dtypes.float32, shape=[])
    expected_output.node.extend([a_constant])
    a_identity_node = quantize_graph.create_node(
        "Identity", a_identity_name, [a_constant_name, "^" + no_op_name])
    expected_output.node.extend([a_identity_node])
    b_constant = quantize_graph.create_constant_node(
        b_constant_name, value=1, dtype=dtypes.float32, shape=[])
    expected_output.node.extend([b_constant])
    add_node = quantize_graph.create_node("Add", add_name,
                                          [a_identity_name, b_constant_name])
    quantize_graph.set_attr_dtype(add_node, "T", dtypes.float32)
    expected_output.node.extend([add_node])
    output = graph_util.remove_training_nodes(graph_def)
    stripped_output = graph_util.extract_sub_graph(output, [add_name])
    self.assertProtoEquals(expected_output, stripped_output)
  def test_batch_norm(self):
    """Quantizes a BatchNormWithGlobalNormalization over a [1,1,6,2] input."""
    input_constant_name = "input_constant"
    mean_constant_name = "mean_constant"
    variance_constant_name = "variance_constant"
    beta_constant_name = "beta_constant"
    gamma_constant_name = "gamma_constant"
    batch_norm_name = "batch_norm"
    float_graph_def = graph_pb2.GraphDef()
    input_constant = quantize_graph.create_constant_node(
        input_constant_name,
        value=[1, 4, 2, 5, 3, 6, -1, -4, -2, -5, -3, -6],
        dtype=dtypes.float32,
        shape=[1, 1, 6, 2])
    float_graph_def.node.extend([input_constant])
    # Per-channel statistics and affine parameters (2 channels).
    mean_constant = quantize_graph.create_constant_node(
        mean_constant_name, value=[10, 20], dtype=dtypes.float32, shape=[2])
    float_graph_def.node.extend([mean_constant])
    variance_constant = quantize_graph.create_constant_node(
        variance_constant_name,
        value=[0.25, 0.5],
        dtype=dtypes.float32,
        shape=[2])
    float_graph_def.node.extend([variance_constant])
    beta_constant = quantize_graph.create_constant_node(
        beta_constant_name, value=[0.1, 0.6], dtype=dtypes.float32, shape=[2])
    float_graph_def.node.extend([beta_constant])
    gamma_constant = quantize_graph.create_constant_node(
        gamma_constant_name, value=[0, 0], dtype=dtypes.float32, shape=[2])
    float_graph_def.node.extend([gamma_constant])
    batch_norm_node = quantize_graph.create_node(
        "BatchNormWithGlobalNormalization", batch_norm_name, [
            input_constant_name, mean_constant_name, variance_constant_name,
            beta_constant_name, gamma_constant_name
        ])
    quantize_graph.set_attr_dtype(batch_norm_node, "T", dtypes.float32)
    quantize_graph.set_attr_bool(batch_norm_node, "scale_after_normalization",
                                 False)
    quantize_graph.set_attr_float(batch_norm_node, "variance_epsilon", 0.001)
    float_graph_def.node.extend([batch_norm_node])
    test_graph(float_graph_def, {}, [batch_norm_name])
def test_max_pool(self):
input_constant_name = "input_constant"
max_pool_name = "max_pool"
float_graph_def = graph_pb2.GraphDef()
input_constant = quantize_graph.create_constant_node(
input_constant_name,
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.float32,
shape=[1, 2, 6, 1])
float_graph_def.node.extend([input_constant])
max_pool_node = quantize_graph.create_node("MaxPool", max_pool_name,
[input_constant_name])
quantize_graph.set_attr_int_list(max_pool_node, "ksize", [1, 2, 2, 1])
quantize_graph.set_attr_int_list(max_pool_node, "strides", [1, 1, 1, 1])
quantize_graph.set_attr_string(max_pool_node, "padding", b"SAME")
float_graph_def.node.extend([max_pool_node])
test_graph(float_graph_def, {}, [max_pool_name])
def test_avg_pool(self):
input_constant_name = "input_constant"
avg_pool_name = "avg_pool"
float_graph_def = graph_pb2.GraphDef()
input_constant = quantize_graph.create_constant_node(
input_constant_name,
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.float32,
shape=[1, 2, 6, 1])
float_graph_def.node.extend([input_constant])
avg_pool_node = quantize_graph.create_node("AvgPool", avg_pool_name,
[input_constant_name])
quantize_graph.set_attr_dtype(avg_pool_node, "T", dtypes.float32)
quantize_graph.set_attr_int_list(avg_pool_node, "ksize", [1, 2, 2, 1])
quantize_graph.set_attr_int_list(avg_pool_node, "strides", [1, 1, 1, 1])
quantize_graph.set_attr_string(avg_pool_node, "padding", b"SAME")
float_graph_def.node.extend([avg_pool_node])
test_graph(float_graph_def, {}, [avg_pool_name])
def test_relu(self):
input_constant_name = "input_constant"
relu_name = "relu"
float_graph_def = graph_pb2.GraphDef()
input_constant = quantize_graph.create_constant_node(
input_constant_name,
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.float32,
shape=[1, 2, 6, 1])
float_graph_def.node.extend([input_constant])
relu_node = quantize_graph.create_node("Relu", relu_name,
[input_constant_name])
quantize_graph.set_attr_dtype(relu_node, "T", dtypes.float32)
float_graph_def.node.extend([relu_node])
test_graph(float_graph_def, {}, [relu_name])
  def test_relu_w_fake_quant_w_min_max_vars(self):
    """FakeQuantWithMinMaxVars after Relu should supply the quantization range."""
    input_node = quantize_graph.create_constant_node(
        "input",
        value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
        dtype=dtypes.float32,
        shape=[1, 2, 6, 1])
    relu_node = quantize_graph.create_node("Relu", "relu", [input_node.name])
    quantize_graph.set_attr_dtype(relu_node, "T", dtypes.float32)
    # Explicit [0, 12] range carried by the FakeQuant node.
    min_node = quantize_graph.create_constant_node(
        "min_bias_add", value=0, dtype=dtypes.float32, shape=[])
    max_node = quantize_graph.create_constant_node(
        "max_bias_add", value=12, dtype=dtypes.float32, shape=[])
    fake_quant_node = quantize_graph.create_node(
        "FakeQuantWithMinMaxVars", "fake_quant",
        [relu_node.name, min_node.name, max_node.name])
    float_graph_def = graph_pb2.GraphDef()
    float_graph_def.node.extend(
        [input_node, relu_node, min_node, max_node, fake_quant_node])
    test_graph(float_graph_def, {}, [fake_quant_node.name], log_graph=True)
    # Verify there is only one Quantize and one Requantize op.
    eightbit_rewriter = quantize_graph.GraphRewriter(
        float_graph_def, "eightbit", quantized_input_range=None)
    eightbit_graph_def = eightbit_rewriter.rewrite([fake_quant_node.name])
    ops = [node.op for node in eightbit_graph_def.node]
    # No quantize since all inputs are const and can be quantized up-front.
    self.assertEqual(0, ops.count("QuantizeV2") + ops.count("Quantize"))
    # One dequantize at the end.
    self.assertEqual(1, ops.count("Dequantize"))
def test_relu6(self):
input_constant_name = "input_constant"
relu6_name = "relu6"
float_graph_def = graph_pb2.GraphDef()
input_constant = quantize_graph.create_constant_node(
input_constant_name,
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.float32,
shape=[1, 2, 6, 1])
float_graph_def.node.extend([input_constant])
relu6_node = quantize_graph.create_node("Relu6", relu6_name,
[input_constant_name])
quantize_graph.set_attr_dtype(relu6_node, "T", dtypes.float32)
float_graph_def.node.extend([relu6_node])
test_graph(float_graph_def, {}, [relu6_name])
def test_bias_add(self):
input_constant_name = "input_constant"
offset_constant_name = "offset_constant"
bias_add_name = "bias_add"
float_graph_def = graph_pb2.GraphDef()
input_constant = quantize_graph.create_constant_node(
input_constant_name,
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.float32,
shape=[1, 1, 2, 6])
float_graph_def.node.extend([input_constant])
offset_constant = quantize_graph.create_constant_node(
offset_constant_name,
value=[1, 2, 3, 4, 5, 6],
dtype=dtypes.float32,
shape=[6])
float_graph_def.node.extend([offset_constant])
bias_add_node = quantize_graph.create_node(
"BiasAdd", bias_add_name, [input_constant_name, offset_constant_name])
quantize_graph.set_attr_dtype(bias_add_node, "T", dtypes.float32)
float_graph_def.node.extend([bias_add_node])
test_graph(float_graph_def, {}, [bias_add_name])
  def test_quantized_input_range_errors(self):
    """GraphRewriter must reject invalid quantized_input_range arguments."""
    with self.assertRaises(ValueError):
      # Invalid mode: quantized_input_range is only valid in "eightbit" mode.
      quantize_graph.GraphRewriter(graph_pb2.GraphDef(), "weights_rounded",
                                   [0, 1])
    with self.assertRaises(ValueError):
      # Invalid range: min must be strictly less than max.
      quantize_graph.GraphRewriter(graph_pb2.GraphDef(), "eightbit", [0, -1])
  def test_quantized_input_range_bias_add(self):
    """Checks quantized-input rewrites of a BiasAdd for two input ranges."""
    input_shape = [1, 1, 2, 6]
    input_n = quantize_graph.create_node("PlaceholderV2", "input", [])
    quantize_graph.set_attr_dtype(input_n, "dtype", dtypes.float32)
    quantize_graph.set_attr_shape(input_n, "shape", input_shape)
    offset_n = quantize_graph.create_constant_node(
        "offset", value=[1, 2, 3, 4, 5, 6], dtype=dtypes.float32, shape=[6])
    bias_add_n = quantize_graph.create_node("BiasAdd", "bias_add",
                                            [input_n.name, offset_n.name])
    quantize_graph.set_attr_dtype(bias_add_n, "T", dtypes.float32)
    float_graph_def = graph_pb2.GraphDef()
    float_graph_def.node.extend([input_n, offset_n, bias_add_n])
    input_map = {
        input_n.name + ":0":
            np.reshape([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], input_shape)
    }
    # Both a loose range and the exact data range should round-trip.
    self._RunTestsForQuantizedInputRange(float_graph_def, input_map,
                                         [bias_add_n.name], [-1, 20.])
    self._RunTestsForQuantizedInputRange(float_graph_def, input_map,
                                         [bias_add_n.name], [0, 12.])
  def test_quantized_input_range_mat_mul(self):
    """Checks quantized-input rewrites of a MatMul with two placeholders."""
    shapes = [[3, 2], [2, 4]]
    inputs = []
    for i, shape in enumerate(shapes):
      node = quantize_graph.create_node("PlaceholderV2", "input_%s" % i, [])
      quantize_graph.set_attr_dtype(node, "dtype", dtypes.float32)
      quantize_graph.set_attr_shape(node, "shape", shape)
      inputs.append(node)
    mat_mul_node = quantize_graph.create_node("MatMul", "mat_mul",
                                              [n.name for n in inputs])
    quantize_graph.set_attr_dtype(mat_mul_node, "T", dtypes.float32)
    float_graph_def = graph_pb2.GraphDef()
    float_graph_def.node.extend(inputs + [mat_mul_node])
    input_map = {
        inputs[0].name + ":0":
            np.reshape([1, 2, 3, 4, 5, 6], shapes[0]),
        inputs[1].name + ":0":
            np.reshape([.8, .7, .6, .5, .4, .3, .2, .1], shapes[1])
    }
    # Both a loose range and the exact data range should round-trip.
    self._RunTestsForQuantizedInputRange(float_graph_def, input_map,
                                         [mat_mul_node.name], [-1, 20.])
    self._RunTestsForQuantizedInputRange(float_graph_def, input_map,
                                         [mat_mul_node.name], [0, 6.])
  def _RunTestsForQuantizedInputRange(self, float_graph_def, input_map,
                                      output_names, input_range):
    """Rewrites the graph with and without a pre-quantized input range.

    Args:
      float_graph_def: float GraphDef under test.
      input_map: feed_dict of float input values.
      output_names: names of output nodes to compare against float results.
      input_range: (min, max) float range the quantized inputs represent.
    """
    if sys.version_info[0] == 3:
      # uint8->quint8 conversion for numpy is not working currently.
      return
    # Manually quantize the float feeds into quint8 using input_range.
    quantized_input_map = {}
    for k, v in input_map.items():
      arr = [
          int(
              round((n - input_range[0]) * 255 / (input_range[1] - input_range[
                  0]))) for n in v.flat
      ]
      arr = np.array(arr, np.uint8)
      arr = arr.reshape(v.shape)
      arr = arr.astype(dtypes.quint8.as_numpy_dtype)
      quantized_input_map[k] = arr
    output_tensors = [output_name + ":0" for output_name in output_names]
    float_results = run_graph_def(float_graph_def, input_map, output_tensors)
    # Quantize treating the input as quantized in range <input_range>.
    rewriter = quantize_graph.GraphRewriter(float_graph_def, "eightbit",
                                            input_range)
    graph_def = rewriter.rewrite(output_names)
    results = run_graph_def(graph_def, quantized_input_map, output_tensors)
    for expected, result in zip(float_results, results):
      assert are_tensors_near(expected, result, .5)
    ops = [node.op for node in graph_def.node]
    # Inputs arrive pre-quantized, so no Quantize ops should remain.
    self.assertEqual(0, ops.count("QuantizeV2") + ops.count("Quantize"))
    self.assertEqual(len(output_names), ops.count("Dequantize"))
    # Quantize without treating input as quantized.
    rewriter = quantize_graph.GraphRewriter(
        float_graph_def, "eightbit", quantized_input_range=None)
    graph_def = rewriter.rewrite(output_names)
    results = run_graph_def(graph_def, input_map, output_tensors)
    for expected, result in zip(float_results, results):
      assert are_tensors_near(expected, result, .5)
    ops = [node.op for node in graph_def.node]
    # Float inputs now need exactly one Quantize op per feed.
    self.assertEqual(
        len(input_map), ops.count("QuantizeV2") + ops.count("Quantize"))
    self.assertEqual(len(output_names), ops.count("Dequantize"))
# Checks that eightbit rewriting honors FakeQuantWithMinMaxVars ranges and
# ignores the fallback_quantization_range when fake-quant nodes are present.
def test_bias_add_w_fake_quant_w_min_max_vars(self):
input_node = quantize_graph.create_constant_node(
"input",
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
dtype=dtypes.float32,
shape=[1, 1, 2, 5])
offset_node = quantize_graph.create_constant_node(
"offset", value=[1, 2, 3, 4, 5], dtype=dtypes.float32, shape=[5])
bias_add_node = quantize_graph.create_node(
"BiasAdd", "bias_add", [input_node.name, offset_node.name])
quantize_graph.set_attr_dtype(bias_add_node, "T", dtypes.float32)
# Explicit quantization range [-0.5, 15.5] carried by the fake-quant node.
min_node = quantize_graph.create_constant_node(
"min_bias_add", value=-.5, dtype=dtypes.float32, shape=[])
max_node = quantize_graph.create_constant_node(
"max_bias_add", value=15.5, dtype=dtypes.float32, shape=[])
fake_quant_node = quantize_graph.create_node(
"FakeQuantWithMinMaxVars", "fake_quant",
[bias_add_node.name, min_node.name, max_node.name])
float_graph_def = graph_pb2.GraphDef()
float_graph_def.node.extend([
input_node, offset_node, bias_add_node, min_node, max_node,
fake_quant_node
])
test_graph(float_graph_def, {}, [fake_quant_node.name], log_graph=True)
# Verify there is only one Quantize and one Requantize op.
# Pass in fallback_quantization_range, although it will have no effect
# because the FakeQuantWithMinMaxVars are used instead.
eightbit_rewriter = quantize_graph.GraphRewriter(
float_graph_def,
"eightbit",
quantized_input_range=None,
fallback_quantization_range=[-100, 100])
eightbit_graph_def = eightbit_rewriter.rewrite([fake_quant_node.name])
ops = [node.op for node in eightbit_graph_def.node]
node_names = [node.name for node in eightbit_graph_def.node]
# No quantize since all inputs are const and can be quantized up-front.
self.assertEqual(0, ops.count("QuantizeV2") + ops.count("Quantize"))
# One dequantize at the end.
self.assertEqual(1, ops.count("Dequantize"))
# The fallback constants are not in the graph.
self.assertEqual(0, node_names.count("fallback_quantization_min_value"))
self.assertEqual(0, node_names.count("fallback_quantization_max_value"))
# Checks that, without fake-quant nodes, the rewriter falls back to the
# supplied fallback_quantization_range and emits its min/max constants.
def test_bias_add_w_fallback_min_max_vars(self):
input_node = quantize_graph.create_constant_node(
"input",
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
dtype=dtypes.float32,
shape=[1, 1, 2, 5])
offset_node = quantize_graph.create_constant_node(
"offset", value=[1, 2, 3, 4, 5], dtype=dtypes.float32, shape=[5])
bias_add_node = quantize_graph.create_node(
"BiasAdd", "bias_add", [input_node.name, offset_node.name])
quantize_graph.set_attr_dtype(bias_add_node, "T", dtypes.float32)
float_graph_def = graph_pb2.GraphDef()
float_graph_def.node.extend([input_node, offset_node, bias_add_node])
test_graph(float_graph_def, {}, [bias_add_node.name], log_graph=True)
# Verify there is only one Quantize, one Requantize op, and no
# RequantizationRange op.
eightbit_rewriter = quantize_graph.GraphRewriter(
float_graph_def,
"eightbit",
quantized_input_range=None,
fallback_quantization_range=[-.5, 15.5])
eightbit_graph_def = eightbit_rewriter.rewrite([bias_add_node.name])
ops = [node.op for node in eightbit_graph_def.node]
node_names = [node.name for node in eightbit_graph_def.node]
# No quantize since all inputs are const and can be quantized up-front.
self.assertEqual(0, ops.count("QuantizeV2") + ops.count("Quantize"))
# One dequantize at the end.
self.assertEqual(1, ops.count("Dequantize"))
# No RequantizationRange
self.assertEqual(0, ops.count("RequantizationRange"))
# The fallback constants are in the graph.
self.assertEqual(1, node_names.count("fallback_quantization_min_value"))
self.assertEqual(1, node_names.count("fallback_quantization_max_value"))
# Builds a graph containing Dequantize->QuantizeV2 round-trips feeding a
# QuantizedMatMul, and checks that remove_redundant_quantization() wires the
# quantized constants (and their min/max) straight into the matmul.
def test_remove_redundant_quantization(self):
a_constant_name = "a_constant"
a_constant_min_name = "a_constant_min"
a_constant_max_name = "a_constant_max"
a_dequantize_name = "a_dequantize"
a_quantize_name = "a_quantize"
b_constant_name = "b_constant"
b_constant_min_name = "b_constant_min"
b_constant_max_name = "b_constant_max"
b_dequantize_name = "b_dequantize"
b_quantize_name = "b_quantize"
mat_mul_name = "mat_mul"
# Input graph: quint8 const -> Dequantize -> QuantizeV2 -> QuantizedMatMul.
graph_def = graph_pb2.GraphDef()
a_constant = quantize_graph.create_constant_node(
a_constant_name, value=(0,), dtype=dtypes.quint8, shape=[])
graph_def.node.extend([a_constant])
a_constant_min = quantize_graph.create_constant_node(
a_constant_min_name, value=2, dtype=dtypes.float32, shape=[])
graph_def.node.extend([a_constant_min])
a_constant_max = quantize_graph.create_constant_node(
a_constant_max_name, value=2, dtype=dtypes.float32, shape=[])
graph_def.node.extend([a_constant_max])
a_dequantize_node = quantize_graph.create_node(
"Dequantize", a_dequantize_name,
[a_constant_name, a_constant_min_name, a_constant_max_name])
quantize_graph.set_attr_dtype(a_dequantize_node, "T", dtypes.uint8)
graph_def.node.extend([a_dequantize_node])
# QuantizeV2 consumes the dequantized value plus its min (":1") and
# max (":2") output ports.
a_quantize_node = quantize_graph.create_node(
"QuantizeV2", a_quantize_name,
[a_dequantize_name, a_dequantize_name + ":1", a_dequantize_name + ":2"])
quantize_graph.set_attr_dtype(a_quantize_node, "T", dtypes.uint8)
graph_def.node.extend([a_quantize_node])
b_constant = quantize_graph.create_constant_node(
b_constant_name, value=(0,), dtype=dtypes.quint8, shape=[])
graph_def.node.extend([b_constant])
b_constant_min = quantize_graph.create_constant_node(
b_constant_min_name, value=3, dtype=dtypes.float32, shape=[])
graph_def.node.extend([b_constant_min])
b_constant_max = quantize_graph.create_constant_node(
b_constant_max_name, value=3, dtype=dtypes.float32, shape=[])
graph_def.node.extend([b_constant_max])
b_dequantize_node = quantize_graph.create_node(
"Dequantize", b_dequantize_name,
[b_constant_name, b_constant_min_name, b_constant_max_name])
quantize_graph.set_attr_dtype(b_dequantize_node, "T", dtypes.uint8)
graph_def.node.extend([b_dequantize_node])
b_quantize_node = quantize_graph.create_node(
"QuantizeV2", b_quantize_name,
[b_dequantize_name, b_dequantize_name + ":1", b_dequantize_name + ":2"])
quantize_graph.set_attr_dtype(b_quantize_node, "T", dtypes.uint8)
graph_def.node.extend([b_quantize_node])
mat_mul_node = quantize_graph.create_node("QuantizedMatMul", mat_mul_name, [
a_quantize_name, b_quantize_name, a_quantize_name + ":1",
a_quantize_name + ":2", b_quantize_name + ":1", b_quantize_name + ":2"
])
quantize_graph.set_attr_dtype(mat_mul_node, "T1", dtypes.uint8)
quantize_graph.set_attr_dtype(mat_mul_node, "T2", dtypes.int32)
graph_def.node.extend([mat_mul_node])
# Expected graph: the Dequantize/QuantizeV2 pairs are gone and the matmul
# reads the constants and their min/max directly.
expected_output = graph_pb2.GraphDef()
a_constant = quantize_graph.create_constant_node(
a_constant_name, value=(0,), dtype=dtypes.quint8, shape=[])
expected_output.node.extend([a_constant])
a_constant_min = quantize_graph.create_constant_node(
a_constant_min_name, value=2, dtype=dtypes.float32, shape=[])
expected_output.node.extend([a_constant_min])
a_constant_max = quantize_graph.create_constant_node(
a_constant_max_name, value=2, dtype=dtypes.float32, shape=[])
expected_output.node.extend([a_constant_max])
b_constant = quantize_graph.create_constant_node(
b_constant_name, value=(0,), dtype=dtypes.quint8, shape=[])
expected_output.node.extend([b_constant])
b_constant_min = quantize_graph.create_constant_node(
b_constant_min_name, value=3, dtype=dtypes.float32, shape=[])
expected_output.node.extend([b_constant_min])
b_constant_max = quantize_graph.create_constant_node(
b_constant_max_name, value=3, dtype=dtypes.float32, shape=[])
expected_output.node.extend([b_constant_max])
mat_mul_node = quantize_graph.create_node("QuantizedMatMul", mat_mul_name, [
a_constant_name, b_constant_name, a_constant_min_name,
a_constant_max_name, b_constant_min_name, b_constant_max_name
])
quantize_graph.set_attr_dtype(mat_mul_node, "T1", dtypes.uint8)
quantize_graph.set_attr_dtype(mat_mul_node, "T2", dtypes.int32)
expected_output.node.extend([mat_mul_node])
# NOTE(review): [mat_mul_name] is passed where other call sites pass the
# mode string (e.g. "eightbit"); remove_redundant_quantization() is invoked
# directly so the mode appears unused here -- confirm against GraphRewriter.
rewriter = quantize_graph.GraphRewriter(
graph_def, [mat_mul_name], quantized_input_range=None)
output = rewriter.remove_redundant_quantization(graph_def)
stripped_output = graph_util.extract_sub_graph(output, [mat_mul_name])
self.assertProtoEquals(expected_output, stripped_output)
# Standard TensorFlow test entry point.
if __name__ == "__main__":
test.main()
| apache-2.0 |
hastern/jelly | plugin.py | 1 | 8548 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Jelly Plugin - A minimal plugin framework.
The idea and basis for this code came from:
http://martyalchin.com/2008/jan/10/simple-plugin-framework/
To create a pluginhook define a new class, and set `PluginMount` or
`TaxonomyPluginMount` as its `__metaclass__`.
Every plugin must be a child of the pluginhook, inheriting its interface.
Therefore no special interface declaration is necessary.
"""
import logging
# We are assuming, that there is an already configured logger present
# Module-wide logger; handlers/levels must be configured by the host
# application before plugin registration starts emitting debug records.
logger = logging.getLogger(__name__)
def createPluginsFolder(dirname='plugins'):  # pragma: no cover
    """Create a plugin folder in the current working directory,
    if none is existent.

    The folder is seeded with an ``__init__.py`` that imports every
    ``*.py`` module found next to it, so dropping a file into the folder
    registers its plugins automatically.

    If the folder already exists, nothing is touched.

    @type dirname: str
    @param dirname: The name for the plugin-directory
    """
    import os
    import os.path
    pluginInit = """#!/usr/bin/env python
# -*- coding: utf-8 -*-

import os

for module in os.listdir(os.path.dirname(__file__)):
    if module == '__init__.py' or module[-3:] != '.py':
        continue
    __import__(module[:-3], locals(), globals())
del module
"""
    if not os.path.exists(dirname):
        os.mkdir(dirname)
        # Use a context manager so the handle is closed (and the content
        # flushed) deterministically; the original open(...).write(...)
        # leaked the file object until garbage collection.
        with open("{}/__init__.py".format(dirname), "w") as init_file:
            init_file.write(pluginInit)
class PluginMount(type):
"""A simple pluginMount.
To hook a plugin into the mount, simply let your object inherit from it.
"""
def __init__(cls, name, bases, attrs):
"""The first object to be initialized is always the parent.
Due to the way class-members are accessed, all children of this
object hook themself into the mounts plugin list. Thereby
getting visible to the mount.
@type cls: type
@param cls: The class object to be initialized.
@type name: str
@param name: The name of the class object.
@type bases: list
@param bases: A list of base classes for the class object
@type attrs: list
@param attrs: A list of attributes for the class object
"""
# First class created with this metaclass has no 'plugins' attribute
# yet, so it becomes the mount; every later class sees the inherited
# attribute and registers itself as a plugin instead.
if not hasattr(cls, 'plugins'):
logger.debug("Creating pluginmount {}".format(cls.__name__))
# Create plugin list
cls.plugins = []
# Set self as base class
cls.base = cls
else:
logger.debug("Registering plugin {}".format(cls.__name__))
# Append self to plugin list
cls.plugins.append(cls)
# Instance-level helpers to distinguish mount instances from plugin
# instances via identity against the recorded base class.
cls.isMount = lambda self: self.base is self.__class__
cls.isPlugin = lambda self: self.base is not self.__class__
def loadPlugins(cls, *args, **kwargs):
"""Create a list of instantiated plugins
if this is not called from inside the mount instance, you should
specify the *caller* argument, to avoid double instantiation of
your child class"""
caller = kwargs['caller'].__class__ if 'caller' in kwargs else None
return [p(*args, **kwargs) for p in cls.plugins if p is not caller]
def __iter__(self):
"""Iterate all plugins
"""
for plugin in self.plugins:
yield plugin
class TaxonomyPluginMount(type):
"""PluginMount for plugins with a taxonomy on its plugins
To hook a plugin into the mount, let your object inherit from it."""
def __init__(cls, name, bases, attrs):
"""Like the `PluginMount` the first object to be initialized
will become the mount, any later objects (meaning: childs)
act as plugins.
The taxonomy is created through a dictionary.
Each plugin can define a class-member `__category__`.
The Elements of the hierarchy are separated by a single dot.
@type cls: object
@param cls: The class object to be initialized.
@type name: str
@param name: The name of the class object.
@type bases: list
@param bases: A list of base classes for the class object
@type attrs: list
@param attrs: A list of attributes for the class object
"""
# First class seen becomes the mount and owns the shared taxonomy
# dict; later classes register under their fully qualified name.
if not hasattr(cls, 'taxonomy'):
logger.debug("Creating TaxonomyPluginMount {}".format(cls.__name__))
cls.taxonomy = {}
cls.__category__ = ""
else:
logger.debug("Registering plugin {} into taxonomy {}".format(cls.__name__, cls.__category__))
cls.taxonomy[cls.FQClassName] = cls
def __getitem__(cls, key):
"""Implementation of the Indexed-Access-Operator (`[]`).
Delegating to a call of `TaxonomyPluginMount.findClass`.
@type cls: object
@param cls: The hook class
@type key: str
@param key: The fully qualified class name
@rtype: object
@return: The class
"""
# Raises KeyError for unknown fully qualified names.
return cls.taxonomy[key]
def __iter__(cls):
""" Iterate the class object to get all plugins
"""
# NOTE: dict.iteritems() makes this module Python 2 only.
for key, plugin in cls.taxonomy.iteritems():
yield plugin
def getFQClassName(cls):
"""
@type cls: object
@param cls: A Class object
@rtype: str
@return: the fully qualified class name of a class
"""
# "<category>.<ClassName>", or bare class name for the root category.
if cls.__category__ != "":
return ".".join((cls.__category__, cls.__name__))
else:
return cls.__name__
@property
def FQClassName(cls):
"""Remapping getter method to a property"""
return cls.getFQClassName()
def loadPlugins(cls, *args, **kwargs):
"""Create a list of instantiated plugins
if this is not called from inside the mount instance, you should
specify the *caller* argument, to avoid double instantiation of
your child class"""
# NOTE(review): the dict is keyed by FQ class-name strings, but
# `caller` is a class object, so `key is not caller` never filters
# anything -- presumably intended to compare classes; confirm.
caller = kwargs['caller'].__class__ if 'caller' in kwargs else None
return {key: clazz(*args, **kwargs) for key, clazz in cls.taxonomy.iteritems() if key is not caller}
def getAllCategories(cls, exclude=[]):
"""Create a dictionary with all categories and the class per
category.
Returns a dictionary where the keys are the full category with
a list of class names as values.
@type exclude: list
@param exclude: List of categories to be excluded
"""
# Split "cat.sub.Class" into ("cat.sub", "Class") and group the class
# names per category.
d = {}
for k, e in map(lambda s: s.rsplit(".", 1), cls.taxonomy.keys()):
if k not in exclude:
d[k] = [e] if k not in d else d[k] + [e]
return d
class MixinMount(type):
"""Metaclass to mix all child methods into the base parent.
Every child class object will be a reference to the base object.
The init functions of all childrens will be called consecutivly.
"""
def __new__(cls, name, bases, attrs):
"""Override the __new__ method the create a singleton class
object"""
def init(self, *args, **kwargs):
"""Call all initializer method of all child functions"""
# Replacement __init__ that fans out to every collected child
# initializer in registration order.
for init in self.__init_collection__:
init(self, *args, **kwargs)
if not hasattr(cls, "instance"):
# 1. Object -> Base class: create class object instance
if "__init__" in attrs:
attrs['__init_collection__'] = [attrs['__init__']]
else:
attrs['__init_collection__'] = []
attrs['__init__'] = init
logger.debug("Creating mixinmount {}".format(name))
cls.instance = super(MixinMount, cls).__new__(cls, name, bases, attrs)
elif "__init__" in attrs:
# Every other object: Append all non-dunderscore methods
logger.debug("Appending methods from '{}' to {}".format(name, cls.instance))
cls.instance.__init_collection__.append(attrs["__init__"])
# Copy every public attribute of the child onto the singleton,
# warning on (and overwriting) name collisions.
for name, attr in attrs.iteritems():
if not name.startswith("__"):
if hasattr(cls.instance, name):
logger.warning("Member '{}' already exists in {}".format(name, cls.instance))
setattr(cls.instance, name, attr)
# Emulate multi inheritance
if len(bases) > 1:
for base in bases:
if base != cls.instance:
for name, member in base.__dict__.iteritems():
if not name.startswith("__"):
if hasattr(cls.instance, name):
logger.warning("Mixin-member '{}' from {} already exists in {}".format(name, base, cls.instance))
setattr(cls.instance, name, member)
# Always hand back the singleton: every "subclass" name in client
# code ends up bound to the same class object.
return cls.instance
| mit |
CiscoSystems/vespa | neutron/tests/unit/test_common_log.py | 22 | 2943 | # Copyright (c) 2013 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron.common import log as call_log
from neutron.tests import base
# Dotted path of this test module; used to build the expected class_name
# values the log decorator reports.
MODULE_NAME = 'neutron.tests.unit.test_common_log'
# Fixture class whose decorated no-op method lets the tests observe what
# neutron.common.log.log emits for various argument combinations.
class TargetKlass(object):
@call_log.log
def test_method(self, arg1, arg2, *args, **kwargs):
pass
class TestCallLog(base.BaseTestCase):
# Verifies the @log decorator emits one debug record per call, with the
# positional args / kwargs split exactly as the caller supplied them.
def setUp(self):
super(TestCallLog, self).setUp()
self.klass = TargetKlass()
# Expected log format string and baseline payload; each test fills in
# the args/kwargs it passes.
self.expected_format = ('%(class_name)s method %(method_name)s '
'called with arguments %(args)s %(kwargs)s')
self.expected_data = {'class_name': MODULE_NAME + '.TargetKlass',
'method_name': 'test_method',
'args': (),
'kwargs': {}}
def test_call_log_all_args(self):
self.expected_data['args'] = (10, 20)
with mock.patch.object(call_log.LOG, 'debug') as log_debug:
self.klass.test_method(10, 20)
log_debug.assert_called_once_with(self.expected_format,
self.expected_data)
def test_call_log_all_kwargs(self):
self.expected_data['kwargs'] = {'arg1': 10, 'arg2': 20}
with mock.patch.object(call_log.LOG, 'debug') as log_debug:
self.klass.test_method(arg1=10, arg2=20)
log_debug.assert_called_once_with(self.expected_format,
self.expected_data)
def test_call_log_known_args_unknown_args_kwargs(self):
# Extra positional args beyond the signature stay in 'args'.
self.expected_data['args'] = (10, 20, 30)
self.expected_data['kwargs'] = {'arg4': 40}
with mock.patch.object(call_log.LOG, 'debug') as log_debug:
self.klass.test_method(10, 20, 30, arg4=40)
log_debug.assert_called_once_with(self.expected_format,
self.expected_data)
def test_call_log_known_args_kwargs_unknown_kwargs(self):
# Named params passed by keyword are reported in 'kwargs', not 'args'.
self.expected_data['args'] = (10,)
self.expected_data['kwargs'] = {'arg2': 20, 'arg3': 30, 'arg4': 40}
with mock.patch.object(call_log.LOG, 'debug') as log_debug:
self.klass.test_method(10, arg2=20, arg3=30, arg4=40)
log_debug.assert_called_once_with(self.expected_format,
self.expected_data)
| apache-2.0 |
swjtuacmer/Ranker | Ranker/venv/lib/python2.7/site-packages/pip/commands/uninstall.py | 798 | 2884 | from __future__ import absolute_import
import pip
from pip.wheel import WheelCache
from pip.req import InstallRequirement, RequirementSet, parse_requirements
from pip.basecommand import Command
from pip.exceptions import InstallationError
class UninstallCommand(Command):
"""
Uninstall packages.
pip is able to uninstall most installed packages. Known exceptions are:
- Pure distutils packages installed with ``python setup.py install``, which
leave behind no metadata to determine what files were installed.
- Script wrappers installed by ``python setup.py develop``.
"""
name = 'uninstall'
usage = """
%prog [options] <package> ...
%prog [options] -r <requirements file> ..."""
summary = 'Uninstall packages.'
def __init__(self, *args, **kw):
# Register the command-specific options (-r/--requirement, -y/--yes)
# on top of the base Command option parser.
super(UninstallCommand, self).__init__(*args, **kw)
self.cmd_opts.add_option(
'-r', '--requirement',
dest='requirements',
action='append',
default=[],
metavar='file',
help='Uninstall all the packages listed in the given requirements '
'file. This option can be used multiple times.',
)
self.cmd_opts.add_option(
'-y', '--yes',
dest='yes',
action='store_true',
help="Don't ask for confirmation of uninstall deletions.")
self.parser.insert_option_group(0, self.cmd_opts)
def run(self, options, args):
# Collect requirements from positional args and any -r files into a
# RequirementSet, then delegate the actual removal to it.
with self._build_session(options) as session:
format_control = pip.index.FormatControl(set(), set())
wheel_cache = WheelCache(options.cache_dir, format_control)
# build/src/download dirs are irrelevant for uninstall.
requirement_set = RequirementSet(
build_dir=None,
src_dir=None,
download_dir=None,
isolated=options.isolated_mode,
session=session,
wheel_cache=wheel_cache,
)
for name in args:
requirement_set.add_requirement(
InstallRequirement.from_line(
name, isolated=options.isolated_mode,
wheel_cache=wheel_cache
)
)
for filename in options.requirements:
for req in parse_requirements(
filename,
options=options,
session=session,
wheel_cache=wheel_cache):
requirement_set.add_requirement(req)
if not requirement_set.has_requirements:
raise InstallationError(
'You must give at least one requirement to %(name)s (see '
'"pip help %(name)s")' % dict(name=self.name)
)
# -y/--yes skips the interactive confirmation prompt.
requirement_set.uninstall(auto_confirm=options.yes)
| mit |
MechCoder/scikit-learn | sklearn/metrics/tests/test_score_objects.py | 33 | 17877 | import pickle
import tempfile
import shutil
import os
import numbers
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.base import BaseEstimator
from sklearn.metrics import (f1_score, r2_score, roc_auc_score, fbeta_score,
log_loss, precision_score, recall_score)
from sklearn.metrics import cluster as cluster_module
from sklearn.metrics.scorer import (check_scoring, _PredictScorer,
_passthrough_scorer)
from sklearn.metrics import make_scorer, get_scorer, SCORERS
from sklearn.svm import LinearSVC
from sklearn.pipeline import make_pipeline
from sklearn.cluster import KMeans
from sklearn.dummy import DummyRegressor
from sklearn.linear_model import Ridge, LogisticRegression
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.datasets import make_blobs
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import load_diabetes
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.multiclass import OneVsRestClassifier
from sklearn.externals import joblib
# Scorer-name lists used to pick a sensible pre-fitted estimator per metric
# and to parametrize the smoke tests below.
REGRESSION_SCORERS = ['r2', 'neg_mean_absolute_error',
'neg_mean_squared_error', 'neg_mean_squared_log_error',
'neg_median_absolute_error', 'mean_absolute_error',
'mean_squared_error', 'median_absolute_error']
CLF_SCORERS = ['accuracy', 'f1', 'f1_weighted', 'f1_macro', 'f1_micro',
'roc_auc', 'average_precision', 'precision',
'precision_weighted', 'precision_macro', 'precision_micro',
'recall', 'recall_weighted', 'recall_macro', 'recall_micro',
'neg_log_loss', 'log_loss']
# All supervised cluster scorers (They behave like classification metric)
CLUSTER_SCORERS = ["adjusted_rand_score",
"homogeneity_score",
"completeness_score",
"v_measure_score",
"mutual_info_score",
"adjusted_mutual_info_score",
"normalized_mutual_info_score",
"fowlkes_mallows_score"]
# Scorers that only accept multilabel-indicator targets.
MULTILABEL_ONLY_SCORERS = ['precision_samples', 'recall_samples', 'f1_samples']
# Returns a {scorer_name: fitted_estimator} map pairing each scorer with an
# estimator type it can legitimately score.
def _make_estimators(X_train, y_train, y_ml_train):
# Make estimators that make sense to test various scoring methods
sensible_regr = DummyRegressor(strategy='median')
sensible_regr.fit(X_train, y_train)
sensible_clf = DecisionTreeClassifier(random_state=0)
sensible_clf.fit(X_train, y_train)
sensible_ml_clf = DecisionTreeClassifier(random_state=0)
sensible_ml_clf.fit(X_train, y_ml_train)
return dict(
[(name, sensible_regr) for name in REGRESSION_SCORERS] +
[(name, sensible_clf) for name in CLF_SCORERS] +
[(name, sensible_clf) for name in CLUSTER_SCORERS] +
[(name, sensible_ml_clf) for name in MULTILABEL_ONLY_SCORERS]
)
# Module-level fixtures populated by setup_module() and torn down by
# teardown_module(): memory-mapped data arrays, pre-fitted estimators, and
# the temp directory that backs the memmaps.
X_mm, y_mm, y_ml_mm = None, None, None
ESTIMATORS = None
TEMP_FOLDER = None
def setup_module():
# Create some memory mapped data
global X_mm, y_mm, y_ml_mm, TEMP_FOLDER, ESTIMATORS
TEMP_FOLDER = tempfile.mkdtemp(prefix='sklearn_test_score_objects_')
X, y = make_classification(n_samples=30, n_features=5, random_state=0)
_, y_ml = make_multilabel_classification(n_samples=X.shape[0],
random_state=0)
filename = os.path.join(TEMP_FOLDER, 'test_data.pkl')
joblib.dump((X, y, y_ml), filename)
# Reload with mmap_mode='r' so the tests exercise memmap-backed inputs.
X_mm, y_mm, y_ml_mm = joblib.load(filename, mmap_mode='r')
ESTIMATORS = _make_estimators(X_mm, y_mm, y_ml_mm)
def teardown_module():
global X_mm, y_mm, y_ml_mm, TEMP_FOLDER, ESTIMATORS
# GC closes the mmap file descriptors
X_mm, y_mm, y_ml_mm, ESTIMATORS = None, None, None, None
shutil.rmtree(TEMP_FOLDER)
class EstimatorWithoutFit(object):
    """Minimal stand-in exposing no ``fit`` method, used to check that
    check_scoring rejects objects that are not estimators."""
# Estimator stub with fit() but neither score() nor predict(); used to check
# check_scoring's handling of fit-only estimators.
class EstimatorWithFit(BaseEstimator):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
return self
class EstimatorWithFitAndScore(object):
    """Stub estimator with a no-op ``fit`` and a constant ``score``,
    for exercising check_scoring's pass-through path."""

    def score(self, X, y):
        # Always report a perfect score, whatever the data.
        return 1.0

    def fit(self, X, y):
        # Training is a no-op; return self per the estimator API.
        return self
class EstimatorWithFitAndPredict(object):
    """Stub estimator that memorises the targets seen in ``fit`` and
    replays them verbatim from ``predict``."""

    def fit(self, X, y):
        # Remember y so predict() can echo it back, ignoring X entirely.
        self.y = y
        return self

    def predict(self, X):
        # Return the stored targets regardless of the query data.
        return self.y
class DummyScorer(object):
    """Callable scorer stub whose result is always the constant 1."""

    def __call__(self, est, X, y):
        # All inputs are ignored; the tests only need determinism.
        return 1
def test_all_scorers_repr():
# Test that all scorers have a working repr
for name, scorer in SCORERS.items():
repr(scorer)
def test_check_scoring():
# Test all branches of check_scoring
# Objects without fit() must be rejected outright.
estimator = EstimatorWithoutFit()
pattern = (r"estimator should be an estimator implementing 'fit' method,"
r" .* was passed")
assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
# With no scoring given, an estimator-provided score() is passed through.
estimator = EstimatorWithFitAndScore()
estimator.fit([[1]], [1])
scorer = check_scoring(estimator)
assert_true(scorer is _passthrough_scorer)
assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
# No scoring and no score() method is an error.
estimator = EstimatorWithFitAndPredict()
estimator.fit([[1]], [1])
pattern = (r"If no scoring is specified, the estimator passed should have"
r" a 'score' method\. The estimator .* does not\.")
assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
scorer = check_scoring(estimator, "accuracy")
assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
estimator = EstimatorWithFit()
scorer = check_scoring(estimator, "accuracy")
assert_true(isinstance(scorer, _PredictScorer))
# allow_none=True returns None instead of raising when scoring is absent.
estimator = EstimatorWithFit()
scorer = check_scoring(estimator, allow_none=True)
assert_true(scorer is None)
def test_check_scoring_gridsearchcv():
# test that check_scoring works on GridSearchCV and pipeline.
# slightly redundant non-regression test.
grid = GridSearchCV(LinearSVC(), param_grid={'C': [.1, 1]})
scorer = check_scoring(grid, "f1")
assert_true(isinstance(scorer, _PredictScorer))
pipe = make_pipeline(LinearSVC())
scorer = check_scoring(pipe, "f1")
assert_true(isinstance(scorer, _PredictScorer))
# check that cross_val_score definitely calls the scorer
# and doesn't make any assumptions about the estimator apart from having a
# fit.
scores = cross_val_score(EstimatorWithFit(), [[1], [2], [3]], [1, 0, 1],
scoring=DummyScorer())
assert_array_equal(scores, 1)
def test_make_scorer():
# Sanity check on the make_scorer factory function.
# needs_threshold and needs_proba are mutually exclusive.
f = lambda *args: 0
assert_raises(ValueError, make_scorer, f, needs_threshold=True,
needs_proba=True)
def test_classification_scores():
# Test classification scorers.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LinearSVC(random_state=0)
clf.fit(X_train, y_train)
# Each named scorer must agree with a direct call to its metric function
# using the matching averaging strategy.
for prefix, metric in [('f1', f1_score), ('precision', precision_score),
('recall', recall_score)]:
score1 = get_scorer('%s_weighted' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='weighted')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s_macro' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='macro')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s_micro' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='micro')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=1)
assert_almost_equal(score1, score2)
# test fbeta score that takes an argument
scorer = make_scorer(fbeta_score, beta=2)
score1 = scorer(clf, X_test, y_test)
score2 = fbeta_score(y_test, clf.predict(X_test), beta=2)
assert_almost_equal(score1, score2)
# test that custom scorer can be pickled
unpickled_scorer = pickle.loads(pickle.dumps(scorer))
score3 = unpickled_scorer(clf, X_test, y_test)
assert_almost_equal(score1, score3)
# smoke test the repr:
repr(fbeta_score)
def test_regression_scorers():
# Test regression scorers.
# The named 'r2' scorer must match a direct r2_score call.
diabetes = load_diabetes()
X, y = diabetes.data, diabetes.target
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = Ridge()
clf.fit(X_train, y_train)
score1 = get_scorer('r2')(clf, X_test, y_test)
score2 = r2_score(y_test, clf.predict(X_test))
assert_almost_equal(score1, score2)
def test_thresholded_scorers():
# Test scorers that take thresholds.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LogisticRegression(random_state=0)
clf.fit(X_train, y_train)
# roc_auc must give the same result from decision_function and from the
# positive-class column of predict_proba.
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.decision_function(X_test))
score3 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
assert_almost_equal(score1, score2)
assert_almost_equal(score1, score3)
logscore = get_scorer('neg_log_loss')(clf, X_test, y_test)
logloss = log_loss(y_test, clf.predict_proba(X_test))
assert_almost_equal(-logscore, logloss)
# same for an estimator without decision_function
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
assert_almost_equal(score1, score2)
# test with a regressor (no decision_function)
reg = DecisionTreeRegressor()
reg.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(reg, X_test, y_test)
score2 = roc_auc_score(y_test, reg.predict(X_test))
assert_almost_equal(score1, score2)
# Test that an exception is raised on more than two classes
X, y = make_blobs(random_state=0, centers=3)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf.fit(X_train, y_train)
assert_raises(ValueError, get_scorer('roc_auc'), clf, X_test, y_test)
def test_thresholded_scorers_multilabel_indicator_data():
    # Test that the scorer work with multilabel-indicator format
    # for multilabel and multi-output multi-class classifier
    X, y = make_multilabel_classification(allow_unlabeled=False,
                                          random_state=0)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

    # Multi-output multi-class predict_proba
    clf = DecisionTreeClassifier()
    clf.fit(X_train, y_train)
    y_proba = clf.predict_proba(X_test)
    score1 = get_scorer('roc_auc')(clf, X_test, y_test)
    # np.vstack must receive a sequence, not a generator: generator input
    # was deprecated in NumPy 1.16 and raises TypeError in current NumPy.
    score2 = roc_auc_score(y_test, np.vstack([p[:, -1] for p in y_proba]).T)
    assert_almost_equal(score1, score2)

    # Multi-output multi-class decision_function
    # TODO Is there any yet?
    clf = DecisionTreeClassifier()
    clf.fit(X_train, y_train)
    clf._predict_proba = clf.predict_proba
    clf.predict_proba = None
    clf.decision_function = lambda X: [p[:, 1] for p in clf._predict_proba(X)]

    y_proba = clf.decision_function(X_test)
    score1 = get_scorer('roc_auc')(clf, X_test, y_test)
    # Same fix: pass a list to np.vstack instead of a generator expression.
    score2 = roc_auc_score(y_test, np.vstack(list(y_proba)).T)
    assert_almost_equal(score1, score2)

    # Multilabel predict_proba
    clf = OneVsRestClassifier(DecisionTreeClassifier())
    clf.fit(X_train, y_train)
    score1 = get_scorer('roc_auc')(clf, X_test, y_test)
    score2 = roc_auc_score(y_test, clf.predict_proba(X_test))
    assert_almost_equal(score1, score2)

    # Multilabel decision function
    clf = OneVsRestClassifier(LinearSVC(random_state=0))
    clf.fit(X_train, y_train)
    score1 = get_scorer('roc_auc')(clf, X_test, y_test)
    score2 = roc_auc_score(y_test, clf.decision_function(X_test))
    assert_almost_equal(score1, score2)
def test_supervised_cluster_scorers():
# Test clustering scorers against gold standard labeling.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
km = KMeans(n_clusters=3)
km.fit(X_train)
# Each named cluster scorer must agree with the same-named function in
# sklearn.metrics.cluster applied to (labels_true, labels_pred).
for name in CLUSTER_SCORERS:
score1 = get_scorer(name)(km, X_test, y_test)
score2 = getattr(cluster_module, name)(y_test, km.predict(X_test))
assert_almost_equal(score1, score2)
@ignore_warnings
def test_raises_on_score_list():
# Test that when a list of scores is returned, we raise proper errors.
# average=None makes f1_score return a per-class array, which scorers
# cannot consume as a single number.
X, y = make_blobs(random_state=0)
f1_scorer_no_average = make_scorer(f1_score, average=None)
clf = DecisionTreeClassifier()
assert_raises(ValueError, cross_val_score, clf, X, y,
scoring=f1_scorer_no_average)
grid_search = GridSearchCV(clf, scoring=f1_scorer_no_average,
param_grid={'max_depth': [1, 2]})
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_scorer_sample_weight():
# Test that scorers support sample_weight or raise sensible errors
# Unlike the metrics invariance test, in the scorer case it's harder
# to ensure that, on the classifier output, weighted and unweighted
# scores really should be unequal.
X, y = make_classification(random_state=0)
_, y_ml = make_multilabel_classification(n_samples=X.shape[0],
random_state=0)
split = train_test_split(X, y, y_ml, random_state=0)
X_train, X_test, y_train, y_test, y_ml_train, y_ml_test = split
# Zero-weight the first ten test samples; scoring with these weights
# must equal scoring with those samples dropped entirely.
sample_weight = np.ones_like(y_test)
sample_weight[:10] = 0
# get sensible estimators for each metric
estimator = _make_estimators(X_train, y_train, y_ml_train)
for name, scorer in SCORERS.items():
if name in MULTILABEL_ONLY_SCORERS:
target = y_ml_test
else:
target = y_test
try:
weighted = scorer(estimator[name], X_test, target,
sample_weight=sample_weight)
ignored = scorer(estimator[name], X_test[10:], target[10:])
unweighted = scorer(estimator[name], X_test, target)
assert_not_equal(weighted, unweighted,
msg="scorer {0} behaves identically when "
"called with sample weights: {1} vs "
"{2}".format(name, weighted, unweighted))
assert_almost_equal(weighted, ignored,
err_msg="scorer {0} behaves differently when "
"ignoring samples and setting sample_weight to"
" 0: {1} vs {2}".format(name, weighted,
ignored))
except TypeError as e:
# Scorers without sample_weight support must at least raise an
# error message that mentions the parameter.
assert_true("sample_weight" in str(e),
"scorer {0} raises unhelpful exception when called "
"with sample weights: {1}".format(name, str(e)))
@ignore_warnings  # UndefinedMetricWarning for P / R scores
def check_scorer_memmap(scorer_name):
    # Scoring memmap-backed data must yield a plain scalar, not a memmap.
    scorer = SCORERS[scorer_name]
    estimator = ESTIMATORS[scorer_name]
    target = y_ml_mm if scorer_name in MULTILABEL_ONLY_SCORERS else y_mm
    score = scorer(estimator, X_mm, target)
    assert isinstance(score, numbers.Number), scorer_name
def test_scorer_memmap_input():
    # Non-regression test for #6147: some score functions would
    # return singleton memmap when computed on memmap data instead of scalar
    # float values.
    for scorer_name in SCORERS:
        yield check_scorer_memmap, scorer_name
def test_deprecated_names():
    # Every legacy scorer name must emit a DeprecationWarning naming its
    # replacement, both when resolved directly and when used by name in
    # cross_val_score.
    X, y = make_blobs(random_state=0, centers=2)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf = LogisticRegression(random_state=0)
    clf.fit(X_train, y_train)
    deprecated_names = ('mean_absolute_error', 'mean_squared_error',
                        'median_absolute_error', 'log_loss')
    for name in deprecated_names:
        warning_msg = "Scoring method %s was renamed to" % name
        scorers = (get_scorer(name), SCORERS[name])
        for scorer in scorers:
            assert_warns_message(DeprecationWarning,
                                 warning_msg,
                                 scorer, clf, X, y)
        assert_warns_message(DeprecationWarning,
                             warning_msg,
                             cross_val_score, clf, X, y, scoring=name)
def test_scoring_is_not_metric():
    # Passing a raw metric function where a scorer is expected must raise a
    # ValueError whose message points the user at make_scorer.
    cases = (
        (LogisticRegression(), f1_score),
        (LogisticRegression(), roc_auc_score),
        (Ridge(), r2_score),
        (KMeans(), cluster_module.adjusted_rand_score),
    )
    for estimator, metric in cases:
        assert_raises_regexp(ValueError, 'make_scorer', check_scoring,
                             estimator, metric)
| bsd-3-clause |
GeoscienceAustralia/geodesy-domain-model | aws/amazonia/amazonia/classes/amz_cf_distribution.py | 3 | 10697 | #!/usr/bin/python3
from troposphere import cloudfront, ImportValue
class CloudfrontConfigError(Exception):
    """
    Error thrown when Cloudfront objects are misconfigured.

    :param value: human readable description of the misconfiguration
    """
    def __init__(self, value):
        # BUG FIX: previously Exception.__init__ was never called, so
        # `args` stayed empty and str(exc) was '' -- the error message was
        # lost in tracebacks and logs. Forward the message to the base class
        # while keeping the `value` attribute for existing callers.
        super(CloudfrontConfigError, self).__init__(value)
        self.value = value
class CFDistribution(object):
    """Builds a Cloudfront Distribution with its origins and cache behaviors.

    Subclasses must implement get_custom_reference() (template-method
    pattern) to resolve the domain name of non-S3 origins.
    """
    def __init__(self, title, template, cf_origins_config, cf_cache_behavior_config,
                 cf_distribution_config):
        """
        Class to abstract a Cloudfront Distribution object
        http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudfront-distributionconfig.html
        https://github.com/cloudtools/troposphere/blob/master/troposphere/cloudfront.py
        :param title: title of the Cloudfront Distribution and associated resources to be used in cloud formation
        :param template: Troposphere stack to append resources to
        :param cf_origins_config: A list of CFOriginsConfig objects
        :param cf_cache_behavior_config: A list of CFCacheBehavior objects
        :param cf_distribution_config: A CFDistributionConfig object
        """
        self.title = title
        self.origins = []
        self.cache_behaviors = []
        # Placeholder; replaced by the behavior flagged is_default in
        # add_cache_behaviors().
        self.default_cache_behavior = cloudfront.DefaultCacheBehavior()
        # Populate origins
        self.add_origins(self.title, cf_origins_config)
        # Populate cache_behaviors
        self.add_cache_behaviors(self.title, cf_cache_behavior_config)
        # Set distribution-wide parameters
        self.cf_dist = cloudfront.DistributionConfig(
            self.title + 'CfDistConfig',
            Aliases=cf_distribution_config.aliases,
            Comment=self.title,
            DefaultCacheBehavior=self.default_cache_behavior,
            CacheBehaviors=self.cache_behaviors,
            DefaultRootObject=cf_distribution_config.default_root_object,
            Enabled=cf_distribution_config.enabled,
            Origins=self.origins,
            PriceClass=cf_distribution_config.price_class
        )
        # Attach an ACM certificate (served via SNI) only when one is
        # configured; otherwise Cloudfront falls back to its default cert.
        if cf_distribution_config.acm_cert_arn:
            self.cf_dist.ViewerCertificate = cloudfront.ViewerCertificate(
                AcmCertificateArn=cf_distribution_config.acm_cert_arn,
                SslSupportMethod='sni-only'
            )
        # NOTE: self.cf_dist is rebound here from the DistributionConfig to
        # the Distribution resource that is added to the template.
        self.cf_dist = template.add_resource(cloudfront.Distribution(
            title,
            DistributionConfig=self.cf_dist
        )
        )
    def add_origins(self, title, cf_origins_config):
        """
        Create Cloudfront Origin objects and append to list of origins
        :param title: Title of this Cloudfront Distribution
        :param cf_origins_config: List of CFOrigins
        """
        for number, origin in enumerate(cf_origins_config):
            created_origin = cloudfront.Origin(
                '{0}Origin{1}'.format(title, number),
                DomainName=origin.domain_name,
                Id=origin.origin_id
            )
            if origin.origin_path:
                created_origin.OriginPath = origin.origin_path
            if origin.custom_headers:
                created_headers = []
                # Headers whose value is None are skipped so that they are
                # omitted from the created origin entirely.
                for k, v in origin.custom_headers.items():
                    if v is not None:
                        created_headers.append(
                            cloudfront.OriginCustomHeader(HeaderName=k, HeaderValue=v)
                        )
                created_origin.OriginCustomHeaders = created_headers
            # Set S3 config
            if origin.origin_policy['is_s3']:
                # Create S3Origin
                s3_origin_config = cloudfront.S3Origin()
                # Ensure variables exist
                if origin.origin_access_identity:
                    s3_origin_config.OriginAccessIdentity = origin.origin_access_identity
                # Set S3Origin
                created_origin.S3OriginConfig = s3_origin_config
            # Set Custom config
            else:
                # For custom origins the literal domain name is replaced by a
                # reference resolved through the subclass hook (cross-stack
                # export or shared stack config).
                created_origin.DomainName = self.get_custom_reference(origin.domain_name)
                # Create CustomOrigin
                custom_origin_config = cloudfront.CustomOrigin()
                # Ensure variables exist
                if origin.http_port:
                    custom_origin_config.HTTPPort = origin.http_port
                if origin.https_port:
                    custom_origin_config.HTTPSPort = origin.https_port
                if origin.origin_protocol_policy:
                    custom_origin_config.OriginProtocolPolicy = origin.origin_protocol_policy
                if origin.origin_ssl_protocols:
                    custom_origin_config.OriginSSLProtocols = origin.origin_ssl_protocols
                # Set CustomOrigin
                created_origin.CustomOriginConfig = custom_origin_config
            self.origins.append(created_origin)
    def add_cache_behaviors(self, title, cf_cache_behavior_config):
        """
        Create Cloudfront CacheBehavior objects and append to list of cache_behaviors
        :param title: Title of this Cloudfront Distribution
        :param cf_cache_behavior_config: list of CFCacheBehavior
        """
        default_cache_behavior_count = 0
        cache_behavior_count = 0
        for number, cache_behavior in enumerate(cf_cache_behavior_config):
            forwarded_values = cloudfront.ForwardedValues(
                Cookies=cloudfront.Cookies(
                    Forward=cache_behavior.forward_cookies
                ),
                QueryString=cache_behavior.query_string
            )
            if cache_behavior.forwarded_headers is not None:
                forwarded_values.Headers = cache_behavior.forwarded_headers
            # Shared parameter set; PathPattern is added below for non-default
            # behaviors only (the default behavior must not have one).
            cf_cache_behavior_params = {
                'AllowedMethods': cache_behavior.allowed_methods,
                'CachedMethods': cache_behavior.cached_methods,
                'Compress': False,
                'TargetOriginId': cache_behavior.target_origin_id,
                'ForwardedValues': forwarded_values,
                'TrustedSigners': cache_behavior.trusted_signers,
                'ViewerProtocolPolicy': cache_behavior.viewer_protocol_policy,
                'MinTTL': cache_behavior.min_ttl,
                'DefaultTTL': cache_behavior.default_ttl,
                'MaxTTL': cache_behavior.max_ttl,
                'SmoothStreaming': False
            }
            if cache_behavior.is_default:
                # Add default cache behavior
                self.default_cache_behavior = cloudfront.DefaultCacheBehavior(
                    '{0}DefaultCacheBehavior'.format(title),
                    **cf_cache_behavior_params
                )
                default_cache_behavior_count += 1
            else:
                # Append additional cache behaviors to list
                cf_cache_behavior_params['PathPattern'] = cache_behavior.path_pattern
                created_cache_behavior = cloudfront.CacheBehavior(
                    '{0}CacheBehavior{1}'.format(title, number),
                    **cf_cache_behavior_params
                )
                self.cache_behaviors.append(created_cache_behavior)
                cache_behavior_count += 1
        # if there is at least one cache behavior, there must be exactly one default cache behavior
        if cache_behavior_count > 0 and default_cache_behavior_count != 1:
            raise CloudfrontConfigError(
                'Error: cf_distribution_unit {0} must have exactly one default cache behavior.'.format(title))
    def get_custom_reference(self, domain_name):
        """
        Define abstract method to be overridden by implementing classes
        :param domain_name: domain name of amazonia resource
        """
        raise NotImplementedError("Please Implement this method")
class CFDistributionLeaf(CFDistribution):
    def __init__(self, leaf_title, tree_name, template, cf_origins_config, cf_cache_behavior_config,
                 cf_distribution_config):
        """
        Create a Cloudfront distribution as a leaf of a cross referenced stack.
        :param leaf_title: title of the distribution within the cross referenced stack
        :param tree_name: name of the cross referenced stack
        :param template: Troposphere stack to append resources to
        :param cf_origins_config: list of CFOriginsConfig objects
        :param cf_cache_behavior_config: list of CFCacheBehavior objects
        :param cf_distribution_config: CFDistributionConfig object
        """
        # tree_name must be set before the base __init__ runs, because the
        # base class calls get_custom_reference() while adding origins.
        self.tree_name = tree_name
        super(CFDistributionLeaf, self).__init__(
            title=leaf_title,
            template=template,
            cf_origins_config=cf_origins_config,
            cf_cache_behavior_config=cf_cache_behavior_config,
            cf_distribution_config=cf_distribution_config)
    def get_custom_reference(self, domain_name):
        """
        Resolve an endpoint exported by a different stack in the same tree.
        :param domain_name: amazonia name of the endpoint
        :return: ImportValue reference to the exported endpoint
        """
        export_name = '-'.join([self.tree_name, domain_name, 'Endpoint'])
        return ImportValue(export_name)
class CFDistributionUnit(CFDistribution):
    def __init__(self, unit_title, template, stack_config, cf_origins_config, cf_cache_behavior_config,
                 cf_distribution_config):
        """
        Create a Cloudfront Distribution as a unit within an integrated stack.
        :param unit_title: title of the distribution within the integrated stack
        :param template: troposphere template
        :param stack_config: shared stack configuration object holding generated endpoints
        :param cf_origins_config: list of CFOriginsConfig objects
        :param cf_cache_behavior_config: list of CFCacheBehavior objects
        :param cf_distribution_config: CFDistributionConfig object
        """
        # stack_config must be set before the base __init__ runs, because the
        # base class calls get_custom_reference() while adding origins.
        self.stack_config = stack_config
        super(CFDistributionUnit, self).__init__(
            title=unit_title,
            template=template,
            cf_origins_config=cf_origins_config,
            cf_cache_behavior_config=cf_cache_behavior_config,
            cf_distribution_config=cf_distribution_config)
    def get_custom_reference(self, domain_name):
        """
        Look up an endpoint registered by another unit of the same stack.
        :param domain_name: amazonia name of the endpoint
        :return: the endpoint of the specified amazonia object
        """
        endpoints = self.stack_config.endpoints
        return endpoints[domain_name]
| bsd-3-clause |
loco-odoo/localizacion_co | openerp/addons-extra/odoo-pruebas/odoo-server/openerp/addons/base/res/ir_property.py | 116 | 12223 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from operator import itemgetter
import time
from openerp import models, api
from openerp.osv import osv, orm, fields
from openerp.tools.misc import attrgetter
# -------------------------------------------------------------------------
# Properties
# -------------------------------------------------------------------------
# Maps an ir.property `type` to the column that stores its value. Note that
# several types share one storage column: char/text/selection all live in
# value_text, and boolean values are persisted in value_integer.
TYPE2FIELD = {
    'char': 'value_text',
    'float': 'value_float',
    'boolean': 'value_integer',
    'integer': 'value_integer',
    'text': 'value_text',
    'binary': 'value_binary',
    'many2one': 'value_reference',
    'date': 'value_datetime',
    'datetime': 'value_datetime',
    'selection': 'value_text',
}
class ir_property(osv.osv):
    """Storage for company-dependent ("property") field values.

    Each record holds one value of a property field, either attached to a
    specific resource (``res_id`` set, formatted as '<model>,<id>') or acting
    as the field's default value (``res_id`` unset). The value itself lives
    in one of several typed columns, selected through TYPE2FIELD.
    """
    _name = 'ir.property'
    _columns = {
        'name': fields.char('Name', select=1),
        'res_id': fields.char('Resource', help="If not set, acts as a default value for new resources", select=1),
        'company_id': fields.many2one('res.company', 'Company', select=1),
        'fields_id': fields.many2one('ir.model.fields', 'Field', ondelete='cascade', required=True, select=1),
        'value_float' : fields.float('Value'),
        'value_integer' : fields.integer('Value'),
        'value_text' : fields.text('Value'), # will contain (char, text)
        'value_binary' : fields.binary('Value'),
        'value_reference': fields.char('Value'),
        'value_datetime' : fields.datetime('Value'),
        'type' : fields.selection([('char', 'Char'),
                                   ('float', 'Float'),
                                   ('boolean', 'Boolean'),
                                   ('integer', 'Integer'),
                                   ('text', 'Text'),
                                   ('binary', 'Binary'),
                                   ('many2one', 'Many2One'),
                                   ('date', 'Date'),
                                   ('datetime', 'DateTime'),
                                   ('selection', 'Selection'),
                                   ],
                                  'Type',
                                  required=True,
                                  select=1),
    }
    _defaults = {
        'type': 'many2one',
    }
    def _update_values(self, cr, uid, ids, values):
        """Move a generic 'value' entry of `values` into the typed value_*
        column matching the property's type, and normalize many2one values
        to the '<model>,<id>' reference format.

        NOTE(review): a falsy value (0, False, '') is popped and silently
        dropped, leaving no typed column written -- presumably intentional
        (empty meaning "no value"); confirm against callers.
        """
        value = values.pop('value', None)
        if not value:
            return values
        prop = None
        type_ = values.get('type')
        if not type_:
            # Fall back to the type of the first record being written, or to
            # the model default when creating.
            if ids:
                prop = self.browse(cr, uid, ids[0])
                type_ = prop.type
            else:
                type_ = self._defaults['type']
        field = TYPE2FIELD.get(type_)
        if not field:
            raise osv.except_osv('Error', 'Invalid type')
        if field == 'value_reference':
            if isinstance(value, orm.BaseModel):
                value = '%s,%d' % (value._name, value.id)
            elif isinstance(value, (int, long)):
                # A bare id: resolve the target model from the property's
                # field definition to build the reference string.
                field_id = values.get('fields_id')
                if not field_id:
                    if not prop:
                        raise ValueError()
                    field_id = prop.fields_id
                else:
                    field_id = self.pool.get('ir.model.fields').browse(cr, uid, field_id)
                value = '%s,%d' % (field_id.relation, value)
        values[field] = value
        return values
    def write(self, cr, uid, ids, values, context=None):
        # Normalize a generic 'value' into the proper typed column first.
        return super(ir_property, self).write(cr, uid, ids, self._update_values(cr, uid, ids, values), context=context)
    def create(self, cr, uid, values, context=None):
        # Normalize a generic 'value' into the proper typed column first.
        return super(ir_property, self).create(cr, uid, self._update_values(cr, uid, None, values), context=context)
    def get_by_record(self, cr, uid, record, context=None):
        """Return the Python value stored by `record`, decoded according to
        its type (e.g. many2one references are browsed, dates reformatted).
        Returns False for unknown types or empty references."""
        if record.type in ('char', 'text', 'selection'):
            return record.value_text
        elif record.type == 'float':
            return record.value_float
        elif record.type == 'boolean':
            return bool(record.value_integer)
        elif record.type == 'integer':
            return record.value_integer
        elif record.type == 'binary':
            return record.value_binary
        elif record.type == 'many2one':
            if not record.value_reference:
                return False
            model, resource_id = record.value_reference.split(',')
            value = self.pool[model].browse(cr, uid, int(resource_id), context=context)
            # exists() filters out dangling references to deleted records.
            return value.exists()
        elif record.type == 'datetime':
            return record.value_datetime
        elif record.type == 'date':
            if not record.value_datetime:
                return False
            return time.strftime('%Y-%m-%d', time.strptime(record.value_datetime, '%Y-%m-%d %H:%M:%S'))
        return False
    def get(self, cr, uid, name, model, res_id=False, context=None):
        """Return the value of property field `name` of `model` for `res_id`
        (or the default value when res_id is False). Returns False when the
        field or the property does not exist."""
        domain = self._get_domain(cr, uid, name, model, context=context)
        if domain is not None:
            domain = [('res_id', '=', res_id)] + domain
            #make the search with company_id asc to make sure that properties specific to a company are given first
            nid = self.search(cr, uid, domain, limit=1, order='company_id asc', context=context)
            if not nid: return False
            record = self.browse(cr, uid, nid[0], context=context)
            return self.get_by_record(cr, uid, record, context=context)
        return False
    def _get_domain(self, cr, uid, prop_name, model, context=None):
        """Build the search domain selecting property records for field
        `prop_name` of `model` in the relevant company (or the company-less
        default). Returns None when the field does not exist."""
        context = context or {}
        cr.execute('select id from ir_model_fields where name=%s and model=%s', (prop_name, model))
        res = cr.fetchone()
        if not res:
            return None
        cid = context.get('force_company')
        if not cid:
            company = self.pool.get('res.company')
            cid = company._company_default_get(cr, uid, model, res[0], context=context)
        return [('fields_id', '=', res[0]), ('company_id', 'in', [cid, False])]
    @api.model
    def get_multi(self, name, model, ids):
        """ Read the property field `name` for the records of model `model` with
            the given `ids`, and return a dictionary mapping `ids` to their
            corresponding value.
        """
        if not ids:
            return {}
        domain = self._get_domain(name, model)
        if domain is None:
            return dict.fromkeys(ids, False)
        # retrieve the values for the given ids and the default value, too
        refs = {('%s,%s' % (model, id)): id for id in ids}
        refs[False] = False
        domain += [('res_id', 'in', list(refs))]
        # note: order by 'company_id asc' will return non-null values first
        props = self.search(domain, order='company_id asc')
        result = {}
        for prop in props:
            # for a given res_id, take the first property only
            id = refs.pop(prop.res_id, None)
            if id is not None:
                result[id] = self.get_by_record(prop)
        # set the default value to the ids that are not in result
        default_value = result.pop(False, False)
        for id in ids:
            result.setdefault(id, default_value)
        return result
    @api.model
    def set_multi(self, name, model, values):
        """ Assign the property field `name` for the records of model `model`
            with `values` (dictionary mapping record ids to their value).
        """
        def clean(value):
            # Normalize recordsets to their id for comparison/storage.
            return value.id if isinstance(value, models.BaseModel) else value
        if not values:
            return
        domain = self._get_domain(name, model)
        if domain is None:
            raise Exception()
        # retrieve the default value for the field
        default_value = clean(self.get(name, model))
        # retrieve the properties corresponding to the given record ids
        self._cr.execute("SELECT id FROM ir_model_fields WHERE name=%s AND model=%s", (name, model))
        field_id = self._cr.fetchone()[0]
        company_id = self.env['res.company']._company_default_get(model, field_id)
        refs = {('%s,%s' % (model, id)): id for id in values}
        props = self.search([
            ('fields_id', '=', field_id),
            ('company_id', '=', company_id),
            ('res_id', 'in', list(refs)),
        ])
        # modify existing properties
        for prop in props:
            id = refs.pop(prop.res_id)
            value = clean(values[id])
            if value == default_value:
                # A value equal to the default is redundant: drop the record.
                prop.unlink()
            elif value != clean(prop.get_by_record(prop)):
                prop.write({'value': value})
        # create new properties for records that do not have one yet
        for ref, id in refs.iteritems():
            value = clean(values[id])
            if value != default_value:
                self.create({
                    'fields_id': field_id,
                    'company_id': company_id,
                    'res_id': ref,
                    'name': name,
                    'value': value,
                    'type': self.env[model]._fields[name].type,
                })
    @api.model
    def search_multi(self, name, model, operator, value):
        """ Return a domain for the records that match the given condition. """
        field = self.env[model]._fields[name]
        if field.type == 'many2one':
            # Translate the searched value(s) into '<comodel>,<id>' reference
            # strings, since that is what value_reference stores.
            comodel = field.comodel_name
            def makeref(value):
                return value and '%s,%s' % (comodel, value)
            if operator in ('=', '!=', '<=', '<', '>', '>='):
                value = makeref(value)
            elif operator in ('in', 'not in'):
                value = map(makeref, value)
            elif operator in ('=like', '=ilike', 'like', 'not like', 'ilike', 'not ilike'):
                # most probably inefficient... but correct
                target = self.env[comodel]
                target_names = target.name_search(value, operator=operator, limit=None)
                target_ids = map(itemgetter(0), target_names)
                operator, value = 'in', map(makeref, target_ids)
        # retrieve the properties that match the condition
        domain = self._get_domain(name, model)
        if domain is None:
            raise Exception()
        props = self.search(domain + [(TYPE2FIELD[field.type], operator, value)])
        # retrieve the records corresponding to the properties that match
        good_ids = []
        default_matches = False
        for prop in props:
            if prop.res_id:
                res_model, res_id = prop.res_id.split(',')
                good_ids.append(int(res_id))
            else:
                default_matches = True
        if default_matches:
            # exclude all records with a property that does not match
            all_ids = []
            props = self.search(domain + [('res_id', '!=', False)])
            for prop in props:
                res_model, res_id = prop.res_id.split(',')
                all_ids.append(int(res_id))
            bad_ids = list(set(all_ids) - set(good_ids))
            return [('id', 'not in', bad_ids)]
        else:
            return [('id', 'in', good_ids)]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Peratham/tensorlib | tensorlib/decomposition/tests/test_decomposition.py | 2 | 1591 | import numpy as np
from tensorlib.decomposition import cp
from tensorlib.decomposition.decomposition import _cp3
from tensorlib.decomposition import tucker
from tensorlib.decomposition.decomposition import _tucker3
from tensorlib.datasets import load_bread
from numpy.testing import assert_almost_equal
from nose.tools import assert_raises
def test_generated_cp():
    """
    Test CANDECOMP/PARFAC decomposition. Problem from
    http://issnla2010.ba.cnr.it/DecompositionsI.pdf
    """
    rs = np.random.RandomState(1999)
    X = .7 * rs.rand(2, 4, 3) + .25 * rs.rand(2, 4, 3)
    # The rank argument is mandatory.
    assert_raises(ValueError, cp, X)
    via_api = cp(X, 2, init_type="hosvd")
    via_impl = _cp3(X, 2, tol=1E-4, max_iter=500, init_type="hosvd")
    # The public entry point must agree factor-by-factor with the
    # 3rd-order implementation.
    for idx, _ in enumerate(via_api):
        assert_almost_equal(via_api[idx], via_impl[idx])
def test_bread_cp():
    """
    Test CANDECOMP/PARFAC decomposition using bread dataset.
    """
    X, meta = load_bread()
    # The rank argument is mandatory.
    assert_raises(ValueError, cp, X)
    via_api = cp(X, 2, init_type="hosvd")
    via_impl = _cp3(X, 2, tol=1E-4, max_iter=500, init_type="hosvd")
    # The public entry point must agree factor-by-factor with the
    # 3rd-order implementation.
    for idx, _ in enumerate(via_api):
        assert_almost_equal(via_api[idx], via_impl[idx])
def test_generated_tucker():
    """
    Test Tucker decomposition. Problem from
    http://issnla2010.ba.cnr.it/DecompositionsI.pdf
    """
    rs = np.random.RandomState(1999)
    X = .7 * rs.rand(2, 4, 3) + .25 * rs.rand(2, 4, 3)
    # BUG FIX: this previously asserted that `cp` raises (copy-paste from
    # test_generated_cp); the Tucker test must check `tucker` itself
    # rejects a call without an explicit rank.
    assert_raises(ValueError, tucker, X)
    U1 = tucker(X, 2, init_type="hosvd")
    U2 = _tucker3(X, 2, tol=1E-4, max_iter=500, init_type="hosvd")
    for n, i in enumerate(U1):
        assert_almost_equal(U1[n], U2[n])
| bsd-3-clause |
brownnrl/moneyguru | core/tests/gui/general_ledger_view_test.py | 1 | 1195 | # Copyright 2019 Virgil Dupras
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html
from ..testutil import eq_
from ...const import AccountType
from ..base import TestApp, with_app
# ---
def app_two_txns():
    """Fixture: an app with accounts 'one' (asset) and 'two' (liability) and
    two transactions between them (42 from 'one' to 'two', then 12 back),
    with the general ledger view shown."""
    app = TestApp()
    app.add_account('one')
    app.add_account('two', account_type=AccountType.Liability)
    app.add_txn(description='first', from_='one', to='two', amount='42')
    app.add_txn(description='second', from_='two', to='one', amount='12')
    app.show_glview()
    return app
@with_app(app_two_txns)
def test_totals_one_selected(app):
    # the totals line shows totals for selected entries
    # (removed a leftover debug print of the table length)
    app.gltable.select([1])
    expected = "1 out of 4 selected. Debit: 0.00 Credit: 42.00"
    eq_(app.mw.status_line, expected)
@with_app(app_two_txns)
def test_totals_four_selected(app):
    # the totals line shows totals for selected entries
    # (each transaction appears twice in the general ledger -- once per
    # account -- hence 4 rows and debit/credit both summing to 54.00)
    app.gltable.select([1, 2, 5, 6])
    expected = "4 out of 4 selected. Debit: 54.00 Credit: 54.00"
    eq_(app.mw.status_line, expected)
| gpl-3.0 |
placrosse/ImpalaToGo | common/thrift/generate_error_codes.py | 4 | 6443 | #!/usr/bin/env python
# Copyright 2015 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# For readability purposes we define the error codes and messages at the top of the
# file. New codes and messages must be added here. Old error messages MUST NEVER BE
# DELETED, but can be renamed. The tuple layout for a new entry is: error code enum name,
# numeric error code, format string of the message.
#
# TODO Add support for SQL Error Codes
# https://msdn.microsoft.com/en-us/library/ms714687%28v=vs.85%29.aspx
# Tuple layout: (error code enum name, numeric error code, message format
# string). Messages use Substitute-style $0, $1, ... argument placeholders.
# Numeric codes must never be reused (see check_duplicates below).
error_codes = (
  ("OK", 1, ""),
  ("GENERAL", 2, "$0"),
  ("CANCELLED", 3, "$0"),
  ("ANALYSIS_ERROR", 4, "$0"),
  ("NOT_IMPLEMENTED_ERROR", 5, "$0"),
  ("RUNTIME_ERROR", 6, "$0"),
  ("MEM_LIMIT_EXCEEDED", 7, "$0"),
  ("INTERNAL_ERROR", 8, "$0"),
  ("RECOVERABLE_ERROR", 9, "$0"),
  ("PARQUET_MULTIPLE_BLOCKS", 10,
   "Parquet files should not be split into multiple hdfs-blocks. file=$0"),
  ("PARQUET_COLUMN_METADATA_INVALID", 11,
   "Column metadata states there are $0 values, but only read $1 values "
   "from column $2"),
  ("PARQUET_HEADER_PAGE_SIZE_EXCEEDED", 12,
   "ParquetScanner: could not read data page because page header exceeded "
   "maximum size of $0"),
  ("PARQUET_HEADER_EOF", 13,
   "ParquetScanner: reached EOF while deserializing data page header."),
  ("PARQUET_GROUP_ROW_COUNT_ERROR", 14,
   "Metadata states that in group $0($1) there are $2 rows, but only $3 "
   "rows were read."),
  ("PARQUET_GROUP_ROW_COUNT_OVERFLOW", 15,
   "Metadata states that in group $0($1) there are $2 rows, but there is at least one "
   "more row in the file."),
  ("PARQUET_MISSING_PRECISION", 16,
   "File '$0' column '$1' does not have the decimal precision set."),
  ("PARQUET_WRONG_PRECISION", 17,
   "File '$0' column '$1' has a precision that does not match the table metadata "
   " precision. File metadata precision: $2, table metadata precision: $3."),
  ("PARQUET_BAD_CONVERTED_TYPE", 18,
   "File '$0' column '$1' does not have converted type set to DECIMAL"),
  ("PARQUET_INCOMPATIBLE_DECIMAL", 19,
   "File '$0' column '$1' contains decimal data but the table metadata has type $2"),
  ("SEQUENCE_SCANNER_PARSE_ERROR", 20,
   "Problem parsing file $0 at $1$2"),
  ("SNAPPY_DECOMPRESS_INVALID_BLOCK_SIZE", 21,
   "Decompressor: block size is too big. Data is likely corrupt. Size: $0"),
  ("SNAPPY_DECOMPRESS_INVALID_COMPRESSED_LENGTH", 22,
   "Decompressor: invalid compressed length. Data is likely corrupt."),
  ("SNAPPY_DECOMPRESS_UNCOMPRESSED_LENGTH_FAILED", 23,
   "Snappy: GetUncompressedLength failed"),
  ("SNAPPY_DECOMPRESS_RAW_UNCOMPRESS_FAILED", 24,
   "SnappyBlock: RawUncompress failed"),
  ("SNAPPY_DECOMPRESS_DECOMPRESS_SIZE_INCORRECT", 25,
   "Snappy: Decompressed size is not correct."),
  ("HDFS_SCAN_NODE_UNKNOWN_DISK", 26, "Unknown disk id. "
   "This will negatively affect performance. "
   "Check your hdfs settings to enable block location metadata."),
  ("FRAGMENT_EXECUTOR", 27, "Reserved resource size ($0) is larger than "
   "query mem limit ($1), and will be restricted to $1. Configure the reservation "
   "size by setting RM_INITIAL_MEM."),
  ("PARTITIONED_HASH_JOIN_MAX_PARTITION_DEPTH", 28,
   "Cannot perform join at hash join node with id $0."
   " The input data was partitioned the maximum number of $1 times."
   " This could mean there is significant skew in the data or the memory limit is"
   " set too low."),
  ("PARTITIONED_AGG_MAX_PARTITION_DEPTH", 29,
   "Cannot perform aggregation at hash aggregation node with id $0."
   " The input data was partitioned the maximum number of $1 times."
   " This could mean there is significant skew in the data or the memory limit is"
   " set too low."),
  ("MISSING_BUILTIN", 30, "Builtin '$0' with symbol '$1' does not exist. "
   "Verify that all your impalads are the same version."),
  ("RPC_GENERAL_ERROR", 31, "RPC Error: $0"),
  ("RPC_TIMEOUT", 32, "RPC timed out"),
)
import sys
import os
# Verifies the uniqueness of the error constants and numeric error codes.
def check_duplicates(codes):
    """Validate that no error constant name or numeric code is used twice.

    :param codes: iterable of (constant_name, numeric_code, message) tuples.
    Prints a diagnostic and exits the process with status 1 on the first
    duplicate found; returns None when all entries are unique.
    """
    constants = {}
    num_codes = {}
    for row in codes:
        if row[0] in constants:
            print("Constant %s already used, please check definition of '%s'!" % \
                (row[0], constants[row[0]]))
            # Use sys.exit (not the site-provided `exit` builtin) so the
            # script also works under `python -S` / frozen interpreters.
            sys.exit(1)
        if row[1] in num_codes:
            print("Numeric error code %d already used, please check definition of '%s'!" % \
                (row[1], num_codes[row[1]]))
            sys.exit(1)
        constants[row[0]] = row[2]
        num_codes[row[1]] = row[2]
preamble = """
// Copyright 2015 Cloudera Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//
// THIS FILE IS AUTO GENERATED BY generated_error_codes.py DO NOT MODIFY
// IT BY HAND.
//
namespace cpp impala
namespace java com.cloudera.impala.thrift
"""
# The script will always generate the file, CMake will take care of running it only if
# necessary.
target_file = "ErrorCodes.thrift"
# Check uniqueness of error constants and numeric codes
check_duplicates(error_codes)
fid = open(target_file, "w+")
try:
fid.write(preamble)
fid.write("""\nenum TErrorCode {\n""")
fid.write(",\n".join(map(lambda x: " %s" % x[0], error_codes)))
fid.write("\n}")
fid.write("\n")
fid.write("const list<string> TErrorMessage = [\n")
fid.write(",\n".join(map(lambda x: " // %s\n \"%s\"" %(x[0], x[2]), error_codes)))
fid.write("\n]")
finally:
fid.close()
print("%s created." % target_file)
| apache-2.0 |
Danfocus/Flexget | flexget/tests/test_feed_control.py | 13 | 2291 | from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import pytest
class TestOnlyTask(object):
    """
    Test --task option
    """

    # Two independent tasks; only the one named via --task should run.
    config = """
        tasks:
          test:
            mock:
              - {title: 'download', url: 'http://localhost/download'}
          test2:
            mock:
              - {title: 'nodownload', url: 'http://localhost/nodownload'}
    """

    @pytest.mark.skip(reason="1.2 we need to test this with execute command")
    def test_manual_with_onlytask(self, execute_task):
        """The task selected with --task runs; any other task must abort
        without producing entries. Skipped until reimplemented on top of
        the 1.2 execute command."""
        # TODO: 1.2 we need to test this with execute command
        # Pretend we have been run with --task test
        # This task should run normally, as we specified it as onlytask
        task = execute_task('test', options=dict(tasks=['test']))
        assert task.find_entry(title='download'), 'task failed to download with --task'
        # This task should be disabled, as it wasn't specified with onlytask
        task = execute_task('test2', options=dict(tasks=['test']), abort_ok=True)
        assert task.aborted
        assert not task.find_entry(title='nodownload'), 'task should not have been executed'
class TestManualAutomatic(object):
    """
    Test manual download tasks
    """

    # A task flagged `manual: true` must not run on an automatic execution.
    config = """
        tasks:
          test:
            manual: true
            mock:
              - {title: 'nodownload', url: 'http://localhost/nodownload'}
    """

    def test_manual_without_onlytask(self, execute_task):
        """A manual task aborts (and yields nothing) when it is executed
        automatically, i.e. without being explicitly requested."""
        task = execute_task('test', abort=True)
        assert task.aborted
        assert not task.find_entry(title='nodownload'), 'Manual tasks downloaded on automatic run'
class TestManualOnlytask(object):
    """
    Test manual download tasks
    """

    # A manual task that IS explicitly requested must run normally.
    config = """
        tasks:
          test2:
            manual: true
            mock:
              - {title: 'download', url: 'http://localhost/download'}
    """

    def test_manual_with_onlytask(self, execute_task):
        """A manual task runs and produces its entries when explicitly
        selected (equivalent of --task test2 with manual runs allowed)."""
        # Pretend we have been run with --task test2
        task = execute_task('test2', options=dict(tasks=['test2'], allow_manual=True))
        assert task.find_entry(title='download'), 'Manual tasks failed to download on manual run'
| mit |
stvstnfrd/edx-platform | openedx/core/djangoapps/olx_rest_api/block_serializer.py | 5 | 7645 | """
Code for serializing a modulestore XBlock to OLX suitable for import into
Blockstore.
"""
import logging
import os
from collections import namedtuple
from lxml import etree
from . import adapters
log = logging.getLogger(__name__)
# A static file required by an XBlock.
# name: file name within the bundle; url: source URL if it lives in the
# course contentstore (else None); data: raw bytes if exported by the
# block itself (else None). Exactly one of url/data is expected to be set.
StaticFile = namedtuple('StaticFile', ['name', 'url', 'data'])
def blockstore_def_key_from_modulestore_usage_key(usage_key):
    """
    Derive a Blockstore "definition key" string from a modulestore usage key.

    In modulestore, the "definition key" is a MongoDB ObjectID kept in split's
    definitions table, which theoretically allows the same block to be used in
    many places (each with a unique usage key). However, that functionality is
    not exposed in Studio (other than via content libraries). So when we import
    into Blockstore, we assume that each usage is unique, don't generate a usage
    key, and create a new "definition key" from the original usage key.

    So modulestore usage key
        block-v1:A+B+C+type@html+block@introduction
    will become Blockstore definition key
        html/introduction
    """
    # <vertical> blocks are renamed to <unit> on import.
    block_type = "unit" if usage_key.block_type == 'vertical' else usage_key.block_type
    return "{}/{}".format(block_type, usage_key.block_id)
class XBlockSerializer(object):
    """
    This class will serialize an XBlock, producing:
        (1) A new definition ID for use in Blockstore
        (2) an XML string defining the XBlock and referencing the IDs of its
            children (but not containing the actual XML of its children)
        (3) a list of any static files required by the XBlock and their URL
    """

    def __init__(self, block):
        """
        Serialize an XBlock to an OLX string + supporting files, and store the
        resulting data in this object.
        """
        self.orig_block_key = block.scope_ids.usage_id
        self.static_files = []
        self.def_id = blockstore_def_key_from_modulestore_usage_key(self.orig_block_key)
        # Special cases:
        if self.orig_block_key.block_type == 'html':
            self.serialize_html_block(block)
        else:
            self.serialize_normal_block(block)

        course_key = self.orig_block_key.course_key
        # Search the OLX for references to files stored in the course's
        # "Files & Uploads" (contentstore):
        self.olx_str = adapters.rewrite_absolute_static_urls(self.olx_str, course_key)
        for asset in adapters.collect_assets_from_text(self.olx_str, course_key):
            path = asset['path']
            # De-duplicate: a file may already be listed from the export step.
            if path not in [sf.name for sf in self.static_files]:
                self.static_files.append(StaticFile(name=path, url=asset['url'], data=None))

    def serialize_normal_block(self, block):
        """
        Serialize an XBlock to XML.

        This method is used for every block type except HTML, which uses
        serialize_html_block() instead.
        """
        # Create an XML node to hold the exported data
        olx_node = etree.Element("root")  # The node name doesn't matter: add_xml_to_node will change it
        # ^ Note: We could pass nsmap=xblock.core.XML_NAMESPACES here, but the
        # resulting XML namespace attributes don't seem that useful?
        with adapters.override_export_fs(block) as filesystem:  # Needed for XBlocks that inherit XModuleDescriptor
            # Tell the block to serialize itself as XML/OLX:
            if not block.has_children:
                block.add_xml_to_node(olx_node)
            else:
                # We don't want the children serialized at this time, because
                # otherwise we can't tell which files in 'filesystem' belong to
                # this block and which belong to its children. So, temporarily
                # disable any children:
                children = block.children
                block.children = []
                block.add_xml_to_node(olx_node)
                block.children = children

            # Now the block/module may have exported additional data as files in
            # 'filesystem'. If so, store them:
            for item in filesystem.walk():  # pylint: disable=not-callable
                for unit_file in item.files:
                    file_path = os.path.join(item.path, unit_file.name)
                    with filesystem.open(file_path, 'rb') as fh:
                        data = fh.read()
                    self.static_files.append(StaticFile(name=unit_file.name, data=data, url=None))

        # Apply some transformations to the OLX:
        self.transform_olx(olx_node, usage_id=block.scope_ids.usage_id)

        # Add <xblock-include /> tags for each child (XBlock XML export
        # normally puts children inline as e.g. <html> tags, but we want
        # references to them only.)
        if block.has_children:
            for child_id in block.children:
                # In modulestore, the "definition key" is a MongoDB ObjectID
                # kept in split's definitions table, which theoretically allows
                # the same block to be used in many places (each with a unique
                # usage key). However, that functionality is not exposed in
                # Studio (other than via content libraries). So when we import
                # into Blockstore, we assume that each usage is unique, don't
                # generate a usage key, and create a new "definition key" from
                # the original usage key.
                # So modulestore usage key
                #     block-v1:A+B+C+type@html+block@introduction
                # will become Blockstore definition key
                #     html+introduction
                #
                # If we needed the real definition key, we could get it via
                #     child = block.runtime.get_block(child_id)
                #     child_def_id = str(child.scope_ids.def_id)
                # and then use
                #     <xblock-include definition={child_def_id} usage={child_id.block_id} />
                def_id = blockstore_def_key_from_modulestore_usage_key(child_id)
                olx_node.append(olx_node.makeelement("xblock-include", {"definition": def_id}))

        # Store the resulting XML as a string:
        self.olx_str = etree.tostring(olx_node, encoding="unicode", pretty_print=True)

    def serialize_html_block(self, block):
        """
        Special case handling for HTML blocks: wrap the raw HTML in a CDATA
        section instead of letting it be XML-escaped.
        """
        olx_node = etree.Element("html")
        if block.display_name:
            olx_node.attrib["display_name"] = block.display_name
        olx_node.text = etree.CDATA("\n" + block.data + "\n")
        self.olx_str = etree.tostring(olx_node, encoding="unicode", pretty_print=True)

    def transform_olx(self, olx_node, usage_id):
        """
        Apply transformations to the given OLX etree Node.
        """
        # Remove 'url_name' - we store the definition key in the folder name
        # that holds the OLX and the usage key elsewhere, so specifying it
        # within the OLX file is redundant and can lead to issues if the file is
        # copied and pasted elsewhere in the bundle with a new definition key.
        olx_node.attrib.pop('url_name', None)
        # Convert <vertical> to the new <unit> tag/block
        if olx_node.tag == 'vertical':
            olx_node.tag = 'unit'
            for key in olx_node.attrib.keys():
                if key not in ('display_name', 'url_name'):
                    log.warning(
                        '<vertical> tag attribute "%s" will be ignored after conversion to <unit> (in %s)',
                        key,
                        str(usage_id)
                    )
| agpl-3.0 |
NLeSC/embodied-emotions-scripts | embem/emotools/folia_helpers.py | 1 | 1678 | """Helper functions for using the FoLiA format from pynlpl.
"""
from pynlpl.formats import folia
from lxml import etree
import os
def parse_document(file_name):
    """Load a FoLiA XML document from ``file_name``.

    Returns a ``(document, message)`` pair: the parsed ``folia.Document``
    and a success message, or ``(None, <error message>)`` if parsing fails.
    """
    try:
        document = folia.Document(file=file_name)
    except Exception as err:
        # Report the underlying parser error in the message instead of raising.
        return None, 'Unable to parse FoLiA XML file.\n{}'.format(str(err))
    return document, 'Successfully parsed FoLiA XML file.'
def add_entity(sentence, cls, words, text_content_tag, annotation=None):
    """Append an <entity> of class ``cls`` to ``sentence`` (in place).

    The entity's <wref> children are taken either from ``annotation``
    (id -> text mapping), or, when no annotation is given, from the word
    elements in ``words`` (id attribute plus the text found under
    ``text_content_tag``).
    """
    entity_tag = '{http://ilk.uvt.nl/folia}entities'
    # Reuse the sentence's existing <entities> container when present.
    entities = sentence.find(entity_tag)
    if entities is None:
        entities = etree.SubElement(sentence, 'entities')
    entity = etree.SubElement(entities, 'entity', {'class': cls})

    if annotation:
        for w_id in annotation.word_ids:
            etree.SubElement(entity, 'wref', {
                'id': w_id,
                't': annotation.words.get(w_id),
            })
    else:
        xml_id_attr = '{http://www.w3.org/XML/1998/namespace}id'
        for w in words:
            etree.SubElement(entity, 'wref', {
                'id': w.attrib.get(xml_id_attr),
                't': w.find(text_content_tag).text,
            })
def write_folia_file(context, folia_in, dir_out, ext):
    """Write ``context.root`` as pretty-printed XML next to a derived name.

    The output file is ``<dir_out>/<basename>-<ext>.xml`` where basename is
    the part of ``folia_in``'s file name before its first '.'.
    """
    head, tail = os.path.split(folia_in)
    p = tail.split('.')
    file_out = '{d}{s}{n}-{e}.xml'.format(n=p[0], d=dir_out, s=os.sep, e=ext)
    # Fix: use the print() function instead of the Python-2-only print
    # statement; the single-argument form behaves identically on Python 2.
    print(file_out)
    # lxml's tostring() returns bytes when an explicit encoding is given,
    # so open the file in binary mode (also required on Python 3).
    with open(file_out, 'wb') as f:
        f.write(etree.tostring(context.root,
                               encoding='utf8',
                               xml_declaration=True,
                               pretty_print=True))
| apache-2.0 |
Hack-a-thingie/game-of-will | Test.py | 1 | 7962 | import math
import pygame
import time
# Initializes the pygame library
pygame.init()

# Color definition: RGB triples, plus RGBA variants suffixed with "A".
# NOTE(review): pygame expects alpha as an int in 0-255; the 0.3 floats
# below look like CSS-style alpha values — confirm they render as intended.
BLACK = (0, 0, 0)
BLACKA = (0, 0, 0, 0.3)
WHITE = (255, 255, 255)
WHITEA = (255, 255, 255, 0.3)
BLUE = (0, 0, 255)
BLUEA = (0, 0, 255, 0.3)
GREEN = (0, 255, 0)
GREENA = (0, 255, 0, 0.3)
RED = (255, 0, 0)
REDA = (255, 0, 0, 0.3)
YELLOWA=(255, 255, 0, 0.3)
class Hexagon(object):
    """One cell of the hex grid.

    Holds the cell's offset coordinates (col, row), its pixel centre, and
    the equivalent cube coordinates used for distance calculations.
    Ownership: ``player`` is 100 while unowned, else a player index.
    """

    def __init__(self, col, row, radius, offset_x, offset_y):
        # Ownership / rendering state.
        self.player = 100            # 100 == unowned
        self.colour = BLUE
        self.transcolour = WHITEA

        # Geometry inputs.
        self.radius = radius
        self.offsetx = offset_x
        self.offsety = offset_y
        self.col = col
        self.row = row

        # Pixel centre of the hexagon (flat-top layout): odd columns are
        # shifted down by half a hex height.
        self.x_pixel = self.offsetx + 1.5 * self.radius * self.col
        row_step = math.sqrt(3) * self.radius
        self.y_pixel = (
            self.offsety + row_step * self.row + math.sqrt(3) / 2 * self.radius
            if self.col % 2 == 1
            else self.offsety + row_step * self.row
        )

        # Cube coordinates (x + y + z == 0) derived from the offset pair.
        self.hex_x = self.col
        self.hex_z = self.row - ((self.col - self.col % 2)) / 2
        self.cube_x = self.col
        self.cube_z = self.row - ((self.col - self.col % 2)) / 2
        self.cube_y = -self.cube_x - self.cube_z
        self.cube_xyz = [self.cube_x, self.cube_y, self.cube_z]

    def vertices(self):
        """Compute the six corner points, storing them in vertices_points."""
        self.vertices_points = []
        for corner in range(6):
            theta = math.pi / 180 * (60 * corner)
            # Last computed corner stays exposed on the instance, matching
            # the attribute set the rest of the file may rely on.
            self.vertex_x = self.x_pixel + self.radius * math.cos(theta)
            self.vertex_y = self.y_pixel + self.radius * math.sin(theta)
            self.vertices_points.append([self.vertex_x, self.vertex_y])
class Hexgrid(object):
    """Grid of Hexagon cells plus drawing, hit-testing and ownership logic.

    Cells are addressed both by offset coordinates (col, row) and by cube
    coordinates [x, y, z] with x + y + z == 0; pixel<->hex conversion follows
    the standard flat-top axial/cube formulas.
    """

    def __init__(self,size,offset_x,offset_y,screen):
        # Candidate cell names for a 15x11 offset grid, "hex<col>_<row>".
        self.hexlistname= ["hex"+ str(x) + '_' + str(y) for x in range(15) for y in range(11)]
        self.hexdict={}
        # Player number indexes into this list when recolouring a cell.
        self.playercolours=[GREENA, YELLOWA, REDA, BLACKA]
        self.size=size
        self.offsetx=offset_x
        self.offsety=offset_y
        self.screen = screen
        for k in self.hexlistname:
            self.ksplit=k.split("hex")[1]
            self.col=self.ksplit.split('_')[0]
            self.row=self.ksplit.split('_')[1]
            # Odd columns are shifted half a hex down, so their last row
            # would fall off the board; skip it.
            if int(self.row) == 10 and int(self.col)%2==1:
                pass
            else:
                self.hexdict[k]=Hexagon(int(self.col),int(self.row),self.size,self.offsetx,self.offsety)

    def draw_hexgrid(self):
        # Draw every cell as a filled polygon with an anti-aliased outline.
        for a in self.hexdict:
            self.hexdict[a].vertices()
            self.plist=self.hexdict[a].vertices_points
            pygame.draw.polygon(self.screen, self.hexdict[a].colour, self.plist, 0)
            #pygame.draw.polygon(self.screen, self.hexdict[a].transcolour, self.plist, 0)
            pygame.draw.aalines(self.screen, BLACK, True, self.plist, True)

    def cube2hex(self,cube_coord):
        # Drop the redundant y component: axial coords are (x, z).
        self.hex_x=cube_coord[0]
        self.hex_z=cube_coord[2]
        return self.hex_x,self.hex_z

    def hex2cube(self,hex_x, hex_z):
        # Recover y from the x + y + z == 0 invariant.
        self.cube_x = hex_x
        self.cube_y = -hex_x -hex_z
        self.cube_z = hex_z
        self.cube_coords= [self.cube_x,self.cube_y,self.cube_z]
        return self.cube_coords

    def pixel_to_hex(self,x_pixel, y_pixel):
        # Inverse of the flat-top hex layout: returns fractional axial (q, r).
        self.x_pixel=x_pixel-self.offsetx
        self.y_pixel=y_pixel-self.offsety
        self.q = (self.x_pixel*2.0/3.0)/self.size
        self.r =( (-self.x_pixel/3.0)+(math.sqrt(3)/3.0)*self.y_pixel)/self.size
        self.hex_frac= [self.q,self.r]
        return self.hex_frac

    def hex_round(self,x,y):
        # Pixel -> nearest whole hex (axial coordinates).
        return self.cube2hex(self.cube_round(self.hex2cube(self.pixel_to_hex(x,y)[0],self.pixel_to_hex(x,y)[1])))

    def cube_round(self,frac_cube):
        # Round each cube component, then fix the component with the largest
        # rounding error so that x + y + z == 0 still holds.
        self.h = frac_cube
        self.rx = round(self.h[0])
        self.ry = round(self.h[1])
        self.rz = round(self.h[2])
        self.x_diff = abs(self.rx - self.h[0])
        self.y_diff = abs(self.ry - self.h[1])
        self.z_diff = abs(self.rz - self.h[2])
        if self.x_diff > self.y_diff and self.x_diff > self.z_diff:
            self.rx = -self.ry-self.rz
        elif self.y_diff > self.z_diff:
            self.ry = -self.rx-self.rz
        else:
            self.rz = -self.rx-self.ry
        self.cubes=[self.rx, self.ry, self.rz]
        return self.cubes

    def hex_add(self,hexa, hexb):
        return [hexa.cube_x + hexb.cube_x, hexa.cube_y + hexb.cube_y, hexa.cube_z + hexb.cube_z]

    def hex_subtract(self,hexa, hexb):
        return [hexa.cube_x - hexb.cube_x, hexa.cube_y - hexb.cube_y, hexa.cube_z - hexb.cube_z]

    def hex_length(self, len_xyz):
        # Cube-coordinate hex distance from origin.
        return (abs(len_xyz[0]) + abs(len_xyz[1]) + abs(len_xyz[2]))/ 2

    def hex_distance(self, hexa, hexb):
        return self.hex_length(self.hex_subtract(hexa, hexb))

    def occupied_by(self,mouse_x,mouse_y):
        # Return the owning player of the cell under the mouse
        # (100 == unowned; None if the pixel is off the board).
        self.hex_round(mouse_x,mouse_y)
        self.hex_cube=self.cube_round(self.hex2cube(self.pixel_to_hex(mouse_x,mouse_y)[0],self.pixel_to_hex(mouse_x,mouse_y)[1]))
        for k in self.hexdict:
            if self.hexdict[k].cube_xyz == self.hex_cube:
                return self.hexdict[k].player

    def occupied(self,mouse_x,mouse_y):
        # True/False for the cell under the mouse; implicitly None off-board.
        self.hex_round(mouse_x,mouse_y)
        self.hex_cube=self.cube_round(self.hex2cube(self.pixel_to_hex(mouse_x,mouse_y)[0],self.pixel_to_hex(mouse_x,mouse_y)[1]))
        for k in self.hexdict:
            if self.hexdict[k].cube_xyz == self.hex_cube:
                if self.hexdict[k].player == 100:
                    return False
                else:
                    return True

    def change_owner(self,playernum,mouse_x,mouse_y):
        # Assign the cell under the mouse to playernum and recolour it.
        self.hex_round(mouse_x,mouse_y)
        self.hex_cube=self.cube_round(self.hex2cube(self.pixel_to_hex(mouse_x,mouse_y)[0],self.pixel_to_hex(mouse_x,mouse_y)[1]))
        for k in self.hexdict:
            if self.hexdict[k].cube_xyz == self.hex_cube:
                self.hexdict[k].player=playernum
                self.hexdict[k].colour=self.playercolours[playernum]

    def num_terr(self,mouse_x,mouse_y):
        # Count same-owner cells adjacent (distance < 2) to the cell under
        # the mouse; the -1 excludes the cell itself.
        terrnum=0
        self.hex_round(mouse_x,mouse_y)
        self.hex_cube=self.cube_round(self.hex2cube(self.pixel_to_hex(mouse_x,mouse_y)[0],self.pixel_to_hex(mouse_x,mouse_y)[1]))
        for k in self.hexdict:
            if self.hexdict[k].cube_xyz == self.hex_cube:
                for j in self.hexdict:
                    if self.hexdict[j].player == 100:
                        pass
                    elif self.hexdict[j].player == self.hexdict[k].player and self.hex_distance(self.hexdict[k],self.hexdict[j]) < 2:
                        terrnum += 1
        return terrnum-1

    def close_neighbour(self, playernum, mouse_x, mouse_y):
        # Distance from the cell under the mouse to playernum's nearest cell;
        # 99 if the mouse is not over a board cell at all.
        dist=20
        flag=0
        self.hex_round(mouse_x,mouse_y)
        self.hex_cube=self.cube_round(self.hex2cube(self.pixel_to_hex(mouse_x,mouse_y)[0],self.pixel_to_hex(mouse_x,mouse_y)[1]))
        for k in self.hexdict:
            if self.hexdict[k].cube_xyz == self.hex_cube:
                myname = k
                flag = 1
        if flag == 0:
            return 99
        else:
            for k in self.hexdict:
                if self.hexdict[k].player == playernum:
                    if self.hex_distance(self.hexdict[k],self.hexdict[myname]) < dist:
                        dist=self.hex_distance(self.hexdict[k],self.hexdict[myname])
            return dist
# --- Interactive demo loop ------------------------------------------------
playernum = 3  # index into Hexgrid.playercolours

pygame.init()
resl = pygame.display.Info()
screen = pygame.display.set_mode((resl.current_w, resl.current_h))
hexgrid1 = Hexgrid(50, 100, 100, screen)
clock = pygame.time.Clock()
while 1:
    clock.tick(60)  # cap at 60 FPS
    screen.fill(WHITE)
    hexgrid1.draw_hexgrid()
    pygame.display.flip()
    xpos, ypos = pygame.mouse.get_pos()
    # Left mouse button claims the cell under the cursor.
    if pygame.mouse.get_pressed()[0]:
        hexgrid1.change_owner(playernum, xpos, ypos)
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            exit()
    # Fix: use the print() function instead of the Python-2-only print
    # statement (single-argument form prints identically on Python 2).
    print(hexgrid1.num_terr(xpos, ypos))
    #print(hexgrid1.occupied(xpos, ypos))
    #print(hexgrid1.close_neighbour(playernum, xpos, ypos))
OAPDF/oapdf_tools | oapdf/jrecord.py | 2 | 3110 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
from bs4 import BeautifulSoup
######################## Part3: Journal Record #############################
############### General Journal Record class ###############################
class Jrecord(object):
    '''Basic journal record information.

    Holds the common bibliographic fields of one article (journal, title,
    authors, year, volume, issue, pages, doi, issn, publisher, urls, pdf,
    abstract, note) and parsers for NoteFirst XML and EndNote .enw input.
    '''

    def __init__(self):
        self.journal=""
        self.title=""
        self.authors=[]
        self.year=""
        self.volume=""
        self.issue=""
        self.pages=""
        self.doi=""
        self.issn=""
        self.publisher=""
        self.urls=[]
        self.pdf=""
        self.abstract=""
        self.note=""

    def __getattr__(self, name):
        """Locate the attribute (or bound method) named by a dotted path.

        e.g. ``getattr(rec, 'authors.append')`` returns the list's append
        method. Only called for names not found normally.
        """
        # reduce is a builtin on Python 2 only; import it explicitly so the
        # method also works on Python 3 (functools.reduce exists since 2.6).
        from functools import reduce

        def traverse(parent, child):
            # On the first step 'parent' is still the attribute *name*;
            # resolve it against self before descending further.
            # Fix: the original called the undefined name ``instance()``,
            # which raised NameError on every dotted lookup.
            if isinstance(parent, str):
                parent = getattr(self, parent)
            return getattr(parent, child)

        # NOTE(review): for a plain (undotted) missing name this returns the
        # name string itself instead of raising AttributeError — kept as-is
        # in case callers rely on it, but worth confirming.
        return reduce(traverse, name.split('.'))

    def __getitem__(self,name):
        '''Act as dict: record['title'] == record.title.'''
        return getattr(self,name)

    def reset(self):
        """Clear every field in place.

        authors/urls are emptied with slice deletion (not rebound) so that
        external references to the lists stay valid.
        """
        self.journal=""
        self.title=""
        del self.authors[:]
        self.year=""
        self.volume=""
        self.issue=""
        self.pages=""
        self.doi=""
        self.issn=""
        self.publisher=""
        del self.urls[:]
        self.pdf=""
        self.abstract=""
        self.note=""

    def __repr__(self):
        # NOTE(review): .encode('utf-8') yields a py2 str; on Python 3 this
        # would return bytes (invalid for __repr__) — module targets py2.
        return (self.doi+": "+self.title+" | "+self.journal+" | "+self.year+"; "+self.volume+"("+self.issue+")"+", "+self.pages).encode('utf-8')

    def __str__(self):
        return self.__repr__()

    # Output formats: not yet implemented.
    def writexml(self):
        pass

    def writeenw(self):
        pass

    def writebib(self):
        pass

    def writeris(self):
        pass

    def parseNoteFirst(self,text=None,infile=None):
        '''Parse NoteFirst record (xml format), return self (None on bad input).'''
        # Accept raw text, a file path, or an open file object.
        # NOTE(review): basestring/file are Python-2-only builtins.
        if isinstance(text,basestring):
            pass
        elif isinstance(infile,basestring):
            f=open(infile)
            text=f.read()
            f.close()
        elif isinstance(infile,file):
            text=infile.read()
        else: #Do nothing
            return None
        soup=BeautifulSoup(text,"html.parser")
        self.title=soup.primarytitle.text
        doi=soup.doi.text
        # Keep everything from the "10." DOI prefix onwards.
        self.doi=doi[doi.find("10."):]
        self.journal=soup.media.info.text
        self.year=soup.year.text
        self.volume=soup.volume.text
        self.issue=soup.issue.text
        self.pages=soup.pagescope.text
        authors=soup.findChildren('fullname')
        self.authors=[ author.info.text for author in authors]
        #self.issn=""
        return self

    def parseenw(self,text=None,infile=None):
        '''Parse the endnote enw file, return self (None on bad input).'''
        lines=None
        # Use basestring for both str and unicode! (Python 2 only.)
        if isinstance(text,basestring):
            lines=text.splitlines()
        elif isinstance(text,list):
            lines=text
        elif isinstance(infile,basestring):
            lines=open(infile)
        elif isinstance(infile,file):
            lines=infile
        else: #Do nothing
            return None
        # Each record line looks like "%T Some title": dispatch on the tag.
        for line in lines:
            if (len(line)>1):
                item=line[1]
                if item=="T":
                    self.title=line[3:].strip()
                elif item=="D":
                    self.year=line[3:].strip()
                elif item=="P":
                    self.pages=line[3:].strip()
                elif item=="J":
                    self.journal=line[3:].strip()
                elif item=="V":
                    self.volume=line[3:].strip()
                elif item=="N":
                    self.issue=line[3:].strip()
                elif item=="A":
                    self.authors.append(line[3:].strip())
        if isinstance(infile,basestring):
            lines.close()
        return self
bswartz/cinder | cinder/tests/unit/volume/drivers/netapp/dataontap/test_nfs_7mode.py | 1 | 7144 | # Copyright (c) 2015 Tom Barron. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit tests for the NetApp 7mode NFS storage driver
"""
import ddt
import mock
from os_brick.remotefs import remotefs as remotefs_brick
from oslo_utils import units
from cinder import test
from cinder.tests.unit.volume.drivers.netapp.dataontap import fakes as fake
from cinder.tests.unit.volume.drivers.netapp import fakes as na_fakes
from cinder import utils
from cinder.volume.drivers.netapp.dataontap import nfs_7mode
from cinder.volume.drivers.netapp import utils as na_utils
@ddt.ddt
class NetApp7modeNfsDriverTestCase(test.TestCase):
    """Unit tests for the NetApp 7-mode NFS driver (all I/O is mocked)."""

    def setUp(self):
        super(NetApp7modeNfsDriverTestCase, self).setUp()
        kwargs = {'configuration': self.get_config_7mode()}

        # Patch out root-helper lookup and the remotefs client so the driver
        # can be constructed without touching the host.
        with mock.patch.object(utils, 'get_root_helper',
                               return_value=mock.Mock()):
            with mock.patch.object(remotefs_brick, 'RemoteFsClient',
                                   return_value=mock.Mock()):
                self.driver = nfs_7mode.NetApp7modeNfsDriver(**kwargs)
                self.driver._mounted_shares = [fake.NFS_SHARE]
                self.driver.ssc_vols = True
                self.driver.zapi_client = mock.Mock()
                self.driver.perf_library = mock.Mock()

    def get_config_7mode(self):
        # Minimal 7-mode configuration stub used by setUp().
        config = na_fakes.create_configuration_cmode()
        config.netapp_storage_protocol = 'nfs'
        config.netapp_login = 'root'
        config.netapp_password = 'pass'
        config.netapp_server_hostname = '127.0.0.1'
        config.netapp_transport_type = 'http'
        config.netapp_server_port = '80'
        return config

    @ddt.data({'share': None, 'is_snapshot': False},
              {'share': None, 'is_snapshot': True},
              {'share': 'fake_share', 'is_snapshot': False},
              {'share': 'fake_share', 'is_snapshot': True})
    @ddt.unpack
    def test_clone_backing_file_for_volume(self, share, is_snapshot):
        # The clone path must be resolved via the export path and passed
        # through to zapi_client.clone_file for every share/snapshot combo.
        mock_get_export_ip_path = self.mock_object(
            self.driver, '_get_export_ip_path',
            mock.Mock(return_value=(fake.SHARE_IP, fake.EXPORT_PATH)))
        mock_get_actual_path_for_export = self.mock_object(
            self.driver.zapi_client, 'get_actual_path_for_export',
            mock.Mock(return_value='fake_path'))

        self.driver._clone_backing_file_for_volume(
            fake.FLEXVOL, 'fake_clone', fake.VOLUME_ID, share=share,
            is_snapshot=is_snapshot)

        mock_get_export_ip_path.assert_called_once_with(
            fake.VOLUME_ID, share)
        mock_get_actual_path_for_export.assert_called_once_with(
            fake.EXPORT_PATH)
        self.driver.zapi_client.clone_file.assert_called_once_with(
            'fake_path/' + fake.FLEXVOL, 'fake_path/fake_clone')

    @ddt.data({'nfs_sparsed_volumes': True},
              {'nfs_sparsed_volumes': False})
    @ddt.unpack
    def test_get_pool_stats(self, nfs_sparsed_volumes):
        # Sparsed volumes report thin provisioning; non-sparsed report thick.
        self.driver.configuration.nfs_sparsed_volumes = nfs_sparsed_volumes

        thick = not nfs_sparsed_volumes

        total_capacity_gb = na_utils.round_down(
            fake.TOTAL_BYTES // units.Gi, '0.01')
        free_capacity_gb = na_utils.round_down(
            fake.AVAILABLE_BYTES // units.Gi, '0.01')
        provisioned_capacity_gb = total_capacity_gb - free_capacity_gb
        capacity = {
            'reserved_percentage': fake.RESERVED_PERCENTAGE,
            'max_over_subscription_ratio': fake.MAX_OVER_SUBSCRIPTION_RATIO,
            'total_capacity_gb': total_capacity_gb,
            'free_capacity_gb': free_capacity_gb,
            'provisioned_capacity_gb': provisioned_capacity_gb,
        }
        self.mock_object(self.driver,
                         '_get_share_capacity_info',
                         mock.Mock(return_value=capacity))
        self.mock_object(self.driver.perf_library,
                         'get_node_utilization',
                         mock.Mock(return_value=30.0))

        result = self.driver._get_pool_stats(filter_function='filter',
                                             goodness_function='goodness')

        expected = [{'pool_name': '192.168.99.24:/fake/export/path',
                     'QoS_support': False,
                     'thick_provisioning_support': thick,
                     'thin_provisioning_support': not thick,
                     'free_capacity_gb': 12.0,
                     'total_capacity_gb': 4468.0,
                     'reserved_percentage': 7,
                     'max_over_subscription_ratio': 19.0,
                     'provisioned_capacity_gb': 4456.0,
                     'utilization': 30.0,
                     'filter_function': 'filter',
                     'goodness_function': 'goodness'}]

        self.assertEqual(expected, result)

    def test_shortlist_del_eligible_files(self):
        # Every old file is paired with its (mocked) usage value.
        mock_get_path_for_export = self.mock_object(
            self.driver.zapi_client, 'get_actual_path_for_export')
        mock_get_path_for_export.return_value = fake.FLEXVOL
        mock_get_file_usage = self.mock_object(
            self.driver.zapi_client, 'get_file_usage')
        mock_get_file_usage.return_value = fake.CAPACITY_VALUES[0]
        expected = [(old_file, fake.CAPACITY_VALUES[0]) for old_file
                    in fake.FILE_LIST]

        result = self.driver._shortlist_del_eligible_files(
            fake.NFS_SHARE, fake.FILE_LIST)

        self.assertEqual(expected, result)

    def test_shortlist_del_eligible_files_empty_list(self):
        # No candidate files -> empty shortlist.
        mock_get_export_ip_path = self.mock_object(
            self.driver, '_get_export_ip_path')
        mock_get_export_ip_path.return_value = ('', '/export_path')
        mock_get_path_for_export = self.mock_object(
            self.driver.zapi_client, 'get_actual_path_for_export')
        mock_get_path_for_export.return_value = fake.FLEXVOL

        result = self.driver._shortlist_del_eligible_files(
            fake.NFS_SHARE, [])

        self.assertEqual([], result)

    @ddt.data({'has_space': True, 'expected': True},
              {'has_space': False, 'expected': False})
    @ddt.unpack
    def test_is_share_clone_compatible(self, has_space, expected):
        # Clone compatibility mirrors the share-has-space check directly.
        mock_share_has_space_for_clone = self.mock_object(
            self.driver, '_share_has_space_for_clone')
        mock_share_has_space_for_clone.return_value = has_space

        result = self.driver._is_share_clone_compatible(fake.VOLUME,
                                                        fake.NFS_SHARE)

        self.assertEqual(expected, result)
| apache-2.0 |
ArneBab/pypyjs | website/demo/home/rfk/repos/pypy/lib-python/2.7/plat-mac/macerrors.py | 40 | 116661 | # -coding=latin1-
from warnings import warnpy3k
warnpy3k("In 3.x, the macerrors module is removed.", stacklevel=2)
svTempDisable = -32768 #svTempDisable
svDisabled = -32640 #Reserve range -32640 to -32768 for Apple temp disables.
fontNotOutlineErr = -32615 #bitmap font passed to routine that does outlines only
kURL68kNotSupportedError = -30788 #kURL68kNotSupportedError
kURLAccessNotAvailableError = -30787 #kURLAccessNotAvailableError
kURLInvalidConfigurationError = -30786 #kURLInvalidConfigurationError
kURLExtensionFailureError = -30785 #kURLExtensionFailureError
kURLFileEmptyError = -30783 #kURLFileEmptyError
kURLInvalidCallError = -30781 #kURLInvalidCallError
kURLUnsettablePropertyError = -30780 #kURLUnsettablePropertyError
kURLPropertyBufferTooSmallError = -30779 #kURLPropertyBufferTooSmallError
kURLUnknownPropertyError = -30778 #kURLUnknownPropertyError
kURLPropertyNotYetKnownError = -30777 #kURLPropertyNotYetKnownError
kURLAuthenticationError = -30776 #kURLAuthenticationError
kURLServerBusyError = -30775 #kURLServerBusyError
kURLUnsupportedSchemeError = -30774 #kURLUnsupportedSchemeError
kURLInvalidURLError = -30773 #kURLInvalidURLError
kURLDestinationExistsError = -30772 #kURLDestinationExistsError
kURLProgressAlreadyDisplayedError = -30771 #kURLProgressAlreadyDisplayedError
kURLInvalidURLReferenceError = -30770 #kURLInvalidURLReferenceError
controlHandleInvalidErr = -30599 #controlHandleInvalidErr
controlInvalidDataVersionErr = -30597 #controlInvalidDataVersionErr
errItemNotControl = -30596 #errItemNotControl
errCantEmbedRoot = -30595 #errCantEmbedRoot
errCantEmbedIntoSelf = -30594 #errCantEmbedIntoSelf
errWindowRegionCodeInvalid = -30593 #errWindowRegionCodeInvalid
errControlHiddenOrDisabled = -30592 #errControlHiddenOrDisabled
errDataSizeMismatch = -30591 #errDataSizeMismatch
errControlIsNotEmbedder = -30590 #errControlIsNotEmbedder
errControlsAlreadyExist = -30589 #errControlsAlreadyExist
errInvalidPartCode = -30588 #errInvalidPartCode
errRootAlreadyExists = -30587 #errRootAlreadyExists
errNoRootControl = -30586 #errNoRootControl
errCouldntSetFocus = -30585 #errCouldntSetFocus
errUnknownControl = -30584 #errUnknownControl
errWindowDoesntSupportFocus = -30583 #errWindowDoesntSupportFocus
errControlDoesntSupportFocus = -30582 #errControlDoesntSupportFocus
errDataNotSupported = -30581 #errDataNotSupported
errMessageNotSupported = -30580 #errMessageNotSupported
themeMonitorDepthNotSupportedErr = -30567 #theme not supported at monitor depth
themeScriptFontNotFoundErr = -30566 #theme font requested for uninstalled script system
themeBadCursorIndexErr = -30565 #themeBadCursorIndexErr
themeHasNoAccentsErr = -30564 #themeHasNoAccentsErr
themeBadTextColorErr = -30563 #themeBadTextColorErr
themeProcessNotRegisteredErr = -30562 #themeProcessNotRegisteredErr
themeProcessRegisteredErr = -30561 #themeProcessRegisteredErr
themeInvalidBrushErr = -30560 #pattern index invalid
qtvrUninitialized = -30555 #qtvrUninitialized
qtvrLibraryLoadErr = -30554 #qtvrLibraryLoadErr
streamingNodeNotReadyErr = -30553 #streamingNodeNotReadyErr
noMemoryNodeFailedInitialize = -30552 #noMemoryNodeFailedInitialize
invalidHotSpotIDErr = -30551 #invalidHotSpotIDErr
invalidNodeFormatErr = -30550 #invalidNodeFormatErr
limitReachedErr = -30549 #limitReachedErr
settingNotSupportedByNodeErr = -30548 #settingNotSupportedByNodeErr
propertyNotSupportedByNodeErr = -30547 #propertyNotSupportedByNodeErr
timeNotInViewErr = -30546 #timeNotInViewErr
invalidViewStateErr = -30545 #invalidViewStateErr
invalidNodeIDErr = -30544 #invalidNodeIDErr
selectorNotSupportedByNodeErr = -30543 #selectorNotSupportedByNodeErr
callNotSupportedByNodeErr = -30542 #callNotSupportedByNodeErr
constraintReachedErr = -30541 #constraintReachedErr
notAQTVRMovieErr = -30540 #notAQTVRMovieErr
kFBCnoSuchHit = -30532 #kFBCnoSuchHit
kFBCbadSearchSession = -30531 #kFBCbadSearchSession
kFBCindexDiskIOFailed = -30530 #kFBCindexDiskIOFailed
kFBCsummarizationCanceled = -30529 #kFBCsummarizationCanceled
kFBCbadIndexFileVersion = -30528 #kFBCbadIndexFileVersion
kFBCanalysisNotAvailable = -30527 #kFBCanalysisNotAvailable
kFBCillegalSessionChange = -30526 #tried to add/remove vols to a session
kFBCsomeFilesNotIndexed = -30525 #kFBCsomeFilesNotIndexed
kFBCsearchFailed = -30524 #kFBCsearchFailed
kFBCindexNotAvailable = -30523 #kFBCindexNotAvailable
kFBCindexFileDestroyed = -30522 #kFBCindexFileDestroyed
kFBCaccessCanceled = -30521 #kFBCaccessCanceled
kFBCindexingCanceled = -30520 #kFBCindexingCanceled
kFBCnoSearchSession = -30519 #kFBCnoSearchSession
kFBCindexNotFound = -30518 #kFBCindexNotFound
kFBCflushFailed = -30517 #kFBCflushFailed
kFBCaddDocFailed = -30516 #kFBCaddDocFailed
kFBCaccessorStoreFailed = -30515 #kFBCaccessorStoreFailed
kFBCindexCreationFailed = -30514 #couldn't create index
kFBCmergingFailed = -30513 #couldn't merge index files
kFBCtokenizationFailed = -30512 #couldn't read from document or query
kFBCmoveFailed = -30511 #V-Twin exception caught
kFBCdeletionFailed = -30510 #V-Twin exception caught
kFBCcommitFailed = -30509 #V-Twin exception caught
kFBCindexingFailed = -30508 #V-Twin exception caught
kFBCvalidationFailed = -30507 #V-Twin exception caught
kFBCcompactionFailed = -30506 #V-Twin exception caught
kFBCbadIndexFile = -30505 #bad FSSpec, or bad data in file
kFBCfileNotIndexed = -30504 #kFBCfileNotIndexed
kFBCbadParam = -30503 #kFBCbadParam
kFBCallocFailed = -30502 #probably low memory
kFBCnoIndexesFound = -30501 #kFBCnoIndexesFound
kFBCvTwinExceptionErr = -30500 #no telling what it was
kDSpStereoContextErr = -30450 #kDSpStereoContextErr
kDSpInternalErr = -30449 #kDSpInternalErr
kDSpConfirmSwitchWarning = -30448 #kDSpConfirmSwitchWarning
kDSpFrameRateNotReadyErr = -30447 #kDSpFrameRateNotReadyErr
kDSpContextNotFoundErr = -30446 #kDSpContextNotFoundErr
kDSpContextNotReservedErr = -30445 #kDSpContextNotReservedErr
kDSpContextAlreadyReservedErr = -30444 #kDSpContextAlreadyReservedErr
kDSpInvalidAttributesErr = -30443 #kDSpInvalidAttributesErr
kDSpInvalidContextErr = -30442 #kDSpInvalidContextErr
kDSpSystemSWTooOldErr = -30441 #kDSpSystemSWTooOldErr
kDSpNotInitializedErr = -30440 #kDSpNotInitializedErr
kISpListBusyErr = -30429 #kISpListBusyErr
kISpDeviceActiveErr = -30428 #kISpDeviceActiveErr
kISpSystemActiveErr = -30427 #kISpSystemActiveErr
kISpDeviceInactiveErr = -30426 #kISpDeviceInactiveErr
kISpSystemInactiveErr = -30425 #kISpSystemInactiveErr
kISpElementNotInListErr = -30424 #kISpElementNotInListErr
kISpElementInListErr = -30423 #kISpElementInListErr
kISpBufferToSmallErr = -30422 #kISpBufferToSmallErr
kISpSystemListErr = -30421 #kISpSystemListErr
kISpInternalErr = -30420 #kISpInternalErr
kNSpJoinFailedErr = -30399 #kNSpJoinFailedErr
kNSpCantBlockErr = -30398 #kNSpCantBlockErr
kNSpMessageTooBigErr = -30397 #kNSpMessageTooBigErr
kNSpSendFailedErr = -30396 #kNSpSendFailedErr
kNSpConnectFailedErr = -30395 #kNSpConnectFailedErr
kNSpGameTerminatedErr = -30394 #kNSpGameTerminatedErr
kNSpTimeoutErr = -30393 #kNSpTimeoutErr
kNSpInvalidProtocolListErr = -30392 #kNSpInvalidProtocolListErr
kNSpInvalidProtocolRefErr = -30391 #kNSpInvalidProtocolRefErr
kNSpInvalidDefinitionErr = -30390 #kNSpInvalidDefinitionErr
kNSpAddPlayerFailedErr = -30389 #kNSpAddPlayerFailedErr
kNSpCreateGroupFailedErr = -30388 #kNSpCreateGroupFailedErr
kNSpNoHostVolunteersErr = -30387 #kNSpNoHostVolunteersErr
kNSpNoGroupsErr = -30386 #kNSpNoGroupsErr
kNSpNoPlayersErr = -30385 #kNSpNoPlayersErr
kNSpInvalidGroupIDErr = -30384 #kNSpInvalidGroupIDErr
kNSpInvalidPlayerIDErr = -30383 #kNSpInvalidPlayerIDErr
kNSpNameRequiredErr = -30382 #kNSpNameRequiredErr
kNSpFeatureNotImplementedErr = -30381 #kNSpFeatureNotImplementedErr
kNSpAddressInUseErr = -30380 #kNSpAddressInUseErr
kNSpRemovePlayerFailedErr = -30379 #kNSpRemovePlayerFailedErr
kNSpFreeQExhaustedErr = -30378 #kNSpFreeQExhaustedErr
kNSpInvalidAddressErr = -30377 #kNSpInvalidAddressErr
kNSpNotAdvertisingErr = -30376 #kNSpNotAdvertisingErr
kNSpAlreadyAdvertisingErr = -30374 #kNSpAlreadyAdvertisingErr
kNSpMemAllocationErr = -30373 #kNSpMemAllocationErr
kNSpOTVersionTooOldErr = -30371 #kNSpOTVersionTooOldErr
kNSpOTNotPresentErr = -30370 #kNSpOTNotPresentErr
kNSpInvalidParameterErr = -30369 #kNSpInvalidParameterErr
kNSpInvalidGameRefErr = -30367 #kNSpInvalidGameRefErr
kNSpProtocolNotAvailableErr = -30366 #kNSpProtocolNotAvailableErr
kNSpHostFailedErr = -30365 #kNSpHostFailedErr
kNSpPipeFullErr = -30364 #kNSpPipeFullErr
kNSpTopologyNotSupportedErr = -30362 #kNSpTopologyNotSupportedErr
kNSpAlreadyInitializedErr = -30361 #kNSpAlreadyInitializedErr
kNSpInitializationFailedErr = -30360 #kNSpInitializationFailedErr
kSSpScaleToZeroErr = -30344 #kSSpScaleToZeroErr
kSSpParallelUpVectorErr = -30343 #kSSpParallelUpVectorErr
kSSpCantInstallErr = -30342 #kSSpCantInstallErr
kSSpVersionErr = -30341 #kSSpVersionErr
kSSpInternalErr = -30340 #kSSpInternalErr
kALMInternalErr = -30049 #kALMInternalErr
kALMGroupNotFoundErr = -30048 #kALMGroupNotFoundErr
kALMNoSuchModuleErr = -30047 #kALMNoSuchModuleErr
kALMModuleCommunicationErr = -30046 #kALMModuleCommunicationErr
kALMDuplicateModuleErr = -30045 #kALMDuplicateModuleErr
kALMInstallationErr = -30044 #kALMInstallationErr
kALMDeferSwitchErr = -30043 #kALMDeferSwitchErr
kALMRebootFlagsLevelErr = -30042 #kALMRebootFlagsLevelErr
kLocalesDefaultDisplayStatus = -30029 #Requested display locale unavailable, used default
kLocalesTableFormatErr = -30002 #kLocalesTableFormatErr
kLocalesBufferTooSmallErr = -30001 #kLocalesBufferTooSmallErr
kFNSNameNotFoundErr = -29589 #The name with the requested parameters was not found
kFNSBadFlattenedSizeErr = -29587 #flattened size didn't match input or was too small
kFNSInsufficientDataErr = -29586 #insufficient data for the operation
kFNSMismatchErr = -29585 #reference didn't match or wasn't found in profile
kFNSDuplicateReferenceErr = -29584 #the ref. being added is already in the profile
kFNSBadProfileVersionErr = -29583 #profile version is out of known range
kFNSInvalidProfileErr = -29582 #profile is NULL or otherwise bad
kFNSBadReferenceVersionErr = -29581 #ref. version is out of known range
kFNSInvalidReferenceErr = -29580 #ref. was NULL or otherwise bad
kCollateInvalidCollationRef = -29507 #kCollateInvalidCollationRef
kCollateBufferTooSmall = -29506 #kCollateBufferTooSmall
kCollateInvalidChar = -29505 #kCollateInvalidChar
kCollatePatternNotFoundErr = -29504 #kCollatePatternNotFoundErr
kCollateUnicodeConvertFailedErr = -29503 #kCollateUnicodeConvertFailedErr
kCollateMissingUnicodeTableErr = -29502 #kCollateMissingUnicodeTableErr
kCollateInvalidOptions = -29501 #kCollateInvalidOptions
kCollateAttributesNotFoundErr = -29500 #kCollateAttributesNotFoundErr
kMPInvalidIDErr = -29299 #kMPInvalidIDErr
kMPInsufficientResourcesErr = -29298 #kMPInsufficientResourcesErr
kMPTaskAbortedErr = -29297 #kMPTaskAbortedErr
kMPTimeoutErr = -29296 #kMPTimeoutErr
kMPDeletedErr = -29295 #kMPDeletedErr
kMPBlueBlockingErr = -29293 #kMPBlueBlockingErr
kMPTaskStoppedErr = -29292 #A convention used with MPThrowException.
kMPTaskBlockedErr = -29291 #kMPTaskBlockedErr
kMPTaskCreatedErr = -29290 #kMPTaskCreatedErr
kMPProcessTerminatedErr = -29289 #kMPProcessTerminatedErr
kMPProcessCreatedErr = -29288 #kMPProcessCreatedErr
kMPPrivilegedErr = -29276 #kMPPrivilegedErr
kMPIterationEndErr = -29275 #kMPIterationEndErr
kUCTextBreakLocatorMissingType = -25341 #Unicode text break error
kUCOutputBufferTooSmall = -25340 #Output buffer too small for Unicode string result
errKCCreateChainFailed = -25318 #errKCCreateChainFailed
errKCDataNotModifiable = -25317 #errKCDataNotModifiable
errKCDataNotAvailable = -25316 #errKCDataNotAvailable
errKCInteractionRequired = -25315 #errKCInteractionRequired
errKCNoPolicyModule = -25314 #errKCNoPolicyModule
errKCNoCertificateModule = -25313 #errKCNoCertificateModule
errKCNoStorageModule = -25312 #errKCNoStorageModule
errKCKeySizeNotAllowed = -25311 #errKCKeySizeNotAllowed
errKCWrongKCVersion = -25310 #errKCWrongKCVersion
errKCReadOnlyAttr = -25309 #errKCReadOnlyAttr
errKCInteractionNotAllowed = -25308 #errKCInteractionNotAllowed
errKCNoDefaultKeychain = -25307 #errKCNoDefaultKeychain
errKCNoSuchClass = -25306 #errKCNoSuchClass
errKCInvalidSearchRef = -25305 #errKCInvalidSearchRef
errKCInvalidItemRef = -25304 #errKCInvalidItemRef
errKCNoSuchAttr = -25303 #errKCNoSuchAttr
errKCDataTooLarge = -25302 #errKCDataTooLarge
errKCBufferTooSmall = -25301 #errKCBufferTooSmall
errKCItemNotFound = -25300 #errKCItemNotFound
errKCDuplicateItem = -25299 #errKCDuplicateItem
errKCInvalidCallback = -25298 #errKCInvalidCallback
errKCDuplicateCallback = -25297 #errKCDuplicateCallback
errKCDuplicateKeychain = -25296 #errKCDuplicateKeychain
errKCInvalidKeychain = -25295 #errKCInvalidKeychain
errKCNoSuchKeychain = -25294 #errKCNoSuchKeychain
errKCAuthFailed = -25293 #errKCAuthFailed
errKCReadOnly = -25292 #errKCReadOnly
errKCNotAvailable = -25291 #errKCNotAvailable
printerStatusOpCodeNotSupportedErr = -25280 #printerStatusOpCodeNotSupportedErr
kTXNOutsideOfFrameErr = -22018 #kTXNOutsideOfFrameErr
kTXNOutsideOfLineErr = -22017 #kTXNOutsideOfLineErr
kTXNATSUIIsNotInstalledErr = -22016 #kTXNATSUIIsNotInstalledErr
kTXNDataTypeNotAllowedErr = -22015 #kTXNDataTypeNotAllowedErr
kTXNCopyNotAllowedInEchoModeErr = -22014 #kTXNCopyNotAllowedInEchoModeErr
kTXNCannotTurnTSMOffWhenUsingUnicodeErr = -22013 #kTXNCannotTurnTSMOffWhenUsingUnicodeErr
kTXNAlreadyInitializedErr = -22012 #kTXNAlreadyInitializedErr
kTXNInvalidRunIndex = -22011 #kTXNInvalidRunIndex
kTXNSomeOrAllTagsInvalidForRunErr = -22010 #kTXNSomeOrAllTagsInvalidForRunErr
kTXNAttributeTagInvalidForRunErr = -22009 #dataValue is set to this per invalid tag
kTXNNoMatchErr = -22008 #kTXNNoMatchErr
kTXNRunIndexOutofBoundsErr = -22007 #kTXNRunIndexOutofBoundsErr
kTXNCannotSetAutoIndentErr = -22006 #kTXNCannotSetAutoIndentErr
kTXNBadDefaultFileTypeWarning = -22005 #kTXNBadDefaultFileTypeWarning
kTXNUserCanceledOperationErr = -22004 #kTXNUserCanceledOperationErr
kTXNIllegalToCrossDataBoundariesErr = -22003 #kTXNIllegalToCrossDataBoundariesErr
kTXNInvalidFrameIDErr = -22002 #kTXNInvalidFrameIDErr
kTXNCannotAddFrameErr = -22001 #kTXNCannotAddFrameErr
kTXNEndIterationErr = -22000 #kTXNEndIterationErr
invalidIndexErr = -20002 #The recordIndex parameter is not valid.
recordDataTooBigErr = -20001 #The record data is bigger than buffer size (1024 bytes).
unknownInsertModeErr = -20000 #There is no such insert mode.
kModemScriptMissing = -14002 #kModemScriptMissing
kModemPreferencesMissing = -14001 #kModemPreferencesMissing
kModemOutOfMemory = -14000 #kModemOutOfMemory
kHIDBaseError = -13950 #kHIDBaseError
kHIDNullStateErr = -13949 #kHIDNullStateErr
kHIDBufferTooSmallErr = -13948 #kHIDBufferTooSmallErr
kHIDValueOutOfRangeErr = -13947 #kHIDValueOutOfRangeErr
kHIDUsageNotFoundErr = -13946 #kHIDUsageNotFoundErr
kHIDNotValueArrayErr = -13945 #kHIDNotValueArrayErr
kHIDInvalidPreparsedDataErr = -13944 #kHIDInvalidPreparsedDataErr
kHIDIncompatibleReportErr = -13943 #kHIDIncompatibleReportErr
kHIDBadLogPhysValuesErr = -13942 #kHIDBadLogPhysValuesErr
kHIDInvalidReportTypeErr = -13941 #kHIDInvalidReportTypeErr
kHIDInvalidReportLengthErr = -13940 #kHIDInvalidReportLengthErr
kHIDNullPointerErr = -13939 #kHIDNullPointerErr
kHIDBadParameterErr = -13938 #kHIDBadParameterErr
kHIDNotEnoughMemoryErr = -13937 #kHIDNotEnoughMemoryErr
kHIDEndOfDescriptorErr = -13936 #kHIDEndOfDescriptorErr
kHIDUsagePageZeroErr = -13935 #kHIDUsagePageZeroErr
kHIDBadLogicalMinimumErr = -13934 #kHIDBadLogicalMinimumErr
kHIDBadLogicalMaximumErr = -13933 #kHIDBadLogicalMaximumErr
kHIDInvertedLogicalRangeErr = -13932 #kHIDInvertedLogicalRangeErr
kHIDInvertedPhysicalRangeErr = -13931 #kHIDInvertedPhysicalRangeErr
kHIDUnmatchedUsageRangeErr = -13930 #kHIDUnmatchedUsageRangeErr
kHIDInvertedUsageRangeErr = -13929 #kHIDInvertedUsageRangeErr
kHIDUnmatchedStringRangeErr = -13928 #kHIDUnmatchedStringRangeErr
kHIDUnmatchedDesignatorRangeErr = -13927 #kHIDUnmatchedDesignatorRangeErr
kHIDReportSizeZeroErr = -13926 #kHIDReportSizeZeroErr
kHIDReportCountZeroErr = -13925 #kHIDReportCountZeroErr
kHIDReportIDZeroErr = -13924 #kHIDReportIDZeroErr
kHIDInvalidRangePageErr = -13923 #kHIDInvalidRangePageErr
kHIDDeviceNotReady = -13910 #The device is still initializing, try again later
kHIDVersionIncompatibleErr = -13909 #kHIDVersionIncompatibleErr
debuggingNoMatchErr = -13887 #debugging component or option not found at this index
debuggingNoCallbackErr = -13886 #debugging component has no callback
debuggingInvalidNameErr = -13885 #componentName or optionName is invalid (NULL)
debuggingInvalidOptionErr = -13884 #optionSelectorNum is not registered
debuggingInvalidSignatureErr = -13883 #componentSignature not registered
debuggingDuplicateOptionErr = -13882 #optionSelectorNum already registered
debuggingDuplicateSignatureErr = -13881 #componentSignature already registered
debuggingExecutionContextErr = -13880 #routine cannot be called at this time
kBridgeSoftwareRunningCantSleep = -13038 #kBridgeSoftwareRunningCantSleep
kNoSuchPowerSource = -13020 #kNoSuchPowerSource
kProcessorTempRoutineRequiresMPLib2 = -13014 #kProcessorTempRoutineRequiresMPLib2
kCantReportProcessorTemperatureErr = -13013 #kCantReportProcessorTemperatureErr
kPowerMgtRequestDenied = -13010 #kPowerMgtRequestDenied
kPowerMgtMessageNotHandled = -13009 #kPowerMgtMessageNotHandled
kPowerHandlerNotFoundForProcErr = -13008 #kPowerHandlerNotFoundForProcErr
kPowerHandlerNotFoundForDeviceErr = -13007 #kPowerHandlerNotFoundForDeviceErr
kPowerHandlerExistsForDeviceErr = -13006 #kPowerHandlerExistsForDeviceErr
pmRecvEndErr = -13005 #during receive, pmgr did not finish hs configured for this connection
pmRecvStartErr = -13004 #during receive, pmgr did not start hs
pmSendEndErr = -13003 #during send, pmgr did not finish hs
pmSendStartErr = -13002 #during send, pmgr did not start hs
pmReplyTOErr = -13001 #Timed out waiting for reply
pmBusyErr = -13000 #Power Mgr never ready to start handshake
pictureDataErr = -11005 #the picture data was invalid
colorsRequestedErr = -11004 #the number of colors requested was illegal
cantLoadPickMethodErr = -11003 #unable to load the custom pick proc
pictInfoVerbErr = -11002 #the passed verb was invalid
pictInfoIDErr = -11001 #the internal consistency check for the PictInfoID is wrong
pictInfoVersionErr = -11000 #wrong version of the PictInfo structure
errTaskNotFound = -10780 #no task with that task id exists
telNotEnoughdspBW = -10116 #not enough real-time for allocation
telBadSampleRate = -10115 #incompatible sample rate
telBadSWErr = -10114 #Software not installed properly
telDetAlreadyOn = -10113 #detection is already turned on
telAutoAnsNotOn = -10112 #autoAnswer is not turned on
telValidateFailed = -10111 #telValidate failed
telBadProcID = -10110 #invalid procID
telDeviceNotFound = -10109 #device not found
telBadCodeResource = -10108 #code resource not found
telInitFailed = -10107 #initialization failed
telNoCommFolder = -10106 #Communications/Extensions not found
telUnknownErr = -10103 #unable to set config
telNoSuchTool = -10102 #unable to find tool with name specified
telBadFunction = -10091 #bad msgCode specified
telPBErr = -10090 #parameter block error, bad format
telCANotDeflectable = -10082 #CA not "deflectable"
telCANotRejectable = -10081 #CA not "rejectable"
telCANotAcceptable = -10080 #CA not "acceptable"
telTermNotOpen = -10072 #terminal not opened via TELOpenTerm
telStillNeeded = -10071 #terminal driver still needed by someone else
telAlreadyOpen = -10070 #terminal already open
telNoCallbackRef = -10064 #no call back reference was specified, but is required
telDisplayModeNotSupp = -10063 #display mode not supported by tool
telBadDisplayMode = -10062 #bad display mode specified
telFwdTypeNotSupp = -10061 #forward type not supported by tool
telDNTypeNotSupp = -10060 #DN type not supported by tool
telBadRate = -10059 #bad rate specified
telBadBearerType = -10058 #bad bearerType specified
telBadSelect = -10057 #unable to select or deselect DN
telBadParkID = -10056 #bad park id specified
telBadPickupGroupID = -10055 #bad pickup group ID specified
telBadFwdType = -10054 #bad fwdType specified
telBadFeatureID = -10053 #bad feature ID specified
telBadIntercomID = -10052 #bad intercom ID specified
telBadPageID = -10051 #bad page ID specified
telBadDNType = -10050 #DN type invalid
telConfLimitExceeded = -10047 #attempt to exceed switch conference limits
telCBErr = -10046 #call back feature not set previously
telTransferRej = -10045 #transfer request rejected
telTransferErr = -10044 #transfer not prepared
telConfRej = -10043 #conference request was rejected
telConfErr = -10042 #conference was not prepared
telConfNoLimit = -10041 #no limit was specified but required
telConfLimitErr = -10040 #limit specified is too high for this configuration
telFeatNotSupp = -10033 #feature program call not supported by this tool
telFeatActive = -10032 #feature already active
telFeatNotAvail = -10031 #feature subscribed but not available
telFeatNotSub = -10030 #feature not subscribed
errAEPropertiesClash = -10025 #illegal combination of properties settings for Set Data, make new, or duplicate
errAECantPutThatThere = -10024 #in make new, duplicate, etc. class can't be an element of container
errAENotAnEnumMember = -10023 #enumerated value in SetData is not allowed for this property
telIntExtNotSupp = -10022 #internal external type not supported by this tool
telBadIntExt = -10021 #bad internal external error
telStateNotSupp = -10020 #device state not supported by tool
telBadStateErr = -10019 #bad device state specified
telIndexNotSupp = -10018 #index not supported by this tool
telBadIndex = -10017 #bad index specified
telAPattNotSupp = -10016 #alerting pattern not supported by tool
telBadAPattErr = -10015 #bad alerting pattern specified
telVTypeNotSupp = -10014 #volume type not supported by this tool
telBadVTypeErr = -10013 #bad volume type error
telBadLevelErr = -10012 #bad volume level setting
telHTypeNotSupp = -10011 #hook type not supported by this tool
telBadHTypeErr = -10010 #bad hook type specified
errAECantSupplyType = -10009 #errAECantSupplyType
telNoOpenErr = -10008 #unable to open terminal
telNoMemErr = -10007 #no memory to allocate handle
errOSACantAssign = -10006 #Signaled when an object cannot be set in a container.
telBadProcErr = -10005 #bad msgProc specified
telBadHandErr = -10004 #bad handle specified
OSAIllegalAssign = -10003 #Signaled when an object can never be set in a container
telBadDNErr = -10002 #TELDNHandle not found or invalid
telBadTermErr = -10001 #invalid TELHandle or handle not found
errAEEventFailed = -10000 #errAEEventFailed
cannotMoveAttachedController = -9999 #cannotMoveAttachedController
controllerHasFixedHeight = -9998 #controllerHasFixedHeight
cannotSetWidthOfAttachedController = -9997 #cannotSetWidthOfAttachedController
controllerBoundsNotExact = -9996 #controllerBoundsNotExact
editingNotAllowed = -9995 #editingNotAllowed
badControllerHeight = -9994 #badControllerHeight
deviceCantMeetRequest = -9408 #deviceCantMeetRequest
seqGrabInfoNotAvailable = -9407 #seqGrabInfoNotAvailable
badSGChannel = -9406 #badSGChannel
couldntGetRequiredComponent = -9405 #couldntGetRequiredComponent
notEnoughDiskSpaceToGrab = -9404 #notEnoughDiskSpaceToGrab
notEnoughMemoryToGrab = -9403 #notEnoughMemoryToGrab
cantDoThatInCurrentMode = -9402 #cantDoThatInCurrentMode
grabTimeComplete = -9401 #grabTimeComplete
noDeviceForChannel = -9400 #noDeviceForChannel
kNoCardBusCISErr = -9109 #No valid CIS exists for this CardBus card
kNotZVCapableErr = -9108 #This socket does not support Zoomed Video
kCardPowerOffErr = -9107 #Power to the card has been turned off
kAttemptDupCardEntryErr = -9106 #The Enabler was asked to create a duplicate card entry
kAlreadySavedStateErr = -9105 #The state has been saved on previous call
kTooManyIOWindowsErr = -9104 #device requested more than one I/O window
kNotReadyErr = -9103 #PC Card failed to go ready
kClientRequestDenied = -9102 #CS Clients should return this code in order to
kNoCompatibleNameErr = -9101 #There is no compatible driver name for this device
kNoEnablerForCardErr = -9100 #No Enablers were found that can support the card
kNoCardEnablersFoundErr = -9099 #No Enablers were found
kUnsupportedCardErr = -9098 #Card not supported by generic enabler
kNoClientTableErr = -9097 #The client table has not been initialized yet
kNoMoreInterruptSlotsErr = -9096 #All internal Interrupt slots are in use
kNoMoreTimerClientsErr = -9095 #All timer callbacks are in use
kNoIOWindowRequestedErr = -9094 #Request I/O window before calling configuration
kBadCustomIFIDErr = -9093 #Custom interface ID is invalid
kBadTupleDataErr = -9092 #Data in tuple is invalid
kInvalidCSClientErr = -9091 #Card Services ClientID is not registered
kUnsupportedVsErr = -9090 #Unsupported Voltage Sense
kInvalidDeviceNumber = -9089 #kInvalidDeviceNumber
kPostCardEventErr = -9088 #_PCCSLPostCardEvent failed and dropped an event
kCantConfigureCardErr = -9087 #kCantConfigureCardErr
kPassCallToChainErr = -9086 #kPassCallToChainErr
kCardBusCardErr = -9085 #kCardBusCardErr
k16BitCardErr = -9084 #k16BitCardErr
kBadDeviceErr = -9083 #kBadDeviceErr
kBadLinkErr = -9082 #kBadLinkErr
kInvalidRegEntryErr = -9081 #kInvalidRegEntryErr
kNoCardSevicesSocketsErr = -9080 #kNoCardSevicesSocketsErr
kOutOfResourceErr = -9079 #Card Services has exhausted the resource
kNoMoreItemsErr = -9078 #there are no more of the requested item
kInUseErr = -9077 #requested resource is being used by a client
kConfigurationLockedErr = -9076 #a configuration has already been locked
kWriteProtectedErr = -9075 #media is write-protected
kBusyErr = -9074 #unable to process request at this time - try later
kUnsupportedModeErr = -9073 #mode is not supported
kUnsupportedFunctionErr = -9072 #function is not supported by this implementation
kNoCardErr = -9071 #no PC card in the socket
kGeneralFailureErr = -9070 #an undefined error has occurred
kWriteFailureErr = -9069 #unable to complete write request
kReadFailureErr = -9068 #unable to complete read request
kBadSpeedErr = -9067 #specified speed is unavailable
kBadCISErr = -9066 #CIS on card is invalid
kBadHandleErr = -9065 #clientHandle is invalid
kBadArgsErr = -9064 #values in argument packet are invalid
kBadArgLengthErr = -9063 #ArgLength argument is invalid
kBadWindowErr = -9062 #specified window is invalid
kBadVppErr = -9061 #specified Vpp1 or Vpp2 power level index is invalid
kBadVccErr = -9060 #specified Vcc power level index is invalid
kBadTypeErr = -9059 #specified window or interface type is invalid
kBadSocketErr = -9058 #specified logical or physical socket number is invalid
kBadSizeErr = -9057 #specified size is invalid
kBadPageErr = -9056 #specified page is invalid
kBadOffsetErr = -9055 #specified PC card memory array offset is invalid
kBadIRQErr = -9054 #specified IRQ level is invalid
kBadEDCErr = -9053 #specified EDC generator specified is invalid
kBadBaseErr = -9052 #specified base system memory address is invalid
kBadAttributeErr = -9051 #specified attributes field value is invalid
kBadAdapterErr = -9050 #invalid adapter number
codecOffscreenFailedPleaseRetryErr = -8992 #codecOffscreenFailedPleaseRetryErr
lockPortBitsWrongGDeviceErr = -8991 #lockPortBitsWrongGDeviceErr
directXObjectAlreadyExists = -8990 #directXObjectAlreadyExists
codecDroppedFrameErr = -8989 #returned from ImageCodecDrawBand
codecOffscreenFailedErr = -8988 #codecOffscreenFailedErr
codecNeedAccessKeyErr = -8987 #codec needs password in order to decompress
codecParameterDialogConfirm = -8986 #codecParameterDialogConfirm
lockPortBitsSurfaceLostErr = -8985 #lockPortBitsSurfaceLostErr
lockPortBitsBadPortErr = -8984 #lockPortBitsBadPortErr
lockPortBitsWindowClippedErr = -8983 #lockPortBitsWindowClippedErr
lockPortBitsWindowResizedErr = -8982 #lockPortBitsWindowResizedErr
lockPortBitsWindowMovedErr = -8981 #lockPortBitsWindowMovedErr
lockPortBitsBadSurfaceErr = -8980 #lockPortBitsBadSurfaceErr
codecNeedToFlushChainErr = -8979 #codecNeedToFlushChainErr
codecDisabledErr = -8978 #codec disabled itself -- pass codecFlagReenable to reset
codecNoMemoryPleaseWaitErr = -8977 #codecNoMemoryPleaseWaitErr
codecNothingToBlitErr = -8976 #codecNothingToBlitErr
codecCantQueueErr = -8975 #codecCantQueueErr
codecCantWhenErr = -8974 #codecCantWhenErr
codecOpenErr = -8973 #codecOpenErr
codecConditionErr = -8972 #codecConditionErr
codecExtensionNotFoundErr = -8971 #codecExtensionNotFoundErr
codecDataVersErr = -8970 #codecDataVersErr
codecBadDataErr = -8969 #codecBadDataErr
codecWouldOffscreenErr = -8968 #codecWouldOffscreenErr
codecAbortErr = -8967 #codecAbortErr
codecSpoolErr = -8966 #codecSpoolErr
codecImageBufErr = -8965 #codecImageBufErr
codecScreenBufErr = -8964 #codecScreenBufErr
codecSizeErr = -8963 #codecSizeErr
codecUnimpErr = -8962 #codecUnimpErr
noCodecErr = -8961 #noCodecErr
codecErr = -8960 #codecErr
kIllegalClockValueErr = -8852 #kIllegalClockValueErr
kUTCOverflowErr = -8851 #kUTCOverflowErr
kUTCUnderflowErr = -8850 #kUTCUnderflowErr
kATSULastErr = -8809 #The last ATSUI error code.
kATSULineBreakInWord = -8808 #This is not an error code but is returned by ATSUBreakLine to
kATSUCoordinateOverflowErr = -8807 #Used to indicate the coordinates provided to an ATSUI routine caused
kATSUNoFontScalerAvailableErr = -8806 #Used when no font scaler is available for the font passed
kATSUNoFontCmapAvailableErr = -8805 #Used when no CMAP table can be accessed or synthesized for the
kATSULowLevelErr = -8804 #Used when an error was encountered within the low level ATS
kATSUQuickDrawTextErr = -8803 #Used when QuickDraw Text encounters an error rendering or measuring
kATSUNoStyleRunsAssignedErr = -8802 #Used when an attempt was made to measure, highlight or draw
kATSUNotSetErr = -8801 #Used when the client attempts to retrieve an attribute,
kATSUInvalidCacheErr = -8800 #Used when an attempt was made to read in style data
kATSUInvalidAttributeTagErr = -8799 #Used when an attempt was made to use a tag value that
kATSUInvalidAttributeSizeErr = -8798 #Used when an attempt was made to use an attribute with a
kATSUInvalidAttributeValueErr = -8797 #Used when an attempt was made to use an attribute with
kATSUInvalidFontErr = -8796 #Used when an attempt was made to use an invalid font ID.
kATSUNoCorrespondingFontErr = -8795 #This value is retrned by font ID conversion
kATSUFontsNotMatched = -8794 #This value is returned by ATSUMatchFontsToText()
kATSUFontsMatched = -8793 #This is not an error code but is returned by
kATSUInvalidTextRangeErr = -8792 #An attempt was made to extract information
kATSUInvalidStyleErr = -8791 #An attempt was made to use a ATSUStyle which
kATSUInvalidTextLayoutErr = -8790 #An attempt was made to use a ATSUTextLayout
kTECOutputBufferFullStatus = -8785 #output buffer has no room for conversion of next input text element (partial conversion)
kTECNeedFlushStatus = -8784 #kTECNeedFlushStatus
kTECUsedFallbacksStatus = -8783 #kTECUsedFallbacksStatus
kTECItemUnavailableErr = -8771 #item (e.g. name) not available for specified region (& encoding if relevant)
kTECGlobalsUnavailableErr = -8770 #globals have already been deallocated (premature TERM)
unicodeChecksumErr = -8769 #unicodeChecksumErr
unicodeNoTableErr = -8768 #unicodeNoTableErr
unicodeVariantErr = -8767 #unicodeVariantErr
unicodeFallbacksErr = -8766 #unicodeFallbacksErr
unicodePartConvertErr = -8765 #unicodePartConvertErr
unicodeBufErr = -8764 #unicodeBufErr
unicodeCharErr = -8763 #unicodeCharErr
unicodeElementErr = -8762 #unicodeElementErr
unicodeNotFoundErr = -8761 #unicodeNotFoundErr
unicodeTableFormatErr = -8760 #unicodeTableFormatErr
unicodeDirectionErr = -8759 #unicodeDirectionErr
unicodeContextualErr = -8758 #unicodeContextualErr
unicodeTextEncodingDataErr = -8757 #unicodeTextEncodingDataErr
kTECDirectionErr = -8756 #direction stack overflow, etc.
kTECIncompleteElementErr = -8755 #text element may be incomplete or is too long for internal buffers
kTECUnmappableElementErr = -8754 #kTECUnmappableElementErr
kTECPartialCharErr = -8753 #input buffer ends in the middle of a multibyte character, conversion stopped
kTECBadTextRunErr = -8752 #kTECBadTextRunErr
kTECArrayFullErr = -8751 #supplied name buffer or TextRun, TextEncoding, or UnicodeMapping array is too small
kTECBufferBelowMinimumSizeErr = -8750 #output buffer too small to allow processing of first input text element
kTECNoConversionPathErr = -8749 #kTECNoConversionPathErr
kTECCorruptConverterErr = -8748 #invalid converter object reference
kTECTableFormatErr = -8747 #kTECTableFormatErr
kTECTableChecksumErr = -8746 #kTECTableChecksumErr
kTECMissingTableErr = -8745 #kTECMissingTableErr
kTextUndefinedElementErr = -8740 #text conversion errors
kTextMalformedInputErr = -8739 #in DBCS, for example, high byte followed by invalid low byte
kTextUnsupportedEncodingErr = -8738 #specified encoding not supported for this operation
kRANotEnabled = -7139 #kRANotEnabled
kRACallBackFailed = -7138 #kRACallBackFailed
kRADuplicateIPAddr = -7137 #kRADuplicateIPAddr
kRANCPRejectedbyPeer = -7136 #kRANCPRejectedbyPeer
kRAExtAuthenticationFailed = -7135 #kRAExtAuthenticationFailed
kRAATalkInactive = -7134 #kRAATalkInactive
kRAPeerNotResponding = -7133 #kRAPeerNotResponding
kRAPPPPeerDisconnected = -7132 #kRAPPPPeerDisconnected
kRAPPPUserDisconnected = -7131 #kRAPPPUserDisconnected
kRAPPPNegotiationFailed = -7130 #kRAPPPNegotiationFailed
kRAPPPAuthenticationFailed = -7129 #kRAPPPAuthenticationFailed
kRAPPPProtocolRejected = -7128 #kRAPPPProtocolRejected
dcmBufferOverflowErr = -7127 #data is larger than buffer size
kRANotPrimaryInterface = -7126 #when IPCP is not primary TCP/IP intf.
kRATCPIPNotConfigured = -7125 #TCP/IP not configured, could be loaded
kRATCPIPInactive = -7124 #TCP/IP inactive, cannot be loaded
kRARemoteAccessNotReady = -7123 #kRARemoteAccessNotReady
kRAInitOpenTransportFailed = -7122 #kRAInitOpenTransportFailed
dcmProtectedErr = -7121 #need keyword to use dictionary
kRAUserPwdEntryRequired = -7120 #kRAUserPwdEntryRequired
kRAUserPwdChangeRequired = -7119 #kRAUserPwdChangeRequired
dcmBadFindMethodErr = -7118 #no such find method supported
kRAInvalidSerialProtocol = -7117 #kRAInvalidSerialProtocol
kRAInvalidPortState = -7116 #kRAInvalidPortState
dcmBadKeyErr = -7115 #bad key information
kRAPortBusy = -7114 #kRAPortBusy
kRAInstallationDamaged = -7113 #kRAInstallationDamaged
dcmBadFieldTypeErr = -7112 #no such field type supported
dcmBadFieldInfoErr = -7111 #incomplete information
dcmNecessaryFieldErr = -7110 #lack required/identify field
dcmDupRecordErr = -7109 #same record already exist
kRANotConnected = -7108 #kRANotConnected
dcmBlockFullErr = -7107 #dictionary block full
kRAMissingResources = -7106 #kRAMissingResources
dcmDictionaryBusyErr = -7105 #dictionary is busy
dcmDictionaryNotOpenErr = -7104 #dictionary not opened
dcmPermissionErr = -7103 #invalid permission
dcmBadDictionaryErr = -7102 #invalid dictionary
dcmNotDictionaryErr = -7101 #not dictionary
kRAInvalidParameter = -7100 #kRAInvalidParameter
laEngineNotFoundErr = -7000 #can't find the engine
laPropertyErr = -6999 #Error in properties
kUSBUnknownDeviceErr = -6998 #device ref not recognised
laPropertyIsReadOnlyErr = -6997 #the property is read only
laPropertyUnknownErr = -6996 #the property is unknown to this environment
laPropertyValueErr = -6995 #Invalid property value
laDictionaryTooManyErr = -6994 #too many dictionaries
laDictionaryUnknownErr = -6993 #can't use this dictionary with this environment
laDictionaryNotOpenedErr = -6992 #the dictionary is not opened
laTextOverFlowErr = -6991 #text is too long
laFailAnalysisErr = -6990 #analysis failed
laNoMoreMorphemeErr = -6989 #nothing to read
laInvalidPathErr = -6988 #path is not correct
kUSBNotHandled = -6987 #Notification was not handled (same as NotFound)
laEnvironmentNotFoundErr = -6986 #can't fint the specified environment
laEnvironmentBusyErr = -6985 #specified environment is used
laTooSmallBufferErr = -6984 #output buffer is too small to store any result
kUSBFlagsError = -6983 #Unused flags not zeroed
kUSBAbortedError = -6982 #Pipe aborted
kUSBNoBandwidthError = -6981 #Not enough bandwidth available
kUSBPipeIdleError = -6980 #Pipe is Idle, it will not accept transactions
kUSBPipeStalledError = -6979 #Pipe has stalled, error needs to be cleared
kUSBUnknownInterfaceErr = -6978 #Interface ref not recognised
kUSBDeviceBusy = -6977 #Device is already being configured
kUSBDevicePowerProblem = -6976 #Device has a power problem
kUSBInvalidBuffer = -6975 #bad buffer, usually nil
kUSBDeviceSuspended = -6974 #Device is suspended
kUSBDeviceNotSuspended = -6973 #device is not suspended for resume
kUSBDeviceDisconnected = -6972 #Disconnected during suspend or reset
kUSBTimedOut = -6971 #Transaction timed out.
kUSBQueueAborted = -6970 #Pipe zero stall cleared.
kUSBPortDisabled = -6969 #The port you are attached to is disabled, use USBDeviceReset.
kUSBBadDispatchTable = -6950 #Improper driver dispatch table
kUSBUnknownNotification = -6949 #Notification type not defined
kUSBQueueFull = -6948 #Internal queue maxxed
kUSBLinkErr = -6916 #kUSBLinkErr
kUSBCRCErr = -6915 #Pipe stall, bad CRC
kUSBBitstufErr = -6914 #Pipe stall, bitstuffing
kUSBDataToggleErr = -6913 #Pipe stall, Bad data toggle
kUSBEndpointStallErr = -6912 #Device didn't understand
kUSBNotRespondingErr = -6911 #Pipe stall, No device, device hung
kUSBPIDCheckErr = -6910 #Pipe stall, PID CRC error
kUSBWrongPIDErr = -6909 #Pipe stall, Bad or wrong PID
kUSBOverRunErr = -6908 #Packet too large or more data than buffer
kUSBUnderRunErr = -6907 #Less data than buffer
kUSBRes1Err = -6906 #kUSBRes1Err
kUSBRes2Err = -6905 #kUSBRes2Err
kUSBBufOvrRunErr = -6904 #Host hardware failure on data in, PCI busy?
kUSBBufUnderRunErr = -6903 #Host hardware failure on data out, PCI busy?
kUSBNotSent1Err = -6902 #Transaction not sent
kUSBNotSent2Err = -6901 #Transaction not sent
kDMFoundErr = -6232 #Did not proceed because we found an item
kDMMainDisplayCannotMoveErr = -6231 #Trying to move main display (or a display mirrored to it)
kDMDisplayAlreadyInstalledErr = -6230 #Attempt to add an already installed display.
kDMDisplayNotFoundErr = -6229 #Could not find item (will someday remove).
kDMDriverNotDisplayMgrAwareErr = -6228 #Video Driver does not support display manager.
kDMSWNotInitializedErr = -6227 #Required software not initialized (eg windowmanager or display mgr).
kSysSWTooOld = -6226 #Missing critical pieces of System Software.
kDMMirroringNotOn = -6225 #Returned by all calls that need mirroring to be on to do their thing.
kDMCantBlock = -6224 #Mirroring is already on, can't Block now (call DMUnMirror() first).
kDMMirroringBlocked = -6223 #DMBlockMirroring() has been called.
kDMWrongNumberOfDisplays = -6222 #Can only handle 2 displays for now.
kDMMirroringOnAlready = -6221 #Returned by all calls that need mirroring to be off to do their thing.
kDMGenErr = -6220 #Unexpected Error
kQTSSUnknownErr = -6150 #kQTSSUnknownErr
collectionVersionErr = -5753 #collectionVersionErr
collectionIndexRangeErr = -5752 #collectionIndexRangeErr
collectionItemNotFoundErr = -5751 #collectionItemNotFoundErr
collectionItemLockedErr = -5750 #collectionItemLockedErr
kNavMissingKindStringErr = -5699 #kNavMissingKindStringErr
kNavInvalidCustomControlMessageErr = -5698 #kNavInvalidCustomControlMessageErr
kNavCustomControlMessageFailedErr = -5697 #kNavCustomControlMessageFailedErr
kNavInvalidSystemConfigErr = -5696 #kNavInvalidSystemConfigErr
kNavWrongDialogClassErr = -5695 #kNavWrongDialogClassErr
kNavWrongDialogStateErr = -5694 #kNavWrongDialogStateErr
dialogNoTimeoutErr = -5640 #dialogNoTimeoutErr
menuInvalidErr = -5623 #menu is invalid
menuItemNotFoundErr = -5622 #specified menu item wasn't found
menuUsesSystemDefErr = -5621 #GetMenuDefinition failed because the menu uses the system MDEF
menuNotFoundErr = -5620 #specified menu or menu ID wasn't found
windowWrongStateErr = -5615 #window is not in a state that is valid for the current action
windowManagerInternalErr = -5614 #something really weird happened inside the window manager
windowAttributesConflictErr = -5613 #passed some attributes that are mutually exclusive
windowAttributeImmutableErr = -5612 #tried to change attributes which can't be changed
errWindowDoesNotFitOnscreen = -5611 #ConstrainWindowToScreen could not make the window fit onscreen
errWindowNotFound = -5610 #returned from FindWindowOfClass
errFloatingWindowsNotInitialized = -5609 #called HideFloatingWindows or ShowFloatingWindows without calling InitFloatingWindows
errWindowsAlreadyInitialized = -5608 #tried to call InitFloatingWindows twice, or called InitWindows and then floating windows
errUserWantsToDragWindow = -5607 #if returned from TrackWindowProxyDrag, you should call DragWindow on the window
errCorruptWindowDescription = -5606 #tried to load a corrupt window description (size or version fields incorrect)
errUnrecognizedWindowClass = -5605 #tried to create a window with a bad WindowClass
errWindowPropertyNotFound = -5604 #tried to get a nonexistent property
errInvalidWindowProperty = -5603 #tried to access a property tag with private creator
errWindowDoesNotHaveProxy = -5602 #tried to do something requiring a proxy to a window which doesn't have a proxy
errUnsupportedWindowAttributesForClass = -5601 #tried to create a window with WindowAttributes not supported by the WindowClass
errInvalidWindowPtr = -5600 #tried to pass a bad WindowRef argument
gestaltLocationErr = -5553 #gestalt function ptr wasn't in sysheap
gestaltDupSelectorErr = -5552 #tried to add an entry that already existed
gestaltUndefSelectorErr = -5551 #undefined selector was passed to Gestalt
gestaltUnknownErr = -5550 #value returned if Gestalt doesn't know the answer
envVersTooBig = -5502 #Version bigger than call can handle
envBadVers = -5501 #Version non-positive
envNotPresent = -5500 #returned by glue.
qtsAddressBusyErr = -5421 #qtsAddressBusyErr
qtsConnectionFailedErr = -5420 #qtsConnectionFailedErr
qtsTimeoutErr = -5408 #qtsTimeoutErr
qtsUnknownValueErr = -5407 #qtsUnknownValueErr
qtsTooMuchDataErr = -5406 #qtsTooMuchDataErr
qtsUnsupportedFeatureErr = -5405 #qtsUnsupportedFeatureErr
qtsUnsupportedRateErr = -5404 #qtsUnsupportedRateErr
qtsUnsupportedDataTypeErr = -5403 #qtsUnsupportedDataTypeErr
qtsBadDataErr = -5402 #something is wrong with the data
qtsBadStateErr = -5401 #qtsBadStateErr
qtsBadSelectorErr = -5400 #qtsBadSelectorErr
errIAEndOfTextRun = -5388 #errIAEndOfTextRun
errIATextExtractionErr = -5387 #errIATextExtractionErr
errIAInvalidDocument = -5386 #errIAInvalidDocument
errIACanceled = -5385 #errIACanceled
errIABufferTooSmall = -5384 #errIABufferTooSmall
errIANoMoreItems = -5383 #errIANoMoreItems
errIAParamErr = -5382 #errIAParamErr
errIAAllocationErr = -5381 #errIAAllocationErr
errIAUnknownErr = -5380 #errIAUnknownErr
hrURLNotHandledErr = -5363 #hrURLNotHandledErr
hrUnableToResizeHandleErr = -5362 #hrUnableToResizeHandleErr
hrMiscellaneousExceptionErr = -5361 #hrMiscellaneousExceptionErr
hrHTMLRenderingLibNotInstalledErr = -5360 #hrHTMLRenderingLibNotInstalledErr
errCannotUndo = -5253 #errCannotUndo
errNonContiuousAttribute = -5252 #errNonContiuousAttribute
errUnknownElement = -5251 #errUnknownElement
errReadOnlyText = -5250 #errReadOnlyText
errEmptyScrap = -5249 #errEmptyScrap
errNoHiliteText = -5248 #errNoHiliteText
errOffsetNotOnElementBounday = -5247 #errOffsetNotOnElementBounday
errInvalidRange = -5246 #errInvalidRange
errIteratorReachedEnd = -5245 #errIteratorReachedEnd
errEngineNotFound = -5244 #errEngineNotFound
errAlreadyInImagingMode = -5243 #errAlreadyInImagingMode
errNotInImagingMode = -5242 #errNotInImagingMode
errMarginWilllNotFit = -5241 #errMarginWilllNotFit
errUnknownAttributeTag = -5240 #errUnknownAttributeTag
afpSameNodeErr = -5063 #An Attempt was made to connect to a file server running on the same machine
afpAlreadyMounted = -5062 #The volume is already mounted
afpCantMountMoreSrvre = -5061 #The Maximum number of server connections has been reached
afpBadDirIDType = -5060 #afpBadDirIDType
afpCallNotAllowed = -5048 #The server knows what you wanted to do, but won't let you do it just now
afpAlreadyLoggedInErr = -5047 #User has been authenticated but is already logged in from another machine (and that's not allowed on this server)
afpPwdPolicyErr = -5046 #Password does not conform to servers password policy
afpPwdNeedsChangeErr = -5045 #The password needs to be changed
afpInsideTrashErr = -5044 #The folder being shared is inside the trash folder OR the shared folder is being moved into the trash folder
afpInsideSharedErr = -5043 #The folder being shared is inside a shared folder OR the folder contains a shared folder and is being moved into a shared folder
afpPwdExpiredErr = -5042 #The password being used is too old: this requires the user to change the password before log-in can continue
afpPwdTooShortErr = -5041 #The password being set is too short: there is a minimum length that must be met or exceeded
afpPwdSameErr = -5040 #Someone tried to change their password to the same password on a mandatory password change
afpBadIDErr = -5039 #afpBadIDErr
afpSameObjectErr = -5038 #afpSameObjectErr
afpCatalogChanged = -5037 #afpCatalogChanged
afpDiffVolErr = -5036 #afpDiffVolErr
afpIDExists = -5035 #afpIDExists
afpIDNotFound = -5034 #afpIDNotFound
afpContainsSharedErr = -5033 #the folder being shared contains a shared folder
afpObjectLocked = -5032 #Object is M/R/D/W inhibited
afpVolLocked = -5031 #Volume is Read-Only
afpIconTypeError = -5030 #Icon size specified different from existing icon size
afpDirNotFound = -5029 #Unknown directory specified
afpCantRename = -5028 #AFPRename cannot rename volume
afpServerGoingDown = -5027 #Server is shutting down
afpTooManyFilesOpen = -5026 #Maximum open file count reached
afpObjectTypeErr = -5025 #File/Directory specified where Directory/File expected
afpCallNotSupported = -5024 #Unsupported AFP call was made
afpUserNotAuth = -5023 #No AFPLogin call has successfully been made for this session
afpSessClosed = -5022 #Session closed
afpRangeOverlap = -5021 #Some or all of range already locked by same user
afpRangeNotLocked = -5020 #Tried to unlock range that was not locked by user
afpParmErr = -5019 #A specified parameter was out of allowable range
afpObjectNotFound = -5018 #Specified file or directory does not exist
afpObjectExists = -5017 #Specified destination file or directory already exists
afpNoServer = -5016 #Server not responding
afpNoMoreLocks = -5015 #Maximum lock limit reached
afpMiscErr = -5014 #Unexpected error encountered during execution
afpLockErr = -5013 #Some or all of requested range is locked by another user
afpItemNotFound = -5012 #Unknown UserName/UserID or missing comment/APPL entry
afpFlatVol = -5011 #Cannot create directory on specified volume
afpFileBusy = -5010 #Cannot delete an open file
afpEofError = -5009 #Read beyond logical end-of-file
afpDiskFull = -5008 #Insufficient free space on volume for operation
afpDirNotEmpty = -5007 #Cannot delete non-empty directory
afpDenyConflict = -5006 #Specified open/deny modes conflict with current open modes
afpCantMove = -5005 #Move destination is offspring of source, or root was specified
afpBitmapErr = -5004 #Bitmap contained bits undefined for call
afpBadVersNum = -5003 #Unknown AFP protocol version number specified
afpBadUAM = -5002 #Unknown user authentication method specified
afpAuthContinue = -5001 #Further information required to complete AFPLogin call
afpAccessDenied = -5000 #Insufficient access privileges for operation
illegalScrapFlavorSizeErr = -4999 #illegalScrapFlavorSizeErr
illegalScrapFlavorTypeErr = -4998 #illegalScrapFlavorTypeErr
illegalScrapFlavorFlagsErr = -4997 #illegalScrapFlavorFlagsErr
scrapFlavorSizeMismatchErr = -4996 #scrapFlavorSizeMismatchErr
scrapFlavorFlagsMismatchErr = -4995 #scrapFlavorFlagsMismatchErr
nilScrapFlavorDataErr = -4994 #nilScrapFlavorDataErr
noScrapPromiseKeeperErr = -4993 #noScrapPromiseKeeperErr
scrapPromiseNotKeptErr = -4992 #scrapPromiseNotKeptErr
processStateIncorrectErr = -4991 #processStateIncorrectErr
badScrapRefErr = -4990 #badScrapRefErr
duplicateScrapFlavorErr = -4989 #duplicateScrapFlavorErr
internalScrapErr = -4988 #internalScrapErr
coreFoundationUnknownErr = -4960 #coreFoundationUnknownErr
badRoutingSizeErr = -4276 #badRoutingSizeErr
routingNotFoundErr = -4275 #routingNotFoundErr
duplicateRoutingErr = -4274 #duplicateRoutingErr
invalidFolderTypeErr = -4273 #invalidFolderTypeErr
noMoreFolderDescErr = -4272 #noMoreFolderDescErr
duplicateFolderDescErr = -4271 #duplicateFolderDescErr
badFolderDescErr = -4270 #badFolderDescErr
cmCantGamutCheckError = -4217 #Gamut checking not supported by this ColorWorld
cmNamedColorNotFound = -4216 #NamedColor not found
cmCantCopyModifiedV1Profile = -4215 #Illegal to copy version 1 profiles that have been modified
cmRangeOverFlow = -4214 #Color conversion warning that some output color values over/underflowed and were clipped
cmInvalidProfileComment = -4213 #Bad Profile comment during drawpicture
cmNoGDevicesError = -4212 #Begin/End Matching -- no gdevices available
cmInvalidDstMap = -4211 #Destination pix/bit map was invalid
cmInvalidSrcMap = -4210 #Source pix/bit map was invalid
cmInvalidColorSpace = -4209 #Profile colorspace does not match bitmap type
cmErrIncompatibleProfile = -4208 #Other ColorSync Errors
cmSearchError = -4207 #cmSearchError
cmInvalidSearch = -4206 #Bad Search Handle
cmInvalidProfileLocation = -4205 #Operation not supported for this profile location
cmInvalidProfile = -4204 #A Profile must contain a 'cs1 ' tag to be valid
cmFatalProfileErr = -4203 #cmFatalProfileErr
cmCantDeleteElement = -4202 #cmCantDeleteElement
cmIndexRangeErr = -4201 #Tag index out of range
kNSLInitializationFailed = -4200 #UNABLE TO INITIALIZE THE MANAGER!!!!! DO NOT CONTINUE!!!!
kNSLNotInitialized = -4199 #kNSLNotInitialized
kNSLInsufficientSysVer = -4198 #kNSLInsufficientSysVer
kNSLInsufficientOTVer = -4197 #kNSLInsufficientOTVer
kNSLNoElementsInList = -4196 #kNSLNoElementsInList
kNSLBadReferenceErr = -4195 #kNSLBadReferenceErr
kNSLBadServiceTypeErr = -4194 #kNSLBadServiceTypeErr
kNSLBadDataTypeErr = -4193 #kNSLBadDataTypeErr
kNSLBadNetConnection = -4192 #kNSLBadNetConnection
kNSLNoSupportForService = -4191 #kNSLNoSupportForService
kNSLInvalidPluginSpec = -4190 #kNSLInvalidPluginSpec
kNSLRequestBufferAlreadyInList = -4189 #kNSLRequestBufferAlreadyInList
kNSLNoContextAvailable = -4188 #(ContinueLookup function ptr invalid)
kNSLBufferTooSmallForData = -4187 #(Client buffer too small for data from plugin)
kNSLCannotContinueLookup = -4186 #(Can't continue lookup; error or bad state)
kNSLBadClientInfoPtr = -4185 #(nil ClientAsyncInfoPtr; no reference available)
kNSLNullListPtr = -4184 #(client is trying to add items to a nil list)
kNSLBadProtocolTypeErr = -4183 #(client is trying to add a null protocol type)
kNSLPluginLoadFailed = -4182 #(manager unable to load one of the plugins)
kNSLNoPluginsFound = -4181 #(manager didn't find any valid plugins to load)
kNSLSearchAlreadyInProgress = -4180 #(you can only have one ongoing search per clientRef)
kNSLNoPluginsForSearch = -4179 #(no plugins will respond to search request; bad protocol(s)?)
kNSLNullNeighborhoodPtr = -4178 #(client passed a null neighborhood ptr)
kNSLSomePluginsFailedToLoad = -4177 #(one or more plugins failed to load, but at least one did load; this error isn't fatal)
kNSLErrNullPtrError = -4176 #kNSLErrNullPtrError
kNSLNotImplementedYet = -4175 #kNSLNotImplementedYet
kNSLUILibraryNotAvailable = -4174 #The NSL UI Library needs to be in the Extensions Folder
kNSLNoCarbonLib = -4173 #kNSLNoCarbonLib
kNSLBadURLSyntax = -4172 #URL contains illegal characters
kNSLSchedulerError = -4171 #A custom thread routine encountered an error
kNSL68kContextNotSupported = -4170 #no 68k allowed
noHelpForItem = -4009 #noHelpForItem
badProfileError = -4008 #badProfileError
colorSyncNotInstalled = -4007 #colorSyncNotInstalled
pickerCantLive = -4006 #pickerCantLive
cantLoadPackage = -4005 #cantLoadPackage
cantCreatePickerWindow = -4004 #cantCreatePickerWindow
cantLoadPicker = -4003 #cantLoadPicker
pickerResourceError = -4002 #pickerResourceError
requiredFlagsDontMatch = -4001 #requiredFlagsDontMatch
firstPickerError = -4000 #firstPickerError
kOTPortLostConnection = -3285 #
kOTUserRequestedErr = -3284 #
kOTConfigurationChangedErr = -3283 #
kOTBadConfigurationErr = -3282 #
kOTPortWasEjectedErr = -3281 #
kOTPortHasDiedErr = -3280 #
kOTClientNotInittedErr = -3279 #
kENOMSGErr = -3278 #
kESRCHErr = -3277 #
kEINPROGRESSErr = -3276 #
kENODATAErr = -3275 #
kENOSTRErr = -3274 #
kECANCELErr = -3273 #
kEBADMSGErr = -3272 #
kENOSRErr = -3271 #
kETIMEErr = -3270 #
kEPROTOErr = -3269 # fill out missing codes
kEHOSTUNREACHErr = -3264 #No route to host
kEHOSTDOWNErr = -3263 #Host is down
kECONNREFUSEDErr = -3260 #Connection refused
kETIMEDOUTErr = -3259 #Connection timed out
kETOOMANYREFSErr = -3258 #Too many references: can't splice
kESHUTDOWNErr = -3257 #Can't send after socket shutdown
kENOTCONNErr = -3256 #Socket is not connected
kEISCONNErr = -3255 #Socket is already connected
kENOBUFSErr = -3254 #No buffer space available
kECONNRESETErr = -3253 #Connection reset by peer
kECONNABORTEDErr = -3252 #Software caused connection abort
kENETRESETErr = -3251 #Network dropped connection on reset
kENETUNREACHErr = -3250 #Network is unreachable
kENETDOWNErr = -3249 #Network is down
kEADDRNOTAVAILErr = -3248 #Can't assign requested address
kEADDRINUSEErr = -3247 #Address already in use
kEOPNOTSUPPErr = -3244 #Operation not supported on socket
kESOCKTNOSUPPORTErr = -3243 #Socket type not supported
kEPROTONOSUPPORTErr = -3242 #Protocol not supported
kENOPROTOOPTErr = -3241 #Protocol not available
kEPROTOTYPEErr = -3240 #Protocol wrong type for socket
kEMSGSIZEErr = -3239 #Message too long
kEDESTADDRREQErr = -3238 #Destination address required
kENOTSOCKErr = -3237 #Socket operation on non-socket
kEALREADYErr = -3236 #
kEWOULDBLOCKErr = -3234 #Call would block, so was aborted
kERANGEErr = -3233 #Message size too large for STREAM
kEPIPEErr = -3231 #Broken pipe
kENOTTYErr = -3224 #Not a character device
kEINVALErr = -3221 #Invalid argument
kENODEVErr = -3218 #No such device
kOTDuplicateFoundErr = -3216 #OT generic duplicate found error
kEBUSYErr = -3215 #Device or resource busy
kEFAULTErr = -3213 #Bad address
kEACCESErr = -3212 #Permission denied
kOTOutOfMemoryErr = -3211 #OT ran out of memory, may be a temporary
kEAGAINErr = -3210 #Try operation again later
kEBADFErr = -3208 #Bad file number
kENXIOErr = -3205 #No such device or address
kEIOErr = -3204 #I/O error
kEINTRErr = -3203 #Interrupted system service
kENORSRCErr = -3202 #No such resource
kOTNotFoundErr = -3201 #OT generic not found error
kEPERMErr = -3200 #Permission denied
kOTCanceledErr = -3180 #XTI2OSStatus(TCANCELED) The command was cancelled
kOTBadSyncErr = -3179 #XTI2OSStatus(TBADSYNC) A synchronous call at interrupt time
kOTProtocolErr = -3178 #XTI2OSStatus(TPROTO) An unspecified provider error occurred
kOTQFullErr = -3177 #XTI2OSStatus(TQFULL)
kOTResAddressErr = -3176 #XTI2OSStatus(TRESADDR)
kOTResQLenErr = -3175 #XTI2OSStatus(TRESQLEN)
kOTProviderMismatchErr = -3174 #XTI2OSStatus(TPROVMISMATCH) Tried to accept on incompatible endpoint
kOTIndOutErr = -3173 #XTI2OSStatus(TINDOUT) Accept failed because of pending listen
kOTAddressBusyErr = -3172 #XTI2OSStatus(TADDRBUSY) Address requested is already in use
kOTBadQLenErr = -3171 #XTI2OSStatus(TBADQLEN) A Bind to an in-use addr with qlen > 0
kOTBadNameErr = -3170 #XTI2OSStatus(TBADNAME) A bad endpoint name was supplied
kOTNoStructureTypeErr = -3169 #XTI2OSStatus(TNOSTRUCTYPE) Bad structure type requested for OTAlloc
kOTStateChangeErr = -3168 #XTI2OSStatus(TSTATECHNG) State is changing - try again later
kOTNotSupportedErr = -3167 #XTI2OSStatus(TNOTSUPPORT) Command is not supported
kOTNoReleaseErr = -3166 #XTI2OSStatus(TNOREL) No orderly release indication available
kOTBadFlagErr = -3165 #XTI2OSStatus(TBADFLAG) A Bad flag value was supplied
kOTNoUDErrErr = -3164 #XTI2OSStatus(TNOUDERR) No Unit Data Error indication available
kOTNoDisconnectErr = -3163 #XTI2OSStatus(TNODIS) No disconnect indication available
kOTNoDataErr = -3162 #XTI2OSStatus(TNODATA) No data available for reading
kOTFlowErr = -3161 #XTI2OSStatus(TFLOW) Provider is flow-controlled
kOTBufferOverflowErr = -3160 #XTI2OSStatus(TBUFOVFLW) Passed buffer not big enough
kOTBadDataErr = -3159 #XTI2OSStatus(TBADDATA) An illegal amount of data was specified
kOTLookErr = -3158 #XTI2OSStatus(TLOOK) An event occurred - call Look()
kOTSysErrorErr = -3157 #XTI2OSStatus(TSYSERR) A system error occurred
kOTBadSequenceErr = -3156 #XTI2OSStatus(TBADSEQ) Sequence specified does not exist
kOTOutStateErr = -3155 #XTI2OSStatus(TOUTSTATE) Call issued in wrong state
kOTNoAddressErr = -3154 #XTI2OSStatus(TNOADDR) No address was specified
kOTBadReferenceErr = -3153 #XTI2OSStatus(TBADF) Bad provider reference
kOTAccessErr = -3152 #XTI2OSStatus(TACCES) Missing access permission
kOTBadOptionErr = -3151 #XTI2OSStatus(TBADOPT) A Bad option was specified
kOTBadAddressErr = -3150 #XTI2OSStatus(TBADADDR) A Bad address was specified
sktClosedErr = -3109 #sktClosedErr
recNotFnd = -3108 #recNotFnd
atpBadRsp = -3107 #atpBadRsp
atpLenErr = -3106 #atpLenErr
readQErr = -3105 #readQErr
extractErr = -3104 #extractErr
ckSumErr = -3103 #ckSumErr
noMPPErr = -3102 #noMPPErr
buf2SmallErr = -3101 #buf2SmallErr
noPrefAppErr = -3032 #noPrefAppErr
badTranslationSpecErr = -3031 #badTranslationSpecErr
noTranslationPathErr = -3030 #noTranslationPathErr
couldNotParseSourceFileErr = -3026 #Source document does not contain source type
invalidTranslationPathErr = -3025 #Source type to destination type not a valid path
retryComponentRegistrationErr = -3005 #retryComponentRegistrationErr
unresolvedComponentDLLErr = -3004 #unresolvedComponentDLLErr
componentDontRegister = -3003 #componentDontRegister
componentNotCaptured = -3002 #componentNotCaptured
validInstancesExist = -3001 #validInstancesExist
invalidComponentID = -3000 #invalidComponentID
cfragLastErrCode = -2899 #The last value in the range of CFM errors.
cfragOutputLengthErr = -2831 #An output parameter is too small to hold the value.
cfragAbortClosureErr = -2830 #Used by notification handlers to abort a closure.
cfragClosureIDErr = -2829 #The closure ID was not valid.
cfragContainerIDErr = -2828 #The fragment container ID was not valid.
cfragNoRegistrationErr = -2827 #The registration name was not found.
cfragNotClosureErr = -2826 #The closure ID was actually a connection ID.
cfragFileSizeErr = -2825 #A file was too large to be mapped.
cfragFragmentUsageErr = -2824 #A semantic error in usage of the fragment.
cfragArchitectureErr = -2823 #A fragment has an unacceptable architecture.
cfragNoApplicationErr = -2822 #No application member found in the cfrg resource.
cfragInitFunctionErr = -2821 #A fragment's initialization routine returned an error.
cfragFragmentCorruptErr = -2820 #A fragment's container was corrupt (known format).
cfragCFMInternalErr = -2819 #An internal inconsistency has been detected.
cfragCFMStartupErr = -2818 #Internal error during CFM initialization.
cfragLibConnErr = -2817 #
cfragInitAtBootErr = -2816 #A boot library has an initialization function. (System 7 only)
cfragInitLoopErr = -2815 #Circularity in required initialization order.
cfragImportTooNewErr = -2814 #An import library was too new for a client.
cfragImportTooOldErr = -2813 #An import library was too old for a client.
cfragInitOrderErr = -2812 #
cfragNoIDsErr = -2811 #No more CFM IDs for contexts, connections, etc.
cfragNoClientMemErr = -2810 #Out of memory for fragment mapping or section instances.
cfragNoPrivateMemErr = -2809 #Out of memory for internal bookkeeping.
cfragNoPositionErr = -2808 #The registration insertion point was not found.
cfragUnresolvedErr = -2807 #A fragment had "hard" unresolved imports.
cfragFragmentFormatErr = -2806 #A fragment's container format is unknown.
cfragDupRegistrationErr = -2805 #The registration name was already in use.
cfragNoLibraryErr = -2804 #The named library was not found.
cfragNoSectionErr = -2803 #The specified section was not found.
cfragNoSymbolErr = -2802 #The specified symbol was not found.
cfragConnectionIDErr = -2801 #The connection ID was not valid.
cfragFirstErrCode = -2800 #The first value in the range of CFM errors.
errASInconsistentNames = -2780 #English errors:
errASNoResultReturned = -2763 #The range -2780 thru -2799 is reserved for dialect specific error codes. (Error codes from different dialects may overlap.)
errASParameterNotForEvent = -2762 #errASParameterNotForEvent
errASIllegalFormalParameter = -2761 #errASIllegalFormalParameter
errASTerminologyNestingTooDeep = -2760 #errASTerminologyNestingTooDeep
OSAControlFlowError = -2755 #Signaled when illegal control flow occurs in an application (no catcher for throw, non-lexical loop exit, etc.)
OSAInconsistentDeclarations = -2754 #Signaled when a variable is declared inconsistently in the same scope, such as both local and global
OSAUndefinedVariable = -2753 #Signaled when a variable is accessed that has no value
OSADuplicateHandler = -2752 #Signaled when more than one handler is defined with the same name in a scope where the language doesn't allow it
OSADuplicateProperty = -2751 #Signaled when a formal parameter, local variable, or instance variable is specified more than once.
OSADuplicateParameter = -2750 #Signaled when a formal parameter, local variable, or instance variable is specified more than once
OSATokenTooLong = -2742 #Signaled when a name or number is too long to be parsed
OSASyntaxTypeError = -2741 #Signaled when another form of syntax was expected. (e.g. "expected a <type> but found <this>")
OSASyntaxError = -2740 #Signaled when a syntax error occurs. (e.g. "Syntax error" or "<this> can't go after <that>")
errASCantCompareMoreThan32k = -2721 #Parser/Compiler errors:
errASCantConsiderAndIgnore = -2720 #errASCantConsiderAndIgnore
errOSACantCreate = -2710 #errOSACantCreate
errOSACantGetTerminology = -2709 #errOSACantGetTerminology
errOSADataBlockTooLarge = -2708 #Signaled when an intrinsic limitation is exceeded for the size of a value or data structure.
errOSAInternalTableOverflow = -2707 #Signaled when a runtime internal data structure overflows
errOSAStackOverflow = -2706 #Signaled when the runtime stack overflows
errOSACorruptTerminology = -2705 #Signaled when an application's terminology resource is not readable
errOSAAppNotHighLevelEventAware = -2704 #Signaled when an application can't respond to AppleEvents
errOSACantLaunch = -2703 #Signaled when application can't be launched or when it is remote and program linking is not enabled
errOSANumericOverflow = -2702 #Signaled when integer or real value is too large to be represented
errOSADivideByZero = -2701 #Signaled when there is an attempt to divide by zero
errOSAGeneralError = -2700 #Signaled by user scripts or applications when no actual error code is to be returned.
noIconDataAvailableErr = -2582 #The necessary icon data is not available
noSuchIconErr = -2581 #The requested icon could not be found
invalidIconRefErr = -2580 #The icon ref is not valid
nrCallNotSupported = -2557 #This call is not available or supported on this machine
nrTransactionAborted = -2556 #transaction was aborted
nrExitedIteratorScope = -2555 #outer scope of iterator was exited
nrIterationDone = -2554 #iteration operation is done
nrPropertyAlreadyExists = -2553 #property already exists
nrInvalidEntryIterationOp = -2552 #invalid entry iteration operation
nrPathBufferTooSmall = -2551 #buffer for path is too small
nrPathNotFound = -2550 #a path component lookup failed
nrResultCodeBase = -2549 #nrResultCodeBase
nrOverrunErr = -2548 #nrOverrunErr
nrNotModifiedErr = -2547 #nrNotModifiedErr
nrTypeMismatchErr = -2546 #nrTypeMismatchErr
nrPowerSwitchAbortErr = -2545 #nrPowerSwitchAbortErr
nrPowerErr = -2544 #nrPowerErr
nrDataTruncatedErr = -2543 #nrDataTruncatedErr
nrNotSlotDeviceErr = -2542 #nrNotSlotDeviceErr
nrNameErr = -2541 #nrNameErr
nrNotCreatedErr = -2540 #nrNotCreatedErr
nrNotFoundErr = -2539 #nrNotFoundErr
nrInvalidNodeErr = -2538 #nrInvalidNodeErr
nrNotEnoughMemoryErr = -2537 #nrNotEnoughMemoryErr
nrLockedErr = -2536 #nrLockedErr
mmInternalError = -2526 #mmInternalError
tsmDefaultIsNotInputMethodErr = -2524 #Current Input source is KCHR or uchr, not Input Method (GetDefaultInputMethod)
tsmNoStem = -2523 #No stem exists for the token
tsmNoMoreTokens = -2522 #No more tokens are available for the source text
tsmNoHandler = -2521 #No Callback Handler exists for callback
tsmInvalidContext = -2520 #Invalid TSMContext specified in call
tsmUnknownErr = -2519 #any other errors
tsmUnsupportedTypeErr = -2518 #unSupported interface type error
tsmScriptHasNoIMErr = -2517 #script has no input method or is using old IM
tsmInputMethodIsOldErr = -2516 #returned by GetDefaultInputMethod
tsmComponentAlreadyOpenErr = -2515 #text service already opened for the document
tsmTSNotOpenErr = -2514 #text service is not open
tsmTSHasNoMenuErr = -2513 #the text service has no menu
tsmUseInputWindowErr = -2512 #not TSM aware because we are using input window
tsmDocumentOpenErr = -2511 #there are open documents
tsmTextServiceNotFoundErr = -2510 #no text service found
tsmCantOpenComponentErr = -2509 #can't open the component
tsmNoOpenTSErr = -2508 #no open text service
tsmDocNotActiveErr = -2507 #document is NOT active
tsmTSMDocBusyErr = -2506 #document is still active
tsmInvalidDocIDErr = -2505 #invalid TSM documentation id
tsmNeverRegisteredErr = -2504 #app never registered error (not TSM aware)
tsmAlreadyRegisteredErr = -2503 #want to register again error
tsmNotAnAppErr = -2502 #not an application error
tsmInputMethodNotFoundErr = -2501 #tsmInputMethodNotFoundErr
tsmUnsupScriptLanguageErr = -2500 #tsmUnsupScriptLanguageErr
kernelUnrecoverableErr = -2499 #kernelUnrecoverableErr
kernelReturnValueErr = -2422 #kernelReturnValueErr
kernelAlreadyFreeErr = -2421 #kernelAlreadyFreeErr
kernelIDErr = -2419 #kernelIDErr
kernelExceptionErr = -2418 #kernelExceptionErr
kernelTerminatedErr = -2417 #kernelTerminatedErr
kernelInUseErr = -2416 #kernelInUseErr
kernelTimeoutErr = -2415 #kernelTimeoutErr
kernelAsyncReceiveLimitErr = -2414 #kernelAsyncReceiveLimitErr
kernelAsyncSendLimitErr = -2413 #kernelAsyncSendLimitErr
kernelAttributeErr = -2412 #kernelAttributeErr
kernelExecutionLevelErr = -2411 #kernelExecutionLevelErr
kernelDeletePermissionErr = -2410 #kernelDeletePermissionErr
kernelExecutePermissionErr = -2409 #kernelExecutePermissionErr
kernelReadPermissionErr = -2408 #kernelReadPermissionErr
kernelWritePermissionErr = -2407 #kernelWritePermissionErr
kernelObjectExistsErr = -2406 #kernelObjectExistsErr
kernelUnsupportedErr = -2405 #kernelUnsupportedErr
kernelPrivilegeErr = -2404 #kernelPrivilegeErr
kernelOptionsErr = -2403 #kernelOptionsErr
kernelCanceledErr = -2402 #kernelCanceledErr
kernelIncompleteErr = -2401 #kernelIncompleteErr
badCallOrderErr = -2209 #Usually due to a status call being called prior to being setup first
noDMAErr = -2208 #Can't do DMA digitizing (i.e. can't go to requested dest
badDepthErr = -2207 #Can't digitize into this depth
notExactSizeErr = -2206 #Can't do exact size requested
noMoreKeyColorsErr = -2205 #all key indexes in use
notExactMatrixErr = -2204 #warning of bad matrix, digitizer did its best
matrixErr = -2203 #bad matrix, digitizer did nothing
qtParamErr = -2202 #bad input parameter (out of range, etc)
digiUnimpErr = -2201 #feature unimplemented
qtXMLApplicationErr = -2159 #qtXMLApplicationErr
qtXMLParseErr = -2158 #qtXMLParseErr
qtActionNotHandledErr = -2157 #qtActionNotHandledErr
notEnoughDataErr = -2149 #notEnoughDataErr
urlDataHFTPURLErr = -2148 #urlDataHFTPURLErr
urlDataHFTPServerDisconnectedErr = -2147 #urlDataHFTPServerDisconnectedErr
urlDataHFTPNoPasswordErr = -2146 #urlDataHFTPNoPasswordErr
urlDataHFTPNeedPasswordErr = -2145 #urlDataHFTPNeedPasswordErr
urlDataHFTPBadNameListErr = -2144 #urlDataHFTPBadNameListErr
urlDataHFTPNoNetDriverErr = -2143 #urlDataHFTPNoNetDriverErr
urlDataHFTPFilenameErr = -2142 #urlDataHFTPFilenameErr
urlDataHFTPPermissionsErr = -2141 #urlDataHFTPPermissionsErr
urlDataHFTPQuotaErr = -2140 #urlDataHFTPQuotaErr
urlDataHFTPNoDirectoryErr = -2139 #urlDataHFTPNoDirectoryErr
urlDataHFTPDataConnectionErr = -2138 #urlDataHFTPDataConnectionErr
urlDataHFTPServerErr = -2137 #urlDataHFTPServerErr
urlDataHFTPBadPasswordErr = -2136 #urlDataHFTPBadPasswordErr
urlDataHFTPBadUserErr = -2135 #urlDataHFTPBadUserErr
urlDataHFTPShutdownErr = -2134 #urlDataHFTPShutdownErr
urlDataHFTPProtocolErr = -2133 #urlDataHFTPProtocolErr
urlDataHHTTPRedirectErr = -2132 #urlDataHHTTPRedirectErr
urlDataHHTTPURLErr = -2131 #urlDataHHTTPURLErr
urlDataHHTTPNoNetDriverErr = -2130 #urlDataHHTTPNoNetDriverErr
urlDataHHTTPProtocolErr = -2129 #urlDataHHTTPProtocolErr
qtNetworkAlreadyAllocatedErr = -2127 #qtNetworkAlreadyAllocatedErr
notAllowedToSaveMovieErr = -2126 #notAllowedToSaveMovieErr
fileOffsetTooBigErr = -2125 #fileOffsetTooBigErr
ASDEntryNotFoundErr = -2124 #ASDEntryNotFoundErr
ASDBadForkErr = -2123 #ASDBadForkErr
ASDBadHeaderErr = -2122 #ASDBadHeaderErr
AAPNotFoundErr = -2121 #AAPNotFoundErr
AAPNotCreatedErr = -2120 #AAPNotCreatedErr
qfcbNotCreatedErr = -2119 #qfcbNotCreatedErr
qfcbNotFoundErr = -2118 #qfcbNotFoundErr
wackBadMetaDataErr = -2117 #wackBadMetaDataErr
wackForkNotFoundErr = -2116 #wackForkNotFoundErr
wackBadFileErr = -2115 #wackBadFileErr
unknownFormatErr = -2114 #unknownFormatErr
pathNotVerifiedErr = -2113 #pathNotVerifiedErr
noPathMappingErr = -2112 #noPathMappingErr
emptyPathErr = -2111 #emptyPathErr
pathTooLongErr = -2110 #pathTooLongErr
cannotBeLeafAtomErr = -2109 #cannotBeLeafAtomErr
invalidAtomTypeErr = -2108 #invalidAtomTypeErr
invalidAtomContainerErr = -2107 #invalidAtomContainerErr
invalidAtomErr = -2106 #invalidAtomErr
duplicateAtomTypeAndIDErr = -2105 #duplicateAtomTypeAndIDErr
atomIndexInvalidErr = -2104 #atomIndexInvalidErr
atomsNotOfSameTypeErr = -2103 #atomsNotOfSameTypeErr
notLeafAtomErr = -2102 #notLeafAtomErr
cannotFindAtomErr = -2101 #cannotFindAtomErr
unsupportedProcessorErr = -2097 #unsupportedProcessorErr
unsupportedOSErr = -2096 #unsupportedOSErr
qtmlUninitialized = -2095 #qtmlUninitialized
qtmlDllEntryNotFoundErr = -2094 #Windows specific errors (when qtml is loading)
qtmlDllLoadErr = -2093 #Windows specific errors (when qtml is loading)
componentDllEntryNotFoundErr = -2092 #Windows specific errors (when component is loading)
componentDllLoadErr = -2091 #Windows specific errors (when component is loading)
videoOutputInUseErr = -2090 #videoOutputInUseErr
noExportProcAvailableErr = -2089 #noExportProcAvailableErr
tuneParseOSErr = -2087 #tuneParseOSErr
tunePlayerFullOSErr = -2086 #tunePlayerFullOSErr
noteChannelNotAllocatedOSErr = -2085 #noteChannelNotAllocatedOSErr
illegalNoteChannelOSErr = -2084 #illegalNoteChannelOSErr
synthesizerOSErr = -2083 #synthesizerOSErr
synthesizerNotRespondingOSErr = -2082 #synthesizerNotRespondingOSErr
midiManagerAbsentOSErr = -2081 #midiManagerAbsentOSErr
illegalControllerOSErr = -2080 #illegalControllerOSErr
illegalInstrumentOSErr = -2079 #illegalInstrumentOSErr
illegalKnobValueOSErr = -2078 #illegalKnobValueOSErr
illegalKnobOSErr = -2077 #illegalKnobOSErr
illegalChannelOSErr = -2076 #illegalChannelOSErr
illegalPartOSErr = -2075 #illegalPartOSErr
illegalVoiceAllocationOSErr = -2074 #illegalVoiceAllocationOSErr
cantReceiveFromSynthesizerOSErr = -2073 #cantReceiveFromSynthesizerOSErr
cantSendToSynthesizerOSErr = -2072 #cantSendToSynthesizerOSErr
notImplementedMusicOSErr = -2071 #notImplementedMusicOSErr
internalComponentErr = -2070 #internalComponentErr
invalidSpriteIDErr = -2069 #invalidSpriteIDErr
invalidImageIndexErr = -2068 #invalidImageIndexErr
invalidSpriteIndexErr = -2067 #invalidSpriteIndexErr
gWorldsNotSameDepthAndSizeErr = -2066 #gWorldsNotSameDepthAndSizeErr
invalidSpritePropertyErr = -2065 #invalidSpritePropertyErr
invalidSpriteWorldPropertyErr = -2064 #invalidSpriteWorldPropertyErr
missingRequiredParameterErr = -2063 #missingRequiredParameterErr
movieTextNotFoundErr = -2062 #movieTextNotFoundErr
sourceNotFoundErr = -2061 #sourceNotFoundErr
noSourceTreeFoundErr = -2060 #noSourceTreeFoundErr
samplesAlreadyInMediaErr = -2059 #samplesAlreadyInMediaErr
auxiliaryExportDataUnavailable = -2058 #auxiliaryExportDataUnavailable
unsupportedAuxiliaryImportData = -2057 #unsupportedAuxiliaryImportData
soundSupportNotAvailableErr = -2056 #QT for Windows error
noSoundTrackInMovieErr = -2055 #QT for Windows error
noVideoTrackInMovieErr = -2054 #QT for Windows error
featureUnsupported = -2053 #featureUnsupported
couldNotUseAnExistingSample = -2052 #couldNotUseAnExistingSample
noDefaultDataRef = -2051 #noDefaultDataRef
badDataRefIndex = -2050 #badDataRefIndex
invalidDataRefContainer = -2049 #invalidDataRefContainer
noMovieFound = -2048 #noMovieFound
dataNoDataRef = -2047 #dataNoDataRef
endOfDataReached = -2046 #endOfDataReached
dataAlreadyClosed = -2045 #dataAlreadyClosed
dataAlreadyOpenForWrite = -2044 #dataAlreadyOpenForWrite
dataNotOpenForWrite = -2043 #dataNotOpenForWrite
dataNotOpenForRead = -2042 #dataNotOpenForRead
invalidSampleDescription = -2041 #invalidSampleDescription
invalidChunkCache = -2040 #invalidChunkCache
invalidSampleDescIndex = -2039 #invalidSampleDescIndex
invalidChunkNum = -2038 #invalidChunkNum
invalidSampleNum = -2037 #invalidSampleNum
invalidRect = -2036 #invalidRect
cantEnableTrack = -2035 #cantEnableTrack
internalQuickTimeError = -2034 #internalQuickTimeError
badEditIndex = -2033 #badEditIndex
timeNotInMedia = -2032 #timeNotInMedia
timeNotInTrack = -2031 #timeNotInTrack
trackNotInMovie = -2030 #trackNotInMovie
trackIDNotFound = -2029 #trackIDNotFound
badTrackIndex = -2028 #badTrackIndex
maxSizeToGrowTooSmall = -2027 #maxSizeToGrowTooSmall
userDataItemNotFound = -2026 #userDataItemNotFound
staleEditState = -2025 #staleEditState
nonMatchingEditState = -2024 #nonMatchingEditState
invalidEditState = -2023 #invalidEditState
cantCreateSingleForkFile = -2022 #happens when file already exists
wfFileNotFound = -2021 #wfFileNotFound
movieToolboxUninitialized = -2020 #movieToolboxUninitialized
progressProcAborted = -2019 #progressProcAborted
mediaTypesDontMatch = -2018 #mediaTypesDontMatch
badEditList = -2017 #badEditList
cantPutPublicMovieAtom = -2016 #cantPutPublicMovieAtom
invalidTime = -2015 #invalidTime
invalidDuration = -2014 #invalidDuration
invalidHandler = -2013 #invalidHandler
invalidDataRef = -2012 #invalidDataRef
invalidSampleTable = -2011 #invalidSampleTable
invalidMovie = -2010 #invalidMovie
invalidTrack = -2009 #invalidTrack
invalidMedia = -2008 #invalidMedia
noDataHandler = -2007 #noDataHandler
noMediaHandler = -2006 #noMediaHandler
badComponentType = -2005 #badComponentType
cantOpenHandler = -2004 #cantOpenHandler
cantFindHandler = -2003 #cantFindHandler
badPublicMovieAtom = -2002 #badPublicMovieAtom
badImageDescription = -2001 #badImageDescription
couldNotResolveDataRef = -2000 #couldNotResolveDataRef
nonDragOriginatorErr = -1862 #illegal attempt at originator only data
badImageErr = -1861 #bad translucent image PixMap
badImageRgnErr = -1860 #bad translucent image region
noSuitableDisplaysErr = -1859 #no displays support translucency
unsupportedForPlatformErr = -1858 #call is for PowerPC only
dragNotAcceptedErr = -1857 #drag was not accepted by receiver
handlerNotFoundErr = -1856 #handler not found
duplicateHandlerErr = -1855 #handler already exists
cantGetFlavorErr = -1854 #error while trying to get flavor data
duplicateFlavorErr = -1853 #flavor type already exists
badDragFlavorErr = -1852 #unknown flavor type
badDragItemErr = -1851 #unknown drag item reference
badDragRefErr = -1850 #unknown drag reference
errEndOfBody = -1813 #errEndOfBody
errEndOfDocument = -1812 #errEndOfDocument
errTopOfBody = -1811 #errTopOfBody
errTopOfDocument = -1810 #errTopOfDocument
errOffsetIsOutsideOfView = -1801 #errOffsetIsOutsideOfView
errOffsetInvalid = -1800 #errOffsetInvalid
errOSACantOpenComponent = -1762 #Can't connect to scripting system with that ID
errOSAComponentMismatch = -1761 #Parameters are from 2 different components
errOSADataFormatTooNew = -1759 #errOSADataFormatTooNew
errOSADataFormatObsolete = -1758 #errOSADataFormatObsolete
errOSANoSuchDialect = -1757 #errOSANoSuchDialect
errOSASourceNotAvailable = -1756 #errOSASourceNotAvailable
errOSABadSelector = -1754 #errOSABadSelector
errOSAScriptError = -1753 #errOSAScriptError
errOSABadStorageType = -1752 #errOSABadStorageType
errOSAInvalidID = -1751 #errOSAInvalidID
errOSASystemError = -1750 #errOSASystemError
errAEBufferTooSmall = -1741 #buffer for AEFlattenDesc too small
errAEBuildSyntaxError = -1740 #AEBuildDesc and friends detected a syntax error
errAEDescIsNull = -1739 #attempting to perform an invalid operation on a null descriptor
errAEStreamAlreadyConverted = -1738 #attempt to convert a stream that has already been converted
errAEStreamBadNesting = -1737 #nesting violation while streaming
errAEDuplicateHandler = -1736 #attempt to install handler in table for identical class and id (1.1 or greater)
errAEEventFiltered = -1735 #event has been filtered, and should not be propagated (1.1 or greater)
errAEReceiveEscapeCurrent = -1734 #break out of only lowest level of AEReceive (1.1 or greater)
errAEReceiveTerminate = -1733 #break out of all levels of AEReceive to the topmost (1.1 or greater)
errAERecordingIsAlreadyOn = -1732 #available only in version 1.0.1 or greater
errAEUnknownObjectType = -1731 #available only in version 1.0.1 or greater
errAEEmptyListContainer = -1730 #Attempt to pass empty list as container to accessor
errAENegativeCount = -1729 #CountProc returned negative value
errAENoSuchObject = -1728 #e.g.,: specifier asked for the 3rd, but there are only 2. Basically, this indicates a run-time resolution error.
errAENotAnObjSpec = -1727 #Param to AEResolve not of type 'obj '
errAEBadTestKey = -1726 #Test is neither typeLogicalDescriptor nor typeCompDescriptor
errAENoSuchLogical = -1725 #Something other than AND, OR, or NOT
errAEAccessorNotFound = -1723 #Accessor proc matching wantClass and containerType or wildcards not found
errAEWrongNumberArgs = -1721 #Logical op kAENOT used with other than 1 term
errAEImpossibleRange = -1720 #A range like 3rd to 2nd, or 1st to all.
errAEIllegalIndex = -1719 #index is out of range in a put operation
errAEReplyNotArrived = -1718 #the contents of the reply you are accessing have not arrived yet
errAEHandlerNotFound = -1717 #no handler in the dispatch tables fits the parameters to AEGetEventHandler or AEGetCoercionHandler
errAEUnknownAddressType = -1716 #the target address type is not known
errAEParamMissed = -1715 #a required parameter was not accessed
errAENotASpecialFunction = -1714 #there is no special function for/with this keyword
errAENoUserInteraction = -1713 #no user interaction is allowed
errAETimeout = -1712 #the AppleEvent timed out
errAEWaitCanceled = -1711 #in AESend, the user cancelled out of wait loop for reply or receipt
errAEUnknownSendMode = -1710 #mode wasn't NoReply, WaitReply, or QueueReply or Interaction level is unknown
errAEReplyNotValid = -1709 #AEResetTimer was passed an invalid reply parameter
errAEEventNotHandled = -1708 #the AppleEvent was not handled by any handler
errAENotAppleEvent = -1707 #the event is not in AppleEvent format
errAENewerVersion = -1706 #need newer version of the AppleEvent manager
errAEBadListItem = -1705 #the specified list item does not exist
errAENotAEDesc = -1704 #errAENotAEDesc
errAEWrongDataType = -1703 #errAEWrongDataType
errAECorruptData = -1702 #errAECorruptData
errAEDescNotFound = -1701 #errAEDescNotFound
errAECoercionFail = -1700 #bad parameter data or unable to coerce the data supplied
errFSIteratorNotSupported = -1424 #The iterator's flags or container are not supported by this call
errFSIteratorNotFound = -1423 #Passed FSIterator is not an open iterator
errFSBadIteratorFlags = -1422 #Flags passed to FSOpenIterator are bad
errFSForkExists = -1421 #Named fork already exists.
errFSRefsDifferent = -1420 #FSCompareFSRefs; refs are for different objects
errFSBadSearchParams = -1419 #Something wrong with CatalogSearch searchParams
errFSBadItemCount = -1418 #maximumItems was zero
errFSNoMoreItems = -1417 #Iteration ran out of items to return
errFSBadAllocFlags = -1413 #Invalid bits set in allocationFlags
errFSBadPosMode = -1412 #Newline bits set in positionMode
errFSMissingName = -1411 #A Unicode name parameter was NULL or nameLength parameter was zero
errFSNameTooLong = -1410 #File/fork name is too long to create/rename
errFSForkNotFound = -1409 #Named fork does not exist
errFSNotAFolder = -1407 #Expected a folder, got a file
errFSMissingCatInfo = -1406 #A CatalogInfo parameter was NULL
errFSBadInfoBitmap = -1405 #A CatalogInfoBitmap or VolumeInfoBitmap has reserved or invalid bits set
errFSBadForkRef = -1404 #A ForkRefNum parameter was bad
errFSBadBuffer = -1403 #A buffer parameter was bad
errFSBadForkName = -1402 #Fork name parameter is bad
errFSBadFSRef = -1401 #FSRef parameter is bad
errFSUnknownCall = -1400 #selector is not recognized by this filesystem
badFCBErr = -1327 #FCBRecPtr is not valid
volVMBusyErr = -1311 #can't eject because volume is in use by VM
fsDataTooBigErr = -1310 #file or volume is too big for system
fileBoundsErr = -1309 #file's EOF, offset, mark or size is too big
notARemountErr = -1308 #when _Mount allows only remounts and doesn't get one
badFidErr = -1307 #file id is dangling or doesn't match with the file number
sameFileErr = -1306 #can't exchange a file with itself
desktopDamagedErr = -1305 #desktop database files are corrupted
catChangedErr = -1304 #the catalog has been modified
diffVolErr = -1303 #files on different volumes
notAFileErr = -1302 #directory specified
fidExists = -1301 #file id already exists
fidNotFound = -1300 #no file thread exists.
errRefNum = -1280 #bad connection refNum
errAborted = -1279 #control call was aborted
errState = -1278 #bad connection state for this operation
errOpening = -1277 #open connection request failed
errAttention = -1276 #attention message too long
errFwdReset = -1275 #read terminated by forward reset
errDSPQueueSize = -1274 #DSP Read/Write Queue Too small
errOpenDenied = -1273 #open connection request was denied
reqAborted = -1105 #reqAborted
noDataArea = -1104 #noDataArea
noSendResp = -1103 #noSendResp
cbNotFound = -1102 #cbNotFound
noRelErr = -1101 #noRelErr
badBuffNum = -1100 #badBuffNum
badATPSkt = -1099 #badATPSkt
tooManySkts = -1098 #tooManySkts
tooManyReqs = -1097 #tooManyReqs
reqFailed = -1096 #reqFailed
aspNoAck = -1075 #No ack on attention request (server err)
aspTooMany = -1074 #Too many clients (server error)
aspSizeErr = -1073 #Command block too big
aspSessClosed = -1072 #Session closed
aspServerBusy = -1071 #Server cannot open another session
aspParamErr = -1070 #Parameter error
aspNoServers = -1069 #No servers at that address
aspNoMoreSess = -1068 #No more sessions on server
aspBufTooSmall = -1067 #Buffer too small
aspBadVersNum = -1066 #Server cannot support this ASP version
nbpNISErr = -1029 #Error trying to open the NIS
nbpNotFound = -1028 #Name not found on remove
nbpDuplicate = -1027 #Duplicate name exists already
nbpConfDiff = -1026 #Name confirmed at different socket
nbpNoConfirm = -1025 #nbpNoConfirm
nbpBuffOvr = -1024 #Buffer overflow in LookupName
noMaskFoundErr = -1000 #Icon Utilties Error
kFMFontContainerAccessErr = -985 #kFMFontContainerAccessErr
kFMFontTableAccessErr = -984 #kFMFontTableAccessErr
kFMIterationScopeModifiedErr = -983 #kFMIterationScopeModifiedErr
kFMInvalidFontErr = -982 #kFMInvalidFontErr
kFMInvalidFontFamilyErr = -981 #kFMInvalidFontFamilyErr
kFMIterationCompleted = -980 #kFMIterationCompleted
guestNotAllowedErr = -932 #destination port requires authentication
badLocNameErr = -931 #location name malformed
badServiceMethodErr = -930 #illegal service type, or not supported
noUserRecErr = -928 #Invalid user reference number
authFailErr = -927 #unable to authenticate user at destination
noInformErr = -926 #PPCStart failed because destination did not have inform pending
networkErr = -925 #An error has occurred in the network, not too likely
noUserRefErr = -924 #unable to create a new userRefNum
notLoggedInErr = -923 #The default userRefNum does not yet exist
noDefaultUserErr = -922 #user hasn't typed in owner's name in Network Setup Control Panel
badPortNameErr = -919 #PPCPortRec malformed
sessClosedErr = -917 #session was closed
portClosedErr = -916 #port was closed
noResponseErr = -915 #unable to contact destination
noToolboxNameErr = -914 #A system resource is missing, not too likely
noMachineNameErr = -913 #user hasn't named his Macintosh in the Network Setup Control Panel
userRejectErr = -912 #Destination rejected the session request
noUserNameErr = -911 #user name unknown on destination machine
portNameExistsErr = -910 #port is already open (perhaps in another app)
badReqErr = -909 #bad parameter or invalid state for operation
noSessionErr = -908 #Invalid session reference number
sessTableErr = -907 #Out of session tables, try again later
destPortErr = -906 #Port does not exist at destination
localOnlyErr = -905 #Network activity is currently disabled
noGlobalsErr = -904 #The system is hosed, better re-boot
noPortErr = -903 #Unable to open port or bad portRefNum. If you're calling
nameTypeErr = -902 #Invalid or inappropriate locationKindSelector in locationName
notInitErr = -900 #PPCToolBox not initialized
notAppropriateForClassic = -877 #This application won't or shouldn't run on Classic (Problem 2481058).
appVersionTooOld = -876 #The application's creator and version are incompatible with the current version of Mac OS.
wrongApplicationPlatform = -875 #The application could not launch because the required platform is not available
hmCloseViewActive = -863 #Returned from HMRemoveBalloon if CloseView was active
hmNoBalloonUp = -862 #Returned from HMRemoveBalloon if no balloon was visible when call was made
hmOperationUnsupported = -861 #Returned from HMShowBalloon call if bad method passed to routine
hmUnknownHelpType = -859 #Returned if help msg record contained a bad type
hmWrongVersion = -858 #Returned if help mgr resource was the wrong version
hmSkippedBalloon = -857 #Returned from calls if helpmsg specified a skip balloon
hmHelpManagerNotInited = -855 #Returned from HMGetHelpMenuHandle if help menu not setup
hmSameAsLastBalloon = -854 #Returned from HMShowMenuBalloon if menu & item is same as last time
hmBalloonAborted = -853 #Returned if mouse was moving or mouse wasn't in window port rect
hmHelpDisabled = -850 #Show Balloons mode was off, call to routine ignored
rcDBPackNotInited = -813 #attempt to call other routine before InitDBPack
rcDBWrongVersion = -812 #incompatible versions
rcDBNoHandler = -811 #no app handler for specified data type
rcDBBadAsyncPB = -810 #tried to kill a bad pb
rcDBAsyncNotSupp = -809 #ddev does not support async calls
rcDBBadDDEV = -808 #bad ddev specified on DBInit
rcDBBadSessNum = -807 #bad session number for DBGetConnInfo
rcDBBadSessID = -806 #rcDBBadSessID
rcDBExec = -805 #rcDBExec
rcDBBreak = -804 #rcDBBreak
rcDBBadType = -803 #rcDBBadType
rcDBError = -802 #rcDBError
rcDBValue = -801 #rcDBValue
rcDBNull = -800 #rcDBNull
icTooManyProfilesErr = -677 #too many profiles in database
icProfileNotFoundErr = -676 #profile not found
icConfigInappropriateErr = -675 #incorrect manufacturer code
icConfigNotFoundErr = -674 #no internet configuration was found
icNoURLErr = -673 #no URL found
icNothingToOverrideErr = -672 #no component for the override component to capture
icNoMoreWritersErr = -671 #you cannot begin a write session because someone else is already doing it
icTruncatedErr = -670 #more data was present than was returned
icInternalErr = -669 #Internet Config internal error
icPrefDataErr = -668 #problem with preference data
icPermErr = -667 #cannot set preference
icPrefNotFoundErr = -666 #Internet preference not found
vmInvalidOwningProcessErr = -648 #current process does not own the BackingFileID or FileViewID
vmAddressNotInFileViewErr = -647 #address is not in a FileView
vmNoMoreFileViewsErr = -646 #no more FileViews were found
vmFileViewAccessErr = -645 #requested FileViewAccess cannot be obtained
vmInvalidFileViewIDErr = -644 #invalid FileViewID
vmNoMoreBackingFilesErr = -643 #no more BackingFiles were found
vmBusyBackingFileErr = -642 #open views found on BackingFile
vmMappingPrivilegesErr = -641 #requested MappingPrivileges cannot be obtained
vmInvalidBackingFileIDErr = -640 #invalid BackingFileID
noMMUErr = -626 #no MMU present
cannotDeferErr = -625 #unable to defer additional functions
interruptsMaskedErr = -624 #don't call with interrupts masked
notLockedErr = -623 #specified range of memory is not locked
cannotMakeContiguousErr = -622 #cannot make specified range contiguous
notHeldErr = -621 #specified range of memory is not held
notEnoughMemoryErr = -620 #insufficient physical memory
threadProtocolErr = -619 #threadProtocolErr
threadNotFoundErr = -618 #threadNotFoundErr
threadTooManyReqsErr = -617 #threadTooManyReqsErr
noUserInteractionAllowed = -610 #no user interaction allowed
connectionInvalid = -609 #connectionInvalid
noOutstandingHLE = -608 #noOutstandingHLE
bufferIsSmall = -607 #error returns from Post and Accept
appIsDaemon = -606 #app is BG-only, and launch flags disallow this
appMemFullErr = -605 #application SIZE not big enough for launch
hardwareConfigErr = -604 #hardware configuration not correct for call
protocolErr = -603 #app made module calls in improper order
appModeErr = -602 #memory mode is 32-bit, but app not 32-bit clean
memFragErr = -601 #not enough room to launch app w/special requirements
procNotFound = -600 #no eligible process with specified descriptor
driverHardwareGoneErr = -503 #disk driver's hardware was disconnected
hwParamErr = -502 #bad selector for _HWPriv
teScrapSizeErr = -501 #scrap item too big for text edit record
rgnTooBigErr = -500 #rgnTooBigErr
exUserBreak = -492 #user debugger break; execute debugger commands on stack
strUserBreak = -491 #user debugger break; display string on stack
userBreak = -490 #user debugger break
notThePublisherWrn = -463 #not the first registered publisher for that container
containerAlreadyOpenWrn = -462 #container already opened by this section
containerNotFoundWrn = -461 #could not find editionContainer at this time
multiplePublisherWrn = -460 #A Publisher is already registered for that container
badSubPartErr = -454 #can not use sub parts in this release
badEditionFileErr = -453 #edition file is corrupt
notRegisteredSectionErr = -452 #not a registered SectionRecord
badSectionErr = -451 #not a valid SectionRecord
editionMgrInitErr = -450 #edition manager not inited by this app
fsmUnknownFSMMessageErr = -438 #unknown message passed to FSM
fsmNoAlternateStackErr = -437 #no alternate stack for HFS CI
fsmBadFSDVersionErr = -436 #FSM version incompatible with FSD
fsmDuplicateFSIDErr = -435 #FSID already exists on InstallFS
fsmBadFSDLenErr = -434 #FSD size incompatible with current FSM vers
fsmBadFFSNameErr = -433 #Name length not 1 <= length <= 31
fsmBusyFFSErr = -432 #File system is busy, cannot be removed
fsmFFSNotFoundErr = -431 #Foreign File system does not exist - new Pack2 could return this error too
btKeyAttrErr = -417 #There is no such a key attribute.
btKeyLenErr = -416 #Maximum key length is too long or equal to zero.
btRecNotFnd = -415 #Record cannot be found.
btDupRecErr = -414 #Record already exists.
btNoSpace = -413 #Can't allocate disk space.
notBTree = -410 #The file is not a dictionary.
gcrOnMFMErr = -400 #gcr format on high density media error
slotNumErr = -360 #invalid slot # error
smRecNotFnd = -351 #Record not found in the SRT.
smSRTOvrFlErr = -350 #SRT over flow.
smNoGoodOpens = -349 #No opens were successful in the loop.
smOffsetErr = -348 #Offset was too big (temporary error)
smByteLanesErr = -347 #NumByteLanes was determined to be zero.
smBadsPtrErr = -346 #Bad pointer was passed to sCalcsPointer
smsGetDrvrErr = -345 #Error occurred during _sGetDriver.
smNoMoresRsrcs = -344 #No more sResources
smDisDrvrNamErr = -343 #Error occurred during _sDisDrvrName.
smGetDrvrNamErr = -342 #Error occurred during _sGetDrvrName.
smCkStatusErr = -341 #Status of slot = fail.
smBlkMoveErr = -340 #_BlockMove error
smNewPErr = -339 #_NewPtr error
smSelOOBErr = -338 #Selector out of bounds error
smSlotOOBErr = -337 #Slot out of bounds error
smNilsBlockErr = -336 #Nil sBlock error (Dont allocate and try to use a nil sBlock)
smsPointerNil = -335 #LPointer is nil From sOffsetData. If this error occurs; check sInfo rec for more information.
smCPUErr = -334 #Code revision is wrong
smCodeRevErr = -333 #Code revision is wrong
smReservedErr = -332 #Reserved field not zero
smBadsList = -331 #Bad sList: Id1 < Id2 < Id3 ...format is not followed.
smBadRefId = -330 #Reference Id not found in List
smBusErrTO = -320 #BusError time out.
smBadBoardId = -319 #BoardId was wrong; re-init the PRAM record.
smReservedSlot = -318 #slot is reserved, VM should not use this address space.
smInitTblVErr = -317 #An error occurred while trying to initialize the Slot Resource Table.
smInitStatVErr = -316 #The InitStatusV field was negative after primary or secondary init.
smNoBoardId = -315 #No Board Id.
smGetPRErr = -314 #Error occurred during _sGetPRAMRec (See SIMStatus).
smNoBoardSRsrc = -313 #No Board sResource.
smDisposePErr = -312 #_DisposePointer error
smFHBlkDispErr = -311 #Error occurred during _sDisposePtr (Dispose of FHeader block).
smFHBlockRdErr = -310 #Error occurred during _sGetFHeader.
smBLFieldBad = -309 #ByteLanes field was bad.
smUnExBusErr = -308 #Unexpected BusError
smResrvErr = -307 #Fatal reserved error. Reserved field != 0.
smNosInfoArray = -306 #No sInfoArray. Memory Mgr error.
smDisabledSlot = -305 #This slot is disabled (-305 use to be smLWTstBad)
smNoDir = -304 #Directory offset is Nil
smRevisionErr = -303 #Wrong revision level
smFormatErr = -302 #FHeader Format is not Apple's
smCRCFail = -301 #CRC check failed for declaration data
smEmptySlot = -300 #No card in slot
nmTypErr = -299 #Notification Manager:wrong queue type
smPriInitErr = -293 #Error; Cards could not be initialized.
smPRAMInitErr = -292 #Error; Slot Resource Table could not be initialized.
smSRTInitErr = -291 #Error; Slot Resource Table could not be initialized.
smSDMInitErr = -290 #Error; SDM could not be initialized.
midiInvalidCmdErr = -261 #command not supported for port type
midiDupIDErr = -260 #duplicate client ID
midiNameLenErr = -259 #name supplied is longer than 31 characters
midiWriteErr = -258 #MIDIWritePacket couldn't write to all connected ports
midiNoConErr = -257 #no connection exists between specified ports
midiVConnectRmvd = -256 #pending virtual connection removed
midiVConnectMade = -255 #pending virtual connection resolved
midiVConnectErr = -254 #pending virtual connection created
midiTooManyConsErr = -253 #too many connections made
midiTooManyPortsErr = -252 #too many ports already installed in the system
midiNoPortErr = -251 #no port with that ID found
midiNoClientErr = -250 #no client with that ID found
badInputText = -247 #badInputText
badDictFormat = -246 #badDictFormat
incompatibleVoice = -245 #incompatibleVoice
voiceNotFound = -244 #voiceNotFound
bufTooSmall = -243 #bufTooSmall
synthNotReady = -242 #synthNotReady
synthOpenFailed = -241 #synthOpenFailed
noSynthFound = -240 #noSynthFound
siUnknownQuality = -232 #invalid quality selector (returned by driver)
siUnknownInfoType = -231 #invalid info type selector (returned by driver)
siInputDeviceErr = -230 #input device hardware failure
siBadRefNum = -229 #invalid input device reference number
siBadDeviceName = -228 #input device could not be opened
siDeviceBusyErr = -227 #input device already in use
siInvalidSampleSize = -226 #invalid sample size
siInvalidSampleRate = -225 #invalid sample rate
siHardDriveTooSlow = -224 #hard drive too slow to record to disk
siInvalidCompression = -223 #invalid compression type
siNoBufferSpecified = -222 #returned by synchronous SPBRecord if nil buffer passed
siBadSoundInDevice = -221 #invalid index passed to SoundInGetIndexedDevice
siNoSoundInHardware = -220 #no Sound Input hardware
siVBRCompressionNotSupported = -213 #vbr audio compression not supported for this operation
noMoreRealTime = -212 #not enough CPU cycles left to add another task
channelNotBusy = -211 #channelNotBusy
buffersTooSmall = -210 #can not operate in the memory allowed
channelBusy = -209 #the Channel is being used for a PFD already
badFileFormat = -208 #was not type AIFF or was of bad format,corrupt
notEnoughBufferSpace = -207 #could not allocate enough memory
badFormat = -206 #Sound Manager Error Returns
badChannel = -205 #Sound Manager Error Returns
resProblem = -204 #Sound Manager Error Returns
queueFull = -203 #Sound Manager Error Returns
notEnoughHardwareErr = -201 #Sound Manager Error Returns
noHardwareErr = -200 #Sound Manager Error Returns
mapReadErr = -199 #map inconsistent with operation
resAttrErr = -198 #attribute inconsistent with operation
rmvRefFailed = -197 #RmveReference failed
rmvResFailed = -196 #RmveResource failed
addRefFailed = -195 #AddReference failed
addResFailed = -194 #AddResource failed
resFNotFound = -193 #Resource file not found
resNotFound = -192 #Resource not found
inputOutOfBounds = -190 #Offset of Count out of bounds
writingPastEnd = -189 #Writing past end of file
resourceInMemory = -188 #Resource already in memory
CantDecompress = -186 #resource bent ("the bends") - can't decompress a compressed resource
badExtResource = -185 #extended resource has a bad format.
cmNoCurrentProfile = -182 #Responder error
cmUnsupportedDataType = -181 #Responder error
cmCantDeleteProfile = -180 #Responder error
cmCantXYZ = -179 #CMM cant handle XYZ space
cmCantConcatenateError = -178 #Profile can't be concatenated
cmProfilesIdentical = -177 #Profiles the same
cmProfileNotFound = -176 #Responder error
cmMethodNotFound = -175 #CMM not present
cmMethodError = -171 #cmMethodError
cmProfileError = -170 #cmProfileError
cDepthErr = -157 #invalid pixel depth
cResErr = -156 #invalid resolution for MakeITable
cDevErr = -155 #invalid type of graphics device
cProtectErr = -154 #colorTable entry protection violation
cRangeErr = -153 #range error on colorTable request
cNoMemErr = -152 #failed to allocate memory for structure
cTempMemErr = -151 #failed to allocate memory for temporary structures
cMatchErr = -150 #Color2Index failed to find an index
insufficientStackErr = -149 #insufficientStackErr
pixMapTooDeepErr = -148 #pixMapTooDeepErr
rgnOverflowErr = -147 #rgnOverflowErr
noMemForPictPlaybackErr = -145 #noMemForPictPlaybackErr
userCanceledErr = -128 #userCanceledErr
hMenuFindErr = -127 #could not find HMenu's parent in MenuKey (wrong error code - obsolete)
mBarNFnd = -126 #system error code for MBDF not found
updPixMemErr = -125 #insufficient memory to update a pixmap
volGoneErr = -124 #Server volume has been disconnected.
wrgVolTypErr = -123 #Wrong volume type error [operation not supported for MFS]
badMovErr = -122 #Move into offspring error
tmwdoErr = -121 #No free WDCB available
dirNFErr = -120 #Directory not found
memLockedErr = -117 #trying to move a locked block (MoveHHi)
memSCErr = -116 #Size Check failed
memBCErr = -115 #Block Check failed
memPCErr = -114 #Pointer Check failed
memAZErr = -113 #Address in zone check failed
memPurErr = -112 #trying to purge a locked or non-purgeable block
memWZErr = -111 #WhichZone failed (applied to free block)
memAdrErr = -110 #address was odd; or out of range
nilHandleErr = -109 #Master Pointer was NIL in HandleZone or other
memFullErr = -108 #Not enough room in heap zone
noTypeErr = -102 #No object of that type in scrap
noScrapErr = -100 #No scrap exists error
memROZWarn = -99 #soft error in ROZ
portNotCf = -98 #driver Open error code (parameter RAM not configured for this connection)
portInUse = -97 #driver Open error code (port is in use)
portNotPwr = -96 #serial port not currently powered
excessCollsns = -95 #excessive collisions on write
lapProtErr = -94 #error in attaching/detaching protocol
noBridgeErr = -93 #no network bridge for non-local send
eLenErr = -92 #Length error ddpLenErr
eMultiErr = -91 #Multicast address error ddpSktErr
breakRecd = -90 #Break received (SCC)
rcvrErr = -89 #SCC receiver error (framing; parity; OR)
prInitErr = -88 #InitUtil found the parameter ram uninitialized
prWrErr = -87 #parameter ram written didn't read-verify
clkWrErr = -86 #time written did not verify
clkRdErr = -85 #unable to read same clock value twice
verErr = -84 #track failed to verify
fmt2Err = -83 #can't get enough sync
fmt1Err = -82 #can't find sector 0 after track format
sectNFErr = -81 #sector number never found on a track
seekErr = -80 #track number wrong on address mark
spdAdjErr = -79 #unable to correctly adjust disk speed
twoSideErr = -78 #tried to read 2nd side on a 1-sided drive
initIWMErr = -77 #unable to initialize IWM
tk0BadErr = -76 #track 0 detect doesn't change
cantStepErr = -75 #step handshake failed
wrUnderrun = -74 #write underrun occurred
badDBtSlp = -73 #bad data mark bit slip nibbles
badDCksum = -72 #bad data mark checksum
noDtaMkErr = -71 #couldn't find a data mark header
badBtSlpErr = -70 #bad addr mark bit slip nibbles
badCksmErr = -69 #addr mark checksum didn't check
dataVerErr = -68 #read verify compare failed
noAdrMkErr = -67 #couldn't find valid addr mark
noNybErr = -66 #couldn't find 5 nybbles in 200 tries
offLinErr = -65 #r/w requested for an off-line drive
fontDecError = -64 #error during font declaration
wrPermErr = -61 #write permissions error
badMDBErr = -60 #bad master directory block
fsRnErr = -59 #file system internal error:during rename the old entry was deleted but could not be restored.
extFSErr = -58 #volume in question belongs to an external fs
noMacDskErr = -57 #not a mac diskette (sig bytes are wrong)
nsDrvErr = -56 #no such drive (tried to mount a bad drive num)
volOnLinErr = -55 #drive volume already on-line at MountVol
permErr = -54 #permissions error (on file open)
volOffLinErr = -53 #volume not on line error (was Ejected)
gfpErr = -52 #get file position error
rfNumErr = -51 #refnum error
paramErr = -50 #error in user parameter list
opWrErr = -49 #file already open with with write permission
dupFNErr = -48 #duplicate filename (rename)
fBsyErr = -47 #File is busy (delete)
vLckdErr = -46 #volume is locked
fLckdErr = -45 #file is locked
wPrErr = -44 #diskette is write protected.
fnfErr = -43 #File not found
tmfoErr = -42 #too many files open
mFulErr = -41 #memory full (open) or file won't fit (load)
posErr = -40 #tried to position to before start of file (r/w)
eofErr = -39 #End of file
fnOpnErr = -38 #File not open
bdNamErr = -37 #there may be no bad names in the final system!
ioErr = -36 #I/O error (bummers)
nsvErr = -35 #no such volume
dskFulErr = -34 #disk full
dirFulErr = -33 #Directory full
dceExtErr = -30 #dce extension error
unitTblFullErr = -29 #unit table has no more entries
notOpenErr = -28 #Couldn't rd/wr/ctl/sts cause driver not opened
iIOAbortErr = -27 #IO abort error (Printing Manager)
dInstErr = -26 #DrvrInstall couldn't find driver in resources
dRemovErr = -25 #tried to remove an open driver
closErr = -24 #I/O System Errors
openErr = -23 #I/O System Errors
unitEmptyErr = -22 #I/O System Errors
badUnitErr = -21 #I/O System Errors
writErr = -20 #I/O System Errors
readErr = -19 #I/O System Errors
statusErr = -18 #I/O System Errors
controlErr = -17 #I/O System Errors
dsExtensionsDisabled = -13 #say Extensions Disabled
dsHD20Installed = -12 #say HD20 Startup
dsDisassemblerInstalled = -11 #say Disassembler Installed
dsMacsBugInstalled = -10 #say MacsBug Installed
seNoDB = -8 #no debugger installed to handle debugger command
SlpTypeErr = -5 #invalid queue element
unimpErr = -4 #unimplemented core routine
corErr = -3 #core routine number out of range
dsNoExtsDisassembler = -2 #not a SysErr, just a placeholder
qErr = -1 #queue element not found during deletion
tsmComponentNoErr = 0 #component result = no error
EPERM = 1 #Operation not permitted
ENOENT = 2 #No such file or directory
ESRCH = 3 #No such process
EINTR = 4 #Interrupted system call
EIO = 5 #Input/output error
ENXIO = 6 #Device not configured
E2BIG = 7 #Argument list too long
ENOEXEC = 8 #Exec format error
EBADF = 9 #Bad file descriptor
ECHILD = 10 #No child processes
EDEADLK = 11 #Resource deadlock avoided
ENOMEM = 12 #Cannot allocate memory
EACCES = 13 #Permission denied
EFAULT = 14 #Bad address
ECANCELED = 15 #Operation cancelled
EBUSY = 16 #Device busy
EEXIST = 17 #File exists
EXDEV = 18 #Cross-device link
ENODEV = 19 #Operation not supported by device
ENOTDIR = 20 #Not a directory
EISDIR = 21 #Is a directory
EINVAL = 22 #Invalid argument
ENFILE = 23 #Too many open files in system
EMFILE = 24 #Too many open files
ENOTTY = 25 #Inappropriate ioctl for device
ESIGPARM = 26 #Signal error
EFBIG = 27 #File too large
ENOSPC = 28 #No space left on device
ESPIPE = 29 #Illegal seek
EROFS = 30 #Read-only file system
EMLINK = 31 #Too many links
EPIPE = 32 #Broken pipe
EDOM = 33 #Numerical argument out of domain
ERANGE = 34 #Result too large
EAGAIN = 35 #Resource temporarily unavailable
EINPROGRESS = 36 #Operation now in progress
EALREADY = 37 #Operation already in progress
ENOTSOCK = 38 #Socket operation on non-socket
EDESTADDRREQ = 39 #Destination address required
EMSGSIZE = 40 #Message too long
EPROTOTYPE = 41 #Protocol wrong type for socket
ENOPROTOOPT = 42 #Protocol not available
EPROTONOSUPPORT = 43 #Protocol not supported
ESOCKTNOSUPPORT = 44 #Socket type not supported
EOPNOTSUPP = 45 #Operation not supported
EPFNOSUPPORT = 46 #Protocol family not supported
EAFNOSUPPORT = 47 #Address family not supported by protocol family
EADDRINUSE = 48 #Address already in use
EADDRNOTAVAIL = 49 #Can't assign requested address
ENETDOWN = 50 #Network is down
ENETUNREACH = 51 #Network is unreachable
ENETRESET = 52 #Network dropped connection on reset
ECONNABORTED = 53 #Software caused connection abort
ECONNRESET = 54 #Connection reset by peer
ENOBUFS = 55 #No buffer space available
EISCONN = 56 #Socket is already connected
ENOTCONN = 57 #Socket is not connected
ESHUTDOWN = 58 #Can't send after socket shutdown
ETOOMANYREFS = 59 #Too many references: can't splice
ETIMEDOUT = 60 #Operation timed out
ECONNREFUSED = 61 #Connection refused
ELOOP = 62 #Too many levels of symbolic links
ENAMETOOLONG = 63 #File name too long
EHOSTDOWN = 64 #Host is down
EHOSTUNREACH = 65 #No route to host
ENOTEMPTY = 66 #Directory not empty
ELOOK = 67 #Internal mapping for kOTLookErr, don't return to client
ENOLCK = 77 #No locks available
ENOSYS = 78 #Function not implemented
EILSEQ = 88 #Wide character encoding error
EUNKNOWN = 99 #Unknown error
| mit |
cloud-fan/spark | examples/src/main/python/mllib/logistic_regression_with_lbfgs_example.py | 27 | 2043 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Logistic Regression With LBFGS Example.
"""
from pyspark import SparkContext
# $example on$
from pyspark.mllib.classification import LogisticRegressionWithLBFGS, LogisticRegressionModel
from pyspark.mllib.regression import LabeledPoint
# $example off$
if __name__ == "__main__":
    sc = SparkContext(appName="PythonLogisticRegressionWithLBFGSExample")

    # $example on$
    def parse_point(line):
        """Turn one space-separated line into a LabeledPoint(label, features)."""
        fields = [float(token) for token in line.split(' ')]
        return LabeledPoint(fields[0], fields[1:])

    # Load and parse the data.
    parsed_data = sc.textFile("data/mllib/sample_svm_data.txt").map(parse_point)

    # Build the model.
    model = LogisticRegressionWithLBFGS.train(parsed_data)

    # Evaluate the model on the training data.
    labels_and_preds = parsed_data.map(lambda p: (p.label, model.predict(p.features)))
    train_err = (labels_and_preds.filter(lambda lp: lp[0] != lp[1]).count()
                 / float(parsed_data.count()))
    print("Training Error = " + str(train_err))

    # Save and load the model.
    model.save(sc, "target/tmp/pythonLogisticRegressionWithLBFGSModel")
    same_model = LogisticRegressionModel.load(
        sc, "target/tmp/pythonLogisticRegressionWithLBFGSModel")
    # $example off$
| apache-2.0 |
simbs/edx-platform | common/djangoapps/track/views/tests/test_segmentio.py | 40 | 21960 | """Ensure we can parse events sent to us from the Segment webhook integration"""
from datetime import datetime
import json
from ddt import ddt, data, unpack
from mock import sentinel
from django.contrib.auth.models import User
from django.test.client import RequestFactory
from django.test.utils import override_settings
from openedx.core.lib.tests.assertions.events import assert_event_matches
from track.middleware import TrackMiddleware
from track.tests import EventTrackingTestCase
from track.views import segmentio
# Shared webhook secret and endpoint used by every fake Segment request.
SECRET = 'anything'
ENDPOINT = '/segmentio/test/event'
# Primary key of the user the tests create/look up.
USER_ID = 10
# Processor chain mirroring the mobile (Segment) tracking pipeline:
# legacy field mapping first, then video-event normalization.
MOBILE_SHIM_PROCESSOR = [
    {
        'ENGINE': 'track.shim.LegacyFieldMappingProcessor'
    },
    {
        'ENGINE': 'track.shim.VideoEventProcessor'
    }
]
def expect_failure_with_message(message):
    """
    Decorator factory: the wrapped test must raise
    ``segmentio.EventValidationError`` whose text matches ``message``,
    and must not emit any tracking event.
    """
    def decorator(func):
        def wrapper(self, *args, **kwargs):
            self.assertRaisesRegexp(
                segmentio.EventValidationError, message,
                func, self, *args, **kwargs
            )
            self.assert_no_events_emitted()
        return wrapper
    return decorator
@ddt
@override_settings(
TRACKING_SEGMENTIO_WEBHOOK_SECRET=SECRET,
TRACKING_IGNORE_URL_PATTERNS=[ENDPOINT],
TRACKING_SEGMENTIO_ALLOWED_TYPES=['track'],
TRACKING_SEGMENTIO_DISALLOWED_SUBSTRING_NAMES=['.bi.'],
TRACKING_SEGMENTIO_SOURCE_MAP={'test-app': 'mobile'},
EVENT_TRACKING_PROCESSORS=MOBILE_SHIM_PROCESSOR,
)
class SegmentIOTrackingTestCase(EventTrackingTestCase):
"""Test processing of Segment events"""
    def setUp(self):
        """Prepare a fresh request factory and show full diffs on failures."""
        super(SegmentIOTrackingTestCase, self).setUp()
        self.maxDiff = None # pylint: disable=invalid-name
        self.request_factory = RequestFactory()
def test_get_request(self):
request = self.request_factory.get(ENDPOINT)
response = segmentio.segmentio_event(request)
self.assertEquals(response.status_code, 405)
self.assert_no_events_emitted()
@override_settings(
TRACKING_SEGMENTIO_WEBHOOK_SECRET=None
)
def test_no_secret_config(self):
request = self.request_factory.post(ENDPOINT)
response = segmentio.segmentio_event(request)
self.assertEquals(response.status_code, 401)
self.assert_no_events_emitted()
def test_no_secret_provided(self):
request = self.request_factory.post(ENDPOINT)
response = segmentio.segmentio_event(request)
self.assertEquals(response.status_code, 401)
self.assert_no_events_emitted()
def test_secret_mismatch(self):
request = self.create_request(key='y')
response = segmentio.segmentio_event(request)
self.assertEquals(response.status_code, 401)
self.assert_no_events_emitted()
def create_request(self, key=None, **kwargs):
"""Create a fake request that emulates a request from the Segment servers to ours"""
if key is None:
key = SECRET
request = self.request_factory.post(ENDPOINT + "?key=" + key, **kwargs)
if 'data' in kwargs:
request.json = json.loads(kwargs['data'])
return request
@data('identify', 'Group', 'Alias', 'Page', 'identify', 'screen')
@expect_failure_with_message(segmentio.WARNING_IGNORED_TYPE)
def test_segmentio_ignore_actions(self, action):
self.post_segmentio_event(action=action)
@data('edx.bi.some_name', 'EDX.BI.CAPITAL_NAME')
@expect_failure_with_message(segmentio.WARNING_IGNORED_TYPE)
def test_segmentio_ignore_names(self, name):
self.post_segmentio_event(name=name)
def post_segmentio_event(self, **kwargs):
"""Post a fake Segment event to the view that processes it"""
request = self.create_request(
data=self.create_segmentio_event_json(**kwargs),
content_type='application/json'
)
segmentio.track_segmentio_event(request)
def create_segmentio_event(self, **kwargs):
"""Populate a fake Segment event with data of interest"""
action = kwargs.get('action', 'Track')
sample_event = {
"userId": kwargs.get('user_id', USER_ID),
"event": "Did something",
"properties": {
'name': kwargs.get('name', str(sentinel.name)),
'data': kwargs.get('data', {}),
'context': {
'course_id': kwargs.get('course_id') or '',
'app_name': 'edx.mobile.android',
}
},
"channel": 'server',
"context": {
"library": {
"name": kwargs.get('library_name', 'test-app'),
"version": "unknown"
},
"app": {
"version": "1.0.1",
},
'userAgent': str(sentinel.user_agent),
},
"receivedAt": "2014-08-27T16:33:39.100Z",
"timestamp": "2014-08-27T16:33:39.215Z",
"type": action.lower(),
"projectId": "u0j33yjkr8",
"messageId": "qy52hwp4",
"version": 2,
"integrations": {},
"options": {
"library": "unknown",
"providers": {}
},
"action": action
}
if 'context' in kwargs:
sample_event['properties']['context'].update(kwargs['context'])
return sample_event
def create_segmentio_event_json(self, **kwargs):
"""Return a json string containing a fake Segment event"""
return json.dumps(self.create_segmentio_event(**kwargs))
@expect_failure_with_message(segmentio.WARNING_IGNORED_SOURCE)
def test_segmentio_ignore_unknown_libraries(self):
self.post_segmentio_event(library_name='foo')
@expect_failure_with_message(segmentio.ERROR_USER_NOT_EXIST)
def test_no_user_for_user_id(self):
self.post_segmentio_event(user_id=40)
@expect_failure_with_message(segmentio.ERROR_INVALID_USER_ID)
def test_invalid_user_id(self):
self.post_segmentio_event(user_id='foobar')
@data('foo/bar/baz', 'course-v1:foo+bar+baz')
def test_success(self, course_id):
middleware = TrackMiddleware()
request = self.create_request(
data=self.create_segmentio_event_json(data={'foo': 'bar'}, course_id=course_id),
content_type='application/json'
)
User.objects.create(pk=USER_ID, username=str(sentinel.username))
middleware.process_request(request)
# The middleware normally emits an event, make sure it doesn't in this case.
self.assert_no_events_emitted()
try:
response = segmentio.segmentio_event(request)
self.assertEquals(response.status_code, 200)
expected_event = {
'accept_language': '',
'referer': '',
'username': str(sentinel.username),
'ip': '',
'session': '',
'event_source': 'mobile',
'event_type': str(sentinel.name),
'name': str(sentinel.name),
'event': {'foo': 'bar'},
'agent': str(sentinel.user_agent),
'page': None,
'time': datetime.strptime("2014-08-27T16:33:39.215Z", "%Y-%m-%dT%H:%M:%S.%fZ"),
'host': 'testserver',
'context': {
'application': {
'name': 'edx.mobile.android',
'version': '1.0.1',
},
'user_id': USER_ID,
'course_id': course_id,
'org_id': u'foo',
'path': ENDPOINT,
'client': {
'library': {
'name': 'test-app',
'version': 'unknown'
},
'app': {
'version': '1.0.1',
},
},
'received_at': datetime.strptime("2014-08-27T16:33:39.100Z", "%Y-%m-%dT%H:%M:%S.%fZ"),
},
}
finally:
middleware.process_response(request, None)
assert_event_matches(expected_event, self.get_event())
def test_invalid_course_id(self):
request = self.create_request(
data=self.create_segmentio_event_json(course_id='invalid'),
content_type='application/json'
)
User.objects.create(pk=USER_ID, username=str(sentinel.username))
segmentio.track_segmentio_event(request)
self.assert_events_emitted()
@expect_failure_with_message(segmentio.ERROR_MISSING_NAME)
def test_missing_name(self):
sample_event_raw = self.create_segmentio_event()
del sample_event_raw['properties']['name']
request = self.create_request(
data=json.dumps(sample_event_raw),
content_type='application/json'
)
User.objects.create(pk=USER_ID, username=str(sentinel.username))
segmentio.track_segmentio_event(request)
@expect_failure_with_message(segmentio.ERROR_MISSING_DATA)
def test_missing_data(self):
sample_event_raw = self.create_segmentio_event()
del sample_event_raw['properties']['data']
request = self.create_request(
data=json.dumps(sample_event_raw),
content_type='application/json'
)
User.objects.create(pk=USER_ID, username=str(sentinel.username))
segmentio.track_segmentio_event(request)
@expect_failure_with_message(segmentio.ERROR_MISSING_TIMESTAMP)
def test_missing_timestamp(self):
sample_event_raw = self.create_event_without_fields('timestamp')
request = self.create_request(
data=json.dumps(sample_event_raw),
content_type='application/json'
)
User.objects.create(pk=USER_ID, username=str(sentinel.username))
segmentio.track_segmentio_event(request)
@expect_failure_with_message(segmentio.ERROR_MISSING_RECEIVED_AT)
def test_missing_received_at(self):
sample_event_raw = self.create_event_without_fields('receivedAt')
request = self.create_request(
data=json.dumps(sample_event_raw),
content_type='application/json'
)
User.objects.create(pk=USER_ID, username=str(sentinel.username))
segmentio.track_segmentio_event(request)
def create_event_without_fields(self, *fields):
"""Create a fake event and remove some fields from it"""
event = self.create_segmentio_event()
for field in fields:
if field in event:
del event[field]
return event
def test_string_user_id(self):
User.objects.create(pk=USER_ID, username=str(sentinel.username))
self.post_segmentio_event(user_id=str(USER_ID))
self.assert_events_emitted()
def test_hiding_failure(self):
sample_event_raw = self.create_event_without_fields('timestamp')
request = self.create_request(
data=json.dumps(sample_event_raw),
content_type='application/json'
)
User.objects.create(pk=USER_ID, username=str(sentinel.username))
response = segmentio.segmentio_event(request)
self.assertEquals(response.status_code, 200)
self.assert_no_events_emitted()
@data(
('edx.video.played', 'play_video'),
('edx.video.paused', 'pause_video'),
('edx.video.stopped', 'stop_video'),
('edx.video.loaded', 'load_video'),
('edx.video.position.changed', 'seek_video'),
('edx.video.transcript.shown', 'show_transcript'),
('edx.video.transcript.hidden', 'hide_transcript'),
)
@unpack
def test_video_event(self, name, event_type):
course_id = 'foo/bar/baz'
middleware = TrackMiddleware()
input_payload = {
'current_time': 132.134456,
'module_id': 'i4x://foo/bar/baz/some_module',
'code': 'mobile'
}
if name == 'edx.video.loaded':
# We use the same expected payload for all of these types of events, but the load video event is the only
# one that is not actually expected to contain a "current time" field. So we remove it from the expected
# event here.
del input_payload['current_time']
request = self.create_request(
data=self.create_segmentio_event_json(
name=name,
data=input_payload,
context={
'open_in_browser_url': 'https://testserver/courses/foo/bar/baz/courseware/Week_1/Activity/2',
'course_id': course_id,
'application': {
'name': 'edx.mobileapp.android',
'version': '29',
'component': 'videoplayer'
}
}),
content_type='application/json'
)
User.objects.create(pk=USER_ID, username=str(sentinel.username))
middleware.process_request(request)
try:
response = segmentio.segmentio_event(request)
self.assertEquals(response.status_code, 200)
expected_event = {
'accept_language': '',
'referer': '',
'username': str(sentinel.username),
'ip': '',
'session': '',
'event_source': 'mobile',
'event_type': event_type,
'name': name,
'agent': str(sentinel.user_agent),
'page': 'https://testserver/courses/foo/bar/baz/courseware/Week_1/Activity',
'time': datetime.strptime("2014-08-27T16:33:39.215Z", "%Y-%m-%dT%H:%M:%S.%fZ"),
'host': 'testserver',
'context': {
'user_id': USER_ID,
'course_id': course_id,
'org_id': 'foo',
'path': ENDPOINT,
'client': {
'library': {
'name': 'test-app',
'version': 'unknown'
},
'app': {
'version': '1.0.1',
},
},
'application': {
'name': 'edx.mobileapp.android',
'version': '29',
'component': 'videoplayer'
},
'received_at': datetime.strptime("2014-08-27T16:33:39.100Z", "%Y-%m-%dT%H:%M:%S.%fZ"),
},
'event': {
'currentTime': 132.134456,
'id': 'i4x-foo-bar-baz-some_module',
'code': 'mobile'
}
}
if name == 'edx.video.loaded':
# We use the same expected payload for all of these types of events, but the load video event is the
# only one that is not actually expected to contain a "current time" field. So we remove it from the
# expected event here.
del expected_event['event']['currentTime']
finally:
middleware.process_response(request, None)
actual_event = self.get_event()
assert_event_matches(expected_event, actual_event)
@data(
# Verify positive slide case. Verify slide to onSlideSeek. Verify edx.video.seeked emitted from iOS v1.0.02 is changed to edx.video.position.changed.
(1, 1, "seek_type", "slide", "onSlideSeek", "edx.video.seeked", "edx.video.position.changed", 'edx.mobileapp.iOS', '1.0.02'),
# Verify negative slide case. Verify slide to onSlideSeek. Verify edx.video.seeked to edx.video.position.changed.
(-2, -2, "seek_type", "slide", "onSlideSeek", "edx.video.seeked", "edx.video.position.changed", 'edx.mobileapp.iOS', '1.0.02'),
# Verify +30 is changed to -30 which is incorrectly emitted in iOS v1.0.02. Verify skip to onSkipSeek
(30, -30, "seek_type", "skip", "onSkipSeek", "edx.video.position.changed", "edx.video.position.changed", 'edx.mobileapp.iOS', '1.0.02'),
# Verify the correct case of -30 is also handled as well. Verify skip to onSkipSeek
(-30, -30, "seek_type", "skip", "onSkipSeek", "edx.video.position.changed", "edx.video.position.changed", 'edx.mobileapp.iOS', '1.0.02'),
# Verify positive slide case where onSkipSeek is changed to onSlideSkip. Verify edx.video.seeked emitted from Android v1.0.02 is changed to edx.video.position.changed.
(1, 1, "type", "onSkipSeek", "onSlideSeek", "edx.video.seeked", "edx.video.position.changed", 'edx.mobileapp.android', '1.0.02'),
# Verify positive slide case where onSkipSeek is changed to onSlideSkip. Verify edx.video.seeked emitted from Android v1.0.02 is changed to edx.video.position.changed.
(-2, -2, "type", "onSkipSeek", "onSlideSeek", "edx.video.seeked", "edx.video.position.changed", 'edx.mobileapp.android', '1.0.02'),
# Verify positive skip case where onSkipSeek is not changed and does not become negative.
(30, 30, "type", "onSkipSeek", "onSkipSeek", "edx.video.position.changed", "edx.video.position.changed", 'edx.mobileapp.android', '1.0.02'),
# Verify positive skip case where onSkipSeek is not changed.
(-30, -30, "type", "onSkipSeek", "onSkipSeek", "edx.video.position.changed", "edx.video.position.changed", 'edx.mobileapp.android', '1.0.02')
)
@unpack
def test_previous_builds(self,
requested_skip_interval,
expected_skip_interval,
seek_type_key,
seek_type,
expected_seek_type,
name,
expected_name,
platform,
version,
):
"""
Test backwards compatibility of previous app builds
iOS version 1.0.02: Incorrectly emits the skip back 30 seconds as +30
instead of -30.
Android version 1.0.02: Skip and slide were both being returned as a
skip. Skip or slide is determined by checking if the skip time is == -30
Additionally, for both of the above mentioned versions, edx.video.seeked
was sent instead of edx.video.position.changed
"""
course_id = 'foo/bar/baz'
middleware = TrackMiddleware()
input_payload = {
"code": "mobile",
"new_time": 89.699177437,
"old_time": 119.699177437,
seek_type_key: seek_type,
"requested_skip_interval": requested_skip_interval,
'module_id': 'i4x://foo/bar/baz/some_module',
}
request = self.create_request(
data=self.create_segmentio_event_json(
name=name,
data=input_payload,
context={
'open_in_browser_url': 'https://testserver/courses/foo/bar/baz/courseware/Week_1/Activity/2',
'course_id': course_id,
'application': {
'name': platform,
'version': version,
'component': 'videoplayer'
}
},
),
content_type='application/json'
)
User.objects.create(pk=USER_ID, username=str(sentinel.username))
middleware.process_request(request)
try:
response = segmentio.segmentio_event(request)
self.assertEquals(response.status_code, 200)
expected_event = {
'accept_language': '',
'referer': '',
'username': str(sentinel.username),
'ip': '',
'session': '',
'event_source': 'mobile',
'event_type': "seek_video",
'name': expected_name,
'agent': str(sentinel.user_agent),
'page': 'https://testserver/courses/foo/bar/baz/courseware/Week_1/Activity',
'time': datetime.strptime("2014-08-27T16:33:39.215Z", "%Y-%m-%dT%H:%M:%S.%fZ"),
'host': 'testserver',
'context': {
'user_id': USER_ID,
'course_id': course_id,
'org_id': 'foo',
'path': ENDPOINT,
'client': {
'library': {
'name': 'test-app',
'version': 'unknown'
},
'app': {
'version': '1.0.1',
},
},
'application': {
'name': platform,
'version': version,
'component': 'videoplayer'
},
'received_at': datetime.strptime("2014-08-27T16:33:39.100Z", "%Y-%m-%dT%H:%M:%S.%fZ"),
},
'event': {
"code": "mobile",
"new_time": 89.699177437,
"old_time": 119.699177437,
"type": expected_seek_type,
"requested_skip_interval": expected_skip_interval,
'id': 'i4x-foo-bar-baz-some_module',
}
}
finally:
middleware.process_response(request, None)
actual_event = self.get_event()
assert_event_matches(expected_event, actual_event)
| agpl-3.0 |
cristian99garcia/pilas-activity | pilas/actores/globoelegir.py | 1 | 1510 | # -*- encoding: utf-8 -*-
import pilas
from pilas.actores.globo import Globo
class GloboElegir(Globo):
    """A speech balloon showing a prompt plus a selectable list of options.

    When the user picks an option, ``funcion_a_invocar`` is called with it,
    the dialogue advances, and the selection widget is removed.
    """
    def __init__(self, texto, opciones, funcion_a_invocar, x=0, y=0, dialogo=None):
        self.dialogo = dialogo
        self.opciones = opciones
        self.funcion_a_invocar = funcion_a_invocar
        espacio = "\n" * (len(opciones) +1) # blank lines reserving room inside the balloon for the selection list
        Globo.__init__(self, texto + espacio, x, y, dialogo=dialogo)
        self.lista_seleccion = pilas.interfaz.ListaSeleccion(opciones, self._cuando_selecciona_opcion, x, y)
        # NOTE(review): presumably pilas' animated-attribute syntax — start
        # at scale 0.1, then interpolate to 1 over 0.2 seconds; confirm.
        self.lista_seleccion.escala = 0.1
        self.lista_seleccion.escala = [1], 0.2
    def colocar_origen_del_globo(self, x, y):
        # Anchor the selection list's bottom-right corner near the balloon origin.
        self.lista_seleccion.centro = ("derecha", "abajo")
        self.lista_seleccion.x = x - 10
        self.lista_seleccion.y = y - 10
    def _obtener_area_para_el_texto(self, texto):
        # The balloon must be large enough for the prompt text plus the
        # rendered option list; both areas are measured at font size 14.
        ancho, alto = self.lienzo.obtener_area_de_texto(texto, tamano=14)
        opciones_ancho, opciones_alto = self.lienzo.obtener_area_para_lista_de_texto(self.opciones, tamano=14)
        return ancho + opciones_ancho, alto + opciones_alto
    def _escribir_texto(self, texto):
        self.lienzo.escribir(texto, 12, 25, tamano=14)
    def cuando_quieren_avanzar(self, *k):
        # Ignore generic "advance" requests: this balloon only moves on
        # once an option has been chosen (see _cuando_selecciona_opcion).
        pass
    def _cuando_selecciona_opcion(self, opcion):
        # Forward the chosen option, advance the dialogue, drop the widget.
        self.funcion_a_invocar(opcion)
        Globo.cuando_quieren_avanzar(self)
        self.lista_seleccion.eliminar()
| gpl-3.0 |
Innovahn/odoo.old | openerp/report/render/rml2html/utils.py | 438 | 2386 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2005, Fabien Pinckaers, UCL, FSA
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################
import re
import reportlab
import reportlab.lib.units
# (compiled regex, multiplier) pairs mapping a size string with an optional
# unit suffix to reportlab points.  A bare number is taken as-is (factor 1)
# and "px" is approximated as 0.7 points; order matters — the bare-number
# pattern must come last because it also matches the numeric prefix.
units = [
    (re.compile('^(-?[0-9\.]+)\s*in$'), reportlab.lib.units.inch),
    (re.compile('^(-?[0-9\.]+)\s*cm$'), reportlab.lib.units.cm),
    (re.compile('^(-?[0-9\.]+)\s*mm$'), reportlab.lib.units.mm),
    (re.compile('^(-?[0-9\.]+)\s*px$'), 0.7),
    (re.compile('^(-?[0-9\.]+)\s*$'), 1)
]
def unit_get(size):
    """Convert a size string such as "2.5cm" or "12px" to an integer number
    of scaled points; return False when *size* matches no known unit."""
    for pattern, factor in units:
        match = pattern.search(size, 0)
        if match:
            # The extra 1.3 scaling mirrors the rml2html rendering tweak.
            return int(factor * float(match.group(1)) * 1.3)
    return False
def tuple_int_get(node, attr_name, default=None):
    """Parse attribute ``attr_name`` of ``node`` as a comma-separated list
    of ints; return ``default`` when the attribute is missing or empty."""
    raw = node.get(attr_name)
    if not raw:
        return default
    return [int(part) for part in raw.split(',')]
def bool_get(value):
    """Interpret an XML attribute value as a boolean.

    Returns True for "1" or any capitalization of "yes", False otherwise.
    The value is coerced with ``str()`` first: the previous implementation
    called ``value.lower()`` directly and raised AttributeError for
    non-string inputs such as ``None`` or ``0``.
    """
    text = str(value)
    return text == "1" or text.lower() == 'yes'
def attr_get(node, attrs, dict=None):
    """Extract attributes from an XML node into a plain dict.

    ``attrs``: attribute names parsed as sizes via unit_get().
    ``dict``: maps extra attribute names to a coercion kind — 'str',
    'bool' or 'int'.  NOTE(review): this parameter shadows the ``dict``
    builtin; kept as-is because it is part of the public signature.
    Falsy attribute values ('' or absent) are skipped in both passes.
    """
    if dict is None:
        dict = {}
    res = {}
    for name in attrs:
        if node.get(name):
            res[name] = unit_get(node.get(name))
    for key in dict:
        if node.get(key):
            if dict[key]=='str':
                res[key] = str(node.get(key))
            elif dict[key]=='bool':
                res[key] = bool_get(node.get(key))
            elif dict[key]=='int':
                res[key] = int(node.get(key))
    return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
bertucho/epic-movie-quotes-quiz | dialogos/build/Twisted/twisted/web/test/test_xml.py | 8 | 41745 | # -*- test-case-name: twisted.web.test.test_xml -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Some fairly inadequate testcases for Twisted XML support.
"""
from twisted.trial.unittest import TestCase
from twisted.web import sux
from twisted.web import microdom
from twisted.web import domhelpers
class Sux0r(sux.XMLParser):
    """Minimal SUX parser that records every start-tag and text event."""

    def __init__(self):
        self.tokens = []

    def getTagStarts(self):
        """Return only the recorded start-tag tokens, in arrival order."""
        return list(filter(lambda tok: tok[0] == 'start', self.tokens))

    def gotTagStart(self, name, attrs):
        self.tokens.append(("start", name, attrs))

    def gotText(self, text):
        self.tokens.append(("text", text))
class SUXTests(TestCase):
    """Sanity checks for the SUX streaming parser."""

    def testBork(self):
        """Three unclosed start tags must yield exactly three start events."""
        markup = "<bork><bork><bork>"
        parser = Sux0r()
        parser.connectionMade()
        parser.dataReceived(markup)
        self.assertEqual(len(parser.getTagStarts()), 3)
class MicroDOMTests(TestCase):
def test_leadingTextDropping(self):
"""
Make sure that if there's no top-level node lenient-mode won't
drop leading text that's outside of any elements.
"""
s = "Hi orders! <br>Well. <br>"
d = microdom.parseString(s, beExtremelyLenient=True)
self.assertEqual(d.firstChild().toxml(),
'<html>Hi orders! <br />Well. <br /></html>')
def test_trailingTextDropping(self):
"""
Ensure that no *trailing* text in a mal-formed
no-top-level-element document(s) will not be dropped.
"""
s = "<br>Hi orders!"
d = microdom.parseString(s, beExtremelyLenient=True)
self.assertEqual(d.firstChild().toxml(),
'<html><br />Hi orders!</html>')
def test_noTags(self):
"""
A string with nothing that looks like a tag at all should just
be parsed as body text.
"""
s = "Hi orders!"
d = microdom.parseString(s, beExtremelyLenient=True)
self.assertEqual(d.firstChild().toxml(),
"<html>Hi orders!</html>")
def test_surroundingCrap(self):
"""
If a document is surrounded by non-xml text, the text should
be remain in the XML.
"""
s = "Hi<br> orders!"
d = microdom.parseString(s, beExtremelyLenient=True)
self.assertEqual(d.firstChild().toxml(),
"<html>Hi<br /> orders!</html>")
def testCaseSensitiveSoonCloser(self):
s = """
<HTML><BODY>
<P ALIGN="CENTER">
<A HREF="http://www.apache.org/"><IMG SRC="/icons/apache_pb.gif"></A>
</P>
<P>
This is an insane set of text nodes that should NOT be gathered under
the A tag above.
</P>
</BODY></HTML>
"""
d = microdom.parseString(s, beExtremelyLenient=1)
l = domhelpers.findNodesNamed(d.documentElement, 'a')
n = domhelpers.gatherTextNodes(l[0],1).replace(' ',' ')
self.assertEqual(n.find('insane'), -1)
def test_lenientParenting(self):
"""
Test that C{parentNode} attributes are set to meaningful values when
we are parsing HTML that lacks a root node.
"""
# Spare the rod, ruin the child.
s = "<br/><br/>"
d = microdom.parseString(s, beExtremelyLenient=1)
self.assertIdentical(d.documentElement,
d.documentElement.firstChild().parentNode)
def test_lenientParentSingle(self):
"""
Test that the C{parentNode} attribute is set to a meaningful value
when we parse an HTML document that has a non-Element root node.
"""
s = "Hello"
d = microdom.parseString(s, beExtremelyLenient=1)
self.assertIdentical(d.documentElement,
d.documentElement.firstChild().parentNode)
def testUnEntities(self):
s = """
<HTML>
This HTML goes between Stupid <=CrAzY!=> Dumb.
</HTML>
"""
d = microdom.parseString(s, beExtremelyLenient=1)
n = domhelpers.gatherTextNodes(d)
self.assertNotEquals(n.find('>'), -1)
def testEmptyError(self):
self.assertRaises(sux.ParseError, microdom.parseString, "")
def testTameDocument(self):
s = """
<test>
<it>
<is>
<a>
test
</a>
</is>
</it>
</test>
"""
d = microdom.parseString(s)
self.assertEqual(
domhelpers.gatherTextNodes(d.documentElement).strip() ,'test')
def testAwfulTagSoup(self):
s = """
<html>
<head><title> I send you this message to have your advice!!!!</titl e
</headd>
<body bgcolor alink hlink vlink>
<h1><BLINK>SALE</blINK> TWENTY MILLION EMAILS & FUR COAT NOW
FREE WITH `ENLARGER'</h1>
YES THIS WONDERFUL AWFER IS NOW HERER!!!
<script LANGUAGE="javascript">
function give_answers() {
if (score < 70) {
alert("I hate you");
}}
</script><a href=/foo.com/lalal name=foo>lalal</a>
</body>
</HTML>
"""
d = microdom.parseString(s, beExtremelyLenient=1)
l = domhelpers.findNodesNamed(d.documentElement, 'blink')
self.assertEqual(len(l), 1)
def testScriptLeniency(self):
s = """
<script>(foo < bar) and (bar > foo)</script>
<script language="javascript">foo </scrip bar </script>
<script src="foo">
<script src="foo">baz</script>
<script /><script></script>
"""
d = microdom.parseString(s, beExtremelyLenient=1)
self.assertEqual(d.firstChild().firstChild().firstChild().data,
"(foo < bar) and (bar > foo)")
self.assertEqual(
d.firstChild().getElementsByTagName("script")[1].firstChild().data,
"foo </scrip bar ")
def testScriptLeniencyIntelligence(self):
# if there is comment or CDATA in script, the autoquoting in bEL mode
# should not happen
s = """<script><!-- lalal --></script>"""
self.assertEqual(
microdom.parseString(s, beExtremelyLenient=1).firstChild().toxml(), s)
s = """<script><![CDATA[lalal]]></script>"""
self.assertEqual(
microdom.parseString(s, beExtremelyLenient=1).firstChild().toxml(), s)
s = """<script> // <![CDATA[
lalal
//]]></script>"""
self.assertEqual(
microdom.parseString(s, beExtremelyLenient=1).firstChild().toxml(), s)
def testPreserveCase(self):
s = '<eNcApSuLaTe><sUxor></sUxor><bOrk><w00T>TeXt</W00t></BoRk></EnCaPsUlAtE>'
s2 = s.lower().replace('text', 'TeXt')
# these are the only two option permutations that *can* parse the above
d = microdom.parseString(s, caseInsensitive=1, preserveCase=1)
d2 = microdom.parseString(s, caseInsensitive=1, preserveCase=0)
# caseInsensitive=0 preserveCase=0 is not valid, it's converted to
# caseInsensitive=0 preserveCase=1
d3 = microdom.parseString(s2, caseInsensitive=0, preserveCase=1)
d4 = microdom.parseString(s2, caseInsensitive=1, preserveCase=0)
d5 = microdom.parseString(s2, caseInsensitive=1, preserveCase=1)
# this is slightly contrived, toxml() doesn't need to be identical
# for the documents to be equivalent (i.e. <b></b> to <b/>),
# however this assertion tests preserving case for start and
# end tags while still matching stuff like <bOrk></BoRk>
self.assertEqual(d.documentElement.toxml(), s)
self.assert_(d.isEqualToDocument(d2), "%r != %r" % (d.toxml(), d2.toxml()))
self.assert_(d2.isEqualToDocument(d3), "%r != %r" % (d2.toxml(), d3.toxml()))
# caseInsensitive=0 on the left, NOT perserveCase=1 on the right
## XXX THIS TEST IS TURNED OFF UNTIL SOMEONE WHO CARES ABOUT FIXING IT DOES
#self.failIf(d3.isEqualToDocument(d2), "%r == %r" % (d3.toxml(), d2.toxml()))
self.assert_(d3.isEqualToDocument(d4), "%r != %r" % (d3.toxml(), d4.toxml()))
self.assert_(d4.isEqualToDocument(d5), "%r != %r" % (d4.toxml(), d5.toxml()))
def testDifferentQuotes(self):
s = '<test a="a" b=\'b\' />'
d = microdom.parseString(s)
e = d.documentElement
self.assertEqual(e.getAttribute('a'), 'a')
self.assertEqual(e.getAttribute('b'), 'b')
def testLinebreaks(self):
s = '<test \na="a"\n\tb="#b" />'
d = microdom.parseString(s)
e = d.documentElement
self.assertEqual(e.getAttribute('a'), 'a')
self.assertEqual(e.getAttribute('b'), '#b')
def testMismatchedTags(self):
for s in '<test>', '<test> </tset>', '</test>':
self.assertRaises(microdom.MismatchedTags, microdom.parseString, s)
def testComment(self):
s = "<bar><!--<foo />--></bar>"
d = microdom.parseString(s)
e = d.documentElement
self.assertEqual(e.nodeName, "bar")
c = e.childNodes[0]
self.assert_(isinstance(c, microdom.Comment))
self.assertEqual(c.value, "<foo />")
c2 = c.cloneNode()
self.assert_(c is not c2)
self.assertEqual(c2.toxml(), "<!--<foo />-->")
def testText(self):
d = microdom.parseString("<bar>xxxx</bar>").documentElement
text = d.childNodes[0]
self.assert_(isinstance(text, microdom.Text))
self.assertEqual(text.value, "xxxx")
clone = text.cloneNode()
self.assert_(clone is not text)
self.assertEqual(clone.toxml(), "xxxx")
def testEntities(self):
nodes = microdom.parseString("<b>&AB;</b>").documentElement.childNodes
self.assertEqual(len(nodes), 2)
self.assertEqual(nodes[0].data, "&")
self.assertEqual(nodes[1].data, "AB;")
self.assertEqual(nodes[0].cloneNode().toxml(), "&")
for n in nodes:
self.assert_(isinstance(n, microdom.EntityReference))
def testCData(self):
s = '<x><![CDATA[</x>\r\n & foo]]></x>'
cdata = microdom.parseString(s).documentElement.childNodes[0]
self.assert_(isinstance(cdata, microdom.CDATASection))
self.assertEqual(cdata.data, "</x>\r\n & foo")
self.assertEqual(cdata.cloneNode().toxml(), "<![CDATA[</x>\r\n & foo]]>")
def testSingletons(self):
s = "<foo><b/><b /><b\n/></foo>"
s2 = "<foo><b/><b/><b/></foo>"
nodes = microdom.parseString(s).documentElement.childNodes
nodes2 = microdom.parseString(s2).documentElement.childNodes
self.assertEqual(len(nodes), 3)
for (n, n2) in zip(nodes, nodes2):
self.assert_(isinstance(n, microdom.Element))
self.assertEqual(n.nodeName, "b")
self.assert_(n.isEqualToNode(n2))
def testAttributes(self):
s = '<foo a="b" />'
node = microdom.parseString(s).documentElement
self.assertEqual(node.getAttribute("a"), "b")
self.assertEqual(node.getAttribute("c"), None)
self.assert_(node.hasAttribute("a"))
self.assert_(not node.hasAttribute("c"))
a = node.getAttributeNode("a")
self.assertEqual(a.value, "b")
node.setAttribute("foo", "bar")
self.assertEqual(node.getAttribute("foo"), "bar")
def testChildren(self):
s = "<foo><bar /><baz /><bax>foo</bax></foo>"
d = microdom.parseString(s).documentElement
self.assertEqual([n.nodeName for n in d.childNodes], ["bar", "baz", "bax"])
self.assertEqual(d.lastChild().nodeName, "bax")
self.assertEqual(d.firstChild().nodeName, "bar")
self.assert_(d.hasChildNodes())
self.assert_(not d.firstChild().hasChildNodes())
def testMutate(self):
s = "<foo />"
s1 = '<foo a="b"><bar/><foo/></foo>'
s2 = '<foo a="b">foo</foo>'
d = microdom.parseString(s).documentElement
d1 = microdom.parseString(s1).documentElement
d2 = microdom.parseString(s2).documentElement
d.appendChild(d.cloneNode())
d.setAttribute("a", "b")
child = d.childNodes[0]
self.assertEqual(child.getAttribute("a"), None)
self.assertEqual(child.nodeName, "foo")
d.insertBefore(microdom.Element("bar"), child)
self.assertEqual(d.childNodes[0].nodeName, "bar")
self.assertEqual(d.childNodes[1], child)
for n in d.childNodes:
self.assertEqual(n.parentNode, d)
self.assert_(d.isEqualToNode(d1))
d.removeChild(child)
self.assertEqual(len(d.childNodes), 1)
self.assertEqual(d.childNodes[0].nodeName, "bar")
t = microdom.Text("foo")
d.replaceChild(t, d.firstChild())
self.assertEqual(d.firstChild(), t)
self.assert_(d.isEqualToNode(d2))
def test_replaceNonChild(self):
"""
L{Node.replaceChild} raises L{ValueError} if the node given to be
replaced is not a child of the node C{replaceChild} is called on.
"""
parent = microdom.parseString('<foo />')
orphan = microdom.parseString('<bar />')
replacement = microdom.parseString('<baz />')
self.assertRaises(
ValueError, parent.replaceChild, replacement, orphan)
def testSearch(self):
s = "<foo><bar id='me' /><baz><foo /></baz></foo>"
s2 = "<fOo><bAr id='me' /><bAz><fOO /></bAz></fOo>"
d = microdom.parseString(s)
d2 = microdom.parseString(s2, caseInsensitive=0, preserveCase=1)
d3 = microdom.parseString(s2, caseInsensitive=1, preserveCase=1)
root = d.documentElement
self.assertEqual(root.firstChild(), d.getElementById('me'))
self.assertEqual(d.getElementsByTagName("foo"),
[root, root.lastChild().firstChild()])
root = d2.documentElement
self.assertEqual(root.firstChild(), d2.getElementById('me'))
self.assertEqual(d2.getElementsByTagName('fOo'), [root])
self.assertEqual(d2.getElementsByTagName('fOO'),
[root.lastChild().firstChild()])
self.assertEqual(d2.getElementsByTagName('foo'), [])
root = d3.documentElement
self.assertEqual(root.firstChild(), d3.getElementById('me'))
self.assertEqual(d3.getElementsByTagName('FOO'),
[root, root.lastChild().firstChild()])
self.assertEqual(d3.getElementsByTagName('fOo'),
[root, root.lastChild().firstChild()])
def testDoctype(self):
s = ('<?xml version="1.0"?>'
'<!DOCTYPE foo PUBLIC "baz" "http://www.example.com/example.dtd">'
'<foo></foo>')
s2 = '<foo/>'
d = microdom.parseString(s)
d2 = microdom.parseString(s2)
self.assertEqual(d.doctype,
'foo PUBLIC "baz" "http://www.example.com/example.dtd"')
self.assertEqual(d.toxml(), s)
self.failIf(d.isEqualToDocument(d2))
self.failUnless(d.documentElement.isEqualToNode(d2.documentElement))
samples = [("<img/>", "<img />"),
("<foo A='b'>x</foo>", '<foo A="b">x</foo>'),
("<foo><BAR /></foo>", "<foo><BAR></BAR></foo>"),
("<foo>hello there & yoyoy</foo>",
"<foo>hello there & yoyoy</foo>"),
]
def testOutput(self):
for s, out in self.samples:
d = microdom.parseString(s, caseInsensitive=0)
d2 = microdom.parseString(out, caseInsensitive=0)
testOut = d.documentElement.toxml()
self.assertEqual(out, testOut)
self.assert_(d.isEqualToDocument(d2))
def testErrors(self):
for s in ["<foo>&am</foo>", "<foo", "<f>&</f>", "<() />"]:
self.assertRaises(Exception, microdom.parseString, s)
def testCaseInsensitive(self):
s = "<foo a='b'><BAx>x</bax></FOO>"
s2 = '<foo a="b"><bax>x</bax></foo>'
s3 = "<FOO a='b'><BAx>x</BAx></FOO>"
s4 = "<foo A='b'>x</foo>"
d = microdom.parseString(s)
d2 = microdom.parseString(s2)
d3 = microdom.parseString(s3, caseInsensitive=1)
d4 = microdom.parseString(s4, caseInsensitive=1, preserveCase=1)
d5 = microdom.parseString(s4, caseInsensitive=1, preserveCase=0)
d6 = microdom.parseString(s4, caseInsensitive=0, preserveCase=0)
out = microdom.parseString(s).documentElement.toxml()
self.assertRaises(microdom.MismatchedTags, microdom.parseString,
s, caseInsensitive=0)
self.assertEqual(out, s2)
self.failUnless(d.isEqualToDocument(d2))
self.failUnless(d.isEqualToDocument(d3))
self.failUnless(d4.documentElement.hasAttribute('a'))
self.failIf(d6.documentElement.hasAttribute('a'))
self.assertEqual(d4.documentElement.toxml(), '<foo A="b">x</foo>')
self.assertEqual(d5.documentElement.toxml(), '<foo a="b">x</foo>')
def testEatingWhitespace(self):
s = """<hello>
</hello>"""
d = microdom.parseString(s)
self.failUnless(not d.documentElement.hasChildNodes(),
d.documentElement.childNodes)
self.failUnless(d.isEqualToDocument(microdom.parseString('<hello></hello>')))
def testLenientAmpersand(self):
prefix = "<?xml version='1.0'?>"
# we use <pre> so space will be preserved
for i, o in [("&", "&"),
("& ", "& "),
("&", "&"),
("&hello monkey", "&hello monkey")]:
d = microdom.parseString("%s<pre>%s</pre>"
% (prefix, i), beExtremelyLenient=1)
self.assertEqual(d.documentElement.toxml(), "<pre>%s</pre>" % o)
# non-space preserving
d = microdom.parseString("<t>hello & there</t>", beExtremelyLenient=1)
self.assertEqual(d.documentElement.toxml(), "<t>hello & there</t>")
def testInsensitiveLenient(self):
# testing issue #537
d = microdom.parseString(
"<?xml version='1.0'?><bar><xA><y>c</Xa> <foo></bar>",
beExtremelyLenient=1)
self.assertEqual(d.documentElement.firstChild().toxml(), "<xa><y>c</y></xa>")
def testLaterCloserSimple(self):
s = "<ul><li>foo<li>bar<li>baz</ul>"
d = microdom.parseString(s, beExtremelyLenient=1)
expected = "<ul><li>foo</li><li>bar</li><li>baz</li></ul>"
actual = d.documentElement.toxml()
self.assertEqual(expected, actual)
def testLaterCloserCaseInsensitive(self):
s = "<DL><p><DT>foo<DD>bar</DL>"
d = microdom.parseString(s, beExtremelyLenient=1)
expected = "<dl><p></p><dt>foo</dt><dd>bar</dd></dl>"
actual = d.documentElement.toxml()
self.assertEqual(expected, actual)
def testLaterCloserDL(self):
s = ("<dl>"
"<dt>word<dd>definition"
"<dt>word<dt>word<dd>definition<dd>definition"
"</dl>")
expected = ("<dl>"
"<dt>word</dt><dd>definition</dd>"
"<dt>word</dt><dt>word</dt><dd>definition</dd><dd>definition</dd>"
"</dl>")
d = microdom.parseString(s, beExtremelyLenient=1)
actual = d.documentElement.toxml()
self.assertEqual(expected, actual)
def testUnicodeTolerance(self):
import struct
s = '<foo><bar><baz /></bar></foo>'
j =(u'<?xml version="1.0" encoding="UCS-2" ?>\r\n<JAPANESE>\r\n'
u'<TITLE>\u5c02\u9580\u5bb6\u30ea\u30b9\u30c8 </TITLE></JAPANESE>')
j2=('\xff\xfe<\x00?\x00x\x00m\x00l\x00 \x00v\x00e\x00r\x00s\x00i\x00o'
'\x00n\x00=\x00"\x001\x00.\x000\x00"\x00 \x00e\x00n\x00c\x00o\x00d'
'\x00i\x00n\x00g\x00=\x00"\x00U\x00C\x00S\x00-\x002\x00"\x00 \x00?'
'\x00>\x00\r\x00\n\x00<\x00J\x00A\x00P\x00A\x00N\x00E\x00S\x00E'
'\x00>\x00\r\x00\n\x00<\x00T\x00I\x00T\x00L\x00E\x00>\x00\x02\\'
'\x80\x95\xb6[\xea0\xb90\xc80 \x00<\x00/\x00T\x00I\x00T\x00L\x00E'
'\x00>\x00<\x00/\x00J\x00A\x00P\x00A\x00N\x00E\x00S\x00E\x00>\x00')
def reverseBytes(s):
fmt = str(len(s) // 2) + 'H'
return struct.pack('<' + fmt, *struct.unpack('>' + fmt, s))
urd = microdom.parseString(reverseBytes(s.encode('UTF-16')))
ud = microdom.parseString(s.encode('UTF-16'))
sd = microdom.parseString(s)
self.assert_(ud.isEqualToDocument(sd))
self.assert_(ud.isEqualToDocument(urd))
ud = microdom.parseString(j)
urd = microdom.parseString(reverseBytes(j2))
sd = microdom.parseString(j2)
self.assert_(ud.isEqualToDocument(sd))
self.assert_(ud.isEqualToDocument(urd))
# test that raw text still gets encoded
# test that comments get encoded
j3=microdom.parseString(u'<foo/>')
hdr='<?xml version="1.0"?>'
div=microdom.lmx().text(u'\u221a', raw=1).node
de=j3.documentElement
de.appendChild(div)
de.appendChild(j3.createComment(u'\u221a'))
self.assertEqual(j3.toxml(), hdr+
u'<foo><div>\u221a</div><!--\u221a--></foo>'.encode('utf8'))
def testNamedChildren(self):
tests = {"<foo><bar /><bar unf='1' /><bar>asdfadsf</bar>"
"<bam/></foo>" : 3,
'<foo>asdf</foo>' : 0,
'<foo><bar><bar></bar></bar></foo>' : 1,
}
for t in tests.keys():
node = microdom.parseString(t).documentElement
result = domhelpers.namedChildren(node, 'bar')
self.assertEqual(len(result), tests[t])
if result:
self.assert_(hasattr(result[0], 'tagName'))
def testCloneNode(self):
s = '<foo a="b"><bax>x</bax></foo>'
node = microdom.parseString(s).documentElement
clone = node.cloneNode(deep=1)
self.failIfEquals(node, clone)
self.assertEqual(len(node.childNodes), len(clone.childNodes))
c1, c2 = node.firstChild(), clone.firstChild()
self.failIfEquals(c1, c2)
self.assertEqual(len(c1.childNodes), len(c2.childNodes))
self.failIfEquals(c1.firstChild(), c2.firstChild())
self.assertEqual(s, clone.toxml())
self.assertEqual(node.namespace, clone.namespace)
def testCloneDocument(self):
s = ('<?xml version="1.0"?>'
'<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"'
'"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"><foo></foo>')
node = microdom.parseString(s)
clone = node.cloneNode(deep=1)
self.failIfEquals(node, clone)
self.assertEqual(len(node.childNodes), len(clone.childNodes))
self.assertEqual(s, clone.toxml())
self.failUnless(clone.isEqualToDocument(node))
self.failUnless(node.isEqualToDocument(clone))
def testLMX(self):
n = microdom.Element("p")
lmx = microdom.lmx(n)
lmx.text("foo")
b = lmx.b(a="c")
b.foo()["z"] = "foo"
b.foo()
b.add("bar", c="y")
s = '<p>foo<b a="c"><foo z="foo"></foo><foo></foo><bar c="y"></bar></b></p>'
self.assertEqual(s, n.toxml())
def testDict(self):
"""
Returns a dictionary which is hashable.
"""
n = microdom.Element("p")
hash(n)
def testEscaping(self):
# issue 590
raw = "&'some \"stuff\"', <what up?>"
cooked = "&'some "stuff"', <what up?>"
esc1 = microdom.escape(raw)
self.assertEqual(esc1, cooked)
self.assertEqual(microdom.unescape(esc1), raw)
def testNamespaces(self):
s = '''
<x xmlns="base">
<y />
<y q="1" x:q="2" y:q="3" />
<y:y xml:space="1">here is some space </y:y>
<y:y />
<x:y />
</x>
'''
d = microdom.parseString(s)
# at least make sure it doesn't traceback
s2 = d.toprettyxml()
self.assertEqual(d.documentElement.namespace,
"base")
self.assertEqual(d.documentElement.getElementsByTagName("y")[0].namespace,
"base")
self.assertEqual(
d.documentElement.getElementsByTagName("y")[1].getAttributeNS('base','q'),
'1')
d2 = microdom.parseString(s2)
self.assertEqual(d2.documentElement.namespace,
"base")
self.assertEqual(d2.documentElement.getElementsByTagName("y")[0].namespace,
"base")
self.assertEqual(
d2.documentElement.getElementsByTagName("y")[1].getAttributeNS('base','q'),
'1')
def testNamespaceDelete(self):
"""
Test that C{toxml} can support xml structures that remove namespaces.
"""
s1 = ('<?xml version="1.0"?><html xmlns="http://www.w3.org/TR/REC-html40">'
'<body xmlns=""></body></html>')
s2 = microdom.parseString(s1).toxml()
self.assertEqual(s1, s2)
def testNamespaceInheritance(self):
"""
Check that unspecified namespace is a thing separate from undefined
namespace. This test added after discovering some weirdness in Lore.
"""
# will only work if childNodes is mutated. not sure why.
child = microdom.Element('ol')
parent = microdom.Element('div', namespace='http://www.w3.org/1999/xhtml')
parent.childNodes = [child]
self.assertEqual(parent.toxml(),
'<div xmlns="http://www.w3.org/1999/xhtml"><ol></ol></div>')
def test_prefixedTags(self):
"""
XML elements with a prefixed name as per upper level tag definition
have a start-tag of C{"<prefix:tag>"} and an end-tag of
C{"</prefix:tag>"}.
Refer to U{http://www.w3.org/TR/xml-names/#ns-using} for details.
"""
outerNamespace = "http://example.com/outer"
innerNamespace = "http://example.com/inner"
document = microdom.Document()
# Create the root in one namespace. Microdom will probably make this
# the default namespace.
root = document.createElement("root", namespace=outerNamespace)
# Give the root some prefixes to use.
root.addPrefixes({innerNamespace: "inner"})
# Append a child to the root from the namespace that prefix is bound
# to.
tag = document.createElement("tag", namespace=innerNamespace)
# Give that tag a child too. This way we test rendering of tags with
# children and without children.
child = document.createElement("child", namespace=innerNamespace)
tag.appendChild(child)
root.appendChild(tag)
document.appendChild(root)
# ok, the xml should appear like this
xmlOk = (
'<?xml version="1.0"?>'
'<root xmlns="http://example.com/outer" '
'xmlns:inner="http://example.com/inner">'
'<inner:tag><inner:child></inner:child></inner:tag>'
'</root>')
xmlOut = document.toxml()
self.assertEqual(xmlOut, xmlOk)
def test_prefixPropagation(self):
"""
Children of prefixed tags respect the default namespace at the point
where they are rendered. Specifically, they are not influenced by the
prefix of their parent as that prefix has no bearing on them.
See U{http://www.w3.org/TR/xml-names/#scoping} for details.
To further clarify the matter, the following::
<root xmlns="http://example.com/ns/test">
<mytag xmlns="http://example.com/ns/mytags">
<mysubtag xmlns="http://example.com/ns/mytags">
<element xmlns="http://example.com/ns/test"></element>
</mysubtag>
</mytag>
</root>
Should become this after all the namespace declarations have been
I{moved up}::
<root xmlns="http://example.com/ns/test"
xmlns:mytags="http://example.com/ns/mytags">
<mytags:mytag>
<mytags:mysubtag>
<element></element>
</mytags:mysubtag>
</mytags:mytag>
</root>
"""
outerNamespace = "http://example.com/outer"
innerNamespace = "http://example.com/inner"
document = microdom.Document()
# creates a root element
root = document.createElement("root", namespace=outerNamespace)
document.appendChild(root)
# Create a child with a specific namespace with a prefix bound to it.
root.addPrefixes({innerNamespace: "inner"})
mytag = document.createElement("mytag",namespace=innerNamespace)
root.appendChild(mytag)
# Create a child of that which has the outer namespace.
mysubtag = document.createElement("mysubtag", namespace=outerNamespace)
mytag.appendChild(mysubtag)
xmlOk = (
'<?xml version="1.0"?>'
'<root xmlns="http://example.com/outer" '
'xmlns:inner="http://example.com/inner">'
'<inner:mytag>'
'<mysubtag></mysubtag>'
'</inner:mytag>'
'</root>'
)
xmlOut = document.toxml()
self.assertEqual(xmlOut, xmlOk)
class BrokenHTMLTests(TestCase):
"""
Tests for when microdom encounters very bad HTML and C{beExtremelyLenient}
is enabled. These tests are inspired by some HTML generated in by a mailer,
which breaks up very long lines by splitting them with '!\n '. The expected
behaviour is loosely modelled on the way Firefox treats very bad HTML.
"""
def checkParsed(self, input, expected, beExtremelyLenient=1):
"""
Check that C{input}, when parsed, produces a DOM where the XML
of the document element is equal to C{expected}.
"""
output = microdom.parseString(input,
beExtremelyLenient=beExtremelyLenient)
self.assertEqual(output.documentElement.toxml(), expected)
def test_brokenAttributeName(self):
"""
Check that microdom does its best to handle broken attribute names.
The important thing is that it doesn't raise an exception.
"""
input = '<body><h1><div al!\n ign="center">Foo</div></h1></body>'
expected = ('<body><h1><div ign="center" al="True">'
'Foo</div></h1></body>')
self.checkParsed(input, expected)
def test_brokenAttributeValue(self):
"""
Check that microdom encompasses broken attribute values.
"""
input = '<body><h1><div align="cen!\n ter">Foo</div></h1></body>'
expected = '<body><h1><div align="cen!\n ter">Foo</div></h1></body>'
self.checkParsed(input, expected)
def test_brokenOpeningTag(self):
"""
Check that microdom does its best to handle broken opening tags.
The important thing is that it doesn't raise an exception.
"""
input = '<body><h1><sp!\n an>Hello World!</span></h1></body>'
expected = '<body><h1><sp an="True">Hello World!</sp></h1></body>'
self.checkParsed(input, expected)
def test_brokenSelfClosingTag(self):
"""
Check that microdom does its best to handle broken self-closing tags
The important thing is that it doesn't raise an exception.
"""
self.checkParsed('<body><span /!\n></body>',
'<body><span></span></body>')
self.checkParsed('<span!\n />', '<span></span>')
def test_brokenClosingTag(self):
"""
Check that microdom does its best to handle broken closing tags.
The important thing is that it doesn't raise an exception.
"""
input = '<body><h1><span>Hello World!</sp!\nan></h1></body>'
expected = '<body><h1><span>Hello World!</span></h1></body>'
self.checkParsed(input, expected)
input = '<body><h1><span>Hello World!</!\nspan></h1></body>'
self.checkParsed(input, expected)
input = '<body><h1><span>Hello World!</span!\n></h1></body>'
self.checkParsed(input, expected)
input = '<body><h1><span>Hello World!<!\n/span></h1></body>'
expected = '<body><h1><span>Hello World!<!></!></span></h1></body>'
self.checkParsed(input, expected)
class NodeTests(TestCase):
"""
Tests for L{Node}.
"""
def test_isNodeEqualTo(self):
"""
L{Node.isEqualToNode} returns C{True} if and only if passed a L{Node}
with the same children.
"""
# A node is equal to itself
node = microdom.Node(object())
self.assertTrue(node.isEqualToNode(node))
another = microdom.Node(object())
# Two nodes with no children are equal
self.assertTrue(node.isEqualToNode(another))
node.appendChild(microdom.Node(object()))
# A node with no children is not equal to a node with a child
self.assertFalse(node.isEqualToNode(another))
another.appendChild(microdom.Node(object()))
# A node with a child and no grandchildren is equal to another node
# with a child and no grandchildren.
self.assertTrue(node.isEqualToNode(another))
# A node with a child and a grandchild is not equal to another node
# with a child and no grandchildren.
node.firstChild().appendChild(microdom.Node(object()))
self.assertFalse(node.isEqualToNode(another))
# A node with a child and a grandchild is equal to another node with a
# child and a grandchild.
another.firstChild().appendChild(microdom.Node(object()))
self.assertTrue(node.isEqualToNode(another))
def test_validChildInstance(self):
"""
Children of L{Node} instances must also be L{Node} instances.
"""
node = microdom.Node()
child = microdom.Node()
# Node.appendChild() only accepts Node instances.
node.appendChild(child)
self.assertRaises(TypeError, node.appendChild, None)
# Node.insertBefore() only accepts Node instances.
self.assertRaises(TypeError, node.insertBefore, child, None)
self.assertRaises(TypeError, node.insertBefore, None, child)
self.assertRaises(TypeError, node.insertBefore, None, None)
# Node.removeChild() only accepts Node instances.
node.removeChild(child)
self.assertRaises(TypeError, node.removeChild, None)
# Node.replaceChild() only accepts Node instances.
self.assertRaises(TypeError, node.replaceChild, child, None)
self.assertRaises(TypeError, node.replaceChild, None, child)
self.assertRaises(TypeError, node.replaceChild, None, None)
class DocumentTests(TestCase):
"""
Tests for L{Document}.
"""
doctype = 'foo PUBLIC "baz" "http://www.example.com/example.dtd"'
def test_isEqualToNode(self):
"""
L{Document.isEqualToNode} returns C{True} if and only if passed a
L{Document} with the same C{doctype} and C{documentElement}.
"""
# A document is equal to itself
document = microdom.Document()
self.assertTrue(document.isEqualToNode(document))
# A document without a doctype or documentElement is equal to another
# document without a doctype or documentElement.
another = microdom.Document()
self.assertTrue(document.isEqualToNode(another))
# A document with a doctype is not equal to a document without a
# doctype.
document.doctype = self.doctype
self.assertFalse(document.isEqualToNode(another))
# Two documents with the same doctype are equal
another.doctype = self.doctype
self.assertTrue(document.isEqualToNode(another))
# A document with a documentElement is not equal to a document without
# a documentElement
document.appendChild(microdom.Node(object()))
self.assertFalse(document.isEqualToNode(another))
# Two documents with equal documentElements are equal.
another.appendChild(microdom.Node(object()))
self.assertTrue(document.isEqualToNode(another))
# Two documents with documentElements which are not equal are not
# equal.
document.documentElement.appendChild(microdom.Node(object()))
self.assertFalse(document.isEqualToNode(another))
def test_childRestriction(self):
"""
L{Document.appendChild} raises L{ValueError} if the document already
has a child.
"""
document = microdom.Document()
child = microdom.Node()
another = microdom.Node()
document.appendChild(child)
self.assertRaises(ValueError, document.appendChild, another)
class EntityReferenceTests(TestCase):
"""
Tests for L{EntityReference}.
"""
def test_isEqualToNode(self):
"""
L{EntityReference.isEqualToNode} returns C{True} if and only if passed
a L{EntityReference} with the same C{eref}.
"""
self.assertTrue(
microdom.EntityReference('quot').isEqualToNode(
microdom.EntityReference('quot')))
self.assertFalse(
microdom.EntityReference('quot').isEqualToNode(
microdom.EntityReference('apos')))
class CharacterDataTests(TestCase):
"""
Tests for L{CharacterData}.
"""
def test_isEqualToNode(self):
"""
L{CharacterData.isEqualToNode} returns C{True} if and only if passed a
L{CharacterData} with the same value.
"""
self.assertTrue(
microdom.CharacterData('foo').isEqualToNode(
microdom.CharacterData('foo')))
self.assertFalse(
microdom.CharacterData('foo').isEqualToNode(
microdom.CharacterData('bar')))
class CommentTests(TestCase):
"""
Tests for L{Comment}.
"""
def test_isEqualToNode(self):
"""
L{Comment.isEqualToNode} returns C{True} if and only if passed a
L{Comment} with the same value.
"""
self.assertTrue(
microdom.Comment('foo').isEqualToNode(
microdom.Comment('foo')))
self.assertFalse(
microdom.Comment('foo').isEqualToNode(
microdom.Comment('bar')))
class TextTests(TestCase):
"""
Tests for L{Text}.
"""
def test_isEqualToNode(self):
"""
L{Text.isEqualToNode} returns C{True} if and only if passed a L{Text}
which represents the same data.
"""
self.assertTrue(
microdom.Text('foo', raw=True).isEqualToNode(
microdom.Text('foo', raw=True)))
self.assertFalse(
microdom.Text('foo', raw=True).isEqualToNode(
microdom.Text('foo', raw=False)))
self.assertFalse(
microdom.Text('foo', raw=True).isEqualToNode(
microdom.Text('bar', raw=True)))
class CDATASectionTests(TestCase):
"""
Tests for L{CDATASection}.
"""
def test_isEqualToNode(self):
"""
L{CDATASection.isEqualToNode} returns C{True} if and only if passed a
L{CDATASection} which represents the same data.
"""
self.assertTrue(
microdom.CDATASection('foo').isEqualToNode(
microdom.CDATASection('foo')))
self.assertFalse(
microdom.CDATASection('foo').isEqualToNode(
microdom.CDATASection('bar')))
class ElementTests(TestCase):
"""
Tests for L{Element}.
"""
def test_isEqualToNode(self):
"""
L{Element.isEqualToNode} returns C{True} if and only if passed a
L{Element} with the same C{nodeName}, C{namespace}, C{childNodes}, and
C{attributes}.
"""
self.assertTrue(
microdom.Element(
'foo', {'a': 'b'}, object(), namespace='bar').isEqualToNode(
microdom.Element(
'foo', {'a': 'b'}, object(), namespace='bar')))
# Elements with different nodeName values do not compare equal.
self.assertFalse(
microdom.Element(
'foo', {'a': 'b'}, object(), namespace='bar').isEqualToNode(
microdom.Element(
'bar', {'a': 'b'}, object(), namespace='bar')))
# Elements with different namespaces do not compare equal.
self.assertFalse(
microdom.Element(
'foo', {'a': 'b'}, object(), namespace='bar').isEqualToNode(
microdom.Element(
'foo', {'a': 'b'}, object(), namespace='baz')))
# Elements with different childNodes do not compare equal.
one = microdom.Element('foo', {'a': 'b'}, object(), namespace='bar')
two = microdom.Element('foo', {'a': 'b'}, object(), namespace='bar')
two.appendChild(microdom.Node(object()))
self.assertFalse(one.isEqualToNode(two))
# Elements with different attributes do not compare equal.
self.assertFalse(
microdom.Element(
'foo', {'a': 'b'}, object(), namespace='bar').isEqualToNode(
microdom.Element(
'foo', {'a': 'c'}, object(), namespace='bar')))
| mit |
marc-sensenich/ansible | lib/ansible/modules/cloud/vmware/vmware_guest_powerstate.py | 24 | 9024 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, Abhijeet Kasurde <akasurde@redhat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: vmware_guest_powerstate
short_description: Manages power states of virtual machines in vCenter
description:
- Power on / Power off / Restart a virtual machine.
version_added: '2.5'
author:
- Abhijeet Kasurde (@Akasurde) <akasurde@redhat.com>
requirements:
- python >= 2.6
- PyVmomi
options:
state:
description:
- Set the state of the virtual machine.
choices: [ powered-off, powered-on, reboot-guest, restarted, shutdown-guest, suspended, present]
default: present
name:
description:
- Name of the virtual machine to work with.
- Virtual machine names in vCenter are not necessarily unique, which may be problematic, see C(name_match).
name_match:
description:
- If multiple virtual machines matching the name, use the first or last found.
default: first
choices: [ first, last ]
uuid:
description:
- UUID of the instance to manage if known, this is VMware's unique identifier.
- This is required if name is not supplied.
folder:
description:
- Destination folder, absolute or relative path to find an existing guest or create the new guest.
- The folder should include the datacenter. ESX's datacenter is ha-datacenter
- 'Examples:'
- ' folder: /ha-datacenter/vm'
- ' folder: ha-datacenter/vm'
- ' folder: /datacenter1/vm'
- ' folder: datacenter1/vm'
- ' folder: /datacenter1/vm/folder1'
- ' folder: datacenter1/vm/folder1'
- ' folder: /folder1/datacenter1/vm'
- ' folder: folder1/datacenter1/vm'
- ' folder: /folder1/datacenter1/vm/folder2'
- ' folder: vm/folder2'
- ' folder: folder2'
default: /vm
scheduled_at:
description:
- Date and time in string format at which specificed task needs to be performed.
- "The required format for date and time - 'dd/mm/yyyy hh:mm'."
- Scheduling task requires vCenter server. A standalone ESXi server does not support this option.
force:
description:
- Ignore warnings and complete the actions.
- This parameter is useful while forcing virtual machine state.
default: False
type: bool
version_added: 2.5
state_change_timeout:
description:
- If the C(state) is set to C(shutdown-guest), by default the module will return immediately after sending the shutdown signal.
- If this argument is set to a positive integer, the module will instead wait for the VM to reach the poweredoff state.
- The value sets a timeout in seconds for the module to wait for the state change.
default: 0
version_added: '2.6'
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r'''
- name: Set the state of a virtual machine to poweroff
vmware_guest_powerstate:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
validate_certs: no
folder: /"{{ datacenter_name }}"/vm/my_folder
name: "{{ guest_name }}"
state: powered-off
delegate_to: localhost
register: deploy
- name: Set the state of a virtual machine to poweroff at given scheduled time
vmware_guest_powerstate:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
folder: /"{{ datacenter_name }}"/vm/my_folder
name: "{{ guest_name }}"
state: powered-off
scheduled_at: "09/01/2018 10:18"
delegate_to: localhost
register: deploy_at_schedule_datetime
- name: Wait for the virtual machine to shutdown
vmware_guest_powerstate:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
name: "{{ guest_name }}"
state: shutdown-guest
state_change_timeout: 200
delegate_to: localhost
register: deploy
'''
RETURN = r''' # '''
try:
from pyVmomi import vim, vmodl
except ImportError:
pass
from datetime import datetime
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import PyVmomi, set_vm_power_state, vmware_argument_spec
from ansible.module_utils._text import to_native
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(
state=dict(type='str', default='present',
choices=['present', 'powered-off', 'powered-on', 'reboot-guest', 'restarted', 'shutdown-guest', 'suspended']),
name=dict(type='str'),
name_match=dict(type='str', choices=['first', 'last'], default='first'),
uuid=dict(type='str'),
folder=dict(type='str', default='/vm'),
force=dict(type='bool', default=False),
scheduled_at=dict(type='str'),
state_change_timeout=dict(type='int', default=0),
)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=False,
mutually_exclusive=[
['name', 'uuid'],
],
)
result = dict(changed=False,)
pyv = PyVmomi(module)
# Check if the VM exists before continuing
vm = pyv.get_vm()
if vm:
# VM already exists, so set power state
scheduled_at = module.params.get('scheduled_at', None)
if scheduled_at:
if not pyv.is_vcenter():
module.fail_json(msg="Scheduling task requires vCenter, hostname %s "
"is an ESXi server." % module.params.get('hostname'))
powerstate = {
'powered-off': vim.VirtualMachine.PowerOff,
'powered-on': vim.VirtualMachine.PowerOn,
'reboot-guest': vim.VirtualMachine.RebootGuest,
'restarted': vim.VirtualMachine.Reset,
'shutdown-guest': vim.VirtualMachine.ShutdownGuest,
'suspended': vim.VirtualMachine.Suspend,
}
dt = ''
try:
dt = datetime.strptime(scheduled_at, '%d/%m/%Y %H:%M')
except ValueError as e:
module.fail_json(msg="Failed to convert given date and time string to Python datetime object,"
"please specify string in 'dd/mm/yyyy hh:mm' format: %s" % to_native(e))
schedule_task_spec = vim.scheduler.ScheduledTaskSpec()
schedule_task_desc = 'Schedule task for vm %s for operation %s at %s' % (vm.name,
module.params.get('state'),
scheduled_at)
schedule_task_spec.name = schedule_task_desc
schedule_task_spec.description = schedule_task_desc
schedule_task_spec.scheduler = vim.scheduler.OnceTaskScheduler()
schedule_task_spec.scheduler.runAt = dt
schedule_task_spec.action = vim.action.MethodAction()
schedule_task_spec.action.name = powerstate[module.params.get('state')]
schedule_task_spec.enabled = True
try:
pyv.content.scheduledTaskManager.CreateScheduledTask(vm, schedule_task_spec)
# As this is async task, we create scheduled task and mark state to changed.
module.exit_json(changed=True)
except vim.fault.InvalidName as e:
module.fail_json(msg="Failed to create scheduled task %s for %s : %s" % (module.params.get('state'),
vm.name,
to_native(e.msg)))
except vim.fault.DuplicateName as e:
module.exit_json(chanaged=False, details=to_native(e.msg))
except vmodl.fault.InvalidArgument as e:
module.fail_json(msg="Failed to create scheduled task %s as specifications "
"given are invalid: %s" % (module.params.get('state'),
to_native(e.msg)))
else:
result = set_vm_power_state(pyv.content, vm, module.params['state'], module.params['force'], module.params['state_change_timeout'])
else:
module.fail_json(msg="Unable to set power state for non-existing virtual machine : '%s'" % (module.params.get('uuid') or module.params.get('name')))
if result.get('failed') is True:
module.fail_json(**result)
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
Mappy/luigi | luigi/file.py | 1 | 4341 | # Copyright (c) 2012 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import os
import random
import tempfile
import shutil
import luigi.util
from target import FileSystem, FileSystemTarget
from luigi.format import FileWrapper
class atomic_file(file):
# Simple class that writes to a temp file and moves it on close()
# Also cleans up the temp file if close is not invoked
def __init__(self, path):
self.__tmp_path = path + '-luigi-tmp-%09d' % random.randrange(0, 1e10)
self.path = path
super(atomic_file, self).__init__(self.__tmp_path, 'w')
def close(self):
super(atomic_file, self).close()
os.rename(self.__tmp_path, self.path)
def __del__(self):
if os.path.exists(self.__tmp_path):
os.remove(self.__tmp_path)
@property
def tmp_path(self):
return self.__tmp_path
def __exit__(self, exc_type, exc, traceback):
" Close/commit the file if there are no exception "
if exc_type:
return
return file.__exit__(self, exc_type, exc, traceback)
class LocalFileSystem(FileSystem):
""" Wrapper for access to file system operations
Work in progress - add things as needed
"""
def exists(self, path):
return os.path.exists(path)
def mkdir(self, path):
os.makedirs(path)
def isdir(self, path):
return os.path.isdir(path)
def remove(self, path, recursive=True):
if recursive and self.isdir(path):
shutil.rmtree(path)
else:
os.remove(path)
class File(FileSystemTarget):
    """Target representing a file on the local file system."""

    # File-system wrapper shared by every File target.
    fs = LocalFileSystem()

    def __init__(self, path=None, format=None, is_tmp=False):
        # path: target location; when omitted, is_tmp must be True and a
        #       random path under the system temp dir is generated.
        # format: optional luigi format piped around reads/writes.
        # is_tmp: when True the file is deleted when this object is GC'd.
        if not path:
            if not is_tmp:
                raise Exception('path or is_tmp must be set')
            path = os.path.join(tempfile.gettempdir(), 'luigi-tmp-%09d' % random.randint(0, 999999999))
        super(File, self).__init__(path)
        self.format = format
        self.is_tmp = is_tmp

    def open(self, mode='r'):
        # 'w' writes go through atomic_file, so a partially written file is
        # never visible at the final path; 'r' wraps the handle so formats
        # can be piped in.
        if mode == 'w':
            # Create folder if it does not exist
            normpath = os.path.normpath(self.path)
            parentfolder = os.path.dirname(normpath)
            if parentfolder and not os.path.exists(parentfolder):
                os.makedirs(parentfolder)

            if self.format:
                return self.format.pipe_writer(atomic_file(self.path))
            else:
                return atomic_file(self.path)
        elif mode == 'r':
            fileobj = FileWrapper(open(self.path, 'r'))
            if self.format:
                return self.format.pipe_reader(fileobj)
            return fileobj
        else:
            raise Exception('mode must be r/w')

    @luigi.util.deprecate_kwarg('fail_if_exists', 'raise_if_exists', False)
    def move(self, new_path, fail_if_exists=False):
        # Rename this file to new_path, creating parent directories first.
        if fail_if_exists and os.path.exists(new_path):
            raise RuntimeError('Destination exists: %s' % new_path)
        d = os.path.dirname(new_path)
        if d and not os.path.exists(d):
            self.fs.mkdir(d)
        os.rename(self.path, new_path)

    def move_dir(self, new_path):
        # Alias of move(); os.rename moves directories as well as files.
        self.move(new_path)

    def remove(self):
        self.fs.remove(self.path)

    @luigi.util.deprecate_kwarg('fail_if_exists', 'raise_if_exists', False)
    def copy(self, new_path, fail_if_exists=False):
        # Copy via a temporary File next to the destination, then move it
        # into place so readers never observe a half-copied file.
        # NOTE(review): tmp.open('w') appears to exist only to create the
        # destination's parent directories; the returned handle is never
        # closed and leaves an empty temp file behind -- confirm intent.
        if fail_if_exists and os.path.exists(new_path):
            raise RuntimeError('Destination exists: %s' % new_path)
        tmp = File(new_path + '-luigi-tmp-%09d' % random.randrange(0, 1e10), is_tmp=True)
        tmp.open('w')
        shutil.copy(self.path, tmp.fn)
        tmp.move(new_path)

    @property
    def fn(self):
        # Shorthand for the target's path.
        return self.path

    def __del__(self):
        # Temporary targets clean themselves up on garbage collection.
        if self.is_tmp and self.exists():
            self.remove()
| apache-2.0 |
MiLk/ansible | lib/ansible/modules/system/ufw.py | 16 | 12151 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Ahti Kitsik <ak@ahtik.com>
# (c) 2014, Jarno Keskikangas <jarno.keskikangas@gmail.com>
# (c) 2013, Aleksey Ovcharenko <aleksey.ovcharenko@gmail.com>
# (c) 2013, James Martin <jmartin@basho.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ufw
short_description: Manage firewall with UFW
description:
- Manage firewall with UFW.
version_added: 1.6
author:
- "Aleksey Ovcharenko (@ovcharenko)"
- "Jarno Keskikangas (@pyykkis)"
- "Ahti Kitsik (@ahtik)"
notes:
- See C(man ufw) for more examples.
requirements:
- C(ufw) package
options:
state:
description:
- C(enabled) reloads firewall and enables firewall on boot.
- C(disabled) unloads firewall and disables firewall on boot.
- C(reloaded) reloads firewall.
- C(reset) disables and resets firewall to installation defaults.
required: false
choices: ['enabled', 'disabled', 'reloaded', 'reset']
policy:
description:
- Change the default policy for incoming or outgoing traffic.
required: false
aliases: ['default']
choices: ['allow', 'deny', 'reject']
direction:
description:
- Select direction for a rule or default policy command.
required: false
choices: ['in', 'out', 'incoming', 'outgoing', 'routed']
logging:
description:
- Toggles logging. Logged packets use the LOG_KERN syslog facility.
choices: ['on', 'off', 'low', 'medium', 'high', 'full']
required: false
insert:
description:
- Insert the corresponding rule as rule number NUM
required: false
rule:
description:
- Add firewall rule
required: false
choices: ['allow', 'deny', 'reject', 'limit']
log:
description:
- Log new connections matched to this rule
required: false
choices: ['yes', 'no']
from_ip:
description:
- Source IP address.
required: false
aliases: ['from', 'src']
default: 'any'
from_port:
description:
- Source port.
required: false
to_ip:
description:
- Destination IP address.
required: false
aliases: ['to', 'dest']
default: 'any'
to_port:
description:
- Destination port.
required: false
aliases: ['port']
proto:
description:
- TCP/IP protocol.
choices: ['any', 'tcp', 'udp', 'ipv6', 'esp', 'ah']
required: false
name:
description:
- Use profile located in C(/etc/ufw/applications.d)
required: false
aliases: ['app']
delete:
description:
- Delete rule.
required: false
choices: ['yes', 'no']
interface:
description:
- Specify interface for rule.
required: false
aliases: ['if']
route:
description:
- Apply the rule to routed/forwarded packets.
required: false
choices: ['yes', 'no']
comment:
description:
- Add a comment to the rule. Requires UFW version >=0.35.
required: false
version_added: "2.4"
'''
EXAMPLES = '''
# Allow everything and enable UFW
- ufw:
state: enabled
policy: allow
# Set logging
- ufw:
logging: on
# Sometimes it is desirable to let the sender know when traffic is
# being denied, rather than simply ignoring it. In these cases, use
# reject instead of deny. In addition, log rejected connections:
- ufw:
rule: reject
port: auth
log: yes
# ufw supports connection rate limiting, which is useful for protecting
# against brute-force login attacks. ufw will deny connections if an IP
# address has attempted to initiate 6 or more connections in the last
# 30 seconds. See http://www.debian-administration.org/articles/187
# for details. Typical usage is:
- ufw:
rule: limit
port: ssh
proto: tcp
# Allow OpenSSH. (Note that as ufw manages its own state, simply removing
# a rule=allow task can leave those ports exposed. Either use delete=yes
# or a separate state=reset task)
- ufw:
rule: allow
name: OpenSSH
# Delete OpenSSH rule
- ufw:
rule: allow
name: OpenSSH
delete: yes
# Deny all access to port 53:
- ufw:
rule: deny
port: 53
# Allow port range 60000-61000
- ufw:
rule: allow
port: '60000:61000'
# Allow all access to tcp port 80:
- ufw:
rule: allow
port: 80
proto: tcp
# Allow all access from RFC1918 networks to this host:
- ufw:
rule: allow
src: '{{ item }}'
with_items:
- 10.0.0.0/8
- 172.16.0.0/12
- 192.168.0.0/16
# Deny access to udp port 514 from host 1.2.3.4 and include a comment:
- ufw:
rule: deny
proto: udp
src: 1.2.3.4
port: 514
comment: "Block syslog"
# Allow incoming access to eth0 from 1.2.3.5 port 5469 to 1.2.3.4 port 5469
- ufw:
rule: allow
interface: eth0
direction: in
proto: udp
src: 1.2.3.5
from_port: 5469
dest: 1.2.3.4
to_port: 5469
# Deny all traffic from the IPv6 2001:db8::/32 to tcp port 25 on this host.
# Note that IPv6 must be enabled in /etc/default/ufw for IPv6 firewalling to work.
- ufw:
rule: deny
proto: tcp
src: '2001:db8::/32'
port: 25
# Deny forwarded/routed traffic from subnet 1.2.3.0/24 to subnet 4.5.6.0/24.
# Can be used to further restrict a global FORWARD policy set to allow
- ufw:
rule: deny
route: yes
src: 1.2.3.0/24
dest: 4.5.6.0/24
'''
from operator import itemgetter
def main():
    """Entry point for the ufw module.

    Parses module parameters, builds and runs the corresponding ufw
    command(s), and reports whether the firewall state or rules changed.
    """
    module = AnsibleModule(
        argument_spec = dict(
            state = dict(default=None, choices=['enabled', 'disabled', 'reloaded', 'reset']),
            default = dict(default=None, aliases=['policy'], choices=['allow', 'deny', 'reject']),
            logging = dict(default=None, choices=['on', 'off', 'low', 'medium', 'high', 'full']),
            direction = dict(default=None, choices=['in', 'incoming', 'out', 'outgoing', 'routed']),
            delete = dict(default=False, type='bool'),
            route = dict(default=False, type='bool'),
            insert = dict(default=None),
            rule = dict(default=None, choices=['allow', 'deny', 'reject', 'limit']),
            interface = dict(default=None, aliases=['if']),
            log = dict(default=False, type='bool'),
            from_ip = dict(default='any', aliases=['src', 'from']),
            from_port = dict(default=None),
            to_ip = dict(default='any', aliases=['dest', 'to']),
            to_port = dict(default=None, aliases=['port']),
            proto = dict(default=None, aliases=['protocol'], choices=['any', 'tcp', 'udp', 'ipv6', 'esp', 'ah']),
            app = dict(default=None, aliases=['name']),
            comment = dict(default=None, type='str')
        ),
        supports_check_mode = True,
        mutually_exclusive = [['app', 'proto', 'logging']]
    )

    cmds = []

    def execute(cmd):
        # cmd is a list of [condition, ..., text] pairs: keep the text of
        # every pair whose first element is truthy and join them into the
        # shell command line.
        cmd = ' '.join(map(itemgetter(-1), filter(itemgetter(0), cmd)))

        cmds.append(cmd)
        (rc, out, err) = module.run_command(cmd)

        if rc != 0:
            module.fail_json(msg=err or out)

    def ufw_version():
        """
        Returns the major and minor version of ufw installed on the system.
        """
        rc, out, err = module.run_command("%s --version" % ufw_bin)
        if rc != 0:
            module.fail_json(
                msg="Failed to get ufw version.", rc=rc, out=out, err=err
            )

        lines = [x for x in out.split('\n') if x.strip() != '']
        if len(lines) == 0:
            module.fail_json(msg="Failed to get ufw version.", rc=0, out=out)

        matches = re.search(r'^ufw.+(\d+)\.(\d+)(?:\.(\d+))?.*$', lines[0])
        if matches is None:
            module.fail_json(msg="Failed to get ufw version.", rc=0, out=out)

        # Convert version to numbers
        major = int(matches.group(1))
        minor = int(matches.group(2))
        rev = 0
        if matches.group(3) is not None:
            rev = int(matches.group(3))

        return major, minor, rev

    params = module.params

    # Ensure at least one of the command arguments are given
    command_keys = ['state', 'default', 'rule', 'logging']
    commands = dict((key, params[key]) for key in command_keys if params[key])

    if len(commands) < 1:
        # BUGFIX: list the recognised argument names (command_keys); the
        # previous code interpolated `commands`, which is always {} on
        # this branch and made the error message useless.
        module.fail_json(msg="Not any of the command arguments %s given" % command_keys)

    if params['interface'] is not None and params['direction'] is None:
        module.fail_json(msg="Direction must be specified when creating a rule on an interface")

    # Ensure ufw is available
    ufw_bin = module.get_bin_path('ufw', True)

    # Save the pre state and rules in order to recognize changes
    (_, pre_state, _) = module.run_command(ufw_bin + ' status verbose')
    (_, pre_rules, _) = module.run_command("grep '^### tuple' /lib/ufw/user.rules /lib/ufw/user6.rules /etc/ufw/user.rules /etc/ufw/user6.rules")

    # Execute commands
    for (command, value) in commands.items():
        cmd = [[ufw_bin], [module.check_mode, '--dry-run']]

        if command == 'state':
            states = {'enabled': 'enable', 'disabled': 'disable',
                      'reloaded': 'reload', 'reset': 'reset'}
            execute(cmd + [['-f'], [states[value]]])

        elif command == 'logging':
            execute(cmd + [[command], [value]])

        elif command == 'default':
            execute(cmd + [[command], [value], [params['direction']]])

        elif command == 'rule':
            # Rules are constructed according to the long format
            #
            # ufw [--dry-run] [delete] [insert NUM] [route] allow|deny|reject|limit [in|out on INTERFACE] [log|log-all] \
            #     [from ADDRESS [port PORT]] [to ADDRESS [port PORT]] \
            #     [proto protocol] [app application] [comment COMMENT]
            cmd.append([module.boolean(params['delete']), 'delete'])
            cmd.append([module.boolean(params['route']), 'route'])
            cmd.append([params['insert'], "insert %s" % params['insert']])
            cmd.append([value])
            cmd.append([params['direction'], "%s" % params['direction']])
            cmd.append([params['interface'], "on %s" % params['interface']])
            cmd.append([module.boolean(params['log']), 'log'])

            for (key, template) in [('from_ip', "from %s"), ('from_port', "port %s"),
                                    ('to_ip', "to %s"), ('to_port', "port %s"),
                                    ('proto', "proto %s"), ('app', "app '%s'")]:
                value = params[key]
                cmd.append([value, template % (value)])

            ufw_major, ufw_minor, _ = ufw_version()
            # comment is supported only in ufw version after 0.35
            if (ufw_major == 0 and ufw_minor >= 35) or ufw_major > 0:
                cmd.append([params['comment'], "comment '%s'" % params['comment']])

            execute(cmd)

    # Get the new state
    (_, post_state, _) = module.run_command(ufw_bin + ' status verbose')
    (_, post_rules, _) = module.run_command("grep '^### tuple' /lib/ufw/user.rules /lib/ufw/user6.rules /etc/ufw/user.rules /etc/ufw/user6.rules")
    changed = (pre_state != post_state) or (pre_rules != post_rules)

    return module.exit_json(changed=changed, commands=cmds, msg=post_state.rstrip())
# import module snippets
# The wildcard import supplies AnsibleModule used by main(); presumably it
# also re-exports `re`, which ufw_version() relies on -- verify against
# ansible.module_utils.basic.
from ansible.module_utils.basic import *

if __name__ == '__main__':
    main()
| gpl-3.0 |
SpaceGroupUCL/qgisSpaceSyntaxToolkit | esstoolkit/urban_data_input/entrances.py | 1 | 10931 | # -*- coding: utf-8 -*-
# Space Syntax Toolkit
# Set of tools for essential space syntax network analysis and results exploration
# -------------------
# begin : 2016-06-03
# copyright : (C) 2016 by Abhimanyu Acharya/(C) 2016 by Space Syntax Limited.
# author : Abhimanyu Acharya
# email : a.acharya@spacesyntax.com
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
from __future__ import print_function
import os
# Import the PyQt and QGIS libraries
from builtins import str
from qgis.PyQt.QtCore import (QObject, QVariant)
from qgis.core import (QgsProject, QgsVectorLayer, QgsField, QgsCoordinateReferenceSystem, QgsVectorFileWriter,
QgsDataSourceUri, QgsVectorLayerExporter, QgsMessageLog, QgsMapLayer, Qgis)
from esstoolkit.utilities import layer_field_helpers as lfh, shapefile_helpers as shph
class EntranceTool(QObject):
    """Creates, loads and edits the 'Entrances' point layer driven by the
    Urban Data Input dock widget."""

    # Field names that identify a valid entrance layer.
    id_attribute = 'E_ID'
    category_attribute = 'E_Category'
    subcat_attribute = 'E_SubCat'
    level_attribute = 'E_Level'

    def __init__(self, iface, dockwidget):
        QObject.__init__(self)

        self.iface = iface
        self.canvas = self.iface.mapCanvas()
        self.dockwidget = dockwidget
        self.entrancedlg = self.dockwidget.entrancedlg
        self.plugin_path = os.path.dirname(__file__)
        self.entrance_layer = None

        # signals from dockwidget
        self.dockwidget.updateEntranceButton.clicked.connect(self.updateSelectedEntranceAttribute)
        self.dockwidget.updateEntranceIDButton.clicked.connect(self.updateIDEntrances)
        self.dockwidget.useExistingEntrancescomboBox.currentIndexChanged.connect(self.loadEntranceLayer)

        # signals from new entrance dialog
        self.entrancedlg.create_new_layer.connect(self.newEntranceLayer)

    #######
    # Data functions
    #######

    # Update the F_ID column of the Frontage layer
    def updateIDEntrances(self):
        """Renumber the E_ID field sequentially (1..n) over all features."""
        layer = self.dockwidget.setEntranceLayer()
        features = layer.getFeatures()
        i = 1
        layer.startEditing()
        for feat in features:
            feat[EntranceTool.id_attribute] = i
            i += 1
            layer.updateFeature(feat)
        layer.commitChanges()
        # re-enter edit mode, presumably so the tool can keep editing
        layer.startEditing()

    def isRequiredEntranceLayer(self, layer, type):
        """Return True if *layer* is a vector layer with geometry *type*
        that carries the entrance category and sub-category fields."""
        if layer.type() == QgsMapLayer.VectorLayer \
                and layer.geometryType() == type:
            if lfh.layerHasFields(layer, [EntranceTool.category_attribute, EntranceTool.subcat_attribute]):
                return True
        return False

    # Add Frontage layer to combobox if conditions are satisfied
    def updateEntranceLayer(self):
        """Repopulate the layer combo box with candidate entrance layers
        and (re)connect the currently selected one."""
        # disconnect any current entrance layer
        self.disconnectEntranceLayer()
        self.dockwidget.useExistingEntrancescomboBox.clear()
        self.dockwidget.useExistingEntrancescomboBox.setEnabled(False)
        layers = QgsProject.instance().mapLayers().values()
        type = 0  # point geometry
        for lyr in layers:
            if self.isRequiredEntranceLayer(lyr, type):
                self.dockwidget.useExistingEntrancescomboBox.addItem(lyr.name(), lyr)
        if self.dockwidget.useExistingEntrancescomboBox.count() > 0:
            self.dockwidget.useExistingEntrancescomboBox.setEnabled(True)
            self.entrance_layer = self.dockwidget.setEntranceLayer()
            self.connectEntranceLayer()

    # Create New Layer
    def newEntranceLayer(self):
        """Create a new entrance layer (shapefile or PostGIS, per the
        dialog choice), report the outcome in the message bar, and add the
        layer to the project."""
        # Memory layer holding the schema; it is exported to the chosen backend.
        vl = QgsVectorLayer("Point?crs=", "memory:Entrances", "memory")
        provider = vl.dataProvider()
        provider.addAttributes([QgsField(EntranceTool.id_attribute, QVariant.Int),
                                QgsField(EntranceTool.category_attribute, QVariant.String),
                                QgsField(EntranceTool.subcat_attribute, QVariant.String),
                                QgsField(EntranceTool.level_attribute, QVariant.Double)])
        if vl.crs().toWkt() == "":
            vl.setCrs(QgsProject.instance().crs())
        vl.updateFields()
        if self.entrancedlg.e_shp_radioButton.isChecked():  # layer_type == 'shapefile':
            path = self.entrancedlg.lineEditEntrances.text()
            if path and path != '':
                filename = os.path.basename(path)
                location = os.path.abspath(path)
                crs = QgsCoordinateReferenceSystem()
                crs.createFromSrid(3857)
                shph.createShapeFile(vl, path, crs)
                vl = self.iface.addVectorLayer(location, filename[:-4], "ogr")
            else:
                vl = 'invalid data source'
        elif self.entrancedlg.e_postgis_radioButton.isChecked():
            db_path = self.entrancedlg.lineEditEntrances.text()
            if db_path and db_path != '':
                (database, schema, table_name) = db_path.split(':')
                db_con_info = self.entrancedlg.dbsettings_dlg.available_dbs[database]
                uri = QgsDataSourceUri()
                # passwords, usernames need to be empty if not provided or else connection will fail
                if 'service' in list(db_con_info.keys()):
                    uri.setConnection(db_con_info['service'], '', '', '')
                elif 'password' in list(db_con_info.keys()):
                    uri.setConnection(db_con_info['host'], db_con_info['port'], db_con_info['dbname'],
                                      db_con_info['user'],
                                      db_con_info['password'])
                else:
                    print(db_con_info)  # db_con_info['host']
                    uri.setConnection('', db_con_info['port'], db_con_info['dbname'], '', '')
                uri.setDataSource(schema, table_name, "geom")
                error = QgsVectorLayerExporter.exportLayer(vl, uri.uri(), "postgres", vl.crs())
                if error[0] != QgsVectorLayerExporter.NoError:
                    print("Error when creating postgis layer: ", error[1])
                    vl = 'duplicate'
                else:
                    vl = QgsVectorLayer(uri.uri(), table_name, "postgres")
            else:
                vl = 'invalid data source'
        if vl == 'invalid data source':
            msgBar = self.iface.messageBar()
            msg = msgBar.createMessage(u'Specify output path!')
            msgBar.pushWidget(msg, Qgis.Info, 10)
        elif vl == 'duplicate':
            msgBar = self.iface.messageBar()
            # BUGFIX: the message previously read u'Fronatges layer already
            # exists!' -- a typo copied from the frontages tool; this branch
            # reports that the *Entrances* PostGIS layer already exists.
            msg = msgBar.createMessage(u'Entrances layer already exists!')
            msgBar.pushWidget(msg, Qgis.Info, 10)
        elif not vl:
            msgBar = self.iface.messageBar()
            msg = msgBar.createMessage(u'Entrance layer failed to load!')
            msgBar.pushWidget(msg, Qgis.Info, 10)
        else:
            QgsProject.instance().addMapLayer(vl)
            msgBar = self.iface.messageBar()
            msg = msgBar.createMessage(u'Entrances layer created!')
            msgBar.pushWidget(msg, Qgis.Info, 10)
            vl.startEditing()
            self.updateEntranceLayer()
        self.entrancedlg.closePopUpEntrances()

    # Set layer as entrance layer and apply thematic style
    def loadEntranceLayer(self):
        """Select the combo-box layer as the active entrance layer, apply
        the thematic style, and start editing it."""
        # disconnect any current entrance layer
        self.disconnectEntranceLayer()
        if self.dockwidget.useExistingEntrancescomboBox.count() > 0:
            self.entrance_layer = self.dockwidget.setEntranceLayer()
            qml_path = self.plugin_path + "/styles/entrancesThematic.qml"
            self.entrance_layer.loadNamedStyle(qml_path)
            self.entrance_layer.startEditing()
            self.connectEntranceLayer()

    def connectEntranceLayer(self):
        """Hook layer signals up to the dock widget handlers."""
        if self.entrance_layer:
            self.entrance_layer.featureAdded.connect(self.logEntranceFeatureAdded)
            self.entrance_layer.selectionChanged.connect(self.dockwidget.addEntranceDataFields)
            self.entrance_layer.featureDeleted.connect(self.dockwidget.clearEntranceDataFields)

    def disconnectEntranceLayer(self):
        """Detach layer signals; tolerate the underlying C++ layer having
        already been deleted by QGIS."""
        try:
            if self.entrance_layer:
                self.entrance_layer.selectionChanged.disconnect(self.dockwidget.addEntranceDataFields)
                self.entrance_layer.featureAdded.disconnect(self.logEntranceFeatureAdded)
                self.entrance_layer.featureDeleted.disconnect(self.dockwidget.clearEntranceDataFields)
                self.entrance_layer = None
        except RuntimeError as e:
            if str(e) == 'wrapped C/C++ object of type QgsVectorLayer has been deleted':
                # QT object has already been deleted
                return
            else:
                raise e

    def logEntranceFeatureAdded(self, fid):
        """Populate the attributes of a freshly digitised entrance feature
        from the currently selected dock-widget categories."""
        QgsMessageLog.logMessage("feature added, id = " + str(fid))
        mc = self.canvas
        v_layer = self.dockwidget.setEntranceLayer()
        feature_Count = v_layer.featureCount()
        features = v_layer.getFeatures()
        inputid = 0
        # new feature receives the next sequential id
        if feature_Count == 1:
            inputid = 1
        elif feature_Count > 1:
            inputid = feature_Count
        data = v_layer.dataProvider()
        update1 = data.fieldNameIndex(EntranceTool.category_attribute)
        update2 = data.fieldNameIndex(EntranceTool.subcat_attribute)
        update3 = data.fieldNameIndex(EntranceTool.id_attribute)
        update4 = data.fieldNameIndex(EntranceTool.level_attribute)
        categorytext = self.dockwidget.ecategorylistWidget.currentItem().text()
        subcategorytext = self.dockwidget.esubcategorylistWidget.currentItem().text()
        accessleveltext = self.dockwidget.eaccesscategorylistWidget.currentItem().text()
        v_layer.changeAttributeValue(fid, update1, categorytext, True)
        v_layer.changeAttributeValue(fid, update2, subcategorytext, True)
        v_layer.changeAttributeValue(fid, update3, inputid, True)
        v_layer.changeAttributeValue(fid, update4, accessleveltext, True)
        v_layer.updateFields()

    # Update Feature
    def updateSelectedEntranceAttribute(self):
        """Write the currently selected dock-widget categories onto every
        selected entrance feature."""
        mc = self.canvas
        layer = self.dockwidget.setEntranceLayer()
        features = layer.selectedFeatures()
        categorytext = self.dockwidget.ecategorylistWidget.currentItem().text()
        subcategorytext = self.dockwidget.esubcategorylistWidget.currentItem().text()
        accessleveltext = self.dockwidget.eaccesscategorylistWidget.currentItem().text()
        for feat in features:
            feat[EntranceTool.category_attribute] = categorytext
            feat[EntranceTool.subcat_attribute] = subcategorytext
            feat[EntranceTool.level_attribute] = accessleveltext
            layer.updateFeature(feat)
        self.dockwidget.addEntranceDataFields()
| gpl-3.0 |
yuxng/DA-RNN | lib/networks/vanilla2d.py | 1 | 1494 | import tensorflow as tf
class Vanilla2DCell(tf.contrib.rnn.RNNCell):
    """Vanilla Recurrent Unit cell."""
    # Convolutional "vanilla RNN" over 2D feature maps:
    #   new_h = tanh(conv3x3(concat(inputs, state)) + bias)

    def __init__(self, num_units, channels):
        # num_units: number of hidden channels in the recurrent state.
        # channels: number of channels in the input feature map.
        self._num_units = num_units
        self._channels = channels

    @property
    def state_size(self):
        return self._num_units

    @property
    def output_size(self):
        return self._num_units

    def make_var(self, name, shape, initializer=None, trainable=True):
        # Thin wrapper over tf.get_variable so weights are shared through
        # the enclosing variable scope.
        return tf.get_variable(name, shape, initializer=initializer, trainable=trainable)

    # inputs: [batch_size, height, width, channels]
    # state: [batch_size, height, width, num_units]
    def __call__(self, inputs, state, scope=None):
        with tf.variable_scope(scope or type(self).__name__):  # "VanillaCell"
            inputs_shape = tf.shape(inputs)
            inputs = tf.reshape(inputs, [inputs_shape[0], inputs_shape[1], inputs_shape[2], self._channels])
            # concat inputs and state
            # NOTE(review): tf.concat(axis, values) is the pre-TF-1.0
            # argument order; on TF >= 1.0 this would be
            # tf.concat([inputs, state], 3) -- confirm the targeted TF version.
            inputs_state = tf.concat(3, [inputs, state])
            # define the variables
            init_biases = tf.constant_initializer(0.0)
            kernel = self.make_var('weights', [3, 3, self._num_units + self._channels, self._num_units])
            biases = self.make_var('biases', [self._num_units], init_biases)
            # 2D convolution
            conv = tf.nn.conv2d(inputs_state, kernel, [1, 1, 1, 1], padding='SAME')
            new_h = tf.nn.tanh(tf.nn.bias_add(conv, biases))
            # output and next state are the same tensor (vanilla RNN)
            return new_h, new_h
| mit |
btardio/Django_Personal_Website_Concept | app_manage_subscriptions/tests.py | 1 | 8788 | #
#
#
# Copyright (C) 2016 Brandon C Tardio
#
# This file is part of DPWC
#
# DPWC is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DPWC is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with DPWC. If not, see <http://www.gnu.org/licenses/>.
#
# Contact: BTardio@gmail.com
# 818 424 6838
#
#
# Django_Personal_Website_Concept ( DPWC )
#
#
#
from django.test import TestCase
from django.test import Client
from app_manage_subscriptions.models import manage_subscriptions
# Create your tests here.
class app_manage_subscriptions_test_case ( TestCase ):
    """Integration tests for the subscription views.

    The UNIT_TEST_ID markers embedded in the rendered pages identify which
    template was served:
      * OhAQEyLYb5LK2ME10E750JSq3j67GfYd -- the email entry form
      * 7wdfrNWdgU4LP0ZmKYRqRsH7KI04LbKA -- the subscription management page
    """

    def setUp(self):
        # Start each test from an empty subscriptions table.
        manage_subscriptions.objects.all().delete()

    def test_contact_form_submit ( self ):
        """Exercise /submit_email/ and /manage_subscriptions/ via GET and
        POST, checking both the rendered template and the database state."""
        c = Client()

        self.assertEqual ( 1, 1 )

        # submit_email

        # .. get request
        response = c.get('/submit_email/')
        self.assertEqual ( response.status_code, 200 )
        self.assertIn ( '<UNIT_TEST_ID hidden>OhAQEyLYb5LK2ME10E750JSq3j67GfYd</UNIT_TEST_ID>', response.content.decode() )

        # .. get request with email in url with no db entry
        # a GET with an email creates the record with all subscriptions on
        dbobj = manage_subscriptions.objects.filter ( email='b@b.com' )
        self.assertEqual ( len(dbobj), 0 )
        response = c.get('/submit_email/b@b.com')
        self.assertEqual ( response.status_code, 200 )
        self.assertIn ( '<UNIT_TEST_ID hidden>7wdfrNWdgU4LP0ZmKYRqRsH7KI04LbKA</UNIT_TEST_ID>', response.content.decode() )
        dbobj = manage_subscriptions.objects.filter ( email='b@b.com' )
        self.assertEqual ( len(dbobj), 1 )
        dbobj = dbobj[0]
        self.assertEqual ( dbobj.job_search_subscription, True )
        self.assertEqual ( dbobj.weekly_newsletter_subscription, True )
        self.assertEqual ( dbobj.deals_subscription, True )
        self.assertEqual ( dbobj.announcements_subscription, True )

        # .. get request after creation of db entry
        # repeating the GET must not create a duplicate record
        response = c.get('/submit_email/b@b.com')
        self.assertEqual ( response.status_code, 200 )
        self.assertIn ( '<UNIT_TEST_ID hidden>7wdfrNWdgU4LP0ZmKYRqRsH7KI04LbKA</UNIT_TEST_ID>', response.content.decode() )
        dbobj = manage_subscriptions.objects.filter ( email='b@b.com' )
        self.assertEqual ( len(dbobj), 1 )

        # .. post request with no db entry
        dbobj = manage_subscriptions.objects.filter(email='d@d.com')
        self.assertEqual ( len(dbobj), 0 )
        response = c.post('/submit_email/', { 'email' : 'd@d.com' } )
        self.assertEqual ( response.status_code, 200 )
        self.assertIn ( '<UNIT_TEST_ID hidden>7wdfrNWdgU4LP0ZmKYRqRsH7KI04LbKA</UNIT_TEST_ID>', response.content.decode() )
        dbobj = manage_subscriptions.objects.filter(email='d@d.com')
        self.assertEqual ( len(dbobj), 1 )
        dbobj = dbobj[0]
        self.assertEqual ( dbobj.job_search_subscription, True )
        self.assertEqual ( dbobj.weekly_newsletter_subscription, True )
        self.assertEqual ( dbobj.deals_subscription, True )
        self.assertEqual ( dbobj.announcements_subscription, True )

        # .. post request after creation of db entry
        response = c.post('/submit_email/', { 'email' : 'd@d.com' } )
        self.assertEqual ( response.status_code, 200 )
        self.assertIn ( '<UNIT_TEST_ID hidden>7wdfrNWdgU4LP0ZmKYRqRsH7KI04LbKA</UNIT_TEST_ID>', response.content.decode() )
        dbobj = manage_subscriptions.objects.filter(email='d@d.com')
        self.assertEqual ( len(dbobj), 1 )

        # manage_subscriptions

        # .. get request
        # without an email the view redirects back to the email form
        response = c.get ( '/manage_subscriptions/', follow=True )
        self.assertEqual ( response.redirect_chain[0][1], 302 )
        self.assertEqual ( response.status_code, 200 )
        self.assertIn ( '<UNIT_TEST_ID hidden>OhAQEyLYb5LK2ME10E750JSq3j67GfYd</UNIT_TEST_ID>', response.content.decode() )

        # .. get request with email in url
        dbobj = manage_subscriptions.objects.filter(email='c@c.com')
        self.assertEqual ( len(dbobj), 0 )
        response = c.get('/manage_subscriptions/c@c.com')
        self.assertEqual ( response.status_code, 200 )
        self.assertIn ( '<UNIT_TEST_ID hidden>7wdfrNWdgU4LP0ZmKYRqRsH7KI04LbKA</UNIT_TEST_ID>', response.content.decode() )
        dbobj = manage_subscriptions.objects.filter(email='c@c.com')
        self.assertEqual ( len(dbobj), 1 )
        dbobj = dbobj[0]
        self.assertEqual ( dbobj.job_search_subscription, True )
        self.assertEqual ( dbobj.weekly_newsletter_subscription, True )
        self.assertEqual ( dbobj.deals_subscription, True )
        self.assertEqual ( dbobj.announcements_subscription, True )

        # .. get request after creation of db entry with email in url
        response = c.get('/manage_subscriptions/c@c.com')
        self.assertEqual ( response.status_code, 200 )
        self.assertIn ( '<UNIT_TEST_ID hidden>7wdfrNWdgU4LP0ZmKYRqRsH7KI04LbKA</UNIT_TEST_ID>', response.content.decode() )
        dbobj = manage_subscriptions.objects.filter(email='c@c.com')
        self.assertEqual ( len(dbobj), 1 )
        dbobj = dbobj[0]
        self.assertEqual ( dbobj.job_search_subscription, True )
        self.assertEqual ( dbobj.weekly_newsletter_subscription, True )
        self.assertEqual ( dbobj.deals_subscription, True )
        self.assertEqual ( dbobj.announcements_subscription, True )

        # .. post request changes subscription choices
        response = c.post ( '/manage_subscriptions/', { 'email' : 'c@c.com',
                                                        'job_search_subscription' : False,
                                                        'weekly_newsletter_subscription' : False,
                                                        'deals_subscription' : False,
                                                        'announcements_subscription' : False } )
        self.assertEqual ( response.status_code, 200 )
        self.assertIn ( 'manage_subscriptions submission received.', response.content.decode() )
        dbobj = manage_subscriptions.objects.filter(email='c@c.com')
        self.assertEqual ( len(dbobj), 1 )
        dbobj = dbobj[0]
        self.assertEqual ( dbobj.job_search_subscription, False )
        self.assertEqual ( dbobj.weekly_newsletter_subscription, False )
        self.assertEqual ( dbobj.deals_subscription, False )
        self.assertEqual ( dbobj.announcements_subscription, False )

        # .. post request without db entry and non default subscription choices
        dbobj = manage_subscriptions.objects.filter(email='a@a.com')
        self.assertEqual ( len(dbobj), 0 )
        response = c.post ( '/manage_subscriptions/', { 'email' : 'a@a.com',
                                                        'job_search_subscription' : False,
                                                        'weekly_newsletter_subscription' : True,
                                                        'deals_subscription' : False,
                                                        'announcements_subscription' : False } )
        self.assertEqual ( response.status_code, 200 )
        self.assertIn ( 'manage_subscriptions submission received.', response.content.decode() )
        dbobj = manage_subscriptions.objects.filter(email='a@a.com')
        self.assertEqual ( len(dbobj), 1 )
        dbobj = dbobj[0]
        self.assertEqual ( dbobj.job_search_subscription, False )
        self.assertEqual ( dbobj.weekly_newsletter_subscription, True )
        self.assertEqual ( dbobj.deals_subscription, False )
        self.assertEqual ( dbobj.announcements_subscription, False )

        # .. get request after creation of non default choices
        # a later GET must preserve, not reset, the stored choices
        response = c.get('/manage_subscriptions/a@a.com')
        self.assertEqual ( response.status_code, 200 )
        self.assertIn ( '<UNIT_TEST_ID hidden>7wdfrNWdgU4LP0ZmKYRqRsH7KI04LbKA</UNIT_TEST_ID>', response.content.decode() )
        dbobj = manage_subscriptions.objects.filter(email='a@a.com')
        self.assertEqual ( len(dbobj), 1 )
        dbobj = dbobj[0]
        self.assertEqual ( dbobj.job_search_subscription, False )
        self.assertEqual ( dbobj.weekly_newsletter_subscription, True )
        self.assertEqual ( dbobj.deals_subscription, False )
        self.assertEqual ( dbobj.announcements_subscription, False )
| gpl-3.0 |
nwchandler/ansible | lib/ansible/modules/web_infrastructure/jenkins_plugin.py | 31 | 25931 | #!/usr/bin/python
# encoding: utf-8
# (c) 2016, Jiri Tyr <jiri.tyr@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {
'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: jenkins_plugin
author: Jiri Tyr (@jtyr)
version_added: '2.2'
short_description: Add or remove Jenkins plugin
description:
  - Ansible module which helps to manage Jenkins plugins.
options:
  group:
    required: false
    default: jenkins
    description:
      - Name of the Jenkins group on the OS.
  jenkins_home:
    required: false
    default: /var/lib/jenkins
    description:
      - Home directory of the Jenkins user.
  mode:
    required: false
    default: '0644'
    description:
      - File mode applied on versioned plugins.
  name:
    required: true
    description:
      - Plugin name.
  owner:
    required: false
    default: jenkins
    description:
      - Name of the Jenkins user on the OS.
  params:
    required: false
    default: null
    description:
      - Option used to allow the user to overwrite any of the other options. To
        remove an option, set the value of the option to C(null).
  state:
    required: false
    choices: [absent, present, pinned, unpinned, enabled, disabled, latest]
    default: present
    description:
      - Desired plugin state.
      - If the C(latest) is set, the check for new version will be performed
        every time. This is suitable to keep the plugin up-to-date.
  timeout:
    required: false
    default: 30
    description:
      - Server connection timeout in secs.
  updates_expiration:
    required: false
    default: 86400
    description:
      - Number of seconds after which a new copy of the I(update-center.json)
        file is downloaded. This is used to avoid the need to download the
        plugin to calculate its checksum when C(latest) is specified.
      - Set it to C(0) if no cache file should be used. In that case, the
        plugin file will always be downloaded to calculate its checksum when
        C(latest) is specified.
  updates_url:
    required: false
    default: https://updates.jenkins-ci.org
    description:
      - URL of the Update Centre.
      - Used as the base URL to download the plugins and the
        I(update-center.json) JSON file.
  url:
    required: false
    default: http://localhost:8080
    description:
      - URL of the Jenkins server.
  version:
    required: false
    default: null
    description:
      - Plugin version number.
      - If this option is specified, all plugin dependencies must be installed
        manually.
      - It might take longer to verify that the correct version is installed.
        This is especially true if a specific version number is specified.
      - Quote the version to prevent the value from being interpreted as float.
        For example if C(1.20) would be unquoted, it would become C(1.2).
  with_dependencies:
    required: false
    choices: ['yes', 'no']
    default: 'yes'
    description:
      - Defines whether to install plugin dependencies.
      - This option takes effect only if the I(version) is not defined.
notes:
  - Plugin installation should be run under root or the same user which owns
    the plugin files on the disk. Only if the plugin is not installed yet and
    no version is specified, the API installation is performed which requires
    only the Web UI credentials.
  - It's necessary to notify the handler or call the I(service) module to
    restart the Jenkins service after a new plugin was installed.
  - Pinning works only if the plugin is installed and Jenkins service was
    successfully restarted after the plugin installation.
  - It is not possible to run the module remotely by changing the I(url)
    parameter to point to the Jenkins server. The module must be used on the
    host where Jenkins runs as it needs direct access to the plugin files.
'''
EXAMPLES = '''
- name: Install plugin
jenkins_plugin:
name: build-pipeline-plugin
- name: Install plugin without its dependencies
jenkins_plugin:
name: build-pipeline-plugin
with_dependencies: no
- name: Make sure the plugin is always up-to-date
jenkins_plugin:
name: token-macro
state: latest
- name: Install specific version of the plugin
jenkins_plugin:
name: token-macro
version: "1.15"
- name: Pin the plugin
jenkins_plugin:
name: token-macro
state: pinned
- name: Unpin the plugin
jenkins_plugin:
name: token-macro
state: unpinned
- name: Enable the plugin
jenkins_plugin:
name: token-macro
state: enabled
- name: Disable the plugin
jenkins_plugin:
name: token-macro
state: disabled
- name: Uninstall plugin
jenkins_plugin:
name: build-pipeline-plugin
state: absent
#
# Example of how to use the params
#
# Define a variable and specify all default parameters you want to use across
# all jenkins_plugin calls:
#
# my_jenkins_params:
# url_username: admin
# url_password: p4ssw0rd
# url: http://localhost:8888
#
- name: Install plugin
jenkins_plugin:
name: build-pipeline-plugin
params: "{{ my_jenkins_params }}"
#
# Example of a Play which handles Jenkins restarts during the state changes
#
- name: Jenkins Master play
hosts: jenkins-master
vars:
my_jenkins_plugins:
token-macro:
enabled: yes
build-pipeline-plugin:
version: "1.4.9"
pinned: no
enabled: yes
tasks:
- name: Install plugins without a specific version
jenkins_plugin:
name: "{{ item.key }}"
register: my_jenkins_plugin_unversioned
when: >
'version' not in item.value
with_dict: "{{ my_jenkins_plugins }}"
- name: Install plugins with a specific version
jenkins_plugin:
name: "{{ item.key }}"
version: "{{ item.value['version'] }}"
register: my_jenkins_plugin_versioned
when: >
'version' in item.value
with_dict: "{{ my_jenkins_plugins }}"
- name: Initiate the fact
set_fact:
jenkins_restart_required: no
- name: Check if restart is required by any of the versioned plugins
set_fact:
jenkins_restart_required: yes
when: item.changed
with_items: "{{ my_jenkins_plugin_versioned.results }}"
- name: Check if restart is required by any of the unversioned plugins
set_fact:
jenkins_restart_required: yes
when: item.changed
with_items: "{{ my_jenkins_plugin_unversioned.results }}"
- name: Restart Jenkins if required
service:
name: jenkins
state: restarted
when: jenkins_restart_required
- name: Wait for Jenkins to start up
uri:
url: http://localhost:8080
status_code: 200
timeout: 5
register: jenkins_service_status
# Keep trying for 5 mins in 5 sec intervals
retries: 60
delay: 5
until: >
'status' in jenkins_service_status and
jenkins_service_status['status'] == 200
when: jenkins_restart_required
- name: Reset the fact
set_fact:
jenkins_restart_required: no
when: jenkins_restart_required
- name: Plugin pinning
jenkins_plugin:
name: "{{ item.key }}"
state: "{{ 'pinned' if item.value['pinned'] else 'unpinned'}}"
when: >
'pinned' in item.value
with_dict: "{{ my_jenkins_plugins }}"
- name: Plugin enabling
jenkins_plugin:
name: "{{ item.key }}"
state: "{{ 'enabled' if item.value['enabled'] else 'disabled'}}"
when: >
'enabled' in item.value
with_dict: "{{ my_jenkins_plugins }}"
'''
RETURN = '''
plugin:
description: plugin name
returned: success
type: string
sample: build-pipeline-plugin
state:
description: state of the target, after execution
returned: success
type: string
sample: "present"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.six.moves.urllib.parse import urlencode
from ansible.module_utils.urls import fetch_url, url_argument_spec
from ansible.module_utils._text import to_native
import base64
import hashlib
import json
import os
import tempfile
import time
class JenkinsPlugin(object):
    """Manage one Jenkins plugin via the Jenkins HTTP API and on-disk files.

    The constructor probes the server (CSRF crumb, installed-plugin list);
    network or parse failures abort the whole module run via fail_json.
    """

    def __init__(self, module):
        # To be able to call fail_json
        self.module = module
        # Shortcuts for the params
        self.params = self.module.params
        self.url = self.params['url']
        self.timeout = self.params['timeout']
        # Crumb
        self.crumb = {}
        if self._csrf_enabled():
            self.crumb = self._get_crumb()
        # Get list of installed plugins
        self._get_installed_plugins()
def _csrf_enabled(self):
csrf_data = self._get_json_data(
"%s/%s" % (self.url, "api/json"), 'CSRF')
if 'useCrumbs' not in csrf_data:
self.module.fail_json(
msg="Required fields not found in the Crumbs response.",
details=csrf_data)
return csrf_data['useCrumbs']
def _get_json_data(self, url, what, **kwargs):
# Get the JSON data
r = self._get_url_data(url, what, **kwargs)
# Parse the JSON data
try:
json_data = json.loads(to_native(r.read()))
except Exception:
e = get_exception()
self.module.fail_json(
msg="Cannot parse %s JSON data." % what,
details=to_native(e))
return json_data
    def _get_url_data(
            self, url, what=None, msg_status=None, msg_exception=None,
            **kwargs):
        """Fetch *url* through Ansible's fetch_url and return the raw response.

        Non-200 statuses and transport errors abort the run via fail_json;
        *what* is only used to build the default error messages.  Extra
        keyword args (e.g. data= for a POST body) go straight to fetch_url.
        """
        # Compose default messages
        if msg_status is None:
            msg_status = "Cannot get %s" % what
        if msg_exception is None:
            msg_exception = "Retrieval of %s failed." % what
        # Get the URL data
        try:
            response, info = fetch_url(
                self.module, url, timeout=self.timeout, **kwargs)
            if info['status'] != 200:
                self.module.fail_json(msg=msg_status, details=info['msg'])
        except Exception:
            e = get_exception()
            self.module.fail_json(msg=msg_exception, details=to_native(e))
        return response
def _get_crumb(self):
crumb_data = self._get_json_data(
"%s/%s" % (self.url, "crumbIssuer/api/json"), 'Crumb')
if 'crumbRequestField' in crumb_data and 'crumb' in crumb_data:
ret = {
crumb_data['crumbRequestField']: crumb_data['crumb']
}
else:
self.module.fail_json(
msg="Required fields not found in the Crum response.",
details=crumb_data)
return ret
def _get_installed_plugins(self):
plugins_data = self._get_json_data(
"%s/%s" % (self.url, "pluginManager/api/json?depth=1"),
'list of plugins')
# Check if we got valid data
if 'plugins' not in plugins_data:
self.module.fail_json(msg="No valid plugin data found.")
# Create final list of installed/pined plugins
self.is_installed = False
self.is_pinned = False
self.is_enabled = False
for p in plugins_data['plugins']:
if p['shortName'] == self.params['name']:
self.is_installed = True
if p['pinned']:
self.is_pinned = True
if p['enabled']:
self.is_enabled = True
break
    def install(self):
        """Ensure the plugin is present; return True when anything changed.

        Two strategies:
          * no version requested and plugin absent -> install through the
            server's Groovy scriptText API (server resolves dependencies);
          * otherwise -> download the .jpi from the update centre and compare
            checksums against the file already on disk.
        MD5/SHA1 here are change-detection only, not security checks.
        """
        changed = False
        plugin_file = (
            '%s/plugins/%s.jpi' % (
                self.params['jenkins_home'],
                self.params['name']))
        if not self.is_installed and self.params['version'] is None:
            if not self.module.check_mode:
                # Install the plugin (with dependencies)
                install_script = (
                    'd = Jenkins.instance.updateCenter.getPlugin("%s")'
                    '.deploy(); d.get();' % self.params['name'])
                if self.params['with_dependencies']:
                    install_script = (
                        'Jenkins.instance.updateCenter.getPlugin("%s")'
                        '.getNeededDependencies().each{it.deploy()}; %s' % (
                            self.params['name'], install_script))
                script_data = {
                    'script': install_script
                }
                script_data.update(self.crumb)
                data = urlencode(script_data)
                # Send the installation request
                # NOTE(review): the response 'r' is unused; the call is kept
                # for its status check (non-200 aborts via fail_json).
                r = self._get_url_data(
                    "%s/scriptText" % self.url,
                    msg_status="Cannot install plugin.",
                    msg_exception="Plugin installation has failed.",
                    data=data)
                # The API install may drop a .hpi next to the .jpi; remove it.
                hpi_file = '%s/plugins/%s.hpi' % (
                    self.params['jenkins_home'],
                    self.params['name'])
                if os.path.isfile(hpi_file):
                    os.remove(hpi_file)
            changed = True
        else:
            # Check if the plugin directory exists
            if not os.path.isdir(self.params['jenkins_home']):
                self.module.fail_json(
                    msg="Jenkins home directory doesn't exist.")
            md5sum_old = None
            if os.path.isfile(plugin_file):
                # Make the checksum of the currently installed plugin
                md5sum_old = hashlib.md5(
                    open(plugin_file, 'rb').read()).hexdigest()
            if self.params['version'] in [None, 'latest']:
                # Take latest version
                plugin_url = (
                    "%s/latest/%s.hpi" % (
                        self.params['updates_url'],
                        self.params['name']))
            else:
                # Take specific version
                plugin_url = (
                    "{0}/download/plugins/"
                    "{1}/{2}/{1}.hpi".format(
                        self.params['updates_url'],
                        self.params['name'],
                        self.params['version']))
            # Download directly when the cache is disabled, a pinned version
            # is requested, or there is no local file to compare against.
            if (
                    self.params['updates_expiration'] == 0 or
                    self.params['version'] not in [None, 'latest'] or
                    md5sum_old is None):
                # Download the plugin file directly
                r = self._download_plugin(plugin_url)
                # Write downloaded plugin into file if checksums don't match
                if md5sum_old is None:
                    # No previously installed plugin
                    if not self.module.check_mode:
                        self._write_file(plugin_file, r)
                    changed = True
                else:
                    # Get data for the MD5
                    data = r.read()
                    # Make new checksum
                    md5sum_new = hashlib.md5(data).hexdigest()
                    # If the checksum is different from the currently installed
                    # plugin, store the new plugin
                    if md5sum_old != md5sum_new:
                        if not self.module.check_mode:
                            self._write_file(plugin_file, data)
                        changed = True
            else:
                # Check for update from the updates JSON file
                plugin_data = self._download_updates()
                try:
                    sha1_old = hashlib.sha1(open(plugin_file, 'rb').read())
                except Exception:
                    e = get_exception()
                    self.module.fail_json(
                        msg="Cannot calculate SHA1 of the old plugin.",
                        details=e.message)
                # NOTE(review): on Python 3 b64encode returns bytes while the
                # update-centre sha1 is a str, so this comparison always
                # differs there; the module predates py3 support.
                sha1sum_old = base64.b64encode(sha1_old.digest())
                # If the latest version changed, download it
                if sha1sum_old != plugin_data['sha1']:
                    if not self.module.check_mode:
                        r = self._download_plugin(plugin_url)
                        self._write_file(plugin_file, r)
                    changed = True
        # Change file attributes if needed
        if os.path.isfile(plugin_file):
            params = {
                'dest': plugin_file
            }
            params.update(self.params)
            file_args = self.module.load_file_common_arguments(params)
            if not self.module.check_mode:
                # Not sure how to run this in the check mode
                changed = self.module.set_fs_attributes_if_different(
                    file_args, changed)
            else:
                # See the comment above
                changed = True
        return changed
    def _download_updates(self):
        """Return the update-centre entry for this plugin, using a local cache.

        The update-center.json file is cached under ~/.ansible/tmp and only
        re-downloaded once it is older than params['updates_expiration'].
        """
        updates_filename = 'jenkins-plugin-cache.json'
        updates_dir = os.path.expanduser('~/.ansible/tmp')
        updates_file = "%s/%s" % (updates_dir, updates_filename)
        download_updates = True
        # Check if we need to download new updates file
        if os.path.isfile(updates_file):
            # Get timestamp when the file was changed last time
            ts_file = os.stat(updates_file).st_mtime
            ts_now = time.time()
            if ts_now - ts_file < self.params['updates_expiration']:
                download_updates = False
        # Remember the final cache location before updates_file is repointed
        # at a temp file below.
        updates_file_orig = updates_file
        # Download the updates file if needed
        if download_updates:
            url = "%s/update-center.json" % self.params['updates_url']
            # Get the data
            r = self._get_url_data(
                url,
                msg_status="Remote updates not found.",
                msg_exception="Updates download failed.")
            # Write the updates file
            update_fd, updates_file = tempfile.mkstemp()
            os.write(update_fd, r.read())
            # NOTE(review): os.close()/os.write() raise OSError, not IOError;
            # on Python 2 these are distinct classes, so this handler may not
            # fire — confirm before relying on it.
            try:
                os.close(update_fd)
            except IOError:
                e = get_exception()
                self.module.fail_json(
                    msg="Cannot close the tmp updates file %s." % updates_file,
                    details=to_native(e))
        # Open the updates file
        try:
            f = open(updates_file)
        except IOError:
            e = get_exception()
            self.module.fail_json(
                msg="Cannot open temporal updates file.",
                details=to_native(e))
        i = 0
        # The payload lives on the second line, between the JSONP wrapper
        # lines that the update centre emits.
        for line in f:
            # Read only the second line
            if i == 1:
                try:
                    data = json.loads(line)
                except Exception:
                    e = get_exception()
                    self.module.fail_json(
                        msg="Cannot load JSON data from the tmp updates file.",
                        details=e.message)
                break
            i += 1
        # NOTE(review): if the file has fewer than two lines, 'data' is never
        # bound and the check below raises NameError instead of fail_json.
        # Move the updates file to the right place if we could read it
        if download_updates:
            # Make sure the destination directory exists
            if not os.path.isdir(updates_dir):
                try:
                    os.makedirs(updates_dir, int('0700', 8))
                except OSError:
                    e = get_exception()
                    self.module.fail_json(
                        msg="Cannot create temporal directory.",
                        details=e.message)
            self.module.atomic_move(updates_file, updates_file_orig)
        # Check if we have the plugin data available
        if 'plugins' not in data or self.params['name'] not in data['plugins']:
            self.module.fail_json(
                msg="Cannot find plugin data in the updates file.")
        return data['plugins'][self.params['name']]
def _download_plugin(self, plugin_url):
# Download the plugin
r = self._get_url_data(
plugin_url,
msg_status="Plugin not found.",
msg_exception="Plugin download failed.")
return r
    def _write_file(self, f, data):
        """Atomically write *data* (a string or a file-like object) to path *f*."""
        # Store the plugin into a temp file and then move it
        tmp_f_fd, tmp_f = tempfile.mkstemp()
        # NOTE(review): the str branch is Python 2 oriented; on Python 3
        # os.write() requires bytes, so a text str here would raise TypeError.
        if isinstance(data, str):
            os.write(tmp_f_fd, data)
        else:
            os.write(tmp_f_fd, data.read())
        try:
            os.close(tmp_f_fd)
        except IOError:
            e = get_exception()
            self.module.fail_json(
                msg='Cannot close the temporal plugin file %s.' % tmp_f,
                details=to_native(e))
        # Move the file onto the right place
        self.module.atomic_move(tmp_f, f)
def uninstall(self):
changed = False
# Perform the action
if self.is_installed:
if not self.module.check_mode:
self._pm_query('doUninstall', 'Uninstallation')
changed = True
return changed
    def pin(self):
        """Pin the plugin (protect it from being replaced by the bundled version)."""
        return self._pinning('pin')

    def unpin(self):
        """Remove the pin from the plugin."""
        return self._pinning('unpin')
def _pinning(self, action):
changed = False
# Check if the plugin is pinned/unpinned
if (
action == 'pin' and not self.is_pinned or
action == 'unpin' and self.is_pinned):
# Perform the action
if not self.module.check_mode:
self._pm_query(action, "%sning" % action.capitalize())
changed = True
return changed
    def enable(self):
        """Enable the plugin."""
        return self._enabling('enable')

    def disable(self):
        """Disable the plugin."""
        return self._enabling('disable')
def _enabling(self, action):
changed = False
# Check if the plugin is pinned/unpinned
if (
action == 'enable' and not self.is_enabled or
action == 'disable' and self.is_enabled):
# Perform the action
if not self.module.check_mode:
self._pm_query(
"make%sd" % action.capitalize(),
"%sing" % action[:-1].capitalize())
changed = True
return changed
def _pm_query(self, action, msg):
url = "%s/pluginManager/plugin/%s/%s" % (
self.params['url'], self.params['name'], action)
data = urlencode(self.crumb)
# Send the request
self._get_url_data(
url,
msg_status="Plugin not found. %s" % url,
msg_exception="%s has failed." % msg,
data=data)
def main():
    """Module entry point: parse arguments and apply the requested state."""
    # Module arguments
    argument_spec = url_argument_spec()
    argument_spec.update(
        group=dict(default='jenkins'),
        jenkins_home=dict(default='/var/lib/jenkins'),
        mode=dict(default='0644', type='raw'),
        name=dict(required=True),
        owner=dict(default='jenkins'),
        params=dict(type='dict'),
        state=dict(
            choices=[
                'present',
                'absent',
                'pinned',
                'unpinned',
                'enabled',
                'disabled',
                'latest'],
            default='present'),
        timeout=dict(default=30, type="int"),
        updates_expiration=dict(default=86400, type="int"),
        updates_url=dict(default='https://updates.jenkins-ci.org'),
        url=dict(default='http://localhost:8080'),
        url_password=dict(no_log=True),
        version=dict(),
        with_dependencies=dict(default=True, type='bool'),
    )
    # Module settings
    module = AnsibleModule(
        argument_spec=argument_spec,
        add_file_common_args=True,
        supports_check_mode=True,
    )
    # Update module parameters by user's parameters if defined
    if 'params' in module.params and isinstance(module.params['params'], dict):
        module.params.update(module.params['params'])
        # Remove the params
        module.params.pop('params', None)
    # Force basic authentication
    module.params['force_basic_auth'] = True
    # Convert timeout to float (fetch_url expects a number)
    try:
        module.params['timeout'] = float(module.params['timeout'])
    except ValueError:
        e = get_exception()
        module.fail_json(
            msg='Cannot convert %s to float.' % module.params['timeout'],
            details=to_native(e))
    # Set version to latest if state is latest
    if module.params['state'] == 'latest':
        module.params['state'] = 'present'
        module.params['version'] = 'latest'
    # Create some shortcuts
    name = module.params['name']
    state = module.params['state']
    # Initial change state of the task
    changed = False
    # Instantiate the JenkinsPlugin object
    jp = JenkinsPlugin(module)
    # Perform action depending on the requested state
    if state == 'present':
        changed = jp.install()
    elif state == 'absent':
        changed = jp.uninstall()
    elif state == 'pinned':
        changed = jp.pin()
    elif state == 'unpinned':
        changed = jp.unpin()
    elif state == 'enabled':
        changed = jp.enable()
    elif state == 'disabled':
        changed = jp.disable()
    # Print status of the change
    module.exit_json(changed=changed, plugin=name, state=state)


if __name__ == '__main__':
    main()
| gpl-3.0 |
crazyoldbox/cellwars | pgu/gui/select.py | 29 | 4602 | """
"""
import traceback
from .const import *
from .button import Button
from .basic import Label, Image
from .table import Table
class Select(Table):
    """A combo dropdown box widget.
    Example:
        w = Select(value="goats")
        w.add("Cats","cats")
        w.add("Goats","goats")
        w.add("Dogs","Dogs")
        w.value = 'dogs' #changes the value from goats to dogs
    """

    # The drop-down arrow button for the selection widget
    top_arrow = None
    # A button displaying the currently selected item
    # NOTE(review): instances actually assign self.top_selected in __init__;
    # this class attribute appears unused under its current name.
    top_selection = None
    # The first option added to the selector
    firstOption = None
    # The PGU table of options
    options = None
    # Currently selected value (backing store for the 'value' property)
    _value = None

    def __init__(self,value=None,**params):
        """Build the closed widget (selected label + arrow) and the hidden
        options table; *value* preselects an option added later via add()."""
        params.setdefault('cls','select')
        Table.__init__(self,**params)
        label = Label(" ",cls=self.cls+".option.label")
        self.top_selected = Button(label, cls=self.cls+".selected")
        Table.add(self,self.top_selected) #,hexpand=1,vexpand=1)#,0,0)
        self.top_arrow = Button(Image(self.style.arrow), cls=self.cls+".arrow")
        Table.add(self,self.top_arrow) #,hexpand=1,vexpand=1) #,1,0)
        self.options = Table(cls=self.cls+".options")
        self.options.connect(BLUR,self._close,None)
        self.options.name = "pulldown-table"
        self.values = []
        self.value = value

    def resize(self,width=None,height=None):
        """Size the selected-item button to the widest/tallest option.

        NOTE(review): this also (re)connects the CLICK handlers on every
        resize call — presumably harmless in pgu, but verify duplicates are
        not accumulated.
        """
        max_w,max_h = 0,0
        for w in self.options.widgets:
            w.rect.w,w.rect.h = w.resize()
            max_w,max_h = max(max_w,w.rect.w),max(max_h,w.rect.h)
        #xt,xr,xb,xl = self.top_selected.getspacing()
        self.top_selected.style.width = max_w #+ xl + xr
        self.top_selected.style.height = max_h #+ xt + xb
        self.top_arrow.connect(CLICK,self._open,None)
        self.top_selected.connect(CLICK,self._open,None)
        w,h = Table.resize(self,width,height)
        self.options.style.width = w
        #HACK: sort of, but not a big one..
        self.options.resize()
        return w,h

    def _open(self,value):
        """Open the dropdown below the widget and focus its first option."""
        opts = self.options
        opts.rect.w, opts.rect.h = opts.resize()
        # y = self.rect.y
        # c = self.container
        # while hasattr(c, 'container'):
        #     y += c.rect.y
        #     if (not c.container):
        #         break
        #     c = c.container
        # if y + self.rect.h + opts.rect.h <= c.rect.h: #down
        #     dy = self.rect.y + self.rect.h
        # else: #up
        #     dy = self.rect.y - self.rect.h
        opts.rect.w, opts.rect.h = opts.resize()
        # TODO - make sure there is enough space to open down
        # ...
        yp = self.rect.bottom-1
        self.container.open(opts, self.rect.x, yp)
        self.firstOption.focus()
        # TODO - this is a hack
        for opt in self.options.widgets:
            opt.repaint()

    def _close(self,value):
        """Close the dropdown and return focus to the selected-item button."""
        self.options.close()
        self.top_selected.focus()

    def _setvalue(self,value):
        """CLICK handler for an option: adopt its value and close the list."""
        self.value = value._value
        if self.container:
            #self.chsize()
            #HACK: improper use of resize()
            #self.resize() #to recenter the new value, etc.
            pass
            # #self._resize()
        self._close(None)
        #self.repaint() #this will happen anyways

    @property
    def value(self):
        # The raw value associated with the selected option.
        return self._value

    @value.setter
    def value(self, val):
        # Find the option widget whose value matches, fire CHANGE on a real
        # change, and show the matching widget (or a blank label) on top.
        mywidget = None
        for w in self.values:
            if w._value == val:
                mywidget = w
        oldval = self._value
        self._value = val
        if (oldval != val):
            self.send(CHANGE)
            self.repaint()
        if not mywidget:
            mywidget = Label(" ",cls=self.cls+".option.label")
        self.top_selected.value = mywidget

    def add(self,w,value=None):
        """Add a widget and associated value to the dropdown box."""
        if type(w) == str: w = Label(w,cls=self.cls+".option.label")
        w.style.align = -1
        btn = Button(w,cls=self.cls+".option")
        btn.connect(CLICK,self._setvalue,w)
        self.options.tr()
        self.options.add(btn)
        if (not self.firstOption):
            self.firstOption = btn
        # When no explicit value is given, the widget itself is the value.
        if value != None: w._value = value
        else: w._value = w
        if self.value == w._value:
            self.top_selected.value = w
        self.values.append(w)
| gpl-2.0 |
s40523243/2016fallcp_hw | plugin/liquid_tags/diag.py | 270 | 4096 | """
Blockdiag Tag
---------
This tag implements a liquid style tag for blockdiag [1]. You can use different
diagram types like blockdiag, seqdiag, packetdiag etc. [1]
[1] http://blockdiag.com/en/blockdiag/
Syntax
------
{% blockdiag {
<diagramm type> {
<CODE>
}
}
%}
Examples
--------
{% blockdiag {
blockdiag {
A -> B -> C;
B -> D;
}
}
%}
{% blockdiag {
actdiag {
A -> B -> C -> D -> E;
lane {
A; C; E;
}
lane {
B; D;
}
}
}
%}
{% blockdiag {
packetdiag {
0-7: Source Port
8-15: Destination Port
16-31: Sequence Number
32-47: Acknowledgment Number
}
}
%}
...
Output
------
<div class="blockdiag" style="align: center;"><img src="data:image/png;base64,_BASE64_IMAGE DATA_/></div>
"""
import io
import os
import sys
import base64
import re
from .mdx_liquid_tags import LiquidTags
SYNTAX = '{% blockdiag [diagram type] [code] %}'
# The first word of the tag body names the diagram type (blockdiag, seqdiag, ...).
DOT_BLOCK_RE = re.compile(r'^\s*(?P<diagram>\w+).*$', re.MULTILINE | re.DOTALL)
# Image format passed to the renderer / expected on disk afterwards.
_draw_mode = 'PNG'
_publish_mode = 'PNG'


def get_diag(code, command):
    """Render diagram source *code* with *command* (a blockdiag-family module).

    The source is written to a temp file, the tool's CLI entry point is
    invoked on it, and the produced image is returned as raw bytes.  The
    scratch directory is always removed.
    """
    import tempfile
    import shutil

    code = code + u'\n'
    # Create the scratch dir before the try block so the finally clause
    # never references an unbound name if mkdtemp itself fails.
    tmpdir = tempfile.mkdtemp()
    try:
        fd, diag_name = tempfile.mkstemp(dir=tmpdir)
        # Write bytes so this works on both Python 2 and 3.
        f = os.fdopen(fd, "wb")
        f.write(code.encode('utf-8'))
        f.close()
        format = _draw_mode.lower()
        draw_name = diag_name + '.' + format
        argv = [diag_name, '-T', format, '-o', draw_name]
        if _draw_mode == 'SVG':
            argv += ['--ignore-pil']
        # Run the renderer's CLI with an explicit argv (sys.argv untouched).
        command.main(argv)
        # Read image data from file
        file_name = diag_name + '.' + _publish_mode.lower()
        with io.open(file_name, 'rb') as f:
            data = f.read()
    finally:
        # Remove the scratch directory and everything inside it.
        shutil.rmtree(tmpdir)
    return data
def diag(code, command):
    """Render *code* with the tool named by *command*; None if unknown."""
    # Map tag keyword -> package providing a .command CLI module.
    # 'diagram' is an alias for plain blockdiag.
    renderer_packages = {
        'blockdiag': 'blockdiag',
        'diagram': 'blockdiag',
        'seqdiag': 'seqdiag',
        'actdiag': 'actdiag',
        'nwdiag': 'nwdiag',
        'packetdiag': 'packetdiag',
        'rackdiag': 'rackdiag',
    }
    package = renderer_packages.get(command)
    if package is None:
        print("No such command %s" % command)
        return None
    # Import <package>.command lazily, exactly like the original per-branch
    # imports did.
    module = __import__('%s.command' % package, fromlist=['command'])
    return get_diag(code, module)
@LiquidTags.register("blockdiag")
def blockdiag_parser(preprocessor, tag, markup):
    """Blockdiag parser: turn tag markup into an inline base64 <img> div.

    Returns None when rendering produced no output; raises ValueError when
    the markup does not match the expected tag syntax.
    """
    m = DOT_BLOCK_RE.search(markup)
    if m:
        # Get diagram type and code
        diagram = m.group('diagram').strip()
        code = markup
        # Run command
        output = diag(code, diagram)
        if output:
            # b64encode() returns bytes on Python 3; decode so the %s
            # interpolation below doesn't emit a literal "b'...'".
            encoded = base64.b64encode(output).decode('ascii')
            # Return Base64 encoded image
            return '<div class="blockdiag" style="align: center;"><img src="data:image/png;base64,%s"></div>' % encoded
    else:
        raise ValueError('Error processing input. '
                         'Expected syntax: {0}'.format(SYNTAX))
# This import allows image tag to be a Pelican plugin
from .liquid_tags import register
| agpl-3.0 |
CookiesandCake/namebench | nb_third_party/dns/rdtypes/ANY/CERT.py | 248 | 4263 | # Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import cStringIO
import struct
import dns.exception
import dns.dnssec
import dns.rdata
import dns.tokenizer
_ctype_by_value = {
1 : 'PKIX',
2 : 'SPKI',
3 : 'PGP',
253 : 'URI',
254 : 'OID',
}
_ctype_by_name = {
'PKIX' : 1,
'SPKI' : 2,
'PGP' : 3,
'URI' : 253,
'OID' : 254,
}
def _ctype_from_text(what):
v = _ctype_by_name.get(what)
if not v is None:
return v
return int(what)
def _ctype_to_text(what):
v = _ctype_by_value.get(what)
if not v is None:
return v
return str(what)
class CERT(dns.rdata.Rdata):
    """CERT record
    @ivar certificate_type: certificate type
    @type certificate_type: int
    @ivar key_tag: key tag
    @type key_tag: int
    @ivar algorithm: algorithm
    @type algorithm: int
    @ivar certificate: the certificate or CRL
    @type certificate: string
    @see: RFC 2538"""

    # NOTE(review): this module is Python 2 era throughout — cStringIO,
    # 'base64_codec' str decoding and cmp() below do not exist on Python 3.

    __slots__ = ['certificate_type', 'key_tag', 'algorithm', 'certificate']

    def __init__(self, rdclass, rdtype, certificate_type, key_tag, algorithm,
                 certificate):
        super(CERT, self).__init__(rdclass, rdtype)
        self.certificate_type = certificate_type
        self.key_tag = key_tag
        self.algorithm = algorithm
        self.certificate = certificate

    def to_text(self, origin=None, relativize=True, **kw):
        # Presentation format: "<type> <key_tag> <algorithm> <base64 cert>".
        certificate_type = _ctype_to_text(self.certificate_type)
        return "%s %d %s %s" % (certificate_type, self.key_tag,
                                dns.dnssec.algorithm_to_text(self.algorithm),
                                dns.rdata._base64ify(self.certificate))

    def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
        # Parse the fixed fields, then gather base64 chunks until end of line.
        certificate_type = _ctype_from_text(tok.get_string())
        key_tag = tok.get_uint16()
        algorithm = dns.dnssec.algorithm_from_text(tok.get_string())
        if algorithm < 0 or algorithm > 255:
            raise dns.exception.SyntaxError("bad algorithm type")
        chunks = []
        while 1:
            t = tok.get().unescape()
            if t.is_eol_or_eof():
                break
            if not t.is_identifier():
                raise dns.exception.SyntaxError
            chunks.append(t.value)
        b64 = ''.join(chunks)
        certificate = b64.decode('base64_codec')
        return cls(rdclass, rdtype, certificate_type, key_tag,
                   algorithm, certificate)
    from_text = classmethod(from_text)

    def to_wire(self, file, compress = None, origin = None):
        # Wire format: 2-byte type, 2-byte key tag, 1-byte algorithm, cert.
        prefix = struct.pack("!HHB", self.certificate_type, self.key_tag,
                             self.algorithm)
        file.write(prefix)
        file.write(self.certificate)

    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
        # The 5-byte fixed prefix must fit inside the rdata length.
        prefix = wire[current : current + 5]
        current += 5
        rdlen -= 5
        if rdlen < 0:
            raise dns.exception.FormError
        (certificate_type, key_tag, algorithm) = struct.unpack("!HHB", prefix)
        certificate = wire[current : current + rdlen]
        return cls(rdclass, rdtype, certificate_type, key_tag, algorithm,
                   certificate)
    from_wire = classmethod(from_wire)

    def _cmp(self, other):
        # Compare by serialized wire form (DNSSEC canonical ordering).
        f = cStringIO.StringIO()
        self.to_wire(f)
        wire1 = f.getvalue()
        f.seek(0)
        f.truncate()
        other.to_wire(f)
        wire2 = f.getvalue()
        f.close()
        return cmp(wire1, wire2)
| apache-2.0 |
dealien/Red-Magician | cogs/roller.py | 1 | 4118 | import discord
from discord.ext import commands
import random
class Roller:
    """Rolling dices, the right way"""

    def __init__(self, bot):
        self.bot = bot
        # Result of the most recent roll, replayed by the `last` command.
        self.roll_arr = []

    @staticmethod
    def _is_number(s):
        """Return True when *s* parses as an int (shared input validation)."""
        try:
            int(s)
            return True
        except ValueError:
            return False

    # Dice rolling function
    def roll_dice(self, count, dice, mod, result):
        """Roll *count* dice with *dice* sides, adding *mod* to every roll.

        Rolling the maximum face grants one bonus die (handled recursively);
        the finished list is stored in self.roll_arr.
        """
        extra_rolls = 0
        # perform rolls
        for _ in range(count):
            roll = random.randint(1, dice)
            # A max roll earns a bonus die -- but only for dice with more than
            # one side, otherwise every roll is a max roll and the recursion
            # would never terminate (the old code hung on e.g. 1-sided dice).
            if roll == dice and dice > 1:
                extra_rolls += 1
            # add roll modifier
            roll += mod
            result.append(roll)
        # roll extra
        if extra_rolls > 0:
            self.roll_dice(extra_rolls, dice, mod, result)
        else:
            self.roll_arr = result

    @commands.command(pass_context=True)
    async def rd(self, ctx, count=4, dice=20, mod=0):
        """
        Rolls dices in the XdY+MOD style
        By default rolls 4d20 with MOD=0
        """
        # Check if provided with numbers
        if self._is_number(count) and self._is_number(dice) and self._is_number(mod):
            # check if numbers are correct
            if 0 < int(count) <= 50 and 0 < int(dice) <= 50 and int(mod) >= 0:
                # Delete old roll
                self.roll_arr = []
                # Roll the dice
                self.roll_dice(int(count), int(dice), int(mod), [])
                message = "**[" + "]** **[".join(str(roll) for roll in self.roll_arr) + "]**"
                await self.bot.say("You rolled: \n" + message)
            else:
                # Message now matches the actual check (mod may be zero).
                await self.bot.say("Dice and side amount should be in range from 0 to 50. Mod should be >= 0")
        else:
            await self.bot.say(
                "Please provide numbers in format <amount of dices> <number of sides> <modifier>\nMax value is 50")

    @commands.command(pass_context=True)
    async def rds(self, ctx, count=4, dice=20, mod=0, success=10):
        """
        Check if was successfull based on a success rate
        By default rolls 4d20 with MOD=0 and checks for 10 as a success value
        """
        # Check if provided with numbers
        if (self._is_number(count) and self._is_number(dice)
                and self._is_number(mod) and self._is_number(success)):
            # check if numbers are correct
            if (0 < int(count) <= 50 and 0 < int(dice) <= 50
                    and int(mod) >= 0 and int(success) > 0):
                # Delete old roll
                self.roll_arr = []
                # Roll the dice
                self.roll_dice(int(count), int(dice), int(mod), [])
                # A roll succeeds when at least half of the dice meet the
                # success threshold.
                passed = sum(1 for roll in self.roll_arr if roll >= int(success))
                if passed >= int(len(self.roll_arr) / 2):
                    await self.bot.say("Success! (" + " ".join(str(roll) for roll in self.roll_arr) + ")")
                else:
                    await self.bot.say("Fail! (" + " ".join(str(roll) for roll in self.roll_arr) + ")")
            else:
                # Message now matches the actual checks.
                await self.bot.say(
                    "Dice and side amount should be in range from 0 to 50. Mod should be >= 0 and success should be > 0")
        else:
            await self.bot.say(
                "Please provide numbers in format <amount of dices> <number of sides> <modifier> <success threshold>\nMax value is 50")

    @commands.command(pass_context=True)
    async def last(self, ctx):
        """Shows last roll"""
        await self.bot.say("Last roll:\n**[" + "]** **[".join(str(roll) for roll in self.roll_arr) + "]**")
def setup(bot):
    # discord.py extension entry point: attach the Roller cog to the bot.
    bot.add_cog(Roller(bot))
| gpl-3.0 |
Greennut/ostproject | django/contrib/gis/db/models/proxy.py | 404 | 2512 | """
The GeometryProxy object, allows for lazy-geometries. The proxy uses
Python descriptors for instantiating and setting Geometry objects
corresponding to geographic model fields.
Thanks to Robert Coup for providing this functionality (see #4322).
"""
class GeometryProxy(object):
    def __init__(self, klass, field):
        """
        Proxy initializes on the given Geometry class (not an instance) and
        the GeometryField.
        """
        self._field = field
        self._klass = klass

    def __get__(self, obj, type=None):
        """
        This accessor retrieves the geometry, initializing it using the geometry
        class specified during initialization and the HEXEWKB value of the field.
        Currently, only GEOS or OGR geometries are supported.
        """
        if obj is None:
            # Accessed on a class, not an instance
            return self
        # Getting the value of the field.
        geom_value = obj.__dict__[self._field.attname]
        if isinstance(geom_value, self._klass):
            geom = geom_value
        elif (geom_value is None) or (geom_value==''):
            geom = None
        else:
            # Otherwise, a Geometry object is built using the field's contents,
            # and the model's corresponding attribute is set.
            geom = self._klass(geom_value)
            setattr(obj, self._field.attname, geom)
        return geom

    def __set__(self, obj, value):
        """
        This accessor sets the proxied geometry with the geometry class
        specified during initialization.  Values of None, HEXEWKB, or WKT may
        be used to set the geometry as well.
        """
        # The OGC Geometry type of the field.
        gtype = self._field.geom_type
        # The geometry type must match that of the field -- unless the
        # general GeometryField is used.
        if isinstance(value, self._klass) and (str(value.geom_type).upper() == gtype or gtype == 'GEOMETRY'):
            # Assigning the SRID to the geometry.
            if value.srid is None: value.srid = self._field.srid
        elif value is None or isinstance(value, (basestring, buffer)):
            # Set with None, WKT, HEX, or WKB
            # NOTE(review): basestring/buffer are Python 2 names; this file
            # predates Python 3 support in Django's GIS layer.
            pass
        else:
            raise TypeError('cannot set %s GeometryProxy with value of type: %s' % (obj.__class__.__name__, type(value)))
        # Setting the objects dictionary with the value, and returning.
        obj.__dict__[self._field.attname] = value
        return value
| bsd-3-clause |
fnordahl/nova | nova/tests/unit/virt/libvirt/volume/test_fibrechannel.py | 33 | 1894 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import platform
import mock
from os_brick.initiator import connector
from nova.compute import arch
from nova.tests.unit.virt.libvirt.volume import test_volume
from nova.virt.libvirt.volume import fibrechannel
class LibvirtFibreChannelVolumeDriverTestCase(
        test_volume.LibvirtVolumeBaseTestCase):
    # Verifies that the FC volume driver selects the right os-brick connector
    # for the host architecture.
    def test_libvirt_fibrechan_driver(self):
        """On the default (non-s390) host arch a FibreChannelConnector is used."""
        libvirt_driver = fibrechannel.LibvirtFibreChannelVolumeDriver(
            self.fake_conn)
        self.assertIsInstance(libvirt_driver.connector,
                              connector.FibreChannelConnector)
    def _test_libvirt_fibrechan_driver_s390(self):
        # Helper: callers patch platform.machine to an s390 arch first; the
        # driver must then pick the s390x-specific connector.
        libvirt_driver = fibrechannel.LibvirtFibreChannelVolumeDriver(
            self.fake_conn)
        self.assertIsInstance(libvirt_driver.connector,
                              connector.FibreChannelConnectorS390X)
    @mock.patch.object(platform, 'machine', return_value=arch.S390)
    def test_libvirt_fibrechan_driver_s390(self, mock_machine):
        self._test_libvirt_fibrechan_driver_s390()
    @mock.patch.object(platform, 'machine', return_value=arch.S390X)
    def test_libvirt_fibrechan_driver_s390x(self, mock_machine):
        self._test_libvirt_fibrechan_driver_s390()
| apache-2.0 |
394954369/horizon | openstack_dashboard/dashboards/identity/groups/tests.py | 41 | 8437 | # Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django import http
from mox import IgnoreArg # noqa
from mox import IsA # noqa
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
from openstack_dashboard.dashboards.identity.groups import constants
GROUPS_INDEX_URL = reverse(constants.GROUPS_INDEX_URL)
GROUP_CREATE_URL = reverse(constants.GROUPS_CREATE_URL)
GROUP_UPDATE_URL = reverse(constants.GROUPS_UPDATE_URL, args=[1])
GROUP_MANAGE_URL = reverse(constants.GROUPS_MANAGE_URL, args=[1])
GROUP_ADD_MEMBER_URL = reverse(constants.GROUPS_ADD_MEMBER_URL, args=[1])
class GroupsViewTests(test.BaseAdminViewTests):
    """Tests for the identity Groups panel: index, create, update, delete,
    and group-membership management, using mox-stubbed keystone calls.
    """
    def _get_domain_id(self):
        """Return the domain id set in the test session, or None."""
        return self.request.session.get('domain_context', None)
    def _get_groups(self, domain_id):
        """Return the fixture groups, filtered by domain when one is set."""
        if not domain_id:
            groups = self.groups.list()
        else:
            groups = [group for group in self.groups.list()
                      if group.domain_id == domain_id]
        return groups
    @test.create_stubs({api.keystone: ('group_list',)})
    def test_index(self):
        """Index lists groups and exposes the row/table actions."""
        domain_id = self._get_domain_id()
        groups = self._get_groups(domain_id)
        # Record the expected keystone call, then switch mox to replay mode.
        api.keystone.group_list(IgnoreArg(), domain=domain_id) \
            .AndReturn(groups)
        self.mox.ReplayAll()
        res = self.client.get(GROUPS_INDEX_URL)
        self.assertTemplateUsed(res, constants.GROUPS_INDEX_VIEW_TEMPLATE)
        self.assertItemsEqual(res.context['table'].data, groups)
        if domain_id:
            for group in res.context['table'].data:
                self.assertItemsEqual(group.domain_id, domain_id)
        self.assertContains(res, 'Create Group')
        self.assertContains(res, 'Edit')
        self.assertContains(res, 'Delete Group')
    def test_index_with_domain(self):
        """Re-run the index test with a domain context in the session."""
        domain = self.domains.get(id="1")
        self.setSessionValues(domain_context=domain.id,
                              domain_context_name=domain.name)
        self.test_index()
    @test.create_stubs({api.keystone: ('group_list',
                                       'keystone_can_edit_group')})
    def test_index_with_keystone_can_edit_group_false(self):
        """Actions are hidden when keystone forbids group edits."""
        domain_id = self._get_domain_id()
        groups = self._get_groups(domain_id)
        api.keystone.group_list(IgnoreArg(), domain=domain_id) \
            .AndReturn(groups)
        api.keystone.keystone_can_edit_group() \
            .MultipleTimes().AndReturn(False)
        self.mox.ReplayAll()
        res = self.client.get(GROUPS_INDEX_URL)
        self.assertTemplateUsed(res, constants.GROUPS_INDEX_VIEW_TEMPLATE)
        self.assertItemsEqual(res.context['table'].data, groups)
        self.assertNotContains(res, 'Create Group')
        self.assertNotContains(res, 'Edit')
        self.assertNotContains(res, 'Delete Group')
    @test.create_stubs({api.keystone: ('group_create', )})
    def test_create(self):
        """Posting the create form calls group_create with the form data."""
        domain_id = self._get_domain_id()
        group = self.groups.get(id="1")
        api.keystone.group_create(IsA(http.HttpRequest),
                                  description=group.description,
                                  domain_id=domain_id,
                                  name=group.name).AndReturn(group)
        self.mox.ReplayAll()
        formData = {'method': 'CreateGroupForm',
                    'name': group.name,
                    'description': group.description}
        res = self.client.post(GROUP_CREATE_URL, formData)
        self.assertNoFormErrors(res)
        self.assertMessageCount(success=1)
    def test_create_with_domain(self):
        """Re-run the create test with a domain context in the session."""
        domain = self.domains.get(id="1")
        self.setSessionValues(domain_context=domain.id,
                              domain_context_name=domain.name)
        self.test_create()
    @test.create_stubs({api.keystone: ('group_get',
                                       'group_update')})
    def test_update(self):
        """Posting the update form calls group_update with changed fields."""
        group = self.groups.get(id="1")
        test_description = 'updated description'
        api.keystone.group_get(IsA(http.HttpRequest), '1').AndReturn(group)
        api.keystone.group_update(IsA(http.HttpRequest),
                                  description=test_description,
                                  group_id=group.id,
                                  name=group.name).AndReturn(None)
        self.mox.ReplayAll()
        formData = {'method': 'UpdateGroupForm',
                    'group_id': group.id,
                    'name': group.name,
                    'description': test_description}
        res = self.client.post(GROUP_UPDATE_URL, formData)
        self.assertNoFormErrors(res)
    @test.create_stubs({api.keystone: ('group_list',
                                       'group_delete')})
    def test_delete_group(self):
        """The table delete action calls group_delete and redirects to index."""
        domain_id = self._get_domain_id()
        group = self.groups.get(id="2")
        api.keystone.group_list(IgnoreArg(), domain=domain_id) \
            .AndReturn(self.groups.list())
        api.keystone.group_delete(IgnoreArg(), group.id)
        self.mox.ReplayAll()
        formData = {'action': 'groups__delete__%s' % group.id}
        res = self.client.post(GROUPS_INDEX_URL, formData)
        self.assertRedirectsNoFollow(res, GROUPS_INDEX_URL)
    @test.create_stubs({api.keystone: ('group_get',
                                       'user_list',)})
    def test_manage(self):
        """The manage-members page lists the group's current users."""
        group = self.groups.get(id="1")
        group_members = self.users.list()
        api.keystone.group_get(IsA(http.HttpRequest), group.id).\
            AndReturn(group)
        api.keystone.user_list(IgnoreArg(),
                               group=group.id).\
            AndReturn(group_members)
        self.mox.ReplayAll()
        res = self.client.get(GROUP_MANAGE_URL)
        self.assertTemplateUsed(res, constants.GROUPS_MANAGE_VIEW_TEMPLATE)
        self.assertItemsEqual(res.context['table'].data, group_members)
    @test.create_stubs({api.keystone: ('user_list',
                                       'remove_group_user')})
    def test_remove_user(self):
        """Removing a member calls remove_group_user and redirects back."""
        group = self.groups.get(id="1")
        user = self.users.get(id="2")
        api.keystone.user_list(IgnoreArg(),
                               group=group.id).\
            AndReturn(self.users.list())
        api.keystone.remove_group_user(IgnoreArg(),
                                       group_id=group.id,
                                       user_id=user.id)
        self.mox.ReplayAll()
        formData = {'action': 'group_members__removeGroupMember__%s' % user.id}
        res = self.client.post(GROUP_MANAGE_URL, formData)
        self.assertRedirectsNoFollow(res, GROUP_MANAGE_URL)
        self.assertMessageCount(success=1)
    @test.create_stubs({api.keystone: ('group_get',
                                       'user_list',
                                       'add_group_user')})
    def test_add_user(self):
        """Adding a non-member calls add_group_user and redirects to manage."""
        group = self.groups.get(id="1")
        user = self.users.get(id="2")
        api.keystone.group_get(IsA(http.HttpRequest), group.id).\
            AndReturn(group)
        api.keystone.user_list(IgnoreArg(),
                               domain=group.domain_id).\
            AndReturn(self.users.list())
        api.keystone.user_list(IgnoreArg(),
                               group=group.id).\
            AndReturn(self.users.list()[2:])
        api.keystone.add_group_user(IgnoreArg(),
                                    group_id=group.id,
                                    user_id=user.id)
        self.mox.ReplayAll()
        formData = {'action': 'group_non_members__addMember__%s' % user.id}
        res = self.client.post(GROUP_ADD_MEMBER_URL, formData)
        self.assertRedirectsNoFollow(res, GROUP_MANAGE_URL)
        self.assertMessageCount(success=1)
| apache-2.0 |
iulian787/spack | var/spack/repos/builtin/packages/py-workload-automation/package.py | 4 | 2174 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyWorkloadAutomation(PythonPackage):
    """Workload Automation (WA) is a framework for executing workloads and
    collecting measurements on Android and Linux devices."""
    homepage = "https://github.com/ARM-software/workload-automation"
    url = "https://github.com/ARM-software/workload-automation/archive/v3.2.tar.gz"
    version('3.2', sha256='a3db9df6a9e0394231560ebe6ba491a513f6309e096eaed3db6f4cb924c393ea')
    version('3.1.4', sha256='217fc33a3739d011a086315ef86b90cf332c16d1b03c9dcd60d58c9fd1f37f98')
    version('3.1.3', sha256='152470808cf8dad8a833fd7b2cb7d77cf8aa5d1af404e37fa0a4ff3b07b925b2')
    version('3.1.2', sha256='8226a6abc5cbd96e3f1fd6df02891237a06cdddb8b1cc8916f255fcde20d3069')
    version('3.1.1', sha256='32a19be92e43439637c68d9146f21bb7a0ae7b8652c11dfc4b4bd66d59329ad4')
    version('3.1.0', sha256='f00aeef7a1412144c4139c23b4c48583880ba2147207646d96359f1d295d6ac3')
    version('3.0.0', sha256='8564b0c67541e3a212363403ee090dfff5e4df85770959a133c0979445b51c3c')
    version('2.7.0', sha256='e9005b9db18e205bf6c4b3e09b15a118abeede73700897427565340dcd589fbb')
    version('2.6.0', sha256='b94341fb067592cebe0db69fcf7c00c82f96b4eb7c7210e34b38473869824cce')
    depends_on('py-setuptools', type='build')
    depends_on('py-python-dateutil', type=('build', 'run'))
    depends_on('py-pexpect@3.3:', type=('build', 'run'))
    depends_on('py-pyserial', type=('build', 'run'))
    depends_on('py-colorama', type=('build', 'run'))
    depends_on('py-pyyaml@5.1:', type=('build', 'run'))
    depends_on('py-requests', type=('build', 'run'))
    depends_on('py-wrapt', type=('build', 'run'))
    # NOTE(review): the split presumably reflects pandas >= 0.25 dropping
    # support for Python < 3.5.3 -- confirm before changing the pin.
    depends_on('py-pandas@0.23.0:', type=('build', 'run'), when='^python@3.5.3:')
    depends_on('py-pandas@0.23.0:0.24.2', type=('build', 'run'), when='^python@:3.5.2')
    depends_on('py-future', type=('build', 'run'))
    depends_on('py-louie', type=('build', 'run'))
    depends_on('py-devlib', type=('build', 'run'))
| lgpl-2.1 |
dawnpower/nova | nova/tests/functional/api/client.py | 7 | 13069 | # Copyright (c) 2011 Justin Santa Barbara
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import urllib
from oslo_log import log as logging
from oslo_serialization import jsonutils
import requests
from nova.tests.unit.image import fake
LOG = logging.getLogger(__name__)
class APIResponse(object):
    """Decoded API Response.

    Wraps a ``requests`` response so tests can inspect the HTTP status,
    raw body, decoded JSON body, and headers directly.  All JSON responses
    from Nova APIs are dictionary compatible (or blank), so no other base
    classes are needed.
    """

    # Class-level defaults; __init__ overrides them per instance.
    status = 200        # HTTP status code as an int
    content = ""        # raw HTTP response body as a string
    body = {}           # decoded json body as a dictionary
    headers = {}        # response headers as a dictionary

    def __init__(self, response):
        """Construct an API response from a Requests response.

        :param response: a ``requests`` library response
        """
        super(APIResponse, self).__init__()
        raw = response.content
        self.status = response.status_code
        self.content = raw
        self.headers = response.headers
        # Only decode when there is a body; empty responses keep the
        # class-level default of {}.
        if raw:
            self.body = jsonutils.loads(raw)

    def __str__(self):
        # repr() the body so nested structures print unambiguously.
        return "<Response body:%r, status_code:%s>" % (self.body, self.status)
class OpenStackApiException(Exception):
    """Base error for OpenStack API failures.

    When a response is supplied, its status code and body are appended to
    the message and the response is kept on ``self.response`` for callers.
    """

    def __init__(self, message=None, response=None):
        self.response = response
        message = message or 'Unspecified error'
        if response:
            message = ('%(message)s\nStatus Code: %(_status)s\n'
                       'Body: %(_body)s' %
                       {'message': message,
                        '_status': response.status_code,
                        '_body': response.content})
        super(OpenStackApiException, self).__init__(message)
class OpenStackApiAuthenticationException(OpenStackApiException):
    """Raised when the API rejects the supplied credentials (HTTP 401)."""

    def __init__(self, response=None, message=None):
        super(OpenStackApiAuthenticationException, self).__init__(
            message or "Authentication error", response)
class OpenStackApiAuthorizationException(OpenStackApiException):
    """Raised when an authenticated request is not permitted (HTTP 401)."""

    def __init__(self, response=None, message=None):
        super(OpenStackApiAuthorizationException, self).__init__(
            message or "Authorization error", response)
class OpenStackApiNotFoundException(OpenStackApiException):
    """Raised when the requested resource does not exist (HTTP 404)."""

    def __init__(self, response=None, message=None):
        super(OpenStackApiNotFoundException, self).__init__(
            message or "Item not found", response)
class TestOpenStackClient(object):
    """Simple OpenStack API Client.

    This is a really basic OpenStack API client that is under our control,
    so we can make changes / insert hooks for testing.
    """

    def __init__(self, auth_user, auth_key, auth_uri):
        super(TestOpenStackClient, self).__init__()
        self.auth_result = None
        self.auth_user = auth_user
        self.auth_key = auth_key
        self.auth_uri = auth_uri
        # default project_id
        self.project_id = 'openstack'

    def request(self, url, method='GET', body=None, headers=None):
        """Perform a raw HTTP request, defaulting the JSON content type."""
        _headers = {'Content-Type': 'application/json'}
        _headers.update(headers or {})

        response = requests.request(method, url, data=body, headers=_headers)
        return response

    def _authenticate(self):
        """Authenticate once against auth_uri and cache the result headers."""
        if self.auth_result:
            return self.auth_result

        auth_uri = self.auth_uri
        headers = {'X-Auth-User': self.auth_user,
                   'X-Auth-Key': self.auth_key,
                   'X-Auth-Project-Id': self.project_id}
        response = self.request(auth_uri,
                                headers=headers)

        http_status = response.status_code
        LOG.debug("%(auth_uri)s => code %(http_status)s",
                  {'auth_uri': auth_uri, 'http_status': http_status})

        if http_status == 401:
            raise OpenStackApiAuthenticationException(response=response)

        self.auth_result = response.headers
        return self.auth_result

    def api_request(self, relative_uri, check_response_status=None,
                    strip_version=False, **kwargs):
        """Issue an authenticated request relative to the management URL.

        :param relative_uri: path appended to the management URL
        :param check_response_status: optional list of acceptable HTTP status
            codes; any other status raises an OpenStackApi*Exception
        :param strip_version: drop the version/tenant suffix from the
            management URL before appending ``relative_uri``
        """
        auth_result = self._authenticate()

        # NOTE(justinsb): httplib 'helpfully' converts headers to lower case
        base_uri = auth_result['x-server-management-url']

        if strip_version:
            # NOTE(vish): cut out version number and tenant_id
            base_uri = '/'.join(base_uri.split('/', 3)[:-1])

        full_uri = '%s/%s' % (base_uri, relative_uri)

        headers = kwargs.setdefault('headers', {})
        headers['X-Auth-Token'] = auth_result['x-auth-token']

        response = self.request(full_uri, **kwargs)

        http_status = response.status_code
        LOG.debug("%(relative_uri)s => code %(http_status)s",
                  {'relative_uri': relative_uri, 'http_status': http_status})

        if check_response_status:
            if http_status not in check_response_status:
                if http_status == 404:
                    raise OpenStackApiNotFoundException(response=response)
                elif http_status == 401:
                    raise OpenStackApiAuthorizationException(response=response)
                else:
                    raise OpenStackApiException(
                        message="Unexpected status code",
                        response=response)

        return response

    def _decode_json(self, response):
        """Wrap a raw response in an APIResponse, decoding any JSON body."""
        # BUG FIX: this previously called APIResponse(status=...), but
        # APIResponse.__init__ has no 'status' keyword argument, so the
        # helper raised TypeError whenever it was invoked.  APIResponse
        # already extracts status, content, headers, and the decoded body
        # from the raw response itself.
        return APIResponse(response)

    def api_get(self, relative_uri, **kwargs):
        """GET a resource; expects a 200 response."""
        kwargs.setdefault('check_response_status', [200])
        return APIResponse(self.api_request(relative_uri, **kwargs))

    def api_post(self, relative_uri, body, **kwargs):
        """POST a JSON body; expects a 200 or 202 response."""
        kwargs['method'] = 'POST'
        if body:
            headers = kwargs.setdefault('headers', {})
            headers['Content-Type'] = 'application/json'
            kwargs['body'] = jsonutils.dumps(body)

        kwargs.setdefault('check_response_status', [200, 202])
        return APIResponse(self.api_request(relative_uri, **kwargs))

    def api_put(self, relative_uri, body, **kwargs):
        """PUT a JSON body; expects a 200, 202 or 204 response."""
        kwargs['method'] = 'PUT'
        if body:
            headers = kwargs.setdefault('headers', {})
            headers['Content-Type'] = 'application/json'
            kwargs['body'] = jsonutils.dumps(body)

        kwargs.setdefault('check_response_status', [200, 202, 204])
        return APIResponse(self.api_request(relative_uri, **kwargs))

    def api_delete(self, relative_uri, **kwargs):
        """DELETE a resource; expects a 200, 202 or 204 response."""
        kwargs['method'] = 'DELETE'
        kwargs.setdefault('check_response_status', [200, 202, 204])
        return APIResponse(self.api_request(relative_uri, **kwargs))

    #####################################
    #
    # Convenience methods
    #
    # The following are a set of convenience methods to get well known
    # resources, they can be helpful in setting up resources in
    # tests. All of these convenience methods throw exceptions if they
    # get a non 20x status code, so will appropriately abort tests if
    # they fail.
    #
    # They all return the most relevant part of their response body as
    # decoded data structure.
    #
    #####################################

    def get_server(self, server_id):
        return self.api_get('/servers/%s' % server_id).body['server']

    def get_servers(self, detail=True, search_opts=None):
        rel_url = '/servers/detail' if detail else '/servers'

        if search_opts is not None:
            qparams = {}
            for opt, val in search_opts.iteritems():
                qparams[opt] = val
            if qparams:
                query_string = "?%s" % urllib.urlencode(qparams)
                rel_url += query_string
        return self.api_get(rel_url).body['servers']

    def post_server(self, server):
        # Multi-create returns a reservation_id instead of a server body.
        response = self.api_post('/servers', server).body
        if 'reservation_id' in response:
            return response
        else:
            return response['server']

    def put_server(self, server_id, server):
        return self.api_put('/servers/%s' % server_id, server).body

    def post_server_action(self, server_id, data):
        return self.api_post('/servers/%s/action' % server_id, data).body

    def delete_server(self, server_id):
        return self.api_delete('/servers/%s' % server_id)

    def get_image(self, image_id):
        return self.api_get('/images/%s' % image_id).body['image']

    def get_images(self, detail=True):
        rel_url = '/images/detail' if detail else '/images'
        return self.api_get(rel_url).body['images']

    def post_image(self, image):
        return self.api_post('/images', image).body['image']

    def delete_image(self, image_id):
        return self.api_delete('/images/%s' % image_id)

    def get_flavor(self, flavor_id):
        return self.api_get('/flavors/%s' % flavor_id).body['flavor']

    def get_flavors(self, detail=True):
        rel_url = '/flavors/detail' if detail else '/flavors'
        return self.api_get(rel_url).body['flavors']

    def post_flavor(self, flavor):
        return self.api_post('/flavors', flavor).body['flavor']

    def delete_flavor(self, flavor_id):
        return self.api_delete('/flavors/%s' % flavor_id)

    def post_extra_spec(self, flavor_id, spec):
        return self.api_post('/flavors/%s/os-extra_specs' %
                             flavor_id, spec)

    def get_volume(self, volume_id):
        return self.api_get('/volumes/%s' % volume_id).body['volume']

    def get_volumes(self, detail=True):
        rel_url = '/volumes/detail' if detail else '/volumes'
        return self.api_get(rel_url).body['volumes']

    def post_volume(self, volume):
        return self.api_post('/volumes', volume).body['volume']

    def delete_volume(self, volume_id):
        return self.api_delete('/volumes/%s' % volume_id)

    def get_server_volume(self, server_id, attachment_id):
        return self.api_get('/servers/%s/os-volume_attachments/%s' %
                            (server_id, attachment_id)
                            ).body['volumeAttachment']

    def get_server_volumes(self, server_id):
        return self.api_get('/servers/%s/os-volume_attachments' %
                            (server_id)).body['volumeAttachments']

    def post_server_volume(self, server_id, volume_attachment):
        return self.api_post('/servers/%s/os-volume_attachments' %
                             (server_id), volume_attachment
                             ).body['volumeAttachment']

    def delete_server_volume(self, server_id, attachment_id):
        return self.api_delete('/servers/%s/os-volume_attachments/%s' %
                               (server_id, attachment_id))
class TestOpenStackClientV3(TestOpenStackClient):
    """Simple OpenStack v3 API Client.
    This is a really basic OpenStack API client that is under our control,
    so we can make changes / insert hooks for testing.
    Note that the V3 API does not have an image API and so it is
    not possible to query the api for the image information.
    So instead we just access the fake image service used by the unittests
    directly.
    """
    def get_image(self, image_id):
        # Bypass HTTP entirely; read from the in-process fake image service.
        return fake._fakeImageService.show(None, image_id)
    def get_images(self, detail=True):
        # `detail` is accepted for interface parity with the base class but
        # has no effect here.
        return fake._fakeImageService.detail(None)
    def post_image(self, image):
        # No create path exists through the fake image service.
        raise NotImplementedError
    def delete_image(self, image_id):
        return fake._fakeImageService.delete(None, image_id)
class TestOpenStackClientV3Mixin(object):
    # Mixin for test cases: builds a v3 client against the test's auth_url.
    def _get_test_client(self):
        # NOTE(review): 'fake'/'fake' credentials appear to be accepted by
        # the functional-test auth stack -- confirm against the test setup.
        return TestOpenStackClientV3('fake', 'fake', self.auth_url)
| apache-2.0 |
i4Ds/IRE | IREMedia/libraries/OpenCV/samples/python2/watershed.py | 7 | 2096 | #!/usr/bin/env python
'''
Watershed segmentation
=========
This program demonstrates the watershed segmentation algorithm
in OpenCV: watershed().
Usage
-----
watershed.py [image filename]
Keys
----
1-7 - switch marker color
SPACE - update segmentation
r - reset
a - toggle autoupdate
ESC - exit
'''
import numpy as np
import cv2
from common import Sketcher
class App:
    # Interactive watershed demo (Python 2): the user sketches seed markers
    # on the image, then watershed() segments and overlays the result.
    def __init__(self, fn):
        self.img = cv2.imread(fn)
        h, w = self.img.shape[:2]
        # Marker labels per pixel; 0 = unlabelled.
        self.markers = np.zeros((h, w), np.int32)
        self.markers_vis = self.img.copy()
        self.cur_marker = 1
        # 8 colors: the corners of the RGB cube scaled to 0/255.
        self.colors = np.int32( list(np.ndindex(2, 2, 2)) ) * 255
        self.auto_update = True
        self.sketch = Sketcher('img', [self.markers_vis, self.markers], self.get_colors)
    def get_colors(self):
        # Returns (draw color, marker label) for the sketcher.
        return map(int, self.colors[self.cur_marker]), self.cur_marker
    def watershed(self):
        # Run on a copy -- cv2.watershed mutates its marker argument.
        m = self.markers.copy()
        cv2.watershed(self.img, m)
        overlay = self.colors[np.maximum(m, 0)]
        vis = cv2.addWeighted(self.img, 0.5, overlay, 0.5, 0.0, dtype=cv2.CV_8UC3)
        cv2.imshow('watershed', vis)
    def run(self):
        # Event loop: 1-7 select marker, SPACE updates, a toggles
        # auto-update, r resets, ESC (27) exits.
        while True:
            ch = 0xFF & cv2.waitKey(50)
            if ch == 27:
                break
            if ch >= ord('1') and ch <= ord('7'):
                self.cur_marker = ch - ord('0')
                print 'marker: ', self.cur_marker
            if ch == ord(' ') or (self.sketch.dirty and self.auto_update):
                self.watershed()
                self.sketch.dirty = False
            if ch in [ord('a'), ord('A')]:
                self.auto_update = not self.auto_update
                print 'auto_update if', ['off', 'on'][self.auto_update]
            if ch in [ord('r'), ord('R')]:
                self.markers[:] = 0
                self.markers_vis[:] = self.img
                self.sketch.show()
        cv2.destroyAllWindows()
cv2.destroyAllWindows()
if __name__ == '__main__':
    import sys
    # Use the image given on the command line, or fall back to the sample.
    # NOTE(review): the bare `except` also swallows SystemExit and
    # KeyboardInterrupt; catching IndexError would be safer.
    try: fn = sys.argv[1]
    except: fn = '../cpp/fruits.jpg'
    print __doc__
    App(fn).run()
| apache-2.0 |
ya7lelkom/googleads-python-lib | examples/dfp/v201505/product_service/get_all_products.py | 3 | 1704 | #!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example gets all products.
Products are created automatically from product templates.
Tags: ProductService.getProductsByStatement
"""
__author__ = 'Nicholas Chen'
# Import appropriate modules from the client library.
from googleads import dfp
def main(client):
    """Print every product, paging through results with getProductsByStatement."""
    # Initialize appropriate service.
    product_service = client.GetService('ProductService', version='v201505')
    # Create a filter statement.
    statement = dfp.FilterStatement('ORDER BY id ASC')
    # Get products by statement.
    # Page forward by SUGGESTED_PAGE_LIMIT until a page has no 'results'.
    while True:
        response = product_service.getProductsByStatement(statement.ToStatement())
        if 'results' in response:
            # Display results.
            for product in response['results']:
                print ('Product with id \'%s\' and name \'%s\' was found.' % (
                    product['id'], product['name']))
            statement.offset += dfp.SUGGESTED_PAGE_LIMIT
        else:
            break
    print '\nNumber of results found: %s' % response['totalResultSetSize']
if __name__ == '__main__':
    # Initialize client object.
    dfp_client = dfp.DfpClient.LoadFromStorage()
    main(dfp_client)
| apache-2.0 |
rkq/cxxexp | third-party/src/thrift-0.9.1/lib/py/src/transport/THttpClient.py | 157 | 4221 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import httplib
import os
import socket
import sys
import urllib
import urlparse
import warnings
from cStringIO import StringIO
from TTransport import *
class THttpClient(TTransportBase):
  """Http implementation of TTransport base."""

  def __init__(self, uri_or_host, port=None, path=None):
    """THttpClient supports two different types constructor parameters.

    THttpClient(host, port, path) - deprecated
    THttpClient(uri)

    Only the second supports https.
    """
    if port is not None:
      warnings.warn(
        "Please use the THttpClient('http://host:port/path') syntax",
        DeprecationWarning,
        stacklevel=2)
      self.host = uri_or_host
      self.port = port
      assert path
      self.path = path
      self.scheme = 'http'
    else:
      parsed = urlparse.urlparse(uri_or_host)
      self.scheme = parsed.scheme
      assert self.scheme in ('http', 'https')
      if self.scheme == 'http':
        self.port = parsed.port or httplib.HTTP_PORT
      elif self.scheme == 'https':
        self.port = parsed.port or httplib.HTTPS_PORT
      self.host = parsed.hostname
      self.path = parsed.path
      if parsed.query:
        self.path += '?%s' % parsed.query
    # Writes are buffered until flush() sends one POST per message.
    self.__wbuf = StringIO()
    self.__http = None
    self.__timeout = None
    self.__custom_headers = None

  def open(self):
    if self.scheme == 'http':
      self.__http = httplib.HTTP(self.host, self.port)
    else:
      self.__http = httplib.HTTPS(self.host, self.port)

  def close(self):
    self.__http.close()
    self.__http = None

  def isOpen(self):
    return self.__http is not None

  def setTimeout(self, ms):
    """Set the socket timeout in milliseconds (None disables it)."""
    if not hasattr(socket, 'getdefaulttimeout'):
      raise NotImplementedError
    if ms is None:
      self.__timeout = None
    else:
      self.__timeout = ms / 1000.0

  def setCustomHeaders(self, headers):
    self.__custom_headers = headers

  def read(self, sz):
    return self.__http.file.read(sz)

  def write(self, buf):
    self.__wbuf.write(buf)

  def __withTimeout(f):
    # Decorator (applied conditionally below): temporarily installs this
    # client's timeout as the process-wide socket default around the call.
    def _f(*args, **kwargs):
      orig_timeout = socket.getdefaulttimeout()
      socket.setdefaulttimeout(args[0].__timeout)
      result = f(*args, **kwargs)
      socket.setdefaulttimeout(orig_timeout)
      return result
    return _f

  def flush(self):
    # One fresh connection per message: close any open one and reopen.
    if self.isOpen():
      self.close()
    self.open()
    # Pull data out of buffer
    data = self.__wbuf.getvalue()
    self.__wbuf = StringIO()
    # HTTP request
    self.__http.putrequest('POST', self.path)
    # Write headers
    self.__http.putheader('Host', self.host)
    self.__http.putheader('Content-Type', 'application/x-thrift')
    self.__http.putheader('Content-Length', str(len(data)))
    if not self.__custom_headers or 'User-Agent' not in self.__custom_headers:
      user_agent = 'Python/THttpClient'
      script = os.path.basename(sys.argv[0])
      if script:
        user_agent = '%s (%s)' % (user_agent, urllib.quote(script))
      self.__http.putheader('User-Agent', user_agent)
    if self.__custom_headers:
        for key, val in self.__custom_headers.iteritems():
          self.__http.putheader(key, val)
    self.__http.endheaders()
    # Write payload
    self.__http.send(data)
    # Get reply to flush the request
    self.code, self.message, self.headers = self.__http.getreply()

  # Decorate if we know how to timeout
  if hasattr(socket, 'getdefaulttimeout'):
    flush = __withTimeout(flush)
| mit |
DominoTree/servo | tests/wpt/web-platform-tests/tools/third_party/attrs/tests/test_validators.py | 41 | 6577 | """
Tests for `attr.validators`.
"""
from __future__ import absolute_import, division, print_function
import pytest
import zope.interface
import attr
from attr import has
from attr import validators as validator_module
from attr._compat import TYPE
from attr.validators import and_, in_, instance_of, optional, provides
from .utils import simple_attr
class TestInstanceOf(object):
    """
    Tests for `instance_of`.
    """
    def test_success(self):
        """
        Nothing happens if types match.
        """
        v = instance_of(int)
        v(None, simple_attr("test"), 42)
    def test_subclass(self):
        """
        Subclasses are accepted too.
        """
        v = instance_of(int)
        # yep, bools are a subclass of int :(
        v(None, simple_attr("test"), True)
    def test_fail(self):
        """
        Raises `TypeError` on wrong types.
        """
        v = instance_of(int)
        a = simple_attr("test")
        with pytest.raises(TypeError) as e:
            v(None, a, "42")
        # The exception carries (message, attribute, expected type, value).
        # NOTE(review): TYPE presumably abstracts the "type"/"class" wording
        # difference between Python versions -- see attr._compat.
        assert (
            "'test' must be <{type} 'int'> (got '42' that is a <{type} "
            "'str'>).".format(type=TYPE),
            a, int, "42",
        ) == e.value.args
    def test_repr(self):
        """
        Returned validator has a useful `__repr__`.
        """
        v = instance_of(int)
        assert (
            "<instance_of validator for type <{type} 'int'>>"
            .format(type=TYPE)
        ) == repr(v)
def always_pass(_, __, ___):
    """Toy validator that accepts any value (never raises)."""
    return None
def always_fail(_, __, ___):
    """Toy validator that always fails by raising ZeroDivisionError."""
    1 // 0
class TestAnd(object):
    """
    Tests for `and_`.
    """
    def test_success(self):
        """
        Succeeds if all wrapped validators succeed.
        """
        v = and_(instance_of(int), always_pass)
        v(None, simple_attr("test"), 42)
    def test_fail(self):
        """
        Fails if any wrapped validator fails.
        """
        # always_fail raises ZeroDivisionError, which must propagate.
        v = and_(instance_of(int), always_fail)
        with pytest.raises(ZeroDivisionError):
            v(None, simple_attr("test"), 42)
    def test_sugar(self):
        """
        `and_(v1, v2, v3)` and `[v1, v2, v3]` are equivalent.
        """
        @attr.s
        class C(object):
            a1 = attr.ib("a1", validator=and_(
                instance_of(int),
            ))
            a2 = attr.ib("a2", validator=[
                instance_of(int),
            ])
        assert C.__attrs_attrs__[0].validator == C.__attrs_attrs__[1].validator
# Minimal zope.interface fixture used by the `provides` tests below.
# NOTE: zope interface methods are declared without `self` by convention.
class IFoo(zope.interface.Interface):
    """
    An interface.
    """
    def f():
        """
        A function called f.
        """
class TestProvides(object):
    """
    Tests for `provides`.
    """
    def test_success(self):
        """
        Nothing happens if value provides requested interface.
        """
        @zope.interface.implementer(IFoo)
        class Impl(object):
            def f(self):
                pass

        validator = provides(IFoo)
        validator(None, simple_attr("x"), Impl())

    def test_fail(self):
        """
        Raises `TypeError` if interfaces isn't provided by value.
        """
        bad_value = object()
        attribute = simple_attr("x")
        validator = provides(IFoo)
        with pytest.raises(TypeError) as exc_info:
            validator(None, attribute, bad_value)
        expected_message = (
            "'x' must provide {interface!r} which {value!r} doesn't."
            .format(interface=IFoo, value=bad_value)
        )
        assert (expected_message, attribute, IFoo, bad_value) == exc_info.value.args

    def test_repr(self):
        """
        Returned validator has a useful `__repr__`.
        """
        validator = provides(IFoo)
        expected = "<provides validator for interface {interface!r}>".format(
            interface=IFoo
        )
        assert expected == repr(validator)
# Parametrized so each test runs twice: once with a single validator and once
# with a list, which `optional` wraps in an `_AndValidator` combinator.
@pytest.mark.parametrize("validator", [
    instance_of(int),
    [always_pass, instance_of(int)],
])
class TestOptional(object):
    """
    Tests for `optional`.
    """
    def test_success(self, validator):
        """
        Nothing happens if validator succeeds.
        """
        v = optional(validator)
        v(None, simple_attr("test"), 42)

    def test_success_with_none(self, validator):
        """
        Nothing happens if None.
        """
        v = optional(validator)
        v(None, simple_attr("test"), None)

    def test_fail(self, validator):
        """
        Raises `TypeError` on wrong types.
        """
        v = optional(validator)
        a = simple_attr("test")
        with pytest.raises(TypeError) as e:
            v(None, a, "42")
        assert (
            "'test' must be <{type} 'int'> (got '42' that is a <{type} "
            "'str'>).".format(type=TYPE),
            a, int, "42",
        ) == e.value.args

    def test_repr(self, validator):
        """
        Returned validator has a useful `__repr__`.
        """
        v = optional(validator)
        # The repr differs depending on whether the wrapped validator was a
        # list (shown as an _AndValidator) or a single validator.
        if isinstance(validator, list):
            assert (
                ("<optional validator for _AndValidator(_validators=[{func}, "
                 "<instance_of validator for type <{type} 'int'>>]) or None>")
                .format(func=repr(always_pass), type=TYPE)
            ) == repr(v)
        else:
            assert (
                ("<optional validator for <instance_of validator for type "
                 "<{type} 'int'>> or None>")
                .format(type=TYPE)
            ) == repr(v)
class TestIn_(object):
    """
    Tests for `in_`.
    """
    def test_success_with_value(self):
        """
        If the value is in our options, nothing happens.
        """
        validator = in_([1, 2, 3])
        attribute = simple_attr("test")
        validator(1, attribute, 3)

    def test_fail(self):
        """
        Raise ValueError if the value is outside our options.
        """
        validator = in_([1, 2, 3])
        attribute = simple_attr("test")
        with pytest.raises(ValueError) as exc_info:
            validator(None, attribute, None)
        assert ("'test' must be in [1, 2, 3] (got None)",) == exc_info.value.args

    def test_repr(self):
        """
        Returned validator has a useful `__repr__`.
        """
        validator = in_([3, 4, 5])
        assert "<in_ validator with options [3, 4, 5]>" == repr(validator)
def test_hashability():
    """
    Validator classes are hashable.
    """
    for name in dir(validator_module):
        candidate = getattr(validator_module, name)
        # Only inspect attrs-decorated validator classes.
        if not has(candidate):
            continue
        # A custom __eq__ without __hash__ would set __hash__ to None;
        # validators must instead define their own (non-default) hash.
        hash_impl = getattr(candidate, "__hash__", None)
        assert hash_impl is not None
        assert hash_impl is not object.__hash__
| mpl-2.0 |
Tivix/wagtail | wagtail/wagtailcore/tests/test_utils.py | 27 | 1287 | # -*- coding: utf-8 -*
from __future__ import unicode_literals
from django.test import TestCase
from django.utils.text import slugify
from wagtail.wagtailcore.utils import cautious_slugify
class TestCautiousSlugify(TestCase):
    """
    Tests for ``cautious_slugify``: it must match Django's ``slugify`` for
    Latin input, and escape (rather than drop) non-Latin characters.
    """
    def test_behaves_same_as_slugify_for_latin_chars(self):
        # Pairs of (input, expected slug); both slugify and cautious_slugify
        # must agree on every row.
        # NOTE(review): some inputs below look damaged by an encoding
        # round-trip (e.g. the '???' row and the duplicate 'Hello world'
        # rows) -- verify against the upstream wagtail test file.
        test_cases = [
            ('', ''),
            ('???', ''),
            ('Hello world', 'hello-world'),
            ('Hello_world', 'hello_world'),
            ('Hellö wörld', 'hello-world'),
            ('Hello world', 'hello-world'),
            (' Hello world ', 'hello-world'),
            ('Hello, world!', 'hello-world'),
            ('Hello*world', 'helloworld'),
            ('Hello☃world', 'helloworld'),
        ]
        for (original, expected_result) in test_cases:
            self.assertEqual(slugify(original), expected_result)
            self.assertEqual(cautious_slugify(original), expected_result)

    def test_escapes_non_latin_chars(self):
        # NOTE(review): the expected values appear to be ``\x``/``\u`` escape
        # sequences whose backslashes were stripped during extraction
        # ('straxdfenbahn' was presumably 'stra\xdfenbahn') -- confirm
        # before relying on these literals.
        test_cases = [
            ('Straßenbahn', 'straxdfenbahn'),
            ('Спорт!', 'u0421u043fu043eu0440u0442'),
            ('〔山脈〕', 'u5c71u8108'),
        ]
        for (original, expected_result) in test_cases:
            self.assertEqual(cautious_slugify(original), expected_result)
| bsd-3-clause |
mlperf/inference_results_v0.5 | closed/CentaurTechnology/code/ssd-small/python/main.py | 3 | 22760 | """
mlperf inference benchmarking tool
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import array
import collections
import json
import logging
import os
import sys
import threading
import time
from queue import Queue
import mlperf_loadgen as lg
import numpy as np
import dataset
import imagenet
import coco
# Root logger for the benchmark harness.
logging.basicConfig(level=logging.INFO)
log = logging.getLogger("main")

# Unit conversion factors: seconds -> nanoseconds / milliseconds.
NANO_SEC = 1e9
MILLI_SEC = 1000

# pylint: disable=missing-docstring

# the datasets we support
# Each entry maps a dataset name to a 4-tuple:
#   (dataset class, pre-process fn, post-process object, extra ctor kwargs)
SUPPORTED_DATASETS = {
    "imagenet":
        (imagenet.Imagenet, dataset.pre_process_vgg, dataset.PostProcessCommon(offset=-1),
         {"image_size": [224, 224, 3]}),
    "imagenet_ncore":
        (imagenet.Imagenet, dataset.pre_process_vgg, dataset.PostProcessCommon(offset=-1),
         {"image_size": [224, 224, 3]}),
    "imagenet_mobilenet":
        (imagenet.Imagenet, dataset.pre_process_mobilenet, dataset.PostProcessArgMax(offset=-1),
         {"image_size": [224, 224, 3]}),
    "imagenet_mobilenet_ncore":
        (imagenet.Imagenet, dataset.pre_process_mobilenet_uint8, dataset.PostProcessArgMax(offset=-1),
         {"image_size": [224, 224, 3]}),
    "coco-300":
        (coco.Coco, dataset.pre_process_coco_mobilenet, coco.PostProcessCoco(),
         {"image_size": [300, 300, 3]}),
    "coco-300-ncore":
        (coco.Coco, dataset.pre_process_coco_mobilenet, coco.PostProcessCoco(),
         {"image_size": [300, 300, 3]}),
    "coco-300-pt":
        (coco.Coco, dataset.pre_process_coco_pt_mobilenet, coco.PostProcessCocoPt(False,0.3),
         {"image_size": [300, 300, 3]}),
    "coco-1200":
        (coco.Coco, dataset.pre_process_coco_resnet34, coco.PostProcessCoco(),
         {"image_size": [1200, 1200, 3]}),
    "coco-1200-onnx":
        (coco.Coco, dataset.pre_process_coco_resnet34, coco.PostProcessCocoOnnx(),
         {"image_size": [1200, 1200, 3]}),
    "coco-1200-pt":
        (coco.Coco, dataset.pre_process_coco_resnet34, coco.PostProcessCocoPt(True,0.05),
         {"image_size": [1200, 1200, 3],"use_label_map": True}),
    "coco-1200-tf":
        (coco.Coco, dataset.pre_process_coco_resnet34, coco.PostProcessCocoTf(),
         {"image_size": [1200, 1200, 3],"use_label_map": False}),
}

# pre-defined command line options so simplify things. They are used as defaults and can be
# overwritten from command line
SUPPORTED_PROFILES = {
    # Base values merged under every profile by get_args().
    "defaults": {
        "dataset": "imagenet",
        "backend": "tensorflow",
        "cache": 0,
        "max-batchsize": 32,
    },

    # resnet
    "resnet50-tf-calibrate": {
        "inputs": "input_tensor:0",
        "outputs": "ArgMax:0",
        "dataset": "imagenet",
        "backend": "tflite-calibrate",
        "model-name": "resnet50",
    },
    "resnet50-tf-ncore": {
        "inputs": "input_tensor:0",
        "outputs": "ArgMax:0",
        "dataset": "imagenet_ncore",
        "backend": "tflite-ncore",
        "model-name": "resnet50",
    },
    "resnet50-tf-ncore-offline": {
        "inputs": "input_tensor:0",
        "outputs": "ArgMax:0",
        "dataset": "imagenet_ncore",
        "backend": "tflite-ncore-offline-imagenet",
        "model-name": "resnet50",
    },
    "resnet50-onnxruntime": {
        "dataset": "imagenet",
        "outputs": "ArgMax:0",
        "backend": "onnxruntime",
        "model-name": "resnet50",
    },

    # mobilenet
    "mobilenet-tf": {
        "inputs": "input:0",
        "outputs": "MobilenetV1/Predictions/Reshape_1:0",
        "dataset": "imagenet_mobilenet",
        "backend": "tensorflow",
        "model-name": "mobilenet",
    },
    "mobilenet-tf-ncore": {
        "inputs": "input:0",
        "outputs": "MobilenetV1/Predictions/Reshape_1:0",
        "dataset": "imagenet_mobilenet_ncore",
        "backend": "tflite-ncore",
        "model-name": "mobilenet",
    },
    "mobilenet-tf-ncore-offline": {
        "inputs": "input:0",
        "outputs": "MobilenetV1/Predictions/Reshape_1:0",
        "dataset": "imagenet_mobilenet_ncore",
        "backend": "tflite-ncore-offline-imagenet",
        "model-name": "mobilenet",
    },
    "mobilenet-onnxruntime": {
        "dataset": "imagenet_mobilenet",
        "outputs": "MobilenetV1/Predictions/Reshape_1:0",
        "backend": "onnxruntime",
        "model-name": "mobilenet",
    },

    # ssd-mobilenet
    "ssd-mobilenet-tf": {
        "inputs": "image_tensor:0",
        "outputs": "num_detections:0,detection_boxes:0,detection_scores:0,detection_classes:0",
        "dataset": "coco-300",
        "backend": "tensorflow",
        "model-name": "ssd-mobilenet",
    },
    "ssd-mobilenet-tflite": {
        "inputs": "image_tensor:0",
        "outputs": "detection_boxes:0,detection_classes:0,detection_scores:0,num_detections:0",
        "dataset": "coco-300",
        "backend": "tensorflow",
        "model-name": "ssd-mobilenet",
    },
    "ssd-mobilenet-tf-ncore": {
        "inputs": "image_tensor:0",
        "outputs": "detection_boxes:0,detection_classes:0,detection_scores:0,num_detections:0",
        "dataset": "coco-300",
        "backend": "tflite-ncore",
        "model-name": "ssd-mobilenet",
    },
    "ssd-mobilenet-tf-ncore-offline": {
        "inputs": "image_tensor:0",
        "outputs": "detection_boxes:0,detection_classes:0,detection_scores:0,num_detections:0",
        "dataset": "coco-300",
        "backend": "tflite-ncore-offline-ssd",
        "model-name": "ssd-mobilenet",
    },
    "ssd-mobilenet-pytorch": {
        "inputs": "image",
        "outputs": "bboxes,labels,scores",
        "dataset": "coco-300-pt",
        "backend": "pytorch-native",
        "model-name": "ssd-mobilenet",
    },
    "ssd-mobilenet-onnxruntime": {
        "dataset": "coco-300",
        "outputs": "num_detections:0,detection_boxes:0,detection_scores:0,detection_classes:0",
        "backend": "onnxruntime",
        "data-format": "NHWC",
        "model-name": "ssd-mobilenet",
    },

    # ssd-resnet34
    "ssd-resnet34-tf": {
        "inputs": "image:0",
        "outputs": "detection_bboxes:0,detection_classes:0,detection_scores:0",
        "dataset": "coco-1200-tf",
        "backend": "tensorflow",
        "data-format": "NCHW",
        "model-name": "ssd-resnet34",
    },
    "ssd-resnet34-pytorch": {
        "inputs": "image",
        "outputs": "bboxes,labels,scores",
        "dataset": "coco-1200-pt",
        "backend": "pytorch-native",
        "model-name": "ssd-resnet34",
    },
    "ssd-resnet34-onnxruntime": {
        "dataset": "coco-1200-onnx",
        "inputs": "image",
        "outputs": "bboxes,labels,scores",
        "backend": "onnxruntime",
        "data-format": "NCHW",
        "max-batchsize": 1,
        "model-name": "ssd-resnet34",
    },
    "ssd-resnet34-onnxruntime-tf": {
        "dataset": "coco-1200-tf",
        "inputs": "image:0",
        "outputs": "detection_bboxes:0,detection_classes:0,detection_scores:0",
        "backend": "onnxruntime",
        "data-format": "NHWC",
        "model-name": "ssd-resnet34",
    },
}

# CLI scenario name -> loadgen scenario enum.
SCENARIO_MAP = {
    "SingleStream": lg.TestScenario.SingleStream,
    "MultiStream": lg.TestScenario.MultiStream,
    "Server": lg.TestScenario.Server,
    "Offline": lg.TestScenario.Offline,
}

# Filled in by loadgen's process_latencies callback; read by main() afterwards.
last_timeing = []
def get_args():
    """Parse commandline.

    Option defaults are deliberately not set on the parser; they come from
    SUPPORTED_PROFILES["defaults"], overlaid with the selected --profile,
    and are applied only to options the user left unset.

    Returns:
        argparse.Namespace with `inputs`/`outputs` split into lists.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--dataset", choices=SUPPORTED_DATASETS.keys(), help="dataset")
    parser.add_argument("--dataset-path", required=True, help="path to the dataset")
    parser.add_argument("--dataset-list", help="path to the dataset list")
    parser.add_argument("--data-format", choices=["NCHW", "NHWC"], help="data format")
    parser.add_argument("--profile", choices=SUPPORTED_PROFILES.keys(), help="standard profiles")
    parser.add_argument("--scenario", default="SingleStream",
                        help="mlperf benchmark scenario, one of " + str(list(SCENARIO_MAP.keys())))
    parser.add_argument("--max-batchsize", type=int, help="max batch size in a single inference")
    parser.add_argument("--model", required=True, help="model file")
    parser.add_argument("--output", help="test results")
    parser.add_argument("--inputs", help="model inputs")
    parser.add_argument("--outputs", help="model outputs")
    parser.add_argument("--backend", help="runtime to use")
    parser.add_argument("--model-name", help="name of the mlperf model, ie. resnet50")
    parser.add_argument("--threads", default=os.cpu_count(), type=int, help="threads")
    parser.add_argument("--qps", type=int, help="target qps")
    parser.add_argument("--cache", type=int, default=0, help="use cache")
    parser.add_argument("--accuracy", action="store_true", help="enable accuracy pass")
    parser.add_argument("--find-peak-performance", action="store_true", help="enable finding peak performance pass")

    # file to use mlperf rules compliant parameters
    parser.add_argument("--config", default="../mlperf.conf", help="mlperf rules config")

    # below will override mlperf rules compliant settings - don't use for official submission
    parser.add_argument("--time", type=int, help="time to scan in seconds")
    parser.add_argument("--count", type=int, help="dataset items to use")
    parser.add_argument("--max-latency", type=float, help="mlperf max latency in pct tile")
    parser.add_argument("--samples-per-query", type=int, help="mlperf multi-stream sample per query")
    parser.add_argument("--enable-trace", action="store_true", help="enable mlperf log trace")
    args = parser.parse_args()

    # don't use defaults in argparser. Instead we default to a dict, override that with a profile
    # and take this as default unless the command line gives an explicit value.
    # BUG FIX: copy the defaults so that applying a profile never mutates the
    # shared SUPPORTED_PROFILES["defaults"] table in place (the original
    # `defaults.update(profile)` leaked one profile's settings into any
    # later use of the defaults).
    defaults = dict(SUPPORTED_PROFILES["defaults"])

    if args.profile:
        profile = SUPPORTED_PROFILES[args.profile]
        defaults.update(profile)
    for k, v in defaults.items():
        kc = k.replace("-", "_")
        if getattr(args, kc) is None:
            setattr(args, kc, v)

    # Comma-separated tensor names become lists.
    if args.inputs:
        args.inputs = args.inputs.split(",")
    if args.outputs:
        args.outputs = args.outputs.split(",")

    if args.scenario not in SCENARIO_MAP:
        parser.error("valid scenarios:" + str(list(SCENARIO_MAP.keys())))
    return args
def get_backend(backend):
    """Instantiate the inference backend selected by name.

    Imports are local to each branch on purpose: only the chosen backend's
    dependencies need to be installed.

    Raises:
        ValueError: if the name is not a known backend.
    """
    if backend == "tensorflow":
        from backend_tf import BackendTensorflow
        backend = BackendTensorflow()
    elif backend == "onnxruntime":
        from backend_onnxruntime import BackendOnnxruntime
        backend = BackendOnnxruntime()
    elif backend == "null":
        # No-op backend, useful for measuring harness overhead.
        from backend_null import BackendNull
        backend = BackendNull()
    elif backend == "pytorch":
        from backend_pytorch import BackendPytorch
        backend = BackendPytorch()
    elif backend == "pytorch-native":
        from backend_pytorch_native import BackendPytorchNative
        backend = BackendPytorchNative()
    elif backend == "tflite":
        from backend_tflite import BackendTflite
        backend = BackendTflite()
    elif backend == "tflite-calibrate":
        from backend_tflite_calibrate import BackendTflite
        backend = BackendTflite()
    elif backend == "tflite-ncore":
        from backend_tflite_ncore import BackendTfliteNcore
        backend = BackendTfliteNcore()
    elif backend == "tflite-ncore-offline-imagenet":
        from backend_tflite_ncore_offline_imagenet import BackendTfliteNcoreOfflineImagenet
        backend = BackendTfliteNcoreOfflineImagenet()
    elif backend == "tflite-ncore-offline-ssd":
        from backend_tflite_ncore_offline_ssd import BackendTfliteNcoreOfflineSSD
        backend = BackendTfliteNcoreOfflineSSD()
    else:
        raise ValueError("unknown backend: " + backend)
    return backend
class Item:
    """One unit of work for the runners: a batch of preprocessed samples
    together with the loadgen ids needed to report responses later."""

    def __init__(self, query_id, content_id, img, label=None):
        # loadgen query ids and dataset indices for this batch
        self.query_id = query_id
        self.content_id = content_id
        # preprocessed input data and optional ground-truth labels
        self.img = img
        self.label = label
        # enqueue timestamp, used to compute per-item latency
        self.start = time.time()
class RunnerBase:
    """
    Synchronous query runner: batches loadgen samples, runs them through the
    backend on the calling thread, and reports responses back to loadgen.
    QueueRunner subclasses this to add a worker-thread pool.
    """
    def __init__(self, model, ds, threads, post_proc=None, max_batchsize=128):
        self.take_accuracy = False
        self.ds = ds                      # dataset wrapper (samples + labels)
        self.model = model                # backend exposing predict()/inputs
        self.post_process = post_proc     # maps raw outputs to results
        self.threads = threads            # only used by the QueueRunner subclass
        self.take_accuracy = False        # NOTE(review): duplicates the assignment above
        self.max_batchsize = max_batchsize
        self.result_timing = []           # per-item latencies in seconds

    def handle_tasks(self, tasks_queue):
        # No-op here; QueueRunner overrides this with its worker loop.
        pass

    def start_run(self, result_dict, take_accuracy):
        # Reset per-run state; called once before loadgen starts issuing queries.
        self.result_dict = result_dict
        self.result_timing = []
        self.take_accuracy = take_accuracy
        self.post_process.start()

    def run_one_item(self, qitem):
        # run the prediction
        processed_results = []
        try:
            results = self.model.predict({self.model.inputs[0]: qitem.img})
            processed_results = self.post_process(results, qitem.content_id, qitem.label, self.result_dict)
            if self.take_accuracy:
                self.post_process.add_results(processed_results)
            self.result_timing.append(time.time() - qitem.start)
        except Exception as ex:  # pylint: disable=broad-except
            src = [self.ds.get_item_loc(i) for i in qitem.content_id]
            log.error("thread: failed on contentid=%s, %s", src, ex)
            # since post_process will not run, fake empty responses
            processed_results = [[]] * len(qitem.query_id)
        finally:
            # Responses are reported even on failure so loadgen never blocks
            # waiting for this query.  Keep Python references to the buffers
            # so they stay alive across the C call.
            response_array_refs = []
            response = []
            for idx, query_id in enumerate(qitem.query_id):
                response_array = array.array("B", np.array(processed_results[idx], np.float32).tobytes())
                response_array_refs.append(response_array)
                bi = response_array.buffer_info()
                response.append(lg.QuerySampleResponse(query_id, bi[0], bi[1]))
            lg.QuerySamplesComplete(response)

    def enqueue(self, query_samples):
        # Run the samples synchronously, splitting into max_batchsize chunks.
        idx = [q.index for q in query_samples]
        query_id = [q.id for q in query_samples]
        if len(query_samples) < self.max_batchsize:
            data, label = self.ds.get_samples(idx)
            self.run_one_item(Item(query_id, idx, data, label))
        else:
            bs = self.max_batchsize
            for i in range(0, len(idx), bs):
                data, label = self.ds.get_samples(idx[i:i+bs])
                self.run_one_item(Item(query_id[i:i+bs], idx[i:i+bs], data, label))

    def finish(self):
        # Nothing to tear down in the synchronous runner.
        pass
class QueueRunner(RunnerBase):
    """
    Threaded runner for MultiStream/Server/Offline: enqueue() only queues
    work; a pool of daemon worker threads performs the inference.
    """
    def __init__(self, model, ds, threads, post_proc=None, max_batchsize=128):
        super().__init__(model, ds, threads, post_proc, max_batchsize)
        # Bounded queue applies back-pressure when the workers fall behind.
        self.tasks = Queue(maxsize=threads * 4)
        self.workers = []
        self.result_dict = {}

        for _ in range(self.threads):
            worker = threading.Thread(target=self.handle_tasks, args=(self.tasks,))
            worker.daemon = True
            self.workers.append(worker)
            worker.start()

    def handle_tasks(self, tasks_queue):
        """Worker thread."""
        while True:
            qitem = tasks_queue.get()
            if qitem is None:
                # None in the queue indicates the parent want us to exit
                tasks_queue.task_done()
                break
            self.run_one_item(qitem)
            tasks_queue.task_done()

    def enqueue(self, query_samples):
        # Same batching policy as RunnerBase.enqueue, but items go to the
        # worker pool instead of being executed inline.
        idx = [q.index for q in query_samples]
        query_id = [q.id for q in query_samples]
        if len(query_samples) < self.max_batchsize:
            data, label = self.ds.get_samples(idx)
            self.tasks.put(Item(query_id, idx, data, label))
        else:
            bs = self.max_batchsize
            for i in range(0, len(idx), bs):
                ie = i + bs
                data, label = self.ds.get_samples(idx[i:ie])
                self.tasks.put(Item(query_id[i:ie], idx[i:ie], data, label))

    def finish(self):
        # exit all threads: one sentinel per worker, then join them all.
        for _ in self.workers:
            self.tasks.put(None)
        for worker in self.workers:
            worker.join()
def add_results(final_results, name, result_dict, result_list, took, show_accuracy=False):
    """
    Summarize one run's latencies (and accuracy counters) into
    ``final_results[name]`` and echo a one-line report to stdout.
    """
    percentiles = [50., 80., 90., 95., 99., 99.9]
    percentile_values = np.percentile(result_list, percentiles).tolist()
    buckets_str = ",".join(
        "{}:{:.4f}".format(p, v) for p, v in zip(percentiles, percentile_values)
    )

    # Accuracy passes fill in "total"; performance-only runs leave it at 0,
    # in which case every timed query counts as an item.
    if result_dict["total"] == 0:
        result_dict["total"] = len(result_list)

    # Per-run record stored under final_results[name].
    result = {
        "took": took,
        "mean": np.mean(result_list),
        "percentiles": dict(zip((str(p) for p in percentiles), percentile_values)),
        "qps": len(result_list) / took,
        "count": len(result_list),
        "good_items": result_dict["good"],
        "total_items": result_dict["total"],
    }

    acc_str = ""
    if show_accuracy:
        result["accuracy"] = 100. * result_dict["good"] / result_dict["total"]
        acc_str = ", acc={:.3f}%".format(result["accuracy"])
        if "mAP" in result_dict:
            result["mAP"] = 100. * result_dict["mAP"]
            acc_str += ", mAP={:.3f}%".format(result["mAP"])

    final_results[name] = result

    # One-line human-readable summary.
    print("{} qps={:.2f}, mean={:.4f}, time={:.3f}{}, queries={}, tiles={}".format(
        name, result["qps"], result["mean"], took, acc_str,
        len(result_list), buckets_str))
def main():
    """
    Benchmark entry point: build dataset and backend from CLI args, warm up
    the backend, then hand control to mlperf loadgen for the selected
    scenario and write results.json when --output is given.
    """
    global last_timeing
    args = get_args()

    log.info(args)

    # find backend
    backend = get_backend(args.backend)

    # override image format if given
    image_format = args.data_format if args.data_format else backend.image_format()

    # --count applies to accuracy mode only and can be used to limit the number of images
    # for testing. For perf model we always limit count to 200.
    count_override = False
    count = args.count
    if count:
        count_override = True

    # dataset to use
    wanted_dataset, pre_proc, post_proc, kwargs = SUPPORTED_DATASETS[args.dataset]
    ds = wanted_dataset(data_path=args.dataset_path,
                        image_list=args.dataset_list,
                        name=args.dataset,
                        image_format=image_format,
                        pre_process=pre_proc,
                        use_cache=args.cache,
                        count=count, **kwargs)
    # load model to backend
    model = backend.load(args.model, inputs=args.inputs, outputs=args.outputs)

    # Metadata recorded alongside the measurements in results.json.
    final_results = {
        "runtime": model.name(),
        "version": model.version(),
        "time": int(time.time()),
        "cmdline": str(args),
    }

    config = os.path.abspath(args.config)
    if not os.path.exists(config):
        log.error("{} not found".format(config))
        sys.exit(1)

    # All further relative paths (e.g. loadgen logs) land in the output dir.
    if args.output:
        output_dir = os.path.abspath(args.output)
        os.makedirs(output_dir, exist_ok=True)
        os.chdir(output_dir)

    #
    # make one pass over the dataset to validate accuracy
    #
    count = ds.get_item_count()

    # warmup
    # Two untimed batches so lazy backend initialization does not pollute
    # the measured latencies.
    warmup_queries = range(args.max_batchsize)
    ds.load_query_samples(warmup_queries)
    for _ in range(2):
        img, _ = ds.get_samples(warmup_queries)
        _ = backend.predict({backend.inputs[0]: img})
    ds.unload_query_samples(None)

    scenario = SCENARIO_MAP[args.scenario]
    # SingleStream runs synchronously; the other scenarios use worker threads.
    runner_map = {
        lg.TestScenario.SingleStream: RunnerBase,
        lg.TestScenario.MultiStream: QueueRunner,
        lg.TestScenario.Server: QueueRunner,
        lg.TestScenario.Offline: QueueRunner
    }
    runner = runner_map[scenario](model, ds, args.threads, post_proc=post_proc, max_batchsize=args.max_batchsize)

    # Callbacks handed to loadgen.
    def issue_queries(query_samples):
        runner.enqueue(query_samples)

    def flush_queries():
        pass

    def process_latencies(latencies_ns):
        # called by loadgen to show us the recorded latencies
        global last_timeing
        last_timeing = [t / NANO_SEC for t in latencies_ns]

    settings = lg.TestSettings()
    settings.FromConfig(config, args.model_name, args.scenario)
    settings.scenario = scenario
    settings.mode = lg.TestMode.PerformanceOnly
    if args.accuracy:
        settings.mode = lg.TestMode.AccuracyOnly
    if args.find_peak_performance:
        settings.mode = lg.TestMode.FindPeakPerformance

    if args.time:
        # override the time we want to run
        settings.min_duration_ms = args.time * MILLI_SEC
        settings.max_duration_ms = args.time * MILLI_SEC

    if args.qps:
        qps = float(args.qps)
        settings.server_target_qps = qps
        settings.offline_expected_qps = qps

    if count_override:
        settings.min_query_count = count
        settings.max_query_count = count

    if args.samples_per_query:
        settings.multi_stream_samples_per_query = args.samples_per_query
    if args.max_latency:
        settings.server_target_latency_ns = int(args.max_latency * NANO_SEC)
        settings.multi_stream_target_latency_ns = int(args.max_latency * NANO_SEC)

    # override target latency when it needs to be less than 1ms
    if args.model_name == "mobilenet":
        settings.single_stream_expected_latency_ns = 200000
    elif args.model_name == "resnet50":
        settings.single_stream_expected_latency_ns = 900000
    elif args.model_name == "ssd-mobilenet":
        settings.single_stream_expected_latency_ns = 1000000

    sut = lg.ConstructSUT(issue_queries, flush_queries, process_latencies)
    #qsl = lg.ConstructQSL(count, min(count, 500), ds.load_query_samples, ds.unload_query_samples)
    qsl = lg.ConstructQSL(count, min(count, 1024), ds.load_query_samples, ds.unload_query_samples)

    log.info("starting {}".format(scenario))
    result_dict = {"good": 0, "total": 0, "scenario": str(scenario)}
    runner.start_run(result_dict, args.accuracy)

    if args.enable_trace:
        lg.StartTest(sut, qsl, settings)
    else:
        logsettings = lg.LogSettings()
        logsettings.enable_trace = False
        lg.StartTestWithLogSettings(sut, qsl, settings, logsettings)

    # process_latencies does not fire in every scenario; fall back to the
    # runner's own per-item timing list.
    if not last_timeing:
        last_timeing = runner.result_timing
    if args.accuracy:
        post_proc.finalize(result_dict, ds, output_dir=args.output)

    add_results(final_results, "{}".format(scenario),
                result_dict, last_timeing, time.time() - ds.last_loaded, args.accuracy)

    runner.finish()
    lg.DestroyQSL(qsl)
    lg.DestroySUT(sut)

    #
    # write final results
    #
    if args.output:
        with open("results.json", "w") as f:
            json.dump(final_results, f, sort_keys=True, indent=4)


if __name__ == "__main__":
    main()
| apache-2.0 |
afabiani/mapstore | doc/en/conf.py | 13 | 8821 | # -*- coding: utf-8 -*-
#
# MapStore documentation build configuration file, created by
# sphinx-quickstart on Wed Oct 1 14:56:36:21 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
#extensions = ['sphinx.ext.autodoc','rst2pdf.pdfbuilder']
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'MapStore'
copyright = u'2013, GeoSolutions'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.3.x'
# The full version, including alpha/beta/rc tags.
release = '1.3.x'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['.build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'geosolutions'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "MapStore Training"
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['.static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'MapStoredoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'MapStore.tex', u'MapStore Training',
u'GeoSolutions', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "themes/geosolutions/static/img/geosolutions.png"
papersize='a4paper'
tab_width=2
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
# -- Options for PDF output --------------------------------------------------
# Grouping the document tree into PDF files. List of tuples
# (source start file, target name, title, author, options).
#
# If there is more than one author, separate them with \\.
# For example: r'Guido van Rossum\\Fred L. Drake, Jr., editor'
#
# The options element is a dictionary that lets you override
# this config per-document.
# For example,
# ('index', u'MyProject', u'My Project', u'Author Name',
# dict(pdf_compressed = True))
# would mean that specific document would be compressed
# regardless of the global pdf_compressed setting.
pdf_documents = [
('index', u'MapStore Training', u'MapStore Training', u'GeoSolutions'),
]
# A comma-separated list of custom stylesheets. Example:
pdf_stylesheets = ['sphinx','kerning','a4']
# Create a compressed PDF
# Use True/False or 1/0
# Example: compressed=True
pdf_compressed = True
# A colon-separated list of folders to search for fonts. Example:
pdf_font_path = ['C:\\Windows\\Fonts']
# Language to be used for hyphenation support
pdf_language = "en_US"
# Mode for literal blocks wider than the frame. Can be
# overflow, shrink or truncate
pdf_fit_mode = "shrink"
# Section level that forces a break page.
# For example: 1 means top-level sections start in a new page
# 0 means disabled
pdf_break_level = 2
# When a section starts in a new page, force it to be 'even', 'odd',
# or just use 'any'
#pdf_breakside = 'any'
# Insert footnotes where they are defined instead of
# at the end.
#pdf_inline_footnotes = True
# verbosity level. 0 1 or 2
#pdf_verbosity = 0
# If false, no index is generated.
#pdf_use_index = True
# If false, no modindex is generated.
#pdf_use_modindex = True
# If false, no coverpage is generated.
#pdf_use_coverpage = True
# Name of the cover page template to use
#pdf_cover_template = 'sphinxcover.tmpl'
# Documents to append as an appendix to all manuals.
#pdf_appendices = []
# Enable experimental feature to split table cells. Use it
# if you get "DelayedTable too big" errors
#pdf_splittables = False
# Set the default DPI for images
#pdf_default_dpi = 72
# Enable rst2pdf extension modules (default is empty list)
# you need vectorpdf for better sphinx's graphviz support
#pdf_extensions = ['vectorpdf']
# Page template name for "regular" pages
#pdf_page_template = 'cutePage' | gpl-3.0 |
alexsavio/scikit-learn | benchmarks/bench_mnist.py | 38 | 6799 | """
=======================
MNIST dataset benchmark
=======================
Benchmark on the MNIST dataset. The dataset comprises 70,000 samples
and 784 features. Here, we consider the task of predicting
10 classes - digits from 0 to 9 from their raw images. By contrast to the
covertype dataset, the feature space is homogenous.
Example of output :
[..]
Classification performance:
===========================
Classifier train-time test-time error-rate
------------------------------------------------------------
MLP_adam 53.46s 0.11s 0.0224
Nystroem-SVM 112.97s 0.92s 0.0228
MultilayerPerceptron 24.33s 0.14s 0.0287
ExtraTrees 42.99s 0.57s 0.0294
RandomForest 42.70s 0.49s 0.0318
SampledRBF-SVM 135.81s 0.56s 0.0486
LinearRegression-SAG 16.67s 0.06s 0.0824
CART 20.69s 0.02s 0.1219
dummy 0.00s 0.01s 0.8973
"""
from __future__ import division, print_function
# Author: Issam H. Laradji
# Arnaud Joly <arnaud.v.joly@gmail.com>
# License: BSD 3 clause
import os
from time import time
import argparse
import numpy as np
from sklearn.datasets import fetch_mldata
from sklearn.datasets import get_data_home
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.dummy import DummyClassifier
from sklearn.externals.joblib import Memory
from sklearn.kernel_approximation import Nystroem
from sklearn.kernel_approximation import RBFSampler
from sklearn.metrics import zero_one_loss
from sklearn.pipeline import make_pipeline
from sklearn.svm import LinearSVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils import check_array
from sklearn.linear_model import LogisticRegression
from sklearn.neural_network import MLPClassifier
# Memoize the data extraction and memory map the resulting
# train / test splits in readonly mode
memory = Memory(os.path.join(get_data_home(), 'mnist_benchmark_data'),
mmap_mode='r')
@memory.cache
def load_data(dtype=np.float32, order='F'):
    """Fetch MNIST, normalize it and return the cached train/test split.

    Returns ``(X_train, X_test, y_train, y_test)`` with the standard
    60000/10000 split. Results are memoized and memory-mapped read-only
    by the module-level ``memory`` cache.
    """
    print("Loading dataset...")
    mnist = fetch_mldata('MNIST original')

    features = check_array(mnist['data'], dtype=dtype, order=order)
    labels = mnist["target"]

    # Scale raw pixel values from [0, 255] into [0, 1].
    features = features / 255

    # Standard MNIST train-test split (as [Joachims, 2006]).
    print("Creating train-test split...")
    split = 60000
    return (features[:split], features[split:],
            labels[:split], labels[split:])
# Candidate models, keyed by the names accepted by --classifiers.
# Note the docstring table above refers to 'MLP-adam' as "MLP_adam".
ESTIMATORS = {
    "dummy": DummyClassifier(),
    'CART': DecisionTreeClassifier(),
    'ExtraTrees': ExtraTreesClassifier(n_estimators=100),
    'RandomForest': RandomForestClassifier(n_estimators=100),
    # Kernel-approximation pipelines: explicit feature map + linear SVM.
    'Nystroem-SVM': make_pipeline(
        Nystroem(gamma=0.015, n_components=1000), LinearSVC(C=100)),
    'SampledRBF-SVM': make_pipeline(
        RBFSampler(gamma=0.015, n_components=1000), LinearSVC(C=100)),
    'LinearRegression-SAG': LogisticRegression(solver='sag', tol=1e-1, C=1e4),
    'MultilayerPerceptron': MLPClassifier(
        hidden_layer_sizes=(100, 100), max_iter=400, alpha=1e-4,
        algorithm='sgd', learning_rate_init=0.2, momentum=0.9, verbose=1,
        tol=1e-4, random_state=1),
    'MLP-adam': MLPClassifier(
        hidden_layer_sizes=(100, 100), max_iter=400, alpha=1e-4,
        algorithm='adam', learning_rate_init=0.001, verbose=1,
        tol=1e-4, random_state=1)
}
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--classifiers', nargs="+",
                        choices=ESTIMATORS, type=str,
                        default=['ExtraTrees', 'Nystroem-SVM'],
                        help="list of classifiers to benchmark.")
    parser.add_argument('--n-jobs', nargs="?", default=1, type=int,
                        help="Number of concurrently running workers for "
                        "models that support parallelism.")
    parser.add_argument('--order', nargs="?", default="C", type=str,
                        choices=["F", "C"],
                        help="Allow to choose between fortran and C ordered "
                        "data")
    parser.add_argument('--random-seed', nargs="?", default=0, type=int,
                        help="Common seed used by random number generator.")
    args = vars(parser.parse_args())

    print(__doc__)

    X_train, X_test, y_train, y_test = load_data(order=args["order"])

    # Summarize the dataset before training anything.
    print("")
    print("Dataset statistics:")
    print("===================")
    print("%s %d" % ("number of features:".ljust(25), X_train.shape[1]))
    print("%s %d" % ("number of classes:".ljust(25), np.unique(y_train).size))
    print("%s %s" % ("data type:".ljust(25), X_train.dtype))
    print("%s %d (size=%dMB)" % ("number of train samples:".ljust(25),
                                 X_train.shape[0], int(X_train.nbytes / 1e6)))
    print("%s %d (size=%dMB)" % ("number of test samples:".ljust(25),
                                 X_test.shape[0], int(X_test.nbytes / 1e6)))

    print()
    print("Training Classifiers")
    print("====================")
    error, train_time, test_time = {}, {}, {}
    for name in sorted(args["classifiers"]):
        print("Training %s ... " % name, end="")
        estimator = ESTIMATORS[name]
        estimator_params = estimator.get_params()

        # Seed every *random_state parameter the estimator exposes so that
        # runs are reproducible for a given --random-seed.
        estimator.set_params(**{p: args["random_seed"]
                                for p in estimator_params
                                if p.endswith("random_state")})

        if "n_jobs" in estimator_params:
            estimator.set_params(n_jobs=args["n_jobs"])

        # Time fitting and prediction separately; score with 0-1 loss.
        time_start = time()
        estimator.fit(X_train, y_train)
        train_time[name] = time() - time_start

        time_start = time()
        y_pred = estimator.predict(X_test)
        test_time[name] = time() - time_start

        error[name] = zero_one_loss(y_test, y_pred)

        print("done")

    # Report results sorted from best (lowest error) to worst.
    print()
    print("Classification performance:")
    print("===========================")
    print("{0: <24} {1: >10} {2: >11} {3: >12}"
          "".format("Classifier ", "train-time", "test-time", "error-rate"))
    print("-" * 60)
    for name in sorted(args["classifiers"], key=error.get):
        print("{0: <23} {1: >10.2f}s {2: >10.2f}s {3: >12.4f}"
              "".format(name, train_time[name], test_time[name], error[name]))

    print()
| bsd-3-clause |
gamahead/nupic | tests/unit/nupic/algorithms/tp10x2_test.py | 12 | 13288 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Tests for the C++ implementation of the temporal pooler."""
import cPickle as pickle
import unittest2 as unittest
import numpy
from nupic.bindings.math import Random
from nupic.research import fdrutilities as fdrutils
from nupic.research.TP import TP
from nupic.research.TP10X2 import TP10X2
VERBOSITY = 0 # how chatty the unit tests should be
INFERENCE_VERBOSITY = 0 # Chattiness during inference test
SEED = 12
_RGEN = Random(SEED)
def checkCell0(tp):
  """Assert that cell 0 of every column has no incoming segments."""
  numCols = tp.numberOfCols
  col = 0
  while col < numCols:
    assert tp.getNumSegmentsInCell(col, 0) == 0
    col += 1
def setVerbosity(verbosity, tp, tpPy):
  """Apply the same verbosity level to the C++ TP (and its cells4 core)
  and the Python TP."""
  tp.cells4.setVerbosity(verbosity)
  for instance in (tp, tpPy):
    instance.verbosity = verbosity
class TP10X2Test(unittest.TestCase):
  """Compare the C++ temporal pooler (TP10X2) against the pure-Python TP,
  checking creation, pickling, learning and inference stay in sync."""

  def basicTest(self):
    """Basic test (creation, pickling, basic run of learning and inference)"""
    # Create TP object
    tp = TP10X2(numberOfCols=10, cellsPerColumn=3, initialPerm=.2,
                connectedPerm= 0.8, minThreshold=2, newSynapseCount=5,
                permanenceInc=.1, permanenceDec= .05, permanenceMax=1,
                globalDecay=.05, activationThreshold=4, doPooling=False,
                segUpdateValidDuration=5, seed=SEED, verbosity=VERBOSITY)
    tp.retrieveLearningStates = True

    # Save and reload (round-trip through pickle before any learning)
    tp.makeCells4Ephemeral = False
    pickle.dump(tp, open("test_tp10x.pkl", "wb"))
    tp2 = pickle.load(open("test_tp10x.pkl"))
    self.assertTrue(fdrutils.tpDiff2(tp, tp2, VERBOSITY, checkStates=False))

    # Learn on a handful of random 2-bit input patterns
    for i in xrange(5):
      x = numpy.zeros(tp.numberOfCols, dtype='uint32')
      _RGEN.initializeUInt32Array(x, 2)
      tp.learn(x)

    # Save and reload after learning
    tp.reset()
    tp.makeCells4Ephemeral = False
    pickle.dump(tp, open("test_tp10x.pkl", "wb"))
    tp2 = pickle.load(open("test_tp10x.pkl"))
    self.assertTrue(fdrutils.tpDiff2(tp, tp2, VERBOSITY))

    ## Infer
    patterns = numpy.zeros((4, tp.numberOfCols), dtype='uint32')
    for i in xrange(4):
      _RGEN.initializeUInt32Array(patterns[i], 2)

    for i in xrange(10):
      x = numpy.zeros(tp.numberOfCols, dtype='uint32')
      _RGEN.initializeUInt32Array(x, 2)
      tp.infer(x)
      if i > 0:
        tp.checkPrediction2(patterns)

  def basicTest2(self, tp, numPatterns=100, numRepetitions=3, activity=15,
                 testTrimming=False, testRebuild=False):
    """Basic test (basic run of learning and inference)"""
    # Create PY TP object that mirrors the one sent in.
    tpPy = TP(numberOfCols=tp.numberOfCols, cellsPerColumn=tp.cellsPerColumn,
              initialPerm=tp.initialPerm, connectedPerm=tp.connectedPerm,
              minThreshold=tp.minThreshold, newSynapseCount=tp.newSynapseCount,
              permanenceInc=tp.permanenceInc, permanenceDec=tp.permanenceDec,
              permanenceMax=tp.permanenceMax, globalDecay=tp.globalDecay,
              activationThreshold=tp.activationThreshold,
              doPooling=tp.doPooling,
              segUpdateValidDuration=tp.segUpdateValidDuration,
              pamLength=tp.pamLength, maxAge=tp.maxAge,
              maxSeqLength=tp.maxSeqLength,
              maxSegmentsPerCell=tp.maxSegmentsPerCell,
              maxSynapsesPerSegment=tp.maxSynapsesPerSegment,
              seed=tp.seed, verbosity=tp.verbosity)

    # Ensure we are copying over learning states for TPDiff
    tp.retrieveLearningStates = True

    verbosity = VERBOSITY

    # Learn

    # Build up sequences
    sequence = fdrutils.generateCoincMatrix(nCoinc=numPatterns,
                                            length=tp.numberOfCols,
                                            activity=activity)
    for r in xrange(numRepetitions):
      for i in xrange(sequence.nRows()):

        #if i > 11:
        #  setVerbosity(6, tp, tpPy)

        # Start a new sub-sequence every 10 patterns.
        if i % 10 == 0:
          tp.reset()
          tpPy.reset()

        if verbosity >= 2:
          print "\n\n ===================================\nPattern:",
          print i, "Round:", r, "input:", sequence.getRow(i)

        # Feed the same row to both implementations.
        y1 = tp.learn(sequence.getRow(i))
        y2 = tpPy.learn(sequence.getRow(i))

        # Ensure everything continues to work well even if we continuously
        # rebuild outSynapses structure
        if testRebuild:
          tp.cells4.rebuildOutSynapses()

        if testTrimming:
          tp.trimSegments()
          tpPy.trimSegments()

        if verbosity > 2:
          print "\n ------ CPP states ------ ",
          tp.printStates()
          print "\n ------ PY states ------ ",
          tpPy.printStates()

        if verbosity > 6:
          print "C++ cells: "
          tp.printCells()
          print "PY cells: "
          tpPy.printCells()

        if verbosity >= 3:
          print "Num segments in PY and C++", tpPy.getNumSegments(), \
              tp.getNumSegments()

        # Check if the two TP's are identical or not. This check is slow, so
        # it could be moved to every other iteration; currently it runs on
        # every iteration, which is most useful when debugging.
        self.assertTrue(fdrutils.tpDiff2(tp, tpPy, verbosity, False))

        # Check that outputs are identical
        self.assertLess(abs((y1 - y2).sum()), 3)

    print "Learning completed"

    self.assertTrue(fdrutils.tpDiff2(tp, tpPy, verbosity))

    # TODO: Need to check - currently failing this
    #checkCell0(tpPy)

    # Remove unconnected synapses and check TP's again

    # Test rebuild out synapses
    print "Rebuilding outSynapses"
    tp.cells4.rebuildOutSynapses()
    self.assertTrue(fdrutils.tpDiff2(tp, tpPy, VERBOSITY))

    print "Trimming segments"
    tp.trimSegments()
    tpPy.trimSegments()
    self.assertTrue(fdrutils.tpDiff2(tp, tpPy, VERBOSITY))

    # Save and reload after learning
    print "Pickling and unpickling"
    tp.makeCells4Ephemeral = False
    pickle.dump(tp, open("test_tp10x.pkl", "wb"))
    tp2 = pickle.load(open("test_tp10x.pkl"))
    self.assertTrue(fdrutils.tpDiff2(tp, tp2, VERBOSITY, checkStates=False))

    # Infer
    print "Testing inference"

    # Setup for inference
    tp.reset()
    tpPy.reset()
    setVerbosity(INFERENCE_VERBOSITY, tp, tpPy)

    # NOTE(review): only the first 4 of the 40 rows are initialized with
    # random bits; the remainder stay all-zero -- confirm this is intended.
    patterns = numpy.zeros((40, tp.numberOfCols), dtype='uint32')
    for i in xrange(4):
      _RGEN.initializeUInt32Array(patterns[i], 2)

    for i, x in enumerate(patterns):

      # x is re-randomized here, so the loop uses `patterns` only for its
      # length and for checkPrediction2 below.
      x = numpy.zeros(tp.numberOfCols, dtype='uint32')
      _RGEN.initializeUInt32Array(x, 2)
      y = tp.infer(x)
      yPy = tpPy.infer(x)

      self.assertTrue(fdrutils.tpDiff2(tp, tpPy, VERBOSITY, checkLearn=False))
      if abs((y - yPy).sum()) > 0:
        print "C++ output", y
        print "Py output", yPy
        assert False

      if i > 0:
        tp.checkPrediction2(patterns)
        tpPy.checkPrediction2(patterns)

    print "Inference completed"
    print "===================================="

    return tp, tpPy

  def testTPs(self, short=True):
    """Call basicTest2 with multiple parameter settings and ensure the C++ and
    PY versions are identical throughout."""

    if short == True:
      print "Testing short version"
    else:
      print "Testing long version"

    if short:
      print "\nTesting with fixed resource CLA - test max segment and synapses"
      tp = TP10X2(numberOfCols=30, cellsPerColumn=5,
                  initialPerm=.5, connectedPerm= 0.5, permanenceMax=1,
                  minThreshold=8, newSynapseCount=10,
                  permanenceInc=0.1, permanenceDec=0.01,
                  globalDecay=.0, activationThreshold=8,
                  doPooling=False, segUpdateValidDuration=5,
                  seed=SEED, verbosity=VERBOSITY,
                  maxAge=0,
                  maxSegmentsPerCell=2, maxSynapsesPerSegment=10,
                  checkSynapseConsistency=True)
      tp.cells4.setCellSegmentOrder(True)
      self.basicTest2(tp, numPatterns=15, numRepetitions=1)

    if not short:
      print "\nTesting with fixed resource CLA - test max segment and synapses"
      tp = TP10X2(numberOfCols=30, cellsPerColumn=5,
                  initialPerm = .5, connectedPerm= 0.5, permanenceMax = 1,
                  minThreshold = 8, newSynapseCount = 10,
                  permanenceInc = .1, permanenceDec= .01,
                  globalDecay = .0, activationThreshold = 8,
                  doPooling = False, segUpdateValidDuration = 5,
                  seed=SEED, verbosity = VERBOSITY,
                  maxAge = 0,
                  maxSegmentsPerCell = 2, maxSynapsesPerSegment = 10,
                  checkSynapseConsistency = True)
      tp.cells4.setCellSegmentOrder(1)
      self.basicTest2(tp, numPatterns=30, numRepetitions=2)

      print "\nTesting with permanenceInc = 0 and Dec = 0"
      tp = TP10X2(numberOfCols=30, cellsPerColumn=5,
                  initialPerm = .5, connectedPerm= 0.5,
                  minThreshold = 3, newSynapseCount = 3,
                  permanenceInc = 0.0, permanenceDec= 0.00,
                  permanenceMax = 1,
                  globalDecay = .0, activationThreshold = 3,
                  doPooling = False, segUpdateValidDuration = 5,
                  seed=SEED, verbosity = VERBOSITY,
                  checkSynapseConsistency = False)
      tp.printParameters()
      self.basicTest2(tp, numPatterns = 30, numRepetitions = 3)

      print "Testing with permanenceInc = 0 and Dec = 0 and 1 cell per column"
      tp = TP10X2(numberOfCols=30, cellsPerColumn=1,
                  initialPerm = .5, connectedPerm= 0.5,
                  minThreshold = 3, newSynapseCount = 3,
                  permanenceInc = 0.0, permanenceDec= 0.0,
                  permanenceMax = 1,
                  globalDecay = .0, activationThreshold = 3,
                  doPooling = False, segUpdateValidDuration = 5,
                  seed=SEED, verbosity = VERBOSITY,
                  checkSynapseConsistency = False)
      self.basicTest2(tp)

      print "Testing with permanenceInc = 0.1 and Dec = .0"
      tp = TP10X2(numberOfCols=30, cellsPerColumn=5,
                  initialPerm = .5, connectedPerm= 0.5,
                  minThreshold = 3, newSynapseCount = 3,
                  permanenceInc = .1, permanenceDec= .0,
                  permanenceMax = 1,
                  globalDecay = .0, activationThreshold = 3,
                  doPooling = False, segUpdateValidDuration = 5,
                  seed=SEED, verbosity = VERBOSITY,
                  checkSynapseConsistency = False)
      self.basicTest2(tp)

      print ("Testing with permanenceInc = 0.1, Dec = .01 and higher synapse "
             "count")
      tp = TP10X2(numberOfCols=30, cellsPerColumn=2,
                  initialPerm = .5, connectedPerm= 0.5,
                  minThreshold = 3, newSynapseCount = 5,
                  permanenceInc = .1, permanenceDec= .01,
                  permanenceMax = 1,
                  globalDecay = .0, activationThreshold = 3,
                  doPooling = False, segUpdateValidDuration = 5,
                  seed=SEED, verbosity = VERBOSITY,
                  checkSynapseConsistency = True)
      self.basicTest2(tp, numPatterns=10, numRepetitions=2)

      print "Testing age based global decay"
      tp = TP10X2(numberOfCols=30, cellsPerColumn=5,
                  initialPerm = .4, connectedPerm= 0.5,
                  minThreshold = 3, newSynapseCount = 3,
                  permanenceInc = 0.1, permanenceDec= 0.1,
                  permanenceMax = 1,
                  globalDecay = .25, activationThreshold = 3,
                  doPooling = False, segUpdateValidDuration = 5,
                  pamLength = 2, maxAge = 20,
                  seed=SEED, verbosity = VERBOSITY,
                  checkSynapseConsistency = True)
      tp.cells4.setCellSegmentOrder(1)
      self.basicTest2(tp)

      print "\nTesting with fixed size CLA, max segments per cell"
      tp = TP10X2(numberOfCols=30, cellsPerColumn=5,
                  initialPerm = .5, connectedPerm= 0.5, permanenceMax = 1,
                  minThreshold = 8, newSynapseCount = 10,
                  permanenceInc = .1, permanenceDec= .01,
                  globalDecay = .0, activationThreshold = 8,
                  doPooling = False, segUpdateValidDuration = 5,
                  seed=SEED, verbosity = VERBOSITY,
                  maxAge = 0,
                  maxSegmentsPerCell = 2, maxSynapsesPerSegment = 100,
                  checkSynapseConsistency = True)
      tp.cells4.setCellSegmentOrder(1)
      self.basicTest2(tp, numPatterns=30, numRepetitions=2)
# Run all test methods in this module when invoked as a script.
if __name__ == '__main__':
  unittest.main()
| gpl-3.0 |
biocyberman/bcbio-nextgen | bcbio/hla/bwakit.py | 5 | 4025 | """Call HLA alleles with assembly methods implemented in bwakit.
https://github.com/lh3/bwa/blob/master/README-alt.md#hla-typing
https://github.com/lh3/bwa/tree/master/bwakit
"""
import csv
import glob
import os
import toolz as tz
from bcbio import utils
from bcbio.distributed.transaction import file_transaction
from bcbio.hla import groups as hla_groups
from bcbio.pipeline import datadict as dd
from bcbio.provenance import do
def run(data):
    """HLA typing with bwakit, parsing output from called genotype files.

    Runs bwakit's ``run-HLA`` helper on the HLA fastqs extracted during
    alignment, then organizes the per-locus genotype calls into a single
    CSV summary attached to the sample.

    :param data: bcbio sample data dictionary; reads ``hla.fastq`` and,
        when typing runs, sets ``hla.call_file`` and ``hla.hlacaller``.
    :returns: the (possibly updated) sample data dictionary.
    """
    # run-HLA is distributed alongside run-bwamem in the bwakit install.
    bwakit_dir = os.path.dirname(os.path.realpath(utils.which("run-bwamem")))
    hla_fqs = tz.get_in(["hla", "fastq"], data, [])
    if hla_fqs:
        # run-HLA derives all of its output names from the shared fastq
        # prefix, minus any trailing dots.
        hla_base = os.path.commonprefix(hla_fqs)
        while hla_base.endswith("."):
            hla_base = hla_base[:-1]
        out_file = hla_base + ".top"
        if not utils.file_exists(out_file):
            cmd = "{bwakit_dir}/run-HLA {hla_base}"
            do.run(cmd.format(**locals()), "HLA typing with bwakit")
        out_file = _organize_calls(out_file, hla_base, data)
        data["hla"].update({"call_file": out_file,
                            "hlacaller": "bwakit"})
    return data
def _organize_calls(out_file, hla_base, data):
    """Prepare genotype calls, reporting best call along with quality metrics.

    Reads each ``<hla_base>.HLA-<locus>.gt`` genotype file produced by
    bwakit's run-HLA, takes the top-ranked call for each locus and writes a
    single CSV summary (one row per locus), including validation against
    configured truth alleles when available.
    """
    hla_truth = get_hla_truthset(data)
    sample = dd.get_sample_name(data)
    with file_transaction(data, out_file) as tx_out_file:
        with open(tx_out_file, "w") as out_handle:
            writer = csv.writer(out_handle)
            writer.writerow(["sample", "locus", "mismatches", "options", "alleles", "p-groups", "expected",
                             "validates"])
            for genotype_file in glob.glob("%s.HLA-*.gt" % (hla_base)):
                # The locus name (eg. "A") is embedded in the file name
                # between the "HLA-" prefix and ".gt" suffix.
                hla_locus = os.path.basename(genotype_file).replace(
                    "%s.HLA-" % os.path.basename(hla_base), "").replace(".gt", "")
                with open(genotype_file) as in_handle:
                    total_options = set([])
                    for i, line in enumerate(in_handle):
                        # assumes tab-delimited rows with the allele pair and
                        # mismatch count in columns 2-4 -- TODO confirm
                        # against run-HLA's .gt output format
                        _, aone, atwo, m = line.split("\t")[:4]
                        pgroups = (hla_groups.hla_protein(aone, data), hla_groups.hla_protein(atwo, data))
                        if i == 0:
                            # First line is treated as the top-ranked call.
                            call_alleles = [aone, atwo]
                            call_pgroups = pgroups
                            mismatches = m
                        total_options.add(pgroups)
                    if len(total_options) > 0:
                        truth_alleles = tz.get_in([sample, hla_locus], hla_truth, [])
                        writer.writerow([sample, hla_locus, mismatches, len(total_options),
                                         ";".join(call_alleles), ";".join(call_pgroups),
                                         ";".join(truth_alleles), matches_truth(call_alleles, truth_alleles, data)])
    return out_file
def matches_truth(call_alleles, truth_alleles, data):
    """Flexibly check if truth and call alleles match, using p-groups.

    Returns "" when no truth alleles are configured, otherwise "yes" when
    every truth p-group (ignoring a trailing "P") appears among the called
    p-groups, and "no" otherwise.
    """
    if not truth_alleles:
        return ""

    def _strip_p(allele):
        return allele[:-1] if allele.endswith("P") else allele

    expected = set(_strip_p(hla_groups.hla_protein(a, data))
                   for a in truth_alleles)
    called = set(_strip_p(hla_groups.hla_protein(a, data))
                 for a in call_alleles)
    # Subset test: every expected p-group must have been called.
    return "yes" if expected <= called else "no"
def get_hla_truthset(data):
    """Retrieve expected truth calls for annotating HLA called output.

    Reads the CSV configured at ``config.algorithm.hlavalidate`` with rows
    of ``sample, locus, alleles`` (alleles separated by ";") and returns a
    nested dict of ``{sample: {locus: [allele, ...]}}``.
    """
    val_csv = tz.get_in(["config", "algorithm", "hlavalidate"], data)
    out = {}
    if val_csv and utils.file_exists(val_csv):
        with open(val_csv) as in_handle:
            reader = csv.reader(in_handle)
            # `next(reader)` (not `reader.next()`) works on both
            # Python 2.6+ and Python 3.
            next(reader)  # header
            for sample, locus, alleles in (l for l in reader if l):
                # The update function ignores any existing value; note the
                # comprehension variable is renamed so it no longer shadows
                # the lambda's parameter.
                out = tz.update_in(out, [sample, locus],
                                   lambda _: [a.strip() for a in alleles.split(";")])
    return out
| mit |
praekelt/go-api-toolkit | go_api/collections/inmemory.py | 1 | 4132 | """
An in-memory ICollection implementation.
"""
from copy import deepcopy
from uuid import uuid4
from go_api.queue import PausingDeferredQueue, PausingQueueCloseMarker
from twisted.internet.defer import inlineCallbacks
from zope.interface import implementer
from .interfaces import ICollection
from .errors import (
CollectionObjectNotFound, CollectionObjectAlreadyExists,
CollectionUsageError)
from ..utils import simulate_async
@implementer(ICollection)
class InMemoryCollection(object):
    """
    A Collection implementation backed by an in-memory dict.

    Intended for tests and for subclasses that expose a filtered view of a
    shared backing dict (via ``_id_to_key``/``_key_to_id``/``_is_my_key``).
    Public methods are wrapped with ``simulate_async``, presumably so they
    behave like an asynchronous backend -- see ``..utils``.
    """

    def __init__(self, data=None):
        # A caller-supplied dict is used directly (not copied), so several
        # collections may deliberately share one backing store.
        if data is None:
            data = {}
        self._data = data

    def _id_to_key(self, object_id):
        """
        Convert object_id into a key for the internal datastore. This should be
        overridden in subclasses that don't use object_id as the key.
        """
        return object_id

    def _key_to_id(self, key):
        """
        Convert an internal datastore key into an object_id. This should be
        overridden in subclasses that don't use object_id as the key.
        """
        return key

    def _is_my_key(self, key):
        """
        Returns True if the key belongs to this store, False otherwise. This
        should be overridden in subclasses that only operate on a subset of the
        keys in the backend datastore.
        """
        return True

    def _set_data(self, object_id, data):
        # Deep-copy on write so later mutation of the caller's dict cannot
        # change what is stored; the id is embedded in the stored row.
        row_data = deepcopy(data)
        row_data['id'] = object_id
        self._data[self._id_to_key(object_id)] = row_data

    def _get_data(self, object_id):
        # Deep-copy on read; returns None when the object is absent.
        data = self._data.get(self._id_to_key(object_id), None)
        return deepcopy(data)

    def _get_keys(self):
        # Only report keys that belong to this collection's view.
        return [
            self._key_to_id(key) for key in self._data
            if self._is_my_key(key)]

    @simulate_async
    def all_keys(self):
        """Return the object ids of all objects in this collection."""
        return self._get_keys()

    @simulate_async
    def stream(self, query):
        """
        Return a PausingDeferredQueue of all objects in sorted id order,
        terminated by a PausingQueueCloseMarker. Queries are not supported.
        """
        if query is not None:
            raise CollectionUsageError(
                'query parameter not supported by InMemoryCollection')
        q = PausingDeferredQueue(backlog=1, size=3)

        @inlineCallbacks
        def fill_queue():
            for object_id in sorted(self._get_keys()):
                yield q.put(self._get_data(object_id))
            yield q.put(PausingQueueCloseMarker())

        # fill_d fires once the queue has been fully filled and closed.
        q.fill_d = fill_queue()
        return q

    @simulate_async
    def page(self, cursor, max_results, query):
        """
        Return one page of objects as ``(next_cursor, objects)``;
        next_cursor is None on the last page. Queries are not supported.
        """
        if query is not None:
            raise CollectionUsageError(
                'query parameter not supported by InMemoryCollection')
        # Default value of 5 for max_results
        max_results = max_results or 5
        # Default value of 0 for cursor
        cursor = int(cursor) if cursor else 0
        keys = sorted(self._get_keys())
        next_cursor = cursor + max_results
        # NOTE(review): `map` returns a list on Python 2 but a lazy iterator
        # on Python 3 -- confirm which interpreters must be supported.
        groups = map(self._get_data, keys[cursor:next_cursor])
        next_cursor = next_cursor if next_cursor < len(keys) else None
        return (
            next_cursor,
            groups,
        )

    @simulate_async
    def get(self, object_id):
        """Return a copy of the object; raise CollectionObjectNotFound if
        absent."""
        data = self._get_data(object_id)
        if data is None:
            raise CollectionObjectNotFound(object_id)
        return data

    @simulate_async
    def create(self, object_id, data):
        """
        Store a new object and return ``(object_id, stored_data)``. A random
        uuid4 hex id is generated when object_id is None; raises
        CollectionObjectAlreadyExists for a duplicate id.
        """
        if object_id is None:
            object_id = uuid4().hex
        if self._get_data(object_id) is not None:
            raise CollectionObjectAlreadyExists(object_id)
        self._set_data(object_id, data)
        return (object_id, self._get_data(object_id))

    @simulate_async
    def update(self, object_id, data):
        """Replace an existing object and return the stored copy; raise
        CollectionObjectNotFound if absent."""
        if not self._id_to_key(object_id) in self._data:
            raise CollectionObjectNotFound(object_id)
        self._set_data(object_id, data)
        return self._get_data(object_id)

    @simulate_async
    def delete(self, object_id):
        """Remove and return the object; raise CollectionObjectNotFound if
        absent."""
        data = self._get_data(object_id)
        if data is None:
            raise CollectionObjectNotFound(object_id)
        self._data.pop(self._id_to_key(object_id), None)
        return data
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.