| repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
jamesblunt/edx-platform | common/djangoapps/student/tests/test_recent_enrollments.py | 63 | 7937 | """
Tests for the recently enrolled messaging within the Dashboard.
"""
import datetime
from django.conf import settings
from django.core.urlresolvers import reverse
from opaque_keys.edx import locator
from pytz import UTC
import unittest
import ddt
from shoppingcart.models import DonationConfiguration
from student.tests.factories import UserFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from course_modes.tests.factories import CourseModeFactory
from student.models import CourseEnrollment, DashboardConfiguration
from student.views import get_course_enrollments, _get_recently_enrolled_courses # pylint: disable=protected-access
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
@ddt.ddt
class TestRecentEnrollments(ModuleStoreTestCase):
"""
Unit tests for getting the list of courses for a logged in user
"""
PASSWORD = 'test'
def setUp(self):
"""
Add a student
"""
super(TestRecentEnrollments, self).setUp()
self.student = UserFactory()
self.student.set_password(self.PASSWORD)
self.student.save()
# Old Course
old_course_location = locator.CourseLocator('Org0', 'Course0', 'Run0')
course, enrollment = self._create_course_and_enrollment(old_course_location)
enrollment.created = datetime.datetime(1900, 12, 31, 0, 0, 0, 0)
enrollment.save()
# New Course
course_location = locator.CourseLocator('Org1', 'Course1', 'Run1')
self.course, self.enrollment = self._create_course_and_enrollment(course_location)
def _create_course_and_enrollment(self, course_location):
""" Creates a course and associated enrollment. """
course = CourseFactory.create(
org=course_location.org,
number=course_location.course,
run=course_location.run
)
enrollment = CourseEnrollment.enroll(self.student, course.id)
return course, enrollment
def _configure_message_timeout(self, timeout):
"""Configure the amount of time the enrollment message will be displayed. """
config = DashboardConfiguration(recent_enrollment_time_delta=timeout)
config.save()
def test_recently_enrolled_courses(self):
"""
Test if the function for filtering recent enrollments works appropriately.
"""
self._configure_message_timeout(60)
# Get the list of courses by iterating over all enrollments
courses_list = list(get_course_enrollments(self.student, None, []))
self.assertEqual(len(courses_list), 2)
recent_course_list = _get_recently_enrolled_courses(courses_list)
self.assertEqual(len(recent_course_list), 1)
def test_zero_second_delta(self):
"""
Tests that the recent enrollment list is empty if configured to zero seconds.
"""
self._configure_message_timeout(0)
courses_list = list(get_course_enrollments(self.student, None, []))
self.assertEqual(len(courses_list), 2)
recent_course_list = _get_recently_enrolled_courses(courses_list)
self.assertEqual(len(recent_course_list), 0)
def test_enrollments_sorted_most_recent(self):
"""
Test that the list of newly created courses are properly sorted to show the most
recent enrollments first.
"""
self._configure_message_timeout(600)
# Create a number of new enrollments and courses, and backdate their
# creation times so they fall behind the first enrollment
courses = []
for idx, seconds_past in zip(range(2, 6), [5, 10, 15, 20]):
course_location = locator.CourseLocator(
'Org{num}'.format(num=idx),
'Course{num}'.format(num=idx),
'Run{num}'.format(num=idx)
)
course, enrollment = self._create_course_and_enrollment(course_location)
enrollment.created = datetime.datetime.now(UTC) - datetime.timedelta(seconds=seconds_past)
enrollment.save()
courses.append(course)
courses_list = list(get_course_enrollments(self.student, None, []))
self.assertEqual(len(courses_list), 6)
recent_course_list = _get_recently_enrolled_courses(courses_list)
self.assertEqual(len(recent_course_list), 5)
self.assertEqual(recent_course_list[1].course.id, courses[0].id)
self.assertEqual(recent_course_list[2].course.id, courses[1].id)
self.assertEqual(recent_course_list[3].course.id, courses[2].id)
self.assertEqual(recent_course_list[4].course.id, courses[3].id)
def test_dashboard_rendering(self):
"""
Tests that the dashboard renders the recent enrollment messages appropriately.
"""
self._configure_message_timeout(600)
self.client.login(username=self.student.username, password=self.PASSWORD)
response = self.client.get(reverse("dashboard"))
self.assertContains(response, "Thank you for enrolling in")
@ddt.data(
# Register as an honor in any course modes with no payment option
([('audit', 0), ('honor', 0)], 'honor', True),
([('honor', 0)], 'honor', True),
([], 'honor', True),
# Register as an honor in any course modes which has payment option
([('honor', 10)], 'honor', False), # This is a paid course
([('audit', 0), ('honor', 0), ('professional', 20)], 'honor', True),
([('audit', 0), ('honor', 0), ('verified', 20)], 'honor', True),
([('audit', 0), ('honor', 0), ('verified', 20), ('professional', 20)], 'honor', True),
([], 'honor', True),
# Register as an audit in any course modes with no payment option
([('audit', 0), ('honor', 0)], 'audit', True),
([('audit', 0)], 'audit', True),
# Register as an audit in any course modes which has a payment option
([('audit', 0), ('honor', 0), ('verified', 10)], 'audit', True),
# Register as a verified in any course modes which has payment option
([('professional', 20)], 'professional', False),
([('verified', 20)], 'verified', False),
([('professional', 20), ('verified', 20)], 'verified', False),
([('audit', 0), ('honor', 0), ('verified', 20)], 'verified', False)
)
@ddt.unpack
def test_donate_button(self, course_modes, enrollment_mode, show_donate):
# Enable the enrollment success message
self._configure_message_timeout(10000)
# Enable donations
DonationConfiguration(enabled=True).save()
# Create the course mode(s)
for mode, min_price in course_modes:
CourseModeFactory(mode_slug=mode, course_id=self.course.id, min_price=min_price)
self.enrollment.mode = enrollment_mode
self.enrollment.save()
# Check that the donate button is or is not displayed
self.client.login(username=self.student.username, password=self.PASSWORD)
response = self.client.get(reverse("dashboard"))
if show_donate:
self.assertContains(response, "donate-container")
else:
self.assertNotContains(response, "donate-container")
def test_donate_button_honor_with_price(self):
# Enable the enrollment success message and donations
self._configure_message_timeout(10000)
DonationConfiguration(enabled=True).save()
# Create a white-label course mode
# (honor mode with a price set)
CourseModeFactory(mode_slug="honor", course_id=self.course.id, min_price=100)
# Check that the donate button is NOT displayed
self.client.login(username=self.student.username, password=self.PASSWORD)
response = self.client.get(reverse("dashboard"))
self.assertNotContains(response, "donate-container")
| agpl-3.0 |
pfctdayelise/scrapy | scrapy/http/response/__init__.py | 21 | 2615 | """
This module implements the Response class which is used to represent HTTP
responses in Scrapy.
See documentation in docs/topics/request-response.rst
"""
import copy
from six.moves.urllib.parse import urljoin
from scrapy.http.headers import Headers
from scrapy.utils.trackref import object_ref
from scrapy.http.common import obsolete_setter
class Response(object_ref):
def __init__(self, url, status=200, headers=None, body='', flags=None, request=None):
self.headers = Headers(headers or {})
self.status = int(status)
self._set_body(body)
self._set_url(url)
self.request = request
self.flags = [] if flags is None else list(flags)
@property
def meta(self):
try:
return self.request.meta
except AttributeError:
raise AttributeError("Response.meta not available, this response " \
"is not tied to any request")
def _get_url(self):
return self._url
def _set_url(self, url):
if isinstance(url, str):
self._url = url
else:
raise TypeError('%s url must be str, got %s' % (type(self).__name__, \
type(url).__name__))
url = property(_get_url, obsolete_setter(_set_url, 'url'))
def _get_body(self):
return self._body
def _set_body(self, body):
if isinstance(body, str):
self._body = body
elif isinstance(body, unicode):
raise TypeError("Cannot assign a unicode body to a raw Response. " \
"Use TextResponse, HtmlResponse, etc")
elif body is None:
self._body = ''
else:
raise TypeError("Response body must either be str or unicode. Got: '%s'" \
% type(body).__name__)
body = property(_get_body, obsolete_setter(_set_body, 'body'))
def __str__(self):
return "<%d %s>" % (self.status, self.url)
__repr__ = __str__
def copy(self):
"""Return a copy of this Response"""
return self.replace()
def replace(self, *args, **kwargs):
"""Create a new Response with the same attributes except for those
given new values.
"""
for x in ['url', 'status', 'headers', 'body', 'request', 'flags']:
kwargs.setdefault(x, getattr(self, x))
cls = kwargs.pop('cls', self.__class__)
return cls(*args, **kwargs)
def urljoin(self, url):
"""Join this Response's url with a possible relative url to form an
absolute interpretation of the latter."""
return urljoin(self.url, url)
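# Minimal usage sketch (not part of the original module):
#   r = Response("http://example.com/a/", body="ok")
#   r.urljoin("b.html")    # -> "http://example.com/a/b.html"
#   r.replace(status=404)  # copy of r with only the status overridden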
| bsd-3-clause |
googleapis/googleapis-gen | google/cloud/bigquery/datatransfer/v1/bigquery-datatransfer-v1-py/google/cloud/bigquery_datatransfer/__init__.py | 1 | 4530 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.cloud.bigquery_datatransfer_v1.services.data_transfer_service.client import DataTransferServiceClient
from google.cloud.bigquery_datatransfer_v1.services.data_transfer_service.async_client import DataTransferServiceAsyncClient
from google.cloud.bigquery_datatransfer_v1.types.datatransfer import CheckValidCredsRequest
from google.cloud.bigquery_datatransfer_v1.types.datatransfer import CheckValidCredsResponse
from google.cloud.bigquery_datatransfer_v1.types.datatransfer import CreateTransferConfigRequest
from google.cloud.bigquery_datatransfer_v1.types.datatransfer import DataSource
from google.cloud.bigquery_datatransfer_v1.types.datatransfer import DataSourceParameter
from google.cloud.bigquery_datatransfer_v1.types.datatransfer import DeleteTransferConfigRequest
from google.cloud.bigquery_datatransfer_v1.types.datatransfer import DeleteTransferRunRequest
from google.cloud.bigquery_datatransfer_v1.types.datatransfer import GetDataSourceRequest
from google.cloud.bigquery_datatransfer_v1.types.datatransfer import GetTransferConfigRequest
from google.cloud.bigquery_datatransfer_v1.types.datatransfer import GetTransferRunRequest
from google.cloud.bigquery_datatransfer_v1.types.datatransfer import ListDataSourcesRequest
from google.cloud.bigquery_datatransfer_v1.types.datatransfer import ListDataSourcesResponse
from google.cloud.bigquery_datatransfer_v1.types.datatransfer import ListTransferConfigsRequest
from google.cloud.bigquery_datatransfer_v1.types.datatransfer import ListTransferConfigsResponse
from google.cloud.bigquery_datatransfer_v1.types.datatransfer import ListTransferLogsRequest
from google.cloud.bigquery_datatransfer_v1.types.datatransfer import ListTransferLogsResponse
from google.cloud.bigquery_datatransfer_v1.types.datatransfer import ListTransferRunsRequest
from google.cloud.bigquery_datatransfer_v1.types.datatransfer import ListTransferRunsResponse
from google.cloud.bigquery_datatransfer_v1.types.datatransfer import ScheduleTransferRunsRequest
from google.cloud.bigquery_datatransfer_v1.types.datatransfer import ScheduleTransferRunsResponse
from google.cloud.bigquery_datatransfer_v1.types.datatransfer import StartManualTransferRunsRequest
from google.cloud.bigquery_datatransfer_v1.types.datatransfer import StartManualTransferRunsResponse
from google.cloud.bigquery_datatransfer_v1.types.datatransfer import UpdateTransferConfigRequest
from google.cloud.bigquery_datatransfer_v1.types.transfer import EmailPreferences
from google.cloud.bigquery_datatransfer_v1.types.transfer import ScheduleOptions
from google.cloud.bigquery_datatransfer_v1.types.transfer import TransferConfig
from google.cloud.bigquery_datatransfer_v1.types.transfer import TransferMessage
from google.cloud.bigquery_datatransfer_v1.types.transfer import TransferRun
from google.cloud.bigquery_datatransfer_v1.types.transfer import TransferState
from google.cloud.bigquery_datatransfer_v1.types.transfer import TransferType
__all__ = ('DataTransferServiceClient',
'DataTransferServiceAsyncClient',
'CheckValidCredsRequest',
'CheckValidCredsResponse',
'CreateTransferConfigRequest',
'DataSource',
'DataSourceParameter',
'DeleteTransferConfigRequest',
'DeleteTransferRunRequest',
'GetDataSourceRequest',
'GetTransferConfigRequest',
'GetTransferRunRequest',
'ListDataSourcesRequest',
'ListDataSourcesResponse',
'ListTransferConfigsRequest',
'ListTransferConfigsResponse',
'ListTransferLogsRequest',
'ListTransferLogsResponse',
'ListTransferRunsRequest',
'ListTransferRunsResponse',
'ScheduleTransferRunsRequest',
'ScheduleTransferRunsResponse',
'StartManualTransferRunsRequest',
'StartManualTransferRunsResponse',
'UpdateTransferConfigRequest',
'EmailPreferences',
'ScheduleOptions',
'TransferConfig',
'TransferMessage',
'TransferRun',
'TransferState',
'TransferType',
)
| apache-2.0 |
lidavidm/mathics-heroku | venv/lib/python2.7/site-packages/django/utils/safestring.py | 24 | 4401 | """
Functions for working with "safe strings": strings that can be displayed safely
without further escaping in HTML. Marking something as a "safe string" means
that the producer of the string has already turned characters that should not
be interpreted by the HTML engine (e.g. '<') into the appropriate entities.
"""
from django.utils.functional import curry, Promise, allow_lazy
from django.utils import six
class EscapeData(object):
pass
class EscapeBytes(bytes, EscapeData):
"""
A byte string that should be HTML-escaped when output.
"""
__new__ = allow_lazy(bytes.__new__, bytes)
class EscapeText(six.text_type, EscapeData):
"""
A unicode string object that should be HTML-escaped when output.
"""
__new__ = allow_lazy(six.text_type.__new__, six.text_type)
if six.PY3:
EscapeString = EscapeText
else:
EscapeString = EscapeBytes
# backwards compatibility for Python 2
EscapeUnicode = EscapeText
class SafeData(object):
pass
class SafeBytes(bytes, SafeData):
"""
A bytes subclass that has been specifically marked as "safe" (requires no
further escaping) for HTML output purposes.
"""
__new__ = allow_lazy(bytes.__new__, bytes)
def __add__(self, rhs):
"""
Concatenating a safe byte string with another safe byte string or safe
unicode string is safe. Otherwise, the result is no longer safe.
"""
t = super(SafeBytes, self).__add__(rhs)
if isinstance(rhs, SafeText):
return SafeText(t)
elif isinstance(rhs, SafeBytes):
return SafeBytes(t)
return t
def _proxy_method(self, *args, **kwargs):
"""
Wrap a call to a normal unicode method up so that we return safe
results. The method that is being wrapped is passed in the 'method'
argument.
"""
method = kwargs.pop('method')
data = method(self, *args, **kwargs)
if isinstance(data, bytes):
return SafeBytes(data)
else:
return SafeText(data)
decode = curry(_proxy_method, method=bytes.decode)
class SafeText(six.text_type, SafeData):
"""
A unicode (Python 2) / str (Python 3) subclass that has been specifically
marked as "safe" for HTML output purposes.
"""
__new__ = allow_lazy(six.text_type.__new__, six.text_type)
def __add__(self, rhs):
"""
Concatenating a safe unicode string with another safe byte string or
safe unicode string is safe. Otherwise, the result is no longer safe.
"""
t = super(SafeText, self).__add__(rhs)
if isinstance(rhs, SafeData):
return SafeText(t)
return t
def _proxy_method(self, *args, **kwargs):
"""
Wrap a call to a normal unicode method up so that we return safe
results. The method that is being wrapped is passed in the 'method'
argument.
"""
method = kwargs.pop('method')
data = method(self, *args, **kwargs)
if isinstance(data, bytes):
return SafeBytes(data)
else:
return SafeText(data)
encode = curry(_proxy_method, method=six.text_type.encode)
if six.PY3:
SafeString = SafeText
else:
SafeString = SafeBytes
# backwards compatibility for Python 2
SafeUnicode = SafeText
def mark_safe(s):
"""
Explicitly mark a string as safe for (HTML) output purposes. The returned
object can be used everywhere a string or unicode object is appropriate.
Can be called multiple times on a single string.
"""
if isinstance(s, SafeData):
return s
if isinstance(s, bytes) or (isinstance(s, Promise) and s._delegate_bytes):
return SafeBytes(s)
if isinstance(s, (six.text_type, Promise)):
return SafeText(s)
return SafeString(str(s))
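# Behavior sketch (assuming Python 2, where str is bytes):
#   mark_safe("<b>hi</b>")   # -> SafeBytes; later HTML escaping leaves it alone
#   mark_safe(u"<b>hi</b>")  # -> SafeText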
def mark_for_escaping(s):
"""
Explicitly mark a string as requiring HTML escaping upon output. Has no
effect on SafeData subclasses.
Can be called multiple times on a single string (the resulting escaping is
only applied once).
"""
if isinstance(s, (SafeData, EscapeData)):
return s
if isinstance(s, bytes) or (isinstance(s, Promise) and s._delegate_bytes):
return EscapeBytes(s)
if isinstance(s, (six.text_type, Promise)):
return EscapeText(s)
return EscapeBytes(bytes(s))
| gpl-3.0 |
ecanzonieri/pyleus | pyleus/storm/component.py | 1 | 10241 | """Module containing the base class for all pyleus components and a wrapper
class around Storm configurations.
"""
from __future__ import absolute_import
import argparse
from collections import deque
import logging
import logging.config
import os
import sys
import traceback
try:
import simplejson as json
_ = json # pyflakes
except ImportError:
import json
from pyleus.storm import DEFAULT_STREAM
from pyleus.storm import StormTuple
from pyleus.storm.serializers.msgpack_serializer import MsgpackSerializer
from pyleus.storm.serializers.json_serializer import JSONSerializer
# Please keep in sync with java TopologyBuilder
DESCRIBE_OPT = "--describe"
COMPONENT_OPTIONS_OPT = "--options"
PYLEUS_CONFIG_OPT = "--pyleus-config"
DEFAULT_LOGGING_CONFIG_PATH = "pyleus_logging.conf"
JSON_SERIALIZER = "json"
MSGPACK_SERIALIZER = "msgpack"
SERIALIZERS = {
JSON_SERIALIZER: JSONSerializer,
MSGPACK_SERIALIZER: MsgpackSerializer,
}
log = logging.getLogger(__name__)
def _is_namedtuple(obj):
return (type(obj) is type and
issubclass(obj, tuple) and
hasattr(obj, "_fields"))
def _serialize(obj):
"""Given a list, a tuple or a namedtuple, return it as a list. In case of
None, simply return None.
"""
if obj is None:
return None
# obj is a namedtuple "class"
elif _is_namedtuple(obj):
return list(obj._fields)
# obj is a list or a tuple
return list(obj)
def _expand_output_fields(obj):
"""Expand all allowed notations for defining OUTPUT_FIELDS into the
extended one.
"""
# if single-stream notation
if not isinstance(obj, dict):
return {DEFAULT_STREAM: _serialize(obj)}
# if multiple-streams notation
for key, value in obj.items():
obj[key] = _serialize(value)
return obj
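# Hedged example (assuming DEFAULT_STREAM == "default"): both notations
# normalize to the multiple-streams form.
#   _expand_output_fields(["word", "count"])
#   # -> {"default": ["word", "count"]}
#   _expand_output_fields({"counts": ("word", "count"), "errors": None})
#   # -> {"counts": ["word", "count"], "errors": None}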
class StormConfig(dict):
"""Add some convenience properites to a configuration ``dict`` from Storm.
You can access Storm configuration dictionary within a component through
``self.conf``.
"""
def __init__(self, conf):
super(StormConfig, self).__init__()
self.update(conf)
@property
def tick_tuple_freq(self):
"""Helper property to access the value of tick tuple frequency stored
in Storm configuration.
:return: tick tuple frequency for the component
:rtype: ``float`` or ``None``
.. note::
Bolts not specifying tick tuple frequency default to ``None``,
while spouts are not supposed to use tick tuples at all.
"""
return self.get("topology.tick.tuple.freq.secs")
class Component(object):
"""Base class for all pyleus components."""
COMPONENT_TYPE = None # One of "bolt", "spout"
#: ``list`` or ``dict`` of output fields for the component.
#:
#: .. note:: Specify in subclass.
#:
#: .. seealso:: :ref:`groupings`
OUTPUT_FIELDS = None
#: ``list`` of user-defined options for the component.
#:
#: .. note:: Specify in subclass.
OPTIONS = None
# Populated in Component.run()
#: ``dict`` containing options passed to component in the yaml definition
#: file.
options = None
#: :class:`~.StormConfig` containing the Storm configuration for the
#: component.
conf = None
#: ``dict`` containing the Storm context for the component.
context = None
pyleus_config = None
def __init__(self, input_stream=None, output_stream=None):
"""The Storm component will parse the command line in order
to figure out if it has been queried for a description or for
actually running."""
super(Component, self).__init__()
if input_stream is None:
input_stream = sys.stdin
if output_stream is None:
output_stream = sys.stdout
self._input_stream = input_stream
self._output_stream = output_stream
self._pending_commands = deque()
self._pending_taskids = deque()
self._serializer = None
def describe(self):
"""Print to stdout a JSON description of the component.
The java TopologyBuilder will use the JSON description for topology
configuration and validation.
"""
print(json.dumps({
"component_type": self.COMPONENT_TYPE,
"output_fields": _expand_output_fields(self.OUTPUT_FIELDS),
"options": _serialize(self.OPTIONS)}))
def initialize_logging(self):
"""Load logging configuration file from command line configuration (if
provided) and initialize logging for the component.
"""
logging_config_path = self.pyleus_config.get('logging_config_path')
if logging_config_path:
logging.config.fileConfig(logging_config_path)
elif os.path.isfile(DEFAULT_LOGGING_CONFIG_PATH):
logging.config.fileConfig(DEFAULT_LOGGING_CONFIG_PATH)
def initialize_serializer(self):
"""Load serializer type from command line configuration and instantiate
the associated
:class:`~pyleus.storm.serializers.serializer.Serializer`.
"""
serializer = self.pyleus_config.get('serializer')
if serializer in SERIALIZERS:
self._serializer = SERIALIZERS[serializer](
self._input_stream, self._output_stream)
else:
raise ValueError("Unknown serializer: {0}", serializer)
def setup_component(self):
"""Storm component setup before execution. It will also
call the initialization method implemented in the subclass.
"""
self.conf, self.context = self._init_component()
self.initialize()
def initialize(self):
"""Called after component has been launched, but before processing any
tuples. You can use this method to setup your component.
.. note:: Implement in subclass.
"""
pass
def run(self):
"""Entry point for the component running logic.
Forgetting to call it as following will prevent the topology from
running.
:Example:
.. code-block:: python
if __name__ == '__main__':
MyComponent().run()
"""
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument(DESCRIBE_OPT, action="store_true", default=False)
parser.add_argument(COMPONENT_OPTIONS_OPT, default=None)
parser.add_argument(PYLEUS_CONFIG_OPT, default=None)
args = parser.parse_args()
if args.describe:
self.describe()
return
self.options = json.loads(args.options) if args.options else {}
self.pyleus_config = json.loads(args.pyleus_config) \
if args.pyleus_config else {}
try:
self.initialize_logging()
self.initialize_serializer()
self.setup_component()
self.run_component()
except Exception as e:
log.exception("Exception in {0}.run".format(self.COMPONENT_TYPE))
self.error(traceback.format_exc())
def run_component(self):
"""Run the main loop of the component. Implemented in Bolt and
Spout subclasses.
"""
raise NotImplementedError
def _msg_is_command(self, msg):
"""Storm differentiates between commands and taskids by whether the
message is a ``dict`` or ``list``.
"""
return isinstance(msg, dict)
def _msg_is_taskid(self, msg):
"""..seealso:: :meth:`~._msg_is_command`"""
return isinstance(msg, list)
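# For example (hypothetical wire messages): a dict such as
# {"command": "next"} is queued/returned as a command, while a bare list
# such as [12, 13] is treated as a taskid reply.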
def read_command(self):
"""Return the next command from the input stream, whether from the
_pending_commands queue or the stream directly if the queue is empty.
In that case, queue any taskids which are received until the next
command comes in.
"""
if self._pending_commands:
return self._pending_commands.popleft()
msg = self._serializer.read_msg()
while self._msg_is_taskid(msg):
self._pending_taskids.append(msg)
msg = self._serializer.read_msg()
return msg
def read_taskid(self):
"""Like :meth:`~.read_command`, but returns the next taskid and queues
any commands received while reading the input stream.
"""
if self._pending_taskids:
return self._pending_taskids.popleft()
msg = self._serializer.read_msg()
while self._msg_is_command(msg):
self._pending_commands.append(msg)
msg = self._serializer.read_msg()
return msg
def read_tuple(self):
"""Read and parse a command into a StormTuple object."""
cmd = self.read_command()
return StormTuple(
cmd['id'], cmd['comp'], cmd['stream'], cmd['task'], cmd['tuple'])
def _create_pidfile(self, pid_dir, pid):
"""Create a file based on pid used by Storm to watch over the Python
process.
"""
open(os.path.join(pid_dir, str(pid)), 'a').close()
def _init_component(self):
"""Receive the setup_info dict from the Storm task and report back with
our pid; also touch a pidfile in the pidDir specified in setup_info.
"""
setup_info = self._serializer.read_msg()
pid = os.getpid()
self._serializer.send_msg({'pid': pid})
self._create_pidfile(setup_info['pidDir'], pid)
return StormConfig(setup_info['conf']), setup_info['context']
def send_command(self, command, opts_dict=None):
"""Merge command with options and send the message through
:class:`~pyleus.storm.serializers.serializer.Serializer`
"""
if opts_dict is not None:
command_dict = dict(opts_dict)
command_dict['command'] = command
else:
command_dict = dict(command=command)
self._serializer.send_msg(command_dict)
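# e.g. (sketch): send_command('emit', {'tuple': values}) sends the single
# message {'command': 'emit', 'tuple': values} through the serializer.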
def log(self, msg):
"""Send a log message."""
self.send_command('log', {
'msg': msg,
})
def error(self, msg):
"""Send an error message."""
self.send_command('error', {
'msg': msg,
})
| apache-2.0 |
MrKriss/ThinkStatsToolbox | stats_toolbox/utils/cleaners.py | 1 | 1709 | """ Functions to clean data frames by handling outliers and missing values."""
import numpy as np
def clean_fem_preg(df):
"""Recode variables from the pregnancy data frame. """
# mother's age is encoded in centiyears; convert to years
df.agepreg /= 100.0
# birthwgt_lb contains at least one bogus value (51 lbs)
# replace with NaN
df.ix[df.birthwgt_lb > 20, "birthwgt_lb"] = np.nan
# replace 'not ascertained', 'refused', 'don't know' with NaN
na_vals = [97, 98, 99]
df.birthwgt_lb.replace(na_vals, np.nan, inplace=True)
df.birthwgt_oz.replace(na_vals, np.nan, inplace=True)
df.hpagelb.replace(na_vals, np.nan, inplace=True)
df.babysex.replace([7, 9], np.nan, inplace=True)
df.nbrnaliv.replace([9], np.nan, inplace=True)
# birthweight is stored in two columns, lbs and oz.
# convert to a single column in lb
# NOTE: creating a new column requires dictionary syntax,
# not attribute assignment (like df.totalwgt_lb)
df['totalwgt_lb'] = df.birthwgt_lb + df.birthwgt_oz / 16.0
# due to a bug in ReadStataDct, the last variable gets clipped;
# so for now set it to NaN
df.cmintvw = np.nan
return df
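# Usage sketch (hypothetical; the loader name is an assumption -- the NSFG
# pregnancy frame normally comes from a Stata dictionary reader):
#   df = read_stata_dct("2002FemPreg.dct", "2002FemPreg.dat.gz")  # assumed helper
#   df = clean_fem_preg(df)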
def clean_brfss_frame(df):
"""Recode BRFSS variables.
df: DataFrame
"""
# clean age
df.age.replace([7, 9], float('NaN'), inplace=True)
# clean height
df.htm3.replace([999], float('NaN'), inplace=True)
# clean weight
df.wtkg2.replace([99999], float('NaN'), inplace=True)
df.wtkg2 /= 100.0
# clean weight a year ago
df.wtyrago.replace([7777, 9999], float('NaN'), inplace=True)
df['wtyrago'] = df.wtyrago.apply(lambda x: x/2.2 if x < 9000 else x-9000)
| gpl-3.0 |
davidsminor/gaffer | python/GafferUITest/CompoundParameterValueWidgetTest.py | 5 | 2354 | ##########################################################################
#
# Copyright (c) 2012, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import weakref
import Gaffer
import GafferTest
import GafferUI
import GafferUITest
class CompoundParameterValueWidgetTest( GafferUITest.TestCase ) :
def testLifetime( self ) :
n = Gaffer.OpHolder()
opSpec = GafferTest.ParameterisedHolderTest.classSpecification( "image/grade", "IECORE_OP_PATHS" )[:-1]
n.setOp( *opSpec )
ui = GafferUI.CompoundParameterValueWidget( n.parameterHandler() )
w = weakref.ref( ui )
del ui
self.assertEqual( w(), None )
if __name__ == "__main__":
unittest.main()
| bsd-3-clause |
mvillalba/python-namecheap | docs/source/conf.py | 3 | 6957 | # -*- coding: utf-8 -*-
#
# namecheap documentation build configuration file, created by
# sphinx-quickstart on Thu May 26 09:57:48 2011.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import namecheap
# -- General configuration ----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.viewcode'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'namecheap'
copyright = u'2011, Martín Raúl Villalba'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = namecheap.__version__
# The full version, including alpha/beta/rc tags.
release = namecheap.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%Y-%m-%d'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = []
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = 'Documentation for namecheap, version {0}' \
.format(namecheap.__version__)
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = html_title
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%Y-%m-%d'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = True
# If false, no index is generated.
html_use_index = True
# If true, the index is split into individual pages for each letter.
html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'namecheap_doc'
# -- Options for LaTeX output -------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'namecheap.tex', u'Documentation for namecheap',
u'Martín Raúl Villalba', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output -------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'namecheap', u'Documentation for namecheap',
[u'Martín Raúl Villalba'], 1)
]
| mit |
daviddao/luminosity | sklearn-server/flask/lib/python2.7/site-packages/setuptools/sandbox.py | 259 | 13925 | import os
import sys
import tempfile
import operator
import functools
import itertools
import re
import contextlib
import pickle
import pkg_resources
if sys.platform.startswith('java'):
import org.python.modules.posix.PosixModule as _os
else:
_os = sys.modules[os.name]
try:
_file = file
except NameError:
_file = None
_open = open
from distutils.errors import DistutilsError
from pkg_resources import working_set
from setuptools import compat
from setuptools.compat import builtins
__all__ = [
"AbstractSandbox", "DirectorySandbox", "SandboxViolation", "run_setup",
]
def _execfile(filename, globals, locals=None):
"""
Python 3 implementation of execfile.
"""
mode = 'rb'
with open(filename, mode) as stream:
script = stream.read()
# compile() function in Python 2.6 and 3.1 requires LF line endings.
if sys.version_info[:2] < (2, 7) or (3, 0) <= sys.version_info[:2] < (3, 2):
script = script.replace(b'\r\n', b'\n')
script = script.replace(b'\r', b'\n')
if locals is None:
locals = globals
code = compile(script, filename, 'exec')
exec(code, globals, locals)
@contextlib.contextmanager
def save_argv(repl=None):
saved = sys.argv[:]
if repl is not None:
sys.argv[:] = repl
try:
yield saved
finally:
sys.argv[:] = saved
@contextlib.contextmanager
def save_path():
saved = sys.path[:]
try:
yield saved
finally:
sys.path[:] = saved
@contextlib.contextmanager
def override_temp(replacement):
"""
Monkey-patch tempfile.tempdir with replacement, ensuring it exists
"""
if not os.path.isdir(replacement):
os.makedirs(replacement)
saved = tempfile.tempdir
tempfile.tempdir = replacement
try:
yield
finally:
tempfile.tempdir = saved
@contextlib.contextmanager
def pushd(target):
saved = os.getcwd()
os.chdir(target)
try:
yield saved
finally:
os.chdir(saved)
class UnpickleableException(Exception):
"""
An exception representing another Exception that could not be pickled.
"""
@classmethod
def dump(cls, type, exc):
"""
Always return a dumped (pickled) type and exc. If exc can't be pickled,
wrap it in UnpickleableException first.
"""
try:
return pickle.dumps(type), pickle.dumps(exc)
except Exception:
return cls.dump(cls, cls(repr(exc)))
class ExceptionSaver:
"""
A Context Manager that will save an exception, serialized, and restore it
later.
"""
def __enter__(self):
return self
def __exit__(self, type, exc, tb):
if not exc:
return
# dump the exception
self._saved = UnpickleableException.dump(type, exc)
self._tb = tb
# suppress the exception
return True
def resume(self):
"restore and re-raise any exception"
if '_saved' not in vars(self):
return
type, exc = map(pickle.loads, self._saved)
compat.reraise(type, exc, self._tb)
@contextlib.contextmanager
def save_modules():
"""
Context in which imported modules are saved.
Translates exceptions internal to the context into the equivalent exception
outside the context.
"""
saved = sys.modules.copy()
with ExceptionSaver() as saved_exc:
yield saved
sys.modules.update(saved)
# remove any modules imported since
del_modules = (
mod_name for mod_name in sys.modules
if mod_name not in saved
# exclude any encodings modules. See #285
and not mod_name.startswith('encodings.')
)
_clear_modules(del_modules)
saved_exc.resume()
def _clear_modules(module_names):
for mod_name in list(module_names):
del sys.modules[mod_name]
@contextlib.contextmanager
def save_pkg_resources_state():
saved = pkg_resources.__getstate__()
try:
yield saved
finally:
pkg_resources.__setstate__(saved)
@contextlib.contextmanager
def setup_context(setup_dir):
temp_dir = os.path.join(setup_dir, 'temp')
with save_pkg_resources_state():
with save_modules():
hide_setuptools()
with save_path():
with save_argv():
with override_temp(temp_dir):
with pushd(setup_dir):
# ensure setuptools commands are available
__import__('setuptools')
yield
def _needs_hiding(mod_name):
"""
>>> _needs_hiding('setuptools')
True
>>> _needs_hiding('pkg_resources')
True
>>> _needs_hiding('setuptools_plugin')
False
>>> _needs_hiding('setuptools.__init__')
True
>>> _needs_hiding('distutils')
True
"""
pattern = re.compile('(setuptools|pkg_resources|distutils)(\.|$)')
return bool(pattern.match(mod_name))
def hide_setuptools():
"""
Remove references to setuptools' modules from sys.modules to allow the
invocation to import the most appropriate setuptools. This technique is
necessary to avoid issues such as #315 where setuptools upgrading itself
would fail to find a function declared in the metadata.
"""
modules = filter(_needs_hiding, sys.modules)
_clear_modules(modules)
def run_setup(setup_script, args):
"""Run a distutils setup script, sandboxed in its directory"""
setup_dir = os.path.abspath(os.path.dirname(setup_script))
with setup_context(setup_dir):
try:
sys.argv[:] = [setup_script]+list(args)
sys.path.insert(0, setup_dir)
# reset to include setup dir, w/clean callback list
working_set.__init__()
working_set.callbacks.append(lambda dist:dist.activate())
def runner():
ns = dict(__file__=setup_script, __name__='__main__')
_execfile(setup_script, ns)
DirectorySandbox(setup_dir).run(runner)
except SystemExit as v:
if v.args and v.args[0]:
raise
# Normal exit, just return
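# Usage sketch (hypothetical path): run_setup('/tmp/pkg/setup.py', ['install'])
# executes the script with sys.argv == ['/tmp/pkg/setup.py', 'install'],
# sandboxed to /tmp/pkg, and restores sys.argv/sys.path/sys.modules afterwards.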
class AbstractSandbox:
"""Wrap 'os' module and 'open()' builtin for virtualizing setup scripts"""
_active = False
def __init__(self):
self._attrs = [
name for name in dir(_os)
if not name.startswith('_') and hasattr(self,name)
]
def _copy(self, source):
for name in self._attrs:
setattr(os, name, getattr(source,name))
def run(self, func):
"""Run 'func' under os sandboxing"""
try:
self._copy(self)
if _file:
builtins.file = self._file
builtins.open = self._open
self._active = True
return func()
finally:
self._active = False
if _file:
builtins.file = _file
builtins.open = _open
self._copy(_os)
def _mk_dual_path_wrapper(name):
original = getattr(_os,name)
def wrap(self,src,dst,*args,**kw):
if self._active:
src,dst = self._remap_pair(name,src,dst,*args,**kw)
return original(src,dst,*args,**kw)
return wrap
for name in ["rename", "link", "symlink"]:
if hasattr(_os,name): locals()[name] = _mk_dual_path_wrapper(name)
def _mk_single_path_wrapper(name, original=None):
original = original or getattr(_os,name)
def wrap(self,path,*args,**kw):
if self._active:
path = self._remap_input(name,path,*args,**kw)
return original(path,*args,**kw)
return wrap
if _file:
_file = _mk_single_path_wrapper('file', _file)
_open = _mk_single_path_wrapper('open', _open)
for name in [
"stat", "listdir", "chdir", "open", "chmod", "chown", "mkdir",
"remove", "unlink", "rmdir", "utime", "lchown", "chroot", "lstat",
"startfile", "mkfifo", "mknod", "pathconf", "access"
]:
if hasattr(_os,name): locals()[name] = _mk_single_path_wrapper(name)
def _mk_single_with_return(name):
original = getattr(_os,name)
def wrap(self,path,*args,**kw):
if self._active:
path = self._remap_input(name,path,*args,**kw)
return self._remap_output(name, original(path,*args,**kw))
return original(path,*args,**kw)
return wrap
for name in ['readlink', 'tempnam']:
if hasattr(_os,name): locals()[name] = _mk_single_with_return(name)
def _mk_query(name):
original = getattr(_os,name)
def wrap(self,*args,**kw):
retval = original(*args,**kw)
if self._active:
return self._remap_output(name, retval)
return retval
return wrap
for name in ['getcwd', 'tmpnam']:
if hasattr(_os,name): locals()[name] = _mk_query(name)
def _validate_path(self,path):
"""Called to remap or validate any path, whether input or output"""
return path
def _remap_input(self,operation,path,*args,**kw):
"""Called for path inputs"""
return self._validate_path(path)
def _remap_output(self,operation,path):
"""Called for path outputs"""
return self._validate_path(path)
def _remap_pair(self,operation,src,dst,*args,**kw):
"""Called for path pairs like rename, link, and symlink operations"""
return (
self._remap_input(operation+'-from',src,*args,**kw),
self._remap_input(operation+'-to',dst,*args,**kw)
)
if hasattr(os, 'devnull'):
_EXCEPTIONS = [os.devnull,]
else:
_EXCEPTIONS = []
try:
from win32com.client.gencache import GetGeneratePath
_EXCEPTIONS.append(GetGeneratePath())
del GetGeneratePath
except ImportError:
# it appears pywin32 is not installed, so no need to exclude.
pass
class DirectorySandbox(AbstractSandbox):
"""Restrict operations to a single subdirectory - pseudo-chroot"""
write_ops = dict.fromkeys([
"open", "chmod", "chown", "mkdir", "remove", "unlink", "rmdir",
"utime", "lchown", "chroot", "mkfifo", "mknod", "tempnam",
])
_exception_patterns = [
# Allow lib2to3 to attempt to save a pickled grammar object (#121)
'.*lib2to3.*\.pickle$',
]
"exempt writing to paths that match the pattern"
def __init__(self, sandbox, exceptions=_EXCEPTIONS):
self._sandbox = os.path.normcase(os.path.realpath(sandbox))
self._prefix = os.path.join(self._sandbox,'')
self._exceptions = [
os.path.normcase(os.path.realpath(path))
for path in exceptions
]
AbstractSandbox.__init__(self)
def _violation(self, operation, *args, **kw):
raise SandboxViolation(operation, args, kw)
if _file:
def _file(self, path, mode='r', *args, **kw):
if mode not in ('r', 'rt', 'rb', 'rU', 'U') and not self._ok(path):
self._violation("file", path, mode, *args, **kw)
return _file(path,mode,*args,**kw)
def _open(self, path, mode='r', *args, **kw):
if mode not in ('r', 'rt', 'rb', 'rU', 'U') and not self._ok(path):
self._violation("open", path, mode, *args, **kw)
return _open(path,mode,*args,**kw)
def tmpnam(self):
self._violation("tmpnam")
def _ok(self, path):
active = self._active
try:
self._active = False
realpath = os.path.normcase(os.path.realpath(path))
return (
self._exempted(realpath)
or realpath == self._sandbox
or realpath.startswith(self._prefix)
)
finally:
self._active = active
def _exempted(self, filepath):
start_matches = (
filepath.startswith(exception)
for exception in self._exceptions
)
pattern_matches = (
re.match(pattern, filepath)
for pattern in self._exception_patterns
)
candidates = itertools.chain(start_matches, pattern_matches)
return any(candidates)
def _remap_input(self, operation, path, *args, **kw):
"""Called for path inputs"""
if operation in self.write_ops and not self._ok(path):
self._violation(operation, os.path.realpath(path), *args, **kw)
return path
def _remap_pair(self, operation, src, dst, *args, **kw):
"""Called for path pairs like rename, link, and symlink operations"""
if not self._ok(src) or not self._ok(dst):
self._violation(operation, src, dst, *args, **kw)
return (src,dst)
def open(self, file, flags, mode=0o777, *args, **kw):
"""Called for low-level os.open()"""
if flags & WRITE_FLAGS and not self._ok(file):
self._violation("os.open", file, flags, mode, *args, **kw)
return _os.open(file,flags,mode, *args, **kw)
WRITE_FLAGS = functools.reduce(
operator.or_, [getattr(_os, a, 0) for a in
"O_WRONLY O_RDWR O_APPEND O_CREAT O_TRUNC O_TEMPORARY".split()]
)
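# WRITE_FLAGS above ORs together whichever write-capable os.open flags exist
# on this platform (getattr defaults missing ones to 0), so DirectorySandbox.open
# trips on any flag combination that could create, truncate or write a file.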
class SandboxViolation(DistutilsError):
"""A setup script attempted to modify the filesystem outside the sandbox"""
def __str__(self):
return """SandboxViolation: %s%r %s
The package setup script has attempted to modify files on your system
that are not within the EasyInstall build area, and has been aborted.
This package cannot be safely installed by EasyInstall, and may not
support alternate installation locations even if you run its setup
script by hand. Please inform the package's author and the EasyInstall
maintainers to find out if a fix or workaround is available.""" % self.args
#
| bsd-3-clause |
creasyw/IMTAphy | documentation/toolchain/Sphinx-0.5dev_20081110-py2.5.egg/sphinx/util/jsdump.py | 4 | 5401 | # -*- coding: utf-8 -*-
"""
sphinx.util.jsdump
~~~~~~~~~~~~~~~~~~
This module implements a simple JavaScript serializer.
Uses the basestring encode function from simplejson.
:copyright: 2008 by Armin Ronacher, Bob Ippolito, Georg Brandl.
:license: BSD.
"""
import re
_str_re = re.compile(r'"(\\\\|\\"|[^"])*"')
_int_re = re.compile(r'\d+')
_name_re = re.compile(r'[a-zA-Z]\w*')
_nameonly_re = re.compile(r'[a-zA-Z]\w*$')
# escape \, ", control characters and everything outside ASCII
ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])')
ESCAPE_DICT = {
'\\': '\\\\',
'"': '\\"',
'\b': '\\b',
'\f': '\\f',
'\n': '\\n',
'\r': '\\r',
'\t': '\\t',
}
ESCAPED = re.compile(r'\\u.{4}|\\.')
def encode_string(s):
def replace(match):
s = match.group(0)
try:
return ESCAPE_DICT[s]
except KeyError:
n = ord(s)
if n < 0x10000:
return '\\u%04x' % (n,)
else:
# surrogate pair
n -= 0x10000
s1 = 0xd800 | ((n >> 10) & 0x3ff)
s2 = 0xdc00 | (n & 0x3ff)
return '\\u%04x\\u%04x' % (s1, s2)
return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"'
def decode_string(s):
return ESCAPED.sub(lambda m: eval('u"'+m.group()+'"'), s)
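# Round-trip sketch (not in the original source):
#   encode_string(u'a"b\n')    # -> the literal '"a\\"b\\n"'
#   decode_string('a\\"b\\n')  # -> u'a"b\n'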
reswords = set("""\
abstract else instanceof switch
boolean enum int synchronized
break export interface this
byte extends long throw
case false native throws
catch final new transient
char finally null true
class float package try
const for private typeof
continue function protected var
debugger goto public void
default if return volatile
delete implements short while
do import static with
double in super""".split())
def dumps(obj, key=False):
if key:
if not isinstance(obj, basestring):
obj = str(obj)
if _nameonly_re.match(obj) and obj not in reswords:
return obj # return it as a bare word
else:
return encode_string(obj)
if obj is None:
return 'null'
elif obj is True or obj is False:
return obj and 'true' or 'false'
elif isinstance(obj, (int, long, float)):
return str(obj)
elif isinstance(obj, dict):
return '{%s}' % ','.join('%s:%s' % (
dumps(key, True),
dumps(value)
) for key, value in obj.iteritems())
elif isinstance(obj, (tuple, list, set)):
return '[%s]' % ','.join(dumps(x) for x in obj)
elif isinstance(obj, basestring):
return encode_string(obj)
raise TypeError(type(obj))
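# Example (sketch): reserved words are quoted, other identifiers stay bare.
#   dumps({'terms': ['foo', 1]})  # -> '{terms:["foo",1]}'
#   dumps({'delete': True})       # -> '{"delete":true}'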
def dump(obj, f):
f.write(dumps(obj))
def loads(x):
"""Loader that can read the JS subset the indexer produces."""
nothing = object()
i = 0
n = len(x)
stack = []
obj = nothing
key = False
keys = []
while i < n:
c = x[i]
if c == '{':
obj = {}
stack.append(obj)
key = True
keys.append(nothing)
i += 1
elif c == '[':
obj = []
stack.append(obj)
key = False
keys.append(nothing)
i += 1
elif c in '}]':
if key:
raise ValueError("unfinished dict")
oldobj = stack.pop()
keys.pop()
if stack:
obj = stack[-1]
if isinstance(obj, dict):
if keys[-1] is nothing:
raise ValueError("invalid key object", oldobj)
obj[keys[-1]] = oldobj
else:
obj.append(oldobj)
else:
break
i += 1
elif c == ',':
if key:
raise ValueError("multiple keys")
if isinstance(obj, dict):
key = True
i += 1
elif c == ':':
if not isinstance(obj, dict):
raise ValueError("colon in list")
i += 1
if not key:
raise ValueError("multiple values")
key = False
else:
m = _str_re.match(x, i)
if m:
y = decode_string(m.group()[1:-1])
else:
m = _int_re.match(x, i)
if m:
y = int(m.group())
else:
m = _name_re.match(x, i)
if m:
y = m.group()
if y == 'true':
y = True
elif y == 'false':
y = False
elif y == 'null':
y = None
elif not key:
raise ValueError("bareword as value")
else:
raise ValueError("read error at pos %d" % i)
i = m.end()
if isinstance(obj, dict):
if key:
keys[-1] = y
else:
obj[keys[-1]] = y
key = False
else:
obj.append(y)
if obj is nothing:
raise ValueError("nothing loaded from string")
return obj
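# Round-trip sketch (not in the original source):
#   loads(dumps({'a': [1, 'b']})) == {'a': [1, 'b']}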
def load(f):
return loads(f.read())
| gpl-2.0 |
mmclenna/engine | testing/legion/examples/subprocess/subprocess_test.py | 15 | 4531 | #!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A host test module demonstrating interacting with remote subprocesses."""
import argparse
import logging
import os
import sys
import time
import xmlrpclib
# Map the testing directory so we can import legion.legion_test.
TESTING_DIR = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'..', '..', '..', '..', 'testing')
sys.path.append(TESTING_DIR)
from legion import legion_test_case
from legion import jsonrpclib
class ExampleTestController(legion_test_case.TestCase):
"""An example controller using the remote subprocess functions."""
@classmethod
def setUpClass(cls):
"""Creates the task machine and waits until it connects."""
parser = argparse.ArgumentParser()
parser.add_argument('--task-hash')
parser.add_argument('--os', default='Ubuntu-14.04')
args, _ = parser.parse_known_args()
cls.task = cls.CreateTask(
isolated_hash=args.task_hash,
dimensions={'os': args.os},
idle_timeout_secs=90,
connection_timeout_secs=90,
verbosity=logging.DEBUG)
cls.task.Create()
cls.task.WaitForConnection()
def testMultipleProcesses(self):
"""Tests that processes can be run and controlled simultaneously."""
start = time.time()
logging.info('Starting "sleep 10" and "sleep 20"')
sleep10 = self.task.Process(['sleep', '10'])
sleep20 = self.task.Process(['sleep', '20'])
logging.info('Waiting for sleep 10 to finish and verifying timing')
sleep10.Wait()
elapsed = time.time() - start
self.assertGreaterEqual(elapsed, 10)
self.assertLess(elapsed, 11)
logging.info('Waiting for sleep 20 to finish and verifying timing')
sleep20.Wait()
elapsed = time.time() - start
self.assertGreaterEqual(elapsed, 20)
sleep10.Delete()
sleep20.Delete()
def testTerminate(self):
"""Tests that a process can be correctly terminated."""
start = time.time()
logging.info('Starting "sleep 20"')
sleep20 = self.task.Process(['sleep', '20'])
logging.info('Calling Terminate()')
sleep20.Terminate()
try:
logging.info('Trying to wait for sleep 20 to complete')
sleep20.Wait()
except xmlrpclib.Fault:
pass
finally:
sleep20.Delete()
logging.info('Checking to make sure sleep 20 was actually terminated')
self.assertLess(time.time() - start, 20)
def testLs(self):
"""Tests that the returned results from a process are correct."""
logging.info('Calling "ls"')
ls = self.task.Process(['ls'])
logging.info('Trying to wait for ls to complete')
ls.Wait()
logging.info('Checking that ls completed and returned the correct results')
self.assertEqual(ls.GetReturncode(), 0)
self.assertIn('task.isolate', ls.ReadStdout())
def testProcessOutput(self):
"""Tests that a process's output gets logged to a file in the output-dir."""
code = ('import sys\n'
'sys.stdout.write("Hello stdout")\n'
'sys.stderr.write("Hello stderr")')
self.task.rpc.WriteFile('test.py', code)
proc = self.task.Process(['python', 'test.py'])
self.CheckProcessOutput('stdout', proc.key, 'Hello stdout')
self.CheckProcessOutput('stderr', proc.key, 'Hello stderr')
def testCustomKey(self):
"""Tests that a custom key passed to a process works correctly."""
code = ('import sys\n'
'sys.stdout.write("Hello CustomKey stdout")\n'
'sys.stderr.write("Hello CustomKey stderr")')
self.task.rpc.WriteFile('test.py', code)
self.task.Process(['python', 'test.py'], key='CustomKey')
self.CheckProcessOutput('stdout', 'CustomKey', 'Hello CustomKey stdout')
self.CheckProcessOutput('stderr', 'CustomKey', 'Hello CustomKey stderr')
def testKeyReuse(self):
"""Tests that a key cannot be reused."""
self.task.Process(['ls'], key='KeyReuse')
self.assertRaises(jsonrpclib.Fault, self.task.Process, ['ls'],
key='KeyReuse')
def CheckProcessOutput(self, pipe, key, expected):
"""Checks that a process' output files are correct."""
logging.info('Reading output file')
output_dir = self.task.rpc.GetOutputDir()
path = self.task.rpc.PathJoin(output_dir, '%s.%s' % (key, pipe))
actual = self.task.rpc.ReadFile(path)
self.assertEqual(expected, actual)
if __name__ == '__main__':
legion_test_case.main()
| bsd-3-clause |
nikolas/edx-platform | common/test/acceptance/fixtures/certificates.py | 80 | 1221 | """
Tools for creating certificates config fixture data.
"""
import json
from . import STUDIO_BASE_URL
from .base import StudioApiFixture
class CertificateConfigFixtureError(Exception):
"""
Error occurred while installing certificate config fixture.
"""
pass
class CertificateConfigFixture(StudioApiFixture):
"""
Fixture to create certificates configuration for a course
"""
certificates = []
def __init__(self, course_id, certificates_data):
self.course_id = course_id
self.certificates = certificates_data
super(CertificateConfigFixture, self).__init__()
def install(self):
"""
Push the certificates config data to certificate endpoint.
"""
response = self.session.post(
'{}/certificates/{}'.format(STUDIO_BASE_URL, self.course_id),
data=json.dumps(self.certificates),
headers=self.headers
)
if not response.ok:
raise CertificateConfigFixtureError(
"Could not create certificate {0}. Status was {1}".format(
json.dumps(self.certificates), response.status_code
)
)
return self
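# A minimal usage sketch (the course id and certificate payload below are
# hypothetical; the exact data shape is whatever the Studio certificates
# endpoint accepts):
#
# fixture = CertificateConfigFixture(
# 'course-v1:Org+Course+Run',
# [{'name': 'Default', 'signatories': []}],
# ).install()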
| agpl-3.0 |
3manuek/scikit-learn | sklearn/cluster/tests/test_bicluster.py | 226 | 9457 | """Testing for Spectral Biclustering methods"""
import numpy as np
from scipy.sparse import csr_matrix, issparse
from sklearn.grid_search import ParameterGrid
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
from sklearn.base import BaseEstimator, BiclusterMixin
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.cluster.bicluster import SpectralBiclustering
from sklearn.cluster.bicluster import _scale_normalize
from sklearn.cluster.bicluster import _bistochastic_normalize
from sklearn.cluster.bicluster import _log_normalize
from sklearn.metrics import consensus_score
from sklearn.datasets import make_biclusters, make_checkerboard
class MockBiclustering(BaseEstimator, BiclusterMixin):
# Mock object for testing get_submatrix.
def __init__(self):
pass
def get_indices(self, i):
# Overridden to reproduce old get_submatrix test.
return (np.where([True, True, False, False, True])[0],
np.where([False, False, True, True])[0])
def test_get_submatrix():
data = np.arange(20).reshape(5, 4)
model = MockBiclustering()
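# MockBiclustering selects rows (0, 1, 4) and columns (2, 3); for
# np.arange(20).reshape(5, 4) their intersection is [[2, 3], [6, 7], [18, 19]],
# which is the expected submatrix asserted below.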
for X in (data, csr_matrix(data), data.tolist()):
submatrix = model.get_submatrix(0, X)
if issparse(submatrix):
submatrix = submatrix.toarray()
assert_array_equal(submatrix, [[2, 3],
[6, 7],
[18, 19]])
submatrix[:] = -1
if issparse(X):
X = X.toarray()
assert_true(np.all(X != -1))
def _test_shape_indices(model):
# Test get_shape and get_indices on fitted model.
for i in range(model.n_clusters):
m, n = model.get_shape(i)
i_ind, j_ind = model.get_indices(i)
assert_equal(len(i_ind), m)
assert_equal(len(j_ind), n)
def test_spectral_coclustering():
# Test Dhillon's Spectral CoClustering on a simple problem.
param_grid = {'svd_method': ['randomized', 'arpack'],
'n_svd_vecs': [None, 20],
'mini_batch': [False, True],
'init': ['k-means++'],
'n_init': [10],
'n_jobs': [1]}
random_state = 0
S, rows, cols = make_biclusters((30, 30), 3, noise=0.5,
random_state=random_state)
S -= S.min() # needs to be nonnegative before making it sparse
S = np.where(S < 1, 0, S) # threshold some values
for mat in (S, csr_matrix(S)):
for kwargs in ParameterGrid(param_grid):
model = SpectralCoclustering(n_clusters=3,
random_state=random_state,
**kwargs)
model.fit(mat)
assert_equal(model.rows_.shape, (3, 30))
assert_array_equal(model.rows_.sum(axis=0), np.ones(30))
assert_array_equal(model.columns_.sum(axis=0), np.ones(30))
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
_test_shape_indices(model)
def test_spectral_biclustering():
# Test Kluger methods on a checkerboard dataset.
S, rows, cols = make_checkerboard((30, 30), 3, noise=0.5,
random_state=0)
non_default_params = {'method': ['scale', 'log'],
'svd_method': ['arpack'],
'n_svd_vecs': [20],
'mini_batch': [True]}
for mat in (S, csr_matrix(S)):
for param_name, param_values in non_default_params.items():
for param_value in param_values:
model = SpectralBiclustering(
n_clusters=3,
n_init=3,
init='k-means++',
random_state=0,
)
model.set_params(**dict([(param_name, param_value)]))
if issparse(mat) and model.get_params().get('method') == 'log':
# cannot take log of sparse matrix
assert_raises(ValueError, model.fit, mat)
continue
else:
model.fit(mat)
assert_equal(model.rows_.shape, (9, 30))
assert_equal(model.columns_.shape, (9, 30))
assert_array_equal(model.rows_.sum(axis=0),
np.repeat(3, 30))
assert_array_equal(model.columns_.sum(axis=0),
np.repeat(3, 30))
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
_test_shape_indices(model)
def _do_scale_test(scaled):
"""Check that rows sum to one constant, and columns to another."""
row_sum = scaled.sum(axis=1)
col_sum = scaled.sum(axis=0)
if issparse(scaled):
row_sum = np.asarray(row_sum).squeeze()
col_sum = np.asarray(col_sum).squeeze()
assert_array_almost_equal(row_sum, np.tile(row_sum.mean(), 100),
decimal=1)
assert_array_almost_equal(col_sum, np.tile(col_sum.mean(), 100),
decimal=1)
def _do_bistochastic_test(scaled):
"""Check that rows and columns sum to the same constant."""
_do_scale_test(scaled)
assert_almost_equal(scaled.sum(axis=0).mean(),
scaled.sum(axis=1).mean(),
decimal=1)
def test_scale_normalize():
generator = np.random.RandomState(0)
X = generator.rand(100, 100)
for mat in (X, csr_matrix(X)):
scaled, _, _ = _scale_normalize(mat)
_do_scale_test(scaled)
if issparse(mat):
assert issparse(scaled)
def test_bistochastic_normalize():
generator = np.random.RandomState(0)
X = generator.rand(100, 100)
for mat in (X, csr_matrix(X)):
scaled = _bistochastic_normalize(mat)
_do_bistochastic_test(scaled)
if issparse(mat):
assert issparse(scaled)
def test_log_normalize():
# adding any constant to a log-scaled matrix should make it
# bistochastic
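# (roughly, log-normalization subtracts the row and column means of log(X)
# and adds back the overall mean, so the result has zero row and column sums;
# a constant shift then gives every row one common sum and every column another)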
generator = np.random.RandomState(0)
mat = generator.rand(100, 100)
scaled = _log_normalize(mat) + 1
_do_bistochastic_test(scaled)
def test_fit_best_piecewise():
model = SpectralBiclustering(random_state=0)
vectors = np.array([[0, 0, 0, 1, 1, 1],
[2, 2, 2, 3, 3, 3],
[0, 1, 2, 3, 4, 5]])
best = model._fit_best_piecewise(vectors, n_best=2, n_clusters=2)
assert_array_equal(best, vectors[:2])
def test_project_and_cluster():
model = SpectralBiclustering(random_state=0)
data = np.array([[1, 1, 1],
[1, 1, 1],
[3, 6, 3],
[3, 6, 3]])
vectors = np.array([[1, 0],
[0, 1],
[0, 0]])
for mat in (data, csr_matrix(data)):
labels = model._project_and_cluster(mat, vectors,
n_clusters=2)
assert_array_equal(labels, [0, 0, 1, 1])
def test_perfect_checkerboard():
raise SkipTest("This test is failing on the buildbot, but cannot"
" reproduce. Temporarily disabling it until it can be"
" reproduced and fixed.")
model = SpectralBiclustering(3, svd_method="arpack", random_state=0)
S, rows, cols = make_checkerboard((30, 30), 3, noise=0,
random_state=0)
model.fit(S)
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
S, rows, cols = make_checkerboard((40, 30), 3, noise=0,
random_state=0)
model.fit(S)
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
S, rows, cols = make_checkerboard((30, 40), 3, noise=0,
random_state=0)
model.fit(S)
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
def test_errors():
data = np.arange(25).reshape((5, 5))
model = SpectralBiclustering(n_clusters=(3, 3, 3))
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_clusters='abc')
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_clusters=(3, 'abc'))
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(method='unknown')
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(svd_method='unknown')
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_components=0)
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_best=0)
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_components=3, n_best=4)
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering()
data = np.arange(27).reshape((3, 3, 3))
assert_raises(ValueError, model.fit, data)
| bsd-3-clause |
linjoahow/w17test_1 | static/Brython3.1.0-20150301-090019/Lib/unittest/test/testmock/testpatch.py | 739 | 53126 | # Copyright (C) 2007-2012 Michael Foord & the mock team
# E-mail: fuzzyman AT voidspace DOT org DOT uk
# http://www.voidspace.org.uk/python/mock/
import os
import sys
import unittest
from unittest.test.testmock import support
from unittest.test.testmock.support import SomeClass, is_instance
from unittest.mock import (
NonCallableMock, CallableMixin, patch, sentinel,
MagicMock, Mock, NonCallableMagicMock, _patch,
DEFAULT, call, _get_target
)
builtin_string = 'builtins'
PTModule = sys.modules[__name__]
MODNAME = '%s.PTModule' % __name__
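# Builds a proxy that forwards attribute access to obj; with get_only=False it
# also forwards sets and deletes, so patch.object can patch through the proxy.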
def _get_proxy(obj, get_only=True):
class Proxy(object):
def __getattr__(self, name):
return getattr(obj, name)
if not get_only:
def __setattr__(self, name, value):
setattr(obj, name, value)
def __delattr__(self, name):
delattr(obj, name)
Proxy.__setattr__ = __setattr__
Proxy.__delattr__ = __delattr__
return Proxy()
# for use in the test
something = sentinel.Something
something_else = sentinel.SomethingElse
class Foo(object):
def __init__(self, a):
pass
def f(self, a):
pass
def g(self):
pass
foo = 'bar'
class Bar(object):
def a(self):
pass
foo_name = '%s.Foo' % __name__
def function(a, b=Foo):
pass
class Container(object):
def __init__(self):
self.values = {}
def __getitem__(self, name):
return self.values[name]
def __setitem__(self, name, value):
self.values[name] = value
def __delitem__(self, name):
del self.values[name]
def __iter__(self):
return iter(self.values)
class PatchTest(unittest.TestCase):
def assertNotCallable(self, obj, magic=True):
MockClass = NonCallableMagicMock
if not magic:
MockClass = NonCallableMock
self.assertRaises(TypeError, obj)
self.assertTrue(is_instance(obj, MockClass))
self.assertFalse(is_instance(obj, CallableMixin))
def test_single_patchobject(self):
class Something(object):
attribute = sentinel.Original
@patch.object(Something, 'attribute', sentinel.Patched)
def test():
self.assertEqual(Something.attribute, sentinel.Patched, "unpatched")
test()
self.assertEqual(Something.attribute, sentinel.Original,
"patch not restored")
def test_patchobject_with_none(self):
class Something(object):
attribute = sentinel.Original
@patch.object(Something, 'attribute', None)
def test():
self.assertIsNone(Something.attribute, "unpatched")
test()
self.assertEqual(Something.attribute, sentinel.Original,
"patch not restored")
def test_multiple_patchobject(self):
class Something(object):
attribute = sentinel.Original
next_attribute = sentinel.Original2
@patch.object(Something, 'attribute', sentinel.Patched)
@patch.object(Something, 'next_attribute', sentinel.Patched2)
def test():
self.assertEqual(Something.attribute, sentinel.Patched,
"unpatched")
self.assertEqual(Something.next_attribute, sentinel.Patched2,
"unpatched")
test()
self.assertEqual(Something.attribute, sentinel.Original,
"patch not restored")
self.assertEqual(Something.next_attribute, sentinel.Original2,
"patch not restored")
def test_object_lookup_is_quite_lazy(self):
global something
original = something
@patch('%s.something' % __name__, sentinel.Something2)
def test():
pass
try:
something = sentinel.replacement_value
test()
self.assertEqual(something, sentinel.replacement_value)
finally:
something = original
def test_patch(self):
@patch('%s.something' % __name__, sentinel.Something2)
def test():
self.assertEqual(PTModule.something, sentinel.Something2,
"unpatched")
test()
self.assertEqual(PTModule.something, sentinel.Something,
"patch not restored")
@patch('%s.something' % __name__, sentinel.Something2)
@patch('%s.something_else' % __name__, sentinel.SomethingElse)
def test():
self.assertEqual(PTModule.something, sentinel.Something2,
"unpatched")
self.assertEqual(PTModule.something_else, sentinel.SomethingElse,
"unpatched")
test()
self.assertEqual(PTModule.something, sentinel.Something,
"patch not restored")
self.assertEqual(PTModule.something_else, sentinel.SomethingElse,
"patch not restored")
# Test the patching and restoring works a second time
test()
self.assertEqual(PTModule.something, sentinel.Something,
"patch not restored")
self.assertEqual(PTModule.something_else, sentinel.SomethingElse,
"patch not restored")
mock = Mock()
mock.return_value = sentinel.Handle
@patch('%s.open' % builtin_string, mock)
def test():
self.assertEqual(open('filename', 'r'), sentinel.Handle,
"open not patched")
test()
test()
self.assertNotEqual(open, mock, "patch not restored")
def test_patch_class_attribute(self):
@patch('%s.SomeClass.class_attribute' % __name__,
sentinel.ClassAttribute)
def test():
self.assertEqual(PTModule.SomeClass.class_attribute,
sentinel.ClassAttribute, "unpatched")
test()
self.assertIsNone(PTModule.SomeClass.class_attribute,
"patch not restored")
def test_patchobject_with_default_mock(self):
class Test(object):
something = sentinel.Original
something2 = sentinel.Original2
@patch.object(Test, 'something')
def test(mock):
self.assertEqual(mock, Test.something,
"Mock not passed into test function")
self.assertIsInstance(mock, MagicMock,
"patch with two arguments did not create a mock")
test()
@patch.object(Test, 'something')
@patch.object(Test, 'something2')
def test(this1, this2, mock1, mock2):
self.assertEqual(this1, sentinel.this1,
"Patched function didn't receive initial argument")
self.assertEqual(this2, sentinel.this2,
"Patched function didn't receive second argument")
self.assertEqual(mock1, Test.something2,
"Mock not passed into test function")
self.assertEqual(mock2, Test.something,
"Second Mock not passed into test function")
self.assertIsInstance(mock1, MagicMock,
"patch with two arguments did not create a mock")
self.assertIsInstance(mock2, MagicMock,
"patch with two arguments did not create a mock")
# A hack to test that new mocks are passed the second time
self.assertNotEqual(outerMock1, mock1, "unexpected value for mock1")
self.assertNotEqual(outerMock2, mock2, "unexpected value for mock2")
return mock1, mock2
outerMock1 = outerMock2 = None
outerMock1, outerMock2 = test(sentinel.this1, sentinel.this2)
# Test that executing a second time creates new mocks
test(sentinel.this1, sentinel.this2)
def test_patch_with_spec(self):
@patch('%s.SomeClass' % __name__, spec=SomeClass)
def test(MockSomeClass):
self.assertEqual(SomeClass, MockSomeClass)
self.assertTrue(is_instance(SomeClass.wibble, MagicMock))
self.assertRaises(AttributeError, lambda: SomeClass.not_wibble)
test()
def test_patchobject_with_spec(self):
@patch.object(SomeClass, 'class_attribute', spec=SomeClass)
def test(MockAttribute):
self.assertEqual(SomeClass.class_attribute, MockAttribute)
self.assertTrue(is_instance(SomeClass.class_attribute.wibble,
MagicMock))
self.assertRaises(AttributeError,
lambda: SomeClass.class_attribute.not_wibble)
test()
def test_patch_with_spec_as_list(self):
@patch('%s.SomeClass' % __name__, spec=['wibble'])
def test(MockSomeClass):
self.assertEqual(SomeClass, MockSomeClass)
self.assertTrue(is_instance(SomeClass.wibble, MagicMock))
self.assertRaises(AttributeError, lambda: SomeClass.not_wibble)
test()
def test_patchobject_with_spec_as_list(self):
@patch.object(SomeClass, 'class_attribute', spec=['wibble'])
def test(MockAttribute):
self.assertEqual(SomeClass.class_attribute, MockAttribute)
self.assertTrue(is_instance(SomeClass.class_attribute.wibble,
MagicMock))
self.assertRaises(AttributeError,
lambda: SomeClass.class_attribute.not_wibble)
test()
def test_nested_patch_with_spec_as_list(self):
# regression test for nested decorators
@patch('%s.open' % builtin_string)
@patch('%s.SomeClass' % __name__, spec=['wibble'])
def test(MockSomeClass, MockOpen):
self.assertEqual(SomeClass, MockSomeClass)
self.assertTrue(is_instance(SomeClass.wibble, MagicMock))
self.assertRaises(AttributeError, lambda: SomeClass.not_wibble)
test()
def test_patch_with_spec_as_boolean(self):
@patch('%s.SomeClass' % __name__, spec=True)
def test(MockSomeClass):
self.assertEqual(SomeClass, MockSomeClass)
# Should not raise attribute error
MockSomeClass.wibble
self.assertRaises(AttributeError, lambda: MockSomeClass.not_wibble)
test()
def test_patch_object_with_spec_as_boolean(self):
@patch.object(PTModule, 'SomeClass', spec=True)
def test(MockSomeClass):
self.assertEqual(SomeClass, MockSomeClass)
# Should not raise attribute error
MockSomeClass.wibble
self.assertRaises(AttributeError, lambda: MockSomeClass.not_wibble)
test()
def test_patch_class_acts_with_spec_is_inherited(self):
@patch('%s.SomeClass' % __name__, spec=True)
def test(MockSomeClass):
self.assertTrue(is_instance(MockSomeClass, MagicMock))
instance = MockSomeClass()
self.assertNotCallable(instance)
# Should not raise attribute error
instance.wibble
self.assertRaises(AttributeError, lambda: instance.not_wibble)
test()
def test_patch_with_create_mocks_non_existent_attributes(self):
@patch('%s.frooble' % builtin_string, sentinel.Frooble, create=True)
def test():
self.assertEqual(frooble, sentinel.Frooble)
test()
self.assertRaises(NameError, lambda: frooble)
def test_patchobject_with_create_mocks_non_existent_attributes(self):
@patch.object(SomeClass, 'frooble', sentinel.Frooble, create=True)
def test():
self.assertEqual(SomeClass.frooble, sentinel.Frooble)
test()
self.assertFalse(hasattr(SomeClass, 'frooble'))
def test_patch_wont_create_by_default(self):
try:
@patch('%s.frooble' % builtin_string, sentinel.Frooble)
def test():
self.assertEqual(frooble, sentinel.Frooble)
test()
except AttributeError:
pass
else:
self.fail('Patching non existent attributes should fail')
self.assertRaises(NameError, lambda: frooble)
def test_patchobject_wont_create_by_default(self):
try:
@patch.object(SomeClass, 'frooble', sentinel.Frooble)
def test():
self.fail('Patching non existent attributes should fail')
test()
except AttributeError:
pass
else:
self.fail('Patching non existent attributes should fail')
self.assertFalse(hasattr(SomeClass, 'frooble'))
def test_patch_with_static_methods(self):
class Foo(object):
@staticmethod
def woot():
return sentinel.Static
@patch.object(Foo, 'woot', staticmethod(lambda: sentinel.Patched))
def anonymous():
self.assertEqual(Foo.woot(), sentinel.Patched)
anonymous()
self.assertEqual(Foo.woot(), sentinel.Static)
def test_patch_local(self):
foo = sentinel.Foo
@patch.object(sentinel, 'Foo', 'Foo')
def anonymous():
self.assertEqual(sentinel.Foo, 'Foo')
anonymous()
self.assertEqual(sentinel.Foo, foo)
def test_patch_slots(self):
class Foo(object):
__slots__ = ('Foo',)
foo = Foo()
foo.Foo = sentinel.Foo
@patch.object(foo, 'Foo', 'Foo')
def anonymous():
self.assertEqual(foo.Foo, 'Foo')
anonymous()
self.assertEqual(foo.Foo, sentinel.Foo)
def test_patchobject_class_decorator(self):
class Something(object):
attribute = sentinel.Original
class Foo(object):
def test_method(other_self):
self.assertEqual(Something.attribute, sentinel.Patched,
"unpatched")
def not_test_method(other_self):
self.assertEqual(Something.attribute, sentinel.Original,
"non-test method patched")
Foo = patch.object(Something, 'attribute', sentinel.Patched)(Foo)
f = Foo()
f.test_method()
f.not_test_method()
self.assertEqual(Something.attribute, sentinel.Original,
"patch not restored")
def test_patch_class_decorator(self):
class Something(object):
attribute = sentinel.Original
class Foo(object):
def test_method(other_self, mock_something):
self.assertEqual(PTModule.something, mock_something,
"unpatched")
def not_test_method(other_self):
self.assertEqual(PTModule.something, sentinel.Something,
"non-test method patched")
Foo = patch('%s.something' % __name__)(Foo)
f = Foo()
f.test_method()
f.not_test_method()
self.assertEqual(Something.attribute, sentinel.Original,
"patch not restored")
self.assertEqual(PTModule.something, sentinel.Something,
"patch not restored")
def test_patchobject_twice(self):
class Something(object):
attribute = sentinel.Original
next_attribute = sentinel.Original2
@patch.object(Something, 'attribute', sentinel.Patched)
@patch.object(Something, 'attribute', sentinel.Patched)
def test():
self.assertEqual(Something.attribute, sentinel.Patched, "unpatched")
test()
self.assertEqual(Something.attribute, sentinel.Original,
"patch not restored")
def test_patch_dict(self):
foo = {'initial': object(), 'other': 'something'}
original = foo.copy()
@patch.dict(foo)
def test():
foo['a'] = 3
del foo['initial']
foo['other'] = 'something else'
test()
self.assertEqual(foo, original)
@patch.dict(foo, {'a': 'b'})
def test():
self.assertEqual(len(foo), 3)
self.assertEqual(foo['a'], 'b')
test()
self.assertEqual(foo, original)
@patch.dict(foo, [('a', 'b')])
def test():
self.assertEqual(len(foo), 3)
self.assertEqual(foo['a'], 'b')
test()
self.assertEqual(foo, original)
def test_patch_dict_with_container_object(self):
foo = Container()
foo['initial'] = object()
foo['other'] = 'something'
original = foo.values.copy()
@patch.dict(foo)
def test():
foo['a'] = 3
del foo['initial']
foo['other'] = 'something else'
test()
self.assertEqual(foo.values, original)
@patch.dict(foo, {'a': 'b'})
def test():
self.assertEqual(len(foo.values), 3)
self.assertEqual(foo['a'], 'b')
test()
self.assertEqual(foo.values, original)
def test_patch_dict_with_clear(self):
foo = {'initial': object(), 'other': 'something'}
original = foo.copy()
@patch.dict(foo, clear=True)
def test():
self.assertEqual(foo, {})
foo['a'] = 3
foo['other'] = 'something else'
test()
self.assertEqual(foo, original)
@patch.dict(foo, {'a': 'b'}, clear=True)
def test():
self.assertEqual(foo, {'a': 'b'})
test()
self.assertEqual(foo, original)
@patch.dict(foo, [('a', 'b')], clear=True)
def test():
self.assertEqual(foo, {'a': 'b'})
test()
self.assertEqual(foo, original)
def test_patch_dict_with_container_object_and_clear(self):
foo = Container()
foo['initial'] = object()
foo['other'] = 'something'
original = foo.values.copy()
@patch.dict(foo, clear=True)
def test():
self.assertEqual(foo.values, {})
foo['a'] = 3
foo['other'] = 'something else'
test()
self.assertEqual(foo.values, original)
@patch.dict(foo, {'a': 'b'}, clear=True)
def test():
self.assertEqual(foo.values, {'a': 'b'})
test()
self.assertEqual(foo.values, original)
def test_name_preserved(self):
foo = {}
@patch('%s.SomeClass' % __name__, object())
@patch('%s.SomeClass' % __name__, object(), autospec=True)
@patch.object(SomeClass, object())
@patch.dict(foo)
def some_name():
pass
self.assertEqual(some_name.__name__, 'some_name')
def test_patch_with_exception(self):
foo = {}
@patch.dict(foo, {'a': 'b'})
def test():
raise NameError('Konrad')
try:
test()
except NameError:
pass
else:
self.fail('NameError not raised by test')
self.assertEqual(foo, {})
def test_patch_dict_with_string(self):
@patch.dict('os.environ', {'konrad_delong': 'some value'})
def test():
self.assertIn('konrad_delong', os.environ)
test()
def test_patch_descriptor(self):
# would be some effort to fix this - we could special case the
# builtin descriptors: classmethod, property, staticmethod
return
class Nothing(object):
foo = None
class Something(object):
foo = {}
@patch.object(Nothing, 'foo', 2)
@classmethod
def klass(cls):
self.assertIs(cls, Something)
@patch.object(Nothing, 'foo', 2)
@staticmethod
def static(arg):
return arg
@patch.dict(foo)
@classmethod
def klass_dict(cls):
self.assertIs(cls, Something)
@patch.dict(foo)
@staticmethod
def static_dict(arg):
return arg
# these will raise exceptions if patching descriptors is broken
self.assertEqual(Something.static('f00'), 'f00')
Something.klass()
self.assertEqual(Something.static_dict('f00'), 'f00')
Something.klass_dict()
something = Something()
self.assertEqual(something.static('f00'), 'f00')
something.klass()
self.assertEqual(something.static_dict('f00'), 'f00')
something.klass_dict()
def test_patch_spec_set(self):
@patch('%s.SomeClass' % __name__, spec=SomeClass, spec_set=True)
def test(MockClass):
MockClass.z = 'foo'
self.assertRaises(AttributeError, test)
@patch.object(support, 'SomeClass', spec=SomeClass, spec_set=True)
def test(MockClass):
MockClass.z = 'foo'
self.assertRaises(AttributeError, test)
@patch('%s.SomeClass' % __name__, spec_set=True)
def test(MockClass):
MockClass.z = 'foo'
self.assertRaises(AttributeError, test)
@patch.object(support, 'SomeClass', spec_set=True)
def test(MockClass):
MockClass.z = 'foo'
self.assertRaises(AttributeError, test)
def test_spec_set_inherit(self):
@patch('%s.SomeClass' % __name__, spec_set=True)
def test(MockClass):
instance = MockClass()
instance.z = 'foo'
self.assertRaises(AttributeError, test)
def test_patch_start_stop(self):
original = something
patcher = patch('%s.something' % __name__)
self.assertIs(something, original)
mock = patcher.start()
try:
self.assertIsNot(mock, original)
self.assertIs(something, mock)
finally:
patcher.stop()
self.assertIs(something, original)
def test_stop_without_start(self):
patcher = patch(foo_name, 'bar', 3)
# calling stop without start used to produce a very obscure error
self.assertRaises(RuntimeError, patcher.stop)
def test_patchobject_start_stop(self):
original = something
patcher = patch.object(PTModule, 'something', 'foo')
self.assertIs(something, original)
replaced = patcher.start()
try:
self.assertEqual(replaced, 'foo')
self.assertIs(something, replaced)
finally:
patcher.stop()
self.assertIs(something, original)
def test_patch_dict_start_stop(self):
d = {'foo': 'bar'}
original = d.copy()
patcher = patch.dict(d, [('spam', 'eggs')], clear=True)
self.assertEqual(d, original)
patcher.start()
try:
self.assertEqual(d, {'spam': 'eggs'})
finally:
patcher.stop()
self.assertEqual(d, original)
def test_patch_dict_class_decorator(self):
this = self
d = {'spam': 'eggs'}
original = d.copy()
class Test(object):
def test_first(self):
this.assertEqual(d, {'foo': 'bar'})
def test_second(self):
this.assertEqual(d, {'foo': 'bar'})
Test = patch.dict(d, {'foo': 'bar'}, clear=True)(Test)
self.assertEqual(d, original)
test = Test()
test.test_first()
self.assertEqual(d, original)
test.test_second()
self.assertEqual(d, original)
test = Test()
test.test_first()
self.assertEqual(d, original)
test.test_second()
self.assertEqual(d, original)
def test_get_only_proxy(self):
class Something(object):
foo = 'foo'
class SomethingElse:
foo = 'foo'
for thing in Something, SomethingElse, Something(), SomethingElse:
proxy = _get_proxy(thing)
@patch.object(proxy, 'foo', 'bar')
def test():
self.assertEqual(proxy.foo, 'bar')
test()
self.assertEqual(proxy.foo, 'foo')
self.assertEqual(thing.foo, 'foo')
self.assertNotIn('foo', proxy.__dict__)
def test_get_set_delete_proxy(self):
class Something(object):
foo = 'foo'
class SomethingElse:
foo = 'foo'
for thing in Something, SomethingElse, Something(), SomethingElse:
proxy = _get_proxy(thing, get_only=False)
@patch.object(proxy, 'foo', 'bar')
def test():
self.assertEqual(proxy.foo, 'bar')
test()
self.assertEqual(proxy.foo, 'foo')
self.assertEqual(thing.foo, 'foo')
self.assertNotIn('foo', proxy.__dict__)
def test_patch_keyword_args(self):
kwargs = {'side_effect': KeyError, 'foo.bar.return_value': 33,
'foo': MagicMock()}
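# Dotted keys such as 'foo.bar.return_value' configure child mocks, mirroring
# Mock.configure_mock() semantics.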
patcher = patch(foo_name, **kwargs)
mock = patcher.start()
patcher.stop()
self.assertRaises(KeyError, mock)
self.assertEqual(mock.foo.bar(), 33)
self.assertIsInstance(mock.foo, MagicMock)
def test_patch_object_keyword_args(self):
kwargs = {'side_effect': KeyError, 'foo.bar.return_value': 33,
'foo': MagicMock()}
patcher = patch.object(Foo, 'f', **kwargs)
mock = patcher.start()
patcher.stop()
self.assertRaises(KeyError, mock)
self.assertEqual(mock.foo.bar(), 33)
self.assertIsInstance(mock.foo, MagicMock)
def test_patch_dict_keyword_args(self):
original = {'foo': 'bar'}
copy = original.copy()
patcher = patch.dict(original, foo=3, bar=4, baz=5)
patcher.start()
try:
self.assertEqual(original, dict(foo=3, bar=4, baz=5))
finally:
patcher.stop()
self.assertEqual(original, copy)
def test_autospec(self):
class Boo(object):
def __init__(self, a):
pass
def f(self, a):
pass
def g(self):
pass
foo = 'bar'
class Bar(object):
def a(self):
pass
def _test(mock):
mock(1)
mock.assert_called_with(1)
self.assertRaises(TypeError, mock)
def _test2(mock):
mock.f(1)
mock.f.assert_called_with(1)
self.assertRaises(TypeError, mock.f)
mock.g()
mock.g.assert_called_with()
self.assertRaises(TypeError, mock.g, 1)
self.assertRaises(AttributeError, getattr, mock, 'h')
mock.foo.lower()
mock.foo.lower.assert_called_with()
self.assertRaises(AttributeError, getattr, mock.foo, 'bar')
mock.Bar()
mock.Bar.assert_called_with()
mock.Bar.a()
mock.Bar.a.assert_called_with()
self.assertRaises(TypeError, mock.Bar.a, 1)
mock.Bar().a()
mock.Bar().a.assert_called_with()
self.assertRaises(TypeError, mock.Bar().a, 1)
self.assertRaises(AttributeError, getattr, mock.Bar, 'b')
self.assertRaises(AttributeError, getattr, mock.Bar(), 'b')
def function(mock):
_test(mock)
_test2(mock)
_test2(mock(1))
self.assertIs(mock, Foo)
return mock
test = patch(foo_name, autospec=True)(function)
mock = test()
self.assertIsNot(Foo, mock)
# test patching a second time works
test()
module = sys.modules[__name__]
test = patch.object(module, 'Foo', autospec=True)(function)
mock = test()
self.assertIsNot(Foo, mock)
# test patching a second time works
test()
def test_autospec_function(self):
@patch('%s.function' % __name__, autospec=True)
def test(mock):
function(1)
function.assert_called_with(1)
function(2, 3)
function.assert_called_with(2, 3)
self.assertRaises(TypeError, function)
self.assertRaises(AttributeError, getattr, function, 'foo')
test()
def test_autospec_keywords(self):
@patch('%s.function' % __name__, autospec=True,
return_value=3)
def test(mock_function):
#self.assertEqual(function.abc, 'foo')
return function(1, 2)
result = test()
self.assertEqual(result, 3)
def test_autospec_with_new(self):
patcher = patch('%s.function' % __name__, new=3, autospec=True)
self.assertRaises(TypeError, patcher.start)
module = sys.modules[__name__]
patcher = patch.object(module, 'function', new=3, autospec=True)
self.assertRaises(TypeError, patcher.start)
def test_autospec_with_object(self):
class Bar(Foo):
extra = []
patcher = patch(foo_name, autospec=Bar)
mock = patcher.start()
try:
self.assertIsInstance(mock, Bar)
self.assertIsInstance(mock.extra, list)
finally:
patcher.stop()
def test_autospec_inherits(self):
FooClass = Foo
patcher = patch(foo_name, autospec=True)
mock = patcher.start()
try:
self.assertIsInstance(mock, FooClass)
self.assertIsInstance(mock(3), FooClass)
finally:
patcher.stop()
def test_autospec_name(self):
patcher = patch(foo_name, autospec=True)
mock = patcher.start()
try:
self.assertIn(" name='Foo'", repr(mock))
self.assertIn(" name='Foo.f'", repr(mock.f))
self.assertIn(" name='Foo()'", repr(mock(None)))
self.assertIn(" name='Foo().f'", repr(mock(None).f))
finally:
patcher.stop()
def test_tracebacks(self):
@patch.object(Foo, 'f', object())
def test():
raise AssertionError
try:
test()
except:
err = sys.exc_info()
result = unittest.TextTestResult(None, None, 0)
traceback = result._exc_info_to_string(err, self)
self.assertIn('raise AssertionError', traceback)
def test_new_callable_patch(self):
patcher = patch(foo_name, new_callable=NonCallableMagicMock)
m1 = patcher.start()
patcher.stop()
m2 = patcher.start()
patcher.stop()
self.assertIsNot(m1, m2)
for mock in m1, m2:
self.assertNotCallable(m1)
def test_new_callable_patch_object(self):
patcher = patch.object(Foo, 'f', new_callable=NonCallableMagicMock)
m1 = patcher.start()
patcher.stop()
m2 = patcher.start()
patcher.stop()
self.assertIsNot(m1, m2)
for mock in m1, m2:
self.assertNotCallable(m1)
def test_new_callable_keyword_arguments(self):
class Bar(object):
kwargs = None
def __init__(self, **kwargs):
Bar.kwargs = kwargs
patcher = patch(foo_name, new_callable=Bar, arg1=1, arg2=2)
m = patcher.start()
try:
self.assertIs(type(m), Bar)
self.assertEqual(Bar.kwargs, dict(arg1=1, arg2=2))
finally:
patcher.stop()
def test_new_callable_spec(self):
class Bar(object):
kwargs = None
def __init__(self, **kwargs):
Bar.kwargs = kwargs
patcher = patch(foo_name, new_callable=Bar, spec=Bar)
patcher.start()
try:
self.assertEqual(Bar.kwargs, dict(spec=Bar))
finally:
patcher.stop()
patcher = patch(foo_name, new_callable=Bar, spec_set=Bar)
patcher.start()
try:
self.assertEqual(Bar.kwargs, dict(spec_set=Bar))
finally:
patcher.stop()
def test_new_callable_create(self):
non_existent_attr = '%s.weeeee' % foo_name
p = patch(non_existent_attr, new_callable=NonCallableMock)
self.assertRaises(AttributeError, p.start)
p = patch(non_existent_attr, new_callable=NonCallableMock,
create=True)
m = p.start()
try:
self.assertNotCallable(m, magic=False)
finally:
p.stop()
def test_new_callable_incompatible_with_new(self):
self.assertRaises(
ValueError, patch, foo_name, new=object(), new_callable=MagicMock
)
self.assertRaises(
ValueError, patch.object, Foo, 'f', new=object(),
new_callable=MagicMock
)
def test_new_callable_incompatible_with_autospec(self):
self.assertRaises(
ValueError, patch, foo_name, new_callable=MagicMock,
autospec=True
)
self.assertRaises(
ValueError, patch.object, Foo, 'f', new_callable=MagicMock,
autospec=True
)
def test_new_callable_inherit_for_mocks(self):
class MockSub(Mock):
pass
MockClasses = (
NonCallableMock, NonCallableMagicMock, MagicMock, Mock, MockSub
)
for Klass in MockClasses:
for arg in 'spec', 'spec_set':
kwargs = {arg: True}
p = patch(foo_name, new_callable=Klass, **kwargs)
m = p.start()
try:
instance = m.return_value
self.assertRaises(AttributeError, getattr, instance, 'x')
finally:
p.stop()
def test_new_callable_inherit_non_mock(self):
class NotAMock(object):
def __init__(self, spec):
self.spec = spec
p = patch(foo_name, new_callable=NotAMock, spec=True)
m = p.start()
try:
self.assertTrue(is_instance(m, NotAMock))
self.assertRaises(AttributeError, getattr, m, 'return_value')
finally:
p.stop()
self.assertEqual(m.spec, Foo)
def test_new_callable_class_decorating(self):
test = self
original = Foo
class SomeTest(object):
def _test(self, mock_foo):
test.assertIsNot(Foo, original)
test.assertIs(Foo, mock_foo)
test.assertIsInstance(Foo, SomeClass)
def test_two(self, mock_foo):
self._test(mock_foo)
def test_one(self, mock_foo):
self._test(mock_foo)
SomeTest = patch(foo_name, new_callable=SomeClass)(SomeTest)
SomeTest().test_one()
SomeTest().test_two()
self.assertIs(Foo, original)
def test_patch_multiple(self):
original_foo = Foo
original_f = Foo.f
original_g = Foo.g
patcher1 = patch.multiple(foo_name, f=1, g=2)
patcher2 = patch.multiple(Foo, f=1, g=2)
for patcher in patcher1, patcher2:
patcher.start()
try:
self.assertIs(Foo, original_foo)
self.assertEqual(Foo.f, 1)
self.assertEqual(Foo.g, 2)
finally:
patcher.stop()
self.assertIs(Foo, original_foo)
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
@patch.multiple(foo_name, f=3, g=4)
def test():
self.assertIs(Foo, original_foo)
self.assertEqual(Foo.f, 3)
self.assertEqual(Foo.g, 4)
test()
def test_patch_multiple_no_kwargs(self):
self.assertRaises(ValueError, patch.multiple, foo_name)
self.assertRaises(ValueError, patch.multiple, Foo)
def test_patch_multiple_create_mocks(self):
original_foo = Foo
original_f = Foo.f
original_g = Foo.g
@patch.multiple(foo_name, f=DEFAULT, g=3, foo=DEFAULT)
def test(f, foo):
self.assertIs(Foo, original_foo)
self.assertIs(Foo.f, f)
self.assertEqual(Foo.g, 3)
self.assertIs(Foo.foo, foo)
self.assertTrue(is_instance(f, MagicMock))
self.assertTrue(is_instance(foo, MagicMock))
test()
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
def test_patch_multiple_create_mocks_different_order(self):
# bug revealed by Jython!
original_f = Foo.f
original_g = Foo.g
patcher = patch.object(Foo, 'f', 3)
patcher.attribute_name = 'f'
other = patch.object(Foo, 'g', DEFAULT)
other.attribute_name = 'g'
patcher.additional_patchers = [other]
@patcher
def test(g):
self.assertIs(Foo.g, g)
self.assertEqual(Foo.f, 3)
test()
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
def test_patch_multiple_stacked_decorators(self):
original_foo = Foo
original_f = Foo.f
original_g = Foo.g
@patch.multiple(foo_name, f=DEFAULT)
@patch.multiple(foo_name, foo=DEFAULT)
@patch(foo_name + '.g')
def test1(g, **kwargs):
_test(g, **kwargs)
@patch.multiple(foo_name, f=DEFAULT)
@patch(foo_name + '.g')
@patch.multiple(foo_name, foo=DEFAULT)
def test2(g, **kwargs):
_test(g, **kwargs)
@patch(foo_name + '.g')
@patch.multiple(foo_name, f=DEFAULT)
@patch.multiple(foo_name, foo=DEFAULT)
def test3(g, **kwargs):
_test(g, **kwargs)
def _test(g, **kwargs):
f = kwargs.pop('f')
foo = kwargs.pop('foo')
self.assertFalse(kwargs)
self.assertIs(Foo, original_foo)
self.assertIs(Foo.f, f)
self.assertIs(Foo.g, g)
self.assertIs(Foo.foo, foo)
self.assertTrue(is_instance(f, MagicMock))
self.assertTrue(is_instance(g, MagicMock))
self.assertTrue(is_instance(foo, MagicMock))
test1()
test2()
test3()
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
def test_patch_multiple_create_mocks_patcher(self):
original_foo = Foo
original_f = Foo.f
original_g = Foo.g
patcher = patch.multiple(foo_name, f=DEFAULT, g=3, foo=DEFAULT)
result = patcher.start()
try:
f = result['f']
foo = result['foo']
self.assertEqual(set(result), set(['f', 'foo']))
self.assertIs(Foo, original_foo)
self.assertIs(Foo.f, f)
self.assertIs(Foo.foo, foo)
self.assertTrue(is_instance(f, MagicMock))
self.assertTrue(is_instance(foo, MagicMock))
finally:
patcher.stop()
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
def test_patch_multiple_decorating_class(self):
test = self
original_foo = Foo
original_f = Foo.f
original_g = Foo.g
class SomeTest(object):
def _test(self, f, foo):
test.assertIs(Foo, original_foo)
test.assertIs(Foo.f, f)
test.assertEqual(Foo.g, 3)
test.assertIs(Foo.foo, foo)
test.assertTrue(is_instance(f, MagicMock))
test.assertTrue(is_instance(foo, MagicMock))
def test_two(self, f, foo):
self._test(f, foo)
def test_one(self, f, foo):
self._test(f, foo)
SomeTest = patch.multiple(
foo_name, f=DEFAULT, g=3, foo=DEFAULT
)(SomeTest)
thing = SomeTest()
thing.test_one()
thing.test_two()
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
def test_patch_multiple_create(self):
patcher = patch.multiple(Foo, blam='blam')
self.assertRaises(AttributeError, patcher.start)
patcher = patch.multiple(Foo, blam='blam', create=True)
patcher.start()
try:
self.assertEqual(Foo.blam, 'blam')
finally:
patcher.stop()
self.assertFalse(hasattr(Foo, 'blam'))
def test_patch_multiple_spec_set(self):
# if spec_set works then we can assume that spec and autospec also
# work as the underlying machinery is the same
patcher = patch.multiple(Foo, foo=DEFAULT, spec_set=['a', 'b'])
result = patcher.start()
try:
self.assertEqual(Foo.foo, result['foo'])
Foo.foo.a(1)
Foo.foo.b(2)
Foo.foo.a.assert_called_with(1)
Foo.foo.b.assert_called_with(2)
self.assertRaises(AttributeError, setattr, Foo.foo, 'c', None)
finally:
patcher.stop()
def test_patch_multiple_new_callable(self):
class Thing(object):
pass
patcher = patch.multiple(
Foo, f=DEFAULT, g=DEFAULT, new_callable=Thing
)
result = patcher.start()
try:
self.assertIs(Foo.f, result['f'])
self.assertIs(Foo.g, result['g'])
self.assertIsInstance(Foo.f, Thing)
self.assertIsInstance(Foo.g, Thing)
self.assertIsNot(Foo.f, Foo.g)
finally:
patcher.stop()
def test_nested_patch_failure(self):
original_f = Foo.f
original_g = Foo.g
@patch.object(Foo, 'g', 1)
@patch.object(Foo, 'missing', 1)
@patch.object(Foo, 'f', 1)
def thing1():
pass
@patch.object(Foo, 'missing', 1)
@patch.object(Foo, 'g', 1)
@patch.object(Foo, 'f', 1)
def thing2():
pass
@patch.object(Foo, 'g', 1)
@patch.object(Foo, 'f', 1)
@patch.object(Foo, 'missing', 1)
def thing3():
pass
for func in thing1, thing2, thing3:
self.assertRaises(AttributeError, func)
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
def test_new_callable_failure(self):
original_f = Foo.f
original_g = Foo.g
original_foo = Foo.foo
def crasher():
raise NameError('crasher')
@patch.object(Foo, 'g', 1)
@patch.object(Foo, 'foo', new_callable=crasher)
@patch.object(Foo, 'f', 1)
def thing1():
pass
@patch.object(Foo, 'foo', new_callable=crasher)
@patch.object(Foo, 'g', 1)
@patch.object(Foo, 'f', 1)
def thing2():
pass
@patch.object(Foo, 'g', 1)
@patch.object(Foo, 'f', 1)
@patch.object(Foo, 'foo', new_callable=crasher)
def thing3():
pass
for func in thing1, thing2, thing3:
self.assertRaises(NameError, func)
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
self.assertEqual(Foo.foo, original_foo)
def test_patch_multiple_failure(self):
original_f = Foo.f
original_g = Foo.g
patcher = patch.object(Foo, 'f', 1)
patcher.attribute_name = 'f'
good = patch.object(Foo, 'g', 1)
good.attribute_name = 'g'
bad = patch.object(Foo, 'missing', 1)
bad.attribute_name = 'missing'
for additionals in [good, bad], [bad, good]:
patcher.additional_patchers = additionals
@patcher
def func():
pass
self.assertRaises(AttributeError, func)
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
def test_patch_multiple_new_callable_failure(self):
original_f = Foo.f
original_g = Foo.g
original_foo = Foo.foo
def crasher():
raise NameError('crasher')
patcher = patch.object(Foo, 'f', 1)
patcher.attribute_name = 'f'
good = patch.object(Foo, 'g', 1)
good.attribute_name = 'g'
bad = patch.object(Foo, 'foo', new_callable=crasher)
bad.attribute_name = 'foo'
for additionals in [good, bad], [bad, good]:
patcher.additional_patchers = additionals
@patcher
def func():
pass
self.assertRaises(NameError, func)
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
self.assertEqual(Foo.foo, original_foo)
def test_patch_multiple_string_subclasses(self):
Foo = type('Foo', (str,), {'fish': 'tasty'})
foo = Foo()
@patch.multiple(foo, fish='nearly gone')
def test():
self.assertEqual(foo.fish, 'nearly gone')
test()
self.assertEqual(foo.fish, 'tasty')
@patch('unittest.mock.patch.TEST_PREFIX', 'foo')
def test_patch_test_prefix(self):
class Foo(object):
thing = 'original'
def foo_one(self):
return self.thing
def foo_two(self):
return self.thing
def test_one(self):
return self.thing
def test_two(self):
return self.thing
Foo = patch.object(Foo, 'thing', 'changed')(Foo)
foo = Foo()
self.assertEqual(foo.foo_one(), 'changed')
self.assertEqual(foo.foo_two(), 'changed')
self.assertEqual(foo.test_one(), 'original')
self.assertEqual(foo.test_two(), 'original')
@patch('unittest.mock.patch.TEST_PREFIX', 'bar')
def test_patch_dict_test_prefix(self):
class Foo(object):
def bar_one(self):
return dict(the_dict)
def bar_two(self):
return dict(the_dict)
def test_one(self):
return dict(the_dict)
def test_two(self):
return dict(the_dict)
the_dict = {'key': 'original'}
Foo = patch.dict(the_dict, key='changed')(Foo)
foo = Foo()
self.assertEqual(foo.bar_one(), {'key': 'changed'})
self.assertEqual(foo.bar_two(), {'key': 'changed'})
self.assertEqual(foo.test_one(), {'key': 'original'})
self.assertEqual(foo.test_two(), {'key': 'original'})
def test_patch_with_spec_mock_repr(self):
for arg in ('spec', 'autospec', 'spec_set'):
p = patch('%s.SomeClass' % __name__, **{arg: True})
m = p.start()
try:
self.assertIn(" name='SomeClass'", repr(m))
self.assertIn(" name='SomeClass.class_attribute'",
repr(m.class_attribute))
self.assertIn(" name='SomeClass()'", repr(m()))
self.assertIn(" name='SomeClass().class_attribute'",
repr(m().class_attribute))
finally:
p.stop()
def test_patch_nested_autospec_repr(self):
with patch('unittest.test.testmock.support', autospec=True) as m:
self.assertIn(" name='support.SomeClass.wibble()'",
repr(m.SomeClass.wibble()))
self.assertIn(" name='support.SomeClass().wibble()'",
repr(m.SomeClass().wibble()))
def test_mock_calls_with_patch(self):
for arg in ('spec', 'autospec', 'spec_set'):
p = patch('%s.SomeClass' % __name__, **{arg: True})
m = p.start()
try:
m.wibble()
kalls = [call.wibble()]
self.assertEqual(m.mock_calls, kalls)
self.assertEqual(m.method_calls, kalls)
self.assertEqual(m.wibble.mock_calls, [call()])
result = m()
kalls.append(call())
self.assertEqual(m.mock_calls, kalls)
result.wibble()
kalls.append(call().wibble())
self.assertEqual(m.mock_calls, kalls)
self.assertEqual(result.mock_calls, [call.wibble()])
self.assertEqual(result.wibble.mock_calls, [call()])
self.assertEqual(result.method_calls, [call.wibble()])
finally:
p.stop()
def test_patch_imports_lazily(self):
sys.modules.pop('squizz', None)
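# patch() resolves its target lazily: the import happens at start(), so a
# missing module raises then rather than when the patcher is constructed.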
p1 = patch('squizz.squozz')
self.assertRaises(ImportError, p1.start)
squizz = Mock()
squizz.squozz = 6
sys.modules['squizz'] = squizz
p1 = patch('squizz.squozz')
squizz.squozz = 3
p1.start()
p1.stop()
self.assertEqual(squizz.squozz, 3)
def test_patch_propagates_exc_on_exit(self):
class holder:
exc_info = None, None, None
class custom_patch(_patch):
def __exit__(self, etype=None, val=None, tb=None):
_patch.__exit__(self, etype, val, tb)
holder.exc_info = etype, val, tb
stop = __exit__
def with_custom_patch(target):
getter, attribute = _get_target(target)
return custom_patch(
getter, attribute, DEFAULT, None, False, None,
None, None, {}
)
@with_custom_patch('squizz.squozz')
def test(mock):
raise RuntimeError
self.assertRaises(RuntimeError, test)
self.assertIs(holder.exc_info[0], RuntimeError)
self.assertIsNotNone(holder.exc_info[1],
'exception value not propagated')
self.assertIsNotNone(holder.exc_info[2],
'exception traceback not propagated')
def test_create_and_specs(self):
for kwarg in ('spec', 'spec_set', 'autospec'):
p = patch('%s.doesnotexist' % __name__, create=True,
**{kwarg: True})
self.assertRaises(TypeError, p.start)
self.assertRaises(NameError, lambda: doesnotexist)
# check that spec with create is innocuous if the original exists
p = patch(MODNAME, create=True, **{kwarg: True})
p.start()
p.stop()
def test_multiple_specs(self):
original = PTModule
for kwarg in ('spec', 'spec_set'):
p = patch(MODNAME, autospec=0, **{kwarg: 0})
self.assertRaises(TypeError, p.start)
self.assertIs(PTModule, original)
for kwarg in ('spec', 'autospec'):
p = patch(MODNAME, spec_set=0, **{kwarg: 0})
self.assertRaises(TypeError, p.start)
self.assertIs(PTModule, original)
for kwarg in ('spec_set', 'autospec'):
p = patch(MODNAME, spec=0, **{kwarg: 0})
self.assertRaises(TypeError, p.start)
self.assertIs(PTModule, original)
def test_specs_false_instead_of_none(self):
p = patch(MODNAME, spec=False, spec_set=False, autospec=False)
mock = p.start()
try:
# no spec should have been set, so attribute access should not fail
mock.does_not_exist
mock.does_not_exist = 3
finally:
p.stop()
def test_falsey_spec(self):
for kwarg in ('spec', 'autospec', 'spec_set'):
p = patch(MODNAME, **{kwarg: 0})
m = p.start()
try:
self.assertRaises(AttributeError, getattr, m, 'doesnotexit')
finally:
p.stop()
def test_spec_set_true(self):
for kwarg in ('spec', 'autospec'):
p = patch(MODNAME, spec_set=True, **{kwarg: True})
m = p.start()
try:
self.assertRaises(AttributeError, setattr, m,
'doesnotexist', 'something')
self.assertRaises(AttributeError, getattr, m, 'doesnotexist')
finally:
p.stop()
def test_callable_spec_as_list(self):
spec = ('__call__',)
p = patch(MODNAME, spec=spec)
m = p.start()
try:
self.assertTrue(callable(m))
finally:
p.stop()
def test_not_callable_spec_as_list(self):
spec = ('foo', 'bar')
p = patch(MODNAME, spec=spec)
m = p.start()
try:
self.assertFalse(callable(m))
finally:
p.stop()
def test_patch_stopall(self):
unlink = os.unlink
chdir = os.chdir
path = os.path
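# patch.stopall() only stops patchers activated via start(); patches applied
# as decorators (os.path here) stay in effect, as the assertions verify.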
patch('os.unlink', something).start()
patch('os.chdir', something_else).start()
@patch('os.path')
def patched(mock_path):
patch.stopall()
self.assertIs(os.path, mock_path)
self.assertIs(os.unlink, unlink)
self.assertIs(os.chdir, chdir)
patched()
self.assertIs(os.path, path)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
h3biomed/ansible | lib/ansible/modules/network/netconf/netconf_config.py | 2 | 19354 | #!/usr/bin/python
# (c) 2016, Leandro Lisboa Penz <lpenz at lpenz.org>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: netconf_config
version_added: "2.2"
author: "Leandro Lisboa Penz (@lpenz)"
short_description: netconf device configuration
description:
- Netconf is a network management protocol developed and standardized by
the IETF. It is documented in RFC 6241.
- This module allows the user to send a configuration XML file to a netconf
device, and detects if there was a configuration change.
extends_documentation_fragment:
- netconf
- network_agnostic
options:
content:
description:
- The configuration data as defined by the device's data models, the value can be either in
xml string format or text format. The format of the configuration should be supported by the remote
Netconf server
aliases: ['xml']
target:
description:
Name of the configuration datastore to be edited.
- auto, uses candidate and fallback to running
- candidate, edit <candidate/> datastore and then commit
- running, edit <running/> datastore directly
default: auto
version_added: "2.4"
aliases: ['datastore']
source_datastore:
description:
- Name of the configuration datastore to use as the source to copy the configuration
to the datastore mentioned by C(target) option. The values can be either I(running), I(candidate),
I(startup) or a remote URL
version_added: "2.7"
aliases: ['source']
format:
description:
- The format of the configuration provided as value of C(content). Accepted values are I(xml) and I(text) and
the given configuration format should be supported by remote Netconf server.
default: xml
choices: ['xml', 'text']
version_added: "2.7"
lock:
description:
- Instructs the module to explicitly lock the datastore specified as C(target). If the option
value is I(always) it will explicitly lock the datastore mentioned in C(target) option. If the value
is I(never) it will not lock the C(target) datastore. The value I(if-supported) locks the C(target)
datastore only if locking is supported by the remote Netconf server.
default: always
choices: ['never', 'always', 'if-supported']
version_added: "2.7"
default_operation:
description:
- The default operation for <edit-config> rpc, valid values are I(merge), I(replace) and I(none).
If the default value is merge, the configuration data in the C(content) option is merged at the
corresponding level in the C(target) datastore. If the value is replace the data in the C(content)
option completely replaces the configuration in the C(target) datastore. If the value is none the C(target)
datastore is unaffected by the configuration in the config option, unless and until the incoming configuration
data uses the C(operation) operation to request a different operation.
choices: ['merge', 'replace', 'none']
version_added: "2.7"
confirm:
description:
- This argument will configure a timeout value for the commit to be confirmed before it is automatically
rolled back. If the C(confirm_commit) argument is set to False, this argument is silently ignored. If the
value of this argument is set to 0, the commit is confirmed immediately. The remote host MUST
support :candidate and :confirmed-commit capability for this option to work.
default: 0
version_added: "2.7"
confirm_commit:
description:
- This argument will execute commit operation on remote device. It can be used to confirm a previous commit.
type: bool
default: 'no'
version_added: "2.7"
error_option:
description:
- This option controls the netconf server action after an error occurs while editing the configuration.
If the value is I(stop-on-error) it aborts the config edit on the first error, if the value is I(continue-on-error)
it continues to process configuration data on error; errors are recorded and a negative response is generated
if any errors occur. If the value is C(rollback-on-error) it rolls back to the original configuration in case
any error occurs, this requires the remote Netconf server to support the :rollback-on-error capability.
default: stop-on-error
choices: ['stop-on-error', 'continue-on-error', 'rollback-on-error']
version_added: "2.7"
save:
description:
- The C(save) argument instructs the module to save the configuration in C(target) datastore to the
startup-config if changed and if :startup capability is supported by Netconf server.
default: false
version_added: "2.4"
type: bool
backup:
description:
- This argument will cause the module to create a full backup of
the current C(running-config) from the remote device before any
changes are made. If the C(backup_options) value is not given,
the backup file is written to the C(backup) folder in the playbook
root directory or role root directory, if playbook is part of an
ansible role. If the directory does not exist, it is created.
type: bool
default: 'no'
version_added: "2.7"
delete:
description:
- It instructs the module to delete the configuration from value mentioned in C(target) datastore.
type: bool
default: 'no'
version_added: "2.7"
commit:
description:
- This boolean flag controls if the configuration changes should be committed or not after editing the
candidate datastore. This option is supported only if remote Netconf server supports :candidate
capability. If the value is set to I(False) commit won't be issued after edit-config operation
and user needs to handle commit or discard-changes explicitly.
type: bool
default: True
version_added: "2.7"
validate:
description:
- This boolean flag if set validates the content of datastore given in C(target) option.
For this option to work remote Netconf server should support :validate capability.
type: bool
default: False
version_added: "2.7"
src:
description:
- Specifies the source path to the xml file that contains the configuration or configuration template
to load. The path to the source file can either be the full path on the Ansible control host or
a relative path from the playbook or role root directory. This argument is mutually exclusive with I(xml).
version_added: "2.4"
backup_options:
description:
- This is a dict object containing configurable options related to backup file path.
The value of this option is read only when C(backup) is set to I(yes), if C(backup) is set
to I(no) this option will be silently ignored.
suboptions:
filename:
description:
- The filename to be used to store the backup configuration. If the filename
is not given it will be generated based on the hostname, current time and date
in format defined by <hostname>_config.<current-date>@<current-time>
dir_path:
description:
- This option provides the path ending with directory name in which the backup
configuration file will be stored. If the directory does not exist it will be first
created and the filename is either the value of C(filename) or default filename
as described in C(filename) options description. If the path value is not given
in that case a I(backup) directory will be created in the current working directory
and backup configuration will be copied in C(filename) within I(backup) directory.
type: path
type: dict
version_added: "2.8"
requirements:
- "ncclient"
notes:
- This module requires the netconf system service be enabled on
the remote device being managed.
- This module supports devices with and without the candidate and
confirmed-commit capabilities. It will always use the safer feature.
- This module supports the use of connection=netconf
'''
EXAMPLES = '''
- name: use lookup filter to provide xml configuration
netconf_config:
content: "{{ lookup('file', './config.xml') }}"
- name: set ntp server in the device
netconf_config:
content: |
<config xmlns:xc="urn:ietf:params:xml:ns:netconf:base:1.0">
<system xmlns="urn:ietf:params:xml:ns:yang:ietf-system">
<ntp>
<enabled>true</enabled>
<server>
<name>ntp1</name>
<udp><address>127.0.0.1</address></udp>
</server>
</ntp>
</system>
</config>
- name: wipe ntp configuration
netconf_config:
content: |
<config xmlns:xc="urn:ietf:params:xml:ns:netconf:base:1.0">
<system xmlns="urn:ietf:params:xml:ns:yang:ietf-system">
<ntp>
<enabled>false</enabled>
<server operation="remove">
<name>ntp1</name>
</server>
</ntp>
</system>
</config>
- name: configure interface while providing different private key file path (for connection=netconf)
netconf_config:
backup: yes
register: backup_junos_location
vars:
ansible_private_key_file: /home/admin/.ssh/newprivatekeyfile
- name: configurable backup path
netconf_config:
backup: yes
backup_options:
filename: backup.cfg
dir_path: /home/user
'''
RETURN = '''
server_capabilities:
description: list of capabilities of the server
returned: success
type: list
sample: ['urn:ietf:params:netconf:base:1.1','urn:ietf:params:netconf:capability:confirmed-commit:1.0','urn:ietf:params:netconf:capability:candidate:1.0']
backup_path:
description: The full path to the backup file
returned: when backup is yes
type: str
sample: /playbooks/ansible/backup/config.2016-07-16@22:28:34
diff:
  description: If the --diff option is enabled while running, the before and after configuration changes are
               returned as part of the before and after keys.
returned: when diff is enabled
type: dict
sample:
"after": "<rpc-reply>\n<data>\n<configuration>\n<version>17.3R1.10</version>...<--snip-->"
"before": "<rpc-reply>\n<data>\n<configuration>\n <version>17.3R1.10</version>...<--snip-->"
'''
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import AnsibleModule, env_fallback
from ansible.module_utils.connection import Connection, ConnectionError
from ansible.module_utils.network.netconf.netconf import get_capabilities, get_config, sanitize_xml
try:
from lxml.etree import tostring
except ImportError:
from xml.etree.ElementTree import tostring
def main():
""" main entry point for module execution
"""
backup_spec = dict(
filename=dict(),
dir_path=dict(type='path')
)
argument_spec = dict(
content=dict(aliases=['xml']),
target=dict(choices=['auto', 'candidate', 'running'], default='auto', aliases=['datastore']),
source_datastore=dict(aliases=['source']),
format=dict(choices=['xml', 'text'], default='xml'),
lock=dict(choices=['never', 'always', 'if-supported'], default='always'),
default_operation=dict(choices=['merge', 'replace', 'none']),
confirm=dict(type='int', default=0),
confirm_commit=dict(type='bool', default=False),
error_option=dict(choices=['stop-on-error', 'continue-on-error', 'rollback-on-error'], default='stop-on-error'),
backup=dict(type='bool', default=False),
backup_options=dict(type='dict', options=backup_spec),
save=dict(type='bool', default=False),
delete=dict(type='bool', default=False),
commit=dict(type='bool', default=True),
validate=dict(type='bool', default=False),
)
# deprecated options
netconf_top_spec = {
'src': dict(type='path', removed_in_version=2.11),
'host': dict(removed_in_version=2.11),
'port': dict(removed_in_version=2.11, type='int', default=830),
'username': dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME']), removed_in_version=2.11, no_log=True),
'password': dict(fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD']), removed_in_version=2.11, no_log=True),
'ssh_keyfile': dict(fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE']), removed_in_version=2.11, type='path'),
'hostkey_verify': dict(removed_in_version=2.11, type='bool', default=True),
'look_for_keys': dict(removed_in_version=2.11, type='bool', default=True),
'timeout': dict(removed_in_version=2.11, type='int', default=10),
}
argument_spec.update(netconf_top_spec)
mutually_exclusive = [('content', 'src', 'source', 'delete', 'confirm_commit')]
required_one_of = [('content', 'src', 'source', 'delete', 'confirm_commit')]
module = AnsibleModule(argument_spec=argument_spec,
required_one_of=required_one_of,
mutually_exclusive=mutually_exclusive,
supports_check_mode=True)
if module.params['src']:
module.deprecate(msg="argument 'src' has been deprecated. Use file lookup plugin instead to read file contents.",
version="2.11")
config = module.params['content'] or module.params['src']
target = module.params['target']
lock = module.params['lock']
source = module.params['source_datastore']
delete = module.params['delete']
confirm_commit = module.params['confirm_commit']
confirm = module.params['confirm']
validate = module.params['validate']
save = module.params['save']
conn = Connection(module._socket_path)
capabilities = get_capabilities(module)
operations = capabilities['device_operations']
supports_commit = operations.get('supports_commit', False)
supports_writable_running = operations.get('supports_writable_running', False)
supports_startup = operations.get('supports_startup', False)
# identify target datastore
if target == 'candidate' and not supports_commit:
module.fail_json(msg=':candidate is not supported by this netconf server')
elif target == 'running' and not supports_writable_running:
module.fail_json(msg=':writable-running is not supported by this netconf server')
elif target == 'auto':
if supports_commit:
target = 'candidate'
elif supports_writable_running:
target = 'running'
else:
module.fail_json(msg='neither :candidate nor :writable-running are supported by this netconf server')
# Netconf server capability validation against input options
if save and not supports_startup:
module.fail_json(msg='cannot copy <%s/> to <startup/>, while :startup is not supported' % target)
if confirm_commit and not operations.get('supports_confirm_commit', False):
module.fail_json(msg='confirm commit is not supported by Netconf server')
if (confirm > 0) and not operations.get('supports_confirm_commit', False):
module.fail_json(msg='confirm commit is not supported by this netconf server, given confirm timeout: %d' % confirm)
if validate and not operations.get('supports_validate', False):
module.fail_json(msg='validate is not supported by this netconf server')
if lock == 'never':
execute_lock = False
elif target in operations.get('lock_datastore', []):
# lock is requested (always/if-support) and supported => lets do it
execute_lock = True
else:
# lock is requested (always/if-supported) but not supported => issue warning
module.warn("lock operation on '%s' source is not supported on this device" % target)
execute_lock = (lock == 'always')
result = {'changed': False, 'server_capabilities': capabilities.get('server_capabilities', [])}
before = None
after = None
locked = False
try:
if module.params['backup']:
response = get_config(module, target, lock=execute_lock)
before = to_text(tostring(response), errors='surrogate_then_replace').strip()
result['__backup__'] = before.strip()
if validate:
conn.validate(target)
if source:
if not module.check_mode:
conn.copy(source, target)
result['changed'] = True
elif delete:
if not module.check_mode:
conn.delete(target)
result['changed'] = True
elif confirm_commit:
if not module.check_mode:
conn.commit()
result['changed'] = True
elif config:
if module.check_mode and not supports_commit:
module.warn("check mode not supported as Netconf server doesn't support candidate capability")
result['changed'] = True
module.exit_json(**result)
if execute_lock:
conn.lock(target=target)
locked = True
if before is None:
before = to_text(conn.get_config(source=target), errors='surrogate_then_replace').strip()
kwargs = {
'config': config,
'target': target,
'default_operation': module.params['default_operation'],
'error_option': module.params['error_option'],
'format': module.params['format'],
}
conn.edit_config(**kwargs)
if supports_commit and module.params['commit']:
after = to_text(conn.get_config(source='candidate'), errors='surrogate_then_replace').strip()
if not module.check_mode:
confirm_timeout = confirm if confirm > 0 else None
                    confirmed_commit = confirm_timeout is not None
conn.commit(confirmed=confirmed_commit, timeout=confirm_timeout)
else:
conn.discard_changes()
if after is None:
after = to_text(conn.get_config(source='running'), errors='surrogate_then_replace').strip()
sanitized_before = sanitize_xml(before)
sanitized_after = sanitize_xml(after)
if sanitized_before != sanitized_after:
result['changed'] = True
if result['changed']:
if save and not module.check_mode:
conn.copy_config(target, 'startup')
if module._diff:
result['diff'] = {'before': sanitized_before, 'after': sanitized_after}
except ConnectionError as e:
module.fail_json(msg=to_text(e, errors='surrogate_then_replace').strip())
finally:
if locked:
conn.unlock(target=target)
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
hyperized/ansible | lib/ansible/modules/cloud/azure/azure_rm_cosmosdbaccount.py | 8 | 21696 | #!/usr/bin/python
#
# Copyright (c) 2018 Zim Kalinowski, <zikalino@microsoft.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_cosmosdbaccount
version_added: "2.8"
short_description: Manage Azure Database Account instance
description:
- Create, update and delete instance of Azure Database Account.
options:
resource_group:
description:
- Name of an Azure resource group.
required: True
name:
description:
- Cosmos DB database account name.
required: True
location:
description:
- The location of the resource group to which the resource belongs.
- Required when I(state=present).
kind:
description:
- Indicates the type of database account. This can only be set at database account creation.
choices:
- 'global_document_db'
- 'mongo_db'
- 'parse'
consistency_policy:
description:
- The consistency policy for the Cosmos DB account.
suboptions:
default_consistency_level:
description:
- The default consistency level and configuration settings of the Cosmos DB account.
- Required when I(state=present).
choices:
- 'eventual'
- 'session'
- 'bounded_staleness'
- 'strong'
- 'consistent_prefix'
max_staleness_prefix:
description:
- When used with the Bounded Staleness consistency level, this value represents the number of stale requests tolerated.
- Accepted range for this value is 1 - 2,147,483,647. Required when I(default_consistency_policy=bounded_staleness).
type: int
max_interval_in_seconds:
description:
- When used with the Bounded Staleness consistency level, this value represents the time amount of staleness (in seconds) tolerated.
- Accepted range for this value is 5 - 86400. Required when I(default_consistency_policy=bounded_staleness).
type: int
geo_rep_locations:
description:
- An array that contains the georeplication locations enabled for the Cosmos DB account.
- Required when I(state=present).
type: list
suboptions:
name:
description:
- The name of the region.
failover_priority:
description:
- The failover priority of the region. A failover priority of 0 indicates a write region.
- The maximum value for a failover priority = (total number of regions - 1).
- Failover priority values must be unique for each of the regions in which the database account exists.
type: int
database_account_offer_type:
description:
- Database account offer type, for example I(Standard)
- Required when I(state=present).
ip_range_filter:
description:
- Cosmos DB Firewall support. This value specifies the set of IP addresses or IP address ranges.
- In CIDR form to be included as the allowed list of client IPs for a given database account.
- IP addresses/ranges must be comma separated and must not contain any spaces.
is_virtual_network_filter_enabled:
description:
- Flag to indicate whether to enable/disable Virtual Network ACL rules.
type: bool
enable_automatic_failover:
description:
- Enables automatic failover of the write region in the rare event that the region is unavailable due to an outage.
      - Automatic failover will result in a new write region for the account; the new write region is chosen based on the failover priorities configured for the account.
type: bool
enable_cassandra:
description:
- Enable Cassandra.
type: bool
enable_table:
description:
- Enable Table.
type: bool
enable_gremlin:
description:
- Enable Gremlin.
type: bool
virtual_network_rules:
description:
- List of Virtual Network ACL rules configured for the Cosmos DB account.
type: list
suboptions:
subnet:
description:
- It can be a string containing resource id of a subnet.
          - It can be a dictionary containing 'resource_group', 'virtual_network_name' and 'subnet_name'.
ignore_missing_vnet_service_endpoint:
description:
- Create Cosmos DB account without existing virtual network service endpoint.
type: bool
enable_multiple_write_locations:
description:
- Enables the account to write in multiple locations
type: bool
state:
description:
- Assert the state of the Database Account.
      - Use C(present) to create or update a Database Account and C(absent) to delete it.
default: present
choices:
- absent
- present
extends_documentation_fragment:
- azure
- azure_tags
author:
- Zim Kalinowski (@zikalino)
'''
EXAMPLES = '''
- name: Create Cosmos DB Account - min
azure_rm_cosmosdbaccount:
resource_group: myResourceGroup
name: myDatabaseAccount
location: westus
geo_rep_locations:
- name: southcentralus
failover_priority: 0
database_account_offer_type: Standard
- name: Create Cosmos DB Account - max
azure_rm_cosmosdbaccount:
resource_group: myResourceGroup
name: myDatabaseAccount
location: westus
kind: mongo_db
geo_rep_locations:
- name: southcentralus
failover_priority: 0
database_account_offer_type: Standard
ip_range_filter: 10.10.10.10
enable_multiple_write_locations: yes
virtual_network_rules:
- subnet: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/virtualNetworks/myVi
rtualNetwork/subnets/mySubnet"
consistency_policy:
default_consistency_level: bounded_staleness
max_staleness_prefix: 10
max_interval_in_seconds: 1000
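# A hypothetical variant (not from the upstream examples): per the option docs
# and the module's own subnet handling, the subnet under virtual_network_rules
# may also be given as a dictionary, which the module expands to a full
# resource id.
- name: Create Cosmos DB Account - subnet as a dictionary
  azure_rm_cosmosdbaccount:
    resource_group: myResourceGroup
    name: myDatabaseAccount
    location: westus
    geo_rep_locations:
      - name: southcentralus
        failover_priority: 0
    database_account_offer_type: Standard
    virtual_network_rules:
      - subnet:
          resource_group: myResourceGroup
          virtual_network_name: myVirtualNetwork
          subnet_name: mySubnet
        ignore_missing_vnet_service_endpoint: yes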
'''
RETURN = '''
id:
description:
- The unique resource identifier of the database account.
returned: always
type: str
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DocumentDB/databaseAccounts/myData
baseAccount"
'''
import time
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
from ansible.module_utils.common.dict_transformations import _snake_to_camel
try:
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller
from msrestazure.azure_operation import AzureOperationPoller
from azure.mgmt.cosmosdb import CosmosDB
from msrest.serialization import Model
except ImportError:
# This is handled in azure_rm_common
pass
class Actions:
NoAction, Create, Update, Delete = range(4)
class AzureRMCosmosDBAccount(AzureRMModuleBase):
"""Configuration class for an Azure RM Database Account resource"""
def __init__(self):
self.module_arg_spec = dict(
resource_group=dict(
type='str',
required=True
),
name=dict(
type='str',
required=True
),
location=dict(
type='str'
),
kind=dict(
type='str',
choices=['global_document_db',
'mongo_db',
'parse']
),
consistency_policy=dict(
type='dict',
options=dict(
default_consistency_level=dict(
type='str',
choices=['eventual',
'session',
'bounded_staleness',
'strong',
'consistent_prefix']
),
max_staleness_prefix=dict(
type='int'
),
max_interval_in_seconds=dict(
type='int'
)
)
),
geo_rep_locations=dict(
type='list',
options=dict(
name=dict(
type='str',
required=True
),
failover_priority=dict(
type='int',
required=True
)
)
),
database_account_offer_type=dict(
type='str'
),
ip_range_filter=dict(
type='str'
),
is_virtual_network_filter_enabled=dict(
type='bool'
),
enable_automatic_failover=dict(
type='bool'
),
enable_cassandra=dict(
type='bool'
),
enable_table=dict(
type='bool'
),
enable_gremlin=dict(
type='bool'
),
virtual_network_rules=dict(
type='list',
options=dict(
id=dict(
type='str',
required=True
),
ignore_missing_vnet_service_endpoint=dict(
type='bool'
)
)
),
enable_multiple_write_locations=dict(
type='bool'
),
state=dict(
type='str',
default='present',
choices=['present', 'absent']
)
)
self.resource_group = None
self.name = None
self.parameters = dict()
self.results = dict(changed=False)
self.mgmt_client = None
self.state = None
self.to_do = Actions.NoAction
super(AzureRMCosmosDBAccount, self).__init__(derived_arg_spec=self.module_arg_spec,
supports_check_mode=True,
supports_tags=True)
def exec_module(self, **kwargs):
"""Main module execution method"""
for key in list(self.module_arg_spec.keys()) + ['tags']:
if hasattr(self, key):
setattr(self, key, kwargs[key])
elif kwargs[key] is not None:
self.parameters[key] = kwargs[key]
kind = self.parameters.get('kind')
if kind == 'global_document_db':
self.parameters['kind'] = 'GlobalDocumentDB'
elif kind == 'mongo_db':
self.parameters['kind'] = 'MongoDB'
elif kind == 'parse':
self.parameters['kind'] = 'Parse'
dict_camelize(self.parameters, ['consistency_policy', 'default_consistency_level'], True)
dict_rename(self.parameters, ['geo_rep_locations', 'name'], 'location_name')
dict_rename(self.parameters, ['geo_rep_locations'], 'locations')
self.parameters['capabilities'] = []
if self.parameters.pop('enable_cassandra', False):
self.parameters['capabilities'].append({'name': 'EnableCassandra'})
if self.parameters.pop('enable_table', False):
self.parameters['capabilities'].append({'name': 'EnableTable'})
if self.parameters.pop('enable_gremlin', False):
self.parameters['capabilities'].append({'name': 'EnableGremlin'})
for rule in self.parameters.get('virtual_network_rules', []):
subnet = rule.pop('subnet')
if isinstance(subnet, dict):
virtual_network_name = subnet.get('virtual_network_name')
subnet_name = subnet.get('subnet_name')
resource_group_name = subnet.get('resource_group', self.resource_group)
template = "/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Network/virtualNetworks/{2}/subnets/{3}"
subnet = template.format(self.subscription_id, resource_group_name, virtual_network_name, subnet_name)
rule['id'] = subnet
response = None
self.mgmt_client = self.get_mgmt_svc_client(CosmosDB,
base_url=self._cloud_environment.endpoints.resource_manager)
resource_group = self.get_resource_group(self.resource_group)
if "location" not in self.parameters:
self.parameters["location"] = resource_group.location
old_response = self.get_databaseaccount()
if not old_response:
self.log("Database Account instance doesn't exist")
if self.state == 'absent':
self.log("Old instance didn't exist")
else:
self.to_do = Actions.Create
else:
self.log("Database Account instance already exists")
if self.state == 'absent':
self.to_do = Actions.Delete
elif self.state == 'present':
old_response['locations'] = old_response['failover_policies']
if not default_compare(self.parameters, old_response, '', self.results):
self.to_do = Actions.Update
if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
self.log("Need to Create / Update the Database Account instance")
if self.check_mode:
self.results['changed'] = True
return self.results
response = self.create_update_databaseaccount()
self.results['changed'] = True
self.log("Creation / Update done")
elif self.to_do == Actions.Delete:
self.log("Database Account instance deleted")
self.results['changed'] = True
if self.check_mode:
return self.results
self.delete_databaseaccount()
else:
self.log("Database Account instance unchanged")
self.results['changed'] = False
response = old_response
if self.state == 'present':
self.results.update({'id': response.get('id', None)})
return self.results
def create_update_databaseaccount(self):
'''
Creates or updates Database Account with the specified configuration.
:return: deserialized Database Account instance state dictionary
'''
self.log("Creating / Updating the Database Account instance {0}".format(self.name))
try:
response = self.mgmt_client.database_accounts.create_or_update(resource_group_name=self.resource_group,
account_name=self.name,
create_update_parameters=self.parameters)
if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
response = self.get_poller_result(response)
except CloudError as exc:
self.log('Error attempting to create the Database Account instance.')
self.fail("Error creating the Database Account instance: {0}".format(str(exc)))
return response.as_dict()
def delete_databaseaccount(self):
'''
Deletes specified Database Account instance in the specified subscription and resource group.
:return: True
'''
self.log("Deleting the Database Account instance {0}".format(self.name))
try:
response = self.mgmt_client.database_accounts.delete(resource_group_name=self.resource_group,
account_name=self.name)
            # This currently doesn't work as there is a bug in SDK / Service
# if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
# response = self.get_poller_result(response)
except CloudError as e:
self.log('Error attempting to delete the Database Account instance.')
self.fail("Error deleting the Database Account instance: {0}".format(str(e)))
return True
def get_databaseaccount(self):
'''
Gets the properties of the specified Database Account.
:return: deserialized Database Account instance state dictionary
'''
self.log("Checking if the Database Account instance {0} is present".format(self.name))
found = False
try:
response = self.mgmt_client.database_accounts.get(resource_group_name=self.resource_group,
account_name=self.name)
found = True
self.log("Response : {0}".format(response))
self.log("Database Account instance : {0} found".format(response.name))
except CloudError as e:
self.log('Did not find the Database Account instance.')
if found is True:
return response.as_dict()
return False
def default_compare(new, old, path, result):
if new is None:
return True
elif isinstance(new, dict):
if not isinstance(old, dict):
result['compare'] = 'changed [' + path + '] old dict is null'
return False
for k in new.keys():
if not default_compare(new.get(k), old.get(k, None), path + '/' + k, result):
return False
return True
elif isinstance(new, list):
if not isinstance(old, list) or len(new) != len(old):
result['compare'] = 'changed [' + path + '] length is different or null'
return False
elif len(old) == 0:
return True
elif isinstance(old[0], dict):
key = None
if 'id' in old[0] and 'id' in new[0]:
key = 'id'
elif 'name' in old[0] and 'name' in new[0]:
key = 'name'
else:
key = list(old[0])[0]
new = sorted(new, key=lambda x: x.get(key, ''))
old = sorted(old, key=lambda x: x.get(key, ''))
else:
new = sorted(new)
old = sorted(old)
for i in range(len(new)):
if not default_compare(new[i], old[i], path + '/*', result):
return False
return True
else:
if path == '/location' or path.endswith('location_name'):
new = new.replace(' ', '').lower()
            old = old.replace(' ', '').lower()
if new == old:
return True
else:
result['compare'] = 'changed [' + path + '] ' + str(new) + ' != ' + str(old)
return False
def dict_camelize(d, path, camelize_first):
if isinstance(d, list):
for i in range(len(d)):
dict_camelize(d[i], path, camelize_first)
elif isinstance(d, dict):
if len(path) == 1:
old_value = d.get(path[0], None)
if old_value is not None:
d[path[0]] = _snake_to_camel(old_value, camelize_first)
else:
sd = d.get(path[0], None)
if sd is not None:
dict_camelize(sd, path[1:], camelize_first)
def dict_upper(d, path):
if isinstance(d, list):
for i in range(len(d)):
dict_upper(d[i], path)
elif isinstance(d, dict):
if len(path) == 1:
old_value = d.get(path[0], None)
if old_value is not None:
d[path[0]] = old_value.upper()
else:
sd = d.get(path[0], None)
if sd is not None:
dict_upper(sd, path[1:])
def dict_rename(d, path, new_name):
if isinstance(d, list):
for i in range(len(d)):
dict_rename(d[i], path, new_name)
elif isinstance(d, dict):
if len(path) == 1:
old_value = d.pop(path[0], None)
if old_value is not None:
d[new_name] = old_value
else:
sd = d.get(path[0], None)
if sd is not None:
dict_rename(sd, path[1:], new_name)
def dict_expand(d, path, outer_dict_name):
if isinstance(d, list):
for i in range(len(d)):
dict_expand(d[i], path, outer_dict_name)
elif isinstance(d, dict):
if len(path) == 1:
old_value = d.pop(path[0], None)
if old_value is not None:
d[outer_dict_name] = d.get(outer_dict_name, {})
                d[outer_dict_name][path[0]] = old_value
else:
sd = d.get(path[0], None)
if sd is not None:
dict_expand(sd, path[1:], outer_dict_name)
def main():
"""Main execution"""
AzureRMCosmosDBAccount()
if __name__ == '__main__':
main()
| gpl-3.0 |
daodaoliang/python-phonenumbers | python/phonenumbers/data/region_LY.py | 11 | 1642 | """Auto-generated file, do not edit by hand. LY metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_LY = PhoneMetadata(id='LY', country_code=218, international_prefix='00',
general_desc=PhoneNumberDesc(national_number_pattern='[25679]\\d{8}', possible_number_pattern='\\d{7,9}'),
fixed_line=PhoneNumberDesc(national_number_pattern='(?:2[1345]|5[1347]|6[123479]|71)\\d{7}', possible_number_pattern='\\d{7,9}', example_number='212345678'),
mobile=PhoneNumberDesc(national_number_pattern='9[1-6]\\d{7}', possible_number_pattern='\\d{9}', example_number='912345678'),
toll_free=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
premium_rate=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
shared_cost=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
personal_number=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
voip=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
pager=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
uan=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
voicemail=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
no_international_dialling=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
national_prefix='0',
national_prefix_for_parsing='0',
number_format=[NumberFormat(pattern='([25679]\\d)(\\d{7})', format='\\1-\\2', national_prefix_formatting_rule='0\\1')])
| apache-2.0 |
Snowlinux/snowupdate | janitor/plugincore/plugins/dpkg_status_plugin.py | 1 | 2620 | # Copyright (C) 2009-2012 Canonical, Ltd.
# -*- Mode: Python; indent-tabs-mode: nil; tab-width: 4; coding: utf-8 -*-
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, print_function, unicode_literals
__metaclass__ = type
__all__ = [
'DpkgStatusCruft',
'DpkgStatusPlugin',
]
import logging
import subprocess
from apt_pkg import TagFile
from janitor.plugincore.cruft import Cruft
from janitor.plugincore.i18n import setup_gettext
from janitor.plugincore.plugin import Plugin
_ = setup_gettext()
class DpkgStatusCruft(Cruft):
def __init__(self, n_items):
self.n_items = n_items
def get_prefix(self):
return 'dpkg-status'
def get_prefix_description(self):
return _('%i obsolete entries in the status file') % self.n_items
def get_shortname(self):
return _('Obsolete entries in dpkg status')
def get_description(self): # pragma: no cover
return _('Obsolete dpkg status entries')
def cleanup(self):
logging.debug('calling dpkg --forget-old-unavail')
res = subprocess.call('dpkg --forget-old-unavail'.split())
logging.debug('dpkg --forget-old-unavail returned {}'.format(res))
class DpkgStatusPlugin(Plugin):
def __init__(self, filename=None):
self.status = ('/var/lib/dpkg/status'
if filename is None
else filename)
self.condition = ['PostCleanup']
def get_cruft(self):
n_cruft = 0
with open(self.status) as fp:
tagf = TagFile(fp)
while tagf.step():
statusline = tagf.section.get('Status')
(want, flag, status) = statusline.split()
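                # A typical obsolete entry reads "Status: purge ok not-installed":
                # the three fields are the desired state, the error flag and the
                # current state (illustrative sample of dpkg's status format).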
if (want == 'purge' and
flag == 'ok' and
status == 'not-installed'):
n_cruft += 1
logging.debug('DpkgStatusPlugin found {} cruft items'.format(n_cruft))
if n_cruft:
return [DpkgStatusCruft(n_cruft)]
return []
| gpl-2.0 |
mainyanim/eyetoai | findings/distrib.py | 1 | 9083 | import pandas as pd
import numpy as np
import random
import json
from openpyxl import load_workbook
from openpyxl import Workbook
import math
# Helper routines below check cell values, append them to arrays and build
# probability arrays for weighted random choices.
# read excel
df = pd.read_excel("output.xlsx")
wb = load_workbook('output.xlsx')
ws = wb.get_sheet_by_name('Sheet1') # Define worksheet
def get_dic_from_two_lists(keys, values):
return {keys[i]: values[i] for i in range(len(keys))}
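# For example, get_dic_from_two_lists(['Shape', 'Margin'], ['round', 'spiculated'])
# returns {'Shape': 'round', 'Margin': 'spiculated'}.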
# Define function to normalize arr values
def normalize(items):
    problist = [x / sum(items) for x in items]
    return problist
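# For example, normalize([2, 2, 4]) returns [0.25, 0.25, 0.5].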
def concatvals(row, start, stop):
prob_head = list(df)[start:stop]
width = stop - start
col = start
val_arr = []
prob_arr = []
for i in range(width):
value_temp = df.iloc[row - 2, col]
        if not isinstance(value_temp, float):
value = [x.strip() for x in value_temp.split(',')]
len_val = len(value)
prob_arr += [prob_head[i] for _ in range(len_val)]
val_arr += value[0:len_val]
col += 1
randparameter = random.choices(val_arr, prob_arr, k=1)
return randparameter
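# In effect, concatvals(row, 14, 19) draws one random parameter value from
# columns 14-18 of the given spreadsheet row: each cell holds a comma-separated
# list of candidate values, each weighted by that column's probability header
# (our reading of the spreadsheet layout used here).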
def grab_data(r, s, x, y):
ps = [concatvals(r+s, x, y)]
return ps
def create_rep(arr, dict1, row_data, condname, modality):
params = []
to_json = []
if condname == 'Mass' and modality == 'Mammography':
for i in range(len(arr)):
params += grab_data(row_data, 0, 14, 19)
row_data += 1
elif condname == 'Calcifications' and modality == 'Mammography':
for i in range(len(arr)):
params += grab_data(row_data, 3, 14, 19)
row_data += 1
elif condname == 'Assymetry' and modality == 'Mammography':
for i in range(len(arr)):
params += grab_data(row_data, 6, 14, 19)
row_data += 1
elif condname == 'Lymph nodes' and modality == 'Mammography':
for i in range(len(arr)):
params += [concatvals(row_data + 7, 14, 19)]
row_data += 1
elif condname == 'Mass' and modality == 'US':
for i in range(len(arr)):
params += [concatvals(row_data + 8, 14, 19)]
row_data += 1
elif condname == 'Calcifications US' and modality == 'US':
for i in range(len(arr)):
params += [concatvals(row_data + 12, 14, 19)]
row_data += 1
elif condname == 'Lymph nodes' and modality == 'US':
for i in range(len(arr)):
params += [concatvals(row_data + 13, 14, 19)]
row_data += 1
elif condname == 'Special cases' and modality == 'US':
for i in range(len(arr)):
params += [concatvals(row_data + 14, 14, 19)]
row_data += 1
elif condname == 'Mass' and modality == 'MRI':
for i in range(len(arr)):
params += [concatvals(row_data + 15, 14, 19)]
row_data += 1
elif condname == 'MRI features' and modality == 'MRI':
for i in range(len(arr)):
params += [concatvals(row_data + 18, 14, 19)]
row_data += 1
elif condname == 'Kinetic curve assessment' and modality == 'MRI':
for i in range(len(arr)):
params += [concatvals(row_data + 19, 14, 19)]
row_data += 1
elif condname == 'Non-mass enhancement (NME)' and modality == 'MRI':
for i in range(len(arr)):
params += [concatvals(row_data + 20, 14, 19)]
row_data += 1
elif condname == 'Non-enhancing findings' and modality == 'MRI':
for i in range(len(arr)):
params += [concatvals(row_data + 22, 14, 19)]
row_data += 1
elif condname == 'Lymph nodes' and modality == 'MRI':
for i in range(len(arr)):
params += [concatvals(row_data + 22, 14, 19)]
row_data += 1
elif condname == 'Fat containing lesions' and modality == 'MRI':
for i in range(len(arr)):
params += [concatvals(row_data + 23, 14, 19)]
row_data += 1
data = get_dic_from_two_lists(arr, params)
dict1.update(data)
to_json += [dict1]
return to_json
def get_name(infile):
with open(infile, 'r') as f:
contents_of_file = f.read()
lines = contents_of_file.splitlines()
line_number = random.randrange(0, len(lines))
person_name = lines[line_number]
return person_name
def get_numcond():
names = len(df.Name.unique())
return names
def get_cond_name():
name_arr = df.Name.unique()
n = list(name_arr)
n_arr = []
for i in range(len(name_arr)):
        if not isinstance(n[i], float):
n_arr += [n[i]]
rand_cond_name = random.choice(n_arr)
return rand_cond_name
def check_row(cond_name):
from xlrd import open_workbook
book = open_workbook("output.xlsx")
for sheet in book.sheets():
for rowidx in range(sheet.nrows):
row = sheet.row(rowidx)
for colidx, cell in enumerate(row):
if cell.value == cond_name:
print("condition name is: ", cond_name)
return rowidx + 1
# Create random with parameter of report numbers
def generate_report(infile):
# for i in range(items):
a = np.array([[i.value for i in j] for j in ws['C1':'I1']]).ravel()
b = np.array([[i.value for i in j] for j in ws['C2':'I2']]).ravel()
# Read BiRads Probabilities into list
# Read BiRads into list
person_name = get_name(infile)
p_id = random.randrange(100)
p_age = random.randrange(25, 65)
br_p = normalize(b)
print(br_p)
br = random.choices(a, br_p, k=1)
name = get_cond_name()
names = get_numcond()
row = check_row(name)
"create list of values and slice empty entities from list"
rm = df['Relevant modalities'].values.tolist()[0:26]
# r = 'Mammography'
r = random.choice(rm)
dict_report = {'Id': p_id, 'First name': person_name, 'Age': p_age, 'Condition Name': name, 'BiRad': br,
'Relevant Modality': r}
# mammo params
if r == 'Mammography':
f_list = df['Relevant findings'].values.tolist()[0:8]
f = random.choice(f_list)
dict_report.update({'Relevant finding': f})
iter_params_mass = ['Shape', 'Margin', 'Density']
iter_params_calc = ['Typically benign', 'Suspicious morphology', 'Distribution']
iter_params_a = ['Assymetry']
iter_params_lymph = ['Lymph nodes']
if f == 'Mass':
report = create_rep(iter_params_mass, dict_report, row, f, r)
elif f == 'Calcifications':
report = create_rep(iter_params_calc, dict_report, row, f, r)
elif f == 'Assymetry':
report = create_rep(iter_params_a, dict_report, row, f, r)
else:
report = create_rep(iter_params_lymph, dict_report, row, f, r)
elif r == 'US':
f_list = df['Relevant findings'].values.tolist()[8:15]
f = random.choice(f_list)
dict_report.update({'Relevant finding': f})
us_params_mass = ['Shape', 'Margin', 'Echo', 'Posterior']
us_params_calc = ['Calcifications']
us_params_l_nodes = ['Lymph Nodes']
us_params_sp_cases = ['Special Cases']
if f == 'Mass':
report = create_rep(us_params_mass, dict_report, row, f, r)
elif f == 'Calcifications US':
report = create_rep(us_params_calc, dict_report, row, f, r)
elif f == 'Lymph nodes':
report = create_rep(us_params_l_nodes, dict_report, row, f, r)
else:
report = create_rep(us_params_sp_cases, dict_report, row, f, r)
elif r == 'MRI':
f_list = df['Relevant findings'].values.tolist()[15:25]
mri_params_mass = ['Shape', 'Margin', 'Internal enhancement']
mri_params_mri_f = ['MRI features']
mri_params_kin_c_a = ['Kinetic curve assessment']
mri_params_nme = ['Distribution', 'Internal enhancement patterns']
mri_params_nef = ['Non-enhancing patterns']
mri_params_l_nodes = ['Lymph Nodes']
mri_params_fcl = ['Fat containing lesions']
f = random.choice(f_list)
dict_report.update({'Relevant finding': f})
if f == 'Mass':
report = create_rep(mri_params_mass, dict_report, row, f, r)
elif f == 'MRI features':
report = create_rep(mri_params_mri_f, dict_report, row, f, r)
elif f == 'Kinetic curve assessment':
report = create_rep(mri_params_kin_c_a, dict_report, row, f, r)
elif f == 'Non-mass enhancement (NME)':
report = create_rep(mri_params_nme, dict_report, row, f, r)
elif f == 'Non-enhancing findings':
report = create_rep(mri_params_nef, dict_report, row, f, r)
elif f == 'Lymph nodes':
report = create_rep(mri_params_l_nodes, dict_report, row, f, r)
else:
report = create_rep(mri_params_fcl, dict_report, row, f, r)
print(report)
def main():
for i in range(1):
generate_report("first-names.txt")
main()
| mit |
nanditav/15712-TensorFlow | tensorflow/contrib/tfprof/python/tools/tfprof/print_model_analysis_test.py | 10 | 8180 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""print_model_analysis test."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from google.protobuf import text_format
from tensorflow.contrib.tfprof.python.tools.tfprof import pywrap_tensorflow_print_model_analysis_lib as print_mdl
from tensorflow.tools.tfprof import tfprof_options_pb2
from tensorflow.tools.tfprof import tfprof_output_pb2
# pylint: disable=bad-whitespace
# pylint: disable=bad-continuation
TEST_OPTIONS = {
'max_depth': 10000,
'min_bytes': 0,
'min_micros': 0,
'min_params': 0,
'min_float_ops': 0,
'device_regexes': ['.*'],
'order_by': 'name',
'account_type_regexes': ['.*'],
'start_name_regexes': ['.*'],
'trim_name_regexes': [],
'show_name_regexes': ['.*'],
'hide_name_regexes': [],
'account_displayed_op_only': True,
'select': ['params'],
'viz': False
}
# pylint: enable=bad-whitespace
# pylint: enable=bad-continuation
class PrintModelAnalysisTest(tf.test.TestCase):
def _BuildSmallModel(self):
image = tf.zeros([2, 6, 6, 3])
kernel = tf.get_variable(
'DW', [6, 6, 3, 6],
tf.float32,
initializer=tf.random_normal_initializer(stddev=0.001))
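    # The 'DW' kernel holds 6 * 6 * 3 * 6 = 648 parameters, matching the
    # total_parameters value asserted in the expected proto below.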
x = tf.nn.conv2d(image, kernel, [1, 2, 2, 1], padding='SAME')
return x
def testPrintModelAnalysis(self):
opts = tfprof_options_pb2.OptionsProto()
opts.max_depth = TEST_OPTIONS['max_depth']
opts.min_bytes = TEST_OPTIONS['min_bytes']
opts.min_micros = TEST_OPTIONS['min_micros']
opts.min_params = TEST_OPTIONS['min_params']
opts.min_float_ops = TEST_OPTIONS['min_float_ops']
for p in TEST_OPTIONS['device_regexes']:
opts.device_regexes.append(p)
opts.order_by = TEST_OPTIONS['order_by']
for p in TEST_OPTIONS['account_type_regexes']:
opts.account_type_regexes.append(p)
for p in TEST_OPTIONS['start_name_regexes']:
opts.start_name_regexes.append(p)
for p in TEST_OPTIONS['trim_name_regexes']:
opts.trim_name_regexes.append(p)
for p in TEST_OPTIONS['show_name_regexes']:
opts.show_name_regexes.append(p)
for p in TEST_OPTIONS['hide_name_regexes']:
opts.hide_name_regexes.append(p)
opts.account_displayed_op_only = TEST_OPTIONS['account_displayed_op_only']
for p in TEST_OPTIONS['select']:
opts.select.append(p)
opts.viz = TEST_OPTIONS['viz']
with tf.Session() as sess, tf.device('/cpu:0'):
_ = self._BuildSmallModel()
tfprof_pb = tfprof_output_pb2.TFProfNode()
tfprof_pb.ParseFromString(
print_mdl.PrintModelAnalysis(sess.graph.as_graph_def(
).SerializeToString(), b'', b'', b'scope', opts.SerializeToString()))
expected_pb = tfprof_output_pb2.TFProfNode()
text_format.Merge(r"""name: "_TFProfRoot"
exec_micros: 0
requested_bytes: 0
total_exec_micros: 0
total_requested_bytes: 0
total_parameters: 648
children {
name: "Conv2D"
exec_micros: 0
requested_bytes: 0
total_exec_micros: 0
total_requested_bytes: 0
total_parameters: 0
device: "/device:CPU:0"
float_ops: 0
total_float_ops: 0
}
children {
name: "DW"
exec_micros: 0
requested_bytes: 0
parameters: 648
total_exec_micros: 0
total_requested_bytes: 0
total_parameters: 648
device: "/device:CPU:0"
children {
name: "DW/Assign"
exec_micros: 0
requested_bytes: 0
total_exec_micros: 0
total_requested_bytes: 0
total_parameters: 0
device: "/device:CPU:0"
float_ops: 0
total_float_ops: 0
}
children {
name: "DW/Initializer"
exec_micros: 0
requested_bytes: 0
total_exec_micros: 0
total_requested_bytes: 0
total_parameters: 0
children {
name: "DW/Initializer/random_normal"
exec_micros: 0
requested_bytes: 0
total_exec_micros: 0
total_requested_bytes: 0
total_parameters: 0
device: "/device:CPU:0"
children {
name: "DW/Initializer/random_normal/RandomStandardNormal"
exec_micros: 0
requested_bytes: 0
total_exec_micros: 0
total_requested_bytes: 0
total_parameters: 0
device: "/device:CPU:0"
float_ops: 0
total_float_ops: 0
}
children {
name: "DW/Initializer/random_normal/mean"
exec_micros: 0
requested_bytes: 0
total_exec_micros: 0
total_requested_bytes: 0
total_parameters: 0
device: "/device:CPU:0"
float_ops: 0
total_float_ops: 0
}
children {
name: "DW/Initializer/random_normal/mul"
exec_micros: 0
requested_bytes: 0
total_exec_micros: 0
total_requested_bytes: 0
total_parameters: 0
device: "/device:CPU:0"
float_ops: 0
total_float_ops: 0
}
children {
name: "DW/Initializer/random_normal/shape"
exec_micros: 0
requested_bytes: 0
total_exec_micros: 0
total_requested_bytes: 0
total_parameters: 0
device: "/device:CPU:0"
float_ops: 0
total_float_ops: 0
}
children {
name: "DW/Initializer/random_normal/stddev"
exec_micros: 0
requested_bytes: 0
total_exec_micros: 0
total_requested_bytes: 0
total_parameters: 0
device: "/device:CPU:0"
float_ops: 0
total_float_ops: 0
}
float_ops: 0
total_float_ops: 0
}
float_ops: 0
total_float_ops: 0
}
children {
name: "DW/read"
exec_micros: 0
requested_bytes: 0
total_exec_micros: 0
total_requested_bytes: 0
total_parameters: 0
device: "/device:CPU:0"
float_ops: 0
total_float_ops: 0
}
float_ops: 0
total_float_ops: 0
}
children {
name: "zeros"
exec_micros: 0
requested_bytes: 0
total_exec_micros: 0
total_requested_bytes: 0
total_parameters: 0
device: "/device:CPU:0"
float_ops: 0
total_float_ops: 0
}
float_ops: 0
total_float_ops: 0""", expected_pb)
self.assertEqual(expected_pb, tfprof_pb)
if __name__ == '__main__':
tf.test.main()
| apache-2.0 |
sffjunkie/home-assistant | tests/components/light/test_rfxtrx.py | 5 | 11633 | """The tests for the Rfxtrx light platform."""
import unittest
from homeassistant.bootstrap import _setup_component
from homeassistant.components import rfxtrx as rfxtrx_core
from tests.common import get_test_home_assistant
class TestLightRfxtrx(unittest.TestCase):
"""Test the Rfxtrx light platform."""
def setUp(self):
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant(0)
self.hass.config.components = ['rfxtrx']
def tearDown(self):
"""Stop everything that was started."""
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS = []
rfxtrx_core.RFX_DEVICES = {}
if rfxtrx_core.RFXOBJECT:
rfxtrx_core.RFXOBJECT.close_connection()
self.hass.stop()
def test_valid_config(self):
"""Test configuration."""
self.assertTrue(_setup_component(self.hass, 'light', {
'light': {'platform': 'rfxtrx',
'automatic_add': True,
'devices':
{'0b1100cd0213c7f210010f51': {
'name': 'Test',
rfxtrx_core.ATTR_FIREEVENT: True}}}}))
self.assertTrue(_setup_component(self.hass, 'light', {
'light': {'platform': 'rfxtrx',
'automatic_add': True,
'devices':
{'213c7f216': {
'name': 'Test',
'packetid': '0b1100cd0213c7f210010f51',
'signal_repetitions': 3}}}}))
def test_invalid_config(self):
"""Test configuration."""
self.assertFalse(_setup_component(self.hass, 'light', {
'light': {'platform': 'rfxtrx',
'automatic_add': True,
'invalid_key': 'afda',
'devices':
{'213c7f216': {
'name': 'Test',
'packetid': '0b1100cd0213c7f210010f51',
rfxtrx_core.ATTR_FIREEVENT: True}}}}))
def test_default_config(self):
"""Test with 0 switches."""
self.assertTrue(_setup_component(self.hass, 'light', {
'light': {'platform': 'rfxtrx',
'devices': {}}}))
self.assertEqual(0, len(rfxtrx_core.RFX_DEVICES))
def test_old_config(self):
"""Test with 1 light."""
self.assertTrue(_setup_component(self.hass, 'light', {
'light': {'platform': 'rfxtrx',
'devices':
{'123efab1': {
'name': 'Test',
'packetid': '0b1100cd0213c7f210010f51'}}}}))
import RFXtrx as rfxtrxmod
rfxtrx_core.RFXOBJECT =\
rfxtrxmod.Core("", transport_protocol=rfxtrxmod.DummyTransport)
self.assertEqual(1, len(rfxtrx_core.RFX_DEVICES))
entity = rfxtrx_core.RFX_DEVICES['213c7f216']
self.assertEqual('Test', entity.name)
self.assertEqual('off', entity.state)
self.assertTrue(entity.assumed_state)
self.assertEqual(entity.signal_repetitions, 1)
self.assertFalse(entity.should_fire_event)
self.assertFalse(entity.should_poll)
self.assertFalse(entity.is_on)
entity.turn_on()
self.assertTrue(entity.is_on)
self.assertEqual(entity.brightness, 255)
entity.turn_off()
self.assertFalse(entity.is_on)
self.assertEqual(entity.brightness, 0)
entity.turn_on(brightness=100)
self.assertTrue(entity.is_on)
self.assertEqual(entity.brightness, 100)
entity.turn_on(brightness=10)
self.assertTrue(entity.is_on)
self.assertEqual(entity.brightness, 10)
entity.turn_on(brightness=255)
self.assertTrue(entity.is_on)
self.assertEqual(entity.brightness, 255)
def test_one_light(self):
"""Test with 1 light."""
self.assertTrue(_setup_component(self.hass, 'light', {
'light': {'platform': 'rfxtrx',
'devices':
{'0b1100cd0213c7f210010f51': {
'name': 'Test'}}}}))
import RFXtrx as rfxtrxmod
rfxtrx_core.RFXOBJECT =\
rfxtrxmod.Core("", transport_protocol=rfxtrxmod.DummyTransport)
self.assertEqual(1, len(rfxtrx_core.RFX_DEVICES))
entity = rfxtrx_core.RFX_DEVICES['213c7f216']
self.assertEqual('Test', entity.name)
self.assertEqual('off', entity.state)
self.assertTrue(entity.assumed_state)
self.assertEqual(entity.signal_repetitions, 1)
self.assertFalse(entity.should_fire_event)
self.assertFalse(entity.should_poll)
self.assertFalse(entity.is_on)
entity.turn_on()
self.assertTrue(entity.is_on)
self.assertEqual(entity.brightness, 255)
entity.turn_off()
self.assertFalse(entity.is_on)
self.assertEqual(entity.brightness, 0)
entity.turn_on(brightness=100)
self.assertTrue(entity.is_on)
self.assertEqual(entity.brightness, 100)
entity.turn_on(brightness=10)
self.assertTrue(entity.is_on)
self.assertEqual(entity.brightness, 10)
entity.turn_on(brightness=255)
self.assertTrue(entity.is_on)
self.assertEqual(entity.brightness, 255)
def test_several_lights(self):
"""Test with 3 lights."""
self.assertTrue(_setup_component(self.hass, 'light', {
'light': {'platform': 'rfxtrx',
'signal_repetitions': 3,
'devices':
{'0b1100cd0213c7f230010f71': {
'name': 'Test'},
'0b1100100118cdea02010f70': {
'name': 'Bath'},
'0b1100101118cdea02010f70': {
'name': 'Living'}}}}))
self.assertEqual(3, len(rfxtrx_core.RFX_DEVICES))
device_num = 0
for id in rfxtrx_core.RFX_DEVICES:
entity = rfxtrx_core.RFX_DEVICES[id]
self.assertEqual(entity.signal_repetitions, 3)
if entity.name == 'Living':
device_num = device_num + 1
self.assertEqual('off', entity.state)
self.assertEqual('<Entity Living: off>', entity.__str__())
elif entity.name == 'Bath':
device_num = device_num + 1
self.assertEqual('off', entity.state)
self.assertEqual('<Entity Bath: off>', entity.__str__())
elif entity.name == 'Test':
device_num = device_num + 1
self.assertEqual('off', entity.state)
self.assertEqual('<Entity Test: off>', entity.__str__())
self.assertEqual(3, device_num)
def test_discover_light(self):
"""Test with discovery of lights."""
self.assertTrue(_setup_component(self.hass, 'light', {
'light': {'platform': 'rfxtrx',
'automatic_add': True,
'devices': {}}}))
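        # A hedged note (not asserted by the test itself): the hex strings
        # below are raw RFXtrx Lighting2 frames (packet type 0x11); roughly,
        # the bytes are length, packet type, subtype, sequence number, a
        # 4-byte device id, unit code, command and level.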
event = rfxtrx_core.get_rfx_object('0b11009e00e6116202020070')
event.data = bytearray(b'\x0b\x11\x00\x9e\x00\xe6\x11b\x02\x02\x00p')
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
entity = rfxtrx_core.RFX_DEVICES['0e611622']
self.assertEqual(1, len(rfxtrx_core.RFX_DEVICES))
self.assertEqual('<Entity 0b11009e00e6116202020070: on>',
entity.__str__())
event = rfxtrx_core.get_rfx_object('0b11009e00e6116201010070')
event.data = bytearray(b'\x0b\x11\x00\x9e\x00\xe6\x11b\x01\x01\x00p')
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
self.assertEqual(1, len(rfxtrx_core.RFX_DEVICES))
event = rfxtrx_core.get_rfx_object('0b1100120118cdea02020070')
event.data = bytearray([0x0b, 0x11, 0x00, 0x12, 0x01, 0x18,
0xcd, 0xea, 0x02, 0x02, 0x00, 0x70])
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
entity = rfxtrx_core.RFX_DEVICES['118cdea2']
self.assertEqual(2, len(rfxtrx_core.RFX_DEVICES))
self.assertEqual('<Entity 0b1100120118cdea02020070: on>',
entity.__str__())
        # Trying to add a sensor
event = rfxtrx_core.get_rfx_object('0a52085e070100b31b0279')
event.data = bytearray(b'\nR\x08^\x07\x01\x00\xb3\x1b\x02y')
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
self.assertEqual(2, len(rfxtrx_core.RFX_DEVICES))
        # Trying to add a switch
event = rfxtrx_core.get_rfx_object('0b1100100118cdea02010f70')
event.data = bytearray([0x0b, 0x11, 0x00, 0x10, 0x01, 0x18,
0xcd, 0xea, 0x01, 0x01, 0x0f, 0x70])
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
self.assertEqual(2, len(rfxtrx_core.RFX_DEVICES))
# Trying to add a rollershutter
event = rfxtrx_core.get_rfx_object('0a1400adf394ab020e0060')
event.data = bytearray([0x0A, 0x14, 0x00, 0xAD, 0xF3, 0x94,
0xAB, 0x02, 0x0E, 0x00, 0x60])
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
self.assertEqual(2, len(rfxtrx_core.RFX_DEVICES))
def test_discover_light_noautoadd(self):
"""Test with discover of light when auto add is False."""
self.assertTrue(_setup_component(self.hass, 'light', {
'light': {'platform': 'rfxtrx',
'automatic_add': False,
'devices': {}}}))
event = rfxtrx_core.get_rfx_object('0b1100120118cdea02020070')
event.data = bytearray([0x0b, 0x11, 0x00, 0x12, 0x01, 0x18,
0xcd, 0xea, 0x02, 0x02, 0x00, 0x70])
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
self.assertEqual(0, len(rfxtrx_core.RFX_DEVICES))
event = rfxtrx_core.get_rfx_object('0b1100120118cdea02010070')
event.data = bytearray([0x0b, 0x11, 0x00, 0x12, 0x01, 0x18,
0xcd, 0xea, 0x02, 0x01, 0x00, 0x70])
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
self.assertEqual(0, len(rfxtrx_core.RFX_DEVICES))
event = rfxtrx_core.get_rfx_object('0b1100120118cdea02020070')
event.data = bytearray([0x0b, 0x11, 0x00, 0x12, 0x01, 0x18,
0xcd, 0xea, 0x02, 0x02, 0x00, 0x70])
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
self.assertEqual(0, len(rfxtrx_core.RFX_DEVICES))
# Trying to add a sensor
event = rfxtrx_core.get_rfx_object('0a52085e070100b31b0279')
event.data = bytearray(b'\nR\x08^\x07\x01\x00\xb3\x1b\x02y')
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
self.assertEqual(0, len(rfxtrx_core.RFX_DEVICES))
# Trying to add a switch
event = rfxtrx_core.get_rfx_object('0b1100100118cdea02010f70')
event.data = bytearray([0x0b, 0x11, 0x00, 0x10, 0x01, 0x18,
0xcd, 0xea, 0x01, 0x01, 0x0f, 0x70])
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
self.assertEqual(0, len(rfxtrx_core.RFX_DEVICES))
# Trying to add a rollershutter
event = rfxtrx_core.get_rfx_object('0a1400adf394ab020e0060')
event.data = bytearray([0x0A, 0x14, 0x00, 0xAD, 0xF3, 0x94,
0xAB, 0x02, 0x0E, 0x00, 0x60])
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
self.assertEqual(0, len(rfxtrx_core.RFX_DEVICES))
| mit |
nlevitt/youtube-dl | youtube_dl/extractor/thisamericanlife.py | 135 | 1549 | from __future__ import unicode_literals
from .common import InfoExtractor
class ThisAmericanLifeIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?thisamericanlife\.org/(?:radio-archives/episode/|play_full\.php\?play=)(?P<id>\d+)'
_TESTS = [{
'url': 'http://www.thisamericanlife.org/radio-archives/episode/487/harper-high-school-part-one',
'md5': '8f7d2da8926298fdfca2ee37764c11ce',
'info_dict': {
'id': '487',
'ext': 'm4a',
'title': '487: Harper High School, Part One',
'description': 'md5:ee40bdf3fb96174a9027f76dbecea655',
'thumbnail': 're:^https?://.*\.jpg$',
},
}, {
'url': 'http://www.thisamericanlife.org/play_full.php?play=487',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(
'http://www.thisamericanlife.org/radio-archives/episode/%s' % video_id, video_id)
return {
'id': video_id,
'url': 'http://stream.thisamericanlife.org/{0}/stream/{0}_64k.m3u8'.format(video_id),
'protocol': 'm3u8_native',
'ext': 'm4a',
'acodec': 'aac',
'vcodec': 'none',
'abr': 64,
'title': self._html_search_meta(r'twitter:title', webpage, 'title', fatal=True),
'description': self._html_search_meta(r'description', webpage, 'description'),
'thumbnail': self._og_search_thumbnail(webpage),
}
| unlicense |
srvg/ansible-modules-extras | storage/netapp/netapp_e_host.py | 45 | 17574 | #!/usr/bin/python
# (c) 2016, NetApp, Inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = """
---
module: netapp_e_host
short_description: manage eseries hosts
description:
- Create, update, remove hosts on NetApp E-series storage arrays
version_added: '2.2'
author: Kevin Hulquest (@hulquest)
options:
api_username:
required: true
description:
- The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_password:
required: true
description:
- The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_url:
required: true
description:
- The url to the SANtricity WebServices Proxy or embedded REST API.
example:
- https://prod-1.wahoo.acme.com/devmgr/v2
validate_certs:
required: false
default: true
description:
- Should https certificates be validated?
ssid:
description:
- the id of the storage array you wish to act against
required: True
name:
description:
        - If the host doesn't yet exist, the label to assign at creation time.
        - If the host already exists, this is used to identify the host when applying any desired changes.
required: True
host_type_index:
description:
- The index that maps to host type you wish to create. It is recommended to use the M(netapp_e_facts) module to gather this information. Alternatively you can use the WSP portal to retrieve the information.
required: True
ports:
description:
        - a list of dictionaries of host ports you wish to associate with the newly created host
required: False
group:
description:
- the group you want the host to be a member of
required: False
"""
EXAMPLES = """
- name: Set Host Info
netapp_e_host:
ssid: "{{ ssid }}"
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
name: "{{ host_name }}"
host_type_index: "{{ host_type_index }}"
"""
RETURN = """
msg:
description: Success message
returned: success
type: string
sample: The host has been created.
"""
import json
from ansible.module_utils.api import basic_auth_argument_spec
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.urls import open_url
from ansible.module_utils.six.moves.urllib.error import HTTPError
HEADERS = {
"Content-Type": "application/json",
"Accept": "application/json",
}
def request(url, data=None, headers=None, method='GET', use_proxy=True,
force=False, last_mod_time=None, timeout=10, validate_certs=True,
url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
try:
r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
url_username=url_username, url_password=url_password, http_agent=http_agent,
force_basic_auth=force_basic_auth)
except HTTPError:
err = get_exception()
r = err.fp
try:
raw_data = r.read()
if raw_data:
data = json.loads(raw_data)
else:
            data = None
except:
if ignore_errors:
pass
else:
raise Exception(raw_data)
resp_code = r.getcode()
if resp_code >= 400 and not ignore_errors:
raise Exception(resp_code, data)
else:
return resp_code, data
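# Illustrative usage (mirrors the calls made later in this module): request()
# returns a (status_code, decoded_json) tuple and raises on any status >= 400
# unless ignore_errors is set, e.g.
#   (rc, hosts) = request(url + 'storage-systems/%s/hosts' % ssid,
#                         url_username=user, url_password=pwd, headers=HEADERS)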
class Host(object):
def __init__(self):
argument_spec = basic_auth_argument_spec()
argument_spec.update(dict(
api_username=dict(type='str', required=True),
api_password=dict(type='str', required=True, no_log=True),
api_url=dict(type='str', required=True),
ssid=dict(type='str', required=True),
state=dict(type='str', required=True, choices=['absent', 'present']),
group=dict(type='str', required=False),
ports=dict(type='list', required=False),
force_port=dict(type='bool', default=False),
name=dict(type='str', required=True),
host_type_index=dict(type='int', required=True)
))
self.module = AnsibleModule(argument_spec=argument_spec)
args = self.module.params
self.group = args['group']
self.ports = args['ports']
self.force_port = args['force_port']
self.name = args['name']
self.host_type_index = args['host_type_index']
self.state = args['state']
self.ssid = args['ssid']
self.url = args['api_url']
self.user = args['api_username']
self.pwd = args['api_password']
self.certs = args['validate_certs']
self.post_body = dict()
if not self.url.endswith('/'):
self.url += '/'
@property
def valid_host_type(self):
try:
(rc, host_types) = request(self.url + 'storage-systems/%s/host-types' % self.ssid, url_password=self.pwd,
url_username=self.user, validate_certs=self.certs, headers=HEADERS)
except Exception:
err = get_exception()
self.module.fail_json(
msg="Failed to get host types. Array Id [%s]. Error [%s]." % (self.ssid, str(err)))
try:
match = filter(lambda host_type: host_type['index'] == self.host_type_index, host_types)[0]
return True
except IndexError:
self.module.fail_json(msg="There is no host type with index %s" % self.host_type_index)
@property
def hostports_available(self):
used_ids = list()
try:
(rc, self.available_ports) = request(self.url + 'storage-systems/%s/unassociated-host-ports' % self.ssid,
url_password=self.pwd, url_username=self.user,
validate_certs=self.certs,
headers=HEADERS)
except:
err = get_exception()
self.module.fail_json(
msg="Failed to get unassociated host ports. Array Id [%s]. Error [%s]." % (self.ssid, str(err)))
if len(self.available_ports) > 0 and len(self.ports) <= len(self.available_ports):
for port in self.ports:
for free_port in self.available_ports:
# Desired type matches, but also make sure we haven't already used the ID
if free_port['id'] not in used_ids:
# update the port arg to have an id attribute
used_ids.append(free_port['id'])
break
if len(used_ids) != len(self.ports) and not self.force_port:
self.module.fail_json(
msg="There are not enough free host ports with the specified port types to proceed")
else:
return True
else:
self.module.fail_json(msg="There are no host ports available OR there are not enough unassigned host ports")
@property
def group_id(self):
if self.group:
try:
(rc, all_groups) = request(self.url + 'storage-systems/%s/host-groups' % self.ssid,
url_password=self.pwd,
url_username=self.user, validate_certs=self.certs, headers=HEADERS)
except:
err = get_exception()
self.module.fail_json(
msg="Failed to get host groups. Array Id [%s]. Error [%s]." % (self.ssid, str(err)))
try:
group_obj = list(filter(lambda group: group['name'] == self.group, all_groups))[0]
return group_obj['id']
except IndexError:
self.module.fail_json(msg="No group with the name: %s exists" % self.group)
else:
# Return the value equivalent of no group
return "0000000000000000000000000000000000000000"
@property
def host_exists(self):
try:
(rc, all_hosts) = request(self.url + 'storage-systems/%s/hosts' % self.ssid, url_password=self.pwd,
url_username=self.user, validate_certs=self.certs, headers=HEADERS)
except:
err = get_exception()
self.module.fail_json(
msg="Failed to determine host existence. Array Id [%s]. Error [%s]." % (self.ssid, str(err)))
self.all_hosts = all_hosts
try: # Try to grab the host object
self.host_obj = list(filter(lambda host: host['label'] == self.name, all_hosts))[0]
return True
except IndexError:
# Host with the name passed in does not exist
return False
@property
def needs_update(self):
needs_update = False
self.force_port_update = False
if self.host_obj['clusterRef'] != self.group_id or \
self.host_obj['hostTypeIndex'] != self.host_type_index:
needs_update = True
if self.ports:
if not self.host_obj['ports']:
needs_update = True
for arg_port in self.ports:
# First a quick check to see if the port is mapped to a different host
if not self.port_on_diff_host(arg_port):
for obj_port in self.host_obj['ports']:
if arg_port['label'] == obj_port['label']:
# Confirmed that port arg passed in exists on the host
# port_id = self.get_port_id(obj_port['label'])
if arg_port['type'] != obj_port['portId']['ioInterfaceType']:
needs_update = True
if 'iscsiChapSecret' in arg_port:
# No way to know the current secret attr, so always return True just in case
needs_update = True
else:
# If the user wants the ports to be reassigned, do it
if self.force_port:
self.force_port_update = True
needs_update = True
else:
self.module.fail_json(
msg="The port you specified:\n%s\n is associated with a different host. Set force_port to True or try a different port spec" % arg_port)
return needs_update
def port_on_diff_host(self, arg_port):
""" Checks to see if a passed in port arg is present on a different host """
for host in self.all_hosts:
# Only check 'other' hosts
if host['label'] != self.name:
for port in host['ports']:
# Check if the port label is found in the port dict list of each host
if arg_port['label'] == port['label']:
self.other_host = host
return True
return False
def reassign_ports(self, apply=True):
if not self.post_body:
self.post_body = dict(
portsToUpdate=dict()
)
for port in self.ports:
if self.port_on_diff_host(port):
self.post_body['portsToUpdate'].update(dict(
portRef=self.other_host['hostPortRef'],
hostRef=self.host_obj['id'],
# Doesn't yet address the port identifier or CHAP secret
))
if apply:
try:
(rc, self.host_obj) = request(
self.url + 'storage-systems/%s/hosts/%s' % (self.ssid, self.host_obj['id']),
url_username=self.user, url_password=self.pwd, headers=HEADERS,
validate_certs=self.certs, method='POST', data=json.dumps(self.post_body))
except:
err = get_exception()
self.module.fail_json(
msg="Failed to reassign host port. Host Id [%s]. Array Id [%s]. Error [%s]." % (
self.host_obj['id'], self.ssid, str(err)))
def update_host(self):
if self.ports:
if self.hostports_available:
if self.force_port_update is True:
self.reassign_ports(apply=False)
# Make sure that only ports that aren't being reassigned are passed into the ports attr
self.ports = [port for port in self.ports if not self.port_on_diff_host(port)]
self.post_body['ports'] = self.ports
if self.group:
self.post_body['groupId'] = self.group_id
self.post_body['hostType'] = dict(index=self.host_type_index)
try:
(rc, self.host_obj) = request(self.url + 'storage-systems/%s/hosts/%s' % (self.ssid, self.host_obj['id']),
url_username=self.user, url_password=self.pwd, headers=HEADERS,
validate_certs=self.certs, method='POST', data=json.dumps(self.post_body))
except:
err = get_exception()
self.module.fail_json(msg="Failed to update host. Array Id [%s]. Error [%s]." % (self.ssid, str(err)))
self.module.exit_json(changed=True, **self.host_obj)
def create_host(self):
post_body = dict(
name=self.name,
host_type=dict(index=self.host_type_index),
groupId=self.group_id,
ports=self.ports
)
if self.ports:
# Check that all supplied port args are valid
if self.hostports_available:
post_body.update(ports=self.ports)
elif not self.force_port:
self.module.fail_json(
msg="You supplied ports that are already in use. Supply force_port to True if you wish to reassign the ports")
if not self.host_exists:
try:
(rc, create_resp) = request(self.url + "storage-systems/%s/hosts" % self.ssid, method='POST',
url_username=self.user, url_password=self.pwd, validate_certs=self.certs,
data=json.dumps(post_body), headers=HEADERS)
except:
err = get_exception()
self.module.fail_json(
msg="Failed to create host. Array Id [%s]. Error [%s]." % (self.ssid, str(err)))
else:
self.module.exit_json(changed=False,
msg="Host already exists. Id [%s]. Host [%s]." % (self.ssid, self.name))
self.host_obj = create_resp
if self.ports and self.force_port:
self.reassign_ports()
self.module.exit_json(changed=True, **self.host_obj)
def remove_host(self):
try:
(rc, resp) = request(self.url + "storage-systems/%s/hosts/%s" % (self.ssid, self.host_obj['id']),
method='DELETE',
url_username=self.user, url_password=self.pwd, validate_certs=self.certs)
except:
err = get_exception()
self.module.fail_json(
msg="Failed to remote host. Host[%s]. Array Id [%s]. Error [%s]." % (self.host_obj['id'],
self.ssid,
str(err)))
def apply(self):
if self.state == 'present':
if self.host_exists:
if self.needs_update and self.valid_host_type:
self.update_host()
else:
self.module.exit_json(changed=False, msg="Host already present.", id=self.ssid, label=self.name)
elif self.valid_host_type:
self.create_host()
else:
if self.host_exists:
self.remove_host()
self.module.exit_json(changed=True, msg="Host removed.")
else:
self.module.exit_json(changed=False, msg="Host already absent.", id=self.ssid, label=self.name)
def main():
host = Host()
host.apply()
if __name__ == '__main__':
main()
| gpl-3.0 |
Suwmlee/XX-Net | gae_proxy/server/lib/google/appengine/ext/bulkload/simplexml_connector.py | 1 | 7328 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Bulkloader XML reading and writing.
Handle the XML format specified in a bulkloader.yaml file.
"""
import codecs
import logging
import re
from xml.etree import cElementTree as ElementTree
from xml.sax import saxutils
from google.appengine.ext.bulkload import bulkloader_errors
from google.appengine.ext.bulkload import connector_interface
NODE_PATH_ONLY_RE = '(/[a-zA-Z][a-zA-Z0-9]*)+$'
class SimpleXmlConnector(connector_interface.ConnectorInterface):
"""Read/write a simply-structured XML file and convert dicts for each record.
A simply-structed XML file is one where we can locate all interesting nodes
with a simple (ElementTree supported) xpath, and each node contains either
all the info we care about as child (and not grandchild) nodes with text or
as attributes.
We'll also pass the entire node in case the developer wants to do something
more interesting with it (occasional grandchildren, parents, etc.).
This is of course a fairly expensive way to read XML--we build a DOM, then
copy parts of it into a dict. A pull model would work well with the interface
too.
"""
ELEMENT_CENTRIC = 1
ATTRIBUTE_CENTRIC = 2
@classmethod
def create_from_options(cls, options, name):
"""Factory using an options dictionary.
Args:
options: Dictionary of options. Must contain:
* xpath_to_nodes: The xpath to select a record.
* style: 'element_centric' or 'attribute_centric'
name: The name of this transformer, for use in error messages.
Returns:
XmlConnector connector object described by the specified options.
Raises:
InvalidConfiguration: If the config is invalid.
"""
xpath_to_nodes = options.get('xpath_to_nodes')
if not xpath_to_nodes:
raise bulkloader_errors.InvalidConfiguration(
'simplexml must specify xpath_to_nodes. (In transformer named %s)' %
name)
if not re.match(NODE_PATH_ONLY_RE, xpath_to_nodes):
logging.warning('simplexml export only supports very simple '
'/root/to/node xpath_to_nodes for now.')
xml_style = options.get('style')
xml_style_mapping = {
'element_centric': cls.ELEMENT_CENTRIC,
'attribute_centric': cls.ATTRIBUTE_CENTRIC,
}
if xml_style not in xml_style_mapping:
raise bulkloader_errors.InvalidConfiguration(
'simplexml must specify one of these valid xml_style options: "%s". '
'You specified %s in transformer named %s.' %
('", "'.join(list(xml_style_mapping.keys())), xml_style,
name))
return cls(xpath_to_nodes, xml_style_mapping[xml_style])
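# Hedged example of the options mapping this factory consumes (values are
# illustrative, not taken from a real bulkloader.yaml):
#
#   SimpleXmlConnector.create_from_options(
#       {'xpath_to_nodes': '/feed/items/item', 'style': 'element_centric'},
#       'my_transformer')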
def __init__(self, xpath_to_nodes, xml_style):
"""Constructor.
Args:
xpath_to_nodes: xpath to the nodes to run over.
xml_style: ELEMENT_CENTRIC or ATTRIBUTE_CENTRIC--we'll
either convert the list of elements to a dict (the first element of a
given name wins, per generate_import_record) or the list of attributes.
Raises:
InvalidConfiguration: If the config is invalid.
"""
self.xpath_to_nodes = xpath_to_nodes
assert xml_style in (self.ELEMENT_CENTRIC, self.ATTRIBUTE_CENTRIC)
self.xml_style = xml_style
self.output_stream = None
self.bulkload_state = None
self.depth = 0
if re.match(NODE_PATH_ONLY_RE, xpath_to_nodes):
self.node_list = self.xpath_to_nodes.split('/')[1:]
self.entity_node = self.node_list[-1]
self.node_list = self.node_list[:-1]
else:
self.node_list = None
self.entity_node = None
def generate_import_record(self, filename, bulkload_state):
"""Generator, yields dicts for nodes found as described in the options."""
self.bulkload_state = bulkload_state
tree = ElementTree.parse(filename)
xpath_to_nodes = self.xpath_to_nodes
if (len(xpath_to_nodes) > 1 and xpath_to_nodes[0] == '/'
and xpath_to_nodes[1] != '/'):
if tree.getroot().tag != xpath_to_nodes.split('/')[1]:
return
xpath_to_nodes = '/' + xpath_to_nodes.split('/', 2)[2]
nodes = tree.findall(xpath_to_nodes)
for node in nodes:
if self.xml_style == self.ELEMENT_CENTRIC:
input_dict = {}
for child in node.getchildren():
if child.tag not in input_dict:
input_dict[child.tag] = child.text
else:
input_dict = dict(list(node.items()))
input_dict['__node__'] = node
yield input_dict
def initialize_export(self, filename, bulkload_state):
"""Initialize the output file."""
self.bulkload_state = bulkload_state
if not self.node_list:
raise bulkloader_errors.InvalidConfiguration(
'simplexml export only supports simple /root/to/node xpath_to_nodes '
'for now.')
self.output_stream = codecs.open(filename, 'wb', 'utf-8')
self.output_stream.write('<?xml version="1.0"?>\n')
self.depth = 0
for node in self.node_list:
self.output_stream.write('%s<%s>\n' % (' ' * self.depth, node))
self.depth += 1
self.indent = ' ' * self.depth
def write_iterable_as_elements(self, values):
"""Write a dict as elements, possibly recursively."""
if isinstance(values, dict):
values = iter(values.items())
for (name, value) in values:
if isinstance(value, str):
self.output_stream.write('%s <%s>%s</%s>\n' % (self.indent, name,
saxutils.escape(value),
name))
else:
self.output_stream.write('%s <%s>\n' % (self.indent, name))
self.depth += 1
self.indent = ' ' * self.depth
self.write_iterable_as_elements(value)
self.depth -= 1
self.indent = ' ' * self.depth
self.output_stream.write('%s </%s>\n' % (self.indent, name))
def write_dict(self, dictionary):
"""Write one record for the specified entity."""
if self.xml_style == self.ELEMENT_CENTRIC:
self.output_stream.write('%s<%s>\n' % (self.indent, self.entity_node))
self.write_iterable_as_elements(dictionary)
self.output_stream.write('%s</%s>\n' % (self.indent, self.entity_node))
else:
self.output_stream.write('%s<%s ' % (self.indent, self.entity_node))
for (name, value) in dictionary.items():
self.output_stream.write('%s=%s ' % (name, saxutils.quoteattr(value)))
self.output_stream.write('/>\n')
def finalize_export(self):
if not self.output_stream:
return
for node in reversed(self.node_list):
self.depth -= 1
self.output_stream.write('%s</%s>\n' % (' ' * self.depth, node))
self.output_stream.close()
self.output_stream = None
| bsd-2-clause |
asposeforcloud/Aspose_Cloud_SDK_For_Python | test/test_aspose_tasks.py | 1 | 6228 | __author__ = 'AssadMahmood'
import unittest
import asposecloud
import os.path
import json
from asposecloud.storage import Folder
from asposecloud.tasks import Document
from asposecloud.tasks import Calendar
class TestAsposeTasks(unittest.TestCase):
def setUp(self):
with open('setup.json') as json_file:
data = json.load(json_file)
asposecloud.AsposeApp.app_key = str(data['app_key'])
asposecloud.AsposeApp.app_sid = str(data['app_sid'])
asposecloud.AsposeApp.output_path = str(data['output_location'])
asposecloud.Product.product_uri = str(data['product_uri'])
def test_add_link(self):
folder = Folder()
response = folder.upload_file('./data/test_tasks.mpp')
self.assertEqual(True, response)
link_data = {"Link":None,"Index":0,"PredecessorUid":1,"SuccessorUid":3,"LinkType":3,"Lag":9600,"LagFormat":4}
doc = Document('test_tasks.mpp')
response = doc.add_link(link_data)
self.assertEqual(dict, type(response))
def test_add_calendar(self):
folder = Folder()
response = folder.upload_file('./data/test_tasks.mpp')
self.assertEqual(True, response)
calendar_data = {"Name":"ADDED CALENDAR","Uid":0,"Days":[{"DayType":1,"DayWorking":False,
"FromDate":"0001-01-01T00:00:00",
"ToDate":"0001-01-01T00:00:00",
"WorkingTimes":[]},
{"DayType":2,"DayWorking":True,
"FromDate":"0001-01-01T00:00:00",
"ToDate":"0001-01-01T00:00:00",
"WorkingTimes":[{"FromTime":"2010-01-01T09:00:00",
"ToTime":"2010-01-01T12:00:00"},
{"FromTime":"2010-01-01T13:00:00",
"ToTime":"2010-01-01T18:00:00"}]},
{"DayType":3,"DayWorking":True,
"FromDate":"0001-01-01T00:00:00",
"ToDate":"0001-01-01T00:00:00",
"WorkingTimes":[{"FromTime":"2010-01-01T09:00:00",
"ToTime":"2010-01-01T12:00:00"},
{"FromTime":"2010-01-01T13:00:00",
"ToTime":"2010-01-01T18:00:00"}]},
{"DayType":4,"DayWorking":True,
"FromDate":"0001-01-01T00:00:00",
"ToDate":"0001-01-01T00:00:00",
"WorkingTimes":[{"FromTime":"2010-01-01T09:00:00",
"ToTime":"2010-01-01T12:00:00"},
{"FromTime":"2010-01-01T13:00:00",
"ToTime":"2010-01-01T18:00:00"}]},
{"DayType":5,"DayWorking":True,
"FromDate":"0001-01-01T00:00:00",
"ToDate":"0001-01-01T00:00:00",
"WorkingTimes":[{"FromTime":"2010-01-01T09:00:00",
"ToTime":"2010-01-01T12:00:00"},
{"FromTime":"2010-01-01T13:00:00",
"ToTime":"2010-01-01T18:00:00"}]},
{"DayType":6,"DayWorking":True,
"FromDate":"0001-01-01T00:00:00",
"ToDate":"0001-01-01T00:00:00",
"WorkingTimes":[{"FromTime":"2010-01-01T09:00:00",
"ToTime":"2010-01-01T12:00:00"},
{"FromTime":"2010-01-01T13:00:00",
"ToTime":"2010-01-01T18:00:00"}]},
{"DayType":7,"DayWorking":True,
"FromDate":"0001-01-01T00:00:00",
"ToDate":"0001-01-01T00:00:00",
"WorkingTimes":[{"FromTime":"2010-01-01T09:00:00",
"ToTime":"2010-01-01T13:00:00"}]}],
"Exceptions":[],"IsBaseCalendar":True,"BaseCalendar":None,"IsBaselineCalendar":False}
cal = Calendar('test_tasks.mpp')
response = cal.add_calendar(calendar_data)
self.assertEqual(dict, type(response))
if __name__ == '__main__':
unittest.main() | mit |
sestrella/ansible | test/units/modules/network/f5/test_bigip_profile_analytics.py | 22 | 3514 | # -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_profile_analytics import ApiParameters
from library.modules.bigip_profile_analytics import ModuleParameters
from library.modules.bigip_profile_analytics import ModuleManager
from library.modules.bigip_profile_analytics import ArgumentSpec
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.modules.utils import set_module_args
except ImportError:
from ansible.modules.network.f5.bigip_profile_analytics import ApiParameters
from ansible.modules.network.f5.bigip_profile_analytics import ModuleParameters
from ansible.modules.network.f5.bigip_profile_analytics import ModuleManager
from ansible.modules.network.f5.bigip_profile_analytics import ArgumentSpec
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.modules.utils import set_module_args
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class TestParameters(unittest.TestCase):
def test_module_parameters(self):
args = dict(
name='foo',
parent='bar',
description='foo',
collect_geo=True,
collect_ip=True,
)
p = ModuleParameters(params=args)
assert p.name == 'foo'
assert p.parent == '/Common/bar'
assert p.description == 'foo'
assert p.collect_geo == 'yes'
assert p.collect_ip == 'yes'
def test_api_parameters(self):
args = load_fixture('load_ltm_profile_analytics_1.json')
p = ApiParameters(params=args)
assert p.name == 'foo'
assert p.collect_geo == 'no'
class TestManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
def test_create(self, *args):
# Configure the arguments that would be sent to the Ansible module
set_module_args(dict(
name='foo',
parent='bar',
description='foo',
collect_geo=True,
collect_ip=True,
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
mm = ModuleManager(module=module)
# Override methods to force specific logic in the module to happen
mm.exists = Mock(return_value=False)
mm.create_on_device = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
| gpl-3.0 |
mce35/agocontrol | devices/onvif/suds/transport/https.py | 202 | 3634 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
Contains classes for basic HTTP (authenticated) transport implementations.
"""
import urllib2 as u2
from suds.transport import *
from suds.transport.http import HttpTransport
from logging import getLogger
log = getLogger(__name__)
class HttpAuthenticated(HttpTransport):
"""
Provides basic http authentication that follows the RFC-2617 specification.
As defined by the specification, credentials are sent to the server
only after it requests them (HTTP/1.0 401 Authorization Required).
@ivar pm: The password manager.
@ivar handler: The authentication handler.
"""
def __init__(self, **kwargs):
"""
@param kwargs: Keyword arguments.
- B{proxy} - An http proxy to be specified on requests.
The proxy is defined as {protocol:proxy,}
- type: I{dict}
- default: {}
- B{timeout} - Set the url open timeout (seconds).
- type: I{float}
- default: 90
- B{username} - The username used for http authentication.
- type: I{str}
- default: None
- B{password} - The password used for http authentication.
- type: I{str}
- default: None
"""
HttpTransport.__init__(self, **kwargs)
self.pm = u2.HTTPPasswordMgrWithDefaultRealm()
def open(self, request):
self.addcredentials(request)
return HttpTransport.open(self, request)
def send(self, request):
self.addcredentials(request)
return HttpTransport.send(self, request)
def addcredentials(self, request):
credentials = self.credentials()
if not (None in credentials):
u = credentials[0]
p = credentials[1]
self.pm.add_password(None, request.url, u, p)
def credentials(self):
return (self.options.username, self.options.password)
def u2handlers(self):
handlers = HttpTransport.u2handlers(self)
handlers.append(u2.HTTPBasicAuthHandler(self.pm))
return handlers
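# Hedged usage sketch (the WSDL URL and credentials are placeholders, and
# suds.client.Client is assumed from the surrounding suds package):
#
#   from suds.client import Client
#   t = HttpAuthenticated(username='user', password='secret', timeout=30)
#   client = Client('https://example.com/service?wsdl', transport=t)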
class WindowsHttpAuthenticated(HttpAuthenticated):
"""
Provides Windows (NTLM) http authentication.
@ivar pm: The password manager.
@ivar handler: The authentication handler.
@author: Christopher Bess
"""
def u2handlers(self):
# try to import ntlm support
try:
from ntlm import HTTPNtlmAuthHandler
except ImportError:
raise Exception("Cannot import python-ntlm module")
handlers = HttpTransport.u2handlers(self)
handlers.append(HTTPNtlmAuthHandler.HTTPNtlmAuthHandler(self.pm))
return handlers
| gpl-3.0 |
semonte/intellij-community | python/helpers/pydev/pydevd_attach_to_process/winappdbg/win32/wtsapi32.py | 102 | 11164 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2009-2014, Mario Vilas
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice,this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Wrapper for wtsapi32.dll in ctypes.
"""
__revision__ = "$Id$"
from winappdbg.win32.defines import *
from winappdbg.win32.advapi32 import *
#==============================================================================
# This is used later on to calculate the list of exported symbols.
_all = None
_all = set(vars().keys())
#==============================================================================
#--- Constants ----------------------------------------------------------------
WTS_CURRENT_SERVER_HANDLE = 0
WTS_CURRENT_SESSION = 1
#--- WTS_PROCESS_INFO structure -----------------------------------------------
# typedef struct _WTS_PROCESS_INFO {
# DWORD SessionId;
# DWORD ProcessId;
# LPTSTR pProcessName;
# PSID pUserSid;
# } WTS_PROCESS_INFO, *PWTS_PROCESS_INFO;
class WTS_PROCESS_INFOA(Structure):
_fields_ = [
("SessionId", DWORD),
("ProcessId", DWORD),
("pProcessName", LPSTR),
("pUserSid", PSID),
]
PWTS_PROCESS_INFOA = POINTER(WTS_PROCESS_INFOA)
class WTS_PROCESS_INFOW(Structure):
_fields_ = [
("SessionId", DWORD),
("ProcessId", DWORD),
("pProcessName", LPWSTR),
("pUserSid", PSID),
]
PWTS_PROCESS_INFOW = POINTER(WTS_PROCESS_INFOW)
#--- WTSQuerySessionInformation enums and structures --------------------------
# typedef enum _WTS_INFO_CLASS {
# WTSInitialProgram = 0,
# WTSApplicationName = 1,
# WTSWorkingDirectory = 2,
# WTSOEMId = 3,
# WTSSessionId = 4,
# WTSUserName = 5,
# WTSWinStationName = 6,
# WTSDomainName = 7,
# WTSConnectState = 8,
# WTSClientBuildNumber = 9,
# WTSClientName = 10,
# WTSClientDirectory = 11,
# WTSClientProductId = 12,
# WTSClientHardwareId = 13,
# WTSClientAddress = 14,
# WTSClientDisplay = 15,
# WTSClientProtocolType = 16,
# WTSIdleTime = 17,
# WTSLogonTime = 18,
# WTSIncomingBytes = 19,
# WTSOutgoingBytes = 20,
# WTSIncomingFrames = 21,
# WTSOutgoingFrames = 22,
# WTSClientInfo = 23,
# WTSSessionInfo = 24,
# WTSSessionInfoEx = 25,
# WTSConfigInfo = 26,
# WTSValidationInfo = 27,
# WTSSessionAddressV4 = 28,
# WTSIsRemoteSession = 29
# } WTS_INFO_CLASS;
WTSInitialProgram = 0
WTSApplicationName = 1
WTSWorkingDirectory = 2
WTSOEMId = 3
WTSSessionId = 4
WTSUserName = 5
WTSWinStationName = 6
WTSDomainName = 7
WTSConnectState = 8
WTSClientBuildNumber = 9
WTSClientName = 10
WTSClientDirectory = 11
WTSClientProductId = 12
WTSClientHardwareId = 13
WTSClientAddress = 14
WTSClientDisplay = 15
WTSClientProtocolType = 16
WTSIdleTime = 17
WTSLogonTime = 18
WTSIncomingBytes = 19
WTSOutgoingBytes = 20
WTSIncomingFrames = 21
WTSOutgoingFrames = 22
WTSClientInfo = 23
WTSSessionInfo = 24
WTSSessionInfoEx = 25
WTSConfigInfo = 26
WTSValidationInfo = 27
WTSSessionAddressV4 = 28
WTSIsRemoteSession = 29
WTS_INFO_CLASS = ctypes.c_int
# typedef enum _WTS_CONNECTSTATE_CLASS {
# WTSActive,
# WTSConnected,
# WTSConnectQuery,
# WTSShadow,
# WTSDisconnected,
# WTSIdle,
# WTSListen,
# WTSReset,
# WTSDown,
# WTSInit
# } WTS_CONNECTSTATE_CLASS;
WTSActive = 0
WTSConnected = 1
WTSConnectQuery = 2
WTSShadow = 3
WTSDisconnected = 4
WTSIdle = 5
WTSListen = 6
WTSReset = 7
WTSDown = 8
WTSInit = 9
WTS_CONNECTSTATE_CLASS = ctypes.c_int
# typedef struct _WTS_CLIENT_DISPLAY {
# DWORD HorizontalResolution;
# DWORD VerticalResolution;
# DWORD ColorDepth;
# } WTS_CLIENT_DISPLAY, *PWTS_CLIENT_DISPLAY;
class WTS_CLIENT_DISPLAY(Structure):
_fields_ = [
("HorizontalResolution", DWORD),
("VerticalResolution", DWORD),
("ColorDepth", DWORD),
]
PWTS_CLIENT_DISPLAY = POINTER(WTS_CLIENT_DISPLAY)
# typedef struct _WTS_CLIENT_ADDRESS {
# DWORD AddressFamily;
# BYTE Address[20];
# } WTS_CLIENT_ADDRESS, *PWTS_CLIENT_ADDRESS;
# XXX TODO
# typedef struct _WTSCLIENT {
# WCHAR ClientName[CLIENTNAME_LENGTH + 1];
# WCHAR Domain[DOMAIN_LENGTH + 1 ];
# WCHAR UserName[USERNAME_LENGTH + 1];
# WCHAR WorkDirectory[MAX_PATH + 1];
# WCHAR InitialProgram[MAX_PATH + 1];
# BYTE EncryptionLevel;
# ULONG ClientAddressFamily;
# USHORT ClientAddress[CLIENTADDRESS_LENGTH + 1];
# USHORT HRes;
# USHORT VRes;
# USHORT ColorDepth;
# WCHAR ClientDirectory[MAX_PATH + 1];
# ULONG ClientBuildNumber;
# ULONG ClientHardwareId;
# USHORT ClientProductId;
# USHORT OutBufCountHost;
# USHORT OutBufCountClient;
# USHORT OutBufLength;
# WCHAR DeviceId[MAX_PATH + 1];
# } WTSCLIENT, *PWTSCLIENT;
# XXX TODO
# typedef struct _WTSINFO {
# WTS_CONNECTSTATE_CLASS State;
# DWORD SessionId;
# DWORD IncomingBytes;
# DWORD OutgoingBytes;
# DWORD IncomingCompressedBytes;
# DWORD OutgoingCompressedBytes;
# WCHAR WinStationName;
# WCHAR Domain;
# WCHAR UserName;
# LARGE_INTEGER ConnectTime;
# LARGE_INTEGER DisconnectTime;
# LARGE_INTEGER LastInputTime;
# LARGE_INTEGER LogonTime;
# LARGE_INTEGER CurrentTime;
# } WTSINFO, *PWTSINFO;
# XXX TODO
# typedef struct _WTSINFOEX {
# DWORD Level;
# WTSINFOEX_LEVEL Data;
# } WTSINFOEX, *PWTSINFOEX;
# XXX TODO
#--- wtsapi32.dll -------------------------------------------------------------
# void WTSFreeMemory(
# __in PVOID pMemory
# );
def WTSFreeMemory(pMemory):
_WTSFreeMemory = windll.wtsapi32.WTSFreeMemory
_WTSFreeMemory.argtypes = [PVOID]
_WTSFreeMemory.restype = None
_WTSFreeMemory(pMemory)
# BOOL WTSEnumerateProcesses(
# __in HANDLE hServer,
# __in DWORD Reserved,
# __in DWORD Version,
# __out PWTS_PROCESS_INFO *ppProcessInfo,
# __out DWORD *pCount
# );
def WTSEnumerateProcessesA(hServer = WTS_CURRENT_SERVER_HANDLE):
_WTSEnumerateProcessesA = windll.wtsapi32.WTSEnumerateProcessesA
_WTSEnumerateProcessesA.argtypes = [HANDLE, DWORD, DWORD, POINTER(PWTS_PROCESS_INFOA), PDWORD]
_WTSEnumerateProcessesA.restype = bool
_WTSEnumerateProcessesA.errcheck = RaiseIfZero
pProcessInfo = PWTS_PROCESS_INFOA()
Count = DWORD(0)
_WTSEnumerateProcessesA(hServer, 0, 1, byref(pProcessInfo), byref(Count))
return pProcessInfo, Count.value
def WTSEnumerateProcessesW(hServer = WTS_CURRENT_SERVER_HANDLE):
_WTSEnumerateProcessesW = windll.wtsapi32.WTSEnumerateProcessesW
_WTSEnumerateProcessesW.argtypes = [HANDLE, DWORD, DWORD, POINTER(PWTS_PROCESS_INFOW), PDWORD]
_WTSEnumerateProcessesW.restype = bool
_WTSEnumerateProcessesW.errcheck = RaiseIfZero
pProcessInfo = PWTS_PROCESS_INFOW()
Count = DWORD(0)
_WTSEnumerateProcessesW(hServer, 0, 1, byref(pProcessInfo), byref(Count))
return pProcessInfo, Count.value
WTSEnumerateProcesses = DefaultStringType(WTSEnumerateProcessesA, WTSEnumerateProcessesW)
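# Hedged usage sketch (requires a Windows host; the names are the wrappers
# just defined, and the caller is responsible for freeing the buffer):
#
#   pProcessInfo, count = WTSEnumerateProcesses()
#   try:
#       for i in range(count):
#           print(pProcessInfo[i].ProcessId, pProcessInfo[i].pProcessName)
#   finally:
#       WTSFreeMemory(pProcessInfo)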
# BOOL WTSTerminateProcess(
# __in HANDLE hServer,
# __in DWORD ProcessId,
# __in DWORD ExitCode
# );
def WTSTerminateProcess(hServer, ProcessId, ExitCode):
_WTSTerminateProcess = windll.wtsapi32.WTSTerminateProcess
_WTSTerminateProcess.argtypes = [HANDLE, DWORD, DWORD]
_WTSTerminateProcess.restype = bool
_WTSTerminateProcess.errcheck = RaiseIfZero
_WTSTerminateProcess(hServer, ProcessId, ExitCode)
# BOOL WTSQuerySessionInformation(
# __in HANDLE hServer,
# __in DWORD SessionId,
# __in WTS_INFO_CLASS WTSInfoClass,
# __out LPTSTR *ppBuffer,
# __out DWORD *pBytesReturned
# );
# XXX TODO
#--- kernel32.dll -------------------------------------------------------------
# I've no idea why these functions are in kernel32.dll instead of wtsapi32.dll
# BOOL ProcessIdToSessionId(
# __in DWORD dwProcessId,
# __out DWORD *pSessionId
# );
def ProcessIdToSessionId(dwProcessId):
_ProcessIdToSessionId = windll.kernel32.ProcessIdToSessionId
_ProcessIdToSessionId.argtypes = [DWORD, PDWORD]
_ProcessIdToSessionId.restype = bool
_ProcessIdToSessionId.errcheck = RaiseIfZero
dwSessionId = DWORD(0)
_ProcessIdToSessionId(dwProcessId, byref(dwSessionId))
return dwSessionId.value
# DWORD WTSGetActiveConsoleSessionId(void);
def WTSGetActiveConsoleSessionId():
_WTSGetActiveConsoleSessionId = windll.kernel32.WTSGetActiveConsoleSessionId
_WTSGetActiveConsoleSessionId.argtypes = []
_WTSGetActiveConsoleSessionId.restype = DWORD
_WTSGetActiveConsoleSessionId.errcheck = RaiseIfZero
return _WTSGetActiveConsoleSessionId()
#==============================================================================
# This calculates the list of exported symbols.
_all = set(vars().keys()).difference(_all)
__all__ = [_x for _x in _all if not _x.startswith('_')]
__all__.sort()
#==============================================================================
| apache-2.0 |
dtroyer/python-openstackclient | openstackclient/common/versions.py | 2 | 3712 | # Copyright 2018 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Versions Action Implementation"""
from osc_lib.command import command
from openstackclient.i18n import _
class ShowVersions(command.Lister):
_description = _("Show available versions of services")
def get_parser(self, prog_name):
parser = super(ShowVersions, self).get_parser(prog_name)
interface_group = parser.add_mutually_exclusive_group()
interface_group.add_argument(
"--all-interfaces",
dest="is_all_interfaces",
action="store_true",
default=False,
help=_("Show values for all interfaces"),
)
interface_group.add_argument(
'--interface',
default='public',
metavar='<interface>',
help=_('Show versions for a specific interface.'),
)
parser.add_argument(
'--region-name',
metavar='<region_name>',
help=_('Show versions for a specific region.'),
)
parser.add_argument(
'--service',
metavar='<service>',
help=_('Show versions for a specific service. The argument should '
'be either an exact match to what is in the catalog or a '
'known official value or alias from '
'service-types-authority '
'(https://service-types.openstack.org/)'),
)
parser.add_argument(
'--status',
metavar='<status>',
help=_("""Show versions for a specific status. Valid values are:
- SUPPORTED
- CURRENT
- DEPRECATED
- EXPERIMENTAL""")
)
return parser
def take_action(self, parsed_args):
interface = parsed_args.interface
if parsed_args.is_all_interfaces:
interface = None
session = self.app.client_manager.session
version_data = session.get_all_version_data(
interface=interface,
region_name=parsed_args.region_name,
service_type=parsed_args.service)
columns = [
"Region Name",
"Service Type",
"Version",
"Status",
"Endpoint",
"Min Microversion",
"Max Microversion",
]
status = parsed_args.status
if status:
status = status.upper()
versions = []
for region_name, interfaces in version_data.items():
for interface, services in interfaces.items():
for service_type, service_versions in services.items():
for data in service_versions:
if status and status != data['status']:
continue
versions.append((
region_name,
service_type,
data['version'],
data['status'],
data['url'],
data['min_microversion'],
data['max_microversion'],
))
return (columns, versions)
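# Hedged CLI sketch (assumes this class is registered as 'versions show';
# the flag names mirror the parser defined above):
#
#   openstack versions show --service identity --interface public \
#       --status CURRENT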
| apache-2.0 |
ericvandenbergfb/spark | python/pyspark/util.py | 49 | 1424 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__all__ = []
def _exception_message(excp):
"""Return the message from an exception as either a str or unicode object. Supports both
Python 2 and Python 3.
>>> msg = "Exception message"
>>> excp = Exception(msg)
>>> msg == _exception_message(excp)
True
>>> msg = u"unicΓΆde"
>>> excp = Exception(msg)
>>> msg == _exception_message(excp)
True
"""
if hasattr(excp, "message"):
return excp.message
return str(excp)
if __name__ == "__main__":
import doctest
(failure_count, test_count) = doctest.testmod()
if failure_count:
exit(-1)
| apache-2.0 |
CitizenB/ansible | test/units/plugins/action/test_action.py | 25 | 2537 | # (c) 2015, Florian Apolloner <florian@apolloner.eu>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible import constants as C
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import Mock
from ansible.playbook.play_context import PlayContext
from ansible.plugins.action import ActionBase
class TestActionBase(unittest.TestCase):
class DerivedActionBase(ActionBase):
def run(self, tmp=None, task_vars=None):
# We're not testing the plugin run() method, just the helper
# methods ActionBase defines
return dict()
def test_sudo_only_if_user_differs(self):
play_context = PlayContext()
action_base = self.DerivedActionBase(None, None, play_context, None, None, None)
action_base._connection = Mock(exec_command=Mock(return_value=(0, '', '')))
play_context.become = True
play_context.become_user = play_context.remote_user = 'root'
play_context.make_become_cmd = Mock(return_value='CMD')
action_base._low_level_execute_command('ECHO', sudoable=True)
play_context.make_become_cmd.assert_not_called()
play_context.remote_user = 'apo'
action_base._low_level_execute_command('ECHO', sudoable=True)
play_context.make_become_cmd.assert_called_once_with('ECHO', executable=None)
play_context.make_become_cmd.reset_mock()
become_allow_same_user = C.BECOME_ALLOW_SAME_USER
C.BECOME_ALLOW_SAME_USER = True
try:
play_context.remote_user = 'root'
action_base._low_level_execute_command('ECHO SAME', sudoable=True)
play_context.make_become_cmd.assert_called_once_with('ECHO SAME', executable=None)
finally:
C.BECOME_ALLOW_SAME_USER = become_allow_same_user
| gpl-3.0 |
RO-ny9/python-for-android | python3-alpha/python3-src/Lib/_pyio.py | 45 | 69934 | """
Python implementation of the io module.
"""
import os
import abc
import codecs
import warnings
# Import _thread instead of threading to reduce startup cost
try:
from _thread import allocate_lock as Lock
except ImportError:
from _dummy_thread import allocate_lock as Lock
import io
from io import (__all__, SEEK_SET, SEEK_CUR, SEEK_END)
from errno import EINTR
# open() uses st_blksize whenever we can
DEFAULT_BUFFER_SIZE = 8 * 1024 # bytes
# NOTE: Base classes defined here are registered with the "official" ABCs
# defined in io.py. We don't use real inheritance though, because we don't
# want to inherit the C implementations.
class BlockingIOError(IOError):
"""Exception raised when I/O would block on a non-blocking I/O stream."""
def __init__(self, errno, strerror, characters_written=0):
super().__init__(errno, strerror)
if not isinstance(characters_written, int):
raise TypeError("characters_written must be a integer")
self.characters_written = characters_written
def open(file, mode="r", buffering=-1, encoding=None, errors=None,
newline=None, closefd=True):
r"""Open file and return a stream. Raise IOError upon failure.
file is either a text or byte string giving the name (and the path
if the file isn't in the current working directory) of the file to
be opened or an integer file descriptor of the file to be
wrapped. (If a file descriptor is given, it is closed when the
returned I/O object is closed, unless closefd is set to False.)
mode is an optional string that specifies the mode in which the file
is opened. It defaults to 'r' which means open for reading in text
mode. Other common values are 'w' for writing (truncating the file if
it already exists), and 'a' for appending (which on some Unix systems,
means that all writes append to the end of the file regardless of the
current seek position). In text mode, if encoding is not specified the
encoding used is platform dependent. (For reading and writing raw
bytes use binary mode and leave encoding unspecified.) The available
modes are:
========= ===============================================================
Character Meaning
--------- ---------------------------------------------------------------
'r' open for reading (default)
'w' open for writing, truncating the file first
'a' open for writing, appending to the end of the file if it exists
'b' binary mode
't' text mode (default)
'+' open a disk file for updating (reading and writing)
'U' universal newline mode (for backwards compatibility; unneeded
for new code)
========= ===============================================================
The default mode is 'rt' (open for reading text). For binary random
access, the mode 'w+b' opens and truncates the file to 0 bytes, while
'r+b' opens the file without truncation.
Python distinguishes between files opened in binary and text modes,
even when the underlying operating system doesn't. Files opened in
binary mode (appending 'b' to the mode argument) return contents as
bytes objects without any decoding. In text mode (the default, or when
't' is appended to the mode argument), the contents of the file are
returned as strings, the bytes having been first decoded using a
platform-dependent encoding or using the specified encoding if given.
buffering is an optional integer used to set the buffering policy.
Pass 0 to switch buffering off (only allowed in binary mode), 1 to select
line buffering (only usable in text mode), and an integer > 1 to indicate
the size of a fixed-size chunk buffer. When no buffering argument is
given, the default buffering policy works as follows:
* Binary files are buffered in fixed-size chunks; the size of the buffer
is chosen using a heuristic trying to determine the underlying device's
"block size" and falling back on `io.DEFAULT_BUFFER_SIZE`.
On many systems, the buffer will typically be 4096 or 8192 bytes long.
* "Interactive" text files (files for which isatty() returns True)
use line buffering. Other text files use the policy described above
for binary files.
encoding is the str name of the encoding used to decode or encode the
file. This should only be used in text mode. The default encoding is
platform dependent, but any encoding supported by Python can be
passed. See the codecs module for the list of supported encodings.
errors is an optional string that specifies how encoding errors are to
be handled---this argument should not be used in binary mode. Pass
'strict' to raise a ValueError exception if there is an encoding error
(the default of None has the same effect), or pass 'ignore' to ignore
errors. (Note that ignoring encoding errors can lead to data loss.)
See the documentation for codecs.register for a list of the permitted
encoding error strings.
newline is a string controlling how universal newlines works (it only
applies to text mode). It can be None, '', '\n', '\r', and '\r\n'. It works
as follows:
* On input, if newline is None, universal newlines mode is
enabled. Lines in the input can end in '\n', '\r', or '\r\n', and
these are translated into '\n' before being returned to the
caller. If it is '', universal newline mode is enabled, but line
endings are returned to the caller untranslated. If it has any of
the other legal values, input lines are only terminated by the given
string, and the line ending is returned to the caller untranslated.
* On output, if newline is None, any '\n' characters written are
translated to the system default line separator, os.linesep. If
newline is '', no translation takes place. If newline is any of the
other legal values, any '\n' characters written are translated to
the given string.
closefd is a bool. If closefd is False, the underlying file descriptor will
be kept open when the file is closed. This does not work when a file name is
given and must be True in that case.
open() returns a file object whose type depends on the mode, and
through which the standard file operations such as reading and writing
are performed. When open() is used to open a file in a text mode ('w',
'r', 'wt', 'rt', etc.), it returns a TextIOWrapper. When used to open
a file in a binary mode, the returned class varies: in read binary
mode, it returns a BufferedReader; in write binary and append binary
modes, it returns a BufferedWriter, and in read/write mode, it returns
a BufferedRandom.
It is also possible to use a string or bytearray as a file for both
reading and writing. For strings StringIO can be used like a file
opened in a text mode, and for bytes a BytesIO can be used like a file
opened in a binary mode.
"""
if not isinstance(file, (str, bytes, int)):
raise TypeError("invalid file: %r" % file)
if not isinstance(mode, str):
raise TypeError("invalid mode: %r" % mode)
if not isinstance(buffering, int):
raise TypeError("invalid buffering: %r" % buffering)
if encoding is not None and not isinstance(encoding, str):
raise TypeError("invalid encoding: %r" % encoding)
if errors is not None and not isinstance(errors, str):
raise TypeError("invalid errors: %r" % errors)
modes = set(mode)
if modes - set("arwb+tU") or len(mode) > len(modes):
raise ValueError("invalid mode: %r" % mode)
reading = "r" in modes
writing = "w" in modes
appending = "a" in modes
updating = "+" in modes
text = "t" in modes
binary = "b" in modes
if "U" in modes:
if writing or appending:
raise ValueError("can't use U and writing mode at once")
reading = True
if text and binary:
raise ValueError("can't have text and binary mode at once")
if reading + writing + appending > 1:
raise ValueError("can't have read/write/append mode at once")
if not (reading or writing or appending):
raise ValueError("must have exactly one of read/write/append mode")
if binary and encoding is not None:
raise ValueError("binary mode doesn't take an encoding argument")
if binary and errors is not None:
raise ValueError("binary mode doesn't take an errors argument")
if binary and newline is not None:
raise ValueError("binary mode doesn't take a newline argument")
raw = FileIO(file,
(reading and "r" or "") +
(writing and "w" or "") +
(appending and "a" or "") +
(updating and "+" or ""),
closefd)
line_buffering = False
if buffering == 1 or buffering < 0 and raw.isatty():
buffering = -1
line_buffering = True
if buffering < 0:
buffering = DEFAULT_BUFFER_SIZE
try:
bs = os.fstat(raw.fileno()).st_blksize
except (os.error, AttributeError):
pass
else:
if bs > 1:
buffering = bs
if buffering < 0:
raise ValueError("invalid buffering size")
if buffering == 0:
if binary:
return raw
raise ValueError("can't have unbuffered text I/O")
if updating:
buffer = BufferedRandom(raw, buffering)
elif writing or appending:
buffer = BufferedWriter(raw, buffering)
elif reading:
buffer = BufferedReader(raw, buffering)
else:
raise ValueError("unknown mode: %r" % mode)
if binary:
return buffer
text = TextIOWrapper(buffer, encoding, errors, newline, line_buffering)
text.mode = mode
return text
class DocDescriptor:
"""Helper for builtins.open.__doc__
"""
def __get__(self, obj, typ):
return (
"open(file, mode='r', buffering=-1, encoding=None, "
"errors=None, newline=None, closefd=True)\n\n" +
open.__doc__)
class OpenWrapper:
"""Wrapper for builtins.open
Trick so that open won't become a bound method when stored
as a class variable (as dbm.dumb does).
See initstdio() in Python/pythonrun.c.
"""
__doc__ = DocDescriptor()
def __new__(cls, *args, **kwargs):
return open(*args, **kwargs)
# In normal operation, both `UnsupportedOperation`s should be bound to the
# same object.
try:
UnsupportedOperation = io.UnsupportedOperation
except AttributeError:
class UnsupportedOperation(ValueError, IOError):
pass
class IOBase(metaclass=abc.ABCMeta):
"""The abstract base class for all I/O classes, acting on streams of
bytes. There is no public constructor.
This class provides dummy implementations for many methods that
derived classes can override selectively; the default implementations
represent a file that cannot be read, written or seeked.
Even though IOBase does not declare read, readinto, or write because
their signatures will vary, implementations and clients should
consider those methods part of the interface. Also, implementations
may raise UnsupportedOperation when operations they do not support are
called.
The basic type used for binary data read from or written to a file is
bytes. bytearrays are accepted too, and in some cases (such as
readinto) needed. Text I/O classes work with str data.
Note that calling any method (even inquiries) on a closed stream is
undefined. Implementations may raise IOError in this case.
IOBase (and its subclasses) support the iterator protocol, meaning
that an IOBase object can be iterated over yielding the lines in a
stream.
IOBase also supports the :keyword:`with` statement. In this example,
fp is closed after the suite of the with statement is complete:
with open('spam.txt', 'r') as fp:
fp.write('Spam and eggs!')
"""
### Internal ###
def _unsupported(self, name):
"""Internal: raise an IOError exception for unsupported operations."""
raise UnsupportedOperation("%s.%s() not supported" %
(self.__class__.__name__, name))
### Positioning ###
def seek(self, pos, whence=0):
"""Change stream position.
Change the stream position to byte offset offset. offset is
interpreted relative to the position indicated by whence. Values
for whence are ints:
* 0 -- start of stream (the default); offset should be zero or positive
* 1 -- current stream position; offset may be negative
* 2 -- end of stream; offset is usually negative
Return an int indicating the new absolute position.
"""
self._unsupported("seek")
def tell(self):
"""Return an int indicating the current stream position."""
return self.seek(0, 1)
def truncate(self, pos=None):
"""Truncate file to size bytes.
Size defaults to the current IO position as reported by tell(). Return
the new size.
"""
self._unsupported("truncate")
### Flush and close ###
def flush(self):
"""Flush write buffers, if applicable.
This is not implemented for read-only and non-blocking streams.
"""
self._checkClosed()
# XXX Should this return the number of bytes written???
__closed = False
def close(self):
"""Flush and close the IO object.
This method has no effect if the file is already closed.
"""
if not self.__closed:
self.flush()
self.__closed = True
def __del__(self):
"""Destructor. Calls close()."""
# The try/except block is in case this is called at program
# exit time, when it's possible that globals have already been
# deleted, and then the close() call might fail. Since
# there's nothing we can do about such failures and they annoy
# the end users, we suppress the traceback.
try:
self.close()
except:
pass
### Inquiries ###
def seekable(self):
"""Return a bool indicating whether object supports random access.
If False, seek(), tell() and truncate() will raise UnsupportedOperation.
This method may need to do a test seek().
"""
return False
def _checkSeekable(self, msg=None):
"""Internal: raise UnsupportedOperation if file is not seekable
"""
if not self.seekable():
raise UnsupportedOperation("File or stream is not seekable."
if msg is None else msg)
def readable(self):
"""Return a bool indicating whether object was opened for reading.
If False, read() will raise UnsupportedOperation.
"""
return False
def _checkReadable(self, msg=None):
"""Internal: raise UnsupportedOperation if file is not readable
"""
if not self.readable():
raise UnsupportedOperation("File or stream is not readable."
if msg is None else msg)
def writable(self):
"""Return a bool indicating whether object was opened for writing.
If False, write() and truncate() will raise UnsupportedOperation.
"""
return False
def _checkWritable(self, msg=None):
"""Internal: raise UnsupportedOperation if file is not writable
"""
if not self.writable():
raise UnsupportedOperation("File or stream is not writable."
if msg is None else msg)
@property
def closed(self):
"""closed: bool. True iff the file has been closed.
For backwards compatibility, this is a property, not a predicate.
"""
return self.__closed
def _checkClosed(self, msg=None):
"""Internal: raise an ValueError if file is closed
"""
if self.closed:
raise ValueError("I/O operation on closed file."
if msg is None else msg)
### Context manager ###
def __enter__(self): # That's a forward reference
"""Context management protocol. Returns self (an instance of IOBase)."""
self._checkClosed()
return self
def __exit__(self, *args):
"""Context management protocol. Calls close()"""
self.close()
### Lower-level APIs ###
# XXX Should these be present even if unimplemented?
def fileno(self):
"""Returns underlying file descriptor (an int) if one exists.
An IOError is raised if the IO object does not use a file descriptor.
"""
self._unsupported("fileno")
def isatty(self):
"""Return a bool indicating whether this is an 'interactive' stream.
Return False if it can't be determined.
"""
self._checkClosed()
return False
### Readline[s] and writelines ###
def readline(self, limit=-1):
r"""Read and return a line of bytes from the stream.
If limit is specified, at most limit bytes will be read.
Limit should be an int.
The line terminator is always b'\n' for binary files; for text
files, the newlines argument to open can be used to select the line
terminator(s) recognized.
"""
# For backwards compatibility, a (slowish) readline().
if hasattr(self, "peek"):
def nreadahead():
readahead = self.peek(1)
if not readahead:
return 1
n = (readahead.find(b"\n") + 1) or len(readahead)
if limit >= 0:
n = min(n, limit)
return n
else:
def nreadahead():
return 1
if limit is None:
limit = -1
elif not isinstance(limit, int):
raise TypeError("limit must be an integer")
res = bytearray()
while limit < 0 or len(res) < limit:
b = self.read(nreadahead())
if not b:
break
res += b
if res.endswith(b"\n"):
break
return bytes(res)
def __iter__(self):
self._checkClosed()
return self
def __next__(self):
line = self.readline()
if not line:
raise StopIteration
return line
def readlines(self, hint=None):
"""Return a list of lines from the stream.
hint can be specified to control the number of lines read: no more
lines will be read if the total size (in bytes/characters) of all
lines so far exceeds hint.
"""
if hint is None or hint <= 0:
return list(self)
n = 0
lines = []
for line in self:
lines.append(line)
n += len(line)
if n >= hint:
break
return lines
def writelines(self, lines):
self._checkClosed()
for line in lines:
self.write(line)
io.IOBase.register(IOBase)
class RawIOBase(IOBase):
"""Base class for raw binary I/O."""
# The read() method is implemented by calling readinto(); derived
# classes that want to support read() only need to implement
# readinto() as a primitive operation. In general, readinto() can be
# more efficient than read().
# (It would be tempting to also provide an implementation of
# readinto() in terms of read(), in case the latter is a more suitable
# primitive operation, but that would lead to nasty recursion in case
# a subclass doesn't implement either.)
def read(self, n=-1):
"""Read and return up to n bytes, where n is an int.
Returns an empty bytes object on EOF, or None if the object is
set not to block and has no data to read.
"""
if n is None:
n = -1
if n < 0:
return self.readall()
b = bytearray(n.__index__())
n = self.readinto(b)
if n is None:
return None
del b[n:]
return bytes(b)
def readall(self):
"""Read until EOF, using multiple read() call."""
res = bytearray()
while True:
data = self.read(DEFAULT_BUFFER_SIZE)
if not data:
break
res += data
if res:
return bytes(res)
else:
# b'' or None
return data
def readinto(self, b):
"""Read up to len(b) bytes into bytearray b.
Returns an int representing the number of bytes read (0 for EOF), or
None if the object is set not to block and has no data to read.
"""
self._unsupported("readinto")
def write(self, b):
"""Write the given buffer to the IO stream.
Returns the number of bytes written, which may be less than len(b).
"""
self._unsupported("write")
io.RawIOBase.register(RawIOBase)
from _io import FileIO
RawIOBase.register(FileIO)
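# Illustrative sketch of the "readinto() as the primitive" contract noted
# above: a hypothetical raw stream that implements only readinto() (plus
# readable()) and inherits working read() and readall() from RawIOBase.
class _SequenceRawIO(RawIOBase):
    """Raw stream serving bytes from an in-memory sequence (demo only)."""
    def __init__(self, data):
        self._data = bytes(data)
        self._pos = 0
    def readable(self):
        return True
    def readinto(self, b):
        chunk = self._data[self._pos:self._pos + len(b)]
        b[:len(chunk)] = chunk
        self._pos += len(chunk)
        return len(chunk)  # 0 signals EOF to read()/readall()
# e.g. _SequenceRawIO(b"spam").readall() == b"spam"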
class BufferedIOBase(IOBase):
"""Base class for buffered IO objects.
The main difference with RawIOBase is that the read() method
supports omitting the size argument, and does not have a default
implementation that defers to readinto().
In addition, read(), readinto() and write() may raise
BlockingIOError if the underlying raw stream is in non-blocking
mode and not ready; unlike their raw counterparts, they will never
return None.
A typical implementation should not inherit from a RawIOBase
implementation, but wrap one.
"""
def read(self, n=None):
"""Read and return up to n bytes, where n is an int.
If the argument is omitted, None, or negative, reads and
returns all data until EOF.
If the argument is positive, and the underlying raw stream is
not 'interactive', multiple raw reads may be issued to satisfy
the byte count (unless EOF is reached first). But for
interactive raw streams (XXX and for pipes?), at most one raw
read will be issued, and a short result does not imply that
EOF is imminent.
Returns an empty bytes array on EOF.
Raises BlockingIOError if the underlying raw stream has no
data at the moment.
"""
self._unsupported("read")
def read1(self, n=None):
"""Read up to n bytes with at most one read() system call,
where n is an int.
"""
self._unsupported("read1")
def readinto(self, b):
"""Read up to len(b) bytes into bytearray b.
Like read(), this may issue multiple reads to the underlying raw
stream, unless the latter is 'interactive'.
Returns an int representing the number of bytes read (0 for EOF).
Raises BlockingIOError if the underlying raw stream has no
data at the moment.
"""
# XXX This ought to work with anything that supports the buffer API
data = self.read(len(b))
n = len(data)
try:
b[:n] = data
except TypeError as err:
import array
if not isinstance(b, array.array):
raise err
b[:n] = array.array('b', data)
return n
def write(self, b):
"""Write the given bytes buffer to the IO stream.
Return the number of bytes written, which is never less than
len(b).
Raises BlockingIOError if the buffer is full and the
underlying raw stream cannot accept more data at the moment.
"""
self._unsupported("write")
def detach(self):
"""
Separate the underlying raw stream from the buffer and return it.
After the raw stream has been detached, the buffer is in an unusable
state.
"""
self._unsupported("detach")
io.BufferedIOBase.register(BufferedIOBase)
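# Illustrative sketch: BufferedIOBase.readinto() above is defined in terms
# of read(), so any subclass with a working read() gets it for free.
# BytesIO (defined below) serves as the concrete stream here.
def _demo_buffered_readinto():
    buf = bytearray(4)
    n = BytesIO(b"abcdef").readinto(buf)
    assert n == 4 and bytes(buf) == b"abcd"
    return n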
class _BufferedIOMixin(BufferedIOBase):
"""A mixin implementation of BufferedIOBase with an underlying raw stream.
This passes most requests on to the underlying raw stream. It
does *not* provide implementations of read(), readinto() or
write().
"""
def __init__(self, raw):
self._raw = raw
### Positioning ###
def seek(self, pos, whence=0):
new_position = self.raw.seek(pos, whence)
if new_position < 0:
raise IOError("seek() returned an invalid position")
return new_position
def tell(self):
pos = self.raw.tell()
if pos < 0:
raise IOError("tell() returned an invalid position")
return pos
def truncate(self, pos=None):
# Flush the stream. We're mixing buffered I/O with lower-level I/O,
# and a flush may be necessary to synch both views of the current
# file state.
self.flush()
if pos is None:
pos = self.tell()
# XXX: Should seek() be used, instead of passing the position
# XXX directly to truncate?
return self.raw.truncate(pos)
### Flush and close ###
def flush(self):
if self.closed:
raise ValueError("flush of closed file")
self.raw.flush()
def close(self):
if self.raw is not None and not self.closed:
self.flush()
self.raw.close()
def detach(self):
if self.raw is None:
raise ValueError("raw stream already detached")
self.flush()
raw = self._raw
self._raw = None
return raw
### Inquiries ###
def seekable(self):
return self.raw.seekable()
def readable(self):
return self.raw.readable()
def writable(self):
return self.raw.writable()
@property
def raw(self):
return self._raw
@property
def closed(self):
return self.raw.closed
@property
def name(self):
return self.raw.name
@property
def mode(self):
return self.raw.mode
def __getstate__(self):
raise TypeError("can not serialize a '{0}' object"
.format(self.__class__.__name__))
def __repr__(self):
clsname = self.__class__.__name__
try:
name = self.name
except AttributeError:
return "<_pyio.{0}>".format(clsname)
else:
return "<_pyio.{0} name={1!r}>".format(clsname, name)
### Lower-level APIs ###
def fileno(self):
return self.raw.fileno()
def isatty(self):
return self.raw.isatty()
class BytesIO(BufferedIOBase):
"""Buffered I/O implementation using an in-memory bytes buffer."""
def __init__(self, initial_bytes=None):
buf = bytearray()
if initial_bytes is not None:
buf += initial_bytes
self._buffer = buf
self._pos = 0
def __getstate__(self):
if self.closed:
raise ValueError("__getstate__ on closed file")
return self.__dict__.copy()
def getvalue(self):
"""Return the bytes value (contents) of the buffer
"""
if self.closed:
raise ValueError("getvalue on closed file")
return bytes(self._buffer)
def getbuffer(self):
"""Return a readable and writable view of the buffer.
"""
return memoryview(self._buffer)
def read(self, n=None):
if self.closed:
raise ValueError("read from closed file")
if n is None:
n = -1
if n < 0:
n = len(self._buffer)
if len(self._buffer) <= self._pos:
return b""
newpos = min(len(self._buffer), self._pos + n)
b = self._buffer[self._pos : newpos]
self._pos = newpos
return bytes(b)
def read1(self, n):
"""This is the same as read.
"""
return self.read(n)
def write(self, b):
if self.closed:
raise ValueError("write to closed file")
if isinstance(b, str):
raise TypeError("can't write str to binary stream")
n = len(b)
if n == 0:
return 0
pos = self._pos
if pos > len(self._buffer):
# Inserts null bytes between the current end of the file
# and the new write position.
padding = b'\x00' * (pos - len(self._buffer))
self._buffer += padding
self._buffer[pos:pos + n] = b
self._pos += n
return n
def seek(self, pos, whence=0):
if self.closed:
raise ValueError("seek on closed file")
try:
pos.__index__
except AttributeError as err:
raise TypeError("an integer is required") from err
if whence == 0:
if pos < 0:
raise ValueError("negative seek position %r" % (pos,))
self._pos = pos
elif whence == 1:
self._pos = max(0, self._pos + pos)
elif whence == 2:
self._pos = max(0, len(self._buffer) + pos)
else:
raise ValueError("invalid whence value")
return self._pos
def tell(self):
if self.closed:
raise ValueError("tell on closed file")
return self._pos
def truncate(self, pos=None):
if self.closed:
raise ValueError("truncate on closed file")
if pos is None:
pos = self._pos
else:
try:
pos.__index__
except AttributeError as err:
raise TypeError("an integer is required") from err
if pos < 0:
raise ValueError("negative truncate position %r" % (pos,))
del self._buffer[pos:]
return pos
def readable(self):
return True
def writable(self):
return True
def seekable(self):
return True
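# Illustrative sketch of the BytesIO positioning semantics implemented
# above: seeking past the end and then writing pads the gap with null
# bytes, and getbuffer() exposes the same storage as a mutable view.
def _demo_bytesio_padding():
    b = BytesIO(b"ab")
    b.seek(5)
    b.write(b"XY")
    assert b.getvalue() == b"ab\x00\x00\x00XY"
    view = b.getbuffer()
    view[0:1] = b"A"  # mutates the underlying buffer in place
    assert b.getvalue().startswith(b"A")
    return b.getvalue()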
class BufferedReader(_BufferedIOMixin):
"""BufferedReader(raw[, buffer_size])
A buffer for a readable, sequential BaseRawIO object.
The constructor creates a BufferedReader for the given readable raw
stream and buffer_size. If buffer_size is omitted, DEFAULT_BUFFER_SIZE
is used.
"""
def __init__(self, raw, buffer_size=DEFAULT_BUFFER_SIZE):
"""Create a new buffered reader using the given readable raw IO object.
"""
if not raw.readable():
raise IOError('"raw" argument must be readable.')
_BufferedIOMixin.__init__(self, raw)
if buffer_size <= 0:
raise ValueError("invalid buffer size")
self.buffer_size = buffer_size
self._reset_read_buf()
self._read_lock = Lock()
def _reset_read_buf(self):
self._read_buf = b""
self._read_pos = 0
def read(self, n=None):
"""Read n bytes.
Returns exactly n bytes of data unless the underlying raw IO
stream reaches EOF or if the call would block in non-blocking
mode. If n is negative, read until EOF or until read() would
block.
"""
if n is not None and n < -1:
raise ValueError("invalid number of bytes to read")
with self._read_lock:
return self._read_unlocked(n)
def _read_unlocked(self, n=None):
nodata_val = b""
empty_values = (b"", None)
buf = self._read_buf
pos = self._read_pos
# Special case for when the number of bytes to read is unspecified.
if n is None or n == -1:
self._reset_read_buf()
chunks = [buf[pos:]] # Strip the consumed bytes.
current_size = 0
while True:
# Read until EOF or until read() would block.
try:
chunk = self.raw.read()
except IOError as e:
if e.errno != EINTR:
raise
continue
if chunk in empty_values:
nodata_val = chunk
break
current_size += len(chunk)
chunks.append(chunk)
return b"".join(chunks) or nodata_val
# The number of bytes to read is specified, return at most n bytes.
avail = len(buf) - pos # Length of the available buffered data.
if n <= avail:
# Fast path: the data to read is fully buffered.
self._read_pos += n
return buf[pos:pos+n]
# Slow path: read from the stream until enough bytes are read,
# or until an EOF occurs or until read() would block.
chunks = [buf[pos:]]
wanted = max(self.buffer_size, n)
while avail < n:
try:
chunk = self.raw.read(wanted)
except IOError as e:
if e.errno != EINTR:
raise
continue
if chunk in empty_values:
nodata_val = chunk
break
avail += len(chunk)
chunks.append(chunk)
# n is more than avail only when an EOF occurred or when
# read() would have blocked.
n = min(n, avail)
out = b"".join(chunks)
self._read_buf = out[n:] # Save the extra data in the buffer.
self._read_pos = 0
return out[:n] if out else nodata_val
def peek(self, n=0):
"""Returns buffered bytes without advancing the position.
The argument indicates a desired minimal number of bytes; we
do at most one raw read to satisfy it. We never return more
than self.buffer_size.
"""
with self._read_lock:
return self._peek_unlocked(n)
def _peek_unlocked(self, n=0):
want = min(n, self.buffer_size)
have = len(self._read_buf) - self._read_pos
if have < want or have <= 0:
to_read = self.buffer_size - have
while True:
try:
current = self.raw.read(to_read)
except IOError as e:
if e.errno != EINTR:
raise
continue
break
if current:
self._read_buf = self._read_buf[self._read_pos:] + current
self._read_pos = 0
return self._read_buf[self._read_pos:]
def read1(self, n):
"""Reads up to n bytes, with at most one read() system call."""
# Returns up to n bytes. If at least one byte is buffered, we
# only return buffered bytes. Otherwise, we do one raw read.
if n < 0:
raise ValueError("number of bytes to read must be positive")
if n == 0:
return b""
with self._read_lock:
self._peek_unlocked(1)
return self._read_unlocked(
min(n, len(self._read_buf) - self._read_pos))
def tell(self):
return _BufferedIOMixin.tell(self) - len(self._read_buf) + self._read_pos
def seek(self, pos, whence=0):
if not (0 <= whence <= 2):
raise ValueError("invalid whence value")
with self._read_lock:
if whence == 1:
pos -= len(self._read_buf) - self._read_pos
pos = _BufferedIOMixin.seek(self, pos, whence)
self._reset_read_buf()
return pos
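# Illustrative sketch of BufferedReader's read-ahead, using a small
# buffer_size so the buffering is visible; BytesIO stands in for the raw
# stream since it provides readable()/read().
def _demo_buffered_reader():
    reader = BufferedReader(BytesIO(b"hello world"), buffer_size=4)
    assert reader.peek(1).startswith(b"h")  # buffered; position unchanged
    assert reader.read(5) == b"hello"
    assert reader.read1(100) == b" wor"     # only already-buffered bytes
    rest = reader.read()
    assert rest == b"ld"
    return rest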
class BufferedWriter(_BufferedIOMixin):
"""A buffer for a writeable sequential RawIO object.
The constructor creates a BufferedWriter for the given writeable raw
stream. If the buffer_size is not given, it defaults to
DEFAULT_BUFFER_SIZE.
"""
_warning_stack_offset = 2
def __init__(self, raw,
buffer_size=DEFAULT_BUFFER_SIZE, max_buffer_size=None):
if not raw.writable():
raise IOError('"raw" argument must be writable.')
_BufferedIOMixin.__init__(self, raw)
if buffer_size <= 0:
raise ValueError("invalid buffer size")
if max_buffer_size is not None:
warnings.warn("max_buffer_size is deprecated", DeprecationWarning,
self._warning_stack_offset)
self.buffer_size = buffer_size
self._write_buf = bytearray()
self._write_lock = Lock()
def write(self, b):
if self.closed:
raise ValueError("write to closed file")
if isinstance(b, str):
raise TypeError("can't write str to binary stream")
with self._write_lock:
# XXX we can implement some more tricks to try and avoid
# partial writes
if len(self._write_buf) > self.buffer_size:
# We're full, so let's pre-flush the buffer
try:
self._flush_unlocked()
except BlockingIOError as e:
# We can't accept anything else.
# XXX Why not just let the exception pass through?
raise BlockingIOError(e.errno, e.strerror, 0)
before = len(self._write_buf)
self._write_buf.extend(b)
written = len(self._write_buf) - before
if len(self._write_buf) > self.buffer_size:
try:
self._flush_unlocked()
except BlockingIOError as e:
if len(self._write_buf) > self.buffer_size:
# We've hit the buffer_size. We have to accept a partial
# write and cut back our buffer.
overage = len(self._write_buf) - self.buffer_size
written -= overage
self._write_buf = self._write_buf[:self.buffer_size]
raise BlockingIOError(e.errno, e.strerror, written)
return written
def truncate(self, pos=None):
with self._write_lock:
self._flush_unlocked()
if pos is None:
pos = self.raw.tell()
return self.raw.truncate(pos)
def flush(self):
with self._write_lock:
self._flush_unlocked()
def _flush_unlocked(self):
if self.closed:
raise ValueError("flush of closed file")
written = 0
try:
while self._write_buf:
try:
n = self.raw.write(self._write_buf)
except IOError as e:
if e.errno != EINTR:
raise
continue
if n > len(self._write_buf) or n < 0:
raise IOError("write() returned incorrect number of bytes")
del self._write_buf[:n]
written += n
except BlockingIOError as e:
n = e.characters_written
del self._write_buf[:n]
written += n
raise BlockingIOError(e.errno, e.strerror, written)
def tell(self):
return _BufferedIOMixin.tell(self) + len(self._write_buf)
def seek(self, pos, whence=0):
if not (0 <= whence <= 2):
raise ValueError("invalid whence")
with self._write_lock:
self._flush_unlocked()
return _BufferedIOMixin.seek(self, pos, whence)
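# Illustrative sketch of BufferedWriter's deferred writes: data sits in
# the internal buffer until it overflows buffer_size or flush() is called.
def _demo_buffered_writer():
    sink = BytesIO()
    writer = BufferedWriter(sink, buffer_size=8)
    writer.write(b"abc")
    assert sink.getvalue() == b""   # nothing reached the raw stream yet
    writer.flush()
    assert sink.getvalue() == b"abc"
    return sink.getvalue()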
class BufferedRWPair(BufferedIOBase):
"""A buffered reader and writer object together.
A buffered reader object and buffered writer object put together to
form a sequential IO object that can read and write. This is typically
used with a socket or two-way pipe.
reader and writer are RawIOBase objects that are readable and
writeable respectively. If the buffer_size is omitted it defaults to
DEFAULT_BUFFER_SIZE.
"""
# XXX The usefulness of this (compared to having two separate IO
# objects) is questionable.
def __init__(self, reader, writer,
buffer_size=DEFAULT_BUFFER_SIZE, max_buffer_size=None):
"""Constructor.
The arguments are two RawIO instances.
"""
if max_buffer_size is not None:
warnings.warn("max_buffer_size is deprecated", DeprecationWarning, 2)
if not reader.readable():
raise IOError('"reader" argument must be readable.')
if not writer.writable():
raise IOError('"writer" argument must be writable.')
self.reader = BufferedReader(reader, buffer_size)
self.writer = BufferedWriter(writer, buffer_size)
def read(self, n=None):
if n is None:
n = -1
return self.reader.read(n)
def readinto(self, b):
return self.reader.readinto(b)
def write(self, b):
return self.writer.write(b)
def peek(self, n=0):
return self.reader.peek(n)
def read1(self, n):
return self.reader.read1(n)
def readable(self):
return self.reader.readable()
def writable(self):
return self.writer.writable()
def flush(self):
return self.writer.flush()
def close(self):
self.writer.close()
self.reader.close()
def isatty(self):
return self.reader.isatty() or self.writer.isatty()
@property
def closed(self):
return self.writer.closed
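# Illustrative sketch of BufferedRWPair delegating reads and writes to two
# independent streams, as with the input and output halves of a socket.
def _demo_rw_pair():
    sink = BytesIO()
    pair = BufferedRWPair(BytesIO(b"ping"), sink)
    assert pair.read(4) == b"ping"
    pair.write(b"pong")
    pair.flush()
    assert sink.getvalue() == b"pong"
    return sink.getvalue()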
class BufferedRandom(BufferedWriter, BufferedReader):
"""A buffered interface to random access streams.
The constructor creates a reader and writer for a seekable stream,
raw, given in the first argument. If the buffer_size is omitted it
defaults to DEFAULT_BUFFER_SIZE.
"""
_warning_stack_offset = 3
def __init__(self, raw,
buffer_size=DEFAULT_BUFFER_SIZE, max_buffer_size=None):
raw._checkSeekable()
BufferedReader.__init__(self, raw, buffer_size)
BufferedWriter.__init__(self, raw, buffer_size, max_buffer_size)
def seek(self, pos, whence=0):
if not (0 <= whence <= 2):
raise ValueError("invalid whence")
self.flush()
if self._read_buf:
# Undo read ahead.
with self._read_lock:
self.raw.seek(self._read_pos - len(self._read_buf), 1)
# First do the raw seek, then empty the read buffer, so that
# if the raw seek fails, we don't lose buffered data forever.
pos = self.raw.seek(pos, whence)
with self._read_lock:
self._reset_read_buf()
if pos < 0:
raise IOError("seek() returned invalid position")
return pos
def tell(self):
if self._write_buf:
return BufferedWriter.tell(self)
else:
return BufferedReader.tell(self)
def truncate(self, pos=None):
if pos is None:
pos = self.tell()
# Use seek to flush the read buffer.
return BufferedWriter.truncate(self, pos)
def read(self, n=None):
if n is None:
n = -1
self.flush()
return BufferedReader.read(self, n)
def readinto(self, b):
self.flush()
return BufferedReader.readinto(self, b)
def peek(self, n=0):
self.flush()
return BufferedReader.peek(self, n)
def read1(self, n):
self.flush()
return BufferedReader.read1(self, n)
def write(self, b):
if self._read_buf:
# Undo readahead
with self._read_lock:
self.raw.seek(self._read_pos - len(self._read_buf), 1)
self._reset_read_buf()
return BufferedWriter.write(self, b)
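# Illustrative sketch of BufferedRandom interleaving reads and writes on a
# single seekable stream; seek() discards the read-ahead so both views of
# the file position stay consistent.
def _demo_buffered_random():
    raw = BytesIO(b"0123456789")
    f = BufferedRandom(raw, buffer_size=4)
    assert f.read(4) == b"0123"
    f.seek(0)
    f.write(b"AB")
    f.flush()
    assert raw.getvalue() == b"AB23456789"
    return raw.getvalue()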
class TextIOBase(IOBase):
"""Base class for text I/O.
This class provides a character and line based interface to stream
I/O. There is no readinto method because Python's character strings
are immutable. There is no public constructor.
"""
def read(self, n=-1):
"""Read at most n characters from stream, where n is an int.
Read from underlying buffer until we have n characters or we hit EOF.
If n is negative or omitted, read until EOF.
Returns a string.
"""
self._unsupported("read")
def write(self, s):
"""Write string s to stream and returning an int."""
self._unsupported("write")
def truncate(self, pos=None):
"""Truncate size to pos, where pos is an int."""
self._unsupported("truncate")
def readline(self):
"""Read until newline or EOF.
Returns an empty string if EOF is hit immediately.
"""
self._unsupported("readline")
def detach(self):
"""
Separate the underlying buffer from the TextIOBase and return it.
After the underlying buffer has been detached, the TextIO is in an
unusable state.
"""
self._unsupported("detach")
@property
def encoding(self):
"""Subclasses should override."""
return None
@property
def newlines(self):
"""Line endings translated so far.
Only line endings translated during reading are considered.
Subclasses should override.
"""
return None
@property
def errors(self):
"""Error setting of the decoder or encoder.
Subclasses should override."""
return None
io.TextIOBase.register(TextIOBase)
class IncrementalNewlineDecoder(codecs.IncrementalDecoder):
r"""Codec used when reading a file in universal newlines mode. It wraps
another incremental decoder, translating \r\n and \r into \n. It also
records the types of newlines encountered. When used with
translate=False, it ensures that the newline sequence is returned in
one piece.
"""
def __init__(self, decoder, translate, errors='strict'):
codecs.IncrementalDecoder.__init__(self, errors=errors)
self.translate = translate
self.decoder = decoder
self.seennl = 0
self.pendingcr = False
def decode(self, input, final=False):
# decode input (with the eventual \r from a previous pass)
if self.decoder is None:
output = input
else:
output = self.decoder.decode(input, final=final)
if self.pendingcr and (output or final):
output = "\r" + output
self.pendingcr = False
# retain last \r even when not translating data:
# then readline() is sure to get \r\n in one pass
if output.endswith("\r") and not final:
output = output[:-1]
self.pendingcr = True
# Record which newlines are read
crlf = output.count('\r\n')
cr = output.count('\r') - crlf
lf = output.count('\n') - crlf
self.seennl |= (lf and self._LF) | (cr and self._CR) \
| (crlf and self._CRLF)
if self.translate:
if crlf:
output = output.replace("\r\n", "\n")
if cr:
output = output.replace("\r", "\n")
return output
def getstate(self):
if self.decoder is None:
buf = b""
flag = 0
else:
buf, flag = self.decoder.getstate()
flag <<= 1
if self.pendingcr:
flag |= 1
return buf, flag
def setstate(self, state):
buf, flag = state
self.pendingcr = bool(flag & 1)
if self.decoder is not None:
self.decoder.setstate((buf, flag >> 1))
def reset(self):
self.seennl = 0
self.pendingcr = False
if self.decoder is not None:
self.decoder.reset()
_LF = 1
_CR = 2
_CRLF = 4
@property
def newlines(self):
return (None,
"\n",
"\r",
("\r", "\n"),
"\r\n",
("\n", "\r\n"),
("\r", "\r\n"),
("\r", "\n", "\r\n")
)[self.seennl]
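# Illustrative sketch of the pendingcr trick described above: a '\r' that
# ends a chunk is held back, so a '\r\n' pair split across two decode()
# calls is still translated (and recorded) as a single newline.
def _demo_newline_decoder():
    dec = IncrementalNewlineDecoder(decoder=None, translate=True)
    out = dec.decode("a\r") + dec.decode("\nb", final=True)
    assert out == "a\nb"
    assert dec.newlines == "\r\n"
    return out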
class TextIOWrapper(TextIOBase):
r"""Character and line based layer over a BufferedIOBase object, buffer.
encoding gives the name of the encoding that the stream will be
decoded or encoded with. It defaults to locale.getpreferredencoding.
errors determines the strictness of encoding and decoding (see
codecs.register) and defaults to "strict".
newline can be None, '', '\n', '\r', or '\r\n'. It controls the
handling of line endings. If it is None, universal newlines is
enabled. With this enabled, on input, the line endings '\n', '\r',
or '\r\n' are translated to '\n' before being returned to the
caller. Conversely, on output, '\n' is translated to the system
default line separator, os.linesep. If newline is any other of its
legal values, that newline becomes the newline when the file is read
and it is returned untranslated. On output, '\n' is converted to the
newline.
If line_buffering is True, a call to flush is implied when a call to
write contains a newline character.
"""
_CHUNK_SIZE = 2048
def __init__(self, buffer, encoding=None, errors=None, newline=None,
line_buffering=False, write_through=False):
if newline is not None and not isinstance(newline, str):
raise TypeError("illegal newline type: %r" % (type(newline),))
if newline not in (None, "", "\n", "\r", "\r\n"):
raise ValueError("illegal newline value: %r" % (newline,))
if encoding is None:
try:
encoding = os.device_encoding(buffer.fileno())
except (AttributeError, UnsupportedOperation):
pass
if encoding is None:
try:
import locale
except ImportError:
# Importing locale may fail if Python is being built
encoding = "ascii"
else:
encoding = locale.getpreferredencoding()
if not isinstance(encoding, str):
raise ValueError("invalid encoding: %r" % encoding)
if errors is None:
errors = "strict"
else:
if not isinstance(errors, str):
raise ValueError("invalid errors: %r" % errors)
self._buffer = buffer
self._line_buffering = line_buffering
self._encoding = encoding
self._errors = errors
self._readuniversal = not newline
self._readtranslate = newline is None
self._readnl = newline
self._writetranslate = newline != ''
self._writenl = newline or os.linesep
self._encoder = None
self._decoder = None
self._decoded_chars = '' # buffer for text returned from decoder
self._decoded_chars_used = 0 # offset into _decoded_chars for read()
self._snapshot = None # info for reconstructing decoder state
self._seekable = self._telling = self.buffer.seekable()
self._has_read1 = hasattr(self.buffer, 'read1')
if self._seekable and self.writable():
position = self.buffer.tell()
if position != 0:
try:
self._get_encoder().setstate(0)
except LookupError:
# Sometimes the encoder doesn't exist
pass
# self._snapshot is either None, or a tuple (dec_flags, next_input)
# where dec_flags is the second (integer) item of the decoder state
# and next_input is the chunk of input bytes that comes next after the
# snapshot point. We use this to reconstruct decoder states in tell().
# Naming convention:
# - "bytes_..." for integer variables that count input bytes
# - "chars_..." for integer variables that count decoded characters
def __repr__(self):
result = "<_pyio.TextIOWrapper"
try:
name = self.name
except AttributeError:
pass
else:
result += " name={0!r}".format(name)
try:
mode = self.mode
except AttributeError:
pass
else:
result += " mode={0!r}".format(mode)
return result + " encoding={0!r}>".format(self.encoding)
@property
def encoding(self):
return self._encoding
@property
def errors(self):
return self._errors
@property
def line_buffering(self):
return self._line_buffering
@property
def buffer(self):
return self._buffer
def seekable(self):
return self._seekable
def readable(self):
return self.buffer.readable()
def writable(self):
return self.buffer.writable()
def flush(self):
self.buffer.flush()
self._telling = self._seekable
def close(self):
if self.buffer is not None and not self.closed:
self.flush()
self.buffer.close()
@property
def closed(self):
return self.buffer.closed
@property
def name(self):
return self.buffer.name
def fileno(self):
return self.buffer.fileno()
def isatty(self):
return self.buffer.isatty()
def write(self, s):
'Write data, where s is a str'
if self.closed:
raise ValueError("write to closed file")
if not isinstance(s, str):
raise TypeError("can't write %s to text stream" %
s.__class__.__name__)
length = len(s)
haslf = (self._writetranslate or self._line_buffering) and "\n" in s
if haslf and self._writetranslate and self._writenl != "\n":
s = s.replace("\n", self._writenl)
encoder = self._encoder or self._get_encoder()
# XXX What if we were just reading?
b = encoder.encode(s)
self.buffer.write(b)
if self._line_buffering and (haslf or "\r" in s):
self.flush()
self._snapshot = None
if self._decoder:
self._decoder.reset()
return length
def _get_encoder(self):
make_encoder = codecs.getincrementalencoder(self._encoding)
self._encoder = make_encoder(self._errors)
return self._encoder
def _get_decoder(self):
make_decoder = codecs.getincrementaldecoder(self._encoding)
decoder = make_decoder(self._errors)
if self._readuniversal:
decoder = IncrementalNewlineDecoder(decoder, self._readtranslate)
self._decoder = decoder
return decoder
# The following three methods implement an ADT for _decoded_chars.
# Text returned from the decoder is buffered here until the client
# requests it by calling our read() or readline() method.
def _set_decoded_chars(self, chars):
"""Set the _decoded_chars buffer."""
self._decoded_chars = chars
self._decoded_chars_used = 0
def _get_decoded_chars(self, n=None):
"""Advance into the _decoded_chars buffer."""
offset = self._decoded_chars_used
if n is None:
chars = self._decoded_chars[offset:]
else:
chars = self._decoded_chars[offset:offset + n]
self._decoded_chars_used += len(chars)
return chars
def _rewind_decoded_chars(self, n):
"""Rewind the _decoded_chars buffer."""
if self._decoded_chars_used < n:
raise AssertionError("rewind decoded_chars out of bounds")
self._decoded_chars_used -= n
def _read_chunk(self):
"""
Read and decode the next chunk of data from the BufferedReader.
"""
# The return value is True unless EOF was reached. The decoded
# string is placed in self._decoded_chars (replacing its previous
# value). The entire input chunk is sent to the decoder, though
# some of it may remain buffered in the decoder, yet to be
# converted.
if self._decoder is None:
raise ValueError("no decoder")
if self._telling:
# To prepare for tell(), we need to snapshot a point in the
# file where the decoder's input buffer is empty.
dec_buffer, dec_flags = self._decoder.getstate()
# Given this, we know there was a valid snapshot point
# len(dec_buffer) bytes ago with decoder state (b'', dec_flags).
# Read a chunk, decode it, and put the result in self._decoded_chars.
if self._has_read1:
input_chunk = self.buffer.read1(self._CHUNK_SIZE)
else:
input_chunk = self.buffer.read(self._CHUNK_SIZE)
eof = not input_chunk
self._set_decoded_chars(self._decoder.decode(input_chunk, eof))
if self._telling:
# At the snapshot point, len(dec_buffer) bytes before the read,
# the next input to be decoded is dec_buffer + input_chunk.
self._snapshot = (dec_flags, dec_buffer + input_chunk)
return not eof
def _pack_cookie(self, position, dec_flags=0,
bytes_to_feed=0, need_eof=0, chars_to_skip=0):
# The meaning of a tell() cookie is: seek to position, set the
# decoder flags to dec_flags, read bytes_to_feed bytes, feed them
# into the decoder with need_eof as the EOF flag, then skip
# chars_to_skip characters of the decoded result. For most simple
# decoders, tell() will often just give a byte offset in the file.
return (position | (dec_flags<<64) | (bytes_to_feed<<128) |
(chars_to_skip<<192) | bool(need_eof)<<256)
def _unpack_cookie(self, bigint):
rest, position = divmod(bigint, 1<<64)
rest, dec_flags = divmod(rest, 1<<64)
rest, bytes_to_feed = divmod(rest, 1<<64)
need_eof, chars_to_skip = divmod(rest, 1<<64)
return position, dec_flags, bytes_to_feed, need_eof, chars_to_skip
def tell(self):
if not self._seekable:
raise UnsupportedOperation("underlying stream is not seekable")
if not self._telling:
raise IOError("telling position disabled by next() call")
self.flush()
position = self.buffer.tell()
decoder = self._decoder
if decoder is None or self._snapshot is None:
if self._decoded_chars:
# This should never happen.
raise AssertionError("pending decoded text")
return position
# Skip backward to the snapshot point (see _read_chunk).
dec_flags, next_input = self._snapshot
position -= len(next_input)
# How many decoded characters have been used up since the snapshot?
chars_to_skip = self._decoded_chars_used
if chars_to_skip == 0:
# We haven't moved from the snapshot point.
return self._pack_cookie(position, dec_flags)
# Starting from the snapshot position, we will walk the decoder
# forward until it gives us enough decoded characters.
saved_state = decoder.getstate()
try:
# Note our initial start point.
decoder.setstate((b'', dec_flags))
start_pos = position
start_flags, bytes_fed, chars_decoded = dec_flags, 0, 0
need_eof = 0
# Feed the decoder one byte at a time. As we go, note the
# nearest "safe start point" before the current location
# (a point where the decoder has nothing buffered, so seek()
# can safely start from there and advance to this location).
next_byte = bytearray(1)
for next_byte[0] in next_input:
bytes_fed += 1
chars_decoded += len(decoder.decode(next_byte))
dec_buffer, dec_flags = decoder.getstate()
if not dec_buffer and chars_decoded <= chars_to_skip:
# Decoder buffer is empty, so this is a safe start point.
start_pos += bytes_fed
chars_to_skip -= chars_decoded
start_flags, bytes_fed, chars_decoded = dec_flags, 0, 0
if chars_decoded >= chars_to_skip:
break
else:
# We didn't get enough decoded data; signal EOF to get more.
chars_decoded += len(decoder.decode(b'', final=True))
need_eof = 1
if chars_decoded < chars_to_skip:
raise IOError("can't reconstruct logical file position")
# The returned cookie corresponds to the last safe start point.
return self._pack_cookie(
start_pos, start_flags, bytes_fed, need_eof, chars_to_skip)
finally:
decoder.setstate(saved_state)
def truncate(self, pos=None):
self.flush()
if pos is None:
pos = self.tell()
return self.buffer.truncate(pos)
def detach(self):
if self.buffer is None:
raise ValueError("buffer is already detached")
self.flush()
buffer = self._buffer
self._buffer = None
return buffer
def seek(self, cookie, whence=0):
if self.closed:
raise ValueError("tell on closed file")
if not self._seekable:
raise UnsupportedOperation("underlying stream is not seekable")
if whence == 1: # seek relative to current position
if cookie != 0:
raise UnsupportedOperation("can't do nonzero cur-relative seeks")
# Seeking to the current position should attempt to
# sync the underlying buffer with the current position.
whence = 0
cookie = self.tell()
if whence == 2: # seek relative to end of file
if cookie != 0:
raise UnsupportedOperation("can't do nonzero end-relative seeks")
self.flush()
position = self.buffer.seek(0, 2)
self._set_decoded_chars('')
self._snapshot = None
if self._decoder:
self._decoder.reset()
return position
if whence != 0:
raise ValueError("invalid whence (%r, should be 0, 1 or 2)" %
(whence,))
if cookie < 0:
raise ValueError("negative seek position %r" % (cookie,))
self.flush()
# The strategy of seek() is to go back to the safe start point
# and replay the effect of read(chars_to_skip) from there.
start_pos, dec_flags, bytes_to_feed, need_eof, chars_to_skip = \
self._unpack_cookie(cookie)
# Seek back to the safe start point.
self.buffer.seek(start_pos)
self._set_decoded_chars('')
self._snapshot = None
# Restore the decoder to its state from the safe start point.
if cookie == 0 and self._decoder:
self._decoder.reset()
elif self._decoder or dec_flags or chars_to_skip:
self._decoder = self._decoder or self._get_decoder()
self._decoder.setstate((b'', dec_flags))
self._snapshot = (dec_flags, b'')
if chars_to_skip:
# Just like _read_chunk, feed the decoder and save a snapshot.
input_chunk = self.buffer.read(bytes_to_feed)
self._set_decoded_chars(
self._decoder.decode(input_chunk, need_eof))
self._snapshot = (dec_flags, input_chunk)
# Skip chars_to_skip of the decoded characters.
if len(self._decoded_chars) < chars_to_skip:
raise IOError("can't restore logical file position")
self._decoded_chars_used = chars_to_skip
# Finally, reset the encoder (merely useful for proper BOM handling)
try:
encoder = self._encoder or self._get_encoder()
except LookupError:
# Sometimes the encoder doesn't exist
pass
else:
if cookie != 0:
encoder.setstate(0)
else:
encoder.reset()
return cookie
def read(self, n=None):
self._checkReadable()
if n is None:
n = -1
decoder = self._decoder or self._get_decoder()
try:
n.__index__
except AttributeError as err:
raise TypeError("an integer is required") from err
if n < 0:
# Read everything.
result = (self._get_decoded_chars() +
decoder.decode(self.buffer.read(), final=True))
self._set_decoded_chars('')
self._snapshot = None
return result
else:
# Keep reading chunks until we have n characters to return.
eof = False
result = self._get_decoded_chars(n)
while len(result) < n and not eof:
eof = not self._read_chunk()
result += self._get_decoded_chars(n - len(result))
return result
def __next__(self):
self._telling = False
line = self.readline()
if not line:
self._snapshot = None
self._telling = self._seekable
raise StopIteration
return line
def readline(self, limit=None):
if self.closed:
raise ValueError("read from closed file")
if limit is None:
limit = -1
elif not isinstance(limit, int):
raise TypeError("limit must be an integer")
# Grab all the decoded text (we will rewind any extra bits later).
line = self._get_decoded_chars()
start = 0
# Make the decoder if it doesn't already exist.
if not self._decoder:
self._get_decoder()
pos = endpos = None
while True:
if self._readtranslate:
# Newlines are already translated, only search for \n
pos = line.find('\n', start)
if pos >= 0:
endpos = pos + 1
break
else:
start = len(line)
elif self._readuniversal:
# Universal newline search. Find any of \r, \r\n, \n
# The decoder ensures that \r\n are not split in two pieces
# In C we'd look for these in parallel of course.
nlpos = line.find("\n", start)
crpos = line.find("\r", start)
if crpos == -1:
if nlpos == -1:
# Nothing found
start = len(line)
else:
# Found \n
endpos = nlpos + 1
break
elif nlpos == -1:
# Found lone \r
endpos = crpos + 1
break
elif nlpos < crpos:
# Found \n
endpos = nlpos + 1
break
elif nlpos == crpos + 1:
# Found \r\n
endpos = crpos + 2
break
else:
# Found \r
endpos = crpos + 1
break
else:
# non-universal
pos = line.find(self._readnl)
if pos >= 0:
endpos = pos + len(self._readnl)
break
if limit >= 0 and len(line) >= limit:
endpos = limit # reached length limit
break
# No line ending seen yet - get more data.
while self._read_chunk():
if self._decoded_chars:
break
if self._decoded_chars:
line += self._get_decoded_chars()
else:
# end of file
self._set_decoded_chars('')
self._snapshot = None
return line
if limit >= 0 and endpos > limit:
endpos = limit # don't exceed limit
# Rewind _decoded_chars to just after the line ending we found.
self._rewind_decoded_chars(len(line) - endpos)
return line[:endpos]
@property
def newlines(self):
return self._decoder.newlines if self._decoder else None
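# Illustrative sketch of TextIOWrapper layered over BytesIO: bytes are
# decoded with the given encoding and, with newline=None (the default),
# universal newlines are translated to '\n' while the kinds encountered
# are recorded on the newlines property.
def _demo_text_wrapper():
    t = TextIOWrapper(BytesIO(b"caf\xc3\xa9\r\nline 2\n"), encoding="utf-8")
    assert t.readline() == "caf\xe9\n"
    assert t.read() == "line 2\n"
    assert t.newlines == ("\n", "\r\n")  # both endings were seen
    return t.newlines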
class StringIO(TextIOWrapper):
"""Text I/O implementation using an in-memory buffer.
The initial_value argument sets the value of the object. The newline
argument is like the one of TextIOWrapper's constructor.
"""
def __init__(self, initial_value="", newline="\n"):
super(StringIO, self).__init__(BytesIO(),
encoding="utf-8",
errors="strict",
newline=newline)
# Issue #5645: make universal newlines semantics the same as in the
# C version, even under Windows.
if newline is None:
self._writetranslate = False
if initial_value is not None:
if not isinstance(initial_value, str):
raise TypeError("initial_value must be str or None, not {0}"
.format(type(initial_value).__name__))
initial_value = str(initial_value)
self.write(initial_value)
self.seek(0)
def getvalue(self):
self.flush()
return self.buffer.getvalue().decode(self._encoding, self._errors)
def __repr__(self):
# TextIOWrapper tells the encoding in its repr. In StringIO,
# that's an implementation detail.
return object.__repr__(self)
@property
def errors(self):
return None
@property
def encoding(self):
return None
def detach(self):
# This doesn't make sense on StringIO.
self._unsupported("detach")
| apache-2.0 |
ojii/sandlib | lib/lib-python/2.7/plat-mac/lib-scriptpackages/Explorer/__init__.py | 73 | 2265 | """
Package generated from /Applications/Internet Explorer.app
"""
from warnings import warnpy3k
warnpy3k("In 3.x, the Explorer module is removed.", stacklevel=2)
import aetools
Error = aetools.Error
import Standard_Suite
import URL_Suite
import Netscape_Suite
import Microsoft_Internet_Explorer
import Web_Browser_Suite
import Required_Suite
_code_to_module = {
'****' : Standard_Suite,
'GURL' : URL_Suite,
'MOSS' : Netscape_Suite,
'MSIE' : Microsoft_Internet_Explorer,
'WWW!' : Web_Browser_Suite,
'reqd' : Required_Suite,
}
_code_to_fullname = {
'****' : ('Explorer.Standard_Suite', 'Standard_Suite'),
'GURL' : ('Explorer.URL_Suite', 'URL_Suite'),
'MOSS' : ('Explorer.Netscape_Suite', 'Netscape_Suite'),
'MSIE' : ('Explorer.Microsoft_Internet_Explorer', 'Microsoft_Internet_Explorer'),
'WWW!' : ('Explorer.Web_Browser_Suite', 'Web_Browser_Suite'),
'reqd' : ('Explorer.Required_Suite', 'Required_Suite'),
}
from Standard_Suite import *
from URL_Suite import *
from Netscape_Suite import *
from Microsoft_Internet_Explorer import *
from Web_Browser_Suite import *
from Required_Suite import *
def getbaseclasses(v):
if not getattr(v, '_propdict', None):
v._propdict = {}
v._elemdict = {}
for superclassname in getattr(v, '_superclassnames', []):
superclass = eval(superclassname)
getbaseclasses(superclass)
v._propdict.update(getattr(superclass, '_propdict', {}))
v._elemdict.update(getattr(superclass, '_elemdict', {}))
v._propdict.update(getattr(v, '_privpropdict', {}))
v._elemdict.update(getattr(v, '_privelemdict', {}))
import StdSuites
#
# Set property and element dictionaries now that all classes have been defined
#
getbaseclasses(application)
#
# Indices of types declared in this module
#
_classdeclarations = {
'capp' : application,
}
class Explorer(Standard_Suite_Events,
URL_Suite_Events,
Netscape_Suite_Events,
Microsoft_Internet_Explorer_Events,
Web_Browser_Suite_Events,
Required_Suite_Events,
aetools.TalkTo):
_signature = 'MSIE'
_moduleName = 'Explorer'
_elemdict = application._elemdict
_propdict = application._propdict
| bsd-3-clause |
jeremiahmarks/sl4a | python/src/Lib/test/regrtest.py | 55 | 37786 | #! /usr/bin/env python
"""Regression test.
This will find all modules whose name is "test_*" in the test
directory, and run them. Various command line options provide
additional facilities.
Command line options:
-v: verbose -- run tests in verbose mode with output to stdout
-w: verbose2 -- re-run failed tests in verbose mode
-q: quiet -- don't print anything except if a test fails
-x: exclude -- arguments are tests to *exclude*
-s: single -- run only a single test (see below)
-S: slow -- print the slowest 10 tests
-r: random -- randomize test execution order
-f: fromfile -- read names of tests to run from a file (see below)
-l: findleaks -- if GC is available detect tests that leak memory
-u: use -- specify which special resource intensive tests to run
-h: help -- print this text and exit
-t: threshold -- call gc.set_threshold(N)
-T: coverage -- turn on code coverage using the trace module
-D: coverdir -- Directory where coverage files are put
-N: nocoverdir -- Put coverage files alongside modules
-L: runleaks -- run the leaks(1) command just before exit
-R: huntrleaks -- search for reference leaks (needs debug build, v. slow)
-M: memlimit -- run very large memory-consuming tests
If non-option arguments are present, they are names for tests to run,
unless -x is given, in which case they are names for tests not to run.
If no test names are given, all tests are run.
-T turns on code coverage tracing with the trace module.
-D specifies the directory where coverage files are put.
-N Put coverage files alongside modules.
-s means to run only a single test and exit. This is useful when
doing memory analysis on the Python interpreter (which tend to consume
too many resources to run the full regression test non-stop). The
file /tmp/pynexttest is read to find the next test to run. If this
file is missing, the first test_*.py file in testdir or on the command
line is used. (actually tempfile.gettempdir() is used instead of
/tmp).
-f reads the names of tests from the file given as f's argument, one
or more test names per line. Whitespace is ignored. Blank lines and
lines beginning with '#' are ignored. This is especially useful for
whittling down failures involving interactions among tests.
-L causes the leaks(1) command to be run just before exit if it exists.
leaks(1) is available on Mac OS X and presumably on some other
FreeBSD-derived systems.
-R runs each test several times and examines sys.gettotalrefcount() to
see if the test appears to be leaking references. The argument should
be of the form stab:run:fname where 'stab' is the number of times the
test is run to let gettotalrefcount settle down, 'run' is the number
of times further it is run and 'fname' is the name of the file the
reports are written to. These parameters all have defaults (5, 4 and
"reflog.txt" respectively), so the minimal invocation is '-R ::'.
-M runs tests that require an exorbitant amount of memory. These tests
typically try to ascertain that containers keep working when containing more than
2 billion objects, which only works on 64-bit systems. There are also some
tests that try to exhaust the address space of the process, which only makes
sense on 32-bit systems with at least 2Gb of memory. The passed-in memlimit,
which is a string in the form of '2.5Gb', determines how much memory the
tests will limit themselves to (but they may go slightly over.) The number
shouldn't be more memory than the machine has (including swap memory). You
should also keep in mind that swap memory is generally much, much slower
than RAM, and setting memlimit to all available RAM or higher will heavily
tax the machine. On the other hand, it is no use running these tests with a
limit of less than 2.5Gb, and many require more than 20Gb. Tests that expect
to use more than memlimit memory will be skipped. The big-memory tests
generally run very, very long.
-u is used to specify which special resource intensive tests to run,
such as those requiring large file support or network connectivity.
The argument is a comma-separated list of words indicating the
resources to test. Currently only the following are defined:
all - Enable all special resources.
audio - Tests that use the audio device. (There are known
cases of broken audio drivers that can crash Python or
even the Linux kernel.)
curses - Tests that use curses and will modify the terminal's
state and output modes.
lib2to3 - Run the tests for 2to3 (They take a while.)
largefile - It is okay to run some test that may create huge
files. These tests can take a long time and may
consume >2GB of disk space temporarily.
network - It is okay to run tests that use external network
resource, e.g. testing SSL support for sockets.
bsddb - It is okay to run the bsddb testsuite, which takes
a long time to complete.
decimal - Test the decimal module against a large suite that
verifies compliance with standards.
compiler - Test the compiler package by compiling all the source
in the standard library and test suite. This takes
a long time. Enabling this resource also allows
test_tokenize to verify round-trip lexing on every
file in the test library.
subprocess Run all tests for the subprocess module.
urlfetch - It is okay to download files required on testing.
To enable all resources except one, use '-uall,-<resource>'. For
example, to run all the tests except for the bsddb tests, give the
option '-uall,-bsddb'.
"""
import cStringIO
import getopt
import os
import random
import re
import sys
import time
import traceback
import warnings
# I see no other way to suppress these warnings;
# putting them in test_grammar.py has no effect:
warnings.filterwarnings("ignore", "hex/oct constants", FutureWarning,
".*test.test_grammar$")
if sys.maxint > 0x7fffffff:
# Also suppress them in <string>, because for 64-bit platforms,
# that's where test_grammar.py hides them.
warnings.filterwarnings("ignore", "hex/oct constants", FutureWarning,
"<string>")
# Ignore ImportWarnings that only occur in the source tree,
# (because of modules with the same name as source-directories in Modules/)
for mod in ("ctypes", "gzip", "zipfile", "tarfile", "encodings.zlib_codec",
"test.test_zipimport", "test.test_zlib", "test.test_zipfile",
"test.test_codecs", "test.string_tests"):
warnings.filterwarnings(module=".*%s$" % (mod,),
action="ignore", category=ImportWarning)
# MacOSX (a.k.a. Darwin) has a default stack size that is too small
# for deeply recursive regular expressions. We see this as crashes in
# the Python test suite when running test_re.py and test_sre.py. The
# fix is to set the stack limit to 2048.
# This approach may also be useful for other Unixy platforms that
# suffer from small default stack limits.
if sys.platform == 'darwin':
try:
import resource
except ImportError:
pass
else:
soft, hard = resource.getrlimit(resource.RLIMIT_STACK)
newsoft = min(hard, max(soft, 1024*2048))
resource.setrlimit(resource.RLIMIT_STACK, (newsoft, hard))
from test import test_support
RESOURCE_NAMES = ('audio', 'curses', 'largefile', 'network', 'bsddb',
'decimal', 'compiler', 'subprocess', 'urlfetch')
def usage(code, msg=''):
print __doc__
if msg: print msg
sys.exit(code)
def main(tests=None, testdir=None, verbose=0, quiet=False,
exclude=False, single=False, randomize=False, fromfile=None,
findleaks=False, use_resources=None, trace=False, coverdir='coverage',
runleaks=False, huntrleaks=False, verbose2=False, print_slow=False):
"""Execute a test suite.
This also parses command-line options and modifies its behavior
accordingly.
tests -- a list of strings containing test names (optional)
testdir -- the directory in which to look for tests (optional)
Users other than the Python test suite will certainly want to
specify testdir; if it's omitted, the directory containing the
Python test suite is searched for.
If the tests argument is omitted, the tests listed on the
command-line will be used. If that's empty, too, then all *.py
files beginning with test_ will be used.
The other default arguments (verbose, quiet, exclude,
single, randomize, findleaks, use_resources, trace, coverdir, and
print_slow) allow programmers calling main() directly to set the
values that would normally be set by flags on the command line.
"""
test_support.record_original_stdout(sys.stdout)
try:
opts, args = getopt.getopt(sys.argv[1:], 'hvgqxsSrf:lu:t:TD:NLR:wM:',
['help', 'verbose', 'quiet', 'exclude',
'single', 'slow', 'random', 'fromfile',
'findleaks', 'use=', 'threshold=', 'trace',
'coverdir=', 'nocoverdir', 'runleaks',
'huntrleaks=', 'verbose2', 'memlimit=',
])
except getopt.error, msg:
usage(2, msg)
# Defaults
if use_resources is None:
use_resources = []
for o, a in opts:
if o in ('-h', '--help'):
usage(0)
elif o in ('-v', '--verbose'):
verbose += 1
elif o in ('-w', '--verbose2'):
verbose2 = True
elif o in ('-q', '--quiet'):
quiet = True
verbose = 0
elif o in ('-x', '--exclude'):
exclude = True
elif o in ('-s', '--single'):
single = True
elif o in ('-S', '--slow'):
print_slow = True
elif o in ('-r', '--randomize'):
randomize = True
elif o in ('-f', '--fromfile'):
fromfile = a
elif o in ('-l', '--findleaks'):
findleaks = True
elif o in ('-L', '--runleaks'):
runleaks = True
elif o in ('-t', '--threshold'):
import gc
gc.set_threshold(int(a))
elif o in ('-T', '--coverage'):
trace = True
elif o in ('-D', '--coverdir'):
coverdir = os.path.join(os.getcwd(), a)
elif o in ('-N', '--nocoverdir'):
coverdir = None
elif o in ('-R', '--huntrleaks'):
huntrleaks = a.split(':')
if len(huntrleaks) != 3:
print a, huntrleaks
usage(2, '-R takes three colon-separated arguments')
if len(huntrleaks[0]) == 0:
huntrleaks[0] = 5
else:
huntrleaks[0] = int(huntrleaks[0])
if len(huntrleaks[1]) == 0:
huntrleaks[1] = 4
else:
huntrleaks[1] = int(huntrleaks[1])
if len(huntrleaks[2]) == 0:
huntrleaks[2] = "reflog.txt"
elif o in ('-M', '--memlimit'):
test_support.set_memlimit(a)
elif o in ('-u', '--use'):
u = [x.lower() for x in a.split(',')]
for r in u:
if r == 'all':
use_resources[:] = RESOURCE_NAMES
continue
remove = False
if r[0] == '-':
remove = True
r = r[1:]
if r not in RESOURCE_NAMES:
usage(1, 'Invalid -u/--use option: ' + a)
if remove:
if r in use_resources:
use_resources.remove(r)
elif r not in use_resources:
use_resources.append(r)
if single and fromfile:
usage(2, "-s and -f don't go together!")
good = []
bad = []
skipped = []
resource_denieds = []
if findleaks:
try:
import gc
except ImportError:
print 'No GC available, disabling findleaks.'
findleaks = False
else:
# Uncomment the line below to report garbage that is not
# freeable by reference counting alone. By default only
# garbage that is not collectable by the GC is reported.
#gc.set_debug(gc.DEBUG_SAVEALL)
found_garbage = []
if single:
from tempfile import gettempdir
filename = os.path.join(gettempdir(), 'pynexttest')
try:
fp = open(filename, 'r')
next = fp.read().strip()
tests = [next]
fp.close()
except IOError:
pass
if fromfile:
tests = []
fp = open(fromfile)
for line in fp:
guts = line.split() # assuming no test has whitespace in its name
if guts and not guts[0].startswith('#'):
tests.extend(guts)
fp.close()
# Strip .py extensions.
if args:
args = map(removepy, args)
if tests:
tests = map(removepy, tests)
stdtests = STDTESTS[:]
nottests = NOTTESTS[:]
if exclude:
for arg in args:
if arg in stdtests:
stdtests.remove(arg)
nottests[:0] = args
args = []
tests = tests or args or findtests(testdir, stdtests, nottests)
if single:
tests = tests[:1]
if randomize:
random.shuffle(tests)
if trace:
import trace
tracer = trace.Trace(ignoredirs=[sys.prefix, sys.exec_prefix],
trace=False, count=True)
test_times = []
test_support.verbose = verbose # Tell tests to be moderately quiet
test_support.use_resources = use_resources
save_modules = sys.modules.keys()
for test in tests:
if not quiet:
print test
sys.stdout.flush()
if trace:
# If we're tracing code coverage, then we don't exit with status
# if on a false return value from main.
tracer.runctx('runtest(test, verbose, quiet,'
' test_times, testdir)',
globals=globals(), locals=vars())
else:
try:
ok = runtest(test, verbose, quiet, test_times,
testdir, huntrleaks)
except KeyboardInterrupt:
# print a newline separate from the ^C
print
break
except:
raise
if ok > 0:
good.append(test)
elif ok == 0:
bad.append(test)
else:
skipped.append(test)
if ok == -2:
resource_denieds.append(test)
if findleaks:
gc.collect()
if gc.garbage:
print "Warning: test created", len(gc.garbage),
print "uncollectable object(s)."
# move the uncollectable objects somewhere so we don't see
# them again
found_garbage.extend(gc.garbage)
del gc.garbage[:]
# Unload the newly imported modules (best effort finalization)
for module in sys.modules.keys():
if module not in save_modules and module.startswith("test."):
test_support.unload(module)
# The lists won't be sorted if running with -r
good.sort()
bad.sort()
skipped.sort()
if good and not quiet:
if not bad and not skipped and len(good) > 1:
print "All",
print count(len(good), "test"), "OK."
if print_slow:
test_times.sort(reverse=True)
print "10 slowest tests:"
for time, test in test_times[:10]:
print "%s: %.1fs" % (test, time)
if bad:
print count(len(bad), "test"), "failed:"
printlist(bad)
if skipped and not quiet:
print count(len(skipped), "test"), "skipped:"
printlist(skipped)
e = _ExpectedSkips()
plat = sys.platform
if e.isvalid():
surprise = set(skipped) - e.getexpected() - set(resource_denieds)
if surprise:
print count(len(surprise), "skip"), \
"unexpected on", plat + ":"
printlist(surprise)
else:
print "Those skips are all expected on", plat + "."
else:
print "Ask someone to teach regrtest.py about which tests are"
print "expected to get skipped on", plat + "."
if verbose2 and bad:
print "Re-running failed tests in verbose mode"
for test in bad:
print "Re-running test %r in verbose mode" % test
sys.stdout.flush()
try:
test_support.verbose = True
ok = runtest(test, True, quiet, test_times, testdir,
huntrleaks)
except KeyboardInterrupt:
# print a newline separate from the ^C
print
break
except:
raise
if single:
alltests = findtests(testdir, stdtests, nottests)
for i in range(len(alltests)):
if tests[0] == alltests[i]:
if i == len(alltests) - 1:
os.unlink(filename)
else:
fp = open(filename, 'w')
fp.write(alltests[i+1] + '\n')
fp.close()
break
else:
os.unlink(filename)
if trace:
r = tracer.results()
r.write_results(show_missing=True, summary=True, coverdir=coverdir)
if runleaks:
os.system("leaks %d" % os.getpid())
sys.exit(len(bad) > 0)
STDTESTS = [
'test_grammar',
'test_opcodes',
'test_dict',
'test_builtin',
'test_exceptions',
'test_types',
'test_unittest',
'test_doctest',
'test_doctest2',
]
NOTTESTS = [
'test_support',
'test_future1',
'test_future2',
]
def findtests(testdir=None, stdtests=STDTESTS, nottests=NOTTESTS):
"""Return a list of all applicable test modules."""
if not testdir: testdir = findtestdir()
names = os.listdir(testdir)
tests = []
for name in names:
if name[:5] == "test_" and name[-3:] == os.extsep+"py":
modname = name[:-3]
if modname not in stdtests and modname not in nottests:
tests.append(modname)
tests.sort()
return stdtests + tests
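# A small self-contained sketch of the name filter findtests() applies;
# the directory listing here is a hypothetical literal standing in for
# os.listdir(testdir).
def _findtests_example():
    names = ['test_b.py', 'helper.py', 'test_a.py', 'notes.txt']
    picked = sorted(name[:-3] for name in names
                    if name[:5] == 'test_' and name[-3:] == '.py')
    assert picked == ['test_a', 'test_b']
    return picked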
def runtest(test, verbose, quiet, test_times,
testdir=None, huntrleaks=False):
"""Run a single test.
test -- the name of the test
verbose -- if true, print more messages
quiet -- if true, don't print 'skipped' messages (probably redundant)
test_times -- a list of (time, test_name) pairs
testdir -- test directory
huntrleaks -- run multiple times to test for leaks; requires a debug
build; a triple corresponding to -R's three arguments
Return:
-2 test skipped because resource denied
-1 test skipped for some other reason
0 test failed
1 test passed
"""
try:
return runtest_inner(test, verbose, quiet, test_times,
testdir, huntrleaks)
finally:
cleanup_test_droppings(test, verbose)
def runtest_inner(test, verbose, quiet, test_times,
testdir=None, huntrleaks=False):
test_support.unload(test)
if not testdir:
testdir = findtestdir()
if verbose:
capture_stdout = None
else:
capture_stdout = cStringIO.StringIO()
try:
save_stdout = sys.stdout
try:
if capture_stdout:
sys.stdout = capture_stdout
if test.startswith('test.'):
abstest = test
else:
# Always import it from the test package
abstest = 'test.' + test
start_time = time.time()
the_package = __import__(abstest, globals(), locals(), [])
the_module = getattr(the_package, test)
# Old tests run to completion simply as a side-effect of
# being imported. For tests based on unittest or doctest,
# explicitly invoke their test_main() function (if it exists).
indirect_test = getattr(the_module, "test_main", None)
if indirect_test is not None:
indirect_test()
if huntrleaks:
dash_R(the_module, test, indirect_test, huntrleaks)
test_time = time.time() - start_time
test_times.append((test_time, test))
finally:
sys.stdout = save_stdout
except test_support.ResourceDenied, msg:
if not quiet:
print test, "skipped --", msg
sys.stdout.flush()
return -2
except (ImportError, test_support.TestSkipped), msg:
if not quiet:
print test, "skipped --", msg
sys.stdout.flush()
return -1
except KeyboardInterrupt:
raise
except test_support.TestFailed, msg:
print "test", test, "failed --", msg
sys.stdout.flush()
return 0
except:
type, value = sys.exc_info()[:2]
print "test", test, "crashed --", str(type) + ":", value
sys.stdout.flush()
if verbose:
traceback.print_exc(file=sys.stdout)
sys.stdout.flush()
return 0
else:
# Except in verbose mode, tests should not print anything
if verbose or huntrleaks:
return 1
output = capture_stdout.getvalue()
if not output:
return 1
print "test", test, "produced unexpected output:"
print "*" * 70
print output
print "*" * 70
sys.stdout.flush()
return 0
def cleanup_test_droppings(testname, verbose):
import shutil
# Try to clean up junk commonly left behind. While tests shouldn't leave
# any files or directories behind, when a test fails that can be tedious
# for it to arrange. The consequences can be especially nasty on Windows,
# since if a test leaves a file open, it cannot be deleted by name (while
# there's nothing we can do about that here either, we can display the
# name of the offending test, which is a real help).
for name in (test_support.TESTFN,
"db_home",
):
if not os.path.exists(name):
continue
if os.path.isdir(name):
kind, nuker = "directory", shutil.rmtree
elif os.path.isfile(name):
kind, nuker = "file", os.unlink
else:
raise SystemError("os.path says %r exists but is neither "
"directory nor file" % name)
if verbose:
print "%r left behind %s %r" % (testname, kind, name)
try:
nuker(name)
except Exception, msg:
print >> sys.stderr, ("%r left behind %s %r and it couldn't be "
"removed: %s" % (testname, kind, name, msg))
def dash_R(the_module, test, indirect_test, huntrleaks):
# This code is hackish and inelegant, but it seems to do the job.
import copy_reg, _abcoll, io
if not hasattr(sys, 'gettotalrefcount'):
raise Exception("Tracking reference leaks requires a debug build "
"of Python")
# Save current values for dash_R_cleanup() to restore.
fs = warnings.filters[:]
ps = copy_reg.dispatch_table.copy()
pic = sys.path_importer_cache.copy()
abcs = {}
modules = _abcoll, io
for abc in [getattr(mod, a) for mod in modules for a in mod.__all__]:
# XXX isinstance(abc, ABCMeta) leads to infinite recursion
if not hasattr(abc, '_abc_registry'):
continue
for obj in abc.__subclasses__() + [abc]:
abcs[obj] = obj._abc_registry.copy()
if indirect_test:
def run_the_test():
indirect_test()
else:
def run_the_test():
reload(the_module)
deltas = []
nwarmup, ntracked, fname = huntrleaks
repcount = nwarmup + ntracked
print >> sys.stderr, "beginning", repcount, "repetitions"
print >> sys.stderr, ("1234567890"*(repcount//10 + 1))[:repcount]
dash_R_cleanup(fs, ps, pic, abcs)
for i in range(repcount):
rc = sys.gettotalrefcount()
run_the_test()
sys.stderr.write('.')
dash_R_cleanup(fs, ps, pic, abcs)
if i >= nwarmup:
deltas.append(sys.gettotalrefcount() - rc - 2)
print >> sys.stderr
if any(deltas):
msg = '%s leaked %s references, sum=%s' % (test, deltas, sum(deltas))
print >> sys.stderr, msg
refrep = open(fname, "a")
print >> refrep, msg
refrep.close()
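# The huntrleaks triple mirrors regrtest's -R option: e.g. (5, 4, 'reflog.txt')
# would run 5 warmup repetitions, track refcount deltas over 4 more, and
# append any leak report to 'reflog.txt' (example values are illustrative).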
def dash_R_cleanup(fs, ps, pic, abcs):
import gc, copy_reg
import _strptime, linecache
dircache = test_support.import_module('dircache', deprecated=True)
import urlparse, urllib, urllib2, mimetypes, doctest
import struct, filecmp
from distutils.dir_util import _path_created
# Clear the warnings registry, so they can be displayed again
for mod in sys.modules.values():
if hasattr(mod, '__warningregistry__'):
del mod.__warningregistry__
# Restore some original values.
warnings.filters[:] = fs
copy_reg.dispatch_table.clear()
copy_reg.dispatch_table.update(ps)
sys.path_importer_cache.clear()
sys.path_importer_cache.update(pic)
# clear type cache
sys._clear_type_cache()
# Clear ABC registries, restoring previously saved ABC registries.
for abc, registry in abcs.items():
abc._abc_registry = registry.copy()
abc._abc_cache.clear()
abc._abc_negative_cache.clear()
# Clear assorted module caches.
_path_created.clear()
re.purge()
_strptime._regex_cache.clear()
urlparse.clear_cache()
urllib.urlcleanup()
urllib2.install_opener(None)
dircache.reset()
linecache.clearcache()
mimetypes._default_mime_types()
filecmp._cache.clear()
struct._clearcache()
doctest.master = None
# Collect cyclic trash.
gc.collect()
def findtestdir():
if __name__ == '__main__':
file = sys.argv[0]
else:
file = __file__
testdir = os.path.dirname(file) or os.curdir
return testdir
def removepy(name):
if name.endswith(os.extsep + "py"):
name = name[:-3]
return name
def count(n, word):
if n == 1:
return "%d %s" % (n, word)
else:
return "%d %ss" % (n, word)
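# e.g. count(1, "test") -> '1 test'; count(3, "test") -> '3 tests'.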
def printlist(x, width=70, indent=4):
"""Print the elements of iterable x to stdout.
Optional arg width (default 70) is the maximum line length.
Optional arg indent (default 4) is the number of blanks with which to
begin each line.
"""
from textwrap import fill
blanks = ' ' * indent
print fill(' '.join(map(str, x)), width,
initial_indent=blanks, subsequent_indent=blanks)
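# e.g. printlist(range(5)) prints '    0 1 2 3 4' -- four leading blanks by
# default, with longer iterables wrapped at 70 columns.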
# Map sys.platform to a string containing the basenames of tests
# expected to be skipped on that platform.
#
# Special cases:
# test_pep277
# The _ExpectedSkips constructor adds this to the set of expected
# skips if not os.path.supports_unicode_filenames.
# test_socket_ssl
# Controlled by test_socket_ssl.skip_expected. Requires the network
# resource, and a socket module with ssl support.
# test_timeout
# Controlled by test_timeout.skip_expected. Requires the network
# resource and a socket module.
#
# Tests that are expected to be skipped everywhere except on one platform
# are also handled separately.
_expectations = {
'win32':
"""
test__locale
test_bsddb185
test_bsddb3
test_commands
test_crypt
test_curses
test_dbm
test_dl
test_fcntl
test_fork1
test_epoll
test_gdbm
test_grp
test_ioctl
test_largefile
test_kqueue
test_mhlib
test_openpty
test_ossaudiodev
test_pipes
test_poll
test_posix
test_pty
test_pwd
test_resource
test_signal
test_threadsignals
test_timing
test_wait3
test_wait4
""",
'linux2':
"""
test_bsddb185
test_curses
test_dl
test_largefile
test_kqueue
test_ossaudiodev
""",
'mac':
"""
test_atexit
test_bsddb
test_bsddb185
test_bsddb3
test_bz2
test_commands
test_crypt
test_curses
test_dbm
test_dl
test_fcntl
test_fork1
test_epoll
test_grp
test_ioctl
test_largefile
test_locale
test_kqueue
test_mmap
test_openpty
test_ossaudiodev
test_poll
test_popen
test_popen2
test_posix
test_pty
test_pwd
test_resource
test_signal
test_sundry
test_tarfile
test_timing
""",
'unixware7':
"""
test_bsddb
test_bsddb185
test_dl
test_epoll
test_largefile
test_kqueue
test_minidom
test_openpty
test_pyexpat
test_sax
test_sundry
""",
'openunix8':
"""
test_bsddb
test_bsddb185
test_dl
test_epoll
test_largefile
test_kqueue
test_minidom
test_openpty
test_pyexpat
test_sax
test_sundry
""",
'sco_sv3':
"""
test_asynchat
test_bsddb
test_bsddb185
test_dl
test_fork1
test_epoll
test_gettext
test_largefile
test_locale
test_kqueue
test_minidom
test_openpty
test_pyexpat
test_queue
test_sax
test_sundry
test_thread
test_threaded_import
test_threadedtempfile
test_threading
""",
'riscos':
"""
test_asynchat
test_atexit
test_bsddb
test_bsddb185
test_bsddb3
test_commands
test_crypt
test_dbm
test_dl
test_fcntl
test_fork1
test_epoll
test_gdbm
test_grp
test_largefile
test_locale
test_kqueue
test_mmap
test_openpty
test_poll
test_popen2
test_pty
test_pwd
test_strop
test_sundry
test_thread
test_threaded_import
test_threadedtempfile
test_threading
test_timing
""",
'darwin':
"""
test__locale
test_bsddb
test_bsddb3
test_curses
test_epoll
test_gdbm
test_largefile
test_locale
test_kqueue
test_minidom
test_ossaudiodev
test_poll
""",
'sunos5':
"""
test_bsddb
test_bsddb185
test_curses
test_dbm
test_epoll
test_kqueue
test_gdbm
test_gzip
test_openpty
test_zipfile
test_zlib
""",
'hp-ux11':
"""
test_bsddb
test_bsddb185
test_curses
test_dl
test_epoll
test_gdbm
test_gzip
test_largefile
test_locale
test_kqueue
test_minidom
test_openpty
test_pyexpat
test_sax
test_zipfile
test_zlib
""",
'atheos':
"""
test_bsddb185
test_curses
test_dl
test_gdbm
test_epoll
test_largefile
test_locale
test_kqueue
test_mhlib
test_mmap
test_poll
test_popen2
test_resource
""",
'cygwin':
"""
test_bsddb185
test_bsddb3
test_curses
test_dbm
test_epoll
test_ioctl
test_kqueue
test_largefile
test_locale
test_ossaudiodev
test_socketserver
""",
'os2emx':
"""
test_audioop
test_bsddb185
test_bsddb3
test_commands
test_curses
test_dl
test_epoll
test_kqueue
test_largefile
test_mhlib
test_mmap
test_openpty
test_ossaudiodev
test_pty
test_resource
test_signal
""",
'freebsd4':
"""
test_bsddb
test_bsddb3
test_epoll
test_gdbm
test_locale
test_ossaudiodev
test_pep277
test_pty
test_socket_ssl
test_socketserver
test_tcl
test_timeout
test_urllibnet
test_multiprocessing
""",
'aix5':
"""
test_bsddb
test_bsddb185
test_bsddb3
test_bz2
test_dl
test_epoll
test_gdbm
test_gzip
test_kqueue
test_ossaudiodev
test_tcl
test_zipimport
test_zlib
""",
'openbsd3':
"""
test_bsddb
test_bsddb3
test_ctypes
test_dl
test_epoll
test_gdbm
test_locale
test_normalization
test_ossaudiodev
test_pep277
test_tcl
test_multiprocessing
""",
'netbsd3':
"""
test_bsddb
test_bsddb185
test_bsddb3
test_ctypes
test_curses
test_dl
test_epoll
test_gdbm
test_locale
test_ossaudiodev
test_pep277
test_tcl
test_multiprocessing
""",
}
_expectations['freebsd5'] = _expectations['freebsd4']
_expectations['freebsd6'] = _expectations['freebsd4']
_expectations['freebsd7'] = _expectations['freebsd4']
_expectations['freebsd8'] = _expectations['freebsd4']
class _ExpectedSkips:
def __init__(self):
import os.path
from test import test_timeout
self.valid = False
if sys.platform in _expectations:
s = _expectations[sys.platform]
self.expected = set(s.split())
# expected to be skipped on every platform, even Linux
self.expected.add('test_linuxaudiodev')
if not os.path.supports_unicode_filenames:
self.expected.add('test_pep277')
try:
from test import test_socket_ssl
except ImportError:
pass
else:
if test_socket_ssl.skip_expected:
self.expected.add('test_socket_ssl')
if test_timeout.skip_expected:
self.expected.add('test_timeout')
if sys.maxint == 9223372036854775807L:
self.expected.add('test_imageop')
if not sys.platform in ("mac", "darwin"):
MAC_ONLY = ["test_macos", "test_macostools", "test_aepack",
"test_plistlib", "test_scriptpackages",
"test_applesingle"]
for skip in MAC_ONLY:
self.expected.add(skip)
elif len(u'\0'.encode('unicode-internal')) == 4:
self.expected.add("test_macostools")
if sys.platform != "win32":
# test_sqlite is only reliable on Windows where the library
# is distributed with Python
WIN_ONLY = ["test_unicode_file", "test_winreg",
"test_winsound", "test_startfile",
"test_sqlite"]
for skip in WIN_ONLY:
self.expected.add(skip)
if sys.platform != 'irix':
IRIX_ONLY = ["test_imageop", "test_al", "test_cd", "test_cl",
"test_gl", "test_imgfile"]
for skip in IRIX_ONLY:
self.expected.add(skip)
if sys.platform != 'sunos5':
self.expected.add('test_sunaudiodev')
self.expected.add('test_nis')
if not sys.py3kwarning:
self.expected.add('test_py3kwarn')
self.valid = True
def isvalid(self):
"Return true iff _ExpectedSkips knows about the current platform."
return self.valid
def getexpected(self):
"""Return set of test names we expect to skip on current platform.
self.isvalid() must be true.
"""
assert self.isvalid()
return self.expected
if __name__ == '__main__':
# Remove regrtest.py's own directory from the module search path. This
# prevents relative imports from working, and relative imports will screw
# up the testing framework. E.g. if both test.test_support and
# test_support are imported, they will not contain the same globals, and
# much of the testing framework relies on the globals in the
# test.test_support module.
mydir = os.path.abspath(os.path.normpath(os.path.dirname(sys.argv[0])))
i = pathlen = len(sys.path)
while i >= 0:
i -= 1
if os.path.abspath(os.path.normpath(sys.path[i])) == mydir:
del sys.path[i]
if len(sys.path) == pathlen:
print 'Could not find %r in sys.path to remove it' % mydir
main()
| apache-2.0 |
arista-eosplus/ansible-modules-extras | cloud/google/gce_img.py | 65 | 5562 | #!/usr/bin/python
# Copyright 2015 Google Inc. All Rights Reserved.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""An Ansible module to utilize GCE image resources."""
DOCUMENTATION = '''
---
module: gce_img
version_added: "1.9"
short_description: utilize GCE image resources
description:
  - This module can create and delete GCE private images from a gzipped
    tarball containing raw disk data, or from existing detached disks
    in any zone. U(https://cloud.google.com/compute/docs/images)
options:
name:
description:
- the name of the image to create or delete
required: true
default: null
aliases: []
description:
description:
- an optional description
required: false
default: null
aliases: []
source:
description:
- the source disk or the Google Cloud Storage URI to create the image from
required: false
default: null
aliases: []
state:
description:
- desired state of the image
required: false
default: "present"
choices: ["present", "absent"]
aliases: []
zone:
description:
- the zone of the disk specified by source
required: false
default: "us-central1-a"
aliases: []
service_account_email:
description:
- service account email
required: false
default: null
aliases: []
pem_file:
description:
- path to the pem file associated with the service account email
required: false
default: null
aliases: []
project_id:
description:
- your GCE project ID
required: false
default: null
aliases: []
requirements:
- "python >= 2.6"
- "apache-libcloud"
author: "Peter Tan (@tanpeter)"
'''
EXAMPLES = '''
# Create an image named test-image from the disk 'test-disk' in zone us-central1-a.
- gce_img:
name: test-image
source: test-disk
zone: us-central1-a
state: present
# Create an image named test-image from a tarball in Google Cloud Storage.
- gce_img:
name: test-image
source: https://storage.googleapis.com/bucket/path/to/image.tgz
# Alternatively use the gs scheme
- gce_img:
name: test-image
source: gs://bucket/path/to/image.tgz
# Delete an image named test-image.
- gce_img:
name: test-image
state: absent
'''
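# A fuller (illustrative) invocation can also pass credentials explicitly;
# the values below are placeholders, not real accounts:
# - gce_img:
#     name: test-image
#     source: test-disk
#     zone: us-central1-a
#     service_account_email: svc-account@example-project.iam.gserviceaccount.com
#     pem_file: /path/to/key.pem
#     project_id: example-project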
import sys
try:
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.common.google import GoogleBaseError
from libcloud.common.google import ResourceExistsError
from libcloud.common.google import ResourceNotFoundError
_ = Provider.GCE
has_libcloud = True
except ImportError:
has_libcloud = False
GCS_URI = 'https://storage.googleapis.com/'
def create_image(gce, name, module):
"""Create an image with the specified name."""
source = module.params.get('source')
zone = module.params.get('zone')
desc = module.params.get('description')
if not source:
module.fail_json(msg='Must supply a source', changed=False)
if source.startswith(GCS_URI):
# source is a Google Cloud Storage URI
volume = source
elif source.startswith('gs://'):
# libcloud only accepts https URI.
volume = source.replace('gs://', GCS_URI)
else:
try:
volume = gce.ex_get_volume(source, zone)
except ResourceNotFoundError:
module.fail_json(msg='Disk %s not found in zone %s' % (source, zone),
changed=False)
except GoogleBaseError, e:
module.fail_json(msg=str(e), changed=False)
try:
gce.ex_create_image(name, volume, desc, False)
return True
except ResourceExistsError:
return False
except GoogleBaseError, e:
module.fail_json(msg=str(e), changed=False)
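# For example, a source of 'gs://my-bucket/image.tgz' (hypothetical bucket)
# is rewritten above to 'https://storage.googleapis.com/my-bucket/image.tgz',
# since libcloud only accepts the https form.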
def delete_image(gce, name, module):
"""Delete a specific image resource by name."""
try:
gce.ex_delete_image(name)
return True
except ResourceNotFoundError:
return False
except GoogleBaseError, e:
module.fail_json(msg=str(e), changed=False)
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True),
description=dict(),
source=dict(),
state=dict(default='present', choices=['present', 'absent']),
zone=dict(default='us-central1-a'),
service_account_email=dict(),
pem_file=dict(),
project_id=dict(),
)
)
if not has_libcloud:
module.fail_json(msg='libcloud with GCE support is required.')
gce = gce_connect(module)
name = module.params.get('name')
state = module.params.get('state')
changed = False
# user wants to create an image.
if state == 'present':
changed = create_image(gce, name, module)
# user wants to delete the image.
if state == 'absent':
changed = delete_image(gce, name, module)
module.exit_json(changed=changed, name=name)
sys.exit(0)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.gce import *
main()
| gpl-3.0 |
opcon/plutokore | tests/test_environments.py | 2 | 3138 | import pytest
def test_makino_env_creation(makino_env):
assert makino_env is not None
def test_king_env_creation(king_env):
assert king_env is not None
def test_default_viriality():
from plutokore.environments.makino import MakinoProfile
from plutokore.environments.king import KingProfile
from astropy import units as u
from astropy import cosmology
mass = (10**12.5) * u.M_sun
z = 0
mp = MakinoProfile(
mass,
z,
cosmo=cosmology.Planck15,
concentration_method='klypin-planck-relaxed')
kp = KingProfile(
mass,
z,
cosmo=cosmology.Planck15,
concentration_method='klypin-planck-relaxed')
assert mp.delta_vir == 200
assert kp.delta_vir == 200
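# A delta_vir of 200 matches the common "200 times critical density" virial
# overdensity convention; that this is plutokore's default is inferred from
# this test rather than stated in its docs.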
def test_concentration_methods():
from plutokore.environments.makino import MakinoProfile
from plutokore.environments.king import KingProfile
from astropy import units as u
from astropy import cosmology
conc_methods = [
'dolag', 'bullock', 'klypin-planck-all', 'klypin-planck-relaxed',
'klypin-wmap-all', 'klypin-wmap-relaxed', 'dutton', 'maccio'
]
mass = (10**12.5) * u.M_sun
z = 0
for cm in conc_methods:
mp = MakinoProfile(
mass, z, cosmo=cosmology.Planck15, concentration_method=cm)
kp = KingProfile(
mass, z, cosmo=cosmology.Planck15, concentration_method=cm)
assert mp.concentration != 0.0
assert kp.concentration != 0.0
def test_wrong_concentration_method():
from plutokore.environments.makino import MakinoProfile
from plutokore.environments.king import KingProfile
from astropy import units as u
from astropy import cosmology
try:
mass = (10**12.5) * u.M_sun
z = 0
mp = MakinoProfile(
mass, z, cosmo=cosmology.Planck15, concentration_method='invalid')
assert 0
except ValueError:
pass
try:
kp = KingProfile(
mass, z, cosmo=cosmology.Planck15, concentration_method='invalid')
assert 0
except ValueError:
pass
def test_default_cosmology():
from plutokore.environments.makino import MakinoProfile
from plutokore.environments.king import KingProfile
from astropy import units as u
from astropy import cosmology
mass = (10**12.5) * u.M_sun
z = 0
mp = MakinoProfile(mass, z, concentration_method='klypin-planck-relaxed')
kp = KingProfile(mass, z, concentration_method='klypin-planck-relaxed')
assert mp.cosmo is cosmology.Planck15
assert kp.cosmo is cosmology.Planck15
def test_default_conc_method():
from plutokore.environments.makino import MakinoProfile
from plutokore.environments.king import KingProfile
from astropy import units as u
from astropy import cosmology
mass = (10**12.5) * u.M_sun
z = 0
mp = MakinoProfile(mass, z)
kp = KingProfile(mass, z)
assert mp.concentration != 0.0
assert kp.concentration != 0.0
def test_get_king_density(king_env):
from astropy import units as u
d = king_env.get_density(1 * u.Mpc)
assert d > 0
| mit |
schakrava/rockstor-core | src/rockstor/storageadmin/south_migrations/0009_auto__del_field_sambashare_admin_users.py | 9 | 21417 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding M2M table for field smb_shares on 'User'
m2m_table_name = db.shorten_name(u'storageadmin_user_smb_shares')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('user', models.ForeignKey(orm['storageadmin.user'], null=False)),
('sambashare', models.ForeignKey(orm['storageadmin.sambashare'], null=False))
))
db.create_unique(m2m_table_name, ['user_id', 'sambashare_id'])
# Deleting field 'SambaShare.admin_users'
db.delete_column(u'storageadmin_sambashare', 'admin_users')
def backwards(self, orm):
# Removing M2M table for field smb_shares on 'User'
db.delete_table(db.shorten_name(u'storageadmin_user_smb_shares'))
# Adding field 'SambaShare.admin_users'
db.add_column(u'storageadmin_sambashare', 'admin_users',
self.gf('django.db.models.fields.CharField')(default='Administrator', max_length=128),
keep_default=False)
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'oauth2_provider.application': {
'Meta': {'object_name': 'Application'},
'authorization_grant_type': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'client_id': ('django.db.models.fields.CharField', [], {'default': "u'O-@jKqqlrgaolVZzyZ6S43hUGp-?MZ_.DKqosPSX'", 'unique': 'True', 'max_length': '100'}),
'client_secret': ('django.db.models.fields.CharField', [], {'default': "u'kZqnMD4A!_P-UDlwTV@Hbms2OBbFI@IpNRpDcjsJDg:J?tUpNJKSMsWF8x_C8SYy_Cbx3j_0u38lOGMjUbnxjrRAC-8ZhHpddkWan.36wP.q0pR:4QRL-56qPzQYI5UM'", 'max_length': '255', 'blank': 'True'}),
'client_type': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'redirect_uris': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'storageadmin.advancednfsexport': {
'Meta': {'object_name': 'AdvancedNFSExport'},
'export_str': ('django.db.models.fields.CharField', [], {'max_length': '4096'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'storageadmin.apikeys': {
'Meta': {'object_name': 'APIKeys'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '10'}),
'user': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '8'})
},
'storageadmin.appliance': {
'Meta': {'object_name': 'Appliance'},
'client_id': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'client_secret': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'current_appliance': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hostname': ('django.db.models.fields.CharField', [], {'default': "'Rockstor'", 'max_length': '128'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '4096'}),
'mgmt_port': ('django.db.models.fields.IntegerField', [], {'default': '443'}),
'uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'})
},
'storageadmin.dashboardconfig': {
'Meta': {'object_name': 'DashboardConfig'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'unique': 'True'}),
'widgets': ('django.db.models.fields.CharField', [], {'max_length': '4096'})
},
'storageadmin.disk': {
'Meta': {'object_name': 'Disk'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '10'}),
'offline': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'parted': ('django.db.models.fields.BooleanField', [], {}),
'pool': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storageadmin.Pool']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'size': ('django.db.models.fields.BigIntegerField', [], {'default': '0'})
},
'storageadmin.installedplugin': {
'Meta': {'object_name': 'InstalledPlugin'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'install_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'plugin_meta': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storageadmin.Plugin']"})
},
'storageadmin.iscsitarget': {
'Meta': {'object_name': 'IscsiTarget'},
'dev_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
'dev_size': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'share': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storageadmin.Share']"}),
'tid': ('django.db.models.fields.IntegerField', [], {'unique': 'True'}),
'tname': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
'storageadmin.networkinterface': {
'Meta': {'object_name': 'NetworkInterface'},
'alias': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'boot_proto': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'dns_servers': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}),
'domain': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}),
'gateway': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ipaddr': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'itype': ('django.db.models.fields.CharField', [], {'default': "'io'", 'max_length': '100'}),
'mac': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'netmask': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'network': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'onboot': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'})
},
'storageadmin.nfsexport': {
'Meta': {'object_name': 'NFSExport'},
'export_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storageadmin.NFSExportGroup']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mount': ('django.db.models.fields.CharField', [], {'max_length': '4096'}),
'share': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storageadmin.Share']"})
},
'storageadmin.nfsexportgroup': {
'Meta': {'object_name': 'NFSExportGroup'},
'admin_host': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}),
'editable': ('django.db.models.fields.CharField', [], {'default': "'rw'", 'max_length': '2'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'host_str': ('django.db.models.fields.CharField', [], {'max_length': '4096'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mount_security': ('django.db.models.fields.CharField', [], {'default': "'insecure'", 'max_length': '8'}),
'nohide': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'syncable': ('django.db.models.fields.CharField', [], {'default': "'async'", 'max_length': '5'})
},
'storageadmin.oauthapp': {
'Meta': {'object_name': 'OauthApp'},
'application': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['oauth2_provider.Application']", 'unique': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storageadmin.User']"})
},
'storageadmin.plugin': {
'Meta': {'object_name': 'Plugin'},
'css_file_name': ('django.db.models.fields.CharField', [], {'max_length': '4096'}),
'description': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '4096'}),
'display_name': ('django.db.models.fields.CharField', [], {'default': "''", 'unique': 'True', 'max_length': '4096'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'js_file_name': ('django.db.models.fields.CharField', [], {'max_length': '4096'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '4096'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '4096'})
},
'storageadmin.pool': {
'Meta': {'object_name': 'Pool'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '4096'}),
'raid': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'size': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'toc': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'})
},
'storageadmin.poolscrub': {
'Meta': {'object_name': 'PoolScrub'},
'end_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'errors': ('django.db.models.fields.BigIntegerField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kb_scrubbed': ('django.db.models.fields.BigIntegerField', [], {'null': 'True'}),
'pid': ('django.db.models.fields.IntegerField', [], {}),
'pool': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storageadmin.Pool']"}),
'start_time': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'started'", 'max_length': '10'})
},
'storageadmin.posixacls': {
'Meta': {'object_name': 'PosixACLs'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'owner': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'perms': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'smb_share': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storageadmin.SambaShare']"})
},
'storageadmin.sambashare': {
'Meta': {'object_name': 'SambaShare'},
'browsable': ('django.db.models.fields.CharField', [], {'default': "'yes'", 'max_length': '3'}),
'comment': ('django.db.models.fields.CharField', [], {'default': "'foo bar'", 'max_length': '100'}),
'create_mask': ('django.db.models.fields.CharField', [], {'default': "'0755'", 'max_length': '4'}),
'guest_ok': ('django.db.models.fields.CharField', [], {'default': "'no'", 'max_length': '3'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '4096'}),
'read_only': ('django.db.models.fields.CharField', [], {'default': "'no'", 'max_length': '3'}),
'share': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'sambashare'", 'unique': 'True', 'to': "orm['storageadmin.Share']"})
},
'storageadmin.setup': {
'Meta': {'object_name': 'Setup'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'setup_disks': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'setup_network': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'setup_system': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'setup_user': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'storageadmin.sftp': {
'Meta': {'object_name': 'SFTP'},
'editable': ('django.db.models.fields.CharField', [], {'default': "'ro'", 'max_length': '2'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'share': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['storageadmin.Share']", 'unique': 'True'})
},
'storageadmin.share': {
'Meta': {'object_name': 'Share'},
'group': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '4096'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '4096'}),
'owner': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '4096'}),
'perms': ('django.db.models.fields.CharField', [], {'default': "'755'", 'max_length': '9'}),
'pool': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storageadmin.Pool']"}),
'qgroup': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'replica': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'size': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'subvol_name': ('django.db.models.fields.CharField', [], {'max_length': '4096'}),
'toc': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'})
},
'storageadmin.snapshot': {
'Meta': {'unique_together': "(('share', 'name'),)", 'object_name': 'Snapshot'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '4096'}),
'qgroup': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'real_name': ('django.db.models.fields.CharField', [], {'default': "'unknownsnap'", 'max_length': '4096'}),
'share': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storageadmin.Share']"}),
'size': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'snap_type': ('django.db.models.fields.CharField', [], {'default': "'admin'", 'max_length': '64'}),
'toc': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'uvisible': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'writable': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'storageadmin.supportcase': {
'Meta': {'object_name': 'SupportCase'},
'case_type': ('django.db.models.fields.CharField', [], {'max_length': '6'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '9'}),
'zipped_log': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'storageadmin.user': {
'Meta': {'object_name': 'User'},
'gid': ('django.db.models.fields.IntegerField', [], {'default': '5000'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '4096', 'null': 'True'}),
'smb_shares': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'admin_users'", 'null': 'True', 'to': "orm['storageadmin.SambaShare']"}),
'uid': ('django.db.models.fields.IntegerField', [], {'default': '5000'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'suser'", 'unique': 'True', 'null': 'True', 'to': u"orm['auth.User']"}),
'username': ('django.db.models.fields.CharField', [], {'default': "''", 'unique': 'True', 'max_length': '4096'})
}
}
complete_apps = ['storageadmin'] | gpl-3.0 |
laperry1/android_external_chromium_org | build/android/pylib/utils/report_results.py | 44 | 4147 | # Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Module containing utility functions for reporting results."""
import logging
import os
import re
from pylib import constants
from pylib.utils import flakiness_dashboard_results_uploader
def _LogToFile(results, test_type, suite_name):
"""Log results to local files which can be used for aggregation later."""
log_file_path = os.path.join(constants.GetOutDirectory(), 'test_logs')
if not os.path.exists(log_file_path):
os.mkdir(log_file_path)
full_file_name = os.path.join(
log_file_path, re.sub('\W', '_', test_type).lower() + '.log')
if not os.path.exists(full_file_name):
with open(full_file_name, 'w') as log_file:
print >> log_file, '\n%s results for %s build %s:' % (
test_type, os.environ.get('BUILDBOT_BUILDERNAME'),
os.environ.get('BUILDBOT_BUILDNUMBER'))
  logging.info('Writing results to %s.' % full_file_name)
with open(full_file_name, 'a') as log_file:
shortened_suite_name = suite_name[:25] + (suite_name[25:] and '...')
print >> log_file, '%s%s' % (shortened_suite_name.ljust(30),
results.GetShortForm())
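# Each suite thus gets one aligned line: the name is truncated to 25 chars
# plus '...' and padded to 30 columns before the short-form results (the
# exact result text comes from results.GetShortForm()).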
def _LogToFlakinessDashboard(results, test_type, test_package,
flakiness_server):
"""Upload results to the flakiness dashboard"""
logging.info('Upload results for test type "%s", test package "%s" to %s' %
(test_type, test_package, flakiness_server))
# TODO(frankf): Enable uploading for gtests.
if test_type != 'Instrumentation':
logging.warning('Invalid test type.')
return
try:
if flakiness_server == constants.UPSTREAM_FLAKINESS_SERVER:
assert test_package in ['ContentShellTest',
'ChromeShellTest',
'AndroidWebViewTest']
dashboard_test_type = ('%s_instrumentation_tests' %
test_package.lower().rstrip('test'))
# Downstream server.
else:
dashboard_test_type = 'Chromium_Android_Instrumentation'
flakiness_dashboard_results_uploader.Upload(
results, flakiness_server, dashboard_test_type)
except Exception as e:
logging.error(e)
def LogFull(results, test_type, test_package, annotation=None,
flakiness_server=None):
"""Log the tests results for the test suite.
The results will be logged three different ways:
1. Log to stdout.
2. Log to local files for aggregating multiple test steps
(on buildbots only).
3. Log to flakiness dashboard (on buildbots only).
Args:
results: An instance of TestRunResults object.
test_type: Type of the test (e.g. 'Instrumentation', 'Unit test', etc.).
test_package: Test package name (e.g. 'ipc_tests' for gtests,
'ContentShellTest' for instrumentation tests)
    annotation: If instrumentation test type, this is a list of annotations
                (e.g. ['Smoke', 'SmallTest']).
    flakiness_server: If provided, upload the results to the flakiness
                      dashboard at this URL.
"""
if not results.DidRunPass():
logging.critical('*' * 80)
logging.critical('Detailed Logs')
logging.critical('*' * 80)
for line in results.GetLogs().splitlines():
logging.critical(line)
logging.critical('*' * 80)
logging.critical('Summary')
logging.critical('*' * 80)
for line in results.GetGtestForm().splitlines():
logging.critical(line)
logging.critical('*' * 80)
if os.environ.get('BUILDBOT_BUILDERNAME'):
# It is possible to have multiple buildbot steps for the same
    # instrumentation test package using different annotations.
if annotation and len(annotation) == 1:
suite_name = annotation[0]
else:
suite_name = test_package
_LogToFile(results, test_type, suite_name)
if flakiness_server:
_LogToFlakinessDashboard(results, test_type, test_package,
flakiness_server)
| bsd-3-clause |
nazeehshoura/crawler | env/lib/python2.7/site-packages/pip/basecommand.py | 392 | 6578 | """Base Command class, and related routines"""
import os
import sys
import tempfile
import traceback
import time
import optparse
from pip import cmdoptions
from pip.locations import running_under_virtualenv
from pip.log import logger
from pip.download import PipSession
from pip.exceptions import (BadCommand, InstallationError, UninstallationError,
CommandError, PreviousBuildDirError)
from pip.backwardcompat import StringIO
from pip.baseparser import ConfigOptionParser, UpdatingDefaultsHelpFormatter
from pip.status_codes import (SUCCESS, ERROR, UNKNOWN_ERROR, VIRTUALENV_NOT_FOUND,
PREVIOUS_BUILD_DIR_ERROR)
from pip.util import get_prog
__all__ = ['Command']
class Command(object):
name = None
usage = None
hidden = False
def __init__(self):
parser_kw = {
'usage': self.usage,
'prog': '%s %s' % (get_prog(), self.name),
'formatter': UpdatingDefaultsHelpFormatter(),
'add_help_option': False,
'name': self.name,
'description': self.__doc__,
}
self.parser = ConfigOptionParser(**parser_kw)
# Commands should add options to this option group
optgroup_name = '%s Options' % self.name.capitalize()
self.cmd_opts = optparse.OptionGroup(self.parser, optgroup_name)
# Add the general options
gen_opts = cmdoptions.make_option_group(cmdoptions.general_group, self.parser)
self.parser.add_option_group(gen_opts)
def _build_session(self, options):
session = PipSession()
# Handle custom ca-bundles from the user
if options.cert:
session.verify = options.cert
# Handle timeouts
if options.timeout:
session.timeout = options.timeout
# Handle configured proxies
if options.proxy:
session.proxies = {
"http": options.proxy,
"https": options.proxy,
}
# Determine if we can prompt the user for authentication or not
session.auth.prompting = not options.no_input
return session
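    # Illustrative effect (hypothetical values): with options.proxy set to
    # 'http://10.0.0.1:3128', session.proxies routes both http and https
    # traffic through that proxy; options.cert points session.verify at a
    # custom CA bundle instead of the default.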
def setup_logging(self):
pass
def parse_args(self, args):
# factored out for testability
return self.parser.parse_args(args)
def main(self, args):
options, args = self.parse_args(args)
level = 1 # Notify
level += options.verbose
level -= options.quiet
level = logger.level_for_integer(4 - level)
complete_log = []
logger.add_consumers(
(level, sys.stdout),
(logger.DEBUG, complete_log.append),
)
if options.log_explicit_levels:
logger.explicit_levels = True
self.setup_logging()
#TODO: try to get these passing down from the command?
# without resorting to os.environ to hold these.
if options.no_input:
os.environ['PIP_NO_INPUT'] = '1'
if options.exists_action:
os.environ['PIP_EXISTS_ACTION'] = ' '.join(options.exists_action)
if options.require_venv:
# If a venv is required check if it can really be found
if not running_under_virtualenv():
logger.fatal('Could not find an activated virtualenv (required).')
sys.exit(VIRTUALENV_NOT_FOUND)
if options.log:
log_fp = open_logfile(options.log, 'a')
logger.add_consumers((logger.DEBUG, log_fp))
else:
log_fp = None
exit = SUCCESS
store_log = False
try:
status = self.run(options, args)
# FIXME: all commands should return an exit status
# and when it is done, isinstance is not needed anymore
if isinstance(status, int):
exit = status
except PreviousBuildDirError:
e = sys.exc_info()[1]
logger.fatal(str(e))
logger.info('Exception information:\n%s' % format_exc())
store_log = True
exit = PREVIOUS_BUILD_DIR_ERROR
except (InstallationError, UninstallationError):
e = sys.exc_info()[1]
logger.fatal(str(e))
logger.info('Exception information:\n%s' % format_exc())
store_log = True
exit = ERROR
except BadCommand:
e = sys.exc_info()[1]
logger.fatal(str(e))
logger.info('Exception information:\n%s' % format_exc())
store_log = True
exit = ERROR
except CommandError:
e = sys.exc_info()[1]
logger.fatal('ERROR: %s' % e)
logger.info('Exception information:\n%s' % format_exc())
exit = ERROR
except KeyboardInterrupt:
logger.fatal('Operation cancelled by user')
logger.info('Exception information:\n%s' % format_exc())
store_log = True
exit = ERROR
except:
logger.fatal('Exception:\n%s' % format_exc())
store_log = True
exit = UNKNOWN_ERROR
if store_log:
log_file_fn = options.log_file
text = '\n'.join(complete_log)
try:
log_file_fp = open_logfile(log_file_fn, 'w')
except IOError:
temp = tempfile.NamedTemporaryFile(delete=False)
log_file_fn = temp.name
log_file_fp = open_logfile(log_file_fn, 'w')
logger.fatal('Storing debug log for failure in %s' % log_file_fn)
log_file_fp.write(text)
log_file_fp.close()
if log_fp is not None:
log_fp.close()
return exit
def format_exc(exc_info=None):
if exc_info is None:
exc_info = sys.exc_info()
out = StringIO()
traceback.print_exception(*exc_info, **dict(file=out))
return out.getvalue()
def open_logfile(filename, mode='a'):
"""Open the named log file in append mode.
If the file already exists, a separator will also be printed to
the file to separate past activity from current activity.
"""
filename = os.path.expanduser(filename)
filename = os.path.abspath(filename)
dirname = os.path.dirname(filename)
if not os.path.exists(dirname):
os.makedirs(dirname)
exists = os.path.exists(filename)
log_fp = open(filename, mode)
if exists:
log_fp.write('%s\n' % ('-' * 60))
log_fp.write('%s run on %s\n' % (sys.argv[0], time.strftime('%c')))
return log_fp
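# e.g. open_logfile('~/.pip/pip.log') expands the user path, creates missing
# parent directories, and, if the file already exists, writes a '-' * 60
# separator plus a timestamped header before returning the handle.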
| mit |
luispedro/BuildingMachineLearningSystemsWithPython | ch01/gen_webstats.py | 1 | 1108 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
# This script generates web traffic data for our hypothetical
# web startup "MLASS" in chapter 01
import os
import scipy as sp
from scipy.stats import gamma
import matplotlib.pyplot as plt
from utils import DATA_DIR, CHART_DIR
sp.random.seed(3) # to reproduce the data later on
x = sp.arange(1, 31*24)
y = sp.array(200*(sp.sin(2*sp.pi*x/(7*24))), dtype=int)
y += gamma.rvs(15, loc=0, scale=100, size=len(x))
y += 2 * sp.exp(x/100.0)
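# y mixes three ingredients: a weekly sinusoid (period 7*24 hours), gamma-
# distributed noise, and a slowly growing exponential term to mimic rising
# traffic toward the end of the month.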
y = sp.ma.array(y, mask=[y<0])
print(sum(y), sum(y<0))
plt.scatter(x, y)
plt.title("Web traffic over the last month")
plt.xlabel("Time")
plt.ylabel("Hits/hour")
plt.xticks([w*7*24 for w in range(5)],
['week %i' %(w+1) for w in range(5)])
plt.autoscale(tight=True)
plt.grid()
plt.savefig(os.path.join(CHART_DIR, "1400_01_01.png"))
sp.savetxt(os.path.join(DATA_DIR, "web_traffic.tsv"),
list(zip(x, y)), delimiter="\t", fmt="%s")
| mit |
chrisspen/burlap | burlap/ssl.py | 1 | 5942 | from __future__ import print_function
import os
import sys
import re
from datetime import datetime, date
import dateutil.parser
import pytz
from burlap import ServiceSatchel
from burlap.constants import *
from burlap.decorators import task
from fabric.api import runs_once, hide
class SSLSatchel(ServiceSatchel):
name = 'ssl'
def set_defaults(self):
self.env.country = '?'
self.env.state = '?'
self.env.city = '?'
self.env.organization = '?'
self.env.common_name = '?'
self.env.days = 365
self.env.length = 4096
self.env.domain = ''
@task
def generate_self_signed_certificate(self, domain='', r=None):
"""
Generates a self-signed certificate for use in an internal development
environment for testing SSL pages.
http://almostalldigital.wordpress.com/2013/03/07/self-signed-ssl-certificate-for-ec2-load-balancer/
"""
r = self.local_renderer
r.env.domain = domain or r.env.domain
assert r.env.domain, 'No SSL domain defined.'
        role = self.genv.ROLE or ALL  # note: r is the local renderer here, so it cannot stand in for the role
ssl_dst = 'roles/%s/ssl' % (role,)
if not os.path.isdir(ssl_dst):
os.makedirs(ssl_dst)
r.env.base_dst = '%s/%s' % (ssl_dst, r.env.domain)
r.local('openssl req -new -newkey rsa:{ssl_length} '
'-days {ssl_days} -nodes -x509 '
'-subj "/C={ssl_country}/ST={ssl_state}/L={ssl_city}/O={ssl_organization}/CN={ssl_domain}" '
'-keyout {ssl_base_dst}.key -out {ssl_base_dst}.crt')
@task
@runs_once
def generate_csr(self, domain='', r=None):
"""
Creates a certificate signing request to be submitted to a formal
certificate authority to generate a certificate.
Note, the provider may say the CSR must be created on the target server,
but this is not necessary.
"""
r = r or self.local_renderer
r.env.domain = domain or r.env.domain
role = self.genv.ROLE or ALL
site = self.genv.SITE or self.genv.default_site
print('self.genv.default_site:', self.genv.default_site, file=sys.stderr)
print('site.csr0:', site, file=sys.stderr)
ssl_dst = 'roles/%s/ssl' % (role,)
print('ssl_dst:', ssl_dst)
if not os.path.isdir(ssl_dst):
os.makedirs(ssl_dst)
for site, site_data in self.iter_sites():
print('site.csr1:', site, file=sys.stderr)
assert r.env.domain, 'No SSL domain defined.'
r.env.ssl_base_dst = '%s/%s' % (ssl_dst, r.env.domain.replace('*.', ''))
r.env.ssl_csr_year = date.today().year
r.local('openssl req -nodes -newkey rsa:{ssl_length} '
'-subj "/C={ssl_country}/ST={ssl_state}/L={ssl_city}/O={ssl_organization}/CN={ssl_domain}" '
'-keyout {ssl_base_dst}.{ssl_csr_year}.key -out {ssl_base_dst}.{ssl_csr_year}.csr')
def get_expiration_date(self, fn):
"""
Reads the expiration date of a local crt file.
"""
r = self.local_renderer
r.env.crt_fn = fn
with hide('running'):
ret = r.local('openssl x509 -noout -in {ssl_crt_fn} -dates', capture=True)
matches = re.findall('notAfter=(.*?)$', ret, flags=re.IGNORECASE)
if matches:
return dateutil.parser.parse(matches[0])
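    # openssl's '-dates' output typically contains a line such as
    # 'notAfter=Jun  1 12:00:00 2030 GMT' (illustrative date), which
    # dateutil.parser.parse turns into a datetime above.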
@task
def list_expiration_dates(self, base='roles/all/ssl'):
"""
Scans through all local .crt files and displays the expiration dates.
"""
max_fn_len = 0
max_date_len = 0
data = []
for fn in os.listdir(base):
fqfn = os.path.join(base, fn)
if not os.path.isfile(fqfn):
continue
if not fn.endswith('.crt'):
continue
expiration_date = self.get_expiration_date(fqfn)
max_fn_len = max(max_fn_len, len(fn))
max_date_len = max(max_date_len, len(str(expiration_date)))
data.append((fn, expiration_date))
print('%s %s %s' % ('Filename'.ljust(max_fn_len), 'Expiration Date'.ljust(max_date_len), 'Expired'))
now = datetime.now().replace(tzinfo=pytz.UTC)
for fn, dt in sorted(data):
if dt is None:
expired = '?'
elif dt < now:
expired = 'YES'
else:
expired = 'NO'
print('%s %s %s' % (fn.ljust(max_fn_len), str(dt).ljust(max_date_len), expired))
@task
def verify_certificate_chain(self, base=None, crt=None, csr=None, key=None):
"""
Confirms the key, CSR, and certificate files all match.
"""
from burlap.common import get_verbose, print_fail, print_success
r = self.local_renderer
if base:
crt = base + '.crt'
csr = base + '.csr'
key = base + '.key'
else:
assert crt and csr and key, 'If base not provided, crt and csr and key must be given.'
assert os.path.isfile(crt)
assert os.path.isfile(csr)
assert os.path.isfile(key)
csr_md5 = r.local('openssl req -noout -modulus -in %s | openssl md5' % csr, capture=True)
key_md5 = r.local('openssl rsa -noout -modulus -in %s | openssl md5' % key, capture=True)
crt_md5 = r.local('openssl x509 -noout -modulus -in %s | openssl md5' % crt, capture=True)
match = crt_md5 == csr_md5 == key_md5
if self.verbose or not match:
print('crt:', crt_md5)
print('csr:', csr_md5)
print('key:', key_md5)
if match:
print_success('Files look good!')
else:
            print_fail('Files do not match!')
            raise Exception('Files do not match!')
@task
def configure(self):
pass
ssl = SSLSatchel()
| mit |
fangxingli/hue | desktop/core/ext-py/lxml-3.3.6/src/lxml/tests/test_xpathevaluator.py | 19 | 25818 | # -*- coding: utf-8 -*-
"""
Test cases related to XPath evaluation and the XPath class
"""
import unittest, sys, os.path
this_dir = os.path.dirname(__file__)
if this_dir not in sys.path:
sys.path.insert(0, this_dir) # needed for Py3
from common_imports import etree, HelperTestCase, _bytes, BytesIO
from common_imports import doctest, make_doctest
class ETreeXPathTestCase(HelperTestCase):
"""XPath tests etree"""
def test_xpath_boolean(self):
tree = self.parse('<a><b></b><b></b></a>')
self.assertTrue(tree.xpath('boolean(/a/b)'))
self.assertTrue(not tree.xpath('boolean(/a/c)'))
def test_xpath_number(self):
tree = self.parse('<a>1</a>')
self.assertEqual(1.,
tree.xpath('number(/a)'))
tree = self.parse('<a>A</a>')
actual = str(tree.xpath('number(/a)'))
expected = ['nan', '1.#qnan', 'nanq']
if not actual.lower() in expected:
self.fail('Expected a NAN value, got %s' % actual)
def test_xpath_string(self):
tree = self.parse('<a>Foo</a>')
self.assertEqual('Foo',
tree.xpath('string(/a/text())'))
def test_xpath_document_root(self):
tree = self.parse('<a><b/></a>')
self.assertEqual([],
tree.xpath('/'))
def test_xpath_namespace(self):
tree = self.parse('<a xmlns="test" xmlns:p="myURI"/>')
self.assertTrue((None, "test") in tree.xpath('namespace::*'))
self.assertTrue(('p', 'myURI') in tree.xpath('namespace::*'))
def test_xpath_namespace_empty(self):
tree = self.parse('<a/>')
self.assertEqual([('xml', 'http://www.w3.org/XML/1998/namespace')],
tree.xpath('namespace::*'))
def test_xpath_list_elements(self):
tree = self.parse('<a><b>Foo</b><b>Bar</b></a>')
root = tree.getroot()
self.assertEqual([root[0], root[1]],
tree.xpath('/a/b'))
def test_xpath_list_nothing(self):
tree = self.parse('<a><b/></a>')
self.assertEqual([],
tree.xpath('/a/c'))
# this seems to pass a different code path, also should return nothing
self.assertEqual([],
tree.xpath('/a/c/text()'))
def test_xpath_list_text(self):
tree = self.parse('<a><b>Foo</b><b>Bar</b></a>')
root = tree.getroot()
self.assertEqual(['Foo', 'Bar'],
tree.xpath('/a/b/text()'))
def test_xpath_list_text_parent(self):
tree = self.parse('<a><b>FooBar</b><b>BarFoo</b></a>')
root = tree.getroot()
self.assertEqual(['FooBar', 'BarFoo'],
tree.xpath('/a/b/text()'))
self.assertEqual([root[0], root[1]],
[r.getparent() for r in tree.xpath('/a/b/text()')])
def test_xpath_list_text_parent_no_smart_strings(self):
tree = self.parse('<a><b>FooBar</b><b>BarFoo</b></a>')
root = tree.getroot()
self.assertEqual(['FooBar', 'BarFoo'],
tree.xpath('/a/b/text()', smart_strings=True))
self.assertEqual([root[0], root[1]],
[r.getparent() for r in
tree.xpath('/a/b/text()', smart_strings=True)])
self.assertEqual([None, None],
[r.attrname for r in
tree.xpath('/a/b/text()', smart_strings=True)])
self.assertEqual(['FooBar', 'BarFoo'],
tree.xpath('/a/b/text()', smart_strings=False))
self.assertEqual([False, False],
[hasattr(r, 'getparent') for r in
tree.xpath('/a/b/text()', smart_strings=False)])
self.assertEqual([None, None],
[r.attrname for r in
tree.xpath('/a/b/text()', smart_strings=True)])
def test_xpath_list_unicode_text_parent(self):
xml = _bytes('<a><b>FooBar\\u0680\\u3120</b><b>BarFoo\\u0680\\u3120</b></a>').decode("unicode_escape")
tree = self.parse(xml.encode('utf-8'))
root = tree.getroot()
self.assertEqual([_bytes('FooBar\\u0680\\u3120').decode("unicode_escape"),
_bytes('BarFoo\\u0680\\u3120').decode("unicode_escape")],
tree.xpath('/a/b/text()'))
self.assertEqual([root[0], root[1]],
[r.getparent() for r in tree.xpath('/a/b/text()')])
def test_xpath_list_attribute(self):
tree = self.parse('<a b="B" c="C"/>')
self.assertEqual(['B'],
tree.xpath('/a/@b'))
def test_xpath_list_attribute_parent(self):
tree = self.parse('<a b="BaSdFgHjKl" c="CqWeRtZuI"/>')
results = tree.xpath('/a/@c')
self.assertEqual(1, len(results))
self.assertEqual('CqWeRtZuI', results[0])
self.assertEqual(tree.getroot().tag, results[0].getparent().tag)
def test_xpath_list_attribute_parent_no_smart_strings(self):
tree = self.parse('<a b="BaSdFgHjKl" c="CqWeRtZuI"/>')
results = tree.xpath('/a/@c', smart_strings=True)
self.assertEqual(1, len(results))
self.assertEqual('CqWeRtZuI', results[0])
self.assertEqual('c', results[0].attrname)
self.assertEqual(tree.getroot().tag, results[0].getparent().tag)
results = tree.xpath('/a/@c', smart_strings=False)
self.assertEqual(1, len(results))
self.assertEqual('CqWeRtZuI', results[0])
self.assertEqual(False, hasattr(results[0], 'getparent'))
self.assertEqual(False, hasattr(results[0], 'attrname'))
def test_xpath_text_from_other_document(self):
xml_data = '''
<table>
<item xml:id="k1"><value>v1</value></item>
<item xml:id="k2"><value>v2</value></item>
</table>
'''
def lookup(dummy, id):
return etree.XML(xml_data).xpath('id(%r)' % id)
functions = {(None, 'lookup') : lookup}
root = etree.XML('<dummy/>')
values = root.xpath("lookup('k1')/value/text()",
extensions=functions)
self.assertEqual(['v1'], values)
self.assertEqual('value', values[0].getparent().tag)
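# The getparent() call works here only because the smart string result keeps
# the temporary document built inside lookup() alive; with plain string
# results that document could already have been garbage collected.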
def test_xpath_list_comment(self):
tree = self.parse('<a><!-- Foo --></a>')
self.assertEqual(['<!-- Foo -->'],
list(map(repr, tree.xpath('/a/node()'))))
def test_rel_xpath_boolean(self):
root = etree.XML('<a><b><c/></b></a>')
el = root[0]
self.assertTrue(el.xpath('boolean(c)'))
self.assertTrue(not el.xpath('boolean(d)'))
def test_rel_xpath_list_elements(self):
tree = self.parse('<a><c><b>Foo</b><b>Bar</b></c><c><b>Hey</b></c></a>')
root = tree.getroot()
c = root[0]
self.assertEqual([c[0], c[1]],
c.xpath('b'))
self.assertEqual([c[0], c[1], root[1][0]],
c.xpath('//b'))
def test_xpath_ns(self):
tree = self.parse('<a xmlns="uri:a"><b></b></a>')
root = tree.getroot()
self.assertEqual(
[root[0]],
tree.xpath('//foo:b', namespaces={'foo': 'uri:a'}))
self.assertEqual(
[],
tree.xpath('//foo:b', namespaces={'foo': 'uri:c'}))
self.assertEqual(
[root[0]],
root.xpath('//baz:b', namespaces={'baz': 'uri:a'}))
def test_xpath_ns_none(self):
tree = self.parse('<a xmlns="uri:a"><b></b></a>')
root = tree.getroot()
self.assertRaises(
TypeError,
root.xpath, '//b', namespaces={None: 'uri:a'})
def test_xpath_ns_empty(self):
tree = self.parse('<a xmlns="uri:a"><b></b></a>')
root = tree.getroot()
self.assertRaises(
TypeError,
root.xpath, '//b', namespaces={'': 'uri:a'})
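# Rationale for the two TypeError checks above: XPath 1.0 has no notion of a
# default namespace, so lxml rejects both None and '' as prefixes -- elements
# in a namespace must be addressed through an explicit prefix, as in
# test_xpath_ns.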
def test_xpath_error(self):
tree = self.parse('<a/>')
self.assertRaises(etree.XPathEvalError, tree.xpath, '\\fad')
def test_xpath_class_error(self):
self.assertRaises(SyntaxError, etree.XPath, '\\fad')
self.assertRaises(etree.XPathSyntaxError, etree.XPath, '\\fad')
def test_xpath_prefix_error(self):
tree = self.parse('<a/>')
self.assertRaises(etree.XPathEvalError, tree.xpath, '/fa:d')
def test_xpath_class_prefix_error(self):
tree = self.parse('<a/>')
xpath = etree.XPath("/fa:d")
self.assertRaises(etree.XPathEvalError, xpath, tree)
def test_elementtree_getpath(self):
a = etree.Element("a")
b = etree.SubElement(a, "b")
c = etree.SubElement(a, "c")
d1 = etree.SubElement(c, "d")
d2 = etree.SubElement(c, "d")
tree = etree.ElementTree(a)
self.assertEqual('/a/c/d',
tree.getpath(d2)[:6])
self.assertEqual([d2],
tree.xpath(tree.getpath(d2)))
def test_elementtree_getpath_partial(self):
a = etree.Element("a")
b = etree.SubElement(a, "b")
c = etree.SubElement(a, "c")
d1 = etree.SubElement(c, "d")
d2 = etree.SubElement(c, "d")
tree = etree.ElementTree(c)
self.assertEqual('/c/d',
tree.getpath(d2)[:4])
self.assertEqual([d2],
tree.xpath(tree.getpath(d2)))
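# getpath() returns an absolute path that disambiguates siblings with
# positional predicates ('/a/c/d[2]' for d2 here), which is why the
# assertions only compare the prefix before the predicate.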
def test_xpath_evaluator(self):
tree = self.parse('<a><b><c></c></b></a>')
e = etree.XPathEvaluator(tree)
root = tree.getroot()
self.assertEqual(
[root],
e('//a'))
def test_xpath_evaluator_tree(self):
tree = self.parse('<a><b><c></c></b></a>')
child_tree = etree.ElementTree(tree.getroot()[0])
e = etree.XPathEvaluator(child_tree)
self.assertEqual(
[],
e('a'))
root = child_tree.getroot()
self.assertEqual(
[root[0]],
e('c'))
def test_xpath_evaluator_tree_absolute(self):
tree = self.parse('<a><b><c></c></b></a>')
child_tree = etree.ElementTree(tree.getroot()[0])
e = etree.XPathEvaluator(child_tree)
self.assertEqual(
[],
e('/a'))
root = child_tree.getroot()
self.assertEqual(
[root],
e('/b'))
self.assertEqual(
[],
e('/c'))
def test_xpath_evaluator_element(self):
tree = self.parse('<a><b><c></c></b></a>')
root = tree.getroot()
e = etree.XPathEvaluator(root[0])
self.assertEqual(
[root[0][0]],
e('c'))
def test_xpath_extensions(self):
def foo(evaluator, a):
return 'hello %s' % a
extension = {(None, 'foo'): foo}
tree = self.parse('<a><b></b></a>')
e = etree.XPathEvaluator(tree, extensions=[extension])
self.assertEqual(
"hello you", e("foo('you')"))
def test_xpath_extensions_wrong_args(self):
def foo(evaluator, a, b):
return "hello %s and %s" % (a, b)
extension = {(None, 'foo'): foo}
tree = self.parse('<a><b></b></a>')
e = etree.XPathEvaluator(tree, extensions=[extension])
self.assertRaises(TypeError, e, "foo('you')")
def test_xpath_extensions_error(self):
def foo(evaluator, a):
return 1/0
extension = {(None, 'foo'): foo}
tree = self.parse('<a/>')
e = etree.XPathEvaluator(tree, extensions=[extension])
self.assertRaises(ZeroDivisionError, e, "foo('test')")
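# As exercised above, Python exceptions raised inside an extension function
# propagate out of the XPath evaluation unchanged (here the
# ZeroDivisionError), rather than being mapped to an XPathEvalError.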
def test_xpath_extensions_nodes(self):
def f(evaluator, arg):
r = etree.Element('results')
b = etree.SubElement(r, 'result')
b.text = 'Hoi'
b = etree.SubElement(r, 'result')
b.text = 'Dag'
return r
x = self.parse('<a/>')
e = etree.XPathEvaluator(x, extensions=[{(None, 'foo'): f}])
r = e("foo('World')/result")
self.assertEqual(2, len(r))
self.assertEqual('Hoi', r[0].text)
self.assertEqual('Dag', r[1].text)
def test_xpath_extensions_nodes_append(self):
def f(evaluator, nodes):
r = etree.SubElement(nodes[0], 'results')
b = etree.SubElement(r, 'result')
b.text = 'Hoi'
b = etree.SubElement(r, 'result')
b.text = 'Dag'
return r
x = self.parse('<a/>')
e = etree.XPathEvaluator(x, extensions=[{(None, 'foo'): f}])
r = e("foo(/*)/result")
self.assertEqual(2, len(r))
self.assertEqual('Hoi', r[0].text)
self.assertEqual('Dag', r[1].text)
def test_xpath_extensions_nodes_append2(self):
def f(evaluator, nodes):
r = etree.Element('results')
b = etree.SubElement(r, 'result')
b.text = 'Hoi'
b = etree.SubElement(r, 'result')
b.text = 'Dag'
r.append(nodes[0])
return r
x = self.parse('<result>Honk</result>')
e = etree.XPathEvaluator(x, extensions=[{(None, 'foo'): f}])
r = e("foo(/*)/result")
self.assertEqual(3, len(r))
self.assertEqual('Hoi', r[0].text)
self.assertEqual('Dag', r[1].text)
self.assertEqual('Honk', r[2].text)
def test_xpath_context_node(self):
tree = self.parse('<root><a/><b><c/></b></root>')
check_call = []
def check_context(ctxt, nodes):
self.assertEqual(len(nodes), 1)
check_call.append(nodes[0].tag)
self.assertEqual(ctxt.context_node, nodes[0])
return True
find = etree.XPath("//*[p:foo(.)]",
namespaces={'p' : 'ns'},
extensions=[{('ns', 'foo') : check_context}])
find(tree)
check_call.sort()
self.assertEqual(check_call, ["a", "b", "c", "root"])
def test_xpath_eval_context_propagation(self):
tree = self.parse('<root><a/><b><c/></b></root>')
check_call = {}
def check_context(ctxt, nodes):
self.assertEqual(len(nodes), 1)
tag = nodes[0].tag
# empty during the "b" call, a "b" during the "c" call
check_call[tag] = ctxt.eval_context.get("b")
ctxt.eval_context[tag] = tag
return True
find = etree.XPath("//b[p:foo(.)]/c[p:foo(.)]",
namespaces={'p' : 'ns'},
extensions=[{('ns', 'foo') : check_context}])
result = find(tree)
self.assertEqual(result, [tree.getroot()[1][0]])
self.assertEqual(check_call, {'b':None, 'c':'b'})
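# ctxt.eval_context is a scratch dict scoped to a single evaluation and
# shared by all extension calls within it -- that is why the 'c' invocation
# above can see the value stored during the earlier 'b' invocation.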
def test_xpath_eval_context_clear(self):
tree = self.parse('<root><a/><b><c/></b></root>')
check_call = {}
def check_context(ctxt):
check_call["done"] = True
# context must be empty for each new evaluation
self.assertEqual(len(ctxt.eval_context), 0)
ctxt.eval_context["test"] = True
return True
find = etree.XPath("//b[p:foo()]",
namespaces={'p' : 'ns'},
extensions=[{('ns', 'foo') : check_context}])
result = find(tree)
self.assertEqual(result, [tree.getroot()[1]])
self.assertEqual(check_call["done"], True)
check_call.clear()
find = etree.XPath("//b[p:foo()]",
namespaces={'p' : 'ns'},
extensions=[{('ns', 'foo') : check_context}])
result = find(tree)
self.assertEqual(result, [tree.getroot()[1]])
self.assertEqual(check_call["done"], True)
def test_xpath_variables(self):
x = self.parse('<a attr="true"/>')
e = etree.XPathEvaluator(x)
expr = "/a[@attr=$aval]"
r = e(expr, aval=1)
self.assertEqual(0, len(r))
r = e(expr, aval="true")
self.assertEqual(1, len(r))
self.assertEqual("true", r[0].get('attr'))
r = e(expr, aval=True)
self.assertEqual(1, len(r))
self.assertEqual("true", r[0].get('attr'))
def test_xpath_variables_nodeset(self):
x = self.parse('<a attr="true"/>')
e = etree.XPathEvaluator(x)
element = etree.Element("test-el")
etree.SubElement(element, "test-sub")
expr = "$value"
r = e(expr, value=element)
self.assertEqual(1, len(r))
self.assertEqual(element.tag, r[0].tag)
self.assertEqual(element[0].tag, r[0][0].tag)
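# XPath variables ($aval, $value) are bound through keyword arguments at
# call time and mapped to the matching XPath types (string, number, boolean,
# node-set), so the same expression can be re-evaluated with different
# values without re-parsing.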
def test_xpath_extensions_mix(self):
x = self.parse('<a attr="true"><test/></a>')
class LocalException(Exception):
pass
def foo(evaluator, a, varval):
etree.Element("DUMMY")
if varval == 0:
raise LocalException
elif varval == 1:
return ()
elif varval == 2:
return None
elif varval == 3:
return a[0][0]
a = a[0]
if a.get("attr") == str(varval):
return a
else:
return etree.Element("NODE")
extension = {(None, 'foo'): foo}
e = etree.XPathEvaluator(x, extensions=[extension])
del x
self.assertRaises(LocalException, e, "foo(., 0)")
self.assertRaises(LocalException, e, "foo(., $value)", value=0)
r = e("foo(., $value)", value=1)
self.assertEqual(len(r), 0)
r = e("foo(., 1)")
self.assertEqual(len(r), 0)
r = e("foo(., $value)", value=2)
self.assertEqual(len(r), 0)
r = e("foo(., $value)", value=3)
self.assertEqual(len(r), 1)
self.assertEqual(r[0].tag, "test")
r = e("foo(., $value)", value="false")
self.assertEqual(len(r), 1)
self.assertEqual(r[0].tag, "NODE")
r = e("foo(., 'false')")
self.assertEqual(len(r), 1)
self.assertEqual(r[0].tag, "NODE")
r = e("foo(., 'true')")
self.assertEqual(len(r), 1)
self.assertEqual(r[0].tag, "a")
self.assertEqual(r[0][0].tag, "test")
r = e("foo(., $value)", value="true")
self.assertEqual(len(r), 1)
self.assertEqual(r[0].tag, "a")
self.assertRaises(LocalException, e, "foo(., 0)")
self.assertRaises(LocalException, e, "foo(., $value)", value=0)
class ETreeXPathClassTestCase(HelperTestCase):
"Tests for the XPath class"
def test_xpath_compile_doc(self):
x = self.parse('<a attr="true"/>')
expr = etree.XPath("/a[@attr != 'true']")
r = expr(x)
self.assertEqual(0, len(r))
expr = etree.XPath("/a[@attr = 'true']")
r = expr(x)
self.assertEqual(1, len(r))
expr = etree.XPath(expr.path)
r = expr(x)
self.assertEqual(1, len(r))
def test_xpath_compile_element(self):
x = self.parse('<a><b/><c/></a>')
root = x.getroot()
expr = etree.XPath("./b")
r = expr(root)
self.assertEqual(1, len(r))
self.assertEqual('b', r[0].tag)
expr = etree.XPath("./*")
r = expr(root)
self.assertEqual(2, len(r))
def test_xpath_compile_vars(self):
x = self.parse('<a attr="true"/>')
expr = etree.XPath("/a[@attr=$aval]")
r = expr(x, aval=False)
self.assertEqual(0, len(r))
r = expr(x, aval=True)
self.assertEqual(1, len(r))
def test_xpath_compile_error(self):
self.assertRaises(SyntaxError, etree.XPath, '\\fad')
def test_xpath_elementtree_error(self):
self.assertRaises(ValueError, etree.XPath('*'), etree.ElementTree())
class ETreeXPathExsltTestCase(HelperTestCase):
"Tests for the EXSLT support in XPath (requires libxslt 1.1.25+)"
NSMAP = dict(
date = "http://exslt.org/dates-and-times",
math = "http://exslt.org/math",
set = "http://exslt.org/sets",
str = "http://exslt.org/strings",
)
def test_xpath_exslt_functions_date(self):
tree = self.parse('<a><b>2009-11-12</b><b>2008-12-11</b></a>')
match_dates = tree.xpath('//b[date:year(string()) = 2009]',
namespaces=self.NSMAP)
self.assertTrue(match_dates, str(match_dates))
self.assertEqual(len(match_dates), 1, str(match_dates))
self.assertEqual(match_dates[0].text, '2009-11-12')
def test_xpath_exslt_functions_strings(self):
tree = self.parse('<a><b>2009-11-12</b><b>2008-12-11</b></a>')
aligned_date = tree.xpath(
'str:align(string(//b[1]), "%s", "center")' % ('-'*20),
namespaces=self.NSMAP)
self.assertTrue(aligned_date, str(aligned_date))
self.assertEqual(aligned_date, '-----2009-11-12-----')
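# The date:* and str:* functions above are EXSLT extensions resolved through
# their standard namespace URIs; as the class docstring notes, they are only
# available when lxml was built against libxslt 1.1.25 or later.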
class ETreeETXPathClassTestCase(HelperTestCase):
"Tests for the ETXPath class"
def test_xpath_compile_ns(self):
x = self.parse('<a><b xmlns="nsa"/><b xmlns="nsb"/></a>')
expr = etree.ETXPath("/a/{nsa}b")
r = expr(x)
self.assertEqual(1, len(r))
self.assertEqual('{nsa}b', r[0].tag)
expr = etree.ETXPath("/a/{nsb}b")
r = expr(x)
self.assertEqual(1, len(r))
self.assertEqual('{nsb}b', r[0].tag)
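# ETXPath accepts ElementTree-style '{uri}tag' steps and takes care of the
# prefix/namespace mapping internally, so callers never have to manage a
# namespaces dict themselves.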
# disabled this test as non-ASCII characters in namespace URIs are
# not acceptable
def _test_xpath_compile_unicode(self):
x = self.parse(_bytes('<a><b xmlns="http://nsa/\\uf8d2"/><b xmlns="http://nsb/\\uf8d1"/></a>'
).decode("unicode_escape"))
expr = etree.ETXPath(_bytes("/a/{http://nsa/\\uf8d2}b").decode("unicode_escape"))
r = expr(x)
self.assertEqual(1, len(r))
self.assertEqual(_bytes('{http://nsa/\\uf8d2}b').decode("unicode_escape"), r[0].tag)
expr = etree.ETXPath(_bytes("/a/{http://nsb/\\uf8d1}b").decode("unicode_escape"))
r = expr(x)
self.assertEqual(1, len(r))
self.assertEqual(_bytes('{http://nsb/\\uf8d1}b').decode("unicode_escape"), r[0].tag)
SAMPLE_XML = etree.parse(BytesIO(_bytes("""
<body>
<tag>text</tag>
<section>
<tag>subtext</tag>
</section>
<tag />
<tag />
</body>
""")))
def tag(elem):
return elem.tag
def tag_or_value(elem):
return getattr(elem, 'tag', elem)
def stringTest(ctxt, s1):
return "Hello "+s1
def stringListTest(ctxt, s1):
return ["Hello "] + list(s1) + ["!"]
def floatTest(ctxt, f1):
return f1+4
def booleanTest(ctxt, b1):
return not b1
def setTest(ctxt, st1):
return st1[0]
def setTest2(ctxt, st1):
return st1[0:2]
def argsTest1(ctxt, s, f, b, st):
return ", ".join(map(str, (s, f, b, list(map(tag, st)))))
def argsTest2(ctxt, st1, st2):
st1.extend(st2)
return st1
def resultTypesTest(ctxt):
return [None,None]
def resultTypesTest2(ctxt):
return resultTypesTest
uri = "http://www.example.com/"
extension = {(None, 'stringTest'): stringTest,
(None, 'stringListTest'): stringListTest,
(None, 'floatTest'): floatTest,
(None, 'booleanTest'): booleanTest,
(None, 'setTest'): setTest,
(None, 'setTest2'): setTest2,
(None, 'argsTest1'): argsTest1,
(None, 'argsTest2'): argsTest2,
(None, 'resultTypesTest'): resultTypesTest,
(None, 'resultTypesTest2'): resultTypesTest2,}
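# Extension functions are keyed by (namespace-uri, local-name); a None URI
# registers them in the default function namespace. Each function receives
# the evaluation context as its first argument, followed by the XPath
# arguments converted to Python strings, floats, booleans or node lists.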
def xpath():
"""
Test xpath extension functions.
>>> root = SAMPLE_XML
>>> e = etree.XPathEvaluator(root, extensions=[extension])
>>> e("stringTest('you')")
'Hello you'
>>> e(_bytes("stringTest('\\\\xe9lan')").decode("unicode_escape"))
u'Hello \\xe9lan'
>>> e("stringTest('you','there')") #doctest: +ELLIPSIS
Traceback (most recent call last):
...
TypeError: stringTest() takes... 2 ...arguments ...
>>> e("floatTest(2)")
6.0
>>> e("booleanTest(true())")
False
>>> list(map(tag, e("setTest(/body/tag)")))
['tag']
>>> list(map(tag, e("setTest2(/body/*)")))
['tag', 'section']
>>> list(map(tag_or_value, e("stringListTest(/body/tag)")))
['Hello ', 'tag', 'tag', 'tag', '!']
>>> e("argsTest1('a',1.5,true(),/body/tag)")
"a, 1.5, True, ['tag', 'tag', 'tag']"
>>> list(map(tag, e("argsTest2(/body/tag, /body/section)")))
['tag', 'section', 'tag', 'tag']
>>> e("resultTypesTest()")
Traceback (most recent call last):
...
XPathResultError: This is not a supported node-set result: None
>>> try:
... e("resultTypesTest2()")
... except etree.XPathResultError:
... print("Got error")
Got error
"""
if sys.version_info[0] >= 3:
xpath.__doc__ = xpath.__doc__.replace(" u'", " '")
xpath.__doc__ = xpath.__doc__.replace(" XPathResultError",
" lxml.etree.XPathResultError")
xpath.__doc__ = xpath.__doc__.replace(" exactly 2 arguments",
" exactly 2 positional arguments")
def test_suite():
suite = unittest.TestSuite()
suite.addTests([unittest.makeSuite(ETreeXPathTestCase)])
suite.addTests([unittest.makeSuite(ETreeXPathClassTestCase)])
if etree.LIBXSLT_COMPILED_VERSION >= (1,1,25):
suite.addTests([unittest.makeSuite(ETreeXPathExsltTestCase)])
suite.addTests([unittest.makeSuite(ETreeETXPathClassTestCase)])
suite.addTests([doctest.DocTestSuite()])
suite.addTests(
[make_doctest('../../../doc/xpathxslt.txt')])
return suite
if __name__ == '__main__':
print('to test use test.py %s' % __file__)
| apache-2.0 |
zhjunlang/kbengine | kbe/src/lib/python/Lib/test/test_memoryview.py | 72 | 16069 | """Unit tests for the memoryview
Some tests are in test_bytes. Many tests that require _testbuffer.ndarray
are in test_buffer.
"""
import unittest
import test.support
import sys
import gc
import weakref
import array
import io
class AbstractMemoryTests:
source_bytes = b"abcdef"
@property
def _source(self):
return self.source_bytes
@property
def _types(self):
return filter(None, [self.ro_type, self.rw_type])
def check_getitem_with_type(self, tp):
b = tp(self._source)
oldrefcount = sys.getrefcount(b)
m = self._view(b)
self.assertEqual(m[0], ord(b"a"))
self.assertIsInstance(m[0], int)
self.assertEqual(m[5], ord(b"f"))
self.assertEqual(m[-1], ord(b"f"))
self.assertEqual(m[-6], ord(b"a"))
# Bounds checking
self.assertRaises(IndexError, lambda: m[6])
self.assertRaises(IndexError, lambda: m[-7])
self.assertRaises(IndexError, lambda: m[sys.maxsize])
self.assertRaises(IndexError, lambda: m[-sys.maxsize])
# Type checking
self.assertRaises(TypeError, lambda: m[None])
self.assertRaises(TypeError, lambda: m[0.0])
self.assertRaises(TypeError, lambda: m["a"])
m = None
self.assertEqual(sys.getrefcount(b), oldrefcount)
def test_getitem(self):
for tp in self._types:
self.check_getitem_with_type(tp)
def test_iter(self):
for tp in self._types:
b = tp(self._source)
m = self._view(b)
self.assertEqual(list(m), [m[i] for i in range(len(m))])
def test_setitem_readonly(self):
if not self.ro_type:
self.skipTest("no read-only type to test")
b = self.ro_type(self._source)
oldrefcount = sys.getrefcount(b)
m = self._view(b)
def setitem(value):
m[0] = value
self.assertRaises(TypeError, setitem, b"a")
self.assertRaises(TypeError, setitem, 65)
self.assertRaises(TypeError, setitem, memoryview(b"a"))
m = None
self.assertEqual(sys.getrefcount(b), oldrefcount)
def test_setitem_writable(self):
if not self.rw_type:
self.skipTest("no writable type to test")
tp = self.rw_type
b = self.rw_type(self._source)
oldrefcount = sys.getrefcount(b)
m = self._view(b)
m[0] = ord(b'1')
self._check_contents(tp, b, b"1bcdef")
m[0:1] = tp(b"0")
self._check_contents(tp, b, b"0bcdef")
m[1:3] = tp(b"12")
self._check_contents(tp, b, b"012def")
m[1:1] = tp(b"")
self._check_contents(tp, b, b"012def")
m[:] = tp(b"abcdef")
self._check_contents(tp, b, b"abcdef")
# Overlapping copies of a view into itself
m[0:3] = m[2:5]
self._check_contents(tp, b, b"cdedef")
m[:] = tp(b"abcdef")
m[2:5] = m[0:3]
self._check_contents(tp, b, b"ababcf")
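# The overlapping assignments above must behave as if the source slice were
# copied out first (memmove-style), hence b"cdedef" and b"ababcf" rather
# than results contaminated by partially overwritten data.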
def setitem(key, value):
m[key] = tp(value)
# Bounds checking
self.assertRaises(IndexError, setitem, 6, b"a")
self.assertRaises(IndexError, setitem, -7, b"a")
self.assertRaises(IndexError, setitem, sys.maxsize, b"a")
self.assertRaises(IndexError, setitem, -sys.maxsize, b"a")
# Wrong index/slice types
self.assertRaises(TypeError, setitem, 0.0, b"a")
self.assertRaises(TypeError, setitem, (0,), b"a")
self.assertRaises(TypeError, setitem, (slice(0,1,1), 0), b"a")
self.assertRaises(TypeError, setitem, (0, slice(0,1,1)), b"a")
self.assertRaises(TypeError, setitem, (0,), b"a")
self.assertRaises(TypeError, setitem, "a", b"a")
# Not implemented: multidimensional slices
slices = (slice(0,1,1), slice(0,1,2))
self.assertRaises(NotImplementedError, setitem, slices, b"a")
# Trying to resize the memory object
exc = ValueError if m.format == 'c' else TypeError
self.assertRaises(exc, setitem, 0, b"")
self.assertRaises(exc, setitem, 0, b"ab")
self.assertRaises(ValueError, setitem, slice(1,1), b"a")
self.assertRaises(ValueError, setitem, slice(0,2), b"a")
m = None
self.assertEqual(sys.getrefcount(b), oldrefcount)
def test_delitem(self):
for tp in self._types:
b = tp(self._source)
m = self._view(b)
with self.assertRaises(TypeError):
del m[1]
with self.assertRaises(TypeError):
del m[1:4]
def test_tobytes(self):
for tp in self._types:
m = self._view(tp(self._source))
b = m.tobytes()
# This calls self.getitem_type() on each separate byte of b"abcdef"
expected = b"".join(
self.getitem_type(bytes([c])) for c in b"abcdef")
self.assertEqual(b, expected)
self.assertIsInstance(b, bytes)
def test_tolist(self):
for tp in self._types:
m = self._view(tp(self._source))
l = m.tolist()
self.assertEqual(l, list(b"abcdef"))
def test_compare(self):
# memoryviews can compare for equality with other objects
# having the buffer interface.
for tp in self._types:
m = self._view(tp(self._source))
for tp_comp in self._types:
self.assertTrue(m == tp_comp(b"abcdef"))
self.assertFalse(m != tp_comp(b"abcdef"))
self.assertFalse(m == tp_comp(b"abcde"))
self.assertTrue(m != tp_comp(b"abcde"))
self.assertFalse(m == tp_comp(b"abcde1"))
self.assertTrue(m != tp_comp(b"abcde1"))
self.assertTrue(m == m)
self.assertTrue(m == m[:])
self.assertTrue(m[0:6] == m[:])
self.assertFalse(m[0:5] == m)
# Comparison with objects which don't support the buffer API
self.assertFalse(m == "abcdef")
self.assertTrue(m != "abcdef")
self.assertFalse("abcdef" == m)
self.assertTrue("abcdef" != m)
# Unordered comparisons
for c in (m, b"abcdef"):
self.assertRaises(TypeError, lambda: m < c)
self.assertRaises(TypeError, lambda: c <= m)
self.assertRaises(TypeError, lambda: m >= c)
self.assertRaises(TypeError, lambda: c > m)
def check_attributes_with_type(self, tp):
m = self._view(tp(self._source))
self.assertEqual(m.format, self.format)
self.assertEqual(m.itemsize, self.itemsize)
self.assertEqual(m.ndim, 1)
self.assertEqual(m.shape, (6,))
self.assertEqual(len(m), 6)
self.assertEqual(m.strides, (self.itemsize,))
self.assertEqual(m.suboffsets, ())
return m
def test_attributes_readonly(self):
if not self.ro_type:
self.skipTest("no read-only type to test")
m = self.check_attributes_with_type(self.ro_type)
self.assertEqual(m.readonly, True)
def test_attributes_writable(self):
if not self.rw_type:
self.skipTest("no writable type to test")
m = self.check_attributes_with_type(self.rw_type)
self.assertEqual(m.readonly, False)
def test_getbuffer(self):
# Test PyObject_GetBuffer() on a memoryview object.
for tp in self._types:
b = tp(self._source)
oldrefcount = sys.getrefcount(b)
m = self._view(b)
oldviewrefcount = sys.getrefcount(m)
s = str(m, "utf-8")
self._check_contents(tp, b, s.encode("utf-8"))
self.assertEqual(sys.getrefcount(m), oldviewrefcount)
m = None
self.assertEqual(sys.getrefcount(b), oldrefcount)
def test_gc(self):
for tp in self._types:
if not isinstance(tp, type):
# If tp is a factory rather than a plain type, skip
continue
class MyView():
def __init__(self, base):
self.m = memoryview(base)
class MySource(tp):
pass
class MyObject:
pass
# Create a reference cycle through a memoryview object.
# This exercises mbuf_clear().
b = MySource(tp(b'abc'))
m = self._view(b)
o = MyObject()
b.m = m
b.o = o
wr = weakref.ref(o)
b = m = o = None
# The cycle must be broken
gc.collect()
self.assertTrue(wr() is None, wr())
# This exercises memory_clear().
m = MyView(tp(b'abc'))
o = MyObject()
m.x = m
m.o = o
wr = weakref.ref(o)
m = o = None
# The cycle must be broken
gc.collect()
self.assertTrue(wr() is None, wr())
def _check_released(self, m, tp):
check = self.assertRaisesRegex(ValueError, "released")
with check: bytes(m)
with check: m.tobytes()
with check: m.tolist()
with check: m[0]
with check: m[0] = b'x'
with check: len(m)
with check: m.format
with check: m.itemsize
with check: m.ndim
with check: m.readonly
with check: m.shape
with check: m.strides
with check:
with m:
pass
# str() and repr() still function
self.assertIn("released memory", str(m))
self.assertIn("released memory", repr(m))
self.assertEqual(m, m)
self.assertNotEqual(m, memoryview(tp(self._source)))
self.assertNotEqual(m, tp(self._source))
def test_contextmanager(self):
for tp in self._types:
b = tp(self._source)
m = self._view(b)
with m as cm:
self.assertIs(cm, m)
self._check_released(m, tp)
m = self._view(b)
# Can release explicitly inside the context manager
with m:
m.release()
def test_release(self):
for tp in self._types:
b = tp(self._source)
m = self._view(b)
m.release()
self._check_released(m, tp)
# Can be called a second time (it's a no-op)
m.release()
self._check_released(m, tp)
def test_writable_readonly(self):
# Issue #10451: memoryview incorrectly exposes a readonly
# buffer as writable causing a segfault if using mmap
tp = self.ro_type
if tp is None:
self.skipTest("no read-only type to test")
b = tp(self._source)
m = self._view(b)
i = io.BytesIO(b'ZZZZ')
self.assertRaises(TypeError, i.readinto, m)
def test_getbuf_fail(self):
self.assertRaises(TypeError, self._view, {})
def test_hash(self):
# Memoryviews of readonly (hashable) types are hashable, and they
# hash as hash(obj.tobytes()).
tp = self.ro_type
if tp is None:
self.skipTest("no read-only type to test")
b = tp(self._source)
m = self._view(b)
self.assertEqual(hash(m), hash(b"abcdef"))
# Releasing the memoryview keeps the stored hash value (as with weakrefs)
m.release()
self.assertEqual(hash(m), hash(b"abcdef"))
# Hashing a memoryview for the first time after it is released
# results in an error (as with weakrefs).
m = self._view(b)
m.release()
self.assertRaises(ValueError, hash, m)
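# Summary of the hash semantics exercised above: a view over a read-only
# buffer hashes like its bytes (hash(m) == hash(m.tobytes())), the value is
# cached across release(), but hashing a released view for the first time
# raises ValueError.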
def test_hash_writable(self):
# Memoryviews of writable types are unhashable
tp = self.rw_type
if tp is None:
self.skipTest("no writable type to test")
b = tp(self._source)
m = self._view(b)
self.assertRaises(ValueError, hash, m)
def test_weakref(self):
# Check memoryviews are weakrefable
for tp in self._types:
b = tp(self._source)
m = self._view(b)
L = []
def callback(wr, b=b):
L.append(b)
wr = weakref.ref(m, callback)
self.assertIs(wr(), m)
del m
test.support.gc_collect()
self.assertIs(wr(), None)
self.assertIs(L[0], b)
def test_reversed(self):
for tp in self._types:
b = tp(self._source)
m = self._view(b)
aslist = list(reversed(m.tolist()))
self.assertEqual(list(reversed(m)), aslist)
self.assertEqual(list(reversed(m)), list(m[::-1]))
# Variations on source objects for the buffer: bytes-like objects, then arrays
# with itemsize > 1.
# NOTE: support for multi-dimensional objects is unimplemented.
class BaseBytesMemoryTests(AbstractMemoryTests):
ro_type = bytes
rw_type = bytearray
getitem_type = bytes
itemsize = 1
format = 'B'
class BaseArrayMemoryTests(AbstractMemoryTests):
ro_type = None
rw_type = lambda self, b: array.array('i', list(b))
getitem_type = lambda self, b: array.array('i', list(b)).tobytes()
itemsize = array.array('i').itemsize
format = 'i'
@unittest.skip('XXX test should be adapted for non-byte buffers')
def test_getbuffer(self):
pass
@unittest.skip('XXX NotImplementedError: tolist() only supports byte views')
def test_tolist(self):
pass
# Variations on indirection levels: memoryview, slice of memoryview,
# slice of slice of memoryview.
# This is important to test allocation subtleties.
class BaseMemoryviewTests:
def _view(self, obj):
return memoryview(obj)
def _check_contents(self, tp, obj, contents):
self.assertEqual(obj, tp(contents))
class BaseMemorySliceTests:
source_bytes = b"XabcdefY"
def _view(self, obj):
m = memoryview(obj)
return m[1:7]
def _check_contents(self, tp, obj, contents):
self.assertEqual(obj[1:7], tp(contents))
def test_refs(self):
for tp in self._types:
m = memoryview(tp(self._source))
oldrefcount = sys.getrefcount(m)
m[1:2]
self.assertEqual(sys.getrefcount(m), oldrefcount)
class BaseMemorySliceSliceTests:
source_bytes = b"XabcdefY"
def _view(self, obj):
m = memoryview(obj)
return m[:7][1:]
def _check_contents(self, tp, obj, contents):
self.assertEqual(obj[1:7], tp(contents))
# Concrete test classes
class BytesMemoryviewTest(unittest.TestCase,
BaseMemoryviewTests, BaseBytesMemoryTests):
def test_constructor(self):
for tp in self._types:
ob = tp(self._source)
self.assertTrue(memoryview(ob))
self.assertTrue(memoryview(object=ob))
self.assertRaises(TypeError, memoryview)
self.assertRaises(TypeError, memoryview, ob, ob)
self.assertRaises(TypeError, memoryview, argument=ob)
self.assertRaises(TypeError, memoryview, ob, argument=True)
class ArrayMemoryviewTest(unittest.TestCase,
BaseMemoryviewTests, BaseArrayMemoryTests):
def test_array_assign(self):
# Issue #4569: segfault when mutating a memoryview with itemsize != 1
a = array.array('i', range(10))
m = memoryview(a)
new_a = array.array('i', range(9, -1, -1))
m[:] = new_a
self.assertEqual(a, new_a)
class BytesMemorySliceTest(unittest.TestCase,
BaseMemorySliceTests, BaseBytesMemoryTests):
pass
class ArrayMemorySliceTest(unittest.TestCase,
BaseMemorySliceTests, BaseArrayMemoryTests):
pass
class BytesMemorySliceSliceTest(unittest.TestCase,
BaseMemorySliceSliceTests, BaseBytesMemoryTests):
pass
class ArrayMemorySliceSliceTest(unittest.TestCase,
BaseMemorySliceSliceTests, BaseArrayMemoryTests):
pass
def test_main():
test.support.run_unittest(__name__)
if __name__ == "__main__":
test_main()
| lgpl-3.0 |
dataxu/ansible | test/units/modules/network/ovs/ovs_module.py | 73 | 2381 | # (c) 2017 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
from units.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except:
pass
fixture_data[path] = data
return data
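# load_fixture() caches file contents per path and opportunistically decodes
# JSON, falling back to the raw text for non-JSON fixture files.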
class TestOpenVSwitchModule(ModuleTestCase):
def execute_module(self, failed=False, changed=False,
commands=None, test_name=None):
self.load_fixtures(test_name)
if failed:
result = self.failed()
self.assertTrue(result['failed'], result)
else:
result = self.changed(changed)
self.assertEqual(result['changed'], changed, result)
if commands is not None:
self.assertEqual(commands, result['commands'], result['commands'])
return result
def failed(self):
with self.assertRaises(AnsibleFailJson) as exc:
self.module.main()
result = exc.exception.args[0]
self.assertTrue(result['failed'], result)
return result
def changed(self, changed=False):
with self.assertRaises(AnsibleExitJson) as exc:
self.module.main()
result = exc.exception.args[0]
self.assertEqual(result['changed'], changed, result)
return result
def load_fixtures(self, test_name):
pass
| gpl-3.0 |
cyx1231st/workflow_parser | workflow_parser/workflow/entities/request.py | 1 | 9364 | # Copyright (c) 2017 Yingxin Cheng
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import defaultdict
from itertools import chain
from .bases import ActivityBase
from .bases import IntervalBase
from .bases import Pace
from .bases import RequestinsBase
from .join import JoinActivityBase
from .join import InnerjoinActivity
from .join import RequestjoinActivity
from .join import CrossjoinActivity
from .threadins import ThreadActivity
class ExtendedInterval(IntervalBase):
def __init__(self, from_pace, to_pace):
assert isinstance(from_pace, Pace)
assert isinstance(to_pace, Pace)
assert from_pace.threadins is to_pace.threadins
super(ExtendedInterval, self).__init__()
self.from_pace = from_pace
self.to_pace = to_pace
@property
def int_name(self):
return str(self.component)
@property
def component(self):
return self.from_pace.component
@property
def threadins(self):
return self.from_pace.threadins
@property
def requestins(self):
return self.from_pace.requestins
def __repr_intlabels__(self):
labels = super(ExtendedInterval, self).__repr_intlabels__()
labels += "!EX"
return labels
class RequestInstance(RequestinsBase):
_index_dict = defaultdict(lambda: 0)
def __init__(self, request, builder):
super(RequestInstance, self).__init__(request)
self.request_vars = defaultdict(set)
self.threadinss = []
self.thread_objs = set()
self.target_objs = set()
self.len_paces = 0
self.len_main_paces = 0
self._start_activity = None
self._end_activity = None
self.last_activity = None
self.activities_bymark = defaultdict(list)
self.innerjoin_activities = set()
self.requestjoin_activities = set()
self.crossjoinl_activities = set()
self.crossjoinr_activities = set()
self.emptyjoins_activities = set()
self.emptyjoined_activities = set()
self._builder = builder
@property
def start_activity(self):
return self._start_activity
@start_activity.setter
def start_activity(self, act):
assert isinstance(act, ThreadActivity)
assert act.to_pace
assert act.is_request_start
self.from_pace = act.to_pace
self._start_activity = act
@property
def end_activity(self):
return self._end_activity
@end_activity.setter
def end_activity(self, act):
assert isinstance(act, ThreadActivity)
assert act.from_pace
assert act.is_request_end
self.to_pace = act.from_pace
self._end_activity = act
@property
def request_type(self):
ret = self.start_activity.request_type
assert ret
return ret
@property
def request_state(self):
ret = self.end_activity.request_state
assert ret
return ret
@property
def components(self):
return self.request_vars["component"]
@property
def hosts(self):
return self.request_vars["host"]
@property
def last_seconds(self):
return self.last_activity.from_seconds
@property
def last_time(self):
return self.last_activity.from_time
@property
def cnt_nested(self):
assert len(self.crossjoinl_activities) == len(self.crossjoinr_activities)
return len(self.crossjoinl_activities)
def __repr_marks__(self):
marks = ""
if self.requestjoin_activities:
marks += ", %d reqs" % len(self.requestjoin_activities)
if self.crossjoinr_activities or self.crossjoinl_activities:
marks += ", (%d %d) cross" % (len(self.crossjoinl_activities),
len(self.crossjoinr_activities))
if self.emptyjoins_activities:
marks += ", %d emptyjoins" % len(self.emptyjoins_activities)
if self.emptyjoined_activities:
marks += ", %d emptyjoined" % len(self.emptyjoined_activities)
if self.activities_bymark:
marks += ", %d marks" % len(self.activities_bymark)
if not self._builder.is_built:
marks += ", ~BUILD"
return marks
def __repr__(self):
return "<RIns#%s: %s@%s, %d(main %d) paces, %d hosts, %d(%d) threads, "\
"%d vars, %d inner, [%.3f-%.3f(%.3f)]%s>" % (
self.request,
self.request_type,
self.request_state,
self.len_paces,
self.len_main_paces,
len(self.hosts),
len(self.thread_objs),
len(self.threadinss),
len(self.request_vars),
len(self.innerjoin_activities),
self.from_seconds,
self.to_seconds,
self.last_seconds,
self.__repr_marks__())
def __str__(self):
ret = repr(self)
ret += "\n VARS:"
for k, v in self.request_vars.items():
ret += "\n %s: %s" % (k, ",".join(str(v_) for v_ in v))
ret += "\n THREADINSS:"
for tis in self.threadinss:
ret += "\n %r" % tis
if self.innerjoin_activities:
ret += "\n INNER:"
for j in self.innerjoin_activities:
ret += "\n %r" % j
if self.requestjoin_activities:
ret += "\n REQUEST:"
for j in self.requestjoin_activities:
ret += "\n %r" % j
if self.crossjoinl_activities:
ret += "\n CROSSL:"
for j in self.crossjoinl_activities:
ret += "\n %r" % j
if self.crossjoinr_activities:
ret += "\n CROSSR:"
for j in self.crossjoinr_activities:
ret += "\n %r" % j
if self.emptyjoins_activities:
ret += "\n EMPTYJOINS:"
for j in self.emptyjoins_activities:
ret += "\n %r" % j
if self.emptyjoined_activities:
ret += "\n EMPTYJOINED:"
for j in self.emptyjoined_activities:
ret += "\n %r" % j
ret += "\n\n### MAIN ###:"
ret += "\n%r" % self.start_activity.threadins
activity = self.start_activity
while activity:
assert isinstance(activity, ActivityBase)
assert activity.is_main
if isinstance(activity, RequestjoinActivity):
if not activity.is_nest:
ret += "\n%r" % activity.to_threadins
mark = "~"
elif isinstance(activity, InnerjoinActivity):
ret += "\n%r" % activity.to_threadins
mark = "-"
elif isinstance(activity, CrossjoinActivity):
if not activity.is_left:
ret += "\n%r" % activity.to_threadins
mark = "~"
else:
mark = "|"
ret += "\n%s %s" % (mark, activity.__repr_to__())
if activity.to_pace:
activity = activity.to_pace.nxt_main_activity
else:
break
ret += "\n"
return ret
def automate_name(self):
if self._request is None:
assert self.request_type is not None
self._index_dict[self.request_type] += 1
self.request = "%s%d" % (self.request_type, self._index_dict[self.request_type])
def iter_joins(self):
for join in chain(self.innerjoin_activities,
self.requestjoin_activities):
assert join.is_interval
yield join
def iter_mainints(self, extended=False):
activity = self.start_activity.to_pace.nxt_main_activity
if not extended:
while activity is not None and activity.is_interval:
yield activity
pre = activity
activity = activity.to_pace.nxt_main_activity
else:
raise NotImplementedError()
from_pace = activity.from_pace
while activity is not None and activity.is_interval:
if isinstance(activity, JoinActivityBase):
if from_pace is not activity.from_pace:
yield ExtendedInterval(from_pace, activity.from_pace)
yield activity
from_pace = activity.to_pace
else:
assert isinstance(activity, ThreadActivity)
activity = activity.to_pace.nxt_main_activity
if from_pace is not activity.from_pace:
yield ExtendedInterval(from_pace, activity.from_pace)
def iter_threadints(self):
for tis in self.threadinss:
for int_ in tis.iter_ints():
yield int_
| apache-2.0 |
nlalevee/spark | python/pyspark/tests.py | 13 | 96302 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Unit tests for PySpark; additional tests are implemented as doctests in
individual modules.
"""
from array import array
from glob import glob
import os
import re
import shutil
import subprocess
import sys
import tempfile
import time
import zipfile
import random
import threading
import hashlib
from py4j.protocol import Py4JJavaError
try:
import xmlrunner
except ImportError:
xmlrunner = None
if sys.version_info[:2] <= (2, 6):
try:
import unittest2 as unittest
except ImportError:
sys.stderr.write('Please install unittest2 to test with Python 2.6 or earlier')
sys.exit(1)
else:
import unittest
if sys.version_info[0] >= 3:
xrange = range
basestring = str
if sys.version >= "3":
from io import StringIO
else:
from StringIO import StringIO
from pyspark import keyword_only
from pyspark.conf import SparkConf
from pyspark.context import SparkContext
from pyspark.rdd import RDD
from pyspark.files import SparkFiles
from pyspark.serializers import read_int, BatchedSerializer, MarshalSerializer, PickleSerializer, \
CloudPickleSerializer, CompressedSerializer, UTF8Deserializer, NoOpSerializer, \
PairDeserializer, CartesianDeserializer, AutoBatchedSerializer, AutoSerializer, \
FlattenedValuesSerializer
from pyspark.shuffle import Aggregator, ExternalMerger, ExternalSorter
from pyspark import shuffle
from pyspark.profiler import BasicProfiler
from pyspark.taskcontext import TaskContext
_have_scipy = False
_have_numpy = False
try:
import scipy.sparse
_have_scipy = True
except:
# No SciPy, but that's okay, we'll skip those tests
pass
try:
import numpy as np
_have_numpy = True
except:
# No NumPy, but that's okay, we'll skip those tests
pass
SPARK_HOME = os.environ["SPARK_HOME"]
class MergerTests(unittest.TestCase):
def setUp(self):
self.N = 1 << 12
self.l = [i for i in xrange(self.N)]
self.data = list(zip(self.l, self.l))
self.agg = Aggregator(lambda x: [x],
lambda x, y: x.append(y) or x,
lambda x, y: x.extend(y) or x)
def test_small_dataset(self):
m = ExternalMerger(self.agg, 1000)
m.mergeValues(self.data)
self.assertEqual(m.spills, 0)
self.assertEqual(sum(sum(v) for k, v in m.items()),
sum(xrange(self.N)))
m = ExternalMerger(self.agg, 1000)
m.mergeCombiners(map(lambda x_y1: (x_y1[0], [x_y1[1]]), self.data))
self.assertEqual(m.spills, 0)
self.assertEqual(sum(sum(v) for k, v in m.items()),
sum(xrange(self.N)))
def test_medium_dataset(self):
m = ExternalMerger(self.agg, 20)
m.mergeValues(self.data)
self.assertTrue(m.spills >= 1)
self.assertEqual(sum(sum(v) for k, v in m.items()),
sum(xrange(self.N)))
m = ExternalMerger(self.agg, 10)
m.mergeCombiners(map(lambda x_y2: (x_y2[0], [x_y2[1]]), self.data * 3))
self.assertTrue(m.spills >= 1)
self.assertEqual(sum(sum(v) for k, v in m.items()),
sum(xrange(self.N)) * 3)
def test_huge_dataset(self):
m = ExternalMerger(self.agg, 5, partitions=3)
m.mergeCombiners(map(lambda k_v: (k_v[0], [str(k_v[1])]), self.data * 10))
self.assertTrue(m.spills >= 1)
self.assertEqual(sum(len(v) for k, v in m.items()),
self.N * 10)
m._cleanup()
def test_group_by_key(self):
def gen_data(N, step):
for i in range(1, N + 1, step):
for j in range(i):
yield (i, [j])
def gen_gs(N, step=1):
return shuffle.GroupByKey(gen_data(N, step))
self.assertEqual(1, len(list(gen_gs(1))))
self.assertEqual(2, len(list(gen_gs(2))))
self.assertEqual(100, len(list(gen_gs(100))))
self.assertEqual(list(range(1, 101)), [k for k, _ in gen_gs(100)])
self.assertTrue(all(list(range(k)) == list(vs) for k, vs in gen_gs(100)))
for k, vs in gen_gs(50002, 10000):
self.assertEqual(k, len(vs))
self.assertEqual(list(range(k)), list(vs))
ser = PickleSerializer()
l = ser.loads(ser.dumps(list(gen_gs(50002, 30000))))
for k, vs in l:
self.assertEqual(k, len(vs))
self.assertEqual(list(range(k)), list(vs))
class SorterTests(unittest.TestCase):
def test_in_memory_sort(self):
l = list(range(1024))
random.shuffle(l)
sorter = ExternalSorter(1024)
self.assertEqual(sorted(l), list(sorter.sorted(l)))
self.assertEqual(sorted(l, reverse=True), list(sorter.sorted(l, reverse=True)))
self.assertEqual(sorted(l, key=lambda x: -x), list(sorter.sorted(l, key=lambda x: -x)))
self.assertEqual(sorted(l, key=lambda x: -x, reverse=True),
list(sorter.sorted(l, key=lambda x: -x, reverse=True)))
def test_external_sort(self):
class CustomizedSorter(ExternalSorter):
def _next_limit(self):
return self.memory_limit
l = list(range(1024))
random.shuffle(l)
sorter = CustomizedSorter(1)
self.assertEqual(sorted(l), list(sorter.sorted(l)))
self.assertGreater(shuffle.DiskBytesSpilled, 0)
last = shuffle.DiskBytesSpilled
self.assertEqual(sorted(l, reverse=True), list(sorter.sorted(l, reverse=True)))
self.assertGreater(shuffle.DiskBytesSpilled, last)
last = shuffle.DiskBytesSpilled
self.assertEqual(sorted(l, key=lambda x: -x), list(sorter.sorted(l, key=lambda x: -x)))
self.assertGreater(shuffle.DiskBytesSpilled, last)
last = shuffle.DiskBytesSpilled
self.assertEqual(sorted(l, key=lambda x: -x, reverse=True),
list(sorter.sorted(l, key=lambda x: -x, reverse=True)))
self.assertGreater(shuffle.DiskBytesSpilled, last)
def test_external_sort_in_rdd(self):
conf = SparkConf().set("spark.python.worker.memory", "1m")
sc = SparkContext(conf=conf)
l = list(range(10240))
random.shuffle(l)
rdd = sc.parallelize(l, 4)
self.assertEqual(sorted(l), rdd.sortBy(lambda x: x).collect())
sc.stop()
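# The 1m spark.python.worker.memory setting above is meant to force the sort
# to spill to disk, so that sortBy() exercises the external-sort code path
# end to end rather than sorting entirely in memory.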
class SerializationTestCase(unittest.TestCase):
def test_namedtuple(self):
from collections import namedtuple
from pickle import dumps, loads
P = namedtuple("P", "x y")
p1 = P(1, 3)
p2 = loads(dumps(p1, 2))
self.assertEqual(p1, p2)
from pyspark.cloudpickle import dumps
P2 = loads(dumps(P))
p3 = P2(1, 3)
self.assertEqual(p1, p3)
def test_itemgetter(self):
from operator import itemgetter
ser = CloudPickleSerializer()
d = range(10)
getter = itemgetter(1)
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
getter = itemgetter(0, 3)
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
def test_function_module_name(self):
ser = CloudPickleSerializer()
func = lambda x: x
func2 = ser.loads(ser.dumps(func))
self.assertEqual(func.__module__, func2.__module__)
def test_attrgetter(self):
from operator import attrgetter
ser = CloudPickleSerializer()
class C(object):
def __getattr__(self, item):
return item
d = C()
getter = attrgetter("a")
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
getter = attrgetter("a", "b")
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
d.e = C()
getter = attrgetter("e.a")
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
getter = attrgetter("e.a", "e.b")
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
# Regression test for SPARK-3415
def test_pickling_file_handles(self):
# to be corrected with SPARK-11160
if not xmlrunner:
ser = CloudPickleSerializer()
out1 = sys.stderr
out2 = ser.loads(ser.dumps(out1))
self.assertEqual(out1, out2)
def test_func_globals(self):
class Unpicklable(object):
def __reduce__(self):
raise Exception("not picklable")
global exit
exit = Unpicklable()
ser = CloudPickleSerializer()
self.assertRaises(Exception, lambda: ser.dumps(exit))
def foo():
sys.exit(0)
self.assertTrue("exit" in foo.__code__.co_names)
ser.dumps(foo)
def test_compressed_serializer(self):
ser = CompressedSerializer(PickleSerializer())
try:
from StringIO import StringIO
except ImportError:
from io import BytesIO as StringIO
io = StringIO()
ser.dump_stream(["abc", u"123", range(5)], io)
io.seek(0)
self.assertEqual(["abc", u"123", range(5)], list(ser.load_stream(io)))
ser.dump_stream(range(1000), io)
io.seek(0)
self.assertEqual(["abc", u"123", range(5)] + list(range(1000)), list(ser.load_stream(io)))
io.close()
def test_hash_serializer(self):
hash(NoOpSerializer())
hash(UTF8Deserializer())
hash(PickleSerializer())
hash(MarshalSerializer())
hash(AutoSerializer())
hash(BatchedSerializer(PickleSerializer()))
hash(AutoBatchedSerializer(MarshalSerializer()))
hash(PairDeserializer(NoOpSerializer(), UTF8Deserializer()))
hash(CartesianDeserializer(NoOpSerializer(), UTF8Deserializer()))
hash(CompressedSerializer(PickleSerializer()))
hash(FlattenedValuesSerializer(PickleSerializer()))
class QuietTest(object):
def __init__(self, sc):
self.log4j = sc._jvm.org.apache.log4j
def __enter__(self):
self.old_level = self.log4j.LogManager.getRootLogger().getLevel()
self.log4j.LogManager.getRootLogger().setLevel(self.log4j.Level.FATAL)
def __exit__(self, exc_type, exc_val, exc_tb):
self.log4j.LogManager.getRootLogger().setLevel(self.old_level)
class PySparkTestCase(unittest.TestCase):
def setUp(self):
self._old_sys_path = list(sys.path)
class_name = self.__class__.__name__
self.sc = SparkContext('local[4]', class_name)
def tearDown(self):
self.sc.stop()
sys.path = self._old_sys_path
class ReusedPySparkTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.sc = SparkContext('local[4]', cls.__name__)
@classmethod
def tearDownClass(cls):
cls.sc.stop()
class CheckpointTests(ReusedPySparkTestCase):
def setUp(self):
self.checkpointDir = tempfile.NamedTemporaryFile(delete=False)
os.unlink(self.checkpointDir.name)
self.sc.setCheckpointDir(self.checkpointDir.name)
def tearDown(self):
shutil.rmtree(self.checkpointDir.name)
def test_basic_checkpointing(self):
parCollection = self.sc.parallelize([1, 2, 3, 4])
flatMappedRDD = parCollection.flatMap(lambda x: range(1, x + 1))
self.assertFalse(flatMappedRDD.isCheckpointed())
self.assertTrue(flatMappedRDD.getCheckpointFile() is None)
flatMappedRDD.checkpoint()
result = flatMappedRDD.collect()
time.sleep(1) # 1 second
self.assertTrue(flatMappedRDD.isCheckpointed())
self.assertEqual(flatMappedRDD.collect(), result)
self.assertEqual("file:" + self.checkpointDir.name,
os.path.dirname(os.path.dirname(flatMappedRDD.getCheckpointFile())))
def test_checkpoint_and_restore(self):
parCollection = self.sc.parallelize([1, 2, 3, 4])
flatMappedRDD = parCollection.flatMap(lambda x: [x])
self.assertFalse(flatMappedRDD.isCheckpointed())
self.assertTrue(flatMappedRDD.getCheckpointFile() is None)
flatMappedRDD.checkpoint()
flatMappedRDD.count() # forces a checkpoint to be computed
time.sleep(1) # 1 second
self.assertTrue(flatMappedRDD.getCheckpointFile() is not None)
recovered = self.sc._checkpointFile(flatMappedRDD.getCheckpointFile(),
flatMappedRDD._jrdd_deserializer)
self.assertEqual([1, 2, 3, 4], recovered.collect())
class LocalCheckpointTests(ReusedPySparkTestCase):
def test_basic_localcheckpointing(self):
parCollection = self.sc.parallelize([1, 2, 3, 4])
flatMappedRDD = parCollection.flatMap(lambda x: range(1, x + 1))
self.assertFalse(flatMappedRDD.isCheckpointed())
self.assertFalse(flatMappedRDD.isLocallyCheckpointed())
flatMappedRDD.localCheckpoint()
result = flatMappedRDD.collect()
time.sleep(1) # 1 second
self.assertTrue(flatMappedRDD.isCheckpointed())
self.assertTrue(flatMappedRDD.isLocallyCheckpointed())
self.assertEqual(flatMappedRDD.collect(), result)
class AddFileTests(PySparkTestCase):
def test_add_py_file(self):
# To ensure that we're actually testing addPyFile's effects, check that
# this job fails due to `userlibrary` not being on the Python path:
# disable logging in log4j temporarily
def func(x):
from userlibrary import UserClass
return UserClass().hello()
with QuietTest(self.sc):
self.assertRaises(Exception, self.sc.parallelize(range(2)).map(func).first)
# Add the file, so the job should now succeed:
path = os.path.join(SPARK_HOME, "python/test_support/userlibrary.py")
self.sc.addPyFile(path)
res = self.sc.parallelize(range(2)).map(func).first()
self.assertEqual("Hello World!", res)
def test_add_file_locally(self):
path = os.path.join(SPARK_HOME, "python/test_support/hello/hello.txt")
self.sc.addFile(path)
download_path = SparkFiles.get("hello.txt")
self.assertNotEqual(path, download_path)
with open(download_path) as test_file:
self.assertEqual("Hello World!\n", test_file.readline())
def test_add_file_recursively_locally(self):
path = os.path.join(SPARK_HOME, "python/test_support/hello")
self.sc.addFile(path, True)
download_path = SparkFiles.get("hello")
self.assertNotEqual(path, download_path)
with open(download_path + "/hello.txt") as test_file:
self.assertEqual("Hello World!\n", test_file.readline())
with open(download_path + "/sub_hello/sub_hello.txt") as test_file:
self.assertEqual("Sub Hello World!\n", test_file.readline())
def test_add_py_file_locally(self):
# To ensure that we're actually testing addPyFile's effects, check that
# this fails due to `userlibrary` not being on the Python path:
def func():
from userlibrary import UserClass
self.assertRaises(ImportError, func)
path = os.path.join(SPARK_HOME, "python/test_support/userlibrary.py")
self.sc.addPyFile(path)
from userlibrary import UserClass
self.assertEqual("Hello World!", UserClass().hello())
def test_add_egg_file_locally(self):
# To ensure that we're actually testing addPyFile's effects, check that
# this fails due to `userlibrary` not being on the Python path:
def func():
from userlib import UserClass
self.assertRaises(ImportError, func)
path = os.path.join(SPARK_HOME, "python/test_support/userlib-0.1.zip")
self.sc.addPyFile(path)
from userlib import UserClass
self.assertEqual("Hello World from inside a package!", UserClass().hello())
def test_overwrite_system_module(self):
self.sc.addPyFile(os.path.join(SPARK_HOME, "python/test_support/SimpleHTTPServer.py"))
import SimpleHTTPServer
self.assertEqual("My Server", SimpleHTTPServer.__name__)
def func(x):
import SimpleHTTPServer
return SimpleHTTPServer.__name__
self.assertEqual(["My Server"], self.sc.parallelize(range(1)).map(func).collect())
class TaskContextTests(PySparkTestCase):
def setUp(self):
self._old_sys_path = list(sys.path)
class_name = self.__class__.__name__
# Allow retries even though they are normally disabled in local mode
self.sc = SparkContext('local[4, 2]', class_name)
def test_stage_id(self):
"""Test the stage ids are available and incrementing as expected."""
rdd = self.sc.parallelize(range(10))
stage1 = rdd.map(lambda x: TaskContext.get().stageId()).take(1)[0]
stage2 = rdd.map(lambda x: TaskContext.get().stageId()).take(1)[0]
# Test using the constructor directly rather than the get()
stage3 = rdd.map(lambda x: TaskContext().stageId()).take(1)[0]
self.assertEqual(stage1 + 1, stage2)
self.assertEqual(stage1 + 2, stage3)
self.assertEqual(stage2 + 1, stage3)
def test_partition_id(self):
"""Test the partition id."""
rdd1 = self.sc.parallelize(range(10), 1)
rdd2 = self.sc.parallelize(range(10), 2)
pids1 = rdd1.map(lambda x: TaskContext.get().partitionId()).collect()
pids2 = rdd2.map(lambda x: TaskContext.get().partitionId()).collect()
self.assertEqual(0, pids1[0])
self.assertEqual(0, pids1[9])
self.assertEqual(0, pids2[0])
self.assertEqual(1, pids2[9])
def test_attempt_number(self):
"""Verify the attempt numbers are correctly reported."""
rdd = self.sc.parallelize(range(10))
# Verify a simple job with no failures
attempt_numbers = rdd.map(lambda x: TaskContext.get().attemptNumber()).collect()
for attempt in attempt_numbers:
    self.assertEqual(0, attempt)
def fail_on_first(x):
"""Fail on the first attempt so we get a positive attempt number"""
tc = TaskContext.get()
attempt_number = tc.attemptNumber()
partition_id = tc.partitionId()
attempt_id = tc.taskAttemptId()
if attempt_number == 0 and partition_id == 0:
raise Exception("Failing on first attempt")
else:
return [x, partition_id, attempt_number, attempt_id]
result = rdd.map(fail_on_first).collect()
# We should re-submit the first partition to it but other partitions should be attempt 0
self.assertEqual([0, 0, 1], result[0][0:3])
self.assertEqual([9, 3, 0], result[9][0:3])
first_partition = [x for x in result if x[1] == 0]
for x in first_partition:
    self.assertEqual(1, x[2])
other_partitions = [x for x in result if x[1] != 0]
for x in other_partitions:
    self.assertEqual(0, x[2])
# The task attempt id should be different
self.assertTrue(result[0][3] != result[9][3])
def test_tc_on_driver(self):
"""Verify that getting the TaskContext on the driver returns None."""
tc = TaskContext.get()
self.assertTrue(tc is None)
class RDDTests(ReusedPySparkTestCase):
def test_range(self):
self.assertEqual(self.sc.range(1, 1).count(), 0)
self.assertEqual(self.sc.range(1, 0, -1).count(), 1)
self.assertEqual(self.sc.range(0, 1 << 40, 1 << 39).count(), 2)
def test_id(self):
rdd = self.sc.parallelize(range(10))
id = rdd.id()
self.assertEqual(id, rdd.id())
rdd2 = rdd.map(str).filter(bool)
id2 = rdd2.id()
self.assertEqual(id + 1, id2)
self.assertEqual(id2, rdd2.id())
def test_empty_rdd(self):
rdd = self.sc.emptyRDD()
self.assertTrue(rdd.isEmpty())
def test_sum(self):
self.assertEqual(0, self.sc.emptyRDD().sum())
self.assertEqual(6, self.sc.parallelize([1, 2, 3]).sum())
def test_to_localiterator(self):
from time import sleep
rdd = self.sc.parallelize([1, 2, 3])
it = rdd.toLocalIterator()
sleep(5)
self.assertEqual([1, 2, 3], sorted(it))
rdd2 = rdd.repartition(1000)
it2 = rdd2.toLocalIterator()
sleep(5)
self.assertEqual([1, 2, 3], sorted(it2))
def test_save_as_textfile_with_unicode(self):
# Regression test for SPARK-970
x = u"\u00A1Hola, mundo!"
data = self.sc.parallelize([x])
tempFile = tempfile.NamedTemporaryFile(delete=True)
tempFile.close()
data.saveAsTextFile(tempFile.name)
raw_contents = b''.join(open(p, 'rb').read()
for p in glob(tempFile.name + "/part-0000*"))
self.assertEqual(x, raw_contents.strip().decode("utf-8"))
def test_save_as_textfile_with_utf8(self):
x = u"\u00A1Hola, mundo!"
data = self.sc.parallelize([x.encode("utf-8")])
tempFile = tempfile.NamedTemporaryFile(delete=True)
tempFile.close()
data.saveAsTextFile(tempFile.name)
raw_contents = b''.join(open(p, 'rb').read()
for p in glob(tempFile.name + "/part-0000*"))
self.assertEqual(x, raw_contents.strip().decode('utf8'))
def test_transforming_cartesian_result(self):
# Regression test for SPARK-1034
rdd1 = self.sc.parallelize([1, 2])
rdd2 = self.sc.parallelize([3, 4])
cart = rdd1.cartesian(rdd2)
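        # SPARK-1034: transforming a cartesian result used to fail, so
        # completing this job without an error is the check here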
result = cart.map(lambda x_y3: x_y3[0] + x_y3[1]).collect()
def test_transforming_pickle_file(self):
# Regression test for SPARK-2601
data = self.sc.parallelize([u"Hello", u"World!"])
tempFile = tempfile.NamedTemporaryFile(delete=True)
tempFile.close()
data.saveAsPickleFile(tempFile.name)
pickled_file = self.sc.pickleFile(tempFile.name)
pickled_file.map(lambda x: x).collect()
def test_cartesian_on_textfile(self):
# Regression test for
path = os.path.join(SPARK_HOME, "python/test_support/hello/hello.txt")
a = self.sc.textFile(path)
result = a.cartesian(a).collect()
(x, y) = result[0]
self.assertEqual(u"Hello World!", x.strip())
self.assertEqual(u"Hello World!", y.strip())
def test_cartesian_chaining(self):
# Tests for SPARK-16589
rdd = self.sc.parallelize(range(10), 2)
self.assertSetEqual(
set(rdd.cartesian(rdd).cartesian(rdd).collect()),
set([((x, y), z) for x in range(10) for y in range(10) for z in range(10)])
)
self.assertSetEqual(
set(rdd.cartesian(rdd.cartesian(rdd)).collect()),
set([(x, (y, z)) for x in range(10) for y in range(10) for z in range(10)])
)
self.assertSetEqual(
set(rdd.cartesian(rdd.zip(rdd)).collect()),
set([(x, (y, y)) for x in range(10) for y in range(10)])
)
def test_zip_chaining(self):
# Tests for SPARK-21985
rdd = self.sc.parallelize('abc', 2)
self.assertSetEqual(
set(rdd.zip(rdd).zip(rdd).collect()),
set([((x, x), x) for x in 'abc'])
)
self.assertSetEqual(
set(rdd.zip(rdd.zip(rdd)).collect()),
set([(x, (x, x)) for x in 'abc'])
)
def test_deleting_input_files(self):
# Regression test for SPARK-1025
tempFile = tempfile.NamedTemporaryFile(delete=False)
tempFile.write(b"Hello World!")
tempFile.close()
data = self.sc.textFile(tempFile.name)
filtered_data = data.filter(lambda x: True)
self.assertEqual(1, filtered_data.count())
os.unlink(tempFile.name)
with QuietTest(self.sc):
self.assertRaises(Exception, lambda: filtered_data.count())
def test_sampling_default_seed(self):
# Test for SPARK-3995 (default seed setting)
data = self.sc.parallelize(xrange(1000), 1)
subset = data.takeSample(False, 10)
self.assertEqual(len(subset), 10)
def test_aggregate_mutable_zero_value(self):
# Test for SPARK-9021; uses aggregate and treeAggregate to build dict
# representing a counter of ints
# NOTE: dict is used instead of collections.Counter for Python 2.6
# compatibility
from collections import defaultdict
# Show that single or multiple partitions work
data1 = self.sc.range(10, numSlices=1)
data2 = self.sc.range(10, numSlices=2)
def seqOp(x, y):
x[y] += 1
return x
def comboOp(x, y):
for key, val in y.items():
x[key] += val
return x
counts1 = data1.aggregate(defaultdict(int), seqOp, comboOp)
counts2 = data2.aggregate(defaultdict(int), seqOp, comboOp)
counts3 = data1.treeAggregate(defaultdict(int), seqOp, comboOp, 2)
counts4 = data2.treeAggregate(defaultdict(int), seqOp, comboOp, 2)
ground_truth = defaultdict(int, dict((i, 1) for i in range(10)))
self.assertEqual(counts1, ground_truth)
self.assertEqual(counts2, ground_truth)
self.assertEqual(counts3, ground_truth)
self.assertEqual(counts4, ground_truth)
def test_aggregate_by_key_mutable_zero_value(self):
# Test for SPARK-9021; uses aggregateByKey to make a pair RDD that
# contains lists of all values for each key in the original RDD
# list(range(...)) for Python 3.x compatibility (can't use * operator
# on a range object)
# list(zip(...)) for Python 3.x compatibility (want to parallelize a
# collection, not a zip object)
tuples = list(zip(list(range(10))*2, [1]*20))
# Show that single or multiple partitions work
data1 = self.sc.parallelize(tuples, 1)
data2 = self.sc.parallelize(tuples, 2)
def seqOp(x, y):
x.append(y)
return x
def comboOp(x, y):
x.extend(y)
return x
values1 = data1.aggregateByKey([], seqOp, comboOp).collect()
values2 = data2.aggregateByKey([], seqOp, comboOp).collect()
# Sort lists to ensure clean comparison with ground_truth
values1.sort()
values2.sort()
ground_truth = [(i, [1]*2) for i in range(10)]
self.assertEqual(values1, ground_truth)
self.assertEqual(values2, ground_truth)
def test_fold_mutable_zero_value(self):
# Test for SPARK-9021; uses fold to merge an RDD of dict counters into
# a single dict
# NOTE: dict is used instead of collections.Counter for Python 2.6
# compatibility
from collections import defaultdict
counts1 = defaultdict(int, dict((i, 1) for i in range(10)))
counts2 = defaultdict(int, dict((i, 1) for i in range(3, 8)))
counts3 = defaultdict(int, dict((i, 1) for i in range(4, 7)))
counts4 = defaultdict(int, dict((i, 1) for i in range(5, 6)))
all_counts = [counts1, counts2, counts3, counts4]
# Show that single or multiple partitions work
data1 = self.sc.parallelize(all_counts, 1)
data2 = self.sc.parallelize(all_counts, 2)
def comboOp(x, y):
for key, val in y.items():
x[key] += val
return x
fold1 = data1.fold(defaultdict(int), comboOp)
fold2 = data2.fold(defaultdict(int), comboOp)
ground_truth = defaultdict(int)
for counts in all_counts:
for key, val in counts.items():
ground_truth[key] += val
self.assertEqual(fold1, ground_truth)
self.assertEqual(fold2, ground_truth)
def test_fold_by_key_mutable_zero_value(self):
# Test for SPARK-9021; uses foldByKey to make a pair RDD that contains
# lists of all values for each key in the original RDD
tuples = [(i, range(i)) for i in range(10)]*2
# Show that single or multiple partitions work
data1 = self.sc.parallelize(tuples, 1)
data2 = self.sc.parallelize(tuples, 2)
def comboOp(x, y):
x.extend(y)
return x
values1 = data1.foldByKey([], comboOp).collect()
values2 = data2.foldByKey([], comboOp).collect()
# Sort lists to ensure clean comparison with ground_truth
values1.sort()
values2.sort()
# list(range(...)) for Python 3.x compatibility
ground_truth = [(i, list(range(i))*2) for i in range(10)]
self.assertEqual(values1, ground_truth)
self.assertEqual(values2, ground_truth)
def test_aggregate_by_key(self):
data = self.sc.parallelize([(1, 1), (1, 1), (3, 2), (5, 1), (5, 3)], 2)
def seqOp(x, y):
x.add(y)
return x
def combOp(x, y):
x |= y
return x
sets = dict(data.aggregateByKey(set(), seqOp, combOp).collect())
self.assertEqual(3, len(sets))
self.assertEqual(set([1]), sets[1])
self.assertEqual(set([2]), sets[3])
self.assertEqual(set([1, 3]), sets[5])
def test_itemgetter(self):
rdd = self.sc.parallelize([range(10)])
from operator import itemgetter
self.assertEqual([1], rdd.map(itemgetter(1)).collect())
self.assertEqual([(2, 3)], rdd.map(itemgetter(2, 3)).collect())
def test_namedtuple_in_rdd(self):
from collections import namedtuple
Person = namedtuple("Person", "id firstName lastName")
jon = Person(1, "Jon", "Doe")
jane = Person(2, "Jane", "Doe")
theDoes = self.sc.parallelize([jon, jane])
self.assertEqual([jon, jane], theDoes.collect())
def test_large_broadcast(self):
N = 10000
data = [[float(i) for i in range(300)] for i in range(N)]
bdata = self.sc.broadcast(data) # 27MB
m = self.sc.parallelize(range(1), 1).map(lambda x: len(bdata.value)).sum()
self.assertEqual(N, m)
def test_unpersist(self):
N = 1000
data = [[float(i) for i in range(300)] for i in range(N)]
bdata = self.sc.broadcast(data) # 3MB
bdata.unpersist()
m = self.sc.parallelize(range(1), 1).map(lambda x: len(bdata.value)).sum()
self.assertEqual(N, m)
bdata.destroy()
try:
self.sc.parallelize(range(1), 1).map(lambda x: len(bdata.value)).sum()
        except Exception:
            pass
        else:
            raise Exception("job should fail after the broadcast is destroyed")
def test_multiple_broadcasts(self):
N = 1 << 21
b1 = self.sc.broadcast(set(range(N))) # multiple blocks in JVM
r = list(range(1 << 15))
random.shuffle(r)
s = str(r).encode()
checksum = hashlib.md5(s).hexdigest()
b2 = self.sc.broadcast(s)
r = list(set(self.sc.parallelize(range(10), 10).map(
lambda x: (len(b1.value), hashlib.md5(b2.value).hexdigest())).collect()))
self.assertEqual(1, len(r))
size, csum = r[0]
self.assertEqual(N, size)
self.assertEqual(checksum, csum)
random.shuffle(r)
s = str(r).encode()
checksum = hashlib.md5(s).hexdigest()
b2 = self.sc.broadcast(s)
r = list(set(self.sc.parallelize(range(10), 10).map(
lambda x: (len(b1.value), hashlib.md5(b2.value).hexdigest())).collect()))
self.assertEqual(1, len(r))
size, csum = r[0]
self.assertEqual(N, size)
self.assertEqual(checksum, csum)
def test_multithread_broadcast_pickle(self):
import threading
b1 = self.sc.broadcast(list(range(3)))
b2 = self.sc.broadcast(list(range(3)))
def f1():
return b1.value
def f2():
return b2.value
funcs_num_pickled = {f1: None, f2: None}
def do_pickle(f, sc):
command = (f, None, sc.serializer, sc.serializer)
ser = CloudPickleSerializer()
ser.dumps(command)
def process_vars(sc):
broadcast_vars = list(sc._pickled_broadcast_vars)
num_pickled = len(broadcast_vars)
sc._pickled_broadcast_vars.clear()
return num_pickled
def run(f, sc):
do_pickle(f, sc)
funcs_num_pickled[f] = process_vars(sc)
# pickle f1, adds b1 to sc._pickled_broadcast_vars in main thread local storage
do_pickle(f1, self.sc)
# run all for f2, should only add/count/clear b2 from worker thread local storage
t = threading.Thread(target=run, args=(f2, self.sc))
t.start()
t.join()
# count number of vars pickled in main thread, only b1 should be counted and cleared
funcs_num_pickled[f1] = process_vars(self.sc)
self.assertEqual(funcs_num_pickled[f1], 1)
self.assertEqual(funcs_num_pickled[f2], 1)
self.assertEqual(len(list(self.sc._pickled_broadcast_vars)), 0)
def test_large_closure(self):
N = 200000
data = [float(i) for i in xrange(N)]
rdd = self.sc.parallelize(range(1), 1).map(lambda x: len(data))
self.assertEqual(N, rdd.first())
# regression test for SPARK-6886
self.assertEqual(1, rdd.map(lambda x: (x, 1)).groupByKey().count())
def test_zip_with_different_serializers(self):
a = self.sc.parallelize(range(5))
b = self.sc.parallelize(range(100, 105))
self.assertEqual(a.zip(b).collect(), [(0, 100), (1, 101), (2, 102), (3, 103), (4, 104)])
a = a._reserialize(BatchedSerializer(PickleSerializer(), 2))
b = b._reserialize(MarshalSerializer())
self.assertEqual(a.zip(b).collect(), [(0, 100), (1, 101), (2, 102), (3, 103), (4, 104)])
# regression test for SPARK-4841
path = os.path.join(SPARK_HOME, "python/test_support/hello/hello.txt")
t = self.sc.textFile(path)
cnt = t.count()
self.assertEqual(cnt, t.zip(t).count())
rdd = t.map(str)
self.assertEqual(cnt, t.zip(rdd).count())
        # regression test for a bug in _reserialize()
self.assertEqual(cnt, t.zip(rdd).count())
def test_zip_with_different_object_sizes(self):
        # regression test for SPARK-5973
a = self.sc.parallelize(xrange(10000)).map(lambda i: '*' * i)
b = self.sc.parallelize(xrange(10000, 20000)).map(lambda i: '*' * i)
self.assertEqual(10000, a.zip(b).count())
def test_zip_with_different_number_of_items(self):
a = self.sc.parallelize(range(5), 2)
# different number of partitions
b = self.sc.parallelize(range(100, 106), 3)
self.assertRaises(ValueError, lambda: a.zip(b))
with QuietTest(self.sc):
# different number of batched items in JVM
b = self.sc.parallelize(range(100, 104), 2)
self.assertRaises(Exception, lambda: a.zip(b).count())
# different number of items in one pair
b = self.sc.parallelize(range(100, 106), 2)
self.assertRaises(Exception, lambda: a.zip(b).count())
# same total number of items, but different distributions
a = self.sc.parallelize([2, 3], 2).flatMap(range)
b = self.sc.parallelize([3, 2], 2).flatMap(range)
self.assertEqual(a.count(), b.count())
self.assertRaises(Exception, lambda: a.zip(b).count())
def test_count_approx_distinct(self):
rdd = self.sc.parallelize(xrange(1000))
self.assertTrue(950 < rdd.countApproxDistinct(0.03) < 1050)
self.assertTrue(950 < rdd.map(float).countApproxDistinct(0.03) < 1050)
self.assertTrue(950 < rdd.map(str).countApproxDistinct(0.03) < 1050)
self.assertTrue(950 < rdd.map(lambda x: (x, -x)).countApproxDistinct(0.03) < 1050)
rdd = self.sc.parallelize([i % 20 for i in range(1000)], 7)
self.assertTrue(18 < rdd.countApproxDistinct() < 22)
self.assertTrue(18 < rdd.map(float).countApproxDistinct() < 22)
self.assertTrue(18 < rdd.map(str).countApproxDistinct() < 22)
self.assertTrue(18 < rdd.map(lambda x: (x, -x)).countApproxDistinct() < 22)
self.assertRaises(ValueError, lambda: rdd.countApproxDistinct(0.00000001))
def test_histogram(self):
# empty
rdd = self.sc.parallelize([])
self.assertEqual([0], rdd.histogram([0, 10])[1])
self.assertEqual([0, 0], rdd.histogram([0, 4, 10])[1])
self.assertRaises(ValueError, lambda: rdd.histogram(1))
# out of range
rdd = self.sc.parallelize([10.01, -0.01])
self.assertEqual([0], rdd.histogram([0, 10])[1])
self.assertEqual([0, 0], rdd.histogram((0, 4, 10))[1])
# in range with one bucket
rdd = self.sc.parallelize(range(1, 5))
self.assertEqual([4], rdd.histogram([0, 10])[1])
self.assertEqual([3, 1], rdd.histogram([0, 4, 10])[1])
# in range with one bucket exact match
self.assertEqual([4], rdd.histogram([1, 4])[1])
# out of range with two buckets
rdd = self.sc.parallelize([10.01, -0.01])
self.assertEqual([0, 0], rdd.histogram([0, 5, 10])[1])
# out of range with two uneven buckets
rdd = self.sc.parallelize([10.01, -0.01])
self.assertEqual([0, 0], rdd.histogram([0, 4, 10])[1])
# in range with two buckets
rdd = self.sc.parallelize([1, 2, 3, 5, 6])
self.assertEqual([3, 2], rdd.histogram([0, 5, 10])[1])
# in range with two bucket and None
rdd = self.sc.parallelize([1, 2, 3, 5, 6, None, float('nan')])
self.assertEqual([3, 2], rdd.histogram([0, 5, 10])[1])
# in range with two uneven buckets
rdd = self.sc.parallelize([1, 2, 3, 5, 6])
self.assertEqual([3, 2], rdd.histogram([0, 5, 11])[1])
# mixed range with two uneven buckets
rdd = self.sc.parallelize([-0.01, 0.0, 1, 2, 3, 5, 6, 11.0, 11.01])
self.assertEqual([4, 3], rdd.histogram([0, 5, 11])[1])
# mixed range with four uneven buckets
rdd = self.sc.parallelize([-0.01, 0.0, 1, 2, 3, 5, 6, 11.01, 12.0, 199.0, 200.0, 200.1])
self.assertEqual([4, 2, 1, 3], rdd.histogram([0.0, 5.0, 11.0, 12.0, 200.0])[1])
# mixed range with uneven buckets and NaN
rdd = self.sc.parallelize([-0.01, 0.0, 1, 2, 3, 5, 6, 11.01, 12.0,
199.0, 200.0, 200.1, None, float('nan')])
self.assertEqual([4, 2, 1, 3], rdd.histogram([0.0, 5.0, 11.0, 12.0, 200.0])[1])
# out of range with infinite buckets
rdd = self.sc.parallelize([10.01, -0.01, float('nan'), float("inf")])
self.assertEqual([1, 2], rdd.histogram([float('-inf'), 0, float('inf')])[1])
# invalid buckets
self.assertRaises(ValueError, lambda: rdd.histogram([]))
self.assertRaises(ValueError, lambda: rdd.histogram([1]))
self.assertRaises(ValueError, lambda: rdd.histogram(0))
self.assertRaises(TypeError, lambda: rdd.histogram({}))
# without buckets
rdd = self.sc.parallelize(range(1, 5))
self.assertEqual(([1, 4], [4]), rdd.histogram(1))
# without buckets single element
rdd = self.sc.parallelize([1])
self.assertEqual(([1, 1], [1]), rdd.histogram(1))
# without bucket no range
rdd = self.sc.parallelize([1] * 4)
self.assertEqual(([1, 1], [4]), rdd.histogram(1))
# without buckets basic two
rdd = self.sc.parallelize(range(1, 5))
self.assertEqual(([1, 2.5, 4], [2, 2]), rdd.histogram(2))
# without buckets with more requested than elements
rdd = self.sc.parallelize([1, 2])
buckets = [1 + 0.2 * i for i in range(6)]
hist = [1, 0, 0, 0, 1]
self.assertEqual((buckets, hist), rdd.histogram(5))
# invalid RDDs
rdd = self.sc.parallelize([1, float('inf')])
self.assertRaises(ValueError, lambda: rdd.histogram(2))
rdd = self.sc.parallelize([float('nan')])
self.assertRaises(ValueError, lambda: rdd.histogram(2))
# string
rdd = self.sc.parallelize(["ab", "ac", "b", "bd", "ef"], 2)
self.assertEqual([2, 2], rdd.histogram(["a", "b", "c"])[1])
self.assertEqual((["ab", "ef"], [5]), rdd.histogram(1))
self.assertRaises(TypeError, lambda: rdd.histogram(2))
def test_repartitionAndSortWithinPartitions_asc(self):
rdd = self.sc.parallelize([(0, 5), (3, 8), (2, 6), (0, 8), (3, 8), (1, 3)], 2)
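        # partition keys by parity, then sort ascending within each partition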
repartitioned = rdd.repartitionAndSortWithinPartitions(2, lambda key: key % 2, True)
partitions = repartitioned.glom().collect()
self.assertEqual(partitions[0], [(0, 5), (0, 8), (2, 6)])
self.assertEqual(partitions[1], [(1, 3), (3, 8), (3, 8)])
def test_repartitionAndSortWithinPartitions_desc(self):
rdd = self.sc.parallelize([(0, 5), (3, 8), (2, 6), (0, 8), (3, 8), (1, 3)], 2)
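        # same parity partitioning, sorted descending within each partition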
repartitioned = rdd.repartitionAndSortWithinPartitions(2, lambda key: key % 2, False)
partitions = repartitioned.glom().collect()
self.assertEqual(partitions[0], [(2, 6), (0, 5), (0, 8)])
self.assertEqual(partitions[1], [(3, 8), (3, 8), (1, 3)])
def test_repartition_no_skewed(self):
num_partitions = 20
a = self.sc.parallelize(range(int(1000)), 2)
l = a.repartition(num_partitions).glom().map(len).collect()
zeros = len([x for x in l if x == 0])
self.assertTrue(zeros == 0)
l = a.coalesce(num_partitions, True).glom().map(len).collect()
zeros = len([x for x in l if x == 0])
self.assertTrue(zeros == 0)
def test_repartition_on_textfile(self):
path = os.path.join(SPARK_HOME, "python/test_support/hello/hello.txt")
rdd = self.sc.textFile(path)
result = rdd.repartition(1).collect()
self.assertEqual(u"Hello World!", result[0])
def test_distinct(self):
rdd = self.sc.parallelize((1, 2, 3)*10, 10)
self.assertEqual(rdd.getNumPartitions(), 10)
self.assertEqual(rdd.distinct().count(), 3)
result = rdd.distinct(5)
self.assertEqual(result.getNumPartitions(), 5)
self.assertEqual(result.count(), 3)
def test_external_group_by_key(self):
self.sc._conf.set("spark.python.worker.memory", "1m")
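        # the tiny 1m worker memory forces groupByKey to spill to disk, which
        # is what the ExternalListOfList assertion below verifies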
N = 200001
kv = self.sc.parallelize(xrange(N)).map(lambda x: (x % 3, x))
gkv = kv.groupByKey().cache()
self.assertEqual(3, gkv.count())
filtered = gkv.filter(lambda kv: kv[0] == 1)
self.assertEqual(1, filtered.count())
self.assertEqual([(1, N // 3)], filtered.mapValues(len).collect())
self.assertEqual([(N // 3, N // 3)],
filtered.values().map(lambda x: (len(x), len(list(x)))).collect())
result = filtered.collect()[0][1]
self.assertEqual(N // 3, len(result))
self.assertTrue(isinstance(result.data, shuffle.ExternalListOfList))
def test_sort_on_empty_rdd(self):
self.assertEqual([], self.sc.parallelize(zip([], [])).sortByKey().collect())
def test_sample(self):
rdd = self.sc.parallelize(range(0, 100), 4)
wo = rdd.sample(False, 0.1, 2).collect()
wo_dup = rdd.sample(False, 0.1, 2).collect()
self.assertSetEqual(set(wo), set(wo_dup))
wr = rdd.sample(True, 0.2, 5).collect()
wr_dup = rdd.sample(True, 0.2, 5).collect()
self.assertSetEqual(set(wr), set(wr_dup))
wo_s10 = rdd.sample(False, 0.3, 10).collect()
wo_s20 = rdd.sample(False, 0.3, 20).collect()
self.assertNotEqual(set(wo_s10), set(wo_s20))
wr_s11 = rdd.sample(True, 0.4, 11).collect()
wr_s21 = rdd.sample(True, 0.4, 21).collect()
self.assertNotEqual(set(wr_s11), set(wr_s21))
def test_null_in_rdd(self):
jrdd = self.sc._jvm.PythonUtils.generateRDDWithNull(self.sc._jsc)
rdd = RDD(jrdd, self.sc, UTF8Deserializer())
self.assertEqual([u"a", None, u"b"], rdd.collect())
rdd = RDD(jrdd, self.sc, NoOpSerializer())
self.assertEqual([b"a", None, b"b"], rdd.collect())
def test_multiple_python_java_RDD_conversions(self):
# Regression test for SPARK-5361
data = [
(u'1', {u'director': u'David Lean'}),
(u'2', {u'director': u'Andrew Dominik'})
]
data_rdd = self.sc.parallelize(data)
data_java_rdd = data_rdd._to_java_object_rdd()
data_python_rdd = self.sc._jvm.SerDeUtil.javaToPython(data_java_rdd)
converted_rdd = RDD(data_python_rdd, self.sc)
self.assertEqual(2, converted_rdd.count())
        # a second Python <-> Java RDD round-trip used to throw exceptions
data_java_rdd = converted_rdd._to_java_object_rdd()
data_python_rdd = self.sc._jvm.SerDeUtil.javaToPython(data_java_rdd)
converted_rdd = RDD(data_python_rdd, self.sc)
self.assertEqual(2, converted_rdd.count())
def test_narrow_dependency_in_join(self):
rdd = self.sc.parallelize(range(10)).map(lambda x: (x, x))
parted = rdd.partitionBy(2)
self.assertEqual(2, parted.union(parted).getNumPartitions())
self.assertEqual(rdd.getNumPartitions() + 2, parted.union(rdd).getNumPartitions())
self.assertEqual(rdd.getNumPartitions() + 2, rdd.union(parted).getNumPartitions())
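        # joins/cogroups between copartitioned RDDs are narrow dependencies,
        # so they need no extra shuffle; the stage counts below verify this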
tracker = self.sc.statusTracker()
self.sc.setJobGroup("test1", "test", True)
d = sorted(parted.join(parted).collect())
self.assertEqual(10, len(d))
self.assertEqual((0, (0, 0)), d[0])
jobId = tracker.getJobIdsForGroup("test1")[0]
self.assertEqual(2, len(tracker.getJobInfo(jobId).stageIds))
self.sc.setJobGroup("test2", "test", True)
d = sorted(parted.join(rdd).collect())
self.assertEqual(10, len(d))
self.assertEqual((0, (0, 0)), d[0])
jobId = tracker.getJobIdsForGroup("test2")[0]
self.assertEqual(3, len(tracker.getJobInfo(jobId).stageIds))
self.sc.setJobGroup("test3", "test", True)
d = sorted(parted.cogroup(parted).collect())
self.assertEqual(10, len(d))
self.assertEqual([[0], [0]], list(map(list, d[0][1])))
jobId = tracker.getJobIdsForGroup("test3")[0]
self.assertEqual(2, len(tracker.getJobInfo(jobId).stageIds))
self.sc.setJobGroup("test4", "test", True)
d = sorted(parted.cogroup(rdd).collect())
self.assertEqual(10, len(d))
self.assertEqual([[0], [0]], list(map(list, d[0][1])))
jobId = tracker.getJobIdsForGroup("test4")[0]
self.assertEqual(3, len(tracker.getJobInfo(jobId).stageIds))
    def test_take_on_jrdd(self):
        # Regression test for SPARK-6294
        rdd = self.sc.parallelize(xrange(1 << 20)).map(lambda x: str(x))
        rdd._jrdd.first()
def test_sortByKey_uses_all_partitions_not_only_first_and_last(self):
# Regression test for SPARK-5969
seq = [(i * 59 % 101, i) for i in range(101)] # unsorted sequence
rdd = self.sc.parallelize(seq)
for ascending in [True, False]:
sort = rdd.sortByKey(ascending=ascending, numPartitions=5)
self.assertEqual(sort.collect(), sorted(seq, reverse=not ascending))
sizes = sort.glom().map(len).collect()
for size in sizes:
self.assertGreater(size, 0)
def test_pipe_functions(self):
data = ['1', '2', '3']
rdd = self.sc.parallelize(data)
with QuietTest(self.sc):
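            # 'cc' exits with a non-zero status and produces no output; without
            # checkCode the failure is ignored and an empty result is returned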
self.assertEqual([], rdd.pipe('cc').collect())
self.assertRaises(Py4JJavaError, rdd.pipe('cc', checkCode=True).collect)
result = rdd.pipe('cat').collect()
result.sort()
for x, y in zip(data, result):
self.assertEqual(x, y)
self.assertRaises(Py4JJavaError, rdd.pipe('grep 4', checkCode=True).collect)
self.assertEqual([], rdd.pipe('grep 4').collect())
class ProfilerTests(PySparkTestCase):
def setUp(self):
self._old_sys_path = list(sys.path)
class_name = self.__class__.__name__
conf = SparkConf().set("spark.python.profile", "true")
self.sc = SparkContext('local[4]', class_name, conf=conf)
def test_profiler(self):
self.do_computation()
profilers = self.sc.profiler_collector.profilers
self.assertEqual(1, len(profilers))
id, profiler, _ = profilers[0]
stats = profiler.stats()
self.assertTrue(stats is not None)
width, stat_list = stats.get_print_list([])
func_names = [func_name for fname, n, func_name in stat_list]
self.assertTrue("heavy_foo" in func_names)
old_stdout = sys.stdout
sys.stdout = io = StringIO()
self.sc.show_profiles()
self.assertTrue("heavy_foo" in io.getvalue())
sys.stdout = old_stdout
d = tempfile.gettempdir()
self.sc.dump_profiles(d)
self.assertTrue("rdd_%d.pstats" % id in os.listdir(d))
def test_custom_profiler(self):
class TestCustomProfiler(BasicProfiler):
def show(self, id):
self.result = "Custom formatting"
self.sc.profiler_collector.profiler_cls = TestCustomProfiler
self.do_computation()
profilers = self.sc.profiler_collector.profilers
self.assertEqual(1, len(profilers))
_, profiler, _ = profilers[0]
self.assertTrue(isinstance(profiler, TestCustomProfiler))
self.sc.show_profiles()
self.assertEqual("Custom formatting", profiler.result)
def do_computation(self):
def heavy_foo(x):
for i in range(1 << 18):
x = 1
rdd = self.sc.parallelize(range(100))
rdd.foreach(heavy_foo)
class ProfilerTests2(unittest.TestCase):
def test_profiler_disabled(self):
sc = SparkContext(conf=SparkConf().set("spark.python.profile", "false"))
try:
self.assertRaisesRegexp(
RuntimeError,
"'spark.python.profile' configuration must be set",
lambda: sc.show_profiles())
self.assertRaisesRegexp(
RuntimeError,
"'spark.python.profile' configuration must be set",
lambda: sc.dump_profiles("/tmp/abc"))
finally:
sc.stop()
class InputFormatTests(ReusedPySparkTestCase):
@classmethod
def setUpClass(cls):
ReusedPySparkTestCase.setUpClass()
cls.tempdir = tempfile.NamedTemporaryFile(delete=False)
os.unlink(cls.tempdir.name)
cls.sc._jvm.WriteInputFormatTestDataGenerator.generateData(cls.tempdir.name, cls.sc._jsc)
@classmethod
def tearDownClass(cls):
ReusedPySparkTestCase.tearDownClass()
shutil.rmtree(cls.tempdir.name)
@unittest.skipIf(sys.version >= "3", "serialize array of byte")
def test_sequencefiles(self):
basepath = self.tempdir.name
ints = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfint/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text").collect())
ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
self.assertEqual(ints, ei)
doubles = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfdouble/",
"org.apache.hadoop.io.DoubleWritable",
"org.apache.hadoop.io.Text").collect())
ed = [(1.0, u'aa'), (1.0, u'aa'), (2.0, u'aa'), (2.0, u'bb'), (2.0, u'bb'), (3.0, u'cc')]
self.assertEqual(doubles, ed)
bytes = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfbytes/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.BytesWritable").collect())
ebs = [(1, bytearray('aa', 'utf-8')),
(1, bytearray('aa', 'utf-8')),
(2, bytearray('aa', 'utf-8')),
(2, bytearray('bb', 'utf-8')),
(2, bytearray('bb', 'utf-8')),
(3, bytearray('cc', 'utf-8'))]
self.assertEqual(bytes, ebs)
text = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sftext/",
"org.apache.hadoop.io.Text",
"org.apache.hadoop.io.Text").collect())
et = [(u'1', u'aa'),
(u'1', u'aa'),
(u'2', u'aa'),
(u'2', u'bb'),
(u'2', u'bb'),
(u'3', u'cc')]
self.assertEqual(text, et)
bools = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfbool/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.BooleanWritable").collect())
eb = [(1, False), (1, True), (2, False), (2, False), (2, True), (3, True)]
self.assertEqual(bools, eb)
nulls = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfnull/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.BooleanWritable").collect())
en = [(1, None), (1, None), (2, None), (2, None), (2, None), (3, None)]
self.assertEqual(nulls, en)
maps = self.sc.sequenceFile(basepath + "/sftestdata/sfmap/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable").collect()
em = [(1, {}),
(1, {3.0: u'bb'}),
(2, {1.0: u'aa'}),
(2, {1.0: u'cc'}),
(3, {2.0: u'dd'})]
for v in maps:
self.assertTrue(v in em)
# arrays get pickled to tuples by default
tuples = sorted(self.sc.sequenceFile(
basepath + "/sftestdata/sfarray/",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable").collect())
et = [(1, ()),
(2, (3.0, 4.0, 5.0)),
(3, (4.0, 5.0, 6.0))]
self.assertEqual(tuples, et)
# with custom converters, primitive arrays can stay as arrays
arrays = sorted(self.sc.sequenceFile(
basepath + "/sftestdata/sfarray/",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable",
valueConverter="org.apache.spark.api.python.WritableToDoubleArrayConverter").collect())
ea = [(1, array('d')),
(2, array('d', [3.0, 4.0, 5.0])),
(3, array('d', [4.0, 5.0, 6.0]))]
self.assertEqual(arrays, ea)
clazz = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfclass/",
"org.apache.hadoop.io.Text",
"org.apache.spark.api.python.TestWritable").collect())
cname = u'org.apache.spark.api.python.TestWritable'
ec = [(u'1', {u'__class__': cname, u'double': 1.0, u'int': 1, u'str': u'test1'}),
(u'2', {u'__class__': cname, u'double': 2.3, u'int': 2, u'str': u'test2'}),
(u'3', {u'__class__': cname, u'double': 3.1, u'int': 3, u'str': u'test3'}),
(u'4', {u'__class__': cname, u'double': 4.2, u'int': 4, u'str': u'test4'}),
(u'5', {u'__class__': cname, u'double': 5.5, u'int': 5, u'str': u'test56'})]
self.assertEqual(clazz, ec)
unbatched_clazz = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfclass/",
"org.apache.hadoop.io.Text",
"org.apache.spark.api.python.TestWritable",
).collect())
self.assertEqual(unbatched_clazz, ec)
def test_oldhadoop(self):
basepath = self.tempdir.name
ints = sorted(self.sc.hadoopFile(basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text").collect())
ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
self.assertEqual(ints, ei)
hellopath = os.path.join(SPARK_HOME, "python/test_support/hello/hello.txt")
oldconf = {"mapreduce.input.fileinputformat.inputdir": hellopath}
hello = self.sc.hadoopRDD("org.apache.hadoop.mapred.TextInputFormat",
"org.apache.hadoop.io.LongWritable",
"org.apache.hadoop.io.Text",
conf=oldconf).collect()
result = [(0, u'Hello World!')]
self.assertEqual(hello, result)
def test_newhadoop(self):
basepath = self.tempdir.name
ints = sorted(self.sc.newAPIHadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text").collect())
ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
self.assertEqual(ints, ei)
hellopath = os.path.join(SPARK_HOME, "python/test_support/hello/hello.txt")
newconf = {"mapreduce.input.fileinputformat.inputdir": hellopath}
hello = self.sc.newAPIHadoopRDD("org.apache.hadoop.mapreduce.lib.input.TextInputFormat",
"org.apache.hadoop.io.LongWritable",
"org.apache.hadoop.io.Text",
conf=newconf).collect()
result = [(0, u'Hello World!')]
self.assertEqual(hello, result)
def test_newolderror(self):
basepath = self.tempdir.name
self.assertRaises(Exception, lambda: self.sc.hadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text"))
self.assertRaises(Exception, lambda: self.sc.newAPIHadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text"))
def test_bad_inputs(self):
basepath = self.tempdir.name
self.assertRaises(Exception, lambda: self.sc.sequenceFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.io.NotValidWritable",
"org.apache.hadoop.io.Text"))
self.assertRaises(Exception, lambda: self.sc.hadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapred.NotValidInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text"))
self.assertRaises(Exception, lambda: self.sc.newAPIHadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapreduce.lib.input.NotValidInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text"))
def test_converters(self):
# use of custom converters
basepath = self.tempdir.name
maps = sorted(self.sc.sequenceFile(
basepath + "/sftestdata/sfmap/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable",
keyConverter="org.apache.spark.api.python.TestInputKeyConverter",
valueConverter="org.apache.spark.api.python.TestInputValueConverter").collect())
em = [(u'\x01', []),
(u'\x01', [3.0]),
(u'\x02', [1.0]),
(u'\x02', [1.0]),
(u'\x03', [2.0])]
self.assertEqual(maps, em)
def test_binary_files(self):
path = os.path.join(self.tempdir.name, "binaryfiles")
os.mkdir(path)
data = b"short binary data"
with open(os.path.join(path, "part-0000"), 'wb') as f:
f.write(data)
[(p, d)] = self.sc.binaryFiles(path).collect()
self.assertTrue(p.endswith("part-0000"))
self.assertEqual(d, data)
def test_binary_records(self):
path = os.path.join(self.tempdir.name, "binaryrecords")
os.mkdir(path)
with open(os.path.join(path, "part-0000"), 'w') as f:
for i in range(100):
f.write('%04d' % i)
result = self.sc.binaryRecords(path, 4).map(int).collect()
self.assertEqual(list(range(100)), result)
class OutputFormatTests(ReusedPySparkTestCase):
def setUp(self):
self.tempdir = tempfile.NamedTemporaryFile(delete=False)
os.unlink(self.tempdir.name)
def tearDown(self):
shutil.rmtree(self.tempdir.name, ignore_errors=True)
@unittest.skipIf(sys.version >= "3", "serialize array of byte")
def test_sequencefiles(self):
basepath = self.tempdir.name
ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
self.sc.parallelize(ei).saveAsSequenceFile(basepath + "/sfint/")
ints = sorted(self.sc.sequenceFile(basepath + "/sfint/").collect())
self.assertEqual(ints, ei)
ed = [(1.0, u'aa'), (1.0, u'aa'), (2.0, u'aa'), (2.0, u'bb'), (2.0, u'bb'), (3.0, u'cc')]
self.sc.parallelize(ed).saveAsSequenceFile(basepath + "/sfdouble/")
doubles = sorted(self.sc.sequenceFile(basepath + "/sfdouble/").collect())
self.assertEqual(doubles, ed)
ebs = [(1, bytearray(b'\x00\x07spam\x08')), (2, bytearray(b'\x00\x07spam\x08'))]
self.sc.parallelize(ebs).saveAsSequenceFile(basepath + "/sfbytes/")
bytes = sorted(self.sc.sequenceFile(basepath + "/sfbytes/").collect())
self.assertEqual(bytes, ebs)
et = [(u'1', u'aa'),
(u'2', u'bb'),
(u'3', u'cc')]
self.sc.parallelize(et).saveAsSequenceFile(basepath + "/sftext/")
text = sorted(self.sc.sequenceFile(basepath + "/sftext/").collect())
self.assertEqual(text, et)
eb = [(1, False), (1, True), (2, False), (2, False), (2, True), (3, True)]
self.sc.parallelize(eb).saveAsSequenceFile(basepath + "/sfbool/")
bools = sorted(self.sc.sequenceFile(basepath + "/sfbool/").collect())
self.assertEqual(bools, eb)
en = [(1, None), (1, None), (2, None), (2, None), (2, None), (3, None)]
self.sc.parallelize(en).saveAsSequenceFile(basepath + "/sfnull/")
nulls = sorted(self.sc.sequenceFile(basepath + "/sfnull/").collect())
self.assertEqual(nulls, en)
em = [(1, {}),
(1, {3.0: u'bb'}),
(2, {1.0: u'aa'}),
(2, {1.0: u'cc'}),
(3, {2.0: u'dd'})]
self.sc.parallelize(em).saveAsSequenceFile(basepath + "/sfmap/")
maps = self.sc.sequenceFile(basepath + "/sfmap/").collect()
        for v in maps:
            self.assertIn(v, em)
def test_oldhadoop(self):
basepath = self.tempdir.name
dict_data = [(1, {}),
(1, {"row1": 1.0}),
(2, {"row2": 2.0})]
self.sc.parallelize(dict_data).saveAsHadoopFile(
basepath + "/oldhadoop/",
"org.apache.hadoop.mapred.SequenceFileOutputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable")
result = self.sc.hadoopFile(
basepath + "/oldhadoop/",
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable").collect()
        for v in result:
            self.assertIn(v, dict_data)
conf = {
"mapred.output.format.class": "org.apache.hadoop.mapred.SequenceFileOutputFormat",
"mapreduce.job.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapreduce.job.output.value.class": "org.apache.hadoop.io.MapWritable",
"mapreduce.output.fileoutputformat.outputdir": basepath + "/olddataset/"
}
self.sc.parallelize(dict_data).saveAsHadoopDataset(conf)
input_conf = {"mapreduce.input.fileinputformat.inputdir": basepath + "/olddataset/"}
result = self.sc.hadoopRDD(
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable",
conf=input_conf).collect()
        for v in result:
            self.assertIn(v, dict_data)
def test_newhadoop(self):
basepath = self.tempdir.name
data = [(1, ""),
(1, "a"),
(2, "bcdf")]
self.sc.parallelize(data).saveAsNewAPIHadoopFile(
basepath + "/newhadoop/",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text")
result = sorted(self.sc.newAPIHadoopFile(
basepath + "/newhadoop/",
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text").collect())
self.assertEqual(result, data)
conf = {
"mapreduce.job.outputformat.class":
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"mapreduce.job.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapreduce.job.output.value.class": "org.apache.hadoop.io.Text",
"mapreduce.output.fileoutputformat.outputdir": basepath + "/newdataset/"
}
self.sc.parallelize(data).saveAsNewAPIHadoopDataset(conf)
input_conf = {"mapreduce.input.fileinputformat.inputdir": basepath + "/newdataset/"}
new_dataset = sorted(self.sc.newAPIHadoopRDD(
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text",
conf=input_conf).collect())
self.assertEqual(new_dataset, data)
@unittest.skipIf(sys.version >= "3", "serialize of array")
def test_newhadoop_with_array(self):
basepath = self.tempdir.name
# use custom ArrayWritable types and converters to handle arrays
array_data = [(1, array('d')),
(1, array('d', [1.0, 2.0, 3.0])),
(2, array('d', [3.0, 4.0, 5.0]))]
self.sc.parallelize(array_data).saveAsNewAPIHadoopFile(
basepath + "/newhadoop/",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable",
valueConverter="org.apache.spark.api.python.DoubleArrayToWritableConverter")
result = sorted(self.sc.newAPIHadoopFile(
basepath + "/newhadoop/",
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable",
valueConverter="org.apache.spark.api.python.WritableToDoubleArrayConverter").collect())
self.assertEqual(result, array_data)
conf = {
"mapreduce.job.outputformat.class":
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"mapreduce.job.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapreduce.job.output.value.class": "org.apache.spark.api.python.DoubleArrayWritable",
"mapreduce.output.fileoutputformat.outputdir": basepath + "/newdataset/"
}
self.sc.parallelize(array_data).saveAsNewAPIHadoopDataset(
conf,
valueConverter="org.apache.spark.api.python.DoubleArrayToWritableConverter")
input_conf = {"mapreduce.input.fileinputformat.inputdir": basepath + "/newdataset/"}
new_dataset = sorted(self.sc.newAPIHadoopRDD(
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable",
valueConverter="org.apache.spark.api.python.WritableToDoubleArrayConverter",
conf=input_conf).collect())
self.assertEqual(new_dataset, array_data)
def test_newolderror(self):
basepath = self.tempdir.name
rdd = self.sc.parallelize(range(1, 4)).map(lambda x: (x, "a" * x))
self.assertRaises(Exception, lambda: rdd.saveAsHadoopFile(
basepath + "/newolderror/saveAsHadoopFile/",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat"))
self.assertRaises(Exception, lambda: rdd.saveAsNewAPIHadoopFile(
basepath + "/newolderror/saveAsNewAPIHadoopFile/",
"org.apache.hadoop.mapred.SequenceFileOutputFormat"))
def test_bad_inputs(self):
basepath = self.tempdir.name
rdd = self.sc.parallelize(range(1, 4)).map(lambda x: (x, "a" * x))
self.assertRaises(Exception, lambda: rdd.saveAsHadoopFile(
basepath + "/badinputs/saveAsHadoopFile/",
"org.apache.hadoop.mapred.NotValidOutputFormat"))
self.assertRaises(Exception, lambda: rdd.saveAsNewAPIHadoopFile(
basepath + "/badinputs/saveAsNewAPIHadoopFile/",
"org.apache.hadoop.mapreduce.lib.output.NotValidOutputFormat"))
def test_converters(self):
# use of custom converters
basepath = self.tempdir.name
data = [(1, {3.0: u'bb'}),
(2, {1.0: u'aa'}),
(3, {2.0: u'dd'})]
self.sc.parallelize(data).saveAsNewAPIHadoopFile(
basepath + "/converters/",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
keyConverter="org.apache.spark.api.python.TestOutputKeyConverter",
valueConverter="org.apache.spark.api.python.TestOutputValueConverter")
converted = sorted(self.sc.sequenceFile(basepath + "/converters/").collect())
expected = [(u'1', 3.0),
(u'2', 1.0),
(u'3', 2.0)]
self.assertEqual(converted, expected)
def test_reserialization(self):
basepath = self.tempdir.name
x = range(1, 5)
y = range(1001, 1005)
data = list(zip(x, y))
rdd = self.sc.parallelize(x).zip(self.sc.parallelize(y))
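        # the zipped RDD must be re-serialized on each of the save paths below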
rdd.saveAsSequenceFile(basepath + "/reserialize/sequence")
result1 = sorted(self.sc.sequenceFile(basepath + "/reserialize/sequence").collect())
self.assertEqual(result1, data)
rdd.saveAsHadoopFile(
basepath + "/reserialize/hadoop",
"org.apache.hadoop.mapred.SequenceFileOutputFormat")
result2 = sorted(self.sc.sequenceFile(basepath + "/reserialize/hadoop").collect())
self.assertEqual(result2, data)
rdd.saveAsNewAPIHadoopFile(
basepath + "/reserialize/newhadoop",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat")
result3 = sorted(self.sc.sequenceFile(basepath + "/reserialize/newhadoop").collect())
self.assertEqual(result3, data)
conf4 = {
"mapred.output.format.class": "org.apache.hadoop.mapred.SequenceFileOutputFormat",
"mapreduce.job.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapreduce.job.output.value.class": "org.apache.hadoop.io.IntWritable",
"mapreduce.output.fileoutputformat.outputdir": basepath + "/reserialize/dataset"}
rdd.saveAsHadoopDataset(conf4)
result4 = sorted(self.sc.sequenceFile(basepath + "/reserialize/dataset").collect())
self.assertEqual(result4, data)
conf5 = {"mapreduce.job.outputformat.class":
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"mapreduce.job.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapreduce.job.output.value.class": "org.apache.hadoop.io.IntWritable",
"mapreduce.output.fileoutputformat.outputdir": basepath + "/reserialize/newdataset"
}
rdd.saveAsNewAPIHadoopDataset(conf5)
result5 = sorted(self.sc.sequenceFile(basepath + "/reserialize/newdataset").collect())
self.assertEqual(result5, data)
def test_malformed_RDD(self):
basepath = self.tempdir.name
# non-batch-serialized RDD[[(K, V)]] should be rejected
data = [[(1, "a")], [(2, "aa")], [(3, "aaa")]]
rdd = self.sc.parallelize(data, len(data))
self.assertRaises(Exception, lambda: rdd.saveAsSequenceFile(
basepath + "/malformed/sequence"))
class DaemonTests(unittest.TestCase):
def connect(self, port):
from socket import socket, AF_INET, SOCK_STREAM
sock = socket(AF_INET, SOCK_STREAM)
sock.connect(('127.0.0.1', port))
        # send a split index of -1 to shut down the worker
sock.send(b"\xFF\xFF\xFF\xFF")
sock.close()
return True
def do_termination_test(self, terminator):
from subprocess import Popen, PIPE
from errno import ECONNREFUSED
# start daemon
daemon_path = os.path.join(os.path.dirname(__file__), "daemon.py")
python_exec = sys.executable or os.environ.get("PYSPARK_PYTHON")
daemon = Popen([python_exec, daemon_path], stdin=PIPE, stdout=PIPE)
# read the port number
port = read_int(daemon.stdout)
# daemon should accept connections
self.assertTrue(self.connect(port))
# request shutdown
terminator(daemon)
time.sleep(1)
# daemon should no longer accept connections
try:
self.connect(port)
except EnvironmentError as exception:
self.assertEqual(exception.errno, ECONNREFUSED)
else:
self.fail("Expected EnvironmentError to be raised")
def test_termination_stdin(self):
"""Ensure that daemon and workers terminate when stdin is closed."""
self.do_termination_test(lambda daemon: daemon.stdin.close())
def test_termination_sigterm(self):
"""Ensure that daemon and workers terminate on SIGTERM."""
from signal import SIGTERM
self.do_termination_test(lambda daemon: os.kill(daemon.pid, SIGTERM))
class WorkerTests(ReusedPySparkTestCase):
def test_cancel_task(self):
temp = tempfile.NamedTemporaryFile(delete=True)
temp.close()
path = temp.name
def sleep(x):
import os
import time
with open(path, 'w') as f:
f.write("%d %d" % (os.getppid(), os.getpid()))
time.sleep(100)
# start job in background thread
def run():
try:
self.sc.parallelize(range(1), 1).foreach(sleep)
except Exception:
pass
import threading
t = threading.Thread(target=run)
t.daemon = True
t.start()
daemon_pid, worker_pid = 0, 0
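        # poll until the worker has written the daemon pid and its own pid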
while True:
if os.path.exists(path):
with open(path) as f:
data = f.read().split(' ')
daemon_pid, worker_pid = map(int, data)
break
time.sleep(0.1)
# cancel jobs
self.sc.cancelAllJobs()
t.join()
for i in range(50):
try:
os.kill(worker_pid, 0)
time.sleep(0.1)
except OSError:
break # worker was killed
else:
self.fail("worker has not been killed after 5 seconds")
try:
os.kill(daemon_pid, 0)
except OSError:
self.fail("daemon had been killed")
# run a normal job
rdd = self.sc.parallelize(xrange(100), 1)
self.assertEqual(100, rdd.map(str).count())
def test_after_exception(self):
def raise_exception(_):
raise Exception()
rdd = self.sc.parallelize(xrange(100), 1)
with QuietTest(self.sc):
self.assertRaises(Exception, lambda: rdd.foreach(raise_exception))
self.assertEqual(100, rdd.map(str).count())
def test_after_jvm_exception(self):
tempFile = tempfile.NamedTemporaryFile(delete=False)
tempFile.write(b"Hello World!")
tempFile.close()
data = self.sc.textFile(tempFile.name, 1)
filtered_data = data.filter(lambda x: True)
self.assertEqual(1, filtered_data.count())
os.unlink(tempFile.name)
with QuietTest(self.sc):
self.assertRaises(Exception, lambda: filtered_data.count())
rdd = self.sc.parallelize(xrange(100), 1)
self.assertEqual(100, rdd.map(str).count())
def test_accumulator_when_reuse_worker(self):
from pyspark.accumulators import INT_ACCUMULATOR_PARAM
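        # with reused workers, updates to one accumulator must not leak into
        # another accumulator across jobs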
acc1 = self.sc.accumulator(0, INT_ACCUMULATOR_PARAM)
self.sc.parallelize(xrange(100), 20).foreach(lambda x: acc1.add(x))
self.assertEqual(sum(range(100)), acc1.value)
acc2 = self.sc.accumulator(0, INT_ACCUMULATOR_PARAM)
self.sc.parallelize(xrange(100), 20).foreach(lambda x: acc2.add(x))
self.assertEqual(sum(range(100)), acc2.value)
self.assertEqual(sum(range(100)), acc1.value)
def test_reuse_worker_after_take(self):
rdd = self.sc.parallelize(xrange(100000), 1)
self.assertEqual(0, rdd.first())
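        # first() leaves the reused worker mid-partition; a subsequent count()
        # must still complete promptly instead of hanging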
def count():
try:
rdd.count()
except Exception:
pass
t = threading.Thread(target=count)
t.daemon = True
t.start()
t.join(5)
self.assertTrue(not t.isAlive())
self.assertEqual(100000, rdd.count())
def test_with_different_versions_of_python(self):
rdd = self.sc.parallelize(range(10))
rdd.count()
version = self.sc.pythonVer
self.sc.pythonVer = "2.0"
try:
with QuietTest(self.sc):
self.assertRaises(Py4JJavaError, lambda: rdd.count())
finally:
self.sc.pythonVer = version
class SparkSubmitTests(unittest.TestCase):
def setUp(self):
self.programDir = tempfile.mkdtemp()
self.sparkSubmit = os.path.join(os.environ.get("SPARK_HOME"), "bin", "spark-submit")
def tearDown(self):
shutil.rmtree(self.programDir)
def createTempFile(self, name, content, dir=None):
"""
Create a temp file with the given name and content and return its path.
Strips leading spaces from content up to the first '|' in each line.
"""
pattern = re.compile(r'^ *\|', re.MULTILINE)
content = re.sub(pattern, '', content.strip())
if dir is None:
path = os.path.join(self.programDir, name)
else:
os.makedirs(os.path.join(self.programDir, dir))
path = os.path.join(self.programDir, dir, name)
with open(path, "w") as f:
f.write(content)
return path
def createFileInZip(self, name, content, ext=".zip", dir=None, zip_name=None):
"""
Create a zip archive containing a file with the given content and return its path.
Strips leading spaces from content up to the first '|' in each line.
"""
pattern = re.compile(r'^ *\|', re.MULTILINE)
content = re.sub(pattern, '', content.strip())
if dir is None:
path = os.path.join(self.programDir, name + ext)
else:
path = os.path.join(self.programDir, dir, zip_name + ext)
zip = zipfile.ZipFile(path, 'w')
zip.writestr(name, content)
zip.close()
return path
def create_spark_package(self, artifact_name):
group_id, artifact_id, version = artifact_name.split(":")
self.createTempFile("%s-%s.pom" % (artifact_id, version), ("""
|<?xml version="1.0" encoding="UTF-8"?>
|<project xmlns="http://maven.apache.org/POM/4.0.0"
| xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
| xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
| http://maven.apache.org/xsd/maven-4.0.0.xsd">
| <modelVersion>4.0.0</modelVersion>
| <groupId>%s</groupId>
| <artifactId>%s</artifactId>
| <version>%s</version>
|</project>
""" % (group_id, artifact_id, version)).lstrip(),
os.path.join(group_id, artifact_id, version))
self.createFileInZip("%s.py" % artifact_id, """
|def myfunc(x):
| return x + 1
""", ".jar", os.path.join(group_id, artifact_id, version),
"%s-%s" % (artifact_id, version))
def test_single_script(self):
"""Submit and test a single script file"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|
|sc = SparkContext()
|print(sc.parallelize([1, 2, 3]).map(lambda x: x * 2).collect())
""")
proc = subprocess.Popen([self.sparkSubmit, script], stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 4, 6]", out.decode('utf-8'))
def test_script_with_local_functions(self):
"""Submit and test a single script file calling a global function"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|
|def foo(x):
| return x * 3
|
|sc = SparkContext()
|print(sc.parallelize([1, 2, 3]).map(foo).collect())
""")
proc = subprocess.Popen([self.sparkSubmit, script], stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[3, 6, 9]", out.decode('utf-8'))
def test_module_dependency(self):
"""Submit and test a script with a dependency on another module"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|from mylib import myfunc
|
|sc = SparkContext()
|print(sc.parallelize([1, 2, 3]).map(myfunc).collect())
""")
zip = self.createFileInZip("mylib.py", """
|def myfunc(x):
| return x + 1
""")
proc = subprocess.Popen([self.sparkSubmit, "--py-files", zip, script],
stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 3, 4]", out.decode('utf-8'))
def test_module_dependency_on_cluster(self):
"""Submit and test a script with a dependency on another module on a cluster"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|from mylib import myfunc
|
|sc = SparkContext()
|print(sc.parallelize([1, 2, 3]).map(myfunc).collect())
""")
zip = self.createFileInZip("mylib.py", """
|def myfunc(x):
| return x + 1
""")
proc = subprocess.Popen([self.sparkSubmit, "--py-files", zip, "--master",
"local-cluster[1,1,1024]", script],
stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 3, 4]", out.decode('utf-8'))
def test_package_dependency(self):
"""Submit and test a script with a dependency on a Spark Package"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|from mylib import myfunc
|
|sc = SparkContext()
|print(sc.parallelize([1, 2, 3]).map(myfunc).collect())
""")
self.create_spark_package("a:mylib:0.1")
proc = subprocess.Popen([self.sparkSubmit, "--packages", "a:mylib:0.1", "--repositories",
"file:" + self.programDir, script], stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 3, 4]", out.decode('utf-8'))
def test_package_dependency_on_cluster(self):
"""Submit and test a script with a dependency on a Spark Package on a cluster"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|from mylib import myfunc
|
|sc = SparkContext()
|print(sc.parallelize([1, 2, 3]).map(myfunc).collect())
""")
self.create_spark_package("a:mylib:0.1")
proc = subprocess.Popen([self.sparkSubmit, "--packages", "a:mylib:0.1", "--repositories",
"file:" + self.programDir, "--master",
"local-cluster[1,1,1024]", script], stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 3, 4]", out.decode('utf-8'))
def test_single_script_on_cluster(self):
"""Submit and test a single script on a cluster"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|
|def foo(x):
| return x * 2
|
|sc = SparkContext()
|print(sc.parallelize([1, 2, 3]).map(foo).collect())
""")
# this will fail if you have different spark.executor.memory
# in conf/spark-defaults.conf
proc = subprocess.Popen(
[self.sparkSubmit, "--master", "local-cluster[1,1,1024]", script],
stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 4, 6]", out.decode('utf-8'))
def test_user_configuration(self):
"""Make sure user configuration is respected (SPARK-19307)"""
script = self.createTempFile("test.py", """
|from pyspark import SparkConf, SparkContext
|
|conf = SparkConf().set("spark.test_config", "1")
|sc = SparkContext(conf = conf)
|try:
| if sc._conf.get("spark.test_config") != "1":
| raise Exception("Cannot find spark.test_config in SparkContext's conf.")
|finally:
| sc.stop()
""")
proc = subprocess.Popen(
[self.sparkSubmit, "--master", "local", script],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode, msg="Process failed with error:\n {0}".format(out))
class ContextTests(unittest.TestCase):
def test_failed_sparkcontext_creation(self):
# Regression test for SPARK-1550
self.assertRaises(Exception, lambda: SparkContext("an-invalid-master-name"))
def test_get_or_create(self):
with SparkContext.getOrCreate() as sc:
self.assertTrue(SparkContext.getOrCreate() is sc)
def test_parallelize_eager_cleanup(self):
with SparkContext() as sc:
temp_files = os.listdir(sc._temp_dir)
rdd = sc.parallelize([0, 1, 2])
            post_parallelize_temp_files = os.listdir(sc._temp_dir)
            self.assertEqual(temp_files, post_parallelize_temp_files)
def test_set_conf(self):
# This is for an internal use case. When there is an existing SparkContext,
# SparkSession's builder needs to set configs into SparkContext's conf.
sc = SparkContext()
sc._conf.set("spark.test.SPARK16224", "SPARK16224")
self.assertEqual(sc._jsc.sc().conf().get("spark.test.SPARK16224"), "SPARK16224")
sc.stop()
def test_stop(self):
sc = SparkContext()
self.assertNotEqual(SparkContext._active_spark_context, None)
sc.stop()
self.assertEqual(SparkContext._active_spark_context, None)
def test_with(self):
with SparkContext() as sc:
self.assertNotEqual(SparkContext._active_spark_context, None)
self.assertEqual(SparkContext._active_spark_context, None)
def test_with_exception(self):
try:
with SparkContext() as sc:
self.assertNotEqual(SparkContext._active_spark_context, None)
raise Exception()
except:
pass
self.assertEqual(SparkContext._active_spark_context, None)
def test_with_stop(self):
with SparkContext() as sc:
self.assertNotEqual(SparkContext._active_spark_context, None)
sc.stop()
self.assertEqual(SparkContext._active_spark_context, None)
def test_progress_api(self):
with SparkContext() as sc:
sc.setJobGroup('test_progress_api', '', True)
rdd = sc.parallelize(range(10)).map(lambda x: time.sleep(100))
def run():
try:
rdd.count()
except Exception:
pass
t = threading.Thread(target=run)
t.daemon = True
t.start()
# wait for scheduler to start
time.sleep(1)
tracker = sc.statusTracker()
jobIds = tracker.getJobIdsForGroup('test_progress_api')
self.assertEqual(1, len(jobIds))
job = tracker.getJobInfo(jobIds[0])
self.assertEqual(1, len(job.stageIds))
stage = tracker.getStageInfo(job.stageIds[0])
self.assertEqual(rdd.getNumPartitions(), stage.numTasks)
sc.cancelAllJobs()
t.join()
# wait for event listener to update the status
time.sleep(1)
job = tracker.getJobInfo(jobIds[0])
self.assertEqual('FAILED', job.status)
self.assertEqual([], tracker.getActiveJobsIds())
self.assertEqual([], tracker.getActiveStageIds())
sc.stop()
def test_startTime(self):
with SparkContext() as sc:
self.assertGreater(sc.startTime, 0)
class ConfTests(unittest.TestCase):
def test_memory_conf(self):
memoryList = ["1T", "1G", "1M", "1024K"]
for memory in memoryList:
sc = SparkContext(conf=SparkConf().set("spark.python.worker.memory", memory))
l = list(range(1024))
random.shuffle(l)
rdd = sc.parallelize(l, 4)
self.assertEqual(sorted(l), rdd.sortBy(lambda x: x).collect())
sc.stop()
class KeywordOnlyTests(unittest.TestCase):
class Wrapped(object):
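        # @keyword_only captures the keyword arguments into self._input_kwargs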
@keyword_only
def set(self, x=None, y=None):
if "x" in self._input_kwargs:
self._x = self._input_kwargs["x"]
if "y" in self._input_kwargs:
self._y = self._input_kwargs["y"]
return x, y
def test_keywords(self):
w = self.Wrapped()
x, y = w.set(y=1)
self.assertEqual(y, 1)
self.assertEqual(y, w._y)
self.assertIsNone(x)
self.assertFalse(hasattr(w, "_x"))
def test_non_keywords(self):
w = self.Wrapped()
self.assertRaises(TypeError, lambda: w.set(0, y=1))
def test_kwarg_ownership(self):
# test _input_kwargs is owned by each class instance and not a shared static variable
class Setter(object):
@keyword_only
def set(self, x=None, other=None, other_x=None):
if "other" in self._input_kwargs:
self._input_kwargs["other"].set(x=self._input_kwargs["other_x"])
self._x = self._input_kwargs["x"]
a = Setter()
b = Setter()
a.set(x=1, other=b, other_x=2)
self.assertEqual(a._x, 1)
self.assertEqual(b._x, 2)
@unittest.skipIf(not _have_scipy, "SciPy not installed")
class SciPyTests(PySparkTestCase):
"""General PySpark tests that depend on scipy """
def test_serialize(self):
from scipy.special import gammaln
x = range(1, 5)
expected = list(map(gammaln, x))
observed = self.sc.parallelize(x).map(gammaln).collect()
self.assertEqual(expected, observed)
@unittest.skipIf(not _have_numpy, "NumPy not installed")
class NumPyTests(PySparkTestCase):
"""General PySpark tests that depend on numpy """
def test_statcounter_array(self):
x = self.sc.parallelize([np.array([1.0, 1.0]), np.array([2.0, 2.0]), np.array([3.0, 3.0])])
s = x.stats()
self.assertSequenceEqual([2.0, 2.0], s.mean().tolist())
self.assertSequenceEqual([1.0, 1.0], s.min().tolist())
self.assertSequenceEqual([3.0, 3.0], s.max().tolist())
self.assertSequenceEqual([1.0, 1.0], s.sampleStdev().tolist())
stats_dict = s.asDict()
self.assertEqual(3, stats_dict['count'])
self.assertSequenceEqual([2.0, 2.0], stats_dict['mean'].tolist())
self.assertSequenceEqual([1.0, 1.0], stats_dict['min'].tolist())
self.assertSequenceEqual([3.0, 3.0], stats_dict['max'].tolist())
self.assertSequenceEqual([6.0, 6.0], stats_dict['sum'].tolist())
self.assertSequenceEqual([1.0, 1.0], stats_dict['stdev'].tolist())
self.assertSequenceEqual([1.0, 1.0], stats_dict['variance'].tolist())
stats_sample_dict = s.asDict(sample=True)
self.assertEqual(3, stats_sample_dict['count'])
self.assertSequenceEqual([2.0, 2.0], stats_sample_dict['mean'].tolist())
self.assertSequenceEqual([1.0, 1.0], stats_sample_dict['min'].tolist())
self.assertSequenceEqual([3.0, 3.0], stats_sample_dict['max'].tolist())
self.assertSequenceEqual([6.0, 6.0], stats_sample_dict['sum'].tolist())
self.assertSequenceEqual(
[0.816496580927726, 0.816496580927726], stats_sample_dict['stdev'].tolist())
self.assertSequenceEqual(
[0.6666666666666666, 0.6666666666666666], stats_sample_dict['variance'].tolist())
if __name__ == "__main__":
from pyspark.tests import *
if not _have_scipy:
print("NOTE: Skipping SciPy tests as it does not seem to be installed")
if not _have_numpy:
print("NOTE: Skipping NumPy tests as it does not seem to be installed")
if xmlrunner:
unittest.main(testRunner=xmlrunner.XMLTestRunner(output='target/test-reports'))
else:
unittest.main()
if not _have_scipy:
print("NOTE: SciPy tests were skipped as it does not seem to be installed")
if not _have_numpy:
print("NOTE: NumPy tests were skipped as it does not seem to be installed")
| apache-2.0 |
anshumanchatterji/selenium | py/selenium/webdriver/firefox/firefox_profile.py | 10 | 14703 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import with_statement
import base64
import copy
import json
import os
import re
import shutil
import sys
import tempfile
import zipfile
try:
from cStringIO import StringIO as BytesIO
except ImportError:
from io import BytesIO
from xml.dom import minidom
from selenium.webdriver.common.proxy import ProxyType
from selenium.common.exceptions import WebDriverException
WEBDRIVER_EXT = "webdriver.xpi"
WEBDRIVER_PREFERENCES = "webdriver_prefs.json"
EXTENSION_NAME = "fxdriver@googlecode.com"
class AddonFormatError(Exception):
"""Exception for not well-formed add-on manifest files"""
class FirefoxProfile(object):
ANONYMOUS_PROFILE_NAME = "WEBDRIVER_ANONYMOUS_PROFILE"
DEFAULT_PREFERENCES = None
def __init__(self, profile_directory=None):
"""
Initialises a new instance of a Firefox Profile
:args:
- profile_directory: Directory of profile that you want to use.
This defaults to None and will create a new
directory when object is created.
"""
if not FirefoxProfile.DEFAULT_PREFERENCES:
with open(os.path.join(os.path.dirname(__file__),
WEBDRIVER_PREFERENCES)) as default_prefs:
FirefoxProfile.DEFAULT_PREFERENCES = json.load(default_prefs)
self.default_preferences = copy.deepcopy(
FirefoxProfile.DEFAULT_PREFERENCES['mutable'])
self.native_events_enabled = True
self.profile_dir = profile_directory
self.tempfolder = None
if self.profile_dir is None:
self.profile_dir = self._create_tempfolder()
else:
self.tempfolder = tempfile.mkdtemp()
newprof = os.path.join(self.tempfolder, "webdriver-py-profilecopy")
shutil.copytree(self.profile_dir, newprof,
ignore=shutil.ignore_patterns("parent.lock", "lock", ".parentlock"))
self.profile_dir = newprof
self._read_existing_userjs(os.path.join(self.profile_dir, "user.js"))
self.extensionsDir = os.path.join(self.profile_dir, "extensions")
self.userPrefs = os.path.join(self.profile_dir, "user.js")
#Public Methods
def set_preference(self, key, value):
"""
sets the preference that we want in the profile.
"""
self.default_preferences[key] = value
def add_extension(self, extension=WEBDRIVER_EXT):
self._install_extension(extension)
def update_preferences(self):
for key, value in FirefoxProfile.DEFAULT_PREFERENCES['frozen'].items():
self.default_preferences[key] = value
self._write_user_prefs(self.default_preferences)
#Properties
@property
def path(self):
"""
Gets the profile directory that is currently being used
"""
return self.profile_dir
@property
def port(self):
"""
Gets the port that WebDriver is working on
"""
return self._port
@port.setter
def port(self, port):
"""
Sets the port that WebDriver will be running on
"""
if not isinstance(port, int):
    raise WebDriverException("Port needs to be an integer")
if port < 1 or port > 65535:
    raise WebDriverException("Port number must be in the range 1..65535")
self._port = port
self.set_preference("webdriver_firefox_port", self._port)
@property
def accept_untrusted_certs(self):
return self.default_preferences["webdriver_accept_untrusted_certs"]
@accept_untrusted_certs.setter
def accept_untrusted_certs(self, value):
if value not in [True, False]:
raise WebDriverException("Please pass in a Boolean to this call")
self.set_preference("webdriver_accept_untrusted_certs", value)
@property
def assume_untrusted_cert_issuer(self):
return self.default_preferences["webdriver_assume_untrusted_issuer"]
@assume_untrusted_cert_issuer.setter
def assume_untrusted_cert_issuer(self, value):
if value not in [True, False]:
raise WebDriverException("Please pass in a Boolean to this call")
self.set_preference("webdriver_assume_untrusted_issuer", value)
@property
def native_events_enabled(self):
return self.default_preferences['webdriver_enable_native_events']
@native_events_enabled.setter
def native_events_enabled(self, value):
if value not in [True, False]:
raise WebDriverException("Please pass in a Boolean to this call")
self.set_preference("webdriver_enable_native_events", value)
@property
def encoded(self):
"""
A zipped, base64 encoded string of profile directory
for use with remote WebDriver JSON wire protocol
"""
self.update_preferences()
fp = BytesIO()
zipped = zipfile.ZipFile(fp, 'w', zipfile.ZIP_DEFLATED)
path_root = len(self.path) + 1 # account for trailing slash
for base, dirs, files in os.walk(self.path):
for fyle in files:
filename = os.path.join(base, fyle)
zipped.write(filename, filename[path_root:])
zipped.close()
return base64.b64encode(fp.getvalue()).decode('UTF-8')
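# Illustrative note (not part of the original source): the base64 string
# produced by `encoded` is what remote WebDriver sends over the JSON wire
# protocol, along the lines of:
#     profile = FirefoxProfile()
#     profile.set_preference("browser.startup.homepage", "about:blank")
#     capabilities = {"firefox_profile": profile.encoded}
# The exact capability key depends on the remote end; treat this as a sketch.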
def set_proxy(self, proxy):
import warnings
warnings.warn(
"This method has been deprecated. Please pass in the proxy object to the Driver Object",
DeprecationWarning)
if proxy is None:
raise ValueError("proxy can not be None")
if proxy.proxy_type is ProxyType.UNSPECIFIED:
return
self.set_preference("network.proxy.type", proxy.proxy_type['ff_value'])
if proxy.proxy_type is ProxyType.MANUAL:
self.set_preference("network.proxy.no_proxies_on", proxy.no_proxy)
self._set_manual_proxy_preference("ftp", proxy.ftp_proxy)
self._set_manual_proxy_preference("http", proxy.http_proxy)
self._set_manual_proxy_preference("ssl", proxy.ssl_proxy)
self._set_manual_proxy_preference("socks", proxy.socks_proxy)
elif proxy.proxy_type is ProxyType.PAC:
self.set_preference("network.proxy.autoconfig_url", proxy.proxy_autoconfig_url)
def _set_manual_proxy_preference(self, key, setting):
if setting is None or setting == '':
return
host_details = setting.split(":")
self.set_preference("network.proxy.%s" % key, host_details[0])
if len(host_details) > 1:
self.set_preference("network.proxy.%s_port" % key, int(host_details[1]))
def _create_tempfolder(self):
"""
Creates a temp folder to store user.js and the extension
"""
return tempfile.mkdtemp()
def _write_user_prefs(self, user_prefs):
"""
writes the current user prefs dictionary to disk
"""
with open(self.userPrefs, "w") as f:
for key, value in user_prefs.items():
f.write('user_pref("%s", %s);\n' % (key, json.dumps(value)))
def _read_existing_userjs(self, userjs):
import warnings
PREF_RE = re.compile(r'user_pref\("(.*)",\s(.*)\)')
try:
with open(userjs) as f:
for usr in f:
matches = re.search(PREF_RE, usr)
try:
self.default_preferences[matches.group(1)] = json.loads(matches.group(2))
except Exception:
    warnings.warn("(skipping) failed to json.loads existing preference: " +
                  matches.group(1) + matches.group(2))
except IOError:
    # The profile given hasn't had any changes made, i.e. no user.js
    pass
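# Example of the lines _read_existing_userjs understands (illustrative):
#     user_pref("browser.startup.homepage", "about:blank");
#     user_pref("dom.max_script_run_time", 30);
# PREF_RE captures the key and the raw value text; json.loads then turns the
# value into the matching Python type before it lands in default_preferences.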
def _install_extension(self, addon, unpack=True):
"""
Installs an add-on from a file path or a directory of add-ons
into the profile.
- addon: path to an .xpi file, or a directory of add-ons
- unpack: whether to unpack, unless specified otherwise in the install.rdf
"""
if addon == WEBDRIVER_EXT:
addon = os.path.join(os.path.dirname(__file__), WEBDRIVER_EXT)
tmpdir = None
xpifile = None
if addon.endswith('.xpi'):
tmpdir = tempfile.mkdtemp(suffix='.' + os.path.split(addon)[-1])
compressed_file = zipfile.ZipFile(addon, 'r')
for name in compressed_file.namelist():
if name.endswith('/'):
if not os.path.isdir(os.path.join(tmpdir, name)):
os.makedirs(os.path.join(tmpdir, name))
else:
if not os.path.isdir(os.path.dirname(os.path.join(tmpdir, name))):
os.makedirs(os.path.dirname(os.path.join(tmpdir, name)))
data = compressed_file.read(name)
with open(os.path.join(tmpdir, name), 'wb') as f:
f.write(data)
xpifile = addon
addon = tmpdir
# determine the addon id
addon_details = self._addon_details(addon)
addon_id = addon_details.get('id')
assert addon_id, 'The addon id could not be found: %s' % addon
# copy the addon to the profile
extensions_path = os.path.join(self.profile_dir, 'extensions')
addon_path = os.path.join(extensions_path, addon_id)
if not unpack and not addon_details['unpack'] and xpifile:
if not os.path.exists(extensions_path):
os.makedirs(extensions_path)
shutil.copy(xpifile, addon_path + '.xpi')
else:
if not os.path.exists(addon_path):
shutil.copytree(addon, addon_path, symlinks=True)
# remove the temporary directory, if any
if tmpdir:
shutil.rmtree(tmpdir)
def _addon_details(self, addon_path):
"""
Returns a dictionary of details about the addon.
:param addon_path: path to the add-on directory or XPI
Returns::
{'id': u'rainbow@colors.org', # id of the addon
'version': u'1.4', # version of the addon
'name': u'Rainbow', # name of the addon
'unpack': False } # whether to unpack the addon
"""
details = {
'id': None,
'unpack': False,
'name': None,
'version': None
}
def get_namespace_id(doc, url):
attributes = doc.documentElement.attributes
namespace = ""
for i in range(attributes.length):
if attributes.item(i).value == url:
if ":" in attributes.item(i).name:
# If the namespace is not the default one, strip the 'xmlns:' part to keep the prefix
namespace = attributes.item(i).name.split(':')[1] + ":"
break
return namespace
def get_text(element):
"""Retrieve the text value of a given node"""
rc = []
for node in element.childNodes:
if node.nodeType == node.TEXT_NODE:
rc.append(node.data)
return ''.join(rc).strip()
if not os.path.exists(addon_path):
raise IOError('Add-on path does not exist: %s' % addon_path)
try:
if zipfile.is_zipfile(addon_path):
# Bug 944361 - We cannot use 'with' together with zipFile because
# it will cause an exception thrown in Python 2.6.
try:
compressed_file = zipfile.ZipFile(addon_path, 'r')
manifest = compressed_file.read('install.rdf')
finally:
compressed_file.close()
elif os.path.isdir(addon_path):
with open(os.path.join(addon_path, 'install.rdf'), 'r') as f:
manifest = f.read()
else:
raise IOError('Add-on path is neither an XPI nor a directory: %s' % addon_path)
except (IOError, KeyError) as e:
raise AddonFormatError(str(e), sys.exc_info()[2])
try:
doc = minidom.parseString(manifest)
# Get the namespaces abbreviations
em = get_namespace_id(doc, 'http://www.mozilla.org/2004/em-rdf#')
rdf = get_namespace_id(doc, 'http://www.w3.org/1999/02/22-rdf-syntax-ns#')
description = doc.getElementsByTagName(rdf + 'Description').item(0)
if description is None:
description = doc.getElementsByTagName('Description').item(0)
for node in description.childNodes:
# Remove the namespace prefix from the tag for comparison
entry = node.nodeName.replace(em, "")
if entry in details.keys():
details.update({entry: get_text(node)})
if details.get('id') is None:
for i in range(description.attributes.length):
attribute = description.attributes.item(i)
if attribute.name == em + 'id':
details.update({'id': attribute.value})
except Exception as e:
raise AddonFormatError(str(e), sys.exc_info()[2])
# turn unpack into a true/false value
if isinstance(details['unpack'], str):
details['unpack'] = details['unpack'].lower() == 'true'
# If no ID is set, the add-on is invalid
if details.get('id') is None:
raise AddonFormatError('Add-on id could not be found.')
return details
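# For reference (illustrative, not from the original source): a minimal
# install.rdf that _addon_details can parse looks roughly like this; the
# em:id / em:name / em:version / em:unpack entries map onto the returned dict.
#
#   <RDF xmlns="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
#        xmlns:em="http://www.mozilla.org/2004/em-rdf#">
#     <Description about="urn:mozilla:install-manifest">
#       <em:id>rainbow@colors.org</em:id>
#       <em:version>1.4</em:version>
#       <em:name>Rainbow</em:name>
#       <em:unpack>false</em:unpack>
#     </Description>
#   </RDF>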
| apache-2.0 |
dennisss/sympy | sympy/core/containers.py | 15 | 7810 | """Module for SymPy containers
(SymPy objects that store other SymPy objects)
The containers implemented in this module are subclassed to Basic.
They are supposed to work seamlessly within the SymPy framework.
"""
from __future__ import print_function, division
from sympy.core.basic import Basic
from sympy.core.compatibility import as_int
from sympy.core.sympify import sympify, converter
from sympy.utilities.iterables import iterable
class Tuple(Basic):
"""
Wrapper around the builtin tuple object
The Tuple is a subclass of Basic, so that it works well in the
SymPy framework. The wrapped tuple is available as self.args, but
you can also access elements or slices with [:] syntax.
>>> from sympy import symbols
>>> from sympy.core.containers import Tuple
>>> a, b, c, d = symbols('a b c d')
>>> Tuple(a, b, c)[1:]
(b, c)
>>> Tuple(a, b, c).subs(a, d)
(d, b, c)
"""
def __new__(cls, *args, **assumptions):
args = [ sympify(arg) for arg in args ]
obj = Basic.__new__(cls, *args, **assumptions)
return obj
def __getitem__(self, i):
if isinstance(i, slice):
indices = i.indices(len(self))
return Tuple(*[self.args[j] for j in range(*indices)])
return self.args[i]
def __len__(self):
return len(self.args)
def __contains__(self, item):
return item in self.args
def __iter__(self):
return iter(self.args)
def __add__(self, other):
if isinstance(other, Tuple):
return Tuple(*(self.args + other.args))
elif isinstance(other, tuple):
return Tuple(*(self.args + other))
else:
return NotImplemented
def __radd__(self, other):
if isinstance(other, Tuple):
return Tuple(*(other.args + self.args))
elif isinstance(other, tuple):
return Tuple(*(other + self.args))
else:
return NotImplemented
def __mul__(self, other):
try:
n = as_int(other)
except ValueError:
raise TypeError("Can't multiply sequence by non-integer of type '%s'" % type(other))
return self.func(*(self.args*n))
__rmul__ = __mul__
def __eq__(self, other):
if isinstance(other, Basic):
return super(Tuple, self).__eq__(other)
return self.args == other
def __ne__(self, other):
if isinstance(other, Basic):
return super(Tuple, self).__ne__(other)
return self.args != other
def __hash__(self):
return hash(self.args)
def _to_mpmath(self, prec):
return tuple([a._to_mpmath(prec) for a in self.args])
def __lt__(self, other):
return sympify(self.args < other.args)
def __le__(self, other):
return sympify(self.args <= other.args)
# XXX: Basic defines count() as something different, so we can't
# redefine it here. Originally this led to a cse() test failure.
def tuple_count(self, value):
"""T.count(value) -> integer -- return number of occurrences of value"""
return self.args.count(value)
def index(self, value, start=None, stop=None):
"""T.index(value, [start, [stop]]) -> integer -- return first index of value.
Raises ValueError if the value is not present."""
# XXX: One would expect:
#
# return self.args.index(value, start, stop)
#
# here. Any trouble with that? Yes:
#
# >>> (1,).index(1, None, None)
# Traceback (most recent call last):
# File "<stdin>", line 1, in <module>
# TypeError: slice indices must be integers or None or have an __index__ method
#
# See: http://bugs.python.org/issue13340
if start is None and stop is None:
return self.args.index(value)
elif stop is None:
return self.args.index(value, start)
else:
return self.args.index(value, start, stop)
converter[tuple] = lambda tup: Tuple(*tup)
def tuple_wrapper(method):
"""
Decorator that converts any tuple in the function arguments into a Tuple.
The motivation for this is to provide simple user interfaces. The user can
call a function with regular tuples in the argument, and the wrapper will
convert them to Tuples before handing them to the function.
>>> from sympy.core.containers import tuple_wrapper
>>> def f(*args):
... return args
>>> g = tuple_wrapper(f)
The decorated function g sees only the Tuple argument:
>>> g(0, (1, 2), 3)
(0, (1, 2), 3)
"""
def wrap_tuples(*args, **kw_args):
newargs = []
for arg in args:
if type(arg) is tuple:
newargs.append(Tuple(*arg))
else:
newargs.append(arg)
return method(*newargs, **kw_args)
return wrap_tuples
class Dict(Basic):
"""
Wrapper around the builtin dict object
The Dict is a subclass of Basic, so that it works well in the
SymPy framework. Because it is immutable, it may be included
in sets, but its values must all be given at instantiation and
cannot be changed afterwards. Otherwise it behaves identically
to the Python dict.
>>> from sympy.core.containers import Dict
>>> D = Dict({1: 'one', 2: 'two'})
>>> for key in D:
... if key == 1:
... print('%s %s' % (key, D[key]))
1 one
The args are sympified so the 1 and 2 are Integers and the values
are Symbols. Queries automatically sympify args so the following work:
>>> 1 in D
True
>>> D.has('one') # searches keys and values
True
>>> 'one' in D # not in the keys
False
>>> D[1]
one
"""
def __new__(cls, *args):
if len(args) == 1 and ((args[0].__class__ is dict) or
(args[0].__class__ is Dict)):
items = [Tuple(k, v) for k, v in args[0].items()]
elif iterable(args) and all(len(arg) == 2 for arg in args):
items = [Tuple(k, v) for k, v in args]
else:
raise TypeError('Pass Dict args as Dict((k1, v1), ...) or Dict({k1: v1, ...})')
elements = frozenset(items)
obj = Basic.__new__(cls, elements)
obj.elements = elements
obj._dict = dict(items) # In case Tuple decides it wants to sympify
return obj
def __getitem__(self, key):
"""x.__getitem__(y) <==> x[y]"""
return self._dict[sympify(key)]
def __setitem__(self, key, value):
raise NotImplementedError("SymPy Dicts are Immutable")
@property
def args(self):
return tuple(self.elements)
def items(self):
'''D.items() -> list of D's (key, value) pairs, as 2-tuples'''
return self._dict.items()
def keys(self):
'''D.keys() -> list of D's keys'''
return self._dict.keys()
def values(self):
'''D.values() -> list of D's values'''
return self._dict.values()
def __iter__(self):
'''x.__iter__() <==> iter(x)'''
return iter(self._dict)
def __len__(self):
'''x.__len__() <==> len(x)'''
return self._dict.__len__()
def get(self, key, default=None):
'''D.get(k[,d]) -> D[k] if k in D, else d. d defaults to None.'''
return self._dict.get(sympify(key), default)
def __contains__(self, key):
'''D.__contains__(k) -> True if D has a key k, else False'''
return sympify(key) in self._dict
def __lt__(self, other):
return sympify(self.args < other.args)
@property
def _sorted_args(self):
from sympy.utilities import default_sort_key
return tuple(sorted(self.args, key=default_sort_key))
| bsd-3-clause |
fumen/gae-fumen | lib/jinja2/lexer.py | 119 | 28238 | # -*- coding: utf-8 -*-
"""
jinja2.lexer
~~~~~~~~~~~~
This module implements a Jinja / Python combination lexer. The
`Lexer` class provided by this module is used to do some preprocessing
for Jinja.
On the one hand it filters out invalid operators like the bitshift
operators we don't allow in templates. On the other hand it separates
template code and python code in expressions.
:copyright: (c) 2017 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import re
import sys
from operator import itemgetter
from collections import deque
from jinja2.exceptions import TemplateSyntaxError
from jinja2.utils import LRUCache
from jinja2._compat import iteritems, implements_iterator, text_type, intern
# cache for the lexers. Exists in order to be able to have multiple
# environments with the same lexer
_lexer_cache = LRUCache(50)
# static regular expressions
whitespace_re = re.compile(r'\s+', re.U)
string_re = re.compile(r"('([^'\\]*(?:\\.[^'\\]*)*)'"
r'|"([^"\\]*(?:\\.[^"\\]*)*)")', re.S)
integer_re = re.compile(r'\d+')
def _make_name_re():
try:
compile('föö', '<unknown>', 'eval')
except SyntaxError:
return re.compile(r'\b[a-zA-Z_][a-zA-Z0-9_]*\b')
import jinja2
from jinja2 import _stringdefs
name_re = re.compile(r'[%s][%s]*' % (_stringdefs.xid_start,
_stringdefs.xid_continue))
# Save some memory here
sys.modules.pop('jinja2._stringdefs')
del _stringdefs
del jinja2._stringdefs
return name_re
# we use the unicode identifier rule if this python version is able
# to handle unicode identifiers, otherwise the standard ASCII one.
name_re = _make_name_re()
del _make_name_re
float_re = re.compile(r'(?<!\.)\d+\.\d+')
newline_re = re.compile(r'(\r\n|\r|\n)')
# internal the tokens and keep references to them
TOKEN_ADD = intern('add')
TOKEN_ASSIGN = intern('assign')
TOKEN_COLON = intern('colon')
TOKEN_COMMA = intern('comma')
TOKEN_DIV = intern('div')
TOKEN_DOT = intern('dot')
TOKEN_EQ = intern('eq')
TOKEN_FLOORDIV = intern('floordiv')
TOKEN_GT = intern('gt')
TOKEN_GTEQ = intern('gteq')
TOKEN_LBRACE = intern('lbrace')
TOKEN_LBRACKET = intern('lbracket')
TOKEN_LPAREN = intern('lparen')
TOKEN_LT = intern('lt')
TOKEN_LTEQ = intern('lteq')
TOKEN_MOD = intern('mod')
TOKEN_MUL = intern('mul')
TOKEN_NE = intern('ne')
TOKEN_PIPE = intern('pipe')
TOKEN_POW = intern('pow')
TOKEN_RBRACE = intern('rbrace')
TOKEN_RBRACKET = intern('rbracket')
TOKEN_RPAREN = intern('rparen')
TOKEN_SEMICOLON = intern('semicolon')
TOKEN_SUB = intern('sub')
TOKEN_TILDE = intern('tilde')
TOKEN_WHITESPACE = intern('whitespace')
TOKEN_FLOAT = intern('float')
TOKEN_INTEGER = intern('integer')
TOKEN_NAME = intern('name')
TOKEN_STRING = intern('string')
TOKEN_OPERATOR = intern('operator')
TOKEN_BLOCK_BEGIN = intern('block_begin')
TOKEN_BLOCK_END = intern('block_end')
TOKEN_VARIABLE_BEGIN = intern('variable_begin')
TOKEN_VARIABLE_END = intern('variable_end')
TOKEN_RAW_BEGIN = intern('raw_begin')
TOKEN_RAW_END = intern('raw_end')
TOKEN_COMMENT_BEGIN = intern('comment_begin')
TOKEN_COMMENT_END = intern('comment_end')
TOKEN_COMMENT = intern('comment')
TOKEN_LINESTATEMENT_BEGIN = intern('linestatement_begin')
TOKEN_LINESTATEMENT_END = intern('linestatement_end')
TOKEN_LINECOMMENT_BEGIN = intern('linecomment_begin')
TOKEN_LINECOMMENT_END = intern('linecomment_end')
TOKEN_LINECOMMENT = intern('linecomment')
TOKEN_DATA = intern('data')
TOKEN_INITIAL = intern('initial')
TOKEN_EOF = intern('eof')
# bind operators to token types
operators = {
'+': TOKEN_ADD,
'-': TOKEN_SUB,
'/': TOKEN_DIV,
'//': TOKEN_FLOORDIV,
'*': TOKEN_MUL,
'%': TOKEN_MOD,
'**': TOKEN_POW,
'~': TOKEN_TILDE,
'[': TOKEN_LBRACKET,
']': TOKEN_RBRACKET,
'(': TOKEN_LPAREN,
')': TOKEN_RPAREN,
'{': TOKEN_LBRACE,
'}': TOKEN_RBRACE,
'==': TOKEN_EQ,
'!=': TOKEN_NE,
'>': TOKEN_GT,
'>=': TOKEN_GTEQ,
'<': TOKEN_LT,
'<=': TOKEN_LTEQ,
'=': TOKEN_ASSIGN,
'.': TOKEN_DOT,
':': TOKEN_COLON,
'|': TOKEN_PIPE,
',': TOKEN_COMMA,
';': TOKEN_SEMICOLON
}
reverse_operators = dict([(v, k) for k, v in iteritems(operators)])
assert len(operators) == len(reverse_operators), 'operators dropped'
operator_re = re.compile('(%s)' % '|'.join(re.escape(x) for x in
sorted(operators, key=lambda x: -len(x))))
ignored_tokens = frozenset([TOKEN_COMMENT_BEGIN, TOKEN_COMMENT,
TOKEN_COMMENT_END, TOKEN_WHITESPACE,
TOKEN_LINECOMMENT_BEGIN, TOKEN_LINECOMMENT_END,
TOKEN_LINECOMMENT])
ignore_if_empty = frozenset([TOKEN_WHITESPACE, TOKEN_DATA,
TOKEN_COMMENT, TOKEN_LINECOMMENT])
def _describe_token_type(token_type):
if token_type in reverse_operators:
return reverse_operators[token_type]
return {
TOKEN_COMMENT_BEGIN: 'begin of comment',
TOKEN_COMMENT_END: 'end of comment',
TOKEN_COMMENT: 'comment',
TOKEN_LINECOMMENT: 'comment',
TOKEN_BLOCK_BEGIN: 'begin of statement block',
TOKEN_BLOCK_END: 'end of statement block',
TOKEN_VARIABLE_BEGIN: 'begin of print statement',
TOKEN_VARIABLE_END: 'end of print statement',
TOKEN_LINESTATEMENT_BEGIN: 'begin of line statement',
TOKEN_LINESTATEMENT_END: 'end of line statement',
TOKEN_DATA: 'template data / text',
TOKEN_EOF: 'end of template'
}.get(token_type, token_type)
def describe_token(token):
"""Returns a description of the token."""
if token.type == 'name':
return token.value
return _describe_token_type(token.type)
def describe_token_expr(expr):
"""Like `describe_token` but for token expressions."""
if ':' in expr:
type, value = expr.split(':', 1)
if type == 'name':
return value
else:
type = expr
return _describe_token_type(type)
def count_newlines(value):
"""Count the number of newline characters in the string. This is
useful for extensions that filter a stream.
"""
return len(newline_re.findall(value))
def compile_rules(environment):
"""Compiles all the rules from the environment into a list of rules."""
e = re.escape
rules = [
(len(environment.comment_start_string), 'comment',
e(environment.comment_start_string)),
(len(environment.block_start_string), 'block',
e(environment.block_start_string)),
(len(environment.variable_start_string), 'variable',
e(environment.variable_start_string))
]
if environment.line_statement_prefix is not None:
rules.append((len(environment.line_statement_prefix), 'linestatement',
r'^[ \t\v]*' + e(environment.line_statement_prefix)))
if environment.line_comment_prefix is not None:
rules.append((len(environment.line_comment_prefix), 'linecomment',
r'(?:^|(?<=\S))[^\S\r\n]*' +
e(environment.line_comment_prefix)))
return [x[1:] for x in sorted(rules, reverse=True)]
class Failure(object):
"""Class that raises a `TemplateSyntaxError` if called.
Used by the `Lexer` to specify known errors.
"""
def __init__(self, message, cls=TemplateSyntaxError):
self.message = message
self.error_class = cls
def __call__(self, lineno, filename):
raise self.error_class(self.message, lineno, filename)
class Token(tuple):
"""Token class."""
__slots__ = ()
lineno, type, value = (property(itemgetter(x)) for x in range(3))
def __new__(cls, lineno, type, value):
return tuple.__new__(cls, (lineno, intern(str(type)), value))
def __str__(self):
if self.type in reverse_operators:
return reverse_operators[self.type]
elif self.type == 'name':
return self.value
return self.type
def test(self, expr):
"""Test a token against a token expression. This can either be a
token type or ``'token_type:token_value'``. This can only test
against string values and types.
"""
# here we do a regular string equality check as test_any is usually
# passed an iterable of not interned strings.
if self.type == expr:
return True
elif ':' in expr:
return expr.split(':', 1) == [self.type, self.value]
return False
def test_any(self, *iterable):
"""Test against multiple token expressions."""
for expr in iterable:
if self.test(expr):
return True
return False
def __repr__(self):
return 'Token(%r, %r, %r)' % (
self.lineno,
self.type,
self.value
)
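# Illustrative sketch (not part of jinja2): how Token.test and test_any
# behave for a name token; the helper name is hypothetical.
def _token_test_examples():
    tok = Token(1, TOKEN_NAME, 'foo')
    assert tok.test('name')              # match on the token type alone
    assert tok.test('name:foo')          # match on type and value
    assert not tok.test('name:bar')      # value differs
    assert tok.test_any('integer', 'name:foo')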
@implements_iterator
class TokenStreamIterator(object):
"""The iterator for tokenstreams. Iterate over the stream
until the eof token is reached.
"""
def __init__(self, stream):
self.stream = stream
def __iter__(self):
return self
def __next__(self):
token = self.stream.current
if token.type is TOKEN_EOF:
self.stream.close()
raise StopIteration()
next(self.stream)
return token
@implements_iterator
class TokenStream(object):
"""A token stream is an iterable that yields :class:`Token`\\s. The
parser however does not iterate over it but calls :meth:`next` to go
one token ahead. The current active token is stored as :attr:`current`.
"""
def __init__(self, generator, name, filename):
self._iter = iter(generator)
self._pushed = deque()
self.name = name
self.filename = filename
self.closed = False
self.current = Token(1, TOKEN_INITIAL, '')
next(self)
def __iter__(self):
return TokenStreamIterator(self)
def __bool__(self):
return bool(self._pushed) or self.current.type is not TOKEN_EOF
__nonzero__ = __bool__ # py2
eos = property(lambda x: not x, doc="Are we at the end of the stream?")
def push(self, token):
"""Push a token back to the stream."""
self._pushed.append(token)
def look(self):
"""Look at the next token."""
old_token = next(self)
result = self.current
self.push(result)
self.current = old_token
return result
def skip(self, n=1):
"""Got n tokens ahead."""
for x in range(n):
next(self)
def next_if(self, expr):
"""Perform the token test and return the token if it matched.
Otherwise the return value is `None`.
"""
if self.current.test(expr):
return next(self)
def skip_if(self, expr):
"""Like :meth:`next_if` but only returns `True` or `False`."""
return self.next_if(expr) is not None
def __next__(self):
"""Go one token ahead and return the old one"""
rv = self.current
if self._pushed:
self.current = self._pushed.popleft()
elif self.current.type is not TOKEN_EOF:
try:
self.current = next(self._iter)
except StopIteration:
self.close()
return rv
def close(self):
"""Close the stream."""
self.current = Token(self.current.lineno, TOKEN_EOF, '')
self._iter = None
self.closed = True
def expect(self, expr):
"""Expect a given token type and return it. This accepts the same
argument as :meth:`jinja2.lexer.Token.test`.
"""
if not self.current.test(expr):
expr = describe_token_expr(expr)
if self.current.type is TOKEN_EOF:
raise TemplateSyntaxError('unexpected end of template, '
'expected %r.' % expr,
self.current.lineno,
self.name, self.filename)
raise TemplateSyntaxError("expected token %r, got %r" %
(expr, describe_token(self.current)),
self.current.lineno,
self.name, self.filename)
try:
return self.current
finally:
next(self)
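# Illustrative sketch (not part of jinja2): the parser consumes a TokenStream
# through expect/skip_if/look rather than plain iteration; the helper name is
# hypothetical.
def _stream_usage(stream):
    stream.expect('block_begin')       # raises TemplateSyntaxError on mismatch
    if stream.skip_if('name:if'):      # consume the 'if' keyword when present
        upcoming = stream.look()       # peek at the next token without consuming
        print(describe_token(upcoming))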
def get_lexer(environment):
"""Return a lexer which is probably cached."""
key = (environment.block_start_string,
environment.block_end_string,
environment.variable_start_string,
environment.variable_end_string,
environment.comment_start_string,
environment.comment_end_string,
environment.line_statement_prefix,
environment.line_comment_prefix,
environment.trim_blocks,
environment.lstrip_blocks,
environment.newline_sequence,
environment.keep_trailing_newline)
lexer = _lexer_cache.get(key)
if lexer is None:
lexer = Lexer(environment)
_lexer_cache[key] = lexer
return lexer
class Lexer(object):
"""Class that implements a lexer for a given environment. Automatically
created by the environment class, usually you don't have to do that.
Note that the lexer is not automatically bound to an environment.
Multiple environments can share the same lexer.
"""
def __init__(self, environment):
# shortcuts
c = lambda x: re.compile(x, re.M | re.S)
e = re.escape
# lexing rules for tags
tag_rules = [
(whitespace_re, TOKEN_WHITESPACE, None),
(float_re, TOKEN_FLOAT, None),
(integer_re, TOKEN_INTEGER, None),
(name_re, TOKEN_NAME, None),
(string_re, TOKEN_STRING, None),
(operator_re, TOKEN_OPERATOR, None)
]
# assemble the root lexing rule. because "|" is ungreedy
# we have to sort by length so that the lexer continues working
# as expected when we have parsing rules like <% for block and
# <%= for variables. (if someone wants asp like syntax)
# variables are just part of the rules if variable processing
# is required.
root_tag_rules = compile_rules(environment)
# block suffix if trimming is enabled
block_suffix_re = environment.trim_blocks and '\\n?' or ''
# strip leading spaces if lstrip_blocks is enabled
prefix_re = {}
if environment.lstrip_blocks:
# use '{%+' to manually disable lstrip_blocks behavior
no_lstrip_re = e('+')
# detect overlap between block and variable or comment strings
block_diff = c(r'^%s(.*)' % e(environment.block_start_string))
# make sure we don't mistake a block for a variable or a comment
m = block_diff.match(environment.comment_start_string)
no_lstrip_re += m and r'|%s' % e(m.group(1)) or ''
m = block_diff.match(environment.variable_start_string)
no_lstrip_re += m and r'|%s' % e(m.group(1)) or ''
# detect overlap between comment and variable strings
comment_diff = c(r'^%s(.*)' % e(environment.comment_start_string))
m = comment_diff.match(environment.variable_start_string)
no_variable_re = m and r'(?!%s)' % e(m.group(1)) or ''
lstrip_re = r'^[ \t]*'
block_prefix_re = r'%s%s(?!%s)|%s\+?' % (
lstrip_re,
e(environment.block_start_string),
no_lstrip_re,
e(environment.block_start_string),
)
comment_prefix_re = r'%s%s%s|%s\+?' % (
lstrip_re,
e(environment.comment_start_string),
no_variable_re,
e(environment.comment_start_string),
)
prefix_re['block'] = block_prefix_re
prefix_re['comment'] = comment_prefix_re
else:
block_prefix_re = '%s' % e(environment.block_start_string)
self.newline_sequence = environment.newline_sequence
self.keep_trailing_newline = environment.keep_trailing_newline
# global lexing rules
self.rules = {
'root': [
# directives
(c('(.*?)(?:%s)' % '|'.join(
[r'(?P<raw_begin>(?:\s*%s\-|%s)\s*raw\s*(?:\-%s\s*|%s))' % (
e(environment.block_start_string),
block_prefix_re,
e(environment.block_end_string),
e(environment.block_end_string)
)] + [
r'(?P<%s_begin>\s*%s\-|%s)' % (n, r, prefix_re.get(n, r))
for n, r in root_tag_rules
])), (TOKEN_DATA, '#bygroup'), '#bygroup'),
# data
(c('.+'), TOKEN_DATA, None)
],
# comments
TOKEN_COMMENT_BEGIN: [
(c(r'(.*?)((?:\-%s\s*|%s)%s)' % (
e(environment.comment_end_string),
e(environment.comment_end_string),
block_suffix_re
)), (TOKEN_COMMENT, TOKEN_COMMENT_END), '#pop'),
(c('(.)'), (Failure('Missing end of comment tag'),), None)
],
# blocks
TOKEN_BLOCK_BEGIN: [
(c(r'(?:\-%s\s*|%s)%s' % (
e(environment.block_end_string),
e(environment.block_end_string),
block_suffix_re
)), TOKEN_BLOCK_END, '#pop'),
] + tag_rules,
# variables
TOKEN_VARIABLE_BEGIN: [
(c(r'\-%s\s*|%s' % (
e(environment.variable_end_string),
e(environment.variable_end_string)
)), TOKEN_VARIABLE_END, '#pop')
] + tag_rules,
# raw block
TOKEN_RAW_BEGIN: [
(c(r'(.*?)((?:\s*%s\-|%s)\s*endraw\s*(?:\-%s\s*|%s%s))' % (
e(environment.block_start_string),
block_prefix_re,
e(environment.block_end_string),
e(environment.block_end_string),
block_suffix_re
)), (TOKEN_DATA, TOKEN_RAW_END), '#pop'),
(c('(.)'), (Failure('Missing end of raw directive'),), None)
],
# line statements
TOKEN_LINESTATEMENT_BEGIN: [
(c(r'\s*(\n|$)'), TOKEN_LINESTATEMENT_END, '#pop')
] + tag_rules,
# line comments
TOKEN_LINECOMMENT_BEGIN: [
(c(r'(.*?)()(?=\n|$)'), (TOKEN_LINECOMMENT,
TOKEN_LINECOMMENT_END), '#pop')
]
}
def _normalize_newlines(self, value):
"""Called for strings and template data to normalize it to unicode."""
return newline_re.sub(self.newline_sequence, value)
def tokenize(self, source, name=None, filename=None, state=None):
"""Calls tokeniter + tokenize and wraps it in a token stream.
"""
stream = self.tokeniter(source, name, filename, state)
return TokenStream(self.wrap(stream, name, filename), name, filename)
def wrap(self, stream, name=None, filename=None):
"""This is called with the stream as returned by `tokenize` and wraps
every token in a :class:`Token` and converts the value.
"""
for lineno, token, value in stream:
if token in ignored_tokens:
continue
elif token == 'linestatement_begin':
token = 'block_begin'
elif token == 'linestatement_end':
token = 'block_end'
# we are not interested in those tokens in the parser
elif token in ('raw_begin', 'raw_end'):
continue
elif token == 'data':
value = self._normalize_newlines(value)
elif token == 'keyword':
token = value
elif token == 'name':
value = str(value)
elif token == 'string':
# try to unescape string
try:
value = self._normalize_newlines(value[1:-1]) \
.encode('ascii', 'backslashreplace') \
.decode('unicode-escape')
except Exception as e:
msg = str(e).split(':')[-1].strip()
raise TemplateSyntaxError(msg, lineno, name, filename)
elif token == 'integer':
value = int(value)
elif token == 'float':
value = float(value)
elif token == 'operator':
token = operators[value]
yield Token(lineno, token, value)
def tokeniter(self, source, name, filename=None, state=None):
"""This method tokenizes the text and returns the tokens in a
generator. Use this method if you just want to tokenize a template.
"""
source = text_type(source)
lines = source.splitlines()
if self.keep_trailing_newline and source:
for newline in ('\r\n', '\r', '\n'):
if source.endswith(newline):
lines.append('')
break
source = '\n'.join(lines)
pos = 0
lineno = 1
stack = ['root']
if state is not None and state != 'root':
assert state in ('variable', 'block'), 'invalid state'
stack.append(state + '_begin')
else:
state = 'root'
statetokens = self.rules[stack[-1]]
source_length = len(source)
balancing_stack = []
while 1:
# tokenizer loop
for regex, tokens, new_state in statetokens:
m = regex.match(source, pos)
# if no match we try again with the next rule
if m is None:
continue
# we only match blocks and variables if braces / parentheses
# are balanced. continue parsing with the lower rule which
# is the operator rule. do this only if the end tags look
# like operators
if balancing_stack and \
tokens in ('variable_end', 'block_end',
'linestatement_end'):
continue
# tuples support more options
if isinstance(tokens, tuple):
for idx, token in enumerate(tokens):
# failure group
if token.__class__ is Failure:
raise token(lineno, filename)
# bygroup is a bit more complex, in that case we
# yield for the current token the first named
# group that matched
elif token == '#bygroup':
for key, value in iteritems(m.groupdict()):
if value is not None:
yield lineno, key, value
lineno += value.count('\n')
break
else:
raise RuntimeError('%r wanted to resolve '
'the token dynamically'
' but no group matched'
% regex)
# normal group
else:
data = m.group(idx + 1)
if data or token not in ignore_if_empty:
yield lineno, token, data
lineno += data.count('\n')
# strings as token just are yielded as it.
else:
data = m.group()
# update brace/parentheses balance
if tokens == 'operator':
if data == '{':
balancing_stack.append('}')
elif data == '(':
balancing_stack.append(')')
elif data == '[':
balancing_stack.append(']')
elif data in ('}', ')', ']'):
if not balancing_stack:
raise TemplateSyntaxError('unexpected \'%s\'' %
data, lineno, name,
filename)
expected_op = balancing_stack.pop()
if expected_op != data:
raise TemplateSyntaxError('unexpected \'%s\', '
'expected \'%s\'' %
(data, expected_op),
lineno, name,
filename)
# yield items
if data or tokens not in ignore_if_empty:
yield lineno, tokens, data
lineno += data.count('\n')
# fetch new position into new variable so that we can check
# if there is an internal parsing error which would result
# in an infinite loop
pos2 = m.end()
# handle state changes
if new_state is not None:
# remove the uppermost state
if new_state == '#pop':
stack.pop()
# resolve the new state by group checking
elif new_state == '#bygroup':
for key, value in iteritems(m.groupdict()):
if value is not None:
stack.append(key)
break
else:
raise RuntimeError('%r wanted to resolve the '
'new state dynamically but'
' no group matched' %
regex)
# direct state name given
else:
stack.append(new_state)
statetokens = self.rules[stack[-1]]
# we are still at the same position and no stack change.
# this means a loop without break condition, avoid that and
# raise error
elif pos2 == pos:
raise RuntimeError('%r yielded empty string without '
'stack change' % regex)
# advance to the new position and start again
pos = pos2
break
# if loop terminated without break we haven't found a single match
# either we are at the end of the file or we have a problem
else:
# end of text
if pos >= source_length:
return
# something went wrong
raise TemplateSyntaxError('unexpected char %r at %d' %
(source[pos], pos), lineno,
name, filename)
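# Illustrative sketch (not part of jinja2): obtaining a (cached) lexer for an
# environment and tokenizing a small template; a default Environment is
# assumed here, and the helper name is hypothetical.
def _tokenize_example():
    from jinja2 import Environment
    lexer = get_lexer(Environment())
    for token in lexer.tokenize(u'Hello {{ name }}!'):
        print(token.lineno, token.type, token.value)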
| bsd-3-clause |
cherusk/ansible | lib/ansible/modules/system/gconftool2.py | 27 | 9389 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Kenneth D. Evensen <kevensen@redhat.com>
# (c) 2017, Abhijeet Kasurde <akasurde@redhat.com>
#
# This file is part of Ansible (sort of)
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
module: gconftool2
author:
- "Kenneth D. Evensen (@kevensen)"
short_description: Edit GNOME Configurations
description:
- This module allows for the manipulation of GNOME 2 Configuration via
gconftool-2. Please see the gconftool-2(1) man pages for more details.
version_added: "2.3"
options:
key:
required: true
description:
- A GConf preference key is an element in the GConf repository
that corresponds to an application preference. See man gconftool-2(1)
value:
required: false
description:
- Preference keys typically have simple values such as strings,
integers, or lists of strings and integers. This is ignored if the state
is "get". See man gconftool-2(1)
value_type:
required: false
choices:
- int
- bool
- float
- string
description:
- The type of value being set. This is ignored if the state is "get".
state:
required: true
choices:
- get
- present
- absent
description:
- The action to take upon the key/value.
config_source:
required: false
description:
- Specify a configuration source to use rather than the default path.
See man gconftool-2(1)
direct:
required: false
choices: [ "yes", "no" ]
default: no
description:
- Access the config database directly, bypassing server. If direct is
specified then the config_source must be specified as well.
See man gconftool-2(1)
"""
EXAMPLES = """
- name: Change the widget font to "Serif 12"
gconftool2:
key: "/desktop/gnome/interface/font_name"
value_type: "string"
value: "Serif 12"
"""
RETURN = '''
key:
description: The key specified in the module parameters
returned: success
type: string
sample: "/desktop/gnome/interface/font_name"
value_type:
description: The type of the value that was changed
returned: success
type: string
sample: "string"
value:
description: The value of the preference key after executing the module
returned: success
type: string
sample: "Serif 12"
...
'''
from subprocess import Popen, PIPE
from ansible.module_utils.basic import AnsibleModule, BOOLEANS_TRUE
from ansible.module_utils.pycompat24 import get_exception
class GConf2Preference(object):
def __init__(self, ansible, key, value_type, value,
direct=False, config_source=""):
self.ansible = ansible
self.key = key
self.value_type = value_type
self.value = value
self.config_source = config_source
self.direct = direct
def value_already_set(self):
return False
def call(self, call_type, fail_onerr=True):
""" Helper function to perform gconftool-2 operations """
config_source = ''
direct = ''
changed = False
out = ''
# If the configuration source is different from the default, create
# the argument
if self.config_source is not None and len(self.config_source) > 0:
config_source = "--config-source " + self.config_source
# If direct is true, create the argument
if self.direct:
direct = "--direct"
# Execute the call
cmd = "gconftool-2 "
try:
# If the call is "get", then we don't need as many parameters and
# we can ignore some
if call_type == 'get':
cmd += "--get {0}".format(self.key)
# Otherwise, we will use all relevant parameters
elif call_type == 'set':
cmd += "{0} {1} --type {2} --{3} {4} \"{5}\"".format(direct,
config_source,
self.value_type,
call_type,
self.key,
self.value)
elif call_type == 'unset':
cmd += "--unset {0}".format(self.key)
# Start external command
process = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
# In either case, we will capture the output
out = process.stdout.read()
err = process.stderr.read()
if len(err) > 0:
if fail_onerr:
self.ansible.fail_json(msg='gconftool-2 failed with '
'error: %s' % (str(err)))
else:
changed = True
except OSError:
exception = get_exception()
self.ansible.fail_json(msg='gconftool-2 failed with exception: '
'%s' % exception)
return changed, out.rstrip()
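# For reference (illustrative): the command strings call() assembles look like:
#   gconftool-2 --get /desktop/gnome/interface/font_name
#   gconftool-2 --type string --set /desktop/gnome/interface/font_name "Serif 12"
#   gconftool-2 --unset /desktop/gnome/interface/font_name
# with optional "--direct" and "--config-source <source>" inserted before the
# --type/--set arguments when configured.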
def main():
# Setup the Ansible module
module = AnsibleModule(
argument_spec=dict(
key=dict(required=True, type='str'),
value_type=dict(required=False,
choices=['int', 'bool', 'float', 'string'],
type='str'),
value=dict(required=False, default=None, type='str'),
state=dict(required=True,
choices=['present', 'get', 'absent'],
type='str'),
direct=dict(required=False, default=False, type='bool'),
config_source=dict(required=False, default=None, type='str')
),
supports_check_mode=True
)
state_values = {"present": "set", "absent": "unset", "get": "get"}
direct = False
# Assign module values to dictionary values
key = module.params['key']
value_type = module.params['value_type']
# Normalize boolean-like values; guard against None, which is legal for
# state "get"/"absent" where no value is required.
value = module.params['value']
if value is not None and value.lower() == "true":
    value = "true"
elif value is not None and value.lower() == "false":
    value = "false"
state = state_values[module.params['state']]
if module.params['direct'] in BOOLEANS_TRUE:
direct = True
config_source = module.params['config_source']
# Initialize some variables for later
change = False
new_value = ''
if state != "get":
if value is None or value == "":
module.fail_json(msg='State %s requires "value" to be set'
% str(state))
elif value_type is None or value_type == "":
module.fail_json(msg='State %s requires "value_type" to be set'
% str(state))
if direct and config_source is None:
module.fail_json(msg='If "direct" is "yes" then the ' +
'"config_source" must be specified')
elif not direct and config_source is not None:
module.fail_json(msg='If the "config_source" is specified ' +
'then "direct" must be "yes"')
# Create a gconf2 preference
gconf_pref = GConf2Preference(module, key, value_type,
value, direct, config_source)
# Now we get the current value, if not found don't fail
_, current_value = gconf_pref.call("get", fail_onerr=False)
# Check if the current value equals the value we want to set. If not, make
# a change
if current_value != value:
# In check mode, we know a change would have occurred.
if module.check_mode:
# So we will set the change to True
change = True
# And set the new_value to the value that would have been set
new_value = value
# If not check mode make the change.
else:
change, new_value = gconf_pref.call(state)
# If the value we want to set is the same as the current_value, we will
# set the new_value to the current_value for reporting
else:
new_value = current_value
facts = dict(gconftool2={'changed': change,
'key': key,
'value_type': value_type,
'new_value': new_value,
'previous_value': current_value,
'playbook_value': module.params['value']})
module.exit_json(changed=change, ansible_facts=facts)
if __name__ == '__main__':
main()
| gpl-3.0 |
laslabs/odoo | addons/crm/wizard/crm_lead_to_opportunity.py | 24 | 13151 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp.osv import fields, osv
from openerp.tools.translate import _
import re
from openerp.exceptions import UserError
class crm_lead2opportunity_partner(osv.osv_memory):
_name = 'crm.lead2opportunity.partner'
_description = 'Lead To Opportunity Partner'
_inherit = 'crm.partner.binding'
_columns = {
'name': fields.selection([
('convert', 'Convert to opportunity'),
('merge', 'Merge with existing opportunities')
], 'Conversion Action', required=True),
'opportunity_ids': fields.many2many('crm.lead', string='Opportunities'),
'user_id': fields.many2one('res.users', 'Salesperson', select=True),
'team_id': fields.many2one('crm.team', 'Sales Team', oldname='section_id', select=True),
}
def onchange_action(self, cr, uid, ids, action, context=None):
return {'value': {'partner_id': False if action != 'exist' else self._find_matching_partner(cr, uid, context=context)}}
def _get_duplicated_leads(self, cr, uid, partner_id, email, include_lost=False, context=None):
"""
Search for opportunities that have the same partner and that aren't done or cancelled
"""
return self.pool.get('crm.lead')._get_duplicated_leads_by_emails(cr, uid, partner_id, email, include_lost=include_lost, context=context)
def default_get(self, cr, uid, fields, context=None):
"""
Default get for name, opportunity_ids.
If there is an existing partner linked to the lead, find all existing
opportunities linked to this partner so all information can be merged together
"""
lead_obj = self.pool.get('crm.lead')
res = super(crm_lead2opportunity_partner, self).default_get(cr, uid, fields, context=context)
if context.get('active_id'):
tomerge = [int(context['active_id'])]
partner_id = res.get('partner_id')
lead = lead_obj.browse(cr, uid, int(context['active_id']), context=context)
email = lead.partner_id and lead.partner_id.email or lead.email_from
tomerge.extend(self._get_duplicated_leads(cr, uid, partner_id, email, include_lost=True, context=context))
tomerge = list(set(tomerge))
if 'action' in fields and not res.get('action'):
res.update({'action' : partner_id and 'exist' or 'create'})
if 'partner_id' in fields:
res.update({'partner_id' : partner_id})
if 'name' in fields:
res.update({'name' : len(tomerge) >= 2 and 'merge' or 'convert'})
if 'opportunity_ids' in fields and len(tomerge) >= 2:
res.update({'opportunity_ids': tomerge})
if lead.user_id:
res.update({'user_id': lead.user_id.id})
if lead.team_id:
res.update({'team_id': lead.team_id.id})
if not partner_id and not lead.contact_name:
res.update({'action': 'nothing'})
return res
def on_change_user(self, cr, uid, ids, user_id, team_id, context=None):
""" When changing the user, also set a team_id or restrict team id
to the ones user_id is member of. """
if user_id:
if team_id:
user_in_team = self.pool.get('crm.team').search(cr, uid, [('id', '=', team_id), '|', ('user_id', '=', user_id), ('member_ids', '=', user_id)], context=context, count=True)
else:
user_in_team = False
if not user_in_team:
result = self.pool['crm.lead'].on_change_user(cr, uid, ids, user_id, context=context)
team_id = result.get('value') and result['value'].get('team_id') and result['value']['team_id'] or False
return {'value': {'team_id': team_id}}
def view_init(self, cr, uid, fields, context=None):
"""
Check some preconditions before the wizard executes.
"""
if context is None:
context = {}
lead_obj = self.pool.get('crm.lead')
for lead in lead_obj.browse(cr, uid, context.get('active_ids', []), context=context):
if lead.probability == 100:
raise UserError(_("Closed/Dead leads cannot be converted into opportunities."))
return False
def _convert_opportunity(self, cr, uid, ids, vals, context=None):
if context is None:
context = {}
lead = self.pool.get('crm.lead')
res = False
lead_ids = vals.get('lead_ids', [])
team_id = vals.get('team_id', False)
partner_id = vals.get('partner_id')
data = self.browse(cr, uid, ids, context=context)[0]
leads = lead.browse(cr, uid, lead_ids, context=context)
for lead_id in leads:
partner_id = self._create_partner(cr, uid, lead_id.id, data.action, partner_id or lead_id.partner_id.id, context=context)
res = lead.convert_opportunity(cr, uid, [lead_id.id], partner_id, [], False, context=context)
user_ids = vals.get('user_ids', False)
if context.get('no_force_assignation'):
leads_to_allocate = [lead_id.id for lead_id in leads if not lead_id.user_id]
else:
leads_to_allocate = lead_ids
if user_ids:
lead.allocate_salesman(cr, uid, leads_to_allocate, user_ids, team_id=team_id, context=context)
return res
def action_apply(self, cr, uid, ids, context=None):
"""
Convert lead to opportunity or merge lead and opportunity and open
the freshly created opportunity view.
"""
if context is None:
context = {}
lead_obj = self.pool['crm.lead']
partner_obj = self.pool['res.partner']
w = self.browse(cr, uid, ids, context=context)[0]
opp_ids = [o.id for o in w.opportunity_ids]
vals = {
'team_id': w.team_id.id,
}
if w.partner_id:
vals['partner_id'] = w.partner_id.id
if w.name == 'merge':
lead_id = lead_obj.merge_opportunity(cr, uid, opp_ids, context=context)
lead_ids = [lead_id]
lead = lead_obj.read(cr, uid, lead_id, ['type', 'user_id'], context=context)
if lead['type'] == "lead":
context = dict(context, active_ids=lead_ids)
vals.update({'lead_ids': lead_ids, 'user_ids': [w.user_id.id]})
self._convert_opportunity(cr, uid, ids, vals, context=context)
elif not context.get('no_force_assignation') or not lead['user_id']:
vals.update({'user_id': w.user_id.id})
lead_obj.write(cr, uid, lead_id, vals, context=context)
else:
lead_ids = context.get('active_ids', [])
vals.update({'lead_ids': lead_ids, 'user_ids': [w.user_id.id]})
self._convert_opportunity(cr, uid, ids, vals, context=context)
for lead in lead_obj.browse(cr, uid, lead_ids, context=context):
if lead.partner_id and lead.partner_id.user_id != lead.user_id:
partner_obj.write(cr, uid, [lead.partner_id.id], {'user_id': lead.user_id.id}, context=context)
return self.pool.get('crm.lead').redirect_opportunity_view(cr, uid, lead_ids[0], context=context)
def _create_partner(self, cr, uid, lead_id, action, partner_id, context=None):
"""
Create partner based on action.
:return dict: dictionary organized as followed: {lead_id: partner_assigned_id}
"""
# TODO: this method is only called by the crm_lead2opportunity_partner
# wizard and would probably deserve to be refactored or at least
# moved to a better place
if context is None:
context = {}
lead = self.pool.get('crm.lead')
if action == 'each_exist_or_create':
ctx = dict(context)
ctx['active_id'] = lead_id
partner_id = self._find_matching_partner(cr, uid, context=ctx)
action = 'create'
res = lead.handle_partner_assignation(cr, uid, [lead_id], action, partner_id, context=context)
return res.get(lead_id)
class crm_lead2opportunity_mass_convert(osv.osv_memory):
_name = 'crm.lead2opportunity.partner.mass'
_description = 'Mass Lead To Opportunity Partner'
_inherit = 'crm.lead2opportunity.partner'
_columns = {
'user_ids': fields.many2many('res.users', string='Salesmen'),
'team_id': fields.many2one('crm.team', 'Sales Team', select=True, oldname='section_id'),
'deduplicate': fields.boolean('Apply deduplication', help='Merge with existing leads/opportunities of each partner'),
'action': fields.selection([
('each_exist_or_create', 'Use existing partner or create'),
('nothing', 'Do not link to a customer')
], 'Related Customer', required=True),
'force_assignation': fields.boolean('Force assignation', help='If unchecked, this will leave the salesman of duplicated opportunities'),
}
_defaults = {
'deduplicate': True,
}
def default_get(self, cr, uid, fields, context=None):
res = super(crm_lead2opportunity_mass_convert, self).default_get(cr, uid, fields, context)
if 'partner_id' in fields:
# avoid forcing the partner of the first lead as default
res['partner_id'] = False
if 'action' in fields:
res['action'] = 'each_exist_or_create'
if 'name' in fields:
res['name'] = 'convert'
if 'opportunity_ids' in fields:
res['opportunity_ids'] = False
return res
def on_change_action(self, cr, uid, ids, action, context=None):
vals = {}
if action != 'exist':
vals = {'value': {'partner_id': False}}
return vals
def on_change_deduplicate(self, cr, uid, ids, deduplicate, context=None):
if context is None:
context = {}
active_leads = self.pool['crm.lead'].browse(cr, uid, context['active_ids'], context=context)
partner_ids = [(lead.partner_id.id, lead.partner_id and lead.partner_id.email or lead.email_from) for lead in active_leads]
partners_duplicated_leads = {}
for partner_id, email in partner_ids:
duplicated_leads = self._get_duplicated_leads(cr, uid, partner_id, email)
if len(duplicated_leads) > 1:
partners_duplicated_leads.setdefault((partner_id, email), []).extend(duplicated_leads)
leads_with_duplicates = []
for lead in active_leads:
lead_tuple = (lead.partner_id.id, lead.partner_id.email if lead.partner_id else lead.email_from)
if len(partners_duplicated_leads.get(lead_tuple, [])) > 1:
leads_with_duplicates.append(lead.id)
return {'value': {'opportunity_ids': leads_with_duplicates}}
def _convert_opportunity(self, cr, uid, ids, vals, context=None):
"""
        When "massively" (more than one at a time) converting leads to
        opportunities, pick up the salesteam_id and salesmen_ids from the
        wizard and update the values before calling super.
"""
if context is None:
context = {}
data = self.browse(cr, uid, ids, context=context)[0]
salesteam_id = data.team_id and data.team_id.id or False
salesmen_ids = []
if data.user_ids:
salesmen_ids = [x.id for x in data.user_ids]
vals.update({'user_ids': salesmen_ids, 'team_id': salesteam_id})
return super(crm_lead2opportunity_mass_convert, self)._convert_opportunity(cr, uid, ids, vals, context=context)
def mass_convert(self, cr, uid, ids, context=None):
data = self.browse(cr, uid, ids, context=context)[0]
ctx = dict(context)
if data.name == 'convert' and data.deduplicate:
merged_lead_ids = []
remaining_lead_ids = []
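            # Track leads consumed by a merge separately from the surviving
            # lead ids so that each duplicate group is merged only once.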
lead_selected = context.get('active_ids', [])
for lead_id in lead_selected:
if lead_id not in merged_lead_ids:
lead = self.pool['crm.lead'].browse(cr, uid, lead_id, context=context)
duplicated_lead_ids = self._get_duplicated_leads(cr, uid, lead.partner_id.id, lead.partner_id and lead.partner_id.email or lead.email_from)
if len(duplicated_lead_ids) > 1:
lead_id = self.pool.get('crm.lead').merge_opportunity(cr, uid, duplicated_lead_ids, False, False, context=context)
merged_lead_ids.extend(duplicated_lead_ids)
remaining_lead_ids.append(lead_id)
active_ids = set(context.get('active_ids', []))
active_ids = active_ids.difference(merged_lead_ids)
active_ids = active_ids.union(remaining_lead_ids)
ctx['active_ids'] = list(active_ids)
ctx['no_force_assignation'] = context.get('no_force_assignation', not data.force_assignation)
return self.action_apply(cr, uid, ids, context=ctx)
| agpl-3.0 |
mttr/django | tests/delete_regress/models.py | 325 | 3172 | from django.contrib.contenttypes.fields import (
GenericForeignKey, GenericRelation,
)
from django.contrib.contenttypes.models import ContentType
from django.db import models
class Award(models.Model):
name = models.CharField(max_length=25)
object_id = models.PositiveIntegerField()
content_type = models.ForeignKey(ContentType, models.CASCADE)
content_object = GenericForeignKey()
class AwardNote(models.Model):
award = models.ForeignKey(Award, models.CASCADE)
note = models.CharField(max_length=100)
class Person(models.Model):
name = models.CharField(max_length=25)
awards = GenericRelation(Award)
class Book(models.Model):
pagecount = models.IntegerField()
class Toy(models.Model):
name = models.CharField(max_length=50)
class Child(models.Model):
name = models.CharField(max_length=50)
toys = models.ManyToManyField(Toy, through='PlayedWith')
class PlayedWith(models.Model):
child = models.ForeignKey(Child, models.CASCADE)
toy = models.ForeignKey(Toy, models.CASCADE)
date = models.DateField(db_column='date_col')
class PlayedWithNote(models.Model):
played = models.ForeignKey(PlayedWith, models.CASCADE)
note = models.TextField()
class Contact(models.Model):
label = models.CharField(max_length=100)
class Email(Contact):
email_address = models.EmailField(max_length=100)
class Researcher(models.Model):
contacts = models.ManyToManyField(Contact, related_name="research_contacts")
class Food(models.Model):
name = models.CharField(max_length=20, unique=True)
class Eaten(models.Model):
food = models.ForeignKey(Food, models.CASCADE, to_field="name")
meal = models.CharField(max_length=20)
# Models for #15776
class Policy(models.Model):
policy_number = models.CharField(max_length=10)
class Version(models.Model):
policy = models.ForeignKey(Policy, models.CASCADE)
class Location(models.Model):
version = models.ForeignKey(Version, models.SET_NULL, blank=True, null=True)
class Item(models.Model):
version = models.ForeignKey(Version, models.CASCADE)
location = models.ForeignKey(Location, models.SET_NULL, blank=True, null=True)
# Models for #16128
class File(models.Model):
pass
class Image(File):
class Meta:
proxy = True
class Photo(Image):
class Meta:
proxy = True
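# The FKs below target the proxy chain; since proxies share File's table,
# deleting a File cascades through FooImage, FooFile and FooPhoto alike.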
class FooImage(models.Model):
my_image = models.ForeignKey(Image, models.CASCADE)
class FooFile(models.Model):
my_file = models.ForeignKey(File, models.CASCADE)
class FooPhoto(models.Model):
my_photo = models.ForeignKey(Photo, models.CASCADE)
class FooFileProxy(FooFile):
class Meta:
proxy = True
class OrgUnit(models.Model):
name = models.CharField(max_length=64, unique=True)
class Login(models.Model):
description = models.CharField(max_length=32)
orgunit = models.ForeignKey(OrgUnit, models.CASCADE)
class House(models.Model):
address = models.CharField(max_length=32)
class OrderedPerson(models.Model):
name = models.CharField(max_length=32)
lives_in = models.ForeignKey(House, models.CASCADE)
class Meta:
ordering = ['name']
| bsd-3-clause |
nlevitt/youtube-dl | youtube_dl/extractor/anitube.py | 138 | 1721 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
class AnitubeIE(InfoExtractor):
IE_NAME = 'anitube.se'
_VALID_URL = r'https?://(?:www\.)?anitube\.se/video/(?P<id>\d+)'
_TEST = {
'url': 'http://www.anitube.se/video/36621',
'md5': '59d0eeae28ea0bc8c05e7af429998d43',
'info_dict': {
'id': '36621',
'ext': 'mp4',
'title': 'Recorder to Randoseru 01',
'duration': 180.19,
},
'skip': 'Blocked in the US',
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
webpage = self._download_webpage(url, video_id)
key = self._html_search_regex(
r'http://www\.anitube\.se/embed/([A-Za-z0-9_-]*)', webpage, 'key')
config_xml = self._download_xml(
'http://www.anitube.se/nuevo/econfig.php?key=%s' % key, key)
video_title = config_xml.find('title').text
thumbnail = config_xml.find('image').text
duration = float(config_xml.find('duration').text)
formats = []
video_url = config_xml.find('file')
if video_url is not None:
formats.append({
'format_id': 'sd',
'url': video_url.text,
})
video_url = config_xml.find('filehd')
if video_url is not None:
formats.append({
'format_id': 'hd',
'url': video_url.text,
})
return {
'id': video_id,
'title': video_title,
'thumbnail': thumbnail,
'duration': duration,
'formats': formats
}
| unlicense |
DarkmatterVale/ChatterBot | tests/storage_adapter_tests/test_jsondb_adapter.py | 1 | 10367 | from unittest import TestCase
from chatterbot.adapters.storage import JsonDatabaseAdapter
from chatterbot.conversation import Statement, Response
class JsonAdapterTestCase(TestCase):
def setUp(self):
"""
Instantiate the adapter.
"""
from random import randint
# Generate a random name for the database
database_name = str(randint(0, 9000))
self.adapter = JsonDatabaseAdapter(database=database_name)
def tearDown(self):
"""
Remove the test database.
"""
self.adapter.drop()
class JsonDatabaseAdapterTestCase(JsonAdapterTestCase):
def test_count_returns_zero(self):
"""
The count method should return a value of 0
when nothing has been saved to the database.
"""
self.assertEqual(self.adapter.count(), 0)
def test_count_returns_value(self):
"""
The count method should return a value of 1
when one item has been saved to the database.
"""
statement = Statement("Test statement")
self.adapter.update(statement)
self.assertEqual(self.adapter.count(), 1)
def test_statement_not_found(self):
"""
Test that None is returned by the find method
when a matching statement is not found.
"""
        self.assertEqual(self.adapter.find("Non-existent"), None)
def test_statement_found(self):
"""
Test that a matching statement is returned
when it exists in the database.
"""
statement = Statement("New statement")
self.adapter.update(statement)
found_statement = self.adapter.find("New statement")
self.assertNotEqual(found_statement, None)
self.assertEqual(found_statement.text, statement.text)
def test_update_adds_new_statement(self):
statement = Statement("New statement")
self.adapter.update(statement)
statement_found = self.adapter.find("New statement")
self.assertNotEqual(statement_found, None)
self.assertEqual(statement_found.text, statement.text)
def test_update_modifies_existing_statement(self):
statement = Statement("New statement")
self.adapter.update(statement)
# Check the initial values
found_statement = self.adapter.find(statement.text)
self.assertEqual(
len(found_statement.in_response_to), 0
)
# Update the statement value
statement.add_response(
Statement("New response")
)
self.adapter.update(statement)
# Check that the values have changed
found_statement = self.adapter.find(statement.text)
self.assertEqual(
len(found_statement.in_response_to), 1
)
def test_get_random_returns_statement(self):
statement = Statement("New statement")
self.adapter.update(statement)
random_statement = self.adapter.get_random()
self.assertEqual(random_statement.text, statement.text)
def test_find_returns_nested_responses(self):
response_list = [
Response("Yes"),
Response("No")
]
statement = Statement(
"Do you like this?",
in_response_to=response_list
)
self.adapter.update(statement)
result = self.adapter.find(statement.text)
self.assertIn("Yes", result.in_response_to)
self.assertIn("No", result.in_response_to)
def test_multiple_responses_added_on_update(self):
statement = Statement(
"You are welcome.",
in_response_to=[
Response("Thank you."),
Response("Thanks.")
]
)
self.adapter.update(statement)
result = self.adapter.find(statement.text)
self.assertEqual(len(result.in_response_to), 2)
self.assertIn(statement.in_response_to[0], result.in_response_to)
self.assertIn(statement.in_response_to[1], result.in_response_to)
def test_update_saves_statement_with_multiple_responses(self):
statement = Statement(
"You are welcome.",
in_response_to=[
Response("Thank you."),
Response("Thanks."),
]
)
self.adapter.update(statement)
response = self.adapter.find(statement.text)
self.assertEqual(len(response.in_response_to), 2)
def test_getting_and_updating_statement(self):
statement = Statement("Hi")
self.adapter.update(statement)
statement.add_response(Response("Hello"))
statement.add_response(Response("Hello"))
self.adapter.update(statement)
response = self.adapter.find(statement.text)
self.assertEqual(len(response.in_response_to), 1)
self.assertEqual(response.in_response_to[0].occurrence, 2)
def test_deserialize_responses(self):
response_list = [
{"text": "Test", "occurrence": 3},
{"text": "Testing", "occurrence": 1},
]
results = self.adapter.deserialize_responses(response_list)
self.assertEqual(len(results), 2)
class JsonDatabaseAdapterFilterTestCase(JsonAdapterTestCase):
def setUp(self):
super(JsonDatabaseAdapterFilterTestCase, self).setUp()
self.statement1 = Statement(
"Testing...",
in_response_to=[
Response("Why are you counting?")
]
)
self.statement2 = Statement(
"Testing one, two, three.",
in_response_to=[
Response("Testing...")
]
)
def test_filter_text_no_matches(self):
self.adapter.update(self.statement1)
results = self.adapter.filter(text="Howdy")
self.assertEqual(len(results), 0)
def test_filter_in_response_to_no_matches(self):
self.adapter.update(self.statement1)
results = self.adapter.filter(
in_response_to=[Response("Maybe")]
)
self.assertEqual(len(results), 0)
def test_filter_equal_results(self):
statement1 = Statement(
"Testing...",
in_response_to=[]
)
statement2 = Statement(
"Testing one, two, three.",
in_response_to=[]
)
self.adapter.update(statement1)
self.adapter.update(statement2)
results = self.adapter.filter(in_response_to=[])
self.assertEqual(len(results), 2)
self.assertIn(statement1, results)
self.assertIn(statement2, results)
def test_filter_contains_result(self):
self.adapter.update(self.statement1)
self.adapter.update(self.statement2)
results = self.adapter.filter(
in_response_to__contains="Why are you counting?"
)
self.assertEqual(len(results), 1)
self.assertIn(self.statement1, results)
def test_filter_contains_no_result(self):
self.adapter.update(self.statement1)
results = self.adapter.filter(
in_response_to__contains="How do you do?"
)
self.assertEqual(results, [])
def test_filter_multiple_parameters(self):
self.adapter.update(self.statement1)
self.adapter.update(self.statement2)
results = self.adapter.filter(
text="Testing...",
in_response_to__contains="Why are you counting?"
)
self.assertEqual(len(results), 1)
self.assertIn(self.statement1, results)
def test_filter_multiple_parameters_no_results(self):
self.adapter.update(self.statement1)
self.adapter.update(self.statement2)
results = self.adapter.filter(
text="Test",
in_response_to__contains="Not an existing response."
)
self.assertEqual(len(results), 0)
def test_filter_no_parameters(self):
"""
If no parameters are passed to the filter,
then all statements should be returned.
"""
statement1 = Statement("Testing...")
statement2 = Statement("Testing one, two, three.")
self.adapter.update(statement1)
self.adapter.update(statement2)
results = self.adapter.filter()
self.assertEqual(len(results), 2)
def test_filter_returns_statement_with_multiple_responses(self):
statement = Statement(
"You are welcome.",
in_response_to=[
Response("Thanks."),
Response("Thank you.")
]
)
self.adapter.update(statement)
response = self.adapter.filter(
in_response_to__contains="Thanks."
)
# Get the first response
response = response[0]
self.assertEqual(len(response.in_response_to), 2)
def test_response_list_in_results(self):
"""
If a statement with response values is found using
the filter method, they should be returned as
response objects.
"""
statement = Statement(
"The first is to help yourself, the second is to help others.",
in_response_to=[
Response("Why do people have two hands?")
]
)
self.adapter.update(statement)
found = self.adapter.filter(text=statement.text)
self.assertEqual(len(found[0].in_response_to), 1)
self.assertEqual(type(found[0].in_response_to[0]), Response)
class ReadOnlyJsonDatabaseAdapterTestCase(JsonAdapterTestCase):
def test_update_does_not_add_new_statement(self):
self.adapter.read_only = True
statement = Statement("New statement")
self.adapter.update(statement)
statement_found = self.adapter.find("New statement")
self.assertEqual(statement_found, None)
def test_update_does_not_modify_existing_statement(self):
statement = Statement("New statement")
self.adapter.update(statement)
self.adapter.read_only = True
statement.add_response(
Statement("New response")
)
self.adapter.update(statement)
statement_found = self.adapter.find("New statement")
self.assertEqual(statement_found.text, statement.text)
self.assertEqual(
len(statement_found.in_response_to), 0
)
| bsd-3-clause |
ianmabie/uberpy | venv/lib/python2.7/site-packages/yaml/dumper.py | 542 | 2719 |
__all__ = ['BaseDumper', 'SafeDumper', 'Dumper']
from emitter import *
from serializer import *
from representer import *
from resolver import *
class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver):
def __init__(self, stream,
default_style=None, default_flow_style=None,
canonical=None, indent=None, width=None,
allow_unicode=None, line_break=None,
encoding=None, explicit_start=None, explicit_end=None,
version=None, tags=None):
Emitter.__init__(self, stream, canonical=canonical,
indent=indent, width=width,
allow_unicode=allow_unicode, line_break=line_break)
Serializer.__init__(self, encoding=encoding,
explicit_start=explicit_start, explicit_end=explicit_end,
version=version, tags=tags)
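        # Note: `Representer` comes from the star import of `representer`
        # above; it defines no __init__ of its own, so this call resolves to
        # BaseRepresenter.__init__.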
Representer.__init__(self, default_style=default_style,
default_flow_style=default_flow_style)
Resolver.__init__(self)
class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver):
def __init__(self, stream,
default_style=None, default_flow_style=None,
canonical=None, indent=None, width=None,
allow_unicode=None, line_break=None,
encoding=None, explicit_start=None, explicit_end=None,
version=None, tags=None):
Emitter.__init__(self, stream, canonical=canonical,
indent=indent, width=width,
allow_unicode=allow_unicode, line_break=line_break)
Serializer.__init__(self, encoding=encoding,
explicit_start=explicit_start, explicit_end=explicit_end,
version=version, tags=tags)
SafeRepresenter.__init__(self, default_style=default_style,
default_flow_style=default_flow_style)
Resolver.__init__(self)
class Dumper(Emitter, Serializer, Representer, Resolver):
def __init__(self, stream,
default_style=None, default_flow_style=None,
canonical=None, indent=None, width=None,
allow_unicode=None, line_break=None,
encoding=None, explicit_start=None, explicit_end=None,
version=None, tags=None):
Emitter.__init__(self, stream, canonical=canonical,
indent=indent, width=width,
allow_unicode=allow_unicode, line_break=line_break)
Serializer.__init__(self, encoding=encoding,
explicit_start=explicit_start, explicit_end=explicit_end,
version=version, tags=tags)
Representer.__init__(self, default_style=default_style,
default_flow_style=default_flow_style)
Resolver.__init__(self)
| mit |
kmonsoor/python-for-android | python3-alpha/python3-src/Lib/email/iterators.py | 59 | 2205 | # Copyright (C) 2001-2006 Python Software Foundation
# Author: Barry Warsaw
# Contact: email-sig@python.org
"""Various types of useful iterators and generators."""
__all__ = [
'body_line_iterator',
'typed_subpart_iterator',
'walk',
# Do not include _structure() since it's part of the debugging API.
]
import sys
from io import StringIO
# This function will become a method of the Message class
def walk(self):
"""Walk over the message tree, yielding each subpart.
The walk is performed in depth-first order. This method is a
generator.
"""
yield self
if self.is_multipart():
for subpart in self.get_payload():
for subsubpart in subpart.walk():
yield subsubpart
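# Illustrative use: [part.get_content_type() for part in msg.walk()] lists
# the type of every part, multiparts included, in depth-first order.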
# These two functions are imported into the Iterators.py interface module.
def body_line_iterator(msg, decode=False):
"""Iterate over the parts, returning string payloads line-by-line.
Optional decode (default False) is passed through to .get_payload().
"""
for subpart in msg.walk():
payload = subpart.get_payload(decode=decode)
if isinstance(payload, str):
for line in StringIO(payload):
yield line
def typed_subpart_iterator(msg, maintype='text', subtype=None):
"""Iterate over the subparts with a given MIME type.
Use `maintype' as the main MIME type to match against; this defaults to
"text". Optional `subtype' is the MIME subtype to match against; if
omitted, only the main type is matched.
"""
for subpart in msg.walk():
if subpart.get_content_maintype() == maintype:
if subtype is None or subpart.get_content_subtype() == subtype:
yield subpart
def _structure(msg, fp=None, level=0, include_default=False):
"""A handy debugging aid"""
if fp is None:
fp = sys.stdout
tab = ' ' * (level * 4)
print(tab + msg.get_content_type(), end='', file=fp)
if include_default:
print(' [%s]' % msg.get_default_type(), file=fp)
else:
print(file=fp)
if msg.is_multipart():
for subpart in msg.get_payload():
_structure(subpart, fp, level+1, include_default)
| apache-2.0 |
GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/django/contrib/gis/db/backends/postgis/adapter.py | 94 | 1501 | """
This object provides quoting for GEOS geometries into PostgreSQL/PostGIS.
"""
from psycopg2 import Binary
from psycopg2.extensions import ISQLQuote
class PostGISAdapter(object):
def __init__(self, geom):
"Initializes on the geometry."
# Getting the WKB (in string form, to allow easy pickling of
        # the adapter) and the SRID from the geometry.
self.ewkb = str(geom.ewkb)
self.srid = geom.srid
self._adapter = Binary(self.ewkb)
def __conform__(self, proto):
# Does the given protocol conform to what Psycopg2 expects?
if proto == ISQLQuote:
return self
else:
raise Exception('Error implementing psycopg2 protocol. Is psycopg2 installed?')
def __eq__(self, other):
if not isinstance(other, PostGISAdapter):
return False
return (self.ewkb == other.ewkb) and (self.srid == other.srid)
def __str__(self):
return self.getquoted()
def prepare(self, conn):
"""
This method allows escaping the binary in the style required by the
server's `standard_conforming_string` setting.
"""
self._adapter.prepare(conn)
def getquoted(self):
"Returns a properly quoted string for use in PostgreSQL/PostGIS."
# psycopg will figure out whether to use E'\\000' or '\000'
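        # Illustrative output (exact escaping depends on the connection):
        # ST_GeomFromEWKB(E'\\001...'::bytea)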
return 'ST_GeomFromEWKB(%s)' % self._adapter.getquoted()
def prepare_database_save(self, unused):
return self
| agpl-3.0 |
dstockwell/catapult | third_party/webapp2/tests/request_test.py | 21 | 11167 | # -*- coding: utf-8 -*-
import StringIO
import webapp2
import test_base
def _norm_req(s):
return '\r\n'.join(s.strip().replace('\r','').split('\n'))
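# e.g. _norm_req('a\r\nb\n') == 'a\r\nb' -- strips surrounding whitespace
# and normalizes every line ending to CRLF.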
_test_req = """
POST /webob/ HTTP/1.0
Accept: */*
Cache-Control: max-age=0
Content-Type: multipart/form-data; boundary=----------------------------deb95b63e42a
Host: pythonpaste.org
User-Agent: UserAgent/1.0 (identifier-version) library/7.0 otherlibrary/0.8
------------------------------deb95b63e42a
Content-Disposition: form-data; name="foo"
foo
------------------------------deb95b63e42a
Content-Disposition: form-data; name="bar"; filename="bar.txt"
Content-type: application/octet-stream
these are the contents of the file 'bar.txt'
------------------------------deb95b63e42a--
"""
_test_req2 = """
POST / HTTP/1.0
Content-Length: 0
"""
_test_req = _norm_req(_test_req)
_test_req2 = _norm_req(_test_req2) + '\r\n'
class TestRequest(test_base.BaseTestCase):
def test_charset(self):
req = webapp2.Request.blank('/', environ={
'CONTENT_TYPE': 'text/html; charset=ISO-8859-4',
})
self.assertEqual(req.content_type, 'text/html')
self.assertEqual(req.charset, 'iso-8859-4')
req = webapp2.Request.blank('/', environ={
'CONTENT_TYPE': 'application/json; charset="ISO-8859-1"',
})
self.assertEqual(req.content_type, 'application/json')
self.assertEqual(req.charset, 'iso-8859-1')
req = webapp2.Request.blank('/', environ={
'CONTENT_TYPE': 'application/json',
})
self.assertEqual(req.content_type, 'application/json')
self.assertEqual(req.charset.lower(), 'utf-8')
match = webapp2._charset_re.search('text/html')
if match:
charset = match.group(1).lower().strip().strip('"').strip()
else:
charset = 'utf-8'
self.assertEqual(charset, 'utf-8')
match = webapp2._charset_re.search('text/html; charset=ISO-8859-4')
if match:
charset = match.group(1).lower().strip().strip('"').strip()
else:
charset = 'utf-8'
self.assertEqual(charset, 'iso-8859-4')
match = webapp2._charset_re.search('text/html; charset="ISO-8859-4"')
if match:
charset = match.group(1).lower().strip().strip('"').strip()
else:
charset = 'utf-8'
self.assertEqual(charset, 'iso-8859-4')
match = webapp2._charset_re.search('text/html; charset= " ISO-8859-4 " ')
if match:
charset = match.group(1).lower().strip().strip('"').strip()
else:
charset = 'utf-8'
self.assertEqual(charset, 'iso-8859-4')
def test_unicode(self):
req = webapp2.Request.blank('/?1=2', POST='3=4')
res = req.GET.get('1')
self.assertEqual(res, '2')
self.assertTrue(isinstance(res, unicode))
res = req.str_GET.get('1')
self.assertEqual(res, '2')
self.assertTrue(isinstance(res, str))
res = req.POST.get('3')
self.assertEqual(res, '4')
self.assertTrue(isinstance(res, unicode))
res = req.str_POST.get('3')
self.assertEqual(res, '4')
self.assertTrue(isinstance(res, str))
def test_cookie_unicode(self):
import urllib
import base64
# With base64 ---------------------------------------------------------
        value = base64.b64encode(u'á'.encode('utf-8'))
rsp = webapp2.Response()
rsp.set_cookie('foo', value)
cookie = rsp.headers.get('Set-Cookie')
req = webapp2.Request.blank('/', headers=[('Cookie', cookie)])
self.assertEqual(req.cookies.get('foo'), value)
        self.assertEqual(base64.b64decode(req.cookies.get('foo')).decode('utf-8'), u'á')
# Without quote -------------------------------------------------------
# Most recent WebOb versions take care of quoting.
# (not the version available on App Engine though)
        value = u'föö=bär; föo, bär, bäz=dïng;'
rsp = webapp2.Response()
rsp.set_cookie('foo', value)
cookie = rsp.headers.get('Set-Cookie')
req = webapp2.Request.blank('/', headers=[('Cookie', cookie)])
self.assertEqual(req.cookies.get('foo'), value)
# With quote, hard way ------------------------------------------------
# Here is our test value.
        x = u'föö'
# We must store cookies quoted. To quote unicode, we need to encode it.
y = urllib.quote(x.encode('utf8'))
# The encoded, quoted string looks ugly.
self.assertEqual(y, 'f%C3%B6%C3%B6')
# But it is easy to get it back to our initial value.
z = urllib.unquote(y).decode('utf8')
# And it is indeed the same value.
self.assertEqual(z, x)
# Set a cookie using the encoded/quoted value.
rsp = webapp2.Response()
rsp.set_cookie('foo', y)
cookie = rsp.headers.get('Set-Cookie')
self.assertEqual(cookie, 'foo=f%C3%B6%C3%B6; Path=/')
# Get the cookie back.
req = webapp2.Request.blank('/', headers=[('Cookie', cookie)])
self.assertEqual(req.cookies.get('foo'), y)
# Here is our original value, again. Problem: the value is decoded
# before we had a chance to unquote it.
w = urllib.unquote(req.cookies.get('foo').encode('utf8')).decode('utf8')
# And it is indeed the same value.
self.assertEqual(w, x)
# With quote, easy way ------------------------------------------------
        value = u'föö=bär; föo, bär, bäz=dïng;'
quoted_value = urllib.quote(value.encode('utf8'))
rsp = webapp2.Response()
rsp.set_cookie('foo', quoted_value)
cookie = rsp.headers.get('Set-Cookie')
req = webapp2.Request.blank('/', headers=[('Cookie', cookie)])
cookie_value = req.str_cookies.get('foo')
unquoted_cookie_value = urllib.unquote(cookie_value).decode('utf-8')
self.assertEqual(cookie_value, quoted_value)
self.assertEqual(unquoted_cookie_value, value)
def test_get(self):
req = webapp2.Request.blank('/?1=2&1=3&3=4', POST='5=6&7=8')
res = req.get('1')
self.assertEqual(res, '2')
res = req.get('1', allow_multiple=True)
self.assertEqual(res, ['2', '3'])
res = req.get('8')
self.assertEqual(res, '')
res = req.get('8', allow_multiple=True)
self.assertEqual(res, [])
res = req.get('8', default_value='9')
self.assertEqual(res, '9')
def test_get_with_POST(self):
req = webapp2.Request.blank('/?1=2&1=3&3=4', POST={5: 6, 7: 8},
unicode_errors='ignore')
res = req.get('1')
self.assertEqual(res, '2')
res = req.get('1', allow_multiple=True)
self.assertEqual(res, ['2', '3'])
res = req.get('8')
self.assertEqual(res, '')
res = req.get('8', allow_multiple=True)
self.assertEqual(res, [])
res = req.get('8', default_value='9')
self.assertEqual(res, '9')
def test_arguments(self):
req = webapp2.Request.blank('/?1=2&3=4', POST='5=6&7=8')
res = req.arguments()
self.assertEqual(res, ['1', '3', '5', '7'])
def test_get_range(self):
req = webapp2.Request.blank('/')
res = req.get_range('1', min_value=None, max_value=None, default=None)
self.assertEqual(res, None)
req = webapp2.Request.blank('/?1=2')
res = req.get_range('1', min_value=None, max_value=None, default=0)
self.assertEqual(res, 2)
req = webapp2.Request.blank('/?1=foo')
res = req.get_range('1', min_value=1, max_value=99, default=100)
self.assertEqual(res, 99)
def test_issue_3426(self):
"""When the content-type is 'application/x-www-form-urlencoded' and
        the POST data is empty, the content-type is dropped by Google App Engine.
"""
req = webapp2.Request.blank('/', environ={
'REQUEST_METHOD': 'GET',
'CONTENT_TYPE': 'application/x-www-form-urlencoded',
})
self.assertEqual(req.method, 'GET')
self.assertEqual(req.content_type, 'application/x-www-form-urlencoded')
# XXX: These tests fail when request charset is set to utf-8 by default.
# Otherwise they pass.
'''
def test_get_with_FieldStorage(self):
if not test_base.check_webob_version(1.0):
return
# A valid request without a Content-Length header should still read
# the full body.
# Also test parity between as_string and from_string / from_file.
import cgi
req = webapp2.Request.from_string(_test_req)
self.assertTrue(isinstance(req, webapp2.Request))
self.assertTrue(not repr(req).endswith('(invalid WSGI environ)>'))
self.assertTrue('\n' not in req.http_version or '\r' in req.http_version)
self.assertTrue(',' not in req.host)
self.assertTrue(req.content_length is not None)
self.assertEqual(req.content_length, 337)
self.assertTrue('foo' in req.body)
bar_contents = "these are the contents of the file 'bar.txt'\r\n"
self.assertTrue(bar_contents in req.body)
self.assertEqual(req.params['foo'], 'foo')
bar = req.params['bar']
self.assertTrue(isinstance(bar, cgi.FieldStorage))
self.assertEqual(bar.type, 'application/octet-stream')
bar.file.seek(0)
self.assertEqual(bar.file.read(), bar_contents)
bar = req.get_all('bar')
self.assertEqual(bar[0], bar_contents)
# out should equal contents, except for the Content-Length header,
# so insert that.
_test_req_copy = _test_req.replace('Content-Type',
'Content-Length: 337\r\nContent-Type')
self.assertEqual(str(req), _test_req_copy)
req2 = webapp2.Request.from_string(_test_req2)
self.assertTrue('host' not in req2.headers)
self.assertEqual(str(req2), _test_req2.rstrip())
self.assertRaises(ValueError,
webapp2.Request.from_string, _test_req2 + 'xx')
def test_issue_5118(self):
"""Unable to read POST variables ONCE self.request.body is read."""
if not test_base.check_webob_version(1.0):
return
import cgi
req = webapp2.Request.from_string(_test_req)
fieldStorage = req.POST.get('bar')
self.assertTrue(isinstance(fieldStorage, cgi.FieldStorage))
self.assertEqual(fieldStorage.type, 'application/octet-stream')
# Double read.
fieldStorage = req.POST.get('bar')
self.assertTrue(isinstance(fieldStorage, cgi.FieldStorage))
self.assertEqual(fieldStorage.type, 'application/octet-stream')
# Now read the body.
x = req.body
fieldStorage = req.POST.get('bar')
self.assertTrue(isinstance(fieldStorage, cgi.FieldStorage))
self.assertEqual(fieldStorage.type, 'application/octet-stream')
'''
if __name__ == '__main__':
test_base.main()
| bsd-3-clause |
therandomcode/WikiWriter | lib/flask/sessions.py | 348 | 12882 | # -*- coding: utf-8 -*-
"""
flask.sessions
~~~~~~~~~~~~~~
Implements cookie based sessions based on itsdangerous.
:copyright: (c) 2012 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import uuid
import hashlib
from datetime import datetime
from werkzeug.http import http_date, parse_date
from werkzeug.datastructures import CallbackDict
from . import Markup, json
from ._compat import iteritems, text_type
from itsdangerous import URLSafeTimedSerializer, BadSignature
def total_seconds(td):
return td.days * 60 * 60 * 24 + td.seconds
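# e.g. total_seconds(timedelta(days=1, seconds=30)) == 86430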
class SessionMixin(object):
    """Expands a basic dictionary with accessors that are expected
    by Flask extensions and users of the session.
"""
def _get_permanent(self):
return self.get('_permanent', False)
def _set_permanent(self, value):
self['_permanent'] = bool(value)
#: this reflects the ``'_permanent'`` key in the dict.
permanent = property(_get_permanent, _set_permanent)
del _get_permanent, _set_permanent
#: some session backends can tell you if a session is new, but that is
#: not necessarily guaranteed. Use with caution. The default mixin
#: implementation just hardcodes `False` in.
new = False
#: for some backends this will always be `True`, but some backends will
#: default this to false and detect changes in the dictionary for as
#: long as changes do not happen on mutable structures in the session.
#: The default mixin implementation just hardcodes `True` in.
modified = True
class TaggedJSONSerializer(object):
"""A customized JSON serializer that supports a few extra types that
we take for granted when serializing (tuples, markup objects, datetime).
"""
def dumps(self, value):
def _tag(value):
if isinstance(value, tuple):
return {' t': [_tag(x) for x in value]}
elif isinstance(value, uuid.UUID):
return {' u': value.hex}
elif callable(getattr(value, '__html__', None)):
return {' m': text_type(value.__html__())}
elif isinstance(value, list):
return [_tag(x) for x in value]
elif isinstance(value, datetime):
return {' d': http_date(value)}
elif isinstance(value, dict):
return dict((k, _tag(v)) for k, v in iteritems(value))
elif isinstance(value, str):
try:
return text_type(value)
except UnicodeError:
raise UnexpectedUnicodeError(u'A byte string with '
u'non-ASCII data was passed to the session system '
u'which can only store unicode strings. Consider '
u'base64 encoding your string (String was %r)' % value)
return value
return json.dumps(_tag(value), separators=(',', ':'))
def loads(self, value):
def object_hook(obj):
if len(obj) != 1:
return obj
the_key, the_value = next(iteritems(obj))
if the_key == ' t':
return tuple(the_value)
elif the_key == ' u':
return uuid.UUID(the_value)
elif the_key == ' m':
return Markup(the_value)
elif the_key == ' d':
return parse_date(the_value)
return obj
return json.loads(value, object_hook=object_hook)
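# Illustrative round-trip: session_json_serializer (defined below) maps
# session_json_serializer.loads(session_json_serializer.dumps((1, 2)))
# back to the tuple (1, 2) via the ' t' tag above.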
session_json_serializer = TaggedJSONSerializer()
class SecureCookieSession(CallbackDict, SessionMixin):
"""Baseclass for sessions based on signed cookies."""
def __init__(self, initial=None):
def on_update(self):
self.modified = True
CallbackDict.__init__(self, initial, on_update)
self.modified = False
class NullSession(SecureCookieSession):
"""Class used to generate nicer error messages if sessions are not
available. Will still allow read-only access to the empty session
but fail on setting.
"""
def _fail(self, *args, **kwargs):
raise RuntimeError('the session is unavailable because no secret '
'key was set. Set the secret_key on the '
'application to something unique and secret.')
__setitem__ = __delitem__ = clear = pop = popitem = \
update = setdefault = _fail
del _fail
class SessionInterface(object):
"""The basic interface you have to implement in order to replace the
default session interface which uses werkzeug's securecookie
implementation. The only methods you have to implement are
:meth:`open_session` and :meth:`save_session`, the others have
useful defaults which you don't need to change.
The session object returned by the :meth:`open_session` method has to
provide a dictionary like interface plus the properties and methods
from the :class:`SessionMixin`. We recommend just subclassing a dict
and adding that mixin::
class Session(dict, SessionMixin):
pass
If :meth:`open_session` returns `None` Flask will call into
:meth:`make_null_session` to create a session that acts as replacement
if the session support cannot work because some requirement is not
fulfilled. The default :class:`NullSession` class that is created
will complain that the secret key was not set.
To replace the session interface on an application all you have to do
is to assign :attr:`flask.Flask.session_interface`::
app = Flask(__name__)
app.session_interface = MySessionInterface()
.. versionadded:: 0.8
"""
#: :meth:`make_null_session` will look here for the class that should
#: be created when a null session is requested. Likewise the
#: :meth:`is_null_session` method will perform a typecheck against
#: this type.
null_session_class = NullSession
#: A flag that indicates if the session interface is pickle based.
#: This can be used by flask extensions to make a decision in regards
#: to how to deal with the session object.
#:
#: .. versionadded:: 0.10
pickle_based = False
def make_null_session(self, app):
"""Creates a null session which acts as a replacement object if the
real session support could not be loaded due to a configuration
error. This mainly aids the user experience because the job of the
null session is to still support lookup without complaining but
modifications are answered with a helpful error message of what
failed.
This creates an instance of :attr:`null_session_class` by default.
"""
return self.null_session_class()
def is_null_session(self, obj):
"""Checks if a given object is a null session. Null sessions are
not asked to be saved.
This checks if the object is an instance of :attr:`null_session_class`
by default.
"""
return isinstance(obj, self.null_session_class)
def get_cookie_domain(self, app):
"""Helpful helper method that returns the cookie domain that should
be used for the session cookie if session cookies are used.
"""
if app.config['SESSION_COOKIE_DOMAIN'] is not None:
return app.config['SESSION_COOKIE_DOMAIN']
if app.config['SERVER_NAME'] is not None:
            # chop off the port, which is usually not supported by browsers
rv = '.' + app.config['SERVER_NAME'].rsplit(':', 1)[0]
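            # e.g. SERVER_NAME 'example.com:8080' -> cookie domain '.example.com'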
            # Google Chrome does not like cookies set to .localhost, so
            # we just go with no domain then. Flask documents anyway that
            # cross-domain cookies need a fully qualified domain name.
if rv == '.localhost':
rv = None
# If we infer the cookie domain from the server name we need
# to check if we are in a subpath. In that case we can't
# set a cross domain cookie.
if rv is not None:
path = self.get_cookie_path(app)
if path != '/':
rv = rv.lstrip('.')
return rv
def get_cookie_path(self, app):
"""Returns the path for which the cookie should be valid. The
        default implementation uses the value from the ``SESSION_COOKIE_PATH``
        config var if it's set, and falls back to ``APPLICATION_ROOT`` or
        uses ``/`` if it's ``None``.
"""
return app.config['SESSION_COOKIE_PATH'] or \
app.config['APPLICATION_ROOT'] or '/'
def get_cookie_httponly(self, app):
"""Returns True if the session cookie should be httponly. This
currently just returns the value of the ``SESSION_COOKIE_HTTPONLY``
config var.
"""
return app.config['SESSION_COOKIE_HTTPONLY']
def get_cookie_secure(self, app):
"""Returns True if the cookie should be secure. This currently
just returns the value of the ``SESSION_COOKIE_SECURE`` setting.
"""
return app.config['SESSION_COOKIE_SECURE']
def get_expiration_time(self, app, session):
"""A helper method that returns an expiration date for the session
or `None` if the session is linked to the browser session. The
default implementation returns now + the permanent session
lifetime configured on the application.
"""
if session.permanent:
return datetime.utcnow() + app.permanent_session_lifetime
def open_session(self, app, request):
"""This method has to be implemented and must either return `None`
in case the loading failed because of a configuration error or an
instance of a session object which implements a dictionary like
interface + the methods and attributes on :class:`SessionMixin`.
"""
raise NotImplementedError()
def save_session(self, app, session, response):
"""This is called for actual sessions returned by :meth:`open_session`
at the end of the request. This is still called during a request
context so if you absolutely need access to the request you can do
that.
"""
raise NotImplementedError()
class SecureCookieSessionInterface(SessionInterface):
"""The default session interface that stores sessions in signed cookies
through the :mod:`itsdangerous` module.
"""
#: the salt that should be applied on top of the secret key for the
#: signing of cookie based sessions.
salt = 'cookie-session'
#: the hash function to use for the signature. The default is sha1
digest_method = staticmethod(hashlib.sha1)
#: the name of the itsdangerous supported key derivation. The default
#: is hmac.
key_derivation = 'hmac'
#: A python serializer for the payload. The default is a compact
#: JSON derived serializer with support for some extra Python types
#: such as datetime objects or tuples.
serializer = session_json_serializer
session_class = SecureCookieSession
def get_signing_serializer(self, app):
if not app.secret_key:
return None
signer_kwargs = dict(
key_derivation=self.key_derivation,
digest_method=self.digest_method
)
return URLSafeTimedSerializer(app.secret_key, salt=self.salt,
serializer=self.serializer,
signer_kwargs=signer_kwargs)
def open_session(self, app, request):
s = self.get_signing_serializer(app)
if s is None:
return None
val = request.cookies.get(app.session_cookie_name)
if not val:
return self.session_class()
max_age = total_seconds(app.permanent_session_lifetime)
try:
data = s.loads(val, max_age=max_age)
return self.session_class(data)
except BadSignature:
return self.session_class()
def save_session(self, app, session, response):
domain = self.get_cookie_domain(app)
path = self.get_cookie_path(app)
if not session:
if session.modified:
response.delete_cookie(app.session_cookie_name,
domain=domain, path=path)
return
httponly = self.get_cookie_httponly(app)
secure = self.get_cookie_secure(app)
expires = self.get_expiration_time(app, session)
val = self.get_signing_serializer(app).dumps(dict(session))
response.set_cookie(app.session_cookie_name, val,
expires=expires, httponly=httponly,
domain=domain, path=path, secure=secure)
from flask.debughelpers import UnexpectedUnicodeError
| apache-2.0 |
endorphinl/horizon | openstack_dashboard/contrib/sahara/content/data_processing/data_image_registry/views.py | 25 | 4526 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import tables
from horizon.utils import memoized
from openstack_dashboard.contrib.sahara.api import sahara as saharaclient
from openstack_dashboard.contrib.sahara.content. \
data_processing.data_image_registry.forms import EditTagsForm
from openstack_dashboard.contrib.sahara.content. \
data_processing.data_image_registry.forms import RegisterImageForm
from openstack_dashboard.contrib.sahara.content. \
data_processing.data_image_registry.tables import ImageRegistryTable
LOG = logging.getLogger(__name__)
class ImageRegistryView(tables.DataTableView):
table_class = ImageRegistryTable
template_name = (
'project/data_processing.data_image_registry/image_registry.html')
page_title = _("Image Registry")
def get_data(self):
try:
images = saharaclient.image_list(self.request)
except Exception:
images = []
msg = _('Unable to retrieve image list')
exceptions.handle(self.request, msg)
return images
def update_context_with_plugin_tags(request, context):
try:
plugins = saharaclient.plugin_list(request)
except Exception:
plugins = []
msg = _("Unable to process plugin tags")
exceptions.handle(request, msg)
plugins_object = dict()
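    # Maps plugin name -> version -> required image tags, e.g.
    # {'vanilla': {'2.7.1': ['vanilla', '2.7.1']}} (illustrative values).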
for plugin in plugins:
plugins_object[plugin.name] = dict()
for version in plugin.versions:
try:
details = saharaclient. \
plugin_get_version_details(request,
plugin.name,
version)
plugins_object[plugin.name][version] = (
details.required_image_tags)
except Exception:
msg = _("Unable to process plugin tags")
exceptions.handle(request, msg)
context["plugins"] = plugins_object
class EditTagsView(forms.ModalFormView):
form_class = EditTagsForm
template_name = (
'project/data_processing.data_image_registry/edit_tags.html')
success_url = reverse_lazy(
'horizon:project:data_processing.data_image_registry:index')
page_title = _("Edit Image Tags")
def get_context_data(self, **kwargs):
context = super(EditTagsView, self).get_context_data(**kwargs)
context['image'] = self.get_object()
update_context_with_plugin_tags(self.request, context)
return context
@memoized.memoized_method
def get_object(self):
try:
image = saharaclient.image_get(self.request,
self.kwargs["image_id"])
except Exception:
image = None
msg = _("Unable to fetch the image details")
exceptions.handle(self.request, msg)
return image
def get_initial(self):
image = self.get_object()
return {"image_id": image.id,
"tags_list": json.dumps(image.tags),
"user_name": image.username,
"description": image.description}
class RegisterImageView(forms.ModalFormView):
form_class = RegisterImageForm
template_name = (
'project/data_processing.data_image_registry/register_image.html')
success_url = reverse_lazy(
'horizon:project:data_processing.data_image_registry:index')
page_title = _("Register Image")
def get_context_data(self, **kwargs):
context = super(RegisterImageView, self).get_context_data(**kwargs)
update_context_with_plugin_tags(self.request, context)
return context
def get_initial(self):
# need this initialization to allow registration
# of images without tags
return {"tags_list": json.dumps([])}
| apache-2.0 |
msmbuilder/msmbuilder-legacy | Extras/parallel_assign/lib/remote.py | 2 | 1953 | """
Functions that execute remotely on the workers
Note that there are thre globals on each worker, pgens, conf and metric. Also,
due to the way that IPython.parallel works, we do imports inside the functions
"""
PREPARED, PGENS, CONF, METRIC = False, None, None, None
def load_gens(gens_fn, conf_fn, metric):
"""Setup a worker by adding pgens to its global namespace
This is necessary because pgens are not necessarily picklable, so we can't
just prepare them on the master and then push them to the remote workers --
instead we want to actually load the pgens from disk and prepare them on
the remote node
"""
from msmbuilder import Trajectory
global PGENS, CONF, METRIC, PREPARED
METRIC = metric
CONF = Trajectory.load_trajectory_file(conf_fn)
gens = Trajectory.load_trajectory_file(gens_fn)
PGENS = metric.prepare_trajectory(gens)
PREPARED = True
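# Illustrative worker-side flow (hypothetical filenames): load_gens('Gens.lh5',
# 'conf.pdb', metric) prepares the generators once per worker; later assign()
# calls reuse the globals instead of re-reading the files.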
def assign(vtraj, gens_fn, metric):
"""
Assign a VTraj to the generators
    This executes on the remote workers. It relies on module-level globals
    (CONF, METRIC, PGENS, PREPARED), which are worker-local.
Parameters
----------
vtraj : VTraj
A list of tuples like (traj_index, slice(start, end))
Globals
-------
conf : msmbuilder.Trajectory
metric : msmbuilder.metrics.AbstractDistanceMetric
"""
import numpy as np
global CONF
if not PREPARED:
load_gens(gens_fn, vtraj.project.conf_filename, metric)
traj = vtraj.load(CONF)
ptraj = METRIC.prepare_trajectory(traj)
n_frames = len(traj)
distances = np.zeros(n_frames)
assignments = np.zeros(n_frames, dtype=int)
for i in xrange(n_frames):
d_o2a = METRIC.one_to_all(ptraj, PGENS, i)
#d = np.zeros(len(ptraj))
assignments[i] = np.argmin(d_o2a)
distances[i] = d_o2a[assignments[i]]
return assignments, distances, vtraj
| gpl-2.0 |
charris/numpy | tools/find_deprecated_escaped_characters.py | 17 | 2025 | #!/usr/bin/env python3
r"""
Look for escape sequences deprecated in Python 3.6.
Python 3.6 deprecates a number of non-escape sequences starting with '\' that
were accepted before. For instance, '\(' was previously accepted but must now
be written as '\\(' or r'\('.
"""
import sys
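# Example invocation (illustrative):
#     $ python tools/find_deprecated_escaped_characters.py numpy/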
def main(root):
"""Find deprecated escape sequences.
    Checks for deprecated escape sequences in ``*.py`` files. If `root` is a
    file, that file is checked; if `root` is a directory, all ``*.py`` files
found in a recursive descent are checked.
If a deprecated escape sequence is found, the file and line where found is
printed. Note that for multiline strings the line where the string ends is
printed and the error(s) are somewhere in the body of the string.
Parameters
----------
root : str
File or directory to check.
Returns
-------
None
"""
import ast
import tokenize
import warnings
from pathlib import Path
count = 0
base = Path(root)
paths = base.rglob("*.py") if base.is_dir() else [base]
for path in paths:
# use tokenize to auto-detect encoding on systems where no
# default encoding is defined (e.g. LANG='C')
with tokenize.open(str(path)) as f:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
tree = ast.parse(f.read())
if w:
print("file: ", str(path))
for e in w:
print('line: ', e.lineno, ': ', e.message)
print()
count += len(w)
print("Errors Found", count)
if __name__ == "__main__":
from argparse import ArgumentParser
if sys.version_info[:2] < (3, 6):
raise RuntimeError("Python version must be >= 3.6")
parser = ArgumentParser(description="Find deprecated escaped characters")
parser.add_argument('root', help='directory or file to be checked')
args = parser.parse_args()
main(args.root)
| bsd-3-clause |
shoelzer/buildbot | master/buildbot/process/slavebuilder.py | 11 | 2072 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
# This module is left for backward compatibility of old-named worker API.
# It should never be imported by Buildbot.
from __future__ import absolute_import
from __future__ import print_function
from buildbot.process.workerforbuilder import AbstractWorkerForBuilder as _AbstractWorkerForBuilder
from buildbot.process.workerforbuilder import LatentWorkerForBuilder as _LatentWorkerForBuilder
from buildbot.process.workerforbuilder import WorkerForBuilder as _WorkerForBuilder
from buildbot.worker_transition import deprecatedWorkerModuleAttribute
from buildbot.worker_transition import reportDeprecatedWorkerModuleUsage
reportDeprecatedWorkerModuleUsage(
"'{old}' module is deprecated, use "
"'buildbot.process.workerforbuilder' module instead".format(old=__name__))
deprecatedWorkerModuleAttribute(locals(), _AbstractWorkerForBuilder,
compat_name="AbstractSlaveBuilder",
new_name="AbstractWorkerForBuilder")
deprecatedWorkerModuleAttribute(locals(), _WorkerForBuilder,
compat_name="SlaveBuilder",
new_name="WorkerForBuilder")
deprecatedWorkerModuleAttribute(locals(), _LatentWorkerForBuilder,
compat_name="LatentSlaveBuilder",
new_name="LatentWorkerForBuilder")
| gpl-2.0 |
marshnmedia/dotfiles | .vim/bundle/ultisnips.vim/pythonx/UltiSnips/text_objects/_python_code.py | 3 | 6279 | #!/usr/bin/env python
# encoding: utf-8
"""Implements `!p ` interpolation."""
import os
from collections import namedtuple
from UltiSnips import _vim
from UltiSnips.compatibility import as_unicode
from UltiSnips.indent_util import IndentUtil
from UltiSnips.text_objects._base import NoneditableTextObject
class _Tabs(object):
"""Allows access to tabstop content via t[] inside of python code."""
def __init__(self, to):
self._to = to
def __getitem__(self, no):
ts = self._to._get_tabstop(self._to, int(no)) # pylint:disable=protected-access
if ts is None:
return ""
return ts.current_text
_VisualContent = namedtuple('_VisualContent', ['mode', 'text'])
class SnippetUtil(object):
"""Provides easy access to indentation, etc. This is the 'snip' object in
python code."""
def __init__(self, initial_indent, vmode, vtext):
self._ind = IndentUtil()
self._visual = _VisualContent(vmode, vtext)
self._initial_indent = self._ind.indent_to_spaces(initial_indent)
self._reset("")
def _reset(self, cur):
"""Gets the snippet ready for another update.
:cur: the new value for c.
"""
self._ind.reset()
self._cur = cur
self._rv = ""
self._changed = False
self.reset_indent()
def shift(self, amount=1):
"""Shifts the indentation level.
        Note that this uses the shiftwidth because that's what code
formatters use.
:amount: the amount by which to shift.
"""
self.indent += " " * self._ind.shiftwidth * amount
def unshift(self, amount=1):
"""Unshift the indentation level.
        Note that this uses the shiftwidth because that's what code
formatters use.
:amount: the amount by which to unshift.
"""
by = -self._ind.shiftwidth * amount
try:
self.indent = self.indent[:by]
except IndexError:
self.indent = ""
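    # e.g. with shiftwidth=4, "snip >> 1" appends four spaces to the current
    # indent and "snip << 1" removes them again (see __rshift__/__lshift__).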
def mkline(self, line="", indent=None):
"""Creates a properly set up line.
:line: the text to add
:indent: the indentation to have at the beginning
if None, it uses the default amount
"""
if indent is None:
indent = self.indent
# this deals with the fact that the first line is
# already properly indented
if '\n' not in self._rv:
try:
indent = indent[len(self._initial_indent):]
except IndexError:
indent = ""
indent = self._ind.spaces_to_indent(indent)
return indent + line
def reset_indent(self):
"""Clears the indentation."""
self.indent = self._initial_indent
# Utility methods
@property
def fn(self): # pylint:disable=no-self-use,invalid-name
"""The filename."""
return _vim.eval('expand("%:t")') or ""
@property
def basename(self): # pylint:disable=no-self-use
"""The filename without extension."""
return _vim.eval('expand("%:t:r")') or ""
@property
def ft(self): # pylint:disable=invalid-name
"""The filetype."""
return self.opt("&filetype", "")
@property
def rv(self): # pylint:disable=invalid-name
"""The return value. The text to insert at the location of the
placeholder."""
return self._rv
@rv.setter
def rv(self, value): # pylint:disable=invalid-name
"""See getter."""
self._changed = True
self._rv = value
@property
def _rv_changed(self):
"""True if rv has changed."""
return self._changed
@property
def c(self): # pylint:disable=invalid-name
"""The current text of the placeholder."""
return self._cur
@property
def v(self): # pylint:disable=invalid-name
"""Content of visual expansions"""
return self._visual
def opt(self, option, default=None): # pylint:disable=no-self-use
"""Gets a Vim variable."""
if _vim.eval("exists('%s')" % option) == "1":
try:
return _vim.eval(option)
except _vim.error:
pass
return default
def __add__(self, value):
"""Appends the given line to rv using mkline."""
self.rv += '\n' # pylint:disable=invalid-name
self.rv += self.mkline(value)
return self
def __lshift__(self, other):
"""Same as unshift."""
self.unshift(other)
def __rshift__(self, other):
"""Same as shift."""
self.shift(other)
class PythonCode(NoneditableTextObject):
"""See module docstring."""
def __init__(self, parent, token):
# Find our containing snippet for snippet local data
snippet = parent
while snippet:
try:
self._locals = snippet.locals
text = snippet.visual_content.text
mode = snippet.visual_content.mode
break
except AttributeError:
snippet = snippet._parent # pylint:disable=protected-access
self._snip = SnippetUtil(token.indent, mode, text)
self._codes = ((
"import re, os, vim, string, random",
"\n".join(snippet.globals.get("!p", [])).replace("\r\n", "\n"),
token.code.replace("\\`", "`")
))
NoneditableTextObject.__init__(self, parent, token)
def _update(self, done):
path = _vim.eval('expand("%")') or ""
ct = self.current_text
self._locals.update({
't': _Tabs(self._parent),
'fn': os.path.basename(path),
'path': path,
'cur': ct,
'res': ct,
'snip': self._snip,
})
self._snip._reset(ct) # pylint:disable=protected-access
for code in self._codes:
exec(code, self._locals) # pylint:disable=exec-used
rv = as_unicode(
self._snip.rv if self._snip._rv_changed # pylint:disable=protected-access
else as_unicode(self._locals['res'])
)
if ct != rv:
self.overwrite(rv)
return False
return True
| mit |
koditr/xbmc-tr-team-turkish-addons | script.module.mechanize/lib/mechanize/_msiecookiejar.py | 134 | 14694 | """Microsoft Internet Explorer cookie loading on Windows.
Copyright 2002-2003 Johnny Lee <typo_pl@hotmail.com> (MSIE Perl code)
Copyright 2002-2006 John J Lee <jjl@pobox.com> (The Python port)
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD or ZPL 2.1 licenses (see the file
COPYING.txt included with the distribution).
"""
# XXX names and comments are not great here
import os, re, time, struct, logging
if os.name == "nt":
import _winreg
from _clientcookie import FileCookieJar, CookieJar, Cookie, \
MISSING_FILENAME_TEXT, LoadError
debug = logging.getLogger("mechanize").debug
def regload(path, leaf):
key = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER, path, 0,
_winreg.KEY_ALL_ACCESS)
try:
value = _winreg.QueryValueEx(key, leaf)[0]
except WindowsError:
value = None
return value
WIN32_EPOCH = 0x019db1ded53e8000L # 1970 Jan 01 00:00:00 in Win32 FILETIME
def epoch_time_offset_from_win32_filetime(filetime):
"""Convert from win32 filetime to seconds-since-epoch value.
MSIE stores create and expire times as Win32 FILETIME, which is 64
bits of 100 nanosecond intervals since Jan 01 1601.
    mechanize expects time as a 32-bit value expressed in seconds since the
epoch (Jan 01 1970).
"""
if filetime < WIN32_EPOCH:
raise ValueError("filetime (%d) is before epoch (%d)" %
(filetime, WIN32_EPOCH))
return divmod((filetime - WIN32_EPOCH), 10000000L)[0]
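# e.g. epoch_time_offset_from_win32_filetime(WIN32_EPOCH + 10000000L) == 1,
# since 10**7 FILETIME ticks of 100ns each make one second past the epoch.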
def binary_to_char(c): return "%02X" % ord(c)
def binary_to_str(d): return "".join(map(binary_to_char, list(d)))
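# e.g. binary_to_str("\x0d\xf0") == "0DF0"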
class MSIEBase:
magic_re = re.compile(r"Client UrlCache MMF Ver \d\.\d.*")
padding = "\x0d\xf0\xad\x0b"
msie_domain_re = re.compile(r"^([^/]+)(/.*)$")
cookie_re = re.compile("Cookie\:.+\@([\x21-\xFF]+).*?"
"(.+\@[\x21-\xFF]+\.txt)")
# path under HKEY_CURRENT_USER from which to get location of index.dat
reg_path = r"software\microsoft\windows" \
r"\currentversion\explorer\shell folders"
reg_key = "Cookies"
def __init__(self):
self._delayload_domains = {}
def _delayload_domain(self, domain):
# if necessary, lazily load cookies for this domain
delayload_info = self._delayload_domains.get(domain)
if delayload_info is not None:
cookie_file, ignore_discard, ignore_expires = delayload_info
try:
self.load_cookie_data(cookie_file,
ignore_discard, ignore_expires)
except (LoadError, IOError):
debug("error reading cookie file, skipping: %s", cookie_file)
else:
del self._delayload_domains[domain]
def _load_cookies_from_file(self, filename):
debug("Loading MSIE cookies file: %s", filename)
cookies = []
cookies_fh = open(filename)
try:
while 1:
key = cookies_fh.readline()
if key == "": break
rl = cookies_fh.readline
def getlong(rl=rl): return long(rl().rstrip())
def getstr(rl=rl): return rl().rstrip()
key = key.rstrip()
value = getstr()
domain_path = getstr()
flags = getlong() # 0x2000 bit is for secure I think
lo_expire = getlong()
hi_expire = getlong()
lo_create = getlong()
hi_create = getlong()
sep = getstr()
if "" in (key, value, domain_path, flags, hi_expire, lo_expire,
hi_create, lo_create, sep) or (sep != "*"):
break
m = self.msie_domain_re.search(domain_path)
if m:
domain = m.group(1)
path = m.group(2)
cookies.append({"KEY": key, "VALUE": value,
"DOMAIN": domain, "PATH": path,
"FLAGS": flags, "HIXP": hi_expire,
"LOXP": lo_expire, "HICREATE": hi_create,
"LOCREATE": lo_create})
finally:
cookies_fh.close()
return cookies
def load_cookie_data(self, filename,
ignore_discard=False, ignore_expires=False):
"""Load cookies from file containing actual cookie data.
Old cookies are kept unless overwritten by newly loaded ones.
You should not call this method if the delayload attribute is set.
I think each of these files contains all cookies for one user, domain,
and path.
filename: file containing cookies -- usually found in a file like
C:\WINNT\Profiles\joe\Cookies\joe@blah[1].txt
"""
now = int(time.time())
cookie_data = self._load_cookies_from_file(filename)
for cookie in cookie_data:
flags = cookie["FLAGS"]
secure = ((flags & 0x2000) != 0)
filetime = (cookie["HIXP"] << 32) + cookie["LOXP"]
expires = epoch_time_offset_from_win32_filetime(filetime)
if expires < now:
discard = True
else:
discard = False
domain = cookie["DOMAIN"]
initial_dot = domain.startswith(".")
if initial_dot:
domain_specified = True
else:
# MSIE 5 does not record whether the domain cookie-attribute
# was specified.
# Assuming it wasn't is conservative, because with strict
# domain matching this will match less frequently; with regular
# Netscape tail-matching, this will match at exactly the same
# times that domain_specified = True would. It also means we
# don't have to prepend a dot to achieve consistency with our
# own & Mozilla's domain-munging scheme.
domain_specified = False
# assume path_specified is false
# XXX is there other stuff in here? -- e.g. comment, commentURL?
c = Cookie(0,
cookie["KEY"], cookie["VALUE"],
None, False,
domain, domain_specified, initial_dot,
cookie["PATH"], False,
secure,
expires,
discard,
None,
None,
{"flags": flags})
if not ignore_discard and c.discard:
continue
if not ignore_expires and c.is_expired(now):
continue
CookieJar.set_cookie(self, c)
def load_from_registry(self, ignore_discard=False, ignore_expires=False,
username=None):
"""
username: only required on win9x
"""
cookies_dir = regload(self.reg_path, self.reg_key)
filename = os.path.normpath(os.path.join(cookies_dir, "INDEX.DAT"))
self.load(filename, ignore_discard, ignore_expires, username)
def _really_load(self, index, filename, ignore_discard, ignore_expires,
username):
now = int(time.time())
if username is None:
username = os.environ['USERNAME'].lower()
cookie_dir = os.path.dirname(filename)
data = index.read(256)
if len(data) != 256:
raise LoadError("%s file is too short" % filename)
# Cookies' index.dat file starts with 32 bytes of signature
# followed by an offset to the first record, stored as a little-
# endian DWORD.
sig, size, data = data[:32], data[32:36], data[36:]
size = struct.unpack("<L", size)[0]
# check that sig is valid
if not self.magic_re.match(sig) or size != 0x4000:
raise LoadError("%s ['%s' %s] does not seem to contain cookies" %
(str(filename), sig, size))
# skip to start of first record
index.seek(size, 0)
sector = 128 # size of sector in bytes
while 1:
data = ""
# Cookies are usually in two contiguous sectors, so read in two
# sectors and adjust if not a Cookie.
to_read = 2 * sector
d = index.read(to_read)
if len(d) != to_read:
break
data = data + d
# Each record starts with a 4-byte signature and a count
# (little-endian DWORD) of sectors for the record.
sig, size, data = data[:4], data[4:8], data[8:]
size = struct.unpack("<L", size)[0]
to_read = (size - 2) * sector
## from urllib import quote
## print "data", quote(data)
## print "sig", quote(sig)
## print "size in sectors", size
## print "size in bytes", size*sector
## print "size in units of 16 bytes", (size*sector) / 16
## print "size to read in bytes", to_read
## print
if sig != "URL ":
assert sig in ("HASH", "LEAK", \
self.padding, "\x00\x00\x00\x00"), \
"unrecognized MSIE index.dat record: %s" % \
binary_to_str(sig)
if sig == "\x00\x00\x00\x00":
# assume we've got all the cookies, and stop
break
if sig == self.padding:
continue
# skip the rest of this record
assert to_read >= 0
if size != 2:
assert to_read != 0
index.seek(to_read, 1)
continue
# read in rest of record if necessary
if size > 2:
more_data = index.read(to_read)
if len(more_data) != to_read: break
data = data + more_data
cookie_re = ("Cookie\:%s\@([\x21-\xFF]+).*?" % username +
"(%s\@[\x21-\xFF]+\.txt)" % username)
m = re.search(cookie_re, data, re.I)
if m:
cookie_file = os.path.join(cookie_dir, m.group(2))
if not self.delayload:
try:
self.load_cookie_data(cookie_file,
ignore_discard, ignore_expires)
except (LoadError, IOError):
debug("error reading cookie file, skipping: %s",
cookie_file)
else:
domain = m.group(1)
i = domain.find("/")
if i != -1:
domain = domain[:i]
self._delayload_domains[domain] = (
cookie_file, ignore_discard, ignore_expires)
class MSIECookieJar(MSIEBase, FileCookieJar):
"""FileCookieJar that reads from the Windows MSIE cookies database.
MSIECookieJar can read the cookie files of Microsoft Internet Explorer
(MSIE) for Windows version 5 on Windows NT and version 6 on Windows XP and
Windows 98. Other configurations may also work, but are untested. Saving
cookies in MSIE format is NOT supported. If you save cookies, they'll be
in the usual Set-Cookie3 format, which you can read back in using an
instance of the plain old CookieJar class. Don't save using the same
filename that you loaded cookies from, because you may succeed in
clobbering your MSIE cookies index file!
You should be able to have LWP share Internet Explorer's cookies like
this (note you need to supply a username to load_from_registry if you're on
Windows 9x or Windows ME):
cj = MSIECookieJar(delayload=1)
# find cookies index file in registry and load cookies from it
cj.load_from_registry()
opener = mechanize.build_opener(mechanize.HTTPCookieProcessor(cj))
response = opener.open("http://example.com/")
Iterating over a delayloaded MSIECookieJar instance will not cause any
cookies to be read from disk. To force reading of all cookies from disk,
call read_all_cookies. Note that the following methods iterate over self:
clear_temporary_cookies, clear_expired_cookies, __len__, __repr__, __str__
and as_string.
Additional methods:
load_from_registry(ignore_discard=False, ignore_expires=False,
username=None)
load_cookie_data(filename, ignore_discard=False, ignore_expires=False)
read_all_cookies()
"""
def __init__(self, filename=None, delayload=False, policy=None):
MSIEBase.__init__(self)
FileCookieJar.__init__(self, filename, delayload, policy)
def set_cookie(self, cookie):
if self.delayload:
self._delayload_domain(cookie.domain)
CookieJar.set_cookie(self, cookie)
def _cookies_for_request(self, request):
"""Return a list of cookies to be returned to server."""
domains = self._cookies.copy()
domains.update(self._delayload_domains)
domains = domains.keys()
cookies = []
for domain in domains:
cookies.extend(self._cookies_for_domain(domain, request))
return cookies
def _cookies_for_domain(self, domain, request):
if not self._policy.domain_return_ok(domain, request):
return []
debug("Checking %s for cookies to return", domain)
if self.delayload:
self._delayload_domain(domain)
return CookieJar._cookies_for_domain(self, domain, request)
def read_all_cookies(self):
"""Eagerly read in all cookies."""
if self.delayload:
for domain in self._delayload_domains.keys():
self._delayload_domain(domain)
def load(self, filename, ignore_discard=False, ignore_expires=False,
username=None):
"""Load cookies from an MSIE 'index.dat' cookies index file.
filename: full path to cookie index file
username: only required on win9x
"""
if filename is None:
if self.filename is not None: filename = self.filename
else: raise ValueError(MISSING_FILENAME_TEXT)
index = open(filename, "rb")
try:
self._really_load(index, filename, ignore_discard, ignore_expires,
username)
finally:
index.close()
| gpl-2.0 |
yufish/youtube-dl | youtube_dl/extractor/webofstories.py | 105 | 4945 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import int_or_none
class WebOfStoriesIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?webofstories\.com/play/(?:[^/]+/)?(?P<id>[0-9]+)'
_VIDEO_DOMAIN = 'http://eu-mobile.webofstories.com/'
_GREAT_LIFE_STREAMER = 'rtmp://eu-cdn1.webofstories.com/cfx/st/'
_USER_STREAMER = 'rtmp://eu-users.webofstories.com/cfx/st/'
_TESTS = [
{
'url': 'http://www.webofstories.com/play/hans.bethe/71',
'md5': '373e4dd915f60cfe3116322642ddf364',
'info_dict': {
'id': '4536',
'ext': 'mp4',
'title': 'The temperature of the sun',
'thumbnail': 're:^https?://.*\.jpg$',
'description': 'Hans Bethe talks about calculating the temperature of the sun',
'duration': 238,
}
},
{
'url': 'http://www.webofstories.com/play/55908',
'md5': '2985a698e1fe3211022422c4b5ed962c',
'info_dict': {
'id': '55908',
'ext': 'mp4',
'title': 'The story of Gemmata obscuriglobus',
'thumbnail': 're:^https?://.*\.jpg$',
'description': 'Planctomycete talks about The story of Gemmata obscuriglobus',
'duration': 169,
}
},
]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
title = self._og_search_title(webpage)
description = self._html_search_meta('description', webpage)
thumbnail = self._og_search_thumbnail(webpage)
embed_params = [s.strip(" \r\n\t'") for s in self._search_regex(
r'(?s)\$\("#embedCode"\).html\(getEmbedCode\((.*?)\)',
webpage, 'embed params').split(',')]
(
_, speaker_id, story_id, story_duration,
speaker_type, great_life, _thumbnail, _has_subtitles,
story_filename, _story_order) = embed_params
is_great_life_series = great_life == 'true'
duration = int_or_none(story_duration)
# URL building, see: http://www.webofstories.com/scripts/player.js
ms_prefix = ''
if speaker_type.lower() == 'ms':
ms_prefix = 'mini_sites/'
if is_great_life_series:
mp4_url = '{0:}lives/{1:}/{2:}.mp4'.format(
self._VIDEO_DOMAIN, speaker_id, story_filename)
rtmp_ext = 'flv'
streamer = self._GREAT_LIFE_STREAMER
play_path = 'stories/{0:}/{1:}'.format(
speaker_id, story_filename)
else:
mp4_url = '{0:}{1:}{2:}/{3:}.mp4'.format(
self._VIDEO_DOMAIN, ms_prefix, speaker_id, story_filename)
rtmp_ext = 'mp4'
streamer = self._USER_STREAMER
play_path = 'mp4:{0:}{1:}/{2}.mp4'.format(
ms_prefix, speaker_id, story_filename)
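        # Hedged illustration (speaker id and filename are invented): for a
        # plain user video with speaker_id '55908' and story_filename '55908',
        # the else-branch above yields roughly
        #   mp4_url   == 'http://eu-mobile.webofstories.com/55908/55908.mp4'
        #   play_path == 'mp4:55908/55908.mp4'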
formats = [{
'format_id': 'mp4_sd',
'url': mp4_url,
}, {
'format_id': 'rtmp_sd',
'page_url': url,
'url': streamer,
'ext': rtmp_ext,
'play_path': play_path,
}]
self._sort_formats(formats)
return {
'id': story_id,
'title': title,
'formats': formats,
'thumbnail': thumbnail,
'description': description,
'duration': duration,
}
class WebOfStoriesPlaylistIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?webofstories\.com/playAll/(?P<id>[^/]+)'
_TEST = {
'url': 'http://www.webofstories.com/playAll/donald.knuth',
'info_dict': {
'id': 'donald.knuth',
'title': 'Donald Knuth (Scientist)',
},
'playlist_mincount': 97,
}
def _real_extract(self, url):
playlist_id = self._match_id(url)
webpage = self._download_webpage(url, playlist_id)
entries = [
self.url_result('http://www.webofstories.com/play/%s' % video_number, 'WebOfStories')
for video_number in set(re.findall('href="/playAll/%s\?sId=(\d+)"' % playlist_id, webpage))
]
title = self._search_regex(
r'<div id="speakerName">\s*<span>([^<]+)</span>',
webpage, 'speaker', default=None)
if title:
field = self._search_regex(
r'<span id="primaryField">([^<]+)</span>',
webpage, 'field', default=None)
if field:
title += ' (%s)' % field
if not title:
title = self._search_regex(
r'<title>Play\s+all\s+stories\s*-\s*([^<]+)\s*-\s*Web\s+of\s+Stories</title>',
webpage, 'title')
return self.playlist_result(entries, playlist_id, title)
| unlicense |
dongjiaqiang/hadoop-20 | src/contrib/hod/hodlib/GridServices/mapred.py | 182 | 8167 | #Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
"""define MapReduce as subclass of Service"""
# -*- python -*-
import os, copy, time
from service import *
from hodlib.Hod.nodePool import *
from hodlib.Common.desc import CommandDesc
from hodlib.Common.util import get_exception_string, parseEquals
class MapReduceExternal(MasterSlave):
"""dummy proxy to external MapReduce instance"""
def __init__(self, serviceDesc, workDirs, version):
MasterSlave.__init__(self, serviceDesc, workDirs,None)
self.launchedMaster = True
self.masterInitialized = True
self.version = version
def getMasterRequest(self):
return None
def getMasterCommands(self, serviceDict):
return []
def getAdminCommands(self, serviceDict):
return []
def getWorkerCommands(self, serviceDict):
return []
def getMasterAddrs(self):
attrs = self.serviceDesc.getfinalAttrs()
addr = attrs['mapred.job.tracker']
return [addr]
def needsMore(self):
return 0
def needsLess(self):
return 0
def setMasterParams(self, dict):
self.serviceDesc['final-attrs']['mapred.job.tracker'] = "%s:%s" % (dict['host'],
dict['tracker_port'])
if self.version < 16:
self.serviceDesc.dict['final-attrs']['mapred.job.tracker.info.port'] = \
str(self.serviceDesc.dict['info_port'])
else:
# After Hadoop-2185
self.serviceDesc['final-attrs']['mapred.job.tracker.http.address'] = \
"%s:%s" %(dict['host'], dict['info_port'])
def getInfoAddrs(self):
attrs = self.serviceDesc.getfinalAttrs()
if self.version < 16:
addr = attrs['mapred.job.tracker']
k,v = addr.split( ":")
infoaddr = k + ':' + attrs['mapred.job.tracker.info.port']
else:
# After Hadoop-2185
      # Note: earlier, we never respected mapred.job.tracker.http.address
infoaddr = attrs['mapred.job.tracker.http.address']
return [infoaddr]
class MapReduce(MasterSlave):
def __init__(self, serviceDesc, workDirs,required_node, version,
workers_per_ring = 1):
MasterSlave.__init__(self, serviceDesc, workDirs,required_node)
self.masterNode = None
self.masterAddr = None
self.infoAddr = None
self.workers = []
self.required_node = required_node
self.version = version
self.workers_per_ring = workers_per_ring
def isLaunchable(self, serviceDict):
hdfs = serviceDict['hdfs']
if (hdfs.isMasterInitialized()):
return True
return False
def getMasterRequest(self):
req = NodeRequest(1, [], False)
return req
def getMasterCommands(self, serviceDict):
hdfs = serviceDict['hdfs']
cmdDesc = self._getJobTrackerCommand(hdfs)
return [cmdDesc]
def getAdminCommands(self, serviceDict):
return []
def getWorkerCommands(self, serviceDict):
hdfs = serviceDict['hdfs']
workerCmds = []
for id in range(1, self.workers_per_ring + 1):
workerCmds.append(self._getTaskTrackerCommand(str(id), hdfs))
return workerCmds
def setMasterNodes(self, list):
node = list[0]
self.masterNode = node
def getMasterAddrs(self):
return [self.masterAddr]
def getInfoAddrs(self):
return [self.infoAddr]
def getWorkers(self):
return self.workers
def requiredNode(self):
    return self.required_node
def setMasterParams(self, list):
dict = self._parseEquals(list)
self.masterAddr = dict['mapred.job.tracker']
k,v = self.masterAddr.split(":")
self.masterNode = k
if self.version < 16:
self.infoAddr = self.masterNode + ':' + dict['mapred.job.tracker.info.port']
else:
# After Hadoop-2185
self.infoAddr = dict['mapred.job.tracker.http.address']
def _parseEquals(self, list):
return parseEquals(list)
def _setWorkDirs(self, workDirs, envs, attrs, parentDirs, subDir):
local = []
system = None
temp = None
hadooptmpdir = None
dfsclient = []
for p in parentDirs:
workDirs.append(p)
workDirs.append(os.path.join(p, subDir))
dir = os.path.join(p, subDir, 'mapred-local')
local.append(dir)
if not system:
system = os.path.join(p, subDir, 'mapred-system')
if not temp:
temp = os.path.join(p, subDir, 'mapred-temp')
if not hadooptmpdir:
# Not used currently, generating hadooptmpdir just in case
hadooptmpdir = os.path.join(p, subDir, 'hadoop-tmp')
dfsclientdir = os.path.join(p, subDir, 'dfs-client')
dfsclient.append(dfsclientdir)
workDirs.append(dfsclientdir)
# FIXME!! use csv
attrs['mapred.local.dir'] = ','.join(local)
attrs['mapred.system.dir'] = 'fillindir'
attrs['mapred.temp.dir'] = temp
attrs['hadoop.tmp.dir'] = hadooptmpdir
envs['HADOOP_ROOT_LOGGER'] = "INFO,DRFA"
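  # Sketch of the layout produced above (the parent dir is hypothetical):
  # with parentDirs == ['/grid/0'] and subDir == 'mapred-jt', the method
  # leaves behind
  #   attrs['mapred.local.dir'] == '/grid/0/mapred-jt/mapred-local'
  #   attrs['mapred.temp.dir']  == '/grid/0/mapred-jt/mapred-temp'
  #   attrs['hadoop.tmp.dir']   == '/grid/0/mapred-jt/hadoop-tmp'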
def _getJobTrackerCommand(self, hdfs):
sd = self.serviceDesc
parentDirs = self.workDirs
workDirs = []
attrs = sd.getfinalAttrs().copy()
envs = sd.getEnvs().copy()
if 'mapred.job.tracker' not in attrs:
attrs['mapred.job.tracker'] = 'fillinhostport'
if self.version < 16:
if 'mapred.job.tracker.info.port' not in attrs:
attrs['mapred.job.tracker.info.port'] = 'fillinport'
else:
# Addressing Hadoop-2185,
if 'mapred.job.tracker.http.address' not in attrs:
attrs['mapred.job.tracker.http.address'] = 'fillinhostport'
attrs['fs.default.name'] = hdfs.getMasterAddrs()[0]
self._setWorkDirs(workDirs, envs, attrs, parentDirs, 'mapred-jt')
dict = { 'name' : 'jobtracker' }
dict['version'] = self.version
dict['program'] = os.path.join('bin', 'hadoop')
dict['argv'] = ['jobtracker']
dict['envs'] = envs
dict['pkgdirs'] = sd.getPkgDirs()
dict['workdirs'] = workDirs
dict['final-attrs'] = attrs
dict['attrs'] = sd.getAttrs()
cmd = CommandDesc(dict)
return cmd
def _getTaskTrackerCommand(self, id, hdfs):
sd = self.serviceDesc
parentDirs = self.workDirs
workDirs = []
attrs = sd.getfinalAttrs().copy()
envs = sd.getEnvs().copy()
jt = self.masterAddr
if jt == None:
raise ValueError, "Can't get job tracker address"
attrs['mapred.job.tracker'] = jt
attrs['fs.default.name'] = hdfs.getMasterAddrs()[0]
if self.version < 16:
if 'tasktracker.http.port' not in attrs:
attrs['tasktracker.http.port'] = 'fillinport'
      # prior to 16, tasktrackers always took ephemeral port 0 for
# tasktracker.report.bindAddress
else:
# Adding the following. Hadoop-2185
if 'mapred.task.tracker.report.address' not in attrs:
attrs['mapred.task.tracker.report.address'] = 'fillinhostport'
if 'mapred.task.tracker.http.address' not in attrs:
attrs['mapred.task.tracker.http.address'] = 'fillinhostport'
# unique parentDirs in case of multiple tasktrackers per hodring
pd = []
for dir in parentDirs:
dir = dir + "-" + id
pd.append(dir)
parentDirs = pd
# end of unique workdirs
self._setWorkDirs(workDirs, envs, attrs, parentDirs, 'mapred-tt')
dict = { 'name' : 'tasktracker' }
dict['program'] = os.path.join('bin', 'hadoop')
dict['argv'] = ['tasktracker']
dict['envs'] = envs
dict['pkgdirs'] = sd.getPkgDirs()
dict['workdirs'] = workDirs
dict['final-attrs'] = attrs
dict['attrs'] = sd.getAttrs()
cmd = CommandDesc(dict)
return cmd
| apache-2.0 |
ReactiveX/RxPY | tests/test_observable/test_join.py | 1 | 33029 | import unittest
from datetime import timedelta
import rx
from rx import operators as ops
from rx.testing import TestScheduler, ReactiveTest
on_next = ReactiveTest.on_next
on_completed = ReactiveTest.on_completed
on_error = ReactiveTest.on_error
subscribe = ReactiveTest.subscribe
subscribed = ReactiveTest.subscribed
disposed = ReactiveTest.disposed
created = ReactiveTest.created
class TimeSpan(object):
def from_ticks(self, value):
return value
class TimeInterval(object):
def __init__(self, value, interval):
if isinstance(interval, timedelta):
interval = int(interval.microseconds/1000)
self.value = value
self.interval = interval
def __str__(self):
return "%s@%s" % (self.value, self.interval)
def equals(self, other):
return other.interval == self.interval and other.value == self.value
def get_hash_code(self):
return self.value.get_hash_code() ^ self.interval.get_hash_code()
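# TimeInterval pairs a payload with a duration in virtual-time ticks, e.g.
# TimeInterval("hat", 20) in the tests below keeps "hat" joinable for 20 ticks.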
class TestJoin(unittest.TestCase):
def test_join_op_normal_i(self):
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
on_next(210, TimeInterval(0, 10)),
on_next(219, TimeInterval(1, 5)),
on_next(240, TimeInterval(2, 10)),
on_next(300, TimeInterval(3, 100)),
on_next(310, TimeInterval(4, 80)),
on_next(500, TimeInterval(5, 90)),
on_next(700, TimeInterval(6, 25)),
on_next(710, TimeInterval(7, 300)),
on_next(720, TimeInterval(8, 100)),
on_next(830, TimeInterval(9, 10)),
on_completed(900))
ys = scheduler.create_hot_observable(
on_next(215, TimeInterval("hat", 20)),
on_next(217, TimeInterval("bat", 1)),
on_next(290, TimeInterval("wag", 200)),
on_next(300, TimeInterval("pig", 10)),
on_next(305, TimeInterval("cup", 50)),
on_next(600, TimeInterval("yak", 90)),
on_next(702, TimeInterval("tin", 20)),
on_next(712, TimeInterval("man", 10)),
on_next(722, TimeInterval("rat", 200)),
on_next(732, TimeInterval("wig", 5)),
on_completed(800))
def create():
def mapper(xy):
x, y = xy
return "{}{}".format(x.value, y.value)
return xs.pipe(
ops.join(
ys,
lambda x: rx.timer(x.interval),
lambda y: rx.timer(y.interval),
),
ops.map(mapper),
)
results = scheduler.start(create=create)
assert results.messages == [
on_next(215, "0hat"),
on_next(217, "0bat"),
on_next(219, "1hat"),
on_next(300, "3wag"),
on_next(300, "3pig"),
on_next(305, "3cup"),
on_next(310, "4wag"),
on_next(310, "4pig"),
on_next(310, "4cup"),
on_next(702, "6tin"),
on_next(710, "7tin"),
on_next(712, "6man"),
on_next(712, "7man"),
on_next(720, "8tin"),
on_next(720, "8man"),
on_next(722, "6rat"),
on_next(722, "7rat"),
on_next(722, "8rat"),
on_next(732, "7wig"),
on_next(732, "8wig"),
on_next(830, "9rat"),
on_completed(900)]
def test_join_op_normal_ii(self):
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
on_next(210, TimeInterval(0, 10)),
on_next(219, TimeInterval(1, 5)),
on_next(240, TimeInterval(2, 10)),
on_next(300, TimeInterval(3, 100)),
on_next(310, TimeInterval(4, 80)),
on_next(500, TimeInterval(5, 90)),
on_next(700, TimeInterval(6, 25)),
on_next(710, TimeInterval(7, 200)),
on_next(720, TimeInterval(8, 100)),
on_completed(721))
ys = scheduler.create_hot_observable(
on_next(215, TimeInterval("hat", 20)),
on_next(217, TimeInterval("bat", 1)),
on_next(290, TimeInterval("wag", 200)),
on_next(300, TimeInterval("pig", 10)),
on_next(305, TimeInterval("cup", 50)),
on_next(600, TimeInterval("yak", 90)),
on_next(702, TimeInterval("tin", 20)),
on_next(712, TimeInterval("man", 10)),
on_next(722, TimeInterval("rat", 200)),
on_next(732, TimeInterval("wig", 5)),
on_completed(990))
def create():
def mapper(xy):
x, y = xy
return "{}{}".format(x.value, y.value)
return xs.pipe(
ops.join(
ys,
lambda x: rx.timer(x.interval),
lambda y: rx.timer(y.interval),
),
ops.map(mapper),
)
results = scheduler.start(create=create)
assert results.messages == [
on_next(215, "0hat"),
on_next(217, "0bat"),
on_next(219, "1hat"),
on_next(300, "3wag"),
on_next(300, "3pig"),
on_next(305, "3cup"),
on_next(310, "4wag"),
on_next(310, "4pig"),
on_next(310, "4cup"),
on_next(702, "6tin"),
on_next(710, "7tin"),
on_next(712, "6man"),
on_next(712, "7man"),
on_next(720, "8tin"),
on_next(720, "8man"),
on_next(722, "6rat"),
on_next(722, "7rat"),
on_next(722, "8rat"),
on_next(732, "7wig"),
on_next(732, "8wig"),
on_completed(910)]
def test_join_op_normal_iii(self):
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
on_next(210, TimeInterval(0, 10)),
on_next(219, TimeInterval(1, 5)),
on_next(240, TimeInterval(2, 10)),
on_next(300, TimeInterval(3, 100)),
on_next(310, TimeInterval(4, 80)),
on_next(500, TimeInterval(5, 90)),
on_next(700, TimeInterval(6, 25)),
on_next(710, TimeInterval(7, 300)),
on_next(720, TimeInterval(8, 100)),
on_next(830, TimeInterval(9, 10)),
on_completed(900))
ys = scheduler.create_hot_observable(
on_next(215, TimeInterval("hat", 20)),
on_next(217, TimeInterval("bat", 1)),
on_next(290, TimeInterval("wag", 200)),
on_next(300, TimeInterval("pig", 10)),
on_next(305, TimeInterval("cup", 50)),
on_next(600, TimeInterval("yak", 90)),
on_next(702, TimeInterval("tin", 20)),
on_next(712, TimeInterval("man", 10)),
on_next(722, TimeInterval("rat", 200)),
on_next(732, TimeInterval("wig", 5)),
on_completed(800))
def create():
def mapper(xy):
x, y = xy
return "{}{}".format(x.value, y.value)
return xs.pipe(
ops.join(
ys,
lambda x: rx.timer(x.interval).pipe(ops.filter(lambda _: False)),
lambda y: rx.timer(y.interval).pipe(ops.filter(lambda _: False)),
),
ops.map(mapper),
)
results = scheduler.start(create=create)
assert results.messages == [
on_next(215, "0hat"),
on_next(217, "0bat"),
on_next(219, "1hat"),
on_next(300, "3wag"),
on_next(300, "3pig"),
on_next(305, "3cup"),
on_next(310, "4wag"),
on_next(310, "4pig"),
on_next(310, "4cup"),
on_next(702, "6tin"),
on_next(710, "7tin"),
on_next(712, "6man"),
on_next(712, "7man"),
on_next(720, "8tin"),
on_next(720, "8man"),
on_next(722, "6rat"),
on_next(722, "7rat"),
on_next(722, "8rat"),
on_next(732, "7wig"),
on_next(732, "8wig"),
on_next(830, "9rat"),
on_completed(900)]
def test_join_op_normal_iv(self):
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
on_next(210, TimeInterval(0, 10)),
on_next(219, TimeInterval(1, 5)),
on_next(240, TimeInterval(2, 10)),
on_next(300, TimeInterval(3, 100)),
on_next(310, TimeInterval(4, 80)),
on_next(500, TimeInterval(5, 90)),
on_next(700, TimeInterval(6, 25)),
on_next(710, TimeInterval(7, 200)),
on_next(720, TimeInterval(8, 100)),
on_completed(990))
ys = scheduler.create_hot_observable(
on_next(215, TimeInterval("hat", 20)),
on_next(217, TimeInterval("bat", 1)),
on_next(290, TimeInterval("wag", 200)),
on_next(300, TimeInterval("pig", 10)),
on_next(305, TimeInterval("cup", 50)),
on_next(600, TimeInterval("yak", 90)),
on_next(702, TimeInterval("tin", 20)),
on_next(712, TimeInterval("man", 10)),
on_next(722, TimeInterval("rat", 200)),
on_next(732, TimeInterval("wig", 5)),
on_completed(980))
def create():
def mapper(xy):
x, y = xy
return "{}{}".format(x.value, y.value)
return xs.pipe(
ops.join(
ys,
lambda x: rx.timer(x.interval),
lambda y: rx.timer(y.interval),
),
ops.map(mapper),
)
results = scheduler.start(create=create)
assert results.messages == [
on_next(215, "0hat"),
on_next(217, "0bat"),
on_next(219, "1hat"),
on_next(300, "3wag"),
on_next(300, "3pig"),
on_next(305, "3cup"),
on_next(310, "4wag"),
on_next(310, "4pig"),
on_next(310, "4cup"),
on_next(702, "6tin"),
on_next(710, "7tin"),
on_next(712, "6man"),
on_next(712, "7man"),
on_next(720, "8tin"),
on_next(720, "8man"),
on_next(722, "6rat"),
on_next(722, "7rat"),
on_next(722, "8rat"),
on_next(732, "7wig"),
on_next(732, "8wig"),
on_completed(980)]
def test_join_op_normal_v(self):
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
on_next(210, TimeInterval(0, 10)),
on_next(219, TimeInterval(1, 5)),
on_next(240, TimeInterval(2, 10)),
on_next(300, TimeInterval(3, 100)),
on_next(310, TimeInterval(4, 80)),
on_next(500, TimeInterval(5, 90)),
on_next(700, TimeInterval(6, 25)),
on_next(710, TimeInterval(7, 200)),
on_next(720, TimeInterval(8, 100)),
on_completed(990))
ys = scheduler.create_hot_observable(
on_next(215, TimeInterval("hat", 20)),
on_next(217, TimeInterval("bat", 1)),
on_next(290, TimeInterval("wag", 200)),
on_next(300, TimeInterval("pig", 10)),
on_next(305, TimeInterval("cup", 50)),
on_next(600, TimeInterval("yak", 90)),
on_next(702, TimeInterval("tin", 20)),
on_next(712, TimeInterval("man", 10)),
on_next(722, TimeInterval("rat", 200)),
on_next(732, TimeInterval("wig", 5)),
on_completed(900))
def create():
def mapper(xy):
x, y = xy
return "{}{}".format(x.value, y.value)
return xs.pipe(
ops.join(
ys,
lambda x: rx.timer(x.interval),
lambda y: rx.timer(y.interval),
),
ops.map(mapper),
)
results = scheduler.start(create=create)
assert results.messages == [
on_next(215, "0hat"),
on_next(217, "0bat"),
on_next(219, "1hat"),
on_next(300, "3wag"),
on_next(300, "3pig"),
on_next(305, "3cup"),
on_next(310, "4wag"),
on_next(310, "4pig"),
on_next(310, "4cup"),
on_next(702, "6tin"),
on_next(710, "7tin"),
on_next(712, "6man"),
on_next(712, "7man"),
on_next(720, "8tin"),
on_next(720, "8man"),
on_next(722, "6rat"),
on_next(722, "7rat"),
on_next(722, "8rat"),
on_next(732, "7wig"),
on_next(732, "8wig"),
on_completed(922)]
def test_join_op_normal_vi(self):
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
on_next(210, TimeInterval(0, 10)),
on_next(219, TimeInterval(1, 5)),
on_next(240, TimeInterval(2, 10)),
on_next(300, TimeInterval(3, 100)),
on_next(310, TimeInterval(4, 80)),
on_next(500, TimeInterval(5, 90)),
on_next(700, TimeInterval(6, 25)),
on_next(710, TimeInterval(7, 30)),
on_next(720, TimeInterval(8, 200)),
on_next(830, TimeInterval(9, 10)),
on_completed(850))
ys = scheduler.create_hot_observable(
on_next(215, TimeInterval("hat", 20)),
on_next(217, TimeInterval("bat", 1)),
on_next(290, TimeInterval("wag", 200)),
on_next(300, TimeInterval("pig", 10)),
on_next(305, TimeInterval("cup", 50)),
on_next(600, TimeInterval("yak", 90)),
on_next(702, TimeInterval("tin", 20)),
on_next(712, TimeInterval("man", 10)),
on_next(722, TimeInterval("rat", 20)),
on_next(732, TimeInterval("wig", 5)),
on_completed(900))
def create():
def mapper(xy):
x, y = xy
return "{}{}".format(x.value, y.value)
return xs.pipe(
ops.join(
ys,
lambda x: rx.timer(x.interval),
lambda y: rx.timer(y.interval),
),
ops.map(mapper),
)
results = scheduler.start(create=create)
assert results.messages == [
on_next(215, "0hat"),
on_next(217, "0bat"),
on_next(219, "1hat"),
on_next(300, "3wag"),
on_next(300, "3pig"),
on_next(305, "3cup"),
on_next(310, "4wag"),
on_next(310, "4pig"),
on_next(310, "4cup"),
on_next(702, "6tin"),
on_next(710, "7tin"),
on_next(712, "6man"),
on_next(712, "7man"),
on_next(720, "8tin"),
on_next(720, "8man"),
on_next(722, "6rat"),
on_next(722, "7rat"),
on_next(722, "8rat"),
on_next(732, "7wig"),
on_next(732, "8wig"),
on_completed(900)]
def test_join_op_normal_vii(self):
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
on_next(210, TimeInterval(0, 10)),
on_next(219, TimeInterval(1, 5)),
on_next(240, TimeInterval(2, 10)),
on_next(300, TimeInterval(3, 100)),
on_next(310, TimeInterval(4, 80)),
on_next(500, TimeInterval(5, 90)),
on_next(700, TimeInterval(6, 25)),
on_next(710, TimeInterval(7, 300)),
on_next(720, TimeInterval(8, 100)),
on_next(830, TimeInterval(9, 10)),
on_completed(900))
ys = scheduler.create_hot_observable(
on_next(215, TimeInterval("hat", 20)),
on_next(217, TimeInterval("bat", 1)),
on_next(290, TimeInterval("wag", 200)),
on_next(300, TimeInterval("pig", 10)),
on_next(305, TimeInterval("cup", 50)),
on_next(600, TimeInterval("yak", 90)),
on_next(702, TimeInterval("tin", 20)),
on_next(712, TimeInterval("man", 10)),
on_next(722, TimeInterval("rat", 200)),
on_next(732, TimeInterval("wig", 5)),
on_completed(800))
def create():
def mapper(xy):
x, y = xy
return "{}{}".format(x.value, y.value)
return xs.pipe(
ops.join(
ys,
lambda x: rx.timer(x.interval),
lambda y: rx.timer(y.interval),
),
ops.map(mapper),
)
results = scheduler.start(create, disposed=713)
assert results.messages == [
on_next(215, "0hat"),
on_next(217, "0bat"),
on_next(219, "1hat"),
on_next(300, "3wag"),
on_next(300, "3pig"),
on_next(305, "3cup"),
on_next(310, "4wag"),
on_next(310, "4pig"),
on_next(310, "4cup"),
on_next(702, "6tin"),
on_next(710, "7tin"),
on_next(712, "6man"),
on_next(712, "7man")]
def test_join_op_error_i(self):
ex = 'ex'
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
on_next(210, TimeInterval(0, 10)),
on_next(219, TimeInterval(1, 5)),
on_next(240, TimeInterval(2, 10)),
on_next(300, TimeInterval(3, 100)),
on_error(310, ex))
ys = scheduler.create_hot_observable(
on_next(215, TimeInterval("hat", 20)),
on_next(217, TimeInterval("bat", 1)),
on_next(290, TimeInterval("wag", 200)),
on_next(300, TimeInterval("pig", 10)),
on_next(305, TimeInterval("cup", 50)),
on_next(600, TimeInterval("yak", 90)),
on_next(702, TimeInterval("tin", 20)),
on_next(712, TimeInterval("man", 10)),
on_next(722, TimeInterval("rat", 200)),
on_next(732, TimeInterval("wig", 5)),
on_completed(800))
def create():
def mapper(xy):
x, y = xy
return "{}{}".format(x.value, y.value)
return xs.pipe(
ops.join(
ys,
lambda x: rx.timer(x.interval),
lambda y: rx.timer(y.interval),
),
ops.map(mapper),
)
results = scheduler.start(create=create, disposed=713)
assert results.messages == [
on_next(215, "0hat"),
on_next(217, "0bat"),
on_next(219, "1hat"),
on_next(300, "3wag"),
on_next(300, "3pig"),
on_next(305, "3cup"),
on_error(310, ex)]
def test_join_op_error_ii(self):
ex = 'ex'
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
on_next(210, TimeInterval(0, 10)),
on_next(219, TimeInterval(1, 5)),
on_next(240, TimeInterval(2, 10)),
on_next(300, TimeInterval(3, 100)),
on_next(310, TimeInterval(4, 80)),
on_next(500, TimeInterval(5, 90)),
on_next(700, TimeInterval(6, 25)),
on_next(710, TimeInterval(7, 300)),
on_next(720, TimeInterval(8, 100)),
on_next(830, TimeInterval(9, 10)),
on_completed(900))
ys = scheduler.create_hot_observable(
on_next(215, TimeInterval("hat", 20)),
on_next(217, TimeInterval("bat", 1)),
on_next(290, TimeInterval("wag", 200)),
on_next(300, TimeInterval("pig", 10)),
on_next(305, TimeInterval("cup", 50)),
on_next(600, TimeInterval("yak", 90)),
on_next(702, TimeInterval("tin", 20)),
on_next(712, TimeInterval("man", 10)),
on_error(722, ex))
def create():
def mapper(xy):
x, y = xy
return "{}{}".format(x.value, y.value)
return xs.pipe(
ops.join(
ys,
lambda x: rx.timer(x.interval),
lambda y: rx.timer(y.interval),
),
ops.map(mapper),
)
results = scheduler.start(create=create)
assert results.messages == [
on_next(215, "0hat"),
on_next(217, "0bat"),
on_next(219, "1hat"),
on_next(300, "3wag"),
on_next(300, "3pig"),
on_next(305, "3cup"),
on_next(310, "4wag"),
on_next(310, "4pig"),
on_next(310, "4cup"),
on_next(702, "6tin"),
on_next(710, "7tin"),
on_next(712, "6man"),
on_next(712, "7man"),
on_next(720, "8tin"),
on_next(720, "8man"),
on_error(722, ex)]
def test_join_op_error_iii(self):
ex = 'ex'
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
on_next(210, TimeInterval(0, 10)),
on_next(219, TimeInterval(1, 5)),
on_next(240, TimeInterval(2, 10)),
on_next(300, TimeInterval(3, 100)),
on_next(310, TimeInterval(4, 80)),
on_next(500, TimeInterval(5, 90)),
on_next(700, TimeInterval(6, 25)),
on_next(710, TimeInterval(7, 300)),
on_next(720, TimeInterval(8, 100)),
on_next(830, TimeInterval(9, 10)),
on_completed(900))
ys = scheduler.create_hot_observable(
on_next(215, TimeInterval("hat", 20)),
on_next(217, TimeInterval("bat", 1)),
on_next(290, TimeInterval("wag", 200)),
on_next(300, TimeInterval("pig", 10)),
on_next(305, TimeInterval("cup", 50)),
on_next(600, TimeInterval("yak", 90)),
on_next(702, TimeInterval("tin", 20)),
on_next(712, TimeInterval("man", 10)),
on_next(722, TimeInterval("rat", 200)),
on_next(732, TimeInterval("wig", 5)),
on_completed(800))
def create():
def mapper(xy):
x, y = xy
return "{}{}".format(x.value, y.value)
return xs.pipe(
ops.join(
ys,
lambda x: rx.timer(x.interval).pipe(ops.flat_map(rx.throw(ex) if x.value == 6 else rx.empty())),
lambda y: rx.timer(y.interval),
),
ops.map(mapper),
)
results = scheduler.start(create=create)
assert results.messages == [
on_next(215, "0hat"),
on_next(217, "0bat"),
on_next(219, "1hat"),
on_next(300, "3wag"),
on_next(300, "3pig"),
on_next(305, "3cup"),
on_next(310, "4wag"),
on_next(310, "4pig"),
on_next(310, "4cup"),
on_next(702, "6tin"),
on_next(710, "7tin"),
on_next(712, "6man"),
on_next(712, "7man"),
on_next(720, "8tin"),
on_next(720, "8man"),
on_next(722, "6rat"),
on_next(722, "7rat"),
on_next(722, "8rat"),
on_error(725, ex)]
def test_join_op_error_iv(self):
ex = 'ex'
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
on_next(210, TimeInterval(0, 10)),
on_next(219, TimeInterval(1, 5)),
on_next(240, TimeInterval(2, 10)),
on_next(300, TimeInterval(3, 100)),
on_next(310, TimeInterval(4, 80)),
on_next(500, TimeInterval(5, 90)),
on_next(700, TimeInterval(6, 25)),
on_next(710, TimeInterval(7, 300)),
on_next(720, TimeInterval(8, 100)),
on_next(830, TimeInterval(9, 10)),
on_completed(900))
ys = scheduler.create_hot_observable(
on_next(215, TimeInterval("hat", 20)),
on_next(217, TimeInterval("bat", 1)),
on_next(290, TimeInterval("wag", 200)),
on_next(300, TimeInterval("pig", 10)),
on_next(305, TimeInterval("cup", 50)),
on_next(600, TimeInterval("yak", 90)),
on_next(702, TimeInterval("tin", 19)),
on_next(712, TimeInterval("man", 10)),
on_next(722, TimeInterval("rat", 200)),
on_next(732, TimeInterval("wig", 5)),
on_completed(800))
def create():
def mapper(xy):
x, y = xy
return "{}{}".format(x.value, y.value)
return xs.pipe(
ops.join(
ys,
lambda x: rx.timer(x.interval),
lambda y: rx.timer(y.interval).pipe(ops.flat_map(rx.throw(ex) if y.value == "tin" else rx.empty())),
),
ops.map(mapper),
)
results = scheduler.start(create=create)
assert results.messages == [
on_next(215, "0hat"),
on_next(217, "0bat"),
on_next(219, "1hat"),
on_next(300, "3wag"),
on_next(300, "3pig"),
on_next(305, "3cup"),
on_next(310, "4wag"),
on_next(310, "4pig"),
on_next(310, "4cup"),
on_next(702, "6tin"),
on_next(710, "7tin"),
on_next(712, "6man"),
on_next(712, "7man"),
on_next(720, "8tin"),
on_next(720, "8man"),
on_error(721, ex)]
def test_join_op_error_v(self):
ex = 'ex'
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
on_next(210, TimeInterval(0, 10)),
on_next(219, TimeInterval(1, 5)),
on_next(240, TimeInterval(2, 10)),
on_next(300, TimeInterval(3, 100)),
on_next(310, TimeInterval(4, 80)),
on_next(500, TimeInterval(5, 90)),
on_next(700, TimeInterval(6, 25)),
on_next(710, TimeInterval(7, 300)),
on_next(720, TimeInterval(8, 100)),
on_next(830, TimeInterval(9, 10)),
on_completed(900))
ys = scheduler.create_hot_observable(
on_next(215, TimeInterval("hat", 20)),
on_next(217, TimeInterval("bat", 1)),
on_next(290, TimeInterval("wag", 200)),
on_next(300, TimeInterval("pig", 10)),
on_next(305, TimeInterval("cup", 50)),
on_next(600, TimeInterval("yak", 90)),
on_next(702, TimeInterval("tin", 20)),
on_next(712, TimeInterval("man", 10)),
on_next(722, TimeInterval("rat", 200)),
on_next(732, TimeInterval("wig", 5)),
on_completed(800))
def create():
def left_duration_mapper(x):
if x.value >= 0:
raise Exception(ex)
else:
return rx.empty()
def mapper(xy):
x, y = xy
return "{}{}".format(x.value, y.value)
return xs.pipe(
ops.join(
ys,
left_duration_mapper,
lambda y: rx.timer(y.interval),
),
ops.map(mapper),
)
results = scheduler.start(create=create)
assert results.messages == [on_error(210, ex)]
def test_join_op_error_vi(self):
ex = 'ex'
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
on_next(210, TimeInterval(0, 10)),
on_next(219, TimeInterval(1, 5)),
on_next(240, TimeInterval(2, 10)),
on_next(300, TimeInterval(3, 100)),
on_next(310, TimeInterval(4, 80)),
on_next(500, TimeInterval(5, 90)),
on_next(700, TimeInterval(6, 25)),
on_next(710, TimeInterval(7, 300)),
on_next(720, TimeInterval(8, 100)),
on_next(830, TimeInterval(9, 10)),
on_completed(900))
ys = scheduler.create_hot_observable(
on_next(215, TimeInterval("hat", 20)),
on_next(217, TimeInterval("bat", 1)),
on_next(290, TimeInterval("wag", 200)),
on_next(300, TimeInterval("pig", 10)),
on_next(305, TimeInterval("cup", 50)),
on_next(600, TimeInterval("yak", 90)),
on_next(702, TimeInterval("tin", 20)),
on_next(712, TimeInterval("man", 10)),
on_next(722, TimeInterval("rat", 200)),
on_next(732, TimeInterval("wig", 5)),
on_completed(800))
def create():
def right_duration_mapper(y):
if len(y.value) >= 0:
raise Exception(ex)
else:
return rx.empty()
def mapper(xy):
x, y = xy
return "{}{}".format(x.value, y.value)
return xs.pipe(
ops.join(
ys,
lambda x: rx.timer(x.interval),
right_duration_mapper,
),
ops.map(mapper),
)
results = scheduler.start(create=create)
assert results.messages == [on_error(215, ex)]
def test_join_op_forward_scheduler(self):
scheduler = TestScheduler()
subscribe_schedulers = {
'x': 'unknown',
'y': 'unknown',
'duration_x': 'unknown',
'duration_y': 'unknown',
}
def subscribe_x(observer, scheduler='not_set'):
subscribe_schedulers['x'] = scheduler
# need to push one element to trigger duration mapper
observer.on_next('foo')
def subscribe_y(observer, scheduler='not_set'):
subscribe_schedulers['y'] = scheduler
# need to push one element to trigger duration mapper
observer.on_next('bar')
def subscribe_duration_x(observer, scheduler='not_set'):
subscribe_schedulers['duration_x'] = scheduler
def subscribe_duration_y(observer, scheduler='not_set'):
subscribe_schedulers['duration_y'] = scheduler
xs = rx.create(subscribe_x)
ys = rx.create(subscribe_y)
duration_x = rx.create(subscribe_duration_x)
duration_y = rx.create(subscribe_duration_y)
def create():
return xs.pipe(
ops.join(
ys,
lambda x: duration_x,
lambda y: duration_y,
),
)
results = scheduler.start(create=create)
assert subscribe_schedulers['x'] is scheduler
assert subscribe_schedulers['y'] is scheduler
assert subscribe_schedulers['duration_x'] is scheduler
assert subscribe_schedulers['duration_y'] is scheduler
def test_join_op_forward_scheduler_None(self):
subscribe_schedulers = {
'x': 'unknown',
'y': 'unknown',
'duration_x': 'unknown',
'duration_y': 'unknown',
}
def subscribe_x(observer, scheduler='not_set'):
subscribe_schedulers['x'] = scheduler
# need to push one element to trigger duration mapper
observer.on_next('foo')
def subscribe_y(observer, scheduler='not_set'):
subscribe_schedulers['y'] = scheduler
# need to push one element to trigger duration mapper
observer.on_next('bar')
def subscribe_duration_x(observer, scheduler='not_set'):
subscribe_schedulers['duration_x'] = scheduler
def subscribe_duration_y(observer, scheduler='not_set'):
subscribe_schedulers['duration_y'] = scheduler
xs = rx.create(subscribe_x)
ys = rx.create(subscribe_y)
duration_x = rx.create(subscribe_duration_x)
duration_y = rx.create(subscribe_duration_y)
stream = xs.pipe(
ops.join(
ys,
lambda x: duration_x,
lambda y: duration_y,
),
)
stream.subscribe()
assert subscribe_schedulers['x'] is None
assert subscribe_schedulers['y'] is None
assert subscribe_schedulers['duration_x'] is None
assert subscribe_schedulers['duration_y'] is None
| mit |
Mafarricos/Mafarricos-xbmc-addons | plugin.video.jami/plugintools.py | 20 | 18864 | # -*- coding: utf-8 -*-
#---------------------------------------------------------------------------
# Plugin Tools v1.0.8
#---------------------------------------------------------------------------
# License: GPL (http://www.gnu.org/licenses/gpl-3.0.html)
# Based on code from youtube, parsedom and pelisalacarta addons
# Author:
# Jesús
# tvalacarta@gmail.com
# http://www.mimediacenter.info/plugintools
#---------------------------------------------------------------------------
# Changelog:
# 1.0.0
# - First release
# 1.0.1
# - If find_single_match can't find anything, it returns an empty string
# - Remove addon id from this module, so it remains clean
# 1.0.2
# - Added parameter on "add_item" to mark the item as playable
# 1.0.3
# - Added direct play
# - Fixed bug when video isPlayable=True
# 1.0.4
# - Added get_temp_path, get_runtime_path, get_data_path
# - Added get_setting, set_setting, open_settings_dialog and get_localized_string
# - Added keyboard_input
# - Added message
# 1.0.5
# - Added read_body_and_headers for advanced http handling
# - Added show_picture for picture addons support
# - Added optional parameters "title" and "hidden" to keyboard_input
# 1.0.6
# - Added fanart, show, episode and infolabels to add_item
# 1.0.7
# - Added set_view function
# 1.0.8
# - Added selector
#---------------------------------------------------------------------------
import xbmc
import xbmcplugin
import xbmcaddon
import xbmcgui
import urllib
import urllib2
import re
import sys
import os
import time
import socket
from StringIO import StringIO
import gzip
module_log_enabled = False
http_debug_log_enabled = False
LIST = "list"
THUMBNAIL = "thumbnail"
MOVIES = "movies"
TV_SHOWS = "tvshows"
SEASONS = "seasons"
EPISODES = "episodes"
OTHER = "other"
# Suggested view codes for each type from different skins (initial list thanks to xbmcswift2 library)
ALL_VIEW_CODES = {
'list': {
'skin.confluence': 50, # List
'skin.aeon.nox': 50, # List
'skin.droid': 50, # List
'skin.quartz': 50, # List
'skin.re-touched': 50, # List
},
'thumbnail': {
'skin.confluence': 500, # Thumbnail
'skin.aeon.nox': 500, # Wall
'skin.droid': 51, # Big icons
'skin.quartz': 51, # Big icons
'skin.re-touched': 500, #Thumbnail
},
'movies': {
'skin.confluence': 500, # Thumbnail 515, # Media Info 3
'skin.aeon.nox': 500, # Wall
'skin.droid': 51, # Big icons
'skin.quartz': 52, # Media info
'skin.re-touched': 500, #Thumbnail
},
'tvshows': {
'skin.confluence': 500, # Thumbnail 515, # Media Info 3
'skin.aeon.nox': 500, # Wall
'skin.droid': 51, # Big icons
'skin.quartz': 52, # Media info
'skin.re-touched': 500, #Thumbnail
},
'seasons': {
'skin.confluence': 50, # List
'skin.aeon.nox': 50, # List
'skin.droid': 50, # List
'skin.quartz': 52, # Media info
'skin.re-touched': 50, # List
},
'episodes': {
'skin.confluence': 504, # Media Info
'skin.aeon.nox': 518, # Infopanel
'skin.droid': 50, # List
'skin.quartz': 52, # Media info
'skin.re-touched': 550, # Wide
},
}
# Write something on XBMC log
def log(message):
xbmc.log(message)
# Write this module messages on XBMC log
def _log(message):
if module_log_enabled:
xbmc.log("plugintools."+message)
# Parse XBMC params - based on script.module.parsedom addon
def get_params():
_log("get_params")
param_string = sys.argv[2]
_log("get_params "+str(param_string))
commands = {}
if param_string:
split_commands = param_string[param_string.find('?') + 1:].split('&')
for command in split_commands:
_log("get_params command="+str(command))
if len(command) > 0:
if "=" in command:
split_command = command.split('=')
key = split_command[0]
value = urllib.unquote_plus(split_command[1])
commands[key] = value
else:
commands[command] = ""
_log("get_params "+repr(commands))
return commands
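# Worked example of the parsing above (the query string is invented):
# with sys.argv[2] == '?action=play&url=http%3A%2F%2Fexample.com%2Fa.mp4'
# get_params() returns {'action': 'play', 'url': 'http://example.com/a.mp4'}.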
# Fetch text content from an URL
def read(url):
_log("read "+url)
f = urllib2.urlopen(url)
data = f.read()
f.close()
return data
def read_body_and_headers(url, post=None, headers=[], follow_redirects=False, timeout=None):
_log("read_body_and_headers "+url)
if post is not None:
_log("read_body_and_headers post="+post)
if len(headers)==0:
headers.append(["User-Agent","Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:18.0) Gecko/20100101 Firefox/18.0"])
# Start cookie lib
ficherocookies = os.path.join( get_data_path(), 'cookies.dat' )
_log("read_body_and_headers cookies_file="+ficherocookies)
cj = None
ClientCookie = None
cookielib = None
# Let's see if cookielib is available
try:
_log("read_body_and_headers importing cookielib")
import cookielib
except ImportError:
_log("read_body_and_headers cookielib no disponible")
# If importing cookielib fails
# let's try ClientCookie
try:
_log("read_body_and_headers importing ClientCookie")
import ClientCookie
except ImportError:
_log("read_body_and_headers ClientCookie not available")
# ClientCookie isn't available either
urlopen = urllib2.urlopen
Request = urllib2.Request
else:
_log("read_body_and_headers ClientCookie available")
# imported ClientCookie
urlopen = ClientCookie.urlopen
Request = ClientCookie.Request
cj = ClientCookie.MozillaCookieJar()
else:
_log("read_body_and_headers cookielib available")
# importing cookielib worked
urlopen = urllib2.urlopen
Request = urllib2.Request
cj = cookielib.MozillaCookieJar()
# This is a subclass of FileCookieJar
# that has useful load and save methods
if cj is not None:
# we successfully imported
# one of the two cookie handling modules
_log("read_body_and_headers Cookies enabled")
if os.path.isfile(ficherocookies):
_log("read_body_and_headers Reading cookie file")
# if we have a cookie file already saved
# then load the cookies into the Cookie Jar
try:
cj.load(ficherocookies)
except:
_log("read_body_and_headers Wrong cookie file, deleting...")
os.remove(ficherocookies)
# Now we need to get our Cookie Jar
# installed in the opener;
# for fetching URLs
if cookielib is not None:
_log("read_body_and_headers opener using urllib2 (cookielib)")
# if we use cookielib
# then we get the HTTPCookieProcessor
# and install the opener in urllib2
if not follow_redirects:
opener = urllib2.build_opener(urllib2.HTTPHandler(debuglevel=http_debug_log_enabled),urllib2.HTTPCookieProcessor(cj),NoRedirectHandler())
else:
opener = urllib2.build_opener(urllib2.HTTPHandler(debuglevel=http_debug_log_enabled),urllib2.HTTPCookieProcessor(cj))
urllib2.install_opener(opener)
else:
_log("read_body_and_headers opener using ClientCookie")
# if we use ClientCookie
# then we get the HTTPCookieProcessor
# and install the opener in ClientCookie
opener = ClientCookie.build_opener(ClientCookie.HTTPCookieProcessor(cj))
ClientCookie.install_opener(opener)
# -------------------------------------------------
    # Cookies installed, issue the request
    # -------------------------------------------------
    # Start timer
inicio = time.clock()
    # Dictionary for the request headers
txheaders = {}
    # Build the request
if post is None:
_log("read_body_and_headers GET request")
else:
_log("read_body_and_headers POST request")
    # Add the headers
_log("read_body_and_headers ---------------------------")
for header in headers:
_log("read_body_and_headers header %s=%s" % (str(header[0]),str(header[1])) )
txheaders[header[0]]=header[1]
_log("read_body_and_headers ---------------------------")
req = Request(url, post, txheaders)
if timeout is None:
handle=urlopen(req)
else:
        # Available in Python 2.6 onwards --> handle = urlopen(req, timeout=timeout)
        # For all versions:
try:
import socket
deftimeout = socket.getdefaulttimeout()
socket.setdefaulttimeout(timeout)
handle=urlopen(req)
socket.setdefaulttimeout(deftimeout)
except:
import sys
for line in sys.exc_info():
_log( "%s" % line )
    # Update the cookie store
cj.save(ficherocookies)
    # Read the data and close
if handle.info().get('Content-Encoding') == 'gzip':
buf = StringIO( handle.read())
f = gzip.GzipFile(fileobj=buf)
data = f.read()
else:
data=handle.read()
info = handle.info()
_log("read_body_and_headers Response")
returnheaders=[]
_log("read_body_and_headers ---------------------------")
for header in info:
_log("read_body_and_headers "+header+"="+info[header])
returnheaders.append([header,info[header]])
handle.close()
_log("read_body_and_headers ---------------------------")
'''
    # Issue the request
try:
response = urllib2.urlopen(req)
    # If it fails, retry with special characters escaped
except:
req = urllib2.Request(url.replace(" ","%20"))
        # Add the headers
for header in headers:
req.add_header(header[0],header[1])
response = urllib2.urlopen(req)
'''
    # Elapsed time
fin = time.clock()
_log("read_body_and_headers Downloaded in %d seconds " % (fin-inicio+1))
_log("read_body_and_headers body="+data)
return data,returnheaders
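# Hedged usage sketch (URL and header values are examples):
#   body, headers = read_body_and_headers("http://example.com/",
#       headers=[["Referer", "http://example.com/"]])
#   # 'body' is the (gunzipped if needed) response; 'headers' a list of pairs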
class NoRedirectHandler(urllib2.HTTPRedirectHandler):
def http_error_302(self, req, fp, code, msg, headers):
infourl = urllib.addinfourl(fp, headers, req.get_full_url())
infourl.status = code
infourl.code = code
return infourl
http_error_300 = http_error_302
http_error_301 = http_error_302
http_error_303 = http_error_302
http_error_307 = http_error_302
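    # Every 3xx status is routed through http_error_302 above, so redirects
    # are returned to the caller (with .status/.code set) rather than followed.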
# Parse string and extracts multiple matches using regular expressions
def find_multiple_matches(text,pattern):
_log("find_multiple_matches pattern="+pattern)
matches = re.findall(pattern,text,re.DOTALL)
return matches
# Parse string and extracts first match as a string
def find_single_match(text,pattern):
_log("find_single_match pattern="+pattern)
result = ""
try:
matches = re.findall(pattern,text, flags=re.DOTALL)
result = matches[0]
except:
result = ""
return result
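# Doctest-style sketch (the sample strings are illustrative assumptions):
# >>> find_multiple_matches('<a>1</a><a>2</a>', '<a>([^<]+)</a>')
# ['1', '2']
# >>> find_single_match('id="plugin.video.foo"', 'id="([^"]+)"')
# 'plugin.video.foo'
# >>> find_single_match('no id here', 'id="([^"]+)"')
# ''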
def add_item( action="" , title="" , plot="" , url="" , thumbnail="" , fanart="" , show="" , episode="" , extra="", page="", info_labels = None, isPlayable = False , folder=True ):
_log("add_item action=["+action+"] title=["+title+"] url=["+url+"] thumbnail=["+thumbnail+"] fanart=["+fanart+"] show=["+show+"] episode=["+episode+"] extra=["+extra+"] page=["+page+"] isPlayable=["+str(isPlayable)+"] folder=["+str(folder)+"]")
listitem = xbmcgui.ListItem( title, iconImage="DefaultVideo.png", thumbnailImage=thumbnail )
if info_labels is None:
info_labels = { "Title" : title, "FileName" : title, "Plot" : plot }
listitem.setInfo( "video", info_labels )
if fanart!="":
listitem.setProperty('fanart_image',fanart)
xbmcplugin.setPluginFanart(int(sys.argv[1]), fanart)
if url.startswith("plugin://"):
itemurl = url
listitem.setProperty('IsPlayable', 'true')
xbmcplugin.addDirectoryItem( handle=int(sys.argv[1]), url=itemurl, listitem=listitem, isFolder=folder)
elif isPlayable:
listitem.setProperty("Video", "true")
listitem.setProperty('IsPlayable', 'true')
itemurl = '%s?action=%s&title=%s&url=%s&thumbnail=%s&plot=%s&extra=%s&page=%s' % ( sys.argv[ 0 ] , action , urllib.quote_plus( title ) , urllib.quote_plus(url) , urllib.quote_plus( thumbnail ) , urllib.quote_plus( plot ) , urllib.quote_plus( extra ) , urllib.quote_plus( page ))
xbmcplugin.addDirectoryItem( handle=int(sys.argv[1]), url=itemurl, listitem=listitem, isFolder=folder)
else:
itemurl = '%s?action=%s&title=%s&url=%s&thumbnail=%s&plot=%s&extra=%s&page=%s' % ( sys.argv[ 0 ] , action , urllib.quote_plus( title ) , urllib.quote_plus(url) , urllib.quote_plus( thumbnail ) , urllib.quote_plus( plot ) , urllib.quote_plus( extra ) , urllib.quote_plus( page ))
xbmcplugin.addDirectoryItem( handle=int(sys.argv[1]), url=itemurl, listitem=listitem, isFolder=folder)
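# Minimal usage sketch (action name and URL are hypothetical):
#   add_item(action="play", title="Some video", plot="A short synopsis",
#            url="http://example.com/video.mp4", isPlayable=True, folder=False)
#   close_item_list()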
def close_item_list():
_log("close_item_list")
xbmcplugin.endOfDirectory(handle=int(sys.argv[1]), succeeded=True)
def play_resolved_url(url):
_log("play_resolved_url ["+url+"]")
listitem = xbmcgui.ListItem(path=url)
listitem.setProperty('IsPlayable', 'true')
return xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, listitem)
def direct_play(url):
_log("direct_play ["+url+"]")
title = ""
try:
xlistitem = xbmcgui.ListItem( title, iconImage="DefaultVideo.png", path=url)
except:
xlistitem = xbmcgui.ListItem( title, iconImage="DefaultVideo.png", )
xlistitem.setInfo( "video", { "Title": title } )
playlist = xbmc.PlayList( xbmc.PLAYLIST_VIDEO )
playlist.clear()
playlist.add( url, xlistitem )
player_type = xbmc.PLAYER_CORE_AUTO
xbmcPlayer = xbmc.Player( player_type )
xbmcPlayer.play(playlist)
def show_picture(url):
local_folder = os.path.join(get_data_path(),"images")
if not os.path.exists(local_folder):
try:
os.mkdir(local_folder)
except:
pass
local_file = os.path.join(local_folder,"temp.jpg")
# Download picture
urllib.urlretrieve(url, local_file)
# Show picture
xbmc.executebuiltin( "SlideShow("+local_folder+")" )
def get_temp_path():
_log("get_temp_path")
dev = xbmc.translatePath( "special://temp/" )
_log("get_temp_path ->'"+str(dev)+"'")
return dev
def get_runtime_path():
_log("get_runtime_path")
dev = xbmc.translatePath( __settings__.getAddonInfo('Path') )
_log("get_runtime_path ->'"+str(dev)+"'")
return dev
def get_data_path():
_log("get_data_path")
dev = xbmc.translatePath( __settings__.getAddonInfo('Profile') )
    # Patch for XBMC4XBOX
if not os.path.exists(dev):
os.makedirs(dev)
_log("get_data_path ->'"+str(dev)+"'")
return dev
def get_setting(name):
_log("get_setting name='"+name+"'")
dev = __settings__.getSetting( name )
_log("get_setting ->'"+str(dev)+"'")
return dev
def set_setting(name,value):
_log("set_setting name='"+name+"','"+value+"'")
__settings__.setSetting( name,value )
def open_settings_dialog():
_log("open_settings_dialog")
__settings__.openSettings()
def get_localized_string(code):
_log("get_localized_string code="+str(code))
dev = __language__(code)
try:
dev = dev.encode("utf-8")
except:
pass
_log("get_localized_string ->'"+dev+"'")
return dev
def keyboard_input(default_text="", title="", hidden=False):
_log("keyboard_input default_text='"+default_text+"'")
keyboard = xbmc.Keyboard(default_text,title,hidden)
keyboard.doModal()
if (keyboard.isConfirmed()):
tecleado = keyboard.getText()
else:
tecleado = ""
_log("keyboard_input ->'"+tecleado+"'")
return tecleado
def message(text1, text2="", text3=""):
_log("message text1='"+text1+"', text2='"+text2+"', text3='"+text3+"'")
if text3=="":
xbmcgui.Dialog().ok( text1 , text2 )
elif text2=="":
xbmcgui.Dialog().ok( "" , text1 )
else:
xbmcgui.Dialog().ok( text1 , text2 , text3 )
def message_yes_no(text1, text2="", text3=""):
_log("message_yes_no text1='"+text1+"', text2='"+text2+"', text3='"+text3+"'")
if text3=="":
yes_pressed = xbmcgui.Dialog().yesno( text1 , text2 )
elif text2=="":
yes_pressed = xbmcgui.Dialog().yesno( "" , text1 )
else:
yes_pressed = xbmcgui.Dialog().yesno( text1 , text2 , text3 )
return yes_pressed
def selector(option_list,title="Select one"):
_log("selector title='"+title+"', options="+repr(option_list))
dia = xbmcgui.Dialog()
selection = dia.select(title,option_list)
return selection
def set_view(view_mode, view_code=0):
_log("set_view view_mode='"+view_mode+"', view_code="+str(view_code))
# Set the content for extended library views if needed
if view_mode==MOVIES:
_log("set_view content is movies")
xbmcplugin.setContent( int(sys.argv[1]) ,"movies" )
elif view_mode==TV_SHOWS:
_log("set_view content is tvshows")
xbmcplugin.setContent( int(sys.argv[1]) ,"tvshows" )
elif view_mode==SEASONS:
_log("set_view content is seasons")
xbmcplugin.setContent( int(sys.argv[1]) ,"seasons" )
elif view_mode==EPISODES:
_log("set_view content is episodes")
xbmcplugin.setContent( int(sys.argv[1]) ,"episodes" )
# Reads skin name
skin_name = xbmc.getSkinDir()
_log("set_view skin_name='"+skin_name+"'")
try:
if view_code==0:
_log("set_view view mode is "+view_mode)
view_codes = ALL_VIEW_CODES.get(view_mode)
view_code = view_codes.get(skin_name)
_log("set_view view code for "+view_mode+" in "+skin_name+" is "+str(view_code))
xbmc.executebuiltin("Container.SetViewMode("+str(view_code)+")")
else:
_log("set_view view code forced to "+str(view_code))
xbmc.executebuiltin("Container.SetViewMode("+str(view_code)+")")
except:
_log("Unable to find view code for view mode "+str(view_mode)+" and skin "+skin_name)
f = open( os.path.join( os.path.dirname(__file__) , "addon.xml") )
data = f.read()
f.close()
addon_id = find_single_match(data,'id="([^"]+)"')
if addon_id=="":
addon_id = find_single_match(data,"id='([^']+)'")
__settings__ = xbmcaddon.Addon(id=addon_id)
__language__ = __settings__.getLocalizedString
| gpl-2.0 |
bop/hybrid | lib/python2.6/site-packages/django/utils/unittest/runner.py | 571 | 6761 | """Running tests"""
import sys
import time
import unittest
from django.utils.unittest import result
try:
from django.utils.unittest.signals import registerResult
except ImportError:
def registerResult(_):
pass
__unittest = True
class _WritelnDecorator(object):
"""Used to decorate file-like objects with a handy 'writeln' method"""
def __init__(self,stream):
self.stream = stream
def __getattr__(self, attr):
if attr in ('stream', '__getstate__'):
raise AttributeError(attr)
return getattr(self.stream,attr)
def writeln(self, arg=None):
if arg:
self.write(arg)
self.write('\n') # text-mode streams translate to \r\n if needed
class TextTestResult(result.TestResult):
"""A test result class that can print formatted text results to a stream.
Used by TextTestRunner.
"""
separator1 = '=' * 70
separator2 = '-' * 70
def __init__(self, stream, descriptions, verbosity):
super(TextTestResult, self).__init__()
self.stream = stream
self.showAll = verbosity > 1
self.dots = verbosity == 1
self.descriptions = descriptions
def getDescription(self, test):
doc_first_line = test.shortDescription()
if self.descriptions and doc_first_line:
return '\n'.join((str(test), doc_first_line))
else:
return str(test)
def startTest(self, test):
super(TextTestResult, self).startTest(test)
if self.showAll:
self.stream.write(self.getDescription(test))
self.stream.write(" ... ")
self.stream.flush()
def addSuccess(self, test):
super(TextTestResult, self).addSuccess(test)
if self.showAll:
self.stream.writeln("ok")
elif self.dots:
self.stream.write('.')
self.stream.flush()
def addError(self, test, err):
super(TextTestResult, self).addError(test, err)
if self.showAll:
self.stream.writeln("ERROR")
elif self.dots:
self.stream.write('E')
self.stream.flush()
def addFailure(self, test, err):
super(TextTestResult, self).addFailure(test, err)
if self.showAll:
self.stream.writeln("FAIL")
elif self.dots:
self.stream.write('F')
self.stream.flush()
def addSkip(self, test, reason):
super(TextTestResult, self).addSkip(test, reason)
if self.showAll:
self.stream.writeln("skipped %r" % (reason,))
elif self.dots:
self.stream.write("s")
self.stream.flush()
def addExpectedFailure(self, test, err):
super(TextTestResult, self).addExpectedFailure(test, err)
if self.showAll:
self.stream.writeln("expected failure")
elif self.dots:
self.stream.write("x")
self.stream.flush()
def addUnexpectedSuccess(self, test):
super(TextTestResult, self).addUnexpectedSuccess(test)
if self.showAll:
self.stream.writeln("unexpected success")
elif self.dots:
self.stream.write("u")
self.stream.flush()
def printErrors(self):
if self.dots or self.showAll:
self.stream.writeln()
self.printErrorList('ERROR', self.errors)
self.printErrorList('FAIL', self.failures)
def printErrorList(self, flavour, errors):
for test, err in errors:
self.stream.writeln(self.separator1)
self.stream.writeln("%s: %s" % (flavour, self.getDescription(test)))
self.stream.writeln(self.separator2)
self.stream.writeln("%s" % err)
def stopTestRun(self):
super(TextTestResult, self).stopTestRun()
self.printErrors()
class TextTestRunner(unittest.TextTestRunner):
"""A test runner class that displays results in textual form.
It prints out the names of tests as they are run, errors as they
occur, and a summary of the results at the end of the test run.
"""
resultclass = TextTestResult
def __init__(self, stream=sys.stderr, descriptions=True, verbosity=1,
failfast=False, buffer=False, resultclass=None):
self.stream = _WritelnDecorator(stream)
self.descriptions = descriptions
self.verbosity = verbosity
self.failfast = failfast
self.buffer = buffer
if resultclass is not None:
self.resultclass = resultclass
def _makeResult(self):
return self.resultclass(self.stream, self.descriptions, self.verbosity)
def run(self, test):
"Run the given test case or test suite."
result = self._makeResult()
result.failfast = self.failfast
result.buffer = self.buffer
registerResult(result)
startTime = time.time()
startTestRun = getattr(result, 'startTestRun', None)
if startTestRun is not None:
startTestRun()
try:
test(result)
finally:
stopTestRun = getattr(result, 'stopTestRun', None)
if stopTestRun is not None:
stopTestRun()
else:
result.printErrors()
stopTime = time.time()
timeTaken = stopTime - startTime
if hasattr(result, 'separator2'):
self.stream.writeln(result.separator2)
run = result.testsRun
self.stream.writeln("Ran %d test%s in %.3fs" %
(run, run != 1 and "s" or "", timeTaken))
self.stream.writeln()
expectedFails = unexpectedSuccesses = skipped = 0
try:
results = map(len, (result.expectedFailures,
result.unexpectedSuccesses,
result.skipped))
expectedFails, unexpectedSuccesses, skipped = results
except AttributeError:
pass
infos = []
if not result.wasSuccessful():
self.stream.write("FAILED")
failed, errored = map(len, (result.failures, result.errors))
if failed:
infos.append("failures=%d" % failed)
if errored:
infos.append("errors=%d" % errored)
else:
self.stream.write("OK")
if skipped:
infos.append("skipped=%d" % skipped)
if expectedFails:
infos.append("expected failures=%d" % expectedFails)
if unexpectedSuccesses:
infos.append("unexpected successes=%d" % unexpectedSuccesses)
if infos:
self.stream.writeln(" (%s)" % (", ".join(infos),))
else:
self.stream.write("\n")
return result
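# Typical usage (a sketch; SomeTestCase is a hypothetical TestCase class):
#
#   import unittest
#   suite = unittest.TestLoader().loadTestsFromTestCase(SomeTestCase)
#   TextTestRunner(verbosity=2).run(suite)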
| gpl-2.0 |
SteveHNH/ansible | lib/ansible/modules/cloud/centurylink/clc_blueprint_package.py | 56 | 10127 | #!/usr/bin/python
#
# Copyright (c) 2015 CenturyLink
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: clc_blueprint_package
short_description: deploys a blueprint package on a set of servers in CenturyLink Cloud.
description:
- An Ansible module to deploy a blueprint package on a set of servers in CenturyLink Cloud.
version_added: "2.0"
options:
server_ids:
description:
- A list of server IDs to deploy the blueprint package.
required: True
package_id:
description:
- The package id of the blueprint.
required: True
package_params:
description:
- The dictionary of arguments required to deploy the blueprint.
default: {}
required: False
state:
description:
- Whether to install or uninstall the package. Currently only "present" (install) is supported.
required: False
default: present
choices: ['present']
wait:
description:
- Whether to wait for the tasks to finish before returning.
choices: [ True, False ]
default: True
required: False
requirements:
- python = 2.7
- requests >= 2.5.0
- clc-sdk
author: "CLC Runner (@clc-runner)"
notes:
- To use this module, you must set the environment variables below, which enable access to the
CenturyLink Cloud
- CLC_V2_API_USERNAME, the account login id for the CenturyLink Cloud
- CLC_V2_API_PASSWORD, the account password for the CenturyLink Cloud
- Alternatively, the module accepts the API token and account alias. The API token can be generated using the
CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
- CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
- CLC_ACCT_ALIAS, the account alias associated with the CenturyLink Cloud
- Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
'''
EXAMPLES = '''
# Note - You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD environment variables before running these examples
- name: Deploy package
clc_blueprint_package:
server_ids:
- UC1TEST-SERVER1
- UC1TEST-SERVER2
package_id: 77abb844-579d-478d-3955-c69ab4a7ba1a
package_params: {}
'''
RETURN = '''
server_ids:
description: The list of server ids that are changed
returned: success
type: list
sample:
[
"UC1TEST-SERVER1",
"UC1TEST-SERVER2"
]
'''
__version__ = '${version}'
import os
from distutils.version import LooseVersion
try:
import requests
except ImportError:
REQUESTS_FOUND = False
else:
REQUESTS_FOUND = True
#
# Requires the clc-python-sdk.
# sudo pip install clc-sdk
#
try:
import clc as clc_sdk
from clc import CLCException
except ImportError:
CLC_FOUND = False
clc_sdk = None
else:
CLC_FOUND = True
from ansible.module_utils.basic import AnsibleModule
class ClcBlueprintPackage:
clc = clc_sdk
module = None
def __init__(self, module):
"""
Construct module
"""
self.module = module
if not CLC_FOUND:
self.module.fail_json(
msg='clc-python-sdk required for this module')
if not REQUESTS_FOUND:
self.module.fail_json(
msg='requests library is required for this module')
if requests.__version__ and LooseVersion(
requests.__version__) < LooseVersion('2.5.0'):
self.module.fail_json(
msg='requests library version should be >= 2.5.0')
self._set_user_agent(self.clc)
def process_request(self):
"""
Process the request - Main Code Path
:return: Returns with either an exit_json or fail_json
"""
p = self.module.params
changed = False
changed_server_ids = []
self._set_clc_credentials_from_env()
server_ids = p['server_ids']
package_id = p['package_id']
package_params = p['package_params']
state = p['state']
if state == 'present':
changed, changed_server_ids, request_list = self.ensure_package_installed(
server_ids, package_id, package_params)
self._wait_for_requests_to_complete(request_list)
self.module.exit_json(changed=changed, server_ids=changed_server_ids)
@staticmethod
def define_argument_spec():
"""
This function defines the dictionary object required for
package module
:return: the package dictionary object
"""
argument_spec = dict(
server_ids=dict(type='list', required=True),
package_id=dict(required=True),
package_params=dict(type='dict', default={}),
wait=dict(default=True),
state=dict(default='present', choices=['present'])
)
return argument_spec
def ensure_package_installed(self, server_ids, package_id, package_params):
"""
Ensure the package is installed in the given list of servers
:param server_ids: the server list where the package needs to be installed
:param package_id: the blueprint package id
:param package_params: the package arguments
:return: (changed, server_ids, request_list)
changed: A flag indicating if a change was made
server_ids: The list of servers modified
request_list: The list of request objects from clc-sdk
"""
changed = False
request_list = []
servers = self._get_servers_from_clc(
server_ids,
'Failed to get servers from CLC')
for server in servers:
if not self.module.check_mode:
request = self.clc_install_package(
server,
package_id,
package_params)
request_list.append(request)
changed = True
return changed, server_ids, request_list
def clc_install_package(self, server, package_id, package_params):
"""
Install the package to a given clc server
:param server: The server object where the package needs to be installed
:param package_id: The blueprint package id
:param package_params: the required argument dict for the package installation
:return: The result object from the CLC API call
"""
result = None
try:
result = server.ExecutePackage(
package_id=package_id,
parameters=package_params)
except CLCException as ex:
self.module.fail_json(msg='Failed to install package : {0} to server {1}. {2}'.format(
package_id, server.id, ex.message
))
return result
def _wait_for_requests_to_complete(self, request_lst):
"""
Waits until the CLC requests are complete if the wait argument is True
:param request_lst: The list of CLC request objects
:return: none
"""
if not self.module.params['wait']:
return
for request in request_lst:
request.WaitUntilComplete()
for request_details in request.requests:
if request_details.Status() != 'succeeded':
self.module.fail_json(
msg='Unable to process package install request')
def _get_servers_from_clc(self, server_list, message):
"""
Internal function to fetch list of CLC server objects from a list of server ids
:param server_list: the list of server ids
:param message: the error message to raise if there is any error
:return the list of CLC server objects
"""
try:
return self.clc.v2.Servers(server_list).servers
except CLCException as ex:
self.module.fail_json(msg=message + ': %s' % ex)
def _set_clc_credentials_from_env(self):
"""
Set the CLC Credentials on the sdk by reading environment variables
:return: none
"""
env = os.environ
v2_api_token = env.get('CLC_V2_API_TOKEN', False)
v2_api_username = env.get('CLC_V2_API_USERNAME', False)
v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
clc_alias = env.get('CLC_ACCT_ALIAS', False)
api_url = env.get('CLC_V2_API_URL', False)
if api_url:
self.clc.defaults.ENDPOINT_URL_V2 = api_url
if v2_api_token and clc_alias:
self.clc._LOGIN_TOKEN_V2 = v2_api_token
self.clc._V2_ENABLED = True
self.clc.ALIAS = clc_alias
elif v2_api_username and v2_api_passwd:
self.clc.v2.SetCredentials(
api_username=v2_api_username,
api_passwd=v2_api_passwd)
else:
return self.module.fail_json(
msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
"environment variables")
@staticmethod
def _set_user_agent(clc):
if hasattr(clc, 'SetRequestsSession'):
agent_string = "ClcAnsibleModule/" + __version__
ses = requests.Session()
ses.headers.update({"Api-Client": agent_string})
ses.headers['User-Agent'] += " " + agent_string
clc.SetRequestsSession(ses)
def main():
"""
Main function
:return: None
"""
module = AnsibleModule(
argument_spec=ClcBlueprintPackage.define_argument_spec(),
supports_check_mode=True
)
clc_blueprint_package = ClcBlueprintPackage(module)
clc_blueprint_package.process_request()
if __name__ == '__main__':
main()
| gpl-3.0 |
seanli9jan/tensorflow | tensorflow/python/estimator/canned/optimizers.py | 41 | 1298 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""optimizers python module.
Importing from tensorflow.python.estimator is unsupported
and will soon break!
"""
# pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_estimator.python.estimator.canned import optimizers
# Include attrs that start with single underscore.
_HAS_DYNAMIC_ATTRIBUTES = True
optimizers.__all__ = [s for s in dir(optimizers) if not s.startswith('__')]
from tensorflow_estimator.python.estimator.canned.optimizers import *
| apache-2.0 |
ossobv/planb | planb/management/commands/slist.py | 1 | 2803 | from fnmatch import fnmatch
from planb.common import human
from planb.management.base import BaseCommand
from planb.storage import storage_pools
from planb.storage.base import Datasets
class Command(BaseCommand):
help = 'Lists storage entries/datasets'
def add_arguments(self, parser):
parser.add_argument(
'--stale', action='store_true',
help='List stale (unused) storage entries/datasets only')
parser.add_argument(
'-x', '--exclude', action='append', default=[],
help='Glob patterns to exclude')
return super().add_arguments(parser)
def handle(self, *args, **options):
datasets = []
for storage in storage_pools.values():
datasets.extend(storage.get_datasets())
# For the leaf/parent checks to be effective, we need the database
# config immediately before excluding anything.
datasets = Datasets(datasets)
datasets.load_database_config()
datasets.keep_only_leaves()
for exclude in set(options['exclude']):
datasets = Datasets([
ds for ds in datasets if not fnmatch(ds.name, exclude)])
if options['stale']:
datasets = Datasets([
ds for ds in datasets if not ds.exists_in_database])
datasets.sort()
self.dump_list(datasets)
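# Example invocation (a sketch; assumes the usual Django manage.py entry
# point and a dataset naming scheme, both hypothetical here):
#
#   ./manage.py slist --stale -x 'tank/backup/*'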
def dump_list(self, datasets):
ret = []
lastgroup = None
for dataset in datasets:
fileset = dataset.database_object
hostgroup = fileset.hostgroup if fileset else '(nogroup)'
if lastgroup != hostgroup:
lastgroup = hostgroup
if ret:
ret.append('')
ret.append('; {}'.format(hostgroup))
if hostgroup == '(nogroup)':
# XXX: add temporary warning regarding cleanup
ret.append(
'; (when purging, do not forget to remove '
'encryption keys from zfskeys dir)')
ret.append('; (see planb-zfskeys-check contrib tool)')
if fileset:
ret.append(
'{dataset.name:54s} {disk_usage:>8s} '
'id={fileset.id}'.format(
dataset=dataset,
disk_usage=human.bytes(dataset.disk_usage),
fileset=fileset))
else:
ret.append(
'{dataset.name:54s} {disk_usage:>8s} '
'id=NONE'.format(
dataset=dataset,
disk_usage=human.bytes(dataset.disk_usage)))
if ret:
ret.extend(['', ''])
self.stdout.write('\n'.join(ret))
| gpl-3.0 |
umitproject/tease-o-matic | dbindexer/lookups.py | 84 | 8761 | from django.db import models
from djangotoolbox.fields import ListField
from copy import deepcopy
import re
regex = type(re.compile(''))
class LookupDoesNotExist(Exception):
pass
class LookupBase(type):
def __new__(cls, name, bases, attrs):
new_cls = type.__new__(cls, name, bases, attrs)
if not isinstance(new_cls.lookup_types, (list, tuple)):
new_cls.lookup_types = (new_cls.lookup_types, )
return new_cls
class ExtraFieldLookup(object):
'''Default is to behave like an exact filter on an ExtraField.'''
__metaclass__ = LookupBase
lookup_types = 'exact'
def __init__(self, model=None, field_name=None, lookup_def=None,
new_lookup='exact', field_to_add=models.CharField(
max_length=500, editable=False, null=True)):
self.field_to_add = field_to_add
self.new_lookup = new_lookup
self.contribute(model, field_name, lookup_def)
def contribute(self, model, field_name, lookup_def):
self.model = model
self.field_name = field_name
self.lookup_def = lookup_def
@property
def index_name(self):
return 'idxf_%s_l_%s' % (self.field_name, self.lookup_types[0])
def convert_lookup(self, value, lookup_type):
# TODO: can value be a list or tuple? (yes, in the case of an 'in' lookup)
if isinstance(value, (tuple, list)):
value = [self._convert_lookup(val, lookup_type)[1] for val in value]
else:
_, value = self._convert_lookup(value, lookup_type)
return self.new_lookup, value
def _convert_lookup(self, value, lookup_type):
return lookup_type, value
def convert_value(self, value):
if isinstance(value, (tuple, list)):
value = [self._convert_value(val) for val in value]
else:
value = self._convert_value(value)
return value
def _convert_value(self, value):
return value
def matches_filter(self, model, field_name, lookup_type, value):
return self.model == model and lookup_type in self.lookup_types \
and field_name == self.field_name
@classmethod
def matches_lookup_def(cls, lookup_def):
if lookup_def in cls.lookup_types:
return True
return False
def get_field_to_add(self, field_to_index):
field_to_add = deepcopy(self.field_to_add)
if isinstance(field_to_index, ListField):
field_to_add = ListField(field_to_add, editable=False, null=True)
return field_to_add
class DateLookup(ExtraFieldLookup):
def __init__(self, *args, **kwargs):
defaults = {'new_lookup': 'exact',
'field_to_add': models.IntegerField(editable=False, null=True)}
defaults.update(kwargs)
ExtraFieldLookup.__init__(self, *args, **defaults)
def _convert_lookup(self, value, lookup_type):
return self.new_lookup, value
class Day(DateLookup):
lookup_types = 'day'
def _convert_value(self, value):
return value.day
class Month(DateLookup):
lookup_types = 'month'
def _convert_value(self, value):
return value.month
class Year(DateLookup):
lookup_types = 'year'
def _convert_value(self, value):
return value.year
class Weekday(DateLookup):
lookup_types = 'week_day'
def _convert_value(self, value):
return value.isoweekday()
class Contains(ExtraFieldLookup):
lookup_types = 'contains'
def __init__(self, *args, **kwargs):
defaults = {'new_lookup': 'startswith',
'field_to_add': ListField(models.CharField(500),
editable=False, null=True)
}
defaults.update(kwargs)
ExtraFieldLookup.__init__(self, *args, **defaults)
def get_field_to_add(self, field_to_index):
# always return a ListField of CharFields even in the case of
# field_to_index being a ListField itself!
return deepcopy(self.field_to_add)
def convert_value(self, value):
new_value = []
if isinstance(value, (tuple, list)):
for val in value:
new_value.extend(self.contains_indexer(val))
else:
new_value = self.contains_indexer(value)
return new_value
def _convert_lookup(self, value, lookup_type):
return self.new_lookup, value
def contains_indexer(self, value):
# In indexing mode we add all postfixes ('hello', 'ello', ..., 'o')
result = []
if value:
result.extend([value[count:] for count in range(len(value))])
return result
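# Worked example (a sketch): contains_indexer('hello') returns
# ['hello', 'ello', 'llo', 'lo', 'o'], so a contains='ell' filter can be
# answered by a startswith='ell' query against the indexed postfix list.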
class Icontains(Contains):
lookup_types = 'icontains'
def convert_value(self, value):
return [val.lower() for val in Contains.convert_value(self, value)]
def _convert_lookup(self, value, lookup_type):
return self.new_lookup, value.lower()
class Iexact(ExtraFieldLookup):
lookup_types = 'iexact'
def _convert_lookup(self, value, lookup_type):
return self.new_lookup, value.lower()
def _convert_value(self, value):
return value.lower()
class Istartswith(ExtraFieldLookup):
lookup_types = 'istartswith'
def __init__(self, *args, **kwargs):
defaults = {'new_lookup': 'startswith'}
defaults.update(kwargs)
ExtraFieldLookup.__init__(self, *args, **defaults)
def _convert_lookup(self, value, lookup_type):
return self.new_lookup, value.lower()
def _convert_value(self, value):
return value.lower()
class Endswith(ExtraFieldLookup):
lookup_types = 'endswith'
def __init__(self, *args, **kwargs):
defaults = {'new_lookup': 'startswith'}
defaults.update(kwargs)
ExtraFieldLookup.__init__(self, *args, **defaults)
def _convert_lookup(self, value, lookup_type):
return self.new_lookup, value[::-1]
def _convert_value(self, value):
return value[::-1]
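# Worked example (a sketch): for an endswith='lo' lookup on the stored
# value 'hello', the index holds 'olleh' and the lookup value becomes
# 'ol', so the endswith test reduces to startswith on reversed strings.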
class Iendswith(Endswith):
lookup_types = 'iendswith'
def _convert_lookup(self, value, lookup_type):
return self.new_lookup, value[::-1].lower()
def _convert_value(self, value):
return value[::-1].lower()
class RegexLookup(ExtraFieldLookup):
lookup_types = ('regex', 'iregex')
def __init__(self, *args, **kwargs):
defaults = {'field_to_add': models.NullBooleanField(editable=False,
null=True)
}
defaults.update(kwargs)
ExtraFieldLookup.__init__(self, *args, **defaults)
def contribute(self, model, field_name, lookup_def):
ExtraFieldLookup.contribute(self, model, field_name, lookup_def)
if isinstance(lookup_def, regex):
self.lookup_def = re.compile(lookup_def.pattern, re.S | re.U |
(lookup_def.flags & re.I))
@property
def index_name(self):
return 'idxf_%s_l_%s' % (self.field_name,
self.lookup_def.pattern.encode('hex'))
def is_icase(self):
return self.lookup_def.flags & re.I
def _convert_lookup(self, value, lookup_type):
return self.new_lookup, True
def _convert_value(self, value):
if self.lookup_def.match(value):
return True
return False
def matches_filter(self, model, field_name, lookup_type, value):
return self.model == model and lookup_type == \
'%sregex' % ('i' if self.is_icase() else '') and \
value == self.lookup_def.pattern and field_name == self.field_name
@classmethod
def matches_lookup_def(cls, lookup_def):
if isinstance(lookup_def, regex):
return True
return False
class StandardLookup(ExtraFieldLookup):
''' Creates a copy of the field_to_index in order to allow querying for
standard lookup_types on a JOINed property. '''
# TODO: database backend can specify standardLookups
lookup_types = ('exact', 'gt', 'gte', 'lt', 'lte', 'in', 'range', 'isnull')
@property
def index_name(self):
return 'idxf_%s_l_%s' % (self.field_name, 'standard')
def convert_lookup(self, value, lookup_type):
return lookup_type, value
def get_field_to_add(self, field_to_index):
field_to_add = deepcopy(field_to_index)
if isinstance(field_to_add, (models.DateTimeField,
models.DateField, models.TimeField)):
field_to_add.auto_now_add = field_to_add.auto_now = False
return field_to_add | bsd-3-clause |
rekhajoshm/spark | examples/src/main/python/ml/elementwise_product_example.py | 128 | 1632 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
# $example on$
from pyspark.ml.feature import ElementwiseProduct
from pyspark.ml.linalg import Vectors
# $example off$
from pyspark.sql import SparkSession
if __name__ == "__main__":
spark = SparkSession\
.builder\
.appName("ElementwiseProductExample")\
.getOrCreate()
# $example on$
# Create some vector data; also works for sparse vectors
data = [(Vectors.dense([1.0, 2.0, 3.0]),), (Vectors.dense([4.0, 5.0, 6.0]),)]
df = spark.createDataFrame(data, ["vector"])
transformer = ElementwiseProduct(scalingVec=Vectors.dense([0.0, 1.0, 2.0]),
inputCol="vector", outputCol="transformedVector")
# Batch transform the vectors to create new column:
transformer.transform(df).show()
# $example off$
spark.stop()
| apache-2.0 |
jlegendary/scikit-learn | examples/svm/plot_oneclass.py | 249 | 2302 | """
==========================================
One-class SVM with non-linear kernel (RBF)
==========================================
An example using a one-class SVM for novelty detection.
:ref:`One-class SVM <svm_outlier_detection>` is an unsupervised
algorithm that learns a decision function for novelty detection:
classifying new data as similar or different to the training set.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn import svm
xx, yy = np.meshgrid(np.linspace(-5, 5, 500), np.linspace(-5, 5, 500))
# Generate train data
X = 0.3 * np.random.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * np.random.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = np.random.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
y_pred_train = clf.predict(X_train)
y_pred_test = clf.predict(X_test)
y_pred_outliers = clf.predict(X_outliers)
n_error_train = y_pred_train[y_pred_train == -1].size
n_error_test = y_pred_test[y_pred_test == -1].size
n_error_outliers = y_pred_outliers[y_pred_outliers == 1].size
# plot the line, the points, and the nearest vectors to the plane
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.title("Novelty Detection")
plt.contourf(xx, yy, Z, levels=np.linspace(Z.min(), 0, 7), cmap=plt.cm.Blues_r)
a = plt.contour(xx, yy, Z, levels=[0], linewidths=2, colors='red')
plt.contourf(xx, yy, Z, levels=[0, Z.max()], colors='orange')
b1 = plt.scatter(X_train[:, 0], X_train[:, 1], c='white')
b2 = plt.scatter(X_test[:, 0], X_test[:, 1], c='green')
c = plt.scatter(X_outliers[:, 0], X_outliers[:, 1], c='red')
plt.axis('tight')
plt.xlim((-5, 5))
plt.ylim((-5, 5))
plt.legend([a.collections[0], b1, b2, c],
["learned frontier", "training observations",
"new regular observations", "new abnormal observations"],
loc="upper left",
prop=matplotlib.font_manager.FontProperties(size=11))
plt.xlabel(
"error train: %d/200 ; errors novel regular: %d/40 ; "
"errors novel abnormal: %d/40"
% (n_error_train, n_error_test, n_error_outliers))
plt.show()
| bsd-3-clause |
Aiah/flasky | app/api_1_0/posts.py | 122 | 1737 | from flask import jsonify, request, g, abort, url_for, current_app
from .. import db
from ..models import Post, Permission
from . import api
from .decorators import permission_required
from .errors import forbidden
@api.route('/posts/')
def get_posts():
page = request.args.get('page', 1, type=int)
pagination = Post.query.paginate(
page, per_page=current_app.config['FLASKY_POSTS_PER_PAGE'],
error_out=False)
posts = pagination.items
prev = None
if pagination.has_prev:
prev = url_for('api.get_posts', page=page-1, _external=True)
next = None
if pagination.has_next:
next = url_for('api.get_posts', page=page+1, _external=True)
return jsonify({
'posts': [post.to_json() for post in posts],
'prev': prev,
'next': next,
'count': pagination.total
})
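# Shape of the envelope returned above (illustrative values only; the URL
# depends on how the blueprint is mounted):
#
#   {"posts": [...], "prev": null,
#    "next": "http://example.com/api/v1.0/posts/?page=2", "count": 57}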
@api.route('/posts/<int:id>')
def get_post(id):
post = Post.query.get_or_404(id)
return jsonify(post.to_json())
@api.route('/posts/', methods=['POST'])
@permission_required(Permission.WRITE_ARTICLES)
def new_post():
post = Post.from_json(request.json)
post.author = g.current_user
db.session.add(post)
db.session.commit()
return jsonify(post.to_json()), 201, \
{'Location': url_for('api.get_post', id=post.id, _external=True)}
@api.route('/posts/<int:id>', methods=['PUT'])
@permission_required(Permission.WRITE_ARTICLES)
def edit_post(id):
post = Post.query.get_or_404(id)
if g.current_user != post.author and \
not g.current_user.can(Permission.ADMINISTER):
return forbidden('Insufficient permissions')
post.body = request.json.get('body', post.body)
db.session.add(post)
return jsonify(post.to_json())
| mit |
kaiweifan/neutron | neutron/plugins/common/constants.py | 5 | 1771 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# service type constants:
CORE = "CORE"
DUMMY = "DUMMY"
LOADBALANCER = "LOADBALANCER"
FIREWALL = "FIREWALL"
VPN = "VPN"
METERING = "METERING"
L3_ROUTER_NAT = "L3_ROUTER_NAT"
# Maps extension alias to service type.
EXT_TO_SERVICE_MAPPING = {
'dummy': DUMMY,
'lbaas': LOADBALANCER,
'fwaas': FIREWALL,
'vpnaas': VPN,
'metering': METERING,
'router': L3_ROUTER_NAT
}
# TODO(salvatore-orlando): Move these (or derive them) from conf file
ALLOWED_SERVICES = [CORE, DUMMY, LOADBALANCER, FIREWALL, VPN, METERING,
L3_ROUTER_NAT]
COMMON_PREFIXES = {
CORE: "",
DUMMY: "/dummy_svc",
LOADBALANCER: "/lb",
FIREWALL: "/fw",
VPN: "/vpn",
METERING: "/metering",
L3_ROUTER_NAT: "",
}
# Service operation status constants
ACTIVE = "ACTIVE"
DOWN = "DOWN"
PENDING_CREATE = "PENDING_CREATE"
PENDING_UPDATE = "PENDING_UPDATE"
PENDING_DELETE = "PENDING_DELETE"
INACTIVE = "INACTIVE"
ERROR = "ERROR"
# FWaaS firewall rule action
FWAAS_ALLOW = "allow"
FWAAS_DENY = "deny"
# L3 Protocol name constants
TCP = "tcp"
UDP = "udp"
ICMP = "icmp"
| apache-2.0 |
twood02/adv-net-samples | sdn/pox/pox/host_tracker/host_tracker.py | 44 | 13799 | # Copyright 2011 Dorgival Guedes
# Copyright 2013 James McCauley
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tracks host location and configuration
Keep track of hosts in the network, where they are and how they are
configured (at least MAC/IP addresses).
For the time being, it keeps tables with the information; later, it should
transfer that information to Topology and handle just the actual
discovery/update of host information.
Timer configuration can be changed when needed (e.g., for debugging) using
the launch facility (check timeoutSec dict and PingCtrl.pingLim).
You can set various timeouts from the commandline. Names and defaults:
arpAware=60*2 Quiet ARP-responding entries are pinged after this
arpSilent=60*20 This is for quiet entries not known to answer ARP
arpReply=4 Time to wait for an ARP reply before retrying
timerInterval=5 Seconds between timer routine activations
entryMove=60 Minimum expected time to move a physical entry
Good values for testing:
--arpAware=15 --arpSilent=45 --arpReply=1 --entryMove=4
You can also specify how many ARP pings we try before deciding it failed:
--pingLim=2
"""
from pox.core import core
from pox.lib.addresses import EthAddr
from pox.lib.packet.ethernet import ethernet
from pox.lib.packet.ipv4 import ipv4
from pox.lib.packet.arp import arp
from pox.lib.recoco import Timer
from pox.lib.revent import Event, EventHalt
import pox.openflow.libopenflow_01 as of
import pox.openflow.discovery as discovery
from pox.lib.revent.revent import *
import time
import pox
log = core.getLogger()
# Times (in seconds) to use for different timeouts:
timeoutSec = dict(
arpAware=60*2, # Quiet ARP-responding entries are pinged after this
arpSilent=60*20, # This is for quiet entries not known to answer ARP
arpReply=4, # Time to wait for an ARP reply before retrying
timerInterval=5, # Seconds between timer routine activations
entryMove=60 # Minimum expected time to move a physical entry
)
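# These defaults can be overridden from the command line via the launch
# facility, e.g. (a sketch of a typical POX invocation, using the testing
# values suggested in the module docstring):
#
#   ./pox.py host_tracker --arpAware=15 --arpSilent=45 --arpReply=1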
# Address to send ARP pings from.
# The particular one here is just an arbitrary locally administered address.
DEFAULT_ARP_PING_SRC_MAC = '02:00:00:00:be:ef'
class HostEvent (Event):
"""
Event when hosts join, leave, or move within the network
"""
def __init__ (self, entry, new_dpid = None, new_port = None, join = False,
leave = False, move = False):
super(HostEvent,self).__init__()
self.entry = entry
self.join = join
self.leave = leave
self.move = move
assert sum(1 for x in [join,leave,move] if x) == 1
# You can alter these and they'll change where we think it goes...
self._new_dpid = new_dpid
self._new_port = new_port
#TODO: Allow us to cancel add/removes
@property
def new_dpid (self):
"""
New DPID for move events"
"""
assert self.move
return self._new_dpid
@property
def new_port (self):
"""
New port for move events"
"""
assert self.move
return self._new_port
class Alive (object):
"""
Holds liveliness information for MAC and IP entries
"""
def __init__ (self, livelinessInterval=timeoutSec['arpAware']):
self.lastTimeSeen = time.time()
self.interval=livelinessInterval
def expired (self):
return time.time() > self.lastTimeSeen + self.interval
def refresh (self):
self.lastTimeSeen = time.time()
class PingCtrl (Alive):
"""
Holds information for handling ARP pings for hosts
"""
# Number of ARP ping attempts before deciding it failed
pingLim=3
def __init__ (self):
super(PingCtrl,self).__init__(timeoutSec['arpReply'])
self.pending = 0
def sent (self):
self.refresh()
self.pending += 1
def failed (self):
return self.pending > PingCtrl.pingLim
def received (self):
# Clear any pending timeouts related to ARP pings
self.pending = 0
class IpEntry (Alive):
"""
This entry keeps track of IP addresses seen from each MAC entry and will
be kept in the macEntry object's ipAddrs dictionary. At least for now,
there is no need to refer to the original macEntry as the code is organized.
"""
def __init__ (self, hasARP):
if hasARP:
super(IpEntry,self).__init__(timeoutSec['arpAware'])
else:
super(IpEntry,self).__init__(timeoutSec['arpSilent'])
self.hasARP = hasARP
self.pings = PingCtrl()
def setHasARP (self):
if not self.hasARP:
self.hasARP = True
self.interval = timeoutSec['arpAware']
class MacEntry (Alive):
"""
Not strictly an ARP entry.
When it gets moved to Topology, may include other host info, like
services, and it may replace dpid by a general switch object reference
We use the port to determine which port to forward traffic out of.
"""
def __init__ (self, dpid, port, macaddr):
super(MacEntry,self).__init__()
self.dpid = dpid
self.port = port
self.macaddr = macaddr
self.ipAddrs = {}
def __str__(self):
return ' '.join([str(self.dpid), str(self.port), str(self.macaddr)])
def __eq__ (self, other):
if other is None:
return False
elif type(other) == tuple:
return (self.dpid,self.port,self.macaddr)==other
if self.dpid != other.dpid: return False
if self.port != other.port: return False
if self.macaddr != other.macaddr: return False
# What about ipAddrs??
return True
def __ne__ (self, other):
return not self.__eq__(other)
class host_tracker (EventMixin):
"""
Host tracking component
"""
_eventMixin_events = set([HostEvent])
def __init__ (self, ping_src_mac = None, install_flow = True,
eat_packets = True):
if ping_src_mac is None:
ping_src_mac = DEFAULT_ARP_PING_SRC_MAC
self.ping_src_mac = EthAddr(ping_src_mac)
self.install_flow = install_flow
self.eat_packets = eat_packets
# The following tables should go to Topology later
self.entryByMAC = {}
self._t = Timer(timeoutSec['timerInterval'],
self._check_timeouts, recurring=True)
# Listen to openflow with high priority if we want to eat our ARP replies
listen_args = {}
if eat_packets:
listen_args={'openflow':{'priority':0}}
core.listen_to_dependencies(self, listen_args=listen_args)
def _all_dependencies_met (self):
log.info("host_tracker ready")
# The following two functions should go to Topology also
def getMacEntry (self, macaddr):
try:
result = self.entryByMAC[macaddr]
except KeyError as e:
result = None
return result
def sendPing (self, macEntry, ipAddr):
"""
Builds an ETH/IP any-to-any ARP packet (an "ARP ping")
"""
r = arp()
r.opcode = arp.REQUEST
r.hwdst = macEntry.macaddr
r.hwsrc = self.ping_src_mac
r.protodst = ipAddr
# src is IP_ANY
e = ethernet(type=ethernet.ARP_TYPE, src=r.hwsrc, dst=r.hwdst)
e.payload = r
log.debug("%i %i sending ARP REQ to %s %s",
macEntry.dpid, macEntry.port, str(r.hwdst), str(r.protodst))
msg = of.ofp_packet_out(data = e.pack(),
action = of.ofp_action_output(port=macEntry.port))
if core.openflow.sendToDPID(macEntry.dpid, msg.pack()):
ipEntry = macEntry.ipAddrs[ipAddr]
ipEntry.pings.sent()
else:
# macEntry is stale, remove it.
log.debug("%i %i ERROR sending ARP REQ to %s %s",
macEntry.dpid, macEntry.port, str(r.hwdst), str(r.protodst))
del macEntry.ipAddrs[ipAddr]
return
def getSrcIPandARP (self, packet):
"""
Gets source IPv4 address for packets that have one (IPv4 and ARP)
Returns (ip_address, has_arp). If no IP, returns (None, False).
"""
if isinstance(packet, ipv4):
log.debug("IP %s => %s",str(packet.srcip),str(packet.dstip))
return ( packet.srcip, False )
elif isinstance(packet, arp):
log.debug("ARP %s %s => %s",
{arp.REQUEST:"request",arp.REPLY:"reply"}.get(packet.opcode,
'op:%i' % (packet.opcode,)),
str(packet.protosrc), str(packet.protodst))
if (packet.hwtype == arp.HW_TYPE_ETHERNET and
packet.prototype == arp.PROTO_TYPE_IP and
packet.protosrc != 0):
return ( packet.protosrc, True )
return ( None, False )
def updateIPInfo (self, pckt_srcip, macEntry, hasARP):
"""
Update given MacEntry
If there is IP info in the incoming packet, update the macEntry
accordingly. In the past we assumed a 1:1 mapping between MAC and IP
addresses, but removed that restriction later to accommodate cases
like virtual interfaces (1:n) and distributed packet rewriting (n:1)
"""
if pckt_srcip in macEntry.ipAddrs:
# that entry already has that IP
ipEntry = macEntry.ipAddrs[pckt_srcip]
ipEntry.refresh()
log.debug("%s already has IP %s, refreshing",
str(macEntry), str(pckt_srcip) )
else:
# new mapping
ipEntry = IpEntry(hasARP)
macEntry.ipAddrs[pckt_srcip] = ipEntry
log.info("Learned %s got IP %s", str(macEntry), str(pckt_srcip) )
if hasARP:
ipEntry.pings.received()
def _handle_openflow_ConnectionUp (self, event):
if not self.install_flow: return
log.debug("Installing flow for ARP ping responses")
m = of.ofp_flow_mod()
m.priority += 1 # Higher than normal
m.match.dl_type = ethernet.ARP_TYPE
m.match.dl_dst = self.ping_src_mac
m.actions.append(of.ofp_action_output(port=of.OFPP_CONTROLLER))
event.connection.send(m)
def _handle_openflow_PacketIn (self, event):
"""
Populate MAC and IP tables based on incoming packets.
Handles only packets from ports identified as not switch-only.
If a MAC was not seen before, insert it in the MAC table;
otherwise, update the table and the entry.
If the packet has a source IP, update that info for the macEntry (this may
require removing the info from another entry that previously had that IP address).
It does not forward any packets, just extract info from them.
"""
dpid = event.connection.dpid
inport = event.port
packet = event.parsed
if not packet.parsed:
log.warning("%i %i ignoring unparsed packet", dpid, inport)
return
if packet.type == ethernet.LLDP_TYPE: # Ignore LLDP packets
return
# This should use Topology later
if not core.openflow_discovery.is_edge_port(dpid, inport):
# No host should be right behind a switch-only port
log.debug("%i %i ignoring packetIn at switch-only port", dpid, inport)
return
log.debug("PacketIn: %i %i ETH %s => %s",
dpid, inport, str(packet.src), str(packet.dst))
# Learn or update dpid/port/MAC info
macEntry = self.getMacEntry(packet.src)
if macEntry is None:
# there is no known host by that MAC
# should we raise a NewHostFound event (at the end)?
macEntry = MacEntry(dpid,inport,packet.src)
self.entryByMAC[packet.src] = macEntry
log.info("Learned %s", str(macEntry))
self.raiseEventNoErrors(HostEvent, macEntry, join=True)
elif macEntry != (dpid, inport, packet.src):
# there is already an entry of host with that MAC, but host has moved
# should we raise a HostMoved event (at the end)?
log.info("Learned %s moved to %i %i", str(macEntry), dpid, inport)
# if there has not been long since heard from it...
if time.time() - macEntry.lastTimeSeen < timeoutSec['entryMove']:
log.warning("Possible duplicate: %s at time %i, now (%i %i), time %i",
str(macEntry), macEntry.lastTimeSeen,
dpid, inport, time.time())
# should we create a whole new entry, or keep the previous host info?
# for now, we keep it: IP info, answers pings, etc.
e = HostEvent(macEntry, move=True, new_dpid = dpid, new_port = inport)
self.raiseEventNoErrors(e)
macEntry.dpid = e._new_dpid
macEntry.port = e._new_port
macEntry.refresh()
(pckt_srcip, hasARP) = self.getSrcIPandARP(packet.next)
if pckt_srcip is not None:
self.updateIPInfo(pckt_srcip,macEntry,hasARP)
if self.eat_packets and packet.dst == self.ping_src_mac:
return EventHalt
def _check_timeouts (self):
"""
Checks for timed out entries
"""
for macEntry in self.entryByMAC.values():
entryPinged = False
for ip_addr, ipEntry in macEntry.ipAddrs.items():
if ipEntry.expired():
if ipEntry.pings.failed():
del macEntry.ipAddrs[ip_addr]
log.info("Entry %s: IP address %s expired",
str(macEntry), str(ip_addr) )
else:
self.sendPing(macEntry,ip_addr)
ipEntry.pings.sent()
entryPinged = True
if macEntry.expired() and not entryPinged:
log.info("Entry %s expired", str(macEntry))
# sanity check: there should be no IP addresses left
if len(macEntry.ipAddrs) > 0:
for ip in macEntry.ipAddrs.keys():
log.warning("Entry %s expired but still had IP address %s",
str(macEntry), str(ip) )
del macEntry.ipAddrs[ip]
self.raiseEventNoErrors(HostEvent, macEntry, leave=True)
del self.entryByMAC[macEntry.macaddr]
| mit |
amjith/pyvim | pyvim/editor.py | 1 | 11635 | """
The main editor class.
Usage::
files_to_edit = ['file1.txt', 'file2.py']
e = Editor(files_to_edit)
e.run() # Runs the event loop, starts interaction.
"""
from __future__ import unicode_literals
from prompt_toolkit.application import Application, AbortAction
from prompt_toolkit.buffer import Buffer, AcceptAction
from prompt_toolkit.enums import SEARCH_BUFFER
from prompt_toolkit.filters import Always, Condition
from prompt_toolkit.history import FileHistory
from prompt_toolkit.interface import CommandLineInterface
from prompt_toolkit.key_binding.vi_state import InputMode
from prompt_toolkit.shortcuts import create_eventloop
from prompt_toolkit.utils import Callback
from .commands.completer import create_command_completer
from .commands.handler import handle_command
from .commands.preview import CommandPreviewer
from .editor_buffer import EditorBuffer
from .enums import COMMAND_BUFFER
from .help import HELP_TEXT
from .key_bindings import create_key_bindings
from .layout import EditorLayout
from .reporting import report
from .style import generate_built_in_styles, get_editor_style_by_name
from .window_arrangement import WindowArrangement
from .io import FileIO, DirectoryIO, HttpIO, GZipFileIO
import pygments
import os
__all__ = (
'Editor',
)
class Editor(object):
"""
The main class. Containing the whole editor.
"""
def __init__(self, config_directory='~/.pyvim'):
# Vi options.
self.show_line_numbers = True
self.highlight_search = True
self.paste_mode = False
self.show_ruler = True
self.show_wildmenu = True
self.expand_tab = True # Insert spaces instead of tab characters.
self.tabstop = 4 # Number of spaces that a tab character represents.
self.incsearch = True # Show matches while typing search string.
self.ignore_case = False # Ignore case while searching.
self.enable_mouse_support = True
self.display_unprintable_characters = True # ':set list'
self.enable_jedi = True # ':set jedi', for Python Jedi completion.
self.scroll_offset = 0 # ':set scrolloff'
self.relative_number = False # ':set relativenumber'
self.wrap_lines = True # ':set wrap'
# Ensure config directory exists.
self.config_directory = os.path.abspath(os.path.expanduser(config_directory))
if not os.path.exists(self.config_directory):
os.mkdir(self.config_directory)
self._reporters_running_for_buffer_names = set()
self.window_arrangement = WindowArrangement(self)
self.message = None
# Load styles. (Mapping from name to Style class.)
self.styles = generate_built_in_styles()
self.current_style = get_editor_style_by_name('default')
# I/O backends.
self.io_backends = [
DirectoryIO(),
HttpIO(),
GZipFileIO(), # Should come before FileIO.
FileIO(),
]
# Create eventloop.
self.eventloop = create_eventloop()
# Create key bindings manager
self.key_bindings_manager = create_key_bindings(self)
# Create layout and CommandLineInterface instance.
self.editor_layout = EditorLayout(
self, self.key_bindings_manager, self.window_arrangement)
self.application = self._create_application()
self.cli = CommandLineInterface(
eventloop=self.eventloop,
application=self.application)
# Hide message when a key is pressed.
def key_pressed():
self.message = None
self.cli.input_processor.beforeKeyPress += key_pressed
# Command line previewer.
self.previewer = CommandPreviewer(self)
def load_initial_files(self, locations, in_tab_pages=False, hsplit=False, vsplit=False):
"""
Load a list of files.
"""
assert in_tab_pages + hsplit + vsplit <= 1 # Max one of these options.
# When no files were given, open at least one empty buffer.
locations2 = locations or [None]
# First file
self.window_arrangement.open_buffer(locations2[0])
for f in locations2[1:]:
if in_tab_pages:
self.window_arrangement.create_tab(f)
elif hsplit:
self.window_arrangement.hsplit(location=f)
elif vsplit:
self.window_arrangement.vsplit(location=f)
else:
self.window_arrangement.open_buffer(f)
self.window_arrangement.active_tab_index = 0
if locations and len(locations) > 1:
self.show_message('%i files loaded.' % len(locations))
def _create_application(self):
"""
Create CommandLineInterface instance.
"""
# Create Vi command buffer.
def handle_action(cli, buffer):
' When enter is pressed in the Vi command line. '
text = buffer.text # Remember: leave_command_mode resets the buffer.
# First leave command mode. We want to make sure that the working
# pane is focussed again before executing the command handlers.
self.leave_command_mode(append_to_history=True)
# Execute command.
handle_command(self, text)
# Create history and search buffers.
commands_history = FileHistory(os.path.join(self.config_directory, 'commands_history'))
command_buffer = Buffer(accept_action=AcceptAction(handler=handle_action),
enable_history_search=Always(),
completer=create_command_completer(self),
history=commands_history)
search_buffer_history = FileHistory(os.path.join(self.config_directory, 'search_history'))
search_buffer = Buffer(history=search_buffer_history,
enable_history_search=Always(),
accept_action=AcceptAction.IGNORE)
# Create app.
# Create CLI.
application = Application(
layout=self.editor_layout.layout,
key_bindings_registry=self.key_bindings_manager.registry,
buffers={
COMMAND_BUFFER: command_buffer,
SEARCH_BUFFER: search_buffer,
},
get_style=lambda: self.current_style,
paste_mode=Condition(lambda cli: self.paste_mode),
ignore_case=Condition(lambda cli: self.ignore_case),
mouse_support=Condition(lambda cli: self.enable_mouse_support),
use_alternate_screen=True,
on_abort=AbortAction.IGNORE,
on_exit=AbortAction.IGNORE,
on_buffer_changed=Callback(self._current_buffer_changed))
# Handle command line previews.
# (e.g. when typing ':colorscheme blue', it should already show the
# preview before pressing enter.)
def preview():
if self.cli.current_buffer == command_buffer:
self.previewer.preview(command_buffer.text)
command_buffer.on_text_changed += preview
return application
@property
def current_editor_buffer(self):
"""
Return the `EditorBuffer` that is currently active.
"""
for b in self.window_arrangement.editor_buffers:
if b.buffer_name == self.cli.current_buffer_name:
return b
@property
def add_key_binding(self):
"""
Shortcut for adding new key bindings.
(Mostly useful for a pyvimrc file, that receives this Editor instance
as input.)
"""
return self.key_bindings_manager.registry.add_binding
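# Illustrative pyvimrc usage (a sketch; pyvim hands the Editor instance to
# the rc file's configure hook, and the 'jj' binding shown is just an
# example):
#
#   def configure(editor):
#       @editor.add_key_binding('j', 'j')
#       def _(event):
#           editor.show_message('jj pressed')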
def show_message(self, message):
"""
Set a warning message. The layout will render it as a "pop-up" at the
bottom.
"""
self.message = message
def use_colorscheme(self, name='default'):
"""
Apply new colorscheme. (By name.)
"""
try:
self.current_style = get_editor_style_by_name(name)
except pygments.util.ClassNotFound:
pass
def sync_with_prompt_toolkit(self):
"""
Update the prompt-toolkit Layout and FocusStack.
"""
# After executing a command, make sure that the layout of
# prompt-toolkit matches our WindowArrangement.
self.editor_layout.update()
# Make sure that the focus stack of prompt-toolkit has the current
# page.
self.cli.focus_stack._stack = [
self.window_arrangement.active_editor_buffer.buffer_name]
def _current_buffer_changed(self, cli):
"""
Current buffer changed.
"""
name = self.cli.current_buffer_name
eb = self.window_arrangement.get_editor_buffer_for_buffer_name(name)
if eb is not None:
# Run reporter.
self.run_reporter_for_editor_buffer(eb)
def run_reporter_for_editor_buffer(self, editor_buffer):
"""
Run reporter on input. (Asynchronously.)
"""
assert isinstance(editor_buffer, EditorBuffer)
eb = editor_buffer
name = eb.buffer_name
if name not in self._reporters_running_for_buffer_names:
text = eb.buffer.text
self._reporters_running_for_buffer_names.add(name)
# Don't run reporter when we don't have a location. (We need to
# know the filetype, actually.)
if eb.location is None:
return
# Better not to access the document in an executor.
document = eb.buffer.document
def in_executor():
# Call reporter
report_errors = report(eb.location, document)
def ready():
self._reporters_running_for_buffer_names.remove(name)
# If the text has not been changed yet in the meantime, set
# reporter errors. (We were running in another thread.)
if text == eb.buffer.text:
eb.report_errors = report_errors
self.cli._redraw()
else:
# Restart reporter when the text was changed.
self._current_buffer_changed(self.cli)
self.cli.eventloop.call_from_executor(ready)
self.cli.eventloop.run_in_executor(in_executor)
def show_help(self):
"""
Show help in new window.
"""
self.window_arrangement.hsplit(text=HELP_TEXT)
self.sync_with_prompt_toolkit() # Show new window.
def run(self):
"""
Run the event loop for the interface.
This starts the interaction.
"""
# Make sure everything is in sync, before starting.
self.sync_with_prompt_toolkit()
# Run eventloop of prompt_toolkit.
self.cli.run(reset_current_buffer=False)
def enter_command_mode(self):
"""
Go into command mode.
"""
self.cli.focus_stack.push(COMMAND_BUFFER)
self.key_bindings_manager.vi_state.input_mode = InputMode.INSERT
self.previewer.save()
def leave_command_mode(self, append_to_history=False):
"""
Leave command mode. Focus document window again.
"""
self.previewer.restore()
self.cli.focus_stack.pop()
self.key_bindings_manager.vi_state.input_mode = InputMode.NAVIGATION
self.cli.buffers[COMMAND_BUFFER].reset(append_to_history=append_to_history)
| bsd-3-clause |
asdofindia/kitsune | kitsune/sumo/form_fields.py | 16 | 5840 | from django import forms
from django.conf import settings
from django.contrib.auth.models import User
from django.core import validators
from django.core.exceptions import ValidationError
from django.utils import translation
from babel import Locale, localedata
from babel.support import Format
from tower import ugettext as _, ugettext_lazy as _lazy
class TypedMultipleChoiceField(forms.MultipleChoiceField):
"""Coerce choices to a specific type and don't validate them.
Based on implementation in Django ticket 12398.
Bonus feature: optional coerce_only=True, doesn't raise ValidationError,
best used in combination with required=False, to pass form validation
if a field is optional and all you want is value casting.
"""
def valid_value(self, val):
"""Override: don't raise validation error in parent, if coerce_only."""
if self.coerce_only:
return True
return super(TypedMultipleChoiceField, self).valid_value(val)
def __init__(self, coerce_only=False, *args, **kwargs):
self.coerce = kwargs.pop('coerce', lambda val: val)
self.empty_value = kwargs.pop('empty_value', [])
self.coerce_only = coerce_only
super(TypedMultipleChoiceField, self).__init__(*args, **kwargs)
def to_python(self, value):
"""
Validates that the values are in self.choices and can be coerced to the
right type.
"""
value = super(TypedMultipleChoiceField, self).to_python(value)
super(TypedMultipleChoiceField, self).validate(value)
if value == self.empty_value or value in validators.EMPTY_VALUES:
return self.empty_value
new_value = []
is_valid = super(TypedMultipleChoiceField, self).valid_value
for choice in value:
if self.coerce_only and not is_valid(choice):
continue
try:
new_value.append(self.coerce(choice))
except (ValueError, TypeError, ValidationError):
raise ValidationError(self.error_messages['invalid_choice'] %
{'value': choice})
return new_value
def validate(self, value):
pass
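# A hedged usage sketch (the form and its choices are invented, not part
# of this module): with coerce_only=True, invalid choices are dropped and
# the survivors are cast, so an optional field never blocks validation.
#
#     class PickNumbersForm(forms.Form):
#         numbers = TypedMultipleChoiceField(
#             coerce=int, coerce_only=True, required=False,
#             choices=[(1, 'one'), (2, 'two')])
#
#     form = PickNumbersForm({'numbers': ['1', 'bogus']})
#     form.is_valid()                  # True: 'bogus' is skipped silently
#     form.cleaned_data['numbers']     # [1]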
# TODO: remove this and use strip kwarg once ticket #6362 is done
# @see http://code.djangoproject.com/ticket/6362
class StrippedCharField(forms.CharField):
"""CharField that strips trailing and leading spaces."""
def __init__(self, max_length=None, min_length=None, *args, **kwargs):
self.max_length, self.min_length = max_length, min_length
super(StrippedCharField, self).__init__(max_length, min_length,
*args, **kwargs)
# Remove the default min and max length validators and add our own
# that format numbers in the error messages.
to_remove = []
for validator in self.validators:
class_name = validator.__class__.__name__
if class_name == 'MinLengthValidator' or \
class_name == 'MaxLengthValidator':
to_remove.append(validator)
for validator in to_remove:
self.validators.remove(validator)
if min_length is not None:
self.validators.append(MinLengthValidator(min_length))
if max_length is not None:
self.validators.append(MaxLengthValidator(max_length))
def clean(self, value):
if value is not None:
value = value.strip()
return super(StrippedCharField, self).clean(value)
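# Hedged sketch (field and inputs invented): whitespace is stripped before
# the usual CharField validation, so min_length applies to the stripped
# value.
#
#     title = StrippedCharField(min_length=3)
#     title.clean(u' okay ')   # -> u'okay'
#     title.clean(u'  ok ')    # ValidationError: only 2 characters remain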
class MultiUsernameField(forms.Field):
"""Form field that takes a comma-separated list of usernames as input,
validates that users exist for each one, and returns the list of users."""
def to_python(self, value):
if not value:
if self.required:
raise forms.ValidationError(_(u'To field is required.'))
else:
return []
users = []
for username in value.split(','):
username = username.strip()
if username:
try:
user = User.objects.get(username=username)
users.append(user)
except User.DoesNotExist:
msg = _(u'{username} is not a valid username.')
raise forms.ValidationError(msg.format(username=username))
return users
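# Hedged usage sketch (usernames invented): input is split on commas,
# surrounding whitespace is ignored, and each name must resolve to a User.
#
#     to_field = MultiUsernameField()
#     to_field.clean(u'alice, bob')    # -> [<User: alice>, <User: bob>]
#     to_field.clean(u'alice, ghost')  # ValidationError if ghost is unknown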
class BaseValidator(validators.BaseValidator):
"""Override the BaseValidator from django to format numbers."""
def __call__(self, value):
cleaned = self.clean(value)
params = {'limit_value': _format_decimal(self.limit_value),
'show_value': _format_decimal(cleaned)}
if self.compare(cleaned, self.limit_value):
raise ValidationError(
self.message % params,
code=self.code,
params=params,
)
class MinLengthValidator(validators.MinLengthValidator, BaseValidator):
message = _lazy(u'Ensure this value has at least %(limit_value)s '
u'characters (it has %(show_value)s).')
class MaxLengthValidator(validators.MaxLengthValidator, BaseValidator):
message = _lazy(u'Ensure this value has at most %(limit_value)s '
u'characters (it has %(show_value)s).')
def _format_decimal(num, format=None):
"""Returns the string of a number formatted for the current language.
Uses django's translation.get_language() to find the current language from
the request.
    Falls back to the default language if babel does not support the
    current one.
"""
lang = translation.get_language()
if not localedata.exists(lang):
lang = settings.LANGUAGE_CODE
locale = Locale(translation.to_locale(lang))
return Format(locale).decimal(num, format)
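# Hedged example (assumes the 'fr' locale data ships with babel; the exact
# separators may differ by babel version):
#
#     translation.activate('fr')
#     _format_decimal(1234.5)   # -> something like u'1 234,5'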
| bsd-3-clause |
cprov/snapcraft | snapcraft/internal/build_providers/_base_provider.py | 1 | 7244 | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2018 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import abc
import contextlib
import datetime
import os
import shlex
import tempfile
from typing import List
import petname
from . import errors
from snapcraft.internal import common, repo
_STORE_ASSERTION_KEY = (
"BWDEoaqyr25nF5SNCvEv2v7QnM9QsfCc0PBMYD_i2NGSQ32EF2d4D0hqUel3m8ul"
)
class Provider:
_SNAPS_MOUNTPOINT = os.path.join(os.path.sep, "var", "cache", "snapcraft", "snaps")
def __init__(self, *, project, echoer) -> None:
self.project = project
self.echoer = echoer
# Once https://github.com/CanonicalLtd/multipass/issues/220 is
# closed we can prepend snapcraft- again.
self.instance_name = petname.Generate(2, "-")
self.project_dir = shlex.quote(project.info.name)
if project.info.version:
self.snap_filename = "{}_{}_{}.snap".format(
project.info.name, project.info.version, project.deb_arch
)
else:
self.snap_filename = "{}_{}.snap".format(
project.info.name, project.deb_arch
)
def __enter__(self):
self.create()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.destroy()
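    # Hedged usage sketch: a concrete subclass (MultipassProvider here is
    # assumed, not defined in this module) is driven as a context manager
    # so that destroy() runs even when the build fails.
    #
    #     with MultipassProvider(project=project, echoer=echoer) as instance:
    #         instance.provision_project('project.tar')
    #         instance.build_project()
    #         snap_file = instance.retrieve_snap()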
@abc.abstractmethod
def _run(self, command: List) -> None:
"""Run a command on the instance."""
@abc.abstractmethod
def _launch(self):
"""Launch the instance."""
@abc.abstractmethod
def _mount(self, *, mountpoint: str, dev_or_path: str) -> None:
"""Mount a path from the host inside the instance."""
@abc.abstractmethod
def _mount_snaps_directory(self) -> str:
"""Mount the host directory with snaps into the provider."""
@abc.abstractmethod
def _push_file(self, *, source: str, destination: str) -> None:
"""Push a file into the instance."""
@abc.abstractmethod
def create(self) -> None:
"""Provider steps needed to create a fully functioning environment."""
@abc.abstractmethod
def destroy(self) -> None:
"""Provider steps needed to ensure the instance is destroyed.
This method should be safe to call multiple times and do nothing
if the instance to destroy is already destroyed.
"""
@abc.abstractmethod
def provision_project(self, tarball: str) -> None:
"""Provider steps needed to copy project assests to the instance."""
@abc.abstractmethod
def mount_project(self) -> None:
"""Provider steps needed to make the project available to the instance.
"""
@abc.abstractmethod
def build_project(self) -> None:
"""Provider steps needed build the project on the instance."""
@abc.abstractmethod
def retrieve_snap(self) -> str:
"""
Provider steps needed to retrieve the built snap from the instance.
:returns: the filename of the retrieved snap.
:rtype: str
"""
def launch_instance(self) -> None:
self.echoer.info(
"Creating a build environment named {!r}".format(self.instance_name)
)
self._launch()
def _disable_and_wait_for_refreshes(self):
# Disable autorefresh for 15 minutes,
# https://github.com/snapcore/snapd/pull/5436/files
now_plus_15 = datetime.datetime.now() + datetime.timedelta(minutes=15)
self._run(
[
"sudo",
"snap",
"set",
"core",
"refresh.hold={}Z".format(now_plus_15.isoformat()),
]
)
# Auto refresh may have kicked in while setting the hold.
self.echoer.info("Waiting for pending snap auto refreshes.")
with contextlib.suppress(errors.ProviderExecError):
self._run(["sudo", "snap", "watch", "--last=auto-refresh"])
def setup_snapcraft(self) -> None:
self._disable_and_wait_for_refreshes()
self.echoer.info("Setting up snapcraft in {!r}".format(self.instance_name))
# Add the store assertion, common to all snaps.
self._inject_assertions(
[["account-key", "public-key-sha3-384={}".format(_STORE_ASSERTION_KEY)]]
)
# TODO make mounting requirement smarter and depend on is_installed
if common.is_snap():
# Make the snaps available to the provider
self._mount_snaps_directory()
# Now install the snapcraft required base/core.
self.echoer.info("Setting up core")
self._install_snap("core")
# And finally install snapcraft itself.
self.echoer.info("Setting up snapcraft")
self._install_snap("snapcraft")
def _inject_assertions(self, assertions: List[List[str]]):
with tempfile.NamedTemporaryFile() as assertion_file:
for assertion in assertions:
assertion_file.write(repo.snaps.get_assertion(assertion))
assertion_file.write(b"\n")
assertion_file.flush()
self._push_file(source=assertion_file.name, destination=assertion_file.name)
self._run(["sudo", "snap", "ack", assertion_file.name])
def _install_snap(self, snap_name: str) -> None:
snap = repo.snaps.SnapPackage(snap_name)
args = []
if snap.installed:
snap_info = snap.get_local_snap_info()
if snap_info["revision"].startswith("x"):
args.append("--dangerous")
else:
self._inject_assertions(
[
["snap-declaration", "snap-name={}".format(snap_name)],
[
"snap-revision",
"snap-revision={}".format(snap_info["revision"]),
"snap-id={}".format(snap_info["id"]),
],
]
)
if snap_info["confinement"] == "classic":
args.append("--classic")
# https://github.com/snapcore/snapd/blob/master/snap/info.go
# MountFile
snap_file_name = "{}_{}.snap".format(snap_name, snap_info["revision"])
args.append(os.path.join(self._SNAPS_MOUNTPOINT, snap_file_name))
else:
snap_info = snap.get_store_snap_info()
# TODO support other channels
confinement = snap_info["channels"]["latest/stable"]["confinement"]
if confinement == "classic":
args.append("--classic")
args.append(snap_name)
self._run(["sudo", "snap", "install"] + args)
| gpl-3.0 |
carsongee/edx-platform | lms/djangoapps/courseware/tests/test_views.py | 10 | 22197 | # coding=UTF-8
"""
Tests courseware views.py
"""
import unittest
from datetime import datetime
from mock import MagicMock, patch
from pytz import UTC
from django.test import TestCase
from django.http import Http404
from django.test.utils import override_settings
from django.contrib.auth.models import User, AnonymousUser
from django.test.client import RequestFactory
from django.conf import settings
from django.core.urlresolvers import reverse
from student.models import CourseEnrollment
from student.tests.factories import AdminFactory
from edxmako.middleware import MakoMiddleware
from opaque_keys.edx.locations import Location
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from student.tests.factories import UserFactory
import courseware.views as views
from courseware.tests.modulestore_config import TEST_DATA_MIXED_MODULESTORE
from course_modes.models import CourseMode
import shoppingcart
from util.tests.test_date_utils import fake_ugettext, fake_pgettext
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class TestJumpTo(TestCase):
"""
Check the jumpto link for a course.
"""
def setUp(self):
# Use toy course from XML
self.course_key = SlashSeparatedCourseKey('edX', 'toy', '2012_Fall')
def test_jumpto_invalid_location(self):
location = self.course_key.make_usage_key(None, 'NoSuchPlace')
# This is fragile, but unfortunately the problem is that within the LMS we
# can't use the reverse calls from the CMS
jumpto_url = '{0}/{1}/jump_to/{2}'.format('/courses', self.course_key.to_deprecated_string(), location.to_deprecated_string())
response = self.client.get(jumpto_url)
self.assertEqual(response.status_code, 404)
def test_jumpto_from_chapter(self):
location = self.course_key.make_usage_key('chapter', 'Overview')
jumpto_url = '{0}/{1}/jump_to/{2}'.format('/courses', self.course_key.to_deprecated_string(), location.to_deprecated_string())
expected = 'courses/edX/toy/2012_Fall/courseware/Overview/'
response = self.client.get(jumpto_url)
self.assertRedirects(response, expected, status_code=302, target_status_code=302)
def test_jumpto_id(self):
jumpto_url = '{0}/{1}/jump_to_id/{2}'.format('/courses', self.course_key.to_deprecated_string(), 'Overview')
expected = 'courses/edX/toy/2012_Fall/courseware/Overview/'
response = self.client.get(jumpto_url)
self.assertRedirects(response, expected, status_code=302, target_status_code=302)
def test_jumpto_id_invalid_location(self):
location = Location('edX', 'toy', 'NoSuchPlace', None, None, None)
jumpto_url = '{0}/{1}/jump_to_id/{2}'.format('/courses', self.course_key.to_deprecated_string(), location.to_deprecated_string())
response = self.client.get(jumpto_url)
self.assertEqual(response.status_code, 404)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class ViewsTestCase(TestCase):
"""
Tests for views.py methods.
"""
def setUp(self):
self.course = CourseFactory()
self.chapter = ItemFactory(category='chapter', parent_location=self.course.location) # pylint: disable=no-member
self.section = ItemFactory(category='sequential', parent_location=self.chapter.location, due=datetime(2013, 9, 18, 11, 30, 00))
self.vertical = ItemFactory(category='vertical', parent_location=self.section.location)
self.component = ItemFactory(category='problem', parent_location=self.vertical.location)
self.course_key = self.course.id
self.user = User.objects.create(username='dummy', password='123456',
email='test@mit.edu')
self.date = datetime(2013, 1, 22, tzinfo=UTC)
self.enrollment = CourseEnrollment.enroll(self.user, self.course_key)
self.enrollment.created = self.date
self.enrollment.save()
self.request_factory = RequestFactory()
chapter = 'Overview'
self.chapter_url = '%s/%s/%s' % ('/courses', self.course_key, chapter)
@unittest.skipUnless(settings.FEATURES.get('ENABLE_SHOPPING_CART'), "Shopping Cart not enabled in settings")
@patch.dict(settings.FEATURES, {'ENABLE_PAID_COURSE_REGISTRATION': True})
def test_course_about_in_cart(self):
in_cart_span = '<span class="add-to-cart">'
# don't mock this course due to shopping cart existence checking
course = CourseFactory.create(org="new", number="unenrolled", display_name="course")
request = self.request_factory.get(reverse('about_course', args=[course.id.to_deprecated_string()]))
request.user = AnonymousUser()
response = views.course_about(request, course.id.to_deprecated_string())
self.assertEqual(response.status_code, 200)
self.assertNotIn(in_cart_span, response.content)
# authenticated user with nothing in cart
request.user = self.user
response = views.course_about(request, course.id.to_deprecated_string())
self.assertEqual(response.status_code, 200)
self.assertNotIn(in_cart_span, response.content)
# now add the course to the cart
cart = shoppingcart.models.Order.get_cart_for_user(self.user)
shoppingcart.models.PaidCourseRegistration.add_to_order(cart, course.id)
response = views.course_about(request, course.id.to_deprecated_string())
self.assertEqual(response.status_code, 200)
self.assertIn(in_cart_span, response.content)
def test_user_groups(self):
        # deprecated function
mock_user = MagicMock()
mock_user.is_authenticated.return_value = False
self.assertEqual(views.user_groups(mock_user), [])
def test_get_current_child(self):
self.assertIsNone(views.get_current_child(MagicMock()))
mock_xmodule = MagicMock()
mock_xmodule.position = -1
mock_xmodule.get_display_items.return_value = ['one', 'two']
self.assertEqual(views.get_current_child(mock_xmodule), 'one')
mock_xmodule_2 = MagicMock()
mock_xmodule_2.position = 3
mock_xmodule_2.get_display_items.return_value = []
self.assertIsNone(views.get_current_child(mock_xmodule_2))
def test_redirect_to_course_position(self):
mock_module = MagicMock()
mock_module.descriptor.id = 'Underwater Basketweaving'
mock_module.position = 3
mock_module.get_display_items.return_value = []
self.assertRaises(Http404, views.redirect_to_course_position,
mock_module, views.CONTENT_DEPTH)
def test_index_invalid_position(self):
request_url = '/'.join([
'/courses',
self.course.id.to_deprecated_string(),
self.chapter.location.name,
self.section.location.name,
'f'
])
response = self.client.get(request_url)
self.assertEqual(response.status_code, 404)
def test_unicode_handling_in_url(self):
url_parts = [
'/courses',
self.course.id.to_deprecated_string(),
self.chapter.location.name,
self.section.location.name,
'1'
]
for idx, val in enumerate(url_parts):
url_parts_copy = url_parts[:]
url_parts_copy[idx] = val + u'Ο'
request_url = '/'.join(url_parts_copy)
response = self.client.get(request_url)
self.assertEqual(response.status_code, 404)
def test_registered_for_course(self):
self.assertFalse(views.registered_for_course('Basketweaving', None))
mock_user = MagicMock()
mock_user.is_authenticated.return_value = False
self.assertFalse(views.registered_for_course('dummy', mock_user))
mock_course = MagicMock()
mock_course.id = self.course_key
self.assertTrue(views.registered_for_course(mock_course, self.user))
def test_jump_to_invalid(self):
# TODO add a test for invalid location
# TODO add a test for no data *
request = self.request_factory.get(self.chapter_url)
self.assertRaisesRegexp(Http404, 'Invalid course_key or usage_key', views.jump_to,
request, 'bar', ())
def test_no_end_on_about_page(self):
# Toy course has no course end date or about/end_date blob
self.verify_end_date('edX/toy/TT_2012_Fall')
def test_no_end_about_blob(self):
# test_end has a course end date, no end_date HTML blob
self.verify_end_date("edX/test_end/2012_Fall", "Sep 17, 2015")
def test_about_blob_end_date(self):
# test_about_blob_end_date has both a course end date and an end_date HTML blob.
# HTML blob wins
self.verify_end_date("edX/test_about_blob_end_date/2012_Fall", "Learning never ends")
def verify_end_date(self, course_id, expected_end_text=None):
"""
        Visits the about page for `course_id` and checks that both the text
        "Classes End" and the specified `expected_end_text` are present on
        the page.
If `expected_end_text` is None, verifies that the about page *does not* contain the text
"Classes End".
"""
request = self.request_factory.get("foo")
request.user = self.user
# TODO: Remove the dependency on MakoMiddleware (by making the views explicitly supply a RequestContext)
MakoMiddleware().process_request(request)
result = views.course_about(request, course_id)
if expected_end_text is not None:
self.assertContains(result, "Classes End")
self.assertContains(result, expected_end_text)
else:
self.assertNotContains(result, "Classes End")
def test_chat_settings(self):
mock_user = MagicMock()
mock_user.username = "johndoe"
mock_course = MagicMock()
mock_course.id = "a/b/c"
# Stub this out in the case that it's not in the settings
domain = "jabber.edx.org"
settings.JABBER_DOMAIN = domain
chat_settings = views.chat_settings(mock_course, mock_user)
# Test the proper format of all chat settings
self.assertEqual(chat_settings['domain'], domain)
self.assertEqual(chat_settings['room'], "a-b-c_class")
self.assertEqual(chat_settings['username'], "johndoe@%s" % domain)
# TODO: this needs to be changed once we figure out how to
# generate/store a real password.
self.assertEqual(chat_settings['password'], "johndoe@%s" % domain)
def test_course_mktg_about_coming_soon(self):
# we should not be able to find this course
url = reverse('mktg_about_course', kwargs={'course_id': 'no/course/here'})
response = self.client.get(url)
self.assertIn('Coming Soon', response.content)
def test_course_mktg_register(self):
admin = AdminFactory()
self.client.login(username=admin.username, password='test')
url = reverse('mktg_about_course', kwargs={'course_id': self.course_key.to_deprecated_string()})
response = self.client.get(url)
self.assertIn('Register for', response.content)
self.assertNotIn('and choose your student track', response.content)
def test_course_mktg_register_multiple_modes(self):
admin = AdminFactory()
CourseMode.objects.get_or_create(mode_slug='honor',
mode_display_name='Honor Code Certificate',
course_id=self.course_key)
CourseMode.objects.get_or_create(mode_slug='verified',
mode_display_name='Verified Certificate',
course_id=self.course_key)
self.client.login(username=admin.username, password='test')
url = reverse('mktg_about_course', kwargs={'course_id': self.course_key.to_deprecated_string()})
response = self.client.get(url)
self.assertIn('Register for', response.content)
self.assertIn('and choose your student track', response.content)
# clean up course modes
CourseMode.objects.all().delete()
def test_submission_history_accepts_valid_ids(self):
# log into a staff account
admin = AdminFactory()
self.client.login(username=admin.username, password='test')
url = reverse('submission_history', kwargs={
'course_id': self.course_key.to_deprecated_string(),
'student_username': 'dummy',
'location': self.component.location.to_deprecated_string(),
})
response = self.client.get(url)
# Tests that we do not get an "Invalid x" response when passing correct arguments to view
self.assertFalse('Invalid' in response.content)
def test_submission_history_xss(self):
# log into a staff account
admin = AdminFactory()
self.client.login(username=admin.username, password='test')
# try it with an existing user and a malicious location
url = reverse('submission_history', kwargs={
'course_id': self.course_key.to_deprecated_string(),
'student_username': 'dummy',
'location': '<script>alert("hello");</script>'
})
response = self.client.get(url)
self.assertFalse('<script>' in response.content)
# try it with a malicious user and a non-existent location
url = reverse('submission_history', kwargs={
'course_id': self.course_key.to_deprecated_string(),
'student_username': '<script>alert("hello");</script>',
'location': 'dummy'
})
response = self.client.get(url)
self.assertFalse('<script>' in response.content)
# setting TIME_ZONE_DISPLAYED_FOR_DEADLINES explicitly
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE, TIME_ZONE_DISPLAYED_FOR_DEADLINES="UTC")
class BaseDueDateTests(ModuleStoreTestCase):
"""
Base class that verifies that due dates are rendered correctly on a page
"""
__test__ = False
def get_text(self, course): # pylint: disable=unused-argument
"""Return the rendered text for the page to be verified"""
raise NotImplementedError
def set_up_course(self, **course_kwargs):
"""
Create a stock course with a specific due date.
        :param course_kwargs: All kwargs are passed through to the :class:`CourseFactory`
"""
course = CourseFactory(**course_kwargs)
chapter = ItemFactory(category='chapter', parent_location=course.location) # pylint: disable=no-member
section = ItemFactory(category='sequential', parent_location=chapter.location, due=datetime(2013, 9, 18, 11, 30, 00))
vertical = ItemFactory(category='vertical', parent_location=section.location)
ItemFactory(category='problem', parent_location=vertical.location)
course = modulestore().get_course(course.id) # pylint: disable=no-member
self.assertIsNotNone(course.get_children()[0].get_children()[0].due)
return course
def setUp(self):
self.request_factory = RequestFactory()
self.user = UserFactory.create()
self.request = self.request_factory.get("foo")
self.request.user = self.user
self.time_with_tz = "due Sep 18, 2013 at 11:30 UTC"
self.time_without_tz = "due Sep 18, 2013 at 11:30"
def test_backwards_compatability(self):
# The test course being used has show_timezone = False in the policy file
# (and no due_date_display_format set). This is to test our backwards compatibility--
# in course_module's init method, the date_display_format will be set accordingly to
# remove the timezone.
course = self.set_up_course(due_date_display_format=None, show_timezone=False)
text = self.get_text(course)
self.assertIn(self.time_without_tz, text)
self.assertNotIn(self.time_with_tz, text)
# Test that show_timezone has been cleared (which means you get the default value of True).
self.assertTrue(course.show_timezone)
def test_defaults(self):
course = self.set_up_course()
text = self.get_text(course)
self.assertIn(self.time_with_tz, text)
def test_format_none(self):
# Same for setting the due date to None
course = self.set_up_course(due_date_display_format=None)
text = self.get_text(course)
self.assertIn(self.time_with_tz, text)
def test_format_plain_text(self):
# plain text due date
course = self.set_up_course(due_date_display_format="foobar")
text = self.get_text(course)
self.assertNotIn(self.time_with_tz, text)
self.assertIn("due foobar", text)
def test_format_date(self):
# due date with no time
course = self.set_up_course(due_date_display_format=u"%b %d %y")
text = self.get_text(course)
self.assertNotIn(self.time_with_tz, text)
self.assertIn("due Sep 18 13", text)
def test_format_hidden(self):
# hide due date completely
course = self.set_up_course(due_date_display_format=u"")
text = self.get_text(course)
self.assertNotIn("due ", text)
def test_format_invalid(self):
# improperly formatted due_date_display_format falls through to default
# (value of show_timezone does not matter-- setting to False to make that clear).
course = self.set_up_course(due_date_display_format=u"%%%", show_timezone=False)
text = self.get_text(course)
self.assertNotIn("%%%", text)
self.assertIn(self.time_with_tz, text)
class TestProgressDueDate(BaseDueDateTests):
"""
Test that the progress page displays due dates correctly
"""
__test__ = True
def get_text(self, course):
""" Returns the HTML for the progress page """
return views.progress(self.request, course.id.to_deprecated_string(), self.user.id).content
class TestAccordionDueDate(BaseDueDateTests):
"""
Test that the accordion page displays due dates correctly
"""
__test__ = True
def get_text(self, course):
""" Returns the HTML for the accordion """
return views.render_accordion(
self.request, course, course.get_children()[0].scope_ids.usage_id.to_deprecated_string(), None, None
)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class StartDateTests(ModuleStoreTestCase):
"""
Test that start dates are properly localized and displayed on the student
dashboard.
"""
def setUp(self):
self.request_factory = RequestFactory()
self.user = UserFactory.create()
self.request = self.request_factory.get("foo")
self.request.user = self.user
def set_up_course(self):
"""
        Create a stock course with a specific start date.
"""
course = CourseFactory(start=datetime(2013, 9, 16, 7, 17, 28))
course = modulestore().get_course(course.id) # pylint: disable=no-member
return course
def get_about_text(self, course_key):
"""
Get the text of the /about page for the course.
"""
text = views.course_about(self.request, course_key.to_deprecated_string()).content
return text
@patch('util.date_utils.pgettext', fake_pgettext(translations={
("abbreviated month name", "Sep"): "SEPTEMBER",
}))
@patch('util.date_utils.ugettext', fake_ugettext(translations={
"SHORT_DATE_FORMAT": "%Y-%b-%d",
}))
def test_format_localized_in_studio_course(self):
course = self.set_up_course()
text = self.get_about_text(course.id)
# The start date is set in the set_up_course function above.
self.assertIn("2013-SEPTEMBER-16", text)
@patch('util.date_utils.pgettext', fake_pgettext(translations={
("abbreviated month name", "Jul"): "JULY",
}))
@patch('util.date_utils.ugettext', fake_ugettext(translations={
"SHORT_DATE_FORMAT": "%Y-%b-%d",
}))
def test_format_localized_in_xml_course(self):
text = self.get_about_text(SlashSeparatedCourseKey('edX', 'toy', 'TT_2012_Fall'))
# The start date is set in common/test/data/two_toys/policies/TT_2012_Fall/policy.json
self.assertIn("2015-JULY-17", text)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class ProgressPageTests(ModuleStoreTestCase):
"""
Tests that verify that the progress page works correctly.
"""
def setUp(self):
self.request_factory = RequestFactory()
self.user = UserFactory.create()
self.request = self.request_factory.get("foo")
self.request.user = self.user
MakoMiddleware().process_request(self.request)
course = CourseFactory(
start=datetime(2013, 9, 16, 7, 17, 28),
            grade_cutoffs={u'çü†øƒƒ': 0.75, 'Pass': 0.5},
)
self.course = modulestore().get_course(course.id) # pylint: disable=no-member
self.chapter = ItemFactory(category='chapter', parent_location=self.course.location) # pylint: disable=no-member
self.section = ItemFactory(category='sequential', parent_location=self.chapter.location)
self.vertical = ItemFactory(category='vertical', parent_location=self.section.location)
def test_pure_ungraded_xblock(self):
ItemFactory(category='acid', parent_location=self.vertical.location)
resp = views.progress(self.request, self.course.id.to_deprecated_string())
self.assertEqual(resp.status_code, 200)
def test_non_asci_grade_cutoffs(self):
resp = views.progress(self.request, self.course.id.to_deprecated_string())
self.assertEqual(resp.status_code, 200)
| agpl-3.0 |
mtrdesign/pylogwatch | pylogwatch/raven/events.py | 1 | 4127 | """
raven.events
~~~~~~~~~~~~
:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import logging
import sys
from raven.utils import varmap
from raven.utils.encoding import shorten, to_unicode
from raven.utils.stacks import get_stack_info, iter_traceback_frames, \
get_culprit
__all__ = ('BaseEvent', 'Exception', 'Message', 'Query')
class BaseEvent(object):
def __init__(self, client):
self.client = client
self.logger = logging.getLogger(__name__)
def to_string(self, data):
raise NotImplementedError
def capture(self, **kwargs):
        return {}
class Exception(BaseEvent):
"""
Exceptions store the following metadata:
- value: 'My exception value'
- type: 'ClassName'
- module '__builtin__' (i.e. __builtin__.TypeError)
- frames: a list of serialized frames (see _get_traceback_frames)
"""
def to_string(self, data):
exc = data['sentry.interfaces.Exception']
if exc['value']:
return '%s: %s' % (exc['type'], exc['value'])
return exc['type']
def get_hash(self, data):
exc = data['sentry.interfaces.Exception']
output = [exc['type']]
for frame in data['sentry.interfaces.Stacktrace']['frames']:
output.append(frame['module'])
output.append(frame['function'])
return output
def capture(self, exc_info=None, **kwargs):
new_exc_info = False
if not exc_info or exc_info is True:
new_exc_info = True
exc_info = sys.exc_info()
if not exc_info:
raise ValueError('No exception found')
try:
exc_type, exc_value, exc_traceback = exc_info
frames = varmap(lambda k, v: shorten(v,
string_length=self.client.string_max_length, list_length=self.client.list_max_length),
get_stack_info(iter_traceback_frames(exc_traceback)))
culprit = get_culprit(frames, self.client.include_paths, self.client.exclude_paths)
exc_module = getattr(exc_type, '__module__', None)
exc_type = getattr(exc_type, '__name__', '<unknown>')
finally:
if new_exc_info:
try:
del exc_info
del exc_traceback
except Exception, e:
self.logger.exception(e)
return {
'level': logging.ERROR,
'culprit': culprit,
'sentry.interfaces.Exception': {
'value': to_unicode(exc_value),
'type': str(exc_type),
'module': str(exc_module),
},
'sentry.interfaces.Stacktrace': {
'frames': frames
},
}
class Message(BaseEvent):
"""
Messages store the following metadata:
- message: 'My message from %s about %s'
- params: ('foo', 'bar')
"""
def to_string(self, data):
msg = data['sentry.interfaces.Message']
if msg.get('params'):
return msg['message'] % msg['params']
return msg['message']
def get_hash(self, data):
msg = data['sentry.interfaces.Message']
return [msg['message']]
def capture(self, message, params=(), **kwargs):
data = {
'sentry.interfaces.Message': {
'message': message,
'params': params,
}
}
return data
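    # Hedged example (message and params invented): keeping the params
    # separate lets get_hash() group events by the unformatted template.
    #
    #     Message(client).capture('User %s logged in', params=('alice',))
    #     # to_string(data) -> 'User alice logged in'
    #     # get_hash(data)  -> ['User %s logged in']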
class Query(BaseEvent):
"""
    Queries store the following metadata:
    - query: 'SELECT * FROM table'
    - engine: 'postgresql_psycopg2'
"""
def to_string(self, data):
sql = data['sentry.interfaces.Query']
return sql['query']
def get_hash(self, data):
sql = data['sentry.interfaces.Query']
return [sql['query'], sql['engine']]
def capture(self, query, engine, **kwargs):
return {
'sentry.interfaces.Query': {
'query': query,
'engine': engine,
}
}
| gpl-3.0 |
rafaelgontijo/Django-facebook-fork | docs/docs_env/Lib/locale.py | 116 | 73200 | """ Locale support.
The module provides low-level access to the C lib's locale APIs
and adds high level number formatting APIs as well as a locale
aliasing engine to complement these.
The aliasing engine includes support for many commonly used locale
names and maps them to values suitable for passing to the C lib's
setlocale() function. It also includes default encodings for all
supported locale names.
"""
import sys, encodings, encodings.aliases
# Try importing the _locale module.
#
# If this fails, fall back on a basic 'C' locale emulation.
# Yuck: LC_MESSAGES is non-standard: can't tell whether it exists before
# trying the import. So __all__ is also fiddled at the end of the file.
__all__ = ["getlocale", "getdefaultlocale", "getpreferredencoding", "Error",
"setlocale", "resetlocale", "localeconv", "strcoll", "strxfrm",
"str", "atof", "atoi", "format", "format_string", "currency",
"normalize", "LC_CTYPE", "LC_COLLATE", "LC_TIME", "LC_MONETARY",
"LC_NUMERIC", "LC_ALL", "CHAR_MAX"]
try:
from _locale import *
except ImportError:
# Locale emulation
CHAR_MAX = 127
LC_ALL = 6
LC_COLLATE = 3
LC_CTYPE = 0
LC_MESSAGES = 5
LC_MONETARY = 4
LC_NUMERIC = 1
LC_TIME = 2
Error = ValueError
def localeconv():
""" localeconv() -> dict.
Returns numeric and monetary locale-specific parameters.
"""
# 'C' locale default values
return {'grouping': [127],
'currency_symbol': '',
'n_sign_posn': 127,
'p_cs_precedes': 127,
'n_cs_precedes': 127,
'mon_grouping': [],
'n_sep_by_space': 127,
'decimal_point': '.',
'negative_sign': '',
'positive_sign': '',
'p_sep_by_space': 127,
'int_curr_symbol': '',
'p_sign_posn': 127,
'thousands_sep': '',
'mon_thousands_sep': '',
'frac_digits': 127,
'mon_decimal_point': '',
'int_frac_digits': 127}
def setlocale(category, value=None):
""" setlocale(integer,string=None) -> string.
Activates/queries locale processing.
"""
if value not in (None, '', 'C'):
raise Error, '_locale emulation only supports "C" locale'
return 'C'
def strcoll(a,b):
""" strcoll(string,string) -> int.
Compares two strings according to the locale.
"""
return cmp(a,b)
def strxfrm(s):
""" strxfrm(string) -> string.
        Returns a transformed string such that cmp on two transformed
        strings gives a locale-aware comparison.
"""
return s
### Number formatting APIs
# Author: Martin von Loewis
# improved by Georg Brandl
#perform the grouping from right to left
def _group(s, monetary=False):
conv = localeconv()
thousands_sep = conv[monetary and 'mon_thousands_sep' or 'thousands_sep']
grouping = conv[monetary and 'mon_grouping' or 'grouping']
if not grouping:
return (s, 0)
result = ""
seps = 0
spaces = ""
if s[-1] == ' ':
sp = s.find(' ')
spaces = s[sp:]
s = s[:sp]
while s and grouping:
        # a CHAR_MAX entry ends the grouping (no further grouping is done)
if grouping[0] == CHAR_MAX:
break
# 0: re-use last group ad infinitum
elif grouping[0] != 0:
#process last group
group = grouping[0]
grouping = grouping[1:]
if result:
result = s[-group:] + thousands_sep + result
seps += 1
else:
result = s[-group:]
s = s[:-group]
if s and s[-1] not in "0123456789":
# the leading string is only spaces and signs
return s + result + spaces, seps
if not result:
return s + spaces, seps
if s:
result = s + thousands_sep + result
seps += 1
return result + spaces, seps
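# Worked example (grouping values illustrative): with grouping == [3, 0]
# and thousands_sep == ',', the final group size repeats ad infinitum:
#
#     _group('1234567')   # -> ('1,234,567', 2): two separators inserted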
def format(percent, value, grouping=False, monetary=False, *additional):
"""Returns the locale-aware substitution of a %? specifier
(percent).
additional is for format strings which contain one or more
'*' modifiers."""
# this is only for one-percent-specifier strings and this should be checked
if percent[0] != '%':
raise ValueError("format() must be given exactly one %char "
"format specifier")
if additional:
formatted = percent % ((value,) + additional)
else:
formatted = percent % value
# floats and decimal ints need special action!
if percent[-1] in 'eEfFgG':
seps = 0
parts = formatted.split('.')
if grouping:
parts[0], seps = _group(parts[0], monetary=monetary)
decimal_point = localeconv()[monetary and 'mon_decimal_point'
or 'decimal_point']
formatted = decimal_point.join(parts)
while seps:
sp = formatted.find(' ')
if sp == -1: break
formatted = formatted[:sp] + formatted[sp+1:]
seps -= 1
elif percent[-1] in 'diu':
if grouping:
formatted = _group(formatted, monetary=monetary)[0]
return formatted
import re, operator
_percent_re = re.compile(r'%(?:\((?P<key>.*?)\))?'
r'(?P<modifiers>[-#0-9 +*.hlL]*?)[eEfFgGdiouxXcrs%]')
def format_string(f, val, grouping=False):
"""Formats a string in the same way that the % formatting would use,
but takes the current locale into account.
Grouping is applied if the third parameter is true."""
percents = list(_percent_re.finditer(f))
new_f = _percent_re.sub('%s', f)
if isinstance(val, tuple):
new_val = list(val)
i = 0
for perc in percents:
starcount = perc.group('modifiers').count('*')
new_val[i] = format(perc.group(), new_val[i], grouping, False, *new_val[i+1:i+1+starcount])
del new_val[i+1:i+1+starcount]
i += (1 + starcount)
val = tuple(new_val)
elif operator.isMappingType(val):
for perc in percents:
key = perc.group("key")
val[key] = format(perc.group(), val[key], grouping)
else:
# val is a single value
val = format(percents[0].group(), val, grouping)
return new_f % val
def currency(val, symbol=True, grouping=False, international=False):
"""Formats val according to the currency settings
in the current locale."""
conv = localeconv()
# check for illegal values
digits = conv[international and 'int_frac_digits' or 'frac_digits']
if digits == 127:
raise ValueError("Currency formatting is not possible using "
"the 'C' locale.")
s = format('%%.%if' % digits, abs(val), grouping, monetary=True)
# '<' and '>' are markers if the sign must be inserted between symbol and value
s = '<' + s + '>'
if symbol:
smb = conv[international and 'int_curr_symbol' or 'currency_symbol']
precedes = conv[val<0 and 'n_cs_precedes' or 'p_cs_precedes']
separated = conv[val<0 and 'n_sep_by_space' or 'p_sep_by_space']
if precedes:
s = smb + (separated and ' ' or '') + s
else:
s = s + (separated and ' ' or '') + smb
sign_pos = conv[val<0 and 'n_sign_posn' or 'p_sign_posn']
sign = conv[val<0 and 'negative_sign' or 'positive_sign']
if sign_pos == 0:
s = '(' + s + ')'
elif sign_pos == 1:
s = sign + s
elif sign_pos == 2:
s = s + sign
elif sign_pos == 3:
s = s.replace('<', sign)
elif sign_pos == 4:
s = s.replace('>', sign)
else:
# the default if nothing specified;
# this should be the most fitting sign position
s = sign + s
return s.replace('<', '').replace('>', '')
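# Hedged example (assumes an active en_US locale; exact output depends on
# the locale database): the '<'/'>' markers above let the sign land
# between symbol and value when the locale asks for it.
#
#     currency(1234.56)                  # -> '$1234.56'
#     currency(1234.56, grouping=True)   # -> '$1,234.56'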
def str(val):
"""Convert float to integer, taking the locale into account."""
return format("%.12g", val)
def atof(string, func=float):
"Parses a string as a float according to the locale settings."
#First, get rid of the grouping
ts = localeconv()['thousands_sep']
if ts:
string = string.replace(ts, '')
#next, replace the decimal point with a dot
dd = localeconv()['decimal_point']
if dd:
string = string.replace(dd, '.')
#finally, parse the string
return func(string)
def atoi(str):
"Converts a string to an integer according to the locale settings."
return atof(str, int)
def _test():
setlocale(LC_ALL, "")
#do grouping
s1 = format("%d", 123456789,1)
print s1, "is", atoi(s1)
#standard formatting
s1 = str(3.14)
print s1, "is", atof(s1)
### Locale name aliasing engine
# Author: Marc-Andre Lemburg, mal@lemburg.com
# Various tweaks by Fredrik Lundh <fredrik@pythonware.com>
# store away the low-level version of setlocale (it's
# overridden below)
_setlocale = setlocale
def normalize(localename):
""" Returns a normalized locale code for the given locale
name.
The returned locale code is formatted for use with
setlocale().
If normalization fails, the original name is returned
unchanged.
If the given encoding is not known, the function defaults to
the default encoding for the locale code just like setlocale()
does.
"""
# Normalize the locale name and extract the encoding
fullname = localename.lower()
if ':' in fullname:
# ':' is sometimes used as encoding delimiter.
fullname = fullname.replace(':', '.')
if '.' in fullname:
langname, encoding = fullname.split('.')[:2]
fullname = langname + '.' + encoding
else:
langname = fullname
encoding = ''
# First lookup: fullname (possibly with encoding)
norm_encoding = encoding.replace('-', '')
norm_encoding = norm_encoding.replace('_', '')
lookup_name = langname + '.' + encoding
code = locale_alias.get(lookup_name, None)
if code is not None:
return code
#print 'first lookup failed'
# Second try: langname (without encoding)
code = locale_alias.get(langname, None)
if code is not None:
#print 'langname lookup succeeded'
if '.' in code:
langname, defenc = code.split('.')
else:
langname = code
defenc = ''
if encoding:
# Convert the encoding to a C lib compatible encoding string
norm_encoding = encodings.normalize_encoding(encoding)
#print 'norm encoding: %r' % norm_encoding
norm_encoding = encodings.aliases.aliases.get(norm_encoding,
norm_encoding)
#print 'aliased encoding: %r' % norm_encoding
encoding = locale_encoding_alias.get(norm_encoding,
norm_encoding)
else:
encoding = defenc
#print 'found encoding %r' % encoding
if encoding:
return langname + '.' + encoding
else:
return langname
else:
return localename
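# Worked examples against the alias table below:
#
#     normalize('de_de@euro')    # -> 'de_DE.ISO8859-15'
#     normalize('en_US.UTF-8')   # -> 'en_US.UTF8'
#     normalize('klingon')       # -> 'klingon' (unknown names pass through)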
def _parse_localename(localename):
""" Parses the locale code for localename and returns the
result as tuple (language code, encoding).
The localename is normalized and passed through the locale
alias engine. A ValueError is raised in case the locale name
cannot be parsed.
The language code corresponds to RFC 1766. code and encoding
can be None in case the values cannot be determined or are
unknown to this implementation.
"""
code = normalize(localename)
if '@' in code:
# Deal with locale modifiers
code, modifier = code.split('@')
if modifier == 'euro' and '.' not in code:
# Assume Latin-9 for @euro locales. This is bogus,
# since some systems may use other encodings for these
# locales. Also, we ignore other modifiers.
return code, 'iso-8859-15'
if '.' in code:
return tuple(code.split('.')[:2])
elif code == 'C':
return None, None
raise ValueError, 'unknown locale: %s' % localename
def _build_localename(localetuple):
""" Builds a locale code from the given tuple (language code,
encoding).
No aliasing or normalizing takes place.
"""
language, encoding = localetuple
if language is None:
language = 'C'
if encoding is None:
return language
else:
return language + '.' + encoding
def getdefaultlocale(envvars=('LC_ALL', 'LC_CTYPE', 'LANG', 'LANGUAGE')):
""" Tries to determine the default locale settings and returns
them as tuple (language code, encoding).
According to POSIX, a program which has not called
setlocale(LC_ALL, "") runs using the portable 'C' locale.
Calling setlocale(LC_ALL, "") lets it use the default locale as
defined by the LANG variable. Since we don't want to interfere
with the current locale setting we thus emulate the behavior
in the way described above.
To maintain compatibility with other platforms, not only the
LANG variable is tested, but a list of variables given as
envvars parameter. The first found to be defined will be
used. envvars defaults to the search path used in GNU gettext;
it must always contain the variable name 'LANG'.
Except for the code 'C', the language code corresponds to RFC
1766. code and encoding can be None in case the values cannot
be determined.
"""
try:
# check if it's supported by the _locale module
import _locale
code, encoding = _locale._getdefaultlocale()
except (ImportError, AttributeError):
pass
else:
# make sure the code/encoding values are valid
if sys.platform == "win32" and code and code[:2] == "0x":
# map windows language identifier to language name
code = windows_locale.get(int(code, 0))
# ...add other platform-specific processing here, if
# necessary...
return code, encoding
# fall back on POSIX behaviour
import os
lookup = os.environ.get
for variable in envvars:
localename = lookup(variable,None)
if localename:
if variable == 'LANGUAGE':
localename = localename.split(':')[0]
break
else:
localename = 'C'
return _parse_localename(localename)
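# Hedged example (environment assumed): with the LC_* variables unset and
# LANG=de_DE@euro, the POSIX fallback path yields
#
#     getdefaultlocale()   # -> ('de_DE', 'ISO8859-15')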
def getlocale(category=LC_CTYPE):
""" Returns the current setting for the given locale category as
tuple (language code, encoding).
category may be one of the LC_* value except LC_ALL. It
defaults to LC_CTYPE.
Except for the code 'C', the language code corresponds to RFC
1766. code and encoding can be None in case the values cannot
be determined.
"""
localename = _setlocale(category)
if category == LC_ALL and ';' in localename:
raise TypeError, 'category LC_ALL is not supported'
return _parse_localename(localename)
def setlocale(category, locale=None):
""" Set the locale for the given category. The locale can be
a string, a locale tuple (language code, encoding), or None.
Locale tuples are converted to strings the locale aliasing
engine. Locale strings are passed directly to the C lib.
category may be given as one of the LC_* values.
"""
if locale and type(locale) is not type(""):
# convert to string
locale = normalize(_build_localename(locale))
return _setlocale(category, locale)
def resetlocale(category=LC_ALL):
""" Sets the locale for category to the default setting.
The default setting is determined by calling
getdefaultlocale(). category defaults to LC_ALL.
"""
_setlocale(category, _build_localename(getdefaultlocale()))
if sys.platform in ('win32', 'darwin', 'mac'):
# On Win32, this will return the ANSI code page
# On the Mac, it should return the system encoding;
# it might return "ascii" instead
def getpreferredencoding(do_setlocale = True):
"""Return the charset that the user is likely using."""
import _locale
return _locale._getdefaultlocale()[1]
else:
# On Unix, if CODESET is available, use that.
try:
CODESET
except NameError:
# Fall back to parsing environment variables :-(
def getpreferredencoding(do_setlocale = True):
"""Return the charset that the user is likely using,
by looking at environment variables."""
return getdefaultlocale()[1]
else:
def getpreferredencoding(do_setlocale = True):
"""Return the charset that the user is likely using,
according to the system configuration."""
if do_setlocale:
oldloc = setlocale(LC_CTYPE)
setlocale(LC_CTYPE, "")
result = nl_langinfo(CODESET)
setlocale(LC_CTYPE, oldloc)
return result
else:
return nl_langinfo(CODESET)
### Database
#
# The following data was extracted from the locale.alias file which
# comes with X11 and then hand edited removing the explicit encoding
# definitions and adding some more aliases. The file is usually
# available as /usr/lib/X11/locale/locale.alias.
#
#
# The local_encoding_alias table maps lowercase encoding alias names
# to C locale encoding names (case-sensitive). Note that normalize()
# first looks up the encoding in the encodings.aliases dictionary and
# then applies this mapping to find the correct C lib name for the
# encoding.
#
locale_encoding_alias = {
# Mappings for non-standard encoding names used in locale names
'437': 'C',
'c': 'C',
'en': 'ISO8859-1',
'jis': 'JIS7',
'jis7': 'JIS7',
'ajec': 'eucJP',
# Mappings from Python codec names to C lib encoding names
'ascii': 'ISO8859-1',
'latin_1': 'ISO8859-1',
'iso8859_1': 'ISO8859-1',
'iso8859_10': 'ISO8859-10',
'iso8859_11': 'ISO8859-11',
'iso8859_13': 'ISO8859-13',
'iso8859_14': 'ISO8859-14',
'iso8859_15': 'ISO8859-15',
'iso8859_2': 'ISO8859-2',
'iso8859_3': 'ISO8859-3',
'iso8859_4': 'ISO8859-4',
'iso8859_5': 'ISO8859-5',
'iso8859_6': 'ISO8859-6',
'iso8859_7': 'ISO8859-7',
'iso8859_8': 'ISO8859-8',
'iso8859_9': 'ISO8859-9',
'iso2022_jp': 'JIS7',
'shift_jis': 'SJIS',
'tactis': 'TACTIS',
'euc_jp': 'eucJP',
'euc_kr': 'eucKR',
'utf_8': 'UTF8',
'koi8_r': 'KOI8-R',
'koi8_u': 'KOI8-U',
# XXX This list is still incomplete. If you know more
# mappings, please file a bug report. Thanks.
}
#
# The locale_alias table maps lowercase alias names to C locale names
# (case-sensitive). Encodings are always separated from the locale
# name using a dot ('.'); they should only be given in case the
# language name is needed to interpret the given encoding alias
# correctly (CJK codes often have this need).
#
# Note that the normalize() function which uses this tables
# removes '_' and '-' characters from the encoding part of the
# locale name before doing the lookup. This saves a lot of
# space in the table.
#
# MAL 2004-12-10:
# Updated alias mapping to most recent locale.alias file
# from X.org distribution using makelocalealias.py.
#
# These are the differences compared to the old mapping (Python 2.4
# and older):
#
# updated 'bg' -> 'bg_BG.ISO8859-5' to 'bg_BG.CP1251'
# updated 'bg_bg' -> 'bg_BG.ISO8859-5' to 'bg_BG.CP1251'
# updated 'bulgarian' -> 'bg_BG.ISO8859-5' to 'bg_BG.CP1251'
# updated 'cz' -> 'cz_CZ.ISO8859-2' to 'cs_CZ.ISO8859-2'
# updated 'cz_cz' -> 'cz_CZ.ISO8859-2' to 'cs_CZ.ISO8859-2'
# updated 'czech' -> 'cs_CS.ISO8859-2' to 'cs_CZ.ISO8859-2'
# updated 'dutch' -> 'nl_BE.ISO8859-1' to 'nl_NL.ISO8859-1'
# updated 'et' -> 'et_EE.ISO8859-4' to 'et_EE.ISO8859-15'
# updated 'et_ee' -> 'et_EE.ISO8859-4' to 'et_EE.ISO8859-15'
# updated 'fi' -> 'fi_FI.ISO8859-1' to 'fi_FI.ISO8859-15'
# updated 'fi_fi' -> 'fi_FI.ISO8859-1' to 'fi_FI.ISO8859-15'
# updated 'iw' -> 'iw_IL.ISO8859-8' to 'he_IL.ISO8859-8'
# updated 'iw_il' -> 'iw_IL.ISO8859-8' to 'he_IL.ISO8859-8'
# updated 'japanese' -> 'ja_JP.SJIS' to 'ja_JP.eucJP'
# updated 'lt' -> 'lt_LT.ISO8859-4' to 'lt_LT.ISO8859-13'
# updated 'lv' -> 'lv_LV.ISO8859-4' to 'lv_LV.ISO8859-13'
# updated 'sl' -> 'sl_CS.ISO8859-2' to 'sl_SI.ISO8859-2'
# updated 'slovene' -> 'sl_CS.ISO8859-2' to 'sl_SI.ISO8859-2'
# updated 'th_th' -> 'th_TH.TACTIS' to 'th_TH.ISO8859-11'
# updated 'zh_cn' -> 'zh_CN.eucCN' to 'zh_CN.gb2312'
# updated 'zh_cn.big5' -> 'zh_TW.eucTW' to 'zh_TW.big5'
# updated 'zh_tw' -> 'zh_TW.eucTW' to 'zh_TW.big5'
#
locale_alias = {
'a3': 'a3_AZ.KOI8-C',
'a3_az': 'a3_AZ.KOI8-C',
'a3_az.koi8c': 'a3_AZ.KOI8-C',
'af': 'af_ZA.ISO8859-1',
'af_za': 'af_ZA.ISO8859-1',
'af_za.iso88591': 'af_ZA.ISO8859-1',
'am': 'am_ET.UTF-8',
'american': 'en_US.ISO8859-1',
'american.iso88591': 'en_US.ISO8859-1',
'ar': 'ar_AA.ISO8859-6',
'ar_aa': 'ar_AA.ISO8859-6',
'ar_aa.iso88596': 'ar_AA.ISO8859-6',
'ar_ae': 'ar_AE.ISO8859-6',
'ar_bh': 'ar_BH.ISO8859-6',
'ar_dz': 'ar_DZ.ISO8859-6',
'ar_eg': 'ar_EG.ISO8859-6',
'ar_eg.iso88596': 'ar_EG.ISO8859-6',
'ar_iq': 'ar_IQ.ISO8859-6',
'ar_jo': 'ar_JO.ISO8859-6',
'ar_kw': 'ar_KW.ISO8859-6',
'ar_lb': 'ar_LB.ISO8859-6',
'ar_ly': 'ar_LY.ISO8859-6',
'ar_ma': 'ar_MA.ISO8859-6',
'ar_om': 'ar_OM.ISO8859-6',
'ar_qa': 'ar_QA.ISO8859-6',
'ar_sa': 'ar_SA.ISO8859-6',
'ar_sa.iso88596': 'ar_SA.ISO8859-6',
'ar_sd': 'ar_SD.ISO8859-6',
'ar_sy': 'ar_SY.ISO8859-6',
'ar_tn': 'ar_TN.ISO8859-6',
'ar_ye': 'ar_YE.ISO8859-6',
'arabic': 'ar_AA.ISO8859-6',
'arabic.iso88596': 'ar_AA.ISO8859-6',
'az': 'az_AZ.ISO8859-9E',
'az_az': 'az_AZ.ISO8859-9E',
'az_az.iso88599e': 'az_AZ.ISO8859-9E',
'be': 'be_BY.CP1251',
'be_by': 'be_BY.CP1251',
'be_by.cp1251': 'be_BY.CP1251',
'be_by.microsoftcp1251': 'be_BY.CP1251',
'bg': 'bg_BG.CP1251',
'bg_bg': 'bg_BG.CP1251',
'bg_bg.cp1251': 'bg_BG.CP1251',
'bg_bg.iso88595': 'bg_BG.ISO8859-5',
'bg_bg.koi8r': 'bg_BG.KOI8-R',
'bg_bg.microsoftcp1251': 'bg_BG.CP1251',
'bokmal': 'nb_NO.ISO8859-1',
'bokm\xe5l': 'nb_NO.ISO8859-1',
'br': 'br_FR.ISO8859-1',
'br_fr': 'br_FR.ISO8859-1',
'br_fr.iso88591': 'br_FR.ISO8859-1',
'br_fr.iso885914': 'br_FR.ISO8859-14',
'br_fr.iso885915': 'br_FR.ISO8859-15',
'br_fr@euro': 'br_FR.ISO8859-15',
'bulgarian': 'bg_BG.CP1251',
'c': 'C',
'c-french': 'fr_CA.ISO8859-1',
'c-french.iso88591': 'fr_CA.ISO8859-1',
'c.en': 'C',
'c.iso88591': 'en_US.ISO8859-1',
'c_c': 'C',
'c_c.c': 'C',
'ca': 'ca_ES.ISO8859-1',
'ca_es': 'ca_ES.ISO8859-1',
'ca_es.iso88591': 'ca_ES.ISO8859-1',
'ca_es.iso885915': 'ca_ES.ISO8859-15',
'ca_es@euro': 'ca_ES.ISO8859-15',
'catalan': 'ca_ES.ISO8859-1',
'cextend': 'en_US.ISO8859-1',
'cextend.en': 'en_US.ISO8859-1',
'chinese-s': 'zh_CN.eucCN',
'chinese-t': 'zh_TW.eucTW',
'croatian': 'hr_HR.ISO8859-2',
'cs': 'cs_CZ.ISO8859-2',
'cs_cs': 'cs_CZ.ISO8859-2',
'cs_cs.iso88592': 'cs_CZ.ISO8859-2',
'cs_cz': 'cs_CZ.ISO8859-2',
'cs_cz.iso88592': 'cs_CZ.ISO8859-2',
'cy': 'cy_GB.ISO8859-1',
'cy_gb': 'cy_GB.ISO8859-1',
'cy_gb.iso88591': 'cy_GB.ISO8859-1',
'cy_gb.iso885914': 'cy_GB.ISO8859-14',
'cy_gb.iso885915': 'cy_GB.ISO8859-15',
'cy_gb@euro': 'cy_GB.ISO8859-15',
'cz': 'cs_CZ.ISO8859-2',
'cz_cz': 'cs_CZ.ISO8859-2',
'czech': 'cs_CZ.ISO8859-2',
'da': 'da_DK.ISO8859-1',
'da_dk': 'da_DK.ISO8859-1',
'da_dk.88591': 'da_DK.ISO8859-1',
'da_dk.885915': 'da_DK.ISO8859-15',
'da_dk.iso88591': 'da_DK.ISO8859-1',
'da_dk.iso885915': 'da_DK.ISO8859-15',
'da_dk@euro': 'da_DK.ISO8859-15',
'danish': 'da_DK.ISO8859-1',
'danish.iso88591': 'da_DK.ISO8859-1',
'dansk': 'da_DK.ISO8859-1',
'de': 'de_DE.ISO8859-1',
'de_at': 'de_AT.ISO8859-1',
'de_at.iso88591': 'de_AT.ISO8859-1',
'de_at.iso885915': 'de_AT.ISO8859-15',
'de_at@euro': 'de_AT.ISO8859-15',
'de_be': 'de_BE.ISO8859-1',
'de_be.iso88591': 'de_BE.ISO8859-1',
'de_be.iso885915': 'de_BE.ISO8859-15',
'de_be@euro': 'de_BE.ISO8859-15',
'de_ch': 'de_CH.ISO8859-1',
'de_ch.iso88591': 'de_CH.ISO8859-1',
'de_ch.iso885915': 'de_CH.ISO8859-15',
'de_ch@euro': 'de_CH.ISO8859-15',
'de_de': 'de_DE.ISO8859-1',
'de_de.88591': 'de_DE.ISO8859-1',
'de_de.885915': 'de_DE.ISO8859-15',
'de_de.885915@euro': 'de_DE.ISO8859-15',
'de_de.iso88591': 'de_DE.ISO8859-1',
'de_de.iso885915': 'de_DE.ISO8859-15',
'de_de@euro': 'de_DE.ISO8859-15',
'de_lu': 'de_LU.ISO8859-1',
'de_lu.iso88591': 'de_LU.ISO8859-1',
'de_lu.iso885915': 'de_LU.ISO8859-15',
'de_lu@euro': 'de_LU.ISO8859-15',
'deutsch': 'de_DE.ISO8859-1',
'dutch': 'nl_NL.ISO8859-1',
'dutch.iso88591': 'nl_BE.ISO8859-1',
'ee': 'ee_EE.ISO8859-4',
'ee_ee': 'ee_EE.ISO8859-4',
'ee_ee.iso88594': 'ee_EE.ISO8859-4',
'eesti': 'et_EE.ISO8859-1',
'el': 'el_GR.ISO8859-7',
'el_gr': 'el_GR.ISO8859-7',
'el_gr.iso88597': 'el_GR.ISO8859-7',
'el_gr@euro': 'el_GR.ISO8859-15',
'en': 'en_US.ISO8859-1',
'en.iso88591': 'en_US.ISO8859-1',
'en_au': 'en_AU.ISO8859-1',
'en_au.iso88591': 'en_AU.ISO8859-1',
'en_be': 'en_BE.ISO8859-1',
'en_be@euro': 'en_BE.ISO8859-15',
'en_bw': 'en_BW.ISO8859-1',
'en_ca': 'en_CA.ISO8859-1',
'en_ca.iso88591': 'en_CA.ISO8859-1',
'en_gb': 'en_GB.ISO8859-1',
'en_gb.88591': 'en_GB.ISO8859-1',
'en_gb.iso88591': 'en_GB.ISO8859-1',
'en_gb.iso885915': 'en_GB.ISO8859-15',
'en_gb@euro': 'en_GB.ISO8859-15',
'en_hk': 'en_HK.ISO8859-1',
'en_ie': 'en_IE.ISO8859-1',
'en_ie.iso88591': 'en_IE.ISO8859-1',
'en_ie.iso885915': 'en_IE.ISO8859-15',
'en_ie@euro': 'en_IE.ISO8859-15',
'en_in': 'en_IN.ISO8859-1',
'en_nz': 'en_NZ.ISO8859-1',
'en_nz.iso88591': 'en_NZ.ISO8859-1',
'en_ph': 'en_PH.ISO8859-1',
'en_sg': 'en_SG.ISO8859-1',
'en_uk': 'en_GB.ISO8859-1',
'en_us': 'en_US.ISO8859-1',
'en_us.88591': 'en_US.ISO8859-1',
'en_us.885915': 'en_US.ISO8859-15',
'en_us.iso88591': 'en_US.ISO8859-1',
'en_us.iso885915': 'en_US.ISO8859-15',
'en_us.iso885915@euro': 'en_US.ISO8859-15',
'en_us@euro': 'en_US.ISO8859-15',
'en_us@euro@euro': 'en_US.ISO8859-15',
'en_za': 'en_ZA.ISO8859-1',
'en_za.88591': 'en_ZA.ISO8859-1',
'en_za.iso88591': 'en_ZA.ISO8859-1',
'en_za.iso885915': 'en_ZA.ISO8859-15',
'en_za@euro': 'en_ZA.ISO8859-15',
'en_zw': 'en_ZW.ISO8859-1',
'eng_gb': 'en_GB.ISO8859-1',
'eng_gb.8859': 'en_GB.ISO8859-1',
'english': 'en_EN.ISO8859-1',
'english.iso88591': 'en_EN.ISO8859-1',
'english_uk': 'en_GB.ISO8859-1',
'english_uk.8859': 'en_GB.ISO8859-1',
'english_united-states': 'en_US.ISO8859-1',
'english_united-states.437': 'C',
'english_us': 'en_US.ISO8859-1',
'english_us.8859': 'en_US.ISO8859-1',
'english_us.ascii': 'en_US.ISO8859-1',
'eo': 'eo_XX.ISO8859-3',
'eo_eo': 'eo_EO.ISO8859-3',
'eo_eo.iso88593': 'eo_EO.ISO8859-3',
'eo_xx': 'eo_XX.ISO8859-3',
'eo_xx.iso88593': 'eo_XX.ISO8859-3',
'es': 'es_ES.ISO8859-1',
'es_ar': 'es_AR.ISO8859-1',
'es_ar.iso88591': 'es_AR.ISO8859-1',
'es_bo': 'es_BO.ISO8859-1',
'es_bo.iso88591': 'es_BO.ISO8859-1',
'es_cl': 'es_CL.ISO8859-1',
'es_cl.iso88591': 'es_CL.ISO8859-1',
'es_co': 'es_CO.ISO8859-1',
'es_co.iso88591': 'es_CO.ISO8859-1',
'es_cr': 'es_CR.ISO8859-1',
'es_cr.iso88591': 'es_CR.ISO8859-1',
'es_do': 'es_DO.ISO8859-1',
'es_do.iso88591': 'es_DO.ISO8859-1',
'es_ec': 'es_EC.ISO8859-1',
'es_ec.iso88591': 'es_EC.ISO8859-1',
'es_es': 'es_ES.ISO8859-1',
'es_es.88591': 'es_ES.ISO8859-1',
'es_es.iso88591': 'es_ES.ISO8859-1',
'es_es.iso885915': 'es_ES.ISO8859-15',
'es_es@euro': 'es_ES.ISO8859-15',
'es_gt': 'es_GT.ISO8859-1',
'es_gt.iso88591': 'es_GT.ISO8859-1',
'es_hn': 'es_HN.ISO8859-1',
'es_hn.iso88591': 'es_HN.ISO8859-1',
'es_mx': 'es_MX.ISO8859-1',
'es_mx.iso88591': 'es_MX.ISO8859-1',
'es_ni': 'es_NI.ISO8859-1',
'es_ni.iso88591': 'es_NI.ISO8859-1',
'es_pa': 'es_PA.ISO8859-1',
'es_pa.iso88591': 'es_PA.ISO8859-1',
'es_pa.iso885915': 'es_PA.ISO8859-15',
'es_pa@euro': 'es_PA.ISO8859-15',
'es_pe': 'es_PE.ISO8859-1',
'es_pe.iso88591': 'es_PE.ISO8859-1',
'es_pe.iso885915': 'es_PE.ISO8859-15',
'es_pe@euro': 'es_PE.ISO8859-15',
'es_pr': 'es_PR.ISO8859-1',
'es_pr.iso88591': 'es_PR.ISO8859-1',
'es_py': 'es_PY.ISO8859-1',
'es_py.iso88591': 'es_PY.ISO8859-1',
'es_py.iso885915': 'es_PY.ISO8859-15',
'es_py@euro': 'es_PY.ISO8859-15',
'es_sv': 'es_SV.ISO8859-1',
'es_sv.iso88591': 'es_SV.ISO8859-1',
'es_sv.iso885915': 'es_SV.ISO8859-15',
'es_sv@euro': 'es_SV.ISO8859-15',
'es_us': 'es_US.ISO8859-1',
'es_uy': 'es_UY.ISO8859-1',
'es_uy.iso88591': 'es_UY.ISO8859-1',
'es_uy.iso885915': 'es_UY.ISO8859-15',
'es_uy@euro': 'es_UY.ISO8859-15',
'es_ve': 'es_VE.ISO8859-1',
'es_ve.iso88591': 'es_VE.ISO8859-1',
'es_ve.iso885915': 'es_VE.ISO8859-15',
'es_ve@euro': 'es_VE.ISO8859-15',
'estonian': 'et_EE.ISO8859-1',
'et': 'et_EE.ISO8859-15',
'et_ee': 'et_EE.ISO8859-15',
'et_ee.iso88591': 'et_EE.ISO8859-1',
'et_ee.iso885913': 'et_EE.ISO8859-13',
'et_ee.iso885915': 'et_EE.ISO8859-15',
'et_ee.iso88594': 'et_EE.ISO8859-4',
'et_ee@euro': 'et_EE.ISO8859-15',
'eu': 'eu_ES.ISO8859-1',
'eu_es': 'eu_ES.ISO8859-1',
'eu_es.iso88591': 'eu_ES.ISO8859-1',
'eu_es.iso885915': 'eu_ES.ISO8859-15',
'eu_es@euro': 'eu_ES.ISO8859-15',
'fa': 'fa_IR.UTF-8',
'fa_ir': 'fa_IR.UTF-8',
'fa_ir.isiri3342': 'fa_IR.ISIRI-3342',
'fi': 'fi_FI.ISO8859-15',
'fi_fi': 'fi_FI.ISO8859-15',
'fi_fi.88591': 'fi_FI.ISO8859-1',
'fi_fi.iso88591': 'fi_FI.ISO8859-1',
'fi_fi.iso885915': 'fi_FI.ISO8859-15',
'fi_fi.utf8@euro': 'fi_FI.UTF-8',
'fi_fi@euro': 'fi_FI.ISO8859-15',
'finnish': 'fi_FI.ISO8859-1',
'finnish.iso88591': 'fi_FI.ISO8859-1',
'fo': 'fo_FO.ISO8859-1',
'fo_fo': 'fo_FO.ISO8859-1',
'fo_fo.iso88591': 'fo_FO.ISO8859-1',
'fo_fo.iso885915': 'fo_FO.ISO8859-15',
'fo_fo@euro': 'fo_FO.ISO8859-15',
'fr': 'fr_FR.ISO8859-1',
'fr_be': 'fr_BE.ISO8859-1',
'fr_be.88591': 'fr_BE.ISO8859-1',
'fr_be.iso88591': 'fr_BE.ISO8859-1',
'fr_be.iso885915': 'fr_BE.ISO8859-15',
'fr_be@euro': 'fr_BE.ISO8859-15',
'fr_ca': 'fr_CA.ISO8859-1',
'fr_ca.88591': 'fr_CA.ISO8859-1',
'fr_ca.iso88591': 'fr_CA.ISO8859-1',
'fr_ca.iso885915': 'fr_CA.ISO8859-15',
'fr_ca@euro': 'fr_CA.ISO8859-15',
'fr_ch': 'fr_CH.ISO8859-1',
'fr_ch.88591': 'fr_CH.ISO8859-1',
'fr_ch.iso88591': 'fr_CH.ISO8859-1',
'fr_ch.iso885915': 'fr_CH.ISO8859-15',
'fr_ch@euro': 'fr_CH.ISO8859-15',
'fr_fr': 'fr_FR.ISO8859-1',
'fr_fr.88591': 'fr_FR.ISO8859-1',
'fr_fr.iso88591': 'fr_FR.ISO8859-1',
'fr_fr.iso885915': 'fr_FR.ISO8859-15',
'fr_fr@euro': 'fr_FR.ISO8859-15',
'fr_lu': 'fr_LU.ISO8859-1',
'fr_lu.88591': 'fr_LU.ISO8859-1',
'fr_lu.iso88591': 'fr_LU.ISO8859-1',
'fr_lu.iso885915': 'fr_LU.ISO8859-15',
'fr_lu@euro': 'fr_LU.ISO8859-15',
'fran\xe7ais': 'fr_FR.ISO8859-1',
'fre_fr': 'fr_FR.ISO8859-1',
'fre_fr.8859': 'fr_FR.ISO8859-1',
'french': 'fr_FR.ISO8859-1',
'french.iso88591': 'fr_CH.ISO8859-1',
'french_france': 'fr_FR.ISO8859-1',
'french_france.8859': 'fr_FR.ISO8859-1',
'ga': 'ga_IE.ISO8859-1',
'ga_ie': 'ga_IE.ISO8859-1',
'ga_ie.iso88591': 'ga_IE.ISO8859-1',
'ga_ie.iso885914': 'ga_IE.ISO8859-14',
'ga_ie.iso885915': 'ga_IE.ISO8859-15',
'ga_ie@euro': 'ga_IE.ISO8859-15',
'galego': 'gl_ES.ISO8859-1',
'galician': 'gl_ES.ISO8859-1',
'gd': 'gd_GB.ISO8859-1',
'gd_gb': 'gd_GB.ISO8859-1',
'gd_gb.iso88591': 'gd_GB.ISO8859-1',
'gd_gb.iso885914': 'gd_GB.ISO8859-14',
'gd_gb.iso885915': 'gd_GB.ISO8859-15',
'gd_gb@euro': 'gd_GB.ISO8859-15',
'ger_de': 'de_DE.ISO8859-1',
'ger_de.8859': 'de_DE.ISO8859-1',
'german': 'de_DE.ISO8859-1',
'german.iso88591': 'de_CH.ISO8859-1',
'german_germany': 'de_DE.ISO8859-1',
'german_germany.8859': 'de_DE.ISO8859-1',
'gl': 'gl_ES.ISO8859-1',
'gl_es': 'gl_ES.ISO8859-1',
'gl_es.iso88591': 'gl_ES.ISO8859-1',
'gl_es.iso885915': 'gl_ES.ISO8859-15',
'gl_es@euro': 'gl_ES.ISO8859-15',
'greek': 'el_GR.ISO8859-7',
'greek.iso88597': 'el_GR.ISO8859-7',
'gv': 'gv_GB.ISO8859-1',
'gv_gb': 'gv_GB.ISO8859-1',
'gv_gb.iso88591': 'gv_GB.ISO8859-1',
'gv_gb.iso885914': 'gv_GB.ISO8859-14',
'gv_gb.iso885915': 'gv_GB.ISO8859-15',
'gv_gb@euro': 'gv_GB.ISO8859-15',
'he': 'he_IL.ISO8859-8',
'he_il': 'he_IL.ISO8859-8',
'he_il.cp1255': 'he_IL.CP1255',
'he_il.iso88598': 'he_IL.ISO8859-8',
'he_il.microsoftcp1255': 'he_IL.CP1255',
'hebrew': 'iw_IL.ISO8859-8',
'hebrew.iso88598': 'iw_IL.ISO8859-8',
'hi': 'hi_IN.ISCII-DEV',
'hi_in': 'hi_IN.ISCII-DEV',
'hi_in.isciidev': 'hi_IN.ISCII-DEV',
'hr': 'hr_HR.ISO8859-2',
'hr_hr': 'hr_HR.ISO8859-2',
'hr_hr.iso88592': 'hr_HR.ISO8859-2',
'hrvatski': 'hr_HR.ISO8859-2',
'hu': 'hu_HU.ISO8859-2',
'hu_hu': 'hu_HU.ISO8859-2',
'hu_hu.iso88592': 'hu_HU.ISO8859-2',
'hungarian': 'hu_HU.ISO8859-2',
'icelandic': 'is_IS.ISO8859-1',
'icelandic.iso88591': 'is_IS.ISO8859-1',
'id': 'id_ID.ISO8859-1',
'id_id': 'id_ID.ISO8859-1',
'in': 'id_ID.ISO8859-1',
'in_id': 'id_ID.ISO8859-1',
'is': 'is_IS.ISO8859-1',
'is_is': 'is_IS.ISO8859-1',
'is_is.iso88591': 'is_IS.ISO8859-1',
'is_is.iso885915': 'is_IS.ISO8859-15',
'is_is@euro': 'is_IS.ISO8859-15',
'iso-8859-1': 'en_US.ISO8859-1',
'iso-8859-15': 'en_US.ISO8859-15',
'iso8859-1': 'en_US.ISO8859-1',
'iso8859-15': 'en_US.ISO8859-15',
'iso_8859_1': 'en_US.ISO8859-1',
'iso_8859_15': 'en_US.ISO8859-15',
'it': 'it_IT.ISO8859-1',
'it_ch': 'it_CH.ISO8859-1',
'it_ch.iso88591': 'it_CH.ISO8859-1',
'it_ch.iso885915': 'it_CH.ISO8859-15',
'it_ch@euro': 'it_CH.ISO8859-15',
'it_it': 'it_IT.ISO8859-1',
'it_it.88591': 'it_IT.ISO8859-1',
'it_it.iso88591': 'it_IT.ISO8859-1',
'it_it.iso885915': 'it_IT.ISO8859-15',
'it_it@euro': 'it_IT.ISO8859-15',
'italian': 'it_IT.ISO8859-1',
'italian.iso88591': 'it_IT.ISO8859-1',
'iu': 'iu_CA.NUNACOM-8',
'iu_ca': 'iu_CA.NUNACOM-8',
'iu_ca.nunacom8': 'iu_CA.NUNACOM-8',
'iw': 'he_IL.ISO8859-8',
'iw_il': 'he_IL.ISO8859-8',
'iw_il.iso88598': 'he_IL.ISO8859-8',
'ja': 'ja_JP.eucJP',
'ja.jis': 'ja_JP.JIS7',
'ja.sjis': 'ja_JP.SJIS',
'ja_jp': 'ja_JP.eucJP',
'ja_jp.ajec': 'ja_JP.eucJP',
'ja_jp.euc': 'ja_JP.eucJP',
'ja_jp.eucjp': 'ja_JP.eucJP',
'ja_jp.iso-2022-jp': 'ja_JP.JIS7',
'ja_jp.iso2022jp': 'ja_JP.JIS7',
'ja_jp.jis': 'ja_JP.JIS7',
'ja_jp.jis7': 'ja_JP.JIS7',
'ja_jp.mscode': 'ja_JP.SJIS',
'ja_jp.sjis': 'ja_JP.SJIS',
'ja_jp.ujis': 'ja_JP.eucJP',
'japan': 'ja_JP.eucJP',
'japanese': 'ja_JP.eucJP',
'japanese-euc': 'ja_JP.eucJP',
'japanese.euc': 'ja_JP.eucJP',
'japanese.sjis': 'ja_JP.SJIS',
'jp_jp': 'ja_JP.eucJP',
'ka': 'ka_GE.GEORGIAN-ACADEMY',
'ka_ge': 'ka_GE.GEORGIAN-ACADEMY',
'ka_ge.georgianacademy': 'ka_GE.GEORGIAN-ACADEMY',
'ka_ge.georgianps': 'ka_GE.GEORGIAN-PS',
'ka_ge.georgianrs': 'ka_GE.GEORGIAN-ACADEMY',
'kl': 'kl_GL.ISO8859-1',
'kl_gl': 'kl_GL.ISO8859-1',
'kl_gl.iso88591': 'kl_GL.ISO8859-1',
'kl_gl.iso885915': 'kl_GL.ISO8859-15',
'kl_gl@euro': 'kl_GL.ISO8859-15',
'ko': 'ko_KR.eucKR',
'ko_kr': 'ko_KR.eucKR',
'ko_kr.euc': 'ko_KR.eucKR',
'ko_kr.euckr': 'ko_KR.eucKR',
'korean': 'ko_KR.eucKR',
'korean.euc': 'ko_KR.eucKR',
'kw': 'kw_GB.ISO8859-1',
'kw_gb': 'kw_GB.ISO8859-1',
'kw_gb.iso88591': 'kw_GB.ISO8859-1',
'kw_gb.iso885914': 'kw_GB.ISO8859-14',
'kw_gb.iso885915': 'kw_GB.ISO8859-15',
'kw_gb@euro': 'kw_GB.ISO8859-15',
'lithuanian': 'lt_LT.ISO8859-13',
'lo': 'lo_LA.MULELAO-1',
'lo_la': 'lo_LA.MULELAO-1',
'lo_la.cp1133': 'lo_LA.IBM-CP1133',
'lo_la.ibmcp1133': 'lo_LA.IBM-CP1133',
'lo_la.mulelao1': 'lo_LA.MULELAO-1',
'lt': 'lt_LT.ISO8859-13',
'lt_lt': 'lt_LT.ISO8859-13',
'lt_lt.iso885913': 'lt_LT.ISO8859-13',
'lt_lt.iso88594': 'lt_LT.ISO8859-4',
'lv': 'lv_LV.ISO8859-13',
'lv_lv': 'lv_LV.ISO8859-13',
'lv_lv.iso885913': 'lv_LV.ISO8859-13',
'lv_lv.iso88594': 'lv_LV.ISO8859-4',
'mi': 'mi_NZ.ISO8859-1',
'mi_nz': 'mi_NZ.ISO8859-1',
'mi_nz.iso88591': 'mi_NZ.ISO8859-1',
'mk': 'mk_MK.ISO8859-5',
'mk_mk': 'mk_MK.ISO8859-5',
'mk_mk.cp1251': 'mk_MK.CP1251',
'mk_mk.iso88595': 'mk_MK.ISO8859-5',
'mk_mk.microsoftcp1251': 'mk_MK.CP1251',
'ms': 'ms_MY.ISO8859-1',
'ms_my': 'ms_MY.ISO8859-1',
'ms_my.iso88591': 'ms_MY.ISO8859-1',
'mt': 'mt_MT.ISO8859-3',
'mt_mt': 'mt_MT.ISO8859-3',
'mt_mt.iso88593': 'mt_MT.ISO8859-3',
'nb': 'nb_NO.ISO8859-1',
'nb_no': 'nb_NO.ISO8859-1',
'nb_no.88591': 'nb_NO.ISO8859-1',
'nb_no.iso88591': 'nb_NO.ISO8859-1',
'nb_no.iso885915': 'nb_NO.ISO8859-15',
'nb_no@euro': 'nb_NO.ISO8859-15',
'nl': 'nl_NL.ISO8859-1',
'nl_be': 'nl_BE.ISO8859-1',
'nl_be.88591': 'nl_BE.ISO8859-1',
'nl_be.iso88591': 'nl_BE.ISO8859-1',
'nl_be.iso885915': 'nl_BE.ISO8859-15',
'nl_be@euro': 'nl_BE.ISO8859-15',
'nl_nl': 'nl_NL.ISO8859-1',
'nl_nl.88591': 'nl_NL.ISO8859-1',
'nl_nl.iso88591': 'nl_NL.ISO8859-1',
'nl_nl.iso885915': 'nl_NL.ISO8859-15',
'nl_nl@euro': 'nl_NL.ISO8859-15',
'nn': 'nn_NO.ISO8859-1',
'nn_no': 'nn_NO.ISO8859-1',
'nn_no.88591': 'nn_NO.ISO8859-1',
'nn_no.iso88591': 'nn_NO.ISO8859-1',
'nn_no.iso885915': 'nn_NO.ISO8859-15',
'nn_no@euro': 'nn_NO.ISO8859-15',
'no': 'no_NO.ISO8859-1',
'no@nynorsk': 'ny_NO.ISO8859-1',
'no_no': 'no_NO.ISO8859-1',
'no_no.88591': 'no_NO.ISO8859-1',
'no_no.iso88591': 'no_NO.ISO8859-1',
'no_no.iso885915': 'no_NO.ISO8859-15',
'no_no@euro': 'no_NO.ISO8859-15',
'norwegian': 'no_NO.ISO8859-1',
'norwegian.iso88591': 'no_NO.ISO8859-1',
'ny': 'ny_NO.ISO8859-1',
'ny_no': 'ny_NO.ISO8859-1',
'ny_no.88591': 'ny_NO.ISO8859-1',
'ny_no.iso88591': 'ny_NO.ISO8859-1',
'ny_no.iso885915': 'ny_NO.ISO8859-15',
'ny_no@euro': 'ny_NO.ISO8859-15',
'nynorsk': 'nn_NO.ISO8859-1',
'oc': 'oc_FR.ISO8859-1',
'oc_fr': 'oc_FR.ISO8859-1',
'oc_fr.iso88591': 'oc_FR.ISO8859-1',
'oc_fr.iso885915': 'oc_FR.ISO8859-15',
'oc_fr@euro': 'oc_FR.ISO8859-15',
'pd': 'pd_US.ISO8859-1',
'pd_de': 'pd_DE.ISO8859-1',
'pd_de.iso88591': 'pd_DE.ISO8859-1',
'pd_de.iso885915': 'pd_DE.ISO8859-15',
'pd_de@euro': 'pd_DE.ISO8859-15',
'pd_us': 'pd_US.ISO8859-1',
'pd_us.iso88591': 'pd_US.ISO8859-1',
'pd_us.iso885915': 'pd_US.ISO8859-15',
'pd_us@euro': 'pd_US.ISO8859-15',
'ph': 'ph_PH.ISO8859-1',
'ph_ph': 'ph_PH.ISO8859-1',
'ph_ph.iso88591': 'ph_PH.ISO8859-1',
'pl': 'pl_PL.ISO8859-2',
'pl_pl': 'pl_PL.ISO8859-2',
'pl_pl.iso88592': 'pl_PL.ISO8859-2',
'polish': 'pl_PL.ISO8859-2',
'portuguese': 'pt_PT.ISO8859-1',
'portuguese.iso88591': 'pt_PT.ISO8859-1',
'portuguese_brazil': 'pt_BR.ISO8859-1',
'portuguese_brazil.8859': 'pt_BR.ISO8859-1',
'posix': 'C',
'posix-utf2': 'C',
'pp': 'pp_AN.ISO8859-1',
'pp_an': 'pp_AN.ISO8859-1',
'pp_an.iso88591': 'pp_AN.ISO8859-1',
'pt': 'pt_PT.ISO8859-1',
'pt_br': 'pt_BR.ISO8859-1',
'pt_br.88591': 'pt_BR.ISO8859-1',
'pt_br.iso88591': 'pt_BR.ISO8859-1',
'pt_br.iso885915': 'pt_BR.ISO8859-15',
'pt_br@euro': 'pt_BR.ISO8859-15',
'pt_pt': 'pt_PT.ISO8859-1',
'pt_pt.88591': 'pt_PT.ISO8859-1',
'pt_pt.iso88591': 'pt_PT.ISO8859-1',
'pt_pt.iso885915': 'pt_PT.ISO8859-15',
'pt_pt.utf8@euro': 'pt_PT.UTF-8',
'pt_pt@euro': 'pt_PT.ISO8859-15',
'ro': 'ro_RO.ISO8859-2',
'ro_ro': 'ro_RO.ISO8859-2',
'ro_ro.iso88592': 'ro_RO.ISO8859-2',
'romanian': 'ro_RO.ISO8859-2',
'ru': 'ru_RU.ISO8859-5',
'ru_ru': 'ru_RU.ISO8859-5',
'ru_ru.cp1251': 'ru_RU.CP1251',
'ru_ru.iso88595': 'ru_RU.ISO8859-5',
'ru_ru.koi8r': 'ru_RU.KOI8-R',
'ru_ru.microsoftcp1251': 'ru_RU.CP1251',
'ru_ua': 'ru_UA.KOI8-U',
'ru_ua.cp1251': 'ru_UA.CP1251',
'ru_ua.koi8u': 'ru_UA.KOI8-U',
'ru_ua.microsoftcp1251': 'ru_UA.CP1251',
'rumanian': 'ro_RO.ISO8859-2',
'russian': 'ru_RU.ISO8859-5',
'se_no': 'se_NO.UTF-8',
'serbocroatian': 'sh_YU.ISO8859-2',
'sh': 'sh_YU.ISO8859-2',
'sh_hr': 'sh_HR.ISO8859-2',
'sh_hr.iso88592': 'sh_HR.ISO8859-2',
'sh_sp': 'sh_YU.ISO8859-2',
'sh_yu': 'sh_YU.ISO8859-2',
'sk': 'sk_SK.ISO8859-2',
'sk_sk': 'sk_SK.ISO8859-2',
'sk_sk.iso88592': 'sk_SK.ISO8859-2',
'sl': 'sl_SI.ISO8859-2',
'sl_cs': 'sl_CS.ISO8859-2',
'sl_si': 'sl_SI.ISO8859-2',
'sl_si.iso88592': 'sl_SI.ISO8859-2',
'slovak': 'sk_SK.ISO8859-2',
'slovene': 'sl_SI.ISO8859-2',
'slovenian': 'sl_SI.ISO8859-2',
'sp': 'sp_YU.ISO8859-5',
'sp_yu': 'sp_YU.ISO8859-5',
'spanish': 'es_ES.ISO8859-1',
'spanish.iso88591': 'es_ES.ISO8859-1',
'spanish_spain': 'es_ES.ISO8859-1',
'spanish_spain.8859': 'es_ES.ISO8859-1',
'sq': 'sq_AL.ISO8859-2',
'sq_al': 'sq_AL.ISO8859-2',
'sq_al.iso88592': 'sq_AL.ISO8859-2',
'sr': 'sr_YU.ISO8859-5',
'sr@cyrillic': 'sr_YU.ISO8859-5',
'sr_sp': 'sr_SP.ISO8859-2',
'sr_yu': 'sr_YU.ISO8859-5',
'sr_yu.cp1251@cyrillic': 'sr_YU.CP1251',
'sr_yu.iso88592': 'sr_YU.ISO8859-2',
'sr_yu.iso88595': 'sr_YU.ISO8859-5',
'sr_yu.iso88595@cyrillic': 'sr_YU.ISO8859-5',
'sr_yu.microsoftcp1251@cyrillic': 'sr_YU.CP1251',
'sr_yu.utf8@cyrillic': 'sr_YU.UTF-8',
'sr_yu@cyrillic': 'sr_YU.ISO8859-5',
'sv': 'sv_SE.ISO8859-1',
'sv_fi': 'sv_FI.ISO8859-1',
'sv_fi.iso88591': 'sv_FI.ISO8859-1',
'sv_fi.iso885915': 'sv_FI.ISO8859-15',
'sv_fi@euro': 'sv_FI.ISO8859-15',
'sv_se': 'sv_SE.ISO8859-1',
'sv_se.88591': 'sv_SE.ISO8859-1',
'sv_se.iso88591': 'sv_SE.ISO8859-1',
'sv_se.iso885915': 'sv_SE.ISO8859-15',
'sv_se@euro': 'sv_SE.ISO8859-15',
'swedish': 'sv_SE.ISO8859-1',
'swedish.iso88591': 'sv_SE.ISO8859-1',
'ta': 'ta_IN.TSCII-0',
'ta_in': 'ta_IN.TSCII-0',
'ta_in.tscii': 'ta_IN.TSCII-0',
'ta_in.tscii0': 'ta_IN.TSCII-0',
'tg': 'tg_TJ.KOI8-C',
'tg_tj': 'tg_TJ.KOI8-C',
'tg_tj.koi8c': 'tg_TJ.KOI8-C',
'th': 'th_TH.ISO8859-11',
'th_th': 'th_TH.ISO8859-11',
'th_th.iso885911': 'th_TH.ISO8859-11',
'th_th.tactis': 'th_TH.TIS620',
'th_th.tis620': 'th_TH.TIS620',
'thai': 'th_TH.ISO8859-11',
'tl': 'tl_PH.ISO8859-1',
'tl_ph': 'tl_PH.ISO8859-1',
'tl_ph.iso88591': 'tl_PH.ISO8859-1',
'tr': 'tr_TR.ISO8859-9',
'tr_tr': 'tr_TR.ISO8859-9',
'tr_tr.iso88599': 'tr_TR.ISO8859-9',
'tt': 'tt_RU.TATAR-CYR',
'tt_ru': 'tt_RU.TATAR-CYR',
'tt_ru.koi8c': 'tt_RU.KOI8-C',
'tt_ru.tatarcyr': 'tt_RU.TATAR-CYR',
'turkish': 'tr_TR.ISO8859-9',
'turkish.iso88599': 'tr_TR.ISO8859-9',
'uk': 'uk_UA.KOI8-U',
'uk_ua': 'uk_UA.KOI8-U',
'uk_ua.cp1251': 'uk_UA.CP1251',
'uk_ua.iso88595': 'uk_UA.ISO8859-5',
'uk_ua.koi8u': 'uk_UA.KOI8-U',
'uk_ua.microsoftcp1251': 'uk_UA.CP1251',
'univ': 'en_US.utf',
'universal': 'en_US.utf',
'universal.utf8@ucs4': 'en_US.UTF-8',
'ur': 'ur_PK.CP1256',
'ur_pk': 'ur_PK.CP1256',
'ur_pk.cp1256': 'ur_PK.CP1256',
'ur_pk.microsoftcp1256': 'ur_PK.CP1256',
'uz': 'uz_UZ.UTF-8',
'uz_uz': 'uz_UZ.UTF-8',
'vi': 'vi_VN.TCVN',
'vi_vn': 'vi_VN.TCVN',
'vi_vn.tcvn': 'vi_VN.TCVN',
'vi_vn.tcvn5712': 'vi_VN.TCVN',
'vi_vn.viscii': 'vi_VN.VISCII',
'vi_vn.viscii111': 'vi_VN.VISCII',
'wa': 'wa_BE.ISO8859-1',
'wa_be': 'wa_BE.ISO8859-1',
'wa_be.iso88591': 'wa_BE.ISO8859-1',
'wa_be.iso885915': 'wa_BE.ISO8859-15',
'wa_be@euro': 'wa_BE.ISO8859-15',
'yi': 'yi_US.CP1255',
'yi_us': 'yi_US.CP1255',
'yi_us.cp1255': 'yi_US.CP1255',
'yi_us.microsoftcp1255': 'yi_US.CP1255',
'zh': 'zh_CN.eucCN',
'zh_cn': 'zh_CN.gb2312',
'zh_cn.big5': 'zh_TW.big5',
'zh_cn.euc': 'zh_CN.eucCN',
'zh_cn.gb18030': 'zh_CN.gb18030',
'zh_cn.gb2312': 'zh_CN.gb2312',
'zh_cn.gbk': 'zh_CN.gbk',
'zh_hk': 'zh_HK.big5hkscs',
'zh_hk.big5': 'zh_HK.big5',
'zh_hk.big5hkscs': 'zh_HK.big5hkscs',
'zh_tw': 'zh_TW.big5',
'zh_tw.big5': 'zh_TW.big5',
'zh_tw.euc': 'zh_TW.eucTW',
}
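# Illustrative note (not part of the original module): locale.normalize()
# resolves aliases through the table above, e.g. normalize('en_gb')
# returns 'en_GB.ISO8859-1'.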
#
# This maps Windows language identifiers to locale strings.
#
# This list has been updated from
# http://msdn.microsoft.com/library/default.asp?url=/library/en-us/intl/nls_238z.asp
# to include every locale up to Windows XP.
#
# NOTE: this mapping is incomplete. If your language is missing, please
# submit a bug report to the Python bug manager, which you can find via:
# http://www.python.org/dev/
# Make sure you include the missing language identifier and the suggested
# locale code.
#
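# Illustrative lookups (not part of the original module), using entries
# defined below:
#   windows_locale.get(0x0409)  # -> "en_US"
#   windows_locale.get(0x0407)  # -> "de_DE"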
windows_locale = {
0x0436: "af_ZA", # Afrikaans
0x041c: "sq_AL", # Albanian
0x0401: "ar_SA", # Arabic - Saudi Arabia
0x0801: "ar_IQ", # Arabic - Iraq
0x0c01: "ar_EG", # Arabic - Egypt
0x1001: "ar_LY", # Arabic - Libya
0x1401: "ar_DZ", # Arabic - Algeria
0x1801: "ar_MA", # Arabic - Morocco
0x1c01: "ar_TN", # Arabic - Tunisia
0x2001: "ar_OM", # Arabic - Oman
0x2401: "ar_YE", # Arabic - Yemen
0x2801: "ar_SY", # Arabic - Syria
0x2c01: "ar_JO", # Arabic - Jordan
0x3001: "ar_LB", # Arabic - Lebanon
0x3401: "ar_KW", # Arabic - Kuwait
0x3801: "ar_AE", # Arabic - United Arab Emirates
0x3c01: "ar_BH", # Arabic - Bahrain
0x4001: "ar_QA", # Arabic - Qatar
0x042b: "hy_AM", # Armenian
0x042c: "az_AZ", # Azeri Latin
0x082c: "az_AZ", # Azeri - Cyrillic
0x042d: "eu_ES", # Basque
0x0423: "be_BY", # Belarusian
0x0445: "bn_IN", # Bengali
0x201a: "bs_BA", # Bosnian
0x141a: "bs_BA", # Bosnian - Cyrillic
0x047e: "br_FR", # Breton - France
0x0402: "bg_BG", # Bulgarian
0x0403: "ca_ES", # Catalan
0x0004: "zh_CHS",# Chinese - Simplified
0x0404: "zh_TW", # Chinese - Taiwan
0x0804: "zh_CN", # Chinese - PRC
0x0c04: "zh_HK", # Chinese - Hong Kong S.A.R.
0x1004: "zh_SG", # Chinese - Singapore
0x1404: "zh_MO", # Chinese - Macao S.A.R.
0x7c04: "zh_CHT",# Chinese - Traditional
0x041a: "hr_HR", # Croatian
0x101a: "hr_BA", # Croatian - Bosnia
0x0405: "cs_CZ", # Czech
0x0406: "da_DK", # Danish
0x048c: "gbz_AF",# Dari - Afghanistan
0x0465: "div_MV",# Divehi - Maldives
0x0413: "nl_NL", # Dutch - The Netherlands
0x0813: "nl_BE", # Dutch - Belgium
0x0409: "en_US", # English - United States
0x0809: "en_GB", # English - United Kingdom
0x0c09: "en_AU", # English - Australia
0x1009: "en_CA", # English - Canada
0x1409: "en_NZ", # English - New Zealand
0x1809: "en_IE", # English - Ireland
0x1c09: "en_ZA", # English - South Africa
0x2009: "en_JA", # English - Jamaica
0x2409: "en_CB", # English - Caribbean
0x2809: "en_BZ", # English - Belize
0x2c09: "en_TT", # English - Trinidad
0x3009: "en_ZW", # English - Zimbabwe
0x3409: "en_PH", # English - Philippines
0x0425: "et_EE", # Estonian
0x0438: "fo_FO", # Faroese
0x0464: "fil_PH",# Filipino
0x040b: "fi_FI", # Finnish
0x040c: "fr_FR", # French - France
0x080c: "fr_BE", # French - Belgium
0x0c0c: "fr_CA", # French - Canada
0x100c: "fr_CH", # French - Switzerland
0x140c: "fr_LU", # French - Luxembourg
0x180c: "fr_MC", # French - Monaco
0x0462: "fy_NL", # Frisian - Netherlands
0x0456: "gl_ES", # Galician
0x0437: "ka_GE", # Georgian
0x0407: "de_DE", # German - Germany
0x0807: "de_CH", # German - Switzerland
0x0c07: "de_AT", # German - Austria
0x1007: "de_LU", # German - Luxembourg
0x1407: "de_LI", # German - Liechtenstein
0x0408: "el_GR", # Greek
0x0447: "gu_IN", # Gujarati
0x040d: "he_IL", # Hebrew
0x0439: "hi_IN", # Hindi
0x040e: "hu_HU", # Hungarian
0x040f: "is_IS", # Icelandic
0x0421: "id_ID", # Indonesian
0x045d: "iu_CA", # Inuktitut
0x085d: "iu_CA", # Inuktitut - Latin
0x083c: "ga_IE", # Irish - Ireland
0x0434: "xh_ZA", # Xhosa - South Africa
0x0435: "zu_ZA", # Zulu
0x0410: "it_IT", # Italian - Italy
0x0810: "it_CH", # Italian - Switzerland
0x0411: "ja_JP", # Japanese
0x044b: "kn_IN", # Kannada - India
0x043f: "kk_KZ", # Kazakh
0x0457: "kok_IN",# Konkani
0x0412: "ko_KR", # Korean
0x0440: "ky_KG", # Kyrgyz
0x0426: "lv_LV", # Latvian
0x0427: "lt_LT", # Lithuanian
0x046e: "lb_LU", # Luxembourgish
0x042f: "mk_MK", # FYRO Macedonian
0x043e: "ms_MY", # Malay - Malaysia
0x083e: "ms_BN", # Malay - Brunei
0x044c: "ml_IN", # Malayalam - India
0x043a: "mt_MT", # Maltese
0x0481: "mi_NZ", # Maori
0x047a: "arn_CL",# Mapudungun
0x044e: "mr_IN", # Marathi
0x047c: "moh_CA",# Mohawk - Canada
0x0450: "mn_MN", # Mongolian
0x0461: "ne_NP", # Nepali
0x0414: "nb_NO", # Norwegian - Bokmal
0x0814: "nn_NO", # Norwegian - Nynorsk
0x0482: "oc_FR", # Occitan - France
0x0448: "or_IN", # Oriya - India
0x0463: "ps_AF", # Pashto - Afghanistan
0x0429: "fa_IR", # Persian
0x0415: "pl_PL", # Polish
0x0416: "pt_BR", # Portuguese - Brazil
0x0816: "pt_PT", # Portuguese - Portugal
0x0446: "pa_IN", # Punjabi
0x046b: "quz_BO",# Quechua (Bolivia)
0x086b: "quz_EC",# Quechua (Ecuador)
0x0c6b: "quz_PE",# Quechua (Peru)
0x0418: "ro_RO", # Romanian - Romania
0x0417: "rm_CH", # Romansh (Raeto-Romance)
0x0419: "ru_RU", # Russian
0x243b: "smn_FI",# Sami Finland
0x103b: "smj_NO",# Sami Norway
0x143b: "smj_SE",# Sami Sweden
0x043b: "se_NO", # Sami Northern Norway
0x083b: "se_SE", # Sami Northern Sweden
0x0c3b: "se_FI", # Sami Northern Finland
0x203b: "sms_FI",# Sami Skolt
0x183b: "sma_NO",# Sami Southern Norway
0x1c3b: "sma_SE",# Sami Southern Sweden
0x044f: "sa_IN", # Sanskrit
0x0c1a: "sr_SP", # Serbian - Cyrillic
0x1c1a: "sr_BA", # Serbian - Bosnia Cyrillic
0x081a: "sr_SP", # Serbian - Latin
0x181a: "sr_BA", # Serbian - Bosnia Latin
0x046c: "ns_ZA", # Northern Sotho
0x0432: "tn_ZA", # Setswana - Southern Africa
0x041b: "sk_SK", # Slovak
0x0424: "sl_SI", # Slovenian
0x040a: "es_ES", # Spanish - Spain
0x080a: "es_MX", # Spanish - Mexico
0x0c0a: "es_ES", # Spanish - Spain (Modern)
0x100a: "es_GT", # Spanish - Guatemala
0x140a: "es_CR", # Spanish - Costa Rica
0x180a: "es_PA", # Spanish - Panama
0x1c0a: "es_DO", # Spanish - Dominican Republic
0x200a: "es_VE", # Spanish - Venezuela
0x240a: "es_CO", # Spanish - Colombia
0x280a: "es_PE", # Spanish - Peru
0x2c0a: "es_AR", # Spanish - Argentina
0x300a: "es_EC", # Spanish - Ecuador
0x340a: "es_CL", # Spanish - Chile
0x380a: "es_UR", # Spanish - Uruguay
0x3c0a: "es_PY", # Spanish - Paraguay
0x400a: "es_BO", # Spanish - Bolivia
0x440a: "es_SV", # Spanish - El Salvador
0x480a: "es_HN", # Spanish - Honduras
0x4c0a: "es_NI", # Spanish - Nicaragua
0x500a: "es_PR", # Spanish - Puerto Rico
0x0441: "sw_KE", # Swahili
0x041d: "sv_SE", # Swedish - Sweden
0x081d: "sv_FI", # Swedish - Finland
0x045a: "syr_SY",# Syriac
0x0449: "ta_IN", # Tamil
0x0444: "tt_RU", # Tatar
0x044a: "te_IN", # Telugu
0x041e: "th_TH", # Thai
0x041f: "tr_TR", # Turkish
0x0422: "uk_UA", # Ukrainian
0x0420: "ur_PK", # Urdu
0x0820: "ur_IN", # Urdu - India
0x0443: "uz_UZ", # Uzbek - Latin
0x0843: "uz_UZ", # Uzbek - Cyrillic
0x042a: "vi_VN", # Vietnamese
0x0452: "cy_GB", # Welsh
}
def _print_locale():
""" Test function.
"""
categories = {}
def _init_categories(categories=categories):
for k,v in globals().items():
if k[:3] == 'LC_':
categories[k] = v
_init_categories()
del categories['LC_ALL']
print 'Locale defaults as determined by getdefaultlocale():'
print '-'*72
lang, enc = getdefaultlocale()
print 'Language: ', lang or '(undefined)'
print 'Encoding: ', enc or '(undefined)'
print
print 'Locale settings on startup:'
print '-'*72
for name,category in categories.items():
print name, '...'
lang, enc = getlocale(category)
print ' Language: ', lang or '(undefined)'
print ' Encoding: ', enc or '(undefined)'
print
print
print 'Locale settings after calling resetlocale():'
print '-'*72
resetlocale()
for name,category in categories.items():
print name, '...'
lang, enc = getlocale(category)
print ' Language: ', lang or '(undefined)'
print ' Encoding: ', enc or '(undefined)'
print
try:
setlocale(LC_ALL, "")
except:
print 'NOTE:'
print 'setlocale(LC_ALL, "") does not support the default locale'
print 'given in the OS environment variables.'
else:
print
print 'Locale settings after calling setlocale(LC_ALL, ""):'
print '-'*72
for name,category in categories.items():
print name, '...'
lang, enc = getlocale(category)
print ' Language: ', lang or '(undefined)'
print ' Encoding: ', enc or '(undefined)'
print
###
try:
LC_MESSAGES
except NameError:
pass
else:
__all__.append("LC_MESSAGES")
if __name__=='__main__':
print 'Locale aliasing:'
print
_print_locale()
print
print 'Number formatting:'
print
_test()
| bsd-3-clause |
paweljasinski/ironpython3 | Src/StdLib/Lib/test/test_gzip.py | 5 | 19918 | """Test script for the gzip module.
"""
import unittest
from test import support
import os
import io
import struct
gzip = support.import_module('gzip')
data1 = b""" int length=DEFAULTALLOC, err = Z_OK;
PyObject *RetVal;
int flushmode = Z_FINISH;
unsigned long start_total_out;
"""
data2 = b"""/* zlibmodule.c -- gzip-compatible data compression */
/* See http://www.gzip.org/zlib/
/* See http://www.winimage.com/zLibDll for Windows */
"""
class UnseekableIO(io.BytesIO):
def seekable(self):
return False
def tell(self):
raise io.UnsupportedOperation
def seek(self, *args):
raise io.UnsupportedOperation
class BaseTest(unittest.TestCase):
filename = support.TESTFN
def setUp(self):
support.unlink(self.filename)
def tearDown(self):
support.unlink(self.filename)
class TestGzip(BaseTest):
def test_write(self):
with gzip.GzipFile(self.filename, 'wb') as f:
f.write(data1 * 50)
# Try flush and fileno.
f.flush()
f.fileno()
if hasattr(os, 'fsync'):
os.fsync(f.fileno())
f.close()
# Test multiple close() calls.
f.close()
def test_read(self):
self.test_write()
# Try reading.
with gzip.GzipFile(self.filename, 'r') as f:
d = f.read()
self.assertEqual(d, data1*50)
def test_read1(self):
self.test_write()
blocks = []
nread = 0
with gzip.GzipFile(self.filename, 'r') as f:
while True:
d = f.read1()
if not d:
break
blocks.append(d)
nread += len(d)
# Check that position was updated correctly (see issue10791).
self.assertEqual(f.tell(), nread)
self.assertEqual(b''.join(blocks), data1 * 50)
def test_io_on_closed_object(self):
# Test that I/O operations on closed GzipFile objects raise a
# ValueError, just like the corresponding functions on file objects.
# Write to a file, open it for reading, then close it.
self.test_write()
f = gzip.GzipFile(self.filename, 'r')
f.close()
with self.assertRaises(ValueError):
f.read(1)
with self.assertRaises(ValueError):
f.seek(0)
with self.assertRaises(ValueError):
f.tell()
# Open the file for writing, then close it.
f = gzip.GzipFile(self.filename, 'w')
f.close()
with self.assertRaises(ValueError):
f.write(b'')
with self.assertRaises(ValueError):
f.flush()
def test_append(self):
self.test_write()
# Append to the previous file
with gzip.GzipFile(self.filename, 'ab') as f:
f.write(data2 * 15)
with gzip.GzipFile(self.filename, 'rb') as f:
d = f.read()
self.assertEqual(d, (data1*50) + (data2*15))
def test_many_append(self):
# Bug #1074261 was triggered when reading a file that contained
# many, many members. Create such a file and verify that reading it
# works.
with gzip.GzipFile(self.filename, 'wb', 9) as f:
f.write(b'a')
for i in range(0, 200):
with gzip.GzipFile(self.filename, "ab", 9) as f: # append
f.write(b'a')
# Try reading the file
with gzip.GzipFile(self.filename, "rb") as zgfile:
contents = b""
while 1:
ztxt = zgfile.read(8192)
contents += ztxt
if not ztxt: break
self.assertEqual(contents, b'a'*201)
def test_exclusive_write(self):
with gzip.GzipFile(self.filename, 'xb') as f:
f.write(data1 * 50)
with gzip.GzipFile(self.filename, 'rb') as f:
self.assertEqual(f.read(), data1 * 50)
with self.assertRaises(FileExistsError):
gzip.GzipFile(self.filename, 'xb')
def test_buffered_reader(self):
# Issue #7471: a GzipFile can be wrapped in a BufferedReader for
# performance.
self.test_write()
with gzip.GzipFile(self.filename, 'rb') as f:
with io.BufferedReader(f) as r:
lines = [line for line in r]
self.assertEqual(lines, 50 * data1.splitlines(keepends=True))
def test_readline(self):
self.test_write()
# Try .readline() with varying line lengths
with gzip.GzipFile(self.filename, 'rb') as f:
line_length = 0
while 1:
L = f.readline(line_length)
if not L and line_length != 0: break
self.assertTrue(len(L) <= line_length)
line_length = (line_length + 1) % 50
def test_readlines(self):
self.test_write()
# Try .readlines()
with gzip.GzipFile(self.filename, 'rb') as f:
L = f.readlines()
with gzip.GzipFile(self.filename, 'rb') as f:
while 1:
L = f.readlines(150)
if L == []: break
def test_seek_read(self):
self.test_write()
# Try seek, read test
with gzip.GzipFile(self.filename) as f:
while 1:
oldpos = f.tell()
line1 = f.readline()
if not line1: break
newpos = f.tell()
f.seek(oldpos) # negative seek
if len(line1)>10:
amount = 10
else:
amount = len(line1)
line2 = f.read(amount)
self.assertEqual(line1[:amount], line2)
f.seek(newpos) # positive seek
def test_seek_whence(self):
self.test_write()
# Try seek(whence=1), read test
with gzip.GzipFile(self.filename) as f:
f.read(10)
f.seek(10, whence=1)
y = f.read(10)
self.assertEqual(y, data1[20:30])
def test_seek_write(self):
# Try seek, write test
with gzip.GzipFile(self.filename, 'w') as f:
for pos in range(0, 256, 16):
f.seek(pos)
f.write(b'GZ\n')
def test_mode(self):
self.test_write()
with gzip.GzipFile(self.filename, 'r') as f:
self.assertEqual(f.myfileobj.mode, 'rb')
support.unlink(self.filename)
with gzip.GzipFile(self.filename, 'x') as f:
self.assertEqual(f.myfileobj.mode, 'xb')
def test_1647484(self):
for mode in ('wb', 'rb'):
with gzip.GzipFile(self.filename, mode) as f:
self.assertTrue(hasattr(f, "name"))
self.assertEqual(f.name, self.filename)
def test_paddedfile_getattr(self):
self.test_write()
with gzip.GzipFile(self.filename, 'rb') as f:
self.assertTrue(hasattr(f.fileobj, "name"))
self.assertEqual(f.fileobj.name, self.filename)
def test_mtime(self):
mtime = 123456789
with gzip.GzipFile(self.filename, 'w', mtime = mtime) as fWrite:
fWrite.write(data1)
with gzip.GzipFile(self.filename) as fRead:
dataRead = fRead.read()
self.assertEqual(dataRead, data1)
self.assertTrue(hasattr(fRead, 'mtime'))
self.assertEqual(fRead.mtime, mtime)
def test_metadata(self):
mtime = 123456789
with gzip.GzipFile(self.filename, 'w', mtime = mtime) as fWrite:
fWrite.write(data1)
with open(self.filename, 'rb') as fRead:
# see RFC 1952: http://www.faqs.org/rfcs/rfc1952.html
idBytes = fRead.read(2)
self.assertEqual(idBytes, b'\x1f\x8b') # gzip ID
cmByte = fRead.read(1)
self.assertEqual(cmByte, b'\x08') # deflate
flagsByte = fRead.read(1)
self.assertEqual(flagsByte, b'\x08') # only the FNAME flag is set
mtimeBytes = fRead.read(4)
self.assertEqual(mtimeBytes, struct.pack('<i', mtime)) # little-endian
xflByte = fRead.read(1)
self.assertEqual(xflByte, b'\x02') # maximum compression
osByte = fRead.read(1)
self.assertEqual(osByte, b'\xff') # OS "unknown" (OS-independent)
# Since the FNAME flag is set, the zero-terminated filename follows.
# RFC 1952 specifies that this is the name of the input file, if any.
# However, the gzip module defaults to storing the name of the output
# file in this field.
expected = self.filename.encode('Latin-1') + b'\x00'
nameBytes = fRead.read(len(expected))
self.assertEqual(nameBytes, expected)
# Since no other flags were set, the header ends here.
# Rather than process the compressed data, let's seek to the trailer.
fRead.seek(os.stat(self.filename).st_size - 8)
crc32Bytes = fRead.read(4) # CRC32 of uncompressed data [data1]
self.assertEqual(crc32Bytes, b'\xaf\xd7d\x83')
isizeBytes = fRead.read(4)
self.assertEqual(isizeBytes, struct.pack('<i', len(data1)))
def test_with_open(self):
# GzipFile supports the context management protocol
with gzip.GzipFile(self.filename, "wb") as f:
f.write(b"xxx")
f = gzip.GzipFile(self.filename, "rb")
f.close()
try:
with f:
pass
except ValueError:
pass
else:
self.fail("__enter__ on a closed file didn't raise an exception")
try:
with gzip.GzipFile(self.filename, "wb") as f:
1/0
except ZeroDivisionError:
pass
else:
self.fail("1/0 didn't raise an exception")
def test_zero_padded_file(self):
with gzip.GzipFile(self.filename, "wb") as f:
f.write(data1 * 50)
# Pad the file with zeroes
with open(self.filename, "ab") as f:
f.write(b"\x00" * 50)
with gzip.GzipFile(self.filename, "rb") as f:
d = f.read()
self.assertEqual(d, data1 * 50, "Incorrect data in file")
def test_non_seekable_file(self):
uncompressed = data1 * 50
buf = UnseekableIO()
with gzip.GzipFile(fileobj=buf, mode="wb") as f:
f.write(uncompressed)
compressed = buf.getvalue()
buf = UnseekableIO(compressed)
with gzip.GzipFile(fileobj=buf, mode="rb") as f:
self.assertEqual(f.read(), uncompressed)
def test_peek(self):
uncompressed = data1 * 200
with gzip.GzipFile(self.filename, "wb") as f:
f.write(uncompressed)
def sizes():
while True:
for n in range(5, 50, 10):
yield n
with gzip.GzipFile(self.filename, "rb") as f:
f.max_read_chunk = 33
nread = 0
for n in sizes():
s = f.peek(n)
if s == b'':
break
self.assertEqual(f.read(len(s)), s)
nread += len(s)
self.assertEqual(f.read(100), b'')
self.assertEqual(nread, len(uncompressed))
def test_textio_readlines(self):
# Issue #10791: TextIOWrapper.readlines() fails when wrapping GzipFile.
lines = (data1 * 50).decode("ascii").splitlines(keepends=True)
self.test_write()
with gzip.GzipFile(self.filename, 'r') as f:
with io.TextIOWrapper(f, encoding="ascii") as t:
self.assertEqual(t.readlines(), lines)
def test_fileobj_from_fdopen(self):
# Issue #13781: Opening a GzipFile for writing fails when using a
# fileobj created with os.fdopen().
fd = os.open(self.filename, os.O_WRONLY | os.O_CREAT)
with os.fdopen(fd, "wb") as f:
with gzip.GzipFile(fileobj=f, mode="w") as g:
pass
def test_bytes_filename(self):
str_filename = self.filename
try:
bytes_filename = str_filename.encode("ascii")
except UnicodeEncodeError:
self.skipTest("Temporary file name needs to be ASCII")
with gzip.GzipFile(bytes_filename, "wb") as f:
f.write(data1 * 50)
with gzip.GzipFile(bytes_filename, "rb") as f:
self.assertEqual(f.read(), data1 * 50)
# Sanity check that we are actually operating on the right file.
with gzip.GzipFile(str_filename, "rb") as f:
self.assertEqual(f.read(), data1 * 50)
# Testing compress/decompress shortcut functions
def test_compress(self):
for data in [data1, data2]:
for args in [(), (1,), (6,), (9,)]:
datac = gzip.compress(data, *args)
self.assertEqual(type(datac), bytes)
with gzip.GzipFile(fileobj=io.BytesIO(datac), mode="rb") as f:
self.assertEqual(f.read(), data)
def test_decompress(self):
for data in (data1, data2):
buf = io.BytesIO()
with gzip.GzipFile(fileobj=buf, mode="wb") as f:
f.write(data)
self.assertEqual(gzip.decompress(buf.getvalue()), data)
# Roundtrip with compress
datac = gzip.compress(data)
self.assertEqual(gzip.decompress(datac), data)
def test_read_truncated(self):
data = data1*50
# Drop the CRC (4 bytes) and file size (4 bytes).
truncated = gzip.compress(data)[:-8]
with gzip.GzipFile(fileobj=io.BytesIO(truncated)) as f:
self.assertRaises(EOFError, f.read)
with gzip.GzipFile(fileobj=io.BytesIO(truncated)) as f:
self.assertEqual(f.read(len(data)), data)
self.assertRaises(EOFError, f.read, 1)
# Incomplete 10-byte header.
for i in range(2, 10):
with gzip.GzipFile(fileobj=io.BytesIO(truncated[:i])) as f:
self.assertRaises(EOFError, f.read, 1)
def test_read_with_extra(self):
# Gzip data with an extra field
gzdata = (b'\x1f\x8b\x08\x04\xb2\x17cQ\x02\xff'
b'\x05\x00Extra'
b'\x0bI-.\x01\x002\xd1Mx\x04\x00\x00\x00')
with gzip.GzipFile(fileobj=io.BytesIO(gzdata)) as f:
self.assertEqual(f.read(), b'Test')
class TestOpen(BaseTest):
def test_binary_modes(self):
uncompressed = data1 * 50
with gzip.open(self.filename, "wb") as f:
f.write(uncompressed)
with open(self.filename, "rb") as f:
file_data = gzip.decompress(f.read())
self.assertEqual(file_data, uncompressed)
with gzip.open(self.filename, "rb") as f:
self.assertEqual(f.read(), uncompressed)
with gzip.open(self.filename, "ab") as f:
f.write(uncompressed)
with open(self.filename, "rb") as f:
file_data = gzip.decompress(f.read())
self.assertEqual(file_data, uncompressed * 2)
with self.assertRaises(FileExistsError):
gzip.open(self.filename, "xb")
support.unlink(self.filename)
with gzip.open(self.filename, "xb") as f:
f.write(uncompressed)
with open(self.filename, "rb") as f:
file_data = gzip.decompress(f.read())
self.assertEqual(file_data, uncompressed)
def test_implicit_binary_modes(self):
# Test implicit binary modes (no "b" or "t" in mode string).
uncompressed = data1 * 50
with gzip.open(self.filename, "w") as f:
f.write(uncompressed)
with open(self.filename, "rb") as f:
file_data = gzip.decompress(f.read())
self.assertEqual(file_data, uncompressed)
with gzip.open(self.filename, "r") as f:
self.assertEqual(f.read(), uncompressed)
with gzip.open(self.filename, "a") as f:
f.write(uncompressed)
with open(self.filename, "rb") as f:
file_data = gzip.decompress(f.read())
self.assertEqual(file_data, uncompressed * 2)
with self.assertRaises(FileExistsError):
gzip.open(self.filename, "x")
support.unlink(self.filename)
with gzip.open(self.filename, "x") as f:
f.write(uncompressed)
with open(self.filename, "rb") as f:
file_data = gzip.decompress(f.read())
self.assertEqual(file_data, uncompressed)
def test_text_modes(self):
uncompressed = data1.decode("ascii") * 50
uncompressed_raw = uncompressed.replace("\n", os.linesep)
with gzip.open(self.filename, "wt") as f:
f.write(uncompressed)
with open(self.filename, "rb") as f:
file_data = gzip.decompress(f.read()).decode("ascii")
self.assertEqual(file_data, uncompressed_raw)
with gzip.open(self.filename, "rt") as f:
self.assertEqual(f.read(), uncompressed)
with gzip.open(self.filename, "at") as f:
f.write(uncompressed)
with open(self.filename, "rb") as f:
file_data = gzip.decompress(f.read()).decode("ascii")
self.assertEqual(file_data, uncompressed_raw * 2)
def test_fileobj(self):
uncompressed_bytes = data1 * 50
uncompressed_str = uncompressed_bytes.decode("ascii")
compressed = gzip.compress(uncompressed_bytes)
with gzip.open(io.BytesIO(compressed), "r") as f:
self.assertEqual(f.read(), uncompressed_bytes)
with gzip.open(io.BytesIO(compressed), "rb") as f:
self.assertEqual(f.read(), uncompressed_bytes)
with gzip.open(io.BytesIO(compressed), "rt") as f:
self.assertEqual(f.read(), uncompressed_str)
def test_bad_params(self):
# Test invalid parameter combinations.
with self.assertRaises(TypeError):
gzip.open(123.456)
with self.assertRaises(ValueError):
gzip.open(self.filename, "wbt")
with self.assertRaises(ValueError):
gzip.open(self.filename, "xbt")
with self.assertRaises(ValueError):
gzip.open(self.filename, "rb", encoding="utf-8")
with self.assertRaises(ValueError):
gzip.open(self.filename, "rb", errors="ignore")
with self.assertRaises(ValueError):
gzip.open(self.filename, "rb", newline="\n")
def test_encoding(self):
# Test non-default encoding.
uncompressed = data1.decode("ascii") * 50
uncompressed_raw = uncompressed.replace("\n", os.linesep)
with gzip.open(self.filename, "wt", encoding="utf-16") as f:
f.write(uncompressed)
with open(self.filename, "rb") as f:
file_data = gzip.decompress(f.read()).decode("utf-16")
self.assertEqual(file_data, uncompressed_raw)
with gzip.open(self.filename, "rt", encoding="utf-16") as f:
self.assertEqual(f.read(), uncompressed)
def test_encoding_error_handler(self):
# Test with non-default encoding error handler.
with gzip.open(self.filename, "wb") as f:
f.write(b"foo\xffbar")
with gzip.open(self.filename, "rt", encoding="ascii", errors="ignore") \
as f:
self.assertEqual(f.read(), "foobar")
def test_newline(self):
# Test with explicit newline (universal newline mode disabled).
uncompressed = data1.decode("ascii") * 50
with gzip.open(self.filename, "wt", newline="\n") as f:
f.write(uncompressed)
with gzip.open(self.filename, "rt", newline="\r") as f:
self.assertEqual(f.readlines(), [uncompressed])
def test_main(verbose=None):
support.run_unittest(TestGzip, TestOpen)
if __name__ == "__main__":
test_main(verbose=True)
| apache-2.0 |
MilesDuronCIMAT/book_exercises | chapter_11/functional_tests/test_list_item_validation.py | 1 | 1423 | from unittest import skip
from .base import FunctionalTest
class ItemValidationTest(FunctionalTest):
@skip
def test_cannot_add_empty_list_items(self):
# Edith goes to the home page and accidentally tries to submit
# an empty list item. She hits Enter on the empty input box
self.browser.get(self.server_url)
self.browser.get_item_input_box().send_keys('\n')
# The home page refreshes, and there is an error message saying
# that list items cannot be blank
error = self.browser.find_element_by_css_selector('.has-error')
self.assertEqual(error.text, "You can't have an empty list item")
# She tries again with some text for the item, which now works
self.browser.get_item_input_box().send_keys('Buy milk\n')
self.check_for_row_in_list_table('1: Buy milk')
# Perversely, she now decides to submit a second blank list item
self.browser.get_item_input_box().send_keys('\n')
# She receives a similar warning on the list page
self.check_for_row_in_list_table('1: Buy milk')
error = self.browser.find_element_by_css_selector('.has-error')
self.assertEqual(error.text, "You can't have an empty list item")
# And she can correct it by filling some text in
self.browser.get_item_input_box().send_keys('Make tea\n')
self.check_for_row_in_list_table('1: Buy milk')
self.check_for_row_in_list_table('2: Make tea')
| mit |
csueiras/rednaskela | rednaskela/utils.py | 1 | 1462 | #
# rednaskela.utils
#
# REDNASKELA: The Mobile Mini-RTS
#
# The MIT License (MIT)
#
# Copyright (c) 2013 Christian A. Sueiras
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
""" Utilities """
import re
import hashlib
EMAIL_REGEX = re.compile(r"[^@]+@[^@]+\.[^@]+")
def is_valid_email(email):
return False if not EMAIL_REGEX.match(email) else True
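# Illustrative behaviour (added for clarity, not in the original file):
#   is_valid_email('user@example.com')  # -> True
#   is_valid_email('no-at-sign')        # -> False (regex requires user@host.tld)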
def hash(hash_string, salt):
return hashlib.sha224(salt + hash_string).hexdigest() | mit |
absent1706/sqlalchemy-mixins | sqlalchemy_mixins/tests/test_session.py | 1 | 1809 | import unittest
import sqlalchemy as sa
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import Session
from sqlalchemy_mixins.session import SessionMixin, NoSessionError
Base = declarative_base()
engine = create_engine('sqlite:///:memory:', echo=False)
session = Session(engine)
class BaseModel(Base, SessionMixin):
__abstract__ = True
pass
class User(BaseModel):
__tablename__ = 'user'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.String)
class Post(BaseModel):
__tablename__ = 'post'
id = sa.Column(sa.Integer, primary_key=True)
body = sa.Column(sa.String)
class TestSessionMixin(unittest.TestCase):
def setUp(self):
Base.metadata.create_all(engine)
def test_set_session(self):
# before setting session, error is raised
with self.assertRaises(NoSessionError):
_ = BaseModel.session
with self.assertRaises(NoSessionError):
_ = User.session
with self.assertRaises(NoSessionError):
_ = Post.session
# query doesn't work too
with self.assertRaises(NoSessionError):
_ = User.query
with self.assertRaises(NoSessionError):
_ = Post.query
# after setting session, all is OK
BaseModel.set_session(session)
self.assertEqual(BaseModel.session, session)
self.assertEqual(User.session, session)
self.assertEqual(Post.session, session)
self.assertEqual(User.query.first(), session.query(User).first())
self.assertEqual(Post.query.first(), session.query(Post).first())
def tearDown(self):
Base.metadata.create_all(engine)
if __name__ == '__main__': # pragma: no cover
unittest.main()
| mit |
treeio/treeio | treeio/core/sanitizer.py | 3 | 18205 | # encoding: utf-8
# Copyright 2011 Tree.io Limited
# This file is part of Treeio.
# License www.tree.io/license
import re
from xml.sax.saxutils import escape, unescape
import html5lib
from html5lib import treebuilders, treewalkers, serializer
from html5lib.tokenizer import HTMLTokenizer
from html5lib.constants import tokenTypes
class HTMLSanitizerMixin(object):
""" sanitization of XHTML+MathML+SVG and of inline style attributes."""
acceptable_elements = ['a', 'abbr', 'acronym', 'address', 'area',
'article', 'aside', 'audio', 'b', 'big', 'blockquote', 'br', 'button',
'canvas', 'caption', 'center', 'cite', 'code', 'col', 'colgroup',
'command', 'datagrid', 'datalist', 'dd', 'del', 'details', 'dfn',
'dialog', 'dir', 'div', 'dl', 'dt', 'em', 'event-source', 'fieldset',
'figcaption', 'figure', 'footer', 'font', 'form', 'header', 'h1',
'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input', 'ins',
'keygen', 'kbd', 'label', 'legend', 'li', 'm', 'map', 'menu', 'meter',
'multicol', 'nav', 'nextid', 'ol', 'output', 'optgroup', 'option',
'p', 'pre', 'progress', 'q', 's', 'samp', 'section', 'select',
'small', 'sound', 'source', 'spacer', 'span', 'strike', 'strong',
'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'time', 'tfoot',
'th', 'thead', 'tr', 'tt', 'u', 'ul', 'var', 'video']
mathml_elements = ['maction', 'math', 'merror', 'mfrac', 'mi',
'mmultiscripts', 'mn', 'mo', 'mover', 'mpadded', 'mphantom',
'mprescripts', 'mroot', 'mrow', 'mspace', 'msqrt', 'mstyle', 'msub',
'msubsup', 'msup', 'mtable', 'mtd', 'mtext', 'mtr', 'munder',
'munderover', 'none']
svg_elements = ['a', 'animate', 'animateColor', 'animateMotion',
'animateTransform', 'clipPath', 'circle', 'defs', 'desc', 'ellipse',
'font-face', 'font-face-name', 'font-face-src', 'g', 'glyph', 'hkern',
'linearGradient', 'line', 'marker', 'metadata', 'missing-glyph',
'mpath', 'path', 'polygon', 'polyline', 'radialGradient', 'rect',
'set', 'stop', 'svg', 'switch', 'text', 'tspan', 'use']
acceptable_attributes = ['abbr', 'accept', 'accept-charset', 'accesskey',
'action', 'align', 'alt', 'autocomplete', 'autofocus', 'axis',
'background', 'balance', 'bgcolor', 'bgproperties', 'border',
'bordercolor', 'bordercolordark', 'bordercolorlight', 'bottompadding',
'cellpadding', 'cellspacing', 'ch', 'challenge', 'char', 'charoff',
'choff', 'charset', 'checked', 'cite', 'class', 'clear', 'color', 'callback',
'cols', 'colspan', 'compact', 'contenteditable', 'controls', 'coords',
'data', 'datafld', 'datapagesize', 'datasrc', 'datetime', 'default',
'delay', 'dir', 'disabled', 'draggable', 'dynsrc', 'enctype', 'end',
'face', 'for', 'form', 'field', 'frame', 'galleryimg', 'gutter', 'headers',
'height', 'hidefocus', 'hidden', 'high', 'href', 'hreflang', 'hspace',
'icon', 'id', 'inputmode', 'ismap', 'keytype', 'label', 'leftspacing',
'lang', 'list', 'longdesc', 'loop', 'loopcount', 'loopend',
'loopstart', 'low', 'lowsrc', 'max', 'maxlength', 'media', 'method',
'min', 'multiple', 'name', 'nohref', 'noshade', 'nowrap', 'open',
'optimum', 'pattern', 'ping', 'point-size', 'prompt', 'pqg',
'radiogroup', 'readonly', 'rel', 'repeat-max', 'repeat-min',
'replace', 'required', 'rev', 'rightspacing', 'rows', 'rowspan',
'rules', 'scope', 'selected', 'shape', 'size', 'span', 'src', 'start',
'step', 'style', 'summary', 'suppress', 'tabindex', 'target',
'template', 'title', 'toppadding', 'type', 'unselectable', 'usemap',
'urn', 'valign', 'value', 'variable', 'volume', 'vspace', 'vrml',
'width', 'wrap', 'xml:lang']
mathml_attributes = ['actiontype', 'align', 'columnalign', 'columnalign',
'columnalign', 'columnlines', 'columnspacing', 'columnspan', 'depth',
'display', 'displaystyle', 'equalcolumns', 'equalrows', 'fence',
'fontstyle', 'fontweight', 'frame', 'height', 'linethickness', 'lspace',
'mathbackground', 'mathcolor', 'mathvariant', 'mathvariant', 'maxsize',
'minsize', 'other', 'rowalign', 'rowalign', 'rowalign', 'rowlines',
'rowspacing', 'rowspan', 'rspace', 'scriptlevel', 'selection',
'separator', 'stretchy', 'width', 'width', 'xlink:href', 'xlink:show',
'xlink:type', 'xmlns', 'xmlns:xlink']
svg_attributes = ['accent-height', 'accumulate', 'additive', 'alphabetic',
'arabic-form', 'ascent', 'attributeName', 'attributeType',
'baseProfile', 'bbox', 'begin', 'by', 'calcMode', 'cap-height',
'class', 'clip-path', 'color', 'color-rendering', 'content', 'cx',
'cy', 'd', 'dx', 'dy', 'descent', 'display', 'dur', 'end', 'fill',
'fill-opacity', 'fill-rule', 'font-family', 'font-size',
'font-stretch', 'font-style', 'font-variant', 'font-weight', 'from',
'fx', 'fy', 'g1', 'g2', 'glyph-name', 'gradientUnits', 'hanging',
'height', 'horiz-adv-x', 'horiz-origin-x', 'id', 'ideographic', 'k',
'keyPoints', 'keySplines', 'keyTimes', 'lang', 'marker-end',
'marker-mid', 'marker-start', 'markerHeight', 'markerUnits',
'markerWidth', 'mathematical', 'max', 'min', 'name', 'offset',
'opacity', 'orient', 'origin', 'overline-position',
'overline-thickness', 'panose-1', 'path', 'pathLength', 'points',
'preserveAspectRatio', 'r', 'refX', 'refY', 'repeatCount',
'repeatDur', 'requiredExtensions', 'requiredFeatures', 'restart',
'rotate', 'rx', 'ry', 'slope', 'stemh', 'stemv', 'stop-color',
'stop-opacity', 'strikethrough-position', 'strikethrough-thickness',
'stroke', 'stroke-dasharray', 'stroke-dashoffset', 'stroke-linecap',
'stroke-linejoin', 'stroke-miterlimit', 'stroke-opacity',
'stroke-width', 'systemLanguage', 'target', 'text-anchor', 'to',
'transform', 'type', 'u1', 'u2', 'underline-position',
'underline-thickness', 'unicode', 'unicode-range', 'units-per-em',
'values', 'version', 'viewBox', 'visibility', 'width', 'widths', 'x',
'x-height', 'x1', 'x2', 'xlink:actuate', 'xlink:arcrole',
'xlink:href', 'xlink:role', 'xlink:show', 'xlink:title', 'xlink:type',
'xml:base', 'xml:lang', 'xml:space', 'xmlns', 'xmlns:xlink', 'y',
'y1', 'y2', 'zoomAndPan']
attr_val_is_uri = ['href', 'src', 'cite', 'action', 'longdesc',
'xlink:href', 'xml:base']
svg_attr_val_allows_ref = ['clip-path', 'color-profile', 'cursor', 'fill',
'filter', 'marker', 'marker-start', 'marker-mid', 'marker-end',
'mask', 'stroke']
svg_allow_local_href = ['altGlyph', 'animate', 'animateColor',
'animateMotion', 'animateTransform', 'cursor', 'feImage', 'filter',
'linearGradient', 'pattern', 'radialGradient', 'textpath', 'tref',
'set', 'use']
acceptable_css_properties = ['azimuth', 'background-color',
'border-bottom-color', 'border-collapse', 'border-color',
'border-left-color', 'border-right-color', 'border-top-color', 'clear',
'color', 'cursor', 'direction', 'display', 'elevation', 'float', 'font',
'font-family', 'font-size', 'font-style', 'font-variant', 'font-weight',
'height', 'letter-spacing', 'line-height', 'overflow', 'pause',
'pause-after', 'pause-before', 'pitch', 'pitch-range', 'richness',
'speak', 'speak-header', 'speak-numeral', 'speak-punctuation',
'speech-rate', 'stress', 'text-align', 'text-decoration', 'text-indent',
'unicode-bidi', 'vertical-align', 'voice-family', 'volume',
'white-space', 'width']
acceptable_css_keywords = ['auto', 'aqua', 'black', 'block', 'blue',
'bold', 'both', 'bottom', 'brown', 'center', 'collapse', 'dashed',
'dotted', 'fuchsia', 'gray', 'green', '!important', 'italic', 'left',
'lime', 'maroon', 'medium', 'none', 'navy', 'normal', 'nowrap', 'olive',
'pointer', 'purple', 'red', 'right', 'solid', 'silver', 'teal', 'top',
'transparent', 'underline', 'white', 'yellow']
acceptable_svg_properties = ['fill', 'fill-opacity', 'fill-rule',
'stroke', 'stroke-width', 'stroke-linecap', 'stroke-linejoin',
'stroke-opacity']
acceptable_protocols = ['ed2k', 'ftp', 'http', 'https', 'irc',
'mailto', 'news', 'gopher', 'nntp', 'telnet', 'webcal',
'xmpp', 'callto', 'feed', 'urn', 'aim', 'rsync', 'tag',
'ssh', 'sftp', 'rtsp', 'afs']
remove_tags = ['script', 'style']
# subclasses may define their own versions of these constants
allowed_elements = acceptable_elements + mathml_elements + svg_elements
allowed_attributes = acceptable_attributes + \
mathml_attributes + svg_attributes
allowed_css_properties = acceptable_css_properties
allowed_css_keywords = acceptable_css_keywords
allowed_svg_properties = acceptable_svg_properties
allowed_protocols = acceptable_protocols
# Sanitize the +html+, escaping all elements not in ALLOWED_ELEMENTS, and
# stripping out all # attributes not in ALLOWED_ATTRIBUTES. Style
# attributes are parsed, and a restricted set, # specified by
# ALLOWED_CSS_PROPERTIES and ALLOWED_CSS_KEYWORDS, are allowed through.
# attributes in ATTR_VAL_IS_URI are scanned, and only URI schemes specified
# in ALLOWED_PROTOCOLS are allowed.
#
# sanitize_html('<script> do_nasty_stuff() </script>')
# => &lt;script&gt; do_nasty_stuff() &lt;/script&gt;
# sanitize_html('<a href="javascript: sucker();">Click here for $100</a>')
# => <a>Click here for $100</a>
def sanitize_token(self, token):
# accommodate filters which use token_type differently
token_type = token["type"]
if token_type in tokenTypes.keys():
token_type = tokenTypes[token_type]
if token_type in (tokenTypes["StartTag"], tokenTypes["EndTag"],
tokenTypes["EmptyTag"]):
token["name"] = token["name"].lower()
if token["name"] in self.allowed_elements:
if "data" in token:
attrs = dict([(name, val) for name, val in
token["data"][::-1]
if name in self.allowed_attributes])
for attr in self.attr_val_is_uri:
if not attrs.has_key(attr):
continue
val_unescaped = re.sub("[`\000-\040\177-\240\s]+", '',
unescape(attrs[attr])).lower()
# remove replacement characters from unescaped
# characters
val_unescaped = val_unescaped.replace(u"\ufffd", "")
if (re.match("^[a-z0-9][-+.a-z0-9]*:", val_unescaped) and
(val_unescaped.split(':')[0] not in
self.allowed_protocols)):
del attrs[attr]
for attr in self.svg_attr_val_allows_ref:
if attr in attrs:
attrs[attr] = re.sub(r'url\s*\(\s*[^#\s][^)]+?\)',
' ',
unescape(attrs[attr]))
if (token["name"] in self.svg_allow_local_href and
'xlink:href' in attrs and re.search('^\s*[^#\s].*',
attrs['xlink:href'])):
del attrs['xlink:href']
if 'style' in attrs:
attrs['style'] = self.sanitize_css(attrs['style'])
token["data"] = [[name, val]
for name, val in attrs.items()]
return token
else:
if token["name"] in self.remove_tags:
token["name"] = "toberemoved"
if token_type == tokenTypes["EndTag"]:
token["data"] = "</%s>" % token["name"]
elif token["data"]:
attrs = ''.join([' %s="%s"' % (k, escape(v))
for k, v in token["data"]])
token["data"] = "<%s%s>" % (token["name"], attrs)
else:
token["data"] = "<%s>" % token["name"]
if token.get("selfClosing"):
token["data"] = token["data"][:-1] + "/>"
if token["type"] in tokenTypes.keys():
token["type"] = "Characters"
else:
token["type"] = tokenTypes["Characters"]
if "name" in token and token["name"] == "style":
print "style", token["data"], dir(token)
return token
elif token_type == tokenTypes["Comment"]:
pass
else:
return token
def sanitize_css(self, style):
# disallow urls
style = re.compile('url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ', style)
# gauntlet
if not re.match("""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style):
return ''
if not re.match("^\s*([-\w]+\s*:[^:;]*(;\s*|$))*$", style):
return ''
clean = []
for prop, value in re.findall("([-\w]+)\s*:\s*([^:;]*)", style):
if not value:
continue
if prop.lower() in self.allowed_css_properties:
clean.append(prop + ': ' + value + ';')
elif prop.split('-')[0].lower() in ['background', 'border', 'margin',
'padding']:
for keyword in value.split():
if keyword not in self.acceptable_css_keywords and \
not re.match("^(#[0-9a-f]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$", keyword):
break
else:
clean.append(prop + ': ' + value + ';')
elif prop.lower() in self.allowed_svg_properties:
clean.append(prop + ': ' + value + ';')
return ' '.join(clean)
class HTMLSanitizer(HTMLTokenizer, HTMLSanitizerMixin):
def __init__(self, stream, encoding=None, parseMeta=True, useChardet=True,
lowercaseElementName=False, lowercaseAttrName=False):
# Change case matching defaults as we only output lowercase html anyway
# This solution doesn't seem ideal...
HTMLTokenizer.__init__(self, stream, encoding, parseMeta, useChardet,
lowercaseElementName, lowercaseAttrName)
def __iter__(self):
for token in HTMLTokenizer.__iter__(self):
token = self.sanitize_token(token)
if token:
yield token
def clean_html(buf):
"""Cleans HTML of dangerous tags and content."""
buf = buf.strip()
if not buf:
return buf
html_parser = html5lib.HTMLParser(
tree=treebuilders.getTreeBuilder("dom"), tokenizer=HTMLSanitizer)
dom_tree = html_parser.parseFragment(buf)
walker = treewalkers.getTreeWalker("dom")
stream = walker(dom_tree)
s = serializer.htmlserializer.HTMLSerializer(
omit_optional_tags=False, quote_attr_values=True)
output = s.render(stream, 'utf-8')
while 'toberemoved' in output:
oldoutput = output
matches = re.findall(
r'<toberemoved.*?>.*?</toberemoved>', output, re.DOTALL)
for s in matches:
output = output.replace(s, '')
matches = re.findall(r'</toberemoved>', output, re.DOTALL)
for s in matches:
output = output.replace(s, '')
matches = re.findall(r'<toberemoved.*?>', output, re.DOTALL)
for s in matches:
output = output.replace(s, '')
if output == oldoutput:
break
return output
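# Illustrative use (added for clarity, not in the original file): tags in
# remove_tags and attributes outside the whitelist are stripped, e.g.
#   clean_html('<p onclick="x()">Hi</p><script>alert(1)</script>')
# yields roughly '<p>Hi</p>'.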
| mit |
ToBeReplaced/ansible-modules-extras | database/postgresql/postgresql_ext.py | 81 | 5801 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: postgresql_ext
short_description: Add or remove PostgreSQL extensions from a database.
description:
- Add or remove PostgreSQL extensions from a database.
version_added: "0.1"
options:
name:
description:
- name of the extension to add or remove
required: true
default: null
db:
description:
- name of the database to add or remove the extension to/from
required: true
default: null
login_user:
description:
- The username used to authenticate with
required: false
default: null
login_password:
description:
- The password used to authenticate with
required: false
default: null
login_host:
description:
- Host running the database
required: false
default: localhost
port:
description:
- Database port to connect to.
required: false
default: 5432
state:
description:
- The database extension state
required: false
default: present
choices: [ "present", "absent" ]
notes:
- The default authentication assumes that you are either logging in as or sudo'ing to the C(postgres) account on the host.
- This module uses I(psycopg2), a Python PostgreSQL database adapter. You must ensure that psycopg2 is installed on
the host before using this module. If the remote host is the PostgreSQL server (which is the default case), then PostgreSQL must also be installed on the remote host. For Ubuntu-based systems, install the C(postgresql), C(libpq-dev), and C(python-psycopg2) packages on the remote host before using this module.
requirements: [ psycopg2 ]
author: "Daniel Schep (@dschep)"
'''
EXAMPLES = '''
# Adds postgis to the database "acme"
- postgresql_ext: name=postgis db=acme
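# Removes postgis from the database "acme" (illustrative variant; state
# defaults to "present" in the example above)
- postgresql_ext: name=postgis db=acme state=absent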
'''
try:
import psycopg2
import psycopg2.extras
except ImportError:
postgresqldb_found = False
else:
postgresqldb_found = True
class NotSupportedError(Exception):
pass
# ===========================================
# PostgreSQL module specific support methods.
#
def ext_exists(cursor, ext):
query = "SELECT * FROM pg_extension WHERE extname=%(ext)s"
cursor.execute(query, {'ext': ext})
return cursor.rowcount == 1
def ext_delete(cursor, ext):
if ext_exists(cursor, ext):
query = "DROP EXTENSION \"%s\"" % ext
cursor.execute(query)
return True
else:
return False
def ext_create(cursor, ext):
if not ext_exists(cursor, ext):
query = 'CREATE EXTENSION "%s"' % ext
cursor.execute(query)
return True
else:
return False
# ===========================================
# Module execution.
#
def main():
module = AnsibleModule(
argument_spec=dict(
login_user=dict(default="postgres"),
login_password=dict(default=""),
login_host=dict(default=""),
port=dict(default="5432"),
db=dict(required=True),
ext=dict(required=True, aliases=['name']),
state=dict(default="present", choices=["absent", "present"]),
),
supports_check_mode = True
)
if not postgresqldb_found:
module.fail_json(msg="the python psycopg2 module is required")
db = module.params["db"]
ext = module.params["ext"]
port = module.params["port"]
state = module.params["state"]
changed = False
# To use defaults values, keyword arguments must be absent, so
# check which values are empty and don't include in the **kw
# dictionary
params_map = {
"login_host":"host",
"login_user":"user",
"login_password":"password",
"port":"port"
}
kw = dict( (params_map[k], v) for (k, v) in module.params.iteritems()
if k in params_map and v != '' )
try:
db_connection = psycopg2.connect(database=db, **kw)
        # Enable autocommit so we can create extensions; compare numeric
        # version parts, since string comparison misorders e.g. '2.10' < '2.4.2'
        if tuple(int(p) for p in psycopg2.__version__.split(' ')[0].split('.')[:3] if p.isdigit()) >= (2, 4, 2):
db_connection.autocommit = True
else:
db_connection.set_isolation_level(psycopg2
.extensions
.ISOLATION_LEVEL_AUTOCOMMIT)
cursor = db_connection.cursor(
cursor_factory=psycopg2.extras.DictCursor)
except Exception, e:
module.fail_json(msg="unable to connect to database: %s" % e)
try:
if module.check_mode:
if state == "absent":
changed = not db_exists(cursor, ext)
elif state == "present":
changed = db_exists(cursor, ext)
module.exit_json(changed=changed,ext=ext)
if state == "absent":
changed = ext_delete(cursor, ext)
elif state == "present":
changed = ext_create(cursor, ext)
except NotSupportedError, e:
module.fail_json(msg=str(e))
except Exception, e:
module.fail_json(msg="Database query failed: %s" % e)
module.exit_json(changed=changed, db=db)
# import module snippets
from ansible.module_utils.basic import *
main()
| gpl-3.0 |
Jumpscale/jumpscale6_core | apps/jsftpserver/pyftpdlib/servers.py | 4 | 20566 | #!/usr/bin/env python
# $Id: servers.py 1230 2013-07-16 16:07:08Z btimby $
# ======================================================================
# Copyright (C) 2007-2013 Giampaolo Rodola' <g.rodola@gmail.com>
#
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# ======================================================================
"""
This module contains the main FTPServer class which listens on a
host:port and dispatches the incoming connections to a handler.
The concurrency is handled asynchronously by the main process thread,
meaning the handler cannot block otherwise the whole server will hang.
Other than that we have 2 subclasses changing the asynchronous concurrency
model using multiple threads or processes.
You might be interested in these in case your code contains blocking
parts which cannot be adapted to the base async model or if the
underlying filesystem is particularly slow, see:
https://code.google.com/p/pyftpdlib/issues/detail?id=197
https://code.google.com/p/pyftpdlib/issues/detail?id=212
Two classes are provided:
- ThreadingFTPServer
- MultiprocessFTPServer
...spawning a new thread or process every time a client connects.
The main thread will be async-based and be used only to accept new
connections.
Every time a new connection comes in that will be dispatched to a
separate thread/process which internally will run its own IO loop.
This way the handler handling that connections will be free to block
without hanging the whole FTP server.
"""
import os
import socket
import traceback
import sys
import errno
import select
import logging
import signal
import time
from pyftpdlib.log import logger
from pyftpdlib.ioloop import Acceptor, IOLoop
__all__ = ['FTPServer']
_BSD = 'bsd' in sys.platform
# ===================================================================
# --- base class
# ===================================================================
class FTPServer(Acceptor):
"""Creates a socket listening on <address>, dispatching the requests
to a <handler> (typically FTPHandler class).
Depending on the type of address specified IPv4 or IPv6 connections
(or both, depending from the underlying system) will be accepted.
All relevant session information is stored in class attributes
described below.
- (int) max_cons:
number of maximum simultaneous connections accepted (defaults
to 512). Can be set to 0 for unlimited but it is recommended
to always have a limit to avoid running out of file descriptors
(DoS).
- (int) max_cons_per_ip:
number of maximum connections accepted for the same IP address
(defaults to 0 == unlimited).
"""
max_cons = 512
max_cons_per_ip = 0
def __init__(self, address_or_socket, handler, ioloop=None, backlog=5):
"""Creates a socket listening on 'address' dispatching
connections to a 'handler'.
- (tuple) address_or_socket: the (host, port) pair on which
the command channel will listen for incoming connections or
an existent socket object.
- (instance) handler: the handler class to use.
- (instance) ioloop: a pyftpdlib.ioloop.IOLoop instance
- (int) backlog: the maximum number of queued connections
passed to listen(). If a connection request arrives when
the queue is full the client may raise ECONNRESET.
Defaults to 5.
"""
Acceptor.__init__(self, ioloop=ioloop)
self.handler = handler
self.backlog = backlog
self.ip_map = []
# in case of FTPS class not properly configured we want errors
# to be raised here rather than later, when client connects
if hasattr(handler, 'get_ssl_context'):
handler.get_ssl_context()
if callable(getattr(address_or_socket, 'listen', None)):
sock = address_or_socket
sock.setblocking(0)
self.set_socket(sock)
if hasattr(sock, 'family'):
self._af = sock.family
else:
# python 2.4
ip, port = self.socket.getsockname()[:2]
self._af = socket.getaddrinfo(ip, port, socket.AF_UNSPEC,
socket.SOCK_STREAM)[0][0]
else:
self._af = self.bind_af_unspecified(address_or_socket)
self.listen(backlog)
@property
def address(self):
return self.socket.getsockname()[:2]
def _map_len(self):
return len(self.ioloop.socket_map)
def _accept_new_cons(self):
"""Return True if the server is willing to accept new connections."""
if not self.max_cons:
return True
else:
return self._map_len() <= self.max_cons
def _log_start(self):
if not logging.getLogger().handlers:
# If we get to this point it means the user hasn't
# configured logger. We want to log by default so
# we configure logging ourselves so that it will
# print to stderr.
from pyftpdlib.ioloop import _config_logging
_config_logging()
if self.handler.passive_ports:
pasv_ports = "%s->%s" % (self.handler.passive_ports[0],
self.handler.passive_ports[-1])
else:
pasv_ports = None
addr = self.address
logger.info(">>> starting FTP server on %s:%s, pid=%i <<<"
% (addr[0], addr[1], os.getpid()))
logger.info("poller: %r", self.ioloop.__class__)
logger.info("masquerade (NAT) address: %s",
self.handler.masquerade_address)
logger.info("passive ports: %s", pasv_ports)
if os.name == 'posix':
logger.info("use sendfile(2): %s", self.handler.use_sendfile)
def serve_forever(self, timeout=None, blocking=True, handle_exit=True):
"""Start serving.
- (float) timeout: the timeout passed to the underlying IO
loop expressed in seconds (default 1.0).
- (bool) blocking: if False loop once and then return the
timeout of the next scheduled call next to expire soonest
(if any).
- (bool) handle_exit: when True catches KeyboardInterrupt and
SystemExit exceptions (generally caused by SIGTERM / SIGINT
signals) and gracefully exits after cleaning up resources.
Also, logs server start and stop.
"""
if handle_exit:
            log = handle_exit and blocking
if log:
self._log_start()
try:
self.ioloop.loop(timeout, blocking)
except (KeyboardInterrupt, SystemExit):
pass
if blocking:
if log:
logger.info(">>> shutting down FTP server (%s active fds) <<<",
self._map_len())
self.close_all()
else:
self.ioloop.loop(timeout, blocking)
def handle_accepted(self, sock, addr):
"""Called when remote client initiates a connection."""
handler = None
ip = None
try:
handler = self.handler(sock, self, ioloop=self.ioloop)
if not handler.connected:
return
ip = addr[0]
self.ip_map.append(ip)
# For performance and security reasons we should always set a
# limit for the number of file descriptors that socket_map
# should contain. When we're running out of such limit we'll
# use the last available channel for sending a 421 response
# to the client before disconnecting it.
if not self._accept_new_cons():
handler.handle_max_cons()
return
# accept only a limited number of connections from the same
# source address.
if self.max_cons_per_ip:
if self.ip_map.count(ip) > self.max_cons_per_ip:
handler.handle_max_cons_per_ip()
return
try:
handler.handle()
except:
handler.handle_error()
else:
return handler
except Exception:
# This is supposed to be an application bug that should
# be fixed. We do not want to tear down the server though
# (DoS). We just log the exception, hoping that someone
# will eventually file a bug. References:
# - http://code.google.com/p/pyftpdlib/issues/detail?id=143
# - http://code.google.com/p/pyftpdlib/issues/detail?id=166
# - https://groups.google.com/forum/#!topic/pyftpdlib/h7pPybzAx14
logger.error(traceback.format_exc())
if handler is not None:
handler.close()
else:
if ip is not None and ip in self.ip_map:
self.ip_map.remove(ip)
def handle_error(self):
"""Called to handle any uncaught exceptions."""
try:
raise
except Exception:
logger.error(traceback.format_exc())
self.close()
def close_all(self):
"""Stop serving and also disconnects all currently connected
clients.
"""
return self.ioloop.close()
# ===================================================================
# --- extra implementations
# ===================================================================
class _SpawnerBase(FTPServer):
"""Base class shared by multiple threads/process dispatcher.
Not supposed to be used.
"""
# how many seconds to wait when join()ing parent's threads
# or processes
join_timeout = 5
_lock = None
_exit = None
def __init__(self, address, handler, ioloop=None):
FTPServer.__init__(self, address, handler, ioloop)
self._active_tasks = []
def _start_task(self, *args, **kwargs):
raise NotImplementedError('must be implemented in subclass')
def _current_task(self):
raise NotImplementedError('must be implemented in subclass')
def _map_len(self):
raise NotImplementedError('must be implemented in subclass')
def _loop(self, handler):
"""Serve handler's IO loop in a separate thread or process."""
ioloop = IOLoop()
try:
handler.ioloop = ioloop
try:
handler.add_channel()
except EnvironmentError:
err = sys.exc_info()[1]
if err.errno == errno.EBADF:
# we might get here in case the other end quickly
# disconnected (see test_quick_connect())
return
else:
raise
# Here we localize variable access to minimize overhead.
poll = ioloop.poll
sched_poll = ioloop.sched.poll
poll_timeout = getattr(self, 'poll_timeout', None)
soonest_timeout = poll_timeout
while (ioloop.socket_map or ioloop.sched._tasks) and not \
self._exit.is_set():
try:
if ioloop.socket_map:
poll(timeout=soonest_timeout)
if ioloop.sched._tasks:
soonest_timeout = sched_poll()
                    # Handle the case where socket_map is empty but some
# cancelled scheduled calls are still around causing
# this while loop to hog CPU resources.
# In theory this should never happen as all the sched
# functions are supposed to be cancel()ed on close()
# but by using threads we can incur into
# synchronization issues such as this one.
# https://code.google.com/p/pyftpdlib/issues/detail?id=245
if not ioloop.socket_map:
ioloop.sched.reheapify() # get rid of cancel()led calls
soonest_timeout = sched_poll()
if soonest_timeout:
time.sleep(min(soonest_timeout, 1))
else:
soonest_timeout = None
except (KeyboardInterrupt, SystemExit):
# note: these two exceptions are raised in all sub
# processes
self._exit.set()
except select.error:
# on Windows we can get WSAENOTSOCK if the client
                    # rapidly connects and disconnects
err = sys.exc_info()[1]
if os.name == 'nt' and err.args[0] == 10038:
for fd in list(ioloop.socket_map.keys()):
try:
select.select([fd], [], [], 0)
except select.error:
try:
logger.info("discarding broken socket %r",
ioloop.socket_map[fd])
del ioloop.socket_map[fd]
except KeyError:
# dict changed during iteration
pass
else:
raise
else:
if poll_timeout:
if soonest_timeout is None \
or soonest_timeout > poll_timeout:
soonest_timeout = poll_timeout
finally:
ioloop.close()
def handle_accepted(self, sock, addr):
handler = FTPServer.handle_accepted(self, sock, addr)
if handler is not None:
# unregister the handler from the main IOLoop used by the
# main thread to accept connections
self.ioloop.unregister(handler._fileno)
t = self._start_task(target=self._loop, args=(handler,))
t.name = repr(addr)
t.start()
# it is a different process so free resources here
if hasattr(t, 'pid'):
handler.close()
self._lock.acquire()
try:
# clean finished tasks
for task in self._active_tasks[:]:
if not task.is_alive():
self._active_tasks.remove(task)
# add the new task
self._active_tasks.append(t)
finally:
self._lock.release()
def _log_start(self):
FTPServer._log_start(self)
logger.info("dispatcher: %r", self.__class__)
def serve_forever(self, timeout=None, blocking=True, handle_exit=True):
self._exit.clear()
if handle_exit:
            log = handle_exit and blocking
if log:
self._log_start()
try:
self.ioloop.loop(timeout, blocking)
except (KeyboardInterrupt, SystemExit):
pass
if blocking:
if log:
logger.info(">>> shutting down FTP server (%s active " \
"workers) <<<", self._map_len())
self.close_all()
else:
self.ioloop.loop(timeout, blocking)
def close_all(self):
tasks = self._active_tasks[:]
# this must be set after getting active tasks as it causes
# thread objects to get out of the list too soon
self._exit.set()
if tasks and hasattr(tasks[0], 'terminate'):
# we're dealing with subprocesses
for t in tasks:
try:
if not _BSD:
t.terminate()
else:
# XXX - On FreeBSD using SIGTERM doesn't work
# as the process hangs on kqueue.control() or
# select.select(). Use SIGKILL instead.
os.kill(t.pid, signal.SIGKILL)
except OSError:
err = sys.exc_info()[1]
if err.errno != errno.ESRCH:
raise
self._wait_for_tasks(tasks)
del self._active_tasks[:]
FTPServer.close_all(self)
def _wait_for_tasks(self, tasks):
"""Wait for threads or subprocesses to terminate."""
warn = logger.warning
for t in tasks:
t.join(self.join_timeout)
if t.is_alive():
# Thread or process is still alive. If it's a process
# attempt to send SIGKILL as last resort.
# Set timeout to None so that we will exit immediately
# in case also other threads/processes are hanging.
self.join_timeout = None
if hasattr(t, 'terminate'):
msg = "could not terminate process %r" % t
if not _BSD:
warn(msg + "; sending SIGKILL as last resort")
try:
os.kill(t.pid, signal.SIGKILL)
except OSError:
err = sys.exc_info()[1]
if err.errno != errno.ESRCH:
raise
else:
warn(msg)
else:
warn("thread %r didn't terminate; ignoring it", t)
try:
import threading
except ImportError:
pass
else:
__all__ += ['ThreadedFTPServer']
# compatibility with python <= 2.6
if not hasattr(threading.Thread, 'is_alive'):
threading.Thread.is_alive = threading.Thread.isAlive
class ThreadedFTPServer(_SpawnerBase):
"""A modified version of base FTPServer class which spawns a
thread every time a new connection is established.
"""
# The timeout passed to thread's IOLoop.poll() call on every
# loop. Necessary since threads ignore KeyboardInterrupt.
poll_timeout = 1.0
_lock = threading.Lock()
_exit = threading.Event()
# compatibility with python <= 2.6
if not hasattr(_exit, 'is_set'):
_exit.is_set = _exit.isSet
def _start_task(self, *args, **kwargs):
return threading.Thread(*args, **kwargs)
def _current_task(self):
return threading.currentThread()
def _map_len(self):
return threading.activeCount()
if os.name == 'posix':
try:
import multiprocessing
except ImportError:
pass
else:
__all__ += ['MultiprocessFTPServer']
class MultiprocessFTPServer(_SpawnerBase):
"""A modified version of base FTPServer class which spawns a
process every time a new connection is established.
"""
_lock = multiprocessing.Lock()
_exit = multiprocessing.Event()
def _start_task(self, *args, **kwargs):
return multiprocessing.Process(*args, **kwargs)
def _current_task(self):
return multiprocessing.current_process()
def _map_len(self):
return len(multiprocessing.active_children())
| bsd-2-clause |
dmccloskey/SBaaS_ale | SBaaS_ale/stage01_ale_trajectories_query.py | 1 | 10984 | #lims
from SBaaS_LIMS.lims_experiment_postgresql_models import *
from SBaaS_LIMS.lims_sample_postgresql_models import *
from .stage01_ale_trajectories_postgresql_models import *
from SBaaS_base.sbaas_base_query_update import sbaas_base_query_update
from SBaaS_base.sbaas_base_query_drop import sbaas_base_query_drop
from SBaaS_base.sbaas_base_query_initialize import sbaas_base_query_initialize
from SBaaS_base.sbaas_base_query_insert import sbaas_base_query_insert
from SBaaS_base.sbaas_base_query_select import sbaas_base_query_select
from SBaaS_base.sbaas_base_query_delete import sbaas_base_query_delete
from SBaaS_base.sbaas_template_query import sbaas_template_query
class stage01_ale_trajectories_query(sbaas_template_query):
def initialize_supportedTables(self):
        '''Set the supported tables dict for this query class.'''
tables_supported = {'data_stage01_ale_trajectories':data_stage01_ale_trajectories,
'data_stage01_ale_jumps':data_stage01_ale_jumps,
'data_stage01_ale_stocks':data_stage01_ale_stocks
};
self.set_supportedTables(tables_supported);
# query sample name abbreviations from data_stage01_ale_trajectories
def get_sampleNameAbbreviations_experimentID_dataStage01AleTrajectories(self,experiment_id_I):
        '''Query sample name abbreviations that are used in the experiment'''
try:
sample_names = self.session.query(data_stage01_ale_trajectories.ale_id).filter(
data_stage01_ale_trajectories.experiment_id.like(experiment_id_I),
data_stage01_ale_trajectories.used_.is_(True)).group_by(
data_stage01_ale_trajectories.ale_id).order_by(
data_stage01_ale_trajectories.ale_id.asc()).all();
sample_names_O = [];
for sn in sample_names: sample_names_O.append(sn.ale_id);
return sample_names_O;
except SQLAlchemyError as e:
print(e);
# query rows from data_stage01_ale_rates
def get_rows_experimentIDAndALEID_dataStage01AleTrajectories(self,experiment_id_I,ale_id_I):
        '''Query rows for ale_ids that are used in the experiment'''
try:
data = self.session.query(data_stage01_ale_trajectories).filter(
data_stage01_ale_trajectories.experiment_id.like(experiment_id_I),
data_stage01_ale_trajectories.used_.is_(True),
data_stage01_ale_trajectories.ale_id.like(ale_id_I)).order_by(
data_stage01_ale_trajectories.ale_time.asc()).all();
data_O = [];
if data:
for d in data:
data_tmp = d.__repr__dict__();
data_O.append(data_tmp);
return data_O;
except SQLAlchemyError as e:
print(e);
def add_dataStage01AleTrajectories(self, data_I):
'''add rows of data_stage01_ale_trajectories'''
if data_I:
for d in data_I:
try:
data_add = data_stage01_ale_trajectories(d
#d['experiment_id'],
#d['ale_id'],
#d['ale_time'],
#d['ale_time_units'],
#d['generations'],
#d['ccd'],
#d['rate'],
#d['rate_units'],
#d['used_'],
#d['comment_']
);
self.session.add(data_add);
except SQLAlchemyError as e:
print(e);
self.session.commit();
def update_dataStage01AleTrajectories(self,data_I):
'''update rows of data_stage01_ale_trajectories'''
if data_I:
for d in data_I:
try:
data_update = self.session.query(data_stage01_ale_trajectories).filter(
data_stage01_ale_trajectories.id == d['id']).update(
{
'experiment_id':d['experiment_id'],
'ale_id':d['ale_id'],
'ale_time':d['ale_time'],
'ale_time_units':d['ale_time_units'],
'generations':d['generations'],
'ccd':d['ccd'],
'rate':d['rate'],
'rate_units':d['rate_units'],
'used_':d['used_'],
'comment_':d['comment_']},
synchronize_session=False);
except SQLAlchemyError as e:
print(e);
self.session.commit();
def add_dataStage01AleJumps(self, data_I):
'''add rows of data_stage01_ale_jumps'''
if data_I:
for d in data_I:
try:
data_add = data_stage01_ale_jumps(d
#d['experiment_id'],
#d['ale_id'],
#d['ale_time'],
#d['ale_time_units'],
#d['rate_fitted'],
#d['rate_fitted_units'],
#d['jump_region'],
#d['used_'],
#d['comment_']
);
self.session.add(data_add);
except SQLAlchemyError as e:
print(e);
self.session.commit();
def update_dataStage01AleJumps(self,data_I):
'''update rows of data_stage01_ale_jumps'''
if data_I:
for d in data_I:
try:
data_update = self.session.query(data_stage01_ale_jumps).filter(
data_stage01_ale_jumps.id == d['id']).update(
{'experiment_id':d['experiment_id'],
'ale_id':d['ale_id'],
'ale_time':d['ale_time'],
'ale_time_units':d['ale_time_units'],
'rate_fitted':d['rate_fitted'],
'rate_fitted_units':d['rate_fitted_units'],
'jump_region':d['jump_region'],
'used_':d['used_'],
'comment_':d['comment_']},
synchronize_session=False);
except SQLAlchemyError as e:
print(e);
self.session.commit();
def add_dataStage01AleStocks(self, data_I):
'''add rows of data_stage01_ale_stocks'''
if data_I:
for d in data_I:
try:
data_add = data_stage01_ale_stocks(d
#d['experiment_id'],
#d['ale_id'],
#d['sample_name_abbreviation'],
#d['time_point'],
#d['ale_time'],
#d['ale_time_units'],
#d['used_'],
#d['comment_']
);
self.session.add(data_add);
except SQLAlchemyError as e:
print(e);
self.session.commit();
def update_dataStage01AleStocks(self,data_I):
'''update rows of data_stage01_ale_stocks'''
if data_I:
for d in data_I:
try:
data_update = self.session.query(data_stage01_ale_stocks).filter(
data_stage01_ale_stocks.id == d['id']).update(
{
'experiment_id':d['experiment_id'],
'ale_id':d['ale_id'],
'sample_name_abbreviation':d['sample_name_abbreviation'],
'time_point':d['time_point'],
'ale_time':d['ale_time'],
'ale_time_units':d['ale_time_units'],
'used_':d['used_'],
'comment_':d['comment_']},
synchronize_session=False);
except SQLAlchemyError as e:
print(e);
self.session.commit();
def drop_dataStage01_ale_trajectories(self):
try:
data_stage01_ale_trajectories.__table__.drop(self.engine,True);
#data_stage01_ale_jumps.__table__.drop(self.engine,True);
data_stage01_ale_stocks.__table__.drop(self.engine,True);
except SQLAlchemyError as e:
print(e);
def reset_dataStage01_ale_all(self,experiment_id_I = None):
try:
if experiment_id_I:
reset = self.session.query(data_stage01_ale_trajectories).filter(data_stage01_ale_trajectories.experiment_id.like(experiment_id_I)).delete(synchronize_session=False);
#reset = self.session.query(data_stage01_ale_jumps).filter(data_stage01_ale_jumps.experiment_id.like(experiment_id_I)).delete(synchronize_session=False);
reset = self.session.query(data_stage01_ale_stocks).filter(data_stage01_ale_stocks.experiment_id.like(experiment_id_I)).delete(synchronize_session=False);
self.session.commit();
except SQLAlchemyError as e:
print(e);
def reset_dataStage01_ale_trajectories(self,experiment_id_I = None):
try:
if experiment_id_I:
reset = self.session.query(data_stage01_ale_trajectories).filter(data_stage01_ale_trajectories.experiment_id.like(experiment_id_I)).delete(synchronize_session=False);
self.session.commit();
except SQLAlchemyError as e:
print(e);
def reset_dataStage01_ale_jumps(self,experiment_id_I = None):
try:
if experiment_id_I:
#reset = self.session.query(data_stage01_ale_jumps).filter(data_stage01_ale_jumps.experiment_id.like(experiment_id_I)).delete(synchronize_session=False);
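                # NOTE: with the delete above commented out, this method
                # currently commits without removing any jump rows.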
self.session.commit();
except SQLAlchemyError as e:
print(e);
def reset_dataStage01_ale_stocks(self,experiment_id_I = None):
try:
if experiment_id_I:
reset = self.session.query(data_stage01_ale_stocks).filter(data_stage01_ale_stocks.experiment_id.like(experiment_id_I)).delete(synchronize_session=False);
self.session.commit();
except SQLAlchemyError as e:
print(e);
def initialize_dataStage01_ale_trajectories(self):
try:
data_stage01_ale_trajectories.__table__.create(self.engine,True);
#data_stage01_ale_jumps.__table__.create(self.engine,True);
data_stage01_ale_stocks.__table__.create(self.engine,True);
except SQLAlchemyError as e:
print(e); | mit |
FarnazH/horton | horton/meanfield/hamiltonian.py | 4 | 9021 | # -*- coding: utf-8 -*-
# HORTON: Helpful Open-source Research TOol for N-fermion systems.
# Copyright (C) 2011-2017 The HORTON Development Team
#
# This file is part of HORTON.
#
# HORTON is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# HORTON is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
# --
"""Mean-field DFT/HF Hamiltonian data structures"""
from horton.log import log
from horton.cache import Cache
from horton.utils import doc_inherit
__all__ = [
'REffHam', 'UEffHam'
]
class EffHam(object):
"""Base class for the effective Hamiltonians.
Class attributes
----------------
ndm : int
The number of input density matrices and output fock matrices (e.g. ndm=1 for
restricted wfns, ndm=2 for unrestricted wfns.)
deriv_scale : float
In principle, the fock matrix is the derivative of the expectation value towards
the density matrix elements. In practice, this is not always the case. Depending
on the type of effective Hamiltonian, the fock matrices must be multiplied with a
factor to obtain proper derivatives. This factor is stored in the class attribute
``deriv_scale``. It defaults to 1.0.
"""
ndm = None
deriv_scale = 1.0
def __init__(self, terms, external=None):
"""Initialize an EffHam instance.
Parameters
----------
terms : list with instances of Observable
The terms in the Hamiltonian.
external : dict
A dictionary with external energy contributions that do not depend on the
wavefunction, e.g. nuclear-nuclear interactions or QM/MM mechanical embedding
terms. Use ``nn`` as key for the nuclear-nuclear term.
"""
# check arguments:
if len(terms) == 0:
raise ValueError('At least one term must be present in the Hamiltonian.')
# Assign attributes
self.terms = list(terms)
self.external = {} if external is None else external
# Create a cache for shared intermediate results. This cache should only
# be used for derived quantities that depend on the wavefunction and
# need to be updated at each SCF cycle.
self.cache = Cache()
def reset(self, *dms):
"""Remove intermediate results from cache and specify new input density matrices.
Parameters
----------
dm1, dm2, ... : TwoIndex
The input density matrices. Their interpretation is fixed in
derived classes.
"""
raise NotImplementedError
def reset_delta(self, *delta_dms):
"""Remove intermediate results for delta_dms from cache and specify new inputs.
Parameters
----------
delta_dm1, delta_dm2, ... : TwoIndex
First-order changes to the density matrix, used to compute the dot product
with the energy Hessian.
"""
raise NotImplementedError
def compute_energy(self):
"""Compute the total energy.
The input for this method must be provided through the ``reset``
method.
Returns
-------
energy : float
The expectation value, including the constant terms defined through the
``external`` argument of the constructor
"""
total = 0.0
for term in self.terms:
energy = term.compute_energy(self.cache)
self.cache['energy_%s' % term.label] = energy
total += energy
for key, energy in self.external.iteritems():
self.cache['energy_%s' % key] = energy
total += energy
self.cache['energy'] = total
return total
def log(self):
"""Write an overview of the last computation on screen."""
log('Contributions to the energy:')
log.hline()
log(' term Value')
log.hline()
for term in self.terms:
energy = self.cache['energy_%s' % term.label]
log('%50s %20.12f' % (term.label, energy))
for key, energy in self.external.iteritems():
log('%50s %20.12f' % (key, energy))
log('%50s %20.12f' % ('total', self.cache['energy']))
log.hline()
log.blank()
def compute_fock(self, *focks):
"""Compute the fock matrices.
A Fock matrix is the derivative of the energy toward the components of the
corresponding input density matrices.
The input for this method must be provided through the ``reset`` method. Note that
the Fock matrix must be multiplied by the factor deriv_scale to obtain the proper
derivative of the energy toward the density matrix, in order to maintain the
common conventions for Fock matrices.
Parameters
----------
fock1, fock2, ... : TwoIndex
A list of output Fock operators. Old content is discarded.
"""
for fock in focks:
fock[:] = 0.0
# Loop over all terms and add contributions to the Fock matrix.
for term in self.terms:
term.add_fock(self.cache, *focks)
def compute_dot_hessian(self, *outputs):
"""Compute the dot product of the energy Hessian with a delta DM.
The Hessian in this method is the second derivative of the energy towards the
matrix elements of the input density matrix or matrices. The ``dms`` and
``delta_dms`` are set via the ``reset`` and ``reset_delta`` methods, respectively.
Parameters
----------
outputs : TwoIndex
A list of output TwoIndex objects in which the dot product of the energy
Hessian with the delta density matrices is stored.
            Note that the result must be multiplied by the factor deriv_scale squared in
order to obtain the proper second order derivative. This is due to conventions
related to the definition of the Fock matrix.
"""
for output in outputs:
output[:] = 0.0
# Loop over all terms and add contributions to the output two-index
# objects.
for term in self.terms:
term.add_dot_hessian(self.cache, *outputs)
class REffHam(EffHam):
"""Effective Hamiltonian for restricted wavefunctions."""
ndm = 1
deriv_scale = 2.0
@doc_inherit(EffHam)
def reset(self, in_dm_alpha):
self.cache.clear()
# Take a copy of the input alpha density matrix in the cache.
dm_alpha = self.cache.load('dm_alpha', alloc=in_dm_alpha.shape)[0]
dm_alpha[:] = in_dm_alpha
@doc_inherit(EffHam)
def reset_delta(self, in_delta_dm_alpha):
self.cache.clear(tags='d')
# Take a copy of the input alpha delta density matrix in the cache.
delta_dm_alpha = self.cache.load('delta_dm_alpha', alloc=in_delta_dm_alpha.shape, tags='d')[0]
delta_dm_alpha[:] = in_delta_dm_alpha
@doc_inherit(EffHam)
def compute_fock(self, fock_alpha):
EffHam.compute_fock(self, fock_alpha)
@doc_inherit(EffHam)
def compute_dot_hessian(self, output_alpha):
EffHam.compute_dot_hessian(self, output_alpha)
class UEffHam(EffHam):
"""Effective Hamiltonian for unrestricted wavefunctions."""
ndm = 2
@doc_inherit(EffHam)
def reset(self, in_dm_alpha, in_dm_beta):
self.cache.clear()
# Take copies of the input alpha and beta density matrices in the cache.
dm_alpha = self.cache.load('dm_alpha', alloc=in_dm_alpha.shape)[0]
dm_alpha[:] = in_dm_alpha
dm_beta = self.cache.load('dm_beta', alloc=in_dm_beta.shape)[0]
dm_beta[:] = in_dm_beta
@doc_inherit(EffHam)
def reset_delta(self, in_delta_dm_alpha, in_delta_dm_beta):
self.cache.clear(tags='d')
# Take a copy of the input alpha and beta delta density matrix in the cache.
delta_dm_alpha = self.cache.load('delta_dm_alpha', alloc=in_delta_dm_alpha.shape, tags='d')[0]
delta_dm_alpha[:] = in_delta_dm_alpha
delta_dm_beta = self.cache.load('delta_dm_beta', alloc=in_delta_dm_beta.shape, tags='d')[0]
delta_dm_beta[:] = in_delta_dm_beta
@doc_inherit(EffHam)
def compute_fock(self, fock_alpha, fock_beta):
EffHam.compute_fock(self, fock_alpha, fock_beta)
@doc_inherit(EffHam)
def compute_dot_hessian(self, output_alpha, output_beta):
EffHam.compute_dot_hessian(self, output_alpha, output_beta)
| gpl-3.0 |
jmcarp/django | django/db/backends/oracle/base.py | 129 | 24986 | """
Oracle database backend for Django.
Requires cx_Oracle: http://cx-oracle.sourceforge.net/
"""
from __future__ import unicode_literals
import datetime
import decimal
import os
import platform
import sys
import warnings
from django.conf import settings
from django.db import utils
from django.db.backends.base.base import BaseDatabaseWrapper
from django.db.backends.base.validation import BaseDatabaseValidation
from django.utils import six, timezone
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.duration import duration_string
from django.utils.encoding import force_bytes, force_text
from django.utils.functional import cached_property
def _setup_environment(environ):
# Cygwin requires some special voodoo to set the environment variables
# properly so that Oracle will see them.
if platform.system().upper().startswith('CYGWIN'):
try:
import ctypes
except ImportError as e:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading ctypes: %s; "
"the Oracle backend requires ctypes to "
"operate correctly under Cygwin." % e)
kernel32 = ctypes.CDLL('kernel32')
for name, value in environ:
kernel32.SetEnvironmentVariableA(name, value)
else:
os.environ.update(environ)
_setup_environment([
# Oracle takes client-side character set encoding from the environment.
('NLS_LANG', '.UTF8'),
# This prevents unicode from getting mangled by getting encoded into the
# potentially non-unicode database character set.
('ORA_NCHAR_LITERAL_REPLACE', 'TRUE'),
])
try:
import cx_Oracle as Database
except ImportError as e:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading cx_Oracle module: %s" % e)
# Some of these import cx_Oracle, so import them after checking if it's installed.
from .client import DatabaseClient # isort:skip
from .creation import DatabaseCreation # isort:skip
from .features import DatabaseFeatures # isort:skip
from .introspection import DatabaseIntrospection # isort:skip
from .operations import DatabaseOperations # isort:skip
from .schema import DatabaseSchemaEditor # isort:skip
from .utils import Oracle_datetime, convert_unicode # isort:skip
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
class _UninitializedOperatorsDescriptor(object):
def __get__(self, instance, owner):
# If connection.operators is looked up before a connection has been
# created, transparently initialize connection.operators to avert an
# AttributeError.
if instance is None:
raise AttributeError("operators not available as class attribute")
# Creating a cursor will initialize the operators.
instance.cursor().close()
return instance.__dict__['operators']
class DatabaseWrapper(BaseDatabaseWrapper):
vendor = 'oracle'
# This dictionary maps Field objects to their associated Oracle column
# types, as strings. Column-type strings can contain format strings; they'll
# be interpolated against the values of Field.__dict__ before being output.
# If a column type is set to None, it won't be included in the output.
#
# Any format strings starting with "qn_" are quoted before being used in the
# output (the "qn_" prefix is stripped before the lookup is performed.
data_types = {
'AutoField': 'NUMBER(11)',
'BinaryField': 'BLOB',
'BooleanField': 'NUMBER(1)',
'CharField': 'NVARCHAR2(%(max_length)s)',
'CommaSeparatedIntegerField': 'VARCHAR2(%(max_length)s)',
'DateField': 'DATE',
'DateTimeField': 'TIMESTAMP',
'DecimalField': 'NUMBER(%(max_digits)s, %(decimal_places)s)',
'DurationField': 'INTERVAL DAY(9) TO SECOND(6)',
'FileField': 'NVARCHAR2(%(max_length)s)',
'FilePathField': 'NVARCHAR2(%(max_length)s)',
'FloatField': 'DOUBLE PRECISION',
'IntegerField': 'NUMBER(11)',
'BigIntegerField': 'NUMBER(19)',
'IPAddressField': 'VARCHAR2(15)',
'GenericIPAddressField': 'VARCHAR2(39)',
'NullBooleanField': 'NUMBER(1)',
'OneToOneField': 'NUMBER(11)',
'PositiveIntegerField': 'NUMBER(11)',
'PositiveSmallIntegerField': 'NUMBER(11)',
'SlugField': 'NVARCHAR2(%(max_length)s)',
'SmallIntegerField': 'NUMBER(11)',
'TextField': 'NCLOB',
'TimeField': 'TIMESTAMP',
'URLField': 'VARCHAR2(%(max_length)s)',
'UUIDField': 'VARCHAR2(32)',
}
data_type_check_constraints = {
'BooleanField': '%(qn_column)s IN (0,1)',
'NullBooleanField': '(%(qn_column)s IN (0,1)) OR (%(qn_column)s IS NULL)',
'PositiveIntegerField': '%(qn_column)s >= 0',
'PositiveSmallIntegerField': '%(qn_column)s >= 0',
}
operators = _UninitializedOperatorsDescriptor()
_standard_operators = {
'exact': '= %s',
'iexact': '= UPPER(%s)',
'contains': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
'icontains': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
'endswith': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
'istartswith': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
'iendswith': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
}
_likec_operators = _standard_operators.copy()
_likec_operators.update({
'contains': "LIKEC %s ESCAPE '\\'",
'icontains': "LIKEC UPPER(%s) ESCAPE '\\'",
'startswith': "LIKEC %s ESCAPE '\\'",
'endswith': "LIKEC %s ESCAPE '\\'",
'istartswith': "LIKEC UPPER(%s) ESCAPE '\\'",
'iendswith': "LIKEC UPPER(%s) ESCAPE '\\'",
})
# The patterns below are used to generate SQL pattern lookup clauses when
# the right-hand side of the lookup isn't a raw string (it might be an expression
# or the result of a bilateral transformation).
# In those cases, special characters for LIKE operators (e.g. \, *, _) should be
# escaped on database side.
#
# Note: we use str.format() here for readability as '%' is used as a wildcard for
# the LIKE operator.
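    # e.g. a 'startswith' lookup against an expression becomes
    # LIKE <escaped rhs> || '%' after \, % and _ are escaped via pattern_esc.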
pattern_esc = r"REPLACE(REPLACE(REPLACE({}, '\', '\\'), '%%', '\%%'), '_', '\_')"
_pattern_ops = {
'contains': "'%%' || {} || '%%'",
'icontains': "'%%' || UPPER({}) || '%%'",
'startswith': "{} || '%%'",
'istartswith': "UPPER({}) || '%%'",
'endswith': "'%%' || {}",
'iendswith': "'%%' || UPPER({})",
}
_standard_pattern_ops = {k: "LIKE TRANSLATE( " + v + " USING NCHAR_CS)"
" ESCAPE TRANSLATE('\\' USING NCHAR_CS)"
for k, v in _pattern_ops.items()}
_likec_pattern_ops = {k: "LIKEC " + v + " ESCAPE '\\'"
for k, v in _pattern_ops.items()}
Database = Database
SchemaEditorClass = DatabaseSchemaEditor
def __init__(self, *args, **kwargs):
super(DatabaseWrapper, self).__init__(*args, **kwargs)
self.features = DatabaseFeatures(self)
use_returning_into = self.settings_dict["OPTIONS"].get('use_returning_into', True)
self.features.can_return_id_from_insert = use_returning_into
self.ops = DatabaseOperations(self)
self.client = DatabaseClient(self)
self.creation = DatabaseCreation(self)
self.introspection = DatabaseIntrospection(self)
self.validation = BaseDatabaseValidation(self)
def _connect_string(self):
settings_dict = self.settings_dict
if not settings_dict['HOST'].strip():
settings_dict['HOST'] = 'localhost'
if settings_dict['PORT'].strip():
dsn = Database.makedsn(settings_dict['HOST'],
int(settings_dict['PORT']),
settings_dict['NAME'])
else:
dsn = settings_dict['NAME']
return "%s/%s@%s" % (settings_dict['USER'],
settings_dict['PASSWORD'], dsn)
def get_connection_params(self):
conn_params = self.settings_dict['OPTIONS'].copy()
if 'use_returning_into' in conn_params:
del conn_params['use_returning_into']
return conn_params
def get_new_connection(self, conn_params):
conn_string = convert_unicode(self._connect_string())
return Database.connect(conn_string, **conn_params)
def init_connection_state(self):
cursor = self.create_cursor()
# Set the territory first. The territory overrides NLS_DATE_FORMAT
# and NLS_TIMESTAMP_FORMAT to the territory default. When all of
# these are set in single statement it isn't clear what is supposed
# to happen.
cursor.execute("ALTER SESSION SET NLS_TERRITORY = 'AMERICA'")
# Set Oracle date to ANSI date format. This only needs to execute
# once when we create a new connection. We also set the Territory
# to 'AMERICA' which forces Sunday to evaluate to a '1' in
# TO_CHAR().
cursor.execute(
"ALTER SESSION SET NLS_DATE_FORMAT = 'YYYY-MM-DD HH24:MI:SS'"
" NLS_TIMESTAMP_FORMAT = 'YYYY-MM-DD HH24:MI:SS.FF'"
+ (" TIME_ZONE = 'UTC'" if settings.USE_TZ else ''))
cursor.close()
if 'operators' not in self.__dict__:
# Ticket #14149: Check whether our LIKE implementation will
# work for this connection or we need to fall back on LIKEC.
# This check is performed only once per DatabaseWrapper
# instance per thread, since subsequent connections will use
# the same settings.
cursor = self.create_cursor()
try:
cursor.execute("SELECT 1 FROM DUAL WHERE DUMMY %s"
% self._standard_operators['contains'],
['X'])
except DatabaseError:
self.operators = self._likec_operators
self.pattern_ops = self._likec_pattern_ops
else:
self.operators = self._standard_operators
self.pattern_ops = self._standard_pattern_ops
cursor.close()
try:
self.connection.stmtcachesize = 20
except AttributeError:
# Django docs specify cx_Oracle version 4.3.1 or higher, but
# stmtcachesize is available only in 4.3.2 and up.
pass
# Ensure all changes are preserved even when AUTOCOMMIT is False.
if not self.get_autocommit():
self.commit()
def create_cursor(self):
return FormatStylePlaceholderCursor(self.connection)
def _commit(self):
if self.connection is not None:
try:
return self.connection.commit()
except Database.DatabaseError as e:
# cx_Oracle 5.0.4 raises a cx_Oracle.DatabaseError exception
# with the following attributes and values:
# code = 2091
# message = 'ORA-02091: transaction rolled back
# 'ORA-02291: integrity constraint (TEST_DJANGOTEST.SYS
# _C00102056) violated - parent key not found'
# We convert that particular case to our IntegrityError exception
x = e.args[0]
if hasattr(x, 'code') and hasattr(x, 'message') \
and x.code == 2091 and 'ORA-02291' in x.message:
six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
raise
# Oracle doesn't support releasing savepoints. But we fake them when query
# logging is enabled to keep query counts consistent with other backends.
def _savepoint_commit(self, sid):
if self.queries_logged:
self.queries_log.append({
'sql': '-- RELEASE SAVEPOINT %s (faked)' % self.ops.quote_name(sid),
'time': '0.000',
})
def _set_autocommit(self, autocommit):
with self.wrap_database_errors:
self.connection.autocommit = autocommit
def check_constraints(self, table_names=None):
"""
        To check constraints, we set constraints to immediate. Then, when we're
        done, we must ensure they are returned to deferred.
"""
self.cursor().execute('SET CONSTRAINTS ALL IMMEDIATE')
self.cursor().execute('SET CONSTRAINTS ALL DEFERRED')
def is_usable(self):
try:
self.connection.ping()
except Database.Error:
return False
else:
return True
@cached_property
def oracle_full_version(self):
with self.temporary_connection():
return self.connection.version
@cached_property
def oracle_version(self):
try:
return int(self.oracle_full_version.split('.')[0])
except ValueError:
return None
class OracleParam(object):
"""
Wrapper object for formatting parameters for Oracle. If the string
representation of the value is large enough (greater than 4000 characters)
the input size needs to be set as CLOB. Alternatively, if the parameter
has an `input_size` attribute, then the value of the `input_size` attribute
will be used instead. Otherwise, no input size will be set for the
parameter when executing the query.
"""
def __init__(self, param, cursor, strings_only=False):
# With raw SQL queries, datetimes can reach this function
# without being converted by DateTimeField.get_db_prep_value.
if settings.USE_TZ and (isinstance(param, datetime.datetime) and
not isinstance(param, Oracle_datetime)):
if timezone.is_aware(param):
warnings.warn(
"The Oracle database adapter received an aware datetime (%s), "
"probably from cursor.execute(). Update your code to pass a "
"naive datetime in the database connection's time zone (UTC by "
"default).", RemovedInDjango20Warning)
param = param.astimezone(timezone.utc).replace(tzinfo=None)
param = Oracle_datetime.from_datetime(param)
if isinstance(param, datetime.timedelta):
param = duration_string(param)
if ' ' not in param:
param = '0 ' + param
string_size = 0
# Oracle doesn't recognize True and False correctly in Python 3.
# The conversion done below works both in 2 and 3.
if param is True:
param = 1
elif param is False:
param = 0
if hasattr(param, 'bind_parameter'):
self.force_bytes = param.bind_parameter(cursor)
elif isinstance(param, Database.Binary):
self.force_bytes = param
else:
# To transmit to the database, we need Unicode if supported
# To get size right, we must consider bytes.
self.force_bytes = convert_unicode(param, cursor.charset,
strings_only)
if isinstance(self.force_bytes, six.string_types):
# We could optimize by only converting up to 4000 bytes here
string_size = len(force_bytes(param, cursor.charset, strings_only))
if hasattr(param, 'input_size'):
# If parameter has `input_size` attribute, use that.
self.input_size = param.input_size
elif string_size > 4000:
# Mark any string param greater than 4000 characters as a CLOB.
self.input_size = Database.CLOB
else:
self.input_size = None
class VariableWrapper(object):
"""
An adapter class for cursor variables that prevents the wrapped object
from being converted into a string when used to instantiate an OracleParam.
This can be used generally for any other object that should be passed into
Cursor.execute as-is.
"""
def __init__(self, var):
self.var = var
def bind_parameter(self, cursor):
return self.var
def __getattr__(self, key):
return getattr(self.var, key)
def __setattr__(self, key, value):
if key == 'var':
self.__dict__[key] = value
else:
setattr(self.var, key, value)
class FormatStylePlaceholderCursor(object):
"""
Django uses "format" (e.g. '%s') style placeholders, but Oracle uses ":var"
style. This fixes it -- but note that if you want to use a literal "%s" in
a query, you'll need to use "%%s".
We also do automatic conversion between Unicode on the Python side and
UTF-8 -- for talking to Oracle -- in here.
"""
charset = 'utf-8'
def __init__(self, connection):
self.cursor = connection.cursor()
# Necessary to retrieve decimal values without rounding error.
self.cursor.numbersAsStrings = True
# Default arraysize of 1 is highly sub-optimal.
self.cursor.arraysize = 100
def _format_params(self, params):
try:
return {k: OracleParam(v, self, True) for k, v in params.items()}
except AttributeError:
return tuple(OracleParam(p, self, True) for p in params)
def _guess_input_sizes(self, params_list):
# Try dict handling; if that fails, treat as sequence
if hasattr(params_list[0], 'keys'):
sizes = {}
for params in params_list:
for k, value in params.items():
if value.input_size:
sizes[k] = value.input_size
self.setinputsizes(**sizes)
else:
# It's not a list of dicts; it's a list of sequences
sizes = [None] * len(params_list[0])
for params in params_list:
for i, value in enumerate(params):
if value.input_size:
sizes[i] = value.input_size
self.setinputsizes(*sizes)
def _param_generator(self, params):
# Try dict handling; if that fails, treat as sequence
if hasattr(params, 'items'):
return {k: v.force_bytes for k, v in params.items()}
else:
return [p.force_bytes for p in params]
def _fix_for_params(self, query, params):
# cx_Oracle wants no trailing ';' for SQL statements. For PL/SQL, it
        # does want a trailing ';' but not a trailing '/'. However, these
# characters must be included in the original query in case the query
# is being passed to SQL*Plus.
if query.endswith(';') or query.endswith('/'):
query = query[:-1]
if params is None:
params = []
query = convert_unicode(query, self.charset)
elif hasattr(params, 'keys'):
# Handle params as dict
args = {k: ":%s" % k for k in params.keys()}
query = convert_unicode(query % args, self.charset)
else:
# Handle params as sequence
args = [(':arg%d' % i) for i in range(len(params))]
query = convert_unicode(query % tuple(args), self.charset)
return query, self._format_params(params)
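    # For instance (hypothetical query), "SELECT 1 FROM t WHERE a = %s AND
    # b = %s" with a two-element params sequence comes back rewritten to use
    # :arg0 and :arg1 before being handed to cx_Oracle.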
def execute(self, query, params=None):
query, params = self._fix_for_params(query, params)
self._guess_input_sizes([params])
try:
return self.cursor.execute(query, self._param_generator(params))
except Database.DatabaseError as e:
# cx_Oracle <= 4.4.0 wrongly raises a DatabaseError for ORA-01400.
if hasattr(e.args[0], 'code') and e.args[0].code == 1400 and not isinstance(e, IntegrityError):
six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
raise
def executemany(self, query, params=None):
if not params:
# No params given, nothing to do
return None
# uniform treatment for sequences and iterables
params_iter = iter(params)
query, firstparams = self._fix_for_params(query, next(params_iter))
# we build a list of formatted params; as we're going to traverse it
# more than once, we can't make it lazy by using a generator
formatted = [firstparams] + [self._format_params(p) for p in params_iter]
self._guess_input_sizes(formatted)
try:
return self.cursor.executemany(query,
[self._param_generator(p) for p in formatted])
except Database.DatabaseError as e:
# cx_Oracle <= 4.4.0 wrongly raises a DatabaseError for ORA-01400.
if hasattr(e.args[0], 'code') and e.args[0].code == 1400 and not isinstance(e, IntegrityError):
six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
raise
def fetchone(self):
row = self.cursor.fetchone()
if row is None:
return row
return _rowfactory(row, self.cursor)
def fetchmany(self, size=None):
if size is None:
size = self.arraysize
return tuple(_rowfactory(r, self.cursor) for r in self.cursor.fetchmany(size))
def fetchall(self):
return tuple(_rowfactory(r, self.cursor) for r in self.cursor.fetchall())
def close(self):
try:
self.cursor.close()
except Database.InterfaceError:
# already closed
pass
def var(self, *args):
return VariableWrapper(self.cursor.var(*args))
def arrayvar(self, *args):
return VariableWrapper(self.cursor.arrayvar(*args))
def __getattr__(self, attr):
if attr in self.__dict__:
return self.__dict__[attr]
else:
return getattr(self.cursor, attr)
def __iter__(self):
return CursorIterator(self.cursor)
class CursorIterator(six.Iterator):
"""Cursor iterator wrapper that invokes our custom row factory."""
def __init__(self, cursor):
self.cursor = cursor
self.iter = iter(cursor)
def __iter__(self):
return self
def __next__(self):
return _rowfactory(next(self.iter), self.cursor)
def _rowfactory(row, cursor):
# Cast numeric values as the appropriate Python type based upon the
# cursor description, and convert strings to unicode.
casted = []
for value, desc in zip(row, cursor.description):
if value is not None and desc[1] is Database.NUMBER:
precision, scale = desc[4:6]
if scale == -127:
if precision == 0:
# NUMBER column: decimal-precision floating point
# This will normally be an integer from a sequence,
# but it could be a decimal value.
if '.' in value:
value = decimal.Decimal(value)
else:
value = int(value)
else:
# FLOAT column: binary-precision floating point.
# This comes from FloatField columns.
value = float(value)
elif precision > 0:
# NUMBER(p,s) column: decimal-precision fixed point.
# This comes from IntField and DecimalField columns.
if scale == 0:
value = int(value)
else:
value = decimal.Decimal(value)
elif '.' in value:
# No type information. This normally comes from a
# mathematical expression in the SELECT list. Guess int
# or Decimal based on whether it has a decimal point.
value = decimal.Decimal(value)
else:
value = int(value)
elif desc[1] in (Database.STRING, Database.FIXED_CHAR,
Database.LONG_STRING):
value = to_unicode(value)
casted.append(value)
return tuple(casted)
def to_unicode(s):
"""
Convert strings to Unicode objects (and return all other data types
unchanged).
"""
if isinstance(s, six.string_types):
return force_text(s)
return s
| bsd-3-clause |
dougsweetser/ipq | q_notebooks/billiard_calculations.py | 1 | 23460 |
# coding: utf-8
# # Table of Contents
# * [Observing Billiards Using Space-time Numbers](#Observing-Billiards-Using-Space-time-Numbers)
# * [Representations of Numbers Versus Coordinate Transformation of Vectors](#Representations-of-Numbers--Versus-Coordinate-Transformation-of-Vectors)
# * [Observer B Boosted](#Observer-B-Boosted)
# * [Observer C in a Gravity Field in Theory](#Observer-C-in-a-Gravity-Field-in-Theory)
# * [Observer C in a Gravity Field in Practice](#Observer-C-in-a-Gravity-Field-in-Practice)
# * [Conclusions](#Conclusions)
#
# # Observing Billiards Using Space-time Numbers
# The goal of this iPython notebook is to become familiar with using space-time numbers to describe events. This will be done for three different observers. The first case will cover the fact that the observers happen to be at different locations. How does one handle different ways to represent the numbers used to characterize events? One observer will be set in constant motion. We will work out the equivalence classes that cover observers in motion. The final case will look at equivalence classes that may happen due to gravity.
# Here is an animation of a mini billiard shot.
# 
# The cue ball hits the 8 ball, then into the corner pocket it goes. Observer A is yellow, our proverbial reference observer. I promise to do nothing with her ever. Observer B in pink is at a slightly different location, but still watching from the tabletop. Eventually, he will be set into constant motion. We can then see what the observers agree and disagree about. Observer C is in purple, at the end of a pipe cleaner above the tabletop. His observations will be ever-so-slightly different from Observer A's due to the effects of gravity, and that will be investigated.
# A number of simplifications will be made for this analysis. Only two frames will be used.
# 
# Get rid of the green felt. In its place, put some graph paper. Add a few markers to make any measurement more precise.
# 
# The image was then printed out so a precise dial caliper could be used to make measurements. Notice that observer A is ~2.5 squares to the left and 3+ squares below the 8 ball in the first frame.
# 
# Can the time be measured precisely? In this case, I will use the frames of the gif animation as a proxy for measuring time. I used the command "convert billiard_video.gif Frames/billiards_1%02d.png" to make individual frames from the gif. The two frames are 147 and 158. The speed of the fastest cue break is over 30 miles per hour, or, as a dimensionless relativistic speed, 4.5x10<sup>-8</sup>. If small numbers are used for the differences in space, then the differences in time should be scaled to be in the ten-billion range. So that is what I did: call the first time 1,470,000,000 and the second one 1,580,000,000. The ball is then moving at around 20 mph. I could have found out the frames per second and calculated the correct speed from there. The three observers do not need to coordinate to figure out the same origin in time, so I chose B and C to start a billion and two billion earlier, respectively.
# This explains how I got numbers related to an 8 ball moving on a table. Now to start calculating, with the goal of getting the square. I have written a set of tools called "Q_tool_devo" that allow for numerical manipulations of something I call "space-time numbers". Essentially they are quaternions, a 4D division algebra, written in a funny way. Instead of writing a real number like 5.0, a doublet of values is used, say (6.0, 1.0), which can then be "reduced" to (5.0, 0) and is thus equivalent to the standard real number 5.0. To create a space-time number, feed it eight numbers like so:
# In[1]:
get_ipython().run_cell_magic('capture', '', 'import Q_tools as qt;\nAq1=qt.Q8([1470000000,0,1.1421,0,1.4220,0,0,0])\nAq2=qt.Q8([1580000000,0,4.2966,0,0,0.3643,0,0])')
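# An aside to make the doublet bookkeeping concrete. Below is a minimal sketch of a doublet with a reduce step; it is my own illustration under the stated assumptions, not the actual Q_tools implementation.
class Doublet:
    """A value held as a (positive, negative) pair, as described above."""
    def __init__(self, positive=0.0, negative=0.0):
        self.p = positive
        self.n = negative
    def reduce(self):
        # Cancel the shared part so at least one side of the pair is zero.
        common = min(self.p, self.n)
        return Doublet(self.p - common, self.n - common)
    def as_real(self):
        return self.p - self.n
d = Doublet(6.0, 1.0).reduce()
print(d.p, d.n, d.as_real())  # 5.0 0.0 5.0 -- equivalent to the real number 5.0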
# In[2]:
q_scale = qt.Q8([2.2119,0,0,0,0,0,0,0], qtype="S")
Aq1s=Aq1.product(q_scale)
Aq2s=Aq2.product(q_scale)
print(Aq1s)
print(Aq2s)
# When scaled, the expected values are seen: the x value at around 2.5, the y value above 3, and zero for z. Event 2 is 9.5 and 0.8 $j_3$, meaning -0.8 in real numbers. There is also the qtype "QxS", a way of keeping track of what operations have been done to a space-time number. After all, all space-time numbers look the same. Keeping the qtype around helps avoid combining differing qtypes.
# Calculate the delta quaternion between events one and two:
# In[3]:
Adq=Aq2s.dif(Aq1s).reduce()
print(Aq2s.dif(Aq1s))
print(Adq)
# The difference is nearly 7 in the x<sub>1</sub> direction and 4 in the j<sub>3</sub>, which, if real numbers were being used, would be positive x and negative y. The qtype "QxQ-QxQ.reduce" shows that both initial components were multiplied by a scalar value, the difference was taken, and the result was reduced to its real-number-equivalent form.
# Distances are found using a square.
# In[4]:
Adq2=Adq.square()
print(Adq2)
print(Adq2.reduce())
# This is a case where the non-reduced form is more convenient. The time squared is about 60 quadrillion while the change in space squared is slightly over 64. Classical physics is full of such imbalances and the non-reduced form helps maintain the separation.
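# As a sketch of what the square computes (using the ordinary quaternion product on already-reduced values; the Q8 doublet bookkeeping is richer than this plain function), the first term of the square is the interval and the other three are the space-times-time terms:
def q_square(t, x, y, z):
    """Square of the quaternion t + xi + yj + zk: the first entry is the
    interval t**2 - x**2 - y**2 - z**2, the rest are space-times-time."""
    return (t * t - x * x - y * y - z * z, 2 * t * x, 2 * t * y, 2 * t * z)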
# It is my thesis that all the numbers in the square provide important information for comparing any pair of observers. Here are the input numbers for observers B and C:
# In[5]:
Bq1=qt.Q8([2470000000,0,0.8869,0,1.8700,0,0,0])
Bq2=qt.Q8([2580000000,0,3.9481,0,0,0.1064,0,0])
Bq1s=Bq1.product(q_scale)
Bq2s=Bq2.product(q_scale)
Bdq=Bq2s.dif(Bq1s).reduce()
Cq1=qt.Q8([3470000000,0,1.1421,0,1.4220,0,1.3256,0])
Cq2=qt.Q8([3580000000,0,4.2966,0,0,0.3643,1.3256,0])
Cq1s=Cq1.product(q_scale)
Cq2s=Cq2.product(q_scale)
Cdq=Cq2s.dif(Cq1s).reduce()
print(Bq1s)
print(Bq2s)
print(Bdq)
print(Cq1s)
print(Cq2s)
print(Cdq)
# No set of input numbers for two observers is **ever the same**. Two observers must be located in either a different place in time, a different place in space, or both.
# In[6]:
Bdq2=Bq1s.dif(Bq2s).reduce().square()
Cdq2=Cq1s.dif(Cq2s).reduce().square()
print(Adq2)
print(Bdq2)
print(Cdq2)
# We are comparing apples to apples since the qtypes, "QxS-QxS.reduce.sq", are the same. The first of the 8 terms, the I<sub>0</sub>, is exactly the same. The reason is that the delta time values were exactly the same. The I<sub>2</sub> terms of the first and third observers are exactly the same because their delta values were identical, even though they had different z values. A different physical measurement was made for Observer B. The match is pretty good:
# In[7]:
(64.96 - 64.30)/64.60
# The error is about a percent. So while I reported 4 significant digits, only the first two can be trusted.
# The next experiment involved rotating the graph paper for Observer B. This should not change much other than the numbers that get plugged into the interval calculation.
# 
# In[8]:
BRotq1=qt.Q8([2470000000,0,0.519,0,1.9440,0,0,0])
BRotq2=qt.Q8([2580000000,0,3.9114,0,0.5492,0,0,0])
BRotdq2=BRotq1.product(q_scale).dif(BRotq2.product(q_scale)).reduce().square()
print(BRotdq2)
print(Bdq2)
# No surprise here: the graph paper will make a difference in the numbers used, but the distance is the same up to the errors made in the measuring process.
# ## The Space-times-time term
# What happens with the space-times-time term for these observers that have no relative velocities to each other? The space part always points in a different direction since the spatial origin is in a different location. If we consider the norm squared of the space-times-time term, that would be $dt^2(dx^2 + dy^2 + dz^2)$. This is something observers with different perspectives will agree upon:
# In[9]:
print(Adq2.norm_squared_of_vector().reduce())
print(Bdq2.norm_squared_of_vector().reduce())
print(Cdq2.norm_squared_of_vector().reduce())
print(BRotdq2.norm_squared_of_vector().reduce())
# These are the same within the margin of error of the measurements.
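# As a sketch of the quantity just compared, written as a plain function (a hypothetical helper of mine; the Q8.norm_squared_of_vector method keeps its own doublet and factor conventions):
def norm_squared_of_vector(dt, dx, dy, dz):
    """Norm squared of the space part of the square: dt**2 * (dx**2 + dy**2 + dz**2).
    The library form may carry an extra factor of 4 from the 2*dt*dx terms."""
    return dt ** 2 * (dx ** 2 + dy ** 2 + dz ** 2)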
# ## Representations of Numbers Versus Coordinate Transformation of Vectors
# This notebook is focused on space-time numbers that can be added, subtracted, multiplied, and divided. Formally, they are rank 0 tensors. Yet because space-time numbers have four slots to fill, it is quite easy to mistakenly view them as a four-dimensional vector space over the mathematical field of real numbers with four basis vectors. Different representations of numbers change the values of the numbers that get used, but not their meaning. Let's see this in action for a cylindrical representation of a number. Instead of $x$ and $y$, one uses $R \cos(\alpha)$ and $R \sin(\alpha)$, with no change for $z$.
# 
# What needs to be done with the measurements made in cylindrical coordinates is to convert them to Cartesian, then proceed with the same calculations.
# In[10]:
import math
def cyl_2_cart(q1):
"""Convert a measurment made with cylindrical coordinates in angles to Cartesian cooridantes."""
t = q1.dt.p - q1.dt.n
r = q1.dx.p - q1.dx.n
a = q1.dy.p - q1.dy.n
h = q1.dz.p - q1.dz.n
x = r * math.cos(a * math.pi / 180)
y = r * math.sin(a * math.pi / 180)
return qt.Q8([t, x, y, h])
# For polar coordinates, measure directly the distance between the origin and the billiard ball. Then determine an angle. This constitutes a different approach to making a measurement.
# In[11]:
BPolarq1=cyl_2_cart(qt.Q8([2470000000,0,2.0215,0, 68.0,0,0,0]))
BPolarq2=cyl_2_cart(qt.Q8([2580000000,0,3.9414,0,1.2,0,0,0]))
BPolardq2=BPolarq1.product(q_scale).dif(BPolarq2.product(q_scale)).reduce().square()
print(BPolardq2)
print(Bdq2)
# Yet the result for the interval is the same: the positive time-squared term is exactly the same since those numbers were not changed, and the negative numbers for the space terms differ only within the error of measurement.
# ## Observer B Boosted
# Give Observer B a Lorentz boost. All that is needed is to relocate Observer B in the second frame like so:
# 
# To make the math simpler, presume all the motion is along $x$, not the slightest wiggle along $y$ or $z$. Constant motion between the frames shown is also presumed.
# What velocity is involved? That would be the change in space, 2, divided by the change in time, a big number:
# In[12]:
vx = 2/Bdq.dt.p
print(vx)
# This feels about right. The speed of Observer B is about that of a cue ball.
#
# Boost the delta by this velocity.
# In[13]:
Bdq_boosted = Bdq.boost(beta_x = vx)
print(Bdq_boosted)
print(Bdq_boosted.reduce())
print(Bdq)
print(Bdq_boosted.dif(Bdq).reduce())
# The last line indicates there is no difference between the boosted values of $y$ and $z$, as expected. Both the change in time and in space are negative. Moving in unison is a quality of simple boosts. The change in time is tiny. The change in space is almost 4, but not quite due to the work of the $\gamma$ factor that altered the time measurement.
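# For reference, here is the standard special-relativity boost along x that the Q8.boost call is assumed to implement (my sketch, not the library code):
import math
def boost_x(t, x, y, z, beta_x):
    """Lorentz boost along x; beta_x is the velocity in units of c."""
    gamma = 1.0 / math.sqrt(1.0 - beta_x * beta_x)
    return (gamma * (t - beta_x * x), gamma * (x - beta_x * t), y, z)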
# Compare the squares of the boosted with the non-boosted Observer B
# In[14]:
print(Bdq_boosted.square())
print(Bdq.square())
# Time and space are mixing together for the boosted frame. There are two huge numbers for $I_0$ and $I_2$ instead of a big number and about 65. Are they the same? Compare the reduced squares:
# In[15]:
print(Bdq_boosted.square().reduce())
print(Bdq.square().reduce())
# The reduced intervals are the same. The space-times-time terms are not. The difference between the space-times-time terms can be used to determine how the boosted Observer B is moving relative to Observer B (calculation not done here). Even without going into detail, the motion is only along x because that is the only term that changes.
# Software was written to systematically look at equivalence classes for a pair of quaternions. Three types of comparisons are made: linear, squared, and the norm.
# In[16]:
qb = qt.EQ(Bdq, Bdq_boosted)
print(qb)
# There are 9 equivalence classes in all. Let's visualize them as a set of icons:
# In[17]:
qb.visualize()
# The figures in gray are the locations, one in time and three in space. The colorful set with parabolas are the squares, the interval being purple and off-yellow, and space-times-time in green. The norm is in pink.
# For the gray set, the events from Observer B are being compared with a boosted Observer B for motion that is only along the $x$ direction. We thus expect the $y$ and $z$ values to be exact, as they are ($z$ is exact because $z=0$). The value of $x$ is boosted, so both values are right, but they are not the same value. But what about time? The report is for an exact match. The software was written to say two values were equivalent if they were the same to 10 significant digits. It is the 16th significant digit that is different.
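# A sketch of that comparison rule (my hypothetical helper; the internals of the EQ class are not shown in this notebook):
def equal_to_sig_digits(a, b, digits=10):
    """True when a and b agree to the given number of significant digits."""
    if a == b:
        return True
    scale = max(abs(a), abs(b))
    return abs(a - b) < scale * 10 ** (-digits)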
# The time-like interval is the same for Observer B and the boosted one, so the equivalence class is time-like-exact, as expected. These graphics are icons to represent the class, not a reflection of the numbers used. The space-times-time terms are only different along $t x$ due to the boost along $x$.
# The norm is included for completeness of simple operations, but I don't understand it at this time. It is marked as exact due to the dominance of the time term.
# ## Observer C in a Gravity Field in Theory
# The video of the billiard balls shows there is a gravity field since the eight-ball drops into the pocket. Newton's law of gravity can be written as an interval:
# $$d \tau^2 = \left(1 - 2\frac{G M}{c^2 R}\right) dt^2 - dR^2/c^2 $$
# More precise measurements of weak field gravity add a few more terms (essentially equation 40.1 of Misner, Thorne and Wheeler):
# $$d \tau^2 = \left(1 - 2\frac{G M}{c^2 R} + 2 \left(\frac{G M}{c^2 R}\right)^2\right) dt^2 - \left(1 + 2\frac{G M}{c^2 R}\right) dR^2 /c^2 $$
# When the mass $M$ goes to zero or the distance from the source gets large, the result is the interval expected in flat space-time.
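# As a numerical sketch of that weak-field interval (gl stands for the dimensionless G M / c^2 R; a helper of my own, not part of Q_tools):
def weak_field_tau_squared(dt, dR, gl):
    """d tau^2 from the weak-field expression above, with dR already divided by c."""
    return (1 - 2 * gl + 2 * gl ** 2) * dt ** 2 - (1 + 2 * gl) * dR ** 2
print(weak_field_tau_squared(1.0, 0.5, 0.0))  # 0.75, the flat space-time value dt^2 - dR^2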
# The space-times-time equivalence class as gravity proposal stipulates that for a simple gravitational source mass (spherically symmetric, non-rotating, uncharged), the square of a delta quaternion produces a space-times-time that is the same for different observers no matter where they are in a gravitational field. This can be achieved by making the factor for time the inverse of the one for space (below, a dimensionless M is a stand-in for $\frac{G M}{c^2 R}$).
# In[18]:
from sympy import symbols
M = symbols('M')
(1/(1 - 2 * M)).series(M, 0, n=5)
# Even in the "classical realm", the space-times-time equivalence class as gravity proposal is different from Newtonian gravity. From my brief study of the rotation of thin disk galaxies, this term is not applied to such calculations. This now strikes me as odd. The Schwarzschild solution has this same term, the "first order in M/R", yet only the dt correction is used in practice. The rotation profile calculation is quite complex, needing elliptic integrals. An analytic solution like that would be altered by this well-known term. It will be interesting in time to explore whether the extra term has consequences.
# Since we are analyzing the square, the delta quaternion takes the square roots of these two terms that use the dimensionless gravitational length:
# $$ \begin{align*} dq &= \left(\sqrt{1 - 2 \frac{G M}{c^2 R}} dt, \frac{1}{\sqrt{1 - 2 \frac{G M}{c^2 R}}} dR/c \right) \\ dq^2 &= \left( \left(1 - 2 \frac{G M}{c^2 R}\right) dt^2 - \left(1 + 2 \frac{G M}{c^2 R} + O(2)\right) dR^2/c^2, 2 ~dt ~dR/c \right) \\
# &= \left( d\tau^2, 2 ~dt ~dR/c \right) \end{align*} $$
# Being consistent with the weak gravity field tests and the algebraic constraints of the equivalence class proposal requires six terms, not five:
# In[19]:
(1/(1 - 2 * M + 2 * M ** 2)).series(M, 0, n=3)
# Here are the delta quaternion and its square in a gravity field that will be consistent with all weak field gravitational tests.
# $$ \begin{align*} dq &= \left(\sqrt{1 - 2 \frac{G M}{c^2 R} + 2 \left(\frac{G M}{c^2 R}\right)^2} dt, \frac{1}{\sqrt{1 - 2 \frac{G M}{c^2 R} + 2 \left(\frac{G M}{c^2 R}\right)^2}} dR/c \right) \\ dq^2 &= \left( \left(1 - 2 \frac{G M}{c^2 R} + 2 \left(\frac{G M}{c^2 R}\right)^2\right) dt^2 - \left(1 + 2 \frac{G M}{c^2 R} + 2 \left(\frac{G M}{c^2 R}\right)^2+O(3)\right) dR^2/c^2, 2 ~dt ~dR/c \right) \\
# &= \left( d\tau^2, 2 ~dt ~dR/c \right) \end{align*} $$
# The second-order term for $ dR^2 $ has consequences that are tricky to discuss. Notice that no mention has been made of metrics, field equations, or covariant and contravariant vectors. That is because numbers are tensors of rank 0 that are equipped with rules of multiplication and division. As discussed above, there are different representations of numbers, like a Cartesian representation, a cylindrical representation, and a spherical representation. My default is to use the Cartesian representation because I find it simplest to manage.
# The most successful theory for gravity, general relativity, does use metrics, covariant and contravariant tensors, as well as connections that reveal how a metric changes in space-time. There are a great many technical choices in this process which have consequences. Einstein worked with a torsion-free connection that was metric compatible. One consequence is that dealing with fermions is an open puzzle. The process of getting to an interval is not simple. Ten coupled, non-linear differential equations must be solved. This can be done analytically for only the simplest of cases. It is such a case, the Schwarzschild solution, that makes up most of the tests of general relativity (eq. 40.1 from MTW, written above in isotropic coordinates).
# The reader is being asked to compare Einstein's apple of an interval to the first of four oranges. There is no overlap between the mechanics of the math, hence the apple versus orange. The forms of the expressions are the same: a Taylor series in a dimensionless gravitational length. Five of the coefficients of the Taylor series are identical. Those five coefficients have been tested in a wide variety of classical tests of weak gravitational fields.
# The sixth term is not the same for the Taylor series expansion of the Schwarzschild solution in either isotropic or Schwarzschild coordinates. It is not reasonable to expect the simple space-times-time equivalence constraint to solve the non-linear Einstein field equations.
# The truncated series expansion will not be the final story. We could wait for experimentalists to determine 10 terms, but that is quite unreasonable (personal story: I spent ~$600 to go to an Eastern Gravity Meeting just to ask Prof. Clifford Will when we might get the terms for second-order Parameterized Post-Newtonian accuracy, and at the time, ~2005, he knew of no such planned experimental effort). Given that gravity is a harmonic phenomenon, that six terms match, and that many other people have made the same speculation, it is a small leap to suggest that a positive and a negative exponential of the dimensionless mass length may be the complete solution for simple systems:
# $$ \begin{align*} dq &= \left(\exp\left({-\frac{G M}{c^2 R}}\right) dt, \exp\left(\frac{G M}{c^2 R} \right) dR/c \right) \\ dq^2 &= \left( \exp \left(-2\frac{G M}{c^2 R} \right) dt^2 - \exp \left( 2 \frac{G M}{c^2 R} \right) dR^2/c^2, 2 ~dt ~dR/c \right) \\
# &= \left( d\tau^2, 2 ~dt ~dR/c \right) \end{align*} $$
# The exponential interval does appear in the literature since it makes calculations far simpler.
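# A sketch of the exponential form applied to a delta (assumed behavior for illustration; the actual Q8.g_shift implementation may differ):
import math
def g_shift_exp(dt, dR, gl):
    """Scale time by exp(-gl) and space by exp(+gl), with gl = G M / (c^2 R).
    Because exp(-gl) * exp(gl) = 1, the product dt * dR -- and with it the
    space-times-time term of the square -- is unchanged, as the proposal requires."""
    return (math.exp(-gl) * dt, math.exp(gl) * dR)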
# ## Observer C in a Gravity Field in Practice
# Gravity is impressively weak. The distance of Observer C over Observer A is impressively small. The change in the interval should in practice be beyond measure.
# \begin{align*}
# G&=6.67384\cdot 10^{-11} \frac{m^{3}}{kg s^2}\\
# M&=5.9722 \cdot 10^{24} kg\\
# c&=299792458 m / s \\
# R&=6.371 \cdot 10^{6} m
# \end{align*}
# In[20]:
GMc2R_for_Observer_A = 6.67384e-11 * 5.9722e+24 / (299792458 ** 2 * 6371000)
GMc2R_for_Observer_C = 6.67384e-11 * 5.9722e+24 / (299792458 ** 2 * 6371000.1)
print(GMc2R_for_Observer_A)
print(GMc2R_for_Observer_C)
# Moving 10 centimeters is not much.
# Do the "minimal" shift meaning the three terms of the Taylor series.
# In[21]:
Adq_g = Cdq.g_shift(GMc2R_for_Observer_A, g_form="minimal")
Cdq_g = Cdq.g_shift(GMc2R_for_Observer_C, g_form="minimal")
print(Adq_g)
print(Cdq_g)
# The squares could be calculated, but if the input values are the same, there will be no difference in any of the squares. This is consistent with expectations: a small change in a small number cannot be noticed.
# Observer C is a mere 10 centimeters away from Observer A. Let us make the distance so vast that the GMc2R value is zero.
# In[22]:
Cdq_g_zero = Cdq.g_shift(0, g_form="minimal")
print(Adq_g)
print(Cdq_g_zero)
# Get far enough away, and the effects of gravity may become apparent.
# In[23]:
Adq_g_2 = Adq_g.square()
Cdq_g_zero_2 = Cdq_g_zero.square()
eq_g = qt.EQ(Adq_g_2, Cdq_g_zero_2)
print(eq_g)
eq_g.visualize()
# The time-like interval is not exact. That is the effect of gravity. Yet these pairs of observers, A and C at a very great distance from A, all have exact space-times-time values, as the quaternion gravity proposal demands.
# ## Conclusions
# Nearly all calculations in physics I have ever done involved vector spaces over a mathematical field like the real or complex numbers. This iPython notebook used a fundamentally new and different toolset. Space-time numbers can always be made equivalent to the 4D division algebra of quaternions through the process of reduction. This new breed of numbers was applied to the simple case of an eight-ball moving along a pool table. Three observers watched the events unfold. Their raw data were always different because that is inherent in the definition of being a different observer. Yet by taking the difference between two events and squaring it, equivalence classes could be defined. If two observers agree on the first term of the square, that means the two observers are moving at a constant speed relative to each other. The other three terms can be used to figure out the motion. While this might be viewed as a novel approach to special relativity, nothing new is claimed.
# If two observers are in an equivalence class because the space-times-time values of the squared difference between two events are exactly the same, this is a new proposal for how gravity works. There are no metric tensors, connections, or field equations. Rather, the proposal is a twin of special relativity. In both cases it is the square that matters. The difference between the two: if the observers agree on the reduced real values, that is special relativity, whereas if they agree on the reduced imaginary values, that is the space-times-time equivalence class as gravity.
# Space-time numbers should feel odd. We expect numbers to be unique. Yet Nature plays unusual games with the numbers for particles such as boson versus fermion statistics. Space-time numbers may be rich enough to reflect these kinds of properties. This notebook represents a proof of concept.
| apache-2.0 |
kalbasit/eve | eve/tests/methods/common.py | 6 | 12086 | from datetime import datetime
import simplejson as json
from bson import ObjectId
from eve.methods.common import serialize
from eve.tests import TestBase
from eve.tests.test_settings import MONGO_DBNAME
from eve.utils import config
class TestSerializer(TestBase):
def test_serialize_subdocument(self):
# tests fix for #244, serialization of sub-documents.
schema = {'personal': {'type': 'dict',
'schema': {'best_friend': {'type': 'objectid'},
'born': {'type': 'datetime'}}},
'without_type': {}}
doc = {'personal': {'best_friend': '50656e4538345b39dd0414f0',
'born': 'Tue, 06 Nov 2012 10:33:31 GMT'},
'without_type': 'foo'}
with self.app.app_context():
serialized = serialize(doc, schema=schema)
self.assertTrue(
isinstance(serialized['personal']['best_friend'], ObjectId))
self.assertTrue(
isinstance(serialized['personal']['born'], datetime))
def test_mongo_serializes(self):
schema = {
'id': {'type': 'objectid'},
'date': {'type': 'datetime'},
'count': {'type': 'integer'},
'average': {'type': 'float'},
'dict_keyschema': {
'keyschema': {'type': 'objectid'}
},
'dict_valueschema': {
'valueschema': {'type': 'objectid'}
}
}
with self.app.app_context():
# Success
res = serialize(
{
'id': '50656e4538345b39dd0414f0',
'date': 'Tue, 06 Nov 2012 10:33:31 GMT',
'count': 42,
'average': 42.42,
'dict_keyschema': {
'foo1': '50656e4538345b39dd0414f0',
'foo2': '50656e4538345b39dd0414f0',
},
'dict_valueschema': {
'foo1': '50656e4538345b39dd0414f0',
'foo2': '50656e4538345b39dd0414f0',
}
},
schema=schema
)
self.assertTrue(isinstance(res['id'], ObjectId))
self.assertTrue(isinstance(res['date'], datetime))
self.assertTrue(isinstance(res['count'], int))
self.assertTrue(isinstance(res['average'], float))
ks = res['dict_keyschema']
self.assertTrue(isinstance(ks['foo1'], ObjectId))
self.assertTrue(isinstance(ks['foo2'], ObjectId))
ks = res['dict_valueschema']
self.assertTrue(isinstance(ks['foo1'], ObjectId))
self.assertTrue(isinstance(ks['foo2'], ObjectId))
def test_non_blocking_on_simple_field_serialization_exception(self):
schema = {
'extract_time': {'type': 'datetime'},
'date': {'type': 'datetime'},
'total': {'type': 'integer'}
}
with self.app.app_context():
# Success
res = serialize(
{
'extract_time': 'Tue, 06 Nov 2012 10:33:31 GMT',
'date': 'Tue, 06 Nov 2012 10:33:31 GMT',
'total': 'r123'
},
schema=schema
)
# this has been left untouched as it could not be serialized.
self.assertEqual(res['total'], 'r123')
# these have been both serialized.
self.assertTrue(isinstance(res['extract_time'], datetime))
self.assertTrue(isinstance(res['date'], datetime))
def test_serialize_lists_of_lists(self):
# serialize should handle list of lists of basic types
schema = {
'l_of_l': {
'type': 'list',
'schema': {
'type': 'list',
'schema': {
'type': 'objectid'
}
}
}
}
doc = {
'l_of_l': [
['50656e4538345b39dd0414f0', '50656e4538345b39dd0414f0'],
['50656e4538345b39dd0414f0', '50656e4538345b39dd0414f0']
]
}
with self.app.app_context():
serialized = serialize(doc, schema=schema)
for sublist in serialized['l_of_l']:
for item in sublist:
self.assertTrue(isinstance(item, ObjectId))
# serialize should handle list of lists of dicts
schema = {
'l_of_l': {
'type': 'list',
'schema': {
'type': 'list',
'schema': {
'type': 'dict',
'schema': {
'_id': {
'type': 'objectid'
}
}
}
}
}
}
doc = {
'l_of_l': [
[
{'_id': '50656e4538345b39dd0414f0'},
{'_id': '50656e4538345b39dd0414f0'}
],
[
{'_id': '50656e4538345b39dd0414f0'},
{'_id': '50656e4538345b39dd0414f0'}
],
]
}
with self.app.app_context():
serialized = serialize(doc, schema=schema)
for sublist in serialized['l_of_l']:
for item in sublist:
self.assertTrue(isinstance(item['_id'], ObjectId))
def test_serialize_null_dictionary(self):
# Serialization should continue after encountering a null value dict
# field. Field may be nullable, or error will be caught in validation.
schema = {
'nullable_dict': {
'type': 'dict',
'nullable': True,
'schema': {
'simple_field': {
'type': 'number'
}
}
}
}
doc = {
'nullable_dict': None
}
with self.app.app_context():
try:
serialize(doc, schema=schema)
except Exception:
self.assertTrue(False, "Serializing null dictionaries should "
"not raise an exception.")
class TestOpLogBase(TestBase):
def setUp(self):
super(TestOpLogBase, self).setUp()
self.test_field, self.test_value = 'ref', "1234567890123456789054321"
self.data = {self.test_field: self.test_value}
self.test_client = self.app.test_client()
self.headers = [(('Content-Type', 'application/json'))]
def oplog_reset(self):
self.app._init_oplog()
self.app.register_resource('oplog', self.domain['oplog'])
def oplog_get(self, url='/oplog'):
r = self.test_client.get(url)
return self.parse_response(r)
def assertOpLogEntry(self, entry, op):
self.assertTrue('r' in entry)
self.assertTrue('i' in entry)
self.assertTrue(config.LAST_UPDATED in entry)
self.assertTrue(config.DATE_CREATED in entry)
self.assertTrue('o' in entry)
self.assertEqual(entry['o'], op)
self.assertTrue('127.0.0.1' in entry['ip'])
if op in ('PATCH', 'PUT', 'DELETE'):
self.assertTrue('c' in entry)
class TestOpLogEndpointDisabled(TestOpLogBase):
def setUp(self):
super(TestOpLogEndpointDisabled, self).setUp()
self.app.config['OPLOG'] = True
self.oplog_reset()
def test_post_oplog(self):
r = self.test_client.post(self.known_resource_url,
data=json.dumps(self.data),
headers=self.headers,
environ_base={'REMOTE_ADDR': '127.0.0.1'})
# oplog endpoint is not available.
r, status = self.oplog_get()
self.assert404(status)
# however the oplog collection has been updated.
db = self.connection[MONGO_DBNAME]
cursor = db.oplog.find()
self.assertEqual(cursor.count(), 1)
self.assertOpLogEntry(cursor[0], 'POST')
class TestOpLogEndpointEnabled(TestOpLogBase):
def setUp(self):
super(TestOpLogEndpointEnabled, self).setUp()
self.app.config['OPLOG'] = True
self.app.config['OPLOG_ENDPOINT'] = 'oplog'
self.oplog_reset()
def test_post_oplog(self):
r = self.test_client.post(self.known_resource_url,
data=json.dumps(self.data),
headers=self.headers,
environ_base={'REMOTE_ADDR': '127.0.0.1'})
r, status = self.oplog_get()
self.assert200(status)
self.assertEqual(len(r['_items']), 1)
oplog_entry = r['_items'][0]
self.assertOpLogEntry(oplog_entry, 'POST')
def test_patch_oplog(self):
self.headers.append(('If-Match', self.item_etag))
r = self.test_client.patch(self.item_id_url,
data=json.dumps(self.data),
headers=self.headers,
environ_base={'REMOTE_ADDR': '127.0.0.1'})
r, status = self.oplog_get()
self.assert200(status)
self.assertEqual(len(r['_items']), 1)
oplog_entry = r['_items'][0]
self.assertOpLogEntry(oplog_entry, 'PATCH')
def test_put_oplog(self):
self.headers.append(('If-Match', self.item_etag))
r = self.test_client.put(self.item_id_url,
data=json.dumps(self.data),
headers=self.headers,
environ_base={'REMOTE_ADDR': '127.0.0.1'})
r, status = self.oplog_get()
self.assert200(status)
self.assertEqual(len(r['_items']), 1)
oplog_entry = r['_items'][0]
self.assertOpLogEntry(oplog_entry, 'PUT')
def test_put_oplog_does_not_alter_document(self):
""" Make sure we don't alter document ETag when performing an
oplog_push. See #590. """
self.headers.append(('If-Match', self.item_etag))
r = self.test_client.put(self.item_id_url,
data=json.dumps(self.data),
headers=self.headers,
environ_base={'REMOTE_ADDR': '127.0.0.1'})
etag1 = json.loads(r.get_data())['_etag']
etag2 = json.loads(
self.test_client.get(self.item_id_url).get_data())['_etag']
self.assertEqual(etag1, etag2)
def test_delete_oplog(self):
self.headers.append(('If-Match', self.item_etag))
r = self.test_client.delete(self.item_id_url,
headers=self.headers,
environ_base={'REMOTE_ADDR': '127.0.0.1'})
r, status = self.oplog_get()
self.assert200(status)
self.assertEqual(len(r['_items']), 1)
oplog_entry = r['_items'][0]
self.assertOpLogEntry(oplog_entry, 'DELETE')
def patch(self, url, data, headers=[], content_type='application/json'):
headers.append(('Content-Type', content_type))
headers.append(('If-Match', self.item_etag))
r = self.test_client.patch(url, data=json.dumps(data), headers=headers)
return self.parse_response(r)
def put(self, url, data, headers=[], content_type='application/json'):
headers.append(('Content-Type', content_type))
headers.append(('If-Match', self.item_etag))
r = self.test_client.put(url, data=json.dumps(data), headers=headers)
return self.parse_response(r)
class TestTickets(TestBase):
def test_ticket_681(self):
# See https://github.com/nicolaiarocci/eve/issues/681
with self.app.test_request_context('not_an_existing_endpoint'):
self.app.data.driver.db['again']
| bsd-3-clause |
40223139/203739test | static/Brython3.1.0-20150301-090019/Lib/xml/dom/minicompat.py | 781 | 3228 | """Python version compatibility support for minidom."""
# This module should only be imported using "import *".
#
# The following names are defined:
#
# NodeList -- lightest possible NodeList implementation
#
# EmptyNodeList -- lightest possible NodeList that is guaranteed to
# remain empty (immutable)
#
# StringTypes -- tuple of defined string types
#
# defproperty -- function used in conjunction with GetattrMagic;
# using these together is needed to make them work
# as efficiently as possible in both Python 2.2+
# and older versions. For example:
#
# class MyClass(GetattrMagic):
# def _get_myattr(self):
# return something
#
# defproperty(MyClass, "myattr",
# "return some value")
#
# For Python 2.2 and newer, this will construct a
# property object on the class, which avoids
# needing to override __getattr__(). It will only
# work for read-only attributes.
#
# For older versions of Python, inheriting from
# GetattrMagic will use the traditional
# __getattr__() hackery to achieve the same effect,
# but less efficiently.
#
# defproperty() should be used for each version of
# the relevant _get_<property>() function.
__all__ = ["NodeList", "EmptyNodeList", "StringTypes", "defproperty"]
import xml.dom
StringTypes = (str,)
class NodeList(list):
__slots__ = ()
def item(self, index):
if 0 <= index < len(self):
return self[index]
def _get_length(self):
return len(self)
def _set_length(self, value):
raise xml.dom.NoModificationAllowedErr(
"attempt to modify read-only attribute 'length'")
length = property(_get_length, _set_length,
doc="The number of nodes in the NodeList.")
def __getstate__(self):
return list(self)
def __setstate__(self, state):
self[:] = state
class EmptyNodeList(tuple):
__slots__ = ()
def __add__(self, other):
NL = NodeList()
NL.extend(other)
return NL
def __radd__(self, other):
NL = NodeList()
NL.extend(other)
return NL
def item(self, index):
return None
def _get_length(self):
return 0
def _set_length(self, value):
raise xml.dom.NoModificationAllowedErr(
"attempt to modify read-only attribute 'length'")
length = property(_get_length, _set_length,
doc="The number of nodes in the NodeList.")
def defproperty(klass, name, doc):
get = getattr(klass, ("_get_" + name))
def set(self, value, name=name):
raise xml.dom.NoModificationAllowedErr(
"attempt to modify read-only attribute " + repr(name))
assert not hasattr(klass, "_set_" + name), \
"expected not to find _set_" + name
prop = property(get, set, doc=doc)
setattr(klass, name, prop)
| gpl-3.0 |
linkhub-sdk/popbill.cashbill.example.py | updateEmailConfig.py | 1 | 1529 | # -*- coding: utf-8 -*-
# code for console Encoding difference. Dont' mind on it
import sys
import imp
imp.reload(sys)
try:
sys.setdefaultencoding('UTF8')
except Exception as E:
pass
import testValue
from popbill import CashbillService, PopbillException
cashbillService = CashbillService(testValue.LinkID, testValue.SecretKey)
cashbillService.IsTest = testValue.IsTest
cashbillService.IPRestrictOnOff = testValue.IPRestrictOnOff
cashbillService.UseStaticIP = testValue.UseStaticIP
cashbillService.UseLocalTimeYN = testValue.UseLocalTimeYN
'''
Updates whether each cash receipt related email item is sent or not.
- https://docs.popbill.com/cashbill/python/api#UpdateEmailConfig
Email send types:
CSH_ISSUE : email notifying the customer that a cash receipt has been issued.
CSH_CANCEL : email notifying the customer that a cash receipt issuance has been cancelled.
'''
try:
print("=" * 15 + " νκΈμμμ¦ λ©μΌμ μ‘μ¬λΆ μμ " + "=" * 15)
# Popbill member business registration number
CorpNum = testValue.testCorpNum
# Email send type
EmailType = 'CSH_ISSUE'
# Whether to send (True = send, False = do not send)
SendYN = True
# Popbill member user ID
UserID = testValue.testUserID
result = cashbillService.updateEmailConfig(CorpNum, EmailType, SendYN, UserID)
print("μ²λ¦¬κ²°κ³Ό : [%d] %s" % (result.code, result.message))
except PopbillException as PE:
print("Exception Occur : [%d] %s" % (PE.code, PE.message))
| mit |
revolutionaryG/phantomjs | src/qt/qtbase/src/3rdparty/freetype/src/tools/docmaker/docbeauty.py | 877 | 2642 | #!/usr/bin/env python
#
# DocBeauty (c) 2003, 2004, 2008 David Turner <david@freetype.org>
#
# This program is used to beautify the documentation comments used
# in the FreeType 2 public headers.
#
from sources import *
from content import *
from utils import *
import utils
import sys, os, time, string, getopt
content_processor = ContentProcessor()
def beautify_block( block ):
if block.content:
content_processor.reset()
markups = content_processor.process_content( block.content )
text = []
first = 1
for markup in markups:
text.extend( markup.beautify( first ) )
first = 0
# now beautify the documentation "borders" themselves
lines = [" /*************************************************************************"]
for l in text:
lines.append( " *" + l )
lines.append( " */" )
block.lines = lines
def usage():
print "\nDocBeauty 0.1 Usage information\n"
print " docbeauty [options] file1 [file2 ...]\n"
print "using the following options:\n"
print " -h : print this page"
print " -b : backup original files with the 'orig' extension"
print ""
print " --backup : same as -b"
def main( argv ):
"""main program loop"""
global output_dir
try:
opts, args = getopt.getopt( sys.argv[1:], \
"hb", \
["help", "backup"] )
except getopt.GetoptError:
usage()
sys.exit( 2 )
if args == []:
usage()
sys.exit( 1 )
# process options
#
output_dir = None
do_backup = None
for opt in opts:
if opt[0] in ( "-h", "--help" ):
usage()
sys.exit( 0 )
if opt[0] in ( "-b", "--backup" ):
do_backup = 1
# create context and processor
source_processor = SourceProcessor()
# retrieve the list of files to process
file_list = make_file_list( args )
for filename in file_list:
source_processor.parse_file( filename )
for block in source_processor.blocks:
beautify_block( block )
new_name = filename + ".new"
ok = None
try:
file = open( new_name, "wt" )
for block in source_processor.blocks:
for line in block.lines:
file.write( line )
file.write( "\n" )
file.close()
except:
ok = 0
# if called from the command line
#
if __name__ == '__main__':
main( sys.argv )
# eof
| bsd-3-clause |