repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
jelugbo/tundex | cms/djangoapps/contentstore/views/tests/test_course_index.py | 6 | 15799 | """
Unit tests for getting the list of courses and the course outline.
"""
import datetime
import json
import threading

import ddt
import lxml
import lxml.html
import pytz

from django.conf import settings

from contentstore.tests.utils import CourseTestCase
from contentstore.utils import reverse_course_url, add_instructor
from contentstore.views.access import has_course_access
from contentstore.views.course import course_outline_initial_state, _course_outline_json
from contentstore.views.item import create_xblock_info, VisibilityState
from course_action_state.managers import CourseRerunUIStateManager
from course_action_state.models import CourseRerunState
from opaque_keys.edx.locator import CourseLocator
from student.tests.factories import UserFactory
from util.date_utils import get_default_time_display
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory, check_mongo_calls, \
    mongo_uses_error_check
class TestCourseIndex(CourseTestCase):
    """
    Unit tests for getting the list of courses and the course outline.
    """
    def setUp(self):
        """
        Add a course with odd characters in the fields
        """
        super(TestCourseIndex, self).setUp()
        # had a problem where index showed course but has_access failed to retrieve it for non-staff
        self.odd_course = CourseFactory.create(
            org='test.org_1-2',
            number='test-2.3_course',
            display_name='dotted.course.name-2',
        )

    def check_index_and_outline(self, authed_client):
        """
        Test getting the list of courses and then pulling up their outlines
        """
        index_url = '/course/'
        index_response = authed_client.get(index_url, {}, HTTP_ACCEPT='text/html')
        parsed_html = lxml.html.fromstring(index_response.content)
        course_link_eles = parsed_html.find_class('course-link')
        # Expect at least the default course plus the odd course from setUp.
        self.assertGreaterEqual(len(course_link_eles), 2)
        for link in course_link_eles:
            self.assertRegexpMatches(
                link.get("href"),
                'course/{}'.format(settings.COURSE_KEY_PATTERN)
            )
            # now test that url
            outline_response = authed_client.get(link.get("href"), {}, HTTP_ACCEPT='text/html')
            # ensure it has the expected 2 self referential links
            outline_parsed = lxml.html.fromstring(outline_response.content)
            outline_link = outline_parsed.find_class('course-link')[0]
            self.assertEqual(outline_link.get("href"), link.get("href"))
            course_menu_link = outline_parsed.find_class('nav-course-courseware-outline')[0]
            self.assertEqual(course_menu_link.find("a").get("href"), link.get("href"))

    def test_is_staff_access(self):
        """
        Test that people with is_staff see the courses and can navigate into them
        """
        self.check_index_and_outline(self.client)

    def test_negative_conditions(self):
        """
        Test the error conditions for the access
        """
        outline_url = reverse_course_url('course_handler', self.course.id)
        # register a non-staff member and try to delete the course branch
        non_staff_client, _ = self.create_non_staff_authed_user_client()
        response = non_staff_client.delete(outline_url, {}, HTTP_ACCEPT='application/json')
        self.assertEqual(response.status_code, 403)

    def test_course_staff_access(self):
        """
        Make and register course_staff and ensure they can access the courses
        """
        course_staff_client, course_staff = self.create_non_staff_authed_user_client()
        for course in [self.course, self.odd_course]:
            permission_url = reverse_course_url('course_team_handler', course.id, kwargs={'email': course_staff.email})
            self.client.post(
                permission_url,
                data=json.dumps({"role": "staff"}),
                content_type="application/json",
                HTTP_ACCEPT="application/json",
            )
        # test access
        self.check_index_and_outline(course_staff_client)

    def test_json_responses(self):
        """
        Verify the JSON course outline returned for a freshly built course tree.
        """
        outline_url = reverse_course_url('course_handler', self.course.id)
        chapter = ItemFactory.create(parent_location=self.course.location, category='chapter', display_name="Week 1")
        lesson = ItemFactory.create(parent_location=chapter.location, category='sequential', display_name="Lesson 1")
        subsection = ItemFactory.create(parent_location=lesson.location, category='vertical', display_name='Subsection 1')
        ItemFactory.create(parent_location=subsection.location, category="video", display_name="My Video")

        resp = self.client.get(outline_url, HTTP_ACCEPT='application/json')
        json_response = json.loads(resp.content)

        # First spot check some values in the root response
        self.assertEqual(json_response['category'], 'course')
        self.assertEqual(json_response['id'], unicode(self.course.location))
        self.assertEqual(json_response['display_name'], self.course.display_name)
        self.assertTrue(json_response['published'])
        self.assertIsNone(json_response['visibility_state'])

        # Now verify the first child
        children = json_response['child_info']['children']
        self.assertTrue(len(children) > 0)
        first_child_response = children[0]
        self.assertEqual(first_child_response['category'], 'chapter')
        self.assertEqual(first_child_response['id'], unicode(chapter.location))
        self.assertEqual(first_child_response['display_name'], 'Week 1')
        # Fixed: this previously re-checked the root's 'published' flag;
        # the child's flag is what this section is meant to verify.
        self.assertTrue(first_child_response['published'])
        self.assertEqual(first_child_response['visibility_state'], VisibilityState.unscheduled)
        self.assertTrue(len(first_child_response['child_info']['children']) > 0)

        # Finally, validate the entire response for consistency
        self.assert_correct_json_response(json_response)

    def test_notifications_handler_get(self):
        """
        Verify GET of a course rerun notification, both before and after one exists.
        """
        state = CourseRerunUIStateManager.State.FAILED
        action = CourseRerunUIStateManager.ACTION
        should_display = True

        # try when no notification exists
        notification_url = reverse_course_url('course_notifications_handler', self.course.id, kwargs={
            'action_state_id': 1,
        })
        resp = self.client.get(notification_url, HTTP_ACCEPT='application/json')

        # verify that we get an error out
        self.assertEqual(resp.status_code, 400)

        # create a test notification
        rerun_state = CourseRerunState.objects.update_state(course_key=self.course.id, new_state=state, allow_not_found=True)
        CourseRerunState.objects.update_should_display(entry_id=rerun_state.id, user=UserFactory(), should_display=should_display)

        # try to get information on this notification
        notification_url = reverse_course_url('course_notifications_handler', self.course.id, kwargs={
            'action_state_id': rerun_state.id,
        })
        resp = self.client.get(notification_url, HTTP_ACCEPT='application/json')
        json_response = json.loads(resp.content)
        self.assertEqual(json_response['state'], state)
        self.assertEqual(json_response['action'], action)
        self.assertEqual(json_response['should_display'], should_display)

    def test_notifications_handler_dismiss(self):
        """
        Verify that dismissing a rerun notification deletes it and revokes access.
        """
        state = CourseRerunUIStateManager.State.FAILED
        should_display = True
        rerun_course_key = CourseLocator(org='testx', course='test_course', run='test_run')

        # add an instructor to this course
        user2 = UserFactory()
        add_instructor(rerun_course_key, self.user, user2)

        # create a test notification
        rerun_state = CourseRerunState.objects.update_state(course_key=rerun_course_key, new_state=state, allow_not_found=True)
        CourseRerunState.objects.update_should_display(entry_id=rerun_state.id, user=user2, should_display=should_display)

        # try to dismiss this notification
        notification_dismiss_url = reverse_course_url('course_notifications_handler', self.course.id, kwargs={
            'action_state_id': rerun_state.id,
        })
        resp = self.client.delete(notification_dismiss_url)
        self.assertEqual(resp.status_code, 200)
        with self.assertRaises(CourseRerunState.DoesNotExist):
            # delete notifications that are dismissed
            CourseRerunState.objects.get(id=rerun_state.id)
        self.assertFalse(has_course_access(user2, rerun_course_key))

    def assert_correct_json_response(self, json_response):
        """
        Asserts that the JSON response is syntactically consistent
        """
        self.assertIsNotNone(json_response['display_name'])
        self.assertIsNotNone(json_response['id'])
        self.assertIsNotNone(json_response['category'])
        self.assertTrue(json_response['published'])
        # Recurse into children, if any are present.
        if json_response.get('child_info', None):
            for child_response in json_response['child_info']['children']:
                self.assert_correct_json_response(child_response)
class TestCourseOutline(CourseTestCase):
    """
    Unit tests for the course outline.
    """
    def setUp(self):
        """
        Set up the for the course outline tests.
        """
        super(TestCourseOutline, self).setUp()
        # Build a minimal chapter -> sequential -> vertical -> video tree.
        self.chapter = ItemFactory.create(
            parent_location=self.course.location, category='chapter', display_name="Week 1"
        )
        self.sequential = ItemFactory.create(
            parent_location=self.chapter.location, category='sequential', display_name="Lesson 1"
        )
        self.vertical = ItemFactory.create(
            parent_location=self.sequential.location, category='vertical', display_name='Subsection 1'
        )
        self.video = ItemFactory.create(
            parent_location=self.vertical.location, category="video", display_name="My Video"
        )

    def test_json_responses(self):
        """
        Verify the JSON responses returned for the course.
        """
        outline_url = reverse_course_url('course_handler', self.course.id)
        resp = self.client.get(outline_url, HTTP_ACCEPT='application/json')
        json_response = json.loads(resp.content)

        # First spot check some values in the root response
        self.assertEqual(json_response['category'], 'course')
        self.assertEqual(json_response['id'], unicode(self.course.location))
        self.assertEqual(json_response['display_name'], self.course.display_name)
        self.assertTrue(json_response['published'])
        self.assertIsNone(json_response['visibility_state'])

        # Now verify the first child
        children = json_response['child_info']['children']
        self.assertTrue(len(children) > 0)
        first_child_response = children[0]
        self.assertEqual(first_child_response['category'], 'chapter')
        self.assertEqual(first_child_response['id'], unicode(self.chapter.location))
        self.assertEqual(first_child_response['display_name'], 'Week 1')
        # Fixed: this previously re-checked the root's 'published' flag;
        # the child's flag is what this section is meant to verify.
        self.assertTrue(first_child_response['published'])
        self.assertEqual(first_child_response['visibility_state'], VisibilityState.unscheduled)
        self.assertTrue(len(first_child_response['child_info']['children']) > 0)

        # Finally, validate the entire response for consistency
        self.assert_correct_json_response(json_response)

    def assert_correct_json_response(self, json_response):
        """
        Asserts that the JSON response is syntactically consistent
        """
        self.assertIsNotNone(json_response['display_name'])
        self.assertIsNotNone(json_response['id'])
        self.assertIsNotNone(json_response['category'])
        self.assertTrue(json_response['published'])
        # Recurse into children, if any are present.
        if json_response.get('child_info', None):
            for child_response in json_response['child_info']['children']:
                self.assert_correct_json_response(child_response)

    def test_course_outline_initial_state(self):
        """
        Verify course_outline_initial_state for missing and existing locators.
        """
        course_module = modulestore().get_item(self.course.location)
        course_structure = create_xblock_info(
            course_module,
            include_child_info=True,
            include_children_predicate=lambda xblock: not xblock.category == 'vertical'
        )

        # Verify that None is returned for a non-existent locator
        self.assertIsNone(course_outline_initial_state('no-such-locator', course_structure))

        # Verify that the correct initial state is returned for the test chapter
        chapter_locator = unicode(self.chapter.location)
        initial_state = course_outline_initial_state(chapter_locator, course_structure)
        self.assertEqual(initial_state['locator_to_show'], chapter_locator)
        expanded_locators = initial_state['expanded_locators']
        self.assertIn(unicode(self.sequential.location), expanded_locators)
        self.assertIn(unicode(self.vertical.location), expanded_locators)

    def test_start_date_on_page(self):
        """
        Verify that the course start date is included on the course outline page.
        """
        def _get_release_date(response):
            """Return the release date from the course page"""
            parsed_html = lxml.html.fromstring(response.content)
            return parsed_html.find_class('course-status')[0].find_class('status-release-value')[0].text_content()

        def _assert_settings_link_present(response):
            """
            Asserts there's a course settings link on the course page by the course release date.
            """
            parsed_html = lxml.html.fromstring(response.content)
            settings_link = parsed_html.find_class('course-status')[0].find_class('action-edit')[0].find('a')
            self.assertIsNotNone(settings_link)
            self.assertEqual(settings_link.get('href'), reverse_course_url('settings_handler', self.course.id))

        outline_url = reverse_course_url('course_handler', self.course.id)
        response = self.client.get(outline_url, {}, HTTP_ACCEPT='text/html')

        # A course with the default release date should display as "Unscheduled"
        self.assertEqual(_get_release_date(response), 'Unscheduled')
        _assert_settings_link_present(response)

        # After an explicit start date is set, it should be rendered instead.
        self.course.start = datetime.datetime(2014, 1, 1, tzinfo=pytz.utc)
        modulestore().update_item(self.course, ModuleStoreEnum.UserID.test)
        response = self.client.get(outline_url, {}, HTTP_ACCEPT='text/html')
        self.assertEqual(_get_release_date(response), get_default_time_display(self.course.start))
        _assert_settings_link_present(response)
@ddt.ddt
class OutlinePerfTest(TestCourseOutline):
    """
    Checks that concurrent outline fetches keep mongo query counts linear
    in the number of threads (i.e. no per-thread amplification).
    """
    def setUp(self):
        # Build the fixture course in the split modulestore.
        with modulestore().default_store(ModuleStoreEnum.Type.split):
            super(OutlinePerfTest, self).setUp()

    @ddt.data(1, 2, 4, 8)
    def test_query_counts(self, num_threads):
        """
        Test that increasing threads does not increase query counts
        """
        def fetch_outline():
            """Fetch the course outline JSON inside a bulk operation."""
            with modulestore().default_store(ModuleStoreEnum.Type.split):
                with modulestore().bulk_operations(self.course.id):
                    course = modulestore().get_course(self.course.id, depth=0)
                    return _course_outline_json(None, course)

        queries_per_thread = 4
        with check_mongo_calls(queries_per_thread * num_threads, 0):
            workers = [threading.Thread(target=fetch_outline) for __ in xrange(num_threads)]
            for worker in workers:
                worker.start()
            # now wait until they all finish
            for worker in workers:
                worker.join()
| agpl-3.0 |
lra/boto | boto/dynamodb/layer2.py | 135 | 33814 | # Copyright (c) 2011 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2011 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.dynamodb.layer1 import Layer1
from boto.dynamodb.table import Table
from boto.dynamodb.schema import Schema
from boto.dynamodb.item import Item
from boto.dynamodb.batch import BatchList, BatchWriteList
from boto.dynamodb.types import get_dynamodb_type, Dynamizer, \
LossyFloatDynamizer, NonBooleanDynamizer
class TableGenerator(object):
    """
    This is an object that wraps up the table_generator function.
    The only real reason to have this is that we want to be able
    to accumulate and return the ConsumedCapacityUnits element that
    is part of each response.

    :ivar last_evaluated_key: A sequence representing the key(s)
        of the item last evaluated, or None if no additional
        results are available.

    :ivar remaining: The remaining quantity of results requested.

    :ivar table: The table to which the call was made.
    """
    def __init__(self, table, callable, remaining, item_class, kwargs):
        # `callable` is the Layer1 query/scan function to page through.
        self.table = table
        self.callable = callable
        # -1 means "no explicit limit"; iteration then continues until the
        # service stops returning a LastEvaluatedKey.
        self.remaining = -1 if remaining is None else remaining
        self.item_class = item_class
        self.kwargs = kwargs
        self._consumed_units = 0.0
        self.last_evaluated_key = None
        self._count = 0
        self._scanned_count = 0
        # Cached most-recent service response; None until the first call.
        self._response = None

    @property
    def count(self):
        """
        The total number of items retrieved thus far. This value changes with
        iteration and even when issuing a call with count=True, it is necessary
        to complete the iteration to assert an accurate count value.
        """
        # Touch `response` to force at least one service call.
        self.response
        return self._count

    @property
    def scanned_count(self):
        """
        As above, but representing the total number of items scanned by
        DynamoDB, without regard to any filters.
        """
        self.response
        return self._scanned_count

    @property
    def consumed_units(self):
        """
        Returns a float representing the ConsumedCapacityUnits accumulated.
        """
        self.response
        return self._consumed_units

    @property
    def response(self):
        """
        The current response to the call from DynamoDB.

        Lazily issues the first request if none has been made yet.
        """
        return self.next_response() if self._response is None else self._response

    def next_response(self):
        """
        Issue a call and return the result. You can invoke this method
        while iterating over the TableGenerator in order to skip to the
        next "page" of results.
        """
        # preserve any existing limit in case the user alters self.remaining
        limit = self.kwargs.get('limit')
        if (self.remaining > 0 and (limit is None or limit > self.remaining)):
            self.kwargs['limit'] = self.remaining
        self._response = self.callable(**self.kwargs)
        # Restore the caller-supplied limit for subsequent pages.
        self.kwargs['limit'] = limit
        # Accumulate the bookkeeping counters exposed via the properties above.
        self._consumed_units += self._response.get('ConsumedCapacityUnits', 0.0)
        self._count += self._response.get('Count', 0)
        self._scanned_count += self._response.get('ScannedCount', 0)
        # at the expense of a possibly gratuitous dynamize, ensure that
        # early generator termination won't result in bad LEK values
        if 'LastEvaluatedKey' in self._response:
            lek = self._response['LastEvaluatedKey']
            esk = self.table.layer2.dynamize_last_evaluated_key(lek)
            self.kwargs['exclusive_start_key'] = esk
            # Expose the LEK to callers as a (hash,) or (hash, range) tuple.
            lektuple = (lek['HashKeyElement'],)
            if 'RangeKeyElement' in lek:
                lektuple += (lek['RangeKeyElement'],)
            self.last_evaluated_key = lektuple
        else:
            self.last_evaluated_key = None
        return self._response

    def __iter__(self):
        # NOTE: the `response is not self._response` identity checks detect
        # that the caller invoked next_response() mid-iteration (skipping a
        # page); in that case the stale page's items are abandoned.
        while self.remaining != 0:
            response = self.response
            for item in response.get('Items', []):
                self.remaining -= 1
                yield self.item_class(self.table, attrs=item)
                if self.remaining == 0:
                    break
                if response is not self._response:
                    break
            else:
                # Exhausted this page normally; fetch the next page if the
                # service indicated more results are available.
                if self.last_evaluated_key is not None:
                    self.next_response()
                    continue
                break
            if response is not self._response:
                continue
            break
class Layer2(object):
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
debug=0, security_token=None, region=None,
validate_certs=True, dynamizer=LossyFloatDynamizer,
profile_name=None):
self.layer1 = Layer1(aws_access_key_id, aws_secret_access_key,
is_secure, port, proxy, proxy_port,
debug, security_token, region,
validate_certs=validate_certs,
profile_name=profile_name)
self.dynamizer = dynamizer()
def use_decimals(self, use_boolean=False):
"""
Use the ``decimal.Decimal`` type for encoding/decoding numeric types.
By default, ints/floats are used to represent numeric types
('N', 'NS') received from DynamoDB. Using the ``Decimal``
type is recommended to prevent loss of precision.
"""
# Eventually this should be made the default dynamizer.
self.dynamizer = Dynamizer() if use_boolean else NonBooleanDynamizer()
def dynamize_attribute_updates(self, pending_updates):
"""
Convert a set of pending item updates into the structure
required by Layer1.
"""
d = {}
for attr_name in pending_updates:
action, value = pending_updates[attr_name]
if value is None:
# DELETE without an attribute value
d[attr_name] = {"Action": action}
else:
d[attr_name] = {"Action": action,
"Value": self.dynamizer.encode(value)}
return d
def dynamize_item(self, item):
d = {}
for attr_name in item:
d[attr_name] = self.dynamizer.encode(item[attr_name])
return d
def dynamize_range_key_condition(self, range_key_condition):
"""
Convert a layer2 range_key_condition parameter into the
structure required by Layer1.
"""
return range_key_condition.to_dict()
def dynamize_scan_filter(self, scan_filter):
"""
Convert a layer2 scan_filter parameter into the
structure required by Layer1.
"""
d = None
if scan_filter:
d = {}
for attr_name in scan_filter:
condition = scan_filter[attr_name]
d[attr_name] = condition.to_dict()
return d
def dynamize_expected_value(self, expected_value):
"""
Convert an expected_value parameter into the data structure
required for Layer1.
"""
d = None
if expected_value:
d = {}
for attr_name in expected_value:
attr_value = expected_value[attr_name]
if attr_value is True:
attr_value = {'Exists': True}
elif attr_value is False:
attr_value = {'Exists': False}
else:
val = self.dynamizer.encode(expected_value[attr_name])
attr_value = {'Value': val}
d[attr_name] = attr_value
return d
def dynamize_last_evaluated_key(self, last_evaluated_key):
"""
Convert a last_evaluated_key parameter into the data structure
required for Layer1.
"""
d = None
if last_evaluated_key:
hash_key = last_evaluated_key['HashKeyElement']
d = {'HashKeyElement': self.dynamizer.encode(hash_key)}
if 'RangeKeyElement' in last_evaluated_key:
range_key = last_evaluated_key['RangeKeyElement']
d['RangeKeyElement'] = self.dynamizer.encode(range_key)
return d
def build_key_from_values(self, schema, hash_key, range_key=None):
"""
Build a Key structure to be used for accessing items
in Amazon DynamoDB. This method takes the supplied hash_key
and optional range_key and validates them against the
schema. If there is a mismatch, a TypeError is raised.
Otherwise, a Python dict version of a Amazon DynamoDB Key
data structure is returned.
:type hash_key: int|float|str|unicode|Binary
:param hash_key: The hash key of the item you are looking for.
The type of the hash key should match the type defined in
the schema.
:type range_key: int|float|str|unicode|Binary
:param range_key: The range key of the item your are looking for.
This should be supplied only if the schema requires a
range key. The type of the range key should match the
type defined in the schema.
"""
dynamodb_key = {}
dynamodb_value = self.dynamizer.encode(hash_key)
if list(dynamodb_value.keys())[0] != schema.hash_key_type:
msg = 'Hashkey must be of type: %s' % schema.hash_key_type
raise TypeError(msg)
dynamodb_key['HashKeyElement'] = dynamodb_value
if range_key is not None:
dynamodb_value = self.dynamizer.encode(range_key)
if list(dynamodb_value.keys())[0] != schema.range_key_type:
msg = 'RangeKey must be of type: %s' % schema.range_key_type
raise TypeError(msg)
dynamodb_key['RangeKeyElement'] = dynamodb_value
return dynamodb_key
def new_batch_list(self):
"""
Return a new, empty :class:`boto.dynamodb.batch.BatchList`
object.
"""
return BatchList(self)
def new_batch_write_list(self):
"""
Return a new, empty :class:`boto.dynamodb.batch.BatchWriteList`
object.
"""
return BatchWriteList(self)
def list_tables(self, limit=None):
"""
Return a list of the names of all tables associated with the
current account and region.
:type limit: int
:param limit: The maximum number of tables to return.
"""
tables = []
start_table = None
while not limit or len(tables) < limit:
this_round_limit = None
if limit:
this_round_limit = limit - len(tables)
this_round_limit = min(this_round_limit, 100)
result = self.layer1.list_tables(limit=this_round_limit, start_table=start_table)
tables.extend(result.get('TableNames', []))
start_table = result.get('LastEvaluatedTableName', None)
if not start_table:
break
return tables
def describe_table(self, name):
"""
Retrieve information about an existing table.
:type name: str
:param name: The name of the desired table.
"""
return self.layer1.describe_table(name)
def table_from_schema(self, name, schema):
"""
Create a Table object from a schema.
This method will create a Table object without
making any API calls. If you know the name and schema
of the table, you can use this method instead of
``get_table``.
Example usage::
table = layer2.table_from_schema(
'tablename',
Schema.create(hash_key=('foo', 'N')))
:type name: str
:param name: The name of the table.
:type schema: :class:`boto.dynamodb.schema.Schema`
:param schema: The schema associated with the table.
:rtype: :class:`boto.dynamodb.table.Table`
:return: A Table object representing the table.
"""
return Table.create_from_schema(self, name, schema)
def get_table(self, name):
"""
Retrieve the Table object for an existing table.
:type name: str
:param name: The name of the desired table.
:rtype: :class:`boto.dynamodb.table.Table`
:return: A Table object representing the table.
"""
response = self.layer1.describe_table(name)
return Table(self, response)
lookup = get_table
def create_table(self, name, schema, read_units, write_units):
"""
Create a new Amazon DynamoDB table.
:type name: str
:param name: The name of the desired table.
:type schema: :class:`boto.dynamodb.schema.Schema`
:param schema: The Schema object that defines the schema used
by this table.
:type read_units: int
:param read_units: The value for ReadCapacityUnits.
:type write_units: int
:param write_units: The value for WriteCapacityUnits.
:rtype: :class:`boto.dynamodb.table.Table`
:return: A Table object representing the new Amazon DynamoDB table.
"""
response = self.layer1.create_table(name, schema.dict,
{'ReadCapacityUnits': read_units,
'WriteCapacityUnits': write_units})
return Table(self, response)
def update_throughput(self, table, read_units, write_units):
"""
Update the ProvisionedThroughput for the Amazon DynamoDB Table.
:type table: :class:`boto.dynamodb.table.Table`
:param table: The Table object whose throughput is being updated.
:type read_units: int
:param read_units: The new value for ReadCapacityUnits.
:type write_units: int
:param write_units: The new value for WriteCapacityUnits.
"""
response = self.layer1.update_table(table.name,
{'ReadCapacityUnits': read_units,
'WriteCapacityUnits': write_units})
table.update_from_response(response)
def delete_table(self, table):
"""
Delete this table and all items in it. After calling this
the Table objects status attribute will be set to 'DELETING'.
:type table: :class:`boto.dynamodb.table.Table`
:param table: The Table object that is being deleted.
"""
response = self.layer1.delete_table(table.name)
table.update_from_response(response)
def create_schema(self, hash_key_name, hash_key_proto_value,
range_key_name=None, range_key_proto_value=None):
"""
Create a Schema object used when creating a Table.
:type hash_key_name: str
:param hash_key_name: The name of the HashKey for the schema.
:type hash_key_proto_value: int|long|float|str|unicode|Binary
:param hash_key_proto_value: A sample or prototype of the type
of value you want to use for the HashKey. Alternatively,
you can also just pass in the Python type (e.g. int, float, etc.).
:type range_key_name: str
:param range_key_name: The name of the RangeKey for the schema.
This parameter is optional.
:type range_key_proto_value: int|long|float|str|unicode|Binary
:param range_key_proto_value: A sample or prototype of the type
of value you want to use for the RangeKey. Alternatively,
you can also pass in the Python type (e.g. int, float, etc.)
This parameter is optional.
"""
hash_key = (hash_key_name, get_dynamodb_type(hash_key_proto_value))
if range_key_name and range_key_proto_value is not None:
range_key = (range_key_name,
get_dynamodb_type(range_key_proto_value))
else:
range_key = None
return Schema.create(hash_key, range_key)
def get_item(self, table, hash_key, range_key=None,
attributes_to_get=None, consistent_read=False,
item_class=Item):
"""
Retrieve an existing item from the table.
:type table: :class:`boto.dynamodb.table.Table`
:param table: The Table object from which the item is retrieved.
:type hash_key: int|long|float|str|unicode|Binary
:param hash_key: The HashKey of the requested item. The
type of the value must match the type defined in the
schema for the table.
:type range_key: int|long|float|str|unicode|Binary
:param range_key: The optional RangeKey of the requested item.
The type of the value must match the type defined in the
schema for the table.
:type attributes_to_get: list
:param attributes_to_get: A list of attribute names.
If supplied, only the specified attribute names will
be returned. Otherwise, all attributes will be returned.
:type consistent_read: bool
:param consistent_read: If True, a consistent read
request is issued. Otherwise, an eventually consistent
request is issued.
:type item_class: Class
:param item_class: Allows you to override the class used
to generate the items. This should be a subclass of
:class:`boto.dynamodb.item.Item`
"""
key = self.build_key_from_values(table.schema, hash_key, range_key)
response = self.layer1.get_item(table.name, key,
attributes_to_get, consistent_read,
object_hook=self.dynamizer.decode)
item = item_class(table, hash_key, range_key, response['Item'])
if 'ConsumedCapacityUnits' in response:
item.consumed_units = response['ConsumedCapacityUnits']
return item
def batch_get_item(self, batch_list):
"""
Return a set of attributes for a multiple items in
multiple tables using their primary keys.
:type batch_list: :class:`boto.dynamodb.batch.BatchList`
:param batch_list: A BatchList object which consists of a
list of :class:`boto.dynamoddb.batch.Batch` objects.
Each Batch object contains the information about one
batch of objects that you wish to retrieve in this
request.
"""
request_items = batch_list.to_dict()
return self.layer1.batch_get_item(request_items,
object_hook=self.dynamizer.decode)
def batch_write_item(self, batch_list):
"""
Performs multiple Puts and Deletes in one batch.
:type batch_list: :class:`boto.dynamodb.batch.BatchWriteList`
:param batch_list: A BatchWriteList object which consists of a
list of :class:`boto.dynamoddb.batch.BatchWrite` objects.
Each Batch object contains the information about one
batch of objects that you wish to put or delete.
"""
request_items = batch_list.to_dict()
return self.layer1.batch_write_item(request_items,
object_hook=self.dynamizer.decode)
def put_item(self, item, expected_value=None, return_values=None):
"""
Store a new item or completely replace an existing item
in Amazon DynamoDB.
:type item: :class:`boto.dynamodb.item.Item`
:param item: The Item to write to Amazon DynamoDB.
:type expected_value: dict
:param expected_value: A dictionary of name/value pairs that you expect.
This dictionary should have name/value pairs where the name
is the name of the attribute and the value is either the value
you are expecting or False if you expect the attribute not to
exist.
:type return_values: str
:param return_values: Controls the return of attribute
name-value pairs before then were changed. Possible
values are: None or 'ALL_OLD'. If 'ALL_OLD' is
specified and the item is overwritten, the content
of the old item is returned.
"""
expected_value = self.dynamize_expected_value(expected_value)
response = self.layer1.put_item(item.table.name,
self.dynamize_item(item),
expected_value, return_values,
object_hook=self.dynamizer.decode)
if 'ConsumedCapacityUnits' in response:
item.consumed_units = response['ConsumedCapacityUnits']
return response
def update_item(self, item, expected_value=None, return_values=None):
    """
    Commit the pending attribute updates queued on *item* to
    Amazon DynamoDB.

    :type item: :class:`boto.dynamodb.item.Item`
    :param item: The Item to update.  You are expected to have called
        add_attribute, put_attribute and/or delete_attribute on this
        Item first; those queued changes are what gets written here.

    :type expected_value: dict
    :param expected_value: Maps attribute names either to the value
        you expect the attribute to have, or to False when you expect
        the attribute not to exist at all.

    :type return_values: str
    :param return_values: Controls which attribute name/value pairs
        are reported back.  Valid values: None, 'ALL_OLD',
        'UPDATED_OLD', 'ALL_NEW' or 'UPDATED_NEW'.  'ALL_OLD' returns
        the old item's content when it is overwritten; 'ALL_NEW'
        returns all attributes of the new version; 'UPDATED_NEW'
        returns only the new versions of the updated attributes.
    """
    expected = self.dynamize_expected_value(expected_value)
    key = self.build_key_from_values(item.table.schema,
                                     item.hash_key, item.range_key)
    updates = self.dynamize_attribute_updates(item._updates)
    response = self.layer1.update_item(item.table.name, key, updates,
                                       expected, return_values,
                                       object_hook=self.dynamizer.decode)
    # The queued changes have been applied server side; start fresh.
    item._updates.clear()
    if 'ConsumedCapacityUnits' in response:
        item.consumed_units = response['ConsumedCapacityUnits']
    return response
def delete_item(self, item, expected_value=None, return_values=None):
    """
    Delete an item from Amazon DynamoDB.

    :type item: :class:`boto.dynamodb.item.Item`
    :param item: The Item to delete from Amazon DynamoDB.

    :type expected_value: dict
    :param expected_value: Maps attribute names either to the value
        you expect the attribute to have, or to False when you expect
        the attribute not to exist at all.

    :type return_values: str
    :param return_values: Controls which attribute name/value pairs
        are reported back from before the delete.  Valid values are
        None and 'ALL_OLD'.  With 'ALL_OLD' the content of the deleted
        item is returned.
    """
    expected = self.dynamize_expected_value(expected_value)
    key = self.build_key_from_values(item.table.schema,
                                     item.hash_key, item.range_key)
    return self.layer1.delete_item(item.table.name, key,
                                   expected=expected,
                                   return_values=return_values,
                                   object_hook=self.dynamizer.decode)
def query(self, table, hash_key, range_key_condition=None,
          attributes_to_get=None, request_limit=None,
          max_results=None, consistent_read=False,
          scan_index_forward=True, exclusive_start_key=None,
          item_class=Item, count=False):
    """
    Perform a query on the table.

    :type table: :class:`boto.dynamodb.table.Table`
    :param table: The Table object that is being queried.

    :type hash_key: int|long|float|str|unicode|Binary
    :param hash_key: The HashKey of the requested item.  Its type must
        match the type defined in the schema for the table.

    :type range_key_condition: :class:`boto.dynamodb.condition.Condition`
    :param range_key_condition: A Condition object, one of
        EQ|LE|LT|GE|GT|BEGINS_WITH|BETWEEN.  Only BETWEEN expects two
        values; every other condition takes a single value.

    :type attributes_to_get: list
    :param attributes_to_get: A list of attribute names to return.
        When omitted, all attributes are returned.

    :type request_limit: int
    :param request_limit: The maximum number of items retrieved from
        Amazon DynamoDB per request; useful to throttle against the
        table's provisioned throughput.  By default each request
        retrieves as many results as possible.

    :type max_results: int
    :param max_results: The overall cap on the number of results the
        returned generator will yield, regardless of how many are
        actually available.

    :type consistent_read: bool
    :param consistent_read: If True, a consistent read request is
        issued.  Otherwise, an eventually consistent request is issued.

    :type scan_index_forward: bool
    :param scan_index_forward: Forward (True, the default) or backward
        traversal of the index.

    :type count: bool
    :param count: If True, Amazon DynamoDB returns only the total
        number of matching items; no items are returned and the count
        is accessible as the ``count`` attribute of the returned object.

    :type exclusive_start_key: list or tuple
    :param exclusive_start_key: Primary key of the item from which to
        continue an earlier query (provided as the LastEvaluatedKey in
        that query).

    :type item_class: Class
    :param item_class: Class used to generate the returned items; it
        should be a subclass of :class:`boto.dynamodb.item.Item`.

    :rtype: :class:`boto.dynamodb.layer2.TableGenerator`
    """
    # Translate the high-level condition and start key into wire format.
    rkc = (self.dynamize_range_key_condition(range_key_condition)
           if range_key_condition else None)
    esk = (self.build_key_from_values(table.schema, *exclusive_start_key)
           if exclusive_start_key else None)
    kwargs = dict(table_name=table.name,
                  hash_key_value=self.dynamizer.encode(hash_key),
                  range_key_conditions=rkc,
                  attributes_to_get=attributes_to_get,
                  limit=request_limit,
                  count=count,
                  consistent_read=consistent_read,
                  scan_index_forward=scan_index_forward,
                  exclusive_start_key=esk,
                  object_hook=self.dynamizer.decode)
    # The generator drives layer1.query page by page up to max_results.
    return TableGenerator(table, self.layer1.query,
                          max_results, item_class, kwargs)
def scan(self, table, scan_filter=None,
         attributes_to_get=None, request_limit=None, max_results=None,
         exclusive_start_key=None, item_class=Item, count=False):
    """
    Perform a scan of DynamoDB.

    :type table: :class:`boto.dynamodb.table.Table`
    :param table: The Table object that is being scanned.

    :type scan_filter: A dict
    :param scan_filter: A dictionary where the key is the
        attribute name and the value is a
        :class:`boto.dynamodb.condition.Condition` object.
        Valid Condition objects include:

         * EQ - equal (1)
         * NE - not equal (1)
         * LE - less than or equal (1)
         * LT - less than (1)
         * GE - greater than or equal (1)
         * GT - greater than (1)
         * NOT_NULL - attribute exists (0, use None)
         * NULL - attribute does not exist (0, use None)
         * CONTAINS - substring or value in list (1)
         * NOT_CONTAINS - absence of substring or value in list (1)
         * BEGINS_WITH - substring prefix (1)
         * IN - exact match in list (N)
         * BETWEEN - >= first value, <= second value (2)

    :type attributes_to_get: list
    :param attributes_to_get: A list of attribute names.
        If supplied, only the specified attribute names will
        be returned.  Otherwise, all attributes will be returned.

    :type request_limit: int
    :param request_limit: The maximum number of items to retrieve
        from Amazon DynamoDB on each request.  You may want to set
        a specific request_limit based on the provisioned throughput
        of your table.  The default behavior is to retrieve as many
        results as possible per request.

    :type max_results: int
    :param max_results: The maximum number of results that will
        be retrieved from Amazon DynamoDB in total.  For example,
        if you only wanted to see the first 100 results from the
        query, regardless of how many were actually available, you
        could set max_results to 100 and the generator returned
        from the query method will only yield 100 results max.

    :type count: bool
    :param count: If True, Amazon DynamoDB returns a total
        number of items for the Scan operation, even if the
        operation has no matching items for the assigned filter.
        If count is True, the actual items are not returned and
        the count is accessible as the ``count`` attribute of
        the returned object.

    :type exclusive_start_key: list or tuple
    :param exclusive_start_key: Primary key of the item from
        which to continue an earlier query.  This would be
        provided as the LastEvaluatedKey in that query.

    :type item_class: Class
    :param item_class: Allows you to override the class used
        to generate the items.  This should be a subclass of
        :class:`boto.dynamodb.item.Item`

    :rtype: :class:`boto.dynamodb.layer2.TableGenerator`
    """
    # Encode the resume key into the low-level format; None means
    # start the scan from the beginning of the table.
    if exclusive_start_key:
        esk = self.build_key_from_values(table.schema,
                                         *exclusive_start_key)
    else:
        esk = None
    kwargs = {'table_name': table.name,
              'scan_filter': self.dynamize_scan_filter(scan_filter),
              'attributes_to_get': attributes_to_get,
              'limit': request_limit,
              'count': count,
              'exclusive_start_key': esk,
              'object_hook': self.dynamizer.decode}
    # The generator lazily pages through layer1.scan results up to
    # max_results items.
    return TableGenerator(table, self.layer1.scan,
                          max_results, item_class, kwargs)
| mit |
riyazwalikar/pythonscripts | wp_attachenum/wp_attachenum.py | 1 | 1546 | import sys
import requests
from bs4 import BeautifulSoup
def enumusers(url, p):
    """Fetch <url>/?attachment_id=<p> and print any attachment image
    source or link target found on the page."""
    # Redirects are disabled: a 301/302 means the id does not resolve to
    # a directly viewable attachment page, so only a plain 200 is parsed.
    r = requests.get(url + '/?attachment_id=' + str(p), allow_redirects=False)
    if r.status_code == 200:
        soup = BeautifulSoup(r.text)
        div = soup.find('div', {'class': 'attachment'})
        # NOTE(review): this rebinds the 'p' argument to the <p> tag; the
        # numeric id is no longer needed at this point, but confirm.
        p = soup.find('p', {'class': 'attachment'})
        if div != None:
            img = div.find('img')['src']
            print img
        if p != None:
            link = p.find('a')['href']
            print link
# Usage/help text shown when the script is invoked with bad arguments.
helpmsg = "PoC for WordPress Attachment enumeration via the /?attachment_id=<number> vulnerabiltiy.\nCreated by Riyaz Ahemed Walikar\n\nUsage: wp_attachenum.py <blog_url> [max_number_users]\n\nExample: wp_attachenum.py https://advertising.paypal.com 20\n"

# The blog URL is mandatory; bail out with usage information otherwise.
if len(sys.argv) < 2:
    print "Not enough parameters.\n"
    print helpmsg
    sys.exit()

url = sys.argv[1]
num_u = ""
if len(sys.argv) > 2:
    if sys.argv[2].isdigit() == 1:
        num_u = sys.argv[2]
    else:
        # NOTE(review): this branch only warns; execution continues below
        # and falls back to the default of 20 attachments -- confirm that
        # is the intended behavior rather than exiting here.
        print "Invalid count! Please specify a positive integer to enumerate upto"

print "Starting enumeration on " + url + "\n"
if num_u == "":
    print "No upper limit specified, enumerating the default 20 attachments"
    num_u = 20
else:
    print "Enumerating the first " + str(num_u) + " attachments\n"

# Probe attachment ids 1..num_u inclusive.
for i in range(1, int(num_u) + 1):
    enumusers(url, i)
sys.exit()

# NOTE(review): everything below is unreachable -- the sys.exit() above
# always terminates the script before these lines can run.
print "Invalid parameters.\n"
print helpmsg
sys.exit()
| gpl-3.0 |
alaski/nova | nova/policy.py | 1 | 8251 | # Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Policy Engine For Nova."""
import copy
import re
import sys
from oslo_config import cfg
from oslo_log import log as logging
from oslo_policy import policy
from oslo_utils import excutils
import six
from nova import exception
from nova.i18n import _LE, _LW
from nova import policies
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
_ENFORCER = None
# This list is about the resources which support user based policy enforcement.
# Avoid sending deprecation warning for those resources.
USER_BASED_RESOURCES = ['os-keypairs']
# oslo_policy will read the policy configuration file again when the file
# is changed in runtime so the old policy rules will be saved to
# saved_file_rules and used to compare with new rules to determine the
# rules whether were updated.
saved_file_rules = []
KEY_EXPR = re.compile(r'%\((\w+)\)s')
def reset():
    """Tear down the global policy enforcer so the next init() rebuilds it."""
    global _ENFORCER
    if _ENFORCER is not None:
        _ENFORCER.clear()
        _ENFORCER = None
def init(policy_file=None, rules=None, default_rule=None, use_conf=True):
    """Init an Enforcer class.

    :param policy_file: Custom policy file to use, if none is specified,
                        `CONF.policy_file` will be used.
    :param rules: Default dictionary / Rules to use. It will be
                  considered just in the first instantiation.
    :param default_rule: Default rule to use, CONF.default_rule will
                         be used if none is specified.
    :param use_conf: Whether to load rules from config file.
    """
    global _ENFORCER
    global saved_file_rules
    # Lazily build the singleton enforcer; subsequent calls reuse it and
    # only refresh the view of the file-based rules below.
    if not _ENFORCER:
        _ENFORCER = policy.Enforcer(CONF,
                                    policy_file=policy_file,
                                    rules=rules,
                                    default_rule=default_rule,
                                    use_conf=use_conf)
        register_rules(_ENFORCER)
        _ENFORCER.load_rules()
    # Only the rules which are loaded from file may be changed.
    current_file_rules = _ENFORCER.file_rules
    current_file_rules = _serialize_rules(current_file_rules)

    # Checks whether the rules are updated in the runtime; when they are,
    # emit the user_id deprecation warning once per change and remember
    # the new snapshot for the next comparison.
    if saved_file_rules != current_file_rules:
        _warning_for_deprecated_user_based_rules(current_file_rules)
        saved_file_rules = copy.deepcopy(current_file_rules)
def _serialize_rules(rules):
    """Flatten a rules mapping into a sorted list of (name, str(rule))
    pairs so two rule sets can be compared for equality."""
    pairs = [(name, str(rule)) for name, rule in six.iteritems(rules)]
    return sorted(pairs, key=lambda pair: pair[0])
def _warning_for_deprecated_user_based_rules(rules):
    """Log a warning for every file-based rule that relies on user_id
    enforcement on a resource that does not support it."""
    for rule_name, rule_check in rules:
        # Resources that legitimately support user-based enforcement are
        # exempt from the deprecation warning.
        if any(resource in rule_name for resource in USER_BASED_RESOURCES):
            continue
        if 'user_id' in KEY_EXPR.findall(rule_check):
            LOG.warning(_LW("The user_id attribute isn't supported in the "
                            "rule '%s'. All the user_id based policy "
                            "enforcement will be removed in the "
                            "future."), rule_name)
def set_rules(rules, overwrite=True, use_conf=False):
    """Replace or extend the global enforcer's rule set.

    :param rules: New rules to use, as an instance of dict.
    :param overwrite: When True, replace the current rules entirely;
        otherwise merge the new rules into the existing ones.
    :param use_conf: Whether to reload rules from the config file.
    """
    # Make sure the global enforcer exists before touching its rules.
    init(use_conf=False)
    _ENFORCER.set_rules(rules, overwrite, use_conf)
def authorize(context, action, target, do_raise=True, exc=None):
    """Verify that *action* is allowed on *target* in this context.

    :param context: nova context
    :param action: string representing the action to be checked,
        colon separated for clarity, e.g. ``compute:create_instance``,
        ``compute:attach_volume``, ``volume:attach_volume``
    :param target: dictionary representing the object of the action;
        for object creation this should describe the location of the
        object, e.g. ``{'project_id': context.project_id}``
    :param do_raise: if True (the default), raises PolicyNotAuthorized;
        if False, returns False
    :param exc: exception class raised when the check fails; defaults
        to :class:`PolicyNotAuthorized`
    :raises nova.exception.PolicyNotAuthorized: if verification fails
        and do_raise is True, or an instance of ``exc`` when given
    :return: a non-False value (not necessarily "True") when authorized,
        and exactly False when not authorized and do_raise is False
    """
    init()
    credentials = context.to_policy_values()
    # Fall back to the standard policy failure exception.
    exc = exc or exception.PolicyNotAuthorized
    try:
        result = _ENFORCER.authorize(action, target, credentials,
                                     do_raise=do_raise, exc=exc,
                                     action=action)
    except policy.PolicyNotRegistered:
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE('Policy not registered'))
    except Exception:
        # Never leak the auth token into the debug log.
        credentials.pop('auth_token', None)
        with excutils.save_and_reraise_exception():
            LOG.debug('Policy check for %(action)s failed with credentials '
                      '%(credentials)s',
                      {'action': action, 'credentials': credentials})
    return result
def check_is_admin(context):
    """Return whether the context satisfies the 'context_is_admin' policy
    rule (i.e. its roles contain 'admin' per the policy settings)."""
    init()
    # The check is performed against the caller's own credentials, so the
    # target and the credentials are the same dictionary (user-self).
    credentials = context.to_policy_values()
    return _ENFORCER.authorize('context_is_admin', credentials, credentials)
@policy.register('is_admin')
class IsAdminCheck(policy.Check):
    """An explicit check for is_admin."""

    def __init__(self, kind, match):
        """Parse the expected boolean out of the rule's match string."""
        self.expected = match.lower() == 'true'
        super(IsAdminCheck, self).__init__(kind, str(self.expected))

    def __call__(self, target, creds, enforcer):
        """Return whether the credentials' is_admin flag matches the
        expected value."""
        return creds['is_admin'] == self.expected
def get_rules():
    """Return the current enforcer's rules, or None when uninitialized."""
    return _ENFORCER.rules if _ENFORCER else None
def register_rules(enforcer):
    """Register nova's in-code default policy rules with the enforcer."""
    defaults = policies.list_rules()
    enforcer.register_defaults(defaults)
def get_enforcer():
    """Return the initialized global Enforcer for oslopolicy CLI scripts.

    Those scripts pass 'output-file' and 'namespace' options that nova's
    config does not know about, so loading the Nova options with the raw
    sys.argv would fail.  Strip those options (and their values) before
    parsing the configuration.
    """
    filtered_args = []
    argv = sys.argv
    # Start at 1 because cfg.CONF expects the equivalent of sys.argv[1:]
    index = 1
    while index < len(argv):
        if argv[index].strip('-') in ('namespace', 'output-file'):
            # Skip both the option and its value.
            index += 2
        else:
            filtered_args.append(argv[index])
            index += 1
    cfg.CONF(filtered_args, project='nova')
    init()
    return _ENFORCER
| apache-2.0 |
tonnrueter/pymca_devel | PyMca/PCAModule.py | 1 | 32607 | #/*##########################################################################
# Copyright (C) 2004-2012 European Synchrotron Radiation Facility
#
# This file is part of the PyMca X-ray Fluorescence Toolkit developed at
# the ESRF by the Software group.
#
# This toolkit is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# PyMca is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# PyMca; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# PyMca follows the dual licensing model of Riverbank's PyQt and cannot be
# used as a free plugin for a non-free program.
#
# Please contact the ESRF industrial unit (industry@esrf.fr) if this license
# is a problem for you.
#############################################################################*/
__author__ = "V.A. Sole & A. Mirone - ESRF Data Analysis"
import os
import time
import numpy
import numpy.linalg
try:
import numpy.core._dotblas as dotblas
except ImportError:
print("WARNING: Not using BLAS, PCA calculation will be slower")
dotblas = numpy
try:
import mdp
MDP = True
except:
# MDP can raise other errors than just an import error
MDP = False
from PyMca import Lanczos
from PyMca import PCATools
DEBUG = 0
# Make these functions accept arguments not relevant to
# them in order to simplify having a common graphical interface
def lanczosPCA(stack, ncomponents=10, binning=None, **kw):
    """
    PCA via the Lanczos eigensolver on the (channels x channels)
    covariance matrix of the stack.

    :param stack: 3D numpy array (rows, cols, channels), 2D array
        (pixels, channels), or an object exposing such an array as
        its ``data`` attribute.
    :param ncomponents: Number of principal components to compute.
    :param binning: Optional integer channel-binning factor.
    :returns: (images, eigenvalues, vectors) with images shaped
        (ncomponents, rows, cols) and vectors (ncomponents, channels).
    """
    if DEBUG:
        print("lanczosPCA")
    if binning is None:
        binning = 1
    if hasattr(stack, "info") and hasattr(stack, "data"):
        data = stack.data
    else:
        data = stack
    if not isinstance(data, numpy.ndarray):
        raise TypeError(\
            "lanczosPCA is only supported when using numpy arrays")
    # "single" builds the covariance matrix explicitly; "double" would
    # keep the two factors and multiply twice inside the solver.
    #wrapmatrix = "double"
    wrapmatrix = "single"
    dtype = numpy.float64
    if wrapmatrix == "double":
        data = data.astype(dtype)
    # Flatten the spatial dimensions: one row per pixel.
    if len(data.shape) == 3:
        r, c, N = data.shape
        data.shape = r * c, N
    else:
        r, N = data.shape
        c = 1
    npixels = r * c
    if binning > 1:
        # data.shape may fails with non-contiguous arrays
        # use reshape.
        data = numpy.reshape(data,
                             [data.shape[0], data.shape[1] / binning, binning])
        data = numpy.sum(data, axis=-1)
        N /= binning
    if ncomponents > N:
        raise ValueError("Number of components too high.")
    # Center the data in place (mean is added back at the end).
    avg = numpy.sum(data, 0) / (1.0 * npixels)
    numpy.subtract(data, avg, data)
    Lanczos.LanczosNumericMatrix.tipo = dtype
    Lanczos.LanczosNumericVector.tipo = dtype
    if wrapmatrix == "single":
        SM = [dotblas.dot(data.T, data).astype(dtype)]
        SM = Lanczos.LanczosNumericMatrix(SM)
    else:
        SM = Lanczos.LanczosNumericMatrix([data.T.astype(dtype),
                                           data.astype(dtype)])
    eigenvalues, eigenvectors = Lanczos.solveEigenSystem(SM,
                                                         ncomponents,
                                                         shift=0.0,
                                                         tol=1.0e-15)
    SM = None
    # Undo the in-place centering so the caller's array is unchanged.
    numpy.add(data, avg, data)
    images = numpy.zeros((ncomponents, npixels), data.dtype)
    vectors = numpy.zeros((ncomponents, N), dtype)
    # Project the data on each eigenvector to obtain the score images.
    for i in range(ncomponents):
        vectors[i, :] = eigenvectors[i].vr
        images[i, :] = dotblas.dot(data,
                                   (eigenvectors[i].vr).astype(data.dtype))
    data = None
    images.shape = ncomponents, r, c
    return images, eigenvalues, vectors
def lanczosPCA2(stack, ncomponents=10, binning=None, **kw):
    """
    Fast Lanczos-based PCA that first solves the eigenproblem on a
    spatially binned copy of the data and then refines the result on the
    full-resolution data.  Because of the binning it may lose information.

    :param stack: 3D numpy array (rows, cols, channels) or an object
        exposing such an array as its ``data`` attribute.
    :param ncomponents: Number of principal components to return.
    :param binning: Ignored; kept for interface compatibility with the
        other PCA functions of this module (the spatial binning factor
        is chosen automatically from the number of pixels).
    :returns: (images, eigenvalues, vectors) with images shaped
        (ncomponents, rows, cols) and vectors (ncomponents, channels).
    """
    if hasattr(stack, "info") and hasattr(stack, "data"):
        data = stack.data
    else:
        data = stack
    # check we have received a numpy.ndarray and not an HDF5 group
    # or other type of dynamically loaded data
    if not isinstance(data, numpy.ndarray):
        raise TypeError(\
            "lanczosPCA2 is only supported when using numpy arrays")
    r, c, N = data.shape
    npixels = r * c  # number of pixels
    data.shape = r * c, N

    # Choose the spatial binning factor from the number of pixels.
    # BUGFIX: the first two branches used to be independent "if"
    # statements, so npixels < 2000 always ended up with BINNING = 4;
    # a proper elif chain lets each range select its intended factor.
    if npixels < 2000:
        BINNING = 2
    elif npixels < 5000:
        BINNING = 4
    elif npixels < 10000:
        BINNING = 8
    elif npixels < 20000:
        BINNING = 10
    elif npixels < 30000:
        BINNING = 15
    elif npixels < 60000:
        BINNING = 20
    else:
        BINNING = 30
    if BINNING is not None:
        dataorig = data
        reminder = npixels % BINNING
        if reminder:
            # drop the last incomplete group of pixels
            data = data[0:BINNING * int(npixels / BINNING), :]
        # NOTE(review): when reminder == 0, data is still the same array
        # object as dataorig, so this shape assignment mutates dataorig
        # too -- verify the unbinned projection below still sees 2D data.
        data.shape = data.shape[0] / BINNING, BINNING, data.shape[1]
        data = numpy.swapaxes(data, 1, 2)
        data = numpy.sum(data, axis=-1)
    tipo = numpy.float64
    # Solve for a few extra eigenvectors to improve the refinement step.
    neig = ncomponents + 5
    # "doppia" would not create the covariance matrix but perform two
    # multiplications; "singola" creates the covariance matrix and
    # performs only one multiplication.
    rappmatrix = "singola"
    # subtract the mean spectrum from the binned data
    mediadata = numpy.sum(data, axis=0) / numpy.array([len(data)], data.dtype)
    numpy.subtract(data, mediadata, data)
    Lanczos.LanczosNumericMatrix.tipo = tipo
    Lanczos.LanczosNumericVector.tipo = tipo
    if rappmatrix == "singola":
        SM = [dotblas.dot(data.T, data).astype(tipo)]
        SM = Lanczos.LanczosNumericMatrix(SM)
    else:
        SM = Lanczos.LanczosNumericMatrix([data.T.astype(tipo),
                                           data.astype(tipo)])
    # calculate eigenvalues and eigenvectors of the binned data
    ev, eve = Lanczos.solveEigenSystem(SM, neig, shift=0.0, tol=1.0e-7)
    SM = None
    newmat = numpy.zeros((r * c, neig), numpy.float64)
    data = data.astype(tipo)
    # numpy in-place addition to make sure no intermediate copies are made
    numpy.add(data, mediadata, data)
    # Project the full-resolution data onto the binned eigenvectors.
    for i in range(neig):
        newmat[:, i] = dotblas.dot(dataorig,
                                   (eve[i].vr).astype(dataorig.dtype))
    # Diagonalize the small projected covariance to refine the components.
    newcov = dotblas.dot(newmat.T, newmat)
    evals, evects = numpy.linalg.eigh(newcov)
    nuovispettri = dotblas.dot(evects, eve.vr[:neig])
    images = numpy.zeros((ncomponents, npixels), data.dtype)
    vectors = numpy.zeros((ncomponents, N), tipo)
    # eigh returns eigenvalues in ascending order; take the largest ones.
    for i in range(ncomponents):
        vectors[i, :] = nuovispettri[-1 - i, :]
        images[i, :] = dotblas.dot(newmat,
                                   evects[-1 - i].astype(dataorig.dtype))
    images.shape = ncomponents, r, c
    return images, evals, vectors
def multipleArrayPCA(stackList, ncomponents=10, binning=None, **kw):
    """
    Given a list of arrays, calculate the requested principal components from
    the matrix resulting from their column concatenation. Therefore, all the
    input arrays must have the same number of rows.
    """
    stack = stackList[0]
    if hasattr(stack, "info") and hasattr(stack, "data"):
        data = stack.data
    else:
        data = stack
    if not isinstance(data, numpy.ndarray):
        raise TypeError(\
            "multipleArrayPCA is only supported when using numpy arrays")
    # The spatial geometry is taken from the first stack only.
    if len(data.shape) == 3:
        r, c = data.shape[:2]
        npixels = r * c
    else:
        c = None
        r = data.shape[0]
        npixels = r
    #reshape and subtract mean to all the input data
    # The input arrays are centered IN PLACE; the means are stored so the
    # original values can be restored at the end.
    shapeList = []
    avgList = []
    eigenvectorLength = 0
    for i in range(len(stackList)):
        shape = stackList[i].shape
        eigenvectorLength += shape[-1]
        shapeList.append(shape)
        stackList[i].shape = npixels, -1
        avg = numpy.sum(stackList[i], 0) / (1.0 * npixels)
        numpy.subtract(stackList[i], avg, stackList[i])
        avgList.append(avg)
    #create the needed storage space for the covariance matrix
    covMatrix = numpy.zeros((eigenvectorLength, eigenvectorLength),
                            numpy.float32)
    # Fill the block covariance matrix.  Only the upper triangle of blocks
    # is computed; the lower triangle is obtained by transposing the
    # symmetric counterpart, whose position is remembered in indexDict.
    rowOffset = 0
    indexDict = {}
    for i in range(len(stackList)):
        iVectorLength = shapeList[i][-1]
        colOffset = 0
        for j in range(len(stackList)):
            jVectorLength = shapeList[j][-1]
            if i <= j:
                covMatrix[rowOffset:(rowOffset + iVectorLength),
                          colOffset:(colOffset + jVectorLength)] =\
                          dotblas.dot(stackList[i].T, stackList[j])
                if i < j:
                    key = "%02d%02d" % (i, j)
                    indexDict[key] = (rowOffset, rowOffset + iVectorLength,
                                      colOffset, colOffset + jVectorLength)
            else:
                key = "%02d%02d" % (j, i)
                rowMin, rowMax, colMin, colMax = indexDict[key]
                covMatrix[rowOffset:(rowOffset + iVectorLength),
                          colOffset:(colOffset + jVectorLength)] =\
                          covMatrix[rowMin:rowMax, colMin:colMax].T
            colOffset += jVectorLength
        rowOffset += iVectorLength
    indexDict = None
    #I have the covariance matrix, calculate the eigenvectors and eigenvalues
    covMatrix = [covMatrix]
    covMatrix = Lanczos.LanczosNumericMatrix(covMatrix)
    eigenvalues, evectors = Lanczos.solveEigenSystem(covMatrix,
                                                     ncomponents,
                                                     shift=0.0,
                                                     tol=1.0e-15)
    covMatrix = None
    images = numpy.zeros((ncomponents, npixels), numpy.float32)
    eigenvectors = numpy.zeros((ncomponents, eigenvectorLength), numpy.float32)
    # Each score image is the sum of the projections of every input array
    # on its slice of the concatenated eigenvector.
    for i in range(ncomponents):
        eigenvectors[i, :] = evectors[i].vr
        colOffset = 0
        for j in range(len(stackList)):
            jVectorLength = shapeList[j][-1]
            images[i, :] +=\
                dotblas.dot(stackList[j],
                            eigenvectors[i, colOffset:(colOffset + jVectorLength)])
            colOffset += jVectorLength
    #restore shapes and values
    for i in range(len(stackList)):
        numpy.add(stackList[i], avgList[i], stackList[i])
        stackList[i].shape = shapeList[i]
    if c is None:
        images.shape = ncomponents, r, 1
    else:
        images.shape = ncomponents, r, c
    return images, eigenvalues, eigenvectors
def expectationMaximizationPCA(stack, ncomponents=10, binning=None, **kw):
    """
    This is a fast method when the number of components is small

    Iteratively extracts one principal component at a time by power
    iteration (expectation-maximization style) and deflates the working
    copy of the data after each component.
    """
    if DEBUG:
        print("expectationMaximizationPCA")
    #This part is common to all ...
    if binning is None:
        binning = 1
    if hasattr(stack, "info") and hasattr(stack, "data"):
        data = stack.data
    else:
        data = stack
    if len(data.shape) == 3:
        r, c, N = data.shape
        data.shape = r * c, N
    else:
        r, N = data.shape
        c = 1
    if binning > 1:
        data = numpy.reshape(data, [data.shape[0], data.shape[1] / binning,
                                    binning])
        data = numpy.sum(data, axis=-1)
        N /= binning
    if ncomponents > N:
        raise ValueError("Number of components too high.")
    #end of common part
    # Center the data in place; the mean is added back at the end.
    avg = numpy.sum(data, axis=0, dtype=numpy.float) / (1.0 * r * c)
    numpy.subtract(data, avg, data)
    # dataw is a deflatable working copy; the centered data itself stays
    # intact for the eigenvalue/image computations below.
    dataw = data * 1
    images = numpy.zeros((ncomponents, r * c), data.dtype)
    eigenvalues = numpy.zeros((ncomponents,), data.dtype)
    eigenvectors = numpy.zeros((ncomponents, N), data.dtype)
    for i in range(ncomponents):
        #generate a random vector
        p = numpy.random.random(N)
        #10 iterations seems to be fairly accurate, but it is
        #slow when reaching "noise" components.
        #A variation threshold of 1 % seems to be acceptable.
        tmod_old = 0
        tmod = 0.02
        j = 0
        max_iter = 7
        # Power iteration: repeatedly apply the covariance operator and
        # normalize, until the vector norm stabilizes within 1 %.
        while ((abs(tmod - tmod_old) / tmod) > 0.01) and (j < max_iter):
            tmod_old = tmod
            t = 0.0
            for k in range(r * c):
                t += dotblas.dot(dataw[k, :], p.T) * dataw[k, :]
            tmod = numpy.sqrt(numpy.sum(t * t))
            p = t / tmod
            j += 1
        eigenvectors[i, :] = p
        #subtract the found component from the dataset
        for k in range(r * c):
            dataw[k, :] -= dotblas.dot(dataw[k, :], p.T) * p
    # calculate eigenvalues via the Rayleigh Quotients:
    # eigenvalue = \
    # (Eigenvector.T * Covariance * EigenVector)/ (Eigenvector.T * Eigenvector)
    for i in range(ncomponents):
        tmp = dotblas.dot(data, eigenvectors[i, :].T)
        eigenvalues[i] = \
            dotblas.dot(tmp.T, tmp) / dotblas.dot(eigenvectors[i, :].T,
                                                  eigenvectors[i, :])
    #Generate the eigenimages
    for i0 in range(ncomponents):
        images[i0, :] = dotblas.dot(data, eigenvectors[i0, :])
    #restore the original data
    numpy.add(data, avg, data)
    #reshape the images
    images.shape = ncomponents, r, c
    return images, eigenvalues, eigenvectors
def numpyPCA(stack, ncomponents=10, binning=None, **kw):
    """
    Covariance-based PCA, delegated to the numpy implementation
    provided by PCATools.
    """
    return PCATools.numpyPCA(stack,
                             ncomponents=ncomponents,
                             binning=binning,
                             **kw)
def mdpPCASVDFloat32(stack, ncomponents=10, binning=None, mask=None):
    """PCA via MDP using SVD, computed in single precision."""
    return mdpPCA(stack, ncomponents, binning=binning,
                  dtype='float32', svd='True', mask=mask)
def mdpPCASVDFloat64(stack, ncomponents=10, binning=None, mask=None):
    """PCA via MDP using SVD, computed in double precision."""
    return mdpPCA(stack, ncomponents, binning=binning,
                  dtype='float64', svd='True', mask=mask)
def mdpICAFloat32(stack, ncomponents=10, binning=None, mask=None):
    """ICA via MDP, computed in single precision."""
    return mdpICA(stack, ncomponents, binning=binning,
                  dtype='float32', svd='True', mask=mask)
def mdpICAFloat64(stack, ncomponents=10, binning=None, mask=None):
    """ICA via MDP, computed in double precision."""
    return mdpICA(stack, ncomponents, binning=binning,
                  dtype='float64', svd='True', mask=mask)
def mdpPCA(stack, ncomponents=10, binning=None, dtype='float64', svd='True',
           mask=None):
    """
    PCA of a 3D (rows, cols, channels) or 2D (pixels, channels) stack
    using an MDP PCANode.  The node is trained in chunks, so the stack
    may be a dynamically loaded (HDF5-like) object, not only a numpy
    array.

    :param stack: the data, or an object exposing it as ``.data``.
    :param ncomponents: number of principal components to extract.
    :param binning: optional integer channel-binning factor.
    :param dtype: floating point precision used by the PCANode.
    :param svd: passed through to PCANode to select SVD computation.
    :param mask: optional 1D array over channels; when given, only the
        channels with mask > 0 participate in the training/projection.
    :returns: (images, eigenvalues, eigenvectors)
    """
    if DEBUG:
        print("MDP Method")
        print("binning =", binning)
        print("dtype = ", dtype)
        print("svd = ", svd)
    #This part is common to all ...
    if binning is None:
        binning = 1
    if hasattr(stack, "info") and hasattr(stack, "data"):
        data = stack.data[:]
    else:
        data = stack[:]
    oldShape = data.shape
    if len(data.shape) == 3:
        r, c, N = data.shape
        # data can be dynamically loaded
        if isinstance(data, numpy.ndarray):
            data.shape = r * c, N
    else:
        r, N = data.shape
        c = 1
    if binning > 1:
        # Only in-memory arrays can be binned up front; dynamically
        # loaded data is binned chunk by chunk during training below.
        if isinstance(data, numpy.ndarray):
            data = numpy.reshape(data,
                                 [data.shape[0], data.shape[1] / binning,
                                  binning])
            data = numpy.sum(data, axis=-1)
        N /= binning
    if ncomponents > N:
        # Put the caller's array back into its original shape before
        # raising.
        if binning == 1:
            if data.shape != oldShape:
                data.shape = oldShape
        raise ValueError("Number of components too high.")
    #end of common part
    #begin the specific coding
    pca = mdp.nodes.PCANode(output_dim=ncomponents, dtype=dtype, svd=svd)
    shape = data.shape
    if len(data.shape) == 3:
        # 3D case (dynamically loaded data): train on row-chunks of
        # `step` rows at a time, then on the remaining single rows.
        step = 10
        if r > step:
            last = step * (int(r / step) - 1)
            for i in range(0, last, step):
                for j in range(step):
                    print("Training data %d out of %d" % (i + j + 1, r))
                tmpData = data[i:(i + step), :, :]
                if binning > 1:
                    tmpData.shape = (step * shape[1],
                                     shape[2] / binning,
                                     binning)
                    tmpData = numpy.sum(tmpData, axis=-1)
                else:
                    tmpData.shape = step * shape[1], shape[2]
                if mask is None:
                    pca.train(tmpData)
                else:
                    pca.train(tmpData[:, mask > 0])
                tmpData = None
            last = i + step
        else:
            last = 0
        # Train on the rows not covered by the chunked loop above.
        if binning > 1:
            for i in range(last, r):
                print("Training data %d out of %d" % (i + 1, r))
                tmpData = data[i, :, :]
                tmpData.shape = shape[1], shape[2] / binning, binning
                tmpData = numpy.sum(tmpData, axis=-1)
                if mask is None:
                    pca.train(tmpData)
                else:
                    pca.train(tmpData[:, mask > 0])
                tmpData = None
        else:
            for i in range(last, r):
                print("Training data %d out of %d" % (i + 1, r))
                if mask is None:
                    pca.train(data[i, :, :])
                else:
                    pca.train(data[i, :, mask > 0])
    else:
        # 2D case: the chunking strategy depends on the number of rows.
        if data.shape[0] > 10000:
            step = 1000
            last = step * (int(data.shape[0] / step) - 1)
            if mask is None:
                for i in range(0, last, step):
                    print("Training data from %d to %d of %d" %\
                          (i + 1, i + step, data.shape[0]))
                    pca.train(data[i:(i + step), :])
                print("Training data from %d to end of %d" %\
                      (i + step + 1, data.shape[0]))
                pca.train(data[(i + step):, :])
            else:
                for i in range(0, last, step):
                    print("Training data from %d to %d of %d" %\
                          (i + 1, i + step, data.shape[0]))
                    pca.train(data[i:(i + step), mask > 0])
                # NOTE(review): `i` here is the last loop index; with
                # data.shape[0] > 10000 the loop above always runs at
                # least once, so it is defined -- confirm.
                print("Training data from %d to end of %d" %\
                      (i + step + 1, data.shape[0]))
                pca.train(data[(i + step):, mask > 0])
        elif data.shape[0] > 1000:
            # Two half-size training passes.
            i = int(data.shape[0] / 2)
            if mask is None:
                pca.train(data[:i, :])
            else:
                pca.train(data[:i, mask > 0])
            if DEBUG:
                print("Half training")
            if mask is None:
                pca.train(data[i:, :])
            else:
                pca.train(data[i:, mask > 0])
            if DEBUG:
                print("Full training")
        else:
            # Small enough to train in a single pass.
            if mask is None:
                pca.train(data)
            else:
                pca.train(data[:, mask > 0])
    pca.stop_training()
    # avg = pca.avg
    eigenvalues = pca.d
    eigenvectors = pca.v.T
    proj = pca.get_projmatrix(transposed=0)
    # Build the score images by projecting the (possibly binned/masked)
    # data through the projection matrix.
    if len(data.shape) == 3:
        images = numpy.zeros((ncomponents, r, c), data.dtype)
        for i in range(r):
            print("Building images. Projecting data %d out of %d" % (i + 1, r))
            if binning > 1:
                if mask is None:
                    tmpData = data[i, :, :]
                else:
                    tmpData = data[i, :, mask > 0]
                tmpData.shape = data.shape[1], data.shape[2] / binning, binning
                tmpData = numpy.sum(tmpData, axis=-1)
                images[:, i, :] = numpy.dot(proj.astype(data.dtype), tmpData.T)
            else:
                if mask is None:
                    images[:, i, :] = numpy.dot(proj.astype(data.dtype),
                                                data[i, :, :].T)
                else:
                    images[:, i, :] = numpy.dot(proj.astype(data.dtype),
                                                data[i, :, mask > 0].T)
    else:
        if mask is None:
            images = numpy.dot(proj.astype(data.dtype), data.T)
        else:
            images = numpy.dot(proj.astype(data.dtype), data[:, mask > 0].T)
    #make sure the shape of the original data is not modified
    if hasattr(stack, "info") and hasattr(stack, "data"):
        if stack.data.shape != oldShape:
            stack.data.shape = oldShape
    else:
        if stack.shape != oldShape:
            stack.shape = oldShape
    if mask is not None:
        # Expand the masked eigenvectors back to full channel length,
        # leaving the excluded channels at zero.
        eigenvectors = numpy.zeros((ncomponents, N), pca.v.dtype)
        for i in range(ncomponents):
            eigenvectors[i, mask > 0] = pca.v.T[i]
    #reshape the images
    images.shape = ncomponents, r, c
    return images, eigenvalues, eigenvectors
def mdpICA(stack, ncomponents=10, binning=None, dtype='float64', svd='True',
           mask=None):
    """Run ICA (via the MDP toolkit) on a stack of spectra.

    :param stack: either an object with ``info``/``data`` attributes (a
        stack object) or an array-like; a 3D array is interpreted as
        (rows, cols, channels) and flattened in place to (rows*cols,
        channels) for training.
    :param ncomponents: number of independent components to extract.
    :param binning: channel binning factor; ``None`` means 1 (no binning).
    :param dtype: dtype passed to the fallback (non-TDSEP) ICA nodes.
    :param svd: forwarded as ``white_parm={'svd': svd}`` to the whitening
        step.  NOTE(review): the default is the *string* ``'True'``, not the
        boolean -- callers (see ``main``) pass ``svd=True``; confirm which
        one MDP expects.
    :param mask: optional per-channel mask; only channels with
        ``mask > 0`` are used for training/projection.
    :return: tuple ``(images, eigenvalues, vectors)`` where ``images`` has
        ``2 * ncomponents`` planes (ICA projections first, then the PCA
        projections from the whitening stage), ``eigenvalues`` are the
        whitening-stage PCA eigenvalues and ``vectors`` stacks the ICA
        projection rows on top of the PCA eigenvectors.
    """
    #This part is common to all ...
    if binning is None:
        binning = 1
    if hasattr(stack, "info") and hasattr(stack, "data"):
        data = stack.data[:]
    else:
        data = stack[:]
    # Remember the incoming shape: `data.shape = ...` below mutates the
    # caller's array in place, so it is restored before returning.
    oldShape = data.shape
    if len(data.shape) == 3:
        r, c, N = data.shape
        if isinstance(data, numpy.ndarray):
            data.shape = r * c, N
    else:
        r, N = data.shape
        c = 1
    if binning > 1:
        if isinstance(data, numpy.ndarray):
            # NOTE(review): `data.shape[1] / binning` and `N /= binning`
            # rely on integer division; under Python 3 true division these
            # become floats -- confirm intended Python version.
            data = numpy.reshape(data,
                                 [data.shape[0], data.shape[1] / binning,
                                  binning])
            data = numpy.sum(data, axis=-1)
            N /= binning
    if ncomponents > N:
        # Restore the caller's shape before bailing out.
        if binning == 1:
            if data.shape != oldShape:
                data.shape = oldShape
        raise ValueError("Number of components too high.")
    # `if 1:` keeps the TDSEP path enabled; the `else:` branch below is
    # dead code retained for experimentation.
    if 1:
        # NOTE(review): mdp.__version__ is a string; comparing it with the
        # float 2.5 is always True on Python 2 and raises TypeError on
        # Python 3 -- confirm against the targeted mdp/Python versions.
        if (mdp.__version__ >= 2.5):
            if DEBUG:
                print("TDSEPNone")
            ica = mdp.nodes.TDSEPNode(white_comp=ncomponents,
                                      verbose=False,
                                      dtype="float64",
                                      white_parm={'svd': svd})
            if DEBUG:
                t0 = time.time()
            shape = data.shape
            # Train incrementally in chunks to bound memory usage.
            if len(data.shape) == 3:
                if r > 10:
                    step = 10
                    last = step * (int(r / step) - 1)
                    for i in range(0, last, step):
                        print("Training data from %d to %d out of %d" %\
                              (i + 1, i + step, r))
                        tmpData = data[i:(i + step), :, :]
                        if binning > 1:
                            tmpData.shape = (step * shape[1],
                                             shape[2] / binning,
                                             binning)
                            tmpData = numpy.sum(tmpData, axis=-1)
                        else:
                            tmpData.shape = step * shape[1], shape[2]
                        if mask is None:
                            ica.train(tmpData)
                        else:
                            ica.train(tmpData[:, mask > 0])
                        tmpData = None
                    # Leaks the last loop index on purpose: remaining rows
                    # [last, r) are trained one by one below.
                    last = i + step
                else:
                    last = 0
                if binning > 1:
                    for i in range(last, r):
                        print("Training data %d out of %d" % (i + 1, r))
                        tmpData = data[i, :, :]
                        tmpData.shape = shape[1], shape[2] / binning, binning
                        tmpData = numpy.sum(tmpData, axis=-1)
                        if mask is None:
                            ica.train(tmpData)
                        else:
                            ica.train(tmpData[:, mask > 0])
                        tmpData = None
                else:
                    for i in range(last, r):
                        print("Training data %d out of %d" % (i + 1, r))
                        if mask is None:
                            ica.train(data[i, :, :])
                        else:
                            ica.train(data[i, :, mask > 0])
            else:
                if data.shape[0] > 10000:
                    step = 1000
                    last = step * (int(data.shape[0] / step) - 1)
                    for i in range(0, last, step):
                        print("Training data from %d to %d of %d" %\
                              (i + 1, i + step, data.shape[0]))
                        if mask is None:
                            ica.train(data[i:(i + step), :])
                        else:
                            ica.train(data[i:(i + step), mask > 0])
                    # NOTE(review): `i` leaks from the loop above; if the
                    # loop body never ran (last <= 0) this raises NameError.
                    print("Training data from %d to end of %d" %\
                          (i + step + 1, data.shape[0]))
                    if mask is None:
                        ica.train(data[(i + step):, :])
                    else:
                        ica.train(data[(i + step):, mask > 0])
                elif data.shape[0] > 1000:
                    # Two half-size training passes.
                    i = int(data.shape[0] / 2)
                    if mask is None:
                        ica.train(data[:i, :])
                    else:
                        ica.train(data[:i, mask > 0])
                    if DEBUG:
                        print("Half training")
                    if mask is None:
                        ica.train(data[i:, :])
                    else:
                        ica.train(data[i:, mask > 0])
                    if DEBUG:
                        print("Full training")
                else:
                    # Small data set: a single full-batch training pass.
                    if mask is None:
                        ica.train(data)
                    else:
                        ica.train(data[:, mask > 0])
            ica.stop_training()
            if DEBUG:
                print("training elapsed = %f" % (time.time() - t0))
        else:
            # Fallback for old mdp versions: pick one of several node
            # types; only the FastICANode branch is live.
            if 0:
                print("ISFANode (alike)")
                ica = mdp.nodes.TDSEPNode(white_comp=ncomponents,
                                          verbose=False,
                                          dtype='float64',
                                          white_parm={'svd':svd})
            elif 1:
                if DEBUG:
                    print("FastICANode")
                ica = mdp.nodes.FastICANode(white_comp=ncomponents,
                                            verbose=False,
                                            dtype=dtype)
            else:
                if DEBUG:
                    print("CuBICANode")
                ica = mdp.nodes.CuBICANode(white_comp=ncomponents,
                                           verbose=False,
                                           dtype=dtype)
            ica.train(data)
            ica.stop_training()
        #output = ica.execute(data)
        proj = ica.get_projmatrix(transposed=0)
        # These are the PCA data (from the whitening stage).
        eigenvalues = ica.white.d * 1
        eigenvectors = ica.white.v.T * 1
        vectors = numpy.zeros((ncomponents * 2, N), data.dtype)
        if mask is None:
            vectors[0:ncomponents, :] = proj * 1 # ica components?
            vectors[ncomponents:, :] = eigenvectors
        else:
            # Masked case: re-allocate and scatter the masked columns back
            # into full-width rows.
            vectors = numpy.zeros((2 * ncomponents, N), eigenvectors.dtype)
            vectors[0:ncomponents, mask > 0] = proj * 1
            vectors[ncomponents:, mask > 0] = eigenvectors
        if (len(data.shape) == 3):
            # Project row by row: ICA images in the first half, the
            # whitening (PCA) projections in the second half.
            images = numpy.zeros((2 * ncomponents, r, c), data.dtype)
            for i in range(r):
                print("Building images. Projecting data %d out of %d" %\
                      (i + 1, r))
                if binning > 1:
                    if mask is None:
                        tmpData = data[i, :, :]
                    else:
                        tmpData = data[i, :, mask > 0]
                    tmpData.shape = (data.shape[1],
                                     data.shape[2] / binning,
                                     binning)
                    tmpData = numpy.sum(tmpData, axis=-1)
                    tmpData = ica.white.execute(tmpData)
                else:
                    if mask is None:
                        tmpData = ica.white.execute(data[i, :, :])
                    else:
                        tmpData = ica.white.execute(data[i, :, mask > 0])
                images[ncomponents:(2 * ncomponents), i, :] = tmpData.T[:, :]
                images[0:ncomponents, i, :] =\
                    numpy.dot(tmpData, ica.filters).T[:, :]
        else:
            images = numpy.zeros((2 * ncomponents, r * c), data.dtype)
            if mask is None:
                images[0:ncomponents, :] =\
                    numpy.dot(proj.astype(data.dtype), data.T)
            else:
                tmpData = data[:, mask > 0]
                images[0:ncomponents, :] =\
                    numpy.dot(proj.astype(data.dtype), tmpData.T)
            # Switch to the whitening-stage projection for the PCA planes.
            proj = ica.white.get_projmatrix(transposed=0)
            if mask is None:
                images[ncomponents:(2 * ncomponents), :] =\
                    numpy.dot(proj.astype(data.dtype), data.T)
            else:
                images[ncomponents:(2 * ncomponents), :] =\
                    numpy.dot(proj.astype(data.dtype), data[:, mask > 0].T)
            images.shape = 2 * ncomponents, r, c
    else:
        # Dead code path (see `if 1:` above): single-shot FastICA.
        ica = mdp.nodes.FastICANode(white_comp=ncomponents,
                                    verbose=False, dtype=dtype)
        ica.train(data)
        output = ica.execute(data)
        proj = ica.get_projmatrix(transposed=0)
        # These are the PCA data
        # make sure no reference to the ica module is kept to make sure
        # memory is released.
        eigenvalues = ica.white.d * 1
        eigenvectors = ica.white.v.T * 1
        images = numpy.zeros((2 * ncomponents, r * c), data.dtype)
        vectors = numpy.zeros((ncomponents * 2, N), data.dtype)
        vectors[0:ncomponents, :] = proj * 1 # ica components?
        vectors[ncomponents:, :] = eigenvectors
        images[0:ncomponents, :] = numpy.dot(proj.astype(data.dtype), data.T)
        proj = ica.white.get_projmatrix(transposed=0)
        images[ncomponents:(2 * ncomponents), :] =\
            numpy.dot(proj.astype(data.dtype), data.T)
        images.shape = 2 * ncomponents, r, c
    # Undo the in-place reshape of the caller's array (only possible when
    # no binning copy was made).
    if binning == 1:
        if data.shape != oldShape:
            data.shape = oldShape
    return images, eigenvalues, vectors
def main():
    """Command-line demo: run mdpICA and mdpPCA on an indexed EDF stack.

    Usage: ``python PCAModule.py indexed_edf_stack``

    With no argument, falls back to a hard-coded test file if it exists.
    Writes the component images to ``<basename>ICA.edf`` in the current
    directory and prints timings plus a slice of the eigenvectors for a
    quick visual comparison of the two implementations.
    """
    from PyMca import EDFStack
    from PyMca import EdfFile
    import sys
    # Raw string: the original non-raw literal relied on "\D", "\C", "\c"
    # not being recognized escape sequences, which raises a
    # DeprecationWarning on Python >= 3.6.  The value is unchanged.
    inputfile = r"D:\DATA\COTTE\ch09\ch09__mca_0005_0000_0000.edf"
    if len(sys.argv) > 1:
        inputfile = sys.argv[1]
        print(inputfile)
    elif os.path.exists(inputfile):
        print("Using a default test case")
    else:
        print("Usage:")
        print("python PCAModule.py indexed_edf_stack")
        sys.exit(0)
    stack = EDFStack.EDFStack(inputfile)
    r0, c0, n0 = stack.data.shape
    ncomponents = 5
    outfile = os.path.basename(inputfile) + "ICA.edf"
    e0 = time.time()
    images, eigenvalues, eigenvectors = mdpICA(stack.data, ncomponents,
                                               binning=1, svd=True,
                                               dtype='float64')
    #images, eigenvalues, eigenvectors = lanczosPCA2(stack.data,
    #                                                ncomponents,
    #                                                binning=1)
    if os.path.exists(outfile):
        os.remove(outfile)
    f = EdfFile.EdfFile(outfile)
    for i in range(ncomponents):
        f.WriteImage({}, images[i, :])
    # mdpICA reshapes the stack in place; restore the original shape.
    stack.data.shape = r0, c0, n0
    print("PCA Elapsed = %f" % (time.time() - e0))
    print("eigenvectors PCA2 = ", eigenvectors[0, 200:230])
    # Reload the stack so mdpPCA sees pristine data.
    stack = None
    stack = EDFStack.EDFStack(inputfile)
    e0 = time.time()
    images2, eigenvalues, eigenvectors = mdpPCA(stack.data, ncomponents,
                                                binning=1)
    stack.data.shape = r0, c0, n0
    print("MDP Elapsed = %f" % (time.time() - e0))
    print("eigenvectors MDP = ", eigenvectors[0, 200:230])
    if os.path.exists(outfile):
        os.remove(outfile)
    # Re-create the output file holding both result sets back to back.
    f = EdfFile.EdfFile(outfile)
    for i in range(ncomponents):
        f.WriteImage({}, images[i, :])
    for i in range(ncomponents):
        f.WriteImage({}, images2[i, :])
    f = None
| gpl-2.0 |
CalciferZh/AMCParser | 3Dviewer.py | 1 | 7204 | import pygame
import numpy as np
import time
import transforms3d.euler as euler
from amc_parser import *
from OpenGL.GL import *
from OpenGL.GLU import *
class Viewer:
  """Interactive pygame/OpenGL viewer for an ASF/AMC motion sequence."""
  def __init__(self, joints=None, motions=None):
    """
    Display motion sequence in 3D.
    Parameter
    ---------
    joints: Dict returned from `amc_parser.parse_asf`. Keys are joint names and
    values are instance of Joint class.
    motions: List returned from `amc_parser.parse_amc. Each element is a dict
    with joint names as keys and relative rotation degree as values.
    """
    self.joints = joints
    self.motions = motions
    self.frame = 0 # current frame of the motion sequence
    self.playing = False # whether is playing the motion sequence
    self.fps = 120 # frame rate
    # whether is dragging
    self.rotate_dragging = False
    self.translate_dragging = False
    # old mouse cursor position
    self.old_x = 0
    self.old_y = 0
    # global rotation
    self.global_rx = 0
    self.global_ry = 0
    # rotation matrix for camera moving
    self.rotation_R = np.eye(3)
    # rotation speed
    self.speed_rx = np.pi / 90
    self.speed_ry = np.pi / 90
    # translation speed
    self.speed_trans = 0.25
    self.speed_zoom = 0.5
    # whether the main loop should break
    self.done = False
    # default translate set manually to make sure the skeleton is in the middle
    # of the window
    # if you can't see anything in the screen, this is the first parameter you
    # need to adjust
    self.default_translate = np.array([0, -20, -100], dtype=np.float32)
    # keep a private copy: self.translate is mutated in place by the
    # keyboard handlers
    self.translate = np.copy(self.default_translate)
    pygame.init()
    self.screen_size = (1024, 768)
    self.screen = pygame.display.set_mode(
      self.screen_size, pygame.DOUBLEBUF | pygame.OPENGL
    )
    pygame.display.set_caption(
      'AMC Parser - frame %d / %d' % (self.frame, len(self.motions))
    )
    self.clock = pygame.time.Clock()
    glClearColor(0, 0, 0, 0)
    glShadeModel(GL_SMOOTH)
    glMaterialfv(
      GL_FRONT, GL_SPECULAR, np.array([1, 1, 1, 1], dtype=np.float32)
    )
    glMaterialfv(
      GL_FRONT, GL_SHININESS, np.array([100.0], dtype=np.float32)
    )
    glMaterialfv(
      GL_FRONT, GL_AMBIENT, np.array([0.7, 0.7, 0.7, 0.7], dtype=np.float32)
    )
    glEnable(GL_POINT_SMOOTH)
    glLightfv(GL_LIGHT0, GL_POSITION, np.array([1, 1, 1, 0], dtype=np.float32))
    glEnable(GL_LIGHT0)
    glEnable(GL_LIGHTING)
    glEnable(GL_DEPTH_TEST)
    gluPerspective(45, (self.screen_size[0]/self.screen_size[1]), 0.1, 500.0)
    glPointSize(10)
    glLineWidth(2.5)
  def process_event(self):
    """
    Handle user interface events: keydown, close, dragging.
    """
    for event in pygame.event.get():
      if event.type == pygame.QUIT:
        self.done = True
      elif event.type == pygame.KEYDOWN:
        if event.key == pygame.K_RETURN: # reset camera
          # BUGFIX: copy, don't alias. The in-place -=/+= on
          # self.translate below would otherwise mutate
          # default_translate and corrupt every later reset.
          self.translate = np.copy(self.default_translate)
          self.global_rx = 0
          self.global_ry = 0
        elif event.key == pygame.K_SPACE:
          self.playing = not self.playing
      elif event.type == pygame.MOUSEBUTTONDOWN: # dragging
        if event.button == 1:
          self.rotate_dragging = True
        else:
          self.translate_dragging = True
        self.old_x, self.old_y = event.pos
      elif event.type == pygame.MOUSEBUTTONUP:
        if event.button == 1:
          self.rotate_dragging = False
        else:
          self.translate_dragging = False
      elif event.type == pygame.MOUSEMOTION:
        if self.translate_dragging:
          # haven't figure out best way to implement this
          pass
        elif self.rotate_dragging:
          new_x, new_y = event.pos
          self.global_ry -= (new_x - self.old_x) / \
              self.screen_size[0] * np.pi
          self.global_rx -= (new_y - self.old_y) / \
              self.screen_size[1] * np.pi
          self.old_x, self.old_y = new_x, new_y
    pressed = pygame.key.get_pressed()
    # rotation
    if pressed[pygame.K_DOWN]:
      self.global_rx -= self.speed_rx
    if pressed[pygame.K_UP]:
      self.global_rx += self.speed_rx
    if pressed[pygame.K_LEFT]:
      self.global_ry += self.speed_ry
    if pressed[pygame.K_RIGHT]:
      self.global_ry -= self.speed_ry
    # moving
    if pressed[pygame.K_a]:
      self.translate[0] -= self.speed_trans
    if pressed[pygame.K_d]:
      self.translate[0] += self.speed_trans
    if pressed[pygame.K_w]:
      self.translate[1] += self.speed_trans
    if pressed[pygame.K_s]:
      self.translate[1] -= self.speed_trans
    if pressed[pygame.K_q]:
      self.translate[2] += self.speed_zoom
    if pressed[pygame.K_e]:
      self.translate[2] -= self.speed_zoom
    # forward and rewind (wrap around at both ends)
    if pressed[pygame.K_COMMA]:
      self.frame -= 1
      if self.frame < 0:
        self.frame = len(self.motions) - 1
    if pressed[pygame.K_PERIOD]:
      self.frame += 1
      if self.frame >= len(self.motions):
        self.frame = 0
    # global rotation
    grx = euler.euler2mat(self.global_rx, 0, 0)
    gry = euler.euler2mat(0, self.global_ry, 0)
    self.rotation_R = grx.dot(gry)
  def set_joints(self, joints):
    """
    Set joints for viewer.
    Parameter
    ---------
    joints: Dict returned from `amc_parser.parse_asf`. Keys are joint names and
    values are instance of Joint class.
    """
    self.joints = joints
  def set_motion(self, motions):
    """
    Set motion sequence for viewer.
    Paramter
    --------
    motions: List returned from `amc_parser.parse_amc. Each element is a dict
    with joint names as keys and relative rotation degree as values.
    """
    self.motions = motions
  def draw(self):
    """
    Draw the skeleton with balls and sticks.
    """
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
    # joints as points
    glBegin(GL_POINTS)
    for j in self.joints.values():
      coord = np.array(
        np.squeeze(j.coordinate).dot(self.rotation_R) + \
        self.translate, dtype=np.float32
      )
      glVertex3f(*coord)
    glEnd()
    # bones as lines between each joint and its parent
    glBegin(GL_LINES)
    for j in self.joints.values():
      child = j
      parent = j.parent
      if parent is not None:
        coord_x = np.array(
          np.squeeze(child.coordinate).dot(self.rotation_R)+self.translate,
          dtype=np.float32
        )
        coord_y = np.array(
          np.squeeze(parent.coordinate).dot(self.rotation_R)+self.translate,
          dtype=np.float32
        )
        glVertex3f(*coord_x)
        glVertex3f(*coord_y)
    glEnd()
  def run(self):
    """
    Main loop.
    """
    while not self.done:
      self.process_event()
      self.joints['root'].set_motion(self.motions[self.frame])
      if self.playing:
        self.frame += 1
        if self.frame >= len(self.motions):
          self.frame = 0
      self.draw()
      pygame.display.set_caption(
        'AMC Parser - frame %d / %d' % (self.frame, len(self.motions))
      )
      pygame.display.flip()
      self.clock.tick(self.fps)
    pygame.quit()
if __name__ == '__main__':
  # Demo entry point: parse the sample ASF skeleton and AMC motion shipped
  # under ./data/01 and play them back interactively.
  asf_path = './data/01/01.asf'
  amc_path = './data/01/01_01.amc'
  joints = parse_asf(asf_path)
  motions = parse_amc(amc_path)
  v = Viewer(joints, motions)
  v.run()
| mit |
HDT3213/Duang | Duang/mainWindow.py | 2 | 1686 | from PyQt4 import QtCore, QtGui
import sys
from scene import *
from view import *
class MainWindow(QtGui.QMainWindow):
    """Main game window: owns the graphics scene, the view and the tick timer."""
    # Timer period in milliseconds between game ticks.
    interval = 40

    def __init__(self, *args, **kwargs):
        QtGui.QMainWindow.__init__(self, *args, **kwargs)
        self.scene = QtGui.QGraphicsScene(self)
        # NOTE(review): `width`/`height` are assumed to come from the star
        # imports (scene/view modules) -- confirm.  QGraphicsScene has no
        # plain `height` attribute, so the original `self.scene.height + 20`
        # passed a bound method to resize() and raised TypeError.
        self.resize(width + 20, height + 20)
        self.view = BricksView(self.scene, self)
        self.view.setRenderHint(QtGui.QPainter.Antialiasing)
        self.view.setScene(self.scene)
        self.view.setFocusPolicy(QtCore.Qt.NoFocus)
        self.view.resize(width, height)
        # Bricks must exist before wiring signals: the original connected
        # self.userBrick.keyNextPos before createBricks() created it
        # (AttributeError).
        self.createBricks()
        self.connect(self.view, QtCore.SIGNAL('keyRelease(event)'),
                     self.userBrick.keyNextPos)
        self.connect(self.scene, QtCore.SIGNAL('gameOver()'), self.endGame)
        self.run = False
        self.resetBricks()
        self.view.show()
        # `interval` is a class attribute; the original bare name raised
        # NameError.
        self.startTimer(self.interval)

    def createBricks(self):
        """Create the four corner bricks and the user-controlled brick."""
        self.bricks = list()
        for i in range(4):
            self.bricks.append(Brick())
            self.scene.addItem(self.bricks[i])
        self.userBrick = UserBrick()
        # Original referenced an undefined local `userbrick` (NameError).
        self.scene.addItem(self.userBrick)

    def resetBricks(self):
        """Move the corner bricks to the corners and the user to the center."""
        self.bricks[0].setRect(20, 20, 20, 20)
        self.bricks[1].setRect(width - 20, 20, 20, 20)
        self.bricks[2].setRect(20, height - 20, 20, 20)
        self.bricks[3].setRect(width - 20, height - 20, 20, 20)
        self.userBrick.setRect(width / 2, height / 2, 20, 20)

    def startGame(self):
        """Reset positions and start ticking."""
        self.resetBricks()
        self.run = True

    def endGame(self):
        """Stop the game loop (triggered by the scene's gameOver signal)."""
        self.run = False

    def timerEvent(self, event):
        """Advance every Brick one step per timer tick while running."""
        if not self.run:
            return
        # QGraphicsScene is not iterable; the original `for item in
        # self.scene` raised TypeError.
        for item in self.scene.items():
            if not isinstance(item, Brick):
                continue
            item.nextPos()
| gpl-3.0 |
superdesk/Live-Blog | documentor/libraries/docutils-0.9.1-py3.2/docutils/languages/ja.py | 52 | 1908 | # -*- coding: utf-8 -*-
# $Id: ja.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: Hisashi Morita <hisashim@kt.rim.or.jp>
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Japanese-language mappings for language-dependent features of Docutils.
"""
__docformat__ = 'reStructuredText'
# Japanese label shown for each Docutils node class (e.g. admonitions,
# bibliographic sections, the table of contents).
labels = {
    # fixed: language-dependent
    'author': '著者',
    'authors': '著者',
    'organization': '組織',
    'address': '住所',
    'contact': '連絡先',
    'version': 'バージョン',
    'revision': 'リビジョン',
    'status': 'ステータス',
    'date': '日付',
    'copyright': '著作権',
    'dedication': '献辞',
    'abstract': '概要',
    'attention': '注目!',
    'caution': '注意!',
    'danger': '!危険!',
    'error': 'エラー',
    'hint': 'ヒント',
    'important': '重要',
    'note': '備考',
    'tip': '通報',
    'warning': '警告',
    'contents': '目次'}
"""Mapping of node class name to label text."""
bibliographic_fields = {
    # language-dependent: fixed
    '著者': 'author',
    # NOTE(review): there is no distinct Japanese plural form for
    # "authors"; this placeholder key keeps the mapping total -- keep
    # the leading space, it distinguishes it from any real field name.
    ' n/a': 'authors',
    '組織': 'organization',
    '住所': 'address',
    '連絡先': 'contact',
    'バージョン': 'version',
    'リビジョン': 'revision',
    'ステータス': 'status',
    '日付': 'date',
    '著作権': 'copyright',
    '献辞': 'dedication',
    '概要': 'abstract'}
"""Japanese (lowcased) to canonical name mapping for bibliographic fields."""
author_separators = [';', ',']
"""List of separator strings for the 'Authors' bibliographic field. Tried in
order."""
| agpl-3.0 |
ekasitk/sahara | sahara/plugins/cdh/client/services.py | 1 | 19488 | # Copyright (c) 2014 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# The contents of this file are mainly copied from cm_api sources,
# released by Cloudera. Codes not used by Sahara CDH plugin are removed.
# You can find the original codes at
#
# https://github.com/cloudera/cm_api/tree/master/python/src/cm_api
#
# To satisfy the pep8 and python3 tests, we did some changes to the codes.
# We also change some importings to use Sahara inherited classes.
from oslo_serialization import jsonutils as json
import six
from sahara.plugins.cdh.client import role_config_groups
from sahara.plugins.cdh.client import roles
from sahara.plugins.cdh.client import types
SERVICES_PATH = "/clusters/%s/services"
SERVICE_PATH = "/clusters/%s/services/%s"
ROLETYPES_CFG_KEY = 'roleTypeConfigs'
def create_service(resource_root, name, service_type,
                   cluster_name="default"):
    """Create a new service on the given cluster.
    :param resource_root: The root Resource object.
    :param name: Service name
    :param service_type: Service type
    :param cluster_name: Cluster name
    :return: An ApiService object
    """
    path = SERVICES_PATH % (cluster_name,)
    payload = [ApiService(resource_root, name, service_type)]
    created = types.call(resource_root.post, path, ApiService, True,
                         data=payload)
    return created[0]
def get_service(resource_root, name, cluster_name="default"):
    """Look up a single service by name.
    :param resource_root: The root Resource object.
    :param name: Service name
    :param cluster_name: Cluster name
    :return: An ApiService object
    """
    base = SERVICES_PATH % (cluster_name,)
    return _get_service(resource_root, "%s/%s" % (base, name))
def _get_service(resource_root, path):
    # Internal helper: GET `path` and materialize it as a single ApiService.
    return types.call(resource_root.get, path, ApiService)
def get_all_services(resource_root, cluster_name="default", view=None):
    """Fetch every service defined on a cluster.
    :param resource_root: The root Resource object.
    :param cluster_name: Cluster name
    :param view: Optional view to materialize ('full' or 'summary')
    :return: A list of ApiService objects.
    """
    params = dict(view=view) if view else None
    return types.call(resource_root.get, SERVICES_PATH % (cluster_name,),
                      ApiService, True, params=params)
def delete_service(resource_root, name, cluster_name="default"):
    """Delete the named service from a cluster.
    :param resource_root: The root Resource object.
    :param name: Service name
    :param cluster_name: Cluster name
    :return: The deleted ApiService object
    """
    path = "%s/%s" % (SERVICES_PATH % (cluster_name,), name)
    return types.call(resource_root.delete, path, ApiService)
class ApiService(types.BaseApiResource):
    """A Cloudera Manager service (HDFS, YARN, Hive, ...) on a cluster.

    Wraps the CM API service resource: configuration read/update, role
    management, lifecycle commands (start/stop/restart) and the various
    one-shot service setup commands.
    """
    # Attribute map consumed by the types machinery: value None means
    # read/write, ROAttr() marks server-populated read-only attributes.
    _ATTRIBUTES = {
        'name': None,
        'type': None,
        'displayName': None,
        'serviceState': types.ROAttr(),
        'healthSummary': types.ROAttr(),
        'healthChecks': types.ROAttr(),
        'clusterRef': types.ROAttr(types.ApiClusterRef),
        'configStale': types.ROAttr(),
        'configStalenessStatus': types.ROAttr(),
        'clientConfigStalenessStatus': types.ROAttr(),
        'serviceUrl': types.ROAttr(),
        'maintenanceMode': types.ROAttr(),
        'maintenanceOwners': types.ROAttr(),
    }
    def __init__(self, resource_root, name=None, type=None):
        # `type` shadows the builtin on purpose: it must match the JSON key.
        types.BaseApiObject.init(self, resource_root, locals())
    def __str__(self):
        return ("<ApiService>: %s (cluster: %s)"
                % (self.name, self._get_cluster_name()))
    def _get_cluster_name(self):
        # None when the object has no cluster reference (CM management
        # service case, see _path()).
        if hasattr(self, 'clusterRef') and self.clusterRef:
            return self.clusterRef.clusterName
        return None
    def _path(self):
        """Return the API path for this service
        This method assumes that lack of a cluster reference means that the
        object refers to the Cloudera Management Services instance.
        """
        if self._get_cluster_name():
            return SERVICE_PATH % (self._get_cluster_name(), self.name)
        else:
            return '/cm/service'
    def _role_cmd(self, cmd, roles, api_version=1):
        # NOTE(review): the `roles` parameter shadows the module-level
        # `roles` import inside this method; here it is a list of role
        # names posted as the command payload.
        return self._post("roleCommands/" + cmd, types.ApiBulkCommandList,
                          data=roles, api_version=api_version)
    def _parse_svc_config(self, json_dic, view=None):
        """Parse a json-decoded ApiServiceConfig dictionary into a 2-tuple
        :param json_dic: The json dictionary with the config data.
        :param view: View to materialize.
        :return: 2-tuple (service config dictionary, role type configurations)
        """
        svc_config = types.json_to_config(json_dic, view == 'full')
        rt_configs = {}
        if ROLETYPES_CFG_KEY in json_dic:
            for rt_config in json_dic[ROLETYPES_CFG_KEY]:
                rt_configs[rt_config['roleType']] = types.json_to_config(
                    rt_config, view == 'full')
        return (svc_config, rt_configs)
    def create_yarn_job_history_dir(self):
        """Create the Yarn job history directory
        :return: Reference to submitted command.
        :since: API v6
        """
        return self._cmd('yarnCreateJobHistoryDirCommand', api_version=6)
    def get_config(self, view=None):
        """Retrieve the service's configuration
        Retrieves both the service configuration and role type configuration
        for each of the service's supported role types. The role type
        configurations are returned as a dictionary, whose keys are the
        role type name, and values are the respective configuration
        dictionaries.
        The 'summary' view contains strings as the dictionary values. The full
        view contains types.ApiConfig instances as the values.
        :param view: View to materialize ('full' or 'summary')
        :return: 2-tuple (service config dictionary, role type configurations)
        """
        path = self._path() + '/config'
        resp = self._get_resource_root().get(
            path, params=(dict(view=view) if view else None))
        return self._parse_svc_config(resp, view)
    def update_config(self, svc_config, **rt_configs):
        """Update the service's configuration
        :param svc_config: Dictionary with service configuration to update.
        :param rt_configs: Dict of role type configurations to update.
        :return: 2-tuple (service config dictionary, role type configurations)
        """
        path = self._path() + '/config'
        if svc_config:
            data = types.config_to_api_list(svc_config)
        else:
            data = {}
        if rt_configs:
            rt_list = []
            for rt, cfg in six.iteritems(rt_configs):
                rt_data = types.config_to_api_list(cfg)
                rt_data['roleType'] = rt
                rt_list.append(rt_data)
            data[ROLETYPES_CFG_KEY] = rt_list
        resp = self._get_resource_root().put(path, data=json.dumps(data))
        return self._parse_svc_config(resp)
    def create_role(self, role_name, role_type, host_id):
        """Create a role
        :param role_name: Role name
        :param role_type: Role type
        :param host_id: ID of the host to assign the role to
        :return: An ApiRole object
        """
        return roles.create_role(self._get_resource_root(), self.name,
                                 role_type, role_name, host_id,
                                 self._get_cluster_name())
    def delete_role(self, name):
        """Delete a role by name
        :param name: Role name
        :return: The deleted ApiRole object
        """
        return roles.delete_role(self._get_resource_root(), self.name, name,
                                 self._get_cluster_name())
    def get_roles_by_type(self, role_type, view=None):
        """Get all roles of a certain type in a service
        :param role_type: Role type
        :param view: View to materialize ('full' or 'summary')
        :return: A list of ApiRole objects.
        """
        return roles.get_roles_by_type(self._get_resource_root(), self.name,
                                       role_type, self._get_cluster_name(),
                                       view)
    def get_all_role_config_groups(self):
        """Get a list of role configuration groups in the service
        :return: A list of ApiRoleConfigGroup objects.
        :since: API v3
        """
        return role_config_groups.get_all_role_config_groups(
            self._get_resource_root(), self.name, self._get_cluster_name())
    def start(self):
        """Start a service
        :return: Reference to the submitted command.
        """
        return self._cmd('start')
    def stop(self):
        """Stop a service
        :return: Reference to the submitted command.
        """
        return self._cmd('stop')
    def restart(self):
        """Restart a service
        :return: Reference to the submitted command.
        """
        return self._cmd('restart')
    def start_roles(self, *role_names):
        """Start a list of roles
        :param role_names: names of the roles to start.
        :return: List of submitted commands.
        """
        return self._role_cmd('start', role_names)
    def create_hbase_root(self):
        """Create the root directory of an HBase service
        :return: Reference to the submitted command.
        """
        return self._cmd('hbaseCreateRoot')
    def create_hdfs_tmp(self):
        """Create /tmp directory in HDFS
        Create the /tmp directory in HDFS with appropriate ownership and
        permissions.
        :return: Reference to the submitted command
        :since: API v2
        """
        return self._cmd('hdfsCreateTmpDir')
    def refresh(self, *role_names):
        """Execute the "refresh" command on a set of roles
        :param role_names: Names of the roles to refresh.
        :return: Reference to the submitted command.
        """
        return self._role_cmd('refresh', role_names)
    def decommission(self, *role_names):
        """Decommission roles in a service
        :param role_names: Names of the roles to decommission.
        :return: Reference to the submitted command.
        """
        return self._cmd('decommission', data=role_names)
    def deploy_client_config(self, *role_names):
        """Deploys client configuration to the hosts where roles are running
        :param role_names: Names of the roles to decommission.
        :return: Reference to the submitted command.
        """
        return self._cmd('deployClientConfig', data=role_names)
    def format_hdfs(self, *namenodes):
        """Format NameNode instances of an HDFS service
        :param namenodes: Name of NameNode instances to format.
        :return: List of submitted commands.
        """
        return self._role_cmd('hdfsFormat', namenodes)
    def install_oozie_sharelib(self):
        """Installs the Oozie ShareLib
        Oozie must be stopped before running this command.
        :return: Reference to the submitted command.
        :since: API v3
        """
        return self._cmd('installOozieShareLib', api_version=3)
    def create_oozie_db(self):
        """Creates the Oozie Database Schema in the configured database
        :return: Reference to the submitted command.
        :since: API v2
        """
        return self._cmd('createOozieDb', api_version=2)
    def upgrade_oozie_db(self):
        """Upgrade Oozie Database schema as part of a major version upgrade
        :return: Reference to the submitted command.
        :since: API v6
        """
        return self._cmd('oozieUpgradeDb', api_version=6)
    def create_hive_metastore_tables(self):
        """Creates the Hive metastore tables in the configured database
        Will do nothing if tables already exist. Will not perform an upgrade.
        :return: Reference to the submitted command.
        :since: API v3
        """
        return self._cmd('hiveCreateMetastoreDatabaseTables', api_version=3)
    def create_hive_warehouse(self):
        """Creates the Hive warehouse directory in HDFS
        :return: Reference to the submitted command.
        :since: API v3
        """
        return self._cmd('hiveCreateHiveWarehouse')
    def create_hive_userdir(self):
        """Creates the Hive user directory in HDFS
        :return: Reference to the submitted command.
        :since: API v4
        """
        return self._cmd('hiveCreateHiveUserDir')
class ApiServiceSetupInfo(ApiService):
    """Service description used when setting up a cluster.

    Unlike ApiService it carries the desired config and role assignments
    inline, and is not bound to a live resource root.
    """
    _ATTRIBUTES = {
        'name': None,
        'type': None,
        'config': types.Attr(types.ApiConfig),
        'roles': types.Attr(roles.ApiRole),
    }
    def __init__(self, name=None, type=None,
                 config=None, roles=None):
        # The BaseApiObject expects a resource_root, which we don't care about
        resource_root = None
        # Unfortunately, the json key is called "type". So our input arg
        # needs to be called "type" as well, despite it being a python keyword.
        types.BaseApiObject.init(self, None, locals())
    def set_config(self, config):
        """Set the service configuration
        :param config: A dictionary of config key/value
        """
        if self.config is None:
            self.config = {}
        self.config.update(types.config_to_api_list(config))
    def add_role_info(self, role_name, role_type, host_id, config=None):
        """Add a role info
        The role will be created along with the service setup.
        :param role_name: Role name
        :param role_type: Role type
        :param host_id: The host where the role should run
        :param config: (Optional) A dictionary of role config values
        """
        if self.roles is None:
            self.roles = []
        api_config_list = (config is not None
                           and types.config_to_api_list(config)
                           or None)
        self.roles.append({
            'name': role_name,
            'type': role_type,
            'hostRef': {'hostId': host_id},
            'config': api_config_list})
    def enable_nn_ha(self, active_name, standby_host_id, nameservice, jns,
                     standby_name_dir_list=None, qj_name=None,
                     standby_name=None, active_fc_name=None,
                     standby_fc_name=None, zk_service_name=None,
                     force_init_znode=True,
                     clear_existing_standby_name_dirs=True,
                     clear_existing_jn_edits_dir=True):
        """Enable High Availability (HA) with Auto-Failover for HDFS NameNode
        @param active_name: Name of Active NameNode.
        @param standby_host_id: ID of host where Standby NameNode will be
                                created.
        @param nameservice: Nameservice to be used while enabling HA.
                            Optional if Active NameNode already has this
                            config set.
        @param jns: List of Journal Nodes to be created during the command.
                    Each element of the list must be a dict containing the
                    following items:
                    - jns['jnHostId']: ID of the host where the new JournalNode
                                       will be created.
                    - jns['jnName']: Name of the JournalNode role (optional)
                    - jns['jnEditsDir']: Edits dir of the JournalNode. Can be
                                         omitted if the config is already set
                                         at RCG level.
        @param standby_name_dir_list: List of directories for the new Standby
                                      NameNode. If not provided then it will
                                      use same dirs as Active NameNode.
        @param qj_name: Name of the journal located on each JournalNodes'
                        filesystem. This can be optionally provided if the
                        config hasn't been already set for the Active NameNode.
                        If this isn't provided and Active NameNode doesn't
                        also have the config, then nameservice is used by
                        default.
        @param standby_name: Name of the Standby NameNode role to be created
                             (Optional).
        @param active_fc_name: Name of the Active Failover Controller role to
                               be created (Optional).
        @param standby_fc_name: Name of the Standby Failover Controller role to
                                be created (Optional).
        @param zk_service_name: Name of the ZooKeeper service to use for auto-
                                failover. If HDFS service already depends on a
                                ZooKeeper service then that ZooKeeper service
                                will be used for auto-failover and in that case
                                this parameter can either be omitted or should
                                be the same ZooKeeper service.
        @param force_init_znode: Indicates if the ZNode should be force
                                 initialized if it is already present. Useful
                                 while re-enabling High Availability. (Default:
                                 TRUE)
        @param clear_existing_standby_name_dirs: Indicates if the existing name
                                                 directories for Standby
                                                 NameNode should be cleared
                                                 during the workflow.
                                                 Useful while re-enabling High
                                                 Availability. (Default: TRUE)
        @param clear_existing_jn_edits_dir: Indicates if the existing edits
                                            directories for the JournalNodes
                                            for the specified nameservice
                                            should be cleared during the
                                            workflow. Useful while re-enabling
                                            High Availability. (Default: TRUE)
        @return: Reference to the submitted command.
        @since: API v6
        """
        # Keyword names below mirror the CM API JSON payload exactly.
        args = dict(
            activeNnName=active_name,
            standbyNnName=standby_name,
            standbyNnHostId=standby_host_id,
            standbyNameDirList=standby_name_dir_list,
            nameservice=nameservice,
            qjName=qj_name,
            activeFcName=active_fc_name,
            standbyFcName=standby_fc_name,
            zkServiceName=zk_service_name,
            forceInitZNode=force_init_znode,
            clearExistingStandbyNameDirs=clear_existing_standby_name_dirs,
            clearExistingJnEditsDir=clear_existing_jn_edits_dir,
            jns=jns
        )
        return self._cmd('hdfsEnableNnHa', data=args, api_version=6)
| apache-2.0 |
skykiny/shadowsocks | shadowsocks/manager.py | 925 | 9692 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import errno
import traceback
import socket
import logging
import json
import collections
from shadowsocks import common, eventloop, tcprelay, udprelay, asyncdns, shell
BUF_SIZE = 1506
STAT_SEND_LIMIT = 100
class Manager(object):
    """Multi-port shadowsocks server manager.

    Listens on a UDP (or UNIX-domain) control socket for ``add`` /
    ``remove`` / ``ping`` commands, runs one TCP+UDP relay pair per
    configured port on a single event loop, and periodically reports
    per-port traffic statistics back to the last control client.
    """
    def __init__(self, config):
        self._config = config
        self._relays = {}  # port -> (tcprelay, udprelay)
        self._loop = eventloop.EventLoop()
        self._dns_resolver = asyncdns.DNSResolver()
        self._dns_resolver.add_to_loop(self._loop)
        # per-port byte counters, flushed by handle_periodic()
        self._statistics = collections.defaultdict(int)
        self._control_client_addr = None
        try:
            manager_address = config['manager_address']
            if ':' in manager_address:
                # "host:port" -> UDP control socket
                addr = manager_address.rsplit(':', 1)
                addr = addr[0], int(addr[1])
                addrs = socket.getaddrinfo(addr[0], addr[1])
                if addrs:
                    family = addrs[0][0]
                else:
                    logging.error('invalid address: %s', manager_address)
                    exit(1)
            else:
                # a bare path -> UNIX domain socket
                addr = manager_address
                family = socket.AF_UNIX
            self._control_socket = socket.socket(family,
                                                 socket.SOCK_DGRAM)
            self._control_socket.bind(addr)
            self._control_socket.setblocking(False)
        except (OSError, IOError) as e:
            logging.error(e)
            logging.error('can not bind to manager address')
            exit(1)
        self._loop.add(self._control_socket,
                       eventloop.POLL_IN, self)
        self._loop.add_periodic(self.handle_periodic)
        # start one relay pair per configured (port, password)
        port_password = config['port_password']
        del config['port_password']
        for port, password in port_password.items():
            a_config = config.copy()
            a_config['server_port'] = int(port)
            a_config['password'] = password
            self.add_port(a_config)
    def add_port(self, config):
        """Start TCP and UDP relays for config['server_port'] (no-op if
        that port is already being served)."""
        port = int(config['server_port'])
        servers = self._relays.get(port, None)
        if servers:
            logging.error("server already exists at %s:%d" % (config['server'],
                                                              port))
            return
        logging.info("adding server at %s:%d" % (config['server'], port))
        t = tcprelay.TCPRelay(config, self._dns_resolver, False,
                              self.stat_callback)
        u = udprelay.UDPRelay(config, self._dns_resolver, False,
                              self.stat_callback)
        t.add_to_loop(self._loop)
        u.add_to_loop(self._loop)
        self._relays[port] = (t, u)
    def remove_port(self, config):
        """Stop and forget the relays serving config['server_port']."""
        port = int(config['server_port'])
        servers = self._relays.get(port, None)
        if servers:
            logging.info("removing server at %s:%d" % (config['server'], port))
            t, u = servers
            t.close(next_tick=False)
            u.close(next_tick=False)
            del self._relays[port]
        else:
            logging.error("server not exist at %s:%d" % (config['server'],
                                                         port))
    def handle_event(self, sock, fd, event):
        """Event-loop callback: process one datagram on the control socket.

        Commands look like ``add: {json}``, ``remove: {json}`` or
        ``ping``; the JSON payload overrides a copy of the base config.
        """
        if sock == self._control_socket and event == eventloop.POLL_IN:
            data, self._control_client_addr = sock.recvfrom(BUF_SIZE)
            parsed = self._parse_command(data)
            if parsed:
                command, config = parsed
                a_config = self._config.copy()
                if config:
                    # let the command override the configuration file
                    a_config.update(config)
                if 'server_port' not in a_config:
                    logging.error('can not find server_port in config')
                else:
                    if command == 'add':
                        self.add_port(a_config)
                        self._send_control_data(b'ok')
                    elif command == 'remove':
                        self.remove_port(a_config)
                        self._send_control_data(b'ok')
                    elif command == 'ping':
                        self._send_control_data(b'pong')
                    else:
                        logging.error('unknown command %s', command)
    def _parse_command(self, data):
        """Split a control datagram into (command, config-dict-or-None).

        Returns None when the JSON payload cannot be parsed.
        Examples:
          add: {"server_port": 8000, "password": "foobar"}
          remove: {"server_port": 8000}
        """
        data = common.to_str(data)
        parts = data.split(':', 1)
        if len(parts) < 2:
            # bare command without payload, e.g. "ping"
            return data, None
        command, config_json = parts
        try:
            config = shell.parse_json_in_str(config_json)
            return command, config
        except Exception as e:
            logging.error(e)
            return None
    def stat_callback(self, port, data_len):
        # Called by the relays for every chunk transferred on `port`.
        self._statistics[port] += data_len
    def handle_periodic(self):
        """Periodic event-loop callback: flush accumulated statistics to
        the control client, in UDP-packet-sized JSON segments."""
        r = {}
        i = 0

        def send_data(data_dict):
            if data_dict:
                # use compact JSON format (without space)
                data = common.to_bytes(json.dumps(data_dict,
                                                  separators=(',', ':')))
                self._send_control_data(b'stat: ' + data)

        for k, v in self._statistics.items():
            r[k] = v
            i += 1
            # split the data into segments that fit in UDP packets
            if i >= STAT_SEND_LIMIT:
                send_data(r)
                r.clear()
                # Reset the counter so the next segment is also batched;
                # without this, every entry past the first segment was
                # sent as its own single-entry packet.
                i = 0
        send_data(r)
        self._statistics.clear()
    def _send_control_data(self, data):
        """Best-effort reply to the last control client (no-op if none)."""
        if self._control_client_addr:
            try:
                self._control_socket.sendto(data, self._control_client_addr)
            except (socket.error, OSError, IOError) as e:
                error_no = eventloop.errno_from_exception(e)
                if error_no in (errno.EAGAIN, errno.EINPROGRESS,
                                errno.EWOULDBLOCK):
                    # transient; drop this report rather than block
                    return
                else:
                    shell.print_exception(e)
                    if self._config['verbose']:
                        traceback.print_exc()
    def run(self):
        """Run the event loop forever (blocks)."""
        self._loop.run()
def run(config):
    """Module entry point: build a Manager from *config* and run its
    event loop forever."""
    manager = Manager(config)
    manager.run()
def test():
    """Integration self-test: start a Manager in a background thread,
    then exercise add/remove commands and TCP/UDP statistics reporting
    through the UDP control socket."""
    import time
    import threading
    import struct
    from shadowsocks import encrypt
    logging.basicConfig(level=5,
                        format='%(asctime)s %(levelname)-8s %(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S')
    enc = []
    eventloop.TIMEOUT_PRECISION = 1

    def run_server():
        config = {
            'server': '127.0.0.1',
            'local_port': 1081,
            'port_password': {
                '8381': 'foobar1',
                '8382': 'foobar2'
            },
            'method': 'aes-256-cfb',
            'manager_address': '127.0.0.1:6001',
            'timeout': 60,
            'fast_open': False,
            'verbose': 2
        }
        manager = Manager(config)
        enc.append(manager)
        manager.run()

    t = threading.Thread(target=run_server)
    t.start()
    time.sleep(1)
    manager = enc[0]
    # control-channel client
    cli = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    cli.connect(('127.0.0.1', 6001))
    # test add and remove
    time.sleep(1)
    cli.send(b'add: {"server_port":7001, "password":"asdfadsfasdf"}')
    time.sleep(1)
    assert 7001 in manager._relays
    data, addr = cli.recvfrom(1506)
    assert b'ok' in data
    cli.send(b'remove: {"server_port":8381}')
    time.sleep(1)
    assert 8381 not in manager._relays
    data, addr = cli.recvfrom(1506)
    assert b'ok' in data
    logging.info('add and remove test passed')
    # test statistics for TCP
    header = common.pack_addr(b'google.com') + struct.pack('>H', 80)
    data = encrypt.encrypt_all(b'asdfadsfasdf', 'aes-256-cfb', 1,
                               header + b'GET /\r\n\r\n')
    tcp_cli = socket.socket()
    tcp_cli.connect(('127.0.0.1', 7001))
    tcp_cli.send(data)
    tcp_cli.recv(4096)
    tcp_cli.close()
    data, addr = cli.recvfrom(1506)
    data = common.to_str(data)
    assert data.startswith('stat: ')
    data = data.split('stat:')[1]
    stats = shell.parse_json_in_str(data)
    assert '7001' in stats
    logging.info('TCP statistics test passed')
    # test statistics for UDP
    header = common.pack_addr(b'127.0.0.1') + struct.pack('>H', 80)
    data = encrypt.encrypt_all(b'foobar2', 'aes-256-cfb', 1,
                               header + b'test')
    udp_cli = socket.socket(type=socket.SOCK_DGRAM)
    udp_cli.sendto(data, ('127.0.0.1', 8382))
    # was: tcp_cli.close() -- a copy-paste slip that re-closed the already
    # closed TCP socket and leaked the UDP one
    udp_cli.close()
    data, addr = cli.recvfrom(1506)
    data = common.to_str(data)
    assert data.startswith('stat: ')
    data = data.split('stat:')[1]
    stats = json.loads(data)
    assert '8382' in stats
    logging.info('UDP statistics test passed')
    cli.close()
    manager._loop.stop()
    t.join()
if __name__ == '__main__':
    # Run the integration self-test when executed directly.
    test()
| apache-2.0 |
Dm47021/Android_kernel_f6mt_aosp_jb-rebase | tools/perf/scripts/python/syscall-counts.py | 11181 | 1522 | # system call counts
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts.py [comm]\n";
# Optional [comm] argument restricts counting to one process name.
for_comm = None
if len(sys.argv) > 2:
    sys.exit(usage)
if len(sys.argv) > 1:
    for_comm = sys.argv[1]
# syscall id -> hit count; autodict comes from perf's Core helpers and
# auto-creates entries (first increment raises TypeError, handled below)
syscalls = autodict()
def trace_begin():
    """Called by perf once, before any events are processed."""
    print "Press control+C to stop and show the summary"
def trace_end():
    """Called by perf after the last event; prints the summary table."""
    print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    id, args):
    """perf tracepoint handler: count one raw_syscalls:sys_enter event.

    Parameter names/order are fixed by perf's tracepoint format; `id`
    is the syscall number.
    """
    if for_comm is not None:
        # filtering by process name: skip events from other comms
        if common_comm != for_comm:
            return
    try:
        syscalls[id] += 1
    except TypeError:
        # first hit: the autodict entry is not an int yet
        syscalls[id] = 1
def print_syscall_totals():
    """Print a table of per-syscall event counts, most frequent first."""
    if for_comm is not None:
        print "\nsyscall events for %s:\n\n" % (for_comm),
    else:
        print "\nsyscall events:\n\n",
    print "%-40s %10s\n" % ("event", "count"),
    print "%-40s %10s\n" % ("----------------------------------------", \
        "-----------"),
    # sort by (count, id) descending; trailing commas suppress the extra
    # newline of the Python 2 print statement
    for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
        reverse = True):
        print "%-40s %10d\n" % (syscall_name(id), val),
| gpl-2.0 |
akurtakov/Pydev | plugins/org.python.pydev.core/pysrc/_pydev_imps/_pydev_SimpleXMLRPCServer.py | 14 | 21572 | #Just a copy of the version in python 2.5 to be used if it's not available in jython 2.1
"""Simple XML-RPC Server.
This module can be used to create simple XML-RPC servers
by creating a server and either installing functions, a
class instance, or by extending the SimpleXMLRPCServer
class.
It can also be used to handle XML-RPC requests in a CGI
environment using CGIXMLRPCRequestHandler.
A list of possible usage patterns follows:
1. Install functions:
server = SimpleXMLRPCServer(("localhost", 8000))
server.register_function(pow)
server.register_function(lambda x,y: x+y, 'add')
server.serve_forever()
2. Install an instance:
class MyFuncs:
def __init__(self):
# make all of the string functions available through
# string.func_name
import string
self.string = string
def _listMethods(self):
# implement this method so that system.listMethods
# knows to advertise the strings methods
return list_public_methods(self) + \
['string.' + method for method in list_public_methods(self.string)]
def pow(self, x, y): return pow(x, y)
def add(self, x, y) : return x + y
server = SimpleXMLRPCServer(("localhost", 8000))
server.register_introspection_functions()
server.register_instance(MyFuncs())
server.serve_forever()
3. Install an instance with custom dispatch method:
class Math:
def _listMethods(self):
# this method must be present for system.listMethods
# to work
return ['add', 'pow']
def _methodHelp(self, method):
# this method must be present for system.methodHelp
# to work
if method == 'add':
return "add(2,3) => 5"
elif method == 'pow':
return "pow(x, y[, z]) => number"
else:
# By convention, return empty
# string if no help is available
return ""
def _dispatch(self, method, params):
if method == 'pow':
return pow(*params)
elif method == 'add':
return params[0] + params[1]
else:
raise 'bad method'
server = SimpleXMLRPCServer(("localhost", 8000))
server.register_introspection_functions()
server.register_instance(Math())
server.serve_forever()
4. Subclass SimpleXMLRPCServer:
class MathServer(SimpleXMLRPCServer):
def _dispatch(self, method, params):
try:
# We are forcing the 'export_' prefix on methods that are
# callable through XML-RPC to prevent potential security
# problems
func = getattr(self, 'export_' + method)
except AttributeError:
raise Exception('method "%s" is not supported' % method)
else:
return func(*params)
def export_add(self, x, y):
return x + y
server = MathServer(("localhost", 8000))
server.serve_forever()
5. CGI script:
server = CGIXMLRPCRequestHandler()
server.register_function(pow)
server.handle_request()
"""
# Written by Brian Quinlan (brian@sweetapp.com).
# Based on code written by Fredrik Lundh.
from _pydev_imps import _pydev_xmlrpclib as xmlrpclib
from _pydev_imps._pydev_xmlrpclib import Fault
from _pydev_imps import _pydev_SocketServer as SocketServer
from _pydev_imps import _pydev_BaseHTTPServer as BaseHTTPServer
import sys
import os
try:
import fcntl
except ImportError:
fcntl = None
def resolve_dotted_attribute(obj, attr, allow_dotted_names=True):
    """resolve_dotted_attribute(a, 'b.c.d') => a.b.c.d

    Walk *attr* -- a plain name, or a dotted path when
    *allow_dotted_names* is true -- starting from *obj* and return the
    final attribute. Raises AttributeError as soon as any segment of
    the path starts with '_', keeping private attributes unreachable.
    With allow_dotted_names false this behaves like getattr(obj, attr).
    """
    segments = attr.split('.') if allow_dotted_names else [attr]
    for segment in segments:
        if segment.startswith('_'):
            raise AttributeError(
                'attempt to access private attribute "%s"' % segment
                )
        obj = getattr(obj, segment)
    return obj
def list_public_methods(obj):
    """Return the names of *obj*'s callable attributes that do not
    start with an underscore (i.e. its publicly callable surface)."""
    public = []
    for name in dir(obj):
        if name.startswith('_'):
            continue
        if callable(getattr(obj, name)):
            public.append(name)
    return public
def remove_duplicates(lst):
    """remove_duplicates([2,2,2,1,3,3]) => [3,1,2]

    Return the distinct items of *lst*; order is unspecified. Every
    item must be hashable -- a dict is used as the de-duplicating set
    (kept instead of set() for old-Jython compatibility).
    """
    seen = {}
    for item in lst:
        seen[item] = 1
    return seen.keys()
class SimpleXMLRPCDispatcher:
    """Mix-in class that dispatches XML-RPC requests.
    This class is used to register XML-RPC method handlers
    and then to dispatch them. There should never be any
    reason to instantiate this class directly.
    """
    def __init__(self, allow_none, encoding):
        # funcs maps exported method names to callables; instance is an
        # optional fallback object searched when no function matches.
        self.funcs = {}
        self.instance = None
        self.allow_none = allow_none
        self.encoding = encoding
    def register_instance(self, instance, allow_dotted_names=False):
        """Registers an instance to respond to XML-RPC requests.
        Only one instance can be installed at a time.
        If the registered instance has a _dispatch method then that
        method will be called with the name of the XML-RPC method and
        its parameters as a tuple
        e.g. instance._dispatch('add',(2,3))
        If the registered instance does not have a _dispatch method
        then the instance will be searched to find a matching method
        and, if found, will be called. Methods beginning with an '_'
        are considered private and will not be called by
        SimpleXMLRPCServer.
        If a registered function matches a XML-RPC request, then it
        will be called instead of the registered instance.
        If the optional allow_dotted_names argument is true and the
        instance does not have a _dispatch method, method names
        containing dots are supported and resolved, as long as none of
        the name segments start with an '_'.
        *** SECURITY WARNING: ***
        Enabling the allow_dotted_names options allows intruders
        to access your module's global variables and may allow
        intruders to execute arbitrary code on your machine. Only
        use this option on a secure, closed network.
        """
        self.instance = instance
        self.allow_dotted_names = allow_dotted_names
    def register_function(self, function, name=None):
        """Registers a function to respond to XML-RPC requests.
        The optional name argument can be used to set a Unicode name
        for the function.
        """
        if name is None:
            name = function.__name__
        self.funcs[name] = function
    def register_introspection_functions(self):
        """Registers the XML-RPC introspection methods in the system
        namespace.
        see http://xmlrpc.usefulinc.com/doc/reserved.html
        """
        self.funcs.update({'system.listMethods' : self.system_listMethods,
                      'system.methodSignature' : self.system_methodSignature,
                      'system.methodHelp' : self.system_methodHelp})
    def register_multicall_functions(self):
        """Registers the XML-RPC multicall method in the system
        namespace.
        see http://www.xmlrpc.com/discuss/msgReader$1208"""
        self.funcs.update({'system.multicall' : self.system_multicall})
    def _marshaled_dispatch(self, data, dispatch_method=None):
        """Dispatches an XML-RPC method from marshalled (XML) data.
        XML-RPC methods are dispatched from the marshalled (XML) data
        using the _dispatch method and the result is returned as
        marshalled data. For backwards compatibility, a dispatch
        function can be provided as an argument (see comment in
        SimpleXMLRPCRequestHandler.do_POST) but overriding the
        existing method through subclassing is the prefered means
        of changing method dispatch behavior.
        """
        try:
            params, method = xmlrpclib.loads(data)
            # generate response
            if dispatch_method is not None:
                response = dispatch_method(method, params)
            else:
                response = self._dispatch(method, params)
            # wrap response in a singleton tuple
            response = xmlrpclib.dumps((response,), methodresponse=1,
                                       allow_none=self.allow_none, encoding=self.encoding) if False else None
        except Fault, fault:
            response = xmlrpclib.dumps(fault, allow_none=self.allow_none,
                                       encoding=self.encoding)
        except:
            # report exception back to server
            response = xmlrpclib.dumps(
                xmlrpclib.Fault(1, "%s:%s" % (sys.exc_type, sys.exc_value)), #@UndefinedVariable exc_value only available when we actually have an exception
                encoding=self.encoding, allow_none=self.allow_none,
                )
        return response
    def system_listMethods(self):
        """system.listMethods() => ['add', 'subtract', 'multiple']
        Returns a list of the methods supported by the server."""
        methods = self.funcs.keys()
        if self.instance is not None:
            # Instance can implement _listMethod to return a list of
            # methods
            if hasattr(self.instance, '_listMethods'):
                methods = remove_duplicates(
                        methods + self.instance._listMethods()
                    )
            # if the instance has a _dispatch method then we
            # don't have enough information to provide a list
            # of methods
            elif not hasattr(self.instance, '_dispatch'):
                methods = remove_duplicates(
                        methods + list_public_methods(self.instance)
                    )
        methods.sort()
        return methods
    def system_methodSignature(self, method_name):
        """system.methodSignature('add') => [double, int, int]
        Returns a list describing the signature of the method. In the
        above example, the add method takes two integers as arguments
        and returns a double result.
        This server does NOT support system.methodSignature."""
        # See http://xmlrpc.usefulinc.com/doc/sysmethodsig.html
        return 'signatures not supported'
    def system_methodHelp(self, method_name):
        """system.methodHelp('add') => "Adds two integers together"
        Returns a string containing documentation for the specified method."""
        method = None
        if self.funcs.has_key(method_name):
            method = self.funcs[method_name]
        elif self.instance is not None:
            # Instance can implement _methodHelp to return help for a method
            if hasattr(self.instance, '_methodHelp'):
                return self.instance._methodHelp(method_name)
            # if the instance has a _dispatch method then we
            # don't have enough information to provide help
            elif not hasattr(self.instance, '_dispatch'):
                try:
                    method = resolve_dotted_attribute(
                                self.instance,
                                method_name,
                                self.allow_dotted_names
                                )
                except AttributeError:
                    pass
        # Note that we aren't checking that the method actually
        # be a callable object of some kind
        if method is None:
            return ""
        else:
            try:
                import pydoc
            except ImportError:
                return "" #not there for jython
            else:
                return pydoc.getdoc(method)
    def system_multicall(self, call_list):
        """system.multicall([{'methodName': 'add', 'params': [2, 2]}, ...]) => \
[[4], ...]
        Allows the caller to package multiple XML-RPC calls into a single
        request.
        See http://www.xmlrpc.com/discuss/msgReader$1208
        """
        # Each call is dispatched independently; a failure becomes a
        # per-call fault struct rather than failing the whole batch.
        results = []
        for call in call_list:
            method_name = call['methodName']
            params = call['params']
            try:
                # XXX A marshalling error in any response will fail the entire
                # multicall. If someone cares they should fix this.
                results.append([self._dispatch(method_name, params)])
            except Fault, fault:
                results.append(
                    {'faultCode' : fault.faultCode,
                     'faultString' : fault.faultString}
                    )
            except:
                results.append(
                    {'faultCode' : 1,
                     'faultString' : "%s:%s" % (sys.exc_type, sys.exc_value)} #@UndefinedVariable exc_value only available when we actually have an exception
                    )
        return results
    def _dispatch(self, method, params):
        """Dispatches the XML-RPC method.
        XML-RPC calls are forwarded to a registered function that
        matches the called XML-RPC method name. If no such function
        exists then the call is forwarded to the registered instance,
        if available.
        If the registered instance has a _dispatch method then that
        method will be called with the name of the XML-RPC method and
        its parameters as a tuple
        e.g. instance._dispatch('add',(2,3))
        If the registered instance does not have a _dispatch method
        then the instance will be searched to find a matching method
        and, if found, will be called.
        Methods beginning with an '_' are considered private and will
        not be called.
        """
        func = None
        try:
            # check to see if a matching function has been registered
            func = self.funcs[method]
        except KeyError:
            if self.instance is not None:
                # check for a _dispatch method
                if hasattr(self.instance, '_dispatch'):
                    return self.instance._dispatch(method, params)
                else:
                    # call instance method directly
                    try:
                        func = resolve_dotted_attribute(
                            self.instance,
                            method,
                            self.allow_dotted_names
                            )
                    except AttributeError:
                        pass
        if func is not None:
            return func(*params)
        else:
            raise Exception('method "%s" is not supported' % method)
class SimpleXMLRPCRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    """Simple XML-RPC request handler class.
    Handles all HTTP POST requests and attempts to decode them as
    XML-RPC requests.
    """
    # Class attribute listing the accessible path components;
    # paths not on this list will result in a 404 error.
    rpc_paths = ('/', '/RPC2')
    def is_rpc_path_valid(self):
        # An empty rpc_paths disables path checking entirely.
        if self.rpc_paths:
            return self.path in self.rpc_paths
        else:
            # If .rpc_paths is empty, just assume all paths are legal
            return True
    def do_POST(self):
        """Handles the HTTP POST request.
        Attempts to interpret all HTTP POST requests as XML-RPC calls,
        which are forwarded to the server's _dispatch method for handling.
        """
        # Check that the path is legal
        if not self.is_rpc_path_valid():
            self.report_404()
            return
        try:
            # Get arguments by reading body of request.
            # We read this in chunks to avoid straining
            # socket.read(); around the 10 or 15Mb mark, some platforms
            # begin to have problems (bug #792570).
            max_chunk_size = 10 * 1024 * 1024
            size_remaining = int(self.headers["content-length"])
            L = []
            while size_remaining:
                chunk_size = min(size_remaining, max_chunk_size)
                L.append(self.rfile.read(chunk_size))
                size_remaining -= len(L[-1])
            data = ''.join(L)
            # In previous versions of SimpleXMLRPCServer, _dispatch
            # could be overridden in this class, instead of in
            # SimpleXMLRPCDispatcher. To maintain backwards compatibility,
            # check to see if a subclass implements _dispatch and dispatch
            # using that method if present.
            response = self.server._marshaled_dispatch(
                    data, getattr(self, '_dispatch', None)
                )
        except: # This should only happen if the module is buggy
            # internal error, report as HTTP server error
            self.send_response(500)
            self.end_headers()
        else:
            # got a valid XML RPC response
            self.send_response(200)
            self.send_header("Content-type", "text/xml")
            self.send_header("Content-length", str(len(response)))
            self.end_headers()
            self.wfile.write(response)
            # shut down the connection
            self.wfile.flush()
            self.connection.shutdown(1)
    def report_404 (self):
        # Report a 404 error with a small plain-text body.
        self.send_response(404)
        response = 'No such page'
        self.send_header("Content-type", "text/plain")
        self.send_header("Content-length", str(len(response)))
        self.end_headers()
        self.wfile.write(response)
        # shut down the connection
        self.wfile.flush()
        self.connection.shutdown(1)
    def log_request(self, code='-', size='-'):
        """Selectively log an accepted request."""
        # Honour the server's logRequests flag set at construction time.
        if self.server.logRequests:
            BaseHTTPServer.BaseHTTPRequestHandler.log_request(self, code, size)
class SimpleXMLRPCServer(SocketServer.TCPServer,
                         SimpleXMLRPCDispatcher):
    """Simple XML-RPC server.
    Simple XML-RPC server that allows functions and a single instance
    to be installed to handle requests. The default implementation
    attempts to dispatch XML-RPC calls to the functions or instance
    installed in the server. Override the _dispatch method inhereted
    from SimpleXMLRPCDispatcher to change this behavior.
    """
    # Allow quick restarts: rebind even while the old socket lingers
    # in TIME_WAIT ("address already in use").
    allow_reuse_address = True
    def __init__(self, addr, requestHandler=SimpleXMLRPCRequestHandler,
                 logRequests=True, allow_none=False, encoding=None):
        self.logRequests = logRequests
        SimpleXMLRPCDispatcher.__init__(self, allow_none, encoding)
        SocketServer.TCPServer.__init__(self, addr, requestHandler)
        # [Bug #1222790] If possible, set close-on-exec flag; if a
        # method spawns a subprocess, the subprocess shouldn't have
        # the listening socket open.
        if fcntl is not None and hasattr(fcntl, 'FD_CLOEXEC'):
            flags = fcntl.fcntl(self.fileno(), fcntl.F_GETFD)
            flags |= fcntl.FD_CLOEXEC
            fcntl.fcntl(self.fileno(), fcntl.F_SETFD, flags)
class CGIXMLRPCRequestHandler(SimpleXMLRPCDispatcher):
    """Simple handler for XML-RPC data passed through CGI."""
    def __init__(self, allow_none=False, encoding=None):
        SimpleXMLRPCDispatcher.__init__(self, allow_none, encoding)
    def handle_xmlrpc(self, request_text):
        """Handle a single XML-RPC request"""
        response = self._marshaled_dispatch(request_text)
        # Emit a minimal CGI response: headers, blank line, then body.
        sys.stdout.write('Content-Type: text/xml\n')
        sys.stdout.write('Content-Length: %d\n' % len(response))
        sys.stdout.write('\n')
        sys.stdout.write(response)
    def handle_get(self):
        """Handle a single HTTP GET request.
        Default implementation indicates an error because
        XML-RPC uses the POST method.
        """
        code = 400
        message, explain = \
                 BaseHTTPServer.BaseHTTPRequestHandler.responses[code]
        response = BaseHTTPServer.DEFAULT_ERROR_MESSAGE % { #@UndefinedVariable
            'code' : code,
            'message' : message,
            'explain' : explain
            }
        sys.stdout.write('Status: %d %s\n' % (code, message))
        sys.stdout.write('Content-Type: text/html\n')
        sys.stdout.write('Content-Length: %d\n' % len(response))
        sys.stdout.write('\n')
        sys.stdout.write(response)
    def handle_request(self, request_text=None):
        """Handle a single XML-RPC request passed through a CGI post method.
        If no XML data is given then it is read from stdin. The resulting
        XML-RPC response is printed to stdout along with the correct HTTP
        headers.
        """
        if request_text is None and \
            os.environ.get('REQUEST_METHOD', None) == 'GET':
            self.handle_get()
        else:
            # POST data is normally available through stdin
            if request_text is None:
                request_text = sys.stdin.read()
            self.handle_xmlrpc(request_text)
if __name__ == '__main__':
    # Demo server: exposes pow() and an 'add' lambda on localhost:8000.
    sys.stdout.write('Running XML-RPC server on port 8000\n')
    server = SimpleXMLRPCServer(("localhost", 8000))
    server.register_function(pow)
    server.register_function(lambda x, y: x + y, 'add')
    server.serve_forever()
| epl-1.0 |
40223227/2015cdbg6w0622-40223227- | static/Brython3.1.1-20150328-091302/Lib/unittest/test/testmock/testcallable.py | 739 | 4234 | # Copyright (C) 2007-2012 Michael Foord & the mock team
# E-mail: fuzzyman AT voidspace DOT org DOT uk
# http://www.voidspace.org.uk/python/mock/
import unittest
from unittest.test.testmock.support import is_instance, X, SomeClass
from unittest.mock import (
Mock, MagicMock, NonCallableMagicMock,
NonCallableMock, patch, create_autospec,
CallableMixin
)
class TestCallable(unittest.TestCase):
    """Tests for the callable/non-callable split in unittest.mock."""
    def assertNotCallable(self, mock):
        # Helper: mock must be the non-callable magic variant and must
        # not mix in CallableMixin.
        self.assertTrue(is_instance(mock, NonCallableMagicMock))
        self.assertFalse(is_instance(mock, CallableMixin))
    def test_non_callable(self):
        # Non-callable mocks refuse calls and don't expose __call__.
        for mock in NonCallableMagicMock(), NonCallableMock():
            self.assertRaises(TypeError, mock)
            self.assertFalse(hasattr(mock, '__call__'))
            self.assertIn(mock.__class__.__name__, repr(mock))
    def test_heirarchy(self):
        # (sic: "heirarchy") callable classes subclass their bases
        self.assertTrue(issubclass(MagicMock, Mock))
        self.assertTrue(issubclass(NonCallableMagicMock, NonCallableMock))
    def test_attributes(self):
        # Child attributes of non-callable mocks are still callable mocks.
        one = NonCallableMock()
        self.assertTrue(issubclass(type(one.one), Mock))
        two = NonCallableMagicMock()
        self.assertTrue(issubclass(type(two.two), MagicMock))
    def test_subclasses(self):
        # Child attributes preserve the user's subclass type.
        class MockSub(Mock):
            pass
        one = MockSub()
        self.assertTrue(issubclass(type(one.one), MockSub))
        class MagicSub(MagicMock):
            pass
        two = MagicSub()
        self.assertTrue(issubclass(type(two.two), MagicSub))
    def test_patch_spec(self):
        # spec=True: instances returned by the patched class are
        # non-callable (X instances are not callable).
        patcher = patch('%s.X' % __name__, spec=True)
        mock = patcher.start()
        self.addCleanup(patcher.stop)
        instance = mock()
        mock.assert_called_once_with()
        self.assertNotCallable(instance)
        self.assertRaises(TypeError, instance)
    def test_patch_spec_set(self):
        patcher = patch('%s.X' % __name__, spec_set=True)
        mock = patcher.start()
        self.addCleanup(patcher.stop)
        instance = mock()
        mock.assert_called_once_with()
        self.assertNotCallable(instance)
        self.assertRaises(TypeError, instance)
    def test_patch_spec_instance(self):
        # spec=an instance: the mock itself mirrors that instance and
        # is therefore not callable.
        patcher = patch('%s.X' % __name__, spec=X())
        mock = patcher.start()
        self.addCleanup(patcher.stop)
        self.assertNotCallable(mock)
        self.assertRaises(TypeError, mock)
    def test_patch_spec_set_instance(self):
        patcher = patch('%s.X' % __name__, spec_set=X())
        mock = patcher.start()
        self.addCleanup(patcher.stop)
        self.assertNotCallable(mock)
        self.assertRaises(TypeError, mock)
    def test_patch_spec_callable_class(self):
        # Classes defining __call__ (directly or via inheritance) yield
        # callable instance mocks.
        class CallableX(X):
            def __call__(self):
                pass
        class Sub(CallableX):
            pass
        class Multi(SomeClass, Sub):
            pass
        for arg in 'spec', 'spec_set':
            for Klass in CallableX, Sub, Multi:
                with patch('%s.X' % __name__, **{arg: Klass}) as mock:
                    instance = mock()
                    mock.assert_called_once_with()
                    self.assertTrue(is_instance(instance, MagicMock))
                    # inherited spec
                    self.assertRaises(AttributeError, getattr, instance,
                                      'foobarbaz')
                    result = instance()
                    # instance is callable, result has no spec
                    instance.assert_called_once_with()
                    result(3, 2, 1)
                    result.assert_called_once_with(3, 2, 1)
                    result.foo(3, 2, 1)
                    result.foo.assert_called_once_with(3, 2, 1)
    def test_create_autopsec(self):
        # (sic: "autopsec") autospec'd class is callable, instances are not
        mock = create_autospec(X)
        instance = mock()
        self.assertRaises(TypeError, instance)
        mock = create_autospec(X())
        self.assertRaises(TypeError, mock)
    def test_create_autospec_instance(self):
        # instance=True: result models an instance, so calling it fails
        # but spec'd methods work with signature checking.
        mock = create_autospec(SomeClass, instance=True)
        self.assertRaises(TypeError, mock)
        mock.wibble()
        mock.wibble.assert_called_once_with()
        self.assertRaises(TypeError, mock.wibble, 'some', 'args')
| gpl-3.0 |
home-assistant/home-assistant | tests/components/philips_js/test_device_trigger.py | 5 | 2191 | """The tests for Philips TV device triggers."""
import pytest
import homeassistant.components.automation as automation
from homeassistant.components.philips_js.const import DOMAIN
from homeassistant.setup import async_setup_component
from tests.common import (
assert_lists_same,
async_get_device_automations,
async_mock_service,
)
from tests.components.blueprint.conftest import stub_blueprint_populate # noqa: F401
@pytest.fixture
def calls(hass):
    """Track calls to a mock service."""
    # Registers a test.automation service and returns the list that
    # collects every service call made to it.
    return async_mock_service(hass, "test", "automation")
async def test_get_triggers(hass, mock_device):
    """Test we get the expected triggers."""
    # The Philips TV device exposes exactly one device trigger: turn_on.
    expected_triggers = [
        {
            "platform": "device",
            "domain": DOMAIN,
            "type": "turn_on",
            "device_id": mock_device.id,
        },
    ]
    triggers = await async_get_device_automations(hass, "trigger", mock_device.id)
    assert_lists_same(triggers, expected_triggers)
async def test_if_fires_on_turn_on_request(
    hass, calls, mock_tv, mock_entity, mock_device
):
    """Test that the turn_on device trigger fires on a turn_on request."""
    # TV starts off so media_player.turn_on produces a turn-on request.
    mock_tv.on = False
    assert await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: [
                {
                    "trigger": {
                        "platform": "device",
                        "domain": DOMAIN,
                        "device_id": mock_device.id,
                        "type": "turn_on",
                    },
                    "action": {
                        "service": "test.automation",
                        "data_template": {
                            "some": "{{ trigger.device_id }}",
                            "id": "{{ trigger.id}}",
                        },
                    },
                }
            ]
        },
    )
    await hass.services.async_call(
        "media_player",
        "turn_on",
        {"entity_id": mock_entity},
        blocking=True,
    )
    await hass.async_block_till_done()
    # Exactly one automation run, carrying the device id and trigger idx.
    assert len(calls) == 1
    assert calls[0].data["some"] == mock_device.id
    assert calls[0].data["id"] == 0
| apache-2.0 |
BancDelTempsDAW/symfony | vendor/doctrine/orm/docs/en/conf.py | 2448 | 6497 | # -*- coding: utf-8 -*-
#
# Doctrine 2 ORM documentation build configuration file, created by
# sphinx-quickstart on Fri Dec 3 18:10:24 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('_exts'))

# -- General configuration -----------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# 'configurationblock' is a local extension loaded from the _exts directory
# added to sys.path above.
extensions = ['configurationblock']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'Doctrine 2 ORM'
copyright = u'2010-12, Doctrine Project Team'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2'
# The full version, including alpha/beta/rc tags.
release = '2'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = 'en'

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of documents that shouldn't be included in the build.
#unused_docs = []

# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']

# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# 'doctrine' is a custom theme shipped in the _theme directory (see
# html_theme_path below).
html_theme = 'doctrine'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_theme']

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_use_modindex = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''

# Output file base name for HTML help builder.
htmlhelp_basename = 'Doctrine2ORMdoc'

# -- Options for LaTeX output --------------------------------------------------

# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'

# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index', 'Doctrine2ORM.tex', u'Doctrine 2 ORM Documentation',
     u'Doctrine Project Team', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# Additional stuff for the LaTeX preamble.
#latex_preamble = ''

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_use_modindex = True

# Default reST role domain; directives without an explicit domain prefix are
# resolved against the custom 'dcorm' domain.
primary_domain = "dcorm"
def linkcode_resolve(domain, info):
    """Return a source URL for sphinx.ext.linkcode, or None.

    Only objects in the custom ``dcorm`` domain are linked; everything
    else gets no source link.
    """
    if domain != 'dcorm':
        return None
    # NOTE: placeholder URL — same value the original returned.
    return 'http://'
| mit |
yu-george/MarkFlip | MarkFlip.py | 1 | 2434 | import codecs
import re
# Simple regex-based Markdown -> HTML converter: reads mdFileUrl, applies
# one pass per Markdown construct, and writes MarkFlip.html.
mdFileUrl = 'test.md'
# NOTE(review): outData is never used anywhere in this script.
outData = []

# Fixed HTML wrapper emitted around the converted document body.
htmlStart = '''<!DOCTYPE HTML>
<html lang='en'>
<head>
<meta http-equiv='Content-Type' content='text/html; charset=UTF-8' />
<link rel='stylesheet' href='markflip.css' type='text/css' />
<title>Generated by MarkFlip</title>
</head>
<body>
'''
htmlEnd = '''</body>
</html>
'''

# One compiled pattern per Markdown construct.  NOTE(review): these are
# heuristic patterns, not a real Markdown grammar — nested or adjacent
# constructs may be converted incorrectly.
headerRegex = re.compile(r'(#{1,6} *)(.*)')
strongRegex = re.compile(r'\*{2}.+\*{2}')
emphasisRegex = re.compile(r'[^\*\\]\*[^/]+[^\\]\*[^\*]')
strikeRegex = re.compile(r'~{2}.+~{2}')
linkRegex = re.compile(r'\[([^\[]+)\]( )*\(([^\)]+)\)')
codeInlineRegex = re.compile(r'([^\\]`)([^`\\\n]+)`')
codeFenceRegex = re.compile(r'```([^\n]+)\n([^\\]+)```')
escapeRegex = re.compile(r'\\([\*|\\|\`])')
lineBreakRegex = re.compile(r'[\n]{2,}')

with codecs.open(mdFileUrl, 'r', 'utf-8') as f:
    print('Opening file...')
    raw = f.read()

print('Processing data...')
# Headers: '# text' .. '###### text' -> <h1>..<h6>.  The hash count of the
# marker group becomes the heading level.
for level, text in headerRegex.findall(raw):
    originalText = level + text
    level = level.count('#')
    raw = raw.replace(originalText, '<h{}>{}</h{}>'.format(level, text, level))
# Bold: **text** -> <strong>text</strong>.
for match in strongRegex.findall(raw):
    raw = raw.replace(match, '<strong>{}</strong>'.format(match[2:-2]))
# Italic: *text* -> <em>text</em>.  The pattern captures one context
# character on each side, hence the asymmetric match[1:-1] / match[2:-2]
# slicing — NOTE(review): verify this keeps the surrounding characters
# intact for matches at line boundaries.
for match in emphasisRegex.findall(raw):
    raw = raw.replace(match[1:-1], '<em>{}</em>'.format(match[2:-2]))
# Strikethrough: ~~text~~ -> <del>text</del>.
for match in strikeRegex.findall(raw):
    raw = raw.replace(match, '<del>{}</del>'.format(match[2:-2]))
# Links: [display](url) -> <a href='url'>display</a>.
for display, space, link in linkRegex.findall(raw):
    originalText = '[{}]{}({})'.format(display, space, link)
    raw = raw.replace(originalText, '<a href=\'{}\'>{}</a>'.format(link.strip(), display.strip()))
# Inline code: `code` -> <code>code</code> (pattern captures one leading
# context character, stripped back off when rebuilding originalText).
for before, code in codeInlineRegex.findall(raw):
    originalText = before[1:] + code + '`'
    raw = raw.replace(originalText, '<code>{}</code>'.format(code.strip()))
# Fenced code blocks: ```lang\ncode``` -> <pre>code</pre>; the language
# tag is discarded.  Newlines are doubled so the later blank-line pass
# turns them into <br /> tags.
for lang, code in codeFenceRegex.findall(raw):
    originalText = '```{}\n{}```'.format(lang, code)
    raw = raw.replace(originalText, '<pre>{}</pre>'.format(code).replace('\n', '\n\n'))
# Unescape \*, \\ and \` back to the literal character.
for match in escapeRegex.findall(raw):
    raw = raw.replace('\\{}'.format(match), match)
# Runs of 2+ newlines become <br />; remaining single newlines are dropped.
for match in lineBreakRegex.findall(raw):
    raw = raw.replace(match, '<br />')
raw = raw.replace('\n', '')

with codecs.open('MarkFlip.html', 'w', 'utf-8') as f:
    print('Writing data...')
    f.write(htmlStart)
    f.write(raw)
    f.write(htmlEnd)
print('Done!')
| unlicense |
asolntsev/selenium | py/test/selenium/webdriver/marionette/mn_context_tests.py | 39 | 1135 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
def test_context_sets_correct_context_and_returns(driver):
    """The context manager switches to chrome scope and restores content scope."""
    def current_context():
        return driver.execute('GET_CONTEXT').pop('value')

    # Default scope outside the manager is content.
    assert current_context() == driver.CONTEXT_CONTENT
    with driver.context(driver.CONTEXT_CHROME):
        assert current_context() == driver.CONTEXT_CHROME
    # Leaving the manager must restore the previous scope.
    assert current_context() == driver.CONTEXT_CONTENT
| apache-2.0 |
EventGhost/EventGhost | plugins/WinUsbTest/__init__.py | 2 | 1470 | # -*- coding: utf-8 -*-
#
# This file is a plugin for EventGhost.
# Copyright © 2005-2020 EventGhost Project <http://www.eventghost.net/>
#
# EventGhost is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 2 of the License, or (at your option)
# any later version.
#
# EventGhost is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along
# with EventGhost. If not, see <http://www.gnu.org/licenses/>.
ur"""<rst>
Plugin for the Auvisio PC-Remote.
"""

import eg

# NOTE(review): the module docstring above says "Auvisio PC-Remote" while
# the plugin registers as "WinUSB Test" — looks like a copy-paste leftover.
# It matters because __doc__ is passed as the plugin description below.
eg.RegisterPlugin(
    name = "WinUSB Test",
    author = "Bitmonster",
    version = "1.0.0",
    kind = "remote",
    guid = "{68EA5E13-712D-47C7-AB95-D4B8707D8D33}",
    description = __doc__,
)
from math import atan2, pi
class WinUsbTest(eg.PluginBase):
    # EventGhost plugin that opens a WinUSB device (VID 073A / PID 2230)
    # and dumps every data packet it receives to the console.
    # NOTE(review): Python 2 source (print statements) — do not modernize
    # without porting the whole plugin.

    def __start__(self):
        # Called by EventGhost when the plugin is enabled: register the
        # device by hardware id and start listening.
        self.winUsb = eg.WinUsb(self)
        self.winUsb.Device(self.Callback1, 1).AddHardwareId(
            "WinUsb Test Device", "USB\\VID_073A&PID_2230"
        )
        self.winUsb.Start()

    def __stop__(self):
        # Called by EventGhost when the plugin is disabled.
        self.winUsb.Stop()

    def Callback1(self, data):
        # Receives raw packets from the device registered in __start__.
        print data

    def Callback2(self, data):
        # NOTE(review): never registered anywhere in this file — possibly
        # dead code kept for a second endpoint.
        print data
| gpl-2.0 |
NPC360/NPC360 | wsgi/yesnoerr.py | 1 | 1946 | yeslist = [
"yes",
"yasss",
"yea",
"yeah",
"yep",
"yeppers",
"sure",
"yizzir",
"sounds good",
"let's do it",
"no problem",
"let's go",
"absolutely",
"hell yea",
"hell yes",
"ok",
"a little",
"good",
"great",
"of course",
"alright",
"by all means",
"allright",
"all right",
"certainly",
"def",
"defs",
"definitely",
"good enough",
"gladly",
"exactly",
"naturally",
"positively",
"surely",
"sure thing",
"true",
"okay",
"ok",
"gd",
"amen",
"affirmative",
"without fail",
"willingly",
"very well",
"unquestionably",
"nod",
"fine",
"agree",
"aight",
"all righty",
"aye",
"cool",
"correct",
"fine",
"fo sho",
"forizzle",
"indeed",
"indubitably",
"mhm",
"right on",
"right",
"roger",
"sure",
"true dat",
"uh-huh",
"way",
"word",
"ya",
"yaaasss",
"yah",
"ye",
"yeah"
"yeh",
"yes",
"yessum",
"yip"
"yis",
"you bet",
"yup",
"yus"]
# Phrases interpreted as a negative answer (matched against user input).
# FIX: removed a duplicate "no thanks" entry (it appeared both at the top
# of the list and again after "no thnx"); membership behavior unchanged.
nolist = [
    "no thanks",
    "sorry",
    "reject",
    "pass",
    "not this time",
    "not possible",
    "not now",
    "not at all",
    "not at the moment",
    "nope",
    "no",
    "no never",
    "no way",
    "no thnx",
    "no go",
    "no can do",
    "nix",
    "never",
    "nep",
    "nein",
    "negatory",
    "negative",
    "neg",
    "nay",
    "naw",
    "nah", "na",
    "n o",
    "prefer not to",
    "if only",
    "ick",
    "i can't",
    "hell no",
    "fuck no",
    "forget it",
    "false",
    "eww",
    "disagreed",
    "deny",
    "decline",
    "certainly not",
    "not really",
    "cancel",
    "by no means",
    "absolutely not"]
# Responses that mean the answer could not be understood at all.
errlist = [
    "?", "wut?", "not really an answer", "uhm",
    "what?", "???", "pardon?",
]
| mit |
mikedchavez1010/XX-Net | python27/1.0/lib/crypto/py3AES.py | 7 | 34062 | #!/usr/bin/python3
#
# aes.py: implements AES - Advanced Encryption Standard
# from the SlowAES project, http://code.google.com/p/slowaes/
#
# Copyright (c) 2008 Josh Davis ( http://www.josh-davis.org ),
# Alex Martelli ( http://www.aleax.it )
#
# Ported from C code written by Laurent Haan
# ( http://www.progressive-coding.com )
#
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/
#
#
# Ported to Python3
#
# Copyright (c) 2011 - 2014 Detlev Offenbach <detlev@die-offenbachs.de>
#
"""
Module implementing classes for encryption according
Advanced Encryption Standard.
"""
from __future__ import unicode_literals
import os
import math
def append_PKCS7_padding(b):
    """
    Function to pad the given data to a multiple of 16-bytes by PKCS7 padding.

    Always appends between 1 and 16 bytes, each equal to the number of
    padding bytes added (RFC 5652 §6.3), so the padding is unambiguously
    removable by strip_PKCS7_padding().

    @param b data to be padded (bytes)
    @return padded data (bytes)
    """
    numpads = 16 - (len(b) % 16)
    # bytes([numpads]) builds the single pad byte directly instead of the
    # original chr()/ASCII-encode round trip, which only worked by accident
    # for values < 128.
    return b + bytes([numpads]) * numpads
def strip_PKCS7_padding(b):
    """
    Function to strip off PKCS7 padding.

    @param b data to be stripped (bytes)
    @return stripped data (bytes)
    @exception ValueError data padding is invalid
    """
    if len(b) % 16 or not b:
        raise ValueError(
            "Data of len {0} can't be PCKS7-padded".format(len(b)))
    numpads = b[-1]
    # BUG FIX: a valid PKCS#7 pad count is 1..16.  The original only
    # rejected numpads > 16, so a trailing 0x00 slipped through and
    # b[:-0] == b[:0] silently discarded ALL of the data.
    if not 1 <= numpads <= 16:
        raise ValueError(
            "Data ending with {0} can't be PCKS7-padded".format(b[-1]))
    # PKCS#7 requires every padding byte to repeat the pad count; reject
    # inconsistent padding instead of returning corrupt data.
    if b[-numpads:] != bytes([numpads]) * numpads:
        raise ValueError(
            "Data ending with {0} can't be PCKS7-padded".format(b[-1]))
    return b[:-numpads]
class AES(object):
    """
    Class implementing the Advanced Encryption Standard algorithm.
    """
    # Pure-Python, table-driven Rijndael implementation operating on
    # 16-byte blocks.  Stateless: every method takes its inputs explicitly,
    # so one instance can safely be shared.

    # valid key sizes
    KeySize = {
        "SIZE_128": 16,
        "SIZE_192": 24,
        "SIZE_256": 32,
    }

    # Rijndael S-box
    sbox = [0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67,
            0x2b, 0xfe, 0xd7, 0xab, 0x76, 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59,
            0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0, 0xb7,
            0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1,
            0x71, 0xd8, 0x31, 0x15, 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05,
            0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75, 0x09, 0x83,
            0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29,
            0xe3, 0x2f, 0x84, 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b,
            0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf, 0xd0, 0xef, 0xaa,
            0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c,
            0x9f, 0xa8, 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc,
            0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2, 0xcd, 0x0c, 0x13, 0xec,
            0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19,
            0x73, 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee,
            0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb, 0xe0, 0x32, 0x3a, 0x0a, 0x49,
            0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79,
            0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4,
            0xea, 0x65, 0x7a, 0xae, 0x08, 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6,
            0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a, 0x70,
            0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9,
            0x86, 0xc1, 0x1d, 0x9e, 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e,
            0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf, 0x8c, 0xa1,
            0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0,
            0x54, 0xbb, 0x16]

    # Rijndael Inverted S-box
    rsbox = [0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38, 0xbf, 0x40, 0xa3,
             0x9e, 0x81, 0xf3, 0xd7, 0xfb, 0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f,
             0xff, 0x87, 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb, 0x54,
             0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d, 0xee, 0x4c, 0x95, 0x0b,
             0x42, 0xfa, 0xc3, 0x4e, 0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24,
             0xb2, 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25, 0x72, 0xf8,
             0xf6, 0x64, 0x86, 0x68, 0x98, 0x16, 0xd4, 0xa4, 0x5c, 0xcc, 0x5d,
             0x65, 0xb6, 0x92, 0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda,
             0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84, 0x90, 0xd8, 0xab,
             0x00, 0x8c, 0xbc, 0xd3, 0x0a, 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3,
             0x45, 0x06, 0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02, 0xc1,
             0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b, 0x3a, 0x91, 0x11, 0x41,
             0x4f, 0x67, 0xdc, 0xea, 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6,
             0x73, 0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85, 0xe2, 0xf9,
             0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e, 0x47, 0xf1, 0x1a, 0x71, 0x1d,
             0x29, 0xc5, 0x89, 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b,
             0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20, 0x9a, 0xdb, 0xc0,
             0xfe, 0x78, 0xcd, 0x5a, 0xf4, 0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07,
             0xc7, 0x31, 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f, 0x60,
             0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d, 0x2d, 0xe5, 0x7a, 0x9f,
             0x93, 0xc9, 0x9c, 0xef, 0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5,
             0xb0, 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61, 0x17, 0x2b,
             0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26, 0xe1, 0x69, 0x14, 0x63, 0x55,
             0x21, 0x0c, 0x7d]

    # Rijndael Rcon
    Rcon = [0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36,
            0x6c, 0xd8, 0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97,
            0x35, 0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef, 0xc5, 0x91, 0x39, 0x72,
            0xe4, 0xd3, 0xbd, 0x61, 0xc2, 0x9f, 0x25, 0x4a, 0x94, 0x33, 0x66,
            0xcc, 0x83, 0x1d, 0x3a, 0x74, 0xe8, 0xcb, 0x8d, 0x01, 0x02, 0x04,
            0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8, 0xab, 0x4d,
            0x9a, 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3,
            0x7d, 0xfa, 0xef, 0xc5, 0x91, 0x39, 0x72, 0xe4, 0xd3, 0xbd, 0x61,
            0xc2, 0x9f, 0x25, 0x4a, 0x94, 0x33, 0x66, 0xcc, 0x83, 0x1d, 0x3a,
            0x74, 0xe8, 0xcb, 0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40,
            0x80, 0x1b, 0x36, 0x6c, 0xd8, 0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc,
            0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef, 0xc5,
            0x91, 0x39, 0x72, 0xe4, 0xd3, 0xbd, 0x61, 0xc2, 0x9f, 0x25, 0x4a,
            0x94, 0x33, 0x66, 0xcc, 0x83, 0x1d, 0x3a, 0x74, 0xe8, 0xcb, 0x8d,
            0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c,
            0xd8, 0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35,
            0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef, 0xc5, 0x91, 0x39, 0x72, 0xe4,
            0xd3, 0xbd, 0x61, 0xc2, 0x9f, 0x25, 0x4a, 0x94, 0x33, 0x66, 0xcc,
            0x83, 0x1d, 0x3a, 0x74, 0xe8, 0xcb, 0x8d, 0x01, 0x02, 0x04, 0x08,
            0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8, 0xab, 0x4d, 0x9a,
            0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3, 0x7d,
            0xfa, 0xef, 0xc5, 0x91, 0x39, 0x72, 0xe4, 0xd3, 0xbd, 0x61, 0xc2,
            0x9f, 0x25, 0x4a, 0x94, 0x33, 0x66, 0xcc, 0x83, 0x1d, 0x3a, 0x74,
            0xe8, 0xcb]

    def __getSBoxValue(self, num):
        """
        Private method to retrieve a given S-Box value.

        @param num position of the value (integer)
        @return value of the S-Box (integer)
        """
        return self.sbox[num]

    def __getSBoxInvert(self, num):
        """
        Private method to retrieve a given Inverted S-Box value.

        @param num position of the value (integer)
        @return value of the Inverted S-Box (integer)
        """
        return self.rsbox[num]

    def __rotate(self, data):
        """
        Private method performing Rijndael's key schedule rotate operation.

        Rotate the data word eight bits to the left: eg,
        rotate(1d2c3a4f) == 2c3a4f1d.

        @param data data of size 4 (bytearray)
        @return rotated data (bytearray)
        """
        # Slicing creates a new object; the caller's word is not mutated.
        return data[1:] + data[:1]

    def __getRconValue(self, num):
        """
        Private method to retrieve a given Rcon value.

        @param num position of the value (integer)
        @return Rcon value (integer)
        """
        return self.Rcon[num]

    def __core(self, data, iteration):
        """
        Private method performing the key schedule core operation.

        @param data data to operate on (bytearray)
        @param iteration iteration counter (integer)
        @return modified data (bytearray)
        """
        # rotate the 32-bit word 8 bits to the left
        data = self.__rotate(data)
        # apply S-Box substitution on all 4 parts of the 32-bit word
        for i in range(4):
            data[i] = self.__getSBoxValue(data[i])
        # XOR the output of the rcon operation with i to the first part
        # (leftmost) only
        data[0] = data[0] ^ self.__getRconValue(iteration)
        return data

    def __expandKey(self, key, size, expandedKeySize):
        """
        Private method performing Rijndael's key expansion.

        Expands a 128, 192 or 256 bit key into a 176, 208 or 240 bit key.

        @param key key to be expanded (bytes or bytearray)
        @param size size of the key in bytes (16, 24 or 32)
        @param expandedKeySize size of the expanded key (integer)
        @return expanded key (bytearray)
        """
        # current expanded keySize, in bytes
        currentSize = 0
        rconIteration = 1
        expandedKey = bytearray(expandedKeySize)

        # set the 16, 24, 32 bytes of the expanded key to the input key
        for j in range(size):
            expandedKey[j] = key[j]
        currentSize += size

        while currentSize < expandedKeySize:
            # assign the previous 4 bytes to the temporary value t
            t = expandedKey[currentSize - 4:currentSize]

            # every 16, 24, 32 bytes we apply the core schedule to t
            # and increment rconIteration afterwards
            if currentSize % size == 0:
                t = self.__core(t, rconIteration)
                rconIteration += 1
            # For 256-bit keys, we add an extra sbox to the calculation
            if size == self.KeySize["SIZE_256"] and \
                    ((currentSize % size) == 16):
                for l in range(4):
                    t[l] = self.__getSBoxValue(t[l])

            # We XOR t with the four-byte block 16, 24, 32 bytes before the new
            # expanded key. This becomes the next four bytes in the expanded
            # key.
            for m in range(4):
                expandedKey[currentSize] = \
                    expandedKey[currentSize - size] ^ t[m]
                currentSize += 1

        return expandedKey

    def __addRoundKey(self, state, roundKey):
        """
        Private method to add (XORs) the round key to the state.

        @param state state to be changed (bytearray)
        @param roundKey key to be used for the modification (bytearray)
        @return modified state (bytearray)
        """
        # Works on a copy; the caller's state is left untouched.
        buf = state[:]
        for i in range(16):
            buf[i] ^= roundKey[i]
        return buf

    def __createRoundKey(self, expandedKey, roundKeyPointer):
        """
        Private method to create a round key.

        @param expandedKey expanded key to be used (bytearray)
        @param roundKeyPointer position within the expanded key (integer)
        @return round key (bytearray)
        """
        roundKey = bytearray(16)
        # Transpose the 4x4 byte block so it matches the column-major
        # state layout used by encrypt()/decrypt().
        for i in range(4):
            for j in range(4):
                roundKey[j * 4 + i] = expandedKey[roundKeyPointer + i * 4 + j]
        return roundKey

    def __galois_multiplication(self, a, b):
        """
        Private method to perform a Galois multiplication of 8 bit characters
        a and b.

        @param a first factor (byte)
        @param b second factor (byte)
        @return result (byte)
        """
        p = 0
        # Classic shift-and-add multiplication in GF(2^8) modulo the
        # Rijndael polynomial 0x11b (reduction term 0x1b below).
        for counter in range(8):
            if b & 1:
                p ^= a
            hi_bit_set = a & 0x80
            a <<= 1
            # keep a 8 bit
            a &= 0xFF
            if hi_bit_set:
                a ^= 0x1b
            b >>= 1
        return p

    def __subBytes(self, state, isInv):
        """
        Private method to substitute all the values from the state with the
        value in the SBox using the state value as index for the SBox.

        @param state state to be worked on (bytearray)
        @param isInv flag indicating an inverse operation (boolean)
        @return modified state (bytearray)
        """
        state = state[:]
        if isInv:
            getter = self.__getSBoxInvert
        else:
            getter = self.__getSBoxValue
        for i in range(16):
            state[i] = getter(state[i])
        return state

    def __shiftRows(self, state, isInv):
        """
        Private method to iterate over the 4 rows and call __shiftRow() with
        that row.

        @param state state to be worked on (bytearray)
        @param isInv flag indicating an inverse operation (boolean)
        @return modified state (bytearray)
        """
        state = state[:]
        # Row i is shifted by i positions (row 0 is left unchanged).
        for i in range(4):
            state = self.__shiftRow(state, i * 4, i, isInv)
        return state

    def __shiftRow(self, state, statePointer, nbr, isInv):
        """
        Private method to shift the bytes of a row to the left.

        @param state state to be worked on (bytearray)
        @param statePointer index into the state (integer)
        @param nbr number of positions to shift (integer)
        @param isInv flag indicating an inverse operation (boolean)
        @return modified state (bytearray)
        """
        state = state[:]
        # One-byte rotation applied nbr times; the inverse rotates right.
        for i in range(nbr):
            if isInv:
                state[statePointer:statePointer + 4] = \
                    state[statePointer + 3:statePointer + 4] + \
                    state[statePointer:statePointer + 3]
            else:
                state[statePointer:statePointer + 4] = \
                    state[statePointer + 1:statePointer + 4] + \
                    state[statePointer:statePointer + 1]
        return state

    def __mixColumns(self, state, isInv):
        """
        Private method to perform a galois multiplication of the 4x4 matrix.

        @param state state to be worked on (bytearray)
        @param isInv flag indicating an inverse operation (boolean)
        @return modified state (bytearray)
        """
        state = state[:]
        # iterate over the 4 columns
        for i in range(4):
            # construct one column by slicing over the 4 rows
            column = state[i:i + 16:4]
            # apply the __mixColumn on one column
            column = self.__mixColumn(column, isInv)
            # put the values back into the state
            state[i:i + 16:4] = column

        return state

    # galois multiplication of 1 column of the 4x4 matrix
    def __mixColumn(self, column, isInv):
        """
        Private method to perform a galois multiplication of 1 column the
        4x4 matrix.

        @param column column to be worked on (bytearray)
        @param isInv flag indicating an inverse operation (boolean)
        @return modified column (bytearray)
        """
        column = column[:]
        # mult holds the MixColumns matrix coefficients (forward: 2,1,1,3;
        # inverse: 14,9,13,11), consumed cyclically below.
        if isInv:
            mult = [14, 9, 13, 11]
        else:
            mult = [2, 1, 1, 3]
        cpy = column[:]
        g = self.__galois_multiplication

        column[0] = g(cpy[0], mult[0]) ^ g(cpy[3], mult[1]) ^ \
            g(cpy[2], mult[2]) ^ g(cpy[1], mult[3])
        column[1] = g(cpy[1], mult[0]) ^ g(cpy[0], mult[1]) ^ \
            g(cpy[3], mult[2]) ^ g(cpy[2], mult[3])
        column[2] = g(cpy[2], mult[0]) ^ g(cpy[1], mult[1]) ^ \
            g(cpy[0], mult[2]) ^ g(cpy[3], mult[3])
        column[3] = g(cpy[3], mult[0]) ^ g(cpy[2], mult[1]) ^ \
            g(cpy[1], mult[2]) ^ g(cpy[0], mult[3])
        return column

    def __aes_round(self, state, roundKey):
        """
        Private method to apply the 4 operations of the forward round in
        sequence.

        @param state state to be worked on (bytearray)
        @param roundKey round key to be used (bytearray)
        @return modified state (bytearray)
        """
        state = self.__subBytes(state, False)
        state = self.__shiftRows(state, False)
        state = self.__mixColumns(state, False)
        state = self.__addRoundKey(state, roundKey)
        return state

    def __aes_invRound(self, state, roundKey):
        """
        Private method to apply the 4 operations of the inverse round in
        sequence.

        @param state state to be worked on (bytearray)
        @param roundKey round key to be used (bytearray)
        @return modified state (bytearray)
        """
        state = self.__shiftRows(state, True)
        state = self.__subBytes(state, True)
        state = self.__addRoundKey(state, roundKey)
        state = self.__mixColumns(state, True)
        return state

    def __aes_main(self, state, expandedKey, nbrRounds):
        """
        Private method to do the AES encryption for one round.

        Perform the initial operations, the standard round, and the
        final operations of the forward AES, creating a round key for
        each round.

        @param state state to be worked on (bytearray)
        @param expandedKey expanded key to be used (bytearray)
        @param nbrRounds number of rounds to be done (integer)
        @return modified state (bytearray)
        """
        state = self.__addRoundKey(
            state, self.__createRoundKey(expandedKey, 0))
        i = 1
        while i < nbrRounds:
            state = self.__aes_round(
                state, self.__createRoundKey(expandedKey, 16 * i))
            i += 1
        # Final round: like a normal round but without MixColumns.
        state = self.__subBytes(state, False)
        state = self.__shiftRows(state, False)
        state = self.__addRoundKey(
            state, self.__createRoundKey(expandedKey, 16 * nbrRounds))
        return state

    def __aes_invMain(self, state, expandedKey, nbrRounds):
        """
        Private method to do the inverse AES encryption for one round.

        Perform the initial operations, the standard round, and the
        final operations of the inverse AES, creating a round key for
        each round.

        @param state state to be worked on (bytearray)
        @param expandedKey expanded key to be used (bytearray)
        @param nbrRounds number of rounds to be done (integer)
        @return modified state (bytearray)
        """
        # Round keys are consumed in reverse order relative to __aes_main().
        state = self.__addRoundKey(
            state, self.__createRoundKey(expandedKey, 16 * nbrRounds))
        i = nbrRounds - 1
        while i > 0:
            state = self.__aes_invRound(
                state, self.__createRoundKey(expandedKey, 16 * i))
            i -= 1
        state = self.__shiftRows(state, True)
        state = self.__subBytes(state, True)
        state = self.__addRoundKey(
            state, self.__createRoundKey(expandedKey, 0))
        return state

    def encrypt(self, iput, key, size):
        """
        Public method to encrypt a 128 bit input block against the given key
        of size specified.

        @param iput input data (bytearray)
        @param key key to be used (bytes or bytearray)
        @param size key size (16, 24 or 32)
        @return encrypted data (bytes)
        @exception ValueError key size is invalid
        """
        output = bytearray(16)
        # the number of rounds
        nbrRounds = 0
        # the 128 bit block to encode
        block = bytearray(16)
        # set the number of rounds
        if size == self.KeySize["SIZE_128"]:
            nbrRounds = 10
        elif size == self.KeySize["SIZE_192"]:
            nbrRounds = 12
        elif size == self.KeySize["SIZE_256"]:
            nbrRounds = 14
        else:
            raise ValueError("Wrong key size given ({0}).".format(size))

        # the expanded keySize
        expandedKeySize = 16 * (nbrRounds + 1)

        # Set the block values, for the block:
        # a0,0 a0,1 a0,2 a0,3
        # a1,0 a1,1 a1,2 a1,3
        # a2,0 a2,1 a2,2 a2,3
        # a3,0 a3,1 a3,2 a3,3
        # the mapping order is a0,0 a1,0 a2,0 a3,0 a0,1 a1,1 ... a2,3 a3,3
        #
        # iterate over the columns
        for i in range(4):
            # iterate over the rows
            for j in range(4):
                block[i + j * 4] = iput[i * 4 + j]

        # expand the key into an 176, 208, 240 bytes key
        # the expanded key
        expandedKey = self.__expandKey(key, size, expandedKeySize)

        # encrypt the block using the expandedKey
        block = self.__aes_main(block, expandedKey, nbrRounds)

        # unmap the block again into the output
        for k in range(4):
            # iterate over the rows
            for l in range(4):
                output[k * 4 + l] = block[k + l * 4]
        return bytes(output)

    # decrypts a 128 bit input block against the given key of size specified
    def decrypt(self, iput, key, size):
        """
        Public method to decrypt a 128 bit input block against the given key
        of size specified.

        @param iput input data (bytearray)
        @param key key to be used (bytes or bytearray)
        @param size key size (16, 24 or 32)
        @return decrypted data (bytes)
        @exception ValueError key size is invalid
        """
        output = bytearray(16)
        # the number of rounds
        nbrRounds = 0
        # the 128 bit block to decode
        block = bytearray(16)
        # set the number of rounds
        if size == self.KeySize["SIZE_128"]:
            nbrRounds = 10
        elif size == self.KeySize["SIZE_192"]:
            nbrRounds = 12
        elif size == self.KeySize["SIZE_256"]:
            nbrRounds = 14
        else:
            raise ValueError("Wrong key size given ({0}).".format(size))

        # the expanded keySize
        expandedKeySize = 16 * (nbrRounds + 1)

        # Set the block values, for the block:
        # a0,0 a0,1 a0,2 a0,3
        # a1,0 a1,1 a1,2 a1,3
        # a2,0 a2,1 a2,2 a2,3
        # a3,0 a3,1 a3,2 a3,3
        # the mapping order is a0,0 a1,0 a2,0 a3,0 a0,1 a1,1 ... a2,3 a3,3

        # iterate over the columns
        for i in range(4):
            # iterate over the rows
            for j in range(4):
                block[i + j * 4] = iput[i * 4 + j]
        # expand the key into an 176, 208, 240 bytes key
        expandedKey = self.__expandKey(key, size, expandedKeySize)
        # decrypt the block using the expandedKey
        block = self.__aes_invMain(block, expandedKey, nbrRounds)
        # unmap the block again into the output
        for k in range(4):
            # iterate over the rows
            for l in range(4):
                output[k * 4 + l] = block[k + l * 4]
        # NOTE(review): returns a bytearray while encrypt() returns bytes;
        # confirm no caller mutates the result before normalising this to
        # `bytes(output)` for symmetry.
        return output
class AESModeOfOperation(object):
    """
    Class implementing the different AES mode of operations.
    """
    # Single AES core shared by all instances as a class attribute; safe
    # because the AES class defined above keeps no per-call state.
    aes = AES()

    # structure of supported modes of operation
    ModeOfOperation = {
        "OFB": 0,
        "CFB": 1,
        "CBC": 2,
    }
def __extractBytes(self, input, start, end, mode):
"""
Private method to extract a range of bytes from the input.
@param input input data (bytes)
@param start start index (integer)
@param end end index (integer)
@param mode mode of operation (0, 1, 2)
@return extracted bytes (bytearray)
"""
if end - start > 16:
end = start + 16
if mode == self.ModeOfOperation["CBC"]:
ar = bytearray(16)
else:
ar = bytearray()
i = start
j = 0
while len(ar) < end - start:
ar.append(0)
while i < end:
ar[j] = input[i]
j += 1
i += 1
return ar
    def encrypt(self, input, mode, key, size, IV):
        """
        Public method to perform the encryption operation.

        @param input data to be encrypted (bytes)
        @param mode mode of operation (0, 1 or 2)
        @param key key to be used (bytes)
        @param size length of the key (16, 24 or 32)
        @param IV initialisation vector (bytearray)
        @return tuple with mode of operation, length of the input and
            the encrypted data (integer, integer, bytes)
        @exception ValueError key size is invalid or decrypted data is invalid
        """
        if len(key) % size:
            raise ValueError("Illegal size ({0}) for key '{1}'.".format(
                size, key))
        # NOTE(review): this only checks divisibility by 16, so an empty IV
        # would pass — confirm callers always supply a 16-byte IV.
        if len(IV) % 16:
            raise ValueError("IV is not a multiple of 16.")
        # the AES input/output
        iput = bytearray(16)
        output = bytearray()
        ciphertext = bytearray(16)
        # the output cipher string
        cipherOut = bytearray()
        # char firstRound
        firstRound = True
        if input:
            # Process the plaintext one 16-byte block at a time; the final
            # block may be short for the stream modes (CFB/OFB), while CBC
            # expects input pre-padded to a 16-byte multiple.
            for j in range(int(math.ceil(float(len(input)) / 16))):
                start = j * 16
                end = j * 16 + 16
                if end > len(input):
                    end = len(input)
                plaintext = self.__extractBytes(input, start, end, mode)
                # print 'PT@%s:%s' % (j, plaintext)
                if mode == self.ModeOfOperation["CFB"]:
                    # CFB: encrypt the previous ciphertext block (IV for the
                    # first block) and XOR the keystream with the plaintext.
                    if firstRound:
                        output = self.aes.encrypt(IV, key, size)
                        firstRound = False
                    else:
                        output = self.aes.encrypt(iput, key, size)
                    for i in range(16):
                        # NOTE(review): the third branch below is
                        # unreachable — its first condition already matched
                        # the first branch.  Harmless, kept as-is.
                        if len(plaintext) - 1 < i:
                            ciphertext[i] = 0 ^ output[i]
                        elif len(output) - 1 < i:
                            ciphertext[i] = plaintext[i] ^ 0
                        elif len(plaintext) - 1 < i and len(output) < i:
                            ciphertext[i] = 0 ^ 0
                        else:
                            ciphertext[i] = plaintext[i] ^ output[i]
                    # Only the bytes covering real plaintext are emitted.
                    for k in range(end - start):
                        cipherOut.append(ciphertext[k])
                    iput = ciphertext
                elif mode == self.ModeOfOperation["OFB"]:
                    # OFB: the keystream is the repeated encryption of the
                    # IV, independent of the plaintext (note iput = output).
                    if firstRound:
                        output = self.aes.encrypt(IV, key, size)
                        firstRound = False
                    else:
                        output = self.aes.encrypt(iput, key, size)
                    for i in range(16):
                        # Same unreachable third branch as in CFB above.
                        if len(plaintext) - 1 < i:
                            ciphertext[i] = 0 ^ output[i]
                        elif len(output) - 1 < i:
                            ciphertext[i] = plaintext[i] ^ 0
                        elif len(plaintext) - 1 < i and len(output) < i:
                            ciphertext[i] = 0 ^ 0
                        else:
                            ciphertext[i] = plaintext[i] ^ output[i]
                    for k in range(end - start):
                        cipherOut.append(ciphertext[k])
                    iput = output
                elif mode == self.ModeOfOperation["CBC"]:
                    # CBC: XOR the plaintext with the previous ciphertext
                    # block (IV for the first block), then encrypt.
                    for i in range(16):
                        if firstRound:
                            iput[i] = plaintext[i] ^ IV[i]
                        else:
                            iput[i] = plaintext[i] ^ ciphertext[i]
                    # print 'IP@%s:%s' % (j, iput)
                    firstRound = False
                    ciphertext = self.aes.encrypt(iput, key, size)
                    # always 16 bytes because of the padding for CBC
                    for k in range(16):
                        cipherOut.append(ciphertext[k])
        return mode, len(input), bytes(cipherOut)
# Mode of Operation Decryption
# cipherIn - Encrypted String
# originalsize - The unencrypted string length - required for CBC
# mode - mode of type modeOfOperation
# key - a number array of the bit length size
# size - the bit length of the key
# IV - the 128 bit number array Initilization Vector
def decrypt(self, cipherIn, originalsize, mode, key, size, IV):
"""
Public method to perform the decryption operation.
@param cipherIn data to be decrypted (bytes)
@param originalsize unencrypted string length (required for CBC)
(integer)
@param mode mode of operation (0, 1 or 2)
@param key key to be used (bytes)
@param size length of the key (16, 24 or 32)
@param IV initialisation vector (bytearray)
@return decrypted data (bytes)
@exception ValueError key size is invalid or decrypted data is invalid
"""
if len(key) % size:
raise ValueError("Illegal size ({0}) for key '{1}'.".format(
size, key))
if len(IV) % 16:
raise ValueError("IV is not a multiple of 16.")
# the AES input/output
ciphertext = bytearray()
iput = bytearray()
output = bytearray()
plaintext = bytearray(16)
# the output bytes
bytesOut = bytearray()
# char firstRound
firstRound = True
if cipherIn is not None:
for j in range(int(math.ceil(float(len(cipherIn)) / 16))):
start = j * 16
end = j * 16 + 16
if j * 16 + 16 > len(cipherIn):
end = len(cipherIn)
ciphertext = cipherIn[start:end]
if mode == self.ModeOfOperation["CFB"]:
if firstRound:
output = self.aes.encrypt(IV, key, size)
firstRound = False
else:
output = self.aes.encrypt(iput, key, size)
for i in range(16):
if len(output) - 1 < i:
plaintext[i] = 0 ^ ciphertext[i]
elif len(ciphertext) - 1 < i:
plaintext[i] = output[i] ^ 0
elif len(output) - 1 < i and len(ciphertext) < i:
plaintext[i] = 0 ^ 0
else:
plaintext[i] = output[i] ^ ciphertext[i]
for k in range(end - start):
bytesOut.append(plaintext[k])
iput = ciphertext
elif mode == self.ModeOfOperation["OFB"]:
if firstRound:
output = self.aes.encrypt(IV, key, size)
firstRound = False
else:
output = self.aes.encrypt(iput, key, size)
for i in range(16):
if len(output) - 1 < i:
plaintext[i] = 0 ^ ciphertext[i]
elif len(ciphertext) - 1 < i:
plaintext[i] = output[i] ^ 0
elif len(output) - 1 < i and len(ciphertext) < i:
plaintext[i] = 0 ^ 0
else:
plaintext[i] = output[i] ^ ciphertext[i]
for k in range(end - start):
bytesOut.append(plaintext[k])
iput = output
elif mode == self.ModeOfOperation["CBC"]:
output = self.aes.decrypt(ciphertext, key, size)
for i in range(16):
if firstRound:
plaintext[i] = IV[i] ^ output[i]
else:
plaintext[i] = iput[i] ^ output[i]
firstRound = False
if originalsize is not None and originalsize < end:
for k in range(originalsize - start):
bytesOut.append(plaintext[k])
else:
for k in range(end - start):
bytesOut.append(plaintext[k])
iput = ciphertext
return bytes(bytesOut)
def encryptData(key, data, mode=AESModeOfOperation.ModeOfOperation["CBC"]):
    """
    Module function to encrypt the given data with the given key.

    @param key key to be used for encryption (bytes)
    @param data data to be encrypted (bytes)
    @param mode mode of operations (0, 1 or 2)
    @return encrypted data prepended with the initialization vector (bytes)
    """
    key = bytearray(key)
    if mode == AESModeOfOperation.ModeOfOperation["CBC"]:
        # CBC operates on whole 16 byte blocks, so pad the plaintext first
        data = append_PKCS7_padding(data)
    keysize = len(key)
    assert keysize in AES.KeySize.values(), \
        'invalid key size: {0}'.format(keysize)
    # create a new iv using random data; os.urandom is suitable for
    # cryptographic use, and bytearray() consumes it directly (the previous
    # list comprehension wrapper was redundant)
    iv = bytearray(os.urandom(16))
    moo = AESModeOfOperation()
    mode, length, ciph = moo.encrypt(data, mode, key, keysize, iv)
    # With padding, the original length does not need to be known. It's a bad
    # idea to store the original message length.
    # prepend the iv.
    return bytes(iv) + bytes(ciph)
def decryptData(key, data, mode=AESModeOfOperation.ModeOfOperation["CBC"]):
    """
    Module function to decrypt the given data with the given key.

    @param key key to be used for decryption (bytes)
    @param data data to be decrypted (with initialization vector prepended)
        (bytes)
    @param mode mode of operations (0, 1 or 2)
    @return decrypted data (bytes)
    """
    keyBytes = bytearray(key)
    keysize = len(keyBytes)
    assert keysize in AES.KeySize.values(), 'invalid key size: %s' % keysize
    # the initialization vector occupies the first 16 bytes of the payload,
    # the actual ciphertext follows it
    iv = bytearray(data[:16])
    ciphertext = bytearray(data[16:])
    moo = AESModeOfOperation()
    decrypted = moo.decrypt(ciphertext, None, mode, keyBytes, keysize, iv)
    if mode == AESModeOfOperation.ModeOfOperation["CBC"]:
        # CBC input was PKCS#7 padded on encryption; undo that here
        decrypted = strip_PKCS7_padding(decrypted)
    return bytes(decrypted)
| bsd-2-clause |
calvinchengx/django-haystack | haystack/query.py | 2 | 24043 | import operator
import re
import warnings
from django.conf import settings
from haystack.backends import SQ
from haystack.constants import REPR_OUTPUT_SIZE, ITERATOR_LOAD_PER_QUERY, DEFAULT_OPERATOR
from haystack.exceptions import NotRegistered
class SearchQuerySet(object):
    """
    Provides a way to specify search parameters and lazily load results.

    Supports chaining (a la QuerySet) to narrow the search.
    """
    def __init__(self, site=None, query=None):
        if query is not None:
            self.query = query
        else:
            from haystack import backend
            self.query = backend.SearchQuery(site=site)
        self._result_cache = []
        self._result_count = None
        self._cache_full = False
        self._load_all = False
        self._ignored_result_count = 0
        if site is not None:
            self.site = site
        else:
            from haystack import site as main_site
            self.site = main_site
    def __getstate__(self):
        """
        For pickling.
        """
        # Force the result count to be populated before pickling.
        len(self)
        obj_dict = self.__dict__.copy()
        obj_dict['_iter'] = None
        # The site is process-local and is restored in __setstate__.
        del obj_dict['site']
        return obj_dict
    def __setstate__(self, state):
        """
        For unpickling.
        """
        # ``state`` was named ``dict`` before, shadowing the builtin; pickle
        # calls this positionally, so the rename is safe.
        self.__dict__ = state
        from haystack import site as main_site
        self.site = main_site
    def __repr__(self):
        data = list(self[:REPR_OUTPUT_SIZE])
        if len(self) > REPR_OUTPUT_SIZE:
            data[-1] = "...(remaining elements truncated)..."
        return repr(data)
    def __len__(self):
        if not self._result_count:
            self._result_count = self.query.get_count()
            # Some backends give weird, false-y values here. Convert to zero.
            if not self._result_count:
                self._result_count = 0
        # This needs to return the actual number of hits, not what's in the cache.
        return self._result_count - self._ignored_result_count
    def __iter__(self):
        if self._cache_is_full():
            # We've got a fully populated cache. Let Python do the hard work.
            return iter(self._result_cache)
        return self._manual_iter()
    def __and__(self, other):
        # ANDing with an empty query set yields an empty query set.
        if isinstance(other, EmptySearchQuerySet):
            return other._clone()
        combined = self._clone()
        combined.query.combine(other.query, SQ.AND)
        return combined
    def __or__(self, other):
        combined = self._clone()
        # ORing with an empty query set changes nothing.
        if isinstance(other, EmptySearchQuerySet):
            return combined
        combined.query.combine(other.query, SQ.OR)
        return combined
    def _cache_is_full(self):
        if not self.query.has_run():
            return False
        if len(self) <= 0:
            return True
        try:
            self._result_cache.index(None)
            return False
        except ValueError:
            # No ``None``s found in the results. Check the length of the cache.
            return len(self._result_cache) > 0
    def _manual_iter(self):
        # If we're here, our cache isn't fully populated.
        # For efficiency, fill the cache as we go if we run out of results.
        # Also, this can't be part of the __iter__ method due to Python's rules
        # about generator functions.
        current_position = 0
        current_cache_max = 0
        while True:
            if len(self._result_cache) > 0:
                try:
                    current_cache_max = self._result_cache.index(None)
                except ValueError:
                    current_cache_max = len(self._result_cache)
            while current_position < current_cache_max:
                yield self._result_cache[current_position]
                current_position += 1
            if self._cache_is_full():
                raise StopIteration
            # We've run out of results and haven't hit our limit.
            # Fill more of the cache.
            if not self._fill_cache(current_position, current_position + ITERATOR_LOAD_PER_QUERY):
                raise StopIteration
    def _fill_cache(self, start, end):
        # Tell the query where to start from and how many we'd like.
        self.query._reset()
        self.query.set_limits(start, end)
        results = self.query.get_results()
        if results is None or len(results) == 0:
            return False
        # Setup the full cache now that we know how many results there are.
        # We need the ``None``s as placeholders to know what parts of the
        # cache we have/haven't filled.
        # Using ``None`` like this takes up very little memory. In testing,
        # an array of 100,000 ``None``s consumed less than .5 Mb, which ought
        # to be an acceptable loss for consistent and more efficient caching.
        if len(self._result_cache) == 0:
            self._result_cache = [None for i in xrange(self.query.get_count())]
        if start is None:
            start = 0
        if end is None:
            end = self.query.get_count()
        # Check if we wish to load all objects.
        if self._load_all:
            original_results = []
            models_pks = {}
            loaded_objects = {}
            # Remember the search position for each result so we don't have to resort later.
            for result in results:
                original_results.append(result)
                models_pks.setdefault(result.model, []).append(result.pk)
            # Load the objects for each model in turn.
            for model in models_pks:
                try:
                    loaded_objects[model] = self.site.get_index(model).read_queryset().in_bulk(models_pks[model])
                except NotRegistered:
                    # BUG FIX: ``SearchQuerySet`` has no ``log``, ``app_label``
                    # or ``model_name`` attributes, so the previous
                    # ``self.log.warning(...)`` call raised ``AttributeError``
                    # instead of warning.
                    warnings.warn("Model %r not registered with search site." % model)
                    # Revert to old behaviour
                    loaded_objects[model] = model._default_manager.in_bulk(models_pks[model])
        to_cache = []
        for result in results:
            if self._load_all:
                # We have to deal with integer keys being cast from strings
                model_objects = loaded_objects.get(result.model, {})
                if result.pk not in model_objects:
                    try:
                        result.pk = int(result.pk)
                    except ValueError:
                        pass
                try:
                    result._object = model_objects[result.pk]
                except KeyError:
                    # The object was either deleted since we indexed or should
                    # be ignored; fail silently.
                    self._ignored_result_count += 1
                    continue
            to_cache.append(result)
        # Assign by slice.
        self._result_cache[start:start + len(to_cache)] = to_cache
        return True
    def __getitem__(self, k):
        """
        Retrieves an item or slice from the set of results.
        """
        if not isinstance(k, (slice, int, long)):
            raise TypeError
        assert ((not isinstance(k, slice) and (k >= 0))
                or (isinstance(k, slice) and (k.start is None or k.start >= 0)
                    and (k.stop is None or k.stop >= 0))), \
            "Negative indexing is not supported."
        # Remember if it's a slice or not. We're going to treat everything as
        # a slice to simply the logic and will `.pop()` at the end as needed.
        if isinstance(k, slice):
            is_slice = True
            start = k.start
            if k.stop is not None:
                bound = int(k.stop)
            else:
                bound = None
        else:
            is_slice = False
            start = k
            bound = k + 1
        # We need check to see if we need to populate more of the cache.
        if len(self._result_cache) <= 0 or (None in self._result_cache[start:bound] and not self._cache_is_full()):
            try:
                self._fill_cache(start, bound)
            except StopIteration:
                # There's nothing left, even though the bound is higher.
                pass
        # Cache should be full enough for our needs.
        if is_slice:
            return self._result_cache[start:bound]
        else:
            return self._result_cache[start]
    # Methods that return a SearchQuerySet.
    def all(self):
        """Returns all results for the query."""
        return self._clone()
    def none(self):
        """Returns an empty result set for the query."""
        return self._clone(klass=EmptySearchQuerySet)
    def filter(self, *args, **kwargs):
        """Narrows the search based on certain attributes and the default operator."""
        if DEFAULT_OPERATOR == 'OR':
            return self.filter_or(*args, **kwargs)
        else:
            return self.filter_and(*args, **kwargs)
    def exclude(self, *args, **kwargs):
        """Narrows the search by ensuring certain attributes are not included."""
        clone = self._clone()
        clone.query.add_filter(~SQ(*args, **kwargs))
        return clone
    def filter_and(self, *args, **kwargs):
        """Narrows the search by looking for (and including) certain attributes."""
        clone = self._clone()
        clone.query.add_filter(SQ(*args, **kwargs))
        return clone
    def filter_or(self, *args, **kwargs):
        """Narrows the search by looking for (and including) certain attributes, joined with OR."""
        clone = self._clone()
        clone.query.add_filter(SQ(*args, **kwargs), use_or=True)
        return clone
    def order_by(self, *args):
        """Alters the order in which the results should appear."""
        clone = self._clone()
        for field in args:
            clone.query.add_order_by(field)
        return clone
    def highlight(self):
        """Adds highlighting to the results."""
        clone = self._clone()
        clone.query.add_highlight()
        return clone
    def models(self, *models):
        """Accepts an arbitrary number of Model classes to include in the search."""
        clone = self._clone()
        for model in models:
            if model not in self.site.get_indexed_models():
                warnings.warn('The model %r is not registered for search.' % model)
            clone.query.add_model(model)
        return clone
    def result_class(self, klass):
        """
        Allows specifying a different class to use for results.

        Overrides any previous usages. If ``None`` is provided, Haystack will
        revert back to the default ``SearchResult`` object.
        """
        clone = self._clone()
        clone.query.set_result_class(klass)
        return clone
    def boost(self, term, boost):
        """Boosts a certain aspect of the query."""
        clone = self._clone()
        clone.query.add_boost(term, boost)
        return clone
    def facet(self, field):
        """Adds faceting to a query for the provided field."""
        clone = self._clone()
        clone.query.add_field_facet(field)
        return clone
    def date_facet(self, field, start_date, end_date, gap_by, gap_amount=1):
        """Adds faceting to a query for the provided field by date."""
        clone = self._clone()
        clone.query.add_date_facet(field, start_date, end_date, gap_by, gap_amount=gap_amount)
        return clone
    def query_facet(self, field, query):
        """Adds faceting to a query for the provided field with a custom query."""
        clone = self._clone()
        clone.query.add_query_facet(field, query)
        return clone
    def narrow(self, query):
        """Pushes existing facet choices into the search."""
        clone = self._clone()
        clone.query.add_narrow_query(query)
        return clone
    def raw_search(self, query_string, **kwargs):
        """Passes a raw query directly to the backend."""
        clone = self._clone()
        clone.query.raw_search(query_string, **kwargs)
        return clone
    def load_all(self):
        """Efficiently populates the objects in the search results."""
        clone = self._clone()
        clone._load_all = True
        return clone
    def auto_query(self, query_string):
        """
        Performs a best guess constructing the search query.

        This method is somewhat naive but works well enough for the simple,
        common cases.
        """
        clone = self._clone()
        # Pull out anything wrapped in quotes and do an exact match on it.
        open_quote_position = None
        non_exact_query = query_string
        for offset, char in enumerate(query_string):
            if char == '"':
                if open_quote_position is not None:
                    current_match = non_exact_query[open_quote_position + 1:offset]
                    if current_match:
                        clone = clone.filter(content=clone.query.clean(current_match))
                    non_exact_query = non_exact_query.replace('"%s"' % current_match, '', 1)
                    open_quote_position = None
                else:
                    open_quote_position = offset
        # Pseudo-tokenize the rest of the query.
        keywords = non_exact_query.split()
        # Loop through keywords and add filters to the query.
        for keyword in keywords:
            exclude = False
            # A leading ``-`` negates the keyword.
            if keyword.startswith('-') and len(keyword) > 1:
                keyword = keyword[1:]
                exclude = True
            cleaned_keyword = clone.query.clean(keyword)
            if exclude:
                clone = clone.exclude(content=cleaned_keyword)
            else:
                clone = clone.filter(content=cleaned_keyword)
        return clone
    def autocomplete(self, **kwargs):
        """
        A shortcut method to perform an autocomplete search.

        Must be run against fields that are either ``NgramField`` or
        ``EdgeNgramField``.
        """
        clone = self._clone()
        query_bits = []
        for field_name, query in kwargs.items():
            for word in query.split(' '):
                bit = clone.query.clean(word.strip())
                # Use a distinct name instead of rebinding ``kwargs``, which
                # was confusing (though harmless, as items() was already
                # materialized).
                sq_kwargs = {
                    field_name: bit,
                }
                query_bits.append(SQ(**sq_kwargs))
        return clone.filter(reduce(operator.__and__, query_bits))
    # Methods that do not return a SearchQuerySet.
    def count(self):
        """Returns the total number of matching results."""
        return len(self)
    def best_match(self):
        """Returns the best/top search result that matches the query."""
        return self[0]
    def latest(self, date_field):
        """Returns the most recent search result that matches the query."""
        clone = self._clone()
        clone.query.clear_order_by()
        clone.query.add_order_by("-%s" % date_field)
        return clone.best_match()
    def more_like_this(self, model_instance):
        """Finds similar results to the object passed in."""
        clone = self._clone()
        clone.query.more_like_this(model_instance)
        return clone
    def facet_counts(self):
        """
        Returns the facet counts found by the query.

        This will cause the query to execute and should generally be used when
        presenting the data.
        """
        clone = self._clone()
        return clone.query.get_facet_counts()
    def spelling_suggestion(self, preferred_query=None):
        """
        Returns the spelling suggestion found by the query.

        To work, you must set ``settings.HAYSTACK_INCLUDE_SPELLING`` to True.
        Otherwise, ``None`` will be returned.

        This will cause the query to execute and should generally be used when
        presenting the data.
        """
        clone = self._clone()
        return clone.query.get_spelling_suggestion(preferred_query)
    # Utility methods.
    def _clone(self, klass=None):
        if klass is None:
            klass = self.__class__
        query = self.query._clone()
        clone = klass(site=self.site, query=query)
        clone._load_all = self._load_all
        return clone
class EmptySearchQuerySet(SearchQuerySet):
    """
    A ``SearchQuerySet`` stub that behaves like the real thing but never
    yields any results.
    """
    def __len__(self):
        # There is never anything to count.
        return 0
    def _cache_is_full(self):
        # Report the cache as fully populated so iteration stops immediately.
        return True
    def _fill_cache(self, start, end):
        # Nothing can ever be fetched.
        return False
    def _clone(self, klass=None):
        cloned = super(EmptySearchQuerySet, self)._clone(klass=klass)
        cloned._result_cache = []
        return cloned
    def facet_counts(self):
        # No query is run, so there are no facet counts either.
        return {}
class RelatedSearchQuerySet(SearchQuerySet):
    """
    A variant of the SearchQuerySet that can handle `load_all_queryset`s.

    This is predominantly different in the `_fill_cache` method, as it is
    far less efficient but needs to fill the cache before it to maintain
    consistency.
    """
    # NOTE(review): these mutable class-level defaults are shared by all
    # instances that never reassign them; ``_clone`` below also aliases
    # ``_load_all_querysets`` rather than copying it, so mutations via
    # ``load_all_queryset`` can leak between clones — verify this is intended.
    _load_all_querysets = {}
    _result_cache = []
    def _cache_is_full(self):
        # The cache here is append-only (no ``None`` placeholders), so a
        # simple length comparison suffices.
        return len(self._result_cache) >= len(self)
    def _manual_iter(self):
        # If we're here, our cache isn't fully populated.
        # For efficiency, fill the cache as we go if we run out of results.
        # Also, this can't be part of the __iter__ method due to Python's rules
        # about generator functions.
        current_position = 0
        current_cache_max = 0
        while True:
            current_cache_max = len(self._result_cache)
            while current_position < current_cache_max:
                yield self._result_cache[current_position]
                current_position += 1
            if self._cache_is_full():
                raise StopIteration
            # We've run out of results and haven't hit our limit.
            # Fill more of the cache.
            # Offset by the number of skipped (deleted/ignored) results so the
            # backend query window lines up with the cache.
            start = current_position + self._ignored_result_count
            if not self._fill_cache(start, start + ITERATOR_LOAD_PER_QUERY):
                raise StopIteration
    def _fill_cache(self, start, end):
        # Tell the query where to start from and how many we'd like.
        self.query._reset()
        self.query.set_limits(start, end)
        results = self.query.get_results()
        if len(results) == 0:
            return False
        if start is None:
            start = 0
        if end is None:
            end = self.query.get_count()
        # Check if we wish to load all objects.
        if self._load_all:
            original_results = []
            models_pks = {}
            loaded_objects = {}
            # Remember the search position for each result so we don't have to resort later.
            for result in results:
                original_results.append(result)
                models_pks.setdefault(result.model, []).append(result.pk)
            # Load the objects for each model in turn.
            for model in models_pks:
                if model in self._load_all_querysets:
                    # Use the overriding queryset.
                    loaded_objects[model] = self._load_all_querysets[model].in_bulk(models_pks[model])
                else:
                    # Check the SearchIndex for the model for an override.
                    try:
                        index = self.site.get_index(model)
                        qs = index.load_all_queryset()
                        loaded_objects[model] = qs.in_bulk(models_pks[model])
                    except NotRegistered:
                        # The model returned doesn't seem to be registered with
                        # the current site. We should silently fail and populate
                        # nothing for those objects.
                        loaded_objects[model] = []
        # A short final page means the backend skipped some hits; account for
        # them so ``_cache_is_full`` can still terminate.
        if len(results) + len(self._result_cache) < len(self) and len(results) < ITERATOR_LOAD_PER_QUERY:
            self._ignored_result_count += ITERATOR_LOAD_PER_QUERY - len(results)
        for result in results:
            if self._load_all:
                # We have to deal with integer keys being cast from strings; if this
                # fails we've got a character pk.
                try:
                    result.pk = int(result.pk)
                except ValueError:
                    pass
                try:
                    result._object = loaded_objects[result.model][result.pk]
                except (KeyError, IndexError):
                    # The object was either deleted since we indexed or should
                    # be ignored; fail silently.
                    self._ignored_result_count += 1
                    continue
            self._result_cache.append(result)
        return True
    def __getitem__(self, k):
        """
        Retrieves an item or slice from the set of results.
        """
        if not isinstance(k, (slice, int, long)):
            raise TypeError
        assert ((not isinstance(k, slice) and (k >= 0))
                or (isinstance(k, slice) and (k.start is None or k.start >= 0)
                    and (k.stop is None or k.stop >= 0))), \
            "Negative indexing is not supported."
        # Remember if it's a slice or not. We're going to treat everything as
        # a slice to simply the logic and will `.pop()` at the end as needed.
        if isinstance(k, slice):
            is_slice = True
            start = k.start
            if k.stop is not None:
                bound = int(k.stop)
            else:
                bound = None
        else:
            is_slice = False
            start = k
            bound = k + 1
        # We need check to see if we need to populate more of the cache.
        if len(self._result_cache) <= 0 or not self._cache_is_full():
            try:
                # Unlike the base class, fetch page by page until the cache
                # reaches the requested bound, skipping ignored results.
                while len(self._result_cache) < bound and not self._cache_is_full():
                    current_max = len(self._result_cache) + self._ignored_result_count
                    self._fill_cache(current_max, current_max + ITERATOR_LOAD_PER_QUERY)
            except StopIteration:
                # There's nothing left, even though the bound is higher.
                pass
        # Cache should be full enough for our needs.
        if is_slice:
            return self._result_cache[start:bound]
        else:
            return self._result_cache[start]
    def load_all_queryset(self, model, queryset):
        """
        Allows for specifying a custom ``QuerySet`` that changes how ``load_all``
        will fetch records for the provided model.

        This is useful for post-processing the results from the query, enabling
        things like adding ``select_related`` or filtering certain data.
        """
        clone = self._clone()
        clone._load_all_querysets[model] = queryset
        return clone
    def _clone(self, klass=None):
        if klass is None:
            klass = self.__class__
        query = self.query._clone()
        clone = klass(site=self.site, query=query)
        clone._load_all = self._load_all
        # NOTE(review): aliases (does not copy) the querysets dict; see the
        # class-level note above.
        clone._load_all_querysets = self._load_all_querysets
        return clone
| bsd-3-clause |
JohnDenker/brython | www/src/Lib/test/test_importlib/util.py | 34 | 4274 | from contextlib import contextmanager
import imp
import os.path
from test import support
import unittest
import sys
# Probe whether the filesystem hosting this test file is case-insensitive by
# flipping the case of our own __file__ and checking it still resolves.
CASE_INSENSITIVE_FS = True
# Windows is the only OS that is *always* case-insensitive
# (OS X *can* be case-sensitive).
if sys.platform not in ('win32', 'cygwin'):
    changed_name = __file__.upper()
    if changed_name == __file__:
        # Path was already all upper-case; flip the other way instead.
        changed_name = __file__.lower()
    if not os.path.exists(changed_name):
        CASE_INSENSITIVE_FS = False
def case_insensitive_tests(test):
    """Class decorator that nullifies tests requiring a case-insensitive
    file system."""
    skip = unittest.skipIf(not CASE_INSENSITIVE_FS,
                           "requires a case-insensitive filesystem")
    return skip(test)
@contextmanager
def uncache(*names):
    """Uncache a module from sys.modules.

    A basic sanity check is performed to prevent uncaching modules that either
    cannot/shouldn't be uncached.

    """
    # Refuse to touch modules the interpreter itself depends on.
    protected = ('sys', 'marshal', 'imp')
    for name in names:
        if name in protected:
            raise ValueError(
                "cannot uncache {0}".format(name))
        # Drop the module if present; a missing entry is fine.
        sys.modules.pop(name, None)
    try:
        yield
    finally:
        # Anything (re)imported inside the block is dropped again on exit.
        for name in names:
            sys.modules.pop(name, None)
@contextmanager
def import_state(**kwargs):
    """Context manager to manage the various importers and stored state in the
    sys module.

    The 'modules' attribute is not supported as the interpreter state stores a
    pointer to the dict that the interpreter uses internally;
    reassigning to sys.modules does not have the desired effect.

    """
    attrs_with_defaults = (('meta_path', []), ('path', []),
                           ('path_hooks', []),
                           ('path_importer_cache', {}))
    saved = {}
    try:
        for attr, default in attrs_with_defaults:
            # Remember the current value, then install the override (or a
            # fresh empty default).
            saved[attr] = getattr(sys, attr)
            setattr(sys, attr, kwargs.pop(attr, default))
        if len(kwargs):
            raise ValueError(
                'unrecognized arguments: {0}'.format(kwargs.keys()))
        yield
    finally:
        # Restore every attribute we touched, even on error.
        for attr, value in saved.items():
            setattr(sys, attr, value)
class mock_modules:
    """A mock importer/loader.

    Pre-creates a module object for every requested dotted name and serves
    them through the (pre-PEP 451) finder/loader protocol.  Usable as a
    context manager: on entry the mocked names are removed from
    ``sys.modules`` and restored to their previous absence on exit.
    """
    def __init__(self, *names, module_code={}):
        # NOTE(review): ``module_code={}`` is a mutable default argument; it
        # is only read here, never mutated, so sharing is harmless — but a
        # ``None`` default would be the safer idiom.
        self.modules = {}
        self.module_code = {}
        for name in names:
            # A trailing '.__init__' marks a package; the module is keyed by
            # the package name itself.
            if not name.endswith('.__init__'):
                import_name = name
            else:
                import_name = name[:-len('.__init__')]
            if '.' not in name:
                package = None
            elif import_name == name:
                # Plain submodule: its package is everything before the dot.
                package = name.rsplit('.', 1)[0]
            else:
                # Package __init__: the module is its own package.
                package = import_name
            module = imp.new_module(import_name)
            module.__loader__ = self
            module.__file__ = '<mock __file__>'
            module.__package__ = package
            module.attr = name
            if import_name != name:
                # Packages need a __path__ to allow submodule imports.
                module.__path__ = ['<mock __path__>']
            self.modules[import_name] = module
            if import_name in module_code:
                self.module_code[import_name] = module_code[import_name]
    def __getitem__(self, name):
        """Return the pre-built mock module for *name*."""
        return self.modules[name]
    def find_module(self, fullname, path=None):
        # Finder protocol: claim only the names we were built with.
        if fullname not in self.modules:
            return None
        else:
            return self
    def load_module(self, fullname):
        # Loader protocol: install the mock module, optionally running the
        # caller-supplied code for it.
        if fullname not in self.modules:
            raise ImportError
        else:
            sys.modules[fullname] = self.modules[fullname]
            if fullname in self.module_code:
                try:
                    self.module_code[fullname]()
                except Exception:
                    # Failed "execution" must not leave the module cached.
                    del sys.modules[fullname]
                    raise
            return self.modules[fullname]
    def __enter__(self):
        self._uncache = uncache(*self.modules.keys())
        self._uncache.__enter__()
        return self
    def __exit__(self, *exc_info):
        self._uncache.__exit__(None, None, None)
| bsd-3-clause |
larsoner/mne-python | tutorials/preprocessing/plot_40_artifact_correction_ica.py | 3 | 30767 | # -*- coding: utf-8 -*-
"""
.. _tut-artifact-ica:
Repairing artifacts with ICA
============================
This tutorial covers the basics of independent components analysis (ICA) and
shows how ICA can be used for artifact repair; an extended example illustrates
repair of ocular and heartbeat artifacts.
.. contents:: Page contents
:local:
:depth: 2
We begin as always by importing the necessary Python modules and loading some
:ref:`example data <sample-dataset>`. Because ICA can be computationally
intense, we'll also crop the data to 60 seconds; and to save ourselves from
repeatedly typing ``mne.preprocessing`` we'll directly import a few functions
and classes from that submodule:
"""
# Download (if needed) and load the MNE sample dataset; cropping to 60 s
# keeps the ICA examples below fast.
import os
import mne
from mne.preprocessing import (ICA, create_eog_epochs, create_ecg_epochs,
                               corrmap)
sample_data_folder = mne.datasets.sample.data_path()
sample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample',
                                    'sample_audvis_raw.fif')
raw = mne.io.read_raw_fif(sample_data_raw_file)
raw.crop(tmax=60.)  # keep only the first 60 seconds
###############################################################################
# .. note::
# Before applying ICA (or any artifact repair strategy), be sure to observe
# the artifacts in your data to make sure you choose the right repair tool.
# Sometimes the right tool is no tool at all — if the artifacts are small
# enough you may not even need to repair them to get good analysis results.
# See :ref:`tut-artifact-overview` for guidance on detecting and
# visualizing various types of artifact.
#
# What is ICA?
# ^^^^^^^^^^^^
#
# Independent components analysis (ICA) is a technique for estimating
# independent source signals from a set of recordings in which the source
# signals were mixed together in unknown ratios. A common example of this is
# the problem of `blind source separation`_: with 3 musical instruments playing
# in the same room, and 3 microphones recording the performance (each picking
# up all 3 instruments, but at varying levels), can you somehow "unmix" the
# signals recorded by the 3 microphones so that you end up with a separate
# "recording" isolating the sound of each instrument?
#
# It is not hard to see how this analogy applies to EEG/MEG analysis: there are
# many "microphones" (sensor channels) simultaneously recording many
# "instruments" (blinks, heartbeats, activity in different areas of the brain,
# muscular activity from jaw clenching or swallowing, etc). As long as these
# various source signals are `statistically independent`_ and non-gaussian, it
# is usually possible to separate the sources using ICA, and then re-construct
# the sensor signals after excluding the sources that are unwanted.
#
#
# ICA in MNE-Python
# ~~~~~~~~~~~~~~~~~
#
# .. sidebar:: ICA and dimensionality reduction
#
# If you want to perform ICA with *no* dimensionality reduction (other than
# the number of Independent Components (ICs) given in ``n_components``, and
# any subsequent exclusion of ICs you specify in ``ICA.exclude``), pass
# ``n_pca_components=None`` (this is the default value).
#
# However, if you *do* want to reduce dimensionality, consider this
# example: if you have 300 sensor channels and you set
#     ``n_pca_components=None`` and ``n_components=50``, then the first 50
# PCs are sent to the ICA algorithm (yielding 50 ICs), and during
# reconstruction `~mne.preprocessing.ICA.apply` will use the 50 ICs
# plus PCs number 51-300 (the full PCA residual). If instead you specify
# ``n_pca_components=120`` in `~mne.preprocessing.ICA.apply`, it will
# reconstruct using the 50 ICs plus the first 70 PCs in the PCA residual
# (numbers 51-120), thus discarding the smallest 180 components.
#
# **If you have previously been using EEGLAB**'s ``runica()`` and are
# looking for the equivalent of its ``'pca', n`` option to reduce
# dimensionality via PCA before the ICA step, set ``n_components=n``
# during initialization and pass ``n_pca_components=n`` to
# `~mne.preprocessing.ICA.apply`.
#
# MNE-Python implements three different ICA algorithms: ``fastica`` (the
# default), ``picard``, and ``infomax``. FastICA and Infomax are both in fairly
# widespread use; Picard is a newer (2017) algorithm that is expected to
# converge faster than FastICA and Infomax, and is more robust than other
# algorithms in cases where the sources are not completely independent, which
# typically happens with real EEG/MEG data. See [1]_ for more information.
#
# The ICA interface in MNE-Python is similar to the interface in
# `scikit-learn`_: some general parameters are specified when creating an
# `~mne.preprocessing.ICA` object, then the `~mne.preprocessing.ICA` object is
# fit to the data using its `~mne.preprocessing.ICA.fit` method. The results of
# the fitting are added to the `~mne.preprocessing.ICA` object as attributes
# that end in an underscore (``_``), such as ``ica.mixing_matrix_`` and
# ``ica.unmixing_matrix_``. After fitting, the ICA component(s) that you want
# to remove must be chosen, and the ICA fit must then be applied to the
# `~mne.io.Raw` or `~mne.Epochs` object using the `~mne.preprocessing.ICA`
# object's `~mne.preprocessing.ICA.apply` method.
#
# As is typically done with ICA, the data are first scaled to unit variance and
# whitened using principal components analysis (PCA) before performing the ICA
# decomposition. This is a two-stage process:
#
# 1. To deal with different channel types having different units
# (e.g., Volts for EEG and Tesla for MEG), data must be pre-whitened.
# If ``noise_cov=None`` (default), all data of a given channel type is
# scaled by the standard deviation across all channels. If ``noise_cov`` is
# a `~mne.Covariance`, the channels are pre-whitened using the covariance.
# 2. The pre-whitened data are then decomposed using PCA.
#
# From the resulting principal components (PCs), the first ``n_components`` are
# then passed to the ICA algorithm if ``n_components`` is an integer number.
# It can also be a float between 0 and 1, specifying the **fraction** of
# explained variance that the PCs should capture; the appropriate number of
# PCs (i.e., just as many PCs as are required to explain the given fraction
# of total variance) is then passed to the ICA.
#
# After visualizing the Independent Components (ICs) and excluding any that
# capture artifacts you want to repair, the sensor signal can be reconstructed
# using the `~mne.preprocessing.ICA` object's
# `~mne.preprocessing.ICA.apply` method. By default, signal
# reconstruction uses all of the ICs (less any ICs listed in ``ICA.exclude``)
# plus all of the PCs that were not included in the ICA decomposition (i.e.,
# the "PCA residual"). If you want to reduce the number of components used at
# the reconstruction stage, it is controlled by the ``n_pca_components``
# parameter (which will in turn reduce the rank of your data; by default
# ``n_pca_components=None`` resulting in no additional dimensionality
# reduction). The fitting and reconstruction procedures and the
# parameters that control dimensionality at various stages are summarized in
# the diagram below:
#
# .. graphviz:: ../../_static/diagrams/ica.dot
# :alt: Diagram of ICA procedure in MNE-Python
# :align: left
#
# See the Notes section of the `~mne.preprocessing.ICA` documentation
# for further details. Next we'll walk through an extended example that
# illustrates each of these steps in greater detail.
#
# Example: EOG and ECG artifact repair
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# Visualizing the artifacts
# ~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Let's begin by visualizing the artifacts that we want to repair. In this
# dataset they are big enough to see easily in the raw data:
# pick some channels that clearly show heartbeats and blinks
regexp = r'(MEG [12][45][123]1|EEG 00.)'
# channels whose names match the regex are shown in the raw browser below
artifact_picks = mne.pick_channels_regexp(raw.ch_names, regexp=regexp)
raw.plot(order=artifact_picks, n_channels=len(artifact_picks),
         show_scrollbars=False)
###############################################################################
# We can get a summary of how the ocular artifact manifests across each channel
# type using :func:`~mne.preprocessing.create_eog_epochs` like we did in the
# :ref:`tut-artifact-overview` tutorial:
eog_evoked = create_eog_epochs(raw).average()
eog_evoked.apply_baseline(baseline=(None, -0.2))
eog_evoked.plot_joint()
###############################################################################
# Now we'll do the same for the heartbeat artifacts, using
# :func:`~mne.preprocessing.create_ecg_epochs`:
ecg_evoked = create_ecg_epochs(raw).average()
ecg_evoked.apply_baseline(baseline=(None, -0.2))
ecg_evoked.plot_joint()
###############################################################################
# Filtering to remove slow drifts
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Before we run the ICA, an important step is filtering the data to remove
# low-frequency drifts, which can negatively affect the quality of the ICA fit.
# The slow drifts are problematic because they reduce the independence of the
# assumed-to-be-independent sources (e.g., during a slow upward drift, the
# neural, heartbeat, blink, and other muscular sources will all tend to have
# higher values), making it harder for the algorithm to find an accurate
# solution. A high-pass filter with 1 Hz cutoff frequency is recommended.
# However, because filtering is a linear operation, the ICA solution found from
# the filtered signal can be applied to the unfiltered signal (see [2]_ for
# more information), so we'll keep a copy of the unfiltered
# :class:`~mne.io.Raw` object around so we can apply the ICA solution to it
# later.
# Work on a copy: the 1 Hz high-pass improves the ICA fit, while the ICA
# solution will later be applied to the *unfiltered* `raw`.
filt_raw = raw.copy()
filt_raw.load_data().filter(l_freq=1., h_freq=None)
###############################################################################
# Fitting and plotting the ICA solution
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# .. sidebar:: Ignoring the time domain
#
# The ICA algorithms implemented in MNE-Python find patterns across
# channels, but ignore the time domain. This means you can compute ICA on
# discontinuous :class:`~mne.Epochs` or :class:`~mne.Evoked` objects (not
# just continuous :class:`~mne.io.Raw` objects), or only use every Nth
# sample by passing the ``decim`` parameter to ``ICA.fit()``.
#
# Now we're ready to set up and fit the ICA. Since we know (from observing our
# raw data) that the EOG and ECG artifacts are fairly strong, we would expect
# those artifacts to be captured in the first few dimensions of the PCA
# decomposition that happens before the ICA. Therefore, we probably don't need
# a huge number of components to do a good job of isolating our artifacts
# (though it is usually preferable to include more components for a more
# accurate solution). As a first guess, we'll run ICA with ``n_components=15``
# (use only the first 15 PCA components to compute the ICA decomposition) — a
# very small number given that our data has over 300 channels, but with the
# advantage that it will run quickly and we will be able to tell easily whether
# worked or not (because we already know what the EOG / ECG artifacts should
# look like).
#
# ICA fitting is not deterministic (e.g., the components may get a sign
# flip on different runs, or may not always be returned in the same order), so
# we'll also specify a `random seed`_ so that we get identical results each
# time this tutorial is built by our web servers.
# 15 components is deliberately small (fast and easy to inspect);
# random_state makes the decomposition reproducible across runs.
ica = ICA(n_components=15, random_state=97)
ica.fit(filt_raw)
###############################################################################
# Some optional parameters that we could have passed to the
# :meth:`~mne.preprocessing.ICA.fit` method include ``decim`` (to use only
# every Nth sample in computing the ICs, which can yield a considerable
# speed-up) and ``reject`` (for providing a rejection dictionary for maximum
# acceptable peak-to-peak amplitudes for each channel type, just like we used
# when creating epoched data in the :ref:`tut-overview` tutorial).
#
# Now we can examine the ICs to see what they captured.
# :meth:`~mne.preprocessing.ICA.plot_sources` will show the time series of the
# ICs. Note that in our call to :meth:`~mne.preprocessing.ICA.plot_sources` we
# can use the original, unfiltered :class:`~mne.io.Raw` object:
# load the unfiltered data into memory before plotting the IC time series
raw.load_data()
ica.plot_sources(raw, show_scrollbars=False)
###############################################################################
# Here we can pretty clearly see that the first component (``ICA000``) captures
# the EOG signal quite well, and the second component (``ICA001``) looks a lot
# like `a heartbeat <qrs_>`_ (for more info on visually identifying Independent
# Components, `this EEGLAB tutorial`_ is a good resource). We can also
# visualize the scalp field distribution of each component using
# :meth:`~mne.preprocessing.ICA.plot_components`. These are interpolated based
# on the values in the ICA mixing matrix:
# sphinx_gallery_thumbnail_number = 9
ica.plot_components()
###############################################################################
# .. note::
#
# :meth:`~mne.preprocessing.ICA.plot_components` (which plots the scalp
# field topographies for each component) has an optional ``inst`` parameter
# that takes an instance of :class:`~mne.io.Raw` or :class:`~mne.Epochs`.
# Passing ``inst`` makes the scalp topographies interactive: clicking one
# will bring up a diagnostic :meth:`~mne.preprocessing.ICA.plot_properties`
# window (see below) for that component.
#
# In the plots above it's fairly obvious which ICs are capturing our EOG and
# ECG artifacts, but there are additional ways to visualize them, just to
# be sure. First, we can plot an overlay of the original signal against the
# reconstructed signal with the artifactual ICs excluded, using
# :meth:`~mne.preprocessing.ICA.plot_overlay`:
# blinks: overlay the original EEG against a reconstruction excluding IC 0
ica.plot_overlay(raw, exclude=[0], picks='eeg')
# heartbeats: same idea for magnetometers, excluding IC 1
ica.plot_overlay(raw, exclude=[1], picks='mag')
###############################################################################
# We can also plot some diagnostics of each IC using
# :meth:`~mne.preprocessing.ICA.plot_properties`:
ica.plot_properties(raw, picks=[0, 1])
###############################################################################
# In the remaining sections, we'll look at different ways of choosing which ICs
# to exclude prior to reconstructing the sensor signals.
#
#
# Selecting ICA components manually
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Once we're certain which components we want to exclude, we can specify that
# manually by setting the ``ica.exclude`` attribute. Similar to marking bad
# channels, merely setting ``ica.exclude`` doesn't do anything immediately (it
# just adds the excluded ICs to a list that will get used later when it's
# needed). Once the exclusions have been set, ICA methods like
# :meth:`~mne.preprocessing.ICA.plot_overlay` will exclude those component(s)
# even if no ``exclude`` parameter is passed, and the list of excluded
# components will be preserved when using :meth:`mne.preprocessing.ICA.save`
# and :func:`mne.preprocessing.read_ica`.
ica.exclude = [0, 1]  # indices chosen based on various plots above
###############################################################################
# Now that the exclusions have been set, we can reconstruct the sensor signals
# with artifacts removed using the :meth:`~mne.preprocessing.ICA.apply` method
# (remember, we're applying the ICA solution from the *filtered* data to the
# original *unfiltered* signal). Plotting the original raw data alongside the
# reconstructed data shows that the heartbeat and blink artifacts are repaired.
# ica.apply() changes the Raw object in-place, so let's make a copy first:
reconst_raw = raw.copy()
ica.apply(reconst_raw)
raw.plot(order=artifact_picks, n_channels=len(artifact_picks),
         show_scrollbars=False)
reconst_raw.plot(order=artifact_picks, n_channels=len(artifact_picks),
                 show_scrollbars=False)
del reconst_raw  # drop the repaired copy now that it has been plotted
###############################################################################
# Using an EOG channel to select ICA components
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# It may have seemed easy to review the plots and manually select which ICs to
# exclude, but when processing dozens or hundreds of subjects this can become
# a tedious, rate-limiting step in the analysis pipeline. One alternative is to
# use dedicated EOG or ECG sensors as a "pattern" to check the ICs against, and
# automatically mark for exclusion any ICs that match the EOG/ECG pattern. Here
# we'll use :meth:`~mne.preprocessing.ICA.find_bads_eog` to automatically find
# the ICs that best match the EOG signal, then use
# :meth:`~mne.preprocessing.ICA.plot_scores` along with our other plotting
# functions to see which ICs it picked. We'll start by resetting
# ``ica.exclude`` back to an empty list:
ica.exclude = []  # start over with an empty exclusion list
# find which ICs match the EOG pattern
eog_indices, eog_scores = ica.find_bads_eog(raw)
ica.exclude = eog_indices
# barplot of ICA component "EOG match" scores
ica.plot_scores(eog_scores)
# plot diagnostics
ica.plot_properties(raw, picks=eog_indices)
# plot ICs applied to raw data, with EOG matches highlighted
ica.plot_sources(raw, show_scrollbars=False)
# plot ICs applied to the averaged EOG epochs, with EOG matches highlighted
ica.plot_sources(eog_evoked)
###############################################################################
# Note that above we used :meth:`~mne.preprocessing.ICA.plot_sources` on both
# the original :class:`~mne.io.Raw` instance and also on an
# :class:`~mne.Evoked` instance of the extracted EOG artifacts. This can be
# another way to confirm that :meth:`~mne.preprocessing.ICA.find_bads_eog` has
# identified the correct components.
#
#
# Using a simulated channel to select ICA components
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# If you don't have an EOG channel,
# :meth:`~mne.preprocessing.ICA.find_bads_eog` has a ``ch_name`` parameter that
# you can use as a proxy for EOG. You can use a single channel, or create a
# bipolar reference from frontal EEG sensors and use that as virtual EOG
# channel. This carries a risk however: you must hope that the frontal EEG
# channels only reflect EOG and not brain dynamics in the prefrontal cortex (or
# you must not care about those prefrontal signals).
#
# For ECG, it is easier: :meth:`~mne.preprocessing.ICA.find_bads_ecg` can use
# cross-channel averaging of magnetometer or gradiometer channels to construct
# a virtual ECG channel, so if you have MEG channels it is usually not
# necessary to pass a specific channel name.
# :meth:`~mne.preprocessing.ICA.find_bads_ecg` also has two options for its
# ``method`` parameter: ``'ctps'`` (cross-trial phase statistics [3]_) and
# ``'correlation'`` (Pearson correlation between data and ECG channel).
ica.exclude = []  # reset exclusions before the ECG-based detection
# find which ICs match the ECG pattern
# (threshold='auto' lets MNE choose the score cutoff automatically)
ecg_indices, ecg_scores = ica.find_bads_ecg(raw, method='correlation',
                                            threshold='auto')
ica.exclude = ecg_indices
# barplot of ICA component "ECG match" scores
ica.plot_scores(ecg_scores)
# plot diagnostics
ica.plot_properties(raw, picks=ecg_indices)
# plot ICs applied to raw data, with ECG matches highlighted
ica.plot_sources(raw, show_scrollbars=False)
# plot ICs applied to the averaged ECG epochs, with ECG matches highlighted
ica.plot_sources(ecg_evoked)
###############################################################################
# The last of these plots is especially useful: it shows us that the heartbeat
# artifact is coming through on *two* ICs, and we've only caught one of them.
# In fact, if we look closely at the output of
# :meth:`~mne.preprocessing.ICA.plot_sources` (online, you can right-click →
# "view image" to zoom in), it looks like ``ICA014`` has a weak periodic
# component that is in-phase with ``ICA001``. It might be worthwhile to re-run
# the ICA with more components to see if that second heartbeat artifact
# resolves out a little better:
# refit the ICA with 30 components this time
new_ica = ICA(n_components=30, random_state=97)
new_ica.fit(filt_raw)
# find which ICs match the ECG pattern
ecg_indices, ecg_scores = new_ica.find_bads_ecg(raw, method='correlation',
                                                threshold='auto')
new_ica.exclude = ecg_indices
# barplot of ICA component "ECG match" scores
new_ica.plot_scores(ecg_scores)
# plot diagnostics
new_ica.plot_properties(raw, picks=ecg_indices)
# plot ICs applied to raw data, with ECG matches highlighted
new_ica.plot_sources(raw, show_scrollbars=False)
# plot ICs applied to the averaged ECG epochs, with ECG matches highlighted
new_ica.plot_sources(ecg_evoked)
###############################################################################
# Much better! Now we've captured both ICs that are reflecting the heartbeat
# artifact (and as a result, we got two diagnostic plots: one for each IC that
# reflects the heartbeat). This demonstrates the value of checking the results
# of automated approaches like :meth:`~mne.preprocessing.ICA.find_bads_ecg`
# before accepting them.
# clean up memory before moving on
del raw, filt_raw, ica, new_ica
###############################################################################
# Selecting ICA components using template matching
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# When dealing with multiple subjects, it is also possible to manually select
# an IC for exclusion on one subject, and then use that component as a
# *template* for selecting which ICs to exclude from other subjects' data,
# using :func:`mne.preprocessing.corrmap` [4]_. The idea behind
# :func:`~mne.preprocessing.corrmap` is that the artifact patterns are similar
# enough across subjects that corresponding ICs can be identified by
# correlating the ICs from each ICA solution with a common template, and
# picking the ICs with the highest correlation strength.
# :func:`~mne.preprocessing.corrmap` takes a list of ICA solutions, and a
# ``template`` parameter that specifies which ICA object and which component
# within it to use as a template.
#
# Since our sample dataset only contains data from one subject, we'll use a
# different dataset with multiple subjects: the EEGBCI dataset [5]_ [6]_. The
# dataset has 109 subjects, we'll just download one run (a left/right hand
# movement task) from each of the first 4 subjects:
# EEGBCI uses nonstandard channel names (with trailing dots); map them to
# standard 10-05 names so a montage can be attached below.
mapping = {
    'Fc5.': 'FC5', 'Fc3.': 'FC3', 'Fc1.': 'FC1', 'Fcz.': 'FCz', 'Fc2.': 'FC2',
    'Fc4.': 'FC4', 'Fc6.': 'FC6', 'C5..': 'C5', 'C3..': 'C3', 'C1..': 'C1',
    'Cz..': 'Cz', 'C2..': 'C2', 'C4..': 'C4', 'C6..': 'C6', 'Cp5.': 'CP5',
    'Cp3.': 'CP3', 'Cp1.': 'CP1', 'Cpz.': 'CPz', 'Cp2.': 'CP2', 'Cp4.': 'CP4',
    'Cp6.': 'CP6', 'Fp1.': 'Fp1', 'Fpz.': 'Fpz', 'Fp2.': 'Fp2', 'Af7.': 'AF7',
    'Af3.': 'AF3', 'Afz.': 'AFz', 'Af4.': 'AF4', 'Af8.': 'AF8', 'F7..': 'F7',
    'F5..': 'F5', 'F3..': 'F3', 'F1..': 'F1', 'Fz..': 'Fz', 'F2..': 'F2',
    'F4..': 'F4', 'F6..': 'F6', 'F8..': 'F8', 'Ft7.': 'FT7', 'Ft8.': 'FT8',
    'T7..': 'T7', 'T8..': 'T8', 'T9..': 'T9', 'T10.': 'T10', 'Tp7.': 'TP7',
    'Tp8.': 'TP8', 'P7..': 'P7', 'P5..': 'P5', 'P3..': 'P3', 'P1..': 'P1',
    'Pz..': 'Pz', 'P2..': 'P2', 'P4..': 'P4', 'P6..': 'P6', 'P8..': 'P8',
    'Po7.': 'PO7', 'Po3.': 'PO3', 'Poz.': 'POz', 'Po4.': 'PO4', 'Po8.': 'PO8',
    'O1..': 'O1', 'Oz..': 'Oz', 'O2..': 'O2', 'Iz..': 'Iz'
}
# collect one Raw and one fitted ICA per subject for corrmap below
raws = list()
icas = list()
for subj in range(4):
    # EEGBCI subjects are 1-indexed; run 3 is a left/right hand movement task
    fname = mne.datasets.eegbci.load_data(subj + 1, runs=[3])[0]
    raw = mne.io.read_raw_edf(fname)
    # remove trailing `.` from channel names so we can set montage
    raw.rename_channels(mapping)
    raw.set_montage('standard_1005')
    # fit ICA
    ica = ICA(n_components=30, random_state=97)
    ica.fit(raw)
    raws.append(raw)
    icas.append(ica)
###############################################################################
# Now let's run :func:`~mne.preprocessing.corrmap`:
# use the first subject as template; use Fpz as proxy for EOG
raw = raws[0]
ica = icas[0]
eog_inds, eog_scores = ica.find_bads_eog(raw, ch_name='Fpz')
# template=(ICA index in `icas`, component index within that ICA)
corrmap(icas, template=(0, eog_inds[0]))
###############################################################################
# The first figure shows the template map, while the second figure shows all
# the maps that were considered a "match" for the template (including the
# template itself). There were only three matches from the four subjects;
# notice the output message ``No maps selected for subject(s) 1, consider a
# more liberal threshold``. By default the threshold is set automatically by
# trying several values; here it may have chosen a threshold that is too high.
# Let's take a look at the ICA sources for each subject:
for index, (ica, raw) in enumerate(zip(icas, raws)):
    fig = ica.plot_sources(raw, show_scrollbars=False)
    fig.subplots_adjust(top=0.9)  # make space for title
    fig.suptitle('Subject {}'.format(index))
###############################################################################
# Notice that subject 1 *does* seem to have an IC that looks like it reflects
# blink artifacts (component ``ICA000``). Notice also that subject 3 appears to
# have *two* components that are reflecting ocular artifacts (``ICA000`` and
# ``ICA002``), but only one was caught by :func:`~mne.preprocessing.corrmap`.
# Let's try setting the threshold manually:
# a manual threshold of 0.9 catches the matches the automatic one missed
corrmap(icas, template=(0, eog_inds[0]), threshold=0.9)
###############################################################################
# Now we get the message ``At least 1 IC detected for each subject`` (which is
# good). At this point we'll re-run :func:`~mne.preprocessing.corrmap` with
# parameters ``label='blink', plot=False`` to *label* the ICs from each subject
# that capture the blink artifacts (without plotting them again).
corrmap(icas, template=(0, eog_inds[0]), threshold=0.9, label='blink',
        plot=False)
print([ica.labels_ for ica in icas])
###############################################################################
# Notice that the first subject has 3 different labels for the IC at index 0:
# "eog/0/Fpz", "eog", and "blink". The first two were added by
# :meth:`~mne.preprocessing.ICA.find_bads_eog`; the "blink" label was added by
# the last call to :func:`~mne.preprocessing.corrmap`. Notice also that each
# subject has at least one IC index labelled "blink", and subject 3 has two
# components (0 and 2) labelled "blink" (consistent with the plot of IC sources
# above). The ``labels_`` attribute of :class:`~mne.preprocessing.ICA` objects
# can also be manually edited to annotate the ICs with custom labels. They also
# come in handy when plotting:
# use the stored "blink" labels of subject 3 to plot and exclude its ocular ICs
icas[3].plot_components(picks=icas[3].labels_['blink'])
icas[3].exclude = icas[3].labels_['blink']
icas[3].plot_sources(raws[3], show_scrollbars=False)
###############################################################################
# As a final note, it is possible to extract ICs numerically using the
# :meth:`~mne.preprocessing.ICA.get_components` method of
# :class:`~mne.preprocessing.ICA` objects. This will return a :class:`NumPy
# array <numpy.ndarray>` that can be passed to
# :func:`~mne.preprocessing.corrmap` instead of the :class:`tuple` of
# ``(subject_index, component_index)`` we passed before, and will yield the
# same result:
template_eog_component = icas[0].get_components()[:, eog_inds[0]]
corrmap(icas, template=template_eog_component, threshold=0.9)
print(template_eog_component)
###############################################################################
# An advantage of using this numerical representation of an IC to capture a
# particular artifact pattern is that it can be saved and used as a template
# for future template-matching tasks using :func:`~mne.preprocessing.corrmap`
# without having to load or recompute the ICA solution that yielded the
# template originally. Put another way, when the template is a NumPy array, the
# :class:`~mne.preprocessing.ICA` object containing the template does not need
# to be in the list of ICAs provided to :func:`~mne.preprocessing.corrmap`.
#
#
# References
# ^^^^^^^^^^
#
# .. [1] Ablin P, Cardoso J, Gramfort A (2018). Faster Independent Component
# Analysis by Preconditioning With Hessian Approximations. *IEEE
# Transactions on Signal Processing* 66:4040–4049.
# https://doi.org/10.1109/TSP.2018.2844203
#
# .. [2] Winkler I, Debener S, Müller K-R, Tangermann M (2015). On the
# influence of high-pass filtering on ICA-based artifact reduction in
# EEG-ERP. Proceedings of EMBC-2015, 4101–4105.
# https://doi.org/10.1109/EMBC.2015.7319296
#
# .. [3] Dammers J, Schiek M, Boers F, Silex C, Zvyagintsev M, Pietrzyk U,
# Mathiak K (2008). Integration of amplitude and phase statistics for
# complete artifact removal in independent components of neuromagnetic
# recordings. *IEEE Transactions on Biomedical Engineering*
# 55(10):2353–2362. https://doi.org/10.1109/TBME.2008.926677
#
# .. [4] Viola FC, Thorne J, Edmonds B, Schneider T, Eichele T, Debener S
# (2009). Semi-automatic identification of independent components
# representing EEG artifact. *Clinical Neurophysiology* 120(5):868–877.
# https://doi.org/10.1016/j.clinph.2009.01.015
#
# .. [5] Schalk G, McFarland DJ, Hinterberger T, Birbaumer N, Wolpaw JR (2004).
# BCI2000: A General-Purpose Brain-Computer Interface (BCI) System.
# *IEEE Transactions on Biomedical Engineering* 51(6):1034-1043.
# https://doi.org/10.1109/TBME.2004.827072
#
# .. [6] Goldberger AL, Amaral LAN, Glass L, Hausdorff JM, Ivanov PCh, Mark RG,
# Mietus JE, Moody GB, Peng C-K, Stanley HE (2000). PhysioBank,
# PhysioToolkit, and PhysioNet: Components of a New Research Resource
# for Complex Physiologic Signals. *Circulation* 101(23):e215-e220.
# https://doi.org/10.1161/01.CIR.101.23.e215
#
#
# .. LINKS
#
# .. _`blind source separation`:
# https://en.wikipedia.org/wiki/Signal_separation
# .. _`statistically independent`:
# https://en.wikipedia.org/wiki/Independence_(probability_theory)
# .. _`scikit-learn`: https://scikit-learn.org
# .. _`random seed`: https://en.wikipedia.org/wiki/Random_seed
# .. _`regular expression`: https://www.regular-expressions.info/
# .. _`qrs`: https://en.wikipedia.org/wiki/QRS_complex
# .. _`this EEGLAB tutorial`: https://labeling.ucsd.edu/tutorial/labels
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__all__ = ["StorageLevel"]


class StorageLevel(object):
    """
    Flags for controlling the storage of an RDD. Each StorageLevel records whether to use memory,
    whether to drop the RDD to disk if it falls out of memory, whether to keep the data in memory
    in a serialized format, and whether to replicate the RDD partitions on multiple nodes.
    Also contains static constants for some commonly used storage levels, such as MEMORY_ONLY.
    """

    def __init__(self, useDisk, useMemory, useOffHeap, deserialized, replication=1):
        # Flags are stored verbatim; no validation is performed here.
        self.useDisk = useDisk
        self.useMemory = useMemory
        self.useOffHeap = useOffHeap
        self.deserialized = deserialized
        self.replication = replication

    def __repr__(self):
        # Debug-friendly form mirroring the constructor arguments.
        fields = (self.useDisk, self.useMemory, self.useOffHeap,
                  self.deserialized, self.replication)
        return "StorageLevel(%s, %s, %s, %s, %s)" % fields

    def __str__(self):
        # Human-readable summary, e.g. "Disk Memory Serialized 2x Replicated".
        pieces = []
        if self.useDisk:
            pieces.append("Disk ")
        if self.useMemory:
            pieces.append("Memory ")
        if self.useOffHeap:
            pieces.append("Tachyon ")
        pieces.append("Deserialized " if self.deserialized else "Serialized ")
        pieces.append("%sx Replicated" % self.replication)
        return "".join(pieces)
# Commonly used storage levels attached as class-level constants.
# Argument order: StorageLevel(useDisk, useMemory, useOffHeap, deserialized,
# replication=1); the *_2 variants replicate each partition on two nodes.
StorageLevel.DISK_ONLY = StorageLevel(True, False, False, False)
StorageLevel.DISK_ONLY_2 = StorageLevel(True, False, False, False, 2)
StorageLevel.MEMORY_ONLY = StorageLevel(False, True, False, True)
StorageLevel.MEMORY_ONLY_2 = StorageLevel(False, True, False, True, 2)
StorageLevel.MEMORY_ONLY_SER = StorageLevel(False, True, False, False)
StorageLevel.MEMORY_ONLY_SER_2 = StorageLevel(False, True, False, False, 2)
StorageLevel.MEMORY_AND_DISK = StorageLevel(True, True, False, True)
StorageLevel.MEMORY_AND_DISK_2 = StorageLevel(True, True, False, True, 2)
StorageLevel.MEMORY_AND_DISK_SER = StorageLevel(True, True, False, False)
StorageLevel.MEMORY_AND_DISK_SER_2 = StorageLevel(True, True, False, False, 2)
StorageLevel.OFF_HEAP = StorageLevel(False, False, True, False, 1)
import unittest
from ansible.modules.cloud.google.gcp_url_map import _build_path_matchers, _build_url_map_dict
class TestGCPUrlMap(unittest.TestCase):
    """Unit tests for gcp_url_map module."""

    # Module input: a url_map specification as the Ansible module receives
    # it, with backend services referenced by their bare names.
    params_dict = {
        'url_map_name': 'foo_url_map_name',
        'description': 'foo_url_map description',
        'host_rules': [
            {
                'description': 'host rules description',
                'hosts': [
                    'www.example.com',
                    'www2.example.com'
                ],
                'path_matcher': 'host_rules_path_matcher'
            }
        ],
        'path_matchers': [
            {
                'name': 'path_matcher_one',
                'description': 'path matcher one',
                'defaultService': 'bes-pathmatcher-one-default',
                'pathRules': [
                    {
                        'service': 'my-one-bes',
                        'paths': [
                            '/',
                            '/aboutus'
                        ]
                    }
                ]
            },
            {
                'name': 'path_matcher_two',
                'description': 'path matcher two',
                'defaultService': 'bes-pathmatcher-two-default',
                'pathRules': [
                    {
                        'service': 'my-two-bes',
                        'paths': [
                            '/webapp',
                            '/graphs'
                        ]
                    }
                ]
            }
        ]
    }

    # Expected GCE representation of params_dict['path_matchers'] for
    # project 'my-project': each bare backend-service name expanded to a
    # full compute API URL.  Shared by both tests below so the large
    # literal is not duplicated.
    expected_path_matchers = [
        {
            'defaultService': 'https://www.googleapis.com/compute/v1/projects/my-project/global/backendServices/bes-pathmatcher-one-default',
            'description': 'path matcher one',
            'name': 'path_matcher_one',
            'pathRules': [
                {
                    'paths': [
                        '/',
                        '/aboutus'
                    ],
                    'service': 'https://www.googleapis.com/compute/v1/projects/my-project/global/backendServices/my-one-bes'
                }
            ]
        },
        {
            'defaultService': 'https://www.googleapis.com/compute/v1/projects/my-project/global/backendServices/bes-pathmatcher-two-default',
            'description': 'path matcher two',
            'name': 'path_matcher_two',
            'pathRules': [
                {
                    'paths': [
                        '/webapp',
                        '/graphs'
                    ],
                    'service': 'https://www.googleapis.com/compute/v1/projects/my-project/global/backendServices/my-two-bes'
                }
            ]
        }
    ]

    def test__build_path_matchers(self):
        """Bare backend-service names are expanded to full API URLs."""
        import copy
        # Deep-copy so any in-place mutation by the helper cannot leak
        # into params_dict and affect the other test.
        input_list = copy.deepcopy(self.params_dict['path_matchers'])
        actual = _build_path_matchers(input_list, 'my-project')
        self.assertEqual(self.expected_path_matchers, actual)

    def test__build_url_map_dict(self):
        """Full module params are converted to a GCE url_map dict."""
        expected = {
            'description': 'foo_url_map description',
            'hostRules': [
                {
                    'description': 'host rules description',
                    'hosts': [
                        'www.example.com',
                        'www2.example.com'
                    ],
                    'pathMatcher': 'host_rules_path_matcher'
                }
            ],
            'name': 'foo_url_map_name',
            'pathMatchers': self.expected_path_matchers,
        }
        actual = _build_url_map_dict(self.params_dict, 'my-project')
        self.assertEqual(expected, actual)
| gpl-3.0 |
savoirfairelinux/django | tests/template_tests/syntax_tests/i18n/test_filters.py | 133 | 2071 | from django.test import SimpleTestCase
from django.utils import translation
from ...utils import setup
class I18nFiltersTests(SimpleTestCase):
    """Tests for the i18n template filters (language_name and friends)."""

    # Template libraries made available to the engine built by @setup.
    libraries = {
        'custom': 'template_tests.templatetags.custom',
        'i18n': 'django.templatetags.i18n',
    }

    @setup({'i18n32': '{% load i18n %}{{ "hu"|language_name }} '
                      '{{ "hu"|language_name_local }} {{ "hu"|language_bidi }} '
                      '{{ "hu"|language_name_translated }}'})
    def test_i18n32(self):
        """Language filters applied to a literal language code."""
        output = self.engine.render_to_string('i18n32')
        self.assertEqual(output, 'Hungarian Magyar False Hungarian')
        # language_name_translated follows the active language; the other
        # filters are unaffected by the override.
        with translation.override('cs'):
            output = self.engine.render_to_string('i18n32')
            self.assertEqual(output, 'Hungarian Magyar False maďarsky')

    @setup({'i18n33': '{% load i18n %}'
                      '{{ langcode|language_name }} {{ langcode|language_name_local }} '
                      '{{ langcode|language_bidi }} {{ langcode|language_name_translated }}'})
    def test_i18n33(self):
        """Same filters, but with the language code coming from context."""
        output = self.engine.render_to_string('i18n33', {'langcode': 'nl'})
        self.assertEqual(output, 'Dutch Nederlands False Dutch')
        with translation.override('cs'):
            output = self.engine.render_to_string('i18n33', {'langcode': 'nl'})
            self.assertEqual(output, 'Dutch Nederlands False nizozemsky')

    @setup({'i18n38_2': '{% load i18n custom %}'
                        '{% get_language_info_list for langcodes|noop:"x y" as langs %}'
                        '{% for l in langs %}{{ l.code }}: {{ l.name }}/'
                        '{{ l.name_local }}/{{ l.name_translated }} '
                        'bidi={{ l.bidi }}; {% endfor %}'})
    def test_i18n38_2(self):
        """get_language_info_list accepts a filtered variable argument."""
        with translation.override('cs'):
            output = self.engine.render_to_string('i18n38_2', {'langcodes': ['it', 'fr']})
            self.assertEqual(
                output,
                'it: Italian/italiano/italsky bidi=False; '
                'fr: French/français/francouzsky bidi=False; '
            )
| bsd-3-clause |
phenoxim/cinder | cinder/tests/unit/backup/test_chunkeddriver.py | 2 | 20207 | # All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for the base chunkedbackupdriver class."""
import json
import uuid
import mock
from oslo_config import cfg
from oslo_utils import units
from cinder.backup import chunkeddriver as cbd
from cinder import context
from cinder import exception
from cinder import objects
from cinder.objects import fields
from cinder import test
# Global oslo.config handle used for default option values in the tests.
CONF = cfg.CONF
# Fixture payload for chunk read/write tests.  NOTE: the alphabet string
# deliberately lacks 'g' ("abcdefhijk..."); the md5 asserted in
# test_backup_chunk matches this exact byte sequence, so do not "fix" it.
TEST_DATA = ('abcdefhijklmnopqrstuvwxyz' * 10).encode('utf-8')
class ConcreteChunkedDriver(cbd.ChunkedBackupDriver):
    """Minimal concrete subclass used to exercise ChunkedBackupDriver.

    Object storage is faked: reads/writes go through the in-memory
    TestObjectReader/TestObjectWriter classes below.
    """

    def __init__(self, ctxt):
        # chunk_size=1, sha_block_size=1, container name 'container',
        # compression disabled.
        super(ConcreteChunkedDriver, self).__init__(
            ctxt, 1, 1, 'container', False)

    def _generate_object_name_prefix(self, backup):
        # Fixed prefix keeps generated object names predictable in tests.
        return 'test-'

    def delete_object(self, container, object_name):
        return True

    def get_container_entries(self, container, prefix):
        # One synthetic entry derived from container+prefix.
        return ['{}{}'.format(container, prefix)]

    def get_extra_metadata(self, backup, volume):
        return "{}extra_metadata".format(volume.id)

    def get_object_reader(self, *args, **kwargs):
        return TestObjectReader(*args, **kwargs)

    def get_object_writer(self, *args, **kwargs):
        return TestObjectWriter(self, *args, **kwargs)

    def put_container(self, bucket):
        pass

    def update_container_name(self, backup, bucket):
        # None means "keep the backup's existing container name".
        return None
class TestObjectWriter(object):
    """In-memory stand-in for an object-store writer.

    Records the last payload written and how many times write() was
    called, so tests can assert on what the driver produced.
    """

    def __init__(self, container, filename, extra_metadata=None):
        self.container = container
        self.filename = filename
        self.extra_metadata = extra_metadata
        # Capture state inspected by the tests.
        self.written_data = None
        self.write_count = 0

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        # Nothing to release; context-manager support only mirrors the
        # real writer's interface.
        pass

    def write(self, data):
        """Remember *data* as the most recent payload and count the call."""
        self.written_data = data
        self.write_count += 1
class TestObjectReader(object):
    """In-memory stand-in for an object-store reader.

    read() returns a fixed JSON metadata document in the same shape the
    chunked driver writes for backups.
    """

    def __init__(self, container, filename, extra_metadata=None):
        self.container = container
        self.filename = filename
        self.extra_metadata = extra_metadata
        self.written_data = None
        meta = {
            'version': 1,
            'backup_id': 'backupid',
            'volume_id': 'volumeid',
            'backup_name': 'backup_name',
            'backup_description': 'backup_description',
            'objects': ['obj1'],
            'parent_id': 'parent_id',
            'extra_metadata': 'extra_metadata',
            'chunk_size': 1,
            'sha256s': ['sha'],
        }
        # volume_meta is a serialized snapshot taken *before* the version
        # value is rewritten to a string and before volume_meta itself
        # exists — preserve this ordering; tests depend on it.
        meta['volume_meta'] = json.dumps(meta)
        meta['version'] = '1.0.0'
        self.metadata = meta

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        pass

    def read(self):
        """Return the metadata document as UTF-8 encoded JSON bytes."""
        return json.dumps(self.metadata).encode('utf-8')
class ChunkedDriverTestCase(test.TestCase):
    """Tests for the generic chunked backup driver base class."""

    def _create_backup_db_entry(self, volume_id=None,
                                restore_volume_id=None,
                                display_name='test_backup',
                                display_description='this is a test backup',
                                container='volumebackups',
                                status=fields.BackupStatus.CREATING,
                                size=1,
                                object_count=0,
                                project_id=None,
                                service=None,
                                temp_volume_id=None,
                                temp_snapshot_id=None,
                                snapshot_id=None,
                                metadata=None,
                                parent_id=None,
                                encryption_key_id=None):
        """Create a backup entry in the DB.

        Return the created backup object.
        """
        # Default arguments are evaluated only once, at class-definition
        # time.  The previous ``volume_id=str(uuid.uuid4())`` /
        # ``project_id=str(uuid.uuid4())`` defaults therefore made every
        # backup created without an explicit id share the *same* UUID for
        # the whole test run.  Generate a fresh id per call instead.
        if volume_id is None:
            volume_id = str(uuid.uuid4())
        if project_id is None:
            project_id = str(uuid.uuid4())
        kwargs = {}
        kwargs['volume_id'] = volume_id
        kwargs['restore_volume_id'] = restore_volume_id
        kwargs['user_id'] = str(uuid.uuid4())
        kwargs['project_id'] = project_id
        kwargs['host'] = 'testhost'
        kwargs['availability_zone'] = '1'
        kwargs['display_name'] = display_name
        kwargs['display_description'] = display_description
        kwargs['container'] = container
        kwargs['status'] = status
        kwargs['fail_reason'] = ''
        kwargs['service'] = service or CONF.backup_driver
        kwargs['snapshot_id'] = snapshot_id
        kwargs['parent_id'] = parent_id
        kwargs['size'] = size
        kwargs['object_count'] = object_count
        kwargs['temp_volume_id'] = temp_volume_id
        kwargs['temp_snapshot_id'] = temp_snapshot_id
        kwargs['metadata'] = metadata or {}
        kwargs['encryption_key_id'] = encryption_key_id
        kwargs['service_metadata'] = 'test_metadata'
        backup = objects.Backup(context=self.ctxt, **kwargs)
        backup.create()
        return backup

    def _create_volume_db_entry(self, display_name='test_volume',
                                display_description='this is a test volume',
                                status='backing-up',
                                previous_status='available',
                                size=1,
                                host='testhost',
                                encryption_key_id=None):
        """Create a volume entry in the DB.

        Return the entry ID
        """
        vol = {}
        vol['size'] = size
        vol['host'] = host
        vol['user_id'] = str(uuid.uuid4())
        vol['project_id'] = str(uuid.uuid4())
        vol['status'] = status
        vol['display_name'] = display_name
        vol['display_description'] = display_description
        vol['attach_status'] = fields.VolumeAttachStatus.DETACHED
        vol['availability_zone'] = '1'
        vol['previous_status'] = previous_status
        vol['encryption_key_id'] = encryption_key_id
        volume = objects.Volume(context=self.ctxt, **vol)
        volume.create()
        return volume.id

    def setUp(self):
        super(ChunkedDriverTestCase, self).setUp()
        self.ctxt = context.get_admin_context()
        self.driver = ConcreteChunkedDriver(self.ctxt)
        # Disable compression so written chunks are byte-for-byte TEST_DATA.
        self.driver.compressor = None
        self.volume = self._create_volume_db_entry()
        self.backup = self._create_backup_db_entry(volume_id=self.volume)

    def test_get_compressor_none(self):
        for algo in ['None', 'Off', 'No']:
            self.assertIsNone(self.driver._get_compressor(algo))

    def test_get_compressor_zlib(self):
        for algo in ['zlib', 'gzip']:
            self.assertIn('zlib', str(self.driver._get_compressor(algo)))

    def test_get_compressor_bz(self):
        for algo in ['bz2', 'bzip2']:
            self.assertIn('bz', str(self.driver._get_compressor(algo)))

    def test_get_compressor_invalid(self):
        self.assertRaises(ValueError, self.driver._get_compressor, 'winzip')

    def test_create_container(self):
        self.assertEqual(self.backup.container,
                         self.driver._create_container(self.backup))

    def test_create_container_default(self):
        # With no container on the backup, the driver's default is used.
        self.backup.container = None
        self.assertEqual('container',
                         self.driver._create_container(self.backup))

    def test_create_container_new_container(self):
        with mock.patch.object(self.driver, 'update_container_name',
                               return_value='new_and_improved'):
            self.assertEqual('new_and_improved',
                             self.driver._create_container(self.backup))

    def test_generate_object_names(self):
        obj_names = self.driver._generate_object_names(self.backup)
        self.assertEqual(1, len(obj_names))
        self.assertEqual('{}{}'.format(self.backup.container,
                                       self.backup.service_metadata),
                         obj_names[0])

    def test_metadata_filename(self):
        filename = self.driver._metadata_filename(self.backup)
        self.assertEqual('{}_metadata'.format(self.backup.service_metadata),
                         filename)

    def test_sha256_filename(self):
        filename = self.driver._sha256_filename(self.backup)
        self.assertEqual('{}_sha256file'.format(self.backup.service_metadata),
                         filename)

    def test_write_metadata(self):
        obj_writer = TestObjectWriter('', '')
        with mock.patch.object(self.driver, 'get_object_writer',
                               return_value=obj_writer):
            self.driver._write_metadata(self.backup, 'volid', 'contain_name',
                                        ['obj1'], 'volume_meta',
                                        extra_metadata='extra_metadata')
        self.assertIsNotNone(obj_writer.written_data)
        written_data = obj_writer.written_data.decode('utf-8')
        metadata = json.loads(written_data)
        self.assertEqual(self.driver.DRIVER_VERSION,
                         metadata.get('version'))
        self.assertEqual(self.backup.id, metadata.get('backup_id'))
        self.assertEqual('volid', metadata.get('volume_id'))
        self.assertEqual(self.backup.display_name,
                         metadata.get('backup_name'))
        self.assertEqual(self.backup.display_description,
                         metadata.get('backup_description'))
        self.assertEqual(['obj1'], metadata.get('objects'))
        self.assertEqual(self.backup.parent_id, metadata.get('parent_id'))
        self.assertEqual('volume_meta', metadata.get('volume_meta'))
        self.assertEqual('extra_metadata', metadata.get('extra_metadata'))

    def test_write_sha256file(self):
        obj_writer = TestObjectWriter('', '')
        with mock.patch.object(self.driver, 'get_object_writer',
                               return_value=obj_writer):
            self.driver._write_sha256file(self.backup, 'volid', 'contain_name',
                                          ['sha'])
        self.assertIsNotNone(obj_writer.written_data)
        written_data = obj_writer.written_data.decode('utf-8')
        metadata = json.loads(written_data)
        self.assertEqual(self.driver.DRIVER_VERSION,
                         metadata.get('version'))
        self.assertEqual(self.backup.id, metadata.get('backup_id'))
        self.assertEqual('volid', metadata.get('volume_id'))
        self.assertEqual(self.backup.display_name,
                         metadata.get('backup_name'))
        self.assertEqual(self.backup.display_description,
                         metadata.get('backup_description'))
        self.assertEqual(self.driver.sha_block_size_bytes,
                         metadata.get('chunk_size'))
        self.assertEqual(['sha'], metadata.get('sha256s'))

    def test_read_metadata(self):
        obj_reader = TestObjectReader('', '')
        with mock.patch.object(self.driver, 'get_object_reader',
                               return_value=obj_reader):
            metadata = self.driver._read_metadata(self.backup)
        self.assertIsNotNone(obj_reader.metadata)
        expected = obj_reader.metadata
        self.assertEqual(expected['version'], metadata['version'])
        self.assertEqual(expected['backup_id'], metadata['backup_id'])
        self.assertEqual(expected['volume_id'], metadata['volume_id'])
        self.assertEqual(expected['backup_name'], metadata['backup_name'])
        self.assertEqual(expected['backup_description'],
                         metadata['backup_description'])
        self.assertEqual(expected['objects'], metadata['objects'])
        self.assertEqual(expected['parent_id'], metadata['parent_id'])
        self.assertEqual(expected['volume_meta'], metadata['volume_meta'])
        self.assertEqual(expected['extra_metadata'],
                         metadata['extra_metadata'])

    def test_read_sha256file(self):
        obj_reader = TestObjectReader('', '')
        with mock.patch.object(self.driver, 'get_object_reader',
                               return_value=obj_reader):
            metadata = self.driver._read_sha256file(self.backup)
        self.assertIsNotNone(obj_reader.metadata)
        expected = obj_reader.metadata
        self.assertEqual(expected['version'], metadata['version'])
        self.assertEqual(expected['backup_id'], metadata['backup_id'])
        self.assertEqual(expected['volume_id'], metadata['volume_id'])
        self.assertEqual(expected['backup_name'], metadata['backup_name'])
        self.assertEqual(expected['backup_description'],
                         metadata['backup_description'])
        self.assertEqual(expected['chunk_size'], metadata['chunk_size'])
        self.assertEqual(expected['sha256s'], metadata['sha256s'])

    def test_prepare_backup(self):
        (object_meta, object_sha256, extra_metadata, container,
         volume_size_bytes) = self.driver._prepare_backup(self.backup)
        self.assertDictEqual({'id': 1,
                              'list': [],
                              'prefix': 'test-',
                              'volume_meta': None,
                              'extra_metadata': "{}extra_metadata".format(
                                  self.volume),
                              },
                             object_meta)
        self.assertDictEqual({'id': 1,
                              'sha256s': [],
                              'prefix': 'test-',
                              },
                             object_sha256)
        self.assertEqual(extra_metadata, object_meta['extra_metadata'])
        self.assertEqual(self.backup.container, container)
        self.assertEqual(self.backup.size * units.Gi, volume_size_bytes)

    def test_prepare_backup_invalid_size(self):
        volume = self._create_volume_db_entry(size=0)
        backup = self._create_backup_db_entry(volume_id=volume)
        self.assertRaises(exception.InvalidVolume,
                          self.driver._prepare_backup,
                          backup)

    def test_backup_chunk(self):
        (object_meta, object_sha256, extra_metadata, container,
         volume_size_bytes) = self.driver._prepare_backup(self.backup)
        obj_writer = TestObjectWriter('', '')
        with mock.patch.object(self.driver, 'get_object_writer',
                               return_value=obj_writer):
            self.driver._backup_chunk(self.backup,
                                      self.backup.container,
                                      TEST_DATA,
                                      0,
                                      object_meta,
                                      extra_metadata)
        self.assertEqual(TEST_DATA, obj_writer.written_data)
        self.assertEqual(1, len(object_meta['list']))
        self.assertEqual(2, object_meta['id'])
        chunk = object_meta['list'][0]['test--00001']
        # md5 of the exact TEST_DATA byte sequence defined above.
        self.assertEqual('b4bc937908ab6be6039b6d4141200de8', chunk['md5'])
        self.assertEqual(0, chunk['offset'])
        self.assertEqual(len(TEST_DATA), chunk['length'])

    def test_finalize_backup(self):
        (object_meta, object_sha256, extra_metadata, container,
         volume_size_bytes) = self.driver._prepare_backup(self.backup)
        obj_writer = TestObjectWriter('', '')
        with mock.patch.object(self.driver, 'get_object_writer',
                               return_value=obj_writer):
            self.driver._backup_chunk(self.backup,
                                      self.backup.container,
                                      TEST_DATA,
                                      0,
                                      object_meta,
                                      extra_metadata)
            self.driver._finalize_backup(self.backup,
                                         self.backup.container,
                                         object_meta,
                                         object_sha256)
        # TODO(smcginnis): Object count is either misnamed or we use it in an
        # odd way. We increment the object count from 1, so writing one chunk
        # results in an object count of 2. Should probably straighten that out
        # at some point.
        self.assertEqual(2, self.backup.object_count)

    def test_backup_metadata(self):
        object_meta = {}
        self.driver._backup_metadata(self.backup, object_meta)
        self.assertIn('volume_meta', object_meta)
        # Too much that we mostly don't care about for UT purposes. Just spot
        # check a few things
        metadata = json.loads(object_meta['volume_meta'])
        self.assertIn('volume-base-metadata', metadata)
        self.assertEqual(self.volume, metadata['volume-base-metadata']['id'])
        self.assertEqual(1, metadata['volume-base-metadata']['size'])
        self.assertEqual('test_volume',
                         metadata['volume-base-metadata']['display_name'])
        self.assertEqual('testhost', metadata['volume-base-metadata']['host'])

    @mock.patch('cinder.volume.utils.notify_about_backup_usage')
    def test_send_progress_end(self, mock_notify):
        obj_meta = {}
        self.driver._send_progress_end(self.ctxt, self.backup, obj_meta)
        self.assertEqual(100, obj_meta.get('backup_percent', 0))
        self.assertTrue(mock_notify.called)

    @mock.patch('cinder.volume.utils.notify_about_backup_usage')
    def test_send_progress_notification(self, mock_notify):
        obj_meta = {}
        self.driver._send_progress_notification(
            self.ctxt, self.backup, obj_meta, 1, 2)
        self.assertEqual(50, obj_meta.get('backup_percent', 0))
        self.assertTrue(mock_notify.called)

    @mock.patch('cinder.volume.utils.notify_about_backup_usage')
    def test_backup(self, mock_notify):
        # Fake a volume file that yields TEST_DATA once, then EOF.
        volume_file = mock.Mock()
        volume_file.tell.side_effect = [0, len(TEST_DATA)]
        volume_file.read.side_effect = [TEST_DATA, b'']
        obj_writer = TestObjectWriter('', '')
        with mock.patch.object(self.driver, 'get_object_writer',
                               return_value=obj_writer):
            self.driver.backup(self.backup, volume_file)
        mock_notify.assert_called()

    def test_backup_invalid_size(self):
        # chunk size must be a multiple of the sha block size.
        self.driver.chunk_size_bytes = 999
        self.driver.sha_block_size_bytes = 1024
        self.assertRaises(exception.InvalidBackup,
                          self.driver.backup,
                          self.backup,
                          mock.Mock())

    def test_restore(self):
        volume_file = mock.Mock()
        restore_test = mock.Mock()
        self.driver._restore_v1 = restore_test
        # Create a second backup
        backup = self._create_backup_db_entry(
            self.volume, parent_id=self.backup.id)
        with mock.patch.object(self.driver, 'put_metadata') as mock_put:
            self.driver.restore(backup, self.volume, volume_file)
            # Metadata is restored once per backup in the chain (two here).
            self.assertEqual(2, mock_put.call_count)
        restore_test.assert_called()

    def test_delete_backup(self):
        with mock.patch.object(self.driver, 'delete_object') as mock_delete:
            self.driver.delete_backup(self.backup)
        mock_delete.assert_called()
        self.assertEqual(1, mock_delete.call_count)
        mock_delete.assert_called_once_with(
            self.backup.container,
            self.backup.container + self.backup.service_metadata)
| apache-2.0 |
dcantrell/no | no.py | 1 | 3287 | #!/usr/bin/python
#
# no.py - Forbid installation of certain packages on yum systems
# Copyright (C) 2010 David Cantrell <david.l.cantrell@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import sys
from yum.plugins import TYPE_CORE
from yum.plugins import PluginYumExit
# Yum plugin declarations: minimum plugin API version and plugin category.
requires_api_version = '2.6'
plugin_type = (TYPE_CORE,)
def _checkPackage(pkg, property, author, forbid):
if getattr(pkg, property) in forbid:
return True
if author is not None:
found = filter(lambda x: getattr(pkg, property).find(x) != -1, forbid)
if len(found) > 0:
return True
return False
def predownload_hook(conduit):
    """Yum predownload hook: refuse to install forbidden packages.

    Checks every package about to be downloaded against the configured
    exclude lists (by package name, by committer, and by author appearing
    in the changelog).  Reports all forbidden packages on stderr and then
    raises PluginYumExit to abort the transaction.

    Note: the unused ``yb = conduit._base`` assignment from the original
    was removed; nothing else is changed.
    """
    # configuration options
    c = conduit.confString("exclude", "packages", default="")
    excludePackages = filter(lambda x: x != '', c.split())
    c = conduit.confString("exclude", "authors", default="")
    excludeAuthors = filter(lambda x: x != '', c.split())
    # look for forbidden packages: first by exact name, then by committer.
    triggers = [(excludePackages, 'name', None),
                (excludeAuthors, 'committer', 'committer')]
    downloadPackages = set(conduit.getDownloadPackages())
    forbidden = {}
    found = []
    for forbid, property, author in triggers:
        if len(downloadPackages) == 0:
            break
        found = filter(lambda x: _checkPackage(x, property, author, forbid),
                       downloadPackages)
        if len(found) > 0:
            for pkg in found:
                # Record the matched author (or None for name matches) so
                # the report below can say why the package is forbidden.
                if author is not None:
                    forbidden[pkg] = getattr(pkg, author)
                else:
                    forbidden[pkg] = author
            # Don't re-check packages already marked forbidden.
            downloadPackages = downloadPackages.difference(found)
    # exclude by author name appearing in changelog
    if len(downloadPackages) > 0:
        for pkg in downloadPackages:
            for logentry in pkg.changelog:
                if len(logentry) < 3:
                    continue
                # Extract "Name <email>" up to and including the '>'.
                try:
                    author = logentry[1][:logentry[1].index('>') + 1]
                except ValueError:
                    continue
                found = filter(lambda x: author.find(x) != -1, excludeAuthors)
                if len(found) > 0:
                    forbidden[pkg] = author
    # report existence of forbidden packages and reason, then force exit
    if forbidden != {}:
        for pkg in forbidden.keys():
            author = forbidden[pkg]
            if author is None:
                sys.stderr.write("*** This system forbids installation of %s.\n" % pkg.name)
            else:
                sys.stderr.write("*** %s touched by %s, installation forbidden.\n" % (pkg.name, author))
        raise PluginYumExit
| gpl-2.0 |
MarcusTan/yncn-grid | venv/lib/python2.7/site-packages/flask/testsuite/views.py | 561 | 5068 | # -*- coding: utf-8 -*-
"""
flask.testsuite.views
~~~~~~~~~~~~~~~~~~~~~
Pluggable views.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import flask
import flask.views
import unittest
from flask.testsuite import FlaskTestCase
from werkzeug.http import parse_set_header
class ViewTestCase(FlaskTestCase):
    """Tests for flask.views.View / MethodView pluggable views."""

    def common_test(self, app):
        """Shared assertions: GET/POST echo the method, PUT is rejected,
        and OPTIONS advertises exactly GET/HEAD/OPTIONS/POST."""
        c = app.test_client()
        self.assert_equal(c.get('/').data, b'GET')
        self.assert_equal(c.post('/').data, b'POST')
        self.assert_equal(c.put('/').status_code, 405)
        meths = parse_set_header(c.open('/', method='OPTIONS').headers['Allow'])
        self.assert_equal(sorted(meths), ['GET', 'HEAD', 'OPTIONS', 'POST'])

    def test_basic_view(self):
        """A plain View subclass with an explicit methods list."""
        app = flask.Flask(__name__)

        class Index(flask.views.View):
            methods = ['GET', 'POST']

            def dispatch_request(self):
                return flask.request.method

        app.add_url_rule('/', view_func=Index.as_view('index'))
        self.common_test(app)

    def test_method_based_view(self):
        """MethodView dispatches to get()/post() by HTTP method."""
        app = flask.Flask(__name__)

        class Index(flask.views.MethodView):
            def get(self):
                return 'GET'

            def post(self):
                return 'POST'

        app.add_url_rule('/', view_func=Index.as_view('index'))
        self.common_test(app)

    def test_view_patching(self):
        """Replacing view_class after as_view() routes to the new class."""
        app = flask.Flask(__name__)

        class Index(flask.views.MethodView):
            def get(self):
                1 // 0

            def post(self):
                1 // 0

        class Other(Index):
            def get(self):
                return 'GET'

            def post(self):
                return 'POST'

        view = Index.as_view('index')
        # Patch the class the view dispatches to; Index's broken handlers
        # must never run.
        view.view_class = Other
        app.add_url_rule('/', view_func=view)
        self.common_test(app)

    def test_view_inheritance(self):
        """Methods added in subclasses are reflected in the Allow header."""
        app = flask.Flask(__name__)

        class Index(flask.views.MethodView):
            def get(self):
                return 'GET'

            def post(self):
                return 'POST'

        class BetterIndex(Index):
            def delete(self):
                return 'DELETE'

        app.add_url_rule('/', view_func=BetterIndex.as_view('index'))
        c = app.test_client()
        meths = parse_set_header(c.open('/', method='OPTIONS').headers['Allow'])
        self.assert_equal(sorted(meths), ['DELETE', 'GET', 'HEAD', 'OPTIONS', 'POST'])

    def test_view_decorators(self):
        """Entries in View.decorators wrap the generated view function."""
        app = flask.Flask(__name__)

        def add_x_parachute(f):
            def new_function(*args, **kwargs):
                resp = flask.make_response(f(*args, **kwargs))
                resp.headers['X-Parachute'] = 'awesome'
                return resp
            return new_function

        class Index(flask.views.View):
            decorators = [add_x_parachute]

            def dispatch_request(self):
                return 'Awesome'

        app.add_url_rule('/', view_func=Index.as_view('index'))
        c = app.test_client()
        rv = c.get('/')
        self.assert_equal(rv.headers['X-Parachute'], 'awesome')
        self.assert_equal(rv.data, b'Awesome')

    def test_implicit_head(self):
        """HEAD falls back to get() with the body stripped."""
        app = flask.Flask(__name__)

        class Index(flask.views.MethodView):
            def get(self):
                return flask.Response('Blub', headers={
                    'X-Method': flask.request.method
                })

        app.add_url_rule('/', view_func=Index.as_view('index'))
        c = app.test_client()
        rv = c.get('/')
        self.assert_equal(rv.data, b'Blub')
        self.assert_equal(rv.headers['X-Method'], 'GET')
        rv = c.head('/')
        # No body on HEAD, but headers from get() are preserved.
        self.assert_equal(rv.data, b'')
        self.assert_equal(rv.headers['X-Method'], 'HEAD')

    def test_explicit_head(self):
        """An explicit head() handler takes precedence over get()."""
        app = flask.Flask(__name__)

        class Index(flask.views.MethodView):
            def get(self):
                return 'GET'

            def head(self):
                return flask.Response('', headers={'X-Method': 'HEAD'})

        app.add_url_rule('/', view_func=Index.as_view('index'))
        c = app.test_client()
        rv = c.get('/')
        self.assert_equal(rv.data, b'GET')
        rv = c.head('/')
        self.assert_equal(rv.data, b'')
        self.assert_equal(rv.headers['X-Method'], 'HEAD')

    def test_endpoint_override(self):
        """Registering the same endpoint twice asserts in debug mode."""
        app = flask.Flask(__name__)
        app.debug = True

        class Index(flask.views.View):
            methods = ['GET', 'POST']

            def dispatch_request(self):
                return flask.request.method

        app.add_url_rule('/', view_func=Index.as_view('index'))
        with self.assert_raises(AssertionError):
            app.add_url_rule('/', view_func=Index.as_view('index'))
        # But these tests should still pass. We just log a warning.
        self.common_test(app)
def suite():
    """Collect the pluggable-view test cases into a single test suite."""
    # Use a local name that does not shadow this function's own name.
    tests = unittest.TestSuite()
    tests.addTest(unittest.makeSuite(ViewTestCase))
    return tests
| mit |
richpolis/siveinpy | env/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/lxmletree.py | 355 | 6215 | from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import text_type
from lxml import etree
from ..treebuilders.etree import tag_regexp
from gettext import gettext
_ = gettext
from . import _base
from .. import ihatexml
def ensure_str(s):
    """Coerce *s* to a native text string; None passes through unchanged.

    Byte strings are decoded as strict UTF-8; text strings are returned
    as-is.
    """
    if s is None:
        return None
    if isinstance(s, text_type):
        return s
    return s.decode("utf-8", "strict")
class Root(object):
    """Wraps an lxml ElementTree so the tree walker can treat the document
    root as a node: children are the optional Doctype followed by all
    top-level siblings (comments/PIs around the root element included)."""

    def __init__(self, et):
        self.elementtree = et
        self.children = []
        # Synthesize a Doctype child from the tree's docinfo, if present.
        if et.docinfo.internalDTD:
            self.children.append(Doctype(self,
                                         ensure_str(et.docinfo.root_name),
                                         ensure_str(et.docinfo.public_id),
                                         ensure_str(et.docinfo.system_url)))
        root = et.getroot()
        node = root

        # Walk back to the first top-level sibling (e.g. leading comments
        # before the root element) ...
        while node.getprevious() is not None:
            node = node.getprevious()
        # ... then collect every top-level sibling in document order.
        while node is not None:
            self.children.append(node)
            node = node.getnext()

        # The document node itself carries no character data.
        self.text = None
        self.tail = None

    def __getitem__(self, key):
        return self.children[key]

    def getnext(self):
        # The document root has no siblings.
        return None

    def __len__(self):
        return 1
class Doctype(object):
    """Synthetic doctype node; always the first child of its Root."""

    def __init__(self, root_node, name, public_id, system_id):
        self.root_node = root_node
        self.name, self.public_id, self.system_id = name, public_id, system_id
        # Doctypes carry no character data; mirror the node interface.
        self.text = self.tail = None

    def getnext(self):
        # The doctype is always children[0] of the root, so its next
        # sibling is whatever the root holds at index 1.
        return self.root_node.children[1]
class FragmentRoot(Root):
    """Root for a document fragment: a flat list of wrapped children,
    with no ElementTree or doctype behind it."""

    def __init__(self, children):
        # Deliberately does NOT call Root.__init__ — there is no tree.
        self.children = [FragmentWrapper(self, child) for child in children]
        self.text = self.tail = None

    def getnext(self):
        return None
class FragmentWrapper(object):
    """Wraps one fragment child (an element, or a bare str/bytes text
    node) and gives it sibling navigation within its FragmentRoot.

    Attribute access not handled here is delegated to the wrapped object.
    """

    def __init__(self, fragment_root, obj):
        self.root_node = fragment_root
        self.obj = obj
        # Normalize text/tail to native strings, or None when absent.
        if hasattr(self.obj, 'text'):
            self.text = ensure_str(self.obj.text)
        else:
            self.text = None
        if hasattr(self.obj, 'tail'):
            self.tail = ensure_str(self.obj.tail)
        else:
            self.tail = None
        self.isstring = isinstance(obj, str) or isinstance(obj, bytes)
        # Support for bytes here is Py2
        if self.isstring:
            self.obj = ensure_str(self.obj)

    def __getattr__(self, name):
        # Delegate everything else to the wrapped object.
        return getattr(self.obj, name)

    def getnext(self):
        # Next sibling within the fragment root's ordered child list.
        siblings = self.root_node.children
        idx = siblings.index(self)
        if idx < len(siblings) - 1:
            return siblings[idx + 1]
        else:
            return None

    def __getitem__(self, key):
        return self.obj[key]

    def __bool__(self):
        return bool(self.obj)

    def getparent(self):
        # Fragment children have no parent element.
        return None

    def __str__(self):
        return str(self.obj)

    def __unicode__(self):
        return str(self.obj)

    def __len__(self):
        return len(self.obj)
class TreeWalker(_base.NonRecursiveTreeWalker):
    """html5lib tree walker for lxml trees.

    Text is represented as ``(element, "text")`` / ``(element, "tail")``
    tuples because lxml stores character data on elements rather than as
    separate nodes.
    """

    def __init__(self, tree):
        # Accept a full ElementTree, or a list of fragment children.
        if hasattr(tree, "getroot"):
            tree = Root(tree)
        elif isinstance(tree, list):
            tree = FragmentRoot(tree)
        _base.NonRecursiveTreeWalker.__init__(self, tree)
        # Maps lxml-coerced names back to their original (infoset) form.
        self.filter = ihatexml.InfosetFilter()

    def getNodeDetails(self, node):
        """Classify *node* and return the token tuple html5lib expects."""
        if isinstance(node, tuple):  # Text node
            node, key = node
            assert key in ("text", "tail"), _("Text nodes are text or tail, found %s") % key
            return _base.TEXT, ensure_str(getattr(node, key))

        elif isinstance(node, Root):
            return (_base.DOCUMENT,)

        elif isinstance(node, Doctype):
            return _base.DOCTYPE, node.name, node.public_id, node.system_id

        elif isinstance(node, FragmentWrapper) and node.isstring:
            return _base.TEXT, node.obj

        elif node.tag == etree.Comment:
            return _base.COMMENT, ensure_str(node.text)

        elif node.tag == etree.Entity:
            return _base.ENTITY, ensure_str(node.text)[1:-1]  # strip &;

        else:
            # This is assumed to be an ordinary element
            # Split "{namespace}tag" into its parts, if namespaced.
            match = tag_regexp.match(ensure_str(node.tag))
            if match:
                namespace, tag = match.groups()
            else:
                namespace = None
                tag = ensure_str(node.tag)
            attrs = {}
            for name, value in list(node.attrib.items()):
                name = ensure_str(name)
                value = ensure_str(value)
                match = tag_regexp.match(name)
                if match:
                    attrs[(match.group(1), match.group(2))] = value
                else:
                    attrs[(None, name)] = value
            # hasChildren is true for child elements OR leading text.
            return (_base.ELEMENT, namespace, self.filter.fromXmlName(tag),
                    attrs, len(node) > 0 or node.text)

    def getFirstChild(self, node):
        assert not isinstance(node, tuple), _("Text nodes have no children")

        assert len(node) or node.text, "Node has no children"
        # Leading text comes before the first child element.
        if node.text:
            return (node, "text")
        else:
            return node[0]

    def getNextSibling(self, node):
        if isinstance(node, tuple):  # Text node
            node, key = node
            assert key in ("text", "tail"), _("Text nodes are text or tail, found %s") % key
            if key == "text":
                # XXX: we cannot use a "bool(node) and node[0] or None" construct here
                # because node[0] might evaluate to False if it has no child element
                if len(node):
                    return node[0]
                else:
                    return None
            else:  # tail
                return node.getnext()

        # An element's tail text is its next sibling, if present.
        return (node, "tail") if node.tail else node.getnext()

    def getParentNode(self, node):
        if isinstance(node, tuple):  # Text node
            node, key = node
            assert key in ("text", "tail"), _("Text nodes are text or tail, found %s") % key
            if key == "text":
                return node
            # else: fallback to "normal" processing

        return node.getparent()
| mit |
extremewaysback/django | tests/test_client/tests.py | 29 | 30690 | # -*- coding: utf-8 -*-
"""
Testing using the Test Client
The test client is a class that can act like a simple
browser for testing purposes.
It allows the user to compose GET and POST requests, and
obtain the response that the server gave to those requests.
The server Response objects are annotated with the details
of the contexts and templates that were rendered during the
process of serving the request.
``Client`` objects are stateful - they will retain cookie (and
thus session) details for the lifetime of the ``Client`` instance.
This is not intended as a replacement for Twill, Selenium, or
other browser automation frameworks - it is here to allow
testing against the contexts and templates produced by a view,
rather than the HTML rendered to the end-user.
"""
from __future__ import unicode_literals
import datetime
from django.contrib.auth.models import User
from django.core import mail
from django.http import HttpResponse
from django.test import (
Client, RequestFactory, SimpleTestCase, TestCase, override_settings,
)
from .views import get_view, post_view, trace_view
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
                   ROOT_URLCONF='test_client.urls',)
class ClientTest(TestCase):
    """Exercise the Django test ``Client``: HTTP verbs, redirects, form
    handling, authentication (``login``/``force_login``/``logout``),
    permissions, sessions, exception propagation and mail capture.
    """
    @classmethod
    def setUpTestData(cls):
        # Three fixture users sharing one (pre-hashed "password") password:
        # a plain active user, an inactive user, and a staff member.
        cls.u1 = User.objects.create(
            password='sha1$6efc0$f93efe9fd7542f25a7be94871ea45aa95de57161',
            last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False, username='testclient',
            first_name='Test', last_name='Client', email='testclient@example.com', is_staff=False, is_active=True,
            date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
        )
        cls.u2 = User.objects.create(
            password='sha1$6efc0$f93efe9fd7542f25a7be94871ea45aa95de57161',
            last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False, username='inactive',
            first_name='Inactive', last_name='User', email='testclient@example.com', is_staff=False, is_active=False,
            date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
        )
        cls.u3 = User.objects.create(
            password='sha1$6efc0$f93efe9fd7542f25a7be94871ea45aa95de57161',
            last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False, username='staff',
            first_name='Staff', last_name='Member', email='testclient@example.com', is_staff=True, is_active=True,
            date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
        )
    def test_get_view(self):
        "GET a view"
        # The data is ignored, but let's check it doesn't crash the system
        # anyway.
        data = {'var': '\xf2'}
        response = self.client.get('/get_view/', data)
        # Check some response details
        self.assertContains(response, 'This is a test')
        self.assertEqual(response.context['var'], '\xf2')
        self.assertEqual(response.templates[0].name, 'GET Template')
    def test_get_post_view(self):
        "GET a view that normally expects POSTs"
        response = self.client.get('/post_view/', {})
        # Check some response details
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.templates[0].name, 'Empty GET Template')
        self.assertTemplateUsed(response, 'Empty GET Template')
        self.assertTemplateNotUsed(response, 'Empty POST Template')
    def test_empty_post(self):
        "POST an empty dictionary to a view"
        response = self.client.post('/post_view/', {})
        # Check some response details
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.templates[0].name, 'Empty POST Template')
        self.assertTemplateNotUsed(response, 'Empty GET Template')
        self.assertTemplateUsed(response, 'Empty POST Template')
    def test_post(self):
        "POST some data to a view"
        post_data = {
            'value': 37
        }
        response = self.client.post('/post_view/', post_data)
        # Check some response details
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['data'], '37')
        self.assertEqual(response.templates[0].name, 'POST Template')
        self.assertContains(response, 'Data received')
    def test_trace(self):
        """TRACE a view"""
        response = self.client.trace('/trace_view/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['method'], 'TRACE')
        self.assertEqual(response.templates[0].name, 'TRACE Template')
    def test_response_headers(self):
        "Check the value of HTTP headers returned in a response"
        response = self.client.get("/header_view/")
        self.assertEqual(response['X-DJANGO-TEST'], 'Slartibartfast')
    def test_response_attached_request(self):
        """
        Check that the returned response has a ``request`` attribute with the
        originating environ dict and a ``wsgi_request`` with the originating
        ``WSGIRequest`` instance.
        """
        response = self.client.get("/header_view/")
        self.assertTrue(hasattr(response, 'request'))
        self.assertTrue(hasattr(response, 'wsgi_request'))
        for key, value in response.request.items():
            self.assertIn(key, response.wsgi_request.environ)
            self.assertEqual(response.wsgi_request.environ[key], value)
    def test_response_resolver_match(self):
        """
        The response contains a ResolverMatch instance.
        """
        response = self.client.get('/header_view/')
        self.assertTrue(hasattr(response, 'resolver_match'))
    def test_response_resolver_match_redirect_follow(self):
        """
        The response ResolverMatch instance contains the correct
        information when following redirects.
        """
        response = self.client.get('/redirect_view/', follow=True)
        self.assertEqual(response.resolver_match.url_name, 'get_view')
    def test_response_resolver_match_regular_view(self):
        """
        The response ResolverMatch instance contains the correct
        information when accessing a regular view.
        """
        response = self.client.get('/get_view/')
        self.assertEqual(response.resolver_match.url_name, 'get_view')
    def test_raw_post(self):
        "POST raw data (with a content type) to a view"
        test_doc = """<?xml version="1.0" encoding="utf-8"?>
        <library><book><title>Blink</title><author>Malcolm Gladwell</author></book></library>
        """
        response = self.client.post("/raw_post_view/", test_doc,
                                    content_type="text/xml")
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.templates[0].name, "Book template")
        self.assertEqual(response.content, b"Blink - Malcolm Gladwell")
    def test_insecure(self):
        "GET a URL through http"
        response = self.client.get('/secure_view/', secure=False)
        self.assertFalse(response.test_was_secure_request)
        self.assertEqual(response.test_server_port, '80')
    def test_secure(self):
        "GET a URL through https"
        response = self.client.get('/secure_view/', secure=True)
        self.assertTrue(response.test_was_secure_request)
        self.assertEqual(response.test_server_port, '443')
    def test_redirect(self):
        "GET a URL that redirects elsewhere"
        response = self.client.get('/redirect_view/')
        # Check that the response was a 302 (redirect)
        self.assertRedirects(response, '/get_view/')
    def test_redirect_with_query(self):
        "GET a URL that redirects with given GET parameters"
        response = self.client.get('/redirect_view/', {'var': 'value'})
        # Check if parameters are intact
        self.assertRedirects(response, '/get_view/?var=value')
    def test_permanent_redirect(self):
        "GET a URL that redirects permanently elsewhere"
        response = self.client.get('/permanent_redirect_view/')
        # Check that the response was a 301 (permanent redirect)
        self.assertRedirects(response, '/get_view/', status_code=301)
    def test_temporary_redirect(self):
        "GET a URL that does a non-permanent redirect"
        response = self.client.get('/temporary_redirect_view/')
        # Check that the response was a 302 (non-permanent redirect)
        self.assertRedirects(response, '/get_view/', status_code=302)
    def test_redirect_to_strange_location(self):
        "GET a URL that redirects to a non-200 page"
        response = self.client.get('/double_redirect_view/')
        # Check that the response was a 302, and that
        # the attempt to get the redirection location returned 301 when retrieved
        self.assertRedirects(response, '/permanent_redirect_view/', target_status_code=301)
    def test_follow_redirect(self):
        "A URL that redirects can be followed to termination."
        response = self.client.get('/double_redirect_view/', follow=True)
        self.assertRedirects(response, '/get_view/', status_code=302, target_status_code=200)
        self.assertEqual(len(response.redirect_chain), 2)
    def test_redirect_http(self):
        "GET a URL that redirects to an http URI"
        response = self.client.get('/http_redirect_view/', follow=True)
        self.assertFalse(response.test_was_secure_request)
    def test_redirect_https(self):
        "GET a URL that redirects to an https URI"
        response = self.client.get('/https_redirect_view/', follow=True)
        self.assertTrue(response.test_was_secure_request)
    def test_notfound_response(self):
        "GET a URL that responds as '404:Not Found'"
        response = self.client.get('/bad_view/')
        # Check that the response was a 404, and that the content contains MAGIC
        self.assertContains(response, 'MAGIC', status_code=404)
    def test_valid_form(self):
        "POST valid data to a form"
        post_data = {
            'text': 'Hello World',
            'email': 'foo@example.com',
            'value': 37,
            'single': 'b',
            'multi': ('b', 'c', 'e')
        }
        response = self.client.post('/form_view/', post_data)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "Valid POST Template")
    def test_valid_form_with_hints(self):
        "GET a form, providing hints in the GET data"
        hints = {
            'text': 'Hello World',
            'multi': ('b', 'c', 'e')
        }
        response = self.client.get('/form_view/', data=hints)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "Form GET Template")
        # Check that the multi-value data has been rolled out ok
        self.assertContains(response, 'Select a valid choice.', 0)
    def test_incomplete_data_form(self):
        "POST incomplete data to a form"
        post_data = {
            'text': 'Hello World',
            'value': 37
        }
        response = self.client.post('/form_view/', post_data)
        self.assertContains(response, 'This field is required.', 3)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "Invalid POST Template")
        self.assertFormError(response, 'form', 'email', 'This field is required.')
        self.assertFormError(response, 'form', 'single', 'This field is required.')
        self.assertFormError(response, 'form', 'multi', 'This field is required.')
    def test_form_error(self):
        "POST erroneous data to a form"
        post_data = {
            'text': 'Hello World',
            'email': 'not an email address',
            'value': 37,
            'single': 'b',
            'multi': ('b', 'c', 'e')
        }
        response = self.client.post('/form_view/', post_data)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "Invalid POST Template")
        self.assertFormError(response, 'form', 'email', 'Enter a valid email address.')
    def test_valid_form_with_template(self):
        "POST valid data to a form using multiple templates"
        post_data = {
            'text': 'Hello World',
            'email': 'foo@example.com',
            'value': 37,
            'single': 'b',
            'multi': ('b', 'c', 'e')
        }
        response = self.client.post('/form_view_with_template/', post_data)
        self.assertContains(response, 'POST data OK')
        self.assertTemplateUsed(response, "form_view.html")
        self.assertTemplateUsed(response, 'base.html')
        self.assertTemplateNotUsed(response, "Valid POST Template")
    def test_incomplete_data_form_with_template(self):
        "POST incomplete data to a form using multiple templates"
        post_data = {
            'text': 'Hello World',
            'value': 37
        }
        response = self.client.post('/form_view_with_template/', post_data)
        self.assertContains(response, 'POST data has errors')
        self.assertTemplateUsed(response, 'form_view.html')
        self.assertTemplateUsed(response, 'base.html')
        self.assertTemplateNotUsed(response, "Invalid POST Template")
        self.assertFormError(response, 'form', 'email', 'This field is required.')
        self.assertFormError(response, 'form', 'single', 'This field is required.')
        self.assertFormError(response, 'form', 'multi', 'This field is required.')
    def test_form_error_with_template(self):
        "POST erroneous data to a form using multiple templates"
        post_data = {
            'text': 'Hello World',
            'email': 'not an email address',
            'value': 37,
            'single': 'b',
            'multi': ('b', 'c', 'e')
        }
        response = self.client.post('/form_view_with_template/', post_data)
        self.assertContains(response, 'POST data has errors')
        self.assertTemplateUsed(response, "form_view.html")
        self.assertTemplateUsed(response, 'base.html')
        self.assertTemplateNotUsed(response, "Invalid POST Template")
        self.assertFormError(response, 'form', 'email', 'Enter a valid email address.')
    def test_unknown_page(self):
        "GET an invalid URL"
        response = self.client.get('/unknown_view/')
        # Check that the response was a 404
        self.assertEqual(response.status_code, 404)
    def test_url_parameters(self):
        "Make sure that URL ;-parameters are not stripped."
        response = self.client.get('/unknown_view/;some-parameter')
        # Check that the path in the response includes it (ignore that it's a 404)
        self.assertEqual(response.request['PATH_INFO'], '/unknown_view/;some-parameter')
    def test_view_with_login(self):
        "Request a page that is protected with @login_required"
        # Get the page without logging in. Should result in 302.
        response = self.client.get('/login_protected_view/')
        self.assertRedirects(response, '/accounts/login/?next=/login_protected_view/')
        # Log in
        login = self.client.login(username='testclient', password='password')
        self.assertTrue(login, 'Could not log in')
        # Request a page that requires a login
        response = self.client.get('/login_protected_view/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['user'].username, 'testclient')
    def test_view_with_force_login(self):
        "Request a page that is protected with @login_required"
        # Get the page without logging in. Should result in 302.
        response = self.client.get('/login_protected_view/')
        self.assertRedirects(response, '/accounts/login/?next=/login_protected_view/')
        # Log in
        self.client.force_login(self.u1)
        # Request a page that requires a login
        response = self.client.get('/login_protected_view/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['user'].username, 'testclient')
    def test_view_with_method_login(self):
        "Request a page that is protected with a @login_required method"
        # Get the page without logging in. Should result in 302.
        response = self.client.get('/login_protected_method_view/')
        self.assertRedirects(response, '/accounts/login/?next=/login_protected_method_view/')
        # Log in
        login = self.client.login(username='testclient', password='password')
        self.assertTrue(login, 'Could not log in')
        # Request a page that requires a login
        response = self.client.get('/login_protected_method_view/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['user'].username, 'testclient')
    def test_view_with_method_force_login(self):
        "Request a page that is protected with a @login_required method"
        # Get the page without logging in. Should result in 302.
        response = self.client.get('/login_protected_method_view/')
        self.assertRedirects(response, '/accounts/login/?next=/login_protected_method_view/')
        # Log in
        self.client.force_login(self.u1)
        # Request a page that requires a login
        response = self.client.get('/login_protected_method_view/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['user'].username, 'testclient')
    def test_view_with_login_and_custom_redirect(self):
        "Request a page that is protected with @login_required(redirect_field_name='redirect_to')"
        # Get the page without logging in. Should result in 302.
        response = self.client.get('/login_protected_view_custom_redirect/')
        self.assertRedirects(response, '/accounts/login/?redirect_to=/login_protected_view_custom_redirect/')
        # Log in
        login = self.client.login(username='testclient', password='password')
        self.assertTrue(login, 'Could not log in')
        # Request a page that requires a login
        response = self.client.get('/login_protected_view_custom_redirect/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['user'].username, 'testclient')
    def test_view_with_force_login_and_custom_redirect(self):
        """
        Request a page that is protected with
        @login_required(redirect_field_name='redirect_to')
        """
        # Get the page without logging in. Should result in 302.
        response = self.client.get('/login_protected_view_custom_redirect/')
        self.assertRedirects(response, '/accounts/login/?redirect_to=/login_protected_view_custom_redirect/')
        # Log in
        self.client.force_login(self.u1)
        # Request a page that requires a login
        response = self.client.get('/login_protected_view_custom_redirect/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['user'].username, 'testclient')
    def test_view_with_bad_login(self):
        "Request a page that is protected with @login, but use bad credentials"
        login = self.client.login(username='otheruser', password='nopassword')
        self.assertFalse(login)
    def test_view_with_inactive_login(self):
        "Request a page that is protected with @login, but use an inactive login"
        login = self.client.login(username='inactive', password='password')
        self.assertFalse(login)
    def test_view_with_inactive_force_login(self):
        "Request a page that is protected with @login, but use an inactive login"
        # Get the page without logging in. Should result in 302.
        response = self.client.get('/login_protected_view/')
        self.assertRedirects(response, '/accounts/login/?next=/login_protected_view/')
        # Log in
        self.client.force_login(self.u2)
        # Request a page that requires a login
        response = self.client.get('/login_protected_view/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['user'].username, 'inactive')
    def test_logout(self):
        "Request a logout after logging in"
        # Log in
        self.client.login(username='testclient', password='password')
        # Request a page that requires a login
        response = self.client.get('/login_protected_view/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['user'].username, 'testclient')
        # Log out
        self.client.logout()
        # Request a page that requires a login
        response = self.client.get('/login_protected_view/')
        self.assertRedirects(response, '/accounts/login/?next=/login_protected_view/')
    def test_logout_with_force_login(self):
        "Request a logout after logging in"
        # Log in
        self.client.force_login(self.u1)
        # Request a page that requires a login
        response = self.client.get('/login_protected_view/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['user'].username, 'testclient')
        # Log out
        self.client.logout()
        # Request a page that requires a login
        response = self.client.get('/login_protected_view/')
        self.assertRedirects(response, '/accounts/login/?next=/login_protected_view/')
    @override_settings(
        AUTHENTICATION_BACKENDS=[
            'django.contrib.auth.backends.ModelBackend',
            'test_client.auth_backends.TestClientBackend',
        ],
    )
    def test_force_login_with_backend(self):
        """
        Request a page that is protected with @login_required when using
        force_login() and passing a backend.
        """
        # Get the page without logging in. Should result in 302.
        response = self.client.get('/login_protected_view/')
        self.assertRedirects(response, '/accounts/login/?next=/login_protected_view/')
        # Log in
        self.client.force_login(self.u1, backend='test_client.auth_backends.TestClientBackend')
        self.assertEqual(self.u1.backend, 'test_client.auth_backends.TestClientBackend')
        # Request a page that requires a login
        response = self.client.get('/login_protected_view/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['user'].username, 'testclient')
    @override_settings(SESSION_ENGINE="django.contrib.sessions.backends.signed_cookies")
    def test_logout_cookie_sessions(self):
        # Re-run the logout scenario with the cookie-based session backend.
        self.test_logout()
    def test_view_with_permissions(self):
        "Request a page that is protected with @permission_required"
        # Get the page without logging in. Should result in 302.
        response = self.client.get('/permission_protected_view/')
        self.assertRedirects(response, '/accounts/login/?next=/permission_protected_view/')
        # Log in
        login = self.client.login(username='testclient', password='password')
        self.assertTrue(login, 'Could not log in')
        # Log in with wrong permissions. Should result in 302.
        response = self.client.get('/permission_protected_view/')
        self.assertRedirects(response, '/accounts/login/?next=/permission_protected_view/')
        # TODO: Log in with right permissions and request the page again
    def test_view_with_permissions_exception(self):
        "Request a page that is protected with @permission_required but raises an exception"
        # Get the page without logging in. Should result in 403.
        response = self.client.get('/permission_protected_view_exception/')
        self.assertEqual(response.status_code, 403)
        # Log in
        login = self.client.login(username='testclient', password='password')
        self.assertTrue(login, 'Could not log in')
        # Log in with wrong permissions. Should result in 403.
        response = self.client.get('/permission_protected_view_exception/')
        self.assertEqual(response.status_code, 403)
    def test_view_with_method_permissions(self):
        "Request a page that is protected with a @permission_required method"
        # Get the page without logging in. Should result in 302.
        response = self.client.get('/permission_protected_method_view/')
        self.assertRedirects(response, '/accounts/login/?next=/permission_protected_method_view/')
        # Log in
        login = self.client.login(username='testclient', password='password')
        self.assertTrue(login, 'Could not log in')
        # Log in with wrong permissions. Should result in 302.
        response = self.client.get('/permission_protected_method_view/')
        self.assertRedirects(response, '/accounts/login/?next=/permission_protected_method_view/')
        # TODO: Log in with right permissions and request the page again
    def test_external_redirect(self):
        # Redirects to an external host are not followed for content checks.
        response = self.client.get('/django_project_redirect/')
        self.assertRedirects(response, 'https://www.djangoproject.com/', fetch_redirect_response=False)
    def test_session_modifying_view(self):
        "Request a page that modifies the session"
        # Session value isn't set initially
        try:
            self.client.session['tobacconist']
            self.fail("Shouldn't have a session value")
        except KeyError:
            pass
        self.client.post('/session_view/')
        # Check that the session was modified
        self.assertEqual(self.client.session['tobacconist'], 'hovercraft')
    def test_view_with_exception(self):
        "Request a page that is known to throw an error"
        self.assertRaises(KeyError, self.client.get, "/broken_view/")
        # Try the same assertion, a different way
        try:
            self.client.get('/broken_view/')
            self.fail('Should raise an error')
        except KeyError:
            pass
    def test_mail_sending(self):
        "Test that mail is redirected to a dummy outbox during test setup"
        response = self.client.get('/mail_sending_view/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].subject, 'Test message')
        self.assertEqual(mail.outbox[0].body, 'This is a test email')
        self.assertEqual(mail.outbox[0].from_email, 'from@example.com')
        self.assertEqual(mail.outbox[0].to[0], 'first@example.com')
        self.assertEqual(mail.outbox[0].to[1], 'second@example.com')
    def test_mass_mail_sending(self):
        "Test that mass mail is redirected to a dummy outbox during test setup"
        response = self.client.get('/mass_mail_sending_view/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(mail.outbox), 2)
        self.assertEqual(mail.outbox[0].subject, 'First Test message')
        self.assertEqual(mail.outbox[0].body, 'This is the first test email')
        self.assertEqual(mail.outbox[0].from_email, 'from@example.com')
        self.assertEqual(mail.outbox[0].to[0], 'first@example.com')
        self.assertEqual(mail.outbox[0].to[1], 'second@example.com')
        self.assertEqual(mail.outbox[1].subject, 'Second Test message')
        self.assertEqual(mail.outbox[1].body, 'This is the second test email')
        self.assertEqual(mail.outbox[1].from_email, 'from@example.com')
        self.assertEqual(mail.outbox[1].to[0], 'second@example.com')
        self.assertEqual(mail.outbox[1].to[1], 'third@example.com')
    def test_exception_following_nested_client_request(self):
        """
        A nested test client request shouldn't clobber exception signals from
        the outer client request.
        """
        with self.assertRaisesMessage(Exception, 'exception message'):
            self.client.get('/nesting_exception_view/')
@override_settings(
    MIDDLEWARE_CLASSES=['django.middleware.csrf.CsrfViewMiddleware'],
    ROOT_URLCONF='test_client.urls',
)
class CSRFEnabledClientTests(SimpleTestCase):
    def test_csrf_enabled_client(self):
        "A client can be instantiated with CSRF checks enabled"
        enforcing_client = Client(enforce_csrf_checks=True)
        # The default test client skips CSRF checks, so the POST succeeds...
        ok_response = self.client.post('/post_view/', {})
        self.assertEqual(ok_response.status_code, 200)
        # ...while the enforcing client rejects the same token-less POST.
        denied_response = enforcing_client.post('/post_view/', {})
        self.assertEqual(denied_response.status_code, 403)
class CustomTestClient(Client):
    """Client subclass used to verify that ``client_class`` is honored."""
    # Marker attribute checked by CustomTestClientTest.
    i_am_customized = "Yes"
class CustomTestClientTest(SimpleTestCase):
    """Verify that a test case's ``client_class`` controls ``self.client``."""
    client_class = CustomTestClient
    def test_custom_test_client(self):
        """A test case can specify a custom class for self.client."""
        # assertTrue is the idiomatic check; comparing hasattr() to True
        # with assertEqual obscured the intent and the failure message.
        self.assertTrue(hasattr(self.client, "i_am_customized"))
_generic_view = lambda request: HttpResponse(status=200)
@override_settings(ROOT_URLCONF='test_client.urls')
class RequestFactoryTest(SimpleTestCase):
    """Tests for the request factory."""
    # A mapping between names of HTTP/1.1 methods and their test views.
    http_methods_and_views = (
        ('get', get_view),
        ('post', post_view),
        ('put', _generic_view),
        ('patch', _generic_view),
        ('delete', _generic_view),
        ('head', _generic_view),
        ('options', _generic_view),
        ('trace', trace_view),
    )
    def setUp(self):
        self.request_factory = RequestFactory()
    def test_request_factory(self):
        """The request factory implements all the HTTP/1.1 methods."""
        for verb, view_func in self.http_methods_and_views:
            factory_method = getattr(self.request_factory, verb)
            fake_request = factory_method('/somewhere/')
            rendered = view_func(fake_request)
            self.assertEqual(rendered.status_code, 200)
    def test_get_request_from_factory(self):
        """
        The request factory returns a templated response for a GET request.
        """
        fake_request = self.request_factory.get('/somewhere/')
        rendered = get_view(fake_request)
        self.assertEqual(rendered.status_code, 200)
        self.assertContains(rendered, 'This is a test')
    def test_trace_request_from_factory(self):
        """The request factory returns an echo response for a TRACE request."""
        url_path = '/somewhere/'
        fake_request = self.request_factory.trace(url_path)
        rendered = trace_view(fake_request)
        # The TRACE view echoes the request line back in the body.
        expected_line = "TRACE {} {}".format(url_path, fake_request.META["SERVER_PROTOCOL"])
        self.assertEqual(rendered.status_code, 200)
        self.assertContains(rendered, expected_line)
| bsd-3-clause |
katiecheng/Bombolone | env/lib/python2.7/site-packages/werkzeug/contrib/iterio.py | 89 | 8239 | # -*- coding: utf-8 -*-
r"""
werkzeug.contrib.iterio
~~~~~~~~~~~~~~~~~~~~~~~
This module implements a :class:`IterIO` that converts an iterator into
a stream object and the other way round. Converting streams into
iterators requires the `greenlet`_ module.
To convert an iterator into a stream all you have to do is to pass it
directly to the :class:`IterIO` constructor. In this example we pass it
a newly created generator::
def foo():
yield "something\n"
yield "otherthings"
stream = IterIO(foo())
print stream.read() # read the whole iterator
The other way round works a bit different because we have to ensure that
the code execution doesn't take place yet. An :class:`IterIO` call with a
callable as first argument does two things. The function itself is passed
an :class:`IterIO` stream it can feed. The object returned by the
:class:`IterIO` constructor on the other hand is not an stream object but
an iterator::
def foo(stream):
stream.write("some")
stream.write("thing")
stream.flush()
stream.write("otherthing")
iterator = IterIO(foo)
print iterator.next() # prints something
print iterator.next() # prints otherthing
iterator.next() # raises StopIteration
.. _greenlet: http://codespeak.net/py/dist/greenlet.html
:copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
try:
import greenlet
except ImportError:
greenlet = None
class IterIO(object):
    """Instances of this object implement an interface compatible with the
    standard Python :class:`file` object. Streams are either read-only or
    write-only depending on how the object is created.
    """
    def __new__(cls, obj):
        # Factory dispatch: an iterable becomes a readable stream (IterO);
        # a non-iterable is assumed to be a callable and becomes a
        # writable-stream-driven iterator (IterI).
        try:
            iterator = iter(obj)
        except TypeError:
            return IterI(obj)
        return IterO(iterator)
    def __iter__(self):
        return self
    def tell(self):
        # Current position in the stream; invalid once closed.
        if self.closed:
            raise ValueError('I/O operation on closed file')
        return self.pos
    def isatty(self):
        if self.closed:
            raise ValueError('I/O operation on closed file')
        return False
    # The remaining operations are unsupported on the base class; concrete
    # subclasses override the ones valid for their direction. IOError with
    # errno 9 (EBADF) mirrors what a real file object raises.
    def seek(self, pos, mode=0):
        if self.closed:
            raise ValueError('I/O operation on closed file')
        raise IOError(9, 'Bad file descriptor')
    def truncate(self, size=None):
        if self.closed:
            raise ValueError('I/O operation on closed file')
        raise IOError(9, 'Bad file descriptor')
    def write(self, s):
        if self.closed:
            raise ValueError('I/O operation on closed file')
        raise IOError(9, 'Bad file descriptor')
    def writelines(self, list):
        if self.closed:
            raise ValueError('I/O operation on closed file')
        raise IOError(9, 'Bad file descriptor')
    def read(self, n=-1):
        if self.closed:
            raise ValueError('I/O operation on closed file')
        raise IOError(9, 'Bad file descriptor')
    def readlines(self, sizehint=0):
        if self.closed:
            raise ValueError('I/O operation on closed file')
        raise IOError(9, 'Bad file descriptor')
    def readline(self, length=None):
        if self.closed:
            raise ValueError('I/O operation on closed file')
        raise IOError(9, 'Bad file descriptor')
    def flush(self):
        if self.closed:
            raise ValueError('I/O operation on closed file')
        raise IOError(9, 'Bad file descriptor')
    def next(self):
        # Python 2 iterator protocol: yield one line per iteration step.
        if self.closed:
            raise StopIteration()
        line = self.readline()
        if not line:
            raise StopIteration()
        return line
class IterI(IterIO):
    """Convert an stream into an iterator."""
    def __new__(cls, func):
        # NOTE: because __new__ contains a ``yield``, calling IterI(func)
        # returns a generator, not an IterI instance. The IterI instance
        # (``stream``) is what *func* writes into; each stream.flush()
        # switches back to this greenlet and yields the buffered data.
        if greenlet is None:
            raise RuntimeError('IterI requires greenlet support')
        stream = object.__new__(cls)
        stream._parent = greenlet.getcurrent()
        stream._buffer = []
        stream.closed = False
        stream.pos = 0
        def run():
            func(stream)
            stream.flush()
        g = greenlet.greenlet(run, stream._parent)
        while 1:
            rv = g.switch()
            # run() finishing switches back with no payload -> iteration ends.
            if not rv:
                return
            yield rv[0]
    def close(self):
        if not self.closed:
            self.closed = True
    def write(self, s):
        if self.closed:
            raise ValueError('I/O operation on closed file')
        # Data is accumulated until flush() hands it to the consumer.
        self.pos += len(s)
        self._buffer.append(s)
    def writelines(self, list):
        self.write(''.join(list))
    def flush(self):
        if self.closed:
            raise ValueError('I/O operation on closed file')
        # Join the buffered chunks and switch to the parent greenlet,
        # which yields the data from the generator returned by __new__.
        data = ''.join(self._buffer)
        self._buffer = []
        self._parent.switch((data,))
class IterO(IterIO):
    """Iter output. Wrap an iterator and give it a stream like interface."""
    # Strategy: consumed iterator items are appended to ``_buf`` so that
    # already-read data can be re-read after a seek(); ``pos`` is the
    # read cursor within that buffer.
    def __new__(cls, gen):
        self = object.__new__(cls)
        self._gen = gen
        self._buf = ''
        self.closed = False
        self.pos = 0
        return self
    def __iter__(self):
        return self
    def close(self):
        if not self.closed:
            self.closed = True
            # Propagate close to generators that support it.
            if hasattr(self._gen, 'close'):
                self._gen.close()
    def seek(self, pos, mode=0):
        """Seek within the buffered data, pulling from the iterator as needed.

        mode 0 = absolute, 1 = relative to current, 2 = relative to end
        (which forces the whole iterator to be consumed).
        """
        if self.closed:
            raise ValueError('I/O operation on closed file')
        if mode == 1:
            pos += self.pos
        elif mode == 2:
            self.read()
            self.pos = min(self.pos, self.pos + pos)
            return
        elif mode != 0:
            raise IOError('Invalid argument')
        buf = []
        try:
            # Consume iterator items until the buffer covers ``pos``.
            tmp_end_pos = len(self._buf)
            while pos > tmp_end_pos:
                item = self._gen.next()
                tmp_end_pos += len(item)
                buf.append(item)
        except StopIteration:
            pass
        if buf:
            self._buf += ''.join(buf)
        self.pos = max(0, pos)
    def read(self, n=-1):
        """Read up to *n* characters; a negative *n* reads to exhaustion."""
        if self.closed:
            raise ValueError('I/O operation on closed file')
        if n < 0:
            # Drain the iterator completely and return everything unread.
            self._buf += ''.join(self._gen)
            result = self._buf[self.pos:]
            self.pos += len(result)
            return result
        new_pos = self.pos + n
        buf = []
        try:
            # Pull items until the buffer reaches the requested end position.
            tmp_end_pos = len(self._buf)
            while new_pos > tmp_end_pos:
                item = self._gen.next()
                tmp_end_pos += len(item)
                buf.append(item)
        except StopIteration:
            pass
        if buf:
            self._buf += ''.join(buf)
        new_pos = max(0, new_pos)
        try:
            return self._buf[self.pos:new_pos]
        finally:
            # Advance the cursor, clamped to the data actually available.
            self.pos = min(new_pos, len(self._buf))
    def readline(self, length=None):
        """Read one line (up to and including '\\n'), optionally capped at *length*."""
        if self.closed:
            raise ValueError('I/O operation on closed file')
        nl_pos = self._buf.find('\n', self.pos)
        buf = []
        try:
            # Keep consuming items until a newline shows up or the
            # iterator is exhausted.
            pos = self.pos
            while nl_pos < 0:
                item = self._gen.next()
                local_pos = item.find('\n')
                buf.append(item)
                if local_pos >= 0:
                    nl_pos = pos + local_pos
                    break
                pos += len(item)
        except StopIteration:
            pass
        if buf:
            self._buf += ''.join(buf)
        if nl_pos < 0:
            # No newline found: the "line" runs to the end of the data.
            new_pos = len(self._buf)
        else:
            new_pos = nl_pos + 1
        if length is not None and self.pos + length < new_pos:
            new_pos = self.pos + length
        try:
            return self._buf[self.pos:new_pos]
        finally:
            self.pos = min(new_pos, len(self._buf))
    def readlines(self, sizehint=0):
        """Read all remaining lines; stop early once *sizehint* chars are read."""
        total = 0
        lines = []
        line = self.readline()
        while line:
            lines.append(line)
            total += len(line)
            if 0 < sizehint <= total:
                break
            line = self.readline()
        return lines
| bsd-3-clause |
weiting-chen/manila | manila/compute/__init__.py | 9 | 1150 | # Copyright 2014 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import oslo_config.cfg
import oslo_utils.importutils
# Configuration options controlling which compute API implementation is
# used; the class named by 'compute_api_class' is instantiated by API().
_compute_opts = [
    oslo_config.cfg.StrOpt('compute_api_class',
                           default='manila.compute.nova.API',
                           help='The full class name of the '
                                'Compute API class to use.'),
]

oslo_config.cfg.CONF.register_opts(_compute_opts)
def API():
    """Instantiate and return the configured compute API implementation.

    The concrete class is named by the ``compute_api_class`` config
    option (default: ``manila.compute.nova.API``).
    """
    class_path = oslo_config.cfg.CONF.compute_api_class
    api_class = oslo_utils.importutils.import_class(class_path)
    return api_class()
| apache-2.0 |
djoproject/pyshell | pyshell/utils/test/key_test.py | 3 | 5574 | #!/usr/bin/env python -t
# -*- coding: utf-8 -*-
# Copyright (C) 2015 Jonathan Delvaux <pyshell@djoproject.net>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import pytest
from pyshell.utils.exception import KeyStoreException
from pyshell.utils.key import CryptographicKey
class TestKey(object):
    """Tests covering CryptographicKey parsing and key-slice extraction."""

    def test_notValidKeyString1(self):
        """Reject values that are neither str nor unicode."""
        with pytest.raises(KeyStoreException):
            CryptographicKey(None)

    def test_notValidKeyString2(self):
        """Reject strings lacking a 0b or 0x prefix."""
        with pytest.raises(KeyStoreException):
            CryptographicKey("plop")

    def test_notValidKeyString3(self):
        """Reject prefixed strings containing invalid digits."""
        with pytest.raises(KeyStoreException):
            CryptographicKey("0bplop")
        with pytest.raises(KeyStoreException):
            CryptographicKey("0xplip")

    def test_validKey1(self):
        """Accept a valid binary key and expose its metadata."""
        key = CryptographicKey("0b10101011")
        assert key is not None
        assert str(key) == "0b10101011"
        assert repr(key) == "0b10101011 ( BinaryKey, size=8 bit(s))"
        assert key.getKeyType() == CryptographicKey.KEYTYPE_BIT
        assert key.getKeySize() == 8
        assert key.getTypeString() == "bit"

    def test_validKey2(self):
        """Accept a valid hexadecimal key and expose its metadata."""
        key = CryptographicKey("0x11223344EEDDFF")
        assert key is not None
        assert str(key) == "0x11223344eeddff"
        assert repr(key) == "0x11223344eeddff ( HexaKey, size=7 byte(s))"
        assert key.getKeyType() == CryptographicKey.KEYTYPE_HEXA
        assert key.getKeySize() == 7
        assert key.getTypeString() == "byte"

    def test_validKey3(self):
        """A hexa key with an odd digit count is left-padded with zero."""
        key = CryptographicKey("0x1223344EEDDFF")
        assert key is not None
        assert str(key) == "0x01223344eeddff"
        assert repr(key) == "0x01223344eeddff ( HexaKey, size=7 byte(s))"
        assert key.getKeyType() == CryptographicKey.KEYTYPE_HEXA
        assert key.getKeySize() == 7
        assert key.getTypeString() == "byte"

    def test_getKeyBinary1(self):
        """An end index lower than the start yields an empty result."""
        key = CryptographicKey("0b10101011")
        extracted = key.getKey(5, 3)
        assert len(extracted) == 0

    def test_getKeyBinary2(self):
        """A start past the key size, with no end, yields an empty result."""
        key = CryptographicKey("0b10101011")
        extracted = key.getKey(800)
        assert len(extracted) == 0

    def test_getKeyBinary3(self):
        """An in-range start with no end returns the remaining bits."""
        key = CryptographicKey("0b10101011")
        extracted = key.getKey(3)
        assert len(extracted) == 5
        assert extracted == [0, 1, 0, 1, 1]

    def test_getKeyBinary4(self):
        """An out-of-range end pads the result with zero bits."""
        key = CryptographicKey("0b10101011")
        extracted = key.getKey(3, 10)
        assert len(extracted) == 7
        assert extracted == [0, 1, 0, 1, 1, 0, 0]

    def test_getKeyBinary5(self):
        """Start and end both in range return the exact bit slice."""
        key = CryptographicKey("0b10101011")
        extracted = key.getKey(3, 6)
        assert len(extracted) == 3
        assert extracted == [0, 1, 0]

    def test_getKeyBinary6(self):
        """With padding disabled, an out-of-range end is truncated."""
        key = CryptographicKey("0b10101011")
        extracted = key.getKey(3, 10, False)
        assert len(extracted) == 5
        assert extracted == [0, 1, 0, 1, 1]

    def test_getKeyBinary7(self):
        """Disabling padding does not affect a fully in-range slice."""
        key = CryptographicKey("0b10101011")
        extracted = key.getKey(3, 6, False)
        assert len(extracted) == 3
        assert extracted == [0, 1, 0]

    def test_getKeyHexa1(self):
        """An end index lower than the start yields an empty result."""
        key = CryptographicKey("0x11223344EEDDFF")
        extracted = key.getKey(5, 3)
        assert len(extracted) == 0

    def test_getKeyHexa2(self):
        """A start past the key size, with no end, yields an empty result."""
        key = CryptographicKey("0x11223344EEDDFF")
        extracted = key.getKey(800)
        assert len(extracted) == 0

    def test_getKeyHexa3(self):
        """An in-range start with no end returns the remaining bytes."""
        key = CryptographicKey("0x11223344EEDDFF")
        extracted = key.getKey(3)
        assert len(extracted) == 4
        assert extracted == [0x44, 0xee, 0xdd, 0xff]

    def test_getKeyHexa4(self):
        """An out-of-range end pads the result with zero bytes."""
        key = CryptographicKey("0x11223344EEDDFF")
        extracted = key.getKey(3, 10)
        assert len(extracted) == 7
        assert extracted == [0x44, 0xee, 0xdd, 0xff, 0, 0, 0]

    def test_getKeyHexa5(self):
        """Start and end both in range return the exact byte slice."""
        key = CryptographicKey("0x11223344EEDDFF")
        extracted = key.getKey(3, 6)
        assert len(extracted) == 3
        assert extracted == [0x44, 0xee, 0xdd]

    def test_getKeyHexa6(self):
        """With padding disabled, an out-of-range end is truncated."""
        key = CryptographicKey("0x11223344EEDDFF")
        extracted = key.getKey(3, 10, False)
        assert len(extracted) == 4
        assert extracted == [0x44, 0xee, 0xdd, 0xff]

    def test_getKeyHexa7(self):
        """Disabling padding does not affect a fully in-range slice."""
        key = CryptographicKey("0x11223344EEDDFF")
        extracted = key.getKey(3, 6, False)
        assert len(extracted) == 3
        assert extracted == [0x44, 0xee, 0xdd]
| gpl-3.0 |
Stanford-Online/edx-analytics-pipeline | edx/analytics/tasks/warehouse/load_internal_reporting_events.py | 1 | 86821 | """EXPERIMENTAL:
Load most events into warehouse for internal reporting purposes. This
combines segment events and tracking log events, and defines a common
(wide) representation for all events to share, sparsely. Requires
definition of a Record enumerating these columns, and also a mapping
from event values to column values.
"""
import datetime
import json
import logging
import re
from importlib import import_module
import ciso8601
import dateutil
import luigi
import luigi.task
import pytz
import ua_parser
import user_agents
from luigi.configuration import get_config
from luigi.date_interval import DateInterval
from edx.analytics.tasks.common.bigquery_load import BigQueryLoadDownstreamMixin, BigQueryLoadTask
from edx.analytics.tasks.common.mapreduce import MapReduceJobTaskMixin, MultiOutputMapReduceJobTask
from edx.analytics.tasks.common.pathutil import EventLogSelectionDownstreamMixin, EventLogSelectionMixin
from edx.analytics.tasks.common.vertica_load import SchemaManagementTask, VerticaCopyTask, VerticaCopyTaskMixin
from edx.analytics.tasks.util import eventlog
from edx.analytics.tasks.util.hive import BareHiveTableTask, HivePartition, HivePartitionTask, WarehouseMixin
from edx.analytics.tasks.util.obfuscate_util import backslash_encode_value
from edx.analytics.tasks.util.opaque_key_util import get_course_key_from_url, get_org_id_for_course, is_valid_course_id
from edx.analytics.tasks.util.record import (
BooleanField, DateField, DateTimeField, FloatField, IntegerField, SparseRecord, StringField
)
from edx.analytics.tasks.util.url import ExternalURL, url_path_join
log = logging.getLogger(__name__)

# Version stamp for this task's output format; presumably written into each
# record's 'version'/metadata field -- confirm against the mapping code below.
VERSION = '0.2.4'

EVENT_TABLE_NAME = 'event_records'

# Define pattern to extract a course_id from a string by looking
# explicitly for a version string and two plus-delimiters.
# TODO: avoid this hack by finding a more opaque-keys-respectful way.
NEW_COURSE_ID_PATTERN = r'(?P<course_id>course\-v1\:[^/+]+(\+)[^/+]+(\+)[^/]+)'
NEW_COURSE_REGEX = re.compile(r'^.*?{}'.format(NEW_COURSE_ID_PATTERN))
class EventRecord(SparseRecord):
    """
    Wide, sparse record representing a single event (tracking log or segment).

    Each field maps to a column in the warehouse event table; most fields
    are nullable because any given event populates only a small subset of
    them.  Several field descriptions below are still placeholders ('blah.').
    """

    # Metadata:
    version = StringField(length=20, nullable=False, description='blah.')
    input_file = StringField(length=255, nullable=True, description='blah.')
    # hash_id = StringField(length=255, nullable=False, description='blah.')

    # Globals:
    project = StringField(length=255, nullable=False, description='blah.')
    event_type = StringField(length=255, nullable=False, description='The type of event. Example: video_play.')
    event_source = StringField(length=255, nullable=False, description='blah.')
    event_category = StringField(length=255, nullable=True, description='blah.')

    # TODO: decide what type 'timestamp' should be.
    # Also make entries required (not nullable), once we have confidence.
    timestamp = StringField(length=255, nullable=True, description='Timestamp when event was emitted.')
    received_at = StringField(length=255, nullable=True, description='Timestamp when event was received.')
    # TODO: figure out why these have errors, and then make DateField.
    date = StringField(length=255, nullable=False, description='The learner interacted with the entity on this date.')

    # Common (but optional) values:
    accept_language = StringField(length=255, nullable=True, description='')
    agent = StringField(length=1023, nullable=True, description='')
    # 'agent' string gets parsed into the following:
    agent_type = StringField(length=20, nullable=True, description='')
    agent_device_name = StringField(length=100, nullable=True, description='', truncate=True)
    agent_os = StringField(length=100, nullable=True, description='')
    agent_browser = StringField(length=100, nullable=True, description='')
    # agent_touch_capable = BooleanField(nullable=True, description='')
    agent_touch_capable = StringField(length=10, nullable=True, description='')
    host = StringField(length=80, nullable=True, description='')
    # TODO: geolocate ip to find country or more specific information?
    ip = StringField(length=64, nullable=True, description='')
    # name: not really used yet?
    # NOTE(review): 'page' is redefined further below (int/str: forum, pdf);
    # in Python the later class attribute wins, so this definition is shadowed.
    page = StringField(length=1024, nullable=True, description='')
    referer = StringField(length=2047, nullable=True, description='')
    session = StringField(length=255, nullable=True, description='')
    username = StringField(length=50, nullable=True, description='Learner\'s username.')

    # Common (but optional) context values:
    # We exclude course_user_tags, as it's a set of key-value pairs that affords no stable naming scheme.
    # TODO: decide how to deal with redundant data.  Shouldn't be specifying "context_" here,
    # since that doesn't generalize to segment data at all.
    context_course_id = StringField(length=255, nullable=True, description='Id of course.')
    context_org_id = StringField(length=255, nullable=True, description='Id of organization, as used in course_id.')
    context_path = StringField(length=1024, nullable=True, description='')
    context_user_id = StringField(length=255, nullable=True, description='')
    context_module_display_name = StringField(length=255, nullable=True, description='')
    context_module_usage_key = StringField(length=255, nullable=True, description='')
    context_module_original_usage_key = StringField(length=255, nullable=True, description='')
    context_module_original_usage_version = StringField(length=255, nullable=True, description='')
    # course_user_tags object
    # application object: explicitly extracted to 'app_name', 'app_version'.
    # client object
    context_component = StringField(length=255, nullable=True, description='')  # string
    context_mode = StringField(length=255, nullable=True, description='')  # string
    # This is handled for tracking logs by writing to received_at explicitly.
    # context_received_at = StringField(length=255, nullable=True, description='')  # number

    # Per-event values:
    # entity_type = StringField(length=10, nullable=True, description='Category of entity that the learner interacted'
    #                           ' with. Example: "video".')
    # entity_id = StringField(length=255, nullable=True, description='A unique identifier for the entity within the'
    #                         ' course that the learner interacted with.')
    add_method = StringField(length=255, nullable=True, description='')  # string
    # added list
    allowance_key = StringField(length=255, nullable=True, description='')  # string
    allowance_user_id = StringField(length=255, nullable=True, description='')  # number
    allowance_value = StringField(length=255, nullable=True, description='')  # string
    amount = StringField(length=255, nullable=True, description='')  # string
    anonymous = StringField(length=255, nullable=True, description='')  # Boolean
    anonymous_to_peers = StringField(length=255, nullable=True, description='')  # Boolean
    answer = StringField(length=255, nullable=True, description='')  # integer
    # answer object
    answers = StringField(length=255, nullable=True, description='')  # string
    # answers object
    attempt_allowed_time_limit_mins = StringField(length=255, nullable=True, description='')  # number
    attempt_code = StringField(length=255, nullable=True, description='')  # string
    attempt_completed_at = StringField(length=255, nullable=True, description='')  # datetime
    attempt_event_elapsed_time_secs = StringField(length=255, nullable=True, description='')  # number
    attempt_id = StringField(length=255, nullable=True, description='')  # number
    attempt_number = StringField(length=255, nullable=True, description='')  # number
    attempt_started_at = StringField(length=255, nullable=True, description='')  # datetime
    attempt_status = StringField(length=255, nullable=True, description='')  # string
    attempt_user_id = StringField(length=255, nullable=True, description='')  # number
    attempts = StringField(length=255, nullable=True, description='')  # use int
    body = StringField(length=2047, nullable=True, description='')  # string
    bookmark_id = StringField(length=255, nullable=True, description='')  # string
    bookmarks_count = StringField(length=255, nullable=True, description='')  # integer
    bumper_id = StringField(length=255, nullable=True, description='')  # string
    casesensitive = StringField(length=255, nullable=True, description='')  # Boolean
    # NOTE(review): 'category' is redefined near the bottom (segment fields);
    # the later definition wins, so this one is shadowed.
    category = StringField(length=255, nullable=True, description='')  # number
    category_id = StringField(length=255, nullable=True, description='')  # string
    category_name = StringField(length=255, nullable=True, description='')  # string
    certificate_id = StringField(length=255, nullable=True, description='')  # string
    certificate_url = StringField(length=255, nullable=True, description='')  # string
    chapter = StringField(length=255, nullable=True, description='')  # string: pdf
    chapter_title = StringField(length=255, nullable=True, description='')  # string
    child_id = StringField(length=255, nullable=True, description='')  # string
    choice = StringField(length=255, nullable=True, description='')  # string: poll
    # choice_all array
    # choices object
    code = StringField(length=255, nullable=True, description='')  # string: video
    cohort_id = StringField(length=255, nullable=True, description='')  # number: cohort
    cohort_name = StringField(length=255, nullable=True, description='')  # string
    commentable_id = StringField(length=255, nullable=True, description='')  # string: forums
    component_type = StringField(length=255, nullable=True, description='')  # string
    component_usage_id = StringField(length=255, nullable=True, description='')  # string
    content = StringField(length=255, nullable=True, description='')  # string
    # correct_map object
    corrected_text = StringField(length=255, nullable=True, description='')  # forum search
    # corrections object
    correctness = StringField(length=255, nullable=True, description='')  # Boolean
    course = StringField(length=255, nullable=True, description='')  # string
    course_id = StringField(length=255, nullable=True, description='')  # enrollment, certs
    created_at = StringField(length=255, nullable=True, description='')  # datetime
    # "current_time" is a SQL function name/alias, so we need to use something different here.
    # We will instead map it to "currenttime", which will receive values from "current_time" and "currentTime".
    currenttime = StringField(length=255, nullable=True, description='')  # float/int/str: video
    current_slide = StringField(length=255, nullable=True, description='')  # number
    current_tab = StringField(length=255, nullable=True, description='')  # integer
    current_url = StringField(length=255, nullable=True, description='')  # string
    direction = StringField(length=255, nullable=True, description='')  # pdf
    discussion_id = StringField(length=255, nullable=True, description='')  # discussion.id forum
    displayed_in = StringField(length=255, nullable=True, description='')  # googlecomponent
    done = StringField(length=255, nullable=True, description='')  # Boolean
    duration = StringField(length=255, nullable=True, description='')  # int: videobumper
    enrollment_mode = StringField(length=255, nullable=True, description='')  # certs
    event = StringField(length=255, nullable=True, description='')  # string
    event_name = StringField(length=255, nullable=True, description='')  # string
    exam_content_id = StringField(length=255, nullable=True, description='')  # string
    exam_default_time_limit_mins = StringField(length=255, nullable=True, description='')  # number
    exam_id = StringField(length=255, nullable=True, description='')  # number
    exam_is_active = StringField(length=255, nullable=True, description='')  # Boolean
    exam_is_practice_exam = StringField(length=255, nullable=True, description='')  # Boolean
    exam_is_proctored = StringField(length=255, nullable=True, description='')  # Boolean
    exam_name = StringField(length=255, nullable=True, description='')  # string
    exploration_id = StringField(length=255, nullable=True, description='')  # string
    exploration_version = StringField(length=255, nullable=True, description='')  # string
    failure = StringField(length=255, nullable=True, description='')  # string
    feedback = StringField(length=2047, nullable=True, description='')  # string
    feedback_text = StringField(length=2047, nullable=True, description='')  # string
    field = StringField(length=255, nullable=True, description='')  # team
    fileName = StringField(length=255, nullable=True, description='')  # string
    fileSize = StringField(length=255, nullable=True, description='')  # number
    fileType = StringField(length=255, nullable=True, description='')  # string
    findprevious = StringField(length=255, nullable=True, description='')  # Boolean
    generation_mode = StringField(length=255, nullable=True, description='')  # cert
    grade = StringField(length=255, nullable=True, description='')  # float/int: problem_check
    group_id = StringField(length=255, nullable=True, description='')  # int: forum
    group_name = StringField(length=255, nullable=True, description='')  # user_to_partition
    highlightall = StringField(length=255, nullable=True, description='')  # highlightAll: Boolean
    highlighted_content = StringField(length=255, nullable=True, description='')  # string
    hint_index = StringField(length=255, nullable=True, description='')  # number
    hint_label = StringField(length=255, nullable=True, description='')  # string
    hint_len = StringField(length=255, nullable=True, description='')  # number
    hint_text = StringField(length=2047, nullable=True, description='')  # string
    # hints array
    host_component_id = StringField(length=255, nullable=True, description='')  # string
    id = StringField(length=255, nullable=True, description='')  # string: video, forum
    input = StringField(length=255, nullable=True, description='')  # integer
    instructor = StringField(length=255, nullable=True, description='')
    is_correct = StringField(length=255, nullable=True, description='')  # Boolean
    is_correct_location = StringField(length=255, nullable=True, description='')  # Boolean
    item_id = StringField(length=255, nullable=True, description='')  # integer, string
    letter_grade = StringField(length=64, nullable=True)
    list_type = StringField(length=255, nullable=True, description='')  # string
    location = StringField(length=255, nullable=True, description='')  # library
    manually = StringField(length=255, nullable=True, description='')  # Boolean
    max_count = StringField(length=255, nullable=True, description='')  # int: library
    max_grade = StringField(length=255, nullable=True, description='')  # int: problem_check
    mode = StringField(length=255, nullable=True, description='')  # enrollment
    module_id = StringField(length=255, nullable=True, description='')  # hint
    name = StringField(length=255, nullable=True, description='')  # pdf
    # NEW is a keyword in SQL on Vertica, so use different name here.
    new_value = StringField(length=2047, nullable=True, description='')  # int: seq, str: book, team, settings
    new_score = StringField(length=255, nullable=True, description='')  # number
    new_speed = StringField(length=255, nullable=True, description='')  # video
    # new_state object
    new_state_name = StringField(length=255, nullable=True, description='')  # string
    new_time = StringField(length=255, nullable=True, description='')  # float/int: video
    new_total = StringField(length=255, nullable=True, description='')  # number
    note_id = StringField(length=255, nullable=True, description='')  # string
    note_text = StringField(length=255, nullable=True, description='')  # string
    # notes array
    number_of_results = StringField(length=255, nullable=True, description='')  # integer, number
    # Not documented, but used by problembuilder:
    num_attempts = StringField(length=255, nullable=True, description='')  # int: problem_builder
    # OLD is a keyword in SQL on Vertica, so use different name here.
    old_value = StringField(length=2047, nullable=True, description='')  # int: seq, str: book, team, settings
    old_attempts = StringField(length=255, nullable=True, description='')  # string
    old_note_text = StringField(length=255, nullable=True, description='')  # string
    old_speed = StringField(length=255, nullable=True, description='')  # video
    # old_state object
    old_state_name = StringField(length=255, nullable=True, description='')  # string
    # old_tags array of strings
    old_time = StringField(length=255, nullable=True, description='')  # number
    # options array
    # options object
    options_followed = StringField(length=255, nullable=True, description='')  # options.followed: boolean
    # options_selected object
    orig_score = StringField(length=255, nullable=True, description='')  # number
    orig_total = StringField(length=255, nullable=True, description='')  # number
    # NOTE(review): this redefinition of 'page' shadows the earlier one above.
    page = StringField(length=1023, nullable=True, description='')  # int/str: forum, pdf
    page_name = StringField(length=255, nullable=True, description='')  # string
    page_number = StringField(length=255, nullable=True, description='')  # integer
    page_size = StringField(length=255, nullable=True, description='')  # integer
    partition_id = StringField(length=255, nullable=True, description='')  # number
    partition_name = StringField(length=255, nullable=True, description='')  # string
    percent_grade = FloatField(nullable=True)
    # parts: [criterion, option, feedback] array
    previous_cohort_id = StringField(length=255, nullable=True, description='')  # int: cohort
    previous_cohort_name = StringField(length=255, nullable=True, description='')  # cohort
    previous_count = StringField(length=255, nullable=True, description='')  # int: lib
    problem = StringField(length=255, nullable=True, description='')  # show/reset/rescore
    problem_id = StringField(length=255, nullable=True, description='')  # capa
    problem_part_id = StringField(length=255, nullable=True, description='')  # hint
    query = StringField(length=255, nullable=True, description='')  # forum, pdf
    question_type = StringField(length=255, nullable=True, description='')  # hint
    rationale = StringField(length=1023, nullable=True, description='')  # string
    reason = StringField(length=255, nullable=True, description='')  # string
    remove_method = StringField(length=255, nullable=True, description='')  # string
    # removed list
    report_type = StringField(length=255, nullable=True, description='')  # string
    report_url = StringField(length=255, nullable=True, description='')  # string
    requested_skip_interval = StringField(length=255, nullable=True, description='')  # number
    requesting_staff_id = StringField(length=255, nullable=True, description='')  # string
    requesting_student_id = StringField(length=255, nullable=True, description='')  # string
    response_id = StringField(length=255, nullable=True, description='')  # response.id: forum
    # result list
    review_attempt_code = StringField(length=255, nullable=True, description='')  # string
    review_status = StringField(length=255, nullable=True, description='')  # string
    review_video_url = StringField(length=255, nullable=True, description='')  # string
    # rubric object
    # saved_response object
    score_type = StringField(length=255, nullable=True, description='')  # string
    scored_at = StringField(length=255, nullable=True, description='')  # datetime
    scorer_id = StringField(length=255, nullable=True, description='')  # string
    search_string = StringField(length=255, nullable=True, description='')  # string
    search_text = StringField(length=255, nullable=True, description='')  # team
    selection = StringField(length=255, nullable=True, description='')  # number
    slide = StringField(length=255, nullable=True, description='')  # number
    # Not listed or attested: seek_type = StringField(length=255, nullable=True, description='')  # video
    social_network = StringField(length=255, nullable=True, description='')  # certificate
    source_url = StringField(length=255, nullable=True, description='')  # string
    # state object
    status = StringField(length=255, nullable=True, description='')  # status
    student = StringField(length=255, nullable=True, description='')  # reset/delete/rescore
    # student_answer array
    # submission object
    submission_returned_uuid = StringField(length=255, nullable=True, description='')  # string
    submission_uuid = StringField(length=255, nullable=True, description='')  # string
    submitted_at = StringField(length=255, nullable=True, description='')  # datetime
    success = StringField(length=255, nullable=True, description='')  # problem_check
    tab_count = StringField(length=255, nullable=True, description='')  # integer
    # tags array of strings
    target_name = StringField(length=255, nullable=True, description='')  # string
    target_tab = StringField(length=255, nullable=True, description='')  # integer
    target_url = StringField(length=255, nullable=True, description='')  # string
    target_username = StringField(length=255, nullable=True, description='')  # string
    team_id = StringField(length=255, nullable=True, description='')  # team, forum
    thread_type = StringField(length=255, nullable=True, description='')  # forum
    title = StringField(length=1023, nullable=True, description='')  # forum, segment
    thumbnail_title = StringField(length=255, nullable=True, description='')  # string
    topic_id = StringField(length=255, nullable=True, description='')  # team
    total_results = StringField(length=255, nullable=True, description='')  # int: forum
    total_slides = StringField(length=255, nullable=True, description='')  # number
    trigger_type = StringField(length=255, nullable=True, description='')  # string
    truncated = StringField(length=255, nullable=True, description='')  # bool: forum
    # truncated array
    # truncated array of strings
    type = StringField(length=255, nullable=True, description='')  # video, book
    undo_vote = StringField(length=255, nullable=True, description='')  # Boolean
    url_name = StringField(length=255, nullable=True, description='')  # poll/survey
    url = StringField(length=2047, nullable=True, description='')  # forum, googlecomponent, segment
    # USER is a keyword in SQL on Vertica, so use different name here.
    event_user = StringField(length=255, nullable=True, description='')  # string
    # user_course_roles array
    # user_forums_roles array
    user_id = StringField(length=255, nullable=True, description='')  # int: enrollment, cohort, etc.
    # event_username is mapped from root.event.username, to keep separate from root.username.
    event_username = StringField(length=255, nullable=True, description='')  # add/remove forum
    value = StringField(length=255, nullable=True, description='')  # number
    view = StringField(length=255, nullable=True, description='')  # string
    vote_value = StringField(length=255, nullable=True, description='')  # string
    widget_placement = StringField(length=255, nullable=True, description='')  # string

    # Stuff from segment:
    channel = StringField(length=255, nullable=True, description='')
    anonymous_id = StringField(length=255, nullable=True, description='')
    path = StringField(length=2047, nullable=True, description='')
    referrer = StringField(length=8191, nullable=True, description='')
    search = StringField(length=2047, nullable=True, description='')
    # title and url already exist
    variationname = StringField(length=255, nullable=True, description='')
    variationid = StringField(length=255, nullable=True, description='')
    experimentid = StringField(length=255, nullable=True, description='')
    experimentname = StringField(length=255, nullable=True, description='')
    # NOTE(review): redefinition of 'category' -- shadows the earlier field above.
    category = StringField(length=255, nullable=True, description='')
    label = StringField(length=511, nullable=True, description='')
    display_name = StringField(length=255, nullable=True, description='')
    client_id = StringField(length=255, nullable=True, description='')
    locale = StringField(length=255, nullable=True, description='')
    timezone = StringField(length=255, nullable=True, description='')
    app_name = StringField(length=255, nullable=True, description='')
    app_version = StringField(length=255, nullable=True, description='')
    os_name = StringField(length=255, nullable=True, description='')
    os_version = StringField(length=255, nullable=True, description='')
    device_manufacturer = StringField(length=255, nullable=True, description='')
    device_model = StringField(length=255, nullable=True, description='')
    network_carrier = StringField(length=255, nullable=True, description='')
    action = StringField(length=255, nullable=True, description='')
    screen_width = StringField(length=255, nullable=True, description='')
    screen_height = StringField(length=255, nullable=True, description='')
    campaign_source = StringField(length=255, nullable=True, description='')
    campaign_medium = StringField(length=255, nullable=True, description='')
    campaign_content = StringField(length=255, nullable=True, description='')
    campaign_name = StringField(length=255, nullable=True, description='')
class JsonEventRecord(SparseRecord):
    """
    Trimmed-down event record that keeps the raw payload as a JSON string.

    Unlike EventRecord, which flattens event payloads into many sparse
    columns, this record exposes only a small set of common columns and
    stores the full event text in ``raw_event`` for parsing at query time.
    """

    timestamp = DateTimeField(nullable=True, description='Timestamp when event was emitted.')
    received_at = DateTimeField(nullable=True, description='Timestamp when event was received/recorded.')
    # was context_user_id:
    user_id = StringField(length=255, nullable=True, description='The identifier of the user who was logged in when the event was emitted.  '
                          'This is often but not always numeric.')
    username = StringField(length=50, nullable=True, description='The username of the user who was logged in when the event was emitted.')
    anonymous_id = StringField(length=255, nullable=True, description='The anonymous_id of the user.')
    event_type = StringField(
        length=255,
        nullable=False,
        description='The name of the event (event name in the segment logs). For implicit events, this should be "edx.server.request".'
    )
    # was context_course_id
    course_id = StringField(length=255, nullable=True, description='The course_id associated with this event (if any).')
    org_id = StringField(length=255, nullable=True, description='Id of organization, as used in course_id.')
    label = StringField(length=511, nullable=True, description='The GA label associated with this event.')
    # This is not the same as "event_category".
    category = StringField(length=255, nullable=True, description='The GA category for this event.')
    # This was event_source:
    emitter_type = StringField(length=255, nullable=False, description='Where the event was collected from (browser, mobile, server, etc).')
    # This is populated for most segment events, but also forum, googlecomponent tracking log events.
    url = StringField(
        length=2047,
        nullable=True,
        description='For page events, the full URL (including hostname) that was accessed by the user.  '
                    'For implicit events, this should be the full URL that the request was for.'
    )
    # use the length (and name) from segment version (referrer), and write tracking log 'referer' here as well.
    referrer = StringField(length=8191, nullable=True, description='The HTTP referrer - also as a full URL.')
    # was project:
    source = StringField(length=255, nullable=False, description='The segment.com project the event was sent to.')
    input_file = StringField(length=255, nullable=False, description='The full URL of the file that contains this event in S3.')
    agent_type = StringField(length=20, nullable=True, description='The type of device used.')
    agent_device_name = StringField(length=100, nullable=True, description='The name of the device used.', truncate=True)
    agent_os = StringField(length=100, nullable=True, description='The name of the OS on the device used. ')
    agent_browser = StringField(length=100, nullable=True, description='The name of the browser used.')
    # So having a StringField (as with EventRecord) has this write out to disk as "True".
    # Using a BooleanField here has this end up being written out as 0 or 1.
    # However, when loading to BigQuery as BOOLEAN, the boolean is translated back to True/False, or null.
    agent_touch_capable = BooleanField(nullable=True, description='A boolean value indicating that the device was touch-capable.')
    raw_event = StringField(length=60000, nullable=True, description='The full text of the event as a JSON string.  This can be parsed at query time using UDFs.')
    # This was originally a StringField, but a DateField outputs the same format and is more useful.
    date = DateField(nullable=False, description='The date when the event was received.')
class EventRecordClassMixin(object):
    """
    Mixin that resolves the configured event-record class by name.

    The class named by ``event_record_type`` is looked up in this task's own
    module at construction time, so derived tasks can work with either record
    schema (``EventRecord`` or ``JsonEventRecord``) without hard-coding one.

    Raises:
        ValueError: if no class with the configured name exists in the module.
    """

    event_record_type = luigi.Parameter(
        description='The kind of event record to load. Default is EventRecord, override with JsonEventRecord as needed.',
        default='EventRecord',
    )

    def __init__(self, *args, **kwargs):
        super(EventRecordClassMixin, self).__init__(*args, **kwargs)
        module_name = self.__class__.__module__
        local_module = import_module(module_name)
        # Pass a default of None: without it, getattr raises AttributeError
        # for an unknown name and the intended ValueError below is unreachable.
        self.record_class = getattr(local_module, self.event_record_type, None)
        if not self.record_class:
            raise ValueError("No event record class found: {}".format(self.event_record_type))

    def get_event_record_class(self):
        """Return the record class resolved from ``event_record_type``."""
        return self.record_class

    def uses_JSON_event_record(self):
        """Return True if the JSON-based record schema is in use."""
        return self.event_record_type == 'JsonEventRecord'
class EventRecordDownstreamMixin(EventRecordClassMixin, WarehouseMixin, MapReduceJobTaskMixin):
    """Parameters common to event-record tasks, for passing downstream."""

    # Optional path to a TSV file listing known events; when absent, no
    # event-category mapping is loaded (see BaseEventRecordDataTask.init_local).
    events_list_file_path = luigi.Parameter(default=None)
class EventRecordDataDownstreamMixin(EventRecordDownstreamMixin):
    """Common parameters and base classes used to pass parameters through the event record workflow."""

    # Root URL under which per-date/per-project output files are written.
    output_root = luigi.Parameter()
class BaseEventRecordDataTask(EventRecordDataDownstreamMixin, MultiOutputMapReduceJobTask):
    """
    Base class for loading EventRecords from different sources.

    Provides the shared machinery for mapping raw event dictionaries onto
    record fields: time normalization, date conversion, user-agent parsing,
    type-aware field assignment, and per-date/per-project output routing.
    """

    # Create a DateField object to help with converting date_string
    # values for assignment to DateField objects.
    date_field_for_converting = DateField()
    date_time_field_for_validating = DateTimeField()

    # This is a placeholder. It is expected to be overridden in derived classes.
    counter_category_name = 'Event Record Exports'

    # TODO: maintain support for info about events. We may need something similar to identify events
    # that should -- or should not -- be included in the event dump.

    def requires_local(self):
        """Require the known-events list file, if one was configured."""
        if self.events_list_file_path is not None:
            return ExternalURL(url=self.events_list_file_path)
        else:
            return []

    def init_local(self):
        """Load the known-events mapping (if configured) before mapping begins."""
        super(BaseEventRecordDataTask, self).init_local()
        if self.events_list_file_path is None:
            self.known_events = {}
        else:
            self.known_events = self.parse_events_list_file()

    def parse_events_list_file(self):
        """Read and parse the known events list file and populate it in a dictionary."""
        parsed_events = {}
        with self.input_local().open() as f_in:
            lines = f_in.readlines()
            for line in lines:
                # Expect tab-separated lines of (category, source, type); skip comment lines.
                # Use '==' here: the original 'is 3' compared integer identity and
                # only worked by accident of CPython small-int interning.
                if not line.startswith('#') and len(line.split("\t")) == 3:
                    parts = line.rstrip('\n').split("\t")
                    parsed_events[(parts[1], parts[2])] = parts[0]
        return parsed_events

    def multi_output_reducer(self, _key, values, output_file):
        """
        Write values to the appropriate file as determined by the key.
        """
        for value in values:
            # Assume that the value is a dict containing the relevant sparse data,
            # either raw or encoded in a json string.
            # Either that, or we could ship the original event as a json string,
            # or ship the resulting sparse record as a tuple.
            # It should be a pretty arbitrary decision, since it all needs
            # to be done, and it's just a question where to do it.
            # For now, keep this simple, and assume it's tupled already.
            output_file.write(value)
            output_file.write('\n')
            # WARNING: This line ensures that Hadoop knows that our process is not sitting in an infinite loop.
            # Do not remove it.
            self.incr_counter(self.counter_category_name, 'Raw Bytes Written', len(value) + 1)

    def output_path_for_key(self, key):
        """
        Output based on date and project.

        Mix them together by date, but identify with different files for each project/environment.

        Output is in the form {warehouse_path}/event_records/dt={CCYY-MM-DD}/{project}.tsv
        """
        date_received, project = key
        return url_path_join(
            self.output_root,
            EVENT_TABLE_NAME,
            'dt={date}'.format(date=date_received),
            '{project}.tsv'.format(project=project),
        )

    def extra_modules(self):
        """Python modules that must be shipped to Hadoop nodes for the map/reduce code."""
        return [pytz, ua_parser, user_agents, dateutil]

    def normalize_time(self, event_time):
        """
        Convert time string to ISO-8601 format in UTC timezone.

        Returns None if string representation cannot be parsed.
        """
        # Use a local name that does not shadow the 'datetime' module.
        parsed = ciso8601.parse_datetime(event_time)
        if parsed:
            return parsed.astimezone(pytz.utc).isoformat()
        else:
            return None

    def extended_normalize_time(self, event_time):
        """
        Convert time string to ISO-8601 format in UTC timezone.

        Returns None if string representation cannot be parsed.

        Uses dateutil's parser, which is slower but accepts more formats
        than ciso8601 (see normalize_time).
        """
        parsed = dateutil.parser.parse(event_time)
        if parsed:
            return parsed.astimezone(pytz.utc).isoformat()
        else:
            return None

    def convert_date(self, date_string):
        """Converts date from string format to date object, for use by DateField."""
        if date_string:
            try:
                # TODO: for now, return as a string.
                # When actually supporting DateField, then switch back to date.
                # ciso8601.parse_datetime(ts).astimezone(pytz.utc).date().isoformat()
                return self.date_field_for_converting.deserialize_from_string(date_string).isoformat()
            except ValueError:
                self.incr_counter(self.counter_category_name, 'Cannot convert to date', 1)
                # Don't bother to make sure we return a good value
                # within the interval, so we can find the output for
                # debugging.  Should not be necessary, as this is only
                # used for the column value, not the partitioning.
                return u"BAD: {}".format(date_string)
                # return self.lower_bound_date_string
        else:
            self.incr_counter(self.counter_category_name, 'Missing date', 1)
            return date_string

    def _canonicalize_user_agent(self, agent):
        """
        There is a lot of variety in the user agent field that is hard for humans to parse, so we canonicalize
        the user agent to extract the information we're looking for.

        Args:
            agent: an agent string.

        Returns:
            a dictionary of information about the user agent.
        """
        agent_dict = {}

        try:
            user_agent = user_agents.parse(agent)
        except Exception:  # If the user agent can't be parsed, just drop the agent data on the floor since it's of no use to us.
            self.incr_counter(self.counter_category_name, 'Quality Unparseable agent', 1)
            return agent_dict

        device_type = ''  # It is possible that the user agent isn't any of the below.
        if user_agent.is_mobile:
            device_type = "mobile"
        elif user_agent.is_tablet:
            device_type = "tablet"
        elif user_agent.is_pc:
            device_type = "desktop"
        elif user_agent.is_bot:
            device_type = "bot"

        if device_type:
            agent_dict['type'] = device_type
            agent_dict['device_name'] = user_agent.device.family
            agent_dict['os'] = user_agent.os.family
            agent_dict['browser'] = user_agent.browser.family
            # TODO: figure out how to handle this, so that it works
            # when the target field is either BooleanField or StringField.
            # agent_dict['touch_capable'] = unicode(user_agent.is_touch_capable)
            agent_dict['touch_capable'] = user_agent.is_touch_capable
        else:
            self.incr_counter(self.counter_category_name, 'Quality Unrecognized agent type', 1)

        return agent_dict

    def add_agent_info(self, event_dict, agent):
        """Parse the user-agent string and add the derived agent_* entries to event_dict."""
        if agent:
            agent_dict = self._canonicalize_user_agent(agent)
            for key, value in agent_dict.items():
                new_key = u"agent_{}".format(key)
                self.add_calculated_event_entry(event_dict, new_key, value)

    def _add_event_entry(self, event_dict, event_record_key, event_record_field, label, obj):
        """
        Assign obj to event_dict[event_record_key], converting and validating it
        according to the type of event_record_field.  Values that cannot be
        converted are logged and counted rather than raised.
        """
        if isinstance(event_record_field, StringField):
            if obj is None:
                # TODO: this should really check to see if the record_field is nullable.
                value = None
            else:
                value = backslash_encode_value(unicode(obj))
                if '\x00' in value:
                    value = value.replace('\x00', '\\0')
                # Avoid validation errors later due to length by truncating here.
                field_length = event_record_field.length
                value_length = len(value)
                # TODO: This implies that field_length is at least 4.
                if value_length > field_length:
                    log.error("Record value length (%d) exceeds max length (%d) for field %s: %r", value_length, field_length, event_record_key, value)
                    value = u"{}...".format(value[:field_length - 4])
                    self.incr_counter(self.counter_category_name, 'Quality Truncated string value', 1)
            event_dict[event_record_key] = value
        elif isinstance(event_record_field, IntegerField):
            try:
                event_dict[event_record_key] = int(obj)
            except (ValueError, TypeError):
                # TypeError covers non-numeric objects (e.g. None), which int()
                # raises instead of ValueError; treat both as a cast failure.
                log.error('Unable to cast value to int for %s: %r', label, obj)
        elif isinstance(event_record_field, DateTimeField):
            datetime_obj = None
            try:
                if obj is not None:
                    datetime_obj = ciso8601.parse_datetime(obj)
                    if datetime_obj.tzinfo:
                        datetime_obj = datetime_obj.astimezone(pytz.utc)
                else:
                    datetime_obj = obj
            except ValueError:
                log.error('Unable to cast value to datetime for %s: %r', label, obj)

            # Because it's not enough just to create a datetime object, also perform
            # validation here.
            if datetime_obj is not None:
                validation_errors = self.date_time_field_for_validating.validate(datetime_obj)
                if len(validation_errors) > 0:
                    log.error('Invalid assigment of value %r to field "%s": %s', datetime_obj, label, ', '.join(validation_errors))
                    datetime_obj = None
            event_dict[event_record_key] = datetime_obj
        elif isinstance(event_record_field, DateField):
            date_obj = None
            try:
                if obj is not None:
                    date_obj = self.date_field_for_converting.deserialize_from_string(obj)
            except ValueError:
                log.error('Unable to cast value to date for %s: %r', label, obj)

            # Because it's not enough just to create a datetime object, also perform
            # validation here.
            if date_obj is not None:
                validation_errors = self.date_field_for_converting.validate(date_obj)
                if len(validation_errors) > 0:
                    log.error('Invalid assigment of value %r to field "%s": %s', date_obj, label, ', '.join(validation_errors))
                    date_obj = None
            event_dict[event_record_key] = date_obj
        elif isinstance(event_record_field, BooleanField):
            try:
                event_dict[event_record_key] = bool(obj)
            except ValueError:
                log.error('Unable to cast value to bool for %s: %r', label, obj)
        elif isinstance(event_record_field, FloatField):
            try:
                event_dict[event_record_key] = float(obj)
            except (ValueError, TypeError):
                # As with int above, None and other non-numeric objects raise TypeError.
                log.error('Unable to cast value to float for %s: %r', label, obj)
        else:
            event_dict[event_record_key] = obj

    def _add_event_info_recurse(self, event_dict, event_mapping, obj, label):
        """Walk the (nested) event structure, assigning any leaf whose dotted label appears in event_mapping."""
        if obj is None:
            pass
        elif isinstance(obj, dict):
            for key in obj.keys():
                new_value = obj.get(key)
                # Normalize labels to be all lower-case, since all field (column) names are lowercased.
                new_label = u"{}.{}".format(label, key.lower())
                self._add_event_info_recurse(event_dict, event_mapping, new_value, new_label)
        elif isinstance(obj, list):
            # We will not output any values that are stored in lists.
            pass
        else:
            # We assume it's a single object, and look it up now.
            if label in event_mapping:
                event_record_key, event_record_field = event_mapping[label]
                self._add_event_entry(event_dict, event_record_key, event_record_field, label, obj)

    def add_event_info(self, event_dict, event_mapping, event):
        """Populate event_dict from the raw event, using dotted labels rooted at 'root'."""
        self._add_event_info_recurse(event_dict, event_mapping, event, 'root')

    def add_calculated_event_entry(self, event_dict, event_record_key, obj):
        """Use this to explicitly add calculated entry values."""
        event_record_field = self.get_event_record_class().get_fields()[event_record_key]
        label = event_record_key
        self._add_event_entry(event_dict, event_record_key, event_record_field, label, obj)
class TrackingEventRecordDataTask(EventLogSelectionMixin, BaseEventRecordDataTask):
    """Task to compute event_type and event_source values being encountered on each day in a given time interval."""

    # Cache for the lazily-built mapping of dotted event labels to record
    # fields; populated on first call to get_event_mapping().
    event_mapping = None

    # All tracking-log events are attributed to this fixed "project".
    PROJECT_NAME = 'tracking_prod'

    counter_category_name = 'Tracking Event Exports'

    def get_event_emission_time(self, event):
        """Return the event's own timestamp, as extracted by the base selection mixin."""
        return super(TrackingEventRecordDataTask, self).get_event_time(event)

    def get_event_arrival_time(self, event):
        """Return the server receipt time, falling back to the emission time if absent."""
        try:
            return event['context']['received_at']
        except KeyError:
            return self.get_event_emission_time(event)

    def get_event_time(self, event):
        """Return the time used for batching this event into daily exports."""
        # Some events may emitted and stored for quite some time before actually being entered into the tracking logs.
        # The primary cause of this is mobile devices that go offline for a significant period of time. They will store
        # events locally and then when connectivity is restored transmit them to the server. We log the time that they
        # were received by the server and use that to batch them into exports since it is much simpler than trying to
        # inject them into past exports.
        return self.get_event_arrival_time(event)

    def get_event_mapping(self):
        """Return dictionary of event attributes to the output keys they map to."""
        if self.event_mapping is None:
            self.event_mapping = {}
            fields = self.get_event_record_class().get_fields()
            field_keys = fields.keys()
            for field_key in field_keys:
                field_tuple = (field_key, fields[field_key])

                def add_event_mapping_entry(source_key):
                    """Register a dotted source label for the current field."""
                    self.event_mapping[source_key] = field_tuple

                # Most common is to map first-level entries in event data directly.
                # Skip values that are explicitly set in EventRecord:
                if field_key in ['version', 'input_file', 'project', 'event_type', 'event_source', 'context_course_id', 'username']:
                    pass
                # Skip values that are explicitly set in JSONEventRecord:
                elif field_key in ['source', 'emitter_type', 'raw_event']:
                    pass
                # Skip values that are explicitly calculated rather than copied:
                elif field_key.startswith('agent_') or field_key in ['event_category', 'timestamp', 'received_at', 'date']:
                    pass
                elif self.uses_JSON_event_record() and field_key == 'course_id':
                    pass
                elif self.uses_JSON_event_record() and field_key == 'referrer':
                    add_event_mapping_entry('root.referer')
                elif self.uses_JSON_event_record() and field_key == 'user_id':
                    add_event_mapping_entry('root.context.user_id')
                # Handle special-cases:
                elif field_key == "currenttime":
                    # Collapse values from either form into a single column.  No event should have both,
                    # though there are event_types that have used both at different times.
                    add_event_mapping_entry('root.event.currenttime')
                    add_event_mapping_entry('root.event.current_time')
                elif field_key in ['discussion_id', 'response_id', 'options_followed']:
                    add_event_mapping_entry(u"root.event.{}".format(field_key.replace('_', '.')))
                elif field_key in ['app_name', 'app_version']:
                    add_event_mapping_entry(u"root.context.application.{}".format(field_key[len('app_'):]))
                elif field_key == "old_value":
                    add_event_mapping_entry('root.event.old')
                elif field_key == "new_value":
                    add_event_mapping_entry('root.event.new')
                # Map values that are top-level:
                elif field_key in ['host', 'ip', 'page', 'referer', 'session', 'agent', 'accept_language']:
                    add_event_mapping_entry(u"root.{}".format(field_key))
                elif field_key.startswith('context_module_'):
                    add_event_mapping_entry(u"root.context.module.{}".format(field_key[15:]))
                elif field_key.startswith('context_'):
                    add_event_mapping_entry(u"root.context.{}".format(field_key[8:]))
                elif field_key in ['event_user', 'event_username']:
                    add_event_mapping_entry(u"root.event.{}".format(field_key[6:]))
                else:
                    add_event_mapping_entry(u"root.event.{}".format(field_key))

        return self.event_mapping

    def mapper(self, line):
        """Map one raw tracking-log line to a ((date, project), record-values) pair, or nothing if discarded."""
        event, date_received = self.get_event_and_date_string(line) or (None, None)
        if event is None:
            return
        self.incr_counter(self.counter_category_name, 'Inputs with Dates', 1)

        event_type = event.get('event_type')
        if event_type is None:
            self.incr_counter(self.counter_category_name, 'Discard Missing Event Type', 1)
            return

        # Handle events that begin with a slash (i.e. implicit events).
        # * For JSON events, give them a marker event_type so we can more easily search (or filter) them,
        #   and treat the type as the URL that was called.
        # * For regular events, ignore those that begin with a slash (i.e. implicit events).
        event_url = None
        if event_type.startswith('/'):
            if self.uses_JSON_event_record():
                event_url = event_type
                event_type = 'edx.server.request'
            else:
                self.incr_counter(self.counter_category_name, 'Discard Implicit Events', 1)
                return

        # Guard against an explicit null username in the event: get() only
        # falls back to the default when the key is *absent*, so a present
        # None value would otherwise crash on .strip().
        username = (event.get('username') or '').strip()
        # if not username:
        #     return

        course_id = eventlog.get_course_id(event)
        # if not course_id:
        #     return

        event_data = eventlog.get_event_data(event)
        if event_data is None:
            self.incr_counter(self.counter_category_name, 'Discard Missing Event Data', 1)
            return
        # Put the fixed value back, so it can be properly mapped.
        event['event'] = event_data

        event_source = event.get('event_source')
        if event_source is None:
            self.incr_counter(self.counter_category_name, 'Discard Missing Event Source', 1)
            return

        project_name = self.PROJECT_NAME

        event_dict = {}

        # Fields common to both record schemas:
        self.add_calculated_event_entry(event_dict, 'input_file', self.get_map_input_file())
        self.add_calculated_event_entry(event_dict, 'event_type', event_type)
        self.add_calculated_event_entry(event_dict, 'timestamp', self.get_event_emission_time(event))
        self.add_calculated_event_entry(event_dict, 'received_at', self.get_event_arrival_time(event))
        self.add_calculated_event_entry(event_dict, 'date', self.convert_date(date_received))
        self.add_calculated_event_entry(event_dict, 'username', username)
        self.add_agent_info(event_dict, event.get('agent'))

        if self.uses_JSON_event_record():
            # Add a check in the payload for a course_id -- it is sometimes there
            # instead of in context.
            if not course_id and event_data.get('course_id'):
                course_id = event_data.get('course_id')
            # was project
            self.add_calculated_event_entry(event_dict, 'source', project_name)
            # was event_source
            self.add_calculated_event_entry(event_dict, 'emitter_type', event_source)
            # was context_course_id
            self.add_calculated_event_entry(event_dict, 'course_id', course_id)
            self.add_calculated_event_entry(event_dict, 'raw_event', json.dumps(event, sort_keys=True))
            # Additional fields:
            # The event_url is the original event_type of implicit events.
            if event_url is not None:
                self.add_calculated_event_entry(event_dict, 'url', event_url)
            # Try to extract information from course_id.
            if course_id is not None:
                org_id = get_org_id_for_course(course_id)
                if org_id:
                    self.add_calculated_event_entry(event_dict, 'org_id', org_id)
        else:
            if (event_source, event_type) in self.known_events:
                event_category = self.known_events[(event_source, event_type)]
            else:
                event_category = 'unknown'
            self.add_calculated_event_entry(event_dict, 'version', VERSION)
            self.add_calculated_event_entry(event_dict, 'project', project_name)
            self.add_calculated_event_entry(event_dict, 'event_source', event_source)
            self.add_calculated_event_entry(event_dict, 'event_category', event_category)
            self.add_calculated_event_entry(event_dict, 'context_course_id', course_id)

        event_mapping = self.get_event_mapping()
        self.add_event_info(event_dict, event_mapping, event)

        record = self.get_event_record_class()(**event_dict)

        key = (date_received, project_name)
        self.incr_counter(self.counter_category_name, 'Output From Mapper', 1)

        # Convert to form for output by reducer here,
        # so that reducer doesn't do any conversion.
        # yield key, record.to_string_tuple()
        yield key, record.to_separated_values()
class SegmentEventLogSelectionDownstreamMixin(EventLogSelectionDownstreamMixin):
    """Defines parameters for passing upstream to tasks that use SegmentEventLogSelectionMixin."""

    # Overrides the inherited parameter to read from the segment-logs config section.
    source = luigi.ListParameter(
        config_path={'section': 'segment-logs', 'name': 'source'},
        description='A URL to a path that contains log files that contain the events. (e.g., s3://my_bucket/foo/). Segment-logs',
    )
    # Overrides the inherited parameter to read from the segment-logs config section.
    pattern = luigi.ListParameter(
        config_path={'section': 'segment-logs', 'name': 'pattern'},
        description='A regex with a named capture group for the date or timestamp that approximates the date that the events '
        'within were emitted. Note that the search interval is expanded, so events don\'t have to be in exactly '
        'the right file in order for them to be processed. Segment-logs',
    )
class SegmentEventLogSelectionMixin(SegmentEventLogSelectionDownstreamMixin, EventLogSelectionMixin):
    """Combines segment-log source/pattern parameters with standard event-log selection behavior."""
    pass
class SegmentEventRecordDataTask(SegmentEventLogSelectionMixin, BaseEventRecordDataTask):
    """Task to compute event_type and event_source values being encountered on each day in a given time interval."""

    # Project information, pulled from config file.
    # NOTE(review): these are class-level mutable attributes, so the cache is
    # shared across instances in one process -- confirm this is intentional.
    project_names = {}
    config = None

    # Cache for the lazily-built mapping of dotted event labels to record fields.
    event_mapping = None

    counter_category_name = 'Segment Event Exports'

    # TODO: this never actually worked in a cluster.  Figure out how to get it to work.
    def _get_project_name(self, project_id):
        """Look up (and cache) the human-readable project name for a segment project id."""
        if project_id not in self.project_names:
            if self.config is None:
                self.config = get_config()
            section_name = 'segment:' + project_id
            project_name = self.config.get(section_name, 'project_name', None)
            self.project_names[project_id] = project_name
        return self.project_names[project_id]

    def _get_time_from_segment_event(self, event, key):
        """
        Return the normalized ISO-8601 UTC time stored under `key` in the event,
        or None if it is missing or unparseable.  Parsing failures are logged
        and counted rather than raised; a slower fallback parser is tried
        before giving up.
        """
        try:
            event_time = event[key]
            event_time = self.normalize_time(event_time)
            if event_time is None:
                # Try again, with a more powerful (and more flexible) parser.
                try:
                    event_time = self.extended_normalize_time(event[key])
                    if event_time is None:
                        log.error("Really unparseable %s time from event: %r", key, event)
                        self.incr_counter(self.counter_category_name, 'Quality Unparseable {} Time Field'.format(key), 1)
                    else:
                        # Log this for now, until we have confidence this is reasonable.
                        log.warning("Parsable unparseable type for %s time in event: %r", key, event)
                        self.incr_counter(self.counter_category_name, 'Quality Parsable unparseable for {} Time Field'.format(key), 1)
                except Exception:
                    # This was commented out in the JSON event code because presumably it was happening a lot
                    # in cases where it was using multipe key values (e.g. get_event_arrival_time).
                    log.error("Unparseable %s time from event: %r", key, event)
                    self.incr_counter(self.counter_category_name, 'Quality Unparseable {} Time Field'.format(key), 1)
            return event_time
        except KeyError:
            log.error("Missing %s time from event: %r", key, event)
            self.incr_counter(self.counter_category_name, 'Quality Missing {} Time Field'.format(key), 1)
            return None
        except TypeError:
            log.error("Bad type for %s time in event: %r", key, event)
            self.incr_counter(self.counter_category_name, 'Quality Bad type for {} Time Field'.format(key), 1)
            return None
        except UnicodeEncodeError:
            # This is more specific than ValueError, so it is processed first.
            log.error("Bad encoding for %s time in event: %r", key, event)
            self.incr_counter(self.counter_category_name, 'Quality Bad encoding for {} Time Field'.format(key), 1)
            return None
        except ValueError:
            # Try again, with a more powerful (and more flexible) parser.
            try:
                event_time = self.extended_normalize_time(event[key])
                if event_time is None:
                    log.error("Unparseable %s time from event: %r", key, event)
                    self.incr_counter(self.counter_category_name, 'Quality Unparseable {} Time Field'.format(key), 1)
                else:
                    # Log this for now, until we have confidence this is reasonable.
                    log.warning("Parsable bad value for %s time in event: %r", key, event)
                    self.incr_counter(self.counter_category_name, 'Quality Parsable bad value for {} Time Field'.format(key), 1)
                return event_time
            except Exception:
                log.error("Bad value for %s time in event: %r", key, event)
                self.incr_counter(self.counter_category_name, 'Quality Bad value for {} Time Field'.format(key), 1)
                return None

    def get_event_arrival_time(self, event):
        """Return the best-available receipt time for a segment event, or None."""
        if 'receivedAt' in event:
            return self._get_time_from_segment_event(event, 'receivedAt')
        if 'requestTime' in event:
            self.incr_counter(self.counter_category_name, 'Event arrival from requestTime', 1)
            return self._get_time_from_segment_event(event, 'requestTime')
        if 'timestamp' in event:
            self.incr_counter(self.counter_category_name, 'Event arrival from timestamp', 1)
            return self._get_time_from_segment_event(event, 'timestamp')
        self.incr_counter(self.counter_category_name, 'Event arrival not set', 1)
        log.error("Missing event arrival time in event '%r'", event)
        return None

    def get_event_emission_time(self, event):
        """Return the client-side send time ('sentAt') of the event, or None."""
        return self._get_time_from_segment_event(event, 'sentAt')

    def get_event_time(self, event):
        """
        Returns time information from event if present, else returns None.

        Overrides base class implementation to get correct timestamp
        used by get_event_and_date_string(line).
        """
        # TODO: clarify which value should be used.
        # "originalTimestamp" is almost "sentAt".  "timestamp" is
        # almost "receivedAt".  Order is (probably)
        # "originalTimestamp" < "sentAt" < "timestamp" < "receivedAt".
        return self.get_event_arrival_time(event)

    def get_event_mapping(self):
        """Return dictionary of event attributes to the output keys they map to."""
        if self.event_mapping is None:
            self.event_mapping = {}
            fields = self.get_event_record_class().get_fields()
            field_keys = fields.keys()
            for field_key in field_keys:
                field_tuple = (field_key, fields[field_key])

                def add_event_mapping_entry(source_key):
                    """Register a dotted source label for the current field."""
                    self.event_mapping[source_key] = field_tuple

                # Most common is to map first-level entries in event data directly.
                # Skip values that are explicitly set:
                if field_key in ['version', 'input_file', 'project', 'event_type', 'event_source']:
                    pass
                # Skip values that are explicitly calculated rather than copied:
                elif field_key.startswith('agent_') or field_key in ['event_category', 'timestamp', 'received_at', 'date']:
                    pass
                # Skip values that are explicitly set or calculated for JSONEventRecord:
                elif field_key in ['emitter_type', 'source', 'raw_event']:
                    pass
                # Map values that are top-level:
                elif field_key in ['channel']:
                    add_event_mapping_entry(u"root.{}".format(field_key))
                elif field_key in ['anonymous_id']:
                    add_event_mapping_entry(u"root.context.anonymousid")
                    add_event_mapping_entry("root.anonymousid")
                    add_event_mapping_entry(u"root.context.traits.anonymousid")
                    add_event_mapping_entry(u"root.traits.anonymousid")
                elif field_key in ['agent']:
                    add_event_mapping_entry(u"root.context.useragent")
                    add_event_mapping_entry(u"root.properties.context.agent")
                elif field_key in ['course_id']:
                    # This is sometimes a course, but not always.
                    # add_event_mapping_entry(u"root.properties.label")
                    add_event_mapping_entry(u"root.properties.courseid")
                    add_event_mapping_entry(u"root.properties.course_id")
                    add_event_mapping_entry(u"root.properties.course")
                    add_event_mapping_entry(u"root.properties.data.course_id")
                    add_event_mapping_entry(u"root.properties.data.course-id")
                    add_event_mapping_entry(u"root.properties.context.course_id")
                elif field_key in ['username']:
                    add_event_mapping_entry(u"root.traits.username")
                    add_event_mapping_entry(u"root.properties.context.username")
                    add_event_mapping_entry(u"root.context.traits.username")
                elif field_key in ['client_id', 'host', 'session', 'referer']:
                    add_event_mapping_entry(u"root.properties.context.{}".format(field_key))
                elif field_key in ['user_id']:
                    add_event_mapping_entry(u"root.context.user_id")
                    # I think this is more often a username than an id.
                    # TODO: figure it out later...  Exception is type=page,
                    # for which it's an id?  No, that's not consistent,
                    # even for the same projectId.  We may need more complicated
                    # logic to help sort that out (more) consistently.
                    add_event_mapping_entry(u"root.userid")
                    add_event_mapping_entry(u"root.properties.context.user_id")
                    add_event_mapping_entry(u"root.properties.data.user_id")
                    add_event_mapping_entry(u"root.context.traits.userid")
                    add_event_mapping_entry(u"root.traits.userid")
                elif field_key in [
                        'os_name', 'os_version', 'app_name', 'app_version', 'device_manufacturer',
                        'device_model', 'network_carrier', 'screen_width', 'screen_height',
                        'campaign_source', 'campaign_medium', 'campaign_content', 'campaign_name'
                ]:
                    add_event_mapping_entry(u"root.context.{}".format(field_key.replace('_', '.')))
                elif field_key in ['action']:
                    add_event_mapping_entry(u"root.properties.{}".format(field_key))
                elif field_key in ['locale', 'ip', 'timezone']:
                    add_event_mapping_entry(u"root.context.{}".format(field_key))
                    add_event_mapping_entry(u"root.properties.context.{}".format(field_key))
                elif field_key in ['path', 'referrer', 'search', 'title', 'url', 'variationname', 'variationid', 'experimentid', 'experimentname', 'category', 'label', 'display_name']:
                    add_event_mapping_entry(u"root.properties.{}".format(field_key))
                    add_event_mapping_entry(u"root.context.page.{}".format(field_key))
                    add_event_mapping_entry(u"root.properties.context.page.{}".format(field_key))
                    add_event_mapping_entry(u"root.data.{}".format(field_key))
                else:
                    pass

        return self.event_mapping

    def mapper(self, line):
        """Map one raw segment-log line to a ((date, project), record-values) pair, or nothing if discarded."""
        self.incr_counter(self.counter_category_name, 'Inputs', 1)
        value = self.get_event_and_date_string(line)
        if value is None:
            return
        event, date_received = value
        self.incr_counter(self.counter_category_name, 'Inputs with Dates', 1)

        segment_type = event.get('type')
        if segment_type is None and 'action' in event:
            # NOTE(review): if the event carries an explicit null 'action' value,
            # .lower() would raise AttributeError here -- confirm upstream data.
            segment_type = event.get('action').lower()
        self.incr_counter(self.counter_category_name, u'Subset Type {}'.format(segment_type), 1)

        channel = event.get('channel')
        self.incr_counter(self.counter_category_name, u'Subset Channel {}'.format(channel), 1)

        if segment_type == 'track':
            event_type = event.get('event')

            if event_type is None or date_received is None:
                # Ignore if any of the keys is None
                self.incr_counter(self.counter_category_name, 'Discard Tracking with missing type', 1)
                return

            if event_type.startswith('/'):
                # Ignore events that begin with a slash.  How many?
                self.incr_counter(self.counter_category_name, 'Discard Tracking with implicit type', 1)
                return

            # Not all 'track' events have event_source information.  In particular, edx.bi.XX events.
            # Their 'properties' lack any 'context', having only label and category.
            event_category = event.get('properties', {}).get('category')
            if channel == 'server':
                event_source = event.get('properties', {}).get('context', {}).get('event_source')
                if event_source is None:
                    event_source = 'track-server'
                elif (event_source, event_type) in self.known_events:
                    event_category = self.known_events[(event_source, event_type)]
                self.incr_counter(self.counter_category_name, 'Subset Type track And Channel server', 1)
            else:
                # expect that channel is 'client'.
                event_source = channel
                self.incr_counter(self.counter_category_name, 'Subset Type track And Channel Not server', 1)
        else:
            # type is 'page' or 'identify' or 'screen'
            event_category = segment_type
            event_type = segment_type
            event_source = channel

        project_id = event.get('projectId')
        project_name = self._get_project_name(project_id) or project_id
        self.incr_counter(self.counter_category_name, u'Subset Project {}'.format(project_name), 1)

        event_dict = {}

        # Fields common to both record schemas:
        self.add_calculated_event_entry(event_dict, 'input_file', self.get_map_input_file())
        self.add_calculated_event_entry(event_dict, 'event_type', event_type)
        self.add_calculated_event_entry(event_dict, 'timestamp', self.get_event_emission_time(event))
        self.add_calculated_event_entry(event_dict, 'received_at', self.get_event_arrival_time(event))
        self.add_calculated_event_entry(event_dict, 'date', self.convert_date(date_received))

        # An issue with the original logic:  if a key exists and contains a value of None, then None will
        # be returned instead of an empty dict specified as the default, and the next get() will fail.
        # So check specifically for non-false values.
        # self.add_agent_info(event_dict, event.get('context', {}).get('userAgent'))
        # self.add_agent_info(event_dict, event.get('properties', {}).get('context', {}).get('agent'))
        if event.get('context'):
            self.add_agent_info(event_dict, event.get('context').get('userAgent'))
        properties = event.get('properties')
        if properties and properties.get('context'):
            self.add_agent_info(event_dict, properties.get('context').get('agent'))

        if self.uses_JSON_event_record():
            self.add_calculated_event_entry(event_dict, 'source', project_name)  # was 'project'
            self.add_calculated_event_entry(event_dict, 'emitter_type', event_source)  # was 'event_source'
            # TODO: figure out why we check for this here, and not much earlier.  Why would
            # it be in event_type, but not in event_dict??  And why so bad if it's not found?
            # Is it required and cannot be 'None'?
            if event_dict.get("event_type") is None:
                self.incr_counter(self.counter_category_name, 'Missing event_type field', 1)
                return
            self.add_calculated_event_entry(event_dict, 'raw_event', json.dumps(event, sort_keys=True))
        else:
            self.add_calculated_event_entry(event_dict, 'version', VERSION)
            self.add_calculated_event_entry(event_dict, 'project', project_name)
            self.add_calculated_event_entry(event_dict, 'event_source', event_source)
            self.add_calculated_event_entry(event_dict, 'event_category', event_category)

        event_mapping = self.get_event_mapping()
        self.add_event_info(event_dict, event_mapping, event)

        if self.uses_JSON_event_record():
            # Try harder to extract course_id and related information.
            course_id = event_dict.get('course_id')
            if course_id is None:
                # course_id may be stored in 'label', so try to parse what is there.
                label = event_dict.get('label')
                if label and is_valid_course_id(label):
                    self.add_calculated_event_entry(event_dict, 'course_id', label)
                    course_id = event_dict.get('course_id')
            if course_id is None:
                # course_id may be extractable from 'url' in the usual
                # way, so try to parse what is there.
                url = event_dict.get('url')
                course_key = get_course_key_from_url(url)
                if course_key:
                    course_id = unicode(course_key)
                    self.add_calculated_event_entry(event_dict, 'course_id', course_id)
                elif url:
                    # course_id may be extractable from 'url' by looking for the
                    # version string and plus-delimiters explicitly anywhere in the URL.
                    match = NEW_COURSE_REGEX.match(url)
                    if match:
                        course_id_string = match.group('course_id')
                        if is_valid_course_id(course_id_string):
                            self.add_calculated_event_entry(event_dict, 'course_id', course_id_string)
                            course_id = event_dict.get('course_id')
            if course_id is not None:
                org_id = get_org_id_for_course(course_id)
                if org_id:
                    self.add_calculated_event_entry(event_dict, 'org_id', org_id)

        record = self.get_event_record_class()(**event_dict)

        key = (date_received, project_name)
        self.incr_counter(self.counter_category_name, 'Output From Mapper', 1)

        # Convert to form for output by reducer here,
        # so that reducer doesn't do any conversion.
        # yield key, record.to_string_tuple()
        yield key, record.to_separated_values()
##########################
# Bulk Loading into S3
##########################
class BulkEventRecordIntervalTask(EventRecordDownstreamMixin, luigi.WrapperTask):
    """Compute event information over a range of dates and insert the results into Hive."""

    interval = luigi.DateIntervalParameter(
        description='The range of dates for which to create event records.',
    )

    def requires(self):
        kwargs = {
            'output_root': self.warehouse_path,
            'events_list_file_path': self.events_list_file_path,
            'n_reduce_tasks': self.n_reduce_tasks,
            'interval': self.interval,
            'event_record_type': self.event_record_type,
        }
        yield (
            TrackingEventRecordDataTask(**kwargs),
            SegmentEventRecordDataTask(**kwargs),
        )

    def output(self):
        # requires() yields a single *tuple* of tasks, so iterating it directly
        # would call .output() on the tuple itself and raise AttributeError.
        # Flatten first, mirroring LoadEventRecordIntervalToBigQuery.complete().
        return [task.output() for task in luigi.task.flatten(self.requires())]
##########################
# Loading into S3 by Date
##########################
class PerDateEventRecordDataDownstreamMixin(EventRecordDataDownstreamMixin):
    """Common parameters and base classes used to pass parameters through the event record workflow."""

    # Required parameter
    date = luigi.DateParameter(
        description='Upper bound date for the end of the interval to analyze. Data produced before 00:00 on this'
                    ' date will be analyzed. This workflow is intended to run nightly and this parameter is intended'
                    ' to be set to "today\'s" date, so that all of yesterday\'s data is included and none of today\'s.'
    )

    # Override superclass to disable this parameter
    interval = None
class PerDateEventRecordDataMixin(PerDateEventRecordDataDownstreamMixin):
    """Converts the single `date` parameter into a one-day `interval` for data tasks."""

    def __init__(self, *args, **kwargs):
        # NOTE(review): super() is called with BaseEventRecordDataTask rather
        # than with this mixin, which skips any classes that sit between this
        # mixin and BaseEventRecordDataTask in the MRO of the concrete
        # subclasses below.  This looks deliberate but confirm before changing.
        super(BaseEventRecordDataTask, self).__init__(*args, **kwargs)
        # The interval is exactly the single day being processed.
        self.interval = luigi.date_interval.Date.from_date(self.date)

    def output_path_for_key(self, key):
        """
        Output based on project.

        Output is in the form {warehouse_path}/event_records/dt={CCYY-MM-DD}/{project}.tsv,
        but output_root is assumed to be set externally to {warehouse_path}/event_records/dt={CCYY-MM-DD}.
        """
        # The date part of the key is unused here: it is already encoded in output_root.
        _date_received, project = key
        return url_path_join(
            self.output_root,
            '{project}.tsv'.format(project=project),
        )
class PerDateTrackingEventRecordDataTask(PerDateEventRecordDataMixin, TrackingEventRecordDataTask):
    """Writes tracking-log event records for a single date (see PerDateEventRecordDataMixin)."""
    pass


class PerDateSegmentEventRecordDataTask(PerDateEventRecordDataMixin, SegmentEventRecordDataTask):
    """Writes Segment event records for a single date (see PerDateEventRecordDataMixin)."""
    pass
class PerDateGeneralEventRecordDataTask(PerDateEventRecordDataDownstreamMixin, luigi.WrapperTask):
    """Runs all Event Record tasks for a given time interval."""

    def requires(self):
        # Both per-date data tasks take the same parameter set.
        common_params = dict(
            event_record_type=self.event_record_type,
            output_root=self.output_root,
            events_list_file_path=self.events_list_file_path,
            n_reduce_tasks=self.n_reduce_tasks,
            date=self.date,
        )
        yield (
            PerDateTrackingEventRecordDataTask(**common_params),
            PerDateSegmentEventRecordDataTask(**common_params),
        )
class EventRecordTableTask(EventRecordClassMixin, BareHiveTableTask):
    """The hive table for event_record data."""

    @property
    def table(self):
        """Name of the Hive table that holds event records."""
        return EVENT_TABLE_NAME

    @property
    def partition_by(self):
        """Partition the table on the received date."""
        return 'dt'

    @property
    def columns(self):
        """Hive column schema, derived from the configured event record class."""
        return self.get_event_record_class().get_hive_schema()
class EventRecordPartitionTask(EventRecordDownstreamMixin, HivePartitionTask):
    """The hive table partition for this engagement data."""

    # The single day covered by this partition.
    date = luigi.DateParameter()
    # Disable the interval parameter inherited from the mixin: this task is strictly per-date.
    interval = None

    @property
    def partition_value(self):
        """Use a dynamic partition value based on the date parameter."""
        return self.date.isoformat()  # pylint: disable=no-member

    @property
    def hive_table_task(self):
        # Task that defines the (unpartitioned) Hive table this partition belongs to.
        return EventRecordTableTask(
            event_record_type=self.event_record_type,
            warehouse_path=self.warehouse_path,
            # overwrite=self.overwrite,
        )

    @property
    def data_task(self):
        # Task that produces the underlying data files for this partition.
        return PerDateGeneralEventRecordDataTask(
            event_record_type=self.event_record_type,
            date=self.date,
            n_reduce_tasks=self.n_reduce_tasks,
            output_root=self.partition_location,
            # overwrite=self.overwrite,
            events_list_file_path=self.events_list_file_path,
        )
class EventRecordIntervalTask(EventRecordDownstreamMixin, luigi.WrapperTask):
    """Compute event information over a range of dates and insert the results into Hive."""

    interval = luigi.DateIntervalParameter(
        description='The range of received dates for which to create event records.',
    )

    def requires(self):
        # Schedule the most recent dates first.
        dates = list(self.interval)  # pylint: disable=not-an-iterable
        for date in reversed(dates):
            # should_overwrite = date >= self.overwrite_from_date
            yield EventRecordPartitionTask(
                event_record_type=self.event_record_type,
                date=date,
                n_reduce_tasks=self.n_reduce_tasks,
                warehouse_path=self.warehouse_path,
                # overwrite=should_overwrite,
                # overwrite_from_date=self.overwrite_from_date,
                events_list_file_path=self.events_list_file_path,
            )

    def output(self):
        return [task.output() for task in self.requires()]

    def get_raw_data_tasks(self):
        """
        A generator that iterates through all tasks used to generate the data in each partition in the interval.

        This can be used by downstream map reduce jobs to read all of the raw data.
        """
        for task in self.requires():
            if isinstance(task, EventRecordPartitionTask):
                yield task.data_task
##########################
# Loading into Vertica
##########################
class LoadDailyEventRecordToVertica(EventRecordDownstreamMixin, VerticaCopyTask):
    """Copies one day's worth of event records from the Hive partition location into Vertica."""

    # Required parameter
    date = luigi.DateParameter()

    @property
    def partition(self):
        """The table is partitioned by date."""
        return HivePartition('dt', self.date.isoformat())  # pylint: disable=no-member

    @property
    def insert_source_task(self):
        # For now, let's just get by with ExternalURL.
        hive_table = EVENT_TABLE_NAME
        partition_location = url_path_join(self.warehouse_path, hive_table, self.partition.path_spec) + '/'
        return ExternalURL(url=partition_location)

    @property
    def table(self):
        # Destination table name in Vertica.
        return EVENT_TABLE_NAME

    # Just use the default default:  "created"
    # @property
    # def default_columns(self):
    #     """List of tuples defining name and definition of automatically-filled columns."""
    #     return None

    @property
    def auto_primary_key(self):
        # The default is to use 'id', which would cause a conflict with field already having that name.
        # But there seems to be little value in having such a column.
        return None

    @property
    def columns(self):
        # SQL column schema derived from the configured event record class.
        return self.get_event_record_class().get_sql_schema()

    @property
    def table_partition_key(self):
        # Vertica-side partition key column.
        return 'date'
class LoadEventRecordIntervalToVertica(EventRecordDownstreamMixin, VerticaCopyTaskMixin, luigi.WrapperTask):
    """
    Loads the event records table from Hive into the Vertica data warehouse.
    """
    interval = luigi.DateIntervalParameter(
        description='The range of received dates for which to create event records.',
    )

    def requires(self):
        # Load the most recent dates first.
        dates = list(self.interval)  # pylint: disable=not-an-iterable
        for date in reversed(dates):
            # should_overwrite = date >= self.overwrite_from_date
            yield LoadDailyEventRecordToVertica(
                event_record_type=self.event_record_type,
                date=date,
                n_reduce_tasks=self.n_reduce_tasks,
                warehouse_path=self.warehouse_path,
                events_list_file_path=self.events_list_file_path,
                schema=self.schema,
                credentials=self.credentials,
            )

    def output(self):
        return [task.output() for task in self.requires()]
class EventRecordLoadDownstreamMixin(EventRecordDownstreamMixin):
    """Define parameters for entrypoint for loading events."""

    interval = luigi.DateIntervalParameter(
        description='The range of dates for which to load event records.',
    )

    # When left at the default (None), no partitions are pruned.
    retention_interval = luigi.TimeDeltaParameter(
        config_path={'section': 'vertica-export', 'name': 'event_retention_interval'},
        description='The number of days of events to retain in Vertica. If not set, no pruning will occur.',
        default=None,
    )
class PruneEventPartitionsInVertica(EventRecordLoadDownstreamMixin, SchemaManagementTask):
    """Drop partitions that are beyond a specified retention interval."""

    # Mask date parameter from SchemaManagementTask so that it is not required.
    date = None

    # Date of earliest current record in Vertica.  Once calculated (in run(), before
    # the queries execute), this is used to create queries to delete the excess partitions.
    earliest_date = None

    # Override the standard roles here since these tables will be rather raw.  We may want
    # to restrict access to a subset of users.
    roles = luigi.ListParameter(
        config_path={'section': 'vertica-export', 'name': 'restricted_roles'},
    )

    def requires(self):
        # Load the interval's records first, and fetch DB credentials for the pruning step.
        return {
            'source': LoadEventRecordIntervalToVertica(
                event_record_type=self.event_record_type,
                interval=self.interval,
                n_reduce_tasks=self.n_reduce_tasks,
                warehouse_path=self.warehouse_path,
                events_list_file_path=self.events_list_file_path,
                schema=self.schema,
                credentials=self.credentials,
            ),
            'credentials': ExternalURL(self.credentials)
        }

    @property
    def queries(self):
        # Always (re-)issue the access grants; optionally append one DROP_PARTITION
        # statement per day older than the retention window.
        # NOTE(review): the grants use self.vertica_roles while the parameter declared
        # above is `roles` -- presumably vertica_roles is derived in SchemaManagementTask;
        # confirm.
        query_list = [
            "GRANT USAGE ON SCHEMA {schema} TO {roles};".format(schema=self.schema, roles=self.vertica_roles),
            "GRANT SELECT ON ALL TABLES IN SCHEMA {schema} TO {roles};".format(
                schema=self.schema,
                roles=self.vertica_roles
            ),
        ]
        # Check for pruning.
        if self.interval and self.earliest_date and self.retention_interval:
            # Everything from the earliest stored date up to (but excluding) this date is dropped.
            earliest_date_to_retain = self.interval.date_b - self.retention_interval
            split_date = self.earliest_date.split('-')
            earliest_date = datetime.date(int(split_date[0]), int(split_date[1]), int(split_date[2]))
            pruning_interval = DateInterval(earliest_date, earliest_date_to_retain)
            log.debug("Looking to prune partitions from %s up to but not including %s", earliest_date, earliest_date_to_retain)
            for date in pruning_interval:
                query_list.append(
                    "SELECT DROP_PARTITION('{schema}.{table}', '{date}');".format(
                        schema=self.schema,
                        table=EVENT_TABLE_NAME,
                        date=date,
                    )
                )
        else:
            log.warning("No pruning of event records:  missing parameters:  earliest date=%s, retention_interval=%s ",
                        self.earliest_date, self.retention_interval)
        return query_list

    @property
    def marker_name(self):
        # Unique marker per interval end-date, so the task can re-run for new intervals.
        return 'prune_event_partitions' + self.interval.date_b.strftime('%Y-%m-%d')

    def run(self):
        # First figure out what needs pruning.
        connection = self.output().connect()
        cursor = connection.cursor()
        query = "SELECT min(date) FROM {schema}.{table}".format(
            schema=self.schema,
            table=EVENT_TABLE_NAME,
        )
        log.debug(query)
        cursor.execute(query)
        row = cursor.fetchone()
        # NOTE(review): on an empty table, min() likely returns a (None,) row rather
        # than no row at all, so this guard may not fire as intended -- verify.
        if row is None:
            connection.close()
            raise Exception('Failed to find data in table: {schema}.{table}'.format(schema=self.schema, table=EVENT_TABLE_NAME))
        self.earliest_date = row[0]
        log.debug("Found earliest date for data in table: %s", self.earliest_date)
        connection.close()
        # Then execute the grants and the pruning queries.
        super(PruneEventPartitionsInVertica, self).run()
class LoadEventsIntoWarehouseWorkflow(EventRecordLoadDownstreamMixin, VerticaCopyTaskMixin, luigi.WrapperTask):
    """
    Provides entry point for loading event data into warehouse.
    """

    def requires(self):
        # The prune task itself depends on the interval load, so requiring it
        # pulls in the whole load-then-prune pipeline.
        prune_kwargs = dict(
            event_record_type=self.event_record_type,
            interval=self.interval,
            n_reduce_tasks=self.n_reduce_tasks,
            warehouse_path=self.warehouse_path,
            events_list_file_path=self.events_list_file_path,
            schema=self.schema,
            credentials=self.credentials,
        )
        return PruneEventPartitionsInVertica(**prune_kwargs)
##########################
# Loading into BigQuery
##########################
class LoadDailyEventRecordToBigQuery(EventRecordDownstreamMixin, BigQueryLoadTask):
    """Copies one day's worth of event records into BigQuery."""

    @property
    def table(self):
        # JSON-based event records land in a separate table.
        return 'json_event_records' if self.uses_JSON_event_record() else 'event_records'

    @property
    def partitioning_type(self):
        """Set to 'DAY' in order to partition by day."""
        return 'DAY'

    @property
    def schema(self):
        """BigQuery schema derived from the configured event record class."""
        return self.get_event_record_class().get_bigquery_schema()

    @property
    def insert_source_task(self):
        # Read the data for this date from the event-record Hive partition location.
        return ExternalURL(url=self.hive_partition_path(EVENT_TABLE_NAME, self.date))
class LoadEventRecordIntervalToBigQuery(EventRecordDownstreamMixin, BigQueryLoadDownstreamMixin, luigi.WrapperTask):
    """
    Loads the event records table from Hive into the BigQuery data warehouse.
    """
    interval = luigi.DateIntervalParameter(
        description='The range of dates for which to create event records.',
    )

    def requires(self):
        # Load the most recent dates first.
        dates = list(self.interval)  # pylint: disable=not-an-iterable
        for date in reversed(dates):
            # should_overwrite = date >= self.overwrite_from_date
            yield LoadDailyEventRecordToBigQuery(
                event_record_type=self.event_record_type,
                date=date,
                n_reduce_tasks=self.n_reduce_tasks,
                warehouse_path=self.warehouse_path,
                events_list_file_path=self.events_list_file_path,
                overwrite=self.overwrite,
                dataset_id=self.dataset_id,
                credentials=self.credentials,
                max_bad_records=self.max_bad_records,
            )

    def output(self):
        return [task.output() for task in self.requires()]

    def complete(self):
        # OverwriteOutputMixin changes the complete() method behavior, so we override it.
        return all(r.complete() for r in luigi.task.flatten(self.requires()))
| agpl-3.0 |
jswope00/GAI | common/lib/symmath/symmath/symmath_check.py | 67 | 12559 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# File: symmath_check.py
# Date: 02-May-12 (creation)
#
# Symbolic mathematical expression checker for edX. Uses sympy to check for expression equality.
#
# Takes in math expressions given as Presentation MathML (from ASCIIMathML), converts to Content MathML using SnuggleTeX
import traceback
from .formula import *
import logging
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# check function interface
#
# This is one of the main entry points to call.
def symmath_check_simple(expect, ans, adict={}, symtab=None, extra_options=None):
    '''
    Check a symbolic mathematical expression using sympy.
    The input is an ascii string (not MathML) converted to math using sympy.sympify.

    expect: instructor-provided sympy string; may embed option flags
        (__MATRIX__, __ABC__, __LOWER__) which are stripped here.
    ans: student-provided ascii math string.
    adict: unused; kept for interface compatibility with other checkers.
    symtab: optional symbol table passed through to check().
    extra_options: optional dict overriding the default option flags.

    Returns a dict with 'ok' (bool) and 'msg' (HTML string).
    '''
    options = {'__MATRIX__': False, '__ABC__': False, '__LOWER__': False}
    if extra_options:
        options.update(extra_options)
    # Option flags are embedded in the expected-answer string itself; strip
    # each one found and remember that it was set.
    for op in options:
        if op in expect:
            expect = expect.replace(op, '')
            options[op] = True
    expect = expect.replace('__OR__', '__or__')  # backwards compatibility

    if options['__LOWER__']:
        # Case-insensitive comparison.
        expect = expect.lower()
        ans = ans.lower()

    try:
        ret = check(expect, ans,
                    matrix=options['__MATRIX__'],
                    abcsym=options['__ABC__'],
                    symtab=symtab,
                    )
    except Exception as err:  # was "except Exception, err" (Python-2-only syntax)
        return {'ok': False,
                'msg': 'Error %s<br/>Failed in evaluating check(%s,%s)' % (err, expect, ans)
                }
    return ret
#-----------------------------------------------------------------------------
# pretty generic checking function
def check(expect, given, numerical=False, matrix=False, normphase=False, abcsym=False, do_qubit=True, symtab=None, dosimplify=False):
    """
    Check a student expression against an expected one using sympy.

    Returns dict with

        'ok': True if check is good, False otherwise
        'msg': response message (in HTML)

    "expect" may have multiple possible acceptable answers, separated by "__OR__".
    It may also embed "__autonorm__" (normalize matrix traces before comparing)
    and "__threshold__<float>" (numerical comparison with the given threshold).
    """
    if "__or__" in expect:  # if multiple acceptable answers
        eset = expect.split('__or__')  # then see if any match
        for eone in eset:
            ret = check(eone, given, numerical, matrix, normphase, abcsym, do_qubit, symtab, dosimplify)
            if ret['ok']:
                return ret
        # No alternative matched: return the result of the last one tried.
        return ret

    flags = {}
    if "__autonorm__" in expect:
        flags['autonorm'] = True
        expect = expect.replace('__autonorm__', '')
        matrix = True

    threshold = 1.0e-3
    if "__threshold__" in expect:
        (expect, st) = expect.split('__threshold__')
        threshold = float(st)
        numerical = True

    # An empty student answer never matches a non-empty expected answer.
    if str(given) == '' and not (str(expect) == ''):
        return {'ok': False, 'msg': ''}

    try:
        xgiven = my_sympify(given, normphase, matrix, do_qubit=do_qubit, abcsym=abcsym, symtab=symtab)
    except Exception as err:  # was Python-2-only "except Exception, err"
        return {'ok': False, 'msg': 'Error %s<br/> in evaluating your expression "%s"' % (err, given)}

    try:
        xexpect = my_sympify(expect, normphase, matrix, do_qubit=do_qubit, abcsym=abcsym, symtab=symtab)
    except Exception as err:
        return {'ok': False, 'msg': 'Error %s<br/> in evaluating OUR expression "%s"' % (err, expect)}

    if 'autonorm' in flags:  # normalize trace of matrices
        try:
            xgiven /= xgiven.trace()
        except Exception as err:
            return {'ok': False, 'msg': 'Error %s<br/> in normalizing trace of your expression %s' % (err, to_latex(xgiven))}
        try:
            xexpect /= xexpect.trace()
        except Exception as err:
            return {'ok': False, 'msg': 'Error %s<br/> in normalizing trace of OUR expression %s' % (err, to_latex(xexpect))}

    msg = 'Your expression was evaluated as ' + to_latex(xgiven)

    if matrix and numerical:
        # Element-wise numerical comparison of matrices, using the vector norm
        # of the (chopped) difference.
        xgiven = my_evalf(xgiven, chop=True)
        dm = my_evalf(sympy.Matrix(xexpect) - sympy.Matrix(xgiven), chop=True)
        msg += " = " + to_latex(xgiven)
        if abs(dm.vec().norm().evalf()) < threshold:
            return {'ok': True, 'msg': msg}
    elif dosimplify:
        if (sympy.simplify(xexpect) == sympy.simplify(xgiven)):
            return {'ok': True, 'msg': msg}
    elif numerical:
        if (abs((xexpect - xgiven).evalf(chop=True)) < threshold):
            return {'ok': True, 'msg': msg}
    elif (xexpect == xgiven):
        return {'ok': True, 'msg': msg}

    return {'ok': False, 'msg': msg}
#-----------------------------------------------------------------------------
# helper function to convert all <p> to <span class='inline-error'>
def make_error_message(msg):
    """Wrap an HTML error message in the standard capa alert container."""
    return '<div class="capa_alert">{msg}</div>'.format(msg=msg)
def is_within_tolerance(expected, actual, tolerance):
    """Return True when `actual` is within `tolerance` of `expected`.

    Uses an absolute comparison when expected == 0, a relative one otherwise.
    """
    if expected == 0:
        return abs(actual) < tolerance
    relative_error = abs(abs(actual - expected) / expected)
    return relative_error < tolerance
#-----------------------------------------------------------------------------
# Check function interface, which takes pmathml input
#
# This is one of the main entry points to call.
def symmath_check(expect, ans, dynamath=None, options=None, debug=None, xml=None):
    '''
    Check a symbolic mathematical expression using sympy.
    The input may be presentation MathML.  Uses formula.

    This is the default Symbolic Response checking function

    Desc of args:
    expect is a sympy string representing the correct answer. It is interpreted
     using my_sympify (from formula.py), which reads strings as sympy input
     (e.g. 'integrate(x^2, (x,1,2))' would be valid, and evaluate to give 1.5)

    ans is student-typed answer.  It is expected to be ascii math, but the code
     below would support a sympy string.

    dynamath is the PMathML string converted by MathJax.  It is used if
     evaluation with ans is not sufficient.

    options is a string with these possible substrings, set as an xml property
     of the problem:

     -matrix - make a sympy matrix, rather than a list of lists, if possible
     -qubit - passed to my_sympify
     -imaginary - used in formla, presumably to signal to use i as sqrt(-1)?
     -numerical - force numerical comparison.

    Returns a dict with 'ok' (bool) and 'msg' (HTML string).
    '''
    msg = ''

    threshold = 1.0e-3  # for numerical comparison (also with matrices)
    DEBUG = debug

    if xml is not None:
        # override debug flag using attribute in symbolicmath xml
        DEBUG = xml.get('debug', False)
        if DEBUG in ['0', 'False']:
            DEBUG = False

    # options
    do_matrix = 'matrix' in (options or '')
    do_qubit = 'qubit' in (options or '')
    do_imaginary = 'imaginary' in (options or '')
    do_numerical = 'numerical' in (options or '')

    # parse expected answer
    try:
        fexpect = my_sympify(str(expect), matrix=do_matrix, do_qubit=do_qubit)
    except Exception as err:  # was Python-2-only "except Exception, err"
        msg += '<p>Error %s in parsing OUR expected answer "%s"</p>' % (err, expect)
        return {'ok': False, 'msg': make_error_message(msg)}

    ###### Sympy input #######
    # if expected answer is a number, try parsing provided answer as a number also
    try:
        fans = my_sympify(str(ans), matrix=do_matrix, do_qubit=do_qubit)
    except Exception:
        fans = None

    # do a numerical comparison if both expected and answer are numbers
    if (hasattr(fexpect, 'is_number') and fexpect.is_number
            and hasattr(fans, 'is_number') and fans.is_number):
        if is_within_tolerance(fexpect, fans, threshold):
            return {'ok': True, 'msg': msg}
        else:
            msg += '<p>You entered: %s</p>' % to_latex(fans)
            return {'ok': False, 'msg': msg}

    if do_numerical:  # numerical answer expected - force numerical comparison
        # NOTE(review): fans may be None here if parsing failed above; confirm
        # that a parse failure cannot reach this branch in practice.
        if is_within_tolerance(fexpect, fans, threshold):
            return {'ok': True, 'msg': msg}
        else:
            msg += '<p>You entered: %s (note that a numerical answer is expected)</p>' % to_latex(fans)
            return {'ok': False, 'msg': msg}

    if fexpect == fans:
        msg += '<p>You entered: %s</p>' % to_latex(fans)
        return {'ok': True, 'msg': msg}

    ###### PMathML input ######
    # convert mathml answer to formula
    try:
        mmlans = dynamath[0] if dynamath else None
    except Exception:
        mmlans = None
    if not mmlans:
        return {'ok': False, 'msg': '[symmath_check] failed to get MathML for input; dynamath=%s' % dynamath}

    f = formula(mmlans, options=options)

    # get sympy representation of the formula
    try:
        fsym = f.sympy
        msg += '<p>You entered: %s</p>' % to_latex(f.sympy)
    except Exception as err:
        log.exception("Error evaluating expression '%s' as a valid equation" % ans)
        msg += "<p>Error in evaluating your expression '%s' as a valid equation</p>" % (ans)
        if "Illegal math" in str(err):
            msg += "<p>Illegal math expression</p>"
        if DEBUG:
            # Escape '<' so raw MathML/tracebacks display as text in the HTML message.
            # (The previous .replace('<', '<') calls were no-ops.)
            msg += 'Error: %s' % str(err).replace('<', '&lt;')
            msg += '<hr>'
            msg += '<p><font color="blue">DEBUG messages:</p>'
            msg += "<p><pre>%s</pre></p>" % traceback.format_exc()
            msg += '<p>cmathml=<pre>%s</pre></p>' % f.cmathml.replace('<', '&lt;')
            msg += '<p>pmathml=<pre>%s</pre></p>' % mmlans.replace('<', '&lt;')
            msg += '<hr>'
        return {'ok': False, 'msg': make_error_message(msg)}

    # do numerical comparison with expected
    if hasattr(fexpect, 'is_number') and fexpect.is_number:
        if hasattr(fsym, 'is_number') and fsym.is_number:
            if abs(abs(fsym - fexpect) / fexpect) < threshold:
                return {'ok': True, 'msg': msg}
            return {'ok': False, 'msg': msg}
        msg += "<p>Expecting a numerical answer!</p>"
        msg += "<p>given = %s</p>" % repr(ans)
        msg += "<p>fsym = %s</p>" % repr(fsym)
        return {'ok': False, 'msg': make_error_message(msg)}

    # Here is a good spot for adding calls to X.simplify() or X.expand(),
    # allowing equivalence over binomial expansion or trig identities

    # exactly the same?
    if fexpect == fsym:
        return {'ok': True, 'msg': msg}

    if isinstance(fexpect, list):
        # Expected answer is a vector/matrix given as a list of lists: compare
        # numerically, element-wise, via the norm of the difference.
        try:
            xgiven = my_evalf(fsym, chop=True)
            dm = my_evalf(sympy.Matrix(fexpect) - sympy.Matrix(xgiven), chop=True)
            if abs(dm.vec().norm().evalf()) < threshold:
                return {'ok': True, 'msg': msg}
        except sympy.ShapeError:
            msg += "<p>Error - your input vector or matrix has the wrong dimensions"
            return {'ok': False, 'msg': make_error_message(msg)}
        except Exception as err:
            msg += "<p>Error %s in comparing expected (a list) and your answer</p>" % str(err).replace('<', '&lt;')
            if DEBUG:
                msg += "<p/><pre>%s</pre>" % traceback.format_exc()
            return {'ok': False, 'msg': make_error_message(msg)}

    try:
        diff = (fexpect - fsym)
    except Exception:
        diff = None

    if DEBUG:
        msg += '<hr>'
        msg += '<p><font color="blue">DEBUG messages:</p>'
        msg += "<p>Got: %s</p>" % repr(fsym)
        msg += "<p>Expecting: %s</p>" % repr(fexpect).replace('**', '^').replace('hat(I)', 'hat(i)')
        if diff:
            msg += "<p>Difference: %s</p>" % to_latex(diff)
        msg += '<hr>'

    return {'ok': False, 'msg': msg}
| agpl-3.0 |
rnavarro/vitess | py/vtproto/vttest_pb2.py | 11 | 7546 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: vttest.proto
import sys
# Auto-generated protobuf bindings for vttest.proto (protoc output).
# Do not edit by hand; regenerate from the .proto file instead.
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()


# File descriptor built from the serialized vttest.proto definition.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='vttest.proto',
  package='vttest',
  syntax='proto3',
  serialized_pb=_b('\n\x0cvttest.proto\x12\x06vttest\"/\n\x05Shard\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x18\n\x10\x64\x62_name_override\x18\x02 \x01(\t\"\xb5\x01\n\x08Keyspace\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1d\n\x06shards\x18\x02 \x03(\x0b\x32\r.vttest.Shard\x12\x1c\n\x14sharding_column_name\x18\x03 \x01(\t\x12\x1c\n\x14sharding_column_type\x18\x04 \x01(\t\x12\x13\n\x0bserved_from\x18\x05 \x01(\t\x12\x15\n\rreplica_count\x18\x06 \x01(\x05\x12\x14\n\x0crdonly_count\x18\x07 \x01(\x05\"D\n\x0eVTTestTopology\x12#\n\tkeyspaces\x18\x01 \x03(\x0b\x32\x10.vttest.Keyspace\x12\r\n\x05\x63\x65lls\x18\x02 \x03(\tb\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)


# Descriptor for the Shard message: name, db_name_override.
_SHARD = _descriptor.Descriptor(
  name='Shard',
  full_name='vttest.Shard',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='name', full_name='vttest.Shard.name', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='db_name_override', full_name='vttest.Shard.db_name_override', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=24,
  serialized_end=71,
)


# Descriptor for the Keyspace message: name, shards, sharding columns, etc.
_KEYSPACE = _descriptor.Descriptor(
  name='Keyspace',
  full_name='vttest.Keyspace',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='name', full_name='vttest.Keyspace.name', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='shards', full_name='vttest.Keyspace.shards', index=1,
      number=2, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='sharding_column_name', full_name='vttest.Keyspace.sharding_column_name', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='sharding_column_type', full_name='vttest.Keyspace.sharding_column_type', index=3,
      number=4, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='served_from', full_name='vttest.Keyspace.served_from', index=4,
      number=5, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='replica_count', full_name='vttest.Keyspace.replica_count', index=5,
      number=6, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='rdonly_count', full_name='vttest.Keyspace.rdonly_count', index=6,
      number=7, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=74,
  serialized_end=255,
)


# Descriptor for the VTTestTopology message: keyspaces, cells.
_VTTESTTOPOLOGY = _descriptor.Descriptor(
  name='VTTestTopology',
  full_name='vttest.VTTestTopology',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='keyspaces', full_name='vttest.VTTestTopology.keyspaces', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='cells', full_name='vttest.VTTestTopology.cells', index=1,
      number=2, type=9, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=257,
  serialized_end=325,
)

# Wire up cross-message field types and register descriptors by name.
_KEYSPACE.fields_by_name['shards'].message_type = _SHARD
_VTTESTTOPOLOGY.fields_by_name['keyspaces'].message_type = _KEYSPACE
DESCRIPTOR.message_types_by_name['Shard'] = _SHARD
DESCRIPTOR.message_types_by_name['Keyspace'] = _KEYSPACE
DESCRIPTOR.message_types_by_name['VTTestTopology'] = _VTTESTTOPOLOGY

# Generated message classes bound to the descriptors above.
Shard = _reflection.GeneratedProtocolMessageType('Shard', (_message.Message,), dict(
  DESCRIPTOR = _SHARD,
  __module__ = 'vttest_pb2'
  # @@protoc_insertion_point(class_scope:vttest.Shard)
  ))
_sym_db.RegisterMessage(Shard)

Keyspace = _reflection.GeneratedProtocolMessageType('Keyspace', (_message.Message,), dict(
  DESCRIPTOR = _KEYSPACE,
  __module__ = 'vttest_pb2'
  # @@protoc_insertion_point(class_scope:vttest.Keyspace)
  ))
_sym_db.RegisterMessage(Keyspace)

VTTestTopology = _reflection.GeneratedProtocolMessageType('VTTestTopology', (_message.Message,), dict(
  DESCRIPTOR = _VTTESTTOPOLOGY,
  __module__ = 'vttest_pb2'
  # @@protoc_insertion_point(class_scope:vttest.VTTestTopology)
  ))
_sym_db.RegisterMessage(VTTestTopology)
import grpc
from grpc.beta import implementations as beta_implementations
from grpc.beta import interfaces as beta_interfaces
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
# @@protoc_insertion_point(module_scope)
| apache-2.0 |
guoxf/linux | scripts/rt-tester/rt-tester.py | 1106 | 5305 | #!/usr/bin/python
#
# rt-mutex tester
#
# (C) 2006 Thomas Gleixner <tglx@linutronix.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
import os
import sys
import getopt
import shutil
import string
# Globals

# Command-line controlled flags.
quiet = 0      # -q: suppress progress output
test = 0       # -t: syntax-check mode only (do not touch sysfs)
comments = 0   # -c: echo comment lines after the first command

# sysfs interface exposed by the in-kernel rt-mutex tester.
sysfsprefix = "/sys/devices/system/rttest/rttest"
statusfile = "/status"
commandfile = "/command"

# Command opcodes
# Maps the symbolic command names used in test scripts to the numeric
# opcode strings understood by the kernel tester.
cmd_opcodes = {
    "schedother"    : "1",
    "schedfifo"     : "2",
    "lock"          : "3",
    "locknowait"    : "4",
    "lockint"       : "5",
    "lockintnowait" : "6",
    "lockcont"      : "7",
    "unlock"        : "8",
    "signal"        : "11",
    "resetevent"    : "98",
    "reset"         : "99",
    }

# Test/wait opcodes: each entry is [status field letter, comparison, fixed arg].
# The fixed arg is used for 'M' (mutex state digit) checks; None means the
# comparison value comes from the test line instead.
test_opcodes = {
    "prioeq"        : ["P" , "eq" , None],
    "priolt"        : ["P" , "lt" , None],
    "priogt"        : ["P" , "gt" , None],
    "nprioeq"       : ["N" , "eq" , None],
    "npriolt"       : ["N" , "lt" , None],
    "npriogt"       : ["N" , "gt" , None],
    "unlocked"      : ["M" , "eq" , 0],
    "trylock"       : ["M" , "eq" , 1],
    "blocked"       : ["M" , "eq" , 2],
    "blockedwake"   : ["M" , "eq" , 3],
    "locked"        : ["M" , "eq" , 4],
    "opcodeeq"      : ["O" , "eq" , None],
    "opcodelt"      : ["O" , "lt" , None],
    "opcodegt"      : ["O" , "gt" , None],
    "eventeq"       : ["E" , "eq" , None],
    "eventlt"       : ["E" , "lt" , None],
    "eventgt"       : ["E" , "gt" , None],
    }
# Print usage information
def usage():
    """Print command-line usage information for rt-tester.py."""
    # Single-argument print() calls behave identically under Python 2 and 3;
    # the original used the Python-2-only "print ..." statement form.
    print("rt-tester.py <-c -h -q -t> <testfile>")
    print(" -c display comments after first command")
    print(" -h help")
    print(" -q quiet mode")
    print(" -t test mode (syntax check)")
    print(" testfile: read test specification from testfile")
    print(" otherwise from stdin")
    return
# Print progress when not in quiet mode
def progress(str):
    """Echo *str* to stdout unless quiet mode (-q) is enabled."""
    # NOTE: the parameter name "str" shadows the builtin; kept unchanged for
    # interface stability with existing callers.
    if not quiet:
        # print() with a single argument is equivalent under Python 2 and 3;
        # the original used the Python-2-only "print str" statement.
        print(str)
# Analyse a status value
def analyse(val, top, arg):
intval = int(val)
if top[0] == "M":
intval = intval / (10 ** int(arg))
intval = intval % 10
argval = top[2]
elif top[0] == "O":
argval = int(cmd_opcodes.get(arg, arg))
else:
argval = int(arg)
# progress("%d %s %d" %(intval, top[1], argval))
if top[1] == "eq" and intval == argval:
return 1
if top[1] == "lt" and intval < argval:
return 1
if top[1] == "gt" and intval > argval:
return 1
return 0
# Parse the commandline
try:
(options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
except getopt.GetoptError, ex:
usage()
sys.exit(1)
# Parse commandline options
for option, value in options:
if option == "-c":
comments = 1
elif option == "-q":
quiet = 1
elif option == "-t":
test = 1
elif option == '-h':
usage()
sys.exit(0)
# Select the input source
if arguments:
try:
fd = open(arguments[0])
except Exception,ex:
sys.stderr.write("File not found %s\n" %(arguments[0]))
sys.exit(1)
else:
fd = sys.stdin
linenr = 0
# Read the test patterns
while 1:
linenr = linenr + 1
line = fd.readline()
if not len(line):
break
line = line.strip()
parts = line.split(":")
if not parts or len(parts) < 1:
continue
if len(parts[0]) == 0:
continue
if parts[0].startswith("#"):
if comments > 1:
progress(line)
continue
if comments == 1:
comments = 2
progress(line)
cmd = parts[0].strip().lower()
opc = parts[1].strip().lower()
tid = parts[2].strip()
dat = parts[3].strip()
try:
# Test or wait for a status value
if cmd == "t" or cmd == "w":
testop = test_opcodes[opc]
fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
if test:
print fname
continue
while 1:
query = 1
fsta = open(fname, 'r')
status = fsta.readline().strip()
fsta.close()
stat = status.split(",")
for s in stat:
s = s.strip()
if s.startswith(testop[0]):
# Separate status value
val = s[2:].strip()
query = analyse(val, testop, dat)
break
if query or cmd == "t":
break
progress(" " + status)
if not query:
sys.stderr.write("Test failed in line %d\n" %(linenr))
sys.exit(1)
# Issue a command to the tester
elif cmd == "c":
cmdnr = cmd_opcodes[opc]
# Build command string and sys filename
cmdstr = "%s:%s" %(cmdnr, dat)
fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
if test:
print fname
continue
fcmd = open(fname, 'w')
fcmd.write(cmdstr)
fcmd.close()
except Exception,ex:
sys.stderr.write(str(ex))
sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
if not test:
fd.close()
sys.exit(1)
# Normal exit pass
print "Pass"
sys.exit(0)
| gpl-2.0 |
jhawkesworth/ansible-modules-core | utilities/logic/fail.py | 198 | 1458 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2012 Dag Wieers <dag@wieers.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: fail
short_description: Fail with custom message
description:
- This module fails the progress with a custom message. It can be
useful for bailing out when a certain condition is met using C(when).
version_added: "0.8"
options:
msg:
description:
- The customized message used for failing execution. If omitted,
fail will simple bail out with a generic message.
required: false
default: "'Failed as requested from task'"
author: "Dag Wieers (@dagwieers)"
'''
EXAMPLES = '''
# Example playbook using fail and when together
- fail: msg="The system may not be provisioned according to the CMDB status."
when: cmdb_status != "to-be-staged"
'''
| gpl-3.0 |
fernandozamoraj/py_sandbox | py101/autocorrect.py | 1 | 4097 | import unittest
MIN_SCORE = .7
g_words = []
def print1(line):
pass
#print(line);
def is_possible_replacement(target_word, possible_word):
global MIN_SCORE
score = 0
print1('*'*40)
if(target_word[0] == possible_word[0]):
score += 100
if(target_word[-1] == possible_word[-1]):
score += 100
first_set = {x for x in target_word[1:-1]}
second_set = {x for x in possible_word[1:-1]}
intersection_set = first_set.intersection(second_set)
if(len(second_set) > len(first_set)):
longer_set = len(second_set)
else:
longer_set = len(first_set)
score_for_matching_chars = ((len(intersection_set)*100.0) / (longer_set*100.0))*.75
score_first_and_last_chars = ((score*1.0)/(200.0))*.25
demerit = 0
if(len(possible_word) > len(target_word)):
diff = len(possible_word) - len(target_word)
demerit = (diff/(len(possible_word)*1.0))*.5
else:
diff = len(target_word) - len(possible_word)
demerit = (diff/(len(target_word)*1.0))*.5
total_score = (score_for_matching_chars + score_first_and_last_chars) - demerit;
return total_score > MIN_SCORE
def get_words(target_word):
global g_words
if(len(g_words) < 1):
file = open('C:/Dev/python/py_sandbox/words.txt')
g_words = file.read().split('\n')
filtered_words = []
for word in g_words:
if(len(word) > 0 and word[0] == target_word[0]):
print1(word)
filtered_words.append(word)
return filtered_words
def correct_word(target_word):
for word in get_words(target_word):
if(is_possible_replacement(target_word, word)):
return word
def is_word(target_word):
for word in get_words(target_word):
if(word == target_word):
return True
return False
def prompt():
while(True):
print('\nEntere a word: \n');
user_entry = input();
new_word = user_entry
if(is_word(user_entry) == False):
new_word = correct_word(user_entry)
print('Your corrected word is: {0}'.format(new_word))
if(new_word == 'quit'):
break;
class test_auto_correct(unittest.TestCase):
def test_check_exact_word(self):
itis = is_possible_replacement('automobile', 'automobile')
self.assertTrue(itis)
def test_check_possible_word(self):
itis = is_possible_replacement('automobile', 'automobile')
self.assertTrue(itis)
def test_check_aardvark_word(self):
itis = is_possible_replacement('ark', 'aardvark')
self.assertFalse(itis)
def test_check_ark_word(self):
itis = is_possible_replacement('aardvark', 'ark')
self.assertFalse(itis)
def test_check_apple_people(self):
itis = is_possible_replacement('apple', 'people')
self.assertFalse(itis)
def test_check_spaghetti_spageti(self):
itis = is_possible_replacement('spageti', 'spaghetti')
self.assertTrue(itis)
def test_check_sapgetti(self):
itis = is_possible_replacement('sapgetti', 'spaghetti')
self.assertTrue(itis)
def test_check_acomodate(self):
itis = is_possible_replacement('acomodate', 'accommodate')
self.assertTrue(itis)
def test_check_acomodat(self):
itis = is_possible_replacement('acomodat', 'accommodate')
self.assertTrue(itis)
def test_check_acknolegement(self):
itis = is_possible_replacement('acknolegment', 'acknowledgement')
self.assertTrue(itis)
def test_check_typo(self):
itis = is_possible_replacement('assird', 'assure')
self.assertTrue(itis)
if(__name__ == '__main__'):
#unittest.main()
#get_words('foo')
prompt()
| apache-2.0 |
sublime1809/django | tests/m2m_regress/tests.py | 23 | 3734 | from __future__ import unicode_literals
from django.core.exceptions import FieldError
from django.test import TestCase
from django.utils import six
from .models import (SelfRefer, Tag, TagCollection, Entry, SelfReferChild,
SelfReferChildSibling, Worksheet, RegressionModelSplit)
class M2MRegressionTests(TestCase):
def test_multiple_m2m(self):
# Multiple m2m references to model must be distinguished when
# accessing the relations through an instance attribute.
s1 = SelfRefer.objects.create(name='s1')
s2 = SelfRefer.objects.create(name='s2')
s3 = SelfRefer.objects.create(name='s3')
s1.references.add(s2)
s1.related.add(s3)
e1 = Entry.objects.create(name='e1')
t1 = Tag.objects.create(name='t1')
t2 = Tag.objects.create(name='t2')
e1.topics.add(t1)
e1.related.add(t2)
self.assertQuerysetEqual(s1.references.all(), ["<SelfRefer: s2>"])
self.assertQuerysetEqual(s1.related.all(), ["<SelfRefer: s3>"])
self.assertQuerysetEqual(e1.topics.all(), ["<Tag: t1>"])
self.assertQuerysetEqual(e1.related.all(), ["<Tag: t2>"])
def test_internal_related_name_not_in_error_msg(self):
# The secret internal related names for self-referential many-to-many
# fields shouldn't appear in the list when an error is made.
six.assertRaisesRegex(
self, FieldError,
"Choices are: id, name, references, related, selfreferchild, selfreferchildsibling$",
lambda: SelfRefer.objects.filter(porcupine='fred')
)
def test_m2m_inheritance_symmetry(self):
# Test to ensure that the relationship between two inherited models
# with a self-referential m2m field maintains symmetry
sr_child = SelfReferChild(name="Hanna")
sr_child.save()
sr_sibling = SelfReferChildSibling(name="Beth")
sr_sibling.save()
sr_child.related.add(sr_sibling)
self.assertQuerysetEqual(sr_child.related.all(), ["<SelfRefer: Beth>"])
self.assertQuerysetEqual(sr_sibling.related.all(), ["<SelfRefer: Hanna>"])
def test_m2m_pk_field_type(self):
# Regression for #11311 - The primary key for models in a m2m relation
# doesn't have to be an AutoField
w = Worksheet(id='abc')
w.save()
w.delete()
def test_add_m2m_with_base_class(self):
# Regression for #11956 -- You can add an object to a m2m with the
# base class without causing integrity errors
t1 = Tag.objects.create(name='t1')
t2 = Tag.objects.create(name='t2')
c1 = TagCollection.objects.create(name='c1')
c1.tags = [t1, t2]
c1 = TagCollection.objects.get(name='c1')
self.assertQuerysetEqual(c1.tags.all(), ["<Tag: t1>", "<Tag: t2>"], ordered=False)
self.assertQuerysetEqual(t1.tag_collections.all(), ["<TagCollection: c1>"])
def test_manager_class_caching(self):
e1 = Entry.objects.create()
e2 = Entry.objects.create()
t1 = Tag.objects.create()
t2 = Tag.objects.create()
# Get same manager twice in a row:
self.assertTrue(t1.entry_set.__class__ is t1.entry_set.__class__)
self.assertTrue(e1.topics.__class__ is e1.topics.__class__)
# Get same manager for different instances
self.assertTrue(e1.topics.__class__ is e2.topics.__class__)
self.assertTrue(t1.entry_set.__class__ is t2.entry_set.__class__)
def test_m2m_abstract_split(self):
# Regression for #19236 - an abstract class with a 'split' method
# causes a TypeError in add_lazy_relation
m1 = RegressionModelSplit(name='1')
m1.save()
| bsd-3-clause |
cmdunkers/DeeperMind | PythonEnv/lib/python2.7/site-packages/theano/misc/pycuda_utils.py | 7 | 2472 | import pycuda.gpuarray
from theano.sandbox import cuda
if cuda.cuda_available is False:
raise ImportError('Optional theano package cuda disabled')
def to_gpuarray(x, copyif=False):
""" take a CudaNdarray and return a pycuda.gpuarray.GPUArray
:type x: CudaNdarray
:param x: The array to transform to pycuda.gpuarray.GPUArray.
:type copyif: bool
:param copyif: If False, raise an error if x is not c contiguous.
If it is c contiguous, we return a GPUArray that share
the same memory region as x.
If True, copy x if it is no c contiguous, so the return won't
shape the same memory region. If c contiguous, the return
will share the same memory region.
We need to do this as GPUArray don't fully support strided memory.
:return type: pycuda.gpuarray.GPUArray
"""
if not isinstance(x, cuda.CudaNdarray):
raise ValueError("We can transfer only CudaNdarray to pycuda.gpuarray.GPUArray")
else:
# Check if it is c contiguous
size = 1
c_contiguous = True
for i in range(x.ndim - 1, -1, -1):
if x.shape[i] == 1:
continue
if x._strides[i] != size:
c_contiguous = False
break
size *= x.shape[i]
if not c_contiguous:
if copyif:
x = x.copy()
else:
raise ValueError("We were asked to not copy memory, but the memory is not c contiguous.")
# Now x is always c contiguous
px = pycuda.gpuarray.GPUArray(x.shape, x.dtype, base=x, gpudata=x.gpudata)
return px
def to_cudandarray(x):
""" take a pycuda.gpuarray.GPUArray and make a CudaNdarray that point to its memory
:note: CudaNdarray support only float32, so only float32 GPUArray are accepted
"""
if not isinstance(x, pycuda.gpuarray.GPUArray):
raise ValueError("We can transfer only pycuda.gpuarray.GPUArray to CudaNdarray")
elif x.dtype != "float32":
raise ValueError("CudaNdarray support only float32")
else:
strides = [1]
for i in x.shape[::-1][:-1]:
strides.append(strides[-1] * i)
strides = tuple(strides[::-1])
ptr = int(x.gpudata) # in pycuda trunk, y.ptr also works, which is a little cleaner
z = cuda.from_gpu_pointer(ptr, x.shape, strides, x)
return z
| bsd-3-clause |
ncdesouza/bookworm | env/lib/python2.7/site-packages/werkzeug/testsuite/internal.py | 146 | 2940 | # -*- coding: utf-8 -*-
"""
werkzeug.testsuite.internal
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Internal tests.
:copyright: (c) 2014 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import unittest
from datetime import datetime
from warnings import filterwarnings, resetwarnings
from werkzeug.testsuite import WerkzeugTestCase
from werkzeug.wrappers import Request, Response
from werkzeug import _internal as internal
from werkzeug.test import create_environ
class InternalTestCase(WerkzeugTestCase):
def test_date_to_unix(self):
assert internal._date_to_unix(datetime(1970, 1, 1)) == 0
assert internal._date_to_unix(datetime(1970, 1, 1, 1, 0, 0)) == 3600
assert internal._date_to_unix(datetime(1970, 1, 1, 1, 1, 1)) == 3661
x = datetime(2010, 2, 15, 16, 15, 39)
assert internal._date_to_unix(x) == 1266250539
def test_easteregg(self):
req = Request.from_values('/?macgybarchakku')
resp = Response.force_type(internal._easteregg(None), req)
assert b'About Werkzeug' in resp.get_data()
assert b'the Swiss Army knife of Python web development' in resp.get_data()
def test_wrapper_internals(self):
req = Request.from_values(data={'foo': 'bar'}, method='POST')
req._load_form_data()
assert req.form.to_dict() == {'foo': 'bar'}
# second call does not break
req._load_form_data()
assert req.form.to_dict() == {'foo': 'bar'}
# check reprs
assert repr(req) == "<Request 'http://localhost/' [POST]>"
resp = Response()
assert repr(resp) == '<Response 0 bytes [200 OK]>'
resp.set_data('Hello World!')
assert repr(resp) == '<Response 12 bytes [200 OK]>'
resp.response = iter(['Test'])
assert repr(resp) == '<Response streamed [200 OK]>'
# unicode data does not set content length
response = Response([u'Hällo Wörld'])
headers = response.get_wsgi_headers(create_environ())
assert u'Content-Length' not in headers
response = Response([u'Hällo Wörld'.encode('utf-8')])
headers = response.get_wsgi_headers(create_environ())
assert u'Content-Length' in headers
# check for internal warnings
filterwarnings('error', category=Warning)
response = Response()
environ = create_environ()
response.response = 'What the...?'
self.assert_raises(Warning, lambda: list(response.iter_encoded()))
self.assert_raises(Warning, lambda: list(response.get_app_iter(environ)))
response.direct_passthrough = True
self.assert_raises(Warning, lambda: list(response.iter_encoded()))
self.assert_raises(Warning, lambda: list(response.get_app_iter(environ)))
resetwarnings()
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(InternalTestCase))
return suite
| gpl-3.0 |
stscieisenhamer/glue | doc/redirect.py | 5 | 2172 | # This Sphinx plugin comes from https://github.com/openstack/nova-specs and was
# originally licensed under a Creative Commons Attribution 3.0 Unported License.
# The full text for this license can be found here:
#
# http://creativecommons.org/licenses/by/3.0/legalcode
# A simple sphinx plugin which creates HTML redirections from old names
# to new names. It does this by looking for files named "redirect" in
# the documentation source and using the contents to create simple HTML
# redirection pages for changed filenames.
import os.path
from sphinx.application import ENV_PICKLE_FILENAME
from sphinx.util.console import bold
def setup(app):
from sphinx.application import Sphinx
if not isinstance(app, Sphinx):
return
app.connect('build-finished', emit_redirects)
def process_redirect_file(app, path, ent):
parent_path = path.replace(app.builder.srcdir, app.builder.outdir)
with open(os.path.join(path, ent)) as redirects:
for line in redirects.readlines():
from_path, to_path = line.rstrip().split(' ')
from_path = from_path.replace('.rst', '.html')
to_path = to_path.replace('.rst', '.html')
redirected_filename = os.path.join(parent_path, from_path)
redirected_directory = os.path.dirname(redirected_filename)
if not os.path.exists(redirected_directory):
os.makedirs(redirected_directory)
with open(redirected_filename, 'w') as f:
f.write('<html><head><meta http-equiv="refresh" content="0; '
'url=%s" /></head></html>'
% to_path)
def emit_redirects(app, exc):
app.builder.info(bold('scanning %s for redirects...') % app.builder.srcdir)
def process_directory(path):
for ent in os.listdir(path):
p = os.path.join(path, ent)
if os.path.isdir(p):
process_directory(p)
elif ent == 'redirects':
app.builder.info(' found redirects at %s' % p)
process_redirect_file(app, path, ent)
process_directory(app.builder.srcdir)
app.builder.info('...done')
| bsd-3-clause |
parrondo/arctic | tests/integration/scripts/test_list_libraries.py | 5 | 1033 | from mock import patch, call
import pytest
from arctic.scripts import arctic_list_libraries
from ...util import run_as_main
def test_list_library(mongo_host, library, library_name):
with patch('arctic.scripts.arctic_list_libraries.print') as p:
run_as_main(arctic_list_libraries.main, "--host", mongo_host)
for x in p.call_args_list:
if x == call(library_name):
return
assert False, "Failed to find a library"
def test_list_library_args(mongo_host, library, library_name):
with patch('arctic.scripts.arctic_list_libraries.print') as p:
run_as_main(arctic_list_libraries.main, "--host", mongo_host, library_name[:2])
for x in p.call_args_list:
assert x[0][0].startswith(library_name[:2])
def test_list_library_args_not_found(mongo_host, library, library_name):
with patch('arctic.scripts.arctic_list_libraries.print') as p:
run_as_main(arctic_list_libraries.main, "--host", mongo_host, 'some_library_which_doesnt_exist')
assert p.call_count == 0
| lgpl-2.1 |
TomKita/ansible-modules-extras | notification/hipchat.py | 9 | 5616 | #!/usr/bin/python
# -*- coding: utf-8 -*-
DOCUMENTATION = '''
---
module: hipchat
version_added: "1.2"
short_description: Send a message to hipchat.
description:
- Send a message to hipchat
options:
token:
description:
- API token.
required: true
room:
description:
- ID or name of the room.
required: true
from:
description:
- Name the message will appear be sent from. max 15 characters.
Over 15, will be shorten.
required: false
default: Ansible
msg:
description:
- The message body.
required: true
default: null
color:
description:
- Background color for the message. Default is yellow.
required: false
default: yellow
choices: [ "yellow", "red", "green", "purple", "gray", "random" ]
msg_format:
description:
- message format. html or text. Default is text.
required: false
default: text
choices: [ "text", "html" ]
notify:
description:
- notify or not (change the tab color, play a sound, etc)
required: false
default: 'yes'
choices: [ "yes", "no" ]
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
version_added: 1.5.1
api:
description:
- API url if using a self-hosted hipchat server. For hipchat api version 2 use C(/v2) path in URI
required: false
default: 'https://api.hipchat.com/v1'
version_added: 1.6.0
requirements: [ ]
author: "WAKAYAMA Shirou (@shirou), BOURDEL Paul (@pb8226)"
'''
EXAMPLES = '''
- hipchat: room=notify msg="Ansible task finished"
# Use Hipchat API version 2
- hipchat:
api: "https://api.hipchat.com/v2/"
token: OAUTH2_TOKEN
room: notify
msg: "Ansible task finished"
'''
# ===========================================
# HipChat module specific support methods.
#
import urllib
DEFAULT_URI = "https://api.hipchat.com/v1"
MSG_URI_V1 = "/rooms/message"
NOTIFY_URI_V2 = "/room/{id_or_name}/notification"
def send_msg_v1(module, token, room, msg_from, msg, msg_format='text',
color='yellow', notify=False, api=MSG_URI_V1):
'''sending message to hipchat v1 server'''
print "Sending message to v1 server"
params = {}
params['room_id'] = room
params['from'] = msg_from[:15] # max length is 15
params['message'] = msg
params['message_format'] = msg_format
params['color'] = color
params['api'] = api
params['notify'] = int(notify)
url = api + MSG_URI_V1 + "?auth_token=%s" % (token)
data = urllib.urlencode(params)
if module.check_mode:
# In check mode, exit before actually sending the message
module.exit_json(changed=False)
response, info = fetch_url(module, url, data=data)
if info['status'] == 200:
return response.read()
else:
module.fail_json(msg="failed to send message, return status=%s" % str(info['status']))
def send_msg_v2(module, token, room, msg_from, msg, msg_format='text',
color='yellow', notify=False, api=NOTIFY_URI_V2):
'''sending message to hipchat v2 server'''
print "Sending message to v2 server"
headers = {'Authorization':'Bearer %s' % token, 'Content-Type':'application/json'}
body = dict()
body['message'] = msg
body['color'] = color
body['message_format'] = msg_format
params['notify'] = notify
POST_URL = api + NOTIFY_URI_V2
url = POST_URL.replace('{id_or_name}', room)
data = json.dumps(body)
if module.check_mode:
# In check mode, exit before actually sending the message
module.exit_json(changed=False)
response, info = fetch_url(module, url, data=data, headers=headers, method='POST')
if info['status'] == 200:
return response.read()
else:
module.fail_json(msg="failed to send message, return status=%s" % str(info['status']))
# ===========================================
# Module execution.
#
def main():
module = AnsibleModule(
argument_spec=dict(
token=dict(required=True),
room=dict(required=True),
msg=dict(required=True),
msg_from=dict(default="Ansible", aliases=['from']),
color=dict(default="yellow", choices=["yellow", "red", "green",
"purple", "gray", "random"]),
msg_format=dict(default="text", choices=["text", "html"]),
notify=dict(default=True, type='bool'),
validate_certs=dict(default='yes', type='bool'),
api=dict(default=DEFAULT_URI),
),
supports_check_mode=True
)
token = module.params["token"]
room = module.params["room"]
msg = module.params["msg"]
msg_from = module.params["msg_from"]
color = module.params["color"]
msg_format = module.params["msg_format"]
notify = module.params["notify"]
api = module.params["api"]
try:
if api.find('/v2') != -1:
send_msg_v2(module, token, room, msg_from, msg, msg_format, color, notify, api)
else:
send_msg_v1(module, token, room, msg_from, msg, msg_format, color, notify, api)
except Exception, e:
module.fail_json(msg="unable to send msg: %s" % e)
changed = True
module.exit_json(changed=changed, room=room, msg_from=msg_from, msg=msg)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
main()
| gpl-3.0 |
Tehsmash/nova | nova/tests/unit/api/openstack/compute/contrib/test_extended_virtual_interfaces_net.py | 34 | 3799 | # Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils
import webob
from nova.api.openstack.compute.contrib import extended_virtual_interfaces_net
from nova import compute
from nova import network
from nova import test
from nova.tests.unit.api.openstack import fakes
FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
FAKE_VIFS = [{'uuid': '00000000-0000-0000-0000-00000000000000000',
'address': '00-00-00-00-00-00',
'net_uuid': '00000000-0000-0000-0000-00000000000000001'},
{'uuid': '11111111-1111-1111-1111-11111111111111111',
'address': '11-11-11-11-11-11',
'net_uuid': '11111111-1111-1111-1111-11111111111111112'}]
EXPECTED_NET_UUIDS = ['00000000-0000-0000-0000-00000000000000001',
'11111111-1111-1111-1111-11111111111111112']
def compute_api_get(self, context, instance_id, expected_attrs=None,
want_objects=False):
return dict(uuid=FAKE_UUID, id=instance_id, instance_type_id=1, host='bob')
def get_vifs_by_instance(self, context, instance_id):
return FAKE_VIFS
def get_vif_by_mac_address(self, context, mac_address):
if mac_address == "00-00-00-00-00-00":
return {'net_uuid': '00000000-0000-0000-0000-00000000000000001'}
else:
return {'net_uuid': '11111111-1111-1111-1111-11111111111111112'}
class ExtendedServerVIFNetTest(test.NoDBTestCase):
content_type = 'application/json'
prefix = "%s:" % extended_virtual_interfaces_net. \
Extended_virtual_interfaces_net.alias
def setUp(self):
super(ExtendedServerVIFNetTest, self).setUp()
self.stubs.Set(compute.api.API, "get",
compute_api_get)
self.stubs.Set(network.api.API, "get_vifs_by_instance",
get_vifs_by_instance)
self.stubs.Set(network.api.API, "get_vif_by_mac_address",
get_vif_by_mac_address)
self.flags(
osapi_compute_extension=[
'nova.api.openstack.compute.contrib.select_extensions'],
osapi_compute_ext_list=['Virtual_interfaces',
'Extended_virtual_interfaces_net'])
def _make_request(self, url):
req = webob.Request.blank(url)
req.headers['Accept'] = self.content_type
res = req.get_response(fakes.wsgi_app(init_only=(
'os-virtual-interfaces', 'OS-EXT-VIF-NET')))
return res
def _get_vifs(self, body):
return jsonutils.loads(body).get('virtual_interfaces')
def _get_net_id(self, vifs):
for vif in vifs:
yield vif['%snet_id' % self.prefix]
def assertVIFs(self, vifs):
result = []
for net_id in self._get_net_id(vifs):
result.append(net_id)
sorted(result)
for i, net_uuid in enumerate(result):
self.assertEqual(net_uuid, EXPECTED_NET_UUIDS[i])
def test_get_extend_virtual_interfaces_list(self):
res = self._make_request('/v2/fake/servers/abcd/os-virtual-interfaces')
self.assertEqual(res.status_int, 200)
self.assertVIFs(self._get_vifs(res.body))
| apache-2.0 |
gauravbose/digital-menu | tests/migrations/test_executor.py | 31 | 21893 | from django.apps.registry import apps as global_apps
from django.db import connection
from django.db.migrations.executor import MigrationExecutor
from django.db.migrations.graph import MigrationGraph
from django.db.utils import DatabaseError
from django.test import TestCase, modify_settings, override_settings
from .test_base import MigrationTestBase
@modify_settings(INSTALLED_APPS={'append': 'migrations2'})
class ExecutorTests(MigrationTestBase):
"""
Tests the migration executor (full end-to-end running).
Bear in mind that if these are failing you should fix the other
test failures first, as they may be propagating into here.
"""
available_apps = ["migrations", "migrations2", "django.contrib.auth", "django.contrib.contenttypes"]
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
def test_run(self):
"""
Tests running a simple set of migrations.
"""
executor = MigrationExecutor(connection)
# Let's look at the plan first and make sure it's up to scratch
plan = executor.migration_plan([("migrations", "0002_second")])
self.assertEqual(
plan,
[
(executor.loader.graph.nodes["migrations", "0001_initial"], False),
(executor.loader.graph.nodes["migrations", "0002_second"], False),
],
)
# Were the tables there before?
self.assertTableNotExists("migrations_author")
self.assertTableNotExists("migrations_book")
# Alright, let's try running it
executor.migrate([("migrations", "0002_second")])
# Are the tables there now?
self.assertTableExists("migrations_author")
self.assertTableExists("migrations_book")
# Rebuild the graph to reflect the new DB state
executor.loader.build_graph()
# Alright, let's undo what we did
plan = executor.migration_plan([("migrations", None)])
self.assertEqual(
plan,
[
(executor.loader.graph.nodes["migrations", "0002_second"], True),
(executor.loader.graph.nodes["migrations", "0001_initial"], True),
],
)
executor.migrate([("migrations", None)])
# Are the tables gone?
self.assertTableNotExists("migrations_author")
self.assertTableNotExists("migrations_book")
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed"})
def test_run_with_squashed(self):
"""
Tests running a squashed migration from zero (should ignore what it replaces)
"""
executor = MigrationExecutor(connection)
# Check our leaf node is the squashed one
leaves = [key for key in executor.loader.graph.leaf_nodes() if key[0] == "migrations"]
self.assertEqual(leaves, [("migrations", "0001_squashed_0002")])
# Check the plan
plan = executor.migration_plan([("migrations", "0001_squashed_0002")])
self.assertEqual(
plan,
[
(executor.loader.graph.nodes["migrations", "0001_squashed_0002"], False),
],
)
# Were the tables there before?
self.assertTableNotExists("migrations_author")
self.assertTableNotExists("migrations_book")
# Alright, let's try running it
executor.migrate([("migrations", "0001_squashed_0002")])
# Are the tables there now?
self.assertTableExists("migrations_author")
self.assertTableExists("migrations_book")
# Rebuild the graph to reflect the new DB state
executor.loader.build_graph()
# Alright, let's undo what we did. Should also just use squashed.
plan = executor.migration_plan([("migrations", None)])
self.assertEqual(
plan,
[
(executor.loader.graph.nodes["migrations", "0001_squashed_0002"], True),
],
)
executor.migrate([("migrations", None)])
# Are the tables gone?
self.assertTableNotExists("migrations_author")
self.assertTableNotExists("migrations_book")
@override_settings(MIGRATION_MODULES={
"migrations": "migrations.test_migrations",
"migrations2": "migrations2.test_migrations_2",
})
def test_empty_plan(self):
"""
Tests that re-planning a full migration of a fully-migrated set doesn't
perform spurious unmigrations and remigrations.
There was previously a bug where the executor just always performed the
backwards plan for applied migrations - which even for the most recent
migration in an app, might include other, dependent apps, and these
were being unmigrated.
"""
# Make the initial plan, check it
executor = MigrationExecutor(connection)
plan = executor.migration_plan([
("migrations", "0002_second"),
("migrations2", "0001_initial"),
])
self.assertEqual(
plan,
[
(executor.loader.graph.nodes["migrations", "0001_initial"], False),
(executor.loader.graph.nodes["migrations", "0002_second"], False),
(executor.loader.graph.nodes["migrations2", "0001_initial"], False),
],
)
# Fake-apply all migrations
executor.migrate([
("migrations", "0002_second"),
("migrations2", "0001_initial")
], fake=True)
# Rebuild the graph to reflect the new DB state
executor.loader.build_graph()
# Now plan a second time and make sure it's empty
plan = executor.migration_plan([
("migrations", "0002_second"),
("migrations2", "0001_initial"),
])
self.assertEqual(plan, [])
# Erase all the fake records
executor.recorder.record_unapplied("migrations2", "0001_initial")
executor.recorder.record_unapplied("migrations", "0002_second")
executor.recorder.record_unapplied("migrations", "0001_initial")
    @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
    def test_soft_apply(self):
        """
        Tests detection of initial migrations already having been applied.
        """
        # Records the ``fake`` flag of the most recent progress event.
        state = {"faked": None}
        def fake_storer(phase, migration=None, fake=None):
            state["faked"] = fake
        executor = MigrationExecutor(connection, progress_callback=fake_storer)
        # Were the tables there before?
        self.assertTableNotExists("migrations_author")
        self.assertTableNotExists("migrations_tribble")
        # Run it normally
        self.assertEqual(
            executor.migration_plan([("migrations", "0001_initial")]),
            [
                (executor.loader.graph.nodes["migrations", "0001_initial"], False),
            ],
        )
        executor.migrate([("migrations", "0001_initial")])
        # Are the tables there now?
        self.assertTableExists("migrations_author")
        self.assertTableExists("migrations_tribble")
        # We shouldn't have faked that one
        self.assertEqual(state["faked"], False)
        # Rebuild the graph to reflect the new DB state
        executor.loader.build_graph()
        # Fake-reverse that
        executor.migrate([("migrations", None)], fake=True)
        # Are the tables still there?
        self.assertTableExists("migrations_author")
        self.assertTableExists("migrations_tribble")
        # Make sure that was faked
        self.assertEqual(state["faked"], True)
        # Finally, migrate forwards; this should fake-apply our initial migration
        executor.loader.build_graph()
        self.assertEqual(
            executor.migration_plan([("migrations", "0001_initial")]),
            [
                (executor.loader.graph.nodes["migrations", "0001_initial"], False),
            ],
        )
        # Applying the migration should raise a database level error
        # because we haven't given the --fake-initial option
        with self.assertRaises(DatabaseError):
            executor.migrate([("migrations", "0001_initial")])
        # Reset the faked state.  Rebinding ``state`` updates the closure cell,
        # so fake_storer writes into this fresh dict from here on.
        state = {"faked": None}
        # Allow faking of initial CreateModel operations
        executor.migrate([("migrations", "0001_initial")], fake_initial=True)
        self.assertEqual(state["faked"], True)
        # And migrate back to clean up the database
        executor.loader.build_graph()
        executor.migrate([("migrations", None)])
        self.assertTableNotExists("migrations_author")
        self.assertTableNotExists("migrations_tribble")
    @override_settings(
        MIGRATION_MODULES={
            "migrations": "migrations.test_migrations_custom_user",
            "django.contrib.auth": "django.contrib.auth.migrations",
        },
        AUTH_USER_MODEL="migrations.Author",
    )
    def test_custom_user(self):
        """
        Regression test for #22325 - references to a custom user model defined in the
        same app are not resolved correctly.
        """
        executor = MigrationExecutor(connection)
        self.assertTableNotExists("migrations_author")
        self.assertTableNotExists("migrations_tribble")
        # Migrate forwards
        executor.migrate([("migrations", "0001_initial")])
        self.assertTableExists("migrations_author")
        self.assertTableExists("migrations_tribble")
        # Make sure the soft-application detection works (#23093)
        # Change table_names to not return auth_user during this as
        # it wouldn't be there in a normal run, and ensure migrations.Author
        # exists in the global app registry temporarily.
        old_table_names = connection.introspection.table_names
        connection.introspection.table_names = lambda c: [x for x in old_table_names(c) if x != "auth_user"]
        migrations_apps = executor.loader.project_state(("migrations", "0001_initial")).apps
        global_apps.get_app_config("migrations").models["author"] = migrations_apps.get_model("migrations", "author")
        try:
            migration = executor.loader.get_migration("auth", "0001_initial")
            # Only the first element of the return value (the detected flag)
            # is asserted here.
            self.assertEqual(executor.detect_soft_applied(None, migration)[0], True)
        finally:
            # Always restore the patched introspection and app registry.
            connection.introspection.table_names = old_table_names
            del global_apps.get_app_config("migrations").models["author"]
        # And migrate back to clean up the database
        executor.loader.build_graph()
        executor.migrate([("migrations", None)])
        self.assertTableNotExists("migrations_author")
        self.assertTableNotExists("migrations_tribble")
@override_settings(
INSTALLED_APPS=[
"migrations.migrations_test_apps.lookuperror_a",
"migrations.migrations_test_apps.lookuperror_b",
"migrations.migrations_test_apps.lookuperror_c"
]
)
def test_unrelated_model_lookups_forwards(self):
"""
#24123 - Tests that all models of apps already applied which are
unrelated to the first app being applied are part of the initial model
state.
"""
try:
executor = MigrationExecutor(connection)
self.assertTableNotExists("lookuperror_a_a1")
self.assertTableNotExists("lookuperror_b_b1")
self.assertTableNotExists("lookuperror_c_c1")
executor.migrate([("lookuperror_b", "0003_b3")])
self.assertTableExists("lookuperror_b_b3")
# Rebuild the graph to reflect the new DB state
executor.loader.build_graph()
# Migrate forwards -- This led to a lookup LookupErrors because
# lookuperror_b.B2 is already applied
executor.migrate([
("lookuperror_a", "0004_a4"),
("lookuperror_c", "0003_c3"),
])
self.assertTableExists("lookuperror_a_a4")
self.assertTableExists("lookuperror_c_c3")
# Rebuild the graph to reflect the new DB state
executor.loader.build_graph()
finally:
# Cleanup
executor.migrate([
("lookuperror_a", None),
("lookuperror_b", None),
("lookuperror_c", None),
])
self.assertTableNotExists("lookuperror_a_a1")
self.assertTableNotExists("lookuperror_b_b1")
self.assertTableNotExists("lookuperror_c_c1")
@override_settings(
INSTALLED_APPS=[
"migrations.migrations_test_apps.lookuperror_a",
"migrations.migrations_test_apps.lookuperror_b",
"migrations.migrations_test_apps.lookuperror_c"
]
)
def test_unrelated_model_lookups_backwards(self):
"""
#24123 - Tests that all models of apps being unapplied which are
unrelated to the first app being unapplied are part of the initial
model state.
"""
try:
executor = MigrationExecutor(connection)
self.assertTableNotExists("lookuperror_a_a1")
self.assertTableNotExists("lookuperror_b_b1")
self.assertTableNotExists("lookuperror_c_c1")
executor.migrate([
("lookuperror_a", "0004_a4"),
("lookuperror_b", "0003_b3"),
("lookuperror_c", "0003_c3"),
])
self.assertTableExists("lookuperror_b_b3")
self.assertTableExists("lookuperror_a_a4")
self.assertTableExists("lookuperror_c_c3")
# Rebuild the graph to reflect the new DB state
executor.loader.build_graph()
# Migrate backwards -- This led to a lookup LookupErrors because
# lookuperror_b.B2 is not in the initial state (unrelated to app c)
executor.migrate([("lookuperror_a", None)])
# Rebuild the graph to reflect the new DB state
executor.loader.build_graph()
finally:
# Cleanup
executor.migrate([
("lookuperror_b", None),
("lookuperror_c", None)
])
self.assertTableNotExists("lookuperror_a_a1")
self.assertTableNotExists("lookuperror_b_b1")
self.assertTableNotExists("lookuperror_c_c1")
    @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
    def test_process_callback(self):
        """
        #24129 - Tests callback process
        """
        # Collects every argument tuple the executor reports via the
        # progress callback, in order.
        call_args_list = []
        def callback(*args):
            call_args_list.append(args)
        executor = MigrationExecutor(connection, progress_callback=callback)
        # Were the tables there before?
        self.assertTableNotExists("migrations_author")
        self.assertTableNotExists("migrations_tribble")
        executor.migrate([
            ("migrations", "0001_initial"),
            ("migrations", "0002_second"),
        ])
        # Rebuild the graph to reflect the new DB state
        executor.loader.build_graph()
        # Migrate everything back off again.  NOTE(review): the duplicated
        # ("migrations", None) target looks redundant -- confirm it is
        # intentional before changing it.
        executor.migrate([
            ("migrations", None),
            ("migrations", None),
        ])
        self.assertTableNotExists("migrations_author")
        self.assertTableNotExists("migrations_tribble")
        migrations = executor.loader.graph.nodes
        # Expected render/apply/unapply event stream, in emission order.
        expected = [
            ("render_start", ),
            ("render_success", ),
            ("apply_start", migrations['migrations', '0001_initial'], False),
            ("apply_success", migrations['migrations', '0001_initial'], False),
            ("apply_start", migrations['migrations', '0002_second'], False),
            ("apply_success", migrations['migrations', '0002_second'], False),
            ("render_start", ),
            ("render_success", ),
            ("unapply_start", migrations['migrations', '0002_second'], False),
            ("unapply_success", migrations['migrations', '0002_second'], False),
            ("unapply_start", migrations['migrations', '0001_initial'], False),
            ("unapply_success", migrations['migrations', '0001_initial'], False),
        ]
        self.assertEqual(call_args_list, expected)
@override_settings(
INSTALLED_APPS=[
"migrations.migrations_test_apps.alter_fk.author_app",
"migrations.migrations_test_apps.alter_fk.book_app",
]
)
def test_alter_id_type_with_fk(self):
try:
executor = MigrationExecutor(connection)
self.assertTableNotExists("author_app_author")
self.assertTableNotExists("book_app_book")
# Apply initial migrations
executor.migrate([
("author_app", "0001_initial"),
("book_app", "0001_initial"),
])
self.assertTableExists("author_app_author")
self.assertTableExists("book_app_book")
# Rebuild the graph to reflect the new DB state
executor.loader.build_graph()
# Apply PK type alteration
executor.migrate([("author_app", "0002_alter_id")])
# Rebuild the graph to reflect the new DB state
executor.loader.build_graph()
finally:
# We can't simply unapply the migrations here because there is no
# implicit cast from VARCHAR to INT on the database level.
with connection.schema_editor() as editor:
editor.execute(editor.sql_delete_table % {"table": "book_app_book"})
editor.execute(editor.sql_delete_table % {"table": "author_app_author"})
self.assertTableNotExists("author_app_author")
self.assertTableNotExists("book_app_book")
class FakeLoader(object):
    """Minimal MigrationLoader stand-in: a graph plus the applied-migration set."""
    def __init__(self, graph, applied):
        # Mirror the only two attributes the executor reads from a loader.
        self.graph, self.applied_migrations = graph, applied
class FakeMigration(object):
    """Really all we need is any object with a debug-useful repr."""
    def __init__(self, name):
        self.name = name
    def __repr__(self):
        # Same 'M<name>' form the original produced via %-formatting.
        return 'M<{0}>'.format(self.name)
class ExecutorUnitTests(TestCase):
    """(More) isolated unit tests for executor methods.

    These build small in-memory MigrationGraphs with FakeMigration /
    FakeLoader stand-ins and only exercise ``migration_plan``.
    """
    def test_minimize_rollbacks(self):
        """
        Minimize unnecessary rollbacks in connected apps.

        When you say "./manage.py migrate appA 0001", rather than migrating to
        just after appA-0001 in the linearized migration plan (which could roll
        back migrations in other apps that depend on appA 0001, but don't need
        to be rolled back since we're not rolling back appA 0001), we migrate
        to just before appA-0002.
        """
        a1_impl = FakeMigration('a1')
        a1 = ('a', '1')
        a2_impl = FakeMigration('a2')
        a2 = ('a', '2')
        b1_impl = FakeMigration('b1')
        b1 = ('b', '1')
        graph = MigrationGraph()
        graph.add_node(a1, a1_impl)
        graph.add_node(a2, a2_impl)
        graph.add_node(b1, b1_impl)
        # Both b1 and a2 depend on a1.
        graph.add_dependency(None, b1, a1)
        graph.add_dependency(None, a2, a1)
        executor = MigrationExecutor(None)
        executor.loader = FakeLoader(graph, {a1, b1, a2})
        plan = executor.migration_plan({a1})
        # Only a2 needs rolling back (True flag); b1 stays applied.
        self.assertEqual(plan, [(a2_impl, True)])
    def test_minimize_rollbacks_branchy(self):
        """
        Minimize rollbacks when target has multiple in-app children.

        a: 1 <---- 3 <--\
            \- 2 <--- 4
                \       \
        b:       \- 1 <--- 2
        """
        a1_impl = FakeMigration('a1')
        a1 = ('a', '1')
        a2_impl = FakeMigration('a2')
        a2 = ('a', '2')
        a3_impl = FakeMigration('a3')
        a3 = ('a', '3')
        a4_impl = FakeMigration('a4')
        a4 = ('a', '4')
        b1_impl = FakeMigration('b1')
        b1 = ('b', '1')
        b2_impl = FakeMigration('b2')
        b2 = ('b', '2')
        graph = MigrationGraph()
        graph.add_node(a1, a1_impl)
        graph.add_node(a2, a2_impl)
        graph.add_node(a3, a3_impl)
        graph.add_node(a4, a4_impl)
        graph.add_node(b1, b1_impl)
        graph.add_node(b2, b2_impl)
        graph.add_dependency(None, a2, a1)
        graph.add_dependency(None, a3, a1)
        graph.add_dependency(None, a4, a2)
        graph.add_dependency(None, a4, a3)
        graph.add_dependency(None, b2, b1)
        graph.add_dependency(None, b1, a1)
        graph.add_dependency(None, b2, a2)
        executor = MigrationExecutor(None)
        executor.loader = FakeLoader(graph, {a1, b1, a2, b2, a3, a4})
        plan = executor.migration_plan({a1})
        # Everything except a1 and b1 must be rolled back, in this order.
        should_be_rolled_back = [b2_impl, a4_impl, a2_impl, a3_impl]
        exp = [(m, True) for m in should_be_rolled_back]
        self.assertEqual(plan, exp)
    def test_backwards_nothing_to_do(self):
        """
        If the current state satisfies the given target, do nothing.

        a: 1 <--- 2
        b:  \- 1
        c:   \- 1

        If a1 is applied already and a2 is not, and we're asked to migrate to
        a1, don't apply or unapply b1 or c1, regardless of their current state.
        """
        a1_impl = FakeMigration('a1')
        a1 = ('a', '1')
        a2_impl = FakeMigration('a2')
        a2 = ('a', '2')
        b1_impl = FakeMigration('b1')
        b1 = ('b', '1')
        c1_impl = FakeMigration('c1')
        c1 = ('c', '1')
        graph = MigrationGraph()
        graph.add_node(a1, a1_impl)
        graph.add_node(a2, a2_impl)
        graph.add_node(b1, b1_impl)
        graph.add_node(c1, c1_impl)
        graph.add_dependency(None, a2, a1)
        graph.add_dependency(None, b1, a1)
        graph.add_dependency(None, c1, a1)
        executor = MigrationExecutor(None)
        # b1 applied, c1 not -- neither should appear in the plan.
        executor.loader = FakeLoader(graph, {a1, b1})
        plan = executor.migration_plan({a1})
        self.assertEqual(plan, [])
| bsd-3-clause |
rahulxxarora/Jango | jango/search.py | 1 | 1049 | import re
import wikipedia
import sys
import jangopath
# Parse the command line: every token after the first "for"/"about"
# becomes part of the search query (space-joined, trailing space kept).
flag = 0
query = ""
# Answer file the calling process reads the result back from.
f = open(jangopath.HOME_DIR + '/ans.txt','w')
for arg in sys.argv:
    if flag==1:
        query = query + arg + " "
    if arg.lower()=="for" or arg.lower()=="about":
        flag = 1
def strip_non_ascii(string):
    '''Return *string* with every character outside printable ASCII removed.'''
    # Keep only code points strictly between NUL (0) and DEL (127).
    return ''.join(c for c in string if 0 < ord(c) < 127)
def handle():
    """Look up the module-level ``query`` on Wikipedia and record the answer.

    Writes either a three-sentence summary or an error message both to the
    module-level answer file ``f`` (closed at the end) and to stdout.
    """
    # Local error flag; intentionally shadows the module-level parser flag.
    flag = 0
    try:
        info = wikipedia.summary(query, sentences=3)
    except wikipedia.exceptions.DisambiguationError:
        info = None
        f.write("Try to be more specific.")
        # Parenthesised single-argument print: identical behaviour on
        # Python 2 and valid on Python 3 (the bare ``print x`` form was not).
        print("Try to be more specific.")
        flag = 1
    except wikipedia.exceptions.PageError:
        info = None
        f.write("No result found!!")
        print("No result found!!")
        flag = 1
    if flag != 1:
        # Drop parenthesised asides, then any non-ASCII characters.
        info = re.sub(r'\([^)]*\)', ' ', info)
        info = strip_non_ascii(info)
        try:
            f.write(info)
            print(info)
        except UnicodeEncodeError:
            pass
    f.close()
# Run the lookup once for the query parsed above.
handle()
| mit |
udragon/ldc | ldc/test/test_ldcldap.py | 1 | 1472 | #!/usr/bin/python
import unittest
import uuid
import ldc
class TestLDCLdapInterface(unittest.TestCase):
    """Round-trip tests for the ldc LDAP helpers (users, groups, hosts).

    Each test creates an entry under a random UUID-based name, checks it
    exists, modifies one attribute, checks the change, then deletes it.
    """
    def test_ldcusers(self):
        """Add, look up, modify and delete a user entry."""
        rand_str = str(uuid.uuid4())
        ldc.ldap.users.add(rand_str, rand_str, "password", "12345", "100", "/home/users/" + rand_str, "/bin/bash")
        # PEP 8: compare to None with ``is``/``is not``, never ``==``/``!=``.
        assert ldc.ldap.users.get(rand_str) is not None
        ldc.ldap.users.modify(rand_str, "loginShell", "/bin/tcsh")
        assert ldc.ldap.users.get(rand_str).loginShell == "/bin/tcsh"
        ldc.ldap.users.delete(rand_str)
        assert ldc.ldap.users.get(rand_str) is None
    def test_ldcgroups(self):
        """Add, look up, modify and delete a group entry."""
        rand_str = str(uuid.uuid4())
        ldc.ldap.groups.add(rand_str, 12345, [], "test group")
        assert ldc.ldap.groups.get(rand_str) is not None
        ldc.ldap.groups.modify(rand_str, "description", "modified")
        assert ldc.ldap.groups.get(rand_str).description == "modified"
        ldc.ldap.groups.delete(rand_str)
        assert ldc.ldap.groups.get(rand_str) is None
    def test_ldchost(self):
        """Add, look up, modify and delete a host entry."""
        rand_str = str(uuid.uuid4())
        ldc.ldap.hosts.add(rand_str, "255.255.255.255", "ff:ff:ff:ff:ff:ff", "test host")
        assert ldc.ldap.hosts.get(rand_str) is not None
        ldc.ldap.hosts.modify(rand_str, "description", "modified")
        assert ldc.ldap.hosts.get(rand_str).description == "modified"
        ldc.ldap.hosts.delete(rand_str)
        assert ldc.ldap.hosts.get(rand_str) is None
# Allow running this module directly as a test script.
if __name__ == '__main__':
    unittest.main()
| gpl-2.0 |
correl/Transmission-XBMC | resources/lib/basictypes/wx/wxcopyreg.py | 4 | 3196 | """wxcopyreg -- functions for storing/restoring simple wxPython data types to pickle-friendly formats
importing this module installs the functions automatically!
"""
import pickle, zlib
from wxPython.wx import *
##
def bind( classObject, outFunction, inFunction ):
    """Install pickle support on *classObject*.

    *outFunction* becomes ``__getstate__`` and *inFunction* becomes
    ``__setstate__``, so instances serialise through simple tuples.
    """
    setattr( classObject, '__getstate__', outFunction )
    setattr( classObject, '__setstate__', inFunction )
def wxColourOut( value ):
    """Return the picklable state of a wxColour: an (r, g, b) tuple."""
    return tuple( accessor() for accessor in (value.Red, value.Green, value.Blue) )
def wxColourIn( self, args ):
    # Rebuild the underlying C++ colour from the (r, g, b) tuple produced by
    # wxColourOut; ``gdic`` is presumably the SWIG-generated wx module that
    # provides the constructor -- not visible in this file.
    self.this = apply(gdic.new_wxColour,args)
    self.thisown = 1
# Register the pickle hooks on the SWIG pointer class for wxColour.
bind( wxColourPtr, wxColourOut, wxColourIn )
def wxFontOut( value ):
    """Return the picklable state of a wxFont as a constructor-argument tuple.

    The font encoding is deliberately not captured, matching the layout
    that wxFontIn feeds back to the wxFont constructor.
    """
    accessors = (
        value.GetPointSize,
        value.GetFamily,
        value.GetStyle,
        value.GetWeight,
        value.GetUnderlined,
        value.GetFaceName,
    )
    return tuple( accessor() for accessor in accessors )
def wxFontIn( self, args ):
    # Rebuild the underlying C++ font from the tuple captured by wxFontOut;
    # ``fontsc`` is presumably the SWIG helper module -- not visible here.
    self.this = apply(fontsc.new_wxFont,args)
    self.thisown = 1
# Register the pickle hooks on the SWIG pointer class for wxFont.
bind( wxFontPtr, wxFontOut, wxFontIn )
def wxPenOut( value ):
    # Reduce a wxPen to nested picklable tuples:
    #   ((r, g, b), (width, style), (join, cap))
    # Stipple bitmaps and dash patterns are deliberately skipped (see the
    # commented-out getters below); wxPenIn/wxPyPenIn consume this layout.
    colour = value.GetColour()
    return (
        (
            colour.Red(),
            colour.Green(),
            colour.Blue()
        ),
        (
            value.GetWidth(),
            value.GetStyle(),
        ),
        (
            #stipple is a bitmap, we don't currently have
            #mechanisms for storing/restoring it, so ignore it
##            value.GetStipple(),
            value.GetJoin(),
            # missing in the current wxPython pre-release
            # should be available in wxPython 2.3.3 final
##            value.GetDashes(),
            value.GetCap(),
        ),
    )
def wxPenIn( self, (colour, init, props) ):
    # Python 2 tuple-parameter syntax: unpacks the triple from wxPenOut.
    colour = wxColour( *colour )
    self.this = apply(gdic.new_wxPen,(colour,)+init)
    self.thisown = 1
    # Pair each remaining property with its setter positionally; the order
    # must mirror the third tuple built by wxPenOut.  map(None, ...) is the
    # Python 2 zip-longest idiom.
    for prop, function in map( None, props, (
        #stipple is a bitmap, we don't currently have
        #mechanisms for storing/restoring it, so ignore it
##        self.SetStipple,
        self.SetJoin,
        # missing in the current wxPython pre-release
        # should be available in wxPython 2.3.3 final
##        self.SetDashes,
        self.SetCap
    )):
        function( prop )
def wxPyPenIn( self, (colour, init, props) ):
    # Same restore logic as wxPenIn but constructing a wxPyPen instead;
    # consumes the tuple layout produced by wxPenOut.
    colour = wxColour( *colour )
    self.this = apply(gdic.new_wxPyPen,(colour,)+init)
    self.thisown = 1
    # Positional property restore -- must stay in sync with wxPenOut.
    for prop, function in map( None, props, (
        #stipple is a bitmap, we don't currently have
        #mechanisms for storing/restoring it, so ignore it
##        self.SetStipple,
        self.SetJoin,
        # missing in the current wxPython pre-release
        # should be available in wxPython 2.3.3 final
##        self.SetDashes,
        self.SetCap
    )):
        function( prop )
# Both pen pointer classes share wxPenOut for saving; each restores with
# its own constructor wrapper.
bind( wxPenPtr, wxPenOut, wxPenIn )
bind( wxPyPenPtr, wxPenOut, wxPyPenIn )
def wxImageOut( value ):
    """Serialise a wxImage as a (width, height, compressed-data) tuple.

    The raw data payload is zlib-compressed to keep pickles small;
    wxImageIn reverses the transformation.
    """
    width = value.GetWidth()
    height = value.GetHeight()
    compressed = zlib.compress( value.GetData() )
    return ( width, height, compressed )
def wxImageIn( self, (width, height, data) ):
    # Recreate an empty image of the right size, then restore the
    # zlib-compressed payload produced by wxImageOut.
    self.this = apply(imagec.wxEmptyImage,(width,height))
    self.thisown = 1
    self.SetData( zlib.decompress( data) )
# Register the pickle hooks on the SWIG pointer class for wxImage.
bind( wxImagePtr, wxImageOut, wxImageIn )
def test():
    # Smoke test: pickle and unpickle one instance of each registered type
    # and display the round-tripped objects (Python 2 print statement).
    # NOTE(review): requires a 'test.jpg' in the working directory.
    for o in [
        wxColour( 23,23,23),
        wxFont( 12, wxMODERN, wxNORMAL, wxNORMAL ),
        wxPen(wxColour( 23,23,23),1,wxSOLID),
        wxImage( 'test.jpg', wxBITMAP_TYPE_ANY ),
    ]:
        o2 = pickle.loads(pickle.dumps(o))
        print o2
if __name__ == "__main__":
wxInitAllImageHandlers()
test() | mit |
cryptojuice/flask-scrypt | flask_scrypt.py | 1 | 3509 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Flask-Scrypt flask extension provides scrypt password hashing and random salt generation.
Hashes and Salts are base64 encoded.
"""
from __future__ import print_function, unicode_literals
import sys
import base64
import hmac
from os import urandom
from werkzeug.security import safe_str_cmp
try:
from itertools import izip
except ImportError:
izip = zip
# hmac.compare_digest exists only on newer Pythons; None otherwise.
# NOTE(review): this fallback does not appear to be used anywhere in this
# module -- confirm before removing.
_builtin_safe_str_cmp = getattr(hmac, 'compare_digest', None)
__version_info__ = ('0', '1', '3', '6')
__version__ = '.'.join(__version_info__)
__author__ = 'Gilbert Robinson'
__license__ = 'MIT'
__copyright__ = 'Copyright (c) 2013 Gilbert Robinson, Copyright (c) 2014 Samuel Marks'
# Explicit public API for ``from flask_scrypt import *``.
__all__ = ['generate_password_hash', 'generate_random_salt',
           'check_password_hash', 'enbase64', 'debase64']
try:
from scrypt import hash as scrypt_hash
except ImportError as err:
print('Please install py-scrypt package. Error: ', err)
raise err
# True when running under Python 2; used to decide when str must be encoded.
PYTHON2 = sys.version_info < (3, 0)
def enbase64(byte_str):
    """
    Base64-encode a bytes or str value.

    Args:
        - ``byte_str``: The string or bytes to base64 encode.

    Returns:
        - ``byte_str`` encoded as base64.
    """
    # base64.b64encode() on Python 3 refuses str input, so encode to UTF-8.
    needs_encoding = isinstance(byte_str, str) and not PYTHON2
    if needs_encoding:
        byte_str = bytes(byte_str, 'utf-8')
    return base64.b64encode(byte_str)
def debase64(byte_str):
    """
    Decode a base64-encoded bytes or str value.

    Args:
        - ``byte_str``: The string or bytes to base64 decode.

    Returns:
        - decoded value: type str on Python 2, type bytes on Python 3.
    """
    # base64.b64decode() on Python 3 wants bytes, so encode str input first.
    needs_encoding = isinstance(byte_str, str) and not PYTHON2
    if needs_encoding:
        byte_str = bytes(byte_str, 'utf-8')
    return base64.b64decode(byte_str)
def generate_password_hash(password, salt, N=1 << 14, r=8, p=1, buflen=64):
    """
    Hash *password* with scrypt using the given *salt* and cost parameters.

    Args:
        - ``password``: Password string.
        - ``salt``: Random base64 encoded string.

    Optional args:
        - ``N``: the CPU cost, a power of 2 greater than 1 (default ``1 << 14``).
        - ``r``: the memory cost (default 8).
        - ``p``: the parallelization parameter (default 1).

    The parameters r, p, and buflen must satisfy r * p < 2^30 and
    buflen <= (2^32 - 1) * 32.

    The recommended parameters for interactive logins as of 2009 are
    N=16384, r=8, p=1. Remember to use a good random salt.

    Returns:
        - base64 encoded scrypt hash.
    """
    # scrypt on Python 2 expects byte strings, so encode the text inputs.
    if PYTHON2:
        password = password.encode('utf-8')
        salt = salt.encode('utf-8')
    raw_hash = scrypt_hash(password, salt, N, r, p, buflen)
    return enbase64(raw_hash)
def generate_random_salt(byte_size=64):
    """
    Return a base64-encoded string of cryptographically random bytes,
    suitable as a salt for generate_password_hash().

    Optional Args:
        - ``byte_size``: number of random bytes to draw before encoding
          (default 64).
    """
    random_bytes = urandom(byte_size)
    return enbase64(random_bytes)
def check_password_hash(password, password_hash, salt, N=1 << 14, r=8, p=1, buflen=64):
    """
    Verify *password* against an existing *password_hash*/*salt* pair.

    The candidate password is hashed with identical scrypt parameters and
    the two base64 digests are compared with a timing-safe comparison.

    Args:
        - ``password``: The password to perform check on.

    Returns:
        - ``bool``
    """
    candidate = generate_password_hash(password, salt, N, r, p, buflen)
    return safe_str_cmp(password_hash, candidate)
| mit |
ryfeus/lambda-packs | HDF4_H5_NETCDF/source2.7/numpy/polynomial/hermite_e.py | 17 | 58086 | """
Objects for dealing with Hermite_e series.
This module provides a number of objects (mostly functions) useful for
dealing with Hermite_e series, including a `HermiteE` class that
encapsulates the usual arithmetic operations. (General information
on how this module represents and works with such polynomials is in the
docstring for its "parent" sub-package, `numpy.polynomial`).
Constants
---------
- `hermedomain` -- Hermite_e series default domain, [-1,1].
- `hermezero` -- Hermite_e series that evaluates identically to 0.
- `hermeone` -- Hermite_e series that evaluates identically to 1.
- `hermex` -- Hermite_e series for the identity map, ``f(x) = x``.
Arithmetic
----------
- `hermemulx` -- multiply a Hermite_e series in ``P_i(x)`` by ``x``.
- `hermeadd` -- add two Hermite_e series.
- `hermesub` -- subtract one Hermite_e series from another.
- `hermemul` -- multiply two Hermite_e series.
- `hermediv` -- divide one Hermite_e series by another.
- `hermeval` -- evaluate a Hermite_e series at given points.
- `hermeval2d` -- evaluate a 2D Hermite_e series at given points.
- `hermeval3d` -- evaluate a 3D Hermite_e series at given points.
- `hermegrid2d` -- evaluate a 2D Hermite_e series on a Cartesian product.
- `hermegrid3d` -- evaluate a 3D Hermite_e series on a Cartesian product.
Calculus
--------
- `hermeder` -- differentiate a Hermite_e series.
- `hermeint` -- integrate a Hermite_e series.
Misc Functions
--------------
- `hermefromroots` -- create a Hermite_e series with specified roots.
- `hermeroots` -- find the roots of a Hermite_e series.
- `hermevander` -- Vandermonde-like matrix for Hermite_e polynomials.
- `hermevander2d` -- Vandermonde-like matrix for 2D power series.
- `hermevander3d` -- Vandermonde-like matrix for 3D power series.
- `hermegauss` -- Gauss-Hermite_e quadrature, points and weights.
- `hermeweight` -- Hermite_e weight function.
- `hermecompanion` -- symmetrized companion matrix in Hermite_e form.
- `hermefit` -- least-squares fit returning a Hermite_e series.
- `hermetrim` -- trim leading coefficients from a Hermite_e series.
- `hermeline` -- Hermite_e series of given straight line.
- `herme2poly` -- convert a Hermite_e series to a polynomial.
- `poly2herme` -- convert a polynomial to a Hermite_e series.
Classes
-------
- `HermiteE` -- A Hermite_e series class.
See also
--------
`numpy.polynomial`
"""
from __future__ import division, absolute_import, print_function
import warnings
import numpy as np
import numpy.linalg as la
from numpy.core.multiarray import normalize_axis_index
from . import polyutils as pu
from ._polybase import ABCPolyBase
# Public API of the hermite_e module.
__all__ = [
    'hermezero', 'hermeone', 'hermex', 'hermedomain', 'hermeline',
    'hermeadd', 'hermesub', 'hermemulx', 'hermemul', 'hermediv',
    'hermepow', 'hermeval', 'hermeder', 'hermeint', 'herme2poly',
    'poly2herme', 'hermefromroots', 'hermevander', 'hermefit', 'hermetrim',
    'hermeroots', 'HermiteE', 'hermeval2d', 'hermeval3d', 'hermegrid2d',
    'hermegrid3d', 'hermevander2d', 'hermevander3d', 'hermecompanion',
    'hermegauss', 'hermeweight']
# Coefficient trimming is shared across all numpy.polynomial modules.
hermetrim = pu.trimcoef
def poly2herme(pol):
    """
    poly2herme(pol)

    Convert a polynomial to a Hermite series.

    Convert an array representing the coefficients of a polynomial (relative
    to the "standard" basis) ordered from lowest degree to highest, to an
    array of the coefficients of the equivalent Hermite series, ordered
    from lowest to highest degree.

    Parameters
    ----------
    pol : array_like
        1-D array containing the polynomial coefficients

    Returns
    -------
    c : ndarray
        1-D array containing the coefficients of the equivalent Hermite
        series.

    See Also
    --------
    herme2poly

    Notes
    -----
    The easy way to do conversions between polynomial basis sets
    is to use the convert method of a class instance.

    Examples
    --------
    >>> from numpy.polynomial.hermite_e import poly2herme
    >>> poly2herme(np.arange(4))
    array([  2.,  10.,   2.,   3.])
    """
    [pol] = pu.as_series([pol])
    # Horner-style accumulation in the HermiteE basis, highest degree first:
    # result = (...((pol[n])*x + pol[n-1])*x + ...) + pol[0], where the
    # multiply-by-x step is performed by hermemulx.
    result = 0
    for coef in reversed(pol):
        result = hermeadd(hermemulx(result), coef)
    return result
def herme2poly(c):
    """
    Convert a Hermite series to a polynomial.

    Convert an array representing the coefficients of a Hermite series,
    ordered from lowest degree to highest, to an array of the coefficients
    of the equivalent polynomial (relative to the "standard" basis) ordered
    from lowest to highest degree.

    Parameters
    ----------
    c : array_like
        1-D array containing the Hermite series coefficients, ordered
        from lowest order term to highest.

    Returns
    -------
    pol : ndarray
        1-D array containing the coefficients of the equivalent polynomial
        (relative to the "standard" basis) ordered from lowest order term
        to highest.

    See Also
    --------
    poly2herme

    Notes
    -----
    The easy way to do conversions between polynomial basis sets
    is to use the convert method of a class instance.

    Examples
    --------
    >>> from numpy.polynomial.hermite_e import herme2poly
    >>> herme2poly([  2.,  10.,   2.,   3.])
    array([ 0.,  1.,  2.,  3.])
    """
    from .polynomial import polyadd, polysub, polymulx
    [c] = pu.as_series([c])
    n = len(c)
    # Degree 0 and 1 series have identical coefficients in both bases.
    if n == 1:
        return c
    if n == 2:
        return c
    else:
        # Downward Clenshaw-style recurrence built on the HermiteE relation
        # He_{i+1}(x) = x*He_i(x) - i*He_{i-1}(x).
        c0 = c[-2]
        c1 = c[-1]
        # i is the current degree of c1
        for i in range(n - 1, 1, -1):
            tmp = c0
            c0 = polysub(c[i - 2], c1*(i - 1))
            c1 = polyadd(tmp, polymulx(c1))
        return polyadd(c0, polymulx(c1))
#
# These constant arrays are of integer type so as to be compatible
# with the widest range of other types, such as Decimal.
#

# Hermite_e default domain.
hermedomain = np.array([-1, 1])

# Hermite_e coefficients representing zero.
hermezero = np.array([0])

# Hermite_e coefficients representing one.
hermeone = np.array([1])

# Hermite_e coefficients representing the identity x.
hermex = np.array([0, 1])
def hermeline(off, scl):
    """
    Hermite series whose graph is a straight line.

    Parameters
    ----------
    off, scl : scalars
        The specified line is given by ``off + scl*x``.

    Returns
    -------
    y : ndarray
        This module's representation of the Hermite series for
        ``off + scl*x``.

    See Also
    --------
    polyline, chebline

    Examples
    --------
    >>> from numpy.polynomial.hermite_e import hermeline, hermeval
    >>> hermeval(0,hermeline(3, 2))
    3.0
    >>> hermeval(1,hermeline(3, 2))
    5.0
    """
    # A zero slope collapses to a constant (degree-0) series.
    if scl == 0:
        return np.array([off])
    return np.array([off, scl])
def hermefromroots(roots):
    """
    Generate a HermiteE series with given roots.

    The function returns the coefficients of the polynomial

    .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n),

    in HermiteE form, where the `r_n` are the roots specified in `roots`.
    If a zero has multiplicity n, then it must appear in `roots` n times.
    For instance, if 2 is a root of multiplicity three and 3 is a root of
    multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The
    roots can appear in any order.

    If the returned coefficients are `c`, then

    .. math:: p(x) = c_0 + c_1 * He_1(x) + ... + c_n * He_n(x)

    The coefficient of the last term is not generally 1 for monic
    polynomials in HermiteE form.

    Parameters
    ----------
    roots : array_like
        Sequence containing the roots.

    Returns
    -------
    out : ndarray
        1-D array of coefficients.  If all roots are real then `out` is a
        real array, if some of the roots are complex, then `out` is complex
        even if all the coefficients in the result are real (see Examples
        below).

    See Also
    --------
    polyfromroots, legfromroots, lagfromroots, hermfromroots,
    chebfromroots.

    Examples
    --------
    >>> from numpy.polynomial.hermite_e import hermefromroots, hermeval
    >>> coef = hermefromroots((-1, 0, 1))
    >>> hermeval((-1, 0, 1), coef)
    array([ 0.,  0.,  0.])
    >>> coef = hermefromroots((-1j, 1j))
    >>> hermeval((-1j, 1j), coef)
    array([ 0.+0.j,  0.+0.j])
    """
    if len(roots) == 0:
        # Empty product: the constant series 1.
        return np.ones(1)
    else:
        [roots] = pu.as_series([roots], trim=False)
        roots.sort()
        # One linear factor (x - r) per root, in HermiteE form.
        p = [hermeline(-r, 1) for r in roots]
        n = len(p)
        # Multiply factors pairwise until one series remains; the balanced
        # tree keeps intermediate degrees (and rounding error) small.
        while n > 1:
            m, r = divmod(n, 2)
            tmp = [hermemul(p[i], p[i+m]) for i in range(m)]
            if r:
                tmp[0] = hermemul(tmp[0], p[-1])
            p = tmp
            n = m
        return p[0]
def hermeadd(c1, c2):
    """
    Add one Hermite series to another.

    Returns the sum of two Hermite series `c1` + `c2`.  The arguments
    are sequences of coefficients ordered from lowest order term to
    highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.

    Parameters
    ----------
    c1, c2 : array_like
        1-D arrays of Hermite series coefficients ordered from low to
        high.

    Returns
    -------
    out : ndarray
        Array representing the Hermite series of their sum.

    See Also
    --------
    hermesub, hermemul, hermediv, hermepow

    Notes
    -----
    The sum of two Hermite series is again a Hermite series (no
    "reprojection" onto the basis is needed), so addition is simply
    component-wise on the coefficient arrays.

    Examples
    --------
    >>> from numpy.polynomial.hermite_e import hermeadd
    >>> hermeadd([1, 2, 3], [1, 2, 3, 4])
    array([ 2.,  4.,  6.,  4.])
    """
    # Trimmed working copies of both inputs.
    [c1, c2] = pu.as_series([c1, c2])
    # Accumulate the shorter series into the head of the longer one.
    longer, shorter = (c1, c2) if len(c1) > len(c2) else (c2, c1)
    longer[:shorter.size] += shorter
    return pu.trimseq(longer)
def hermesub(c1, c2):
    """
    Subtract one Hermite series from another.

    Returns the difference of two Hermite series `c1` - `c2`.  The
    sequences of coefficients are from lowest order term to highest, i.e.,
    [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.

    Parameters
    ----------
    c1, c2 : array_like
        1-D arrays of Hermite series coefficients ordered from low to
        high.

    Returns
    -------
    out : ndarray
        Of Hermite series coefficients representing their difference.

    See Also
    --------
    hermeadd, hermemul, hermediv, hermepow

    Notes
    -----
    The difference of two Hermite series is again a Hermite series (no
    "reprojection" onto the basis is needed), so subtraction is simply
    component-wise on the coefficient arrays.

    Examples
    --------
    >>> from numpy.polynomial.hermite_e import hermesub
    >>> hermesub([1, 2, 3, 4], [1, 2, 3])
    array([ 0.,  0.,  0.,  4.])
    """
    # Trimmed working copies of both inputs.
    [c1, c2] = pu.as_series([c1, c2])
    if len(c1) > len(c2):
        c1[:c2.size] -= c2
        diff = c1
    else:
        # Negate the longer (or equal-length) series, then add c1 into it.
        c2 = -c2
        c2[:c1.size] += c1
        diff = c2
    return pu.trimseq(diff)
def hermemulx(c):
    """Multiply a Hermite series by x.

    Multiply the Hermite series `c` by x, where x is the independent
    variable.

    Parameters
    ----------
    c : array_like
        1-D array of Hermite series coefficients ordered from low to
        high.

    Returns
    -------
    out : ndarray
        Array representing the result of the multiplication.

    Notes
    -----
    The multiplication uses the recursion relationship for Hermite
    polynomials in the form

    .. math::

        xP_i(x) = (P_{i + 1}(x) + iP_{i - 1}(x)))

    Examples
    --------
    >>> from numpy.polynomial.hermite_e import hermemulx
    >>> hermemulx([1, 2, 3])
    array([ 2.,  7.,  2.,  3.])
    """
    [c] = pu.as_series([c])
    # The zero series maps to itself; returning early keeps length 1.
    if len(c) == 1 and c[0] == 0:
        return c
    prd = np.empty(len(c) + 1, dtype=c.dtype)
    prd[0] = c[0]*0
    prd[1] = c[0]
    # Each coefficient c[i] contributes to degrees i+1 and i-1 per the
    # recursion x*P_i = P_{i+1} + i*P_{i-1}.
    for i, ci in enumerate(c[1:], start=1):
        prd[i + 1] = ci
        prd[i - 1] += ci*i
    return prd
def hermemul(c1, c2):
    """
    Multiply one Hermite series by another.

    Returns the product of two Hermite series `c1` * `c2`.  The arguments
    are sequences of coefficients, from lowest order "term" to highest,
    e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.

    Parameters
    ----------
    c1, c2 : array_like
        1-D arrays of Hermite series coefficients ordered from low to
        high.

    Returns
    -------
    out : ndarray
        Of Hermite series coefficients representing their product.

    See Also
    --------
    hermeadd, hermesub, hermediv, hermepow

    Notes
    -----
    In general, the (polynomial) product of two C-series results in terms
    that are not in the Hermite polynomial basis set.  Thus, to express
    the product as a Hermite series, it is necessary to "reproject" the
    product onto said basis set, which may produce "unintuitive" (but
    correct) results; see Examples section below.

    Examples
    --------
    >>> from numpy.polynomial.hermite_e import hermemul
    >>> hermemul([1, 2, 3], [0, 1, 2])
    array([ 14.,  15.,  28.,   7.,   6.])

    """
    # s1, s2 are trimmed copies
    [c1, c2] = pu.as_series([c1, c2])

    # Clenshaw-like scheme: treat the shorter series as the "coefficients"
    # and substitute the longer one in as the "argument".
    if len(c1) > len(c2):
        coefs, arg = c2, c1
    else:
        coefs, arg = c1, c2

    if len(coefs) == 1:
        c0 = coefs[0]*arg
        c1 = 0
    elif len(coefs) == 2:
        c0 = coefs[0]*arg
        c1 = coefs[1]*arg
    else:
        deg = len(coefs)
        c0 = coefs[-2]*arg
        c1 = coefs[-1]*arg
        for k in range(3, len(coefs) + 1):
            prev = c0
            deg = deg - 1
            c0 = hermesub(coefs[-k]*arg, c1*(deg - 1))
            c1 = hermeadd(prev, hermemulx(c1))
    return hermeadd(c0, hermemulx(c1))
def hermediv(c1, c2):
    """
    Divide one Hermite series by another.

    Returns the quotient-with-remainder of two Hermite series
    `c1` / `c2`.  The arguments are sequences of coefficients from lowest
    order "term" to highest, e.g., [1,2,3] represents the series
    ``P_0 + 2*P_1 + 3*P_2``.

    Parameters
    ----------
    c1, c2 : array_like
        1-D arrays of Hermite series coefficients ordered from low to
        high.

    Returns
    -------
    [quo, rem] : ndarrays
        Of Hermite series coefficients representing the quotient and
        remainder.

    See Also
    --------
    hermeadd, hermesub, hermemul, hermepow

    Notes
    -----
    In general, the (polynomial) division of one Hermite series by another
    results in quotient and remainder terms that are not in the Hermite
    polynomial basis set.  Thus, to express these results as a Hermite
    series, it is necessary to "reproject" the results onto the Hermite
    basis set, which may produce "unintuitive" (but correct) results; see
    Examples section below.

    Examples
    --------
    >>> from numpy.polynomial.hermite_e import hermediv
    >>> hermediv([ 14., 15., 28., 7., 6.], [0, 1, 2])
    (array([ 1.,  2.,  3.]), array([ 0.]))
    >>> hermediv([ 15., 17., 28., 7., 6.], [0, 1, 2])
    (array([ 1.,  2.,  3.]), array([ 1.,  2.]))

    """
    # c1, c2 are trimmed copies
    [c1, c2] = pu.as_series([c1, c2])
    if c2[-1] == 0:
        raise ZeroDivisionError()

    len1 = len(c1)
    len2 = len(c2)
    # Degenerate cases: divisor of higher degree, or a pure scalar divisor.
    if len1 < len2:
        return c1[:1]*0, c1
    if len2 == 1:
        return c1/c2[-1], c1[:1]*0

    # Long division from the top degree down.  At each step eliminate the
    # leading remainder coefficient using c2 shifted up by the basis
    # polynomial of degree k.
    quo = np.empty(len1 - len2 + 1, dtype=c1.dtype)
    rem = c1
    for k in range(len1 - len2, -1, -1):
        shifted = hermemul([0]*k + [1], c2)
        scale = rem[-1]/shifted[-1]
        rem = rem[:-1] - scale*shifted[:-1]
        quo[k] = scale
    return quo, pu.trimseq(rem)
def hermepow(c, pow, maxpower=16):
    """Raise a Hermite series to a power.

    Returns the Hermite series `c` raised to the power `pow`.  The
    argument `c` is a sequence of coefficients ordered from low to high.
    i.e., [1,2,3] is the series  ``P_0 + 2*P_1 + 3*P_2.``

    Parameters
    ----------
    c : array_like
        1-D array of Hermite series coefficients ordered from low to
        high.
    pow : integer
        Power to which the series will be raised
    maxpower : integer, optional
        Maximum power allowed.  This is mainly to limit growth of the
        series to unmanageable size.  Default is 16

    Returns
    -------
    coef : ndarray
        Hermite series of power.

    See Also
    --------
    hermeadd, hermesub, hermemul, hermediv

    Examples
    --------
    >>> from numpy.polynomial.hermite_e import hermepow
    >>> hermepow([1, 2, 3], 2)
    array([ 23.,  28.,  46.,  12.,   9.])

    """
    # c is a trimmed copy
    [c] = pu.as_series([c])
    power = int(pow)
    # Guard clauses: the exponent must be a non-negative integer that does
    # not exceed the growth limit.
    if power != pow or power < 0:
        raise ValueError("Power must be a non-negative integer.")
    if maxpower is not None and power > maxpower:
        raise ValueError("Power is too large")
    if power == 0:
        return np.array([1], dtype=c.dtype)
    if power == 1:
        return c
    # Repeated multiplication.  Exponentiation by squaring would change the
    # floating-point rounding pattern, so keep the straightforward loop.
    prd = c
    for _ in range(power - 1):
        prd = hermemul(prd, c)
    return prd
def hermeder(c, m=1, scl=1, axis=0):
    """
    Differentiate a Hermite_e series.

    Returns the series coefficients `c` differentiated `m` times along
    `axis`.  At each iteration the result is multiplied by `scl` (the
    scaling factor is for use in a linear change of variable).  The argument
    `c` is an array of coefficients from low to high degree along each
    axis, e.g., [1,2,3] represents the series ``1*He_0 + 2*He_1 + 3*He_2``
    while [[1,2],[1,2]] represents ``1*He_0(x)*He_0(y) + 1*He_1(x)*He_0(y)
    + 2*He_0(x)*He_1(y) + 2*He_1(x)*He_1(y)`` if axis=0 is ``x`` and axis=1
    is ``y``.

    Parameters
    ----------
    c : array_like
        Array of Hermite_e series coefficients. If `c` is multidimensional
        the different axis correspond to different variables with the
        degree in each axis given by the corresponding index.
    m : int, optional
        Number of derivatives taken, must be non-negative. (Default: 1)
    scl : scalar, optional
        Each differentiation is multiplied by `scl`.  The end result is
        multiplication by ``scl**m``.  This is for use in a linear change of
        variable. (Default: 1)
    axis : int, optional
        Axis over which the derivative is taken. (Default: 0).

        .. versionadded:: 1.7.0

    Returns
    -------
    der : ndarray
        Hermite series of the derivative.

    See Also
    --------
    hermeint

    Notes
    -----
    In general, the result of differentiating a Hermite series does not
    resemble the same operation on a power series. Thus the result of this
    function may be "unintuitive," albeit correct; see Examples section
    below.

    Examples
    --------
    >>> from numpy.polynomial.hermite_e import hermeder
    >>> hermeder([ 1.,  1.,  1.,  1.])
    array([ 1.,  2.,  3.])
    >>> hermeder([-0.25,  1.,  1./2.,  1./3.,  1./4 ], m=2)
    array([ 1.,  2.,  3.])

    """
    # Work on a private copy; boolean/integer input is promoted to double
    # so the in-place scaling below behaves like real arithmetic.
    c = np.array(c, ndmin=1, copy=1)
    if c.dtype.char in '?bBhHiIlLqQpP':
        c = c.astype(np.double)
    # m and axis must be integral: int() must round-trip exactly.
    cnt, iaxis = [int(t) for t in [m, axis]]
    if cnt != m:
        raise ValueError("The order of derivation must be integer")
    if cnt < 0:
        raise ValueError("The order of derivation must be non-negative")
    if iaxis != axis:
        raise ValueError("The axis must be integer")
    iaxis = normalize_axis_index(iaxis, c.ndim)
    if cnt == 0:
        # Zeroth derivative: return the (copied, possibly promoted) input.
        return c
    # Move the working axis to the front so the loops below can index it
    # as if c were 1-D.
    c = np.moveaxis(c, iaxis, 0)
    n = len(c)
    if cnt >= n:
        # Differentiating at least as many times as there are coefficients
        # annihilates the series; return a single zero coefficient.
        return c[:1]*0
    else:
        for i in range(cnt):
            n = n - 1
            c *= scl
            der = np.empty((n,) + c.shape[1:], dtype=c.dtype)
            for j in range(n, 0, -1):
                # Coefficient mapping for one differentiation: the degree-j
                # term contributes j*c[j] to the degree j-1 slot.
                der[j - 1] = j*c[j]
            c = der
    # Restore the original axis order.
    c = np.moveaxis(c, 0, iaxis)
    return c
def hermeint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
    """
    Integrate a Hermite_e series.

    Returns the Hermite_e series coefficients `c` integrated `m` times from
    `lbnd` along `axis`.  At each iteration the resulting series is
    **multiplied** by `scl` and an integration constant, `k`, is added.
    The scaling factor is for use in a linear change of variable.  ("Buyer
    beware": note that, depending on what one is doing, one may want `scl`
    to be the reciprocal of what one might expect; for more information,
    see the Notes section below.)  The argument `c` is an array of
    coefficients from low to high degree along each axis, e.g., [1,2,3]
    represents the series ``H_0 + 2*H_1 + 3*H_2`` while [[1,2],[1,2]]
    represents ``1*H_0(x)*H_0(y) + 1*H_1(x)*H_0(y) + 2*H_0(x)*H_1(y) +
    2*H_1(x)*H_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``.

    Parameters
    ----------
    c : array_like
        Array of Hermite_e series coefficients. If c is multidimensional
        the different axis correspond to different variables with the
        degree in each axis given by the corresponding index.
    m : int, optional
        Order of integration, must be positive. (Default: 1)
    k : {[], list, scalar}, optional
        Integration constant(s).  The value of the first integral at
        ``lbnd`` is the first value in the list, the value of the second
        integral at ``lbnd`` is the second value, etc.  If ``k == []`` (the
        default), all constants are set to zero.  If ``m == 1``, a single
        scalar can be given instead of a list.
    lbnd : scalar, optional
        The lower bound of the integral. (Default: 0)
    scl : scalar, optional
        Following each integration the result is *multiplied* by `scl`
        before the integration constant is added. (Default: 1)
    axis : int, optional
        Axis over which the integral is taken. (Default: 0).

        .. versionadded:: 1.7.0

    Returns
    -------
    S : ndarray
        Hermite_e series coefficients of the integral.

    Raises
    ------
    ValueError
        If ``m < 0``, ``len(k) > m``, ``np.ndim(lbnd) != 0``, or
        ``np.ndim(scl) != 0``.

    See Also
    --------
    hermeder

    Notes
    -----
    Note that the result of each integration is *multiplied* by `scl`.
    Why is this important to note?  Say one is making a linear change of
    variable :math:`u = ax + b` in an integral relative to `x`.  Then
    :math:`dx = du/a`, so one will need to set `scl` equal to
    :math:`1/a` - perhaps not what one would have first thought.

    Also note that, in general, the result of integrating a C-series needs
    to be "reprojected" onto the C-series basis set.  Thus, typically,
    the result of this function is "unintuitive," albeit correct; see
    Examples section below.

    Examples
    --------
    >>> from numpy.polynomial.hermite_e import hermeint
    >>> hermeint([1, 2, 3]) # integrate once, value 0 at 0.
    array([ 1.,  1.,  1.,  1.])
    >>> hermeint([1, 2, 3], m=2) # integrate twice, value & deriv 0 at 0
    array([-0.25      ,  1.        ,  0.5       ,  0.33333333,  0.25      ])
    >>> hermeint([1, 2, 3], k=1) # integrate once, value 1 at 0.
    array([ 2.,  1.,  1.,  1.])
    >>> hermeint([1, 2, 3], lbnd=-1) # integrate once, value 0 at -1
    array([-1.,  1.,  1.,  1.])
    >>> hermeint([1, 2, 3], m=2, k=[1, 2], lbnd=-1)
    array([ 1.83333333,  0.        ,  0.5       ,  0.33333333,  0.25      ])

    """
    # NOTE(review): the mutable default ``k=[]`` is safe here because k is
    # only ever rebound (never mutated in place), but ``k=None`` would be
    # the conventional idiom.
    # Work on a private copy; boolean/integer input is promoted to double.
    c = np.array(c, ndmin=1, copy=1)
    if c.dtype.char in '?bBhHiIlLqQpP':
        c = c.astype(np.double)
    # Allow a single scalar constant in place of a list.
    if not np.iterable(k):
        k = [k]
    # m and axis must be integral: int() must round-trip exactly.
    cnt, iaxis = [int(t) for t in [m, axis]]
    if cnt != m:
        raise ValueError("The order of integration must be integer")
    if cnt < 0:
        raise ValueError("The order of integration must be non-negative")
    if len(k) > cnt:
        raise ValueError("Too many integration constants")
    if np.ndim(lbnd) != 0:
        raise ValueError("lbnd must be a scalar.")
    if np.ndim(scl) != 0:
        raise ValueError("scl must be a scalar.")
    if iaxis != axis:
        raise ValueError("The axis must be integer")
    iaxis = normalize_axis_index(iaxis, c.ndim)
    if cnt == 0:
        return c
    # Bring the working axis to the front; pad k with zeros so there is
    # one constant per integration.
    c = np.moveaxis(c, iaxis, 0)
    k = list(k) + [0]*(cnt - len(k))
    for i in range(cnt):
        n = len(c)
        c *= scl
        if n == 1 and np.all(c[0] == 0):
            # Integrating the zero series just adds the constant.
            c[0] += k[i]
        else:
            tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype)
            tmp[0] = c[0]*0
            tmp[1] = c[0]
            for j in range(1, n):
                tmp[j + 1] = c[j]/(j + 1)
            # Adjust the constant term so the integral equals k[i] at lbnd.
            tmp[0] += k[i] - hermeval(lbnd, tmp)
            c = tmp
    # Restore the original axis order.
    c = np.moveaxis(c, 0, iaxis)
    return c
def hermeval(x, c, tensor=True):
    """
    Evaluate an HermiteE series at points x.

    If `c` is of length `n + 1`, this function returns the value:

    .. math:: p(x) = c_0 * He_0(x) + c_1 * He_1(x) + ... + c_n * He_n(x)

    The parameter `x` is converted to an array only if it is a tuple or a
    list, otherwise it is treated as a scalar. In either case, either `x`
    or its elements must support multiplication and addition both with
    themselves and with the elements of `c`.

    If `c` is a 1-D array, then `p(x)` will have the same shape as `x`.  If
    `c` is multidimensional, then the shape of the result depends on the
    value of `tensor`. If `tensor` is true the shape will be c.shape[1:] +
    x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that
    scalars have shape (,).

    Trailing zeros in the coefficients will be used in the evaluation, so
    they should be avoided if efficiency is a concern.

    Parameters
    ----------
    x : array_like, compatible object
        If `x` is a list or tuple, it is converted to an ndarray, otherwise
        it is left unchanged and treated as a scalar. In either case, `x`
        or its elements must support addition and multiplication with
        with themselves and with the elements of `c`.
    c : array_like
        Array of coefficients ordered so that the coefficients for terms of
        degree n are contained in c[n]. If `c` is multidimensional the
        remaining indices enumerate multiple polynomials. In the two
        dimensional case the coefficients may be thought of as stored in
        the columns of `c`.
    tensor : boolean, optional
        If True, the shape of the coefficient array is extended with ones
        on the right, one for each dimension of `x`. Scalars have dimension 0
        for this action. The result is that every column of coefficients in
        `c` is evaluated for every element of `x`. If False, `x` is broadcast
        over the columns of `c` for the evaluation.  This keyword is useful
        when `c` is multidimensional. The default value is True.

        .. versionadded:: 1.7.0

    Returns
    -------
    values : ndarray, algebra_like
        The shape of the return value is described above.

    See Also
    --------
    hermeval2d, hermegrid2d, hermeval3d, hermegrid3d

    Notes
    -----
    The evaluation uses Clenshaw recursion, aka synthetic division.

    Examples
    --------
    >>> from numpy.polynomial.hermite_e import hermeval
    >>> coef = [1,2,3]
    >>> hermeval(1, coef)
    3.0
    >>> hermeval([[1,2],[3,4]], coef)
    array([[  3.,  14.],
           [ 31.,  54.]])

    """
    c = np.array(c, ndmin=1, copy=0)
    if c.dtype.char in '?bBhHiIlLqQpP':
        c = c.astype(np.double)
    if isinstance(x, (tuple, list)):
        x = np.asarray(x)
    if isinstance(x, np.ndarray) and tensor:
        # Append one broadcast axis per dimension of x so that every column
        # of coefficients is evaluated at every point of x.
        c = c.reshape(c.shape + (1,)*x.ndim)

    # Clenshaw recursion run from the highest coefficient down.
    if len(c) == 1:
        c0, c1 = c[0], 0
    elif len(c) == 2:
        c0, c1 = c[0], c[1]
    else:
        deg = len(c)
        c0 = c[-2]
        c1 = c[-1]
        for k in range(3, len(c) + 1):
            prev = c0
            deg = deg - 1
            c0 = c[-k] - c1*(deg - 1)
            c1 = prev + c1*x
    return c0 + c1*x
def hermeval2d(x, y, c):
    """
    Evaluate a 2-D HermiteE series at points (x, y).

    This function returns the values:

    .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * He_i(x) * He_j(y)

    The parameters `x` and `y` are converted to arrays only if they are
    tuples or a lists, otherwise they are treated as a scalars and they
    must have the same shape after conversion. In either case, either `x`
    and `y` or their elements must support multiplication and addition both
    with themselves and with the elements of `c`.

    If `c` is a 1-D array a one is implicitly appended to its shape to make
    it 2-D. The shape of the result will be c.shape[2:] + x.shape.

    Parameters
    ----------
    x, y : array_like, compatible objects
        The two dimensional series is evaluated at the points `(x, y)`,
        where `x` and `y` must have the same shape. If `x` or `y` is a list
        or tuple, it is first converted to an ndarray, otherwise it is left
        unchanged and if it isn't an ndarray it is treated as a scalar.
    c : array_like
        Array of coefficients ordered so that the coefficient of the term
        of multi-degree i,j is contained in ``c[i,j]``. If `c` has
        dimension greater than two the remaining indices enumerate multiple
        sets of coefficients.

    Returns
    -------
    values : ndarray, compatible object
        The values of the two dimensional polynomial at points formed with
        pairs of corresponding values from `x` and `y`.

    See Also
    --------
    hermeval, hermegrid2d, hermeval3d, hermegrid3d

    Notes
    -----
    .. versionadded:: 1.7.0

    """
    # Promote x and y together; failure means their shapes are incompatible.
    try:
        x, y = np.array((x, y), copy=0)
    except Exception:
        raise ValueError('x, y are incompatible')
    # Evaluate along x first, then collapse the remaining degree axis with y.
    vals = hermeval(x, c)
    return hermeval(y, vals, tensor=False)
def hermegrid2d(x, y, c):
    """
    Evaluate a 2-D HermiteE series on the Cartesian product of x and y.

    This function returns the values:

    .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * H_i(a) * H_j(b)

    where the points `(a, b)` consist of all pairs formed by taking
    `a` from `x` and `b` from `y`. The resulting points form a grid with
    `x` in the first dimension and `y` in the second.

    The parameters `x` and `y` are converted to arrays only if they are
    tuples or a lists, otherwise they are treated as a scalars. In either
    case, either `x` and `y` or their elements must support multiplication
    and addition both with themselves and with the elements of `c`.

    If `c` has fewer than two dimensions, ones are implicitly appended to
    its shape to make it 2-D. The shape of the result will be c.shape[2:] +
    x.shape.

    Parameters
    ----------
    x, y : array_like, compatible objects
        The two dimensional series is evaluated at the points in the
        Cartesian product of `x` and `y`.  If `x` or `y` is a list or
        tuple, it is first converted to an ndarray, otherwise it is left
        unchanged and, if it isn't an ndarray, it is treated as a scalar.
    c : array_like
        Array of coefficients ordered so that the coefficients for terms of
        degree i,j are contained in ``c[i,j]``. If `c` has dimension
        greater than two the remaining indices enumerate multiple sets of
        coefficients.

    Returns
    -------
    values : ndarray, compatible object
        The values of the two dimensional polynomial at points in the Cartesian
        product of `x` and `y`.

    See Also
    --------
    hermeval, hermeval2d, hermeval3d, hermegrid3d

    Notes
    -----
    .. versionadded:: 1.7.0

    """
    # Two successive tensor-mode evaluations produce the full outer grid:
    # first over x (adding x's axes), then over y (adding y's axes).
    return hermeval(y, hermeval(x, c))
def hermeval3d(x, y, z, c):
    """
    Evaluate a 3-D Hermite_e series at points (x, y, z).

    This function returns the values:

    .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * He_i(x) * He_j(y) * He_k(z)

    The parameters `x`, `y`, and `z` are converted to arrays only if
    they are tuples or a lists, otherwise they are treated as a scalars and
    they must have the same shape after conversion. In either case, either
    `x`, `y`, and `z` or their elements must support multiplication and
    addition both with themselves and with the elements of `c`.

    If `c` has fewer than 3 dimensions, ones are implicitly appended to its
    shape to make it 3-D. The shape of the result will be c.shape[3:] +
    x.shape.

    Parameters
    ----------
    x, y, z : array_like, compatible object
        The three dimensional series is evaluated at the points
        `(x, y, z)`, where `x`, `y`, and `z` must have the same shape.  If
        any of `x`, `y`, or `z` is a list or tuple, it is first converted
        to an ndarray, otherwise it is left unchanged and if it isn't an
        ndarray it is treated as a scalar.
    c : array_like
        Array of coefficients ordered so that the coefficient of the term of
        multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension
        greater than 3 the remaining indices enumerate multiple sets of
        coefficients.

    Returns
    -------
    values : ndarray, compatible object
        The values of the multidimensional polynomial on points formed with
        triples of corresponding values from `x`, `y`, and `z`.

    See Also
    --------
    hermeval, hermeval2d, hermegrid2d, hermegrid3d

    Notes
    -----
    .. versionadded:: 1.7.0

    """
    # Promote all three point arrays together; failure means their shapes
    # are incompatible.
    try:
        x, y, z = np.array((x, y, z), copy=0)
    except Exception:
        raise ValueError('x, y, z are incompatible')
    # Contract one coefficient axis per variable, in order.
    vals = hermeval(x, c)
    vals = hermeval(y, vals, tensor=False)
    return hermeval(z, vals, tensor=False)
def hermegrid3d(x, y, z, c):
    """
    Evaluate a 3-D HermiteE series on the Cartesian product of x, y, and z.

    This function returns the values:

    .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * He_i(a) * He_j(b) * He_k(c)

    where the points `(a, b, c)` consist of all triples formed by taking
    `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form
    a grid with `x` in the first dimension, `y` in the second, and `z` in
    the third.

    The parameters `x`, `y`, and `z` are converted to arrays only if they
    are tuples or a lists, otherwise they are treated as a scalars. In
    either case, either `x`, `y`, and `z` or their elements must support
    multiplication and addition both with themselves and with the elements
    of `c`.

    If `c` has fewer than three dimensions, ones are implicitly appended to
    its shape to make it 3-D. The shape of the result will be c.shape[3:] +
    x.shape + y.shape + z.shape.

    Parameters
    ----------
    x, y, z : array_like, compatible objects
        The three dimensional series is evaluated at the points in the
        Cartesian product of `x`, `y`, and `z`.  If `x`,`y`, or `z` is a
        list or tuple, it is first converted to an ndarray, otherwise it is
        left unchanged and, if it isn't an ndarray, it is treated as a
        scalar.
    c : array_like
        Array of coefficients ordered so that the coefficients for terms of
        degree i,j are contained in ``c[i,j]``. If `c` has dimension
        greater than two the remaining indices enumerate multiple sets of
        coefficients.

    Returns
    -------
    values : ndarray, compatible object
        The values of the two dimensional polynomial at points in the Cartesian
        product of `x` and `y`.

    See Also
    --------
    hermeval, hermeval2d, hermegrid2d, hermeval3d

    Notes
    -----
    .. versionadded:: 1.7.0

    """
    # Three successive tensor-mode evaluations build the full outer grid,
    # appending x's, then y's, then z's axes to the result.
    return hermeval(z, hermeval(y, hermeval(x, c)))
def hermevander(x, deg):
    """Pseudo-Vandermonde matrix of given degree.

    Returns the pseudo-Vandermonde matrix of degree `deg` and sample points
    `x`. The pseudo-Vandermonde matrix is defined by

    .. math:: V[..., i] = He_i(x),

    where `0 <= i <= deg`. The leading indices of `V` index the elements of
    `x` and the last index is the degree of the HermiteE polynomial.

    If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the
    array ``V = hermevander(x, n)``, then ``np.dot(V, c)`` and
    ``hermeval(x, c)`` are the same up to roundoff. This equivalence is
    useful both for least squares fitting and for the evaluation of a large
    number of HermiteE series of the same degree and sample points.

    Parameters
    ----------
    x : array_like
        Array of points. The dtype is converted to float64 or complex128
        depending on whether any of the elements are complex. If `x` is
        scalar it is converted to a 1-D array.
    deg : int
        Degree of the resulting matrix.

    Returns
    -------
    vander : ndarray
        The pseudo-Vandermonde matrix.  The shape of the returned matrix is
        ``x.shape + (deg + 1,)``, where The last index is the degree of the
        corresponding HermiteE polynomial.  The dtype will be the same as
        the converted `x`.

    Examples
    --------
    >>> from numpy.polynomial.hermite_e import hermevander
    >>> x = np.array([-1, 0, 1])
    >>> hermevander(x, 3)
    array([[ 1., -1.,  0.,  2.],
           [ 1.,  0., -1., -0.],
           [ 1.,  1.,  0., -2.]])

    """
    ideg = int(deg)
    if ideg != deg:
        raise ValueError("deg must be integer")
    if ideg < 0:
        raise ValueError("deg must be non-negative")

    # Force at least 1-D and promote to floating point via "+ 0.0".
    x = np.array(x, copy=0, ndmin=1) + 0.0
    mat = np.empty((ideg + 1,) + x.shape, dtype=x.dtype)
    mat[0] = x*0 + 1            # He_0(x) == 1, broadcast to x's shape
    if ideg > 0:
        mat[1] = x              # He_1(x) == x
        for k in range(2, ideg + 1):
            # Three-term recurrence: He_k(x) = x*He_{k-1}(x) - (k-1)*He_{k-2}(x)
            mat[k] = mat[k-1]*x - mat[k-2]*(k - 1)
    # Move the degree axis last so rows index sample points.
    return np.moveaxis(mat, 0, -1)
def hermevander2d(x, y, deg):
    """Pseudo-Vandermonde matrix of given degrees.

    Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
    points `(x, y)`. The pseudo-Vandermonde matrix is defined by

    .. math:: V[..., (deg[1] + 1)*i + j] = He_i(x) * He_j(y),

    where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of
    `V` index the points `(x, y)` and the last index encodes the degrees of
    the HermiteE polynomials.

    If ``V = hermevander2d(x, y, [xdeg, ydeg])``, then the columns of `V`
    correspond to the elements of a 2-D coefficient array `c` of shape
    (xdeg + 1, ydeg + 1) in the order

    .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...

    and ``np.dot(V, c.flat)`` and ``hermeval2d(x, y, c)`` will be the same
    up to roundoff. This equivalence is useful both for least squares
    fitting and for the evaluation of a large number of 2-D HermiteE
    series of the same degrees and sample points.

    Parameters
    ----------
    x, y : array_like
        Arrays of point coordinates, all of the same shape. The dtypes
        will be converted to either float64 or complex128 depending on
        whether any of the elements are complex. Scalars are converted to
        1-D arrays.
    deg : list of ints
        List of maximum degrees of the form [x_deg, y_deg].

    Returns
    -------
    vander2d : ndarray
        The shape of the returned matrix is ``x.shape + (order,)``, where
        :math:`order = (deg[0]+1)*(deg[1]+1)`.  The dtype will be the same
        as the converted `x` and `y`.

    See Also
    --------
    hermevander, hermevander3d, hermeval2d, hermeval3d

    Notes
    -----
    .. versionadded:: 1.7.0

    """
    ideg = [int(d) for d in deg]
    # Exactly two degrees (x and y) are expected, each must round-trip
    # through int() unchanged and be non-negative.  (The loop variable is
    # named to avoid shadowing the builtin ``id``.)
    if len(ideg) != 2 or any(n != d or n < 0 for n, d in zip(ideg, deg)):
        raise ValueError("degrees must be non-negative integers")
    degx, degy = ideg
    # Promote both point arrays together and to floating point.
    x, y = np.array((x, y), copy=0) + 0.0

    vx = hermevander(x, degx)
    vy = hermevander(y, degy)
    # Outer product over the two trailing degree axes, then flatten them
    # into the single trailing "order" axis.
    v = vx[..., None]*vy[..., None, :]
    return v.reshape(v.shape[:-2] + (-1,))
def hermevander3d(x, y, z, deg):
    """Pseudo-Vandermonde matrix of given degrees.

    Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
    points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`,
    then the pseudo-Vandermonde matrix is defined by

    .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = He_i(x)*He_j(y)*He_k(z),

    where `0 <= i <= l`, `0 <= j <= m`, and `0 <= k <= n`.  The leading
    indices of `V` index the points `(x, y, z)` and the last index encodes
    the degrees of the HermiteE polynomials.

    If ``V = hermevander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns
    of `V` correspond to the elements of a 3-D coefficient array `c` of
    shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order

    .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},...

    and  ``np.dot(V, c.flat)`` and ``hermeval3d(x, y, z, c)`` will be the
    same up to roundoff. This equivalence is useful both for least squares
    fitting and for the evaluation of a large number of 3-D HermiteE
    series of the same degrees and sample points.

    Parameters
    ----------
    x, y, z : array_like
        Arrays of point coordinates, all of the same shape. The dtypes will
        be converted to either float64 or complex128 depending on whether
        any of the elements are complex. Scalars are converted to 1-D
        arrays.
    deg : list of ints
        List of maximum degrees of the form [x_deg, y_deg, z_deg].

    Returns
    -------
    vander3d : ndarray
        The shape of the returned matrix is ``x.shape + (order,)``, where
        :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`.  The dtype will
        be the same as the converted `x`, `y`, and `z`.

    See Also
    --------
    hermevander, hermevander2d, hermeval2d, hermeval3d

    Notes
    -----
    .. versionadded:: 1.7.0

    """
    ideg = [int(d) for d in deg]
    # Exactly three degrees (x, y, z) are expected, each must round-trip
    # through int() unchanged and be non-negative.  (The loop variable is
    # named to avoid shadowing the builtin ``id``.)
    if len(ideg) != 3 or any(n != d or n < 0 for n, d in zip(ideg, deg)):
        raise ValueError("degrees must be non-negative integers")
    degx, degy, degz = ideg
    # Promote all point arrays together and to floating point.
    x, y, z = np.array((x, y, z), copy=0) + 0.0

    vx = hermevander(x, degx)
    vy = hermevander(y, degy)
    vz = hermevander(z, degz)
    # Outer product over the three trailing degree axes, then flatten them
    # into the single trailing "order" axis.
    v = vx[..., None, None]*vy[..., None, :, None]*vz[..., None, None, :]
    return v.reshape(v.shape[:-3] + (-1,))
def hermefit(x, y, deg, rcond=None, full=False, w=None):
    """
    Least squares fit of Hermite series to data.

    Return the coefficients of a HermiteE series of degree `deg` that is
    the least squares fit to the data values `y` given at points `x`. If
    `y` is 1-D the returned coefficients will also be 1-D. If `y` is 2-D
    multiple fits are done, one for each column of `y`, and the resulting
    coefficients are stored in the corresponding columns of a 2-D return.
    The fitted polynomial(s) are in the form

    .. math::  p(x) = c_0 + c_1 * He_1(x) + ... + c_n * He_n(x),

    where `n` is `deg`.

    Parameters
    ----------
    x : array_like, shape (M,)
        x-coordinates of the M sample points ``(x[i], y[i])``.
    y : array_like, shape (M,) or (M, K)
        y-coordinates of the sample points. Several data sets of sample
        points sharing the same x-coordinates can be fitted at once by
        passing in a 2D-array that contains one dataset per column.
    deg : int or 1-D array_like
        Degree(s) of the fitting polynomials. If `deg` is a single integer
        all terms up to and including the `deg`'th term are included in the
        fit. For NumPy versions >= 1.11.0 a list of integers specifying the
        degrees of the terms to include may be used instead.
    rcond : float, optional
        Relative condition number of the fit. Singular values smaller than
        this relative to the largest singular value will be ignored. The
        default value is len(x)*eps, where eps is the relative precision of
        the float type, about 2e-16 in most cases.
    full : bool, optional
        Switch determining nature of return value. When it is False (the
        default) just the coefficients are returned, when True diagnostic
        information from the singular value decomposition is also returned.
    w : array_like, shape (`M`,), optional
        Weights. If not None, the contribution of each point
        ``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the
        weights are chosen so that the errors of the products ``w[i]*y[i]``
        all have the same variance.  The default value is None.

    Returns
    -------
    coef : ndarray, shape (M,) or (M, K)
        Hermite coefficients ordered from low to high. If `y` was 2-D,
        the coefficients for the data in column k  of `y` are in column
        `k`.

    [residuals, rank, singular_values, rcond] : list
        These values are only returned if `full` = True

        resid -- sum of squared residuals of the least squares fit
        rank -- the numerical rank of the scaled Vandermonde matrix
        sv -- singular values of the scaled Vandermonde matrix
        rcond -- value of `rcond`.

        For more details, see `linalg.lstsq`.

    Warns
    -----
    RankWarning
        The rank of the coefficient matrix in the least-squares fit is
        deficient. The warning is only raised if `full` = False.  The
        warnings can be turned off by

        >>> import warnings
        >>> warnings.simplefilter('ignore', RankWarning)

    See Also
    --------
    chebfit, legfit, polyfit, hermfit, polyfit
    hermeval : Evaluates a Hermite series.
    hermevander : pseudo Vandermonde matrix of Hermite series.
    hermeweight : HermiteE weight function.
    linalg.lstsq : Computes a least-squares fit from the matrix.
    scipy.interpolate.UnivariateSpline : Computes spline fits.

    Notes
    -----
    The solution is the coefficients of the HermiteE series `p` that
    minimizes the sum of the weighted squared errors

    .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2,

    where the :math:`w_j` are the weights. This problem is solved by
    setting up the (typically) overdetermined matrix equation

    .. math:: V(x) * c = w * y,

    where `V` is the pseudo Vandermonde matrix of `x`, the elements of `c`
    are the coefficients to be solved for, and the elements of `y` are the
    observed values.  This equation is then solved using the singular value
    decomposition of `V`.

    If some of the singular values of `V` are so small that they are
    neglected, then a `RankWarning` will be issued. This means that the
    coefficient values may be poorly determined. Using a lower order fit
    will usually get rid of the warning.  The `rcond` parameter can also be
    set to a value smaller than its default, but the resulting fit may be
    spurious and have large contributions from roundoff error.

    Fits using HermiteE series are probably most useful when the data can
    be approximated by ``sqrt(w(x)) * p(x)``, where `w(x)` is the HermiteE
    weight. In that case the weight ``sqrt(w(x[i])`` should be used
    together with data values ``y[i]/sqrt(w(x[i])``. The weight function is
    available as `hermeweight`.

    References
    ----------
    .. [1] Wikipedia, "Curve fitting",
           http://en.wikipedia.org/wiki/Curve_fitting

    Examples
    --------
    >>> from numpy.polynomial.hermite_e import hermefit, hermeval
    >>> x = np.linspace(-10, 10)
    >>> err = np.random.randn(len(x))/10
    >>> y = hermeval(x, [1, 2, 3]) + err
    >>> hermefit(x, y, 2)
    array([ 1.01690445,  1.99951418,  2.99948696])

    """
    # Promote inputs to floating point.
    x = np.asarray(x) + 0.0
    y = np.asarray(y) + 0.0
    deg = np.asarray(deg)

    # check arguments.
    if deg.ndim > 1 or deg.dtype.kind not in 'iu' or deg.size == 0:
        raise TypeError("deg must be an int or non-empty 1-D array of int")
    if deg.min() < 0:
        raise ValueError("expected deg >= 0")
    if x.ndim != 1:
        raise TypeError("expected 1D vector for x")
    if x.size == 0:
        raise TypeError("expected non-empty vector for x")
    if y.ndim < 1 or y.ndim > 2:
        raise TypeError("expected 1D or 2D array for y")
    if len(x) != len(y):
        raise TypeError("expected x and y to have same length")

    # Build the design matrix: all degrees 0..deg for a scalar deg, or
    # only the requested columns for an explicit list of degrees.
    if deg.ndim == 0:
        lmax = deg
        order = lmax + 1
        van = hermevander(x, lmax)
    else:
        deg = np.sort(deg)
        lmax = deg[-1]
        order = len(deg)
        van = hermevander(x, lmax)[:, deg]

    # set up the least squares matrices in transposed form
    lhs = van.T
    rhs = y.T
    if w is not None:
        w = np.asarray(w) + 0.0
        if w.ndim != 1:
            raise TypeError("expected 1D vector for w")
        if len(x) != len(w):
            raise TypeError("expected x and w to have same length")
        # apply weights. Don't use inplace operations as they
        # can cause problems with NA.
        lhs = lhs * w
        rhs = rhs * w

    # set rcond
    if rcond is None:
        rcond = len(x)*np.finfo(x.dtype).eps

    # Determine the norms of the design matrix columns.
    if issubclass(lhs.dtype.type, np.complexfloating):
        scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1))
    else:
        scl = np.sqrt(np.square(lhs).sum(1))
    # Guard against division by zero for all-zero columns.
    scl[scl == 0] = 1

    # Solve the least squares problem, normalizing columns to improve
    # conditioning, then undo the scaling on the solution.
    c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond)
    c = (c.T/scl).T

    # Expand c to include non-fitted coefficients which are set to zero
    if deg.ndim > 0:
        if c.ndim == 2:
            cc = np.zeros((lmax+1, c.shape[1]), dtype=c.dtype)
        else:
            cc = np.zeros(lmax+1, dtype=c.dtype)
        cc[deg] = c
        c = cc

    # warn on rank reduction
    if rank != order and not full:
        msg = "The fit may be poorly conditioned"
        warnings.warn(msg, pu.RankWarning, stacklevel=2)

    if full:
        return c, [resids, rank, s, rcond]
    else:
        return c
def hermecompanion(c):
"""
Return the scaled companion matrix of c.
The basis polynomials are scaled so that the companion matrix is
symmetric when `c` is an HermiteE basis polynomial. This provides
better eigenvalue estimates than the unscaled case and for basis
polynomials the eigenvalues are guaranteed to be real if
`numpy.linalg.eigvalsh` is used to obtain them.
Parameters
----------
c : array_like
1-D array of HermiteE series coefficients ordered from low to high
degree.
Returns
-------
mat : ndarray
Scaled companion matrix of dimensions (deg, deg).
Notes
-----
.. versionadded:: 1.7.0
"""
# c is a trimmed copy
[c] = pu.as_series([c])
if len(c) < 2:
raise ValueError('Series must have maximum degree of at least 1.')
if len(c) == 2:
return np.array([[-c[0]/c[1]]])
n = len(c) - 1
mat = np.zeros((n, n), dtype=c.dtype)
scl = np.hstack((1., 1./np.sqrt(np.arange(n - 1, 0, -1))))
scl = np.multiply.accumulate(scl)[::-1]
top = mat.reshape(-1)[1::n+1]
bot = mat.reshape(-1)[n::n+1]
top[...] = np.sqrt(np.arange(1, n))
bot[...] = top
mat[:, -1] -= scl*c[:-1]/c[-1]
return mat
def hermeroots(c):
"""
Compute the roots of a HermiteE series.
Return the roots (a.k.a. "zeros") of the polynomial
.. math:: p(x) = \\sum_i c[i] * He_i(x).
Parameters
----------
c : 1-D array_like
1-D array of coefficients.
Returns
-------
out : ndarray
Array of the roots of the series. If all the roots are real,
then `out` is also real, otherwise it is complex.
See Also
--------
polyroots, legroots, lagroots, hermroots, chebroots
Notes
-----
The root estimates are obtained as the eigenvalues of the companion
matrix, Roots far from the origin of the complex plane may have large
errors due to the numerical instability of the series for such
values. Roots with multiplicity greater than 1 will also show larger
errors as the value of the series near such points is relatively
insensitive to errors in the roots. Isolated roots near the origin can
be improved by a few iterations of Newton's method.
The HermiteE series basis polynomials aren't powers of `x` so the
results of this function may seem unintuitive.
Examples
--------
>>> from numpy.polynomial.hermite_e import hermeroots, hermefromroots
>>> coef = hermefromroots([-1, 0, 1])
>>> coef
array([ 0., 2., 0., 1.])
>>> hermeroots(coef)
array([-1., 0., 1.])
"""
# c is a trimmed copy
[c] = pu.as_series([c])
if len(c) <= 1:
return np.array([], dtype=c.dtype)
if len(c) == 2:
return np.array([-c[0]/c[1]])
m = hermecompanion(c)
r = la.eigvals(m)
r.sort()
return r
def _normed_hermite_e_n(x, n):
"""
Evaluate a normalized HermiteE polynomial.
Compute the value of the normalized HermiteE polynomial of degree ``n``
at the points ``x``.
Parameters
----------
x : ndarray of double.
Points at which to evaluate the function
n : int
Degree of the normalized HermiteE function to be evaluated.
Returns
-------
values : ndarray
The shape of the return value is described above.
Notes
-----
.. versionadded:: 1.10.0
This function is needed for finding the Gauss points and integration
weights for high degrees. The values of the standard HermiteE functions
overflow when n >= 207.
"""
if n == 0:
return np.ones(x.shape)/np.sqrt(np.sqrt(2*np.pi))
c0 = 0.
c1 = 1./np.sqrt(np.sqrt(2*np.pi))
nd = float(n)
for i in range(n - 1):
tmp = c0
c0 = -c1*np.sqrt((nd - 1.)/nd)
c1 = tmp + c1*x*np.sqrt(1./nd)
nd = nd - 1.0
return c0 + c1*x
def hermegauss(deg):
"""
Gauss-HermiteE quadrature.
Computes the sample points and weights for Gauss-HermiteE quadrature.
These sample points and weights will correctly integrate polynomials of
degree :math:`2*deg - 1` or less over the interval :math:`[-\\inf, \\inf]`
with the weight function :math:`f(x) = \\exp(-x^2/2)`.
Parameters
----------
deg : int
Number of sample points and weights. It must be >= 1.
Returns
-------
x : ndarray
1-D ndarray containing the sample points.
y : ndarray
1-D ndarray containing the weights.
Notes
-----
.. versionadded:: 1.7.0
The results have only been tested up to degree 100, higher degrees may
be problematic. The weights are determined by using the fact that
.. math:: w_k = c / (He'_n(x_k) * He_{n-1}(x_k))
where :math:`c` is a constant independent of :math:`k` and :math:`x_k`
is the k'th root of :math:`He_n`, and then scaling the results to get
the right value when integrating 1.
"""
ideg = int(deg)
if ideg != deg or ideg < 1:
raise ValueError("deg must be a non-negative integer")
# first approximation of roots. We use the fact that the companion
# matrix is symmetric in this case in order to obtain better zeros.
c = np.array([0]*deg + [1])
m = hermecompanion(c)
x = la.eigvalsh(m)
# improve roots by one application of Newton
dy = _normed_hermite_e_n(x, ideg)
df = _normed_hermite_e_n(x, ideg - 1) * np.sqrt(ideg)
x -= dy/df
# compute the weights. We scale the factor to avoid possible numerical
# overflow.
fm = _normed_hermite_e_n(x, ideg - 1)
fm /= np.abs(fm).max()
w = 1/(fm * fm)
# for Hermite_e we can also symmetrize
w = (w + w[::-1])/2
x = (x - x[::-1])/2
# scale w to get the right value
w *= np.sqrt(2*np.pi) / w.sum()
return x, w
def hermeweight(x):
    """Weight function of the Hermite_e polynomials.

    The weight is :math:`\\exp(-x^2/2)` on the interval
    :math:`[-\\inf, \\inf]`.  The HermiteE polynomials are orthogonal,
    though not normalized, with respect to it.

    Parameters
    ----------
    x : array_like
        Values at which the weight function will be computed.

    Returns
    -------
    w : ndarray
        The weight function at `x`.

    Notes
    -----
    .. versionadded:: 1.7.0

    """
    return np.exp(x**2 * -0.5)
#
# HermiteE series class
#
class HermiteE(ABCPolyBase):
    """An HermiteE series class.

    The HermiteE class provides the standard Python numerical methods
    '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the
    attributes and methods listed in the `ABCPolyBase` documentation.

    Parameters
    ----------
    coef : array_like
        HermiteE coefficients in order of increasing degree, i.e,
        ``(1, 2, 3)`` gives ``1*He_0(x) + 2*He_1(X) + 3*He_2(x)``.
    domain : (2,) array_like, optional
        Domain to use. The interval ``[domain[0], domain[1]]`` is mapped
        to the interval ``[window[0], window[1]]`` by shifting and scaling.
        The default value is [-1, 1].
    window : (2,) array_like, optional
        Window, see `domain` for its use. The default value is [-1, 1].

        .. versionadded:: 1.6.0

    """
    # Virtual Functions
    # ABCPolyBase implements arithmetic, evaluation, calculus, fitting and
    # root finding in terms of these hooks; each is bound to the
    # corresponding module-level herme* function.
    _add = staticmethod(hermeadd)
    _sub = staticmethod(hermesub)
    _mul = staticmethod(hermemul)
    _div = staticmethod(hermediv)
    _pow = staticmethod(hermepow)
    _val = staticmethod(hermeval)
    _int = staticmethod(hermeint)
    _der = staticmethod(hermeder)
    _fit = staticmethod(hermefit)
    _line = staticmethod(hermeline)
    _roots = staticmethod(hermeroots)
    _fromroots = staticmethod(hermefromroots)

    # Virtual properties
    nickname = 'herme'
    # Both the default domain and window are the natural HermiteE domain.
    domain = np.array(hermedomain)
    window = np.array(hermedomain)
| mit |
Lukc/ospace-lukc | client-ai/ai_main.py | 1 | 3091 | #
# Copyright 2001 - 2011 Ludek Smid [http://www.ospace.net/]
#
# This file is part of Outer Space.
#
# Outer Space is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Outer Space is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Outer Space; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# Client AI entry point (Python 2): parses command line options, logs a
# computer player in to an Outer Space server, runs one AI turn and logs
# out again.

import time
from config import Config
# NOTE(review): "time" is imported twice (here and above); harmless but
# redundant.
import osci, random, time
import ige.version
from ige import log
import sys, os, os.path
import re
from optparse import OptionParser

# log initialization
log.message("Starting Outer Space Client", ige.version.versionStringFull)
log.debug("sys.path =", sys.path)
log.debug("os.name =", os.name)
log.debug("sys.platform =", sys.platform)
log.debug("os.getcwd() =", os.getcwd())
log.debug("sys.frozen =", getattr(sys, "frozen", None))

# parse command line parameters
parser = OptionParser()
parser.add_option("", "--configdir", dest = "configDir",
    metavar = "DIRECTORY",
    default = os.path.join(os.path.expanduser("~"), ".outerspace"),
    help = "Override default configuration directory",
)
parser.add_option("", "--server", dest = "server",
    metavar = "HOSTNAME:PORT",
    default = "www.ospace.net:9080",
    help = "Outer Space server location"
)
parser.add_option("", "--login", dest = "login",
    metavar = "LOGIN",
    default = None,
    help = "Login name of the AI player.",
)
parser.add_option("", "--password", dest = "password",
    metavar = "PASSWORD",
    default = None,
    help = "Corresponding password of the AI player.",
)
parser.add_option("", "--ai", dest = "ai",
    metavar = "AI",
    default = None,
    help = "Type of the AI applied."
)

options, args = parser.parse_args()

if args:
    parser.error("No additional arguments are supported")

# create required directories
if not os.path.exists(options.configDir):
    os.makedirs(options.configDir)

# client
import ai_client as client
import ai_handler
from igeclient.IClient import IClientException

# NOTE(review): the AI module is imported dynamically by name via exec;
# options.ai comes straight from the command line, so only run this with
# trusted arguments.
if options.ai:
    exec ("import AIs." + options.ai + " as ai")
else:
    raise Exception

from ige.ospace.Const import *
import gdata

gdata.config = Config(os.path.join(options.configDir, 'ais_dummy'))

client.initialize(options.server, ai_handler, options)

import gettext
tran = gettext.NullTranslations()
tran.install(unicode = 1)

# Both --login and --password are mandatory (Python 2 style raises below).
if options.login:
    login = options.login
else:
    raise Exception, 'You have to provide login.'
if options.password:
    password = options.password
else:
    raise Exception, 'You have to provide password.'

client.login('Alpha', login, password)

# event loop: refresh the local game database, let the AI act, log out
client.updateDatabase()
ai.run(client)
client.logout()

log.debug("Shut down")
| gpl-2.0 |
atuljain/odoo | addons/hr/__init__.py | 382 | 1092 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hr
import res_config
import res_users
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
glwu/python-for-android | python-modules/pybluez/bluetooth/btcommon.py | 68 | 13317 | import sys
import struct
import binascii
L2CAP=0
RFCOMM=3
PORT_ANY=0
# Service Class IDs
SDP_SERVER_CLASS = "1000"
BROWSE_GRP_DESC_CLASS = "1001"
PUBLIC_BROWSE_GROUP = "1002"
SERIAL_PORT_CLASS = "1101"
LAN_ACCESS_CLASS = "1102"
DIALUP_NET_CLASS = "1103"
IRMC_SYNC_CLASS = "1104"
OBEX_OBJPUSH_CLASS = "1105"
OBEX_FILETRANS_CLASS = "1106"
IRMC_SYNC_CMD_CLASS = "1107"
HEADSET_CLASS = "1108"
CORDLESS_TELEPHONY_CLASS = "1109"
AUDIO_SOURCE_CLASS = "110a"
AUDIO_SINK_CLASS = "110b"
AV_REMOTE_TARGET_CLASS = "110c"
ADVANCED_AUDIO_CLASS = "110d"
AV_REMOTE_CLASS = "110e"
VIDEO_CONF_CLASS = "110f"
INTERCOM_CLASS = "1110"
FAX_CLASS = "1111"
HEADSET_AGW_CLASS = "1112"
WAP_CLASS = "1113"
WAP_CLIENT_CLASS = "1114"
PANU_CLASS = "1115"
NAP_CLASS = "1116"
GN_CLASS = "1117"
DIRECT_PRINTING_CLASS = "1118"
REFERENCE_PRINTING_CLASS = "1119"
IMAGING_CLASS = "111a"
IMAGING_RESPONDER_CLASS = "111b"
IMAGING_ARCHIVE_CLASS = "111c"
IMAGING_REFOBJS_CLASS = "111d"
HANDSFREE_CLASS = "111e"
HANDSFREE_AGW_CLASS = "111f"
DIRECT_PRT_REFOBJS_CLASS = "1120"
REFLECTED_UI_CLASS = "1121"
BASIC_PRINTING_CLASS = "1122"
PRINTING_STATUS_CLASS = "1123"
HID_CLASS = "1124"
HCR_CLASS = "1125"
HCR_PRINT_CLASS = "1126"
HCR_SCAN_CLASS = "1127"
CIP_CLASS = "1128"
VIDEO_CONF_GW_CLASS = "1129"
UDI_MT_CLASS = "112a"
UDI_TA_CLASS = "112b"
AV_CLASS = "112c"
SAP_CLASS = "112d"
PNP_INFO_CLASS = "1200"
GENERIC_NETWORKING_CLASS = "1201"
GENERIC_FILETRANS_CLASS = "1202"
GENERIC_AUDIO_CLASS = "1203"
GENERIC_TELEPHONY_CLASS = "1204"
UPNP_CLASS = "1205"
UPNP_IP_CLASS = "1206"
UPNP_PAN_CLASS = "1300"
UPNP_LAP_CLASS = "1301"
UPNP_L2CAP_CLASS = "1302"
VIDEO_SOURCE_CLASS = "1303"
VIDEO_SINK_CLASS = "1304"
# Bluetooth Profile Descriptors
SDP_SERVER_PROFILE = ( SDP_SERVER_CLASS, 0x0100)
BROWSE_GRP_DESC_PROFILE = ( BROWSE_GRP_DESC_CLASS, 0x0100)
SERIAL_PORT_PROFILE = ( SERIAL_PORT_CLASS, 0x0100)
LAN_ACCESS_PROFILE = ( LAN_ACCESS_CLASS, 0x0100)
DIALUP_NET_PROFILE = ( DIALUP_NET_CLASS, 0x0100)
IRMC_SYNC_PROFILE = ( IRMC_SYNC_CLASS, 0x0100)
OBEX_OBJPUSH_PROFILE = ( OBEX_OBJPUSH_CLASS, 0x0100)
OBEX_FILETRANS_PROFILE = ( OBEX_FILETRANS_CLASS, 0x0100)
IRMC_SYNC_CMD_PROFILE = ( IRMC_SYNC_CMD_CLASS, 0x0100)
HEADSET_PROFILE = ( HEADSET_CLASS, 0x0100)
CORDLESS_TELEPHONY_PROFILE = ( CORDLESS_TELEPHONY_CLASS, 0x0100)
AUDIO_SOURCE_PROFILE = ( AUDIO_SOURCE_CLASS, 0x0100)
AUDIO_SINK_PROFILE = ( AUDIO_SINK_CLASS, 0x0100)
AV_REMOTE_TARGET_PROFILE = ( AV_REMOTE_TARGET_CLASS, 0x0100)
ADVANCED_AUDIO_PROFILE = ( ADVANCED_AUDIO_CLASS, 0x0100)
AV_REMOTE_PROFILE = ( AV_REMOTE_CLASS, 0x0100)
VIDEO_CONF_PROFILE = ( VIDEO_CONF_CLASS, 0x0100)
INTERCOM_PROFILE = ( INTERCOM_CLASS, 0x0100)
FAX_PROFILE = ( FAX_CLASS, 0x0100)
HEADSET_AGW_PROFILE = ( HEADSET_AGW_CLASS, 0x0100)
WAP_PROFILE = ( WAP_CLASS, 0x0100)
WAP_CLIENT_PROFILE = ( WAP_CLIENT_CLASS, 0x0100)
PANU_PROFILE = ( PANU_CLASS, 0x0100)
NAP_PROFILE = ( NAP_CLASS, 0x0100)
GN_PROFILE = ( GN_CLASS, 0x0100)
DIRECT_PRINTING_PROFILE = ( DIRECT_PRINTING_CLASS, 0x0100)
REFERENCE_PRINTING_PROFILE = ( REFERENCE_PRINTING_CLASS, 0x0100)
IMAGING_PROFILE = ( IMAGING_CLASS, 0x0100)
IMAGING_RESPONDER_PROFILE = ( IMAGING_RESPONDER_CLASS, 0x0100)
IMAGING_ARCHIVE_PROFILE = ( IMAGING_ARCHIVE_CLASS, 0x0100)
IMAGING_REFOBJS_PROFILE = ( IMAGING_REFOBJS_CLASS, 0x0100)
HANDSFREE_PROFILE = ( HANDSFREE_CLASS, 0x0100)
HANDSFREE_AGW_PROFILE = ( HANDSFREE_AGW_CLASS, 0x0100)
DIRECT_PRT_REFOBJS_PROFILE = ( DIRECT_PRT_REFOBJS_CLASS, 0x0100)
REFLECTED_UI_PROFILE = ( REFLECTED_UI_CLASS, 0x0100)
BASIC_PRINTING_PROFILE = ( BASIC_PRINTING_CLASS, 0x0100)
PRINTING_STATUS_PROFILE = ( PRINTING_STATUS_CLASS, 0x0100)
HID_PROFILE = ( HID_CLASS, 0x0100)
HCR_PROFILE = ( HCR_SCAN_CLASS, 0x0100)
HCR_PRINT_PROFILE = ( HCR_PRINT_CLASS, 0x0100)
HCR_SCAN_PROFILE = ( HCR_SCAN_CLASS, 0x0100)
CIP_PROFILE = ( CIP_CLASS, 0x0100)
VIDEO_CONF_GW_PROFILE = ( VIDEO_CONF_GW_CLASS, 0x0100)
UDI_MT_PROFILE = ( UDI_MT_CLASS, 0x0100)
UDI_TA_PROFILE = ( UDI_TA_CLASS, 0x0100)
AV_PROFILE = ( AV_CLASS, 0x0100)
SAP_PROFILE = ( SAP_CLASS, 0x0100)
PNP_INFO_PROFILE = ( PNP_INFO_CLASS, 0x0100)
GENERIC_NETWORKING_PROFILE = ( GENERIC_NETWORKING_CLASS, 0x0100)
GENERIC_FILETRANS_PROFILE = ( GENERIC_FILETRANS_CLASS, 0x0100)
GENERIC_AUDIO_PROFILE = ( GENERIC_AUDIO_CLASS, 0x0100)
GENERIC_TELEPHONY_PROFILE = ( GENERIC_TELEPHONY_CLASS, 0x0100)
UPNP_PROFILE = ( UPNP_CLASS, 0x0100)
UPNP_IP_PROFILE = ( UPNP_IP_CLASS, 0x0100)
UPNP_PAN_PROFILE = ( UPNP_PAN_CLASS, 0x0100)
UPNP_LAP_PROFILE = ( UPNP_LAP_CLASS, 0x0100)
UPNP_L2CAP_PROFILE = ( UPNP_L2CAP_CLASS, 0x0100)
VIDEO_SOURCE_PROFILE = ( VIDEO_SOURCE_CLASS, 0x0100)
VIDEO_SINK_PROFILE = ( VIDEO_SINK_CLASS, 0x0100)
# Universal Service Attribute IDs
SERVICE_RECORD_HANDLE_ATTRID = 0x0000
SERVICE_CLASS_ID_LIST_ATTRID = 0x0001
SERVICE_RECORD_STATE_ATTRID = 0x0002
SERVICE_ID_ATTRID = 0x0003
PROTOCOL_DESCRIPTOR_LIST_ATTRID = 0x0004
BROWSE_GROUP_LIST_ATTRID = 0x0005
LANGUAGE_BASE_ATTRID_LIST_ATTRID = 0x0006
SERVICE_INFO_TIME_TO_LIVE_ATTRID = 0x0007
SERVICE_AVAILABILITY_ATTRID = 0x0008
BLUETOOTH_PROFILE_DESCRIPTOR_LIST_ATTRID = 0x0009
DOCUMENTATION_URL_ATTRID = 0x000a
CLIENT_EXECUTABLE_URL_ATTRID = 0x000b
ICON_URL_ATTRID = 0x000c
SERVICE_NAME_ATTRID = 0x0100
SERVICE_DESCRIPTION_ATTRID = 0x0101
PROVIDER_NAME_ATTRID = 0x0102
# Protocol UUIDs
SDP_UUID = "0001"
UDP_UUID = "0002"
RFCOMM_UUID = "0003"
TCP_UUID = "0004"
TCS_BIN_UUID = "0005"
TCS_AT_UUID = "0006"
OBEX_UUID = "0008"
IP_UUID = "0009"
FTP_UUID = "000a"
HTTP_UUID = "000c"
WSP_UUID = "000e"
BNEP_UUID = "000f"
UPNP_UUID = "0010"
HIDP_UUID = "0011"
HCRP_CTRL_UUID = "0012"
HCRP_DATA_UUID = "0014"
HCRP_NOTE_UUID = "0016"
AVCTP_UUID = "0017"
AVDTP_UUID = "0019"
CMTP_UUID = "001b"
UDI_UUID = "001d"
L2CAP_UUID = "0100"
class BluetoothError (IOError):
    """Base exception for Bluetooth-related failures.

    Subclasses IOError so callers can also handle it with generic I/O
    error handling.
    """
    pass
def is_valid_address (s):
    """
    returns True if address is a valid Bluetooth address

    valid address are always strings of the form XX:XX:XX:XX:XX:XX
    where X is a hexadecimal character.  For example,
        01:23:45:67:89:AB is a valid address, but
        IN:VA:LI:DA:DD:RE is not
    """
    try:
        octets = s.split (":")
        if len (octets) != 6:
            return False
        for octet in octets:
            int (octet, 16)
    except (AttributeError, ValueError):
        # AttributeError: s is not a string; ValueError: non-hex component.
        # Narrowed from a bare "except:" so KeyboardInterrupt/SystemExit
        # are no longer swallowed.
        return False
    return True
def is_valid_uuid (uuid):
    """
    is_valid_uuid (uuid) -> bool

    Return True if uuid is a valid 128-bit UUID.

    Valid UUIDs are strings taking one of these forms, where each X is a
    hexadecimal digit (case insensitive):
        XXXX
        XXXXXXXX
        XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
    """
    try:
        if len (uuid) in (4, 8):
            # short 16-bit / 32-bit form: just a hex number
            return int (uuid, 16) >= 0
        if len (uuid) != 36:
            return False
        # full dashed form: five hex groups of fixed widths
        pieces = uuid.split ("-")
        if [len (p) for p in pieces] != [8, 4, 4, 4, 12]:
            return False
        for p in pieces:
            int (p, 16)
    except (ValueError, TypeError):
        return False
    return True
def to_full_uuid (uuid):
    """
    Convert a short 16-bit or 32-bit reserved UUID to its full 128-bit
    Bluetooth form.
    """
    if not is_valid_uuid (uuid):
        raise ValueError ("invalid UUID")
    # Short forms are padded into the Bluetooth base UUID.
    if len (uuid) == 4:
        return "0000%s-0000-1000-8000-00805F9B34FB" % uuid
    if len (uuid) == 8:
        return "%s-0000-1000-8000-00805F9B34FB" % uuid
    # already a full 128-bit UUID
    return uuid
# =============== parsing and constructing raw SDP records ============
def sdp_parse_size_desc (data):
    """Parse the type/size descriptor at the head of an SDP data element.

    Returns (dtype, dsize, dstart) where dtype is the 5-bit element type,
    dsize is the payload size in bytes and dstart is the offset of the
    payload within data.

    Raises ValueError if the type tag is out of range.
    """
    # data[0:1] (not data[0]) so this works on both py2 str and py3 bytes.
    dts = struct.unpack ("B", data[0:1])[0]
    dtype, dsizedesc = dts >> 3, dts & 0x7
    dstart = 1
    if dtype == 0: dsize = 0
    elif dsizedesc == 0: dsize = 1
    elif dsizedesc == 1: dsize = 2
    elif dsizedesc == 2: dsize = 4
    elif dsizedesc == 3: dsize = 8
    elif dsizedesc == 4: dsize = 16
    elif dsizedesc == 5:
        dsize = struct.unpack ("B", data[1:2])[0]
        dstart += 1
    elif dsizedesc == 6:
        dsize = struct.unpack ("!H", data[1:3])[0]
        dstart += 2
    elif dsizedesc == 7:
        # was "dsize ==" -- a no-op comparison that left dsize unbound
        # for 32-bit length descriptors
        dsize = struct.unpack ("!I", data[1:5])[0]
        dstart += 4
    if dtype > 8:
        raise ValueError ("Invalid TypeSizeDescriptor byte %s %d, %d" \
                % (binascii.hexlify (data[0:1]), dtype, dsizedesc))
    return dtype, dsize, dstart
def sdp_parse_uuid (data, size):
    """Convert a packed SDP UUID payload into its text representation.

    2- and 4-byte UUIDs are rendered as plain hex strings; 16-byte UUIDs
    use the dashed 128-bit format.

    Raises ValueError for any other size.
    """
    if size == 2:
        return binascii.hexlify (data)
    elif size == 4:
        return binascii.hexlify (data)
    elif size == 16:
        return "%08X-%04X-%04X-%04X-%04X%08X" % struct.unpack ("!IHHHHI", data)
    else:
        # was "return ValueError(...)": the exception must be raised, not
        # handed back to the caller as a value
        raise ValueError ("invalid UUID size")
def sdp_parse_int (data, size, signed):
    """Unpack a big-endian integer of the given byte size from data.

    size may be 1, 2, 4, 8 or 16; signed selects two's-complement
    interpretation.  Raises KeyError for unsupported sizes and
    struct.error when data has the wrong length.
    """
    fmts = { 1 : "!b" , 2 : "!h" , 4 : "!i" , 8 : "!q" , 16 : "!qq" }
    fmt = fmts[size]
    if not signed:
        fmt = fmt.upper ()

    if fmt in [ "!qq", "!QQ" ]:
        # struct has no 128-bit code, so combine two 64-bit halves.
        upp, low = struct.unpack ("!QQ", data)
        result = (upp << 64) | low
        if signed and (result & (1 << 127)):
            # Two's-complement sign extension.  The old code applied a
            # mask/negate dance unconditionally, which corrupted
            # non-negative signed values, and its long literal used the
            # Python 2 only "L" suffix.
            result -= (1 << 128)
        return result
    else:
        return struct.unpack (fmt, data)[0]
def sdp_parse_data_elementSequence (data):
    """Parse a packed run of SDP data elements into a list of
    (type-name, value) tuples.
    """
    parsed = []
    offset = 0
    total = len (data)
    while offset < total:
        elem_type, elem_val, used = sdp_parse_data_element (data[offset:])
        offset += used
        parsed.append ( (elem_type, elem_val))
    return parsed
def sdp_parse_data_element (data):
    # Decode a single SDP data element and return a (type-name, value,
    # bytes-consumed) triple.  The 5-bit type tag from the size descriptor
    # selects how the payload bytes are interpreted.
    dtype, dsize, dstart = sdp_parse_size_desc (data)
    elem = data[dstart:dstart+dsize]

    if dtype == 0:
        rtype, rval = "Nil", None
    elif dtype == 1:
        # unsigned integer of dsize bytes
        rtype, rval = "UInt%d"% (dsize*8), sdp_parse_int (elem, dsize, False)
    elif dtype == 2:
        # signed integer of dsize bytes
        rtype, rval = "SInt%d"% (dsize*8), sdp_parse_int (elem, dsize, True)
    elif dtype == 3:
        rtype, rval = "UUID", sdp_parse_uuid (elem, dsize)
    elif dtype == 4:
        # raw byte string, returned unmodified
        rtype, rval = "String", elem
    elif dtype == 5:
        rtype, rval = "Bool", (struct.unpack ("B", elem)[0] != 0)
    elif dtype == 6:
        # nested data element sequence (parsed recursively)
        rtype, rval = "ElemSeq", sdp_parse_data_elementSequence (elem)
    elif dtype == 7:
        # sequence of alternatives, same wire layout as ElemSeq
        rtype, rval = "AltElemSeq", sdp_parse_data_elementSequence (elem)
    elif dtype == 8:
        rtype, rval = "URL", elem
    # NOTE(review): sdp_parse_size_desc raises for dtype > 8, so rtype/rval
    # are always bound here.

    return rtype, rval, dstart+dsize
def sdp_parse_raw_record (data):
    # Parse a complete raw SDP service record -- a data element sequence of
    # alternating UInt16 attribute ids and attribute values -- into a
    # { attribute-id : value } dictionary.
    dtype, dsize, dstart = sdp_parse_size_desc (data)
    # the outermost element must be an element sequence (type 6)
    assert dtype == 6
    pos = dstart
    datalen = len (data)
    record = {}
    while pos < datalen:
        # NOTE(review): "type" shadows the builtin here; left unchanged.
        type, attrid, consumed = sdp_parse_data_element (data[pos:])
        assert type == "UInt16"
        pos += consumed
        type, attrval, consumed = sdp_parse_data_element (data[pos:])
        pos += consumed
        record[attrid] = attrval
    return record
def sdp_make_data_element (type, value):
    """Pack a single SDP data element of the given type name and value.

    type is one of the textual type names produced by
    sdp_parse_data_element ("Nil", "UInt8" ... "URL"); raises ValueError
    for anything else.

    NOTE(review): the "String"/"URL"/"Bool"/"ElemSeq" branches join packed
    bytes with str values, which assumes Python 2 byte strings -- confirm
    before using those branches under Python 3.
    """
    def maketsd (tdesc, sdesc):
        # pack a one-byte type/size descriptor with an inline size code
        return struct.pack ("B", (tdesc << 3) | sdesc)
    def maketsdl (tdesc, size):
        # pack a descriptor followed by an explicit 8/16/32-bit length
        if size < (1<<8): return struct.pack ("!BB", tdesc << 3 | 5, size)
        elif size < (1<<16): return struct.pack ("!BH", tdesc << 3 | 6, size)
        else: return struct.pack ("!BI", tdesc << 3 | 7, size)

    easyinttypes = { "UInt8" : (1, 0, "!B"), "UInt16" : (1, 1, "!H"),
                     "UInt32" : (1, 2, "!I"), "UInt64" : (1, 3, "!Q"),
                     "SInt8" : (2, 0, "!b"), "SInt16" : (2, 1, "!h"),
                     "SInt32" : (2, 2, "!i"), "SInt64" : (2, 3, "!q"),
                     }

    if type == "Nil":
        return maketsd (0, 0)
    elif type in easyinttypes:
        tdesc, sdesc, fmt = easyinttypes[type]
        return maketsd (tdesc, sdesc) + struct.pack (fmt, value)
    elif type == "UInt128":
        ts = maketsd (1, 4)
        # was "ts >> 64" / "ts & ...L": the *value*, not the descriptor
        # bytes, must be split into two 64-bit halves (and the mask's
        # Python 2 "L" suffix was a syntax error under Python 3)
        upper = value >> 64
        lower = value & 0xFFFFFFFFFFFFFFFF
        return ts + struct.pack ("!QQ", upper, lower)
    elif type == "SInt128":
        ts = maketsd (2, 4)
        # FIXME
        # was NotImplementedException, which is not a defined name
        raise NotImplementedError ("128-bit signed int NYI!")
    elif type == "UUID":
        if len (value) == 4:
            return maketsd (3, 1) + binascii.unhexlify (value)
        elif len (value) == 8:
            return maketsd (3, 2) + binascii.unhexlify (value)
        elif len (value) == 36:
            return maketsd (3, 4) + binascii.unhexlify (value.replace ("-",""))
    elif type == "String":
        return maketsdl (4, len (value)) + value
    elif type == "Bool":
        return maketsd (5,0) + (value and "\x01" or "\x00")
    elif type == "ElemSeq":
        packedseq = ""
        for subtype, subval in value:
            nextelem = sdp_make_data_element (subtype, subval)
            packedseq = packedseq + nextelem
        return maketsdl (6, len (packedseq)) + packedseq
    elif type == "AltElemSeq":
        packedseq = ""
        for subtype, subval in value:
            packedseq = packedseq + sdp_make_data_element (subtype, subval)
        return maketsdl (7, len (packedseq)) + packedseq
    elif type == "URL":
        return maketsdl (8, len (value)) + value
    else:
        raise ValueError ("invalid type %s" % type)
| apache-2.0 |
wkschwartz/django | django/utils/ipv6.py | 129 | 1350 | import ipaddress
from django.core.exceptions import ValidationError
from django.utils.translation import gettext_lazy as _
def clean_ipv6_address(ip_str, unpack_ipv4=False,
                       error_message=_("This is not a valid IPv6 address.")):
    """
    Clean an IPv6 address string.

    Raise ValidationError if the address is invalid.

    Normalize the address: collapse the longest run of zero hextets to
    "::", drop leading zeroes and lowercase every hextet.

    Args:
        ip_str: A valid IPv6 address.
        unpack_ipv4: if an IPv4-mapped address is found,
            return the plain IPv4 address (default=False).
        error_message: An error message used in the ValidationError.

    Return a compressed IPv6 address or the same value.
    """
    try:
        # Round-tripping through int yields the canonical compressed form.
        addr = ipaddress.IPv6Address(int(ipaddress.IPv6Address(ip_str)))
    except ValueError:
        raise ValidationError(error_message, code='invalid')

    mapped = addr.ipv4_mapped
    if mapped is None:
        return str(addr)
    return str(mapped) if unpack_ipv4 else '::ffff:%s' % mapped
def is_valid_ipv6_address(ip_str):
    """
    Return whether or not the `ip_str` string is a valid IPv6 address.
    """
    try:
        ipaddress.IPv6Address(ip_str)
        return True
    except ValueError:
        return False
| bsd-3-clause |
rowemoore/odoo | addons/sale_order_dates/sale_order_dates.py | 223 | 5308 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime, timedelta
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT
class sale_order_dates(osv.osv):
    """Add several date fields to Sale Orders, computed or user-entered"""
    _inherit = 'sale.order'

    def _get_date_planned(self, cr, uid, order, line, start_date, context=None):
        """Compute the expected date from the requested date, not the order date"""
        if order and order.requested_date:
            date_planned = datetime.strptime(order.requested_date, DEFAULT_SERVER_DATETIME_FORMAT)
            # Ship earlier by the company's security delay so the goods can
            # arrive on the requested date.
            date_planned -= timedelta(days=order.company_id.security_lead)
            return date_planned.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
        # No requested date: fall back to the standard computation based on
        # the order date.
        return super(sale_order_dates, self)._get_date_planned(
            cr, uid, order, line, start_date, context=context)

    def _get_effective_date(self, cr, uid, ids, name, arg, context=None):
        """Read the shipping date from the related packings"""
        # TODO: would be better if it returned the date the picking was processed?
        res = {}
        dates_list = []
        for order in self.browse(cr, uid, ids, context=context):
            dates_list = []
            for pick in order.picking_ids:
                dates_list.append(pick.date)
            if dates_list:
                # the earliest picking date counts as the effective date
                res[order.id] = min(dates_list)
            else:
                res[order.id] = False
        return res

    def _get_commitment_date(self, cr, uid, ids, name, arg, context=None):
        """Compute the commitment date"""
        res = {}
        dates_list = []
        for order in self.browse(cr, uid, ids, context=context):
            dates_list = []
            order_datetime = datetime.strptime(order.date_order, DEFAULT_SERVER_DATETIME_FORMAT)
            for line in order.order_line:
                # cancelled lines carry no commitment
                if line.state == 'cancel':
                    continue
                dt = order_datetime + timedelta(days=line.delay or 0.0)
                dt_s = dt.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
                dates_list.append(dt_s)
            if dates_list:
                # NOTE(review): orders with no (non-cancelled) lines get no
                # entry in res -- confirm callers tolerate the missing key.
                res[order.id] = min(dates_list)
        return res

    def onchange_requested_date(self, cr, uid, ids, requested_date,
                                commitment_date, context=None):
        """Warn if the requested dates is sooner than the commitment date"""
        if (requested_date and commitment_date and requested_date < commitment_date):
            return {'warning': {
                'title': _('Requested date is too soon!'),
                'message': _("The date requested by the customer is "
                             "sooner than the commitment date. You may be "
                             "unable to honor the customer's request.")
                }
            }
        return {}

    _columns = {
        # computed latest line delivery promise, stored for searching
        'commitment_date': fields.function(_get_commitment_date, store=True,
            type='datetime', string='Commitment Date',
            help="Date by which the products are sure to be delivered. This is "
                 "a date that you can promise to the customer, based on the "
                 "Product Lead Times."),
        # user-entered; editable only while the order is draft or sent
        'requested_date': fields.datetime('Requested Date',
            readonly=True, states={'draft': [('readonly', False)],
                'sent': [('readonly', False)]}, copy=False,
            help="Date by which the customer has requested the items to be "
                 "delivered.\n"
                 "When this Order gets confirmed, the Delivery Order's "
                 "expected date will be computed based on this date and the "
                 "Company's Security Delay.\n"
                 "Leave this field empty if you want the Delivery Order to be "
                 "processed as soon as possible. In that case the expected "
                 "date will be computed using the default method: based on "
                 "the Product Lead Times and the Company's Security Delay."),
        # computed from the earliest related picking date
        'effective_date': fields.function(_get_effective_date, type='date',
            store=True, string='Effective Date',
            help="Date on which the first Delivery Order was created."),
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
benfinkelcbt/CPD200 | CPD200-Lab13-Python/googleapiclient/schema.py | 82 | 9317 | # Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Schema processing for discovery based APIs
Schemas holds an APIs discovery schemas. It can return those schema as
deserialized JSON objects, or pretty print them as prototype objects that
conform to the schema.
For example, given the schema:
schema = \"\"\"{
"Foo": {
"type": "object",
"properties": {
"etag": {
"type": "string",
"description": "ETag of the collection."
},
"kind": {
"type": "string",
"description": "Type of the collection ('calendar#acl').",
"default": "calendar#acl"
},
"nextPageToken": {
"type": "string",
"description": "Token used to access the next
page of this result. Omitted if no further results are available."
}
}
}
}\"\"\"
s = Schemas(schema)
print s.prettyPrintByName('Foo')
Produces the following output:
{
"nextPageToken": "A String", # Token used to access the
# next page of this result. Omitted if no further results are available.
"kind": "A String", # Type of the collection ('calendar#acl').
"etag": "A String", # ETag of the collection.
},
The constructor takes a discovery document in which to look up named schema.
"""
from __future__ import absolute_import
import six
# TODO(jcgregorio) support format, enum, minimum, maximum
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import copy
from oauth2client import util
class Schemas(object):
    """Schemas for an API.

    Holds the 'schemas' section of a discovery document and renders named
    schemas as commented prototype objects.
    """

    def __init__(self, discovery):
        """Constructor.

        Args:
            discovery: object, Deserialized discovery document from which we pull
                out the named schema.
        """
        self.schemas = discovery.get('schemas', {})

        # Cache of pretty printed schemas.
        self.pretty = {}

    @util.positional(2)
    def _prettyPrintByName(self, name, seen=None, dent=0):
        """Get pretty printed object prototype from the schema name.

        Args:
            name: string, Name of schema in the discovery document.
            seen: list of string, Names of schema already seen. Used to handle
                recursive definitions.
            dent: int, Initial indentation depth.

        Returns:
            string, A string that contains a prototype object with
            comments that conforms to the given schema.
        """
        if seen is None:
            seen = []

        if name in seen:
            # Do not fall into an infinite loop over recursive definitions.
            return '# Object with schema name: %s' % name
        seen.append(name)

        if name not in self.pretty:
            # _SchemaToStruct (defined later in this module) renders the
            # schema; the callback lets it expand $ref'd schemas by name.
            self.pretty[name] = _SchemaToStruct(self.schemas[name],
                seen, dent=dent).to_str(self._prettyPrintByName)

        seen.pop()

        return self.pretty[name]

    def prettyPrintByName(self, name):
        """Get pretty printed object prototype from the schema name.

        Args:
            name: string, Name of schema in the discovery document.

        Returns:
            string, A string that contains a prototype object with
            comments that conforms to the given schema.
        """
        # Return with trailing comma and newline removed.
        return self._prettyPrintByName(name, seen=[], dent=1)[:-2]

    @util.positional(2)
    def _prettyPrintSchema(self, schema, seen=None, dent=0):
        """Get pretty printed object prototype of schema.

        Args:
            schema: object, Parsed JSON schema.
            seen: list of string, Names of schema already seen. Used to handle
                recursive definitions.
            dent: int, Initial indentation depth.

        Returns:
            string, A string that contains a prototype object with
            comments that conforms to the given schema.
        """
        if seen is None:
            seen = []

        return _SchemaToStruct(schema, seen, dent=dent).to_str(self._prettyPrintByName)

    def prettyPrintSchema(self, schema):
        """Get pretty printed object prototype of schema.

        Args:
            schema: object, Parsed JSON schema.

        Returns:
            string, A string that contains a prototype object with
            comments that conforms to the given schema.
        """
        # Return with trailing comma and newline removed.
        return self._prettyPrintSchema(schema, dent=1)[:-2]

    def get(self, name):
        """Get deserialized JSON schema from the schema name.

        Args:
            name: string, Schema name.
        """
        return self.schemas[name]
class _SchemaToStruct(object):
  """Convert schema to a prototype object."""

  @util.positional(3)
  def __init__(self, schema, seen, dent=0):
    """Constructor.

    Args:
      schema: object, Parsed JSON schema.
      seen: list, List of names of schema already seen while parsing. Used to
        handle recursive definitions.
      dent: int, Initial indentation depth.
    """
    # The result of this parsing kept as list of strings.
    self.value = []

    # The final value of the parsing.
    self.string = None

    # The parsed JSON schema.
    self.schema = schema

    # Indentation level.
    self.dent = dent

    # Method that when called returns a prototype object for the schema with
    # the given name.
    self.from_cache = None

    # List of names of schema already seen while parsing.
    self.seen = seen

  def emit(self, text):
    """Add text as a line to the output.

    Args:
      text: string, Text to output.
    """
    self.value.extend(["  " * self.dent, text, '\n'])

  def emitBegin(self, text):
    """Add text to the output, but with no line terminator.

    Args:
      text: string, Text to output.
    """
    self.value.extend(["  " * self.dent, text])

  def emitEnd(self, text, comment):
    """Add text and comment to the output with line terminator.

    Args:
      text: string, Text to output.
      comment: string, Python comment.
    """
    if comment:
      # Multi-line descriptions are re-joined so every continuation line is
      # its own aligned '#' comment.
      divider = '\n' + '  ' * (self.dent + 2) + '# '
      lines = comment.splitlines()
      lines = [x.rstrip() for x in lines]
      comment = divider.join(lines)
      self.value.extend([text, ' # ', comment, '\n'])
    else:
      self.value.extend([text, '\n'])

  def indent(self):
    """Increase indentation level."""
    self.dent += 1

  def undent(self):
    """Decrease indentation level."""
    self.dent -= 1

  def _to_str_impl(self, schema):
    """Prototype object based on the schema, in Python code with comments.

    Args:
      schema: object, Parsed JSON schema file.

    Returns:
      Prototype object based on the schema, in Python code with comments.
    """
    stype = schema.get('type')
    if stype == 'object':
      self.emitEnd('{', schema.get('description', ''))
      self.indent()
      if 'properties' in schema:
        # One '"key": value' pair per declared property.
        for pname, pschema in six.iteritems(schema.get('properties', {})):
          self.emitBegin('"%s": ' % pname)
          self._to_str_impl(pschema)
      elif 'additionalProperties' in schema:
        # Map-like object: show one representative key.
        self.emitBegin('"a_key": ')
        self._to_str_impl(schema['additionalProperties'])
      self.undent()
      self.emit('},')
    elif '$ref' in schema:
      # Reference to a named schema; expand it through the caching callback
      # supplied to to_str() (which also breaks recursion via self.seen).
      schemaName = schema['$ref']
      description = schema.get('description', '')
      s = self.from_cache(schemaName, seen=self.seen)
      parts = s.splitlines()
      self.emitEnd(parts[0], description)
      for line in parts[1:]:
        self.emit(line.rstrip())
    elif stype == 'boolean':
      value = schema.get('default', 'True or False')
      self.emitEnd('%s,' % str(value), schema.get('description', ''))
    elif stype == 'string':
      value = schema.get('default', 'A String')
      self.emitEnd('"%s",' % str(value), schema.get('description', ''))
    elif stype == 'integer':
      value = schema.get('default', '42')
      self.emitEnd('%s,' % str(value), schema.get('description', ''))
    elif stype == 'number':
      value = schema.get('default', '3.14')
      self.emitEnd('%s,' % str(value), schema.get('description', ''))
    elif stype == 'null':
      self.emitEnd('None,', schema.get('description', ''))
    elif stype == 'any':
      self.emitEnd('"",', schema.get('description', ''))
    elif stype == 'array':
      self.emitEnd('[', schema.get('description'))
      self.indent()
      self.emitBegin('')
      self._to_str_impl(schema['items'])
      self.undent()
      self.emit('],')
    else:
      self.emit('Unknown type! %s' % stype)
      self.emitEnd('', '')

    self.string = ''.join(self.value)
    return self.string

  def to_str(self, from_cache):
    """Prototype object based on the schema, in Python code with comments.

    Args:
      from_cache: callable(name, seen), Callable that retrieves an object
        prototype for a schema with the given name. Seen is a list of schema
        names already seen as we recursively descend the schema definition.

    Returns:
      Prototype object based on the schema, in Python code with comments.
      The lines of the code will all be properly indented.
    """
    self.from_cache = from_cache
    return self._to_str_impl(self.schema)
| gpl-3.0 |
loulich/Couchpotato | couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/iprima.py | 32 | 3608 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
from random import random
from math import floor
from .common import InfoExtractor
from ..compat import (
compat_urllib_request,
)
from ..utils import (
ExtractorError,
)
class IPrimaIE(InfoExtractor):
    # Extractor for the Czech play.iprima.cz streaming site; resolves the
    # Livebox-hosted RTMP streams embedded in a video page.
    _VALID_URL = r'https?://play\.iprima\.cz/[^?#]+/(?P<id>[^?#]+)'

    _TESTS = [{
        'url': 'http://play.iprima.cz/particka/particka-92',
        'info_dict': {
            'id': '39152',
            'ext': 'flv',
            'title': 'Partička (92)',
            'description': 'md5:3740fda51464da35a2d4d0670b8e4fd6',
            'thumbnail': 'http://play.iprima.cz/sites/default/files/image_crops/image_620x349/3/491483_particka-92_image_620x349.jpg',
        },
        'params': {
            'skip_download': True,  # requires rtmpdump
        },
    }, {
        'url': 'http://play.iprima.cz/particka/tchibo-particka-jarni-moda',
        'info_dict': {
            'id': '9718337',
            'ext': 'flv',
            'title': 'Tchibo Partička - Jarní móda',
            'description': 'md5:589f8f59f414220621ff8882eb3ce7be',
            'thumbnail': 're:^http:.*\.jpg$',
        },
        'params': {
            'skip_download': True,  # requires rtmpdump
        },
        'skip': 'Do not have permission to access this page',
    }]

    def _real_extract(self, url):
        """Fetch the video page and player script, then build RTMP formats."""
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')

        webpage = self._download_webpage(url, video_id)

        # The site renders an inline Czech error message ("You do not have
        # permission to access this page") for restricted videos.
        if re.search(r'Nemáte oprávnění přistupovat na tuto stránku\.\s*</div>', webpage):
            raise ExtractorError(
                '%s said: You do not have permission to access this page' % self.IE_NAME, expected=True)

        # Random "__tokN__" query pair acts as a cache-buster for the embed
        # player script, mimicking the site's own JS.
        player_url = (
            'http://embed.livebox.cz/iprimaplay/player-embed-v2.js?__tok%s__=%s' %
            (floor(random() * 1073741824), floor(random() * 1073741824))
        )

        req = compat_urllib_request.Request(player_url)
        req.add_header('Referer', url)
        playerpage = self._download_webpage(req, video_id)

        # Reassemble the stream base URL (host + '?auth=' + token) from the
        # SECOND embed['stream'] assignment found in the player script.
        base_url = ''.join(re.findall(r"embed\['stream'\] = '(.+?)'.+'(\?auth=)'.+'(.+?)';", playerpage)[1])

        # Non-zero geo zones use a zone-suffixed token query parameter.
        zoneGEO = self._html_search_regex(r'"zoneGEO":(.+?),', webpage, 'zoneGEO')
        if zoneGEO != '0':
            base_url = base_url.replace('token', 'token_' + zoneGEO)

        formats = []
        for format_id in ['lq', 'hq', 'hd']:
            # The page exposes one '"<quality>_id"' JS value per quality; the
            # literal string 'null' marks an unavailable quality.
            filename = self._html_search_regex(
                r'"%s_id":(.+?),' % format_id, webpage, 'filename')
            if filename == 'null':
                continue
            # NOTE(review): real_id keeps the value from the LAST quality
            # processed; if every quality is 'null' it is never bound and the
            # return below raises NameError -- confirm at least one format is
            # always present.
            real_id = self._search_regex(
                r'Prima-(?:[0-9]{10}|WEB)-([0-9]+)[-_]',
                filename, 'real video id')
            if format_id == 'lq':
                quality = 0
            elif format_id == 'hq':
                quality = 1
            elif format_id == 'hd':
                quality = 2
                # HD files live under an 'hq/' prefix on the server.
                filename = 'hq/' + filename
            formats.append({
                'format_id': format_id,
                'url': base_url,
                'quality': quality,
                # Strip surrounding quotes and the 4-char '.mp4' suffix to
                # form the RTMP play path.
                'play_path': 'mp4:' + filename.replace('"', '')[:-4],
                'rtmp_live': True,
                'ext': 'flv',
            })

        self._sort_formats(formats)

        return {
            'id': real_id,
            'title': self._og_search_title(webpage),
            'thumbnail': self._og_search_thumbnail(webpage),
            'formats': formats,
            'description': self._og_search_description(webpage),
        }
| gpl-3.0 |
lakshayg/tensorflow | tensorflow/examples/how_tos/reading_data/fully_connected_preloaded_var.py | 100 | 6359 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Trains the MNIST network using preloaded data stored in a variable.
Run using bazel:
bazel run --config opt \
<...>/tensorflow/examples/how_tos/reading_data:fully_connected_preloaded_var
or, if installed via pip:
cd tensorflow/examples/how_tos/reading_data
python fully_connected_preloaded_var.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import time
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.examples.tutorials.mnist import mnist
# Basic model parameters as external flags.
FLAGS = None
def run_training():
  """Train MNIST for a number of epochs.

  Preloads the training set into non-trainable TF variables (fed once via
  placeholders), then feeds the model through a slice_input_producer/batch
  queue pipeline until num_epochs of data is exhausted.
  """
  # Get the sets of images and labels for training, validation, and
  # test on MNIST.
  data_sets = input_data.read_data_sets(FLAGS.train_dir, FLAGS.fake_data)

  # Tell TensorFlow that the model will be built into the default Graph.
  with tf.Graph().as_default():
    with tf.name_scope('input'):
      # Input data
      images_initializer = tf.placeholder(
          dtype=data_sets.train.images.dtype,
          shape=data_sets.train.images.shape)
      labels_initializer = tf.placeholder(
          dtype=data_sets.train.labels.dtype,
          shape=data_sets.train.labels.shape)
      # collections=[] keeps these variables out of GLOBAL_VARIABLES, so
      # init_op below does not try to initialize them; they are initialized
      # explicitly with placeholder feeds to avoid baking the whole dataset
      # into the graph definition.
      input_images = tf.Variable(
          images_initializer, trainable=False, collections=[])
      input_labels = tf.Variable(
          labels_initializer, trainable=False, collections=[])

      image, label = tf.train.slice_input_producer(
          [input_images, input_labels], num_epochs=FLAGS.num_epochs)
      label = tf.cast(label, tf.int32)
      images, labels = tf.train.batch(
          [image, label], batch_size=FLAGS.batch_size)

    # Build a Graph that computes predictions from the inference model.
    logits = mnist.inference(images, FLAGS.hidden1, FLAGS.hidden2)

    # Add to the Graph the Ops for loss calculation.
    loss = mnist.loss(logits, labels)

    # Add to the Graph the Ops that calculate and apply gradients.
    train_op = mnist.training(loss, FLAGS.learning_rate)

    # Add the Op to compare the logits to the labels during evaluation.
    # (Note: eval_correct is not consumed by the training loop below.)
    eval_correct = mnist.evaluation(logits, labels)

    # Build the summary operation based on the TF collection of Summaries.
    summary_op = tf.summary.merge_all()

    # Create a saver for writing training checkpoints.
    saver = tf.train.Saver()

    # Create the op for initializing variables.
    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())

    # Create a session for running Ops on the Graph.
    sess = tf.Session()

    # Run the Op to initialize the variables.
    sess.run(init_op)
    sess.run(input_images.initializer,
             feed_dict={images_initializer: data_sets.train.images})
    sess.run(input_labels.initializer,
             feed_dict={labels_initializer: data_sets.train.labels})

    # Instantiate a SummaryWriter to output summaries and the Graph.
    summary_writer = tf.summary.FileWriter(FLAGS.train_dir, sess.graph)

    # Start input enqueue threads.
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    # And then after everything is built, start the training loop.
    try:
      step = 0
      while not coord.should_stop():
        start_time = time.time()

        # Run one step of the model.
        _, loss_value = sess.run([train_op, loss])

        duration = time.time() - start_time

        # Write the summaries and print an overview fairly often.
        if step % 100 == 0:
          # Print status to stdout.
          print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value,
                                                     duration))
          # Update the events file.
          summary_str = sess.run(summary_op)
          summary_writer.add_summary(summary_str, step)
          # NOTE(review): this extra increment makes `step` advance twice on
          # summary iterations, skewing the 100/1000-step cadence -- confirm
          # this is intentional before relying on exact step counts.
          step += 1

        # Save a checkpoint periodically.
        if (step + 1) % 1000 == 0:
          print('Saving')
          saver.save(sess, FLAGS.train_dir, global_step=step)

        step += 1
    except tf.errors.OutOfRangeError:
      # Raised once the epoch-limited input producer runs dry; save a final
      # checkpoint on the way out.
      print('Saving')
      saver.save(sess, FLAGS.train_dir, global_step=step)
      print('Done training for %d epochs, %d steps.' % (FLAGS.num_epochs, step))
    finally:
      # When done, ask the threads to stop.
      coord.request_stop()

    # Wait for threads to finish.
    coord.join(threads)
    sess.close()
def main(_):
  """Entry point for tf.app.run(); the ignored argument receives argv."""
  run_training()
if __name__ == '__main__':
  # Command-line flags for the example; parsed values land in the
  # module-level FLAGS used by run_training().
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--learning_rate',
      type=float,
      default=0.01,
      help='Initial learning rate.'
  )
  parser.add_argument(
      '--num_epochs',
      type=int,
      default=2,
      help='Number of epochs to run trainer.'
  )
  parser.add_argument(
      '--hidden1',
      type=int,
      default=128,
      help='Number of units in hidden layer 1.'
  )
  parser.add_argument(
      '--hidden2',
      type=int,
      default=32,
      help='Number of units in hidden layer 2.'
  )
  parser.add_argument(
      '--batch_size',
      type=int,
      default=100,
      help='Batch size.  Must divide evenly into the dataset sizes.'
  )
  parser.add_argument(
      '--train_dir',
      type=str,
      default='/tmp/data',
      help='Directory to put the training data.'
  )
  parser.add_argument(
      '--fake_data',
      default=False,
      help='If true, uses fake data for unit testing.',
      action='store_true'
  )
  # Unrecognized arguments are forwarded untouched to tf.app.run().
  FLAGS, unparsed = parser.parse_known_args()
  tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
koson/MissionPlannerKMTI | Lib/xml/etree/ElementTree.py | 40 | 56165 | #
# ElementTree
# $Id: ElementTree.py 3440 2008-07-18 14:45:01Z fredrik $
#
# light-weight XML support for Python 2.3 and later.
#
# history (since 1.2.6):
# 2005-11-12 fl added tostringlist/fromstringlist helpers
# 2006-07-05 fl merged in selected changes from the 1.3 sandbox
# 2006-07-05 fl removed support for 2.1 and earlier
# 2007-06-21 fl added deprecation/future warnings
# 2007-08-25 fl added doctype hook, added parser version attribute etc
# 2007-08-26 fl added new serializer code (better namespace handling, etc)
# 2007-08-27 fl warn for broken /tag searches on tree level
# 2007-09-02 fl added html/text methods to serializer (experimental)
# 2007-09-05 fl added method argument to tostring/tostringlist
# 2007-09-06 fl improved error handling
# 2007-09-13 fl added itertext, iterfind; assorted cleanups
# 2007-12-15 fl added C14N hooks, copy method (experimental)
#
# Copyright (c) 1999-2008 by Fredrik Lundh. All rights reserved.
#
# fredrik@pythonware.com
# http://www.pythonware.com
#
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2008 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/psf/license for licensing details.
__all__ = [
# public symbols
"Comment",
"dump",
"Element", "ElementTree",
"fromstring", "fromstringlist",
"iselement", "iterparse",
"parse", "ParseError",
"PI", "ProcessingInstruction",
"QName",
"SubElement",
"tostring", "tostringlist",
"TreeBuilder",
"VERSION",
"XML",
"XMLParser", "XMLTreeBuilder",
]
VERSION = "1.3.0"
##
# The <b>Element</b> type is a flexible container object, designed to
# store hierarchical data structures in memory. The type can be
# described as a cross between a list and a dictionary.
# <p>
# Each element has a number of properties associated with it:
# <ul>
# <li>a <i>tag</i>. This is a string identifying what kind of data
# this element represents (the element type, in other words).</li>
# <li>a number of <i>attributes</i>, stored in a Python dictionary.</li>
# <li>a <i>text</i> string.</li>
# <li>an optional <i>tail</i> string.</li>
# <li>a number of <i>child elements</i>, stored in a Python sequence</li>
# </ul>
#
# To create an element instance, use the {@link #Element} constructor
# or the {@link #SubElement} factory function.
# <p>
# The {@link #ElementTree} class can be used to wrap an element
# structure, and convert it from and to XML.
##
import sys
import re
import warnings
class _SimpleElementPath(object):
# emulate pre-1.2 find/findtext/findall behaviour
def find(self, element, tag, namespaces=None):
for elem in element:
if elem.tag == tag:
return elem
return None
def findtext(self, element, tag, default=None, namespaces=None):
elem = self.find(element, tag)
if elem is None:
return default
return elem.text or ""
def iterfind(self, element, tag, namespaces=None):
if tag[:3] == ".//":
for elem in element.iter(tag[3:]):
yield elem
for elem in element:
if elem.tag == tag:
yield elem
def findall(self, element, tag, namespaces=None):
return list(self.iterfind(element, tag, namespaces))
try:
from . import ElementPath
except ImportError:
ElementPath = _SimpleElementPath()
##
# Parser error. This is a subclass of <b>SyntaxError</b>.
# <p>
# In addition to the exception value, an exception instance contains a
# specific exception code in the <b>code</b> attribute, and the line and
# column of the error in the <b>position</b> attribute.
class ParseError(SyntaxError):
    """XML parse error, raised on malformed input.

    In addition to the exception value, instances carry a specific
    exception code in the ``code`` attribute and the (line, column) of
    the error in the ``position`` attribute.
    """
    pass
# --------------------------------------------------------------------
##
# Checks if an object appears to be a valid element object.
#
# @param An element instance.
# @return A true value if this is an element object.
# @defreturn flag
def iselement(element):
    # An object qualifies either as a genuine Element instance or by
    # duck-typing: anything exposing a "tag" attribute is accepted.
    # (The original FIXME suggests tag/attrib/text might be a better probe.)
    if isinstance(element, Element):
        return True
    return hasattr(element, "tag")
##
# Element class. This class defines the Element interface, and
# provides a reference implementation of this interface.
# <p>
# The element name, attribute names, and attribute values can be
# either ASCII strings (ordinary Python strings containing only 7-bit
# ASCII characters) or Unicode strings.
#
# @param tag The element name.
# @param attrib An optional dictionary, containing element attributes.
# @param **extra Additional attributes, given as keyword arguments.
# @see Element
# @see SubElement
# @see Comment
# @see ProcessingInstruction
class Element(object):
    # <tag attrib>text<child/>...</tag>tail

    ##
    # (Attribute) Element tag.

    tag = None

    ##
    # (Attribute) Element attribute dictionary.  Where possible, use
    # {@link #Element.get},
    # {@link #Element.set},
    # {@link #Element.keys}, and
    # {@link #Element.items} to access
    # element attributes.

    attrib = None

    ##
    # (Attribute) Text before first subelement.  This is either a
    # string or the value None.  Note that if there was no text, this
    # attribute may be either None or an empty string, depending on
    # the parser.

    text = None

    ##
    # (Attribute) Text after this element's end tag, but before the
    # next sibling element's start tag.  This is either a string or
    # the value None.  Note that if there was no text, this attribute
    # may be either None or an empty string, depending on the parser.

    tail = None # text after end tag, if any

    # constructor

    def __init__(self, tag, attrib={}, **extra):
        # The mutable default is safe here: attrib is copied before any
        # update, so neither the default dict nor a caller's dict is mutated.
        attrib = attrib.copy()
        attrib.update(extra)
        self.tag = tag
        self.attrib = attrib
        self._children = []

    def __repr__(self):
        return "<Element %s at 0x%x>" % (repr(self.tag), id(self))

    ##
    # Creates a new element object of the same type as this element.
    #
    # @param tag Element tag.
    # @param attrib Element attributes, given as a dictionary.
    # @return A new element instance.

    def makeelement(self, tag, attrib):
        return self.__class__(tag, attrib)

    ##
    # (Experimental) Copies the current element.  This creates a
    # shallow copy; subelements will be shared with the original tree.
    #
    # @return A new element instance.

    def copy(self):
        elem = self.makeelement(self.tag, self.attrib)
        elem.text = self.text
        elem.tail = self.tail
        # Slice assignment shares (does not copy) the child elements.
        elem[:] = self
        return elem

    ##
    # Returns the number of subelements.  Note that this only counts
    # full elements; to check if there's any content in an element, you
    # have to check both the length and the <b>text</b> attribute.
    #
    # @return The number of subelements.

    def __len__(self):
        return len(self._children)

    def __nonzero__(self):
        warnings.warn(
            "The behavior of this method will change in future versions. "
            "Use specific 'len(elem)' or 'elem is not None' test instead.",
            FutureWarning, stacklevel=2
            )
        return len(self._children) != 0 # emulate old behaviour, for now

    ##
    # Returns the given subelement, by index.
    #
    # @param index What subelement to return.
    # @return The given subelement.
    # @exception IndexError If the given element does not exist.

    def __getitem__(self, index):
        return self._children[index]

    ##
    # Replaces the given subelement, by index.
    #
    # @param index What subelement to replace.
    # @param element The new element value.
    # @exception IndexError If the given element does not exist.

    def __setitem__(self, index, element):
        # if isinstance(index, slice):
        #     for elt in element:
        #         assert iselement(elt)
        # else:
        #     assert iselement(element)
        self._children[index] = element

    ##
    # Deletes the given subelement, by index.
    #
    # @param index What subelement to delete.
    # @exception IndexError If the given element does not exist.

    def __delitem__(self, index):
        del self._children[index]

    ##
    # Adds a subelement to the end of this element.  In document order,
    # the new element will appear after the last existing subelement (or
    # directly after the text, if it's the first subelement), but before
    # the end tag for this element.
    #
    # @param element The element to add.

    def append(self, element):
        # assert iselement(element)
        self._children.append(element)

    ##
    # Appends subelements from a sequence.
    #
    # @param elements A sequence object with zero or more elements.
    # @since 1.3

    def extend(self, elements):
        # for element in elements:
        #     assert iselement(element)
        self._children.extend(elements)

    ##
    # Inserts a subelement at the given position in this element.
    #
    # @param index Where to insert the new subelement.

    def insert(self, index, element):
        # assert iselement(element)
        self._children.insert(index, element)

    ##
    # Removes a matching subelement.  Unlike the <b>find</b> methods,
    # this method compares elements based on identity, not on tag
    # value or contents.  To remove subelements by other means, the
    # easiest way is often to use a list comprehension to select what
    # elements to keep, and use slice assignment to update the parent
    # element.
    #
    # @param element What element to remove.
    # @exception ValueError If a matching element could not be found.

    def remove(self, element):
        # assert iselement(element)
        self._children.remove(element)

    ##
    # (Deprecated) Returns all subelements.  The elements are returned
    # in document order.
    #
    # @return A list of subelements.
    # @defreturn list of Element instances

    def getchildren(self):
        warnings.warn(
            "This method will be removed in future versions. "
            "Use 'list(elem)' or iteration over elem instead.",
            DeprecationWarning, stacklevel=2
            )
        return self._children

    ##
    # Finds the first matching subelement, by tag name or path.
    #
    # @param path What element to look for.
    # @keyparam namespaces Optional namespace prefix map.
    # @return The first matching element, or None if no element was found.
    # @defreturn Element or None

    def find(self, path, namespaces=None):
        return ElementPath.find(self, path, namespaces)

    ##
    # Finds text for the first matching subelement, by tag name or path.
    #
    # @param path What element to look for.
    # @param default What to return if the element was not found.
    # @keyparam namespaces Optional namespace prefix map.
    # @return The text content of the first matching element, or the
    #     default value no element was found.  Note that if the element
    #     is found, but has no text content, this method returns an
    #     empty string.
    # @defreturn string

    def findtext(self, path, default=None, namespaces=None):
        return ElementPath.findtext(self, path, default, namespaces)

    ##
    # Finds all matching subelements, by tag name or path.
    #
    # @param path What element to look for.
    # @keyparam namespaces Optional namespace prefix map.
    # @return A list or other sequence containing all matching elements,
    #    in document order.
    # @defreturn list of Element instances

    def findall(self, path, namespaces=None):
        return ElementPath.findall(self, path, namespaces)

    ##
    # Finds all matching subelements, by tag name or path.
    #
    # @param path What element to look for.
    # @keyparam namespaces Optional namespace prefix map.
    # @return An iterator or sequence containing all matching elements,
    #    in document order.
    # @defreturn a generated sequence of Element instances

    def iterfind(self, path, namespaces=None):
        return ElementPath.iterfind(self, path, namespaces)

    ##
    # Resets an element.  This function removes all subelements, clears
    # all attributes, and sets the <b>text</b> and <b>tail</b> attributes
    # to None.

    def clear(self):
        self.attrib.clear()
        self._children = []
        self.text = self.tail = None

    ##
    # Gets an element attribute.  Equivalent to <b>attrib.get</b>, but
    # some implementations may handle this a bit more efficiently.
    #
    # @param key What attribute to look for.
    # @param default What to return if the attribute was not found.
    # @return The attribute value, or the default value, if the
    #     attribute was not found.
    # @defreturn string or None

    def get(self, key, default=None):
        return self.attrib.get(key, default)

    ##
    # Sets an element attribute.  Equivalent to <b>attrib[key] = value</b>,
    # but some implementations may handle this a bit more efficiently.
    #
    # @param key What attribute to set.
    # @param value The attribute value.

    def set(self, key, value):
        self.attrib[key] = value

    ##
    # Gets a list of attribute names.  The names are returned in an
    # arbitrary order (just like for an ordinary Python dictionary).
    # Equivalent to <b>attrib.keys()</b>.
    #
    # @return A list of element attribute names.
    # @defreturn list of strings

    def keys(self):
        return self.attrib.keys()

    ##
    # Gets element attributes, as a sequence.  The attributes are
    # returned in an arbitrary order.  Equivalent to <b>attrib.items()</b>.
    #
    # @return A list of (name, value) tuples for all attributes.
    # @defreturn list of (string, string) tuples

    def items(self):
        return self.attrib.items()

    ##
    # Creates a tree iterator.  The iterator loops over this element
    # and all subelements, in document order, and returns all elements
    # with a matching tag.
    # <p>
    # If the tree structure is modified during iteration, new or removed
    # elements may or may not be included.  To get a stable set, use the
    # list() function on the iterator, and loop over the resulting list.
    #
    # @param tag What tags to look for (default is to return all elements).
    # @return An iterator containing all the matching elements.
    # @defreturn iterator

    def iter(self, tag=None):
        if tag == "*":
            tag = None
        if tag is None or self.tag == tag:
            yield self
        # Note: the inner loop deliberately reuses the name 'e'; the outer
        # value is no longer needed once its subtree is being yielded.
        for e in self._children:
            for e in e.iter(tag):
                yield e

    # compatibility
    def getiterator(self, tag=None):
        # Change for a DeprecationWarning in 1.4
        warnings.warn(
            "This method will be removed in future versions.  "
            "Use 'elem.iter()' or 'list(elem.iter())' instead.",
            PendingDeprecationWarning, stacklevel=2
        )
        return list(self.iter(tag))

    ##
    # Creates a text iterator.  The iterator loops over this element
    # and all subelements, in document order, and returns all inner
    # text.
    #
    # @return An iterator containing all inner text.
    # @defreturn iterator

    def itertext(self):
        tag = self.tag
        # Skip comments and processing instructions, whose tag is a
        # factory function rather than a string.
        if not isinstance(tag, basestring) and tag is not None:
            return
        if self.text:
            yield self.text
        for e in self:
            for s in e.itertext():
                yield s
            if e.tail:
                yield e.tail
# compatibility
_Element = _ElementInterface = Element
##
# Subelement factory. This function creates an element instance, and
# appends it to an existing element.
# <p>
# The element name, attribute names, and attribute values can be
# either 8-bit ASCII strings or Unicode strings.
#
# @param parent The parent element.
# @param tag The subelement name.
# @param attrib An optional dictionary, containing element attributes.
# @param **extra Additional attributes, given as keyword arguments.
# @return An element instance.
# @defreturn Element
def SubElement(parent, tag, attrib={}, **extra):
    """Create an element with the given tag and attributes, append it to
    *parent*, and return it.

    The *attrib* dictionary is never mutated; keyword arguments are
    merged into a copy of it.
    """
    merged = attrib.copy()
    merged.update(extra)
    child = parent.makeelement(tag, merged)
    parent.append(child)
    return child
##
# Comment element factory. This factory function creates a special
# element that will be serialized as an XML comment by the standard
# serializer.
# <p>
# The comment string can be either an 8-bit ASCII string or a Unicode
# string.
#
# @param text A string containing the comment string.
# @return An element instance, representing a comment.
# @defreturn Element
def Comment(text=None):
    """Return a special element, serialized as an XML comment by the
    standard serializer; its tag is the Comment factory itself."""
    comment = Element(Comment)
    comment.text = text
    return comment
##
# PI element factory. This factory function creates a special element
# that will be serialized as an XML processing instruction by the standard
# serializer.
#
# @param target A string containing the PI target.
# @param text A string containing the PI contents, if any.
# @return An element instance, representing a PI.
# @defreturn Element
def ProcessingInstruction(target, text=None):
    """Return a special element, serialized as an XML processing
    instruction; its text is the target, optionally followed by a space
    and the PI contents."""
    pi = Element(ProcessingInstruction)
    pi.text = target
    if text:
        pi.text = pi.text + " " + text
    return pi

PI = ProcessingInstruction
##
# QName wrapper. This can be used to wrap a QName attribute value, in
# order to get proper namespace handling on output.
#
# @param text A string containing the QName value, in the form {uri}local,
# or, if the tag argument is given, the URI part of a QName.
# @param tag Optional tag. If given, the first argument is interpreted as
# an URI, and this argument is interpreted as a local name.
# @return An opaque object, representing the QName.
class QName(object):
    """Wrapper for a qualified name ("{uri}local"), giving proper
    namespace handling on output.

    The first argument is either the complete QName value or, when *tag*
    is also given, just the URI part.
    """

    def __init__(self, text_or_uri, tag=None):
        if tag:
            # Combine a separate URI and local name into {uri}local form.
            self.text = "{%s}%s" % (text_or_uri, tag)
        else:
            self.text = text_or_uri

    def __str__(self):
        return self.text

    def __hash__(self):
        return hash(self.text)

    def __cmp__(self, other):
        # Compare against another QName (by its text) or a plain string.
        if isinstance(other, QName):
            other = other.text
        return cmp(self.text, other)
# --------------------------------------------------------------------
##
# ElementTree wrapper class. This class represents an entire element
# hierarchy, and adds some extra support for serialization to and from
# standard XML.
#
# @param element Optional root element.
# @keyparam file Optional file handle or file name. If given, the
# tree is initialized with the contents of this XML file.
class ElementTree(object):
    """Represents an entire element hierarchy and adds serialization
    to and from standard XML.

    element -- optional root element.
    file    -- optional file handle or file name; if given, the tree
               is initialized with the contents of this XML file.
    """
    def __init__(self, element=None, file=None):
        # assert element is None or iselement(element)
        self._root = element # first node
        if file:
            self.parse(file)
    ##
    # Gets the root element for this tree.
    #
    # @return An element instance.
    # @defreturn Element
    def getroot(self):
        """Return the root element of this tree (None for an empty tree)."""
        return self._root
    ##
    # Replaces the root element for this tree. This discards the
    # current contents of the tree, and replaces it with the given
    # element. Use with care.
    #
    # @param element An element instance.
    def _setroot(self, element):
        """Replace the root element, discarding the tree's current contents."""
        # assert iselement(element)
        self._root = element
    ##
    # Loads an external XML document into this element tree.
    #
    # @param source A file name or file object. If a file object is
    # given, it only has to implement a <b>read(n)</b> method.
    # @keyparam parser An optional parser instance. If not given, the
    # standard {@link XMLParser} parser is used.
    # @return The document root element.
    # @defreturn Element
    # @exception ParseError If the parser fails to parse the document.
    def parse(self, source, parser=None):
        """Parse *source* into this tree; return the new root element."""
        if not hasattr(source, "read"):
            # treat non-file-like sources as file names
            source = open(source, "rb")
        if not parser:
            parser = XMLParser(target=TreeBuilder())
        # feed the parser in 64K chunks to bound memory use
        while 1:
            data = source.read(65536)
            if not data:
                break
            parser.feed(data)
        self._root = parser.close()
        return self._root
    ##
    # Creates a tree iterator for the root element. The iterator loops
    # over all elements in this tree, in document order.
    #
    # @param tag What tags to look for (default is to return all elements)
    # @return An iterator.
    # @defreturn iterator
    def iter(self, tag=None):
        """Iterate over all elements in the tree, in document order."""
        # assert self._root is not None
        return self._root.iter(tag)
    # compatibility
    def getiterator(self, tag=None):
        """Deprecated alias for iter(); returns a list instead of an iterator."""
        # Change for a DeprecationWarning in 1.4
        warnings.warn(
            "This method will be removed in future versions. "
            "Use 'tree.iter()' or 'list(tree.iter())' instead.",
            PendingDeprecationWarning, stacklevel=2
        )
        return list(self.iter(tag))
    ##
    # Finds the first toplevel element with given tag.
    # Same as getroot().find(path).
    #
    # @param path What element to look for.
    # @keyparam namespaces Optional namespace prefix map.
    # @return The first matching element, or None if no element was found.
    # @defreturn Element or None
    def find(self, path, namespaces=None):
        """Find the first matching subelement of the root, or None."""
        # assert self._root is not None
        if path[:1] == "/":
            # absolute paths were historically mis-handled; warn and
            # rewrite as a relative path rooted at the document root
            path = "." + path
            warnings.warn(
                "This search is broken in 1.3 and earlier, and will be "
                "fixed in a future version. If you rely on the current "
                "behaviour, change it to %r" % path,
                FutureWarning, stacklevel=2
                )
        return self._root.find(path, namespaces)
    ##
    # Finds the element text for the first toplevel element with given
    # tag. Same as getroot().findtext(path).
    #
    # @param path What toplevel element to look for.
    # @param default What to return if the element was not found.
    # @keyparam namespaces Optional namespace prefix map.
    # @return The text content of the first matching element, or the
    # default value no element was found. Note that if the element
    # is found, but has no text content, this method returns an
    # empty string.
    # @defreturn string
    def findtext(self, path, default=None, namespaces=None):
        """Return the text of the first matching element, or *default*."""
        # assert self._root is not None
        if path[:1] == "/":
            path = "." + path
            warnings.warn(
                "This search is broken in 1.3 and earlier, and will be "
                "fixed in a future version. If you rely on the current "
                "behaviour, change it to %r" % path,
                FutureWarning, stacklevel=2
                )
        return self._root.findtext(path, default, namespaces)
    ##
    # Finds all toplevel elements with the given tag.
    # Same as getroot().findall(path).
    #
    # @param path What element to look for.
    # @keyparam namespaces Optional namespace prefix map.
    # @return A list or iterator containing all matching elements,
    # in document order.
    # @defreturn list of Element instances
    def findall(self, path, namespaces=None):
        """Return a list of all matching subelements, in document order."""
        # assert self._root is not None
        if path[:1] == "/":
            path = "." + path
            warnings.warn(
                "This search is broken in 1.3 and earlier, and will be "
                "fixed in a future version. If you rely on the current "
                "behaviour, change it to %r" % path,
                FutureWarning, stacklevel=2
                )
        return self._root.findall(path, namespaces)
    ##
    # Finds all matching subelements, by tag name or path.
    # Same as getroot().iterfind(path).
    #
    # @param path What element to look for.
    # @keyparam namespaces Optional namespace prefix map.
    # @return An iterator or sequence containing all matching elements,
    # in document order.
    # @defreturn a generated sequence of Element instances
    def iterfind(self, path, namespaces=None):
        """Return an iterator over all matching subelements."""
        # assert self._root is not None
        if path[:1] == "/":
            path = "." + path
            warnings.warn(
                "This search is broken in 1.3 and earlier, and will be "
                "fixed in a future version. If you rely on the current "
                "behaviour, change it to %r" % path,
                FutureWarning, stacklevel=2
                )
        return self._root.iterfind(path, namespaces)
    ##
    # Writes the element tree to a file, as XML.
    #
    # @def write(file, **options)
    # @param file A file name, or a file object opened for writing.
    # @param **options Options, given as keyword arguments.
    # @keyparam encoding Optional output encoding (default is US-ASCII).
    # @keyparam method Optional output method ("xml", "html", "text" or
    # "c14n"; default is "xml").
    # @keyparam xml_declaration Controls if an XML declaration should
    # be added to the file. Use False for never, True for always,
    # None for only if not US-ASCII or UTF-8. None is default.
    def write(self, file_or_filename,
              # keyword arguments
              encoding=None,
              xml_declaration=None,
              default_namespace=None,
              method=None):
        """Serialize the tree to *file_or_filename* as XML (or html/text/c14n)."""
        # assert self._root is not None
        if not method:
            method = "xml"
        elif method not in _serialize:
            # FIXME: raise an ImportError for c14n if ElementC14N is missing?
            raise ValueError("unknown method %r" % method)
        if hasattr(file_or_filename, "write"):
            file = file_or_filename
        else:
            file = open(file_or_filename, "wb")
        write = file.write
        # NOTE: the declaration branch below is an elif, so when no
        # encoding is given an XML declaration is never written even if
        # xml_declaration is True -- historical behaviour, kept as-is.
        if not encoding:
            if method == "c14n":
                encoding = "utf-8"
            else:
                encoding = "us-ascii"
        elif xml_declaration or (xml_declaration is None and
                                 encoding not in ("utf-8", "us-ascii")):
            if method == "xml":
                write("<?xml version='1.0' encoding='%s'?>\n" % encoding)
        if method == "text":
            _serialize_text(write, self._root, encoding)
        else:
            qnames, namespaces = _namespaces(
                self._root, encoding, default_namespace
                )
            serialize = _serialize[method]
            serialize(write, self._root, encoding, qnames, namespaces)
        # only close files this method itself opened
        if file_or_filename is not file:
            file.close()
    def write_c14n(self, file):
        """Write the tree in C14N (canonical XML) form."""
        # lxml.etree compatibility. use output method instead
        return self.write(file, method="c14n")
# --------------------------------------------------------------------
# serialization support
def _namespaces(elem, encoding, default_namespace=None):
    """Scan *elem*'s tree and return a (qnames, namespaces) pair.

    qnames maps every qualified-name string seen (tags, attribute keys,
    QName attribute values and text) to its *encoded* prefix:local form;
    namespaces maps namespace URIs to the prefixes chosen for them.
    """
    # identify namespaces used in this tree
    # maps qnames to *encoded* prefix:local names
    qnames = {None: None}
    # maps uri:s to prefixes
    namespaces = {}
    if default_namespace:
        namespaces[default_namespace] = ""
    def encode(text):
        return text.encode(encoding)
    def add_qname(qname):
        # calculate serialized qname representation
        try:
            if qname[:1] == "{":
                uri, tag = qname[1:].rsplit("}", 1)
                prefix = namespaces.get(uri)
                if prefix is None:
                    # try the well-known prefixes first, otherwise
                    # invent a sequentially numbered one
                    prefix = _namespace_map.get(uri)
                    if prefix is None:
                        prefix = "ns%d" % len(namespaces)
                    # never (re)declare the reserved "xml" prefix
                    if prefix != "xml":
                        namespaces[uri] = prefix
                if prefix:
                    qnames[qname] = encode("%s:%s" % (prefix, tag))
                else:
                    qnames[qname] = encode(tag) # default element
            else:
                if default_namespace:
                    # FIXME: can this be handled in XML 1.0?
                    raise ValueError(
                        "cannot use non-qualified names with "
                        "default_namespace option"
                        )
                qnames[qname] = encode(qname)
        except TypeError:
            _raise_serialization_error(qname)
    # populate qname and namespaces table
    try:
        iterate = elem.iter
    except AttributeError:
        iterate = elem.getiterator # cET compatibility
    for elem in iterate():
        tag = elem.tag
        if isinstance(tag, QName):
            if tag.text not in qnames:
                add_qname(tag.text)
        elif isinstance(tag, basestring):
            if tag not in qnames:
                add_qname(tag)
        elif tag is not None and tag is not Comment and tag is not PI:
            _raise_serialization_error(tag)
        for key, value in elem.items():
            if isinstance(key, QName):
                key = key.text
            if key not in qnames:
                add_qname(key)
            if isinstance(value, QName) and value.text not in qnames:
                add_qname(value.text)
        text = elem.text
        if isinstance(text, QName) and text.text not in qnames:
            add_qname(text.text)
    return qnames, namespaces
def _serialize_xml(write, elem, encoding, qnames, namespaces):
    """Recursively emit *elem* as XML through the *write* callable.

    qnames maps names to encoded qualified names; *namespaces* (URI ->
    prefix) is only passed on the root call, so xmlns declarations are
    emitted on the outermost element only.
    """
    tag = elem.tag
    text = elem.text
    if tag is Comment:
        write("<!--%s-->" % _encode(text, encoding))
    elif tag is ProcessingInstruction:
        write("<?%s?>" % _encode(text, encoding))
    else:
        tag = qnames[tag]
        if tag is None:
            # tag suppressed (None): serialize text and children only
            if text:
                write(_escape_cdata(text, encoding))
            for e in elem:
                _serialize_xml(write, e, encoding, qnames, None)
        else:
            write("<" + tag)
            items = elem.items()
            if items or namespaces:
                if namespaces:
                    for v, k in sorted(namespaces.items(),
                                       key=lambda x: x[1]): # sort on prefix
                        if k:
                            k = ":" + k
                        write(" xmlns%s=\"%s\"" % (
                            k.encode(encoding),
                            _escape_attrib(v, encoding)
                            ))
                for k, v in sorted(items): # lexical order
                    if isinstance(k, QName):
                        k = k.text
                    if isinstance(v, QName):
                        v = qnames[v.text]
                    else:
                        v = _escape_attrib(v, encoding)
                    write(" %s=\"%s\"" % (qnames[k], v))
            if text or len(elem):
                write(">")
                if text:
                    write(_escape_cdata(text, encoding))
                for e in elem:
                    _serialize_xml(write, e, encoding, qnames, None)
                write("</" + tag + ">")
            else:
                # no content: use a self-closing tag
                write(" />")
    if elem.tail:
        write(_escape_cdata(elem.tail, encoding))
# Tags serialized as empty (void) elements by the HTML output method;
# no closing tag is ever emitted for these.
# BUGFIX: the original tuple read `"meta" "param"` (missing comma),
# which implicit string concatenation turned into the single bogus
# entry "metaparam", so <meta> and <param> got spurious closing tags.
HTML_EMPTY = ("area", "base", "basefont", "br", "col", "frame", "hr",
              "img", "input", "isindex", "link", "meta", "param")
try:
    # set() gives O(1) membership tests; fall back to the tuple on
    # ancient interpreters without the set builtin
    HTML_EMPTY = set(HTML_EMPTY)
except NameError:
    pass
def _serialize_html(write, elem, encoding, qnames, namespaces):
    """Recursively emit *elem* as HTML through the *write* callable.

    Differs from the XML serializer: void elements (HTML_EMPTY) get no
    closing tag, script/style text is written unescaped, and tags are
    lower-cased before the void-element check.
    """
    tag = elem.tag
    text = elem.text
    if tag is Comment:
        write("<!--%s-->" % _escape_cdata(text, encoding))
    elif tag is ProcessingInstruction:
        write("<?%s?>" % _escape_cdata(text, encoding))
    else:
        tag = qnames[tag]
        if tag is None:
            # tag suppressed (None): serialize text and children only
            if text:
                write(_escape_cdata(text, encoding))
            for e in elem:
                _serialize_html(write, e, encoding, qnames, None)
        else:
            write("<" + tag)
            items = elem.items()
            if items or namespaces:
                if namespaces:
                    for v, k in sorted(namespaces.items(),
                                       key=lambda x: x[1]): # sort on prefix
                        if k:
                            k = ":" + k
                        write(" xmlns%s=\"%s\"" % (
                            k.encode(encoding),
                            _escape_attrib(v, encoding)
                            ))
                for k, v in sorted(items): # lexical order
                    if isinstance(k, QName):
                        k = k.text
                    if isinstance(v, QName):
                        v = qnames[v.text]
                    else:
                        v = _escape_attrib_html(v, encoding)
                    # FIXME: handle boolean attributes
                    write(" %s=\"%s\"" % (qnames[k], v))
            write(">")
            tag = tag.lower()
            if text:
                if tag == "script" or tag == "style":
                    # raw text content: must not be entity-escaped
                    write(_encode(text, encoding))
                else:
                    write(_escape_cdata(text, encoding))
            for e in elem:
                _serialize_html(write, e, encoding, qnames, None)
            if tag not in HTML_EMPTY:
                write("</" + tag + ">")
    if elem.tail:
        write(_escape_cdata(elem.tail, encoding))
def _serialize_text(write, elem, encoding):
for part in elem.itertext():
write(part.encode(encoding))
if elem.tail:
write(elem.tail.encode(encoding))
# Dispatch table mapping an output method name (the ``method`` argument
# of ElementTree.write/tostring) to its serializer function.
_serialize = {
    "xml": _serialize_xml,
    "html": _serialize_html,
    "text": _serialize_text,
    # this optional method is imported at the end of the module
    # "c14n": _serialize_c14n,
}
##
# Registers a namespace prefix. The registry is global, and any
# existing mapping for either the given prefix or the namespace URI
# will be removed.
#
# @param prefix Namespace prefix.
# @param uri Namespace uri. Tags and attributes in this namespace
# will be serialized with the given prefix, if at all possible.
# @exception ValueError If the prefix is reserved, or is otherwise
# invalid.
def register_namespace(prefix, uri):
if re.match("ns\d+$", prefix):
raise ValueError("Prefix format reserved for internal use")
for k, v in _namespace_map.items():
if k == uri or v == prefix:
del _namespace_map[k]
_namespace_map[uri] = prefix
_namespace_map = {
# "well-known" namespace prefixes
"http://www.w3.org/XML/1998/namespace": "xml",
"http://www.w3.org/1999/xhtml": "html",
"http://www.w3.org/1999/02/22-rdf-syntax-ns#": "rdf",
"http://schemas.xmlsoap.org/wsdl/": "wsdl",
# xml schema
"http://www.w3.org/2001/XMLSchema": "xs",
"http://www.w3.org/2001/XMLSchema-instance": "xsi",
# dublin core
"http://purl.org/dc/elements/1.1/": "dc",
}
def _raise_serialization_error(text):
raise TypeError(
"cannot serialize %r (type %s)" % (text, type(text).__name__)
)
def _encode(text, encoding):
try:
return text.encode(encoding, "xmlcharrefreplace")
except (TypeError, AttributeError):
_raise_serialization_error(text)
def _escape_cdata(text, encoding):
# escape character data
try:
# it's worth avoiding do-nothing calls for strings that are
# shorter than 500 character, or so. assume that's, by far,
# the most common case in most applications.
if "&" in text:
text = text.replace("&", "&")
if "<" in text:
text = text.replace("<", "<")
if ">" in text:
text = text.replace(">", ">")
return text.encode(encoding, "xmlcharrefreplace")
except (TypeError, AttributeError):
_raise_serialization_error(text)
def _escape_attrib(text, encoding):
# escape attribute value
try:
if "&" in text:
text = text.replace("&", "&")
if "<" in text:
text = text.replace("<", "<")
if ">" in text:
text = text.replace(">", ">")
if "\"" in text:
text = text.replace("\"", """)
if "\n" in text:
text = text.replace("\n", " ")
return text.encode(encoding, "xmlcharrefreplace")
except (TypeError, AttributeError):
_raise_serialization_error(text)
def _escape_attrib_html(text, encoding):
# escape attribute value
try:
if "&" in text:
text = text.replace("&", "&")
if ">" in text:
text = text.replace(">", ">")
if "\"" in text:
text = text.replace("\"", """)
return text.encode(encoding, "xmlcharrefreplace")
except (TypeError, AttributeError):
_raise_serialization_error(text)
# --------------------------------------------------------------------
##
# Generates a string representation of an XML element, including all
# subelements.
#
# @param element An Element instance.
# @keyparam encoding Optional output encoding (default is US-ASCII).
# @keyparam method Optional output method ("xml", "html", "text" or
#     "c14n"; default is "xml").
# @return An encoded string containing the XML data.
# @defreturn string

def tostring(element, encoding=None, method=None):
    """Serialize *element* and its subtree to a single encoded string."""
    parts = []
    class _Collector:
        pass
    sink = _Collector()
    # present a file-like object whose write() just buffers fragments
    sink.write = parts.append
    ElementTree(element).write(sink, encoding, method=method)
    return "".join(parts)
##
# Generates a string representation of an XML element, including all
# subelements.  The string is returned as a sequence of string fragments.
#
# @param element An Element instance.
# @keyparam encoding Optional output encoding (default is US-ASCII).
# @keyparam method Optional output method ("xml", "html", "text" or
#     "c14n"; default is "xml").
# @return A sequence object containing the XML data.
# @defreturn sequence
# @since 1.3

def tostringlist(element, encoding=None, method=None):
    """Serialize *element*; return the raw list of string fragments."""
    fragments = []
    class _Collector:
        pass
    sink = _Collector()
    sink.write = fragments.append
    ElementTree(element).write(sink, encoding, method=method)
    # FIXME: merge small fragments into larger parts
    return fragments
##
# Writes an element tree or element structure to sys.stdout.  This
# function should be used for debugging only.
# <p>
# The exact output format is implementation dependent.  In this
# version, it's written as an ordinary XML file.
#
# @param elem An element tree or an individual element.

def dump(elem):
    """Write *elem* to sys.stdout as XML, for debugging only."""
    tree = elem if isinstance(elem, ElementTree) else ElementTree(elem)
    tree.write(sys.stdout)
    # make sure the dump ends with a newline
    tail = tree.getroot().tail
    if not tail or tail[-1] != "\n":
        sys.stdout.write("\n")
# --------------------------------------------------------------------
# parsing
##
# Parses an XML document into an element tree.
#
# @param source A filename or file object containing XML data.
# @param parser An optional parser instance.  If not given, the
#     standard {@link XMLParser} parser is used.
# @return An ElementTree instance

def parse(source, parser=None):
    """Parse *source* and return the resulting ElementTree instance."""
    document = ElementTree()
    document.parse(source, parser)
    return document
##
# Parses an XML document into an element tree incrementally, and reports
# what's going on to the user.
#
# @param source A filename or file object containing XML data.
# @param events A list of events to report back.  If omitted, only "end"
#     events are reported.
# @param parser An optional parser instance.  If not given, the
#     standard {@link XMLParser} parser is used.
# @return A (event, elem) iterator.

def iterparse(source, events=None, parser=None):
    """Incrementally parse *source*, yielding (event, elem) pairs."""
    if sys.platform == 'cli':
        raise NotImplementedError('iterparse is not supported on IronPython. (CP #31923)')
    stream = source
    if not hasattr(stream, "read"):
        stream = open(stream, "rb")
    if not parser:
        parser = XMLParser(target=TreeBuilder())
    return _IterParseIterator(stream, events, parser)
class _IterParseIterator(object):
    """Driver for iterparse(): feeds *source* through *parser* in
    chunks and yields the (event, elem) pairs buffered by the expat
    handlers installed in __init__."""
    def __init__(self, source, events, parser):
        self._file = source
        self._events = []   # buffered (event, elem) pairs
        self._index = 0     # index of the next buffered pair to return
        self.root = self._root = None
        self._parser = parser
        # wire up the parser for event reporting
        parser = self._parser._parser
        append = self._events.append
        if events is None:
            events = ["end"]
        for event in events:
            if event == "start":
                try:
                    # prefer the list-based attribute protocol when
                    # the underlying expat parser supports it
                    parser.ordered_attributes = 1
                    parser.specified_attributes = 1
                    def handler(tag, attrib_in, event=event, append=append,
                                start=self._parser._start_list):
                        append((event, start(tag, attrib_in)))
                    parser.StartElementHandler = handler
                except AttributeError:
                    def handler(tag, attrib_in, event=event, append=append,
                                start=self._parser._start):
                        append((event, start(tag, attrib_in)))
                    parser.StartElementHandler = handler
            elif event == "end":
                def handler(tag, event=event, append=append,
                            end=self._parser._end):
                    append((event, end(tag)))
                parser.EndElementHandler = handler
            elif event == "start-ns":
                def handler(prefix, uri, event=event, append=append):
                    try:
                        uri = (uri or "").encode("ascii")
                    except UnicodeError:
                        pass
                    append((event, (prefix or "", uri or "")))
                parser.StartNamespaceDeclHandler = handler
            elif event == "end-ns":
                def handler(prefix, event=event, append=append):
                    append((event, None))
                parser.EndNamespaceDeclHandler = handler
            else:
                raise ValueError("unknown event %r" % event)
    def next(self):
        """Return the next buffered pair, feeding more input as needed."""
        while 1:
            try:
                item = self._events[self._index]
            except IndexError:
                if self._parser is None:
                    # input exhausted and buffer drained: expose the
                    # finished tree on .root before stopping
                    self.root = self._root
                    raise StopIteration
                # load event buffer
                del self._events[:]
                self._index = 0
                data = self._file.read(16384)
                if data:
                    self._parser.feed(data)
                else:
                    self._root = self._parser.close()
                    self._parser = None
            else:
                self._index = self._index + 1
                return item
    def __iter__(self):
        return self
##
# Parses an XML document from a string constant.  This function can
# be used to embed "XML literals" in Python code.
#
# @param source A string containing XML data.
# @param parser An optional parser instance.  If not given, the
#     standard {@link XMLParser} parser is used.
# @return An Element instance.
# @defreturn Element

def XML(text, parser=None):
    """Parse an XML section from a string; return the root element."""
    builder = parser
    if not builder:
        builder = XMLParser(target=TreeBuilder())
    builder.feed(text)
    return builder.close()
##
# Parses an XML document from a string constant, and also returns
# a dictionary which maps from element id:s to elements.
#
# @param source A string containing XML data.
# @param parser An optional parser instance.  If not given, the
#     standard {@link XMLParser} parser is used.
# @return A tuple containing an Element instance and a dictionary.
# @defreturn (Element, dictionary)

def XMLID(text, parser=None):
    """Parse XML from a string; return (root, {id attribute: element})."""
    builder = parser
    if not builder:
        builder = XMLParser(target=TreeBuilder())
    builder.feed(text)
    root = builder.close()
    by_id = {}
    for node in root.iter():
        node_id = node.get("id")
        if node_id:
            by_id[node_id] = node
    return root, by_id
##
# Parses an XML document from a string constant. Same as {@link #XML}.
#
# @def fromstring(text)
# @param source A string containing XML data.
# @return An Element instance.
# @defreturn Element
# fromstring is simply the canonical public alias for XML()
fromstring = XML
##
# Parses an XML document from a sequence of string fragments.
#
# @param sequence A list or other sequence containing XML data fragments.
# @param parser An optional parser instance.  If not given, the
#     standard {@link XMLParser} parser is used.
# @return An Element instance.
# @defreturn Element
# @since 1.3

def fromstringlist(sequence, parser=None):
    """Parse XML given as a sequence of fragments; return the root."""
    builder = parser
    if not builder:
        builder = XMLParser(target=TreeBuilder())
    for fragment in sequence:
        builder.feed(fragment)
    return builder.close()
# --------------------------------------------------------------------
##
# Generic element structure builder. This builder converts a sequence
# of {@link #TreeBuilder.start}, {@link #TreeBuilder.data}, and {@link
# #TreeBuilder.end} method calls to a well-formed element structure.
# <p>
# You can use this class to build an element structure using a custom XML
# parser, or a parser for some other XML-like format.
#
# @param element_factory Optional element factory. This factory
# is called to create new Element instances, as necessary.
class TreeBuilder(object):
    """Generic element structure builder.

    Converts a sequence of start()/data()/end() calls into a
    well-formed element structure, creating elements with
    *element_factory* (the standard Element factory by default).
    """

    def __init__(self, element_factory=None):
        self._data = []    # pending text chunks
        self._elem = []    # stack of currently open elements
        self._last = None  # most recently opened or closed element
        self._tail = None  # true while collecting tail text
        self._factory = element_factory if element_factory is not None else Element

    def close(self):
        """Flush the builder and return the toplevel document element."""
        assert len(self._elem) == 0, "missing end tags"
        assert self._last is not None, "missing toplevel element"
        return self._last

    def _flush(self):
        # Attach any collected text to the last element, either as its
        # text (just opened) or its tail (just closed).
        if not self._data:
            return
        if self._last is not None:
            text = "".join(self._data)
            if self._tail:
                assert self._last.tail is None, "internal error (tail)"
                self._last.tail = text
            else:
                assert self._last.text is None, "internal error (text)"
                self._last.text = text
        self._data = []

    def data(self, data):
        """Add a chunk of text to the current element."""
        self._data.append(data)

    def start(self, tag, attrs):
        """Open a new element with the given tag and attributes; return it."""
        self._flush()
        elem = self._factory(tag, attrs)
        self._last = elem
        if self._elem:
            self._elem[-1].append(elem)
        self._elem.append(elem)
        self._tail = 0
        return elem

    def end(self, tag):
        """Close the current element, checking the tag matches; return it."""
        self._flush()
        self._last = self._elem.pop()
        assert self._last.tag == tag,\
               "end tag mismatch (expected %s, got %s)" % (
                   self._last.tag, tag)
        self._tail = 1
        return self._last
##
# Element structure builder for XML source data, based on the
# <b>expat</b> parser.
#
# @keyparam target Target object. If omitted, the builder uses an
# instance of the standard {@link #TreeBuilder} class.
# @keyparam html Predefine HTML entities. This flag is not supported
# by the current implementation.
# @keyparam encoding Optional encoding. If given, the value overrides
# the encoding specified in the XML file.
# @see #ElementTree
# @see #TreeBuilder
class XMLParser(object):
    """Element structure builder for XML source data, based on expat.

    target   -- target object (a standard TreeBuilder by default).
    html     -- predefine HTML entities; accepted but not supported
                by this implementation.
    encoding -- optional encoding, overriding the document's own.
    """
    def __init__(self, html=0, target=None, encoding=None):
        try:
            from xml.parsers import expat
        except ImportError:
            try:
                import pyexpat as expat
            except ImportError:
                raise ImportError(
                    "No module named expat; use SimpleXMLTreeBuilder instead"
                    )
        # the "}" separator makes expat report names as "uri}local",
        # which _fixname rewrites into the {uri}local convention
        parser = expat.ParserCreate(encoding, "}")
        if target is None:
            target = TreeBuilder()
        # underscored names are provided for compatibility only
        self.parser = self._parser = parser
        self.target = self._target = target
        self._error = expat.error
        self._names = {} # name memo cache
        # callbacks
        parser.DefaultHandlerExpand = self._default
        parser.StartElementHandler = self._start
        parser.EndElementHandler = self._end
        parser.CharacterDataHandler = self._data
        # optional callbacks
        parser.CommentHandler = self._comment
        parser.ProcessingInstructionHandler = self._pi
        # let expat do the buffering, if supported
        try:
            self._parser.buffer_text = 1
        except AttributeError:
            pass
        # use new-style attribute handling, if supported
        try:
            self._parser.ordered_attributes = 1
            self._parser.specified_attributes = 1
            parser.StartElementHandler = self._start_list
        except AttributeError:
            pass
        self._doctype = None
        self.entity = {}
        try:
            self.version = "Expat %d.%d.%d" % expat.version_info
        except AttributeError:
            pass # unknown
    def _raiseerror(self, value):
        """Re-raise an expat error as a ParseError with position info."""
        err = ParseError(value)
        err.code = value.code
        err.position = value.lineno, value.offset
        raise err
    def _fixtext(self, text):
        # convert text string to ascii, if possible
        try:
            return text.encode("ascii")
        except UnicodeError:
            return text
    def _fixname(self, key):
        # expand qname, and convert name string to ascii, if possible
        try:
            name = self._names[key]
        except KeyError:
            name = key
            if "}" in name:
                name = "{" + name
            self._names[key] = name = self._fixtext(name)
        return name
    def _start(self, tag, attrib_in):
        """Expat start-tag handler (attributes given as a dict)."""
        fixname = self._fixname
        fixtext = self._fixtext
        tag = fixname(tag)
        attrib = {}
        for key, value in attrib_in.items():
            attrib[fixname(key)] = fixtext(value)
        return self.target.start(tag, attrib)
    def _start_list(self, tag, attrib_in):
        """Expat start-tag handler (attributes as a flat key/value list)."""
        fixname = self._fixname
        fixtext = self._fixtext
        tag = fixname(tag)
        attrib = {}
        if attrib_in:
            for i in range(0, len(attrib_in), 2):
                attrib[fixname(attrib_in[i])] = fixtext(attrib_in[i+1])
        return self.target.start(tag, attrib)
    def _data(self, text):
        """Expat character-data handler."""
        return self.target.data(self._fixtext(text))
    def _end(self, tag):
        """Expat end-tag handler."""
        return self.target.end(self._fixname(tag))
    def _comment(self, data):
        """Forward a comment to the target, if it supports comments."""
        try:
            comment = self.target.comment
        except AttributeError:
            pass
        else:
            return comment(self._fixtext(data))
    def _pi(self, target, data):
        """Forward a processing instruction to the target, if supported."""
        try:
            pi = self.target.pi
        except AttributeError:
            pass
        else:
            return pi(self._fixtext(target), self._fixtext(data))
    def _default(self, text):
        """Expat default handler: undefined entities and doctype parsing."""
        prefix = text[:1]
        if prefix == "&":
            # deal with undefined entities
            try:
                self.target.data(self.entity[text[1:-1]])
            except KeyError:
                from xml.parsers import expat
                err = expat.error(
                    "undefined entity %s: line %d, column %d" %
                    (text, self._parser.ErrorLineNumber,
                     self._parser.ErrorColumnNumber)
                    )
                err.code = 11 # XML_ERROR_UNDEFINED_ENTITY
                err.lineno = self._parser.ErrorLineNumber
                err.offset = self._parser.ErrorColumnNumber
                raise err
        elif prefix == "<" and text[:9] == "<!DOCTYPE":
            self._doctype = [] # inside a doctype declaration
        elif self._doctype is not None:
            # parse doctype contents
            if prefix == ">":
                self._doctype = None
                return
            text = text.strip()
            if not text:
                return
            self._doctype.append(text)
            n = len(self._doctype)
            if n > 2:
                type = self._doctype[1]
                if type == "PUBLIC" and n == 4:
                    name, type, pubid, system = self._doctype
                elif type == "SYSTEM" and n == 3:
                    name, type, system = self._doctype
                    pubid = None
                else:
                    return
                if pubid:
                    # strip the surrounding quotes
                    pubid = pubid[1:-1]
                if hasattr(self.target, "doctype"):
                    self.target.doctype(name, pubid, system[1:-1])
                elif self.doctype is not self._XMLParser__doctype:
                    # warn about deprecated call
                    self._XMLParser__doctype(name, pubid, system[1:-1])
                    self.doctype(name, pubid, system[1:-1])
                self._doctype = None
    ##
    # (Deprecated) Handles a doctype declaration.
    #
    # @param name Doctype name.
    # @param pubid Public identifier.
    # @param system System identifier.
    def doctype(self, name, pubid, system):
        """This method of XMLParser is deprecated."""
        warnings.warn(
            "This method of XMLParser is deprecated. Define doctype() "
            "method on the TreeBuilder target.",
            DeprecationWarning,
            )
    # sentinel, if doctype is redefined in a subclass
    __doctype = doctype
    ##
    # Feeds data to the parser.
    #
    # @param data Encoded data.
    def feed(self, data):
        """Feed a chunk of encoded data into the parser."""
        try:
            self._parser.Parse(data, 0)
        except self._error, v:
            self._raiseerror(v)
    ##
    # Finishes feeding data to the parser.
    #
    # @return An element structure.
    # @defreturn Element
    def close(self):
        """Finish parsing; return the element structure built by the target."""
        try:
            self._parser.Parse("", 1) # end of data
        except self._error, v:
            self._raiseerror(v)
        tree = self.target.close()
        del self.target, self._parser # get rid of circular references
        return tree
if sys.platform == 'cli':
    # IronPython has no expat; substitute the pure-Python builder.
    from . import SimpleXMLTreeBuilder
    XMLParser = SimpleXMLTreeBuilder.TreeBuilder
# compatibility
XMLTreeBuilder = XMLParser
# workaround circular import.
try:
    from ElementC14N import _serialize_c14n
    _serialize["c14n"] = _serialize_c14n
except ImportError:
    # canonical-XML output support is optional
    pass
| gpl-3.0 |
Taketrung/betfair.py | betfair/models.py | 2 | 14047 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from schematics.types import IntType
from schematics.types import LongType
from schematics.types import FloatType
from schematics.types import StringType
from schematics.types import BooleanType
from schematics.types.compound import DictType
from schematics.types.compound import ListType
from schematics.types.compound import ModelType
from betfair.meta.types import EnumType
from betfair.meta.types import DateTimeType
from betfair.meta.models import BetfairModel
from betfair import constants
class Event(BetfairModel):
    """A Betfair event (identifier, name, location and opening time)."""
    id = StringType()
    name = StringType()
    country_code = StringType()
    timezone = StringType()
    venue = StringType()
    open_date = DateTimeType()
class MarketDescription(BetfairModel):
    """Static description of a market: timing, betting type, rules, rates."""
    persistence_enabled = BooleanType(required=True)
    bsp_market = BooleanType(required=True)
    market_time = DateTimeType(required=True)
    suspend_time = DateTimeType(required=True)
    settle_time = DateTimeType()
    betting_type = EnumType(constants.MarketBettingType, required=True)
    turn_in_play_enabled = BooleanType(required=True)
    market_type = StringType(required=True)
    regulator = StringType(required=True)
    market_base_rate = FloatType(required=True)
    discount_allowed = BooleanType(required=True)
    wallet = StringType()
    rules = StringType()
    rules_has_date = BooleanType()
    clarifications = StringType()
    each_way_divisor = FloatType()
class RunnerCatalog(BetfairModel):
    """Catalogue data for one runner (selection) within a market."""
    selection_id = IntType(required=True)
    runner_name = StringType(required=True)
    handicap = FloatType(required=True)
    sort_priority = IntType(required=True)
    # free-form string metadata supplied by the API
    metadata = DictType(StringType)
class EventType(BetfairModel):
    """An event-type (sport/category) identifier and display name."""
    id = StringType()
    name = StringType()
class Competition(BetfairModel):
    """A competition (e.g. a league or tournament) identifier and name."""
    id = StringType()
    name = StringType()
class MarketCatalogue(BetfairModel):
    """Top-level catalogue entry for a market, with nested detail models."""
    market_id = StringType()
    market_name = StringType()
    market_start_time = DateTimeType()
    description = ModelType(MarketDescription)
    total_matched = FloatType()
    runners = ListType(ModelType(RunnerCatalog))
    event_type = ModelType(EventType)
    competition = ModelType(Competition)
    event = ModelType(Event)
class TimeRange(BetfairModel):
    """A from/to datetime window used in market filters."""
    # trailing underscore because 'from' is a Python keyword; the wire
    # name is mapped back via deserialize_from/serialized_name
    from_ = DateTimeType(deserialize_from='from', serialized_name='from')
    to = DateTimeType()
class MarketFilter(BetfairModel):
    """Filter criteria for market/event listing requests."""
    text_query = StringType()
    # NOTE(review): declared as a single string while the other *_ids
    # fields are lists -- confirm against the Betfair API definition
    exchange_ids = StringType()
    event_type_ids = ListType(StringType)
    event_ids = ListType(StringType)
    competition_ids = ListType(StringType)
    market_ids = ListType(StringType)
    venues = ListType(StringType)
    bsp_only = BooleanType()
    turn_in_play_enabled = BooleanType()
    in_play_only = BooleanType()
    market_betting_types = ListType(EnumType(constants.MarketBettingType))
    market_countries = ListType(StringType)
    market_type_codes = ListType(StringType)
    market_start_time = ModelType(TimeRange)
    with_orders = ListType(EnumType(constants.OrderStatus))
class PriceSize(BetfairModel):
    """A price point and the size (stake) associated with it."""
    price = FloatType(required=True)
    size = FloatType(required=True)
class StartingPrices(BetfairModel):
    """Starting-price (SP) data for a runner: projections and taken stakes."""
    near_price = FloatType()
    far_price = FloatType()
    back_stake_taken = ListType(ModelType(PriceSize))
    lay_liability_taken = ListType(ModelType(PriceSize))
    actual_SP = FloatType()
class ExchangePrices(BetfairModel):
    """Exchange ladder for a runner: back/lay availability and traded volume."""
    available_to_back = ListType(ModelType(PriceSize))
    available_to_lay = ListType(ModelType(PriceSize))
    traded_volume = ListType(ModelType(PriceSize))
class Order(BetfairModel):
    """An order on a runner, with its matched/remaining size breakdown."""
    bet_id = StringType(required=True)
    order_type = EnumType(constants.OrderType, required=True)
    status = EnumType(constants.OrderStatus, required=True)
    persistence_type = EnumType(constants.PersistenceType, required=True)
    side = EnumType(constants.Side, required=True)
    price = FloatType(required=True)
    size = FloatType(required=True)
    bsp_liability = BooleanType(required=True)
    placed_date = DateTimeType(required=True)
    avg_price_matched = FloatType()
    size_matched = FloatType()
    size_remaining = FloatType()
    size_lapsed = FloatType()
    size_cancelled = FloatType()
    size_voided = FloatType()
class Match(BetfairModel):
    """A matched portion of a bet (side, price, size, and match time)."""
    bet_id = StringType()
    match_id = StringType()
    side = EnumType(constants.Side, required=True)
    price = FloatType(required=True)
    size = FloatType(required=True)
    match_date = DateTimeType()
class Runner(BetfairModel):
    """Live (market book) state for a runner, including prices and orders."""
    selection_id = IntType(required=True)
    handicap = FloatType(required=True)
    status = EnumType(constants.RunnerStatus, required=True)
    adjustment_factor = FloatType()
    last_price_traded = FloatType()
    total_matched = FloatType()
    removal_date = DateTimeType()
    sp = ModelType(StartingPrices)
    ex = ModelType(ExchangePrices)
    orders = ListType(ModelType(Order))
    matches = ListType(ModelType(Match))
class MarketBook(BetfairModel):
    """Dynamic state of a market: status, matched totals and runner prices."""
    market_id = StringType(required=True)
    is_market_data_delayed = BooleanType(required=True)
    status = EnumType(constants.MarketStatus)
    bet_delay = IntType()
    bsp_reconciled = BooleanType()
    complete = BooleanType()
    inplay = BooleanType()
    number_of_winners = IntType()
    number_of_runners = IntType()
    number_of_active_runners = IntType()
    last_match_time = DateTimeType()
    total_matched = FloatType()
    total_available = FloatType()
    cross_matching = BooleanType()
    runners_voidable = BooleanType()
    version = FloatType()
    runners = ListType(ModelType(Runner))
class RunnerProfitAndLoss(BetfairModel):
    """Hypothetical profit/loss for one selection if it wins or loses."""
    selection_id = IntType()
    if_win = FloatType()
    if_lose = FloatType()
class MarketProfitAndLoss(BetfairModel):
    """Profit and loss summary for a whole market."""
    market_id = StringType()
    commission_applied = FloatType()
    profit_and_losses = ListType(ModelType(RunnerProfitAndLoss))
class ExBestOffersOverrides(BetfairModel):
    """Options controlling how EX_BEST_OFFERS price data is rolled up."""
    best_prices_depth = IntType()
    rollup_model = EnumType(constants.RollupModel)
    rollup_limit = IntType()
    rollup_liability_threshold = FloatType()
    rollup_liability_factor = IntType()
class PriceProjection(BetfairModel):
    """Selection of which price data to return for a market book request."""
    price_data = ListType(EnumType(constants.PriceData))
    ex_best_offers_overrides = ModelType(ExBestOffersOverrides)
    virtualise = BooleanType()
    rollover_stakes = BooleanType()
class LimitOrder(BetfairModel):
    """A plain limit order: size at a price, with a persistence policy."""
    size = FloatType(required=True)
    price = FloatType(required=True)
    persistence_type = EnumType(constants.PersistenceType, required=True)
class LimitOnCloseOrder(BetfairModel):
    """A limit-at-SP order capped by liability."""
    liability = FloatType(required=True)
    price = FloatType(required=True)
class MarketOnCloseOrder(BetfairModel):
    """A market-at-SP order expressed only by liability."""
    liability = FloatType(required=True)
# Results
# Each *Result pairs a catalogue entity with the number of markets it covers.
class CompetitionResult(BetfairModel):
    """A competition together with its associated market count."""
    competition = ModelType(Competition)
    market_count = IntType()
    competition_region = StringType()
class CountryCodeResult(BetfairModel):
    """A country code together with its associated market count."""
    country_code = StringType()
    market_count = IntType()
class EventResult(BetfairModel):
    """An event together with its associated market count."""
    event = ModelType(Event)
    market_count = IntType()
class EventTypeResult(BetfairModel):
    """An event type together with its associated market count."""
    event_type = ModelType(EventType)
    market_count = IntType()
class MarketTypeResult(BetfairModel):
    """A market type together with its associated market count."""
    market_type = StringType()
    market_count = IntType()
class TimeRangeResult(BetfairModel):
    """A time range together with its associated market count."""
    time_range = ModelType(TimeRange)
    market_count = IntType()
class VenueResult(BetfairModel):
    """A venue together with its associated market count."""
    venue = StringType()
    market_count = IntType()
# Instructions
class PlaceInstruction(BetfairModel):
    """Request to place one bet; exactly one of the *_order fields applies,
    selected by order_type."""
    order_type = EnumType(constants.OrderType, required=True)
    selection_id = IntType(required=True)
    handicap = FloatType()
    side = EnumType(constants.Side, required=True)
    limit_order = ModelType(LimitOrder)
    limit_on_close_order = ModelType(LimitOnCloseOrder)
    market_on_close_order = ModelType(MarketOnCloseOrder)
class CancelInstruction(BetfairModel):
    """Request to cancel a bet, optionally only part of its size."""
    bet_id = StringType(required=True)
    size_reduction = FloatType()
class ReplaceInstruction(BetfairModel):
    """Request to move an existing bet to a new price."""
    bet_id = StringType(required=True)
    new_price = FloatType(required=True)
class UpdateInstruction(BetfairModel):
    """Request to change the persistence policy of an existing bet."""
    bet_id = StringType(required=True)
    new_persistence_type = EnumType(constants.PersistenceType, required=True)
# Summary reports
class CurrentOrderSummary(BetfairModel):
    """One currently open (unsettled) order."""
    bet_id = StringType(required=True)
    market_id = StringType(required=True)
    selection_id = IntType(required=True)
    handicap = FloatType(required=True)
    price_size = ModelType(PriceSize, required=True)
    bsp_liability = FloatType(required=True)
    side = EnumType(constants.Side, required=True)
    status = EnumType(constants.OrderStatus, required=True)
    persistence_type = EnumType(constants.PersistenceType, required=True)
    order_type = EnumType(constants.OrderType, required=True)
    placed_date = DateTimeType(required=True)
    matched_date = DateTimeType()
    average_price_matched = FloatType()
    size_matched = FloatType()
    size_remaining = FloatType()
    size_lapsed = FloatType()
    size_cancelled = FloatType()
    size_voided = FloatType()
    regulator_auth_code = StringType()
    regulator_code = StringType()
class CurrentOrderSummaryReport(BetfairModel):
    """A page of open orders; more_available signals further pages."""
    current_orders = ListType(ModelType(CurrentOrderSummary), required=True)
    more_available = BooleanType(required=True)
class ItemDescription(BetfairModel):
    """Human-readable description attached to a cleared order."""
    event_type_desc = StringType()
    event_desc = StringType()
    market_desc = StringType()
    # NOTE(review): capital 'T' is inconsistent with the snake_case used
    # elsewhere (expected market_start_time); renaming would change the public
    # attribute and its serialised key — confirm against callers before fixing.
    market_start_Time = DateTimeType()
    runner_desc = StringType()
    number_of_winners = IntType()
class ClearedOrderSummary(BetfairModel):
    """One settled (cleared) order."""
    event_type_id = StringType()
    event_id = StringType()
    market_id = StringType()
    selection_id = IntType()
    handicap = FloatType()
    bet_id = StringType()
    placed_date = DateTimeType()
    persistence_type = EnumType(constants.PersistenceType)
    order_type = EnumType(constants.OrderType)
    side = EnumType(constants.Side)
    item_description = ModelType(ItemDescription)
    price_requested = FloatType()
    settled_date = DateTimeType()
    bet_count = IntType()
    commission = FloatType()
    price_matched = FloatType()
    price_reduced = BooleanType()
    size_settled = FloatType()
    profit = FloatType()
    size_cancelled = FloatType()
class ClearedOrderSummaryReport(BetfairModel):
    """A page of cleared orders; more_available signals further pages."""
    cleared_orders = ListType(ModelType(ClearedOrderSummary), required=True)
    more_available = BooleanType(required=True)
# Instruction reports
class BaseInstructionReport(BetfairModel):
    """Common status/error fields shared by all per-instruction reports."""
    status = EnumType(constants.InstructionReportStatus, required=True)
    error_code = EnumType(constants.InstructionReportErrorCode)
class PlaceInstructionReport(BaseInstructionReport):
    """Outcome of a single place instruction."""
    instruction = ModelType(PlaceInstruction, required=True)
    bet_id = StringType()
    placed_date = DateTimeType()
    average_price_matched = FloatType()
    size_matched = FloatType()
class CancelInstructionReport(BaseInstructionReport):
    """Outcome of a single cancel instruction."""
    instruction = ModelType(CancelInstruction)
    size_cancelled = FloatType(required=True)
    cancelled_date = DateTimeType()
class ReplaceInstructionReport(BaseInstructionReport):
    """Outcome of a replace: a cancel followed by a re-place."""
    cancel_instruction_report = ModelType(CancelInstructionReport)
    place_instruction_report = ModelType(PlaceInstructionReport)
class UpdateInstructionReport(BaseInstructionReport):
    """Outcome of a single update instruction."""
    instruction = ModelType(UpdateInstruction, required=True)
# Execution reports
class BaseExecutionReport(BetfairModel):
    """Common fields of a whole-request execution report."""
    customer_ref = StringType()
    status = EnumType(constants.ExecutionReportStatus, required=True)
    error_code = EnumType(constants.ExecutionReportErrorCode)
    market_id = StringType()
class PlaceExecutionReport(BaseExecutionReport):
    """Execution report for a batch of place instructions."""
    instruction_reports = ListType(ModelType(PlaceInstructionReport))
class CancelExecutionReport(BaseExecutionReport):
    """Execution report for a batch of cancel instructions."""
    instruction_reports = ListType(ModelType(CancelInstructionReport))
class ReplaceExecutionReport(BaseExecutionReport):
    """Execution report for a batch of replace instructions."""
    instruction_reports = ListType(ModelType(ReplaceInstructionReport))
class UpdateExecutionReport(BaseExecutionReport):
    """Execution report for a batch of update instructions."""
    instruction_reports = ListType(ModelType(UpdateInstructionReport))
# Accounts
class AccountFundsResponse(BetfairModel):
    """Balance and exposure information for one wallet."""
    available_to_bet_balance = FloatType()
    exposure = FloatType()
    retained_commission = FloatType()
    exposure_limit = FloatType()
    discount_rate = FloatType()
    points_balance = IntType()
    wallet = EnumType(constants.Wallet)
class StatementLegacyData(BetfairModel):
    """Legacy per-bet detail attached to an account statement item."""
    avg_price = FloatType()
    bet_size = FloatType()
    bet_type = StringType()
    bet_category_type = StringType()
    commission_rate = StringType()
    event_id = LongType()
    event_type_id = LongType()
    full_market_name = StringType()
    gross_bet_amount = FloatType()
    market_name = StringType()
    market_type = StringType()
    placed_date = DateTimeType()
    selection_id = LongType()
    selection_name = StringType()
    start_date = DateTimeType()
    transaction_type = StringType()
    transaction_id = LongType()
    win_lose = StringType()
class StatementItem(BetfairModel):
    """One line of the account statement."""
    ref_id = StringType()
    item_date = DateTimeType()
    amount = FloatType()
    balance = FloatType()
    item_class = EnumType(constants.ItemClass)
    # Free-form string map whose keys depend on item_class.
    item_class_data = DictType(StringType)
    legacy_data = ModelType(StatementLegacyData)
class AccountDetailsResponse(BetfairModel):
    """Static details about the authenticated account."""
    currency_code = StringType()
    first_name = StringType()
    last_name = StringType()
    locale_code = StringType()
    region = StringType()
    timezone = StringType()
    discount_rate = FloatType()
    points_balance = IntType()
    country_code = StringType()
class AccountStatementReport(BetfairModel):
    """A page of statement items; more_available signals further pages."""
    account_statement = ListType(ModelType(StatementItem))
    more_available = BooleanType()
class CurrencyRate(BetfairModel):
    """Exchange rate for one currency."""
    currency_code = StringType()
    rate = FloatType()
class TransferResponse(BetfairModel):
    """Result of a wallet-to-wallet funds transfer."""
    transaction_id = StringType()
| mit |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/django/core/management/__init__.py | 26 | 14905 | from __future__ import unicode_literals
import os
import pkgutil
import sys
from collections import OrderedDict, defaultdict
from importlib import import_module
import django
from django.apps import apps
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.management.base import (
BaseCommand, CommandError, CommandParser, handle_default_options,
)
from django.core.management.color import color_style
from django.utils import autoreload, lru_cache, six
from django.utils._os import npath, upath
from django.utils.encoding import force_text
def find_commands(management_dir):
    """
    Given a path to a management directory, returns a list of all the command
    names that are available.
    Returns an empty list if no commands are defined.
    """
    commands_path = os.path.join(management_dir, 'commands')
    names = []
    for _, modname, ispkg in pkgutil.iter_modules([npath(commands_path)]):
        # Packages and private (underscore-prefixed) modules are not commands.
        if ispkg or modname.startswith('_'):
            continue
        names.append(modname)
    return names
def load_command_class(app_name, name):
    """
    Given a command name and an application name, returns the Command
    class instance. All errors raised by the import process
    (ImportError, AttributeError) are allowed to propagate.
    """
    module_path = '%s.management.commands.%s' % (app_name, name)
    command_module = import_module(module_path)
    return command_module.Command()
# Unbounded cache is safe here: the function takes no arguments, so the cache
# holds at most one entry.
@lru_cache.lru_cache(maxsize=None)
def get_commands():
    """
    Returns a dictionary mapping command names to their callback applications.
    This works by looking for a management.commands package in django.core, and
    in each installed application -- if a commands package exists, all commands
    in that package are registered.
    Core commands are always included. If a settings module has been
    specified, user-defined commands will also be included.
    The dictionary is in the format {command_name: app_name}. Key-value
    pairs from this dictionary can then be used in calls to
    load_command_class(app_name, command_name)
    If a specific version of a command must be loaded (e.g., with the
    startapp command), the instantiated module can be placed in the
    dictionary in place of the application name.
    The dictionary is cached on the first call and reused on subsequent
    calls.
    """
    # Core commands ship inside this package itself.
    commands = {name: 'django.core' for name in find_commands(upath(__path__[0]))}
    # Without configured settings, only the core commands are available.
    if not settings.configured:
        return commands
    # Reversed so that earlier apps in INSTALLED_APPS override later ones.
    for app_config in reversed(list(apps.get_app_configs())):
        path = os.path.join(app_config.path, 'management')
        commands.update({name: app_config.name for name in find_commands(path)})
    return commands
def call_command(command_name, *args, **options):
    """
    Calls the given command, with the given options and args/kwargs.
    This is the primary API you should use for calling specific commands.
    `name` may be a string or a command object. Using a string is preferred
    unless the command object is required for further processing or testing.
    Some examples:
    call_command('migrate')
    call_command('shell', plain=True)
    call_command('sqlmigrate', 'myapp')
    from django.core.management.commands import flush
    cmd = flush.Command()
    call_command(cmd, verbosity=0, interactive=False)
    # Do something with cmd ...
    """
    if isinstance(command_name, BaseCommand):
        # Command object passed in.
        command = command_name
        command_name = command.__class__.__module__.split('.')[-1]
    else:
        # Load the command object by name.
        try:
            app_name = get_commands()[command_name]
        except KeyError:
            raise CommandError("Unknown command: %r" % command_name)
        if isinstance(app_name, BaseCommand):
            # If the command is already loaded, use it directly.
            command = app_name
        else:
            command = load_command_class(app_name, command_name)
    # Simulate argument parsing to get the option defaults (see #10080 for details).
    parser = command.create_parser('', command_name)
    # Use the `dest` option name from the parser option
    opt_mapping = {
        sorted(s_opt.option_strings)[0].lstrip('-').replace('-', '_'): s_opt.dest
        for s_opt in parser._actions if s_opt.option_strings
    }
    # Translate caller-supplied keyword names to the parser's `dest` names.
    arg_options = {opt_mapping.get(key, key): value for key, value in options.items()}
    defaults = parser.parse_args(args=[force_text(a) for a in args])
    # Explicit keyword options win over parsed defaults.
    defaults = dict(defaults._get_kwargs(), **arg_options)
    # Move positional args out of options to mimic legacy optparse
    args = defaults.pop('args', ())
    # System checks were already run (or deliberately skipped) by the caller.
    if 'skip_checks' not in options:
        defaults['skip_checks'] = True
    return command.execute(*args, **defaults)
class ManagementUtility(object):
    """
    Encapsulates the logic of the django-admin and manage.py utilities.
    """
    def __init__(self, argv=None):
        # Copy sys.argv so later mutation (see execute) never alters the
        # caller's list.
        self.argv = argv or sys.argv[:]
        self.prog_name = os.path.basename(self.argv[0])
        # Set by execute() when settings import fails; used in help output.
        self.settings_exception = None
    def main_help_text(self, commands_only=False):
        """
        Returns the script's main help text, as a string.
        """
        if commands_only:
            usage = sorted(get_commands().keys())
        else:
            usage = [
                "",
                "Type '%s help <subcommand>' for help on a specific subcommand." % self.prog_name,
                "",
                "Available subcommands:",
            ]
            # Group command names by the app that provides them.
            commands_dict = defaultdict(lambda: [])
            for name, app in six.iteritems(get_commands()):
                if app == 'django.core':
                    app = 'django'
                else:
                    app = app.rpartition('.')[-1]
                commands_dict[app].append(name)
            style = color_style()
            for app in sorted(commands_dict.keys()):
                usage.append("")
                usage.append(style.NOTICE("[%s]" % app))
                for name in sorted(commands_dict[app]):
                    usage.append("    %s" % name)
            # Output an extra note if settings are not properly configured
            if self.settings_exception is not None:
                usage.append(style.NOTICE(
                    "Note that only Django core commands are listed "
                    "as settings are not properly configured (error: %s)."
                    % self.settings_exception))
        return '\n'.join(usage)
    def fetch_command(self, subcommand):
        """
        Tries to fetch the given subcommand, printing a message with the
        appropriate command called from the command line (usually
        "django-admin" or "manage.py") if it can't be found.
        """
        # Get commands outside of try block to prevent swallowing exceptions
        commands = get_commands()
        try:
            app_name = commands[subcommand]
        except KeyError:
            if os.environ.get('DJANGO_SETTINGS_MODULE'):
                # If `subcommand` is missing due to misconfigured settings, the
                # following line will retrigger an ImproperlyConfigured exception
                # (get_commands() swallows the original one) so the user is
                # informed about it.
                settings.INSTALLED_APPS
            else:
                sys.stderr.write("No Django settings specified.\n")
            sys.stderr.write(
                "Unknown command: %r\nType '%s help' for usage.\n"
                % (subcommand, self.prog_name)
            )
            sys.exit(1)
        if isinstance(app_name, BaseCommand):
            # If the command is already loaded, use it directly.
            klass = app_name
        else:
            klass = load_command_class(app_name, subcommand)
        return klass
    def autocomplete(self):
        """
        Output completion suggestions for BASH.
        The output of this function is passed to BASH's `COMREPLY` variable and
        treated as completion suggestions. `COMREPLY` expects a space
        separated string as the result.
        The `COMP_WORDS` and `COMP_CWORD` BASH environment variables are used
        to get information about the cli input. Please refer to the BASH
        man-page for more information about this variables.
        Subcommand options are saved as pairs. A pair consists of
        the long option string (e.g. '--exclude') and a boolean
        value indicating if the option requires arguments. When printing to
        stdout, an equal sign is appended to options which require arguments.
        Note: If debugging this function, it is recommended to write the debug
        output in a separate file. Otherwise the debug output will be treated
        and formatted as potential completion suggestions.
        """
        # Don't complete if user hasn't sourced bash_completion file.
        if 'DJANGO_AUTO_COMPLETE' not in os.environ:
            return
        cwords = os.environ['COMP_WORDS'].split()[1:]
        cword = int(os.environ['COMP_CWORD'])
        try:
            curr = cwords[cword - 1]
        except IndexError:
            curr = ''
        subcommands = list(get_commands()) + ['help']
        options = [('--help', False)]
        # subcommand
        if cword == 1:
            print(' '.join(sorted(filter(lambda x: x.startswith(curr), subcommands))))
        # subcommand options
        # special case: the 'help' subcommand has no options
        elif cwords[0] in subcommands and cwords[0] != 'help':
            subcommand_cls = self.fetch_command(cwords[0])
            # special case: add the names of installed apps to options
            if cwords[0] in ('dumpdata', 'sqlmigrate', 'sqlsequencereset', 'test'):
                try:
                    app_configs = apps.get_app_configs()
                    # Get the last part of the dotted path as the app name.
                    options.extend((app_config.label, 0) for app_config in app_configs)
                except ImportError:
                    # Fail silently if DJANGO_SETTINGS_MODULE isn't set. The
                    # user will find out once they execute the command.
                    pass
            parser = subcommand_cls.create_parser('', cwords[0])
            options.extend(
                (sorted(s_opt.option_strings)[0], s_opt.nargs != 0)
                for s_opt in parser._actions if s_opt.option_strings
            )
            # filter out previously specified options from available options
            prev_opts = [x.split('=')[0] for x in cwords[1:cword - 1]]
            options = [opt for opt in options if opt[0] not in prev_opts]
            # filter options by current input
            options = sorted((k, v) for k, v in options if k.startswith(curr))
            for option in options:
                opt_label = option[0]
                # append '=' to options which require args
                if option[1]:
                    opt_label += '='
                print(opt_label)
        # Exit code of the bash completion function is never passed back to
        # the user, so it's safe to always exit with 0.
        # For more details see #25420.
        sys.exit(0)
    def execute(self):
        """
        Given the command-line arguments, this figures out which subcommand is
        being run, creates a parser appropriate to that command, and runs it.
        """
        try:
            subcommand = self.argv[1]
        except IndexError:
            subcommand = 'help'  # Display help if no arguments were given.
        # Preprocess options to extract --settings and --pythonpath.
        # These options could affect the commands that are available, so they
        # must be processed early.
        parser = CommandParser(None, usage="%(prog)s subcommand [options] [args]", add_help=False)
        parser.add_argument('--settings')
        parser.add_argument('--pythonpath')
        parser.add_argument('args', nargs='*')  # catch-all
        try:
            options, args = parser.parse_known_args(self.argv[2:])
            handle_default_options(options)
        except CommandError:
            pass  # Ignore any option errors at this point.
        try:
            settings.INSTALLED_APPS
        except ImproperlyConfigured as exc:
            self.settings_exception = exc
        if settings.configured:
            # Start the auto-reloading dev server even if the code is broken.
            # The hardcoded condition is a code smell but we can't rely on a
            # flag on the command class because we haven't located it yet.
            if subcommand == 'runserver' and '--noreload' not in self.argv:
                try:
                    autoreload.check_errors(django.setup)()
                except Exception:
                    # The exception will be raised later in the child process
                    # started by the autoreloader. Pretend it didn't happen by
                    # loading an empty list of applications.
                    apps.all_models = defaultdict(OrderedDict)
                    apps.app_configs = OrderedDict()
                    apps.apps_ready = apps.models_ready = apps.ready = True
                    # Remove options not compatible with the built-in runserver
                    # (e.g. options for the contrib.staticfiles' runserver).
                    # Changes here require manually testing as described in
                    # #27522.
                    _parser = self.fetch_command('runserver').create_parser('django', 'runserver')
                    _options, _args = _parser.parse_known_args(self.argv[2:])
                    for _arg in _args:
                        self.argv.remove(_arg)
            # In all other cases, django.setup() is required to succeed.
            else:
                django.setup()
        self.autocomplete()
        if subcommand == 'help':
            if '--commands' in args:
                sys.stdout.write(self.main_help_text(commands_only=True) + '\n')
            elif len(options.args) < 1:
                sys.stdout.write(self.main_help_text() + '\n')
            else:
                self.fetch_command(options.args[0]).print_help(self.prog_name, options.args[0])
        # Special-cases: We want 'django-admin --version' and
        # 'django-admin --help' to work, for backwards compatibility.
        elif subcommand == 'version' or self.argv[1:] == ['--version']:
            sys.stdout.write(django.get_version() + '\n')
        elif self.argv[1:] in (['--help'], ['-h']):
            sys.stdout.write(self.main_help_text() + '\n')
        else:
            self.fetch_command(subcommand).run_from_argv(self.argv)
def execute_from_command_line(argv=None):
    """
    A simple method that runs a ManagementUtility.
    """
    ManagementUtility(argv).execute()
| gpl-3.0 |
JinXinDeep/tensorflow | tensorflow/python/kernel_tests/bias_op_test.py | 8 | 4045 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for BiasAdd."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.python.platform
import numpy as np
import tensorflow as tf
class BiasAddTest(tf.test.TestCase):
  """Tests for tf.nn.bias_add: numpy reference, shape validation, gradients."""

  def _npBias(self, inputs, bias):
    """Numpy reference implementation: add `bias` along the last axis."""
    assert len(bias.shape) == 1
    assert inputs.shape[-1] == bias.shape[0]
    # Reshape bias to (1, ..., 1, n) so it broadcasts over leading dims.
    return inputs + bias.reshape(([1] * (len(inputs.shape) - 1))
                                 + [bias.shape[0]])

  def testNpBias(self):
    """Sanity-check the numpy reference itself."""
    self.assertAllClose(np.array([[11, 22, 33], [41, 52, 63]]),
                        self._npBias(np.array([[10, 20, 30], [40, 50, 60]]),
                                     np.array([1, 2, 3])))

  def _testBias(self, np_inputs, np_bias, use_gpu=False):
    """Compare tf.nn.bias_add against the numpy reference on one device."""
    np_val = self._npBias(np_inputs, np_bias)
    with self.test_session(use_gpu=use_gpu):
      tf_val = tf.nn.bias_add(np_inputs, np_bias).eval()
    self.assertAllClose(np_val, tf_val)

  def _testAll(self, np_inputs, np_bias):
    """Run on CPU always; also on GPU for float types (GPU kernel coverage)."""
    self._testBias(np_inputs, np_bias, use_gpu=False)
    if np_inputs.dtype == np.float32 or np_inputs.dtype == np.float64:
      self._testBias(np_inputs, np_bias, use_gpu=True)

  def testInputDims(self):
    """bias_add rejects inputs with fewer than 2 dimensions."""
    with self.assertRaises(ValueError):
      tf.nn.bias_add([1, 2], [1])

  def testBiasVec(self):
    """bias_add rejects a non-vector bias."""
    with self.assertRaises(ValueError):
      tf.nn.bias_add(tf.reshape([1, 2], shape=[1, 2]),
                     tf.reshape([1, 2], shape=[1, 2]))

  def testBiasInputsMatch(self):
    """bias_add rejects a bias whose length differs from the last input dim."""
    with self.assertRaises(ValueError):
      tf.nn.bias_add(tf.reshape([1, 2], shape=[1, 2]),
                     tf.reshape([1], shape=[1]))

  def testIntTypes(self):
    for t in [np.int8, np.int16, np.int32, np.int64]:
      self._testAll(np.array([[10, 20, 30], [40, 50, 60]]).astype(t),
                    np.array([1, 2, 3]).astype(t))

  def testFloatTypes(self):
    for t in [np.float32, np.float64]:
      self._testAll(np.random.rand(4, 3, 3).astype(t),
                    np.random.rand(3).astype(t))

  def testGradientTensor(self):
    """Numerically check the gradient w.r.t. the input tensor."""
    with self.test_session():
      t = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[3, 2],
                      dtype=tf.float64)
      b = tf.constant([1.3, 2.4], dtype=tf.float64)
      bo = tf.nn.bias_add(t, b)
      err = tf.test.compute_gradient_error(t, [3, 2], bo, [3, 2])
      print("bias add tensor gradient err = ", err)
      self.assertLess(err, 1e-10)

  def testGradientBias(self):
    """Numerically check the gradient w.r.t. the bias vector."""
    with self.test_session():
      t = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[3, 2],
                      dtype=tf.float64)
      b = tf.constant([1.3, 2.4], dtype=tf.float64)
      bo = tf.nn.bias_add(t, b)
      err = tf.test.compute_gradient_error(b, [2], bo, [3, 2])
      print("bias add bias gradient err = ", err)
      self.assertLess(err, 1e-10)

  def testGradientTensor4D(self):
    """Gradient check on a 4-D input (the common NHWC convolution case)."""
    with self.test_session():
      s = [2, 3, 4, 2]
      x = np.arange(1.0, 49.0).reshape(s).astype(np.float32)
      t = tf.constant(x, shape=s, dtype=tf.float32)
      b = tf.constant([1.3, 2.4], dtype=tf.float32)
      bo = tf.nn.bias_add(t, b)
      err = tf.test.compute_gradient_error(t, s, bo, s, x_init_value=x)
      print("bias add tensor gradient err = ", err)
      self.assertLess(err, 1e-3)
# Standard TensorFlow test entry point: discovers and runs the cases above.
if __name__ == "__main__":
  tf.test.main()
| apache-2.0 |
jezdez/kuma | vendor/packages/logilab/common/decorators.py | 93 | 8868 | # copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
""" A few useful function/method decorators. """
from __future__ import print_function
__docformat__ = "restructuredtext en"
import sys
import types
from time import clock, time
from inspect import isgeneratorfunction, getargspec
from logilab.common.compat import method_type
# XXX rewrite so we can use the decorator syntax when keyarg has to be specified
class cached_decorator(object):
    """Parameterized form of :func:`cached`.

    Picks a cache implementation depending on the decorated callable's arity
    and on `keyarg`:

    * single-argument callable (only ``self``) or ``keyarg == 0`` -> one cached
      value per instance;
    * ``keyarg`` set -> cache keyed on that positional argument only;
    * otherwise -> cache keyed on the full positional-argument tuple.
    """
    def __init__(self, cacheattr=None, keyarg=None):
        # cacheattr: name of the instance attribute holding the cache
        # (defaults to '_<funcname>_cache_' when None).
        self.cacheattr = cacheattr
        self.keyarg = keyarg
    def __call__(self, callableobj=None):
        # Generators cannot be cached: the returned iterator is single-use.
        assert not isgeneratorfunction(callableobj), \
            'cannot cache generator function: %s' % callableobj
        # NOTE(review): getargspec is removed in Python 3.11+; this module
        # targets legacy interpreters via logilab.common.compat.
        if len(getargspec(callableobj).args) == 1 or self.keyarg == 0:
            cache = _SingleValueCache(callableobj, self.cacheattr)
        elif self.keyarg:
            cache = _MultiValuesKeyArgCache(callableobj, self.keyarg, self.cacheattr)
        else:
            cache = _MultiValuesCache(callableobj, self.cacheattr)
        return cache.closure()
class _SingleValueCache(object):
    """Cache holding a single value per instance, stored on the instance
    itself under `cacheattr` so it follows the instance's lifetime."""
    def __init__(self, callableobj, cacheattr=None):
        self.callable = callableobj
        if cacheattr is None:
            self.cacheattr = '_%s_cache_' % callableobj.__name__
        else:
            # The cache attribute must not shadow the method itself.
            assert cacheattr != callableobj.__name__
            self.cacheattr = cacheattr
    # `__me` is the cache object; `self` is the decorated method's instance.
    # (The name is deliberately mangled by the class to avoid clashes.)
    def __call__(__me, self, *args):
        try:
            return self.__dict__[__me.cacheattr]
        except KeyError:
            # First call: compute, memoize on the instance, return.
            value = __me.callable(self, *args)
            setattr(self, __me.cacheattr, value)
            return value
    def closure(self):
        """Return the wrapper function actually installed on the class."""
        def wrapped(*args, **kwargs):
            return self.__call__(*args, **kwargs)
        # Expose the cache object so clear_cache()/copy_cache() can find it.
        wrapped.cache_obj = self
        try:
            wrapped.__doc__ = self.callable.__doc__
            wrapped.__name__ = self.callable.__name__
        except:
            pass
    	return wrapped
    def clear(self, holder):
        # Drop the cached value from *holder*; a no-op when nothing is cached.
        holder.__dict__.pop(self.cacheattr, None)
class _MultiValuesCache(_SingleValueCache):
    """Cache keyed on the full positional-argument tuple."""
    def _get_cache(self, holder):
        # Lazily create the per-instance dict mapping args -> result.
        try:
            _cache = holder.__dict__[self.cacheattr]
        except KeyError:
            _cache = {}
            setattr(holder, self.cacheattr, _cache)
        return _cache
    def __call__(__me, self, *args, **kwargs):
        # NOTE: kwargs are accepted but not part of the cache key.
        _cache = __me._get_cache(self)
        try:
            return _cache[args]
        except KeyError:
            _cache[args] = __me.callable(self, *args)
        return _cache[args]
class _MultiValuesKeyArgCache(_MultiValuesCache):
    """Cache keyed on one designated positional argument (1-based index)."""
    def __init__(self, callableobj, keyarg, cacheattr=None):
        super(_MultiValuesKeyArgCache, self).__init__(callableobj, cacheattr)
        self.keyarg = keyarg
    def __call__(__me, self, *args, **kwargs):
        _cache = __me._get_cache(self)
        # keyarg is 1-based (argument 0 is `self`, not present in *args).
        key = args[__me.keyarg-1]
        try:
            return _cache[key]
        except KeyError:
            _cache[key] = __me.callable(self, *args, **kwargs)
        return _cache[key]
def cached(callableobj=None, keyarg=None, **kwargs):
    """Simple decorator to cache result of method call."""
    kwargs['keyarg'] = keyarg
    # With no callable we are being used as @cached(...): return the decorator
    # itself; otherwise decorate immediately (@cached without parentheses).
    decorate = cached_decorator(**kwargs)
    return decorate if callableobj is None else decorate(callableobj)
class cachedproperty(object):
    """ Provides a cached property equivalent to the stacking of
    @cached and @property, but more efficient.
    After first usage, the <property_name> becomes part of the object's
    __dict__. Doing:
    del obj.<property_name> empties the cache.
    Idea taken from the pyramid_ framework and the mercurial_ project.
    .. _pyramid: http://pypi.python.org/pypi/pyramid
    .. _mercurial: http://pypi.python.org/pypi/Mercurial
    """
    # Only the wrapped callable is stored on the descriptor itself.
    __slots__ = ('wrapped',)
    def __init__(self, wrapped):
        # The wrapped callable must have a name: the cached value is stored
        # on the instance under that same name (see __get__).
        try:
            wrapped.__name__
        except AttributeError:
            raise TypeError('%s must have a __name__ attribute' %
                            wrapped)
        self.wrapped = wrapped
    @property
    def __doc__(self):
        doc = getattr(self.wrapped, '__doc__', None)
        return ('<wrapped by the cachedproperty decorator>%s'
                % ('\n%s' % doc if doc else ''))
    def __get__(self, inst, objtype=None):
        # Class-level access returns the descriptor itself.
        if inst is None:
            return self
        val = self.wrapped(inst)
        # Shadow the descriptor with the computed value: being a non-data
        # descriptor, it is bypassed on subsequent attribute lookups.
        setattr(inst, self.wrapped.__name__, val)
        return val
def get_cache_impl(obj, funcname):
    """Return the cache object behind the @cached method (or property)
    named *funcname* on *obj*'s class."""
    member = getattr(obj.__class__, funcname)
    # A @cached function wrapped in a property hides behind its getter.
    if isinstance(member, property):
        member = member.fget
    return member.cache_obj
def clear_cache(obj, funcname):
    """Clear a cache handled by the :func:`cached` decorator. If 'x' class has
    @cached on its method `foo`, type
    >>> clear_cache(x, 'foo')
    to purge this method's cache on the instance.
    """
    impl = get_cache_impl(obj, funcname)
    impl.clear(obj)
def copy_cache(obj, funcname, cacheobj):
    """Copy cache for <funcname> from cacheobj to obj."""
    attrname = get_cache_impl(obj, funcname).cacheattr
    # Nothing to copy when the source instance has no cache yet.
    if attrname in cacheobj.__dict__:
        setattr(obj, attrname, cacheobj.__dict__[attrname])
class wproperty(object):
    """Data descriptor built from a setter function: assignments go through
    the given function, while reads return the ``_<name>`` attribute the
    setter is expected to maintain on the instance.
    """
    def __init__(self, setfunc):
        self.setfunc = setfunc
        self.attrname = '_' + setfunc.__name__
    def __get__(self, obj, cls):
        # Class-level access is not supported: an instance is required.
        assert obj is not None
        return getattr(obj, self.attrname)
    def __set__(self, obj, value):
        self.setfunc(obj, value)
class classproperty(object):
    """Read-only property computed on the class rather than on instances."""

    def __init__(self, get):
        self.get = get

    def __get__(self, inst, cls):
        # Always hand the *class* to the getter, even for instance access.
        return self.get(cls)
class iclassmethod(object):
    '''Descriptor for method which should be available as class method if called
    on the class or instance method if called on an instance.
    '''
    def __init__(self, func):
        self.func = func
    def __get__(self, instance, objtype):
        # Class access: bind to the class itself (classmethod behaviour);
        # instance access: bind to the instance (regular method behaviour).
        # method_type comes from logilab.common.compat (py2/py3 signatures differ).
        if instance is None:
            return method_type(self.func, objtype, objtype.__class__)
        return method_type(self.func, instance, objtype)
    def __set__(self, instance, value):
        # Read-only descriptor.
        raise AttributeError("can't set attribute")
def timed(f):
    """Decorator printing the CPU ('clock') and wall-clock ('time') duration
    of each call to *f*, then returning *f*'s result unchanged.

    Uses time.perf_counter/process_time instead of time.clock, which was
    removed in Python 3.8.
    """
    from functools import wraps
    from time import perf_counter, process_time

    @wraps(f)  # preserve __name__/__doc__ of the wrapped function
    def wrap(*args, **kwargs):
        t = perf_counter()
        c = process_time()
        res = f(*args, **kwargs)
        print('%s clock: %.9f / time: %.9f' % (f.__name__,
                                               process_time() - c,
                                               perf_counter() - t))
        return res
    return wrap
def locked(acquire, release):
    """Decorator taking two methods to acquire/release a lock as argument,
    returning a decorator function which will call the inner method after
    having called acquire(self) et will call release(self) afterwards.
    """
    def _decorate(func):
        def _guarded(self, *args, **kwargs):
            acquire(self)
            try:
                return func(self, *args, **kwargs)
            finally:
                # Released whether the wrapped call returns or raises.
                release(self)
        return _guarded
    return _decorate
def monkeypatch(klass, methodname=None):
    """Decorator extending class with the decorated callable. This is basically
    a syntactic sugar vs class assignment.
    >>> class A:
    ...     pass
    >>> @monkeypatch(A)
    ... def meth(self):
    ...     return 12
    ...
    >>> a = A()
    >>> a.meth()
    12
    >>> @monkeypatch(A, 'foo')
    ... def meth(self):
    ...     return 12
    ...
    >>> a.foo()
    12
    """
    def _attach(func):
        name = methodname
        if not name:
            # Fall back on the callable's own name; some callables
            # (e.g. functools.partial) do not have one.
            try:
                name = func.__name__
            except AttributeError:
                raise AttributeError('%s has no __name__ attribute: '
                                     'you should provide an explicit `methodname`'
                                     % func)
        setattr(klass, name, func)
        # Return the callable unchanged so it stays usable at module level.
        return func
    return _attach
| mpl-2.0 |
kif/dahu | plugins/id31/__init__.py | 1 | 6007 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Data Analysis plugin tailored for ID31
* integrate_simple: simple demo of a simple integrator
* integrate: a more advanced options
"""
__authors__ = ["Jérôme Kieffer"]
__contact__ = "Jerome.Kieffer@ESRF.eu"
__license__ = "MIT"
__copyright__ = "European Synchrotron Radiation Facility, Grenoble, France"
__date__ = "06/02/2020"
__status__ = "development"
version = "0.1.0"
import os
import numpy
from dahu.plugin import Plugin, plugin_from_function
from dahu.factory import register
from dahu.cache import DataCache
from threading import Semaphore
import logging
logger = logging.getLogger("plugin.pyFAI")
import json
try:
import pyFAI
from pyFAI.worker import make_ai
except ImportError:
logger.error("Failed to import PyFAI: download and install it from pypi")
try:
import fabio
except ImportError:
logger.error("Failed to import Fabio: download and install it from pypi")
from . import calculate_flux as flux
def integrate_simple(poni_file, image_file, curve_file, nbins=1000):
    """Simple azimuthal integration for a single frame (very inefficient)

    A fresh integrator is loaded for every call, which is why this is only
    a demo and not suited to batch processing.

    :param poni_file: configuration of the geometry
    :param image_file: image file to integrate
    :param curve_file: output file
    :param nbins: number of output bins
    """
    integrator = pyFAI.load(poni_file)
    frame = fabio.open(image_file).data
    integrator.integrate1d(frame, nbins, filename=curve_file,
                           unit="2th_deg", method="splitpixel")
    return {"out_file": curve_file}
plugin_from_function(integrate_simple)
# Use the register decorator to make it available from Dahu
@register
class Integrate(Plugin):
    """This is the basic plugin of PyFAI for azimuthal integration

    Azimuthally integrates every image listed in ``input_files`` and writes
    one 1D curve (``.dat`` file) per image into ``output_dir``.

    Input parameters:

    :param poni_file: configuration of the geometry
    :param input_files: list of image files to integrate
    :param output_dir: destination directory for the curve files

    Typical JSON file:
    {"poni_file": "/tmp/example.poni",
     "input_files": ["/tmp/file1.edf", "/tmp/file2.edf"],
     "monitor_values": [1, 1.1],
     "npt": 2000,
     "unit": "2th_deg",
    }
    """
    _ais = DataCache()  # key: str(ai), value= ai -- cache shared between runs

    def __init__(self):
        Plugin.__init__(self)
        self.ai = None  # this is the azimuthal integrator to use
        self.dest_dir = None
        self.json_data = None
        # BUGFIX: this attribute was misspelled ``ntp`` while setup() reads
        # ``self.npt`` as the fallback, which raised AttributeError.
        self.npt = 3000
        self.input_files = []
        self.method = "full_ocl_csr"
        self.unit = "q_nm^-1"
        self.output_files = []
        self.mask = ""
        self.wavelength = None
        self.dummy = -1
        self.delta_dummy = 0
        self.polarization_factor = None
        self.do_SA = False
        self.norm = 1e12  # incident-flux normalization divisor

    def setup(self, kwargs):
        """Parse the plugin input and prepare the azimuthal integrator."""
        logger.debug("Integrate.setup")
        Plugin.setup(self, kwargs)
        if "output_dir" not in self.input:
            self.log_error("output_dir not in input")
            # this needs to be added in the SPEC macro
        self.dest_dir = os.path.abspath(self.input["output_dir"])
        if "input_files" in self.input:
            # BUGFIX: input_files was never read from the plugin input, so
            # process() always iterated an empty list and produced nothing.
            self.input_files = self.input["input_files"]
        if "json" not in self.input:
            self.log_error("json not in input")
        json_path = self.input.get("json", "")
        if not os.path.exists(json_path):
            self.log_error("Integration setup file (JSON): %s does not exist" % json_path, do_raise=True)
        # Use a context manager so the JSON file handle is not leaked.
        with open(json_path) as json_file:
            self.json_data = json.load(json_file)
        ai = make_ai(self.json_data)
        stored = self._ais.get(str(ai), ai)
        if stored is ai:
            self.ai = stored
        else:
            # A cached integrator exists for this geometry: work on a private
            # copy so concurrent jobs do not share mutable state.
            self.ai = stored.__deepcopy__()
        self.npt = int(self.json_data.get("npt", self.npt))
        self.unit = self.json_data.get("unit", self.unit)
        self.wavelength = self.json_data.get("wavelength", self.wavelength)
        # Guard against a missing "mask" key (was a hard KeyError before).
        if os.path.exists(self.json_data.get("mask", "")):
            self.mask = self.json_data.get("mask", self.mask)
        self.dummy = self.json_data.get("val_dummy", self.dummy)
        self.delta_dummy = self.json_data.get("delta_dummy", self.delta_dummy)
        # NOTE: "do_polarziation" is the (misspelled) key actually emitted by
        # the SPEC macro; keep the key, but don't crash when it is absent.
        if self.json_data.get("do_polarziation"):
            self.polarization_factor = self.json_data.get("polarization_factor", self.polarization_factor)
        self.do_SA = self.json_data.get("do_SA", self.do_SA)
        self.norm = self.json_data.get("norm", self.norm)  # need to be added in the spec macro

    def process(self):
        """Integrate every existing input image; missing files are skipped."""
        Plugin.process(self)
        logger.debug("Integrate.process")
        for fname in self.input_files:
            if not os.path.exists(fname):
                self.log_error("image file: %s does not exist, skipping" % fname,
                               do_raise=False)
                continue
            basename = os.path.splitext(os.path.basename(fname))[0]
            destination = os.path.join(self.dest_dir, basename + ".dat")
            fimg = fabio.open(fname)
            if self.wavelength is not None:
                monitor = self.getMon(fimg.header, self.wavelength) / self.norm
            else:
                monitor = 1.0
            self.ai.integrate1d(fimg.data, npt=self.npt, method=self.method,
                                safe=False,
                                filename=destination,
                                normalization_factor=monitor,
                                unit=self.unit,
                                dummy=self.dummy,
                                delta_dummy=self.delta_dummy,
                                polarization_factor=self.polarization_factor,
                                correctSolidAngle=self.do_SA
                                )
            self.output_files.append(destination)

    def teardown(self):
        Plugin.teardown(self)
        logger.debug("Integrate.teardown")
        # Create some output data
        self.output["output_files"] = self.output_files

    @staticmethod
    def getMon(header, lam):
        """Return the monitor (incident flux) value used for normalization.

        :param header: image header with SPEC counter names ('counter_mne')
                       and positions ('counter_pos')
        :param lam: wavelength -- assumed in meter so E comes out in keV;
                    TODO confirm against the caller
        """
        strCount = header['counter_mne'].split()
        strCountPos = header['counter_pos'].split()
        # E[keV] = (h/e)[eV.s] * c[m/s] / lambda[m] / 1000
        E = 4.13566766225e-15 * 299792458 / lam / 1000
        return flux.main(float(strCountPos[strCount.index('mondio')]), E)
| gpl-2.0 |
randomtask1155/gpdb | gpMgmt/bin/gppylib/util/ssh_utils.py | 20 | 10885 | #!/usr/bin/env python
#
# Copyright (c) Greenplum Inc 2008. All Rights Reserved.
#
# This file contains ssh Session class and support functions/classes.
import cmd
import os
import sys
import socket
import threading
from gppylib.commands.base import WorkerPool, REMOTE, ExecutionError
from gppylib.commands.unix import Hostname, Echo
sys.path.insert(1, sys.path[0] + '/lib')
from pexpect import pxssh
class HostNameError(Exception):
    """Raised when a host name in a host file is malformed.

    When a file line number is known it is appended to the message.
    """
    def __init__(self, msg, lineno=0):
        if lineno:
            self.msg = ('%s at line %d' % (msg, lineno))
        else:
            self.msg = msg

    def __str__(self):
        return self.msg
class SSHError(Exception):
    """Raised when an ssh connection to a host cannot be established."""
    def __init__(self, msg):
        self.msg = msg

    def __str__(self):
        return self.msg
# Utility Functions
def ssh_prefix(host):
ssh = 'ssh -o "BatchMode yes" -o "StrictHostKeyChecking no" ' + host
return ssh
def get_hosts(hostsfile):
    """Parse *hostsfile* and return the list of host names it contains."""
    parsed = HostList()
    parsed.parseFile(hostsfile)
    return parsed.get()
class HostList():
    """An ordered, validated list of host names read from a host file."""

    def __init__(self):
        self.list = []

    def get(self):
        """Return the current list of hosts."""
        return self.list

    def add(self, host, lineno=0):
        '''Add a host to the hostlist.

        Raises HostNameError for user@host syntax or malformed IPv4/IPv6
        literals; *lineno* (when given) is included in the error message.
        '''
        # we don't allow the user@ syntax here
        if host.find('@') >= 0:
            raise HostNameError(host, lineno)

        # MPP-13617 - check for ipv6
        # NOTE: `except ... as e` replaces the Python-2-only `except E, e`
        # form; the `as` spelling works on Python 2.6+ and Python 3.
        if host.find(':') >= 0:
            try:
                socket.inet_pton(socket.AF_INET6, host)
            except socket.error as e:
                raise HostNameError(str(e), lineno)

        # MPP-13617 - check for ipv4 (only when it *looks* like a dotted quad)
        if host.find('.') >= 0:
            octs = host.split('.')
            if len(octs) == 4 and False not in [o.isdigit() for o in octs]:
                try:
                    socket.inet_pton(socket.AF_INET, host)
                except socket.error as e:
                    raise HostNameError(str(e), lineno)

        self.list.append(host)
        return self.list

    def parseFile(self, path):
        '''Add lines in a file to the hostlist.

        Blank lines and lines starting with '#' are ignored.
        '''
        with open(path) as fp:
            for i, line in enumerate(fp):
                line = line.strip()
                if not line or line[0] == '#':
                    continue
                self.add(line, i + 1)
        return self.list

    def checkSSH(self):
        '''Check that ssh to hostlist is okay; raise SSHError on failure.'''
        pool = WorkerPool()
        for h in self.list:
            cmd = Echo('ssh test', '', ctxt=REMOTE, remoteHost=h)
            pool.addCommand(cmd)
        pool.join()
        pool.haltWork()
        for cmd in pool.getCompletedItems():
            if not cmd.get_results().wasSuccessful():
                raise SSHError("Unable to ssh to '%s'" % cmd.remoteHost)
        return True

    def filterMultiHomedHosts(self):
        '''For multiple host that is of the same node, keep only one in the hostlist.

        Resolves each entry's hostname remotely and deduplicates on it.
        '''
        unique = {}
        pool = WorkerPool()
        for h in self.list:
            cmd = Hostname('hostname', ctxt=REMOTE, remoteHost=h)
            pool.addCommand(cmd)
        pool.join()
        pool.haltWork()
        for finished_cmd in pool.getCompletedItems():
            hostname = finished_cmd.get_hostname()
            if (not hostname):
                # Could not resolve a hostname: keep the address itself.
                unique[finished_cmd.remoteHost] = finished_cmd.remoteHost
            elif not unique.get(hostname):
                unique[hostname] = finished_cmd.remoteHost
            elif hostname == finished_cmd.remoteHost:
                # Prefer the entry whose name matches the reported hostname.
                unique[hostname] = finished_cmd.remoteHost
        # list(...) keeps self.list a real list on Python 3 (dict views) and
        # is a no-op copy on Python 2.
        self.list = list(unique.values())
        return self.list
# Session is a command session, derived from a base class cmd.Cmd
class Session(cmd.Cmd):
    '''Implements a list of open ssh sessions ready to execute commands.

    One pxssh connection per host is opened (in parallel threads) by
    login(); each command is then broadcast to every connection and the
    per-host output is echoed, prefixed with the host name.

    NOTE: this class is Python 2 only (print statements, StandardError,
    iterator ``.next()``).
    '''
    # Class-level defaults.  NOTE(review): hostList is a *shared class
    # attribute* that __init__ appends to, so every Session instance
    # accumulates into the same list -- long-standing quirk, kept as-is.
    verbose=False
    hostList=[]
    userName=None
    echoCommand=False
    # Raised for configuration problems (no hosts / no user).
    class SessionError(StandardError): pass
    # Raised by escapeLine() when the user types EOF/exit/quit.
    class SessionCmdExit(StandardError): pass
    def __init__(self, hostList=None, userName=None):
        cmd.Cmd.__init__(self)
        self.pxssh_list = []             # live pxssh connections (one per host)
        self.prompt = '=> '              # interactive prompt used by cmd.Cmd
        self.peerStringFormatRaw = None  # cached "[%Ns]" host-name format
        if hostList:
            for host in hostList:
                self.hostList.append(host)
        if userName: self.userName=userName
    def peerStringFormat(self):
        # Build (once) a "[%Ns]" format wide enough for the longest host
        # name, so per-host output lines up in columns.
        if self.peerStringFormatRaw: return self.peerStringFormatRaw
        cnt = 0
        for p in self.pxssh_list:
            if cnt < len(p.x_peer): cnt = len(p.x_peer)
        self.peerStringFormatRaw = "[%%%ds]" % cnt
        return self.peerStringFormatRaw
    def login(self, hostList=None, userName=None, delaybeforesend=0.05, sync_multiplier=1.0):
        '''This is the normal entry point used to add host names to the object and log in to each of them'''
        if self.verbose: print '\n[Reset ...]'
        if not (self.hostList or hostList):
            raise self.SessionError('No host list available to Login method')
        if not (self.userName or userName):
            raise self.SessionError('No user name available to Login method')
        #Cleanup
        self.clean()
        if hostList: #We have a new hostlist to use, initialize it
            self.hostList=[]
            for host in hostList:
                self.hostList.append(host)
        if userName: self.userName=userName #We have a new userName to use
        # MPP-6583. Save off term type and set to nothing before creating ssh process
        origTERM = os.getenv('TERM', None)
        os.putenv('TERM', '')
        good_list = []                 # connections that logged in successfully
        print_lock = threading.Lock()  # serializes console output from threads
        def connect_host(host):
            # NOTE(review): this appends to self.hostList *again*, after the
            # copy loop above already did -- hosts end up duplicated in
            # self.hostList.  Confirm intent before changing.
            self.hostList.append(host)
            p = pxssh.pxssh(delaybeforesend=delaybeforesend,
                            options={"StrictHostKeyChecking": "no",
                                     "BatchMode": "yes"})
            try:
                # The sync_multiplier value is passed onto pexpect.pxssh which is used to determine timeout
                # values for prompt verification after an ssh connection is established.
                p.login(host, self.userName, sync_multiplier=sync_multiplier)
                p.x_peer = host    # remember which host this connection serves
                p.x_pid = p.pid
                good_list.append(p)
                if self.verbose:
                    with print_lock:
                        print '[INFO] login %s' % host
            except Exception as e:
                with print_lock:
                    print '[ERROR] unable to login to %s' % host
                    if type(e) is pxssh.ExceptionPxssh:
                        print e
                    elif type(e) is pxssh.EOF:
                        print 'Could not acquire connection.'
                    else:
                        print 'hint: use gpssh-exkeys to setup public-key authentication between hosts'
        # Open all connections in parallel, one thread per host.
        # NOTE(review): this iterates the hostList *parameter*; calling
        # login() with hostList=None (relying on self.hostList) would raise
        # TypeError here -- confirm against callers.
        thread_list = []
        for host in hostList:
            t = threading.Thread(target=connect_host, args=(host,))
            t.start()
            thread_list.append(t)
        for t in thread_list:
            t.join()
        # Restore terminal type
        if origTERM:
            os.putenv('TERM', origTERM)
        self.pxssh_list = good_list
    def close(self):
        # Alias kept for API symmetry with file-like objects.
        return self.clean()
    def reset(self):
        '''reads from all the ssh connections to make sure we dont have any pending cruft'''
        for s in self.pxssh_list:
            s.readlines()
    def clean(self):
        # Log out of every connection and forget them; returns the worst
        # (max) exit status observed.
        net_return_code = self.closePxsshList(self.pxssh_list)
        self.pxssh_list = []
        return net_return_code
    def emptyline(self):
        # Override cmd.Cmd: an empty input line is a no-op instead of
        # repeating the previous command.
        pass
    def escapeLine(self,line):
        '''Escape occurrences of \ and $ as needed and package the line as an "eval" shell command'''
        # Raises SessionCmdExit for EOF/exit/quit -- callers rely on that
        # side effect to terminate the interactive loop.
        line = line.strip()
        if line == 'EOF' or line == 'exit' or line == 'quit':
            raise self.SessionCmdExit()
        line = line.split('\\')
        line = '\\\\'.join(line)
        line = line.split('"')
        line = '\\"'.join(line)
        line = line.split('$')
        line = '\\$'.join(line)
        line = 'eval "' + line + '" < /dev/null'
        return line
    def executeCommand(self,command):
        # Broadcast `command` to every open session, wait for each prompt to
        # come back, and return an iterator of per-host output-line lists
        # (same order as self.pxssh_list).
        commandoutput=[]
        if self.echoCommand:
            escapedCommand = command.replace('"', '\\"')
            command = 'echo "%s"; %s' % (escapedCommand, command)
        #Execute the command in all of the ssh sessions
        for s in self.pxssh_list:
            s.sendline(command)
        #Wait for each command and retrieve the output
        for s in self.pxssh_list:
            #Wait for each command to finish
            #!! TODO verify that this is a tight wait loop and find another way to do this
            while not s.prompt(120) and s.isalive() and not s.eof(): pass
        for s in self.pxssh_list:
            #Split the output into an array of lines so that we can add text to the beginning of
            # each line
            # First line is the echoed command, last is the prompt: drop both.
            output = s.before.split('\n')
            output = output[1:-1]
            commandoutput.append(output)
        return commandoutput.__iter__()
    # Interactive command line handler
    # Override of base class, handles commands that aren't recognized as part of a predefined set
    # The "command" argument is a command line to be executed on all available command sessions
    # The output of the command execution is printed to the standard output, prepended with
    # the hostname of each machine from which the output came
    def default(self, command):
        # escapeLine() is called for its SessionCmdExit side effect on
        # exit/quit; its return value is otherwise unused here (the raw
        # command is what actually gets executed).
        line = self.escapeLine(command)
        if self.verbose: print command
        #Execute the command on our ssh sessions
        commandoutput=self.executeCommand(command)
        self.writeCommandOutput(commandoutput)
    def writeCommandOutput(self,commandoutput):
        '''Takes a list of output lists as an iterator and writes them to standard output,
        formatted with the hostname from which each output array was obtained'''
        for s in self.pxssh_list:
            output = commandoutput.next()
            #Write the output
            if len(output) == 0:
                print (self.peerStringFormat() % s.x_peer)
            else:
                for line in output:
                    print (self.peerStringFormat() % s.x_peer), line
    def closePxsshList(self,list):
        # NOTE: parameter name shadows the builtin `list` (pre-existing).
        # Logs out of every session in parallel; returns the max exit status.
        lock = threading.Lock()
        return_codes = [0]
        def closePxsshOne(p, return_codes):
            p.logout()
            with lock:
                return_codes.append(p.exitstatus)
        th = []
        for p in list:
            t = threading.Thread(target=closePxsshOne, args=(p, return_codes))
            t.start()
            th.append(t)
        for t in th:
            t.join()
        return max(return_codes)
| apache-2.0 |
kxliugang/edx-platform | common/djangoapps/config_models/__init__.py | 220 | 2002 | """
Model-Based Configuration
=========================
This app allows other apps to easily define a configuration model
that can be hooked into the admin site to allow configuration management
with auditing.
Installation
------------
Add ``config_models`` to your ``INSTALLED_APPS`` list.
Usage
-----
Create a subclass of ``ConfigurationModel``, with fields for each
value that needs to be configured::
class MyConfiguration(ConfigurationModel):
frobble_timeout = IntField(default=10)
        frazzle_target = TextField(default="debug")
This is a normal django model, so it must be synced and migrated as usual.
The default values for the fields in the ``ConfigurationModel`` will be
used if no configuration has yet been created.
Register that class with the Admin site, using the ``ConfigurationModelAdmin``::
from django.contrib import admin
from config_models.admin import ConfigurationModelAdmin
admin.site.register(MyConfiguration, ConfigurationModelAdmin)
Use the configuration in your code::
def my_view(self, request):
config = MyConfiguration.current()
fire_the_missiles(config.frazzle_target, timeout=config.frobble_timeout)
Use the admin site to add new configuration entries. The most recently created
entry is considered to be ``current``.
Configuration
-------------
The current ``ConfigurationModel`` will be cached in the ``configuration`` django cache,
or in the ``default`` cache if ``configuration`` doesn't exist. You can specify the cache
timeout in each ``ConfigurationModel`` by setting the ``cache_timeout`` property.
You can change the name of the cache key used by the ``ConfigurationModel`` by overriding
the ``cache_key_name`` function.
Extension
---------
``ConfigurationModels`` are just django models, so they can be extended with new fields
and migrated as usual. Newly added fields must have default values and should be nullable,
so that rollbacks to old versions of configuration work correctly.
"""
| agpl-3.0 |
rahushen/ansible | lib/ansible/modules/windows/win_regedit.py | 24 | 6538 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Adam Keech <akeech@chathamfinancial.com>, Josh Ludwig <jludwig@chathamfinancial.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = r'''
---
module: win_regedit
version_added: '2.0'
short_description: Add, change, or remove registry keys and values
description:
- Add, modify or remove registry keys and values.
- More information about the windows registry from Wikipedia
U(https://en.wikipedia.org/wiki/Windows_Registry).
options:
path:
description:
- Name of the registry path.
- 'Should be in one of the following registry hives: HKCC, HKCR, HKCU,
HKLM, HKU.'
required: yes
aliases: [ key ]
name:
description:
- Name of the registry entry in the above C(path) parameters.
- If not provided, or empty then the '(Default)' property for the key will
be used.
aliases: [ entry ]
data:
description:
- Value of the registry entry C(name) in C(path).
- If not specified then the value for the property will be null for the
corresponding C(type).
- Binary and None data should be expressed in a yaml byte array or as comma
separated hex values.
- An easy way to generate this is to run C(regedit.exe) and use the
I(export) option to save the registry values to a file.
- In the exported file, binary value will look like C(hex:be,ef,be,ef), the
C(hex:) prefix is optional.
- DWORD and QWORD values should either be represented as a decimal number
or a hex value.
- Multistring values should be passed in as a list.
- See the examples for more details on how to format this data.
type:
description:
- The registry value data type.
choices: [ binary, dword, expandstring, multistring, string, qword ]
default: string
aliases: [ datatype ]
state:
description:
- The state of the registry entry.
choices: [ absent, present ]
default: present
delete_key:
description:
- When C(state) is 'absent' then this will delete the entire key.
- If C(no) then it will only clear out the '(Default)' property for
that key.
type: bool
default: 'yes'
version_added: '2.4'
hive:
description:
- A path to a hive key like C:\Users\Default\NTUSER.DAT to load in the
registry.
- This hive is loaded under the HKLM:\ANSIBLE key which can then be used
in I(name) like any other path.
- This can be used to load the default user profile registry hive or any
other hive saved as a file.
- Using this function requires the user to have the C(SeRestorePrivilege)
and C(SeBackupPrivilege) privileges enabled.
version_added: '2.5'
notes:
- Check-mode C(-C/--check) and diff output C(-D/--diff) are supported, so that you can test every change against the active configuration before
applying changes.
- Beware that some registry hives (C(HKEY_USERS) in particular) do not allow to create new registry paths in the root folder.
- Since ansible 2.4, when checking if a string registry value has changed, a case-sensitive test is used. Previously the test was case-insensitive.
author:
- Adam Keech (@smadam813)
- Josh Ludwig (@joshludwig)
- Jordan Borean (@jborean93)
'''
EXAMPLES = r'''
- name: Create registry path MyCompany
win_regedit:
path: HKCU:\Software\MyCompany
- name: Add or update registry path MyCompany, with entry 'hello', and containing 'world'
win_regedit:
path: HKCU:\Software\MyCompany
name: hello
data: world
- name: Add or update registry path MyCompany, with dword entry 'hello', and containing 1337 as the decimal value
win_regedit:
path: HKCU:\Software\MyCompany
name: hello
data: 1337
type: dword
- name: Add or update registry path MyCompany, with dword entry 'hello', and containing 0xff2500ae as the hex value
win_regedit:
path: HKCU:\Software\MyCompany
name: hello
data: 0xff2500ae
type: dword
- name: Add or update registry path MyCompany, with binary entry 'hello', and containing binary data in hex-string format
win_regedit:
path: HKCU:\Software\MyCompany
name: hello
data: hex:be,ef,be,ef,be,ef,be,ef,be,ef
type: binary
- name: Add or update registry path MyCompany, with binary entry 'hello', and containing binary data in yaml format
win_regedit:
path: HKCU:\Software\MyCompany
name: hello
data: [0xbe,0xef,0xbe,0xef,0xbe,0xef,0xbe,0xef,0xbe,0xef]
type: binary
- name: Add or update registry path MyCompany, with expand string entry 'hello'
win_regedit:
path: HKCU:\Software\MyCompany
name: hello
data: '%appdata%\local'
type: expandstring
- name: Add or update registry path MyCompany, with multi string entry 'hello'
win_regedit:
path: HKCU:\Software\MyCompany
name: hello
data: ['hello', 'world']
type: multistring
- name: Disable keyboard layout hotkey for all users (changes existing)
win_regedit:
path: HKU:\.DEFAULT\Keyboard Layout\Toggle
name: Layout Hotkey
data: 3
type: dword
- name: Disable language hotkey for current users (adds new)
win_regedit:
path: HKCU:\Keyboard Layout\Toggle
name: Language Hotkey
data: 3
type: dword
- name: Remove registry path MyCompany (including all entries it contains)
win_regedit:
path: HKCU:\Software\MyCompany
state: absent
delete_key: yes
- name: Clear the existing (Default) entry at path MyCompany
win_regedit:
path: HKCU:\Software\MyCompany
state: absent
delete_key: no
- name: Remove entry 'hello' from registry path MyCompany
win_regedit:
path: HKCU:\Software\MyCompany
name: hello
state: absent
- name: Change default mouse trailing settings for new users
win_regedit:
path: HKLM:\ANSIBLE\Control Panel\Mouse
name: MouseTrails
data: 10
type: string
state: present
hive: C:\Users\Default\NTUSER.dat
'''
RETURN = r'''
data_changed:
description: whether this invocation changed the data in the registry value
returned: success
type: boolean
sample: False
data_type_changed:
description: whether this invocation changed the datatype of the registry value
returned: success
type: boolean
sample: True
'''
| gpl-3.0 |
mdrumond/tensorflow | tensorflow/examples/adding_an_op/zero_out_op_1.py | 190 | 1053 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ZeroOut op Python library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import tensorflow as tf
# Load the compiled custom-op kernel from the shared library shipped next to
# this file; tf.load_op_library registers the op with TensorFlow and returns
# a module exposing the generated Python wrappers.
_zero_out_module = tf.load_op_library(
    os.path.join(tf.resource_loader.get_data_files_path(),
                 'zero_out_op_kernel_1.so'))
# Public alias for the generated op wrapper.
zero_out = _zero_out_module.zero_out
| apache-2.0 |
kevin-coder/tensorflow-fork | tensorflow/python/ops/ragged/ragged_expand_dims_op_test.py | 13 | 4920 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ragged_array_ops.expand_dims."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.framework import test_util
from tensorflow.python.ops.ragged import ragged_array_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_test_util
from tensorflow.python.platform import googletest
@test_util.run_all_in_graph_and_eager_modes
class RaggedExpandDimsOpTest(ragged_test_util.RaggedTensorTestCase,
                             parameterized.TestCase):
  """Parameterized tests for ragged_array_ops.expand_dims."""

  # An example 4-d ragged tensor with shape [3, (D2), (D3), 2], and the
  # expected result calling for expand_dims on each axis. c.f. the table of
  # expected result shapes in the ragged_array_ops.expand_dims docstring.
  EXAMPLE4D = [[[[1, 1], [2, 2]], [[3, 3]]],
               [],
               [[], [[4, 4], [5, 5], [6, 6]]]]  # pyformat: disable
  # Maps axis -> EXAMPLE4D with a length-1 dimension inserted at that axis
  # (built by wrapping each element at the corresponding nesting depth).
  EXAMPLE4D_EXPAND_AXIS = {
      0: [EXAMPLE4D],
      1: [[d0] for d0 in EXAMPLE4D],
      2: [[[d1] for d1 in d0] for d0 in EXAMPLE4D],
      3: [[[[d2] for d2 in d1] for d1 in d0] for d0 in EXAMPLE4D],
      4: [[[[[d3] for d3 in d2] for d2 in d1] for d1 in d0] for d0 in EXAMPLE4D]
  }
  @parameterized.parameters([
      #=========================================================================
      # Docstring examples: 2D Ragged Inputs
      dict(rt_input=[[1, 2], [3]],
           axis=0,
           expected=[[[1, 2], [3]]],
           expected_shape=[1, None, None]),
      dict(rt_input=[[1, 2], [3]],
           axis=1,
           expected=[[[1, 2]], [[3]]],
           expected_shape=[2, None, None]),
      dict(rt_input=[[1, 2], [3]],
           axis=2,
           expected=[[[1], [2]], [[3]]],
           expected_shape=[2, None, 1]),
      #=========================================================================
      # 2D Tensor Inputs (ragged_rank=0: plain dense tensors)
      dict(rt_input=[[1, 2], [3, 4], [5, 6]],
           ragged_rank=0,
           axis=0,
           expected=[[[1, 2], [3, 4], [5, 6]]],
           expected_shape=[1, 3, 2]),
      dict(rt_input=[[1, 2], [3, 4], [5, 6]],
           ragged_rank=0,
           axis=1,
           expected=[[[1, 2]], [[3, 4]], [[5, 6]]],
           expected_shape=[3, 1, 2]),
      dict(rt_input=[[1, 2], [3, 4], [5, 6]],
           ragged_rank=0,
           axis=2,
           expected=[[[1], [2]], [[3], [4]], [[5], [6]]],
           expected_shape=[3, 2, 1]),
      #=========================================================================
      # 4D Ragged Inputs: [3, (D2), (D3), 2]
      # c.f. the table of expected result shapes in the expand_dims docstring.
      dict(rt_input=EXAMPLE4D,
           ragged_rank=2,
           axis=0,
           expected=EXAMPLE4D_EXPAND_AXIS[0],
           expected_shape=[1, None, None, None, 2]),
      dict(rt_input=EXAMPLE4D,
           ragged_rank=2,
           axis=1,
           expected=EXAMPLE4D_EXPAND_AXIS[1],
           expected_shape=[3, None, None, None, 2]),
      dict(rt_input=EXAMPLE4D,
           ragged_rank=2,
           axis=2,
           expected=EXAMPLE4D_EXPAND_AXIS[2],
           expected_shape=[3, None, None, None, 2]),
      dict(rt_input=EXAMPLE4D,
           ragged_rank=2,
           axis=3,
           expected=EXAMPLE4D_EXPAND_AXIS[3],
           expected_shape=[3, None, None, 1, 2]),
      dict(rt_input=EXAMPLE4D,
           ragged_rank=2,
           axis=4,
           expected=EXAMPLE4D_EXPAND_AXIS[4],
           expected_shape=[3, None, None, 2, 1]),
  ])  # pyformat: disable
  def testRaggedExpandDims(self,
                           rt_input,
                           axis,
                           expected,
                           ragged_rank=None,
                           expected_shape=None):
    """Checks expand_dims(rt_input, axis) against `expected`.

    Args:
      rt_input: python value for the input (ragged or dense) tensor.
      axis: axis at which to insert the new dimension.
      expected: python value for the expected result.
      ragged_rank: ragged_rank used when constructing `rt_input`.
      expected_shape: expected static shape (None marks ragged dimensions).
    """
    rt = ragged_factory_ops.constant(rt_input, ragged_rank=ragged_rank)
    expanded = ragged_array_ops.expand_dims(rt, axis=axis)
    # Rank always grows by exactly one.
    self.assertEqual(expanded.shape.ndims, rt.shape.ndims + 1)
    if expected_shape is not None:
      self.assertEqual(expanded.shape.as_list(), expected_shape)
    self.assertRaggedEqual(expanded, expected)
if __name__ == '__main__':
  googletest.main()
| apache-2.0 |
alcides/rdflib | test/test_sparql/test_sparql_recurse.py | 1 | 2965 | from rdflib.graph import ConjunctiveGraph
from rdflib.term import URIRef, Literal
from rdflib.namespace import RDFS
from rdflib.sparql.Algebra import RenderSPARQLAlgebra
from StringIO import StringIO
import unittest, sys
import nose
testContent = """
@prefix foaf: <http://xmlns.com/foaf/0.1/> .
@prefix dc: <http://purl.org/dc/elements/1.1/>.
@prefix xsd: <http://www.w3.org/2001/XMLSchema#>.
<http://del.icio.us/rss/chimezie/logic>
a foaf:Document;
dc:date "2006-10-01T12:35:00"^^xsd:dateTime.
<http://del.icio.us/rss/chimezie/paper>
a foaf:Document;
dc:date "2005-05-25T08:15:00"^^xsd:dateTime.
<http://del.icio.us/rss/chimezie/illustration>
a foaf:Document;
dc:date "1990-01-01T12:45:00"^^xsd:dateTime."""
BASIC_KNOWS_DATA = '''
@prefix foaf: <http://xmlns.com/foaf/0.1/> .
<ex:person.1> foaf:name "person 1";
foaf:knows <ex:person.2>.
<ex:person.2> foaf:knows <ex:person.3>.
<ex:person.3> foaf:name "person 3".
'''
KNOWS_QUERY = '''
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
SELECT ?x ?name
{
?x foaf:knows ?y .
OPTIONAL { ?y foaf:name ?name }
}
RECUR ?y TO ?x
'''
SUBCLASS_DATA = '''
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
<ex:ob> a <ex:class.1> .
<ex:class.1> rdfs:subClassOf <ex:class.2> .
<ex:class.2> rdfs:subClassOf <ex:class.3> .
'''
SUBCLASS_QUERY = '''
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
SELECT ?x ?t
{ ?x rdf:type ?t }
RECUR ?t TO ?x
{ ?x rdfs:subClassOf ?t }
'''
ANSWER1 = URIRef('http://del.icio.us/rss/chimezie/paper')
class RecursionTests(unittest.TestCase):
    """Exercises the non-standard RECUR extension of the SPARQL engine."""

    def setUp(self):
        self.graph = ConjunctiveGraph()
        self.graph.load(StringIO(testContent), format='n3')

    def test_simple_recursion(self):
        graph = ConjunctiveGraph()
        graph.load(StringIO(BASIC_KNOWS_DATA), format='n3')
        rows = graph.query(KNOWS_QUERY,
                           DEBUG=False).serialize(format='python')
        actual = set(tuple(row) for row in rows)
        person1 = URIRef('ex:person.1')
        person2 = URIRef('ex:person.2')
        expected = set([(person1, None),
                        (person1, Literal('person 3')),
                        (person2, Literal('person 3'))])
        nose.tools.assert_equal(actual, expected)

    def test_secondary_recursion(self):
        graph = ConjunctiveGraph()
        graph.load(StringIO(SUBCLASS_DATA), format='n3')
        rows = graph.query(SUBCLASS_QUERY,
                           DEBUG=False).serialize(format='python')
        actual = set(tuple(row) for row in rows)
        ob = URIRef('ex:ob')
        class1 = URIRef('ex:class.1')
        class2 = URIRef('ex:class.2')
        class3 = URIRef('ex:class.3')
        expected = set([(ob, class1), (ob, class2), (ob, class3)])
        nose.tools.assert_equal(actual, expected)
if __name__ == "__main__":
    unittest.main()
| bsd-3-clause |
neoareslinux/neutron | neutron/services/service_base.py | 13 | 3183 | # Copyright 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import importutils
import six
from neutron.api import extensions
from neutron.db import servicetype_db as sdb
from neutron.i18n import _LE, _LI
from neutron.services import provider_configuration as pconf
LOG = logging.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class ServicePluginBase(extensions.PluginInterface):
    """Define base interface for any Advanced Service plugin."""
    # API extension aliases implemented by the concrete plugin; subclasses
    # override this with the aliases they actually support.
    supported_extension_aliases = []
    @abc.abstractmethod
    def get_plugin_type(self):
        """Return one of predefined service types.

        See neutron/plugins/common/constants.py
        """
        pass
    @abc.abstractmethod
    def get_plugin_description(self):
        """Return string description of the plugin."""
        pass
def load_drivers(service_type, plugin):
    """Loads drivers for specific service.

    Passes plugin instance to driver's constructor

    Returns a (drivers, default_provider) pair, where ``drivers`` maps
    provider name to driver instance and ``default_provider`` is the name
    of the configured default provider (or None).
    """
    manager = sdb.ServiceTypeManager.get_instance()
    providers = manager.get_service_providers(
        None, filters={'service_type': [service_type]})
    if not providers:
        msg = (_("No providers specified for '%s' service, exiting") %
               service_type)
        LOG.error(msg)
        raise SystemExit(1)

    drivers = {}
    for provider in providers:
        try:
            drivers[provider['name']] = importutils.import_object(
                provider['driver'], plugin)
            LOG.debug("Loaded '%(provider)s' provider for service "
                      "%(service_type)s",
                      {'provider': provider['driver'],
                       'service_type': service_type})
        except ImportError:
            # Log the failure, then let the original exception propagate.
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE("Error loading provider '%(provider)s' for "
                                  "service %(service_type)s"),
                              {'provider': provider['driver'],
                               'service_type': service_type})

    default_provider = None
    try:
        provider = manager.get_default_service_provider(None, service_type)
        default_provider = provider['name']
    except pconf.DefaultServiceProviderNotFound:
        LOG.info(_LI("Default provider is not specified for service type %s"),
                 service_type)
    return drivers, default_provider
| apache-2.0 |
donNewtonAlpha/onos | tools/test/topos/onosnet.py | 4 | 8616 | #!/usr/bin/python
import itertools
import os
import signal
import sys
from argparse import ArgumentParser
from subprocess import call
from threading import Thread
from time import sleep
import gratuitousArp
from mininet.cli import CLI
from mininet.examples.controlnet import MininetFacade
from mininet.link import TCLink
from mininet.log import info, output, error
from mininet.log import setLogLevel
from mininet.net import Mininet
from mininet.node import RemoteController, Node
ARP_PATH = gratuitousArp.__file__.replace('.pyc', '.py')
class ONOSMininet( Mininet ):
    """Mininet subclass that attaches to remote ONOS controllers and adds
    ONOS-specific helpers: gratuitous ARP on startup (to aid host discovery),
    background iperf runs, and a full-mesh ping loop."""

    def __init__( self, controllers=None, gratuitousArp=True, build=True, *args, **kwargs ):
        """Create Mininet object for ONOS.
        controllers: List of controller IP addresses
        gratuitousArp: Send an ARP from each host to aid controller's host discovery"""
        # Avoid a shared mutable default argument; None means no controllers.
        if controllers is None:
            controllers = []
        # delay building for a second
        kwargs[ 'build' ] = False
        Mininet.__init__( self, *args, **kwargs )

        self.gratArp = gratuitousArp

        # If a controller is not provided, use list of remote controller IPs instead.
        if 'controller' not in kwargs or not kwargs[ 'controller' ]:
            info ( '*** Adding controllers\n' )
            ctrl_count = 0
            for controllerIP in controllers:
                self.addController( 'c%d' % ctrl_count, RemoteController, ip=controllerIP )
                info( '   c%d (%s)\n' % ( ctrl_count, controllerIP ) )
                ctrl_count = ctrl_count + 1

        if self.topo and build:
            self.build()

    def start( self ):
        """Start the network, then optionally ARP from each host so the
        controller discovers them immediately."""
        Mininet.start( self )
        if self.gratArp:
            self.waitConnected( timeout=5 )
            info ( '*** Sending a gratuitious ARP from each host\n' )
            self.gratuitousArp()

    def verifyHosts( self, hosts ):
        """Replace host-name strings in `hosts` (in place) with host objects,
        dropping names that are not in the network and entries that are
        neither strings nor Nodes."""
        # Bug fix: iterate from the end so deleting entries does not shift
        # the indices of unvisited elements.  The previous forward iteration
        # over range(len(hosts)) could skip entries or raise IndexError
        # after a deletion.
        for i in reversed( range( len( hosts ) ) ):
            if isinstance( hosts[ i ], str ):
                if hosts[ i ] in self:
                    hosts[ i ] = self[ hosts[ i ] ]
                else:
                    info( '*** ERROR: %s is not a host\n' % hosts[ i ] )
                    del hosts[ i ]
            elif not isinstance( hosts[ i ], Node ):
                del hosts[ i ]

    def gratuitousArp( self, hosts=None ):
        "Send an ARP from each host to aid controller's host discovery; fallback to ping if necessary"
        # None (default) means all hosts; avoids a mutable default argument.
        if not hosts:
            hosts = self.hosts
        self.verifyHosts( hosts )
        for host in hosts:
            info( '%s ' % host.name )
            info( host.cmd( ARP_PATH ) )
        info ( '\n' )

    def pingloop( self ):
        "Loop forever pinging the full mesh of hosts"
        # Silence per-ping output; restore verbosity even on ^C.
        setLogLevel( 'error' )
        try:
            while True:
                self.ping()
        finally:
            setLogLevel( 'info' )

    def bgIperf( self, hosts=None, seconds=10 ):
        """Run iperf between every pair of the given hosts for `seconds`,
        reporting bandwidth.  ^Z detaches and finishes in the background;
        ^C aborts and kills all iperf processes."""
        if hosts is None:
            hosts = []
        self.verifyHosts( hosts )
        servers = [ host.popen( "iperf -s" ) for host in hosts ]

        clients = []
        for s, d in itertools.combinations( hosts, 2 ):
            info ( '%s <--> %s\n' % ( s.name, d.name ) )
            cmd = 'iperf -c %s -t %s -y csv' % ( d.IP(), seconds )
            p = s.popen( cmd )
            p.s = s.name
            p.d = d.name
            clients.append( p )

        def handler ( _signum, _frame ):
            # Raised on ^Z so the try-block below can detach cleanly.
            raise BackgroundException()
        oldSignal = signal.getsignal( signal.SIGTSTP )
        signal.signal( signal.SIGTSTP, handler )

        def finish( verbose=True ):
            # Collect each client's result, then shut down the servers.
            for c in clients:
                out, err = c.communicate()
                if verbose:
                    if err:
                        info( err )
                    else:
                        # Field 8 of iperf's CSV report is bits/second.
                        bw = out.split( ',' )[ 8 ]
                        info( '%s <--> %s: %s\n' % ( c.s, c.d, formatBw( bw ) ) )
            for s in servers:
                s.terminate()

        try:
            info ( 'Press ^Z to continue in background or ^C to abort\n')
            progress( seconds )
            finish()
        except KeyboardInterrupt:
            for c in clients:
                c.terminate()
            for s in servers:
                s.terminate()
        except BackgroundException:
            info( '\n*** Continuing in background...\n' )
            t = Thread( target=finish, args=[ False ] )
            t.start()
        finally:
            # Disable custom background signal
            signal.signal( signal.SIGTSTP, oldSignal )
def progress(t):
    """Write one dot per second for `t` seconds as a simple progress meter,
    followed by a newline."""
    while t > 0:
        sys.stdout.write( '.' )
        t -= 1
        sys.stdout.flush()
        sleep(1)
    # Use sys.stdout.write for the trailing newline: the original bare
    # `print` statement is Python-2-only syntax, and writing directly is
    # consistent with the dot output above.
    sys.stdout.write( '\n' )
def formatBw( bw ):
    """Format a bandwidth value (bits/sec, as a number or numeric string)
    as a human-readable string, scaling by 1000 up to Gbps."""
    bw = float( bw )
    for unit in ( 'bps', 'Kbps', 'Mbps', 'Gbps' ):
        # Stop scaling once the value fits (strictly > 1000 scales up),
        # or when we have reached the largest supported unit.
        if bw <= 1000 or unit == 'Gbps':
            return '%.2f %s' % ( bw, unit )
        bw /= 1000
class BackgroundException( Exception ):
    """Raised by the SIGTSTP (^Z) handler in bgIperf to signal that a
    foreground iperf run should continue in the background."""
    pass
def get_mn(mn):
    """Return the ONOSMininet instance represented by `mn`, unwrapping a
    MininetFacade if necessary; None if no ONOSMininet is found."""
    if isinstance(mn, ONOSMininet):
        return mn
    if isinstance(mn, MininetFacade):
        # There's more Mininet objects instantiated (e.g. one for the control network in onos.py).
        for net in mn.nets:
            if isinstance(net, ONOSMininet):
                return net
    return None
def do_bgIperf( self, line ):
    """CLI command: run background iperf between the given hosts.

    Usage: bgIperf <host> <host> ... [-t seconds]
    """
    args = line.split()
    if not args:
        output( 'Provide a list of hosts.\n' )
        # Bug fix: previously execution fell through here and continued
        # with an empty host list instead of stopping.
        return

    # Try to parse the '-t' argument as the number of seconds.
    # (Scanning with index() instead of enumerate() avoids mutating the
    # list while iterating over it, as the original code did.)
    seconds = 10
    if '-t' in args:
        i = args.index( '-t' )
        if i + 1 < len( args ):
            try:
                seconds = int( args[ i + 1 ] )
            except ValueError:
                error( 'Could not parse number of seconds: %s', args[ i + 1 ] )
            del( args[ i + 1 ] )
        del args[ i ]

    hosts = []
    err = False
    for arg in args:
        if arg not in self.mn:
            err = True
            error( "node '%s' not in network\n" % arg )
        else:
            hosts.append( self.mn[ arg ] )
    mn = get_mn( self.mn )
    if "bgIperf" in dir( mn ) and not err:
        mn.bgIperf( hosts, seconds=seconds )
    else:
        output( 'Background Iperf is not supported.\n' )
def do_gratuitousArp( self, line ):
    """CLI command: send a gratuitous ARP from the listed hosts
    (all hosts if none are given)."""
    hostnames = line.split()
    net = get_mn( self.mn )
    if "gratuitousArp" in dir( net ):
        net.gratuitousArp( hostnames )
    else:
        output( 'Gratuitous ARP is not supported.\n' )
# Attach the custom commands to the Mininet CLI class so they are available
# as 'bgIperf' and 'gratuitousArp' at the mininet> prompt.
CLI.do_bgIperf = do_bgIperf
CLI.do_gratuitousArp = do_gratuitousArp
def parse_args():
    """Build and run the command-line parser for the ONOS Mininet launcher.

    Returns the parsed Namespace with clusterSize (int), netcfgJson (str)
    and ipAddrs (list of controller IP strings).
    """
    parser = ArgumentParser(description='ONOS Mininet')
    parser.add_argument('--cluster-size', type=int, action='store',
                        dest='clusterSize', required=False, default=0,
                        help='Starts an ONOS cluster with the given number of instances')
    parser.add_argument('--netcfg', type=str, action='store',
                        dest='netcfgJson', required=False, default='',
                        help='Relative path of the JSON file to be used with netcfg')
    parser.add_argument('ipAddrs', metavar='IP', type=str, nargs='*', default=[],
                        help='List of controller IP addresses')
    return parser.parse_args()
def run( topo, controllers=None, link=TCLink, autoSetMacs=True):
    """Launch an ONOS-controlled Mininet network for the given topology
    and drop into the CLI; tears the network down on CLI exit.

    topo: the Mininet topology to build (required)
    controllers: list of controller IPs (command-line IPs take precedence)
    link: link class to use
    autoSetMacs: automatically set host MACs to match their IPs
    """
    if not topo:
        print 'Need to provide a topology'
        exit(1)

    args = parse_args()

    # Positional command-line IPs override the controllers argument.
    if not controllers and len(args.ipAddrs) > 0:
        controllers = args.ipAddrs

    if not controllers and args.clusterSize < 1:
        print 'Need to provide a list of controller IPs, or define a cluster size.'
        exit( 1 )

    setLogLevel( 'info' )

    if args.clusterSize > 0:
        # Spawn a local ONOS cluster in a control network via onos.py.
        if 'ONOS_ROOT' not in os.environ:
            print "Environment var $ONOS_ROOT not set (needed to import onos.py)"
            exit( 1 )
        # onos.py lives in the ONOS source tree, so sys.path must be
        # extended before the import below.
        sys.path.append(os.environ["ONOS_ROOT"] + "/tools/dev/mininet")
        from onos import ONOSCluster, ONOSOVSSwitch, ONOSCLI
        controller = ONOSCluster('c0', args.clusterSize)
        onosAddr = controller.nodes()[0].IP()
        net = ONOSMininet( topo=topo, controller=controller, switch=ONOSOVSSwitch, link=link,
                           autoSetMacs=autoSetMacs )
        cli = ONOSCLI
    else:
        # Attach to already-running remote ONOS instance(s).
        onosAddr = controllers[0]
        net = ONOSMininet(topo=topo, controllers=controllers, link=link, autoSetMacs=autoSetMacs)
        cli = CLI

    net.start()

    # Optionally push a JSON network configuration to ONOS before the CLI.
    if len(args.netcfgJson) > 0:
        if not os.path.isfile(args.netcfgJson):
            error('*** WARNING no such netcfg file: %s\n' % args.netcfgJson)
        else:
            info('*** Setting netcfg: %s\n' % args.netcfgJson)
            call(("onos-netcfg", onosAddr, args.netcfgJson))

    cli( net )
    net.stop()
| apache-2.0 |
msegado/edx-platform | common/lib/xmodule/xmodule/tests/test_xblock_wrappers.py | 13 | 15077 | """
Tests for the wrapping layer that provides the XBlock API using XModule/Descriptor
functionality
"""
# For tests, ignore access to protected members
# pylint: disable=protected-access
import webob
import ddt
from factory import (
BUILD_STRATEGY,
Factory,
lazy_attribute,
LazyAttributeSequence,
post_generation,
SubFactory,
use_strategy,
)
from fs.memoryfs import MemoryFS
from lxml import etree
from mock import Mock
from unittest.case import SkipTest, TestCase
from xblock.field_data import DictFieldData
from xblock.fields import ScopeIds
from xblock.core import XBlock
from opaque_keys.edx.locations import Location
from xmodule.x_module import ModuleSystem, XModule, XModuleDescriptor, DescriptorSystem, STUDENT_VIEW, STUDIO_VIEW
from xmodule.annotatable_module import AnnotatableDescriptor
from xmodule.capa_module import CapaDescriptor
from xmodule.course_module import CourseDescriptor
from xmodule.discussion_module import DiscussionDescriptor
from xmodule.gst_module import GraphicalSliderToolDescriptor
from xmodule.html_module import HtmlDescriptor
from xmodule.poll_module import PollDescriptor
from xmodule.word_cloud_module import WordCloudDescriptor
from xmodule.crowdsource_hinter import CrowdsourceHinterDescriptor
#from xmodule.video_module import VideoDescriptor
from xmodule.seq_module import SequenceDescriptor
from xmodule.conditional_module import ConditionalDescriptor
from xmodule.randomize_module import RandomizeDescriptor
from xmodule.vertical_block import VerticalBlock
from xmodule.wrapper_module import WrapperBlock
from xmodule.tests import get_test_descriptor_system, get_test_system
# A dictionary that maps specific XModuleDescriptor classes without children
# to a list of sample field values to test with.
# TODO: Add more types of sample data
LEAF_XMODULES = {
    AnnotatableDescriptor: [{}],
    CapaDescriptor: [{}],
    DiscussionDescriptor: [{}],
    GraphicalSliderToolDescriptor: [{}],
    HtmlDescriptor: [{}],
    PollDescriptor: [{'display_name': 'Poll Display Name'}],
    WordCloudDescriptor: [{}],
    # This is being excluded because it has dependencies on django
    #VideoDescriptor,
}


# A dictionary that maps specific XModuleDescriptor classes with children
# to a list of sample field values to test with.
# TODO: Add more types of sample data
CONTAINER_XMODULES = {
    ConditionalDescriptor: [{}],
    CourseDescriptor: [{}],
    CrowdsourceHinterDescriptor: [{}],
    RandomizeDescriptor: [{}],
    SequenceDescriptor: [{}],
    VerticalBlock: [{}],
    WrapperBlock: [{}],
}

# These modules are not editable in studio yet, so studio-view tests skip them.
NOT_STUDIO_EDITABLE = (
    CrowdsourceHinterDescriptor,
    GraphicalSliderToolDescriptor,
    PollDescriptor
)
def flatten(class_dict):
    """
    Flatten a dict of cls -> [fields, ...] into an iterator of
    (cls, fields) pairs, one pair per fields entry.
    """
    return (
        (cls, fields)
        for cls, fields_list in class_dict.items()
        for fields in fields_list
    )
@use_strategy(BUILD_STRATEGY)
class ModuleSystemFactory(Factory):
    """
    Factory to build a test ModuleSystem. Creation is
    performed by :func:`xmodule.tests.get_test_system`, so
    arguments for that function are valid factory attributes.
    """
    class Meta(object):
        model = ModuleSystem

    @classmethod
    def _build(cls, target_class, *args, **kwargs):  # pylint: disable=unused-argument
        """See documentation from :meth:`factory.Factory._build`"""
        # Delegate construction to the test helper rather than the model class.
        return get_test_system(*args, **kwargs)
@use_strategy(BUILD_STRATEGY)
class DescriptorSystemFactory(Factory):
    """
    Factory to build a test DescriptorSystem. Creation is
    performed by :func:`xmodule.tests.get_test_descriptor_system`, so
    arguments for that function are valid factory attributes.
    """
    class Meta(object):
        model = DescriptorSystem

    @classmethod
    def _build(cls, target_class, *args, **kwargs):  # pylint: disable=unused-argument
        """See documentation from :meth:`factory.Factory._build`"""
        # Delegate construction to the test helper rather than the model class.
        return get_test_descriptor_system(*args, **kwargs)
class ContainerModuleRuntimeFactory(ModuleSystemFactory):
    """
    Factory to generate a ModuleRuntime that generates children when asked
    for them, for testing container XModules.
    """
    @post_generation
    def depth(self, create, depth, **kwargs):  # pylint: disable=unused-argument
        """
        When `depth` is specified as a Factory parameter, creates a
        tree of children with that many levels.
        """
        # pylint: disable=no-member
        # Leaves of the generated tree are HtmlDescriptors; interior
        # nodes are VerticalBlocks one level shallower.
        if depth == 0:
            self.get_module.side_effect = lambda x: LeafModuleFactory(descriptor_cls=HtmlDescriptor)
        else:
            self.get_module.side_effect = lambda x: ContainerModuleFactory(
                descriptor_cls=VerticalBlock,
                depth=depth - 1
            )

    @post_generation
    def position(self, create, position=2, **kwargs):  # pylint: disable=unused-argument, method-hidden
        """
        Update the position attribute of the generated ModuleRuntime.
        """
        self.position = position
class ContainerDescriptorRuntimeFactory(DescriptorSystemFactory):
    """
    Factory to generate a DescriptorRuntime that generates children when asked
    for them, for testing container XModuleDescriptors.
    """
    @post_generation
    def depth(self, create, depth, **kwargs):  # pylint: disable=unused-argument
        """
        When `depth` is specified as a Factory parameter, creates a
        tree of children with that many levels.
        """
        # pylint: disable=no-member
        # Leaves of the generated tree are HtmlDescriptors; interior
        # nodes are VerticalBlocks one level shallower.
        if depth == 0:
            self.load_item.side_effect = lambda x: LeafModuleFactory(descriptor_cls=HtmlDescriptor)
        else:
            self.load_item.side_effect = lambda x: ContainerModuleFactory(
                descriptor_cls=VerticalBlock,
                depth=depth - 1
            )

    @post_generation
    def position(self, create, position=2, **kwargs):  # pylint: disable=unused-argument, method-hidden
        """
        Update the position attribute of the generated ModuleRuntime.
        """
        self.position = position
@use_strategy(BUILD_STRATEGY)
class LeafDescriptorFactory(Factory):
    """
    Factory to generate leaf XModuleDescriptors.
    """
    # pylint: disable=missing-docstring

    class Meta(object):
        model = XModuleDescriptor

    runtime = SubFactory(DescriptorSystemFactory)
    url_name = LazyAttributeSequence('{.block_type}_{}'.format)

    @lazy_attribute
    def location(self):
        # Synthetic location; definition_id and usage_id alias it below.
        return Location('org', 'course', 'run', 'category', self.url_name, None)

    @lazy_attribute
    def block_type(self):
        return self.descriptor_cls.__name__  # pylint: disable=no-member

    @lazy_attribute
    def definition_id(self):
        return self.location

    @lazy_attribute
    def usage_id(self):
        return self.location

    @classmethod
    def _build(cls, target_class, *args, **kwargs):  # pylint: disable=unused-argument
        # Pop factory-only attributes; the remaining kwargs become the
        # block's field data.
        runtime = kwargs.pop('runtime')
        desc_cls = kwargs.pop('descriptor_cls')
        block_type = kwargs.pop('block_type')
        def_id = kwargs.pop('definition_id')
        usage_id = kwargs.pop('usage_id')

        block = runtime.construct_xblock_from_class(
            desc_cls,
            ScopeIds(None, block_type, def_id, usage_id),
            DictFieldData(dict(**kwargs))
        )
        block.save()
        return block
class LeafModuleFactory(LeafDescriptorFactory):
    """
    Factory to generate leaf XModuleDescriptors that are prepped to be
    used as XModules.
    """
    @post_generation
    def xmodule_runtime(self, create, xmodule_runtime, **kwargs):  # pylint: disable=method-hidden, unused-argument
        """
        Set the xmodule_runtime to make this XModuleDescriptor usable
        as an XModule.
        """
        # Build a default test runtime when the caller did not supply one.
        if xmodule_runtime is None:
            xmodule_runtime = ModuleSystemFactory()

        self.xmodule_runtime = xmodule_runtime
class ContainerDescriptorFactory(LeafDescriptorFactory):
    """
    Factory to generate XModuleDescriptors that are containers.
    """
    runtime = SubFactory(ContainerDescriptorRuntimeFactory)
    # Three placeholder child ids; the runtime factory materializes them.
    children = range(3)
class ContainerModuleFactory(LeafModuleFactory):
    """
    Factory to generate XModuleDescriptors that are containers
    and are ready to act as XModules.
    """
    @lazy_attribute
    def xmodule_runtime(self):
        # `depth` is a factory parameter consumed by the runtime factory.
        return ContainerModuleRuntimeFactory(depth=self.depth)  # pylint: disable=no-member
@ddt.ddt
class XBlockWrapperTestMixin(object):
    """
    This is a mixin for building tests of the implementation of the XBlock
    api by wrapping XModule native functions.

    You can create an actual test case by inheriting from this class and UnitTest,
    and implement skip_if_invalid and check_property.
    """
    def skip_if_invalid(self, descriptor_cls):
        """
        Raise SkipTest if this descriptor_cls shouldn't be tested.
        """
        pass

    def check_property(self, descriptor):  # pylint: disable=unused-argument
        """
        Execute assertions to verify that the property under test is true for
        the supplied descriptor.
        """
        raise SkipTest("check_property not defined")

    # Test that for all of the leaf XModule Descriptors,
    # the test property holds
    @ddt.data(*flatten(LEAF_XMODULES))
    def test_leaf_node(self, cls_and_fields):
        descriptor_cls, fields = cls_and_fields
        self.skip_if_invalid(descriptor_cls)
        descriptor = LeafModuleFactory(descriptor_cls=descriptor_cls, **fields)
        # Stub out the course/modulestore plumbing the descriptor may touch.
        mocked_course = Mock()
        modulestore = Mock()
        modulestore.get_course.return_value = mocked_course
        # pylint: disable=no-member
        descriptor.runtime.id_reader.get_definition_id = Mock(return_value='a')
        descriptor.runtime.modulestore = modulestore
        self.check_property(descriptor)

    # Test that when an xmodule is generated from descriptor_cls
    # with only xmodule children, the test property holds
    @ddt.data(*flatten(CONTAINER_XMODULES))
    def test_container_node_xmodules_only(self, cls_and_fields):
        descriptor_cls, fields = cls_and_fields
        self.skip_if_invalid(descriptor_cls)
        descriptor = ContainerModuleFactory(descriptor_cls=descriptor_cls, depth=2, **fields)
        # pylint: disable=no-member
        descriptor.runtime.id_reader.get_definition_id = Mock(return_value='a')
        self.check_property(descriptor)

    # Test that when an xmodule is generated from descriptor_cls
    # with mixed xmodule and xblock children, the test property holds
    @ddt.data(*flatten(CONTAINER_XMODULES))
    def test_container_node_mixed(self, cls_and_fields):  # pylint: disable=unused-argument
        raise SkipTest("XBlock support in XDescriptor not yet fully implemented")

    # Test that when an xmodule is generated from descriptor_cls
    # with only xblock children, the test property holds
    @ddt.data(*flatten(CONTAINER_XMODULES))
    def test_container_node_xblocks_only(self, cls_and_fields):  # pylint: disable=unused-argument
        raise SkipTest("XBlock support in XModules not yet fully implemented")
class TestStudentView(XBlockWrapperTestMixin, TestCase):
    """
    This tests that student_view and XModule.get_html produce the same results.
    """
    def skip_if_invalid(self, descriptor_cls):
        """Skip descriptors that provide their own student_view."""
        pure_xblock_class = issubclass(descriptor_cls, XBlock) and not issubclass(descriptor_cls, XModuleDescriptor)
        if pure_xblock_class:
            student_view = descriptor_cls.student_view
        else:
            student_view = descriptor_cls.module_class.student_view
        if student_view != XModule.student_view:
            raise SkipTest(descriptor_cls.__name__ + " implements student_view")

    def check_property(self, descriptor):
        """
        Assert that both student_view and get_html render the same.
        """
        self.assertEqual(
            descriptor._xmodule.get_html(),
            descriptor.render(STUDENT_VIEW).content
        )
class TestStudioView(XBlockWrapperTestMixin, TestCase):
    """
    This tests that studio_view and XModuleDescriptor.get_html produce the same results
    """
    def skip_if_invalid(self, descriptor_cls):
        """Skip non-studio-editable descriptors and custom studio_views."""
        if descriptor_cls in NOT_STUDIO_EDITABLE:
            raise SkipTest(descriptor_cls.__name__ + " is not editable in studio")

        pure_xblock_class = issubclass(descriptor_cls, XBlock) and not issubclass(descriptor_cls, XModuleDescriptor)
        if pure_xblock_class:
            raise SkipTest(descriptor_cls.__name__ + " is a pure XBlock and implements studio_view")
        elif descriptor_cls.studio_view != XModuleDescriptor.studio_view:
            raise SkipTest(descriptor_cls.__name__ + " implements studio_view")

    def check_property(self, descriptor):
        """
        Assert that studio_view and get_html render the same.
        """
        html = descriptor.get_html()
        rendered_content = descriptor.render(STUDIO_VIEW).content
        self.assertEqual(html, rendered_content)
class TestXModuleHandler(TestCase):
    """
    Tests that the xmodule_handler function correctly wraps handle_ajax
    """
    def setUp(self):
        super(TestXModuleHandler, self).setUp()
        # Fully mocked XModule: only xmodule_handler's wrapping is under test.
        self.module = XModule(descriptor=Mock(), field_data=Mock(), runtime=Mock(), scope_ids=Mock())
        self.module.handle_ajax = Mock(return_value='{}')
        self.request = webob.Request({})

    def test_xmodule_handler_passed_data(self):
        # With no suffix, handle_ajax receives dispatch=None plus POST data.
        self.module.xmodule_handler(self.request)
        self.module.handle_ajax.assert_called_with(None, self.request.POST)

    def test_xmodule_handler_dispatch(self):
        # The suffix argument is forwarded as the dispatch name.
        self.module.xmodule_handler(self.request, 'dispatch')
        self.module.handle_ajax.assert_called_with('dispatch', self.request.POST)

    def test_xmodule_handler_return_value(self):
        # The string returned by handle_ajax is wrapped in a webob.Response.
        response = self.module.xmodule_handler(self.request)
        self.assertIsInstance(response, webob.Response)
        self.assertEqual(response.body, '{}')
class TestXmlExport(XBlockWrapperTestMixin, TestCase):
    """
    This tests that XModuleDescriptor.export_course_to_xml and add_xml_to_node produce the same results.
    """
    def skip_if_invalid(self, descriptor_cls):
        """Skip descriptors that provide their own add_xml_to_node."""
        if descriptor_cls.add_xml_to_node != XModuleDescriptor.add_xml_to_node:
            raise SkipTest(descriptor_cls.__name__ + " implements add_xml_to_node")

    def check_property(self, descriptor):
        """Assert both export paths produce identical files and XML."""
        xmodule_api_fs = MemoryFS()
        xblock_api_fs = MemoryFS()

        descriptor.runtime.export_fs = xblock_api_fs
        xblock_node = etree.Element('unknown')
        descriptor.add_xml_to_node(xblock_node)

        xmodule_node = etree.fromstring(descriptor.export_to_xml(xmodule_api_fs))

        # Both the exported filesystem contents and the serialized XML
        # must match between the two APIs.
        self.assertEquals(list(xmodule_api_fs.walk()), list(xblock_api_fs.walk()))
        self.assertEquals(etree.tostring(xmodule_node), etree.tostring(xblock_node))
| agpl-3.0 |
pylixm/sae-django-demo | django1.7-sae/site-packages/django/db/backends/postgresql_psycopg2/operations.py | 63 | 9574 | from __future__ import unicode_literals
from django.conf import settings
from django.db.backends import BaseDatabaseOperations
class DatabaseOperations(BaseDatabaseOperations):
    """PostgreSQL-specific SQL generation helpers (Django 1.7 backend)."""

    def __init__(self, connection):
        super(DatabaseOperations, self).__init__(connection)

    def date_extract_sql(self, lookup_type, field_name):
        """Return SQL extracting a date component from a date field."""
        # http://www.postgresql.org/docs/current/static/functions-datetime.html#FUNCTIONS-DATETIME-EXTRACT
        if lookup_type == 'week_day':
            # For consistency across backends, we return Sunday=1, Saturday=7.
            return "EXTRACT('dow' FROM %s) + 1" % field_name
        else:
            return "EXTRACT('%s' FROM %s)" % (lookup_type, field_name)

    def date_interval_sql(self, sql, connector, timedelta):
        """
        implements the interval functionality for expressions
        format for Postgres:
        (datefield + interval '3 days 200 seconds 5 microseconds')
        """
        modifiers = []
        if timedelta.days:
            modifiers.append('%s days' % timedelta.days)
        if timedelta.seconds:
            modifiers.append('%s seconds' % timedelta.seconds)
        if timedelta.microseconds:
            modifiers.append('%s microseconds' % timedelta.microseconds)
        mods = ' '.join(modifiers)
        conn = ' %s ' % connector
        return '(%s)' % conn.join([sql, 'interval \'%s\'' % mods])

    def date_trunc_sql(self, lookup_type, field_name):
        """Return SQL truncating a date field to the given precision."""
        # http://www.postgresql.org/docs/current/static/functions-datetime.html#FUNCTIONS-DATETIME-TRUNC
        return "DATE_TRUNC('%s', %s)" % (lookup_type, field_name)

    def datetime_extract_sql(self, lookup_type, field_name, tzname):
        """Return (sql, params) extracting a component from a datetime,
        converting to the given timezone when USE_TZ is enabled."""
        if settings.USE_TZ:
            field_name = "%s AT TIME ZONE %%s" % field_name
            params = [tzname]
        else:
            params = []
        # http://www.postgresql.org/docs/current/static/functions-datetime.html#FUNCTIONS-DATETIME-EXTRACT
        if lookup_type == 'week_day':
            # For consistency across backends, we return Sunday=1, Saturday=7.
            sql = "EXTRACT('dow' FROM %s) + 1" % field_name
        else:
            sql = "EXTRACT('%s' FROM %s)" % (lookup_type, field_name)
        return sql, params

    def datetime_trunc_sql(self, lookup_type, field_name, tzname):
        """Return (sql, params) truncating a datetime to the given precision,
        converting to the given timezone when USE_TZ is enabled."""
        if settings.USE_TZ:
            field_name = "%s AT TIME ZONE %%s" % field_name
            params = [tzname]
        else:
            params = []
        # http://www.postgresql.org/docs/current/static/functions-datetime.html#FUNCTIONS-DATETIME-TRUNC
        sql = "DATE_TRUNC('%s', %s)" % (lookup_type, field_name)
        return sql, params

    def deferrable_sql(self):
        return " DEFERRABLE INITIALLY DEFERRED"

    def lookup_cast(self, lookup_type):
        """Return the casting template applied to the lhs of a lookup."""
        lookup = '%s'

        # Cast text lookups to text to allow things like filter(x__contains=4)
        if lookup_type in ('iexact', 'contains', 'icontains', 'startswith',
                           'istartswith', 'endswith', 'iendswith', 'regex', 'iregex'):
            lookup = "%s::text"

        # Use UPPER(x) for case-insensitive lookups; it's faster.
        if lookup_type in ('iexact', 'icontains', 'istartswith', 'iendswith'):
            lookup = 'UPPER(%s)' % lookup

        return lookup

    def field_cast_sql(self, db_type, internal_type):
        # Render inet/cidr columns as text hosts for comparisons.
        if internal_type == "GenericIPAddressField" or internal_type == "IPAddressField":
            return 'HOST(%s)'
        return '%s'

    def last_insert_id(self, cursor, table_name, pk_name):
        # Use pg_get_serial_sequence to get the underlying sequence name
        # from the table name and column name (available since PostgreSQL 8)
        cursor.execute("SELECT CURRVAL(pg_get_serial_sequence('%s','%s'))" % (
            self.quote_name(table_name), pk_name))
        return cursor.fetchone()[0]

    def no_limit_value(self):
        # PostgreSQL allows OFFSET without LIMIT, so no sentinel is needed.
        return None

    def prepare_sql_script(self, sql, _allow_fallback=False):
        # psycopg2 can execute a multi-statement script as a single string.
        return [sql]

    def quote_name(self, name):
        if name.startswith('"') and name.endswith('"'):
            return name  # Quoting once is enough.
        return '"%s"' % name

    def set_time_zone_sql(self):
        return "SET TIME ZONE %s"

    def sql_flush(self, style, tables, sequences, allow_cascade=False):
        """Return SQL statements emptying the given tables and resetting
        their sequences."""
        if tables:
            # Perform a single SQL 'TRUNCATE x, y, z...;' statement.  It allows
            # us to truncate tables referenced by a foreign key in any other
            # table.
            tables_sql = ', '.join(
                style.SQL_FIELD(self.quote_name(table)) for table in tables)
            if allow_cascade:
                sql = ['%s %s %s;' % (
                    style.SQL_KEYWORD('TRUNCATE'),
                    tables_sql,
                    style.SQL_KEYWORD('CASCADE'),
                )]
            else:
                sql = ['%s %s;' % (
                    style.SQL_KEYWORD('TRUNCATE'),
                    tables_sql,
                )]
            sql.extend(self.sequence_reset_by_name_sql(style, sequences))
            return sql
        else:
            return []

    def sequence_reset_by_name_sql(self, style, sequences):
        # 'ALTER SEQUENCE sequence_name RESTART WITH 1;'... style SQL statements
        # to reset sequence indices
        sql = []
        for sequence_info in sequences:
            table_name = sequence_info['table']
            column_name = sequence_info['column']
            if not (column_name and len(column_name) > 0):
                # This will be the case if it's an m2m using an autogenerated
                # intermediate table (see BaseDatabaseIntrospection.sequence_list)
                column_name = 'id'
            sql.append("%s setval(pg_get_serial_sequence('%s','%s'), 1, false);" %
                       (style.SQL_KEYWORD('SELECT'),
                        style.SQL_TABLE(self.quote_name(table_name)),
                        style.SQL_FIELD(column_name))
                       )
        return sql

    def tablespace_sql(self, tablespace, inline=False):
        if inline:
            return "USING INDEX TABLESPACE %s" % self.quote_name(tablespace)
        else:
            return "TABLESPACE %s" % self.quote_name(tablespace)

    def sequence_reset_sql(self, style, model_list):
        """Return SQL setting each model's serial sequence to max(pk)."""
        from django.db import models
        output = []
        qn = self.quote_name
        for model in model_list:
            # Use `coalesce` to set the sequence for each model to the max pk value if there are records,
            # or 1 if there are none. Set the `is_called` property (the third argument to `setval`) to true
            # if there are records (as the max pk value is already in use), otherwise set it to false.
            # Use pg_get_serial_sequence to get the underlying sequence name from the table name
            # and column name (available since PostgreSQL 8)

            for f in model._meta.local_fields:
                if isinstance(f, models.AutoField):
                    output.append("%s setval(pg_get_serial_sequence('%s','%s'), coalesce(max(%s), 1), max(%s) %s null) %s %s;" %
                                  (style.SQL_KEYWORD('SELECT'),
                                   style.SQL_TABLE(qn(model._meta.db_table)),
                                   style.SQL_FIELD(f.column),
                                   style.SQL_FIELD(qn(f.column)),
                                   style.SQL_FIELD(qn(f.column)),
                                   style.SQL_KEYWORD('IS NOT'),
                                   style.SQL_KEYWORD('FROM'),
                                   style.SQL_TABLE(qn(model._meta.db_table))))
                    break  # Only one AutoField is allowed per model, so don't bother continuing.
            for f in model._meta.many_to_many:
                if not f.rel.through:
                    output.append("%s setval(pg_get_serial_sequence('%s','%s'), coalesce(max(%s), 1), max(%s) %s null) %s %s;" %
                                  (style.SQL_KEYWORD('SELECT'),
                                   style.SQL_TABLE(qn(f.m2m_db_table())),
                                   style.SQL_FIELD('id'),
                                   style.SQL_FIELD(qn('id')),
                                   style.SQL_FIELD(qn('id')),
                                   style.SQL_KEYWORD('IS NOT'),
                                   style.SQL_KEYWORD('FROM'),
                                   style.SQL_TABLE(qn(f.m2m_db_table()))))
        return output

    def prep_for_iexact_query(self, x):
        return x

    def max_name_length(self):
        """
        Returns the maximum length of an identifier.

        Note that the maximum length of an identifier is 63 by default, but can
        be changed by recompiling PostgreSQL after editing the NAMEDATALEN
        macro in src/include/pg_config_manual.h .

        This implementation simply returns 63, but can easily be overridden by a
        custom database backend that inherits most of its behavior from this one.
        """
        return 63

    def distinct_sql(self, fields):
        if fields:
            return 'DISTINCT ON (%s)' % ', '.join(fields)
        else:
            return 'DISTINCT'

    def last_executed_query(self, cursor, sql, params):
        # http://initd.org/psycopg/docs/cursor.html#cursor.query
        # The query attribute is a Psycopg extension to the DB API 2.0.
        if cursor.query is not None:
            return cursor.query.decode('utf-8')
        return None

    def return_insert_id(self):
        return "RETURNING %s", ()

    def bulk_insert_sql(self, fields, num_values):
        # One "(%s, %s, ...)" placeholder group per row being inserted.
        items_sql = "(%s)" % ", ".join(["%s"] * len(fields))
        return "VALUES " + ", ".join([items_sql] * num_values)
| apache-2.0 |
Arzie/deluge | deluge/ui/gtkui/pluginmanager.py | 2 | 4510 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2007, 2008 Andrew Resch <andrewresch@gmail.com>
#
# This file is part of Deluge and is licensed under GNU General Public License 3.0, or later, with
# the additional special exception to link portions of this program with the OpenSSL library.
# See LICENSE for more details.
#
import logging
import deluge.component as component
import deluge.pluginmanagerbase
from deluge.configmanager import ConfigManager
from deluge.ui.client import client
log = logging.getLogger(__name__)
class PluginManager(deluge.pluginmanagerbase.PluginManagerBase, component.Component):
    """GTK UI plugin manager.

    Bridges the core's plugin enable/disable events into the GTK UI,
    exposes hook points for plugins, and provides convenience wrappers
    around UI components (toolbar, menus, preferences, torrent view).
    """
    def __init__(self):
        component.Component.__init__(self, "PluginManager")
        self.config = ConfigManager("gtkui.conf")
        deluge.pluginmanagerbase.PluginManagerBase.__init__(
            self, "gtkui.conf", "deluge.plugin.gtkui")

        # Hook name -> list of registered callbacks.
        self.hooks = {
            "on_apply_prefs": [],
            "on_show_prefs": []
        }

        client.register_event_handler("PluginEnabledEvent", self._on_plugin_enabled_event)
        client.register_event_handler("PluginDisabledEvent", self._on_plugin_disabled_event)

    def register_hook(self, hook, function):
        """Register a hook function with the plugin manager"""
        try:
            self.hooks[hook].append(function)
        except KeyError:
            log.warning("Plugin attempting to register invalid hook.")

    def deregister_hook(self, hook, function):
        """Deregisters a hook function"""
        try:
            self.hooks[hook].remove(function)
        except (KeyError, ValueError):
            # Bug fix: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit.  KeyError: unknown hook name;
            # ValueError: function was never registered.
            log.warning("Unable to deregister hook %s", hook)

    def start(self):
        """Start the plugin manager"""
        # Update the enabled_plugins from the core
        client.core.get_enabled_plugins().addCallback(self._on_get_enabled_plugins)
        for instance in self.plugins.values():
            component.start([instance.plugin._component_name])

    def stop(self):
        # Disable the plugins
        self.disable_plugins()

    def update(self):
        pass

    def _on_get_enabled_plugins(self, enabled_plugins):
        log.debug("Core has these plugins enabled: %s", enabled_plugins)
        for plugin in enabled_plugins:
            self.enable_plugin(plugin)

    def _on_plugin_enabled_event(self, name):
        self.enable_plugin(name)
        # Let the newly enabled plugin populate its preferences page.
        self.run_on_show_prefs()

    def _on_plugin_disabled_event(self, name):
        self.disable_plugin(name)

    # Hook functions
    def run_on_show_prefs(self):
        """This hook is run before the user is shown the preferences dialog.
        It is designed so that plugins can update their preference page with
        the config."""
        log.debug("run_on_show_prefs")
        for function in self.hooks["on_show_prefs"]:
            function()

    def run_on_apply_prefs(self):
        """This hook is run after the user clicks Apply or OK in the preferences
        dialog.
        """
        log.debug("run_on_apply_prefs")
        for function in self.hooks["on_apply_prefs"]:
            function()

    # Plugin functions.. will likely move to own class..
    def add_torrentview_text_column(self, *args, **kwargs):
        return component.get("TorrentView").add_text_column(*args, **kwargs)

    def remove_torrentview_column(self, *args):
        return component.get("TorrentView").remove_column(*args)

    def add_toolbar_separator(self):
        return component.get("ToolBar").add_separator()

    def add_toolbar_button(self, *args, **kwargs):
        return component.get("ToolBar").add_toolbutton(*args, **kwargs)

    def remove_toolbar_button(self, *args):
        return component.get("ToolBar").remove(*args)

    def add_torrentmenu_menu(self, *args):
        return component.get("MenuBar").torrentmenu.append(*args)

    def add_torrentmenu_separator(self):
        return component.get("MenuBar").add_torrentmenu_separator()

    def remove_torrentmenu_item(self, *args):
        return component.get("MenuBar").torrentmenu.remove(*args)

    def add_preferences_page(self, *args):
        return component.get("Preferences").add_page(*args)

    def remove_preferences_page(self, *args):
        return component.get("Preferences").remove_page(*args)

    def update_torrent_view(self, *args):
        return component.get("TorrentView").update(*args)

    def get_selected_torrents(self):
        """Returns a list of the selected torrent_ids"""
        return component.get("TorrentView").get_selected_torrents()
| gpl-3.0 |
numenta/nupic | tests/integration/nupic/opf/opf_checkpoint_test/experiments/backwards_compatibility/base.py | 20 | 15012 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2012-2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'/Users/ronmarianetti/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/ExpGenerator.pyc'
"""
import os

from nupic.frameworks.opf.exp_description_api import ExperimentDescriptionAPI
from nupic.frameworks.opf.exp_description_helpers import (
    updateConfigFromSubConfig,
    applyValueGettersToContainer,
    DeferredDictLookup)
from nupic.frameworks.opf.htm_prediction_model_callbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opf_utils import (InferenceType,
                                            InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opf_task_driver import (
    IterationPhaseSpecLearnOnly,
    IterationPhaseSpecInferOnly,
    IterationPhaseSpecLearnAndInfer)
VERBOSITY = 0
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
config = {
    # Type of model that the rest of these parameters apply to.
    'model': "HTMPrediction",

    # Version that specifies the format of the config.
    'version': 1,

    # Intermediate variables used to compute fields in modelParams and also
    # referenced from the control section.
    'aggregationInfo': {
        'days': 0,
        'hours': 0,
        'microseconds': 0,
        'milliseconds': 0,
        'minutes': 0,
        'months': 0,
        'seconds': 0,
        'weeks': 0,
        'years': 0,
        'fields': [(u'c1', 'first'), (u'c0', 'first')],
    },

    'predictAheadTime': None,

    # Model parameter dictionary.
    'modelParams': {
        # The type of inference that this model will perform
        'inferenceType': 'TemporalMultiStep',

        'sensorParams': {
            # Sensor diagnostic output verbosity control;
            # if > 0: sensor region will print out on screen what it's sensing
            # at each step 0: silent; >=1: some info; >=2: more info;
            # >=3: even more info (see compute() in py/regions/RecordSensor.py)
            'verbosity': VERBOSITY,

            # Example:
            #   dsEncoderSchema = [
            #       DeferredDictLookup('__field_name_encoder'),
            #   ],
            #
            # (value generated from DS_ENCODER_SCHEMA)
            'encoders': {
                u'c0_timeOfDay': {'fieldname': u'c0',
                                  'name': u'c0_timeOfDay',
                                  'timeOfDay': (21, 1),
                                  'type': 'DateEncoder'},
                u'c0_dayOfWeek': {'dayOfWeek': (21, 1),
                                  'fieldname': u'c0',
                                  'name': u'c0_dayOfWeek',
                                  'type': 'DateEncoder'},
                u'c0_weekend': {'fieldname': u'c0',
                                'name': u'c0_weekend',
                                'type': 'DateEncoder',
                                'weekend': 21},
                u'c1': {'clipInput': True,
                        'fieldname': u'c1',
                        'n': 100,
                        'name': u'c1',
                        'type': 'AdaptiveScalarEncoder',
                        'w': 21},
            },

            # A dictionary specifying the period for automatically-generated
            # resets from a RecordSensor;
            #
            # None = disable automatically-generated resets (also disabled if
            # all of the specified values evaluate to 0).
            # Valid keys is the desired combination of the following:
            #   days, hours, minutes, seconds, milliseconds, microseconds, weeks
            #
            # Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
            #
            # (value generated from SENSOR_AUTO_RESET)
            'sensorAutoReset': None,
        },

        'spEnable': True,

        'spParams': {
            # SP diagnostic output verbosity control;
            # 0: silent; >=1: some info; >=2: more info;
            'spVerbosity': VERBOSITY,

            'globalInhibition': 1,

            # Number of cell columns in the cortical region (same number for
            # SP and TM)
            # (see also tpNCellsPerCol)
            'columnCount': 2048,

            'inputWidth': 0,

            # SP inhibition control (absolute value);
            # Maximum number of active columns in the SP region's output (when
            # there are more, the weaker ones are suppressed)
            'numActiveColumnsPerInhArea': 40,

            'seed': 1956,

            # potentialPct
            # What percent of the columns's receptive field is available
            # for potential synapses. At initialization time, we will
            # choose potentialPct * (2*potentialRadius+1)^2
            'potentialPct': 0.5,

            # The default connected threshold. Any synapse whose
            # permanence value is above the connected threshold is
            # a "connected synapse", meaning it can contribute to the
            # cell's firing. Typical value is 0.10. Cells whose activity
            # level before inhibition falls below minDutyCycleBeforeInh
            # will have their own internal synPermConnectedCell
            # threshold set below this default value.
            # (This concept applies to both SP and TM and so 'cells'
            # is correct here as opposed to 'columns')
            'synPermConnected': 0.1,

            'synPermActiveInc': 0.1,

            'synPermInactiveDec': 0.01,
        },

        # Controls whether TM is enabled or disabled;
        # TM is necessary for making temporal predictions, such as predicting
        # the next inputs. Without TM, the model is only capable of
        # reconstructing missing sensor inputs (via SP).
        'tmEnable': True,

        'tmParams': {
            # TM diagnostic output verbosity control;
            # 0: silent; [1..6]: increasing levels of verbosity
            # (see verbosity in nupic/trunk/py/nupic/research/backtracking_tm.py and backtracking_tm_cpp.py)
            # NOTE(review): hardcoded to 0 rather than VERBOSITY like the
            # other regions -- possibly intentional; confirm before changing.
            'verbosity': 0,

            # Number of cell columns in the cortical region (same number for
            # SP and TM)
            # (see also tpNCellsPerCol)
            'columnCount': 2048,

            # The number of cells (i.e., states), allocated per column.
            'cellsPerColumn': 32,

            'inputWidth': 2048,

            'seed': 1960,

            # Temporal Pooler implementation selector (see _getTPClass in
            # CLARegion.py).
            'temporalImp': 'cpp',

            # New Synapse formation count
            # NOTE: If None, use spNumActivePerInhArea
            #
            # TODO: need better explanation
            'newSynapseCount': 20,

            # Maximum number of synapses per segment
            #  > 0 for fixed-size CLA
            # -1 for non-fixed-size CLA
            #
            # TODO: for Ron: once the appropriate value is placed in TM
            # constructor, see if we should eliminate this parameter from
            # description.py.
            'maxSynapsesPerSegment': 32,

            # Maximum number of segments per cell
            #  > 0 for fixed-size CLA
            # -1 for non-fixed-size CLA
            #
            # TODO: for Ron: once the appropriate value is placed in TM
            # constructor, see if we should eliminate this parameter from
            # description.py.
            'maxSegmentsPerCell': 128,

            # Initial Permanence
            # TODO: need better explanation
            'initialPerm': 0.21,

            # Permanence Increment
            'permanenceInc': 0.1,

            # Permanence Decrement
            # If set to None, will automatically default to tpPermanenceInc
            # value.
            'permanenceDec': 0.1,

            'globalDecay': 0.0,

            'maxAge': 0,

            # Minimum number of active synapses for a segment to be considered
            # during search for the best-matching segments.
            # None=use default
            # Replaces: tpMinThreshold
            'minThreshold': 12,

            # Segment activation threshold.
            # A segment is active if it has >= tpSegmentActivationThreshold
            # connected synapses that are active due to infActiveState
            # None=use default
            # Replaces: tpActivationThreshold
            'activationThreshold': 16,

            'outputType': 'normal',

            # "Pay Attention Mode" length. This tells the TM how many new
            # elements to append to the end of a learned sequence at a time.
            # Smaller values are better for datasets with short sequences,
            # higher values are better for datasets with long sequences.
            'pamLength': 1,
        },

        'clParams': {
            'regionName': 'SDRClassifierRegion',

            # Classifier diagnostic output verbosity control;
            # 0: silent; [1..6]: increasing levels of verbosity
            'verbosity': VERBOSITY,

            # This controls how fast the classifier learns/forgets. Higher values
            # make it adapt faster and forget older patterns faster.
            'alpha': 0.001,

            # This is set after the call to updateConfigFromSubConfig and is
            # computed from the aggregationInfo and predictAheadTime.
            'steps': '24',
        },

        'trainSPNetOnlyIfRequested': False,
    },
}
# end of config dictionary
# Adjust base config dictionary for any modifications if imported from a
# sub-experiment
updateConfigFromSubConfig(config)

# Compute predictionSteps based on the predictAheadTime and the aggregation
# period, which may be permuted over.
if config['predictAheadTime'] is not None:
    predictionSteps = int(round(aggregationDivide(
        config['predictAheadTime'], config['aggregationInfo'])))
    # A prediction horizon shorter than one aggregated step is meaningless.
    assert (predictionSteps >= 1)
    config['modelParams']['clParams']['steps'] = str(predictionSteps)

# Adjust config by applying ValueGetterBase-derived
# futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order
# to support value-getter-based substitutions from the sub-experiment (if any)
applyValueGettersToContainer(config)
# Absolute path to the experiment's input CSV, resolved relative to this
# description file so the experiment can be launched from any working
# directory.
# NOTE: ``os`` must be imported explicitly at module level (added to the
# import block); the original file never imported it and only worked if the
# ``from ... import *`` above happened to leak an ``os`` name into scope.
dataPath = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                        'data.csv'))
control = {
    # The environment that the current model is being run in
    "environment": 'nupic',

    # Input stream specification per py/nupicengine/cluster/database/StreamDef.json.
    #
    'dataset': {'aggregation': config['aggregationInfo'],
                u'info': u'82b42f21-7f86-47b3-bab4-3738703bf612',
                u'streams': [{u'columns': [u'c0', u'c1'],
                              u'info': u'82b42f21-7f86-47b3-bab4-3738703bf612',
                              u'source': 'file://%s' % (dataPath),
                              # NOTE(review): 'firstRecord'/'lastRecord' are
                              # not defined in the base config above --
                              # presumably injected by the sub-experiment via
                              # updateConfigFromSubConfig(); verify before
                              # running this description standalone.
                              u'first_record': config['firstRecord'],
                              u'last_record': config['lastRecord'],
                              u'types': [u'datetime', u'float']}],
                u'timeField': u'c0',
                u'version': 1},

    # Iteration count: maximum number of iterations. Each iteration corresponds
    # to one record from the (possibly aggregated) dataset. The task is
    # terminated when either number of iterations reaches iterationCount or
    # all records in the (possibly aggregated) database have been processed,
    # whichever occurs first.
    #
    # iterationCount of -1 = iterate over the entire dataset
    'iterationCount': -1,

    # A dictionary containing all the supplementary parameters for inference
    "inferenceArgs": {u'predictedField': u'c1', u'predictionSteps': [24]},

    # Metrics: A list of MetricSpecs that instantiate the metrics that are
    # computed for this experiment
    'metrics': [
        MetricSpec(field=u'c1', metric='multiStep', inferenceElement='multiStepBestPredictions', params={'window': 1000, 'steps': [24], 'errorMetric': 'altMAPE'}),
    ],

    # Logged Metrics: A sequence of regular expressions that specify which of
    # the metrics from the Inference Specifications section MUST be logged for
    # every prediction. The regex's correspond to the automatically generated
    # metric labels. This is similar to the way the optimization metric is
    # specified in permutations.py.
    'loggedMetrics': ['.*'],
}
# Bundle the model configuration and experiment control sections into the
# description object consumed by the OPF experiment runner.
descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
                                                control=control)
| agpl-3.0 |
waheedahmed/edx-platform | lms/djangoapps/shoppingcart/tests/test_models.py | 11 | 57845 | """
Tests for the Shopping Cart Models
"""
from decimal import Decimal
import datetime
import sys
import json
import copy
import smtplib
from boto.exception import BotoServerError # this is a super-class of SESError and catches connection errors
from mock import patch, MagicMock
from nose.plugins.attrib import attr
import pytz
import ddt
from django.core import mail
from django.core.mail.message import EmailMessage
from django.conf import settings
from django.db import DatabaseError
from django.test import TestCase
from django.test.utils import override_settings
from django.core.urlresolvers import reverse
from django.contrib.auth.models import AnonymousUser
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from shoppingcart.models import (
Order, OrderItem, CertificateItem,
InvalidCartItem, CourseRegistrationCode, PaidCourseRegistration, CourseRegCodeItem,
Donation, OrderItemSubclassPK,
Invoice, CourseRegistrationCodeInvoiceItem, InvoiceTransaction, InvoiceHistory,
RegistrationCodeRedemption,
Coupon, CouponRedemption)
from student.tests.factories import UserFactory
from student.models import CourseEnrollment
from course_modes.models import CourseMode
from shoppingcart.exceptions import (
PurchasedCallbackException,
CourseDoesNotExistException,
ItemAlreadyInCartException,
AlreadyEnrolledInCourseException,
InvalidStatusToRetire,
UnexpectedOrderItemStatus,
)
from opaque_keys.edx.locator import CourseLocator
@attr('shard_3')
@ddt.ddt
class OrderTest(ModuleStoreTestCase):
    """
    Test shopping cart orders (e.g., cart contains various items,
    order is taken through various pieces of cart state, etc.)
    """
    def setUp(self):
        """Create a user, a primary course plus four others (each with an
        honor mode), and mock out the analytics tracker."""
        super(OrderTest, self).setUp()
        self.user = UserFactory.create()
        course = CourseFactory.create()
        self.course_key = course.id
        self.other_course_keys = []
        for __ in xrange(1, 5):
            course_key = CourseFactory.create().id
            CourseMode.objects.create(
                course_id=course_key,
                mode_slug=CourseMode.HONOR,
                mode_display_name="Honor"
            )
            self.other_course_keys.append(course_key)
        self.cost = 40
        # Add mock tracker for event testing.
        patcher = patch('shoppingcart.models.analytics')
        self.mock_tracker = patcher.start()
        self.addCleanup(patcher.stop)
        CourseMode.objects.create(
            course_id=self.course_key,
            mode_slug=CourseMode.HONOR,
            mode_display_name="Honor"
        )

    def test_get_cart_for_user(self):
        """get_cart_for_user should return the same open cart on repeat calls."""
        # create a cart
        cart = Order.get_cart_for_user(user=self.user)
        # add something to it
        CertificateItem.add_to_order(cart, self.course_key, self.cost, 'honor')
        # should return the same cart
        cart2 = Order.get_cart_for_user(user=self.user)
        self.assertEquals(cart2.orderitem_set.count(), 1)

    def test_user_cart_has_items(self):
        """user_cart_has_items is False for anonymous/empty carts, and can
        filter by item subclass."""
        anon = AnonymousUser()
        self.assertFalse(Order.user_cart_has_items(anon))
        self.assertFalse(Order.user_cart_has_items(self.user))
        cart = Order.get_cart_for_user(self.user)
        item = OrderItem(order=cart, user=self.user)
        item.save()
        self.assertTrue(Order.user_cart_has_items(self.user))
        # A base OrderItem matches neither concrete subclass filter.
        self.assertFalse(Order.user_cart_has_items(self.user, [CertificateItem]))
        self.assertFalse(Order.user_cart_has_items(self.user, [PaidCourseRegistration]))

    def test_user_cart_has_paid_course_registration_items(self):
        """Subclass filter matches PaidCourseRegistration items only."""
        cart = Order.get_cart_for_user(self.user)
        item = PaidCourseRegistration(order=cart, user=self.user)
        item.save()
        self.assertTrue(Order.user_cart_has_items(self.user, [PaidCourseRegistration]))
        self.assertFalse(Order.user_cart_has_items(self.user, [CertificateItem]))

    def test_user_cart_has_certificate_items(self):
        """Subclass filter matches CertificateItem items only."""
        cart = Order.get_cart_for_user(self.user)
        CertificateItem.add_to_order(cart, self.course_key, self.cost, 'honor')
        self.assertTrue(Order.user_cart_has_items(self.user, [CertificateItem]))
        self.assertFalse(Order.user_cart_has_items(self.user, [PaidCourseRegistration]))

    def test_cart_clear(self):
        """clear() removes every item from the cart."""
        cart = Order.get_cart_for_user(user=self.user)
        CertificateItem.add_to_order(cart, self.course_key, self.cost, 'honor')
        CertificateItem.add_to_order(cart, self.other_course_keys[0], self.cost, 'honor')
        self.assertEquals(cart.orderitem_set.count(), 2)
        self.assertTrue(cart.has_items())
        cart.clear()
        self.assertEquals(cart.orderitem_set.count(), 0)
        self.assertFalse(cart.has_items())

    def test_add_item_to_cart_currency_match(self):
        """Adding an item whose currency differs from the cart's raises
        InvalidCartItem and leaves the cart unchanged."""
        cart = Order.get_cart_for_user(user=self.user)
        CertificateItem.add_to_order(cart, self.course_key, self.cost, 'honor', currency='eur')
        # verify that a new item has been added
        self.assertEquals(cart.orderitem_set.count(), 1)
        # verify that the cart's currency was updated
        self.assertEquals(cart.currency, 'eur')
        with self.assertRaises(InvalidCartItem):
            CertificateItem.add_to_order(cart, self.course_key, self.cost, 'honor', currency='usd')
        # assert that this item did not get added to the cart
        self.assertEquals(cart.orderitem_set.count(), 1)

    def test_total_cost(self):
        """total_cost is the sum of all item costs in the cart."""
        cart = Order.get_cart_for_user(user=self.user)
        # add items to the order
        course_costs = [(self.other_course_keys[0], 30),
                        (self.other_course_keys[1], 40),
                        (self.other_course_keys[2], 10),
                        (self.other_course_keys[3], 20)]
        for course, cost in course_costs:
            CertificateItem.add_to_order(cart, course, cost, 'honor')
        self.assertEquals(cart.orderitem_set.count(), len(course_costs))
        self.assertEquals(cart.total_cost, sum(cost for _course, cost in course_costs))

    def test_start_purchase(self):
        """start_purchase marks cart and items 'paying', is idempotent, and
        is a no-op after the cart is purchased."""
        # Start the purchase, which will mark the cart as "paying"
        cart = Order.get_cart_for_user(user=self.user)
        CertificateItem.add_to_order(cart, self.course_key, self.cost, 'honor', currency='usd')
        cart.start_purchase()
        self.assertEqual(cart.status, 'paying')
        for item in cart.orderitem_set.all():
            self.assertEqual(item.status, 'paying')
        # Starting the purchase should be idempotent
        cart.start_purchase()
        self.assertEqual(cart.status, 'paying')
        for item in cart.orderitem_set.all():
            self.assertEqual(item.status, 'paying')
        # If we retrieve the cart for the user, we should get a different order
        next_cart = Order.get_cart_for_user(user=self.user)
        self.assertNotEqual(cart, next_cart)
        self.assertEqual(next_cart.status, 'cart')
        # Complete the first purchase
        cart.purchase()
        self.assertEqual(cart.status, 'purchased')
        for item in cart.orderitem_set.all():
            self.assertEqual(item.status, 'purchased')
        # Starting the purchase again should be a no-op
        cart.start_purchase()
        self.assertEqual(cart.status, 'purchased')
        for item in cart.orderitem_set.all():
            self.assertEqual(item.status, 'purchased')

    def test_retire_order_cart(self):
        """Test that an order in cart can successfully be retired"""
        cart = Order.get_cart_for_user(user=self.user)
        CertificateItem.add_to_order(cart, self.course_key, self.cost, 'honor', currency='usd')
        cart.retire()
        self.assertEqual(cart.status, 'defunct-cart')
        self.assertEqual(cart.orderitem_set.get().status, 'defunct-cart')

    def test_retire_order_paying(self):
        """Test that an order in "paying" can successfully be retired"""
        cart = Order.get_cart_for_user(user=self.user)
        CertificateItem.add_to_order(cart, self.course_key, self.cost, 'honor', currency='usd')
        cart.start_purchase()
        cart.retire()
        self.assertEqual(cart.status, 'defunct-paying')
        self.assertEqual(cart.orderitem_set.get().status, 'defunct-paying')

    @ddt.data(
        ("cart", "paying", UnexpectedOrderItemStatus),
        ("purchased", "purchased", InvalidStatusToRetire),
    )
    @ddt.unpack
    def test_retire_order_error(self, order_status, item_status, exception):
        """
        Test error cases for retiring an order:
        1) Order item has a different status than the order
        2) The order's status isn't in "cart" or "paying"
        """
        cart = Order.get_cart_for_user(user=self.user)
        item = CertificateItem.add_to_order(cart, self.course_key, self.cost, 'honor', currency='usd')
        cart.status = order_status
        cart.save()
        item.status = item_status
        item.save()
        with self.assertRaises(exception):
            cart.retire()

    @ddt.data('defunct-paying', 'defunct-cart')
    def test_retire_order_already_retired(self, status):
        """
        Check that orders that have already been retired noop when the method
        is called on them again.
        """
        cart = Order.get_cart_for_user(user=self.user)
        item = CertificateItem.add_to_order(cart, self.course_key, self.cost, 'honor', currency='usd')
        cart.status = item.status = status
        cart.save()
        item.save()
        cart.retire()
        self.assertEqual(cart.status, status)
        self.assertEqual(item.status, status)

    @override_settings(
        LMS_SEGMENT_KEY="foobar",
        FEATURES={
            'STORE_BILLING_INFO': True,
        }
    )
    def test_purchase(self):
        # This test is for testing the subclassing functionality of OrderItem, but in
        # order to do this, we end up testing the specific functionality of
        # CertificateItem, which is not quite good unit test form. Sorry.
        cart = Order.get_cart_for_user(user=self.user)
        self.assertFalse(CourseEnrollment.is_enrolled(self.user, self.course_key))
        item = CertificateItem.add_to_order(cart, self.course_key, self.cost, 'honor')
        # Course enrollment object should be created but still inactive
        self.assertFalse(CourseEnrollment.is_enrolled(self.user, self.course_key))
        # Analytics client pipes output to stderr when using the default client
        with patch('sys.stderr', sys.stdout.write):
            cart.purchase()
        self.assertTrue(CourseEnrollment.is_enrolled(self.user, self.course_key))
        # Test email sending
        self.assertEquals(len(mail.outbox), 1)
        self.assertEquals('Order Payment Confirmation', mail.outbox[0].subject)
        self.assertIn(settings.PAYMENT_SUPPORT_EMAIL, mail.outbox[0].body)
        self.assertIn(unicode(cart.total_cost), mail.outbox[0].body)
        self.assertIn(item.additional_instruction_text(), mail.outbox[0].body)
        # Verify Google Analytics event fired for purchase
        self.mock_tracker.track.assert_called_once_with(  # pylint: disable=maybe-no-member
            self.user.id,
            'Completed Order',
            {
                'orderId': 1,
                'currency': 'usd',
                'total': '40.00',
                'products': [
                    {
                        'sku': u'CertificateItem.honor',
                        'name': unicode(self.course_key),
                        'category': unicode(self.course_key.org),
                        'price': '40.00',
                        'id': 1,
                        'quantity': 1
                    }
                ]
            },
            context={'ip': None, 'Google Analytics': {'clientId': None}}
        )

    def test_purchase_item_failure(self):
        """A DatabaseError during purchase rolls back enrollment and email."""
        # once again, we're testing against the specific implementation of
        # CertificateItem
        cart = Order.get_cart_for_user(user=self.user)
        CertificateItem.add_to_order(cart, self.course_key, self.cost, 'honor')
        with patch('shoppingcart.models.CertificateItem.save', side_effect=DatabaseError):
            with self.assertRaises(DatabaseError):
                cart.purchase()
        # verify that we rolled back the entire transaction
        self.assertFalse(CourseEnrollment.is_enrolled(self.user, self.course_key))
        # verify that e-mail wasn't sent
        self.assertEquals(len(mail.outbox), 0)

    def test_purchase_twice(self):
        """Purchasing the same cart twice sends only one confirmation email."""
        cart = Order.get_cart_for_user(self.user)
        CertificateItem.add_to_order(cart, self.course_key, self.cost, 'honor')
        # purchase the cart more than once
        cart.purchase()
        cart.purchase()
        self.assertEquals(len(mail.outbox), 1)

    @patch('shoppingcart.models.log.error')
    def test_purchase_item_email_smtp_failure(self, error_logger):
        """An SMTP failure while emailing is logged, not raised."""
        cart = Order.get_cart_for_user(user=self.user)
        CertificateItem.add_to_order(cart, self.course_key, self.cost, 'honor')
        with patch('shoppingcart.models.EmailMessage.send', side_effect=smtplib.SMTPException):
            cart.purchase()
        self.assertTrue(error_logger.called)

    @patch('shoppingcart.models.log.error')
    def test_purchase_item_email_boto_failure(self, error_logger):
        """A BotoServerError while emailing is logged, not raised."""
        cart = Order.get_cart_for_user(user=self.user)
        CertificateItem.add_to_order(cart, self.course_key, self.cost, 'honor')
        with patch.object(EmailMessage, 'send') as mock_send:
            mock_send.side_effect = BotoServerError("status", "reason")
            cart.purchase()
        self.assertTrue(error_logger.called)

    def purchase_with_data(self, cart):
        """ purchase a cart with billing information """
        CertificateItem.add_to_order(cart, self.course_key, self.cost, 'honor')
        cart.purchase(
            first='John',
            last='Smith',
            street1='11 Cambridge Center',
            street2='Suite 101',
            city='Cambridge',
            state='MA',
            postalcode='02412',
            country='US',
            ccnum='1111',
            cardtype='001',
        )

    @patch('shoppingcart.models.render_to_string')
    @patch.dict(settings.FEATURES, {'STORE_BILLING_INFO': True})
    def test_billing_info_storage_on(self, render):
        """With STORE_BILLING_INFO on, all billing fields are persisted."""
        cart = Order.get_cart_for_user(self.user)
        self.purchase_with_data(cart)
        self.assertNotEqual(cart.bill_to_first, '')
        self.assertNotEqual(cart.bill_to_last, '')
        self.assertNotEqual(cart.bill_to_street1, '')
        self.assertNotEqual(cart.bill_to_street2, '')
        self.assertNotEqual(cart.bill_to_postalcode, '')
        self.assertNotEqual(cart.bill_to_ccnum, '')
        self.assertNotEqual(cart.bill_to_cardtype, '')
        self.assertNotEqual(cart.bill_to_city, '')
        self.assertNotEqual(cart.bill_to_state, '')
        self.assertNotEqual(cart.bill_to_country, '')
        ((_, context), _) = render.call_args
        self.assertTrue(context['has_billing_info'])

    @patch('shoppingcart.models.render_to_string')
    @patch.dict(settings.FEATURES, {'STORE_BILLING_INFO': False})
    def test_billing_info_storage_off(self, render):
        """With STORE_BILLING_INFO off, sensitive billing fields stay empty."""
        cart = Order.get_cart_for_user(self.user)
        self.purchase_with_data(cart)
        self.assertNotEqual(cart.bill_to_first, '')
        self.assertNotEqual(cart.bill_to_last, '')
        self.assertNotEqual(cart.bill_to_city, '')
        self.assertNotEqual(cart.bill_to_state, '')
        self.assertNotEqual(cart.bill_to_country, '')
        self.assertNotEqual(cart.bill_to_postalcode, '')
        # things we expect to be missing when the feature is off
        self.assertEqual(cart.bill_to_street1, '')
        self.assertEqual(cart.bill_to_street2, '')
        self.assertEqual(cart.bill_to_ccnum, '')
        self.assertEqual(cart.bill_to_cardtype, '')
        ((_, context), _) = render.call_args
        self.assertFalse(context['has_billing_info'])

    def test_generate_receipt_instructions_callchain(self):
        """
        This tests the generate_receipt_instructions call chain (ie calling the function on the
        cart also calls it on items in the cart
        """
        mock_gen_inst = MagicMock(return_value=(OrderItemSubclassPK(OrderItem, 1), set([])))
        cart = Order.get_cart_for_user(self.user)
        item = OrderItem(user=self.user, order=cart)
        item.save()
        self.assertTrue(cart.has_items())
        with patch.object(OrderItem, 'generate_receipt_instructions', mock_gen_inst):
            cart.generate_receipt_instructions()
            mock_gen_inst.assert_called_with()

    def test_confirmation_email_error(self):
        """An email failure must not roll back a completed purchase."""
        CourseMode.objects.create(
            course_id=self.course_key,
            mode_slug="verified",
            mode_display_name="Verified",
            min_price=self.cost
        )
        cart = Order.get_cart_for_user(self.user)
        CertificateItem.add_to_order(cart, self.course_key, self.cost, 'verified')
        # Simulate an error when sending the confirmation
        # email.  This should NOT raise an exception.
        # If it does, then the implicit view-level
        # transaction could cause a roll-back, effectively
        # reversing order fulfillment.
        with patch.object(mail.message.EmailMessage, 'send') as mock_send:
            mock_send.side_effect = Exception("Kaboom!")
            cart.purchase()
        # Verify that the purchase completed successfully
        self.assertEqual(cart.status, 'purchased')
        # Verify that the user is enrolled as "verified"
        mode, is_active = CourseEnrollment.enrollment_mode_for_user(self.user, self.course_key)
        self.assertTrue(is_active)
        self.assertEqual(mode, 'verified')
class OrderItemTest(TestCase):
    """Unit tests for the base OrderItem model class."""

    def setUp(self):
        super(OrderItemTest, self).setUp()
        self.user = UserFactory.create()

    def test_order_item_purchased_callback(self):
        """
        This tests that calling purchased_callback on the base OrderItem class raises NotImplementedError
        """
        item = OrderItem(user=self.user, order=Order.get_cart_for_user(self.user))
        with self.assertRaises(NotImplementedError):
            item.purchased_callback()

    def test_order_item_generate_receipt_instructions(self):
        """
        This tests that the generate_receipt_instructions call chain and also
        that calling it on the base OrderItem class returns an empty list
        """
        cart = Order.get_cart_for_user(self.user)
        item = OrderItem(user=self.user, order=cart)
        item.save()
        self.assertTrue(cart.has_items())
        (inst_dict, inst_set) = cart.generate_receipt_instructions()
        self.assertDictEqual({item.pk_with_subclass: set([])}, inst_dict)
        self.assertEquals(set([]), inst_set)

    def test_is_discounted(self):
        """
        This tests the is_discounted property of the OrderItem
        """
        cart = Order.get_cart_for_user(self.user)
        item = OrderItem(user=self.user, order=cart)
        # No list price at all -> not discounted.
        item.list_price = None
        item.unit_cost = 100
        self.assertFalse(item.is_discounted)
        # List price equals unit cost -> not discounted.
        item.list_price = 100
        item.unit_cost = 100
        self.assertFalse(item.is_discounted)
        # Unit cost below list price -> discounted.
        item.list_price = 100
        item.unit_cost = 90
        self.assertTrue(item.is_discounted)

    def test_get_list_price(self):
        """
        This tests the get_list_price() method of the OrderItem
        """
        cart = Order.get_cart_for_user(self.user)
        item = OrderItem(user=self.user, order=cart)
        # Falls back to unit_cost when no list price is set.
        item.list_price = None
        item.unit_cost = 100
        self.assertEqual(item.get_list_price(), item.unit_cost)
        # Otherwise returns the explicit list price.
        item.list_price = 200
        item.unit_cost = 100
        self.assertEqual(item.get_list_price(), item.list_price)
@attr('shard_3')
@patch.dict('django.conf.settings.FEATURES', {'ENABLE_PAID_COURSE_REGISTRATION': True})
class PaidCourseRegistrationTest(ModuleStoreTestCase):
"""
Paid Course Registration Tests.
"""
    def setUp(self):
        """Create a user, a course with a priced honor mode, a discount
        percentage, and an empty cart for the user."""
        super(PaidCourseRegistrationTest, self).setUp()
        self.user = UserFactory.create()
        self.user.set_password('password')
        self.user.save()
        self.cost = 40
        self.course = CourseFactory.create()
        self.course_key = self.course.id
        self.course_mode = CourseMode(
            course_id=self.course_key,
            mode_slug=CourseMode.HONOR,
            mode_display_name="honor cert",
            min_price=self.cost
        )
        self.course_mode.save()
        self.percentage_discount = 20.0
        self.cart = Order.get_cart_for_user(self.user)
    def test_get_total_amount_of_purchased_items(self):
        """
        Test to check the total amount of the
        purchased items.
        """
        PaidCourseRegistration.add_to_order(self.cart, self.course_key, mode_slug=CourseMode.HONOR)
        self.cart.purchase()
        # One registration at self.cost (40) -> total of 40.00.
        total_amount = PaidCourseRegistration.get_total_amount_of_purchased_item(course_key=self.course_key)
        self.assertEqual(total_amount, 40.00)
    def test_get_total_amount_empty(self):
        """
        Test to check the total amount of the
        purchased items.
        """
        # No purchases were made, so the total must be zero.
        total_amount = PaidCourseRegistration.get_total_amount_of_purchased_item(course_key=self.course_key)
        self.assertEqual(total_amount, 0.00)
    def test_add_to_order(self):
        """Adding a paid registration populates cost, mode, user and status,
        and is detectable via contained_in_order."""
        reg1 = PaidCourseRegistration.add_to_order(self.cart, self.course_key, mode_slug=CourseMode.HONOR)
        self.assertEqual(reg1.unit_cost, self.cost)
        self.assertEqual(reg1.line_cost, self.cost)
        self.assertEqual(reg1.unit_cost, self.course_mode.min_price)
        self.assertEqual(reg1.mode, "honor")
        self.assertEqual(reg1.user, self.user)
        self.assertEqual(reg1.status, "cart")
        self.assertTrue(PaidCourseRegistration.contained_in_order(self.cart, self.course_key))
        # A course that was never added must not be reported as contained.
        self.assertFalse(PaidCourseRegistration.contained_in_order(
            self.cart, CourseLocator(org="MITx", course="999", run="Robot_Super_Course_abcd"))
        )
        self.assertEqual(self.cart.total_cost, self.cost)
    def test_order_generated_registration_codes(self):
        """
        Test to check for the order generated registration
        codes.
        """
        self.cart.order_type = 'business'
        self.cart.save()
        item = CourseRegCodeItem.add_to_order(self.cart, self.course_key, 2)
        self.cart.purchase()
        # One registration code should be generated per purchased seat.
        registration_codes = CourseRegistrationCode.order_generated_registration_codes(self.course_key)
        self.assertEqual(registration_codes.count(), item.qty)
    def test_order_generated_totals(self):
        """
        Test to check for the order generated registration
        codes.
        """
        # Nothing purchased yet -> total is 0.
        total_amount = CourseRegCodeItem.get_total_amount_of_purchased_item(self.course_key)
        self.assertEqual(total_amount, 0)
        self.cart.order_type = 'business'
        self.cart.save()
        item = CourseRegCodeItem.add_to_order(self.cart, self.course_key, 2, mode_slug=CourseMode.HONOR)
        self.cart.purchase()
        registration_codes = CourseRegistrationCode.order_generated_registration_codes(self.course_key)
        self.assertEqual(registration_codes.count(), item.qty)
        # 2 seats at self.cost (40) each -> 80.00.
        total_amount = CourseRegCodeItem.get_total_amount_of_purchased_item(self.course_key)
        self.assertEqual(total_amount, 80.00)
    def add_coupon(self, course_key, is_active, code):
        """
        add dummy coupon into models
        """
        Coupon.objects.create(
            code=code,
            description='testing code',
            course_id=course_key,
            percentage_discount=self.percentage_discount,
            created_by=self.user,
            is_active=is_active
        )
    def login_user(self, username):
        """
        login the user to the platform.
        """
        # Password matches the one set on users created in setUp.
        self.client.login(username=username, password="password")
    def test_get_top_discount_codes_used(self):
        """
        Test to check for the top coupon codes used.
        """
        self.login_user(self.user.username)
        self.add_coupon(self.course_key, True, 'Ad123asd')
        self.add_coupon(self.course_key, True, '32213asd')
        self.purchases_using_coupon_codes()
        top_discounted_codes = CouponRedemption.get_top_discount_codes_used(self.course_key)
        # NOTE(review): assertTrue(x, y) treats ``y`` as the failure MESSAGE,
        # not an expected value -- these four assertions only check truthiness
        # and never compare against 'Ad123asd'/counts. They were almost
        # certainly meant to be assertEqual; left unchanged because the true
        # expected ordering/counts cannot be verified from this file.
        self.assertTrue(top_discounted_codes[0]['coupon__code'], 'Ad123asd')
        self.assertTrue(top_discounted_codes[0]['coupon__used_count'], 1)
        self.assertTrue(top_discounted_codes[1]['coupon__code'], '32213asd')
        self.assertTrue(top_discounted_codes[1]['coupon__used_count'], 2)
    def test_get_total_coupon_code_purchases(self):
        """
        Test to assert the number of coupon code purchases.
        """
        self.login_user(self.user.username)
        self.add_coupon(self.course_key, True, 'Ad123asd')
        self.add_coupon(self.course_key, True, '32213asd')
        self.purchases_using_coupon_codes()
        total_coupon_code_purchases = CouponRedemption.get_total_coupon_code_purchases(self.course_key)
        # NOTE(review): assertTrue(x, y) only checks truthiness; ``3`` here is
        # the failure message, not an expected value. Likely meant assertEqual.
        self.assertTrue(total_coupon_code_purchases['coupon__count'], 3)
def test_get_self_purchased_seat_count(self):
"""
Test to assert the number of seats
purchased using individual purchases.
"""
PaidCourseRegistration.add_to_order(self.cart, self.course_key)
self.cart.purchase()
test_student = UserFactory.create()
test_student.set_password('password')
test_student.save()
self.cart = Order.get_cart_for_user(test_student)
PaidCourseRegistration.add_to_order(self.cart, self.course_key)
self.cart.purchase()
total_seats_count = PaidCourseRegistration.get_self_purchased_seat_count(course_key=self.course_key)
self.assertTrue(total_seats_count, 2)
    def purchases_using_coupon_codes(self):
        """
        helper method that uses coupon codes when purchasing courses.

        Performs three purchases in sequence:
          1. a business order (2 seats) redeeming 'Ad123asd'
          2. a second business order (2 seats) redeeming 'Ad123asd' again
          3. an individual registration redeeming '32213asd'
        """
        # First business order: redeem 'Ad123asd'.
        self.cart.order_type = 'business'
        self.cart.save()
        CourseRegCodeItem.add_to_order(self.cart, self.course_key, 2)
        resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': 'Ad123asd'})
        self.assertEqual(resp.status_code, 200)
        self.cart.purchase()
        self.cart.clear()

        # Second business order: redeem 'Ad123asd' a second time in a fresh cart.
        self.cart = Order.get_cart_for_user(self.user)
        self.cart.order_type = 'business'
        self.cart.save()
        CourseRegCodeItem.add_to_order(self.cart, self.course_key, 2)
        resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': 'Ad123asd'})
        self.assertEqual(resp.status_code, 200)
        self.cart.purchase()
        self.cart.clear()

        # Individual (non-business) purchase: redeem '32213asd'.
        self.cart = Order.get_cart_for_user(self.user)
        PaidCourseRegistration.add_to_order(self.cart, self.course_key)
        resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': '32213asd'})
        self.assertEqual(resp.status_code, 200)
        self.cart.purchase()
    def test_cart_type_business(self):
        """
        A business order does not enroll the buyer directly; it generates
        one registration code per purchased seat instead.
        """
        self.cart.order_type = 'business'
        self.cart.save()
        item = CourseRegCodeItem.add_to_order(self.cart, self.course_key, 2)
        self.cart.purchase()
        # The purchasing user is not enrolled by a business order.
        self.assertFalse(CourseEnrollment.is_enrolled(self.user, self.course_key))

        # check that the registration codes are generated against the order
        registration_codes = CourseRegistrationCode.order_generated_registration_codes(self.course_key)
        self.assertEqual(registration_codes.count(), item.qty)
    def test_regcode_redemptions(self):
        """
        Asserts the data model around RegistrationCodeRedemption

        A redemption saved for an enrollment must be discoverable again via
        registration_code_used_for_enrollment().
        """
        self.cart.order_type = 'business'
        self.cart.save()
        CourseRegCodeItem.add_to_order(self.cart, self.course_key, 2)
        self.cart.purchase()

        reg_code = CourseRegistrationCode.order_generated_registration_codes(self.course_key)[0]
        enrollment = CourseEnrollment.enroll(self.user, self.course_key)

        redemption = RegistrationCodeRedemption(
            registration_code=reg_code,
            redeemed_by=self.user,
            course_enrollment=enrollment
        )
        redemption.save()

        test_redemption = RegistrationCodeRedemption.registration_code_used_for_enrollment(enrollment)
        self.assertEqual(test_redemption.id, redemption.id)
    def test_regcode_multi_redemptions(self):
        """
        Asserts the data model around RegistrationCodeRedemption and
        what happens when we do multiple redemptions by same user

        With several redemptions pointing at the same enrollment, the lookup
        must return one of the saved redemptions.
        """
        self.cart.order_type = 'business'
        self.cart.save()
        CourseRegCodeItem.add_to_order(self.cart, self.course_key, 2)
        self.cart.purchase()

        reg_codes = CourseRegistrationCode.order_generated_registration_codes(self.course_key)
        self.assertEqual(len(reg_codes), 2)

        enrollment = CourseEnrollment.enroll(self.user, self.course_key)

        # Redeem every generated code against the same enrollment.
        ids = []
        for reg_code in reg_codes:
            redemption = RegistrationCodeRedemption(
                registration_code=reg_code,
                redeemed_by=self.user,
                course_enrollment=enrollment
            )
            redemption.save()
            ids.append(redemption.id)

        test_redemption = RegistrationCodeRedemption.registration_code_used_for_enrollment(enrollment)
        self.assertIn(test_redemption.id, ids)
    def test_add_with_default_mode(self):
        """
        Tests add_to_cart where the mode specified in the argument is NOT
        in the database and NOT the default "audit". In this case it
        just adds the user in the CourseMode.DEFAULT_MODE for free.
        """
        # An unknown mode slug falls back to the default (free) shoppingcart mode.
        reg1 = PaidCourseRegistration.add_to_order(self.cart, self.course_key, mode_slug="DNE")

        self.assertEqual(reg1.unit_cost, 0)
        self.assertEqual(reg1.line_cost, 0)
        self.assertEqual(reg1.mode, CourseMode.DEFAULT_SHOPPINGCART_MODE_SLUG)
        self.assertEqual(reg1.user, self.user)
        self.assertEqual(reg1.status, "cart")
        self.assertEqual(self.cart.total_cost, 0)
        self.assertTrue(PaidCourseRegistration.contained_in_order(self.cart, self.course_key))

        # The same fallback applies to multi-seat registration-code items.
        course_reg_code_item = CourseRegCodeItem.add_to_order(self.cart, self.course_key, 2, mode_slug="DNE")

        self.assertEqual(course_reg_code_item.unit_cost, 0)
        self.assertEqual(course_reg_code_item.line_cost, 0)
        self.assertEqual(course_reg_code_item.mode, CourseMode.DEFAULT_SHOPPINGCART_MODE_SLUG)
        self.assertEqual(course_reg_code_item.user, self.user)
        self.assertEqual(course_reg_code_item.status, "cart")
        self.assertEqual(self.cart.total_cost, 0)
        self.assertTrue(CourseRegCodeItem.contained_in_order(self.cart, self.course_key))
    def test_add_course_reg_item_with_no_course_item(self):
        """Adding a reg-code item for a non-existent course must raise."""
        fake_course_id = CourseLocator(org="edx", course="fake", run="course")
        with self.assertRaises(CourseDoesNotExistException):
            CourseRegCodeItem.add_to_order(self.cart, fake_course_id, 2)
    def test_course_reg_item_already_in_cart(self):
        """Adding the same reg-code item twice to one cart must raise."""
        CourseRegCodeItem.add_to_order(self.cart, self.course_key, 2)
        with self.assertRaises(ItemAlreadyInCartException):
            CourseRegCodeItem.add_to_order(self.cart, self.course_key, 2)
    def test_course_reg_item_already_enrolled_in_course(self):
        """An already-enrolled user cannot add a reg-code item for the course."""
        CourseEnrollment.enroll(self.user, self.course_key)
        with self.assertRaises(AlreadyEnrolledInCourseException):
            CourseRegCodeItem.add_to_order(self.cart, self.course_key, 2)
    def test_purchased_callback(self):
        """Purchasing a PaidCourseRegistration enrolls the user and links the enrollment."""
        reg1 = PaidCourseRegistration.add_to_order(self.cart, self.course_key)
        self.cart.purchase()
        self.assertTrue(CourseEnrollment.is_enrolled(self.user, self.course_key))

        reg1 = PaidCourseRegistration.objects.get(id=reg1.id)  # reload from DB to get side-effect
        self.assertEqual(reg1.status, "purchased")
        self.assertIsNotNone(reg1.course_enrollment)
        self.assertEqual(reg1.course_enrollment.id, CourseEnrollment.objects.get(user=self.user, course_id=self.course_key).id)
    def test_generate_receipt_instructions(self):
        """
        Add 2 courses to the order and make sure the instruction_set only contains 1 element (no dups)
        """
        course2 = CourseFactory.create()
        course_mode2 = CourseMode(course_id=course2.id,
                                  mode_slug="honor",
                                  mode_display_name="honor cert",
                                  min_price=self.cost)
        course_mode2.save()

        pr1 = PaidCourseRegistration.add_to_order(self.cart, self.course_key)
        pr2 = PaidCourseRegistration.add_to_order(self.cart, course2.id)
        self.cart.purchase()
        inst_dict, inst_set = self.cart.generate_receipt_instructions()

        # One instruction entry per item, but identical instruction text
        # must be de-duplicated in the set.
        self.assertEqual(2, len(inst_dict))
        self.assertEqual(1, len(inst_set))
        self.assertIn("dashboard", inst_set.pop())
        self.assertIn(pr1.pk_with_subclass, inst_dict)
        self.assertIn(pr2.pk_with_subclass, inst_dict)
    def test_purchased_callback_exception(self):
        """The purchase callback must raise (and not enroll) for a bogus course id."""
        reg1 = PaidCourseRegistration.add_to_order(self.cart, self.course_key)
        # Point the item at a course key that exists nowhere in the modulestore.
        reg1.course_id = CourseLocator(org="changed", course="forsome", run="reason")
        reg1.save()
        with self.assertRaises(PurchasedCallbackException):
            reg1.purchased_callback()
        self.assertFalse(CourseEnrollment.is_enrolled(self.user, self.course_key))

        reg1.course_id = CourseLocator(org="abc", course="efg", run="hij")
        reg1.save()
        with self.assertRaises(PurchasedCallbackException):
            reg1.purchased_callback()
        self.assertFalse(CourseEnrollment.is_enrolled(self.user, self.course_key))

        # The same failure mode applies to registration-code items.
        course_reg_code_item = CourseRegCodeItem.add_to_order(self.cart, self.course_key, 2)
        course_reg_code_item.course_id = CourseLocator(org="changed1", course="forsome1", run="reason1")
        course_reg_code_item.save()
        with self.assertRaises(PurchasedCallbackException):
            course_reg_code_item.purchased_callback()
def test_user_cart_has_both_items(self):
"""
This test exists b/c having both CertificateItem and PaidCourseRegistration in an order used to break
PaidCourseRegistration.contained_in_order
"""
cart = Order.get_cart_for_user(self.user)
CertificateItem.add_to_order(cart, self.course_key, self.cost, 'honor')
PaidCourseRegistration.add_to_order(self.cart, self.course_key)
self.assertTrue(PaidCourseRegistration.contained_in_order(cart, self.course_key))
class CertificateItemTest(ModuleStoreTestCase):
    """
    Tests for verifying specific CertificateItem functionality
    """
    def setUp(self):
        """
        Create a test user plus a course offering both an 'honor' and a
        'verified' mode at the same price, and stub out the event-tracking
        and analytics emitters so tests can assert on their calls.
        """
        super(CertificateItemTest, self).setUp()
        self.user = UserFactory.create()
        self.cost = 40
        course = CourseFactory.create()
        self.course_key = course.id
        course_mode = CourseMode(course_id=self.course_key,
                                 mode_slug="honor",
                                 mode_display_name="honor cert",
                                 min_price=self.cost)
        course_mode.save()
        course_mode = CourseMode(course_id=self.course_key,
                                 mode_slug="verified",
                                 mode_display_name="verified cert",
                                 min_price=self.cost)
        course_mode.save()

        # Mock out the trackers so no real tracking backend is contacted.
        patcher = patch('student.models.tracker')
        self.mock_tracker = patcher.start()
        self.addCleanup(patcher.stop)
        analytics_patcher = patch('shoppingcart.models.analytics')
        self.mock_analytics_tracker = analytics_patcher.start()
        self.addCleanup(analytics_patcher.stop)

    def _assert_refund_tracked(self):
        """
        Assert that we fired a refund event.
        """
        self.mock_analytics_tracker.track.assert_called_with(  # pylint: disable=maybe-no-member
            self.user.id,
            'Refunded Order',
            {
                'orderId': 1,
                'currency': 'usd',
                'total': '40.00',
                'products': [
                    {
                        'sku': u'CertificateItem.verified',
                        'name': unicode(self.course_key),
                        'category': unicode(self.course_key.org),
                        'price': '40.00',
                        'id': 1,
                        'quantity': 1
                    }
                ]
            },
            context={'ip': None, 'Google Analytics': {'clientId': None}}
        )

    def test_existing_enrollment(self):
        # Buying a verified certificate while already enrolled must keep the
        # enrollment and upgrade its mode to 'verified'.
        CourseEnrollment.enroll(self.user, self.course_key)
        cart = Order.get_cart_for_user(user=self.user)
        CertificateItem.add_to_order(cart, self.course_key, self.cost, 'verified')
        # verify that we are still enrolled
        self.assertTrue(CourseEnrollment.is_enrolled(self.user, self.course_key))
        self.mock_tracker.reset_mock()
        cart.purchase()
        enrollment = CourseEnrollment.objects.get(user=self.user, course_id=self.course_key)
        self.assertEquals(enrollment.mode, u'verified')

    def test_single_item_template(self):
        # Both verified and honor items use the generic receipt template.
        cart = Order.get_cart_for_user(user=self.user)
        cert_item = CertificateItem.add_to_order(cart, self.course_key, self.cost, 'verified')
        self.assertEquals(cert_item.single_item_receipt_template, 'shoppingcart/receipt.html')

        cert_item = CertificateItem.add_to_order(cart, self.course_key, self.cost, 'honor')
        self.assertEquals(cert_item.single_item_receipt_template, 'shoppingcart/receipt.html')

    @override_settings(
        LMS_SEGMENT_KEY="foobar",
        FEATURES={
            'STORE_BILLING_INFO': True,
        }
    )
    def test_refund_cert_callback_no_expiration(self):
        # When there is no expiration date on a verified mode, the user can always get a refund
        # need to prevent analytics errors from appearing in stderr
        with patch('sys.stderr', sys.stdout.write):
            CourseEnrollment.enroll(self.user, self.course_key, 'verified')
            cart = Order.get_cart_for_user(user=self.user)
            CertificateItem.add_to_order(cart, self.course_key, self.cost, 'verified')
            cart.purchase()
            CourseEnrollment.unenroll(self.user, self.course_key)

        target_certs = CertificateItem.objects.filter(course_id=self.course_key, user_id=self.user, status='refunded', mode='verified')
        self.assertTrue(target_certs[0])
        self.assertTrue(target_certs[0].refund_requested_time)
        self.assertEquals(target_certs[0].order.status, 'refunded')
        self._assert_refund_tracked()

    def test_no_refund_on_cert_callback(self):
        # If we explicitly skip refunds, the unenroll action should not modify the purchase.
        CourseEnrollment.enroll(self.user, self.course_key, 'verified')
        cart = Order.get_cart_for_user(user=self.user)
        CertificateItem.add_to_order(cart, self.course_key, self.cost, 'verified')
        cart.purchase()

        CourseEnrollment.unenroll(self.user, self.course_key, skip_refund=True)
        target_certs = CertificateItem.objects.filter(
            course_id=self.course_key,
            user_id=self.user,
            status='purchased',
            mode='verified'
        )
        self.assertTrue(target_certs[0])
        self.assertFalse(target_certs[0].refund_requested_time)
        self.assertEquals(target_certs[0].order.status, 'purchased')

    @override_settings(
        LMS_SEGMENT_KEY="foobar",
        FEATURES={
            'STORE_BILLING_INFO': True,
        }
    )
    def test_refund_cert_callback_before_expiration(self):
        # If the expiration date has not yet passed on a verified mode, the user can be refunded
        many_days = datetime.timedelta(days=60)

        course = CourseFactory.create()
        self.course_key = course.id
        course_mode = CourseMode(course_id=self.course_key,
                                 mode_slug="verified",
                                 mode_display_name="verified cert",
                                 min_price=self.cost,
                                 expiration_datetime=(datetime.datetime.now(pytz.utc) + many_days))
        course_mode.save()

        # need to prevent analytics errors from appearing in stderr
        with patch('sys.stderr', sys.stdout.write):
            CourseEnrollment.enroll(self.user, self.course_key, 'verified')
            cart = Order.get_cart_for_user(user=self.user)
            CertificateItem.add_to_order(cart, self.course_key, self.cost, 'verified')
            cart.purchase()
            CourseEnrollment.unenroll(self.user, self.course_key)

        target_certs = CertificateItem.objects.filter(course_id=self.course_key, user_id=self.user, status='refunded', mode='verified')
        self.assertTrue(target_certs[0])
        self.assertTrue(target_certs[0].refund_requested_time)
        self.assertEquals(target_certs[0].order.status, 'refunded')
        self._assert_refund_tracked()

    def test_refund_cert_callback_before_expiration_email(self):
        """ Test that refund emails are being sent correctly. """
        course = CourseFactory.create()
        course_key = course.id
        many_days = datetime.timedelta(days=60)

        course_mode = CourseMode(course_id=course_key,
                                 mode_slug="verified",
                                 mode_display_name="verified cert",
                                 min_price=self.cost,
                                 expiration_datetime=datetime.datetime.now(pytz.utc) + many_days)
        course_mode.save()

        CourseEnrollment.enroll(self.user, course_key, 'verified')
        cart = Order.get_cart_for_user(user=self.user)
        CertificateItem.add_to_order(cart, course_key, self.cost, 'verified')
        cart.purchase()

        # Clear any purchase-confirmation mail so only the refund email remains.
        mail.outbox = []
        with patch('shoppingcart.models.log.error') as mock_error_logger:
            CourseEnrollment.unenroll(self.user, course_key)
            self.assertFalse(mock_error_logger.called)
            self.assertEquals(len(mail.outbox), 1)
            self.assertEquals('[Refund] User-Requested Refund', mail.outbox[0].subject)
            self.assertEquals(settings.PAYMENT_SUPPORT_EMAIL, mail.outbox[0].from_email)
            self.assertIn('has requested a refund on Order', mail.outbox[0].body)

    @patch('shoppingcart.models.log.error')
    def test_refund_cert_callback_before_expiration_email_error(self, error_logger):
        # If there's an error sending an email to billing, we need to log this error
        many_days = datetime.timedelta(days=60)

        course = CourseFactory.create()
        course_key = course.id
        course_mode = CourseMode(course_id=course_key,
                                 mode_slug="verified",
                                 mode_display_name="verified cert",
                                 min_price=self.cost,
                                 expiration_datetime=datetime.datetime.now(pytz.utc) + many_days)
        course_mode.save()

        CourseEnrollment.enroll(self.user, course_key, 'verified')
        cart = Order.get_cart_for_user(user=self.user)
        CertificateItem.add_to_order(cart, course_key, self.cost, 'verified')
        cart.purchase()

        # Force the refund email to fail and verify the failure is logged.
        with patch('shoppingcart.models.send_mail', side_effect=smtplib.SMTPException):
            CourseEnrollment.unenroll(self.user, course_key)
            self.assertTrue(error_logger.call_args[0][0].startswith('Failed sending email'))

    def test_refund_cert_callback_after_expiration(self):
        # If the expiration date has passed, the user cannot get a refund
        many_days = datetime.timedelta(days=60)

        course = CourseFactory.create()
        course_key = course.id
        course_mode = CourseMode(course_id=course_key,
                                 mode_slug="verified",
                                 mode_display_name="verified cert",
                                 min_price=self.cost,)
        course_mode.save()

        CourseEnrollment.enroll(self.user, course_key, 'verified')
        cart = Order.get_cart_for_user(user=self.user)
        CertificateItem.add_to_order(cart, course_key, self.cost, 'verified')
        cart.purchase()

        # Expire the mode *after* purchase, then unenroll: no refund expected.
        course_mode.expiration_datetime = (datetime.datetime.now(pytz.utc) - many_days)
        course_mode.save()

        CourseEnrollment.unenroll(self.user, course_key)
        target_certs = CertificateItem.objects.filter(course_id=course_key, user_id=self.user, status='refunded', mode='verified')
        self.assertEqual(len(target_certs), 0)

    def test_refund_cert_no_cert_exists(self):
        # If there is no paid certificate, the refund callback should return nothing
        CourseEnrollment.enroll(self.user, self.course_key, 'verified')
        ret_val = CourseEnrollment.unenroll(self.user, self.course_key)
        self.assertFalse(ret_val)

    def test_no_id_prof_confirm_email(self):
        # Pay for a no-id-professional course
        course_mode = CourseMode(course_id=self.course_key,
                                 mode_slug="no-id-professional",
                                 mode_display_name="No Id Professional Cert",
                                 min_price=self.cost)
        course_mode.save()

        CourseEnrollment.enroll(self.user, self.course_key)
        cart = Order.get_cart_for_user(user=self.user)
        CertificateItem.add_to_order(cart, self.course_key, self.cost, 'no-id-professional')
        # verify that we are still enrolled
        self.assertTrue(CourseEnrollment.is_enrolled(self.user, self.course_key))
        self.mock_tracker.reset_mock()
        cart.purchase()
        enrollment = CourseEnrollment.objects.get(user=self.user, course_id=self.course_key)
        self.assertEquals(enrollment.mode, u'no-id-professional')

        # Check the confirmation email: no ID-verification prompt for this
        # mode, but it must mention the refund window.
        self.assertEqual(len(mail.outbox), 1)
        email = mail.outbox[0]
        self.assertEquals('Order Payment Confirmation', email.subject)
        self.assertNotIn("If you haven't verified your identity yet, please start the verification process", email.body)
        self.assertIn(
            "You can unenroll in the course and receive a full refund for 2 days after the course start date. ",
            email.body
        )
class DonationTest(ModuleStoreTestCase):
    """Tests for the donation order item type. """

    # Fixed donation amount shared by all tests in this class.
    COST = Decimal('23.45')

    def setUp(self):
        """Create a test user and order. """
        super(DonationTest, self).setUp()
        self.user = UserFactory.create()
        self.cart = Order.get_cart_for_user(self.user)

    def test_donate_to_org(self):
        # No course ID provided, so this is a donation to the entire organization
        donation = Donation.add_to_order(self.cart, self.COST)
        self._assert_donation(
            donation,
            donation_type="general",
            unit_cost=self.COST,
            line_desc="Donation for edX"
        )

    def test_donate_to_course(self):
        # Create a test course
        course = CourseFactory.create(display_name="Test Course")

        # Donate to the course
        donation = Donation.add_to_order(self.cart, self.COST, course_id=course.id)
        self._assert_donation(
            donation,
            donation_type="course",
            course_id=course.id,
            unit_cost=self.COST,
            line_desc=u"Donation for Test Course"
        )

    def test_confirmation_email(self):
        # Pay for a donation
        Donation.add_to_order(self.cart, self.COST)
        self.cart.start_purchase()
        self.cart.purchase()

        # Check that the tax-deduction information appears in the confirmation email
        self.assertEqual(len(mail.outbox), 1)
        email = mail.outbox[0]
        self.assertEquals('Order Payment Confirmation', email.subject)
        self.assertIn("tax purposes", email.body)

    def test_donate_no_such_course(self):
        # Donations targeted at a non-existent course must be rejected.
        fake_course_id = CourseLocator(org="edx", course="fake", run="course")
        with self.assertRaises(CourseDoesNotExistException):
            Donation.add_to_order(self.cart, self.COST, course_id=fake_course_id)

    def _assert_donation(self, donation, donation_type=None, course_id=None, unit_cost=None, line_desc=None):
        """Verify the donation fields and that the donation can be purchased. """
        self.assertEqual(donation.order, self.cart)
        self.assertEqual(donation.user, self.user)
        self.assertEqual(donation.donation_type, donation_type)
        self.assertEqual(donation.course_id, course_id)
        self.assertEqual(donation.qty, 1)
        self.assertEqual(donation.unit_cost, unit_cost)
        self.assertEqual(donation.currency, "usd")
        self.assertEqual(donation.line_desc, line_desc)

        # Verify that the donation is in the cart
        self.assertTrue(self.cart.has_items(item_type=Donation))
        self.assertEqual(self.cart.total_cost, unit_cost)

        # Purchase the item
        self.cart.start_purchase()
        self.cart.purchase()

        # Verify that the donation is marked as purchased
        donation = Donation.objects.get(pk=donation.id)
        self.assertEqual(donation.status, "purchased")
class InvoiceHistoryTest(TestCase):
    """Tests for the InvoiceHistory model. """

    # Top-level invoice attributes used to build the fixture invoice.
    INVOICE_INFO = {
        'is_valid': True,
        'internal_reference': 'Test Internal Ref Num',
        'customer_reference_number': 'Test Customer Ref Num',
    }

    # Billing/contact attributes merged into the same invoice row.
    CONTACT_INFO = {
        'company_name': 'Test Company',
        'company_contact_name': 'Test Company Contact Name',
        'company_contact_email': 'test-contact@example.com',
        'recipient_name': 'Test Recipient Name',
        'recipient_email': 'test-recipient@example.com',
        'address_line_1': 'Test Address 1',
        'address_line_2': 'Test Address 2',
        'address_line_3': 'Test Address 3',
        'city': 'Test City',
        'state': 'Test State',
        'zip': '12345',
        'country': 'US',
    }

    def setUp(self):
        """Create an invoice for a fixed course key, plus a test user."""
        super(InvoiceHistoryTest, self).setUp()
        invoice_data = copy.copy(self.INVOICE_INFO)
        invoice_data.update(self.CONTACT_INFO)
        self.course_key = CourseLocator('edX', 'DemoX', 'Demo_Course')
        self.invoice = Invoice.objects.create(total_amount="123.45", course_id=self.course_key, **invoice_data)
        self.user = UserFactory.create()

    def test_get_invoice_total_amount(self):
        """
        test to check the total amount
        of the invoices for the course.
        """
        total_amount = Invoice.get_invoice_total_amount_for_course(self.course_key)
        self.assertEqual(total_amount, 123.45)

    def test_get_total_amount_of_paid_invoices(self):
        """
        Test to check the Invoice Transactions amount.
        """
        InvoiceTransaction.objects.create(
            invoice=self.invoice,
            amount='123.45',
            currency='usd',
            comments='test comments',
            status='completed',
            created_by=self.user,
            last_modified_by=self.user
        )
        total_amount_paid = InvoiceTransaction.get_total_amount_of_paid_course_invoices(self.course_key)
        self.assertEqual(float(total_amount_paid), 123.45)

    def test_get_total_amount_of_no_invoices(self):
        """
        Test to check the Invoice Transactions amount.

        With no 'completed' transactions the paid total must be zero.
        """
        total_amount_paid = InvoiceTransaction.get_total_amount_of_paid_course_invoices(self.course_key)
        self.assertEqual(float(total_amount_paid), 0)

    def test_invoice_contact_info_history(self):
        # Creating the invoice in setUp() should already have produced a
        # history snapshot with the invoice + contact info and nothing else.
        self._assert_history_invoice_info(
            is_valid=True,
            internal_ref=self.INVOICE_INFO['internal_reference'],
            customer_ref=self.INVOICE_INFO['customer_reference_number']
        )
        self._assert_history_contact_info(**self.CONTACT_INFO)
        self._assert_history_items([])
        self._assert_history_transactions([])

    def test_invoice_generated_registration_codes(self):
        """
        test filter out the registration codes
        that were generated via Invoice.
        """
        invoice_item = CourseRegistrationCodeInvoiceItem.objects.create(
            invoice=self.invoice,
            qty=5,
            unit_price='123.45',
            course_id=self.course_key
        )
        for i in range(5):
            CourseRegistrationCode.objects.create(
                code='testcode{counter}'.format(counter=i),
                course_id=self.course_key,
                created_by=self.user,
                invoice=self.invoice,
                invoice_item=invoice_item,
                mode_slug='honor'
            )

        registration_codes = CourseRegistrationCode.invoice_generated_registration_codes(self.course_key)
        self.assertEqual(registration_codes.count(), 5)

    def test_invoice_history_items(self):
        # Create an invoice item
        CourseRegistrationCodeInvoiceItem.objects.create(
            invoice=self.invoice,
            qty=1,
            unit_price='123.45',
            course_id=self.course_key
        )
        self._assert_history_items([{
            'qty': 1,
            'unit_price': '123.45',
            'currency': 'usd',
            'course_id': unicode(self.course_key)
        }])

        # Create a second invoice item
        CourseRegistrationCodeInvoiceItem.objects.create(
            invoice=self.invoice,
            qty=2,
            unit_price='456.78',
            course_id=self.course_key
        )
        self._assert_history_items([
            {
                'qty': 1,
                'unit_price': '123.45',
                'currency': 'usd',
                'course_id': unicode(self.course_key)
            },
            {
                'qty': 2,
                'unit_price': '456.78',
                'currency': 'usd',
                'course_id': unicode(self.course_key)
            }
        ])

    def test_invoice_history_transactions(self):
        # Create an invoice transaction
        first_transaction = InvoiceTransaction.objects.create(
            invoice=self.invoice,
            amount='123.45',
            currency='usd',
            comments='test comments',
            status='completed',
            created_by=self.user,
            last_modified_by=self.user
        )
        self._assert_history_transactions([{
            'amount': '123.45',
            'currency': 'usd',
            'comments': 'test comments',
            'status': 'completed',
            'created_by': self.user.username,
            'last_modified_by': self.user.username,
        }])

        # Create a second invoice transaction
        second_transaction = InvoiceTransaction.objects.create(
            invoice=self.invoice,
            amount='456.78',
            currency='usd',
            comments='test more comments',
            status='started',
            created_by=self.user,
            last_modified_by=self.user
        )
        self._assert_history_transactions([
            {
                'amount': '123.45',
                'currency': 'usd',
                'comments': 'test comments',
                'status': 'completed',
                'created_by': self.user.username,
                'last_modified_by': self.user.username,
            },
            {
                'amount': '456.78',
                'currency': 'usd',
                'comments': 'test more comments',
                'status': 'started',
                'created_by': self.user.username,
                'last_modified_by': self.user.username,
            }
        ])

        # Delete the transactions
        first_transaction.delete()
        second_transaction.delete()
        self._assert_history_transactions([])

    def _assert_history_invoice_info(self, is_valid=True, customer_ref=None, internal_ref=None):
        """Check top-level invoice information in the latest history record. """
        latest = self._latest_history()
        self.assertEqual(latest['is_valid'], is_valid)
        self.assertEqual(latest['customer_reference'], customer_ref)
        self.assertEqual(latest['internal_reference'], internal_ref)

    def _assert_history_contact_info(self, **kwargs):
        """Check contact info in the latest history record. """
        contact_info = self._latest_history()['contact_info']
        for key, value in kwargs.iteritems():
            self.assertEqual(contact_info[key], value)

    def _assert_history_items(self, expected_items):
        """Check line item info in the latest history record. """
        items = self._latest_history()['items']
        self.assertItemsEqual(items, expected_items)

    def _assert_history_transactions(self, expected_transactions):
        """Check transactions (payments/refunds) in the latest history record. """
        transactions = self._latest_history()['transactions']
        self.assertItemsEqual(transactions, expected_transactions)

    def _latest_history(self):
        """Retrieve the snapshot from the latest history record. """
        latest = InvoiceHistory.objects.latest()
        return json.loads(latest.snapshot)
| agpl-3.0 |
wadoon/smvtools | smvtools/smvstutseq.py | 1 | 2968 | #!/usr/bin/python3
# smvtools -- Tools around NuSMV and NuXMV
# Copyright (C) 2014-2016 - Alexander Weigl
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""
"""
import argparse
import csv
from itertools import *
from .core import *
def read(filename):
    """Parse *filename* as an Excel-dialect CSV and return one dict per row."""
    with open(filename) as fp:
        # DictReader uses the first row as field names; materialize all rows
        # before the file is closed.
        return list(csv.DictReader(fp))
def condition(state: dict):
    """
    Render *state* as an SMV conjunction over its variables, sorted by name.

    Boolean-true variables appear bare, false ones negated, don't-cares
    collapse to TRUE, and any other value becomes an equality literal.
    """
    def literal_for(name):
        value = state[name]
        if is_true(value):
            return name
        if is_false(value):
            return "! %s" % name
        if is_dont_care(value):
            return "TRUE"
        return "%s = %s" % (name, value)

    return ' & '.join(literal_for(name) for name in sorted(state))
def generate(states: list,
             module_name: str = "StutterAutomata",
             triggerfml='TRUE'):
    """
    Print a NuSMV module that recognizes *states* as a word under stuttering.

    Each CSV row becomes one `line_i` state; the automaton may stutter on a
    row, advance to the next row, or fall into `error` when no transition
    matches. `success` is reached after the final row.
    """
    state_names = starmap(lambda i, s: "line_%d" % (1 + i), enumerate(states))
    variables = sorted(states[0].keys())

    # BUG FIX: the original declared an unused `accept` value while the
    # transition relation and the FOUND define both referred to `success`,
    # which was *not* declared -- NuSMV rejects the undeclared symbol.
    # The missing ';' terminators on the VAR and DEFINE lines were added too.
    print("""
MODULE %s(%s)
VAR state : { inactive, %s, success, error };
DEFINE Trigger := %s;
FOUND := (state = success);
ASSIGN
\tinit(state) := inactive;
\tnext(state) := case
\t\t(state=inactive | state=success) & Trigger : line_1;
\t\tstate = error : error;""" % (module_name, ','.join(variables), ','.join(state_names), triggerfml))

    for i, state in enumerate(states[:-1]):
        # The last printed pair advances into `success` instead of a line state.
        if i < len(states) - 2:
            next_state = "line_%d" % (i + 2)
        else:
            next_state = "success"

        print("\t\t state = line_%d &" % (i + 1), condition(state), ':', 'line_%d' % (i + 1), '; -- stuttering')
        print("\t\t state = line_%d &" % (i + 1), condition(states[i + 1]), ":", next_state, '; -- next line ')

    print("""\t\tTRUE : error; -- if no transition matches above we have a counterexample against this word under stuttering
esac;""")
def main():
    """Command-line entry point: read a CSV word and print the SMV module."""
    ap = argparse.ArgumentParser()
    ap.add_argument("-n", "--name", help="name of the generated module",
                    action="store", default="StutteringAutomata", dest="name")
    ap.add_argument("file")
    ns = ap.parse_args()

    states = read(ns.file)
    # BUG FIX: the parsed --name option was previously ignored; pass it
    # through so the generated module is actually named as requested.
    generate(states, module_name=ns.name)


if __name__ == "__main__":
    main()
| gpl-3.0 |
louyihua/edx-platform | common/djangoapps/microsite_configuration/backends/database.py | 41 | 7974 | """
Microsite backend that reads the configuration from the database
"""
from mako.template import Template
from util.cache import cache
from django.conf import settings
from django.dispatch import receiver
from django.db.models.signals import post_save
from util.memcache import fasthash
from util.url import strip_port_from_host
from microsite_configuration.backends.base import (
BaseMicrositeBackend,
BaseMicrositeTemplateBackend,
)
from microsite_configuration.models import (
Microsite,
MicrositeOrganizationMapping,
MicrositeTemplate
)
from microsite_configuration.microsite import get_value as microsite_get_value
class DatabaseMicrositeBackend(BaseMicrositeBackend):
"""
Microsite backend that reads the microsites definitions
from a table in the database according to the models.py file
This backend would allow us to save microsite configurations
into database and load them in local storage when HTTRequest
is originated from microsite.
E.g. we have setup a microsite with key `monster-university-academy` and
We would have a DB entry like this in table created by Microsite model.
key = monster-university-academy
subdomain = mua.edx.org
values = {
"platform_name": "Monster University Academy".
"course_org_filter: "MonsterX"
}
While using DatabaseMicrositeBackend any request coming from mua.edx.org
would get microsite configurations from `values` column.
"""
def has_configuration_set(self):
"""
Returns whether there is any Microsite configuration settings
"""
if Microsite.objects.all()[:1].exists():
return True
else:
return False
def set_config_by_domain(self, domain):
"""
For a given request domain, find a match in our microsite configuration
and then assign it to the thread local in order to make it available
to the complete Django request processing
"""
if not self.has_configuration_set() or not domain:
return
# look up based on the HTTP request domain name
# this will need to be a full domain name match,
# not a 'startswith' match
microsite = Microsite.get_microsite_for_domain(domain)
if not microsite:
# if no match, then try to find a 'default' key in Microsites
try:
microsite = Microsite.objects.get(key='default')
except Microsite.DoesNotExist:
pass
if microsite:
# if we have a match, then set up the microsite thread local
# data
self._set_microsite_config_from_obj(microsite.site.domain, domain, microsite)
def get_all_config(self):
"""
This returns all configuration for all microsites
"""
config = {}
candidates = Microsite.objects.all()
for microsite in candidates:
values = microsite.values
config[microsite.key] = values
return config
def get_value_for_org(self, org, val_name, default=None):
"""
This returns a configuration value for a microsite which has an org_filter that matches
what is passed in
"""
microsite = MicrositeOrganizationMapping.get_microsite_for_organization(org)
if not microsite:
return default
# cdodge: This approach will not leverage any caching, although I think only Studio calls
# this
config = microsite.values
return config.get(val_name, default)
def get_all_orgs(self):
"""
This returns a set of orgs that are considered within a microsite. This can be used,
for example, to do filtering
"""
# This should be cacheable (via memcache to keep consistent across a cluster)
# I believe this is called on the dashboard and catalog pages, so it'd be good to optimize
return set(MicrositeOrganizationMapping.objects.all().values_list('organization', flat=True))
def _set_microsite_config_from_obj(self, subdomain, domain, microsite_object):
    """
    Helper internal method to actually find the microsite configuration.

    Builds the config dict from the stored microsite values plus
    request-derived entries, and publishes it to the thread local.
    """
    config = microsite_object.values
    config['subdomain'] = strip_port_from_host(subdomain)
    config['site_domain'] = strip_port_from_host(domain)
    config['microsite_config_key'] = microsite_object.key
    # we take the list of ORGs associated with this microsite from the database mapping
    # tables. NOTE, for now, we assume one ORG per microsite
    organizations = microsite_object.get_organizations()
    # we must have at least one ORG defined
    if not organizations:
        raise Exception(
            'Configuration error. Microsite {key} does not have any ORGs mapped to it!'.format(
                key=microsite_object.key
            )
        )
    # just take the first one for now, we'll have to change the upstream logic to allow
    # for more than one ORG binding
    config['course_org_filter'] = organizations[0]
    # Publish: the rest of request processing reads this thread local.
    self.current_request_configuration.data = config
class DatabaseMicrositeTemplateBackend(BaseMicrositeTemplateBackend):
    """
    Specialized class to pull templates from the database.
    This Backend would allow us to save templates in DB and pull
    them from there when required for a specific microsite.
    This backend can be enabled by `MICROSITE_TEMPLATE_BACKEND` setting.

    E.g. we have setup a microsite for subdomain `mua.edx.org` and
    We have a DB entry like this in table created by MicrositeTemplate model.

        microsite = Key for microsite(mua.edx.org)
        template_uri = about.html
        template = <html><body>Template from DB</body></html>

    While using DatabaseMicrositeTemplateBackend any request coming from
    mua.edx.org/about.html would get about.html template from DB and response
    would be the value of `template` column.
    """

    def get_template_path(self, relative_path, **kwargs):
        # DB templates are keyed by the relative URI directly, so no
        # path rewriting is required.
        return relative_path

    def get_template(self, uri):
        """
        Override of the base class for us to look into the
        database tables for a template definition, if we can't find
        one we'll return None which means "use default means" (aka filesystem)
        """
        # Cache key is per-(site_domain, uri); fasthash keeps it short.
        cache_key = "template_cache." + fasthash(microsite_get_value('site_domain') + '.' + uri)
        template_text = cache.get(cache_key)  # pylint: disable=maybe-no-member
        if not template_text:
            # cache is empty so pull template from DB and fill cache.
            template_obj = MicrositeTemplate.get_template_for_microsite(
                microsite_get_value('site_domain'),
                uri
            )
            if not template_obj:
                # We need to set something in the cache to improve performance
                # of the templates stored in the filesystem as well.
                # '##none' is a sentinel meaning "known to be absent in DB".
                cache.set(  # pylint: disable=maybe-no-member
                    cache_key, '##none', settings.MICROSITE_DATABASE_TEMPLATE_CACHE_TTL
                )
                return None
            template_text = template_obj.template
            cache.set(  # pylint: disable=maybe-no-member
                cache_key, template_text, settings.MICROSITE_DATABASE_TEMPLATE_CACHE_TTL
            )
        # A cached sentinel also means "fall back to the filesystem".
        if template_text == '##none':
            return None
        return Template(
            text=template_text
        )

    @staticmethod
    @receiver(post_save, sender=MicrositeTemplate)
    def clear_cache(sender, instance, **kwargs):  # pylint: disable=unused-argument
        """
        Clear the cached template when the model is saved
        """
        cache_key = "template_cache." + fasthash(instance.microsite.site.domain + '.' + instance.template_uri)
        cache.delete(cache_key)  # pylint: disable=maybe-no-member
| agpl-3.0 |
PaulKinlan/cli-caniuse | site/app/scripts/bower_components/jsrepl-build/extern/python/closured/lib/python2.7/encodings/mac_latin2.py | 647 | 8565 | """ Python Character Mapping Codec generated from 'LATIN2.TXT' with gencodec.py.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs

class Codec(codecs.Codec):
    """Stateless one-shot mac-latin2 codec backed by the charmap tables below."""

    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_map)

    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_map)

class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental encoder; charmap codecs carry no state between chunks."""

    def encode(self, input, final=False):
        return codecs.charmap_encode(input,self.errors,encoding_map)[0]

class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental decoder; charmap codecs carry no state between chunks."""

    def decode(self, input, final=False):
        return codecs.charmap_decode(input,self.errors,decoding_map)[0]

class StreamWriter(Codec,codecs.StreamWriter):
    pass

class StreamReader(Codec,codecs.StreamReader):
    pass
### encodings module API

def getregentry():
    """Return the CodecInfo record used by the encodings package to register this codec."""
    return codecs.CodecInfo(
        name='mac-latin2',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Map

# Generated table (gencodec.py from LATIN2.TXT): start from the identity
# mapping for all 256 byte values, then override the upper half with the
# mac-latin2 specific Unicode code points.
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
    0x0080: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
    0x0081: 0x0100, # LATIN CAPITAL LETTER A WITH MACRON
    0x0082: 0x0101, # LATIN SMALL LETTER A WITH MACRON
    0x0083: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
    0x0084: 0x0104, # LATIN CAPITAL LETTER A WITH OGONEK
    0x0085: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
    0x0086: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
    0x0087: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
    0x0088: 0x0105, # LATIN SMALL LETTER A WITH OGONEK
    0x0089: 0x010c, # LATIN CAPITAL LETTER C WITH CARON
    0x008a: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
    0x008b: 0x010d, # LATIN SMALL LETTER C WITH CARON
    0x008c: 0x0106, # LATIN CAPITAL LETTER C WITH ACUTE
    0x008d: 0x0107, # LATIN SMALL LETTER C WITH ACUTE
    0x008e: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
    0x008f: 0x0179, # LATIN CAPITAL LETTER Z WITH ACUTE
    0x0090: 0x017a, # LATIN SMALL LETTER Z WITH ACUTE
    0x0091: 0x010e, # LATIN CAPITAL LETTER D WITH CARON
    0x0092: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
    0x0093: 0x010f, # LATIN SMALL LETTER D WITH CARON
    0x0094: 0x0112, # LATIN CAPITAL LETTER E WITH MACRON
    0x0095: 0x0113, # LATIN SMALL LETTER E WITH MACRON
    0x0096: 0x0116, # LATIN CAPITAL LETTER E WITH DOT ABOVE
    0x0097: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
    0x0098: 0x0117, # LATIN SMALL LETTER E WITH DOT ABOVE
    0x0099: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
    0x009a: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
    0x009b: 0x00f5, # LATIN SMALL LETTER O WITH TILDE
    0x009c: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
    0x009d: 0x011a, # LATIN CAPITAL LETTER E WITH CARON
    0x009e: 0x011b, # LATIN SMALL LETTER E WITH CARON
    0x009f: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
    0x00a0: 0x2020, # DAGGER
    0x00a1: 0x00b0, # DEGREE SIGN
    0x00a2: 0x0118, # LATIN CAPITAL LETTER E WITH OGONEK
    0x00a4: 0x00a7, # SECTION SIGN
    0x00a5: 0x2022, # BULLET
    0x00a6: 0x00b6, # PILCROW SIGN
    0x00a7: 0x00df, # LATIN SMALL LETTER SHARP S
    0x00a8: 0x00ae, # REGISTERED SIGN
    0x00aa: 0x2122, # TRADE MARK SIGN
    0x00ab: 0x0119, # LATIN SMALL LETTER E WITH OGONEK
    0x00ac: 0x00a8, # DIAERESIS
    0x00ad: 0x2260, # NOT EQUAL TO
    0x00ae: 0x0123, # LATIN SMALL LETTER G WITH CEDILLA
    0x00af: 0x012e, # LATIN CAPITAL LETTER I WITH OGONEK
    0x00b0: 0x012f, # LATIN SMALL LETTER I WITH OGONEK
    0x00b1: 0x012a, # LATIN CAPITAL LETTER I WITH MACRON
    0x00b2: 0x2264, # LESS-THAN OR EQUAL TO
    0x00b3: 0x2265, # GREATER-THAN OR EQUAL TO
    0x00b4: 0x012b, # LATIN SMALL LETTER I WITH MACRON
    0x00b5: 0x0136, # LATIN CAPITAL LETTER K WITH CEDILLA
    0x00b6: 0x2202, # PARTIAL DIFFERENTIAL
    0x00b7: 0x2211, # N-ARY SUMMATION
    0x00b8: 0x0142, # LATIN SMALL LETTER L WITH STROKE
    0x00b9: 0x013b, # LATIN CAPITAL LETTER L WITH CEDILLA
    0x00ba: 0x013c, # LATIN SMALL LETTER L WITH CEDILLA
    0x00bb: 0x013d, # LATIN CAPITAL LETTER L WITH CARON
    0x00bc: 0x013e, # LATIN SMALL LETTER L WITH CARON
    0x00bd: 0x0139, # LATIN CAPITAL LETTER L WITH ACUTE
    0x00be: 0x013a, # LATIN SMALL LETTER L WITH ACUTE
    0x00bf: 0x0145, # LATIN CAPITAL LETTER N WITH CEDILLA
    0x00c0: 0x0146, # LATIN SMALL LETTER N WITH CEDILLA
    0x00c1: 0x0143, # LATIN CAPITAL LETTER N WITH ACUTE
    0x00c2: 0x00ac, # NOT SIGN
    0x00c3: 0x221a, # SQUARE ROOT
    0x00c4: 0x0144, # LATIN SMALL LETTER N WITH ACUTE
    0x00c5: 0x0147, # LATIN CAPITAL LETTER N WITH CARON
    0x00c6: 0x2206, # INCREMENT
    0x00c7: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
    0x00c8: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
    0x00c9: 0x2026, # HORIZONTAL ELLIPSIS
    0x00ca: 0x00a0, # NO-BREAK SPACE
    0x00cb: 0x0148, # LATIN SMALL LETTER N WITH CARON
    0x00cc: 0x0150, # LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
    0x00cd: 0x00d5, # LATIN CAPITAL LETTER O WITH TILDE
    0x00ce: 0x0151, # LATIN SMALL LETTER O WITH DOUBLE ACUTE
    0x00cf: 0x014c, # LATIN CAPITAL LETTER O WITH MACRON
    0x00d0: 0x2013, # EN DASH
    0x00d1: 0x2014, # EM DASH
    0x00d2: 0x201c, # LEFT DOUBLE QUOTATION MARK
    0x00d3: 0x201d, # RIGHT DOUBLE QUOTATION MARK
    0x00d4: 0x2018, # LEFT SINGLE QUOTATION MARK
    0x00d5: 0x2019, # RIGHT SINGLE QUOTATION MARK
    0x00d6: 0x00f7, # DIVISION SIGN
    0x00d7: 0x25ca, # LOZENGE
    0x00d8: 0x014d, # LATIN SMALL LETTER O WITH MACRON
    0x00d9: 0x0154, # LATIN CAPITAL LETTER R WITH ACUTE
    0x00da: 0x0155, # LATIN SMALL LETTER R WITH ACUTE
    0x00db: 0x0158, # LATIN CAPITAL LETTER R WITH CARON
    0x00dc: 0x2039, # SINGLE LEFT-POINTING ANGLE QUOTATION MARK
    0x00dd: 0x203a, # SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
    0x00de: 0x0159, # LATIN SMALL LETTER R WITH CARON
    0x00df: 0x0156, # LATIN CAPITAL LETTER R WITH CEDILLA
    0x00e0: 0x0157, # LATIN SMALL LETTER R WITH CEDILLA
    0x00e1: 0x0160, # LATIN CAPITAL LETTER S WITH CARON
    0x00e2: 0x201a, # SINGLE LOW-9 QUOTATION MARK
    0x00e3: 0x201e, # DOUBLE LOW-9 QUOTATION MARK
    0x00e4: 0x0161, # LATIN SMALL LETTER S WITH CARON
    0x00e5: 0x015a, # LATIN CAPITAL LETTER S WITH ACUTE
    0x00e6: 0x015b, # LATIN SMALL LETTER S WITH ACUTE
    0x00e7: 0x00c1, # LATIN CAPITAL LETTER A WITH ACUTE
    0x00e8: 0x0164, # LATIN CAPITAL LETTER T WITH CARON
    0x00e9: 0x0165, # LATIN SMALL LETTER T WITH CARON
    0x00ea: 0x00cd, # LATIN CAPITAL LETTER I WITH ACUTE
    0x00eb: 0x017d, # LATIN CAPITAL LETTER Z WITH CARON
    0x00ec: 0x017e, # LATIN SMALL LETTER Z WITH CARON
    0x00ed: 0x016a, # LATIN CAPITAL LETTER U WITH MACRON
    0x00ee: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE
    0x00ef: 0x00d4, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
    0x00f0: 0x016b, # LATIN SMALL LETTER U WITH MACRON
    0x00f1: 0x016e, # LATIN CAPITAL LETTER U WITH RING ABOVE
    0x00f2: 0x00da, # LATIN CAPITAL LETTER U WITH ACUTE
    0x00f3: 0x016f, # LATIN SMALL LETTER U WITH RING ABOVE
    0x00f4: 0x0170, # LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
    0x00f5: 0x0171, # LATIN SMALL LETTER U WITH DOUBLE ACUTE
    0x00f6: 0x0172, # LATIN CAPITAL LETTER U WITH OGONEK
    0x00f7: 0x0173, # LATIN SMALL LETTER U WITH OGONEK
    0x00f8: 0x00dd, # LATIN CAPITAL LETTER Y WITH ACUTE
    0x00f9: 0x00fd, # LATIN SMALL LETTER Y WITH ACUTE
    0x00fa: 0x0137, # LATIN SMALL LETTER K WITH CEDILLA
    0x00fb: 0x017b, # LATIN CAPITAL LETTER Z WITH DOT ABOVE
    0x00fc: 0x0141, # LATIN CAPITAL LETTER L WITH STROKE
    0x00fd: 0x017c, # LATIN SMALL LETTER Z WITH DOT ABOVE
    0x00fe: 0x0122, # LATIN CAPITAL LETTER G WITH CEDILLA
    0x00ff: 0x02c7, # CARON
})

### Encoding Map

# The encoding map is simply the inverse of the decoding map.
encoding_map = codecs.make_encoding_map(decoding_map)
| apache-2.0 |
nburn42/tensorflow | tensorflow/contrib/specs/python/specs_ops.py | 44 | 6689 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operators for concise TensorFlow network models.
This module is used as an environment for evaluating expressions
in the "specs" DSL.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.contrib.specs.python import specs_lib
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope
# The following assignments don't appear to follow Google naming
# conventions, but that's because these are functions defined by
# higher-order function application, not "constants" and because they
# are the commands of the DSL.
# pylint: disable=invalid-name
class Idx(specs_lib.Composable):
    """Implements the identity function in network specifications."""

    def funcall(self, x):
        # Identity: return the input tensor unchanged.
        return x
class Conc(specs_lib.Composable):
    """Implements tensor concatenation in network specifications."""

    def __init__(self, dim, *args):
        """Concatenates tensors along the given dimension.

        Args:
          dim: dimension along which concatenation takes place
          *args: argument tensor functions to be concatenated
        """
        self.dim = dim
        self.funs = args

    def funcall(self, x):
        # Apply every sub-function to the same input, then join the results.
        parts = [fun.funcall(x) for fun in self.funs]
        return array_ops.concat(parts, self.dim)
# Re-exported specs_lib primitives so DSL expressions can use short names.
External = specs_lib.External
Import = specs_lib.Import
Fun = specs_lib.Function
debug = specs_lib.debug
Print = Fun(logging_ops.Print)
Id = Fun(array_ops.identity)

# TODO(tmb) add Assert

# Two letter names for the most common layers.

# 2D Convolutional layers with nonlinearities (s/t/r/m/l)
# TODO(tmb) add Cbs, Fbs etc. for batch norms
Cx = Fun(layers.conv2d)
Cs = Fun(layers.conv2d, activation_fn=math_ops.sigmoid)
Ct = Fun(layers.conv2d, activation_fn=math_ops.tanh)
Cr = Fun(layers.conv2d, activation_fn=nn_ops.relu)
Cm = Fun(layers.conv2d, activation_fn=nn_ops.softmax)
Cl = Fun(layers.conv2d, activation_fn=None)

# Fully connected slim with nonlinearities (s/t/r/m/l)
Fx = Fun(layers.fully_connected)
Fs = Fun(layers.fully_connected, activation_fn=math_ops.sigmoid)
Ft = Fun(layers.fully_connected, activation_fn=math_ops.tanh)
Fr = Fun(layers.fully_connected, activation_fn=nn_ops.relu)
Fm = Fun(layers.fully_connected, activation_fn=nn_ops.softmax)
Fl = Fun(layers.fully_connected, activation_fn=None)

# Pooling
Mp = Fun(layers.max_pool2d)
Ap = Fun(layers.avg_pool2d)

# Batch manipulations
Do = Fun(layers.dropout)
Bn = Fun(layers.batch_norm)
Lrn = Fun(nn.local_response_normalization)
Unit = Fun(layers.unit_norm)

# Shape changes
Flat = Fun(layers.flatten)
Reshape = Fun(array_ops.reshape)
Transpose = Fun(array_ops.transpose)
Squeeze = Fun(array_ops.squeeze)
Expand = Fun(array_ops.expand_dims)

# Nonlinearities (rarely needed on their own)
Relu = Fun(nn_ops.relu)
Sig = Fun(math_ops.sigmoid)
Tanh = Fun(math_ops.tanh)
Smax = Fun(nn_ops.softmax)
def Dws(n):
    """Depth-wise convolution + sigmoid (used after LSTM)."""
    # A 1x1 convolution acts as a per-pixel fully-connected layer over depth.
    return Cs(n, [1, 1])

def Dwm(n):
    """Depth-wise convolution + softmax (used after LSTM)."""
    return Cm(n, [1, 1])
# Sharing of Variables
def Var(name, *args, **kw):
    """Implements an operator that generates a variable.

    This function is still experimental. Use it only
    for generating a single variable instance for
    each name.

    Args:
      name: Name of the variable.
      *args: Other arguments to get_variable.
      **kw: Other keywords for get_variable.

    Returns:
      A specs object for generating a variable.
    """
    def make_variable(_):
        # The input tensor is deliberately ignored; only the graph
        # context in which the spec is evaluated matters.
        return variable_scope.get_variable(name, *args, **kw)

    return specs_lib.Callable(make_variable)
class Shared(specs_lib.Composable):
    """Wraps a scope with variable reuse around the subnetwork.

    This function is still experimental.

    Attributes:
      subnet: The shared subnetwork.
      name: A name for the shared scope.
      scope: The variable scope reused on every call after the first.
    """
    # Class-level counter used to auto-generate unique default names.
    shared_number = 1

    def __init__(self, subnet, name=None, scope=None):
        """Create the Shared operator.

        Use this as:

            f = Shared(Cr(100, 3))
            g = f | f | f

        Ordinarily, you do not need to provide either a name or a scope.
        Providing a name is useful if you want a well-defined namespace
        for the variables (e.g., for saving a subnet).

        Args:
          subnet: Definition of the shared network.
          name: Optional name for the shared context.
          scope: Optional shared scope (must be a Scope, not a string).

        Raises:
          ValueError: Scope is not of type tf.Scope, name is not
          of type string, or both scope and name are given together.
        """
        if scope is not None and not isinstance(scope,
                                                variable_scope.VariableScope):
            raise ValueError("scope must be None or a VariableScope")
        # BUG FIX: the original checked `isinstance(scope, str)` here, so any
        # call that supplied a (perfectly valid) string name raised this
        # ValueError. The check must validate `name`.
        if name is not None and not isinstance(name, str):
            raise ValueError("name must be None or a string")
        if scope is not None and name is not None:
            raise ValueError("cannot provide both a name and a scope")
        if name is None:
            name = "Shared_%d" % Shared.shared_number
            Shared.shared_number += 1
        self.subnet = subnet
        self.name = name
        self.scope = scope

    def funcall(self, x):
        """Apply the shared operator to an input.

        This wraps a variable scope around the creation of the subnet.

        Args:
          x: The input argument on which the subnet is invoked.

        Returns:
          The output tensor from invoking the subnet constructor.
        """
        if self.scope is None:
            # First invocation: create the scope and remember it so later
            # calls reuse the same variables.
            with variable_scope.variable_scope(self.name, values=[x]) as scope:
                self.scope = scope
                return self.subnet.funcall(x)
        else:
            with variable_scope.variable_scope(self.scope, values=[x], reuse=True):
                return self.subnet.funcall(x)
| apache-2.0 |
Tony-Zhang03/scribe | lib/py/scribe/ttypes.py | 35 | 2620 | #
# Autogenerated by Thrift
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
from thrift.Thrift import *
import fb303.ttypes
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
try:
from thrift.protocol import fastbinary
except:
fastbinary = None
class ResultCode:
    """Status codes returned by the scribe Log call."""
    OK = 0
    TRY_LATER = 1

    _VALUES_TO_NAMES = {
        OK: "OK",
        TRY_LATER: "TRY_LATER",
    }

    # Reverse mapping, derived so the two tables can never drift apart.
    _NAMES_TO_VALUES = {name: value for value, name in _VALUES_TO_NAMES.items()}
class LogEntry:
    """
    A single log message bound for a scribe category.

    NOTE: Thrift-generated (Python 2 era: note `iteritems` in __repr__);
    regenerate rather than hand-edit.

    Attributes:
     - category
     - message
    """

    # Thrift field descriptors: (field-id, type, name, type-args, default).
    thrift_spec = (
        None, # 0
        (1, TType.STRING, 'category', None, None, ), # 1
        (2, TType.STRING, 'message', None, None, ), # 2
    )

    def __init__(self, category=None, message=None,):
        self.category = category
        self.message = message

    def read(self, iprot):
        # Fast path: decode via the C extension when the accelerated binary
        # protocol and a C-readable transport are in use.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        # Slow path: generic field-by-field decode; unknown fields are skipped.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.category = iprot.readString();
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRING:
                    self.message = iprot.readString();
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path via the C extension when available.
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        # Slow path: fields set to None are simply omitted from the output.
        oprot.writeStructBegin('LogEntry')
        if self.category != None:
            oprot.writeFieldBegin('category', TType.STRING, 1)
            oprot.writeString(self.category)
            oprot.writeFieldEnd()
        if self.message != None:
            oprot.writeFieldBegin('message', TType.STRING, 2)
            oprot.writeString(self.message)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def __repr__(self):
        L = ['%s=%r' % (key, value)
            for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        # Structural equality over all instance attributes.
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
| apache-2.0 |
akshatharaj/django | django/db/models/fields/related_lookups.py | 287 | 6153 | from django.db.models.lookups import (
Exact, GreaterThan, GreaterThanOrEqual, In, LessThan, LessThanOrEqual,
)
class MultiColSource(object):
    """A stand-in "column" for relations that span multiple columns.

    Carries the table alias plus the parallel lists of target and source
    fields so multi-column lookups can be compiled later.
    """
    contains_aggregate = False

    def __init__(self, alias, targets, sources, field):
        self.alias = alias
        self.targets = targets
        self.sources = sources
        self.field = field
        self.output_field = field

    def __repr__(self):
        return "{}({}, {})".format(
            type(self).__name__, self.alias, self.field)

    def relabeled_clone(self, relabels):
        # Produce a copy pointing at the (possibly) renamed table alias.
        new_alias = relabels.get(self.alias, self.alias)
        return type(self)(new_alias, self.targets, self.sources, self.field)
def get_normalized_value(value, lhs):
    """Normalize a lookup value to a tuple of raw column values.

    Model instances are converted to the tuple of their target field
    values; non-tuple scalars are wrapped in a 1-tuple; tuples pass
    through unchanged.
    """
    from django.db.models import Model
    if isinstance(value, Model):
        value_list = []
        # A case like Restaurant.objects.filter(place=restaurant_instance),
        # where place is a OneToOneField and the primary key of Restaurant.
        if getattr(lhs.output_field, 'primary_key', False):
            return (value.pk,)
        sources = lhs.output_field.get_path_info()[-1].target_fields
        for source in sources:
            # Walk up the FK chain until we reach the model the instance
            # actually belongs to, then read the concrete attribute.
            while not isinstance(value, source.model) and source.remote_field:
                source = source.remote_field.model._meta.get_field(source.remote_field.field_name)
            value_list.append(getattr(value, source.attname))
        return tuple(value_list)
    if not isinstance(value, tuple):
        return (value,)
    return value
class RelatedIn(In):
    """``__in`` lookup across a relation, including multi-column relations."""

    def get_prep_lookup(self):
        if not isinstance(self.lhs, MultiColSource) and self.rhs_is_direct_value():
            # If we get here, we are dealing with single-column relations.
            self.rhs = [get_normalized_value(val, self.lhs)[0] for val in self.rhs]
            # We need to run the related field's get_prep_lookup(). Consider case
            # ForeignKey to IntegerField given value 'abc'. The ForeignKey itself
            # doesn't have validation for non-integers, so we must run validation
            # using the target field.
            if hasattr(self.lhs.output_field, 'get_path_info'):
                # Run the target field's get_prep_lookup. We can safely assume there is
                # only one as we don't get to the direct value branch otherwise.
                self.rhs = self.lhs.output_field.get_path_info()[-1].target_fields[-1].get_prep_lookup(
                    self.lookup_name, self.rhs)
        return super(RelatedIn, self).get_prep_lookup()

    def as_sql(self, compiler, connection):
        if isinstance(self.lhs, MultiColSource):
            # For multicolumn lookups we need to build a multicolumn where clause.
            # This clause is either a SubqueryConstraint (for values that need to be compiled to
            # SQL) or a OR-combined list of (col1 = val1 AND col2 = val2 AND ...) clauses.
            from django.db.models.sql.where import WhereNode, SubqueryConstraint, AND, OR
            root_constraint = WhereNode(connector=OR)
            if self.rhs_is_direct_value():
                values = [get_normalized_value(value, self.lhs) for value in self.rhs]
                for value in values:
                    # One AND-group of per-column exact matches per candidate value.
                    value_constraint = WhereNode()
                    for source, target, val in zip(self.lhs.sources, self.lhs.targets, value):
                        lookup_class = target.get_lookup('exact')
                        lookup = lookup_class(target.get_col(self.lhs.alias, source), val)
                        value_constraint.add(lookup, AND)
                    root_constraint.add(value_constraint, OR)
            else:
                # rhs is a queryset/expression: defer to a subquery constraint.
                root_constraint.add(
                    SubqueryConstraint(
                        self.lhs.alias, [target.column for target in self.lhs.targets],
                        [source.name for source in self.lhs.sources], self.rhs),
                    AND)
            return root_constraint.as_sql(compiler, connection)
        else:
            return super(RelatedIn, self).as_sql(compiler, connection)
class RelatedLookupMixin(object):
    """Shared normalization/compilation for relation-traversing lookups."""

    def get_prep_lookup(self):
        if not isinstance(self.lhs, MultiColSource) and self.rhs_is_direct_value():
            # If we get here, we are dealing with single-column relations.
            self.rhs = get_normalized_value(self.rhs, self.lhs)[0]
            # We need to run the related field's get_prep_lookup(). Consider case
            # ForeignKey to IntegerField given value 'abc'. The ForeignKey itself
            # doesn't have validation for non-integers, so we must run validation
            # using the target field.
            if hasattr(self.lhs.output_field, 'get_path_info'):
                # Get the target field. We can safely assume there is only one
                # as we don't get to the direct value branch otherwise.
                self.rhs = self.lhs.output_field.get_path_info()[-1].target_fields[-1].get_prep_lookup(
                    self.lookup_name, self.rhs)
        return super(RelatedLookupMixin, self).get_prep_lookup()

    def as_sql(self, compiler, connection):
        if isinstance(self.lhs, MultiColSource):
            # Multi-column case: AND together one per-column lookup per target.
            assert self.rhs_is_direct_value()
            self.rhs = get_normalized_value(self.rhs, self.lhs)
            from django.db.models.sql.where import WhereNode, AND
            root_constraint = WhereNode()
            for target, source, val in zip(self.lhs.targets, self.lhs.sources, self.rhs):
                lookup_class = target.get_lookup(self.lookup_name)
                root_constraint.add(
                    lookup_class(target.get_col(self.lhs.alias, source), val), AND)
            return root_constraint.as_sql(compiler, connection)
        return super(RelatedLookupMixin, self).as_sql(compiler, connection)
# Concrete related-field lookups: each pairs the relation-aware mixin with
# the corresponding base comparison lookup.
class RelatedExact(RelatedLookupMixin, Exact):
    pass

class RelatedLessThan(RelatedLookupMixin, LessThan):
    pass

class RelatedGreaterThan(RelatedLookupMixin, GreaterThan):
    pass

class RelatedGreaterThanOrEqual(RelatedLookupMixin, GreaterThanOrEqual):
    pass

class RelatedLessThanOrEqual(RelatedLookupMixin, LessThanOrEqual):
    pass
| bsd-3-clause |
rnyberg/pyfibot | pyfibot/modules/available/module_btc.py | 1 | 2168 | # -*- encoding: utf-8 -*-
from __future__ import unicode_literals, print_function, division
import logging
log = logging.getLogger("cryptocoin")
def command_ltc(bot, user, channel, args):
    """Display current LTC exchange rates from BTC-E"""
    ticker = bot.get_url("https://btc-e.com/api/2/ltc_usd/ticker").json()['ticker']
    summary = "BTC-E: avg:$%s last:$%s low:$%s high:$%s vol:%s" % (
        ticker['avg'], ticker['last'], ticker['low'], ticker['high'], ticker['vol'])
    return bot.say(channel, summary)
def command_bsbtc(bot, user, channel, args):
    """Display current BTC exchange rates from bitstamp"""
    response = bot.get_url("https://www.bitstamp.net/api/ticker/")
    try:
        data = response.json()
    except AttributeError:
        # Response object had no usable json(); dump the raw payload for
        # debugging and bail out quietly (best-effort command).
        print(response.text)
        return
    summary = "BitStamp: bid:$%s last:$%s low:$%s high:$%s vol:%s" % (
        data['bid'], data['last'], data['low'], data['high'], data['volume'])
    return bot.say(channel, summary)
def command_btc(bot, user, channel, args):
    """Display current BTC exchange rates from mtgox. Usage: btc [whitespace separated list of currency codes]"""
    currencies = args.split(" ") if args else ["USD"]
    value = _get_coin_value(bot, "BTC", currencies)
    if value:
        return bot.say(channel, "MtGox: %s" % value)
    log.debug('Failed to fetch value with currencies "%s"' % args)
    return bot.say(channel, 'Failed to fetch BTC value.')
def _get_coin_value(bot, coin, currencies):
    """Return " | "-joined rate strings for *coin* in each currency, or None if all failed."""
    rates = [
        rate
        for rate in (_gen_string(bot, coin, currency) for currency in currencies)
        if rate
    ]
    return " | ".join(rates) if rates else None
def _gen_string(bot, coin="BTC", currency="EUR"):
r = bot.get_url("http://data.mtgox.com/api/2/%s%s/money/ticker" % (coin, currency.upper()))
if r.json()['result'] != 'success':
log.warn("API call failed:")
log.warn(r.text)
return None
data = r.json()['data']
avg = data['avg']['display_short']
low = data['low']['display_short']
high = data['high']['display_short']
vol = data['vol']['display_short']
return "%s avg:%s low:%s high:%s vol:%s" % (currency.upper(), avg, low, high, vol)
| bsd-3-clause |
kittiu/sale-workflow | sale_exception_nostock/tests/test_dropshipping_skip_check.py | 35 | 1548 | # Author: Leonardo Pistone
# Copyright 2014 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from openerp.tests.common import TransactionCase
class TestDropshippingSkipCheck(TransactionCase):
    """Dropshipping (supplier-sourced) sale order lines must skip stock checks."""

    def setUp(self):
        """Set up a dropshipping sale order line.

        To do that, mock the computed source location to be a supplier.
        """
        super(TestDropshippingSkipCheck, self).setUp()
        source_loc = self.env['stock.location'].new({'usage': 'supplier'})
        self.order_line = self.env['sale.order.line'].new()
        # Patch the line's location getter so it reports the supplier location.
        self.order_line._get_line_location = lambda: source_loc

    def test_dropshipping_sale_can_always_be_delivered(self):
        self.assertIs(True, self.order_line.can_command_at_delivery_date())

    def test_dropshipping_sale_does_not_affect_future_orders(self):
        self.assertIs(False, self.order_line.future_orders_are_affected())
| agpl-3.0 |
y12uc231/edx-platform | common/lib/xmodule/xmodule/peer_grading_module.py | 7 | 29051 | import json
import logging
from datetime import datetime
from lxml import etree
from pkg_resources import resource_string
from xblock.fields import Dict, String, Scope, Boolean, Float, Reference
from xmodule.capa_module import ComplexEncoder
from xmodule.fields import Date, Timedelta
from xmodule.modulestore.exceptions import ItemNotFoundError, NoPathToItem
from xmodule.raw_module import RawDescriptor
from xmodule.timeinfo import TimeInfo
from xmodule.util.duedate import get_extended_due_date
from xmodule.x_module import XModule, module_attr
from xmodule.open_ended_grading_classes.peer_grading_service import PeerGradingService, MockPeerGradingService
from open_ended_grading_classes import combined_open_ended_rubric
from django.utils.timezone import UTC
from xmodule.open_ended_grading_classes.grading_service_module import GradingServiceError
log = logging.getLogger(__name__)
# Make '_' a no-op so we can scrape strings
_ = lambda text: text

# User-facing error shown when the external grading service cannot be reached.
EXTERNAL_GRADER_NO_CONTACT_ERROR = "Failed to contact external graders. Please notify course staff."
# Upper bound on the length of feedback text a peer grader may submit.
MAX_ALLOWED_FEEDBACK_LENGTH = 5000
class PeerGradingFields(object):
    """XBlock field definitions shared by the peer grading module/descriptor."""

    use_for_single_location = Boolean(
        display_name=_("Show Single Problem"),
        help=_('When True, only the single problem specified by "Link to Problem Location" is shown. '
               'When False, a panel is displayed with all problems available for peer grading.'),
        default=False,
        scope=Scope.settings
    )
    link_to_location = Reference(
        display_name=_("Link to Problem Location"),
        help=_('The location of the problem being graded. Only used when "Show Single Problem" is True.'),
        default="",
        scope=Scope.settings
    )
    graded = Boolean(
        display_name=_("Graded"),
        help=_('Defines whether the student gets credit for grading this problem. Only used when "Show Single Problem" is True.'),
        default=False,
        scope=Scope.settings
    )
    due = Date(
        help=_("Due date that should be displayed."),
        scope=Scope.settings)
    extended_due = Date(
        help=_("Date that this problem is due by for a particular student. This "
               "can be set by an instructor, and will override the global due "
               "date if it is set to a date that is later than the global due "
               "date."),
        default=None,
        scope=Scope.user_state,
    )
    graceperiod = Timedelta(
        help=_("Amount of grace to give on the due date."),
        scope=Scope.settings
    )
    student_data_for_location = Dict(
        help=_("Student data for a given peer grading problem."),
        scope=Scope.user_state
    )
    weight = Float(
        display_name=_("Problem Weight"),
        help=_("Defines the number of points each problem is worth. If the value is not set, each problem is worth one point."),
        scope=Scope.settings, values={"min": 0, "step": ".1"},
        default=1
    )
    display_name = String(
        display_name=_("Display Name"),
        help=_("Display name for this module"),
        scope=Scope.settings,
        default=_("Peer Grading Interface")
    )
    data = String(
        help=_("Html contents to display for this module"),
        default='<peergrading></peergrading>',
        scope=Scope.content
    )
class InvalidLinkLocation(Exception):
    """Raised when a peer grading module tries to link to an invalid problem location."""
class PeerGradingModule(PeerGradingFields, XModule):
    """
    PeerGradingModule.__init__ takes the same arguments as xmodule.x_module:XModule.__init__
    """
    # Version marker for this module's stored data format.
    _VERSION = 1

    # Client-side assets bundled with the module.
    js = {
        'coffee': [
            resource_string(__name__, 'js/src/peergrading/peer_grading.coffee'),
            resource_string(__name__, 'js/src/peergrading/peer_grading_problem.coffee'),
            resource_string(__name__, 'js/src/javascript_loader.coffee'),
        ],
        'js': [
            resource_string(__name__, 'js/src/collapsible.js'),
        ]
    }
    # Name of the client-side JS module that drives this XModule's view.
    js_module_name = "PeerGrading"
    css = {'scss': [resource_string(__name__, 'css/combinedopenended/display.scss')]}
def __init__(self, *args, **kwargs):
super(PeerGradingModule, self).__init__(*args, **kwargs)
# Copy this to a new variable so that we can edit it if needed.
# We need to edit it if the linked module cannot be found, so
# we can revert to panel model.
self.use_for_single_location_local = self.use_for_single_location
# We need to set the location here so the child modules can use it.
self.runtime.set('location', self.location)
if (self.runtime.open_ended_grading_interface):
self.peer_gs = PeerGradingService(self.system.open_ended_grading_interface, self.system.render_template)
else:
self.peer_gs = MockPeerGradingService()
if self.use_for_single_location_local:
linked_descriptors = self.descriptor.get_required_module_descriptors()
if len(linked_descriptors) == 0:
error_msg = "Peer grading module {0} is trying to use single problem mode without "
"a location specified.".format(self.location)
log.error(error_msg)
# Change module over to panel mode from single problem mode.
self.use_for_single_location_local = False
else:
self.linked_problem = self.system.get_module(linked_descriptors[0])
try:
self.timeinfo = TimeInfo(
get_extended_due_date(self), self.graceperiod)
except Exception:
log.error("Error parsing due date information in location {0}".format(self.location))
raise
self.display_due_date = self.timeinfo.display_due_date
try:
self.student_data_for_location = json.loads(self.student_data_for_location)
except Exception: # pylint: disable=broad-except
# OK with this broad exception because we just want to continue on any error
pass
    @property
    def ajax_url(self):
        """
        Return the system `ajax_url`, guaranteed to end with a trailing '/'
        (one is appended if missing) so handler names can be joined onto it.
        """
        # NOTE: the previous docstring claimed the trailing '/' was stripped
        # off; the code actually ensures one is present.
        ajax_url = self.system.ajax_url
        if not ajax_url.endswith("/"):
            ajax_url += "/"
        return ajax_url
    def closed(self):
        """Return True if this problem's close date (due + grace) has passed."""
        return self._closed(self.timeinfo)

    def _closed(self, timeinfo):
        # A missing close_date means the problem never closes.
        if timeinfo.close_date is not None and datetime.now(UTC()) > timeinfo.close_date:
            return True
        return False

    def _err_response(self, msg):
        """
        Return a HttpResponse with a json dump with success=False, and the given error message.
        """
        return {'success': False, 'error': msg}
def _check_required(self, data, required):
actual = set(data.keys())
missing = required - actual
if len(missing) > 0:
return False, "Missing required keys: {0}".format(', '.join(missing))
else:
return True, ""
    def get_html(self):
        """
        Needs to be implemented by inheritors. Renders the HTML that students see.
        @return:
        """
        if self.closed():
            return self.peer_grading_closed()
        if not self.use_for_single_location_local:
            # Panel mode: show the list of problems available to peer grade.
            return self.peer_grading()
        else:
            # b/c handle_ajax expects serialized data payload and directly calls peer_grading
            return self.peer_grading_problem({'location': self.link_to_location.to_deprecated_string()})['html']
def handle_ajax(self, dispatch, data):
"""
Needs to be implemented by child modules. Handles AJAX events.
@return:
"""
handlers = {
'get_next_submission': self.get_next_submission,
'show_calibration_essay': self.show_calibration_essay,
'is_student_calibrated': self.is_student_calibrated,
'save_grade': self.save_grade,
'save_calibration_essay': self.save_calibration_essay,
'problem': self.peer_grading_problem,
}
if dispatch not in handlers:
# This is a dev_facing_error
log.error("Cannot find {0} in handlers in handle_ajax function for open_ended_module.py".format(dispatch))
# This is a dev_facing_error
return json.dumps({'error': 'Error handling action. Please try again.', 'success': False})
data_dict = handlers[dispatch](data)
return json.dumps(data_dict, cls=ComplexEncoder)
    def query_data_for_location(self, location):
        """
        Ask the grading service for this student's progress data at `location`.

        Returns (success, response): `response` is the service payload, or an
        empty dict when the call failed.
        """
        student_id = self.system.anonymous_student_id
        success = False
        response = {}

        try:
            response = self.peer_gs.get_data_for_location(location, student_id)
            # Touch the keys callers rely on so a malformed payload fails here.
            _count_graded = response['count_graded']
            _count_required = response['count_required']
            success = True
        except GradingServiceError:
            # This is a dev_facing_error
            log.exception("Error getting location data from controller for location %s, student %s", location, student_id)

        return success, response

    def get_progress(self):
        # Peer grading does not report incremental progress.
        pass
def get_score(self):
max_score = None
score = None
weight = self.weight
#The old default was None, so set to 1 if it is the old default weight
if weight is None:
weight = 1
score_dict = {
'score': score,
'total': max_score,
}
if not self.use_for_single_location_local or not self.graded:
return score_dict
try:
count_graded = self.student_data_for_location['count_graded']
count_required = self.student_data_for_location['count_required']
except:
success, response = self.query_data_for_location(self.link_to_location)
if not success:
log.exception(
"No instance data found and could not get data from controller for loc {0} student {1}".format(
self.system.location.to_deprecated_string(), self.system.anonymous_student_id
))
return None
count_graded = response['count_graded']
count_required = response['count_required']
if count_required > 0 and count_graded >= count_required:
# Ensures that once a student receives a final score for peer grading, that it does not change.
self.student_data_for_location = response
score = int(count_graded >= count_required and count_graded > 0) * float(weight)
total = float(weight)
score_dict['score'] = score
score_dict['total'] = total
return score_dict
def max_score(self):
''' Maximum score. Two notes:
* This is generic; in abstract, a problem could be 3/5 points on one
randomization, and 5/7 on another
'''
max_grade = None
if self.use_for_single_location_local and self.graded:
max_grade = self.weight
return max_grade
    def get_next_submission(self, data):
        """
        Makes a call to the grading controller for the next essay that should be graded
        Returns a json dict with the following keys:

        'success': bool

        'submission_id': a unique identifier for the submission, to be passed back
            with the grade.

        'submission': the submission, rendered as read-only html for grading

        'rubric': the rubric, also rendered as html.

        'submission_key': a key associated with the submission for validation reasons

        'error': if success is False, will have an error message with more info.
        """
        required = set(['location'])
        success, message = self._check_required(data, required)
        if not success:
            return self._err_response(message)
        # Students are identified to the grading service only by their
        # anonymous id.
        grader_id = self.system.anonymous_student_id
        location = data['location']

        try:
            response = self.peer_gs.get_next_submission(location, grader_id)
            return response
        except GradingServiceError:
            # This is a dev_facing_error
            log.exception("Error getting next submission. server url: %s location: %s, grader_id: %s", self.peer_gs.url, location, grader_id)
            # This is a student_facing_error
            return {'success': False,
                    'error': EXTERNAL_GRADER_NO_CONTACT_ERROR}
    def save_grade(self, data):
        """
        Saves the grade of a given submission.
        Input:
            The request should have the following keys:
            location - problem location
            submission_id - id associated with this submission
            submission_key - submission key given for validation purposes
            score - the grade that was given to the submission
            feedback - the feedback from the student
        Returns
            A json object with the following keys:
            success: bool indicating whether the save was a success
            error: if there was an error in the submission, this is the error message
        """
        required = ['location', 'submission_id', 'submission_key', 'score', 'feedback', 'submission_flagged', 'answer_unknown']
        # Rubric scores are only required when the submission was NOT flagged
        # (a flagged submission is reported rather than scored).
        if data.get("submission_flagged", False) in ["false", False, "False", "FALSE"]:
            required.append("rubric_scores[]")
        success, message = self._check_required(data, set(required))
        if not success:
            return self._err_response(message)

        success, message = self._check_feedback_length(data)
        if not success:
            return self._err_response(message)

        data_dict = {k: data.get(k) for k in required}
        if 'rubric_scores[]' in required:
            # `data` supports .getall -- presumably a webob/Django MultiDict;
            # TODO confirm against the caller.
            data_dict['rubric_scores'] = data.getall('rubric_scores[]')
        data_dict['grader_id'] = self.system.anonymous_student_id

        try:
            response = self.peer_gs.save_grade(**data_dict)
            success, location_data = self.query_data_for_location(data_dict['location'])
            #Don't check for success above because the response = statement will raise the same Exception as the one
            #that will cause success to be false.
            response.update({'required_done': False})
            # Tell the UI when the student has finished all required gradings.
            if 'count_graded' in location_data and 'count_required' in location_data and int(location_data['count_graded']) >= int(location_data['count_required']):
                response['required_done'] = True
            return response
        except GradingServiceError:
            # This is a dev_facing_error
            log.exception("Error saving grade to open ended grading service. server url: %s", self.peer_gs.url)
            # This is a student_facing_error
            return {
                'success': False,
                'error': EXTERNAL_GRADER_NO_CONTACT_ERROR
            }
    def is_student_calibrated(self, data):
        """
        Calls the grading controller to see if the given student is calibrated
        on the given problem

        Input:
            In the request, we need the following arguments:
            location - problem location

        Returns:
            Json object with the following keys
            success - bool indicating whether or not the call was successful
            calibrated - true if the grader has fully calibrated and can now move on to grading
                       - false if the grader is still working on calibration problems
            total_calibrated_on_so_far - the number of calibration essays for this problem
                that this grader has graded
        """
        required = set(['location'])
        success, message = self._check_required(data, required)
        if not success:
            return self._err_response(message)
        grader_id = self.system.anonymous_student_id
        location = data['location']

        try:
            response = self.peer_gs.is_student_calibrated(location, grader_id)
            return response
        except GradingServiceError:
            # This is a dev_facing_error
            log.exception("Error from open ended grading service. server url: %s, grader_id: %s, location: %s", self.peer_gs.url, grader_id, location)
            # This is a student_facing_error
            return {
                'success': False,
                'error': EXTERNAL_GRADER_NO_CONTACT_ERROR
            }
    def show_calibration_essay(self, data):
        """
        Fetch the next calibration essay from the grading controller and return it
        Inputs:
            In the request
            location - problem location

        Returns:
            A json dict with the following keys
            'success': bool

            'submission_id': a unique identifier for the submission, to be passed back
                with the grade.

            'submission': the submission, rendered as read-only html for grading

            'rubric': the rubric, also rendered as html.

            'submission_key': a key associated with the submission for validation reasons

            'error': if success is False, will have an error message with more info.
        """
        required = set(['location'])
        success, message = self._check_required(data, required)
        if not success:
            return self._err_response(message)

        grader_id = self.system.anonymous_student_id
        location = data['location']
        try:
            response = self.peer_gs.show_calibration_essay(location, grader_id)
            return response
        except GradingServiceError:
            # This is a dev_facing_error
            log.exception("Error from open ended grading service. server url: %s, location: %s", self.peer_gs.url, location)
            # This is a student_facing_error
            return {'success': False,
                    'error': EXTERNAL_GRADER_NO_CONTACT_ERROR}
        # if we can't parse the rubric into HTML,
        # (raised while the service call renders the rubric)
        except etree.XMLSyntaxError:
            # This is a dev_facing_error
            log.exception("Cannot parse rubric string.")
            # This is a student_facing_error
            return {'success': False,
                    'error': 'Error displaying submission. Please notify course staff.'}
    def save_calibration_essay(self, data):
        """
        Saves the grader's grade of a given calibration.
        Input:
            The request should have the following keys:
            location - problem location
            submission_id - id associated with this submission
            submission_key - submission key given for validation purposes
            score - the grade that was given to the submission
            feedback - the feedback from the student
        Returns
            A json object with the following keys:
            success: bool indicating whether the save was a success
            error: if there was an error in the submission, this is the error message
            actual_score: the score that the instructor gave to this calibration essay
        """
        required = set(['location', 'submission_id', 'submission_key', 'score', 'feedback', 'rubric_scores[]'])
        success, message = self._check_required(data, required)
        if not success:
            return self._err_response(message)

        data_dict = {k: data.get(k) for k in required}
        data_dict['rubric_scores'] = data.getall('rubric_scores[]')
        data_dict['student_id'] = self.system.anonymous_student_id
        # The service expects the key under a different name.
        data_dict['calibration_essay_id'] = data_dict['submission_id']

        try:
            response = self.peer_gs.save_calibration_essay(**data_dict)
            if 'actual_rubric' in response:
                # Re-render the instructor's rubric into HTML for display.
                rubric_renderer = combined_open_ended_rubric.CombinedOpenEndedRubric(self.system.render_template, True)
                response['actual_rubric'] = rubric_renderer.render_rubric(response['actual_rubric'])['html']
            return response
        except GradingServiceError:
            # This is a dev_facing_error
            log.exception("Error saving calibration grade")
            # This is a student_facing_error
            return self._err_response('There was an error saving your score. Please notify course staff.')
def peer_grading_closed(self):
'''
Show the Peer grading closed template
'''
html = self.system.render_template('peer_grading/peer_grading_closed.html', {
'use_for_single_location': self.use_for_single_location_local
})
return html
    def _find_corresponding_module_for_location(self, location):
        """
        Find the peer grading module that exists at the given location.

        Logs and re-raises ItemNotFoundError / NoPathToItem so callers can
        decide how to degrade.
        """
        try:
            return self.descriptor.system.load_item(location)
        except ItemNotFoundError:
            # The linked problem doesn't exist.
            log.error("Problem {0} does not exist in this course.".format(location))
            raise
        except NoPathToItem:
            # The linked problem does not have a path to it (ie is in a draft or other strange state).
            log.error("Cannot find a path to problem {0} in this course.".format(location))
            raise
    def peer_grading(self, _data=None):
        '''
        Show a peer grading interface
        '''

        # call problem list service
        success = False
        error_text = ""
        problem_list = []

        try:
            problem_list_dict = self.peer_gs.get_problem_list(self.course_id, self.system.anonymous_student_id)
            success = problem_list_dict['success']
            if 'error' in problem_list_dict:
                error_text = problem_list_dict['error']
            problem_list = problem_list_dict['problem_list']

        except GradingServiceError:
            # This is a student_facing_error
            error_text = EXTERNAL_GRADER_NO_CONTACT_ERROR
            log.error(error_text)
            success = False
        # catch error if if the json loads fails
        except ValueError:
            # This is a student_facing_error
            error_text = "Could not get list of problems to peer grade. Please notify course staff."
            log.error(error_text)
            success = False
        except Exception:
            # Last-resort catch: render the page with an empty problem list
            # rather than erroring out entirely.
            log.exception("Could not contact peer grading service.")
            success = False

        # Filter the service's list down to problems that still resolve in
        # this course, annotating each with due-date/closed status.
        good_problem_list = []
        for problem in problem_list:
            problem_location = problem['location']
            try:
                descriptor = self._find_corresponding_module_for_location(problem_location)
            except (NoPathToItem, ItemNotFoundError):
                # Stale entry on the grading service; skip it silently.
                continue
            if descriptor:
                problem['due'] = get_extended_due_date(descriptor)
                grace_period = descriptor.graceperiod
                try:
                    problem_timeinfo = TimeInfo(problem['due'], grace_period)
                except Exception:
                    log.error("Malformed due date or grace period string for location {0}".format(problem_location))
                    raise
                if self._closed(problem_timeinfo):
                    problem['closed'] = True
                else:
                    problem['closed'] = False
            else:
                # if we can't find the due date, assume that it doesn't have one
                problem['due'] = None
                problem['closed'] = False
            good_problem_list.append(problem)

        ajax_url = self.ajax_url
        html = self.system.render_template('peer_grading/peer_grading.html', {
            'ajax_url': ajax_url,
            'success': success,
            'problem_list': good_problem_list,
            'error_text': error_text,
            # Checked above
            'staff_access': False,
            'use_single_location': self.use_for_single_location_local,
        })

        return html
    def peer_grading_problem(self, data=None):
        '''
        Show individual problem interface

        `data`, when provided, must support .get('location'); without it the
        module must be configured for single-problem mode.
        Returns {'html': ..., 'success': bool}.
        '''
        if data is None or data.get('location') is None:
            if not self.use_for_single_location_local:
                # This is an error case, because it must be set to use a single location to be called without get parameters
                # This is a dev_facing_error
                log.error(
                    "Peer grading problem in peer_grading_module called with no get parameters, but use_for_single_location is False.")
                return {'html': "", 'success': False}
            problem_location = self.link_to_location

        elif data.get('location') is not None:
            problem_location = self.course_id.make_usage_key_from_deprecated_string(data.get('location'))
            # Validate the location; raises if it cannot be resolved.
            self._find_corresponding_module_for_location(problem_location)

        ajax_url = self.ajax_url
        html = self.system.render_template('peer_grading/peer_grading_problem.html', {
            'view_html': '',
            'problem_location': problem_location,
            'course_id': self.course_id,
            'ajax_url': ajax_url,
            # Checked above
            'staff_access': False,
            'use_single_location': self.use_for_single_location_local,
        })

        return {'html': html, 'success': True}
def get_instance_state(self):
"""
Returns the current instance state. The module can be recreated from the instance state.
Input: None
Output: A dictionary containing the instance state.
"""
state = {
'student_data_for_location': self.student_data_for_location,
}
return json.dumps(state)
def _check_feedback_length(self, data):
feedback = data.get("feedback")
if feedback and len(feedback) > MAX_ALLOWED_FEEDBACK_LENGTH:
return False, "Feedback is too long, Max length is {0} characters.".format(
MAX_ALLOWED_FEEDBACK_LENGTH
)
else:
return True, ""
class PeerGradingDescriptor(PeerGradingFields, RawDescriptor):
    """
    Module for adding peer grading questions
    """
    mako_template = "widgets/raw-edit.html"
    module_class = PeerGradingModule
    filename_extension = "xml"

    has_score = True
    always_recalculate_grades = True
    #Specify whether or not to pass in open ended interface
    needs_open_ended_interface = True

    # Maps legacy metadata attribute names to current field names.
    # NOTE(review): 'due_data' looks like a typo for 'due_date' -- confirm
    # against the legacy import format before changing it.
    metadata_translations = {
        'is_graded': 'graded',
        'attempts': 'max_attempts',
        'due_data': 'due'
    }

    @property
    def non_editable_metadata_fields(self):
        # Due date and grace period are managed elsewhere; hide them from
        # the Studio metadata editor.
        non_editable_fields = super(PeerGradingDescriptor, self).non_editable_metadata_fields
        non_editable_fields.extend([PeerGradingFields.due, PeerGradingFields.graceperiod])
        return non_editable_fields

    def get_required_module_descriptors(self):
        """
        Returns a list of XModuleDescriptor instances upon which this module depends, but are
        not children of this module.
        """

        # If use_for_single_location is True, this is linked to an open ended problem.
        if self.use_for_single_location:
            # Try to load the linked module.
            # If we can't load it, return empty list to avoid exceptions on progress page.
            try:
                linked_module = self.system.load_item(self.link_to_location)
                return [linked_module]
            except (NoPathToItem, ItemNotFoundError):
                error_message = ("Cannot find the combined open ended module "
                                 "at location {0} being linked to from peer "
                                 "grading module {1}").format(self.link_to_location, self.location)
                log.error(error_message)
                return []
        else:
            return []

    # Proxy to PeerGradingModule so that external callers don't have to know if they're working
    # with a module or a descriptor
    closed = module_attr('closed')
    get_instance_state = module_attr('get_instance_state')
    get_next_submission = module_attr('get_next_submission')
    graded = module_attr('graded')
    is_student_calibrated = module_attr('is_student_calibrated')
    peer_grading = module_attr('peer_grading')
    peer_grading_closed = module_attr('peer_grading_closed')
    peer_grading_problem = module_attr('peer_grading_problem')
    peer_gs = module_attr('peer_gs')
    query_data_for_location = module_attr('query_data_for_location')
    save_calibration_essay = module_attr('save_calibration_essay')
    save_grade = module_attr('save_grade')
    show_calibration_essay = module_attr('show_calibration_essay')
    use_for_single_location_local = module_attr('use_for_single_location_local')
    _find_corresponding_module_for_location = module_attr('_find_corresponding_module_for_location')
| agpl-3.0 |
shahin/deepdive | ddlib/test/with_ddlib.py | 3 | 1693 | #! /usr/bin/env python
# File: udf/ext_has_spouse_features.py
import sys, json
import ddlib
# For each input tuple
# TODO: Sample Data and the input schema.
# sample json
# Each stdin line is one candidate relation encoded as JSON; emit one JSON
# row per extracted feature for that relation.
for row in sys.stdin:
    # Unpack input into tuples.
    #
    obj = json.loads(row)
    words, lemmas = obj["words"], obj["lemma"]
    # Spans of the two candidate mention phrases (start offset + length).
    span1 = ddlib.Span(begin_word_id=obj['p1.start_position'], length=obj['p1.length'])
    span2 = ddlib.Span(begin_word_id=obj['p2.start_position'], length=obj['p2.length'])

    features = set()

    # Feature 1: Find out if a lemma of marry occurs.
    # A better feature would ensure this is on the dependency path between the two.
    #
    lemma_between = ddlib.tokens_between_spans(lemmas, span1, span2)
    married_words = ('marry', 'widow')
    for lemma in lemma_between.elements:
        if lemma in married_words:
            features.add("important_word=%s" % lemma)

    # Feature 2: The number of words between the two phrases.
    # Intuition: if they are close by, the link may be stronger.
    #
    words_between = ddlib.tokens_between_spans(words, span1, span2)
    l = len(list(words_between.elements))
    # Distances of 5+ are bucketed into a single coarse feature.
    features.add("num_words_between=%s" % l if l<5 else "many_words_between")

    # Feature 3: Check if the last name matches heuristically.
    # (Compares the final token of each mention.)
    #
    last_word_left = list(ddlib.materialize_span(words, span1))[-1]
    last_word_right = list(ddlib.materialize_span(words, span2))[-1]
    if (last_word_left == last_word_right):
        features.add("potential_last_name_match")

    # Use this line if you want to print out all features extracted
    #
    #ddlib.log(features)
    # sorted() gives deterministic output ordering for downstream diffing.
    for feature in sorted(features):
        print(json.dumps({
            "relation_id": obj["relation_id"],
            "feature": feature
        }, sort_keys=True))
| apache-2.0 |
mosquito/docker-compose | compose/service.py | 4 | 27979 | from __future__ import unicode_literals
from __future__ import absolute_import
from collections import namedtuple
import logging
import re
import sys
from operator import attrgetter
import six
from docker.errors import APIError
from docker.utils import create_host_config, LogConfig
from . import __version__
from .config import DOCKER_CONFIG_KEYS, merge_environment
from .const import (
DEFAULT_TIMEOUT,
LABEL_CONTAINER_NUMBER,
LABEL_ONE_OFF,
LABEL_PROJECT,
LABEL_SERVICE,
LABEL_VERSION,
LABEL_CONFIG_HASH,
)
from .container import Container
from .legacy import check_for_legacy_containers
from .progress_stream import stream_output, StreamOutputError
from .utils import json_hash
log = logging.getLogger(__name__)
DOCKER_START_KEYS = [
'cap_add',
'cap_drop',
'devices',
'dns',
'dns_search',
'env_file',
'extra_hosts',
'read_only',
'net',
'log_driver',
'pid',
'privileged',
'restart',
'volumes_from',
'security_opt',
]
VALID_NAME_CHARS = '[a-zA-Z0-9]'
class BuildError(Exception):
    """
    Raised when building a service's image fails.

    Attributes:
        service: the service whose image build failed.
        reason: explanation of the failure.
    """

    def __init__(self, service, reason):
        self.service = service
        self.reason = reason
class ConfigError(ValueError):
    """Raised when a service's configuration is invalid."""
class NeedsBuildError(Exception):
    """
    Raised when a service's image must be built but building was disallowed.

    Attributes:
        service: the service that needs its image built.
    """

    def __init__(self, service):
        self.service = service
# external: host path/volume, internal: mount point in the container,
# mode: access mode such as 'rw' or 'ro'.
VolumeSpec = namedtuple('VolumeSpec', 'external internal mode')


# Fully-qualified identity of a single service container instance.
ServiceName = namedtuple('ServiceName', 'project service number')


# action: 'create' | 'recreate' | 'start' | 'noop'; containers: affected set.
ConvergencePlan = namedtuple('ConvergencePlan', 'action containers')
class Service(object):
    """
    A single service from the compose file: owns its configuration and
    manages the lifecycle of the containers that implement it.
    """
    def __init__(self, name, client=None, project='default', links=None, external_links=None, volumes_from=None, net=None, **options):
        # Validate names early: they become docker container name components.
        if not re.match('^%s+$' % VALID_NAME_CHARS, name):
            raise ConfigError('Invalid service name "%s" - only %s are allowed' % (name, VALID_NAME_CHARS))
        if not re.match('^%s+$' % VALID_NAME_CHARS, project):
            raise ConfigError('Invalid project name "%s" - only %s are allowed' % (project, VALID_NAME_CHARS))
        # 'image' and 'build' are mutually exclusive, and exactly one is required.
        if 'image' in options and 'build' in options:
            raise ConfigError('Service %s has both an image and build path specified. A service can either be built to image or use an existing image, not both.' % name)
        if 'image' not in options and 'build' not in options:
            raise ConfigError('Service %s has neither an image nor a build path specified. Exactly one must be provided.' % name)

        self.name = name
        self.client = client
        self.project = project
        self.links = links or []
        self.external_links = external_links or []
        self.volumes_from = volumes_from or []
        self.net = net or None
        self.options = options
    def containers(self, stopped=False, one_off=False):
        """
        Return this service's containers, located via their compose labels.
        `stopped` includes non-running containers; `one_off` selects `run`
        containers instead of normal ones.
        """
        containers = [
            Container.from_ps(self.client, container)
            for container in self.client.containers(
                all=stopped,
                filters={'label': self.labels(one_off=one_off)})]

        if not containers:
            # Nothing labelled: warn if containers from a pre-label version
            # of compose exist for this service.
            check_for_legacy_containers(
                self.client,
                self.project,
                [self.name],
                stopped=stopped,
                one_off=one_off)

        return containers

    def get_container(self, number=1):
        """Return a :class:`compose.container.Container` for this service. The
        container must be active, and match `number`.
        """
        labels = self.labels() + ['{0}={1}'.format(LABEL_CONTAINER_NUMBER, number)]
        for container in self.client.containers(filters={'label': labels}):
            # First (and only expected) match wins.
            return Container.from_ps(self.client, container)

        raise ValueError("No container found for %s_%s" % (self.name, number))
    def start(self, **options):
        """Start every container of this service that is not running."""
        for c in self.containers(stopped=True):
            self.start_container_if_stopped(c, **options)

    def stop(self, **options):
        """Stop every running container of this service."""
        for c in self.containers():
            log.info("Stopping %s..." % c.name)
            c.stop(**options)

    def kill(self, **options):
        """Send a kill signal to every running container of this service."""
        for c in self.containers():
            log.info("Killing %s..." % c.name)
            c.kill(**options)

    def restart(self, **options):
        """Restart every running container of this service."""
        for c in self.containers():
            log.info("Restarting %s..." % c.name)
            c.restart(**options)
    def scale(self, desired_num):
        """
        Adjusts the number of containers to the specified number and ensures
        they are running.

        - creates containers until there are at least `desired_num`
        - stops containers until there are at most `desired_num` running
        - starts containers until there are at least `desired_num` running
        - removes all stopped containers
        """
        if not self.can_be_scaled():
            log.warn('Service %s specifies a port on the host. If multiple containers '
                     'for this service are created on a single host, the port will clash.'
                     % self.name)

        # Create enough containers
        containers = self.containers(stopped=True)
        while len(containers) < desired_num:
            containers.append(self.create_container())

        running_containers = []
        stopped_containers = []
        for c in containers:
            if c.is_running:
                running_containers.append(c)
            else:
                stopped_containers.append(c)
        # Sort by container number so we always stop the highest-numbered
        # containers first and start the lowest-numbered ones first.
        running_containers.sort(key=lambda c: c.number)
        stopped_containers.sort(key=lambda c: c.number)

        # Stop containers
        while len(running_containers) > desired_num:
            c = running_containers.pop()
            log.info("Stopping %s..." % c.name)
            c.stop(timeout=1)
            stopped_containers.append(c)

        # Start containers
        while len(running_containers) < desired_num:
            c = stopped_containers.pop(0)
            log.info("Starting %s..." % c.name)
            self.start_container(c)
            running_containers.append(c)

        self.remove_stopped()
def remove_stopped(self, **options):
for c in self.containers(stopped=True):
if not c.is_running:
log.info("Removing %s..." % c.name)
c.remove(**options)
    def create_container(self,
                         one_off=False,
                         insecure_registry=False,
                         do_build=True,
                         previous_container=None,
                         number=None,
                         quiet=False,
                         **override_options):
        """
        Create a container for this service. If the image doesn't exist, attempt to pull
        it.
        """
        self.ensure_image_exists(
            do_build=do_build,
            insecure_registry=insecure_registry,
        )

        container_options = self._get_container_create_options(
            override_options,
            # Allocate the next free number unless the caller pins one
            # (e.g. when recreating an existing container).
            number or self._next_container_number(one_off=one_off),
            one_off=one_off,
            previous_container=previous_container,
        )

        if 'name' in container_options and not quiet:
            log.info("Creating %s..." % container_options['name'])

        return Container.create(self.client, **container_options)
def ensure_image_exists(self,
do_build=True,
insecure_registry=False):
if self.image():
return
if self.can_be_built():
if do_build:
self.build()
else:
raise NeedsBuildError(self)
else:
self.pull(insecure_registry=insecure_registry)
    def image(self):
        """
        Return the inspected image dict for this service, or None when the
        image does not exist locally.
        """
        try:
            return self.client.inspect_image(self.image_name)
        except APIError as e:
            # A 404 with a "No such image" explanation just means we need to
            # pull or build; anything else is a real error.
            if e.response.status_code == 404 and e.explanation and 'No such image' in str(e.explanation):
                return None
            else:
                raise

    @property
    def image_name(self):
        # Built services are tagged with the project-qualified name;
        # image-based services use the configured image reference.
        if self.can_be_built():
            return self.full_name
        else:
            return self.options['image']
    def convergence_plan(self,
                         allow_recreate=True,
                         smart_recreate=False):
        """
        Decide what must happen to converge this service to its configuration:
        'create' when no containers exist, 'start'/'noop' when config is
        unchanged (smart_recreate), otherwise 'recreate' (or 'start' when
        recreation is disallowed).
        """

        containers = self.containers(stopped=True)

        if not containers:
            return ConvergencePlan('create', [])

        if smart_recreate and not self._containers_have_diverged(containers):
            stopped = [c for c in containers if not c.is_running]

            if stopped:
                return ConvergencePlan('start', stopped)

            return ConvergencePlan('noop', containers)

        if not allow_recreate:
            return ConvergencePlan('start', containers)

        return ConvergencePlan('recreate', containers)

    def _containers_have_diverged(self, containers):
        """Return True if any container's stored config hash differs from the
        current service configuration hash (logging each divergence)."""
        config_hash = self.config_hash()
        has_diverged = False

        for c in containers:
            container_config_hash = c.labels.get(LABEL_CONFIG_HASH, None)
            if container_config_hash != config_hash:
                log.debug(
                    '%s has diverged: %s != %s',
                    c.name, container_config_hash, config_hash,
                )
                has_diverged = True

        return has_diverged
    def execute_convergence_plan(self,
                                 plan,
                                 insecure_registry=False,
                                 do_build=True,
                                 timeout=DEFAULT_TIMEOUT):
        """
        Carry out a plan produced by convergence_plan() and return the list of
        resulting containers.
        """
        (action, containers) = plan

        if action == 'create':
            container = self.create_container(
                insecure_registry=insecure_registry,
                do_build=do_build,
            )
            self.start_container(container)

            return [container]

        elif action == 'recreate':
            return [
                self.recreate_container(
                    c,
                    insecure_registry=insecure_registry,
                    timeout=timeout
                )
                for c in containers
            ]

        elif action == 'start':
            for c in containers:
                self.start_container_if_stopped(c)

            return containers

        elif action == 'noop':
            for c in containers:
                log.info("%s is up-to-date" % c.name)

            return containers

        else:
            # Defensive: convergence_plan() only emits the four actions above.
            raise Exception("Invalid action: {}".format(action))
    def recreate_container(self,
                           container,
                           insecure_registry=False,
                           timeout=DEFAULT_TIMEOUT):
        """Recreate a container.

        The original container is renamed to a temporary name so that data
        volumes can be copied to the new container, before the original
        container is removed.
        """
        log.info("Recreating %s..." % container.name)
        try:
            container.stop(timeout=timeout)
        except APIError as e:
            # Docker can report 500 "no such process" when the container has
            # already exited; that's fine for our purposes.
            if (e.response.status_code == 500
                    and e.explanation
                    and 'no such process' in str(e.explanation)):
                pass
            else:
                raise

        # Use a hopefully unique container name by prepending the short id
        self.client.rename(
            container.id,
            '%s_%s' % (container.short_id, container.name))

        new_container = self.create_container(
            insecure_registry=insecure_registry,
            do_build=False,
            previous_container=container,
            # Keep the same container number as the one being replaced.
            number=container.labels.get(LABEL_CONTAINER_NUMBER),
            quiet=True,
        )
        self.start_container(new_container)
        container.remove()
        return new_container
def start_container_if_stopped(self, container):
if container.is_running:
return container
else:
log.info("Starting %s..." % container.name)
return self.start_container(container)
def start_container(self, container):
container.start()
return container
    def config_hash(self):
        """Hash of the service config, stored as a label on containers so
        divergence can be detected later."""
        return json_hash(self.config_dict())

    def config_dict(self):
        # Include the image id so that changing the underlying image also
        # counts as a configuration change.
        return {
            'options': self.options,
            'image_id': self.image()['Id'],
        }

    def get_dependency_names(self):
        """Names of services this one depends on (links, volumes_from, net)."""
        net_name = self.get_net_name()
        return (self.get_linked_names() +
                self.get_volumes_from_names() +
                ([net_name] if net_name else []))

    def get_linked_names(self):
        return [s.name for (s, _) in self.links]

    def get_volumes_from_names(self):
        # volumes_from entries may also be raw containers; only services count
        # as dependencies.
        return [s.name for s in self.volumes_from if isinstance(s, Service)]

    def get_net_name(self):
        # Only a service-valued net creates a dependency.
        if isinstance(self.net, Service):
            return self.net.name
        else:
            return
    def get_container_name(self, number, one_off=False):
        # TODO: Implement issue #652 here
        return build_container_name(self.project, self.name, number, one_off)

    # TODO: this would benefit from github.com/docker/docker/pull/11943
    # to remove the need to inspect every container
    def _next_container_number(self, one_off=False):
        """Return one more than the highest existing container number for this
        service (1 when none exist)."""
        numbers = [
            Container.from_ps(self.client, container).number
            for container in self.client.containers(
                all=True,
                filters={'label': self.labels(one_off=one_off)})
        ]
        return 1 if not numbers else max(numbers) + 1
    def _get_links(self, link_to_self):
        """
        Build the (container_name, alias) pairs for docker's links option,
        covering service links, optional self-links, and external links.
        """
        links = []
        for service, link_name in self.links:
            for container in service.containers():
                # Expose each linked container under its alias and both of its
                # real names (qualified and unqualified).
                links.append((container.name, link_name or service.name))
                links.append((container.name, container.name))
                links.append((container.name, container.name_without_project))
        if link_to_self:
            for container in self.containers():
                links.append((container.name, self.name))
                links.append((container.name, container.name))
                links.append((container.name, container.name_without_project))
        for external_link in self.external_links:
            # "name:alias" form; a bare name links under itself.
            if ':' not in external_link:
                link_name = external_link
            else:
                external_link, link_name = external_link.split(':')
            links.append((external_link, link_name))
        return links
    def _get_volumes_from(self):
        """Resolve volumes_from sources to a list of container ids.

        A Service source contributes all of its containers (stopped ones
        included); if it has none yet, one is created as a side effect so
        its volumes exist. A Container source contributes its own id.
        """
        volumes_from = []
        for volume_source in self.volumes_from:
            if isinstance(volume_source, Service):
                containers = volume_source.containers(stopped=True)
                if not containers:
                    volumes_from.append(volume_source.create_container().id)
                else:
                    volumes_from.extend(map(attrgetter('id'), containers))
            elif isinstance(volume_source, Container):
                volumes_from.append(volume_source.id)
        return volumes_from
def _get_net(self):
if not self.net:
return "bridge"
if isinstance(self.net, Service):
containers = self.net.containers()
if len(containers) > 0:
net = 'container:' + containers[0].id
else:
log.warning("Warning: Service %s is trying to use reuse the network stack "
"of another service that is not running." % (self.net.name))
net = None
elif isinstance(self.net, Container):
net = 'container:' + self.net.id
else:
net = self.net
return net
    def _get_container_create_options(
            self,
            override_options,
            number,
            one_off=False,
            previous_container=None):
        """Assemble the keyword dict for the Docker create-container call.

        Merges the service's options with `override_options` (which wins),
        names and labels the container, and carries volume bindings and
        environment over from `previous_container` when recreating.
        NOTE: `override_options` is mutated in place ('binds' is added).
        """
        # Only stamp a config hash on normal, unmodified runs so that `up`
        # can later detect configuration drift from the label.
        add_config_hash = (not one_off and not override_options)
        container_options = dict(
            (k, self.options[k])
            for k in DOCKER_CONFIG_KEYS if k in self.options)
        container_options.update(override_options)
        container_options['name'] = self.get_container_name(number, one_off)
        if add_config_hash:
            config_hash = self.config_hash()
            if 'labels' not in container_options:
                container_options['labels'] = {}
            container_options['labels'][LABEL_CONFIG_HASH] = config_hash
            log.debug("Added config hash: %s" % config_hash)
        if 'detach' not in container_options:
            container_options['detach'] = True
        # If a qualified hostname was given, split it into an
        # unqualified hostname and a domainname unless domainname
        # was also given explicitly. This matches the behavior of
        # the official Docker CLI in that scenario.
        if ('hostname' in container_options
                and 'domainname' not in container_options
                and '.' in container_options['hostname']):
            parts = container_options['hostname'].partition('.')
            container_options['hostname'] = parts[0]
            container_options['domainname'] = parts[2]
        # Normalize ports/expose entries to the internal port (plus optional
        # protocol tuple) expected by the create call.
        if 'ports' in container_options or 'expose' in self.options:
            ports = []
            all_ports = container_options.get('ports', []) + self.options.get('expose', [])
            for port in all_ports:
                port = str(port)
                if ':' in port:
                    port = port.split(':')[-1]
                if '/' in port:
                    port = tuple(port.split('/'))
                ports.append(port)
            container_options['ports'] = ports
        override_options['binds'] = merge_volume_bindings(
            container_options.get('volumes') or [],
            previous_container)
        if 'volumes' in container_options:
            container_options['volumes'] = dict(
                (parse_volume_spec(v).internal, {})
                for v in container_options['volumes'])
        container_options['environment'] = merge_environment(
            self.options.get('environment'),
            override_options.get('environment'))
        if previous_container:
            # Swarm affinity hint: keep the new container on the same node.
            container_options['environment']['affinity:container'] = ('=' + previous_container.id)
        container_options['image'] = self.image_name
        container_options['labels'] = build_container_labels(
            container_options.get('labels', {}),
            self.labels(one_off=one_off),
            number)
        # Delete options which are only used when starting
        for key in DOCKER_START_KEYS:
            container_options.pop(key, None)
        container_options['host_config'] = self._get_container_host_config(
            override_options,
            one_off=one_off)
        return container_options
    def _get_container_host_config(self, override_options, one_off=False):
        """Translate service options into a Docker HostConfig structure.

        `override_options` takes precedence over the service's own options.
        """
        options = dict(self.options, **override_options)
        port_bindings = build_port_bindings(options.get('ports') or [])
        privileged = options.get('privileged', False)
        cap_add = options.get('cap_add', None)
        cap_drop = options.get('cap_drop', None)
        log_config = LogConfig(type=options.get('log_driver', 'json-file'))
        pid = options.get('pid', None)
        security_opt = options.get('security_opt', None)
        dns = options.get('dns', None)
        # The API expects lists; the YAML may give a single string.
        if isinstance(dns, six.string_types):
            dns = [dns]
        dns_search = options.get('dns_search', None)
        if isinstance(dns_search, six.string_types):
            dns_search = [dns_search]
        restart = parse_restart_spec(options.get('restart', None))
        extra_hosts = build_extra_hosts(options.get('extra_hosts', None))
        read_only = options.get('read_only', None)
        devices = options.get('devices', None)
        return create_host_config(
            links=self._get_links(link_to_self=one_off),
            port_bindings=port_bindings,
            binds=options.get('binds'),
            volumes_from=self._get_volumes_from(),
            privileged=privileged,
            network_mode=self._get_net(),
            devices=devices,
            dns=dns,
            dns_search=dns_search,
            restart_policy=restart,
            cap_add=cap_add,
            cap_drop=cap_drop,
            log_config=log_config,
            extra_hosts=extra_hosts,
            read_only=read_only,
            pid_mode=pid,
            security_opt=security_opt
        )
    def build(self, no_cache=False):
        """Build this service's image, streaming docker's output to stdout.

        Returns the id of the built image.

        Raises:
            BuildError: if the output stream reports an error, or if no
                "Successfully built" line can be parsed from the output.
        """
        log.info('Building %s...' % self.name)
        path = six.binary_type(self.options['build'])
        build_output = self.client.build(
            path=path,
            tag=self.image_name,
            stream=True,
            rm=True,
            nocache=no_cache,
            dockerfile=self.options.get('dockerfile', None),
        )
        try:
            all_events = stream_output(build_output, sys.stdout)
        except StreamOutputError as e:
            raise BuildError(self, unicode(e))
        # Ensure the HTTP connection is not reused for another
        # streaming command, as the Docker daemon can sometimes
        # complain about it
        self.client.close()
        # The image id only appears in the final "Successfully built" line.
        image_id = None
        for event in all_events:
            if 'stream' in event:
                match = re.search(r'Successfully built ([0-9a-f]+)', event.get('stream', ''))
                if match:
                    image_id = match.group(1)
        if image_id is None:
            raise BuildError(self, event if all_events else 'Unknown')
        return image_id
def can_be_built(self):
return 'build' in self.options
@property
def full_name(self):
"""
The tag to give to images built for this service.
"""
return '%s_%s' % (self.project, self.name)
def labels(self, one_off=False):
return [
'{0}={1}'.format(LABEL_PROJECT, self.project),
'{0}={1}'.format(LABEL_SERVICE, self.name),
'{0}={1}'.format(LABEL_ONE_OFF, "True" if one_off else "False")
]
def can_be_scaled(self):
for port in self.options.get('ports', []):
if ':' in str(port):
return False
return True
    def pull(self, insecure_registry=False):
        """Pull the configured image, streaming progress to stdout.

        No-op for build-only services (no 'image' option).
        """
        if 'image' not in self.options:
            return
        repo, tag = parse_repository_tag(self.options['image'])
        tag = tag or 'latest'
        log.info('Pulling %s (%s:%s)...' % (self.name, repo, tag))
        output = self.client.pull(
            repo,
            tag=tag,
            stream=True,
            insecure_registry=insecure_registry)
        stream_output(output, sys.stdout)
# Names
def build_container_name(project, service, number, one_off=False):
    """Return "<project>_<service>[_run]_<number>".

    The "run" segment marks one-off (docker-compose run) containers.
    """
    parts = [project, service]
    if one_off:
        parts.append('run')
    parts.append(str(number))
    return '_'.join(parts)
# Images
def parse_repository_tag(s):
    """Split "repo[:tag]" into (repo, tag), with tag == "" when absent.

    A colon whose right-hand side contains '/' is a registry host:port
    (e.g. "localhost:5000/img"), not a tag separator.
    """
    if ":" not in s:
        return s, ""
    candidate_repo, candidate_tag = s.rsplit(":", 1)
    if "/" in candidate_tag:
        return s, ""
    return candidate_repo, candidate_tag
# Volumes
def merge_volume_bindings(volumes_option, previous_container):
    """Return a list of volume bindings for a container. Container data volumes
    are replaced by those from the previous container.

    Only entries with an external part (a ':' in the spec) produce bindings;
    bare internal paths are anonymous data volumes.
    """
    volume_bindings = dict(
        build_volume_binding(parse_volume_spec(volume))
        for volume in volumes_option or []
        if ':' in volume)
    if previous_container:
        # Prefer the old container's data-volume paths so data survives
        # a recreate.
        volume_bindings.update(
            get_container_data_volumes(previous_container, volumes_option))
    return volume_bindings.values()
def get_container_data_volumes(container, volumes_option):
    """Find the container data volumes that are in `volumes_option`, and return
    a mapping of volume bindings for those volumes.

    Considers both explicitly configured volumes and volumes declared by the
    image, so image-declared data volumes are preserved across recreates too.
    """
    volumes = []
    volumes_option = volumes_option or []
    container_volumes = container.get('Volumes') or {}
    image_volumes = container.image_config['ContainerConfig'].get('Volumes') or {}
    # Python 2: dict.keys() returns a list, so the concatenation works here.
    for volume in set(volumes_option + image_volumes.keys()):
        volume = parse_volume_spec(volume)
        # No need to preserve host volumes
        if volume.external:
            continue
        volume_path = container_volumes.get(volume.internal)
        # New volume, doesn't exist in the old container
        if not volume_path:
            continue
        # Copy existing volume from old container
        volume = volume._replace(external=volume_path)
        volumes.append(build_volume_binding(volume))
    return dict(volumes)
def build_volume_binding(volume_spec):
    """Return an (internal_path, "external:internal:mode") binding pair."""
    external, internal, mode = volume_spec
    return internal, "{0}:{1}:{2}".format(external, internal, mode)
def parse_volume_spec(volume_config):
    """Parse "external:internal[:mode]" (or a bare internal path) into a
    VolumeSpec.

    Raises ConfigError for more than three colon-separated fields or for a
    mode other than 'rw'/'ro'.
    """
    parts = volume_config.split(':')
    if len(parts) > 3:
        raise ConfigError("Volume %s has incorrect format, should be "
                          "external:internal[:mode]" % volume_config)
    if len(parts) == 1:
        external, internal, mode = None, parts[0], 'rw'
    elif len(parts) == 2:
        external, internal = parts
        mode = 'rw'
    else:
        external, internal, mode = parts
    if mode not in ('rw', 'ro'):
        raise ConfigError("Volume %s has invalid mode (%s), should be "
                          "one of: rw, ro." % (volume_config, mode))
    return VolumeSpec(external, internal, mode)
# Ports
def build_port_bindings(ports):
    """Group external binding specs by internal port for the Docker API."""
    port_bindings = {}
    for port in ports:
        internal_port, external = split_port(port)
        port_bindings.setdefault(internal_port, []).append(external)
    return port_bindings
def split_port(port):
    """Split "[[remote_ip:]remote_port:]port[/protocol]" into
    (internal_port, external_spec).

    external_spec is None (no mapping), "remote_port", or a
    (remote_ip, remote_port_or_None) tuple. Raises ConfigError when more
    than three colon-separated fields are given.
    """
    parts = str(port).split(':')
    if len(parts) > 3:
        raise ConfigError('Invalid port "%s", should be '
                          '[[remote_ip:]remote_port:]port[/protocol]' % port)
    if len(parts) == 1:
        return parts[0], None
    if len(parts) == 2:
        external_port, internal_port = parts
        return internal_port, external_port
    external_ip, external_port, internal_port = parts
    return internal_port, (external_ip, external_port or None)
# Labels
def build_container_labels(label_options, service_labels, number, one_off=False):
    """Merge user labels with compose's service labels for one container.

    Returns a NEW dict. Previously the caller's `label_options` dict was
    mutated in place, which could leak compose-internal labels into a shared
    options dict; copying first fixes that aliasing. The `one_off` parameter
    is unused but kept for interface compatibility.
    """
    labels = dict(label_options) if label_options else {}
    # service_labels entries are "key=value" strings.
    labels.update(label.split('=', 1) for label in service_labels)
    labels[LABEL_CONTAINER_NUMBER] = str(number)
    labels[LABEL_VERSION] = __version__
    return labels
# Restart policy
def parse_restart_spec(restart_config):
    """Parse "mode[:max_retry]" into Docker's RestartPolicy dict.

    Returns None for an empty/missing spec; raises ConfigError on more than
    two colon-separated fields.
    """
    if not restart_config:
        return None
    parts = restart_config.split(':')
    if len(parts) > 2:
        raise ConfigError("Restart %s has incorrect format, should be "
                          "mode[:max_retry]" % restart_config)
    name = parts[0]
    max_retry_count = parts[1] if len(parts) == 2 else 0
    return {'Name': name, 'MaximumRetryCount': int(max_retry_count)}
# Extra hosts
def build_extra_hosts(extra_hosts_config):
    """Normalize extra_hosts config to a {host: ip} dict.

    Accepts a dict (returned as-is), a list of "host:ip" strings, or a
    falsy value (empty dict). Anything else raises ConfigError.
    NOTE(review): "host:ip".split(':') would break IPv6 addresses — confirm
    whether IPv6 entries are expected here.
    """
    if not extra_hosts_config:
        return {}
    if isinstance(extra_hosts_config, dict):
        return extra_hosts_config
    if isinstance(extra_hosts_config, list):
        extra_hosts_dict = {}
        for extra_hosts_line in extra_hosts_config:
            if not isinstance(extra_hosts_line, six.string_types):
                raise ConfigError(
                    "extra_hosts_config \"%s\" must be either a list of strings or a string->string mapping," %
                    extra_hosts_config
                )
            host, ip = extra_hosts_line.split(':')
            extra_hosts_dict[host.strip()] = ip.strip()
        return extra_hosts_dict
    raise ConfigError(
        "extra_hosts_config \"%s\" must be either a list of strings or a string->string mapping," %
        extra_hosts_config
    )
| apache-2.0 |
eestay/edx-platform | common/lib/xmodule/xmodule/video_module/video_handlers.py | 17 | 12983 | """
Handlers for video module.
StudentViewHandlers are handlers for video module instance.
StudioViewHandlers are handlers for video descriptor instance.
"""
import json
import logging
from webob import Response
from xblock.core import XBlock
from xmodule.exceptions import NotFoundError
from xmodule.fields import RelativeTime
from .transcripts_utils import (
get_or_create_sjson,
TranscriptException,
TranscriptsGenerationException,
generate_sjson_for_all_speeds,
youtube_speed_dict,
Transcript,
save_to_store,
subs_filename
)
log = logging.getLogger(__name__)
# Disable no-member warning:
# pylint: disable=no-member
class VideoStudentViewHandlers(object):
    """
    Handlers for video module instance (LMS student view).

    Mixed into the video module; relies on xfields such as
    `transcript_language`, `transcripts`, `sub`, and `location`.
    """
    def handle_ajax(self, dispatch, data):
        """
        Update values of xfields, that were changed by student.

        Only whitelisted keys are accepted; some values are decoded from
        JSON / ISO-time strings before being stored on the module.
        Raises NotFoundError for any dispatch other than 'save_user_state'.
        """
        accepted_keys = [
            'speed', 'saved_video_position', 'transcript_language',
            'transcript_download_format', 'youtube_is_available'
        ]
        conversions = {
            'speed': json.loads,
            'saved_video_position': RelativeTime.isotime_to_timedelta,
            'youtube_is_available': json.loads,
        }
        if dispatch == 'save_user_state':
            for key in data:
                if key in accepted_keys:
                    if key in conversions:
                        value = conversions[key](data[key])
                    else:
                        value = data[key]
                    setattr(self, key, value)
                    if key == 'speed':
                        # Keep the user's global default speed in sync.
                        self.global_speed = self.speed
            return json.dumps({'success': True})
        log.debug(u"GET {0}".format(data))
        log.debug(u"DISPATCH {0}".format(dispatch))
        raise NotFoundError('Unexpected dispatch type')
    def translation(self, youtube_id):
        """
        This is called to get transcript file for specific language.
        youtube_id: str: must be one of youtube_ids or None if HTML video
        Logic flow:
        If youtube_id doesn't exist, we have a video in HTML5 mode. Otherwise,
        the video is in Youtube or Flash modes.
        if youtube:
            If english -> give back youtube_id subtitles:
                Return what we have in contentstore for given youtube_id.
            If non-english:
                a) extract youtube_id from srt file name.
                b) try to find sjson by youtube_id and return if successful.
                c) generate sjson from srt for all youtube speeds.
        if non-youtube:
            If english -> give back `sub` subtitles:
                Return what we have in contentstore for the subs_id that is stored in self.sub.
            If non-english:
                a) try to find previously generated sjson.
                b) otherwise generate sjson from srt and return it.
        Filenames naming:
            en: subs_videoid.srt.sjson
            non_en: uk_subs_videoid.srt.sjson
        Raises:
            NotFoundError if for 'en' subtitles no asset is uploaded.
            NotFoundError if youtube_id does not exist / invalid youtube_id
        """
        if youtube_id:
            # Youtube case:
            if self.transcript_language == 'en':
                return Transcript.asset(self.location, youtube_id).data
            youtube_ids = youtube_speed_dict(self)
            if youtube_id not in youtube_ids:
                log.info("Youtube_id %s does not exist", youtube_id)
                raise NotFoundError
            try:
                sjson_transcript = Transcript.asset(self.location, youtube_id, self.transcript_language).data
            except (NotFoundError):
                log.info("Can't find content in storage for %s transcript: generating.", youtube_id)
                # Generate sjson for every speed so any youtube_id resolves.
                generate_sjson_for_all_speeds(
                    self,
                    self.transcripts[self.transcript_language],
                    {speed: youtube_id for youtube_id, speed in youtube_ids.iteritems()},
                    self.transcript_language
                )
                sjson_transcript = Transcript.asset(self.location, youtube_id, self.transcript_language).data
            return sjson_transcript
        else:
            # HTML5 case
            if self.transcript_language == 'en':
                return Transcript.asset(self.location, self.sub).data
            else:
                return get_or_create_sjson(self)
    def get_static_transcript(self, request):
        """
        Courses that are imported with the --nostatic flag do not show
        transcripts/captions properly even if those captions are stored inside
        their static folder. This adds a last resort method of redirecting to
        the static asset path of the course if the transcript can't be found
        inside the contentstore and the course has the static_asset_path field
        set.

        Returns a 307 redirect Response on success, otherwise a 404.
        """
        response = Response(status=404)
        # Only do redirect for English
        if not self.transcript_language == 'en':
            return response
        video_id = request.GET.get('videoId', None)
        if video_id:
            transcript_name = video_id
        else:
            transcript_name = self.sub
        if transcript_name:
            # Get the asset path for course
            asset_path = None
            course = self.descriptor.runtime.modulestore.get_course(self.course_id)
            if course.static_asset_path:
                asset_path = course.static_asset_path
            else:
                # It seems static_asset_path is not set in any XMLModuleStore courses.
                asset_path = getattr(course, 'data_dir', '')
            if asset_path:
                response = Response(
                    status=307,
                    location='/static/{0}/{1}'.format(
                        asset_path,
                        subs_filename(transcript_name, self.transcript_language)
                    )
                )
        return response
    @XBlock.handler
    def transcript(self, request, dispatch):
        """
        Entry point for transcript handlers for student_view.
        Request GET may contain `videoId` for `translation` dispatch.
        Dispatches, (HTTP GET):
            /translation/[language_id]
            /download
            /available_translations/
        Explanations:
            `download`: returns SRT or TXT file.
            `translation`: depends on HTTP methods:
                Provide translation for requested language, SJSON format is sent back on success,
                Proper language_id should be in url.
            `available_translations`:
                Returns list of languages, for which transcript files exist.
                For 'en' check if SJSON exists. For non-`en` check if SRT file exists.
        """
        if dispatch.startswith('translation'):
            language = dispatch.replace('translation', '').strip('/')
            if not language:
                log.info("Invalid /translation request: no language.")
                return Response(status=400)
            # Python 2: keys() is a list, so the concatenation works.
            if language not in ['en'] + self.transcripts.keys():
                log.info("Video: transcript facilities are not available for given language.")
                return Response(status=404)
            if language != self.transcript_language:
                self.transcript_language = language
            try:
                transcript = self.translation(request.GET.get('videoId', None))
            except (TypeError, NotFoundError) as ex:
                log.info(ex.message)
                # Try to return static URL redirection as last resort
                # if no translation is required
                return self.get_static_transcript(request)
            except (
                TranscriptException,
                UnicodeDecodeError,
                TranscriptsGenerationException
            ) as ex:
                log.info(ex.message)
                response = Response(status=404)
            else:
                response = Response(transcript, headerlist=[('Content-Language', language)])
                response.content_type = Transcript.mime_types['sjson']
        elif dispatch == 'download':
            try:
                transcript_content, transcript_filename, transcript_mime_type = self.get_transcript(self.transcript_download_format)
            except (NotFoundError, ValueError, KeyError, UnicodeDecodeError):
                log.debug("Video@download exception")
                return Response(status=404)
            else:
                response = Response(
                    transcript_content,
                    headerlist=[
                        ('Content-Disposition', 'attachment; filename="{}"'.format(transcript_filename.encode('utf8'))),
                        ('Content-Language', self.transcript_language),
                    ]
                )
                response.content_type = transcript_mime_type
        elif dispatch == 'available_translations':
            available_translations = self.available_translations()
            if available_translations:
                response = Response(json.dumps(available_translations))
                response.content_type = 'application/json'
            else:
                response = Response(status=404)
        else:  # unknown dispatch
            log.debug("Dispatch is not allowed")
            response = Response(status=404)
        return response
class VideoStudioViewHandlers(object):
    """
    Handlers for Studio view.
    """
    @XBlock.handler
    def studio_transcript(self, request, dispatch):
        """
        Entry point for Studio transcript handlers.
        Dispatches:
            /translation/[language_id] - language_id should be in url.
        `translation` dispatch supports the following HTTP methods:
            `POST`:
                Upload srt file. Check possibility of generation of proper sjson files.
                For now, it works only for self.transcripts, not for `en`.
                Do not update self.transcripts, as fields are updated on save in Studio.
            `GET`:
                Return filename from storage. SRT format is sent back on success. Filename should be in GET dict.
        We raise all exceptions right in Studio:
            NotFoundError:
                Video or asset was deleted from module/contentstore, but request came later.
                Seems impossible to be raised. module_render.py catches NotFoundErrors from here.
            /translation POST:
                TypeError:
                    Unjsonable filename or content.
                TranscriptsGenerationException, TranscriptException:
                    no SRT extension or not parse-able by PySRT
                UnicodeDecodeError: non-UTF8 uploaded file content encoding.
        """
        _ = self.runtime.service(self, "i18n").ugettext
        if dispatch.startswith('translation'):
            language = dispatch.replace('translation', '').strip('/')
            if not language:
                log.info("Invalid /translation request: no language.")
                return Response(status=400)
            # NOTE(review): a method other than POST/GET falls through to the
            # final `return response` with `response` unbound
            # (UnboundLocalError) — confirm whether other verbs can reach here.
            if request.method == 'POST':
                subtitles = request.POST['file']
                try:
                    file_data = subtitles.file.read()
                    # Validate UTF-8 before storing; raises UnicodeDecodeError.
                    unicode(file_data, "utf-8", "strict")
                except UnicodeDecodeError:
                    log.info("Invalid encoding type for transcript file: {}".format(subtitles.filename))
                    msg = _("Invalid encoding type, transcripts should be UTF-8 encoded.")
                    return Response(msg, status=400)
                save_to_store(file_data, unicode(subtitles.filename), 'application/x-subrip', self.location)
                generate_sjson_for_all_speeds(self, unicode(subtitles.filename), {}, language)
                response = {'filename': unicode(subtitles.filename), 'status': 'Success'}
                return Response(json.dumps(response), status=201)
            elif request.method == 'GET':
                filename = request.GET.get('filename')
                if not filename:
                    log.info("Invalid /translation request: no filename in request.GET")
                    return Response(status=400)
                content = Transcript.get_asset(self.location, filename).data
                response = Response(content, headerlist=[
                    ('Content-Disposition', 'attachment; filename="{}"'.format(filename.encode('utf8'))),
                    ('Content-Language', language),
                ])
                response.content_type = Transcript.mime_types['srt']
        else:  # unknown dispatch
            log.debug("Dispatch is not allowed")
            response = Response(status=404)
        return response
| agpl-3.0 |
mattattack7/canvas-contrib | API_Examples/reports/provisioning_report/python/pull_provisioning_report.py | 4 | 4267 | #!/usr/bin/env python
# Python 2 script: pulls a Canvas provisioning report via the Reports API
# and saves the resulting CSV locally.
import requests
import time, json, os
import re,pprint
# Change this to match your access token
token="<access_token>"
# This should be your account number. This is the number you see when logged into canvas
# as an admin. i.e. https://schoolname.insructure.com/accounts/SOME_NUMBER_HERE
ACCOUNT_ID = '88888'
# Change this to match the domain you use to access Canvas.
CANVAS_DOMAIN = "<schoolname>.instructure.com"
# Change this to the full path of your desired output folder. I've set it to the current
# directory for the sake of this script
OUTPUT_FOLDER = os.path.dirname(os.path.abspath(__file__))
# Change this to the term to pull for, otherwise this will pull for all terms.
ENROLLMENT_TERM = False
# Edit each of these to determine which to include in the report
include_deleted_items = True
do_accounts = True
do_courses = False
do_enrollments = True
do_sections = False
do_terms = True
do_users = True
do_xlist = True
do_group_membership = True
do_groups = True
###################################################################################
#### DON'T CHANGE anything after this unless you know what you are doing. #########
# BASE_DOMAIN keeps one escaped %%s placeholder for the endpoint path.
BASE_DOMAIN = "https://%s/api/v1/%%s/" % CANVAS_DOMAIN
BASE_URI = BASE_DOMAIN % "accounts/%s/reports" % ACCOUNT_ID
BASE_START_URI = BASE_DOMAIN % "accounts/%s/reports/%%s" % ACCOUNT_ID
BASE_FILE_URI = BASE_DOMAIN % "files/%s"
# This headers dictionary is used for almost every request
headers = {"Authorization":"Bearer %s" % token}
""" These are the standard reports every account has access to. """
standard_reports = (
    'student_assignment_outcome_map_csv',
    'grade_export_csv',
    'sis_export_csv',
    'provisioning_csv')
# This is the list of parameters used for the sis_export_csv report, I think I'm actually
# missing one, parameters[enrollment_term], but I'm not sure
report_parameters = {
    "parameters[accounts]": do_accounts,
    "parameters[courses]": do_courses,
    "parameters[enrollments]": do_enrollments,
    "parameters[groups]": do_groups,
    "parameters[group_membership]": do_group_membership,
    "parameters[include_deleted]": include_deleted_items,
    "parameters[sections]": do_sections,
    "parameters[terms]": do_terms,
    "parameters[users]": do_users,
    "parameters[xlist]": do_xlist}
# If ENROLLMENT_TERM isn't False, add it to the parameters list
if ENROLLMENT_TERM != False:
    report_parameters["parameters[enrollment_term]"]=ENROLLMENT_TERM
# Step 1: Start the report
# standard_reports[3] is 'provisioning_csv'.
start_report_url = BASE_START_URI % standard_reports[3]
print "running the report..."
start_report_response = requests.post(start_report_url,headers=headers,params=report_parameters)
print start_report_response.text
# Use the id from that output to check the progress of the report.
# NOTE(review): assumes start_report_url ends with '/' so plain
# concatenation yields a valid URL — confirm against BASE_DOMAIN.
status_url = start_report_url + "%s" % start_report_response.json()['id']
status_response = requests.get(status_url,headers=headers)
status_response_json = status_response.json()
# Step 2: Wait for the report to be finished (poll every 4 seconds)
while status_response_json['progress'] < 100:
    status_response = requests.get(status_url,headers=headers)
    status_response_json = status_response.json()
    time.sleep(4)
    print 'report progress',status_response_json['progress']
file_url = status_response_json['file_url']
file_id_pattern = re.compile('files\/(\d+)\/download')
# Once "progress" is 100 then parse out the number between "files" and "download",
# 22591162 in this case, and use this number to request the files
# Step 3: Pull out the file number
try:
    found_id = file_id_pattern.findall(file_url)[0]
except:
    # Bare except: any failure (no match, missing key) means "no id found".
    found_id = False
if not found_id:
    print "I couldn't find the file id"
else:
    file_info_url = BASE_FILE_URI % found_id
    # Step 4: Pull out the Canvas file info from the files API
    file_info_response = requests.get(file_info_url,headers=headers)
    print file_info_response.text
    file_info_response_json = file_info_response.json()
    getter_url = file_info_response_json['url']
    print 'getter',getter_url
    # Step 5: Finally fetch the file and save it to the output directory
    end_file_response = requests.get(getter_url,allow_redirects=True)
    print end_file_response.status_code
    with open(file_info_response_json['filename'],'w+b') as filename:
        filename.write(end_file_response.content)
apporc/nova | nova/api/openstack/compute/legacy_v2/contrib/createserverext.py | 100 | 1156 | # Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.openstack import extensions
class Createserverext(extensions.ExtensionDescriptor):
    """Extended support to the Create Server v1.1 API."""
    # Extension metadata consumed by Nova's API extension framework.
    name = "Createserverext"
    alias = "os-create-server-ext"
    namespace = ("http://docs.openstack.org/compute/ext/"
                 "createserverext/api/v1.1")
    updated = "2011-07-19T00:00:00Z"
    def get_resources(self):
        # Expose /os-create-server-ext as a resource that inherits the
        # behaviour of the standard 'servers' controller.
        res = extensions.ResourceExtension('os-create-server-ext',
                                           inherits='servers')
        return [res]
| apache-2.0 |
gauravbose/digital-menu | digimenu2/django/contrib/sessions/backends/db.py | 37 | 2943 | import logging
from django.contrib.sessions.backends.base import CreateError, SessionBase
from django.core.exceptions import SuspiciousOperation
from django.db import IntegrityError, router, transaction
from django.utils import timezone
from django.utils.encoding import force_text
class SessionStore(SessionBase):
    """
    Implements database session store.
    """
    def __init__(self, session_key=None):
        super(SessionStore, self).__init__(session_key)
    def load(self):
        """Return the decoded session dict, or {} (after creating a fresh
        session) when the key is missing, expired, or tampered with.
        """
        try:
            s = Session.objects.get(
                session_key=self.session_key,
                expire_date__gt=timezone.now()
            )
            return self.decode(s.session_data)
        except (Session.DoesNotExist, SuspiciousOperation) as e:
            if isinstance(e, SuspiciousOperation):
                # Tampered session data is a security event, not a plain miss.
                logger = logging.getLogger('django.security.%s' %
                        e.__class__.__name__)
                logger.warning(force_text(e))
            self.create()
            return {}
    def exists(self, session_key):
        """True when a row with this session key exists (expired or not)."""
        return Session.objects.filter(session_key=session_key).exists()
    def create(self):
        """Generate a new unique session key and persist an empty session,
        retrying on key collisions.
        """
        while True:
            self._session_key = self._get_new_session_key()
            try:
                # Save immediately to ensure we have a unique entry in the
                # database.
                self.save(must_create=True)
            except CreateError:
                # Key wasn't unique. Try again.
                continue
            self.modified = True
            self._session_cache = {}
            return
    def save(self, must_create=False):
        """
        Saves the current session data to the database. If 'must_create' is
        True, a database error will be raised if the saving operation doesn't
        create a *new* entry (as opposed to possibly updating an existing
        entry).
        """
        obj = Session(
            session_key=self._get_or_create_session_key(),
            session_data=self.encode(self._get_session(no_load=must_create)),
            expire_date=self.get_expiry_date()
        )
        using = router.db_for_write(Session, instance=obj)
        try:
            # atomic() keeps the IntegrityError from poisoning an outer
            # transaction before we translate it to CreateError.
            with transaction.atomic(using=using):
                obj.save(force_insert=must_create, using=using)
        except IntegrityError:
            if must_create:
                raise CreateError
            raise
    def delete(self, session_key=None):
        """Delete the given session row (defaults to this store's own key);
        silently ignores a missing row.
        """
        if session_key is None:
            if self.session_key is None:
                return
            session_key = self.session_key
        try:
            Session.objects.get(session_key=session_key).delete()
        except Session.DoesNotExist:
            pass
    @classmethod
    def clear_expired(cls):
        """Purge all expired session rows (used by clearsessions)."""
        Session.objects.filter(expire_date__lt=timezone.now()).delete()
# At bottom to avoid circular import
from django.contrib.sessions.models import Session # isort:skip
| bsd-3-clause |
dankcoin/dankcoin | contrib/devtools/optimize-pngs.py | 126 | 3201 | #!/usr/bin/env python
'''
Run this script every time you change one of the png files. Using pngcrush, it will optimize the png files, remove various color profiles, remove ancillary chunks (alla) and text chunks (text).
#pngcrush -brute -ow -rem gAMA -rem cHRM -rem iCCP -rem sRGB -rem alla -rem text
'''
import os
import sys
import subprocess
import hashlib
from PIL import Image
def file_hash(filename):
    '''Return hash of raw file contents'''
    digest = hashlib.sha256()
    with open(filename, 'rb') as fp:
        digest.update(fp.read())
    return digest.hexdigest()
def content_hash(filename):
    '''Return hash of RGBA contents of image'''
    # Decoding to RGBA hashes the pixel data only, so we can verify that
    # pngcrush changed the encoding without altering the image content.
    i = Image.open(filename)
    i = i.convert('RGBA')
    data = i.tobytes()
    return hashlib.sha256(data).hexdigest()
# Python 2 script body: crush every PNG under the listed repo folders and
# report size savings, verifying pixel content is unchanged.
pngcrush = 'pngcrush'
git = 'git'
folders = ["src/qt/res/movies", "src/qt/res/icons", "share/pixmaps"]
basePath = subprocess.check_output([git, 'rev-parse', '--show-toplevel']).rstrip('\n')
totalSaveBytes = 0
noHashChange = True
outputArray = []
for folder in folders:
    absFolder=os.path.join(basePath, folder)
    for file in os.listdir(absFolder):
        extension = os.path.splitext(file)[1]
        if extension.lower() == '.png':
            print("optimizing "+file+"..."),
            file_path = os.path.join(absFolder, file)
            fileMetaMap = {'file' : file, 'osize': os.path.getsize(file_path), 'sha256Old' : file_hash(file_path)};
            fileMetaMap['contentHashPre'] = content_hash(file_path)
            pngCrushOutput = ""
            try:
                pngCrushOutput = subprocess.check_output(
                        [pngcrush, "-brute", "-ow", "-rem", "gAMA", "-rem", "cHRM", "-rem", "iCCP", "-rem", "sRGB", "-rem", "alla", "-rem", "text", file_path],
                        stderr=subprocess.STDOUT).rstrip('\n')
            except:
                # Bare except treats any failure as "pngcrush missing".
                print "pngcrush is not installed, aborting..."
                sys.exit(0)
            #verify the crushed file still parses as a PNG
            if "Not a PNG file" in subprocess.check_output([pngcrush, "-n", "-v", file_path], stderr=subprocess.STDOUT):
                print "PNG file "+file+" is corrupted after crushing, check out pngcursh version"
                sys.exit(1)
            fileMetaMap['sha256New'] = file_hash(file_path)
            fileMetaMap['contentHashPost'] = content_hash(file_path)
            # Pixel content must be identical; only the encoding may change.
            if fileMetaMap['contentHashPre'] != fileMetaMap['contentHashPost']:
                print "Image contents of PNG file "+file+" before and after crushing don't match"
                sys.exit(1)
            fileMetaMap['psize'] = os.path.getsize(file_path)
            outputArray.append(fileMetaMap)
            print("done\n"),
print "summary:\n+++++++++++++++++"
for fileDict in outputArray:
    oldHash = fileDict['sha256Old']
    newHash = fileDict['sha256New']
    totalSaveBytes += fileDict['osize'] - fileDict['psize']
    noHashChange = noHashChange and (oldHash == newHash)
    print fileDict['file']+"\n size diff from: "+str(fileDict['osize'])+" to: "+str(fileDict['psize'])+"\n old sha256: "+oldHash+"\n new sha256: "+newHash+"\n"
print "completed. Checksum stable: "+str(noHashChange)+". Total reduction: "+str(totalSaveBytes)+" bytes"
| mit |
gorjuce/odoo | addons/l10n_in_hr_payroll/__openerp__.py | 374 | 2622 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Indian Payroll',
'category': 'Localization',
'author': 'OpenERP SA',
'website':'http://www.openerp.com',
'depends': ['hr_payroll'],
'version': '1.0',
'description': """
Indian Payroll Salary Rules.
============================
-Configuration of hr_payroll for India localization
-All main contributions rules for India payslip.
* New payslip report
* Employee Contracts
* Allow to configure Basic / Gross / Net Salary
* Employee PaySlip
* Allowance / Deduction
* Integrated with Holiday Management
* Medical Allowance, Travel Allowance, Child Allowance, ...
- Payroll Advice and Report
- Yearly Salary by Head and Yearly Salary by Employee Report
""",
'active': False,
'data': [
'l10n_in_hr_payroll_view.xml',
'data/l10n_in_hr_payroll_data.xml',
'data/hr.salary.rule.csv',
'security/ir.model.access.csv',
'l10n_in_hr_payroll_report.xml',
'l10n_in_hr_payroll_sequence.xml',
'views/report_payslipdetails.xml',
'views/report_hrsalarybymonth.xml',
'wizard/hr_salary_employee_bymonth_view.xml',
'wizard/hr_yearly_salary_detail_view.xml',
'report/payment_advice_report_view.xml',
'report/payslip_report_view.xml',
'views/report_hryearlysalary.xml',
'views/report_payrolladvice.xml',
],
'test': [
'test/payment_advice.yml',
'test/payment_advice_batch.yml'
],
'demo': ['l10n_in_hr_payroll_demo.xml'],
'installable': True
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
hjanime/VisTrails | vistrails/packages/controlflow/conditional.py | 1 | 7838 | ###############################################################################
##
## Copyright (C) 2014-2015, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
from __future__ import division
from vistrails.core.modules.vistrails_module import Module, InvalidOutput, \
ModuleError
import copy
#################################################################################
## If Operator
class If(Module):
    """
    The If module allows the user to choose the part of the workflow to be
    executed through the use of a condition: the branch connected to
    'TruePort' runs when 'Condition' is true, otherwise the branch on
    'FalsePort' runs.
    """

    def update_upstream(self):
        """A modified version of the update_upstream method.

        Identical to the base behaviour except that nothing upstream of
        TruePort/FalsePort (or their output-port lists) is updated here;
        the selected branch is updated lazily in compute() so only one
        branch of the workflow executes.
        """
        # everything is the same except that we don't update anything
        # upstream of TruePort or FalsePort
        excluded_ports = set(['TruePort', 'FalsePort', 'TrueOutputPorts',
                              'FalseOutputPorts'])
        for port_name, connector_list in self.inputPorts.iteritems():
            if port_name not in excluded_ports:
                for connector in connector_list:
                    connector.obj.update()
        # Iterate over a copy: remove_input_connector() mutates
        # self.inputPorts while we walk it.
        for port_name, connectorList in copy.copy(self.inputPorts.items()):
            if port_name not in excluded_ports:
                for connector in connectorList:
                    if connector.obj.get_output(connector.port) is \
                            InvalidOutput:
                        self.remove_input_connector(port_name, connector)

    def compute(self):
        """The compute method for the If module."""
        if not self.has_input('Condition'):
            raise ModuleError(self, 'Must set condition')
        cond = self.get_input('Condition')

        # Pick the branch port (and its list of output-port names) that
        # matches the condition.
        if cond:
            port_name = 'TruePort'
            output_ports_name = 'TrueOutputPorts'
        else:
            port_name = 'FalsePort'
            output_ports_name = 'FalseOutputPorts'

        if self.has_input(output_ports_name):
            for connector in self.inputPorts.get(output_ports_name):
                connector.obj.update()

        if not self.has_input(port_name):
            raise ModuleError(self, 'Must set ' + port_name)

        # Run only the selected branch.
        for connector in self.inputPorts.get(port_name):
            connector.obj.update()

        if self.has_input(output_ports_name):
            output_ports = self.get_input(output_ports_name)
            result = []
            for output_port in output_ports:
                # NOTE(review): 'connector' is the last one left over from
                # the loop above — this assumes a single connection on
                # TruePort/FalsePort; confirm if multiple are possible.
                result.append(connector.obj.get_output(output_port))
            # FIXME can we just make this a list?
            if len(output_ports) == 1:
                self.set_output('Result', result[0])
            else:
                self.set_output('Result', result)
#################################################################################
## Default module
class Default(Module):
    """Provide a fallback value for an optional connection.

    Placed in the middle of a connection, this module forwards the value
    from its 'Input' port when one is set, and otherwise emits the value
    from its 'Default' port.  This is particularly useful with
    subworkflows whose InputPort modules have ``optional`` set to True.
    """

    def compute(self):
        # Prefer the upstream value; fall back to the configured default.
        source_port = 'Input' if self.has_input('Input') else 'Default'
        self.set_output('Result', self.get_input(source_port))
###############################################################################
import unittest
import urllib2
from vistrails.tests.utils import intercept_result, execute
class TestIf(unittest.TestCase):
    """Execute a tiny pipeline through the interpreter and check that the
    If module runs exactly one branch."""

    def do_if(self, val):
        # Pipeline: an If module with two Integer modules wired to it —
        # 42 on TruePort, 28 on FalsePort — and Condition set to `val`.
        with intercept_result(If, 'Result') as results:
            interp_dict = execute([
                    ('If', 'org.vistrails.vistrails.control_flow', [
                        ('FalseOutputPorts', [('List', "['value']")]),
                        ('TrueOutputPorts', [('List', "['value']")]),
                        ('Condition', [('Boolean', str(val))]),
                    ]),
                    ('Integer', 'org.vistrails.vistrails.basic', [
                        ('value', [('Integer', '42')]),
                    ]),
                    ('Integer', 'org.vistrails.vistrails.basic', [
                        ('value', [('Integer', '28')]),
                    ]),
                ],
                [
                    (1, 'self', 0, 'TruePort'),
                    (2, 'self', 0, 'FalsePort'),
                ],
                full_results=True)
        self.assertFalse(interp_dict.errors)
        # Result comes from the selected branch only...
        if val:
            self.assertEqual(results, [42])
        else:
            self.assertEqual(results, [28])
        # ...and the unselected Integer module must not have executed.
        self.assertEqual(interp_dict.executed, {0: True, 1: val, 2: not val})

    def test_if_true(self):
        self.do_if(True)

    def test_if_false(self):
        self.do_if(False)
class TestDefault(unittest.TestCase):
    """Check that Default forwards its Input when set and falls back to
    its Default port otherwise."""

    def do_default(self, val):
        # A PythonSource upstream either produces 42 (val=True) or emits
        # InvalidOutput (val=False), simulating an unset optional input.
        if val:
            src = 'o = 42'
        else:
            src = ('from vistrails.core.modules.vistrails_module import '
                   'InvalidOutput\n'
                   'o = InvalidOutput')
        # PythonSource expects its source urlencoded.
        src = urllib2.quote(src)
        with intercept_result(Default, 'Result') as results:
            self.assertFalse(execute([
                    ('Default', 'org.vistrails.vistrails.control_flow', [
                        ('Default', [('Integer', '28')]),
                    ]),
                    ('PythonSource', 'org.vistrails.vistrails.basic', [
                        ('source', [('String', src)]),
                    ]),
                ],
                [
                    (1, 'o', 0, 'Input'),
                ],
                add_port_specs=[
                    (1, 'output', 'o',
                     'org.vistrails.vistrails.basic:Integer'),
                ]))
        if val:
            self.assertEqual(results, [42])
        else:
            self.assertEqual(results, [28])

    def test_default_set(self):
        self.do_default(True)

    def test_default_unset(self):
        self.do_default(False)
| bsd-3-clause |
mrkm4ntr/incubator-airflow | airflow/contrib/operators/gcp_compute_operator.py | 7 | 4822 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use `airflow.providers.google.cloud.operators.compute`."""
import warnings
from airflow.providers.google.cloud.operators.compute import (
ComputeEngineBaseOperator,
ComputeEngineCopyInstanceTemplateOperator,
ComputeEngineInstanceGroupUpdateManagerTemplateOperator,
ComputeEngineSetMachineTypeOperator,
ComputeEngineStartInstanceOperator,
ComputeEngineStopInstanceOperator,
)
# Warn once at import time so any code importing this legacy module path
# is pointed at the new provider location.
warnings.warn(
    "This module is deprecated. Please use `airflow.providers.google.cloud.operators.compute`.",
    DeprecationWarning,
    stacklevel=2,
)
class GceBaseOperator(ComputeEngineBaseOperator):
    """Deprecated alias kept for backward compatibility.

    Use
    :class:`airflow.providers.google.cloud.operators.compute.ComputeEngineBaseOperator`
    instead.
    """

    def __init__(self, *args, **kwargs):
        # Warn at the caller's site (stacklevel=3 skips this __init__ and
        # the subclass machinery), then delegate construction unchanged.
        deprecation_message = """This class is deprecated.
            Please use `airflow.providers.google.cloud.operators.compute.ComputeEngineBaseOperator`."""
        warnings.warn(deprecation_message, DeprecationWarning, stacklevel=3)
        super().__init__(*args, **kwargs)
class GceInstanceGroupManagerUpdateTemplateOperator(ComputeEngineInstanceGroupUpdateManagerTemplateOperator):
    """Deprecated alias kept for backward compatibility.

    Use
    :class:`airflow.providers.google.cloud.operators.compute.ComputeEngineInstanceGroupUpdateManagerTemplateOperator`
    instead.
    """

    def __init__(self, *args, **kwargs):
        # Warn at the caller's site, then delegate construction unchanged.
        deprecation_message = """This class is deprecated. Please use
            `airflow.providers.google.cloud.operators.compute
            .ComputeEngineInstanceGroupUpdateManagerTemplateOperator`."""
        warnings.warn(deprecation_message, DeprecationWarning, stacklevel=3)
        super().__init__(*args, **kwargs)
class GceInstanceStartOperator(ComputeEngineStartInstanceOperator):
    """Deprecated alias kept for backward compatibility.

    Use
    :class:`airflow.providers.google.cloud.operators.compute.ComputeEngineStartInstanceOperator`
    instead.
    """

    def __init__(self, *args, **kwargs):
        # Warn at the caller's site, then delegate construction unchanged.
        deprecation_message = """This class is deprecated.
            Please use `airflow.providers.google.cloud.operators.compute
            .ComputeEngineStartInstanceOperator`."""
        warnings.warn(deprecation_message, DeprecationWarning, stacklevel=3)
        super().__init__(*args, **kwargs)
class GceInstanceStopOperator(ComputeEngineStopInstanceOperator):
    """Deprecated alias kept for backward compatibility.

    Use
    :class:`airflow.providers.google.cloud.operators.compute.ComputeEngineStopInstanceOperator`
    instead.
    """

    def __init__(self, *args, **kwargs):
        # Warn at the caller's site, then delegate construction unchanged.
        deprecation_message = """This class is deprecated.
            Please use `airflow.providers.google.cloud.operators.compute
            .ComputeEngineStopInstanceOperator`."""
        warnings.warn(deprecation_message, DeprecationWarning, stacklevel=3)
        super().__init__(*args, **kwargs)
class GceInstanceTemplateCopyOperator(ComputeEngineCopyInstanceTemplateOperator):
    """Deprecated alias kept for backward compatibility.

    Use
    :class:`airflow.providers.google.cloud.operators.compute.ComputeEngineCopyInstanceTemplateOperator`
    instead.
    """

    def __init__(self, *args, **kwargs):
        # BUGFIX: the original message literal began with four quotes
        # (''""""This...''), so the emitted warning started with a stray
        # '"' character.  Fixed to a normal triple-quoted string.
        warnings.warn(
            """This class is deprecated.
            Please use `airflow.providers.google.cloud.operators.compute
            .ComputeEngineCopyInstanceTemplateOperator`.""",
            DeprecationWarning,
            stacklevel=3,
        )
        super().__init__(*args, **kwargs)
class GceSetMachineTypeOperator(ComputeEngineSetMachineTypeOperator):
    """Deprecated alias kept for backward compatibility.

    Use
    :class:`airflow.providers.google.cloud.operators.compute.ComputeEngineSetMachineTypeOperator`
    instead.
    """

    def __init__(self, *args, **kwargs):
        # Warn at the caller's site, then delegate construction unchanged.
        deprecation_message = """This class is deprecated.
            Please use `airflow.providers.google.cloud.operators.compute
            .ComputeEngineSetMachineTypeOperator`."""
        warnings.warn(deprecation_message, DeprecationWarning, stacklevel=3)
        super().__init__(*args, **kwargs)
| apache-2.0 |
jcurbelo/networkx | networkx/linalg/laplacianmatrix.py | 12 | 7749 | """Laplacian matrix of graphs.
"""
# Copyright (C) 2004-2015 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import networkx as nx
from networkx.utils import not_implemented_for
__author__ = "\n".join(['Aric Hagberg <aric.hagberg@gmail.com>',
'Pieter Swart (swart@lanl.gov)',
'Dan Schult (dschult@colgate.edu)',
'Alejandro Weinstein <alejandro.weinstein@gmail.com>'])
__all__ = ['laplacian_matrix',
'normalized_laplacian_matrix',
'directed_laplacian_matrix']
@not_implemented_for('directed')
def laplacian_matrix(G, nodelist=None, weight='weight'):
    """Return the Laplacian matrix of G.

    The graph Laplacian is the matrix L = D - A, where A is the adjacency
    matrix and D is the diagonal matrix of node degrees.

    Parameters
    ----------
    G : graph
       A NetworkX graph

    nodelist : list, optional
       The rows and columns are ordered according to the nodes in nodelist.
       If nodelist is None, then the ordering is produced by G.nodes().

    weight : string or None, optional (default='weight')
       The edge data key used to compute each value in the matrix.
       If None, then each edge has weight 1.

    Returns
    -------
    L : SciPy sparse matrix
      The Laplacian matrix of G.

    Notes
    -----
    For MultiGraph/MultiDiGraph, the edges weights are summed.

    See Also
    --------
    to_numpy_matrix
    normalized_laplacian_matrix
    """
    import scipy.sparse

    if nodelist is None:
        nodelist = list(G)
    adjacency = nx.to_scipy_sparse_matrix(G, nodelist=nodelist, weight=weight,
                                          format='csr')
    rows, cols = adjacency.shape
    # Row sums give the (weighted) degree of each node.
    degrees = adjacency.sum(axis=1)
    degree_matrix = scipy.sparse.spdiags(degrees.flatten(), [0], cols, rows,
                                         format='csr')
    return degree_matrix - adjacency
@not_implemented_for('directed')
def normalized_laplacian_matrix(G, nodelist=None, weight='weight'):
    r"""Return the normalized Laplacian matrix of G.

    The normalized graph Laplacian is the matrix

    .. math::

        N = D^{-1/2} L D^{-1/2}

    where `L` is the graph Laplacian and `D` is the diagonal matrix of
    node degrees.

    Parameters
    ----------
    G : graph
       A NetworkX graph

    nodelist : list, optional
       The rows and columns are ordered according to the nodes in nodelist.
       If nodelist is None, then the ordering is produced by G.nodes().

    weight : string or None, optional (default='weight')
       The edge data key used to compute each value in the matrix.
       If None, then each edge has weight 1.

    Returns
    -------
    N : SciPy sparse matrix
      The normalized Laplacian matrix of G.

    Notes
    -----
    For MultiGraph/MultiDiGraph, the edges weights are summed.
    See to_numpy_matrix for other options.

    If the Graph contains selfloops, D is defined as diag(sum(A,1)), where A is
    the adjacency matrix [2]_.

    See Also
    --------
    laplacian_matrix

    References
    ----------
    .. [1] Fan Chung-Graham, Spectral Graph Theory,
       CBMS Regional Conference Series in Mathematics, Number 92, 1997.
    .. [2] Steve Butler, Interlacing For Weighted Graphs Using The Normalized
       Laplacian, Electronic Journal of Linear Algebra, Volume 16, pp. 90-98,
       March 2007.
    """
    import numpy
    import scipy.sparse

    if nodelist is None:
        nodelist = list(G)
    A = nx.to_scipy_sparse_matrix(G, nodelist=nodelist, weight=weight,
                                  format='csr')
    n, m = A.shape
    diags = A.sum(axis=1).flatten()
    D = scipy.sparse.spdiags(diags, [0], m, n, format='csr')
    L = D - A
    # BUGFIX: the original called scipy.errstate / scipy.sqrt / scipy.isinf,
    # which were deprecated NumPy re-exports removed from modern SciPy
    # (>= 1.12).  Call NumPy directly; the numerics are identical.
    with numpy.errstate(divide='ignore'):
        # Isolated nodes have zero degree; 1/sqrt(0) -> inf is zeroed below.
        diags_sqrt = 1.0 / numpy.sqrt(diags)
        diags_sqrt[numpy.isinf(diags_sqrt)] = 0
    DH = scipy.sparse.spdiags(diags_sqrt, [0], m, n, format='csr')
    return DH.dot(L.dot(DH))
###############################################################################
# Code based on
# https://bitbucket.org/bedwards/networkx-community/src/370bd69fc02f/networkx/algorithms/community/
@not_implemented_for('undirected')
@not_implemented_for('multigraph')
def directed_laplacian_matrix(G, nodelist=None, weight='weight',
                              walk_type=None, alpha=0.95):
    r"""Return the directed Laplacian matrix of G.

    The graph directed Laplacian is the matrix

    .. math::

        L = I - (\Phi^{1/2} P \Phi^{-1/2} + \Phi^{-1/2} P^T \Phi^{1/2} ) / 2

    where `I` is the identity matrix, `P` is the transition matrix of the
    graph, and `\Phi` a matrix with the Perron vector of `P` in the diagonal and
    zeros elsewhere.

    Depending on the value of walk_type, `P` can be the transition matrix
    induced by a random walk, a lazy random walk, or a random walk with
    teleportation (PageRank).

    Parameters
    ----------
    G : DiGraph
       A NetworkX graph

    nodelist : list, optional
       The rows and columns are ordered according to the nodes in nodelist.
       If nodelist is None, then the ordering is produced by G.nodes().

    weight : string or None, optional (default='weight')
       The edge data key used to compute each value in the matrix.
       If None, then each edge has weight 1.

    walk_type : string or None, optional (default=None)
       If None, `P` is selected depending on the properties of the
       graph. Otherwise is one of 'random', 'lazy', or 'pagerank'

    alpha : real
       (1 - alpha) is the teleportation probability used with pagerank

    Returns
    -------
    L : NumPy array
      Normalized Laplacian of G.

    Raises
    ------
    NetworkXError
        If NumPy cannot be imported

    NetworkXNotImplemnted
        If G is not a DiGraph

    Notes
    -----
    Only implemented for DiGraphs

    See Also
    --------
    laplacian_matrix

    References
    ----------
    .. [1] Fan Chung (2005).
       Laplacians and the Cheeger inequality for directed graphs.
       Annals of Combinatorics, 9(1), 2005
    """
    # BUGFIX: the original used `scipy as sp` for sp.array/sp.where/sp.sqrt/
    # sp.identity -- deprecated NumPy re-exports removed from modern SciPy
    # (>= 1.12).  Use NumPy directly; values are unchanged.
    import numpy as np
    from scipy.sparse import identity, spdiags, linalg

    if walk_type is None:
        # Random walks are well defined only on strongly connected,
        # aperiodic graphs; otherwise fall back to lazy walk / PageRank.
        if nx.is_strongly_connected(G):
            if nx.is_aperiodic(G):
                walk_type = "random"
            else:
                walk_type = "lazy"
        else:
            walk_type = "pagerank"

    M = nx.to_scipy_sparse_matrix(G, nodelist=nodelist, weight=weight,
                                  dtype=float)
    n, m = M.shape
    if walk_type in ["random", "lazy"]:
        # DI = inverse out-degree matrix, so rows of DI * M sum to 1.
        DI = spdiags(1.0 / np.array(M.sum(axis=1).flat), [0], n, n)
        if walk_type == "random":
            P = DI * M
        else:
            I = identity(n)
            P = (I + DI * M) / 2.0

    elif walk_type == "pagerank":
        if not (0 < alpha < 1):
            raise nx.NetworkXError('alpha must be between 0 and 1')
        # this is using a dense representation
        M = M.todense()
        # add constant to dangling nodes' row
        dangling = np.where(M.sum(axis=1) == 0)
        for d in dangling[0]:
            M[d] = 1.0 / n
        # normalize
        M = M / M.sum(axis=1)
        P = alpha * M + (1 - alpha) / n
    else:
        raise nx.NetworkXError("walk_type must be random, lazy, or pagerank")

    # Perron vector of P (left dominant eigenvector = stationary distribution).
    evals, evecs = linalg.eigs(P.T, k=1)
    v = evecs.flatten().real
    p = v / v.sum()
    sqrtp = np.sqrt(p)
    Q = spdiags(sqrtp, [0], n, n) * P * spdiags(1.0 / sqrtp, [0], n, n)
    # Dense identity here on purpose (the original's sp.identity was the
    # dense NumPy identity, not scipy.sparse.identity).
    I = np.identity(len(G))
    return I - (Q + Q.T) / 2.0
# fixture for nose tests
def setup_module(module):
    """Nose fixture: skip this module's tests when NumPy is unavailable."""
    from nose import SkipTest
    try:
        import numpy
    except ImportError:
        # BUGFIX: the original bare `except:` turned *any* error during
        # import (even KeyboardInterrupt) into a skip; only ImportError
        # actually means NumPy is missing.
        raise SkipTest("NumPy not available")
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.